Repository: angr/pypcode Branch: master Commit: 1e972f2dc7cc Files: 930 Total size: 14.9 MB Directory structure: gitextract_s1iu7cno/ ├── .clang-format ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug-report.yml │ │ ├── config.yml │ │ ├── feature-request.yml │ │ └── question.yml │ ├── dependabot.yml │ └── workflows/ │ └── build.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .pylintrc ├── .readthedocs.yml ├── CMakeLists.txt ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── docs/ │ ├── Makefile │ ├── api.rst │ ├── conf.py │ ├── guide.rst │ ├── index.rst │ ├── languages.rst │ └── make.bat ├── pypcode/ │ ├── __init__.py │ ├── __main__.py │ ├── __version__.py │ ├── docs/ │ │ └── ghidra/ │ │ ├── DISCLAIMER.md │ │ ├── LICENSE │ │ └── NOTICE │ ├── printing.py │ ├── processors/ │ │ ├── 6502/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── 6502.cspec │ │ │ │ ├── 6502.ldefs │ │ │ │ ├── 6502.pspec │ │ │ │ ├── 6502.slaspec │ │ │ │ └── 65c02.slaspec │ │ │ └── manuals/ │ │ │ ├── 6502.idx │ │ │ └── 65c02.idx │ │ ├── 68000/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── 68000.cspec │ │ │ │ ├── 68000.dwarf │ │ │ │ ├── 68000.ldefs │ │ │ │ ├── 68000.opinion │ │ │ │ ├── 68000.pspec │ │ │ │ ├── 68000.sinc │ │ │ │ ├── 68000_register.cspec │ │ │ │ ├── 68020.slaspec │ │ │ │ ├── 68030.slaspec │ │ │ │ ├── 68040.slaspec │ │ │ │ └── coldfire.slaspec │ │ │ ├── manuals/ │ │ │ │ └── 68000.idx │ │ │ └── patterns/ │ │ │ ├── 68000_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── 8048/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── 8048.cspec │ │ │ │ ├── 8048.ldefs │ │ │ │ ├── 8048.pspec │ │ │ │ └── 8048.slaspec │ │ │ └── manuals/ │ │ │ └── 8048.idx │ │ ├── 8051/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── 80251.cspec │ │ │ │ ├── 80251.pspec │ │ │ │ ├── 80251.sinc │ │ │ │ ├── 80251.slaspec │ │ │ │ ├── 80390.cspec │ │ │ │ ├── 80390.slaspec │ │ │ │ ├── 8051.cspec │ │ │ │ ├── 8051.ldefs │ │ │ │ ├── 8051.opinion │ │ │ │ ├── 8051.pspec │ │ │ │ ├── 8051.slaspec │ │ │ │ ├── 
8051_archimedes.cspec │ │ │ │ ├── 8051_main.sinc │ │ │ │ ├── mx51.cspec │ │ │ │ ├── mx51.pspec │ │ │ │ ├── mx51.sinc │ │ │ │ ├── mx51.slaspec │ │ │ │ └── old/ │ │ │ │ ├── 8051v1.lang │ │ │ │ └── 8051v1.trans │ │ │ └── manuals/ │ │ │ └── 8051.idx │ │ ├── 8085/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── 8085.cspec │ │ │ ├── 8085.ldefs │ │ │ ├── 8085.pspec │ │ │ └── 8085.slaspec │ │ ├── AARCH64/ │ │ │ └── data/ │ │ │ ├── aarch64-pltThunks.xml │ │ │ ├── languages/ │ │ │ │ ├── AARCH64.cspec │ │ │ │ ├── AARCH64.dwarf │ │ │ │ ├── AARCH64.ldefs │ │ │ │ ├── AARCH64.opinion │ │ │ │ ├── AARCH64.pspec │ │ │ │ ├── AARCH64.slaspec │ │ │ │ ├── AARCH64BE.slaspec │ │ │ │ ├── AARCH64_AMXext.sinc │ │ │ │ ├── AARCH64_AppleSilicon.slaspec │ │ │ │ ├── AARCH64_apple.cspec │ │ │ │ ├── AARCH64_base_PACoptions.sinc │ │ │ │ ├── AARCH64_golang.cspec │ │ │ │ ├── AARCH64_golang.register.info │ │ │ │ ├── AARCH64_ilp32.cspec │ │ │ │ ├── AARCH64_swift.cspec │ │ │ │ ├── AARCH64_win.cspec │ │ │ │ ├── AARCH64base.sinc │ │ │ │ ├── AARCH64instructions.sinc │ │ │ │ ├── AARCH64ldst.sinc │ │ │ │ ├── AARCH64neon.sinc │ │ │ │ ├── AARCH64sve.sinc │ │ │ │ └── AppleSilicon.ldefs │ │ │ ├── manuals/ │ │ │ │ └── AARCH64.idx │ │ │ └── patterns/ │ │ │ ├── AARCH64_LE_patterns.xml │ │ │ ├── AARCH64_win_patterns.xml │ │ │ ├── patternconstraints.xml │ │ │ └── prepatternconstraints.xml │ │ ├── ARM/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── ARM.cspec │ │ │ │ ├── ARM.dwarf │ │ │ │ ├── ARM.gdis │ │ │ │ ├── ARM.ldefs │ │ │ │ ├── ARM.opinion │ │ │ │ ├── ARM.sinc │ │ │ │ ├── ARM4_be.slaspec │ │ │ │ ├── ARM4_le.slaspec │ │ │ │ ├── ARM4t_be.slaspec │ │ │ │ ├── ARM4t_le.slaspec │ │ │ │ ├── ARM5_be.slaspec │ │ │ │ ├── ARM5_le.slaspec │ │ │ │ ├── ARM5t_be.slaspec │ │ │ │ ├── ARM5t_le.slaspec │ │ │ │ ├── ARM6_be.slaspec │ │ │ │ ├── ARM6_le.slaspec │ │ │ │ ├── ARM7_be.slaspec │ │ │ │ ├── ARM7_le.slaspec │ │ │ │ ├── ARM8_be.slaspec │ │ │ │ ├── ARM8_le.slaspec │ │ │ │ ├── ARM8m_be.slaspec │ │ │ │ ├── ARM8m_le.slaspec │ │ │ 
│ ├── ARMCortex.pspec │ │ │ │ ├── ARMTHUMBinstructions.sinc │ │ │ │ ├── ARM_CDE.sinc │ │ │ │ ├── ARM_apcs.cspec │ │ │ │ ├── ARM_v45.cspec │ │ │ │ ├── ARM_v45.pspec │ │ │ │ ├── ARM_win.cspec │ │ │ │ ├── ARMinstructions.sinc │ │ │ │ ├── ARMneon.dwarf │ │ │ │ ├── ARMneon.sinc │ │ │ │ ├── ARMt.pspec │ │ │ │ ├── ARMtTHUMB.pspec │ │ │ │ ├── ARMt_v45.pspec │ │ │ │ ├── ARMt_v6.pspec │ │ │ │ ├── ARMv8.sinc │ │ │ │ └── old/ │ │ │ │ ├── ARMv5.lang │ │ │ │ ├── ARMv5.trans │ │ │ │ ├── THUMBv2.lang │ │ │ │ └── THUMBv2.trans │ │ │ ├── manuals/ │ │ │ │ └── ARM.idx │ │ │ └── patterns/ │ │ │ ├── ARM_BE_patterns.xml │ │ │ ├── ARM_LE_patterns.xml │ │ │ ├── ARM_switch_patterns.xml │ │ │ ├── patternconstraints.xml │ │ │ └── prepatternconstraints.xml │ │ ├── Atmel/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── atmega256.pspec │ │ │ │ ├── avr32.opinion │ │ │ │ ├── avr32a.cspec │ │ │ │ ├── avr32a.ldefs │ │ │ │ ├── avr32a.pspec │ │ │ │ ├── avr32a.slaspec │ │ │ │ ├── avr32a_arithmetic_operations.sinc │ │ │ │ ├── avr32a_autogen.sinc │ │ │ │ ├── avr32a_bit_operations.sinc │ │ │ │ ├── avr32a_coprocessor_interface.sinc │ │ │ │ ├── avr32a_data_transfer.sinc │ │ │ │ ├── avr32a_dsp_operations.sinc │ │ │ │ ├── avr32a_dsp_operations2.sinc │ │ │ │ ├── avr32a_instruction_flow.sinc │ │ │ │ ├── avr32a_logic_operations.sinc │ │ │ │ ├── avr32a_multiplication_operations.sinc │ │ │ │ ├── avr32a_shift_operations.sinc │ │ │ │ ├── avr32a_simd_operations.sinc │ │ │ │ ├── avr32a_system_control.sinc │ │ │ │ ├── avr8.ldefs │ │ │ │ ├── avr8.opinion │ │ │ │ ├── avr8.pspec │ │ │ │ ├── avr8.sinc │ │ │ │ ├── avr8.slaspec │ │ │ │ ├── avr8e.slaspec │ │ │ │ ├── avr8egcc.cspec │ │ │ │ ├── avr8eind.slaspec │ │ │ │ ├── avr8gcc.cspec │ │ │ │ ├── avr8iarV1.cspec │ │ │ │ ├── avr8imgCraftV8.cspec │ │ │ │ ├── avr8xmega.pspec │ │ │ │ └── avr8xmega.slaspec │ │ │ ├── manuals/ │ │ │ │ ├── AVR32.idx │ │ │ │ └── AVR8.idx │ │ │ └── patterns/ │ │ │ ├── AVR8_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── BPF/ │ │ │ └── data/ │ │ 
│ └── languages/ │ │ │ ├── BPF.cspec │ │ │ ├── BPF.ldefs │ │ │ ├── BPF.pspec │ │ │ ├── BPF.sinc │ │ │ └── BPF_le.slaspec │ │ ├── CP1600/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── CP1600.cspec │ │ │ ├── CP1600.ldefs │ │ │ ├── CP1600.opinion │ │ │ ├── CP1600.pspec │ │ │ └── CP1600.slaspec │ │ ├── CR16/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── CR16.cspec │ │ │ │ ├── CR16.ldefs │ │ │ │ ├── CR16.opinion │ │ │ │ ├── CR16.pspec │ │ │ │ ├── CR16B.sinc │ │ │ │ ├── CR16B.slaspec │ │ │ │ ├── CR16C.sinc │ │ │ │ └── CR16C.slaspec │ │ │ └── manuals/ │ │ │ └── CR16.idx │ │ ├── DATA/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── data-be-64.slaspec │ │ │ ├── data-le-64.slaspec │ │ │ ├── data-ptr16.cspec │ │ │ ├── data-ptr32.cspec │ │ │ ├── data-ptr64.cspec │ │ │ ├── data.ldefs │ │ │ ├── data.pspec │ │ │ └── data.sinc │ │ ├── Dalvik/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── Dalvik.ldefs │ │ │ ├── Dalvik.opinion │ │ │ ├── Dalvik_Base.cspec │ │ │ ├── Dalvik_Base.pspec │ │ │ ├── Dalvik_Base.sinc │ │ │ ├── Dalvik_Base.slaspec │ │ │ ├── Dalvik_DEX_Android10.slaspec │ │ │ ├── Dalvik_DEX_Android11.slaspec │ │ │ ├── Dalvik_DEX_Android12.slaspec │ │ │ ├── Dalvik_DEX_KitKat.slaspec │ │ │ ├── Dalvik_DEX_Lollipop.slaspec │ │ │ ├── Dalvik_DEX_Marshmallow.slaspec │ │ │ ├── Dalvik_DEX_Nougat.slaspec │ │ │ ├── Dalvik_DEX_Oreo.slaspec │ │ │ ├── Dalvik_DEX_Pie.slaspec │ │ │ ├── Dalvik_ODEX_KitKat.slaspec │ │ │ ├── Dalvik_OpCode_3E_43_unused.sinc │ │ │ ├── Dalvik_OpCode_73_return_void_barrier.sinc │ │ │ ├── Dalvik_OpCode_73_return_void_no_barrier.sinc │ │ │ ├── Dalvik_OpCode_73_unused.sinc │ │ │ ├── Dalvik_OpCode_79_unused.sinc │ │ │ ├── Dalvik_OpCode_7A_unused.sinc │ │ │ ├── Dalvik_OpCode_E3_EA_dex.sinc │ │ │ ├── Dalvik_OpCode_E3_EA_unused.sinc │ │ │ ├── Dalvik_OpCode_EB_F2_iput_iget.sinc │ │ │ ├── Dalvik_OpCode_EB_F2_unused.sinc │ │ │ ├── Dalvik_OpCode_F3_unused.sinc │ │ │ ├── Dalvik_OpCode_F4_unused.sinc │ │ │ ├── Dalvik_OpCode_F5_unused.sinc │ │ │ ├── 
Dalvik_OpCode_F6_unused.sinc │ │ │ ├── Dalvik_OpCode_F7_unused.sinc │ │ │ ├── Dalvik_OpCode_F8_unused.sinc │ │ │ ├── Dalvik_OpCode_F9_unused.sinc │ │ │ ├── Dalvik_OpCode_FA_FD_dex.sinc │ │ │ ├── Dalvik_OpCode_FA_unused.sinc │ │ │ ├── Dalvik_OpCode_FB_unused.sinc │ │ │ ├── Dalvik_OpCode_FC_unused.sinc │ │ │ ├── Dalvik_OpCode_FD_unused.sinc │ │ │ ├── Dalvik_OpCode_FE_FF_dex.sinc │ │ │ ├── Dalvik_OpCode_FE_unused.sinc │ │ │ └── Dalvik_OpCode_FF_unused.sinc │ │ ├── HCS08/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── HC05-M68HC05TB.pspec │ │ │ │ ├── HC05.cspec │ │ │ │ ├── HC05.ldefs │ │ │ │ ├── HC05.pspec │ │ │ │ ├── HC05.slaspec │ │ │ │ ├── HC08-MC68HC908QY4.pspec │ │ │ │ ├── HC08.ldefs │ │ │ │ ├── HC08.pspec │ │ │ │ ├── HC08.slaspec │ │ │ │ ├── HCS08-MC9S08GB60.pspec │ │ │ │ ├── HCS08.cspec │ │ │ │ ├── HCS08.ldefs │ │ │ │ ├── HCS08.opinion │ │ │ │ ├── HCS08.pspec │ │ │ │ ├── HCS08.slaspec │ │ │ │ └── HCS_HC.sinc │ │ │ ├── manuals/ │ │ │ │ ├── HC05.idx │ │ │ │ ├── HC08.idx │ │ │ │ └── HCS08.idx │ │ │ └── test-vectors/ │ │ │ ├── HC05_tv.s │ │ │ ├── HC08_tv.s │ │ │ └── HCS08_tv.s │ │ ├── HCS12/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── HC12.cspec │ │ │ │ ├── HC12.pspec │ │ │ │ ├── HC12.slaspec │ │ │ │ ├── HCS12.cspec │ │ │ │ ├── HCS12.ldefs │ │ │ │ ├── HCS12.opinion │ │ │ │ ├── HCS12.pspec │ │ │ │ ├── HCS12.slaspec │ │ │ │ ├── HCS12X.cspec │ │ │ │ ├── HCS12X.pspec │ │ │ │ ├── HCS12X.slaspec │ │ │ │ ├── HCS_HC12.sinc │ │ │ │ └── XGATE.sinc │ │ │ └── manuals/ │ │ │ └── HCS12.idx │ │ ├── JVM/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── JVM.cspec │ │ │ │ ├── JVM.ldefs │ │ │ │ ├── JVM.opinion │ │ │ │ ├── JVM.pspec │ │ │ │ └── JVM.slaspec │ │ │ └── manuals/ │ │ │ └── JVM.idx │ │ ├── Loongarch/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── ilp32d.cspec │ │ │ │ ├── ilp32f.cspec │ │ │ │ ├── lasx.sinc │ │ │ │ ├── lbt.sinc │ │ │ │ ├── loongarch.ldefs │ │ │ │ ├── loongarch.opinion │ │ │ │ ├── loongarch32.pspec │ │ │ │ ├── loongarch32_f32.slaspec │ │ │ │ ├── 
loongarch32_f64.slaspec │ │ │ │ ├── loongarch32_instructions.sinc │ │ │ │ ├── loongarch64.pspec │ │ │ │ ├── loongarch64_f32.slaspec │ │ │ │ ├── loongarch64_f64.slaspec │ │ │ │ ├── loongarch64_instructions.sinc │ │ │ │ ├── loongarch_double.sinc │ │ │ │ ├── loongarch_float.sinc │ │ │ │ ├── loongarch_main.sinc │ │ │ │ ├── lp64d.cspec │ │ │ │ ├── lp64f.cspec │ │ │ │ ├── lsx.sinc │ │ │ │ └── lvz.sinc │ │ │ ├── manuals/ │ │ │ │ └── loongarch.idx │ │ │ └── patterns/ │ │ │ ├── loongarch_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── M16C/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── M16C_60.cspec │ │ │ │ ├── M16C_60.ldefs │ │ │ │ ├── M16C_60.pspec │ │ │ │ ├── M16C_60.slaspec │ │ │ │ ├── M16C_80.cspec │ │ │ │ ├── M16C_80.ldefs │ │ │ │ ├── M16C_80.pspec │ │ │ │ └── M16C_80.slaspec │ │ │ └── manuals/ │ │ │ ├── M16C_60.idx │ │ │ └── M16C_80.idx │ │ ├── M8C/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── m8c.cspec │ │ │ ├── m8c.ldefs │ │ │ ├── m8c.opinion │ │ │ ├── m8c.pspec │ │ │ └── m8c.slaspec │ │ ├── MC6800/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── 6800.ldefs │ │ │ │ ├── 6805.cspec │ │ │ │ ├── 6805.ldefs │ │ │ │ ├── 6805.pspec │ │ │ │ ├── 6805.slaspec │ │ │ │ ├── 6809.cspec │ │ │ │ ├── 6809.pspec │ │ │ │ ├── 6809.slaspec │ │ │ │ ├── 6x09.sinc │ │ │ │ ├── 6x09_exg_tfr.sinc │ │ │ │ ├── 6x09_pull.sinc │ │ │ │ ├── 6x09_push.sinc │ │ │ │ └── H6309.slaspec │ │ │ └── manuals/ │ │ │ └── 6809.idx │ │ ├── MCS96/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── MCS96.cspec │ │ │ │ ├── MCS96.ldefs │ │ │ │ ├── MCS96.pspec │ │ │ │ ├── MCS96.sinc │ │ │ │ └── MCS96.slaspec │ │ │ └── manuals/ │ │ │ └── MCS96.idx │ │ ├── MIPS/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── MIPS.opinion │ │ │ │ ├── mips.dwarf │ │ │ │ ├── mips.ldefs │ │ │ │ ├── mips.sinc │ │ │ │ ├── mips16.sinc │ │ │ │ ├── mips32.pspec │ │ │ │ ├── mips32Instructions.sinc │ │ │ │ ├── mips32R6.pspec │ │ │ │ ├── mips32R6be.slaspec │ │ │ │ ├── mips32R6le.slaspec │ │ │ │ ├── mips32_eabi.cspec │ │ │ │ ├── 
mips32_fp64.cspec │ │ │ │ ├── mips32be.cspec │ │ │ │ ├── mips32be.slaspec │ │ │ │ ├── mips32le.cspec │ │ │ │ ├── mips32le.slaspec │ │ │ │ ├── mips32micro.pspec │ │ │ │ ├── mips64.pspec │ │ │ │ ├── mips64Instructions.sinc │ │ │ │ ├── mips64R6.pspec │ │ │ │ ├── mips64_32_n32.cspec │ │ │ │ ├── mips64_32_o32.cspec │ │ │ │ ├── mips64_32_o64.cspec │ │ │ │ ├── mips64be.cspec │ │ │ │ ├── mips64be.slaspec │ │ │ │ ├── mips64le.cspec │ │ │ │ ├── mips64le.slaspec │ │ │ │ ├── mips64micro.pspec │ │ │ │ ├── mips_dsp.sinc │ │ │ │ ├── mips_mt.sinc │ │ │ │ ├── mipsfloat.sinc │ │ │ │ └── mipsmicro.sinc │ │ │ ├── manuals/ │ │ │ │ ├── MIPS.idx │ │ │ │ ├── mipsM16.idx │ │ │ │ ├── mipsMic.idx │ │ │ │ └── r4000.idx │ │ │ └── patterns/ │ │ │ ├── MIPS_BE_patterns.xml │ │ │ ├── MIPS_LE_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── NDS32/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── lsmw.sinc │ │ │ │ ├── nds32.cspec │ │ │ │ ├── nds32.dwarf │ │ │ │ ├── nds32.ldefs │ │ │ │ ├── nds32.opinion │ │ │ │ ├── nds32.pspec │ │ │ │ ├── nds32.sinc │ │ │ │ ├── nds32be.slaspec │ │ │ │ └── nds32le.slaspec │ │ │ └── patterns/ │ │ │ ├── nds32_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── PA-RISC/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── pa-risc.ldefs │ │ │ │ ├── pa-risc.opinion │ │ │ │ ├── pa-risc.sinc │ │ │ │ ├── pa-risc32.cspec │ │ │ │ ├── pa-risc32.pspec │ │ │ │ ├── pa-risc32be.slaspec │ │ │ │ └── pa-riscInstructions.sinc │ │ │ ├── manuals/ │ │ │ │ └── pa11_acd.idx │ │ │ └── patterns/ │ │ │ ├── pa-risc_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── PIC/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── PIC24.cspec │ │ │ │ ├── PIC24.ldefs │ │ │ │ ├── PIC24.opinion │ │ │ │ ├── PIC24.pspec │ │ │ │ ├── PIC24.sinc │ │ │ │ ├── PIC24E.slaspec │ │ │ │ ├── PIC24F.slaspec │ │ │ │ ├── PIC24H.slaspec │ │ │ │ ├── PIC30.dwarf │ │ │ │ ├── PIC33.dwarf │ │ │ │ ├── dsPIC30F.slaspec │ │ │ │ ├── dsPIC33C.slaspec │ │ │ │ ├── dsPIC33E.slaspec │ │ │ │ ├── dsPIC33F.slaspec │ │ │ │ ├── pic12.sinc │ 
│ │ │ ├── pic12_instructions.sinc │ │ │ │ ├── pic12c5xx.cspec │ │ │ │ ├── pic12c5xx.ldefs │ │ │ │ ├── pic12c5xx.pspec │ │ │ │ ├── pic12c5xx.slaspec │ │ │ │ ├── pic16.cspec │ │ │ │ ├── pic16.ldefs │ │ │ │ ├── pic16.pspec │ │ │ │ ├── pic16.sinc │ │ │ │ ├── pic16.slaspec │ │ │ │ ├── pic16_instructions.sinc │ │ │ │ ├── pic16c5x.cspec │ │ │ │ ├── pic16c5x.ldefs │ │ │ │ ├── pic16c5x.pspec │ │ │ │ ├── pic16c5x.slaspec │ │ │ │ ├── pic16f.cspec │ │ │ │ ├── pic16f.pspec │ │ │ │ ├── pic16f.slaspec │ │ │ │ ├── pic17c7xx.cspec │ │ │ │ ├── pic17c7xx.ldefs │ │ │ │ ├── pic17c7xx.pspec │ │ │ │ ├── pic17c7xx.sinc │ │ │ │ ├── pic17c7xx.slaspec │ │ │ │ ├── pic17c7xx_instructions.sinc │ │ │ │ ├── pic18.cspec │ │ │ │ ├── pic18.ldefs │ │ │ │ ├── pic18.pspec │ │ │ │ ├── pic18.sinc │ │ │ │ ├── pic18.slaspec │ │ │ │ └── pic18_instructions.sinc │ │ │ └── manuals/ │ │ │ ├── PIC-12.idx │ │ │ ├── PIC-16.idx │ │ │ ├── PIC-16F.idx │ │ │ ├── PIC-17.idx │ │ │ ├── PIC-18.idx │ │ │ └── PIC24.idx │ │ ├── PowerPC/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── 4xx.sinc │ │ │ │ ├── FPRC.sinc │ │ │ │ ├── PowerPC.opinion │ │ │ │ ├── SPEF_SCR.sinc │ │ │ │ ├── SPE_APU.sinc │ │ │ │ ├── SPE_EFSD.sinc │ │ │ │ ├── SPE_EFV.sinc │ │ │ │ ├── SPE_FloatMulAdd.sinc │ │ │ │ ├── Scalar_SPFP.sinc │ │ │ │ ├── altivec.sinc │ │ │ │ ├── evx.sinc │ │ │ │ ├── g2.sinc │ │ │ │ ├── lmwInstructions.sinc │ │ │ │ ├── lswInstructions.sinc │ │ │ │ ├── mulhwInstructions.sinc │ │ │ │ ├── old/ │ │ │ │ │ ├── oldPPC.lang │ │ │ │ │ └── oldPPC.trans │ │ │ │ ├── ppc.dwarf │ │ │ │ ├── ppc.ldefs │ │ │ │ ├── ppc.ldefs.orig │ │ │ │ ├── ppc_32.cspec │ │ │ │ ├── ppc_32.pspec │ │ │ │ ├── ppc_32_4xx_be.slaspec │ │ │ │ ├── ppc_32_4xx_le.slaspec │ │ │ │ ├── ppc_32_be.cspec │ │ │ │ ├── ppc_32_be.slaspec │ │ │ │ ├── ppc_32_be_Mac.cspec │ │ │ │ ├── ppc_32_e200.cspec │ │ │ │ ├── ppc_32_e200.pspec │ │ │ │ ├── ppc_32_e200.slaspec │ │ │ │ ├── ppc_32_e500_be.cspec │ │ │ │ ├── ppc_32_e500_be.slaspec │ │ │ │ ├── ppc_32_e500_le.cspec │ │ │ │ ├── 
ppc_32_e500_le.slaspec │ │ │ │ ├── ppc_32_e500mc_be.cspec │ │ │ │ ├── ppc_32_e500mc_be.slaspec │ │ │ │ ├── ppc_32_e500mc_le.cspec │ │ │ │ ├── ppc_32_e500mc_le.slaspec │ │ │ │ ├── ppc_32_le.slaspec │ │ │ │ ├── ppc_32_mpc8270.pspec │ │ │ │ ├── ppc_32_quicciii_be.slaspec │ │ │ │ ├── ppc_32_quicciii_le.slaspec │ │ │ │ ├── ppc_64.pspec │ │ │ │ ├── ppc_64_32.cspec │ │ │ │ ├── ppc_64_be.cspec │ │ │ │ ├── ppc_64_be.slaspec │ │ │ │ ├── ppc_64_be_Mac.cspec │ │ │ │ ├── ppc_64_isa_altivec_be.slaspec │ │ │ │ ├── ppc_64_isa_altivec_le.slaspec │ │ │ │ ├── ppc_64_isa_altivec_vle_be.slaspec │ │ │ │ ├── ppc_64_isa_be.slaspec │ │ │ │ ├── ppc_64_isa_le.slaspec │ │ │ │ ├── ppc_64_isa_vle_be.slaspec │ │ │ │ ├── ppc_64_le.cspec │ │ │ │ ├── ppc_64_le.slaspec │ │ │ │ ├── ppc_a2.sinc │ │ │ │ ├── ppc_common.sinc │ │ │ │ ├── ppc_embedded.sinc │ │ │ │ ├── ppc_instructions.sinc │ │ │ │ ├── ppc_isa.sinc │ │ │ │ ├── ppc_vle.sinc │ │ │ │ ├── quicciii.sinc │ │ │ │ ├── stmwInstructions.sinc │ │ │ │ ├── stswiInstructions.sinc │ │ │ │ └── vsx.sinc │ │ │ ├── manuals/ │ │ │ │ ├── PowerISA.idx │ │ │ │ └── PowerPC.idx │ │ │ └── patterns/ │ │ │ ├── PPC_BE_patterns.xml │ │ │ ├── PPC_BE_prepatterns.xml │ │ │ ├── PPC_LE_patterns.xml │ │ │ ├── PPC_LE_prepatterns.xml │ │ │ ├── patternconstraints.xml │ │ │ └── prepatternconstraints.xml │ │ ├── RISCV/ │ │ │ ├── data/ │ │ │ │ ├── languages/ │ │ │ │ │ ├── RV32.pspec │ │ │ │ │ ├── RV64.pspec │ │ │ │ │ ├── andestar_v5.instr.sinc │ │ │ │ │ ├── andestar_v5.ldefs │ │ │ │ │ ├── andestar_v5.slaspec │ │ │ │ │ ├── old/ │ │ │ │ │ │ └── riscv_deprecated.ldefs │ │ │ │ │ ├── riscv.csr.sinc │ │ │ │ │ ├── riscv.custom.sinc │ │ │ │ │ ├── riscv.ilp32d.slaspec │ │ │ │ │ ├── riscv.instr.sinc │ │ │ │ │ ├── riscv.ldefs │ │ │ │ │ ├── riscv.lp64d.slaspec │ │ │ │ │ ├── riscv.opinion │ │ │ │ │ ├── riscv.priv.sinc │ │ │ │ │ ├── riscv.reg.sinc │ │ │ │ │ ├── riscv.rv32a.sinc │ │ │ │ │ ├── riscv.rv32b.sinc │ │ │ │ │ ├── riscv.rv32d.sinc │ │ │ │ │ ├── riscv.rv32f.sinc │ │ │ │ │ ├── 
riscv.rv32i.sinc │ │ │ │ │ ├── riscv.rv32k.sinc │ │ │ │ │ ├── riscv.rv32m.sinc │ │ │ │ │ ├── riscv.rv32p.sinc │ │ │ │ │ ├── riscv.rv32q.sinc │ │ │ │ │ ├── riscv.rv64a.sinc │ │ │ │ │ ├── riscv.rv64b.sinc │ │ │ │ │ ├── riscv.rv64d.sinc │ │ │ │ │ ├── riscv.rv64f.sinc │ │ │ │ │ ├── riscv.rv64i.sinc │ │ │ │ │ ├── riscv.rv64k.sinc │ │ │ │ │ ├── riscv.rv64m.sinc │ │ │ │ │ ├── riscv.rv64p.sinc │ │ │ │ │ ├── riscv.rv64q.sinc │ │ │ │ │ ├── riscv.rvc.sinc │ │ │ │ │ ├── riscv.rvv.sinc │ │ │ │ │ ├── riscv.table.sinc │ │ │ │ │ ├── riscv.zi.sinc │ │ │ │ │ ├── riscv32-fp.cspec │ │ │ │ │ ├── riscv32.cspec │ │ │ │ │ ├── riscv32.dwarf │ │ │ │ │ ├── riscv64-fp.cspec │ │ │ │ │ ├── riscv64.cspec │ │ │ │ │ └── riscv64.dwarf │ │ │ │ └── patterns/ │ │ │ │ ├── patternconstraints.xml │ │ │ │ └── riscv_gc_patterns.xml │ │ │ └── scripts/ │ │ │ └── binutil.py │ │ ├── Sparc/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── Sparc.dwarf │ │ │ │ ├── Sparc.opinion │ │ │ │ ├── SparcV9.ldefs │ │ │ │ ├── SparcV9.pspec │ │ │ │ ├── SparcV9.sinc │ │ │ │ ├── SparcV9_32.cspec │ │ │ │ ├── SparcV9_32.slaspec │ │ │ │ ├── SparcV9_64.cspec │ │ │ │ ├── SparcV9_64.slaspec │ │ │ │ └── SparcVIS.sinc │ │ │ ├── manuals/ │ │ │ │ └── Sparc.idx │ │ │ └── patterns/ │ │ │ ├── SPARC_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── SuperH/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── sh-1.slaspec │ │ │ ├── sh-2.slaspec │ │ │ ├── sh-2a.slaspec │ │ │ ├── superh.cspec │ │ │ ├── superh.ldefs │ │ │ ├── superh.pspec │ │ │ ├── superh.sinc │ │ │ └── superh2a.cspec │ │ ├── SuperH4/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── SuperH4.ldefs │ │ │ │ ├── SuperH4.opinion │ │ │ │ ├── SuperH4.pspec │ │ │ │ ├── SuperH4.sinc │ │ │ │ ├── SuperH4_be.cspec │ │ │ │ ├── SuperH4_be.slaspec │ │ │ │ ├── SuperH4_le.cspec │ │ │ │ ├── SuperH4_le.slaspec │ │ │ │ └── old/ │ │ │ │ ├── SuperH4-BE-16.lang │ │ │ │ ├── SuperH4-BE-16.trans │ │ │ │ ├── SuperH4-LE-16.lang │ │ │ │ └── SuperH4-LE-16.trans │ │ │ ├── manuals/ │ │ │ │ └── superh4.idx │ │ │ 
└── patterns/ │ │ │ ├── SuperH4_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── TI_MSP430/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── TI430Common.sinc │ │ │ │ ├── TI430X.sinc │ │ │ │ ├── TI_MSP430.cspec │ │ │ │ ├── TI_MSP430.dwarf │ │ │ │ ├── TI_MSP430.ldefs │ │ │ │ ├── TI_MSP430.pspec │ │ │ │ ├── TI_MSP430.slaspec │ │ │ │ ├── TI_MSP430X.cspec │ │ │ │ ├── TI_MSP430X.dwarf │ │ │ │ ├── TI_MSP430X.slaspec │ │ │ │ └── ti_msp430.opinion │ │ │ └── manuals/ │ │ │ └── MSP430.idx │ │ ├── Toy/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── old/ │ │ │ │ ├── ToyV00BE64.lang │ │ │ │ ├── ToyV0BE64.trans │ │ │ │ ├── ToyV0LE64.lang │ │ │ │ ├── ToyV0LE64.trans │ │ │ │ └── v01stuff/ │ │ │ │ ├── toy.cspec │ │ │ │ ├── toy.ldefs_v01 │ │ │ │ ├── toy.sinc │ │ │ │ ├── toy64.cspec │ │ │ │ ├── toyInstructions.sinc │ │ │ │ └── toyPosStack.cspec │ │ │ ├── toy.cspec │ │ │ ├── toy.ldefs │ │ │ ├── toy.pspec │ │ │ ├── toy.sinc │ │ │ ├── toy64-long8.cspec │ │ │ ├── toy64.cspec │ │ │ ├── toy64_be.slaspec │ │ │ ├── toy64_be_harvard.slaspec │ │ │ ├── toy64_be_harvard_rev.slaspec │ │ │ ├── toy64_le.slaspec │ │ │ ├── toyInstructions.sinc │ │ │ ├── toyPosStack.cspec │ │ │ ├── toy_be.slaspec │ │ │ ├── toy_be_posStack.slaspec │ │ │ ├── toy_builder.sinc │ │ │ ├── toy_builder_be.slaspec │ │ │ ├── toy_builder_be_align2.slaspec │ │ │ ├── toy_builder_le.slaspec │ │ │ ├── toy_builder_le_align2.slaspec │ │ │ ├── toy_harvard.pspec │ │ │ ├── toy_le.slaspec │ │ │ ├── toy_wsz_be.slaspec │ │ │ └── toy_wsz_le.slaspec │ │ ├── V850/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── Helpers/ │ │ │ │ │ ├── Conditions.sinc │ │ │ │ │ ├── Extras.sinc │ │ │ │ │ ├── Macros.sinc │ │ │ │ │ ├── Register.sinc │ │ │ │ │ ├── Tokens.sinc │ │ │ │ │ └── Variables.sinc │ │ │ │ ├── Instructions/ │ │ │ │ │ ├── Arithmetic.sinc │ │ │ │ │ ├── Float.sinc │ │ │ │ │ ├── Load_Store.sinc │ │ │ │ │ ├── Logic.sinc │ │ │ │ │ └── Special.sinc │ │ │ │ ├── V850.cspec │ │ │ │ ├── V850.ldefs │ │ │ │ ├── V850.opinion │ │ │ │ ├── V850.pspec │ │ 
│ │ └── V850.slaspec │ │ │ ├── manuals/ │ │ │ │ └── v850.idx │ │ │ └── patterns/ │ │ │ ├── V850_patterns.xml │ │ │ └── patternconstraints.xml │ │ ├── Xtensa/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── cust.sinc │ │ │ │ ├── flix.sinc │ │ │ │ ├── xtensa.cspec │ │ │ │ ├── xtensa.dwarf │ │ │ │ ├── xtensa.ldefs │ │ │ │ ├── xtensa.opinion │ │ │ │ ├── xtensa.pspec │ │ │ │ ├── xtensaArch.sinc │ │ │ │ ├── xtensaInstructions.sinc │ │ │ │ ├── xtensaMain.sinc │ │ │ │ ├── xtensa_be.slaspec │ │ │ │ ├── xtensa_depbits.sinc │ │ │ │ └── xtensa_le.slaspec │ │ │ ├── manuals/ │ │ │ │ └── xtensa.idx │ │ │ └── patterns/ │ │ │ ├── patternconstraints.xml │ │ │ └── xtensa_patterns.xml │ │ ├── Z80/ │ │ │ ├── data/ │ │ │ │ ├── languages/ │ │ │ │ │ ├── z180.pspec │ │ │ │ │ ├── z180.slaspec │ │ │ │ │ ├── z182.pspec │ │ │ │ │ ├── z80.cspec │ │ │ │ │ ├── z80.ldefs │ │ │ │ │ ├── z80.pspec │ │ │ │ │ ├── z80.slaspec │ │ │ │ │ └── z8401x.pspec │ │ │ │ └── manuals/ │ │ │ │ ├── Z180.idx │ │ │ │ └── Z80.idx │ │ │ └── temp/ │ │ │ └── z8401x.pspec │ │ ├── eBPF/ │ │ │ └── data/ │ │ │ └── languages/ │ │ │ ├── eBPF.cspec │ │ │ ├── eBPF.dwarf │ │ │ ├── eBPF.ldefs │ │ │ ├── eBPF.opinion │ │ │ ├── eBPF.pspec │ │ │ ├── eBPF.sinc │ │ │ ├── eBPF_be.slaspec │ │ │ └── eBPF_le.slaspec │ │ ├── tricore/ │ │ │ └── data/ │ │ │ ├── languages/ │ │ │ │ ├── tc172x.pspec │ │ │ │ ├── tc176x.pspec │ │ │ │ ├── tc29x.pspec │ │ │ │ ├── tricore.cspec │ │ │ │ ├── tricore.dwarf │ │ │ │ ├── tricore.ldefs │ │ │ │ ├── tricore.opinion │ │ │ │ ├── tricore.pcp.sinc │ │ │ │ ├── tricore.pspec │ │ │ │ ├── tricore.sinc │ │ │ │ └── tricore.slaspec │ │ │ ├── manuals/ │ │ │ │ ├── tricore.idx │ │ │ │ └── tricore2.idx │ │ │ └── patterns/ │ │ │ ├── patternconstraints.xml │ │ │ └── tricore_patterns.xml │ │ └── x86/ │ │ └── data/ │ │ ├── extensions/ │ │ │ └── rust/ │ │ │ ├── unix32/ │ │ │ │ ├── cc.xml │ │ │ │ ├── probe_fixup.xml │ │ │ │ └── try_fixup.xml │ │ │ ├── unix64/ │ │ │ │ ├── cc.xml │ │ │ │ ├── probe_fixup.xml │ │ │ │ └── try_fixup.xml 
│ │ │ ├── windows32/ │ │ │ │ ├── probe_fixup.xml │ │ │ │ └── try_fixup.xml │ │ │ └── windows64/ │ │ │ ├── probe_fixup.xml │ │ │ └── try_fixup.xml │ │ ├── languages/ │ │ │ ├── adx.sinc │ │ │ ├── avx.sinc │ │ │ ├── avx2.sinc │ │ │ ├── avx2_manual.sinc │ │ │ ├── avx512.sinc │ │ │ ├── avx512_manual.sinc │ │ │ ├── avx_manual.sinc │ │ │ ├── bmi1.sinc │ │ │ ├── bmi2.sinc │ │ │ ├── cet.sinc │ │ │ ├── clwb.sinc │ │ │ ├── fma.sinc │ │ │ ├── ia.sinc │ │ │ ├── lockable.sinc │ │ │ ├── lzcnt.sinc │ │ │ ├── macros.sinc │ │ │ ├── mpx.sinc │ │ │ ├── old/ │ │ │ │ ├── x86RealV1.lang │ │ │ │ ├── x86RealV1.trans │ │ │ │ ├── x86RealV2.lang │ │ │ │ ├── x86RealV2.trans │ │ │ │ ├── x86RealV3.lang │ │ │ │ ├── x86RealV3.trans │ │ │ │ ├── x86V1.lang │ │ │ │ ├── x86V1.trans │ │ │ │ ├── x86V2.lang │ │ │ │ ├── x86V2.trans │ │ │ │ ├── x86V3.lang │ │ │ │ ├── x86V3.trans │ │ │ │ ├── x86_64bit_compat32_v2.lang │ │ │ │ ├── x86_64bit_compat32_v2.trans │ │ │ │ ├── x86_64bit_compat32_v3.lang │ │ │ │ ├── x86_64bit_compat32_v3.trans │ │ │ │ ├── x86_64bit_v1.lang │ │ │ │ ├── x86_64bit_v1.trans │ │ │ │ ├── x86_64bit_v2.lang │ │ │ │ ├── x86_64bit_v2.trans │ │ │ │ ├── x86_64bit_v3.lang │ │ │ │ ├── x86_64bit_v3.trans │ │ │ │ ├── x86_ProtV2.lang │ │ │ │ ├── x86_ProtV2.trans │ │ │ │ ├── x86_ProtV3.lang │ │ │ │ ├── x86_ProtV3.trans │ │ │ │ ├── x86smmV1.lang │ │ │ │ ├── x86smmV1.trans │ │ │ │ ├── x86smmV2.lang │ │ │ │ ├── x86smmV2.trans │ │ │ │ ├── x86smmV3.lang │ │ │ │ └── x86smmV3.trans │ │ │ ├── pclmulqdq.sinc │ │ │ ├── rdrand.sinc │ │ │ ├── sgx.sinc │ │ │ ├── sha.sinc │ │ │ ├── smx.sinc │ │ │ ├── x86-16-real.pspec │ │ │ ├── x86-16.cspec │ │ │ ├── x86-16.gdis │ │ │ ├── x86-16.pspec │ │ │ ├── x86-32-golang.cspec │ │ │ ├── x86-32-golang.register.info │ │ │ ├── x86-64-compat32.pspec │ │ │ ├── x86-64-gcc.cspec │ │ │ ├── x86-64-golang.cspec │ │ │ ├── x86-64-golang.register.info │ │ │ ├── x86-64-swift.cspec │ │ │ ├── x86-64-win.cspec │ │ │ ├── x86-64.dwarf │ │ │ ├── x86-64.pspec │ │ │ ├── x86-64.slaspec │ │ │ ├── 
x86.dwarf │ │ │ ├── x86.ldefs │ │ │ ├── x86.opinion │ │ │ ├── x86.pspec │ │ │ ├── x86.slaspec │ │ │ ├── x86borland.cspec │ │ │ ├── x86delphi.cspec │ │ │ ├── x86gcc.cspec │ │ │ └── x86win.cspec │ │ ├── manuals/ │ │ │ └── x86.idx │ │ └── patterns/ │ │ ├── patternconstraints.xml │ │ ├── prepatternconstraints.xml │ │ ├── x86-16_default_patterns.xml │ │ ├── x86-64gcc_patterns.xml │ │ ├── x86-64win_patterns.xml │ │ ├── x86delphi_patterns.xml │ │ ├── x86gcc_patterns.xml │ │ ├── x86gcc_prepatterns.xml │ │ ├── x86win_patterns.xml │ │ └── x86win_prepatterns.xml │ ├── py.typed │ ├── pypcode_native.cpp │ ├── pypcode_native.pyi │ ├── sleigh/ │ │ ├── Makefile │ │ ├── address.cc │ │ ├── address.hh │ │ ├── compression.cc │ │ ├── compression.hh │ │ ├── context.cc │ │ ├── context.hh │ │ ├── emulate.cc │ │ ├── emulate.hh │ │ ├── error.hh │ │ ├── filemanage.cc │ │ ├── filemanage.hh │ │ ├── float.cc │ │ ├── float.hh │ │ ├── globalcontext.cc │ │ ├── globalcontext.hh │ │ ├── loadimage.cc │ │ ├── loadimage.hh │ │ ├── loadimage_bfd.cc │ │ ├── loadimage_bfd.hh │ │ ├── marshal.cc │ │ ├── marshal.hh │ │ ├── memstate.cc │ │ ├── memstate.hh │ │ ├── opbehavior.cc │ │ ├── opbehavior.hh │ │ ├── opcodes.cc │ │ ├── opcodes.hh │ │ ├── partmap.hh │ │ ├── pcodecompile.cc │ │ ├── pcodecompile.hh │ │ ├── pcodeparse.cc │ │ ├── pcodeparse.hh │ │ ├── pcodeparse.y │ │ ├── pcoderaw.cc │ │ ├── pcoderaw.hh │ │ ├── semantics.cc │ │ ├── semantics.hh │ │ ├── slaformat.cc │ │ ├── slaformat.hh │ │ ├── sleigh.cc │ │ ├── sleigh.hh │ │ ├── sleighbase.cc │ │ ├── sleighbase.hh │ │ ├── slgh_compile.cc │ │ ├── slgh_compile.hh │ │ ├── slghparse.cc │ │ ├── slghparse.hh │ │ ├── slghparse.y │ │ ├── slghpatexpress.cc │ │ ├── slghpatexpress.hh │ │ ├── slghpattern.cc │ │ ├── slghpattern.hh │ │ ├── slghscan.cc │ │ ├── slghscan.l │ │ ├── slghsymbol.cc │ │ ├── slghsymbol.hh │ │ ├── space.cc │ │ ├── space.hh │ │ ├── translate.cc │ │ ├── translate.hh │ │ ├── types.h │ │ ├── xml.cc │ │ ├── xml.hh │ │ └── xml.y │ └── zlib/ │ ├── 
README.txt │ ├── adler32.c │ ├── deflate.c │ ├── deflate.h │ ├── gzguts.h │ ├── inffast.c │ ├── inffast.h │ ├── inffixed.h │ ├── inflate.c │ ├── inflate.h │ ├── inftrees.c │ ├── inftrees.h │ ├── trees.c │ ├── trees.h │ ├── zconf.h │ ├── zlib.h │ ├── zutil.c │ └── zutil.h ├── pyproject.toml ├── scripts/ │ ├── benchmark.py │ └── sleigh_download.sh ├── setup.py └── tests/ ├── test_cli.py └── test_pypcode.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .clang-format ================================================ # https://clang.llvm.org/docs/ClangFormat.html # https://clang.llvm.org/docs/ClangFormatStyleOptions.html --- Language: Cpp AlignAfterOpenBracket: Align AlignConsecutiveAssignments: false AlignConsecutiveDeclarations: false AlignEscapedNewlinesLeft: true AlignOperands: true AlignTrailingComments: false # churn AllowShortBlocksOnASingleLine: false AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: None AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: false BinPackArguments: false BinPackParameters: false AllowAllParametersOfDeclarationOnNextLine: false AllowAllArgumentsOnNextLine: true BraceWrapping: AfterControlStatement: false AfterEnum: false AfterFunction: true AfterStruct: false AfterUnion: false BeforeElse: false IndentBraces: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Custom BreakBeforeTernaryOperators: false BreakStringLiterals: true ColumnLimit: 120 ContinuationIndentWidth: 4 Cpp11BracedListStyle: false DerivePointerAlignment: false DisableFormat: false IncludeIsMainRegex: '$' IndentCaseLabels: false IndentWidth: 4 AccessModifierOffset: -4 IndentWrappedFunctionNames: false KeepEmptyLinesAtTheStartOfBlocks: false MacroBlockBegin: '.*_BEGIN$' # only PREC_BEGIN ? 
MacroBlockEnd: '.*_END$' MaxEmptyLinesToKeep: 2 PointerAlignment: Right ReflowComments: true SortIncludes: true SpaceAfterCStyleCast: false SpaceBeforeAssignmentOperators: true SpaceBeforeParens: ControlStatements SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 1 SpacesInContainerLiterals: true SpacesInParentheses: false SpacesInSquareBrackets: false Standard: Auto UseTab: Never ... ================================================ FILE: .github/ISSUE_TEMPLATE/bug-report.yml ================================================ name: Report a bug description: Report a bug in pypcode labels: [bug,needs-triage] body: - type: markdown attributes: value: | Thank you for taking the time to submit this bug report! Before submitting this bug report, please check the following, which may resolve your issue: * Have you checked that you are running the latest versions of angr and its components? angr is rapidly-evolving! * Have you [searched existing issues](https://github.com/angr/pypcode/issues?q=is%3Aopen+is%3Aissue+label%3Abug) to see if this bug has been reported before? * Have you checked the [documentation](https://docs.angr.io/)? * Have you checked the [FAQ](https://docs.angr.io/introductory-errata/faq)? **Important:** If this bug is a security vulnerability, please submit it privately. See our [security policy](https://github.com/angr/angr/blob/master/SECURITY.md) for more details. Please note: The angr suite is maintained by a small team. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack](https://angr.io/invite/). - type: textarea attributes: label: Description description: Brief description of the bug, with any relevant log messages. 
validations: required: true - type: textarea attributes: label: Steps to reproduce the bug description: | If appropriate, include both a **script to reproduce the bug**, and if possible **attach the binary used**. **Tip:** You can attach files to the issue by first clicking on the textarea to select it, then dragging & dropping the file onto the textarea. - type: textarea attributes: label: Environment description: Many common issues are caused by problems with the local Python environment. Before submitting, double-check that your versions of all modules in the angr suite (angr, cle, pyvex, ...) are up to date and include the output of `python -m angr.misc.bug_report` here. - type: textarea attributes: label: Additional context description: Any additional context about the problem. ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: Join our Slack community url: https://angr.io/invite/ about: For questions and help with angr, you are invited to join the angr Slack community ================================================ FILE: .github/ISSUE_TEMPLATE/feature-request.yml ================================================ name: Request a feature description: Request a new feature for pypcode labels: [enhancement,needs-triage] body: - type: markdown attributes: value: | Thank you for taking the time to submit this feature request! Before submitting this feature request, please check the following: * Have you checked that you are running the latest versions of angr and its components? angr is rapidly-evolving! * Have you checked the [documentation](https://docs.angr.io/) to see if this feature exists already? * Have you [searched existing issues](https://github.com/angr/pypcode/issues?q=is%3Aissue+label%3Aenhancement+) to see if this feature has been requested before? Please note: The angr suite is maintained by a small team. 
While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. For more real-time help with angr, from us and the community, join our [Slack](https://angr.io/invite/). - type: textarea attributes: label: Description description: | Brief description of the desired feature. If the feature is intended to solve some problem, please clearly describe the problem, including any relevant binaries, etc. **Tip:** You can attach files to the issue by first clicking on the textarea to select it, then dragging & dropping the file onto the textarea. validations: required: true - type: textarea attributes: label: Alternatives description: Possible alternative solutions or features that you have considered. - type: textarea attributes: label: Additional context description: Any other context or screenshots about the feature request. ================================================ FILE: .github/ISSUE_TEMPLATE/question.yml ================================================ name: Ask a question description: Ask a question about pypcode labels: [question,needs-triage] body: - type: markdown attributes: value: | If you have a question about pypcode, that is not a bug report or a feature request, you can ask it here. For more real-time help with pypcode, from us and the community, join our [Slack](https://angr.io/invite/). Before submitting this question, please check the following, which may answer your question: * Have you checked the [documentation](https://docs.angr.io/)? * Have you checked the [FAQ](https://docs.angr.io/introductory-errata/faq)? * Have you checked our library of [examples](https://github.com/angr/angr-doc/tree/master/examples)? * Have you [searched existing issues](https://github.com/angr/pypcode/issues?q=is%3Aissue+label%3Aquestion) to see if this question has been answered before? * Have you checked that you are running the latest versions of angr and its components. angr is rapidly-evolving! 
Please note: The angr suite is maintained by a small team. While we cannot guarantee any timeliness for fixes and enhancements, we will do our best. - type: textarea attributes: label: Question description: validations: required: true ================================================ FILE: .github/dependabot.yml ================================================ version: 2 updates: - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly" commit-message: prefix: "ci" ================================================ FILE: .github/workflows/build.yml ================================================ name: Build on: [push, pull_request] concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true permissions: contents: read jobs: lint: name: Lint runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - run: | python -m pip install setuptools pylint python -m pip install -e . 
- uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 with: extra_args: pylint --all-files build_sdist: name: Build source distribution runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - name: Build sdist run: | python -m pip install --user build python -m build --sdist - uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: source path: dist/*.tar.gz build_wheels: needs: build_sdist name: Build wheel ${{ matrix.py }}-${{ matrix.platform.wheel_tag }} on ${{ matrix.platform.os }} runs-on: ${{ matrix.platform.os }} strategy: matrix: py: [cp312, cp313, cp314] platform: - { arch: x86_64, os: windows-latest, wheel_tag: win_amd64 } - { arch: x86_64, os: macos-15-intel, wheel_tag: macosx_x86_64 } - { arch: arm64, os: macos-latest, wheel_tag: macosx_arm64 } - { arch: x86_64, os: ubuntu-latest, wheel_tag: manylinux_x86_64 } - { arch: aarch64, os: ubuntu-24.04-arm, wheel_tag: manylinux_aarch64 } steps: - name: Download source distribution uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: name: source - name: Unpack source distribution shell: bash run: tar --strip-components 1 -xvf *.tar.gz - name: Build wheel uses: pypa/cibuildwheel@8d2b08b68458a16aeb24b64e68a09ab1c8e82084 # v3.4.1 with: output-dir: wheelhouse env: CIBW_ARCHS_MACOS: ${{ matrix.platform.arch }} CIBW_BUILD: ${{ matrix.py }}-${{ matrix.platform.wheel_tag }} CIBW_TEST_COMMAND: python -m unittest discover -v -s {package}/tests CIBW_BUILD_VERBOSITY: 1 MACOSX_DEPLOYMENT_TARGET: ${{ matrix.platform.arch == 'arm64' && '11' || '10.14' }} - uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: ${{ matrix.py }}-${{ matrix.platform.wheel_tag }} path: ./wheelhouse/*.whl build_docs: name: Build docs runs-on: ubuntu-latest steps: 
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.12' - run: | pip install -e .[docs] cd docs && make html coverage test_coverage: name: Test with coverage runs-on: ubuntu-latest steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6.2.0 with: python-version: '3.13' - name: Run tests run: | pip install setuptools nanobind cmake COVERAGE=1 pip install --no-build-isolation -e .[testing] pytest -vv \ --junitxml=junit.xml -o junit_family=legacy \ --cov-report=xml \ || [[ $? -lt 2 ]] # Accept success and test failures, fail on infrastructure problems (exit codes >1) gcovr -r . \ --print-summary \ --xml-pretty \ -o coverage-native.xml \ --gcov-filter 'pypcode_native.cpp' [[ -e ./junit.xml && -e coverage.xml && -e ./coverage-native.xml ]] - name: Upload test results uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1 with: name: results include-hidden-files: true if-no-files-found: error path: | ./junit.xml ./coverage.xml ./coverage-native.xml upload_coverage: name: Upload test results to Codecov needs: [test_coverage] runs-on: ubuntu-latest permissions: id-token: write steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 - uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: name: results - name: Upload test coverage to Codecov uses: codecov/codecov-action@v6 with: use_oidc: true fail_ci_if_error: true verbose: true files: ./coverage.xml ./coverage-native.xml - name: Upload test results to Codecov uses: codecov/codecov-action@v6 with: use_oidc: true fail_ci_if_error: true verbose: true files: ./junit.xml report_type: test_results upload_pypi: name: Upload wheels to PyPI needs: [lint, build_docs, build_sdist, build_wheels, upload_coverage] environment: 
name: pypi url: https://pypi.org/p/pypcode permissions: id-token: write runs-on: ubuntu-latest # Upload to PyPI on every tag starting with 'v' if: github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags/v') steps: - uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8.0.1 with: path: artifacts - run: | mkdir dist find artifacts -type f \( -name '*.whl' -o -name '*.tar.gz' \) -exec mv {} dist \; - uses: pypa/gh-action-pypi-publish@cef221092ed1bacb1cc03d23a2d87d1d172e277b # v1.14.0 ================================================ FILE: .gitignore ================================================ *.egg-info/ __pycache__/ build/ dist/ pypcode/bin/ docs/_build/ *.so *.sla *.manifest *.gradle *.java .coverage coverage.xml coverage-native.xml ================================================ FILE: .pre-commit-config.yaml ================================================ ci: skip: [pylint] exclude: ^pypcode/sleigh|^pypcode/processors repos: # # Fail fast # - repo: https://github.com/abravalheri/validate-pyproject rev: v0.25 hooks: - id: validate-pyproject fail_fast: true - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: # General - id: check-merge-conflict fail_fast: true - id: check-case-conflict fail_fast: true - id: destroyed-symlinks fail_fast: true - id: check-symlinks fail_fast: true - id: check-added-large-files fail_fast: true # Syntax - id: check-toml fail_fast: true - id: check-json fail_fast: true - id: check-yaml fail_fast: true - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: check-ast fail_fast: true # # Modifiers # - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: mixed-line-ending - id: trailing-whitespace - repo: https://github.com/dannysepler/rm_unneeded_f_str rev: v0.2.0 hooks: - id: rm-unneeded-f-str - repo: https://github.com/asottile/pyupgrade rev: v3.21.2 hooks: - id: pyupgrade args: [--py312-plus] # Last modifier: Coding Standard 
- repo: https://github.com/psf/black-pre-commit-mirror rev: 26.3.1 hooks: - id: black - repo: https://github.com/pre-commit/mirrors-clang-format rev: v22.1.4 hooks: - id: clang-format files: pypcode/pypcode_native.cpp # # Static Checks # - repo: https://github.com/pre-commit/pygrep-hooks rev: v1.10.0 hooks: # Python - id: python-use-type-annotations - id: python-no-log-warn # Documentation - id: rst-backticks - id: rst-directive-colons - id: rst-inline-touching-normal - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: - id: debug-statements - id: check-builtin-literals - id: check-docstring-first - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.15.12 hooks: - id: ruff - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.20.2 hooks: - id: mypy - repo: local hooks: - id: pylint name: pylint entry: pylint language: system types: [python] args: [ "-rn", # Only display messages "-sn", # Don't display the score ] ================================================ FILE: .pylintrc ================================================ [MASTER] # Specify a configuration file. #rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS # Pickle collected data for later comparisons. persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins=pylint.extensions.no_self_use,pylint.extensions.bad_builtin [MESSAGES CONTROL] # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time. #enable= # Disable the message, report, category or checker with the given id(s). 
You # can either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). disable= abstract-method, fixme, invalid-name, len-as-condition, locally-disabled, missing-function-docstring, missing-module-docstring, no-else-return, protected-access, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-public-methods, too-many-return-statements, too-many-statements, unidiomatic-typecheck, consider-using-f-string, attribute-defined-outside-init [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html output-format=text # Tells whether to display a full report or only the messages reports=yes # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamically set). ignored-classes=SQLObject,nose.tools,nose.tools.trivial,sympy # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E0201 when accessed. Python regular # expressions are accepted. 
generated-members=REQUEST,acl_users,aq_parent ignored-modules=sh,PySide2,PySide2.QtTest,PySide2.QtCore,PySide2.QtWidgets,PySide2.QtGui [FORMAT] # Maximum number of characters on a single line. max-line-length=120 # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' [BASIC] # List of builtins function names that should not be used, separated by a comma bad-functions=map,filter,apply,input # Regular expression which should only match correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression which should only match correct module level names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Regular expression which should only match correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Regular expression which should only match correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct method names method-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct instance attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match correct list comprehension / # generator expression variable names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_,l # Bad variable names which should always be refused, separated by a comma bad-names= # Regular expression which should only match functions or classes name which do # not require a docstring no-docstring-rgx=__.*__ [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching the beginning of the name of dummy variables # (i.e. 
not used). dummy-variables-rgx=_|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes [DESIGN] # Maximum number of arguments for function / method max-args=5 # Argument names that match this expression will be ignored. Default to name # with leading underscore ignored-argument-names=_.* # Maximum number of locals for function / method body max-locals=15 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of branch for function / method body max-branches=12 # Maximum number of statements in function / method body max-statements=50 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Minimum number of public methods for a class (see R0903). min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=regsub,TERMIOS,Bastion,rexec # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= [CLASSES] # List of method names used to declare (i.e. assign) instance attributes. 
defining-attr-methods=__init__,__new__,setUp # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls [EXCEPTIONS] # Exceptions that will emit a warning when being caught. Defaults to # "Exception" overgeneral-exceptions=builtins.Exception ================================================ FILE: .readthedocs.yml ================================================ # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details version: 2 build: os: ubuntu-22.04 tools: python: "3.12" python: install: - method: pip path: . extra_requirements: - docs sphinx: configuration: docs/conf.py ================================================ FILE: CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.18...3.22) project(pypcode) find_package(Python COMPONENTS Interpreter Development.Module REQUIRED) set(CMAKE_CXX_STANDARD 17) set(CMAKE_OSX_DEPLOYMENT_TARGET 10.14) if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES) set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build." 
FORCE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo") endif() # Detect the installed nanobind package and import it into CMake execute_process( COMMAND "${Python_EXECUTABLE}" -c "import nanobind; print(nanobind.cmake_dir())" OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE NB_DIR) list(APPEND CMAKE_PREFIX_PATH "${NB_DIR}") find_package(nanobind CONFIG REQUIRED) if(MSVC) add_compile_options(/O2 /D_HAS_STD_BYTE=0 /DLOCAL_ZLIB=1 /DNO_GZIP=1) else() add_compile_options(-O3 -Wall -Wno-sign-compare -D__TERMINAL__ -DLOCAL_ZLIB=1 -DNO_GZIP=1) endif() include_directories(pypcode/thirdparty) set(ZLIB pypcode/zlib/adler32.c pypcode/zlib/deflate.c pypcode/zlib/inffast.c pypcode/zlib/inflate.c pypcode/zlib/inftrees.c pypcode/zlib/trees.c pypcode/zlib/zutil.c ) set(SLEIGH_COMMON pypcode/sleigh/address.cc pypcode/sleigh/compression.cc pypcode/sleigh/context.cc pypcode/sleigh/float.cc pypcode/sleigh/globalcontext.cc pypcode/sleigh/marshal.cc pypcode/sleigh/opcodes.cc pypcode/sleigh/pcodecompile.cc pypcode/sleigh/pcodeparse.cc pypcode/sleigh/pcoderaw.cc pypcode/sleigh/semantics.cc pypcode/sleigh/slaformat.cc pypcode/sleigh/sleigh.cc pypcode/sleigh/sleighbase.cc pypcode/sleigh/slghpatexpress.cc pypcode/sleigh/slghpattern.cc pypcode/sleigh/slghsymbol.cc pypcode/sleigh/space.cc pypcode/sleigh/translate.cc pypcode/sleigh/xml.cc ) add_executable(sleigh pypcode/sleigh/filemanage.cc pypcode/sleigh/slgh_compile.cc pypcode/sleigh/slghparse.cc pypcode/sleigh/slghscan.cc ${SLEIGH_COMMON} ${ZLIB} ) install(TARGETS sleigh DESTINATION bin) nanobind_add_module(pypcode_native pypcode/pypcode_native.cpp ${SLEIGH_COMMON} ${ZLIB} ) if(DEFINED ENV{COVERAGE} AND NOT "$ENV{COVERAGE}" STREQUAL "") if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") target_compile_options(pypcode_native PRIVATE --coverage -O0 -g) target_link_options(pypcode_native PRIVATE --coverage -O0 -g) endif() endif() install(TARGETS pypcode_native DESTINATION .) 
================================================ FILE: LICENSE.txt ================================================ pypcode is a library built around the SLEIGH library. SLEIGH and the processor definition files under the pypcode/processors directory originate from the Ghidra project (https://ghidra-sre.org/). SLEIGH is released under the terms of the Apache 2 license as defined in docs/ghidra/LICENSE. See NOTICE file in docs/ghidra/NOTICE. The remaining code of pypcode, unless stated otherwise, is licensed under the terms of the 2-clause BSD license below. ================================================================================ Copyright (c) 2021, Arizona Board of Regents Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================ FILE: MANIFEST.in ================================================ graft pypcode prune pypcode/bin include CMakeLists.txt include LICENSE.txt graft tests global-exclude *.so global-exclude *.gitignore ================================================ FILE: README.md ================================================ pypcode ======= [![pypi](https://img.shields.io/pypi/v/pypcode)](https://pypi.org/project/pypcode/) [![codecov](https://codecov.io/gh/angr/pypcode/graph/badge.svg?token=JCV27I1SPZ)](https://codecov.io/gh/angr/pypcode) Machine code disassembly and IR translation library for Python using the excellent SLEIGH library from the [Ghidra](https://ghidra-sre.org/) framework. This library was created primarily for use with [angr](http://angr.io), which provides analyses and symbolic execution of p-code. Documentation covering how to install and use pypcode is [available here](https://api.angr.io/projects/pypcode/en/latest/). ================================================ FILE: docs/Makefile ================================================ # Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = . BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) ================================================ FILE: docs/api.rst ================================================ :mod:`pypcode` ========================================= .. 
automodule:: pypcode ================================================ FILE: docs/conf.py ================================================ # Configuration file for the Sphinx documentation builder. # # For the full list of built-in configuration values, see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html import datetime # -- Project information ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information project = "pypcode" project_copyright = f"{datetime.datetime.now().year}, The angr Project contributors" author = "The angr Project" # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "IPython.sphinxext.ipython_console_highlighting", "IPython.sphinxext.ipython_directive", "sphinx.ext.coverage", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", "sphinx_autodoc_typehints", "myst_parser", ] templates_path = ["_templates"] exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # -- Options for autodoc ----------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration autoclass_content = "class" autodoc_default_options = { "members": True, "member-order": "bysource", "inherited-members": True, "show-inheritance": True, "undoc-members": True, } autodoc_inherit_docstrings = True autodoc_typehints = "both" # -- Options for coverage ---------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/extensions/coverage.html coverage_write_headline = False # -- Options for HTML output ------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output html_theme = "furo" html_static_path = ["_static"] 
================================================ FILE: docs/guide.rst ================================================ Guide ===== Installation ------------ This package can be installed on Linux, macOS, and Windows platforms for recent (3.8+) versions of both CPython and PyPy. Wheels are provided for several configurations. The latest release can be installed from PyPI using ``pip``: .. code:: bash pip install pypcode The very latest development version can be installed from GitHub via: .. code:: bash pip install --user https://github.com/angr/pypcode/archive/refs/heads/master.zip Usage Example ------------- Disassemble with :meth:`pypcode.Context.disassemble`: .. ipython:: In [0]: from pypcode import Context ...: ctx = Context("x86:LE:64:default") ...: dx = ctx.disassemble(bytes.fromhex("483578563412c3")) ...: print(dx) Work with :class:`pypcode.Disassembly` and :class:`pypcode.Instruction`: .. ipython:: In [0]: (dx.instructions[0].mnem, dx.instructions[0].body) Translate to P-Code with :meth:`pypcode.Context.translate`: .. ipython:: In [0]: from pypcode import Context ...: ctx = Context("x86:LE:64:default") ...: tx = ctx.translate(bytes.fromhex("483578563412c3")) ...: print(tx) Work with :class:`pypcode.Translation` and :class:`pypcode.PcodeOp`: .. ipython:: In [0]: tx.ops[3].opcode In [0]: tx.ops[3].inputs[0].space.name In [0]: tx.ops[3].inputs[0].getRegisterName() Command Line Usage Example -------------------------- The ``pypcode`` module can be invoked from command line to disassemble and translate supported machine code to P-code from command line. Run ``python -m pypcode --help`` for usage information. 
:: $ python -m pypcode -b x86:LE:64:default test-x64.bin -------------------------------------------------------------------------------- 00000000/2: XOR EAX,EAX -------------------------------------------------------------------------------- 0: CF = 0x0 1: OF = 0x0 2: EAX = EAX ^ EAX 3: RAX = zext(EAX) 4: SF = EAX s< 0x0 5: ZF = EAX == 0x0 6: unique[0x2580:4] = EAX & 0xff 7: unique[0x2590:1] = popcount(unique[0x2580:4]) 8: unique[0x25a0:1] = unique[0x2590:1] & 0x1 9: PF = unique[0x25a0:1] == 0x0 -------------------------------------------------------------------------------- 00000002/2: CMP ESI,EAX -------------------------------------------------------------------------------- 0: CF = ESI < EAX 1: OF = sborrow(ESI, EAX) 2: unique[0x5180:4] = ESI - EAX 3: SF = unique[0x5180:4] s< 0x0 4: ZF = unique[0x5180:4] == 0x0 5: unique[0x2580:4] = unique[0x5180:4] & 0xff 6: unique[0x2590:1] = popcount(unique[0x2580:4]) 7: unique[0x25a0:1] = unique[0x2590:1] & 0x1 8: PF = unique[0x25a0:1] == 0x0 -------------------------------------------------------------------------------- 00000004/2: JBE 0x17 -------------------------------------------------------------------------------- 0: unique[0x18f0:1] = CF || ZF 1: if (unique[0x18f0:1]) goto ram[0x17:8] SLEIGH & P-Code References -------------------------- Extensive documentation covering SLEIGH and P-Code is available online: * `SLEIGH, P-Code Introduction `_ * `P-Code Reference Manual `_ ================================================ FILE: docs/index.rst ================================================ pypcode documentation ===================== pypcode is a machine code disassembly and IR translation library for Python using the excellent `SLEIGH `__ library from the `Ghidra `__ framework (version 12.0.2). This library was created primarily for use with `angr `__, which provides analyses and symbolic execution of p-code. Table of Contents ----------------- .. 
toctree:: :maxdepth: 2 Guide Architecture Support API Reference Indices and Tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` ================================================ FILE: docs/languages.rst ================================================ Architecture Support ==================== .. ipython:: In [0]: for arch in pypcode.Arch.enumerate(): ...: for lang in arch.languages: ...: print(f'{lang.id:32} - {lang.description}') ================================================ FILE: docs/make.bat ================================================ @ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=. set BUILDDIR=_build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. 
class TranslateFlags(IntEnum):
    """
    Flags that can be passed to Context::translate

    Wraps the native TRANSLATE_FLAGS_* constants in an IntEnum so Python
    callers can pass symbolic names while remaining int-compatible.
    """

    # Marks translation as terminating a basic block (native constant).
    BB_TERMINATING = TRANSLATE_FLAGS_BB_TERMINATING
""" __slots__ = ( "archdir", "ldef", "_pspec", "_cspecs", ) archdir: str ldef: ET.Element def __init__(self, archdir: str, ldef: ET.Element): self.archdir = archdir self.ldef = ldef self._pspec: ET.Element | None = None self._cspecs: dict[tuple[str, str], ET.Element] | None = None @property def pspec_path(self) -> str: return os.path.join(self.archdir, self.processorspec) @property def slafile_path(self) -> str: return os.path.join(self.archdir, self.slafile) @property def description(self) -> str: elem = self.ldef.find("description") if elem is not None: return elem.text or "" return "" def __getattr__(self, key): if key in self.ldef.attrib: return self.ldef.attrib[key] raise AttributeError(key) @property def pspec(self) -> ET.Element | None: if self._pspec is None: self._pspec = ET.parse(self.pspec_path).getroot() return self._pspec @property def cspecs(self) -> Mapping[tuple[str, str], ET.Element]: if self._cspecs is None: self._cspecs = {} for e in self.ldef.findall("compiler"): path = os.path.join(self.archdir, e.attrib["spec"]) cspec = ET.parse(path).getroot() self._cspecs[(e.attrib["id"], e.attrib["name"])] = cspec return self._cspecs def init_context_from_pspec(self, ctx: Context) -> None: if self.pspec is None: return cd = self.pspec.find("context_data") if cd is None: return cs = cd.find("context_set") if cs is None: return for e in cs: assert e.tag == "set" ctx.setVariableDefault(e.attrib["name"], int(e.attrib["val"])) @classmethod def from_id(cls, langid: str) -> ArchLanguage | None: """ Return language with given id, or None if the language could not be found. """ for arch in Arch.enumerate(): for lang in arch.languages: if lang.id == langid: return lang return None class Arch: """ Main class representing an architecture describing available languages. 
""" __slots__ = ( "archpath", "archname", "ldefpath", "ldef", "languages", ) archpath: str archname: str ldefpath: str ldef: ET.ElementTree[ET.Element[str]] languages: Sequence[ArchLanguage] def __init__(self, name: str, ldefpath: str): """ Initialize the Arch. :param name: The name of the architecture :param ldefpath: Path to language definition files (.ldefs) """ self.archpath = os.path.dirname(ldefpath) self.archname = name self.ldefpath = ldefpath self.ldef = ET.parse(ldefpath) self.languages = [ArchLanguage(self.archpath, e) for e in self.ldef.getroot()] @classmethod def enumerate(cls) -> Generator[Arch]: """ Enumerate all available architectures and languages. Language definitions are sourced from definitions shipped with pypcode and can be found in processors//data/languages/.ldefs """ for archname in os.listdir(SPECFILES_DIR): langdir = os.path.join(SPECFILES_DIR, archname, "data", "languages") if not (os.path.exists(langdir) and os.path.isdir(langdir)): continue for langname in os.listdir(langdir): if not langname.endswith(".ldefs"): continue ldefpath = os.path.join(langdir, langname) yield Arch(archname, ldefpath) class Context(_Context): """ Context for translation. """ __slots__ = ( "language", "registers", ) language: ArchLanguage registers: dict[str, Varnode] def __init__(self, language: ArchLanguage | str): """ Initialize a context for translation or disassembly. 
:param language: The ``ArchLanguage`` to initialize the context with, or a language id ``str`` """ if isinstance(language, ArchLanguage): self.language = language elif isinstance(language, str): _l = ArchLanguage.from_id(cast(str, language)) assert _l is not None self.language = _l else: raise TypeError("Context must be initialized with a language or language id") super().__init__(f"{self.language.slafile_path}") self.language.init_context_from_pspec(self) self.registers = {n: v for v, n in self.getAllRegisters().items()} ================================================ FILE: pypcode/__main__.py ================================================ #!/usr/bin/env python """ Runs when invoking pypcode module from command line. Lists supported architectures, and handles basic disassembly and translation to P-code of supported binaries. Does not parse object files, the binary files must be plain machine code bytes in a file. """ import argparse import logging import sys from difflib import SequenceMatcher from pypcode import Arch, BadDataError, Context, OpCode, TranslateFlags, UnimplError log = logging.getLogger(__name__) logging.basicConfig(level=logging.DEBUG, format="[%(name)s:%(levelname)s] %(message)s") def main(): ap = argparse.ArgumentParser( prog="pypcode", description="Disassemble and translate machine code to P-code using SLEIGH", ) ap.add_argument( "-l", "--list", action="store_true", help="list supported architecture languages", ) ap.add_argument("langid", help="architecture language id") ap.add_argument("binary", help="path to flat binary code") ap.add_argument("base", default="0", nargs="?", help="base address to load at") ap.add_argument("-o", "--offset", default="0", help="offset in binary file to load from") ap.add_argument("-s", "--length", default=None, help="length of code in bytes to load") ap.add_argument( "-i", "--max-instructions", default=0, type=int, help="maximum number of instructions to translate", ) ap.add_argument( "-b", "--basic-block", 
action="store_true", default=False, help="stop translation at end of basic block", ) # List supported languages langs = {lang.id: lang for arch in Arch.enumerate() for lang in arch.languages} if ("-l" in sys.argv) or ("--list" in sys.argv): for langid in sorted(langs): print("%-35s - %s" % (langid, langs[langid].description)) return args = ap.parse_args() # Get requested language if args.langid not in langs: print(f'Language "{args.langid}" not found.') t = args.langid.upper() suggestions = [langid for langid in langs if SequenceMatcher(None, t, langid.split()[0].upper()).ratio() > 0.25] if len(suggestions): print("\nSuggestions:") for langid in sorted(suggestions): print(" %-35s - %s" % (langid, langs[langid].description)) print("") print("Try `--list` for full list of architectures.") sys.exit(1) # Load target binary code base = int(args.base, 0) with open(args.binary, "rb") as f: f.seek(int(args.offset, 0)) code = f.read(int(args.length, 0)) if args.length else f.read() # Translate ctx = Context(langs[args.langid]) try: flags = TranslateFlags.BB_TERMINATING if args.basic_block else 0 res = ctx.translate(code, base, max_instructions=args.max_instructions, flags=flags) last_imark_idx = 0 for i, op in enumerate(res.ops): if op.opcode == OpCode.IMARK: last_imark_idx = i disas_addr = op.inputs[0].offset disas_offset = disas_addr - base disas_len = sum(vn.size for vn in op.inputs) disas_slice = code[disas_offset : disas_offset + disas_len] print(ctx.disassemble(disas_slice, disas_addr)) else: print(f" {i - last_imark_idx - 1:3d}: {op}") print("") except (BadDataError, UnimplError) as e: print(f"An error occurred during translation: {e}") sys.exit(1) if __name__ == "__main__": main() ================================================ FILE: pypcode/__version__.py ================================================ __version__ = "3.3.4.dev0" ================================================ FILE: pypcode/docs/ghidra/DISCLAIMER.md 
================================================ # Disclaimer of Warranty This Work is provided "AS IS." Any express or implied warranties, including but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the United States Government be liable for any direct, indirect, incidental, special, exemplary or consequential damages (including, but not limited to, procurement of substitute goods or services, loss of use, data or profits, or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this Work, even if advised of the possibility of such damage. The User of this Work agrees to hold harmless and indemnify the United States Government, its agents and employees from every claim or liability (whether in tort or in contract), including attorney's fees, court costs, and expenses, arising in direct consequence of Recipient's use of the item, including, but not limited to, claims or liabilities made for injury to or death of personnel of User or third parties, damage to or destruction of property of User or third parties, and infringement or other violations of intellectual property or technical data rights. # Disclaimer of Endorsement Nothing in this Work is intended to constitute an endorsement, explicit or implied, by the United States Government of any particular manufacturer's product or service. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise, in this Work does not constitute an endorsement, recommendation, or favoring by the United States Government and shall not be used for advertising or product endorsement purposes. 
================================================ FILE: pypcode/docs/ghidra/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: pypcode/docs/ghidra/NOTICE ================================================ Ghidra This product includes software developed at National Security Agency (https://www.nsa.gov) Portions of this product were created by the U.S. Government and not subject to U.S. copyright protections under 17 U.S.C. The remaining portions are copyright their respective authors and have been contributed under the terms of one or more open source licenses, and made available to you under the terms of those licenses. (See LICENSE) Licensing Intent The intent is that this software and documentation ("Project") should be treated as if it is licensed under the license associated with the Project ("License") in the LICENSE file. However, because we are part of the United States (U.S.) Federal Government, it is not that simple. The portions of this Project written by U.S. 
# ============================ pypcode/printing.py ============================
from __future__ import annotations

from .pypcode_native import (  # pylint:disable=no-name-in-module
    Disassembly,
    Instruction,
    OpCode,
    PcodeOp,
    Translation,
    Varnode,
)


class OpFormat:
    """
    General op pretty-printer.
    """

    @staticmethod
    def fmt_vn(vn: Varnode) -> str:
        # Constants print as hex literals; registers by name when available;
        # anything else falls back to space[offset:size].
        space_name = vn.space.name
        if space_name == "const":
            return f"{vn.offset:#x}"
        if space_name == "register":
            reg_name = vn.getRegisterName()
            if reg_name:
                return reg_name
        return f"{space_name}[{vn.offset:x}:{vn.size:d}]"

    def fmt(self, op: PcodeOp) -> str:
        operands = ", ".join(self.fmt_vn(vn) for vn in op.inputs)
        return f"{op.opcode.__name__} {operands}"


class OpFormatUnary(OpFormat):
    """
    General unary op pretty-printer.
    """

    __slots__ = ("operator",)

    def __init__(self, operator: str):
        super().__init__()
        self.operator = operator

    def fmt(self, op: PcodeOp) -> str:
        return self.operator + self.fmt_vn(op.inputs[0])


class OpFormatBinary(OpFormat):
    """
    General binary op pretty-printer.
    """

    __slots__ = ("operator",)

    def __init__(self, operator: str):
        super().__init__()
        self.operator = operator

    def fmt(self, op: PcodeOp) -> str:
        lhs = self.fmt_vn(op.inputs[0])
        rhs = self.fmt_vn(op.inputs[1])
        return f"{lhs} {self.operator} {rhs}"


class OpFormatFunc(OpFormat):
    """
    Function-call style op pretty-printer.
    """

    __slots__ = ("operator",)

    def __init__(self, operator: str):
        super().__init__()
        self.operator = operator

    def fmt(self, op: PcodeOp) -> str:
        args = ", ".join(self.fmt_vn(vn) for vn in op.inputs)
        return f"{self.operator}({args})"


class OpFormatSpecial(OpFormat):
    """
    Specialized op pretty-printers.
    """

    def fmt_BRANCH(self, op: PcodeOp) -> str:
        return f"goto {self.fmt_vn(op.inputs[0])}"

    def fmt_BRANCHIND(self, op: PcodeOp) -> str:
        return f"goto [{self.fmt_vn(op.inputs[0])}]"

    def fmt_CALL(self, op: PcodeOp) -> str:
        return f"call {self.fmt_vn(op.inputs[0])}"

    def fmt_CALLIND(self, op: PcodeOp) -> str:
        return f"call [{self.fmt_vn(op.inputs[0])}]"

    def fmt_CALLOTHER(self, op: PcodeOp) -> str:
        # Input 0 names the user-defined op; the rest are its arguments.
        args = ", ".join(self.fmt_vn(vn) for vn in op.inputs[1:])
        return f"{op.inputs[0].getUserDefinedOpName()}({args})"

    def fmt_CBRANCH(self, op: PcodeOp) -> str:
        return f"if ({self.fmt_vn(op.inputs[1])}) goto {self.fmt_vn(op.inputs[0])}"

    def fmt_LOAD(self, op: PcodeOp) -> str:
        return f"*[{op.inputs[0].getSpaceFromConst().name}]{self.fmt_vn(op.inputs[1])}"

    def fmt_RETURN(self, op: PcodeOp) -> str:
        return f"return {self.fmt_vn(op.inputs[0])}"

    def fmt_STORE(self, op: PcodeOp) -> str:
        dest = f"*[{op.inputs[0].getSpaceFromConst().name}]{self.fmt_vn(op.inputs[1])}"
        return f"{dest} = {self.fmt_vn(op.inputs[2])}"

    # Dispatch table of opcode -> unbound handler, built once at class
    # creation rather than on every fmt() call.
    _HANDLERS = {
        OpCode.BRANCH: fmt_BRANCH,
        OpCode.BRANCHIND: fmt_BRANCHIND,
        OpCode.CALL: fmt_CALL,
        OpCode.CALLIND: fmt_CALLIND,
        OpCode.CALLOTHER: fmt_CALLOTHER,
        OpCode.CBRANCH: fmt_CBRANCH,
        OpCode.LOAD: fmt_LOAD,
        OpCode.RETURN: fmt_RETURN,
        OpCode.STORE: fmt_STORE,
    }

    def fmt(self, op: PcodeOp) -> str:
        handler = self._HANDLERS.get(op.opcode)
        if handler is None:
            return super().fmt(op)
        return handler(self, op)


# Operator spellings for each p-code operation, grouped by printer shape.
_UNARY_OPERATORS = {
    OpCode.BOOL_NEGATE: "!",
    OpCode.COPY: "",
    OpCode.FLOAT_NEG: "f- ",
    OpCode.INT_2COMP: "-",
    OpCode.INT_NEGATE: "~",
}

_BINARY_OPERATORS = {
    OpCode.BOOL_AND: "&&",
    OpCode.BOOL_OR: "||",
    OpCode.BOOL_XOR: "^^",
    OpCode.FLOAT_ADD: "f+",
    OpCode.FLOAT_DIV: "f/",
    OpCode.FLOAT_EQUAL: "f==",
    OpCode.FLOAT_LESS: "f<",
    OpCode.FLOAT_LESSEQUAL: "f<=",
    OpCode.FLOAT_MULT: "f*",
    OpCode.FLOAT_NOTEQUAL: "f!=",
    OpCode.FLOAT_SUB: "f-",
    OpCode.INT_ADD: "+",
    OpCode.INT_AND: "&",
    OpCode.INT_DIV: "/",
    OpCode.INT_EQUAL: "==",
    OpCode.INT_LEFT: "<<",
    OpCode.INT_LESS: "<",
    OpCode.INT_LESSEQUAL: "<=",
    OpCode.INT_MULT: "*",
    OpCode.INT_NOTEQUAL: "!=",
    OpCode.INT_OR: "|",
    OpCode.INT_REM: "%",
    OpCode.INT_RIGHT: ">>",
    OpCode.INT_SDIV: "s/",
    OpCode.INT_SLESS: "s<",
    OpCode.INT_SLESSEQUAL: "s<=",
    OpCode.INT_SREM: "s%",
    OpCode.INT_SRIGHT: "s>>",
    OpCode.INT_SUB: "-",
    OpCode.INT_XOR: "^",
}

_FUNC_OPERATORS = {
    OpCode.CPOOLREF: "cpool",
    OpCode.FLOAT_ABS: "abs",
    OpCode.FLOAT_CEIL: "ceil",
    OpCode.FLOAT_FLOAT2FLOAT: "float2float",
    OpCode.FLOAT_FLOOR: "floor",
    OpCode.FLOAT_INT2FLOAT: "int2float",
    OpCode.FLOAT_NAN: "nan",
    OpCode.FLOAT_ROUND: "round",
    OpCode.FLOAT_SQRT: "sqrt",
    OpCode.FLOAT_TRUNC: "trunc",
    OpCode.INT_CARRY: "carry",
    OpCode.INT_SBORROW: "sborrow",
    OpCode.INT_SCARRY: "scarry",
    OpCode.INT_SEXT: "sext",
    OpCode.INT_ZEXT: "zext",
    OpCode.LZCOUNT: "lzcount",
    OpCode.NEW: "newobject",
    OpCode.POPCOUNT: "popcount",
}

_SPECIAL_OPCODES = (
    OpCode.BRANCH,
    OpCode.BRANCHIND,
    OpCode.CALL,
    OpCode.CALLIND,
    OpCode.CALLOTHER,
    OpCode.CBRANCH,
    OpCode.LOAD,
    OpCode.RETURN,
    OpCode.STORE,
)


class PcodePrettyPrinter:
    """
    P-code pretty-printer.
    """

    DEFAULT_OP_FORMAT = OpFormat()

    # Composed from the grouped operator tables above; one formatter instance
    # per opcode, exactly as in the flat literal form.
    OP_FORMATS = {
        **{opc: OpFormatUnary(sym) for opc, sym in _UNARY_OPERATORS.items()},
        **{opc: OpFormatBinary(sym) for opc, sym in _BINARY_OPERATORS.items()},
        **{opc: OpFormatFunc(name) for opc, name in _FUNC_OPERATORS.items()},
        **{opc: OpFormatSpecial() for opc in _SPECIAL_OPCODES},
    }

    @staticmethod
    def fmt_op(op: PcodeOp) -> str:
        fmt = PcodePrettyPrinter.OP_FORMATS.get(op.opcode, PcodePrettyPrinter.DEFAULT_OP_FORMAT)
        prefix = f"{fmt.fmt_vn(op.output)} = " if op.output else ""
        return prefix + fmt.fmt(op)

    @staticmethod
    def fmt_translation(tx: Translation) -> str:
        return "\n".join(PcodePrettyPrinter.fmt_op(op) for op in tx.ops)


def fmt_instruction(insn: Instruction) -> str:
    # Rendered as "addr/len: mnemonic body".
    header = f"{insn.addr.offset:#x}/{insn.length}"
    return f"{header}: {insn.mnem} {insn.body}"


def fmt_disassembly(dx: Disassembly) -> str:
    return "\n".join(fmt_instruction(insn) for insn in dx.instructions)


# Monkey patch print handlers
Disassembly.__str__ = fmt_disassembly  # type: ignore[assignment,method-assign]
Instruction.__str__ = fmt_instruction  # type: ignore[assignment,method-assign]
PcodeOp.__str__ = PcodePrettyPrinter.fmt_op  # type: ignore[assignment,method-assign]
Translation.__str__ = PcodePrettyPrinter.fmt_translation  # type: ignore[assignment,method-assign]
Varnode.__str__ = OpFormat.fmt_vn  # type: ignore[assignment,method-assign]
pypcode/processors/6502/data/languages/6502.pspec ================================================ ================================================ FILE: pypcode/processors/6502/data/languages/6502.slaspec ================================================ # sleigh specification file for MOS 6502 define endian=little; define alignment=1; define space RAM type=ram_space size=2 default; define space register type=register_space size=1; define register offset=0x00 size=1 [ A X Y P ]; define register offset=0x20 size=2 [ PC SP ]; define register offset=0x20 size=1 [ PCL PCH S SH ]; define register offset=0x30 size=1 [ N V B D I Z C ]; # status bits #TOKENS define token opbyte (8) op = (0,7) aaa = (5,7) bbb = (2,4) cc = (0,1) ; define token data8 (8) imm8 = (0,7) rel = (0,7) signed ; define token data (16) imm16 = (0,15) ; macro popSR() { SP = SP + 1; local ccr = *:1 SP; N = ccr[7,1]; V = ccr[6,1]; B = ccr[4,1]; D = ccr[3,1]; I = ccr[2,1]; Z = ccr[1,1]; C = ccr[0,1]; } macro pushSR() { local ccr:1 = 0xff; ccr[7,1] = N; ccr[6,1] = V; ccr[4,1] = B; ccr[3,1] = D; ccr[2,1] = I; ccr[1,1] = Z; ccr[0,1] = C; *:1 (SP) = ccr; SP = SP -1; } macro resultFlags(value) { Z = (value == 0); N = (value s< 0); } macro subtraction_flags1(register, operand, result) { local complement_register = ~register; V = ( ((register & ~operand & ~result) | (complement_register & operand & result)) & 0b10000000 ) != 0; N = (result s< 0); Z = (result == 0); C = ( ((complement_register & operand) | (operand & result) | (result & complement_register)) & 0b10000000 ) != 0; } ################################################################ # Pseudo Instructions ################################################################ define pcodeop readIRQ; ################################################################ REL: reloc is rel [ reloc = inst_next + rel; ] { export *:2 reloc; } # Immediate OP1: "#"imm8 is bbb=2; imm8 { tmp:1 = imm8; export tmp; } # Zero Page OP1: imm8 is bbb=1; imm8 { export *:1 imm8; } # 
Zero Page Indexed X OP1: imm8,X is bbb=5 & X; imm8 { tmp:2 = zext(imm8 + X); export *:1 tmp; } # Absolute OP1: imm16 is bbb=3; imm16 { export *:1 imm16; } # Absolute Indexed X OP1: imm16,X is bbb=7 & X; imm16 { tmp:2 = imm16 + zext(X); export *:1 tmp; } # Absolute Indexed Y OP1: imm16,Y is bbb=6 & Y; imm16 { tmp:2 = imm16 + zext(Y); export *:1 tmp; } # Indirect X OP1: (imm8,X) is bbb=0 & X; imm8 { addr:2 = zext(imm8 + X); tmp:2 = *:2 addr; export *:1 tmp; } # Indirect Y OP1: (imm8),Y is bbb=4 & Y; imm8 { addr:2 = imm8; tmp:2 = *:2 addr; tmp = tmp + zext(Y); export *:1 tmp; } # Immediate OP2: "#"imm8 is bbb=0; imm8 { tmp:1 = imm8; export tmp; } # Zero Page OP2: imm8 is bbb=1; imm8 { export *:1 imm8; } OP2: A is bbb=2 & A { export A; } # Absolute OP2: imm16 is bbb=3; imm16 { export *:1 imm16; } # Zero Page Indexed X OP2: imm8,X is bbb=5 & X; imm8 { tmp:2 = zext(imm8 + X); export *:1 tmp; } # Absolute Indexed X OP2: imm16,X is bbb=7 & X; imm16 { tmp:2 = imm16 + zext(X); export *:1 tmp; } OP2ST: OP2 is OP2 { export OP2; } OP2ST: imm8,Y is bbb=5 & Y; imm8 { tmp:2 = zext(imm8 + Y); export *:1 tmp; } OP2LD: OP2 is OP2 { export OP2; } OP2LD: imm8,Y is bbb=5 & Y; imm8 { tmp:2 = zext(imm8 + Y); export *:1 tmp; } OP2LD: imm16,Y is bbb=7 & Y; imm16 { tmp:2 = imm16 + zext(Y); export *:1 tmp; } ADDR8: imm8 is imm8 { export *:1 imm8; } ADDR16: imm16 is imm16 { export *:1 imm16; } ADDRI: (imm16) is imm16 { tmp:2 = imm16; export *:2 tmp; } # Instructions :ADC OP1 is (cc=1 & aaa=3) ... & OP1 { local op1 = OP1; local tmpC = C; C = carry(A, op1); A = A + op1 + tmpC; resultFlags(A); V = C; } :AND OP1 is (cc=1 & aaa=1) ... & OP1 { A = A & OP1; resultFlags(A); } :ASL OP2 is (op=0x06 | op=0x0A | op=0x0E | op=0x16 | op=0x1E) ... & OP2 { local tmp = OP2; C = tmp >> 7; tmp = tmp << 1; OP2 = tmp; resultFlags(tmp); } :BCC REL is op=0x90; REL { if (C == 0) goto REL; } :BCS REL is op=0xB0; REL { if (C) goto REL; } :BEQ REL is op=0xF0; REL { if (Z) goto REL; } :BIT OP2 is (op=0x24 | op=0x2C) ... 
# --- tail of :BIT (its match pattern begins on the previous source line) ---
# N and V are copied from bits 7 and 6 of the operand; Z reflects A AND operand.
& OP2
{
    N = (OP2 & 0x80) == 0x80;
    V = (OP2 & 0x40) == 0x40;
    local value = A & OP2;
    Z = (value == 0);
}

# Conditional branches: each tests one status flag and jumps to REL.
:BMI REL is op=0x30; REL { if (N) goto REL; }      # branch if N set
:BNE REL is op=0xD0; REL { if (Z == 0) goto REL; } # branch if Z clear
:BPL REL is op=0x10; REL { if (N == 0) goto REL; } # branch if N clear

# BRK: push the return address and status register, set B and I, then jump
# through the 16-bit vector stored at 0xFFFE.
:BRK is op=0x00
{
    *:2 (SP - 1) = inst_next;
    SP = SP - 2;
    B = 1;
    pushSR();
    I = 1;
    local target:2 = 0xFFFE;
    goto [*:2 target];
}

:BVC REL is op=0x50; REL { if (V == 0) goto REL; } # branch if V clear
:BVS REL is op=0x70; REL { if (V) goto REL; }      # branch if V set

# Flag-clearing instructions.
:CLC is op=0x18 { C = 0; }
:CLD is op=0xD8 { D = 0; }
:CLI is op=0x58 { I = 0; }
:CLV is op=0xB8 { V = 0; }

# Compares: N/Z from (register - operand), C = (register >= operand).
:CMP OP1 is (cc=1 & aaa=6) ... & OP1
{
    local op1 = OP1;
    local tmp = A - op1;
    resultFlags(tmp);
    C = (A >= op1);
}
:CPX OP2 is (op=0xE0 | op=0xE4 | op=0xEC) ... & OP2
{
    local op1 = OP2;
    local tmp = X - op1;
    resultFlags(tmp);
    C = (X >= op1);
}
:CPY OP2 is (op=0xC0 | op=0xC4 | op=0xCC) ... & OP2
{
    local op1 = OP2;
    local tmp = Y - op1;
    resultFlags(tmp);
    C = (Y >= op1);
}

# Decrements: N/Z updated via resultFlags.
:DEC OP2 is (op=0xC6 | op=0xCE | op=0xD6 | op=0xDE) ... & OP2
{
    local tmp = OP2 - 1;
    OP2 = tmp;
    resultFlags(tmp);
}
:DEX is op=0xCA { X = X - 1; resultFlags(X); }
:DEY is op=0x88 { Y = Y -1; resultFlags(Y); }

# EOR: exclusive-OR accumulator with operand.
:EOR OP1 is (cc=1 & aaa=2) ... & OP1
{
    local op1 = OP1;
    A = A ^ op1;
    resultFlags(A);
}

# Increments.
:INC OP2 is (op=0xE6 | op=0xEE | op=0xF6 | op=0xFE) ... & OP2
{
    local tmp = OP2 + 1;
    OP2 = tmp;
    resultFlags(tmp);
}
:INY is op=0xC8 { Y = Y + 1; resultFlags(Y); }
:INX is op=0xE8 { X = X + 1; resultFlags(X); }

# Jumps: absolute and (indirect) forms.
:JMP ADDR16 is (op=0x4C); ADDR16 { goto ADDR16; }
:JMP ADDRI is (op=0x6c); ADDRI { goto [ADDRI]; }

# JSR: push the return address, then call the absolute target.
:JSR ADDR16 is op=0x20; ADDR16
{
    *:2 (SP-1) = inst_next;
    SP=SP-2;
    call ADDR16;
}

# Loads: copy the operand into A/Y/X and set N/Z.
:LDA OP1 is (cc=1 & aaa=5) ... & OP1 { A = OP1; resultFlags(A); }
:LDY OP2 is (op=0xA0 | op=0xA4 | op=0xAC | op=0xB4 | op=0xBC) ... & OP2 { Y = OP2; resultFlags(Y); }
:LDX OP2LD is (op=0xA2 | op=0xA6 | op=0xAE | op=0xB6 | op=0xBE) ... & OP2LD { X = OP2LD; resultFlags(X); }

# LSR: logical shift right (semantic body continues on the next source line).
:LSR OP2 is (op=0x46 | op=0x4A | op=0x4E | op=0x56 | op=0x5E) ...
# --- tail of :LSR (its match pattern is on the previous source line) ---
# Shift right one bit: bit 0 goes to C, N is forced clear.
& OP2
{
    local tmp = OP2;
    C = tmp & 1;
    tmp = tmp >> 1;
    OP2 = tmp;
    Z = (tmp == 0);
    N = 0;
}

:NOP is op=0xEA { }

# ORA: inclusive-OR accumulator with operand.
:ORA OP1 is (cc=1 & aaa=0) ... & OP1 { A = A | OP1; resultFlags(A); }

# Stack push/pull of the status register and accumulator.
:PHP is op=0x8 { pushSR(); }
:PLP is op=0x28 { popSR(); }
:PHA is op=0x48 { *:1 (SP) = A; SP = SP - 1; }
:PLA is op=0x68 { SP = SP + 1; A = *:1 (SP); resultFlags(A); }

# ROL: rotate left through carry (old C enters bit 0, old bit 7 becomes C).
:ROL OP2 is (op=0x26 | op=0x2A | op=0x2E | op=0x36 | op=0x3E) ... & OP2
{
    local tmpC = C;
    local op2 = OP2;
    C = op2 >> 7;
    local result = op2 << 1;
    result = result | tmpC;
    OP2 = result;
    resultFlags(result);
}

# ROR: rotate right through carry (old C enters bit 7, old bit 0 becomes C).
:ROR OP2 is (op=0x66 | op=0x6A | op=0x6E | op=0x76 | op=0x7E) ... & OP2
{
    local tmpC = C << 7;
    local tmp = OP2;
    C = tmp & 1;
    tmp = tmp >> 1;
    tmp = tmp | tmpC;
    OP2 = tmp;
    resultFlags(tmp);
}

# RTI: restore the status register, then return through the popped address.
:RTI is op=0x40
{
    popSR();
    SP = SP+1;
    tmp:2 = *:2 SP;
    SP = SP+1;
    return [tmp];
}

# RTS: return through the address popped from the stack.
:RTS is op=0x60
{
    SP = SP+1;
    tmp:2 = *:2 SP;
    SP = SP+1;
    return [tmp];
}

# SBC: subtract with borrow (!C); flags are set by subtraction_flags1.
:SBC OP1 is (cc=1 & aaa=7) ... & OP1
{
    local op1 = OP1;
    local result = A - op1 - !C;
    subtraction_flags1(A, op1, result);
    A = result;
    # resultFlags(tmp);
    # C = ((A <= op1) * C) | (A < op1);
    # A = tmp;
}

# Flag-setting instructions.
:SEC is op=0x38 { C = 1; }
:SED is op=0xF8 { D = 1; }
:SEI is op=0x78 { I = 1; }

# Stores: write the register to the addressed operand; no flags touched.
:STA OP1 is (cc=1 & aaa=4) ... & OP1 { OP1 = A; }
:STX OP2ST is (op=0x86 | op=0x8E | op=0x96) ... & OP2ST { OP2ST = X; }

# STY (semantic body continues on the next source line).
:STY OP2 is (op=0x84 | op=0x8C | op=0x94) ...
& OP2 { OP2 = Y; } :TAX is op=0xAA { X = A; resultFlags(X); } :TAY is op=0xA8 { Y = A; resultFlags(Y); } :TSX is op=0xBA { X = S; resultFlags(X); } :TXA is op=0x8A { A = X; resultFlags(A); } :TXS is op=0x9A { S = X; } :TYA is op=0x98 { A = Y; resultFlags(A); } ================================================ FILE: pypcode/processors/6502/data/languages/65c02.slaspec ================================================ @include "6502.slaspec" define token bitopbyte (8) bitop = (0,7) action = (7,7) bitindex = (4,6) dec optype = (0,3) ; define token testopbyte (8) top = (0, 7) taaa = (5, 7) td = (4, 4) tbb = (2, 3) tcc = (0, 1) ; ################################################################ # Zero Page Indirect ZIOP: (imm8) is bbb=4; imm8 { addr:2 = imm8; tmp:2 = *:2 addr; export *:1 tmp; } OPTB: imm8 is tbb=1; imm8 { export *:1 imm8; } OPTB: imm16 is tbb=3; imm16 { export *:1 imm16; } # Absolute Indexed Indirect ADDRIX: (imm16,X) is X; imm16 { addr:2 = imm16 + zext(X); tmp:2 = *:2 addr; export tmp; } # Instructions :ADC ZIOP is (cc=2 & aaa=3) ... & ZIOP { local op1 = ZIOP; local tmpC = C; C = carry(A, op1); A = A + op1 + tmpC; resultFlags(A); V = C; } :AND ZIOP is (cc=2 & aaa=1) ... & ZIOP { A = A & ZIOP; resultFlags(A); } :BBR "#"bitindex, imm8, REL is (action=0 & optype=0xF) & bitindex ; imm8 ; REL { local ptr:2 = imm8; local value:1 = *:1 ptr; local jump = (value & (1 << bitindex)) == 0; if (jump) goto REL; } :BBS "#"bitindex, imm8, REL is (action=1 & optype=0xF) & bitindex ; imm8 ; REL { local ptr:2 = imm8; local value:1 = *:1 ptr; local jump = (value & (1 << bitindex)) != 0; if (jump) goto REL; } :BIT "#"imm8 is op=0x89; imm8 { local value:1 = imm8; N = (value & 0x80) == 0x80; V = (value & 0x40) == 0x40; value = A & value; Z = (value == 0); } :BIT OP2 is (op=0x34 | op=0x3C) ... & OP2 { N = (OP2 & 0x80) == 0x80; V = (OP2 & 0x40) == 0x40; local value = A & OP2; Z = (value == 0); } :BRA REL is op=0x80; REL { goto REL; } :CMP ZIOP is (cc=2 & aaa=6) ... 
# --- tail of :CMP for the 65C02 zero-page-indirect mode (pattern on previous line) ---
& ZIOP
{
    local op1 = ZIOP;
    local tmp = A - op1;
    resultFlags(tmp);
    C = (A >= op1);
}

# 65C02 accumulator decrement/increment forms.
:DEC A is op=0x3A & A
{
    local tmp = A - 1;
    A = tmp;
    resultFlags(tmp);
}
:EOR ZIOP is (cc=2 & aaa=2) ... & ZIOP
{
    local op1 = ZIOP;
    A = A ^ op1;
    resultFlags(A);
}
:INC A is op=0x1A & A { A = A + 1; resultFlags(A); }

# JMP (abs,X): absolute indexed indirect jump.
:JMP ADDRIX is (op=0x7C); ADDRIX { goto [ADDRIX]; }

:LDA ZIOP is (cc=2 & aaa=5) ... & ZIOP { A = ZIOP; resultFlags(A); }
:ORA ZIOP is (cc=2 & aaa=0) ... & ZIOP { A = A | ZIOP; resultFlags(A); }

# 65C02 X/Y stack push/pull.
:PHX is op=0xDA { *:1 (SP) = X; SP = SP - 1; }
:PLX is op=0xFA { SP = SP + 1; X = *:1 (SP); resultFlags(X); }
:PHY is op=0x5A { *:1 (SP) = Y; SP = SP - 1; }
:PLY is op=0x7A { SP = SP + 1; Y = *:1 (SP); resultFlags(Y); }

# RMB: clear bit <bitindex> of the byte at zero-page address imm8.
:RMB "#"bitindex, imm8 is (action=0 & optype=7) & bitindex ; imm8
{
    local ptr:2 = imm8;
    local value:1 = *:1 ptr;
    value = value & ~(1 << bitindex);
    *:1 ptr = value;
}

:SBC ZIOP is (cc=2 & aaa=7) ... & ZIOP
{
    local op1 = ZIOP;
    local result = A - op1 - !C;
    subtraction_flags1(A, op1, result);
    A = result;
}

# SMB: set bit <bitindex> of the byte at zero-page address imm8.
:SMB "#"bitindex, imm8 is (action=1 & optype=7) & bitindex ; imm8
{
    local ptr:2 = imm8;
    local value:1 = *:1 ptr;
    value = value | (1 << bitindex);
    *:1 ptr = value;
}

:STA ZIOP is (cc=2 & aaa=4) ... & ZIOP { ZIOP = A; }

# STZ: store zero to the addressed byte (zp, zp,X, abs, abs,X forms).
:STZ imm8 is op=0x64 ; imm8 { local tmp:2 = imm8; *:1 tmp = 0; }
:STZ imm8,X is op=0x74 & X ; imm8 { local tmp:2 = zext(imm8 + X); *:1 tmp = 0; }
:STZ imm16 is op=0x9C ; imm16 { local tmp:2 = imm16; *:1 tmp = 0; }
:STZ imm16,X is op=0x9E & X ; imm16 { local tmp:2 = imm16 + zext(X); *:1 tmp = 0; }

# TRB: clears, in the operand, the bits that are set in A; Z is set from the
# stored result. NOTE(review): hardware documents Z as testing A AND the
# ORIGINAL operand value, not the stored result -- confirm against the manual.
:TRB OPTB is (tcc=0 & taaa=0 & td=1) ... & OPTB
{
    local op1 = OPTB;
    local result = (~A) & op1;
    OPTB = result;
    Z = result == 0;
}

# TSB (semantic body continues on the next source line).
:TSB OPTB is (tcc=0 & taaa=0 & td=0) ...
& OPTB { local op1 = OPTB; local result = A | op1; OPTB = result; Z = result == 0; } ================================================ FILE: pypcode/processors/6502/data/manuals/6502.idx ================================================ @mcs6500_family_programming_manual.pdf [MCS 6500 Microcomputer Family Programming Manual, January 1976] ADC, 205 AND, 205 ASL, 206 BCC, 206 BCS, 207 BEQ, 207 BIT, 208 BMI, 208 BNE, 209 BPL, 209 BRK, 210 BVC, 210 BVS, 211 CLC, 211 CLD, 212 CLI, 212 CLV, 213 CMP, 213 CPX, 214 CPY, 214 DEC, 215 DEX, 215 DEY, 216 EOR, 216 INC, 217 INX, 217 INY, 218 JMP, 218 JSR, 219 LDA, 219 LDX, 220 LDY, 220 LSR, 221 NOP, 221 ORA, 222 PHA, 222 PHP, 223 PLA, 223 PLP, 224 ROL, 224 ROR, 225 RTI, 225 RTS, 225 SBC, 226 SEC, 226 SED, 227 SEI, 227 STA, 228 STX, 228 STY, 229 TAX, 229 TAY, 230 TSX, 231 TXA, 231 TXS, 231 TYA, 230 ================================================ FILE: pypcode/processors/6502/data/manuals/65c02.idx ================================================ @wdc_65816_programming_manual.pdf [Programming the 65816 - Including the 6502, 65C02 and 65802, 2007] ADC, 327 AND, 328 ASL, 329 BBR, 457 BBS, 458 BCC, 330 BCS, 331 BEQ, 332 BIT, 333 BMI, 334 BNE, 335 BPL, 336 BRA, 337 BRK, 338 BVC, 341 BVS, 342 CLC, 343 CLD, 344 CLI, 345 CLV, 346 CMP, 347 CPX, 350 CPY, 351 DEC, 352 DEX, 353 DEY, 354 EOR, 355 INC, 357 INX, 358 INY, 359 JMP, 360 JSR, 362 LDA, 363 LDX, 364 LDY, 365 LSR, 366 NOP, 369 ORA, 370 PHA, 375 PHP, 379 PHX, 380 PHY, 381 PLA, 382 PLP, 385 PLX, 386 PLY, 387 RMB, 459 ROL, 389 ROR, 390 RTI, 391 RTS, 393 SBC, 395 SEC, 397 SED, 398 SEI, 399 SMB, 460 STA, 401 STX, 403 STY, 404 STZ, 405 TAX, 406 TAY, 407 TRB, 411 TSB, 412 TSX, 414 TXA, 415 TXS, 416 TYA, 418 ================================================ FILE: pypcode/processors/68000/data/languages/68000.cspec ================================================ ================================================ FILE: pypcode/processors/68000/data/languages/68000.dwarf 
================================================ ================================================ FILE: pypcode/processors/68000/data/languages/68000.ldefs ================================================ Motorola 32-bit 68040 Motorola 32-bit 68030 Motorola 32-bit 68020 Motorola 32-bit Coldfire ================================================ FILE: pypcode/processors/68000/data/languages/68000.opinion ================================================ ================================================ FILE: pypcode/processors/68000/data/languages/68000.pspec ================================================ ================================================ FILE: pypcode/processors/68000/data/languages/68000.sinc ================================================ # SLA specification for Motorola 68000 series define endian=big; define alignment=2; define space ram type=ram_space size=4 default; define space register type=register_space size=4; define register offset=0 size=4 [ D0 D1 D2 D3 D4 D5 D6 D7 ]; # Data registers define register offset=0 size=2 [ D0u D0w D1u D1w D2u D2w D3u D3w D4u D4w D5u D5w D6u D6w D7u D7w]; define register offset=0 size=1 [ _ _ _ D0b _ _ _ D1b _ _ _ D2b _ _ _ D3b _ _ _ D4b _ _ _ D5b _ _ _ D6b _ _ _ D7b ]; define register offset=0x20 size=4 [ A0 A1 A2 A3 A4 A5 A6 SP ]; # Address registers define register offset=0x20 size=2 [ A0u A0w A1u A1w A2u A2w A3u A3w A4u A4w A5u A5w A6u A6w A7u A7w]; define register offset=0x20 size=1 [ _ _ _ A0b _ _ _ A1b _ _ _ A2b _ _ _ A3b _ _ _ A4b _ _ _ A5b _ _ _ A6b _ _ _ A7b ]; define register offset=0x40 size=1 [ TF SVF IPL XF NF ZF VF CF ]; # Condition flags define register offset=0x50 size=4 PC; # Program counter register define register offset=0xb0 size=4 [ FPCR FPSR FPIAR ]; define register offset=0xe0 size=8 [ CRP ]; define register offset=0x100 size=4 [ ISP MSP VBR CACR CAAR AC0 AC1 USP TT0 TT1 ]; define register offset=0x140 size=4 [ SFC DFC TC ITT0 ITT1 DTT0 DTT1 MMUSR URP SRP PCR CAC ]; define register 
offset=0x180 size=4 [ BUSCR MBB RAMBAR0 RAMBAR1 ]; define register offset=0x200 size=2 [ SR ACUSR ]; # NOTE that SR overlaps XF, ZF, VF, CF # NOTE that A7 refers to USP, ISP, or MSP depending on privilege level define register offset=0x300 size=4 [ glbdenom movemptr ]; define register offset=0x400 size=4 [ contextreg ]; @ifdef COLDFIRE # TODO: add a pure MAC variant, for now, just do EMAC define register offset=0x500 size=4 [ MACSR MASK ]; define register offset=0x600 size=4 [ EMACSR ACC0 ACC1 ACC2 ACC3 ACCext01 ACCext23 EMASK ]; @endif # Floating point registers are 80 bits internally, but are 96 bits to/from memory. # Note that 12-byte float needed to be added to FloatFormat.java # Also note that the 96 bit format is not really IEEE, because it gets mapped to 80 bits. define register offset=0x700 size=10 [ FP0 ]; define register offset=0x70a size=10 [ FP1 ]; define register offset=0x714 size=10 [ FP2 ]; define register offset=0x71e size=10 [ FP3 ]; define register offset=0x728 size=10 [ FP4 ]; define register offset=0x732 size=10 [ FP5 ]; define register offset=0x73c size=10 [ FP6 ]; define register offset=0x746 size=10 [ FP7 ]; #TODO: These mode constraints do not constrain the various mode=7 sub-modes identified by regan bits @define MEM_ALTER_ADDR_MODES "(op4=1 | op5=1)" # Memory alterable addressing modes (All modes except mode=1 and mode=0) @define DAT_ALTER_ADDR_MODES "(mode=0 | op4=1 | op5=1)" # Data alterable addressing modes (All modes except mode=1) @define DAT_DIR_CTL_ADDR_MODES "(mode=0 | mode=2 | mode=5 | mode=6 | mode=7)" # Data direct and control addressing modes @define CTL_ADDR_MODES "(mode=2 | mode=5 | mode=6 | mode=7)" # Control addressing modes @define POSTINC_CTL_ADDR_MODES "(mode=2 | mode=3 | mode=5 | mode=6 | mode=7)" # Control addressing modes @define PREDEC_CTL_ADDR_MODES "(mode=2 | mode=4 | mode=5 | mode=6 | mode=7)" # Control addressing modes #TODO: These mode constraints do not constrain the various mode=7 sub-modes identified by regan 
bits @define MEM_ALTER_ADDR_MODES2 "(op7=1 | op8=1)" # Memory alterable addressing modes (All modes except mode=1 and mode=0) @define DAT_ALTER_ADDR_MODES2 "(mode2=0 | op7=1 | op8=1)" # Data alterable addressing modes (All modes except mode=1) @define DAT_DIR_CTL_ADDR_MODES2 "(mode2=0 | mode2=2 | mode2=5 | mode2=6 | mode=7)" # Data direct and control addressing modes @define CTL_ADDR_MODES2 "(mode2=2 | mode2=5 | mode2=6 | mode2=7)" # Control addressing modes # Floating-point condition code bits within FPSR @define N_FP "FPSR[27,1]" @define Z_FP "FPSR[26,1]" @define I_FP "FPSR[25,1]" @define NAN_FP "FPSR[24,1]" define token instr (16) mode = (3,5) mode2 = (6,8) regdn = (0,2) regdnw = (0,2) regdnb = (0,2) regan = (0,2) reganw = (0,2) reganb = (0,2) rmbit = (3,3) reg9dn = (9,11) reg9dnw = (9,11) reg9dnb = (9,11) reg9an = (9,11) copid = (9,11) op = (12,15) opbig = (8,15) op01 = (0,1) op02 = (0,2) op03 = (0,3) op08 = (0,8) op015 = (0,15) op34 = (3,4) op35 = (3,5) op37 = (3,7) op38 = (3,8) op45 = (4,5) op48 = (4,8) op69 = (6,9) op68 = (6,8) op67 = (6,7) op1315 = (13,15) op4 = (4,4) op5 = (5,5) op7 = (7,7) op8 = (8,8) op10 = (10,10) op11 = (11,11) quick = (9,11) op811 = (8,11) copcc1 = (0,5) d8base = (0,7) signed @ifdef COLDFIRE reg03y = (0,3) reg03ywu = (0,3) reg03ywl = (0,3) op47 = (4,7) op611 = (6,11) op6 = (6,6) op0910 = (9,10) acclsb = (7,7) d911 = (9,11) reg315 = (3, 15) reg9dnu = (9,11) reg9dnl = (9,11) reg9anu = (9,11) reg9anl = (9,11) @endif ; define token extword (16) opx015 = (0,15) opx1315= (13,15) opx515 = (5,15) da = (15,15) regda = (12,15) regxdn = (12,14) regxdnw = (12,14) regxan = (12,14) regxanw = (12,14) wl = (11,11) mregn = (10,12) rwx = (9,9) scale = (9,10) ext_911 = (9,11) bigopx = (8,15) fbit = (8,8) regdu = (6,8) regduw = (6,8) regdub = (6,8) fcmask = (5,7) aregx = (5,7) ext_35 = (3,5) regdc = (0,2) regdcw = (0,2) regdcb = (0,2) d8 = (0,7) signed bs = (7,7) IS = (6,6) bdsize = (4,5) iis = (0,2) odsize = (0,1) copcc2 = (0,5) fc4 = (4,4) fc3 = (3,3) 
fc03 = (0,3) fc02 = (0,2) ctl = (0,11) @ifdef COLDFIRE sfact = (9,10) accmsb = (4,4) reg03yu = (0,3) reg03yl = (0,3) ereg03y = (0,3) accw = (2,3) reg12x = (12,15) reg12xwu = (12,15) reg12xwl = (12,15) @endif ; define token extword2 (16) regda2 = (12,15) ext2_911 = (9,11) ext2_35 = (3,5) regdu2 = (6,8) regdu2w = (6,8) regdc2 = (0,2) regdc2w = (0,2) ; define token fpword (16) fop = (12,15) fcopid = (9,11) fword = (0,15) fcnt = (0,2) f1515 = (15,15) f1415 = (14,15) f1315 = (13,15) f1015 = (10,15) f0009 = (0,9) f0008 = (0,8) f0808 = (8,8) f0810 = (8,10) f0707 = (7,7) f0609 = (6,9) f0608 = (6,8) f0615 = (6,15) f0308 = (3,8) f0306 = (3,6) fmode = (0,2) frm = (14,14) f1313 = (13,13) fsrc = (10,12) # attached to FP registers f1012 = (10,12) f10 = (10,10) f11 = (11,11) f12 = (12,12) fdcos = (0,2) # attached to FP registers fdsin = (7,9) # attached to FP registers ffmt = (10,12) fdst = (7,9) # attached to FP registers fdr = (13,13) fsize = (6,6) fcode = (0,5) fopmode = (0,6) fkfactor = (0,6) fkfacreg = (4,6) fromoffset = (0,6) flmode_t = (11,11) flmode_m = (12,12) fldynreg = (4,6) freglist = (0,7) frlist0 = (0,0) frlist1 = (1,1) frlist2 = (2,2) frlist3 = (3,3) frlist4 = (4,4) frlist5 = (5,5) frlist6 = (6,6) frlist7 = (7,7) ; define token disp16 (16) d16 = (0,15) signed; define token disp32 (32) d32 = (0,31) signed; define token disp64 (64) signD = (63,63) exponentD = (52,62) mantissaD = (0,51) d64 = (0,63) signed ; define token disp96X_1 (32) signX = (31,31) exponentX = (16,30) ; define token disp96X_2 (64) expintbitX = (63,63) mantissaX = (0,62) ; define token bdisp16 (16) bd16 = (0,15) signed; define token bdisp32 (32) bd32 = (0,31) signed; define token odisp16 (16) od16 = (0,15) signed; define token odisp32 (32) od32 = (0,31) signed; define token fldparm (16) fldpar=(0,15) flddo=(11,11) fldoffdat=(6,10) fldoffreg=(6,8) flddw=(5,5) fldwddat=(0,4) fldwdreg=(0,2) f_reg=(12,14) regdr=(0,2) regdq=(12,14) divsgn=(11,11) divsz=(10,10) mvm0 = (0,0) # Bits in the register list 
mask for movem mvm1 = (1,1) mvm2 = (2,2) mvm3 = (3,3) mvm4 = (4,4) mvm5 = (5,5) mvm6 = (6,6) mvm7 = (7,7) mvm8 = (8,8) mvm9 = (9,9) mvm10 = (10,10) mvm11 = (11,11) mvm12 = (12,12) mvm13 = (13,13) mvm14 = (14,14) mvm15 = (15,15) ; # Context bits for getting base register bits into the addressing mode define context contextreg eanum = (0,0) # Which effective address is this (the regf's or the regs's) pcmode = (1,1) # is this a PC relative mode regfan = (2,4) # saved base register for first effective address regtfan = (2,4) savmod1 = (5,7) savmod2 = (8,10) # Mode for the second effective address regsdn = (11,13) regsdnw = (11,13) regsdnb = (11,13) regsan = (11,13) regtsan = (11,13) regsanw = (11,13) regsanb = (11,13) extGUARD = (14,14) # guard for saving off modes before starting instructions ; attach variables [ regdn regxdn reg9dn regdr regdq regsdn regdu regdc regdu2 regdc2 ] [ D0 D1 D2 D3 D4 D5 D6 D7 ]; attach variables [ fldoffreg fldwdreg f_reg fcnt fkfacreg fldynreg ] [ D0 D1 D2 D3 D4 D5 D6 D7 ]; attach variables [ regdnw regxdnw reg9dnw regsdnw regduw regdcw regdu2w regdc2w ] [ D0w D1w D2w D3w D4w D5w D6w D7w ]; attach variables [ regdnb reg9dnb regsdnb regdub regdcb ] [ D0b D1b D2b D3b D4b D5b D6b D7b ]; attach variables [ regda regda2 ] [ D0 D1 D2 D3 D4 D5 D6 D7 A0 A1 A2 A3 A4 A5 A6 SP ]; attach variables [ regan regxan reg9an regfan regsan aregx ] [ A0 A1 A2 A3 A4 A5 A6 SP ]; attach variables [ reganw regxanw regsanw ] [ A0w A1w A2w A3w A4w A5w A6w A7w ]; attach variables [ reganb regsanb ] [ A0b A1b A2b A3b A4b A5b A6b A7b ]; attach variables [ fsrc fdst fdcos fdsin ] [ FP0 FP1 FP2 FP3 FP4 FP5 FP6 FP7 ]; @ifdef COLDFIRE attach variables [ reg03y ereg03y reg12x ] [ D0 D1 D2 D3 D4 D5 D6 D7 A0 A1 A2 A3 A4 A5 A6 SP ]; attach variables [reg03ywu reg12xwu reg03yu ] [ D0u D1u D2u D3u D4u D5u D6u D7u A0u A1u A2u A3u A4u A5u A6u A7u ]; attach variables [reg03ywl reg12xwl reg03yl ] [ D0w D1w D2w D3w D4w D5w D6w D7w A0w A1w A2w A3w A4w A5w A6w A7w ]; attach variables 
[reg9dnu] [ D0u D1u D2u D3u D4u D5u D6u D7u ]; attach variables [reg9dnl] [ D0w D1w D2w D3w D4w D5w D6w D7w ]; attach variables [reg9anu] [ A0u A1u A2u A3u A4u A5u A6u A7u ]; attach variables [reg9anl] [ A0w A1w A2w A3w A4w A5w A6w A7w ]; attach variables [accw] [ACC0 ACC1 ACC2 ACC3]; attach values d911 [ -1 1 2 3 4 5 6 7 ]; @endif attach values scale [ 1 2 4 8 ]; attach values quick [ 8 1 2 3 4 5 6 7 ]; define pcodeop kfactor; define pcodeop ftrap; define pcodeop __m68k_trap; define pcodeop __m68k_trapv; define pcodeop reset; define pcodeop saveFPUStateFrame; define pcodeop restoreFPUStateFrame; define pcodeop invalidateCacheLines; define pcodeop pushInvalidateCaches; define pcodeop fetox; define pcodeop fetoxm1; define pcodeop fgetexp; define pcodeop fgetman; define pcodeop fint; define pcodeop flog10; define pcodeop flog2; define pcodeop flogn; define pcodeop flognp1; define pcodeop fmod; define pcodeop frem; define pcodeop fscale; define pcodeop fsgldiv; define pcodeop ftentox; define pcodeop ftwotox; define pcodeop bcdAdjust; define pcodeop sin; define pcodeop cos; define pcodeop tan; define pcodeop asin; define pcodeop acos; define pcodeop atan; define pcodeop sinh; define pcodeop cosh; define pcodeop tanh; ea_index: regxan*scale is da=1 & wl=1 & regxan & scale { tmp:4 = regxan*scale; export tmp; } ea_index: regxanw*scale is da=1 & wl=0 & regxanw & scale { tmp:4 = sext(regxanw)*scale; export tmp; } ea_index: regxdn*scale is da=0 & wl=1 & regxdn & scale { tmp:4 = regxdn*scale; export tmp; } ea_index: regxdnw*scale is da=0 & wl=0 & regxdnw & scale { tmp:4 = sext(regxdnw)*scale; export tmp; } breg: regfan is eanum=0 & regfan { export regfan; } breg: regsan is eanum=1 & regsan { export regsan; } #breg: PC is pcmode=1 & eanum & PC { tmp:4 = inst_start; } fl_breg: "-" is bs=1 { export 0:4; } fl_breg: "ZPC" is bs=1 & pcmode=1 { export 0:4; } fl_breg: breg is bs=0 & pcmode=0 & breg { export breg; } fl_breg: PC is PC & bs=0 & pcmode=1 { tmp:4 = inst_start + 2; export 
tmp; } # shift amount parameter cntreg: reg9dn is reg9dn & op5=1 { local tmp = reg9dn & 63; export tmp; } cntreg: "#"^quick is quick & op5=0 { export *[const]:4 quick; } # Extension word forms of the effective address # 8-bit displacement (brief extension) extw: d8,breg,ea_index is breg; ea_index & d8 & fbit=0 { local tmp = breg+d8+ea_index; export tmp; } extw: rela,PC,ea_index is pcmode=1 & PC; ea_index & d8 & fbit=0 [ rela = inst_start+d8+2; ] { tmp:4 = rela; tmp = tmp+ea_index; export tmp; } # Base displacement (indexed) extw: fl_breg,ea_index is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=0 { local tmp = fl_breg+ea_index; export tmp; } extw: bd16,fl_breg,ea_index is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=0; bd16 { local tmp = fl_breg+ea_index+bd16; export tmp; } extw: bd32,fl_breg,ea_index is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=0; bd32 { local tmp = fl_breg+ea_index+bd32; export tmp; } # Memory Indirect Postindexed Mode extw: [fl_breg],ea_index is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=1 { local tmp = *:4 fl_breg + ea_index; export tmp; } extw: [bd16,fl_breg],ea_index is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=1; bd16 { local tmp = *:4 (fl_breg+bd16) + ea_index; export tmp; } extw: [bd32,fl_breg],ea_index is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=1; bd32 { local tmp = *:4 (fl_breg+bd32) + ea_index; export tmp; } extw: [fl_breg],ea_index,od16 is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=2; od16 { local tmp = *:4 fl_breg + ea_index + od16; export tmp; } extw: [bd16,fl_breg],ea_index,od16 is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=2; bd16; od16 { local tmp = *:4 (fl_breg+bd16) + ea_index + od16; export tmp; } extw: [bd32,fl_breg],ea_index,od16 is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=2; bd32; od16 { local tmp = *:4 (fl_breg+bd32) + ea_index + od16; export tmp; } extw: [fl_breg],ea_index,od32 is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=3; od32 { 
local tmp = *:4 fl_breg + ea_index + od32; export tmp; } extw: [bd16,fl_breg],ea_index,od32 is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=3; bd16; od32 { local tmp = *:4 (fl_breg+bd16) + ea_index + od32; export tmp; } extw: [bd32,fl_breg],ea_index,od32 is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=3; bd32; od32 { local tmp = *:4 (fl_breg+bd32) + ea_index + od32; export tmp; } # Memory Indirect Preindexed Mode extw: [fl_breg,ea_index] is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=5 { local tmp = *:4 (fl_breg+ea_index); export tmp; } extw: [bd16,fl_breg,ea_index] is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=5; bd16 { local tmp = *:4 (fl_breg+ea_index+bd16); export tmp; } extw: [bd32,fl_breg,ea_index] is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=5; bd32 { local tmp = *:4 (fl_breg+ea_index+bd32); export tmp; } extw: [fl_breg,ea_index],od16 is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=6; od16 { local tmp = *:4 (fl_breg+ea_index) + od16; export tmp; } extw: [bd16,fl_breg,ea_index],od16 is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=6; bd16; od16 { local tmp = *:4 (fl_breg+ea_index+bd16) + od16; export tmp; } extw: [bd32,fl_breg,ea_index],od16 is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=6; bd32; od16 { local tmp = *:4 (fl_breg+ea_index+bd32) + od16; export tmp; } extw: [fl_breg,ea_index],od32 is ea_index & fl_breg & fbit=1 & bdsize=1 & IS=0 & iis=7; od32 { local tmp = *:4 (fl_breg+ea_index) + od32; export tmp; } extw: [bd16,fl_breg,ea_index],od32 is ea_index & fl_breg & fbit=1 & bdsize=2 & IS=0 & iis=7; bd16; od32 { local tmp = *:4 (fl_breg+ea_index+bd16) + od32; export tmp; } extw: [bd32,fl_breg,ea_index],od32 is ea_index & fl_breg & fbit=1 & bdsize=3 & IS=0 & iis=7; bd32; od32 { local tmp = *:4 (fl_breg+ea_index+bd32) + od32; export tmp; } # Base displacement extw: fl_breg is fl_breg & fbit=1 & bdsize=1 & IS=1 & iis=0 { export fl_breg; } extw: bd16,fl_breg is fl_breg & fbit=1 & bdsize=2 & IS=1 & 
iis=0; bd16 { local tmp = fl_breg+bd16; export tmp; } extw: bd32,fl_breg is fl_breg & fbit=1 & bdsize=3 & IS=1 & iis=0; bd32 { local tmp = fl_breg+bd32; export tmp; } # Memory Indirect extw: [fl_breg] is fl_breg & fbit=1 & bdsize=1 & IS=1 & iis=1 { local tmp = *:4 fl_breg; export tmp; } extw: [bd16,fl_breg] is fl_breg & fbit=1 & bdsize=2 & IS=1 & iis=1; bd16 { local tmp = *:4 (fl_breg+bd16); export tmp; } extw: [bd32,fl_breg] is fl_breg & fbit=1 & bdsize=3 & IS=1 & iis=1; bd32 { local tmp = *:4 (fl_breg+bd32); export tmp; } extw: [fl_breg],od16 is fl_breg & fbit=1 & bdsize=1 & IS=1 & iis=2; od16 { local tmp = *:4 fl_breg + od16; export tmp; } extw: [bd16,fl_breg],od16 is fl_breg & fbit=1 & bdsize=2 & IS=1 & iis=2; bd16; od16 { local tmp = *:4 (fl_breg+bd16) + od16; export tmp; } extw: [bd32,fl_breg],od16 is fl_breg & fbit=1 & bdsize=3 & IS=1 & iis=2; bd32; od16 { local tmp = *:4 (fl_breg+bd32) + od16; export tmp; } extw: [fl_breg],od32 is fl_breg & fbit=1 & bdsize=1 & IS=1 & iis=3; od32 { local tmp = *:4 fl_breg + od32; export tmp; } extw: [bd16,fl_breg],od32 is fl_breg & fbit=1 & bdsize=2 & IS=1 & iis=3; bd16; od32 { local tmp = *:4 (fl_breg+bd16) + od32; export tmp; } extw: [bd32,fl_breg],od32 is fl_breg & fbit=1 & bdsize=3 & IS=1 & iis=3; bd32; od32 { local tmp = *:4 (fl_breg+bd32) + od32; export tmp; } # The main effective address table # size=long eal: regdn is mode=0 & regdn { export regdn; } eal: regan is mode=1 & regan { export regan; } eal: (regan) is mode=2 & regan { export *:4 regan; } eal: (regan)+ is mode=3 & regan { local tmp = regan; regan = regan + 4; export *:4 tmp; } eal: -(regan) is mode=4 & regan { regan = regan - 4; export *:4 regan; } eal: (d16,regan) is mode=5 & regan; d16 { local tmp = regan + d16; export *:4 tmp; } eal: (extw) is mode=6 & regan; extw [ regtfan = regan; pcmode = 0; ] { build extw; export *:4 extw; } eal: (d16,PC) is PC & mode=7 & regan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:4 tmp; } eal: (extw) is mode=7 & regan=3; 
extw [ pcmode=1; ] { build extw; export *:4 extw; } eal: (d16)".w" is mode=7 & regan=0; d16 { export *:4 d16; } eal: (d32)".l" is mode=7 & regan=1; d32 { export *:4 d32; } eal: "#"^d32 is mode=7 & regan=4; d32 { export *[const]:4 d32; } # size=word eaw: regdnw is mode=0 & regdnw { export regdnw; } eaw: reganw is mode=1 & reganw { export reganw; } eaw: (regan) is mode=2 & regan { export *:2 regan; } eaw: (regan)+ is mode=3 & regan { local tmp = regan; regan = regan + 2; export *:2 tmp; } eaw: -(regan) is mode=4 & regan { regan = regan - 2; export *:2 regan; } eaw: (d16,regan) is mode=5 & regan; d16 { local tmp = regan + d16; export *:2 tmp; } eaw: (extw) is mode=6 & regan; extw [ pcmode=0; regtfan=regan; ] { build extw; export *:2 extw; } eaw: (d16,PC) is PC & mode=7 & regan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:2 tmp; } eaw: (extw) is mode=7 & regan=3; extw [ pcmode=1; ] { build extw; export *:2 extw; } eaw: (d16)".w" is mode=7 & regan=0; d16 { export *:2 d16; } eaw: (d32)".l" is mode=7 & regan=1; d32 { export *:2 d32; } eaw: "#"^d16 is mode=7 & regan=4; d16 { export *[const]:2 d16; } # size=byte eab: regdnb is mode=0 & regdnb { export regdnb; } eab: reganb is mode=1 & reganb { export reganb; } eab: (regan) is mode=2 & regan { export *:1 regan; } eab: (regan)+ is mode=3 & regan & regan=7 { local tmp = regan; regan = regan + 2; export *:1 tmp; } eab: (regan)+ is mode=3 & regan { local tmp = regan; regan = regan + 1; export *:1 tmp; } eab: -(regan) is mode=4 & regan & regan=7 { regan = regan - 2; export *:1 regan; } eab: -(regan) is mode=4 & regan { regan = regan - 1; export *:1 regan; } eab: (d16,regan) is mode=5 & regan; d16 { local tmp = regan + d16; export *:1 tmp; } eab: (extw) is mode=6 & regan; extw [ pcmode=0; regtfan=regan; ] { build extw; export *:1 extw; } eab: (d16,PC) is PC & mode=7 & regan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:1 tmp; } eab: (extw) is mode=7 & regan=3; extw [ pcmode=1; ] { build extw; export *:1 extw; } eab: (d16)".w" 
is mode=7 & regan=0; d16 { export *:1 d16; } eab: (d32)".l" is mode=7 & regan=1; d32 { export *:1 d32; } eab: "#"^d8 is mode=7 & regan=4; d8 { export *[const]:1 d8; } # Second effective address calculation for mov # NB- Extended-precsion are 12 bytes, so we need to increment or decrement the reg by 12 not 4 # # size=extend | packed (96-bit) # The fmovem.x insn needs the movemptr to be set here e2x: (regsan) is savmod2=2 & regsan { movemptr = regsan; export *:12 regsan; } e2x: (regsan)+ is savmod2=3 & regsan { movemptr = regsan; local tmp = regsan; regsan = regsan + 12; export *:12 tmp; } e2x: -(regsan) is savmod2=4 & regsan { movemptr = regsan; regsan = regsan - 12; export *:12 regsan; } e2x: (d16,regsan) is savmod2=5 & regsan; d16 { local tmp = regsan + d16; movemptr = tmp; export *:12 tmp; } e2x: (extw) is savmod2=6; extw [ pcmode=0; eanum=1; ] { build extw; movemptr = extw; export *:12 extw; } e2x: (d16,PC) is PC & savmod2=7 & regsan=2; d16 { tmp:4 = inst_start + 2 + d16; movemptr = tmp; export *:12 tmp; } e2x: (extw) is savmod2=7 & regsan=3; extw [ pcmode=1; ] { build extw; movemptr = extw; export *:12 extw; } e2x: (d16)".w" is savmod2=7 & regsan=0; d16 { movemptr = d16; export *:12 d16; } e2x: (d32)".l" is savmod2=7 & regsan=1; d32 { movemptr = d32; export *:12 d32; } e2x: "#( -1E"^signX^" * 2E"^exp^"*1."^mantissaX) is savmod1=7 & regtfan=4; signX & exponentX; expintbitX & mantissaX [exp=exponentX-0x3FFF;] { movemptr = mantissaX; export *[const]:12 mantissaX; } # bug: doesn't construct a real, only exports the 64-bit mantissa # size=quad (limited mode) # NB- Doubles are 8 bytes, so we need to increment or decrement the reg by 8 not 4 # e2d: (regsan) is savmod2=2 & regsan { export *:8 regsan; } e2d: (regsan)+ is savmod2=3 & regsan { local tmp = regsan; regsan = regsan + 8; export *:8 tmp; } e2d: -(regsan) is savmod2=4 & regsan { regsan = regsan - 8; export *:8 regsan; } e2d: (d16,regsan) is savmod2=5 & regsan; d16 { local tmp = regsan + d16; export *:8 tmp; } 
# Effective-address operand subtables keyed on the context-saved mode field
# (savmod2) and register field (regsan).  Each constructor exports the
# operand location: a register, a memory dereference, or a constant.

e2d: (extw) is savmod2=6; extw [ pcmode=0; eanum=1; ] { build extw; export *:8 extw; }
e2d: (d16,PC) is PC & savmod2=7 & regsan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:8 tmp; }
e2d: (extw) is savmod2=7 & regsan=3; extw [ pcmode=1; ] { build extw; export *:8 extw; }
e2d: (d16)".w" is savmod2=7 & regsan=0; d16 { export *:8 d16; }
e2d: (d32)".l" is savmod2=7 & regsan=1; d32 { export *:8 d32; }
e2d: "#"^d64 is savmod2=7 & regsan=4; d64 { export *[const]:8 d64; }

# size=long
e2l: regsdn is savmod2=0 & regsdn { export regsdn; }
e2l: regsan is savmod2=1 & regsan { export regsan; }
e2l: (regsan) is savmod2=2 & regsan { export *:4 regsan; }
e2l: (regsan)+ is savmod2=3 & regsan { local tmp = regsan; regsan = regsan + 4; export *:4 tmp; }
e2l: -(regsan) is savmod2=4 & regsan { regsan = regsan - 4; export *:4 regsan; }
e2l: (d16,regsan) is savmod2=5 & regsan; d16 { local tmp = regsan + d16; export *:4 tmp; }
e2l: (extw) is savmod2=6; extw [ pcmode=0; eanum=1; ] { build extw; export *:4 extw; }
e2l: (d16,PC) is PC & savmod2=7 & regsan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:4 tmp; }
e2l: (extw) is savmod2=7 & regsan=3; extw [ pcmode=1; ] { build extw; export *:4 extw; }
e2l: (d16)".w" is savmod2=7 & regsan=0; d16 { export *:4 d16; }
e2l: (d32)".l" is savmod2=7 & regsan=1; d32 { export *:4 d32; }
e2l: "#"^d32 is savmod2=7 & regsan=4; d32 { export *[const]:4 d32; }

# size=word
e2w: regsdnw is savmod2=0 & regsdnw { export regsdnw; }
e2w: regsanw is savmod2=1 & regsanw { export regsanw; }
e2w: (regsan) is savmod2=2 & regsan { export *:2 regsan; }
e2w: (regsan)+ is savmod2=3 & regsan { local tmp = regsan; regsan = regsan + 2; export *:2 tmp; }
e2w: -(regsan) is savmod2=4 & regsan { regsan = regsan - 2; export *:2 regsan; }
e2w: (d16,regsan) is savmod2=5 & regsan; d16 { local tmp = regsan + d16; export *:2 tmp; }
e2w: (extw) is savmod2=6; extw [ pcmode=0; eanum=1; ] { build extw; export *:2 extw; }
e2w: (d16,PC) is PC & savmod2=7 & regsan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:2 tmp; }
e2w: (extw) is savmod2=7 & regsan=3; extw [ pcmode=1; ] { build extw; export *:2 extw; }
e2w: (d16)".w" is savmod2=7 & regsan=0; d16 { export *:2 d16; }
e2w: (d32)".l" is savmod2=7 & regsan=1; d32 { export *:2 d32; }
e2w: "#"^d16 is savmod2=7 & regsan=4; d16 { export *[const]:2 d16; }

# size=byte
# NB- Manual says that if in predecrement or postincrement mode and the res is the SP, then must inc/dec by 2, not by 1
e2b: regsdnb is savmod2=0 & regsdnb { export regsdnb; }
e2b: regsanb is savmod2=1 & regsanb { export regsanb; }
e2b: (regsan) is savmod2=2 & regsan { export *:1 regsan; }
e2b: (regsan)+ is savmod2=3 & regsan & regsan=7 { local tmp = regsan; regsan = regsan + 2; export *:1 tmp; }
e2b: (regsan)+ is savmod2=3 & regsan { local tmp = regsan; regsan = regsan + 1; export *:1 tmp; }
e2b: -(regsan) is savmod2=4 & regsan & regsan=7 { regsan = regsan - 2; export *:1 regsan; }
e2b: -(regsan) is savmod2=4 & regsan { regsan = regsan - 1; export *:1 regsan; }
e2b: (d16,regsan) is savmod2=5 & regsan; d16 { local tmp = regsan + d16; export *:1 tmp; }
e2b: (extw) is savmod2=6; extw [ pcmode=0; eanum=1; ] { build extw; export *:1 extw; }
e2b: (d16,PC) is PC & savmod2=7 & regsan=2; d16 { tmp:4 = inst_start + 2 + d16; export *:1 tmp; }
e2b: (extw) is savmod2=7 & regsan=3; extw [ pcmode=1; ] { build extw; export *:1 extw; }
e2b: (d16)".w" is savmod2=7 & regsan=0; d16 { export *:1 d16; }
e2b: (d32)".l" is savmod2=7 & regsan=1; d32 { export *:1 d32; }
e2b: "#"^d8 is savmod2=7 & regsan=4; d8 { export *[const]:1 d8; }

# For instructions like lea and pea that manipulate the effective address
# itself rather than the data the address is pointing at
eaptr: (regan) is mode=2 & regan { export regan; }
eaptr: (d16,regan) is mode=5 & regan; d16 { local tmp = regan+d16; export tmp; }
eaptr: (extw) is mode=6 & regan; extw [ pcmode=0; regtfan=regan; ] { export extw; }
eaptr: (d16,PC) is mode=7 & regan=2; d16 & PC { tmp:4 = inst_start+2+d16; export tmp; }
eaptr:
(extw) is mode=7 & regan=3; extw [ pcmode=1; ] { export extw; }
eaptr: (d16)".w" is mode=7 & regan=0; d16 { export *[const]:4 d16; }
eaptr: (d32)".l" is mode=7 & regan=1; d32 { export *[const]:4 d32; }

# Data register or predecrement addressing
Ty: -(regan) is rmbit=1 & regan { regan = regan-4; export *:4 regan; }
Ty: regdn is rmbit=0 & regdn { export regdn; }
Tx: -(reg9an) is rmbit=1 & reg9an { reg9an = reg9an-4; export *:4 reg9an; }
Tx: reg9dn is rmbit=0 & reg9dn { export reg9dn; }
Tyw: -(regan) is rmbit=1 & regan { regan = regan-2; export *:2 regan; }
Tyw: regdnw is rmbit=0 & regdnw { export regdnw; }
Txw: -(reg9an) is rmbit=1 & reg9an { reg9an = reg9an-2; export *:2 reg9an; }
Txw: reg9dnw is rmbit=0 & reg9dnw { export reg9dnw; }
Tyb: -(regan) is rmbit=1 & regan { regan = regan-1; export *:1 regan; }
Tyb: regdnb is rmbit=0 & regdnb { export regdnb; }
Txb: -(reg9an) is rmbit=1 & reg9an { reg9an = reg9an-1; export *:1 reg9an; }
Txb: reg9dnb is rmbit=0 & reg9dnb { export reg9dnb; }

# Bit field parameters
f_off: fldoffdat is flddo=0 & fldoffdat { export *[const]:4 fldoffdat; }
f_off: fldoffreg is flddo=1 & fldoffreg { export fldoffreg; }
f_wd: fldwddat is flddw=0 & fldwddat { export *[const]:4 fldwddat; }
f_wd: fldwdreg is flddw=1 & fldwdreg { export fldwdreg; }

# Register operand helpers
rreg: regxdn is da=0 & regxdn { export regxdn; }
rreg: regxan is da=1 & regxan { export regxan; }
regPlus: (regan)+ is regan { export regan; }
regxPlus: (regxan)+ is regxan { export regxan; }
reg9Plus: (reg9an)+ is reg9an { export reg9an; }
regParen: (regan) is regan { export regan; }
d32l: (d32)".l" is d32 { export *[const]:4 d32; }

# Condition codes
cc: "t" is op811=0 { export 1:1; }
cc: "f" is op811=1 { export 0:1; }
cc: "hi" is op811=2 { tmp:1 = !(CF || ZF); export tmp; }
cc: "ls" is op811=3 { tmp:1 = CF || ZF; export tmp; }
cc: "cc" is op811=4 { tmp:1 = !CF; export tmp; }
cc: "cs" is op811=5 { export CF; }
cc: "ne" is op811=6 { tmp:1 = !ZF; export tmp; }
cc: "eq" is op811=7 { export ZF; }
cc: "vc" is
op811=8 { tmp:1 = !VF; export tmp; }
cc: "vs" is op811=9 { export VF; }
cc: "pl" is op811=10 { tmp:1 = !NF; export tmp; }
cc: "mi" is op811=11 { export NF; }
cc: "ge" is op811=12 { tmp:1 = (VF==NF); export tmp; }
cc: "lt" is op811=13 { tmp:1 = (VF!=NF); export tmp; }
cc: "gt" is op811=14 { tmp:1 = !ZF && (VF==NF); export tmp; }
cc: "le" is op811=15 { tmp:1 = ZF || (VF!=NF); export tmp; }

# Immediate operands of each size
const8: "#"^d8 is d8 { export *[const]:1 d8; }
const16: "#"^d16 is d16 { export *[const]:2 d16; }
const32: "#"^d32 is d32 { export *[const]:4 d32; }

# Control registers selected by the ctl register-code field
ctlreg: SFC is SFC & ctl=0x000 { export SFC; }
ctlreg: DFC is DFC & ctl=0x001 { export DFC; }
ctlreg: USP is USP & ctl=0x800 { export USP; }
ctlreg: VBR is VBR & ctl=0x801 { export VBR; }
ctlreg: CACR is CACR & ctl=0x002 { export CACR; }
ctlreg: CAAR is CAAR & ctl=0x802 { export CAAR; }
ctlreg: MSP is MSP & ctl=0x803 { export MSP; }
ctlreg: ISP is ISP & ctl=0x804 { export ISP; }
ctlreg: TC is TC & ctl=0x003 { export TC; }
ctlreg: ITT0 is ITT0 & ctl=0x004 { export ITT0; }
ctlreg: ITT1 is ITT1 & ctl=0x005 { export ITT1; }
ctlreg: DTT0 is DTT0 & ctl=0x006 { export DTT0; }
ctlreg: DTT1 is DTT1 & ctl=0x007 { export DTT1; }
# Fix: register code 0x008 is BUSCR.  The constructor previously displayed
# and matched SRP while its body exported BUSCR; SRP is code 0x807 below.
ctlreg: BUSCR is BUSCR & ctl=0x008 { export BUSCR; }
ctlreg: MMUSR is MMUSR & ctl=0x805 { export MMUSR; }
ctlreg: URP is URP & ctl=0x806 { export URP; }
ctlreg: SRP is SRP & ctl=0x807 { export SRP; }
ctlreg: PCR is PCR & ctl=0x808 { export PCR; }
ctlreg: RAMBAR0 is RAMBAR0 & ctl=0xc04 { export RAMBAR0; }
ctlreg: RAMBAR1 is RAMBAR1 & ctl=0xc05 { export RAMBAR1; }
# ctlreg: PCR is PCR & ctl=0x808 { export PCR; }
ctlreg: CAC is CAC & ctl=0xffe { export CAC; }
ctlreg: MBB is MBB & ctl=0xfff { export MBB; }
# Catch-all for unrecognized control register codes
ctlreg: "UNK_CTL_"^ctl is ctl { tmp:4 = 0xffffffff; export tmp; }

# Relative jump destinations
addr8: reloc is d8base [ reloc=inst_start+2+d8base; ] { export *[ram]:4 reloc; }
addr16: reloc is d16 [ reloc=inst_start+2+d16; ] { export *[ram]:4 reloc; }
addr32: reloc is d32 [ reloc=inst_start+2+d32; ] { export *[ram]:4 reloc; }

# Jump locations for coprocessor instructions
#caddr16: reloc is d16 [ reloc=inst_next-2+d16; ] { export *[ram]:4 reloc; }
#caddr32: reloc is d32 [ reloc=inst_next-4+d32; ] { export *[ram]:4 reloc; }

# Macros for flags etc
macro resflags(result) { NF = result s< 0; ZF = result == 0; }
macro logflags() { VF=0; CF=0; }
macro addflags(op1,op2) { CF = carry(op1,op2); VF = scarry(op1,op2); XF = CF; }
macro addxflags(op1, op2) { local opSum = op1 + op2; CF = carry(op1, op2) || carry(opSum, zext(XF)); VF = scarry(op1, op2) ^^ scarry(opSum, zext(XF)); }
macro add(op1, op2res) { local var1 = op1; local var2 = op2res; addflags(var1, var2); local result = var1 + var2; op2res = result; resflags(result); }
macro and(op1,op2res) { logflags(); local result = op1 & op2res; op2res = result; resflags(result); }
macro eor(op1,op2res) { logflags(); local result = op1 ^ op2res; op2res = result; resflags(result); }
macro or(op1,op2res) { logflags(); local result = op1 | op2res; op2res = result; resflags(result); }
macro subflags(op1,op2) { CF = op1 < op2; VF = sborrow(op1,op2); XF = CF; }
macro sub(op1,op2res) { local var1 = op1; local var2 = op2res; subflags(var2, var1); local result = var2 - var1; op2res = result; resflags(result); }
# This macro needs to consider the XF flag when finding the CF carry flag value: op1=op1-op2-XF
# original was: CF = op1 < op2;
macro subxflags(op1,op2) { CF = (op1 < op2) || ( (XF == 1) && (op1 == op2) ); VF = sborrow(op1,op2); XF = CF; }
macro negxsubflags(op1) { CF = 0 s< op1; VF = sborrow(0,op1); XF = CF; }
macro resflags_fp(result) { $(I_FP) = 0; $(NAN_FP) = 0; $(N_FP) = result f< 0; $(Z_FP) = result == 0; }
macro clearflags_fp() { $(N_FP) = 0; $(Z_FP) = 0; $(NAN_FP) = 0; }
macro bcdflags(result) { XF = CF; ZF = (result == 0) * ZF + (result != 0); }
macro getbit(res,in,bitnum) { res = ((in >> bitnum) & 1) != 0; }
macro bitmask(res, width) { res = (1 << width) - 1; }
macro bfmask (res, off, width) { res = ((1 << width) - 1) << (32 - off - width); }
macro getbitfield(res, off, width) { res = (res << off) >> (32 - width); }
macro resbitflags(result, bitnum) { NF = ((result >> bitnum) & 1) != 0; ZF = result == 0; }
macro packflags(res) { res = zext((TF<<15)|(SVF<<13)|(IPL<<8)|(XF<<4)|(NF<<3)|(ZF<<2)|(VF<<1)|CF); }
macro unpackflags(in) { TF = (in & 0x8000)!=0; SVF = (in & 0x2000)!=0; IPL = in[8,3]; XF = (in & 0x10)!=0; NF = (in & 8)!=0; ZF = (in & 4)!=0; VF = (in & 2)!=0; CF = (in & 1)!=0; }
# This macro sets the NF and ZF flags for extended arithmetic insns- addx, negx, and subx
macro extendedResultFlags(result) { NF = result s< 0; ZF = (result == 0) && (ZF == 1); }
macro arithmeticShiftLeft(count, register, width) { local modcount = count & 63; local lbit:1 = ((register >> (width - modcount) & 1) != 0); local msbBefore:4 = zext(register s< 0); register = register << modcount; resflags(register); local msbAfter:4 = zext(register s< 0); VF = (msbBefore ^ msbAfter) != 0; CF = (modcount != 0) * lbit; XF = ((modcount == 0) * XF) + ((modcount != 0) * CF); }
macro arithmeticShiftRight(count, register, width) { local modcount = count & 63; local lbit:1 = ((register >> (modcount-1) & 1) != 0); local msbBefore:4 = zext(register s< 0); register = register s>> modcount; resflags(register); local msbAfter:4 = zext(register s< 0); VF = (msbBefore ^ msbAfter) != 0; CF = (modcount != 0) * lbit; XF = ((modcount == 0) * XF) + ((modcount != 0) * CF); }
macro logicalShiftLeft(count, register, width) { local modcount = count & 63; local lbit:1 = ((register >> (width - modcount) & 1) != 0); local msbBefore:4 = zext(register s< 0); register = register << modcount; resflags(register); local msbAfter:4 = zext(register s< 0); VF = (msbBefore ^ msbAfter) != 0; CF = (modcount != 0) * lbit; XF = ((modcount == 0) * XF) + ((modcount != 0) * CF); }
macro logicalShiftRight(count, register, width) { local modcount = count & 63; local lbit:1 = ((register >> (modcount-1) & 1) != 0); local msbBefore:4 = zext(register s< 0); register = register >> modcount; resflags(register); local msbAfter:4 = zext(register s< 0); VF = (msbBefore ^ msbAfter) != 0; CF = (modcount != 0) * lbit; XF = ((modcount == 0) * XF) + ((modcount != 0) * CF); }
macro rotateLeft(count, register, width) { local modcount = count & 63; register = (register << modcount) | (register >> (width - modcount)); resflags(register); CF = (register & 1) != 0; VF = 0; }
macro rotateRight(count, register, width) { local modcount = count & 63; register = (register << (width - modcount)) | (register >> modcount); resflags(register); CF = zext(register s< 0); VF = 0; }
macro rotateLeftExtended(count, register, width) { local modcount = count & 63; local xflag = (register & (1 << (width - modcount))) != 0; local result = (register << modcount) | (zext(XF) << (modcount - 1)) | (register >> (width - modcount + 1)); register = (zext(modcount != 0) * result) + (zext(modcount == 0) * register); resflags(register); XF = (zext(modcount != 0) * xflag) + (zext(modcount == 0) * XF); CF = XF; VF = 0; }
macro rotateRightExtended(count, register, width) { local modcount = count & 63; local xflag = (register & (1 << (modcount - 1))) != 0; local result = (zext(XF) << (width - modcount)) | (register >> modcount) | (register << (width - modcount + 1)); register = (zext(modcount != 0) * result) + (zext(modcount == 0) * register); resflags(register); XF = (zext(modcount != 0) * xflag) + (zext(modcount == 0) * XF); CF = XF; VF = 0; }

# Root instruction constructor: copies the instruction's mode/register
# fields into context (savmod1/savmod2, regtfan/regtsan) before rematching
# with extGUARD=1.
:^instruction is extGUARD=0 & mode2 & reg9an & mode & regan & instruction [ extGUARD=1; regtfan=regan; savmod1=mode; regtsan=reg9an; savmod2=mode2; ] {}

# Here are the instructions
with : extGUARD=1 {

:abcd Tyb,Txb is op=12 & op48=16 & Tyb & Txb { CF = carry(Txb,carry(Tyb,XF)); Txb = Txb + Tyb + XF; XF = bcdAdjust(Txb); bcdflags(Txb); }
:add.b eab,reg9dnb is (op=13 & reg9dnb & op68=0)... & eab { add(eab, reg9dnb); }
:add.w eaw,reg9dnw is (op=13 & reg9dnw & op68=1)... & eaw { add(eaw,reg9dnw); }
:add.l eal,reg9dn is (op=13 & reg9dn & op68=2)...
& eal { add(eal,reg9dn); }
# ADD/ADDA/ADDI/ADDQ/ADDX, AND/ANDI, and the ASL register forms
:add.b reg9dnb,eab is (op=13 & reg9dnb & op68=4 & $(MEM_ALTER_ADDR_MODES))... & eab { add( reg9dnb, eab); }
:add.w reg9dnw,eaw is (op=13 & reg9dnw & op68=5 & $(MEM_ALTER_ADDR_MODES))... & eaw { add(reg9dnw,eaw); }
:add.l reg9dn,eal is (op=13 & reg9dn & op68=6 & $(MEM_ALTER_ADDR_MODES))... & eal { add(reg9dn,eal); }
:adda.w eaw,reg9an is (op=13 & reg9an & op68=3)... & eaw { reg9an = sext(eaw) + reg9an; }
:adda.l eal,reg9an is (op=13 & reg9an & op68=7)... & eal { reg9an = eal + reg9an; }
:addi.b const8,e2b is opbig=6 & op67=0 & savmod1 & regtfan & $(DAT_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { add(const8, e2b); }
:addi.w const16,e2w is opbig=6 & op67=1 & savmod1 & regtfan & $(DAT_ALTER_ADDR_MODES); const16; e2w [ savmod2=savmod1; regtsan=regtfan; ] { add(const16,e2w); }
:addi.l const32,e2l is opbig=6 & op67=2 & savmod1 & regtfan & $(DAT_ALTER_ADDR_MODES); const32; e2l [ savmod2=savmod1; regtsan=regtfan; ] { add(const32,e2l); }
:addq.b "#"^quick,eab is (op=5 & quick & op68=0)... & eab { add(quick, eab); }
:addq.w "#"^quick,eaw is (op=5 & quick & op68=1)... & eaw { add(quick, eaw); }
:addq.l "#"^quick,eal is (op=5 & quick & op68=2)... & eal { add(quick, eal); }
# special case for address register destination
:addq.w "#"^quick,regan is op=5 & quick & op68=1 & mode=1 & regan { regan = regan + quick; }
:addq.l "#"^quick,regan is op=5 & quick & op68=2 & mode=1 & regan { regan = regan + quick; }
:addx.b Tyb,Txb is op=13 & op8=1 & op67=0 & op45=0 & Tyb & Txb { addxflags(Tyb,Txb); Txb=Tyb+Txb+zext(XF); extendedResultFlags(Txb); }
:addx.w Tyw,Txw is op=13 & op8=1 & op67=1 & op45=0 & Tyw & Txw { addxflags(Tyw,Txw); Txw=Tyw+Txw+zext(XF); extendedResultFlags(Txw); }
:addx.l Ty,Tx is op=13 & op8=1 & op67=2 & op45=0 & Ty & Tx { addxflags(Ty,Tx); Tx=Ty+Tx+zext(XF); extendedResultFlags(Tx); }
:and.b eab,reg9dnb is (op=12 & reg9dnb & op68=0 & $(DAT_ALTER_ADDR_MODES))... & eab { and(eab, reg9dnb); }
:and.w eaw,reg9dnw is (op=12 & reg9dnw & op68=1 & $(DAT_ALTER_ADDR_MODES))... & eaw { and(eaw, reg9dnw); }
:and.l eal,reg9dn is (op=12 & reg9dn & op68=2 & $(DAT_ALTER_ADDR_MODES))... & eal { and(eal, reg9dn); }
:and.b reg9dnb,eab is (op=12 & reg9dnb & op68=4 & $(MEM_ALTER_ADDR_MODES))... & eab { and(reg9dnb, eab); }
:and.w reg9dnw,eaw is (op=12 & reg9dnw & op68=5 & $(MEM_ALTER_ADDR_MODES))... & eaw { and(reg9dnw, eaw); }
:and.l reg9dn,eal is (op=12 & reg9dn & op68=6 & $(MEM_ALTER_ADDR_MODES))... & eal { and(reg9dn, eal); }
:andi.b const8,e2b is opbig=2 & op67=0 & $(DAT_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { and(const8, e2b); }
:andi.w const16,e2w is opbig=2 & op67=1 & $(DAT_ALTER_ADDR_MODES); const16; e2w [ savmod2=savmod1; regtsan=regtfan; ] { and(const16, e2w); }
:andi.l const32,e2l is opbig=2 & op67=2 & $(DAT_ALTER_ADDR_MODES); const32; e2l [ savmod2=savmod1; regtsan=regtfan; ] { and(const32, e2l); }
:andi const8,"CCR" is d16=0x23c; const8 { packflags(SR); SR = SR & zext(const8); unpackflags(SR); }
:andi const16,SR is opbig=0x2 & d8base=0x7c; const16 & SR { packflags(SR); SR = SR & const16; unpackflags(SR); }
:asl.b cntreg,regdnb is op=14 & cntreg & op8=1 & op67=0 & op34=0 & regdnb { local cnt = cntreg; local result = regdnb; arithmeticShiftLeft(cnt, result, 8); regdnb = result; }
:asl.w cntreg,regdnw is op=14 & cntreg & op8=1 & op67=1 & op34=0 & regdnw { local cnt = cntreg; local result = regdnw; arithmeticShiftLeft(cnt, result, 16); regdnw = result; }
:asl.l cntreg,regdn is op=14 & cntreg & op8=1 & op67=2 & op34=0 & regdn { local cnt = cntreg; local result = regdn; arithmeticShiftLeft(cnt, result, 32); regdn = result; }
:asl eaw is (opbig=0xe1 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ...
& eaw { local value:2 = eaw; local msbBefore = value & 0x8000; getbit(CF, value, 15); value = value << 1; resflags(value); local msbAfter = value & 0x8000; VF = (msbBefore ^ msbAfter) != 0; eaw = value; XF = CF; }

# Arithmetic shift right, register forms.  Fix: the .w and .l variants
# declared 'local cnt = cntreg' but then passed the raw cntreg to the
# macro, leaving cnt unused; pass cnt as the .b variant does.  The macro
# only reads its count argument, so behavior is unchanged.
:asr.b cntreg,regdnb is op=14 & cntreg & op8=0 & op67=0 & op34=0 & regdnb { local cnt = cntreg; local result = regdnb; arithmeticShiftRight(cnt, result, 8); regdnb = result; }
:asr.w cntreg,regdnw is op=14 & cntreg & op8=0 & op67=1 & op34=0 & regdnw { local cnt = cntreg; local result = regdnw; arithmeticShiftRight(cnt, result, 16); regdnw = result; }
:asr.l cntreg,regdn is op=14 & cntreg & op8=0 & op67=2 & op34=0 & regdn { local cnt = cntreg; local result = regdn; arithmeticShiftRight(cnt, result, 32); regdn = result; }
:asr eaw is (opbig=0xe0 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ... & eaw { local value:2 = eaw; local msbBefore = value & 0x8000; getbit(CF, value, 0); value = value s>> 1; resflags(value); local msbAfter = value & 0x8000; VF = (msbBefore ^ msbAfter) != 0; eaw = value; XF = CF; }

# Conditional branches.  Fix: the .l variant had a stray ':' in its
# display section, which printed a spurious colon in disassembly; the
# .b and .w variants carry no such colon.
:b^cc^".b" addr8 is op=6 & cc & addr8 { if (cc) goto addr8; }
:b^cc^".w" addr16 is op=6 & cc & d8base=0; addr16 { if (cc) goto addr16; }
:b^cc^".l" addr32 is op=6 & cc & d8base=255; addr32 { if (cc) goto addr32; }
:bchg.b reg9dn,eab is (op=0 & reg9dn & op68=5 & $(MEM_ALTER_ADDR_MODES))...
& eab { local source = eab; local mask:1 = 1 << (reg9dn & 7); ZF = (source & mask) == 0; eab = source ^ mask; }
:bchg.b const8,e2b is opbig=8 & op67=1 & $(MEM_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { local source = e2b; local mask:1 = 1 << (const8 & 7);
# target is a byte in memory, so the bit number in the byte is modulo 8
ZF = (source & mask) == 0; e2b = source ^ mask; }
:bchg.l reg9dn,regdn is op=0 & reg9dn & op68=5 & mode=0 & regdn { local source = regdn; local mask:4 = 1 << (reg9dn & 31); ZF = (source & mask) == 0; regdn = source ^ mask; }
:bchg.l const8,regdn is opbig=8 & op67=1 & mode=0 & regdn; const8 { local source = regdn; local mask:4 = 1 << (const8 & 31); ZF = (source & mask) == 0; regdn = source ^ mask; }
:bclr.b reg9dn,eab is (op=0 & reg9dn & op68=6 & $(MEM_ALTER_ADDR_MODES))... & eab { local source = eab; mask:1 = 1 << (reg9dn & 7); ZF = (source & mask) == 0; eab = source & (~mask); }
:bclr.b const8,e2b is opbig=8 & op67=2 & savmod1 & regtfan & $(MEM_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { local source = e2b; mask:1 = 1 << (const8 & 7); ZF = (source & mask) == 0; e2b = source & (~mask); }
:bclr.l reg9dn,regdn is op=0 & reg9dn & op68=6 & mode=0 & regdn { local source = regdn; mask:4 = 1 << (reg9dn & 31); ZF = (source & mask) == 0; regdn = source & (~mask); }
:bclr.l const8,regdn is opbig=8 & op67=2 & mode=0 & regdn; const8 { local source = regdn; mask:4 = 1 << (const8 & 31); ZF = (source & mask) == 0; regdn = source & (~mask); }

# Bit-field offset/width operand pair
bfOffWd: {f_off:f_wd} is f_off & f_wd { }

# NOTE(review): in bfchg and bfclr below, the final store uses 'tmp',
# which at that point holds the field extracted into the low bits by
# getbitfield, not the original e2l value.  It looks like the base of the
# store expression should be e2l (as bfins uses) -- confirm against the
# upstream Ghidra 68000.sinc before changing.
:bfchg e2l^bfOffWd is opbig=0xea & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); tmp:4 = e2l; getbitfield(tmp, f_off, f_wd); resbitflags(tmp, f_wd-1); mask:4 = 0; bfmask(mask, f_off, f_wd); e2l = (tmp & ~mask) | (~(tmp & mask) & mask); }
:bfclr e2l^bfOffWd is opbig=0xec & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); tmp:4 = e2l; getbitfield(tmp, f_off, f_wd); resbitflags(tmp, f_wd-1); mask:4 = 0; bfmask(mask, f_off, f_wd); e2l = tmp & ~mask; }
:bfexts e2l^bfOffWd,f_reg is opbig=0xeb & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd & f_reg; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); tmp:4 = e2l; tmp = tmp << f_off; tmp = tmp s>> (32 - f_wd); f_reg = tmp; tmp2:4 = e2l; getbitfield(tmp2, f_off, f_wd); resbitflags(tmp2, f_wd-1); }
:bfextu e2l^bfOffWd,f_reg is opbig=0xe9 & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd & f_reg; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); tmp:4 = e2l; getbitfield(tmp, f_off, f_wd); f_reg = tmp; resbitflags(tmp, f_wd-1); }
:bfffo e2l^bfOffWd,f_reg is opbig=0xed & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd & f_reg & flddo=0 & fldoffdat=0 & flddw=0 & fldwddat=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] {
# "Find First One in Bit Field" pronounced "boo-foe"
# Set the destination f_reg with the position of the first 1 bit in the source e2l.
# f_off and f_wd specify the offset and width of the field of the source to consider.
# If f_off=0 and f_wd=0 then this means the full 32-bit source is examined (implemented here).
#
local tmp:4 = e2l; NF = (tmp & 0x80000000) != 0; ZF = (tmp == 0); VF = 0; CF = 0; f_reg = zext(tmp != 0) * lzcount(tmp); }
# General bfffo form: scan an arbitrary offset/width field of the source.
:bfffo e2l^bfOffWd,f_reg is opbig=0xed & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd & f_reg ; e2l [ savmod2=savmod1; regtsan=regtfan; ] { local tmp:4 = e2l; tmp = (tmp << f_off) >> (32 - f_wd); tmp = (tmp << (32 - f_wd)); local offw = f_off + f_wd; NF = (tmp & 0x80000000) != 0; ZF = (tmp == 0); VF = 0; CF = 0; f_reg = (zext(tmp != 0) * lzcount(tmp)) + (zext(tmp == 0) * zext(offw)); }
:bfins f_reg,e2l^bfOffWd is opbig=0xef & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd & f_reg; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); mask:4 = 0; bitmask(mask, f_wd); tmp:4 = f_reg & mask; resbitflags(tmp, f_wd-1); bfmask(mask,f_off,f_wd); e2l = (e2l & ~mask) | (tmp << (32 - f_off - f_wd)); }
# Fix: BFSET must set every bit of the field to one.  The original body
# cleared the field with 'e2l & ~mask', duplicating BFCLR.  Flags are
# still computed from the field value read before the store.
:bfset e2l^bfOffWd is opbig=0xee & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); tmp:4 = e2l; getbitfield(tmp, f_off, f_wd); resbitflags(tmp, f_wd-1); mask:4 = 0; bfmask(mask,f_off,f_wd); e2l = e2l | mask; }
:bftst e2l^bfOffWd is opbig=0xe8 & op67=3 & $(DAT_DIR_CTL_ADDR_MODES); bfOffWd & f_off & f_wd; e2l [ savmod2=savmod1; regtsan=regtfan; ] { logflags(); tmp:4 = e2l; getbitfield(tmp, f_off, f_wd); resbitflags(tmp, f_wd-1); }

define pcodeop breakpoint;
:bkpt "#"op02 is opbig=0x48 & op67=1 & op5=0 & op34=1 & op02 { breakpoint(); }

:bra.b addr8 is opbig=0x60 & addr8 { goto addr8; }
:bra.w addr16 is opbig=0x60 & d8base=0; addr16 { goto addr16; }
:bra.l addr32 is opbig=0x60 & d8base=255; addr32 { goto addr32; }
:bset.b reg9dn,eab is (op=0 & reg9dn & op68=7 & $(MEM_ALTER_ADDR_MODES))...
& eab { local tmp = eab; mask:1 = 1 << (reg9dn & 7); ZF = (tmp & mask) == 0; eab = tmp | mask; }
:bset.b const8,e2b is opbig=8 & op67=3 & $(MEM_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { local tmp = e2b; mask:1 = 1 << (const8 & 7); ZF = (tmp & mask) == 0; e2b = tmp | mask; }
:bset.l reg9dn,regdn is op=0 & reg9dn & op68=7 & mode=0 & regdn { local tmp = regdn; mask:4 = 1 << (reg9dn & 31); ZF = (tmp & mask) == 0; regdn = tmp | mask; }
:bset.l const8,regdn is opbig=8 & op67=3 & mode=0 & regdn; const8 { local tmp = regdn; mask:4 = 1 << (const8 & 31); ZF = (tmp & mask) == 0; regdn = tmp | mask; }
:bsr.b addr8 is opbig=0x61 & addr8 { SP=SP-4; *:4 SP = inst_next; call addr8; }
:bsr.w addr16 is opbig=0x61 & d8base=0; addr16 { SP=SP-4; *:4 SP = inst_next; call addr16; }
:bsr.l addr32 is opbig=0x61 & d8base=255; addr32 { SP=SP-4; *:4 SP = inst_next; call addr32; }
:btst.b reg9dn,eab is (op=0 & reg9dn & op68=4 & $(MEM_ALTER_ADDR_MODES))... & eab { mask:1 = 1 << (reg9dn & 7); ZF = (eab & mask) == 0; }
:btst.b const8,e2b is opbig=8 & op67=0 & regan & $(MEM_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { mask:1 = 1 << (const8 & 7); ZF = (e2b & mask) == 0; }
:btst.l reg9dn,regdn is op=0 & reg9dn & op68=4 & mode=0 & regdn { mask:4 = 1 << (reg9dn & 31); ZF = (regdn & mask) == 0; }
:btst.l const8,regdn is opbig=8 & op67=0 & mode=0 & regdn; const8 { mask:4 = 1 << (const8 & 31); ZF = (regdn & mask) == 0; }

@ifdef COLDFIRE
# Reverse the bit order of a data register: swap halves, then bytes,
# nibbles, bit pairs, and finally adjacent bits.
:bitrev regdn is reg315=0x18 & regdn { local dword = regdn; local v = regdn; v = ((v & 0xffff0000) >> 16) | ((v & 0x0000ffff) << 16); v = ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8); v = ((v & 0xf0f0f0f0) >> 4) | ((v & 0x0f0f0f0f) << 4); v = ((v & 0xcccccccc) >> 2) | ((v & 0x33333333) << 2); v = ((v & 0xaaaaaaaa) >> 1) | ((v & 0x55555555) << 1); regdn = v; }
:byterev regdn is reg315=0x58 & regdn { regdn = ((regdn & 0x000000FF) << 24) | ((regdn & 0x0000FF00) << 8) | ((regdn & 0x00FF0000) >> 8) | ((regdn & 0xFF000000) >> 24); }
@endif # COLDFIRE

# TODO: Determine layout of a module descriptor
define pcodeop callm;
:callm const8,e2l is opbig=6 & op67=3 & $(CTL_ADDR_MODES); const8; e2l [ savmod2=savmod1; regtsan=regtfan; ] { PC = callm(const8, e2l); call [PC]; }

#TODO: should constrain CAS to ignore mode=7 & regan=4 (place CAS2 before CAS to avoid problem)
# NOTE(review): the 'goto ;' statements in the cas2/cas bodies below are
# missing their <label> targets (and the matching label definitions).  The
# angle-bracket tokens appear to have been stripped from this copy of the
# file -- restore them from the upstream Ghidra 68000.sinc rather than
# guessing label names here.
:cas2.w regdcw:regdc2w,regduw:regdu2w,(regda):(regda2) is op015=0x0cfc; regda & ext_911=0 & regduw & ext_35=0 & regdcw; regda2 & ext2_911=0 & regdu2w & ext2_35=0 & regdc2w { dc1:4 = zext(regdcw); dc2:4 = zext(regdc2w); if(dc1!=regda) goto ; if(dc2!=regda2) goto ; regda = zext(regduw); regda2 = zext(regdu2w); ZF = 1; NF = 0; goto inst_next; regdcw = regda(2); regdc2w = regda2(2); ZF = 0; NF = 1; }
:cas2.l regdc:regdc2,regdu:regdu2,(regda):(regda2) is op015=0x0efc; regda & ext_911=0 & regdu & ext_35=0 & regdc; regda2 & ext2_911=0 & regdu2 & ext2_35=0 & regdc2 { if(regdc!=regda) goto ; if(regdc2!=regda2) goto ; regda = regdu; regda2 = regdu2; ZF = 1; NF = 0; goto inst_next; regdc = regda; regdc2 = regda2; ZF = 0; NF = 1; }
:cas.b regdcb,regdub,e2b is opbig=0x0a & op67=3 & $(MEM_ALTER_ADDR_MODES); regda=0 & ext_911=0 & regdub & ext_35=0 & regdcb; e2b [ savmod2=savmod1; regtsan=regtfan; ] { local tmp = e2b; if(tmp==regdcb) goto ; regdcb = tmp; ZF = 0; NF = 1; goto inst_next; e2b = regdub; ZF = 1; NF = 0; }
:cas.w regdcw,regduw,e2w is opbig=0x0c & op67=3 & $(MEM_ALTER_ADDR_MODES); regda=0 & ext_911=0 & regduw & ext_35=0 & regdcw; e2w [ savmod2=savmod1; regtsan=regtfan; ] { local tmp = e2w; if(tmp==regdcw) goto ; regdcw = tmp; ZF = 0; NF = 1; goto inst_next; e2w = regduw; ZF = 1; NF = 0; }
:cas.l regdc,regdu,e2l is opbig=0x0e & op67=3 & $(MEM_ALTER_ADDR_MODES); regda=0 & ext_911=0 & regdu & ext_35=0 & regdc; e2l [ savmod2=savmod1; regtsan=regtfan; ] { local tmp = e2l; if(tmp==regdc) goto ; regdc = tmp; ZF = 0; NF = 1; goto inst_next; e2l = regdu; ZF = 1; NF = 0; }
:chk.w eaw,reg9dnw is (op=4 & reg9dnw & op68=6 &
$(DAT_ALTER_ADDR_MODES))... & eaw { build eaw; local address:4 = zext(eaw); local bound:2 = *:2 address; local signed_bound:4 = sext(bound); local signed_register:4 = sext(reg9dnw); if ((signed_register s>= 0) && (signed_register s<= signed_bound)) goto inst_next; NF = signed_register s< 0; __m68k_trap(6:1); }
:chk.l eal,reg9dn is (op=4 & reg9dn & op68=4 & $(DAT_ALTER_ADDR_MODES))... & eal { build eal; local address:4 = zext(eal); local bound:4 = *:4 address; local signed_bound:4 = sext(bound); local signed_register:4 = sext(reg9dn); if ((signed_register s>= 0) && (signed_register s<= signed_bound)) goto inst_next; NF = signed_register s< 0; __m68k_trap(6:1); }

# CHK2/CMP2: compare a register against the lower/upper bound pair loaded
# from the effective address; wl=1 selects CHK2, which additionally traps
# (__m68k_trap) when the register is out of bounds.
:chk2.b e2b,rreg is opbig=0 & op67=3 & $(CTL_ADDR_MODES); rreg & wl=1; e2b [ savmod2=savmod1; regtsan=regtfan; ] { build e2b; local address:4 = zext(e2b); local lower:1 = *:1 address; local upper:1 = *:1 (address + 1); local signed_lower:4 = sext(lower); local signed_upper:4 = sext(upper); local signed_register:4 = sext(rreg); ZF = ((signed_register == signed_lower) || (signed_register == signed_upper)); CF = !((signed_register s>= signed_lower) && (signed_register s<= signed_upper)); if (!CF) goto inst_next; __m68k_trap(6:1); }
:chk2.w e2w,rreg is opbig=2 & op67=3 & $(CTL_ADDR_MODES); rreg & wl=1; e2w [ savmod2=savmod1; regtsan=regtfan; ] { build e2w; local address:4 = zext(e2w); local lower:2 = *:2 address; local upper:2 = *:2 (address + 2); local signed_lower:4 = sext(lower); local signed_upper:4 = sext(upper); local signed_register:4 = sext(rreg); ZF = ((signed_register == signed_lower) || (signed_register == signed_upper)); CF = !((signed_register s>= signed_lower) && (signed_register s<= signed_upper)); if (!CF) goto inst_next; __m68k_trap(6:1); }
:chk2.l e2l,rreg is opbig=4 & op67=3 & $(CTL_ADDR_MODES); rreg & wl=1; e2l [ savmod2=savmod1; regtsan=regtfan; ] { build e2l; local address:4 = zext(e2l); local lower:4 = *:4 address; local upper:4 = *:4 (address + 4); local signed_lower:4 = sext(lower); local signed_upper:4 = sext(upper); local signed_register:4 = sext(rreg); ZF = ((signed_register == signed_lower) || (signed_register == signed_upper)); CF = !((signed_register s>= signed_lower) && (signed_register s<= signed_upper)); if (!CF) goto inst_next; __m68k_trap(6:1); }
:cmp2.b e2b,rreg is opbig=0 & op67=3 & $(CTL_ADDR_MODES); rreg & wl=0; e2b [ savmod2=savmod1; regtsan=regtfan; ] { build e2b; local address:4 = zext(e2b); local lower:1 = *:1 address; local upper:1 = *:1 (address + 1); local signed_lower:4 = sext(lower); local signed_upper:4 = sext(upper); local signed_register:4 = sext(rreg); ZF = ((signed_register == signed_lower) || (signed_register == signed_upper)); CF = !((signed_register s>= signed_lower) && (signed_register s<= signed_upper)); }
:cmp2.w e2w,rreg is opbig=2 & op67=3 & $(CTL_ADDR_MODES); rreg & wl=0; e2w [ savmod2=savmod1; regtsan=regtfan; ] { build e2w; local address:4 = zext(e2w); local lower:2 = *:2 address; local upper:2 = *:2 (address + 2); local signed_lower:4 = sext(lower); local signed_upper:4 = sext(upper); local signed_register:4 = sext(rreg); ZF = ((signed_register == signed_lower) || (signed_register == signed_upper)); CF = !((signed_register s>= signed_lower) && (signed_register s<= signed_upper)); }
:cmp2.l e2l,rreg is opbig=4 & op67=3 & $(CTL_ADDR_MODES); rreg & wl=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { build e2l; local address:4 = zext(e2l); local lower:4 = *:4 address; local upper:4 = *:4 (address + 4); local signed_lower:4 = sext(lower); local signed_upper:4 = sext(upper); local signed_register:4 = sext(rreg); ZF = ((signed_register == signed_lower) || (signed_register == signed_upper)); CF = !((signed_register s>= signed_lower) && (signed_register s<= signed_upper)); }

@ifdef MC68040
# Cache selector operand for the cache-maintenance instructions
cachetype: "none" is op67=0 { export 0:4; }
cachetype: "data" is op67=1 { export 1:4; }
cachetype: "instr" is op67=2 { export 2:4; }
cachetype: "both" is op67=3 { export 3:4; }
:cinvl cachetype,(regan) is opbig=0xf4 & cachetype
& op5=0 & op34=1 & regan { invalidateCacheLines(cachetype, regan); }
:cinvp cachetype,(regan) is opbig=0xf4 & cachetype & op5=0 & op34=2 & regan { invalidateCacheLines(cachetype, regan); }
:cinva cachetype is opbig=0xf4 & cachetype & op5=0 & op34=3 { invalidateCacheLines(cachetype); }
@endif # MC68040

@ifdef MC68040
:cpushl cachetype,(regan) is opbig=0xf4 & cachetype & op5=1 & op34=1 & regan {pushInvalidateCaches(cachetype, regan);}
:cpushp cachetype,(regan) is opbig=0xf4 & cachetype & op5=1 & op34=2 & regan {pushInvalidateCaches(cachetype, regan);}
:cpusha cachetype is opbig=0xf4 & cachetype & op5=1 & op34=3 {pushInvalidateCaches(cachetype);}
@endif # MC68040

:clr.b eab is (opbig=0x42 & op67=0 & $(DAT_ALTER_ADDR_MODES))... & eab { eab = 0; NF=0; ZF=1; VF=0; CF=0; }
:clr.w eaw is (opbig=0x42 & op67=1 & $(DAT_ALTER_ADDR_MODES))... & eaw { eaw = 0; NF=0; ZF=1; VF=0; CF=0; }
:clr.l eal is (opbig=0x42 & op67=2 & $(DAT_ALTER_ADDR_MODES))... & eal { eal=0; NF=0; ZF=1; VF=0; CF=0; }
:cmp.b eab,reg9dnb is (op=11 & reg9dnb & op68=0)... & eab { o2:1=eab; subflags(reg9dnb,o2); local tmp =reg9dnb-o2; resflags(tmp); }
:cmp.w eaw,reg9dnw is (op=11 & reg9dnw & op68=1)... & eaw { o2:2=eaw; subflags(reg9dnw,o2); local tmp =reg9dnw-o2; resflags(tmp); }
:cmp.l eal,reg9dn is (op=11 & reg9dn & op68=2)... & eal { o2:4=eal; subflags(reg9dn,o2); local tmp =reg9dn-o2; resflags(tmp); }
:cmpa.w eaw,reg9an is (op=11 & reg9an & op68=3)... & eaw { tmp1:4 = sext(eaw); subflags(reg9an,tmp1); local tmp =reg9an-tmp1; resflags(tmp); }
:cmpa.l eal,reg9an is (op=11 & reg9an & op68=7)... & eal { o2:4=eal; subflags(reg9an,o2); local tmp =reg9an-o2; resflags(tmp); }
:cmpi.b const8,e2b is opbig=12 & op67=0 & savmod1 & regtfan & $(DAT_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { o2:1=e2b; subflags(o2,const8); local tmp =o2-const8; resflags(tmp); }
:cmpi.w const16,e2w is opbig=12 & op67=1 & savmod1 & regtfan & $(DAT_ALTER_ADDR_MODES); const16; e2w [ savmod2=savmod1; regtsan=regtfan; ] { o2:2=e2w; subflags(o2,const16); local tmp =o2-const16; resflags(tmp);}
:cmpi.l const32,e2l is opbig=12 & op67=2 & savmod1 & regtfan & $(DAT_ALTER_ADDR_MODES); const32; e2l [ savmod2=savmod1; regtsan=regtfan; ] { o2:4=e2l; subflags(o2,const32); local tmp =o2-const32; resflags(tmp);}
:cmpm.b regPlus,reg9Plus is op=11 & reg9Plus & op8=1 & op67=0 & op5=0 & op34=1 & regPlus { local tmp1=*:1 regPlus; regPlus=regPlus+1; local tmp2=*:1 reg9Plus; reg9Plus=reg9Plus+1; subflags(tmp2,tmp1); local tmp =tmp2-tmp1; resflags(tmp); }
:cmpm.w regPlus,reg9Plus is op=11 & reg9Plus & op8=1 & op67=1 & op5=0 & op34=1 & regPlus { local tmp1=*:2 regPlus; regPlus=regPlus+2; local tmp2=*:2 reg9Plus; reg9Plus=reg9Plus+2; subflags(tmp2,tmp1); local tmp =tmp2-tmp1; resflags(tmp); }
:cmpm.l regPlus,reg9Plus is op=11 & reg9Plus & op8=1 & op67=2 & op5=0 & op34=1 & regPlus { local tmp1=*:4 regPlus; regPlus=regPlus+4; local tmp2=*:4 reg9Plus; reg9Plus=reg9Plus+4; subflags(tmp2,tmp1); local tmp =tmp2-tmp1; resflags(tmp); }

# cpBcc
#   need to know specific coprocessors use copcc1
# cpDBcc
#   use copcc2
# cpGEN
# cpScc
#   use copcc2
# cpTRAPcc
#   use copcc2

:db^cc regdnw,addr16 is op=5 & cc & op67=3 & op5=0 & op34=1 & regdnw; addr16 { if (cc) goto inst_next; regdnw=regdnw-1; if (regdnw!=-1) goto addr16; }
:divs.w eaw,reg9dn is (op=8 & reg9dn & op68=7)... & eaw { local denom = sext(eaw); local divis = reg9dn; local div = divis s/ denom; local rem = divis s% denom; CF=0; resflags(div); reg9dn = (rem << 16) | (div & 0xffff); }
:divu.w eaw,reg9dn is (op=8 & reg9dn & op68=3)... & eaw { local denom = zext(eaw); local divis = reg9dn; local div = divis / denom; local rem = divis % denom; CF=0; resflags(div); reg9dn = (rem << 16) | (div & 0xffff); }

#remyes: "s" is regdq & (regdr=regdq) & divsgn=1 { }
remyes: "sl" is divsgn=1 { }
#remyes: "u" is regdq & (regdr=regdq) & divsgn=0 { }
remyes: "ul" is divsgn=0 { }
#subdiv: regdq is regdq & regdr=regdq & divsz=0 & divsgn=0 { regdq = regdq/glbdenom; export regdq; }

# NB- Need to be very careful with div to not clobber when regdr and regdq refer to the same reg.
# When this happens it seems the destination reg should get the quotient, not the remainder.
#
subdiv: regdr:regdq is regdq & regdr & divsz=0 & divsgn=0 { local divis = regdq; local denom = glbdenom; local rem = divis % denom; local quot = divis / denom; regdr = rem; regdq = quot; export regdq; }
subdiv: regdr:regdq is regdq & regdr & divsz=1 & divsgn=0 { divi:8 = (zext(regdr) << 32) | zext(regdq); denom:8 = zext(glbdenom); local quot = divi / denom; local rem = divi % denom; regdr = rem:4; regdq = quot:4; export regdq; }
#subdiv: regdq is regdq & regdr=regdq & divsz=0 & divsgn=1 { regdq = regdq s/ glbdenom; export regdq; }
subdiv: regdr:regdq is regdq & regdr & divsz=0 & divsgn=1 { local divis = regdq; local denom = glbdenom; local rem = divis s% denom; local quot = divis s/ denom; regdr = rem; regdq = quot; export regdq; }
subdiv: regdr:regdq is regdq & regdr & divsz=1 & divsgn=1 { divi:8 = (sext(regdr)<<32)|sext(regdq); denom:8=sext(glbdenom); local quot=divi s/ denom; local rem=divi s% denom; regdr=rem:4; regdq=quot:4; export regdq; }

# when divsgn=0
#   divu.l is regdq / e2l -> regdq
#   divu.l (when divsz = 1) is regdr concat regdq / el2 - > regdr and regdq
#   divul.l (when divsz = 0) is regdq / el2 -> regdr and regdq
#
:div^remyes^".l" e2l,subdiv is opbig=0x4c & op67=1 & $(DAT_ALTER_ADDR_MODES); subdiv & remyes; e2l [ savmod2=savmod1; regtsan=regtfan;] { glbdenom=e2l; build subdiv; CF=0; resflags(subdiv);}
:eor.b reg9dnb,eab is (op=11 &
# tail of :eor.b whose header is on the previous chunk line
reg9dnb & op68=4 & $(DAT_ALTER_ADDR_MODES))... & eab { eor(reg9dnb, eab); }
:eor.w reg9dnw,eaw is (op=11 & reg9dnw & op68=5 & $(DAT_ALTER_ADDR_MODES))... & eaw { eor(reg9dnw, eaw); }
:eor.l reg9dn,eal is (op=11 & reg9dn & op68=6 & $(DAT_ALTER_ADDR_MODES))... & eal { eor(reg9dn, eal); }
# eori: immediate exclusive-or; the CCR/SR forms round-trip the flag bits through SR
:eori.b const8,e2b is opbig=10 & op67=0 & $(DAT_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { eor(const8, e2b); }
:eori.w const16,e2w is opbig=10 & op67=1 & $(DAT_ALTER_ADDR_MODES); const16; e2w [ savmod2=savmod1; regtsan=regtfan; ] { eor(const16, e2w); }
:eori.l const32,e2l is opbig=10 & op67=2 & $(DAT_ALTER_ADDR_MODES); const32; e2l [ savmod2=savmod1; regtsan=regtfan; ] { eor(const32, e2l); }
:eori const8,"CCR" is d16=0xa3c; const8 { packflags(SR); SR = SR ^ zext(const8); unpackflags(SR); }
:eori const16,SR is opbig=0x0a & d8base=0x7c; const16 & SR { packflags(SR); SR = SR ^ const16; unpackflags(SR); }
# exg: exchange register pairs (Dn/Dn, An/An, Dn/An)
:exg reg9dn,regdn is op=12 & reg9dn & op8=1 & op37=8 & regdn { local tmp = reg9dn; reg9dn=regdn; regdn=tmp; }
:exg reg9an,regan is op=12 & reg9an & op8=1 & op37=9 & regan { local tmp = reg9an; reg9an=regan; regan=tmp; }
:exg reg9dn,regan is op=12 & reg9dn & op8=1 & op37=17 & regan { local tmp = reg9dn; reg9dn=regan; regan=tmp; }
# ext/extb: sign-extend within a data register
:ext.w regdnw is op=4 & reg9dn=4 & op68=2 & op35=0 & regdnw { local tmp = regdnw:1; regdnw = sext(tmp); resflags(regdnw); logflags(); }
:ext.l regdn is op=4 & reg9dn=4 & op68=3 & op35=0 & regdn { local tmp = regdn:2; regdn = sext(tmp); resflags(regdn); logflags(); }
:extb.l regdn is op=4 & reg9dn=4 & op68=7 & op35=0 & regdn { local tmp = regdn:1; regdn = sext(tmp); resflags(regdn); logflags(); }
@ifdef COLDFIRE
:halt is d16=0x4ac8 unimpl
@endif
:illegal is d16=0x4afc unimpl
# jump addresses derived from effective address calculation
addrpc16: reloc is d16 [ reloc = inst_start+2+d16; ] { export *[ram]:4 reloc; }
addrd16: d16".w" is d16 { export *[ram]:4 d16; }
addrd32: d32".l" is d32 { export *[ram]:4 d32; }
addrReg: (regan) is regan { export regan; }
addrRegD16: (d16,regan) is regan; d16 {local tmp = regan + d16; export *[ram]:4 tmp; }
addrextw: (extw) is extw { export extw; }
# jmp: one constructor per addressing mode; indirect targets use goto [ea]
:jmp addrReg is opbig=0x4e & op67=3 & mode=2 & addrReg { goto [addrReg]; }
:jmp addrRegD16 is (opbig=0x4e & op67=3 & mode=5) ... & addrRegD16 { goto [addrRegD16]; }
:jmp addrextw is opbig=0x4e & op67=3 & mode=6 & regan; addrextw [ pcmode=0; regtfan=regan; ] { goto [addrextw]; }
:jmp addrpc16 is opbig=0x4e & op67=3 & mode=7 & regan=2; addrpc16 { goto addrpc16; }
:jmp addrextw is opbig=0x4e & op67=3 & mode=7 & regan=3; addrextw [ pcmode=1; ] { goto [addrextw]; }
:jmp addrd16 is opbig=0x4e & op67=3 & mode=7 & regan=0; addrd16 { goto addrd16; }
:jmp addrd32 is opbig=0x4e & op67=3 & mode=7 & regan=1; addrd32 { goto addrd32; }
# jsr: same addressing modes as jmp, but pushes the return address first
:jsr addrReg is opbig=0x4e & op67=2 & mode=2 & addrReg { SP=SP-4; *:4 SP = inst_next; call [addrReg]; }
:jsr addrRegD16 is (opbig=0x4e & op67=2 & mode=5) ... & addrRegD16 { SP=SP-4; *:4 SP = inst_next; call [addrRegD16]; }
:jsr addrextw is opbig=0x4e & op67=2 & mode=6 & regan; addrextw [ pcmode=0; regtfan=regan;] { build addrextw; SP=SP-4; *:4 SP=inst_next; call [addrextw];}
:jsr addrpc16 is opbig=0x4e & op67=2 & mode=7 & regan=2; addrpc16 { SP=SP-4; *:4 SP = inst_next; call addrpc16; }
:jsr addrextw is opbig=0x4e & op67=2 & mode=7 & regan=3; addrextw [ pcmode=1; ] { build addrextw; SP=SP-4; *:4 SP = inst_next; call [addrextw]; }
:jsr addrd16 is opbig=0x4e & op67=2 & mode=7 & regan=0; addrd16 { SP=SP-4; *:4 SP = inst_next; call addrd16; }
:jsr addrd32 is opbig=0x4e & op67=2 & mode=7 & regan=1; addrd32 { SP=SP-4; *:4 SP = inst_next; call addrd32; }
# lea: load effective address (continues on the next chunk line)
:lea eaptr,reg9an is (op=4 & reg9an & op68=7)...
# tail of :lea whose header is on the previous chunk line
& eaptr { reg9an = eaptr; }
# link: push An, make An the frame pointer, then bump SP by the (usually negative) displacement
:link.w regan,d16 is opbig=0x4e & op37=10 & regan; d16 { SP=SP-4; *:4 SP = regan; regan=SP; SP = SP + d16; }
:link.l regan,d32 is opbig=0x48 & op37=1 & regan; d32 { SP=SP-4; *:4 SP = regan; regan=SP; SP = SP + d32; }
# C and X are only updated when the shift count is nonzero
macro shiftCXFlags(cntreg) { CF = CF * (cntreg != 0); XF = CF * (cntreg != 0) + XF * (cntreg == 0); }
# lsl/lsr: register forms defer to the logicalShift* macros; the memory form shifts a word by one
:lsl.b cntreg,regdnb is op=14 & cntreg & op8=1 & op67=0 & op34=1 & regdnb { logicalShiftLeft(cntreg, regdnb, 8); }
:lsl.w cntreg,regdnw is op=14 & cntreg & op8=1 & op67=1 & op34=1 & regdnw { logicalShiftLeft(cntreg, regdnw, 16); }
:lsl.l cntreg,regdn is op=14 & cntreg & op8=1 & op67=2 & op34=1 & regdn { logicalShiftLeft(cntreg, regdn, 32); }
:lsl eaw is (opbig=0xe3 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ... & eaw { local value:2 = eaw; getbit(CF, value, 15); value = value << 1; resflags(value); eaw = value; VF = 0; XF = CF; }
:lsr.b cntreg,regdnb is op=14 & cntreg & op8=0 & op67=0 & op34=1 & regdnb { logicalShiftRight(cntreg, regdnb, 8); }
:lsr.w cntreg,regdnw is op=14 & cntreg & op8=0 & op67=1 & op34=1 & regdnw { logicalShiftRight(cntreg, regdnw, 16); }
:lsr.l cntreg,regdn is op=14 & cntreg & op8=0 & op67=2 & op34=1 & regdn { logicalShiftRight(cntreg, regdn, 32); }
:lsr eaw is (opbig=0xe2 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ... & eaw { local value:2 = eaw; getbit(CF, value, 0); value = value >> 1; resflags(value); eaw = value; VF = 0; XF = CF; }
# move: source EA is built before destination EA so side effects apply in operand order
:move.b eab,e2b is (op=1 & $(DAT_ALTER_ADDR_MODES2))... & eab ; e2b { build eab; local tmp = eab; build e2b; e2b = tmp; resflags(tmp); logflags(); }
:move.w eaw,e2w is (op=3 & $(DAT_ALTER_ADDR_MODES2))... & eaw ; e2w { build eaw; local tmp = eaw; build e2w; e2w = tmp; resflags(tmp); logflags(); }
:move.l eal,e2l is (op=2 & $(DAT_ALTER_ADDR_MODES2))... & eal ; e2l { build eal; local tmp = eal; build e2l; e2l = tmp; resflags(tmp); logflags(); }
# move to/from CCR, SR, and USP
:move "CCR",eaw is (opbig=0x42 & op67=3 & $(DAT_ALTER_ADDR_MODES))... & eaw { packflags(SR); eaw = SR; }
:move eaw,"CCR" is (opbig=0x44 & op67=3 & $(DAT_ALTER_ADDR_MODES))... & eaw { unpackflags(eaw); }
:move SR,eaw is SR; (opbig=0x40 & op67=3 & $(DAT_ALTER_ADDR_MODES))... & eaw { packflags(SR); eaw = SR; }
:move eaw,SR is SR; (opbig=0x46 & op67=3 & $(DAT_ALTER_ADDR_MODES))... & eaw { SR = eaw; unpackflags(SR); }
:move USP,regan is opbig=0x4e & op37=13 & regan & USP { regan = USP; }
:move regan,USP is opbig=0x4e & op37=12 & regan & USP { USP = regan; }
# movea: loads to address registers; word form sign-extends, flags untouched
:movea.w eaw,reg9an is (op=3 & reg9an & mode2=1)... & eaw { reg9an = sext(eaw); }
:movea.l eal,reg9an is (op=2 & reg9an & mode2=1)... & eal { reg9an = eal; }
:movec ctlreg,rreg is d16=0x4e7a; rreg & ctlreg { rreg = ctlreg; }
:movec rreg,ctlreg is d16=0x4e7b; rreg & ctlreg { ctlreg = rreg; }
@ifdef MC68040
# move16: copies an aligned 16-byte block (addresses masked to a 16-byte boundary)
macro move16(src, dst) { *:4 dst= *:4 src; src=src+4; dst=dst+4; *:4 dst= *:4 src; src=src+4; dst=dst+4; *:4 dst= *:4 src; src=src+4; dst=dst+4; *:4 dst= *:4 src; }
:move16 regPlus,regxPlus is opbig=0xf6 & op37=4 & regan & regPlus; regxan & regxPlus & da=1 { local src=regan&0xfffffff0; local dst=regxan&0xfffffff0; regan=regan+16; regxan=regxan+16; move16(src, dst); }
:move16 regPlus,d32l is opbig=0xf6 & op37=0 & regan & regPlus; d32 & d32l { local src=regan&0xfffffff0; local dst:4=d32&0xfffffff0; regan=regan+16; move16(src, dst); }
:move16 d32l,regPlus is opbig=0xf6 & op37=1 & regan & regPlus; d32 & d32l { local dst=regan&0xfffffff0; local src:4=d32&0xfffffff0; regan=regan+16; move16(src, dst); }
:move16 regParen,d32l is opbig=0xf6 & op37=2 & regan & regParen; d32 & d32l { local src=regan&0xfffffff0; local dst:4=d32&0xfffffff0; move16(src, dst); }
:move16 d32l,regParen is opbig=0xf6 & op37=3 & regan & regParen; d32 & d32l { local dst=regan&0xfffffff0; local src:4=d32&0xfffffff0; move16(src, dst); }
@endif # MC68040
@ifdef COLDFIRE
# NOTE(review): the mnemonics below carry a trailing colon in the display section (":mvs.b:"),
# so they disassemble as "mvs.b:" — looks like a typo; verify against upstream Ghidra 68000.sinc.
:mvs.b: eab, reg9dn is (op=0x7 & op68=4 & reg9dn )... & eab { reg9dn = sext(eab); }
:mvs.w: eaw, reg9dn is (op=0x7 & op68=5 & reg9dn )...
# tail of ColdFire :mvs.w: whose header is on the previous chunk line
& eaw { reg9dn = sext(eaw); }
:mvz.b: eab, reg9dn is (op=0x7 & op68=6 & reg9dn )... & eab { reg9dn = zext(eab); }
:mvz.w: eaw, reg9dn is (op=0x7 & op68=7 & reg9dn )... & eaw { reg9dn = zext(eaw); }
:mov3q "#"^d911, eal is (op=0xa & op68=5 & d911 ) ... & eal { eal = d911; }
# sats.l: saturate Dn on signed overflow (V set): negative -> 0x80000000, else 0x7fffffff
:sats.l regdn is opbig=0x4c & op37=0x10 & regdn { if (VF == 0) goto inst_next; regdn = (zext(regdn == 0 ) * 0x80000000) + (zext(regdn != 0) * 0x7fffffff); VF=0; CF=0; }
skip_addr: skipAddr is op02=2 [skipAddr = inst_next + 2;] { export *[ram]:4 skipAddr; }
skip_addr: skipAddr is op02=3 [skipAddr = inst_next + 4;] { export *[ram]:4 skipAddr; }
# TPF.w/l is occasionally used as a branch over a valid instruction.
:tpf is opbig=0x51 & op37=0x1f & op02=4 { } # nop
:tpf.w is opbig=0x51 & op37=0x1f & op02=2 & skip_addr { goto skip_addr; } # nop + 1 word
:tpf.l is opbig=0x51 & op37=0x1f & op02=3 & skip_addr { goto skip_addr; } # nop + 2 word
@endif # COLDFIRE
# Tables for register lists, for the movem instruction
# Each table chains 16 one-bit mask fields (mvm0..mvm15); a set bit both adds the register
# to the display list and emits the transfer through movemptr.
# Register to memory, forward direction, via word
r2mfwf: D0w is D0w & mvm0=1 { *movemptr = D0w; movemptr = movemptr + 2; }
r2mfwf: is mvm0=0 { }
r2mfwe: r2mfwf" "D1w is D1w & mvm1=1 & r2mfwf { *movemptr = D1w; movemptr = movemptr + 2; }
r2mfwe: r2mfwf is mvm1=0 & r2mfwf { }
r2mfwd: r2mfwe" "D2w is D2w & mvm2=1 & r2mfwe { *movemptr = D2w; movemptr = movemptr + 2; }
r2mfwd: r2mfwe is mvm2=0 & r2mfwe { }
r2mfwc: r2mfwd" "D3w is D3w & mvm3=1 & r2mfwd { *movemptr = D3w; movemptr = movemptr + 2; }
r2mfwc: r2mfwd is mvm3=0 & r2mfwd { }
r2mfwb: r2mfwc" "D4w is D4w & mvm4=1 & r2mfwc { *movemptr = D4w; movemptr = movemptr + 2; }
r2mfwb: r2mfwc is mvm4=0 & r2mfwc { }
r2mfwa: r2mfwb" "D5w is D5w & mvm5=1 & r2mfwb { *movemptr = D5w; movemptr = movemptr + 2; }
r2mfwa: r2mfwb is mvm5=0 & r2mfwb { }
r2mfw9: r2mfwa" "D6w is D6w & mvm6=1 & r2mfwa { *movemptr = D6w; movemptr = movemptr + 2; }
r2mfw9: r2mfwa is mvm6=0 & r2mfwa { }
r2mfw8: r2mfw9" "D7w is D7w & mvm7=1 & r2mfw9 { *movemptr = D7w; movemptr = movemptr + 2; }
r2mfw8: r2mfw9 is mvm7=0 & r2mfw9 { }
r2mfw7: r2mfw8" "A0w is A0w & mvm8=1 & r2mfw8 { *movemptr = A0w; movemptr = movemptr + 2; }
r2mfw7: r2mfw8 is mvm8=0 & r2mfw8 { }
r2mfw6: r2mfw7" "A1w is A1w & mvm9=1 & r2mfw7 { *movemptr = A1w; movemptr = movemptr + 2; }
r2mfw6: r2mfw7 is mvm9=0 & r2mfw7 { }
r2mfw5: r2mfw6" "A2w is A2w & mvm10=1 & r2mfw6 { *movemptr = A2w; movemptr = movemptr + 2; }
r2mfw5: r2mfw6 is mvm10=0 & r2mfw6 { }
r2mfw4: r2mfw5" "A3w is A3w & mvm11=1 & r2mfw5 { *movemptr = A3w; movemptr = movemptr + 2; }
r2mfw4: r2mfw5 is mvm11=0 & r2mfw5 { }
r2mfw3: r2mfw4" "A4w is A4w & mvm12=1 & r2mfw4 { *movemptr = A4w; movemptr = movemptr + 2; }
r2mfw3: r2mfw4 is mvm12=0 & r2mfw4 { }
r2mfw2: r2mfw3" "A5w is A5w & mvm13=1 & r2mfw3 { *movemptr = A5w; movemptr = movemptr + 2; }
r2mfw2: r2mfw3 is mvm13=0 & r2mfw3 { }
r2mfw1: r2mfw2" "A6w is A6w & mvm14=1 & r2mfw2 { *movemptr = A6w; movemptr = movemptr + 2; }
r2mfw1: r2mfw2 is mvm14=0 & r2mfw2 { }
r2mfw0: { r2mfw1" "A7w} is A7w & mvm15=1 & r2mfw1 { *movemptr = A7w; movemptr = movemptr + 2; }
r2mfw0: { r2mfw1} is mvm15=0 & r2mfw1 { }
# Register to memory, forward direction, via long
r2mflf: D0 is D0 & mvm0=1 { *movemptr = D0; movemptr = movemptr + 4; }
r2mflf: is mvm0=0 { }
r2mfle: r2mflf" "D1 is D1 & mvm1=1 & r2mflf { *movemptr = D1; movemptr = movemptr + 4; }
r2mfle: r2mflf is mvm1=0 & r2mflf { }
r2mfld: r2mfle" "D2 is D2 & mvm2=1 & r2mfle { *movemptr = D2; movemptr = movemptr + 4; }
r2mfld: r2mfle is mvm2=0 & r2mfle { }
r2mflc: r2mfld" "D3 is D3 & mvm3=1 & r2mfld { *movemptr = D3; movemptr = movemptr + 4; }
r2mflc: r2mfld is mvm3=0 & r2mfld { }
r2mflb: r2mflc" "D4 is D4 & mvm4=1 & r2mflc { *movemptr = D4; movemptr = movemptr + 4; }
r2mflb: r2mflc is mvm4=0 & r2mflc { }
r2mfla: r2mflb" "D5 is D5 & mvm5=1 & r2mflb { *movemptr = D5; movemptr = movemptr + 4; }
r2mfla: r2mflb is mvm5=0 & r2mflb { }
r2mfl9: r2mfla" "D6 is D6 & mvm6=1 & r2mfla { *movemptr = D6; movemptr = movemptr + 4; }
r2mfl9: r2mfla is mvm6=0 & r2mfla { }
r2mfl8: r2mfl9" "D7 is D7 & mvm7=1 & r2mfl9 { *movemptr = D7; movemptr = movemptr + 4; }
r2mfl8: r2mfl9 is mvm7=0 & r2mfl9 { }
r2mfl7: r2mfl8" "A0 is A0 & mvm8=1 & r2mfl8 { *movemptr = A0; movemptr = movemptr + 4; }
r2mfl7: r2mfl8 is mvm8=0 & r2mfl8 { }
r2mfl6: r2mfl7" "A1 is A1 & mvm9=1 & r2mfl7 { *movemptr = A1; movemptr = movemptr + 4; }
r2mfl6: r2mfl7 is mvm9=0 & r2mfl7 { }
r2mfl5: r2mfl6" "A2 is A2 & mvm10=1 & r2mfl6 { *movemptr = A2; movemptr = movemptr + 4; }
r2mfl5: r2mfl6 is mvm10=0 & r2mfl6 { }
r2mfl4: r2mfl5" "A3 is A3 & mvm11=1 & r2mfl5 { *movemptr = A3; movemptr = movemptr + 4; }
r2mfl4: r2mfl5 is mvm11=0 & r2mfl5 { }
r2mfl3: r2mfl4" "A4 is A4 & mvm12=1 & r2mfl4 { *movemptr = A4; movemptr = movemptr + 4; }
r2mfl3: r2mfl4 is mvm12=0 & r2mfl4 { }
r2mfl2: r2mfl3" "A5 is A5 & mvm13=1 & r2mfl3 { *movemptr = A5; movemptr = movemptr + 4; }
r2mfl2: r2mfl3 is mvm13=0 & r2mfl3 { }
r2mfl1: r2mfl2" "A6 is A6 & mvm14=1 & r2mfl2 { *movemptr = A6; movemptr = movemptr + 4; }
r2mfl1: r2mfl2 is mvm14=0 & r2mfl2 { }
r2mfl0: { r2mfl1" "SP} is SP & mvm15=1 & r2mfl1 { *movemptr = SP; movemptr = movemptr + 4; }
r2mfl0: { r2mfl1} is mvm15=0 & r2mfl1 { }
# Register to memory, backward direction, via word
# NOTE(review): these word-sized stores decrement movemptr by 4, not 2 — verify against the
# MOVEM.W -(An) semantics in the M68000 programmer's reference (word transfers step by 2).
r2mbwf: A7w is A7w & mvm0=1 { movemptr = movemptr - 4; *movemptr = A7w; }
r2mbwf: is mvm0=0 { }
r2mbwe: r2mbwf" "A6w is A6w & mvm1=1 & r2mbwf { movemptr = movemptr - 4; *movemptr = A6w; }
r2mbwe: r2mbwf is mvm1=0 & r2mbwf { }
r2mbwd: r2mbwe" "A5w is A5w & mvm2=1 & r2mbwe { movemptr = movemptr - 4; *movemptr = A5w; }
r2mbwd: r2mbwe is mvm2=0 & r2mbwe { }
r2mbwc: r2mbwd" "A4w is A4w & mvm3=1 & r2mbwd { movemptr = movemptr - 4; *movemptr = A4w; }
r2mbwc: r2mbwd is mvm3=0 & r2mbwd { }
r2mbwb: r2mbwc" "A3w is A3w & mvm4=1 & r2mbwc { movemptr = movemptr - 4; *movemptr = A3w; }
r2mbwb: r2mbwc is mvm4=0 & r2mbwc { }
r2mbwa: r2mbwb" "A2w is A2w & mvm5=1 & r2mbwb { movemptr = movemptr - 4; *movemptr = A2w; }
r2mbwa: r2mbwb is mvm5=0 & r2mbwb { }
r2mbw9: r2mbwa" "A1w is A1w & mvm6=1 & r2mbwa { movemptr = movemptr - 4; *movemptr = A1w; }
r2mbw9: r2mbwa is mvm6=0 & r2mbwa { }
r2mbw8: r2mbw9" "A0w is A0w & mvm7=1 & r2mbw9 { movemptr = movemptr - 4; *movemptr = A0w; }
r2mbw8: r2mbw9 is mvm7=0 & r2mbw9 { }
r2mbw7: r2mbw8" "D7w is D7w & mvm8=1 & r2mbw8 { movemptr = movemptr - 4; *movemptr = D7w; }
r2mbw7: r2mbw8 is mvm8=0 & r2mbw8 { }
r2mbw6: r2mbw7" "D6w is D6w & mvm9=1 & r2mbw7 { movemptr = movemptr - 4; *movemptr = D6w; }
r2mbw6: r2mbw7 is mvm9=0 & r2mbw7 { }
r2mbw5: r2mbw6" "D5w is D5w & mvm10=1 & r2mbw6 { movemptr = movemptr - 4; *movemptr = D5w; }
r2mbw5: r2mbw6 is mvm10=0 & r2mbw6 { }
r2mbw4: r2mbw5" "D4w is D4w & mvm11=1 & r2mbw5 { movemptr = movemptr - 4; *movemptr = D4w; }
r2mbw4: r2mbw5 is mvm11=0 & r2mbw5 { }
r2mbw3: r2mbw4" "D3w is D3w & mvm12=1 & r2mbw4 { movemptr = movemptr - 4; *movemptr = D3w; }
r2mbw3: r2mbw4 is mvm12=0 & r2mbw4 { }
r2mbw2: r2mbw3" "D2w is D2w & mvm13=1 & r2mbw3 { movemptr = movemptr - 4; *movemptr = D2w; }
r2mbw2: r2mbw3 is mvm13=0 & r2mbw3 { }
r2mbw1: r2mbw2" "D1w is D1w & mvm14=1 & r2mbw2 { movemptr = movemptr - 4; *movemptr = D1w; }
r2mbw1: r2mbw2 is mvm14=0 & r2mbw2 { }
r2mbw0: { r2mbw1" "D0w} is D0w & mvm15=1 & r2mbw1 { movemptr = movemptr - 4; *movemptr = D0w; }
r2mbw0: { r2mbw1} is mvm15=0 & r2mbw1 { }
# Register to memory, backward direction, via long
r2mblf: SP is SP & mvm0=1 { movemptr = movemptr - 4; *movemptr = SP; }
r2mblf: is mvm0=0 { }
r2mble: r2mblf" "A6 is A6 & mvm1=1 & r2mblf { movemptr = movemptr - 4; *movemptr = A6; }
r2mble: r2mblf is mvm1=0 & r2mblf { }
r2mbld: r2mble" "A5 is A5 & mvm2=1 & r2mble { movemptr = movemptr - 4; *movemptr = A5; }
r2mbld: r2mble is mvm2=0 & r2mble { }
r2mblc: r2mbld" "A4 is A4 & mvm3=1 & r2mbld { movemptr = movemptr - 4; *movemptr = A4; }
r2mblc: r2mbld is mvm3=0 & r2mbld { }
r2mblb: r2mblc" "A3 is A3 & mvm4=1 & r2mblc { movemptr = movemptr - 4; *movemptr = A3; }
r2mblb: r2mblc is mvm4=0 & r2mblc { }
r2mbla: r2mblb" "A2 is A2 & mvm5=1 & r2mblb { movemptr = movemptr - 4; *movemptr = A2; }
r2mbla: r2mblb is mvm5=0 & r2mblb { }
r2mbl9: r2mbla" "A1 is A1 & mvm6=1 & r2mbla { movemptr = movemptr - 4; *movemptr = A1; }
r2mbl9: r2mbla is mvm6=0 & r2mbla { }
r2mbl8: r2mbl9" "A0 is A0 & mvm7=1 & r2mbl9 { movemptr = movemptr - 4; *movemptr = A0; }
r2mbl8: r2mbl9 is mvm7=0 & r2mbl9 { }
r2mbl7: r2mbl8" "D7 is D7 & mvm8=1 & r2mbl8 { movemptr = movemptr - 4; *movemptr = D7; }
r2mbl7: r2mbl8 is mvm8=0 & r2mbl8 { }
r2mbl6: r2mbl7" "D6 is D6 & mvm9=1 & r2mbl7 { movemptr = movemptr - 4; *movemptr = D6; }
r2mbl6: r2mbl7 is mvm9=0 & r2mbl7 { }
r2mbl5: r2mbl6" "D5 is D5 & mvm10=1 & r2mbl6 { movemptr = movemptr - 4; *movemptr = D5; }
r2mbl5: r2mbl6 is mvm10=0 & r2mbl6 { }
r2mbl4: r2mbl5" "D4 is D4 & mvm11=1 & r2mbl5 { movemptr = movemptr - 4; *movemptr = D4; }
r2mbl4: r2mbl5 is mvm11=0 & r2mbl5 { }
r2mbl3: r2mbl4" "D3 is D3 & mvm12=1 & r2mbl4 { movemptr = movemptr - 4; *movemptr = D3; }
r2mbl3: r2mbl4 is mvm12=0 & r2mbl4 { }
r2mbl2: r2mbl3" "D2 is D2 & mvm13=1 & r2mbl3 { movemptr = movemptr - 4; *movemptr = D2; }
r2mbl2: r2mbl3 is mvm13=0 & r2mbl3 { }
r2mbl1: r2mbl2" "D1 is D1 & mvm14=1 & r2mbl2 { movemptr = movemptr - 4; *movemptr = D1; }
r2mbl1: r2mbl2 is mvm14=0 & r2mbl2 { }
r2mbl0: { r2mbl1" "D0} is D0 & mvm15=1 & r2mbl1 { movemptr = movemptr - 4; *movemptr = D0; }
r2mbl0: { r2mbl1} is mvm15=0 & r2mbl1 { }
# Memory to register, forward direction, via word (each word loaded is sign-extended to 32 bits)
m2rfwf: D0 is D0 & mvm0=1 { D0 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfwf: is mvm0=0 { }
m2rfwe: m2rfwf" "D1 is D1 & mvm1=1 & m2rfwf { D1 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfwe: m2rfwf is mvm1=0 & m2rfwf { }
m2rfwd: m2rfwe" "D2 is D2 & mvm2=1 & m2rfwe { D2 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfwd: m2rfwe is mvm2=0 & m2rfwe { }
m2rfwc: m2rfwd" "D3 is D3 & mvm3=1 & m2rfwd { D3 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfwc: m2rfwd is mvm3=0 & m2rfwd { }
m2rfwb: m2rfwc" "D4 is D4 & mvm4=1 & m2rfwc { D4 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfwb: m2rfwc is mvm4=0 & m2rfwc { }
m2rfwa: m2rfwb" "D5 is D5 & mvm5=1 & m2rfwb { D5 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfwa: m2rfwb is mvm5=0 & m2rfwb { }
m2rfw9: m2rfwa" "D6 is D6 & mvm6=1 & m2rfwa { D6 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw9: m2rfwa is mvm6=0 & m2rfwa { }
m2rfw8: m2rfw9" "D7 is D7 & mvm7=1 & m2rfw9 { D7 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw8: m2rfw9 is mvm7=0 & m2rfw9 { }
m2rfw7: m2rfw8" "A0 is A0 & mvm8=1 & m2rfw8 { A0 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw7: m2rfw8 is mvm8=0 & m2rfw8 { }
m2rfw6: m2rfw7" "A1 is A1 & mvm9=1 & m2rfw7 { A1 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw6: m2rfw7 is mvm9=0 & m2rfw7 { }
m2rfw5: m2rfw6" "A2 is A2 & mvm10=1 & m2rfw6 { A2 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw5: m2rfw6 is mvm10=0 & m2rfw6 { }
m2rfw4: m2rfw5" "A3 is A3 & mvm11=1 & m2rfw5 { A3 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw4: m2rfw5 is mvm11=0 & m2rfw5 { }
m2rfw3: m2rfw4" "A4 is A4 & mvm12=1 & m2rfw4 { A4 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw3: m2rfw4 is mvm12=0 & m2rfw4 { }
m2rfw2: m2rfw3" "A5 is A5 & mvm13=1 & m2rfw3 { A5 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw2: m2rfw3 is mvm13=0 & m2rfw3 { }
m2rfw1: m2rfw2" "A6 is A6 & mvm14=1 & m2rfw2 { A6 = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw1: m2rfw2 is mvm14=0 & m2rfw2 { }
m2rfw0: { m2rfw1" "SP} is SP & mvm15=1 & m2rfw1 { SP = sext(*:2 movemptr); movemptr = movemptr + 2; }
m2rfw0: { m2rfw1} is mvm15=0 & m2rfw1 { }
# Memory to register, forward direction, via long
m2rflf: D0 is D0 & mvm0=1 { D0 = *movemptr; movemptr = movemptr + 4; }
m2rflf: is mvm0=0 { }
m2rfle: m2rflf" "D1 is D1 & mvm1=1 & m2rflf { D1 = *movemptr; movemptr = movemptr + 4; }
m2rfle: m2rflf is mvm1=0 & m2rflf { }
m2rfld: m2rfle" "D2 is D2 & mvm2=1 & m2rfle { D2 = *movemptr; movemptr = movemptr + 4; }
m2rfld: m2rfle is mvm2=0 & m2rfle { }
m2rflc: m2rfld" "D3 is D3 & mvm3=1 & m2rfld { D3 = *movemptr; movemptr = movemptr + 4; }
m2rflc: m2rfld is mvm3=0 & m2rfld { }
m2rflb: m2rflc" "D4 is D4 & mvm4=1 & m2rflc { D4 = *movemptr; movemptr = movemptr + 4; }
m2rflb: m2rflc is mvm4=0 & m2rflc { }
m2rfla: m2rflb" "D5 is D5 & mvm5=1 & m2rflb { D5 = *movemptr; movemptr = movemptr + 4; }
m2rfla: m2rflb is mvm5=0 & m2rflb { }
m2rfl9: m2rfla" "D6 is D6 & mvm6=1 & m2rfla { D6 = *movemptr; movemptr = movemptr + 4; }
m2rfl9: m2rfla is mvm6=0 & m2rfla { }
m2rfl8: m2rfl9" "D7 is D7 & mvm7=1 & m2rfl9 { D7 = *movemptr; movemptr = movemptr + 4; }
m2rfl8: m2rfl9 is mvm7=0 & m2rfl9 { }
m2rfl7: m2rfl8" "A0 is A0 & mvm8=1 & m2rfl8 { A0 = *movemptr; movemptr = movemptr + 4; }
m2rfl7: m2rfl8 is mvm8=0 & m2rfl8 { }
m2rfl6: m2rfl7" "A1 is A1 & mvm9=1 & m2rfl7 { A1 = *movemptr; movemptr = movemptr + 4; }
m2rfl6: m2rfl7 is mvm9=0 & m2rfl7 { }
m2rfl5: m2rfl6" "A2 is A2 & mvm10=1 & m2rfl6 { A2 = *movemptr; movemptr = movemptr + 4; }
m2rfl5: m2rfl6 is mvm10=0 & m2rfl6 { }
m2rfl4: m2rfl5" "A3 is A3 & mvm11=1 & m2rfl5 { A3 = *movemptr; movemptr = movemptr + 4; }
m2rfl4: m2rfl5 is mvm11=0 & m2rfl5 { }
m2rfl3: m2rfl4" "A4 is A4 & mvm12=1 & m2rfl4 { A4 = *movemptr; movemptr = movemptr + 4; }
m2rfl3: m2rfl4 is mvm12=0 & m2rfl4 { }
m2rfl2: m2rfl3" "A5 is A5 & mvm13=1 & m2rfl3 { A5 = *movemptr; movemptr = movemptr + 4; }
m2rfl2: m2rfl3 is mvm13=0 & m2rfl3 { }
m2rfl1: m2rfl2" "A6 is A6 & mvm14=1 & m2rfl2 { A6 = *movemptr; movemptr = movemptr + 4; }
m2rfl1: m2rfl2 is mvm14=0 & m2rfl2 { }
m2rfl0: { m2rfl1" "SP} is SP & mvm15=1 & m2rfl1 { SP = *movemptr; movemptr = movemptr + 4; }
m2rfl0: { m2rfl1} is mvm15=0 & m2rfl1 { }
# movemOp: the effective address used by movem; movemWrt writes the final pointer back for (An)+/-(An)
movemOp: (regan) is mode=2 & regan { export regan; }
movemOp: (regan)+ is mode=3 & regan { export regan; }
movemOp: -(regan) is mode=4 & regan { export regan; }
movemOp: (d16, regan) is mode=5 & regan; fldpar ; d16 { local tmp = regan + d16; export tmp; }
movemOp: (extw) is mode=6 & regan; fldpar ; extw [ pcmode=0; regtfan=regan; ] {build extw; export extw; }
movemOp: (d16)".w" is mode=7 & regan=0; fldpar; d16 { local tmp:4 = d16; export tmp; }
movemOp: (d32)".l" is mode=7 & regan=1; fldpar; d32 { local tmp:4 = d32; export tmp; }
movemOp: (d16,PC) is op10=1 & mode=7 & regan=2; fldpar; d16 & PC { local tmp = inst_start + 4 + d16:4; export tmp; }
movemOp: (extw) is op10=1 & mode=7 & regan=3; fldpar; extw [ pcmode=1; ] { build extw; export extw; }
movemWrt: is (mode=3 | mode=4) & regan { regan = movemptr; }
movemWrt: is mode { }
# movem: the register-list chain runs after movemptr is seeded from the effective address
:movem.w r2mfw0, movemOp is (opbig=0x48 & op67=2; r2mfw0) ... & movemOp { build movemOp; movemptr = movemOp; build r2mfw0; }
:movem.w r2mbw0, movemOp is (opbig=0x48 & op67=2 & mode=4 & movemWrt; r2mbw0) ... & movemOp { build movemOp; movemptr = movemOp; build r2mbw0; build movemWrt; }
:movem.l r2mfl0, movemOp is (opbig=0x48 & op67=3; r2mfl0) ... & movemOp { build movemOp; movemptr = movemOp; build r2mfl0; }
:movem.l r2mbl0, movemOp is (opbig=0x48 & op67=3 & mode=4 & movemWrt; r2mbl0) ... & movemOp { build movemOp; movemptr = movemOp; build r2mbl0; build movemWrt; }
:movem.w movemOp, m2rfw0 is (opbig=0x4c & op67=2 & movemWrt; m2rfw0) ... & movemOp { build movemOp; movemptr = movemOp; build m2rfw0; build movemWrt; }
:movem.l movemOp, m2rfl0 is (opbig=0x4c & op67=3 & movemWrt; m2rfl0) ... & movemOp { build movemOp; movemptr = movemOp; build m2rfl0; build movemWrt; }
# movep: transfers alternate bytes (even addresses) between memory and a data register
epw: (d16, regan) is regan; d16 { local tmp = regan + d16; export tmp; }
:movep.w epw,reg9dnw is (op=0 & reg9dnw & op68=4 & op35=1) ... & epw { src:4 = epw; ho:1 = *:1 src; lo:1 = *:1(src+2); reg9dnw = (zext(ho) << 8) | zext(lo); }
:movep.l epw,reg9dn is (op=0 & reg9dn & op68=5 & op35=1) ... & epw { src:4 = epw; ho:1 = *:1 src; mu:1 = *:1(src+2); ml:1 = *(src+4); lo:1 = *:1(src+6); reg9dn = (zext(ho) << 24) | (zext(mu) << 16) | (zext(ml) << 8) | zext(lo); }
:movep.w reg9dnw,epw is (op=0 & reg9dnw & op68=6 & op35=1) ...
# tail of :movep.w (register-to-memory) whose header is on the previous chunk line
& epw { src:4 = epw; local tmp = (reg9dnw >> 8); *:1 src = tmp:1; src = src+2; *:1 src = reg9dnw:1; }
:movep.l reg9dn,epw is (op=0 & reg9dn & op68=7 & op35=1)... & epw { src:4 = epw; local tmp = (reg9dn >> 24); *:1 src = tmp:1; src = src+2; tmp = (reg9dn >> 16); *:1 src = tmp:1; src = src+2; tmp = (reg9dn >> 8); *:1 src = tmp:1; src = src+2; *:1 src = reg9dn:1; }
:moveq d8base,reg9dn is op=7 & reg9dn & op8=0 & d8base { reg9dn = d8base; resflags(reg9dn); logflags(); }
# moves: move to/from alternate address space; wl selects direction, da register-vs-memory merge
:moves.b rreg,e2b is opbig=0x0e & op67=0 & mode & regan; rreg & wl=1; e2b [ regtsan=regan; savmod2=mode; ] { e2b = rreg:1; }
:moves.w rreg,e2w is opbig=0x0e & op67=1 & mode & regan; rreg & wl=1; e2w [ regtsan=regan; savmod2=mode; ] { e2w = rreg:2; }
:moves.l rreg,e2l is opbig=0x0e & op67=2 & mode & regan; rreg & wl=1; e2l [ regtsan=regan; savmod2=mode; ] { e2l = rreg; }
:moves.b e2b,rreg is opbig=0x0e & op67=0 & mode & regan; da=0 & rreg & wl=0; e2b [ regtsan=regan; savmod2=mode; ] { rreg = (rreg & 0xffffff00) | zext(e2b); }
:moves.w e2w,rreg is opbig=0x0e & op67=1 & mode & regan; da=0 & rreg & wl=0; e2w [ regtsan=regan; savmod2=mode; ] { rreg = (rreg & 0xffff0000) | zext(e2w); }
:moves.b e2b,rreg is opbig=0x0e & op67=0 & mode & regan; da=1 & rreg & wl=0; e2b [ regtsan=regan; savmod2=mode; ] { rreg = sext(e2b); }
:moves.w e2w,rreg is opbig=0x0e & op67=1 & mode & regan; da=1 & rreg & wl=0; e2w [ regtsan=regan; savmod2=mode; ] { rreg = sext(e2w); }
:moves.l e2l,rreg is opbig=0x0e & op67=2 & mode & regan; rreg & wl=0; e2l [ regtsan=regan; savmod2=mode; ] { rreg = e2l; }
# muls/mulu (word form): 16x16 -> 32 result in Dn
:muls.w eaw,reg9dn is (op=12 & reg9dn & op68=7)... & eaw { tmp1:4 = sext( reg9dn:2 ); tmp2:4 = sext(eaw); reg9dn = tmp1 * tmp2; resflags(reg9dn); CF=0; VF=0;}
:mulu.w eaw,reg9dn is (op=12 & reg9dn & op68=3)... & eaw { tmp1:4 = zext( reg9dn:2 ); tmp2:4 = zext(eaw); reg9dn = tmp1 * tmp2; resflags(reg9dn); CF=0; VF=0; }
# Long multiply: mulsize picks signed/unsigned display; submul picks 32- or 64-bit product
mulsize: "s.l" is divsgn=1 { }
mulsize: "u.l" is divsgn=0 { }
submul: regdq is regdq & divsgn=1 & divsz=0 { regdq = glbdenom * regdq; resflags(regdq); CF=0; }
submul: regdr-regdq is regdq & divsgn=1 & divsz=1 & regdr { tmp1:8 = sext(glbdenom); tmp2:8 = sext(regdq); local res = tmp1 * tmp2; regdq = res:4; regdr = res(4); resflags(res); CF=0; VF=0; }
submul: regdq is regdq & divsgn=0 & divsz=0 { regdq = glbdenom * regdq; resflags(regdq); CF=0; }
submul: regdr-regdq is regdq & divsgn=0 & divsz=1 & regdr { tmp1:8 = zext(glbdenom); tmp2:8 = zext(regdq); local res = tmp1 * tmp2; regdq = res:4; regdr = res(4); resflags(res); CF=0; VF=0; }
:mul^mulsize e2l,submul is opbig=0x4c & op67=0 & $(DAT_ALTER_ADDR_MODES); submul & mulsize; e2l [ savmod2=savmod1; regtsan=regtfan; ] { glbdenom=e2l; build submul; }
# nbcd: negate decimal with extend
:nbcd eab is (opbig=0x48 & op67=0 & $(DAT_ALTER_ADDR_MODES))... & eab { local tmp = eab; CF = (tmp != 0) || (XF == 1); tmp = 0 - tmp - XF; eab = bcdAdjust(tmp); bcdflags(tmp); }
# NB: For the neg insn the CF carry flag is not set like other insns, from the manual:
# XF - Set the same as the carry bit.
# NF - Set if the result is negative; cleared otherwise.
# ZF - Set if the result is zero; cleared otherwise.
# VF - Set if an overflow occurs; cleared otherwise.
# CF - Cleared if the result is zero; set otherwise.
macro negFlags(op1) { VF = sborrow(0, op1); }
macro negResFlags(result) { NF = result s< 0; CF = result != 0; XF = CF; ZF = result == 0; }
:neg.b eab is (opbig=0x44 & op67=0 & $(DAT_ALTER_ADDR_MODES))... & eab { o2:1=eab; negFlags(o2); o2 = -o2; eab=o2; negResFlags(o2); }
:neg.w eaw is (opbig=0x44 & op67=1 & $(DAT_ALTER_ADDR_MODES))... & eaw { o2:2=eaw; negFlags(o2); o2 = -o2; eaw=o2; negResFlags(o2); }
:neg.l eal is (opbig=0x44 & op67=2 & $(DAT_ALTER_ADDR_MODES))...
# tail of :neg.l whose header is on the previous chunk line
& eal { o2:4=eal; negFlags(o2); o2 = -o2; eal=o2; negResFlags(o2); }
# NB: For the negx insn the CF and ZF flags are not set like other insns, from the manual:
# XF - Set the same as the carry bit.
# NF - Set if the result is negative; cleared otherwise.
# ZF - Cleared if the result is nonzero; unchanged otherwise.
# VF - Set if an overflow occurs; cleared otherwise.
# CF - Set if a borrow occurs; cleared otherwise.
:negx.b eab is (opbig=0x40 & op67=0 & $(DAT_ALTER_ADDR_MODES))... & eab { local tmp = eab + XF; negxsubflags(tmp); tmp = -tmp; eab=tmp; extendedResultFlags(tmp); }
:negx.w eaw is (opbig=0x40 & op67=1 & $(DAT_ALTER_ADDR_MODES))... & eaw { local tmp = eaw + zext(XF); negxsubflags(tmp); tmp = -tmp; eaw=tmp; extendedResultFlags(tmp); }
:negx.l eal is (opbig=0x40 & op67=2 & $(DAT_ALTER_ADDR_MODES))... & eal { local tmp = eal + zext(XF); negxsubflags(tmp); tmp = -tmp; eal=tmp; extendedResultFlags(tmp); }
:nop is opbig=0x4e & op37=14 & op02=1 { }
# not: bitwise complement of destination
:not.b eab is (opbig=0x46 & op67=0 & $(DAT_ALTER_ADDR_MODES))... & eab { local tmp = eab; logflags(); tmp = ~tmp; eab = tmp; resflags(tmp); }
:not.w eaw is (opbig=0x46 & op67=1 & $(DAT_ALTER_ADDR_MODES))... & eaw { local tmp = eaw; logflags(); tmp = ~tmp; eaw = tmp; resflags(tmp); }
:not.l eal is (opbig=0x46 & op67=2 & $(DAT_ALTER_ADDR_MODES))... & eal { local tmp = eal; logflags(); tmp = ~tmp; eal = tmp; resflags(tmp); }
# or: EA-to-register forms use data-alterable modes, register-to-EA forms memory-alterable modes
:or.b eab,reg9dnb is (op=8 & reg9dnb & op68=0 & $(DAT_ALTER_ADDR_MODES))... & eab { or(eab, reg9dnb); }
:or.w eaw,reg9dnw is (op=8 & reg9dnw & op68=1 & $(DAT_ALTER_ADDR_MODES))... & eaw { or(eaw, reg9dnw); }
:or.l eal,reg9dn is (op=8 & reg9dn & op68=2 & $(DAT_ALTER_ADDR_MODES))... & eal { or(eal, reg9dn); }
:or.b reg9dnb,eab is (op=8 & reg9dnb & op68=4 & $(MEM_ALTER_ADDR_MODES))... & eab { or(reg9dnb, eab); }
:or.w reg9dnw,eaw is (op=8 & reg9dnw & op68=5 & $(MEM_ALTER_ADDR_MODES))... & eaw { or(reg9dnw, eaw); }
:or.l reg9dn,eal is (op=8 & reg9dn & op68=6 & $(MEM_ALTER_ADDR_MODES))... & eal { or(reg9dn, eal); }
# ori: immediate inclusive-or; CCR/SR forms round-trip the flag bits through SR
:ori.b const8,e2b is opbig=0 & op67=0 & $(DAT_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { or(const8, e2b); }
:ori.w const16,e2w is opbig=0 & op67=1 & $(DAT_ALTER_ADDR_MODES); const16; e2w [ savmod2=savmod1; regtsan=regtfan; ] { or(const16, e2w); }
:ori.l const32,e2l is opbig=0 & op67=2 & $(DAT_ALTER_ADDR_MODES); const32; e2l [ savmod2=savmod1; regtsan=regtfan; ] { or(const32, e2l); }
:ori const8,"CCR" is opbig=0 & op37=7 & op02=4; const8 { packflags(SR); SR=SR|zext(const8); unpackflags(SR); }
:ori const16,SR is SR; opbig=0x00 & d8base=0x7c; const16 { packflags(SR); SR=SR|const16; unpackflags(SR); }
# pack: add the adjustment, then squeeze two BCD digits into one byte
:pack Tyw,Txw,const16 is op=8 & op48=20 & Txw & Tyw & rmbit=0; const16 { local value = (Tyw & 0x0F0F) + const16; Txw = (Txw & 0xFF00) | ((value & 0x0F00) >> 4) | (value & 0x000F); }
:pack Tyw,Txb,const16 is op=8 & op48=20 & Tyw & Txb & rmbit=1; const16 { local value = (Tyw & 0x0F0F) + const16; local result:2 = ((value & 0x0F00) >> 4) | (value & 0x000F); Txb = result:1; }
# pea: push effective address (continues on the next chunk line)
:pea eaptr is (opbig=0x48 & op67=1 & $(CTL_ADDR_MODES))...
& eaptr { value:4 = eaptr; SP = SP-4; *SP = value; }

# PMMU cache/ATC maintenance and MMU-register moves. These are privileged,
# model-specific operations with no useful p-code model, so they are declared
# for disassembly only and left unimpl.
@ifdef MC68040
:pflushn regPlus is opbig=0xf5 & op67=0 & op5=0 & op34=0 & regPlus unimpl
:pflush regPlus is opbig=0xf5 & op67=0 & op5=0 & op34=1 & regPlus unimpl
:pflushan is opbig=0xf5 & op67=0 & op5=0 & op34=2 & regan=0 unimpl
:pflusha is opbig=0xf5 & op67=0 & op5=0 & op34=3 & regan=0 unimpl
@endif # MC68040

@ifdef MC68030
:pflusha is opbig=0xf0 & op67=0 & mode=0 & regan=0; opx015=0x2400 unimpl

# FC: function-code operand — SFC/DFC control register, a data register, or an immediate.
FC: SFC is fc4=0 & fc3=0 & fc02=0 & SFC { export SFC; }
FC: DFC is fc4=0 & fc3=0 & fc02=1 & DFC { export DFC; }
FC: regdc is fc4=0 & fc3=1 & regdc { export regdc; }
FC: "#"^fc03 is fc4=1 & fc3=0 & fc03 { export *[const]:4 fc03; }
FCmask: "#"^fcmask is fcmask { export *[const]:1 fcmask; }

:pflush FC,FCmask is opbig=0xf0 & op67=0 & mode=0 & regan=0; bigopx=0x30 & FCmask & FC unimpl
:pflush FC,FCmask,e2l is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); bigopx=0x38 & FCmask & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:ploadr FC,e2l is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx515=0x110 & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:ploadw FC,e2l is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx515=0x100 & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl

# PMOVE to/from the 68030 MMU registers (TC, SRP, CRP, MMUSR, TT0, TT1).
# rwx selects direction; fbit=1 is the "fd" (flush-disable) variant.
:pmove.l TC,e2l is TC & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=0 & rwx=1 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.l e2l,TC is TC & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=0 & rwx=0 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmovefd.l e2l,TC is TC & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=0 & rwx=0 & fbit=1 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.d SRP,e2d is SRP & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=2 & rwx=1 & fbit=0 & d8=0; e2d [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.d e2d,SRP is SRP & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=2 & rwx=0 & fbit=0 & d8=0; e2d [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmovefd.d e2d,SRP is SRP & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=2 & rwx=0 & fbit=1 & d8=0; e2d [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.d CRP,e2d is CRP & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=3 & rwx=1 & fbit=0 & d8=0; e2d [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.d e2d,CRP is CRP & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=3 & rwx=0 & fbit=0 & d8=0; e2d [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmovefd.d e2d,CRP is CRP & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=2 & mregn=3 & rwx=0 & fbit=1 & d8=0; e2d [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.w MMUSR,e2w is MMUSR & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=3 & mregn=0 & rwx=1 & fbit=0 & d8=0; e2w [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.w e2w,MMUSR is MMUSR & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=3 & mregn=0 & rwx=0 & fbit=0 & d8=0; e2w [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.l TT0,e2l is TT0 & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=2 & rwx=1 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.l e2l,TT0 is TT0 & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=2 & rwx=0 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmovefd.l e2l,TT0 is TT0 & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=2 & rwx=0 & fbit=1 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.l TT1,e2l is TT1 & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=3 & rwx=1 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmove.l e2l,TT1 is TT1 & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=3 & rwx=0 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:pmovefd.l e2l,TT1 is TT1 & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=3 & rwx=0 & fbit=1 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl

#TODO: MC68EC030 only - conflicts with MMUSR form above
#:pmove.w ACUSR,e2w is ACUSR & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=3 & mregn=0 & rwx=1 & fbit=0 & d8=0; e2w [ savmod2=savmod1; regtsan=regtfan; ] unimpl
#:pmove.w e2w,ACUSR is ACUSR & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=3 & mregn=0 & rwx=0 & fbit=0 & d8=0; e2w [ savmod2=savmod1; regtsan=regtfan; ] unimpl
#TODO: MC68EC030 only - - conflicts with TTx form above
#:pmove.l ACx,e2l is ACx & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=? & rwx=1 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
#:pmove.l e2l,ACx is ACx & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=? & rwx=0 & fbit=0 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
#:pmovefd.l e2l,ACx is ACx & opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=0 & mregn=? & rwx=0 & fbit=1 & d8=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
@endif # MC68030

#TODO: PMOVE PMMU Register - MC68851 only
#TODO: PRESTORE PMMU Register - MC68851 only
#TODO: PSAVE PMMU Register - MC68851 only
#TODO: PScc PMMU Register - MC68851 only

@ifdef MC68030
# PTEST: search/probe the address translation cache at the given level.
ptestLevel: "#"^mregn is mregn { export *[const]:1 mregn; }
:ptestr FC,e2l,ptestLevel is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=4 & ptestLevel & rwx=1 & fbit=0 & aregx=0 & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:ptestr FC,e2l,ptestLevel,aregx is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=4 & ptestLevel & rwx=1 & fbit=1 & aregx & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:ptestw FC,e2l,ptestLevel is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=4 & ptestLevel & rwx=0 & fbit=0 & aregx=0 & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
:ptestw FC,e2l,ptestLevel,aregx is opbig=0xf0 & op67=0 & $(CTL_ADDR_MODES); opx1315=4 & ptestLevel & rwx=0 & fbit=1 & aregx & FC; e2l [ savmod2=savmod1; regtsan=regtfan; ] unimpl
@endif # MC68030

#TODO: PTEST FC, - MC68EX030 only
#TODO: PTEST FC,,#level,(An) - MC68851 only

@ifdef MC68040
:ptestr regPlus is opbig=0xf5 & op67=1 & op35=5 & regPlus unimpl
:ptestw regPlus is opbig=0xf5 & op67=1 & op35=1 & regPlus unimpl
@endif # MC68040

#TODO: PTRAPcc - MC68851 only
#TODO: PVALID - MC68851 only

:reset is d16=0x4e70 { reset(); }

# ROL/ROR on data registers: rotate by count via the rotateLeft/rotateRight macros.
:rol.b cntreg,regdnb is op=14 & cntreg & op8=1 & op67=0 & op34=3 & regdnb { rotateLeft(cntreg, regdnb, 8); }
:rol.w cntreg,regdnw is op=14 & cntreg & op8=1 & op67=1 & op34=3 & regdnw { rotateLeft(cntreg, regdnw, 16); }
:rol.l cntreg,regdn is op=14 & cntreg & op8=1 & op67=2 & op34=3 & regdn { rotateLeft(cntreg, regdn, 32); }
# Memory form rotates a word by exactly one bit; CF gets the bit rotated into position 0.
:rol eaw is (opbig=0xe7 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ... & eaw { local value:2 = eaw; value = (value << 1) | (value >> 15); getbit(CF, value, 0); resflags(value); eaw = value; VF = 0; }
:ror.b cntreg,regdnb is op=14 & cntreg & op8=0 & op67=0 & op34=3 & regdnb { rotateRight(cntreg, regdnb, 8); }
:ror.w cntreg,regdnw is op=14 & cntreg & op8=0 & op67=1 & op34=3 & regdnw { rotateRight(cntreg, regdnw, 16); }
:ror.l cntreg,regdn is op=14 & cntreg & op8=0 & op67=2 & op34=3 & regdn { rotateRight(cntreg, regdn, 32); }
:ror eaw is (opbig=0xe6 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ... & eaw { local value:2 = eaw; value = (value << 15) | (value >> 1); getbit(CF, value, 15); resflags(value); eaw = value; VF = 0; }
# ROXL/ROXR: rotate through the extend (X) flag.
:roxl.b cntreg,regdnb is op=14 & cntreg & op8=1 & op67=0 & op34=2 & regdnb { rotateLeftExtended(cntreg, regdnb, 8); }
:roxl.w cntreg,regdnw is op=14 & cntreg & op8=1 & op67=1 & op34=2 & regdnw { rotateLeftExtended(cntreg, regdnw, 16); }
:roxl.l cntreg,regdn is op=14 & cntreg & op8=1 & op67=2 & op34=2 & regdn { rotateLeftExtended(cntreg, regdn, 32); }
:roxl eaw is (opbig=0xe5 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ...
& eaw { local value:2 = eaw; local xflag = (value & 0x8000) != 0; value = (value << 1) | zext(XF); resflags(value); eaw = value; VF = 0; XF = xflag; CF = XF; }
:roxr.b cntreg,regdnb is op=14 & cntreg & op8=0 & op67=0 & op34=2 & regdnb { rotateRightExtended(cntreg, regdnb, 8); }
:roxr.w cntreg,regdnw is op=14 & cntreg & op8=0 & op67=1 & op34=2 & regdnw { rotateRightExtended(cntreg, regdnw, 16); }
:roxr.l cntreg,regdn is op=14 & cntreg & op8=0 & op67=2 & op34=2 & regdn { rotateRightExtended(cntreg, regdn, 32); }
# Memory ROXR rotates one bit right through X; old bit 0 becomes the new X and C.
:roxr eaw is (opbig=0xe4 & op67=3 & $(MEM_ALTER_ADDR_MODES)) ... & eaw { local value:2 = eaw; local xflag = (value & 0x0001) != 0; value = (zext(XF) << 15) | (value >> 1); resflags(value); eaw = value; VF = 0; XF = xflag; CF = XF; }

# RTD: return and deallocate.
# NOTE(review): the manual describes the 16-bit displacement as sign-extended;
# zext(const16) here would mishandle a negative displacement — confirm how
# const16 is attached before changing.
:rtd const16 is opbig=0x4e & op37=14 & op02=4; const16 { PC = *SP; SP = SP + 4 + zext(const16); return [PC]; }
# RTE: privileged exception return; modeled only as an indirect return.
:rte is d16=0x4e73 { tmp:4 = 0; return [tmp]; }
define pcodeop rtm;
:rtm regdn is opbig=0x06 & op37=24 & regdn { PC = rtm(regdn); return [PC]; }
:rtm regan is opbig=0x06 & op37=25 & regan { PC = rtm(regan); return [PC];}
# RTR: pop CCR (word) then the return address.
:rtr is opbig=0x4e & op37=14 & op02=7 { SR = *SP; SP = SP+2; PC = *SP; SP = SP+4; unpackflags(SR); return [PC]; }
:rts is opbig=0x4e & op37=14 & op02=5 { PC = *SP; SP = SP+4; return [PC]; }

# SBCD: subtract decimal with extend; borrow computed before the BCD adjust.
:sbcd Tyb,Txb is op=8 & op48=16 & Txb & Tyb { CF = (Txb < Tyb) || ( (XF == 1) && (Txb == Tyb) ); Txb = Txb - Tyb - XF; Txb = bcdAdjust(Txb); bcdflags(Txb); }

# Scc: set all byte bits when the condition holds (-1 = 0xFF), else clear.
:s^cc eab is (op=5 & cc & op67=3 & $(DAT_ALTER_ADDR_MODES))... & eab { eab = -cc; }

define pcodeop stop;
:stop const16 is opbig=0x4e & d8base=0x72; const16 { SR = const16; unpackflags(SR); stop(); }

# SUB, <ea>-source forms: data register is the destination of the sub() macro.
:sub.b eab,reg9dnb is (op=9 & reg9dnb & op68=0)... & eab { sub(eab, reg9dnb); }
:sub.w eaw,reg9dnw is (op=9 & reg9dnw & op68=1)... & eaw { sub(eaw, reg9dnw); }
:sub.l eal,reg9dn is (op=9 & reg9dn & op68=2)... & eal { sub(eal, reg9dn); }
# SUB, register-source forms: the memory alterable operand is the destination.
:sub.b reg9dnb,eab is (op=9 & reg9dnb & op68=4 & $(MEM_ALTER_ADDR_MODES))... & eab { sub(reg9dnb, eab); }
:sub.w reg9dnw,eaw is (op=9 & reg9dnw & op68=5 & $(MEM_ALTER_ADDR_MODES))... & eaw { sub(reg9dnw, eaw); }
:sub.l reg9dn,eal is (op=9 & reg9dn & op68=6 & $(MEM_ALTER_ADDR_MODES))... & eal { sub(reg9dn, eal); }
# SUBA: address register destination; word source is sign-extended; flags untouched.
:suba.w eaw,reg9an is (op=9 & reg9an & op68=3)... & eaw { reg9an = reg9an - sext(eaw); }
:suba.l eal,reg9an is (op=9 & reg9an & op68=7)... & eal { reg9an = reg9an - eal; }
:subi.b const8,e2b is opbig=4 & op67=0 & $(DAT_ALTER_ADDR_MODES); const8; e2b [ savmod2=savmod1; regtsan=regtfan; ] { sub(const8, e2b); }
:subi.w const16,e2w is opbig=4 & op67=1 & $(DAT_ALTER_ADDR_MODES); const16; e2w [ savmod2=savmod1; regtsan=regtfan; ] { sub(const16, e2w); }
:subi.l const32,e2l is opbig=4 & op67=2 & $(DAT_ALTER_ADDR_MODES); const32; e2l [ savmod2=savmod1; regtsan=regtfan; ] { sub(const32, e2l); }
:subq.b "#"^quick,eab is (op=5 & quick & op68=4)... & eab { sub(quick, eab); }
:subq.w "#"^quick,eaw is (op=5 & quick & op68=5)... & eaw { sub(quick, eaw); }
:subq.l "#"^quick,eal is (op=5 & quick & op68=6)... & eal { sub(quick, eal); }
# special case for address register destination: condition codes not affected
:subq.w "#"^quick,regan is op=5 & quick & op68=5 & mode=1 & regan { regan = regan - quick; }
:subq.l "#"^quick,regan is op=5 & quick & op68=6 & mode=1 & regan { regan = regan - quick; }

# SUBX: subtract with extend; flags computed before the subtraction via subxflags().
:subx.b Tyb,Txb is op=9 & op8=1 & op67=0 & op45=0 & Tyb & Txb { tmp0:1 = zext(XF); subxflags(Txb, Tyb); local tmp =tmp0+Tyb; Txb=Txb-tmp; extendedResultFlags(Txb); }
:subx.w Tyw,Txw is op=9 & op8=1 & op67=1 & op45=0 & Tyw & Txw { tmp0:2 = zext(XF); subxflags(Txw, Tyw); local tmp =tmp0+Tyw; Txw=Txw-tmp; extendedResultFlags(Txw); }
:subx.l Ty,Tx is op=9 & op8=1 & op67=2 & op45=0 & Ty & Tx { tmp0:4 = zext(XF); subxflags(Tx, Ty); local tmp =tmp0+Ty; Tx=Tx-tmp; extendedResultFlags(Tx); }

# SWAP: exchange the upper and lower words of a data register.
:swap regdn is opbig=0x48 & op37=8 & regdn { logflags(); regdn = (regdn << 16) | (regdn>>16); resflags(regdn); }

@ifndef COLDFIRE
# TAS: test, then set the high bit (atomicity of the bus cycle is not modeled).
:tas eab is (opbig=0x4a & op67=3 & $(DAT_ALTER_ADDR_MODES))... & eab { logflags(); resflags(eab); eab = eab | 0x80; }
@endif # COLDFIRE

:trap "#"^op03 is opbig=0x4e & op67=1 & op45=0 & op03 { vector:1 = op03; __m68k_trap(vector); }
:trap^cc is op=5 & cc & op37=31 & op02=4 { if (!cc) goto inst_next; SP = SP - 4; *:4 SP = inst_next; vector:1 = 7; __m68k_trap(vector); }
:trap^cc^".w" const16 is op=5 & cc & op37=31 & op02=2; const16 { if (!cc) goto inst_next; SP = SP - 4; *:4 SP = inst_next; __m68k_trapv(); }
:trap^cc^".l" const32 is op=5 & cc & op37=31 & op02=3; const32 { if (!cc) goto inst_next; SP = SP - 4; *:4 SP = inst_next; __m68k_trapv(); }
:trapv is opbig=0x4e & op37=14 & op02=6 { if (!VF) goto inst_next; __m68k_trapv(); }

:tst.b eab is (opbig=0x4a & op67=0)... & eab { logflags(); resflags(eab); }
:tst.w eaw is (opbig=0x4a & op67=1)... & eaw { logflags(); resflags(eaw); }
:tst.l eal is (opbig=0x4a & op67=2)... & eal { logflags(); resflags(eal); }

@ifdef COLDFIRE
# ColdFire TAS only allows memory alterable modes.
:tas eab is (opbig=0x4a & op67=3 & $(MEM_ALTER_ADDR_MODES))...
& eab { logflags(); resflags(eab); eab = eab | 0x80; }
@endif # COLDFIRE

# UNLK: unwind a stack frame established by LINK.
:unlk regan is opbig=0x4e & op37=11 & regan { SP = regan; regan = *SP; SP = SP+4; }

# UNPK: unpack two BCD digits, then add the adjustment extension word.
:unpk Tyw,Txw,const16 is op=8 & Txw & op48=24 & Tyw & rmbit=0; const16 { Txw = (Txw & 0xF0F0) | ((((Tyw & 0x00F0) << 4) | (Tyw & 0x000F)) + const16); }
:unpk Tyb,Txw,const16 is op=8 & Tyb & op48=24 & Txw & rmbit=1; const16 { local source:2 = zext(Tyb); source = (((source & 0x00F0) << 4) | (source & 0x000F)) + const16; Txw = (Txw & 0xF0F0) | source; }

# Floating Point Instructions
# 68040 directly implements Floating Point instructions but requires Coprocessor ID be 001
@ifdef MC68040
@define FP_COP "copid=1"
@define FP_FCOP "fcopid=1"
@else
@define FP_COP "epsilon"
@define FP_FCOP "epsilon"
@endif

# Condition codes
# FPU conditional predicates (MC68881/MC68882 predicate table). Predicates at
# complementary fcodes must export complementary conditions; the entries for
# gle (0x17), uge (0x0b), ugt (0x0a), or (0x07) and sne (0x1e) were corrected
# below — previously each duplicated its complement (ngle/olt/ole/un/seq)
# instead of negating it.
fcc: "eq" is fcode=0x01 { tmp:1 = $(Z_FP); clearflags_fp(); export tmp; }
fcc: "ne" is fcode=0x0e { tmp:1 = !($(Z_FP)); clearflags_fp(); export tmp; }
# note this is wrong in the manual
fcc: "gt" is fcode=0x12 { tmp:1 = !($(NAN_FP) || $(Z_FP) || $(N_FP)); clearflags_fp(); export tmp; }
fcc: "ngt" is fcode=0x1d { tmp:1 = $(NAN_FP) || $(Z_FP) || $(N_FP); clearflags_fp(); export tmp; }
fcc: "ge" is fcode=0x13 { tmp:1 = $(Z_FP) || !($(NAN_FP) || $(N_FP)); clearflags_fp(); export tmp; }
fcc: "nge" is fcode=0x1c { tmp:1 = $(NAN_FP) || ($(N_FP) && !$(Z_FP)); clearflags_fp(); export tmp; }
fcc: "lt" is fcode=0x14 { tmp:1 = $(N_FP) && !($(NAN_FP) || $(Z_FP)); clearflags_fp(); export tmp; }
fcc: "nlt" is fcode=0x1b { tmp:1 = $(NAN_FP) || ($(Z_FP) || !$(N_FP)); clearflags_fp(); export tmp; }
fcc: "le" is fcode=0x15 { tmp:1 = $(Z_FP) || ($(N_FP) && !$(NAN_FP)); clearflags_fp(); export tmp; }
fcc: "nle" is fcode=0x1a { tmp:1 = $(NAN_FP) || !($(N_FP) || $(Z_FP)); clearflags_fp(); export tmp; }
fcc: "gl" is fcode=0x16 { tmp:1 = !($(NAN_FP) || $(Z_FP)); clearflags_fp(); export tmp; }
fcc: "ngl" is fcode=0x19 { tmp:1 = $(NAN_FP) || $(Z_FP); clearflags_fp(); export tmp; }
# GLE (ordered) = !NAN; fixed so it complements NGLE.
fcc: "gle" is fcode=0x17 { tmp:1 = !($(NAN_FP)); clearflags_fp(); export tmp; }
fcc: "ngle" is fcode=0x18 { tmp:1 = $(NAN_FP); clearflags_fp(); export tmp; }
fcc: "ogt" is fcode=0x02 { tmp:1 = !($(NAN_FP) || $(Z_FP) || $(N_FP)); clearflags_fp(); export tmp; }
fcc: "ule" is fcode=0x0d { tmp:1 = $(NAN_FP) || $(Z_FP) || $(N_FP); clearflags_fp(); export tmp; }
fcc: "oge" is fcode=0x03 { tmp:1 = $(Z_FP) || !($(NAN_FP) || $(N_FP)); clearflags_fp(); export tmp; }
fcc: "ult" is fcode=0x0c { tmp:1 = $(NAN_FP) || ($(N_FP) && !$(Z_FP)); clearflags_fp(); export tmp; }
fcc: "olt" is fcode=0x04 { tmp:1 = $(N_FP) && !($(NAN_FP) || $(Z_FP)); clearflags_fp(); export tmp; }
# UGE = NAN v Z v !N (complement of OLT); the N term was not negated before.
fcc: "uge" is fcode=0x0b { tmp:1 = $(NAN_FP) || ($(Z_FP) || !$(N_FP)); clearflags_fp(); export tmp; }
fcc: "ole" is fcode=0x05 { tmp:1 = $(Z_FP) || ($(N_FP) && !$(NAN_FP)); clearflags_fp(); export tmp; }
# UGT = NAN v (!N ^ !Z) (complement of OLE); the NAN term was negated before.
fcc: "ugt" is fcode=0x0a { tmp:1 = $(NAN_FP) || !($(N_FP) || $(Z_FP)); clearflags_fp(); export tmp; }
fcc: "ogl" is fcode=0x06 { tmp:1 = !($(NAN_FP) || $(Z_FP)); clearflags_fp(); export tmp; }
fcc: "ueq" is fcode=0x09 { tmp:1 = $(NAN_FP) || $(Z_FP); clearflags_fp(); export tmp; }
# OR (ordered) = !NAN; fixed so it complements UN.
fcc: "or" is fcode=0x07 { tmp:1 = !($(NAN_FP)); clearflags_fp(); export tmp; }
fcc: "un" is fcode=0x08 { tmp:1 = $(NAN_FP); clearflags_fp(); export tmp; }
fcc: "f" is fcode=0x00 { export 0:1; }
fcc: "t" is fcode=0x0f { export 1:1; }
fcc: "sf" is fcode=0x10 { export 0:1; }
fcc: "st" is fcode=0x1f { export 1:1; }
fcc: "seq" is fcode=0x11 { tmp:1 = $(Z_FP); clearflags_fp(); export tmp; }
# SNE = !Z; fixed so it complements SEQ.
fcc: "sne" is fcode=0x1e { tmp:1 = !($(Z_FP)); clearflags_fp(); export tmp; }

@define FormatByteWordLongSimple "( ffmt=0 | ffmt=1 | ffmt=4 | ffmt=6 )"

# The following constraint should be used when using fprec
@define FPREC_BWL "fprec & ( ffmt=0 | ffmt=4 | ffmt=6 )" # Byte,Word,Long only
@define FPREC_S "fprec & ffmt=1" # Single only
@define FPREC_BWLS "fprec & ( ffmt=0 | ffmt=1 | ffmt=4 | ffmt=6 )" # Byte,Word,Long,Single only
@define FPREC_DXP "fprec & ( ffmt=2 | ffmt=3 | ffmt=5)" # Double,Extended,Packed only
@define FPREC_XP "fprec & ( ffmt=2 | ffmt=3)" # Extended,Packed only @define FPREC_DX "fprec & ( ffmt=2 | ffmt=5)" # Double,Extended only @define FPREC_X "fprec & ffmt=2" # Extended only @define FPREC_D "fprec & ffmt=5" # Double only @define FPREC_P "fprec & ffmt=3" # Packed only @define FPREC_Pd "fprec & ffmt=7" # Packed-dynamic only fprec: "l" is ffmt=0 {} fprec: "s" is ffmt=1 {} fprec: "x" is ffmt=2 {} fprec: "p" is ffmt=3 {} fprec: "w" is ffmt=4 {} fprec: "d" is ffmt=5 {} fprec: "b" is ffmt=6 {} fprec: "p" is ffmt=7 {} # 0 = long # 4 = word # 6 = byte # 1 = Single precision # 2 = Extended-Precision real # 3 = Packed-decimal real # 5 = Double-Precision real f_mem: e2l is ffmt=0; e2l { val:4 = e2l; tmp:10 = int2float(val); export tmp; } f_mem: e2w is ffmt=4; e2w { val:2 = e2w; tmp:10 = int2float(val); export tmp; } f_mem: e2b is ffmt=6; e2b { val:1 = e2b; tmp:10 = int2float(val); export tmp; } f_mem: e2l is ffmt=1; e2l { tmp:10 = float2float(e2l); export tmp; } f_mem: e2x is ffmt=2; e2x { tmp:10 = float2float(e2x); export tmp; } f_mem: e2x is ffmt=3; e2x { tmp:10 = float2float(e2x); export tmp; } f_mem: e2d is ffmt=5; e2d { tmp:10 = float2float(e2d); export tmp; } :fabs.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x18) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = abs(f_mem); } :fabs fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x18 { fdst = abs(fsrc); } @ifdef MC68040 fabsrnd: "s" is fdst & fopmode=0x58 { tmp:4 = float2float(fdst); fdst = float2float(tmp); } fabsrnd: "d" is fdst & fopmode=0x5c { tmp:8 = float2float(fdst); fdst = float2float(tmp); } :f^fabsrnd^"abs."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fabsrnd & fprec & (fopmode=0x58 | fopmode=0x5c)) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = abs(f_mem); build fabsrnd; }
:f^fabsrnd^"abs" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fabsrnd & (fopmode=0x58 | fopmode=0x5c) { fdst = abs(fsrc); build fabsrnd; }
@endif # MC68040

# FACOS: arc cosine.
:facos.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x1c) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = acos(f_mem);}
:facos fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x1c { fdst = acos(fsrc); }

# FADD.
:fadd.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x22) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f+ f_mem; }
:fadd fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x22 { fdst = fdst f+ fsrc; }

@ifdef MC68040
# FSADD/FDADD: result rounded to single/double by a round-trip float2float.
faddrnd: "s" is fdst & fopmode=0x62 { tmp:4 = float2float(fdst); fdst = float2float(tmp); }
faddrnd: "d" is fdst & fopmode=0x66 { tmp:8 = float2float(fdst); fdst = float2float(tmp); }
:f^faddrnd^"add."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & faddrnd & fprec & (fopmode=0x62 | fopmode=0x66)) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f+ f_mem; build faddrnd; }
:f^faddrnd^"add" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & faddrnd & (fopmode=0x62 | fopmode=0x66) { fdst = fdst f+ fsrc; build faddrnd; }
@endif # MC68040

# FASIN: arc sine.
:fasin.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x0c) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = asin(f_mem);}
:fasin fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x0c { fdst = asin(fsrc);}

# FATAN: arc tangent.
:fatan.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x0a) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = atan(f_mem);}
:fatan fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x0a { fdst = atan(fsrc);}

# FATANH (fopmode 0x0d): hyperbolic arc tangent.
# NOTE(review): this calls the tanh pcodeop, not an atanh — no atanh pcodeop is
# visible in this file to substitute; confirm against the pcodeop definitions
# before changing the semantics.
:fatanh.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x0d) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = tanh(f_mem);}
:fatanh fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x0d { fdst = tanh(fsrc);}

# FBcc: branch on FPU condition, 16- and 32-bit displacement forms.
:fb^fcc^".w" addr16 is fop=15 & $(FP_FCOP) & f0808=0 & f0707=1 & fsize=0 & fcc; addr16 { if (fcc) goto addr16; }
:fb^fcc^".l" addr32 is fop=15 & $(FP_FCOP) & f0808=0 & f0707=1 & fsize=1 & fcc; addr32 { if (fcc) goto addr32; }

# FCMP: compare via subtraction; only the FP condition flags are updated.
:fcmp.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x38) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { local result = fdst f- f_mem; resflags_fp(result); }
:fcmp fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x38 { local result=fdst f- fsrc; resflags_fp(result); }

# FCOS: cosine.
:fcos.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x1d) ...
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = cos(f_mem);} :fcos fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x1d { fdst = cos(fsrc);} :fcosh.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x19) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = cosh(f_mem);} :fcosh fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x19 { fdst = cos(fsrc);} :fdb^fcc fcnt, addr16 is fop=15 & $(FP_FCOP) & f0308=9 & fcnt; f0615=0 & fcc; addr16 { if (fcc) goto inst_next; fcnt = fcnt - 1; local tst = (fcnt == -1); if (!tst) goto addr16; } :fdiv.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x20) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f/ f_mem;} :fdiv fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x20 { fdst = fdst f/ fsrc;} @ifdef MC68040 fdivrnd: "s" is fdst & fopmode=0x60 { tmp:4 = float2float(fdst); fdst = float2float(tmp); } fdivrnd: "d" is fdst & fopmode=0x64 { tmp:8 = float2float(fdst); fdst = float2float(tmp); } :f^fdivrnd^"div."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fdivrnd & fprec & (fopmode=0x60 | fopmode=0x64)) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f/ f_mem; build fdivrnd; } :f^fdivrnd^"div" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fdivrnd & (fopmode=0x60 | fopmode=0x64) { fdst = fdst f/ fsrc; build fdivrnd; } @endif # MC68040 :fetox.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec& fopmode=0x10) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fetox(f_mem); }
:fetox fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x10 { fdst = fetox(fsrc); }

# FETOXM1: e^x - 1.
:fetoxm1.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x08) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fetoxm1(f_mem); }
:fetoxm1 fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x08 { fdst = fetoxm1(fsrc); }

# FGETEXP / FGETMAN: extract exponent / mantissa (opaque pcodeops).
:fgetexp.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst &fprec & fopmode=0x1e) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fgetexp(f_mem); }
:fgetexp fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x1e { fdst = fgetexp(fsrc); }
:fgetman.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x1f) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fgetman(f_mem); }
:fgetman fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x1f { fdst = fgetman(fsrc); }

# FINT: round to integer per the current rounding mode.
# NOTE(review): the memory form passes FPCR to the fint pcodeop but the register
# form does not — the two forms disagree on the pcodeop's arity; confirm which
# signature the analysis expects.
:fint.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x01) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fint(f_mem, FPCR); }
:fint fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x01 { fdst = fint(fsrc); }

# FINTRZ: round toward zero (truncate, then convert back to float).
:fintrz.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x03) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { tmp:8 = trunc(f_mem); fdst = int2float(tmp); }
:fintrz fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x03 { tmp:8 = trunc(fsrc); fdst = int2float(tmp); }

# FLOG10 / FLOG2 / FLOGN / FLOGNP1: logarithms (opaque pcodeops).
:flog10.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x15) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = flog10(f_mem); }
:flog10 fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x15 { fdst = flog10(fsrc); }
:flog2.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x16) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = flog2(f_mem); }
:flog2 fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x16 { fdst = flog2(fsrc); }
:flogn.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x14) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = flogn(f_mem); }
:flogn fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x14 { fdst = flogn(fsrc); }
:flognp1.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x06) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = flognp1(f_mem); }
:flognp1 fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x06 { fdst = flognp1(fsrc); }

# FMOD: IEEE remainder.
# NOTE(review): the fmod pcodeop is called with only the source operand — the
# dividend (fdst) is not passed; confirm the intended pcodeop signature.
:fmod.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0& fdst & fprec & fopmode=0x21) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fmod(f_mem); }
:fmod fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x21 { fdst = fmod(fsrc); }

# FMOVE to an FP register.
:fmove.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x00) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = f_mem; resflags_fp(fdst); }
:fmove fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x00 { fdst = fsrc; resflags_fp(fdst); }

@ifdef MC68040
# FSMOVE/FDMOVE: result rounded to single/double by a round-trip float2float.
fmovernd: "s" is fdst & fopmode=0x40 { tmp:4 = float2float(fdst); fdst = float2float(tmp); }
fmovernd: "d" is fdst & fopmode=0x44 { tmp:8 = float2float(fdst); fdst = float2float(tmp); }
:f^fmovernd^"move."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fmovernd & fprec & (fopmode=0x40 | fopmode=0x44)) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = f_mem; build fmovernd; }
:f^fmovernd^"move" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fmovernd & (fopmode=0x40 | fopmode=0x44) { fdst = fsrc; build fmovernd; }
@endif # MC68040

#TODO: Documented decoding (w/ coprocess id in bits 10-12) conflicts with ASL instruction and differs from Instruction Format Summary
# Convert float in fdst to an int and then move to byte
:fmove.b fdst, e2b is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & fkfacreg & ffmt=6; e2b [ savmod2=savmod1; regtsan=regtfan; ] { e2b = trunc(fdst); }
# Convert float in fdst to an int and then move to word 16-bits
:fmove.w fdst, e2w is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & fkfacreg & ffmt=4; e2w [ savmod2=savmod1; regtsan=regtfan; ] { e2w = trunc(fdst); }
# Convert float in fdst to an int and then move to long 32-bits
:fmove.l fdst, e2l is op=15 &
$(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & fkfacreg & ffmt=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = trunc(fdst); }
# destination is single float (32-bits)
:fmove.s fdst, e2l is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & fkfacreg & ffmt=1; e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = float2float(fdst); resflags_fp(e2l); }
# Extended (10-byte) destination.
:fmove.^fprec fdst, e2x is op=15 & $(FP_COP) & op68=0 & $(MEM_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & fkfacreg & $(FPREC_X); e2x [ savmod2=savmod1; regtsan=regtfan; ] { e2x = float2float(fdst); resflags_fp(e2x); }
# Double float (64-bits)
:fmove.^fprec fdst, e2d is op=15 & $(FP_COP) & op68=0 & $(MEM_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & fkfacreg & $(FPREC_D); e2d [ savmod2=savmod1; regtsan=regtfan; ] { e2d = float2float(fdst); resflags_fp(e2d); }

# kfact: k-factor for packed-decimal stores — static immediate or dynamic (register).
kfact: {"#"fkfactor} is fkfactor & $(FPREC_P) { local tmp:4 = fkfactor; export *[const]:4 tmp; }
kfact: {fkfacreg} is fkfacreg & $(FPREC_Pd) { export fkfacreg; }
:fmove.p fdst, e2l kfact is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & kfact & $(FPREC_P); e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = kfactor(fdst, kfact); }
:fmove.p fdst, e2l kfact is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); frm=1 & f1315=3 & fdst & kfact & $(FPREC_Pd); e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = kfactor(fdst, kfact); }

#Special case for FMOVEM.L and must occur before it within this file
:fmove.l e2l, FPCR is op=15 & $(FP_COP) & $(DAT_ALTER_ADDR_MODES) & op68=0 & FPCR; f1415=2 & fdr=0 & f1012=4 & f0009=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { FPCR = e2l; }
:fmove.l FPCR, e2l is op=15 & $(FP_COP) & $(DAT_ALTER_ADDR_MODES) & op68=0 & FPCR; f1415=2 & fdr=1 & f1012=4 & f0009=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = FPCR; }
:fmove.l e2l, FPSR is op=15 & $(FP_COP) & $(DAT_ALTER_ADDR_MODES) & op68=0 & FPSR; f1415=2 & fdr=0 & f1012=2 & f0009=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { FPSR = e2l; }
:fmove.l FPSR, e2l is op=15 & $(FP_COP) & $(DAT_ALTER_ADDR_MODES) & op68=0 & FPSR; f1415=2 & fdr=1 & f1012=2 & f0009=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = FPSR; }
:fmove.l e2l, FPIAR is op=15 & $(FP_COP) & op68=0 & FPIAR; f1415=2 & fdr=0 & f1012=1 & f0009=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { FPIAR = e2l; }
:fmove.l FPIAR, e2l is op=15 & $(FP_COP) & op68=0 & FPIAR; f1415=2 & fdr=1 & f1012=1 & f0009=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { e2l = FPIAR; }

#
# TODO: this table should contain all the rom constants
# FMOVECR ROM constants, modeled as doubles widened to the 10-byte internal format.
# The pi constant was corrected to 0x400921FB54442D18, the IEEE-754 double
# encoding of 3.141592653589793; the previous value 0x400921FB4D12D84A decodes
# to approximately 3.1415926 and is not pi.
romconst: is fromoffset=0x00 { tmp1:8 = 0x400921FB54442D18:8; tmp:10 = float2float(tmp1); export tmp; } # pi=3.14...
romconst: is fromoffset=0x0f { tmp1:8 = 0x0:8; tmp:10 = int2float(tmp1); export tmp; }
romconst: is fromoffset=0x32 { tmp1:8 = 0x01:8; tmp:10 = int2float(tmp1); export tmp; }
romconst: is fromoffset=0x33 { tmp1:8 = 10:8; tmp:10 = int2float(tmp1); export tmp; }
romconst: is fromoffset=0x34 { tmp1:8 = 100:8; tmp:10 = int2float(tmp1); export tmp; }
romconst: is fromoffset=0x35 { tmp1:8 = 10000:8; tmp:10 = int2float(tmp1); export tmp; }
romconst: is fromoffset=0x36 { tmp1:8 = 100000000:8; tmp:10 = int2float(tmp1); export tmp; }
# Fallback for offsets not modeled above: export 0.0.
romconst: is fromoffset { tmp1:8 = 0x0:8; tmp:10 = int2float(tmp1); export tmp; }
:fmovecr.x "#"^fromoffset, fdst is op=15 & $(FP_COP) & op08=0; f1015=0x17 & fdst & fromoffset & romconst { fdst = romconst; }

# Memory to Floating point register, forward direction
m2fpF7: FP0 is FP0 & frlist7=1 { FP0 = *movemptr; movemptr = movemptr + 12; }
m2fpF7: is frlist7=0 { }
m2fpF6: m2fpF7" "FP1 is FP1 & frlist6=1 & m2fpF7 { FP1 = *movemptr; movemptr = movemptr + 12; }
m2fpF6: m2fpF7 is frlist6=0 & m2fpF7 { }
m2fpF5: m2fpF6" "FP2 is FP2 & frlist5=1 & m2fpF6 { FP2 = *movemptr; movemptr = movemptr + 12; }
m2fpF5: m2fpF6 is frlist5=0 & m2fpF6 { }
m2fpF4: m2fpF5" "FP3 is FP3 & frlist4=1 & m2fpF5 { FP3 = *movemptr; movemptr = movemptr + 12; }
m2fpF4: m2fpF5 is frlist4=0 &
m2fpF5 { } m2fpF3: m2fpF4" "FP4 is FP4 & frlist3=1 & m2fpF4 { FP4 = *movemptr; movemptr = movemptr + 12; } m2fpF3: m2fpF4 is frlist3=0 & m2fpF4 { } m2fpF2: m2fpF3" "FP5 is FP5 & frlist2=1 & m2fpF3 { FP5 = *movemptr; movemptr = movemptr + 12; } m2fpF2: m2fpF3 is frlist2=0 & m2fpF3 { } m2fpF1: m2fpF2" "FP6 is FP6 & frlist1=1 & m2fpF2 { FP6 = *movemptr; movemptr = movemptr + 12; } m2fpF1: m2fpF2 is frlist1=0 & m2fpF2 { } m2fpF0: { m2fpF1" "FP7 } is FP7 & frlist0=1 & m2fpF1 { FP7 = *movemptr; movemptr = movemptr + 12; } m2fpF0: { m2fpF1 } is frlist0=0 & m2fpF1 { } # Memory to Floating point register, reverse direction m2fpR7: FP7 is FP7 & frlist7=1 { movemptr = movemptr - 12; FP7 = *movemptr; } m2fpR7: is frlist7=0 { } m2fpR6: m2fpR7" "FP6 is FP6 & frlist6=1 & m2fpR7 { movemptr = movemptr - 12; FP6 = *movemptr; } m2fpR6: m2fpR7 is frlist6=0 & m2fpR7 { } m2fpR5: m2fpR6" "FP5 is FP5 & frlist5=1 & m2fpR6 { movemptr = movemptr - 12; FP5 = *movemptr; } m2fpR5: m2fpR6 is frlist5=0 & m2fpR6 { } m2fpR4: m2fpR5" "FP4 is FP4 & frlist4=1 & m2fpR5 { movemptr = movemptr - 12; FP4 = *movemptr; } m2fpR4: m2fpR5 is frlist4=0 & m2fpR5 { } m2fpR3: m2fpR4" "FP3 is FP3 & frlist3=1 & m2fpR4 { movemptr = movemptr - 12; FP3 = *movemptr; } m2fpR3: m2fpR4 is frlist3=0 & m2fpR4 { } m2fpR2: m2fpR3" "FP2 is FP2 & frlist2=1 & m2fpR3 { movemptr = movemptr - 12; FP2 = *movemptr; } m2fpR2: m2fpR3 is frlist2=0 & m2fpR3 { } m2fpR1: m2fpR2" "FP1 is FP1 & frlist1=1 & m2fpR2 { movemptr = movemptr - 12; FP1 = *movemptr; } m2fpR1: m2fpR2 is frlist1=0 & m2fpR2 { } m2fpR0: { m2fpR1" "FP0 } is FP0 & frlist0=1 & m2fpR1 { movemptr = movemptr - 12; FP0 = *movemptr; } m2fpR0: { m2fpR1 } is frlist0=0 & m2fpR1 { } # Floating point register to Memory, forward direction fp2mF7: FP0 is FP0 & frlist7=1 { *movemptr = FP0; movemptr = movemptr + 12; } fp2mF7: is frlist7=0 { } fp2mF6: fp2mF7" "FP1 is FP1 & frlist6=1 & fp2mF7 { *movemptr = FP1; movemptr = movemptr + 12; } fp2mF6: fp2mF7 is frlist6=0 & fp2mF7 { } fp2mF5: 
fp2mF6" "FP2 is FP2 & frlist5=1 & fp2mF6 { *movemptr = FP2; movemptr = movemptr + 12; } fp2mF5: fp2mF6 is frlist5=0 & fp2mF6 { } fp2mF4: fp2mF5" "FP3 is FP3 & frlist4=1 & fp2mF5 { *movemptr = FP3; movemptr = movemptr + 12; } fp2mF4: fp2mF5 is frlist4=0 & fp2mF5 { } fp2mF3: fp2mF4" "FP4 is FP4 & frlist3=1 & fp2mF4 { *movemptr = FP4; movemptr = movemptr + 12; } fp2mF3: fp2mF4 is frlist3=0 & fp2mF4 { } fp2mF2: fp2mF3" "FP5 is FP5 & frlist2=1 & fp2mF3 { *movemptr = FP5; movemptr = movemptr + 12; } fp2mF2: fp2mF3 is frlist2=0 & fp2mF3 { } fp2mF1: fp2mF2" "FP6 is FP6 & frlist1=1 & fp2mF2 { *movemptr = FP6; movemptr = movemptr + 12; } fp2mF1: fp2mF2 is frlist1=0 & fp2mF2 { } fp2mF0: { fp2mF1" "FP7 } is FP7 & frlist0=1 & fp2mF1 { *movemptr = FP7; movemptr = movemptr + 12; } fp2mF0: { fp2mF1 } is frlist0=0 & fp2mF1 { } # Floating point register to Memory, reverse direction fp2mR7: FP7 is FP7 & frlist7=1 { movemptr = movemptr - 12; *movemptr = FP7; } fp2mR7: is frlist7=0 { } fp2mR6: fp2mR7" "FP6 is FP6 & frlist6=1 & fp2mR7 { movemptr = movemptr - 12; *movemptr = FP6; } fp2mR6: fp2mR7 is frlist6=0 & fp2mR7 { } fp2mR5: fp2mR6" "FP5 is FP5 & frlist5=1 & fp2mR6 { movemptr = movemptr - 12; *movemptr = FP5; } fp2mR5: fp2mR6 is frlist5=0 & fp2mR6 { } fp2mR4: fp2mR5" "FP4 is FP4 & frlist4=1 & fp2mR5 { movemptr = movemptr - 12; *movemptr = FP4; } fp2mR4: fp2mR5 is frlist4=0 & fp2mR5 { } fp2mR3: fp2mR4" "FP3 is FP3 & frlist3=1 & fp2mR4 { movemptr = movemptr - 12; *movemptr = FP3; } fp2mR3: fp2mR4 is frlist3=0 & fp2mR4 { } fp2mR2: fp2mR3" "FP2 is FP2 & frlist2=1 & fp2mR3 { movemptr = movemptr - 12; *movemptr = FP2; } fp2mR2: fp2mR3 is frlist2=0 & fp2mR3 { } fp2mR1: fp2mR2" "FP1 is FP1 & frlist1=1 & fp2mR2 { movemptr = movemptr - 12; *movemptr = FP1; } fp2mR1: fp2mR2 is frlist1=0 & fp2mR2 { } fp2mR0: { fp2mR1" "FP0 } is FP0 & frlist0=1 & fp2mR1 { movemptr = movemptr - 12; *movemptr = FP0; } fp2mR0: { fp2mR1 } is frlist0=0 & fp2mR1 { } # NB- when doing preincrement or postincrement 
modes, the movemptr that is set in e2x is used as the starting address for the move. # Then at completion of the move, the reg is set to the movemptr. # Note that movem (non-floating point) does this slightly differently) # # Not a predecrement or postincrement :fmovem.x fp2mF0, e2x is op=15 & $(FP_COP) & op68=0 & (mode=2 | mode=5 | mode=6 | mode=7); f1415=3 & fdr=1 & f0810=0 & fp2mF0 & flmode_t=0 & flmode_m=1; e2x [ savmod2=savmod1; regtsan=regtfan; ] { build fp2mF0; } # When mode=3 it's a postincrement :fmovem.x fp2mF0, e2x is regan & op=15 & $(FP_COP) & op68=0 & mode=3; f1415=3 & fdr=1 & f0810=0 & fp2mF0 & flmode_t=0 & flmode_m=1; e2x [ savmod2=savmod1; regtsan=regtfan; ] { build fp2mF0; regan = movemptr; } # When mode=4 it's a predecrement, and also must update the address register with the new address :fmovem.x fp2mR0, e2x is regan & op=15 & $(FP_COP) & op68=0 & mode=4; f1415=3 & fdr=1 & f0810=0 & fp2mR0 & flmode_t=0 & flmode_m=0; e2x [ savmod2=savmod1; regtsan=regtfan; ] { build fp2mR0; regan = movemptr; } # Not a predecrement or postincrement :fmovem.x e2x, m2fpF0 is op=15 & $(FP_COP) & op68=0 & (mode=2 | mode=5 | mode=6 | mode=7); f1415=3 & fdr=0 & f0810=0 & m2fpF0 & flmode_t=0 & flmode_m=1; e2x [ savmod2=savmod1; regtsan=regtfan; ] { build m2fpF0; } # When mode=3 it's a postincrement :fmovem.x e2x, m2fpF0 is regan & op=15 & $(FP_COP) & op68=0 & mode=3; f1415=3 & fdr=0 & f0810=0 & m2fpF0 & flmode_t=0 & flmode_m=1; e2x [ savmod2=savmod1; regtsan=regtfan; ] { build m2fpF0; regan = movemptr; } # When mode=4 it's a predecrement, and also must update the address register with the new address :fmovem.x e2x, m2fpR0 is regan & op=15 & $(FP_COP) & op68=0 & mode=4; f1415=3 & fdr=0 & f0810=0 & m2fpR0 & flmode_t=0 & flmode_m=0; e2x [ savmod2=savmod1; regtsan=regtfan; ] { build m2fpR0; regan = movemptr; } define pcodeop fmovem; # TODO: Pcode for dynamic register mask is PITA :fmovem.x fldynreg, e2l is op=15 & $(FP_COP) & op68=0 & $(POSTINC_CTL_ADDR_MODES); f1415=3 & 
fdr=1 & f0810=0 & fldynreg & flmode_t=1 & flmode_m=1; e2l [ savmod2=savmod1; regtsan=regtfan; ] { fmovem(e2l,fldynreg); } :fmovem.x fldynreg, e2l is op=15 & $(FP_COP) & op68=0 & $(PREDEC_CTL_ADDR_MODES); f1415=3 & fdr=1 & f0810=0 & fldynreg & flmode_t=1 & flmode_m=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { fmovem(e2l,fldynreg); } :fmovem.x e2l, fldynreg is op=15 & $(FP_COP) & op68=0 & $(POSTINC_CTL_ADDR_MODES); f1415=3 & fdr=0 & f0810=0 & fldynreg & flmode_t=1 & flmode_m=1; e2l [ savmod2=savmod1; regtsan=regtfan; ] { fmovem(e2l,fldynreg); } :fmovem.x e2l, fldynreg is op=15 & $(FP_COP) & op68=0 & mode=4; f1415=3 & fdr=0 & f0810=0 & fldynreg & flmode_t=1 & flmode_m=0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { fmovem(e2l,fldynreg); } # Memory to Floating point control register m2fpC2: FPCR is FPCR & f12=1 { FPCR = *movemptr; movemptr = movemptr + 12; } m2fpC2: is f12=0 { } m2fpC1: m2fpC2" "FPSR is FPSR & f11=1 & m2fpC2 { FPSR = *movemptr; movemptr = movemptr + 12; } m2fpC1: m2fpC2 is f11=0 & m2fpC2 { } m2fpC0: { m2fpC1" "FPIAR } is FPIAR & f10=1 & m2fpC1 { FPIAR = *movemptr; movemptr = movemptr + 12; } m2fpC0: { m2fpC1 } is f10=0 & m2fpC1 { } # Floating point control register to Memory fp2mC2: FPCR is FPCR & f12=1 { *movemptr = FPCR; movemptr = movemptr + 12; } fp2mC2: is f12=0 { } fp2mC1: fp2mC2" "FPSR is FPSR & f11=1 & fp2mC2 { *movemptr = FPSR; movemptr = movemptr + 12; } fp2mC1: fp2mC2 is f11=0 & fp2mC2 { } fp2mC0: { fp2mC1" "FPIAR } is FPIAR & f10=1 & fp2mC1 { *movemptr = FPIAR; movemptr = movemptr + 12; } fp2mC0: { fp2mC1 } is f10=0 & fp2mC1 { } :fmovem.l fp2mC0, e2l is op=15 & $(FP_COP) & $(MEM_ALTER_ADDR_MODES) & op68=0; f1315=5 & f0009=0 & fp2mC0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { movemptr = e2l; build fp2mC0; } :fmovem.l e2l, m2fpC0 is op=15 & $(FP_COP) & $(MEM_ALTER_ADDR_MODES) & op68=0; f1315=4 & f0009=0 & m2fpC0; e2l [ savmod2=savmod1; regtsan=regtfan; ] { movemptr = e2l; build m2fpC0; } :fmul.^fprec f_mem, fdst is op=15 & $(FP_COP) & 
op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x23) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f* f_mem; } :fmul fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x23 {fdst = fdst f* fsrc; } @ifdef MC68040 fmulrnd: "s" is fdst & fopmode=0x63 { tmp:4 = float2float(fdst); fdst = float2float(tmp); } fmulrnd: "d" is fdst & fopmode=0x67 { tmp:8 = float2float(fdst); fdst = float2float(tmp); } :f^fmulrnd^"mul."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fmulrnd & (fopmode=0x63 | fopmode=0x67)) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f* f_mem; build fmulrnd; } :f^fmulrnd^"mul" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fmulrnd & (fopmode=0x63 | fopmode=0x67) {fdst = fdst f* fsrc; build fmulrnd; } @endif # MC68040 :fneg.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x1a) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = f- f_mem; } :fneg fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x1a { fdst = f- fsrc; } @ifdef MC68040 fnegrnd: "s" is fdst & fopmode=0x5a { tmp:4 = float2float(fdst); fdst = float2float(tmp); } fnegrnd: "d" is fdst & fopmode=0x5e { tmp:8 = float2float(fdst); fdst = float2float(tmp); } :f^fnegrnd^"neg."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fnegrnd & (fopmode=0x5a | fopmode=0x5e)) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = f- f_mem; build fnegrnd; } :f^fnegrnd^"neg" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fnegrnd & (fopmode=0x5a | fopmode=0x5e) { fdst = f- fsrc; build fnegrnd; } @endif # MC68040 :fnop is fop=15 & $(FP_FCOP) & f0008=0x080; fword=0 { } :frem.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x25) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = frem(f_mem); } :frem fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x25 { fdst = frem(fsrc); } :frestore eal is (op=15 & $(FP_COP) & op68=5 & $(POSTINC_CTL_ADDR_MODES))... & eal { restoreFPUStateFrame(eal); } :fsave eal is (op=15 & $(FP_COP) & op68=4 & $(PREDEC_CTL_ADDR_MODES))... & eal { saveFPUStateFrame(eal); } :fscale.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x26) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fscale(f_mem); } :fscale fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x26 { fdst = fscale(fsrc); } # Need to set the destination to all 1s if the condition is true, else set to 0 # :fs^fcc e2b is op=15 & $(FP_COP) & op68=1 & $(DAT_ALTER_ADDR_MODES); f0615=0 & fcc; e2b [ savmod2=savmod1; regtsan=regtfan; ] { e2b = fcc * 0xff; } :fsgldiv.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x24) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { tmp:4 = float2float(fdst f/ f_mem); fdst = float2float(tmp); } :fsgldiv fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x24 { tmp:4 = float2float(fdst f/ fsrc); fdst = float2float(tmp); } # Floating point single precision multiply # TODO: set condition flags :fsglmul.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x27) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] {tmp:4 = float2float(fdst f* f_mem); fdst = float2float(tmp); } :fsglmul fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x27 {tmp:4 = float2float(fdst f* fsrc); fdst = float2float(tmp); } :fsin.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x0e) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = sin(f_mem); } :fsin fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x0e { fdst = sin(fsrc); } :fsincos.^fprec f_mem, fdcos, fdsin is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdcos & fdsin & fprec & f0306=6) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { tmp:10 = f_mem; fdsin = sin(tmp); fdcos = cos(tmp); } :fsincos.x fsrc, fdcos, fdsin is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdcos & fdsin & f0306=6 { tmp:10 = fsrc; fdsin = sin(tmp); fdcos = cos(tmp); } :fsinh.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x02) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = sinh(f_mem); } :fsinh fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x02 { fdst = sinh(fsrc); } :fsqrt.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x04) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = sqrt(f_mem); } :fsqrt.x fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x04 { fdst = sqrt(fsrc); } @ifdef MC68040 fsqrtrnd: "s" is fdst & fopmode=0x41 { tmp:4 = float2float(fdst); fdst = float2float(tmp); } fsqrtrnd: "d" is fdst & fopmode=0x45 { tmp:8 = float2float(fdst); fdst = float2float(tmp); } :f^fsqrtrnd^"sqrt."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fsqrtrnd & (fopmode=0x41 | fopmode=0x45)) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = sqrt(f_mem); build fsqrtrnd; } :f^fsqrtrnd^"sqrt.x" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fsqrtrnd & (fopmode=0x41 | fopmode=0x45) { fdst = sqrt(fsrc); build fsqrtrnd; } @endif # MC68040 :fsub.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x28) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f- f_mem; }

:fsub.x fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x28 { fdst = fdst f- fsrc; }

@ifdef MC68040
# FSSUB/FDSUB rounding suffix: the 68040 forms round the extended-precision
# result to single ("s", fopmode 0x68) or double ("d", fopmode 0x6c) by a
# narrowing/widening float2float round trip on fdst, same scheme as
# fmulrnd/fnegrnd/fsqrtrnd elsewhere in this file.
fsubrnd: "s" is fdst & fopmode=0x68 { tmp:4 = float2float(fdst); fdst = float2float(tmp); }
# BUGFIX: was "tmp:4", which rounded the FDSUB (double) result to single
# precision. The "d" form must round through an 8-byte temporary, matching
# fmulrnd "d", fnegrnd "d" and fsqrtrnd "d".
fsubrnd: "d" is fdst & fopmode=0x6c { tmp:8 = float2float(fdst); fdst = float2float(tmp); }

:f^fsubrnd^"sub."^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fsubrnd & fprec & (fopmode=0x68 | fopmode=0x6c)) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = fdst f- f_mem; build fsubrnd; }

:f^fsubrnd^"sub.x" fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fsubrnd & (fopmode=0x68 | fopmode=0x6c) { fdst = fdst f- fsrc; build fsubrnd; }
@endif # MC68040

# FTAN: tangent of the source operand, result in fdst.
:ftan.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x0f) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = tan(f_mem); }
:ftan.x fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x0f { fdst = tan(fsrc); }

# FTANH: hyperbolic tangent of the source operand, result in fdst.
:ftanh.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x09) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = tanh(f_mem); }
:ftanh.x fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x09 { fdst = tanh(fsrc); }

# FTENTOX (10^x, opmode 0x12) — the memory-operand constructor continues on
# the next source line.
:ftentox.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x12) ...
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = ftentox(f_mem); } :ftentox.x fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x12 { fdst = ftentox(fsrc); } :ftrap^fcc const16 is fop=15 & $(FP_FCOP) & f0308=0xf & fmode=2; f0615=0 & fcc; const16 { if (!fcc) goto inst_next; ftrap(const16); } :ftrap^fcc const32 is fop=15 & $(FP_FCOP) & f0308=0xf & fmode=3; f0615=0 & fcc; const32 { if (!fcc) goto inst_next; ftrap(const32); } :ftrap^fcc is fop=15 & $(FP_FCOP) & f0308=0xf & fmode=4; f0615=0 & fcc { if (!fcc) goto inst_next; ftrap(); } :ftst.^fprec f_mem is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x3a) ... & f_mem [ savmod2=savmod1; regtsan=regtfan; ] { tmp:10 = f_mem; resflags_fp(tmp); } :ftst.x fsrc is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x3a { tmp:10 = fsrc; resflags_fp(tmp); } :ftwotox.^fprec f_mem, fdst is op=15 & $(FP_COP) & op68=0 & $(DAT_ALTER_ADDR_MODES); (frm=1 & f1515=0 & f1313=0 & fdst & fprec & fopmode=0x11) ... 
& f_mem [ savmod2=savmod1; regtsan=regtfan; ] { fdst = ftwotox(f_mem); } :ftwotox.x fsrc, fdst is op=15 & $(FP_COP) & op68=0 & mode=0 & regan=0; frm=0 & f1515=0 & f1313=0 & fsrc & fdst & fopmode=0x11 { fdst = ftwotox(fsrc); } @ifdef COLDFIRE macregy: reg03ywl is reg03ywl; IS=0 { export reg03ywl; } macregy: reg03ywu is reg03ywu; IS=1 { export reg03ywu; } macregx: reg9dnl is reg9dnl & op6=0; bs=0 { export reg9dnl; } macregx: reg9dnu is reg9dnu & op6=0; bs=1 { export reg9dnu; } macregx: reg9anl is reg9anl & op6=1; bs=0 { export reg9anl; } macregx: reg9anu is reg9anu & op6=1; bs=1 { export reg9anu; } macrw: reg9dn is reg9dn & op6=0 { export reg9dn; } macrw: reg9an is reg9an & op6=1 { export reg9an; } macregy_e: ereg03y is ereg03y { export ereg03y; } macregyl: reg03yl is reg03yl & IS=0 { export reg03yl; } macregyl: reg03yu is reg03yu & IS=1 { export reg03yu; } macregxl: reg12xwl is reg12xwl & bs=0 { export reg12xwl; } macregxl: reg12xwu is reg12xwu & bs=1 { export reg12xwu; } scalefactor: "" is sfact=0 { export 0:1; } scalefactor: "<<1" is sfact=1 { export 1:1; } scalefactor: ">>1" is sfact=3 { export 2:1; } accreg: ACC0 is ACC0 & acclsb=0 ; accmsb=0 { export ACC0; } accreg: ACC1 is ACC1 & acclsb=1 ; accmsb=0 { export ACC1; } accreg: ACC2 is ACC2 & acclsb=0 ; accmsb=1 { export ACC2; } accreg: ACC3 is ACC3 & acclsb=1 ; accmsb=1 { export ACC3; } :ff1 regdn is reg315=0x98 & regdn { regdn = lzcount(regdn); VF = 0; CF = 0; resflags(regdn); } # MAC effective address table # size=long m_eal: (regan) is mode=2 & regan { export *:4 regan; } m_eal: (regan)+ is mode=3 & regan { local tmp = regan; regan = regan + 4; export *:4 tmp; } m_eal: -(regan) is mode=4 & regan { regan = regan - 4; export *:4 regan; } m_eal: (d16,regan) is mode=5 & regan; d16 { local tmp = regan + d16; export *:4 tmp; } :maaac.l reg03y, reg9dn^scalefactor, accreg, accw is (op=10 & reg9dn & reg03y & op6=0 & op8=0 & op45=0 ; fbit=0 & wl=1 & scalefactor & accw & odsize=1) ... & accreg ... 
{ local tmp = reg03y * reg9dn; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; accw = accw + tmp; } :maaac.l reg03y, reg9an^scalefactor, accreg, accw is (op=10 & reg9an & reg03y & op6=1 & op8=0 & op45=0 ; fbit=0 & wl=1 & scalefactor & accw & odsize=1) ... & accreg ... { local tmp = reg03y * reg9an; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; accw = accw + tmp; } :maaac.w macregy, macregx^scalefactor, accreg, accw is (op=10 & op8=0 & op45=0 ; fbit=0 & wl=0 & scalefactor & accw & odsize=1) ... & macregy ... & macregx ... & accreg ... { local tmp = macregy * macregx; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + sext(tmp); accw = accw + sext(tmp); } :mac.l reg03y, reg9dn^scalefactor, accreg is (op=10 & reg9dn & reg03y & op6=0 & op8=0 & op45=0 ; fbit=0 & wl=1 & scalefactor) ... & accreg ... { local tmp = reg03y * reg9dn; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; } :mac.l reg03y, reg9an^scalefactor, accreg is (op=10 & reg9an & reg03y & op6=1 & op8=0 & op45=0 ; fbit=0 & wl=1 & scalefactor) ... & accreg ... { local tmp = reg03y * reg9an; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; } :mac.w macregy, macregx^scalefactor, accreg is (op=10 & op8=0 & op45=0 ; fbit=0 & wl=0 & scalefactor) ... & macregy ... & macregx ... & accreg ... { local tmp = macregy * macregx; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + sext(tmp); } #mac with load :mac.w macregyl, macregxl^scalefactor, m_eal, macrw, accreg is ((op=10 & macrw & op8=0 ; macregxl & fbit=0 & macregyl & wl=0 & scalefactor) ... & accreg ...) ... 
& m_eal { local tmp = macregyl * macregxl; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + sext(tmp); macrw = m_eal; } :mac.l macregy_e, reg12x^scalefactor, m_eal, macrw, accreg is ((op=10 & macrw & op6=0 & op8=0 ; reg12x & fbit=0 & wl=1 & scalefactor & macregy_e) ... & accreg ...) ... & m_eal { local tmp = macregy_e * reg12x; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; macrw = m_eal; } :masac.l reg03y, reg9dn^scalefactor, accreg, accw is (op=10 & reg9dn & reg03y & op6=0 & op8=0 & op45=0 ; fbit=0 & wl=1 & scalefactor & accw & odsize=3) ... & accreg ... { local tmp = reg03y * reg9dn; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; accw = accw - tmp; } :masac.l reg03y, reg9an^scalefactor, accreg, accw is (op=10 & reg9an & reg03y & op6=1 & op8=0 & op45=0 ; fbit=0 & wl=1 & scalefactor & accw & odsize=3) ... & accreg ... { local tmp = reg03y * reg9an; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + tmp; accw = accw - tmp; } :masac.w macregy, macregx^scalefactor, accreg, accw is (op=10 & op8=0 & op45=0 ; fbit=0 & wl=0 & scalefactor & accw & odsize=3) ... & macregy ... & macregx ... & accreg ... 
{ local tmp = macregy * macregx; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg + sext(tmp); accw = accw - sext(tmp); } moveaccreg: ACC0 is ACC0 & op0910=0 { export ACC0; } moveaccreg: ACC1 is ACC1 & op0910=1 { export ACC1; } moveaccreg: ACC2 is ACC2 & op0910=2 { export ACC2; } moveaccreg: ACC3 is ACC3 & op0910=3 { export ACC3; } moveaccreg2: ACC0 is ACC0 & op01=0 { export ACC0; } moveaccreg2: ACC1 is ACC1 & op01=1 { export ACC1; } moveaccreg2: ACC2 is ACC2 & op01=2 { export ACC2; } moveaccreg2: ACC3 is ACC3 & op01=3 { export ACC3; } :move.l moveaccreg, reg03y is op=0b1010 & op11=0 & moveaccreg & op8=1 & op47=0b1000 & reg03y { reg03y = moveaccreg; } :move.l ACCext01, eal is (op=0b1010 & op811=0b1011 & op67=0 & ACCext01 & (mode=0 | mode=1 | mode=7)) ... & eal { ACCext01 = eal; } :move.l ACCext23, eal is (op=0b1010 & op811=0b1111 & op67=0 & ACCext23 & (mode=0 | mode=1 | mode=7)) ... & eal { ACCext23 = eal; } :move.l moveaccreg, eal is (op=0b1010 & op11=0 & moveaccreg & op8=1 & op67=0 & (mode=0 | mode=1 | mode=7)) ... & eal { moveaccreg = eal; } :move.l moveaccreg, moveaccreg2 is op=0b1010 & op11=0 & moveaccreg & op8=1 & op47=1 & moveaccreg2 { moveaccreg2 = moveaccreg; } :move.l MACSR, reg03y is op=0b1010 & op811=0b1001 & op47=0b1000 & MACSR & reg03y { reg03y = MACSR; } :move.l ACCext01, reg03y is op=0b1010 & op811=0b1011 & op47=0b1000 & ACCext01 & reg03y { reg03y = ACCext01; } :move.l ACCext23, reg03y is op=0b1010 & op811=0b1111 & op47=0b1000 & ACCext23 & reg03y { reg03y = ACCext23; } :move.l MASK, reg03y is op=10 & op811=13 & op47=8 & MASK & reg03y { reg03y = MASK; } :move.l MACSR, "CCR" is op=10 & op811=9 & op47=12 & MACSR { unpackflags(MACSR); } :move.l eal, MACSR is (op=10 & op611=36 & MACSR & (mode=0 | mode=1 | mode=7)) ... & eal { MACSR = eal; } :move.l eal, MASK is (op=10 & op611=52 & MASK & (mode=0 | mode=1 | mode=7)) ... 
& eal { MASK = eal; } :msaac.l reg03y, reg9dn^scalefactor, accreg, accw is (op=10 & reg9dn & reg03y & op6=0 & op8=0 & op45=0 ; fbit=1 & wl=1 & scalefactor & accw & odsize=1) ... & accreg ... { local tmp = reg03y * reg9dn; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; accw = accw + tmp; } :msaac.l reg03y, reg9an^scalefactor, accreg, accw is (op=10 & reg9an & reg03y & op6=1 & op8=0 & op45=0 ; fbit=1 & wl=1 & scalefactor & accw & odsize=1) ... & accreg ... { local tmp = reg03y * reg9an; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; accw = accw + tmp; } :msaac.w macregy, macregx^scalefactor, accreg, accw is (op=10 & op8=0 & op45=0 ; fbit=1 & wl=0 & scalefactor & accw & odsize=1) ... & macregy ... & macregx ... & accreg ... { local tmp = macregy * macregx; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - sext(tmp); accw = accw + sext(tmp); } :msac.l reg03y, reg9dn^scalefactor, accreg is (op=10 & reg9dn & reg03y & op6=0 & op8=0 & op45=0 ; fbit=1 & wl=1 & scalefactor) ... & accreg ... { local tmp = reg03y * reg9dn; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; } :msac.l reg03y, reg9an^scalefactor, accreg is (op=10 & reg9an & reg03y & op6=1 & op8=0 & op45=0 ; fbit=1 & wl=1 & scalefactor) ... & accreg ... { local tmp = reg03y * reg9an; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; } :msac.w macregy, macregx^scalefactor, accreg is (op=10 & op6=0 & op8=0 & op45=0 ; fbit=1 & wl=0 & scalefactor) ... & macregy ... & macregx ... & accreg ... { local tmp = macregy * macregx; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - sext(tmp); } # MSAC with load :msac.w macregyl, macregxl^scalefactor, m_eal, macrw, accreg is ((op=10 & macrw & op8=0 ; macregxl & fbit=1 & macregyl & wl=0 & scalefactor) ... & accreg ...) ... 
& m_eal
{ local tmp = macregyl * macregxl; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - sext(tmp); macrw = m_eal; }

# MSAC.L with parallel load: multiply-subtract from accreg while loading macrw
# from the effective address.
:msac.l macregy_e, reg12x^scalefactor, m_eal, macrw, accreg is ((op=10 & macrw & op8=0 ; reg12x & fbit=1 & wl=1 & scalefactor & macregy_e) ... & accreg ...) ... & m_eal
{ local tmp = macregy_e * reg12x; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; macrw = m_eal; }

# MSSAC: multiply, subtract product from accreg AND subtract it from accw.
# fbit=1 selects the subtract-from-accreg (MSAC) family; odsize=3 selects the
# subtract-from-accw variant (odsize=1 is the add-from-accw MSAAC family above).
# BUGFIX: this constructor's mnemonic was misspelled "msaac.l"; its constraints
# (fbit=1 & odsize=3) and semantics (both accumulators subtract) are MSSAC,
# matching the two sibling constructors directly below and the CFPRM mnemonic list.
:mssac.l reg03y, reg9dn^scalefactor, accreg, accw is (op=10 & reg9dn & reg03y & op6=0 & op8=0 & op45=0 ; fbit=1 & wl=1 & scalefactor & accw & odsize=3) ... & accreg ...
{ local tmp = reg03y * reg9dn; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; accw = accw - tmp; }

:mssac.l reg03y, reg9an^scalefactor, accreg, accw is (op=10 & reg9an & reg03y & op6=1 & op8=0 & op45=0 ; fbit=1 & wl=1 & scalefactor & accw & odsize=3) ... & accreg ...
{ local tmp = reg03y * reg9an; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - tmp; accw = accw - tmp; }

# MSSAC.W — the word form's action block continues on the next source line.
:mssac.w macregy, macregx^scalefactor, accreg, accw is (op=10 & op8=0 & op45=0 ; fbit=1 & wl=0 & scalefactor & accw & odsize=3) ... & macregy ... & macregx ... & accreg ...
{ local tmp = macregy * macregx; tmp = tmp << (scalefactor == 1); tmp = tmp >> (scalefactor == 2); accreg = accreg - sext(tmp); accw = accw - sext(tmp); } @endif } # end with : extGUARD=1 ================================================ FILE: pypcode/processors/68000/data/languages/68000_register.cspec ================================================ ================================================ FILE: pypcode/processors/68000/data/languages/68020.slaspec ================================================ @include "68000.sinc" ================================================ FILE: pypcode/processors/68000/data/languages/68030.slaspec ================================================ @define MC68030 "" @include "68000.sinc" ================================================ FILE: pypcode/processors/68000/data/languages/68040.slaspec ================================================ @define MC68040 "" @include "68000.sinc" ================================================ FILE: pypcode/processors/68000/data/languages/coldfire.slaspec ================================================ # Motorola's Coldfire processor @define COLDFIRE "" @define MC68040 "" @include "68000.sinc" ================================================ FILE: pypcode/processors/68000/data/manuals/68000.idx ================================================ @M68000PRM.pdf [M68000 FAMILY Programmer's Reference Manual, 1992 (M68000PRM/AD REV.1)] ABCD, 106 ADD, 108 ADDA, 111 ADDI, 113 ADDQ, 115 ADDX, 117 AND, 119 ANDI, 122 ASL, 125 ASR, 126 B, 129 BCHG, 131 BCLR, 134 BFCHG, 137 BFCLR, 139 BFEXTS, 141 BFEXTU, 144 BFFFO, 147 BFINS, 150 BFSET, 153 BFTST, 155 BGND, 544 BKPT, 157 BRA, 159 BSET, 160 BSR, 163 BTST, 165 CALLM, 168 CAS, 170 CAS2, 171 CHK, 172 CHK2, 175 CINV, 457 CLR, 177 CMP, 179 CMPA, 181 CMPI, 183 CMPM, 185 CMP2, 186 cpB, 188 cpDB, 189 cpGEN, 190 cpRESTORE, 459 cpSAVE, 461 cpS, 191 cpTRAPcc, 193 CPUSH, 462 DB, 194 DIVS, 196 DIVSL, 197 DIVU, 200 DIVUL, 200 EOR, 204 EORI, 206 EXG, 207 EXT, 210 EXTB, 
210 FABS, 307 FACOS, 310 FADD, 313 FASIN, 316 FATAN, 319 FATANH, 322 FB, 325 FCMP, 327 FCOS, 330 FCOSH, 333 FDB, 336 FDIV, 338 FETOX, 342 FETOXM1, 345 FGETEXP, 348 FGETMAN, 351 FINT, 354 FINTRZ, 357 FLOG10, 360 FLOG2, 363 FLOGN, 366 FLOGNP1, 369 FMOD, 372 FMOVE, 375 FMOVECR, 385 FMOVEM, 387 FMUL, 396 FNEG, 400 FNOP, 404 FREM, 406 FRESTORE, 465 FSAVE, 468 FSCALE, 409 FS, 412 FSGLDIV, 414 FSGLMUL, 417 FSIN, 420 FSINCOS, 423 FSINH, 427 FSQRT, 430 FSUB, 434 FTAN, 438 FTANH, 441 FTENTOX, 444 FTRAP, 447 FTST, 449 FTWOTOX, 452 ILLEGAL, 211 JMP, 211 JSR, 212 LEA, 213 LINK, 215 LPSTOP, 545 LSL, 217 LSR, 217 MOVE, 220 MOVEA, 223 MOVE USP, 475 MOVE16, 230 MOVEC, 476 MOVEM, 232 MOVEP, 235 MOVEQ, 238 MOVES, 478 MULS, 239 MULU, 242 NBCD, 245 NEG, 247 NEGX, 249 NOP, 251 NOT, 252 OR, 254 ORI, 257 PACK, 260 PB, 482 PDB, 484 PEA, 263 PFLUSH, 486 PFLUSHA, 492 PFLUSHR, 495 PFLUSHS, 492 PLOAD, 497 PMOVE, 501 PRESTORE, 511 PSAVE, 513 PScc, 515 PTEST, 517 PTRAPcc, 532 PVALID, 534 RESET, 537 ROL, 264 ROR, 264 ROXL, 267 ROXR, 267 RTD, 270 RTE, 538 RTM, 271 RTR, 272 RTS, 273 SBCD, 274 S, 276 STOP, 539 SUB, 278 SUBA, 281 SUBI, 283 SUBQ, 285 SUBX, 287 SWAP, 289 TAS, 290 TBLS, 546 TBLSN, 546 TBLU, 551 TBLUN, 551 TRAP, 292 TRAPcc, 293 TRAPV, 295 TST, 296 UNLK, 298 UNPK, 299 @CFPRM.pdf [ColdFire Family Programmer's Reference Manual, 03/2005 (CFPRM, REV.3)] BITREV, 95 BYTEREV, 102 CPUSHL, 262 FF1, 114 HALT, 267 INTOUCH, 268 MAAAC, 182 MAC, 166 MASAC, 188 MOV3Q, 122 MOVCLR, 190 MSAAC, 207 MSAC, 177 MSSAC, 213 MVS, 135 MVZ, 136 PULSE, 145 REMS, 146 REMU, 147 SATS, 149 STRLDSR, 276 TPF, 159 WDDATA, 163 WDEBUG, 278 ================================================ FILE: pypcode/processors/68000/data/patterns/68000_patterns.xml ================================================ 0x4e 0x75 0x4e 0x75 0x4e 0x71 0x4e 0x75 0x00 0x00 0x4e 0x5e 0x4e 0x75 0x4e 0x5e 0x4e 0x75 0x4e 0x71 0x4e 0x5e 0x4e 0x75 0x00 0x00 01001111 11101111 1111.... .......0 0x4e 0x56 0x00 0x00 0x4e 0x56 1111.... 
.......0 0101...1 10001111 01001000 11010111 ........ ........ 0010...0 0.101111 0000.... .......0 0x48 0xe7 ........ ........ 0x2f 0x02 0x2f 0x03 0x2f 0x0a 0x2f 0x0b 0x60 0x00 ........ ........ 01001111 11101111 1111.... .......0 0x4e 0x56 0x00 0x00 0x4e 0x56 1111.... .......0 ================================================ FILE: pypcode/processors/68000/data/patterns/patternconstraints.xml ================================================ 68000_patterns.xml ================================================ FILE: pypcode/processors/8048/data/languages/8048.cspec ================================================ ================================================ FILE: pypcode/processors/8048/data/languages/8048.ldefs ================================================ 8048 Microcontroller Family ================================================ FILE: pypcode/processors/8048/data/languages/8048.pspec ================================================ ================================================ FILE: pypcode/processors/8048/data/languages/8048.slaspec ================================================ # sleigh specification file for Intel 8048 # # The MCS-48 family can only handle a 4kB (12 bits) address space. # However, some applications use a custom method to access multiple # banks of 4kB, such as an IO pin driving extra address lines on an # external ROM IC. # # To be able to parse those non-standard >4kB ROMs, this implementation # keeps track of 16-bit addresses by simply preserving the upper 4 bits # (see Addr8 and Addr12 constructors). # # To redirect the flow to a different 4kB bank, it is necessary to manually # set a flow override (with Fallthrough->Set) on the specific instruction. # # That cannot really be automated at this level because there is no "standard" # mechanism for external bank control. 
# Do not take BS into account when decompiling @define SINGLE_REGISTER_BANK "" # Treat R0-R7 as not memory mapped (implies SINGLE_REGISTER_BANK) @define INTERNAL_REGISTERS "" @ifdef INTERNAL_REGISTERS @define SINGLE_REGISTER_BANK "" @endif define endian=little; define alignment=1; define space CODE type=ram_space size=2 default; define space INTMEM type=ram_space size=1; define space EXTMEM type=ram_space size=1; define space PORT type=ram_space size=1; define space register type=register_space size=1; define register offset=0x00 size=1 [ A SP ]; @ifdef INTERNAL_REGISTERS define register offset=0x10 size=1 [ R0 R1 R2 R3 R4 R5 R6 R7 ]; @endif define register offset=0x20 size=2 [ PC ]; define register offset=0x30 size=1 [ C AC F0 F1 BS ]; # single bit define register offset=0x80 size=4 bankreg; define context bankreg DBF=(0,0) ; ################################################################ # Tokens ################################################################ define token opbyte (8) opfull = (0,7) oplo = (0,3) ophi = (4,7) rn = (0,2) dec rnfill = (3,3) ri = (0,0) dec rifill = (1,3) opaddr = (5,7) addrfill = (4,4) pp = (0,1) dec xpp = (0,1) dec ppfill = (2,3) abit = (5,7) dec abfill = (4,4) dbf = (4,4) bs = (4,4) ; define token aopword (16) aoplo = (0,3) aaddrfill = (4,4) aopaddr = (5,7) adata = (8,15) ; define token ImmedByte (8) data=(0,7); define token AddrOne (8) addr8=(0,7); @ifdef INTERNAL_REGISTERS attach variables rn [ R0 R1 R2 R3 R4 R5 R6 R7 ]; attach variables ri [ R0 R1 ]; @else attach names rn [ R0 R1 R2 R3 R4 R5 R6 R7 ]; attach names ri [ R0 R1 ]; @endif attach names abit ["0" "1" "2" "3" "4" "5" "6" "7"]; attach names dbf [ MB0 MB1 ]; attach names bs [ RB0 RB1 ]; attach names pp [ BUS P1 P2 _ ]; attach names xpp [ P4 P5 P6 P7 ]; ################################################################ # Pseudo Instructions ################################################################ define pcodeop nop; define pcodeop enableExtInt; define pcodeop 
enableTCntInt; define pcodeop enableClockOutput; define pcodeop disableExtInt; define pcodeop disableTCntInt; define pcodeop startTimer; define pcodeop startEventCounter; define pcodeop stopTimerAndEventCounter; define pcodeop setTmr; define pcodeop getTmr; define pcodeop getT0; define pcodeop getT1; define pcodeop getTF; define pcodeop getExtInt; define pcodeop readPort; define pcodeop writePort; define pcodeop setBank; ################################################################ # Macros ################################################################ macro getPSW(reg) { local tmp:1 = 0; tmp[7,1] = C; tmp[6,1] = AC; tmp[5,1] = F0; tmp[4,1] = BS; tmp[3,1] = 1; tmp[0,3] = (SP>>1)&7; reg = tmp; } macro setPSW(reg) { local tmp:1 = reg; C = tmp[7,1]; AC = tmp[6,1]; F0 = tmp[5,1]; BS = tmp[4,1]; SP = 2*tmp[0,3] + 8; } macro savePSWtoPC(pc) { pc[15,1] = C; pc[14,1] = AC; pc[13,1] = F0; pc[12,1] = BS; } macro restorePSWfromPC(pc) { C = pc[15,1]; AC = pc[14,1]; F0 = pc[13,1]; BS = pc[12,1]; } macro push(v) { *[INTMEM]:2 SP = v; SP = SP + 2; } macro pop(v) { SP = SP - 2; v = *[INTMEM]:2 SP; } macro popPC(pc) { pop(pc); pc = pc & 0xfff; } macro popPCandPSW(pc) { pop(pc); restorePSWfromPC(pc); pc = pc & 0xfff; } macro funcall(target) { ret:2 = inst_next; savePSWtoPC(ret); push(ret); call target; } macro add(dest, op1, op2, cy_in) { local result:1 = op1 + op2 + cy_in; local half_result:1 = (op1 & 0xf) + (op2 & 0xf) + cy_in; C = carry(op1, op2) || carry(op1+op2, cy_in); AC = (half_result > 0xf); dest = result; } macro da(reg) { local tmp:1 = reg; local low:1 = 6*(AC || (tmp&0xf) > 9); local cy1:1 = C || carry(tmp, low); tmp = tmp + low; local high:1 = 0x60*(cy1 || tmp > 0x99); C = C || carry(tmp, high); tmp = tmp + high; reg = tmp; } macro rotc(cy, acc) { local tmp:1 = cy; A = acc; C = tmp; } macro xch(node1, node2) { local tmp:1 = node1; node1 = node2; node2 = tmp; } @ifdef SINGLE_REGISTER_BANK macro regbank(r) { r = r; } macro setbank(bs) { BS = bs; local tmp:1 = bs; 
setBank(tmp); } @else macro regbank(r) { r = r + BS*0x18; } macro setbank(bs) { BS = bs; } @endif ################################################################ Psw: "PSW" is epsilon { } ExtInt: "I" is epsilon { } TCntInt: "TCNTI" is epsilon { } Clk: "CLK" is epsilon { } Tmr: "T" is epsilon { } Cnt: "CNT" is epsilon { } TmrCnt: "TCNT" is epsilon { } @ifdef INTERNAL_REGISTERS Rn: rn is rn & rnfill=1 { export rn; } Rind: @ri is ri & rifill=0 { export ri; } @else Rn: rn is rn & rnfill=1 { local ptr:1 = rn; regbank(ptr); export *[INTMEM]:1 ptr; } Rind: @ri is ri & rifill=0 { local ptr:1 = ri; regbank(ptr); export *[INTMEM]:1 ptr; } @endif Ri: Rind is Rind { export *[INTMEM]:1 Rind; } RiX: Rind is Rind { export *[EXTMEM]:1 Rind; } PData: @A is A { local addr:2 = inst_next; addr[0,8] = A; export *[CODE]:1 addr; } P3Data: @A is A { local addr:2 = 0x300; addr[0,8] = A; export *[CODE]:1 addr; } AddrInd: PData is PData { local addr:2 = inst_next; addr[0,8] = PData; export addr; } Ab: abit is abit { local bit:1 = (A>>abit)&1; export bit; } Data: "#"^data is data { export *[const]:1 data; } Imm: Data is oplo=3; Data { export Data; } Addr8: addr is addr8 [ addr = (inst_next $and 0xff00)+addr8; ] { export *[CODE]:1 addr; } Addr12: addr is aopaddr & adata [ addr = (inst_next & 0xf000) + (DBF*0x800) + (aopaddr*256)+adata; ] { export *[CODE]:1 addr; } Bus: "BUS" is epsilon { local tmp:1 = 0; export *[PORT]:1 tmp; } Pp: pp is pp & ppfill=2 { export *[PORT]:1 pp; } Xpp: xpp is xpp & ppfill=3 { local tmp:1 = xpp+4; export *[PORT]:1 tmp; } Cc: "C" is ophi=15 { export C; } Cc: "F0" is ophi=11 { export F0; } Cc: "F1" is ophi=7 { export F1; } Cc: "NC" is ophi=14 { tmp:1 = !C; export tmp; } Cc: "NI" is ophi=8 { tmp:1 = getExtInt(); tmp = !tmp; export tmp; } Cc: "NT0" is ophi=2 { tmp:1 = getT0(); tmp = !tmp; export tmp; } Cc: "NT1" is ophi=4 { tmp:1 = getT1(); tmp = !tmp; export tmp; } Cc: "NZ" is ophi=9 { tmp:1 = A!=0; export tmp; } Cc: "TF" is ophi=1 { tmp:1 = getTF(); export tmp; } Cc: 
"T0" is ophi=3 { tmp:1 = getT0(); export tmp; } Cc: "T1" is ophi=5 { tmp:1 = getT1(); export tmp; } Cc: "Z" is ophi=12 { tmp:1 = A==0; export tmp; } # Conventience tables for opcodes taking both Rn and Ri (and Imm) Rni: Rn is Rn { export Rn; } Rni: Ri is Ri { export Ri; } RniI: Rni is Rni { export Rni; } RniI: Imm is Imm { export Imm; } :ADD A,Rni is ophi=6 & (rnfill=1 | rifill=0) & A & Rni { add(A,A,Rni,0); } :ADD A,Imm is (ophi=0 & A)... & Imm { add(A,A,Imm,0); } :ADDC A,Rni is ophi=7 & A & (rnfill=1 | rifill=0) & Rni { add(A,A,Rni,C); } :ADDC A,Imm is (ophi=1 & A)... & Imm { add(A,A,Imm,C); } :ANL A,RniI is (ophi=5 & (rnfill=1 | rifill=0 | oplo=3) & A)... & RniI { A = A & RniI; } :ANL Pp,Data is ophi=9 & ppfill=2 & Pp; Data { Pp = Pp & Data; } :ANLD Xpp,A is ophi=9 & ppfill=3 & Xpp & A { Xpp = Xpp & (A & 0xf); } :CALL Addr12 is aopaddr & aaddrfill=1 & aoplo=4 & Addr12 { funcall(Addr12); } :CLR A is ophi=2 & oplo=7 & A { A = 0; } :CLR C is ophi=9 & oplo=7 & C { C = 0; } :CLR F0 is ophi=8 & oplo=5 & F0 { F0 = 0; } :CLR F1 is ophi=10 & oplo=5 & F1 { F1 = 0; } :CPL A is ophi=3 & oplo=7 & A { A = ~A; } :CPL C is ophi=10 & oplo=7 & C { C = !C; } :CPL F0 is ophi=9 & oplo=5 & F0 { F0 = !F0; } :CPL F1 is ophi=11 & oplo=5 & F1 { F1 = !F1; } :DA A is ophi=5 & oplo=7 & A { da(A); } :DEC A is ophi=0 & oplo=7 & A { A = A - 1; } :DEC Rn is ophi=12 & Rn { Rn = Rn - 1; } :DIS ExtInt is ophi=1 & oplo=5 & ExtInt { disableExtInt(); } :DIS TCntInt is ophi=3 & oplo=5 & TCntInt { disableTCntInt(); } :DJNZ Rn,Addr8 is ophi=14 & Rn; Addr8 { Rn = Rn - 1; if(Rn != 0) goto Addr8; } :EN ExtInt is ophi=0 & oplo=5 & ExtInt { enableExtInt(); } :EN TCntInt is ophi=2 & oplo=5 & TCntInt { enableTCntInt(); } :ENT0 Clk is ophi=7 & oplo=5 & Clk { enableClockOutput(); } :IN A,Pp is ophi=0 & pp!=0 & A & Pp { A = Pp; } :INC A is ophi=1 & oplo=7 & A { A = A + 1; } :INC Rni is ophi=1 & (rnfill=1 | rifill=0) & Rni { Rni = Rni + 1; } :INS A,Bus is ophi=0 & oplo=8 & A & Bus { A = Bus; } :JB^Ab Addr8 is 
oplo=2 & opaddr & abfill=1 & Ab; Addr8 { if(Ab) goto Addr8; } :J^Cc Addr8 is ophi & oplo=6 & Cc; Addr8 { if(Cc) goto Addr8; } :JMP Addr12 is aopaddr & aaddrfill=0 & aoplo=4 & Addr12 { goto Addr12; } :JMPP AddrInd is ophi=11 & oplo=3 & AddrInd { goto [AddrInd]; } :MOV A,Imm is (ophi=2 & A)... & Imm { A = Imm; } :MOV A,Psw is ophi=12 & oplo=7 & A & Psw { getPSW(A); } :MOV A,Rni is ophi=15 & A & (rnfill=1 | rifill=0) & Rni { A = Rni; } :MOV A,Tmr is ophi=4 & oplo=2 & A & Tmr { A = getTmr(); } :MOV Psw,A is ophi=13 & oplo=7 & Psw & A { setPSW(A); } :MOV Rni,A is ophi=10 & (rnfill=1 | rifill=0) & Rni & A { Rni = A; } :MOV Rni,Data is ophi=11 & (rnfill=1 | rifill=0) & Rni; Data { Rni = Data; } :MOV Tmr,A is ophi=6 & oplo=2 & Tmr & A { setTmr(A); } :MOVD A,Xpp is ophi=0 & Xpp & A { A = (Xpp & 0xf); } :MOVD Xpp,A is ophi=3 & Xpp & A { Xpp = (A & 0xf); } :MOVP A,PData is ophi=10 & oplo=3 & A & PData { A = PData; } :MOVP3 A,P3Data is ophi=14 & oplo=3 & A & P3Data { A = P3Data; } :MOVX A,RiX is ophi=8 & A & RiX { A = RiX; } :MOVX RiX,A is ophi=9 & RiX & A { RiX = A; } :NOP is ophi=0 & oplo=0 { nop(); } :ORL A,RniI is (ophi=4 & (rnfill=1 | rifill=0 | oplo=3) & A)... 
& RniI { A = A | RniI; } :ORL Pp,Data is ophi=8 & Pp; Data { Pp = Pp | Data; } :ORLD Xpp,A is ophi=8 & Xpp & A { Xpp = Xpp | (A & 0xf); } :OUTL Bus,A is ophi=0 & oplo=2 & Bus & A { Bus = A; } :OUTL Pp,A is ophi=3 & pp!=0 & Pp & A { Pp = A; } :RET is ophi=8 & oplo=3 { pc:2 = 0; popPC(pc); return[pc]; } :RETR is ophi=9 & oplo=3 { pc:2 = 0; popPCandPSW(pc); return[pc]; } :RL A is ophi=14 & oplo=7 & A { A = (A<<1) | (A>>7); } :RLC A is ophi=15 & oplo=7 & A { rotc((A&0x80)>>7, (A<<1)|C); } :RR A is ophi=7 & oplo=7 & A { A = (A>>1) | (A<<7); } :RRC A is ophi=6 & oplo=7 & A { rotc(A&1, (A>>1)|(C<<7)); } :SEL dbf is (ophi=14 | ophi=15) & oplo=5 & dbf [ DBF=dbf; globalset(inst_next,DBF); ] {} :SEL bs is (ophi=12 | ophi=13) & oplo=5 & bs { setbank(bs); } :STOP TmrCnt is ophi=6 & oplo=5 & TmrCnt { stopTimerAndEventCounter(); } :STRT Cnt is ophi=4 & oplo=5 & Cnt { startEventCounter(); } :STRT Tmr is ophi=5 & oplo=5 & Tmr { startTimer(); } :SWAP A is ophi=4 & oplo=7 & A { A = (A<<4)|(A>>4); } :XCH A,Rni is ophi=2 & (rnfill=1 | rifill=0) & A & Rni { xch(A, Rni); } :XCHD A,Ri is ophi=3 & A & Ri { xch(A[0,4], Ri[0,4]); } :XRL A,RniI is (ophi=13 & (rnfill=1 | rifill=0 | oplo=3) & A)... 
& RniI { A = A ^ RniI; } ================================================ FILE: pypcode/processors/8048/data/manuals/8048.idx ================================================ @8048.pdf [MCS-48 Microcomputer User's Manual, February 1978] ADD, 63 ADDC, 63 ANL, 64 ANLD, 65 CALL, 66 CLR, 67 CPL, 67 DA, 68 DEC, 68 DIS, 69 DJNZ, 69 EN, 70 ENT0, 70 IN, 70 INC, 71 INS, 72 JB, 72 JC, 72 JF0, 72 JF1, 73 JMP, 73 JMPP, 73 JNC, 73 JNI, 74 JNT0, 74 JNT1, 74 JNZ, 74 JTF, 75 JT0, 75 JT1, 75 JZ, 75 MOV, 76 MOVD, 79 MOVP, 79 MOVP3, 80 MOVX, 80 NOP, 81 ORL, 81 ORLD, 82 OUTL, 82 RET, 83 RETR, 83 RL, 83 RLC, 84 RR, 84 RRC, 84 SEL, 85 STOP, 86 STRT, 87 SWAP, 87 XCH, 88 XCHD, 88 XRL, 89 ================================================ FILE: pypcode/processors/8051/data/languages/80251.cspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/80251.pspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/80251.sinc ================================================ # 80251 Instructions # NOTE! 80251 implementation is preliminary and has not tested !! 
define token srcDestByte (8) rm47 = (4,7) rm47_d1 = (4,7) rm47_d2 = (4,7) rm03 = (0,3) wrj47 = (4,7) wrj47_d1 = (4,7) wrj47_d2 = (4,7) wrj03 = (0,3) drk47 = (4,7) drk03 = (0,3) # constraint bits d7 = (7,7) d57 = (5,7) d47 = (4,7) s3 = (3,3) s23 = (2,3) s13 = (1,3) s03 = (0,3) s1 = (1,1) s0 = (0,0) short01 = (0,1) bit02 = (0,2) ; define token srcDestByte2 (8) rm47_ = (4,7) rm03_ = (0,3) wrj47_ = (4,7) wrj03_ = (0,3) drk47_ = (4,7) drk03_ = (0,3) # constraint bits d7_ = (7,7) d57_ = (5,7) s3_ = (3,3) s13_ = (1,3) s03_ = (0,3) ; define token AddrThree (24) addr24 = (0,23) ; define token ImmedThree (24) data24 = (0,23) ; attach values short01 [ 1 2 4 _ ]; attach variables [ rm47 rm03 rm47_ rm03_ ] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 B ACC R12 R13 R14 R15 ]; attach variables [ rm47_d1 ] [ R0 R0 R2 R2 R4 R3 R6 R6 R8 R8 B B R12 R12 R14 R14 ]; attach variables [ rm47_d2 ] [ R1 R1 R3 R3 R5 R5 R7 R7 R9 R9 ACC ACC R13 R13 R15 R15 ]; attach variables [ wrj47 wrj03 wrj47_ wrj03_ ] [ WR0 WR2 WR4 WR6 WR8 AB WR12 WR14 WR16 WR18 WR20 WR22 WR24 WR26 WR28 WR30 ]; attach variables [ wrj47_d1 ] [ WR0 WR0 WR4 WR4 WR8 WR8 WR12 WR12 WR16 WR16 WR20 WR20 WR24 WR24 WR28 WR28 ]; attach variables [ wrj47_d2 ] [ WR2 WR2 WR6 WR6 AB AB WR14 WR14 WR18 WR18 WR22 WR22 WR26 WR26 WR30 WR30 ]; # NOTE: must use constraints DRK, DRKD and DRKS attach variables [ drk47 drk03 drk47_ drk03_ ] [ DR0 DR4 DR8 DR12 DR16 DR20 DR24 DR28 DPX SPX _ _ _ _ _ _ ]; @define DRK47 "drk47 & (d7=0 | d57=4)" # constraint for using drk47 @define DRK03 "drk03 & (s3=0 | s13=4)" # constraint for using drk03 @define DRK47_ "drk47_ & (d7_=0 | d57_=4)" # constraint for using drk47_ AtWRjb: "@"^wrj47 is wrj47 { ptr:3 = zext(wrj47); export *:1 ptr; } AtWRjw: "@"^wrj47 is wrj47 { ptr:3 = zext(wrj47); export *:2 ptr; } # NOTE: be sure to use the ATDRK constraint on the constructor AtDRkb: "@"^drk47 is drk47 { ptr:3 = drk47:3; export *:1 ptr; } AtDRkw: "@"^drk47 is drk47 { ptr:3 = drk47:3; export *:2 ptr; } AtDRkt: "@"^drk47 is drk47 { 
ptr:3 = drk47:3; export *:3 ptr; } @define ATDRK "(d7=0 | d57=4)" AtWRj47Dis16b: "@"^wrj47^"+"^data16 is wrj47; data16 { ptr:3 = zext(wrj47) + data16; export *:1 ptr; } AtWRj47Dis16w: "@"^wrj47^"+"^data16 is wrj47; data16 { ptr:3 = zext(wrj47) + data16; export *:2 ptr; } AtWRj03Dis16b: "@"^wrj03^"+"^data16 is wrj03; data16 { ptr:3 = zext(wrj03) + data16; export *:1 ptr; } AtWRj03Dis16w: "@"^wrj03^"+"^data16 is wrj03; data16 { ptr:3 = zext(wrj03) + data16; export *:2 ptr; } AtDRk47Dis24b: "@"^drk47^"+"^data24 is drk47; data24 { ptr:3 = drk47:3 + data24; export *:1 ptr; } AtDRk47Dis24w: "@"^drk47^"+"^data24 is drk47; data24 { ptr:3 = drk47:3 + data24; export *:2 ptr; } AtDRk03Dis24b: "@"^drk03^"+"^data24 is drk03; data24 { ptr:3 = drk03:3 + data24; export *:1 ptr; } AtDRk03Dis24w: "@"^drk03^"+"^data24 is drk03; data24 { ptr:3 = drk03:3 + data24; export *:2 ptr; } # TODO: Verify dir8 access restriction for word/dword accesses !! Direct8w: mainreg is bank=0 & mainreg { export *:2 mainreg; } Direct8w: Direct is bank=1 & Direct { tmp:2 = zext(Direct); export tmp; } # TODO: The dir16 mode does not map into the SFR's - is this correct ?? Direct16b: addr16 is addr16 { export *:1 addr16; } Direct16w: addr16 is addr16 { export *:2 addr16; } Direct16d: addr16 is addr16 { export *:4 addr16; } Data16x0: "#"data16 is data16 { export *[const]:4 data16; } Data16x1: "#"val is data16 [ val = 0xffff0000 + data16; ] { export *[const]:4 val; } Addr24: addr24 is addr24 { export *:1 addr24; } # NOTE: use SHORT constraint Short: "#"^short01 is short01 { export *[const]:1 short01; } @define SHORT "Short & (s0=0 | s1=0)" #TODO: Figure out new bit addressing for 251 ... 
xBitByteAddr: is bitaddr8 { export *:1 bitaddr8; } xBitAddr: bitaddr^"."^bit02 is bit02; bitaddr8 [ bitaddr = (bitaddr8 << 3) + bit02; ] { export *[BITS]:1 bitaddr; } xBitAddr2: "/"^bitaddr^"."^bit02 is bit02; bitaddr8 [ bitaddr = (bitaddr8 << 3) + bit02; ] { export *[BITS]:1 bitaddr; } macro push24(val) { al:1 = val:1; ah:1 = val(1); ax:1 = val(2); ptr:3 = SPX:3; ptr = ptr + 1; *[RAM]:1 ptr = al; ptr = ptr + 1; *[RAM]:1 ptr = ah; ptr = ptr + 1; *[RAM]:1 ptr = ax; SPX = SPX + 3; } macro pop24(val) { SPX = SPX - 2; ptr:3 = SPX:3; al:1 = *[RAM]:1 ptr; ah:1 = *[RAM]:1 (ptr+1); ax:1 = *[RAM]:1 (ptr+2); SPX = SPX - 1; val = (zext(ax) << 16) | (zext(ah) << 8) | zext(al); } # s s s s Binary representation of m or md # S S S S Binary representation of ms # t t t t Binary representation of j or jd # T T T T Binary representation of js # u u u u Binary representation of k or kd # U U U U Binary representation of ks # NOTE: >>>> Find MCS251 instructions by searching for "Binary Mode = [A5][Encoding]" in PDF manual <<<< # NOTE: All instructions should include the $(GROUP3) pattern prefix # ADD Rmd,Rms :ADD rm47,rm03 is $(GROUP3) & ophi=2 & oplo=12; rm47 & rm03 { addflags(rm47,rm03); rm47 = rm47 + rm03; resultflags(rm47); } # ADD WRjd,WRjs :ADD wrj47,wrj03 is $(GROUP3) & ophi=2 & oplo=13; wrj47 & wrj03 { addflags(wrj47,wrj03); wrj47 = wrj47 + wrj03; resultflags(wrj47); } # ADD DRkd,DRks :ADD drk47,drk03 is $(GROUP3) & ophi=2 & oplo=15; $(DRK47) & $(DRK03) { addflags(drk47,drk03); drk47 = drk47 + drk03; resultflags(drk47); } # ADD Rm,#data :ADD rm47,Data is $(GROUP3) & ophi=2 & oplo=14; rm47 & s03=0; Data { addflags(rm47,Data); rm47 = rm47 + Data; resultflags(rm47); } # ADD WRj,#data16 :ADD wrj47,Data16 is $(GROUP3) & ophi=2 & oplo=14; wrj47 & s03=4; Data16 { addflags(wrj47,Data16); wrj47 = wrj47 + Data16; resultflags(wrj47); } # ADD DRk,#0data16 :ADD drk47,Data16x0 is $(GROUP3) & ophi=2 & oplo=14; $(DRK47) & s03=8; Data16x0 { addflags(drk47,Data16x0); drk47 = drk47 + Data16x0; 
resultflags(drk47); } # ADD Rm,dir8 :ADD rm47,Direct is $(GROUP3) & ophi=2 & oplo=14; rm47 & s03=1; Direct { addflags(rm47,Direct); rm47 = rm47 + Direct; resultflags(rm47); } # ADD WRj,dir8 :ADD wrj47,Direct8w is $(GROUP3) & ophi=2 & oplo=14; wrj47 & s03=5; Direct8w { addflags(wrj47,Direct8w); wrj47 = wrj47 + Direct8w; resultflags(wrj47); } # ADD Rm,dir16 :ADD rm47,Direct16b is $(GROUP3) & ophi=2 & oplo=14; rm47 & s03=3; Direct16b { addflags(rm47,Direct16b); rm47 = rm47 + Direct16b; resultflags(rm47); } # ADD WRj,dir16 :ADD wrj47,Direct16w is $(GROUP3) & ophi=2 & oplo=14; wrj47 & s03=7; Direct16w { addflags(wrj47,Direct16w); wrj47 = wrj47 + Direct16w; resultflags(wrj47); } # ADD Rm,@WRj :ADD rm47_,AtWRjb is $(GROUP3) & ophi=2 & oplo=14; AtWRjb & s03=9; rm47_ & s03_=0 { addflags(rm47_,AtWRjb); rm47_ = rm47_ + AtWRjb; resultflags(rm47_); } # ADD Rm,@DRk :ADD rm47_,AtDRkb is $(GROUP3) & ophi=2 & oplo=14; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { addflags(rm47_,AtDRkb); rm47_ = rm47_ + AtDRkb; resultflags(rm47_); } # ANL Rmd,Rms :ANL rm47,rm03 is $(GROUP3) & ophi=5 & oplo=12; rm47 & rm03 { rm47 = rm47 & rm03; resultflags(rm47); } # ANL WRjd,WRjs :ANL wrj47,wrj03 is $(GROUP3) & ophi=5 & oplo=13; wrj47 & wrj03 { wrj47 = wrj47 & wrj03; resultflags(wrj47); } # ANL Rm,#data :ANL rm47,Data is $(GROUP3) & ophi=5 & oplo=14; rm47 & s03=0; Data { rm47 = rm47 & Data; resultflags(rm47); } # ANL WRj,#data16 :ANL wrj47,Data16 is $(GROUP3) & ophi=5 & oplo=14; wrj47 & s03=4; Data16 { wrj47 = wrj47 & Data16; resultflags(wrj47); } # ANL Rm,dir8 :ANL rm47,Direct is $(GROUP3) & ophi=5 & oplo=14; rm47 & s03=1; Direct { rm47 = rm47 & Direct; resultflags(rm47); } # ANL WRj,dir8 :ANL wrj47,Direct8w is $(GROUP3) & ophi=5 & oplo=14; wrj47 & s03=5; Direct8w { wrj47 = wrj47 & Direct8w; resultflags(wrj47); } # ANL Rm,dir16 :ANL rm47,Direct16b is $(GROUP3) & ophi=5 & oplo=14; rm47 & s03=3; Direct16b { rm47 = rm47 & Direct16b; resultflags(rm47); } # ANL WRj,dir16 :ANL wrj47,Direct16w is $(GROUP3) 
& ophi=5 & oplo=14; wrj47 & s03=7; Direct16w { wrj47 = wrj47 & Direct16w; resultflags(wrj47); } # ANL Rm,@WRj :ANL rm47_,AtWRjb is $(GROUP3) & ophi=5 & oplo=14; AtWRjb & s03=9; rm47_ & s03_=0 { rm47_ = rm47_ & AtWRjb; resultflags(rm47_); } # ANL Rm,@DRk :ANL rm47_,AtDRkb is $(GROUP3) & ophi=5 & oplo=14; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { rm47_ = rm47_ & AtDRkb; resultflags(rm47_); } # ANL C,bit :ANL "CY",xBitAddr is $(GROUP3) & ophi=10 & oplo=9; (d47=8 & s3=0 & bit02; xBitByteAddr) & xBitAddr { $(CY)=$(CY)& ((xBitByteAddr>>bit02)&1); resultflags(xBitByteAddr); } # ANL C,/bit :ANL "CY",xBitAddr2 is $(GROUP3) & ophi=10 & oplo=9; (d47=15 & s3=0 & bit02; xBitByteAddr) & xBitAddr2 { $(CY)=$(CY)& (~((xBitByteAddr>>bit02)&1)); resultflags(xBitByteAddr); } # CLR bit :CLR xBitAddr is $(GROUP3) & ophi=10 & oplo=9; (d47=12 & s3=0 & bit02; xBitByteAddr) & xBitAddr { tmp:1 = ~(1<>bit02)&1) == 1:1) goto Rel8; } # JBC bit,rel :JBC xBitAddr,Rel8 is $(GROUP3) & ophi=10 & oplo=9; (d47=1 & s3=0 & bit02; xBitByteAddr) & xBitAddr; Rel8 { tmp:1 = 1<>bit02)&1)==0:1) goto Rel8; } # JNE rel :JNE Rel8 is $(GROUP3) & ophi=7 & oplo=8; Rel8 { if ($(Z)==0) goto Rel8; } # JSG rel :JSG Rel8 is $(GROUP3) & ophi=1 & oplo=8; Rel8 { if ($(Z)==0 && $(N)==$(OV)) goto Rel8; } # JSGE rel :JSGE Rel8 is $(GROUP3) & ophi=5 & oplo=8; Rel8 { if ($(N)==$(OV)) goto Rel8; } # JSL rel :JSL Rel8 is $(GROUP3) & ophi=4 & oplo=8; Rel8 { if ($(N)!=$(OV)) goto Rel8; } # JSLE rel :JSLE Rel8 is $(GROUP3) & ophi=0 & oplo=8; Rel8 { if ($(Z)==1 || $(N)!=$(OV)) goto Rel8; } # LCALL @WRj :LCALL AtWRjw is $(GROUP3) & ophi=9 & oplo=9; AtWRjw & s03=4 { ptr:3 = inst_next; push16(ptr:2); pc:3 = (ptr & 0xff0000) + zext(AtWRjw); call [pc]; } # LJMP @WRj :LJMP AtWRjw is $(GROUP3) & ophi=8 & oplo=9; AtWRjw & s03=4 { ptr:3 = inst_next; pc:3 = (ptr & 0xff0000) + zext(AtWRjw); goto [pc]; } # MOV Rmd,Rms :MOV rm47,rm03 is $(GROUP3) & ophi=7 & oplo=12; rm47 & rm03 { rm47 = rm03; } # MOV WRjd,WRjs :MOV wrj47,wrj03 is $(GROUP3) & 
ophi=7 & oplo=13; wrj47 & wrj03 { wrj47 = wrj03; } # MOV DRkd,DRks :MOV drk47,drk03 is $(GROUP3) & ophi=7 & oplo=15; drk47 & drk03 { drk47 = drk03; } # MOV Rm,#data :MOV rm47,Data is $(GROUP3) & ophi=7 & oplo=14; rm47 & s03=0; Data { rm47 = Data; } # MOV WRj,#data16 :MOV wrj47,Data16 is $(GROUP3) & ophi=7 & oplo=14; wrj47 & s03=4; Data16 { wrj47 = Data16; } # MOV DRk,#0data16 :MOV drk47,Data16x0 is $(GROUP3) & ophi=7 & oplo=14; drk47 & s03=8; Data16x0 { drk47 = Data16x0; } # MOV DRk,#1data16 :MOV drk47,Data16x1 is $(GROUP3) & ophi=7 & oplo=14; drk47 & s03=12; Data16x1 { drk47 = Data16x1; } # MOV Rm,dir8 :MOV rm47,Direct is $(GROUP3) & ophi=7 & oplo=14; rm47 & s03=1; Direct { rm47 = Direct; } # MOV WRj,dir8 :MOV wrj47,Direct8w is $(GROUP3) & ophi=7 & oplo=14; wrj47 & s03=5; Direct8w { wrj47 = Direct8w; } # MOV DRk,dir8 :MOV drk47,Direct8w is $(GROUP3) & ophi=7 & oplo=14; drk47 & s03=13; Direct8w { drk47 = zext(Direct8w); } # MOV Rm,dir16 :MOV rm47,Direct16b is $(GROUP3) & ophi=7 & oplo=14; rm47 & s03=3; Direct16b { rm47 = Direct16b; } # MOV WRj,dir16 :MOV wrj47,Direct16w is $(GROUP3) & ophi=7 & oplo=14; wrj47 & s03=7; Direct16w { wrj47 = Direct16w; } # MOV DRk,dir16 :MOV drk47,Direct16d is $(GROUP3) & ophi=7 & oplo=14; drk47 & s03=15; Direct16d { drk47 = Direct16d; } # MOV Rm,@WRj :MOV rm47_,AtWRjb is $(GROUP3) & ophi=7 & oplo=14; AtWRjb & s03=9; rm47_ & s03_=0 { rm47_ = AtWRjb; } # MOV Rm,@DRk :MOV rm47_,AtDRkb is $(GROUP3) & ophi=7 & oplo=14; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { rm47_ = AtDRkb; } # MOV WRjd,@WRjs :MOV wrj47_,AtWRjw is $(GROUP3) & ophi=0 & oplo=11; AtWRjw & s03=8; wrj47_ & s03_=0 { wrj47_ = AtWRjw; } # MOV WRj,@DRk :MOV wrj47_,AtDRkw is $(GROUP3) & ophi=0 & oplo=11; $(ATDRK) & AtDRkw & s03=10; wrj47_ & s03_=0 { wrj47_ = AtDRkw; } # MOV dir8,Rm :MOV Direct,rm47 is $(GROUP3) & ophi=7 & oplo=10; rm47 & s03=1; Direct { Direct = rm47; } # MOV dir8,WRj # TODO: !! 
Verify direct byte write restriction to SFR registers :MOV Direct8w,wrj47 is $(GROUP3) & ophi=7 & oplo=10; wrj47 & s03=5; bank=0 & Direct8w { Direct8w = wrj47; } :MOV Direct,wrj47 is $(GROUP3) & ophi=7 & oplo=10; wrj47 & s03=5; bank=1 & Direct { Direct = wrj47:1; } # MOV dir8,DRk # TODO: !! Verify byte/word write restriction to internal memory (00-7f) # TODO: !! Verify byte write restriction to SFR registers :MOV Direct8w,drk47 is $(GROUP3) & ophi=7 & oplo=10; $(DRK47) & s03=13; bank=0 & Direct8w { Direct8w = drk47:2; } :MOV Direct,drk47 is $(GROUP3) & ophi=7 & oplo=10; $(DRK47) & s03=13; bank=1 & Direct { Direct = drk47:1; } # MOV dir16,Rm :MOV Direct16b,rm47 is $(GROUP3) & ophi=7 & oplo=10; rm47 & s03=3; Direct16b { Direct16b = rm47; } # MOV dir16,WRj :MOV Direct16w,wrj47 is $(GROUP3) & ophi=7 & oplo=10; wrj47 & s03=7; Direct16w { Direct16w = wrj47; } # MOV dir16,DRk :MOV Direct16d,drk47 is $(GROUP3) & ophi=7 & oplo=10; $(DRK47) & s03=15; Direct16d { Direct16d = drk47; } # MOV @WRj,Rm :MOV AtWRjb,rm47_ is $(GROUP3) & ophi=7 & oplo=10; AtWRjb & s03=9; rm47_ & s03_=0 { AtWRjb = rm47_; } # MOV @DRk,Rm :MOV AtDRkb,rm47_ is $(GROUP3) & ophi=7 & oplo=10; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { AtDRkb = rm47_; } # MOV @WRjd,WRjs :MOV AtWRjw,wrj47_ is $(GROUP3) & ophi=1 & oplo=11; AtWRjw & s03=8; wrj47_ & s03_=0 { AtWRjw = wrj47_; } # MOV @DRk,WRj :MOV AtDRkw,wrj47_ is $(GROUP3) & ophi=1 & oplo=11; $(ATDRK) & AtDRkw & s03=10; wrj47_ & s03_=0 { AtDRkw = wrj47_; } # MOV Rm,@WRj+dis16 :MOV rm47,AtWRj03Dis16b is $(GROUP3) & ophi=0 & oplo=9; rm47 ... & AtWRj03Dis16b { AtWRj03Dis16b = rm47; } # MOV WRj,@WRj+dis16 :MOV wrj47,AtWRj03Dis16w is $(GROUP3) & ophi=4 & oplo=9; wrj47 ... & AtWRj03Dis16w { AtWRj03Dis16w = wrj47; } # MOV Rm,@DRk+dis24 :MOV rm47,AtDRk03Dis24b is $(GROUP3) & ophi=2 & oplo=9; (rm47 & $(DRK03)) ... & AtDRk03Dis24b { AtDRk03Dis24b = rm47; } # MOV WRj,@DRk+dis24 :MOV wrj47,AtDRk03Dis24w is $(GROUP3) & ophi=6 & oplo=9; (wrj47 & $(DRK03)) ... 
& AtDRk03Dis24w { AtDRk03Dis24w = wrj47; } # MOV @WRj+dis16,Rm :MOV AtWRj47Dis16b,rm03 is $(GROUP3) & ophi=1 & oplo=9; rm03 ... & AtWRj47Dis16b { AtWRj47Dis16b = rm03; } # MOV @WRj+dis16,WRj :MOV AtWRj47Dis16w,wrj03 is $(GROUP3) & ophi=5 & oplo=9; wrj03 ... & AtWRj47Dis16w { AtWRj47Dis16w = wrj03; } # MOV @DRk+dis24,Rm :MOV AtDRk47Dis24b,rm03 is $(GROUP3) & ophi=3 & oplo=9; (rm03 & $(DRK47)) ... & AtDRk47Dis24b { AtDRk47Dis24b = rm03; } # MOV @DRk+dis24,WRj :MOV AtDRk47Dis24w,wrj03 is $(GROUP3) & ophi=7 & oplo=9; (wrj03 & $(DRK47)) ... & AtDRk47Dis24w { AtDRk47Dis24w = wrj03; } # MOV bit,C :MOV xBitAddr,"CY" is $(GROUP3) & ophi=10 & oplo=9; (d47=9 & s3=0 & bit02; xBitByteAddr) & xBitAddr { xBitByteAddr = (xBitByteAddr) | (1<>bit02)&1); } # MOVH DRk,#data16 :MOVH drk47,Data16x0 is $(GROUP3) & ophi=7 & oplo=14; $(DRK47) & s03=12; Data16x0 { drk47 = (drk47 & 0xffff0000) | (Data16x0 << 16); } # MOVS WRj,Rm :MOVZ wrj47,rm03 is $(GROUP3) & ophi=1 & oplo=10; wrj47 & rm03 { wrj47 = sext(rm03); } # MOVZ WRj,Rm :MOVZ wrj47,rm03 is $(GROUP3) & ophi=0 & oplo=10; wrj47 & rm03 { wrj47 = zext(rm03); } # MUL Rmd,Rms :MUL rm47,rm03 is $(GROUP3) & ophi=10 & oplo=12; rm47 & rm03 & rm47_d1 & rm47_d2 { result:2 = zext(rm47) * zext(rm03); tmp:2 = result>>8; rm47_d1 = tmp:1; rm47_d2 = result:1; } # MUL WRjd,WRjs :MUL wrj47,wrj03 is $(GROUP3) & ophi=10 & oplo=13; wrj47 & wrj03 & wrj47_d1 & wrj47_d2 { result:4 = zext(wrj47) * zext(wrj03); tmp:4 = result>>16; wrj47_d1 = tmp:2; wrj47_d2 = result:2; } # ORL Rmd,Rms :ORL rm47,rm03 is $(GROUP3) & ophi=4 & oplo=12; rm47 & rm03 { rm47 = rm47 | rm03; resultflags(rm47); } # ORL WRjd,WRjs :ORL wrj47,wrj03 is $(GROUP3) & ophi=4 & oplo=13; wrj47 & wrj03 { wrj47 = wrj47 | wrj03; resultflags(wrj47); } # ORL Rm,#data :ORL rm47,Data is $(GROUP3) & ophi=4 & oplo=14; rm47 & s03=0; Data { rm47 = rm47 | Data; resultflags(rm47); } # ORL WRj,#data16 :ORL wrj47,Data16 is $(GROUP3) & ophi=4 & oplo=14; wrj47 & s03=4; Data16 { wrj47 = wrj47 | Data16; 
resultflags(wrj47); } # ORL Rm,dir8 :ORL rm47,Direct is $(GROUP3) & ophi=4 & oplo=14; rm47 & s03=1; Direct { rm47 = rm47 | Direct; resultflags(rm47); } # ORL WRj,dir8 :ORL wrj47,Direct8w is $(GROUP3) & ophi=4 & oplo=15; wrj47 & s03=5; Direct8w { wrj47 = wrj47 | Direct8w; resultflags(wrj47); } # ORL Rm,dir16 :ORL rm47,Direct16b is $(GROUP3) & ophi=4 & oplo=14; rm47 & s03=3; Direct16b { rm47 = rm47 | Direct16b; resultflags(rm47); } # ORL WRj,dir16 :ORL wrj47,Direct16w is $(GROUP3) & ophi=4 & oplo=14; wrj47 & s03=7; Direct16w { wrj47 = wrj47 | Direct16w; resultflags(wrj47); } # ORL Rm,@WRj :ORL rm47_,AtWRjb is $(GROUP3) & ophi=4 & oplo=14; AtWRjb & s03=9; rm47_ & s03_=0 { rm47_ = rm47_ | AtWRjb; resultflags(rm47_); } # ORL Rm,@DRk :ORL rm47_,AtDRkb is $(GROUP3) & ophi=4 & oplo=14; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { rm47_ = rm47_ | AtDRkb; resultflags(rm47_); } # ORL C,bit :ORL "CY",xBitAddr is $(GROUP3) & ophi=10 & oplo=9; (d47=7 & s3=0 & bit02; xBitByteAddr) & xBitAddr { $(CY) = ((xBitByteAddr>>bit02)&1) | $(CY); } # ORL bit,C :ORL "CY",xBitAddr2 is $(GROUP3) & ophi=10 & oplo=9; (d47=14 & s3=0 & bit02; xBitByteAddr) & xBitAddr2 { $(CY) = ((xBitByteAddr>>bit02)&1) | ($(CY) == 0); } # POP Rm :POP rm47 is $(GROUP3) & ophi=13 & oplo=10; rm47 & s03=8 { pop8(rm47); } # POP WRj :POP wrj47 is $(GROUP3) & ophi=13 & oplo=10; wrj47 & s03=9 { pop16(wrj47); } # POP DRk :POP drk47 is $(GROUP3) & ophi=13 & oplo=10; $(DRK47) & s03=11 { pop16(drk47); } # PUSH #data # TODO: manual did not specify A5 prefix, but would otherwise conflict with the XCH A,Rn instruction (8051) :PUSH Data is $(GROUP3) & ophi=12 & oplo=10; d47=0 & s03=2; Data { push8(Data); } # PUSH #data16 :PUSH Data16 is $(GROUP3) & ophi=12 & oplo=10; d47=0 & s03=6; Data16 { push16(Data16); } # PUSH Rm :PUSH rm47 is $(GROUP3) & ophi=12 & oplo=10; rm47 & s03=8 { push8(rm47); } # PUSH WRj :PUSH wrj47 is $(GROUP3) & ophi=12 & oplo=10; wrj47 & s03=9 { push16(wrj47); } # PUSH DRk :PUSH drk47 is $(GROUP3) & ophi=12 & 
oplo=10; $(DRK47) & s03=11 { push16(drk47); } # SETB bit :SETB xBitAddr^"."^xBitByteAddr is $(GROUP3) & ophi=10 & oplo=9; (d47=13 & s3=0 & bit02; xBitByteAddr) & xBitAddr { xBitByteAddr = (xBitByteAddr) | (1 << bit02); } # SLL Rm :SLL rm47 is $(GROUP3) & ophi=3 & oplo=14; rm47 & s03=0 { $(CY) = ((rm47>>7) & 1); rm47 = rm47 << 1; resultflags(rm47); } # SLL WRj :SLL wrj47 is $(GROUP3) & ophi=3 & oplo=14; wrj47 & s03=4 { $(CY) = ((wrj47>>15) & 1) == 1; wrj47 = wrj47 << 1; resultflags(wrj47); } # SRA Rm :SRA rm47 is $(GROUP3) & ophi=0 & oplo=14; rm47 & s03=0 { $(CY) = rm47 & 1; rm47 = rm47 s>> 1; resultflags(rm47); } # SRA WRj :SRA wrj47 is $(GROUP3) & ophi=0 & oplo=14; wrj47 & s03=4 { $(CY) = (wrj47 & 1) == 1; wrj47 = wrj47 s>> 1; resultflags(wrj47); } # SRL Rm :SRL rm47 is $(GROUP3) & ophi=1 & oplo=14; rm47 & s03=0 { $(CY) = rm47 & 1; rm47 = rm47 >> 1; resultflags(rm47); } # SRL WRj :SRL wrj47 is $(GROUP3) & ophi=1 & oplo=14; wrj47 & s03=4 { $(CY) = (wrj47 & 1) == 1; wrj47 = wrj47 >> 1; resultflags(wrj47); } # SUB Rmd,Rms :SUB rm47,rm03 is $(GROUP3) & ophi=9 & oplo=12; rm47 & rm03 { subflags(rm47,rm03); rm47 = rm47 - rm03; resultflags(rm47); } # SUB WRjd,WRjs :SUB wrj47,wrj03 is $(GROUP3) & ophi=9 & oplo=13; wrj47 & wrj03 { subflags(wrj47,wrj03); wrj47 = wrj47 - wrj03; resultflags(wrj47); } # SUB DRkd,DRks :SUB drk47,drk03 is $(GROUP3) & ophi=9 & oplo=15; $(DRK47) & $(DRK03) { subflags(drk47,drk03); drk47 = drk47 - drk03; resultflags(drk47);} # SUB Rm,#data :SUB rm47,Data is $(GROUP3) & ophi=9 & oplo=14; rm47 & s03=0; Data { subflags(rm47,Data); rm47 = rm47 - Data; resultflags(rm47);} # SUB WRj,#data16 :SUB wrj47,Data16 is $(GROUP3) & ophi=9 & oplo=14; wrj47 & s03=4; Data16 { subflags(wrj47,Data16); wrj47 = wrj47 - Data16; resultflags(wrj47);} # SUB DRk,#data16 :SUB drk47,Data16x0 is $(GROUP3) & ophi=9 & oplo=14; $(DRK47) & s03=8; Data16x0 { subflags(drk47,Data16x0); drk47 = drk47 - Data16x0; resultflags(drk47);} # SUB Rm,dir8 :SUB rm47,Direct is $(GROUP3) & ophi=9 & 
oplo=14; rm47 & s03=1; Direct { subflags(rm47,Direct); rm47 = rm47 - Direct; resultflags(rm47);} # SUB WRj,dir8 :SUB wrj47,Direct8w is $(GROUP3) & ophi=9 & oplo=14; wrj47 & s03=5; Direct8w { subflags(wrj47,Direct8w); wrj47 = wrj47 - Direct8w; resultflags(wrj47);} # SUB Rm,dir16 :SUB rm47,Direct16b is $(GROUP3) & ophi=9 & oplo=14; rm47 & s03=3; Direct16b { subflags(rm47,Direct16b); rm47 = rm47 - Direct16b; resultflags(rm47);} # SUB WRj,dir16 :SUB wrj47,Direct16w is $(GROUP3) & ophi=9 & oplo=14; wrj47 & s03=7; Direct16w { subflags(wrj47,Direct16w); wrj47 = wrj47 - Direct16w; resultflags(wrj47);} # SUB Rm,@WRj :SUB rm47_,AtWRjb is $(GROUP3) & ophi=9 & oplo=14; AtWRjb & s03=9; rm47_ & s03_=0 { subflags(rm47_,AtWRjb); rm47_ = rm47_ - AtWRjb; resultflags(rm47_);} # SUB Rm,@DRk :SUB rm47_,AtDRkb is $(GROUP3) & ophi=9 & oplo=14; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { subflags(rm47_,AtDRkb); rm47_ = rm47_ - AtDRkb; resultflags(rm47_);} # XRL Rmd,Rms :XRL rm47,rm03 is $(GROUP3) & ophi=6 & oplo=12; rm47 & rm03 { rm47 = rm47 ^ rm03; resultflags(rm47); } # XRL WRjd,WRjs :XRL wrj47,wrj03 is $(GROUP3) & ophi=6 & oplo=13; wrj47 & wrj03 { wrj47 = wrj47 ^ wrj03; resultflags(wrj47); } # XRL Rm,#data :XRL rm47,Data is $(GROUP3) & ophi=6 & oplo=14; rm47 & s03=0; Data { rm47 = rm47 ^ Data; resultflags(rm47); } # XRL WRj,#data16 :XRL wrj47,Data16 is $(GROUP3) & ophi=6 & oplo=14; wrj47 & s03=4; Data16 { wrj47 = wrj47 ^ Data16; resultflags(wrj47); } # XRL Rm,dir8 :XRL rm47,Direct is $(GROUP3) & ophi=6 & oplo=14; rm47 & s03=1; Direct { rm47 = rm47 ^ Direct; resultflags(rm47); } # XRL WRj,dir8 :XRL wrj47,Direct8w is $(GROUP3) & ophi=6 & oplo=14; wrj47 & s03=5; Direct8w { wrj47 = wrj47 ^ Direct8w; resultflags(wrj47); } # XRL Rm,dir16 :XRL rm47,Direct16b is $(GROUP3) & ophi=6 & oplo=14; rm47 & s03=3; Direct16b { rm47 = rm47 ^ Direct16b; resultflags(rm47); } # XRL WRj,dir16 :XRL wrj47,Direct16w is $(GROUP3) & ophi=6 & oplo=14; wrj47 & s03=7; Direct16w { wrj47 = wrj47 ^ Direct16w; 
resultflags(wrj47); } # XRL Rm,@Wrj :XRL rm47_,AtWRjb is $(GROUP3) & ophi=6 & oplo=14; AtWRjb & s03=9; rm47_ & s03_=0 { rm47_ = rm47_ ^ AtWRjb; resultflags(rm47_); } # XRL Rm,@Drk :XRL rm47_,AtDRkb is $(GROUP3) & ophi=6 & oplo=14; $(ATDRK) & AtDRkb & s03=11; rm47_ & s03_=0 { rm47_ = rm47_ ^ AtDRkb; resultflags(rm47_); } ================================================ FILE: pypcode/processors/8051/data/languages/80251.slaspec ================================================ @define MCS251 "" @include "8051_main.sinc" @include "80251.sinc" ================================================ FILE: pypcode/processors/8051/data/languages/80390.cspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/80390.slaspec ================================================ @define MCS80390 "" @include "8051_main.sinc" ================================================ FILE: pypcode/processors/8051/data/languages/8051.cspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/8051.ldefs ================================================ 8051 Microcontroller Family 80251 Microcontroller Family 80390 in flat mode NXP/Phillips MX51 ================================================ FILE: pypcode/processors/8051/data/languages/8051.opinion ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/8051.pspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/8051.slaspec ================================================ @define MCS51 "" @include "8051_main.sinc" ================================================ FILE: pypcode/processors/8051/data/languages/8051_archimedes.cspec ================================================ 
================================================ FILE: pypcode/processors/8051/data/languages/8051_main.sinc ================================================ # sleigh specification file for Intel 8051 #@define BIT_OPS "PCODEOPS" #@define BIT_OPS "SHIFTS" @define BIT_OPS "BIT_ADDRS" # It's sometimes clearer for decompilation to omit the pushing and # restoring of the return value for function calls. #@define OMIT_RETADDR # TODO !!! need to fully employ resultflags macro after resolving the use of the above BIT_OPS !!! # TODO !!! Need to reconcile use of PSW vs. PSW1 for MCS251 !!! # TODO !!! Need to fill-out SFR bits in 80251.pspec !!! @if defined(ENDIAN) define endian=$(ENDIAN); @else define endian=big; @endif define alignment=1; @if defined(MCS251) @define PTRSIZE 3 @define SP_SIZE 3 define space RAM type=ram_space size=3 default; define space SFR type=ram_space size=2; define space BITS type=ram_space size=2; # 8051 spaces map to the following regions of the 80251 RAM space: # CODE - 0xff0000-0xffffff # EXTERNAL - 0x010000-0x01ffff # INTERNAL - 0x000000-0x0000ff @elif defined(MCS51) @if defined(PTRSIZE) @else @define PTRSIZE 2 @endif # SP stack pointer should be set to the size of the space it is used in, to avoid "issues" # This is a minor inconsistency with the model and the actual processor in some cases # If pristine SP sizing is required for rollover and such, the model should be changed # @define SP_SIZE 1 define space CODE type=ram_space size=$(PTRSIZE) default; define space INTMEM type=ram_space size=1; define space EXTMEM type=ram_space size=2; define space SFR type=ram_space size=1; define space BITS type=ram_space size=1; @elif defined(MCS80390) @define PTRSIZE 3 @define SP_SIZE 1 define space CODE type=ram_space size=3 default; define space INTMEM type=ram_space size=1; define space EXTMEM type=ram_space size=3; define space SFR type=ram_space size=1; define space BITS type=ram_space size=1; @elif defined(MX51) @define PTRSIZE 3 @define SP_SIZE 3 # 
The right thing for decompilation is to represent these # as part of one uniform space, similar to the 80251. # Remember to load code at 800000 # # xdata 0 (EXTMEM) # data 7f0000 (INTMEM) # code 800000 define space RAM type=ram_space size=3 default; define space SFR type=ram_space size=1; define space ESFR type=ram_space size=1; define space BITS type=ram_space size=1; define space EBITS type=ram_space size=1; # Unsure where stack really is, but probably don't want it on normal # registers #@define STACKBASE 0x7f0100 @define STACKBASE 0 @else # "error Unknown processor" @endif define space register type=register_space size=1; # Register File define register offset=0x00 size=1 [ R0 R1 R2 R3 R4 R5 R6 R7 ]; # for future jump table fixup define register offset=0x70 size=1 [ jumpTableGuard1 jumpTableGuard2 ]; @if defined(MCS251) define register offset=0x08 size=1 [ R8 R9 B ACC R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 R30 R31 ]; define register offset=0x0 size=2 [ WR0 WR2 WR4 WR6 WR8 AB WR12 WR14 WR16 WR18 WR20 WR22 WR24 WR26 WR28 WR30 ]; define register offset=0x0 size=4 [ DR0 DR4 DR8 DR12 DR16 DR20 DR24 DR28 ]; define register offset=0x38 size=1 [ R56 DPXL DPH DPL R60 R61 SPH ]; define register offset=0x3A size=2 [ DPTR ]; define register offset=0x38 size=4 [ DPX SPX ]; @elif defined(MCS51) || defined(MCS80390) || defined(MX51) define register offset=0x00 size=4 [ R0R1R2R3 ]; define register offset=0x01 size=3 [ R1R2R3 ]; # Used as R3R2R1 define register offset=0x01 size=2 [ R2R1 ]; define register offset=0x00 size=2 [ R0R1 R2R3 R4R5 R6R7 ]; define register offset=0x04 size=4 [ R4R5R6R7 ]; define register offset=0x05 size=3 [ R5R6R7 ]; define register offset=0x0A size=1 [ B ACC ]; # relocated to facilitate AB 16-bit access define register offset=0x0A size=2 [ AB ]; @if defined(MCS51) || defined(MX51) define register offset=0x82 size=2 [ DPTR ]; define register offset=0x82 size=1 [ DPH DPL ]; # relocated to facilitate DPTR 16-bit access 
@elif defined(MCS80390) # Following existing example, relocated DPX, DPH, and DPL for DPTR # access. Rework some direct moves to compensate. Not clear that all # cases are covered, thus might be problematic hack in the long term. define register offset=0x82 size=3 [ DPTR ]; define register offset=0x82 size=1 [ DPX DPH DPL ]; @endif @else # "error Unknown processor" @endif define register offset=0x40 size=$(SP_SIZE) [ SP ]; define register offset=0x44 size=$(PTRSIZE) [ PC ]; define register offset=0x48 size=1 [ PSW ]; @if defined(DUAL_DPTR) # Dual DPTR # Rather than model it as context, we're just going # to special case the INC and DEC instructions # to swap the register values define register offset=0x4a size=2 [ DPTR2 ]; define register offset=$(DPS_REG_NUM) size=1 [ AUXR1 ]; # Bit 0 is DPS @endif @define CY "PSW[7,1]" @define AC "PSW[6,1]" @define N "PSW[5,1]" @define RS1 "PSW[4,1]" @define RS0 "PSW[3,1]" @define OV "PSW[2,1]" @define Z "PSW[1,1]" @if defined(MX51) # remap these into the normal register file for decompiler use. 
# Really belong on ESFR @0xfc - 0xfe with order EPL EPM EPH define register offset=0xc0 size=1 [ EPH EPM EPL ]; define register offset=0xc0 size=3 [ EPTR ]; @endif @if defined(MCS251) define register offset=0x50 size=4 contextReg; define context contextReg phase = (0,0) srcMode = (1,1) # (reflects UCONFIG0.0) 1: source mode, 0: binary mode A5Prefix = (2,2) # reflects presence of A5 prefix ; # GROUP1 - MCS51 instructions in 0x00-0x5F range @define GROUP1 "epsilon" # GROUP2 - MCS51 instructions in 0x60-0xff range @define GROUP2 "((srcMode=0 & A5Prefix=0) | (srcMode=1 & A5Prefix=1))" # GROUP3 - MCS251 instructions in 0x60-0xff range @define GROUP3 "((srcMode=0 & A5Prefix=1) | (srcMode=1 & A5Prefix=0))" @elif defined(MCS51) || defined(MCS80390) || defined(MX51) @define GROUP1 "epsilon" @define GROUP2 "epsilon" @define GROUP3 "epsilon" @endif define pcodeop decimal_adjust; define pcodeop nop; @if BIT_OPS == "PCODEOPS" define pcodeop get; define pcodeop set; define pcodeop set_bit_value; define pcodeop clr; @endif #TOKENS define token opbyte (8) opfull = (0,7) oplo = (0,3) ophi = (4,7) rn = (0,2) rnfill = (3,3) ri = (0,0) rifill = (1,3) opaddr = (5,7) addrfill = (4,4) b_0000 = (0,0) b_0001 = (0,1) b_0002 = (0,2) b_0005 = (0,5) b_0101 = (1,1) b_0107 = (1,7) b_0207 = (2,7) b_0307 = (3,7) b_0607 = (6,7) @if defined(MX51) PRi_sel = (2,2) PRi_revend = (2,2) # reverse endian emov_delta = (0,1) @endif ; define token AddrByte (8) direct = (0,7) bank = (7,7) sfr = (0,6) sfr6 = (6,6) sfrlo = (0,3) mainreg = (0,6) direct17 = (1,7) ; define token AddrByte2 (8) direct2 = (0,7) bank2 = (7,7) sfr2 = (0,6) sfr26 = (6,6) sfr2lo = (0,3) mainreg2 = (0,6) ; define token BitByte (8) bitaddr8 = (0,7) bitaddr27 = (2,7) bitbank = (7,7) sfrbyte = (3,7) bitaddr57 = (5,7) sfrbit6 = (6,6) sfrbit3 = (3,3) sfrbit = (0,2) dec lowbyte = (3,6) bitaddr0 = (0,0) ; define token AddrTwo (16) addr16 = (0,15) ; @if defined(MCS80390) # todo: deconflict with 80251 version define token AddrThree (24) addr24 = 
(0,23) ; @endif define token RelByte (8) rel8=(0,7) signed; define token ImmedByte (8) data=(0,7); define token ImmedTwo (16) data16 = (0,15) rel16 = (0,15) signed ; @if defined(MCS80390) define token ImmedThree (24) data24 = (0,23) ; @endif @if defined(MCS80390) define token aopword (24) aoplo = (16,19) aopaddr = (21,23) aaddrfill = (20,20) adata = (0,15) ; @else define token aopword (16) aoplo = (8,11) aopaddr = (13,15) aaddrfill = (12,12) adata = (0,7) ; @endif attach variables rn [ R0 R1 R2 R3 R4 R5 R6 R7 ]; attach variables ri [ R0 R1 ]; # flags macros #macro addflags(op1, op2) { # Flags set by add instructions # PSW = PSW & 0x7b; # PSW = PSW | (carry(op1,op2)<<7) # Check for carry # | (scarry(op1,op2)<<2); # Check for signed carry #} #macro subflags(op1, op2) { # Flags set by sub instructions # PSW = PSW & 0x7b; # PSW = PSW | ((op1>sfrbit)&1); resultflags(tmp); } :ANL CY,BitAddr2 is $(GROUP1) & CY & ophi=11 & oplo=0; BitAddr2 & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr {tmp:1 = BitByteAddr; $(CY)=$(CY)& (~((tmp>>sfrbit)&1)); } @if BIT_OPS == "BIT_ADDRS" :ANL CY,BitAddr is $(GROUP1) & CY & ophi=8 & oplo=2; BitAddr & sfrbit & BitByteAddr {$(CY)=$(CY)& BitAddr; } :ANL CY,BitAddr2 is $(GROUP1) & CY & ophi=11 & oplo=0; BitAddr2 & sfrbit & BitByteAddr {$(CY)=$(CY)& ~BitAddr2; } @elif BIT_OPS == "PCODEOPS" :ANL CY,BitAddr is $(GROUP1) & CY & ophi=8 & oplo=2; BitAddr & sfrbit & BitByteAddr {$(CY)=$(CY)& get(BitAddr, BitByteAddr); } :ANL CY,BitAddr2 is $(GROUP1) & CY & ophi=11 & oplo=0; BitAddr2 & sfrbit & BitByteAddr {$(CY)=$(CY)& (get(BitAddr2, BitByteAddr)^1); } @elif BIT_OPS == "SHIFTS" :ANL CY,BitAddr is $(GROUP1) & CY & ophi=8 & oplo=2; BitAddr & sfrbit & BitByteAddr {$(CY)=$(CY)& ((BitByteAddr>>sfrbit)&1); } :ANL CY,BitAddr2 is $(GROUP1) & CY & ophi=11 & oplo=0; BitAddr2 & sfrbit & BitByteAddr {$(CY)=$(CY)& (~((BitByteAddr>>sfrbit)&1)); } @endif :CJNE Areg,Direct,Rel8 is $(GROUP1) & ophi=11 & oplo=5 & Areg; Direct; Rel8 { compflags(ACC,Direct); if 
(ACC!=Direct) goto Rel8; } :CJNE Areg,Data,Rel8 is $(GROUP1) & ophi=11 & oplo=4 & Areg; Data; Rel8 { compflags(ACC,Data); if (ACC!=Data) goto Rel8; } :CJNE rn,Data,Rel8 is $(GROUP2) & ophi=11 & rnfill=1 & rn; Data; Rel8 { compflags(rn,Data); if (rn!=Data) goto Rel8; } :CJNE Ri,Data,Rel8 is $(GROUP2) & ophi=11 & rifill=3 & Ri; Data; Rel8 { compflags(Ri,Data); if (Ri!=Data) goto Rel8; } :CLR Areg is $(GROUP1) & ophi=14 & oplo=4 & Areg { ACC = 0; } :CLR CY is $(GROUP1) & CY & ophi=12 & oplo=3 {$(CY)= 0; } :CLR BitAddr is $(GROUP1) & ophi=12 & oplo=2; BitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr { local tmp = ~(1<>sfrbit)&1) == 1:1) goto Rel8; } :JBC BitAddr,Rel8 is $(GROUP1) & ophi=1 & oplo=0; BitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr; Rel8 { tmp:1 = 1<>sfrbit)&1) == 1:1) goto Rel8; } :JBC BitAddr,Rel8 is $(GROUP1) & ophi=1 & oplo=0; BitAddr & sfrbit & BitByteAddr; Rel8 { tmp:1 = 1<>sfrbit)&1)==0:1) goto Rel8; } @if BIT_OPS == "BIT_ADDRS" :JNB BitAddr,Rel8 is $(GROUP1) & ophi=3 & oplo=0; BitAddr & sfrbit & BitByteAddr; Rel8 { if (BitAddr == 0:1) goto Rel8; } @elif BIT_OPS == "PCODEOPS" :JNB BitAddr,Rel8 is $(GROUP1) & ophi=3 & oplo=0; BitAddr & sfrbit & BitByteAddr; Rel8 { if (get(BitAddr, BitByteAddr)==0:1) goto Rel8; } @elif BIT_OPS == "SHIFTS" :JNB BitAddr,Rel8 is $(GROUP1) & ophi=3 & oplo=0; BitAddr & sfrbit & BitByteAddr; Rel8 { if (((BitByteAddr>>sfrbit)&1)==0:1) goto Rel8; } @endif :JNC Rel8 is $(GROUP1) & ophi=5 & oplo=0; Rel8 { if ($(CY) == 0) goto Rel8; } :JNZ Rel8 is $(GROUP1) & ophi=7 & oplo=0; Rel8 { if (ACC != 0) goto Rel8; } :JZ Rel8 is $(GROUP1) & ophi=6 & oplo=0; Rel8 { if (ACC == 0) goto Rel8; } @if defined(MCS80390) :LCALL Addr24 is $(GROUP1) & ophi=1 & oplo=2; Addr24 { ret:$(PTRSIZE) = inst_next; push24(ret); call Addr24; } :LJMP Addr24 is $(GROUP1) & ophi=0 & oplo=2; Addr24 { goto Addr24; } @elif defined(MX51) :LCALL Addr16 is $(GROUP1) & ophi=1 & oplo=2; Addr16 { ret:2 = inst_next; push16(ret); call Addr16; } :LJMP 
Addr16 is $(GROUP1) & ophi=0 & oplo=2; Addr16 { goto Addr16; } @else :LCALL Addr16 is $(GROUP1) & ophi=1 & oplo=2; Addr16 { ret:$(PTRSIZE) = inst_next; push16(ret); call Addr16; } :LJMP Addr16 is $(GROUP1) & ophi=0 & oplo=2; Addr16 { goto Addr16; } @endif :MOV Areg,rn is $(GROUP2) & ophi=14 & rnfill=1 & rn & Areg { ACC = rn; } :MOV Areg,Direct is $(GROUP1) & ophi=14 & oplo=5 & Areg; Direct { ACC = Direct; } :MOV Areg,Ri is $(GROUP2) & ophi=14 & Areg & rifill=3 & Ri { ACC = Ri; } :MOV Areg,Data is $(GROUP1) & ophi=7 & oplo=4 & Areg; Data { ACC = Data; } :MOV rn,Areg is $(GROUP2) & ophi=15 & rnfill=1 & rn & Areg { rn = ACC; } :MOV rn,Direct is $(GROUP2) & ophi=10 & rnfill=1 & rn; Direct { rn = Direct; } :MOV rn,Data is $(GROUP2) & ophi=7 & rnfill=1 & rn; Data { rn = Data; } :MOV Direct,Areg is $(GROUP1) & ophi=15 & oplo=5 & Areg; Direct { Direct = ACC; } :MOV Direct,rn is $(GROUP2) & ophi=8 & rnfill=1 & rn; Direct { Direct = rn; } :MOV Direct2,Direct is $(GROUP1) & ophi=8 & oplo=5; Direct; Direct2 { Direct2 = Direct; } :MOV Direct,Ri is $(GROUP2) & ophi=8 & rifill=3 & Ri; Direct { Direct = Ri; } :MOV Direct,Data is $(GROUP1) & ophi=7 & oplo=5; Direct; Data { Direct = Data; } :MOV Ri,Areg is $(GROUP2) & ophi=15 & rifill=3 & Ri & Areg { Ri = ACC; } :MOV Ri,Direct is $(GROUP2) & ophi=10 & rifill=3 & Ri; Direct { Ri = Direct; } :MOV Ri,Data is $(GROUP2) & ophi=7 & rifill=3 & Ri; Data { Ri = Data; } @if defined(MCS80390) :MOV DPTRreg,Data24 is $(GROUP1) & ophi=9 & oplo=0 & DPTRreg; Data24 { DPTR = Data24; } @else :MOV DPTRreg,Data16 is $(GROUP1) & ophi=9 & oplo=0 & DPTRreg; Data16 { DPTR = Data16; } @endif :MOV CY,BitAddr is $(GROUP1) & CY & ophi=10 & oplo=2; BitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr {$(CY)= (BitByteAddr>>sfrbit)&1; } :MOV BitAddr,CY is $(GROUP1) & CY & ophi=9 & oplo=2; BitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr { BitByteAddr = BitByteAddr & (~(1<>sfrbit)&1; } :MOV BitAddr,CY is $(GROUP1) & CY & ophi=9 & oplo=2; BitAddr & 
sfrbit & BitByteAddr { BitByteAddr = BitByteAddr & (~(1<>sfrbit)&1); } :ORL CY,BitAddr2 is $(GROUP1) & CY & ophi=10 & oplo=0; BitAddr2 & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr {$(CY)=$(CY)| (((BitByteAddr>>sfrbit)&1)^1); } @if BIT_OPS == "BIT_ADDRS" :ORL CY,BitAddr is $(GROUP1) & CY & ophi=7 & oplo=2; BitAddr & sfrbit & BitByteAddr {$(CY)=$(CY)| BitAddr; } :ORL CY,BitAddr2 is $(GROUP1) & CY & ophi=10 & oplo=0; BitAddr2 & sfrbit & BitByteAddr {$(CY)=$(CY)| (BitAddr2^1); } @elif BIT_OPS == "PCODEOPS" :ORL CY,BitAddr is $(GROUP1) & CY & ophi=7 & oplo=2; BitAddr & sfrbit & BitByteAddr {$(CY)=$(CY)| get(BitAddr, BitByteAddr); } :ORL CY,BitAddr2 is $(GROUP1) & CY & ophi=10 & oplo=0; BitAddr2 & sfrbit & BitByteAddr {$(CY)=$(CY)| (get(BitAddr2, BitByteAddr)^1); } @elif BIT_OPS == "SHIFTS" :ORL CY,BitAddr is $(GROUP1) & CY & ophi=7 & oplo=2; BitAddr & sfrbit & BitByteAddr {$(CY)=$(CY)| ((BitByteAddr>>sfrbit)&1); } :ORL CY,BitAddr2 is $(GROUP1) & CY & ophi=10 & oplo=0; BitAddr2 & sfrbit & BitByteAddr {$(CY)=$(CY)| (((BitByteAddr>>sfrbit)&1)^1); } @endif :POP Direct is $(GROUP1) & ophi=13 & oplo=0; Direct { pop8(Direct); } :PUSH Direct is $(GROUP1) & ophi=12 & oplo=0; Direct { push8(Direct); } :RET is $(GROUP1) & ophi=2 & oplo=2 { @if defined(MCS251) || defined(MX51) pc:2 = 0; pop16(pc); pc3:3 = (inst_next & 0xff0000) + zext(pc); return[pc3]; @elif defined(MCS51) pc:2 = 0; pop16(pc); return[pc]; @elif defined(MCS80390) pc:3 = 0; pop24(pc); return[pc]; @endif } :RETI is $(GROUP1) & ophi=3 & oplo=2 { @if defined(MCS251) || defined(MX51) pc:2 = 0; pop16(pc); pc3:3 = (inst_next & 0xff0000) + zext(pc); return[pc3]; @elif defined(MCS51) pc:2 = 0; pop16(pc); return[pc]; @elif defined(MCS80390) pc:3 = 0; pop24(pc); return[pc]; @endif } :RL Areg is $(GROUP1) & ophi=2 & oplo=3 & Areg { ACC = (ACC<<1) | (ACC>>7); } :RLC Areg is $(GROUP1) & ophi=3 & oplo=3 & Areg { tmp : 1 = (ACC&0x80)>>7; ACC = (ACC<<1) | $(CY);$(CY)= tmp; } :RR Areg is $(GROUP1) & ophi=0 & oplo=3 & Areg { ACC = 
(ACC>>1) | (ACC<<7); } :RRC Areg is $(GROUP1) & ophi=1 & oplo=3 & Areg { tmp : 1 = ACC&1; ACC = (ACC>>1) | ($(CY)<<7);$(CY)= tmp; } :SETB CY is $(GROUP1) & CY & ophi=13 & oplo=3 { $(CY)=1; } :SETB BitAddr is $(GROUP1) & ophi=13 & oplo=2; BitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & BitByteAddr { BitByteAddr = BitByteAddr | (1<>4) | (ACC<<4); } :XCH Areg,rn is $(GROUP2) & ophi=12 & rnfill=1 & rn & Areg { tmp : 1 = ACC; ACC = rn; rn = tmp; } :XCH Areg,Direct is $(GROUP1) & ophi=12 & oplo=5 & Areg; Direct { tmp : 1 = ACC; ACC = Direct; Direct = tmp; } :XCH Areg,Ri is $(GROUP2) & ophi=12 & rifill=3 & Ri & Areg { tmp : 1 = ACC; ACC = Ri; Ri = tmp; } # TODO: This instruction appears to be in both GROUP2 & GROUP3 (always available) :XCHD Areg,Ri is ophi=13 & Areg & rifill=3 & Ri { tmp : 1 = ACC & 0xf; ACC = (ACC&0xf0) | (Ri&0xf); Ri = (Ri&0xf0) | tmp; } :XRL Areg,rn is $(GROUP2) & ophi=6 & rnfill=1 & rn & Areg { ACC = ACC ^ rn; } :XRL Areg,Direct is $(GROUP1) & ophi=6 & oplo=5 & Areg; Direct { ACC = ACC ^ Direct; } :XRL Areg,Ri is $(GROUP2) & ophi=6 & rifill=3 & Ri & Areg { ACC = ACC ^ Ri; } :XRL Areg,Data is $(GROUP1) & ophi=6 & oplo=4 & Areg; Data { ACC = ACC ^ Data; } :XRL Direct,Areg is $(GROUP1) & ophi=6 & oplo=2 & Areg; Direct { Direct = Direct ^ ACC; } :XRL Direct,Data is $(GROUP1) & ophi=6 & oplo=3; Direct; Data { Direct = Direct ^ Data; } ================================================ FILE: pypcode/processors/8051/data/languages/mx51.cspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/mx51.pspec ================================================ ================================================ FILE: pypcode/processors/8051/data/languages/mx51.sinc ================================================ # Extended mx51 instructions live here, so as to avoid further # complicating the main 8051 file. 
# All have 0xa5 as prefix, so subtract one define token TwoByteOp (8) b2op = (0,7) ; define token ThreeByteOp (16) b3op = (8,15) ; define token FourByteOp (24) b4op = (16,23) ; define token EcallDispTok (24) imm24 = (0,23) ; #################### # Note that PRi is used in little endian format, as R3 is the MSB attach variables PRi_revend [ R1R2R3 R5R6R7 ]; @define ENDIANSWAPFUNC "" @if defined(ENDIANSWAPFUNC) define pcodeop endian_swap; PRi: PRi_revend is PRi_revend { tmp:3 = endian_swap(PRi_revend); export tmp; } @else PRi: PRi_revend is PRi_sel=0 & PRi_revend { tmp:3 = (zext(R3) << 16) | (zext(R2) << 8) | zext(R1); export tmp; } PRi: PRi_revend is PRi_sel=1 & PRi_revend { tmp:3 = (zext(R7) << 16) | (zext(R6) << 8) | zext(R5); export tmp; } @endif #################### @ifdef OMIT_RETADDR macro push24(val) { val = val; } macro pop24(val) { val = val; } @else # stack grows up. macro push24(val) { ptr:3 = zext(SP) + 1 + $(STACKBASE); *[RAM]:3 ptr = val; SP = SP + 2; } macro pop24(val) { ptr:3 = zext(SP - 2) + $(STACKBASE); val = *[RAM]:3 ptr; SP = SP - 3; } @endif #################### eptrReg: EPTR is EPTR { export EPTR; } APlusEptr: "@"Areg"+"eptrReg is Areg & eptrReg { tmp:3 = EPTR + zext(ACC); export tmp; } ecallImmAddr: imm24 is imm24 { export *:1 imm24; } add_with_pr_const: emov_delta is emov_delta { tmp:1 = emov_delta; export tmp; } add_with_pr_const: "4" is emov_delta = 0 { tmp:1 = 4; export tmp; } EDirect: mainreg is bank=0 & mainreg { tmp:3 = mainreg + 0x7f0000; export *[RAM]:1 tmp; } EDirect: direct is bank=1 & direct { export *[ESFR]:1 direct; } EDirect: EPL is bank=1 & direct=0xfc & EPL { export EPL; } EDirect: EPM is bank=1 & direct=0xfd & EPM { export EPM; } EDirect: EPH is bank=1 & direct=0xfe & EPH { export EPH; } EDirect2: mainreg2 is bank2=0 & mainreg2 { tmp:3 = mainreg2 + 0x7f0000; export *[RAM]:1 tmp; } EDirect2: direct2 is bank2=1 & direct2 { export *[ESFR]:1 direct2; } EDirect2: EPL is bank2=1 & direct2=0xfc & EPL { export EPL; } EDirect2: EPM 
is bank2=1 & direct2=0xfd & EPM { export EPM; } EDirect2: EPH is bank2=1 & direct2=0xfe & EPH { export EPH; } # Continuing with pattern via copying from stock 8051 sleighspec. # Note that there is a known bug with the Bit addressing of the SFR. EBitAddr: bitaddr is bitbank=1 & sfrbyte & sfrbit [ bitaddr =(sfrbyte << 3)+sfrbit; ] { export *[EBITS]:1 bitaddr; } EBitAddr: bitaddr is bitbank=0 & lowbyte & sfrbit [ bitaddr =(lowbyte << 3)+sfrbit; ] { export *[EBITS]:1 bitaddr; } EBitAddr2: "/"bitaddr is bitbank=1 & sfrbyte & sfrbit [ bitaddr =(sfrbyte << 3)+sfrbit; ] { export *[EBITS]:1 bitaddr; } EBitAddr2: "/"bitaddr is bitbank=0 & lowbyte & sfrbit [ bitaddr =(lowbyte << 3)+sfrbit; ] { export *[EBITS]:1 bitaddr; } EBitByteAddr: byteaddr is bitbank=1 & sfrbyte & sfrbit [ byteaddr =(sfrbyte << 3); ] { export *[ESFR]:1 byteaddr; } EBitByteAddr: byteaddr is bitbank=0 & lowbyte & sfrbit [ byteaddr = lowbyte + 0x20; ] { tmp:3 = byteaddr + 0x7f0000; export *[RAM]:1 tmp; } #################### :inc EDirect is opfull=0xa5; ophi=0 & oplo=5; EDirect { EDirect = EDirect + 1; } :dec EDirect is opfull=0xa5; opfull=0x15; EDirect { EDirect = EDirect - 1; } :add Areg,EDirect is opfull=0xa5; opfull=0x25 & Areg; EDirect { addflags(ACC,EDirect); ACC = ACC + EDirect; resultflags(ACC); } :addc Areg,EDirect is opfull=0xa5; opfull=0x35 & Areg; EDirect { tmp:1 =$(CY)+ EDirect; addflags(ACC,tmp); ACC = ACC + tmp; resultflags(ACC); } :orl Areg,EDirect is opfull=0xa5; opfull=0x45 & Areg; EDirect { ACC = ACC | EDirect; } :anl Areg,EDirect is opfull=0xa5; opfull=0x55 & Areg; EDirect { ACC = ACC & EDirect; resultflags(ACC); } :xrl Areg,EDirect is opfull=0xa5; opfull=0x65 & Areg; EDirect { ACC = ACC ^ EDirect; } :subb Areg,EDirect is opfull=0xa5; opfull=0x95 & Areg; EDirect { tmp:1 = EDirect+$(CY); subflags(ACC,tmp); ACC = ACC - tmp; } :xch Areg,EDirect is opfull=0xa5; opfull=0xc5 & Areg; EDirect { tmp:1 = ACC; ACC = EDirect; EDirect = tmp; } :mov Areg,EDirect is opfull=0xa5; opfull=0xe5 & Areg; 
EDirect { ACC = EDirect; } :mov EDirect,Areg is opfull=0xa5; opfull=0xf5 & Areg; EDirect { EDirect = ACC; } :mov EDirect,rn is opfull=0xa5; ophi=0x8 & rnfill=1 & rn; EDirect { EDirect = rn; } :mov rn,EDirect is opfull=0xa5; ophi=0xa & rnfill=1 & rn; EDirect { rn = EDirect; } :mov EDirect2,EDirect is opfull=0xa5; ophi=8 & oplo=5; EDirect; EDirect2 { EDirect2 = EDirect; } :mov EDirect,Data is opfull=0xa5; ophi=7 & oplo=5; EDirect; Data { EDirect = Data; } :mov EDirect,Ri is opfull=0xa5; ophi=8 & rifill=3 & Ri; EDirect { EDirect = Ri; } :mov Ri,EDirect is opfull=0xa5; ophi=10 & rifill=3 & Ri; EDirect { Ri = EDirect; } :orl EDirect,Areg is opfull=0xa5; ophi=4 & oplo=2 & Areg; EDirect { EDirect = EDirect | ACC; } :anl EDirect,Areg is opfull=0xa5; ophi=5 & oplo=2 & Areg; EDirect { tmp:1 = EDirect & ACC; EDirect = tmp; resultflags(tmp); } :xrl EDirect,Areg is opfull=0xa5; ophi=6 & oplo=2 & Areg; EDirect { EDirect = EDirect ^ ACC; } :xrl EDirect,Data is opfull=0xa5; ophi=6 & oplo=3; EDirect; Data { EDirect = EDirect ^ Data; } :anl EDirect,Data is opfull=0xa5; ophi=5 & oplo=3; EDirect; Data { tmp:1 = EDirect & Data; EDirect = tmp; resultflags(tmp); } :orl EDirect,Data is opfull=0xa5; ophi=4 & oplo=3 & Areg; EDirect; Data { EDirect = EDirect | Data; } :push EDirect is opfull=0xa5; opfull=0xc0; EDirect { push8(EDirect); } :pop EDirect is opfull=0xa5; opfull=0xd0; EDirect { pop8(EDirect); } :cjne Areg,EDirect,Rel8 is opfull=0xa5; ophi=11 & oplo=5 & Areg; EDirect; Rel8 { compflags(ACC,EDirect); if (ACC!=EDirect) goto Rel8; } :djnz EDirect,Rel8 is opfull=0xa5; ophi=13 & oplo=5; EDirect; Rel8 { EDirect = EDirect - 1; if (EDirect!=0) goto Rel8; } # EPTR operations :ejmp ecallImmAddr is opfull=0xa5; opfull=0x02; ecallImmAddr { goto ecallImmAddr; } :ecall ecallImmAddr is opfull=0xa5; opfull=0x12; ecallImmAddr { ret:3 = inst_next; push24(ret); call ecallImmAddr; } :mov eptrReg,ecallImmAddr is opfull=0xa5; opfull=0x90; ecallImmAddr & eptrReg & imm24 { EPTR = imm24; } :eret is 
opfull=0xa5; opfull=0x22 { pc:3 = 0; pop24(pc); return[pc]; } :jmp APlusEptr is opfull=0xa5; opfull=0x73 & APlusEptr { # this is correct, but causes disassembler problems # goto APlusEptr; # added additional indirection to stop disassembler problems. goto [APlusEptr]; } :movx Areg",@"eptrReg is opfull=0xa5; opfull=0xe0 & Areg & eptrReg { ACC = *:1 EPTR; } :movx "@"eptrReg,Areg is opfull=0xa5; opfull=0xf0 & Areg & eptrReg { *:1 EPTR = ACC; } :movc Areg,APlusEptr is opfull=0xa5; opfull=0x93 & Areg & APlusEptr { ACC = *:1 APlusEptr; } :inc EPTR is opfull=0xa5; opfull=0xa3 & EPTR { EPTR = EPTR + 1; } # PRi operations :emov Areg",@"PRi"+"emov_delta is opfull=0xa5; ophi=4 & PRi & emov_delta & Areg { tmp:3 = zext(PRi) + emov_delta; ACC = *:1 tmp; } :emov "@"PRi"+"emov_delta,Areg is opfull=0xa5; ophi=5 & PRi & emov_delta & Areg { tmp:3 = PRi + emov_delta; *:1 tmp = ACC; } :add PRi_revend,add_with_pr_const is opfull=0xa5; ophi=6 & PRi_revend & PRi & add_with_pr_const { x:3 = zext(add_with_pr_const); tmp:3 = PRi + x; @if defined(ENDIANSWAPFUNC) y:3 = endian_swap(tmp); @else y:3 = (tmp << 16) | (tmp & 0x00ff00) | zext(tmp >> 16); @endif PRi_revend = y; } # bit operations :anl CY,EBitAddr is opfull=0xa5; CY & ophi=8 & oplo=2; EBitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr {tmp:1 = EBitByteAddr; $(CY)=$(CY)& ((tmp>>sfrbit)&1); resultflags(tmp); } :anl CY,EBitAddr2 is opfull=0xa5; CY & ophi=11 & oplo=0; EBitAddr2 & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr {tmp:1 = EBitByteAddr; $(CY)=$(CY)& (~((tmp>>sfrbit)&1)); } @if BIT_OPS == "BIT_ADDRS" :anl CY,EBitAddr is opfull=0xa5; CY & ophi=8 & oplo=2; EBitAddr & sfrbit & EBitByteAddr {$(CY)=$(CY)& EBitAddr; } :anl CY,EBitAddr2 is opfull=0xa5; CY & ophi=11 & oplo=0; EBitAddr2 & sfrbit & EBitByteAddr {$(CY)=$(CY)& ~EBitAddr2; } @elif BIT_OPS == "PCODEOPS" :anl CY,EBitAddr is opfull=0xa5; CY & ophi=8 & oplo=2; EBitAddr & sfrbit & EBitByteAddr {$(CY)=$(CY)& get(EBitAddr, EBitByteAddr); } :anl CY,EBitAddr2 is opfull=0xa5; 
CY & ophi=11 & oplo=0; EBitAddr2 & sfrbit & EBitByteAddr {$(CY)=$(CY)& (get(EBitAddr2, EBitByteAddr)^1); } @elif BIT_OPS == "SHIFTS" :anl CY,EBitAddr is opfull=0xa5; CY & ophi=8 & oplo=2; EBitAddr & sfrbit & EBitByteAddr {$(CY)=$(CY)& ((EBitByteAddr>>sfrbit)&1); } :anl CY,EBitAddr2 is opfull=0xa5; CY & ophi=11 & oplo=0; EBitAddr2 & sfrbit & EBitByteAddr {$(CY)=$(CY)& (~((EBitByteAddr>>sfrbit)&1)); } @endif :clr EBitAddr is opfull=0xa5; ophi=12 & oplo=2; EBitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr { tmp:1 = ~(1<>sfrbit)&1) == 1:1) goto Rel8; } :jbc EBitAddr,Rel8 is opfull=0xa5; ophi=1 & oplo=0; EBitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr; Rel8 { tmp:1 = 1<>sfrbit)&1) == 1:1) goto Rel8; } :jbc EBitAddr,Rel8 is opfull=0xa5; ophi=1 & oplo=0; EBitAddr & sfrbit & EBitByteAddr; Rel8 { tmp:1 = 1<>sfrbit)&1)==0:1) goto Rel8; } @if BIT_OPS == "BIT_ADDRS" :jnb EBitAddr,Rel8 is opfull=0xa5; ophi=3 & oplo=0; EBitAddr & sfrbit & EBitByteAddr; Rel8 { if (EBitAddr == 0:1) goto Rel8; } @elif BIT_OPS == "PCODEOPS" :jnb EBitAddr,Rel8 is opfull=0xa5; ophi=3 & oplo=0; EBitAddr & sfrbit & EBitByteAddr; Rel8 { if (get(EBitAddr, EBitByteAddr)==0:1) goto Rel8; } @elif BIT_OPS == "SHIFTS" :jnb EBitAddr,Rel8 is opfull=0xa5; ophi=3 & oplo=0; EBitAddr & sfrbit & EBitByteAddr; Rel8 { if (((EBitByteAddr>>sfrbit)&1)==0:1) goto Rel8; } @endif :mov CY,EBitAddr is opfull=0xa5; CY & ophi=10 & oplo=2; EBitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr {$(CY)= (EBitByteAddr>>sfrbit)&1; } :mov EBitAddr,CY is opfull=0xa5; CY & ophi=9 & oplo=2; EBitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr { EBitByteAddr = EBitByteAddr & (~(1<>sfrbit)&1; } :mov EBitAddr,CY is opfull=0xa5; CY & ophi=9 & oplo=2; EBitAddr & sfrbit & EBitByteAddr { EBitByteAddr = EBitByteAddr & (~(1<>sfrbit)&1); } :orl CY,EBitAddr2 is opfull=0xa5; CY & ophi=10 & oplo=0; EBitAddr2 & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr {$(CY)=$(CY)| (((EBitByteAddr>>sfrbit)&1)^1); } @if BIT_OPS == 
"BIT_ADDRS" :orl CY,EBitAddr is opfull=0xa5; CY & ophi=7 & oplo=2; EBitAddr & sfrbit & EBitByteAddr {$(CY)=$(CY)| EBitAddr; } :orl CY,EBitAddr2 is opfull=0xa5; CY & ophi=10 & oplo=0; EBitAddr2 & sfrbit & EBitByteAddr {$(CY)=$(CY)| (EBitAddr2^1); } @elif BIT_OPS == "PCODEOPS" :orl CY,EBitAddr is opfull=0xa5; CY & ophi=7 & oplo=2; EBitAddr & sfrbit & EBitByteAddr {$(CY)=$(CY)| get(EBitAddr, EBitByteAddr); } :orl CY,EBitAddr2 is opfull=0xa5; CY & ophi=10 & oplo=0; EBitAddr2 & sfrbit & EBitByteAddr {$(CY)=$(CY)| (get(EBitAddr2, EBitByteAddr)^1); } @elif BIT_OPS == "SHIFTS" :orl CY,EBitAddr is opfull=0xa5; CY & ophi=7 & oplo=2; EBitAddr & sfrbit & EBitByteAddr {$(CY)=$(CY)| ((EBitByteAddr>>sfrbit)&1); } :orl CY,EBitAddr2 is opfull=0xa5; CY & ophi=10 & oplo=0; EBitAddr2 & sfrbit & EBitByteAddr {$(CY)=$(CY)| (((EBitByteAddr>>sfrbit)&1)^1); } @endif :setb EBitAddr is opfull=0xa5; ophi=13 & oplo=2; EBitAddr & bitaddr57=7 & sfrbit3=0 & sfrbit & EBitByteAddr { EBitByteAddr = EBitByteAddr | (1< 8051:BE:16:default 8051 default 16 ================================================ FILE: pypcode/processors/8051/data/languages/old/8051v1.trans ================================================ 8051:BE:16:default 8051:BE:16:default ================================================ FILE: pypcode/processors/8051/data/manuals/8051.idx ================================================ @8xc251sx_um.pdf [8XC251SA, 8XC251SB,8XC251SP, 8XC251SQ Embedded Microcontroller User�s Manual, May 1996] ACALL, 278 ADD, 279 ADDC, 284 AJMP, 286 ANL, 286 CJNE, 294 CLR, 295 CMP, 295 CPL, 301 DA, 303 DEC, 304 DIV, 307 DJNZ, 309 ECALL, 311 EJMP, 312 ERET, 313 INC, 313 JB, 317 JBC, 318 JC, 319 JE, 320 JG, 320 JLE, 321 JMP, 322 JNB, 322 JNC, 324 JNE, 324 JNZ, 325 JSG, 326 JSGE, 326 JSL, 327 JSLE, 328 JZ, 328 LCALL, 329 LJMP, 330 MOV, 331 MOVC, 348 MOVH, 349 MOVS, 349 MOVX, 350 MOVZ, 352 MUL, 353 NOP, 355 ORL, 355 POP, 362 PUSH, 364 RET, 366 RETI, 367 RL, 368 RLC, 368 RR, 369 RRC, 370 SETB, 370 SJMP, 371 SLL, 372 
SRA, 373 SRL, 374 SUB, 374 SUBB, 379 SWAP, 381 TRAP, 381 XCH, 382 XCHD, 383 XRL, 384 ================================================ FILE: pypcode/processors/8085/data/languages/8085.cspec ================================================ ================================================ FILE: pypcode/processors/8085/data/languages/8085.ldefs ================================================ Intel 8085 ================================================ FILE: pypcode/processors/8085/data/languages/8085.pspec ================================================ ================================================ FILE: pypcode/processors/8085/data/languages/8085.slaspec ================================================ # sleigh specification file for Intel 8085 define endian=little; define alignment=1; define space ram type=ram_space size=2 default; define space io type=ram_space size=1; define space register type=register_space size=1; define register offset=0x00 size=1 [ F A C B E D L H ]; define register offset=0x00 size=2 [ AF BC DE HL ]; define register offset=0x10 size=1 [ A_ F_ B_ C_ D_ E_ H_ L_ ]; # Alternate registers define register offset=0x10 size=2 [ AF_ BC_ DE_ HL_ ]; # Alternate registers define register offset=0x20 size=2 [ PC SP ]; # Flag bits # CY: Carry # P: Parity/Overflow # AC: Half Carry (Auxiliary flag) # Z: Zero # S: Sign define register offset=0x30 size=1 [ S_flag Z_flag AC_flag P_flag CY_flag ]; define token opbyte (8) op0_8 = (0,7) op6_2 = (6,7) dRegPair4_2 = (4,5) pRegPair4_2 = (4,5) sRegPair4_2 = (4,5) qRegPair4_2 = (4,5) rRegPair4_2 = (4,5) reg3_3 = (3,5) bits3_3 = (3,5) bits0_4 = (0,3) reg0_3 = (0,2) bits0_3 = (0,2) ; define token data8 (8) imm8 = (0,7) sign8 = (7,7) simm8 = (0,7) signed ; define token data16 (16) imm16 = (0,15) sign16 = (15,15) simm16 = (0,15) signed ; attach variables [ reg0_3 reg3_3 ] [ B C D E H L _ A ]; attach variables [ sRegPair4_2 dRegPair4_2 ] [ BC DE HL SP ]; attach variables [ qRegPair4_2 ] [ BC DE HL AF ]; 
################################################################ # Pseudo Instructions ################################################################ define pcodeop BCDadjust; define pcodeop hasEvenParity; define pcodeop disableMaskableInterrupts; define pcodeop enableMaskableInterrupts; define pcodeop readInterruptMask; define pcodeop setInterruptMask; ################################################################ # Macros ################################################################ macro setResultFlags(result) { Z_flag = (result == 0); S_flag = (result s< 0); } macro setAddCarryFlags(op1,op2) { CY_flag = (carry(op1,zext(CY_flag)) || carry(op2,op1 + zext(CY_flag))); # P_flag = (scarry(op1,CY_flag) || scarry(op2,op1 + CY_flag)); # AC_flag = ?? } macro setAddFlags(op1,op2) { CY_flag = carry(op1,op2); # P_flag = scarry(op1,op2); # AC_flag = ?? } macro setSubtractCarryFlags(op1,op2) { local notC = ~CY_flag; CY_flag = ((op1 < sext(notC)) || (op2 < (op1 - sext(notC)))); } macro setSubtractFlags(op1,op2) { CY_flag = (op1 < op2); } macro push16(val16) { SP = SP - 2; *:2 SP = val16; } macro pop16(ret16) { ret16 = *:2 SP; SP = SP + 2; } ################################################################ Mem8: (imm16) is imm16 { export *:1 imm16; } Mem16: (imm16) is imm16 { export *:2 imm16; } Addr16: imm16 is imm16 { export *:1 imm16; } RstAddr: loc is bits3_3 [ loc = bits3_3 << 3; ] { export *:1 loc; } IOAddr8: (imm8) is imm8 { export *[io]:1 imm8; } cc: "NZ" is bits3_3=0x0 { c:1 = (Z_flag == 0); export c; } cc: "Z" is bits3_3=0x1 { export Z_flag; } cc: "NC" is bits3_3=0x2 { c:1 = (CY_flag == 0); export c; } cc: "C" is bits3_3=0x3 { export CY_flag; } cc: "PO" is bits3_3=0x4 { c:1 = (P_flag == 0); export c; } cc: "PE" is bits3_3=0x5 { export P_flag; } cc: "P" is bits3_3=0x6 { c:1 = (S_flag == 0); export c; } cc: "M" is bits3_3=0x7 { export S_flag; } ################################################################ :MOV reg3_3,reg0_3 is op6_2=0x1 & reg3_3 & reg0_3 { 
reg3_3 = reg0_3; } :MVI reg3_3,imm8 is op6_2=0x0 & reg3_3 & bits0_3=0x6; imm8 { reg3_3 = imm8; } :MOV reg3_3,(HL) is op6_2=0x1 & reg3_3 & bits0_3=0x6 & HL { ptr:2 = HL; reg3_3 = *:1 ptr; } :MOV (HL),reg0_3 is op6_2=0x1 & bits3_3=0x6 & reg0_3 & HL { ptr:2 = HL; *:1 ptr = reg0_3; } :MVI (HL),imm8 is op0_8=0x36 & HL; imm8 { ptr:2 = HL; *:1 ptr = imm8; } :LDAX (BC) is op0_8=0x0a & BC { ptr:2 = BC; A = *:1 ptr; } :LDAX (DE) is op0_8=0x1a & DE { ptr:2 = DE; A = *:1 ptr; } :LDA Mem8 is op0_8=0x3a; Mem8 { A = Mem8; } :STAX (BC) is op0_8=0x2 & BC { ptr:2 = BC; *:1 ptr = A; } :STAX (DE) is op0_8=0x12 & DE { ptr:2 = DE; *:1 ptr = A; } :STA Mem8 is op0_8=0x32; Mem8 { Mem8 = A; } :LXI dRegPair4_2,imm16 is op6_2=0x0 & dRegPair4_2 & bits0_4=0x1; imm16 { dRegPair4_2 = imm16; } :LHLD Mem16 is op0_8=0x2a; Mem16 { HL = Mem16; } :SHLD Mem16 is op0_8=0x22; Mem16 { Mem16 = HL; } :SPHL is op0_8=0xf9 { SP = HL; } :PUSH qRegPair4_2 is op6_2=0x3 & qRegPair4_2 & bits0_4=0x5 { push16(qRegPair4_2); } :POP qRegPair4_2 is op6_2=0x3 & qRegPair4_2 & bits0_4=0x1 { pop16(qRegPair4_2); } :XCHG is op0_8=0xeb { tmp:2 = DE; DE = HL; HL = tmp; } :XTHL is op0_8=0xe3 { tmp:2 = *:2 SP; *:2 SP = HL; HL = tmp; } :ADD reg0_3 is op6_2=0x2 & bits3_3=0x0 & reg0_3 { setAddFlags(A,reg0_3); A = A + reg0_3; setResultFlags(A); } :ADI imm8 is op0_8=0xc6; imm8 { setAddFlags(A,imm8); A = A + imm8; setResultFlags(A); } :ADD (HL) is op0_8=0x86 & HL { val:1 = *:1 HL; setAddFlags(A,val); A = A + val; setResultFlags(A); } :ADC reg0_3 is op6_2=0x2 & bits3_3=0x1 & reg0_3 { setAddCarryFlags(A,reg0_3); A = A + reg0_3 + CY_flag; setResultFlags(A); } :ACI imm8 is op0_8=0xce; imm8 { setAddCarryFlags(A,imm8); A = A + imm8 + CY_flag; setResultFlags(A); } :ADC (HL) is op0_8=0x8e & HL { val:1 = *:1 HL; setAddCarryFlags(A,val); A = A + val + CY_flag; setResultFlags(A); } :SUB reg0_3 is op6_2=0x2 & bits3_3=0x2 & reg0_3 { setSubtractFlags(A,reg0_3); A = A - reg0_3; setResultFlags(A); } :SUI imm8 is op0_8=0xd6; imm8 { 
setSubtractFlags(A,imm8); A = A - imm8; setResultFlags(A); } :SUB (HL) is op0_8=0x96 & HL { val:1 = *:1 HL; setSubtractFlags(A,val); A = A - val; setResultFlags(A); } :SBB reg0_3 is op6_2=0x2 & bits3_3=0x3 & reg0_3 { setSubtractCarryFlags(A,reg0_3); A = A - reg0_3 - CY_flag; setResultFlags(A); } :SBI imm8 is op0_8=0xde; imm8 { setSubtractCarryFlags(A,imm8); A = A - imm8 - CY_flag; setResultFlags(A); } :SBB (HL) is op0_8=0x9e & HL { val:1 = *:1 HL; setSubtractCarryFlags(A,val); A = A - val - CY_flag; setResultFlags(A); } :ANA reg0_3 is op6_2=0x2 & bits3_3=0x4 & reg0_3 { AC_flag = 1; CY_flag = 0; P_flag = 0; A = A & reg0_3; setResultFlags(A); } :ANI imm8 is op0_8=0xe6; imm8 { AC_flag = 1; CY_flag = 0; P_flag = 0; A = A & imm8; setResultFlags(A); } :ANA (HL) is op0_8=0xa6 & HL { AC_flag = 1; CY_flag = 0; P_flag = 0; A = A & *:1 HL; setResultFlags(A); } :ORA reg0_3 is op6_2=0x2 & bits3_3=0x6 & reg0_3 { AC_flag = 0; CY_flag = 0; P_flag = 0; A = A | reg0_3; setResultFlags(A); } :ORI imm8 is op0_8=0xf6; imm8 { AC_flag = 0; CY_flag = 0; P_flag = 0; A = A | imm8; setResultFlags(A); } :ORA (HL) is op0_8=0xb6 & HL { AC_flag = 0; CY_flag = 0; P_flag = 0; A = A | *:1 HL; setResultFlags(A); } :XRA reg0_3 is op6_2=0x2 & bits3_3=0x5 & reg0_3 { AC_flag = 0; CY_flag = 0; P_flag = 0; A = A ^ reg0_3; setResultFlags(A); } :XRA (HL) is op0_8=0xae & HL { AC_flag = 0; CY_flag = 0; P_flag = 0; A = A ^ *:1 HL; setResultFlags(A); } :XRI imm8 is op0_8=0xee; imm8 { AC_flag = 0; CY_flag = 0; P_flag = 0; A = A ^ imm8; setResultFlags(A); } :CMP reg0_3 is op6_2=0x2 & bits3_3=0x7 & reg0_3 { setSubtractFlags(A,reg0_3); cmp:1 = A - reg0_3; setResultFlags(cmp); } :CPI imm8 is op0_8=0xfe; imm8 { setSubtractFlags(A,imm8); cmp:1 = A - imm8; setResultFlags(cmp); } :CMP (HL) is op0_8=0xbe & HL { val:1 = *:1 HL; setSubtractFlags(A,val); cmp:1 = A - val; setResultFlags(cmp); } :INR reg3_3 is op6_2=0x0 & reg3_3 & bits0_3=0x4 { P_flag = (reg3_3 == 0x7f); reg3_3 = reg3_3 + 1; setResultFlags(reg3_3); } :INR (HL) 
is op0_8=0x34 & HL { val:1 = *:1 HL; P_flag = (val == 0x7f); val = val + 1; *:1 HL = val; setResultFlags(val); } :DCR reg3_3 is op6_2=0x0 & reg3_3 & bits0_3=0x5 { P_flag = (reg3_3 == 0x80); reg3_3 = reg3_3 - 1; setResultFlags(reg3_3); } :DCR (HL) is op0_8=0x35 & HL { val:1 = *:1 HL; P_flag = (val == 0x80); val = val - 1; *:1 HL = val; setResultFlags(val); } :DAA is op0_8=0x27 { A = BCDadjust(A); setResultFlags(A); P_flag = hasEvenParity(A); } :CMA is op0_8=0x2f { A = ~A; } :CMC is op0_8=0x3f { CY_flag = !CY_flag; } :STC is op0_8=0x37 { CY_flag = 1; AC_flag = 0; } :NOP is op0_8=0x0 { } :HALT is op0_8=0x76 { goto inst_start; } :DI is op0_8=0xf3 { # IFF1 = 0; # IFF2 = 0; disableMaskableInterrupts(); } :EI is op0_8=0xfb { # IFF1 = 1; # IFF2 = 1; enableMaskableInterrupts(); } :RIM is op0_8=0x20 { A = readInterruptMask(); } :SIM is op0_8=0x30 { setInterruptMask(A); } :DAD HL,sRegPair4_2 is op6_2=0x0 & sRegPair4_2 & bits0_4=0x9 & HL { setAddFlags(HL,sRegPair4_2); HL = HL + sRegPair4_2; } :INX sRegPair4_2 is op6_2=0x0 & sRegPair4_2 & bits0_4=0x3 { sRegPair4_2 = sRegPair4_2 + 1; } :DCX sRegPair4_2 is op6_2=0x0 & sRegPair4_2 & bits0_4=0xb { sRegPair4_2 = sRegPair4_2 - 1; } :RLC is op0_8=0x07 { CY_flag = (A >> 7); A = (A << 1) | CY_flag; AC_flag = 0; } :RAL is op0_8=0x17 { nextC:1 = (A >> 7); A = (A << 1) | CY_flag; CY_flag = nextC; AC_flag = 0; } :RRC is op0_8=0x0f { CY_flag = (A & 1); A = (A >> 1) | (CY_flag << 7); AC_flag = 0; } :RAR is op0_8=0x1f { nextC:1 = (A & 1); A = (A >> 1) | (CY_flag << 7); CY_flag = nextC; AC_flag = 0; } :JMP Addr16 is op0_8=0xc3; Addr16 { goto Addr16; } :J^cc Addr16 is op6_2=0x3 & cc & bits0_3=0x2; Addr16 { if (cc) goto Addr16; } :PCHL is op0_8=0xe9 { goto [HL]; } :CALL Addr16 is op0_8=0xcd; Addr16 { tmp:2 = inst_next; push16(tmp); call Addr16; } :C^cc Addr16 is op6_2=0x3 & cc & bits0_3=0x4; Addr16 { if (!cc) goto inst_next; tmp:2 = inst_next; push16(tmp); call Addr16; } :RET is op0_8=0xc9 { tmp:2 = 0; pop16(tmp); return [tmp]; } :R^cc is 
op6_2=0x3 & cc & bits0_3=0x0 { if (!cc) goto inst_next; tmp:2 = 0; pop16(tmp); return [tmp]; } :RST RstAddr is op6_2=0x3 & RstAddr & bits0_3=0x7 { tmp:2 = inst_next; push16(tmp); call RstAddr; } :IN IOAddr8 is op0_8=0xdb; IOAddr8 { A = IOAddr8; } :OUT IOAddr8 is op0_8=0xd3; IOAddr8 { IOAddr8 = A; } ================================================ FILE: pypcode/processors/AARCH64/data/aarch64-pltThunks.xml ================================================ ...10000 0x.. 0x.. 1..10000 # adrp x16, PLTGOT + n * 8 0x11 ......10 01...... 0xf9 # ldr x17, [x16, PLTGOT + n * 8] 0x10 ......10 00...... 0x91 # add x16, x16, :lo12:PLTGOT + n * 8 0x20 0x02 0x1f 0xd6 # br x17 ...10000 0x.. 0x.. 1..10000 # adrp x16, PLTGOT + n * 4 0x11 ......10 01...... 0xb9 # ldr x17, [x16, PLTGOT + n * 4] 0x10 ......10 00...... 0x11 # add x16, x16, :lo12:PLTGOT + n * 4 0x20 0x02 0x1f 0xd6 # br x17 ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64.cspec ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64.dwarf ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64.ldefs ================================================ Generic ARM64 v8.5-A LE instructions, LE data, missing some 8.5 vector Generic ARM64 v8.5-A LE instructions, BE data, missing some 8.5 vector Generic ARM64 v8.5-A LE instructions, LE data, ilp32 Generic ARM64 v8.5-A LE instructions, BE data, ilp32 ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64.opinion ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64.pspec ================================================ ================================================ FILE: 
pypcode/processors/AARCH64/data/languages/AARCH64.slaspec ================================================ @define DATA_ENDIAN "little" @include "AARCH64instructions.sinc" ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64BE.slaspec ================================================ @define DATA_ENDIAN "big" @include "AARCH64instructions.sinc" ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_AMXext.sinc ================================================ # # Apple AARCH64 extended matrix instructions # Contents based on evolving information published on Web # # define pcodeop __amx_ldx; define pcodeop __amx_ldy; define pcodeop __amx_stx; define pcodeop __amx_sty; define pcodeop __amx_ldz; define pcodeop __amx_stz; define pcodeop __amx_ldzi; define pcodeop __amx_stzi; define pcodeop __amx_extrx; define pcodeop __amx_extry; define pcodeop __amx_fma64; define pcodeop __amx_fms64; define pcodeop __amx_fma32; define pcodeop __amx_fms32; define pcodeop __amx_mac16; define pcodeop __amx_fma16; define pcodeop __amx_fms16; define pcodeop __amx_enable; define pcodeop __amx_disable; define pcodeop __amx_vecint; define pcodeop __amx_vecfp; define pcodeop __amx_matint; define pcodeop __amx_matfp; define pcodeop __amx_genlut; with : ImmS_ImmR_TestSet=1 { AMXAddr: is Rd_GPR64 { addr:8 = Rd_GPR64 & 0x00FFFFFFFFFFFFFF; export addr; } AMXRegOff: is Rd_GPR64 { registerOff:8 = (Rd_GPR64 >> 56) & 0x1F; export registerOff; } AMXSize: is Rd_GPR64 { local size = ((Rd_GPR64 >> 62) & 1); size = zext(size == 0) * 0x40 | zext(size ==1 ) * 0x80; export size; } :__amx_ldx Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=0 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_ldx(Rd_GPR64); } :__amx_ldy Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=1 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_ldy(Rd_GPR64); } :__amx_stx Rd_GPR64 is b_2431=0x00 & 
b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=2 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_stx(Rd_GPR64); } :__amx_sty Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=3 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_sty(Rd_GPR64); } :__amx_ldz Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=4 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_ldz(Rd_GPR64); } :__amx_stz Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=5 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_stz(Rd_GPR64); } :__amx_ldzi Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=6 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_ldzi(Rd_GPR64); } :__amx_stzi Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=7 & AMXAddr & AMXRegOff & AMXSize & Rd_GPR64 { __amx_stzi(Rd_GPR64); } :__amx_extrx Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=8 & Rd_GPR64 { __amx_extrx(Rd_GPR64); } :__amx_extry Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=9 & Rd_GPR64 { __amx_extry(Rd_GPR64); } :__amx_fma64 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=10 & Rd_GPR64 { __amx_fma64(Rd_GPR64); } :__amx_fms64 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=11 & Rd_GPR64 { __amx_fms64(Rd_GPR64); } :__amx_fma32 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=12 & Rd_GPR64 { __amx_fma32(Rd_GPR64); } :__amx_fms32 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=13 & Rd_GPR64 { __amx_fms32(Rd_GPR64); } :__amx_mac16 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=14 & Rd_GPR64 { __amx_mac16(Rd_GPR64); } :__amx_fma16 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=15 & Rd_GPR64 { __amx_fma16(Rd_GPR64); } :__amx_fms16 Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=16 & Rd_GPR64 { __amx_fms16(Rd_GPR64); } :__amxdisable is 
b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=17 & b_0004=1 { __amx_disable(); } :__amxenable is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=17 & b_0004=0 { __amx_enable(); } :__amx_vecint Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=18 & Rd_GPR64 { __amx_vecint(Rd_GPR64); } :__amx_vecfp Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=19 & Rd_GPR64 { __amx_vecfp(Rd_GPR64); } :__amx_matint Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=20 & Rd_GPR64 { __amx_matint(Rd_GPR64); } :__amx_matfp Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=21 & Rd_GPR64 { __amx_matfp(Rd_GPR64); } :__amx_genlut Rd_GPR64 is b_2431=0x00 & b_1623=0x20 & b_1215=1 & b_1011=0 & b_0509=22 & Rd_GPR64 { __amx_genlut(Rd_GPR64); } } ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_AppleSilicon.slaspec ================================================ @define DATA_ENDIAN "little" @include "AARCH64instructions.sinc" @include "AARCH64_AMXext.sinc" ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_apple.cspec ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_base_PACoptions.sinc ================================================ autda__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthDA(Rd_GPR64, Rn_GPR64xsp); } autda__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthDA(Rd_GPR64, Rn_GPR64xsp); } autda__PACpart: "hide" is ShowPAC=0 { } autdza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthDA(Rd_GPR64, xzr); } autdza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthDA(Rd_GPR64, xzr); } autdza__PACpart: "hide" is ShowPAC=0 { } 
autdb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthDB(Rd_GPR64, Rn_GPR64xsp); } autdb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthDB(Rd_GPR64, Rn_GPR64xsp); } autdb__PACpart: "hide" is ShowPAC=0 { } autdzb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthDB(Rd_GPR64, xzr); } autdzb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthDB(Rd_GPR64, xzr); } autdzb__PACpart: "hide" is ShowPAC=0 { } autia__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthIA(Rd_GPR64, Rn_GPR64xsp); } autia__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { AuthIA(Rd_GPR64, Rn_GPR64xsp); } autia__PACpart: "hide" is ShowPAC=0 { } autiza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthIA(Rd_GPR64, xzr); } autiza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthIA(Rd_GPR64, xzr); } autiza__PACpart: "hide" is ShowPAC=0 { } autia1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = AuthIA(x17, x16); } autia1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x17, x16); } autia1716__PACpart: "hide" is ShowPAC=0 { } autiasp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIA(x30, sp); } autiasp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x30, sp); } autiasp__PACpart: "hide" is ShowPAC=0 { } autiaz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIA(x30, xzr); } autiaz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x30, xzr); } autiaz__PACpart: "hide" is ShowPAC=0 { } autib__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = AuthIB(Rd_GPR64, Rn_GPR64xsp); } autib__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { 
AuthIB(Rd_GPR64, Rn_GPR64xsp); } autib__PACpart: "hide" is ShowPAC=0 { } autizb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = AuthIB(Rd_GPR64, xzr); } autizb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { AuthIB(Rd_GPR64, xzr); } autizb__PACpart: "hide" is ShowPAC=0 { } autib1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = AuthIB(x17, x16); } autib1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x17, x16); } autib1716__PACpart: "hide" is ShowPAC=0 { } autibsp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIB(x30, sp); } autibsp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x30, sp); } autibsp__PACpart: "hide" is ShowPAC=0 { } autibz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = AuthIB(x30, xzr); } autibz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x30, xzr); } autibz__PACpart: "hide" is ShowPAC=0 { } b_blinkop__raaz___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64 { AuthIA(Rn_GPR64, xzr); } b_blinkop__raaz___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64 { AuthIA(Rn_GPR64, xzr); } b_blinkop__raaz___PACpart: "hide" is ShowPAC=0 { } b_blinkop__raa___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64xsp & Rn_GPR64 { AuthIA(Rn_GPR64, Rd_GPR64xsp); } b_blinkop__raa___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64xsp & Rn_GPR64 { AuthIA(Rn_GPR64, Rd_GPR64xsp); } b_blinkop__raa___PACpart: "hide" is ShowPAC=0 { } b_blinkop__rabz___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64 { AuthIB(Rn_GPR64, xzr); } b_blinkop__rabz___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64 { AuthIB(Rn_GPR64, xzr); } b_blinkop__rabz___PACpart: "hide" is ShowPAC=0 { } b_blinkop__rab___PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64xsp & Rn_GPR64 { AuthIB(Rn_GPR64, Rd_GPR64xsp); } 
b_blinkop__rab___PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64xsp & Rn_GPR64 { AuthIB(Rn_GPR64, Rd_GPR64xsp); } b_blinkop__rab___PACpart: "hide" is ShowPAC=0 { } eretaa__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIA(pc, sp); } eretaa__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(pc, sp); } eretaa__PACpart: "hide" is ShowPAC=0 { } eretab__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIB(pc, sp); } eretab__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(pc, sp); } eretab__PACpart: "hide" is ShowPAC=0 { } ldraa__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp { AuthDA(Rn_GPR64xsp, xzr); } ldraa__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp { AuthDA(Rn_GPR64xsp, xzr); } ldraa__PACpart: "hide" is ShowPAC=0 { } ldrab__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp { AuthDB(Rn_GPR64xsp, xzr); } ldrab__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp { AuthDB(Rn_GPR64xsp, xzr); } ldrab__PACpart: "hide" is ShowPAC=0 { } pacda__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacda(Rd_GPR64, Rn_GPR64xsp); } pacda__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacda(Rd_GPR64, Rn_GPR64xsp); } pacda__PACpart: "hide" is ShowPAC=0 { } pacdza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = pacdza(Rd_GPR64); } pacdza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { pacdza(Rd_GPR64); } pacdza__PACpart: "hide" is ShowPAC=0 { } pacdb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacdb(Rd_GPR64, Rn_GPR64xsp); } pacdb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacdb(Rd_GPR64, Rn_GPR64xsp); } pacdb__PACpart: "hide" is ShowPAC=0 { } pacdzb__PACpart: "show_and_clobber" is ShowPAC=1 & 
PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = pacdzb(Rd_GPR64); } pacdzb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { pacdzb(Rd_GPR64); } pacdzb__PACpart: "hide" is ShowPAC=0 { } pacia__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacia(Rd_GPR64, Rn_GPR64xsp); } pacia__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacia(Rd_GPR64, Rn_GPR64xsp); } pacia__PACpart: "hide" is ShowPAC=0 { } paciza__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = paciza(Rd_GPR64); } paciza__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { paciza(Rd_GPR64); } paciza__PACpart: "hide" is ShowPAC=0 { } pacia1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x17 = pacia(x17, x16); } pacia1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacia(x17, x16); } pacia1716__PACpart: "hide" is ShowPAC=0 { } paciasp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = pacia(x30, sp); } paciasp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacia(x30, sp); } paciasp__PACpart: "hide" is ShowPAC=0 { } paciaz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = paciza(x30); } paciaz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { paciza(x30); } paciaz__PACpart: "hide" is ShowPAC=0 { } pacib__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rn_GPR64xsp & Rd_GPR64 { Rd_GPR64 = pacib(Rd_GPR64, Rn_GPR64xsp); } pacib__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rn_GPR64xsp & Rd_GPR64 { pacib(Rd_GPR64, Rn_GPR64xsp); } pacib__PACpart: "hide" is ShowPAC=0 { } pacizb__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = pacizb(Rd_GPR64); } pacizb__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { pacizb(Rd_GPR64); } pacizb__PACpart: "hide" is ShowPAC=0 { } pacib1716__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { 
x17 = pacib(x17, x16); } pacib1716__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacib(x17, x16); } pacib1716__PACpart: "hide" is ShowPAC=0 { } pacibsp__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = pacib(x30, sp); } pacibsp__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacib(x30, sp); } pacibsp__PACpart: "hide" is ShowPAC=0 { } pacibz__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = pacizb(x30); } pacibz__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { pacizb(x30); } pacibz__PACpart: "hide" is ShowPAC=0 { } retaa__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIA(x30, sp); } retaa__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIA(x30, sp); } retaa__PACpart: "hide" is ShowPAC=0 { } retab__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { AuthIB(x30, sp); } retab__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { AuthIB(x30, sp); } retab__PACpart: "hide" is ShowPAC=0 { } xpacd__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = xpac(Rd_GPR64, 1:1); } xpacd__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { xpac(Rd_GPR64, 1:1); } xpacd__PACpart: "hide" is ShowPAC=0 { } xpaci__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 & Rd_GPR64 { Rd_GPR64 = xpac(Rd_GPR64, 0:1); } xpaci__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 & Rd_GPR64 { xpac(Rd_GPR64, 0:1); } xpaci__PACpart: "hide" is ShowPAC=0 { } xpaclri__PACpart: "show_and_clobber" is ShowPAC=1 & PAC_clobber=1 { x30 = xpac(x30, 0:1); } xpaclri__PACpart: "show_noclobber" is ShowPAC=1 & PAC_clobber=0 { xpac(x30, 0:1); } xpaclri__PACpart: "hide" is ShowPAC=0 { } ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_golang.cspec ================================================ ================================================ FILE: 
pypcode/processors/AARCH64/data/languages/AARCH64_golang.register.info ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_ilp32.cspec ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_swift.cspec ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64_win.cspec ================================================ ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64base.sinc ================================================ # C6.2.1 ADC page C6-1144 line 67905 MATCH x1a000000/mask=x7fe0fc00 # C6.2.2 ADCS page C6-1146 line 67991 MATCH x3a000000/mask=x7fe0fc00 # CONSTRUCT x1a000000/mask=xdfe0fc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x1a000000/mask=xdfe0fc00 --status pass --comment "flags" :adc^SBIT_CZNO Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & b_30=0 & S & SBIT_CZNO & b_2428=0x1a & b_2123=0 & Rm_GPR32 & b_1015=0 & Rd_GPR32 & Rd_GPR64 & Rn_GPR32 { add_with_carry_flags(Rn_GPR32, Rm_GPR32); tmp:4 = Rm_GPR32 + Rn_GPR32 + zext(CY); Rd_GPR64 = zext(tmp); resultflags(tmp); build SBIT_CZNO; } # C6.2.1 ADC page C6-1144 line 67905 MATCH x1a000000/mask=x7fe0fc00 # C6.2.2 ADCS page C6-1146 line 67991 MATCH x3a000000/mask=x7fe0fc00 # CONSTRUCT x9a000000/mask=xdfe0fc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x9a000000/mask=xdfe0fc00 --status pass --comment "flags" :adc^SBIT_CZNO Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & b_30=0 & S & SBIT_CZNO & b_2428=0x1a & b_2123=0 & Rm_GPR64 & b_1015=0 & Rd_GPR64 & Rn_GPR64 { add_with_carry_flags(Rn_GPR64, Rm_GPR64); Rd_GPR64 = Rn_GPR64 + Rm_GPR64 + zext(CY); resultflags(Rd_GPR64); build SBIT_CZNO; } # C6.2.3 ADD (extended register) page C6-1148 line 68081 MATCH x0b200000/mask=x7fe00000 # 
C6.2.7 ADDS (extended register) page C6-1156 line 68516 MATCH x2b200000/mask=x7fe00000 # C6.2.59 CMN (extended register) page C6-1246 line 73092 MATCH x2b20001f/mask=x7fe0001f # CONSTRUCT x0b200000/mask=xdfe00000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x0b200000/mask=xdfe00000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR32wsp, Rn_GPR32wsp, ExtendRegShift32 is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp { tmp_2:4 = ExtendRegShift32; addflags(Rn_GPR32wsp, tmp_2); tmp_1:4 = Rn_GPR32wsp + tmp_2; resultflags(tmp_1); Rd_GPR64xsp = zext(tmp_1); build SBIT_CZNO; } # C6.2.3 ADD (extended register) page C6-1148 line 68081 MATCH x0b200000/mask=x7fe00000 # C6.2.7 ADDS (extended register) page C6-1156 line 68516 MATCH x2b200000/mask=x7fe00000 # C6.2.59 CMN (extended register) page C6-1246 line 73092 MATCH x2b20001f/mask=x7fe0001f # CONSTRUCT x8b200000/mask=xdfe00000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x8b200000/mask=xdfe00000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, ExtendRegShift64 is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift64 & Rn_GPR64xsp & Rd_GPR64xsp { tmp_2:8 = ExtendRegShift64; addflags(Rn_GPR64xsp, tmp_2); tmp_1:8 = Rn_GPR64xsp + tmp_2; resultflags(tmp_1); Rd_GPR64xsp = tmp_1; build SBIT_CZNO; } # C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000 # C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000 # C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f # C6.2.220 MOV (to/from SP) page C6-1668 line 98876 MATCH x11000000/mask=x7ffffc00 # CONSTRUCT x11000000/mask=xdf000000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x11000000/mask=xdf000000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR32xsp, Rn_GPR32xsp, ImmShift32 is sf=0 & b_30=0 & S & SBIT_CZNO & b_2428=0x011 & ImmShift32 & Rn_GPR32xsp & Rd_GPR32xsp & Rd_GPR64xsp { 
addflags(Rn_GPR32xsp, ImmShift32); tmp:4 = Rn_GPR32xsp + ImmShift32; resultflags(tmp); build SBIT_CZNO; Rd_GPR64xsp = zext(tmp); } # C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000 # C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000 # C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f # C6.2.220 MOV (to/from SP) page C6-1668 line 98876 MATCH x11000000/mask=x7ffffc00 # CONSTRUCT x91000000/mask=xdf000000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x91000000/mask=xdf000000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, ImmShift64 is sf=1 & b_30=0 & S & SBIT_CZNO & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp & Rd_GPR64xsp { addflags(Rn_GPR64xsp, ImmShift64); Rd_GPR64xsp = Rn_GPR64xsp + ImmShift64; resultflags(Rd_GPR64xsp); build SBIT_CZNO; } # C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000 # C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000 # C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f # C6.2.220 MOV (to/from SP) page C6-1668 line 98876 MATCH x11000000/mask=x7ffffc00 # CONSTRUCT x11000000/mask=xdfc00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x11000000/mask=xdfc00000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl0 is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_posimm_lsl0 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp { tmp_2:4 = Imm12_addsubimm_operand_i32_posimm_lsl0; addflags(Rn_GPR32wsp, tmp_2); tmp_1:4 = Rn_GPR32wsp + tmp_2; resultflags(tmp_1); Rd_GPR64xsp = zext(tmp_1); build SBIT_CZNO; } # C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000 # C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000 # C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f # CONSTRUCT x11400000/mask=xdfc00000 
MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x11400000/mask=xdfc00000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl12 is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_posimm_lsl12 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp { tmp_2:4 = Imm12_addsubimm_operand_i32_posimm_lsl12; addflags(Rn_GPR32wsp, tmp_2); tmp_1:4 = Rn_GPR32wsp + tmp_2; resultflags(tmp_1); Rd_GPR64xsp = zext(tmp_1); build SBIT_CZNO; } # C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000 # C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000 # C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f # C6.2.220 MOV (to/from SP) page C6-1668 line 98876 MATCH x11000000/mask=x7ffffc00 # CONSTRUCT x91000000/mask=xdfc00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x91000000/mask=xdfc00000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl0 is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_posimm_lsl0 & Rn_GPR64xsp & Rd_GPR64xsp { tmp_2:8 = Imm12_addsubimm_operand_i64_posimm_lsl0; addflags(Rn_GPR64xsp, tmp_2); tmp_1:8 = Rn_GPR64xsp + tmp_2; resultflags(tmp_1); Rd_GPR64xsp = tmp_1; build SBIT_CZNO; } # C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000 # C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000 # C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f # CONSTRUCT x91400000/mask=xdfc00000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x91400000/mask=xdfc00000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl12 is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_posimm_lsl12 & Rn_GPR64xsp & Rd_GPR64xsp { tmp_2:8 = Imm12_addsubimm_operand_i64_posimm_lsl12; 
addflags(Rn_GPR64xsp, tmp_2); tmp_1:8 = Rn_GPR64xsp + tmp_2; resultflags(tmp_1); Rd_GPR64xsp = tmp_1; build SBIT_CZNO; } # C6.2.5 ADD (shifted register) page C6-1153 line 68340 MATCH x0b000000/mask=x7f200000 # C6.2.9 ADDS (shifted register) page C6-1161 line 68775 MATCH x2b000000/mask=x7f200000 # C6.2.61 CMN (shifted register) page C6-1250 line 73309 MATCH x2b00001f/mask=x7f20001f # CONSTRUCT x0b000000/mask=xdf208000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x0b000000/mask=xdf208000 --status pass --comment "flags" # if shift == '11' then ReservedValue(); :add^SBIT_CZNO Rd_GPR32, Rn_GPR32, RegShift32 is sf=0 & op=0 & S & SBIT_CZNO & b_2428=0xb & b_2121=0 & b_15=0 & RegShift32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = RegShift32; addflags(Rn_GPR32, tmp_2); tmp_1:4 = Rn_GPR32 + tmp_2; resultflags(tmp_1); Rd_GPR64 = zext(tmp_1); build SBIT_CZNO; } # C6.2.5 ADD (shifted register) page C6-1153 line 68340 MATCH x0b000000/mask=x7f200000 # C6.2.9 ADDS (shifted register) page C6-1161 line 68775 MATCH x2b000000/mask=x7f200000 # C6.2.61 CMN (shifted register) page C6-1250 line 73309 MATCH x2b00001f/mask=x7f20001f # CONSTRUCT x8b000000/mask=xdf200000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x8b000000/mask=xdf200000 --status pass --comment "flags" :add^SBIT_CZNO Rd_GPR64, Rn_GPR64, RegShift64 is sf=1 & op=0 & S & SBIT_CZNO & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd_GPR64 { tmp_2:8 = RegShift64; addflags(Rn_GPR64, tmp_2); tmp_1:8 = Rn_GPR64 + tmp_2; resultflags(tmp_1); Rd_GPR64 = tmp_1; build SBIT_CZNO; } # C6.2.10 ADR page C6-1163 line 68896 MATCH x10000000/mask=x9f000000 # CONSTRUCT x10000000/mask=x9f000000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x10000000/mask=x9f000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" :adr Rd_GPR64, AdrReloff is b_31=0 & AdrReloff & b_2428=0x10 & Rd_GPR64 { Rd_GPR64 = &AdrReloff; } # C6.2.11 ADRP page C6-1164 line 68943 MATCH x90000000/mask=x9f000000 # CONSTRUCT x90000000/mask=x9f000000 MATCHED 1 
DOCUMENTED OPCODES # AUNIT --inst x90000000/mask=x9f000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
# ADRP: destination receives the address exported by AdrReloff; the SLEIGH
# '&' operator takes the offset of that exported address as a constant.
:adrp Rd_GPR64, AdrReloff is b_31=1 & AdrReloff & b_2428=0x10 & Rd_GPR64
{
    Rd_GPR64 = &AdrReloff;
}
# C6.2.12 AND (immediate) page C6-1165 line 68992 MATCH x12000000/mask=x7f800000
# CONSTRUCT x12000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x12000000/mask=xff800000 --status pass
# AND (immediate), 32-bit: Wd|WSP = Wn AND decoded bitmask immediate.
# The 4-byte result is zero-extended into the underlying 64-bit register,
# modeling the W-register write clearing the upper 32 bits.
:and Rd_GPR32wsp, Rn_GPR32, DecodeWMask32 is sf=0 & opc=0 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32wsp & Rd_GPR64xsp
{
    tmp_1:4 = Rn_GPR32 & DecodeWMask32;
    Rd_GPR64xsp = zext(tmp_1);
}
# C6.2.12 AND (immediate) page C6-1165 line 68992 MATCH x12000000/mask=x7f800000
# CONSTRUCT x92000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x92000000/mask=xff800000 --status pass
# AND (immediate), 64-bit: Xd|SP = Xn AND decoded bitmask immediate.
:and Rd_GPR64xsp, Rn_GPR64, DecodeWMask64 is sf=1 & opc=0 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64xsp
{
    tmp_1:8 = Rn_GPR64 & DecodeWMask64;
    Rd_GPR64xsp = tmp_1;
}
# C6.2.13 AND (shifted register) page C6-1167 line 69083 MATCH x0a000000/mask=x7f200000
# CONSTRUCT x0a000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x0a000000/mask=xff200000 --status pass
# AND (shifted register), 32-bit: Wd = Wn AND shifted(Wm).
# RegShift32Log is a subconstructor exporting the shifted operand value.
:and Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=0 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
    tmp_2:4 = RegShift32Log;
    tmp_1:4 = Rn_GPR32 & tmp_2;
    Rd_GPR64 = zext(tmp_1);
}
# C6.2.13 AND (shifted register) page C6-1167 line 69083 MATCH x0a000000/mask=x7f200000
# CONSTRUCT x8a000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x8a000000/mask=xff200000 --status pass
# AND (shifted register), 64-bit: Xd = Xn AND shifted(Xm).
:and Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=0 & b_2428=0xa & N=0 & RegShift64Log & Rn_GPR64 & Rd_GPR64
{
    tmp_2:8 = RegShift64Log;
    tmp_1:8 = Rn_GPR64 & tmp_2;
    Rd_GPR64 = tmp_1;
}
# C6.2.14 ANDS (immediate) page C6-1169 line 69185 MATCH x72000000/mask=x7f800000
# C6.2.382 TST (immediate) page C6-1983 line 116255 MATCH x7200001f/mask=x7f80001f
#
CONSTRUCT x72000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x72000000/mask=xff800000 --status pass --comment "flags"
# ANDS (immediate), 32-bit: flag-setting AND; resultflags()/affectLflags()
# are macros defined elsewhere in this spec — presumably they update NZCV
# from the logical result (TODO confirm against the macro definitions).
:ands Rd_GPR32, Rn_GPR32, DecodeWMask32 is sf=0 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
    tmp_1:4 = Rn_GPR32 & DecodeWMask32;
    resultflags(tmp_1);
    Rd_GPR64 = zext(tmp_1);
    affectLflags();
}
# C6.2.14 ANDS (immediate) page C6-1169 line 69185 MATCH x72000000/mask=x7f800000
# C6.2.382 TST (immediate) page C6-1983 line 116255 MATCH x7200001f/mask=x7f80001f
# CONSTRUCT xf2000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf2000000/mask=xff800000 --status pass --comment "flags"
# ANDS (immediate), 64-bit form.
:ands Rd_GPR64, Rn_GPR64, DecodeWMask64 is sf=1 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64
{
    tmp_1:8 = Rn_GPR64 & DecodeWMask64;
    resultflags(tmp_1);
    Rd_GPR64 = tmp_1;
    affectLflags();
}
# C6.2.15 ANDS (shifted register) page C6-1171 line 69286 MATCH x6a000000/mask=x7f200000
# C6.2.383 TST (shifted register) page C6-1984 line 116319 MATCH x6a00001f/mask=x7f20001f
# CONSTRUCT x6a000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x6a000000/mask=xff200000 --status pass --comment "flags"
# ANDS (shifted register), 32-bit: flag-setting AND with shifted operand.
:ands Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=3 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
    tmp_2:4 = RegShift32Log;
    tmp_1:4 = Rn_GPR32 & tmp_2;
    resultflags(tmp_1);
    Rd_GPR64 = zext(tmp_1);
    affectLflags();
}
# C6.2.15 ANDS (shifted register) page C6-1171 line 69286 MATCH x6a000000/mask=x7f200000
# C6.2.383 TST (shifted register) page C6-1984 line 116319 MATCH x6a00001f/mask=x7f20001f
# CONSTRUCT xea000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xea000000/mask=xff200000 --status pass --comment "flags"
# ANDS (shifted register), 64-bit form.
:ands Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=3 & b_2428=0xa & N=0 & RegShift64Log & Rn_GPR64 & Rd_GPR64
{
    tmp_2:8 = RegShift64Log;
    tmp_1:8 = Rn_GPR64 & tmp_2;
    resultflags(tmp_1);
    Rd_GPR64 = tmp_1;
    affectLflags();
}
# C6.2.16 ASR
(register) page C6-1173 line 69404 MATCH x1ac02800/mask=x7fe0fc00
# C6.2.18 ASRV page C6-1177 line 69588 MATCH x1ac02800/mask=x7fe0fc00
# CONSTRUCT x1ac02800/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac02800/mask=xffe0fc00 --status pass
# ASR/ASRV (register), 32-bit: arithmetic (sign-propagating, s>>) shift
# right of Wn by Wm; '& 0x1f' takes the shift amount modulo the 32-bit
# data size, as the encoding requires.
:asr Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0xa & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
    tmp_2:4 = Rm_GPR32 & 0x1f;
    tmp_1:4 = Rn_GPR32 s>> tmp_2;
    Rd_GPR64 = zext(tmp_1);
}
# C6.2.16 ASR (register) page C6-1173 line 69404 MATCH x1ac02800/mask=x7fe0fc00
# C6.2.18 ASRV page C6-1177 line 69588 MATCH x1ac02800/mask=x7fe0fc00
# CONSTRUCT x9ac02800/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9ac02800/mask=xffe0fc00 --status pass
# ASR/ASRV (register), 64-bit: shift amount is Xm modulo 64 ('& 0x3f').
:asr Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0xa & Rn_GPR64 & Rd_GPR64
{
    tmp_2:8 = Rm_GPR64 & 0x3f;
    tmp_1:8 = Rn_GPR64 s>> tmp_2;
    Rd_GPR64 = tmp_1;
}
# C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00
# C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000
# C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000
# C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000
# CONSTRUCT x13007c00/mask=xffe0fc02 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst x13007c00/mask=xffe0fc02 --status pass
# Alias for sbfm when imms == '011111'
# imms is MAX_INT5, so it will never be less than immr. Note that immr is limited to [0,31]
# Ha!
Two explicit cases passes -l # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :asr Rd_GPR32, Rn_GPR32, ImmRConst32 is ImmS=0x1f & ImmS_LT_ImmR=0 & (ImmS_EQ_ImmR=0 | ImmS_EQ_ImmR=1) & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = ImmRConst32; tmp_1:4 = Rn_GPR32 s>> tmp_2; Rd_GPR64 = zext(tmp_1); } # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # CONSTRUCT x9340fc00/mask=xffc0fc02 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x9340fc00/mask=xffc0fc02 --status pass # Alias for sbfm when imms == '111111' # imms is MAX_INT6, so it will never be less than immr (6-bit field) # Ha! Two explicit cases passes -l :asr Rd_GPR64, Rn_GPR64, ImmRConst64 is ImmS=0x3f & ImmS_LT_ImmR=0 & (ImmS_EQ_ImmR=0 | ImmS_EQ_ImmR=1) & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & Rn_GPR64 & Rd_GPR64 { tmp_2:8 = ImmRConst64; tmp_1:8 = Rn_GPR64 s>> tmp_2; Rd_GPR64 = tmp_1; } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087800/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd5087800/mask=xffffffe0 --status noqemu :at "S1E1R", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b000 & Rt_GPR64 { par_el1 = AT_S1E1R(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # 
C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c7800/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd50c7800/mask=xffffffe0 --status noqemu :at "S1E2R", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b000 & Rt_GPR64 { par_el1 = AT_S1E2R(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e7800/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd50e7800/mask=xffffffe0 --status noqemu :at "S1E3R", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b000 & Rt_GPR64 { par_el1 = AT_S1E3R(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087820/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd5087820/mask=xffffffe0 --status noqemu :at "S1E1W", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b001 & Rt_GPR64 { par_el1 = AT_S1E1W(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c7820/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd50c7820/mask=xffffffe0 --status noqemu :at "S1E2W", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b001 & Rt_GPR64 { par_el1 = 
AT_S1E2W(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e7820/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd50e7820/mask=xffffffe0 --status noqemu :at "S1E3W", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b001 & Rt_GPR64 { par_el1 = AT_S1E3W(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087840/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd5087840/mask=xffffffe0 --status noqemu :at "S1E0R", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b010 & Rt_GPR64 { par_el1 = AT_S1E0R(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087860/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd5087860/mask=xffffffe0 --status noqemu :at "S1E0W", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b011 & Rt_GPR64 { par_el1 = AT_S1E0W(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c7880/mask=xffffffe0 
MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd50c7880/mask=xffffffe0 --status noqemu
# AT "S12E1R" (per the operand name, a stage-1+2 EL1-read address translate):
# the result of the user-defined pcode op AT_S12E1R (declared elsewhere in
# this spec) is written to par_el1. Only op1 (b_1618) and op2 (b_0507)
# distinguish the AT variants below.
:at "S12E1R", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b100 & Rt_GPR64
{
    par_el1 = AT_S12E1R(Rt_GPR64);
}
# C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50c78a0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst xd50c78a0/mask=xffffffe0 --status noqemu
# AT "S12E1W": op2 = 0b101.
:at "S12E1W", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b101 & Rt_GPR64
{
    par_el1 = AT_S12E1W(Rt_GPR64);
}
# C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50c78c0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst xd50c78c0/mask=xffffffe0 --status noqemu
# AT "S12E0R": op2 = 0b110.
:at "S12E0R", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b110 & Rt_GPR64
{
    par_el1 = AT_S12E0R(Rt_GPR64);
}
# C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50c78e0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst xd50c78e0/mask=xffffffe0 --status noqemu
# AT "S12E0W": op2 = 0b111.
:at "S12E0W", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b0111 & b_0811=0b1000 & b_0507=0b111 & Rt_GPR64
{
    par_el1 = AT_S12E0W(Rt_GPR64);
}
# C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00
# C6.2.109 DC page
C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087900/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd5087900/mask=xffffffe0 --status noqemu :at "S1E1RP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1001 & b_0507=0b000 & Rt_GPR64 { par_el1 = AT_S1E1RP(Rt_GPR64); } # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087920/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xd5087920/mask=xffffffe0 --status noqemu :at "S1E1WP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1001 & b_0507=0b001 & Rt_GPR64 { par_el1 = AT_S1E1WP(Rt_GPR64); } # C6.2.20 AUTDA, AUTDZA page C6-1181 line 69758 MATCH xdac11800/mask=xffffdc00 # CONSTRUCT xdac11800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac11800/mask=xfffffc00 --status noqemu :autda Rd_GPR64, Rn_GPR64xsp is autda__PACpart & b_1431=0b110110101100000100 & b_1012=0b110 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build autda__PACpart; } # C6.2.20 AUTDA, AUTDZA page C6-1181 line 69758 MATCH xdac11800/mask=xffffdc00 # CONSTRUCT xdac13be0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac13be0/mask=xffffffe0 --status noqemu :autdza Rd_GPR64 is autdza__PACpart & b_1431=0b110110101100000100 & b_1012=0b110 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build autdza__PACpart; } # C6.2.21 AUTDB, AUTDZB page C6-1182 line 69833 MATCH xdac11c00/mask=xffffdc00 # CONSTRUCT xdac11c00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac11c00/mask=xfffffc00 --status noqemu :autdb Rd_GPR64, Rn_GPR64xsp is autdb__PACpart & 
b_1431=0b110110101100000100 & b_1012=0b111 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build autdb__PACpart; } # C6.2.21 AUTDB, AUTDZB page C6-1182 line 69833 MATCH xdac11c00/mask=xffffdc00 # CONSTRUCT xdac13fe0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac13fe0/mask=xffffffe0 --status noqemu :autdzb Rd_GPR64 is autdzb__PACpart & b_1431=0b110110101100000100 & b_1012=0b111 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build autdzb__PACpart; } # C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-1183 line 69908 MATCH xdac11000/mask=xffffdc00 # CONSTRUCT xdac11000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac11000/mask=xfffffc00 --status noqemu :autia Rd_GPR64, Rn_GPR64xsp is autia__PACpart & b_1431=0b110110101100000100 & b_1012=0b100 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build autia__PACpart; } # C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-1183 line 69908 MATCH xdac11000/mask=xffffdc00 # CONSTRUCT xdac133e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac133e0/mask=xffffffe0 --status noqemu :autiza Rd_GPR64 is autiza__PACpart & b_1431=0b110110101100000100 & b_1012=0b100 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build autiza__PACpart; } # C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-1183 line 69908 MATCH xd503219f/mask=xfffffddf # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd503219f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd503219f/mask=xffffffff --status nodest :autia1716 is autia1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b100 & b_0004=0b11111 { build autia1716__PACpart; } # C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-1183 line 69908 MATCH xd503219f/mask=xfffffddf # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd50323bf/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50323bf/mask=xffffffff --status nodest :autiasp is autiasp__PACpart & 
b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b101 & b_0004=0b11111 { build autiasp__PACpart; } # C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ, AUTIZA page C6-1183 line 69908 MATCH xd503219f/mask=xfffffddf # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd503239f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd503239f/mask=xffffffff --status nodest :autiaz is autiaz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b100 & b_0004=0b11111 { build autiaz__PACpart; } # C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-1186 line 70065 MATCH xdac11400/mask=xffffdc00 # CONSTRUCT xdac11400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac11400/mask=xfffffc00 --status noqemu :autib Rd_GPR64, Rn_GPR64xsp is autib__PACpart & b_1431=0b110110101100000100 & b_1012=0b101 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build autib__PACpart; } # C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-1186 line 70065 MATCH xdac11400/mask=xffffdc00 # CONSTRUCT xdac137e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac137e0/mask=xffffffe0 --status noqemu :autizb Rd_GPR64 is autizb__PACpart & b_1431=0b110110101100000100 & b_1012=0b101 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build autizb__PACpart; } # C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-1186 line 70065 MATCH xd50321df/mask=xfffffddf # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd50321df/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50321df/mask=xffffffff --status nodest :autib1716 is autib1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b110 & b_0004=0b11111 { build autib1716__PACpart; } # C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-1186 line 70065 MATCH xd50321df/mask=xfffffddf # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd50323ff/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst 
xd50323ff/mask=xffffffff --status nodest :autibsp is autibsp__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b111 & b_0004=0b11111 { build autibsp__PACpart; } # C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-1186 line 70065 MATCH xd50321df/mask=xfffffddf # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd50323df/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50323df/mask=xffffffff --status nodest :autibz is autibz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b110 & b_0004=0b11111 { build autibz__PACpart; } # C6.2.26 B.cond page C6-1191 line 70305 MATCH x54000000/mask=xff000010 # CONSTRUCT x5400000f/mask=xff00001f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x5400000f/mask=xff00001f --status nodest --comment "noflags qemuerr(illegal addresses cause qemu exit)" :b^"."^BranchCondOp Addr19 is b_2531=0x2a & o1=0 & Addr19 & o0=0 & br_cond_op=15 & BranchCondOp { goto Addr19; } # C6.2.26 B.cond page C6-1191 line 70305 MATCH x54000000/mask=xff000010 # CONSTRUCT x54000000/mask=xff000010 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x54000000/mask=xff000010 --status nodest --comment "flags qemuerr(illegal addresses cause qemu exit)" :b^"."^BranchCondOp Addr19 is b_2531=0x2a & o1=0 & Addr19 & o0=0 & br_cond_op & BranchCondOp { if (BranchCondOp) goto Addr19; } # C6.2.25 B page C6-1190 line 70265 MATCH x14000000/mask=xfc000000 # CONSTRUCT x14000000/mask=xfc000000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x14000000/mask=xfc000000 --status nodest --comment "flags qemuerr(illegal addresses cause qemu exit)" :b Addr26 is b_31=0 & b_2630=0x05 & Addr26 { goto Addr26; } # C6.2.27 BC.cond page C6-1192 line 70348 MATCH x54000010/mask=xff000010 # CONSTRUCT x54000010/mask=xff000010 MATCHED 1 DOCUMENTED OPCODES # b_0031=01010100...................1.... 
:bc^"."^BranchCondOp Addr19 is b_2531=0x2a & o1=0 & Addr19 & o0=1 & br_cond_op=15 & BranchCondOp { goto Addr19; } :bc^"."^BranchCondOp Addr19 is b_2531=0x2a & o1=0 & Addr19 & o0=1 & BranchCondOp { if (BranchCondOp) goto Addr19; } # C6.2.30 BFM page C6-1197 line 70576 MATCH x33000000/mask=x7f800000 # C6.2.28 BFC page C6-1193 line 70394 MATCH x330003e0/mask=x7f8003e0 # C6.2.29 BFI page C6-1195 line 70484 MATCH x33000000/mask=x7f800000 # C6.2.31 BFXIL page C6-1199 line 70700 MATCH x33000000/mask=x7f800000 # CONSTRUCT x33000000/mask=xffe08000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x33000000/mask=xffe08000 --status pass # if sf == '0' && (N != '0' || immr<5> (b_21) != '0' || imms<5> (b_15) != '0') then ReservedValue(); :bfm Rd_GPR32, Rn_GPR32, ImmR_bitfield32_imm, ImmS_bitfield32_imm is sf=0 & opc=1 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmR_bitfield32_imm & ImmS_bitfield32_imm & ImmRConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 { local wmask:4 = DecodeWMask32; local tmask:4 = DecodeTMask32; local dst:4 = Rd_GPR32; local src:4 = Rn_GPR32; local bot:4 = (dst & ~(wmask)) | (((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask); Rd_GPR64 = zext((dst & ~(tmask)) | (bot & tmask)); } # C6.2.30 BFM page C6-1197 line 70576 MATCH x33000000/mask=x7f800000 # C6.2.28 BFC page C6-1193 line 70394 MATCH x330003e0/mask=x7f8003e0 # C6.2.29 BFI page C6-1195 line 70484 MATCH x33000000/mask=x7f800000 # C6.2.31 BFXIL page C6-1199 line 70700 MATCH x33000000/mask=x7f800000 # CONSTRUCT xb3400002/mask=xffc00002 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xb3400002/mask=xffc00002 --status pass :bfm Rd_GPR64, Rn_GPR64, ImmR_bitfield64_imm, ImmS_bitfield64_imm is ImmS_LT_ImmR=1 & sf=1 & opc=1 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmRConst64 & ImmS_bitfield64_imm & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64 { local wmask:8 = DecodeWMask64; local tmask:8 = DecodeTMask64; local dst:8 = Rd_GPR64; local src:8 = Rn_GPR64; local 
bot:8 = (dst & ~(wmask)) | (((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask); Rd_GPR64 = (dst & ~(tmask)) | (bot & tmask); } # C6.2.28 BFXIL page C6-567 line 33333 KEEPWITH BFextractWidth32: "#"^imm is ImmR & ImmS [ imm = ImmS - ImmR + 1; ] { export *[const]:4 imm; } BFextractWidth64: "#"^imm is ImmR & ImmS [ imm = ImmS - ImmR + 1; ] { export *[const]:8 imm; } # C6.2.31 BFXIL page C6-1199 line 70700 MATCH x33000000/mask=x7f800000 # C6.2.28 BFC page C6-1193 line 70394 MATCH x330003e0/mask=x7f8003e0 # C6.2.29 BFI page C6-1195 line 70484 MATCH x33000000/mask=x7f800000 # C6.2.30 BFM page C6-1197 line 70576 MATCH x33000000/mask=x7f800000 # CONSTRUCT x33000000/mask=xffe08002 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x33000000/mask=xffe08002 --status pass # Alias for bfm where UInt(imms) >= UInt(immr) :bfxil Rd_GPR32, Rn_GPR32, ImmRConst32, BFextractWidth32 is ImmS_LT_ImmR=0 & sf=0 & opc=1 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & BFextractWidth32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { dst:4 = Rd_GPR32; src:4 = Rn_GPR32; mask:4 = (0xffffffff >> (32 - BFextractWidth32)); tmp:4 = (src >> ImmRConst32) & mask; Rd_GPR64 = zext((dst & ~(mask)) | tmp); } # C6.2.31 BFXIL page C6-1199 line 70700 MATCH x33000000/mask=x7f800000 # C6.2.28 BFC page C6-1193 line 70394 MATCH x330003e0/mask=x7f8003e0 # C6.2.29 BFI page C6-1195 line 70484 MATCH x33000000/mask=x7f800000 # C6.2.30 BFM page C6-1197 line 70576 MATCH x33000000/mask=x7f800000 # CONSTRUCT xb3400000/mask=xffc00002 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xb3400000/mask=xffc00002 --status pass # Alias for bfm where UInt(imms) >= UInt(immr) :bfxil Rd_GPR64, Rn_GPR64, ImmRConst64, BFextractWidth64 is ImmS_LT_ImmR=0 & sf=1 & opc=1 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & BFextractWidth64 & Rn_GPR64 & Rd_GPR64 { dst:8 = Rd_GPR64; src:8 = Rn_GPR64; mask:8 = (0xffffffffffffffff >> (64 - BFextractWidth64)); tmp:8 = (src >> ImmRConst64) & mask; Rd_GPR64 = ((dst & ~(mask)) | tmp); } # C6.2.32 BIC (shifted 
register) page C6-1201 line 70793 MATCH x0a200000/mask=x7f200000
# CONSTRUCT x0a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x0a200000/mask=xff200000 --status pass
# BIC (shifted register), 32-bit: Wd = Wn AND NOT(shifted(Wm)); the
# bitwise NOT is expressed as XOR with -1.
:bic Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=0 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
    tmp_3:4 = RegShift32Log;
    tmp_2:4 = tmp_3 ^ -1:4;
    tmp_1:4 = Rn_GPR32 & tmp_2;
    Rd_GPR64 = zext(tmp_1);
}
# C6.2.32 BIC (shifted register) page C6-1201 line 70793 MATCH x0a200000/mask=x7f200000
# CONSTRUCT x8a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x8a200000/mask=xff200000 --status pass
# BIC (shifted register), 64-bit form.
:bic Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=0 & b_2428=0xa & N=1 & RegShift64Log & Rn_GPR64 & Rd_GPR64
{
    tmp_3:8= RegShift64Log;
    tmp_2:8 = tmp_3 ^ -1:8;
    tmp_1:8 = Rn_GPR64 & tmp_2;
    Rd_GPR64 = tmp_1;
}
# C6.2.33 BICS (shifted register) page C6-1203 line 70897 MATCH x6a200000/mask=x7f200000
# CONSTRUCT x6a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x6a200000/mask=xff200000 --status pass --comment "flags"
# BICS, 32-bit: flag-setting BIC; resultflags()/affectLflags() are macros
# defined elsewhere in the spec that update the condition flags from the
# result — TODO confirm against the macro definitions.
:bics Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=3 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
    tmp_3:4 = RegShift32Log;
    tmp_2:4 = tmp_3 ^ -1:4;
    tmp_1:4 = Rn_GPR32 & tmp_2;
    resultflags(tmp_1);
    Rd_GPR64 = zext(tmp_1);
    affectLflags();
}
# C6.2.33 BICS (shifted register) page C6-1203 line 70897 MATCH x6a200000/mask=x7f200000
# CONSTRUCT xea200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xea200000/mask=xff200000 --status pass --comment "flags"
# BICS, 64-bit form.
:bics Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=3 & b_2428=0xa & N=1 & RegShift64Log & Rn_GPR64 & Rd_GPR64
{
    tmp_3:8= RegShift64Log;
    tmp_2:8 = tmp_3 ^ -1:8;
    tmp_1:8 = Rn_GPR64 & tmp_2;
    resultflags(tmp_1);
    Rd_GPR64 = tmp_1;
    affectLflags();
}
# C6.2.34 BL page C6-1205 line 71008 MATCH x94000000/mask=xfc000000
# CONSTRUCT x94000000/mask=xfc000000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x94000000/mask=xfc000000 --status nodest --comment
"qemuerr(illegal addresses cause qemu exit)" :bl Addr26 is b_31=1 & b_2630=0x05 & Addr26 { x30 = inst_start + 4; call Addr26; } # C6.2.35 BLR page C6-1206 line 71050 MATCH xd63f0000/mask=xfffffc1f # CONSTRUCT xd63f0000/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd63f0000/mask=xfffffc1f --status nodest --comment "qemuerr(illegal addresses cause qemu exit)" :blr Rn_GPR64 is b_2531=0x6b & b_2324=0 & b_2122=1 & b_1620=0x1f & b_1015=0 & Rn_GPR64 & b_0004=0 { pc = Rn_GPR64; x30 = inst_start + 4; call [pc]; } # C6.2.33 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-574 line 33668 KEEPWITH # Z == 0 && M == 0 && Rm = 11111 Key A, zero modifier variant blinkop: "l" is b_2122=0b01 { x30 = inst_start + 4; call [pc]; } blinkop: "" is b_2122=0b00 { goto[pc]; } # C6.2.36 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-1207 line 71095 MATCH xd63f0800/mask=xfefff800 # C6.2.38 BRAA, BRAAZ, BRAB, BRABZ page C6-1210 line 71251 MATCH xd61f0800/mask=xfefff800 # C6.2.255 RETAA, RETAB page C6-1731 line 102135 MATCH xd65f0bff/mask=xfffffbff # CONSTRUCT xd61f081f/mask=xff9ffc1f MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst xd61f081f/mask=xff9ffc1f --status nodest :b^blinkop^"raaz" Rn_GPR64 is b_blinkop__raaz___PACpart & b_2531=0b1101011 & b_24=0 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=0 & b_0004=0b11111 & Rn_GPR64 { build b_blinkop__raaz___PACpart; pc = Rn_GPR64; build blinkop; } # C6.2.36 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-1207 line 71095 MATCH xd63f0800/mask=xfefff800 # C6.2.38 BRAA, BRAAZ, BRAB, BRABZ page C6-1210 line 71251 MATCH xd61f0800/mask=xfefff800 # CONSTRUCT xd71f0800/mask=xff9ffc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd71f0800/mask=xff9ffc00 --status nodest # Z == 1 && M == 0 Key A, register modifier variant :b^blinkop^"raa" Rn_GPR64, Rd_GPR64xsp is b_blinkop__raa___PACpart & b_2531=0b1101011 & b_24=1 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=0 & Rd_GPR64xsp & Rn_GPR64 { build b_blinkop__raa___PACpart; pc = Rn_GPR64; build blinkop; } # C6.2.36 
BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-1207 line 71095 MATCH xd63f0800/mask=xfefff800 # C6.2.38 BRAA, BRAAZ, BRAB, BRABZ page C6-1210 line 71251 MATCH xd61f0800/mask=xfefff800 # C6.2.255 RETAA, RETAB page C6-1731 line 102135 MATCH xd65f0bff/mask=xfffffbff # CONSTRUCT xd61f0c1f/mask=xff9ffc1f MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst xd61f0c1f/mask=xff9ffc1f --status nodest # Z == 0 && M == 1 && Rm = 11111 Key B, zero modifier variant :b^blinkop^"rabz" Rn_GPR64 is b_blinkop__rabz___PACpart & b_2531=0b1101011 & b_24=0 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=1 & b_0004=0b11111 & Rn_GPR64 { build b_blinkop__rabz___PACpart; pc = Rn_GPR64; build blinkop; } # C6.2.36 BLRAA, BLRAAZ, BLRAB, BLRABZ page C6-1207 line 71095 MATCH xd63f0800/mask=xfefff800 # C6.2.38 BRAA, BRAAZ, BRAB, BRABZ page C6-1210 line 71251 MATCH xd61f0800/mask=xfefff800 # CONSTRUCT xd71f0c00/mask=xff9ffc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd71f0c00/mask=xff9ffc00 --status nodest # Z == 1 && M == 1 Key B, register modifier variant :b^blinkop^"rab" Rn_GPR64, Rd_GPR64xsp is b_blinkop__rab___PACpart & b_2531=0b1101011 & b_24=1 & b_23=0 & blinkop & b_1220=0b111110000 & b_11=1 & b_10=1 & Rd_GPR64xsp & Rn_GPR64 { build b_blinkop__rab___PACpart; pc = Rn_GPR64; build blinkop; } # C6.2.37 BR page C6-1209 line 71202 MATCH xd61f0000/mask=xfffffc1f # CONSTRUCT xd61f0000/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd61f0000/mask=xfffffc1f --status nodest :br Rn_GPR64 is b_2531=0x6b & b_2324=0 & b_2122=0 & b_1620=0x1f & b_1015=0 & Rn_GPR64 & b_0004=0 { pc = Rn_GPR64; goto [pc]; } # C6.2.40 BRK page C6-1213 line 71415 MATCH xd4200000/mask=xffe0001f # CONSTRUCT xd4200000/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd4200000/mask=xffe0001f --status nodest :brk "#"^imm16 is ALL_BTITARGETS & b_2431=0xd4 & b_2123=1 & imm16 & b_0204=0 & b_0001=0 { tmp:2 = imm16; preferred_exception_return:8 = inst_next; pc = SoftwareBreakpoint(tmp, preferred_exception_return); goto 
[pc]; } # C6.2.37 CASB, CASAB, CASALB, CASLB page C6-580 line 33952 KEEPWITH cas_var: "a" is b_22=1 & b_15=0 { } cas_var: "al" is b_22=1 & b_15=1 { } cas_var: "" is b_22=0 & b_15=0 { } cas_var: "l" is b_22=0 & b_15=1 { } # C6.2.42 CASB, CASAB, CASALB, CASLB page C6-1216 line 71570 MATCH x08a07c00/mask=xffa07c00 # CONSTRUCT x08a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08a07c00/mask=xffa07c00 --status nomem # CAS{,A,AL,L}B size=0b10 (b_3031) :cas^cas_var^"b" aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b00 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Wt & Rn_GPR64xsp & aa_Ws { comparevalue:1 = aa_Ws:1; newvalue:1 = aa_Wt:1; data:1 = *:1 Rn_GPR64xsp; if (data != comparevalue) goto ; *:1 Rn_GPR64xsp = newvalue; aa_Ws = zext(data); } # C6.2.43 CASH, CASAH, CASALH, CASLH page C6-1218 line 71692 MATCH x48a07c00/mask=xffa07c00 # CONSTRUCT x48a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x48a07c00/mask=xffa07c00 --status nomem # CAS{,A,AL,L}H size=0b10 (b_3031) :cas^cas_var^"h" aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b01 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Wt & Rn_GPR64xsp & aa_Ws { comparevalue:2 = aa_Ws:2; newvalue:2 = aa_Wt:2; data:2 = *:2 Rn_GPR64xsp; if (data != comparevalue) goto ; *:2 Rn_GPR64xsp = newvalue; aa_Ws = zext(data); } # C6.2.44 CASP, CASPA, CASPAL, CASPL page C6-1220 line 71814 MATCH x08207c00/mask=xbfa07c00 # CONSTRUCT x08207c00/mask=xffa17c01 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08207c00/mask=xffa17c01 --status nomem # CASP{,A,AL,L} size=0b00 (b_3031) :casp^cas_var aa_Ws, aa_Wss, aa_Wt, aa_Wtt, [Rn_GPR64xsp] is b_3031=0b00 & b_2329=0b0010000 & b_21=1 & b_1014=0b11111 & b_16=0 & b_00=0 & cas_var & aa_Ws & aa_Wss & aa_Wt & aa_Wtt & Rn_GPR64xsp { @if DATA_ENDIAN == "big" comparevalue:8 = (zext(aa_Ws) << 32) | zext(aa_Wss); newvalue:8 = (zext(aa_Wt) << 32) | zext(aa_Wtt); @else comparevalue:8 = (zext(aa_Wss) << 32) | zext(aa_Ws); newvalue:8 = (zext(aa_Wtt) << 32) | 
zext(aa_Wt);
@endif
data:8 = *:8 Rn_GPR64xsp; if (data != comparevalue) goto ; *:8 Rn_GPR64xsp = newvalue;
@if DATA_ENDIAN == "big"
aa_Ws = data(4); aa_Wss = data:4;
@else
aa_Ws = data:4; aa_Wss = data(4);
@endif
}
# NOTE(review): the "goto ;" skips in this section lost their local <label> names in extraction; restore from upstream.
# C6.2.44 CASP, CASPA, CASPAL, CASPL page C6-1220 line 71814 MATCH x08207c00/mask=xbfa07c00
# CONSTRUCT x48207c00/mask=xffa17c01 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x48207c00/mask=xffa17c01 --status nomem
# CASP{,A,AL,L} size=0b01 (b_3031)
# Compare-and-swap pair (64-bit regs): both 8-byte halves must match before the pair is stored.
:casp^cas_var aa_Xs, aa_Xss, aa_Xt, aa_Xtt, [Rn_GPR64xsp] is b_3031=0b01 & b_2329=0b0010000 & b_21=1 & b_1014=0b11111 & b_16=0 & b_00=0 & cas_var & aa_Xs & aa_Xss & aa_Xt & aa_Xtt & Rn_GPR64xsp {
local tmp_s:8 = aa_Xs; local tmp_ss:8 = aa_Xss; local tmp_t:8 = aa_Xt; local tmp_tt:8 = aa_Xtt;
@if DATA_ENDIAN == "little"
# for little endian, swap Xss/Xs and Xtt/Xt
tmp_s = aa_Xss; tmp_ss = aa_Xs; tmp_t = aa_Xtt; tmp_tt = aa_Xt;
@endif
local tmp_addr:8 = Rn_GPR64xsp; local tmp_d:8 = *:8 tmp_addr; tmp_addr = tmp_addr + 8; local tmp_dd:8 = *:8 tmp_addr; if (tmp_d != tmp_s) goto ; if (tmp_dd != tmp_ss) goto ; tmp_addr = Rn_GPR64xsp; *:8 tmp_addr = tmp_t; tmp_addr = tmp_addr + 8; *:8 tmp_addr = tmp_tt; aa_Xs = tmp_d; aa_Xss = tmp_dd; }
# C6.2.45 CAS, CASA, CASAL, CASL page C6-1223 line 71996 MATCH x88a07c00/mask=xbfa07c00
# CONSTRUCT x88a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88a07c00/mask=xffa07c00 --status nomem
# CAS{,A,AL,L} size=0b10 (b_3031)
:cas^cas_var aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b10 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Wt & Rn_GPR64xsp & aa_Ws { comparevalue:4 = aa_Ws; newvalue:4 = aa_Wt; data:4 = *:4 Rn_GPR64xsp; if (data != comparevalue) goto ; *:4 Rn_GPR64xsp = newvalue; aa_Ws = data; }
# C6.2.45 CAS, CASA, CASAL, CASL page C6-1223 line 71996 MATCH x88a07c00/mask=xbfa07c00
# CONSTRUCT xc8a07c00/mask=xffa07c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8a07c00/mask=xffa07c00 --status nomem
# CAS{,A,AL,L} size=0b11 (b_3031)
:cas^cas_var aa_Xs, aa_Xt, [Rn_GPR64xsp] is b_3031=0b11 & b_2329=0b0010001 & b_21=1 & b_1014=0b11111 & cas_var & aa_Xt & Rn_GPR64xsp & aa_Xs { comparevalue:8 = aa_Xs; newvalue:8 = aa_Xt; data:8 = *:8 Rn_GPR64xsp; if (data != comparevalue) goto ; *:8 Rn_GPR64xsp = newvalue; aa_Xs = data; }
# C6.2.41 CBNZ page C6-589 line 34530 KEEPWITH
# ZeroOp: exports the value the tested register comparison must equal ("z" -> 1, "nz" -> 0).
ZeroOp: "z" is cmpr_op=0 { export 1:1; }
ZeroOp: "nz" is cmpr_op=1 { export 0:1; }
# BitPos: computes the tested bit index from b_31:b_1923 and exports "bit is clear" as a 1-byte flag.
BitPos: "#"^bitpos is sf=1 & b_31 & b_1923 & Rt_GPR64 [ bitpos = b_31 << 5 | b_1923; ] { tmp:1 = ((Rt_GPR64 >> bitpos) & 1) == 0; export tmp; }
BitPos: "#"^bitpos is sf=0 & b_31 & b_1923 & Rt_GPR32 [ bitpos = b_31 << 5 | b_1923; ] { tmp:1 = ((Rt_GPR32 >> bitpos) & 1) == 0; export tmp; }
# C6.2.46 CBNZ page C6-1226 line 72159 MATCH x35000000/mask=x7f000000
# C6.2.47 CBZ page C6-1227 line 72216 MATCH x34000000/mask=x7f000000
# CONSTRUCT xb4000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb4000000/mask=xfe000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:cb^ZeroOp Rd_GPR64, Addr19 is sf=1 & b_2530=0x1a & ZeroOp & Addr19 & Rd_GPR64 { tmp:1 = Rd_GPR64 == 0; if (tmp == ZeroOp) goto Addr19; }
# C6.2.46 CBNZ page C6-1226 line 72159 MATCH x35000000/mask=x7f000000
# C6.2.47 CBZ page C6-1227 line 72216 MATCH x34000000/mask=x7f000000
# CONSTRUCT x34000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x34000000/mask=xfe000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:cb^ZeroOp Rd_GPR32, Addr19 is sf=0 & b_2530=0x1a & ZeroOp & Addr19 & Rd_GPR32 { tmp:1 = Rd_GPR32 == 0; if (tmp == ZeroOp) goto Addr19; }
# C6.2.46 CBNZ page C6-1226 line 72159 MATCH x35000000/mask=x7f000000
# CONSTRUCT x35000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x35000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:cbnz Rt_GPR32, Addr19 is sf=0 & b_2530=0x1a & cmpr_op=1 & Addr19 & Rt_GPR32 { if (Rt_GPR32 != 0) goto Addr19; }
# C6.2.46 CBNZ page C6-1226 line 72159 MATCH x35000000/mask=x7f000000
# CONSTRUCT xb5000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xb5000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:cbnz Rt_GPR64, Addr19 is sf=1 & b_2530=0x1a & cmpr_op=1 & Addr19 & Rt_GPR64 { if (Rt_GPR64 != 0) goto Addr19; }
# C6.2.47 CBZ page C6-1227 line 72216 MATCH x34000000/mask=x7f000000
# CONSTRUCT x34000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x34000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:cbz Rt_GPR32, Addr19 is sf=0 & b_2530=0x1a & cmpr_op=0 & Addr19 & Rt_GPR32 { if (Rt_GPR32 == 0) goto Addr19; }
# C6.2.47 CBZ page C6-1227 line 72216 MATCH x34000000/mask=x7f000000
# CONSTRUCT xb4000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xb4000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:cbz Rt_GPR64, Addr19 is sf=1 & b_2530=0x1a & cmpr_op=0 & Addr19 & Rt_GPR64 { if (Rt_GPR64 == 0) goto Addr19; }
# C6.2.48 CCMN (immediate) page C6-1228 line 72273 MATCH x3a400800/mask=x7fe00c10
# CONSTRUCT x3a400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x3a400800/mask=xffe00c10 --status pass --comment "flags"
# CCMN: if condition holds, set flags from Rn + imm; otherwise load flags directly from the NZCV immediate.
:ccmn Rn_GPR32, UImm5, NZCVImm_uimm4, CondOp is sf=0 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; tmp:4 = UImm5; addflags(Rn_GPR32, tmp); result:4 = Rn_GPR32 + tmp; resultflags(result); affectflags(); }
# C6.2.48 CCMN (immediate) page C6-1228 line 72273 MATCH x3a400800/mask=x7fe00c10
# CONSTRUCT xba400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xba400800/mask=xffe00c10 --status pass --comment "flags"
:ccmn Rn_GPR64, UImm5, NZCVImm_uimm4, CondOp is sf=1 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR64 & o3=0 &
NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; tmp:8 = zext(UImm5); addflags(Rn_GPR64, tmp); result:8 = Rn_GPR64 + tmp; resultflags(result); affectflags(); } # C6.2.49 CCMN (register) page C6-1230 line 72358 MATCH x3a400000/mask=x7fe00c10 # CONSTRUCT x3a400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x3a400000/mask=xffe00c10 --status pass --comment "flags" :ccmn Rn_GPR32, Rm_GPR32, NZCVImm_uimm4, CondOp is sf=0 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR32 & CondOp & b_1111=0 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; tmp:4 = Rm_GPR32; addflags(Rn_GPR32, tmp); result:4 = Rn_GPR32 + tmp; resultflags(result); affectflags(); } # C6.2.49 CCMN (register) page C6-1230 line 72358 MATCH x3a400000/mask=x7fe00c10 # CONSTRUCT xba400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xba400000/mask=xffe00c10 --status pass --comment "flags" :ccmn Rn_GPR64, Rm_GPR64, NZCVImm_uimm4, CondOp is sf=1 & op=0 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR64 & CondOp & b_1111=0 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; tmp:8 = Rm_GPR64; addflags(Rn_GPR64, tmp); result:8 = Rn_GPR64 + tmp; resultflags(result); affectflags(); } # C6.2.50 CCMP (immediate) page C6-1232 line 72446 MATCH x7a400800/mask=x7fe00c10 # CONSTRUCT x7a400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x7a400800/mask=xffe00c10 --status pass --comment "flags" :ccmp Rn_GPR32, UImm5, NZCVImm_uimm4, CondOp is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; subflags(Rn_GPR32, UImm5); tmp:4 = Rn_GPR32 - UImm5; resultflags(tmp); 
affectflags(); }
# NOTE(review): reformatted from a collapsed one-line extraction; tokens unchanged. The "goto ;" skips
# below lost their local <label> names in extraction; restore from upstream before building.
# C6.2.50 CCMP (immediate) page C6-1232 line 72446 MATCH x7a400800/mask=x7fe00c10
# CONSTRUCT xfa400800/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xfa400800/mask=xffe00c10 --status pass --comment "flags"
:ccmp Rn_GPR64, UImm5, NZCVImm_uimm4, CondOp is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & UImm5 & CondOp & b_1111=1 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; tmp:8 = zext(UImm5); subflags(Rn_GPR64, tmp); tmp = Rn_GPR64 - tmp; resultflags(tmp); affectflags(); }
# C6.2.51 CCMP (register) page C6-1234 line 72531 MATCH x7a400000/mask=x7fe00c10
# CONSTRUCT x7a400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x7a400000/mask=xffe00c10 --status pass --comment "flags"
:ccmp Rn_GPR32, Rm_GPR32, NZCVImm_uimm4, CondOp is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR32 & CondOp & b_1111=0 & o2=0 & Rn_GPR32 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; subflags(Rn_GPR32, Rm_GPR32); tmp:4 = Rn_GPR32 - Rm_GPR32; resultflags(tmp); affectflags(); }
# C6.2.51 CCMP (register) page C6-1234 line 72531 MATCH x7a400000/mask=x7fe00c10
# CONSTRUCT xfa400000/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xfa400000/mask=xffe00c10 --status pass --comment "flags"
:ccmp Rn_GPR64, Rm_GPR64, NZCVImm_uimm4, CondOp is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=2 & Rm_GPR64 & CondOp & b_1111=0 & o2=0 & Rn_GPR64 & o3=0 & NZCVImm_uimm4 { condition:1 = CondOp; condMask:1 = NZCVImm_uimm4; setCC_NZCV(condMask); if (!condition) goto inst_next; subflags(Rn_GPR64, Rm_GPR64); tmp:8 = Rn_GPR64 - Rm_GPR64; resultflags(tmp); affectflags(); }
# C6.2.52 CFINV page C6-1236 line 72619 MATCH xd500401f/mask=xfffff0ff
# C6.2.229 MSR (immediate) page C6-1684 line 99649 MATCH xd500401f/mask=xfff8f01f
# CONSTRUCT xd500401f/mask=xfffff0ff MATCHED 2 DOCUMENTED OPCODES
# xd500401f/mask=xfffff0ff NOT MATCHED BY ANY CONSTRUCTOR
# CFINV: invert the carry flag.
:cfinv is b_1231=0b11010101000000000100 & b_0811 & b_0007=0b00011111 { CY = !CY; }
# C6.2.54 CINC page C6-1238 line 72719 MATCH x1a800400/mask=x7fe00c00
# C6.2.104 CSET page C6-1445 line 86209 MATCH x1a9f07e0/mask=x7fff0fe0
# C6.2.106 CSINC page C6-1449 line 86382 MATCH x1a800400/mask=x7fe00c00
# CONSTRUCT x1a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x1a800400/mask=xffe00c00 --status pass --comment "flags"
# CINC alias of CSINC (Rn == Rm, Rn != 31): Rd = cond ? Rn + 1 : Rn.
:cinc Rd_GPR32, Rn_GPR32, InvCondOp is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=1 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { condition:1 = InvCondOp; tmp:4 = Rn_GPR32; if (!condition) goto ; tmp = Rn_GPR32 + 1; Rd_GPR64 = zext(tmp); }
# C6.2.54 CINC page C6-1238 line 72719 MATCH x1a800400/mask=x7fe00c00
# C6.2.104 CSET page C6-1445 line 86209 MATCH x1a9f07e0/mask=x7fff0fe0
# C6.2.106 CSINC page C6-1449 line 86382 MATCH x1a800400/mask=x7fe00c00
# CONSTRUCT x9a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x9a800400/mask=xffe00c00 --status pass --comment "flags"
:cinc Rd_GPR64, Rn_GPR64, InvCondOp is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=1 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64 { condition:1 = InvCondOp; tmp:8 = Rn_GPR64; if (!condition) goto ; tmp = Rn_GPR64 + 1; Rd_GPR64 = tmp; }
# C6.2.55 CINV page C6-1240 line 72809 MATCH x5a800000/mask=x7fe00c00
# C6.2.105 CSETM page C6-1447 line 86295 MATCH x5a9f03e0/mask=x7fff0fe0
# C6.2.107 CSINV page C6-1451 line 86486 MATCH x5a800000/mask=x7fe00c00
# CONSTRUCT x5a800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x5a800000/mask=xffe00c00 --status pass --comment "flags"
# CINV alias of CSINV (Rn == Rm, Rn != 31): Rd = cond ? ~Rn : Rn.
:cinv Rd_GPR32, Rn_GPR32, InvCondOp is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=0 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 {
condition:1 = InvCondOp; tmp:4 = Rn_GPR32; if (!condition) goto ; tmp = ~Rn_GPR32; Rd_GPR64 = zext(tmp); }
# NOTE(review): reformatted from a collapsed one-line extraction; tokens unchanged. The "goto ;" skips
# lost their local <label> names in extraction; restore from upstream before building.
# C6.2.55 CINV page C6-1240 line 72809 MATCH x5a800000/mask=x7fe00c00
# C6.2.105 CSETM page C6-1447 line 86295 MATCH x5a9f03e0/mask=x7fff0fe0
# C6.2.107 CSINV page C6-1451 line 86486 MATCH x5a800000/mask=x7fe00c00
# CONSTRUCT xda800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xda800000/mask=xffe00c00 --status pass --comment "flags"
:cinv Rd_GPR64, Rn_GPR64, InvCondOp is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=0 & Rn=Rm & (Rn!=0x1f) & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64 { condition:1 = InvCondOp; tmp:8 = Rn_GPR64; if (!condition) goto ; tmp = ~Rn_GPR64; Rd_GPR64 = tmp; }
# C6.2.56 CLREX page C6-1242 line 72899 MATCH xd503305f/mask=xfffff0ff
# CONSTRUCT xd503305f/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd503305f/mask=xfffff0ff --status nodest
# CLREX: clear the local exclusive monitor (modeled by the ClearExclusiveLocal pcodeop).
:clrex CRm_uimm4_def15 is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_uimm4_def15 & Op2=2 & Rt=0x1f { ClearExclusiveLocal(); }
# C6.2.57 CLS page C6-1243 line 72939 MATCH x5ac01400/mask=x7ffffc00
# CONSTRUCT x5ac01400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x5ac01400/mask=xfffffc00 --status pass
# CLS: count leading sign bits, computed as lzcount of (x ^ (x << 1)) | 1.
:cls Rd_GPR32, Rn_GPR32 is sf=0 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x5 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp:4 = (Rn_GPR32 ^ (Rn_GPR32<<1))|0x1; Rd_GPR64 = lzcount(tmp); }
# C6.2.57 CLS page C6-1243 line 72939 MATCH x5ac01400/mask=x7ffffc00
# CONSTRUCT xdac01400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac01400/mask=xfffffc00 --status pass
:cls Rd_GPR64, Rn_GPR64 is sf=1 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x5 & Rn_GPR64 & Rd_GPR64 { local tmp:8 = (Rn_GPR64 ^ (Rn_GPR64<<1))|0x1; Rd_GPR64 = lzcount(tmp); }
# C6.2.58 CLZ page C6-1245 line 73022 MATCH x5ac01000/mask=x7ffffc00
# CONSTRUCT x5ac01000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x5ac01000/mask=xfffffc00 --status pass
# CLZ: count leading zero bits.
:clz Rd_GPR32, Rn_GPR32 is sf=0 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x4 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp:4 = Rn_GPR32; Rd_GPR64 = lzcount(tmp); }
# C6.2.58 CLZ page C6-1245 line 73022 MATCH x5ac01000/mask=x7ffffc00
# CONSTRUCT xdac01000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac01000/mask=xfffffc00 --status pass
:clz Rd_GPR64, Rn_GPR64 is sf=1 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x4 & Rn_GPR64 & Rd_GPR64 { local tmp:8 = Rn_GPR64; Rd_GPR64 = lzcount(tmp); }
# C6.2.59 CMN (extended register) page C6-1246 line 73092 MATCH x2b20001f/mask=x7fe0001f
# C6.2.7 ADDS (extended register) page C6-1156 line 68516 MATCH x2b200000/mask=x7fe00000
# CONSTRUCT x2b20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x2b20001f/mask=xffe0001f --status pass --comment "flags"
# CMN alias of ADDS with Rd == 31: flags-only add.
:cmn Rn_GPR32wsp, ExtendRegShift32 is sf=0 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd=0x1f { tmp_1:4 = ExtendRegShift32; addflags(Rn_GPR32wsp, tmp_1); result:4 = Rn_GPR32wsp + tmp_1; resultflags(result); build SBIT_CZNO; }
# C6.2.59 CMN (extended register) page C6-1246 line 73092 MATCH x2b20001f/mask=x7fe0001f
# C6.2.7 ADDS (extended register) page C6-1156 line 68516 MATCH x2b200000/mask=x7fe00000
# CONSTRUCT xab20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xab20001f/mask=xffe0001f --status pass --comment "flags"
:cmn Rn_GPR64xsp, ExtendRegShift64 is sf=1 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & b_2121=1 & opt=0 & ExtendRegShift64 & Rn_GPR64xsp & Rd=0x1f { tmp_1:8 = ExtendRegShift64; addflags(Rn_GPR64xsp, tmp_1); result:8 = Rn_GPR64xsp + tmp_1; resultflags(result); build SBIT_CZNO; }
# C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f
# C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000
# CONSTRUCT x3100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x3100001f/mask=xff00001f --status pass --comment "flags"
:cmn Rn_GPR32xsp, ImmShift32 is sf=0 & b_30=0 & b_29=1 & aa_Xd=31 & b_2428=0x11 & ImmShift32 & Rn_GPR32xsp { addflags(Rn_GPR32xsp, ImmShift32); tmp:4 = Rn_GPR32xsp + ImmShift32; resultflags(tmp); affectflags(); }
# C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f
# C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000
# CONSTRUCT xb100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb100001f/mask=xff00001f --status pass --comment "flags"
:cmn Rn_GPR64xsp, ImmShift64 is sf=1 & b_30=0 & b_29=1 & aa_Xd=31 & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp { addflags(Rn_GPR64xsp, ImmShift64); tmp:8 = Rn_GPR64xsp + ImmShift64; resultflags(tmp); affectflags(); }
# C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f
# C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000
# CONSTRUCT x3100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x3100001f/mask=xffc0001f --status pass --comment "flags"
:cmn Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl0 is sf=0 & op=0 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_posimm_lsl0 & Rn_GPR32wsp & Rd=0x1f { tmp_1:4 = Imm12_addsubimm_operand_i32_posimm_lsl0; addflags(Rn_GPR32wsp, tmp_1); result:4 = Rn_GPR32wsp + tmp_1; resultflags(result); affectflags(); }
# C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f
# C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000
# CONSTRUCT x3140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x3140001f/mask=xffc0001f --status pass --comment "flags"
:cmn Rn_GPR32wsp, Imm12_addsubimm_operand_i32_posimm_lsl12 is sf=0 & op=0 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_posimm_lsl12 & Rn_GPR32wsp & Rd=0x1f { tmp_1:4 = Imm12_addsubimm_operand_i32_posimm_lsl12;
addflags(Rn_GPR32wsp, tmp_1); result:4 = Rn_GPR32wsp + tmp_1; resultflags(result); affectflags(); }
# NOTE(review): reformatted from a collapsed one-line extraction; tokens unchanged.
# C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f
# C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000
# CONSTRUCT xb100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb100001f/mask=xffc0001f --status pass --comment "flags"
:cmn Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl0 is sf=1 & op=0 & S=1 & SBIT_CZNO & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_posimm_lsl0 & Rn_GPR64xsp & Rd=0x1f { tmp_1:8 = Imm12_addsubimm_operand_i64_posimm_lsl0; addflags(Rn_GPR64xsp, tmp_1); result:8 = Rn_GPR64xsp + tmp_1; resultflags(result); build SBIT_CZNO; }
# C6.2.60 CMN (immediate) page C6-1248 line 73219 MATCH x3100001f/mask=x7f80001f
# C6.2.8 ADDS (immediate) page C6-1159 line 68669 MATCH x31000000/mask=x7f800000
# CONSTRUCT xb140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb140001f/mask=xffc0001f --status pass --comment "flags"
:cmn Rn_GPR64xsp, Imm12_addsubimm_operand_i64_posimm_lsl12 is sf=1 & op=0 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_posimm_lsl12 & Rn_GPR64xsp & Rd=0x1f { tmp_1:8 = Imm12_addsubimm_operand_i64_posimm_lsl12; addflags(Rn_GPR64xsp, tmp_1); result:8 = Rn_GPR64xsp + tmp_1; resultflags(result); affectflags(); }
# C6.2.61 CMN (shifted register) page C6-1250 line 73309 MATCH x2b00001f/mask=x7f20001f
# C6.2.9 ADDS (shifted register) page C6-1161 line 68775 MATCH x2b000000/mask=x7f200000
# CONSTRUCT x2b00001f/mask=xff20801f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x2b00001f/mask=xff20801f --status pass --comment "flags"
# if shift == '11' then ReservedValue();
:cmn Rn_GPR32, RegShift32 is sf=0 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & b_2121=0 & b_15=0 & RegShift32 & Rn_GPR32 & Rd=0x1f { tmp_1:4 = RegShift32; addflags(Rn_GPR32, tmp_1); result:4 = Rn_GPR32 + tmp_1; resultflags(result); build SBIT_CZNO; }
# C6.2.61 CMN (shifted register) page C6-1250 line 73309 MATCH x2b00001f/mask=x7f20001f
# C6.2.9 ADDS (shifted register) page C6-1161 line 68775 MATCH x2b000000/mask=x7f200000
# CONSTRUCT xab00001f/mask=xff20001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xab00001f/mask=xff20001f --status pass --comment "flags"
:cmn Rn_GPR64, RegShift64 is sf=1 & op=0 & S=1 & SBIT_CZNO & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd=0x1f { tmp_1:8 = RegShift64; addflags(Rn_GPR64, tmp_1); result:8 = Rn_GPR64 + tmp_1; resultflags(result); build SBIT_CZNO; }
# C6.2.62 CMP (extended register) page C6-1252 line 73406 MATCH x6b20001f/mask=x7fe0001f
# C6.2.362 SUBS (extended register) page C6-1950 line 114543 MATCH x6b200000/mask=x7fe00000
# CONSTRUCT x6b20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x6b20001f/mask=xffe0001f --status pass --comment "flags"
# CMP alias of SUBS with Rd == 31: flags-only subtract.
:cmp Rn_GPR32wsp, ExtendRegShift32 is sf=0 & op=1 & S=1 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd=0x1f { subflags(Rn_GPR32wsp, ExtendRegShift32); tmp:4 = Rn_GPR32wsp - ExtendRegShift32; resultflags(tmp); affectflags(); }
# C6.2.62 CMP (extended register) page C6-1252 line 73406 MATCH x6b20001f/mask=x7fe0001f
# C6.2.362 SUBS (extended register) page C6-1950 line 114543 MATCH x6b200000/mask=x7fe00000
# CONSTRUCT xeb20001f/mask=xffe0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xeb20001f/mask=xffe0001f --status pass --comment "flags"
:cmp Rn_GPR64xsp, ExtendRegShift64 is sf=1 & op=1 & S=1 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift64 & Rn_GPR64xsp & Rd=0x1f { subflags(Rn_GPR64xsp, ExtendRegShift64); tmp:8 = Rn_GPR64xsp - ExtendRegShift64; resultflags(tmp); affectflags(); }
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# CONSTRUCT x7100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x7100001f/mask=xff00001f --status pass --comment "flags"
:cmp Rn_GPR32xsp, ImmShift32 is sf=0 & b_30=1 &
b_29=1 & b_2428=0x11 & ImmShift32 & Rn_GPR32xsp & aa_Wd=31 { subflags(Rn_GPR32xsp, ImmShift32); tmp:4 = Rn_GPR32xsp - ImmShift32; resultflags(tmp); affectflags(); }
# NOTE(review): reformatted from a collapsed one-line extraction; tokens unchanged. The "goto ;" skips
# in the CNEG constructors lost their local <label> names in extraction; restore from upstream.
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# CONSTRUCT xf100001f/mask=xff00001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf100001f/mask=xff00001f --status pass --comment "flags"
:cmp Rn_GPR64xsp, ImmShift64 is sf=1 & b_30=1 & b_29=1 & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp & aa_Wd=31 { subflags(Rn_GPR64xsp, ImmShift64); tmp:8 = Rn_GPR64xsp - ImmShift64; resultflags(tmp); affectflags(); }
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# CONSTRUCT x7100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x7100001f/mask=xffc0001f --status pass --comment "flags"
:cmp Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0 is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_negimm_lsl0 & Rn_GPR32wsp & Rd=0x1f { tmp_1:4 = Imm12_addsubimm_operand_i32_negimm_lsl0; subflags(Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0); result:4 = Rn_GPR32wsp - tmp_1; resultflags(result); affectflags(); }
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# CONSTRUCT x7140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x7140001f/mask=xffc0001f --status pass --comment "flags"
:cmp Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl12 is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_negimm_lsl12 & Rn_GPR32wsp & Rd=0x1f { tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl12; subflags(Rn_GPR32wsp, tmp_2); tmp_1:4 = Rn_GPR32wsp - tmp_2; resultflags(tmp_1); affectflags(); }
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# CONSTRUCT xf100001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf100001f/mask=xffc0001f --status pass --comment "flags"
:cmp Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl0 is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_negimm_lsl0 & Rn_GPR64xsp & Rd=0x1f { tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl0; subflags(Rn_GPR64xsp, tmp_2); tmp_1:8 = Rn_GPR64xsp - tmp_2; resultflags(tmp_1); affectflags(); }
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# CONSTRUCT xf140001f/mask=xffc0001f MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf140001f/mask=xffc0001f --status pass --comment "flags"
:cmp Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl12 is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_negimm_lsl12 & Rn_GPR64xsp & Rd=0x1f { tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl12; subflags(Rn_GPR64xsp, tmp_2); tmp_1:8 = Rn_GPR64xsp - tmp_2; resultflags(tmp_1); affectflags(); }
# C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f
# C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0
# C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000
# CONSTRUCT x6b00001f/mask=xff20001f MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x6b00001f/mask=xff20001f --status pass --comment "flags"
:cmp Rn_GPR32, RegShift32 is sf=0 & op=1 & S=1 & b_2428=0xb & b_2121=0 & RegShift32 & Rn!=0x1f & Rn_GPR32 & Rd=0x1f { subflags(Rn_GPR32, RegShift32); tmp:4 = Rn_GPR32 - RegShift32; resultflags(tmp); affectflags(); }
# C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f
# C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0
# C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000
# CONSTRUCT xeb00001f/mask=xff20001f MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xeb00001f/mask=xff20001f --status pass --comment "flags"
:cmp Rn_GPR64, RegShift64 is sf=1 & op=1 & S=1 & b_2428=0xb & b_2121=0 & Rm_GPR64 & RegShift64 & Rn!=0x1f & Rn_GPR64 & Rd=0x1f { subflags(Rn_GPR64, RegShift64); tmp:8 = Rn_GPR64 - RegShift64; resultflags(tmp); affectflags(); }
# C6.2.66 CNEG page C6-1259 line 73771 MATCH x5a800400/mask=x7fe00c00
# C6.2.108 CSNEG page C6-1453 line 86590 MATCH x5a800400/mask=x7fe00c00
# CONSTRUCT x5a800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x5a800400/mask=xffe00c00 --status pass --comment "flags"
# CNEG alias of CSNEG (Rn == Rm): Rd = cond ? -Rn : Rn.
:cneg Rd_GPR32, Rn_GPR32, InvCondOp is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=1 & Rn=Rm & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { condition:1 = InvCondOp; tmp:4 = -Rn_GPR32; if (condition) goto ; tmp = Rn_GPR32; Rd_GPR64 = zext(tmp); }
# C6.2.66 CNEG page C6-1259 line 73771 MATCH x5a800400/mask=x7fe00c00
# C6.2.108 CSNEG page C6-1453 line 86590 MATCH x5a800400/mask=x7fe00c00
# CONSTRUCT xda800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xda800400/mask=xffe00c00 --status pass --comment "flags"
:cneg Rd_GPR64, Rn_GPR64, InvCondOp is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=1 & Rn=Rm & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64 { condition:1 = InvCondOp; tmp:8 = -Rn_GPR64; if (condition) goto ; tmp = Rn_GPR64; Rd_GPR64 = tmp; }
# C6.2.68 CPYFP, CPYFM, CPYFE page C6-1262 line 73913 MATCH x19000400/mask=x3f20fc00
# C6.2.69 CPYFPN, CPYFMN, CPYFEN page C6-1267 line 74246 MATCH x1900c400/mask=x3f20fc00
# C6.2.70 CPYFPRN, CPYFMRN, CPYFERN page C6-1272 line 74579 MATCH x19008400/mask=x3f20fc00
# C6.2.79 CPYFPWN, CPYFMWN, CPYFEWN page C6-1317 line 77576 MATCH x19004400/mask=x3f20fc00
# CONSTRUCT x19000400/mask=x3f20fc00 MATCHED 2 DOCUMENTED OPCODES
# cpyPhase: prologue/main/epilogue phase suffix of the FEAT_MOPS memory-copy instructions.
cpyPhase:"p" is opt=0 {export 0:1;}
cpyPhase:"m" is opt=1 {export 1:1;}
cpyPhase:"e" is opt=2 {export 2:1;}
# cpyType: option-suffix table (reads/writes non-temporal / unprivileged combinations) keyed on bits 10-15;
# each variant exports its own encoding byte so the pcodeop below can distinguish them.
cpyType: "" is b_1015=0x01 {export 0x01:1; }
cpyType: "n" is b_1015=0x31 {export 0x31:1; }
cpyType: "rn" is b_1015=0x21 {export 0x21:1; }
cpyType: "wn" is b_1015=0x11 {export 0x11:1; }
cpyType: "rt" is b_1015=0x09 {export 0x09:1; }
cpyType: "rtn" is b_1015=0x39 {export 0x39:1; }
cpyType: "rtrn" is b_1015=0x29 {export 0x29:1; }
cpyType: "rtwn" is b_1015=0x19 {export 0x19:1; }
cpyType: "t" is b_1015=0x0d {export 0x0d:1; }
cpyType: "tn" is b_1015=0x3d {export 0x3d:1; }
cpyType: "trn" is b_1015=0x2d {export 0x2d:1; }
cpyType: "twn" is b_1015=0x1d {export 0x1d:1; }
cpyType: "wt" is b_1015=0x03 {export 0x03:1; }
cpyType: "wtn" is b_1015=0x33 {export 0x33:1; }
cpyType: "wtrn" is b_1015=0x23 {export 0x23:1; }
cpyType: "wtwn" is b_1015=0x13 {export 0x13:1; }
define pcodeop MemoryCopyForward;
# Memory Copy Forward-only
:cpyf^cpyPhase^cpyType [Rd_GPR64]!, [Rs_GPR64]!, Rn_GPR64^"!" is size.ldstr & b_2429=0x19 & opt != 3 & cpyPhase & b_2121=0 & Rs_GPR64 & cpyType & Rn_GPR64 & Rd_GPR64 { MemoryCopyForward(Rd_GPR64, Rs_GPR64, Rn_GPR64, cpyType, cpyPhase); }
define pcodeop MemoryCopy;
# CPY: direction-chosen memory copy, modeled opaquely via the MemoryCopy pcodeop.
:cpy^cpyPhase^cpyType [Rd_GPR64]!, [Rs_GPR64]!, Rn_GPR64^"!" is size.ldstr & b_2429=0x1d & opt != 3 & cpyPhase & b_2121=0 & Rs_GPR64 & cpyType & Rn_GPR64 & Rd_GPR64 { MemoryCopy(Rd_GPR64, Rs_GPR64, Rn_GPR64, cpyType, cpyPhase); }
# C6.2.59 CRC32B, CRC32H, CRC32W, CRC32X page C6-611 line 35802 KEEPWITH
# sf == 0 && sz = 00 CRC32CB variant
# crcpoly: selects the CRC polynomial — ISO-HDLC (0x04C11DB7) for crc32*, Castagnoli (0x1EDC6F41) for crc32c*.
crcpoly: "" is b_12=0 {tmp:4 = 0x04C11DB7; export *[const]:4 tmp; }
crcpoly: "c" is b_12=1 { tmp:4 = 0x1EDC6F41; export *[const]:4 tmp; }
# C6.2.100 CRC32B, CRC32H, CRC32W, CRC32X page C6-1438 line 85850 MATCH x1ac04000/mask=x7fe0f000
# C6.2.101 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-1440 line 85958 MATCH x1ac05000/mask=x7fe0f000
# CONSTRUCT x1ac04000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac04000/mask=xffe0ec00 --status noqemu
:crc32^crcpoly^"b" Rd_GPR32, Rn_GPR32, Rm_GPR32 is b_31=0 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b00 & crcpoly & Rm_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp_Rd:4 = crc32b(Rn_GPR32, Rm_GPR32, crcpoly); Rd_GPR64 = zext(tmp_Rd); }
# C6.2.100 CRC32B, CRC32H, CRC32W, CRC32X page C6-1438 line 85850 MATCH x1ac04000/mask=x7fe0f000
# C6.2.101 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-1440 line 85958 MATCH x1ac05000/mask=x7fe0f000
# CONSTRUCT x1ac04400/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac04400/mask=xffe0ec00 --status noqemu
# sf == 0 && sz = 01 CRC32CH variant
:crc32^crcpoly^"h" Rd_GPR32, Rn_GPR32, Rm_GPR32 is b_31=0 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b01 & crcpoly & Rm_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp_Rd:4 = crc32h(Rn_GPR32, Rm_GPR32, crcpoly); Rd_GPR64 = zext(tmp_Rd); }
# C6.2.100 CRC32B, CRC32H, CRC32W, CRC32X page C6-1438 line 85850 MATCH x1ac04000/mask=x7fe0f000
# C6.2.101 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-1440 line 85958 MATCH x1ac05000/mask=x7fe0f000
# CONSTRUCT x1ac04800/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac04800/mask=xffe0ec00 --status noqemu
# sf == 0 && sz = 10 CRC32CW variant
:crc32^crcpoly^"w" Rd_GPR32, Rn_GPR32, Rm_GPR32 is
b_31=0 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b10 & crcpoly & Rm_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp_Rd:4 = crc32w(Rn_GPR32, Rm_GPR32, crcpoly); Rd_GPR64 = zext(tmp_Rd); } # C6.2.100 CRC32B, CRC32H, CRC32W, CRC32X page C6-1438 line 85850 MATCH x1ac04000/mask=x7fe0f000 # C6.2.101 CRC32CB, CRC32CH, CRC32CW, CRC32CX page C6-1440 line 85958 MATCH x1ac05000/mask=x7fe0f000 # CONSTRUCT x9ac04c00/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x9ac04c00/mask=xffe0ec00 --status noqemu # sf == 1 && sz = 11 CRC32CX variant :crc32^crcpoly^"x" Rd_GPR32, Rn_GPR32, Rm_GPR64 is b_31=1 & b_2130=0b0011010110 & b_1315=0b010 & b_1011=0b11 & crcpoly & Rm_GPR64 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp_Rd:4 = crc32x(Rn_GPR32, Rm_GPR64, crcpoly); Rd_GPR64 = zext(tmp_Rd); } # C6.2.103 CSEL page C6-1443 line 86120 MATCH x1a800000/mask=x7fe00c00 # CONSTRUCT x1a800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x1a800000/mask=xffe00c00 --status pass --comment "flags" :csel Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { condition:1 = CondOp; tmp:4 = Rn_GPR32; if (condition) goto ; tmp = Rm_GPR32; Rd_GPR64 = zext(tmp); } # C6.2.103 CSEL page C6-1443 line 86120 MATCH x1a800000/mask=x7fe00c00 # CONSTRUCT x9a800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9a800000/mask=xffe00c00 --status pass --comment "flags" :csel Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=0 & Rn_GPR64 & Rd_GPR64 { condition:1 = CondOp; tmp:8 = Rn_GPR64; if (condition) goto ; tmp = Rm_GPR64; Rd_GPR64 = tmp; } # C6.2.104 CSET page C6-1445 line 86209 MATCH x1a9f07e0/mask=x7fff0fe0 # C6.2.54 CINC page C6-1238 line 72719 MATCH x1a800400/mask=x7fe00c00 # C6.2.106 CSINC page C6-1449 line 86382 MATCH x1a800400/mask=x7fe00c00 # CONSTRUCT x1a9f07e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES # AUNIT 
--inst x1a9f07e0/mask=xffff0fe0 --status pass --comment "flags"
:cset Rd_GPR32, InvCondOp
  is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & InvCondOp & b_1011=1 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rd_GPR32 & Rd_GPR64
{
  condition:1 = InvCondOp;
  Rd_GPR64 = zext(condition);
}
# C6.2.104 CSET page C6-1445 line 86209 MATCH x1a9f07e0/mask=x7fff0fe0
# C6.2.54 CINC page C6-1238 line 72719 MATCH x1a800400/mask=x7fe00c00
# C6.2.106 CSINC page C6-1449 line 86382 MATCH x1a800400/mask=x7fe00c00
# CONSTRUCT x9a9f07e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x9a9f07e0/mask=xffff0fe0 --status pass --comment "flags"
:cset Rd_GPR64, InvCondOp
  is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & InvCondOp & b_1011=1 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rd_GPR64
{
  condition:1 = InvCondOp;
  Rd_GPR64 = zext(condition);
}
# C6.2.105 CSETM page C6-1447 line 86295 MATCH x5a9f03e0/mask=x7fff0fe0
# C6.2.55 CINV page C6-1240 line 72809 MATCH x5a800000/mask=x7fe00c00
# C6.2.107 CSINV page C6-1451 line 86486 MATCH x5a800000/mask=x7fe00c00
# CONSTRUCT x5a9f03e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x5a9f03e0/mask=xffff0fe0 --status pass --comment "flags"
:csetm Rd_GPR32, InvCondOp
  is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & InvCondOp & b_1011=0 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  condition:1 = InvCondOp;
  tmp:4 = zext(condition) * -1;
  Rd_GPR64 = zext(tmp);
}
# C6.2.105 CSETM page C6-1447 line 86295 MATCH x5a9f03e0/mask=x7fff0fe0
# C6.2.55 CINV page C6-1240 line 72809 MATCH x5a800000/mask=x7fe00c00
# C6.2.107 CSINV page C6-1451 line 86486 MATCH x5a800000/mask=x7fe00c00
# CONSTRUCT xda9f03e0/mask=xffff0fe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xda9f03e0/mask=xffff0fe0 --status pass --comment "flags"
:csetm Rd_GPR64, InvCondOp
  is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & InvCondOp & b_1011=0 & Rn=0x1f & Rm=0x1f & (b_15=0 | b_14=0 | b_13=0) & Rn_GPR64 & Rd_GPR64
{
  condition:1 = InvCondOp;
  Rd_GPR64 = zext(condition) * -1;
}
# C6.2.106 CSINC page C6-1449 line 86382 MATCH x1a800400/mask=x7fe00c00
# C6.2.54 CINC page C6-1238 line 72719 MATCH x1a800400/mask=x7fe00c00
# C6.2.104 CSET page C6-1445 line 86209 MATCH x1a9f07e0/mask=x7fff0fe0
# CONSTRUCT x1a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x1a800400/mask=xffe00c00 --status pass --comment "flags"
## FIX: restored stripped local label <done> on the conditional skip
## (SLEIGH requires an explicit <label> target for goto).
:csinc Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
  is sf=0 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=1 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  condition:1 = CondOp;
  tmp:4 = Rn_GPR32;
  if (condition) goto <done>;
  tmp = Rm_GPR32 + 1;
  <done>
  Rd_GPR64 = zext(tmp);
}
# C6.2.106 CSINC page C6-1449 line 86382 MATCH x1a800400/mask=x7fe00c00
# C6.2.54 CINC page C6-1238 line 72719 MATCH x1a800400/mask=x7fe00c00
# C6.2.104 CSET page C6-1445 line 86209 MATCH x1a9f07e0/mask=x7fff0fe0
# CONSTRUCT x9a800400/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x9a800400/mask=xffe00c00 --status pass --comment "flags"
:csinc Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
  is sf=1 & op=0 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=1 & Rn_GPR64 & Rd_GPR64
{
  condition:1 = CondOp;
  tmp:8 = Rn_GPR64;
  if (condition) goto <done>;
  tmp = Rm_GPR64 + 1;
  <done>
  Rd_GPR64 = tmp;
}
# C6.2.107 CSINV page C6-1451 line 86486 MATCH x5a800000/mask=x7fe00c00
# C6.2.55 CINV page C6-1240 line 72809 MATCH x5a800000/mask=x7fe00c00
# C6.2.105 CSETM page C6-1447 line 86295 MATCH x5a9f03e0/mask=x7fff0fe0
# CONSTRUCT x5a800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x5a800000/mask=xffe00c00 --status pass
:csinv Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
  is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  condition:1 = CondOp;
  tmp:4 = Rn_GPR32;
  if (condition) goto <done>;
  tmp = ~Rm_GPR32;
  <done>
  Rd_GPR64 = zext(tmp);
}
# C6.2.107 CSINV page C6-1451 line 86486 MATCH x5a800000/mask=x7fe00c00
# C6.2.55 CINV page C6-1240 line
72809 MATCH x5a800000/mask=x7fe00c00
# C6.2.105 CSETM page C6-1447 line 86295 MATCH x5a9f03e0/mask=x7fff0fe0
# CONSTRUCT xda800000/mask=xffe00c00 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xda800000/mask=xffe00c00 --status pass --comment "flags"
## FIX: restored stripped local label <done> on the conditional skip
## (SLEIGH requires an explicit <label> target for goto).
:csinv Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
  is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=0 & Rn_GPR64 & Rd_GPR64
{
  condition:1 = CondOp;
  tmp:8 = Rn_GPR64;
  if (condition) goto <done>;
  tmp = ~Rm_GPR64;
  <done>
  Rd_GPR64 = tmp;
}
# C6.2.108 CSNEG page C6-1453 line 86590 MATCH x5a800400/mask=x7fe00c00
# C6.2.66 CNEG page C6-1259 line 73771 MATCH x5a800400/mask=x7fe00c00
# CONSTRUCT x5a800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x5a800400/mask=xffe00c00 --status pass --comment "flags"
:csneg Rd_GPR32, Rn_GPR32, Rm_GPR32, CondOp
  is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR32 & CondOp & b_1011=1 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  condition:1 = CondOp;
  tmp:4 = Rn_GPR32;
  if (condition) goto <done>;
  tmp = -Rm_GPR32;
  <done>
  Rd_GPR64 = zext(tmp);
}
# C6.2.108 CSNEG page C6-1453 line 86590 MATCH x5a800400/mask=x7fe00c00
# C6.2.66 CNEG page C6-1259 line 73771 MATCH x5a800400/mask=x7fe00c00
# CONSTRUCT xda800400/mask=xffe00c00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xda800400/mask=xffe00c00 --status pass --comment "flags"
:csneg Rd_GPR64, Rn_GPR64, Rm_GPR64, CondOp
  is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=4 & Rm_GPR64 & CondOp & b_1011=1 & Rn_GPR64 & Rd_GPR64
{
  condition:1 = CondOp;
  tmp:8 = Rn_GPR64;
  if (condition) goto <done>;
  tmp = -Rm_GPR64;
  <done>
  Rd_GPR64 = tmp;
}
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7420/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd50b7420/mask=xffffffe0 --status nodest
:dc "ZVA", Rt_GPR64
  is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0100 & b_0507=0b001 & Rt_GPR64
{
  DC_ZVA(Rt_GPR64);
}
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087620/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5087620/mask=xffffffe0 --status nodest
:dc "IVAC", Rt_GPR64
  is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b001 & Rt_GPR64
{
  DC_IVAC(Rt_GPR64);
}
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087640/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5087640/mask=xffffffe0 --status nodest
:dc "ISW", Rt_GPR64
  is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b010 & Rt_GPR64
{
  DC_ISW(Rt_GPR64);
}
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7a20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd50b7a20/mask=xffffffe0 --status nopcodeop
:dc "CVAC", Rt_GPR64
  is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b001 & Rt_GPR64
{
  DC_CVAC(Rt_GPR64);
}
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087a40/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5087a40/mask=xffffffe0 --status nodest
:dc "CSW", Rt_GPR64
  is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b010 & Rt_GPR64
{
  DC_CSW(Rt_GPR64);
}
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC
page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7b20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd50b7b20/mask=xffffffe0 --status nodest
## DC cache-maintenance operations: each variant pins the op1/CRm/op2 fields
## and hands the address register to a dedicated pcodeop.
:dc "CVAU", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1011 & b_0507=0b001 & Rt_GPR64
{ DC_CVAU(Rt_GPR64); }
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7e20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd50b7e20/mask=xffffffe0 --status nodest
:dc "CIVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b001 & Rt_GPR64
{ DC_CIVAC(Rt_GPR64); }
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087e40/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5087e40/mask=xffffffe0 --status nodest
:dc "CISW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b010 & Rt_GPR64
{ DC_CISW(Rt_GPR64); }
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7c20/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd50b7c20/mask=xffffffe0 --status nodest
:dc "CVAP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1100 & b_0507=0b001 & Rt_GPR64
{ DC_CVAP(Rt_GPR64); }
# C6.2.110 DCPS1 page C6-1457 line 86790 MATCH xd4a00001/mask=xffe0001f
# CONSTRUCT xd4a00001/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd4a00001/mask=xffe0001f
--status nodest
## DCPS1/2/3 debug state entry; the ll field (bits 0-1) selects the target exception level.
:dcps1 imm16 is b_2431=0xd4 & excCode=5 & imm16 & excCode2=0 & ll=1
{ DCPSInstruction(1:2, imm16:2); }
# C6.2.111 DCPS2 page C6-1458 line 86856 MATCH xd4a00002/mask=xffe0001f
# CONSTRUCT xd4a00002/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd4a00002/mask=xffe0001f --status nodest
:dcps2 imm16 is b_2431=0xd4 & excCode=5 & imm16 & excCode2=0 & ll=2
{ DCPSInstruction(2:2, imm16:2); }
# C6.2.112 DCPS3 page C6-1459 line 86927 MATCH xd4a00003/mask=xffe0001f
# CONSTRUCT xd4a00003/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd4a00003/mask=xffe0001f --status nodest
:dcps3 imm16 is b_2431=0xd4 & excCode=5 & imm16 & excCode2=0 & ll=3
{ DCPSInstruction(3:2, imm16:2); }
# C6.2.114 DMB page C6-1461 line 87029 MATCH xd50330bf/mask=xfffff0ff
# CONSTRUCT xd50330bf/mask=xfffff3ff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd50330bf/mask=xfffff3ff --status nodest
## DMB with CRm_10=0: barrier type forced to 0, domain from CRm bits 2-3.
:dmb CRm_CRx is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_dbarrier_op & CRm_CRx & CRm_32 & CRm_10=0 & Op2=5 & Rt=0x1f
{ types:1 = 0x0; domain:1 = CRm_32; DataMemoryBarrier(domain, types); }
# C6.2.114 DMB page C6-1461 line 87029 MATCH xd50330bf/mask=xfffff0ff
# CONSTRUCT xd50330bf/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd50330bf/mask=xfffff0ff --status nodest
:dmb CRm_dbarrier_op is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_dbarrier_op & CRm_32 & CRm_10 & Op2=5 & Rt=0x1f
{ types:1 = CRm_10; domain:1 = CRm_32; DataMemoryBarrier(domain, types); }
# C6.2.115 DRPS page C6-1463 line 87125 MATCH xd6bf03e0/mask=xffffffff
# CONSTRUCT xd6bf03e0/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd6bf03e0/mask=xffffffff --status nodest
## DRPS: indirect flow via the pcodeop result, modeled as a return.
:drps is b_2531=0x6b & b_2324=1 & b_2122=1 & b_1620=0x1f & b_1015=0 & aa_Xn=31 & b_0004=0
{ pc = DRPSInstruction(); return [pc]; }
# C6.2.116 DSB page C6-1464 line 87160 MATCH xd503309f/mask=xfffff0ff
# C6.2.252 PSSBB page C6-1727 line 101951 MATCH xd503349f/mask=xffffffff
# C6.2.290 SSBB
page C6-1810 line 106930 MATCH xd503309f/mask=xffffffff
# CONSTRUCT xd503309f/mask=xfffff3ff MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd503309f/mask=xfffff3ff --status nodest
## DSB family: domain from CRm bits 2-3, barrier type from CRm bits 0-1; nXS=0 here.
:dsb CRm_dbarrier_op is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_dbarrier_op & CRm_32 & CRm_10 & Op2=4 & Rt=0x1f
{ types:1 = CRm_10; domain:1 = CRm_32; nXS:1 = 0; DataSynchronizationBarrier(domain, types, nXS); }
## SSBB/PSSBB are DSB encodings with fixed CRm (0 and 4 respectively).
:ssbb is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_CRx=0 & CRm_32 & CRm_10 & Op2=4 & Rt=0x1f
{ types:1 = CRm_10; domain:1 = CRm_32; nXS:1 = 0; DataSynchronizationBarrier(domain, types, nXS); }
:pssbb is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_CRx=4 & CRm_32 & CRm_10 & Op2=4 & Rt=0x1f
{ types:1 = CRm_10; domain:1 = CRm_32; nXS:1 = 0; DataSynchronizationBarrier(domain, types, nXS); }
# C6.2.116 DSB page C6-1464 line 87160 MATCH xd503323f/mask=xfffff3ff
# CONSTRUCT xd503323f/mask=xfffff3ff MATCHED 1 DOCUMENTED OPCODES
# xd503323f/mask=xfffff3ff NOT MATCHED BY ANY CONSTRUCTOR
# b_0031=11010101000000110011..1000111111
:dsb CRm_32 is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & CRm_32 & CRm_10=2 & Op2=1 & Rt=0x1f
{
types:1 = 0x3; # MBReqTypes_All
domain:1 = CRm_32;
nXS:1 = 1;
DataSynchronizationBarrier(domain, types, nXS);
}
# C6.2.118 EON (shifted register) page C6-1468 line 87407 MATCH x4a200000/mask=x7f200000
# CONSTRUCT x4a200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x4a200000/mask=xff200000 --status pass
## EON: Rn XOR NOT(shifted Rm); 32-bit result zero-extended into the X register.
:eon Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=2 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{ tmp_3:4 = RegShift32Log; tmp_2:4 = tmp_3 ^ -1:4; tmp_1:4 = Rn_GPR32 ^ tmp_2; Rd_GPR64 = zext(tmp_1); }
# C6.2.118 EON (shifted register) page C6-1468 line 87407 MATCH x4a200000/mask=x7f200000
# CONSTRUCT xca200000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xca200000/mask=xff200000 --status pass
:eon Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=2 & b_2428=0xa & N=1 & Rm_GPR64 & RegShift64Log & Rn_GPR64 & Rd_GPR64
{ tmp_3:8= RegShift64Log; tmp_2:8 = tmp_3 ^ -1:8; tmp_1:8 = Rn_GPR64 ^ tmp_2; Rd_GPR64 = tmp_1; }
# C6.2.119 EOR (immediate) page C6-1470 line 87512 MATCH x52000000/mask=x7f800000
# CONSTRUCT x52000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x52000000/mask=xff800000 --status pass
:eor Rd_GPR32wsp, Rn_GPR32, DecodeWMask32 is sf=0 & opc=2 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32wsp & Rd_GPR64xsp
{ tmp_1:4 = Rn_GPR32 ^ DecodeWMask32; Rd_GPR64xsp = zext(tmp_1); }
# C6.2.119 EOR (immediate) page C6-1470 line 87512 MATCH x52000000/mask=x7f800000
# CONSTRUCT xd2000000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd2000000/mask=xff800000 --status pass
:eor Rd_GPR64xsp, Rn_GPR64, DecodeWMask64 is sf=1 & opc=2 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64xsp
{ tmp_1:8 = Rn_GPR64 ^ DecodeWMask64; Rd_GPR64xsp = tmp_1; }
# C6.2.120 EOR (shifted register) page C6-1472 line 87604 MATCH x4a000000/mask=x7f200000
# CONSTRUCT x4a000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x4a000000/mask=xff200000 --status pass
:eor Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=2 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{ tmp_2:4 = RegShift32Log; tmp_1:4 = Rn_GPR32 ^ tmp_2; Rd_GPR64 = zext(tmp_1); }
# C6.2.120 EOR (shifted register) page C6-1472 line 87604 MATCH x4a000000/mask=x7f200000
# CONSTRUCT xca000000/mask=xff200000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xca000000/mask=xff200000 --status pass
:eor Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=2 & b_2428=0xa & N=0 & Rm_GPR64 & RegShift64Log & Rn_GPR64 & Rd_GPR64
{ tmp_2:8 = RegShift64Log; tmp_1:8 = Rn_GPR64 ^ tmp_2; Rd_GPR64 = tmp_1; }
# C6.2.121 ERET page C6-1474 line 87707 MATCH xd69f03e0/mask=xffffffff
# CONSTRUCT xd69f03e0/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd69f03e0/mask=xffffffff --status nodest
## ERET and its PAC-authenticated variants: indirect return through the
## ExceptionReturn pcodeop result.
:eret is b_2531=0x6b & b_2324=1 & b_2122=0 & b_1620=0x1f & b_1015=0 & aa_Xn=31 & b_0004=0
{ pc = ExceptionReturn(); return [pc]; }
# C6.2.122 ERETAA, ERETAB page C6-1475 line 87749 MATCH xd69f0bff/mask=xfffffbff
# CONSTRUCT xd69f0bff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd69f0bff/mask=xffffffff --status nodest
:eretaa is eretaa__PACpart & b_0031=0xd69f0bff
{ pc = ExceptionReturn(); build eretaa__PACpart; return [pc]; }
# C6.2.122 ERETAA, ERETAB page C6-1475 line 87749 MATCH xd69f0bff/mask=xfffffbff
# CONSTRUCT xd69f0fff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd69f0fff/mask=xffffffff --status nodest
:eretab is eretab__PACpart & b_0031=0xd69f0fff
{ pc = ExceptionReturn(); build eretab__PACpart; return [pc]; }
# C6.2.124 EXTR page C6-1477 line 87864 MATCH x13800000/mask=x7fa00000
# C6.2.261 ROR (immediate) page C6-1740 line 102633 MATCH x13800000/mask=x7fa00000
# CONSTRUCT x13800000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x13800000/mask=xffe00000 --status pass
## EXTR 32-bit: concatenate Rn:Rm into 64 bits and shift right by lsb.
:extr Rd_GPR32, Rn_GPR32, Rm_GPR32, LSB_bitfield32_imm is sf=0 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=0 & b_21=0 & Rm_GPR32 & LSB_bitfield32_imm & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{ lsb:8 = LSB_bitfield32_imm; result:8 = (zext(Rn_GPR32) << 32) | zext(Rm_GPR32); result = (result >> lsb); Rd_GPR64 = zext(result:4); }
# C6.2.124 EXTR page C6-1477 line 87864 MATCH x13800000/mask=x7fa00000
# C6.2.261 ROR (immediate) page C6-1740 line 102633 MATCH x13800000/mask=x7fa00000
# CONSTRUCT x93c00000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x93c00000/mask=xffe00000 --status pass
## EXTR 64-bit: same extract expressed as (Rm >> lsb) | (Rn << (64 - lsb)).
:extr Rd_GPR64, Rn_GPR64, Rm_GPR64, LSB_bitfield64_imm is sf=1 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=1 & b_21=0 & Rm_GPR64 & LSB_bitfield64_imm & Rn_GPR64 & Rd_GPR64
{ local tmp:8 = (Rm_GPR64 >> LSB_bitfield64_imm:1); Rd_GPR64 = tmp | (Rn_GPR64 << (64:1 - LSB_bitfield64_imm:1)); }
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# C6.2.22 AUTIA, AUTIA1716, AUTIASP, AUTIAZ,
AUTIZA page C6-1183 line 69908 MATCH xd503219f/mask=xfffffddf
# C6.2.23 AUTIB, AUTIB1716, AUTIBSP, AUTIBZ, AUTIZB page C6-1186 line 70065 MATCH xd50321df/mask=xfffffddf
# C6.2.102 CSDB page C6-1442 line 86066 MATCH xd503229f/mask=xffffffff
# C6.2.113 DGH page C6-1460 line 86992 MATCH xd50320df/mask=xffffffff
# C6.2.123 ESB page C6-1476 line 87816 MATCH xd503221f/mask=xffffffff
# C6.2.245 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1712 line 101196 MATCH xd503211f/mask=xfffffddf
# C6.2.246 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1715 line 101358 MATCH xd503215f/mask=xfffffddf
# C6.2.251 PSB CSYNC page C6-1726 line 101911 MATCH xd503223f/mask=xffffffff
# C6.2.381 TSB CSYNC page C6-1982 line 116218 MATCH xd503225f/mask=xffffffff
# CONSTRUCT xd503201f/mask=xfffff01f MATCHED 10 DOCUMENTED OPCODES
# AUNIT --inst xd503201f/mask=xfffff01f --status nodest
## Catch-all HINT: no semantics (empty body) for unrecognized hint encodings.
:hint imm7Low is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low & Rt=0x1f {}
# C6.2.127 HLT page C6-1482 line 88176 MATCH xd4400000/mask=xffe0001f
# CONSTRUCT xd4400000/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd4400000/mask=xffe0001f --status nodest
:hlt imm16 is ALL_BTITARGETS & b_2431=0xd4 & excCode=2 & imm16 & excCode2=0 & ll=0
{ HaltBreakPoint(); }
# C6.2.128 HVC page C6-1483 line 88218 MATCH xd4000002/mask=xffe0001f
# CONSTRUCT xd4000002/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd4000002/mask=xffe0001f --status nodest
:hvc imm16 is b_2431=0xd4 & excCode=0 & imm16 & excCode2=0 & ll=2
{ CallHyperVisor(imm16:2); }
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087100/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5087100/mask=xffffffe0 --status nodest
## IC instruction-cache maintenance ops, delegated to dedicated pcodeops.
:ic "IALLUIS" is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0001 & b_0507=0b000
{ IC_IALLUIS(); }
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087500/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5087500/mask=xffffffe0 --status nodest
:ic "IALLU" is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0101 & b_0507=0b000
{ IC_IALLU(); }
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7520/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd50b7520/mask=xffffffe0 --status nopcodeop
:ic "IVAU", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0101 & b_0507=0b001 & Rt_GPR64
{ IC_IVAU(Rt_GPR64); }
# C6.2.85 ISB page C6-647 line 37682 KEEPWITH
## ISB option operand: explicit "#imm" form, or empty display when CRm is the default 0xf (SY).
IsbOption: "#"^CRm_isb_op is CRm_isb_op { export *[const]:4 CRm_isb_op; }
IsbOption: "" is CRm_isb_op=0xf { tmp:4 = 0xf; export tmp; }
# C6.2.131 ISB page C6-1487 line 88428 MATCH xd50330df/mask=xfffff0ff
# CONSTRUCT xd50330df/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd50330df/mask=xfffff0ff --status nodest
:isb IsbOption is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x3 & IsbOption & Op2=6 & Rt=0x1f
{ InstructionSynchronizationBarrier(); }
# C6.2.86 LDADDB, LDADDAB, LDADDALB, LDADDLB page C6-648 line 37726 KEEPWITH
# variants, a=acquire, al=acquire+release, l=release
# build ls_loa to acquire and ls_lor to release
ls_loa: "a" is b_23=1 & b_22=0 { LOAcquire(); }
ls_loa: "al" is b_23=1 & b_22=1 { LOAcquire(); }
ls_loa: "" is b_23=0 & b_22=0 { }
ls_loa: "l" is b_23=0 & b_22=1 { }
ls_lor: "a" is b_23=1 & b_22=0 { }
ls_lor: "al" is b_23=1 & b_22=1 { LORelease(); }
ls_lor: "" is b_23=0 & b_22=0 { }
ls_lor: "l" is b_23=0 & b_22=1 { }
x78203000/mask=xff20fc00
# C6.2.185 LDSMAXH, LDSMAXAH, LDSMAXALH, LDSMAXLH page C6-1599 line 94950 MATCH x78204000/mask=xff20fc00
# C6.2.188 LDSMINH, LDSMINAH, LDSMINALH, LDSMINLH page C6-1606 line 95371 MATCH x78205000/mask=xff20fc00
# C6.2.197 LDUMAXH, LDUMAXAH, LDUMAXALH, LDUMAXLH page C6-1625 line 96488 MATCH x78206000/mask=xff20fc00
# C6.2.200 LDUMINH, LDUMINAH, LDUMINALH, LDUMINLH page C6-1632 line 96909 MATCH x78207000/mask=xff20fc00
# CONSTRUCT x78200000/mask=xff208c00 MATCHED 8 DOCUMENTED OPCODES
# AUNIT --inst x78200000/mask=xff208c00 --status nomem
# size=0b01 (3031)
## Halfword atomic load-op: acquire, load/modify/store via the ls_opc2 table, release.
:ld^ls_opc2^ls_lor^"h" aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b01 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc2 & ls_loa & ls_lor & aa_Wt & aa_Ws & Rn_GPR64xsp
{ build ls_loa; build ls_opc2; aa_Wt = tmp_ldWn; build ls_lor; }
# C6.2.135 LDADD, LDADDA, LDADDAL, LDADDL page C6-1493 line 88796 MATCH xb8200000/mask=xbf20fc00
# C6.2.154 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-1528 line 90747 MATCH xb8201000/mask=xbf20fc00
# C6.2.157 LDEOR, LDEORA, LDEORAL, LDEORL page C6-1535 line 91168 MATCH xb8202000/mask=xbf20fc00
# C6.2.183 LDSET, LDSETA, LDSETAL, LDSETL page C6-1594 line 94655 MATCH xb8203000/mask=xbf20fc00
# C6.2.186 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1601 line 95076 MATCH xb8204000/mask=xbf20fc00
# C6.2.189 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1608 line 95497 MATCH xb8205000/mask=xbf20fc00
# C6.2.198 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1627 line 96614 MATCH xb8206000/mask=xbf20fc00
# C6.2.201 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1634 line 97035 MATCH xb8207000/mask=xbf20fc00
# C6.2.297 STADD, STADDL page C6-1822 line 107555 MATCH xb820001f/mask=xbfa0fc1f
# C6.2.300 STCLR, STCLRL page C6-1828 line 107842 MATCH xb820101f/mask=xbfa0fc1f
# C6.2.303 STEOR, STEORL page C6-1834 line 108128 MATCH xb820201f/mask=xbfa0fc1f
# C6.2.330 STSET, STSETL page C6-1890 line 111216 MATCH xb820301f/mask=xbfa0fc1f
# C6.2.333 STSMAX, STSMAXL page C6-1896 line 111508 MATCH
xb820401f/mask=xbfa0fc1f
# C6.2.336 STSMIN, STSMINL page C6-1902 line 111801 MATCH xb820501f/mask=xbfa0fc1f
# C6.2.342 STUMAX, STUMAXL page C6-1914 line 112409 MATCH xb820601f/mask=xbfa0fc1f
# C6.2.345 STUMIN, STUMINL page C6-1920 line 112703 MATCH xb820701f/mask=xbfa0fc1f
# CONSTRUCT xb8200000/mask=xff208c00 MATCHED 16 DOCUMENTED OPCODES
# AUNIT --inst xb8200000/mask=xff208c00 --status nomem
# size=0b10 (3031)
## Word atomic load-op (also covers the ST* aliases where Rt is WZR).
:ld^ls_opc4^ls_lor aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b10 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc4 & ls_loa & ls_lor & aa_Wt & aa_Ws & Rn_GPR64xsp
{ build ls_loa; build ls_opc4; aa_Wt = tmp_ldWn; build ls_lor; }
# C6.2.135 LDADD, LDADDA, LDADDAL, LDADDL page C6-1493 line 88796 MATCH xb8200000/mask=xbf20fc00
# C6.2.154 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-1528 line 90747 MATCH xb8201000/mask=xbf20fc00
# C6.2.157 LDEOR, LDEORA, LDEORAL, LDEORL page C6-1535 line 91168 MATCH xb8202000/mask=xbf20fc00
# C6.2.183 LDSET, LDSETA, LDSETAL, LDSETL page C6-1594 line 94655 MATCH xb8203000/mask=xbf20fc00
# C6.2.186 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1601 line 95076 MATCH xb8204000/mask=xbf20fc00
# C6.2.189 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1608 line 95497 MATCH xb8205000/mask=xbf20fc00
# C6.2.198 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1627 line 96614 MATCH xb8206000/mask=xbf20fc00
# C6.2.201 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1634 line 97035 MATCH xb8207000/mask=xbf20fc00
# C6.2.297 STADD, STADDL page C6-1822 line 107555 MATCH xb820001f/mask=xbfa0fc1f
# C6.2.300 STCLR, STCLRL page C6-1828 line 107842 MATCH xb820101f/mask=xbfa0fc1f
# C6.2.303 STEOR, STEORL page C6-1834 line 108128 MATCH xb820201f/mask=xbfa0fc1f
# C6.2.330 STSET, STSETL page C6-1890 line 111216 MATCH xb820301f/mask=xbfa0fc1f
# C6.2.333 STSMAX, STSMAXL page C6-1896 line 111508 MATCH xb820401f/mask=xbfa0fc1f
# C6.2.336 STSMIN, STSMINL page C6-1902 line 111801 MATCH xb820501f/mask=xbfa0fc1f
# C6.2.342 STUMAX, STUMAXL page C6-1914 line 112409 MATCH
xb820601f/mask=xbfa0fc1f
# C6.2.345 STUMIN, STUMINL page C6-1920 line 112703 MATCH xb820701f/mask=xbfa0fc1f
# CONSTRUCT xf8200000/mask=xff208c00 MATCHED 16 DOCUMENTED OPCODES
# AUNIT --inst xf8200000/mask=xff208c00 --status nomem
# size=0b11 (3031)
## Doubleword atomic load-op.
:ld^ls_opc8^ls_lor aa_Xs, aa_Xt, [Rn_GPR64xsp] is b_3031=0b11 & b_2429=0b111000 & b_21=1 & b_1515=0 & b_1011=0b00 & ls_opc8 & ls_loa & ls_lor & aa_Xt & aa_Xs & Rn_GPR64xsp
{ build ls_loa; build ls_opc8; aa_Xt = tmp_ldXn; build ls_lor; }
# C6.2.136 LDAPR page C6-1496 line 88965 MATCH xb8a0c000/mask=xbfe0fc00
# CONSTRUCT xb8a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xb8a0c000/mask=xffe0fc00 --status nomem
# TODO unsure of load/release semantics for this instruction
# To enforce SHOULD BE ONE fields add: b_1620=0b11111
# size == 10 32-bit variant
:ldapr aa_Wt, [Rn_GPR64xsp] is b_3031=0b10 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Wt & ls_data4
{ aa_Wt = tmp_ldWn; }
# C6.2.136 LDAPR page C6-1496 line 88965 MATCH xb8a0c000/mask=xbfe0fc00
# CONSTRUCT xf8a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8a0c000/mask=xffe0fc00 --status nomem
# TODO unsure of load/release semantics for this instruction
# To enforce SHOULD BE ONE fields add: b_1620=0b11111
# size == 11 64-bit variant
:ldapr aa_Xt, [Rn_GPR64xsp] is b_3031=0b11 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Xt & ls_data8
{ aa_Xt = tmp_ldXn; }
# C6.2.137 LDAPRB page C6-1498 line 89064 MATCH x38a0c000/mask=xffe0fc00
# CONSTRUCT x38a0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x38a0c000/mask=xffe0fc00 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111
# TODO unsure of load/release semantics for this instruction
:ldaprb aa_Wt, [Rn_GPR64xsp] is b_3031=0b00 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Wt & ls_data1
{ aa_Wt = tmp_ldWn; }
# C6.2.138 LDAPRH page C6-1500 line 89148 MATCH x78a0c000/mask=xffe0fc00
# CONSTRUCT x78a0c000/mask=xffe0fc00 MATCHED 1
DOCUMENTED OPCODES # AUNIT --inst x78a0c000/mask=xffe0fc00 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 :ldaprh aa_Wt, [Rn_GPR64xsp] is b_3031=0b01 & b_2129=0b111000101 & b_1015=0b110000 & Rn_GPR64xsp & aa_Wt & ls_data2 { aa_Wt = tmp_ldWn; } # C6.2.139 LDAPUR page C6-1502 line 89232 MATCH x99400000/mask=xbfe00c00 # CONSTRUCT x99400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # The following commands are not yet implemented. # x99400000/mask=xbfe00c00 NOT MATCHED BY ANY CONSTRUCTOR :ldapur aa_Wt, addr_SIMM9 is b_3031=0b10 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt { aa_Xt = zext(*:4 addr_SIMM9); } # C6.2.139 LDAPUR page C6-1502 line 89232 MATCH x99400000/mask=xbfe00c00 # CONSTRUCT xd9400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES :ldapur aa_Xt, addr_SIMM9 is b_3031=0b11 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Xt { aa_Xt = *addr_SIMM9; } # C6.2.140 LDAPURB page C6-1504 line 89343 MATCH x19400000/mask=xffe00c00 # CONSTRUCT x19400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x19400000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR :ldapurb aa_Wt, addr_SIMM9 is b_3031=0b00 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt { aa_Xt = zext(*:1 addr_SIMM9); } # C6.2.141 LDAPURH page C6-1506 line 89439 MATCH x59400000/mask=xffe00c00 # CONSTRUCT x59400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x59400000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR :ldapurh aa_Wt, addr_SIMM9 is b_3031=0b01 & b_2129=0b011001010 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt { aa_Xt = zext(*:2 addr_SIMM9); } # C6.2.142 LDAPURSB page C6-1508 line 89535 MATCH x19800000/mask=xffa00c00 # CONSTRUCT x19c00000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x19800000/mask=xffa00c00 NOT MATCHED BY ANY CONSTRUCTOR :ldapursb aa_Wt, addr_SIMM9 is b_3031=0b00 & b_2329=0b0110011 & b_22=1 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt { aa_Xt = 0; aa_Wt = sext(*:1 addr_SIMM9); } # C6.2.142 LDAPURSB page C6-1508 line 
89535 MATCH x19800000/mask=xffa00c00 # CONSTRUCT x19800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES :ldapursb aa_Xt, addr_SIMM9 is b_3031=0b00 & b_2329=0b0110011 & b_22=0 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Xt { aa_Xt = sext(*:1 addr_SIMM9); } # C6.2.143 LDAPURSH page C6-1510 line 89667 MATCH x59800000/mask=xffa00c00 # CONSTRUCT x59c00000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x59800000/mask=xffa00c00 NOT MATCHED BY ANY CONSTRUCTOR :ldapursh aa_Wt, addr_SIMM9 is b_3031=0b01 & b_2329=0b0110011 & b_22=1 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Wt & aa_Xt { aa_Xt = 0; aa_Wt = sext(*:2 addr_SIMM9); } # C6.2.143 LDAPURSH page C6-1510 line 89667 MATCH x59800000/mask=xffa00c00 # CONSTRUCT x59800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES :ldapursh aa_Xt, addr_SIMM9 is b_3031=0b01 & b_2329=0b0110011 & b_22=0 & b_2121=0b0 & b_1011=0b00 & addr_SIMM9 & aa_Xt { aa_Xt = sext(*:2 addr_SIMM9); } # C6.2.144 LDAPURSW page C6-1512 line 89799 MATCH x99800000/mask=xffe00c00 # CONSTRUCT x99800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x99800000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR :ldapursw aa_Xt, addr_SIMM9 is b_3031=0b10 & b_2129=0b011001100 & b_1011=0b00 & addr_SIMM9 & aa_Xt { aa_Xt = sext(*:4 addr_SIMM9); } # C6.2.145 LDAR page C6-1514 line 89895 MATCH x88c08000/mask=xbfe08000 # CONSTRUCT xc8c08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xc8c08000/mask=xffe08000 --status nomem # The manual states that Rs and Rt2 should be all ones, which is # optionally enforced. 
# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :ldar Rt_GPR64, addrReg is size.ldstr=3 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR64 { Rt_GPR64 = *addrReg; } # C6.2.145 LDAR page C6-1514 line 89895 MATCH x88c08000/mask=xbfe08000 # CONSTRUCT x88dffc00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x88dffc00/mask=xfffffc00 --status nomem # Enforce SHOULD BE ONE fields b_1620 & b_1014 :ldar Rt_GPR32, addrReg is size.ldstr=2 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_1620=0b11111 & b_15=1 & b_1014=0b11111 & addrReg & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:4 addrReg); } # C6.2.146 LDARB page C6-1516 line 89986 MATCH x08c08000/mask=xffe08000 # CONSTRUCT x08c08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08c08000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :ldarb Rt_GPR32, addrReg is size.ldstr=0 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:1 addrReg); } # C6.2.147 LDARH page C6-1517 line 90054 MATCH x48c08000/mask=xffe08000 # CONSTRUCT x48dffc00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x48dffc00/mask=xfffffc00 --status nomem # Enforce SHOULD BE ONE fields b_1620 & b_1014 :ldarh Rt_GPR32, addrReg is size.ldstr=1 & b_2429=0x8 & b_23=1 & L=1 & b_21=0 & b_1620=0b11111 & b_15=1 & b_1014=0b11111 & addrReg & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:2 addrReg); } # C6.2.148 LDAXP page C6-1518 line 90122 MATCH x88608000/mask=xbfe08000 # CONSTRUCT xc8608000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xc8608000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 :ldaxp Rt_GPR64, Rt2_GPR64, addrReg is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_15=1 & Rt2_GPR64 & addrReg & Rt_GPR64 { local addrval1:8 = *(addrReg); local addrval2:8 = *(addrReg+8); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.148 LDAXP page C6-1518 line 90122 MATCH 
x88608000/mask=xbfe08000 # CONSTRUCT x88608000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x88608000/mask=xffe08000 --status nomem :ldaxp Rt_GPR32, Rt2_GPR32, addrReg is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_1620 & b_15=1 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64 { local addrval1:8 = zext(*:4(addrReg)); local addrval2:8 = zext(*:4(addrReg+4)); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.149 LDAXR page C6-1520 line 90256 MATCH x88408000/mask=xbfe08000 # CONSTRUCT xc8408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xc8408000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :ldaxr Rt_GPR64, addrReg is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR64 { Rt_GPR64 = *addrReg; } # C6.2.149 LDAXR page C6-1520 line 90256 MATCH x88408000/mask=xbfe08000 # CONSTRUCT x88408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x88408000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :ldaxr Rt_GPR32, addrReg is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 { tmp:4 = *addrReg; Rt_GPR64 = zext(tmp); } # C6.2.150 LDAXRB page C6-1522 line 90351 MATCH x08408000/mask=xffe08000 # CONSTRUCT x08408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08408000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :ldaxrb Rt_GPR32, addrReg is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 { tmp:1 = *addrReg; Rt_GPR64 = zext(tmp); } # C6.2.151 LDAXRH page C6-1523 line 90423 MATCH x48408000/mask=xffe08000 # CONSTRUCT x48408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x48408000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :ldaxrh Rt_GPR32, addrReg is size.ldstr=1 & 
b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 & Rt_GPR64 { tmp:2 = *addrReg; Rt_GPR64 = zext(tmp); } # C6.2.160 LDLARB page C6-1540 line 91472 MATCH x08c00000/mask=xffe08000 # CONSTRUCT x08c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08c00000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b00 (3031) :ldlarb aa_Wt, [Rn_GPR64xsp] is b_3031=0b00 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp { LOAcquire(); aa_Wt = zext(*:1 Rn_GPR64xsp); } # C6.2.161 LDLARH page C6-1541 line 91541 MATCH x48c00000/mask=xffe08000 # CONSTRUCT x48c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x48c00000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b01 (3031) :ldlarh aa_Wt, [Rn_GPR64xsp] is b_3031=0b01 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp { LOAcquire(); aa_Wt = zext(*:2 Rn_GPR64xsp); } # C6.2.162 LDLAR page C6-1542 line 91610 MATCH x88c00000/mask=xbfe08000 # CONSTRUCT x88c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x88c00000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b10 (3031) :ldlar aa_Wt, [Rn_GPR64xsp] is b_3031=0b10 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp { LOAcquire(); aa_Wt = *:4 Rn_GPR64xsp; } # C6.2.162 LDLAR page C6-1542 line 91610 MATCH x88c00000/mask=xbfe08000 # CONSTRUCT xc8c00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xc8c00000/mask=xffe08000 --status nomem # size=0b11 (3031) :ldlar aa_Xt, [Rn_GPR64xsp] is b_3031=0b11 & b_2329=0b0010001 & b_22=1 & b_21=0 & b_15=0 & aa_Xt & Rn_GPR64xsp { LOAcquire(); aa_Xt = *:8 Rn_GPR64xsp; } # C6.2.163 LDNP page C6-1544 line 91702 MATCH x28400000/mask=x7fc00000 # CONSTRUCT x28400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x28400000/mask=xffc00000 --status 
nomem :ldnp Rt_GPR32, Rt2_GPR32, addrPairIndexed is b_3031=0b00 & b_2229=0b10100001 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64 { local addrval1:8 = zext(*:4 addrPairIndexed); local addrval2:8 = zext(*:4 (addrPairIndexed + 4)); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.163 LDNP page C6-1544 line 91702 MATCH x28400000/mask=x7fc00000 # CONSTRUCT xa8400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xa8400000/mask=xffc00000 --status nomem :ldnp Rt_GPR64, Rt2_GPR64, addrPairIndexed is b_3031=0b10 & b_2229=0b10100001 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64 { local addrval1:8 = *addrPairIndexed; local addrval2:8 = *(addrPairIndexed + 8); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.164 LDP page C6-1546 line 91841 MATCH x28c00000/mask=x7fc00000 # C6.2.164 LDP page C6-1546 line 91841 MATCH x29c00000/mask=x7fc00000 # C6.2.164 LDP page C6-1546 line 91841 MATCH x29400000/mask=x7fc00000 # C6.2.163 LDNP page C6-1544 line 91702 MATCH x28400000/mask=x7fc00000 # CONSTRUCT x28400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x28400000/mask=xfe400000 --status nomem # opc == 00 post-index, pre-index, and signed 32-bit variant :ldp Rt_GPR32, Rt2_GPR32, addrPairIndexed is b_3031=0b00 & b_2529=0b10100 & (b_24=1 | b_23=1) & b_22=1 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64 { local addrval1:8 = zext(*:4 addrPairIndexed); local addrval2:8 = zext(*:4 (addrPairIndexed + 4)); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.164 LDP page C6-1546 line 91841 MATCH x28c00000/mask=x7fc00000 # C6.2.164 LDP page C6-1546 line 91841 MATCH x29c00000/mask=x7fc00000 # C6.2.164 LDP page C6-1546 line 91841 MATCH x29400000/mask=x7fc00000 # C6.2.163 LDNP page C6-1544 line 91702 MATCH x28400000/mask=x7fc00000 # CONSTRUCT xa8400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xa8400000/mask=xfe400000 --status nomem # opc == 10 post-index, pre-index, and signed 64-bit variant :ldp Rt_GPR64, Rt2_GPR64, 
addrPairIndexed is b_3031=0b10 & b_2529=0b10100 & (b_24=1 | b_23=1) & b_22=1 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64 { local addrval1:8 = *addrPairIndexed; local addrval2:8 = *(addrPairIndexed + 8); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.165 LDPSW page C6-1550 line 92077 MATCH x68c00000/mask=xffc00000 # C6.2.165 LDPSW page C6-1550 line 92077 MATCH x69c00000/mask=xffc00000 # C6.2.165 LDPSW page C6-1550 line 92077 MATCH x69400000/mask=xffc00000 # CONSTRUCT x68400000/mask=xfe400000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x68400000/mask=xfe400000 --status nomem :ldpsw Rt_GPR64, Rt2_GPR64, addrPairIndexed is b_2531=0b0110100 & (b_24=1 | b_23=1) & b_22=1 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64 { local addrval1:8 = sext(*:4 addrPairIndexed); local addrval2:8 = sext(*:4 (addrPairIndexed + 4)); Rt_GPR64 = addrval1; Rt2_GPR64 = addrval2; } # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb9400000/mask=xbfc00000 # CONSTRUCT xb9400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xb9400000/mask=xffc00000 --status nomem :ldr Rt_GPR32, addrUIMM is size.ldstr=2 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=1 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:4 addrUIMM); } # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb8400400/mask=xbfe00c00 # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb8400c00/mask=xbfe00c00 # C6.2.190 LDTR page C6-1611 line 95666 MATCH xb8400800/mask=xbfe00c00 # C6.2.202 LDUR page C6-1637 line 97204 MATCH xb8400000/mask=xbfe00c00 # CONSTRUCT xb8400000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst xb8400000/mask=xffe00000 --status nomem :ld^UnscPriv^"r" Rt_GPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:4 addrIndexed); } # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb8400400/mask=xbfe00c00 # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH 
xb8400c00/mask=xbfe00c00 # CONSTRUCT xb8400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xb8400400/mask=xffe00400 --status nomem :ldr Rt_GPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:4 addrIndexed); } # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb9400000/mask=xbfc00000 # CONSTRUCT xf9400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xf9400000/mask=xffc00000 --status nomem :ldr Rt_GPR64, addrUIMM is size.ldstr=3 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=1 & addrUIMM & Rn_GPR64xsp & Rt_GPR64 { Rt_GPR64 = *addrUIMM; } # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb8400400/mask=xbfe00c00 # C6.2.166 LDR (immediate) page C6-1553 line 92262 MATCH xb8400c00/mask=xbfe00c00 # CONSTRUCT xf8400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xf8400400/mask=xffe00400 --status nomem :ldr Rt_GPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64 { Rt_GPR64 = *addrIndexed; } # C6.2.167 LDR (literal) page C6-1556 line 92457 MATCH x18000000/mask=xbf000000 # CONSTRUCT x18000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x18000000/mask=xff000000 --status nomem :ldr Rt_GPR32, AddrLoc19 is size.ldstr=0 & b_2729=3 & v=0 & b_2425=0 & AddrLoc19 & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:4 AddrLoc19); } # C6.2.167 LDR (literal) page C6-1556 line 92457 MATCH x18000000/mask=xbf000000 # CONSTRUCT x58000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x58000000/mask=xff000000 --status nomem :ldr Rt_GPR64, AddrLoc19 is size.ldstr=1 & b_2729=3 & v=0 & b_2425=0 & AddrLoc19 & Rt_GPR64 { Rt_GPR64 = *:8 AddrLoc19; } # C6.2.168 LDR (register) page C6-1558 line 92557 MATCH xb8600800/mask=xbfe00c00 # CONSTRUCT xb8600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xb8600800/mask=xffe00c00 --status 
nomem :ldr Rt_GPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:4 addrIndexed); } # C6.2.168 LDR (register) page C6-1558 line 92557 MATCH xb8600800/mask=xbfe00c00 # CONSTRUCT xf8600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xf8600800/mask=xffe00c00 --status nomem :ldr Rt_GPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64 { Rt_GPR64 = *addrIndexed; } # C6.2.169 LDRAA, LDRAB page C6-1560 line 92679 MATCH xf8200400/mask=xff200400 # CONSTRUCT xf8200400/mask=xffa00400 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xf8200400/mask=xffa00400 --status nomem # M == 0 && W == 0 key A, offset variant # M == 0 && W == 1 key A, offset variant :ldraa Rt_GPR64, addrIndexed is ldraa__PACpart & b_2431=0b11111000 & b_23=0 & b_21=1 & b_10=1 & addrIndexed & Rn_GPR64xsp & Rt_GPR64 { build ldraa__PACpart; build addrIndexed; # Note: if writeback is used, the writeback'd value doesn't have a PAC code! It's the output of AuthDA. Rt_GPR64 = *:8 addrIndexed; } # C6.2.169 LDRAA, LDRAB page C6-1560 line 92679 MATCH xf8200400/mask=xff200400 # CONSTRUCT xf8a00400/mask=xffa00400 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xf8a00400/mask=xffa00400 --status nomem # M == 1 && W == 0 key B, offset variant # M == 1 && W == 1 key B, offset variant :ldrab Rt_GPR64, addrIndexed is ldrab__PACpart & b_2431=0b11111000 & b_23=1 & b_21=1 & b_10=1 & addrIndexed & Rn_GPR64xsp & Rt_GPR64 { build ldrab__PACpart; build addrIndexed; # Note: if writeback is used, the writeback'd value doesn't have a PAC code! It's the output of AuthDB. 
Rt_GPR64 = *:8 addrIndexed; } # C6.2.170 LDRB (immediate) page C6-1562 line 92814 MATCH x38400400/mask=xffe00c00 # C6.2.170 LDRB (immediate) page C6-1562 line 92814 MATCH x38400c00/mask=xffe00c00 # CONSTRUCT x38400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x38400400/mask=xffe00400 --status nomem # post-index and pre-index variants :ldrb Rt_GPR32, addrIndexed is b_2131=0b00111000010 & b_10=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:1 addrIndexed); } # C6.2.170 LDRB (immediate) page C6-1562 line 92814 MATCH x39400000/mask=xffc00000 # CONSTRUCT x39400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x39400000/mask=xffc00000 --status nomem # unsigned offset variant :ldrb Rt_GPR32, addrIndexed is b_2231=0b0011100101 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:1 addrIndexed); } # C6.2.171 LDRB (register) page C6-1565 line 92976 MATCH x38600800/mask=xffe00c00 # CONSTRUCT x38600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x38600800/mask=xffe00c00 --status nomem # extended register and shifted register variant # determined in addrIndexed subtable :ldrb Rt_GPR32, addrIndexed is b_2131=0b00111000011 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:1 addrIndexed); } # C6.2.172 LDRH (immediate) page C6-1567 line 93076 MATCH x79400000/mask=xffc00000 # CONSTRUCT x79400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x79400000/mask=xffc00000 --status nomem :ldrh Rt_GPR32, addrUIMM is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=1 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:2 addrUIMM); } # C6.2.172 LDRH (immediate) page C6-1567 line 93076 MATCH x78400400/mask=xffe00c00 # C6.2.172 LDRH (immediate) page C6-1567 line 93076 MATCH x78400c00/mask=xffe00c00 # CONSTRUCT x78400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x78400400/mask=xffe00400 --status nomem :ldrh Rt_GPR32, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & 
b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:2 addrIndexed); } # C6.2.173 LDRH (register) page C6-1570 line 93238 MATCH x78600800/mask=xffe00c00 # CONSTRUCT x78600800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x78600800/mask=xffe00c00 --status nomem :ldrh Rt_GPR32, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = zext(*:2 addrIndexed); } # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x39800000/mask=xff800000 # CONSTRUCT x39c00000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x39c00000/mask=xffc00000 --status nomem :ldrsb Rt_GPR32, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=1 & b_2223=3 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800400/mask=xffa00c00 # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800c00/mask=xffa00c00 # C6.2.193 LDTRSB page C6-1617 line 95984 MATCH x38800800/mask=xffa00c00 # C6.2.205 LDURSB page C6-1641 line 97443 MATCH x38800000/mask=xffa00c00 # CONSTRUCT x38c00000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x38c00000/mask=xffe00000 --status nomem :ld^UnscPriv^"rsb" Rt_GPR32, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800400/mask=xffa00c00 # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800c00/mask=xffa00c00 # CONSTRUCT x38c00400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x38c00400/mask=xffe00400 --status nomem :ldrsb Rt_GPR32, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.174 LDRSB 
(immediate) page C6-1572 line 93336 MATCH x39800000/mask=xff800000 # CONSTRUCT x39800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x39800000/mask=xffc00000 --status nomem :ldrsb Rt_GPR64, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800400/mask=xffa00c00 # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800c00/mask=xffa00c00 # C6.2.193 LDTRSB page C6-1617 line 95984 MATCH x38800800/mask=xffa00c00 # C6.2.205 LDURSB page C6-1641 line 97443 MATCH x38800000/mask=xffa00c00 # CONSTRUCT x38800000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x38800000/mask=xffe00000 --status nomem :ld^UnscPriv^"rsb" Rt_GPR64, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800400/mask=xffa00c00 # C6.2.174 LDRSB (immediate) page C6-1572 line 93336 MATCH x38800c00/mask=xffa00c00 # CONSTRUCT x38800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x38800400/mask=xffe00400 --status nomem :ldrsb Rt_GPR64, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.175 LDRSB (register) page C6-1576 line 93573 MATCH x38a00800/mask=xffa00c00 # CONSTRUCT x38e00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x38e00800/mask=xffe00c00 --status nomem :ldrsb Rt_GPR32, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.175 LDRSB (register) page C6-1576 line 93573 MATCH x38a00800/mask=xffa00c00 # CONSTRUCT x38a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x38a00800/mask=xffe00c00 --status nomem 
:ldrsb Rt_GPR64, addrIndexed is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:1 addrIndexed); } # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x79800000/mask=xff800000 # CONSTRUCT x79c00000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x79c00000/mask=xffc00000 --status nomem :ldrsh Rt_GPR32, addrUIMM is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_2223=3 & addrUIMM & Rn_GPR64xsp & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrUIMM); } # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800400/mask=xffa00c00 # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800c00/mask=xffa00c00 # C6.2.194 LDTRSH page C6-1619 line 96122 MATCH x78800800/mask=xffa00c00 # C6.2.206 LDURSH page C6-1643 line 97560 MATCH x78800000/mask=xffa00c00 # CONSTRUCT x78c00000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x78c00000/mask=xffe00000 --status nomem :ld^UnscPriv^"rsh" Rt_GPR32, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrIndexed); } # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800400/mask=xffa00c00 # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800c00/mask=xffa00c00 # CONSTRUCT x78c00400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x78c00400/mask=xffe00400 --status nomem :ldrsh Rt_GPR32, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrIndexed); } # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x79800000/mask=xff800000 # CONSTRUCT x79800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x79800000/mask=xffc00000 --status nomem :ldrsh Rt_GPR64, addrUIMM is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrUIMM & Rn_GPR64xsp & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrUIMM); } # 
C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800400/mask=xffa00c00 # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800c00/mask=xffa00c00 # C6.2.194 LDTRSH page C6-1619 line 96122 MATCH x78800800/mask=xffa00c00 # C6.2.206 LDURSH page C6-1643 line 97560 MATCH x78800000/mask=xffa00c00 # CONSTRUCT x78800000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x78800000/mask=xffe00000 --status nomem :ld^UnscPriv^"rsh" Rt_GPR64, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrIndexed); } # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800400/mask=xffa00c00 # C6.2.176 LDRSH (immediate) page C6-1578 line 93714 MATCH x78800c00/mask=xffa00c00 # CONSTRUCT x78800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x78800400/mask=xffe00400 --status nomem :ldrsh Rt_GPR64, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrIndexed); } # C6.2.177 LDRSH (register) page C6-1582 line 93951 MATCH x78a00800/mask=xffa00c00 # CONSTRUCT x78e00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x78e00800/mask=xffe00c00 --status nomem :ldrsh Rt_GPR32, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=3 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32 & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrIndexed); } # C6.2.177 LDRSH (register) page C6-1582 line 93951 MATCH x78a00800/mask=xffa00c00 # CONSTRUCT x78a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x78a00800/mask=xffe00c00 --status nomem :ldrsh Rt_GPR64, addrIndexed is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:2 addrIndexed); } # C6.2.178 LDRSW (immediate) page C6-1584 line 94088 MATCH xb8800400/mask=xffe00c00 # C6.2.178 LDRSW (immediate) page C6-1584 line 94088 MATCH xb8800c00/mask=xffe00c00 # 
CONSTRUCT xb8800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xb8800400/mask=xffe00400 --status nomem :ldrsw Rt_GPR64, addrIndexed is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:4 addrIndexed); } # C6.2.178 LDRSW (immediate) page C6-1584 line 94088 MATCH xb9800000/mask=xffc00000 # CONSTRUCT xb9800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xb9800000/mask=xffc00000 --status nomem :ldrsw Rt_GPR64, addrIndexed is size.ldstr=2 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:4 addrIndexed); } # C6.2.179 LDRSW (literal) page C6-1587 line 94246 MATCH x98000000/mask=xff000000 # CONSTRUCT x98000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x98000000/mask=xff000000 --status nomem :ldrsw Rt_GPR64, AddrLoc19 is b_2431=0b10011000 & AddrLoc19 & Rt_GPR64 { Rt_GPR64 = sext(*:4 AddrLoc19); } # C6.2.180 LDRSW (register) page C6-1588 line 94304 MATCH xb8a00800/mask=xffe00c00 # CONSTRUCT xb8a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xb8a00800/mask=xffe00c00 --status nomem :ldrsw Rt_GPR64, addrIndexed is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64 { Rt_GPR64 = sext(*:4 addrIndexed); } # C6.2.190 LDTR page C6-1611 line 95666 MATCH xb8400800/mask=xbfe00c00 # CONSTRUCT xf8400800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xf8400800/mask=xffe00c00 --status nomem :ld^UnscPriv^"r" Rt_GPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=1 & b_2121=0 & b_1011=2 & UnscPriv & addrIndexed & Rt_GPR64 { Rt_GPR64 = *addrIndexed; } # C6.2.191 LDTRB page C6-1613 line 95782 MATCH x38400800/mask=xffe00c00 # C6.2.203 LDURB page C6-1639 line 97301 MATCH x38400000/mask=xffe00c00 # CONSTRUCT x38400000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x38400000/mask=xffe00000 --status nomem :ld^UnscPriv^"rb" Rt_GPR32, 
addrIndexed
is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64
{ Rt_GPR64 = zext(*:1 addrIndexed); }

# C6.2.192 LDTRH page C6-1615 line 95883 MATCH x78400800/mask=xffe00c00
# C6.2.204 LDURH page C6-1640 line 97372 MATCH x78400000/mask=xffe00c00
# CONSTRUCT x78400000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x78400000/mask=xffe00000 --status nomem

:ld^UnscPriv^"rh" Rt_GPR32, addrIndexed
is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32 & Rt_GPR64
{ Rt_GPR64 = zext(*:2 addrIndexed); }

# C6.2.195 LDTRSW page C6-1621 line 96261 MATCH xb8800800/mask=xffe00c00
# C6.2.207 LDURSW page C6-1645 line 97677 MATCH xb8800000/mask=xffe00c00
# CONSTRUCT xb8800000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb8800000/mask=xffe00000 --status nomem

:ld^UnscPriv^"rsw" Rt_GPR64, addrIndexed
is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR64
{ Rt_GPR64 = sext(*:4 addrIndexed); }

# C6.2.202 LDUR page C6-1637 line 97204 MATCH xb8400000/mask=xbfe00c00
# CONSTRUCT xf8400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8400000/mask=xffe00c00 --status nomem

:ld^UnscPriv^"r" Rt_GPR64, addrIndexed
is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2122=2 & b_1011=0 & UnscPriv & addrIndexed & Rt_GPR64
{ Rt_GPR64 = *addrIndexed; }

# C6.2.208 LDXP page C6-1646 line 97748 MATCH x88600000/mask=xbfe08000
# CONSTRUCT xc8600000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8600000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111

:ldxp Rt_GPR64, Rt2_GPR64, addrReg
is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_15=0 & Rt2_GPR64 & addrReg & Rt_GPR64
{
  local addrval1:8 = *addrReg;
  local addrval2:8 = *(addrReg + 8);
  Rt_GPR64 = addrval1;
  Rt2_GPR64 = addrval2;
}

# C6.2.208 LDXP page C6-1646 line 97748 MATCH x88600000/mask=xbfe08000
# CONSTRUCT x88600000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88600000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111

:ldxp Rt_GPR32, Rt2_GPR32, addrReg
is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=1 & b_15=0 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rt_GPR64 & Rt2_GPR64
{
  local addrval1:8 = zext(*:4 addrReg);
  local addrval2:8 = zext(*:4 (addrReg + 4));
  Rt_GPR64 = addrval1;
  Rt2_GPR64 = addrval2;
}

# C6.2.209 LDXR page C6-1648 line 97882 MATCH x88400000/mask=xbfe08000
# CONSTRUCT xc8400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8400000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111

:ldxr Rt_GPR64, addrReg
is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR64
{ Rt_GPR64 = *addrReg; }

# C6.2.209 LDXR page C6-1648 line 97882 MATCH x88400000/mask=xbfe08000
# CONSTRUCT x88400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88400000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111

:ldxr Rt_GPR32, addrReg
is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR32 & Rt_GPR64
{ Rt_GPR64 = zext(*:4 addrReg); }

# C6.2.210 LDXRB page C6-1650 line 97976 MATCH x08400000/mask=xffe08000
# CONSTRUCT x08400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x08400000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111

:ldxrb Rt_GPR32, addrReg
is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR32 & Rt_GPR64
{ Rt_GPR64 = zext(*:1 addrReg); }

# C6.2.211 LDXRH page C6-1651 line 98048 MATCH x48400000/mask=xffe08000
# CONSTRUCT x48400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x48400000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111

:ldxrh Rt_GPR32, addrReg
is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=1 & b_21=0 & b_15=0 & addrReg & Rt_GPR32 & Rt_GPR64
{ Rt_GPR64 = zext(*:2 addrReg); }

# C6.2.212 LSL (register) page C6-1652 line 98120 MATCH x1ac02000/mask=x7fe0fc00
# C6.2.214 LSLV page C6-1656 line 98305 MATCH x1ac02000/mask=x7fe0fc00
# CONSTRUCT x1ac02000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac02000/mask=xffe0fc00 --status pass

:lsl Rd_GPR32, Rn_GPR32, Rm_GPR32
is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x8 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  shiftval:8 = zext(Rm_GPR32 & 0x1f);
  tmp_1:4 = Rn_GPR32 << shiftval;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.212 LSL (register) page C6-1652 line 98120 MATCH x1ac02000/mask=x7fe0fc00
# C6.2.214 LSLV page C6-1656 line 98305 MATCH x1ac02000/mask=x7fe0fc00
# CONSTRUCT x9ac02000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9ac02000/mask=xffe0fc00 --status pass

:lsl Rd_GPR64, Rn_GPR64, Rm_GPR64
is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x8 & Rn_GPR64 & Rd_GPR64
{
  shiftval:8 = (Rm_GPR64 & 0x3f);
  tmp_1:8 = Rn_GPR64 << shiftval;
  Rd_GPR64 = tmp_1;
}

# C6.2.158 LSL (immediate) page C6-784 line 45779 KEEPWITH
# Note: the "#" below is literal display text (immediate prefix), not a comment.

ubfiz_lsb: "#"^imm is ImmR [ imm = 32 - ImmR; ] { export *[const]:4 imm; }
ubfiz_width: "#"^imm is ImmS [ imm = ImmS + 1; ] { export *[const]:4 imm; }
ubfiz_lsb64: "#"^imm is ImmR [ imm = 64 - ImmR; ] { export *[const]:4 imm; }
ubfx_width: "#"^imm is ImmR & ImmS [ imm = ImmS - ImmR + 1; ] { export *[const]:4 imm; }

# C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000
# C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00
# C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000
# C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000
# C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000
# C6.2.394 UXTB page C6-2002 line 117228 MATCH x53001c00/mask=xfffffc00
# C6.2.395 UXTH page C6-2003 line 117288 MATCH x53003c00/mask=xfffffc00
# CONSTRUCT x53000012/mask=xffe0801e MATCHED 7 DOCUMENTED OPCODES
# AUNIT --inst x53000012/mask=xffe0801e --status pass
# Alias for ubfm where imms+1=immr and imms != '011111'
# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();

:lsl Rd_GPR32, Rn_GPR32, LSB_bitfield32_imm_shift
is ImmR=ImmS+1 & ImmS_ne_1f=1 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=1 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & LSB_bitfield32_imm_shift & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  local tmp:4 = Rn_GPR32 << LSB_bitfield32_imm_shift;
  Rd_GPR64 = zext(tmp);
}

# C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000
# C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00
# C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000
# C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000
# C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000
# CONSTRUCT xd3400022/mask=xffc0002e MATCHED 5 DOCUMENTED OPCODES
# AUNIT --inst xd3400022/mask=xffc0002e --status pass
# Alias for ubfm where imms+1=immr and imms != '111111'

:lsl Rd_GPR64, Rn_GPR64, LSB_bitfield64_imm_shift
is ImmR=ImmS+1 & ImmS_ne_3f=1 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=1 & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & LSB_bitfield64_imm_shift & Rn_GPR64 & Rd_GPR64
{ Rd_GPR64 = Rn_GPR64 << LSB_bitfield64_imm_shift; }

# C6.2.215 LSR (register) page C6-1658 line 98396 MATCH x1ac02400/mask=x7fe0fc00
# C6.2.217 LSRV page C6-1662 line 98580 MATCH x1ac02400/mask=x7fe0fc00
# CONSTRUCT x1ac02400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac02400/mask=xffe0fc00 --status pass

:lsr Rd_GPR32, Rn_GPR32, Rm_GPR32
is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x9 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  shiftval:8 = zext(Rm_GPR32 & 0x1f);
  tmp_1:4 = Rn_GPR32 >> shiftval;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.215
# LSR (register) page C6-1658 line 98396 MATCH x1ac02400/mask=x7fe0fc00
# C6.2.217 LSRV page C6-1662 line 98580 MATCH x1ac02400/mask=x7fe0fc00
# CONSTRUCT x9ac02400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9ac02400/mask=xffe0fc00 --status pass

:lsr Rd_GPR64, Rn_GPR64, Rm_GPR64
is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x9 & Rn_GPR64 & Rd_GPR64
{
  shiftval:8 = Rm_GPR64 & 0x3f;
  tmp_1:8 = Rn_GPR64 >> shiftval;
  Rd_GPR64 = tmp_1;
}

# C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00
# C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000
# C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000
# C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000
# C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000
# CONSTRUCT x53007c00/mask=xffe0fc1a MATCHED 5 DOCUMENTED OPCODES
# AUNIT --inst x53007c00/mask=xffe0fc1a --status pass
# Alias for ubfm where imms=='011111'
# imms is MAX_INT5, so it will never be less than immr. Note that immr is limited to [0,31]
# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();

:lsr Rd_GPR32, Rn_GPR32, ImmRConst32
is ImmS=0x1f & ImmS_ne_1f=0 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = ImmRConst32;
  tmp_1:4 = Rn_GPR32 >> tmp_2;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00
# C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000
# C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000
# C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000
# C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000
# CONSTRUCT xd340fc00/mask=xffc0fc2a MATCHED 5 DOCUMENTED OPCODES
# AUNIT --inst xd340fc00/mask=xffc0fc2a --status pass
# Alias for ubfm where imms=='111111'
# imms is MAX_INT6, so it will never be less than immr.

:lsr Rd_GPR64, Rn_GPR64, ImmRConst64
is ImmS=0x3f & ImmS_ne_3f=0 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & Rn_GPR64 & Rd_GPR64
{
  tmp_2:8 = ImmRConst64;
  tmp_1:8 = Rn_GPR64 >> tmp_2;
  Rd_GPR64 = tmp_1;
}

# C6.2.218 MADD page C6-1664 line 98671 MATCH x1b000000/mask=x7fe08000
# C6.2.232 MUL page C6-1691 line 100073 MATCH x1b007c00/mask=x7fe0fc00
# CONSTRUCT x1b000000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1b000000/mask=xffe08000 --status pass
# NOTE(review): AArch64 MADD does not set condition flags, yet this 32-bit
# form calls addflags() while the 64-bit form below does not -- verify upstream.

:madd Rd_GPR32, Rn_GPR32, Rm_GPR32, Ra_GPR32
is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=0 & Ra_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = Rn_GPR32 * Rm_GPR32;
  addflags(Ra_GPR32, tmp_2);
  tmp_1:4 = Ra_GPR32 + tmp_2;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.218 MADD page C6-1664 line 98671 MATCH x1b000000/mask=x7fe08000
# C6.2.232 MUL page C6-1691 line 100073 MATCH x1b007c00/mask=x7fe0fc00
# CONSTRUCT x9b000000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9b000000/mask=xffe08000 --status pass

:madd Rd_GPR64, Rn_GPR64, Rm_GPR64, Ra_GPR64
is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=0 & Ra_GPR64 & Rn_GPR64 & Rd_GPR64
{
  tmp_2:8 = Rn_GPR64 * Rm_GPR64;
  tmp_1:8 = Ra_GPR64 + tmp_2;
  Rd_GPR64 = tmp_1;
}

# C6.2.219 MNEG page C6-1666 line 98782 MATCH x1b00fc00/mask=x7fe0fc00
# C6.2.231 MSUB page C6-1689 line 99963 MATCH x1b008000/mask=x7fe08000
# CONSTRUCT x9b00fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9b00fc00/mask=xffe0fc00 --status pass

:mneg Rd_GPR64, Rn_GPR64, Rm_GPR64
is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR64 & Rd_GPR64
{
  tmp_2:8 = Rn_GPR64 * Rm_GPR64;
  tmp_1:8 = -tmp_2;
  Rd_GPR64 = tmp_1;
}

# C6.2.219 MNEG page C6-1666 line 98782 MATCH x1b00fc00/mask=x7fe0fc00
# C6.2.231 MSUB page C6-1689 line 99963 MATCH x1b008000/mask=x7fe08000
# CONSTRUCT x1b00fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1b00fc00/mask=xffe0fc00 --status pass

:mneg Rd_GPR32, Rn_GPR32, Rm_GPR32
is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = Rn_GPR32 * Rm_GPR32;
  tmp_1:4 = -tmp_2;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.220 MOV (to/from SP) page C6-1668 line 98876 MATCH x11000000/mask=x7ffffc00
# C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000
# CONSTRUCT x11000000/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x11000000/mask=xfffffc00 --status pass

:mov Rd_GPR32xsp, Rn_GPR32xsp
is sf=0 & b_30=0 & S=0 & b_2428=0x011 & (aa_Xn=31 | aa_Xd=31) & shift=0 & imm12=0 & Rn_GPR32xsp & Rd_GPR32xsp & Rd_GPR64xsp
{ Rd_GPR64xsp = zext(Rn_GPR32xsp); }

# C6.2.220 MOV (to/from SP) page C6-1668 line 98876 MATCH x11000000/mask=x7ffffc00
# C6.2.4 ADD (immediate) page C6-1151 line 68228 MATCH x11000000/mask=x7f800000
# CONSTRUCT x91000000/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x91000000/mask=xfffffc00 --status pass

:mov Rd_GPR64xsp, Rn_GPR64xsp
is sf=1 & b_30=0 & S=0 & b_2428=0x11 & (aa_Xn=31 | aa_Xd=31) & shift=0 & imm12=0 & Rn_GPR64xsp & Rd_GPR64xsp
{ Rd_GPR64xsp = Rn_GPR64xsp; }

# C6.2.166 MOV (inverted wide immediate) page C6-793 line 46366 KEEPWITH
# Note: the "#" below is literal display text (immediate prefix), not a comment.

FullImm_movz32_imm: "#"^val is imm16 & aa_hw=0 [ val = (imm16 << 0) & 0xffffffff; ] { export *[const]:8 val; }
FullImm_movz32_imm: "#"^val is imm16 & aa_hw=1 [ val = (imm16 << 16) & 0xffffffff; ] { export *[const]:8 val; }
FullImm_movz64_imm: "#"^val is imm16 & aa_hw [ val = imm16 << (aa_hw * 16); ] { export *[const]:8 val; }
FullImm_movn32_imm: "#"^val is imm16 & aa_hw=0 [ val = ~(imm16 << 0) & 0xffffffff; ] { export *[const]:8 val; }
FullImm_movn32_imm: "#"^val is imm16 & aa_hw=1 [ val = ~(imm16 << 16) & 0xffffffff; ] { export *[const]:8 val; }
FullImm_movn64_imm: "#"^val is imm16 & aa_hw [ val = ~(imm16 << (aa_hw * 16)); ] { export *[const]:8 val; }
FullImm_movk32_mask: mask is aa_hw [ mask = (~(0xffff << (aa_hw * 16))) & 0xffffffff; ] { export *[const]:4 mask; }
FullImm_movk32_shift: tmp is imm16 & aa_hw [ tmp = (imm16 << (aa_hw * 16)) & 0xffffffff; ] { export *[const]:4 tmp; }
FullImm_movk32_imm: "#"^imm16 is imm16 & aa_hw=0 { export *[const]:4 imm16; }
FullImm_movk32_imm: "#"^imm16, "LSL #16" is imm16 & aa_hw=1 & FullImm_movk32_shift { export FullImm_movk32_shift; }
FullImm_movk64_mask: mask is aa_hw [ mask = ~(0xffff << (aa_hw * 16)); ] { export *[const]:8 mask; }
FullImm_movk64_shift: tmp is imm16 & aa_hw [ tmp = (imm16 << (aa_hw * 16)); ] { export *[const]:8 tmp; }
FullImm_movk64_imm: "#"^imm16 is imm16 & aa_hw=0 { export *[const]:8 imm16; }
FullImm_movk64_imm: "#"^imm16, "LSL #16" is imm16 & aa_hw=1 & FullImm_movk64_shift { export FullImm_movk64_shift; }
FullImm_movk64_imm: "#"^imm16, "LSL #32" is imm16 & aa_hw=2 & FullImm_movk64_shift { export FullImm_movk64_shift; }
FullImm_movk64_imm: "#"^imm16, "LSL #48" is imm16 & aa_hw=3 & FullImm_movk64_shift { export FullImm_movk64_shift; }

# C6.2.221 MOV (inverted wide immediate) page C6-1669 line 98943 MATCH x12800000/mask=x7f800000
# C6.2.226 MOVN page C6-1679 line 99388 MATCH x12800000/mask=x7f800000
# CONSTRUCT x12800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x12800000/mask=xff800000 --status pass

:mov Rd_GPR32, FullImm_movn32_imm
is sf=0 & opc=0 & b_2428=0x12 & b_2323=1 & FullImm_movn32_imm & Rd_GPR32 & Rd_GPR64
{
  # Special case MOVN
  Rd_GPR64 = FullImm_movn32_imm;
}

# C6.2.221 MOV (inverted wide immediate) page C6-1669 line 98943 MATCH x12800000/mask=x7f800000
# C6.2.226 MOVN page C6-1679 line 99388 MATCH x12800000/mask=x7f800000
# CONSTRUCT x92800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x92800000/mask=xff800000 --status pass

:mov Rd_GPR64, FullImm_movn64_imm
is sf=1 & opc=0 & b_2428=0x12 & b_2323=1 & FullImm_movn64_imm & Rd_GPR64
{
  # Special case MOVN
  Rd_GPR64 = FullImm_movn64_imm;
}

# C6.2.222 MOV (wide immediate) page C6-1671 line 99035 MATCH
# x52800000/mask=x7f800000
# C6.2.227 MOVZ page C6-1681 line 99489 MATCH x52800000/mask=x7f800000
# CONSTRUCT x52800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x52800000/mask=xff800000 --status pass

:mov Rd_GPR32, FullImm_movz32_imm
is sf=0 & opc=2 & b_2428=0x12 & b_2323=1 & FullImm_movz32_imm & Rd_GPR32 & Rd_GPR64
{ Rd_GPR64 = FullImm_movz32_imm; }

# C6.2.222 MOV (wide immediate) page C6-1671 line 99035 MATCH x52800000/mask=x7f800000
# C6.2.227 MOVZ page C6-1681 line 99489 MATCH x52800000/mask=x7f800000
# CONSTRUCT xd2800000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd2800000/mask=xff800000 --status pass

:mov Rd_GPR64, FullImm_movz64_imm
is sf=1 & opc=2 & b_2428=0x12 & b_2323=1 & FullImm_movz64_imm & Rd_GPR64
{ Rd_GPR64 = FullImm_movz64_imm; }

# C6.2.223 MOV (bitmask immediate) page C6-1673 line 99125 MATCH x320003e0/mask=x7f8003e0
# C6.2.240 ORR (immediate) page C6-1705 line 100779 MATCH x32000000/mask=x7f800000
# CONSTRUCT x320003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x320003e0/mask=xffe0ffe0 --status pass

:mov Rd_GPR32wsp, DecodeWMask32
is sf=0 & opc=1 & b_2428=0x12 & b_2223=0 & N=0 & imm6=0 & DecodeWMask32 & aa_Xn=31 & Rd_GPR32wsp & Rd_GPR64xsp
{
  # special case ORR
  tmp_1:4 = DecodeWMask32;
  Rd_GPR64xsp = zext(tmp_1);
}

# C6.2.223 MOV (bitmask immediate) page C6-1673 line 99125 MATCH x320003e0/mask=x7f8003e0
# C6.2.240 ORR (immediate) page C6-1705 line 100779 MATCH x32000000/mask=x7f800000
# CONSTRUCT xb20003e0/mask=xffc0ffe0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb20003e0/mask=xffc0ffe0 --status pass

:mov Rd_GPR64xsp, DecodeWMask64
is sf=1 & opc=1 & b_2428=0x12 & b_2223=0 & imm6=0 & DecodeWMask64 & aa_Xn=31 & Rd_GPR64xsp
{
  # special case of ORR
  tmp_1:8 = DecodeWMask64;
  Rd_GPR64xsp = tmp_1;
}

# C6.2.224 MOV (register) page C6-1675 line 99214 MATCH x2a0003e0/mask=x7fe0ffe0
# C6.2.241 ORR (shifted register) page C6-1707 line 100882 MATCH x2a000000/mask=x7f200000
# CONSTRUCT x2a0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x2a0003e0/mask=xff2003e0 --status pass

:mov Rd_GPR32, RegShift32Log
is b_31=0 & b_2430=0b0101010 & b_21=0 & b_0509=0b11111 & RegShift32Log & Rd_GPR32 & Rd_GPR64
{
  # special case ORR
  tmp_1:4 = RegShift32Log;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.224 MOV (register) page C6-1675 line 99214 MATCH x2a0003e0/mask=x7fe0ffe0
# C6.2.241 ORR (shifted register) page C6-1707 line 100882 MATCH x2a000000/mask=x7f200000
# CONSTRUCT xaa0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xaa0003e0/mask=xff2003e0 --status pass

:mov Rd_GPR64, RegShift64Log
is b_31=1 & b_2430=0b0101010 & b_21=0 & b_0509=0b11111 & RegShift64Log & Rd_GPR64
{
  # special case of ORR
  tmp_1:8 = RegShift64Log;
  Rd_GPR64 = tmp_1;
}

# C6.2.225 MOVK page C6-1677 line 99301 MATCH x72800000/mask=x7f800000
# CONSTRUCT x72800000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x72800000/mask=xff800000 --status pass

:movk Rd_GPR32, FullImm_movk32_imm
is sf=0 & opc=3 & b_2428=0x12 & b_2323=1 & FullImm_movk32_imm & Rd_GPR32 & Rd_GPR64 & FullImm_movk32_mask
{
  local tmp:4 = Rd_GPR32 & FullImm_movk32_mask;
  tmp = tmp | FullImm_movk32_imm;
  Rd_GPR64 = zext(tmp);
}

# C6.2.225 MOVK page C6-1677 line 99301 MATCH x72800000/mask=x7f800000
# CONSTRUCT xf2800000/mask=xff800000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf2800000/mask=xff800000 --status pass

:movk Rd_GPR64, FullImm_movk64_imm
is sf=1 & opc=3 & b_2428=0x12 & b_2323=1 & FullImm_movk64_imm & Rd_GPR64 & FullImm_movk64_mask
{
  Rd_GPR64 = Rd_GPR64 & FullImm_movk64_mask;
  Rd_GPR64 = Rd_GPR64 | FullImm_movk64_imm;
}

# C6.2.173 MRS page C6-802 line 46877 MATCH KEEPWITH
# System-register operand table for MRS/MSR; continues past this point.

with : (l=0 | l=1) {

CopReg: spsr_el1 is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_el1 { export spsr_el1; }
CopReg: elr_el1 is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=0 & Op2_uimm3=1 & elr_el1 { export elr_el1; }
CopReg: sp_el0 is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=1 & Op2_uimm3=0 & sp_el0 { export sp_el0; }
CopReg: spsel is Op0=3 &
Op1_uimm3=0 & CRn=4 & CRm=2 & Op2_uimm3=0 & spsel { export spsel; } CopReg: daif is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=2 & Op2_uimm3=1 & daif { export daif; } CopReg: currentel is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=2 & Op2_uimm3=2 & currentel { export currentel; } CopReg: nzcv is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=2 & Op2_uimm3=0 & nzcv { export nzcv; } CopReg: fpcr is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=4 & Op2_uimm3=0 & fpcr { export fpcr; } CopReg: fpsr is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=4 & Op2_uimm3=1 & fpsr { export fpsr; } CopReg: dspsr_el0 is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=5 & Op2_uimm3=0 & dspsr_el0 { export dspsr_el0; } CopReg: dlr_el0 is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=5 & Op2_uimm3=1 & dlr_el0 { export dlr_el0; } CopReg: spsr_el2 is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_el2 { export spsr_el2; } CopReg: elr_el2 is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=0 & Op2_uimm3=1 & elr_el2 { export elr_el2; } CopReg: sp_el1 is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=1 & Op2_uimm3=0 & sp_el1 { export sp_el1; } CopReg: spsr_irq is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=0 & spsr_irq { export spsr_irq; } CopReg: spsr_abt is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=1 & spsr_abt { export spsr_abt; } CopReg: spsr_und is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=2 & spsr_und { export spsr_und; } CopReg: spsr_fiq is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=3 & Op2_uimm3=3 & spsr_fiq { export spsr_fiq; } CopReg: spsr_el3 is Op0=3 & Op1_uimm3=6 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_el3 { export spsr_el3; } CopReg: elr_el3 is Op0=3 & Op1_uimm3=6 & CRn=4 & CRm=0 & Op2_uimm3=1 & elr_el3 { export elr_el3; } CopReg: sp_el2 is Op0=3 & Op1_uimm3=6 & CRn=4 & CRm=1 & Op2_uimm3=0 & sp_el2 { export sp_el2; } # CopReg: spsr_svc is Op0=3 & Op1_uimm3=0 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_svc { export spsr_svc; } # CopReg: spsr_hyp is Op0=3 & Op1_uimm3=4 & CRn=4 & CRm=0 & Op2_uimm3=0 & spsr_hyp { export spsr_hyp; } CopReg: midr_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & 
CRm=0 & Op2_uimm3=0 & midr_el1 { export midr_el1; } CopReg: mpidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=5 & mpidr_el1 { export mpidr_el1; } CopReg: revidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=6 & revidr_el1 { export revidr_el1; } CopReg: id_dfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=2 & id_dfr0_el1 { export id_dfr0_el1; } CopReg: id_pfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=0 & id_pfr0_el1 { export id_pfr0_el1; } CopReg: id_pfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=1 & id_pfr1_el1 { export id_pfr1_el1; } CopReg: id_afr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=3 & id_afr0_el1 { export id_afr0_el1; } CopReg: id_mmfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=4 & id_mmfr0_el1 { export id_mmfr0_el1; } CopReg: id_mmfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=5 & id_mmfr1_el1 { export id_mmfr1_el1; } CopReg: id_mmfr2_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=6 & id_mmfr2_el1 { export id_mmfr2_el1; } CopReg: id_mmfr3_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=7 & id_mmfr3_el1 { export id_mmfr3_el1; } CopReg: id_isar0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=0 & id_isar0_el1 { export id_isar0_el1; } CopReg: id_isar1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=1 & id_isar1_el1 { export id_isar1_el1; } CopReg: id_isar2_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=2 & id_isar2_el1 { export id_isar2_el1; } CopReg: id_isar3_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=3 & id_isar3_el1 { export id_isar3_el1; } CopReg: id_isar4_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=4 & id_isar4_el1 { export id_isar4_el1; } CopReg: id_isar5_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=5 & id_isar5_el1 { export id_isar5_el1; } CopReg: mvfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=0 & mvfr0_el1 { export mvfr0_el1; } CopReg: mvfr1_el1 is Op0=3 & Op1_uimm3=0 & 
CRn=0 & CRm=3 & Op2_uimm3=1 & mvfr1_el1 { export mvfr1_el1; } CopReg: mvfr2_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=2 & mvfr2_el1 { export mvfr2_el1; } CopReg: ccsidr_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=0 & ccsidr_el1 { export ccsidr_el1; } CopReg: id_aa64pfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=0 & id_aa64pfr0_el1 { export id_aa64pfr0_el1; } CopReg: id_aa64pfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=1 & id_aa64pfr1_el1 { export id_aa64pfr1_el1; } CopReg: id_aa64dfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=0 & id_aa64dfr0_el1 { export id_aa64dfr0_el1; } CopReg: id_aa64dfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=1 & id_aa64dfr1_el1 { export id_aa64dfr1_el1; } CopReg: id_aa64isar0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=0 & id_aa64isar0_el1 { export id_aa64isar0_el1; } CopReg: id_aa64isar1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=1 & id_aa64isar1_el1 { export id_aa64isar1_el1; } CopReg: id_aa64mmfr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=0 & id_aa64mmfr0_el1 { export id_aa64mmfr0_el1; } CopReg: id_aa64mmfr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=1 & id_aa64mmfr1_el1 { export id_aa64mmfr1_el1; } CopReg: id_aa64afr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=4 & id_aa64afr0_el1 { export id_aa64afr0_el1; } CopReg: id_aa64afr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=5 & id_aa64afr1_el1 { export id_aa64afr1_el1; } CopReg: clidr_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=1 & clidr_el1 { export clidr_el1; } CopReg: aidr_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=7 & aidr_el1 { export aidr_el1; } CopReg: csselr_el1 is Op0=3 & Op1_uimm3=2 & CRn=0 & CRm=0 & Op2_uimm3=0 & csselr_el1 { export csselr_el1; } CopReg: ctr_el0 is Op0=3 & Op1_uimm3=3 & CRn=0 & CRm=0 & Op2_uimm3=1 & ctr_el0 { export ctr_el0; } CopReg: dczid_el0 is Op0=3 & Op1_uimm3=3 & CRn=0 & CRm=0 & 
Op2_uimm3=7 & dczid_el0 { export dczid_el0; } CopReg: vpidr_el2 is Op0=3 & Op1_uimm3=4 & CRn=0 & CRm=0 & Op2_uimm3=0 & vpidr_el2 { export vpidr_el2; } CopReg: vmpidr_el2 is Op0=3 & Op1_uimm3=4 & CRn=0 & CRm=0 & Op2_uimm3=5 & vmpidr_el2 { export vmpidr_el2; } CopReg: sctlr_el1 is Op0=3 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=0 & sctlr_el1 { export sctlr_el1; } CopReg: actlr_el1 is Op0=3 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=1 & actlr_el1 { export actlr_el1; } CopReg: cpacr_el1 is Op0=3 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=2 & cpacr_el1 { export cpacr_el1; } CopReg: sctlr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=0 & Op2_uimm3=0 & sctlr_el2 { export sctlr_el2; } CopReg: actlr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=0 & Op2_uimm3=1 & actlr_el2 { export actlr_el2; } CopReg: hcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=0 & hcr_el2 { export hcr_el2; } CopReg: mdcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=1 & mdcr_el2 { export mdcr_el2; } CopReg: cptr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=2 & cptr_el2 { export cptr_el2; } CopReg: hstr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=3 & hstr_el2 { export hstr_el2; } CopReg: hacr_el2 is Op0=3 & Op1_uimm3=4 & CRn=1 & CRm=1 & Op2_uimm3=7 & hacr_el2 { export hacr_el2; } CopReg: sctlr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=0 & Op2_uimm3=0 & sctlr_el3 { export sctlr_el3; } CopReg: actlr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=0 & Op2_uimm3=1 & actlr_el3 { export actlr_el3; } CopReg: scr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=1 & Op2_uimm3=0 & scr_el3 { export scr_el3; } CopReg: cptr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=1 & Op2_uimm3=2 & cptr_el3 { export cptr_el3; } CopReg: mdcr_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=3 & Op2_uimm3=1 & mdcr_el3 { export mdcr_el3; } CopReg: ttbr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=2 & CRm=0 & Op2_uimm3=0 & ttbr0_el1 { export ttbr0_el1; } CopReg: ttbr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=2 & CRm=0 & Op2_uimm3=1 & ttbr1_el1 { 
export ttbr1_el1; } CopReg: ttbr0_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=0 & Op2_uimm3=0 & ttbr0_el2 { export ttbr0_el2; } CopReg: ttbr0_el3 is Op0=3 & Op1_uimm3=6 & CRn=2 & CRm=0 & Op2_uimm3=0 & ttbr0_el3 { export ttbr0_el3; } CopReg: vttbr_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=1 & Op2_uimm3=0 & vttbr_el2 { export vttbr_el2; } CopReg: tcr_el1 is Op0=3 & Op1_uimm3=0 & CRn=2 & CRm=0 & Op2_uimm3=2 & tcr_el1 { export tcr_el1; } CopReg: tcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=0 & Op2_uimm3=2 & tcr_el2 { export tcr_el2; } CopReg: tcr_el3 is Op0=3 & Op1_uimm3=6 & CRn=2 & CRm=0 & Op2_uimm3=2 & tcr_el3 { export tcr_el3; } CopReg: vtcr_el2 is Op0=3 & Op1_uimm3=4 & CRn=2 & CRm=1 & Op2_uimm3=2 & vtcr_el2 { export vtcr_el2; } CopReg: afsr0_el1 is Op0=3 & Op1_uimm3=0 & CRn=5 & CRm=1 & Op2_uimm3=0 & afsr0_el1 { export afsr0_el1; } CopReg: afsr1_el1 is Op0=3 & Op1_uimm3=0 & CRn=5 & CRm=1 & Op2_uimm3=1 & afsr1_el1 { export afsr1_el1; } CopReg: afsr0_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=1 & Op2_uimm3=0 & afsr0_el2 { export afsr0_el2; } CopReg: afsr1_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=1 & Op2_uimm3=1 & afsr1_el2 { export afsr1_el2; } CopReg: afsr0_el3 is Op0=3 & Op1_uimm3=6 & CRn=5 & CRm=1 & Op2_uimm3=0 & afsr0_el3 { export afsr0_el3; } CopReg: afsr1_el3 is Op0=3 & Op1_uimm3=6 & CRn=5 & CRm=1 & Op2_uimm3=1 & afsr1_el3 { export afsr1_el3; } CopReg: esr_el1 is Op0=3 & Op1_uimm3=0 & CRn=5 & CRm=2 & Op2_uimm3=0 & esr_el1 { export esr_el1; } CopReg: esr_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=2 & Op2_uimm3=0 & esr_el2 { export esr_el2; } CopReg: esr_el3 is Op0=3 & Op1_uimm3=6 & CRn=5 & CRm=2 & Op2_uimm3=0 & esr_el3 { export esr_el3; } CopReg: fpexc32_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=3 & Op2_uimm3=0 & fpexc32_el2 { export fpexc32_el2; } CopReg: far_el1 is Op0=3 & Op1_uimm3=0 & CRn=6 & CRm=0 & Op2_uimm3=0 & far_el1 { export far_el1; } CopReg: far_el2 is Op0=3 & Op1_uimm3=4 & CRn=6 & CRm=0 & Op2_uimm3=0 & far_el2 { export far_el2; } CopReg: far_el3 is Op0=3 & 
Op1_uimm3=6 & CRn=6 & CRm=0 & Op2_uimm3=0 & far_el3 { export far_el3; } CopReg: hpfar_el2 is Op0=3 & Op1_uimm3=4 & CRn=6 & CRm=0 & Op2_uimm3=4 & hpfar_el2 { export hpfar_el2; } CopReg: par_el1 is Op0=3 & Op1_uimm3=0 & CRn=7 & CRm=4 & Op2_uimm3=0 & par_el1 { export par_el1; } CopReg: pmintenset_el1 is Op0=3 & Op1_uimm3=0 & CRn=9 & CRm=14 & Op2_uimm3=1 & pmintenset_el1 { export pmintenset_el1; } CopReg: pmintenclr_el1 is Op0=3 & Op1_uimm3=0 & CRn=9 & CRm=14 & Op2_uimm3=2 & pmintenclr_el1 { export pmintenclr_el1; } CopReg: pmcr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=0 & pmcr_el0 { export pmcr_el0; } CopReg: pmcntenset_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=1 & pmcntenset_el0 { export pmcntenset_el0; } CopReg: pmcntenclr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=2 & pmcntenclr_el0 { export pmcntenclr_el0; } CopReg: pmovsclr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=3 & pmovsclr_el0 { export pmovsclr_el0; } CopReg: pmswinc_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=4 & pmswinc_el0 { export pmswinc_el0; } CopReg: pmselr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=5 & pmselr_el0 { export pmselr_el0; } CopReg: pmceid0_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=6 & pmceid0_el0 { export pmceid0_el0; } CopReg: pmceid1_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=12 & Op2_uimm3=7 & pmceid1_el0 { export pmceid1_el0; } CopReg: pmccntr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=13 & Op2_uimm3=0 & pmccntr_el0 { export pmccntr_el0; } CopReg: pmxevtyper_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=13 & Op2_uimm3=1 & pmxevtyper_el0 { export pmxevtyper_el0; } CopReg: pmxevcntr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=13 & Op2_uimm3=2 & pmxevcntr_el0 { export pmxevcntr_el0; } CopReg: pmuserenr_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=14 & Op2_uimm3=0 & pmuserenr_el0 { export pmuserenr_el0; } CopReg: pmovsset_el0 is Op0=3 & Op1_uimm3=3 & CRn=9 & CRm=14 & Op2_uimm3=3 & pmovsset_el0 { export 
pmovsset_el0; } CopReg: pmevcntr0_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=0 & pmevcntr0_el0 { export pmevcntr0_el0; } CopReg: pmevcntr1_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=1 & pmevcntr1_el0 { export pmevcntr1_el0; } CopReg: pmevcntr2_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=2 & pmevcntr2_el0 { export pmevcntr2_el0; } CopReg: pmevcntr3_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=3 & pmevcntr3_el0 { export pmevcntr3_el0; } CopReg: pmevcntr4_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=4 & pmevcntr4_el0 { export pmevcntr4_el0; } CopReg: pmevcntr5_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=5 & pmevcntr5_el0 { export pmevcntr5_el0; } CopReg: pmevcntr6_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=6 & pmevcntr6_el0 { export pmevcntr6_el0; } CopReg: pmevcntr7_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=8 & Op2_uimm3=7 & pmevcntr7_el0 { export pmevcntr7_el0; } CopReg: pmevcntr8_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=0 & pmevcntr8_el0 { export pmevcntr8_el0; } CopReg: pmevcntr9_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=1 & pmevcntr9_el0 { export pmevcntr9_el0; } CopReg: pmevcntr10_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=2 & pmevcntr10_el0 { export pmevcntr10_el0; } CopReg: pmevcntr11_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=3 & pmevcntr11_el0 { export pmevcntr11_el0; } CopReg: pmevcntr12_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=4 & pmevcntr12_el0 { export pmevcntr12_el0; } CopReg: pmevcntr13_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=5 & pmevcntr13_el0 { export pmevcntr13_el0; } CopReg: pmevcntr14_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=6 & pmevcntr14_el0 { export pmevcntr14_el0; } CopReg: pmevcntr15_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=9 & Op2_uimm3=7 & pmevcntr15_el0 { export pmevcntr15_el0; } CopReg: pmevcntr16_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=0 
& pmevcntr16_el0 { export pmevcntr16_el0; } CopReg: pmevcntr17_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=1 & pmevcntr17_el0 { export pmevcntr17_el0; } CopReg: pmevcntr18_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=2 & pmevcntr18_el0 { export pmevcntr18_el0; } CopReg: pmevcntr19_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=3 & pmevcntr19_el0 { export pmevcntr19_el0; } CopReg: pmevcntr20_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=4 & pmevcntr20_el0 { export pmevcntr20_el0; } CopReg: pmevcntr21_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=5 & pmevcntr21_el0 { export pmevcntr21_el0; } CopReg: pmevcntr22_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=6 & pmevcntr22_el0 { export pmevcntr22_el0; } CopReg: pmevcntr23_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=10 & Op2_uimm3=7 & pmevcntr23_el0 { export pmevcntr23_el0; } CopReg: pmevcntr24_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=0 & pmevcntr24_el0 { export pmevcntr24_el0; } CopReg: pmevcntr25_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=1 & pmevcntr25_el0 { export pmevcntr25_el0; } CopReg: pmevcntr26_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=2 & pmevcntr26_el0 { export pmevcntr26_el0; } CopReg: pmevcntr27_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=3 & pmevcntr27_el0 { export pmevcntr27_el0; } CopReg: pmevcntr28_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=4 & pmevcntr28_el0 { export pmevcntr28_el0; } CopReg: pmevcntr29_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=5 & pmevcntr29_el0 { export pmevcntr29_el0; } CopReg: pmevcntr30_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=11 & Op2_uimm3=6 & pmevcntr30_el0 { export pmevcntr30_el0; } CopReg: pmevtyper0_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=0 & pmevtyper0_el0 { export pmevtyper0_el0; } CopReg: pmevtyper1_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=1 & pmevtyper1_el0 { export pmevtyper1_el0; } 
CopReg: pmevtyper2_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=2 & pmevtyper2_el0 { export pmevtyper2_el0; } CopReg: pmevtyper3_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=3 & pmevtyper3_el0 { export pmevtyper3_el0; } CopReg: pmevtyper4_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=4 & pmevtyper4_el0 { export pmevtyper4_el0; } CopReg: pmevtyper5_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=5 & pmevtyper5_el0 { export pmevtyper5_el0; } CopReg: pmevtyper6_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=6 & pmevtyper6_el0 { export pmevtyper6_el0; } CopReg: pmevtyper7_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=12 & Op2_uimm3=7 & pmevtyper7_el0 { export pmevtyper7_el0; } CopReg: pmevtyper8_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=0 & pmevtyper8_el0 { export pmevtyper8_el0; } CopReg: pmevtyper9_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=1 & pmevtyper9_el0 { export pmevtyper9_el0; } CopReg: pmevtyper10_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=2 & pmevtyper10_el0 { export pmevtyper10_el0; } CopReg: pmevtyper11_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=3 & pmevtyper11_el0 { export pmevtyper11_el0; } CopReg: pmevtyper12_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=4 & pmevtyper12_el0 { export pmevtyper12_el0; } CopReg: pmevtyper13_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=5 & pmevtyper13_el0 { export pmevtyper13_el0; } CopReg: pmevtyper14_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=6 & pmevtyper14_el0 { export pmevtyper14_el0; } CopReg: pmevtyper15_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=13 & Op2_uimm3=7 & pmevtyper15_el0 { export pmevtyper15_el0; } CopReg: pmevtyper16_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=0 & pmevtyper16_el0 { export pmevtyper16_el0; } CopReg: pmevtyper17_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=1 & pmevtyper17_el0 { export pmevtyper17_el0; } CopReg: pmevtyper18_el0 
is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=2 & pmevtyper18_el0 { export pmevtyper18_el0; } CopReg: pmevtyper19_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=3 & pmevtyper19_el0 { export pmevtyper19_el0; } CopReg: pmevtyper20_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=4 & pmevtyper20_el0 { export pmevtyper20_el0; } CopReg: pmevtyper21_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=5 & pmevtyper21_el0 { export pmevtyper21_el0; } CopReg: pmevtyper22_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=6 & pmevtyper22_el0 { export pmevtyper22_el0; } CopReg: pmevtyper23_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=14 & Op2_uimm3=7 & pmevtyper23_el0 { export pmevtyper23_el0; } CopReg: pmevtyper24_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=0 & pmevtyper24_el0 { export pmevtyper24_el0; } CopReg: pmevtyper25_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=1 & pmevtyper25_el0 { export pmevtyper25_el0; } CopReg: pmevtyper26_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=2 & pmevtyper26_el0 { export pmevtyper26_el0; } CopReg: pmevtyper27_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=3 & pmevtyper27_el0 { export pmevtyper27_el0; } CopReg: pmevtyper28_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=4 & pmevtyper28_el0 { export pmevtyper28_el0; } CopReg: pmevtyper29_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=5 & pmevtyper29_el0 { export pmevtyper29_el0; } CopReg: pmevtyper30_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=6 & pmevtyper30_el0 { export pmevtyper30_el0; } CopReg: pmccfiltr_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=15 & Op2_uimm3=7 & pmccfiltr_el0 { export pmccfiltr_el0; } CopReg: mair_el1 is Op0=3 & Op1_uimm3=0 & CRn=10 & CRm=2 & Op2_uimm3=0 & mair_el1 { export mair_el1; } CopReg: mair_el2 is Op0=3 & Op1_uimm3=4 & CRn=10 & CRm=2 & Op2_uimm3=0 & mair_el2 { export mair_el2; } CopReg: mair_el3 is Op0=3 & Op1_uimm3=6 & CRn=10 & CRm=2 & Op2_uimm3=0 & 
mair_el3 { export mair_el3; } CopReg: amair_el1 is Op0=3 & Op1_uimm3=0 & CRn=10 & CRm=3 & Op2_uimm3=0 & amair_el1 { export amair_el1; } CopReg: amair_el2 is Op0=3 & Op1_uimm3=4 & CRn=10 & CRm=3 & Op2_uimm3=0 & amair_el2 { export amair_el2; } CopReg: amair_el3 is Op0=3 & Op1_uimm3=6 & CRn=10 & CRm=3 & Op2_uimm3=0 & amair_el3 { export amair_el3; } CopReg: vbar_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=0 & Op2_uimm3=0 & vbar_el1 { export vbar_el1; } CopReg: vbar_el2 is Op0=3 & Op1_uimm3=4 & CRn=12 & CRm=0 & Op2_uimm3=0 & vbar_el2 { export vbar_el2; } CopReg: vbar_el3 is Op0=3 & Op1_uimm3=6 & CRn=12 & CRm=0 & Op2_uimm3=0 & vbar_el3 { export vbar_el3; } CopReg: rvbar_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=0 & Op2_uimm3=1 & rvbar_el1 { export rvbar_el1; } CopReg: rvbar_el2 is Op0=3 & Op1_uimm3=4 & CRn=12 & CRm=0 & Op2_uimm3=1 & rvbar_el2 { export rvbar_el2; } CopReg: rvbar_el3 is Op0=3 & Op1_uimm3=6 & CRn=12 & CRm=0 & Op2_uimm3=1 & rvbar_el3 { export rvbar_el3; } CopReg: rmr_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=0 & Op2_uimm3=2 & rmr_el1 { export rmr_el1; } CopReg: rmr_el2 is Op0=3 & Op1_uimm3=4 & CRn=12 & CRm=0 & Op2_uimm3=2 & rmr_el2 { export rmr_el2; } CopReg: rmr_el3 is Op0=3 & Op1_uimm3=6 & CRn=12 & CRm=0 & Op2_uimm3=2 & rmr_el3 { export rmr_el3; } CopReg: isr_el1 is Op0=3 & Op1_uimm3=0 & CRn=12 & CRm=1 & Op2_uimm3=0 & isr_el1 { export isr_el1; } CopReg: contextidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=13 & CRm=0 & Op2_uimm3=1 & contextidr_el1 { export contextidr_el1; } CopReg: tpidr_el0 is Op0=3 & Op1_uimm3=3 & CRn=13 & CRm=0 & Op2_uimm3=2 & tpidr_el0 { export tpidr_el0; } CopReg: tpidrro_el0 is Op0=3 & Op1_uimm3=3 & CRn=13 & CRm=0 & Op2_uimm3=3 & tpidrro_el0 { export tpidrro_el0; } CopReg: tpidr_el1 is Op0=3 & Op1_uimm3=0 & CRn=13 & CRm=0 & Op2_uimm3=4 & tpidr_el1 { export tpidr_el1; } CopReg: tpidr_el2 is Op0=3 & Op1_uimm3=4 & CRn=13 & CRm=0 & Op2_uimm3=2 & tpidr_el2 { export tpidr_el2; } CopReg: tpidr_el3 is Op0=3 & Op1_uimm3=6 & CRn=13 & CRm=0 & Op2_uimm3=2 & 
tpidr_el3 { export tpidr_el3; } CopReg: teecr32_el1 is Op0=2 & Op1_uimm3=2 & CRn=0 & CRm=0 & Op2_uimm3=0 & teecr32_el1 { export teecr32_el1; } CopReg: cntfrq_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=0 & Op2_uimm3=0 & cntfrq_el0 { export cntfrq_el0; } CopReg: cntpct_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=0 & Op2_uimm3=1 & cntpct_el0 { export cntpct_el0; } CopReg: cntvct_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=0 & Op2_uimm3=2 & cntvct_el0 { export cntvct_el0; } CopReg: cntvoff_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=0 & Op2_uimm3=3 & cntvoff_el2 { export cntvoff_el2; } CopReg: cntkctl_el1 is Op0=3 & Op1_uimm3=0 & CRn=14 & CRm=1 & Op2_uimm3=0 & cntkctl_el1 { export cntkctl_el1; } CopReg: cnthctl_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=1 & Op2_uimm3=0 & cnthctl_el2 { export cnthctl_el2; } CopReg: cntp_tval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=2 & Op2_uimm3=0 & cntp_tval_el0 { export cntp_tval_el0; } CopReg: cntp_ctl_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=2 & Op2_uimm3=1 & cntp_ctl_el0 { export cntp_ctl_el0; } CopReg: cntp_cval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=2 & Op2_uimm3=2 & cntp_cval_el0 { export cntp_cval_el0; } CopReg: cntv_tval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=3 & Op2_uimm3=0 & cntv_tval_el0 { export cntv_tval_el0; } CopReg: cntv_ctl_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=3 & Op2_uimm3=1 & cntv_ctl_el0 { export cntv_ctl_el0; } CopReg: cntv_cval_el0 is Op0=3 & Op1_uimm3=3 & CRn=14 & CRm=3 & Op2_uimm3=2 & cntv_cval_el0 { export cntv_cval_el0; } CopReg: cnthp_tval_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=2 & Op2_uimm3=0 & cnthp_tval_el2 { export cnthp_tval_el2; } CopReg: cnthp_ctl_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=2 & Op2_uimm3=1 & cnthp_ctl_el2 { export cnthp_ctl_el2; } CopReg: cnthp_cval_el2 is Op0=3 & Op1_uimm3=4 & CRn=14 & CRm=2 & Op2_uimm3=2 & cnthp_cval_el2 { export cnthp_cval_el2; } CopReg: cntps_tval_el1 is Op0=3 & Op1_uimm3=7 & CRn=14 & CRm=2 & Op2_uimm3=0 & cntps_tval_el1 { export cntps_tval_el1; } CopReg: 
cntps_ctl_el1 is Op0=3 & Op1_uimm3=7 & CRn=14 & CRm=2 & Op2_uimm3=1 & cntps_ctl_el1 { export cntps_ctl_el1; } CopReg: cntps_cval_el1 is Op0=3 & Op1_uimm3=7 & CRn=14 & CRm=2 & Op2_uimm3=2 & cntps_cval_el1 { export cntps_cval_el1; } CopReg: dacr32_el2 is Op0=3 & Op1_uimm3=4 & CRn=3 & CRm=0 & Op2_uimm3=0 & dacr32_el2 { export dacr32_el2; } CopReg: ifsr32_el2 is Op0=3 & Op1_uimm3=4 & CRn=5 & CRm=0 & Op2_uimm3=1 & ifsr32_el2 { export ifsr32_el2; } CopReg: teehbr32_el1 is Op0=2 & Op1_uimm3=2 & CRn=1 & CRm=0 & Op2_uimm3=0 & teehbr32_el1 { export teehbr32_el1; } CopReg: sder32_el3 is Op0=3 & Op1_uimm3=6 & CRn=1 & CRm=1 & Op2_uimm3=1 & sder32_el3 { export sder32_el3; } CopReg: osdtrrx_el1 is Op0=3 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=2 & osdtrrx_el1 { export osdtrrx_el1; } CopReg: mdccint_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=0 & mdccint_el1 { export mdccint_el1; } CopReg: mdscr_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=2 & mdscr_el1 { export mdscr_el1; } CopReg: osdtrtx_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=2 & osdtrtx_el1 { export osdtrtx_el1; } CopReg: oseccr_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=2 & oseccr_el1 { export oseccr_el1; } CopReg: dbgbvr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=4 & dbgbvr0_el1 { export dbgbvr0_el1; } CopReg: dbgbvr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=4 & dbgbvr1_el1 { export dbgbvr1_el1; } CopReg: dbgbvr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=4 & dbgbvr2_el1 { export dbgbvr2_el1; } CopReg: dbgbvr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=4 & dbgbvr3_el1 { export dbgbvr3_el1; } CopReg: dbgbvr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=4 & dbgbvr4_el1 { export dbgbvr4_el1; } CopReg: dbgbvr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=4 & dbgbvr5_el1 { export dbgbvr5_el1; } CopReg: dbgbvr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=4 & dbgbvr6_el1 { export dbgbvr6_el1; } CopReg: 
dbgbvr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=4 & dbgbvr7_el1 { export dbgbvr7_el1; } CopReg: dbgbvr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=4 & dbgbvr8_el1 { export dbgbvr8_el1; } CopReg: dbgbvr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=4 & dbgbvr9_el1 { export dbgbvr9_el1; } CopReg: dbgbvr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=4 & dbgbvr10_el1 { export dbgbvr10_el1; } CopReg: dbgbvr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=4 & dbgbvr11_el1 { export dbgbvr11_el1; } CopReg: dbgbvr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=4 & dbgbvr12_el1 { export dbgbvr12_el1; } CopReg: dbgbvr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=4 & dbgbvr13_el1 { export dbgbvr13_el1; } CopReg: dbgbvr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=4 & dbgbvr14_el1 { export dbgbvr14_el1; } CopReg: dbgbvr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=4 & dbgbvr15_el1 { export dbgbvr15_el1; } CopReg: dbgbcr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=5 & dbgbcr0_el1 { export dbgbcr0_el1; } CopReg: dbgbcr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=5 & dbgbcr1_el1 { export dbgbcr1_el1; } CopReg: dbgbcr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=5 & dbgbcr2_el1 { export dbgbcr2_el1; } CopReg: dbgbcr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=5 & dbgbcr3_el1 { export dbgbcr3_el1; } CopReg: dbgbcr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=5 & dbgbcr4_el1 { export dbgbcr4_el1; } CopReg: dbgbcr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=5 & dbgbcr5_el1 { export dbgbcr5_el1; } CopReg: dbgbcr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=5 & dbgbcr6_el1 { export dbgbcr6_el1; } CopReg: dbgbcr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=5 & dbgbcr7_el1 { export dbgbcr7_el1; } CopReg: dbgbcr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=5 & dbgbcr8_el1 { export 
dbgbcr8_el1; } CopReg: dbgbcr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=5 & dbgbcr9_el1 { export dbgbcr9_el1; } CopReg: dbgbcr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=5 & dbgbcr10_el1 { export dbgbcr10_el1; } CopReg: dbgbcr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=5 & dbgbcr11_el1 { export dbgbcr11_el1; } CopReg: dbgbcr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=5 & dbgbcr12_el1 { export dbgbcr12_el1; } CopReg: dbgbcr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=5 & dbgbcr13_el1 { export dbgbcr13_el1; } CopReg: dbgbcr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=5 & dbgbcr14_el1 { export dbgbcr14_el1; } CopReg: dbgbcr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=5 & dbgbcr15_el1 { export dbgbcr15_el1; } CopReg: dbgwvr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=6 & dbgwvr0_el1 { export dbgwvr0_el1; } CopReg: dbgwvr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=6 & dbgwvr1_el1 { export dbgwvr1_el1; } CopReg: dbgwvr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=6 & dbgwvr2_el1 { export dbgwvr2_el1; } CopReg: dbgwvr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=6 & dbgwvr3_el1 { export dbgwvr3_el1; } CopReg: dbgwvr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=6 & dbgwvr4_el1 { export dbgwvr4_el1; } CopReg: dbgwvr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=6 & dbgwvr5_el1 { export dbgwvr5_el1; } CopReg: dbgwvr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=6 & dbgwvr6_el1 { export dbgwvr6_el1; } CopReg: dbgwvr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=6 & dbgwvr7_el1 { export dbgwvr7_el1; } CopReg: dbgwvr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=6 & dbgwvr8_el1 { export dbgwvr8_el1; } CopReg: dbgwvr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=6 & dbgwvr9_el1 { export dbgwvr9_el1; } CopReg: dbgwvr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & 
Op2_uimm3=6 & dbgwvr10_el1 { export dbgwvr10_el1; } CopReg: dbgwvr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=6 & dbgwvr11_el1 { export dbgwvr11_el1; } CopReg: dbgwvr12_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=6 & dbgwvr12_el1 { export dbgwvr12_el1; } CopReg: dbgwvr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=6 & dbgwvr13_el1 { export dbgwvr13_el1; } CopReg: dbgwvr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=6 & dbgwvr14_el1 { export dbgwvr14_el1; } CopReg: dbgwvr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=6 & dbgwvr15_el1 { export dbgwvr15_el1; } CopReg: dbgwcr0_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=0 & Op2_uimm3=7 & dbgwcr0_el1 { export dbgwcr0_el1; } CopReg: dbgwcr1_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=1 & Op2_uimm3=7 & dbgwcr1_el1 { export dbgwcr1_el1; } CopReg: dbgwcr2_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=2 & Op2_uimm3=7 & dbgwcr2_el1 { export dbgwcr2_el1; } CopReg: dbgwcr3_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=3 & Op2_uimm3=7 & dbgwcr3_el1 { export dbgwcr3_el1; } CopReg: dbgwcr4_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=4 & Op2_uimm3=7 & dbgwcr4_el1 { export dbgwcr4_el1; } CopReg: dbgwcr5_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=5 & Op2_uimm3=7 & dbgwcr5_el1 { export dbgwcr5_el1; } CopReg: dbgwcr6_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=6 & Op2_uimm3=7 & dbgwcr6_el1 { export dbgwcr6_el1; } CopReg: dbgwcr7_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=7 & Op2_uimm3=7 & dbgwcr7_el1 { export dbgwcr7_el1; } CopReg: dbgwcr8_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=8 & Op2_uimm3=7 & dbgwcr8_el1 { export dbgwcr8_el1; } CopReg: dbgwcr9_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=9 & Op2_uimm3=7 & dbgwcr9_el1 { export dbgwcr9_el1; } CopReg: dbgwcr10_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=10 & Op2_uimm3=7 & dbgwcr10_el1 { export dbgwcr10_el1; } CopReg: dbgwcr11_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=11 & Op2_uimm3=7 & dbgwcr11_el1 { export dbgwcr11_el1; } CopReg: dbgwcr12_el1 is Op0=2 & 
Op1_uimm3=0 & CRn=0 & CRm=12 & Op2_uimm3=7 & dbgwcr12_el1 { export dbgwcr12_el1; } CopReg: dbgwcr13_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=13 & Op2_uimm3=7 & dbgwcr13_el1 { export dbgwcr13_el1; } CopReg: dbgwcr14_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=14 & Op2_uimm3=7 & dbgwcr14_el1 { export dbgwcr14_el1; } CopReg: dbgwcr15_el1 is Op0=2 & Op1_uimm3=0 & CRn=0 & CRm=15 & Op2_uimm3=7 & dbgwcr15_el1 { export dbgwcr15_el1; } CopReg: mdrar_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=0 & mdrar_el1 { export mdrar_el1; } CopReg: oslar_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=0 & Op2_uimm3=4 & oslar_el1 { export oslar_el1; } CopReg: oslsr_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=1 & Op2_uimm3=4 & oslsr_el1 { export oslsr_el1; } CopReg: osdlr_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=3 & Op2_uimm3=4 & osdlr_el1 { export osdlr_el1; } CopReg: dbgprcr_el1 is Op0=2 & Op1_uimm3=0 & CRn=1 & CRm=4 & Op2_uimm3=4 & dbgprcr_el1 { export dbgprcr_el1; } CopReg: dbgclaimset_el1 is Op0=2 & Op1_uimm3=0 & CRn=7 & CRm=8 & Op2_uimm3=6 & dbgclaimset_el1 { export dbgclaimset_el1; } CopReg: dbgclaimclr_el1 is Op0=2 & Op1_uimm3=0 & CRn=7 & CRm=9 & Op2_uimm3=6 & dbgclaimclr_el1 { export dbgclaimclr_el1; } CopReg: dbgauthstatus_el1 is Op0=2 & Op1_uimm3=0 & CRn=7 & CRm=14 & Op2_uimm3=6 & dbgauthstatus_el1 { export dbgauthstatus_el1; } CopReg: mdccsr_el0 is Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=1 & Op2_uimm3=0 & mdccsr_el0 { export mdccsr_el0; } CopReg: dbgdtr_el0 is Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=4 & Op2_uimm3=0 & dbgdtr_el0 { export dbgdtr_el0; } CopReg: dbgvcr32_el2 is Op0=2 & Op1_uimm3=4 & CRn=0 & CRm=7 & Op2_uimm3=0 & dbgvcr32_el2 { export dbgvcr32_el2; } # The SysReg document implies that GMID_EL1 can only be read - the doc only provides pseudocode for read access. # However, the register is in this block (without a required value for 'l') because that might not be fully accurate. 
CopReg: gmid_el1 is Op0=3 & Op1_uimm3=1 & CRn=0 & CRm=0 & Op2_uimm3=4 & gmid_el1 { export gmid_el1; } CopReg: ssbs is Op0=3 & Op1_uimm3=3 & CRn=4 & CRm=2 & Op2_uimm3=6 & ssbs { export ssbs; } } # with : (l=0 | l=1) { CopReg: dbgdtrrx_el0 is l=0 & Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=5 & Op2_uimm3=0 & dbgdtrrx_el0 { export dbgdtrrx_el0; } CopReg: dbgdtrtx_el0 is l=1 & Op0=2 & Op1_uimm3=3 & CRn=0 & CRm=5 & Op2_uimm3=0 & dbgdtrtx_el0 { export dbgdtrtx_el0; } CopReg: "sreg("^Op0^", "^Op1_uimm3^", c"^CRn^", c"^CRm^", "^Op2_uimm3^")" is l=1 & Op0 & Op1_uimm3 & CRn & CRm & Op2_uimm3 { tmp:8 = UnkSytemRegRead(Op0:1, Op1_uimm3:1, CRn:1, CRm:1, Op2_uimm3:1); export tmp; } CopReg: "sreg("^Op0^", "^Op1_uimm3^", c"^CRn^", c"^CRm^", "^Op2_uimm3^")" is l=0 & Op0 & Op1_uimm3 & CRn & CRm & Op2_uimm3 & Rt_GPR64 { tmp:8 = UnkSytemRegWrite(Op0:1, Op1_uimm3:1, CRn:1, CRm:1, Op2_uimm3:1, Rt_GPR64); export tmp; } PState_pstate_op: "DAIFSet" is Op1_uimm3=3 & Op2_uimm3=6 & CRm { daif = (CRm << 6) | daif; } PState_pstate_op: "DAIFClr" is Op1_uimm3=3 & Op2_uimm3=7 & CRm { tmp:8 = CRm; daif = (~(tmp << 6)) & daif; } PState_pstate_op: "PState.UAO" is Op1_uimm3=0 & Op2_uimm3=3 & CRm { tmp:8 = CRm; uao = tmp & 1; } PState_pstate_op: "PState.PAN" is Op1_uimm3=0 & Op2_uimm3=4 & CRm { tmp:8 = CRm; pan = tmp & 1; } PState_pstate_op: "PState.SP" is Op1_uimm3=0 & Op2_uimm3=5 & CRm { tmp:8 = CRm; spsel = tmp & 1; } PState_pstate_op: "PState.TCO" is Op1_uimm3=3 & Op2_uimm3=4 & CRm { tmp:8 = CRm; tco = tmp & 1; } PState_pstate_op: "PState.ALLINT" is Op1_uimm3=1 & Op2_uimm3=0 & b_0911=0 & CRm { tmp:8 = CRm; allint = tmp & 1; } PState_pstate_op: "PState.DIT" is Op1_uimm3=3 & Op2_uimm3=2 & CRm { tmp:8 = CRm; dit = tmp & 1; } #PState_pstate_op: "PState.SVCRSM" is Op1_uimm3=3 & Op2_uimm3=3 & b_0911=1 & b_08 { tmp:8 = b_08; svcrsm = tmp & 1; } # see SMSTART/SMSTOP #PState_pstate_op: "PState.SVCRZA" is Op1_uimm3=3 & Op2_uimm3=3 & b_0911=2 & b_08 { tmp:8 = b_08; svcrza = tmp & 1; } # see SMSTART/SMSTOP 
#PState_pstate_op: "PState.SVZRMZA" is Op1_uimm3=3 & Op2_uimm3=3 & b_0911=3 & b_08 { tmp:8 = b_08; svcrsmza = tmp & 1; } # see SMSTART/SMSTOP
PState_pstate_op: "PState.SSBS" is Op1_uimm3=3 & Op2_uimm3=1 & CRm { tmp:8 = CRm; ssbs = tmp & 1; }
# C6.2.228 MRS page C6-1683 line 99588 MATCH xd5300000/mask=xfff00000
# C6.2.379 TSTART page C6-1979 line 116075 MATCH xd5233060/mask=xffffffe0
# C6.2.380 TTEST page C6-1981 line 116175 MATCH xd5233160/mask=xffffffe0
# CONSTRUCT xd5200000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd5200000/mask=xffe00000 --status noqemu
# MRS: read system register (CopReg) into Xt.
:mrs Rt_GPR64, CopReg is b_2431=0xd5 & b_2223=0 & l=1 & CopReg & Rt_GPR64 { Rt_GPR64 = CopReg; }
# C6.2.229 MSR (immediate) page C6-1684 line 99649 MATCH xd500401f/mask=xfff8f01f
# CONSTRUCT xd500401f/mask=xfff8f01f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd500401f/mask=xfff8f01f --status nodest
# MSR (immediate): the PSTATE side effect is performed inside PState_pstate_op,
# so this constructor's own semantic body is intentionally empty.
:msr PState_pstate_op, CRm_uimm4 is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & PState_pstate_op & CRn=0x4 & CRm_uimm4 & Rt=0x1f { }
# CONSTRUCT xd5000000/mask=xffe00000 DID NOT MATCH ANY DOCUMENTED OPCODE
# AUNIT --inst xd5000000/mask=xffe00000 --status noqemu
# MSR (register): write Xt into the system register selected by CopReg.
:msr CopReg, Rt_GPR64 is b_2431=0xd5 & b_2223=0 & l=0 & CopReg & Rt_GPR64 { CopReg = Rt_GPR64; }
# C6.2.231 MSUB page C6-1689 line 99963 MATCH x1b008000/mask=x7fe08000
# C6.2.219 MNEG page C6-1666 line 98782 MATCH x1b00fc00/mask=x7fe0fc00
# CONSTRUCT x1b008000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1b008000/mask=xffe08000 --status pass
# MSUB (32-bit): Wd = Wa - Wn*Wm, zero-extended into Xd.
:msub Rd_GPR32, Rn_GPR32, Rm_GPR32, Ra_GPR32 is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=1 & Ra_GPR32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = Rn_GPR32 * Rm_GPR32; tmp_1:4 = Ra_GPR32 - tmp_2; Rd_GPR64 = zext(tmp_1); }
# C6.2.231 MSUB page C6-1689 line 99963 MATCH x1b008000/mask=x7fe08000
# C6.2.219 MNEG page C6-1666 line 98782 MATCH x1b00fc00/mask=x7fe0fc00
# CONSTRUCT x9b008000/mask=xffe08000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9b008000/mask=xffe08000 --status pass
# MSUB (64-bit): Xd = Xa - Xn*Xm.
:msub Rd_GPR64, Rn_GPR64, Rm_GPR64, Ra_GPR64 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=1 & Ra_GPR64 & Rn_GPR64 & Rd_GPR64 { tmp_2:8 = Rn_GPR64 * Rm_GPR64; tmp_1:8 = Ra_GPR64 - tmp_2; Rd_GPR64 = tmp_1; }
# C6.2.232 MUL page C6-1691 line 100073 MATCH x1b007c00/mask=x7fe0fc00
# C6.2.218 MADD page C6-1664 line 98671 MATCH x1b000000/mask=x7fe08000
# CONSTRUCT x1b007c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1b007c00/mask=xffe0fc00 --status pass
# MUL is the MADD alias with Ra == XZR (0x1f).
:mul Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR32 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = Rn_GPR32 * Rm_GPR32; Rd_GPR64 = zext(tmp_2); }
# C6.2.232 MUL page C6-1691 line 100073 MATCH x1b007c00/mask=x7fe0fc00
# C6.2.218 MADD page C6-1664 line 98671 MATCH x1b000000/mask=x7fe08000
# CONSTRUCT x9b007c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9b007c00/mask=xffe0fc00 --status pass
:mul Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=0 & Rm_GPR64 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR64 & Rd_GPR64 { tmp_2:8 = Rn_GPR64 * Rm_GPR64; Rd_GPR64 = tmp_2; }
# C6.2.233 MVN page C6-1692 line 100146 MATCH x2a2003e0/mask=x7f2003e0
# C6.2.239 ORN (shifted register) page C6-1703 line 100663 MATCH x2a200000/mask=x7f200000
# CONSTRUCT x2a2003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x2a2003e0/mask=xff2003e0 --status pass
# MVN is the ORN alias with Rn == WZR: Wd = NOT(shifted Wm).
:mvn Rd_GPR32, RegShift32Log is sf=0 & opc=1 & b_2428=0xa & N=1 & RegShift32Log & Rn=0x1f & Rd_GPR32 & Rd_GPR64 { tmp_1:4 = ~RegShift32Log; Rd_GPR64 = zext(tmp_1); }
# C6.2.233 MVN page C6-1692 line 100146 MATCH x2a2003e0/mask=x7f2003e0
# C6.2.239 ORN (shifted register) page C6-1703 line 100663 MATCH x2a200000/mask=x7f200000
# CONSTRUCT xaa2003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xaa2003e0/mask=xff2003e0 --status pass
:mvn Rd_GPR64, RegShift64Log is sf=1 & opc=1 & b_2428=0xa
& N=1 & Rm_GPR64 & RegShift64Log & Rn=0x1f & Rd_GPR64 { tmp_1:8 = ~RegShift64Log; Rd_GPR64 = tmp_1; }
# C6.2.234 NEG (shifted register) page C6-1694 line 100243 MATCH x4b0003e0/mask=x7f2003e0
# C6.2.358 SUB (shifted register) page C6-1945 line 114221 MATCH x4b000000/mask=x7f200000
# CONSTRUCT x4b0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x4b0003e0/mask=xff2003e0 --status pass
# NEG is the SUB (shifted register) alias with Rn == ZR.
:neg Rd_GPR32, RegShift32 is sf=0 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift32 & Rn=0x1f & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = RegShift32; tmp_1:4 = - tmp_2; Rd_GPR64 = zext(tmp_1); }
# C6.2.234 NEG (shifted register) page C6-1694 line 100243 MATCH x4b0003e0/mask=x7f2003e0
# C6.2.358 SUB (shifted register) page C6-1945 line 114221 MATCH x4b000000/mask=x7f200000
# CONSTRUCT xcb0003e0/mask=xff2003e0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xcb0003e0/mask=xff2003e0 --status pass
:neg Rd_GPR64, RegShift64 is sf=1 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift64 & Rn=0x1f & Rd_GPR64 { tmp_2:8 = RegShift64; tmp_1:8 = - tmp_2; Rd_GPR64 = tmp_1; }
# C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0
# C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f
# C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000
# CONSTRUCT x6b0003e0/mask=xff2003e0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x6b0003e0/mask=xff2003e0 --status pass --comment "flags"
# NEGS: flag-setting negate, modeled as 0 - operand with NZCV updated via the
# shared subflags0/resultflags/affectflags helpers.
:negs Rd_GPR32, RegShift32 is sf=0 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift32 & Rn=0x1f & Rd_GPR32 & Rd & Rd_GPR64 { tmp_2:4 = RegShift32; subflags0(tmp_2); tmp_1:4 = 0:4 - tmp_2; resultflags(tmp_1); Rd_GPR64 = zext(tmp_1); affectflags(); }
# C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0
# C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f
# C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000
# CONSTRUCT xeb0003e0/mask=xff2003e0 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xeb0003e0/mask=xff2003e0 --status pass --comment "flags"
:negs Rd_GPR64, RegShift64 is sf=1 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift64 & Rn=0x1f & Rd_GPR64 & Rd { tmp_2:8 = RegShift64; subflags0(tmp_2); tmp_1:8 = 0:8 - tmp_2; resultflags(tmp_1); Rd_GPR64 = tmp_1; affectflags(); }
# C6.2.236 NGC page C6-1698 line 100437 MATCH x5a0003e0/mask=x7fe0ffe0
# C6.2.265 SBC page C6-1747 line 102973 MATCH x5a000000/mask=x7fe0fc00
# CONSTRUCT x5a0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x5a0003e0/mask=xffe0ffe0 --status pass --comment "flags"
# NGC is the SBC alias with Rn == ZR: Rd = 0 - (Rm + NOT(carry)).
:ngc Rd_GPR32, Rm_GPR32 is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR32 & opcode2=0x0 & Rn=0x1f & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rm_GPR32 + zext(!CY); Rd_GPR64 = zext(-tmp); }
# C6.2.236 NGC page C6-1698 line 100437 MATCH x5a0003e0/mask=x7fe0ffe0
# C6.2.265 SBC page C6-1747 line 102973 MATCH x5a000000/mask=x7fe0fc00
# CONSTRUCT xda0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xda0003e0/mask=xffe0ffe0 --status pass --comment "flags"
:ngc Rd_GPR64, Rm_GPR64 is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR64 & opcode2=0x0 & Rn=0x1f & Rd_GPR64 { tmp:8 = Rm_GPR64 + zext(!CY); Rd_GPR64 = -tmp; }
# C6.2.237 NGCS page C6-1700 line 100524 MATCH x7a0003e0/mask=x7fe0ffe0
# C6.2.266 SBCS page C6-1749 line 103074 MATCH x7a000000/mask=x7fe0fc00
# CONSTRUCT x7a0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x7a0003e0/mask=xffe0ffe0 --status pass --comment "flags"
# NGCS: as NGC but also computes NZCV (add_with_carry_flags of 0 and ~tmp).
:ngcs Rd_GPR32, Rm_GPR32 is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rn=0x1f & opcode2=0x0 & Rm_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rm_GPR32 + zext(!CY); add_with_carry_flags(0,~tmp); Rd_GPR64 = zext(-tmp); resultflags(Rd_GPR32); affectflags(); }
# C6.2.237 NGCS page C6-1700 line 100524 MATCH x7a0003e0/mask=x7fe0ffe0
# C6.2.266 SBCS page C6-1749 line 103074 MATCH x7a000000/mask=x7fe0fc00
# CONSTRUCT xfa0003e0/mask=xffe0ffe0 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xfa0003e0/mask=xffe0ffe0 --status pass --comment "flags"
:ngcs Rd_GPR64, Rm_GPR64 is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rn=0x1f & opcode2=0x0 & Rm_GPR64 & Rd_GPR64 { tmp:8 = Rm_GPR64 + zext(!CY); add_with_carry_flags(0,~tmp); Rd_GPR64 = -tmp; resultflags(Rd_GPR64); affectflags(); }
# C6.2.238 NOP page C6-1702 line 100611 MATCH xd503201f/mask=xffffffff
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503201f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503201f/mask=xffffffff --status nodest
:nop is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=0 & Rt=0x1f { }
# C6.2.239 ORN (shifted register) page C6-1703 line 100663 MATCH x2a200000/mask=x7f200000
# C6.2.233 MVN page C6-1692 line 100146 MATCH x2a2003e0/mask=x7f2003e0
# CONSTRUCT x2a200000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x2a200000/mask=xff200000 --status pass
# ORN: bitwise OR with complemented (shifted) second operand.
:orn Rd_GPR32, Rn_GPR32, RegShift32Log is sf=0 & opc=1 & b_2428=0xa & N=1 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_3:4 = RegShift32Log; tmp_2:4 = tmp_3 ^ -1:4; tmp_1:4 = Rn_GPR32 | tmp_2; Rd_GPR64 = zext(tmp_1); }
# C6.2.239 ORN (shifted register) page C6-1703 line 100663 MATCH x2a200000/mask=x7f200000
# C6.2.233 MVN page C6-1692 line 100146 MATCH x2a2003e0/mask=x7f2003e0
# CONSTRUCT xaa200000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xaa200000/mask=xff200000 --status pass
:orn Rd_GPR64, Rn_GPR64, RegShift64Log is sf=1 & opc=1 & b_2428=0xa & N=1 & RegShift64Log & Rn_GPR64 & Rd_GPR64 { tmp_3:8= RegShift64Log; tmp_2:8 = tmp_3 ^ -1:8; tmp_1:8 = Rn_GPR64 | tmp_2; Rd_GPR64 = tmp_1; }
# C6.2.240 ORR (immediate) page C6-1705 line 100779 MATCH x32000000/mask=x7f800000
# C6.2.223 MOV (bitmask immediate) page C6-1673 line 99125 MATCH x320003e0/mask=x7f8003e0
# CONSTRUCT x32000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x32000000/mask=xff800000 --status pass
:orr Rd_GPR32wsp, Rn_GPR32, DecodeWMask32 is sf=0 & opc=1 &
b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd_GPR32wsp & Rd_GPR64xsp { tmp_1:4 = Rn_GPR32 | DecodeWMask32; Rd_GPR64xsp = zext(tmp_1); }
# C6.2.240 ORR (immediate) page C6-1705 line 100779 MATCH x32000000/mask=x7f800000
# C6.2.223 MOV (bitmask immediate) page C6-1673 line 99125 MATCH x320003e0/mask=x7f8003e0
# CONSTRUCT xb2000000/mask=xff800000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb2000000/mask=xff800000 --status pass
# ORR (immediate, 64-bit): Xd|SP = Xn | decoded bitmask immediate.
:orr Rd_GPR64xsp, Rn_GPR64, DecodeWMask64 is sf=1 & opc=1 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd_GPR64xsp { tmp_1:8 = Rn_GPR64 | DecodeWMask64; Rd_GPR64xsp = tmp_1; }
# C6.2.241 ORR (shifted register) page C6-1707 line 100882 MATCH x2a000000/mask=x7f200000
# C6.2.224 MOV (register) page C6-1675 line 99214 MATCH x2a0003e0/mask=x7fe0ffe0
# CONSTRUCT x2a000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x2a000000/mask=xff200000 --status pass
:orr Rd_GPR32, Rn_GPR32, RegShift32Log is b_31=0 & b_2430=0b0101010 & b_21=0 & RegShift32Log & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = RegShift32Log; tmp_1:4 = Rn_GPR32 | tmp_2; Rd_GPR64 = zext(tmp_1); }
# C6.2.241 ORR (shifted register) page C6-1707 line 100882 MATCH x2a000000/mask=x7f200000
# C6.2.224 MOV (register) page C6-1675 line 99214 MATCH x2a0003e0/mask=x7fe0ffe0
# CONSTRUCT xaa000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xaa000000/mask=xff200000 --status pass
:orr Rd_GPR64, Rn_GPR64, RegShift64Log is b_31=1 & b_2430=0b0101010 & b_21=0 & RegShift64Log & Rn_GPR64 & Rd_GPR64 { tmp_2:8 = RegShift64Log; tmp_1:8 = Rn_GPR64 | tmp_2; Rd_GPR64 = tmp_1; }
# Pointer-authentication (PAC) sign instructions: the actual signing semantics
# live in the *__PACpart subconstructors that each body builds.
# C6.2.242 PACDA, PACDZA page C6-1709 line 100996 MATCH xdac10800/mask=xffffdc00
# CONSTRUCT xdac10800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac10800/mask=xfffffc00 --status noqemu
# z == 0 pacda variant
:pacda Rd_GPR64, Rn_GPR64xsp is pacda__PACpart & b_1431=0b110110101100000100 & b_1012=0b010 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build pacda__PACpart; }
# C6.2.242 PACDA, PACDZA page C6-1709 line 100996 MATCH xdac10800/mask=xffffdc00
# CONSTRUCT xdac12be0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac12be0/mask=xffffffe0 --status noqemu
# z == 1 pacdza variant
:pacdza Rd_GPR64 is pacdza__PACpart & b_1431=0b110110101100000100 & b_1012=0b010 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build pacdza__PACpart; }
# C6.2.243 PACDB, PACDZB page C6-1710 line 101067 MATCH xdac10c00/mask=xffffdc00
# CONSTRUCT xdac10c00/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac10c00/mask=xfffffc00 --status noqemu
# z == 0 pacdb variant
:pacdb Rd_GPR64, Rn_GPR64xsp is pacdb__PACpart & b_1431=0b110110101100000100 & b_1012=0b011 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build pacdb__PACpart; }
# C6.2.243 PACDB, PACDZB page C6-1710 line 101067 MATCH xdac10c00/mask=xffffdc00
# CONSTRUCT xdac12fe0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac12fe0/mask=xffffffe0 --status noqemu
# z == 1 pacdzb variant
:pacdzb Rd_GPR64 is pacdzb__PACpart & b_1431=0b110110101100000100 & b_1012=0b011 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build pacdzb__PACpart; }
# C6.2.244 PACGA page C6-1711 line 101138 MATCH x9ac03000/mask=xffe0fc00
# CONSTRUCT x9ac03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x9ac03000/mask=xffe0fc00 --status noqemu
:pacga Rd_GPR64, Rn_GPR64, Rm_GPR64xsp is b_2131=0b10011010110 & b_1015=0b001100 & Rm_GPR64xsp & Rn_GPR64 & Rd_GPR64 {
# This operation, unlike all other PAC operations, does not put its output in
# the same register as its first input. This means that putting a "noclobber"
# variant on this operation would violate the definition of PACGA.
Rd_GPR64 = pacga(Rn_GPR64, Rm_GPR64xsp); }
# C6.2.245 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1712 line 101196 MATCH xdac10000/mask=xffffdc00
# CONSTRUCT xdac10000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac10000/mask=xfffffc00 --status noqemu
# Z == 0 PACIA variant
:pacia Rd_GPR64, Rn_GPR64xsp is pacia__PACpart & b_1431=0b110110101100000100 & b_1012=0b000 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build pacia__PACpart; }
# C6.2.245 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1712 line 101196 MATCH xdac10000/mask=xffffdc00
# CONSTRUCT xdac123e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac123e0/mask=xffffffe0 --status noqemu
# Z == 1 && Rn == 11111 PACIZA variant
:paciza Rd_GPR64 is paciza__PACpart & b_1431=0b110110101100000100 & b_1012=0b000 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build paciza__PACpart; }
# C6.2.245 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1712 line 101196 MATCH xd503211f/mask=xfffffddf
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503211f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503211f/mask=xffffffff --status nodest
# CRm == 0001 && op2 == 000 PACIA1716 variant
:pacia1716 is pacia1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b000 & b_0004=0b11111 { build pacia1716__PACpart; }
# C6.2.245 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1712 line 101196 MATCH xd503211f/mask=xfffffddf
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503233f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503233f/mask=xffffffff --status nodest
# CRm == 0011 && op2 == 001 PACIASP variant
:paciasp is paciasp__PACpart & PACIXSP_BTITARGETS & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b001 & b_0004=0b11111 { build paciasp__PACpart; }
# C6.2.245 PACIA, PACIA1716, PACIASP, PACIAZ, PACIZA page C6-1712 line 101196 MATCH xd503211f/mask=xfffffddf
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503231f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503231f/mask=xffffffff --status nodest
# CRm == 0011 && op2 == 000 PACIAZ variant
:paciaz is paciaz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b000 & b_0004=0b11111 { build paciaz__PACpart; }
# C6.2.246 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1715 line 101358 MATCH xdac10400/mask=xffffdc00
# CONSTRUCT xdac10400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac10400/mask=xfffffc00 --status noqemu
# Z == 0 PACIB variant
:pacib Rd_GPR64, Rn_GPR64xsp is pacib__PACpart & b_1431=0b110110101100000100 & b_1012=0b001 & b_13=0 & Rn_GPR64xsp & Rd_GPR64 { build pacib__PACpart; }
# C6.2.246 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1715 line 101358 MATCH xdac10400/mask=xffffdc00
# CONSTRUCT xdac127e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac127e0/mask=xffffffe0 --status noqemu
# Z == 1 && Rn = 11111 PACIZB variant
:pacizb Rd_GPR64 is pacizb__PACpart & b_1431=0b110110101100000100 & b_1012=0b001 & b_13=1 & b_0509=0b11111 & Rd_GPR64 { build pacizb__PACpart; }
# C6.2.246 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1715 line 101358 MATCH xd503215f/mask=xfffffddf
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503215f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503215f/mask=xffffffff --status nodest
# CRm == 0001 && op2 == 010 PACIB1716 variant
:pacib1716 is pacib1716__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0001 & b_0507=0b010 & b_0004=0b11111 { build pacib1716__PACpart; }
# C6.2.246 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1715 line 101358 MATCH xd503215f/mask=xfffffddf
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503237f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503237f/mask=xffffffff --status nodest
# CRm == 0011 && op2 == 011 PACIBSP variant
:pacibsp is pacibsp__PACpart & PACIXSP_BTITARGETS & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b011 & b_0004=0b11111 { build pacibsp__PACpart; }
# C6.2.246 PACIB, PACIB1716, PACIBSP, PACIBZ, PACIZB page C6-1715 line 101358 MATCH xd503215f/mask=xfffffddf
# C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f
# CONSTRUCT xd503235f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xd503235f/mask=xffffffff --status nodest
# CRm == 0011 && op2 == 010 PACIBZ variant
:pacibz is pacibz__PACpart & b_1231=0b11010101000000110010 & b_0811=0b0011 & b_0507=0b010 & b_0004=0b11111 { build pacibz__PACpart; }
# Prefetch instructions: PRFM/PRFUM decode the hint/target/stream fields from
# bits 4:3, 2:1 and 0 and forward them to the Hint_Prefetch pcodeop.
# C6.2.247 PRFM (immediate) page C6-1718 line 101520 MATCH xf9800000/mask=xffc00000
# CONSTRUCT xf9800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf9800000/mask=xffc00000 --status nomem
:prfm aa_prefetch, addrIndexed is size.ldstr=3 & b_2729=7 & v=0 & b_2425=1 & b_2223=2 & addrIndexed & b_0304 & b_0102 & b_00 & aa_prefetch { addr:8 = addrIndexed; hint:1 = b_0304; target:1 = b_0102; stream:1 = b_00; Hint_Prefetch(addr, hint, target, stream); }
# C6.2.248 PRFM (literal) page C6-1720 line 101616 MATCH xd8000000/mask=xff000000
# CONSTRUCT xd8000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd8000000/mask=xff000000 --status nodest --comment "qemuerr(illegal addresses cause qemu exit)"
:prfm aa_prefetch, Addr19 is size.ldstr=3 & b_2729=3 & v=0 & b_2425=0 & Addr19 & b_0304 & b_0102 & b_00 & aa_prefetch { addr:8 = &Addr19; hint:1 = b_0304; target:1 = b_0102; stream:1 = b_00; Hint_Prefetch(addr, hint, target, stream); }
# C6.2.249 PRFM (register) page C6-1722 line 101700 MATCH xf8a04800/mask=xffe04c00
# CONSTRUCT xf8a00800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8a00800/mask=xffe00c00 --status nomem
:prfm aa_prefetch, addrIndexed is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=1 & addrIndexed & b_1011=2 & b_0304 & b_0102 & b_00 & aa_prefetch { addr:8 = addrIndexed; hint:1 = b_0304; target:1 = b_0102; stream:1 = b_00; Hint_Prefetch(addr, hint, target, stream); }
# C6.2.250 PRFUM page C6-1724 line 101815 MATCH xf8800000/mask=xffe00c00
# CONSTRUCT xf8800000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8800000/mask=xfffffc00 --status nomem
# NOTE(review): this first PRFUM variant additionally constrains simm9=0, so it
# only covers the zero-offset encoding; the general case follows immediately.
:prfum aa_prefetch, addr_SIMM9 is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & addr_SIMM9 & simm9=0 & b_1011=0 & b_0304 & b_0102 & b_00 & aa_prefetch { addr:8 = addr_SIMM9; hint:1 = b_0304; target:1 = b_0102; stream:1 = b_00; Hint_Prefetch(addr, hint, target, stream); }
# C6.2.250 PRFUM page C6-1724 line 101815 MATCH xf8800000/mask=xffe00c00
# CONSTRUCT xf8800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8800000/mask=xffe00c00 --status nomem
:prfum aa_prefetch, addr_SIMM9 is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=2 & b_2121=0 & addr_SIMM9 & b_1011=0 & b_0304 & b_0102 & b_00 & aa_prefetch { addr:8 = addr_SIMM9; hint:1 = b_0304; target:1 = b_0102; stream:1 = b_00; Hint_Prefetch(addr, hint, target, stream); }
# C6.2.253 RBIT page C6-1728 line 102006 MATCH x5ac00000/mask=x7ffffc00
# CONSTRUCT x5ac00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x5ac00000/mask=xfffffc00 --status pass
:rbit Rd_GPR32, Rn_GPR32 is sf=0 & b_3030=1 & S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 {
# Classic bit-reversal: swap adjacent groups of 1, 2, 4, 8 bits, etc.
local tmp:4 = Rn_GPR32; tmp = (((tmp & 0xaaaaaaaa) >> 1) | ((tmp & 0x55555555) << 1)); tmp = (((tmp & 0xcccccccc) >> 2) | ((tmp & 0x33333333) << 2)); tmp = (((tmp & 0xf0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f) << 4)); tmp = (((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8)); tmp = ((tmp >> 16) | (tmp << 16)); Rd_GPR64 = zext(tmp); }
# C6.2.253 RBIT page C6-1728 line 102006 MATCH x5ac00000/mask=x7ffffc00
# CONSTRUCT xdac00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac00000/mask=xfffffc00 --status pass
:rbit Rd_GPR64, Rn_GPR64 is sf=1 & b_3030=1 &
S=0 & b_2428=0x1a & b_2123=6 & dp1.opcode2=0x0 & b_1015=0x0 & Rn_GPR64 & Rd_GPR64 {
# Classic bit-reversal: swap adjacent groups of 1, 2, 4, 8 bits, etc.
local tmp:8 = Rn_GPR64; tmp = (((tmp & 0xaaaaaaaaaaaaaaaa) >> 1) | ((tmp & 0x5555555555555555) << 1)); tmp = (((tmp & 0xcccccccccccccccc) >> 2) | ((tmp & 0x3333333333333333) << 2)); tmp = (((tmp & 0xf0f0f0f0f0f0f0f0) >> 4) | ((tmp & 0x0f0f0f0f0f0f0f0f) << 4)); tmp = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); tmp = (((tmp & 0xffff0000ffff0000) >> 16) | ((tmp & 0x0000ffff0000ffff) << 16)); Rd_GPR64 = ((tmp >> 32) | (tmp << 32)); }
# C6.2.254 RET page C6-1730 line 102090 MATCH xd65f0000/mask=xfffffc1f
# CONSTRUCT xd65f0000/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd65f0000/mask=xfffffc1f --status nodest
# RET with an explicit target register.
:ret Rn_GPR64 is b_2531=0x6b & b_2324=0 & b_2122=2 & b_1620=0x1f & b_1015=0 & Rn_GPR64 & b_0004=0 { pc = Rn_GPR64; return [pc]; }
# C6.2.254 RET page C6-1730 line 102090 MATCH xd65f0000/mask=xfffffc1f
# CONSTRUCT xd65f03c0/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd65f03c0/mask=xffffffff --status nodest
# Preferred disassembly when the target is the link register x30.
:ret is b_2531=0x6b & b_2324=0 & b_2122=2 & b_1620=0x1f & b_1015=0 & aa_Xn=30 & b_0004=0 { pc = x30; return [pc]; }
# C6.2.255 RETAA, RETAB page C6-1731 line 102135 MATCH xd65f0bff/mask=xfffffbff
# CONSTRUCT xd65f0bff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd65f0bff/mask=xffffffff --status nodest
# M == 0 RETAA variant
:retaa is retaa__PACpart & b_1131=0b110101100101111100001 & b_0009=0b1111111111 & b_10=0 { build retaa__PACpart; pc = x30; return [pc]; }
# C6.2.255 RETAA, RETAB page C6-1731 line 102135 MATCH xd65f0bff/mask=xfffffbff
# CONSTRUCT xd65f0fff/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd65f0fff/mask=xffffffff --status nodest
# M == 1 RETAB variant
:retab is retab__PACpart & b_1131=0b110101100101111100001 & b_0009=0b1111111111 & b_10=1 { build retab__PACpart; pc = x30; return [pc]; }
# C6.2.256 REV page C6-1732 line 102201 MATCH x5ac00800/mask=x7ffff800
# CONSTRUCT x5ac00800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x5ac00800/mask=xfffffc00 --status pass
# sf == 0 && opc == 10 32-bit variant (3210 -> 0123)
:rev Rd_GPR32, Rn_GPR32 is b_1230=0b1011010110000000000 & b_31=0 & b_1011=0b10 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp:4 = Rn_GPR32; tmp = (((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8)); tmp = ((tmp >> 16) | (tmp << 16)); Rd_GPR64 = zext(tmp); }
# C6.2.256 REV page C6-1732 line 102201 MATCH x5ac00800/mask=x7ffff800
# C6.2.259 REV64 page C6-1738 line 102502 MATCH xdac00c00/mask=xfffffc00
# CONSTRUCT xdac00c00/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xdac00c00/mask=xfffffc00 --status pass
# sf == 1 && opc == 11 64-bit variant (76543210 -> 01234567)
# NB equivalent to REV64, which is never the preferred disassembly
:rev Rd_GPR64, Rn_GPR64 is b_1230=0b1011010110000000000 & b_31=1 & b_1011=0b11 & Rn_GPR64 & Rd_GPR64 { local tmp:8 = Rn_GPR64; tmp = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); tmp = (((tmp & 0xffff0000ffff0000) >> 16) | ((tmp & 0x0000ffff0000ffff) << 16)); Rd_GPR64 = ((tmp >> 32) | (tmp << 32)); }
# C6.2.257 REV16 page C6-1734 line 102308 MATCH x5ac00400/mask=x7ffffc00
# CONSTRUCT x5ac00400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x5ac00400/mask=xfffffc00 --status pass
# sf == 0 (and opc == 01) 32-bit variant (3210 -> 2301)
:rev16 Rd_GPR32, Rn_GPR32 is b_1230=0b1011010110000000000 & b_31=0 & b_1011=0b01 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp:4 = Rn_GPR32; tmp = (((tmp & 0xff00ff00) >> 8) | ((tmp & 0x00ff00ff) << 8)); Rd_GPR64 = zext(tmp); }
# C6.2.257 REV16 page C6-1734 line 102308 MATCH x5ac00400/mask=x7ffffc00
# CONSTRUCT xdac00400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xdac00400/mask=xfffffc00 --status pass
# sf == 1 (and opc=01) 64-bit variant (76543210 -> 67452301)
:rev16 Rd_GPR64, Rn_GPR64 is b_1230=0b1011010110000000000 & b_31=1 & b_1011=0b01 &
Rn_GPR64 & Rd_GPR64 { local tmp:8 = Rn_GPR64; Rd_GPR64 = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); }
# C6.2.258 REV32 page C6-1736 line 102412 MATCH xdac00800/mask=xfffffc00
# C6.2.256 REV page C6-1732 line 102201 MATCH x5ac00800/mask=x7ffff800
# CONSTRUCT xdac00800/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xdac00800/mask=xfffffc00 --status pass
# sf == 1 (and opc == 10) 64-bit variant (76543210 -> 45670123)
:rev32 Rd_GPR64, Rn_GPR64 is b_1230=0b1011010110000000000 & b_31=1 & b_1011=0b10 & Rn_GPR64 & Rd_GPR64 { local tmp:8 = Rn_GPR64; tmp = (((tmp & 0xff00ff00ff00ff00) >> 8) | ((tmp & 0x00ff00ff00ff00ff) << 8)); Rd_GPR64 = (((tmp & 0xffff0000ffff0000) >> 16) | ((tmp & 0x0000ffff0000ffff) << 16)); }
# C6.2.260 RMIF page C6-1739 line 102566 MATCH xba000400/mask=xffe07c10
# CONSTRUCT xba000400/mask=xffe07c10 MATCHED 1 DOCUMENTED OPCODES
# RMIF: rotate Xn right by UImm6 and insert the low bits into NZCV under the
# 4-bit condition mask (via the set_NZCV helper).
:rmif Rn_GPR64, UImm6, NZCVImm_uimm4 is b_2131=0b10111010000 & b_1014=0b00001 & b_04=0b0 & Rn_GPR64 & UImm6 & NZCVImm_uimm4 { tmp:8 = Rn_GPR64 >> UImm6; condMask:1 = NZCVImm_uimm4; set_NZCV(tmp,condMask); }
# C6.2.261 ROR (immediate) page C6-1740 line 102633 MATCH x13800000/mask=x7fa00000
# C6.2.124 EXTR page C6-1477 line 87864 MATCH x13800000/mask=x7fa00000
# CONSTRUCT x13800000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x13800000/mask=xffe00000 --status pass
# ROR (immediate) is the EXTR alias with Rn == Rm.
:ror Rd_GPR32, Rn_GPR32, LSB_bitfield32_imm is sf=0 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=0 & b_21=0 & Rn=Rm & Rm_GPR32 & LSB_bitfield32_imm & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { result:4 = (Rn_GPR32 >> LSB_bitfield32_imm) | (Rn_GPR32 << (32 - LSB_bitfield32_imm)); Rd_GPR64 = zext(result); }
# C6.2.261 ROR (immediate) page C6-1740 line 102633 MATCH x13800000/mask=x7fa00000
# C6.2.124 EXTR page C6-1477 line 87864 MATCH x13800000/mask=x7fa00000
# CONSTRUCT x93c00000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x93c00000/mask=xffe00000 --status pass
:ror Rd_GPR64, Rn_GPR64, LSB_bitfield64_imm is sf=1 & b_2930=0 & b_2428=0x13 & b_2323=1 & n=1 & b_21=0 & Rn=Rm & Rm_GPR64 & LSB_bitfield64_imm & Rn_GPR64 & Rd_GPR64 { result:8 = (Rn_GPR64 >> LSB_bitfield64_imm) | (Rn_GPR64 << (64 - LSB_bitfield64_imm)); Rd_GPR64 = result; }
# C6.2.262 ROR (register) page C6-1742 line 102726 MATCH x1ac02c00/mask=x7fe0fc00
# C6.2.263 RORV page C6-1744 line 102821 MATCH x1ac02c00/mask=x7fe0fc00
# CONSTRUCT x1ac02c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x1ac02c00/mask=xffe0fc00 --status pass
# ROR (register): rotate amount is Rm modulo the datasize (mask 0x1f / 0x3f).
:ror Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0xb & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { rval:4 = Rm_GPR32 & 0x1f; tmp_1:4 = ( Rn_GPR32 >> rval) | ( Rn_GPR32 << ( 32 - rval ) ); Rd_GPR64 = zext(tmp_1); }
# C6.2.262 ROR (register) page C6-1742 line 102726 MATCH x1ac02c00/mask=x7fe0fc00
# C6.2.263 RORV page C6-1744 line 102821 MATCH x1ac02c00/mask=x7fe0fc00
# CONSTRUCT x9ac02c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x9ac02c00/mask=xffe0fc00 --status pass
:ror Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0xb & Rn_GPR64 & Rd_GPR64 { rval:8 = Rm_GPR64 & 0x3f; tmp_1:8 = ( Rn_GPR64 >> rval ) | ( Rn_GPR64 << ( 64 - rval ) ); Rd_GPR64 = tmp_1; }
# C6.2.264 SB page C6-1746 line 102913 MATCH xd50330ff/mask=xfffff0ff
# CONSTRUCT xd50330ff/mask=xfffff0ff MATCHED 1 DOCUMENTED OPCODES
:sb is b_1231=0xd5033 & b_0007=0xff { SpeculationBarrier(); }
# C6.2.265 SBC page C6-1747 line 102973 MATCH x5a000000/mask=x7fe0fc00
# C6.2.236 NGC page C6-1698 line 100437 MATCH x5a0003e0/mask=x7fe0ffe0
# CONSTRUCT x5a000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x5a000000/mask=xffe0fc00 --status pass --comment "flags"
# SBC: Rd = Rn - (Rm + NOT(carry)).
:sbc Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR32 & opcode2=0x0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rm_GPR32 + zext(!CY); Rd_GPR64 = zext(Rn_GPR32 - tmp); }
# C6.2.265 SBC page C6-1747 line 102973 MATCH x5a000000/mask=x7fe0fc00
# C6.2.236 NGC page C6-1698 line 100437 MATCH x5a0003e0/mask=x7fe0ffe0
# CONSTRUCT xda000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xda000000/mask=xffe0fc00 --status pass --comment "flags"
:sbc Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & op=1 & s=0 & b_2428=0x1a & b_2123=0 & Rm_GPR64 & opcode2=0x0 & Rn_GPR64 & Rd_GPR64 { tmp:8 = Rm_GPR64 + zext(!CY); Rd_GPR64 = Rn_GPR64 - tmp; }
# C6.2.266 SBCS page C6-1749 line 103074 MATCH x7a000000/mask=x7fe0fc00
# C6.2.237 NGCS page C6-1700 line 100524 MATCH x7a0003e0/mask=x7fe0ffe0
# CONSTRUCT x7a000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x7a000000/mask=xffe0fc00 --status pass --comment "flags"
# SBCS: as SBC but NZCV is computed; note the carry/overflow helper is given
# (Rn, ~Rm), while the data result subtracts the full borrow term tmp.
:sbcs Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rm_GPR32 & opcode2=0x0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rm_GPR32 + zext(!CY); add_with_carry_flags(Rn_GPR32, ~Rm_GPR32); Rd_GPR64 = zext(Rn_GPR32 - tmp); resultflags(Rd_GPR32); affectflags(); }
# C6.2.266 SBCS page C6-1749 line 103074 MATCH x7a000000/mask=x7fe0fc00
# C6.2.237 NGCS page C6-1700 line 100524 MATCH x7a0003e0/mask=x7fe0ffe0
# CONSTRUCT xfa000000/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xfa000000/mask=xffe0fc00 --status pass --comment "flags"
:sbcs Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & op=1 & s=1 & b_2428=0x1a & b_2123=0 & Rm_GPR64 & opcode2=0x0 & Rn_GPR64 & Rd_GPR64 { tmp:8 = Rm_GPR64 + zext(!CY); add_with_carry_flags(Rn_GPR64, ~Rm_GPR64); Rd_GPR64 = Rn_GPR64 - tmp; resultflags(Rd_GPR64); affectflags(); }
# C6.2.209 SBFIZ page C6-856 line 49751 KEEPWITH
# Operand helpers that recover the SBFIZ alias's lsb/width display values from
# the raw immr/imms fields (lsb = datasize - immr, width = imms + 1).
sbfiz_lsb: "#"^imm is ImmR [ imm = 32 - ImmR; ] { export *[const]:4 imm; }
sbfiz_width: "#"^imm is ImmS [ imm = ImmS + 1; ] { export *[const]:4 imm; }
sbfiz_lsb64: "#"^imm is ImmR [ imm = 64 - ImmR; ] { export *[const]:4 imm; }
# C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000
# C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00
# C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000
# C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000
# C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00
# C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00
# CONSTRUCT x13000002/mask=xffe08006 MATCHED 6 DOCUMENTED OPCODES
# AUNIT --inst x13000002/mask=xffe08006 --status pass
# Special alias case of sbfm for when ImmS < ImmR-1
# if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue();
:sbfiz Rd_GPR32, Rn_GPR32, sbfiz_lsb, sbfiz_width is sbfiz_lsb & sbfiz_width & ImmS_LT_ImmR=1 & ImmS_EQ_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ImmSConst32 & DecodeWMask32 & DecodeTMask32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local wmask:4 = DecodeWMask32; local tmask:4 = DecodeTMask32; local src:4 = Rn_GPR32; local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; local top:4 = (((src>>ImmSConst32)&0x1)*(-1))&0xffffffff; Rd_GPR64 = zext((top & ~(tmask)) | (bot & tmask)); }
# C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000
# C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00
# C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000
# C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000
# C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00
# C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00
# C6.2.371 SXTW page C6-1968 line 115498 MATCH x93407c00/mask=xfffffc00
# CONSTRUCT x93400002/mask=xffc00006 MATCHED 7 DOCUMENTED OPCODES
# AUNIT --inst x93400002/mask=xffc00006 --status pass
# Special alias case of sbfm for when ImmS < ImmR-1
:sbfiz Rd_GPR64, Rn_GPR64, sbfiz_lsb64, sbfiz_width is sbfiz_lsb64 & sbfiz_width & ImmS_LT_ImmR=1 & ImmS_EQ_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & ImmSConst64 & DecodeWMask64 & DecodeTMask64 & Rn_GPR64 & Rd_GPR64 { local
wmask:8 = DecodeWMask64; local tmask:8 = DecodeTMask64; local src:8 = Rn_GPR64; local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask; local top:8 = ((src>>ImmSConst64)&0x1)*(-1); Rd_GPR64 = (top & ~(tmask)) | (bot & tmask); } # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # CONSTRUCT x13000000/mask=xffe08000 MATCHED 6 DOCUMENTED OPCODES # AUNIT --inst x13000000/mask=xffe08000 --status pass # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :sbfm Rd_GPR32, Rn_GPR32, ImmRConst32, ImmSConst32 is sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ImmSConst32 & DecodeWMask32 & DecodeTMask32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local wmask:4 = DecodeWMask32; local tmask:4 = DecodeTMask32; local src:4 = Rn_GPR32; local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; local top:4 = (((src>>ImmSConst32)&0x1)*(-1))&0xffffffff; Rd_GPR64 = zext((top & ~(tmask)) | (bot & tmask)); } # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # C6.2.371 SXTW page C6-1968 line 115498 MATCH x93407c00/mask=xfffffc00 # CONSTRUCT x93400000/mask=xffc00000 MATCHED 7 DOCUMENTED OPCODES # AUNIT --inst x93400000/mask=xffc00000 --status pass 
:sbfm Rd_GPR64, Rn_GPR64, ImmRConst64, ImmSConst64 is sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & ImmSConst64 & DecodeWMask64 & DecodeTMask64 & Rn_GPR64 & Rd_GPR64 { local wmask:8 = DecodeWMask64; local tmask:8 = DecodeTMask64; local src:8 = Rn_GPR64; local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask; local top:8 = ((src>>ImmSConst64)&0x1)*(-1); Rd_GPR64 = (top & ~(tmask)) | (bot & tmask); } # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # CONSTRUCT x13000004/mask=xffe08006 MATCHED 6 DOCUMENTED OPCODES # AUNIT --inst x13000004/mask=xffe08006 --status pass # Special cases when just getting the 0 bit # >> Not sure about the above old comment one, this is actually for getting one bit from Rn # SBFX alias of SMFM is used when ImmS >= ImmR # We split the '>=' into two separate cases # Here ImmS = ImmR (for 32-bit) # Alias for sbfm as determined by BFXPreferred() # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :sbfx Rd_GPR32, Rn_GPR32, ImmRConst32, BFextractWidth32 is ImmS_LT_ImmR=0 & ImmS_EQ_ImmR=1 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & BFextractWidth32 & ImmSConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = ((Rn_GPR32 >> ImmSConst32) & 0x1) * 0xffffffff; Rd_GPR64 = zext(tmp); } # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 
# C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # C6.2.371 SXTW page C6-1968 line 115498 MATCH x93407c00/mask=xfffffc00 # CONSTRUCT x93400004/mask=xffc00006 MATCHED 7 DOCUMENTED OPCODES # AUNIT --inst x93400004/mask=xffc00006 --status pass # Now, the case where ImmS = ImmR (for 64-bit) :sbfx Rd_GPR64, Rn_GPR64, ImmRConst64, BFextractWidth64 is ImmS_LT_ImmR=0 & ImmS_EQ_ImmR=1 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & BFextractWidth64 & ImmRConst64 & ImmSConst64 & Rn_GPR64 & Rd_GPR64 { tmp:8 = ((Rn_GPR64 >> ImmSConst64) & 0x1) * 0xffffffffffffffff; Rd_GPR64 = tmp; } # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # CONSTRUCT x13000000/mask=xffe08006 MATCHED 6 DOCUMENTED OPCODES # AUNIT --inst x13000000/mask=xffe08006 --status pass # Now, the case where ImmS > ImmR (for 32-bit) # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :sbfx Rd_GPR32, Rn_GPR32, ImmRConst32, BFextractWidth32 is ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & BFextractWidth32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { src:4 = Rn_GPR32; tmp:4 = src << (31 - (ImmRConst32 + BFextractWidth32 - 1)); tmp = tmp s>> (32 - BFextractWidth32); Rd_GPR64 = zext(tmp); } # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 
# MATCH x13000000/mask=x7f800000
# C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00
# C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00
# C6.2.371 SXTW page C6-1968 line 115498 MATCH x93407c00/mask=xfffffc00
# CONSTRUCT x93400000/mask=xffc00000 MATCHED 7 DOCUMENTED OPCODES
# AUNIT --inst x93400000/mask=xffc00000 --status pass
# Finally, the case where ImmS > ImmR (for 64-bit)
# Sign-extract BFextractWidth64 bits starting at bit ImmRConst64: shift the
# field up to bit 63, then arithmetic-shift back down to sign-extend.
:sbfx Rd_GPR64, Rn_GPR64, ImmRConst64, BFextractWidth64
is sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & ImmRConst64 & BFextractWidth64 & (ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0) & Rn_GPR64 & Rd_GPR64
{
	src:8 = Rn_GPR64;
	tmp:8 = src << (63 - (ImmRConst64 + BFextractWidth64 - 1));
	tmp = tmp s>> (64 - BFextractWidth64);
	Rd_GPR64 = tmp;
}

# C6.2.270 SDIV page C6-1758 line 103515 MATCH x1ac00c00/mask=x7fe0fc00
# CONSTRUCT x1ac00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x1ac00c00/mask=xffe0fc00 --status pass
# FIX: restored the <next> branch label that had been stripped, leaving an
# invalid "goto ;".  With the label, a zero divisor skips the divide and the
# zero-initialized tmp_1 is written, matching the ARM divide-by-zero rule.
:sdiv Rd_GPR32, Rn_GPR32, Rm_GPR32
is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x3 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
	local tmp_1:4 = 0;
	if (Rm_GPR32 == 0) goto <next>;
	tmp_1 = Rn_GPR32 s/ Rm_GPR32;
	<next>
	Rd_GPR64 = zext(tmp_1);
}

# C6.2.270 SDIV page C6-1758 line 103515 MATCH x1ac00c00/mask=x7fe0fc00
# CONSTRUCT x9ac00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x9ac00c00/mask=xffe0fc00 --status pass
# FIX: same restored <next> label as the 32-bit form above.
:sdiv Rd_GPR64, Rn_GPR64, Rm_GPR64
is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x3 & Rn_GPR64 & Rd_GPR64
{
	local tmp_1:8 = 0;
	if (Rm_GPR64 == 0) goto <next>;
	tmp_1 = Rn_GPR64 s/ Rm_GPR64;
	<next>
	Rd_GPR64 = tmp_1;
}

# C6.2.271 SETF8, SETF16 page C6-1759 line 103584 MATCH x3a00080d/mask=xffffbc1f
# CONSTRUCT x3a00080d/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES
:setf8 aa_Wn
is b_1531=0b00111010000000000 & b_14=0 & b_1013=0b0010 & b_0004=0b01101 & aa_Wn
{
	NG = ((aa_Wn:1 >> 7) & 1) == 1;
	ZR = (aa_Wn:1 == 0);
	OV = (((aa_Wn >> 7) & 1) ^ ((aa_Wn >>8) & 1)) == 1;
}
# C6.2.271 SETF8, SETF16 page C6-1759 line
103584 MATCH x3a00080d/mask=xffffbc1f # CONSTRUCT x3a00480d/mask=xfffffc1f MATCHED 1 DOCUMENTED OPCODES :setf16 aa_Wn is b_1531=0b00111010000000000 & b_14=1 & b_1013=0b0010 & b_0004=0b01101 & aa_Wn { NG = ((aa_Wn:2 >> 15) & 1) == 1; ZR = (aa_Wn:2 == 0); OV = (((aa_Wn >> 15) & 1) ^ ((aa_Wn >>16) & 1)) == 1; } # C6.2.276 SETP, SETM, SETE page C6-1780 line 105072 MATCH x19c00400/mask=x3fe03c00 # C6.2.272 SETGP, SETGM, SETGE page C6-1760 line 103652 MATCH x1dc00400/mask=x3fe03c00 # CONSTRUCT x1dc00400/mask=x3fe03c00 MATCHED 5 DOCUMENTED OPCODES setPhase:"p" is b_1415=0 {export 0:1;} setPhase:"m" is b_1415=1 {export 1:1;} setPhase:"e" is b_1415=2 {export 2:1;} setType: "" is b_1013=0x1 {export 1:1; } setType: "n" is b_1013=0x9 {export 9:1; } setType: "t" is b_1013=0x5 {export 5:1; } setType: "tn" is b_1013=0xd {export 13:1; } define pcodeop memorySetTag; :setg^setPhase^setType [Rd_GPR64]!, Rn_GPR64!, Rs_GPR64 is size.ldstr & b_2129=0xee & Rs_GPR64 & b_1415 <3 & setPhase & setType & Rn_GPR64 & Rd_GPR64 { memorySetTag(Rd_GPR64, Rn_GPR64, Rs_GPR64, setPhase, setType); } # C6.2.276 SETP, SETM, SETE page C6-1780 line 105072 MATCH x19c00400/mask=x3fe03c00 # CONSTRUCT x19c00400/mask=x3fe03c00 MATCHED 5 DOCUMENTED OPCODES define pcodeop memorySet; :setp^setPhase^setType [Rd_GPR64]!, Rn_GPR64!, Rs_GPR64 is size.ldstr & b_2129=0xce & Rs_GPR64 & b_1415 <3 & setPhase & setType & Rn_GPR64 & Rd_GPR64 { memorySet(Rd_GPR64, Rn_GPR64, Rs_GPR64, setPhase, setType); } # C6.2.280 SEV page C6-1796 line 106224 MATCH xd503209f/mask=xffffffff # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd503209f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd503209f/mask=xffffffff --status nodest :sev is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=4 & Rt=0x1f { SendEvent(); } # C6.2.281 SEVL page C6-1797 line 106259 MATCH xd50320bf/mask=xffffffff # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT 
xd50320bf/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50320bf/mask=xffffffff --status nodest :sevl is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=5 & Rt=0x1f { SendEventLocally(); } # C6.2.282 SMADDL page C6-1798 line 106294 MATCH x9b200000/mask=xffe08000 # CONSTRUCT x9b200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9b200000/mask=xffe08000 --status pass :smaddl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=0 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = sext(Rn_GPR32); tmp_4:8 = sext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; tmp_1:8 = Ra_GPR64 + tmp_2; Rd_GPR64 = tmp_1; } # C6.2.283 SMC page C6-1800 line 106384 MATCH xd4000003/mask=xffe0001f # CONSTRUCT xd4000003/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd4000003/mask=xffe0001f --status nodest :smc imm16 is b_2431=0xd4 & excCode=0 & imm16 & excCode2=0 & ll=3 { CallSecureMonitor(imm16:2); } # C6.2.284 SMNEGL page C6-1801 line 106433 MATCH x9b20fc00/mask=xffe0fc00 # C6.2.287 SMSUBL page C6-1806 line 106711 MATCH x9b208000/mask=xffe08000 # CONSTRUCT x9b20fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x9b20fc00/mask=xffe0fc00 --status pass :smnegl Rd_GPR64, Rn_GPR32, Rm_GPR32 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = sext(Rn_GPR32); tmp_4:8 = sext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; subflags0(tmp_2); tmp_1:8 = -tmp_2; Rd_GPR64 = tmp_1; } # C6.2.287 SMSUBL page C6-1806 line 106711 MATCH x9b208000/mask=xffe08000 # CONSTRUCT x9b208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9b208000/mask=xffe08000 --status pass :smsubl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=1 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = sext(Rn_GPR32); tmp_4:8 = sext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; tmp_1:8 = Ra_GPR64 - 
tmp_2; Rd_GPR64 = tmp_1; } # C6.2.285 SMSTART page C6-1802 line 106497 MATCH xd503417f/mask=xfffff9ff # C6.2.229 MSR (immediate) page C6-1684 line 99649 MATCH xd500401f/mask=xfff8f01f # CONSTRUCT xd503417f/mask=xfffff9ff MATCHED 2 DOCUMENTED OPCODES # xd503417f/mask=xfffff9ff NOT MATCHED BY ANY CONSTRUCTOR # b_0031=110101010000001101000..101111111 define pcodeop sveStreamingModeStart; define pcodeop sveStreamingModeStop; SVAmodeOp: "SM" is b_0911=0x1 & b_08 { svcr[0,1] = b_08; } SVAmodeOp: "ZA" is b_0911=0x2 & b_08 { svcr[1,1] = b_08; } SVAmodeOp: "" is b_0911=0x3 & b_08 { svcr[0,1] = b_08; svcr[1,1] = b_08; } :smstart "{"^SVAmodeOp^"}" is b_1131=0x1aa068 & SVAmodeOp & b_08=1 & b_0507=0x3 & op4=0x1f { build SVAmodeOp; sveStreamingModeStart(); } # C6.2.286 SMSTOP page C6-1804 line 106604 MATCH xd503407f/mask=xfffff9ff # C6.2.229 MSR (immediate) page C6-1684 line 99649 MATCH xd500401f/mask=xfff8f01f # CONSTRUCT xd503407f/mask=xfffff9ff MATCHED 2 DOCUMENTED OPCODES # xd503407f/mask=xfffff9ff NOT MATCHED BY ANY CONSTRUCTOR # b_0031=110101010000001101000..001111111 :smstop "{"^SVAmodeOp^"}" is b_1131=0x1aa068 & SVAmodeOp & b_08=0 & b_0507=0x3 & op4=0x1f { sveStreamingModeStop(); } # C6.2.288 SMULH page C6-1808 line 106800 MATCH x9b400000/mask=xffe08000 # CONSTRUCT x9b400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9b400000/mask=xffe08000 --status pass # To enforce SHOULD BE ONE fields add: b_1014=0b11111 :smulh Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & op.dp3=0 & b_2428=0x1b & op.dp3_op31=2 & Rm_GPR64 & op.dp3_o0=0 & Ra & Rn_GPR64 & Rd_GPR64 { local tmpq:16 = sext(Rn_GPR64) * sext(Rm_GPR64); Rd_GPR64 = tmpq(8); } # C6.2.289 SMULL page C6-1809 line 106867 MATCH x9b207c00/mask=xffe0fc00 # C6.2.282 SMADDL page C6-1798 line 106294 MATCH x9b200000/mask=xffe08000 # CONSTRUCT x9b207c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x9b207c00/mask=xffe0fc00 --status pass :smull Rd_GPR64, Rn_GPR32, Rm_GPR32 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & 
op.dp3_op31=1 & Rm_GPR32 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = sext(Rn_GPR32); tmp_4:8 = sext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; tmp_1:8 = tmp_2; Rd_GPR64 = tmp_1; } # C6.2.295 STADDB, STADDLB page C6-1818 line 107383 MATCH x3820001f/mask=xffa0fc1f # C6.2.298 STCLRB, STCLRLB page C6-1824 line 107670 MATCH x3820101f/mask=xffa0fc1f # C6.2.301 STEORB, STEORLB page C6-1830 line 107956 MATCH x3820201f/mask=xffa0fc1f # C6.2.328 STSETB, STSETLB page C6-1886 line 111044 MATCH x3820301f/mask=xffa0fc1f # C6.2.331 STSMAXB, STSMAXLB page C6-1892 line 111330 MATCH x3820401f/mask=xffa0fc1f # C6.2.334 STSMINB, STSMINLB page C6-1898 line 111623 MATCH x3820501f/mask=xffa0fc1f # C6.2.340 STUMAXB, STUMAXLB page C6-1910 line 112231 MATCH x3820601f/mask=xffa0fc1f # C6.2.343 STUMINB, STUMINLB page C6-1916 line 112525 MATCH x3820701f/mask=xffa0fc1f # C6.2.133 LDADDB, LDADDAB, LDADDALB, LDADDLB page C6-1489 line 88545 MATCH x38200000/mask=xff20fc00 # C6.2.152 LDCLRB, LDCLRAB, LDCLRALB, LDCLRLB page C6-1524 line 90495 MATCH x38201000/mask=xff20fc00 # C6.2.155 LDEORB, LDEORAB, LDEORALB, LDEORLB page C6-1531 line 90916 MATCH x38202000/mask=xff20fc00 # C6.2.181 LDSETB, LDSETAB, LDSETALB, LDSETLB page C6-1590 line 94403 MATCH x38203000/mask=xff20fc00 # C6.2.184 LDSMAXB, LDSMAXAB, LDSMAXALB, LDSMAXLB page C6-1597 line 94824 MATCH x38204000/mask=xff20fc00 # C6.2.187 LDSMINB, LDSMINAB, LDSMINALB, LDSMINLB page C6-1604 line 95245 MATCH x38205000/mask=xff20fc00 # C6.2.196 LDUMAXB, LDUMAXAB, LDUMAXALB, LDUMAXLB page C6-1623 line 96362 MATCH x38206000/mask=xff20fc00 # C6.2.199 LDUMINB, LDUMINAB, LDUMINALB, LDUMINLB page C6-1630 line 96783 MATCH x38207000/mask=xff20fc00 # CONSTRUCT x3820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES # AUNIT --inst x3820001f/mask=xffa08c1f --status nomem # size=0b00 (3031) :st^ls_opc1^ls_lor^"b" aa_Ws, [Rn_GPR64xsp] is b_3031=0b00 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc1 & ls_lor & aa_Ws & 
Rn_GPR64xsp { build ls_opc1; build ls_lor; } # C6.2.296 STADDH, STADDLH page C6-1820 line 107469 MATCH x7820001f/mask=xffa0fc1f # C6.2.299 STCLRH, STCLRLH page C6-1826 line 107756 MATCH x7820101f/mask=xffa0fc1f # C6.2.302 STEORH, STEORLH page C6-1832 line 108042 MATCH x7820201f/mask=xffa0fc1f # C6.2.329 STSETH, STSETLH page C6-1888 line 111130 MATCH x7820301f/mask=xffa0fc1f # C6.2.332 STSMAXH, STSMAXLH page C6-1894 line 111419 MATCH x7820401f/mask=xffa0fc1f # C6.2.335 STSMINH, STSMINLH page C6-1900 line 111712 MATCH x7820501f/mask=xffa0fc1f # C6.2.341 STUMAXH, STUMAXLH page C6-1912 line 112320 MATCH x7820601f/mask=xffa0fc1f # C6.2.344 STUMINH, STUMINLH page C6-1918 line 112614 MATCH x7820701f/mask=xffa0fc1f # C6.2.134 LDADDH, LDADDAH, LDADDALH, LDADDLH page C6-1491 line 88670 MATCH x78200000/mask=xff20fc00 # C6.2.153 LDCLRH, LDCLRAH, LDCLRALH, LDCLRLH page C6-1526 line 90621 MATCH x78201000/mask=xff20fc00 # C6.2.156 LDEORH, LDEORAH, LDEORALH, LDEORLH page C6-1533 line 91042 MATCH x78202000/mask=xff20fc00 # C6.2.182 LDSETH, LDSETAH, LDSETALH, LDSETLH page C6-1592 line 94529 MATCH x78203000/mask=xff20fc00 # C6.2.185 LDSMAXH, LDSMAXAH, LDSMAXALH, LDSMAXLH page C6-1599 line 94950 MATCH x78204000/mask=xff20fc00 # C6.2.188 LDSMINH, LDSMINAH, LDSMINALH, LDSMINLH page C6-1606 line 95371 MATCH x78205000/mask=xff20fc00 # C6.2.197 LDUMAXH, LDUMAXAH, LDUMAXALH, LDUMAXLH page C6-1625 line 96488 MATCH x78206000/mask=xff20fc00 # C6.2.200 LDUMINH, LDUMINAH, LDUMINALH, LDUMINLH page C6-1632 line 96909 MATCH x78207000/mask=xff20fc00 # CONSTRUCT x7820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES # AUNIT --inst x7820001f/mask=xffa08c1f --status nomem # size=0b01 (3031) :st^ls_opc2^ls_lor^"h" aa_Ws, [Rn_GPR64xsp] is b_3031=0b01 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc2 & ls_lor & aa_Ws & Rn_GPR64xsp { build ls_opc2; build ls_lor; } # C6.2.297 STADD, STADDL page C6-1822 line 107555 MATCH xb820001f/mask=xbfa0fc1f # C6.2.300 STCLR, STCLRL 
page C6-1828 line 107842 MATCH xb820101f/mask=xbfa0fc1f # C6.2.303 STEOR, STEORL page C6-1834 line 108128 MATCH xb820201f/mask=xbfa0fc1f # C6.2.330 STSET, STSETL page C6-1890 line 111216 MATCH xb820301f/mask=xbfa0fc1f # C6.2.333 STSMAX, STSMAXL page C6-1896 line 111508 MATCH xb820401f/mask=xbfa0fc1f # C6.2.336 STSMIN, STSMINL page C6-1902 line 111801 MATCH xb820501f/mask=xbfa0fc1f # C6.2.342 STUMAX, STUMAXL page C6-1914 line 112409 MATCH xb820601f/mask=xbfa0fc1f # C6.2.345 STUMIN, STUMINL page C6-1920 line 112703 MATCH xb820701f/mask=xbfa0fc1f # C6.2.135 LDADD, LDADDA, LDADDAL, LDADDL page C6-1493 line 88796 MATCH xb8200000/mask=xbf20fc00 # C6.2.154 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-1528 line 90747 MATCH xb8201000/mask=xbf20fc00 # C6.2.157 LDEOR, LDEORA, LDEORAL, LDEORL page C6-1535 line 91168 MATCH xb8202000/mask=xbf20fc00 # C6.2.183 LDSET, LDSETA, LDSETAL, LDSETL page C6-1594 line 94655 MATCH xb8203000/mask=xbf20fc00 # C6.2.186 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1601 line 95076 MATCH xb8204000/mask=xbf20fc00 # C6.2.189 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1608 line 95497 MATCH xb8205000/mask=xbf20fc00 # C6.2.198 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1627 line 96614 MATCH xb8206000/mask=xbf20fc00 # C6.2.201 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1634 line 97035 MATCH xb8207000/mask=xbf20fc00 # CONSTRUCT xb820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES # AUNIT --inst xb820001f/mask=xffa08c1f --status nomem # size=0b10 (3031) :st^ls_opc4^ls_lor aa_Ws, [Rn_GPR64xsp] is b_3031=0b10 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc4 & ls_lor & aa_Ws & Rn_GPR64xsp { build ls_opc4; build ls_lor; } # C6.2.297 STADD, STADDL page C6-1822 line 107555 MATCH xb820001f/mask=xbfa0fc1f # C6.2.300 STCLR, STCLRL page C6-1828 line 107842 MATCH xb820101f/mask=xbfa0fc1f # C6.2.303 STEOR, STEORL page C6-1834 line 108128 MATCH xb820201f/mask=xbfa0fc1f # C6.2.330 STSET, STSETL page C6-1890 line 111216 MATCH 
xb820301f/mask=xbfa0fc1f # C6.2.333 STSMAX, STSMAXL page C6-1896 line 111508 MATCH xb820401f/mask=xbfa0fc1f # C6.2.336 STSMIN, STSMINL page C6-1902 line 111801 MATCH xb820501f/mask=xbfa0fc1f # C6.2.342 STUMAX, STUMAXL page C6-1914 line 112409 MATCH xb820601f/mask=xbfa0fc1f # C6.2.345 STUMIN, STUMINL page C6-1920 line 112703 MATCH xb820701f/mask=xbfa0fc1f # C6.2.135 LDADD, LDADDA, LDADDAL, LDADDL page C6-1493 line 88796 MATCH xb8200000/mask=xbf20fc00 # C6.2.154 LDCLR, LDCLRA, LDCLRAL, LDCLRL page C6-1528 line 90747 MATCH xb8201000/mask=xbf20fc00 # C6.2.157 LDEOR, LDEORA, LDEORAL, LDEORL page C6-1535 line 91168 MATCH xb8202000/mask=xbf20fc00 # C6.2.183 LDSET, LDSETA, LDSETAL, LDSETL page C6-1594 line 94655 MATCH xb8203000/mask=xbf20fc00 # C6.2.186 LDSMAX, LDSMAXA, LDSMAXAL, LDSMAXL page C6-1601 line 95076 MATCH xb8204000/mask=xbf20fc00 # C6.2.189 LDSMIN, LDSMINA, LDSMINAL, LDSMINL page C6-1608 line 95497 MATCH xb8205000/mask=xbf20fc00 # C6.2.198 LDUMAX, LDUMAXA, LDUMAXAL, LDUMAXL page C6-1627 line 96614 MATCH xb8206000/mask=xbf20fc00 # C6.2.201 LDUMIN, LDUMINA, LDUMINAL, LDUMINL page C6-1634 line 97035 MATCH xb8207000/mask=xbf20fc00 # CONSTRUCT xf820001f/mask=xffa08c1f MATCHED 16 DOCUMENTED OPCODES # AUNIT --inst xf820001f/mask=xffa08c1f --status nomem # size=0b11 (3031) :st^ls_opc8^ls_lor aa_Xs, [Rn_GPR64xsp] is b_3031=0b11 & b_2429=0b111000 & b_23=0 & b_21=1 & b_1515=0 & b_1011=0b00 & b_0004=0b11111 & ls_opc8 & ls_lor & aa_Xs & Rn_GPR64xsp { build ls_opc8; build ls_lor; } # C6.2.307 STLLRB page C6-1842 line 108609 MATCH x08800000/mask=xffe08000 # CONSTRUCT x08800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08800000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b00 (3031) :stllrb aa_Wt, [Rn_GPR64xsp] is b_3031=0b00 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp { *:1 Rn_GPR64xsp = aa_Wt:1; LORelease(); } # C6.2.308 STLLRH page C6-1843 line 108673 MATCH 
x48800000/mask=xffe08000 # CONSTRUCT x48800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x48800000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b01 (3031) :stllrh aa_Wt, [Rn_GPR64xsp] is b_3031=0b01 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp { *:2 Rn_GPR64xsp = aa_Wt:2; LORelease(); } # C6.2.309 STLLR page C6-1844 line 108737 MATCH x88800000/mask=xbfe08000 # CONSTRUCT x88800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x88800000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b10 (3031) :stllr aa_Wt, [Rn_GPR64xsp] is b_3031=0b10 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Wt & Rn_GPR64xsp { *:4 Rn_GPR64xsp = aa_Wt; LORelease(); } # C6.2.309 STLLR page C6-1844 line 108737 MATCH x88800000/mask=xbfe08000 # CONSTRUCT xc8800000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xc8800000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 # size=0b11 (3031) :stllr aa_Xt, [Rn_GPR64xsp] is b_3031=0b11 & b_2329=0b0010001 & b_22=0 & b_21=0 & b_15=0 & aa_Xt & Rn_GPR64xsp { *:8 Rn_GPR64xsp = aa_Xt; LORelease(); } # C6.2.310 STLR page C6-1846 line 108821 MATCH x88808000/mask=xbfe08000 # CONSTRUCT xc8808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xc8808000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :stlr Rt_GPR64, addrReg is size.ldstr=3 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR64 { *addrReg = Rt_GPR64; } # C6.2.310 STLR page C6-1846 line 108821 MATCH x88808000/mask=xbfe08000 # CONSTRUCT x88808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x88808000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :stlr Rt_GPR32, addrReg is size.ldstr=2 & b_2429=0x8 & b_23=1 & L=0 & 
b_21=0 & b_15=1 & addrReg & Rt_GPR32 { *addrReg = Rt_GPR32; } # C6.2.311 STLRB page C6-1848 line 108904 MATCH x08808000/mask=xffe08000 # CONSTRUCT x08808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x08808000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :stlrb Rt_GPR32, addrReg is size.ldstr=0 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 { *addrReg = Rt_GPR32; } # C6.2.312 STLRH page C6-1849 line 108967 MATCH x48808000/mask=xffe08000 # CONSTRUCT x48808000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x48808000/mask=xffe08000 --status nomem # To enforce SHOULD BE ONE fields add: b_1620=0b11111 & b_1014=0b11111 :stlrh Rt_GPR32, addrReg is size.ldstr=1 & b_2429=0x8 & b_23=1 & L=0 & b_21=0 & b_15=1 & addrReg & Rt_GPR32 { *addrReg = Rt_GPR32; } # C6.2.313 STLUR page C6-1850 line 109030 MATCH x99000000/mask=xbfe00c00 # CONSTRUCT x99000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES :stlur aa_Wt, addr_SIMM9 is b_3031=0b10 & b_2129=0b011001000 & b_1011=0b00 & addr_SIMM9 & aa_Wt { *addr_SIMM9 = aa_Wt; } # C6.2.313 STLUR page C6-1850 line 109030 MATCH x99000000/mask=xbfe00c00 # CONSTRUCT xd9000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES :stlur aa_Xt, addr_SIMM9 is b_3031=0b11 & b_2129=0b011001000 & b_1011=0b00 & addr_SIMM9 & aa_Xt { *addr_SIMM9 = aa_Xt; } # C6.2.314 STLURB page C6-1852 line 109129 MATCH x19000000/mask=xffe00c00 # CONSTRUCT x19000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x19000000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR :stlurb aa_Wt, addr_SIMM9 is b_2131=0b00011001000 & b_1011=0b00 & addr_SIMM9 & aa_Wt { *addr_SIMM9 = aa_Wt:1; } # C6.2.315 STLURH page C6-1854 line 109217 MATCH x59000000/mask=xffe00c00 # CONSTRUCT x59000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # x59000000/mask=xffe00c00 NOT MATCHED BY ANY CONSTRUCTOR :stlurh aa_Wt, addr_SIMM9 is b_2131=0b01011001000 & b_1011=0b00 & addr_SIMM9 & aa_Wt { *addr_SIMM9 = aa_Wt:2; } # 
# C6.2.316 STLXP page C6-1856 line 109305 MATCH x88208000/mask=xbfe08000
# CONSTRUCT xc8208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8208000/mask=xffe08000 --status nomem
# Store-release exclusive pair, 64-bit registers.
# FIX: restored the <next> branch label stripped from "goto ;" (a failed
# exclusive-monitor check must skip the stores, leaving status = 1), and
# corrected the second store's offset from +4 to +8 -- Rt2 of the 64-bit
# STLXP form is written at [Xn + 8] per the Arm ARM (C6.2.316).
:stlxp Rs_GPR32, Rt_GPR64, Rt2_GPR64, addrReg
is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=1 & Rt2_GPR64 & addrReg & Rt_GPR64 & Rs_GPR64
{
	status:1 = 1;
	rsize:1 = 16;
	check:1 = ExclusiveMonitorPass(addrReg, rsize);
	if (!check) goto <next>;
	*addrReg = Rt_GPR64;
	*(addrReg + 8) = Rt2_GPR64;
	status = ExclusiveMonitorsStatus();
	<next>
	Rs_GPR64 = zext(status);
}

# C6.2.316 STLXP page C6-1856 line 109305 MATCH x88208000/mask=xbfe08000
# CONSTRUCT x88208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88208000/mask=xffe08000 --status nomem
# Store-release exclusive pair, 32-bit registers (second word at +4).
# FIX: restored the <next> branch label stripped from "goto ;".
:stlxp Rs_GPR32, Rt_GPR32, Rt2_GPR32, addrReg
is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=1 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rs_GPR64
{
	status:1 = 1;
	rsize:1 = 16;
	check:1 = ExclusiveMonitorPass(addrReg, rsize);
	if (!check) goto <next>;
	*addrReg = Rt_GPR32;
	*(addrReg + 4) = Rt2_GPR32;
	status = ExclusiveMonitorsStatus();
	<next>
	Rs_GPR64 = zext(status);
}

# C6.2.317 STLXR page C6-1859 line 109472 MATCH x88008000/mask=xbfe08000
# CONSTRUCT xc8008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8008000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111
# FIX: restored the <next> branch label stripped from "goto ;".
:stlxr Rs_GPR32, Rt_GPR64, addrReg
is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR64 & Rs_GPR64
{
	status:1 = 1;
	rsize:1 = 16;
	check:1 = ExclusiveMonitorPass(addrReg, rsize);
	if (!check) goto <next>;
	*addrReg = Rt_GPR64;
	status = ExclusiveMonitorsStatus();
	<next>
	Rs_GPR64 = zext(status);
}

# C6.2.317 STLXR page C6-1859 line 109472 MATCH x88008000/mask=xbfe08000
# CONSTRUCT x88008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88008000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111
:stlxr Rs_GPR32, Rt_GPR32, addrReg is size.ldstr=2 &
b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR32 & Rs_GPR64
{
	status:1 = 1;
	rsize:1 = 16;
	check:1 = ExclusiveMonitorPass(addrReg, rsize);
	if (!check) goto <next>;
	*addrReg = Rt_GPR32;
	status = ExclusiveMonitorsStatus();
	<next>
	Rs_GPR64 = zext(status);
}
# FIX (constructor above): restored the <next> branch label that had been
# stripped, leaving an invalid "goto ;" -- a failed exclusive-monitor check
# must skip the store and report status = 1.

# C6.2.318 STLXRB page C6-1862 line 109627 MATCH x08008000/mask=xffe08000
# CONSTRUCT x08008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x08008000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111
# FIX: restored the <next> branch label stripped from "goto ;".
:stlxrb Rs_GPR32, Rt_GPR32, addrReg
is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR32 & Rs_GPR64
{
	status:1 = 1;
	rsize:1 = 16;
	check:1 = ExclusiveMonitorPass(addrReg, rsize);
	if (!check) goto <next>;
	local tmp:4 = Rt_GPR32;
	*addrReg = tmp:1;
	status = ExclusiveMonitorsStatus();
	<next>
	Rs_GPR64 = zext(status);
}

# C6.2.319 STLXRH page C6-1864 line 109754 MATCH x48008000/mask=xffe08000
# CONSTRUCT x48008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x48008000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111
# FIX: restored the <next> branch label stripped from "goto ;".
:stlxrh Rs_GPR32, Rt_GPR32, addrReg
is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=1 & addrReg & Rt_GPR32 & Rs_GPR64
{
	status:1 = 1;
	rsize:1 = 16;
	check:1 = ExclusiveMonitorPass(addrReg, rsize);
	if (!check) goto <next>;
	local tmp:4 = Rt_GPR32;
	*addrReg = tmp:2;
	status = ExclusiveMonitorsStatus();
	<next>
	Rs_GPR64 = zext(status);
}

# C6.2.320 STNP page C6-1866 line 109888 MATCH x28000000/mask=x7fc00000
# CONSTRUCT x28000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x28000000/mask=xffc00000 --status nomem
:stnp Rt_GPR32, Rt2_GPR32, addrPairIndexed
is b_3031=0b00 & b_2229=0b10100000 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32
{
	data1:4 = Rt_GPR32;
	data2:4 = Rt2_GPR32;
	build addrPairIndexed;
	*addrPairIndexed = data1;
	*(addrPairIndexed + 4) = data2;
}
# C6.2.320 STNP page C6-1866 line 109888 MATCH x28000000/mask=x7fc00000 # CONSTRUCT xa8000000/mask=xffc00000
# MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xa8000000/mask=xffc00000 --status nomem
# C6.2.320 STNP -- store pair (no-allocate hint), 64-bit registers
:stnp Rt_GPR64, Rt2_GPR64, addrPairIndexed
is b_3031=0b10 & b_2229=0b10100000 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64
{
  data1:8 = Rt_GPR64;
  data2:8 = Rt2_GPR64;
  build addrPairIndexed;
  *addrPairIndexed = data1;
  *(addrPairIndexed + 8) = data2;
}

# C6.2.321 STP page C6-1868 line 109996 MATCH x28800000/mask=x7fc00000
# C6.2.321 STP page C6-1868 line 109996 MATCH x29800000/mask=x7fc00000
# C6.2.321 STP page C6-1868 line 109996 MATCH x29000000/mask=x7fc00000
# C6.2.320 STNP page C6-1866 line 109888 MATCH x28000000/mask=x7fc00000
# CONSTRUCT x28000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst x28000000/mask=xfe400000 --status nomem

# C6.2.321 STP -- store pair, 32-bit registers
:stp Rt_GPR32, Rt2_GPR32, addrPairIndexed
is b_3031=0b00 & b_2529=0b10100 & b_22=0b0 & Rt2_GPR32 & addrPairIndexed & Rt_GPR32
{
  data1:4 = Rt_GPR32;
  data2:4 = Rt2_GPR32;
  build addrPairIndexed;
  *addrPairIndexed = data1;
  *(addrPairIndexed + 4) = data2;
}

# C6.2.321 STP page C6-1868 line 109996 MATCH x28800000/mask=x7fc00000
# C6.2.321 STP page C6-1868 line 109996 MATCH x29800000/mask=x7fc00000
# C6.2.321 STP page C6-1868 line 109996 MATCH x29000000/mask=x7fc00000
# C6.2.320 STNP page C6-1866 line 109888 MATCH x28000000/mask=x7fc00000
# CONSTRUCT xa8000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst xa8000000/mask=xfe400000 --status nomem

# C6.2.321 STP -- store pair, 64-bit registers
:stp Rt_GPR64, Rt2_GPR64, addrPairIndexed
is b_3031=0b10 & b_2529=0b10100 & b_22=0b0 & Rt2_GPR64 & addrPairIndexed & Rt_GPR64
{
  data1:8 = Rt_GPR64;
  data2:8 = Rt2_GPR64;
  build addrPairIndexed;
  *addrPairIndexed = data1;
  *(addrPairIndexed + 8) = data2;
}

# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb9000000/mask=xbfc00000
# CONSTRUCT xb9000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xb9000000/mask=xffc00000 --status nomem

# C6.2.322 STR (unsigned immediate offset), 32-bit
:str Rt_GPR32, addrUIMM
is size.ldstr=2 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrUIMM & Rn_GPR64xsp & Rt_GPR32
{
  *addrUIMM = Rt_GPR32;
}

# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb8000400/mask=xbfe00c00
# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb8000c00/mask=xbfe00c00
# C6.2.337 STTR page C6-1904 line 111916 MATCH xb8000800/mask=xbfe00c00
# C6.2.346 STUR page C6-1922 line 112818 MATCH xb8000000/mask=xbfe00c00
# CONSTRUCT xb8000000/mask=xffe00000 MATCHED 4 DOCUMENTED OPCODES
# AUNIT --inst xb8000000/mask=xffe00000 --status nomem

# STR/STTR/STUR (unscaled / unprivileged variants selected by UnscPriv), 32-bit
:st^UnscPriv^"r" Rt_GPR32, addrIndexed
is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32
{
  data1:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb8000400/mask=xbfe00c00
# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb8000c00/mask=xbfe00c00
# CONSTRUCT xb8000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xb8000400/mask=xffe00400 --status nomem

# C6.2.322 STR (pre/post-indexed immediate), 32-bit
:str Rt_GPR32, addrIndexed
is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32
{
  data1:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb9000000/mask=xbfc00000
# CONSTRUCT xf9000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf9000000/mask=xffc00000 --status nomem

# C6.2.322 STR (unsigned immediate offset), 64-bit
:str Rt_GPR64, addrUIMM
is size.ldstr=3 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrUIMM & Rn_GPR64xsp & Rt_GPR64
{
  *addrUIMM = Rt_GPR64;
}

# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb8000400/mask=xbfe00c00
# C6.2.322 STR (immediate) page C6-1871 line 110209 MATCH xb8000c00/mask=xbfe00c00
# CONSTRUCT xf8000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf8000400/mask=xffe00400 --status nomem

# C6.2.322 STR (pre/post-indexed immediate), 64-bit
:str Rt_GPR64, addrIndexed
is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR64
{
  data1:8 = Rt_GPR64;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.323 STR (register) page C6-1874 line 110394 MATCH xb8200800/mask=xbfe00c00
# CONSTRUCT xb8200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xb8200800/mask=xffe00c00 --status nomem

# C6.2.323 STR (register offset), 32-bit
:str Rt_GPR32, addrIndexed
is size.ldstr=2 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32
{
  data1:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.323 STR (register) page C6-1874 line 110394 MATCH xb8200800/mask=xbfe00c00
# CONSTRUCT xf8200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8200800/mask=xffe00c00 --status nomem

# C6.2.323 STR (register offset), 64-bit
:str Rt_GPR64, addrIndexed
is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR64
{
  data1:8 = Rt_GPR64;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.324 STRB (immediate) page C6-1876 line 110516 MATCH x39000000/mask=xffc00000
# CONSTRUCT x39000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x39000000/mask=xffc00000 --status nomem

# C6.2.324 STRB (unsigned immediate offset) -- store low byte
:strb Rt_GPR32, addrIndexed
is size.ldstr=0 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrIndexed & Rt_GPR32
{
  tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:1;
}

# C6.2.324 STRB (immediate) page C6-1876 line 110516 MATCH x38000400/mask=xffe00c00
# C6.2.324 STRB (immediate) page C6-1876 line 110516 MATCH x38000c00/mask=xffe00c00
# CONSTRUCT x38000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x38000400/mask=xffe00400 --status nomem

# C6.2.324 STRB (pre/post-indexed immediate)
:strb Rt_GPR32, addrIndexed
is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32
{
  tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:1;
}

# C6.2.325 STRB (register) page C6-1879 line 110678 MATCH x38200800/mask=xffe00c00
# CONSTRUCT x38200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x38200800/mask=xffe00c00 --status nomem

# C6.2.325 STRB (register offset)
:strb Rt_GPR32, addrIndexed
is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32
{
  tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:1;
}

# C6.2.326 STRH (immediate) page C6-1881 line 110781 MATCH x79000000/mask=xffc00000
# CONSTRUCT x79000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x79000000/mask=xffc00000 --status nomem

# C6.2.326 STRH (unsigned immediate offset) -- store low halfword
:strh Rt_GPR32, addrUIMM
is size.ldstr=1 & b_2729=7 & v=0 & b_2425=1 & b_23=0 & b_2222=0 & addrUIMM & Rn_GPR64xsp & Rt_GPR32
{
  tmp:4 = Rt_GPR32;
  *addrUIMM = tmp:2;
}

# C6.2.326 STRH (immediate) page C6-1881 line 110781 MATCH x78000400/mask=xffe00c00
# C6.2.326 STRH (immediate) page C6-1881 line 110781 MATCH x78000c00/mask=xffe00c00
# CONSTRUCT x78000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x78000400/mask=xffe00400 --status nomem

# C6.2.326 STRH (pre/post-indexed immediate)
:strh Rt_GPR32, addrIndexed
is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1010=1 & addrIndexed & Rt_GPR32
{
  tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:2;
}

# C6.2.327 STRH (register) page C6-1884 line 110943 MATCH x78200800/mask=xffe00c00
# CONSTRUCT x78200800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x78200800/mask=xffe00c00 --status nomem

# C6.2.327 STRH (register offset)
:strh Rt_GPR32, addrIndexed
is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=1 & b_1011=2 & addrIndexed & Rt_GPR32
{
  tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:2;
}

# C6.2.337 STTR page C6-1904 line 111916 MATCH xb8000800/mask=xbfe00c00
# CONSTRUCT xf8000800/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8000800/mask=xffe00c00 --status nomem

# C6.2.337 STTR (store unprivileged), 64-bit
:st^UnscPriv^"r" Rt_GPR64, addrIndexed
is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_2223=0 & b_2121=0 & b_1011=2 & UnscPriv & addrIndexed & Rt_GPR64
{
  data1:8 = Rt_GPR64;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.338 STTRB page C6-1906 line 112029 MATCH x38000800/mask=xffe00c00
# C6.2.347 STURB page C6-1924 line 112913 MATCH x38000000/mask=xffe00c00
# CONSTRUCT x38000000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst
# x38000000/mask=xffe00000 --status nomem

# C6.2.338/347 STTRB / STURB (unscaled / unprivileged byte store)
:st^UnscPriv^"rb" Rt_GPR32, addrIndexed
is size.ldstr=0 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32
{
  local tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:1;
}

# C6.2.339 STTRH page C6-1908 line 112130 MATCH x78000800/mask=xffe00c00
# C6.2.348 STURH page C6-1925 line 112984 MATCH x78000000/mask=xffe00c00
# CONSTRUCT x78000000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x78000000/mask=xffe00000 --status nomem

# C6.2.339/348 STTRH / STURH (unscaled / unprivileged halfword store)
:st^UnscPriv^"rh" Rt_GPR32, addrIndexed
is size.ldstr=1 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & UnscPriv & addrIndexed & Rt_GPR32
{
  local tmp:4 = Rt_GPR32;
  build addrIndexed;
  *addrIndexed = tmp:2;
}

# C6.2.346 STUR page C6-1922 line 112818 MATCH xb8000000/mask=xbfe00c00
# CONSTRUCT xf8000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xf8000000/mask=xffe00c00 --status nomem

# C6.2.346 STUR (store unscaled), 64-bit
:st^UnscPriv^"r" Rt_GPR64, addrIndexed
is size.ldstr=3 & b_2729=7 & v=0 & b_2425=0 & b_23=0 & b_2122=0 & b_1011=0 & UnscPriv & addrIndexed & Rt_GPR64
{
  data1:8 = Rt_GPR64;
  build addrIndexed;
  *addrIndexed = data1;
}

# C6.2.349 STXP page C6-1926 line 113055 MATCH x88200000/mask=xbfe08000
# CONSTRUCT xc8200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8200000/mask=xffe08000 --status nomem
# NOTE(review): in the exclusive-store constructors below, the original
# "if (!check) goto ;" had lost its <label> target (angle-bracket text was
# stripped during extraction). The label and its definition are restored so
# the memory write is skipped when ExclusiveMonitorPass fails, while the
# failure status (1) is still returned in Rs.

# C6.2.349 STXP -- store exclusive pair, 64-bit
:stxp Rs_GPR32, Rt_GPR64, Rt2_GPR64, addrReg
is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=0 & Rt2_GPR64 & addrReg & Rt_GPR64 & Rs_GPR64
{
  status:1 = 1;
  rsize:1 = 16;
  check:1 = ExclusiveMonitorPass(addrReg, rsize);
  if (!check) goto <next>;
  *addrReg = Rt_GPR64;
  *(addrReg + 8) = Rt2_GPR64;
  status = ExclusiveMonitorsStatus();
  <next>
  Rs_GPR64 = zext(status);
}

# C6.2.349 STXP page C6-1926 line 113055 MATCH x88200000/mask=xbfe08000
# CONSTRUCT x88200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88200000/mask=xffe08000 --status nomem

# C6.2.349 STXP -- store exclusive pair, 32-bit
:stxp Rs_GPR32, Rt_GPR32, Rt2_GPR32, addrReg
is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=1 & Rs_GPR32 & b_15=0 & Rt2_GPR32 & addrReg & Rt_GPR32 & Rs_GPR64
{
  status:1 = 1;
  rsize:1 = 16;
  check:1 = ExclusiveMonitorPass(addrReg, rsize);
  if (!check) goto <next>;
  *addrReg = Rt_GPR32;
  *(addrReg + 4) = Rt2_GPR32;
  status = ExclusiveMonitorsStatus();
  <next>
  Rs_GPR64 = zext(status);
}

# C6.2.350 STXR page C6-1929 line 113222 MATCH x88000000/mask=xbfe08000
# CONSTRUCT xc8000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xc8000000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111

# C6.2.350 STXR -- store exclusive register, 64-bit
:stxr Rs_GPR32, Rt_GPR64, addrReg
is size.ldstr=3 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR64 & Rs_GPR64
{
  status:1 = 1;
  rsize:1 = 16;
  check:1 = ExclusiveMonitorPass(addrReg, rsize);
  if (!check) goto <next>;
  *addrReg = Rt_GPR64;
  status = ExclusiveMonitorsStatus();
  <next>
  Rs_GPR64 = zext(status);
}

# C6.2.350 STXR page C6-1929 line 113222 MATCH x88000000/mask=xbfe08000
# CONSTRUCT x88000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x88000000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111

# C6.2.350 STXR -- store exclusive register, 32-bit
:stxr Rs_GPR32, Rt_GPR32, addrReg
is size.ldstr=2 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR32 & Rs_GPR64
{
  status:1 = 1;
  rsize:1 = 16;
  check:1 = ExclusiveMonitorPass(addrReg, rsize);
  if (!check) goto <next>;
  *addrReg = Rt_GPR32;
  status = ExclusiveMonitorsStatus();
  <next>
  Rs_GPR64 = zext(status);
}

# C6.2.351 STXRB page C6-1931 line 113368 MATCH x08000000/mask=xffe08000
# CONSTRUCT x08000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x08000000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111

# C6.2.351 STXRB -- store exclusive byte
:stxrb Rs_GPR32, Rt_GPR32, addrReg
is size.ldstr=0 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR32 & Rs_GPR64
{
  status:1 = 1;
  rsize:1 = 16;
  check:1 = ExclusiveMonitorPass(addrReg, rsize);
  if (!check) goto <next>;
  local tmp:4 = Rt_GPR32;
  *addrReg = tmp:1;
  status = ExclusiveMonitorsStatus();
  <next>
  Rs_GPR64 = zext(status);
}

# C6.2.352 STXRH page C6-1933 line 113496 MATCH x48000000/mask=xffe08000
# CONSTRUCT x48000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x48000000/mask=xffe08000 --status nomem
# To enforce SHOULD BE ONE fields add: b_1014=0b11111

# C6.2.352 STXRH -- store exclusive halfword
:stxrh Rs_GPR32, Rt_GPR32, addrReg
is size.ldstr=1 & b_2429=0x8 & b_23=0 & L=0 & b_21=0 & Rs_GPR32 & b_15=0 & addrReg & Rt_GPR32 & Rs_GPR64
{
  status:1 = 1;
  rsize:1 = 16;
  check:1 = ExclusiveMonitorPass(addrReg, rsize);
  if (!check) goto <next>;
  local tmp:4 = Rt_GPR32;
  *addrReg = tmp:2;
  status = ExclusiveMonitorsStatus();
  <next>
  Rs_GPR64 = zext(status);
}

# C6.2.356 SUB (extended register) page C6-1940 line 113972 MATCH x4b200000/mask=x7fe00000
# CONSTRUCT x4b200000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x4b200000/mask=xffe00000 --status pass

# C6.2.356 SUB (extended register), 32-bit
:sub Rd_GPR32wsp, Rn_GPR32wsp, ExtendRegShift32
is sf=0 & op=1 & S=0 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp
{
  tmp_2:4 = ExtendRegShift32;
  tmp_1:4 = Rn_GPR32wsp - tmp_2;
  Rd_GPR64xsp = zext(tmp_1);
}

# C6.2.356 SUB (extended register) page C6-1940 line 113972 MATCH x4b200000/mask=x7fe00000
# CONSTRUCT xcb200000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xcb200000/mask=xffe00000 --status pass

# C6.2.356 SUB (extended register), 64-bit
:sub Rd_GPR64xsp, Rn_GPR64xsp, ExtendRegShift64
is sf=1 & op=1 & S=0 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift64 & Rn_GPR64xsp & Rd_GPR64xsp
{
  tmp_2:8 = ExtendRegShift64;
  tmp_1:8 = Rn_GPR64xsp - tmp_2;
  Rd_GPR64xsp = tmp_1;
}

# C6.2.357 SUB (immediate) page C6-1943 line 114120 MATCH x51000000/mask=x7f800000
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# CONSTRUCT x51000000/mask=xdf000000 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst x51000000/mask=xdf000000 --status pass --comment "flags"

:sub^SBIT_CZNO Rd_GPR32xsp, Rn_GPR32xsp, ImmShift32 is sf=0 & b_30=1 & S & SBIT_CZNO & b_2428=0x11 &
ImmShift32 & Rn_GPR32xsp & Rd_GPR32xsp & Rd_GPR64xsp
{
  subflags(Rn_GPR32xsp, ImmShift32);
  tmp:4 = Rn_GPR32xsp - ImmShift32;
  resultflags(tmp);
  build SBIT_CZNO;
  Rd_GPR64xsp = zext(tmp);
}

# C6.2.357 SUB (immediate) page C6-1943 line 114120 MATCH x51000000/mask=x7f800000
# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# CONSTRUCT xd1000000/mask=xdf000000 MATCHED 3 DOCUMENTED OPCODES
# AUNIT --inst xd1000000/mask=xdf000000 --status pass --comment "flags"

# SUB/SUBS (shifted immediate, flag setting selected by S bit), 64-bit
:sub^SBIT_CZNO Rd_GPR64xsp, Rn_GPR64xsp, ImmShift64
is sf=1 & b_30=1 & S & SBIT_CZNO & b_2428=0x11 & ImmShift64 & Rn_GPR64xsp & Rd_GPR64xsp
{
  subflags(Rn_GPR64xsp, ImmShift64);
  Rd_GPR64xsp = Rn_GPR64xsp - ImmShift64;
  resultflags(Rd_GPR64xsp);
  build SBIT_CZNO;
}

# C6.2.357 SUB (immediate) page C6-1943 line 114120 MATCH x51000000/mask=x7f800000
# CONSTRUCT x51000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x51000000/mask=xffc00000 --status pass

# C6.2.357 SUB (immediate, LSL #0), 32-bit
:sub Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0
is sf=0 & op=1 & S=0 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_negimm_lsl0 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp
{
  tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl0;
  tmp_1:4 = Rn_GPR32wsp - tmp_2;
  Rd_GPR64xsp = zext(tmp_1);
}

# C6.2.357 SUB (immediate) page C6-1943 line 114120 MATCH x51000000/mask=x7f800000
# CONSTRUCT x51400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst x51400000/mask=xffc00000 --status pass

# C6.2.357 SUB (immediate, LSL #12), 32-bit
:sub Rd_GPR32wsp, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl12
is sf=0 & op=1 & S=0 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_negimm_lsl12 & Rn_GPR32wsp & Rd_GPR32wsp & Rd_GPR64xsp
{
  tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl12;
  tmp_1:4 = Rn_GPR32wsp - tmp_2;
  Rd_GPR64xsp = zext(tmp_1);
}

# C6.2.357 SUB (immediate) page C6-1943 line 114120 MATCH x51000000/mask=x7f800000
# CONSTRUCT xd1000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd1000000/mask=xffc00000 --status pass

# C6.2.357 SUB (immediate, LSL #0), 64-bit
:sub Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl0
is sf=1 & op=1 & S=0 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_negimm_lsl0 & Rn_GPR64xsp & Rd_GPR64xsp
{
  tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl0;
  tmp_1:8 = Rn_GPR64xsp - tmp_2;
  Rd_GPR64xsp = tmp_1;
}

# C6.2.357 SUB (immediate) page C6-1943 line 114120 MATCH x51000000/mask=x7f800000
# CONSTRUCT xd1400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# AUNIT --inst xd1400000/mask=xffc00000 --status pass

# C6.2.357 SUB (immediate, LSL #12), 64-bit
:sub Rd_GPR64xsp, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl12
is sf=1 & op=1 & S=0 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_negimm_lsl12 & Rn_GPR64xsp & Rd_GPR64xsp
{
  tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl12;
  tmp_1:8 = Rn_GPR64xsp - tmp_2;
  Rd_GPR64xsp = tmp_1;
}

# C6.2.358 SUB (shifted register) page C6-1945 line 114221 MATCH x4b000000/mask=x7f200000
# C6.2.234 NEG (shifted register) page C6-1694 line 100243 MATCH x4b0003e0/mask=x7f2003e0
# CONSTRUCT x4b000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x4b000000/mask=xff200000 --status pass

# C6.2.358 SUB (shifted register), 32-bit
:sub Rd_GPR32, Rn_GPR32, RegShift32
is sf=0 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = RegShift32;
  tmp_1:4 = Rn_GPR32 - tmp_2;
  Rd_GPR64 = zext(tmp_1);
}

# C6.2.358 SUB (shifted register) page C6-1945 line 114221 MATCH x4b000000/mask=x7f200000
# C6.2.234 NEG (shifted register) page C6-1694 line 100243 MATCH x4b0003e0/mask=x7f2003e0
# CONSTRUCT xcb000000/mask=xff200000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xcb000000/mask=xff200000 --status pass

# C6.2.358 SUB (shifted register), 64-bit
:sub Rd_GPR64, Rn_GPR64, RegShift64
is sf=1 & op=1 & s=0 & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd_GPR64
{
  tmp_2:8 = RegShift64;
  tmp_1:8 = Rn_GPR64 - tmp_2;
  Rd_GPR64 = tmp_1;
}

# C6.2.362 SUBS (extended register) page C6-1950 line 114543 MATCH x6b200000/mask=x7fe00000
# C6.2.62 CMP (extended register) page C6-1252 line 73406 MATCH x6b20001f/mask=x7fe0001f
# CONSTRUCT x6b200000/mask=xffe00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x6b200000/mask=xffe00000 --status pass --comment "flags"

# C6.2.362 SUBS (extended register), 32-bit
:subs Rd_GPR32, Rn_GPR32wsp, ExtendRegShift32
is sf=0 & op=1 & S=1 & b_2428=0xb & opt=0 & b_2121=1 & ExtendRegShift32 & Rn_GPR32wsp & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = ExtendRegShift32;
  subflags(Rn_GPR32wsp, tmp_2);
  tmp_1:4 = Rn_GPR32wsp - tmp_2;
  resultflags(tmp_1);
  Rd_GPR64 = zext(tmp_1);
  affectflags();
}

# C6.2.362 SUBS (extended register) page C6-1950 line 114543 MATCH x6b200000/mask=x7fe00000
# C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000
# C6.2.62 CMP (extended register) page C6-1252 line 73406 MATCH x6b20001f/mask=x7fe0001f
# C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f
# C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0
# CONSTRUCT xeb000000/mask=xffc00000 MATCHED 5 DOCUMENTED OPCODES
# AUNIT --inst xeb000000/mask=xffc00000 --status pass --comment "flags"

# C6.2.362 SUBS (extended register), 64-bit
:subs Rd_GPR64, Rn_GPR64xsp, ExtendRegShift64
is sf=1 & op=1 & S=1 & b_2428=0xb & opt=0 & ExtendRegShift64 & Rn_GPR64xsp & Rd_GPR64
{
  tmp_2:8 = ExtendRegShift64;
  subflags(Rn_GPR64xsp, tmp_2);
  tmp_1:8 = Rn_GPR64xsp - tmp_2;
  resultflags(tmp_1);
  Rd_GPR64 = tmp_1;
  affectflags();
}

# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# CONSTRUCT x71000000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x71000000/mask=xffc00000 --status pass --comment "flags"

# C6.2.363 SUBS (immediate, LSL #0), 32-bit
:subs Rd_GPR32, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl0
is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i32_negimm_lsl0 & Rn_GPR32wsp & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl0;
  subflags(Rn_GPR32wsp, tmp_2);
  tmp_1:4 = Rn_GPR32wsp - tmp_2;
  resultflags(tmp_1);
  Rd_GPR64 = zext(tmp_1);
  affectflags();
}

# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# CONSTRUCT x71400000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst x71400000/mask=xffc00000 --status pass --comment "flags"

# C6.2.363 SUBS (immediate, LSL #12), 32-bit
:subs Rd_GPR32, Rn_GPR32wsp, Imm12_addsubimm_operand_i32_negimm_lsl12
is sf=0 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i32_negimm_lsl12 & Rn_GPR32wsp & Rd_GPR32 & Rd_GPR64
{
  tmp_2:4 = Imm12_addsubimm_operand_i32_negimm_lsl12;
  subflags(Rn_GPR32wsp, tmp_2);
  tmp_1:4 = Rn_GPR32wsp - tmp_2;
  resultflags(tmp_1);
  Rd_GPR64 = zext(tmp_1);
  affectflags();
}

# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# CONSTRUCT xf1000000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf1000000/mask=xffc00000 --status pass --comment "flags"

# C6.2.363 SUBS (immediate, LSL #0), 64-bit
:subs Rd_GPR64, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl0
is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=0 & Imm12_addsubimm_operand_i64_negimm_lsl0 & Rn_GPR64xsp & Rd_GPR64
{
  tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl0;
  subflags(Rn_GPR64xsp, tmp_2);
  tmp_1:8 = Rn_GPR64xsp - tmp_2;
  resultflags(tmp_1);
  Rd_GPR64 = tmp_1;
  affectflags();
}

# C6.2.363 SUBS (immediate) page C6-1953 line 114699 MATCH x71000000/mask=x7f800000
# C6.2.63 CMP (immediate) page C6-1254 line 73533 MATCH x7100001f/mask=x7f80001f
# CONSTRUCT xf1400000/mask=xffc00000 MATCHED 2 DOCUMENTED OPCODES
# AUNIT --inst xf1400000/mask=xffc00000 --status pass --comment "flags"

# C6.2.363 SUBS (immediate, LSL #12), 64-bit
:subs Rd_GPR64, Rn_GPR64xsp, Imm12_addsubimm_operand_i64_negimm_lsl12
is sf=1 & op=1 & S=1 & b_2428=0x11 & shift=1 & Imm12_addsubimm_operand_i64_negimm_lsl12 & Rn_GPR64xsp & Rd_GPR64
{
  tmp_2:8 = Imm12_addsubimm_operand_i64_negimm_lsl12;
  subflags(Rn_GPR64xsp, tmp_2);
  tmp_1:8 = Rn_GPR64xsp - tmp_2;
  resultflags(tmp_1);
  Rd_GPR64 = tmp_1;
  affectflags();
}
# C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000 # C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f # C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0 # CONSTRUCT x6b000000/mask=xff200000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst x6b000000/mask=xff200000 --status pass :subs Rd_GPR32, Rn_GPR32, RegShift32 is sf=0 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp_2:4 = RegShift32; subflags(Rn_GPR32, tmp_2); tmp_1:4 = Rn_GPR32 - tmp_2; resultflags(tmp_1); Rd_GPR64 = zext(tmp_1); affectflags(); } # C6.2.364 SUBS (shifted register) page C6-1955 line 114807 MATCH x6b000000/mask=x7f200000 # C6.2.64 CMP (shifted register) page C6-1256 line 73623 MATCH x6b00001f/mask=x7f20001f # C6.2.235 NEGS page C6-1696 line 100340 MATCH x6b0003e0/mask=x7f2003e0 # CONSTRUCT xeb000000/mask=xff200000 MATCHED 3 DOCUMENTED OPCODES # AUNIT --inst xeb000000/mask=xff200000 --status pass :subs Rd_GPR64, Rn_GPR64, RegShift64 is sf=1 & op=1 & s=1 & b_2428=0xb & b_2121=0 & RegShift64 & Rn_GPR64 & Rd_GPR64 & Rd { tmp_2:8 = RegShift64; subflags(Rn_GPR64, tmp_2); tmp_1:8 = Rn_GPR64 - tmp_2; resultflags(tmp_1); Rd_GPR64 = tmp_1; affectflags(); } # C6.2.365 SVC page C6-1957 line 114930 MATCH xd4000001/mask=xffe0001f # CONSTRUCT xd4000001/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd4000001/mask=xffe0001f --status nodest :svc imm16 is b_2431=0xd4 & excCode=0 & imm16 & excCode2=0 & ll=1 { CallSupervisor(imm16:2); } # C6.2.366 SWPB, SWPAB, SWPALB, SWPLB page C6-1958 line 114973 MATCH x38208000/mask=xff20fc00 # CONSTRUCT x38208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x38208000/mask=xff20fc00 --status nomem # size=0b00 (3031) :swp^ls_lor^"b" aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b00 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Wt & ls_data1 & ls_mem1 & aa_Ws & Rn_GPR64xsp { build ls_loa; build 
ls_data1; ls_opc_swp(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem1; aa_Wt = tmp_ldWn; build ls_lor; } # C6.2.367 SWPH, SWPAH, SWPALH, SWPLH page C6-1960 line 115079 MATCH x78208000/mask=xff20fc00 # CONSTRUCT x78208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x78208000/mask=xff20fc00 --status nomem # size=0b01 (3031) :swp^ls_lor^"h" aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b01 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Wt & ls_data2 & ls_mem2 & aa_Ws & Rn_GPR64xsp { build ls_loa; build ls_data2; ls_opc_swp(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem2; aa_Wt = tmp_ldWn; build ls_lor; } # C6.2.368 SWP, SWPA, SWPAL, SWPL page C6-1962 line 115186 MATCH xb8208000/mask=xbf20fc00 # CONSTRUCT xb8208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xb8208000/mask=xff20fc00 --status nomem # size=0b10 (3031) :swp^ls_lor aa_Ws, aa_Wt, [Rn_GPR64xsp] is b_3031=0b10 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Wt & ls_data4 & ls_mem4 & aa_Ws & Rn_GPR64xsp { build ls_loa; build ls_data4; ls_opc_swp(tmp_ldWn, aa_Ws, tmp_stWn); build ls_mem4; aa_Wt = tmp_ldWn; build ls_lor; } # C6.2.368 SWP, SWPA, SWPAL, SWPL page C6-1962 line 115186 MATCH xb8208000/mask=xbf20fc00 # CONSTRUCT xf8208000/mask=xff20fc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xf8208000/mask=xff20fc00 --status nomem # size=0b11 (3031) :swp^ls_lor aa_Xs, aa_Xt, [Rn_GPR64xsp] is b_3031=0b11 & b_2429=0b111000 & b_21=1 & b_1215=0b1000 & b_1011=0b00 & ls_loa & ls_lor & aa_Xt & ls_data8 & ls_mem8 & aa_Xs & Rn_GPR64xsp { build ls_loa; build ls_data8; ls_opc_swp(tmp_ldXn, aa_Xs, tmp_stXn); build ls_mem8; aa_Xt = tmp_ldXn; build ls_lor; } # C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # CONSTRUCT 
x93401c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x93401c00/mask=xfffffc06 --status pass # Special case of sbfm where imms='000111' and immr='000000' :sxtb Rd_GPR64, Rn_GPR32 is ImmR=0x0 & ImmS=0x7 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & Rn_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; tmp_byte:1 = tmp:1; result:8 = sext(tmp_byte); Rd_GPR64 = result; } # C6.2.369 SXTB page C6-1964 line 115324 MATCH x13001c00/mask=x7fbffc00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # CONSTRUCT x13001c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x13001c00/mask=xfffffc06 --status pass # Special case of sbfm when ImmS=7 and ImmR=0. Note that this implies ImmS > ImmR-1 # Otherwise, this might appear to conflict with sbfiz :sxtb Rd_GPR32, Rn_GPR32 is ImmR=0x0 & ImmS=0x7 & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; tmp_byte:1 = tmp:1; result:4 = sext(tmp_byte); Rd_GPR64 = zext(result); } # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # CONSTRUCT x93403c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x93403c00/mask=xfffffc06 --status pass # Special case of sbfm where imms='001111' and immr='000000' :sxth Rd_GPR64, Rn_GPR32 is ImmR=0x0 & ImmS=0xf & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & Rn_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; tmp_1:2 = tmp:2; tmp_2:8 = sext(tmp_1); Rd_GPR64 = tmp_2; } # C6.2.370 SXTH page C6-1966 line 115411 MATCH x13003c00/mask=x7fbffc00 # C6.2.267 SBFIZ 
page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # CONSTRUCT x13003c00/mask=xfffffc06 MATCHED 4 DOCUMENTED OPCODES # AUNIT --inst x13003c00/mask=xfffffc06 --status pass # Special case of sbfm where imms='001111' and immr='000000' :sxth Rd_GPR32, Rn_GPR32 is ImmR=0x0 & ImmS=0xf & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=0 & opc=0 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; tmp_1:2 = tmp:2; tmp_2:4 = sext(tmp_1); Rd_GPR64 = zext(tmp_2); } # C6.2.371 SXTW page C6-1968 line 115498 MATCH x93407c00/mask=xfffffc00 # C6.2.17 ASR (immediate) page C6-1175 line 69498 MATCH x13007c00/mask=x7f807c00 # C6.2.267 SBFIZ page C6-1751 line 103178 MATCH x13000000/mask=x7f800000 # C6.2.268 SBFM page C6-1753 line 103272 MATCH x13000000/mask=x7f800000 # C6.2.269 SBFX page C6-1756 line 103421 MATCH x13000000/mask=x7f800000 # CONSTRUCT x93407c00/mask=xfffffc06 MATCHED 5 DOCUMENTED OPCODES # AUNIT --inst x93407c00/mask=xfffffc06 --status pass # Special case of sbfm where imms='011111' and immr='000000' :sxtw Rd_GPR64, Rn_GPR32 is ImmR=0x0 & ImmS=0x1f & ImmS_EQ_ImmR=0 & ImmS_LT_ImmR=0 & sf=1 & opc=0 & b_2428=0x13 & b_2323=0 & n=1 & Rn_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; Rd_GPR64 = sext(tmp); } # C6.2.286 SYS page C6-979 line 56782 KEEPWITH SysArgs: Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3, Rt_GPR64 is Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt & Rt_GPR64 { export Rt_GPR64; } SysArgs: Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3 is Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt=31 & Rt_GPR64 { export 0:8; } # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # C6.2.19 AT page C6-1179 line 69679 MATCH xd5087800/mask=xfff8fe00 # C6.2.39 BRB page C6-1212 line 71361 MATCH xd5097200/mask=xffffff00 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 
line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # CONSTRUCT xd5080000/mask=xfff80000 MATCHED 6 DOCUMENTED OPCODES # AUNIT --inst xd5080000/mask=xfff80000 --status nodest :sys SysArgs is b_1931=0b1101010100001 & Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & SysArgs { tmp1:4 = Op1_uimm3; tmp2:4 = CRn_CRx; tmp3:4 = CRm_CRx; tmp4:4 = Op2_uimm3; SysOp_W(tmp1, tmp2, tmp3, tmp4, SysArgs); } # C6.2.373 SYSL page C6-1971 line 115652 MATCH xd5280000/mask=xfff80000 # CONSTRUCT xd5280000/mask=xfff80000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd5280000/mask=xfff80000 --status nodest :sysl Rt_GPR64, Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3 is b_2431=0xd5 & b_2223=0 & l=1 & Op0=1 & Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt & Rt_GPR64 { tmp1:4 = Op1_uimm3; tmp2:4 = CRn_CRx; tmp3:4 = CRm_CRx; tmp4:4 = Op2_uimm3; Rt_GPR64 = SysOp_R(tmp1, tmp2, tmp3, tmp4); } # C6.2.373 SYSL page C6-1971 line 115652 MATCH xd5280000/mask=xfff80000 # CONSTRUCT xd528001f/mask=xfff8001f MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xd528001f/mask=xfff8001f --status nodest :sysl Op1_uimm3, CRn_CRx, CRm_CRx, Op2_uimm3 is b_2431=0xd5 & b_2223=0 & l=1 & Op0=1 & Op1_uimm3 & CRn_CRx & CRm_CRx & Op2_uimm3 & aa_Xt=31 & Rt_GPR64 { tmp1:4 = Op1_uimm3; tmp2:4 = CRn_CRx; tmp3:4 = CRm_CRx; tmp4:4 = Op2_uimm3; SysOp_R(tmp1, tmp2, tmp3, tmp4); } # C6.2.374 TBNZ page C6-1972 line 115708 MATCH x37000000/mask=x7f000000 # C6.2.375 TBZ page C6-1973 line 115766 MATCH x36000000/mask=x7f000000 # CONSTRUCT xb6000000/mask=xfe000000 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xb6000000/mask=xfe000000 --status nodest :tb^ZeroOp Rd_GPR64, BitPos, Addr14 is sf=1 & b_2530=0x1b & BitPos & ZeroOp & Addr14 & Rd_GPR64 { tmp:1 = BitPos; if (tmp == ZeroOp) goto Addr14; } # C6.2.374 TBNZ page C6-1972 line 115708 MATCH x37000000/mask=x7f000000 # C6.2.375 TBZ page C6-1973 line 115766 MATCH x36000000/mask=x7f000000 # CONSTRUCT x36000000/mask=xfe000000 MATCHED 2 DOCUMENTED 
OPCODES # AUNIT --inst x36000000/mask=xfe000000 --status nodest :tb^ZeroOp Rd_GPR32, BitPos, Addr14 is sf=0 & b_2530=0x1b & BitPos & ZeroOp & Addr14 & Rd_GPR32 { tmp:1 = BitPos; if (tmp == ZeroOp) goto Addr14; } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8020/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8020/mask=xffffffe0 --status nodest :tlbi "IPAS2E1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0000 & b_0507=0b001 & Rt_GPR64 { TLBI_IPAS2E1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c80a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c80a0/mask=xffffffe0 --status nodest :tlbi "IPAS2LE1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0000 & b_0507=0b101 & Rt_GPR64 { TLBI_IPAS2LE1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088300/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088300/mask=xffffffe0 --status nodest :tlbi "VMALLE1IS" is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b000 { TLBI_VMALLE1IS(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8300/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8300/mask=xffffffe0 --status nodest :tlbi "ALLE2IS" is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b000 { TLBI_ALLE2IS(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT 
xd50e8300/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50e8300/mask=xffffffe0 --status nodest :tlbi "ALLE3IS" is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b000 { TLBI_ALLE3IS(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088320/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088320/mask=xffffffe0 --status nodest :tlbi "VAE1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b001 & Rt_GPR64 { TLBI_VAE1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8320/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8320/mask=xffffffe0 --status nodest :tlbi "VAE2IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b001 & Rt_GPR64 { TLBI_VAE2IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e8320/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50e8320/mask=xffffffe0 --status nodest :tlbi "VAE3IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b001 & Rt_GPR64 { TLBI_VAE3IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088340/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088340/mask=xffffffe0 --status nodest :tlbi "ASIDE1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b010 & Rt_GPR64 { TLBI_ASIDE1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page 
C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088360/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088360/mask=xffffffe0 --status nodest :tlbi "VAAE1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b011 & Rt_GPR64 { TLBI_VAAE1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8380/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8380/mask=xffffffe0 --status nodest :tlbi "ALLE1IS" is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b100 { TLBI_ALLE1IS(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50883a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50883a0/mask=xffffffe0 --status nodest :tlbi "VALE1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b101 & Rt_GPR64 { TLBI_VALE1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c83a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c83a0/mask=xffffffe0 --status nodest :tlbi "VALE2IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b101 & Rt_GPR64 { TLBI_VALE2IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e83a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50e83a0/mask=xffffffe0 --status nodest :tlbi "VALE3IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b101 & Rt_GPR64 { TLBI_VALE3IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 
line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c83c0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c83c0/mask=xffffffe0 --status nodest :tlbi "VMALLS12E1IS" is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b110 { TLBI_VMALLS12E1IS(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50883e0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50883e0/mask=xffffffe0 --status nodest :tlbi "VAALE1IS", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0011 & b_0507=0b111 & Rt_GPR64 { TLBI_VAALE1IS(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8420/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8420/mask=xffffffe0 --status nodest :tlbi "IPAS2E1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0100 & b_0507=0b001 & Rt_GPR64 { TLBI_IPAS2E1(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c84a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c84a0/mask=xffffffe0 --status nodest :tlbi "IPAS2LE1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0100 & b_0507=0b101 & Rt_GPR64 { TLBI_IPAS2LE1(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088700/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088700/mask=xffffffe0 --status nodest :tlbi "VMALLE1" is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b000 { 
TLBI_VMALLE1(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8700/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8700/mask=xffffffe0 --status nodest :tlbi "ALLE2" is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b000 { TLBI_ALLE2(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e8700/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50e8700/mask=xffffffe0 --status nodest :tlbi "ALLE3" is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b000 { TLBI_ALLE3(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088720/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088720/mask=xffffffe0 --status nodest :tlbi "VAE1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b001 & Rt_GPR64 { TLBI_VAE1(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8720/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8720/mask=xffffffe0 --status nodest :tlbi "VAE2", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b001 & Rt_GPR64 { TLBI_VAE2(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e8720/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50e8720/mask=xffffffe0 --status nodest :tlbi "VAE3", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b001 & Rt_GPR64 
{ TLBI_VAE3(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088740/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088740/mask=xffffffe0 --status nodest :tlbi "ASIDE1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b010 & Rt_GPR64 { TLBI_ASIDE1(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5088760/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd5088760/mask=xffffffe0 --status nodest :tlbi "VAAE1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b011 & Rt_GPR64 { TLBI_VAAE1(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c8780/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c8780/mask=xffffffe0 --status nodest :tlbi "ALLE1" is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b100 { TLBI_ALLE1(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50887a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50887a0/mask=xffffffe0 --status nodest :tlbi "VALE1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b101 & Rt_GPR64 { TLBI_VALE1(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c87a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c87a0/mask=xffffffe0 --status nodest :tlbi "VALE2", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 
& b_0811=0b0111 & b_0507=0b101 & Rt_GPR64 { TLBI_VALE2(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50e87a0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50e87a0/mask=xffffffe0 --status nodest :tlbi "VALE3", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b110 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b101 & Rt_GPR64 { TLBI_VALE3(Rt_GPR64); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50c87c0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50c87c0/mask=xffffffe0 --status nodest :tlbi "VMALLS12E1" is b_1931=0b1101010100001 & b_1618=0b100 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b110 { TLBI_VMALLS12E1(); } # C6.2.378 TLBI page C6-1976 line 115920 MATCH xd5088000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50887e0/mask=xffffffe0 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50887e0/mask=xffffffe0 --status nodest :tlbi "VAALE1", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b1000 & b_0811=0b0111 & b_0507=0b111 & Rt_GPR64 { TLBI_VAALE1(Rt_GPR64); } # C6.2.382 TST (immediate) page C6-1983 line 116255 MATCH x7200001f/mask=x7f80001f # C6.2.14 ANDS (immediate) page C6-1169 line 69185 MATCH x72000000/mask=x7f800000 # CONSTRUCT x7200001f/mask=xff80001f MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x7200001f/mask=xff80001f --status pass --comment "flags" :tst Rn_GPR32, DecodeWMask32 is sf=0 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask32 & Rn_GPR32 & Rd=0x1f { tmp_2:4 = DecodeWMask32; tmp_1:4 = Rn_GPR32 & tmp_2; resultflags(tmp_1); affectLflags(); } # C6.2.382 TST (immediate) page C6-1983 line 116255 MATCH x7200001f/mask=x7f80001f # C6.2.14 ANDS (immediate) page C6-1169 line 69185 MATCH x72000000/mask=x7f800000 # CONSTRUCT xf200001f/mask=xff80001f 
MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xf200001f/mask=xff80001f --status pass --comment "flags" :tst Rn_GPR64, DecodeWMask64 is sf=1 & opc=3 & b_2428=0x12 & b_2323=0 & DecodeWMask64 & Rn_GPR64 & Rd=0x1f { tmp_2:8 = DecodeWMask64; tmp_1:8 = Rn_GPR64 & tmp_2; resultflags(tmp_1); affectLflags(); } # C6.2.383 TST (shifted register) page C6-1984 line 116319 MATCH x6a00001f/mask=x7f20001f # C6.2.15 ANDS (shifted register) page C6-1171 line 69286 MATCH x6a000000/mask=x7f200000 # CONSTRUCT x6a00001f/mask=xff20001f MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x6a00001f/mask=xff20001f --status pass --comment "flags" :tst Rn_GPR32, RegShift32Log is sf=0 & opc=3 & b_2428=0xa & N=0 & RegShift32Log & Rn_GPR32 & Rd=0x1f { tmp_2:4 = RegShift32Log; tmp_1:4 = Rn_GPR32 & tmp_2; resultflags(tmp_1); affectLflags(); } # C6.2.383 TST (shifted register) page C6-1984 line 116319 MATCH x6a00001f/mask=x7f20001f # C6.2.15 ANDS (shifted register) page C6-1171 line 69286 MATCH x6a000000/mask=x7f200000 # CONSTRUCT xea00001f/mask=xff20001f MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xea00001f/mask=xff20001f --status pass --comment "flags" :tst Rn_GPR64, RegShift64Log is sf=1 & opc=3 & b_2428=0xa & N=0 & RegShift64Log & Rn_GPR64 & Rd=0x1f { tmp_2:8 = RegShift64Log; tmp_1:8 = Rn_GPR64 & tmp_2; resultflags(tmp_1); affectLflags(); } # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00 # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # C6.2.394 UXTB page C6-2002 line 117228 MATCH x53001c00/mask=xfffffc00 # C6.2.395 UXTH page C6-2003 line 117288 MATCH x53003c00/mask=xfffffc00 # CONSTRUCT x53000008/mask=xffe0800c MATCHED 7 DOCUMENTED OPCODES # AUNIT --inst x53000008/mask=xffe0800c --status pass # Special case of ubfm where 
UInt(imms) < UInt(immr). # Note because LSL is preferred where imms + 1 == immr, we use ImmS_LT_ImmR_minus_1 # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :ubfiz Rd_GPR32, Rn_GPR32, ubfiz_lsb, ubfiz_width is ImmS_LT_ImmR_minus_1=1 & ImmS_EQ_ImmR=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ubfiz_lsb & ubfiz_width & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 { local wmask:4 = DecodeWMask32; local tmask:4 = DecodeTMask32; local src:4 = Rn_GPR32; local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; Rd_GPR64 = zext(bot & tmask); } # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00 # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # CONSTRUCT xd340000a/mask=xffc0000a MATCHED 5 DOCUMENTED OPCODES # AUNIT --inst xd340000a/mask=xffc0000a --status pass # Special case of ubfm where UInt(imms) < UInt(immr). 
# Note because LSL is preferred where imms + 1 == immr, we use ImmS_LT_ImmR_minus_1 :ubfiz Rd_GPR64, Rn_GPR64, ubfiz_lsb64, ubfiz_width is ImmS_LT_ImmR_minus_1=1 & ImmS_LT_ImmR=1 & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmS_bitfield64_imm & ImmRConst64 & ubfiz_lsb64 & ubfiz_width & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64 { local wmask:8 = DecodeWMask64; local tmask:8 = DecodeTMask64; local src:8 = Rn_GPR64; local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask; Rd_GPR64 = bot & tmask; } # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00 # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # C6.2.394 UXTB page C6-2002 line 117228 MATCH x53001c00/mask=xfffffc00 # C6.2.395 UXTH page C6-2003 line 117288 MATCH x53003c00/mask=xfffffc00 # CONSTRUCT x53000000/mask=xffe08000 MATCHED 7 DOCUMENTED OPCODES # AUNIT --inst x53000000/mask=xffe08000 --status pass # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :ubfm Rd_GPR32, Rn_GPR32, ImmRConst32, ImmSConst32 is sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & ImmRConst32 & ImmSConst32 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 { local wmask:4 = DecodeWMask32; local tmask:4 = DecodeTMask32; local src:4 = Rn_GPR32; local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; Rd_GPR64 = zext(bot & tmask); } # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00 # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.386 
UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # CONSTRUCT xd3400000/mask=xffc00000 MATCHED 5 DOCUMENTED OPCODES # AUNIT --inst xd3400000/mask=xffc00000 --status pass :ubfm Rd_GPR64, Rn_GPR64, ImmRConst64, ImmSConst64 is sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmS_bitfield64_imm & ImmRConst64 & ImmSConst64 & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64 { local wmask:8 = DecodeWMask64; local tmask:8 = DecodeTMask64; local src:8 = Rn_GPR64; local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask; Rd_GPR64 = bot & tmask; } # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.216 LSR (immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00 # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.394 UXTB page C6-2002 line 117228 MATCH x53001c00/mask=xfffffc00 # C6.2.395 UXTH page C6-2003 line 117288 MATCH x53003c00/mask=xfffffc00 # CONSTRUCT x53000010/mask=xffe0801a MATCHED 7 DOCUMENTED OPCODES # AUNIT --inst x53000010/mask=xffe0801a --status pass # Special case of ubfm as determined by BFXPreferred() # if sf == '0' && (N != '0' || immr<5> != '0' || imms<5> != '0') then ReservedValue(); :ubfx Rd_GPR32, Rn_GPR32, ImmRConst32, ubfx_width is ImmS_ne_1f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmRConst32 & ubfx_width & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & b_21=0 & b_15=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 & DecodeWMask32 & DecodeTMask32 { local wmask:4 = DecodeWMask32; local tmask:4 = DecodeTMask32; local src:4 = Rn_GPR32; local bot:4 = ((src>>ImmRConst32)|(src<<(32-ImmRConst32))) & wmask; Rd_GPR64 = zext(bot & tmask); } # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.216 LSR 
(immediate) page C6-1660 line 98490 MATCH x53007c00/mask=x7f807c00 # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # CONSTRUCT xd3400020/mask=xffc0002a MATCHED 5 DOCUMENTED OPCODES # AUNIT --inst xd3400020/mask=xffc0002a --status pass # Special case of ubfm as determined by BFXPreferred() :ubfx Rd_GPR64, Rn_GPR64, ImmRConst64, ubfx_width is ImmS_ne_3f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmRConst64 & ubfx_width & sf=1 & opc=2 & b_2428=0x13 & b_2323=0 & n=1 & ImmR_bitfield64_imm & ImmS_bitfield64_imm & Rn_GPR64 & Rd_GPR64 & DecodeWMask64 & DecodeTMask64 { local wmask:8 = DecodeWMask64; local tmask:8 = DecodeTMask64; local src:8 = Rn_GPR64; local bot:8 = ((src>>ImmRConst64)|(src<<(64-ImmRConst64))) & wmask; Rd_GPR64 = bot & tmask; } # C6.2.388 UDIV page C6-1994 line 116786 MATCH x1ac00800/mask=x7fe0fc00 # CONSTRUCT x1ac00800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x1ac00800/mask=xffe0fc00 --status pass :udiv Rd_GPR32, Rn_GPR32, Rm_GPR32 is sf=0 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR32 & b_1015=0x2 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { local tmp_1:4 = 0; if (Rm_GPR32 == 0) goto <next>; tmp_1 = Rn_GPR32 / Rm_GPR32; <next> Rd_GPR64 = zext(tmp_1); } # C6.2.388 UDIV page C6-1994 line 116786 MATCH x1ac00800/mask=x7fe0fc00 # CONSTRUCT x9ac00800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9ac00800/mask=xffe0fc00 --status pass :udiv Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & b_3030=0 & S=0 & b_2428=0x1a & b_2123=6 & Rm_GPR64 & b_1015=0x2 & Rn_GPR64 & Rd_GPR64 { local tmp_1:8 = 0; if (Rm_GPR64 == 0) goto <next>; tmp_1 = Rn_GPR64 / Rm_GPR64; <next> Rd_GPR64 = tmp_1; } # C6.2.389 UMADDL page C6-1995 line 116855 MATCH x9ba00000/mask=xffe08000 # CONSTRUCT x9ba00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9ba00000/mask=xffe08000 --status pass :umaddl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 &
Rm_GPR32 & op.dp3_o0=0 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = zext(Rn_GPR32); tmp_4:8 = zext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; tmp_1:8 = Ra_GPR64 + tmp_2; Rd_GPR64 = tmp_1; } # C6.2.390 UMNEGL page C6-1997 line 116945 MATCH x9ba0fc00/mask=xffe0fc00 # C6.2.391 UMSUBL page C6-1998 line 117009 MATCH x9ba08000/mask=xffe08000 # CONSTRUCT x9ba0fc00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x9ba0fc00/mask=xffe0fc00 --status pass :umnegl Rd_GPR64, Rn_GPR32, Rm_GPR32 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=1 & Ra=0x1f & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = zext(Rn_GPR32); tmp_4:8 = zext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; tmp_1:8 = - tmp_2; Rd_GPR64 = tmp_1; } # C6.2.391 UMSUBL page C6-1998 line 117009 MATCH x9ba08000/mask=xffe08000 # CONSTRUCT x9ba08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9ba08000/mask=xffe08000 --status pass :umsubl Rd_GPR64, Rn_GPR32, Rm_GPR32, Ra_GPR64 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=1 & Ra_GPR64 & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = zext(Rn_GPR32); tmp_4:8 = zext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; tmp_1:8 = Ra_GPR64 - tmp_2; Rd_GPR64 = tmp_1; } # C6.2.392 UMULH page C6-2000 line 117098 MATCH x9bc00000/mask=xffe08000 # CONSTRUCT x9bc00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst x9bc00000/mask=xffe08000 --status pass # To enforce SHOULD BE ONE fields add: b_1014=0b11111 :umulh Rd_GPR64, Rn_GPR64, Rm_GPR64 is sf=1 & op.dp3=0 & b_2428=0x1b & op.dp3_op31=6 & Rm_GPR64 & op.dp3_o0=0 & Ra & Rn_GPR64 & Rd_GPR64 { local tmpq:16 = zext(Rn_GPR64) * zext(Rm_GPR64); Rd_GPR64 = tmpq(8); } # C6.2.393 UMULL page C6-2001 line 117165 MATCH x9ba07c00/mask=xffe0fc00 # C6.2.389 UMADDL page C6-1995 line 116855 MATCH x9ba00000/mask=xffe08000 # CONSTRUCT x9ba07c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst x9ba07c00/mask=xffe0fc00 --status pass :umull Rd_GPR64, Rn_GPR32, Rm_GPR32 is sf=1 & op.dp3_op54=0 & b_2428=0x1b & 
op.dp3_op31=5 & Rm_GPR32 & op.dp3_o0=0 & Ra=0x1f & Rn_GPR32 & Rd_GPR64 { tmp_3:8 = zext(Rn_GPR32); tmp_4:8 = zext(Rm_GPR32); tmp_2:8 = tmp_3 * tmp_4; Rd_GPR64 = tmp_2; } # C6.2.394 UXTB page C6-2002 line 117228 MATCH x53001c00/mask=xfffffc00 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # CONSTRUCT x53001c10/mask=xfffffc1e MATCHED 5 DOCUMENTED OPCODES # AUNIT --inst x53001c10/mask=xfffffc1e --status pass # Alias for ubfm where immr=='000000' and imms='000111' # These imply things about the inequalities :uxtb Rd_GPR32, Rn_GPR32 is ImmR=0x0 & ImmS=0x7 & ImmS_ne_1f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; Rd_GPR64 = zext(tmp:1); } # C6.2.395 UXTH page C6-2003 line 117288 MATCH x53003c00/mask=xfffffc00 # C6.2.213 LSL (immediate) page C6-1654 line 98214 MATCH x53000000/mask=x7f800000 # C6.2.384 UBFIZ page C6-1986 line 116416 MATCH x53000000/mask=x7f800000 # C6.2.385 UBFM page C6-1988 line 116507 MATCH x53000000/mask=x7f800000 # C6.2.386 UBFX page C6-1991 line 116651 MATCH x53000000/mask=x7f800000 # CONSTRUCT x53003c10/mask=xfffffc1e MATCHED 5 DOCUMENTED OPCODES # AUNIT --inst x53003c10/mask=xfffffc1e --status pass # Alias for ubfm where immr=='000000' and imms='001111' # These imply things about the inequalities :uxth Rd_GPR32, Rn_GPR32 is ImmR=0x0 & ImmS=0x0f & ImmS_ne_1f=1 & ImmS_LT_ImmR=0 & ImmS_LT_ImmR_minus_1=0 & ImmS_EQ_ImmR=0 & sf=0 & opc=2 & b_2428=0x13 & b_2323=0 & n=0 & Rn_GPR32 & Rd_GPR32 & Rd_GPR64 { tmp:4 = Rn_GPR32; Rd_GPR64 = zext(tmp:2); } # C6.2.396 WFE page C6-2004 line 117348 MATCH xd503205f/mask=xffffffff # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # 
CONSTRUCT xd503205f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd503205f/mask=xffffffff --status nodest :wfe is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=2 & Rt=0x1f { WaitForEvent(); } # C6.2.397 WFET page C6-2005 line 117387 MATCH xd5031000/mask=xffffffe0 # CONSTRUCT xd5031000/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # xd5031000/mask=xffffffe0 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=110101010000001100010000000..... :wfet Rd_GPR64 is b_0531=0x6a81880 & Rd_GPR64 { WaitForEvent(Rd_GPR64); } # C6.2.398 WFI page C6-2006 line 117439 MATCH xd503207f/mask=xffffffff # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd503207f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd503207f/mask=xffffffff --status nodest :wfi is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=3 & Rt=0x1f { WaitForInterrupt(); } # C6.2.399 WFIT page C6-2007 line 117477 MATCH xd5031020/mask=xffffffe0 # CONSTRUCT xd5031020/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # xd5031020/mask=xffffffe0 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=110101010000001100010000001..... 
:wfit Rd_GPR64 is b_0531=0x6a81881 & Rd_GPR64 { WaitForInterrupt(Rd_GPR64); } # C6.2.401 XPACD, XPACI, XPACLRI page C6-2009 line 117573 MATCH xdac143e0/mask=xfffffbe0 # CONSTRUCT xdac147e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac147e0/mask=xffffffe0 --status noqemu # D == 1 XPACD variant :xpacd Rd_GPR64 is xpacd__PACpart & b_1131=0b110110101100000101000 & b_0509=0b11111 & b_10=1 & Rd_GPR64 { build xpacd__PACpart; } # C6.2.401 XPACD, XPACI, XPACLRI page C6-2009 line 117573 MATCH xdac143e0/mask=xfffffbe0 # CONSTRUCT xdac143e0/mask=xffffffe0 MATCHED 1 DOCUMENTED OPCODES # AUNIT --inst xdac143e0/mask=xffffffe0 --status noqemu # D == 0 XPACI variant :xpaci Rd_GPR64 is xpaci__PACpart & b_1131=0b110110101100000101000 & b_0509=0b11111 & b_10=0 & Rd_GPR64 { build xpaci__PACpart; } # C6.2.401 XPACD, XPACI, XPACLRI page C6-2009 line 117573 MATCH xd50320ff/mask=xffffffff # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd50320ff/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd50320ff/mask=xffffffff --status nodest # System variant :xpaclri is xpaclri__PACpart & b_0031=0b11010101000000110010000011111111 { build xpaclri__PACpart; } # C6.2.402 YIELD page C6-2011 line 117656 MATCH xd503203f/mask=xffffffff # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd503203f/mask=xffffffff MATCHED 2 DOCUMENTED OPCODES # AUNIT --inst xd503203f/mask=xffffffff --status nodest :yield is b_2431=0xd5 & b_2223=0 & l=0 & Op0=0 & Op1=3 & CRn=0x2 & imm7Low=1 & Rt=0x1f { Yield(); } # C6.2.6 ADDG page C6-787 line 46877 MATCH KEEPWITH with : ShowMemTag=1 { # C6.2.6 ADDG page C6-1155 line 68448 MATCH x91800000/mask=xffc00000 # CONSTRUCT x91800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES :addg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013 is sf=1 & op=0 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp # " & b_1415=0" is not required by the spec (op3 doesn't have 
any requirements and is not used) [ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ] { # we don't actually modify the target register, so Ghidra understands the pointer target is still the same. # pseudo-ops let us do that, but it means that the decompiler can put an unintuitive value in the # "CopyPtrTag_AddToPtrTag_Exclude" argument, e.g. "param_2 + 0x20". uimm4:1 = b_1013; exclude:2 = 0; Or2BytesWithExcludedTags(exclude); Rd_GPR64xsp = Rn_GPR64xsp + shifted_imm; CopyPtrTag_AddToPtrTag_Exclude(Rd_GPR64xsp, Rn_GPR64xsp, uimm4, exclude); } } # C6.2.6 ADDG page C6-787 line 44223 KEEPWITH with : ShowMemTag=0 { # C6.2.6 ADDG page C6-1155 line 68448 MATCH x91800000/mask=xffc00000 # CONSTRUCT x91800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES :addg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013 is sf=1 & op=0 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp # " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used) [ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ] { Rd_GPR64xsp = Rn_GPR64xsp + shifted_imm; } } # C6.2.24 AXFLAG page C6-1189 line 70222 MATCH xd500405f/mask=xfffff0ff # C6.2.229 MSR (immediate) page C6-1684 line 99649 MATCH xd500401f/mask=xfff8f01f # CONSTRUCT xd500405f/mask=xfffff0ff MATCHED 2 DOCUMENTED OPCODES # To enforce SHOULD BE ZERO fields add: b_0811=0b0000 :axflag is b_1231=0b11010101000000000100 & b_0007=0b01011111 { tmpZR = ZR | OV; tmpCY = CY & !OV; NG = 0; ZR = tmpZR; CY = tmpCY; OV = 0; } # C6.2.41 BTI page C6-1214 line 71457 MATCH xd503241f/mask=xffffff3f # C6.2.126 HINT page C6-1480 line 88030 MATCH xd503201f/mask=xfffff01f # CONSTRUCT xd503241f/mask=xffffff1f MATCHED 2 DOCUMENTED OPCODES :bti BTI_BTITARGETS is BTI_BTITARGETS & b_1231=0xd5032 & b_0811=4 & b_0004=0x1f { # This instruction is a valid target for jumps, calls, or both; see the BTI_BTITARGETS table. 
} # C6.2.53 CFP page C6-1237 line 72667 MATCH xd50b7380/mask=xffffffe0 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50b7380/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES :cfp "RCTX", Rt_GPR64 is b_1931=0b1101010100001 & Op1_uimm3=3 & b_1215=7 & b_0811=3 & Op2_uimm3=4 & Rt_GPR64 { ControlFlowPredictionRestrictionByContext(Rt_GPR64); } # C6.2.65 CMPP page C6-1258 line 73720 MATCH xbac0001f/mask=xffe0fc1f # C6.2.361 SUBPS page C6-1949 line 114470 MATCH xbac00000/mask=xffe0fc00 # CONSTRUCT xbac0001f/mask=xffe0fc1f MATCHED 2 DOCUMENTED OPCODES # CMPP: Compare Pointers # Compare two pointer 56-bit pointer values and set flags :cmpp Rn_GPR64xsp, Rm_GPR64xsp is sf=1 & b_30=0 & S=1 & b_2128=0b11010110 & Rm_GPR64xsp & b_1015=0b000000 & Rd=0b11111 & Rn_GPR64xsp { # out of a 64-bit value, keep the lowest 56 bits, which is 7 bytes. # sign-extend a 7-byte value to an 8-byte value. If the boundary weren't byte-aligned, # sext() wouldn't work so well. tmp_2:8 = Rm_GPR64xsp; tmp_2 = sext(tmp_2:7); # if Rm:7 is used here, the decompiler considers the Rm register an int7 for the whole function. 
tmp_1:8 = Rn_GPR64xsp; tmp_1 = sext(tmp_1:7); subflags(tmp_1, tmp_2); tmp_1 = tmp_1 - tmp_2; resultflags(tmp_1); affectflags(); } # C6.2.67 CPP page C6-1261 line 73861 MATCH xd50b73e0/mask=xffffffe0 # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50b73e0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES :cpp "RCTX", Rt_GPR64 is b_1931=0b1101010100001 & Op1_uimm3=3 & b_1215=7 & b_0811=3 & Op2_uimm3=7 & Rt_GPR64 { CachePrefetchPredictionRestrictionByContext(Rt_GPR64); } # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087660/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES # the new DC instruction types from ARMv8.5 :dc "IGVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b011 & Rt_GPR64 { DC_IGVAC(Rt_GPR64); } # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd5087680/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES :dc "IGSW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b100 & Rt_GPR64 { DC_IGSW(Rt_GPR64); } # C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000 # C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000 # C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000 # CONSTRUCT xd50876a0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES :dc "IGDVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b101 & Rt_GPR64 { DC_IGDVAC(Rt_GPR64); } # C6.2.109 DC page C6-1455 line 86693 MATCH 
# NOTE(review): line breaks restored — the extraction collapsed the original .sinc line
# structure; each "# C6.2.x" manual cross-reference below belongs on its own line.
# xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50876c0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# DC tag-maintenance forms (MTE): each constructor maps one "DC <op>, Xt" encoding to a
# dedicated DC_* pcodeop; fields b_1618/b_0811/b_0507 select the specific cache operation.
:dc "IGDSW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b0110 & b_0507=0b110 & Rt_GPR64 { DC_IGDSW(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087a80/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGSW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b100 & Rt_GPR64 { DC_CGSW(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087ac0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGDSW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b110 & Rt_GPR64 { DC_CGDSW(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087e80/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CIGSW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b100 & Rt_GPR64 { DC_CIGSW(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd5087ec0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CIGDSW", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b000 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b110 & Rt_GPR64 { DC_CIGDSW(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7460/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
# The b_1618=0b011 group below are the by-virtual-address (VA) DC tag operations.
:dc "GVA", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0100 & b_0507=0b011 & Rt_GPR64 { DC_GVA(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7480/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "GZVA", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b0100 & b_0507=0b100 & Rt_GPR64 { DC_GZVA(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7a60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b011 & Rt_GPR64 { DC_CGVAC(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7aa0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGDVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1010 & b_0507=0b101 & Rt_GPR64 { DC_CGDVAC(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7c60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGVAP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1100 & b_0507=0b011 & Rt_GPR64 { DC_CGVAP(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7ca0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGDVAP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1100 & b_0507=0b101 & Rt_GPR64 { DC_CGDVAP(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7d60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGVADP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1101 & b_0507=0b011 & Rt_GPR64 { DC_CGVADP(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7da0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CGDVADP", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1101 & b_0507=0b101 & Rt_GPR64 { DC_CGDVADP(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7e60/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CIGVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b011 & Rt_GPR64 { DC_CIGVAC(Rt_GPR64); }

# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
#
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b7ea0/mask=xffffffe0 MATCHED 3 DOCUMENTED OPCODES
:dc "CIGDVAC", Rt_GPR64 is b_1931=0b1101010100001 & b_1618=0b011 & b_1215=0b0111 & b_0811=0b1110 & b_0507=0b101 & Rt_GPR64 { DC_CIGDVAC(Rt_GPR64); }

# C6.2.117 DVP page C6-1467 line 87355 MATCH xd50b73a0/mask=xffffffe0
# C6.2.109 DC page C6-1455 line 86693 MATCH xd5087000/mask=xfff8f000
# C6.2.129 IC page C6-1484 line 88281 MATCH xd5087000/mask=xfff8f000
# C6.2.372 SYS page C6-1969 line 115559 MATCH xd5080000/mask=xfff80000
# CONSTRUCT xd50b73a0/mask=xffffffe0 MATCHED 4 DOCUMENTED OPCODES
:dvp "RCTX", Rt_GPR64 is b_1931=0b1101010100001 & Op1_uimm3=3 & b_1215=7 & b_0811=3 & Op2_uimm3=5 & Rt_GPR64 { DataValuePredictionRestrictionByContext(Rt_GPR64); }

# GMI: Tag Mask Insert
# Extracts tag from first source register (Xn) and adds as an excluded tag to list of excluded
# tags in second source register, writing the updated exclusion set to the destination register
with : ShowMemTag=1 {
# C6.2.125 GMI page C6-1479 line 87976 MATCH x9ac01400/mask=xffe0fc00
# CONSTRUCT x9ac01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
:gmi Rd_GPR64, Rn_GPR64xsp, Rm_GPR64 is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & Rm_GPR64 & b_1015=0b000101 & Rn_GPR64xsp & Rd_GPR64
{
    # get tag from address
    #tag:8 = (Rn_GPR64xsp >> 56) & 0xf;
    tag:8 = 0;
    AllocationTagFromAddress(tag, Rn_GPR64xsp);
    Rd_GPR64 = Rm_GPR64 | (1 << tag);
}
}

with : ShowMemTag=0 {
# C6.2.125 GMI page C6-1479 line 87976 MATCH x9ac01400/mask=xffe0fc00
# CONSTRUCT x9ac01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
:gmi Rd_GPR64, Rn_GPR64xsp, Rm_GPR64 is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & Rm_GPR64 & b_1015=0b000101 & Rn_GPR64xsp & Rd_GPR64
{
    # The only expected use of the output of this instruction is in "exclude" arguments, which will be totally ignored
    # with ShowMemTag off anyway, so for the sake of more concise code don't set any mask bits at all.
    Rd_GPR64 = Rm_GPR64;
}
}

# IRG: Insert Random Tag
# Generates random tag (honoring excluded tags specified in optional second source register
# and GCR_EL1.Exclude) into the address from first source register, writing the result to the
# destination register.
with : ShowMemTag=1 {
# C6.2.130 IRG page C6-1485 line 88340 MATCH x9ac01000/mask=xffe0fc00
# CONSTRUCT x9ac01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
:irg Rd_GPR64xsp, Rn_GPR64xsp^OPTIONAL_XM is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & OPTIONAL_XM & b_1015=0b000100 & Rn_GPR64xsp & Rd_GPR64xsp
{
    tmp:8 = OPTIONAL_XM;
    exclude:2 = tmp:2;
    Or2BytesWithExcludedTags(exclude);
    Rd_GPR64xsp = Rn_GPR64xsp;
    RandomizePtrTag_Exclude(Rd_GPR64xsp, exclude);
}
}

with : ShowMemTag=0 {
# C6.2.130 IRG page C6-1485 line 88340 MATCH x9ac01000/mask=xffe0fc00
# CONSTRUCT x9ac01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
:irg Rd_GPR64xsp, Rn_GPR64xsp^OPTIONAL_XM is sf=1 & b_30=0 & S=0 & b_2128=0b11010110 & OPTIONAL_XM & b_1015=0b000100 & Rn_GPR64xsp & Rd_GPR64xsp
{
    # with ShowMemTag off the tag insertion is elided; the pointer just passes through
    Rd_GPR64xsp = Rn_GPR64xsp;
}
}

with : ShowMemTag=1 {
# C6.2.158 LDG page C6-1538 line 91337 MATCH xd9600000/mask=xffe00c00
# CONSTRUCT xd9600000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
:ldg Rt_GPR64, addr_granuleSIMM is b_2131=0b11011001011 & addr_granuleSIMM & b_1011=0b00 & Rt_GPR64
{
    tmp:8 = addr_granuleSIMM;
    Align(tmp, $(TAG_GRANULE));
    tag:8 = LoadMemTag(tmp);
    SetPtrTag(Rt_GPR64, tag);
}
}

with : ShowMemTag=0 {
# C6.2.158 LDG page C6-1538 line 91337 MATCH xd9600000/mask=xffe00c00
# CONSTRUCT xd9600000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
:ldg Rt_GPR64, addr_granuleSIMM is b_2131=0b11011001011 & addr_granuleSIMM & b_1011=0b00 & Rt_GPR64
{
}
}

with : ShowMemTag=1 {
# C6.2.159 LDGM page C6-1539 line 91400 MATCH xd9e00000/mask=xfffffc00
# CONSTRUCT xd9e00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:ldgm Rt_GPR64, "["^Rn_GPR64xsp^"]" is b_1031=0b1101100111100000000000 & Rt_GPR64 & Rn_GPR64xsp
{
    sze:8 = 4 << (gmid_el1 & 0xf); # The value in parentheses (GMID_EL1.BS) varies between 2 and 6.
    address:8 = Rn_GPR64xsp;
    Align(address, sze); # this ensures that address will be granule-aligned, so we don't need to check it
    count:8 = sze >> $(LOG2_TAG_GRANULE);
    data:8 = 0:8; # output value
    index:8 = (address >> $(LOG2_TAG_GRANULE)) & 0xf;
    # for tmp = 0 to count-1
    tmp:8 = 0;
    # NOTE(review): a SLEIGH local label (angle-bracket form) appears to have been lost in
    # extraction here; the bare "goto ;" below originally targeted it — confirm upstream.
    tag:8 = LoadMemTag(address) & 0xf; # The 0xf doesn't do anything to streamline the representation of this
    # instruction in the decompiler, but it shows the size of a tag.
    data = data | (tag << (index * 4));
    address = address + $(TAG_GRANULE);
    index = index + 1;
    tmp = tmp + 1;
    # next tmp
    if (tmp < count) goto ;
    Rt_GPR64 = data;
}
}

with : ShowMemTag=0 {
# C6.2.159 LDGM page C6-1539 line 91400 MATCH xd9e00000/mask=xfffffc00
# CONSTRUCT xd9e00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:ldgm Rt_GPR64, "["^Rn_GPR64xsp^"]" is b_1031=0b1101100111100000000000 & Rt_GPR64 & Rn_GPR64xsp
{
    data:8 = 0:8; # output value
    Rt_GPR64 = data;
}
}

# C6.2.132 LD64B page C6-1488 line 88475 MATCH xf83fd000/mask=xfffffc00
# CONSTRUCT xf83fd000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# xf83fd000/mask=xfffffc00 NOT MATCHED BY ANY CONSTRUCTOR
# b_0031=1111100000111111110100..........
# Single-copy Atomic 64-byte Load
:ld64b Rn_GPR64, [Rt_GPR64xsp] is b_1031=0x3e0ff4 & Rn_GPR64 & Rt_GPR64xsp { Rn_GPR64 = *Rt_GPR64xsp; }

# C6.2.292 ST64B page C6-1813 line 107121 MATCH xf83f9000/mask=xfffffc00
# CONSTRUCT xf83f9000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:st64b Rn_GPR64, [Rt_GPR64xsp] is b_1031=0x3e0fe4 & Rn_GPR64 & Rt_GPR64xsp { *Rt_GPR64xsp = Rn_GPR64; }

# C6.2.293 ST64BV page C6-1814 line 107190 MATCH xf820b000/mask=xffe0fc00
# CONSTRUCT xf820b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
define pcodeop store64ByteAtomic;
:st64bv Rs_GPR64, Rn_GPR64, [Rt_GPR64xsp] is b_2131=0x7c1 & Rs_GPR64 & b_1015=0x2c & Rn_GPR64 & Rt_GPR64xsp
{
    *Rt_GPR64xsp = Rn_GPR64;
    Rs_GPR64 = store64ByteAtomic(Rn_GPR64, Rt_GPR64xsp);
}

# C6.2.294 ST64BV0 page C6-1816 line 107284 MATCH xf820a000/mask=xffe0fc00
# CONSTRUCT xf820a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# NOTE(review): the display section here shows no operands even though the pattern binds
# Rs/Rn/Rt — looks like operand text may have been lost in extraction; confirm upstream.
:st64bv0 is b_2131=0b11111000001 & Rs_GPR64 & b_1015=0x28 & Rn_GPR64 & Rt_GPR64xsp
{
    local st64bv_addr:8 = (Rt_GPR64xsp & 0xffff0000) | (accdata & 0xffff);
    *st64bv_addr = Rn_GPR64;
    Rs_GPR64 = store64ByteAtomic(Rn_GPR64, st64bv_addr);
}

addrGranuleIndexed_checkAlignment: addrGranuleIndexed is Rn=0b11111 & addrGranuleIndexed { export addrGranuleIndexed; }
# don't check alignment if we're working with the stack, it's assumed to be 16-byte-aligned, though that is technically optional
addrGranuleIndexed_checkAlignment: addrGranuleIndexed is Rn & addrGranuleIndexed { tmp:8 = addrGranuleIndexed; RequireGranuleAlignment(tmp); export tmp; }
# if the address in tmp is derived from sp, the error condition in RequireGranuleAlignment can still be an unreachable block; it doesn't seem possible to avoid the decompiler message in that case
addrPairGranuleIndexed_checkAlignment: addrPairGranuleIndexed is Rn=0b11111 & addrPairGranuleIndexed { export addrPairGranuleIndexed; }
# don't check alignment if we're working with the stack, it's assumed to be 16-byte-aligned, though that is technically optional
addrPairGranuleIndexed_checkAlignment: addrPairGranuleIndexed is Rn & addrPairGranuleIndexed { tmp:8 = addrPairGranuleIndexed; RequireGranuleAlignment(tmp); export tmp; }
# if the address in tmp is derived from sp, the error condition in RequireGranuleAlignment can still be an unreachable block; it doesn't seem possible to avoid the decompiler message in that case

with : ShowMemTag=1 {
# C6.2.291 ST2G page C6-1811 line 106987 MATCH xd9a00400/mask=xffe00c00
# C6.2.291 ST2G page C6-1811 line 106987 MATCH xd9a00c00/mask=xffe00c00
# C6.2.291 ST2G page C6-1811 line 106987 MATCH xd9a00800/mask=xffe00c00
# CONSTRUCT xd9a00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:st2g Rt_GPR64xsp, addrGranuleIndexed_checkAlignment is b_2131=0b11011001101 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed_checkAlignment
{
    # in case Rt == Rn, get the tag first so any updates in addrGranuleIndexed_checkAlignment don't affect it
    tag:8 = 0;
    AllocationTagFromAddress(tag, Rt_GPR64xsp);
    build addrGranuleIndexed_checkAlignment; # this instruction throws an alignment fault if address is not granule-aligned
    address:8 = addrGranuleIndexed_checkAlignment;
    StoreMemTag(address, tag );
    StoreMemTag(address + $(TAG_GRANULE), tag );
}
}

with : ShowMemTag=0 {
# C6.2.291 ST2G page C6-1811 line 106987 MATCH xd9a00400/mask=xffe00c00
# C6.2.291 ST2G page C6-1811 line 106987 MATCH xd9a00c00/mask=xffe00c00
# C6.2.291 ST2G page C6-1811 line 106987 MATCH xd9a00800/mask=xffe00c00
# CONSTRUCT xd9a00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:st2g Rt_GPR64xsp, addrGranuleIndexed is b_2131=0b11011001101 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed
{
    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
}
}

with : ShowMemTag=1 {
# C6.2.304 STG page C6-1836 line 108242 MATCH xd9200400/mask=xffe00c00
# C6.2.304 STG page C6-1836 line 108242 MATCH xd9200c00/mask=xffe00c00
# C6.2.304 STG page C6-1836 line 108242 MATCH xd9200800/mask=xffe00c00
# CONSTRUCT xd9200000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:stg Rt_GPR64xsp, addrGranuleIndexed_checkAlignment is b_2131=0b11011001001 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed_checkAlignment
{
    # in case Rt == Rn, get the tag first so any updates in addrGranuleIndexed_checkAlignment don't affect it
    tag:8 = 0;
    AllocationTagFromAddress(tag, Rt_GPR64xsp);
    build addrGranuleIndexed_checkAlignment; # this instruction throws an alignment fault if address is not granule-aligned
    address:8 = addrGranuleIndexed_checkAlignment;
    StoreMemTag(address, tag );
}
}

with : ShowMemTag=0 {
# C6.2.304 STG page C6-1836 line 108242 MATCH xd9200400/mask=xffe00c00
# C6.2.304 STG page C6-1836 line 108242 MATCH xd9200c00/mask=xffe00c00
# C6.2.304 STG page C6-1836 line 108242 MATCH xd9200800/mask=xffe00c00
# CONSTRUCT xd9200000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:stg Rt_GPR64xsp, addrGranuleIndexed is b_2131=0b11011001001 & (b_10=1 | b_11=1) & Rt_GPR64xsp & addrGranuleIndexed
{
    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
}
}

with : ShowMemTag=1 {
# C6.2.305 STGM page C6-1838 line 108375 MATCH xd9a00000/mask=xfffffc00
# CONSTRUCT xd9a00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:stgm Rt_GPR64, "["^Rn_GPR64xsp^"]" is b_1031=0b1101100110100000000000 & Rt_GPR64 & Rn_GPR64xsp
{
    sze:8 = 4 << (gmid_el1 & 0xf); # The value of GMID_EL1.BS varies between 2 and 6. Can that be asserted somehow?
    address:8 = Rn_GPR64xsp;
    Align(address, sze); # this ensures that address will be granule-aligned, so we don't need to check it
    count:8 = sze >> $(LOG2_TAG_GRANULE);
    data:8 = Rt_GPR64;
    index:8 = (address >> $(LOG2_TAG_GRANULE)) & 0xf;
    # for tmp = 0 to count-1
    tmp:8 = 0;
    # This could also be done by leaving index and address constant and adding a tmp-based
    # offset to them both, but that crams everything together into the StoreMemTag line in
    # the decompiler and makes it harder to assign names and figure out what's going on.
    # (Or at least, my opinion is that it's harder that way.)
    # Also in favor of this design is that the ARM spec pseudocode describes it this way,
    # so it's easier to see that this code matches the pseudocode.
    # NOTE(review): a SLEIGH local label appears lost in extraction here ("goto ;" below).
    tag:8 = (data >> (index * 4)) & 0xf;
    StoreMemTag(address, tag );
    address = address + $(TAG_GRANULE);
    index = index + 1;
    tmp = tmp + 1;
    # next tmp
    if (tmp < count) goto ;
}
}

with : ShowMemTag=0 {
# C6.2.305 STGM page C6-1838 line 108375 MATCH xd9a00000/mask=xfffffc00
# CONSTRUCT xd9a00000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:stgm Rt_GPR64, "["^Rn_GPR64xsp^"]" is b_1031=0b1101100110100000000000 & Rt_GPR64 & Rn_GPR64xsp
{
}
}

with : ShowMemTag=1 {
# C6.2.306 STGP page C6-1839 line 108445 MATCH x68800000/mask=xffc00000
# C6.2.306 STGP page C6-1839 line 108445 MATCH x69800000/mask=xffc00000
# C6.2.306 STGP page C6-1839 line 108445 MATCH x69000000/mask=xffc00000
# CONSTRUCT x68000000/mask=xfe400000 MATCHED 3 DOCUMENTED OPCODES
:stgp Rt_GPR64, Rt2_GPR64, addrPairGranuleIndexed_checkAlignment is b_3031=0b01 & b_2529=0b10100 & (b_23=1 | b_24=1) & b_22=0 & Rt2_GPR64 & addrPairGranuleIndexed_checkAlignment & Rt_GPR64
{
    # Read all registers before addrPairGranuleIndexed_checkAlignment takes effect, or pre-index writeback could modify their values
    # (unusually, this instruction does not have unpredictable behavior in that case).
    data1:8 = Rt_GPR64;
    data2:8 = Rt2_GPR64;
    build addrPairGranuleIndexed_checkAlignment;
    address:8 = addrPairGranuleIndexed_checkAlignment; # StoreMemTag requires granule alignment
    tag:8 = 0;
    AllocationTagFromAddress(tag, address);
    # The decompiler apparently doesn't show changes to [sp+X] unless the new values
    # are used in the function. However, the changes really are happening.
    *address = data1;
    *(address + 8) = data2;
    StoreMemTag(address, tag);
}
}

with : ShowMemTag=0 {
# C6.2.306 STGP page C6-1839 line 108445 MATCH x68800000/mask=xffc00000
# C6.2.306 STGP page C6-1839 line 108445 MATCH x69800000/mask=xffc00000
# C6.2.306 STGP page C6-1839 line 108445 MATCH x69000000/mask=xffc00000
# CONSTRUCT x68000000/mask=xfe400000 MATCHED 3 DOCUMENTED OPCODES
:stgp Rt_GPR64, Rt2_GPR64, addrPairGranuleIndexed is b_3031=0b01 & b_2529=0b10100 & (b_23=1 | b_24=1) & b_22=0 & Rt2_GPR64 & addrPairGranuleIndexed & Rt_GPR64
{
    # Read all registers before addrPairGranuleIndexed takes effect, or pre-index writeback could modify their values
    # (unusually, this instruction does not have unpredictable behavior in this case).
    data1:8 = Rt_GPR64;
    data2:8 = Rt2_GPR64;
    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
    build addrPairGranuleIndexed;
    address:8 = addrPairGranuleIndexed;
    # The decompiler apparently doesn't show changes to [sp+X] unless the new values
    # are used in the function. However, the changes really are happening.
    *address = data1;
    *(address + 8) = data2;
}
}

with : ShowMemTag=1 {
# C6.2.353 STZ2G page C6-1935 line 113626 MATCH xd9e00400/mask=xffe00c00
# C6.2.353 STZ2G page C6-1935 line 113626 MATCH xd9e00c00/mask=xffe00c00
# C6.2.353 STZ2G page C6-1935 line 113626 MATCH xd9e00800/mask=xffe00c00
# CONSTRUCT xd9e00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:stz2g Rt_GPR64xsp, addrGranuleIndexed_checkAlignment is b_2131=0b11011001111 & (b_10=1 | b_11=1) & addrGranuleIndexed_checkAlignment & Rt_GPR64xsp
{
    tag:8 = 0;
    AllocationTagFromAddress(tag, Rt_GPR64xsp);
    # Although the zero-storage is not required to be granule-aligned, the tag-updating is,
    # so effectively the entire operation must be at a granule-aligned address.
    build addrGranuleIndexed_checkAlignment;
    address:8 = addrGranuleIndexed_checkAlignment;
    # store two granules worth of zeros and tag it from Rt
    # NOTE(review): a SLEIGH local loop label appears lost in extraction ("goto ;" below).
    tmp:8 = 0;
    addr:8 = 0;
    count:8 = $(TAG_GRANULE) * 2;
    addr = address + tmp;
    *addr = 0:8;
    tmp = tmp + 8;
    if (tmp < count) goto ;
    StoreMemTag(address, tag);
    StoreMemTag(address + $(TAG_GRANULE), tag);
}
}

with : ShowMemTag=0 {
# C6.2.353 STZ2G page C6-1935 line 113626 MATCH xd9e00400/mask=xffe00c00
# C6.2.353 STZ2G page C6-1935 line 113626 MATCH xd9e00c00/mask=xffe00c00
# C6.2.353 STZ2G page C6-1935 line 113626 MATCH xd9e00800/mask=xffe00c00
# CONSTRUCT xd9e00000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:stz2g Rt_GPR64xsp, addrGranuleIndexed is b_2131=0b11011001111 & (b_10=1 | b_11=1) & addrGranuleIndexed & Rt_GPR64xsp
{
    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
    build addrGranuleIndexed;
    address:8 = addrGranuleIndexed;
    # store two granules worth of zeros
    tmp:8 = 0;
    addr:8 = 0;
    count:8 = $(TAG_GRANULE) * 2;
    addr = address + tmp;
    *addr = 0:8;
    tmp = tmp + 8;
    if (tmp < count) goto ;
}
}

with : ShowMemTag=1 {
# C6.2.354 STZG page C6-1937 line 113766 MATCH xd9600400/mask=xffe00c00
# C6.2.354 STZG page C6-1937 line 113766 MATCH xd9600c00/mask=xffe00c00
# C6.2.354 STZG page C6-1937 line 113766 MATCH xd9600800/mask=xffe00c00
# CONSTRUCT xd9600000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:stzg Rt_GPR64xsp, addrGranuleIndexed_checkAlignment is b_2131=0b11011001011 & (b_10=1 | b_11=1) & addrGranuleIndexed_checkAlignment & Rt_GPR64xsp
{
    tag:8 = 0;
    AllocationTagFromAddress(tag, Rt_GPR64xsp);
    # Although the zero-storage is not required to be granule-aligned, the tag-updating is,
    # so effectively the entire operation must be at a granule-aligned address.
    build addrGranuleIndexed_checkAlignment;
    address:8 = addrGranuleIndexed_checkAlignment;
    # store one granule worth of zeros and tag it from Rt
    tmp:8 = 0;
    addr:8 = 0;
    count:8 = $(TAG_GRANULE);
    addr = address + tmp;
    *addr = 0:8;
    tmp = tmp + 8;
    if (tmp < count) goto ;
    StoreMemTag(address, tag);
}
}

with : ShowMemTag=0 {
# C6.2.354 STZG page C6-1937 line 113766 MATCH xd9600400/mask=xffe00c00
# C6.2.354 STZG page C6-1937 line 113766 MATCH xd9600c00/mask=xffe00c00
# C6.2.354 STZG page C6-1937 line 113766 MATCH xd9600800/mask=xffe00c00
# CONSTRUCT xd9600000/mask=xffe00000 MATCHED 3 DOCUMENTED OPCODES
:stzg Rt_GPR64xsp, addrGranuleIndexed is b_2131=0b11011001011 & (b_10=1 | b_11=1) & addrGranuleIndexed & Rt_GPR64xsp
{
    # for the sake of simplified output, omit the alignment check when ShowMemTag is off
    build addrGranuleIndexed;
    address:8 = addrGranuleIndexed;
    # store one granule worth of zeros
    tmp:8 = 0;
    addr:8 = 0;
    count:8 = $(TAG_GRANULE);
    addr = address + tmp;
    *addr = 0:8;
    tmp = tmp + 8;
    if (tmp < count) goto ;
}
}

with : ShowMemTag=1 {
# C6.2.355 STZGM page C6-1939 line 113904 MATCH xd9200000/mask=xfffffc00
# CONSTRUCT xd9200000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:stzgm Rt_GPR64, "["^Rn_GPR64xsp^"]" is b_1031=0b1101100100100000000000 & Rt_GPR64 & Rn_GPR64xsp
{
    sze:8 = 4 << (dczid_el0 & 0xf); # the last value (DCZID_EL0.BS) can be up to 9 (for a size of 2KB) and seems
    # to be hardware-dependent and unwriteable (sysreg spec doesn't show how to write it).
    # minimum is probably 2, which would make the size equal to a tag granule
    address:8 = Rn_GPR64xsp;
    Align(address, sze); # based on the educated guess above, address is probably granule-aligned by this, so we won't check it explicitly (compare to LDGM or STGM)
    count:8 = sze >> $(LOG2_TAG_GRANULE);
    data:8 = Rt_GPR64;
    tag:8 = data & 0xf;
    # for tmp = 0 to count-1
    # NOTE(review): SLEIGH local loop labels appear lost in extraction ("goto ;" below).
    tmp:8 = 0;
    StoreMemTag(address, tag );
    # store zeros to the entire granule
    tmp_zero:8 = 0;
    addr_zero:8 = address;
    count_zero:8 = $(TAG_GRANULE);
    addr_zero = address + tmp_zero;
    *addr_zero = 0:8;
    tmp_zero = tmp_zero + 8;
    if (tmp_zero < count_zero) goto ;
    address = address + $(TAG_GRANULE);
    # next tmp
    tmp = tmp + 1;
    if (tmp < count) goto ;
}
}

with : ShowMemTag=0 {
# C6.2.355 STZGM page C6-1939 line 113904 MATCH xd9200000/mask=xfffffc00
# CONSTRUCT xd9200000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
:stzgm Rt_GPR64, "["^Rn_GPR64xsp^"]" is b_1031=0b1101100100100000000000 & Rt_GPR64 & Rn_GPR64xsp
{
    sze:8 = 4 << (dczid_el0 & 0xf); # the last value (DCZID_EL0.BS) can be up to 9 (for a size of 2KB) and seems
    # to be hardware-dependent and unwriteable (sysreg spec doesn't show how to write it).
    # minimum is probably 2, which would make the size equal to a tag granule
    address:8 = Rn_GPR64xsp;
    Align(address, sze); # based on the educated guess above, address is probably granule-aligned by this, so we won't check it explicitly (compare to LDGM or STGM)
    count:8 = sze >> $(LOG2_TAG_GRANULE);
    # for tmp = 0 to count-1
    tmp:8 = 0;
    # store zeros to the entire granule
    tmp_zero:8 = 0;
    addr_zero:8 = address;
    count_zero:8 = $(TAG_GRANULE);
    addr_zero = address + tmp_zero;
    *addr_zero = 0:8;
    tmp_zero = tmp_zero + 8;
    if (tmp_zero < count_zero) goto ;
    address = address + $(TAG_GRANULE);
    # next tmp
    tmp = tmp + 1;
    if (tmp < count) goto ;
}
}

# To enforce SHOULD BE ZERO fields add: b_1415=0b00
with : ShowMemTag=1 {
# C6.2.359 SUBG page C6-1947 line 114340 MATCH xd1800000/mask=xffc00000
# CONSTRUCT xd1800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
:subg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013 is sf=1 & op=1 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp # " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used)
[ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ]
{
    # we don't actually modify the target register, so Ghidra understands the pointer target is still the same.
    # pseudo-ops let us do that, but it means that the decompiler can put an unintuitive value in the
    # "CopyPtrTag_AddToPtrTag_Exclude" argument, e.g. "param_2 - 0x20".
    uimm4:1 = b_1013;
    exclude:2 = 0;
    Or2BytesWithExcludedTags(exclude);
    Rd_GPR64xsp = Rn_GPR64xsp - shifted_imm;
    CopyPtrTag_AddToPtrTag_Exclude(Rd_GPR64xsp, Rn_GPR64xsp, uimm4, exclude);
}
}

with : ShowMemTag=0 {
# C6.2.359 SUBG page C6-1947 line 114340 MATCH xd1800000/mask=xffc00000
# CONSTRUCT xd1800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
:subg Rd_GPR64xsp, Rn_GPR64xsp, "#"^shifted_imm, "#"^b_1013 is sf=1 & op=1 & S=0 & b_2328=0b100011 & b_22=0 & b_1621 & b_1013 & Rd_GPR64xsp & Rn_GPR64xsp # " & b_1415=0" is not required by the spec (op3 doesn't have any requirements and is not used)
[ shifted_imm = b_1621 << $(LOG2_TAG_GRANULE); ]
{
    Rd_GPR64xsp = Rn_GPR64xsp - shifted_imm;
}
}

# Subtract Pointer [setting Flags]:
# Subtract the 56-bit address held in the second operand from the first and store the result
# in the destination register. If the destination register is XZR, then just use as a side-
# effect of being a pointer comparison (CMPP).
# C6.2.360 SUBP page C6-1948 line 114410 MATCH x9ac00000/mask=xffe0fc00
# C6.2.361 SUBPS page C6-1949 line 114470 MATCH xbac00000/mask=xffe0fc00
# CONSTRUCT x9ac00000/mask=xdfe0fc00 MATCHED 2 DOCUMENTED OPCODES
:subp^SBIT_CZNO Rd_GPR64, Rn_GPR64xsp, Rm_GPR64xsp is sf=1 & b_30=0 & S & SBIT_CZNO & b_2128=0b11010110 & b_1015=0b000000 & Rd_GPR64 & Rn_GPR64xsp & Rm_GPR64xsp
{
    # out of a 64-bit value, keep the lowest 56 bits, which is 7 bytes.
    # sign-extend a 7-byte value to an 8-byte value. If the boundary weren't byte-aligned,
    # sext() wouldn't work so well.
    tmp_2:8 = Rm_GPR64xsp;
    tmp_2 = sext(tmp_2:7); # if Rm:7 is used here, the decompiler considers the Rm register an int7 for the whole function.
    tmp_1:8 = Rn_GPR64xsp;
    tmp_1 = sext(tmp_1:7);
    subflags(tmp_1, tmp_2);
    tmp_1 = tmp_1 - tmp_2;
    resultflags(tmp_1);
    Rd_GPR64 = tmp_1;
    build SBIT_CZNO;
}

# C6.2.376 TCANCEL page C6-1974 line 115824 MATCH xd4600000/mask=xffe0001f
# CONSTRUCT xd4600000/mask=xffe0001f MATCHED 1 DOCUMENTED OPCODES
define pcodeop cancelTransaction;
:tcancel "#"^imm16 is b_2131=0x6a3 & imm16 & b_0519 & b_2020 & b_0004=0x0
{
    local tmp:2 = imm16;
    local reason:2 = b_0519;
    local retry:1 = b_2020;
    cancelTransaction(reason, retry);
}

# C6.2.377 TCOMMIT page C6-1975 line 115871 MATCH xd503307f/mask=xffffffff
# CONSTRUCT xd503307f/mask=xffffffff MATCHED 1 DOCUMENTED OPCODES
define pcodeop commitTransaction;
:tcommit is b_0031=0xd503307f { commitTransaction(); }

# C6.2.379 TSTART page C6-1979 line 116075
define pcodeop transactionStart;
:tstart Rd_GPR64 is b_0531=0x6a9193a & Rd_GPR64 { transactionStart(Rd_GPR64); }

# C6.2.380 TTEST page C6-1981 line 116175
define pcodeop transactionDepth;
:ttest Rd_GPR64 is b_0531=0x6a9198b & Rd_GPR64 { Rd_GPR64 = transactionDepth(); }

# C6.2.387 UDF page C6-1993 line 116744 MATCH x00000000/mask=xffff0000
# CONSTRUCT x00000000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES
# Undefined instruction
:udf b_0015 is b_1631=0b0000000000000000 & b_0015
{
    local excaddr:8 = inst_start;
    local id:2 = b_0015;
    local target:8 = UndefinedInstructionException(id, excaddr);
    goto [target];
}

# C6.2.400 XAFLAG page C6-2008 line 117528 MATCH xd500403f/mask=xfffff0ff
# C6.2.229 MSR (immediate) page C6-1684 line 99649 MATCH xd500401f/mask=xfff8f01f
# CONSTRUCT xd500403f/mask=xfffff0ff MATCHED 2 DOCUMENTED OPCODES
# XAFLAG converts the ARMv8.5 "external" flag format back to native NZCV; all four
# temporaries are computed before any flag is written so the reads see the old values.
:xaflag is b_1231=0b11010101000000000100 & b_0007=0b00111111
{
    tmpNG = !CY & !ZR;
    tmpZR = ZR & CY;
    tmpCY = CY | ZR;
    tmpOV = !CY & ZR;
    NG = tmpNG;
    ZR = tmpZR;
    CY = tmpCY;
    OV = tmpOV;
}

================================================
FILE: pypcode/processors/AARCH64/data/languages/AARCH64instructions.sinc
================================================
# Specification for the
AARCH64 64-bit ARM instruction set # # See "ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile" # opcodes are always Little endian, although the data can be Big/Little Endian. # TODO Collapse SUB/SUBS forms # TODO MSR/MRS's need to be specified with special registers, coproc # TODO Many special case opcodes for UBFM and BFM, For example BFI # TODO? Floating point numbers don't display correctly as IEEE floats # TODO? Many special case opcodes like # TODO When writing to 32-bit Rd32, the upper bits of the bigger 64-bit Rd64 are zero'ed # Most pcode does this, but this needs to be carefully checked. There may be some # that do not zero extend into Rd64, and some that do extend into Rd64 but shouldn't. # If it's not done right (or naively) the decompiler gets confused. So # the accepted pattern for doing this is: # # ... calculate and set destination register ... # local tmps:SIZE = destination_register; # big_register = zext(tmps); # destination_register = big_register; # Note Implemented 2/2016 # # UBFM/SBFM/BFM is implemented # # When the destination is a 32-bit register, the upper 32 bits of the register must be set to 0. # This includes the wsp stack pointer, which might clobber the upper part of an address. # # And when the destination is a Rd_VPR vector register but the operand size is less than 128 bits, # and the destination is not the upper half of the register (ie, bit 30 q=0) # then the unused remaining upper bits must be set to 0. @if DATA_ENDIAN == "little" define endian=little; @else define endian=big; @endif define alignment=4; # Unlike the above, these are preprocessor macros. Use them with e.g. $(TAG_GRANULE) in SLEIGH statements. 
@define LOG2_TAG_GRANULE "4"
@define TAG_GRANULE "16"

# SECTION registers

# 64-bit byte-addressable code/data space; 4-byte offsets suffice for the register space.
define space ram type=ram_space size=8 default;
define space register type=register_space size=4;

# See "ABOUT THE ENDIAN IFDEFS" below for an explanation of the endian
# ifdefs
@if DATA_ENDIAN == "little"
define register offset=0x0000 size=8 [ pc sp ];
# wsp is the low 4 bytes of sp (LE layout: low bytes first)
define register offset=0x0000 size=4 [ _ _ wsp _ ];
@else
define register offset=0x0000 size=8 [ pc sp ];
# wsp is the low 4 bytes of sp (BE layout: low bytes last)
define register offset=0x0000 size=4 [ _ _ _ wsp ];
@endif

# NZCV condition flags plus scratch flag temporaries (1 byte each)
define register offset=0x0100 size=1 [ NG ZR CY OV shift_carry tmpCY tmpOV tmpNG tmpZR ];

define register offset=0x0200 size=4 [ glob_mask32 ];
define register offset=0x0204 size=8 [ glob_mask64 ];

# address set to load/store a value from memory in/out of vectors
define register offset=0x0300 size=8 [ VecMemAddr VectorSelem ];
# register address to load/store a value from memory in/out of registers
define register offset=0x0310 size=4 [ VecRegAddr ];

# Special Purpose Registers - most of these are really 1 bit and part
# of a status register, however they all need to be consistent
# 26 registers 0xd0 bytes
define register offset=0x1000 size=8 [
    spsr_el1 elr_el1 sp_el0 spsel daif currentel nzcv fpcr fpsr
    dspsr_el0 dlr_el0 spsr_el2 elr_el2 sp_el1 spsr_irq spsr_abt
    spsr_und spsr_fiq spsr_el3 elr_el3 sp_el2 spsr_svc spsr_hyp
    uao pan tco accdata
];

# System Registers
# 202 registers 0x330 bytes
define register offset=0x1100 size=8 [
    midr_el1 mpidr_el1 revidr_el1 id_dfr0_el1 id_pfr0_el1 id_pfr1_el1
    id_afr0_el1 id_mmfr0_el1 id_mmfr1_el1 id_mmfr2_el1 id_mmfr3_el1
    id_isar0_el1 id_isar1_el1 id_isar2_el1 id_isar3_el1 id_isar4_el1
    id_isar5_el1 mvfr0_el1 mvfr1_el1 mvfr2_el1 ccsidr_el1
    id_aa64pfr0_el1 id_aa64pfr1_el1 id_aa64dfr0_el1 id_aa64dfr1_el1
    id_aa64isar0_el1 id_aa64isar1_el1 id_aa64mmfr0_el1 id_aa64mmfr1_el1
    id_aa64afr0_el1 id_aa64afr1_el1 clidr_el1 aidr_el1 csselr_el1
    ctr_el0 dczid_el0 vpidr_el2 vmpidr_el2 sctlr_el1 actlr_el1
    cpacr_el1 sctlr_el2 actlr_el2 hcr_el2 mdcr_el2 cptr_el2
    hstr_el2 hacr_el2 sctlr_el3 actlr_el3 scr_el3 cptr_el3 mdcr_el3
    ttbr0_el1 ttbr1_el1 ttbr0_el2 ttbr0_el3 vttbr_el2
    tcr_el1 tcr_el2 tcr_el3 vtcr_el2
    afsr0_el1 afsr1_el1 afsr0_el2 afsr1_el2 afsr0_el3 afsr1_el3
    esr_el1 esr_el2 esr_el3 fpexc32_el2 far_el1 far_el2 far_el3
    hpfar_el2 par_el1 pmintenset_el1 pmintenclr_el1 pmcr_el0
    pmcntenset_el0 pmcntenclr_el0 pmovsclr_el0 pmswinc_el0 pmselr_el0
    pmceid0_el0 pmceid1_el0 pmccntr_el0 pmxevtyper_el0 pmxevcntr_el0
    pmuserenr_el0 pmovsset_el0
    pmevcntr0_el0 pmevcntr1_el0 pmevcntr2_el0 pmevcntr3_el0
    pmevcntr4_el0 pmevcntr5_el0 pmevcntr6_el0 pmevcntr7_el0
    pmevcntr8_el0 pmevcntr9_el0 pmevcntr10_el0 pmevcntr11_el0
    pmevcntr12_el0 pmevcntr13_el0 pmevcntr14_el0 pmevcntr15_el0
    pmevcntr16_el0 pmevcntr17_el0 pmevcntr18_el0 pmevcntr19_el0
    pmevcntr20_el0 pmevcntr21_el0 pmevcntr22_el0 pmevcntr23_el0
    pmevcntr24_el0 pmevcntr25_el0 pmevcntr26_el0 pmevcntr27_el0
    pmevcntr28_el0 pmevcntr29_el0 pmevcntr30_el0
    pmevtyper0_el0 pmevtyper1_el0 pmevtyper2_el0 pmevtyper3_el0
    pmevtyper4_el0 pmevtyper5_el0 pmevtyper6_el0 pmevtyper7_el0
    pmevtyper8_el0 pmevtyper9_el0 pmevtyper10_el0 pmevtyper11_el0
    pmevtyper12_el0 pmevtyper13_el0 pmevtyper14_el0 pmevtyper15_el0
    pmevtyper16_el0 pmevtyper17_el0 pmevtyper18_el0 pmevtyper19_el0
    pmevtyper20_el0 pmevtyper21_el0 pmevtyper22_el0 pmevtyper23_el0
    pmevtyper24_el0 pmevtyper25_el0 pmevtyper26_el0 pmevtyper27_el0
    pmevtyper28_el0 pmevtyper29_el0 pmevtyper30_el0 pmccfiltr_el0
    mair_el1 mair_el2 mair_el3 amair_el1 amair_el2 amair_el3
    vbar_el1 vbar_el2 vbar_el3 rvbar_el1 rvbar_el2 rvbar_el3
    rmr_el1 rmr_el2 rmr_el3 isr_el1 contextidr_el1
    tpidr_el0 tpidrro_el0 tpidr_el1 tpidr_el2 tpidr_el3 teecr32_el1
    cntfrq_el0 cntpct_el0 cntvct_el0 cntvoff_el2 cntkctl_el1 cnthctl_el2
    cntp_tval_el0 cntp_ctl_el0 cntp_cval_el0
    cntv_tval_el0 cntv_ctl_el0 cntv_cval_el0
    cnthp_tval_el2 cnthp_ctl_el2 cnthp_cval_el2
    cntps_tval_el1 cntps_ctl_el1 cntps_cval_el1
    dacr32_el2 ifsr32_el2 teehbr32_el1 sder32_el3
    gmid_el1 gcr_el1 ssbs allint dit svcr
];

# bitrange definitions are [least-significant-bit, number-of-bits]
define bitrange gcr_el1.exclude=gcr_el1[0,16];

# Debug Registers
# 82 registers 0x290 bytes
define register offset=0x1800 size=8 [
    osdtrrx_el1 mdccint_el1 mdscr_el1 osdtrtx_el1 oseccr_el1
    dbgbvr0_el1 dbgbvr1_el1 dbgbvr2_el1 dbgbvr3_el1
    dbgbvr4_el1 dbgbvr5_el1 dbgbvr6_el1 dbgbvr7_el1
    dbgbvr8_el1 dbgbvr9_el1 dbgbvr10_el1 dbgbvr11_el1
    dbgbvr12_el1 dbgbvr13_el1 dbgbvr14_el1 dbgbvr15_el1
    dbgbcr0_el1 dbgbcr1_el1 dbgbcr2_el1 dbgbcr3_el1
    dbgbcr4_el1 dbgbcr5_el1 dbgbcr6_el1 dbgbcr7_el1
    dbgbcr8_el1 dbgbcr9_el1 dbgbcr10_el1 dbgbcr11_el1
    dbgbcr12_el1 dbgbcr13_el1 dbgbcr14_el1 dbgbcr15_el1
    dbgwvr0_el1 dbgwvr1_el1 dbgwvr2_el1 dbgwvr3_el1
    dbgwvr4_el1 dbgwvr5_el1 dbgwvr6_el1 dbgwvr7_el1
    dbgwvr8_el1 dbgwvr9_el1 dbgwvr10_el1 dbgwvr11_el1
    dbgwvr12_el1 dbgwvr13_el1 dbgwvr14_el1 dbgwvr15_el1
    dbgwcr0_el1 dbgwcr1_el1 dbgwcr2_el1 dbgwcr3_el1
    dbgwcr4_el1 dbgwcr5_el1 dbgwcr6_el1 dbgwcr7_el1
    dbgwcr8_el1 dbgwcr9_el1 dbgwcr10_el1 dbgwcr11_el1
    dbgwcr12_el1 dbgwcr13_el1 dbgwcr14_el1 dbgwcr15_el1
    mdrar_el1 oslar_el1 oslsr_el1 osdlr_el1 dbgprcr_el1
    dbgclaimset_el1 dbgclaimclr_el1 dbgauthstatus_el1
    mdccsr_el0 dbgdtr_el0 dbgdtrrx_el0 dbgdtrtx_el0 dbgvcr32_el2
];

define register offset=0x3000 size=4 contextreg;

# value loaded from memory to store in register
# or computed to store in memory
define register offset=0x3100 size=4 tmp_ldWn;
define register offset=0x3104 size=8 tmp_ldXn;
define register offset=0x310c size=4 tmp_stWn;
define register offset=0x3110 size=8 tmp_stXn;

# General purpose and SIMD registers
#
# These will start at 0x3800 and there should be no defined registers
# after this address (this is because the size of the registers is
# potentially variable).
#
# ABOUT THE ENDIAN IFDEFS
# the *address* of the overlain registers depends on if the underlying
# memory is in big or little endian order. In little endian order, the
# LSB is byte 0, so (for example) w0 and x0 have the same address *in
# register memory*. But in big endian order, the LSB of x0 is byte 7,
# and so w0 starts at byte 4. All of that just gets at the address in
# register memory. Any time a value is loaded into a varnode and
# manipulated in sleigh code, it is always in big endian order. It is
# only byte reversed when read or written to little endian memory. All
# that means is that there are endian ifdefs for the overlain
# registers here, but that can and should be ignored when writing
# semantics.

# General purpose registers R0-R30 (R31=zero register ZR)
# They are accessed as
#   64-bit register named X0-X30
#   32-bit registers named W0-W30
define register offset=0x4000 size=8 [
    x0  x1  x2  x3  x4  x5  x6  x7  x8  x9  x10 x11 x12 x13 x14 x15
    x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 xzr
];
@if DATA_ENDIAN == "little"
# wN occupies the low half of xN (LE: low bytes first, placeholder after)
define register offset=0x4000 size=4 [
    w0 _ w1 _ w2 _ w3 _ w4 _ w5 _ w6 _ w7 _
    w8 _ w9 _ w10 _ w11 _ w12 _ w13 _ w14 _ w15 _
    w16 _ w17 _ w18 _ w19 _ w20 _ w21 _ w22 _ w23 _
    w24 _ w25 _ w26 _ w27 _ w28 _ w29 _ w30 _ wzr _
];
@else
# wN occupies the low half of xN (BE: placeholder before)
define register offset=0x4000 size=4 [
    _ w0 _ w1 _ w2 _ w3 _ w4 _ w5 _ w6 _ w7
    _ w8 _ w9 _ w10 _ w11 _ w12 _ w13 _ w14 _ w15
    _ w16 _ w17 _ w18 _ w19 _ w20 _ w21 _ w22 _ w23
    _ w24 _ w25 _ w26 _ w27 _ w28 _ w29 _ w30 _ wzr
];
@endif

# SIMD&FP registers V0-V31 at 0x5000
# They are accessed as:
#   128-bit registers named Q0-Q31
#   64-bit registers named D0-D31
#   32-bit registers named S0-S31
#   16-bit registers named H0-H31
#   8-bit registers named B0-B31
#   a 128-bit vector of elements
#   a 64-bit vector of elements
# The packing is endian dependent
#
# For SVE, registers Z0-Z31 can be any size that is a multiple of 128
# up to 2048 bits, and they overlap the V0-V31 registers

# temporary SIMD registers, needed for calculations in SIMD semantics
define register offset=0x4800 size=32 [ TMPZ1 TMPZ2 TMPZ3 TMPZ4 TMPZ5 TMPZ6 ];
@if DATA_ENDIAN == "little"
define register offset=0x4800 size=16 [ TMPQ1 _ TMPQ2 _ TMPQ3 _ TMPQ4 _ TMPQ5 _ TMPQ6 _ ];
define register offset=0x4800 size=8 [ TMPD1 _ _ _ TMPD2 _ _ _ TMPD3 _ _ _ TMPD4 _ _ _ TMPD5 _ _ _ TMPD6 _ _ _ ];
define register offset=0x4800 size=4 [ TMPS1 _ _ _ _ _ _ _ TMPS2 _ _ _ _ _ _ _ TMPS3 _ _ _ _ _ _ _ TMPS4 _ _ _ _ _ _ _ TMPS5 _ _ _ _ _ _ _ TMPS6 _ _ _ _ _ _ _ ];
@else # this is DATA_ENDIAN == "big"
define register offset=0x4800 size=16 [ _ TMPQ1 _ TMPQ2 _ TMPQ3 _ TMPQ4 _ TMPQ5 _ TMPQ6 ];
define register offset=0x4800 size=8 [ _ _ _ TMPD1 _ _ _ TMPD2 _ _ _ TMPD3 _ _ _ TMPD4 _ _ _ TMPD5 _ _ _ TMPD6 ];
define register offset=0x4800 size=4 [ _ _ _ _ _ _ _ TMPS1 _ _ _ _ _ _ _ TMPS2 _ _ _ _ _ _ _ TMPS3 _ _ _ _ _ _ _ TMPS4 _ _ _ _ _ _ _ TMPS5 _ _ _ _ _ _ _ TMPS6 ];
@endif

# The size of the simd (z) register (in bytes) can be any multiple of
# 16 from 32 to 256 bytes. There are also 16 predicate registers
# that are 1/8 the size of the corresponding simd registers.
@define SIMD_SIZE "32"
@define PRED_SIZE "4"

# In order to "move" the overlain registers to the right place, use
# these defines to locate within the z register. The __128 is for an
# 128-bit vector overlaid in a z-register, etc. For this to work
# SIMD_SIZE must be at least 32.
# SVE vector registers z0-z31, SIMD_SIZE (32) bytes apart at 0x5000,
# and predicate registers p0-p15 at 0x6000.
define register offset=0x5000 size=$(SIMD_SIZE) [
    z0  z1  z2  z3  z4  z5  z6  z7  z8  z9  z10 z11 z12 z13 z14 z15
    z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31
];
define register offset=0x6000 size=$(PRED_SIZE) [
    p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
];

# define the overlaid simd registers
# Each view of vN occupies the low-order bytes of zN; the `_`
# placeholders pad each entry out to the 32-byte z-register stride
# (placeholders follow the name in LE layout, precede it in BE layout).
@if DATA_ENDIAN == "little"
define register offset=0x5000 size=16 [
    q0 _ q1 _ q2 _ q3 _ q4 _ q5 _ q6 _ q7 _
    q8 _ q9 _ q10 _ q11 _ q12 _ q13 _ q14 _ q15 _
    q16 _ q17 _ q18 _ q19 _ q20 _ q21 _ q22 _ q23 _
    q24 _ q25 _ q26 _ q27 _ q28 _ q29 _ q30 _ q31 _
];
define register offset=0x5000 size=8 [
    d0 _ _ _ d1 _ _ _ d2 _ _ _ d3 _ _ _
    d4 _ _ _ d5 _ _ _ d6 _ _ _ d7 _ _ _
    d8 _ _ _ d9 _ _ _ d10 _ _ _ d11 _ _ _
    d12 _ _ _ d13 _ _ _ d14 _ _ _ d15 _ _ _
    d16 _ _ _ d17 _ _ _ d18 _ _ _ d19 _ _ _
    d20 _ _ _ d21 _ _ _ d22 _ _ _ d23 _ _ _
    d24 _ _ _ d25 _ _ _ d26 _ _ _ d27 _ _ _
    d28 _ _ _ d29 _ _ _ d30 _ _ _ d31 _ _ _
];
define register offset=0x5000 size=4 [
    s0 _ _ _ _ _ _ _ s1 _ _ _ _ _ _ _
    s2 _ _ _ _ _ _ _ s3 _ _ _ _ _ _ _
    s4 _ _ _ _ _ _ _ s5 _ _ _ _ _ _ _
    s6 _ _ _ _ _ _ _ s7 _ _ _ _ _ _ _
    s8 _ _ _ _ _ _ _ s9 _ _ _ _ _ _ _
    s10 _ _ _ _ _ _ _ s11 _ _ _ _ _ _ _
    s12 _ _ _ _ _ _ _ s13 _ _ _ _ _ _ _
    s14 _ _ _ _ _ _ _ s15 _ _ _ _ _ _ _
    s16 _ _ _ _ _ _ _ s17 _ _ _ _ _ _ _
    s18 _ _ _ _ _ _ _ s19 _ _ _ _ _ _ _
    s20 _ _ _ _ _ _ _ s21 _ _ _ _ _ _ _
    s22 _ _ _ _ _ _ _ s23 _ _ _ _ _ _ _
    s24 _ _ _ _ _ _ _ s25 _ _ _ _ _ _ _
    s26 _ _ _ _ _ _ _ s27 _ _ _ _ _ _ _
    s28 _ _ _ _ _ _ _ s29 _ _ _ _ _ _ _
    s30 _ _ _ _ _ _ _ s31 _ _ _ _ _ _ _
];
define register offset=0x5000 size=2 [
    h0 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h1 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h2 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h3 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h4 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h5 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h6 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h7 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h8 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h9 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h10 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h11 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h12 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h13 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h14 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h16 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h17 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h18 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h19 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h20 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h21 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h22 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h23 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h24 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h25 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h26 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h27 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h28 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h29 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h30 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    h31 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];
define register offset=0x5000 size=1 [
    b0 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b1 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b2 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b3 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b4 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b5 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b6 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b7 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b8 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b9 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b10 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b11 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b12 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b13 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b14 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b16 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b17 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b18 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b19 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b20 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b21 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b22 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b23 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b24 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b25 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b26 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b27 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b28 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b29 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b30 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    b31 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];
@else # this is DATA_ENDIAN == "big"
define register offset=0x5000 size=16 [
    _ q0 _ q1 _ q2 _ q3 _ q4 _ q5 _ q6 _ q7
    _ q8 _ q9 _ q10 _ q11 _ q12 _ q13 _ q14 _ q15
    _ q16 _ q17 _ q18 _ q19 _ q20 _ q21 _ q22 _ q23
    _ q24 _ q25 _ q26 _ q27 _ q28 _ q29 _ q30 _ q31
];
define register offset=0x5000 size=8 [
    _ _ _ d0 _ _ _ d1 _ _ _ d2 _ _ _ d3
    _ _ _ d4 _ _ _ d5 _ _ _ d6 _ _ _ d7
    _ _ _ d8 _ _ _ d9 _ _ _ d10 _ _ _ d11
    _ _ _ d12 _ _ _ d13 _ _ _ d14 _ _ _ d15
    _ _ _ d16 _ _ _ d17 _ _ _ d18 _ _ _ d19
    _ _ _ d20 _ _ _ d21 _ _ _ d22 _ _ _ d23
    _ _ _ d24 _ _ _ d25 _ _ _ d26 _ _ _ d27
    _ _ _ d28 _ _ _ d29 _ _ _ d30 _ _ _ d31
];
define register offset=0x5000 size=4 [
    _ _ _ _ _ _ _ s0 _ _ _ _ _ _ _ s1
    _ _ _ _ _ _ _ s2 _ _ _ _ _ _ _ s3
    _ _ _ _ _ _ _ s4 _ _ _ _ _ _ _ s5
    _ _ _ _ _ _ _ s6 _ _ _ _ _ _ _ s7
    _ _ _ _ _ _ _ s8 _ _ _ _ _ _ _ s9
    _ _ _ _ _ _ _ s10 _ _ _ _ _ _ _ s11
    _ _ _ _ _ _ _ s12 _ _ _ _ _ _ _ s13
    _ _ _ _ _ _ _ s14 _ _ _ _ _ _ _ s15
    _ _ _ _ _ _ _ s16 _ _ _ _ _ _ _ s17
    _ _ _ _ _ _ _ s18 _ _ _ _ _ _ _ s19
    _ _ _ _ _ _ _ s20 _ _ _ _ _ _ _ s21
    _ _ _ _ _ _ _ s22 _ _ _ _ _ _ _ s23
    _ _ _ _ _ _ _ s24 _ _ _ _ _ _ _ s25
    _ _ _ _ _ _ _ s26 _ _ _ _ _ _ _ s27
    _ _ _ _ _ _ _ s28 _ _ _ _ _ _ _ s29
    _ _ _ _ _ _ _ s30 _ _ _ _ _ _ _ s31
];
define register offset=0x5000 size=2 [
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h0
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h1
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h2
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h3
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h4
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h5
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h6
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h7
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h8
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h9
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h10
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h11
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h12
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h13
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h14
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h15
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h16
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h17
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h18
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h19
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h20
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h21
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h22
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h23
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h24
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h25
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h26
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h27
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h28
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h29
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h30
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ h31
];
define register offset=0x5000 size=1 [
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b0
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b1
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b2
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b3
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b4
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b5
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b6
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b7
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b8
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b9
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b10
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b11
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b12
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b13
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b14
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b15
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b16
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b17
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b18
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b19
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b20
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b21
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b22
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b23
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b24
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b25
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b26
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b27
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b28
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b29
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b30
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ b31
];
@endif

# SECTION token fields and context variables

# "noflow" limits register changes to a single instruction (or a highlighted region) rather than following control flow.
# This allows the select-clear-SetRegister-disassemble procedure to be done without affecting
# any instructions other than those that are selected.
define context contextreg ImmS_ImmR_TestSet = (0,0) ImmS_LT_ImmR = (1,1) ImmS_EQ_ImmR = (2,2) ImmS_LT_ImmR_minus_1 = (3,3) ImmS_ne_1f = (4,4) ImmS_ne_3f = (5,5) ShowPAC = (21,21) noflow PAC_clobber = (22,22) noflow ShowBTI = (23,23) noflow ShowMemTag = (24,24) noflow ; define token instrAARCH64 (32) endian = little Rm = (16,20) Rn = (5,9) Rd = (0,4) Rt = (0,4) Ra = (10,14) Rt2 = (10,14) Rm_FPR8 = (16,20) Rn_FPR8 = (5,9) Rd_FPR8 = (0,4) Rd_FPR8_2 = (0,4) Rt_FPR8 = (0,4) Rm_FPR16 = (16,20) Rn_FPR16 = (5,9) Rd_FPR16 = (0,4) Rd_FPR16_2 = (0,4) Rt_FPR16 = (0,4) Ra_FPR16 = (10,14) Rm_FPR32 = (16,20) Rn_FPR32 = (5,9) Rd_FPR32 = (0,4) Rd_FPR32_2 = (0,4) Ra_FPR32 = (10,14) Rm_FPR64 = (16,20) Rn_FPR64 = (5,9) Rd_FPR64 = (0,4) Rd_FPR64_2 = (0,4) Rt_FPR64 = (0,4) Rt_FPR32 = (0,4) Ra_FPR64 = (10,14) Rt2_FPR128 = (10,14) Rt2_FPR32 = (10,14) Rt2_FPR64 = (10,14) Ra_VPR128 = (10,14) Rm_VPR64 = (16,20) Rn_VPR64 = (5,9) Rd_VPR64 = (0,4) Re_VPR128 = (16,20) Re_VPR128Lo = (16,19) Rm_VPR128 = (16,20) Rm_VPR128Lo = (16,19) Rn_VPR128 = (5,9) Rnn_VPR128 = (5,9) Rnnn_VPR128 = (5,9) Rnnnn_VPR128 = (5,9) Rd_VPR128 = (0,4) Rt_VPR128 = (0,4) Rtt_VPR128 = (0,4) Rttt_VPR128 = (0,4) Rtttt_VPR128 = (0,4) Rt_VPR64 = (0,4) Rtt_VPR64 = (0,4) Rttt_VPR64 = (0,4) Rtttt_VPR64 = (0,4) Rt_FPR128 = (0,4) vRm_VPR64 = (16,20) vRm_VPR128Lo = (16,19) vRe_VPR128 = (16,20) vRe_VPR128Lo = (16,19) vRn_VPR64 = (5,9) vRd_VPR64 = (0,4) vRm_VPR128 = (16,20) vRn_VPR128 = (5,9) vRnn_VPR128 = (5,9) vRnnn_VPR128 = (5,9) vRnnnn_VPR128 = (5,9) vRd_VPR128 = (0,4) vRa_VPR128 = (10,14) Vt = (0,4) Vtt = (0,4) Vttt = (0,4) Vtttt = (0,4) vVt = (0,4) vVtt = (0,4) vVttt = (0,4) vVtttt = (0,4) aa_Xm = (16,20) aa_Xn = (5,9) aa_Xd = (0,4) aa_Xs = (16,20) aa_Xss = (16,20) aa_Xt = (0,4) aa_Xtt = (0,4) aa_Xa = (10,14) aa_Wm = (16,20) aa_Wn = (5,9) aa_Wd = (0,4) aa_Ws = (16,20) aa_Wss = (16,20) aa_Wt = (0,4) aa_Wtt = (0,4) aa_Wa = (10,14) aa_Wa2 = (10,14) aa_CRm = (8,11) br_cond_op = (0,3) cond_op = (12,15) aa_prefetch = (0,4) aa_hw = 
(21,22) aa_extreg_imm3 = (10,12) aa_extreg_shift = (22,23) aa_extreg_option = (13,15) imm6 = (10,15) aa_imm7 = (15,21) imm12 = (10,21) imm16 = (5,20) simm7 = (15,21) signed simm9 = (12,20) signed simm14 = (5,18) signed simm19 = (5,23) signed simm26 = (0,25) signed immlo = (29,30) immhi = (5,23) signed # Arbitrary bit fields b_00 = (0,0) b_01 = (1,1) b_02 = (2,2) b_03 = (3,3) b_04 = (4,4) b_05 = (5,5) b_06 = (6,6) b_07 = (7,7) b_08 = (8,8) b_09 = (9,9) b_10 = (10,10) b_11 = (11,11) b_12 = (12,12) b_13 = (13,13) b_14 = (14,14) b_15 = (15,15) b_16 = (16,16) b_17 = (17,17) b_18 = (18,18) b_19 = (19,19) b_20 = (20,20) b_21 = (21,21) b_22 = (22,22) b_23 = (23,23) b_24 = (24,24) b_25 = (25,25) b_26 = (26,26) b_27 = (27,27) b_28 = (28,28) b_29 = (29,29) b_30 = (30,30) b_31 = (31,31) b_0001 = (0,1) b_0003 = (0,3) b_0004 = (0,4) b_0006 = (0,6) b_0007 = (0,7) b_0009 = (0,9) b_0011 = (0,11) b_0015 = (0,15) b_0027 = (0,27) b_0031 = (0,31) b_0102 = (1,2) b_0103 = (1,3) b_0204 = (2,4) b_0304 = (3,4) b_0405 = (4,5) b_0406 = (4,6) b_0407 = (4,7) b_0409 = (4,9) b_0411 = (4,11) b_0427 = (4,27) b_0431 = (4,31) b_0506 = (5,6) b_0507 = (5,7) b_0508 = (5,8) b_0509 = (5,9) b_0510 = (5,10) b_0515 = (5,15) b_0519 = (5,19) b_0531 = (5,31) b_0607 = (6,7) b_0609 = (6,9) b_0610 = (6,10) b_0611 = (6,11) b_0708 = (7,8) b_0709 = (7,9) b_0710 = (7,10) b_0711 = (7,11) b_0809 = (8,9) b_0810 = (8,10) b_0811 = (8,11) b_0910 = (9,10) b_0911 = (9,11) b_0916 = (9,16) b_1010 = (10,10) b_1011 = (10,11) b_1012 = (10,12) b_1013 = (10,13) b_1014 = (10,14) b_1015 = (10,15) b_1021 = (10,21) b_1022 = (10,22) b_1028 = (10,28) b_1029 = (10,29) b_1031 = (10,31) b_1111 = (11,11) b_1112 = (11,12) b_1113 = (11,13) b_1114 = (11,14) b_1115 = (11,15) b_1116 = (11,16) b_1131 = (11,31) b_1212 = (12,12) b_1213 = (12,13) b_1214 = (12,14) b_1215 = (12,15) b_1216 = (12,16) b_1217 = (12,17) b_1220 = (12,20) b_1223 = (12,23) b_1229 = (12,29) b_1230 = (12,30) b_1231 = (12,31) b_1313 = (13,13) b_1314 = (13,14) b_1315 = (13,15) 
b_1317 = (13,17) b_1321 = (13,21) b_1322 = (13,22) b_1414 = (14,14) b_1417 = (14,17) b_1415 = (14,15) b_1431 = (14,31) b_1515 = (15,15) b_1517 = (15,17) b_1520 = (15,20) b_1531 = (15,31) b_1616 = (16,16) b_1617 = (16,17) b_1618 = (16,18) b_1619 = (16,19) b_1620 = (16,20) b_1621 = (16,21) b_1623 = (16,23) b_1627 = (16,27) b_1629 = (16,29) b_1631 = (16,31) b_1718 = (17,18) b_1719 = (17,19) b_1720 = (17,20) b_1721 = (17,21) b_1722 = (17,22) b_1818 = (18,18) b_1819 = (18,19) b_1820 = (18,20) b_1821 = (18,21) b_1920 = (19,20) b_1921 = (19,21) b_1922 = (19,22) b_1923 = (19,23) b_1928 = (19,28) b_1929 = (19,29) b_1931 = (19,31) b_2020 = (20,20) b_2021 = (20,21) b_2022 = (20,22) b_2023 = (20,23) b_2024 = (20,24) b_2027 = (20,27) b_2121 = (21,21) b_2122 = (21,22) b_2123 = (21,23) b_2124 = (21,24) b_2125 = (21,25) b_2127 = (21,27) b_2128 = (21,28) b_2129 = (21,29) b_2130 = (21,30) b_2131 = (21,31) b_2222 = (22,22) b_2223 = (22,23) b_2224 = (22,24) b_2225 = (22,25) b_2229 = (22,29) b_2231 = (22,31) b_2323 = (23,23) b_2324 = (23,24) b_2325 = (23,25) b_2327 = (23,27) b_2328 = (23,28) b_2329 = (23,29) b_2331 = (23,31) b_2425 = (24,25) b_2427 = (24,27) b_2428 = (24,28) b_2429 = (24,29) b_2430 = (24,30) b_2431 = (24,31) b_2525 = (25,25) b_2527 = (25,27) b_2529 = (25,29) b_2530 = (25,30) b_2531 = (25,31) b_2627 = (26,27) b_2629 = (26,29) b_2630 = (26,30) b_2631 = (26,31) b_2729 = (27,29) b_2929 = (29,29) b_2930 = (29,30) b_2931 = (29,31) b_3030 = (30,30) b_3031 = (30,31) b_3131 = (31,31) cmpr_op = (24,24) sf = (31,31) imm_neon_uimm1 = (20,20) imm_neon_uimm2 = (19,20) imm_neon_uimm3 = (18,20) imm_neon_uimm4 = (17,20) immN_neon_uimm1 = (14,14) immN_neon_uimm2 = (13,14) immN_neon_uimm3 = (12,14) immN_neon_uimm4 = (11,14) fpOpcode = (16,18) fpDpOpcode = (15,20) CRm_CRx = (8,11) CRm_32 = (10,11) CRm_10 = (8,9) CRm_dbarrier_op = (8,11) CRm_isb_op = (8,11) CRn = (12,15) CRm = (8,11) CRn_CRx = (12,15) Imm4 = (11,13) # C2.2.3 Modified immediate constants in A64 instructions page C2-158 
Imm8_fmov_sign = (20,20) # a Imm8_fmov_exph = (19,19) # b Imm8_fmov_expl = (17,18) # cd Imm8_fmov_frac = (13,16) # efgh ImmN = (22,22) ImmR = (16,21) ImmS = (10,15) Imm_imm0_63 = (16,21) n_uimm8L = (5,9) n_uimm8H = (16,18) Imm_uimm3 = (16,18) Imm_uimm4 = (16,19) Imm_uimm5 = (16,20) Imm_uimm5_31 = (31,31) Imm_uimm6 = (31,31) L = (22,22) N = (21,21) Op0 = (19,20) Op1 = (16,18) Op1_uimm3 = (16,18) Op2 = (5,7) Op2_uimm3 = (5,7) Q = (30,30) S = (29,29) Scale = (10,15) excCode = (21,23) excCode2 = (2,4) imm7Low = (5,11) cmode = (12,15) imm4 = (11,14) imm5 = (5,9) l = (21,21) ll = (0,1) m = (31,31) mode = (19,20) n = (22,22) o0 = (4,4) o1 = (24,24) o2 = (10,10) o3 = (4,4) op = (30,30) fpccmp.op = (4,4) fpcmp.op = (14,15) op2 = (16,20) op3 = (10,15) op4 = (0,4) opc = (29,30) opc.indexmode = (10,11) op.dp3 = (29,30) op.dp3_o0 = (15,15) op.dp3_op31 = (21,23) op.dp3_op54 = (29,30) opcode2 = (10,15) dp1.opcode2 = (16,20) fpcmp.opcode2 = (0,4) opt = (22,23) option = (13,15) optionlo = (13,13) q = (30,30) rmode = (19,20) s = (29,29) size.ldstr = (30,31) shift = (22,23) advSIMD3.size = (22,23) size.neon = (10,11) size_high = (23,23) ftype = (22,23) u = (29,29) v = (26,26) # SVE tokens Zd = (0,4) Zt = (0,4) Ztt = (0,4) Zttt = (0,4) Ztttt = (0,4) Ze = (16,20) Zm = (16,20) Zn = (5,9) Zt2 = (10,14) sve_b_00 = (0,0) sve_b_0001 = (0,1) sve_b_01 = (1,1) sve_b_02 = (2,2) sve_b_03 = (3,3) sve_b_04 = (4,4) sve_b_0409 = (4,9) sve_b_0609 = (6,9) sve_b_09 = (9,9) sve_b_10 = (10,10) sve_b_1015 = (10,15) sve_b_1019 = (10,19) sve_b_1021 = (10,21) sve_b_11 = (11,11) sve_b_1112 = (11,12) sve_b_1115 = (11,15) sve_b_12 = (12,12) sve_b_1215 = (12,15) sve_b_13 = (13,13) sve_b_1315 = (13,15) sve_b_1321 = (13,21) sve_b_14 = (14,14) sve_b_1415 = (14,15) sve_b_1416 = (14,16) sve_b_1419 = (14,19) sve_b_1421 = (14,21) sve_b_15 = (15,15) sve_b_16 = (16,16) sve_b_17 = (17,17) sve_b_1718 = (17,18) sve_b_1719 = (17,19) sve_b_1720 = (17,20) sve_b_1721 = (17,21) sve_b_1731 = (17,31) sve_b_18 = (18,18) sve_b_1821 
= (18,21) sve_b_1831 = (18,31) sve_b_1921 = (19,21) sve_b_20 = (20,20) sve_b_2021 = (20,21) sve_b_2022 = (20,22) sve_b_21 = (21,21) sve_b_2122 = (21,22) sve_b_2131 = (21,31) sve_b_22 = (22,22) sve_b_2224 = (22,24) sve_b_2231 = (22,31) sve_b_23 = (23,23) sve_b_2331 = (23,31) sve_b_24 = (24,24) sve_b_2429 = (24,29) sve_b_2431 = (24,31) sve_b_2531 = (25,31) sve_b_3031 = (30,31) sve_float_dec = (5,8) sve_float_exp = (9,11) sve_i1_05 = (5,5) sve_i1_20 = (20,20) sve_i2_1920 = (19,20) sve_i3h_22 = (22,22) sve_i3l_1920 = (19,20) sve_imm13_0517 = (5,17) sve_imm2_2223 = (22,23) sve_imm3_0507 = (5,7) sve_imm3_1618 = (16,18) sve_imm4_1619 = (16,19) sve_imm4s_1619 = (16,19) signed sve_imm5_0509 = (5,9) sve_imm5s_0509 = (5,9) signed sve_imm5_1620 = (16,20) sve_imm5s_1620 = (16,20) signed sve_imm5b_1620 = (16,20) signed sve_imm6_0510 = (5,10) sve_imm6s_0510 = (5,10) signed sve_imm6_1621 = (16,21) sve_imm6s_1621 = (16,21) signed sve_imm7_1420 = (14,20) sve_imm8_0512 = (5,12) sve_imm8s_0512 = (5,12) signed sve_imm8h_1620 = (16,20) sve_imm8l_1012 = (10,12) sve_imm9h_1621 = (16,21) sve_imm9hs_1621 = (16,21) signed sve_imm9l_1012 = (10,12) sve_m_04 = (4,4) sve_m_14 = (14,14) sve_m_16 = (16,16) sve_msz_1011 = (10,11) sve_pattern_0509 = (5,9) sve_pd_0003 = (0,3) sve_pdm_0003 = (0,3) sve_pdn_0003 = (0,3) sve_pg_0508 = (5,8) sve_pg_1012 = (10,12) sve_pg_1013 = (10,13) sve_pg_1619 = (16,19) sve_pm_1619 = (16,19) sve_pn_0508 = (5,8) sve_prfop_0003 = (0,3) sve_pt_0003 = (0,3) sve_rd_0004 = (0,4) sve_rdn_0004 = (0,4) sve_rm_0509 = (5,9) sve_rm_1620 = (16,20) sve_rn_0509 = (5,9) sve_rn_1620 = (16,20) sve_rot_1011 = (10,11) sve_rot_1314 = (13,14) sve_rot_16 = (16,16) sve_s_22 = (22,22) sve_sf_12 = (12,12) sve_sh_13 = (13,13) sve_size_2122 = (21,22) sve_size_2223 = (22,23) sve_sz_22 = (22,22) sve_tsz_1620 = (16,20) sve_tszh_2223 = (22,23) sve_tszl_0809 = (8,9) sve_tszl_1920 = (19,20) sve_vd_0004 = (0,4) sve_vdn_0004 = (0,4) sve_vm_0509 = (5,9) sve_vn_0509 = (5,9) sve_xs_14 = (14,14) sve_xs_22 = 
(22,22) sve_za_0509 = (5,9) sve_za_1620 = (16,20) sve_zd_0004 = (0,4) sve_zda_0004 = (0,4) sve_zdn_0004 = (0,4) sve_zm_0509 = (5,9) sve_zm_1618 = (16,18) sve_zm_1619 = (16,19) sve_zm_1620 = (16,20) sve_zn_0509 = (5,9) sve_zt_0004 = (0,4) sve_ztt_0004 = (0,4) sve_zttt_0004 = (0,4) sve_ztttt_0004 = (0,4) ; # SECTION variables and variable names attach variables [ Zd Ze Zm Zn Zt Zt2 ] [ z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 ]; attach variables [ Ztt ] [ z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 ]; attach variables [ Zttt ] [ z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 ]; attach variables [ Ztttt ] [ z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 z2 ]; attach variables [ aa_Xn aa_Xm aa_Xs aa_Xd aa_Xt aa_Xa ] [ x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 xzr ]; attach variables [ aa_Xss aa_Xtt ] [ x1 _ x3 _ x5 _ x7 _ x9 _ x11 _ x13 _ x15 _ x17 _ x19 _ x21 _ x23 _ x25 _ x27 _ x29 _ xzr _ ]; attach variables [ aa_Wn aa_Wm aa_Ws aa_Wd aa_Wt aa_Wa ] [ w0 w1 w2 w3 w4 w5 w6 w7 w8 w9 w10 w11 w12 w13 w14 w15 w16 w17 w18 w19 w20 w21 w22 w23 w24 w25 w26 w27 w28 w29 w30 wzr ]; attach variables [ aa_Wss aa_Wtt ] [ w1 _ w3 _ w5 _ w7 _ w9 _ w11 _ w13 _ w15 _ w17 _ w19 _ w21 _ w23 _ w25 _ w27 _ w29 _ wzr _ ]; attach variables [ Rm_VPR128 Rn_VPR128 Rd_VPR128 Rt_VPR128 Rt2_FPR128 Re_VPR128 Rt_FPR128 Ra_VPR128 ] [ q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 ]; attach variables [ Rnn_VPR128 Rtt_VPR128 ] [ q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 ]; attach variables [ Rnnn_VPR128 
# NOTE(review): this region was re-wrapped to one statement per line during review;
# code tokens are unchanged from the original.
# Register attachments bind decoded fields to SIMD&FP register files (q = 128-bit,
# d = 64-bit) and to display-name lists (v0..v31). Lists rotated by 1/2/3 positions
# back the Rtt/Rttt/Rtttt-style fields — presumably the consecutive-register operands
# of multi-register load/store forms (register n+1, n+2, n+3); verify against users.
# (continues an attach statement begun before this chunk)
Rttt_VPR128 ] [ q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 q1 ];
attach variables [ Rnnnn_VPR128 Rtttt_VPR128 ] [ q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 q1 q2 ];
attach names [ vRm_VPR128 vRn_VPR128 vRd_VPR128 vRe_VPR128 vRa_VPR128 ] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ];
attach names [ vRnn_VPR128 ] [ v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 ];
attach names [ vRnnn_VPR128 ] [ v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 v1 ];
attach names [ vRnnnn_VPR128 ] [ v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 v1 v2 ];
# "Lo" variants cover only the first 16 registers (4-bit register field).
attach variables [ Rm_VPR128Lo Re_VPR128Lo ] [ q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 ];
attach names [ vRm_VPR128Lo vRe_VPR128Lo ] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 ];
attach variables [ Rm_VPR64 Rn_VPR64 Rd_VPR64 Rt_VPR64 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 ];
attach variables [ Rtt_VPR64 ] [ d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 d0 ];
attach variables [ Rttt_VPR64 ] [ d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 d0 d1 ];
attach variables [ Rtttt_VPR64 ] [ d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 d0 d1 d2 ];
attach names [ vRm_VPR64 vRn_VPR64 vRd_VPR64 ] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ];
# Scalar FP register files: d = 64-bit, s = 32-bit, h = 16-bit, b = 8-bit views.
attach variables [ Rm_FPR64 Rn_FPR64 Rd_FPR64 Rd_FPR64_2 Rt2_FPR64 Ra_FPR64 Rt_FPR64 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 ];
attach variables [ Rm_FPR32 Rn_FPR32 Rd_FPR32 Rd_FPR32_2 Rt2_FPR32 Ra_FPR32 Rt_FPR32 ] [ s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 ];
attach variables [ Rm_FPR16 Rn_FPR16 Rd_FPR16 Rd_FPR16_2 Rt_FPR16 Ra_FPR16 ] [ h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15 h16 h17 h18 h19 h20 h21 h22 h23 h24 h25 h26 h27 h28 h29 h30 h31 ];
attach variables [ Rm_FPR8 Rn_FPR8 Rd_FPR8 Rd_FPR8_2 Rt_FPR8 ] [ b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 b16 b17 b18 b19 b20 b21 b22 b23 b24 b25 b26 b27 b28 b29 b30 b31 ];
attach variables [ Vt ] [ q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 ];
attach variables [ Vtt ] [ q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 ];
attach variables [ Vttt ] [ q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 q1 ];
attach variables [ Vtttt ] [ q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 q16 q17 q18 q19 q20 q21 q22 q23 q24 q25 q26 q27 q28 q29 q30 q31 q0 q1 q2 ];
attach names [ vVt ] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ];
attach names [ vVtt ] [ v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 ];
attach names [ vVttt ] [ v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 v1 ];
attach names [ vVtttt ] [ v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v0 v1 v2 ];
# PRFM prefetch-operation names; P_0xNN entries are the unallocated encodings.
attach names [ aa_prefetch ] [ PLDL1KEEP PLDL1STRM PLDL2KEEP PLDL2STRM PLDL3KEEP PLDL3STRM P_0x06 P_0x07 PLIL1KEEP PLIL1STRM PLIL2KEEP PLIL2STRM PLIL3KEEP PLIL3STRM P_0x0e P_0x0f PSTL1KEEP PSTL1STRM PSTL2KEEP PSTL2STRM PSTL3KEEP PSTL3STRM P_0x16 P_0x17 P_0x18 P_0x19 P_0x1a P_0x1b P_0x1c P_0x1d P_0x1e P_0x1f ];
# DMB/DSB barrier option names keyed by CRm.
attach names [ CRm_dbarrier_op ] [ _ OSHLD OSHST OSH _ NSHLD NSHST NSH _ ISHLD ISHST ISH _ LD ST SY ];
# SVE registers and names
attach variables [ sve_zm_1618 ] [ z0 z1 z2 z3 z4 z5 z6 z7 ];
attach variables [ sve_zm_1619 ] [ z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 ];
attach variables [ sve_za_0509 sve_za_1620 sve_zd_0004 sve_zda_0004 sve_zdn_0004 sve_zm_0509 sve_zm_1620 sve_zn_0509 sve_zt_0004 ] [ z0 z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 ];
attach variables [ sve_ztt_0004 ] [ z1 z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 ];
attach variables [ sve_zttt_0004 ] [ z2 z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 ];
attach variables [ sve_ztttt_0004 ] [ z3 z4 z5 z6 z7 z8 z9 z10 z11 z12 z13 z14 z15 z16 z17 z18 z19 z20 z21 z22 z23 z24 z25 z26 z27 z28 z29 z30 z31 z0 z1 z2 ];
attach variables [ sve_pg_1012 ] [ p0 p1 p2 p3 p4 p5 p6 p7 ];
attach variables [ sve_pd_0003 sve_pdm_0003 sve_pdn_0003 sve_pg_0508 sve_pg_1013 sve_pg_1619 sve_pm_1619 sve_pn_0508 sve_pt_0003 ] [ p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 ];
attach names [ sve_sz_22 ] [ b h ];
attach names [ sve_msz_1011 ] [ "" " #1" " #2" " #3" ];
attach names [ sve_rot_16 ] [ "#90" "#270" ];
attach names [ sve_rot_1314 ] [ "#0" "#90" "#180" "#270" ];
attach names [ sve_rot_1011 ] [ "#0" "#90" "#180" "#270" ];
# SECTION subtables
# GPR subtables: register number 31 selects the zero register (exports a zeroed
# temp) or SP/WSP depending on the operand's context.
Rm_GPR32: aa_Wm is aa_Wm { export aa_Wm; }
Rm_GPR32: wzr is aa_Wm=31 & wzr { tmp:4 = 0; export tmp; }
Rd_GPR32: aa_Wd is aa_Wd { export aa_Wd; }
Rd_GPR32: wzr is aa_Wd=31 & wzr
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# (completes the Rd_GPR32: wzr constructor begun above)
{ tmp:4 = 0; export tmp; }
Rd_GPR32_2: aa_Wd is aa_Wd { export aa_Wd; }
Rd_GPR32_2: wzr is aa_Wd=31 & wzr { tmp:4 = 0; export tmp; }
# "xsp"/"wsp" variants: register number 31 means the stack pointer, not zero.
Rd_GPR32xsp: aa_Wd is aa_Wd { export aa_Wd; }
Rd_GPR32xsp: wsp is aa_Wd=31 & wsp { export wsp; }
Rd_GPR32wsp: Rd_GPR32xsp is Rd_GPR32xsp { export Rd_GPR32xsp; }
Rn_GPR32: aa_Wn is aa_Wn { export aa_Wn; }
Rn_GPR32: wzr is aa_Wn=31 & wzr { tmp:4 = 0; export tmp; }
Ra_GPR32: aa_Wa is aa_Wa { export aa_Wa; }
Ra_GPR32: wzr is aa_Wa=31 & wzr { tmp:4 = 0; export tmp; }
Rt2_GPR32: aa_Wa is aa_Wa { export aa_Wa; }
Rt2_GPR32: wzr is aa_Wa=31 & wzr { tmp:4 = 0; export tmp; }
Rn_GPR32xsp: aa_Wn is aa_Wn { export aa_Wn; }
Rn_GPR32xsp: wsp is aa_Wn=31 & wsp { export wsp; }
Rn_GPR32wsp: aa_Wn is aa_Wn { export aa_Wn; }
Rn_GPR32wsp: wsp is aa_Wn=31 & wsp { export wsp; }
Rt_GPR32: aa_Wt is aa_Wt { export aa_Wt; }
Rt_GPR32: wzr is aa_Wt=31 & wzr { tmp:4 = 0; export tmp; }
Rm_GPR64: aa_Xm is aa_Xm { export aa_Xm; }
Rm_GPR64: xzr is aa_Xm=31 & xzr { export 0:8; }
Rd_GPR64: aa_Xd is aa_Xd { export aa_Xd; }
Rd_GPR64: xzr is aa_Xd=31 & xzr { tmp:8 = 0; export tmp; }
Rd_GPR64_2: aa_Xd is aa_Xd { export aa_Xd; }
Rd_GPR64_2: xzr is aa_Xd=31 & xzr { tmp:8 = 0; export tmp; }
Ra_GPR64: aa_Xa is aa_Xa { export aa_Xa; }
Ra_GPR64: xzr is aa_Xa=31 & xzr { tmp:8 = 0; export tmp; }
Rt2_GPR64: aa_Xa is aa_Xa { export aa_Xa; }
Rt2_GPR64: xzr is aa_Xa=31 & xzr { tmp:8 = 0; export tmp; }
Rd_GPR64xsp: aa_Xd is aa_Xd { export aa_Xd; }
Rd_GPR64xsp: sp is aa_Xd=31 & sp { export sp; }
Rn_GPR64: aa_Xn is aa_Xn { export aa_Xn; }
Rn_GPR64: xzr is aa_Xn=31 & xzr { tmp:8 = 0; export tmp; }
Rt_GPR64: aa_Xt is aa_Xt { export aa_Xt; }
Rt_GPR64: xzr is aa_Xt=31 & xzr { tmp:8 = 0; export tmp; }
Rn_GPR64xsp: aa_Xn is aa_Xn { export aa_Xn; }
Rn_GPR64xsp: sp is aa_Xn=31 & sp { export sp; }
Rm_GPR64xsp: aa_Xm is aa_Xm { export aa_Xm; }
Rm_GPR64xsp: sp is aa_Xm=31 & sp { export sp; }
Rt_GPR64xsp: aa_Xt is aa_Xt { export aa_Xt; }
Rt_GPR64xsp: sp is aa_Xt=31 & sp { export sp; }
# Rs aliases the Rm field.
Rs_GPR32: Rm_GPR32 is Rm_GPR32 { export Rm_GPR32; }
Rs_GPR64: Rm_GPR64 is Rm_GPR64 { export Rm_GPR64; }
# Floating-point zero operands (display "#0.0", export a zero-valued temp).
Rm_fpz16: "#0.0" is Rm { tmp:2 = int2float(0:2); export tmp; }
Rm_fpz32: "#0.0" is Rm { tmp:4 = int2float(0:4); export tmp; }
Rm_fpz64: "#0.0" is Rm { tmp:8 = int2float(0:8); export tmp; }
# Vector arrangement subtables: append the lane-arrangement suffix (".16B",
# ".8H", ...) to the register name; all export the underlying q register.
Rd_VPR128.16B: vRd_VPR128^".16B" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.8H: vRd_VPR128^".8H" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.4S: vRd_VPR128^".4S" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.2S: vRd_VPR128^".2S" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.2D: vRd_VPR128^".2D" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.1Q: vRd_VPR128^".1Q" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rn_VPR128.16B: vRn_VPR128^".16B" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rnn_VPR128.16B: vRnn_VPR128^".16B" is Rnn_VPR128 & vRnn_VPR128 { export Rnn_VPR128; }
Rnnn_VPR128.16B: vRnnn_VPR128^".16B" is Rnnn_VPR128 & vRnnn_VPR128 { export Rnnn_VPR128; }
Rnnnn_VPR128.16B: vRnnnn_VPR128^".16B" is Rnnnn_VPR128 & vRnnnn_VPR128 { export Rnnnn_VPR128; }
Rn_VPR128.8B: vRn_VPR128^".8B" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.8H: vRn_VPR128^".8H" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.4S: vRn_VPR128^".4S" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.4H: vRn_VPR128^".4H" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.2D: vRn_VPR128^".2D" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rm_VPR128.8B: vRm_VPR128^".8B" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; }
Rm_VPR128.16B: vRm_VPR128^".16B" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; }
Rm_VPR128.8H: vRm_VPR128^".8H" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; }
Rm_VPR128.4S: vRm_VPR128^".4S" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; }
Rm_VPR128.4H: vRm_VPR128^".4H" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; }
Rm_VPR128.2D: vRm_VPR128^".2D" is Rm_VPR128 & vRm_VPR128 { export Rm_VPR128; }
Ra_VPR128.16B:
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# (completes the Ra_VPR128.16B constructor begun above)
vRa_VPR128^".16B" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; }
# Ra_VPR128.8H: vRa_VPR128^".8H" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; }
Ra_VPR128.4S: vRa_VPR128^".4S" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; }
# Ra_VPR128.2D: vRa_VPR128^".2D" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; }
# Ra_VPR128.1Q: vRa_VPR128^".1Q" is Ra_VPR128 & vRa_VPR128 { export Ra_VPR128; }
# 64-bit (d-register) arrangement subtables.
Rd_VPR64.8B: vRd_VPR64^".8B" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; }
Rd_VPR64.4H: vRd_VPR64^".4H" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; }
Rd_VPR64.2S: vRd_VPR64^".2S" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; }
Rd_VPR64.1D: vRd_VPR64^".1D" is Rd_VPR64 & vRd_VPR64 { export Rd_VPR64; }
Rn_VPR64.8B: vRn_VPR64^".8B" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; }
Rn_VPR64.4H: vRn_VPR64^".4H" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; }
Rn_VPR64.2S: vRn_VPR64^".2S" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; }
Rn_VPR64.1D: vRn_VPR64^".1D" is Rn_VPR64 & vRn_VPR64 { export Rn_VPR64; }
Rm_VPR64.8B: vRm_VPR64^".8B" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; }
Rm_VPR64.4H: vRm_VPR64^".4H" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; }
Rm_VPR64.2S: vRm_VPR64^".2S" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; }
Rm_VPR64.1D: vRm_VPR64^".1D" is Rm_VPR64 & vRm_VPR64 { export Rm_VPR64; }
# Single-lane (element) arrangement subtables (".B"/".H"/".S"/".D").
Rd_VPR128.B: vRd_VPR128^".B" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.H: vRd_VPR128^".H" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.S: vRd_VPR128^".S" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rd_VPR128.D: vRd_VPR128^".D" is Rd_VPR128 & vRd_VPR128 { export Rd_VPR128; }
Rn_VPR128.B: vRn_VPR128^".B" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.H: vRn_VPR128^".H" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.S: vRn_VPR128^".S" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Rn_VPR128.D: vRn_VPR128^".D" is Rn_VPR128 & vRn_VPR128 { export Rn_VPR128; }
Re_VPR128.B: vRe_VPR128^".B" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; }
Re_VPR128.H: vRe_VPR128^".H" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; }
Re_VPR128.S: vRe_VPR128^".S" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; }
Re_VPR128.D: vRe_VPR128^".D" is Re_VPR128 & vRe_VPR128 { export Re_VPR128; }
Re_VPR128Lo.H: vRe_VPR128Lo^".H" is Re_VPR128Lo & vRe_VPR128Lo { export Re_VPR128Lo; }
# Branch condition codes: each constructor exports a 1-byte boolean built from
# the NZCV flag registers (ZR, CY, NG, OV). "al" and "nv" both export true,
# matching A64 behavior where cond 0b1111 executes unconditionally.
br_cc_op: "eq" is br_cond_op=0 { export ZR; }
br_cc_op: "ne" is br_cond_op=1 { tmp:1 = !ZR; export tmp; }
br_cc_op: "cs" is br_cond_op=2 { export CY; }
br_cc_op: "cc" is br_cond_op=3 { tmp:1 = !CY; export tmp; }
br_cc_op: "mi" is br_cond_op=4 { export NG; }
br_cc_op: "pl" is br_cond_op=5 { tmp:1 = !NG; export tmp; }
br_cc_op: "vs" is br_cond_op=6 { export OV; }
br_cc_op: "vc" is br_cond_op=7 { tmp:1 = !OV; export tmp; }
br_cc_op: "hi" is br_cond_op=8 { tmp:1 = CY && (!ZR); export tmp; }
br_cc_op: "ls" is br_cond_op=9 { tmp:1 = (!CY) || ZR; export tmp; }
br_cc_op: "ge" is br_cond_op=10 { tmp:1 = (NG==OV); export tmp; }
br_cc_op: "lt" is br_cond_op=11 { tmp:1 = (NG!=OV); export tmp; }
br_cc_op: "gt" is br_cond_op=12 { tmp:1 = (!ZR) && (NG==OV); export tmp; }
br_cc_op: "le" is br_cond_op=13 { tmp:1 = ZR || (NG!=OV); export tmp; }
br_cc_op: "al" is br_cond_op=14 { export 1:1; }
br_cc_op: "nv" is br_cond_op=15 { export 1:1; }
BranchCondOp: br_cc_op is br_cc_op { export br_cc_op; }
# Same condition table keyed on cond_op (non-branch context).
cc_op: "eq" is cond_op=0 { export ZR; }
cc_op: "ne" is cond_op=1 { tmp:1 = !ZR; export tmp; }
cc_op: "cs" is cond_op=2 { export CY; }
cc_op: "cc" is cond_op=3 { tmp:1 = !CY; export tmp; }
cc_op: "mi" is cond_op=4 { export NG; }
cc_op: "pl" is cond_op=5 { tmp:1 = !NG; export tmp; }
cc_op: "vs" is cond_op=6 { export OV; }
cc_op: "vc" is cond_op=7 { tmp:1 = !OV; export tmp; }
cc_op: "hi" is cond_op=8 { tmp:1 = CY && (!ZR); export tmp; }
cc_op: "ls" is cond_op=9 { tmp:1 = (!CY) || ZR; export tmp; }
cc_op: "ge" is cond_op=10 { tmp:1 = (NG==OV); export tmp; }
cc_op: "lt" is cond_op=11 { tmp:1 = (NG!=OV); export tmp; }
cc_op: "gt" is cond_op=12 { tmp:1 = (!ZR) && (NG==OV); export tmp; }
cc_op: "le" is cond_op=13 {
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# (completes the cc_op "le" constructor begun above)
tmp:1 = ZR || (NG!=OV); export tmp; }
cc_op: "al" is cond_op=14 { export 1:1; }
cc_op: "nv" is cond_op=15 { export 1:1; }
CondOp: cc_op is cc_op { export cc_op; }
# Inverted condition table: each mnemonic is paired with the OPPOSITE cond_op
# value (e.g. "eq" prints when cond_op=1), exporting the test for the printed
# condition — used where the encoding holds the inverse of the displayed cond.
inv_cc_op: "eq" is cond_op=1 { export ZR; }
inv_cc_op: "ne" is cond_op=0 { tmp:1 = !ZR; export tmp; }
inv_cc_op: "cs" is cond_op=3 { export CY; }
inv_cc_op: "cc" is cond_op=2 { tmp:1 = !CY; export tmp; }
inv_cc_op: "mi" is cond_op=5 { export NG; }
inv_cc_op: "pl" is cond_op=4 { tmp:1 = !NG; export tmp; }
inv_cc_op: "vs" is cond_op=7 { export OV; }
inv_cc_op: "vc" is cond_op=6 { tmp:1 = !OV; export tmp; }
inv_cc_op: "hi" is cond_op=9 { tmp:1 = CY && (!ZR); export tmp; }
inv_cc_op: "ls" is cond_op=8 { tmp:1 = (!CY) || ZR; export tmp; }
inv_cc_op: "ge" is cond_op=11 { tmp:1 = (NG==OV); export tmp; }
inv_cc_op: "lt" is cond_op=10 { tmp:1 = (NG!=OV); export tmp; }
inv_cc_op: "gt" is cond_op=13 { tmp:1 = (!ZR) && (NG==OV); export tmp; }
inv_cc_op: "le" is cond_op=12 { tmp:1 = ZR || (NG!=OV); export tmp; }
inv_cc_op: "al" is cond_op=15 { export 1:1; }
inv_cc_op: "nv" is cond_op=14 { export 1:1; }
InvCondOp: inv_cc_op is inv_cc_op { export inv_cc_op; }
SBIT_CZNO: is b_29=0 { } # Do nothing to the flag bits
# When the S bit (b_29) is set, commit the temporary flag results to NZCV.
SBIT_CZNO: "s" is b_29=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; }
# Exact shift-amount immediates derived from aa_extreg_shift.
Imm_uimm_exact8: "#"^value is aa_extreg_shift [ value = 8 << aa_extreg_shift; ] { export *[const]:4 value; }
Imm_uimm_exact16: "#"^value is aa_extreg_shift [ value = 8 << aa_extreg_shift; ] { export *[const]:4 value; }
Imm_uimm_exact32: "#"^value is aa_extreg_shift [ value = 8 << aa_extreg_shift; ] { export *[const]:4 value; }
# Right-shift immediates: amount = (2*esize) - encoded immh:immb field.
Imm_shr_imm8: "#"^val is b_1922 & b_1618 [ val = (8*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; }
Imm_shr_imm16: "#"^val is b_1922 & b_1618 [ val = (16*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; }
Imm_shr_imm32: "#"^val is b_1922 & b_1618 [ val = (32*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; }
Imm_shr_imm64: "#"^val is b_1922 & b_1618 [ val = (64*2) - (b_1922 << 3 | b_1618); ] { export *[const]:4 val; }
NZCVImm_uimm4: "#"^b_0003 is b_0003 { export *[const]:1 b_0003; }
UImm5: "#"^b_1620 is b_1620 { export *[const]:4 b_1620; }
UImm6: "#"^b_1520 is b_1520 { export *[const]:4 b_1520; }
CRm_uimm4: "#"^b_0811 is b_0811 { export *[const]:1 b_0811; }
# def15 variant: value 15 prints with no operand text.
CRm_uimm4_def15: "#"^b_0811 is b_0811 { export *[const]:1 b_0811; }
CRm_uimm4_def15: is b_0811=0xf { export 15:1; }
LSB_bitfield32_imm: "#"^imm6 is b_1515=0 & imm6 { export *[const]:8 imm6; }
LSB_bitfield64_imm: "#"^imm6 is imm6 { export *[const]:8 imm6; }
LSB_bitfield32_imm_shift: "#"^shift is b_1515=0 & imm6 [ shift = 31 - imm6; ] { export *[const]:4 shift; }
LSB_bitfield64_imm_shift: "#"^shift is imm6 [ shift = 63 - imm6; ] { export *[const]:8 shift; }
# PC-relative branch targets: instruction address + 4*signed immediate.
AddrLoc14: reloc is simm14 [ reloc = inst_start + (4*simm14); ] { export *[const]:8 reloc; }
AddrLoc19: reloc is simm19 [ reloc = inst_start + (4*simm19); ] { export *[const]:8 reloc; }
AddrLoc26: reloc is simm26 [ reloc = inst_start + (4*simm26); ] { export *[const]:8 reloc; }
Addr14: AddrLoc14 is AddrLoc14 { export *:8 AddrLoc14; }
Addr19: AddrLoc19 is AddrLoc19 { export *:8 AddrLoc19; }
Addr26: AddrLoc26 is AddrLoc26 { export *:8 AddrLoc26; }
# ADRP (b_31=1): page-aligned base + immediate<<12; ADR (b_31=0): byte offset.
AdrReloff: reloff is b_31=1 & immlo & immhi [ reloff = ((inst_start) & ~0xfff) + ( ((immhi << 2) | immlo) << 12 ); ] { export *[const]:8 reloff; }
AdrReloff: reloff is b_31=0 & immlo & immhi [ reloff = (inst_start) + ( ((immhi << 2) | immlo) ); ] { export *[const]:8 reloff; }
# Add/sub immediate with optional LSL #12.
ImmShift32: "#"^imm12 is aa_extreg_shift=0 & imm12 { export *[const]:4 imm12; }
ImmShift32: "#"^imm12, "LSL #12" is aa_extreg_shift=1 & imm12 { tmp:4 = imm12 << 12; export tmp; }
ImmShift64: "#"^imm12 is aa_extreg_shift=0 & imm12 { export *[const]:8 imm12; }
ImmShift64: "#"^imm12, "LSL #12" is aa_extreg_shift=1 & imm12 { tmp:8 = imm12 << 12; export tmp; }
# TODO some instructions can't do ROR operation on immediate!
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# Shifted-register operands (32-bit): LSL/LSR/ASR selected by aa_extreg_shift;
# ROR is only valid in the "Log" (logical instruction) subtable.
RegShift32: Rm_GPR32, "LSL #"^imm6 is Rm_GPR32 & aa_extreg_shift = 0 & imm6 & b_1515=0 { tmp:4 = Rm_GPR32 << imm6; export tmp; }
RegShift32: Rm_GPR32 is Rm_GPR32 & aa_extreg_shift = 0 & imm6=0 { export Rm_GPR32; }
RegShift32: Rm_GPR32, "LSR #"^imm6 is Rm_GPR32 & aa_extreg_shift = 1 & imm6 & b_1515=0 { tmp:4 = Rm_GPR32 >> imm6; export tmp; }
RegShift32: Rm_GPR32, "ASR #"^imm6 is Rm_GPR32 & aa_extreg_shift = 2 & imm6 & b_1515=0 { tmp:4 = Rm_GPR32 s>> imm6; export tmp; }
RegShift32Log: RegShift32 is aa_extreg_shift & RegShift32 { export RegShift32; }
# ROR composed from two shifts and an OR.
RegShift32Log: Rm_GPR32, "ROR #"^imm6 is aa_extreg_shift=3 & Rm_GPR32 & imm6 & b_1515=0 { tmp:4 = (Rm_GPR32 >> imm6) | (Rm_GPR32 << (32 - imm6)); export tmp; }
RegShift64: Rm_GPR64, "LSL #"^imm6 is Rm_GPR64 & aa_extreg_shift = 0 & imm6 { tmp:8 = Rm_GPR64 << imm6; export tmp; }
RegShift64: Rm_GPR64 is Rm_GPR64 & aa_extreg_shift = 0 & imm6=0 { export Rm_GPR64; }
RegShift64: Rm_GPR64, "LSR #"^imm6 is Rm_GPR64 & aa_extreg_shift = 1 & imm6 { tmp:8 = Rm_GPR64 >> imm6; export tmp; }
RegShift64: Rm_GPR64, "ASR #"^imm6 is Rm_GPR64 & aa_extreg_shift = 2 & imm6 { tmp:8 = Rm_GPR64 s>> imm6; export tmp; }
RegShift64Log: RegShift64 is aa_extreg_shift & RegShift64 & aa_Xn & aa_Xm & imm6 { export RegShift64; }
RegShift64Log: Rm_GPR64, "ROR #"^imm6 is aa_extreg_shift=3 & Rm_GPR64 & aa_Xn & aa_Xm & imm6 { tmp:8 = (Rm_GPR64 >> imm6) | (Rm_GPR64 << (64 - imm6)); export tmp; }
# Extend-register shift amount (0..4) from aa_extreg_imm3.
RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=1 & b_1012 { export 1:4; }
RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=2 & b_1012 { export 2:4; }
RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=3 & b_1012 { export 3:4; }
RegShiftVal32: " #"^b_1012 is aa_extreg_imm3=4 & b_1012 { export 4:4; }
RegShiftVal32: "" is aa_extreg_imm3=0 { export 0:4; }
LSL_Sp_Special32: Rm_GPR32, "LSL " is Rm_GPR32 & aa_extreg_imm3 { export Rm_GPR32; }
LSL_Sp_Special32: Rm_GPR32 is Rm_GPR32 & aa_extreg_imm3=0 { export Rm_GPR32; }
# Extended-register operands (32-bit result): zero/sign extend a sub-width
# view of Wm per aa_extreg_option.
ExtendReg32: Rm_GPR32, "UXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=0 { tmp0:4 = Rm_GPR32; tmp:4 = zext(tmp0:1); export tmp; }
ExtendReg32: Rm_GPR32, "UXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=1 { tmp0:4 = Rm_GPR32; tmp:4 = zext(tmp0:2); export tmp; }
# Special LSL display when Rd or Rn is 31 (SP context), per disassembly convention.
ExtendReg32: LSL_Sp_Special32 is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 & b_29=1 & (Rn=0x1f) & LSL_Sp_Special32 { export Rm_GPR32; }
ExtendReg32: LSL_Sp_Special32 is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 & b_29=0 & (Rd=0x1f | Rn=0x1f) & LSL_Sp_Special32 { export Rm_GPR32; }
ExtendReg32: Rm_GPR32, "UXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 { tmp:4 = Rm_GPR32; export tmp; }
ExtendReg32: Rm_GPR32, "UXTX " is Rm_GPR32 & b_2121=1 & aa_extreg_option=3 { tmp:4 = Rm_GPR32; export tmp; }
ExtendReg32: Rm_GPR32, "SXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=4 { tmp0:4 = Rm_GPR32; tmp:4 = sext(tmp0:1); export tmp; }
ExtendReg32: Rm_GPR32, "SXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=5 { tmp0:4 = Rm_GPR32; tmp:4 = sext(tmp0:2); export tmp; }
ExtendReg32: Rm_GPR32, "SXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=6 { tmp:4 = Rm_GPR32; export tmp; }
ExtendReg32: Rm_GPR32, "SXTX " is Rm_GPR32 & b_2121=1 & aa_extreg_option=7 { tmp:4 = Rm_GPR32; export tmp; }
# Extend then shift left by RegShiftVal32.
ExtendRegShift32: ExtendReg32^RegShiftVal32 is aa_extreg_shift = 0 & aa_extreg_option & aa_extreg_imm3 & ExtendReg32 & RegShiftVal32 { tmp:4 = ExtendReg32; tmp = tmp << RegShiftVal32; export tmp; }
ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=2 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; }
ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=3 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; }
ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=6 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export Rm_GPR32; }
ExtendRegShift32: ExtendReg32 is Rm_GPR32 & aa_extreg_shift = 0 & aa_extreg_option=7 & aa_extreg_imm3=0 & ExtendReg32 & RegShiftVal32 { export
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# (completes the ExtendRegShift32 constructor begun above)
Rm_GPR32; }
# Named aliases of ImmShift32/64 used by the add/sub-immediate instruction forms.
Imm12_addsubimm_operand_i32_negimm_lsl0: ImmShift32 is ImmShift32 { export ImmShift32; }
Imm12_addsubimm_operand_i32_negimm_lsl12: ImmShift32 is ImmShift32 { export ImmShift32; }
Imm12_addsubimm_operand_i32_posimm_lsl0: ImmShift32 is ImmShift32 { export ImmShift32; }
Imm12_addsubimm_operand_i32_posimm_lsl12: ImmShift32 is ImmShift32 { export ImmShift32; }
Imm12_addsubimm_operand_i64_negimm_lsl0: ImmShift64 is ImmShift64 { export ImmShift64; }
Imm12_addsubimm_operand_i64_negimm_lsl12: ImmShift64 is ImmShift64 { export ImmShift64; }
Imm12_addsubimm_operand_i64_posimm_lsl0: ImmShift64 is ImmShift64 { export ImmShift64; }
Imm12_addsubimm_operand_i64_posimm_lsl12: ImmShift64 is ImmShift64 { export ImmShift64; }
# 64-bit extend-register shift amount (0..4).
RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=1 & b_1012 { export 1:8; }
RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=2 & b_1012 { export 2:8; }
RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=3 & b_1012 { export 3:8; }
RegShiftVal64: " #"^b_1012 is aa_extreg_imm3=4 & b_1012 { export 4:8; }
RegShiftVal64: "" is aa_extreg_imm3=0 { export 0:8; }
LSL_Sp_Special64: Rm_GPR64, "LSL " is Rm_GPR64 & aa_extreg_imm3 { export Rm_GPR64; }
LSL_Sp_Special64: Rm_GPR64 is Rm_GPR64 & aa_extreg_imm3=0 { export Rm_GPR64; }
# Extended-register operands producing a 64-bit value: sub-width views of Wm
# are zero/sign extended to 8 bytes; X-register forms pass through.
ExtendReg64: Rm_GPR32, "UXTB " is Rm_GPR32 & b_2121=1 & aa_extreg_option=0 { tmp0:4 = Rm_GPR32; tmp:8 = zext(tmp0:1); export tmp; }
ExtendReg64: Rm_GPR32, "UXTH " is Rm_GPR32 & b_2121=1 & aa_extreg_option=1 { tmp0:4 = Rm_GPR32; tmp:8 = zext(tmp0:2); export tmp; }
ExtendReg64: Rm_GPR32, "UXTW " is Rm_GPR32 & b_2121=1 & aa_extreg_option=2 { tmp:8 = zext(Rm_GPR32); export tmp; }
# Special LSL display when Rd or Rn is 31 (SP context), per disassembly convention.
ExtendReg64: LSL_Sp_Special64 is Rm_GPR64 & b_2121=1 & aa_extreg_option=3 & b_29=1 & b_25=1 & (Rn=0x1f) & LSL_Sp_Special64 { tmp:8 = Rm_GPR64; export tmp; }
ExtendReg64: LSL_Sp_Special64 is Rm_GPR64 & b_2121=1 & aa_extreg_option=3 & b_29=0 & b_25=1 & (Rd=0x1f | Rn=0x1f) & LSL_Sp_Special64 { tmp:8 = Rm_GPR64; export tmp; }
ExtendReg64: Rm_GPR64, "LSL " is Rm_GPR64 & b_2121=1 &
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# (completes the size.ldstr=3 addrUIMM constructor begun above)
v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 3; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; }
# 128-bit SIMD form: imm12 scaled by 16.
addrUIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is size.ldstr=0 & b_2729=7 & v=1 & b_2425=1 & b_2323=1 & Rn_GPR64xsp & imm12 [ pimm = imm12 << 4; ] { tmp:8 = Rn_GPR64xsp + pimm; export tmp; }
# Zero-offset forms print without the immediate.
addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=0 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=1 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=2 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=3 & b_2729=7 & v & b_2425=1 & b_2323 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
addrUIMM: "["^Rn_GPR64xsp^"]" is size.ldstr=0 & b_2729=7 & v=1 & b_2425=1 & b_2323=1 & Rn_GPR64xsp & imm12=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
# Address Reg + signed offset -256 to 255
addr_SIMM9: "["^Rn_GPR64xsp, "#"^simm9^"]" is Rn_GPR64xsp & simm9 { tmp:8 = Rn_GPR64xsp + simm9; export tmp; }
addr_SIMM9: "["^Rn_GPR64xsp^"]" is Rn_GPR64xsp & simm9=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
# Register-offset shift amount for loads/stores, scaled by access size when
# the S bit (b_1212) is set. NOTE(review): "val = 0 & 0xff" evaluates to 0 —
# matches upstream Ghidra; the displayed "#0" shift is a no-op by design here.
addrRegShift64: "#"^val is size.ldstr=0 & v=0 & opt & b_1212=1 [ val = 0 & 0xff; ] { export *[const]:8 val; }
addrRegShift64: "" is size.ldstr=0 & v=0 & opt & b_1212=0 { export 0:8; }
addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=0 & b_1212=1 [ val = 0 & 0xff; ] { export *[const]:8 val; }
addrRegShift64: "" is size.ldstr=0 & v=1 & opt=0 & b_1212=0 { export 0:8; }
addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=1 & b_1212=1 [ val = 0 & 0xff; ] { export *[const]:8 val; }
addrRegShift64: "" is size.ldstr=0 & v=1 & opt=1 & b_1212=0 { export 0:8; }
addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=2 & b_1212 [ val = b_1212 * 4; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=0 & v=1 & opt=3 & b_1212 [ val = b_1212 * 4; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=1 & v=0 & opt & b_1212 [ val = b_1212 * 1; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=1 & v=1 & opt & b_1212 [ val = b_1212 * 1; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=2 & v=0 & opt & b_1212 [ val = b_1212 * 2; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=2 & v=1 & opt & b_1212 [ val = b_1212 * 2; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=3 & v=0 & opt & b_1212 [ val = b_1212 * 3; ] { export *[const]:8 val; }
addrRegShift64: "#"^val is size.ldstr=3 & v=1 & opt & b_1212 [ val = b_1212 * 3; ] { export *[const]:8 val; }
# Extended register offset, shifted by the scale above; options 2/3/6/7 are
# UXTW/UXTX(LSL)/SXTW/SXTX per the ExtendReg64 table.
addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=2 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; }
addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=3 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; }
addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=6 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; }
addrExtendRegShift64: ExtendReg64^addrRegShift64 is aa_extreg_option=7 & aa_extreg_imm3 & addrRegShift64 & ExtendReg64 { tmp:8 = ExtendReg64; tmp = tmp << addrRegShift64; export tmp; }
addrExtendRegShift64: Rm_GPR64 is Rm_GPR64 & aa_extreg_option=3 & aa_extreg_imm3=0 & ExtendReg64 { export Rm_GPR64; }
addrExtendRegShift64: Rm_GPR64 is Rm_GPR64 & aa_extreg_option=7 & aa_extreg_imm3=0 & ExtendReg64 { export Rm_GPR64; }
# unsigned offset
addrIndexed: addrUIMM is size.ldstr & b_2729=7 & b_2425=1 & addrUIMM { export addrUIMM; }
# unsigned offset unscaled immediate
addrIndexed: addr_SIMM9 is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & opc.indexmode=0 & addr_SIMM9 { export addr_SIMM9; }
# register unprivileged
addrIndexed: addr_SIMM9 is
# NOTE(review): re-wrapped to one constructor per line; code tokens unchanged.
# (completes the unprivileged addrIndexed constructor begun above)
size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & opc.indexmode=2 & addr_SIMM9 { export addr_SIMM9; }
# post indexed wback
addrIndexed: "["^Rn_GPR64xsp^"]", "#"^simm9 is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & Rn_GPR64xsp & simm9 & opc.indexmode=1 { tmp:8 = Rn_GPR64xsp; Rn_GPR64xsp = Rn_GPR64xsp + simm9; export tmp; }
# Register, Register offset extended
addrIndexed: "["^Rn_GPR64xsp, addrExtendRegShift64^"]" is size.ldstr & b_2729=7 & b_2425=0 & b_2121=1 & Rn_GPR64xsp & opc.indexmode=2 & addrExtendRegShift64 { tmp:8 = Rn_GPR64xsp + addrExtendRegShift64; export tmp; }
# pre indexed wback
addrIndexed: "["^Rn_GPR64xsp, "#"^simm9^"]!" is size.ldstr & b_2729=7 & b_2425=0 & b_2121=0 & Rn_GPR64xsp & simm9 & opc.indexmode=3 { Rn_GPR64xsp = Rn_GPR64xsp + simm9; export Rn_GPR64xsp; }
# For LDRAA/LDRAB
# no offset (with S)
addrIndexed: "["^Rn_GPR64xsp^"]" is size.ldstr & b_2729=7 & b_2425=0 & b_22=0 & b_2121=1 & Rn_GPR64xsp & simm9=0 & opc.indexmode=1 { export Rn_GPR64xsp; }
# offset (with S)
# sim reconstructs the 10-bit signed offset (S:imm9) scaled by 8.
addrIndexed: "["^Rn_GPR64xsp, "#"^sim^"]" is size.ldstr & b_2729=7 & b_2425=0 & b_22 & b_2121=1 & Rn_GPR64xsp & simm9 & opc.indexmode=1 [ sim = (b_22 * (-1<<9) | (simm9 & 0x1ff)) << 3; ] { tmp:8 = Rn_GPR64xsp + sim; export tmp; }
# no offset writeback (with S)
addrIndexed: "["^Rn_GPR64xsp^"]!" is size.ldstr & b_2729=7 & b_2425=0 & b_22=0 & b_2121=1 & Rn_GPR64xsp & simm9=0 & opc.indexmode=3 { export Rn_GPR64xsp; }
# pre indexed wback (with S)
addrIndexed: "["^Rn_GPR64xsp, "#"^sim^"]!" is size.ldstr & b_2729=7 & b_2425=0 & b_22 & b_2121=1 & Rn_GPR64xsp & simm9 & opc.indexmode=3 [ sim = (b_22 * (-1<<9) | (simm9 & 0x1ff)) << 3; ] { Rn_GPR64xsp = Rn_GPR64xsp + sim; export Rn_GPR64xsp; }
# Pair-access offset: simm7 scaled by 4/8/16 based on operand size bits.
addrPairScale: pimm is b_3031=0 & v=0 & simm7 [ pimm = simm7 << 2; ] { export *[const]:8 pimm; }
addrPairScale: pimm is b_3031=0 & v=1 & simm7 [ pimm = simm7 << 2; ] { export *[const]:8 pimm; }
addrPairScale: pimm is b_3031=2 & v=0 & simm7 [ pimm = simm7 << 3; ] { export *[const]:8 pimm; }
addrPairScale: pimm is b_3031=1 & v=0 & simm7 [ pimm = simm7 << 2; ] { export *[const]:8 pimm; }
addrPairScale: pimm is b_3031=1 & v=1 & simm7 [ pimm = simm7 << 3; ] { export *[const]:8 pimm; }
addrPairScale: pimm is b_3031=2 & v=1 & simm7 [ pimm = simm7 << 4; ] { export *[const]:8 pimm; }
# Scaled Offset
addrPairUIMM: "["^Rn_GPR64xsp, "#"^addrPairScale^"]" is sf & Rn_GPR64xsp & addrPairScale & simm7 { tmp:8 = Rn_GPR64xsp + addrPairScale; export tmp; }
addrPairUIMM: "["^Rn_GPR64xsp^"]" is sf & Rn_GPR64xsp & addrPairScale & simm7=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
# unsigned offset
addrPairIndexed: addrPairUIMM is b_2729=0b101 & b_2325=0b010 & addrPairUIMM { export addrPairUIMM; }
# unsigned offset, non-temporal hint
addrPairIndexed: addrPairUIMM is b_2729=0b101 & b_2325=0b000 & addrPairUIMM { export addrPairUIMM; }
# post indexed wback
addrPairIndexed: "["^Rn_GPR64xsp^"]", "#"^addrPairScale is b_2729=0b101 & b_2325=0b001 & Rn_GPR64xsp & addrPairScale { tmp:8 = Rn_GPR64xsp; Rn_GPR64xsp = Rn_GPR64xsp + addrPairScale; export tmp; }
# pre indexed wback
addrPairIndexed: "["^Rn_GPR64xsp, "#"^addrPairScale^"]!" is b_2729=0b101 & b_2325=0b011 & Rn_GPR64xsp & addrPairScale { Rn_GPR64xsp = Rn_GPR64xsp + addrPairScale; export Rn_GPR64xsp; }
#### Undefined behavior on writeback ####
#
# Most instructions with writeback have unpredictable behavior when their address input register Rn
# is the same register as another input, e.g. Rt. For example, LDR x1, [x1, 0x8]!
# NOTE(review): re-wrapped during review; code tokens unchanged. The lines
# below continue the "Undefined behavior on writeback" comment begun above.
# has unpredictable
# behavior in the ARM spec. Similarly, STR x5, [x5], 0x28 has unpredictable behavior in the spec
# (but with slightly different possibilities for what forms that unpredictable behavior might take!).
#
# One of the few exceptions is STGP, which has no mention of unpredictable behavior. In such cases,
# it's important to read all registers before addrGranuleIndexed or addrPairGranuleIndexed takes effect,
# or pre-index writeback will modify the register values used if Rn is the same register as another R.
#
# This is an example of how to code a definition for an instruction with no unpredictable behavior:
#{
#	# save the initial register values
#	data1:8 = Rt_GPR64;
#	data2:8 = Rt2_GPR64;
#
#	build addrPairGranuleIndexed;	# may modify Rt or Rt2, so use data1/data2 instead afterward
#	# ...etc...
#}
OPTIONAL_XM: is Rm=0b11111 { export 0:8; } # default to XZR if Xm is absent
OPTIONAL_XM: ,Rm_GPR64 is Rm_GPR64 { export Rm_GPR64; }
# MTE tag-granule addressing: simm9 scaled by the LOG2_TAG_GRANULE define.
addr_granuleSIMM: "["^Rn_GPR64xsp, "#"^pimm^"]" is Rn_GPR64xsp & simm9 [ pimm = simm9 << $(LOG2_TAG_GRANULE); ] { tmp:8 = Rn_GPR64xsp + ( simm9 << $(LOG2_TAG_GRANULE) ); export tmp; }
addr_granuleSIMM: "["^Rn_GPR64xsp^"]" is Rn_GPR64xsp & simm9=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
# signed offset
addrGranuleIndexed: addr_granuleSIMM is opc.indexmode=2 & addr_granuleSIMM { export addr_granuleSIMM; }
# post indexed wback
addrGranuleIndexed: "["^Rn_GPR64xsp^"]", "#"^pimm is Rn_GPR64xsp & simm9 & opc.indexmode=1 [ pimm = simm9 << $(LOG2_TAG_GRANULE); ] { tmp:8 = Rn_GPR64xsp; Rn_GPR64xsp = Rn_GPR64xsp + pimm; export tmp; }
# pre indexed wback
addrGranuleIndexed: "["^Rn_GPR64xsp, "#"^pimm^"]!" is Rn_GPR64xsp & simm9 & opc.indexmode=3 [ pimm = simm9 << $(LOG2_TAG_GRANULE); ] { Rn_GPR64xsp = Rn_GPR64xsp + pimm; tmp:8 = Rn_GPR64xsp; export tmp; }
addrPairGranuleScale: pimm is simm7 [ pimm = simm7 << $(LOG2_TAG_GRANULE); ] { export *[const]:8 pimm; }
# Scaled Offset
addrPairGranuleSIMM: "["^Rn_GPR64xsp, "#"^addrPairGranuleScale^"]" is sf & Rn_GPR64xsp & addrPairGranuleScale & simm7 { tmp:8 = Rn_GPR64xsp + addrPairGranuleScale; export tmp; }
addrPairGranuleSIMM: "["^Rn_GPR64xsp^"]" is sf & Rn_GPR64xsp & addrPairGranuleScale & simm7=0 { tmp:8 = Rn_GPR64xsp; export tmp; }
# signed offset
addrPairGranuleIndexed: addrPairGranuleSIMM is b_2729=0b101 & b_2325=0b010 & addrPairGranuleSIMM { export addrPairGranuleSIMM; }
# post indexed wback
addrPairGranuleIndexed: "["^Rn_GPR64xsp^"]", "#"^addrPairGranuleScale is b_2729=0b101 & b_2325=0b001 & Rn_GPR64xsp & addrPairGranuleScale { tmp:8 = Rn_GPR64xsp; Rn_GPR64xsp = Rn_GPR64xsp + addrPairGranuleScale; export tmp; }
# pre indexed wback
addrPairGranuleIndexed: "["^Rn_GPR64xsp, "#"^addrPairGranuleScale^"]!"
is b_2729=0b101 & b_2325=0b011 & Rn_GPR64xsp & addrPairGranuleScale { Rn_GPR64xsp = Rn_GPR64xsp + addrPairGranuleScale; export Rn_GPR64xsp; }

# Logical-immediate decode tables (the DecodeBitMasks pseudocode of the Arm ARM).
# Each element size (esize) gets one constructor pair:
#   DecodeWMask*: a run of (imms_low_bits + 1) ones, rotated right by immr, replicated to the register width.
#   DecodeTMask*: a run of ((imms - immr) mod esize + 1) ones, replicated to the register width.
# Replication is done by multiplying by a pattern constant (0x100000001, 0x10001, 0x101, 0x11, 0x5);
# the wrap mask on (imms - immr) must equal "levels" (esize - 1) so the difference wraps mod esize.

# esize=32, len=5, levels=0x1f: 32 bits with b_1014+1 1s; rotate right b_1620; replicate 1 time
DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_15=0 & b_1014 & b_1620 [ wmask=(((~(-1<<(b_1014+1)))*0x100000001)>>b_1620)&0xffffffff; ] { export * [const]:4 wmask; }
# esize=32, len=5, levels=0x1f: 32 bits with |b_1014-b_1620|+1 1s; replicate 1 time
DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_15=0 & b_1014 & b_1620 [ tmask=(~(-1<<(((b_1014-b_1620)&0x1f)+1)))&0xffffffff; ] { export * [const]:4 tmask; }
# esize=16, len=4, levels=0xf: 16 bits with b_1013+1 1s; rotate right b_1619; replicate 2 times
DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 [ wmask=((((~(-1<<(b_1013+1)))*0x10001)>>b_1619)&0xffff)*0x10001; ] { export * [const]:4 wmask; }
# esize=16, len=4, levels=0xf: 16 bits with |b_1013-b_1619|+1 1s; replicate 2 times
DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 [ tmask=((~(-1<<(((b_1013-b_1619)&0xf)+1)))&0xffff)*0x10001; ] { export * [const]:4 tmask; }
# esize=8, len=3, levels=0x7: 8 bits with b_1012+1 1s; rotate right b_1618; replicate 4 times
DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 [ wmask=((((~(-1<<(b_1012+1)))*0x101)>>b_1618)&0xff)*0x101*0x10001; ] { export * [const]:4 wmask; }
# esize=8, len=3, levels=0x7: 8 bits with |b_1012-b_1618|+1 1s; replicate 4 times
DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 [ tmask=((~(-1<<(((b_1012-b_1618)&0x7)+1)))&0xff)*0x101*0x10001; ] { export * [const]:4 tmask; }
# esize=4, len=2, levels=0x3: 4 bits with b_1011+1 1s; rotate right b_1617; replicate 8 times
DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1215=0xe & b_1011 & b_1617 [ wmask=((((~(-1<<(b_1011+1)))*0x11)>>b_1617)&0xf)*0x11*0x101*0x10001; ] { export * [const]:4 wmask; }
# esize=4, len=2, levels=0x3: 4 bits with |b_1011-b_1617|+1 1s; replicate 8 times
# FIX: the wrap mask was &0x7, but levels for esize=4 is 0x3 (cf. the esize=4 DecodeTMask64 case
# below); &0x7 yields a wrong run length whenever b_1011 < b_1617.
DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1215=0xe & b_1011 & b_1617 [ tmask=((~(-1<<(((b_1011-b_1617)&0x3)+1)))&0xf)*0x11*0x101*0x10001; ] { export * [const]:4 tmask; }
# esize=2, len=1, levels=0x1: 2 bits with b_1010+1 1s; rotate right b_1616; replicate 16 times
DecodeWMask32: "#"^wmask is b_31=0 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 [ wmask=((((~(-1<<(b_1010+1)))*0x5)>>b_1616)&0x3)*0x5*0x11*0x101*0x10001; ] { export * [const]:4 wmask; }
# esize=2, len=1, levels=0x1: 2 bits with |b_1010-b_1616|+1 1s; replicate 16 times
DecodeTMask32: "#"^tmask is b_31=0 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 [ tmask=((~(-1<<(((b_1010-b_1616)&0x1)+1)))&0x3)*0x5*0x11*0x101*0x10001; ] { export * [const]:4 tmask; }

# esize=64, len=6, levels=0x3f: 64 bits with b_1015+1 1s; rotate right b_1621; repeat 1 time
# can't rotate 64 bits by multiplying, and can't shift by 64 bits all at once
# FIX(extraction damage): the expression here had been corrupted (text between '<' and '>' was
# stripped); restored the rotate-by-OR form, the esize=64 tmask constructor, and the head of the
# esize=32 wmask constructor.
DecodeWMask64: "#"^wmask is b_31=1 & b_22=1 & b_1015 & b_1621 [ wmask=((~((-1<<b_1015)<<1))>>b_1621)|((~((-1<<b_1015)<<1))<<(63-b_1621)<<1); ] { export * [const]:8 wmask; }
# esize=64, len=6, levels=0x3f: 64 bits with |b_1015-b_1621|+1 1s; replicate 1 time
DecodeTMask64: "#"^tmask is b_31=1 & b_22=1 & b_1015 & b_1621 [ tmask=~((-1)<<(((b_1015-b_1621)&0x3f)+1)); ] { export * [const]:8 tmask; }
# esize=32, len=5, levels=0x1f: 32 bits with b_1014+1 1s; rotate right b_1620; replicate 2 times
DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_15=0 & b_1014 & b_1620 [ wmask=((((~(-1<<(b_1014+1)))*0x100000001)>>b_1620)&0xffffffff)*0x100000001; ] { export * [const]:8 wmask; }
# esize=32, len=5, levels=0x1f: 32 bits with |b_1014-b_1620|+1 1s; replicate 2 times
DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_15=0 & b_1014 & b_1620 [ tmask=((~(-1<<(((b_1014-b_1620)&0x1f)+1)))&0xffffffff)*0x100000001; ] { export * [const]:8 tmask; }
# NOTE(upstream debug leftover): returned 0xffcfffdefcfffcf, should be 0xffcfffcfffcfffcf
# esize=16, len=4, levels=0xf: 16 bits with b_1013+1 1s; rotate right b_1619; replicate 4 times
DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 [ wmask=((((~(-1<<(b_1013+1)))*0x10001)>>b_1619)&0xffff)*0x10001*0x100000001; ] { export * [const]:8 wmask; }
# esize=16, len=4, levels=0xf: 16 bits with |b_1013-b_1619|+1 1s; replicate 4 times
DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1415=0x2 & b_1013 & b_1619 [ tmask=((~(-1<<(((b_1013-b_1619)&0xf)+1)))&0xffff)*0x10001*0x100000001; ] { export * [const]:8 tmask; }
# esize=8, len=3, levels=0x7: 8 bits with b_1012+1 1s; rotate right b_1618; replicate 8 times
DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 [ wmask=((((~(-1<<(b_1012+1)))*0x101)>>b_1618)&0xff)*0x101*0x10001*0x100000001; ] { export * [const]:8 wmask; }
# esize=8, len=3, levels=0x7: 8 bits with |b_1012-b_1618|+1 1s; replicate 8 times
DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1315=0x6 & b_1012 & b_1618 [ tmask=((~(-1<<(((b_1012-b_1618)&0x7)+1)))&0xff)*0x101*0x10001*0x100000001; ] { export * [const]:8 tmask; }
# esize=4, len=2, levels=0x3: 4 bits with b_1011+1 1s; rotate right b_1617; replicate 16 times
DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1215=0xe & b_1011 & b_1617 [ wmask=((((~(-1<<(b_1011+1)))*0x11)>>b_1617)&0xf)*0x11*0x101*0x10001*0x100000001; ] { export * [const]:8 wmask; }
# esize=4, len=2, levels=0x3: 4 bits with |b_1011-b_1617|+1 1s; replicate 16 times
DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1215=0xe & b_1011 & b_1617 [ tmask=((~(-1<<(((b_1011-b_1617)&0x3)+1)))&0xf)*0x11*0x101*0x10001*0x100000001; ] { export * [const]:8 tmask; }
# esize=2, len=1, levels=0x1: 2 bits with b_1010+1 1s; rotate right b_1616; replicate 32 times
DecodeWMask64: "#"^wmask is b_31=1 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 [ wmask=((((~((-1)<<(b_1010+1)))*0x5)>>b_1616)&0x3)*0x5*0x11*0x101*0x10001*0x100000001; ] { export * [const]:8 wmask; }
# esize=2, len=1, levels=0x1: 2 bits with |b_1010-b_1616|+1 1s; replicate 32 times
DecodeTMask64: "#"^tmask is b_31=1 & b_22=0 & b_1115=0x1e & b_1010 & b_1616 [ tmask=((~(-1<<(((b_1010-b_1616)&0x1)+1)))&0x3)*0x5*0x11*0x101*0x10001*0x100000001; ] { export * [const]:8 tmask; }

# Plain immediate-rotate/immediate-size operands as constants.
ImmRConst32: "#"^ImmR is ImmR { export *[const]:4 ImmR; }
ImmRConst64: "#"^ImmR is ImmR { export *[const]:8 ImmR; }
ImmSConst32: "#"^ImmS is ImmS { export *[const]:4 ImmS; }
ImmSConst64: "#"^ImmS is ImmS { export *[const]:8 ImmS; }
# Bitfield-immediate operands: display ImmR/ImmS but export the decoded masks.
ImmR_bitfield64_imm: "#"^ImmR is ImmR & DecodeWMask64 { export DecodeWMask64; }
ImmR_bitfield32_imm: "#"^ImmR is ImmR & DecodeWMask32
{ export DecodeWMask32; }
ImmS_bitfield64_imm: "#"^ImmS is ImmS & DecodeTMask64 { export DecodeTMask64; }
ImmS_bitfield32_imm: "#"^ImmS is ImmS & DecodeTMask32 { export DecodeTMask32; }

# AdvSIMD modified-immediate: the 8-bit immediate abcdefgh is split across n_uimm8H (a:b:c) and
# n_uimm8L (d:e:f:g:h); the repl* subtables expand it per cmode into a replicated 64-bit pattern.
abcdefgh: "#"^imm is n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L)); ] { export *[const]:8 imm; }
# 32-bit elements, byte at position 0/1/2/3, replicated twice
repl000: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 32) | ((n_uimm8H << 5 | n_uimm8L)); ] { export *[const]:8 imm; }
repl001: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 40) | ((n_uimm8H << 5 | n_uimm8L) << 8); ] { export *[const]:8 imm; }
repl010: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 48) | ((n_uimm8H << 5 | n_uimm8L) << 16); ] { export *[const]:8 imm; }
repl011: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 56) | ((n_uimm8H << 5 | n_uimm8L) << 24); ] { export *[const]:8 imm; }
# 16-bit elements, byte at position 0/1, replicated four times
repl100: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 48) | ((n_uimm8H << 5 | n_uimm8L) << 32) | ((n_uimm8H << 5 | n_uimm8L) << 16) | ((n_uimm8H << 5 | n_uimm8L)); ] { export *[const]:8 imm; }
repl101: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 56) | ((n_uimm8H << 5 | n_uimm8L) << 40) | ((n_uimm8H << 5 | n_uimm8L) << 24) | ((n_uimm8H << 5 | n_uimm8L) << 8); ] { export *[const]:8 imm; }
# MSL forms: shifted-ones fill below the byte
repl1100: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((((n_uimm8H << 5 | n_uimm8L) << 8) | 0xff) << 32) | (((n_uimm8H << 5 | n_uimm8L) << 8) | 0xff); ] { export *[const]:8 imm; }
repl1101: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((((n_uimm8H << 5 | n_uimm8L) << 16) | 0xffff) << 32) | (((n_uimm8H << 5 | n_uimm8L) << 16) | 0xffff); ] { export *[const]:8 imm; }
# 8-bit elements replicated to all eight byte lanes
repl11100: "#"^imm is abcdefgh & n_uimm8H & n_uimm8L [ imm = ((n_uimm8H << 5 | n_uimm8L) << 56) | ((n_uimm8H << 5 | n_uimm8L) << 48) | ((n_uimm8H << 5 | n_uimm8L) << 40) | ((n_uimm8H << 5 | n_uimm8L) << 32) | ((n_uimm8H << 5 | n_uimm8L) << 24) | ((n_uimm8H << 5 | n_uimm8L) << 16) | ((n_uimm8H << 5 | n_uimm8L) << 8) | (n_uimm8H << 5 | n_uimm8L); ] { export *[const]:8 imm; }
# each immediate bit expanded to a full byte (MOVI 64-bit)
repl11101: "#"^imm is abcdefgh & b_18 & b_17 & b_16 & b_09 & b_08 & b_07 & b_06 & b_05 [ imm = ((b_18 * 0xff) << 56) | ((b_17 * 0xff) << 48) | ((b_16 * 0xff) << 40) | ((b_09 * 0xff) << 32) | ((b_08 * 0xff) << 24) | ((b_07 * 0xff) << 16) | ((b_06 * 0xff) << 8) | (b_05 * 0xff); ] { export *[const]:8 imm; }
# FMOV single-precision immediate expansion (sign, inverted/replicated exponent, fraction), duplicated to both 32-bit lanes
repl11110: "#"^imm is abcdefgh & b_18 & b_17 & b_16 & b_09 & b_08 & b_07 & b_06 & b_05 [ imm = (b_18 << 31) | ((b_17 $xor 1) << 30) | ((b_17 * 0x1f) << 25) | (b_16 << 24) | (b_09 << 23) | (b_08 << 22) | (b_07 << 21) | (b_06 << 20) | (b_05 << 19); ] { tmp:8 = imm; tmp = (tmp << 32) | tmp; export tmp; }
# FMOV double-precision immediate expansion
repl11111: "#"^imm is abcdefgh & b_18 & b_17 & b_16 & b_09 & b_08 & b_07 & b_06 & b_05 [ imm = (b_18 << 63) | ((b_17 $xor 1) << 62) | ((b_17 * 0xff) << 54) | (b_16 << 53) | (b_09 << 52) | (b_08 << 51) | (b_07 << 50) | (b_06 << 49) | (b_05 << 48); ] { tmp:8 = imm; export tmp; }

# cmode/op selection of the replication pattern (MOVI/ORR forms, op=0)
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x0 & b_29=0 & repl000 { export repl000; }
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x1 & b_29=0 & repl000 { export repl000; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0x2 & b_29=0 & repl001 { export repl001; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0x3 & b_29=0 & repl001 { export repl001; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #16" is abcdefgh & cmode=0x4 & b_29=0 & repl010 { export repl010; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #16" is abcdefgh & cmode=0x5 & b_29=0 & repl010 { export repl010; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #24" is abcdefgh & cmode=0x6 & b_29=0 & repl011 { export repl011; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #24" is abcdefgh & cmode=0x7 & b_29=0 & repl011 { export repl011; }
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x8 & b_29=0 & repl100 { export repl100; }
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x9 &
b_29=0 & repl100 { export repl100; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0xa & b_29=0 & repl101 { export repl101; }
Imm_neon_uimm8Shift: abcdefgh^", LSL #8" is abcdefgh & cmode=0xb & b_29=0 & repl101 { export repl101; }
Imm_neon_uimm8Shift: abcdefgh^", MSL #8" is abcdefgh & cmode=0xc & b_29=0 & repl1100 { export repl1100; }
Imm_neon_uimm8Shift: abcdefgh^", MSL #16" is abcdefgh & cmode=0xd & b_29=0 & repl1101 { export repl1101; }
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0xe & b_29=0 & repl11100 { export repl11100; }
Imm_neon_uimm8Shift: repl11101 is abcdefgh & cmode=0xe & b_29=1 & repl11101 { export repl11101; } # MOVI 64
Imm_neon_uimm8Shift: repl11110 is abcdefgh & cmode=0xf & b_29=0 & repl11110 { export repl11110; } # FMOV
Imm_neon_uimm8Shift: repl11111 is abcdefgh & cmode=0xf & b_29=1 & repl11111 { export repl11111; } # FMOV
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x1 & b_29=1 & repl000 { export repl000; } # BIC32
Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0x3 & b_29=1 & repl001 { export repl001; }
Imm_neon_uimm8Shift: abcdefgh^", LSL 16" is abcdefgh & cmode=0x5 & b_29=1 & repl010 { export repl010; }
Imm_neon_uimm8Shift: abcdefgh^", LSL 24" is abcdefgh & cmode=0x7 & b_29=1 & repl011 { export repl011; } # BIC16
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x9 & b_29=1 & repl000 { export repl000; }
Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0xb & b_29=1 & repl001 { export repl001; }
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x0 & b_29=1 & repl000 { export repl000; } # MVNI
Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0x2 & b_29=1 & repl001 { export repl001; }
Imm_neon_uimm8Shift: abcdefgh^", LSL 16" is abcdefgh & cmode=0x4 & b_29=1 & repl010 { export repl010; }
Imm_neon_uimm8Shift: abcdefgh^", LSL 24" is abcdefgh & cmode=0x6 & b_29=1 & repl011 { export repl011; }
Imm_neon_uimm8Shift: abcdefgh is abcdefgh & cmode=0x8 & b_29=1 & repl000 { export repl000; } # MVNI
Imm_neon_uimm8Shift: abcdefgh^", LSL 8" is abcdefgh & cmode=0xa & b_29=1 & repl001 { export repl001; }
Imm_neon_uimm8Shift: abcdefgh^", MSL 8" is abcdefgh & cmode=0xc & b_29=1 & repl1100 { export repl1100; } # MVNI
Imm_neon_uimm8Shift: abcdefgh^", MSL 16" is abcdefgh & cmode=0xd & b_29=1 & repl1101 { export repl1101; }

# Vector-element index operands built from the H:L(:M) bits.
vIndex: val is b_2222=0 & b_2121 & b_1111 [ val = b_1111 << 1 | b_2121; ] { export *[const]:8 val; }
vIndex: val is b_2222=1 & b_2121=0 & b_1111 [ val = b_1111 & 0x1; ] { export *[const]:8 val; }
vIndexHLM: val is b_2223=2 & b_2121 & b_1111 [ val = b_1111 << 1 | b_2121; ] { export *[const]:8 val; }
vIndexHLM: val is b_2223=1 & b_2121 & b_1111 & b_2020 [ val = b_1111 << 2 | b_2121 << 1 | b_2020; ] { export *[const]:8 val; }
vIndexHLM: val is b_2223=0 & b_2121 & b_1111 & b_2020 [ val = b_1111 << 2 | b_2121 << 1 | b_2020; ] { export *[const]:8 val; }
vIndexHL: val is b_2223=0b01 & b_21 & b_11 [ val = b_11 << 1 | b_21; ] { export *[const]:8 val; }
vIndexHL: b_11 is b_2223=0b10 & b_11 { export *[const]:8 b_11; }

# Indexed-element access into the vector register file via a dynamic [register]-space pointer.
# 0x5000 appears to be the register-space byte offset of the VPR file and 32 the per-register
# stride -- matches all the *.sel computations in this file; defined with the registers earlier
# in the spec (not visible here).
@if DATA_ENDIAN == "little"
Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 2 + b_2121; ] { export *[register]:1 val; }
Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111; ] { export *[register]:1 val; }
Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121) * 4; ] { export *[register]:4 val; }
Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 4; ] { export *[register]:4 val; }
Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + (b_1111 * 2 + b_2121) * 8; ] { export *[register]:8 val; }
Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5000 + 32*Re_VPR128 + b_1111 * 8; ] { export *[register]:8
val; } @else Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x501f + 32*Re_VPR128 - b_1111 * 2 - b_2121; ] { export *[register]:1 val; } Re_VPR128.B.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x501f + 32*Re_VPR128 - b_1111; ] { export *[register]:1 val; } Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x501c + 32*Re_VPR128 - (b_1111 * 2 + b_2121) * 4; ] { export *[register]:4 val; } Re_VPR128.S.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x501c + 32*Re_VPR128 - b_1111 * 4; ] { export *[register]:4 val; } Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=0 & b_2121 & b_1111 [ val = 0x5018 + 32*Re_VPR128 - (b_1111 * 2 + b_2121) * 8; ] { export *[register]:8 val; } Re_VPR128.D.sel: Re_VPR128, val is Re_VPR128 & b_2222=1 & b_2121=0 & b_1111 [ val = 0x5018 + 32*Re_VPR128 - b_1111 * 8; ] { export *[register]:8 val; } @endif Re_VPR128.B.vIndex: Re_VPR128.B^"["^vIndex^"]" is Re_VPR128.B & vIndex & Re_VPR128.B.sel { export Re_VPR128.B.sel; } Re_VPR128.S.vIndex: Re_VPR128.S^"["^vIndex^"]" is Re_VPR128.S & vIndex & Re_VPR128.S.sel { export Re_VPR128.S.sel; } Re_VPR128.D.vIndex: Re_VPR128.D^"["^vIndex^"]" is Re_VPR128.D & vIndex & Re_VPR128.D.sel { export Re_VPR128.D.sel; } @if DATA_ENDIAN == "little" Rd_VPR128.B.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm4 [ val = 0x5000 + 32*Rd_VPR128 + imm_neon_uimm4; ] { export *[register]:1 val; } Rd_VPR128.H.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm3 [ val = 0x5000 + 32*Rd_VPR128 + 2*imm_neon_uimm3; ] { export *[register]:2 val; } Rd_VPR128.S.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm2 [ val = 0x5000 + 32*Rd_VPR128 + 4*imm_neon_uimm2; ] { export *[register]:4 val; } Rd_VPR128.D.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm1 [ val = 0x5000 + 32*Rd_VPR128 + 8*imm_neon_uimm1; ] { export *[register]:8 val; } @else Rd_VPR128.B.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm4 [ val = 0x501f + 
32*Rd_VPR128 - imm_neon_uimm4; ] { export *[register]:1 val; }
Rd_VPR128.H.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm3 [ val = 0x501e + 32*Rd_VPR128 - 2*imm_neon_uimm3; ] { export *[register]:2 val; }
Rd_VPR128.S.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm2 [ val = 0x501c + 32*Rd_VPR128 - 4*imm_neon_uimm2; ] { export *[register]:4 val; }
Rd_VPR128.D.sel: Rd_VPR128, val is Rd_VPR128 & imm_neon_uimm1 [ val = 0x5018 + 32*Rd_VPR128 - 8*imm_neon_uimm1; ] { export *[register]:8 val; }
@endif
Rd_VPR128.B.imm_neon_uimm4: Rd_VPR128.B^"["^imm_neon_uimm4^"]" is Rd_VPR128.B & imm_neon_uimm4 & Rd_VPR128.B.sel { export Rd_VPR128.B.sel; }
Rd_VPR128.H.imm_neon_uimm3: Rd_VPR128.H^"["^imm_neon_uimm3^"]" is Rd_VPR128.H & imm_neon_uimm3 & Rd_VPR128.H.sel { export Rd_VPR128.H.sel; }
Rd_VPR128.S.imm_neon_uimm2: Rd_VPR128.S^"["^imm_neon_uimm2^"]" is Rd_VPR128.S & imm_neon_uimm2 & Rd_VPR128.S.sel { export Rd_VPR128.S.sel; }
Rd_VPR128.D.imm_neon_uimm1: Rd_VPR128.D^"["^imm_neon_uimm1^"]" is Rd_VPR128.D & imm_neon_uimm1 & Rd_VPR128.D.sel { export Rd_VPR128.D.sel; }

# Source-register element selection by the second immediate index field (immN_*).
@if DATA_ENDIAN == "little"
Rn_VPR128.B.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm4 [ val = 0x5000 + 32*Rn_VPR128 + immN_neon_uimm4; ] { export *[register]:1 val; }
Rn_VPR128.H.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm3 [ val = 0x5000 + 32*Rn_VPR128 + 2*immN_neon_uimm3; ] { export *[register]:2 val; }
Rn_VPR128.S.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm2 [ val = 0x5000 + 32*Rn_VPR128 + 4*immN_neon_uimm2; ] { export *[register]:4 val; }
Rn_VPR128.D.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm1 [ val = 0x5000 + 32*Rn_VPR128 + 8*immN_neon_uimm1; ] { export *[register]:8 val; }
@else
Rn_VPR128.B.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm4 [ val = 0x501f + 32*Rn_VPR128 - immN_neon_uimm4; ] { export *[register]:1 val; }
Rn_VPR128.H.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm3 [ val = 0x501e + 32*Rn_VPR128 - 2*immN_neon_uimm3; ] { export *[register]:2 val; }
Rn_VPR128.S.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm2 [ val = 0x501c + 32*Rn_VPR128 - 4*immN_neon_uimm2; ] { export *[register]:4 val; }
Rn_VPR128.D.selN: Rn_VPR128, val is Rn_VPR128 & immN_neon_uimm1 [ val = 0x5018 + 32*Rn_VPR128 - 8*immN_neon_uimm1; ] { export *[register]:8 val; }
@endif
Rn_VPR128.B.immN_neon_uimm4: Rn_VPR128.B^"["^immN_neon_uimm4^"]" is Rn_VPR128.B & immN_neon_uimm4 & Rn_VPR128.B.selN { export Rn_VPR128.B.selN; }
Rn_VPR128.H.immN_neon_uimm3: Rn_VPR128.H^"["^immN_neon_uimm3^"]" is Rn_VPR128.H & immN_neon_uimm3 & Rn_VPR128.H.selN { export Rn_VPR128.H.selN; }
Rn_VPR128.S.immN_neon_uimm2: Rn_VPR128.S^"["^immN_neon_uimm2^"]" is Rn_VPR128.S & immN_neon_uimm2 & Rn_VPR128.S.selN { export Rn_VPR128.S.selN; }
Rn_VPR128.D.immN_neon_uimm1: Rn_VPR128.D^"["^immN_neon_uimm1^"]" is Rn_VPR128.D & immN_neon_uimm1 & Rn_VPR128.D.selN { export Rn_VPR128.D.selN; }

# Source-register element selection by the primary immediate index field (imm_*).
@if DATA_ENDIAN == "little"
Rn_VPR128.B.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm4 [ val = 0x5000 + 32*Rn_VPR128 + imm_neon_uimm4; ] { export *[register]:1 val; }
Rn_VPR128.H.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm3 [ val = 0x5000 + 32*Rn_VPR128 + 2*imm_neon_uimm3; ] { export *[register]:2 val; }
Rn_VPR128.S.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm2 [ val = 0x5000 + 32*Rn_VPR128 + 4*imm_neon_uimm2; ] { export *[register]:4 val; }
Rn_VPR128.D.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm1 [ val = 0x5000 + 32*Rn_VPR128 + 8*imm_neon_uimm1; ] { export *[register]:8 val; }
@else
Rn_VPR128.B.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm4 [ val = 0x501f + 32*Rn_VPR128 - imm_neon_uimm4; ] { export *[register]:1 val; }
Rn_VPR128.H.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm3 [ val = 0x501e + 32*Rn_VPR128 - 2*imm_neon_uimm3; ] { export *[register]:2 val; }
Rn_VPR128.S.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm2 [ val = 0x501c + 32*Rn_VPR128 - 4*imm_neon_uimm2; ] { export *[register]:4 val; }
Rn_VPR128.D.sel: Rn_VPR128, val is Rn_VPR128 & imm_neon_uimm1 [ val = 0x5018 + 32*Rn_VPR128 -
8*imm_neon_uimm1; ] { export *[register]:8 val; } @endif Rn_VPR128.B.imm_neon_uimm4: Rn_VPR128.B^"["^imm_neon_uimm4^"]" is Rn_VPR128.B & imm_neon_uimm4 & Rn_VPR128.B.sel { export Rn_VPR128.B.sel; } Rn_VPR128.H.imm_neon_uimm3: Rn_VPR128.H^"["^imm_neon_uimm3^"]" is Rn_VPR128.H & imm_neon_uimm3 & Rn_VPR128.H.sel { export Rn_VPR128.H.sel; } Rn_VPR128.S.imm_neon_uimm2: Rn_VPR128.S^"["^imm_neon_uimm2^"]" is Rn_VPR128.S & imm_neon_uimm2 & Rn_VPR128.S.sel { export Rn_VPR128.S.sel; } Rn_VPR128.D.imm_neon_uimm1: Rn_VPR128.D^"["^imm_neon_uimm1^"]" is Rn_VPR128.D & imm_neon_uimm1 & Rn_VPR128.D.sel { export Rn_VPR128.D.sel; } Re_VPR128.H.vIndexHL: Re_VPR128.H^"["^vIndexHL^"]" is Re_VPR128.H & vIndexHL { } @if DATA_ENDIAN == "little" Re_VPR128Lo.H.sel: Re_VPR128Lo, val is Re_VPR128Lo & b_2223=2 & b_2121 & b_1111 [ val = 0x5000 + 32*Re_VPR128Lo + (b_1111 * 2 + b_2121)*2; ] { export *[register]:2 val; } Re_VPR128Lo.H.sel: Re_VPR128Lo, val is Re_VPR128Lo & b_2223=1 & b_2121 & b_1111 & b_2020 [ val = 0x5000 + 32*Re_VPR128Lo + (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } Re_VPR128Lo.H.sel: Re_VPR128Lo, val is Re_VPR128Lo & b_2223=0 & b_2121 & b_1111 & b_2020 [ val = 0x5000 + 32*Re_VPR128Lo + (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } @else Re_VPR128Lo.H.sel: Re_VPR128Lo, val is Re_VPR128Lo & b_2223=2 & b_2121 & b_1111 [ val = 0x501e + 32*Re_VPR128Lo - (b_1111 * 2 + b_2121)*2; ] { export *[register]:2 val; } Re_VPR128Lo.H.sel: Re_VPR128Lo, val is Re_VPR128Lo & b_2223=1 & b_2121 & b_1111 & b_2020 [ val = 0x501e + 32*Re_VPR128Lo - (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } Re_VPR128Lo.H.sel: Re_VPR128Lo, val is Re_VPR128Lo & b_2223=0 & b_2121 & b_1111 & b_2020 [ val = 0x501e + 32*Re_VPR128Lo - (b_1111*4 + b_2121*2 + b_2020)*2; ] { export *[register]:2 val; } @endif Re_VPR128Lo.H.vIndexHLM: Re_VPR128Lo.H^"["^vIndexHLM^"]" is Re_VPR128Lo.H & vIndexHLM & Re_VPR128Lo.H.sel { export Re_VPR128Lo.H.sel; } FBitsOp: "#"^fbits is 
Scale [ fbits = 64 - Scale; ] { export *[const]:2 fbits; }
# Scale factor 2^fbits as a float, used to multiply/divide in fixed-point conversions.
FBits64: factor is FBitsOp & Scale [ factor = 1 << (64 - Scale); ] { fval:8 = int2float(factor:8); export fval; }
FBits32: factor is FBitsOp & Scale [ factor = 1 << (64 - Scale); ] { fval:4 = int2float(factor:8); export fval; }
FBits16: factor is FBitsOp & Scale [ factor = 1 << (64 - Scale); ] { fval:2 = int2float(factor:8); export fval; }

# FMOV (immediate) expansion: sign | NOT(exp-high) | replicated exp-high | exp-low | fraction.
# float
Imm8_fmov16_operand: imm is Imm8_fmov_sign & Imm8_fmov_exph & Imm8_fmov_expl & Imm8_fmov_frac & ftype=3 [ imm = (Imm8_fmov_sign << 15) | ((Imm8_fmov_exph $xor 1) << 14) | ((Imm8_fmov_exph * 0x3) << 12) | (Imm8_fmov_expl << 10) | (Imm8_fmov_frac << 6); ] { export *[const]:2 imm; }
Imm8_fmov32_operand: imm is Imm8_fmov_sign & Imm8_fmov_exph & Imm8_fmov_expl & Imm8_fmov_frac & ftype=0 [ imm = (Imm8_fmov_sign << 31) | ((Imm8_fmov_exph $xor 1) << 30) | ((Imm8_fmov_exph * 0x1f) << 25) | (Imm8_fmov_expl << 23) | (Imm8_fmov_frac << 19); ] { export *[const]:4 imm; }
# double
Imm8_fmov64_operand: imm is Imm8_fmov_sign & Imm8_fmov_exph & Imm8_fmov_expl & Imm8_fmov_frac & ftype=1 [ imm = (Imm8_fmov_sign << 63) | ((Imm8_fmov_exph $xor 1) << 62) | ((Imm8_fmov_exph * 0xff) << 54) | (Imm8_fmov_expl << 52) | (Imm8_fmov_frac << 48); ] { export *[const]:8 imm; }

# SVE subtables
# The size qualifier (T) is encoded in several different ways. The majority of encodings are in sve_size_2223
# encoded in "size" -- Is the size specifier, size 00 B 01 H 10 S 11 D
T: "B" is sve_size_2223=0b00 { export 1:1; }
T: "H" is sve_size_2223=0b01 { export 2:1; }
T: "S" is sve_size_2223=0b10 { export 4:1; }
T: "D" is sve_size_2223=0b11 { export 8:1; }
T_sz: "S" is sve_sz_22=0 { export 4:1; }
T_sz: "D" is sve_sz_22=1 { export 8:1; }
# encoded in "tszh:tszl" -- Is the size specifier, tszh tszl 00 00 RESERVED 00 01 B 00 1x H 01 xx S 1x xx D
# Note that tszl is either in b_0809 (if b_21=0) or b_1920 (if b_21=1)
T_tszh: "B" is sve_tszh_2223=0b00 & b_21=0 & sve_tszl_0809=0b01 { export 1:1; }
T_tszh: "B" is sve_tszh_2223=0b00 & b_21=1 & sve_tszl_1920=0b01 { export 1:1; }
T_tszh: "H" is sve_tszh_2223=0b00 & b_21=0 & b_09=1 { export 2:1; }
T_tszh: "H" is sve_tszh_2223=0b00 & b_21=1 & b_20=1 { export 2:1; }
T_tszh: "S" is sve_tszh_2223=0b01 { export 4:1; }
T_tszh: "D" is b_23=1 { export 8:1; }
# encoded in "size" -- Is the size specifier, size 00 B 01 H 10 S 11 D
T_size_2122: "B" is sve_size_2122=0b00 { export 1:1; }
T_size_2122: "H" is sve_size_2122=0b01 { export 2:1; }
T_size_2122: "S" is sve_size_2122=0b10 { export 4:1; }
T_size_2122: "D" is sve_size_2122=0b11 { export 8:1; }
# encoded in "tsz" -- Is the size specifier, tsz 00000 RESERVED xxxx1 B xxx10 H xx100 S x1000 D 10000 Q
T_tsz: "B" is b_16=1 { export 1:1; }
T_tsz: "H" is b_1617=0b10 { export 2:1; }
T_tsz: "S" is b_1618=0b100 { export 4:1; }
T_tsz: "D" is b_1619=0b1000 { export 8:1; }
T_tsz: "Q" is b_1620=0b10000 { export 16:1; }
# imm2:tsz-relative immediate: the remaining high tsz bits concatenated below imm2.
sve_imm2_tsz: tmp is b_16=1 & sve_imm2_2223 & b_1720 [ tmp = sve_imm2_2223 * 16 + b_1720; ] { export *[const]:1 tmp; }
sve_imm2_tsz: tmp is b_1617=0b10 & sve_imm2_2223 & b_1820 [ tmp = sve_imm2_2223 * 8 + b_1820; ] { export *[const]:1 tmp; }
sve_imm2_tsz: tmp is b_1618=0b100 & sve_imm2_2223 & b_1920 [ tmp = sve_imm2_2223 * 4 + b_1920; ] { export *[const]:1 tmp; }
sve_imm2_tsz: tmp is b_1619=0b1000 & sve_imm2_2223 & b_20 [ tmp = sve_imm2_2223 *
2 + b_20; ] { export *[const]:1 tmp; } sve_imm2_tsz: tmp is b_1620=0b10000 & sve_imm2_2223 [ tmp = sve_imm2_2223 + 0; ] { export *[const]:1 tmp; } # encoded in "imm13<12>:imm13<5:0>" -- Is the size specifier, imm13<12> imm13<5:0> 0 0xxxxx S 0 10xxxx H 0 110xxx B 0 1110xx B 0 11110x B 0 111110 RESERVED 0 111111 RESERVED 1 xxxxxx D T_imm13: "S" is b_17=0 & b_10=0 { export 4:1; } T_imm13: "H" is b_17=0 & b_0910=0b10 { export 2:1; } T_imm13: "B" is b_17=0 & b_0810=0b110 { export 1:1; } T_imm13: "B" is b_17=0 & b_0710=0b1110 { export 1:1; } T_imm13: "B" is b_17=0 & b_0610=0b11110 { export 1:1; } T_imm13: "D" is b_17=1 { export 8:1; } Zd.T: Zd^"."^T is Zd & T { export Zd; } Zd.T_2: Zd^"."^T is Zd & T { export Zd; } Zd.T_tszh: Zd^"."^T_tszh is Zd & T_tszh { export Zd; } Zd.T_tszh_2: Zd^"."^T_tszh is Zd & T_tszh { export Zd; } Zd.T_tsz: Zd^"."^T_tsz is Zd & T_tsz { export Zd; } Zd.T_imm13: Zd^"."^T_imm13 is Zd & T_imm13 { export Zd; } Zd.T_imm13_2: Zd^"."^T_imm13 is Zd & T_imm13 { export Zd; } Zd.T_sz: Zd^"."^T_sz is Zd & T_sz { export Zd; } Zd.T_sz_2: Zd^"."^T_sz is Zd & T_sz { export Zd; } Zd.T_size_2122: Zd^"."^T_size_2122 is Zd & T_size_2122 { export Zd; } Zd.B: Zd^".B" is Zd { export Zd; } Zd.B_2: Zd^".B" is Zd { export Zd; } Zd.H: Zd^".H" is Zd { export Zd; } Zd.S: Zd^".S" is Zd { export Zd; } Zd.D: Zd^".D" is Zd { export Zd; } Zt.B: sve_zt_0004^".B" is sve_zt_0004 { export sve_zt_0004; } Ztt.B: sve_ztt_0004^".B" is sve_ztt_0004 { export sve_ztt_0004; } Zttt.B: sve_zttt_0004^".B" is sve_zttt_0004 { export sve_zttt_0004; } Ztttt.B: sve_ztttt_0004^".B" is sve_ztttt_0004 { export sve_ztttt_0004; } Zt.H: sve_zt_0004^".H" is sve_zt_0004 { export sve_zt_0004; } Ztt.H: sve_ztt_0004^".H" is sve_ztt_0004 { export sve_ztt_0004; } Zttt.H: sve_zttt_0004^".H" is sve_zttt_0004 { export sve_zttt_0004; } Ztttt.H: sve_ztttt_0004^".H" is sve_ztttt_0004 { export sve_ztttt_0004; } Zt.S: sve_zt_0004^".S" is sve_zt_0004 { export sve_zt_0004; } Ztt.S: sve_ztt_0004^".S" is sve_ztt_0004 { 
export sve_ztt_0004; } Zttt.S: sve_zttt_0004^".S" is sve_zttt_0004 { export sve_zttt_0004; } Ztttt.S: sve_ztttt_0004^".S" is sve_ztttt_0004 { export sve_ztttt_0004; } Zt.D: sve_zt_0004^".D" is sve_zt_0004 { export sve_zt_0004; } Ztt.D: sve_ztt_0004^".D" is sve_ztt_0004 { export sve_ztt_0004; } Zttt.D: sve_zttt_0004^".D" is sve_zttt_0004 { export sve_zttt_0004; } Ztttt.D: sve_ztttt_0004^".D" is sve_ztttt_0004 { export sve_ztttt_0004; } Zn.T: sve_zn_0509^"."^T is sve_zn_0509 & T { export sve_zn_0509; } Zn.T_sz: sve_zn_0509^"."^T_sz is sve_zn_0509 & T_sz { export sve_zn_0509; } Zn.T_tszh: sve_zn_0509^"."^T_tszh is sve_zn_0509 & T_tszh { export sve_zn_0509; } Zn.T_tsz: sve_zn_0509^"."^T_tsz is sve_zn_0509 & T_tsz { export sve_zn_0509; } Zn.Tb_sz: sve_zn_0509^".B" is sve_zn_0509 & sve_sz_22=0 { export sve_zn_0509; } Zn.Tb_sz: sve_zn_0509^".H" is sve_zn_0509 & sve_sz_22=1 { export sve_zn_0509; } Zn.Tb: sve_zn_0509^".B" is sve_zn_0509 & sve_size_2223=0b01 { export sve_zn_0509; } Zn.Tb: sve_zn_0509^".H" is sve_zn_0509 & sve_size_2223=0b10 { export sve_zn_0509; } Zn.Tb: sve_zn_0509^".S" is sve_zn_0509 & sve_size_2223=0b11 { export sve_zn_0509; } Zn.B: sve_zn_0509^".B" is sve_zn_0509 { export sve_zn_0509; } Zn.H: sve_zn_0509^".H" is sve_zn_0509 { export sve_zn_0509; } Zn.S: sve_zn_0509^".S" is sve_zn_0509 { export sve_zn_0509; } Zn.D: sve_zn_0509^".D" is sve_zn_0509 { export sve_zn_0509; } Zm.T: sve_zm_1620^"."^T is sve_zm_1620 & T { export sve_zm_1620; } Zm.T_sz: sve_zm_1620^"."^T_sz is sve_zm_1620 & T_sz { export sve_zm_1620; } Zm.Tb_sz: sve_zm_1620^".B" is sve_zm_1620 & sve_sz_22=0 { export sve_zm_1620; } Zm.Tb_sz: sve_zm_1620^".H" is sve_zm_1620 & sve_sz_22=1 { export sve_zm_1620; } # Zm.Tb: sve_zm_1620^".B" is sve_zm_1620 & sve_size_2223=0b01 { export sve_zm_1620; } # Zm.Tb: sve_zm_1620^".H" is sve_zm_1620 & sve_size_2223=0b10 { export sve_zm_1620; } # Zm.Tb: sve_zm_1620^".S" is sve_zm_1620 & sve_size_2223=0b11 { export sve_zm_1620; } # Zm.B: sve_zm_1620^".B" is 
sve_zm_1620 { export sve_zm_1620; } # Zm.H: sve_zm_1620^".H" is sve_zm_1620 { export sve_zm_1620; } Zm.S: sve_zm_1620^".S" is sve_zm_1620 { export sve_zm_1620; } Zm.D: sve_zm_1620^".D" is sve_zm_1620 { export sve_zm_1620; } Zm3.B: sve_zm_1618^".B" is sve_zm_1618 { export sve_zm_1618; } Zm3.H: sve_zm_1618^".H" is sve_zm_1618 { export sve_zm_1618; } Zm3.S: sve_zm_1618^".S" is sve_zm_1618 { export sve_zm_1618; } # Zm3.D: sve_zm_1618^".D" is sve_zm_1618 { export sve_zm_1618; } # Zm4.B: sve_zm_1619^".B" is sve_zm_1619 { export sve_zm_1619; } Zm4.H: sve_zm_1619^".H" is sve_zm_1619 { export sve_zm_1619; } Zm4.S: sve_zm_1619^".S" is sve_zm_1619 { export sve_zm_1619; } Zm4.D: sve_zm_1619^".D" is sve_zm_1619 { export sve_zm_1619; } Pg: sve_pg_1013 is sve_pg_1013 { export sve_pg_1013; } Pg_z: sve_pg_1013^"/z" is sve_pg_1013 { export sve_pg_1013; } Pg_zm: sve_pg_1013^"/z" is sve_pg_1013 & sve_m_04=0 { export sve_pg_1013; } Pg_zm: sve_pg_1013^"/m" is sve_pg_1013 & sve_m_04=1 { export sve_pg_1013; } Pg3: sve_pg_1012 is sve_pg_1012 { export sve_pg_1012; } Pg3_m: sve_pg_1012^"/m" is sve_pg_1012 { export sve_pg_1012; } Pg3_z: sve_pg_1012^"/z" is sve_pg_1012 { export sve_pg_1012; } Pg3_zm: sve_pg_1012^"/z" is sve_pg_1012 & sve_m_16=0 { export sve_pg_1012; } Pg3_zm: sve_pg_1012^"/m" is sve_pg_1012 & sve_m_16=1 { export sve_pg_1012; } Pd.T: sve_pd_0003^"."^T is sve_pd_0003 & T { export sve_pd_0003; } Pd.T_2: sve_pd_0003^"."^T is sve_pd_0003 & T { export sve_pd_0003; } Pd: sve_pd_0003 is sve_pd_0003 { export sve_pd_0003; } Pd.B: sve_pd_0003^".B" is sve_pd_0003 { export sve_pd_0003; } Pd.B_2: sve_pd_0003^".B" is sve_pd_0003 { export sve_pd_0003; } Pd.H: sve_pd_0003^".H" is sve_pd_0003 { export sve_pd_0003; } # Pd.S: sve_pd_0003^".S" is sve_pd_0003 { export sve_pd_0003; } # Pd.D: sve_pd_0003^".D" is sve_pd_0003 { export sve_pd_0003; } Pn: sve_pn_0508 is sve_pn_0508 { export sve_pn_0508; } Pn_z: sve_pn_0508^"/z" is sve_pn_0508 { export sve_pn_0508; } Pn.T: sve_pn_0508^"."^T is sve_pn_0508 
& T { export sve_pn_0508; } Pn.B: sve_pn_0508^".B" is sve_pn_0508 { export sve_pn_0508; } # Pn.H: sve_pn_0508^".H" is sve_pn_0508 { export sve_pn_0508; } # Pn.S: sve_pn_0508^".S" is sve_pn_0508 { export sve_pn_0508; } # Pn.D: sve_pn_0508^".D" is sve_pn_0508 { export sve_pn_0508; } Pm_m: sve_pm_1619^"/m" is sve_pm_1619 { export sve_pm_1619; } Pm_zm: sve_pm_1619^"/z" is sve_pm_1619 & sve_m_14=0 { export sve_pm_1619; } Pm_zm: sve_pm_1619^"/m" is sve_pm_1619 & sve_m_14=1 { export sve_pm_1619; } Pm.T: sve_pm_1619^"."^T is sve_pm_1619 & T { export sve_pm_1619; } Pm.B: sve_pm_1619^".B" is sve_pm_1619 { export sve_pm_1619; } # Pm.H: sve_pm_1619^".H" is sve_pm_1619 { export sve_pm_1619; } # Pm.S: sve_pm_1619^".S" is sve_pm_1619 { export sve_pm_1619; } # Pm.D: sve_pm_1619^".D" is sve_pm_1619 { export sve_pm_1619; } sve_i3h_i3l: tmp is sve_i3h_22 & sve_i3l_1920 [ tmp = sve_i3h_22 * 4 + sve_i3l_1920; ] { export *[const]:1 tmp; } sve_imm3_1_0to7: sve_imm3_1618 is sve_imm3_1618 { export *[const]:1 sve_imm3_1618; } sve_imm4_1_1to16: tmp is sve_imm4_1619 [ tmp = sve_imm4_1619 + 1; ] { export *[const]:1 tmp; } sve_imm4_1_m128to112: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 16; ] { export *[const]:1 tmp; } sve_opt4_1_m128to112: "" is sve_imm4s_1619=0 { export 0:1; } sve_opt4_1_m128to112: ", #"^sve_imm4_1_m128to112 is sve_imm4_1_m128to112 { export sve_imm4_1_m128to112; } sve_imm4_1_m16to14: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 2; ] { export *[const]:1 tmp; } sve_mul4_1_m16to14: "" is sve_imm4s_1619=0 { export 0:1; } sve_mul4_1_m16to14: ", #"^sve_imm4_1_m16to14^", mul vl" is sve_imm4_1_m16to14 { export *[const]:1 sve_imm4_1_m16to14; } sve_imm4_1_m24to21: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 3; ] { export *[const]:1 tmp; } sve_mul4_1_m24to21: "" is sve_imm4s_1619=0 { export 0:1; } sve_mul4_1_m24to21: ", #"^sve_imm4_1_m24to21^", mul vl" is sve_imm4_1_m24to21 { export *[const]:1 sve_imm4_1_m24to21; } sve_imm4_1_m32to28: tmp is sve_imm4s_1619 [ tmp = 
sve_imm4s_1619 * 4; ] { export *[const]:1 tmp; } sve_mul4_1_m32to28: "" is sve_imm4s_1619=0 { export 0:1; } sve_mul4_1_m32to28: ", #"^sve_imm4_1_m32to28^", mul vl" is sve_imm4_1_m32to28 { export *[const]:1 sve_imm4_1_m32to28; } sve_imm4_1_m8to7: tmp is sve_imm4s_1619 [ tmp = sve_imm4s_1619 * 1; ] { export *[const]:1 tmp; } sve_mul4_1_m8to7: "" is sve_imm4s_1619=0 { export 0:1; } sve_mul4_1_m8to7: ", #"^sve_imm4_1_m8to7^", mul vl" is sve_imm4_1_m8to7 { export *[const]:1 sve_imm4_1_m8to7; } sve_imm5_1_0to124: tmp is sve_imm5_1620 [ tmp = sve_imm5_1620 * 4; ] { export *[const]:1 tmp; } sve_opt5_1_0to124: "" is sve_imm5_1620=0 { export 0:1; } sve_opt5_1_0to124: ", #"^sve_imm5_1_0to124 is sve_imm5_1_0to124 { export sve_imm5_1_0to124; } sve_imm5_1_0to248: tmp is sve_imm5_1620 [ tmp = sve_imm5_1620 * 8; ] { export *[const]:1 tmp; } sve_opt5_1_0to248: "" is sve_imm5_1620=0 { export 0:1; } sve_opt5_1_0to248: ", #"^sve_imm5_1_0to248 is sve_imm5_1_0to248 { export sve_imm5_1_0to248; } sve_imm5_1_0to31: sve_imm5_1620 is sve_imm5_1620 { export *[const]:1 sve_imm5_1620; } sve_opt5_1_0to31: "" is sve_imm5_1620=0 { export 0:1; } sve_opt5_1_0to31: ", #"^sve_imm5_1_0to31 is sve_imm5_1_0to31 { export sve_imm5_1_0to31; } sve_imm5_1_0to62: tmp is sve_imm5_1620 [ tmp = sve_imm5_1620 * 2; ] { export *[const]:1 tmp; } sve_opt5_1_0to62: "" is sve_imm5_1620=0 { export 0:1; } sve_opt5_1_0to62: ", #"^sve_imm5_1_0to62 is sve_imm5_1_0to62 { export sve_imm5_1_0to62; } sve_imm5_1_m16to15: sve_imm5s_1620 is sve_b_1015=0b010001 & sve_imm5s_1620 { export *[const]:1 sve_imm5s_1620; } sve_imm5_1_m16to15: sve_imm5s_0509 is sve_b_1015=0b010010 & sve_imm5s_0509 { export *[const]:1 sve_imm5s_0509; } sve_imm6_1_0to126: tmp is sve_imm6_1621 [ tmp = sve_imm6_1621 * 2; ] { export *[const]:1 tmp; } sve_opt6_1_0to126: "" is sve_imm6_1621=0 { export 0:1; } sve_opt6_1_0to126: ", #"^sve_imm6_1_0to126 is sve_imm6_1_0to126 { export sve_imm6_1_0to126; } sve_imm6_1_0to252: tmp is sve_imm6_1621 [ tmp = sve_imm6_1621 * 
4; ] { export *[const]:1 tmp; } sve_opt6_1_0to252: "" is sve_imm6_1621=0 { export 0:1; } sve_opt6_1_0to252: ", #"^sve_imm6_1_0to252 is sve_imm6_1_0to252 { export sve_imm6_1_0to252; } sve_imm6_1_0to504: tmp is sve_imm6_1621 [ tmp = sve_imm6_1621 * 8; ] { export *[const]:2 tmp; } sve_opt6_1_0to504: "" is sve_imm6_1621=0 { export 0:2; } sve_opt6_1_0to504: ", #"^sve_imm6_1_0to504 is sve_imm6_1_0to504 { export sve_imm6_1_0to504; } sve_imm6_1_0to63: sve_imm6_1621 is sve_imm6_1621 { export *[const]:1 sve_imm6_1621; } sve_opt6_1_0to63: "" is sve_imm6_1621=0 { export 0:1; } sve_opt6_1_0to63: ", #"^sve_imm6_1_0to63 is sve_imm6_1_0to63 { export sve_imm6_1_0to63; } sve_imm6_1_m32to31: sve_imm6s_0510 is sve_imm6s_0510 { export *[const]:1 sve_imm6s_0510; } sve_mul6_1_m32to31: "" is sve_imm6_1621=0 { export 0:1; } sve_mul6_1_m32to31: ", #"^sve_imm6s_1621^", mul vl" is sve_imm6s_1621 { export *[const]:1 sve_imm6s_1621; } sve_imm8_1_0to255: sve_imm8_0512 is sve_imm8_0512 { export *[const]:1 sve_imm8_0512; } sve_shf8_1_0to255: "#0, LSL #8" is sve_imm8_0512=0 & sve_sh_13=1 { export 0:2; } sve_shf8_1_0to255: "#"^tmp is sve_imm8_0512 & sve_sh_13 [ tmp = sve_imm8_0512 << (8 * sve_sh_13); ] { export *[const]:2 tmp; } sve_imm8_1_m128to127: sve_imm8s_0512 is sve_imm8s_0512 { export *[const]:1 sve_imm8s_0512; } sve_shf8_1_m128to127: "#0, LSL #8" is sve_imm8s_0512=0 & sve_sh_13=1 { export 0:2; } sve_shf8_1_m128to127: "#"^tmp is sve_imm8s_0512 & sve_sh_13 [ tmp = sve_imm8s_0512 << (8 * sve_sh_13); ] { export *[const]:2 tmp; } sve_imm8_2_0to255: tmp is sve_imm8h_1620 & sve_imm8l_1012 [ tmp = sve_imm8h_1620 * 8 + sve_imm8l_1012; ] { export *[const]:1 tmp; } sve_imm9_2_m256to255: tmp is sve_imm9hs_1621 & sve_imm9l_1012 [ tmp = sve_imm9hs_1621 * 8 + sve_imm9l_1012; ] { export *[const]:2 tmp; } sve_mul9_2_m256to255: "" is sve_imm6_1621=0 & sve_imm9l_1012=0 { export 0:2; } sve_mul9_2_m256to255: ", #"^sve_imm9_2_m256to255^", mul vl" is sve_imm9_2_m256to255 { export sve_imm9_2_m256to255; } 
# sve_pattern: SVE predicate-constraint pattern operand (bits 5..9).
# Named encodings per the Arm SVE pattern table; unallocated encodings are
# printed as a raw "#imm". NOTE(review): newlines restored; tokens unchanged.
sve_pattern: "POW2" is sve_pattern_0509=0b00000 { export 0b00000:1; }
sve_pattern: "VL1" is sve_pattern_0509=0b00001 { export 0b00001:1; }
sve_pattern: "VL2" is sve_pattern_0509=0b00010 { export 0b00010:1; }
sve_pattern: "VL3" is sve_pattern_0509=0b00011 { export 0b00011:1; }
sve_pattern: "VL4" is sve_pattern_0509=0b00100 { export 0b00100:1; }
sve_pattern: "VL5" is sve_pattern_0509=0b00101 { export 0b00101:1; }
sve_pattern: "VL6" is sve_pattern_0509=0b00110 { export 0b00110:1; }
sve_pattern: "VL7" is sve_pattern_0509=0b00111 { export 0b00111:1; }
sve_pattern: "VL8" is sve_pattern_0509=0b01000 { export 0b01000:1; }
sve_pattern: "VL16" is sve_pattern_0509=0b01001 { export 0b01001:1; }
sve_pattern: "VL32" is sve_pattern_0509=0b01010 { export 0b01010:1; }
sve_pattern: "VL64" is sve_pattern_0509=0b01011 { export 0b01011:1; }
sve_pattern: "VL128" is sve_pattern_0509=0b01100 { export 0b01100:1; }
sve_pattern: "VL256" is sve_pattern_0509=0b01101 { export 0b01101:1; }
# unallocated pattern encodings print as an immediate
sve_pattern: "#"^sve_pattern_0509 is b_0609=0b0111 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; }
sve_pattern: "#"^sve_pattern_0509 is b_0709=0b101 & b_05=1 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; }
sve_pattern: "#"^sve_pattern_0509 is b_0509=0b10110 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; }
sve_pattern: "#"^sve_pattern_0509 is b_09=1 & b_07=0 & b_05=1 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; }
sve_pattern: "#"^sve_pattern_0509 is b_09=1 & b_0507=0b010 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; }
sve_pattern: "#"^sve_pattern_0509 is b_09=1 & b_0506=0b00 & sve_pattern_0509 { export *[const]:1 sve_pattern_0509; }
sve_pattern: "MUL4" is sve_pattern_0509=0b11101 { export 0b11101:1; }
sve_pattern: "MUL3" is sve_pattern_0509=0b11110 { export 0b11110:1; }
sve_pattern: "ALL" is sve_pattern_0509=0b11111 { export 0b11111:1; }

# optional pattern: ALL (the default) prints nothing
sve_opt_pattern: "" is sve_pattern_0509=0b11111 { export 0b11111:1; }
sve_opt_pattern: ", "^sve_pattern is sve_pattern { export sve_pattern; }

# optional pattern with an optional ", mul #imm" multiplier
sve_mul_pattern: "" is sve_pattern_0509=0b11111 & sve_imm4_1619=0b0000 { export 0b11111:1; }
sve_mul_pattern: ", "^sve_pattern is sve_pattern & sve_imm4_1619=0b0000 { export sve_pattern; }
sve_mul_pattern: ", "^sve_pattern^", mul #"^sve_imm4_1_1to16 is sve_pattern & sve_imm4_1_1to16 { export sve_pattern; }

# index-shift amount for scaled register offsets (msz bits 10..11)
sve_mod_amount: "" is sve_msz_1011=0b00 { export 0:1; }
sve_mod_amount: ", LSL #1" is sve_msz_1011=0b01 { export 1:1; }
sve_mod_amount: ", LSL #2" is sve_msz_1011=0b10 { export 2:1; }
sve_mod_amount: ", LSL #3" is sve_msz_1011=0b11 { export 3:1; }

# extend modifier for gather/scatter offsets; exported value distinguishes
# the four encodings for the semantic sections
sve_mod: "UXTW" is b_15=1 & b_14=0 { export 2:1; }
sve_mod: "SXTW" is b_15=1 & b_14=1 { export 3:1; }
sve_mod: "UXTW" is b_15=0 & b_22=0 { export 0:1; }
sve_mod: "SXTW" is b_15=0 & b_22=1 { export 1:1; }

# prefetch operation operand (bits 0..3); unallocated encodings print as "#imm"
sve_prfop: "PLDL1KEEP" is sve_prfop_0003=0b0000 { export 0b0000:1; }
sve_prfop: "PLDL1STRM" is sve_prfop_0003=0b0001 { export 0b0001:1; }
sve_prfop: "PLDL2KEEP" is sve_prfop_0003=0b0010 { export 0b0010:1; }
sve_prfop: "PLDL2STRM" is sve_prfop_0003=0b0011 { export 0b0011:1; }
sve_prfop: "PLDL3KEEP" is sve_prfop_0003=0b0100 { export 0b0100:1; }
sve_prfop: "PLDL3STRM" is sve_prfop_0003=0b0101 { export 0b0101:1; }
sve_prfop: "#"^sve_prfop_0003 is b_02 & b_01=1 & sve_prfop_0003 { export *[const]:1 sve_prfop_0003; }
sve_prfop: "PSTL1KEEP" is sve_prfop_0003=0b1000 { export 0b1000:1; }
sve_prfop: "PSTL1STRM" is sve_prfop_0003=0b1001 { export 0b1001:1; }
sve_prfop: "PSTL2KEEP" is sve_prfop_0003=0b1010 { export 0b1010:1; }
sve_prfop: "PSTL2STRM" is sve_prfop_0003=0b1011 { export 0b1011:1; }
sve_prfop: "PSTL3KEEP" is sve_prfop_0003=0b1100 { export 0b1100:1; }
sve_prfop: "PSTL3STRM" is sve_prfop_0003=0b1101 { export 0b1101:1; }

# sve_decode_bit_mask: disassembly-time expansion of the SVE logical-immediate
# bitmask encoding (table continues below)
sve_decode_bit_mask: wmask is b_17=0 & b_0510=0b111100 & b_11 [ wmask = (0x5555 >> b_11) & 0xff; ] { export *[const]:8 wmask; }
sve_decode_bit_mask: wmask is b_17=0 & b_0710=0b1110 & b_0506=0b00 & b_1112 [ wmask = (0x1111 >> b_1112) & 0xff; ] { export *[const]:8 wmask; }
sve_decode_bit_mask: wmask is b_17=0 & b_0710=0b1110 & b_0506=0b01 & b_1112 [ wmask = (0x3333 >> b_1112) & 0xff; ] { export *[const]:8 wmask; } sve_decode_bit_mask: wmask is b_17=0 & b_0710=0b1110 & b_0506=0b10 & b_1112 [ wmask = (0x7777 >> b_1112) & 0xff; ] { export *[const]:8 wmask; } sve_decode_bit_mask: wmask is b_17=0 & b_0810=0b110 & b_0507 & b_1113 [ wmask = (((~(-1<<(b_0507+1))) | (~(-1<<(b_0507+9)) & 0xff00)) >> b_1113) & 0xff; ] { export *[const]:8 wmask; } sve_decode_bit_mask: wmask is b_17=0 & b_0910=0b10 & b_0508 & b_1114 [ wmask = (((~(-1<<(b_0508+1))) | (~(-1<<(b_0508+17)) & 0xffff0000)) >> b_1114) & 0xffff; ] { export *[const]:8 wmask; } sve_decode_bit_mask: wmask is b_17=0 & b_10=0 & b_0509 & b_1115 [ wmask = (((~(-1<<(b_0509+1))) | (~(-1<<(b_0509+33)) & 0xffffffff00000000)) >> b_1115) & 0xffffffff; ] { export *[const]:8 wmask; } sve_decode_bit_mask: wmask is b_17=1 & b_0510 & b_1116 [ wmask = ( (((~(-1<<(b_0510+1)))) >> b_1116) | (((~(-1<<(b_0510+1)))) << (64-b_1116))) & 0xffffffffffffffff; ] { export *[const]:8 wmask; } sve_shift_13: "" is sve_sh_13=0 { export 0:1; } sve_shift_13: ", LSL #8" is sve_sh_13=1 { export 8:1; } # The immediate shift is computed from tszh, tszl, imm8. The formula # depends on the instruction, as does the location of tszl and imm8. # The conditions b_21=0/1 and b_17/b_11=0/1 were found by inspecting # the differences between the instructions. 
# Instructions where the immediate shift is 2 * esize - UInt(tsz:imm3)
sve_imm_shift: tmp is b_21=0 & b_17=0 & sve_tszh_2223=0b00 & sve_tszl_0809=0b01 & sve_imm3_0507 [ tmp = 16 - ( 8 + sve_imm3_0507); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=0 & b_17=0 & sve_tszh_2223=0b00 & b_09=1 & sve_tszl_0809 & sve_imm3_0507 [ tmp = 32 - ( 8 * sve_tszl_0809 + sve_imm3_0507); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=0 & b_17=0 & sve_tszh_2223=0b01 & sve_tszl_0809 & sve_imm3_0507 [ tmp = 64 - (32 + 8 * sve_tszl_0809 + sve_imm3_0507); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=0 & b_17=0 & b_23=1 & sve_tszh_2223 & sve_tszl_0809 & sve_imm3_0507 [ tmp = 128 - (32 * sve_tszh_2223 + 8 * sve_tszl_0809 + sve_imm3_0507); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=0 & sve_tszh_2223=0b00 & sve_tszl_1920=0b01 & sve_imm3_1618 [ tmp = 16 - ( 8 + sve_imm3_1618); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=0 & sve_tszh_2223=0b00 & b_20=1 & sve_tszl_1920 & sve_imm3_1618 [ tmp = 32 - ( 8 * sve_tszl_1920 + sve_imm3_1618); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=0 & sve_tszh_2223=0b01 & sve_tszl_1920 & sve_imm3_1618 [ tmp = 64 - (32 + 8 * sve_tszl_1920 + sve_imm3_1618); ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=0 & b_23=1 & sve_tszh_2223 & sve_tszl_1920 & sve_imm3_1618 [ tmp = 128 - (32 * sve_tszh_2223 + 8 * sve_tszl_1920 + sve_imm3_1618); ] { export *[const]:1 tmp; }

# Instructions where the immediate shift is UInt(tsz:imm3) - esize
sve_imm_shift: tmp is b_21=0 & b_17=1 & sve_tszh_2223=0b00 & sve_tszl_0809=0b01 & sve_imm3_0507 [ tmp = ( 8 + sve_imm3_0507) - 8; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=0 & b_17=1 & sve_tszh_2223=0b00 & b_09=1 & sve_tszl_0809 & sve_imm3_0507 [ tmp = ( 8 * sve_tszl_0809 + sve_imm3_0507) - 16; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=0 & b_17=1 & sve_tszh_2223=0b01 & sve_tszl_0809 & sve_imm3_0507 [ tmp = (32 + 8 * sve_tszl_0809 + sve_imm3_0507) - 32; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=0 & b_17=1 & b_23=1 & sve_tszh_2223 & sve_tszl_0809 & sve_imm3_0507 [ tmp = (32 * sve_tszh_2223 + 8 * sve_tszl_0809 + sve_imm3_0507) - 64; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=1 & sve_tszh_2223=0b00 & sve_tszl_1920=0b01 & sve_imm3_1618 [ tmp = ( 8 + sve_imm3_1618) - 8; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=1 & sve_tszh_2223=0b00 & b_20=1 & sve_tszl_1920 & sve_imm3_1618 [ tmp = ( 8 * sve_tszl_1920 + sve_imm3_1618) - 16; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=1 & sve_tszh_2223=0b01 & sve_tszl_1920 & sve_imm3_1618 [ tmp = (32 + 8 * sve_tszl_1920 + sve_imm3_1618) - 32; ] { export *[const]:1 tmp; }
sve_imm_shift: tmp is b_21=1 & b_11=1 & b_23=1 & sve_tszh_2223 & sve_tszl_1920 & sve_imm3_1618 [ tmp = (32 * sve_tszh_2223 + 8 * sve_tszl_1920 + sve_imm3_1618) - 64; ] { export *[const]:1 tmp; }

# one-bit float selectors used by FADD/FSUB/FMAX-style immediate forms
sve_float_0510: "#0.5" is sve_i1_05=0 { export 0:1; }
sve_float_0510: "#1.0" is sve_i1_05=1 { export 1:1; }
sve_float_0520: "#0.5" is sve_i1_05=0 { export 0:1; }
sve_float_0520: "#2.0" is sve_i1_05=1 { export 1:1; }
sve_float_0010: "#0.0" is sve_i1_05=0 { export 0:1; }
sve_float_0010: "#1.0" is sve_i1_05=1 { export 1:1; }

# there are no floating point constants in SLEIGH
# generate equivalent hex floating point constant
attach names [ sve_float_dec ] [ "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "a" "b" "c" "d" "e" "f" ];
attach names [ sve_float_exp ] [ "+1" "+2" "+3" "+4" "-3" "-2" "-1" "+0" ];
sve_float_imm8: s^"."^sve_float_dec^"p"^sve_float_exp is sve_imm8_0512 & sve_float_dec & sve_float_exp & b_12 [ s = (1 - 2 * b_12); ] { export *[const]:1 sve_imm8_0512; }

# SECTION pcodeops
# The following SIMD and MP versions of SLEIGH primitives are
# implemented in java for AARCH64
define pcodeop MP_INT_ABS; define pcodeop MP_INT_RIGHT;
define pcodeop MP_INT_MULT; define pcodeop MP_INT_UMULT;
# The following AARCH64 instructions are implemented in java as a
# pcodeop
define pcodeop a64_TBL;
# The following pcode ops are not implemented
define pcodeop AT_S12E0R; define pcodeop AT_S12E0W; define pcodeop AT_S12E1R; define pcodeop AT_S12E1W;
define pcodeop AT_S1E0R; define pcodeop AT_S1E0W; define pcodeop AT_S1E1R; define pcodeop AT_S1E1RP;
define pcodeop AT_S1E1W; define pcodeop AT_S1E1WP; define pcodeop AT_S1E2R; define pcodeop AT_S1E2W;
define pcodeop AT_S1E3R; define pcodeop AT_S1E3W;
define pcodeop AuthIA; define pcodeop AuthIB; define pcodeop AuthDA; define pcodeop AuthDB;
define pcodeop CallHyperVisor; define pcodeop CallSecureMonitor; define pcodeop CallSupervisor;
define pcodeop ClearExclusiveLocal;
define pcodeop crc32b; define pcodeop crc32h; define pcodeop crc32w; define pcodeop crc32x;
define pcodeop DataMemoryBarrier; define pcodeop DataSynchronizationBarrier;
define pcodeop DC_CISW; define pcodeop DC_CIVAC; define pcodeop DC_CSW; define pcodeop DC_CVAC;
define pcodeop DC_CVAP; define pcodeop DC_CVAU; define pcodeop DC_ISW; define pcodeop DC_IVAC;
define pcodeop DC_IGVAC; define pcodeop DC_IGSW; define pcodeop DC_IGDVAC; define pcodeop DC_IGDSW;
define pcodeop DC_CGSW; define pcodeop DC_CGDSW; define pcodeop DC_CIGSW; define pcodeop DC_CIGDSW;
define pcodeop DC_GVA; define pcodeop DC_GZVA; define pcodeop DC_CGVAC; define pcodeop DC_CGDVAC;
define pcodeop DC_CGVAP; define pcodeop DC_CGDVAP; define pcodeop DC_CGVADP; define pcodeop DC_CGDVADP;
define pcodeop DC_CIGVAC; define pcodeop DC_CIGDVAC;
define pcodeop DCPSInstruction; define pcodeop DC_ZVA; define pcodeop DRPSInstruction;
define pcodeop ExceptionReturn; define pcodeop ExclusiveMonitorPass; define pcodeop ExclusiveMonitorsStatus;
define pcodeop HaltBreakPoint; define pcodeop Hint_Prefetch;
define pcodeop IC_IALLU; define pcodeop IC_IALLUIS; define pcodeop IC_IVAU;
define pcodeop InstructionSynchronizationBarrier;
define pcodeop LOAcquire; define pcodeop LORelease;
define pcodeop pacda; define pcodeop pacdb; define pcodeop pacdza; define pcodeop pacdzb;
define pcodeop pacga; define pcodeop pacia; define pcodeop paciza; define pcodeop pacib;
define pcodeop pacizb;
define pcodeop SendEvent; define pcodeop SendEventLocally; define pcodeop SoftwareBreakpoint;
define pcodeop SpeculationBarrier; define pcodeop SysOp_R; define pcodeop SysOp_W;
define pcodeop TLBI_ALLE1; define pcodeop TLBI_ALLE1IS; define pcodeop TLBI_ALLE2; define pcodeop TLBI_ALLE2IS;
define pcodeop TLBI_ALLE3; define pcodeop TLBI_ALLE3IS; define pcodeop TLBI_ASIDE1; define pcodeop TLBI_ASIDE1IS;
define pcodeop TLBI_IPAS2E1; define pcodeop TLBI_IPAS2E1IS; define pcodeop TLBI_IPAS2LE1; define pcodeop TLBI_IPAS2LE1IS;
define pcodeop TLBI_VAAE1; define pcodeop TLBI_VAALE1; define pcodeop TLBI_VAAE1IS; define pcodeop TLBI_VAALE1IS;
define pcodeop TLBI_VAE1; define pcodeop TLBI_VAE1IS; define pcodeop TLBI_VAE2; define pcodeop TLBI_VAE2IS;
define pcodeop TLBI_VAE3; define pcodeop TLBI_VAE3IS; define pcodeop TLBI_VALE1; define pcodeop TLBI_VALE1IS;
define pcodeop TLBI_VALE2; define pcodeop TLBI_VALE2IS; define pcodeop TLBI_VALE3; define pcodeop TLBI_VALE3IS;
define pcodeop TLBI_VMALLE1; define pcodeop TLBI_VMALLE1IS; define pcodeop TLBI_VMALLS12E1; define pcodeop TLBI_VMALLS12E1IS;
define pcodeop UndefinedInstructionException;
# NOTE(review): "Sytem" (sic) matches the upstream Ghidra spelling; do not
# rename without updating all uses of these ops.
define pcodeop UnkSytemRegRead; define pcodeop UnkSytemRegWrite;
define pcodeop WaitForEvent; define pcodeop WaitForInterrupt; define pcodeop xpac; define pcodeop Yield;

# BTI and MemTag pseudo ops
define pcodeop CopyPtrTag_AddToPtrTag_Exclude; # a combination of the ARM spec's ChooseNonExcludedTag and AddressWithAllocationTag
define pcodeop ValidCallTarget;
define pcodeop ValidJumpTarget; # jumps are valid regardless of the register holding the target
define pcodeop ValidJumpTargetWhenDestIsX16OrX17; # jumps are valid if the register holding the target is x16 or x17, e.g. "br x16"
define pcodeop ValidJumpTargetIfPermittedBySCTLR; # depending on EL and SCTLR[35,36], jumps using arbitrary registers may or may not be valid.
define pcodeop ControlFlowPredictionRestrictionByContext;
define pcodeop CachePrefetchPredictionRestrictionByContext;
define pcodeop DataValuePredictionRestrictionByContext;
define pcodeop RandomizePtrTag_Exclude;
define pcodeop SetPtrTag; # this could be implemented in pcode, but it would break the data flow of the original ptr value
define pcodeop LoadMemTag;
define pcodeop StoreMemTag;
define pcodeop AlignmentFault;

# BTI show/hide operations, which use pcodeops defined above
# for BTI
BTI_BTITARGETS: is b_0607=0 { } # Not a valid target for jumps or calls
BTI_BTITARGETS: "c" is ShowBTI=1 & b_0607=1 { ValidCallTarget(); ValidJumpTargetWhenDestIsX16OrX17(); } # BR x16 is valid, BR x5 isn't
BTI_BTITARGETS: "j" is ShowBTI=1 & b_0607=2 { ValidJumpTarget(); }
BTI_BTITARGETS: "jc" is ShowBTI=1 & b_0607=3 { ValidJumpTarget(); ValidCallTarget(); }
# hidden versions of the above; use to prevent ValidXXXXTarget calls from cluttering decompiled code in switch statements etc.
BTI_BTITARGETS: "c" is ShowBTI=0 & b_0607=1 { } BTI_BTITARGETS: "j" is ShowBTI=0 & b_0607=2 { } BTI_BTITARGETS: "jc" is ShowBTI=0 & b_0607=3 { } # for BRK and HLT ALL_BTITARGETS: is ShowBTI=1 { ValidJumpTarget(); ValidCallTarget(); } ALL_BTITARGETS: is ShowBTI=0 { } # for PACIASP and PACIBSP PACIXSP_BTITARGETS: is ShowBTI=1 { ValidCallTarget(); ValidJumpTargetWhenDestIsX16OrX17(); # global jump target in the following cases: # EL == 0 and SCTLR[35] == 0 # EL != 0 and SCTLR[36] == 0 ValidJumpTargetIfPermittedBySCTLR(); # this doesn't seem important enough to clutter decompilations with a decision tree } PACIXSP_BTITARGETS: is ShowBTI=0 { } # These pseudo ops are used in neon define pcodeop SIMD_PIECE; define pcodeop NEON_aesd; define pcodeop NEON_aese; define pcodeop NEON_aesimc; define pcodeop NEON_aesmc; define pcodeop NEON_bfdot; define pcodeop NEON_bfmlalb; define pcodeop NEON_bfmlalt; define pcodeop NEON_bfmmla; define pcodeop NEON_cls; define pcodeop NEON_ext; define pcodeop NEON_facge; define pcodeop NEON_facgt; define pcodeop NEON_fcadd; define pcodeop NEON_fcmeq; define pcodeop NEON_fcmge; define pcodeop NEON_fcmgt; define pcodeop NEON_fcmla; define pcodeop NEON_fcmle; define pcodeop NEON_fcmlt; define pcodeop NEON_fcvtzs; define pcodeop NEON_fcvtzu; define pcodeop NEON_fmax; define pcodeop NEON_fmaxnm; define pcodeop NEON_fmaxnmp; define pcodeop NEON_fmaxnmv; define pcodeop NEON_fmaxp; define pcodeop NEON_fmaxv; define pcodeop NEON_fmin; define pcodeop NEON_fminnm; define pcodeop NEON_fminnmp; define pcodeop NEON_fminnmv; define pcodeop NEON_fminp; define pcodeop NEON_fminv; define pcodeop NEON_fmov; define pcodeop NEON_fmulx; define pcodeop NEON_fnmadd; define pcodeop NEON_fnmsub; define pcodeop NEON_frecpe; define pcodeop NEON_frecps; define pcodeop NEON_frecpx; define pcodeop NEON_frsqrte; define pcodeop NEON_frsqrts; define pcodeop NEON_fsqrt; define pcodeop NEON_neg; define pcodeop NEON_pmul; define pcodeop NEON_pmull; define pcodeop NEON_pmull2; 
# NEON pseudo ops (continued)
define pcodeop NEON_raddhn; define pcodeop NEON_rbit;
define pcodeop NEON_rev16; define pcodeop NEON_rev32; define pcodeop NEON_rev64;
define pcodeop NEON_rshrn; define pcodeop NEON_rshrn2; define pcodeop NEON_rsubhn; define pcodeop NEON_rsubhn2;
define pcodeop NEON_saba; define pcodeop NEON_sabd; define pcodeop NEON_saddlv;
define pcodeop NEON_scvtf; define pcodeop NEON_sdot;
define pcodeop NEON_sha1c; define pcodeop NEON_sha1m; define pcodeop NEON_sha1p;
define pcodeop NEON_sha1su0; define pcodeop NEON_sha1su1;
define pcodeop NEON_sha256h; define pcodeop NEON_sha256h2; define pcodeop NEON_sha256su0; define pcodeop NEON_sha256su1;
define pcodeop NEON_sha512h; define pcodeop NEON_sha512h2; define pcodeop NEON_sha512su0; define pcodeop NEON_sha512su1;
define pcodeop NEON_shadd; define pcodeop NEON_shl; define pcodeop NEON_shsub; define pcodeop NEON_sli;
define pcodeop NEON_sm3partw1; define pcodeop NEON_sm3partw2; define pcodeop NEON_sm3ss1;
define pcodeop NEON_sm3tt1a; define pcodeop NEON_sm3tt1b; define pcodeop NEON_sm3tt2a; define pcodeop NEON_sm3tt2b;
define pcodeop NEON_sm4e; define pcodeop NEON_sm4ekey;
define pcodeop NEON_smax; define pcodeop NEON_smaxp; define pcodeop NEON_smaxv;
define pcodeop NEON_smin; define pcodeop NEON_sminp; define pcodeop NEON_sminv; define pcodeop NEON_smmla;
define pcodeop NEON_sqadd; define pcodeop NEON_sqdmulh; define pcodeop NEON_sqdmull;
define pcodeop NEON_sqrdml_as_h; define pcodeop NEON_sqrdmulh;
define pcodeop NEON_sqrshl; define pcodeop NEON_sqrshrn; define pcodeop NEON_sqrshrn2;
define pcodeop NEON_sqrshrun; define pcodeop NEON_sqrshrun2;
define pcodeop NEON_sqshl; define pcodeop NEON_sqshlu; define pcodeop NEON_sqshrn; define pcodeop NEON_sqshrn2;
define pcodeop NEON_sqshrun; define pcodeop NEON_sqshrun2; define pcodeop NEON_sqsub;
define pcodeop NEON_sqxtn; define pcodeop NEON_sqxtn2; define pcodeop NEON_sqxtun; define pcodeop NEON_sqxtun2;
define pcodeop NEON_srhadd; define pcodeop NEON_sri; define pcodeop NEON_srshl;
define pcodeop NEON_srshr; define pcodeop NEON_sshl; define pcodeop NEON_sshr; define pcodeop NEON_sudot;
define pcodeop NEON_uaba; define pcodeop NEON_uabd; define pcodeop NEON_uaddlv;
define pcodeop NEON_ucvtf; define pcodeop NEON_udot; define pcodeop NEON_uhadd; define pcodeop NEON_uhsub;
define pcodeop NEON_umax; define pcodeop NEON_umaxp; define pcodeop NEON_umaxv;
define pcodeop NEON_umin; define pcodeop NEON_uminp; define pcodeop NEON_uminv;
define pcodeop NEON_ummla; define pcodeop NEON_umull;
define pcodeop NEON_uqadd; define pcodeop NEON_uqrshl; define pcodeop NEON_uqrshrn; define pcodeop NEON_uqrshrn2;
define pcodeop NEON_uqshl; define pcodeop NEON_uqshrn; define pcodeop NEON_uqshrn2; define pcodeop NEON_uqsub;
define pcodeop NEON_uqxtn; define pcodeop NEON_uqxtn2;
define pcodeop NEON_urecpe; define pcodeop NEON_urhadd; define pcodeop NEON_urshl; define pcodeop NEON_urshr;
define pcodeop NEON_ursqrte; define pcodeop NEON_usdot; define pcodeop NEON_ushl;
define pcodeop NEON_usmmla; define pcodeop NEON_usqadd;

# These pseudo ops are automatically generated
define pcodeop SVE_abs; define pcodeop SVE_add; define pcodeop SVE_addpl; define pcodeop SVE_addvl;
define pcodeop SVE_adr; define pcodeop SVE_and; define pcodeop SVE_ands; define pcodeop SVE_andv;
define pcodeop SVE_asr; define pcodeop SVE_asrd; define pcodeop SVE_asrr;
define pcodeop SVE_bic; define pcodeop SVE_bics;
define pcodeop SVE_brka; define pcodeop SVE_brkas; define pcodeop SVE_brkb; define pcodeop SVE_brkbs;
define pcodeop SVE_brkn; define pcodeop SVE_brkns;
define pcodeop SVE_brkpa; define pcodeop SVE_brkpas; define pcodeop SVE_brkpb; define pcodeop SVE_brkpbs;
define pcodeop SVE_clasta; define pcodeop SVE_clastb; define pcodeop SVE_cls; define pcodeop SVE_clz;
define pcodeop SVE_cmpeq; define pcodeop SVE_cmpge; define pcodeop SVE_cmpgt; define pcodeop SVE_cmphi;
define pcodeop SVE_cmphs; define pcodeop SVE_cmple; define pcodeop SVE_cmplo; define pcodeop SVE_cmpls;
define pcodeop SVE_cmplt;
# Auto-generated SVE pseudo ops (continued)
define pcodeop SVE_cmpne; define pcodeop SVE_cnot; define pcodeop SVE_cnt;
define pcodeop SVE_cntb; define pcodeop SVE_cntd; define pcodeop SVE_cnth; define pcodeop SVE_cntp; define pcodeop SVE_cntw;
define pcodeop SVE_compact; define pcodeop SVE_cpy;
define pcodeop SVE_ctermeq; define pcodeop SVE_ctermne;
define pcodeop SVE_decb; define pcodeop SVE_decd; define pcodeop SVE_dech; define pcodeop SVE_decp; define pcodeop SVE_decw;
define pcodeop SVE_dup; define pcodeop SVE_dupm;
define pcodeop SVE_eon; define pcodeop SVE_eor; define pcodeop SVE_eors; define pcodeop SVE_eorv;
define pcodeop SVE_ext;
define pcodeop SVE_fabd; define pcodeop SVE_fabs; define pcodeop SVE_facge; define pcodeop SVE_facgt;
define pcodeop SVE_fadd; define pcodeop SVE_fadda; define pcodeop SVE_faddv; define pcodeop SVE_fcadd;
define pcodeop SVE_fcmeq; define pcodeop SVE_fcmge; define pcodeop SVE_fcmgt; define pcodeop SVE_fcmla;
define pcodeop SVE_fcmle; define pcodeop SVE_fcmlt; define pcodeop SVE_fcmne; define pcodeop SVE_fcmuo;
define pcodeop SVE_fcpy; define pcodeop SVE_fcvt; define pcodeop SVE_fcvtzs; define pcodeop SVE_fcvtzu;
define pcodeop SVE_fdiv; define pcodeop SVE_fdivr; define pcodeop SVE_fdup; define pcodeop SVE_fexpa;
define pcodeop SVE_fmad; define pcodeop SVE_fmax; define pcodeop SVE_fmaxnm; define pcodeop SVE_fmaxnmv;
define pcodeop SVE_fmaxv; define pcodeop SVE_fmin; define pcodeop SVE_fminnm; define pcodeop SVE_fminnmv;
define pcodeop SVE_fminv; define pcodeop SVE_fmla; define pcodeop SVE_fmls; define pcodeop SVE_fmov;
define pcodeop SVE_fmsb; define pcodeop SVE_fmul; define pcodeop SVE_fmulx; define pcodeop SVE_fneg;
define pcodeop SVE_fnmad; define pcodeop SVE_fnmla; define pcodeop SVE_fnmls; define pcodeop SVE_fnmsb;
define pcodeop SVE_frecpe; define pcodeop SVE_frecps; define pcodeop SVE_frecpx;
define pcodeop SVE_frinta; define pcodeop SVE_frinti; define pcodeop SVE_frintm; define pcodeop SVE_frintn;
define pcodeop SVE_frintp; define pcodeop SVE_frintx; define pcodeop SVE_frintz;
define pcodeop SVE_frsqrte; define pcodeop SVE_frsqrts;
define pcodeop SVE_fscale; define pcodeop SVE_fsqrt; define pcodeop SVE_fsub; define pcodeop SVE_fsubr;
define pcodeop SVE_ftmad; define pcodeop SVE_ftsmul; define pcodeop SVE_ftssel;
define pcodeop SVE_incb; define pcodeop SVE_incd; define pcodeop SVE_inch; define pcodeop SVE_incp; define pcodeop SVE_incw;
define pcodeop SVE_index; define pcodeop SVE_insr;
define pcodeop SVE_lasta; define pcodeop SVE_lastb;
define pcodeop SVE_ld1b; define pcodeop SVE_ld1d; define pcodeop SVE_ld1h;
define pcodeop SVE_ld1rb; define pcodeop SVE_ld1rd; define pcodeop SVE_ld1rh;
define pcodeop SVE_ld1rqb; define pcodeop SVE_ld1rqd; define pcodeop SVE_ld1rqh; define pcodeop SVE_ld1rqw;
define pcodeop SVE_ld1rsb; define pcodeop SVE_ld1rsh; define pcodeop SVE_ld1rsw; define pcodeop SVE_ld1rw;
define pcodeop SVE_ld1sb; define pcodeop SVE_ld1sh; define pcodeop SVE_ld1sw; define pcodeop SVE_ld1w;
define pcodeop SVE_ld2b; define pcodeop SVE_ld2d; define pcodeop SVE_ld2h; define pcodeop SVE_ld2w;
define pcodeop SVE_ld3b; define pcodeop SVE_ld3d; define pcodeop SVE_ld3h; define pcodeop SVE_ld3w;
define pcodeop SVE_ld4b; define pcodeop SVE_ld4d; define pcodeop SVE_ld4h; define pcodeop SVE_ld4w;
define pcodeop SVE_ldff1b; define pcodeop SVE_ldff1d; define pcodeop SVE_ldff1h;
define pcodeop SVE_ldff1sb; define pcodeop SVE_ldff1sh; define pcodeop SVE_ldff1sw; define pcodeop SVE_ldff1w;
define pcodeop SVE_ldnf1b; define pcodeop SVE_ldnf1d; define pcodeop SVE_ldnf1h;
define pcodeop SVE_ldnf1sb; define pcodeop SVE_ldnf1sh; define pcodeop SVE_ldnf1sw; define pcodeop SVE_ldnf1w;
define pcodeop SVE_ldnt1b; define pcodeop SVE_ldnt1d; define pcodeop SVE_ldnt1h; define pcodeop SVE_ldnt1w;
define pcodeop SVE_ldr;
define pcodeop SVE_lsl; define pcodeop SVE_lslr; define pcodeop SVE_lsr; define pcodeop SVE_lsrr;
define pcodeop SVE_mad; define pcodeop SVE_mla; define pcodeop SVE_mls;
define pcodeop SVE_movprfx; define pcodeop SVE_msb; define pcodeop SVE_mul;
define pcodeop SVE_nand; define pcodeop SVE_nands; define pcodeop SVE_neg;
define pcodeop SVE_nor; define pcodeop SVE_nors; define pcodeop SVE_not;
define pcodeop SVE_orn; define pcodeop SVE_orns; define pcodeop SVE_orr; define pcodeop SVE_orrs; define pcodeop SVE_orv;
define pcodeop SVE_pfalse; define pcodeop SVE_pfirst; define pcodeop SVE_pnext;
define pcodeop SVE_prfb; define pcodeop SVE_prfd; define pcodeop SVE_prfh; define pcodeop SVE_prfw;
define pcodeop SVE_ptest; define pcodeop SVE_ptrue; define pcodeop SVE_ptrues;
define pcodeop SVE_punpkhi; define pcodeop SVE_punpklo;
define pcodeop SVE_rbit; define pcodeop SVE_rdffr; define pcodeop SVE_rdffrs; define pcodeop SVE_rdvl;
define pcodeop SVE_rev; define pcodeop SVE_revb; define pcodeop SVE_revh; define pcodeop SVE_revw;
define pcodeop SVE_sabd; define pcodeop SVE_saddv; define pcodeop SVE_scvtf;
define pcodeop SVE_sdiv; define pcodeop SVE_sdivr; define pcodeop SVE_sdot; define pcodeop SVE_sel;
define pcodeop SVE_smax; define pcodeop SVE_smaxv; define pcodeop SVE_smin; define pcodeop SVE_sminv;
define pcodeop SVE_smulh; define pcodeop SVE_splice; define pcodeop SVE_sqadd;
define pcodeop SVE_sqdecb; define pcodeop SVE_sqdecd; define pcodeop SVE_sqdech; define pcodeop SVE_sqdecp; define pcodeop SVE_sqdecw;
define pcodeop SVE_sqincb; define pcodeop SVE_sqincd; define pcodeop SVE_sqinch; define pcodeop SVE_sqincp; define pcodeop SVE_sqincw;
define pcodeop SVE_sqsub;
define pcodeop SVE_st1b; define pcodeop SVE_st1d; define pcodeop SVE_st1h; define pcodeop SVE_st1w;
define pcodeop SVE_st2b; define pcodeop SVE_st2d; define pcodeop SVE_st2h; define pcodeop SVE_st2w;
define pcodeop SVE_st3b; define pcodeop SVE_st3d; define pcodeop SVE_st3h; define pcodeop SVE_st3w;
define pcodeop SVE_st4b; define pcodeop SVE_st4d; define pcodeop SVE_st4h; define pcodeop SVE_st4w;
define pcodeop SVE_stnt1b; define pcodeop SVE_stnt1d; define pcodeop SVE_stnt1h; define pcodeop SVE_stnt1w;
define pcodeop SVE_str;
# (chunk boundary: the next definition continues past this excerpt)
define
pcodeop SVE_sub;
define pcodeop SVE_subr;
define pcodeop SVE_sunpkhi;
define pcodeop SVE_sunpklo;
define pcodeop SVE_sxtb;
define pcodeop SVE_sxth;
define pcodeop SVE_sxtw;
define pcodeop SVE_tbl;
define pcodeop SVE_trn1;
define pcodeop SVE_trn2;
define pcodeop SVE_uabd;
define pcodeop SVE_uaddv;
define pcodeop SVE_ucvtf;
define pcodeop SVE_udiv;
define pcodeop SVE_udivr;
define pcodeop SVE_udot;
define pcodeop SVE_umax;
define pcodeop SVE_umaxv;
define pcodeop SVE_umin;
define pcodeop SVE_uminv;
define pcodeop SVE_umulh;
define pcodeop SVE_uqadd;
define pcodeop SVE_uqdecb;
define pcodeop SVE_uqdecd;
define pcodeop SVE_uqdech;
define pcodeop SVE_uqdecp;
define pcodeop SVE_uqdecw;
define pcodeop SVE_uqincb;
define pcodeop SVE_uqincd;
define pcodeop SVE_uqinch;
define pcodeop SVE_uqincp;
define pcodeop SVE_uqincw;
define pcodeop SVE_uqsub;
define pcodeop SVE_uunpkhi;
define pcodeop SVE_uunpklo;
define pcodeop SVE_uxtb;
define pcodeop SVE_uxth;
define pcodeop SVE_uxtw;
define pcodeop SVE_uzp1;
define pcodeop SVE_uzp2;
define pcodeop SVE_whilele;
define pcodeop SVE_whilelo;
define pcodeop SVE_whilels;
define pcodeop SVE_whilelt;
define pcodeop SVE_wrffr;
define pcodeop SVE_zip1;
define pcodeop SVE_zip2;

# SECTION macros

# begin macros related to memory-tagging

# Extract the MTE allocation tag (bits 59:56) of pointer op into result.
macro AllocationTagFromAddress(result, op) {
    # Summary: Sometimes the decompiler won't show this, but that's usually okay.
    #
    # A potential downside to actually implementing this, rather than using a pseudo-op,
    # is that the whole operation can get optimized out to zero by the decompiler when
    # tags are being ignored/non-populated by the user. (This zero-tagging is helped along by
    # SetPtrTag being a pseudo-op rather than a macro, which is done to preserve data-flow.)
    # The optimization makes it harder to tell that tag-related things are happening;
    # however, it's arguably convenient to omit a bunch of tag-related stuff when tags
    # are being ignored by the user.
    result = (op >> 56) & 0xf;
    # decompiler output: return unaff_x30 | 1 << ((ulonglong)register0x00000008 >> 0x38 & 0xf);
    # An alternate implementation is the following, which has the downside of adding at least one extra length conversion:
    # result = zext(op[56,4]);
    # decompiler output: return unaff_x30 | 1 << (ulonglong)((byte)((ulonglong)register0x00000008 >> 0x38) & 0xf);
}

# Round value down to a multiple of sze (mask form assumes sze is a power of two).
macro Align(value, sze) {
    value = value & ~(sze - 1);
}

# Raise AlignmentFault() unless addr is aligned to the MTE tag granule.
# FIX(review): the local label had been lost ("goto ;"), leaving the macro
# malformed; restore the <end> label so the fault is skipped when aligned.
macro RequireGranuleAlignment(addr) {
    misalignment:8 = addr & ($(TAG_GRANULE) - 1);
    if (misalignment == 0) goto <end>;
    AlignmentFault();
    <end>
}

# Fold the GCR_EL1 excluded-tag mask into a 16-bit tag bitmap.
macro Or2BytesWithExcludedTags(tmp) {
    tmp = (tmp | gcr_el1.exclude) & 0xffff;
}

# end of memory-tagging macros

# Compute carry/overflow temporaries for an addition.
macro addflags(op1, op2) {
    tmpCY = carry(op1, op2);
    tmpOV = scarry(op1, op2);
}

# Carry/overflow temporaries for add-with-carry (two-stage carry chain).
macro add_with_carry_flags(op1,op2){
    local carry_in = zext(CY);
    local tempResult = op1 + op2;
    tmpCY = carry(op1,op2) || carry(tempResult, carry_in);
    tmpOV = scarry(op1,op2) ^^ scarry(tempResult, carry_in);
}

# Commit the temporary flag values to the architectural NZCV flags.
macro affectflags() {
    NG = tmpNG;
    ZR = tmpZR;
    CY = tmpCY;
    OV = tmpOV;
}

# Commit N/Z only; C and V are cleared (logical-op flag behavior).
macro affectLflags() {
    NG = tmpNG;
    ZR = tmpZR;
    CY = 0;
    OV = 0;
}

# NOTE unlike x86, carry flag is SET if there is NO borrow
macro subflags(op1, op2) {
    tmpCY = op1 >= op2;
    tmpOV = sborrow(op1, op2);
}

# Special case when the first op of the macro call is 0
macro subflags0(op2) {
    tmpCY = 0 == op2;
    tmpOV = sborrow(0, op2);
}

# Flags for shift/logic forms: carry comes from the shifter, V is unchanged.
macro logicflags() {
    tmpCY = shift_carry;
    tmpOV = OV;
}

# Leave C and V unchanged (copy current values into the temporaries).
macro CVunaffected() {
    tmpCY = CY;
    tmpOV = OV;
}

# N/Z temporaries from an arithmetic result.
macro resultflags(result) {
    tmpNG = result s< 0;
    tmpZR = result == 0;
}

# Ordered floating-point comparison flag settings.
macro fcomp(a, b) {
    NG = a f< b;
    ZR = a f== b;
    CY = a f>= b;
    OV = 0;
}

# this sets NG, ZR, CY, and OV to the values the processor
# uses to indicate an unordered comparison. If at least
# one of the inputs is NaN, it skips to the next instruction.
# A use of this macro should be followed by a use of the
# fcomp macro, which will set the flags according to an
# ordered comparison. Basically, set the flags to the "unordered"
# values, check for unordered, and if not set the flags again with
# fcomp.
macro ftestNAN(a, b) {
    NG = 0;
    ZR = 0;
    CY = 1;
    OV = 1;
    tst:1 = nan(a) || nan(b);
    if (tst) goto inst_next;
}

# 32-bit rotate right (rotate must be 1..31 for defined shift amounts).
macro ROR32(out, val, rotate) {
    out = ( val >> rotate) | ( val << ( 32 - rotate ) );
}

# 64-bit rotate right (rotate must be 1..63 for defined shift amounts).
macro ROR64(out, val, rotate) {
    out = ( val >> rotate) | ( val << ( 64 - rotate ) );
}

# Branch-free select: result = condition ? val1 : val2.
macro selectCC(result, val1, val2, condition) {
    result = (zext(condition) * val1) + (zext(!condition) * val2);
}

# Set all four NZCV flags directly from a 4-bit immediate mask.
macro setCC_NZCV(condMask) {
    NG = (condMask & 0x8) == 0x8;
    ZR = (condMask & 0x4) == 0x4;
    CY = (condMask & 0x2) == 0x2;
    OV = (condMask & 0x1) == 0x1;
}

# Conditionally update each NZCV flag from bits 3..0 of value, keeping a
# flag's previous value when the corresponding condMask bit is clear.
# FIX(review): the "keep previous value" term for ZR/CY/OV mistakenly reused
# NG (copy-paste slip); each flag now falls back to its own prior value,
# matching the per-flag pattern of setCC_NZCV above.
macro set_NZCV(value, condMask) {
    setNG:1 = (condMask & 0x8) == 0x8;
    NG = ((setNG==0) * NG) | ((setNG==1) * (((value >> 3) & 1) ==1));
    setZR:1 = (condMask & 0x4) == 0x4;
    ZR = ((setZR==0) * ZR) | ((setZR==1) * (((value >> 2) & 1) ==1));
    setCY:1 = (condMask & 0x2) == 0x2;
    CY = ((setCY==0) * CY) | ((setCY==1) * (((value >> 1) & 1) == 1));
    setOV:1 = (condMask & 0x1) == 0x1;
    OV = ((setOV==0) * OV) | ((setOV==1) * (((value >> 0) & 1) == 1));
}

# Macro to access simd lanes
# Macros to zero the high bits of the Z or Q registers
# These are friendlier to the decompiler
macro zext_zb(reg) {
    reg[8,56] = 0;
    reg[64,64] = 0;
    reg[128,64] = 0;
    reg[192,64] = 0;
}

macro zext_zh(reg) {
    reg[16,48] = 0;
    reg[64,64] = 0;
    reg[128,64] = 0;
    reg[192,64] = 0;
}

macro zext_zs(reg) {
    reg[32,32] = 0;
    reg[64,64] = 0;
    reg[128,64] = 0;
    reg[192,64] = 0;
}

macro zext_zd(reg) {
    reg[64,64] = 0;
    reg[128,64] = 0;
    reg[192,64] = 0;
}

macro zext_zq(reg) {
    reg[128,64] = 0;
    reg[192,64] = 0;
}

macro zext_rb(reg) {
    reg[8,56] = 0;
}

macro zext_rh(reg) {
    reg[16,48] = 0;
}

macro zext_rs(reg) {
    reg[32,32] = 0;
}

# SECTION instructions

# Root constructor: precompute ImmS/ImmR comparison context values once,
# then match the instruction table with ImmS_ImmR_TestSet=1.
:^instruction is ImmS_ImmR_TestSet=0 & ImmR & ImmS & instruction
[
    ImmS_LT_ImmR = (((ImmS - ImmR) >> 6) $and 1);
    ImmS_EQ_ImmR = ~((((ImmS - ImmR) >> 6) $and 1) | (((ImmR - ImmS) >> 6) $and 1));
    # For ubfm, lsl is the preferred alias when imms + 1 == immr, so we must subtract an extra one
    # to determine when ubfiz is the preferred alias.
    ImmS_LT_ImmR_minus_1 = (((ImmS - (ImmR - 1)) >> 6) & 0x1) & (((ImmS - (ImmR - 1)) >> 7) & 0x1);
    ImmS_ne_1f = (((ImmS - 0x1f) >> 6) & 0x1) | (((0x1f - ImmS) >> 6) & 0x1);
    ImmS_ne_3f = (((ImmS - 0x3f) >> 6) & 0x1) | (((0x3f - ImmS) >> 6) & 0x1);
    ImmS_ImmR_TestSet=1;
]{}

with : ImmS_ImmR_TestSet=1 {

@include "AARCH64_base_PACoptions.sinc"
@include "AARCH64base.sinc"
@include "AARCH64neon.sinc"
@include "AARCH64ldst.sinc"
@include "AARCH64sve.sinc"

# TODO These are placeholders until the correction instruction implementations can be found
:NotYetImplemented_UNK1 is b_0031=0xe7ffdeff unimpl
:NotYetImplemented_UNK2 is b_0031=0x00200820 unimpl
:NotYetImplemented_UNK3 is b_0031=0x00200c20 unimpl

} # end with ImmS_ImmR_TestSet=1

================================================
FILE: pypcode/processors/AARCH64/data/languages/AARCH64ldst.sinc
================================================

# C7.2.162 LD1 (multiple structures) page C7-1359 line 78995 KEEPWITH

# INFO This file automatically generated by andre on Fri Jun 8 10:47:29 2018
# INFO Direct edits to this file may be lost in future updates
# INFO Command line arguments: ['../../../ProcessorTest/test/andre/scrape/a64ldst.py']

# ldst_imm: decodes, from instruction bits alone, the byte count printed as
# the post-index immediate ("#imm") in the write-back addressing forms below.
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=0 & b_13=0 [ tmp = 1; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=0 [ tmp = 1; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=0 & b_13=0 [ tmp = 2; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=1 & b_13=0 & b_10=0 [ tmp = 2; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 2; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=0 [ tmp = 2; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=0 & b_13=1 [ tmp = 3; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0
& b_11=0 & b_10=0 [ tmp = 3; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=0 & b_13=1 [ tmp = 4; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=1 & b_13=0 & b_10=0 [ tmp = 4; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=0 & b_11=0 & b_10=0 [ tmp = 4; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 4; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 4; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=0 [ tmp = 4; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_10=0 [ tmp = 6; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 6; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_12=1 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=0 & b_14=1 & b_13=1 & b_10=0 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=0 & b_11=0 & b_10=0 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=1 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 8; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=1 & b_11=0 & b_10=0 [ tmp = 12; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=0 [ tmp = 12; ] { }
ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_12=1 [ tmp = 16; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_12=0 & b_11=0 [ tmp = 16; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 16; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 16; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=0 & b_12=0 & b_11=0 & b_10=1 [ tmp = 16; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=1 & b_11=0 & b_10=0 [ tmp = 16; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=0 & b_12=0 & b_11=1 & b_10=1 [ tmp = 16; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=0 [ tmp = 16; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_12=0 & b_11=0 [ tmp = 24; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_12=0 & b_11=1 & b_10=0 [ tmp = 24; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 24; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=0 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 24; ] { }
ldst_imm: tmp is b_24=1 & b_21=0 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 24; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_12=0 & b_11=0 [ tmp = 32; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_12=0 & b_11=1 & b_10=0 [ tmp = 32; ] { }
ldst_imm: tmp is b_30=0 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 32; ] { }
ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=1 & b_14=0 & b_12=0 [ tmp = 32; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=0 & b_13=1 & b_12=0 & b_11=0 & b_10=1 [ tmp = 32; ] { }
ldst_imm: tmp is b_24=1 & b_21=1 & b_15=1 & b_14=1 & b_13=1 & b_12=0 & b_11=1 & b_10=1 [ tmp = 32; ] { }
ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=0 & b_14=1 & b_12=0 [ tmp = 48; ] { }
ldst_imm: tmp is b_30=1 & b_24=0 & b_21=0 & b_15=0 & b_14=0 & b_12=0 [ tmp = 64; ] { }
# No write-back form: plain [Xn|SP] addressing.
ldst_wback: "" is b_23=0 & b_1620=0b00000 { }
# Immediate post-index form: the printed "#imm" is display-only here; the new
# Rn value is taken from tmp_ldXn, which the instruction body has already
# advanced past the transferred bytes.
ldst_wback: ", #"^ldst_imm is b_23=1 & b_1620=0b11111 & Rn_GPR64xsp & ldst_imm { Rn_GPR64xsp = tmp_ldXn; }
# Register post-index form: Rn is advanced by Rm.
ldst_wback: ", "^Rm_GPR64 is b_23=1 & Rn_GPR64xsp & Rm_GPR64 { Rn_GPR64xsp = Rn_GPR64xsp + Rm_GPR64; }
# LD1 (multiple structures), four-register forms: load consecutive lanes from
# [Rn] into Vt, Vt2, Vt3, Vt4 in order, advancing tmp_ldXn one element at a time.
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback]
:ld1 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback]
:ld1 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback]
:ld1 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.1D, Vt2.1D, Vt3.1D, Vt4.1D}, [Xn|SP] [, wback]
:ld1 {vVt^".1D", vVtt^".1D", vVttt^".1D", vVtttt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rtt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rttt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rtttt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x4c402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback]
:ld1 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x4c402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback]
:ld1 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x4c402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback]
:ld1 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    Rtttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x4c402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback]
:ld1 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rtttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    Rtttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
    # neglected zexts
    build ldst_wback;
}
# LD1 (multiple structures), three-register forms.
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback]
:ld1 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c406400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback]
:ld1 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
    tmp_ldXn = Rn_GPR64xsp;
    Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2;
    # neglected zexts
    build ldst_wback;
}
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000
# C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000
# CONSTRUCT x0c406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# ld1 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback]
:ld1 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64
& Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c406c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.1D, Vt2.1D, Vt3.1D}, [Xn|SP] [, wback] :ld1 {vVt^".1D", vVtt^".1D", vVttt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] :ld1 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[16,8] = *:1 
tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 
1; Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c406400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] :ld1 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; 
Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] :ld1 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; 
Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c406c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] :ld1 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.8B}, [Xn|SP] [, wback] :ld1 {vVt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[56,8] = *:1 
tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c407400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.4H}, [Xn|SP] [, wback] :ld1 {vVt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.2S}, [Xn|SP] [, wback] :ld1 {vVt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c407c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.1D}, [Xn|SP] [, wback] :ld1 {vVt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; 
Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.16B}, [Xn|SP] [, wback] :ld1 {vVt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c407400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.8H}, [Xn|SP] [, wback] :ld1 {vVt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.4S}, [Xn|SP] [, wback] :ld1 {vVt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c407c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.2D}, [Xn|SP] [, wback] :ld1 {vVt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple 
structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] :ld1 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] :ld1 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = 
Rn_GPR64xsp; Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c40a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] :ld1 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x0c40ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.1D, Vt2.1D}, [Xn|SP] [, wback] :ld1 {vVt^".1D", vVtt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR64[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build 
ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] :ld1 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; 
Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] :ld1 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple 
structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c40a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] :ld1 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0c402000/mask=xbfff2000 # C7.2.177 LD1 (multiple structures) page C7-2415 line 141110 MATCH x0cc02000/mask=xbfe02000 # CONSTRUCT x4c40ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] :ld1 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH 
x0dc00000/mask=xbfe02000 # CONSTRUCT x0d400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[0], [Xn|SP] [, wback] :ld1 {vVt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[1], [Xn|SP] [, wback] :ld1 {vVt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[2], [Xn|SP] [, wback] :ld1 {vVt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d400c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[3], [Xn|SP] [, wback] :ld1 {vVt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & 
b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d401000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[4], [Xn|SP] [, wback] :ld1 {vVt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d401400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[5], [Xn|SP] [, wback] :ld1 {vVt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d401800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[6], [Xn|SP] [, wback] :ld1 {vVt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,8] = *:1 tmp_ldXn;
# NOTE(review): machine-generated LD1 (single structure) .B-lane constructors. Each variant loads one byte from [Xn|SP] into the encoded lane of Vt (Rt_VPR128[lane*8,8]), then advances tmp_ldXn by the element size before building ldst_wback; b_30=1 variants target lanes 8-15 (bit offsets >= 64). Do not hand-edit; regenerate instead.
tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d401c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[7], [Xn|SP] [, wback] :ld1 {vVt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[8], [Xn|SP] [, wback] :ld1 {vVt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[9], [Xn|SP] [, wback] :ld1 {vVt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page
C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[10], [Xn|SP] [, wback] :ld1 {vVt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d400c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[11], [Xn|SP] [, wback] :ld1 {vVt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d401000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[12], [Xn|SP] [, wback] :ld1 {vVt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d401400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[13], [Xn|SP] [, wback] :ld1 {vVt^".B"}[13],
[Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d401800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[14], [Xn|SP] [, wback] :ld1 {vVt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d401c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.B}[15], [Xn|SP] [, wback] :ld1 {vVt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[0], [Xn|SP] [, wback] :ld1 {vVt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn
= Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[1], [Xn|SP] [, wback] :ld1 {vVt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d405000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[2], [Xn|SP] [, wback] :ld1 {vVt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d405800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[3], [Xn|SP] [, wback] :ld1 {vVt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH
x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[4], [Xn|SP] [, wback] :ld1 {vVt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[5], [Xn|SP] [, wback] :ld1 {vVt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d405000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.H}[6], [Xn|SP] [, wback] :ld1 {vVt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d405800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1
{Vt.H}[7], [Xn|SP] [, wback] :ld1 {vVt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.S}[0], [Xn|SP] [, wback] :ld1 {vVt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d409000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.S}[1], [Xn|SP] [, wback] :ld1 {vVt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.S}[2], [Xn|SP] [, wback] :ld1 {vVt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt &
# NOTE(review): machine-generated LD1 (single structure) .H/.S/.D-element variants: element size (2/4/8 bytes) matches the *:N dereference and the tmp_ldXn increment; lane is encoded via b_12/b_1011 with b_30 selecting the upper 64 bits of Vt. Do not hand-edit; regenerate instead.
Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d409000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.S}[3], [Xn|SP] [, wback] :ld1 {vVt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.D}[0], [Xn|SP] [, wback] :ld1 {vVt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld1 {Vt.D}[1], [Xn|SP] [, wback] :ld1 {vVt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line
141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d40c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.8B}, [Xn|SP] [, wback] :ld1r {vVt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR64[0,8] = tmpv; Rt_VPR64[8,8] = tmpv; Rt_VPR64[16,8] = tmpv; Rt_VPR64[24,8] = tmpv; Rt_VPR64[32,8] = tmpv; Rt_VPR64[40,8] = tmpv; Rt_VPR64[48,8] = tmpv; Rt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d40c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.4H}, [Xn|SP] [, wback] :ld1r {vVt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR64[0,16] = tmpv; Rt_VPR64[16,16] = tmpv; Rt_VPR64[32,16] = tmpv; Rt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single
structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d40c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.2S}, [Xn|SP] [, wback] :ld1r {vVt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR64[0,32] = tmpv; Rt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x0d40cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.1D}, [Xn|SP] [, wback] :ld1r {vVt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d40c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.16B}, [Xn|SP] [, wback] :ld1r {vVt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn;
# NOTE(review): machine-generated LD1R (load one element, replicate to all lanes): the element is read once into tmpv and copied into each lane slice of the destination, then tmp_ldXn advances by the element size for the wback. Do not hand-edit; regenerate instead.
Rt_VPR128[0,8] = tmpv; Rt_VPR128[8,8] = tmpv; Rt_VPR128[16,8] = tmpv; Rt_VPR128[24,8] = tmpv; Rt_VPR128[32,8] = tmpv; Rt_VPR128[40,8] = tmpv; Rt_VPR128[48,8] = tmpv; Rt_VPR128[56,8] = tmpv; Rt_VPR128[64,8] = tmpv; Rt_VPR128[72,8] = tmpv; Rt_VPR128[80,8] = tmpv; Rt_VPR128[88,8] = tmpv; Rt_VPR128[96,8] = tmpv; Rt_VPR128[104,8] = tmpv; Rt_VPR128[112,8] = tmpv; Rt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d40c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.8H}, [Xn|SP] [, wback] :ld1r {vVt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR128[0,16] = tmpv; Rt_VPR128[16,16] = tmpv; Rt_VPR128[32,16] = tmpv; Rt_VPR128[48,16] = tmpv; Rt_VPR128[64,16] = tmpv; Rt_VPR128[80,16] = tmpv; Rt_VPR128[96,16] = tmpv; Rt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d40c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.4S}, [Xn|SP] [, wback] :ld1r {vVt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp &
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR128[0,32] = tmpv; Rt_VPR128[32,32] = tmpv; Rt_VPR128[64,32] = tmpv; Rt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0d40c000/mask=xbffff000 # C7.2.179 LD1R page C7-2423 line 141626 MATCH x0dc0c000/mask=xbfe0f000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0d400000/mask=xbfff2000 # C7.2.178 LD1 (single structure) page C7-2419 line 141371 MATCH x0dc00000/mask=xbfe02000 # CONSTRUCT x4d40cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld1r {Vt.2D}, [Xn|SP] [, wback] :ld1r {vVt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR128[0,64] = tmpv; Rt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x0c408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] :ld2 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[24,8]
= *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x0c408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] :ld2 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x0c408800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] :ld2 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 &
# NOTE(review): machine-generated LD2 (multiple structures): de-interleaving loads alternate destination registers (Rt then Rtt) one element at a time while tmp_ldXn steps by the element size, so consecutive memory elements land in the same lane of the two registers. Do not hand-edit; regenerate instead.
b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x4c408000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] :ld2 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[72,8] = *:1
tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x4c408400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] :ld2 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn =
tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x4c408800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] :ld2 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0c408000/mask=xbffff000 # C7.2.180 LD2 (multiple structures) page C7-2426 line 141824 MATCH x0cc08000/mask=xbfe0f000 # CONSTRUCT x4c408c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] :ld2 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b1000 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8;
Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d600000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[0], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 &
# NOTE(review): machine-generated LD2 (single structure) lane variants: two consecutive elements are loaded into the SAME lane of Rt and then Rtt, with tmp_ldXn advanced by the element size between the two loads. Do not hand-edit; regenerate instead.
b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d600400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[1], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d600800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[2], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 &
b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d600c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[3], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d601000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[4], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d601400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[5],
[Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d601800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[6], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d601c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[7], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH
x0de00000/mask=xbfe02000 # CONSTRUCT x4d600000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[8], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d600400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[9], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d600800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[10], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page
C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d600c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[11], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d601000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[12], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d601400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[13], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8]
= *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d601800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[14], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d601c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.B, Vt2.B}[15], [Xn|SP] [, wback] :ld2 {vVt^".B", vVtt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d604000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[0], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d604800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[1], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d605000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[2], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d605800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[3], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & 
b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d604000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[4], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d604800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[5], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d605000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, 
Vt2.H}[6], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d605800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.H, Vt2.H}[7], [Xn|SP] [, wback] :ld2 {vVt^".H", vVtt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d608000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.S, Vt2.S}[0], [Xn|SP] [, wback] :ld2 {vVt^".S", vVtt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 
142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d609000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.S, Vt2.S}[1], [Xn|SP] [, wback] :ld2 {vVt^".S", vVtt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d608000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.S, Vt2.S}[2], [Xn|SP] [, wback] :ld2 {vVt^".S", vVtt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d609000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.S, Vt2.S}[3], [Xn|SP] [, wback] :ld2 {vVt^".S", vVtt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single 
structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d608400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.D, Vt2.D}[0], [Xn|SP] [, wback] :ld2 {vVt^".D", vVtt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d608400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld2 {Vt.D, Vt2.D}[1], [Xn|SP] [, wback] :ld2 {vVt^".D", vVtt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d60c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] :ld2r {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & 
Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR64[0,8] = tmpv; Rt_VPR64[8,8] = tmpv; Rt_VPR64[16,8] = tmpv; Rt_VPR64[24,8] = tmpv; Rt_VPR64[32,8] = tmpv; Rt_VPR64[40,8] = tmpv; Rt_VPR64[48,8] = tmpv; Rt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtt_VPR64[0,8] = tmpv; Rtt_VPR64[8,8] = tmpv; Rtt_VPR64[16,8] = tmpv; Rtt_VPR64[24,8] = tmpv; Rtt_VPR64[32,8] = tmpv; Rtt_VPR64[40,8] = tmpv; Rtt_VPR64[48,8] = tmpv; Rtt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d60c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] :ld2r {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR64[0,16] = tmpv; Rt_VPR64[16,16] = tmpv; Rt_VPR64[32,16] = tmpv; Rt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtt_VPR64[0,16] = tmpv; Rtt_VPR64[16,16] = tmpv; Rtt_VPR64[32,16] = tmpv; Rtt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d60c800/mask=xff60fc00 
MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] :ld2r {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR64[0,32] = tmpv; Rt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtt_VPR64[0,32] = tmpv; Rtt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x0d60cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.1D, Vt2.1D}, [Xn|SP] [, wback] :ld2r {vVt^".1D", vVtt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d60c000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] :ld2r {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 
& b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR128[0,8] = tmpv; Rt_VPR128[8,8] = tmpv; Rt_VPR128[16,8] = tmpv; Rt_VPR128[24,8] = tmpv; Rt_VPR128[32,8] = tmpv; Rt_VPR128[40,8] = tmpv; Rt_VPR128[48,8] = tmpv; Rt_VPR128[56,8] = tmpv; Rt_VPR128[64,8] = tmpv; Rt_VPR128[72,8] = tmpv; Rt_VPR128[80,8] = tmpv; Rt_VPR128[88,8] = tmpv; Rt_VPR128[96,8] = tmpv; Rt_VPR128[104,8] = tmpv; Rt_VPR128[112,8] = tmpv; Rt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtt_VPR128[0,8] = tmpv; Rtt_VPR128[8,8] = tmpv; Rtt_VPR128[16,8] = tmpv; Rtt_VPR128[24,8] = tmpv; Rtt_VPR128[32,8] = tmpv; Rtt_VPR128[40,8] = tmpv; Rtt_VPR128[48,8] = tmpv; Rtt_VPR128[56,8] = tmpv; Rtt_VPR128[64,8] = tmpv; Rtt_VPR128[72,8] = tmpv; Rtt_VPR128[80,8] = tmpv; Rtt_VPR128[88,8] = tmpv; Rtt_VPR128[96,8] = tmpv; Rtt_VPR128[104,8] = tmpv; Rtt_VPR128[112,8] = tmpv; Rtt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d60c400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] :ld2r {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR128[0,16] = tmpv; Rt_VPR128[16,16] = tmpv; Rt_VPR128[32,16] = tmpv; Rt_VPR128[48,16] = tmpv; Rt_VPR128[64,16] = tmpv; Rt_VPR128[80,16] = tmpv; Rt_VPR128[96,16] = 
tmpv; Rt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtt_VPR128[0,16] = tmpv; Rtt_VPR128[16,16] = tmpv; Rtt_VPR128[32,16] = tmpv; Rtt_VPR128[48,16] = tmpv; Rtt_VPR128[64,16] = tmpv; Rtt_VPR128[80,16] = tmpv; Rtt_VPR128[96,16] = tmpv; Rtt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d60c800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] :ld2r {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR128[0,32] = tmpv; Rt_VPR128[32,32] = tmpv; Rt_VPR128[64,32] = tmpv; Rt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtt_VPR128[0,32] = tmpv; Rtt_VPR128[32,32] = tmpv; Rtt_VPR128[64,32] = tmpv; Rtt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0d60c000/mask=xbffff000 # C7.2.182 LD2R page C7-2433 line 142264 MATCH x0de0c000/mask=xbfe0f000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0d600000/mask=xbfff2000 # C7.2.181 LD2 (single structure) page C7-2429 line 142006 MATCH x0de00000/mask=xbfe02000 # CONSTRUCT x4d60cc00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld2r {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] :ld2r {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b110 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & 
vVtt & Rtt_VPR128 & Ztt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR128[0,64] = tmpv; Rt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtt_VPR128[0,64] = tmpv; Rtt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x0c404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] :ld3 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn 
+ 1; Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x0c404400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] :ld3 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x0c404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] :ld3 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, 
[Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x4c404000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] :ld3 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = 
*:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[120,8] = *:1 tmp_ldXn; 
# NOTE(review): Machine-generated AArch64 NEON load constructor table covering
# LD3 (multiple structures, ARM ARM C7.2.183), LD3 (single structure, C7.2.184)
# and LD3R (C7.2.185). Each constructor matches one fixed element-size /
# lane-index encoding via the b_* bit-field tokens, then loads interleaved
# elements from [Xn|SP] through the running pointer tmp_ldXn (advanced by the
# element size after every load) and finally applies optional post-index
# writeback through the ldst_wback subtable. The generator's "# neglected
# zexts" markers flag upper-lane zero-extensions it chose not to emit —
# presumably because the partial-register writes cover the relevant lanes;
# confirm against the generator before relying on upper-lane contents.
# Do not hand-edit: the bit patterns, lane offsets and increments are emitted
# by tooling; regenerate rather than patching individual constructors.
tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x4c404400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] :ld3 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 2; Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x4c404800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] :ld3 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0c404000/mask=xbffff000 # C7.2.183 LD3 (multiple structures) page C7-2436 line 142465 MATCH x0cc04000/mask=xbfe0f000 # CONSTRUCT x4c404c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] :ld3 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0100 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[0], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[1], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[2], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[3], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d403000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[4], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", 
vVttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d403400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[5], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d403800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[6], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 1; Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d403c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[7], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d402000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[8], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d402400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, 
Vt3.B}[9], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d402800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[10], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d402c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[11], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d403000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[12], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d403400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[13], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT 
x4d403800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[14], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d403c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.B, Vt2.B, Vt3.B}[15], [Xn|SP] [, wback] :ld3 {vVt^".B", vVtt^".B", vVttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[0], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback 
& Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[1], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[2], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) 
page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[3], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d406000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[4], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d406800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[5], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & 
Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d407000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[6], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d407800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.H, Vt2.H, Vt3.H}[7], [Xn|SP] [, wback] :ld3 {vVt^".H", vVtt^".H", vVttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page 
C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.S, Vt2.S, Vt3.S}[0], [Xn|SP] [, wback] :ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.S, Vt2.S, Vt3.S}[1], [Xn|SP] [, wback] :ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d40a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.S, Vt2.S, Vt3.S}[2], [Xn|SP] [, wback] :ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & 
b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d40b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.S, Vt2.S, Vt3.S}[3], [Xn|SP] [, wback] :ld3 {vVt^".S", vVtt^".S", vVttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.D, Vt2.D, Vt3.D}[0], [Xn|SP] [, wback] :ld3 {vVt^".D", vVtt^".D", vVttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected 
zexts build ldst_wback; } # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d40a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld3 {Vt.D, Vt2.D, Vt3.D}[1], [Xn|SP] [, wback] :ld3 {vVt^".D", vVtt^".D", vVttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] :ld3r {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR64[0,8] = tmpv; Rt_VPR64[8,8] = tmpv; Rt_VPR64[16,8] = tmpv; Rt_VPR64[24,8] = tmpv; Rt_VPR64[32,8] = tmpv; Rt_VPR64[40,8] = tmpv; Rt_VPR64[48,8] = tmpv; Rt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtt_VPR64[0,8] = tmpv; Rtt_VPR64[8,8] = tmpv; Rtt_VPR64[16,8] = tmpv; Rtt_VPR64[24,8] = tmpv; Rtt_VPR64[32,8] = tmpv; Rtt_VPR64[40,8] = tmpv; 
Rtt_VPR64[48,8] = tmpv; Rtt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rttt_VPR64[0,8] = tmpv; Rttt_VPR64[8,8] = tmpv; Rttt_VPR64[16,8] = tmpv; Rttt_VPR64[24,8] = tmpv; Rttt_VPR64[32,8] = tmpv; Rttt_VPR64[40,8] = tmpv; Rttt_VPR64[48,8] = tmpv; Rttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] :ld3r {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR64[0,16] = tmpv; Rt_VPR64[16,16] = tmpv; Rt_VPR64[32,16] = tmpv; Rt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtt_VPR64[0,16] = tmpv; Rtt_VPR64[16,16] = tmpv; Rtt_VPR64[32,16] = tmpv; Rtt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rttt_VPR64[0,16] = tmpv; Rttt_VPR64[16,16] = tmpv; Rttt_VPR64[32,16] = tmpv; Rttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.2S, Vt2.2S, 
Vt3.2S}, [Xn|SP] [, wback] :ld3r {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR64[0,32] = tmpv; Rt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtt_VPR64[0,32] = tmpv; Rtt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rttt_VPR64[0,32] = tmpv; Rttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x0d40ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.1D, Vt2.1D, Vt3.1D}, [Xn|SP] [, wback] :ld3r {vVt^".1D", vVtt^".1D", vVttt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 
# CONSTRUCT x4d40e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] :ld3r {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR128[0,8] = tmpv; Rt_VPR128[8,8] = tmpv; Rt_VPR128[16,8] = tmpv; Rt_VPR128[24,8] = tmpv; Rt_VPR128[32,8] = tmpv; Rt_VPR128[40,8] = tmpv; Rt_VPR128[48,8] = tmpv; Rt_VPR128[56,8] = tmpv; Rt_VPR128[64,8] = tmpv; Rt_VPR128[72,8] = tmpv; Rt_VPR128[80,8] = tmpv; Rt_VPR128[88,8] = tmpv; Rt_VPR128[96,8] = tmpv; Rt_VPR128[104,8] = tmpv; Rt_VPR128[112,8] = tmpv; Rt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtt_VPR128[0,8] = tmpv; Rtt_VPR128[8,8] = tmpv; Rtt_VPR128[16,8] = tmpv; Rtt_VPR128[24,8] = tmpv; Rtt_VPR128[32,8] = tmpv; Rtt_VPR128[40,8] = tmpv; Rtt_VPR128[48,8] = tmpv; Rtt_VPR128[56,8] = tmpv; Rtt_VPR128[64,8] = tmpv; Rtt_VPR128[72,8] = tmpv; Rtt_VPR128[80,8] = tmpv; Rtt_VPR128[88,8] = tmpv; Rtt_VPR128[96,8] = tmpv; Rtt_VPR128[104,8] = tmpv; Rtt_VPR128[112,8] = tmpv; Rtt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rttt_VPR128[0,8] = tmpv; Rttt_VPR128[8,8] = tmpv; Rttt_VPR128[16,8] = tmpv; Rttt_VPR128[24,8] = tmpv; Rttt_VPR128[32,8] = tmpv; Rttt_VPR128[40,8] = tmpv; Rttt_VPR128[48,8] = tmpv; Rttt_VPR128[56,8] = tmpv; Rttt_VPR128[64,8] = tmpv; Rttt_VPR128[72,8] = tmpv; Rttt_VPR128[80,8] = tmpv; Rttt_VPR128[88,8] = tmpv; Rttt_VPR128[96,8] = tmpv; Rttt_VPR128[104,8] = tmpv; Rttt_VPR128[112,8] = tmpv; Rttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single 
structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d40e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] :ld3r {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR128[0,16] = tmpv; Rt_VPR128[16,16] = tmpv; Rt_VPR128[32,16] = tmpv; Rt_VPR128[48,16] = tmpv; Rt_VPR128[64,16] = tmpv; Rt_VPR128[80,16] = tmpv; Rt_VPR128[96,16] = tmpv; Rt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtt_VPR128[0,16] = tmpv; Rtt_VPR128[16,16] = tmpv; Rtt_VPR128[32,16] = tmpv; Rtt_VPR128[48,16] = tmpv; Rtt_VPR128[64,16] = tmpv; Rtt_VPR128[80,16] = tmpv; Rtt_VPR128[96,16] = tmpv; Rtt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rttt_VPR128[0,16] = tmpv; Rttt_VPR128[16,16] = tmpv; Rttt_VPR128[32,16] = tmpv; Rttt_VPR128[48,16] = tmpv; Rttt_VPR128[64,16] = tmpv; Rttt_VPR128[80,16] = tmpv; Rttt_VPR128[96,16] = tmpv; Rttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d40e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] :ld3r {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b10 & 
vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR128[0,32] = tmpv; Rt_VPR128[32,32] = tmpv; Rt_VPR128[64,32] = tmpv; Rt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtt_VPR128[0,32] = tmpv; Rtt_VPR128[32,32] = tmpv; Rtt_VPR128[64,32] = tmpv; Rtt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rttt_VPR128[0,32] = tmpv; Rttt_VPR128[32,32] = tmpv; Rttt_VPR128[64,32] = tmpv; Rttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0d40e000/mask=xbffff000 # C7.2.185 LD3R page C7-2443 line 142925 MATCH x0dc0e000/mask=xbfe0f000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0d402000/mask=xbfff2000 # C7.2.184 LD3 (single structure) page C7-2439 line 142666 MATCH x0dc02000/mask=xbfe02000 # CONSTRUCT x4d40ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld3r {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] :ld3r {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=0 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR128[0,64] = tmpv; Rt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtt_VPR128[0,64] = tmpv; Rtt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rttt_VPR128[0,64] = tmpv; Rttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x0c400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.8B, 
Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] :ld4 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR64[56,8] = 
*:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR64[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x0c400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] :ld4 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR64[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR64[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR64[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR64[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) 
page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x0c400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] :ld4 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR64[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR64[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x4c400000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] :ld4 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; 
tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 1; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x4c400400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] :ld4 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & 
vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[112,16] = *:2 
tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x4c400800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] :ld4 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0c400000/mask=xbffff000 # C7.2.186 LD4 (multiple structures) page C7-2446 line 143128 MATCH x0cc00000/mask=xbfe0f000 # CONSTRUCT x4c400c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, 
[Xn|SP] [, wback] :ld4 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=1 & b_21=0 & b_1215=0b0000 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d602000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[0], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[0,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT 
x0d602400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[1], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[8,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d602800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[2], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[16,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d602c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[3], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", 
vVtttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[24,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d603000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[4], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[32,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d603400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[5], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & 
Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[40,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d603800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[6], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[48,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d603c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[7], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; 
Rt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[56,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d602000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[8], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[64,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d602400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[9], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[72,8] = *:1 tmp_ldXn; 
tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[72,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d602800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[10], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[80,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d602c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[11], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[88,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single 
structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d603000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[12], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[96,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d603400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[13], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[104,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH 
x0de02000/mask=xbfe02000 # CONSTRUCT x4d603800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[14], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[112,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d603c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[15], [Xn|SP] [, wback] :ld4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; Rtttt_VPR128[120,8] = *:1 tmp_ldXn; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d606000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[0], [Xn|SP] [, 
wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[0,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d606800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[1], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[16,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d607000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[2], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & 
b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[32,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d607800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[3], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[48,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d606000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[4], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & 
Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[64,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d606800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[5], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[80,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d607000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[6], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[96,16] = *:2 
tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[96,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d607800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[7], [Xn|SP] [, wback] :ld4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; Rtttt_VPR128[112,16] = *:2 tmp_ldXn; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[0], [Xn|SP] [, wback] :ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[0,32] = *:4 tmp_ldXn; tmp_ldXn = 
tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[1], [Xn|SP] [, wback] :ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[32,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[2], [Xn|SP] [, wback] :ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[64,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # 
C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[3], [Xn|SP] [, wback] :ld4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; Rtttt_VPR128[96,32] = *:4 tmp_ldXn; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # ld4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[0], [Xn|SP] [, wback] :ld4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtttt_VPR128[0,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60a400/mask=xff60fc00 MATCHED 2 DOCUMENTED 
OPCODES # ld4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[1], [Xn|SP] [, wback] :ld4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; Rt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; Rtttt_VPR128[64,64] = *:8 tmp_ldXn; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] :ld4r {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR64[0,8] = tmpv; Rt_VPR64[8,8] = tmpv; Rt_VPR64[16,8] = tmpv; Rt_VPR64[24,8] = tmpv; Rt_VPR64[32,8] = tmpv; Rt_VPR64[40,8] = tmpv; Rt_VPR64[48,8] = tmpv; Rt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtt_VPR64[0,8] = tmpv; Rtt_VPR64[8,8] = tmpv; Rtt_VPR64[16,8] = tmpv; Rtt_VPR64[24,8] = tmpv; Rtt_VPR64[32,8] = tmpv; Rtt_VPR64[40,8] = tmpv; Rtt_VPR64[48,8] = tmpv; Rtt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; 
Rttt_VPR64[0,8] = tmpv; Rttt_VPR64[8,8] = tmpv; Rttt_VPR64[16,8] = tmpv; Rttt_VPR64[24,8] = tmpv; Rttt_VPR64[32,8] = tmpv; Rttt_VPR64[40,8] = tmpv; Rttt_VPR64[48,8] = tmpv; Rttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtttt_VPR64[0,8] = tmpv; Rtttt_VPR64[8,8] = tmpv; Rtttt_VPR64[16,8] = tmpv; Rtttt_VPR64[24,8] = tmpv; Rtttt_VPR64[32,8] = tmpv; Rtttt_VPR64[40,8] = tmpv; Rtttt_VPR64[48,8] = tmpv; Rtttt_VPR64[56,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] :ld4r {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR64[0,16] = tmpv; Rt_VPR64[16,16] = tmpv; Rt_VPR64[32,16] = tmpv; Rt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtt_VPR64[0,16] = tmpv; Rtt_VPR64[16,16] = tmpv; Rtt_VPR64[32,16] = tmpv; Rtt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rttt_VPR64[0,16] = tmpv; Rttt_VPR64[16,16] = tmpv; Rttt_VPR64[32,16] = tmpv; Rttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtttt_VPR64[0,16] = tmpv; Rtttt_VPR64[16,16] = tmpv; Rtttt_VPR64[32,16] = tmpv; Rtttt_VPR64[48,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH 
x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] :ld4r {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR64[0,32] = tmpv; Rt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtt_VPR64[0,32] = tmpv; Rtt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rttt_VPR64[0,32] = tmpv; Rttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtttt_VPR64[0,32] = tmpv; Rtttt_VPR64[32,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x0d60ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.1D, Vt2.1D, Vt3.1D, Vt4.1D}, [Xn|SP] [, wback] :ld4r {vVt^".1D", vVtt^".1D", vVttt^".1D", vVtttt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR64 & Zt & vVtt & Rtt_VPR64 & Ztt & vVttt & Rttt_VPR64 & Zttt & vVtttt & Rtttt_VPR64 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; 
tmpv = *:8 tmp_ldXn; Rt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtttt_VPR64[0,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60e000/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] :ld4r {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:1 = 0; tmpv = *:1 tmp_ldXn; Rt_VPR128[0,8] = tmpv; Rt_VPR128[8,8] = tmpv; Rt_VPR128[16,8] = tmpv; Rt_VPR128[24,8] = tmpv; Rt_VPR128[32,8] = tmpv; Rt_VPR128[40,8] = tmpv; Rt_VPR128[48,8] = tmpv; Rt_VPR128[56,8] = tmpv; Rt_VPR128[64,8] = tmpv; Rt_VPR128[72,8] = tmpv; Rt_VPR128[80,8] = tmpv; Rt_VPR128[88,8] = tmpv; Rt_VPR128[96,8] = tmpv; Rt_VPR128[104,8] = tmpv; Rt_VPR128[112,8] = tmpv; Rt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtt_VPR128[0,8] = tmpv; Rtt_VPR128[8,8] = tmpv; Rtt_VPR128[16,8] = tmpv; Rtt_VPR128[24,8] = tmpv; Rtt_VPR128[32,8] = tmpv; Rtt_VPR128[40,8] = tmpv; Rtt_VPR128[48,8] = tmpv; Rtt_VPR128[56,8] = tmpv; Rtt_VPR128[64,8] = tmpv; Rtt_VPR128[72,8] = tmpv; Rtt_VPR128[80,8] = tmpv; Rtt_VPR128[88,8] = tmpv; Rtt_VPR128[96,8] = tmpv; Rtt_VPR128[104,8] = tmpv; Rtt_VPR128[112,8] = tmpv; Rtt_VPR128[120,8] = tmpv; 
tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rttt_VPR128[0,8] = tmpv; Rttt_VPR128[8,8] = tmpv; Rttt_VPR128[16,8] = tmpv; Rttt_VPR128[24,8] = tmpv; Rttt_VPR128[32,8] = tmpv; Rttt_VPR128[40,8] = tmpv; Rttt_VPR128[48,8] = tmpv; Rttt_VPR128[56,8] = tmpv; Rttt_VPR128[64,8] = tmpv; Rttt_VPR128[72,8] = tmpv; Rttt_VPR128[80,8] = tmpv; Rttt_VPR128[88,8] = tmpv; Rttt_VPR128[96,8] = tmpv; Rttt_VPR128[104,8] = tmpv; Rttt_VPR128[112,8] = tmpv; Rttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; tmpv = *:1 tmp_ldXn; Rtttt_VPR128[0,8] = tmpv; Rtttt_VPR128[8,8] = tmpv; Rtttt_VPR128[16,8] = tmpv; Rtttt_VPR128[24,8] = tmpv; Rtttt_VPR128[32,8] = tmpv; Rtttt_VPR128[40,8] = tmpv; Rtttt_VPR128[48,8] = tmpv; Rtttt_VPR128[56,8] = tmpv; Rtttt_VPR128[64,8] = tmpv; Rtttt_VPR128[72,8] = tmpv; Rtttt_VPR128[80,8] = tmpv; Rtttt_VPR128[88,8] = tmpv; Rtttt_VPR128[96,8] = tmpv; Rtttt_VPR128[104,8] = tmpv; Rtttt_VPR128[112,8] = tmpv; Rtttt_VPR128[120,8] = tmpv; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60e400/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] :ld4r {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:2 = 0; tmpv = *:2 tmp_ldXn; Rt_VPR128[0,16] = tmpv; Rt_VPR128[16,16] = tmpv; Rt_VPR128[32,16] = tmpv; Rt_VPR128[48,16] = tmpv; Rt_VPR128[64,16] = tmpv; Rt_VPR128[80,16] = tmpv; 
Rt_VPR128[96,16] = tmpv; Rt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtt_VPR128[0,16] = tmpv; Rtt_VPR128[16,16] = tmpv; Rtt_VPR128[32,16] = tmpv; Rtt_VPR128[48,16] = tmpv; Rtt_VPR128[64,16] = tmpv; Rtt_VPR128[80,16] = tmpv; Rtt_VPR128[96,16] = tmpv; Rtt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rttt_VPR128[0,16] = tmpv; Rttt_VPR128[16,16] = tmpv; Rttt_VPR128[32,16] = tmpv; Rttt_VPR128[48,16] = tmpv; Rttt_VPR128[64,16] = tmpv; Rttt_VPR128[80,16] = tmpv; Rttt_VPR128[96,16] = tmpv; Rttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; tmpv = *:2 tmp_ldXn; Rtttt_VPR128[0,16] = tmpv; Rtttt_VPR128[16,16] = tmpv; Rtttt_VPR128[32,16] = tmpv; Rtttt_VPR128[48,16] = tmpv; Rtttt_VPR128[64,16] = tmpv; Rtttt_VPR128[80,16] = tmpv; Rtttt_VPR128[96,16] = tmpv; Rtttt_VPR128[112,16] = tmpv; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60e800/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] :ld4r {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:4 = 0; tmpv = *:4 tmp_ldXn; Rt_VPR128[0,32] = tmpv; Rt_VPR128[32,32] = tmpv; Rt_VPR128[64,32] = tmpv; Rt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtt_VPR128[0,32] = tmpv; Rtt_VPR128[32,32] = tmpv; Rtt_VPR128[64,32] = tmpv; Rtt_VPR128[96,32] = tmpv; tmp_ldXn = 
tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rttt_VPR128[0,32] = tmpv; Rttt_VPR128[32,32] = tmpv; Rttt_VPR128[64,32] = tmpv; Rttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; tmpv = *:4 tmp_ldXn; Rtttt_VPR128[0,32] = tmpv; Rtttt_VPR128[32,32] = tmpv; Rtttt_VPR128[64,32] = tmpv; Rtttt_VPR128[96,32] = tmpv; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0d60e000/mask=xbffff000 # C7.2.188 LD4R page C7-2453 line 143575 MATCH x0de0e000/mask=xbfe0f000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0d602000/mask=xbfff2000 # C7.2.187 LD4 (single structure) page C7-2449 line 143314 MATCH x0de02000/mask=xbfe02000 # CONSTRUCT x4d60ec00/mask=xff60fc00 MATCHED 4 DOCUMENTED OPCODES # ld4r {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] :ld4r {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=1 & b_21=1 & b_1315=0b111 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Zt & vVtt & Rtt_VPR128 & Ztt & vVttt & Rttt_VPR128 & Zttt & vVtttt & Rtttt_VPR128 & Ztttt & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; local tmpv:8 = 0; tmpv = *:8 tmp_ldXn; Rt_VPR128[0,64] = tmpv; Rt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtt_VPR128[0,64] = tmpv; Rtt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rttt_VPR128[0,64] = tmpv; Rttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; tmpv = *:8 tmp_ldXn; Rtttt_VPR128[0,64] = tmpv; Rtttt_VPR128[64,64] = tmpv; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] :st1 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback 
is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 
1; *:1 tmp_ldXn = Rtttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] :st1 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] 
:st1 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.1D, Vt2.1D, Vt3.1D, Vt4.1D}, [Xn|SP] [, wback] :st1 {vVt^".1D", vVtt^".1D", vVttt^".1D", vVtttt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, 
[Xn|SP] [, wback]
# NOTE(review): machine-generated SLEIGH constructors for AArch64 ST1 (multiple structures),
# Arm ARM C7.2.321 — do not hand-edit; regenerate instead. Each constructor copies the base
# address into tmp_ldXn, stores the vector register elements to memory one element at a time
# (smallest-address element first, advancing tmp_ldXn by the element size), then builds
# ldst_wback for the optional post-index writeback. Rm_GPR64 is matched in the pattern but
# unused in the body — presumably consumed only by ldst_wback's register post-index form;
# TODO confirm against the ldst_wback definition.
:st1 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 
1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[112,8]; 
tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts
# NOTE(review): the recurring '# neglected zexts' marker is presumably a generator note that
# explicit zero-extensions of the sub-register reads were omitted; the sized stores (*:1, *:2,
# *:4, *:8) truncate to the element width anyway, so this looks benign — TODO confirm.
build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] :st1 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = 
Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] :st1 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[96,32]; 
tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] :st1 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0010 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback] :st1 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn 
= Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c006400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] :st1 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 
tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] :st1 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c006c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.1D, Vt2.1D, Vt3.1D}, [Xn|SP] [, wback] :st1 
{vVt^".1D", vVtt^".1D", vVttt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] :st1 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # 
C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c006400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] :st1 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 
MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] :st1 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c006c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback] :st1 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0110 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = 
tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8B}, [Xn|SP] [, wback] :st1 {vVt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c007400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4H}, [Xn|SP] [, wback] :st1 {vVt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple 
structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2S}, [Xn|SP] [, wback] :st1 {vVt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c007c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.1D}, [Xn|SP] [, wback] :st1 {vVt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.16B}, [Xn|SP] [, wback] :st1 {vVt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 
tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c007400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8H}, [Xn|SP] [, wback] :st1 {vVt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4S}, [Xn|SP] [, wback] :st1 {vVt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 
& b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c007c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2D}, [Xn|SP] [, wback] :st1 {vVt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0111 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] :st1 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; 
*:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] :st1 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c00a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] :st1 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & 
b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x0c00ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.1D, Vt2.1D}, [Xn|SP] [, wback] :st1 {vVt^".1D", vVtt^".1D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR64[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] :st1 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; 
*:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] :st1 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { 
tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c00a800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] :st1 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; 
# neglected zexts build ldst_wback; } # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c002000/mask=xbfff2000 # C7.2.321 ST1 (multiple structures) page C7-2748 line 160331 MATCH x0c802000/mask=xbfe02000 # CONSTRUCT x4c00ac00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] :st1 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1010 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[0], [Xn|SP] [, wback] :st1 {vVt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[1], [Xn|SP] [, wback] :st1 {vVt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # 
neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[2], [Xn|SP] [, wback] :st1 {vVt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d000c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[3], [Xn|SP] [, wback] :st1 {vVt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d001000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[4], [Xn|SP] [, wback] :st1 {vVt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH 
x0d800000/mask=xbfe02000 # CONSTRUCT x0d001400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[5], [Xn|SP] [, wback] :st1 {vVt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d001800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[6], [Xn|SP] [, wback] :st1 {vVt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d001c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[7], [Xn|SP] [, wback] :st1 {vVt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[8], [Xn|SP] [, wback] :st1 {vVt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & 
b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[9], [Xn|SP] [, wback] :st1 {vVt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[10], [Xn|SP] [, wback] :st1 {vVt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d000c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[11], [Xn|SP] [, wback] :st1 {vVt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected 
zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d001000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[12], [Xn|SP] [, wback] :st1 {vVt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d001400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[13], [Xn|SP] [, wback] :st1 {vVt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d001800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[14], [Xn|SP] [, wback] :st1 {vVt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH 
x0d800000/mask=xbfe02000 # CONSTRUCT x4d001c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.B}[15], [Xn|SP] [, wback] :st1 {vVt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[0], [Xn|SP] [, wback] :st1 {vVt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[1], [Xn|SP] [, wback] :st1 {vVt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d005000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[2], [Xn|SP] [, wback] :st1 {vVt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 
& b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d005800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[3], [Xn|SP] [, wback] :st1 {vVt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[4], [Xn|SP] [, wback] :st1 {vVt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[5], [Xn|SP] [, wback] :st1 {vVt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected 
zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d005000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[6], [Xn|SP] [, wback] :st1 {vVt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d005800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.H}[7], [Xn|SP] [, wback] :st1 {vVt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.S}[0], [Xn|SP] [, wback] :st1 {vVt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # 
CONSTRUCT x0d009000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.S}[1], [Xn|SP] [, wback] :st1 {vVt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.S}[2], [Xn|SP] [, wback] :st1 {vVt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d009000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.S}[3], [Xn|SP] [, wback] :st1 {vVt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x0d008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.D}[0], [Xn|SP] [, wback] :st1 {vVt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & 
b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d000000/mask=xbfff2000 # C7.2.322 ST1 (single structure) page C7-2752 line 160596 MATCH x0d800000/mask=xbfe02000 # CONSTRUCT x4d008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st1 {Vt.D}[1], [Xn|SP] [, wback] :st1 {vVt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x0c008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.8B, Vt2.8B}, [Xn|SP] [, wback] :st2 {vVt^".8B", vVtt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[40,8]; 
tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x0c008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.4H, Vt2.4H}, [Xn|SP] [, wback] :st2 {vVt^".4H", vVtt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x0c008800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.2S, Vt2.2S}, [Xn|SP] [, wback] :st2 {vVt^".2S", vVtt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; 
tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x4c008000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.16B, Vt2.16B}, [Xn|SP] [, wback] :st2 {vVt^".16B", vVtt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x4c008400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.8H, Vt2.8H}, [Xn|SP] [, wback] :st2 {vVt^".8H", vVtt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; 
tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x4c008800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.4S, Vt2.4S}, [Xn|SP] [, wback] :st2 {vVt^".4S", vVtt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c008000/mask=xbffff000 # C7.2.323 ST2 (multiple structures) page C7-2756 line 160848 MATCH x0c808000/mask=xbfe0f000 # CONSTRUCT x4c008c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.2D, Vt2.2D}, [Xn|SP] [, wback] :st2 {vVt^".2D", vVtt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b1000 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # 
C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d200000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[0], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d200400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[1], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d200800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[2], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 
(single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d200c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[3], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d201000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[4], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d201400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[5], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; 
tmp_ldXn = tmp_ldXn + 1;
# neglected zexts
build ldst_wback;
}
# NOTE(review): machine-generated SLEIGH (see the CONSTRUCT/MATCH annotations).
# Each ST2 single-structure lane form stores one element of Vt then Vt2 to
# [Xn|SP], advancing the address temp by the element size between the stores;
# the lane index is encoded in Q(b_30):S(b_12):size(b_1011).
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000
# CONSTRUCT x0d201800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st2 {Vt.B, Vt2.B}[6], [Xn|SP] [, wback]
:st2 {vVt^".B", vVtt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	# lane 6 of a .B vector starts at bit offset 6*8 = 48
	tmp_ldXn = Rn_GPR64xsp;
	*:1 tmp_ldXn = Rt_VPR128[48,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR128[48,8];
	tmp_ldXn = tmp_ldXn + 1;
	# neglected zexts
	build ldst_wback;
}
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000
# CONSTRUCT x0d201c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st2 {Vt.B, Vt2.B}[7], [Xn|SP] [, wback]
:st2 {vVt^".B", vVtt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	tmp_ldXn = Rn_GPR64xsp;
	*:1 tmp_ldXn = Rt_VPR128[56,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR128[56,8];
	tmp_ldXn = tmp_ldXn + 1;
	# neglected zexts
	build ldst_wback;
}
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000
# CONSTRUCT x4d200000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st2 {Vt.B, Vt2.B}[8], [Xn|SP] [, wback]
:st2 {vVt^".B", vVtt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	tmp_ldXn = Rn_GPR64xsp;
	*:1 tmp_ldXn
= Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d200400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[9], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d200800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[10], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d200c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[11], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & 
Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d201000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[12], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d201400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[13], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d201800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[14], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & 
b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d201c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.B, Vt2.B}[15], [Xn|SP] [, wback] :st2 {vVt^".B", vVtt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b000 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d204000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[0], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d204800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[1], [Xn|SP] [, 
wback] :st2 {vVt^".H", vVtt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d205000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[2], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d205800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[3], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT 
x4d204000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[4], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d204800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[5], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d205000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[6], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 
(single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d205800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.H, Vt2.H}[7], [Xn|SP] [, wback] :st2 {vVt^".H", vVtt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b010 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d208000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.S, Vt2.S}[0], [Xn|SP] [, wback] :st2 {vVt^".S", vVtt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d209000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.S, Vt2.S}[1], [Xn|SP] [, wback] :st2 {vVt^".S", vVtt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single 
structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d208000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.S, Vt2.S}[2], [Xn|SP] [, wback] :st2 {vVt^".S", vVtt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x4d209000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.S, Vt2.S}[3], [Xn|SP] [, wback] :st2 {vVt^".S", vVtt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000 # C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000 # CONSTRUCT x0d208400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st2 {Vt.D, Vt2.D}[0], [Xn|SP] [, wback] :st2 {vVt^".D", vVtt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; 
tmp_ldXn = tmp_ldXn + 8;
# neglected zexts
build ldst_wback;
}
# NOTE(review): machine-generated SLEIGH. ST2.D[1] stores the high 64-bit
# lane of Vt then Vt2; the ST3 multiple-structure form below interleaves
# element i of Vt/Vt2/Vt3 in memory, stepping the temp by the element size.
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0d200000/mask=xbfff2000
# C7.2.324 ST2 (single structure) page C7-2759 line 161029 MATCH x0da00000/mask=xbfe02000
# CONSTRUCT x4d208400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st2 {Vt.D, Vt2.D}[1], [Xn|SP] [, wback]
:st2 {vVt^".D", vVtt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b100 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	tmp_ldXn = Rn_GPR64xsp;
	*:8 tmp_ldXn = Rt_VPR128[64,64];
	tmp_ldXn = tmp_ldXn + 8;
	*:8 tmp_ldXn = Rtt_VPR128[64,64];
	tmp_ldXn = tmp_ldXn + 8;
	# neglected zexts
	build ldst_wback;
}
# C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000
# C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000
# CONSTRUCT x0c004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st3 {Vt.8B, Vt2.8B, Vt3.8B}, [Xn|SP] [, wback]
:st3 {vVt^".8B", vVtt^".8B", vVttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	tmp_ldXn = Rn_GPR64xsp;
	*:1 tmp_ldXn = Rt_VPR64[0,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR64[0,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rttt_VPR64[0,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rt_VPR64[8,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR64[8,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rttt_VPR64[8,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rt_VPR64[16,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR64[16,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rttt_VPR64[16,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rt_VPR64[24,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR64[24,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rttt_VPR64[24,8];
	tmp_ldXn =
tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000 # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000 # CONSTRUCT x0c004400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.4H, Vt2.4H, Vt3.4H}, [Xn|SP] [, wback] :st3 {vVt^".4H", vVtt^".4H", vVttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn 
+ 2; # neglected zexts build ldst_wback; } # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000 # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000 # CONSTRUCT x0c004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.2S, Vt2.2S, Vt3.2S}, [Xn|SP] [, wback] :st3 {vVt^".2S", vVtt^".2S", vVttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000 # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000 # CONSTRUCT x4c004000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.16B, Vt2.16B, Vt3.16B}, [Xn|SP] [, wback] :st3 {vVt^".16B", vVtt^".16B", vVttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[104,8]; 
tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000 # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000 # CONSTRUCT x4c004400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.8H, Vt2.8H, Vt3.8H}, [Xn|SP] [, wback] :st3 {vVt^".8H", vVtt^".8H", vVttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 
tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000 # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000 # CONSTRUCT x4c004800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.4S, Vt2.4S, Vt3.4S}, [Xn|SP] [, wback] :st3 {vVt^".4S", vVtt^".4S", vVttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c004000/mask=xbffff000 # C7.2.325 ST3 (multiple structures) page C7-2763 line 161283 MATCH x0c804000/mask=xbfe0f000 # CONSTRUCT x4c004c00/mask=xff60fc00 
# MATCHED 2 DOCUMENTED OPCODES
# st3 {Vt.2D, Vt2.2D, Vt3.2D}, [Xn|SP] [, wback]
:st3 {vVt^".2D", vVtt^".2D", vVttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0100 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	# NOTE(review): machine-generated; interleaves 64-bit lane i of Vt/Vt2/Vt3
	tmp_ldXn = Rn_GPR64xsp;
	*:8 tmp_ldXn = Rt_VPR128[0,64];
	tmp_ldXn = tmp_ldXn + 8;
	*:8 tmp_ldXn = Rtt_VPR128[0,64];
	tmp_ldXn = tmp_ldXn + 8;
	*:8 tmp_ldXn = Rttt_VPR128[0,64];
	tmp_ldXn = tmp_ldXn + 8;
	*:8 tmp_ldXn = Rt_VPR128[64,64];
	tmp_ldXn = tmp_ldXn + 8;
	*:8 tmp_ldXn = Rtt_VPR128[64,64];
	tmp_ldXn = tmp_ldXn + 8;
	*:8 tmp_ldXn = Rttt_VPR128[64,64];
	tmp_ldXn = tmp_ldXn + 8;
	# neglected zexts
	build ldst_wback;
}
# C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000
# C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000
# CONSTRUCT x0d002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st3 {Vt.B, Vt2.B, Vt3.B}[0], [Xn|SP] [, wback]
:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64
{
	tmp_ldXn = Rn_GPR64xsp;
	*:1 tmp_ldXn = Rt_VPR128[0,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rtt_VPR128[0,8];
	tmp_ldXn = tmp_ldXn + 1;
	*:1 tmp_ldXn = Rttt_VPR128[0,8];
	tmp_ldXn = tmp_ldXn + 1;
	# neglected zexts
	build ldst_wback;
}
# C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000
# C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000
# CONSTRUCT x0d002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES
# st3 {Vt.B, Vt2.B, Vt3.B}[1], [Xn|SP] [, wback]
:st3 {vVt^".B", vVtt^".B", vVttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt &
Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[2], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[3], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 
# C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d003000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[4], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d003400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[5], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d003800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[6], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & 
Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d003c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[7], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d002000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[8], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) 
page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d002400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[9], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[9], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d002800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[10], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d002c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[11], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & 
ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d003000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[12], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d003400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[13], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 
MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d003800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[14], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d003c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.B, Vt2.B, Vt3.B}[15], [Xn|SP] [, wback] :st3 {vVt^".B", vVtt^".B", vVttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[0], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 
{ tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[1], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[2], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH 
x0d802000/mask=xbfe02000 # CONSTRUCT x0d007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[3], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d006000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[4], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d006800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[5], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn 
= Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d007000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[6], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d007800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.H, Vt2.H, Vt3.H}[7], [Xn|SP] [, wback] :st3 {vVt^".H", vVtt^".H", vVttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH 
x0d802000/mask=xbfe02000 # CONSTRUCT x0d00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.S, Vt2.S, Vt3.S}[0], [Xn|SP] [, wback] :st3 {vVt^".S", vVtt^".S", vVttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d00b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.S, Vt2.S, Vt3.S}[1], [Xn|SP] [, wback] :st3 {vVt^".S", vVtt^".S", vVttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d00a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.S, Vt2.S, Vt3.S}[2], [Xn|SP] [, wback] :st3 {vVt^".S", vVtt^".S", vVttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = 
Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x4d00b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.S, Vt2.S, Vt3.S}[3], [Xn|SP] [, wback] :st3 {vVt^".S", vVtt^".S", vVttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # CONSTRUCT x0d00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.D, Vt2.D, Vt3.D}[0], [Xn|SP] [, wback] :st3 {vVt^".D", vVtt^".D", vVttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d002000/mask=xbfff2000 # C7.2.326 ST3 (single structure) page C7-2766 line 161466 MATCH x0d802000/mask=xbfe02000 # 
CONSTRUCT x4d00a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st3 {Vt.D, Vt2.D, Vt3.D}[1], [Xn|SP] [, wback] :st3 {vVt^".D", vVtt^".D", vVttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=0 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c800000/mask=xbfe0f000 # CONSTRUCT x0c000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.8B, Vt2.8B, Vt3.8B, Vt4.8B}, [Xn|SP] [, wback] :st4 {vVt^".8B", vVtt^".8B", vVttt^".8B", vVtttt^".8B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rtt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR64[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c800000/mask=xbfe0f000 # CONSTRUCT x0c000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.4H, Vt2.4H, Vt3.4H, Vt4.4H}, [Xn|SP] [, wback] :st4 {vVt^".4H", vVtt^".4H", vVttt^".4H", vVtttt^".4H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[16,16]; 
tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR64[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c800000/mask=xbfe0f000 # CONSTRUCT x0c000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.2S, Vt2.2S, Vt3.2S, Vt4.2S}, [Xn|SP] [, wback] :st4 {vVt^".2S", vVtt^".2S", vVttt^".2S", vVtttt^".2S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR64 & vVtt & Rtt_VPR64 & vVttt & Rttt_VPR64 & vVtttt & Rtttt_VPR64 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR64[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR64[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH 
x0c800000/mask=xbfe0f000 # CONSTRUCT x4c000000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.16B, Vt2.16B, Vt3.16B, Vt4.16B}, [Xn|SP] [, wback] :st4 {vVt^".16B", vVtt^".16B", vVttt^".16B", vVtttt^".16B"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c800000/mask=xbfe0f000 # CONSTRUCT x4c000400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.8H, Vt2.8H, Vt3.8H, Vt4.8H}, [Xn|SP] [, wback] :st4 {vVt^".8H", vVtt^".8H", vVttt^".8H", vVtttt^".8H"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = 
tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c800000/mask=xbfe0f000 # CONSTRUCT x4c000800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.4S, Vt2.4S, Vt3.4S, Vt4.4S}, [Xn|SP] [, wback] :st4 {vVt^".4S", vVtt^".4S", vVttt^".4S", vVtttt^".4S"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = 
Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c000000/mask=xbffff000 # C7.2.327 ST4 (multiple structures) page C7-2770 line 161722 MATCH x0c800000/mask=xbfe0f000 # CONSTRUCT x4c000c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.2D, Vt2.2D, Vt3.2D, Vt4.2D}, [Xn|SP] [, wback] :st4 {vVt^".2D", vVtt^".2D", vVttt^".2D", vVtttt^".2D"}, [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001100 & b_22=0 & b_21=0 & b_1215=0b0000 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d202000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[0], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & 
b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[0,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d202400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[1], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[8,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d202800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[2], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn 
= Rt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[16,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d202c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[3], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[24,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d203000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[4], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[32,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[32,8]; tmp_ldXn = 
tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d203400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[5], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[40,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d203800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[6], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[48,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH 
x0da02000/mask=xbfe02000 # CONSTRUCT x0d203c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[7], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[56,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d202000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[8], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[8], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[64,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d202400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[9], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[9], 
[Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[72,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d202800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[10], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[10], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[80,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d202c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[11], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[11], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=0 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & 
Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[88,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d203000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[12], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[12], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[96,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d203400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[13], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[13], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = 
Rttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[104,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d203800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[14], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[14], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[112,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d203c00/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.B, Vt2.B, Vt3.B, Vt4.B}[15], [Xn|SP] [, wback] :st4 {vVt^".B", vVtt^".B", vVttt^".B", vVtttt^".B"}[15], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b001 & b_12=1 & b_1011=0b11 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:1 tmp_ldXn = Rt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; *:1 tmp_ldXn = Rtttt_VPR128[120,8]; tmp_ldXn = tmp_ldXn + 1; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 
161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d206000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[0], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[0,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d206800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[1], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[16,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d207000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 
{Vt.H, Vt2.H, Vt3.H, Vt4.H}[2], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[32,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d207800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[3], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[48,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d206000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[4], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[4], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & 
b_1315=0b011 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[64,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d206800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[5], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[5], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=0 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[80,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d207000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[6], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[6], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = 
Rt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[96,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d207800/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.H, Vt2.H, Vt3.H, Vt4.H}[7], [Xn|SP] [, wback] :st4 {vVt^".H", vVtt^".H", vVttt^".H", vVtttt^".H"}[7], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b011 & b_12=1 & b_1011=0b10 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:2 tmp_ldXn = Rt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; *:2 tmp_ldXn = Rtttt_VPR128[112,16]; tmp_ldXn = tmp_ldXn + 2; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d20a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[0], [Xn|SP] [, wback] :st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[0,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[0,32]; 
tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d20b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[1], [Xn|SP] [, wback] :st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[32,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d20a000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[2], [Xn|SP] [, wback] :st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[2], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[64,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 
line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d20b000/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.S, Vt2.S, Vt3.S, Vt4.S}[3], [Xn|SP] [, wback] :st4 {vVt^".S", vVtt^".S", vVttt^".S", vVtttt^".S"}[3], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=1 & b_1011=0b00 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:4 tmp_ldXn = Rt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; *:4 tmp_ldXn = Rtttt_VPR128[96,32]; tmp_ldXn = tmp_ldXn + 4; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x0d20a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[0], [Xn|SP] [, wback] :st4 {vVt^".D", vVtt^".D", vVttt^".D", vVtttt^".D"}[0], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=0 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR128[0,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0d202000/mask=xbfff2000 # C7.2.328 ST4 (single structure) page C7-2773 line 161907 MATCH x0da02000/mask=xbfe02000 # CONSTRUCT x4d20a400/mask=xff60fc00 MATCHED 2 DOCUMENTED OPCODES # st4 {Vt.D, Vt2.D, Vt3.D, Vt4.D}[1], [Xn|SP] [, wback] :st4 {vVt^".D", vVtt^".D", vVttt^".D", 
vVtttt^".D"}[1], [Rn_GPR64xsp]^ldst_wback is b_31=0 & b_30=1 & b_2429=0b001101 & b_22=0 & b_21=1 & b_1315=0b101 & b_12=0 & b_1011=0b01 & vVt & Rt_VPR128 & vVtt & Rtt_VPR128 & vVttt & Rttt_VPR128 & vVtttt & Rtttt_VPR128 & Rn_GPR64xsp & ldst_wback & Rm_GPR64 { tmp_ldXn = Rn_GPR64xsp; *:8 tmp_ldXn = Rt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; *:8 tmp_ldXn = Rtttt_VPR128[64,64]; tmp_ldXn = tmp_ldXn + 8; # neglected zexts build ldst_wback; } ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64neon.sinc ================================================ # C7.2.1 ABS page C7-1009 line 58362 KEEPWITH # # The semantics in this file are auto-generated with armit.py script # in the andre directory (capture output and replace file): # # python ../../../ProcessorTest/test/andre/scrape/armit.py --arch a64 --sort --refurb --smacro primitive --sinc languages/AARCH64neon.sinc # # The AUNIT tests are run using the command line options from the # comment with the python script aunit.py in the cunit directory: # # (cd ../../../ProcessorTest/test/cunit; python aunit.py OPTIONS) # # (aunit.py may require a local copy of a current andre exhaust). 
# C7.2.1 ABS page C7-2017 line 117868 MATCH x5e20b800/mask=xff3ffc00 # CONSTRUCT x5ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =abs # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1 # AUNIT --inst x5ee0b800/mask=xfffffc00 --status pass # ABS Scalar :abs Rd_FPR64, Rn_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_FPR64 & Rn_FPR64 & Zd { local test = Rn_FPR64 s< 0; Rd_FPR64 = (zext(!test)*Rn_FPR64) + (zext(test)*(-Rn_FPR64)); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x0e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@1 # AUNIT --inst x0e20b800/mask=xfffffc00 --status pass # ABS Vector 8B when size = 00 , Q = 0 :abs Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { # simd unary Rd_VPR64.8B = MP_INT_ABS(Rn_VPR64.8B) on lane size 1 Rd_VPR64.8B[0,8] = MP_INT_ABS(Rn_VPR64.8B[0,8]); Rd_VPR64.8B[8,8] = MP_INT_ABS(Rn_VPR64.8B[8,8]); Rd_VPR64.8B[16,8] = MP_INT_ABS(Rn_VPR64.8B[16,8]); Rd_VPR64.8B[24,8] = MP_INT_ABS(Rn_VPR64.8B[24,8]); Rd_VPR64.8B[32,8] = MP_INT_ABS(Rn_VPR64.8B[32,8]); Rd_VPR64.8B[40,8] = MP_INT_ABS(Rn_VPR64.8B[40,8]); Rd_VPR64.8B[48,8] = MP_INT_ABS(Rn_VPR64.8B[48,8]); Rd_VPR64.8B[56,8] = MP_INT_ABS(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x4e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@1 # AUNIT --inst x4e20b800/mask=xfffffc00 --status pass # ABS Vector SIMD 16B when size = 00 , Q = 1 :abs Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000101110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { # simd unary Rd_VPR128.16B = MP_INT_ABS(Rn_VPR128.16B) on lane size 1 Rd_VPR128.16B[0,8] = 
# NOTE(review): Auto-generated SLEIGH semantics for AArch64 NEON ABS (vector) and ADD (vector/scalar). Each constructor expands the operation lane-by-lane via bitfield slices ([offset,width] in bits); zext_zd/zext_zq zero the upper SVE Zd bytes after writing the 64-/128-bit destination. Line layout preserved from extraction — presumably joined from multiple original lines; verify against upstream AARCH64instructions.sinc before reformatting.
MP_INT_ABS(Rn_VPR128.16B[0,8]); Rd_VPR128.16B[8,8] = MP_INT_ABS(Rn_VPR128.16B[8,8]); Rd_VPR128.16B[16,8] = MP_INT_ABS(Rn_VPR128.16B[16,8]); Rd_VPR128.16B[24,8] = MP_INT_ABS(Rn_VPR128.16B[24,8]); Rd_VPR128.16B[32,8] = MP_INT_ABS(Rn_VPR128.16B[32,8]); Rd_VPR128.16B[40,8] = MP_INT_ABS(Rn_VPR128.16B[40,8]); Rd_VPR128.16B[48,8] = MP_INT_ABS(Rn_VPR128.16B[48,8]); Rd_VPR128.16B[56,8] = MP_INT_ABS(Rn_VPR128.16B[56,8]); Rd_VPR128.16B[64,8] = MP_INT_ABS(Rn_VPR128.16B[64,8]); Rd_VPR128.16B[72,8] = MP_INT_ABS(Rn_VPR128.16B[72,8]); Rd_VPR128.16B[80,8] = MP_INT_ABS(Rn_VPR128.16B[80,8]); Rd_VPR128.16B[88,8] = MP_INT_ABS(Rn_VPR128.16B[88,8]); Rd_VPR128.16B[96,8] = MP_INT_ABS(Rn_VPR128.16B[96,8]); Rd_VPR128.16B[104,8] = MP_INT_ABS(Rn_VPR128.16B[104,8]); Rd_VPR128.16B[112,8] = MP_INT_ABS(Rn_VPR128.16B[112,8]); Rd_VPR128.16B[120,8] = MP_INT_ABS(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x0e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@2 # AUNIT --inst x0e60b800/mask=xfffffc00 --status pass # ABS Vector SIMD 4H when size = 01 , Q = 0 :abs Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = MP_INT_ABS(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = MP_INT_ABS(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = MP_INT_ABS(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = MP_INT_ABS(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = MP_INT_ABS(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x4e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@2 # AUNIT --inst x4e60b800/mask=xfffffc00 --status pass # ABS Vector SIMD 8H when size = 01 , Q = 1 :abs Rd_VPR128.8H, Rn_VPR128.8H is 
b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000101110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = MP_INT_ABS(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = MP_INT_ABS(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = MP_INT_ABS(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = MP_INT_ABS(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = MP_INT_ABS(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = MP_INT_ABS(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = MP_INT_ABS(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = MP_INT_ABS(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = MP_INT_ABS(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x0ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@4 # AUNIT --inst x0ea0b800/mask=xfffffc00 --status pass # ABS Vector SIMD 2S when size = 10 , Q = 0 :abs Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = MP_INT_ABS(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = MP_INT_ABS(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = MP_INT_ABS(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x4ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@4 # AUNIT --inst x4ea0b800/mask=xfffffc00 --status pass # ABS Vector SIMD 4S when size = 10 , Q = 1 :abs Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000101110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = MP_INT_ABS(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = MP_INT_ABS(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = MP_INT_ABS(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = MP_INT_ABS(Rn_VPR128.4S[64,32]); 
Rd_VPR128.4S[96,32] = MP_INT_ABS(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.1 ABS page C7-2017 line 117868 MATCH x0e20b800/mask=xbf3ffc00 # CONSTRUCT x4ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_abs/1@8 # AUNIT --inst x4ee0b800/mask=xfffffc00 --status pass # ABS Vector SIMD 2D when size = 11 , Q = 1 :abs Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000101110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = MP_INT_ABS(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = MP_INT_ABS(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = MP_INT_ABS(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x5e208400/mask=xff20fc00 # CONSTRUCT x5ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =+ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2 # AUNIT --inst x5ee08400/mask=xffe0fc00 --status pass :add Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x10 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Rn_FPR64 + Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x4e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2 ARG3 =$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@1 # AUNIT --inst x4e208400/mask=xffe0fc00 --status pass :add Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x10 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B + Rm_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] + Rm_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] + Rm_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] + 
Rm_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] + Rm_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] + Rm_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] + Rm_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] + Rm_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] + Rm_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] + Rm_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] + Rm_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] + Rm_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] + Rm_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] + Rm_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] + Rm_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] + Rm_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] + Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x4e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@2 # AUNIT --inst x4e608400/mask=xffe0fc00 --status pass :add Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 
16 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x4ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2 ARG3 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@4 # AUNIT --inst x4ea08400/mask=xffe0fc00 --status pass :add Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x4ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@8 # AUNIT --inst x4ee08400/mask=xffe0fc00 --status pass :add Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix Rd_VPR128.2D = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x0e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@1 # AUNIT --inst x0e208400/mask=xffe0fc00 --status pass :add Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x10 & b_1010=1 & 
# NOTE(review): ADD (vector, 64-bit arrangements) and ADDHN/ADDHN2 (add then take the high half of each widened lane). ADDHN2 writes the upper half of the 128-bit destination; the shuffle comments (@x-y) map source lane x to destination lane y. Tokens preserved verbatim from extraction — confirm against upstream AARCH64instructions.sinc.
Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B + Rm_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] + Rm_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] + Rm_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] + Rm_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] + Rm_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] + Rm_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] + Rm_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] + Rm_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] + Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x0e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@2 # AUNIT --inst x0e608400/mask=xffe0fc00 --status pass :add Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x10 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H + Rm_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] + Rm_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] + Rm_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] + Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] + Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.2 ADD (vector) page C7-2019 line 118000 MATCH x0e208400/mask=xbf20fc00 # CONSTRUCT x0ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2 ARG3 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_add/2@4 # AUNIT --inst x0ea08400/mask=xffe0fc00 --status pass :add Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x10 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rn_VPR64.2S + Rm_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = 
Rn_VPR64.2S[0,32] + Rm_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] + Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.3 ADDHN, ADDHN2 page C7-2021 line 118138 MATCH x0e204000/mask=xbf20fc00 # CONSTRUCT x0ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@8 &=$shuffle@1-0@3-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn/3@8 # AUNIT --inst x0ea04000/mask=xffe0fc00 --status pass :addhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Rd_VPR128 & Zd { # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR64.2S = TMPQ1 (@1-0@3-1) lane size 4 Rd_VPR64.2S[0,32] = TMPQ1[32,32]; Rd_VPR64.2S[32,32] = TMPQ1[96,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.3 ADDHN, ADDHN2 page C7-2021 line 118138 MATCH x0e204000/mask=xbf20fc00 # CONSTRUCT x0e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@4 &=$shuffle@1-0@3-1@5-2@7-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn/3@4 # AUNIT --inst x0e604000/mask=xffe0fc00 --status pass :addhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Rd_VPR128 & Zd { # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR64.4H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 Rd_VPR64.4H[0,16] = TMPQ1[16,16]; Rd_VPR64.4H[16,16] = TMPQ1[48,16]; Rd_VPR64.4H[32,16] = TMPQ1[80,16]; Rd_VPR64.4H[48,16] = TMPQ1[112,16]; 
zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.3 ADDHN, ADDHN2 page C7-2021 line 118138 MATCH x0e204000/mask=xbf20fc00 # CONSTRUCT x0e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@2 &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn/3@2 # AUNIT --inst x0e204000/mask=xffe0fc00 --status pass :addhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Rd_VPR128 & Zd { # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR64.8B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 Rd_VPR64.8B[0,8] = TMPQ1[8,8]; Rd_VPR64.8B[8,8] = TMPQ1[24,8]; Rd_VPR64.8B[16,8] = TMPQ1[40,8]; Rd_VPR64.8B[24,8] = TMPQ1[56,8]; Rd_VPR64.8B[32,8] = TMPQ1[72,8]; Rd_VPR64.8B[40,8] = TMPQ1[88,8]; Rd_VPR64.8B[48,8] = TMPQ1[104,8]; Rd_VPR64.8B[56,8] = TMPQ1[120,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.3 ADDHN, ADDHN2 page C7-2021 line 118138 MATCH x0e204000/mask=xbf20fc00 # CONSTRUCT x4e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@2 &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn2/3@2 # AUNIT --inst x4e204000/mask=xffe0fc00 --status pass :addhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & 
Rd_VPR128.16B & Rd_VPR128 & Zd { # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] + Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 Rd_VPR128.16B[64,8] = TMPQ1[8,8]; Rd_VPR128.16B[72,8] = TMPQ1[24,8]; Rd_VPR128.16B[80,8] = TMPQ1[40,8]; Rd_VPR128.16B[88,8] = TMPQ1[56,8]; Rd_VPR128.16B[96,8] = TMPQ1[72,8]; Rd_VPR128.16B[104,8] = TMPQ1[88,8]; Rd_VPR128.16B[112,8] = TMPQ1[104,8]; Rd_VPR128.16B[120,8] = TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.3 ADDHN, ADDHN2 page C7-2021 line 118138 MATCH x0e204000/mask=xbf20fc00 # CONSTRUCT x4ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@8 &=$shuffle@1-2@3-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn2/3@8 # AUNIT --inst x4ea04000/mask=xffe0fc00 --status pass :addhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Rd_VPR128 & Zd { # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 Rd_VPR128.4S[64,32] = TMPQ1[32,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.3 ADDHN, ADDHN2 page C7-2021 line 118138 MATCH x0e204000/mask=xbf20fc00 # CONSTRUCT x4e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 
ARG3 $+@4 &=$shuffle@1-4@3-5@5-6@7-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_addhn2/3@4 # AUNIT --inst x4e604000/mask=xffe0fc00 --status pass :addhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Rd_VPR128 & Zd { # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 Rd_VPR128.8H[64,16] = TMPQ1[16,16]; Rd_VPR128.8H[80,16] = TMPQ1[48,16]; Rd_VPR128.8H[96,16] = TMPQ1[80,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.4 ADDP (scalar) page C7-2023 line 118265 MATCH x5e31b800/mask=xff3ffc00 # CONSTRUCT x5ef1b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =#+ # SMACRO(pseudo) ARG1 ARG2 =NEON_addp/1@8 # AUNIT --inst x5ef1b800/mask=xfffffc00 --status pass :addp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=0 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0x1b & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { # sipd infix Rd_FPR64 = +(Rn_VPR128.2D) on pairs lane size (8 to 8) local tmp1 = Rn_VPR128.2D[0,64]; local tmp2 = Rn_VPR128.2D[64,64]; Rd_FPR64 = tmp1 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x4e20bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@1 # AUNIT --inst x4e20bc00/mask=xffe0fc00 --status pass :addp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x17 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ1 = 0; # 
# NOTE(review): ADDP (vector) pairwise additions — adjacent lanes of Rn fill the low half of a temp, adjacent lanes of Rm the high half — followed by ADDV across-vector reductions accumulated into a scalar FPR. "sipd" in the generated comments is presumably a generator typo for "simd". Tokens preserved verbatim from extraction.
sipd infix TMPQ1 = +(Rn_VPR128.16B,Rm_VPR128.16B) on pairs lane size (1 to 1) local tmp2 = Rn_VPR128.16B[0,8]; local tmp3 = Rn_VPR128.16B[8,8]; TMPQ1[0,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[16,8]; tmp3 = Rn_VPR128.16B[24,8]; TMPQ1[8,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[32,8]; tmp3 = Rn_VPR128.16B[40,8]; TMPQ1[16,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[48,8]; tmp3 = Rn_VPR128.16B[56,8]; TMPQ1[24,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[64,8]; tmp3 = Rn_VPR128.16B[72,8]; TMPQ1[32,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[80,8]; tmp3 = Rn_VPR128.16B[88,8]; TMPQ1[40,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[96,8]; tmp3 = Rn_VPR128.16B[104,8]; TMPQ1[48,8] = tmp2 + tmp3; tmp2 = Rn_VPR128.16B[112,8]; tmp3 = Rn_VPR128.16B[120,8]; TMPQ1[56,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[0,8]; tmp3 = Rm_VPR128.16B[8,8]; TMPQ1[64,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[16,8]; tmp3 = Rm_VPR128.16B[24,8]; TMPQ1[72,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[32,8]; tmp3 = Rm_VPR128.16B[40,8]; TMPQ1[80,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[48,8]; tmp3 = Rm_VPR128.16B[56,8]; TMPQ1[88,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[64,8]; tmp3 = Rm_VPR128.16B[72,8]; TMPQ1[96,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[80,8]; tmp3 = Rm_VPR128.16B[88,8]; TMPQ1[104,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[96,8]; tmp3 = Rm_VPR128.16B[104,8]; TMPQ1[112,8] = tmp2 + tmp3; tmp2 = Rm_VPR128.16B[112,8]; tmp3 = Rm_VPR128.16B[120,8]; TMPQ1[120,8] = tmp2 + tmp3; Rd_VPR128.16B = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x4ee0bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@8 # AUNIT --inst x4ee0bc00/mask=xffe0fc00 --status pass :addp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x17 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = 
+(Rn_VPR128.2D,Rm_VPR128.2D) on pairs lane size (8 to 8) local tmp2 = Rn_VPR128.2D[0,64]; local tmp3 = Rn_VPR128.2D[64,64]; TMPQ1[0,64] = tmp2 + tmp3; tmp2 = Rm_VPR128.2D[0,64]; tmp3 = Rm_VPR128.2D[64,64]; TMPQ1[64,64] = tmp2 + tmp3; Rd_VPR128.2D = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x0ea0bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@4 # AUNIT --inst x0ea0bc00/mask=xffe0fc00 --status pass :addp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x17 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.2S,Rm_VPR64.2S) on pairs lane size (4 to 4) local tmp2 = Rn_VPR64.2S[0,32]; local tmp3 = Rn_VPR64.2S[32,32]; TMPD1[0,32] = tmp2 + tmp3; tmp2 = Rm_VPR64.2S[0,32]; tmp3 = Rm_VPR64.2S[32,32]; TMPD1[32,32] = tmp2 + tmp3; Rd_VPR64.2S = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x0e60bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@2 # AUNIT --inst x0e60bc00/mask=xffe0fc00 --status pass :addp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x17 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.4H,Rm_VPR64.4H) on pairs lane size (2 to 2) local tmp2 = Rn_VPR64.4H[0,16]; local tmp3 = Rn_VPR64.4H[16,16]; TMPD1[0,16] = tmp2 + tmp3; tmp2 = Rn_VPR64.4H[32,16]; tmp3 = Rn_VPR64.4H[48,16]; TMPD1[16,16] = tmp2 + tmp3; tmp2 = Rm_VPR64.4H[0,16]; tmp3 = Rm_VPR64.4H[16,16]; TMPD1[32,16] = tmp2 + tmp3; tmp2 = Rm_VPR64.4H[32,16]; tmp3 = Rm_VPR64.4H[48,16]; TMPD1[48,16] = tmp2 + tmp3; Rd_VPR64.4H = 
TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x4ea0bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@4 # AUNIT --inst x4ea0bc00/mask=xffe0fc00 --status pass :addp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x17 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.4S,Rm_VPR128.4S) on pairs lane size (4 to 4) local tmp2 = Rn_VPR128.4S[0,32]; local tmp3 = Rn_VPR128.4S[32,32]; TMPQ1[0,32] = tmp2 + tmp3; tmp2 = Rn_VPR128.4S[64,32]; tmp3 = Rn_VPR128.4S[96,32]; TMPQ1[32,32] = tmp2 + tmp3; tmp2 = Rm_VPR128.4S[0,32]; tmp3 = Rm_VPR128.4S[32,32]; TMPQ1[64,32] = tmp2 + tmp3; tmp2 = Rm_VPR128.4S[64,32]; tmp3 = Rm_VPR128.4S[96,32]; TMPQ1[96,32] = tmp2 + tmp3; Rd_VPR128.4S = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x0e20bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@1 # AUNIT --inst x0e20bc00/mask=xffe0fc00 --status pass :addp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x17 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.8B,Rm_VPR64.8B) on pairs lane size (1 to 1) local tmp2 = Rn_VPR64.8B[0,8]; local tmp3 = Rn_VPR64.8B[8,8]; TMPD1[0,8] = tmp2 + tmp3; tmp2 = Rn_VPR64.8B[16,8]; tmp3 = Rn_VPR64.8B[24,8]; TMPD1[8,8] = tmp2 + tmp3; tmp2 = Rn_VPR64.8B[32,8]; tmp3 = Rn_VPR64.8B[40,8]; TMPD1[16,8] = tmp2 + tmp3; tmp2 = Rn_VPR64.8B[48,8]; tmp3 = Rn_VPR64.8B[56,8]; TMPD1[24,8] = tmp2 + tmp3; tmp2 = Rm_VPR64.8B[0,8]; tmp3 = Rm_VPR64.8B[8,8]; TMPD1[32,8] = tmp2 + tmp3; tmp2 = 
Rm_VPR64.8B[16,8]; tmp3 = Rm_VPR64.8B[24,8]; TMPD1[40,8] = tmp2 + tmp3; tmp2 = Rm_VPR64.8B[32,8]; tmp3 = Rm_VPR64.8B[40,8]; TMPD1[48,8] = tmp2 + tmp3; tmp2 = Rm_VPR64.8B[48,8]; tmp3 = Rm_VPR64.8B[56,8]; TMPD1[56,8] = tmp2 + tmp3; Rd_VPR64.8B = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.5 ADDP (vector) page C7-2025 line 118351 MATCH x0e20bc00/mask=xbf20fc00 # CONSTRUCT x4e60bc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 ARG3 =#+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_addp/2@2 # AUNIT --inst x4e60bc00/mask=xffe0fc00 --status pass :addp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x17 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.8H,Rm_VPR128.8H) on pairs lane size (2 to 2) local tmp2 = Rn_VPR128.8H[0,16]; local tmp3 = Rn_VPR128.8H[16,16]; TMPQ1[0,16] = tmp2 + tmp3; tmp2 = Rn_VPR128.8H[32,16]; tmp3 = Rn_VPR128.8H[48,16]; TMPQ1[16,16] = tmp2 + tmp3; tmp2 = Rn_VPR128.8H[64,16]; tmp3 = Rn_VPR128.8H[80,16]; TMPQ1[32,16] = tmp2 + tmp3; tmp2 = Rn_VPR128.8H[96,16]; tmp3 = Rn_VPR128.8H[112,16]; TMPQ1[48,16] = tmp2 + tmp3; tmp2 = Rm_VPR128.8H[0,16]; tmp3 = Rm_VPR128.8H[16,16]; TMPQ1[64,16] = tmp2 + tmp3; tmp2 = Rm_VPR128.8H[32,16]; tmp3 = Rm_VPR128.8H[48,16]; TMPQ1[80,16] = tmp2 + tmp3; tmp2 = Rm_VPR128.8H[64,16]; tmp3 = Rm_VPR128.8H[80,16]; TMPQ1[96,16] = tmp2 + tmp3; tmp2 = Rm_VPR128.8H[96,16]; tmp3 = Rm_VPR128.8H[112,16]; TMPQ1[112,16] = tmp2 + tmp3; Rd_VPR128.8H = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.6 ADDV page C7-2027 line 118452 MATCH x0e31b800/mask=xbf3ffc00 # CONSTRUCT x4e31b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@1 # AUNIT --inst x4e31b800/mask=xfffffc00 --status nopcodeop :addv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { 
Rd_FPR8 = Rn_VPR128.16B[0,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[8,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[16,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[24,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[32,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[40,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[48,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[56,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[64,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[72,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[80,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[88,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[96,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[104,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[112,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 31 bytes of Zd } # C7.2.6 ADDV page C7-2027 line 118452 MATCH x0e31b800/mask=xbf3ffc00 # CONSTRUCT x0e31b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@1 # AUNIT --inst x0e31b800/mask=xfffffc00 --status nopcodeop :addv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { Rd_FPR8 = Rn_VPR64.8B[0,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[8,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[16,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[24,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[32,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[40,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[48,8]; Rd_FPR8 = Rd_FPR8 + Rn_VPR64.8B[56,8]; zext_zq(Zd); # zero upper 31 bytes of Zd } # C7.2.6 ADDV page C7-2027 line 118452 MATCH x0e31b800/mask=xbf3ffc00 # CONSTRUCT x0e71b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@2 # AUNIT --inst x0e71b800/mask=xfffffc00 --status nopcodeop :addv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { Rd_FPR16 = Rn_VPR64.4H[0,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR64.4H[16,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR64.4H[32,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR64.4H[48,16]; zext_zh(Zd); # zero upper 30 
bytes of Zd } # C7.2.6 ADDV page C7-2027 line 118452 MATCH x0e31b800/mask=xbf3ffc00 # CONSTRUCT x4e71b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@2 # AUNIT --inst x4e71b800/mask=xfffffc00 --status nopcodeop :addv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { Rd_FPR16 = Rn_VPR128.8H[0,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[16,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[32,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[48,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[64,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[80,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[96,16]; Rd_FPR16 = Rd_FPR16 + Rn_VPR128.8H[112,16]; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.6 ADDV page C7-2027 line 118452 MATCH x0e31b800/mask=xbf3ffc00 # CONSTRUCT x4eb1b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2[0]:4 ARG2[1]:4 + ARG2[2]:4 ARG2[3]:4 + =+ # SMACRO(pseudo) ARG1 ARG2 =NEON_addv/1@4 # AUNIT --inst x4eb1b800/mask=xfffffc00 --status pass :addv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1b & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { local tmp1:4 = Rn_VPR128.4S[0,32]; local tmp2:4 = Rn_VPR128.4S[32,32]; local tmp3:4 = tmp1 + tmp2; local tmp4:4 = Rn_VPR128.4S[64,32]; local tmp5:4 = Rn_VPR128.4S[96,32]; local tmp6:4 = tmp4 + tmp5; Rd_FPR32 = tmp3 + tmp6; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.7 AESD page C7-2029 line 118544 MATCH x4e285800/mask=xfffffc00 # CONSTRUCT x4e285800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_aesd/2 # AUNIT --inst x4e285800/mask=xfffffc00 --status noqemu :aesd Rd_VPR128.16B, Rn_VPR128.16B is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_aesd(Rd_VPR128.16B, Rn_VPR128.16B); } # C7.2.8 AESE page C7-2030 line 118606 MATCH 
x4e284800/mask=xfffffc00 # CONSTRUCT x4e284800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_aese/2 # AUNIT --inst x4e284800/mask=xfffffc00 --status noqemu :aese Rd_VPR128.16B, Rn_VPR128.16B is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=4 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_aese(Rd_VPR128.16B, Rn_VPR128.16B); } # C7.2.9 AESIMC page C7-2031 line 118669 MATCH x4e287800/mask=xfffffc00 # CONSTRUCT x4e287800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_aesimc/2 # AUNIT --inst x4e287800/mask=xfffffc00 --status noqemu :aesimc Rd_VPR128.16B, Rn_VPR128.16B is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=7 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Rd_VPR128 & Zd { Rd_VPR128.16B = NEON_aesimc(Rd_VPR128.16B, Rn_VPR128.16B); } # C7.2.10 AESMC page C7-2032 line 118729 MATCH x4e286800/mask=xfffffc00 # CONSTRUCT x4e286800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_aesmc/2 # AUNIT --inst x4e286800/mask=xfffffc00 --status noqemu :aesmc Rd_VPR128.16B, Rn_VPR128.16B is b_2431=0b01001110 & b_2223=0b00 & b_1721=0b10100 & b_1216=6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Rd_VPR128 & Zd { Rd_VPR128.16B = NEON_aesmc(Rd_VPR128.16B, Rn_VPR128.16B); } # C7.2.11 AND (vector) page C7-2033 line 118789 MATCH x0e201c00/mask=xbfe0fc00 # CONSTRUCT x4e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$&@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_and/2@1 # AUNIT --inst x4e201c00/mask=xffe0fc00 --status pass :and Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B & Rm_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] & Rm_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] & Rm_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] 
= Rn_VPR128.16B[16,8] & Rm_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] & Rm_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] & Rm_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] & Rm_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] & Rm_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] & Rm_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] & Rm_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] & Rm_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] & Rm_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] & Rm_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] & Rm_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] & Rm_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] & Rm_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] & Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.11 AND (vector) page C7-2033 line 118789 MATCH x0e201c00/mask=xbfe0fc00 # CONSTRUCT x0e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =& # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_and/2@1 # AUNIT --inst x0e201c00/mask=xffe0fc00 --status pass :and Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = Rn_VPR64.8B & Rm_VPR64.8B; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.12 BCAX page C7-2035 line 118871 MATCH xce200000/mask=xffe08000 # CONSTRUCT xce200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 ARG4 $~@1 $&@1 =$|@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG3 =NEON_bcax/3@1 # AUNIT --inst xce200000/mask=xffe08000 --status noqemu :bcax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B is b_2131=0b11001110001 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Ra_VPR128.16B & Zd { # simd unary TMPQ1 = ~(Ra_VPR128.16B) on lane size 1 
TMPQ1[0,8] = ~(Ra_VPR128.16B[0,8]); TMPQ1[8,8] = ~(Ra_VPR128.16B[8,8]); TMPQ1[16,8] = ~(Ra_VPR128.16B[16,8]); TMPQ1[24,8] = ~(Ra_VPR128.16B[24,8]); TMPQ1[32,8] = ~(Ra_VPR128.16B[32,8]); TMPQ1[40,8] = ~(Ra_VPR128.16B[40,8]); TMPQ1[48,8] = ~(Ra_VPR128.16B[48,8]); TMPQ1[56,8] = ~(Ra_VPR128.16B[56,8]); TMPQ1[64,8] = ~(Ra_VPR128.16B[64,8]); TMPQ1[72,8] = ~(Ra_VPR128.16B[72,8]); TMPQ1[80,8] = ~(Ra_VPR128.16B[80,8]); TMPQ1[88,8] = ~(Ra_VPR128.16B[88,8]); TMPQ1[96,8] = ~(Ra_VPR128.16B[96,8]); TMPQ1[104,8] = ~(Ra_VPR128.16B[104,8]); TMPQ1[112,8] = ~(Ra_VPR128.16B[112,8]); TMPQ1[120,8] = ~(Ra_VPR128.16B[120,8]); # simd infix TMPQ2 = Rm_VPR128.16B & TMPQ1 on lane size 1 TMPQ2[0,8] = Rm_VPR128.16B[0,8] & TMPQ1[0,8]; TMPQ2[8,8] = Rm_VPR128.16B[8,8] & TMPQ1[8,8]; TMPQ2[16,8] = Rm_VPR128.16B[16,8] & TMPQ1[16,8]; TMPQ2[24,8] = Rm_VPR128.16B[24,8] & TMPQ1[24,8]; TMPQ2[32,8] = Rm_VPR128.16B[32,8] & TMPQ1[32,8]; TMPQ2[40,8] = Rm_VPR128.16B[40,8] & TMPQ1[40,8]; TMPQ2[48,8] = Rm_VPR128.16B[48,8] & TMPQ1[48,8]; TMPQ2[56,8] = Rm_VPR128.16B[56,8] & TMPQ1[56,8]; TMPQ2[64,8] = Rm_VPR128.16B[64,8] & TMPQ1[64,8]; TMPQ2[72,8] = Rm_VPR128.16B[72,8] & TMPQ1[72,8]; TMPQ2[80,8] = Rm_VPR128.16B[80,8] & TMPQ1[80,8]; TMPQ2[88,8] = Rm_VPR128.16B[88,8] & TMPQ1[88,8]; TMPQ2[96,8] = Rm_VPR128.16B[96,8] & TMPQ1[96,8]; TMPQ2[104,8] = Rm_VPR128.16B[104,8] & TMPQ1[104,8]; TMPQ2[112,8] = Rm_VPR128.16B[112,8] & TMPQ1[112,8]; TMPQ2[120,8] = Rm_VPR128.16B[120,8] & TMPQ1[120,8]; # simd infix Rd_VPR128.16B = Rn_VPR128.16B ^ TMPQ2 on lane size 1 (BCAX is Vn EOR (Vm AND NOT Va) per Arm ARM C7.2.12, not OR) Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] ^ TMPQ2[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] ^ TMPQ2[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] ^ TMPQ2[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] ^ TMPQ2[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] ^ TMPQ2[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] ^ TMPQ2[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] ^ TMPQ2[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] ^ TMPQ2[56,8]; Rd_VPR128.16B[64,8] =
Rn_VPR128.16B[64,8] ^ TMPQ2[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] ^ TMPQ2[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] ^ TMPQ2[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] ^ TMPQ2[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] ^ TMPQ2[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] ^ TMPQ2[104,8]; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] ^ TMPQ2[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] ^ TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH x2f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x2f001400/mask=xfff89c00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 Imm_neon_uimm8Shift:4 ~ &=$& # SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:4 &=NEON_bic/2@4 # AUNIT --inst x2f001400/mask=xfff89c00 --status pass :bic Rd_VPR64.2S, abcdefgh is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR64.2S & Zd { local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd infix Rd_VPR64.2S = Rd_VPR64.2S & tmp1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] & tmp1; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] & tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH x2f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x2f009400/mask=xfff8dc00 MATCHED
4 DOCUMENTED OPCODES # SMACRO ARG1 Imm_neon_uimm8Shift:2 ~ &=$& # SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:2 &=NEON_bic/2@2 # AUNIT --inst x2f009400/mask=xfff8dc00 --status pass :bic Rd_VPR64.4H, abcdefgh is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & abcdefgh & b_1923=0x0 & b_1415=2 & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR64.4H & Zd { local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H & tmp1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] & tmp1; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] & tmp1; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] & tmp1; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] & tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH x2f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x6f001400/mask=xfff89c00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 Imm_neon_uimm8Shift:4 ~ &=$& # SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:4 &=NEON_bic/2@4 # AUNIT --inst x6f001400/mask=xfff89c00 --status pass :bic Rd_VPR128.4S, abcdefgh is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & abcdefgh & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR128.4S & Zd { local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S & tmp1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] & tmp1; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] & tmp1; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] & tmp1; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] & tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH 
x2f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x6f009400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES # SMACRO ARG1 Imm_neon_uimm8Shift:2 ~ &=$& # SMACRO(pseudo) ARG1 Imm_neon_uimm8Shift:2 &=NEON_bic/2@2 # AUNIT --inst x6f009400/mask=xfff8dc00 --status pass :bic Rd_VPR128.8H, abcdefgh is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & abcdefgh & b_1415=2 & Imm_neon_uimm8Shift & b_1012=5 & Rd_VPR128.8H & Zd { local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H & tmp1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] & tmp1; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] & tmp1; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] & tmp1; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] & tmp1; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] & tmp1; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] & tmp1; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] & tmp1; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] & tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.21 BIC (vector, register) page C7-2050 line 119707 MATCH x0e601c00/mask=xbfe0fc00 # CONSTRUCT x4e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $~@1 =$&@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_bic/2@1 # AUNIT --inst x4e601c00/mask=xffe0fc00 --status pass :bic Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd unary TMPQ1 = ~(Rm_VPR128.16B) on lane size 1 TMPQ1[0,8] = ~(Rm_VPR128.16B[0,8]); TMPQ1[8,8] = ~(Rm_VPR128.16B[8,8]); TMPQ1[16,8] = ~(Rm_VPR128.16B[16,8]); TMPQ1[24,8] = ~(Rm_VPR128.16B[24,8]); TMPQ1[32,8] = ~(Rm_VPR128.16B[32,8]); TMPQ1[40,8] = ~(Rm_VPR128.16B[40,8]); TMPQ1[48,8] = ~(Rm_VPR128.16B[48,8]); TMPQ1[56,8] = 
~(Rm_VPR128.16B[56,8]); TMPQ1[64,8] = ~(Rm_VPR128.16B[64,8]); TMPQ1[72,8] = ~(Rm_VPR128.16B[72,8]); TMPQ1[80,8] = ~(Rm_VPR128.16B[80,8]); TMPQ1[88,8] = ~(Rm_VPR128.16B[88,8]); TMPQ1[96,8] = ~(Rm_VPR128.16B[96,8]); TMPQ1[104,8] = ~(Rm_VPR128.16B[104,8]); TMPQ1[112,8] = ~(Rm_VPR128.16B[112,8]); TMPQ1[120,8] = ~(Rm_VPR128.16B[120,8]); # simd infix Rd_VPR128.16B = Rn_VPR128.16B & TMPQ1 on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] & TMPQ1[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] & TMPQ1[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] & TMPQ1[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] & TMPQ1[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] & TMPQ1[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] & TMPQ1[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] & TMPQ1[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] & TMPQ1[56,8]; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] & TMPQ1[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] & TMPQ1[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] & TMPQ1[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] & TMPQ1[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] & TMPQ1[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] & TMPQ1[104,8]; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] & TMPQ1[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] & TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.21 BIC (vector, register) page C7-2050 line 119707 MATCH x0e601c00/mask=xbfe0fc00 # CONSTRUCT x0e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $~@1 =$&@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_bic/2@1 # AUNIT --inst x0e601c00/mask=xffe0fc00 --status pass :bic Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd unary TMPD1 = ~(Rm_VPR64.8B) on lane size 1 TMPD1[0,8] = ~(Rm_VPR64.8B[0,8]); TMPD1[8,8] = ~(Rm_VPR64.8B[8,8]); TMPD1[16,8] = ~(Rm_VPR64.8B[16,8]); TMPD1[24,8] = 
~(Rm_VPR64.8B[24,8]); TMPD1[32,8] = ~(Rm_VPR64.8B[32,8]); TMPD1[40,8] = ~(Rm_VPR64.8B[40,8]); TMPD1[48,8] = ~(Rm_VPR64.8B[48,8]); TMPD1[56,8] = ~(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR64.8B = Rn_VPR64.8B & TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] & TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] & TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] & TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] & TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] & TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] & TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] & TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] & TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.22 BIF page C7-2052 line 119791 MATCH x2ee01c00/mask=xbfe0fc00 # CONSTRUCT x6ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bif/3@1 # AUNIT --inst x6ee01c00/mask=xffe0fc00 --status nopcodeop :bif Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = Rd_VPR128.16B ^ ((Rd_VPR128.16B ^ Rn_VPR128.16B) & ~Rm_VPR128.16B); } # C7.2.22 BIF page C7-2052 line 119791 MATCH x2ee01c00/mask=xbfe0fc00 # CONSTRUCT x2ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bif/3@1 # AUNIT --inst x2ee01c00/mask=xffe0fc00 --status nopcodeop :bif Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = Rd_VPR64.8B ^ ((Rd_VPR64.8B ^ Rn_VPR64.8B) & ~Rm_VPR64.8B); } # C7.2.23 BIT page C7-2054 line 119875 MATCH x2ea01c00/mask=xbfe0fc00 # CONSTRUCT x6ea01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bit/3@1 # AUNIT --inst x6ea01c00/mask=xffe0fc00 --status nopcodeop :bit Rd_VPR128.16B, Rn_VPR128.16B, 
Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = Rd_VPR128.16B ^ ((Rd_VPR128.16B ^ Rn_VPR128.16B) & Rm_VPR128.16B); } # C7.2.23 BIT page C7-2054 line 119875 MATCH x2ea01c00/mask=xbfe0fc00 # CONSTRUCT x2ea01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bit/3@1 # AUNIT --inst x2ea01c00/mask=xffe0fc00 --status nopcodeop :bit Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = Rd_VPR64.8B ^ ((Rd_VPR64.8B ^ Rn_VPR64.8B) & Rm_VPR64.8B); } # C7.2.24 BSL page C7-2056 line 119959 MATCH x2e601c00/mask=xbfe0fc00 # CONSTRUCT x6e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bsl/3@1 # AUNIT --inst x6e601c00/mask=xffe0fc00 --status nopcodeop :bsl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = Rm_VPR128.16B ^ ((Rm_VPR128.16B ^ Rn_VPR128.16B) & Rd_VPR128.16B); } # C7.2.24 BSL page C7-2056 line 119959 MATCH x2e601c00/mask=xbfe0fc00 # CONSTRUCT x2e601c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_bsl/3@1 # AUNIT --inst x2e601c00/mask=xffe0fc00 --status nopcodeop :bsl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = Rm_VPR64.8B ^ ((Rm_VPR64.8B ^ Rn_VPR64.8B) & Rd_VPR64.8B); } # C7.2.25 CLS (vector) page C7-2058 line 120043 MATCH x0e204800/mask=xbf3ffc00 # CONSTRUCT x0e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@1 # AUNIT --inst x0e204800/mask=xfffffc00 --status 
nopcodeop # CLS (vector) SIMD 8B when size = 00 , Q = 0 :cls Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { Rd_VPR64.8B = NEON_cls(Rn_VPR64.8B, 1:1); } # C7.2.25 CLS (vector) page C7-2058 line 120043 MATCH x0e204800/mask=xbf3ffc00 # CONSTRUCT x4e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@1 # AUNIT --inst x4e204800/mask=xfffffc00 --status nopcodeop # CLS (vector) SIMD 16B when size = 00 , Q = 1 :cls Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { Rd_VPR128.16B = NEON_cls(Rn_VPR128.16B, 1:1); } # C7.2.25 CLS (vector) page C7-2058 line 120043 MATCH x0e204800/mask=xbf3ffc00 # CONSTRUCT x0e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@2 # AUNIT --inst x0e604800/mask=xfffffc00 --status nopcodeop # CLS (vector) SIMD 4H when size = 01 , Q = 0 :cls Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { Rd_VPR64.4H = NEON_cls(Rn_VPR64.4H, 2:1); } # C7.2.25 CLS (vector) page C7-2058 line 120043 MATCH x0e204800/mask=xbf3ffc00 # CONSTRUCT x4e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@2 # AUNIT --inst x4e604800/mask=xfffffc00 --status nopcodeop # CLS (vector) SIMD 8H when size = 01 , Q = 1 :cls Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { Rd_VPR128.8H = NEON_cls(Rn_VPR128.8H, 2:1); } # C7.2.25 CLS (vector) page C7-2058 line 120043 MATCH x0e204800/mask=xbf3ffc00 # CONSTRUCT x0ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@4 # AUNIT --inst x0ea04800/mask=xfffffc00 --status nopcodeop # CLS (vector) SIMD 2S when size = 10 , Q = 0 :cls Rd_VPR64.2S, 
Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { Rd_VPR64.2S = NEON_cls(Rn_VPR64.2S, 4:1); } # C7.2.25 CLS (vector) page C7-2058 line 120043 MATCH x0e204800/mask=xbf3ffc00 # CONSTRUCT x4ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cls/1@4 # AUNIT --inst x4ea04800/mask=xfffffc00 --status nopcodeop # CLS (vector) SIMD 4S when size = 10 , Q = 1 :cls Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_cls(Rn_VPR128.4S, 4:1); } # C7.2.26 CLZ (vector) page C7-2060 line 120140 MATCH x2e204800/mask=xbf3ffc00 # CONSTRUCT x2e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@1 # AUNIT --inst x2e204800/mask=xfffffc00 --status nopcodeop # CLZ (vector) SIMD 8B when size = 00 , Q = 0 :clz Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { # simd unary Rd_VPR64.8B = lzcount(Rn_VPR64.8B) on lane size 1 Rd_VPR64.8B[0,8] = lzcount(Rn_VPR64.8B[0,8]); Rd_VPR64.8B[8,8] = lzcount(Rn_VPR64.8B[8,8]); Rd_VPR64.8B[16,8] = lzcount(Rn_VPR64.8B[16,8]); Rd_VPR64.8B[24,8] = lzcount(Rn_VPR64.8B[24,8]); Rd_VPR64.8B[32,8] = lzcount(Rn_VPR64.8B[32,8]); Rd_VPR64.8B[40,8] = lzcount(Rn_VPR64.8B[40,8]); Rd_VPR64.8B[48,8] = lzcount(Rn_VPR64.8B[48,8]); Rd_VPR64.8B[56,8] = lzcount(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.26 CLZ (vector) page C7-2060 line 120140 MATCH x2e204800/mask=xbf3ffc00 # CONSTRUCT x6e204800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@1 # AUNIT --inst x6e204800/mask=xfffffc00 --status nopcodeop # CLZ (vector) SIMD 16B when size = 00 , Q = 1 :clz Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000010010 & Rd_VPR128.16B & Rn_VPR128.16B & 
Zd { # simd unary Rd_VPR128.16B = lzcount(Rn_VPR128.16B) on lane size 1 Rd_VPR128.16B[0,8] = lzcount(Rn_VPR128.16B[0,8]); Rd_VPR128.16B[8,8] = lzcount(Rn_VPR128.16B[8,8]); Rd_VPR128.16B[16,8] = lzcount(Rn_VPR128.16B[16,8]); Rd_VPR128.16B[24,8] = lzcount(Rn_VPR128.16B[24,8]); Rd_VPR128.16B[32,8] = lzcount(Rn_VPR128.16B[32,8]); Rd_VPR128.16B[40,8] = lzcount(Rn_VPR128.16B[40,8]); Rd_VPR128.16B[48,8] = lzcount(Rn_VPR128.16B[48,8]); Rd_VPR128.16B[56,8] = lzcount(Rn_VPR128.16B[56,8]); Rd_VPR128.16B[64,8] = lzcount(Rn_VPR128.16B[64,8]); Rd_VPR128.16B[72,8] = lzcount(Rn_VPR128.16B[72,8]); Rd_VPR128.16B[80,8] = lzcount(Rn_VPR128.16B[80,8]); Rd_VPR128.16B[88,8] = lzcount(Rn_VPR128.16B[88,8]); Rd_VPR128.16B[96,8] = lzcount(Rn_VPR128.16B[96,8]); Rd_VPR128.16B[104,8] = lzcount(Rn_VPR128.16B[104,8]); Rd_VPR128.16B[112,8] = lzcount(Rn_VPR128.16B[112,8]); Rd_VPR128.16B[120,8] = lzcount(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.26 CLZ (vector) page C7-2060 line 120140 MATCH x2e204800/mask=xbf3ffc00 # CONSTRUCT x2e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@2 # AUNIT --inst x2e604800/mask=xfffffc00 --status nopcodeop # CLZ (vector) SIMD 4H when size = 01 , Q = 0 :clz Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = lzcount(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = lzcount(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = lzcount(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = lzcount(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = lzcount(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.26 CLZ (vector) page C7-2060 line 120140 MATCH x2e204800/mask=xbf3ffc00 # CONSTRUCT x6e604800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@2 # AUNIT --inst x6e604800/mask=xfffffc00 --status nopcodeop # CLZ (vector) SIMD 8H when size = 01 , Q = 1 :clz Rd_VPR128.8H, 
Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000010010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = lzcount(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = lzcount(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = lzcount(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = lzcount(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = lzcount(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = lzcount(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = lzcount(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = lzcount(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = lzcount(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.26 CLZ (vector) page C7-2060 line 120140 MATCH x2e204800/mask=xbf3ffc00 # CONSTRUCT x2ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@4 # AUNIT --inst x2ea04800/mask=xfffffc00 --status nopcodeop # CLZ (vector) SIMD 2S when size = 10 , Q = 0 :clz Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = lzcount(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = lzcount(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = lzcount(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.26 CLZ (vector) page C7-2060 line 120140 MATCH x2e204800/mask=xbf3ffc00 # CONSTRUCT x6ea04800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_clz/1@4 # AUNIT --inst x6ea04800/mask=xfffffc00 --status nopcodeop # CLZ (vector) SIMD 4S when size = 10 , Q = 1 :clz Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000010010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = lzcount(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = lzcount(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = lzcount(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = lzcount(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = lzcount(Rn_VPR128.4S[96,32]); 
zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x7e208c00/mask=xff20fc00 # CONSTRUCT x7ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 dup dup dup ARG2 ARG3 equal:1 zext:8 0:8 ~ =* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2 # AUNIT --inst x7ee08c00/mask=xffe0fc00 --status pass # CMEQ (register) Scalar :cmeq Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = Rn_FPR64 == Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x2e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@1 # AUNIT --inst x2e208c00/mask=xffe0fc00 --status nopcodeop # CMEQ (register) SIMD 8B when size = 00 , Q = 0 :cmeq Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & Rd_VPR64.8B & Rn_VPR64.8B & Rm_VPR64.8B & Zd { local eqVal:1 = ~ 0; Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] == Rm_VPR64.8B[0,8]) * eqVal; Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] == Rm_VPR64.8B[8,8]) * eqVal; Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] == Rm_VPR64.8B[16,8]) * eqVal; Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] == Rm_VPR64.8B[24,8]) * eqVal; Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] == Rm_VPR64.8B[32,8]) * eqVal; Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] == Rm_VPR64.8B[40,8]) * eqVal; Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] == Rm_VPR64.8B[48,8]) * eqVal; Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] == Rm_VPR64.8B[56,8]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x6e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@1 # AUNIT --inst x6e208c00/mask=xffe0fc00 
--status nopcodeop # CMEQ (register) SIMD 16B when size = 00 , Q = 1 :cmeq Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_21=1 & b_1015=0b100011 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Zd { local eqVal:1 = ~ 0; Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] == Rm_VPR128.16B[0,8]) * eqVal; Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] == Rm_VPR128.16B[8,8]) * eqVal; Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] == Rm_VPR128.16B[16,8]) * eqVal; Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] == Rm_VPR128.16B[24,8]) * eqVal; Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] == Rm_VPR128.16B[32,8]) * eqVal; Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] == Rm_VPR128.16B[40,8]) * eqVal; Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] == Rm_VPR128.16B[48,8]) * eqVal; Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] == Rm_VPR128.16B[56,8]) * eqVal; Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] == Rm_VPR128.16B[64,8]) * eqVal; Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] == Rm_VPR128.16B[72,8]) * eqVal; Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] == Rm_VPR128.16B[80,8]) * eqVal; Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] == Rm_VPR128.16B[88,8]) * eqVal; Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] == Rm_VPR128.16B[96,8]) * eqVal; Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] == Rm_VPR128.16B[104,8]) * eqVal; Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] == Rm_VPR128.16B[112,8]) * eqVal; Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] == Rm_VPR128.16B[120,8]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x2e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@2 # AUNIT --inst x2e608c00/mask=xffe0fc00 --status nopcodeop # CMEQ (register) SIMD 4H when size = 01 , Q = 0 :cmeq Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & Rd_VPR64.4H & Rn_VPR64.4H & 
Rm_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] == Rm_VPR64.4H[0,16]) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] == Rm_VPR64.4H[16,16]) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] == Rm_VPR64.4H[32,16]) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] == Rm_VPR64.4H[48,16]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x6e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@2 # AUNIT --inst x6e608c00/mask=xffe0fc00 --status nopcodeop # CMEQ (register) SIMD 8H when size = 01 , Q = 1 :cmeq Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=1 & b_1015=0b100011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] == Rm_VPR128.8H[0,16]) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] == Rm_VPR128.8H[16,16]) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] == Rm_VPR128.8H[32,16]) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] == Rm_VPR128.8H[48,16]) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] == Rm_VPR128.8H[64,16]) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] == Rm_VPR128.8H[80,16]) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] == Rm_VPR128.8H[96,16]) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] == Rm_VPR128.8H[112,16]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x2ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@4 # AUNIT --inst x2ea08c00/mask=xffe0fc00 --status nopcodeop # CMEQ (register) SIMD 2S when size = 10 , Q = 0 :cmeq Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & 
Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] == Rm_VPR64.2S[0,32]) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] == Rm_VPR64.2S[32,32]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x6ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@4 # AUNIT --inst x6ea08c00/mask=xffe0fc00 --status nopcodeop # CMEQ (register) SIMD 4S when size = 10 , Q = 1 :cmeq Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=1 & b_1015=0b100011 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] == Rm_VPR128.4S[0,32]) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] == Rm_VPR128.4S[32,32]) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] == Rm_VPR128.4S[64,32]) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] == Rm_VPR128.4S[96,32]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.27 CMEQ (register) page C7-2062 line 120236 MATCH x2e208c00/mask=xbf20fc00 # CONSTRUCT x6ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmeq/2@8 # AUNIT --inst x6ee08c00/mask=xffe0fc00 --status nopcodeop # CMEQ (register) SIMD 2D when size = 11 , Q = 1 :cmeq Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] == Rm_VPR128.2D[0,64]) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] == Rm_VPR128.2D[64,64]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x5e209800/mask=xff3ffc00 # CONSTRUCT x5ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 
=NEON_cmeq/2 # AUNIT --inst x5ee09800/mask=xfffffc00 --status nopcodeop :cmeq Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 == 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x0e209800/mask=xbf3ffc00 # CONSTRUCT x0e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmeq/2@1 # AUNIT --inst x0e209800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local eqVal:1 = ~ 0; local zero:1 = 0; Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] == zero) * eqVal; Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] == zero) * eqVal; Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] == zero) * eqVal; Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] == zero) * eqVal; Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] == zero) * eqVal; Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] == zero) * eqVal; Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] == zero) * eqVal; Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] == zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x0e209800/mask=xbf3ffc00 # CONSTRUCT x4e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmeq/2@1 # AUNIT --inst x4e209800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { local eqVal:1 = ~ 0; local zero:1 = 0; Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] == zero) * eqVal; Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] == zero) * eqVal; Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] == zero) * eqVal; Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] == zero) * eqVal; 
Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] == zero) * eqVal; Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] == zero) * eqVal; Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] == zero) * eqVal; Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] == zero) * eqVal; Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] == zero) * eqVal; Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] == zero) * eqVal; Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] == zero) * eqVal; Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] == zero) * eqVal; Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] == zero) * eqVal; Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] == zero) * eqVal; Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] == zero) * eqVal; Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] == zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x0e209800/mask=xbf3ffc00 # CONSTRUCT x0e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmeq/2@2 # AUNIT --inst x0e609800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] == zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] == zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] == zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] == zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x0e209800/mask=xbf3ffc00 # CONSTRUCT x4e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmeq/2@2 # AUNIT --inst x4e609800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; 
Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] == zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] == zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] == zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] == zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] == zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] == zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] == zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] == zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x0e209800/mask=xbf3ffc00 # CONSTRUCT x0ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmeq/2@4 # AUNIT --inst x0ea09800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] == zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] == zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.28 CMEQ (zero) page C7-2064 line 120376 MATCH x0e209800/mask=xbf3ffc00 # CONSTRUCT x4ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmeq/2@4 # AUNIT --inst x4ea09800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0;
x0e209800/mask=xbf3ffc00 # CONSTRUCT x4ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmeq/2@8 # AUNIT --inst x4ee09800/mask=xfffffc00 --status nopcodeop :cmeq Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] == zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] == zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x5e203c00/mask=xff20fc00 # CONSTRUCT x5ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2 # AUNIT --inst x5ee03c00/mask=xffe0fc00 --status nopcodeop :cmge Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = Rn_FPR64 s>= Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00 # CONSTRUCT x0e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@1 # AUNIT --inst x0e203c00/mask=xffe0fc00 --status nopcodeop :cmge Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x7 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local eqVal:1 = ~ 0; Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] s>= Rm_VPR64.8B[0,8]) * eqVal; Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] s>= Rm_VPR64.8B[8,8]) * eqVal; Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] s>= Rm_VPR64.8B[16,8]) * eqVal; Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] s>= Rm_VPR64.8B[24,8]) * eqVal; Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] s>= Rm_VPR64.8B[32,8]) * eqVal; Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] s>= Rm_VPR64.8B[40,8]) * eqVal; 
Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] s>= Rm_VPR64.8B[48,8]) * eqVal;
  Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] s>= Rm_VPR64.8B[56,8]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00
# CONSTRUCT x4e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@1
# AUNIT --inst x4e203c00/mask=xffe0fc00 --status nopcodeop

# CMGE (register) vector, 16 byte lanes
:cmge Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x7 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
  local eqVal:1 = ~ 0;
  Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] s>= Rm_VPR128.16B[0,8]) * eqVal;
  Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] s>= Rm_VPR128.16B[8,8]) * eqVal;
  Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] s>= Rm_VPR128.16B[16,8]) * eqVal;
  Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] s>= Rm_VPR128.16B[24,8]) * eqVal;
  Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] s>= Rm_VPR128.16B[32,8]) * eqVal;
  Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] s>= Rm_VPR128.16B[40,8]) * eqVal;
  Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] s>= Rm_VPR128.16B[48,8]) * eqVal;
  Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] s>= Rm_VPR128.16B[56,8]) * eqVal;
  Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] s>= Rm_VPR128.16B[64,8]) * eqVal;
  Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] s>= Rm_VPR128.16B[72,8]) * eqVal;
  Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] s>= Rm_VPR128.16B[80,8]) * eqVal;
  Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] s>= Rm_VPR128.16B[88,8]) * eqVal;
  Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] s>= Rm_VPR128.16B[96,8]) * eqVal;
  Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] s>= Rm_VPR128.16B[104,8]) * eqVal;
  Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] s>= Rm_VPR128.16B[112,8]) * eqVal;
  Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] s>= Rm_VPR128.16B[120,8]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00
# CONSTRUCT x0e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@2
# AUNIT --inst x0e603c00/mask=xffe0fc00 --status nopcodeop

# CMGE (register) vector, 4 halfword lanes
:cmge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x7 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
  local eqVal:2 = ~ 0;
  Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] s>= Rm_VPR64.4H[0,16]) * eqVal;
  Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] s>= Rm_VPR64.4H[16,16]) * eqVal;
  Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] s>= Rm_VPR64.4H[32,16]) * eqVal;
  Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] s>= Rm_VPR64.4H[48,16]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00
# CONSTRUCT x4e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@2
# AUNIT --inst x4e603c00/mask=xffe0fc00 --status nopcodeop

# CMGE (register) vector, 8 halfword lanes
:cmge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x7 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
  local eqVal:2 = ~ 0;
  Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] s>= Rm_VPR128.8H[0,16]) * eqVal;
  Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] s>= Rm_VPR128.8H[16,16]) * eqVal;
  Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] s>= Rm_VPR128.8H[32,16]) * eqVal;
  Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] s>= Rm_VPR128.8H[48,16]) * eqVal;
  Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] s>= Rm_VPR128.8H[64,16]) * eqVal;
  Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] s>= Rm_VPR128.8H[80,16]) * eqVal;
  Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] s>= Rm_VPR128.8H[96,16]) * eqVal;
  Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] s>= Rm_VPR128.8H[112,16]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00
# CONSTRUCT x0ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@4
# AUNIT --inst x0ea03c00/mask=xffe0fc00 --status nopcodeop

# CMGE (register) vector, 2 word lanes
:cmge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x7 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
  local eqVal:4 = ~ 0;
  Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] s>= Rm_VPR64.2S[0,32]) * eqVal;
  Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] s>= Rm_VPR64.2S[32,32]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00
# CONSTRUCT x4ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@4
# AUNIT --inst x4ea03c00/mask=xffe0fc00 --status nopcodeop

# CMGE (register) vector, 4 word lanes
:cmge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x7 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
  local eqVal:4 = ~ 0;
  Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] s>= Rm_VPR128.4S[0,32]) * eqVal;
  Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] s>= Rm_VPR128.4S[32,32]) * eqVal;
  Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] s>= Rm_VPR128.4S[64,32]) * eqVal;
  Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] s>= Rm_VPR128.4S[96,32]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.29 CMGE (register) page C7-2067 line 120534 MATCH x0e203c00/mask=xbf20fc00
# CONSTRUCT x4ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmge/2@8
# AUNIT --inst x4ee03c00/mask=xffe0fc00 --status nopcodeop

# CMGE (register) vector, 2 doubleword lanes
:cmge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x7 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
  local eqVal:8 = ~ 0;
  Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] s>= Rm_VPR128.2D[0,64]) * eqVal;
  Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] s>=
Rm_VPR128.2D[64,64]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x7e208800/mask=xff3ffc00
# CONSTRUCT x7ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmge/2
# AUNIT --inst x7ee08800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) scalar: Rd = all-ones if Rn signed->= 0, else 0
:cmge Rd_FPR64, Rn_FPR64, "#0"
is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 & Zd
{
  local tmp1:1 = Rn_FPR64 s>= 0;
  local tmp2:8 = zext(tmp1);
  local tmp3:8 = ~ 0:8;
  Rd_FPR64 = tmp2 * tmp3;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x2e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmge/2@1
# AUNIT --inst x2e208800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 8 byte lanes
:cmge Rd_VPR64.8B, Rn_VPR64.8B, "#0"
is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
  local eqVal:1 = ~ 0;
  local zero:1 = 0;
  Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] s>= zero) * eqVal;
  Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] s>= zero) * eqVal;
  Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] s>= zero) * eqVal;
  Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] s>= zero) * eqVal;
  Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] s>= zero) * eqVal;
  Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] s>= zero) * eqVal;
  Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] s>= zero) * eqVal;
  Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] s>= zero) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x6e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmge/2@1
# AUNIT --inst x6e208800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 16 byte lanes
:cmge Rd_VPR128.16B, Rn_VPR128.16B, "#0"
is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
  local eqVal:1 = ~ 0;
  local zero:1 =
0;
  Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] s>= zero) * eqVal;
  Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] s>= zero) * eqVal;
  Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] s>= zero) * eqVal;
  Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] s>= zero) * eqVal;
  Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] s>= zero) * eqVal;
  Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] s>= zero) * eqVal;
  Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] s>= zero) * eqVal;
  Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] s>= zero) * eqVal;
  Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] s>= zero) * eqVal;
  Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] s>= zero) * eqVal;
  Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] s>= zero) * eqVal;
  Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] s>= zero) * eqVal;
  Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] s>= zero) * eqVal;
  Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] s>= zero) * eqVal;
  Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] s>= zero) * eqVal;
  Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] s>= zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x2e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmge/2@2
# AUNIT --inst x2e608800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 4 halfword lanes
:cmge Rd_VPR64.4H, Rn_VPR64.4H, "#0"
is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
  local eqVal:2 = ~ 0;
  local zero:2 = 0;
  Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] s>= zero) * eqVal;
  Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] s>= zero) * eqVal;
  Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] s>= zero) * eqVal;
  Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] s>= zero) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x6e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmge/2@2
# AUNIT --inst x6e608800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 8 halfword lanes
:cmge Rd_VPR128.8H, Rn_VPR128.8H, "#0"
is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
  local eqVal:2 = ~ 0;
  local zero:2 = 0;
  Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] s>= zero) * eqVal;
  Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] s>= zero) * eqVal;
  Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] s>= zero) * eqVal;
  Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] s>= zero) * eqVal;
  Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] s>= zero) * eqVal;
  Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] s>= zero) * eqVal;
  Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] s>= zero) * eqVal;
  Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] s>= zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x2ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmge/2@4
# AUNIT --inst x2ea08800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 2 word lanes
:cmge Rd_VPR64.2S, Rn_VPR64.2S, "#0"
is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
  local eqVal:4 = ~ 0;
  local zero:4 = 0;
  Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] s>= zero) * eqVal;
  Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] s>= zero) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x6ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmge/2@4
# AUNIT --inst x6ea08800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 4 word lanes
:cmge Rd_VPR128.4S, Rn_VPR128.4S, "#0"
is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
  local eqVal:4 = ~ 0;
  local zero:4 = 0;
  Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] s>= zero) * eqVal;
  Rd_VPR128.4S[32,32] =
zext(Rn_VPR128.4S[32,32] s>= zero) * eqVal;
  Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] s>= zero) * eqVal;
  Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] s>= zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.30 CMGE (zero) page C7-2070 line 120683 MATCH x2e208800/mask=xbf3ffc00
# CONSTRUCT x6ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmge/2@8
# AUNIT --inst x6ee08800/mask=xfffffc00 --status nopcodeop

# CMGE (zero) vector, 2 doubleword lanes
:cmge Rd_VPR128.2D, Rn_VPR128.2D, "#0"
is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
  local eqVal:8 = ~ 0;
  local zero:8 = 0;
  Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] s>= zero) * eqVal;
  Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] s>= zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x5e203400/mask=xff20fc00
# CONSTRUCT x5ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2
# AUNIT --inst x5ee03400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) scalar: Rd = all-ones if Rn signed-> Rm, else 0
:cmgt Rd_FPR64, Rn_FPR64, Rm_FPR64
is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd
{
  local tmp1:1 = Rn_FPR64 s> Rm_FPR64;
  local tmp2:8 = zext(tmp1);
  local tmp3:8 = ~ 0:8;
  Rd_FPR64 = tmp2 * tmp3;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x0e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@1
# AUNIT --inst x0e203400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 8 byte lanes
:cmgt Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
  local eqVal:1 = ~ 0;
  Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] s> Rm_VPR64.8B[0,8]) * eqVal;
  Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] s> Rm_VPR64.8B[8,8]) * eqVal;
Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] s> Rm_VPR64.8B[16,8]) * eqVal;
  Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] s> Rm_VPR64.8B[24,8]) * eqVal;
  Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] s> Rm_VPR64.8B[32,8]) * eqVal;
  Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] s> Rm_VPR64.8B[40,8]) * eqVal;
  Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] s> Rm_VPR64.8B[48,8]) * eqVal;
  Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] s> Rm_VPR64.8B[56,8]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x4e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@1
# AUNIT --inst x4e203400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 16 byte lanes
:cmgt Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
  local eqVal:1 = ~ 0;
  Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] s> Rm_VPR128.16B[0,8]) * eqVal;
  Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] s> Rm_VPR128.16B[8,8]) * eqVal;
  Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] s> Rm_VPR128.16B[16,8]) * eqVal;
  Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] s> Rm_VPR128.16B[24,8]) * eqVal;
  Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] s> Rm_VPR128.16B[32,8]) * eqVal;
  Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] s> Rm_VPR128.16B[40,8]) * eqVal;
  Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] s> Rm_VPR128.16B[48,8]) * eqVal;
  Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] s> Rm_VPR128.16B[56,8]) * eqVal;
  Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] s> Rm_VPR128.16B[64,8]) * eqVal;
  Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] s> Rm_VPR128.16B[72,8]) * eqVal;
  Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] s> Rm_VPR128.16B[80,8]) * eqVal;
  Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] s> Rm_VPR128.16B[88,8]) * eqVal;
  Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] s> Rm_VPR128.16B[96,8]) * eqVal;
  Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] s> Rm_VPR128.16B[104,8]) * eqVal;
Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] s> Rm_VPR128.16B[112,8]) * eqVal;
  Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] s> Rm_VPR128.16B[120,8]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x0e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@2
# AUNIT --inst x0e603400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 4 halfword lanes
:cmgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
  local eqVal:2 = ~ 0;
  Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] s> Rm_VPR64.4H[0,16]) * eqVal;
  Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] s> Rm_VPR64.4H[16,16]) * eqVal;
  Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] s> Rm_VPR64.4H[32,16]) * eqVal;
  Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] s> Rm_VPR64.4H[48,16]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x4e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@2
# AUNIT --inst x4e603400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 8 halfword lanes
:cmgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
  local eqVal:2 = ~ 0;
  Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] s> Rm_VPR128.8H[0,16]) * eqVal;
  Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] s> Rm_VPR128.8H[16,16]) * eqVal;
  Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] s> Rm_VPR128.8H[32,16]) * eqVal;
  Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] s> Rm_VPR128.8H[48,16]) * eqVal;
  Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] s> Rm_VPR128.8H[64,16]) * eqVal;
  Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] s> Rm_VPR128.8H[80,16]) * eqVal;
  Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] s>
Rm_VPR128.8H[96,16]) * eqVal;
  Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] s> Rm_VPR128.8H[112,16]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x0ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@4
# AUNIT --inst x0ea03400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 2 word lanes
:cmgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
  local eqVal:4 = ~ 0;
  Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] s> Rm_VPR64.2S[0,32]) * eqVal;
  Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] s> Rm_VPR64.2S[32,32]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x4ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@4
# AUNIT --inst x4ea03400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 4 word lanes
:cmgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
  local eqVal:4 = ~ 0;
  Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] s> Rm_VPR128.4S[0,32]) * eqVal;
  Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] s> Rm_VPR128.4S[32,32]) * eqVal;
  Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] s> Rm_VPR128.4S[64,32]) * eqVal;
  Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] s> Rm_VPR128.4S[96,32]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.31 CMGT (register) page C7-2073 line 120841 MATCH x0e203400/mask=xbf20fc00
# CONSTRUCT x4ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmgt/2@8
# AUNIT --inst x4ee03400/mask=xffe0fc00 --status nopcodeop

# CMGT (register) vector, 2 doubleword lanes
:cmgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1
& Rm_VPR128.2D & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
  local eqVal:8 = ~ 0;
  Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] s> Rm_VPR128.2D[0,64]) * eqVal;
  Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] s> Rm_VPR128.2D[64,64]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x5e208800/mask=xff3ffc00
# CONSTRUCT x5ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmgt/2
# AUNIT --inst x5ee08800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) scalar: Rd = all-ones if Rn signed-> 0, else 0
:cmgt Rd_FPR64, Rn_FPR64, "#0"
is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000100010 & Rd_FPR64 & Rn_FPR64 & Zd
{
  local tmp1:1 = Rn_FPR64 s> 0;
  local tmp2:8 = zext(tmp1);
  local tmp3:8 = ~ 0:8;
  Rd_FPR64 = tmp2 * tmp3;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x0e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmgt/2@1
# AUNIT --inst x0e208800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 8 byte lanes
:cmgt Rd_VPR64.8B, Rn_VPR64.8B, "#0"
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
  local eqVal:1 = ~ 0;
  local zero:1 = 0;
  Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] s> zero) * eqVal;
  Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] s> zero) * eqVal;
  Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] s> zero) * eqVal;
  Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] s> zero) * eqVal;
  Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] s> zero) * eqVal;
  Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] s> zero) * eqVal;
  Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] s> zero) * eqVal;
  Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] s> zero) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x4e208800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmgt/2@1
# AUNIT --inst x4e208800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 16 byte lanes
:cmgt Rd_VPR128.16B, Rn_VPR128.16B, "#0"
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
  local eqVal:1 = ~ 0;
  local zero:1 = 0;
  Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] s> zero) * eqVal;
  Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] s> zero) * eqVal;
  Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] s> zero) * eqVal;
  Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] s> zero) * eqVal;
  Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] s> zero) * eqVal;
  Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] s> zero) * eqVal;
  Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] s> zero) * eqVal;
  Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] s> zero) * eqVal;
  Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] s> zero) * eqVal;
  Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] s> zero) * eqVal;
  Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] s> zero) * eqVal;
  Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] s> zero) * eqVal;
  Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] s> zero) * eqVal;
  Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] s> zero) * eqVal;
  Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] s> zero) * eqVal;
  Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] s> zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x0e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmgt/2@2
# AUNIT --inst x0e608800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 4 halfword lanes
:cmgt Rd_VPR64.4H, Rn_VPR64.4H, "#0"
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
  local eqVal:2 = ~ 0;
  local zero:2 = 0;
  Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] s> zero) * eqVal;
  Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] s> zero) * eqVal;
  Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] s> zero) * eqVal;
  Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] s> zero) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x4e608800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmgt/2@2
# AUNIT --inst x4e608800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 8 halfword lanes
:cmgt Rd_VPR128.8H, Rn_VPR128.8H, "#0"
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
  local eqVal:2 = ~ 0;
  local zero:2 = 0;
  Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] s> zero) * eqVal;
  Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] s> zero) * eqVal;
  Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] s> zero) * eqVal;
  Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] s> zero) * eqVal;
  Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] s> zero) * eqVal;
  Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] s> zero) * eqVal;
  Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] s> zero) * eqVal;
  Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] s> zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x0ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmgt/2@4
# AUNIT --inst x0ea08800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 2 word lanes
:cmgt Rd_VPR64.2S, Rn_VPR64.2S, "#0"
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
  local eqVal:4 = ~ 0;
  local zero:4 = 0;
  Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] s> zero) * eqVal;
  Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] s> zero) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x4ea08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmgt/2@4
# AUNIT --inst x4ea08800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 4 word lanes
:cmgt Rd_VPR128.4S, Rn_VPR128.4S, "#0"
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x8
& b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
  local eqVal:4 = ~ 0;
  local zero:4 = 0;
  Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] s> zero) * eqVal;
  Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] s> zero) * eqVal;
  Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] s> zero) * eqVal;
  Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] s> zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.32 CMGT (zero) page C7-2076 line 120990 MATCH x0e208800/mask=xbf3ffc00
# CONSTRUCT x4ee08800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmgt/2@8
# AUNIT --inst x4ee08800/mask=xfffffc00 --status nopcodeop

# CMGT (zero) vector, 2 doubleword lanes
:cmgt Rd_VPR128.2D, Rn_VPR128.2D, "#0"
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x8 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
  local eqVal:8 = ~ 0;
  local zero:8 = 0;
  Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] s> zero) * eqVal;
  Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] s> zero) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x7e203400/mask=xff20fc00
# CONSTRUCT x7ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2
# AUNIT --inst x7ee03400/mask=xffe0fc00 --status nopcodeop

# CMHI (register) scalar: Rd = all-ones if Rn unsigned-> Rm, else 0
:cmhi Rd_FPR64, Rn_FPR64, Rm_FPR64
is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd
{
  local tmp1:1 = Rn_FPR64 > Rm_FPR64;
  local tmp2:8 = zext(tmp1);
  local tmp3:8 = ~ 0:8;
  Rd_FPR64 = tmp2 * tmp3;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00
# CONSTRUCT x2e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@1
# AUNIT --inst x2e203400/mask=xffe0fc00 --status nopcodeop

# CMHI (register) vector, 8 byte lanes (unsigned >)
:cmhi Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
  local
eqVal:1 = ~ 0;
  Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] > Rm_VPR64.8B[0,8]) * eqVal;
  Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] > Rm_VPR64.8B[8,8]) * eqVal;
  Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] > Rm_VPR64.8B[16,8]) * eqVal;
  Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] > Rm_VPR64.8B[24,8]) * eqVal;
  Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] > Rm_VPR64.8B[32,8]) * eqVal;
  Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] > Rm_VPR64.8B[40,8]) * eqVal;
  Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] > Rm_VPR64.8B[48,8]) * eqVal;
  Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] > Rm_VPR64.8B[56,8]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00
# CONSTRUCT x6e203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@1
# AUNIT --inst x6e203400/mask=xffe0fc00 --status nopcodeop

# CMHI (register) vector, 16 byte lanes (unsigned >)
:cmhi Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B
is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
  local eqVal:1 = ~ 0;
  Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] > Rm_VPR128.16B[0,8]) * eqVal;
  Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] > Rm_VPR128.16B[8,8]) * eqVal;
  Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] > Rm_VPR128.16B[16,8]) * eqVal;
  Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] > Rm_VPR128.16B[24,8]) * eqVal;
  Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] > Rm_VPR128.16B[32,8]) * eqVal;
  Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] > Rm_VPR128.16B[40,8]) * eqVal;
  Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] > Rm_VPR128.16B[48,8]) * eqVal;
  Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] > Rm_VPR128.16B[56,8]) * eqVal;
  Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] > Rm_VPR128.16B[64,8]) * eqVal;
  Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] > Rm_VPR128.16B[72,8]) * eqVal;
  Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] > Rm_VPR128.16B[80,8]) * eqVal;
  Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] > Rm_VPR128.16B[88,8]) * eqVal;
  Rd_VPR128.16B[96,8] =
(Rn_VPR128.16B[96,8] > Rm_VPR128.16B[96,8]) * eqVal;
  Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] > Rm_VPR128.16B[104,8]) * eqVal;
  Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] > Rm_VPR128.16B[112,8]) * eqVal;
  Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] > Rm_VPR128.16B[120,8]) * eqVal;
  zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00
# CONSTRUCT x2e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@2
# AUNIT --inst x2e603400/mask=xffe0fc00 --status nopcodeop

# CMHI (register) vector, 4 halfword lanes (unsigned >)
:cmhi Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
  local eqVal:2 = ~ 0;
  Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] > Rm_VPR64.4H[0,16]) * eqVal;
  Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] > Rm_VPR64.4H[16,16]) * eqVal;
  Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] > Rm_VPR64.4H[32,16]) * eqVal;
  Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] > Rm_VPR64.4H[48,16]) * eqVal;
  zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00
# CONSTRUCT x6e603400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@2
# AUNIT --inst x6e603400/mask=xffe0fc00 --status nopcodeop

# CMHI (register) vector, 8 halfword lanes (unsigned >)
:cmhi Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H
is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
  local eqVal:2 = ~ 0;
  Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] > Rm_VPR128.8H[0,16]) * eqVal;
  Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] > Rm_VPR128.8H[16,16]) * eqVal;
  Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] > Rm_VPR128.8H[32,16]) * eqVal;
  Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] > Rm_VPR128.8H[48,16]) * eqVal;
  Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] > Rm_VPR128.8H[64,16]) * eqVal;
Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] > Rm_VPR128.8H[80,16]) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] > Rm_VPR128.8H[96,16]) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] > Rm_VPR128.8H[112,16]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00 # CONSTRUCT x2ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@4 # AUNIT --inst x2ea03400/mask=xffe0fc00 --status nopcodeop :cmhi Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] > Rm_VPR64.2S[0,32]) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] > Rm_VPR64.2S[32,32]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00 # CONSTRUCT x6ea03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@4 # AUNIT --inst x6ea03400/mask=xffe0fc00 --status nopcodeop :cmhi Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] > Rm_VPR128.4S[0,32]) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] > Rm_VPR128.4S[32,32]) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] > Rm_VPR128.4S[64,32]) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] > Rm_VPR128.4S[96,32]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.33 CMHI (register) page C7-2079 line 121148 MATCH x2e203400/mask=xbf20fc00 # CONSTRUCT x6ee03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhi/2@8 # AUNIT --inst x6ee03400/mask=xffe0fc00 --status 
nopcodeop :cmhi Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] > Rm_VPR128.2D[0,64]) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] > Rm_VPR128.2D[64,64]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x7e203c00/mask=xff20fc00 # CONSTRUCT x7ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2 # AUNIT --inst x7ee03c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01111110 & b_2223=0b11 & b_21=1 & b_1015=0b001111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = Rn_FPR64 >= Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x2e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@1 # AUNIT --inst x2e203c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x7 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local eqVal:1 = ~ 0; Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] >= Rm_VPR64.8B[0,8]) * eqVal; Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] >= Rm_VPR64.8B[8,8]) * eqVal; Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] >= Rm_VPR64.8B[16,8]) * eqVal; Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] >= Rm_VPR64.8B[24,8]) * eqVal; Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] >= Rm_VPR64.8B[32,8]) * eqVal; Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] >= Rm_VPR64.8B[40,8]) * eqVal; Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] >= Rm_VPR64.8B[48,8]) * eqVal; Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] >= Rm_VPR64.8B[56,8]) * eqVal; zext_zd(Zd); # zero upper 
24 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x6e203c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@1 # AUNIT --inst x6e203c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x7 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { local eqVal:1 = ~ 0; Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] >= Rm_VPR128.16B[0,8]) * eqVal; Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] >= Rm_VPR128.16B[8,8]) * eqVal; Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] >= Rm_VPR128.16B[16,8]) * eqVal; Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] >= Rm_VPR128.16B[24,8]) * eqVal; Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] >= Rm_VPR128.16B[32,8]) * eqVal; Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] >= Rm_VPR128.16B[40,8]) * eqVal; Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] >= Rm_VPR128.16B[48,8]) * eqVal; Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] >= Rm_VPR128.16B[56,8]) * eqVal; Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] >= Rm_VPR128.16B[64,8]) * eqVal; Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] >= Rm_VPR128.16B[72,8]) * eqVal; Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] >= Rm_VPR128.16B[80,8]) * eqVal; Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] >= Rm_VPR128.16B[88,8]) * eqVal; Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] >= Rm_VPR128.16B[96,8]) * eqVal; Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] >= Rm_VPR128.16B[104,8]) * eqVal; Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] >= Rm_VPR128.16B[112,8]) * eqVal; Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] >= Rm_VPR128.16B[120,8]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x2e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@2 # AUNIT --inst x2e603c00/mask=xffe0fc00 --status 
nopcodeop :cmhs Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x7 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] >= Rm_VPR64.4H[0,16]) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] >= Rm_VPR64.4H[16,16]) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] >= Rm_VPR64.4H[32,16]) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] >= Rm_VPR64.4H[48,16]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x6e603c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@2 # AUNIT --inst x6e603c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x7 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] >= Rm_VPR128.8H[0,16]) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] >= Rm_VPR128.8H[16,16]) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] >= Rm_VPR128.8H[32,16]) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] >= Rm_VPR128.8H[48,16]) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] >= Rm_VPR128.8H[64,16]) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] >= Rm_VPR128.8H[80,16]) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] >= Rm_VPR128.8H[96,16]) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] >= Rm_VPR128.8H[112,16]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x2ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@4 # AUNIT --inst x2ea03c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_VPR64.2S, Rn_VPR64.2S, 
Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x7 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] >= Rm_VPR64.2S[0,32]) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] >= Rm_VPR64.2S[32,32]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x6ea03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@4 # AUNIT --inst x6ea03c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x7 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] >= Rm_VPR128.4S[0,32]) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] >= Rm_VPR128.4S[32,32]) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] >= Rm_VPR128.4S[64,32]) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] >= Rm_VPR128.4S[96,32]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.34 CMHS (register) page C7-2082 line 121297 MATCH x2e203c00/mask=xbf20fc00 # CONSTRUCT x6ee03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmhs/2@8 # AUNIT --inst x6ee03c00/mask=xffe0fc00 --status nopcodeop :cmhs Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x7 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] >= Rm_VPR128.2D[0,64]) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] >= Rm_VPR128.2D[64,64]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x7e209800/mask=xff3ffc00 # CONSTRUCT x7ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmle/2 # AUNIT --inst x7ee09800/mask=xfffffc00 --status nopcodeop :cmle Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01111110 & b_2223=0b11 & b_1021=0b100000100110 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 s<= 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x2e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmle/2@1 # AUNIT --inst x2e209800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local eqVal:1 = ~ 0; local zero:1 = 0; Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] s<= zero) * eqVal; Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] s<= zero) * eqVal; Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] s<= zero) * eqVal; Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] s<= zero) * eqVal; Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] s<= zero) * eqVal; Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] s<= zero) * eqVal; Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] s<= zero) * eqVal; Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] s<= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x6e209800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmle/2@1 # AUNIT --inst x6e209800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { local eqVal:1 = ~ 0; local zero:1 = 0; Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] s<= zero) * eqVal; Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] s<= zero) * eqVal; Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] s<= zero) * eqVal; Rd_VPR128.16B[24,8] = 
(Rn_VPR128.16B[24,8] s<= zero) * eqVal; Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] s<= zero) * eqVal; Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] s<= zero) * eqVal; Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] s<= zero) * eqVal; Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] s<= zero) * eqVal; Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] s<= zero) * eqVal; Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] s<= zero) * eqVal; Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] s<= zero) * eqVal; Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] s<= zero) * eqVal; Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] s<= zero) * eqVal; Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] s<= zero) * eqVal; Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] s<= zero) * eqVal; Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] s<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x2e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmle/2@2 # AUNIT --inst x2e609800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] s<= zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] s<= zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] s<= zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] s<= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x6e609800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmle/2@2 # AUNIT --inst x6e609800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd 
{ local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] s<= zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] s<= zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] s<= zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] s<= zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] s<= zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] s<= zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] s<= zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] s<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x2ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmle/2@4 # AUNIT --inst x2ea09800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] s<= zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] s<= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x6ea09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmle/2@4 # AUNIT --inst x6ea09800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] s<= zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] s<= zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] s<= zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] s<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # 
C7.2.35 CMLE (zero) page C7-2085 line 121446 MATCH x2e209800/mask=xbf3ffc00 # CONSTRUCT x6ee09800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmle/2@8 # AUNIT --inst x6ee09800/mask=xfffffc00 --status nopcodeop :cmle Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0x9 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] s<= zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] s<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x5e20a800/mask=xff3ffc00 # CONSTRUCT x5ee0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmlt/2 # AUNIT --inst x5ee0a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_FPR64, Rn_FPR64, "#0" is b_2431=0b01011110 & b_2223=0b11 & b_1021=0b100000101010 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 s< 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x0e20a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmlt/2@1 # AUNIT --inst x0e20a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR64.8B, Rn_VPR64.8B, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local eqVal:1 = ~ 0; local zero:1 = 0; Rd_VPR64.8B[0,8] = (Rn_VPR64.8B[0,8] s< zero) * eqVal; Rd_VPR64.8B[8,8] = (Rn_VPR64.8B[8,8] s< zero) * eqVal; Rd_VPR64.8B[16,8] = (Rn_VPR64.8B[16,8] s< zero) * eqVal; Rd_VPR64.8B[24,8] = (Rn_VPR64.8B[24,8] s< zero) * eqVal; Rd_VPR64.8B[32,8] = (Rn_VPR64.8B[32,8] s< zero) * eqVal; Rd_VPR64.8B[40,8] = (Rn_VPR64.8B[40,8] s< zero) * eqVal; Rd_VPR64.8B[48,8] = (Rn_VPR64.8B[48,8] s< zero) * eqVal; 
Rd_VPR64.8B[56,8] = (Rn_VPR64.8B[56,8] s< zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x4e20a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:1 =NEON_cmlt/2@1 # AUNIT --inst x4e20a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR128.16B, Rn_VPR128.16B, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { local eqVal:1 = ~ 0; local zero:1 = 0; Rd_VPR128.16B[0,8] = (Rn_VPR128.16B[0,8] s< zero) * eqVal; Rd_VPR128.16B[8,8] = (Rn_VPR128.16B[8,8] s< zero) * eqVal; Rd_VPR128.16B[16,8] = (Rn_VPR128.16B[16,8] s< zero) * eqVal; Rd_VPR128.16B[24,8] = (Rn_VPR128.16B[24,8] s< zero) * eqVal; Rd_VPR128.16B[32,8] = (Rn_VPR128.16B[32,8] s< zero) * eqVal; Rd_VPR128.16B[40,8] = (Rn_VPR128.16B[40,8] s< zero) * eqVal; Rd_VPR128.16B[48,8] = (Rn_VPR128.16B[48,8] s< zero) * eqVal; Rd_VPR128.16B[56,8] = (Rn_VPR128.16B[56,8] s< zero) * eqVal; Rd_VPR128.16B[64,8] = (Rn_VPR128.16B[64,8] s< zero) * eqVal; Rd_VPR128.16B[72,8] = (Rn_VPR128.16B[72,8] s< zero) * eqVal; Rd_VPR128.16B[80,8] = (Rn_VPR128.16B[80,8] s< zero) * eqVal; Rd_VPR128.16B[88,8] = (Rn_VPR128.16B[88,8] s< zero) * eqVal; Rd_VPR128.16B[96,8] = (Rn_VPR128.16B[96,8] s< zero) * eqVal; Rd_VPR128.16B[104,8] = (Rn_VPR128.16B[104,8] s< zero) * eqVal; Rd_VPR128.16B[112,8] = (Rn_VPR128.16B[112,8] s< zero) * eqVal; Rd_VPR128.16B[120,8] = (Rn_VPR128.16B[120,8] s< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x0e60a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmlt/2@2 # AUNIT --inst x0e60a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR64.4H, Rn_VPR64.4H, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { 
local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] s< zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] s< zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] s< zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] s< zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x4e60a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_cmlt/2@2 # AUNIT --inst x4e60a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR128.8H, Rn_VPR128.8H, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] s< zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] s< zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] s< zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] s< zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] s< zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] s< zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] s< zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] s< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x0ea0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmlt/2@4 # AUNIT --inst x0ea0a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR64.2S, Rn_VPR64.2S, "#0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] s< zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] s< zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.36 CMLT (zero) page 
C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x4ea0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_cmlt/2@4 # AUNIT --inst x4ea0a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR128.4S, Rn_VPR128.4S, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] s< zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] s< zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] s< zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] s< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.36 CMLT (zero) page C7-2088 line 121604 MATCH x0e20a800/mask=xbf3ffc00 # CONSTRUCT x4ee0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_cmlt/2@8 # AUNIT --inst x4ee0a800/mask=xfffffc00 --status nopcodeop :cmlt Rd_VPR128.2D, Rn_VPR128.2D, "#0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xa & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] s< zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] s< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x5e208c00/mask=xff20fc00 # CONSTRUCT x5ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2 # AUNIT --inst x5ee08c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2431=0b01011110 & b_2223=0b11 & b_21=1 & b_1015=0b100011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = (Rn_FPR64 & Rm_FPR64) != 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT 
x0e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@1 # AUNIT --inst x0e208c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x11 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local eqVal:1 = ~ 0; Rd_VPR64.8B[0,8] = ((Rn_VPR64.8B[0,8] & Rm_VPR64.8B[0,8]) != 0) * eqVal; Rd_VPR64.8B[8,8] = ((Rn_VPR64.8B[8,8] & Rm_VPR64.8B[8,8]) != 0) * eqVal; Rd_VPR64.8B[16,8] = ((Rn_VPR64.8B[16,8] & Rm_VPR64.8B[16,8]) != 0) * eqVal; Rd_VPR64.8B[24,8] = ((Rn_VPR64.8B[24,8] & Rm_VPR64.8B[24,8]) != 0) * eqVal; Rd_VPR64.8B[32,8] = ((Rn_VPR64.8B[32,8] & Rm_VPR64.8B[32,8]) != 0) * eqVal; Rd_VPR64.8B[40,8] = ((Rn_VPR64.8B[40,8] & Rm_VPR64.8B[40,8]) != 0) * eqVal; Rd_VPR64.8B[48,8] = ((Rn_VPR64.8B[48,8] & Rm_VPR64.8B[48,8]) != 0) * eqVal; Rd_VPR64.8B[56,8] = ((Rn_VPR64.8B[56,8] & Rm_VPR64.8B[56,8]) != 0) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT x4e208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@1 # AUNIT --inst x4e208c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x11 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { local eqVal:1 = ~ 0; Rd_VPR128.16B[0,8] = ((Rn_VPR128.16B[0,8] & Rm_VPR128.16B[0,8]) != 0) * eqVal; Rd_VPR128.16B[8,8] = ((Rn_VPR128.16B[8,8] & Rm_VPR128.16B[8,8]) != 0) * eqVal; Rd_VPR128.16B[16,8] = ((Rn_VPR128.16B[16,8] & Rm_VPR128.16B[16,8]) != 0) * eqVal; Rd_VPR128.16B[24,8] = ((Rn_VPR128.16B[24,8] & Rm_VPR128.16B[24,8]) != 0) * eqVal; Rd_VPR128.16B[32,8] = ((Rn_VPR128.16B[32,8] & Rm_VPR128.16B[32,8]) != 0) * eqVal; Rd_VPR128.16B[40,8] = ((Rn_VPR128.16B[40,8] & Rm_VPR128.16B[40,8]) != 0) * eqVal; Rd_VPR128.16B[48,8] = 
((Rn_VPR128.16B[48,8] & Rm_VPR128.16B[48,8]) != 0) * eqVal; Rd_VPR128.16B[56,8] = ((Rn_VPR128.16B[56,8] & Rm_VPR128.16B[56,8]) != 0) * eqVal; Rd_VPR128.16B[64,8] = ((Rn_VPR128.16B[64,8] & Rm_VPR128.16B[64,8]) != 0) * eqVal; Rd_VPR128.16B[72,8] = ((Rn_VPR128.16B[72,8] & Rm_VPR128.16B[72,8]) != 0) * eqVal; Rd_VPR128.16B[80,8] = ((Rn_VPR128.16B[80,8] & Rm_VPR128.16B[80,8]) != 0) * eqVal; Rd_VPR128.16B[88,8] = ((Rn_VPR128.16B[88,8] & Rm_VPR128.16B[88,8]) != 0) * eqVal; Rd_VPR128.16B[96,8] = ((Rn_VPR128.16B[96,8] & Rm_VPR128.16B[96,8]) != 0) * eqVal; Rd_VPR128.16B[104,8] = ((Rn_VPR128.16B[104,8] & Rm_VPR128.16B[104,8]) != 0) * eqVal; Rd_VPR128.16B[112,8] = ((Rn_VPR128.16B[112,8] & Rm_VPR128.16B[112,8]) != 0) * eqVal; Rd_VPR128.16B[120,8] = ((Rn_VPR128.16B[120,8] & Rm_VPR128.16B[120,8]) != 0) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT x0e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@2 # AUNIT --inst x0e608c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x11 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext((Rn_VPR64.4H[0,16] & Rm_VPR64.4H[0,16]) != 0) * eqVal; Rd_VPR64.4H[16,16] = zext((Rn_VPR64.4H[16,16] & Rm_VPR64.4H[16,16]) != 0) * eqVal; Rd_VPR64.4H[32,16] = zext((Rn_VPR64.4H[32,16] & Rm_VPR64.4H[32,16]) != 0) * eqVal; Rd_VPR64.4H[48,16] = zext((Rn_VPR64.4H[48,16] & Rm_VPR64.4H[48,16]) != 0) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT x4e608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@2 # AUNIT --inst x4e608c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & 
b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext((Rn_VPR128.8H[0,16] & Rm_VPR128.8H[0,16]) != 0) * eqVal; Rd_VPR128.8H[16,16] = zext((Rn_VPR128.8H[16,16] & Rm_VPR128.8H[16,16]) != 0) * eqVal; Rd_VPR128.8H[32,16] = zext((Rn_VPR128.8H[32,16] & Rm_VPR128.8H[32,16]) != 0) * eqVal; Rd_VPR128.8H[48,16] = zext((Rn_VPR128.8H[48,16] & Rm_VPR128.8H[48,16]) != 0) * eqVal; Rd_VPR128.8H[64,16] = zext((Rn_VPR128.8H[64,16] & Rm_VPR128.8H[64,16]) != 0) * eqVal; Rd_VPR128.8H[80,16] = zext((Rn_VPR128.8H[80,16] & Rm_VPR128.8H[80,16]) != 0) * eqVal; Rd_VPR128.8H[96,16] = zext((Rn_VPR128.8H[96,16] & Rm_VPR128.8H[96,16]) != 0) * eqVal; Rd_VPR128.8H[112,16] = zext((Rn_VPR128.8H[112,16] & Rm_VPR128.8H[112,16]) != 0) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT x0ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@4 # AUNIT --inst x0ea08c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x11 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext((Rn_VPR64.2S[0,32] & Rm_VPR64.2S[0,32]) != 0) * eqVal; Rd_VPR64.2S[32,32] = zext((Rn_VPR64.2S[32,32] & Rm_VPR64.2S[32,32]) != 0) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT x4ea08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@4 # AUNIT --inst x4ea08c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; 
Rd_VPR128.4S[0,32] = zext((Rn_VPR128.4S[0,32] & Rm_VPR128.4S[0,32]) != 0) * eqVal; Rd_VPR128.4S[32,32] = zext((Rn_VPR128.4S[32,32] & Rm_VPR128.4S[32,32]) != 0) * eqVal; Rd_VPR128.4S[64,32] = zext((Rn_VPR128.4S[64,32] & Rm_VPR128.4S[64,32]) != 0) * eqVal; Rd_VPR128.4S[96,32] = zext((Rn_VPR128.4S[96,32] & Rm_VPR128.4S[96,32]) != 0) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.37 CMTST page C7-2090 line 121743 MATCH x0e208c00/mask=xbf20fc00 # CONSTRUCT x4ee08c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_cmtst/2@8 # AUNIT --inst x4ee08c00/mask=xffe0fc00 --status nopcodeop :cmtst Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext((Rn_VPR128.2D[0,64] & Rm_VPR128.2D[0,64]) != 0) * eqVal; Rd_VPR128.2D[64,64] = zext((Rn_VPR128.2D[64,64] & Rm_VPR128.2D[64,64]) != 0) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.38 CNT page C7-2092 line 121883 MATCH x0e205800/mask=xbf3ffc00 # CONSTRUCT x0e205800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cnt/1@1 # AUNIT --inst x0e205800/mask=xfffffc00 --status nopcodeop :cnt Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd unary Rd_VPR64.8B = popcount(Rn_VPR64.8B) on lane size 1 Rd_VPR64.8B[0,8] = popcount(Rn_VPR64.8B[0,8]); Rd_VPR64.8B[8,8] = popcount(Rn_VPR64.8B[8,8]); Rd_VPR64.8B[16,8] = popcount(Rn_VPR64.8B[16,8]); Rd_VPR64.8B[24,8] = popcount(Rn_VPR64.8B[24,8]); Rd_VPR64.8B[32,8] = popcount(Rn_VPR64.8B[32,8]); Rd_VPR64.8B[40,8] = popcount(Rn_VPR64.8B[40,8]); Rd_VPR64.8B[48,8] = popcount(Rn_VPR64.8B[48,8]); Rd_VPR64.8B[56,8] = popcount(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.38 CNT page C7-2092 line 121883 MATCH 
x0e205800/mask=xbf3ffc00 # CONSTRUCT x4e205800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_cnt/1@1 # AUNIT --inst x4e205800/mask=xfffffc00 --status nopcodeop :cnt Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd unary Rd_VPR128.16B = popcount(Rn_VPR128.16B) on lane size 1 Rd_VPR128.16B[0,8] = popcount(Rn_VPR128.16B[0,8]); Rd_VPR128.16B[8,8] = popcount(Rn_VPR128.16B[8,8]); Rd_VPR128.16B[16,8] = popcount(Rn_VPR128.16B[16,8]); Rd_VPR128.16B[24,8] = popcount(Rn_VPR128.16B[24,8]); Rd_VPR128.16B[32,8] = popcount(Rn_VPR128.16B[32,8]); Rd_VPR128.16B[40,8] = popcount(Rn_VPR128.16B[40,8]); Rd_VPR128.16B[48,8] = popcount(Rn_VPR128.16B[48,8]); Rd_VPR128.16B[56,8] = popcount(Rn_VPR128.16B[56,8]); Rd_VPR128.16B[64,8] = popcount(Rn_VPR128.16B[64,8]); Rd_VPR128.16B[72,8] = popcount(Rn_VPR128.16B[72,8]); Rd_VPR128.16B[80,8] = popcount(Rn_VPR128.16B[80,8]); Rd_VPR128.16B[88,8] = popcount(Rn_VPR128.16B[88,8]); Rd_VPR128.16B[96,8] = popcount(Rn_VPR128.16B[96,8]); Rd_VPR128.16B[104,8] = popcount(Rn_VPR128.16B[104,8]); Rd_VPR128.16B[112,8] = popcount(Rn_VPR128.16B[112,8]); Rd_VPR128.16B[120,8] = popcount(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT x4e010400/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 # AUNIT --inst x4e010400/mask=xffe1fc00 --status pass :dup Rd_VPR128.16B, Rn_VPR128.B.imm_neon_uimm4 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.16B & Zd { # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; # simd duplicate Rd_VPR128.16B = all elements tmp1 (lane size 1) Rd_VPR128.16B[0,8] = tmp1; Rd_VPR128.16B[8,8] 
= tmp1; Rd_VPR128.16B[16,8] = tmp1; Rd_VPR128.16B[24,8] = tmp1; Rd_VPR128.16B[32,8] = tmp1; Rd_VPR128.16B[40,8] = tmp1; Rd_VPR128.16B[48,8] = tmp1; Rd_VPR128.16B[56,8] = tmp1; Rd_VPR128.16B[64,8] = tmp1; Rd_VPR128.16B[72,8] = tmp1; Rd_VPR128.16B[80,8] = tmp1; Rd_VPR128.16B[88,8] = tmp1; Rd_VPR128.16B[96,8] = tmp1; Rd_VPR128.16B[104,8] = tmp1; Rd_VPR128.16B[112,8] = tmp1; Rd_VPR128.16B[120,8] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT x4e080400/mask=xffeffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@8 # AUNIT --inst x4e080400/mask=xffeffc00 --status pass :dup Rd_VPR128.2D, Rn_VPR128.D.imm_neon_uimm1 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.2D & Zd { # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 local tmp1:8 = Rn_VPR128.D.imm_neon_uimm1; # simd duplicate Rd_VPR128.2D = all elements tmp1 (lane size 8) Rd_VPR128.2D[0,64] = tmp1; Rd_VPR128.2D[64,64] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT x0e040400/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 # AUNIT --inst x0e040400/mask=xffe7fc00 --status pass :dup Rd_VPR64.2S, Rn_VPR128.S.imm_neon_uimm2 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.2S & Zd { # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) Rd_VPR64.2S[0,32] = tmp1; Rd_VPR64.2S[32,32] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT 
x0e020400/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 # AUNIT --inst x0e020400/mask=xffe3fc00 --status pass :dup Rd_VPR64.4H, Rn_VPR128.H.imm_neon_uimm3 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.4H & Zd { # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) Rd_VPR64.4H[0,16] = tmp1; Rd_VPR64.4H[16,16] = tmp1; Rd_VPR64.4H[32,16] = tmp1; Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT x4e040400/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 # AUNIT --inst x4e040400/mask=xffe7fc00 --status pass :dup Rd_VPR128.4S, Rn_VPR128.S.imm_neon_uimm2 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.4S & Zd { # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) Rd_VPR128.4S[0,32] = tmp1; Rd_VPR128.4S[32,32] = tmp1; Rd_VPR128.4S[64,32] = tmp1; Rd_VPR128.4S[96,32] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT x0e010400/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 # AUNIT --inst x0e010400/mask=xffe1fc00 --status pass :dup Rd_VPR64.8B, Rn_VPR128.B.imm_neon_uimm4 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR64.8B & Zd { # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 
local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; # simd duplicate Rd_VPR64.8B = all elements tmp1 (lane size 1) Rd_VPR64.8B[0,8] = tmp1; Rd_VPR64.8B[8,8] = tmp1; Rd_VPR64.8B[16,8] = tmp1; Rd_VPR64.8B[24,8] = tmp1; Rd_VPR64.8B[32,8] = tmp1; Rd_VPR64.8B[40,8] = tmp1; Rd_VPR64.8B[48,8] = tmp1; Rd_VPR64.8B[56,8] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x0e000400/mask=xbfe0fc00 # CONSTRUCT x4e020400/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 # AUNIT --inst x4e020400/mask=xffe3fc00 --status pass :dup Rd_VPR128.8H, Rn_VPR128.H.imm_neon_uimm3 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_VPR128.8H & Zd { # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) Rd_VPR128.8H[0,16] = tmp1; Rd_VPR128.8H[16,16] = tmp1; Rd_VPR128.8H[32,16] = tmp1; Rd_VPR128.8H[48,16] = tmp1; Rd_VPR128.8H[64,16] = tmp1; Rd_VPR128.8H[80,16] = tmp1; Rd_VPR128.8H[96,16] = tmp1; Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x5e000400/mask=xffe0fc00 # C7.2.199 MOV (scalar) page C7-2481 line 145318 MATCH x5e000400/mask=xffe0fc00 # CONSTRUCT x5e010400/mask=xffe1fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 =ARG2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 # AUNIT --inst x5e010400/mask=xffe1fc00 --status pass :dup Rd_FPR8, Rn_VPR128.B.imm_neon_uimm4 is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR8 & Zd { # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 Rd_FPR8 = Rn_VPR128.B.imm_neon_uimm4; zext_zb(Zd); # zero upper 31 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH 
x5e000400/mask=xffe0fc00 # C7.2.199 MOV (scalar) page C7-2481 line 145318 MATCH x5e000400/mask=xffe0fc00 # CONSTRUCT x5e080400/mask=xffeffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 =ARG2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@8 # AUNIT --inst x5e080400/mask=xffeffc00 --status pass :dup Rd_FPR64, Rn_VPR128.D.imm_neon_uimm1 is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR64 & Zd { # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 Rd_FPR64 = Rn_VPR128.D.imm_neon_uimm1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x5e000400/mask=xffe0fc00 # C7.2.199 MOV (scalar) page C7-2481 line 145318 MATCH x5e000400/mask=xffe0fc00 # CONSTRUCT x5e020400/mask=xffe3fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 =ARG2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 # AUNIT --inst x5e020400/mask=xffe3fc00 --status pass :dup Rd_FPR16, Rn_VPR128.H.imm_neon_uimm3 is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR16 & Zd { # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 Rd_FPR16 = Rn_VPR128.H.imm_neon_uimm3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.39 DUP (element) page C7-2094 line 121971 MATCH x5e000400/mask=xffe0fc00 # C7.2.199 MOV (scalar) page C7-2481 line 145318 MATCH x5e000400/mask=xffe0fc00 # CONSTRUCT x5e040400/mask=xffe7fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 =ARG2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 # AUNIT --inst x5e040400/mask=xffe7fc00 --status pass :dup Rd_FPR32, Rn_VPR128.S.imm_neon_uimm2 is b_3131=0 & q=1 & b_29=0 & b_2428=0x1e & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x0 & b_1010=1 & Rn_VPR128 & Rd_FPR32 & Zd { # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 Rd_FPR32 = Rn_VPR128.S.imm_neon_uimm2; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.40 DUP (general) page 
C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x4e010c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2[0]:1 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 # AUNIT --inst x4e010c00/mask=xffe1fc00 --status pass :dup Rd_VPR128.16B, Rn_GPR32 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.16B & Zd { local tmp1:1 = Rn_GPR32[0,8]; # simd duplicate Rd_VPR128.16B = all elements tmp1 (lane size 1) Rd_VPR128.16B[0,8] = tmp1; Rd_VPR128.16B[8,8] = tmp1; Rd_VPR128.16B[16,8] = tmp1; Rd_VPR128.16B[24,8] = tmp1; Rd_VPR128.16B[32,8] = tmp1; Rd_VPR128.16B[40,8] = tmp1; Rd_VPR128.16B[48,8] = tmp1; Rd_VPR128.16B[56,8] = tmp1; Rd_VPR128.16B[64,8] = tmp1; Rd_VPR128.16B[72,8] = tmp1; Rd_VPR128.16B[80,8] = tmp1; Rd_VPR128.16B[88,8] = tmp1; Rd_VPR128.16B[96,8] = tmp1; Rd_VPR128.16B[104,8] = tmp1; Rd_VPR128.16B[112,8] = tmp1; Rd_VPR128.16B[120,8] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.40 DUP (general) page C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x4e080c00/mask=xffeffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@8 # AUNIT --inst x4e080c00/mask=xffeffc00 --status pass :dup Rd_VPR128.2D, Rn_GPR64 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1619=0b1000 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR64 & Rd_VPR128.2D & Zd { # simd duplicate Rd_VPR128.2D = all elements Rn_GPR64 (lane size 8) Rd_VPR128.2D[0,64] = Rn_GPR64; Rd_VPR128.2D[64,64] = Rn_GPR64; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.40 DUP (general) page C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x0e040c00/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 # AUNIT --inst x0e040c00/mask=xffe7fc00 --status pass :dup Rd_VPR64.2S, Rn_GPR32 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & imm4=0x1 & 
b_1010=1 & Rn_GPR32 & Rd_VPR64.2S & Zd { # simd duplicate Rd_VPR64.2S = all elements Rn_GPR32 (lane size 4) Rd_VPR64.2S[0,32] = Rn_GPR32; Rd_VPR64.2S[32,32] = Rn_GPR32; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.40 DUP (general) page C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x0e020c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 # AUNIT --inst x0e020c00/mask=xffe3fc00 --status pass :dup Rd_VPR64.4H, Rn_GPR32 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.4H & Zd { local tmp1:2 = Rn_GPR32[0,16]; # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) Rd_VPR64.4H[0,16] = tmp1; Rd_VPR64.4H[16,16] = tmp1; Rd_VPR64.4H[32,16] = tmp1; Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.40 DUP (general) page C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x4e040c00/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@4 # AUNIT --inst x4e040c00/mask=xffe7fc00 --status pass :dup Rd_VPR128.4S, Rn_GPR32 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1618=0b100 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.4S & Zd { # simd duplicate Rd_VPR128.4S = all elements Rn_GPR32 (lane size 4) Rd_VPR128.4S[0,32] = Rn_GPR32; Rd_VPR128.4S[32,32] = Rn_GPR32; Rd_VPR128.4S[64,32] = Rn_GPR32; Rd_VPR128.4S[96,32] = Rn_GPR32; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.40 DUP (general) page C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x0e010c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:1 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@1 # AUNIT --inst x0e010c00/mask=xffe1fc00 --status pass :dup Rd_VPR64.8B, Rn_GPR32 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & b_16=1 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR64.8B & Zd { local tmp1:1 = 
Rn_GPR32[0,8]; # simd duplicate Rd_VPR64.8B = all elements tmp1 (lane size 1) Rd_VPR64.8B[0,8] = tmp1; Rd_VPR64.8B[8,8] = tmp1; Rd_VPR64.8B[16,8] = tmp1; Rd_VPR64.8B[24,8] = tmp1; Rd_VPR64.8B[32,8] = tmp1; Rd_VPR64.8B[40,8] = tmp1; Rd_VPR64.8B[48,8] = tmp1; Rd_VPR64.8B[56,8] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.40 DUP (general) page C7-2097 line 122143 MATCH x0e000c00/mask=xbfe0fc00 # CONSTRUCT x4e020c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:2 &=$dup # SMACRO(pseudo) ARG1 ARG2 &=NEON_dup/2@2 # AUNIT --inst x4e020c00/mask=xffe3fc00 --status pass :dup Rd_VPR128.8H, Rn_GPR32 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & b_1617=0b10 & b_1515=0 & imm4=0x1 & b_1010=1 & Rn_GPR32 & Rd_VPR128.8H & Zd { local tmp1:2 = Rn_GPR32[0,16]; # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) Rd_VPR128.8H[0,16] = tmp1; Rd_VPR128.8H[16,16] = tmp1; Rd_VPR128.8H[32,16] = tmp1; Rd_VPR128.8H[48,16] = tmp1; Rd_VPR128.8H[64,16] = tmp1; Rd_VPR128.8H[80,16] = tmp1; Rd_VPR128.8H[96,16] = tmp1; Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.41 EOR (vector) page C7-2099 line 122248 MATCH x2e201c00/mask=xbfe0fc00 # CONSTRUCT x6e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$^@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_eor/2@1 # AUNIT --inst x6e201c00/mask=xffe0fc00 --status pass :eor Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B ^ Rm_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] ^ Rm_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] ^ Rm_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] ^ Rm_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] ^ Rm_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] ^ Rm_VPR128.16B[32,8]; 
Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] ^ Rm_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] ^ Rm_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] ^ Rm_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] ^ Rm_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] ^ Rm_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] ^ Rm_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] ^ Rm_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] ^ Rm_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] ^ Rm_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] ^ Rm_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] ^ Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.41 EOR (vector) page C7-2099 line 122248 MATCH x2e201c00/mask=xbfe0fc00 # CONSTRUCT x2e201c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$^@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_eor/2@1 # AUNIT --inst x2e201c00/mask=xffe0fc00 --status pass :eor Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B ^ Rm_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] ^ Rm_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] ^ Rm_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] ^ Rm_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] ^ Rm_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] ^ Rm_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] ^ Rm_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] ^ Rm_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] ^ Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.42 EOR3 page C7-2101 line 122332 MATCH xce000000/mask=xffe08000 # CONSTRUCT xce000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 ARG4 $|@1 =$|@1 # SMACRO(pseudo) ARG1 
ARG2 ARG3 ARG4 =NEON_eor3/3@1 # AUNIT --inst xce000000/mask=xffe08000 --status noqemu
# EOR3 (ARM ARM C7.2.42) is a three-way *exclusive* OR: Vd = Vn EOR Vm EOR Va.
# Bug fix: the generated semantics used inclusive OR ('|'); corrected to XOR ('^')
# in both the Rm^Ra intermediate and the final Rn^TMPQ1 combination.
:eor3 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, Ra_VPR128.16B
is b_2131=0b11001110000 & b_15=0 & Rd_VPR128.16B & Rn_VPR128.16B & Rm_VPR128.16B & Ra_VPR128.16B & Zd
{
	# simd infix TMPQ1 = Rm_VPR128.16B ^ Ra_VPR128.16B on lane size 1
	TMPQ1[0,8] = Rm_VPR128.16B[0,8] ^ Ra_VPR128.16B[0,8];
	TMPQ1[8,8] = Rm_VPR128.16B[8,8] ^ Ra_VPR128.16B[8,8];
	TMPQ1[16,8] = Rm_VPR128.16B[16,8] ^ Ra_VPR128.16B[16,8];
	TMPQ1[24,8] = Rm_VPR128.16B[24,8] ^ Ra_VPR128.16B[24,8];
	TMPQ1[32,8] = Rm_VPR128.16B[32,8] ^ Ra_VPR128.16B[32,8];
	TMPQ1[40,8] = Rm_VPR128.16B[40,8] ^ Ra_VPR128.16B[40,8];
	TMPQ1[48,8] = Rm_VPR128.16B[48,8] ^ Ra_VPR128.16B[48,8];
	TMPQ1[56,8] = Rm_VPR128.16B[56,8] ^ Ra_VPR128.16B[56,8];
	TMPQ1[64,8] = Rm_VPR128.16B[64,8] ^ Ra_VPR128.16B[64,8];
	TMPQ1[72,8] = Rm_VPR128.16B[72,8] ^ Ra_VPR128.16B[72,8];
	TMPQ1[80,8] = Rm_VPR128.16B[80,8] ^ Ra_VPR128.16B[80,8];
	TMPQ1[88,8] = Rm_VPR128.16B[88,8] ^ Ra_VPR128.16B[88,8];
	TMPQ1[96,8] = Rm_VPR128.16B[96,8] ^ Ra_VPR128.16B[96,8];
	TMPQ1[104,8] = Rm_VPR128.16B[104,8] ^ Ra_VPR128.16B[104,8];
	TMPQ1[112,8] = Rm_VPR128.16B[112,8] ^ Ra_VPR128.16B[112,8];
	TMPQ1[120,8] = Rm_VPR128.16B[120,8] ^ Ra_VPR128.16B[120,8];
	# simd infix Rd_VPR128.16B = Rn_VPR128.16B ^ TMPQ1 on lane size 1
	Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] ^ TMPQ1[0,8];
	Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] ^ TMPQ1[8,8];
	Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] ^ TMPQ1[16,8];
	Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] ^ TMPQ1[24,8];
	Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] ^ TMPQ1[32,8];
	Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] ^ TMPQ1[40,8];
	Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] ^ TMPQ1[48,8];
	Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] ^ TMPQ1[56,8];
	Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] ^ TMPQ1[64,8];
	Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] ^ TMPQ1[72,8];
	Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] ^ TMPQ1[80,8];
	Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] ^ TMPQ1[88,8];
	Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] ^ TMPQ1[96,8];
	Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] ^ TMPQ1[104,8];
	Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] ^ TMPQ1[112,8];
	Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] ^ TMPQ1[120,8];
	zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.43 EXT page C7-2102 line 122403 MATCH x2e000000/mask=xbfe08400
# CONSTRUCT x6e000000/mask=xffe08400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 imm4:1 =NEON_ext/3@1
# AUNIT --inst x6e000000/mask=xffe08400 --status nopcodeop
:ext Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, imm4
is b_3131=0 & q=1 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & imm4 & b_1010=0 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
	Rd_VPR128.16B = NEON_ext(Rn_VPR128.16B, Rm_VPR128.16B, imm4:1, 1:1);
}
# C7.2.43 EXT page C7-2102 line 122403 MATCH x2e000000/mask=xbfe08400
# CONSTRUCT x2e000000/mask=xffe0c400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 imm4:1 =NEON_ext/3@1
# AUNIT --inst x2e000000/mask=xffe0c400 --status nopcodeop
:ext Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, imm4
is b_3131=0 & q=0 & b_2429=0x2e & b_2223=0b00 & b_2121=0 & Rm_VPR64.8B & b_1415=0 & imm4 & b_1010=0 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
	Rd_VPR64.8B = NEON_ext(Rn_VPR64.8B, Rm_VPR64.8B, imm4:1, 1:1);
}
# C7.2.44 FABD page C7-2104 line 122507 MATCH x2ec01400/mask=xbfe0fc00
# CONSTRUCT x2ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $f-@2 =$fabs@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@2
# AUNIT --inst x2ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Vector half precision variant when Q=0 sz=1 bb=0 cc=00 F=VPR64.4H
:fabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H
is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	# simd infix TMPD1 = Rn_VPR64.4H f- Rm_VPR64.4H on lane size 2
	TMPD1[0,16] = Rn_VPR64.4H[0,16] f- Rm_VPR64.4H[0,16];
	TMPD1[16,16] = Rn_VPR64.4H[16,16] f- Rm_VPR64.4H[16,16];
TMPD1[32,16] = Rn_VPR64.4H[32,16] f- Rm_VPR64.4H[32,16]; TMPD1[48,16] = Rn_VPR64.4H[48,16] f- Rm_VPR64.4H[48,16]; # simd unary Rd_VPR64.4H = abs(TMPD1) on lane size 2 Rd_VPR64.4H[0,16] = abs(TMPD1[0,16]); Rd_VPR64.4H[16,16] = abs(TMPD1[16,16]); Rd_VPR64.4H[32,16] = abs(TMPD1[32,16]); Rd_VPR64.4H[48,16] = abs(TMPD1[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.44 FABD page C7-2104 line 122507 MATCH x2ec01400/mask=xbfe0fc00 # CONSTRUCT x6ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f-@2 =$fabs@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@2 # AUNIT --inst x6ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 sz=1 bb=0 cc=00 F=VPR128.8H :fabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=0 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H f- Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] f- Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] f- Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] f- Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] f- Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] f- Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] f- Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] f- Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] f- Rm_VPR128.8H[112,16]; # simd unary Rd_VPR128.8H = abs(TMPQ1) on lane size 2 Rd_VPR128.8H[0,16] = abs(TMPQ1[0,16]); Rd_VPR128.8H[16,16] = abs(TMPQ1[16,16]); Rd_VPR128.8H[32,16] = abs(TMPQ1[32,16]); Rd_VPR128.8H[48,16] = abs(TMPQ1[48,16]); Rd_VPR128.8H[64,16] = abs(TMPQ1[64,16]); Rd_VPR128.8H[80,16] = abs(TMPQ1[80,16]); Rd_VPR128.8H[96,16] = abs(TMPQ1[96,16]); Rd_VPR128.8H[112,16] = abs(TMPQ1[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.44 FABD page C7-2104 line 122507 MATCH x2ea0d400/mask=xbfa0fc00 # CONSTRUCT x2ea0d400/mask=xffe0fc00 MATCHED 
1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f-@4 =$fabs@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@4 # AUNIT --inst x2ea0d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" # Vector single precision variant when Q=0 sz=0 bb=1 cc=11 F=VPR64.2S :fabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { # simd infix TMPD1 = Rn_VPR64.2S f- Rm_VPR64.2S on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] f- Rm_VPR64.2S[0,32]; TMPD1[32,32] = Rn_VPR64.2S[32,32] f- Rm_VPR64.2S[32,32]; # simd unary Rd_VPR64.2S = abs(TMPD1) on lane size 4 Rd_VPR64.2S[0,32] = abs(TMPD1[0,32]); Rd_VPR64.2S[32,32] = abs(TMPD1[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.44 FABD page C7-2104 line 122507 MATCH x2ea0d400/mask=xbfa0fc00 # CONSTRUCT x6ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f-@4 =$fabs@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@4 # AUNIT --inst x6ea0d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" # Vector single precision variant when Q=1 sz=0 bb=1 cc=11 F=VPR128.4S :fabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110101 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.4S f- Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] f- Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] f- Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] f- Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] f- Rm_VPR128.4S[96,32]; # simd unary Rd_VPR128.4S = abs(TMPQ1) on lane size 4 Rd_VPR128.4S[0,32] = abs(TMPQ1[0,32]); Rd_VPR128.4S[32,32] = abs(TMPQ1[32,32]); Rd_VPR128.4S[64,32] = abs(TMPQ1[64,32]); Rd_VPR128.4S[96,32] = abs(TMPQ1[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.44 FABD page C7-2104 line 122507 MATCH x2ea0d400/mask=xbfa0fc00 # CONSTRUCT x6ee0d400/mask=xffe0fc00 
MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f-@8 =$fabs@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2@8 # AUNIT --inst x6ee0d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" # Vector double precision variant when Q=1 sz=1 bb=1 cc=11 F=VPR128.2D :fabd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_21=1 & b_1015=0b110101 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { # simd infix TMPQ1 = Rn_VPR128.2D f- Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] f- Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] f- Rm_VPR128.2D[64,64]; # simd unary Rd_VPR128.2D = abs(TMPQ1) on lane size 8 Rd_VPR128.2D[0,64] = abs(TMPQ1[0,64]); Rd_VPR128.2D[64,64] = abs(TMPQ1[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.44 FABD page C7-2104 line 122507 MATCH x7ec01400/mask=xffe0fc00 # CONSTRUCT x7ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f- =fabs # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2 # AUNIT --inst x7ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision variant when sz=1 bb=0 cc=00 F=FPR16 :fabd Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110110 & b_1015=0b000101 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:2 = Rn_FPR16 f- Rm_FPR16; Rd_FPR16 = abs(tmp1); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.44 FABD page C7-2104 line 122507 MATCH x7ea0d400/mask=xffa0fc00 # CONSTRUCT x7ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f- =fabs # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2 # AUNIT --inst x7ea0d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" # Scalar single precision variant when sz=0 bb=1 cc=11 F=FPR32 :fabd Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2131=0b01111110101 & b_1015=0b110101 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:4 = Rn_FPR32 f- Rm_FPR32; Rd_FPR32 = abs(tmp1); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.44 FABD page C7-2104 line 
122507 MATCH x7ea0d400/mask=xffa0fc00 # CONSTRUCT x7ee0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f- =fabs # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fabd/2 # AUNIT --inst x7ee0d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" # Scalar double precision variant when sz=1 bb=1 cc=11 F=FPR64 :fabd Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2131=0b01111110111 & b_1015=0b110101 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:8 = Rn_FPR64 f- Rm_FPR64; Rd_FPR64 = abs(tmp1); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.45 FABS (vector) page C7-2107 line 122704 MATCH x0ea0f800/mask=xbfbffc00 # CONSTRUCT x4ee0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fabs@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@8 # AUNIT --inst x4ee0f800/mask=xfffffc00 --rand dfp --status pass :fabs Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = abs(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = abs(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = abs(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.45 FABS (vector) page C7-2107 line 122704 MATCH x0ea0f800/mask=xbfbffc00 # CONSTRUCT x0ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fabs@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@4 # AUNIT --inst x0ea0f800/mask=xfffffc00 --rand sfp --status pass :fabs Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = abs(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = abs(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = abs(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.45 FABS (vector) page C7-2107 line 122704 MATCH x0ea0f800/mask=xbfbffc00 # CONSTRUCT x4ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fabs@4 # 
SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@4 # AUNIT --inst x4ea0f800/mask=xfffffc00 --rand sfp --status pass :fabs Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = abs(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = abs(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = abs(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = abs(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = abs(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.45 FABS (vector) page C7-2107 line 122704 MATCH x0ef8f800/mask=xbffffc00 # CONSTRUCT x0ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fabs@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@2 # AUNIT --inst x0ef8f800/mask=xfffffc00 --rand hfp --status noqemu # FABS (vector) SIMD 4H when size=0 Q=0 :fabs Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = abs(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = abs(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = abs(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = abs(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = abs(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.45 FABS (vector) page C7-2107 line 122704 MATCH x0ef8f800/mask=xbffffc00 # CONSTRUCT x4ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fabs@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1@2 # AUNIT --inst x4ef8f800/mask=xfffffc00 --rand hfp --status noqemu # FABS (vector) SIMD 8H when size=0 Q=1 :fabs Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = abs(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = abs(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = abs(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = abs(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = abs(Rn_VPR128.8H[48,16]); 
Rd_VPR128.8H[64,16] = abs(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = abs(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = abs(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = abs(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.46 FABS (scalar) page C7-2109 line 122815 MATCH x1e20c000/mask=xff3ffc00 # CONSTRUCT x1ee0c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =fabs # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1 # AUNIT --inst x1ee0c000/mask=xfffffc00 --rand hfp --status noqemu :fabs Rd_FPR16, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = abs(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.46 FABS (scalar) page C7-2109 line 122815 MATCH x1e20c000/mask=xff3ffc00 # CONSTRUCT x1e60c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =fabs # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1 # AUNIT --inst x1e60c000/mask=xfffffc00 --rand dfp --status pass :fabs Rd_FPR64, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = abs(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.46 FABS (scalar) page C7-2109 line 122815 MATCH x1e20c000/mask=xff3ffc00 # CONSTRUCT x1e20c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =fabs # SMACRO(pseudo) ARG1 ARG2 =NEON_fabs/1 # AUNIT --inst x1e20c000/mask=xfffffc00 --rand sfp --status pass :fabs Rd_FPR32, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x1 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = abs(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x2e20ec00/mask=xbfa0fc00 # CONSTRUCT x6e60ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@8 # AUNIT --inst x6e60ec00/mask=xffe0fc00 --rand dfp --status nopcodeop :facge Rd_VPR128.2D, Rn_VPR128.2D, 
Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1d & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(abs(Rn_VPR128.2D[0,64]) f>= abs(Rm_VPR128.2D[0,64])) * eqVal; Rd_VPR128.2D[64,64] = zext(abs(Rn_VPR128.2D[64,64]) f>= abs(Rm_VPR128.2D[64,64])) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x2e20ec00/mask=xbfa0fc00 # CONSTRUCT x2e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@4 # AUNIT --inst x2e20ec00/mask=xffe0fc00 --rand sfp --status nopcodeop :facge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1d & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(abs(Rn_VPR64.2S[0,32]) f>= abs(Rm_VPR64.2S[0,32])) * eqVal; Rd_VPR64.2S[32,32] = zext(abs(Rn_VPR64.2S[32,32]) f>= abs(Rm_VPR64.2S[32,32])) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x2e20ec00/mask=xbfa0fc00 # CONSTRUCT x6e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@4 # AUNIT --inst x6e20ec00/mask=xffe0fc00 --rand sfp --status nopcodeop :facge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1d & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(abs(Rn_VPR128.4S[0,32]) f>= abs(Rm_VPR128.4S[0,32])) * eqVal; Rd_VPR128.4S[32,32] = zext(abs(Rn_VPR128.4S[32,32]) f>= abs(Rm_VPR128.4S[32,32])) * eqVal; Rd_VPR128.4S[64,32] = zext(abs(Rn_VPR128.4S[64,32]) f>= abs(Rm_VPR128.4S[64,32])) * eqVal; Rd_VPR128.4S[96,32] = zext(abs(Rn_VPR128.4S[96,32]) f>= abs(Rm_VPR128.4S[96,32])) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH 
x7e402c00/mask=xffe0fc00 # CONSTRUCT x7e402c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2 # AUNIT --inst x7e402c00/mask=xffe0fc00 --rand hfp --status noqemu # Scalar half precision :facge Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110010 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:1 = abs(Rn_FPR16) f>= abs(Rm_FPR16); local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x7e20ec00/mask=xffa0fc00 # CONSTRUCT x7e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2 # AUNIT --inst x7e20ec00/mask=xffe0fc00 --rand sfp --status nopcodeop # Scalar single-precision and double-precision sz=0 :facge Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:1 = abs(Rn_FPR32) f>= abs(Rm_FPR32); local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x7e20ec00/mask=xffa0fc00 # CONSTRUCT x7e60ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2 # AUNIT --inst x7e60ec00/mask=xffe0fc00 --rand dfp --status nopcodeop # Scalar single-precision and double-precision sz=1 :facge Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = abs(Rn_FPR64) f>= abs(Rm_FPR64); local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x2e402c00/mask=xbfe0fc00 # CONSTRUCT x2e402c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@2 # AUNIT --inst x2e402c00/mask=xffe0fc00 --rand hfp --status noqemu # FACGE SIMD 4H when size=0 
Q=0 :facge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(abs(Rn_VPR64.4H[0,16]) f>= abs(Rm_VPR64.4H[0,16])) * eqVal; Rd_VPR64.4H[16,16] = zext(abs(Rn_VPR64.4H[16,16]) f>= abs(Rm_VPR64.4H[16,16])) * eqVal; Rd_VPR64.4H[32,16] = zext(abs(Rn_VPR64.4H[32,16]) f>= abs(Rm_VPR64.4H[32,16])) * eqVal; Rd_VPR64.4H[48,16] = zext(abs(Rn_VPR64.4H[48,16]) f>= abs(Rm_VPR64.4H[48,16])) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.47 FACGE page C7-2111 line 122911 MATCH x2e402c00/mask=xbfe0fc00 # CONSTRUCT x6e402c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facge/2@2 # AUNIT --inst x6e402c00/mask=xffe0fc00 --rand hfp --status noqemu # FACGE SIMD 8H when size=0 Q=1 :facge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(abs(Rn_VPR128.8H[0,16]) f>= abs(Rm_VPR128.8H[0,16])) * eqVal; Rd_VPR128.8H[16,16] = zext(abs(Rn_VPR128.8H[16,16]) f>= abs(Rm_VPR128.8H[16,16])) * eqVal; Rd_VPR128.8H[32,16] = zext(abs(Rn_VPR128.8H[32,16]) f>= abs(Rm_VPR128.8H[32,16])) * eqVal; Rd_VPR128.8H[48,16] = zext(abs(Rn_VPR128.8H[48,16]) f>= abs(Rm_VPR128.8H[48,16])) * eqVal; Rd_VPR128.8H[64,16] = zext(abs(Rn_VPR128.8H[64,16]) f>= abs(Rm_VPR128.8H[64,16])) * eqVal; Rd_VPR128.8H[80,16] = zext(abs(Rn_VPR128.8H[80,16]) f>= abs(Rm_VPR128.8H[80,16])) * eqVal; Rd_VPR128.8H[96,16] = zext(abs(Rn_VPR128.8H[96,16]) f>= abs(Rm_VPR128.8H[96,16])) * eqVal; Rd_VPR128.8H[112,16] = zext(abs(Rn_VPR128.8H[112,16]) f>= abs(Rm_VPR128.8H[112,16])) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x2ea0ec00/mask=xbfa0fc00 # CONSTRUCT x6ee0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@8 # AUNIT 
--inst x6ee0ec00/mask=xffe0fc00 --rand dfp --status nopcodeop :facgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1d & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(abs(Rn_VPR128.2D[0,64]) f> abs(Rm_VPR128.2D[0,64])) * eqVal; Rd_VPR128.2D[64,64] = zext(abs(Rn_VPR128.2D[64,64]) f> abs(Rm_VPR128.2D[64,64])) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x2ea0ec00/mask=xbfa0fc00 # CONSTRUCT x2ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@4 # AUNIT --inst x2ea0ec00/mask=xffe0fc00 --rand sfp --status nopcodeop :facgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1d & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(abs(Rn_VPR64.2S[0,32]) f> abs(Rm_VPR64.2S[0,32])) * eqVal; Rd_VPR64.2S[32,32] = zext(abs(Rn_VPR64.2S[32,32]) f> abs(Rm_VPR64.2S[32,32])) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x2ea0ec00/mask=xbfa0fc00 # CONSTRUCT x6ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@4 # AUNIT --inst x6ea0ec00/mask=xffe0fc00 --rand sfp --status nopcodeop :facgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1d & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(abs(Rn_VPR128.4S[0,32]) f> abs(Rm_VPR128.4S[0,32])) * eqVal; Rd_VPR128.4S[32,32] = zext(abs(Rn_VPR128.4S[32,32]) f> abs(Rm_VPR128.4S[32,32])) * eqVal; Rd_VPR128.4S[64,32] = zext(abs(Rn_VPR128.4S[64,32]) f> abs(Rm_VPR128.4S[64,32])) * eqVal; Rd_VPR128.4S[96,32] = zext(abs(Rn_VPR128.4S[96,32]) f> abs(Rm_VPR128.4S[96,32])) * eqVal; 
zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x7ec02c00/mask=xffe0fc00 # CONSTRUCT x7ec02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2 # AUNIT --inst x7ec02c00/mask=xffe0fc00 --rand hfp --status noqemu # Scalar half precision :facgt Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110110 & b_1015=0b001011 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:1 = abs(Rn_FPR16) f> abs(Rm_FPR16); local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x7ea0ec00/mask=xffa0fc00 # CONSTRUCT x7ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2 # AUNIT --inst x7ea0ec00/mask=xffe0fc00 --rand sfp --status nopcodeop # Scalar single-precision and double-precision sz=0 :facgt Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:1 = abs(Rn_FPR32) f> abs(Rm_FPR32); local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x7ea0ec00/mask=xffa0fc00 # CONSTRUCT x7ee0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2 # AUNIT --inst x7ee0ec00/mask=xffe0fc00 --rand dfp --status nopcodeop # Scalar single-precision and double-precision sz=1 :facgt Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111011 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = abs(Rn_FPR64) f> abs(Rm_FPR64); local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x2ec02c00/mask=xbfe0fc00 # CONSTRUCT x2ec02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@2 # AUNIT 
--inst x2ec02c00/mask=xffe0fc00 --rand hfp --status noqemu # Vector half-precision SIMD 4H when Q=0 :facgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(abs(Rn_VPR64.4H[0,16]) f> abs(Rm_VPR64.4H[0,16])) * eqVal; Rd_VPR64.4H[16,16] = zext(abs(Rn_VPR64.4H[16,16]) f> abs(Rm_VPR64.4H[16,16])) * eqVal; Rd_VPR64.4H[32,16] = zext(abs(Rn_VPR64.4H[32,16]) f> abs(Rm_VPR64.4H[32,16])) * eqVal; Rd_VPR64.4H[48,16] = zext(abs(Rn_VPR64.4H[48,16]) f> abs(Rm_VPR64.4H[48,16])) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.48 FACGT page C7-2115 line 123160 MATCH x2ec02c00/mask=xbfe0fc00 # CONSTRUCT x6ec02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_facgt/2@2 # AUNIT --inst x6ec02c00/mask=xffe0fc00 --rand hfp --status noqemu # Vector half-precision SIMD 8H when Q=1 :facgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(abs(Rn_VPR128.8H[0,16]) f> abs(Rm_VPR128.8H[0,16])) * eqVal; Rd_VPR128.8H[16,16] = zext(abs(Rn_VPR128.8H[16,16]) f> abs(Rm_VPR128.8H[16,16])) * eqVal; Rd_VPR128.8H[32,16] = zext(abs(Rn_VPR128.8H[32,16]) f> abs(Rm_VPR128.8H[32,16])) * eqVal; Rd_VPR128.8H[48,16] = zext(abs(Rn_VPR128.8H[48,16]) f> abs(Rm_VPR128.8H[48,16])) * eqVal; Rd_VPR128.8H[64,16] = zext(abs(Rn_VPR128.8H[64,16]) f> abs(Rm_VPR128.8H[64,16])) * eqVal; Rd_VPR128.8H[80,16] = zext(abs(Rn_VPR128.8H[80,16]) f> abs(Rm_VPR128.8H[80,16])) * eqVal; Rd_VPR128.8H[96,16] = zext(abs(Rn_VPR128.8H[96,16]) f> abs(Rm_VPR128.8H[96,16])) * eqVal; Rd_VPR128.8H[112,16] = zext(abs(Rn_VPR128.8H[112,16]) f> abs(Rm_VPR128.8H[112,16])) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.49 FADD (vector) page C7-2119 line 123409 MATCH x0e20d400/mask=xbfa0fc00 # CONSTRUCT 
x4e60d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@8 # AUNIT --inst x4e60d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix Rd_VPR128.2D = Rn_VPR128.2D f+ Rm_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f+ Rm_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f+ Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.49 FADD (vector) page C7-2119 line 123409 MATCH x0e20d400/mask=xbfa0fc00 # CONSTRUCT x0e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@4 # AUNIT --inst x0e20d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :fadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rn_VPR64.2S f+ Rm_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f+ Rm_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f+ Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.49 FADD (vector) page C7-2119 line 123409 MATCH x0e20d400/mask=xbfa0fc00 # CONSTRUCT x4e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@4 # AUNIT --inst x4e20d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rn_VPR128.4S f+ Rm_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f+ 
Rm_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f+ Rm_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f+ Rm_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f+ Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.49 FADD (vector) page C7-2119 line 123409 MATCH x0e401400/mask=xbfe0fc00 # CONSTRUCT x0e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@2 # AUNIT --inst x0e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision SIMD 4H when Q=0 :fadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H f+ Rm_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f+ Rm_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f+ Rm_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f+ Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f+ Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.49 FADD (vector) page C7-2119 line 123409 MATCH x0e401400/mask=xbfe0fc00 # CONSTRUCT x4e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2@2 # AUNIT --inst x4e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision SIMD 8H when Q=1 :fadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H f+ Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f+ Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f+ Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f+ Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f+ Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] 
f+ Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f+ Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f+ Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f+ Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.50 FADD (scalar) page C7-2121 line 123531 MATCH x1e202800/mask=xff20fc00 # CONSTRUCT x1e602800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f+ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2 # AUNIT --inst x1e602800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fadd Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x2 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Rn_FPR64 f+ Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.50 FADD (scalar) page C7-2121 line 123531 MATCH x1e202800/mask=xff20fc00 # CONSTRUCT x1ee02800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f+ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2 # AUNIT --inst x1ee02800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" :fadd Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x2 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = Rn_FPR16 f+ Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.50 FADD (scalar) page C7-2121 line 123531 MATCH x1e202800/mask=xff20fc00 # CONSTRUCT x1e202800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f+ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fadd/2 # AUNIT --inst x1e202800/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :fadd Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x2 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = Rn_FPR32 f+ Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.51 FADDP (scalar) page C7-2123 line 123639 MATCH x7e30d800/mask=xffbffc00 # CONSTRUCT 
x7e70d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =#f+ # SMACRO(pseudo) ARG1 ARG2 =NEON_faddp/1@8 # AUNIT --inst x7e70d800/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" :faddp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd { # sipd infix Rd_FPR64 = f+(Rn_VPR128.2D) on pairs lane size (8 to 8) local tmp1 = Rn_VPR128.2D[0,64]; local tmp2 = Rn_VPR128.2D[64,64]; Rd_FPR64 = tmp1 f+ tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.51 FADDP (scalar) page C7-2123 line 123639 MATCH x7e30d800/mask=xffbffc00 # CONSTRUCT x7e30d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =#f+ # SMACRO(pseudo) ARG1 ARG2 =NEON_faddp/1@4 # AUNIT --inst x7e30d800/mask=xfffffc00 --rand sfp --status pass --comment "nofpround" :faddp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd { # sipd infix Rd_FPR32 = f+(Rn_VPR64.2S) on pairs lane size (4 to 4) local tmp1 = Rn_VPR64.2S[0,32]; local tmp2 = Rn_VPR64.2S[32,32]; Rd_FPR32 = tmp1 f+ tmp2; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.51 FADDP (scalar) page C7-2123 line 123639 MATCH x5e30d800/mask=xffbffc00 # CONSTRUCT x5e30d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_FPR32 =#f+@2 # SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_faddp/1@2 # AUNIT --inst x5e30d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant :faddp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111000110000110110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd { # sipd infix Rd_FPR16 = f+(Rn_FPR32) on pairs lane size (2 to 2) local tmp1 = Rn_FPR32[0,16]; local tmp2 = Rn_FPR32[16,16]; Rd_FPR16 = tmp1 f+ tmp2; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.52 FADDP (vector) page C7-2125 line 123747 MATCH x2e20d400/mask=xbfa0fc00 # CONSTRUCT x6e60d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 
var:16 ARG2 ARG3 =#f+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@8 # AUNIT --inst x6e60d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :faddp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = f+(Rn_VPR128.2D,Rm_VPR128.2D) on pairs lane size (8 to 8) local tmp2 = Rn_VPR128.2D[0,64]; local tmp3 = Rn_VPR128.2D[64,64]; TMPQ1[0,64] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.2D[0,64]; tmp3 = Rm_VPR128.2D[64,64]; TMPQ1[64,64] = tmp2 f+ tmp3; Rd_VPR128.2D = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.52 FADDP (vector) page C7-2125 line 123747 MATCH x2e20d400/mask=xbfa0fc00 # CONSTRUCT x2e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 ARG3 =#f+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@4 # AUNIT --inst x2e20d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :faddp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD1 = 0; # sipd infix TMPD1 = f+(Rn_VPR64.2S,Rm_VPR64.2S) on pairs lane size (4 to 4) local tmp2 = Rn_VPR64.2S[0,32]; local tmp3 = Rn_VPR64.2S[32,32]; TMPD1[0,32] = tmp2 f+ tmp3; tmp2 = Rm_VPR64.2S[0,32]; tmp3 = Rm_VPR64.2S[32,32]; TMPD1[32,32] = tmp2 f+ tmp3; Rd_VPR64.2S = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.52 FADDP (vector) page C7-2125 line 123747 MATCH x2e20d400/mask=xbfa0fc00 # CONSTRUCT x6e20d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 ARG3 =#f+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@4 # AUNIT --inst x6e20d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :faddp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & 
Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = f+(Rn_VPR128.4S,Rm_VPR128.4S) on pairs lane size (4 to 4) local tmp2 = Rn_VPR128.4S[0,32]; local tmp3 = Rn_VPR128.4S[32,32]; TMPQ1[0,32] = tmp2 f+ tmp3; tmp2 = Rn_VPR128.4S[64,32]; tmp3 = Rn_VPR128.4S[96,32]; TMPQ1[32,32] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.4S[0,32]; tmp3 = Rm_VPR128.4S[32,32]; TMPQ1[64,32] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.4S[64,32]; tmp3 = Rm_VPR128.4S[96,32]; TMPQ1[96,32] = tmp2 f+ tmp3; Rd_VPR128.4S = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.52 FADDP (vector) page C7-2125 line 123747 MATCH x2e401400/mask=xbfe0fc00 # CONSTRUCT x2e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 ARG3 =#f+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@2 # AUNIT --inst x2e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision SIMD 4H when Q = 0 # NOTE(review): operand order corrected — per the ARM ARM FADDP pseudocode, concat = Vm:Vn, # so the LOW result pairs come from Rn and the HIGH result pairs from Rm, matching the # 2S/4S/2D and 8H variants of this instruction in this file (the previous body had Rm/Rn swapped). :faddp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { TMPD1 = 0; # sipd infix TMPD1 = f+(Rn_VPR64.4H,Rm_VPR64.4H) on pairs lane size (2 to 2) local tmp2 = Rn_VPR64.4H[0,16]; local tmp3 = Rn_VPR64.4H[16,16]; TMPD1[0,16] = tmp2 f+ tmp3; tmp2 = Rn_VPR64.4H[32,16]; tmp3 = Rn_VPR64.4H[48,16]; TMPD1[16,16] = tmp2 f+ tmp3; tmp2 = Rm_VPR64.4H[0,16]; tmp3 = Rm_VPR64.4H[16,16]; TMPD1[32,16] = tmp2 f+ tmp3; tmp2 = Rm_VPR64.4H[32,16]; tmp3 = Rm_VPR64.4H[48,16]; TMPD1[48,16] = tmp2 f+ tmp3; Rd_VPR64.4H = TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.52 FADDP (vector) page C7-2125 line 123747 MATCH x2e401400/mask=xbfe0fc00 # CONSTRUCT x6e401400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 ARG3 =#f+/2 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_faddp/2@2 # AUNIT --inst x6e401400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision SIMD 8H when Q = 1 :faddp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 &
b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = f+(Rn_VPR128.8H,Rm_VPR128.8H) on pairs lane size (2 to 2) local tmp2 = Rn_VPR128.8H[0,16]; local tmp3 = Rn_VPR128.8H[16,16]; TMPQ1[0,16] = tmp2 f+ tmp3; tmp2 = Rn_VPR128.8H[32,16]; tmp3 = Rn_VPR128.8H[48,16]; TMPQ1[16,16] = tmp2 f+ tmp3; tmp2 = Rn_VPR128.8H[64,16]; tmp3 = Rn_VPR128.8H[80,16]; TMPQ1[32,16] = tmp2 f+ tmp3; tmp2 = Rn_VPR128.8H[96,16]; tmp3 = Rn_VPR128.8H[112,16]; TMPQ1[48,16] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.8H[0,16]; tmp3 = Rm_VPR128.8H[16,16]; TMPQ1[64,16] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.8H[32,16]; tmp3 = Rm_VPR128.8H[48,16]; TMPQ1[80,16] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.8H[64,16]; tmp3 = Rm_VPR128.8H[80,16]; TMPQ1[96,16] = tmp2 f+ tmp3; tmp2 = Rm_VPR128.8H[96,16]; tmp3 = Rm_VPR128.8H[112,16]; TMPQ1[112,16] = tmp2 f+ tmp3; Rd_VPR128.8H = TMPQ1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.46 FCADD page C7-1090 line 63037 KEEPWITH # val is either 90 or 270 depending on the value of bit b_12 (0 or 1) fcadd_rotate: "#"^val is b_12 [ val = 90 + 180 * b_12; ] { export *[const]:2 val; } # C7.2.53 FCADD page C7-2127 line 123874 MATCH x2e00e400/mask=xbf20ec00 # CONSTRUCT x2e40e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@2 # AUNIT --inst x2e40e400/mask=xffe0ec00 --rand hfp --status noqemu --comment "nofpround" # FCADD SIMD 4H when size = 01 , Q = 0 :fcadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, fcadd_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { Rd_VPR64.4H = NEON_fcadd(Rn_VPR64.4H, Rm_VPR64.4H, fcadd_rotate, 2:1); } # C7.2.53 FCADD page C7-2127 line 123874 MATCH x2e00e400/mask=xbf20ec00 # CONSTRUCT x6e40e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@2 # AUNIT --inst x6e40e400/mask=xffe0ec00 --rand hfp --status noqemu --comment 
"nofpround" # FCADD SIMD 8H when size = 01 , Q = 1 :fcadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, fcadd_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { Rd_VPR128.8H = NEON_fcadd(Rn_VPR128.8H, Rm_VPR128.8H, fcadd_rotate, 2:1); } # C7.2.53 FCADD page C7-2127 line 123874 MATCH x2e00e400/mask=xbf20ec00 # CONSTRUCT x2e80e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@4 # AUNIT --inst x2e80e400/mask=xffe0ec00 --rand sfp --status noqemu --comment "nofpround" # FCADD SIMD 2S when size = 10 , Q = 0 :fcadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, fcadd_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { Rd_VPR64.2S = NEON_fcadd(Rn_VPR64.2S, Rm_VPR64.2S, fcadd_rotate, 4:1); } # C7.2.53 FCADD page C7-2127 line 123874 MATCH x2e00e400/mask=xbf20ec00 # CONSTRUCT x6e80e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@4 # AUNIT --inst x6e80e400/mask=xffe0ec00 --rand sfp --status noqemu --comment "nofpround" # FCADD SIMD 4S when size = 10 , Q = 1 :fcadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, fcadd_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { Rd_VPR128.4S = NEON_fcadd(Rn_VPR128.4S, Rm_VPR128.4S, fcadd_rotate, 4:1); } # C7.2.53 FCADD page C7-2127 line 123874 MATCH x2e00e400/mask=xbf20ec00 # CONSTRUCT x6ec0e400/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcadd/3@8 # AUNIT --inst x6ec0e400/mask=xffe0ec00 --rand dfp --status noqemu --comment "nofpround" # FCADD SIMD 2D when size = 11 , Q = 1 :fcadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, fcadd_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & 
b_21=0 & b_1315=0b111 & b_1011=0b01 & fcadd_rotate & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { Rd_VPR128.2D = NEON_fcadd(Rn_VPR128.2D, Rm_VPR128.2D, fcadd_rotate, 8:1); } # C7.2.54 FCCMP page C7-2129 line 123987 MATCH x1e200400/mask=xff200c10 # CONSTRUCT x1e600400/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmp/4 # AUNIT --inst x1e600400/mask=xffe00c10 --rand dfp --status nodest --comment "flags" :fccmp Rn_FPR64, Rm_FPR64, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=1 & Rn_FPR64 & fpccmp.op=0 & NZCVImm_uimm4 { local tmp1:1 = ! CondOp:1; setCC_NZCV(NZCVImm_uimm4:1); if (tmp1) goto inst_next; ftestNAN(Rn_FPR64, Rm_FPR64); fcomp(Rn_FPR64, Rm_FPR64); } # C7.2.54 FCCMP page C7-2129 line 123987 MATCH x1e200400/mask=xff200c10 # CONSTRUCT x1e200400/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmp/4 # AUNIT --inst x1e200400/mask=xffe00c10 --rand sfp --status nodest --comment "flags" :fccmp Rn_FPR32, Rm_FPR32, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=1 & Rn_FPR32 & fpccmp.op=0 & NZCVImm_uimm4 { local tmp1:1 = ! CondOp:1; setCC_NZCV(NZCVImm_uimm4:1); if (tmp1) goto inst_next; ftestNAN(Rn_FPR32, Rm_FPR32); fcomp(Rn_FPR32, Rm_FPR32); } # C7.2.54 FCCMP page C7-2129 line 123987 MATCH x1e200400/mask=xff200c10 # CONSTRUCT x1ee00400/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! 
inst_next goto null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmp/4 # AUNIT --inst x1ee00400/mask=xffe00c10 --rand hfp --status nodest --comment "flags" :fccmp Rn_FPR16, Rm_FPR16, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=1 & Rn_FPR16 & fpccmp.op=0 & NZCVImm_uimm4 { local tmp1:1 = ! CondOp:1; setCC_NZCV(NZCVImm_uimm4:1); if (tmp1) goto inst_next; ftestNAN(Rn_FPR16, Rm_FPR16); fcomp(Rn_FPR16, Rm_FPR16); } # C7.2.55 FCCMPE page C7-2131 line 124107 MATCH x1e200410/mask=xff200c10 # CONSTRUCT x1e600410/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmpe/4 # AUNIT --inst x1e600410/mask=xffe00c10 --rand dfp --status nodest --comment "flags" :fccmpe Rn_FPR64, Rm_FPR64, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=1 & Rn_FPR64 & fpccmp.op=1 & NZCVImm_uimm4 { local tmp1:1 = ! CondOp:1; setCC_NZCV(NZCVImm_uimm4:1); if (tmp1) goto inst_next; ftestNAN(Rn_FPR64, Rm_FPR64); fcomp(Rn_FPR64, Rm_FPR64); } # C7.2.55 FCCMPE page C7-2131 line 124107 MATCH x1e200410/mask=xff200c10 # CONSTRUCT x1e200410/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmpe/4 # AUNIT --inst x1e200410/mask=xffe00c10 --rand sfp --status nodest --comment "flags" :fccmpe Rn_FPR32, Rm_FPR32, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=1 & Rn_FPR32 & fpccmp.op=1 & NZCVImm_uimm4 { local tmp1:1 = ! 
CondOp:1; setCC_NZCV(NZCVImm_uimm4:1); if (tmp1) goto inst_next; ftestNAN(Rn_FPR32, Rm_FPR32); fcomp(Rn_FPR32, Rm_FPR32); } # C7.2.55 FCCMPE page C7-2131 line 124107 MATCH x1e200410/mask=xff200c10 # CONSTRUCT x1ee00410/mask=xffe00c10 MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG3:1 =setCC_NZCV/1 ARG4:1 ! inst_next goto null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 ARG3:1 ARG4:1 =NEON_fccmpe/4 # AUNIT --inst x1ee00410/mask=xffe00c10 --rand hfp --status nodest --comment "flags" :fccmpe Rn_FPR16, Rm_FPR16, NZCVImm_uimm4, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=1 & Rn_FPR16 & fpccmp.op=1 & NZCVImm_uimm4 { local tmp1:1 = ! CondOp:1; setCC_NZCV(NZCVImm_uimm4:1); if (tmp1) goto inst_next; ftestNAN(Rn_FPR16, Rm_FPR16); fcomp(Rn_FPR16, Rm_FPR16); } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x0e20e400/mask=xbfa0fc00 # CONSTRUCT x4e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@8 # AUNIT --inst x4e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" :fcmeq Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f== Rm_VPR128.2D[0,64]) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f== Rm_VPR128.2D[64,64]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x0e20e400/mask=xbfa0fc00 # CONSTRUCT x0e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@4 # AUNIT --inst x0e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" :fcmeq Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & 
Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f== Rm_VPR64.2S[0,32]) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f== Rm_VPR64.2S[32,32]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x0e20e400/mask=xbfa0fc00 # CONSTRUCT x4e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@4 # AUNIT --inst x4e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" :fcmeq Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f== Rm_VPR128.4S[0,32]) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f== Rm_VPR128.4S[32,32]) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f== Rm_VPR128.4S[64,32]) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f== Rm_VPR128.4S[96,32]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x5e402400/mask=xffe0fc00 # CONSTRUCT x5e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2 # AUNIT --inst x5e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmeq Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01011110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f== Rm_FPR16; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x5e20e400/mask=xffa0fc00 # CONSTRUCT x5e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2 # AUNIT --inst x5e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and 
double-precision variant sz=0 :fcmeq Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b010111100 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f== Rm_FPR32; local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x5e20e400/mask=xffa0fc00 # CONSTRUCT x5e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2 # AUNIT --inst x5e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision variant sz=1 :fcmeq Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b010111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f== Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x0e402400/mask=xbfe0fc00 # CONSTRUCT x0e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmeq/2@2 # AUNIT --inst x0e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q=0 :fcmeq Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f== Rm_VPR64.4H[0,16]) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f== Rm_VPR64.4H[16,16]) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f== Rm_VPR64.4H[32,16]) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f== Rm_VPR64.4H[48,16]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.56 FCMEQ (register) page C7-2133 line 124227 MATCH x0e402400/mask=xbfe0fc00 # CONSTRUCT x4e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 
=NEON_fcmeq/2@2 # AUNIT --inst x4e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q=1 :fcmeq Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f== Rm_VPR128.8H[0,16]) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f== Rm_VPR128.8H[16,16]) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f== Rm_VPR128.8H[32,16]) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f== Rm_VPR128.8H[48,16]) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f== Rm_VPR128.8H[64,16]) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f== Rm_VPR128.8H[80,16]) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f== Rm_VPR128.8H[96,16]) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f== Rm_VPR128.8H[112,16]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x0ea0d800/mask=xbfbffc00 # CONSTRUCT x4ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmeq/2@8 # AUNIT --inst x4ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" :fcmeq Rd_VPR128.2D, Rn_VPR128.2D, "#0.0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f== zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f== zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x0ea0d800/mask=xbfbffc00 # CONSTRUCT x0ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmeq/2@4 # AUNIT --inst x0ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmeq Rd_VPR64.2S, Rn_VPR64.2S, "#0.0" 
is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f== zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f== zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x0ea0d800/mask=xbfbffc00 # CONSTRUCT x4ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmeq/2@4 # AUNIT --inst x4ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmeq Rd_VPR128.4S, Rn_VPR128.4S, "#0.0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f== zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f== zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f== zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f== zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x5ef8d800/mask=xfffffc00 # CONSTRUCT x5ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmeq/2 # AUNIT --inst x5ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmeq Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0101111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f== 0; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x5ea0d800/mask=xffbffc00 # CONSTRUCT x5ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmeq/2 # AUNIT --inst x5ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision 
sz=0 :fcmeq Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f== 0; local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x5ea0d800/mask=xffbffc00 # CONSTRUCT x5ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmeq/2 # AUNIT --inst x5ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=1 :fcmeq Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f== 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x0ef8d800/mask=xbffffc00 # CONSTRUCT x0ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmeq/2@2 # AUNIT --inst x0ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmeq Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b00111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f== zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f== zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f== zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f== zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.57 FCMEQ (zero) page C7-2137 line 124475 MATCH x0ef8d800/mask=xbffffc00 # CONSTRUCT x4ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmeq/2@2 # AUNIT --inst x4ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q = 1 :fcmeq 
Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b00111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f== zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f== zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f== zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f== zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f== zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f== zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f== zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f== zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x2e20e400/mask=xbfa0fc00 # CONSTRUCT x6e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@8 # AUNIT --inst x6e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" :fcmge Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f>= Rm_VPR128.2D[0,64]) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f>= Rm_VPR128.2D[64,64]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x2e20e400/mask=xbfa0fc00 # CONSTRUCT x2e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@4 # AUNIT --inst x2e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" :fcmge Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f>= Rm_VPR64.2S[0,32]) * eqVal; 
Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f>= Rm_VPR64.2S[32,32]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x2e20e400/mask=xbfa0fc00 # CONSTRUCT x6e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@4 # AUNIT --inst x6e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" :fcmge Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f>= Rm_VPR128.4S[0,32]) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f>= Rm_VPR128.4S[32,32]) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f>= Rm_VPR128.4S[64,32]) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f>= Rm_VPR128.4S[96,32]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x7e402400/mask=xffe0fc00 # CONSTRUCT x7e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2 # AUNIT --inst x7e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmge Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110010 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f>= Rm_FPR16; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x7e20e400/mask=xffa0fc00 # CONSTRUCT x7e20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2 # AUNIT --inst x7e20e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision variant sz=0 :fcmge Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111100 & b_22=0 & b_21=1 & 
b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f>= Rm_FPR32; local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x7e20e400/mask=xffa0fc00 # CONSTRUCT x7e60e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2 # AUNIT --inst x7e60e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision variant sz=1 :fcmge Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111100 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f>= Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x2e402400/mask=xbfe0fc00 # CONSTRUCT x2e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@2 # AUNIT --inst x2e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmge Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f>= Rm_VPR64.4H[0,16]) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f>= Rm_VPR64.4H[16,16]) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f>= Rm_VPR64.4H[32,16]) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f>= Rm_VPR64.4H[48,16]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.58 FCMGE (register) page C7-2140 line 124691 MATCH x2e402400/mask=xbfe0fc00 # CONSTRUCT x6e402400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmge/2@2 # AUNIT --inst x6e402400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Vector 
half precision variant SIMD 8H when Q = 1 :fcmge Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f>= Rm_VPR128.8H[0,16]) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f>= Rm_VPR128.8H[16,16]) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f>= Rm_VPR128.8H[32,16]) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f>= Rm_VPR128.8H[48,16]) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f>= Rm_VPR128.8H[64,16]) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f>= Rm_VPR128.8H[80,16]) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f>= Rm_VPR128.8H[96,16]) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f>= Rm_VPR128.8H[112,16]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x2ea0c800/mask=xbfbffc00 # CONSTRUCT x6ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmge/2@8 # AUNIT --inst x6ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" :fcmge Rd_VPR128.2D, Rn_VPR128.2D, "#0.0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f>= zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f>= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x2ea0c800/mask=xbfbffc00 # CONSTRUCT x2ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmge/2@4 # AUNIT --inst x2ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmge Rd_VPR64.2S, Rn_VPR64.2S, "#0.0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & 
Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f>= zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f>= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x2ea0c800/mask=xbfbffc00 # CONSTRUCT x6ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmge/2@4 # AUNIT --inst x6ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmge Rd_VPR128.4S, Rn_VPR128.4S, "#0.0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f>= zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f>= zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f>= zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f>= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x7ef8c800/mask=xfffffc00 # CONSTRUCT x7ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmge/2 # AUNIT --inst x7ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmge Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0111111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f>= 0; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x7ea0c800/mask=xffbffc00 # CONSTRUCT x7ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmge/2 # AUNIT --inst x7ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=0 :fcmge Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110010 & 
Rd_FPR32 & Rn_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f>= 0; local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x7ea0c800/mask=xffbffc00 # CONSTRUCT x7ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmge/2 # AUNIT --inst x7ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=1 :fcmge Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f>= 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x2ef8c800/mask=xbffffc00 # CONSTRUCT x2ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmge/2@2 # AUNIT --inst x2ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmge Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b10111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f>= zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f>= zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f>= zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f>= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.59 FCMGE (zero) page C7-2144 line 124940 MATCH x2ef8c800/mask=xbffffc00 # CONSTRUCT x6ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmge/2@2 # AUNIT --inst x6ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q = 1 :fcmge Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b10111011111000110010 & 
Rd_VPR128.8H & Rn_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f>= zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f>= zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f>= zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f>= zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f>= zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f>= zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f>= zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f>= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x2ea0e400/mask=xbfa0fc00 # CONSTRUCT x6ee0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@8 # AUNIT --inst x6ee0e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" :fcmgt Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f> Rm_VPR128.2D[0,64]) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f> Rm_VPR128.2D[64,64]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x2ea0e400/mask=xbfa0fc00 # CONSTRUCT x2ea0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@4 # AUNIT --inst x2ea0e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" :fcmgt Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f> Rm_VPR64.2S[0,32]) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f> Rm_VPR64.2S[32,32]) * eqVal; zext_zd(Zd); # 
zero upper 24 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x2ea0e400/mask=xbfa0fc00 # CONSTRUCT x6ea0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@4 # AUNIT --inst x6ea0e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" :fcmgt Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f> Rm_VPR128.4S[0,32]) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f> Rm_VPR128.4S[32,32]) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f> Rm_VPR128.4S[64,32]) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f> Rm_VPR128.4S[96,32]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x7ec02400/mask=xffe0fc00 # CONSTRUCT x7ec02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2 # AUNIT --inst x7ec02400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmgt Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01111110110 & b_1015=0b001001 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f> Rm_FPR16; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x7ea0e400/mask=xffa0fc00 # CONSTRUCT x7ea0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2 # AUNIT --inst x7ea0e400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision variant sz=0 :fcmgt Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_2331=0b011111101 & b_22=0 & b_21=1 & b_1015=0b111001 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f> Rm_FPR32; local tmp2:4 
= zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x7ea0e400/mask=xffa0fc00 # CONSTRUCT x7ee0e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2 # AUNIT --inst x7ee0e400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision variant sz=1 :fcmgt Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_2331=0b011111101 & b_22=1 & b_21=1 & b_1015=0b111001 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f> Rm_FPR64; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x2ec02400/mask=xbfe0fc00 # CONSTRUCT x2ec02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@2 # AUNIT --inst x2ec02400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmgt Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { local eqVal:2 = ~ 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f> Rm_VPR64.4H[0,16]) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f> Rm_VPR64.4H[16,16]) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f> Rm_VPR64.4H[32,16]) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f> Rm_VPR64.4H[48,16]) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.60 FCMGT (register) page C7-2147 line 125156 MATCH x2ec02400/mask=xbfe0fc00 # CONSTRUCT x6ec02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcmgt/2@2 # AUNIT --inst x6ec02400/mask=xffe0fc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q = 1 :fcmgt Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & 
b_2129=0b101110110 & b_1015=0b001001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { local eqVal:2 = ~ 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f> Rm_VPR128.8H[0,16]) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f> Rm_VPR128.8H[16,16]) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f> Rm_VPR128.8H[32,16]) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f> Rm_VPR128.8H[48,16]) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f> Rm_VPR128.8H[64,16]) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f> Rm_VPR128.8H[80,16]) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f> Rm_VPR128.8H[96,16]) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f> Rm_VPR128.8H[112,16]) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x0ea0c800/mask=xbfbffc00 # CONSTRUCT x4ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmgt/2@8 # AUNIT --inst x4ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" :fcmgt Rd_VPR128.2D, Rn_VPR128.2D, "#0.0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f> zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f> zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x0ea0c800/mask=xbfbffc00 # CONSTRUCT x0ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmgt/2@4 # AUNIT --inst x0ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmgt Rd_VPR64.2S, Rn_VPR64.2S, "#0.0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f> zero) * eqVal; 
Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f> zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x0ea0c800/mask=xbfbffc00 # CONSTRUCT x4ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmgt/2@4 # AUNIT --inst x4ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmgt Rd_VPR128.4S, Rn_VPR128.4S, "#0.0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f> zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f> zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f> zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f> zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x5ef8c800/mask=xfffffc00 # CONSTRUCT x5ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmgt/2 # AUNIT --inst x5ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmgt Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0101111011111000110010 & Rd_FPR16 & Rn_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f> 0; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x5ea0c800/mask=xffbffc00 # CONSTRUCT x5ea0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmgt/2 # AUNIT --inst x5ea0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=0 :fcmgt Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b010111101 & b_22=0 & b_1021=0b100000110010 & Rd_FPR32 & Rn_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f> 0; local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; 
zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x5ea0c800/mask=xffbffc00 # CONSTRUCT x5ee0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmgt/2 # AUNIT --inst x5ee0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=1 :fcmgt Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b010111101 & b_22=1 & b_1021=0b100000110010 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f> 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x0ef8c800/mask=xbffffc00 # CONSTRUCT x0ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmgt/2@2 # AUNIT --inst x0ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmgt Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b00111011111000110010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f> zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f> zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f> zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f> zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.61 FCMGT (zero) page C7-2151 line 125404 MATCH x0ef8c800/mask=xbffffc00 # CONSTRUCT x4ef8c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmgt/2@2 # AUNIT --inst x4ef8c800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q = 1 :fcmgt Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b00111011111000110010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f> zero) * eqVal; 
Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f> zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f> zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f> zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f> zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f> zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f> zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f> zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.55 FCMLA (by element) page C7-1117 line 64749 KEEPWITH # Values in set {0,90,180,270} depending on b_1314/1112 values of {0,1,2,3} fcmla_rotate: "#"^val is b_15=0 & b_1314 [ val = 90 * b_1314; ] { export *[const]:2 val; } fcmla_rotate: "#"^val is b_15=1 & b_1112 [ val = 90 * b_1112; ] { export *[const]:2 val; } # C7.2.62 FCMLA (by element) page C7-2154 line 125620 MATCH x2f001000/mask=xbf009400 # CONSTRUCT x2f401000/mask=xffc09c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@2 # AUNIT --inst x2f401000/mask=xffc09c00 --rand hfp --status noqemu --comment "noflags" # The representation of Rm in the documentation as a 4 bit field # extended by M actually makes it a standard 5 bit field. 
# 4H variant when size = 01 , Q = 0 T=VPR64.4H imm=Re_VPR128.H.vIndexHL i1=Re_VPR128.H i2=vIndexHL # NOTE: if size == '01' and H == '1' && Q == '0' then ReservedValue(); :fcmla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128.H.vIndexHL, fcmla_rotate is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_11=0 & b_10=0 & fcmla_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128.H.vIndexHL & Re_VPR128.H & vIndexHL & Zd { local tmp1:2 = SIMD_PIECE(Re_VPR128.H, vIndexHL:1); Rd_VPR64.4H = NEON_fcmla(Rn_VPR64.4H, tmp1, fcmla_rotate, 2:1); } # C7.2.62 FCMLA (by element) page C7-2154 line 125620 MATCH x2f001000/mask=xbf009400 # CONSTRUCT x6f401000/mask=xffc09400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@2 # AUNIT --inst x6f401000/mask=xffc09400 --rand hfp --status noqemu --comment "noflags" # 8H variant when size = 01 , Q = 1 T=VPR128.8H imm=Re_VPR128.H.vIndexHL i1=Re_VPR128.H i2=vIndexHL :fcmla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128.H.vIndexHL, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b01 & b_15=0 & b_12=1 & b_10=0 & fcmla_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128.H.vIndexHL & Re_VPR128.H & vIndexHL & Zd { local tmp1:2 = SIMD_PIECE(Re_VPR128.H, vIndexHL:1); Rd_VPR128.8H = NEON_fcmla(Rn_VPR128.8H, tmp1, fcmla_rotate, 2:1); } # C7.2.62 FCMLA (by element) page C7-2154 line 125620 MATCH x2f001000/mask=xbf009400 # CONSTRUCT x6f801000/mask=xffe09400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 # AUNIT --inst x6f801000/mask=xffe09400 --rand sfp --status noqemu --comment "noflags" # 4S variant when size = 10 , Q = 1 T=VPR128.4S imm=Re_VPR128.S.vIndex i1=Re_VPR128.S i2=vIndex # NOTE: if size == '10' and (L == '1' || Q == '0') then ReservedValue(); :fcmla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_21=0 & b_15=0 & b_12=1 & b_10=0 & fcmla_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.vIndex & Re_VPR128.S & 
vIndex & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = NEON_fcmla(Rn_VPR128.4S, tmp1, fcmla_rotate, 4:1); } # C7.2.63 FCMLA page C7-2157 line 125798 MATCH x2e00c400/mask=xbf20e400 # CONSTRUCT x2e40c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 # AUNIT --inst x2e40c400/mask=xffe0e400 --rand hfp --status noqemu --comment "noflags" # FCMLA SIMD 4H when size = 01 , Q = 0 :fcmla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, fcmla_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { Rd_VPR64.4H = NEON_fcmla(Rn_VPR64.4H, Rm_VPR64.4H, fcmla_rotate, 4:1); } # C7.2.63 FCMLA page C7-2157 line 125798 MATCH x2e00c400/mask=xbf20e400 # CONSTRUCT x6e40c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 # AUNIT --inst x6e40c400/mask=xffe0e400 --rand hfp --status noqemu --comment "noflags" # FCMLA SIMD 8H when size = 01 , Q = 1 :fcmla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { Rd_VPR128.8H = NEON_fcmla(Rn_VPR128.8H, Rm_VPR128.8H, fcmla_rotate, 4:1); } # C7.2.63 FCMLA page C7-2157 line 125798 MATCH x2e00c400/mask=xbf20e400 # CONSTRUCT x2e80c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 # AUNIT --inst x2e80c400/mask=xffe0e400 --rand sfp --status noqemu --comment "noflags" # FCMLA SIMD 2S when size = 10 , Q = 0 :fcmla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, fcmla_rotate is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { Rd_VPR64.2S = NEON_fcmla(Rn_VPR64.2S, Rm_VPR64.2S, fcmla_rotate, 4:1); } # C7.2.63 FCMLA page C7-2157 line 125798 MATCH 
x2e00c400/mask=xbf20e400 # CONSTRUCT x6e80c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 # AUNIT --inst x6e80c400/mask=xffe0e400 --rand sfp --status noqemu --comment "noflags" # FCMLA SIMD 4S when size = 10 , Q = 1 :fcmla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { Rd_VPR128.4S = NEON_fcmla(Rn_VPR128.4S, Rm_VPR128.4S, fcmla_rotate, 4:1); } # C7.2.63 FCMLA page C7-2157 line 125798 MATCH x2e00c400/mask=xbf20e400 # CONSTRUCT x6ec0c400/mask=xffe0e400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fcmla/3@4 # AUNIT --inst x6ec0c400/mask=xffe0e400 --rand dfp --status noqemu --comment "noflags" # FCMLA SIMD 2D when size = 11 , Q = 1 :fcmla Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, fcmla_rotate is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_21=0 & b_1315=0b110 & b_10=1 & fcmla_rotate & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { Rd_VPR128.2D = NEON_fcmla(Rn_VPR128.2D, Rm_VPR128.2D, fcmla_rotate, 4:1); } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x2ea0d800/mask=xbfbffc00 # CONSTRUCT x6ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmle/2@8 # AUNIT --inst x6ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" :fcmle Rd_VPR128.2D, Rn_VPR128.2D, "#0.0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xd & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f<= zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x2ea0d800/mask=xbfbffc00 # CONSTRUCT x2ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 
=NEON_fcmle/2@2 # AUNIT --inst x2ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmle Rd_VPR64.2S, Rn_VPR64.2S, "#0.0" is b_3131=0 & q=0 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f<= zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f<= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x2ea0d800/mask=xbfbffc00 # CONSTRUCT x6ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmle/2@4 # AUNIT --inst x6ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop :fcmle Rd_VPR128.4S, Rn_VPR128.4S, "#0.0" is b_3131=0 & q=1 & u=1 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xd & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f<= zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f<= zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f<= zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x7ef8d800/mask=xfffffc00 # CONSTRUCT x7ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmle/2 # AUNIT --inst x7ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmle Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0111111011111000110110 & Rd_FPR16 & Rn_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f<= 0; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x7ea0d800/mask=xffbffc00 # CONSTRUCT x7ea0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmle/2 # AUNIT 
--inst x7ea0d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=0 :fcmle Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b011111101 & b_22=0 & b_1021=0b100000110110 & Rd_FPR32 & Rn_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f<= 0; local tmp2:4 = zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x7ea0d800/mask=xffbffc00 # CONSTRUCT x7ee0d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmle/2 # AUNIT --inst x7ee0d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=1 :fcmle Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b011111101 & b_22=1 & b_1021=0b100000110110 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f<= 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x2ef8d800/mask=xbffffc00 # CONSTRUCT x2ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmle/2@2 # AUNIT --inst x2ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmle Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b10111011111000110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f<= zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f<= zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f<= zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f<= zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.64 FCMLE (zero) page C7-2160 line 125952 MATCH x2ef8d800/mask=xbffffc00 # CONSTRUCT x6ef8d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmle/2@2 # AUNIT --inst 
x6ef8d800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q = 1 :fcmle Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b10111011111000110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f<= zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f<= zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f<= zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f<= zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f<= zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f<= zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f<= zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f<= zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x0ea0e800/mask=xbfbffc00 # CONSTRUCT x4ee0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmlt/2@8 # AUNIT --inst x4ee0e800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" :fcmlt Rd_VPR128.2D, Rn_VPR128.2D, "#0.0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x30 & b_1216=0xe & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local eqVal:8 = ~ 0; local zero:8 = 0; Rd_VPR128.2D[0,64] = zext(Rn_VPR128.2D[0,64] f< zero) * eqVal; Rd_VPR128.2D[64,64] = zext(Rn_VPR128.2D[64,64] f< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x0ea0e800/mask=xbfbffc00 # CONSTRUCT x0ea0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmlt/2@4 # AUNIT --inst x0ea0e800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmlt Rd_VPR64.2S, Rn_VPR64.2S, "#0.0" is b_3131=0 & q=0 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; 
Rd_VPR64.2S[0,32] = zext(Rn_VPR64.2S[0,32] f< zero) * eqVal; Rd_VPR64.2S[32,32] = zext(Rn_VPR64.2S[32,32] f< zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x0ea0e800/mask=xbfbffc00 # CONSTRUCT x4ea0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmlt/2@4 # AUNIT --inst x4ea0e800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" :fcmlt Rd_VPR128.4S, Rn_VPR128.4S, "#0.0" is b_3131=0 & q=1 & u=0 & b_2428=0xe & b_23=1 & b_1722=0x10 & b_1216=0xe & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local eqVal:4 = ~ 0; local zero:4 = 0; Rd_VPR128.4S[0,32] = zext(Rn_VPR128.4S[0,32] f< zero) * eqVal; Rd_VPR128.4S[32,32] = zext(Rn_VPR128.4S[32,32] f< zero) * eqVal; Rd_VPR128.4S[64,32] = zext(Rn_VPR128.4S[64,32] f< zero) * eqVal; Rd_VPR128.4S[96,32] = zext(Rn_VPR128.4S[96,32] f< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x5ef8e800/mask=xfffffc00 # CONSTRUCT x5ef8e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmlt/2 # AUNIT --inst x5ef8e800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Scalar half precision variant :fcmlt Rd_FPR16, Rn_FPR16, "#0.0" is b_1031=0b0101111011111000111010 & Rd_FPR16 & Rn_FPR16 & Zd { local tmp1:1 = Rn_FPR16 f< 0; local tmp2:2 = zext(tmp1); local tmp3:2 = ~ 0:2; Rd_FPR16 = tmp2 * tmp3; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x5ea0e800/mask=xffbffc00 # CONSTRUCT x5ea0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:4 =NEON_fcmlt/2 # AUNIT --inst x5ea0e800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=0 :fcmlt Rd_FPR32, Rn_FPR32, "#0.0" is b_2331=0b010111101 & b_22=0 & b_1021=0b100000111010 & Rd_FPR32 & Rn_FPR32 & Zd { local tmp1:1 = Rn_FPR32 f< 0; local tmp2:4 = 
zext(tmp1); local tmp3:4 = ~ 0:4; Rd_FPR32 = tmp2 * tmp3; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x5ea0e800/mask=xffbffc00 # CONSTRUCT x5ee0e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:8 =NEON_fcmlt/2 # AUNIT --inst x5ee0e800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "noflags" # Scalar single-precision and double-precision sz=1 :fcmlt Rd_FPR64, Rn_FPR64, "#0.0" is b_2331=0b010111101 & b_22=1 & b_1021=0b100000111010 & Rd_FPR64 & Rn_FPR64 & Zd { local tmp1:1 = Rn_FPR64 f< 0; local tmp2:8 = zext(tmp1); local tmp3:8 = ~ 0:8; Rd_FPR64 = tmp2 * tmp3; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x0ef8e800/mask=xbffffc00 # CONSTRUCT x0ef8e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmlt/2@2 # AUNIT --inst x0ef8e800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 4H when Q = 0 :fcmlt Rd_VPR64.4H, Rn_VPR64.4H, "#0.0" is b_31=0 & b_30=0 & b_1029=0b00111011111000111010 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; Rd_VPR64.4H[0,16] = zext(Rn_VPR64.4H[0,16] f< zero) * eqVal; Rd_VPR64.4H[16,16] = zext(Rn_VPR64.4H[16,16] f< zero) * eqVal; Rd_VPR64.4H[32,16] = zext(Rn_VPR64.4H[32,16] f< zero) * eqVal; Rd_VPR64.4H[48,16] = zext(Rn_VPR64.4H[48,16] f< zero) * eqVal; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.65 FCMLT (zero) page C7-2163 line 126168 MATCH x0ef8e800/mask=xbffffc00 # CONSTRUCT x4ef8e800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 0:2 =NEON_fcmlt/2@2 # AUNIT --inst x4ef8e800/mask=xfffffc00 --rand hfp --status noqemu --comment "noflags" # Vector half precision variant SIMD 8H when Q = 1 :fcmlt Rd_VPR128.8H, Rn_VPR128.8H, "#0.0" is b_31=0 & b_30=1 & b_1029=0b00111011111000111010 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { local eqVal:2 = ~ 0; local zero:2 = 0; 
Rd_VPR128.8H[0,16] = zext(Rn_VPR128.8H[0,16] f< zero) * eqVal; Rd_VPR128.8H[16,16] = zext(Rn_VPR128.8H[16,16] f< zero) * eqVal; Rd_VPR128.8H[32,16] = zext(Rn_VPR128.8H[32,16] f< zero) * eqVal; Rd_VPR128.8H[48,16] = zext(Rn_VPR128.8H[48,16] f< zero) * eqVal; Rd_VPR128.8H[64,16] = zext(Rn_VPR128.8H[64,16] f< zero) * eqVal; Rd_VPR128.8H[80,16] = zext(Rn_VPR128.8H[80,16] f< zero) * eqVal; Rd_VPR128.8H[96,16] = zext(Rn_VPR128.8H[96,16] f< zero) * eqVal; Rd_VPR128.8H[112,16] = zext(Rn_VPR128.8H[112,16] f< zero) * eqVal; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.66 FCMP page C7-2166 line 126365 MATCH x1e202000/mask=xff20fc17 # CONSTRUCT x1e602000/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 # AUNIT --inst x1e602000/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" :fcmp Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x0 { ftestNAN(Rn_FPR64, Rm_FPR64); fcomp(Rn_FPR64, Rm_FPR64); } # C7.2.66 FCMP page C7-2166 line 126365 MATCH x1e202000/mask=xff20fc17 # CONSTRUCT x1e602008/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 # AUNIT --inst x1e602008/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" :fcmp Rn_FPR64, Rm_fpz64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x8 { ftestNAN(Rn_FPR64, Rm_fpz64); fcomp(Rn_FPR64, Rm_fpz64); } # C7.2.66 FCMP page C7-2166 line 126365 MATCH x1e202000/mask=xff20fc17 # CONSTRUCT x1e202008/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 # AUNIT --inst x1e202008/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" :fcmp Rn_FPR32, Rm_fpz32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op=0 & 
b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x8 { ftestNAN(Rn_FPR32, Rm_fpz32); fcomp(Rn_FPR32, Rm_fpz32); } # C7.2.66 FCMP page C7-2166 line 126365 MATCH x1e202000/mask=xff20fc17 # CONSTRUCT x1e202000/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 # AUNIT --inst x1e202000/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" :fcmp Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x0 { ftestNAN(Rn_FPR32, Rm_FPR32); fcomp(Rn_FPR32, Rm_FPR32); } # C7.2.66 FCMP page C7-2166 line 126365 MATCH x1e202000/mask=xff20fc17 # CONSTRUCT x1ee02008/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 # AUNIT --inst x1ee02008/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" :fcmp Rn_FPR16, Rm_fpz16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x8 { ftestNAN(Rn_FPR16, Rm_fpz16); fcomp(Rn_FPR16, Rm_fpz16); } # C7.2.66 FCMP page C7-2166 line 126365 MATCH x1e202000/mask=xff20fc17 # CONSTRUCT x1ee02000/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmp/2 # AUNIT --inst x1ee02000/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" :fcmp Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x0 { ftestNAN(Rn_FPR16, Rm_FPR16); fcomp(Rn_FPR16, Rm_FPR16); } # C7.2.67 FCMPE page C7-2168 line 126506 MATCH x1e202010/mask=xff20fc17 # CONSTRUCT x1e602010/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 # AUNIT --inst x1e602010/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" :fcmpe Rn_FPR64, Rm_FPR64 is 
m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x10 { ftestNAN(Rn_FPR64, Rm_FPR64); fcomp(Rn_FPR64, Rm_FPR64); } # C7.2.67 FCMPE page C7-2168 line 126506 MATCH x1e202010/mask=xff20fc17 # CONSTRUCT x1e602018/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 # AUNIT --inst x1e602018/mask=xffe0fc1f --rand dfp --status nodest --comment "flags" :fcmpe Rn_FPR64, Rm_fpz64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_fpz64 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR64 & fpcmp.opcode2=0x18 { ftestNAN(Rn_FPR64, Rm_fpz64); fcomp(Rn_FPR64, Rm_fpz64); } # C7.2.67 FCMPE page C7-2168 line 126506 MATCH x1e202010/mask=xff20fc17 # CONSTRUCT x1e202018/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 # AUNIT --inst x1e202018/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" :fcmpe Rn_FPR32, Rm_fpz32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_fpz32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x18 { ftestNAN(Rn_FPR32, Rm_fpz32); fcomp(Rn_FPR32, Rm_fpz32); } # C7.2.67 FCMPE page C7-2168 line 126506 MATCH x1e202010/mask=xff20fc17 # CONSTRUCT x1e202010/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 # AUNIT --inst x1e202010/mask=xffe0fc1f --rand sfp --status nodest --comment "flags" :fcmpe Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR32 & fpcmp.opcode2=0x10 { ftestNAN(Rn_FPR32, Rm_FPR32); fcomp(Rn_FPR32, Rm_FPR32); } # C7.2.67 FCMPE page C7-2168 line 126506 MATCH x1e202010/mask=xff20fc17 # CONSTRUCT x1ee02018/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =ftestNAN/2 null 
ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 # AUNIT --inst x1ee02018/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" :fcmpe Rn_FPR16, Rm_fpz16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_fpz16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x18 { ftestNAN(Rn_FPR16, Rm_fpz16); fcomp(Rn_FPR16, Rm_fpz16); } # C7.2.67 FCMPE page C7-2168 line 126506 MATCH x1e202010/mask=xff20fc17 # CONSTRUCT x1ee02010/mask=xffe0fc1f MATCHED 1 DOCUMENTED OPCODES # SMACRO null ARG1 ARG2 =ftestNAN/2 null ARG1 ARG2 =fcomp/2 # SMACRO(pseudo) null ARG1 ARG2 =NEON_fcmpe/2 # AUNIT --inst x1ee02010/mask=xffe0fc1f --rand hfp --status nodest --comment "flags" :fcmpe Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & fpcmp.op=0 & b_1013=0x8 & Rn_FPR16 & fpcmp.opcode2=0x10 { ftestNAN(Rn_FPR16, Rm_FPR16); fcomp(Rn_FPR16, Rm_FPR16); } # C7.2.68 FCSEL page C7-2170 line 126647 MATCH x1e200c00/mask=xff200c00 # CONSTRUCT x1e600c00/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 = dup ext swap ARG4:1 inst_next goto = # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4:1 =NEON_fcsel/3 # AUNIT --inst x1e600c00/mask=xffe00c00 --rand dfp --status pass --comment "flags" # Rm may be the same register as Rd, so it needs to be saved :fcsel Rd_FPR64, Rn_FPR64, Rm_FPR64, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & CondOp & b_1011=3 & Rn_FPR64 & Rd_FPR64 & Zd { local tmp1:8 = Rm_FPR64; Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd if (CondOp:1) goto inst_next; Rd_FPR64 = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.68 FCSEL page C7-2170 line 126647 MATCH x1e200c00/mask=xff200c00 # CONSTRUCT x1e200c00/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 = dup ext swap ARG4:1 inst_next goto = # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4:1 =NEON_fcsel/3 # AUNIT --inst x1e200c00/mask=xffe00c00 --rand sfp --status pass --comment "flags" 
:fcsel Rd_FPR32, Rn_FPR32, Rm_FPR32, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & CondOp & b_1011=3 & Rn_FPR32 & Rd_FPR32 & Zd { local tmp1:4 = Rm_FPR32; Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd if (CondOp:1) goto inst_next; Rd_FPR32 = tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.68 FCSEL page C7-2170 line 126647 MATCH x1e200c00/mask=xff200c00 # CONSTRUCT x1ee00c00/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 = dup ext swap ARG4:1 inst_next goto = # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4:1 =NEON_fcsel/3 # AUNIT --inst x1ee00c00/mask=xffe00c00 --rand hfp --status noqemu --comment "flags" :fcsel Rd_FPR16, Rn_FPR16, Rm_FPR16, CondOp is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & CondOp & b_1011=3 & Rn_FPR16 & Rd_FPR16 & Zd { local tmp1:2 = Rm_FPR16; Rd_FPR16 = Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd if (CondOp:1) goto inst_next; Rd_FPR16 = tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1ee2c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 # AUNIT --inst x1ee2c000/mask=xfffffc00 --rand hfp --status pass --comment "nofpround" :fcvt Rd_FPR64, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x5 & b_1014=0x10 & Rn_FPR16 & Rd_FPR64 & Zd { Rd_FPR64 = float2float(Rn_FPR16); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1e22c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 # AUNIT --inst x1e22c000/mask=xfffffc00 --rand sfp --status pass --comment "nofpround" :fcvt Rd_FPR64, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x5 & b_1014=0x10 & Rn_FPR32 & Rd_FPR64 & Zd { Rd_FPR64 = 
float2float(Rn_FPR32); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1e63c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 # AUNIT --inst x1e63c000/mask=xfffffc00 --rand hfp --status pass --comment "nofpround" :fcvt Rd_FPR16, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x7 & b_1014=0x10 & Rn_FPR64 & Rd_FPR16 & Zd { Rd_FPR16 = float2float(Rn_FPR64); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1e23c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 # AUNIT --inst x1e23c000/mask=xfffffc00 --rand hfp --status fail --comment "nofpround" :fcvt Rd_FPR16, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x7 & b_1014=0x10 & Rn_FPR32 & Rd_FPR16 & Zd { Rd_FPR16 = float2float(Rn_FPR32); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1e624000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 # AUNIT --inst x1e624000/mask=xfffffc00 --rand sfp --status fail --comment "nofpround" :fcvt Rd_FPR32, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x4 & b_1014=0x10 & Rn_FPR64 & Rd_FPR32 & Zd { Rd_FPR32 = float2float(Rn_FPR64); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1ee24000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt/1 # AUNIT --inst x1ee24000/mask=xfffffc00 --rand hfp --status pass --comment "nofpround" :fcvt Rd_FPR32, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & 
b_2121=1 & fpDpOpcode=0x4 & b_1014=0x10 & Rn_FPR16 & Rd_FPR32 & Zd { Rd_FPR32 = float2float(Rn_FPR16); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.63 FCVTAS (vector) page C7-1136 line 65961 KEEPWITH fcvt_vmnemonic: "fcvtas" is b_29=0 & b_23=0 & b_1314=0b10 & b_12=0 { } fcvt_vmnemonic: "fcvtau" is b_29=1 & b_23=0 & b_1314=0b10 & b_12=0 { } fcvt_vmnemonic: "fcvtms" is b_29=0 & b_23=0 & b_1314=0b01 & b_12=1 { } fcvt_vmnemonic: "fcvtmu" is b_29=1 & b_23=0 & b_1314=0b01 & b_12=1 { } fcvt_vmnemonic: "fcvtns" is b_29=0 & b_23=0 & b_1314=0b01 & b_12=0 { } fcvt_vmnemonic: "fcvtnu" is b_29=1 & b_23=0 & b_1314=0b01 & b_12=0 { } fcvt_vmnemonic: "fcvtps" is b_29=0 & b_23=1 & b_1314=0b01 & b_12=0 { } fcvt_vmnemonic: "fcvtpu" is b_29=1 & b_23=1 & b_1314=0b01 & b_12=0 { } fcvt_vmnemonic: "fcvtzs" is b_29=0 & b_23=1 & b_1314=0b01 & b_12=1 { } fcvt_vmnemonic: "fcvtzu" is b_29=1 & b_23=1 & b_1314=0b01 & b_12=1 { } fcvt_smnemonic: "fcvtas" is b_1920=0b00 & b_1618=0b100 { } fcvt_smnemonic: "fcvtau" is b_1920=0b00 & b_1618=0b101 { } fcvt_smnemonic: "fcvtms" is b_1920=0b10 & b_1618=0b000 { } fcvt_smnemonic: "fcvtmu" is b_1920=0b10 & b_1618=0b001 { } fcvt_smnemonic: "fcvtns" is b_1920=0b00 & b_1618=0b000 { } fcvt_smnemonic: "fcvtnu" is b_1920=0b00 & b_1618=0b001 { } fcvt_smnemonic: "fcvtps" is b_1920=0b01 & b_1618=0b000 { } fcvt_smnemonic: "fcvtpu" is b_1920=0b01 & b_1618=0b001 { } fcvt_smnemonic: "fcvtzs" is b_1920=0b11 & b_1618=0b000 { } fcvt_smnemonic: "fcvtzu" is b_1920=0b11 & b_1618=0b001 { } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x5e79c800/mask=xfffffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x7e79c800/mask=xfffffc00 # CONSTRUCT x5e79c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x5e79c800/mask=xdffffc00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision :^fcvt_vmnemonic Rd_FPR16, Rn_FPR16 is b_3031=0b01 & 
b_1028=0b1111001111001110010 & fcvt_vmnemonic & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = trunc(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x5e21c800/mask=xffbffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x7e21c800/mask=xffbffc00 # CONSTRUCT x5e21c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x5e21c800/mask=xdffffc00 --rand sfp --status fail --comment "nofpround" # Scalar single-precision and double-precision variant sz=0 :^fcvt_vmnemonic Rd_FPR32, Rn_FPR32 is b_3031=0b01 & b_2328=0b111100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = trunc(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x5e21c800/mask=xffbffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x7e21c800/mask=xffbffc00 # CONSTRUCT x5e61c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x5e61c800/mask=xdffffc00 --rand dfp --status fail --comment "nofpround" # Scalar single-precision and double-precision variant sz=1 :^fcvt_vmnemonic Rd_FPR64, Rn_FPR64 is b_3031=0b01 & b_2328=0b111100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = trunc(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x0e79c800/mask=xbffffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x2e79c800/mask=xbffffc00 # CONSTRUCT x0e79c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2 # AUNIT --inst x0e79c800/mask=xdffffc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant Q=0 :^fcvt_vmnemonic Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & 
b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x0e79c800/mask=xbffffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x2e79c800/mask=xbffffc00 # CONSTRUCT x4e79c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2 # AUNIT --inst x4e79c800/mask=xdffffc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant Q=1 :^fcvt_vmnemonic Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2328=0b011100 & b_1022=0b1111001110010 & fcvt_vmnemonic & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x0e21c800/mask=xbfbffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x2e21c800/mask=xbfbffc00 # CONSTRUCT x0e21c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4 # AUNIT --inst x0e21c800/mask=xdffffc00 --rand sfp --status fail --comment "nofpround" # Vector single-precision and double-precision variant SIMD 2S when sz = 0 , Q = 0 :^fcvt_vmnemonic Rd_VPR64.2S, 
Rn_VPR64.2S is b_31=0 & b_30=0 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x0e21c800/mask=xbfbffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x2e21c800/mask=xbfbffc00 # CONSTRUCT x4e21c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4 # AUNIT --inst x4e21c800/mask=xdffffc00 --rand sfp --status fail --comment "nofpround" # Vector single-precision and double-precision variant SIMD 4S when sz = 0 , Q = 1 :^fcvt_vmnemonic Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=0 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.70 FCVTAS (vector) page C7-2174 line 126882 MATCH x0e21c800/mask=xbfbffc00 # C7.2.72 FCVTAU (vector) page C7-2179 line 127203 MATCH x2e21c800/mask=xbfbffc00 # CONSTRUCT x4e61c800/mask=xdffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@8 # AUNIT --inst x4e61c800/mask=xdffffc00 --rand dfp --status fail --comment "nofpround" # Vector single-precision and double-precision variant SIMD 2D when sz = 1 , Q = 1 :^fcvt_vmnemonic Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2328=0b011100 & b_22=1 & b_1021=0b100001110010 & fcvt_vmnemonic & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 
Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.71 FCVTAS (scalar) page C7-2177 line 127075 MATCH x1e240000/mask=x7f3ffc00 # C7.2.73 FCVTAU (scalar) page C7-2182 line 127396 MATCH x1e250000/mask=x7f3ffc00 # CONSTRUCT x1ee40000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x1ee40000/mask=xfffefc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 32-bit variant when sf == 0 && type == 11 :^fcvt_smnemonic Rd_GPR32, Rn_FPR16 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR16 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR16); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.71 FCVTAS (scalar) page C7-2177 line 127075 MATCH x1e240000/mask=x7f3ffc00 # C7.2.73 FCVTAU (scalar) page C7-2182 line 127396 MATCH x1e250000/mask=x7f3ffc00 # CONSTRUCT x9ee40000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x9ee40000/mask=xfffefc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 64-bit variant when sf == 1 && type == 11 :^fcvt_smnemonic Rd_GPR64, Rn_FPR16 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR16 { Rd_GPR64 = trunc(Rn_FPR16); } # C7.2.71 FCVTAS (scalar) page C7-2177 line 127075 MATCH x1e240000/mask=x7f3ffc00 # C7.2.73 FCVTAU (scalar) page C7-2182 line 127396 MATCH x1e250000/mask=x7f3ffc00 # CONSTRUCT x1e240000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x1e240000/mask=xfffefc00 --rand sfp --status fail --comment "nofpround" # Single-precision to 32-bit variant when sf == 0 && type == 00 :^fcvt_smnemonic Rd_GPR32, Rn_FPR32 is 
b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR32 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR32); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.71 FCVTAS (scalar) page C7-2177 line 127075 MATCH x1e240000/mask=x7f3ffc00 # C7.2.73 FCVTAU (scalar) page C7-2182 line 127396 MATCH x1e250000/mask=x7f3ffc00 # CONSTRUCT x9e240000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x9e240000/mask=xfffefc00 --rand sfp --status fail --comment "nofpround" # Single-precision to 64-bit variant when sf == 1 && type == 00 :^fcvt_smnemonic Rd_GPR64, Rn_FPR32 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR32 { Rd_GPR64 = trunc(Rn_FPR32); } # C7.2.71 FCVTAS (scalar) page C7-2177 line 127075 MATCH x1e240000/mask=x7f3ffc00 # C7.2.73 FCVTAU (scalar) page C7-2182 line 127396 MATCH x1e250000/mask=x7f3ffc00 # CONSTRUCT x1e640000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x1e640000/mask=xfffefc00 --rand dfp --status fail --comment "nofpround" # Double-precision to 32-bit variant when sf == 0 && type == 01 :^fcvt_smnemonic Rd_GPR32, Rn_FPR64 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR64); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.71 FCVTAS (scalar) page C7-2177 line 127075 MATCH x1e240000/mask=x7f3ffc00 # C7.2.73 FCVTAU (scalar) page C7-2182 line 127396 MATCH x1e250000/mask=x7f3ffc00 # CONSTRUCT x9e640000/mask=xfffefc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x9e640000/mask=xfffefc00 --rand dfp --status fail --comment "nofpround" # Double-precision to 
64-bit variant sf == 1 && type == 01
:^fcvt_smnemonic Rd_GPR64, Rn_FPR64
is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1720=0b0010 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR64
{
    Rd_GPR64 = trunc(Rn_FPR64);
}

# C7.2.74 FCVTL, FCVTL2 page C7-2184 line 127524 MATCH x0e217800/mask=xbfbffc00
# CONSTRUCT x0e617800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =var =$float2float@4
# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl/1@4
# AUNIT --inst x0e617800/mask=xfffffc00 --rand dfp --status fail --comment "ext nofpround"

:fcvtl Rd_VPR128.2D, Rn_VPR64.2S
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR64.2S & Rd_VPR128.2D & Zd
{
    TMPD1 = Rn_VPR64.2S;
    # simd resize Rd_VPR128.2D = float2float(TMPD1) (lane size 4 to 8)
    Rd_VPR128.2D[0,64] = float2float(TMPD1[0,32]);
    Rd_VPR128.2D[64,64] = float2float(TMPD1[32,32]);
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.74 FCVTL, FCVTL2 page C7-2184 line 127524 MATCH x0e217800/mask=xbfbffc00
# CONSTRUCT x4e617800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 =$float2float@4
# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl2/1@8
# AUNIT --inst x4e617800/mask=xfffffc00 --rand dfp --status fail --comment "ext nofpround"

:fcvtl2 Rd_VPR128.2D, Rn_VPR128.4S
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd
{
    TMPD1 = Rn_VPR128.4S[64,64];
    # simd resize Rd_VPR128.2D = float2float(TMPD1) (lane size 4 to 8)
    Rd_VPR128.2D[0,64] = float2float(TMPD1[0,32]);
    Rd_VPR128.2D[64,64] = float2float(TMPD1[32,32]);
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.74 FCVTL, FCVTL2 page C7-2184 line 127524 MATCH x0e217800/mask=xbfbffc00
# CONSTRUCT x0e217800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =var =$float2float@2
# SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl/1@4
# AUNIT --inst x0e217800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround"

:fcvtl
Rd_VPR128.4S, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR64.4H; # simd resize Rd_VPR128.4S = float2float(TMPD1) (lane size 2 to 4) Rd_VPR128.4S[0,32] = float2float(TMPD1[0,16]); Rd_VPR128.4S[32,32] = float2float(TMPD1[16,16]); Rd_VPR128.4S[64,32] = float2float(TMPD1[32,16]); Rd_VPR128.4S[96,32] = float2float(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.74 FCVTL, FCVTL2 page C7-2184 line 127524 MATCH x0e217800/mask=xbfbffc00 # CONSTRUCT x4e217800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$float2float@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvtl2/1@2 # AUNIT --inst x4e217800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround" :fcvtl2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x17 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize Rd_VPR128.4S = float2float(TMPD1) (lane size 2 to 4) Rd_VPR128.4S[0,32] = float2float(TMPD1[0,16]); Rd_VPR128.4S[32,32] = float2float(TMPD1[16,16]); Rd_VPR128.4S[64,32] = float2float(TMPD1[32,16]); Rd_VPR128.4S[96,32] = float2float(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x5e79b800/mask=xfffffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x7e79b800/mask=xfffffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x5e79a800/mask=xfffffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x7e79a800/mask=xfffffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x5ef9a800/mask=xfffffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x7ef9a800/mask=xfffffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x5ef9b800/mask=xfffffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x7ef9b800/mask=xfffffc00 # CONSTRUCT 
x5e79a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x5e79a800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision :^fcvt_vmnemonic Rd_FPR16, Rn_FPR16 is b_3031=0b01 & b_2428=0b11110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = trunc(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x5e21b800/mask=xffbffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x7e21b800/mask=xffbffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x5e21a800/mask=xffbffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x7e21a800/mask=xffbffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x5ea1a800/mask=xffbffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x7ea1a800/mask=xffbffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x5ea1b800/mask=xffbffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x7ea1b800/mask=xffbffc00 # CONSTRUCT x5e21a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x5e21a800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" # Scalar single-precision and double-precision variant sz=0 :^fcvt_vmnemonic Rd_FPR32, Rn_FPR32 is b_3031=0b01 & b_2428=0b11110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = trunc(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x5e21b800/mask=xffbffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x7e21b800/mask=xffbffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x5e21a800/mask=xffbffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x7e21a800/mask=xffbffc00 # C7.2.84 FCVTPS 
(vector) page C7-2208 line 129003 MATCH x5ea1a800/mask=xffbffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x7ea1a800/mask=xffbffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x5ea1b800/mask=xffbffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x7ea1b800/mask=xffbffc00 # CONSTRUCT x5e61a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x5e61a800/mask=xdf7fec00 --rand dfp --status fail --comment "nofpround" # Scalar single-precision and double-precision variant sz=1 :^fcvt_vmnemonic Rd_FPR64, Rn_FPR64 is b_3031=0b01 & b_2428=0b11110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = trunc(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x0e79b800/mask=xbffffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x2e79b800/mask=xbffffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x0e79a800/mask=xbffffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x2e79a800/mask=xbffffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x0ef9a800/mask=xbffffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x2ef9a800/mask=xbffffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x0ef9b800/mask=xbffffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x2ef9b800/mask=xbffffc00 # CONSTRUCT x0e79a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2 # AUNIT --inst x0e79a800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant Q=0 :^fcvt_vmnemonic Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = 
trunc(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x0e79b800/mask=xbffffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x2e79b800/mask=xbffffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x0e79a800/mask=xbffffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x2e79a800/mask=xbffffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x0ef9a800/mask=xbffffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x2ef9a800/mask=xbffffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x0ef9b800/mask=xbffffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x2ef9b800/mask=xbffffc00 # CONSTRUCT x4e79a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@2 # AUNIT --inst x4e79a800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant Q=1 :^fcvt_vmnemonic Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2428=0b01110 & b_1322=0b1111001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x0e21b800/mask=xbfbffc00 # C7.2.77 FCVTMU (vector) 
page C7-2191 line 127938 MATCH x2e21b800/mask=xbfbffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x0e21a800/mask=xbfbffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x2e21a800/mask=xbfbffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x0ea1a800/mask=xbfbffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x2ea1a800/mask=xbfbffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x0ea1b800/mask=xbfbffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x2ea1b800/mask=xbfbffc00 # CONSTRUCT x0e21a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4 # AUNIT --inst x0e21a800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" # Vector single-precision and double-precision variant SIMD 2S when sz = 0 , Q = 0 :^fcvt_vmnemonic Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x0e21b800/mask=xbfbffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x2e21b800/mask=xbfbffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x0e21a800/mask=xbfbffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x2e21a800/mask=xbfbffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x0ea1a800/mask=xbfbffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x2ea1a800/mask=xbfbffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x0ea1b800/mask=xbfbffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x2ea1b800/mask=xbfbffc00 # CONSTRUCT x4e21a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # 
SMACRO ARG1 ARG2 =$trunc@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@4 # AUNIT --inst x4e21a800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" # Vector single-precision and double-precision variant SIMD 4S when sz = 0 , Q = 1 :^fcvt_vmnemonic Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=0 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.75 FCVTMS (vector) page C7-2186 line 127614 MATCH x0e21b800/mask=xbfbffc00 # C7.2.77 FCVTMU (vector) page C7-2191 line 127938 MATCH x2e21b800/mask=xbfbffc00 # C7.2.80 FCVTNS (vector) page C7-2198 line 128355 MATCH x0e21a800/mask=xbfbffc00 # C7.2.82 FCVTNU (vector) page C7-2203 line 128679 MATCH x2e21a800/mask=xbfbffc00 # C7.2.84 FCVTPS (vector) page C7-2208 line 129003 MATCH x0ea1a800/mask=xbfbffc00 # C7.2.86 FCVTPU (vector) page C7-2213 line 129327 MATCH x2ea1a800/mask=xbfbffc00 # C7.2.90 FCVTZS (vector, integer) page C7-2224 line 129963 MATCH x0ea1b800/mask=xbfbffc00 # C7.2.94 FCVTZU (vector, integer) page C7-2234 line 130576 MATCH x2ea1b800/mask=xbfbffc00 # CONSTRUCT x4e61a800/mask=xdf7fec00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1@8 # AUNIT --inst x4e61a800/mask=xdf7fec00 --rand dfp --status fail --comment "nofpround" # Vector single-precision and double-precision variant SIMD 2D when sz = 1 , Q = 1 :^fcvt_vmnemonic Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2428=0b01110 & b_22=1 & b_1321=0b100001101 & b_1011=0b10 & fcvt_vmnemonic & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = 
trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.76 FCVTMS (scalar) page C7-2189 line 127807 MATCH x1e300000/mask=x7f3ffc00 # C7.2.78 FCVTMU (scalar) page C7-2194 line 128131 MATCH x1e310000/mask=x7f3ffc00 # C7.2.81 FCVTNS (scalar) page C7-2201 line 128548 MATCH x1e200000/mask=x7f3ffc00 # C7.2.83 FCVTNU (scalar) page C7-2206 line 128872 MATCH x1e210000/mask=x7f3ffc00 # C7.2.85 FCVTPS (scalar) page C7-2211 line 129196 MATCH x1e280000/mask=x7f3ffc00 # C7.2.87 FCVTPU (scalar) page C7-2216 line 129520 MATCH x1e290000/mask=x7f3ffc00 # C7.2.92 FCVTZS (scalar, integer) page C7-2229 line 130291 MATCH x1e380000/mask=x7f3ffc00 # C7.2.96 FCVTZU (scalar, integer) page C7-2239 line 130904 MATCH x1e390000/mask=x7f3ffc00 # CONSTRUCT x1ee00000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x1ee00000/mask=xffe6fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 32-bit variant when sf == 0 && type == 11 :^fcvt_smnemonic Rd_GPR32, Rn_FPR16 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR16 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR16); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.76 FCVTMS (scalar) page C7-2189 line 127807 MATCH x1e300000/mask=x7f3ffc00 # C7.2.78 FCVTMU (scalar) page C7-2194 line 128131 MATCH x1e310000/mask=x7f3ffc00 # C7.2.81 FCVTNS (scalar) page C7-2201 line 128548 MATCH x1e200000/mask=x7f3ffc00 # C7.2.83 FCVTNU (scalar) page C7-2206 line 128872 MATCH x1e210000/mask=x7f3ffc00 # C7.2.85 FCVTPS (scalar) page C7-2211 line 129196 MATCH x1e280000/mask=x7f3ffc00 # C7.2.87 FCVTPU (scalar) page C7-2216 line 129520 MATCH x1e290000/mask=x7f3ffc00 # C7.2.92 FCVTZS (scalar, integer) page C7-2229 line 130291 MATCH x1e380000/mask=x7f3ffc00 # C7.2.96 FCVTZU (scalar, integer) page C7-2239 line 130904 MATCH 
x1e390000/mask=x7f3ffc00 # CONSTRUCT x9ee00000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x9ee00000/mask=xffe6fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 64-bit variant when sf == 1 && type == 11 :^fcvt_smnemonic Rd_GPR64, Rn_FPR16 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR16 { Rd_GPR64 = trunc(Rn_FPR16); } # C7.2.76 FCVTMS (scalar) page C7-2189 line 127807 MATCH x1e300000/mask=x7f3ffc00 # C7.2.78 FCVTMU (scalar) page C7-2194 line 128131 MATCH x1e310000/mask=x7f3ffc00 # C7.2.81 FCVTNS (scalar) page C7-2201 line 128548 MATCH x1e200000/mask=x7f3ffc00 # C7.2.83 FCVTNU (scalar) page C7-2206 line 128872 MATCH x1e210000/mask=x7f3ffc00 # C7.2.85 FCVTPS (scalar) page C7-2211 line 129196 MATCH x1e280000/mask=x7f3ffc00 # C7.2.87 FCVTPU (scalar) page C7-2216 line 129520 MATCH x1e290000/mask=x7f3ffc00 # C7.2.92 FCVTZS (scalar, integer) page C7-2229 line 130291 MATCH x1e380000/mask=x7f3ffc00 # C7.2.96 FCVTZU (scalar, integer) page C7-2239 line 130904 MATCH x1e390000/mask=x7f3ffc00 # CONSTRUCT x1e200000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x1e200000/mask=xffe6fc00 --rand sfp --status fail --comment "nofpround" # Single-precision to 32-bit variant when sf == 0 && type == 00 :^fcvt_smnemonic Rd_GPR32, Rn_FPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR32 & Rn_FPR32 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR32); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.76 FCVTMS (scalar) page C7-2189 line 127807 MATCH x1e300000/mask=x7f3ffc00 # C7.2.78 FCVTMU (scalar) page C7-2194 line 128131 MATCH x1e310000/mask=x7f3ffc00 # C7.2.81 FCVTNS (scalar) page C7-2201 line 128548 MATCH x1e200000/mask=x7f3ffc00 # C7.2.83 FCVTNU 
(scalar) page C7-2206 line 128872 MATCH x1e210000/mask=x7f3ffc00 # C7.2.85 FCVTPS (scalar) page C7-2211 line 129196 MATCH x1e280000/mask=x7f3ffc00 # C7.2.87 FCVTPU (scalar) page C7-2216 line 129520 MATCH x1e290000/mask=x7f3ffc00 # C7.2.92 FCVTZS (scalar, integer) page C7-2229 line 130291 MATCH x1e380000/mask=x7f3ffc00 # C7.2.96 FCVTZU (scalar, integer) page C7-2239 line 130904 MATCH x1e390000/mask=x7f3ffc00 # CONSTRUCT x9e200000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x9e200000/mask=xffe6fc00 --rand sfp --status fail --comment "nofpround" # Single-precision to 64-bit variant when sf == 1 && type == 00 :^fcvt_smnemonic Rd_GPR64, Rn_FPR32 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR32 { Rd_GPR64 = trunc(Rn_FPR32); } # C7.2.76 FCVTMS (scalar) page C7-2189 line 127807 MATCH x1e300000/mask=x7f3ffc00 # C7.2.78 FCVTMU (scalar) page C7-2194 line 128131 MATCH x1e310000/mask=x7f3ffc00 # C7.2.81 FCVTNS (scalar) page C7-2201 line 128548 MATCH x1e200000/mask=x7f3ffc00 # C7.2.83 FCVTNU (scalar) page C7-2206 line 128872 MATCH x1e210000/mask=x7f3ffc00 # C7.2.85 FCVTPS (scalar) page C7-2211 line 129196 MATCH x1e280000/mask=x7f3ffc00 # C7.2.87 FCVTPU (scalar) page C7-2216 line 129520 MATCH x1e290000/mask=x7f3ffc00 # C7.2.92 FCVTZS (scalar, integer) page C7-2229 line 130291 MATCH x1e380000/mask=x7f3ffc00 # C7.2.96 FCVTZU (scalar, integer) page C7-2239 line 130904 MATCH x1e390000/mask=x7f3ffc00 # CONSTRUCT x1e600000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x1e600000/mask=xffe6fc00 --rand dfp --status fail --comment "nofpround" # Double-precision to 32-bit variant when sf == 0 && type == 01 :^fcvt_smnemonic Rd_GPR32, Rn_FPR64 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & 
Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR64); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.76 FCVTMS (scalar) page C7-2189 line 127807 MATCH x1e300000/mask=x7f3ffc00 # C7.2.78 FCVTMU (scalar) page C7-2194 line 128131 MATCH x1e310000/mask=x7f3ffc00 # C7.2.81 FCVTNS (scalar) page C7-2201 line 128548 MATCH x1e200000/mask=x7f3ffc00 # C7.2.83 FCVTNU (scalar) page C7-2206 line 128872 MATCH x1e210000/mask=x7f3ffc00 # C7.2.85 FCVTPS (scalar) page C7-2211 line 129196 MATCH x1e280000/mask=x7f3ffc00 # C7.2.87 FCVTPU (scalar) page C7-2216 line 129520 MATCH x1e290000/mask=x7f3ffc00 # C7.2.92 FCVTZS (scalar, integer) page C7-2229 line 130291 MATCH x1e380000/mask=x7f3ffc00 # C7.2.96 FCVTZU (scalar, integer) page C7-2239 line 130904 MATCH x1e390000/mask=x7f3ffc00 # CONSTRUCT x9e600000/mask=xffe6fc00 MATCHED 8 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fcvt_amnpz_su/1 # AUNIT --inst x9e600000/mask=xffe6fc00 --rand dfp --status fail --comment "nofpround" # Double-precision to 64-bit variant sf == 1 && type == 01 :^fcvt_smnemonic Rd_GPR64, Rn_FPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1718=0b00 & b_1015=0b000000 & fcvt_smnemonic & Rd_GPR64 & Rn_FPR64 { Rd_GPR64 = trunc(Rn_FPR64); } # C7.2.79 FCVTN, FCVTN2 page C7-2196 line 128262 MATCH x0e216800/mask=xbfbffc00 # CONSTRUCT x0e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$float2float@8:8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@8 # AUNIT --inst x0e616800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround" :fcvtn Rd_VPR64.2S, Rn_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Rd_VPR128 & Zd { TMPQ1 = Rn_VPR128.2D; # simd resize Rd_VPR64.2S = float2float(TMPQ1) (lane size 8 to 4) Rd_VPR64.2S[0,32] = float2float(TMPQ1[0,64]); Rd_VPR64.2S[32,32] = float2float(TMPQ1[64,64]); zext_zd(Zd); # zero upper 24 bytes of Zd } # 
C7.2.79 FCVTN, FCVTN2 page C7-2196 line 128262 MATCH x0e216800/mask=xbfbffc00
# CONSTRUCT x4e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $float2float@8:8 1:1 &=$copy
# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn2/2@8
# AUNIT --inst x4e616800/mask=xfffffc00 --rand sfp --status pass --comment "ext nofpround"

:fcvtn2 Rd_VPR128.4S, Rn_VPR128.2D
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd
{
    # simd resize TMPD1 = float2float(Rn_VPR128.2D) (lane size 8 to 4)
    TMPD1[0,32] = float2float(Rn_VPR128.2D[0,64]);
    TMPD1[32,32] = float2float(Rn_VPR128.2D[64,64]);
    # simd copy Rd_VPR128.4S element 1:1 = TMPD1 (lane size 8)
    Rd_VPR128.4S[64,64] = TMPD1;
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.79 FCVTN, FCVTN2 page C7-2196 line 128262 MATCH x0e216800/mask=xbfbffc00
# CONSTRUCT x0e216800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =var =$float2float@4:8
# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4
# AUNIT --inst x0e216800/mask=xfffffc00 --rand hfp --status fail --comment "ext nofpround"

:fcvtn Rd_VPR64.4H, Rn_VPR128.4S
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Rd_VPR128 & Zd
{
    TMPQ1 = Rn_VPR128.4S;
    # simd resize Rd_VPR64.4H = float2float(TMPQ1) (lane size 4 to 2)
    Rd_VPR64.4H[0,16] = float2float(TMPQ1[0,32]);
    Rd_VPR64.4H[16,16] = float2float(TMPQ1[32,32]);
    Rd_VPR64.4H[32,16] = float2float(TMPQ1[64,32]);
    Rd_VPR64.4H[48,16] = float2float(TMPQ1[96,32]);
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.79 FCVTN, FCVTN2 page C7-2196 line 128262 MATCH x0e216800/mask=xbfbffc00
# CONSTRUCT x4e216800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $float2float@4:8 1:1 &=$copy
# SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn2/2@4
# AUNIT --inst x4e216800/mask=xfffffc00 --rand hfp --status fail --comment "ext nofpround"

:fcvtn2 Rd_VPR128.8H, Rn_VPR128.4S
is b_3131=0 & q=1 & u=0 &
b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x16 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { # simd resize TMPD1 = float2float(Rn_VPR128.4S) (lane size 4 to 2) TMPD1[0,16] = float2float(Rn_VPR128.4S[0,32]); TMPD1[16,16] = float2float(Rn_VPR128.4S[32,32]); TMPD1[32,16] = float2float(Rn_VPR128.4S[64,32]); TMPD1[48,16] = float2float(Rn_VPR128.4S[96,32]); # simd copy Rd_VPR128.8H element 1:1 = TMPD1 (lane size 8) Rd_VPR128.8H[64,64] = TMPD1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.88 FCVTXN, FCVTXN2 page C7-2218 line 129651 MATCH x7e216800/mask=xffbffc00 # CONSTRUCT x7e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtxn/2 # AUNIT --inst x7e616800/mask=xfffffc00 --rand sfp --status fail --comment "nofpround" :fcvtxn Rd_FPR32, Rn_FPR64 is b_2331=0b011111100 & b_22=1 & b_1021=0b100001011010 & Rd_FPR32 & Rn_FPR64 & Zd { Rd_FPR32 = float2float(Rn_FPR64); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.88 FCVTXN, FCVTXN2 page C7-2218 line 129651 MATCH x2e216800/mask=xbfbffc00 # CONSTRUCT x2e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$float2float@8:8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtxn/2@8 # AUNIT --inst x2e616800/mask=xfffffc00 --rand sfp --status fail --comment "ext nofpround" # Vector Variant :fcvtxn Rd_VPR64.2S, Rn_VPR128.2D is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR64.2S & Rd_VPR128 & Rn_VPR128.2D & Zd { TMPQ1 = Rn_VPR128.2D; # simd resize Rd_VPR64.2S = float2float(TMPQ1) (lane size 8 to 4) Rd_VPR64.2S[0,32] = float2float(TMPQ1[0,64]); Rd_VPR64.2S[32,32] = float2float(TMPQ1[64,64]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.88 FCVTXN, FCVTXN2 page C7-2218 line 129651 MATCH x2e216800/mask=xbfbffc00 # CONSTRUCT x6e616800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $float2float@8:8 1:1 &=$copy # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtxn2/2@8 # AUNIT --inst x6e616800/mask=xfffffc00 
--rand sfp --status fail --comment "ext nofpround" # Vector Variant :fcvtxn2 Rd_VPR128.4S, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001011010 & Rd_VPR128.4S & Rn_VPR128.2D & Rd_VPR128 & Zd { # simd resize TMPD1 = float2float(Rn_VPR128.2D) (lane size 8 to 4) TMPD1[0,32] = float2float(Rn_VPR128.2D[0,64]); TMPD1[32,32] = float2float(Rn_VPR128.2D[64,64]); # simd copy Rd_VPR128.4S element 1:1 = TMPD1 (lane size 8) Rd_VPR128.4S[64,64] = TMPD1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x5f00fc00/mask=xff80fc00 # CONSTRUCT x5f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 zext:8 =NEON_fcvtzs/2 # AUNIT --inst x5f40fc00/mask=xffc0fc00 --rand dfp --status nopcodeop --comment "nofpround" # Scalar variant when immh=1xxx :fcvtzs Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm64 & Rn_FPR64 & Rd_FPR64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); Rd_FPR64 = NEON_fcvtzs(Rn_FPR64, tmp1); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x5f00fc00/mask=xff80fc00 # CONSTRUCT x5f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzs/2 # AUNIT --inst x5f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" # Scalar variant when immh=01xx :fcvtzs Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_fcvtzs(Rn_FPR32, Imm_shr_imm32:4); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x5f00fc00/mask=xff80fc00 # CONSTRUCT x5f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2 # AUNIT --inst x5f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround" # Scalar variant when immh=001x :fcvtzs Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b010111110 & 
b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm16 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_fcvtzs(Rn_FPR16, Imm_shr_imm16); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x4f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 zext:8 =NEON_fcvtzs/2@8 # AUNIT --inst x4f40fc00/mask=xffc0fc00 --rand dfp --status nopcodeop --comment "nofpround" # Vector 2D variant when immh=1xxx Q=1 bb=b_22 cc=1 V=VPR128.2D imm=Imm_shr_imm64 :fcvtzs Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_31=0 & b_30=1 & b_2329=0b0011110 & b_22=1 & b_1015=0b111111 & Rd_VPR128.2D & Rn_VPR128.2D & Imm_shr_imm64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); Rd_VPR128.2D = NEON_fcvtzs(Rn_VPR128.2D, tmp1, 8:1); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x0f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzs/2@4 # AUNIT --inst x0f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" # Vector 2S variant when immh=01xx Q=0 bb=b_2122 cc=0b01 V=VPR64.2S imm=Imm_shr_imm32 :fcvtzs Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR64.2S & Rn_VPR64.2S & Imm_shr_imm32 & Zd { Rd_VPR64.2S = NEON_fcvtzs(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x4f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzs/2@4 # AUNIT --inst x4f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" # Vector 4S variant when immh=01xx Q=1 bb=b_2122 cc=0b01 V=VPR128.4S imm=Imm_shr_imm32 :fcvtzs Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2122=0b01 & b_1015=0b111111 & Rd_VPR128.4S & Rn_VPR128.4S & Imm_shr_imm32 & Zd { Rd_VPR128.4S = 
NEON_fcvtzs(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x0f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2@2 # AUNIT --inst x0f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector 4H variant when immh=001x Q=0 bb=b_2022 cc=0b001 V=VPR64.4H imm=Imm_shr_imm16 :fcvtzs Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_31=0 & b_30=0 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR64.4H & Rn_VPR64.4H & Imm_shr_imm16 & Zd { Rd_VPR64.4H = NEON_fcvtzs(Rn_VPR64.4H, Imm_shr_imm16, 2:1); } # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x4f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2@2 # AUNIT --inst x4f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector 8H variant when immh=001x Q=1 bb=b_2022 cc=0b001 V=VPR128.8H imm=Imm_shr_imm16 :fcvtzs Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_31=0 & b_30=1 & b_2329=0b0011110 & b_2022=0b001 & b_1015=0b111111 & Rd_VPR128.8H & Rn_VPR128.8H & Imm_shr_imm16 & Zd { Rd_VPR128.8H = NEON_fcvtzs(Rn_VPR128.8H, Imm_shr_imm16, 2:1); } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-2227 line 130156 MATCH x1e180000/mask=x7f3f0000 # CONSTRUCT x1ed88000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits16 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_fcvtzs/2 # AUNIT --inst x1ed88000/mask=xffff8000 --rand hfp --status noqemu --comment "nofpround" # if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); # Half-precision to 32-bit variant when sf == 0 && type == 11 G=GPR32 V=FPR16 size=2 fbits=FBits16 :fcvtzs Rd_GPR32, Rn_FPR16, FBitsOp is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR16 & FBitsOp & FBits16 & Rd_GPR64 { local tmp1:2 = Rn_FPR16 f* FBits16; Rd_GPR32 = 
trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-2227 line 130156 MATCH x1e180000/mask=x7f3f0000 # CONSTRUCT x9ed80000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzs/2 # AUNIT --inst x9ed80000/mask=xffff0000 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 64-bit variant when sf == 1 && type == 11 G=GPR64 V=FPR16 size=2 fbits=FBits16 :fcvtzs Rd_GPR64, Rn_FPR16, FBitsOp is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR16 & FBitsOp & FBits16 { local tmp1:2 = Rn_FPR16 f* FBitsOp; Rd_GPR64 = trunc(tmp1); } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-2227 line 130156 MATCH x1e180000/mask=x7f3f0000 # CONSTRUCT x1e188000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits32 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzs/2 # AUNIT --inst x1e188000/mask=xffff8000 --rand sfp --status fail --comment "nofpround" # Single-precision to 32-bit variant when sf == 0 && type == 00 G=GPR32 V=FPR32 size=4 fbits=FBits32 :fcvtzs Rd_GPR32, Rn_FPR32, FBitsOp is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR32 & FBitsOp & FBits32 & Rd_GPR64 { local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-2227 line 130156 MATCH x1e180000/mask=x7f3f0000 # CONSTRUCT x9e180000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits32 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzs/2 # AUNIT --inst x9e180000/mask=xffff0000 --rand sfp --status pass --comment "nofpround" # Single-precision to 64-bit variant when sf == 1 && type == 00 G=GPR64 V=FPR32 size=4 fbits=FBits32 :fcvtzs Rd_GPR64, Rn_FPR32, FBitsOp is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR32 & FBitsOp & FBits32 { local 
tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR64 = trunc(tmp1); } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-2227 line 130156 MATCH x1e180000/mask=x7f3f0000 # CONSTRUCT x1e588000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits64 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_fcvtzs/2 # AUNIT --inst x1e588000/mask=xffff8000 --rand dfp --status fail --comment "nofpround" # Double-precision to 32-bit variant when sf == 0 && type == 01 G=GPR32 V=FPR64 size=8 fbits=FBits64 :fcvtzs Rd_GPR32, Rn_FPR64, FBitsOp is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & b_15=1 & Rd_GPR32 & Rn_FPR64 & FBitsOp & FBits64 & Rd_GPR64 { local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.91 FCVTZS (scalar, fixed-point) page C7-2227 line 130156 MATCH x1e180000/mask=x7f3f0000 # CONSTRUCT x9e580000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits64 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_fcvtzs/2 # AUNIT --inst x9e580000/mask=xffff0000 --rand dfp --status pass --comment "nofpround" # Double-precision to 64-bit variant when sf == 1 && type == 01 G=GPR64 V=FPR64 size=8 fbits=FBits64 :fcvtzs Rd_GPR64, Rn_FPR64, FBitsOp is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_1621=0b011000 & Rd_GPR64 & Rn_FPR64 & FBitsOp & FBits64 { local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR64 = trunc(tmp1); } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x2f00fc00/mask=xbf80fc00 # CONSTRUCT x6f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 zext:8 =NEON_fcvtzu/2@8 # AUNIT --inst x6f40fc00/mask=xffc0fc00 --rand dfp --status nopcodeop --comment "nofpround" :fcvtzu Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = zext(Imm_shr_imm64); Rd_VPR128.2D = NEON_fcvtzu(Rn_VPR128.2D, tmp1, 8:1); } # C7.2.93 
FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x2f00fc00/mask=xbf80fc00 # CONSTRUCT x2f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzu/2@4 # AUNIT --inst x2f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fcvtzu Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_fcvtzu(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x2f00fc00/mask=xbf80fc00 # CONSTRUCT x6f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_fcvtzu/2@4 # AUNIT --inst x6f20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fcvtzu Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_fcvtzu(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x2f00fc00/mask=xbf80fc00 # CONSTRUCT x2f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2@2 # AUNIT --inst x2f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround" :fcvtzu Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_fcvtzu(Rn_VPR64.4H, Imm_shr_imm16, 2:1); } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x2f00fc00/mask=xbf80fc00 # CONSTRUCT x6f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2@2 # AUNIT --inst x6f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround" :fcvtzu Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & 
b_2023=1 & Imm_shr_imm16 & b_1115=0x1f & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_fcvtzu(Rn_VPR128.8H, Imm_shr_imm16, 2:1); } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x7f00fc00/mask=xff80fc00 # CONSTRUCT x7f10fc00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 1:2 ARG3 << int2float:2 f* fabs =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2 # AUNIT --inst x7f10fc00/mask=xfff0fc00 --rand hfp --status noqemu --comment "nofpround" # FCVTZU (vector, fixed-point) Scalar immh=001x :fcvtzu Rd_FPR16, Rn_FPR16, Imm_shr_imm32 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR16 & Rd_FPR16 & Zd { local tmp1:2 = 1:2 << Imm_shr_imm32; local tmp2:2 = int2float(tmp1); local tmp3:2 = Rn_FPR16 f* tmp2; local tmp4:2 = abs(tmp3); Rd_FPR16 = trunc(tmp4); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x7f00fc00/mask=xff80fc00 # CONSTRUCT x7f20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 1:4 ARG3:4 << int2float:4 f* fabs =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2 # AUNIT --inst x7f20fc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" # FCVTZU (vector, fixed-point) Scalar immh=01xx :fcvtzu Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR32 & Rd_FPR32 & Zd { local tmp1:4 = 1:4 << Imm_shr_imm32:4; local tmp2:4 = int2float(tmp1); local tmp3:4 = Rn_FPR32 f* tmp2; local tmp4:4 = abs(tmp3); Rd_FPR32 = trunc(tmp4); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.93 FCVTZU (vector, fixed-point) page C7-2231 line 130422 MATCH x7f00fc00/mask=xff80fc00 # CONSTRUCT x7f40fc00/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 1:8 ARG3 zext:8 << int2float:8 f* fabs =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2 # AUNIT --inst x7f40fc00/mask=xffc0fc00 --rand dfp --status fail --comment "nofpround" # 
FCVTZU (vector, fixed-point) Scalar immh=1xxx :fcvtzu Rd_FPR64, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_22=1 & b_1015=0b111111 & Imm_shr_imm32 & Rn_FPR64 & Rd_FPR64 & Zd { local tmp1:8 = zext(Imm_shr_imm32); local tmp2:8 = 1:8 << tmp1; local tmp3:8 = int2float(tmp2); local tmp4:8 = Rn_FPR64 f* tmp3; local tmp5:8 = abs(tmp4); Rd_FPR64 = trunc(tmp5); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-2237 line 130769 MATCH x1e190000/mask=x7f3f0000 # CONSTRUCT x1ed98000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2 # AUNIT --inst x1ed98000/mask=xffff8000 --rand hfp --status noqemu --comment "nofpround" :fcvtzu Rd_GPR32, Rn_FPR16, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits16 & Rn_FPR16 & Rd_GPR32 & Rd_GPR64 { local tmp1:2 = Rn_FPR16 f* FBitsOp; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-2237 line 130769 MATCH x1e190000/mask=x7f3f0000 # CONSTRUCT x9ed90000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2 # AUNIT --inst x9ed90000/mask=xffff0000 --rand hfp --status noqemu --comment "nofpround" :fcvtzu Rd_GPR64, Rn_FPR16, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits16 & Rn_FPR16 & Rd_GPR64 { local tmp1:2 = Rn_FPR16 f* FBitsOp; Rd_GPR64 = trunc(tmp1); } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-2237 line 130769 MATCH x1e190000/mask=x7f3f0000 # CONSTRUCT x1e598000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits64 f* =trunc # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fcvtzu/2 # AUNIT --inst x1e598000/mask=xffff8000 --rand dfp --status fail --comment "nofpround" :fcvtzu Rd_GPR32, Rn_FPR64, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & 
ftype=1 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits64 & Rn_FPR64 & Rd_GPR32 & Rd_GPR64 { local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-2237 line 130769 MATCH x1e190000/mask=x7f3f0000 # CONSTRUCT x1e198000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits32 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzu/2 # AUNIT --inst x1e198000/mask=xffff8000 --rand sfp --status fail --comment "nofpround" :fcvtzu Rd_GPR32, Rn_FPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode=1 & b_15=1 & FBitsOp & FBits32 & Rn_FPR32 & Rd_GPR32 & Rd_GPR64 { local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR32 = trunc(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-2237 line 130769 MATCH x1e190000/mask=x7f3f0000 # CONSTRUCT x9e590000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits64 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_fcvtzu/2 # AUNIT --inst x9e590000/mask=xffff0000 --rand dfp --status fail --comment "nofpround" :fcvtzu Rd_GPR64, Rn_FPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits64 & Rn_FPR64 & Rd_GPR64 { local tmp1:8 = Rn_FPR64 f* FBits64; Rd_GPR64 = trunc(tmp1); } # C7.2.95 FCVTZU (scalar, fixed-point) page C7-2237 line 130769 MATCH x1e190000/mask=x7f3f0000 # CONSTRUCT x9e190000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 FBits32 f* =trunc # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_fcvtzu/2 # AUNIT --inst x9e190000/mask=xffff0000 --rand sfp --status fail --comment "nofpround" :fcvtzu Rd_GPR64, Rn_FPR32, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=3 & fpOpcode=1 & FBitsOp & FBits32 & Rn_FPR32 & Rd_GPR64 { local tmp1:4 = Rn_FPR32 f* FBits32; Rd_GPR64 = trunc(tmp1); } # C7.2.97 FDIV (vector) page 
C7-2241 line 131035 MATCH x2e20fc00/mask=xbfa0fc00 # CONSTRUCT x6e60fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f/@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@8 # AUNIT --inst x6e60fc00/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fdiv Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix Rd_VPR128.2D = Rn_VPR128.2D f/ Rm_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f/ Rm_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f/ Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.97 FDIV (vector) page C7-2241 line 131035 MATCH x2e20fc00/mask=xbfa0fc00 # CONSTRUCT x2e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f/@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@4 # AUNIT --inst x2e20fc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fdiv Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rn_VPR64.2S f/ Rm_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f/ Rm_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f/ Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.97 FDIV (vector) page C7-2241 line 131035 MATCH x2e20fc00/mask=xbfa0fc00 # CONSTRUCT x6e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f/@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@4 # AUNIT --inst x6e20fc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fdiv Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rn_VPR128.4S f/ Rm_VPR128.4S 
on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f/ Rm_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f/ Rm_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f/ Rm_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f/ Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.97 FDIV (vector) page C7-2241 line 131035 MATCH x2e403c00/mask=xbfe0fc00 # CONSTRUCT x2e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f/@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@2 # AUNIT --inst x2e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 4H when Q = 0 :fdiv Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H f/ Rm_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f/ Rm_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f/ Rm_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f/ Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f/ Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.97 FDIV (vector) page C7-2241 line 131035 MATCH x2e403c00/mask=xbfe0fc00 # CONSTRUCT x6e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f/@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2@2 # AUNIT --inst x6e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 8H when Q = 1 :fdiv Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H f/ Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f/ Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f/ Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f/ Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = 
Rn_VPR128.8H[48,16] f/ Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f/ Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f/ Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f/ Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f/ Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.98 FDIV (scalar) page C7-2243 line 131150 MATCH x1e201800/mask=xff20fc00 # CONSTRUCT x1e601800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f/ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2 # AUNIT --inst x1e601800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fdiv Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x1 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Rn_FPR64 f/ Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.98 FDIV (scalar) page C7-2243 line 131150 MATCH x1e201800/mask=xff20fc00 # CONSTRUCT x1e201800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f/ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2 # AUNIT --inst x1e201800/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fdiv Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x1 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = Rn_FPR32 f/ Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.98 FDIV (scalar) page C7-2243 line 131150 MATCH x1e201800/mask=xff20fc00 # CONSTRUCT x1ee01800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f/ # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fdiv/2 # AUNIT --inst x1ee01800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" :fdiv Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x1 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = Rn_FPR16 f/ Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.99 FJCVTZS page 
C7-2245 line 131259 MATCH x1e7e0000/mask=xfffffc00 # CONSTRUCT x1e7e0000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_fjcvtzs/1 # AUNIT --inst x1e7e0000/mask=xfffffc00 --rand dfp --status noqemu --comment "nofpround" :fjcvtzs Rd_GPR32, Rn_FPR64 is b_1031=0b0001111001111110000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { Rd_GPR32 = trunc(Rn_FPR64); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.100 FMADD page C7-2246 line 131323 MATCH x1f000000/mask=xff208000 # CONSTRUCT x1f400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmadd/3 # AUNIT --inst x1f400000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" :fmadd Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=0 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Ra_FPR64 f+ (Rm_FPR64 f* Rn_FPR64); #NEON_fmadd(Rn_FPR64, Rm_FPR64, Ra_FPR64); } # C7.2.100 FMADD page C7-2246 line 131323 MATCH x1f000000/mask=xff208000 # CONSTRUCT x1f000000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmadd/3 # AUNIT --inst x1f000000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" :fmadd Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=0 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = Ra_FPR32 f+ (Rm_FPR32 f* Rn_FPR32); #NEON_fmadd(Rn_FPR32, Rm_FPR32, Ra_FPR32); } # C7.2.100 FMADD page C7-2246 line 131323 MATCH x1f000000/mask=xff208000 # CONSTRUCT x1fc00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmadd/3 # AUNIT --inst x1fc00000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" :fmadd Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=0 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = Ra_FPR16 f+ 
(Rm_FPR16 f* Rn_FPR16); #NEON_fmadd(Rn_FPR16, Rm_FPR16, Ra_FPR16); } # C7.2.101 FMAX (vector) page C7-2248 line 131451 MATCH x0e20f400/mask=xbfa0fc00 # CONSTRUCT x4e60f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@8 # AUNIT --inst x4e60f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" :fmax Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_fmax(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.101 FMAX (vector) page C7-2248 line 131451 MATCH x0e20f400/mask=xbfa0fc00 # CONSTRUCT x0e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@4 # AUNIT --inst x0e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fmax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_fmax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.101 FMAX (vector) page C7-2248 line 131451 MATCH x0e20f400/mask=xbfa0fc00 # CONSTRUCT x4e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@4 # AUNIT --inst x4e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fmax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_fmax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.101 FMAX (vector) page C7-2248 line 131451 MATCH x0e403400/mask=xbfe0fc00 # CONSTRUCT x0e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@2 # AUNIT --inst x0e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 4H when Q = 0 :fmax 
Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { Rd_VPR64.4H = NEON_fmax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.101 FMAX (vector) page C7-2248 line 131451 MATCH x0e403400/mask=xbfe0fc00 # CONSTRUCT x4e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2@2 # AUNIT --inst x4e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 8H when Q = 1 :fmax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { Rd_VPR128.8H = NEON_fmax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.102 FMAX (scalar) page C7-2250 line 131581 MATCH x1e204800/mask=xff20fc00 # CONSTRUCT x1e604800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2 # AUNIT --inst x1e604800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" :fmax Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x4 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd local tmp1:1 = Rn_FPR64 f> Rm_FPR64; if (tmp1) goto inst_next; Rd_FPR64 = Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.102 FMAX (scalar) page C7-2250 line 131581 MATCH x1e204800/mask=xff20fc00 # CONSTRUCT x1e204800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2 # AUNIT --inst x1e204800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fmax Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x4 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero 
upper 28 bytes of Zd local tmp1:1 = Rn_FPR32 f> Rm_FPR32; if (tmp1) goto inst_next; Rd_FPR32 = Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.102 FMAX (scalar) page C7-2250 line 131581 MATCH x1e204800/mask=xff20fc00 # CONSTRUCT x1ee04800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmax/2 # AUNIT --inst x1ee04800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" :fmax Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x4 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd local tmp1:1 = Rn_FPR16 f> Rm_FPR16; if (tmp1) goto inst_next; Rd_FPR16 = Rm_FPR16; zext_zh(Zd);# zero upper 30 bytes of Zd } # C7.2.103 FMAXNM (vector) page C7-2252 line 131688 MATCH x0e20c400/mask=xbfa0fc00 # CONSTRUCT x4e60c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@8 # AUNIT --inst x4e60c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" :fmaxnm Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_fmaxnm(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.103 FMAXNM (vector) page C7-2252 line 131688 MATCH x0e20c400/mask=xbfa0fc00 # CONSTRUCT x0e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@4 # AUNIT --inst x0e20c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fmaxnm Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_fmaxnm(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.103 FMAXNM (vector) page C7-2252 line 131688 MATCH x0e20c400/mask=xbfa0fc00 # CONSTRUCT x4e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED 
OPCODES
# NOTE(review): this region is auto-generated SLEIGH for the AArch64 NEON
# floating-point max/min family.  The "# C7.x.y ... MATCH/CONSTRUCT" comments
# cite the Arm ARM section, page and encoding mask; "# SMACRO" / "# AUNIT"
# lines are generator metadata (pseudo-op mapping and test status).  The text
# below is re-wrapped to one item per line; all code tokens are unchanged.
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@4
# AUNIT --inst x4e20c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxnm Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fmaxnm(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.103 FMAXNM (vector) page C7-2252 line 131688 MATCH x0e400400/mask=xbfe0fc00
# CONSTRUCT x0e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@2
# AUNIT --inst x0e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision SIMD 4H when Q = 0
:fmaxnm Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fmaxnm(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.103 FMAXNM (vector) page C7-2252 line 131688 MATCH x0e400400/mask=xbfe0fc00
# CONSTRUCT x4e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2@2
# AUNIT --inst x4e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision SIMD 8H when Q = 1
:fmaxnm Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fmaxnm(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.104 FMAXNM (scalar) page C7-2254 line 131821 MATCH x1e206800/mask=xff20fc00
# CONSTRUCT x1e606800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 =
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2
# AUNIT --inst x1e606800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmaxnm Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x6 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd
{
	Rd_FPR64 = Rn_FPR64;
	zext_zd(Zd); # zero upper 24 bytes of Zd
	local tmp1:1 = Rn_FPR64 f> Rm_FPR64;
	if (tmp1) goto inst_next;
	Rd_FPR64 = Rm_FPR64;
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.104 FMAXNM (scalar) page C7-2254 line 131821 MATCH x1e206800/mask=xff20fc00
# CONSTRUCT x1e206800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 =
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2
# AUNIT --inst x1e206800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxnm Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x6 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
{
	Rd_FPR32 = Rn_FPR32;
	zext_zs(Zd); # zero upper 28 bytes of Zd
	local tmp1:1 = Rn_FPR32 f> Rm_FPR32;
	if (tmp1) goto inst_next;
	Rd_FPR32 = Rm_FPR32;
	zext_zs(Zd); # zero upper 28 bytes of Zd
}

# C7.2.104 FMAXNM (scalar) page C7-2254 line 131821 MATCH x1e206800/mask=xff20fc00
# CONSTRUCT x1ee06800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 = ext ARG2 ARG3 f>:1 inst_next goto ARG1 ARG3 =
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnm/2
# AUNIT --inst x1ee06800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
:fmaxnm Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x6 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
{
	Rd_FPR16 = Rn_FPR16;
	zext_zh(Zd); # zero upper 30 bytes of Zd
	local tmp1:1 = Rn_FPR16 f> Rm_FPR16;
	if (tmp1) goto inst_next;
	Rd_FPR16 = Rm_FPR16;
	zext_zh(Zd); # zero upper 30 bytes of Zd
}

# C7.2.105 FMAXNMP (scalar) page C7-2256 line 131930 MATCH x7e30c800/mask=xffbffc00
# CONSTRUCT x7e70c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmp/1@8
# AUNIT --inst x7e70c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmaxnmp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd
{
	Rd_FPR64 = NEON_fmaxnmp(Rn_VPR128.2D, 8:1);
}

# C7.2.105 FMAXNMP (scalar) page C7-2256 line 131930 MATCH x7e30c800/mask=xffbffc00
# CONSTRUCT x7e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmp/1@4
# AUNIT --inst x7e30c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxnmp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fmaxnmp(Rn_VPR64.2S, 4:1);
}

# C7.2.105 FMAXNMP (scalar) page C7-2256 line 131930 MATCH x5e30c800/mask=xffbffc00
# CONSTRUCT x5e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fmaxnmp/1@2
# AUNIT --inst x5e30c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant
:fmaxnmp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111000110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd
{
	Rd_FPR16 = NEON_fmaxnmp(Rn_FPR32, 2:1);
}

# C7.2.106 FMAXNMP (vector) page C7-2258 line 132036 MATCH x2e20c400/mask=xbfa0fc00
# CONSTRUCT x6e60c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@8
# AUNIT --inst x6e60c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmaxnmp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fmaxnmp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.106 FMAXNMP (vector) page C7-2258 line 132036 MATCH x2e20c400/mask=xbfa0fc00
# CONSTRUCT x2e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@4
# AUNIT --inst x2e20c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmaxnmp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fmaxnmp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.106 FMAXNMP (vector) page C7-2258 line 132036 MATCH x2e20c400/mask=xbfa0fc00
# CONSTRUCT x6e20c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@4
# AUNIT --inst x6e20c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxnmp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fmaxnmp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.106 FMAXNMP (vector) page C7-2258 line 132036 MATCH x2e400400/mask=xbfe0fc00
# CONSTRUCT x2e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@2
# AUNIT --inst x2e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fmaxnmp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fmaxnmp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.106 FMAXNMP (vector) page C7-2258 line 132036 MATCH x2e400400/mask=xbfe0fc00
# CONSTRUCT x6e400400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxnmp/2@2
# AUNIT --inst x6e400400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fmaxnmp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fmaxnmp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.107 FMAXNMV page C7-2260 line 132171 MATCH x2e30c800/mask=xbfbffc00
# CONSTRUCT x6e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@4
# AUNIT --inst x6e30c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxnmv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fmaxnmv(Rn_VPR128.4S, 4:1);
}

# C7.2.107 FMAXNMV page C7-2260 line 132171 MATCH x0e30c800/mask=xbffffc00
# CONSTRUCT x0e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@2
# AUNIT --inst x0e30c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fmaxnmv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd
{
	Rd_FPR16 = NEON_fmaxnmv(Rn_VPR64.4H, 2:1);
}

# C7.2.107 FMAXNMV page C7-2260 line 132171 MATCH x0e30c800/mask=xbffffc00
# CONSTRUCT x4e30c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@2
# AUNIT --inst x4e30c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fmaxnmv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111000110000110010 & Rd_FPR16 & Rn_VPR128.8H & Zd
{
	Rd_FPR16 = NEON_fmaxnmv(Rn_VPR128.8H, 2:1);
}

# C7.2.108 FMAXP (scalar) page C7-2262 line 132280 MATCH x7e30f800/mask=xffbffc00
# CONSTRUCT x7e70f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxnmv/1@8
# AUNIT --inst x7e70f800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround"
# NOTE(review): the pseudo-op below is NEON_fmaxnmv although the instruction is
# FMAXP (the sibling FMAXP variants use NEON_fmaxp) — looks like a generator
# slip inherited from upstream Ghidra; verify upstream before renaming.
:fmaxp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x38 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd
{
	Rd_FPR64 = NEON_fmaxnmv(Rn_VPR128.2D, 8:1);
}

# C7.2.108 FMAXP (scalar) page C7-2262 line 132280 MATCH x7e30f800/mask=xffbffc00
# CONSTRUCT x7e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxp/1@4
# AUNIT --inst x7e30f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxp Rd_FPR32, Rn_VPR64.2S is
b_3031=1 & u=1 & b_2428=0x1e & b_23=0 & b_1722=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fmaxp(Rn_VPR64.2S, 4:1);
}

# C7.2.108 FMAXP (scalar) page C7-2262 line 132280 MATCH x5e30f800/mask=xffbffc00
# CONSTRUCT x5e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fmaxp/1@2
# AUNIT --inst x5e30f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant
:fmaxp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111000110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd
{
	Rd_FPR16 = NEON_fmaxp(Rn_FPR32, 2:1);
}

# C7.2.109 FMAXP (vector) page C7-2264 line 132387 MATCH x2e20f400/mask=xbfa0fc00
# CONSTRUCT x6e60f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@8
# AUNIT --inst x6e60f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmaxp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fmaxp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.109 FMAXP (vector) page C7-2264 line 132387 MATCH x2e20f400/mask=xbfa0fc00
# CONSTRUCT x2e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@4
# AUNIT --inst x2e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fmaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.109 FMAXP (vector) page C7-2264 line 132387 MATCH x2e20f400/mask=xbfa0fc00
# CONSTRUCT x6e20f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@4
# AUNIT --inst x6e20f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fmaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.109 FMAXP (vector) page C7-2264 line 132387 MATCH x2e403400/mask=xbfe0fc00
# CONSTRUCT x2e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@2
# AUNIT --inst x2e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fmaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fmaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.109 FMAXP (vector) page C7-2264 line 132387 MATCH x2e403400/mask=xbfe0fc00
# CONSTRUCT x6e403400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmaxp/2@2
# AUNIT --inst x6e403400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fmaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fmaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.110 FMAXV page C7-2266 line 132519 MATCH x2e30f800/mask=xbfbffc00
# CONSTRUCT x6e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxv/1@4
# AUNIT --inst x6e30f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmaxv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fmaxv(Rn_VPR128.4S, 4:1);
}

# C7.2.110 FMAXV page C7-2266 line 132519 MATCH x0e30f800/mask=xbffffc00
# CONSTRUCT x0e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxv/1@2
# AUNIT --inst x0e30f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fmaxv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd
{
	Rd_FPR16 = NEON_fmaxv(Rn_VPR64.4H, 2:1);
}

# C7.2.110 FMAXV page C7-2266 line 132519 MATCH x0e30f800/mask=xbffffc00
# CONSTRUCT x4e30f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fmaxv/1@2
# AUNIT --inst x4e30f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fmaxv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111000110000111110 & Rd_FPR16 & Rn_VPR128.8H & Zd
{
	Rd_FPR16 = NEON_fmaxv(Rn_VPR128.8H, 2:1);
}

# C7.2.111 FMIN (vector) page C7-2268 line 132628 MATCH x0ea0f400/mask=xbfa0fc00
# CONSTRUCT x4ee0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@8
# AUNIT --inst x4ee0f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmin Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fmin(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.111 FMIN (vector) page C7-2268 line 132628 MATCH x0ea0f400/mask=xbfa0fc00
# CONSTRUCT x0ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@4
# AUNIT --inst x0ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fmin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.111 FMIN (vector) page C7-2268 line 132628 MATCH x0ea0f400/mask=xbfa0fc00
# CONSTRUCT x4ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@4
# AUNIT --inst x4ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fmin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.111 FMIN (vector) page C7-2268 line 132628 MATCH x0ec03400/mask=xbfe0fc00
# CONSTRUCT x0ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@2
# AUNIT --inst x0ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fmin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fmin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.111 FMIN (vector) page C7-2268 line 132628 MATCH x0ec03400/mask=xbfe0fc00
# CONSTRUCT x4ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2@2
# AUNIT --inst x4ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fmin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fmin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.112 FMIN (scalar) page C7-2270 line 132758 MATCH x1e205800/mask=xff20fc00
# CONSTRUCT x1e605800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2
# AUNIT --inst x1e605800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fmin Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x5 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd
{
	Rd_FPR64 = Rn_FPR64;
	zext_zd(Zd); # zero upper 24 bytes of Zd
	local tmp1:1 = Rn_FPR64 f<= Rm_FPR64;
	if (tmp1) goto
	inst_next;
	Rd_FPR64 = Rm_FPR64;
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.112 FMIN (scalar) page C7-2270 line 132758 MATCH x1e205800/mask=xff20fc00
# CONSTRUCT x1e205800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2
# AUNIT --inst x1e205800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fmin Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x5 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
{
	Rd_FPR32 = Rn_FPR32;
	zext_zs(Zd); # zero upper 28 bytes of Zd
	local tmp1:1 = Rn_FPR32 f<= Rm_FPR32;
	if (tmp1) goto inst_next;
	Rd_FPR32 = Rm_FPR32;
	zext_zs(Zd); # zero upper 28 bytes of Zd
}

# C7.2.112 FMIN (scalar) page C7-2270 line 132758 MATCH x1e205800/mask=xff20fc00
# CONSTRUCT x1ee05800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmin/2
# AUNIT --inst x1ee05800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
:fmin Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x5 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
{
	Rd_FPR16 = Rn_FPR16;
	zext_zh(Zd); # zero upper 30 bytes of Zd
	local tmp1:1 = Rn_FPR16 f<= Rm_FPR16;
	if (tmp1) goto inst_next;
	Rd_FPR16 = Rm_FPR16;
	zext_zh(Zd);# zero upper 30 bytes of Zd
}

# C7.2.113 FMINNM (vector) page C7-2272 line 132865 MATCH x0ea0c400/mask=xbfa0fc00
# CONSTRUCT x4ee0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@8
# AUNIT --inst x4ee0c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fminnm Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fminnm(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.113 FMINNM (vector) page C7-2272 line 132865 MATCH x0ea0c400/mask=xbfa0fc00
# CONSTRUCT x0ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@4
# AUNIT --inst x0ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnm Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fminnm(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.113 FMINNM (vector) page C7-2272 line 132865 MATCH x0ea0c400/mask=xbfa0fc00
# CONSTRUCT x4ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@4
# AUNIT --inst x4ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnm Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fminnm(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.113 FMINNM (vector) page C7-2272 line 132865 MATCH x0ec00400/mask=xbfe0fc00
# CONSTRUCT x0ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@2
# AUNIT --inst x0ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fminnm Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fminnm(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.113 FMINNM (vector) page C7-2272 line 132865 MATCH x0ec00400/mask=xbfe0fc00
# CONSTRUCT x4ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2@2
# AUNIT --inst x4ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fminnm Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fminnm(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.114 FMINNM (scalar) page C7-2274 line 132998 MATCH x1e207800/mask=xff20fc00
# CONSTRUCT x1e607800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2
# AUNIT --inst x1e607800/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fminnm Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x7 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd
{
	Rd_FPR64 = NEON_fminnm(Rn_FPR64, Rm_FPR64);
}

# C7.2.114 FMINNM (scalar) page C7-2274 line 132998 MATCH x1e207800/mask=xff20fc00
# CONSTRUCT x1e207800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2
# AUNIT --inst x1e207800/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnm Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x7 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fminnm(Rn_FPR32, Rm_FPR32);
}

# C7.2.114 FMINNM (scalar) page C7-2274 line 132998 MATCH x1e207800/mask=xff20fc00
# CONSTRUCT x1ee07800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnm/2
# AUNIT --inst x1ee07800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
:fminnm Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x7 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
{
	Rd_FPR16 = NEON_fminnm(Rn_FPR16, Rm_FPR16);
}

# C7.2.115 FMINNMP (scalar) page C7-2276 line 133108 MATCH x7eb0c800/mask=xffbffc00
# CONSTRUCT x7ef0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmp/1@8
# AUNIT --inst x7ef0c800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround"
:fminnmp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xc & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd
{
	Rd_FPR64 = NEON_fminnmp(Rn_VPR128.2D, 8:1);
}

# C7.2.115 FMINNMP (scalar) page C7-2276 line 133108 MATCH x7eb0c800/mask=xffbffc00
# CONSTRUCT x7eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmp/1@4
# AUNIT --inst x7eb0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnmp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fminnmp(Rn_VPR64.2S, 4:1);
}

# C7.2.115 FMINNMP (scalar) page C7-2276 line 133108 MATCH x5eb0c800/mask=xffbffc00
# CONSTRUCT x5eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fminnmp/1@2
# AUNIT --inst x5eb0c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant
:fminnmp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111010110000110010 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd
{
	Rd_FPR16 = NEON_fminnmp(Rn_FPR32, 2:1);
}

# C7.2.116 FMINNMP (vector) page C7-2278 line 133214 MATCH x2ea0c400/mask=xbfa0fc00
# CONSTRUCT x6ee0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@8
# AUNIT --inst x6ee0c400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fminnmp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x18 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fminnmp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.116 FMINNMP (vector) page C7-2278 line 133214 MATCH x2ea0c400/mask=xbfa0fc00
# CONSTRUCT x2ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@4
# AUNIT --inst x2ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnmp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x18 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fminnmp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.116 FMINNMP (vector) page C7-2278 line 133214 MATCH x2ea0c400/mask=xbfa0fc00
# CONSTRUCT x6ea0c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@4
# AUNIT --inst x6ea0c400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnmp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x18 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fminnmp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.116 FMINNMP (vector) page C7-2278 line 133214 MATCH x2ec00400/mask=xbfe0fc00
# CONSTRUCT x2ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@2
# AUNIT --inst x2ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fminnmp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fminnmp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.116 FMINNMP (vector) page C7-2278 line 133214 MATCH x2ec00400/mask=xbfe0fc00
# CONSTRUCT x6ec00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminnmp/2@2
# AUNIT --inst x6ec00400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fminnmp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b000001 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fminnmp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.117 FMINNMV page C7-2280 line 133349 MATCH x2eb0c800/mask=xbfbffc00
# CONSTRUCT x6eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmv/1@4
# AUNIT --inst x6eb0c800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminnmv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xc & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fminnmv(Rn_VPR128.4S, 4:1);
}

# C7.2.117 FMINNMV page C7-2280 line 133349 MATCH x0eb0c800/mask=xbffffc00
# CONSTRUCT x0eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmv/1@2
# AUNIT --inst x0eb0c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fminnmv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR64.4H & Zd
{
	Rd_FPR16 = NEON_fminnmv(Rn_VPR64.4H, 2:1);
}

# C7.2.117 FMINNMV page C7-2280 line 133349 MATCH x0eb0c800/mask=xbffffc00
# CONSTRUCT x4eb0c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminnmv/1@2
# AUNIT --inst x4eb0c800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fminnmv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111010110000110010 & Rd_FPR16 & Rn_VPR128.8H & Zd
{
	Rd_FPR16 = NEON_fminnmv(Rn_VPR128.8H, 2:1);
}

# C7.2.118 FMINP (scalar) page C7-2282 line 133458 MATCH x7eb0f800/mask=xffbffc00
# CONSTRUCT x7ef0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminp/1@8
# AUNIT --inst x7ef0f800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround"
:fminp Rd_FPR64, Rn_VPR128.2D is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x38 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_FPR64 & Zd
{
	Rd_FPR64 = NEON_fminp(Rn_VPR128.2D, 8:1);
}

# C7.2.118 FMINP (scalar) page C7-2282 line 133458 MATCH x7eb0f800/mask=xffbffc00
# CONSTRUCT x7eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminp/1@4
# AUNIT --inst x7eb0f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminp Rd_FPR32, Rn_VPR64.2S is b_3031=1 & u=1 & b_2428=0x1e & b_23=1 & b_1722=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fminp(Rn_VPR64.2S, 4:1);
}

# C7.2.118 FMINP (scalar) page C7-2282 line 133458 MATCH x5eb0f800/mask=xffbffc00
# CONSTRUCT x5eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 Rn_FPR32 =NEON_fminp/1@2
# AUNIT --inst x5eb0f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant
:fminp Rd_FPR16, vRn_VPR128^".2H" is b_1031=0b0101111010110000111110 & Rd_FPR16 & vRn_VPR128 & Rn_FPR32 & Zd
{
	Rd_FPR16 = NEON_fminp(Rn_FPR32, 2:1);
}

# C7.2.119 FMINP (vector) page C7-2284 line 133565 MATCH x2ea0f400/mask=xbfa0fc00
# CONSTRUCT x6ee0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@8
# AUNIT --inst x6ee0f400/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"
:fminp Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1e & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fminp(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.119 FMINP (vector) page C7-2284 line 133565 MATCH x2ea0f400/mask=xbfa0fc00
# CONSTRUCT x2ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@4
# AUNIT --inst x2ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1e & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.119 FMINP (vector) page C7-2284 line 133565 MATCH x2ea0f400/mask=xbfa0fc00
# CONSTRUCT x6ea0f400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@4
# AUNIT --inst x6ea0f400/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1e & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	Rd_VPR128.4S = NEON_fminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1);
}

# C7.2.119 FMINP (vector) page C7-2284 line 133565 MATCH x2ec03400/mask=xbfe0fc00
# CONSTRUCT x2ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@2
# AUNIT --inst x2ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
	Rd_VPR64.4H = NEON_fminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1);
}

# C7.2.119 FMINP (vector) page C7-2284 line 133565 MATCH x2ec03400/mask=xbfe0fc00
# CONSTRUCT x6ec03400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fminp/2@2
# AUNIT --inst x6ec03400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110110 & b_1015=0b001101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
	Rd_VPR128.8H = NEON_fminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1);
}

# C7.2.120 FMINV page C7-2286 line 133697 MATCH x2eb0f800/mask=xbfbffc00
# CONSTRUCT x6eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminv/1@4
# AUNIT --inst x6eb0f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround"
:fminv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd
{
	Rd_FPR32 = NEON_fminv(Rn_VPR128.4S, 4:1);
}

# C7.2.120 FMINV page C7-2286 line 133697 MATCH x0eb0f800/mask=xbffffc00
# CONSTRUCT x0eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminv/1@2
# AUNIT --inst x0eb0f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 4H when Q = 0
:fminv Rd_FPR16, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR64.4H & Zd
{
	Rd_FPR16 = NEON_fminv(Rn_VPR64.4H, 2:1);
}

# C7.2.120 FMINV page C7-2286 line 133697 MATCH x0eb0f800/mask=xbffffc00
# CONSTRUCT x4eb0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_fminv/1@2
# AUNIT --inst x4eb0f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant SIMD 8H when Q = 1
:fminv Rd_FPR16, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111010110000111110 & Rd_FPR16 & Rn_VPR128.8H & Zd
{
	Rd_FPR16 = NEON_fminv(Rn_VPR128.8H, 2:1);
}

# C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x0f801000/mask=xbf80f400
# CONSTRUCT x4fc01000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $f* &=$f+@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@8
# AUNIT --inst x4fc01000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround"
:fmla Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x1 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	# simd element Re_VPR128.D[vIndex] lane size 8
	local tmp1:8 = Re_VPR128.D.vIndex;
	# simd infix TMPQ1 = Rn_VPR128.2D f* tmp1 on lane size 8
	TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* tmp1;
	TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* tmp1;
	# simd infix Rd_VPR128.2D = Rd_VPR128.2D f+ TMPQ1 on lane size 8
	Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f+ TMPQ1[0,64];
	Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f+ TMPQ1[64,64];
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x0f801000/mask=xbf80f400
# CONSTRUCT x0f801000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $f* &=$f+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4
# AUNIT --inst x0f801000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround"
:fmla Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x1 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	# simd element Re_VPR128.S[vIndex] lane size 4
	local tmp1:4 = Re_VPR128.S.vIndex;
	# simd infix TMPD1 = Rn_VPR64.2S f* tmp1 on lane size 4
	TMPD1[0,32] = Rn_VPR64.2S[0,32] f* tmp1;
	TMPD1[32,32] = Rn_VPR64.2S[32,32] f* tmp1;
	# simd infix Rd_VPR64.2S = Rd_VPR64.2S f+ TMPD1 on lane size 4
	Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f+ TMPD1[0,32];
	Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f+ TMPD1[32,32];
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x0f801000/mask=xbf80f400
# CONSTRUCT x4f801000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $f* &=$f+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4
# AUNIT --inst x4f801000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround"
:fmla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x1 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	# simd element Re_VPR128.S[vIndex] lane size 4
	local tmp1:4 = Re_VPR128.S.vIndex;
	# simd infix TMPQ1 = Rn_VPR128.4S f* tmp1 on lane size 4
	TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* tmp1;
	TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* tmp1;
	TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* tmp1;
	TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* tmp1;
	# simd infix Rd_VPR128.4S = Rd_VPR128.4S f+ TMPQ1 on lane size 4
	Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f+ TMPQ1[0,32];
	Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f+ TMPQ1[32,32];
	Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f+ TMPQ1[64,32];
	Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f+ TMPQ1[96,32];
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x5f001000/mask=xffc0f400
# CONSTRUCT x5f001000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 f* &=f+
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2
# AUNIT --inst x5f001000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround"
# Scalar half-precision variant
:fmla Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0101111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Zd
{
	# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
	local tmp1:2 = Re_VPR128Lo.H.vIndexHLM;
	local tmp2:2 = Rn_FPR16 f* tmp1;
	Rd_FPR16 = Rd_FPR16 f+ tmp2;
	zext_zh(Zd); # zero upper 30 bytes of Zd
}

# C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x5f801000/mask=xff80f400
# CONSTRUCT x5f801000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 f* &=f+
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4
# AUNIT --inst x5f801000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround"
# Scalar, single-precision and double-precision variant, sz=0
:fmla Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b010111111 & b_22=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd
{
	# simd element Re_VPR128.S[vIndex] lane size 4
	local tmp1:4 = Re_VPR128.S.vIndex;
	local tmp2:4 = Rn_FPR32 f* tmp1;
	Rd_FPR32 = Rd_FPR32 f+ tmp2;
	zext_zs(Zd); # zero upper 28 bytes of Zd
}

# C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x5f801000/mask=xff80f400
# CONSTRUCT x5fc01000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 f* &=f+
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@8
# AUNIT --inst x5fc01000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround"
# Scalar, single-precision and double-precision variant, sz=1
:fmla Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0001 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd
{
	# simd element Re_VPR128.D[vIndex] lane size 8
	local tmp1:8 = Re_VPR128.D.vIndex;
	local tmp2:8 = Rn_FPR64 f* tmp1;
	Rd_FPR64 =
Rd_FPR64 f+ tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x0f001000/mask=xbfc0f400 # CONSTRUCT x0f001000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f* &=$f+$@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 # AUNIT --inst x0f001000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # Vector, half-precision variant SIMD 4H when Q = 0 :fmla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD1 = Rn_VPR64.4H f* tmp1 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] f* tmp1; TMPD1[16,16] = Rn_VPR64.4H[16,16] f* tmp1; TMPD1[32,16] = Rn_VPR64.4H[32,16] f* tmp1; TMPD1[48,16] = Rn_VPR64.4H[48,16] f* tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f+ TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f+ TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f+ TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f+ TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f+ TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.121 FMLA (by element) page C7-2288 line 133806 MATCH x0f001000/mask=xbfc0f400 # CONSTRUCT x4f001000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f* &=$f+$@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 # AUNIT --inst x4f001000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # Vector, half-precision variant SIMD 8H when Q = 1 :fmla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPQ1 = 
Rn_VPR128.8H f* tmp1 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* tmp1; TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* tmp1; TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* tmp1; TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* tmp1; TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* tmp1; TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* tmp1; TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* tmp1; TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* tmp1; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f+ TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f+ TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f+ TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f+ TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f+ TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f+ TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f+ TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f+ TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.122 FMLA (vector) page C7-2292 line 134046 MATCH x0e20cc00/mask=xbfa0fc00 # CONSTRUCT x4e60cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@8 &=$f+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@8 # AUNIT --inst x4e60cc00/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fmla Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.2D & b_1115=0x19 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix TMPQ1 = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f+ TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f+ TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f+ TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.122 FMLA (vector) page C7-2292 line 134046 MATCH x0e20cc00/mask=xbfa0fc00 # CONSTRUCT x0e20cc00/mask=xffe0fc00 
MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4 # AUNIT --inst x0e20cc00/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :fmla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.2S & b_1115=0x19 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Re_VPR128 & Zd { # simd infix TMPD1 = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; TMPD1[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; # simd infix Rd_VPR64.2S = Rd_VPR64.2S f+ TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f+ TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f+ TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.122 FMLA (vector) page C7-2292 line 134046 MATCH x0e20cc00/mask=xbfa0fc00 # CONSTRUCT x4e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@4 # AUNIT --inst x4e20cc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fmla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.4S & b_1115=0x19 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f+ TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f+ TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f+ TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f+ TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f+ TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.122 FMLA (vector) page C7-2292 line 134046 MATCH 
x0e400c00/mask=xbfe0fc00 # CONSTRUCT x0e400c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 # AUNIT --inst x0e400c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 4H when Q = 0 :fmla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; TMPD1[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; TMPD1[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; TMPD1[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f+ TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f+ TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f+ TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f+ TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f+ TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.122 FMLA (vector) page C7-2292 line 134046 MATCH x0e400c00/mask=xbfe0fc00 # CONSTRUCT x4e400c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmla/3@2 # AUNIT --inst x4e400c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 8H when Q = 1 :fmla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f+ TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f+ TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f+ TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f+ TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f+ TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f+ TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f+ TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f+ TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f+ TMPQ1[112,16]; zext_zq(Zd); # zero upper 16
bytes of Zd } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-2294 line 134165 MATCH x0f800000/mask=xbfc0f400 # CONSTRUCT x0f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:4 ARG3 $f* $float2float@2:8 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@4 # AUNIT --inst x0f800000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlal Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPS1 = Rn_VPR64[0,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 TMPS2[0,16] = TMPS1[0,16] f* tmp2; TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) TMPD3[0,32] = float2float(TMPS2[0,16]); TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD3 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD3[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-2294 line 134165 MATCH x0f800000/mask=xbfc0f400 # CONSTRUCT x4f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:8 ARG3 $f* $float2float@2:16 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@2 # AUNIT --inst x4f800000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlal Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPD1 = Rn_VPR128[0,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD2 = TMPD1 f* tmp2 on lane 
size 2 TMPD2[0,16] = TMPD1[0,16] f* tmp2; TMPD2[16,16] = TMPD1[16,16] f* tmp2; TMPD2[32,16] = TMPD1[32,16] f* tmp2; TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) TMPQ3[0,32] = float2float(TMPD2[0,16]); TMPQ3[32,32] = float2float(TMPD2[16,16]); TMPQ3[64,32] = float2float(TMPD2[32,16]); TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-2294 line 134165 MATCH x2f808000/mask=xbfc0f400 # CONSTRUCT x2f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:4 ARG3 $f* $float2float@2:8 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 # AUNIT --inst x2f808000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlal2 Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPS1 = Rn_VPR64[32,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 TMPS2[0,16] = TMPS1[0,16] f* tmp2; TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) TMPD3[0,32] = float2float(TMPS2[0,16]); TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD3 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD3[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.123 FMLAL, FMLAL2 (by element) page C7-2294 line 134165 MATCH 
x2f808000/mask=xbfc0f400 # CONSTRUCT x6f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 ARG3 $f* $float2float@2:16 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 # AUNIT --inst x6f808000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlal2 Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1000 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPD1 = Rn_VPR128[64,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 TMPD2[0,16] = TMPD1[0,16] f* tmp2; TMPD2[16,16] = TMPD1[16,16] f* tmp2; TMPD2[32,16] = TMPD1[32,16] f* tmp2; TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) TMPQ3[0,32] = float2float(TMPD2[0,16]); TMPQ3[32,32] = float2float(TMPD2[16,16]); TMPQ3[64,32] = float2float(TMPD2[32,16]); TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-2296 line 134298 MATCH x0e20ec00/mask=xbfe0fc00 # CONSTRUCT x0e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:4 ARG3[0]:4 $f*@2 $float2float@2:8 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@2 # AUNIT --inst x0e20ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlal Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR64^".2H" is b_31=0 & b_30=0 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR64 & Rm_VPR64 & 
Zd { TMPS1 = Rn_VPR64[0,32]; TMPS2 = Rm_VPR64[0,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) TMPD4[0,32] = float2float(TMPS3[0,16]); TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD4 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD4[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-2296 line 134298 MATCH x0e20ec00/mask=xbfe0fc00 # CONSTRUCT x4e20ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:8 ARG3[0]:8 $f*@2 $float2float@2:16 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal/3@2 # AUNIT --inst x4e20ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlal Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b0011100 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { TMPD1 = Rn_VPR128[0,64]; TMPD2 = Rm_VPR64.4H[0,64]; # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2 TMPD3[0,16] = TMPD1[0,16] f* TMPD2[0,16]; TMPD3[16,16] = TMPD1[16,16] f* TMPD2[16,16]; TMPD3[32,16] = TMPD1[32,16] f* TMPD2[32,16]; TMPD3[48,16] = TMPD1[48,16] f* TMPD2[48,16]; # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = float2float(TMPD3[0,16]); TMPQ4[32,32] = float2float(TMPD3[16,16]); TMPQ4[64,32] = float2float(TMPD3[32,16]); TMPQ4[96,32] = float2float(TMPD3[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.124 FMLAL, FMLAL2 (vector) page 
C7-2296 line 134298 MATCH x2e20cc00/mask=xbfe0fc00 # CONSTRUCT x2e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:4 ARG3[1]:4 $f*@2 $float2float@2:8 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 # AUNIT --inst x2e20cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlal2 Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR128^".2H" is b_31=0 & b_30=0 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR128 & Rm_VPR128 & Zd { TMPS1 = Rn_VPR64[32,32]; TMPS2 = Rm_VPR128[32,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) TMPD4[0,32] = float2float(TMPS3[0,16]); TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD4 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD4[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.124 FMLAL, FMLAL2 (vector) page C7-2296 line 134298 MATCH x2e20cc00/mask=xbfe0fc00 # CONSTRUCT x6e20cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 ARG3 $f*@2 $float2float@2:16 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlal2/3@2 # AUNIT --inst x6e20cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlal2 Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b1011100 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { TMPD1 = Rn_VPR128[64,64]; # simd infix TMPD2 = TMPD1 f* Rm_VPR64.4H on lane size 2 TMPD2[0,16] = TMPD1[0,16] f* Rm_VPR64.4H[0,16]; TMPD2[16,16] = TMPD1[16,16] f* Rm_VPR64.4H[16,16]; TMPD2[32,16] = TMPD1[32,16] f* Rm_VPR64.4H[32,16]; TMPD2[48,16] = TMPD1[48,16] f* Rm_VPR64.4H[48,16]; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) 
TMPQ3[0,32] = float2float(TMPD2[0,16]); TMPQ3[32,32] = float2float(TMPD2[16,16]); TMPQ3[64,32] = float2float(TMPD2[32,16]); TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x0f805000/mask=xbf80f400 # CONSTRUCT x4fc05000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f* &=$f-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@8 # AUNIT --inst x4fc05000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" :fmls Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x5 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd element Re_VPR128.D[vIndex] lane size 8 local tmp1:8 = Re_VPR128.D.vIndex; # simd infix TMPQ1 = Rn_VPR128.2D f* tmp1 on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f- TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f- TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f- TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x0f805000/mask=xbf80f400 # CONSTRUCT x0f805000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f* &=$f-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4 # AUNIT --inst x0f805000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" :fmls Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x5 & b_1010=0 & 
Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix TMPD1 = Rn_VPR64.2S f* tmp1 on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] f* tmp1; TMPD1[32,32] = Rn_VPR64.2S[32,32] f* tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S f- TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f- TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f- TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x0f805000/mask=xbf80f400 # CONSTRUCT x4f805000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f* &=$f-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4 # AUNIT --inst x4f805000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" :fmls Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x5 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix TMPQ1 = Rn_VPR128.4S f* tmp1 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* tmp1; TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* tmp1; TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* tmp1; TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f- TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f- TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f- TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f- TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f- TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x5f005000/mask=xffc0f400 # CONSTRUCT x5f005000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* &=f- # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 # AUNIT --inst x5f005000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # Scalar 
half-precision variant :fmls Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0101111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; local tmp2:2 = Rn_FPR16 f* tmp1; Rd_FPR16 = Rd_FPR16 f- tmp2; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x5f805000/mask=xff80f400 # CONSTRUCT x5f805000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* &=f- # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4 # AUNIT --inst x5f805000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" # Scalar, single-precision and double-precision variant, sz=0 :fmls Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b010111111 & b_22=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; local tmp2:4 = Rn_FPR32 f* tmp1; Rd_FPR32 = Rd_FPR32 f- tmp2; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x5f805000/mask=xff80f400 # CONSTRUCT x5fc05000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* &=f- # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@8 # AUNIT --inst x5fc05000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" # Scalar, single-precision and double-precision variant, sz=1 :fmls Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b0101 & b_10=0 & Re_VPR128.D & vIndex & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd { # simd element Re_VPR128.D[vIndex] lane size 8 local tmp1:8 = Re_VPR128.D.vIndex; local tmp2:8 = Rn_FPR64 f* tmp1; Rd_FPR64 = Rd_FPR64 f- tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x0f005000/mask=xbfc0f400 # CONSTRUCT 
x0f005000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f-$@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 # AUNIT --inst x0f005000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # Vector, half-precision variant SIMD 4H when Q = 0 :fmls Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD1 = Rn_VPR64.4H f* tmp1 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] f* tmp1; TMPD1[16,16] = Rn_VPR64.4H[16,16] f* tmp1; TMPD1[32,16] = Rn_VPR64.4H[32,16] f* tmp1; TMPD1[48,16] = Rn_VPR64.4H[48,16] f* tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f- TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f- TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f- TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f- TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f- TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.125 FMLS (by element) page C7-2298 line 134425 MATCH x0f005000/mask=xbfc0f400 # CONSTRUCT x4f005000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@2 &=$f-$@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 # AUNIT --inst x4f005000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # Vector, half-precision variant SIMD 8H when Q = 1 :fmls Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2229=0b00111100 & b_1215=0b0101 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPQ1 = Rn_VPR128.8H f* tmp1 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* tmp1; TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* tmp1; TMPQ1[32,16] = 
Rn_VPR128.8H[32,16] f* tmp1; TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* tmp1; TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* tmp1; TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* tmp1; TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* tmp1; TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* tmp1; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f- TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f- TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f- TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f- TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f- TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f- TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f- TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f- TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f- TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.126 FMLS (vector) page C7-2302 line 134665 MATCH x0ea0cc00/mask=xbfa0fc00 # CONSTRUCT x4ee0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG1 $f*@8 &=$f-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@8 # AUNIT --inst x4ee0cc00/mask=xffe0fc00 --rand dfp --status fail --comment "nofpround" :fmls Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x19 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix TMPQ1 = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D f- TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] f- TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] f- TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.126 FMLS (vector) page C7-2302 line 134665 MATCH x0ea0cc00/mask=xbfa0fc00 # CONSTRUCT x0ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=f-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4 # AUNIT --inst
x0ea0cc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fmls Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x19 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix TMPD1 = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; TMPD1[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; # simd infix Rd_VPR64.2S = Rd_VPR64.2S f- TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] f- TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] f- TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.126 FMLS (vector) page C7-2302 line 134665 MATCH x0ea0cc00/mask=xbfa0fc00 # CONSTRUCT x4ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@4 # AUNIT --inst x4ea0cc00/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fmls Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x19 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S f- TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] f- TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] f- TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] f- TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] f- TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.126 FMLS (vector) page C7-2302 line 134665 MATCH x0ec00c00/mask=xbfe0fc00 # CONSTRUCT x0ec00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 # AUNIT --inst x0ec00c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision
variant SIMD 4H when Q = 0 :fmls Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; TMPD1[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; TMPD1[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; TMPD1[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H f- TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] f- TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] f- TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] f- TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] f- TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.126 FMLS (vector) page C7-2302 line 134665 MATCH x0ec00c00/mask=xbfe0fc00 # CONSTRUCT x4ec00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $f*@4 &=$f-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmls/3@2 # AUNIT --inst x4ec00c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 8H when Q = 1 :fmls Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000011 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H f- TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] f- TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] f- TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] f- TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] f- TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] f- TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] f- TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] f- TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] f- TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-2304 line 134788 MATCH x0f804000/mask=xbfc0f400 # CONSTRUCT x0f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:4 ARG3 $f* $float2float@2:8 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2 #
AUNIT --inst x0f804000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlsl Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPS1 = Rn_VPR64[0,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 TMPS2[0,16] = TMPS1[0,16] f* tmp2; TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) TMPD3[0,32] = float2float(TMPS2[0,16]); TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD3 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD3[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-2304 line 134788 MATCH x0f804000/mask=xbfc0f400 # CONSTRUCT x4f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:8 ARG3 $f* $float2float@2:16 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2 # AUNIT --inst x4f804000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlsl Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b0100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPD1 = Rn_VPR128[0,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 TMPD2[0,16] = TMPD1[0,16] f* tmp2; TMPD2[16,16] = TMPD1[16,16] f* tmp2; TMPD2[32,16] = TMPD1[32,16] f* tmp2; TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) TMPQ3[0,32] = float2float(TMPD2[0,16]); TMPQ3[32,32] = 
float2float(TMPD2[16,16]); TMPQ3[64,32] = float2float(TMPD2[32,16]); TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-2304 line 134788 MATCH x2f80c000/mask=xbfc0f400 # CONSTRUCT x2f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:4 ARG3 $f* $float2float@2:8 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2 # AUNIT --inst x2f80c000/mask=xffc0f400 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlsl2 Rd_VPR64.2S, vRn_VPR64^".2H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPS1 = Rn_VPR64[32,32]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPS2 = TMPS1 f* tmp2 on lane size 2 TMPS2[0,16] = TMPS1[0,16] f* tmp2; TMPS2[16,16] = TMPS1[16,16] f* tmp2; # simd resize TMPD3 = float2float(TMPS2) (lane size 2 to 4) TMPD3[0,32] = float2float(TMPS2[0,16]); TMPD3[32,32] = float2float(TMPS2[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD3 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD3[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD3[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.127 FMLSL, FMLSL2 (by element) page C7-2304 line 134788 MATCH x2f80c000/mask=xbfc0f400 # CONSTRUCT x6f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 ARG3 $f* $float2float@2:16 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2 # AUNIT --inst x6f80c000/mask=xffc0f400 --rand sfp --status noqemu --comment 
"ext nofpround" # SIMD 4S when Q = 1 :fmlsl2 Rd_VPR128.4S, vRn_VPR128^".4H", Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2329=0b1011111 & b_22=0 & b_1215=0b1100 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Re_VPR128Lo.H.vIndexHLM & Zd { TMPD1 = Rn_VPR128[64,64]; # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD2 = TMPD1 f* tmp2 on lane size 2 TMPD2[0,16] = TMPD1[0,16] f* tmp2; TMPD2[16,16] = TMPD1[16,16] f* tmp2; TMPD2[32,16] = TMPD1[32,16] f* tmp2; TMPD2[48,16] = TMPD1[48,16] f* tmp2; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) TMPQ3[0,32] = float2float(TMPD2[0,16]); TMPQ3[32,32] = float2float(TMPD2[16,16]); TMPQ3[64,32] = float2float(TMPD2[32,16]); TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-2306 line 134921 MATCH x0ea0ec00/mask=xbfe0fc00 # CONSTRUCT x0ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:4 ARG3[0]:4 $f*@2 $float2float@2:8 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2 # AUNIT --inst x0ea0ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlsl Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR64^".2H" is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR64 & Rm_VPR64 & Zd { TMPS1 = Rn_VPR64[0,32]; TMPS2 = Rm_VPR64[0,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) TMPD4[0,32] = 
float2float(TMPS3[0,16]); TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD4 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD4[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-2306 line 134921 MATCH x0ea0ec00/mask=xbfe0fc00 # CONSTRUCT x4ea0ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[0]:8 ARG3[0]:8 $f*@2 $float2float@2:16 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl/3@2 # AUNIT --inst x4ea0ec00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlsl Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { TMPD1 = Rn_VPR128[0,64]; TMPD2 = Rm_VPR64.4H[0,64]; # simd infix TMPD3 = TMPD1 f* TMPD2 on lane size 2 TMPD3[0,16] = TMPD1[0,16] f* TMPD2[0,16]; TMPD3[16,16] = TMPD1[16,16] f* TMPD2[16,16]; TMPD3[32,16] = TMPD1[32,16] f* TMPD2[32,16]; TMPD3[48,16] = TMPD1[48,16] f* TMPD2[48,16]; # simd resize TMPQ4 = float2float(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = float2float(TMPD3[0,16]); TMPQ4[32,32] = float2float(TMPD3[16,16]); TMPQ4[64,32] = float2float(TMPD3[32,16]); TMPQ4[96,32] = float2float(TMPD3[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-2306 line 134921 MATCH x2ea0cc00/mask=xbfe0fc00 # CONSTRUCT x2ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:4 ARG3[1]:4 $f*@2 $float2float@2:8 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2 # AUNIT --inst 
x2ea0cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 2S when Q = 0 :fmlsl2 Rd_VPR64.2S, vRn_VPR64^".2H", vRm_VPR128^".2H" is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR64.2S & vRn_VPR64 & Rn_VPR64 & vRm_VPR128 & Rm_VPR128 & Zd { TMPS1 = Rn_VPR64[32,32]; TMPS2 = Rm_VPR128[32,32]; # simd infix TMPS3 = TMPS1 f* TMPS2 on lane size 2 TMPS3[0,16] = TMPS1[0,16] f* TMPS2[0,16]; TMPS3[16,16] = TMPS1[16,16] f* TMPS2[16,16]; # simd resize TMPD4 = float2float(TMPS3) (lane size 2 to 4) TMPD4[0,32] = float2float(TMPS3[0,16]); TMPD4[32,32] = float2float(TMPS3[16,16]); # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD4 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD4[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD4[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.128 FMLSL, FMLSL2 (vector) page C7-2306 line 134921 MATCH x2ea0cc00/mask=xbfe0fc00 # CONSTRUCT x6ea0cc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 ARG3 $f*@2 $float2float@2:16 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_fmlsl2/3@2 # AUNIT --inst x6ea0cc00/mask=xffe0fc00 --rand sfp --status noqemu --comment "ext nofpround" # SIMD 4S when Q = 1 :fmlsl2 Rd_VPR128.4S, vRn_VPR128^".4H", Rm_VPR64.4H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_21=1 & b_1015=0b110011 & Rd_VPR128.4S & vRn_VPR128 & Rn_VPR128 & Rm_VPR64.4H & Zd { TMPD1 = Rn_VPR128[64,64]; # simd infix TMPD2 = TMPD1 f* Rm_VPR64.4H on lane size 2 TMPD2[0,16] = TMPD1[0,16] f* Rm_VPR64.4H[0,16]; TMPD2[16,16] = TMPD1[16,16] f* Rm_VPR64.4H[16,16]; TMPD2[32,16] = TMPD1[32,16] f* Rm_VPR64.4H[32,16]; TMPD2[48,16] = TMPD1[48,16] f* Rm_VPR64.4H[48,16]; # simd resize TMPQ3 = float2float(TMPD2) (lane size 2 to 4) TMPQ3[0,32] = float2float(TMPD2[0,16]); TMPQ3[32,32] = float2float(TMPD2[16,16]); TMPQ3[64,32] = float2float(TMPD2[32,16]); TMPQ3[96,32] = float2float(TMPD2[48,16]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.129 FMOV (vector, immediate) page C7-2308 line 135048 MATCH x0f00f400/mask=x9ff8fc00 # C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH x2f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # CONSTRUCT x6f00f400/mask=xfff8fc00 MATCHED 4 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@8 # AUNIT --inst x6f00f400/mask=xfff8fc00 --rand dfp --status nopcodeop :fmov Rd_VPR128.2D, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_fmov(Imm_neon_uimm8Shift, 8:1); } # C7.2.129 FMOV (vector, immediate) page C7-2308 line 135048 MATCH x0f00f400/mask=x9ff8fc00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # CONSTRUCT x0f00f400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2:4 =NEON_fmov/1@4 # AUNIT --inst x0f00f400/mask=xfff8fc00 --rand dfp --status nopcodeop :fmov Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_fmov(Imm_neon_uimm8Shift:4, 4:1); } # C7.2.129 FMOV (vector, immediate) page C7-2308 line 135048 MATCH x0f00f400/mask=x9ff8fc00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # CONSTRUCT x4f00f400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2:4 
=NEON_fmov/1@4 # AUNIT --inst x4f00f400/mask=xfff8fc00 --rand sfp --status nopcodeop :fmov Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & cmode=0xf & b_1011=1 & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_fmov(Imm_neon_uimm8Shift:4, 4:1); } # C7.2.129 FMOV (vector, immediate) page C7-2308 line 135048 MATCH x0f00fc00/mask=xbff8fc00 # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x0f00fc00/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float:2 &=$dup # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@2 # AUNIT --inst x0f00fc00/mask=xfff8fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 4H when Q = 0 :fmov Rd_VPR64.4H, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR64.4H & Imm_neon_uimm8Shift & Zd { local tmp1:2 = int2float(Imm_neon_uimm8Shift); # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) Rd_VPR64.4H[0,16] = tmp1; Rd_VPR64.4H[16,16] = tmp1; Rd_VPR64.4H[32,16] = tmp1; Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.129 FMOV (vector, immediate) page C7-2308 line 135048 MATCH x0f00fc00/mask=xbff8fc00 # C7.2.89 FCVTZS (vector, fixed-point) page C7-2221 line 129809 MATCH x0f00fc00/mask=xbf80fc00 # CONSTRUCT x4f00fc00/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float:2 &=$dup # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@2 # AUNIT --inst x4f00fc00/mask=xfff8fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant SIMD 8H when Q = 1 :fmov Rd_VPR128.8H, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_1929=0b00111100000 & b_1015=0b111111 & Rd_VPR128.8H & Imm_neon_uimm8Shift & Zd { local tmp1:2 = int2float(Imm_neon_uimm8Shift); # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) Rd_VPR128.8H[0,16] = tmp1; Rd_VPR128.8H[16,16] = tmp1; Rd_VPR128.8H[32,16] = tmp1; Rd_VPR128.8H[48,16] = 
tmp1; Rd_VPR128.8H[64,16] = tmp1; Rd_VPR128.8H[80,16] = tmp1; Rd_VPR128.8H[96,16] = tmp1; Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.130 FMOV (register) page C7-2310 line 135162 MATCH x1e204000/mask=xff3ffc00 # CONSTRUCT x1ee04000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1ee04000/mask=xfffffc00 --rand hfp --status noqemu # Half-precision variant when type == 11 arg1=Rd_FPR16 arg2=Rn_FPR16 :fmov Rd_FPR16, Rn_FPR16 is b_2431=0b00011110 & b_2223=0b11 & b_1021=0b100000010000 & Rd_FPR16 & Rn_FPR16 & Rd_FPR64 & Zd { Rd_FPR16 = Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.130 FMOV (register) page C7-2310 line 135162 MATCH x1e204000/mask=xff3ffc00 # CONSTRUCT x1e204000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e204000/mask=xfffffc00 --rand sfp --status pass # Single-precision variant when type == 00 arg1=Rd_FPR32 arg2=Rn_FPR32 :fmov Rd_FPR32, Rn_FPR32 is b_2431=0b00011110 & b_2223=0b00 & b_1021=0b100000010000 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.130 FMOV (register) page C7-2310 line 135162 MATCH x1e204000/mask=xff3ffc00 # CONSTRUCT x1e604000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e604000/mask=xfffffc00 --rand dfp --status pass # Double-precision variant when type == 01 arg1=Rd_FPR64 arg2=Rn_FPR64 :fmov Rd_FPR64, Rn_FPR64 is b_2431=0b00011110 & b_2223=0b01 & b_1021=0b100000010000 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x1e660000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst 
x1e660000/mask=xfffffc00 --rand dfp --status noqemu --comment "nofpround" # UNDOCUMENTED Double-precision to 32-bit variant when sf == 0 && type == 01 && rmode == 00 && opcode = 110 arg1=Rd_GPR32 arg2=Rn_FPR64 :fmov Rd_GPR32, Rn_FPR64 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR64 & Rd_GPR64 { Rd_GPR32 = float2float(Rn_FPR64); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9e260000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x9e260000/mask=xfffffc00 --rand sfp --status noqemu --comment "nofpround" # UNDOCUMENTED Single-precision to 64-bit variant when sf == 1 && type == 00 && rmode == 00 && opcode = 110 arg1=Rd_GPR64 arg2=Rn_FPR32 :fmov Rd_GPR64, Rn_FPR32 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR32 { Rd_GPR64 = float2float(Rn_FPR32); } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x1e670000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e670000/mask=xfffffc00 --rand dfp --status noqemu --comment "nofpround" # UNDOCUMENTED 32-bit to Double-precision variant when sf == 0 && type == 01 && rmode == 00 && opcode = 111 arg1=Rd_FPR64 arg2=Rn_GPR32 :fmov Rd_FPR64, Rn_GPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR64 & Rn_GPR32 & Zd { Rd_FPR64 = float2float(Rn_GPR32); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9e270000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst 
x9e270000/mask=xfffffc00 --rand sfp --status noqemu --comment "nofpround" # UNDOCUMENTED 64-bit to single-precision variant when sf == 1 && type == 00 && rmode == 00 && opcode = 111 arg1=Rd_FPR32 arg2=Rn_GPR64 :fmov Rd_FPR32, Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR32 & Rn_GPR64 & Zd { Rd_FPR32 = float2float(Rn_GPR64); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x1ee60000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1ee60000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 32-bit variant when sf == 0 && type == 11 && rmode == 00 && opcode == 110 arg1=Rd_GPR32 arg2=Rn_FPR16 :fmov Rd_GPR32, Rn_FPR16 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR16 & Rd_GPR64 { Rd_GPR32 = float2float(Rn_FPR16); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9ee60000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x9ee60000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision to 64-bit variant when sf == 1 && type == 11 && rmode == 00 && opcode == 110 arg1=Rd_GPR64 arg2=Rn_FPR16 :fmov Rd_GPR64, Rn_FPR16 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR16 { Rd_GPR64 = float2float(Rn_FPR16); } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x1ee70000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1ee70000/mask=xfffffc00 
--rand hfp --status noqemu --comment "nofpround" # 32-bit to half-precision variant when sf == 0 && type == 11 && rmode == 00 && opcode == 111 arg1=Rd_FPR16 arg2=Rn_GPR32 :fmov Rd_FPR16, Rn_GPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR16 & Rn_GPR32 & Zd { Rd_FPR16 = float2float(Rn_GPR32); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x1e270000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e270000/mask=xfffffc00 --rand sfp --status pass --comment "nofpround" # 32-bit to single-precision variant when sf == 0 && type == 00 && rmode == 00 && opcode == 111 arg1=Rd_FPR32 arg2=Rn_GPR32 :fmov Rd_FPR32, Rn_GPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR32 & Rn_GPR32 & Zd { Rd_FPR32 = Rn_GPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x1e260000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e260000/mask=xfffffc00 --rand sfp --status pass --comment "nofpround" # Single-precision to 32-bit variant when sf == 0 && type == 00 && rmode == 00 && opcode == 110 arg1=Rd_GPR32 arg2=Rn_FPR32 :fmov Rd_GPR32, Rn_FPR32 is b_31=0 & b_2430=0b0011110 & b_2223=0b00 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR32 & Rn_FPR32 & Rd_GPR64 { Rd_GPR32 = Rn_FPR32; zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9ee70000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =float2float # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x9ee70000/mask=xfffffc00 --rand hfp --status noqemu --comment 
"nofpround" # 64-bit to half-precision variant when sf == 1 && type == 11 && rmode == 00 && opcode == 111 arg1=Rd_FPR16 arg2=Rn_GPR64 :fmov Rd_FPR16, Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b11 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR16 & Rn_GPR64 & Zd { Rd_FPR16 = float2float(Rn_GPR64); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9e670000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x9e670000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" # 64-bit to double-precision variant when sf == 1 && type == 01 && rmode == 00 && opcode == 111 arg1=Rd_FPR64 arg2=Rn_GPR64 :fmov Rd_FPR64, Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b111 & b_1015=0b000000 & Rd_FPR64 & Rn_GPR64 & Zd { Rd_FPR64 = Rn_GPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9eaf0000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 1:1 &=$copy@8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_fmov/2 # AUNIT --inst x9eaf0000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" # 64-bit to top half of 128-bit variant when sf == 1 && type == 10 && rmode == 01 && opcode == 111 arg1=vRd_VPR128^".D[1]" arg2=Rn_GPR64 :fmov vRd_VPR128^".D[1]", Rn_GPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b111 & b_1015=0b000000 & vRd_VPR128 & Rd_VPR128 & Rn_GPR64 & Zd { # simd copy Rd_VPR128 element 1:1 = Rn_GPR64 (lane size 8) Rd_VPR128[64,64] = Rn_GPR64; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9e660000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst 
x9e660000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" # Double-precision to 64-bit variant when sf == 1 && type == 01 && rmode == 00 && opcode == 110 arg1=Rd_GPR64 arg2=Rn_FPR64 :fmov Rd_GPR64, Rn_FPR64 is b_31=1 & b_2430=0b0011110 & b_2223=0b01 & b_21=1 & b_1920=0b00 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & Rn_FPR64 { Rd_GPR64 = Rn_FPR64; } # C7.2.131 FMOV (general) page C7-2312 line 135254 MATCH x1e260000/mask=x7f36fc00 # CONSTRUCT x9eae0000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 =ARG2[1]:8 # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1@8 # AUNIT --inst x9eae0000/mask=xfffffc00 --rand dfp --status pass --comment "nofpround" # Top half of 128-bit to 64-bit variant when sf == 1 && type == 10 && rmode == 01 && opcode == 110 arg1=Rd_GPR64 arg2=vRd_VPR128^".D[1]" :fmov Rd_GPR64, vRn_VPR128^".D[1]" is b_31=1 & b_2430=0b0011110 & b_2223=0b10 & b_21=1 & b_1920=0b01 & b_1618=0b110 & b_1015=0b000000 & Rd_GPR64 & vRn_VPR128 & Rn_VPR128 { Rd_GPR64 = Rn_VPR128[64,64]; } # C7.2.132 FMOV (scalar, immediate) page C7-2316 line 135493 MATCH x1e201000/mask=xff201fe0 # CONSTRUCT x1e601001/mask=xffe01fe1 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e601001/mask=xffe01fe1 --rand dfp --status pass :fmov Rd_FPR64, Imm8_fmov64_operand is ImmS_ImmR_TestSet=1 & m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Imm8_fmov64_operand & b_1012=4 & imm5=0x0 & Rd_FPR64 & Zd { Rd_FPR64 = Imm8_fmov64_operand:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.132 FMOV (scalar, immediate) page C7-2316 line 135493 MATCH x1e201000/mask=xff201fe0 # CONSTRUCT x1e201000/mask=xffe01fe0 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1e201000/mask=xffe01fe0 --rand sfp --status pass :fmov Rd_FPR32, Imm8_fmov32_operand is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Imm8_fmov32_operand & b_1012=4 & imm5=0x0 & Rd_FPR32 & Zd { Rd_FPR32 
= Imm8_fmov32_operand:4; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.132 FMOV (scalar, immediate) page C7-2316 line 135493 MATCH x1e201000/mask=xff201fe0 # CONSTRUCT x1ee01000/mask=xffe01fe0 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_fmov/1 # AUNIT --inst x1ee01000/mask=xffe01fe0 --rand hfp --status noqemu :fmov Rd_FPR16, Imm8_fmov16_operand is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Imm8_fmov16_operand & b_1012=4 & imm5=0x0 & Rd_FPR16 & Zd { Rd_FPR16 = Imm8_fmov16_operand:2; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.133 FMSUB page C7-2318 line 135582 MATCH x1f008000/mask=xff208000 # CONSTRUCT x1f408000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmsub/3 # AUNIT --inst x1f408000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" :fmsub Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=0 & Rm_FPR64 & b_15=1 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Ra_FPR64 f- (Rm_FPR64 f* Rn_FPR64); } # C7.2.133 FMSUB page C7-2318 line 135582 MATCH x1f008000/mask=xff208000 # CONSTRUCT x1f008000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmsub/3 # AUNIT --inst x1f008000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" :fmsub Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=0 & Rm_FPR32 & b_15=1 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = Ra_FPR32 f- (Rm_FPR32 f* Rn_FPR32); } # C7.2.133 FMSUB page C7-2318 line 135582 MATCH x1f008000/mask=xff208000 # CONSTRUCT x1fc08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fmsub/3 # AUNIT --inst x1fc08000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" :fmsub Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=0 & Rm_FPR16 & b_15=1 & Ra_FPR16 & Rn_FPR16 
& Rd_FPR16 & Zd { Rd_FPR16 = Ra_FPR16 f- (Rm_FPR16 f* Rn_FPR16); } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x5f009000/mask=xffc0f400 # CONSTRUCT x5f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 # AUNIT --inst x5f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # FMUL (by element) Scalar, half-precision :fmul Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2231=0b0101111100 & b_1215=0b1001 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; Rd_FPR16 = Rn_FPR16 f* tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x5f809000/mask=xff80f400 # CONSTRUCT x5f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 # AUNIT --inst x5f809000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" # FMUL (by element) Scalar, single-precision and double-precision sz=0 :fmul Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b010111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S & vIndex & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; Rd_FPR32 = Rn_FPR32 f* tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x5f809000/mask=xff80f400 # CONSTRUCT x5fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@8 # AUNIT --inst x5fc09000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" # FMUL (by element) Scalar, single-precision and double-precision sz=1 :fmul Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b010111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D & vIndex 
& Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Zd { # simd element Re_VPR128.D[vIndex] lane size 8 local tmp1:8 = Re_VPR128.D.vIndex; Rd_FPR64 = Rn_FPR64 f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x0f009000/mask=xbfc0f400 # CONSTRUCT x0f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 # AUNIT --inst x0f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # FMUL (by element) Vector, half-precision, Q=0 :fmul Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 &b_30=0 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp1 on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* tmp1; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* tmp1; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* tmp1; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x0f009000/mask=xbfc0f400 # CONSTRUCT x4f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 # AUNIT --inst x4f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # FMUL (by element) Vector, half-precision, Q=1 :fmul Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 &b_30=1 & b_2229=0b00111100 & b_1215=0b1001 & b_10=0 & Re_VPR128Lo.H & vIndexHLM & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* tmp1; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* tmp1; 
Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* tmp1; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* tmp1; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* tmp1; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* tmp1; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* tmp1; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x0f809000/mask=xbf80f400 # CONSTRUCT x4fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@8 # AUNIT --inst x4fc09000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" # Vector, single-precision and double-precision Q=1 and sz:L=10 :fmul Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd element Re_VPR128.D[vIndex] lane size 8 local tmp1:8 = Re_VPR128.D.vIndex; # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* tmp1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* tmp1; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x0f809000/mask=xbf80f400 # CONSTRUCT x0f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 # AUNIT --inst x0f809000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" # Vector, single-precision and double-precision Q=0 and sz:L=0x :fmul Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_31=0 & b_30=0 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp1 on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* 
tmp1; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.134 FMUL (by element) page C7-2320 line 135711 MATCH x0f809000/mask=xbf80f400 # CONSTRUCT x4f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 # AUNIT --inst x4f809000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround" # Vector, single-precision and double-precision Q=1 and sz:L=0x :fmul Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_31=0 & b_30=1 & b_2329=0b0011111 & b_22=0 & b_1215=0b1001 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* tmp1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* tmp1; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* tmp1; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* tmp1; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.135 FMUL (vector) page C7-2324 line 135951 MATCH x2e20dc00/mask=xbfa0fc00 # CONSTRUCT x6e60dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@8 # AUNIT --inst x6e60dc00/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fmul Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1b & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* Rm_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* Rm_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.135 FMUL (vector) page C7-2324 line 135951 MATCH x2e20dc00/mask=xbfa0fc00 # CONSTRUCT x2e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@4 # 
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 # AUNIT --inst x2e20dc00/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :fmul Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1b & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rn_VPR64.2S f* Rm_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* Rm_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.135 FMUL (vector) page C7-2324 line 135951 MATCH x2e20dc00/mask=xbfa0fc00 # CONSTRUCT x6e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@4 # AUNIT --inst x6e20dc00/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :fmul Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1b & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rn_VPR128.4S f* Rm_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* Rm_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* Rm_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* Rm_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.135 FMUL (vector) page C7-2324 line 135951 MATCH x2e401c00/mask=xbfe0fc00 # CONSTRUCT x2e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 # AUNIT --inst x2e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when Q=0 suf=VPR64.4H :fmul Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* Rm_VPR64.4H on lane 
size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.135 FMUL (vector) page C7-2324 line 135951 MATCH x2e401c00/mask=xbfe0fc00 # CONSTRUCT x6e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2@2 # AUNIT --inst x6e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when Q=1 suf=VPR128.8H :fmul Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b101110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.136 FMUL (scalar) page C7-2326 line 136066 MATCH x1e200800/mask=xff20fc00 # CONSTRUCT x1e600800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2 # AUNIT --inst x1e600800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fmul Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x0 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Rn_FPR64 f* Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd 
} # C7.2.136 FMUL (scalar) page C7-2326 line 136066 MATCH x1e200800/mask=xff20fc00 # CONSTRUCT x1e200800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2 # AUNIT --inst x1e200800/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround" :fmul Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x0 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = Rn_FPR32 f* Rm_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.136 FMUL (scalar) page C7-2326 line 136066 MATCH x1e200800/mask=xff20fc00 # CONSTRUCT x1ee00800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmul/2 # AUNIT --inst x1ee00800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" :fmul Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x0 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x2f809000/mask=xbf80f400 # CONSTRUCT x6fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@8 # AUNIT --inst x6fc09000/mask=xffe0f400 --rand dfp --status nopcodeop --comment "nofpround" :fmulx Rd_VPR128.2D, Rn_VPR128.2D, Re_VPR128.D.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=3 & b_2121=0 & Re_VPR128.D.vIndex & vIndex & Re_VPR128.D & b_1215=0x9 & b_1010=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = Re_VPR128.D.vIndex; # simd infix Rd_VPR128.2D = Rn_VPR128.2D f* tmp1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f* tmp1; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x2f809000/mask=xbf80f400 # CONSTRUCT x2f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO ARG1 ARG2 ARG3 =$f*
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4
# AUNIT --inst x2f809000/mask=xffc0f400 --rand sfp --status fail --comment "nofpround"

# FMULX (by element), vector, single-precision, Q=0: multiply each 32-bit lane of
# Rn by the selected element of Re, then zero the upper 24 bytes of the Z register.
:fmulx Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex
is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x9 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	# simd element Re_VPR128.S[vIndex] lane size 4
	local tmp1:4 = Re_VPR128.S.vIndex;
	# simd infix Rd_VPR64.2S = Rn_VPR64.2S f* tmp1 on lane size 4
	Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f* tmp1;
	Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f* tmp1;
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x2f809000/mask=xbf80f400
# CONSTRUCT x6f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4
# AUNIT --inst x6f809000/mask=xffc0f400 --rand sfp --status nopcodeop --comment "nofpround"

# FMULX (by element), vector, single-precision, Q=1: four 32-bit lanes.
:fmulx Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex
is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x9 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
	# simd element Re_VPR128.S[vIndex] lane size 4
	local tmp1:4 = Re_VPR128.S.vIndex;
	# simd infix Rd_VPR128.4S = Rn_VPR128.4S f* tmp1 on lane size 4
	# (comment corrected: the code multiplies every lane by the broadcast element
	# tmp1, not by Rm_VPR128.4S, matching the sibling by-element constructors)
	Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f* tmp1;
	Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f* tmp1;
	Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f* tmp1;
	Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f* tmp1;
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x7f009000/mask=xffc0f400
# CONSTRUCT x7f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =f*
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2
# AUNIT --inst x7f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround"
# Scalar, half-precision variant
:fmulx Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM
is b_2231=0b0111111100 & b_1215=0b1001
& b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; Rd_FPR16 = Rn_FPR16 f* tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x7f809000/mask=xff80f400 # CONSTRUCT x7f809000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 # AUNIT --inst x7f809000/mask=xffc0f400 --rand sfp --status pass --comment "nofpround" # Scalar, single-precision and double-precision variant when sz=0 Ts=S V=32 :fmulx Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2331=0b011111111 & b_22=0 & b_1215=0b1001 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; Rd_FPR32 = Rn_FPR32 f* tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x7f809000/mask=xff80f400 # CONSTRUCT x7fc09000/mask=xffe0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@8 # AUNIT --inst x7fc09000/mask=xffe0f400 --rand dfp --status pass --comment "nofpround" # Scalar, single-precision and double-precision variant when sz=1 Ts=D V=64 :fmulx Rd_FPR64, Rn_FPR64, Re_VPR128.D.vIndex is b_2331=0b011111111 & b_22=1 & b_21=0 & b_1215=0b1001 & b_10=0 & Rd_FPR64 & Rn_FPR64 & Re_VPR128.D.vIndex & Re_VPR128.D & vIndex & Zd { # simd element Re_VPR128.D[vIndex] lane size 8 local tmp1:8 = Re_VPR128.D.vIndex; Rd_FPR64 = Rn_FPR64 f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x2f009000/mask=xbfc0f400 # CONSTRUCT x2f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 # AUNIT --inst x2f009000/mask=xffc0f400 --rand hfp --status 
noqemu --comment "nofpround" # Vector, half-precision variant when Q = 0 suf=64.4H :fmulx Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR64.4H & Rn_VPR64.4H & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* tmp1 on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* tmp1; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* tmp1; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* tmp1; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.137 FMULX (by element) page C7-2328 line 136175 MATCH x2f009000/mask=xbfc0f400 # CONSTRUCT x6f009000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 # AUNIT --inst x6f009000/mask=xffc0f400 --rand hfp --status noqemu --comment "nofpround" # Vector, half-precision variant when Q = 1 suf=128.8H :fmulx Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2229=0b10111100 & b_1215=0b1001 & b_10=0 & Rd_VPR128.8H & Rn_VPR128.8H & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* tmp1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* tmp1; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* tmp1; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* tmp1; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* tmp1; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* tmp1; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* tmp1; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* tmp1; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.138 FMULX page C7-2332 line 136418 MATCH x5e20dc00/mask=xffa0fc00 # CONSTRUCT x5e60dc00/mask=xffe0fc00 MATCHED 1 
DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2
# AUNIT --inst x5e60dc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"

# FMULX scalar, double-precision: modeled as the NEON_fmulx pseudo-op (status
# nopcodeop), since FMULX's 0.0*inf => +/-2.0 behavior has no p-code primitive.
:fmulx Rd_FPR64, Rn_FPR64, Rm_FPR64
is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1b & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd
{
	# NOTE(review): unlike the neighboring f* scalar constructors, no zext_zd(Zd)
	# is performed here, so the upper bytes of Zd are left unmodified — confirm
	# this is intended for the pseudo-op stub.
	Rd_FPR64 = NEON_fmulx(Rn_FPR64, Rm_FPR64);
}

# C7.2.138 FMULX page C7-2332 line 136418 MATCH x5e20dc00/mask=xffa0fc00
# CONSTRUCT x5e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2
# AUNIT --inst x5e20dc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"

# FMULX scalar, single-precision.
:fmulx Rd_FPR32, Rn_FPR32, Rm_FPR32
is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1b & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd
{
	# NOTE(review): no zext_zs(Zd) — upper bytes of Zd left unchanged; confirm intended.
	Rd_FPR32 = NEON_fmulx(Rn_FPR32, Rm_FPR32);
}

# C7.2.138 FMULX page C7-2332 line 136418 MATCH x0e20dc00/mask=xbfa0fc00
# CONSTRUCT x4e60dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@8
# AUNIT --inst x4e60dc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround"

# FMULX vector, 2D: lane size passed as trailing 8:1 argument to the pseudo-op.
:fmulx Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D
is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1b & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
	Rd_VPR128.2D = NEON_fmulx(Rn_VPR128.2D, Rm_VPR128.2D, 8:1);
}

# C7.2.138 FMULX page C7-2332 line 136418 MATCH x0e20dc00/mask=xbfa0fc00
# CONSTRUCT x0e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4
# AUNIT --inst x0e20dc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround"

# FMULX vector, 2S.
:fmulx Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1b & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
	Rd_VPR64.2S = NEON_fmulx(Rn_VPR64.2S, Rm_VPR64.2S, 4:1);
}

# C7.2.138 FMULX page C7-2332 line 136418 MATCH x0e20dc00/mask=xbfa0fc00
# CONSTRUCT
x4e20dc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@4 # AUNIT --inst x4e20dc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :fmulx Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1b & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_fmulx(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.138 FMULX page C7-2332 line 136418 MATCH x5e401c00/mask=xffe0fc00 # CONSTRUCT x5e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =f* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2 # AUNIT --inst x5e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision variant :fmulx Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01011110010 & b_1015=0b000111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { Rd_FPR16 = Rn_FPR16 f* Rm_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.138 FMULX page C7-2332 line 136418 MATCH x0e401c00/mask=xbfe0fc00 # CONSTRUCT x0e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$f*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 # AUNIT --inst x0e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=0 suf=64.4H :fmulx Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H f* Rm_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f* Rm_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f* Rm_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f* Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f* Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.138 FMULX page C7-2332 line 136418 MATCH x0e401c00/mask=xbfe0fc00 # CONSTRUCT x4e401c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 
ARG2 ARG3 =$f*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fmulx/2@2 # AUNIT --inst x4e401c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 suf=128.8H :fmulx Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b000111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H f* Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f* Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f* Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f* Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f* Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f* Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f* Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f* Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f* Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.139 FNEG (vector) page C7-2335 line 136615 MATCH x2ea0f800/mask=xbfbffc00 # CONSTRUCT x6ee0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fneg@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@8 # AUNIT --inst x6ee0f800/mask=xfffffc00 --rand dfp --status pass :fneg Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = f-(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = f-(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = f-(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.139 FNEG (vector) page C7-2335 line 136615 MATCH x2ea0f800/mask=xbfbffc00 # CONSTRUCT x2ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fneg@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@4 # AUNIT --inst x2ea0f800/mask=xfffffc00 --rand sfp --status pass :fneg Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & 
advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = f-(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = f-(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = f-(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.139 FNEG (vector) page C7-2335 line 136615 MATCH x2ea0f800/mask=xbfbffc00 # CONSTRUCT x6ea0f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fneg@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@4 # AUNIT --inst x6ea0f800/mask=xfffffc00 --rand sfp --status pass :fneg Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xf & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = f-(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = f-(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = f-(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = f-(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = f-(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.139 FNEG (vector) page C7-2335 line 136615 MATCH x2ef8f800/mask=xbffffc00 # CONSTRUCT x2ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fneg@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@2 # AUNIT --inst x2ef8f800/mask=xfffffc00 --rand hfp --status noqemu # Half-precision variant when Q=0 suf=64.4H :fneg Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b10111011111000111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = f-(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = f-(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = f-(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = f-(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = f-(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.139 FNEG (vector) page C7-2335 line 136615 MATCH x2ef8f800/mask=xbffffc00 # CONSTRUCT x6ef8f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$fneg@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1@2 # AUNIT --inst x6ef8f800/mask=xfffffc00 --rand 
hfp --status noqemu # Half-precision variant when Q=1 suf=128.8H :fneg Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b10111011111000111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = f-(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = f-(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = f-(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = f-(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = f-(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = f-(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = f-(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = f-(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = f-(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.140 FNEG (scalar) page C7-2337 line 136726 MATCH x1e214000/mask=xff3ffc00 # CONSTRUCT x1e614000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =fneg # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1 # AUNIT --inst x1e614000/mask=xfffffc00 --rand dfp --status pass :fneg Rd_FPR64, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = f- Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.140 FNEG (scalar) page C7-2337 line 136726 MATCH x1e214000/mask=xff3ffc00 # CONSTRUCT x1e214000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =fneg # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1 # AUNIT --inst x1e214000/mask=xfffffc00 --rand sfp --status pass :fneg Rd_FPR32, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = f- Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.140 FNEG (scalar) page C7-2337 line 136726 MATCH x1e214000/mask=xff3ffc00 # CONSTRUCT x1ee14000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =fneg # SMACRO(pseudo) ARG1 ARG2 =NEON_fneg/1 # AUNIT --inst x1ee14000/mask=xfffffc00 --rand hfp --status noqemu :fneg Rd_FPR16, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & 
b_2121=1 & fpDpOpcode=0x2 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = f- Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.141 FNMADD page C7-2339 line 136822 MATCH x1f200000/mask=xff208000 # CONSTRUCT x1f600000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 fneg =NEON_fnmadd/3 # AUNIT --inst x1f600000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" :fnmadd Rd_FPR64, Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=0 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = (f-(Rn_FPR64 f* Rm_FPR64)) f- Ra_FPR64; zext_zd(Zd); } # C7.2.141 FNMADD page C7-2339 line 136822 MATCH x1f200000/mask=xff208000 # CONSTRUCT x1f200000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 fneg =NEON_fnmadd/3 # AUNIT --inst x1f200000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" :fnmadd Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=0 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = (f-(Rn_FPR32 f* Rm_FPR32)) f- Ra_FPR32; zext_zs(Zd); } # C7.2.141 FNMADD page C7-2339 line 136822 MATCH x1f200000/mask=xff208000 # CONSTRUCT x1fe00000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 fneg =NEON_fnmadd/3 # AUNIT --inst x1fe00000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" :fnmadd Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=0 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = (f-(Rn_FPR16 f* Rm_FPR16)) f- Ra_FPR16; zext_zh(Zd); } # C7.2.142 FNMSUB page C7-2341 line 136952 MATCH x1f208000/mask=xff208000 # CONSTRUCT x1f608000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fnmsub/3 # AUNIT --inst x1f608000/mask=xffe08000 --rand dfp --status nopcodeop --comment "nofpround" :fnmsub Rd_FPR64, 
Rn_FPR64, Rm_FPR64, Ra_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=1 & b_21=1 & Rm_FPR64 & b_15=1 & Ra_FPR64 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = (Rn_FPR64 f* Rm_FPR64) f- Ra_FPR64; zext_zd(Zd); } # C7.2.142 FNMSUB page C7-2341 line 136952 MATCH x1f208000/mask=xff208000 # CONSTRUCT x1f208000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fnmsub/3 # AUNIT --inst x1f208000/mask=xffe08000 --rand sfp --status nopcodeop --comment "nofpround" :fnmsub Rd_FPR32, Rn_FPR32, Rm_FPR32, Ra_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=0 & b_21=1 & Rm_FPR32 & b_15=1 & Ra_FPR32 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = (Rn_FPR32 f* Rm_FPR32) f- Ra_FPR32; zext_zs(Zd); } # C7.2.142 FNMSUB page C7-2341 line 136952 MATCH x1f208000/mask=xff208000 # CONSTRUCT x1fe08000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_fnmsub/3 # AUNIT --inst x1fe08000/mask=xffe08000 --rand hfp --status noqemu --comment "nofpround" :fnmsub Rd_FPR16, Rn_FPR16, Rm_FPR16, Ra_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1f & ftype=3 & b_21=1 & Rm_FPR16 & b_15=1 & Ra_FPR16 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = (Rn_FPR16 f* Rm_FPR16) f- Ra_FPR16; zext_zh(Zd); } # C7.2.143 FNMUL (scalar) page C7-2343 line 137081 MATCH x1e208800/mask=xff20fc00 # CONSTRUCT x1e608800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* =fneg # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fnmul/2 # AUNIT --inst x1e608800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround" :fnmul Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x8 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { local tmp1:8 = Rn_FPR64 f* Rm_FPR64; Rd_FPR64 = f- tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.143 FNMUL (scalar) page C7-2343 line 137081 MATCH x1e208800/mask=xff20fc00 # CONSTRUCT x1e208800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* =fneg # SMACRO(pseudo) 
ARG1 ARG2 ARG3 =NEON_fnmul/2 # AUNIT --inst x1e208800/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround" :fnmul Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x8 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { local tmp1:4 = Rn_FPR32 f* Rm_FPR32; Rd_FPR32 = f- tmp1; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.143 FNMUL (scalar) page C7-2343 line 137081 MATCH x1e208800/mask=xff20fc00 # CONSTRUCT x1ee08800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 f* =fneg # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fnmul/2 # AUNIT --inst x1ee08800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" :fnmul Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x8 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd { local tmp1:2 = Rn_FPR16 f* Rm_FPR16; Rd_FPR16 = f- tmp1; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x0ea1d800/mask=xbfbffc00 # CONSTRUCT x4ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@8 # AUNIT --inst x4ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" :frecpe Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_frecpe(Rn_VPR128.2D, 8:1); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x0ea1d800/mask=xbfbffc00 # CONSTRUCT x0ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@4 # AUNIT --inst x0ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" :frecpe Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_frecpe(Rn_VPR64.2S, 4:1); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x0ea1d800/mask=xbfbffc00 # 
CONSTRUCT x4ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@4 # AUNIT --inst x4ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" :frecpe Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_frecpe(Rn_VPR128.4S, 4:1); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x5ea1d800/mask=xffbffc00 # CONSTRUCT x5ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1 # AUNIT --inst x5ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" :frecpe Rd_FPR64, Rn_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_frecpe(Rn_FPR64); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x5ea1d800/mask=xffbffc00 # CONSTRUCT x5ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1 # AUNIT --inst x5ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" :frecpe Rd_FPR32, Rn_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=1 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_frecpe(Rn_FPR32); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x5ef9d800/mask=xfffffc00 # CONSTRUCT x5ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1 # AUNIT --inst x5ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision variant :frecpe Rd_FPR16, Rn_FPR16 is b_1031=0b0101111011111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = NEON_frecpe(Rn_FPR16); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x0ef9d800/mask=xbffffc00 # CONSTRUCT x0ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@2 # AUNIT --inst x0ef9d800/mask=xfffffc00 --rand hfp --status noqemu 
--comment "nofpround" # Vector half precision variant when Q=0 suf=64.4H :frecpe Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b00111011111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { Rd_VPR64.4H = NEON_frecpe(Rn_VPR64.4H, 2:1); } # C7.2.144 FRECPE page C7-2345 line 137191 MATCH x0ef9d800/mask=xbffffc00 # CONSTRUCT x4ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpe/1@2 # AUNIT --inst x4ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 suf=128.8H :frecpe Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111011111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { Rd_VPR128.8H = NEON_frecpe(Rn_VPR128.8H, 2:1); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x5e20fc00/mask=xffa0fc00 # CONSTRUCT x5e60fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2 # AUNIT --inst x5e60fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" :frecps Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=3 & Rm_FPR64 & b_1115=0x1f & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_frecps(Rn_FPR64, Rm_FPR64); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x5e20fc00/mask=xffa0fc00 # CONSTRUCT x5e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2 # AUNIT --inst x5e20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :frecps Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_2122=1 & Rm_FPR32 & b_1115=0x1f & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_frecps(Rn_FPR32, Rm_FPR32); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x0e20fc00/mask=xbfa0fc00 # CONSTRUCT x4e60fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@8 # AUNIT --inst x4e60fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" :frecps 
Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_21=1 & Rm_VPR128.2D & b_1115=0x1f & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_frecps(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x0e20fc00/mask=xbfa0fc00 # CONSTRUCT x0e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@4 # AUNIT --inst x0e20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :frecps Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR64.2S & b_1115=0x1f & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_frecps(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x0e20fc00/mask=xbfa0fc00 # CONSTRUCT x4e20fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@4 # AUNIT --inst x4e20fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" :frecps Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_21=1 & Rm_VPR128.4S & b_1115=0x1f & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_frecps(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x5e403c00/mask=xffe0fc00 # CONSTRUCT x5e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2 # AUNIT --inst x5e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision variant :frecps Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_2131=0b01011110010 & b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { Rd_FPR16 = NEON_frecps(Rn_FPR16, Rm_FPR16); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x0e403c00/mask=xbfe0fc00 # CONSTRUCT x0e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@2 # AUNIT --inst 
x0e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=0 suf=64.4H :frecps Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { Rd_VPR64.4H = NEON_frecps(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.145 FRECPS page C7-2348 line 137379 MATCH x0e403c00/mask=xbfe0fc00 # CONSTRUCT x4e403c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frecps/2@2 # AUNIT --inst x4e403c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 suf=128.8H :frecps Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110010 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { Rd_VPR128.8H = NEON_frecps(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.146 FRECPX page C7-2351 line 137576 MATCH x5ef9f800/mask=xfffffc00 # CONSTRUCT x5ef9f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpx/1 # AUNIT --inst x5ef9f800/mask=xfffffc00 --rand hfp --status noqemu # Half-precision variant :frecpx Rd_FPR16, Rn_FPR16 is b_1031=0b0101111011111001111110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = NEON_frecpx(Rn_FPR16); } # C7.2.146 FRECPX page C7-2351 line 137576 MATCH x5ea1f800/mask=xffbffc00 # CONSTRUCT x5ea1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpx/1 # AUNIT --inst x5ea1f800/mask=xfffffc00 --rand sfp --status nopcodeop # Single-precision and double-precision variant when sz=0 suf=32 :frecpx Rd_FPR32, Rn_FPR32 is b_2331=0b010111101 & b_22=0 & b_1021=0b100001111110 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = NEON_frecpx(Rn_FPR32); } # C7.2.146 FRECPX page C7-2351 line 137576 MATCH x5ea1f800/mask=xffbffc00 # CONSTRUCT x5ee1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frecpx/1 # AUNIT --inst x5ee1f800/mask=xfffffc00 --rand dfp --status 
nopcodeop # Single-precision and double-precision variant when sz=1 suf=64 :frecpx Rd_FPR64, Rn_FPR64 is b_2331=0b010111101 & b_22=1 & b_1021=0b100001111110 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = NEON_frecpx(Rn_FPR64); } # C7.2.140 FRINTA (vector) page C7-1313 line 76386 KEEPWITH frint_vmode: "a" is b_29=1 & b_23=0 & b_12=0 { } frint_vmode: "i" is b_29=1 & b_23=1 & b_12=1 { } frint_vmode: "m" is b_29=0 & b_23=0 & b_12=1 { } frint_vmode: "n" is b_29=0 & b_23=0 & b_12=0 { } frint_vmode: "p" is b_29=0 & b_23=1 & b_12=0 { } frint_vmode: "x" is b_29=1 & b_23=0 & b_12=1 { } frint_vmode: "z" is b_29=0 & b_23=1 & b_12=1 { } # C7.2.155 FRINTA (vector) page C7-2369 line 138408 MATCH x2e798800/mask=xbffffc00 # C7.2.157 FRINTI (vector) page C7-2373 line 138642 MATCH x2ef99800/mask=xbffffc00 # C7.2.159 FRINTM (vector) page C7-2377 line 138880 MATCH x0e799800/mask=xbffffc00 # C7.2.161 FRINTN (vector) page C7-2381 line 139118 MATCH x0e798800/mask=xbffffc00 # C7.2.163 FRINTP (vector) page C7-2385 line 139356 MATCH x0ef98800/mask=xbffffc00 # C7.2.165 FRINTX (vector) page C7-2389 line 139594 MATCH x2e799800/mask=xbffffc00 # C7.2.167 FRINTZ (vector) page C7-2393 line 139833 MATCH x0ef99800/mask=xbffffc00 # CONSTRUCT x0e798800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@2 # AUNIT --inst x0e798800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when Q=0 suf=64.4H :frint^frint_vmode Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = trunc(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = trunc(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = trunc(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = trunc(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = trunc(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.155 FRINTA (vector) page 
C7-2369 line 138408 MATCH x2e798800/mask=xbffffc00 # C7.2.157 FRINTI (vector) page C7-2373 line 138642 MATCH x2ef99800/mask=xbffffc00 # C7.2.159 FRINTM (vector) page C7-2377 line 138880 MATCH x0e799800/mask=xbffffc00 # C7.2.161 FRINTN (vector) page C7-2381 line 139118 MATCH x0e798800/mask=xbffffc00 # C7.2.163 FRINTP (vector) page C7-2385 line 139356 MATCH x0ef98800/mask=xbffffc00 # C7.2.165 FRINTX (vector) page C7-2389 line 139594 MATCH x2e799800/mask=xbffffc00 # C7.2.167 FRINTZ (vector) page C7-2393 line 139833 MATCH x0ef99800/mask=xbffffc00 # CONSTRUCT x4e798800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@2 # AUNIT --inst x4e798800/mask=xdf7fec00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when Q=1 suf=128.8H :frint^frint_vmode Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_1322=0b1111001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = trunc(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = trunc(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = trunc(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = trunc(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = trunc(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = trunc(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = trunc(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = trunc(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = trunc(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.155 FRINTA (vector) page C7-2369 line 138408 MATCH x2e218800/mask=xbfbffc00 # C7.2.157 FRINTI (vector) page C7-2373 line 138642 MATCH x2ea19800/mask=xbfbffc00 # C7.2.159 FRINTM (vector) page C7-2377 line 138880 MATCH x0e219800/mask=xbfbffc00 # C7.2.161 FRINTN (vector) page C7-2381 line 139118 MATCH x0e218800/mask=xbfbffc00 # C7.2.163 FRINTP (vector) page C7-2385 line 139356 MATCH x0ea18800/mask=xbfbffc00 # C7.2.165 FRINTX (vector) page C7-2389 line 139594 MATCH 
x2e219800/mask=xbfbffc00 # C7.2.167 FRINTZ (vector) page C7-2393 line 139833 MATCH x0ea19800/mask=xbfbffc00 # CONSTRUCT x0e218800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@4 # AUNIT --inst x0e218800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" # Single-precision and double-precision variant when sz=0 Q=0 suf=64.2S :frint^frint_vmode Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = trunc(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.155 FRINTA (vector) page C7-2369 line 138408 MATCH x2e218800/mask=xbfbffc00 # C7.2.157 FRINTI (vector) page C7-2373 line 138642 MATCH x2ea19800/mask=xbfbffc00 # C7.2.159 FRINTM (vector) page C7-2377 line 138880 MATCH x0e219800/mask=xbfbffc00 # C7.2.161 FRINTN (vector) page C7-2381 line 139118 MATCH x0e218800/mask=xbfbffc00 # C7.2.163 FRINTP (vector) page C7-2385 line 139356 MATCH x0ea18800/mask=xbfbffc00 # C7.2.165 FRINTX (vector) page C7-2389 line 139594 MATCH x2e219800/mask=xbfbffc00 # C7.2.167 FRINTZ (vector) page C7-2393 line 139833 MATCH x0ea19800/mask=xbfbffc00 # CONSTRUCT x4e218800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@4 # AUNIT --inst x4e218800/mask=xdf7fec00 --rand sfp --status fail --comment "nofpround" # Single-precision and double-precision variant when sz=0 Q=1 suf=128.4S :frint^frint_vmode Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b0 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = trunc(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = 
trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.155 FRINTA (vector) page C7-2369 line 138408 MATCH x2e218800/mask=xbfbffc00 # C7.2.157 FRINTI (vector) page C7-2373 line 138642 MATCH x2ea19800/mask=xbfbffc00 # C7.2.159 FRINTM (vector) page C7-2377 line 138880 MATCH x0e219800/mask=xbfbffc00 # C7.2.161 FRINTN (vector) page C7-2381 line 139118 MATCH x0e218800/mask=xbfbffc00 # C7.2.163 FRINTP (vector) page C7-2385 line 139356 MATCH x0ea18800/mask=xbfbffc00 # C7.2.165 FRINTX (vector) page C7-2389 line 139594 MATCH x2e219800/mask=xbfbffc00 # C7.2.167 FRINTZ (vector) page C7-2393 line 139833 MATCH x0ea19800/mask=xbfbffc00 # CONSTRUCT x4e618800/mask=xdf7fec00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$trunc@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1@8 # AUNIT --inst x4e618800/mask=xdf7fec00 --rand dfp --status fail --comment "nofpround" # Single-precision and double-precision variant when sz=1 Q=1 suf=128.2D :frint^frint_vmode Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_29 & b_2428=0b01110 & b_23 & b_22=0b1 & b_1321=0b100001100 & b_12 & b_1011=0b10 & frint_vmode & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = trunc(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.141 FRINTA (scalar) page C7-1315 line 76515 KEEPWITH # FP rounding instruction (not implemented) frint_smode: "a" is b_1517=0b100 { } frint_smode: "i" is b_1517=0b111 { } frint_smode: "m" is b_1517=0b010 { } frint_smode: "n" is b_1517=0b000 { } frint_smode: "p" is b_1517=0b001 { } frint_smode: "x" is b_1517=0b110 { } frint_smode: "z" is b_1517=0b011 { } # C7.2.156 FRINTA (scalar) page C7-2371 line 138539 MATCH x1e264000/mask=xff3ffc00 # C7.2.158 FRINTI (scalar) page C7-2375 
line 138773 MATCH x1e27c000/mask=xff3ffc00 # C7.2.160 FRINTM (scalar) page C7-2379 line 139011 MATCH x1e254000/mask=xff3ffc00 # C7.2.162 FRINTN (scalar) page C7-2383 line 139249 MATCH x1e244000/mask=xff3ffc00 # C7.2.164 FRINTP (scalar) page C7-2387 line 139487 MATCH x1e24c000/mask=xff3ffc00 # C7.2.166 FRINTX (scalar) page C7-2391 line 139726 MATCH x1e274000/mask=xff3ffc00 # C7.2.168 FRINTZ (scalar) page C7-2395 line 139964 MATCH x1e25c000/mask=xff3ffc00 # CONSTRUCT x1ee44000/mask=xfffc7c00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1 # AUNIT --inst x1ee44000/mask=xfffc7c00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when type = 11 suf=16 :frint^frint_smode Rd_FPR16, Rn_FPR16 is b_2431=0b00011110 & b_2223=0b11 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = trunc(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.156 FRINTA (scalar) page C7-2371 line 138539 MATCH x1e264000/mask=xff3ffc00 # C7.2.158 FRINTI (scalar) page C7-2375 line 138773 MATCH x1e27c000/mask=xff3ffc00 # C7.2.160 FRINTM (scalar) page C7-2379 line 139011 MATCH x1e254000/mask=xff3ffc00 # C7.2.162 FRINTN (scalar) page C7-2383 line 139249 MATCH x1e244000/mask=xff3ffc00 # C7.2.164 FRINTP (scalar) page C7-2387 line 139487 MATCH x1e24c000/mask=xff3ffc00 # C7.2.166 FRINTX (scalar) page C7-2391 line 139726 MATCH x1e274000/mask=xff3ffc00 # C7.2.168 FRINTZ (scalar) page C7-2395 line 139964 MATCH x1e25c000/mask=xff3ffc00 # CONSTRUCT x1e244000/mask=xfffc7c00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1 # AUNIT --inst x1e244000/mask=xfffc7c00 --rand sfp --status fail --comment "nofpround" # Single-precision variant when type = 00 suf=32 :frint^frint_smode Rd_FPR32, Rn_FPR32 is b_2431=0b00011110 & b_2223=0b00 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = 
trunc(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.156 FRINTA (scalar) page C7-2371 line 138539 MATCH x1e264000/mask=xff3ffc00 # C7.2.158 FRINTI (scalar) page C7-2375 line 138773 MATCH x1e27c000/mask=xff3ffc00 # C7.2.160 FRINTM (scalar) page C7-2379 line 139011 MATCH x1e254000/mask=xff3ffc00 # C7.2.162 FRINTN (scalar) page C7-2383 line 139249 MATCH x1e244000/mask=xff3ffc00 # C7.2.164 FRINTP (scalar) page C7-2387 line 139487 MATCH x1e24c000/mask=xff3ffc00 # C7.2.166 FRINTX (scalar) page C7-2391 line 139726 MATCH x1e274000/mask=xff3ffc00 # C7.2.168 FRINTZ (scalar) page C7-2395 line 139964 MATCH x1e25c000/mask=xff3ffc00 # CONSTRUCT x1e644000/mask=xfffc7c00 MATCHED 7 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =trunc # SMACRO(pseudo) ARG1 ARG2 =NEON_frint_aimnpxz/1 # AUNIT --inst x1e644000/mask=xfffc7c00 --rand dfp --status fail --comment "nofpround" # Double-precision variant when type = 01 suf=64 :frint^frint_smode Rd_FPR64, Rn_FPR64 is b_2431=0b00011110 & b_2223=0b01 & b_1821=0b1001 & b_1517 & b_1014=0b10000 & frint_smode & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = trunc(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x7ef9d800/mask=xfffffc00 # CONSTRUCT x7ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1 # AUNIT --inst x7ef9d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Scalar half precision variant when Q=1 sz=1 ba=11 bb=111 V=FPR16 esize= :frsqrte Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = NEON_frsqrte(Rn_FPR16); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x7ea1d800/mask=xffbffc00 # CONSTRUCT x7ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1 # AUNIT --inst x7ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" # Scalar single-precision and double-precision variant when Q=1 sz=0 ba=11 
bb=100 V=FPR32 esize= :frsqrte Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=0 & b_1021=0b100001110110 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = NEON_frsqrte(Rn_FPR32); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x7ea1d800/mask=xffbffc00 # CONSTRUCT x7ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1 # AUNIT --inst x7ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" # Scalar single-precision and double-precision variant when Q=1 sz=1 ba=11 bb=100 V=FPR64 esize= :frsqrte Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2329=0b1111101 & b_22=1 & b_1021=0b100001110110 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = NEON_frsqrte(Rn_FPR64); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x2ef9d800/mask=xbffffc00 # CONSTRUCT x2ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@2 # AUNIT --inst x2ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=0 sz=1 ba=10 bb=111 V=VPR64.4H esize=@2 :frsqrte Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { Rd_VPR64.4H = NEON_frsqrte(Rn_VPR64.4H, 2:1); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x2ef9d800/mask=xbffffc00 # CONSTRUCT x6ef9d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@2 # AUNIT --inst x6ef9d800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 sz=1 ba=10 bb=111 V=VPR128.8H esize=@2 :frsqrte Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { Rd_VPR128.8H = NEON_frsqrte(Rn_VPR128.8H, 2:1); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x2ea1d800/mask=xbfbffc00 # CONSTRUCT x2ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 
=NEON_frsqrte/1@4 # AUNIT --inst x2ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" # Vector single-precision and double-precision variant when Q=0 sz=0 ba=10 bb=100 V=VPR64.2S esize=@4 :frsqrte Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { Rd_VPR64.2S = NEON_frsqrte(Rn_VPR64.2S, 4:1); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x2ea1d800/mask=xbfbffc00 # CONSTRUCT x6ea1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@4 # AUNIT --inst x6ea1d800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" # Vector single-precision and double-precision variant when Q=1 sz=0 ba=10 bb=100 V=VPR128.4S esize=@4 :frsqrte Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_frsqrte(Rn_VPR128.4S, 4:1); } # C7.2.169 FRSQRTE page C7-2397 line 140071 MATCH x2ea1d800/mask=xbfbffc00 # CONSTRUCT x6ee1d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrte/1@8 # AUNIT --inst x6ee1d800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" # Vector single-precision and double-precision variant when Q=1 sz=1 ba=10 bb=100 V=VPR128.2D esize=@8 :frsqrte Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001110110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { Rd_VPR128.2D = NEON_frsqrte(Rn_VPR128.2D, 8:1); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x5ec03c00/mask=xffe0fc00 # CONSTRUCT x5ec03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_frsqrts/1 # AUNIT --inst x5ec03c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Scalar half precision variant when Q=1 sz=1 ba=01 bb=0 bc=00 V=FPR16 esize= :frsqrts Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=0 & 
b_1015=0b001111 & Rd_FPR16 & Rn_FPR16 & Rm_FPR16 & Zd { Rd_FPR16 = NEON_frsqrts(Rn_FPR16, Rm_FPR16); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x5ea0fc00/mask=xffa0fc00 # CONSTRUCT x5ea0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2 # AUNIT --inst x5ea0fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" # Scalar single-precision and double-precision variant when Q=1 sz=0 ba=01 bb=1 bc=11 V=FPR32 esize= :frsqrts Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_FPR32 & Rn_FPR32 & Rm_FPR32 & Zd { Rd_FPR32 = NEON_frsqrts(Rn_FPR32, Rm_FPR32); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x5ea0fc00/mask=xffa0fc00 # CONSTRUCT x5ee0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2 # AUNIT --inst x5ee0fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" # Scalar single-precision and double-precision variant when Q=1 sz=1 ba=01 bb=1 bc=11 V=FPR64 esize= :frsqrts Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_31=0 & b_30=1 & b_2329=0b0111101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_FPR64 & Rn_FPR64 & Rm_FPR64 & Zd { Rd_FPR64 = NEON_frsqrts(Rn_FPR64, Rm_FPR64); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x0ec03c00/mask=xbfe0fc00 # CONSTRUCT x0ec03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@2 # AUNIT --inst x0ec03c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=0 sz=1 ba=00 bb=0 bc=00 V=VPR64.4H esize=@2 :frsqrts Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd { Rd_VPR64.4H = NEON_frsqrts(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x0ec03c00/mask=xbfe0fc00 # CONSTRUCT x4ec03c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@2 # AUNIT --inst x4ec03c00/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 sz=1 ba=00 bb=0 bc=00 V=VPR128.8H esize=@2 :frsqrts Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=0 & b_1015=0b001111 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd { Rd_VPR128.8H = NEON_frsqrts(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x0ea0fc00/mask=xbfa0fc00 # CONSTRUCT x0ea0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@4 # AUNIT --inst x0ea0fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" # Vector single-precision and double-precision variant when Q=0 sz=0 ba=00 bb=1 bc=11 V=VPR64.2S esize=@4 :frsqrts Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_VPR64.2S & Rn_VPR64.2S & Rm_VPR64.2S & Zd { Rd_VPR64.2S = NEON_frsqrts(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x0ea0fc00/mask=xbfa0fc00 # CONSTRUCT x4ea0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@4 # AUNIT --inst x4ea0fc00/mask=xffe0fc00 --rand sfp --status nopcodeop --comment "nofpround" # Vector single-precision and double-precision variant when Q=1 sz=0 ba=00 bb=1 bc=11 V=VPR128.4S esize=@4 :frsqrts Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_21=1 & b_1015=0b111111 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { Rd_VPR128.4S = NEON_frsqrts(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.170 FRSQRTS page C7-2400 line 140259 MATCH x0ea0fc00/mask=xbfa0fc00 # CONSTRUCT x4ee0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_frsqrts/2@8 # AUNIT --inst x4ee0fc00/mask=xffe0fc00 --rand dfp --status nopcodeop --comment "nofpround" # 
Vector single-precision and double-precision variant when Q=1 sz=1 ba=00 bb=1 bc=11 V=VPR128.2D esize=@8 :frsqrts Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=1 & b_21=1 & b_1015=0b111111 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { Rd_VPR128.2D = NEON_frsqrts(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.171 FSQRT (vector) page C7-2403 line 140456 MATCH x2ef9f800/mask=xbffffc00 # CONSTRUCT x2ef9f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@2 # AUNIT --inst x2ef9f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when Q=0 sz=1 ba=111 esize=2 suf=VPR64.4H :fsqrt Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = sqrt(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = sqrt(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = sqrt(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = sqrt(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = sqrt(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.171 FSQRT (vector) page C7-2403 line 140456 MATCH x2ef9f800/mask=xbffffc00 # CONSTRUCT x6ef9f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@2 # AUNIT --inst x6ef9f800/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant when Q=1 sz=1 ba=111 esize=2 suf=VPR128.8H :fsqrt Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b111001111110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = sqrt(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = sqrt(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = sqrt(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = sqrt(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = sqrt(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = sqrt(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] = sqrt(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = 
sqrt(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = sqrt(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.171 FSQRT (vector) page C7-2403 line 140456 MATCH x2ea1f800/mask=xbfbffc00 # CONSTRUCT x2ea1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@4 # AUNIT --inst x2ea1f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" # Single-precision and double-precision variant when Q=0 sz=0 ba=100 esize=4 suf=VPR64.2S :fsqrt Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = sqrt(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = sqrt(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = sqrt(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.171 FSQRT (vector) page C7-2403 line 140456 MATCH x2ea1f800/mask=xbfbffc00 # CONSTRUCT x6ea1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@4 # AUNIT --inst x6ea1f800/mask=xfffffc00 --rand sfp --status nopcodeop --comment "nofpround" # Single-precision and double-precision variant when Q=1 sz=0 ba=100 esize=4 suf=VPR128.4S :fsqrt Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001111110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = sqrt(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = sqrt(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = sqrt(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = sqrt(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = sqrt(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.171 FSQRT (vector) page C7-2403 line 140456 MATCH x2ea1f800/mask=xbfbffc00 # CONSTRUCT x6ee1f800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1@8 # AUNIT --inst x6ee1f800/mask=xfffffc00 --rand dfp --status nopcodeop --comment "nofpround" # Single-precision and double-precision variant when Q=1 sz=1 ba=100 
esize=8 suf=VPR128.2D :fsqrt Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=1 & b_1021=0b100001111110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = sqrt(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = sqrt(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = sqrt(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.172 FSQRT (scalar) page C7-2405 line 140566 MATCH x1e21c000/mask=xff3ffc00 # CONSTRUCT x1ee1c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sqrt/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1 # AUNIT --inst x1ee1c000/mask=xfffffc00 --rand hfp --status noqemu --comment "nofpround" # Half-precision variant :fsqrt Rd_FPR16, Rn_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = sqrt(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.172 FSQRT (scalar) page C7-2405 line 140566 MATCH x1e21c000/mask=xff3ffc00 # CONSTRUCT x1e21c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sqrt/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1 # AUNIT --inst x1e21c000/mask=xfffffc00 --rand sfp --status fail --comment "nofpround" # Single-precision variant :fsqrt Rd_FPR32, Rn_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = sqrt(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.172 FSQRT (scalar) page C7-2405 line 140566 MATCH x1e21c000/mask=xff3ffc00 # CONSTRUCT x1e61c000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sqrt/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_fsqrt/1 # AUNIT --inst x1e61c000/mask=xfffffc00 --rand dfp --status fail --comment "nofpround" # Double-precision variant :fsqrt Rd_FPR64, Rn_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & fpDpOpcode=0x3 & b_1014=0x10 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = sqrt(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.173 FSUB (vector) page C7-2407 
line 140666 MATCH x0ea0d400/mask=xbfa0fc00
# CONSTRUCT x4ee0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$f-@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@8
# AUNIT --inst x4ee0d400/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"

:fsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_21=1 & Rm_VPR128.2D & b_1115=0x1a & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd
{
    # simd infix Rd_VPR128.2D = Rn_VPR128.2D f- Rm_VPR128.2D on lane size 8
    Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] f- Rm_VPR128.2D[0,64];
    Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] f- Rm_VPR128.2D[64,64];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.173 FSUB (vector) page C7-2407 line 140666 MATCH x0ea0d400/mask=xbfa0fc00
# CONSTRUCT x0ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$f-@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@4
# AUNIT --inst x0ea0d400/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"

:fsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR64.2S & b_1115=0x1a & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
    # simd infix Rd_VPR64.2S = Rn_VPR64.2S f- Rm_VPR64.2S on lane size 4
    Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] f- Rm_VPR64.2S[0,32];
    Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] f- Rm_VPR64.2S[32,32];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.173 FSUB (vector) page C7-2407 line 140666 MATCH x0ea0d400/mask=xbfa0fc00
# CONSTRUCT x4ea0d400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$f-@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@4
# AUNIT --inst x4ea0d400/mask=xffe0fc00 --rand sfp --status fail --comment "nofpround"

:fsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_21=1 & Rm_VPR128.4S & b_1115=0x1a & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
    # simd infix Rd_VPR128.4S = Rn_VPR128.4S f- Rm_VPR128.4S on lane size 4
    Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] f- Rm_VPR128.4S[0,32];
    Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] f- Rm_VPR128.4S[32,32];
    Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] f- Rm_VPR128.4S[64,32];
    Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] f- Rm_VPR128.4S[96,32];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.173 FSUB (vector) page C7-2407 line 140666 MATCH x0ec01400/mask=xbfe0fc00
# CONSTRUCT x0ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$f-@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@2
# AUNIT --inst x0ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant when Q=0 suf=VPR64.4H

:fsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR64.4H & Rn_VPR64.4H & Rm_VPR64.4H & Zd
{
    # simd infix Rd_VPR64.4H = Rn_VPR64.4H f- Rm_VPR64.4H on lane size 2
    Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] f- Rm_VPR64.4H[0,16];
    Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] f- Rm_VPR64.4H[16,16];
    Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] f- Rm_VPR64.4H[32,16];
    Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] f- Rm_VPR64.4H[48,16];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.173 FSUB (vector) page C7-2407 line 140666 MATCH x0ec01400/mask=xbfe0fc00
# CONSTRUCT x4ec01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$f-@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2@2
# AUNIT --inst x4ec01400/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"
# Half-precision variant when Q=1 suf=VPR128.8H

:fsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2129=0b001110110 & b_1015=0b000101 & Rd_VPR128.8H & Rn_VPR128.8H & Rm_VPR128.8H & Zd
{
    # simd infix Rd_VPR128.8H = Rn_VPR128.8H f- Rm_VPR128.8H on lane size 2
    Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] f- Rm_VPR128.8H[0,16];
    Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] f- Rm_VPR128.8H[16,16];
    Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] f- Rm_VPR128.8H[32,16];
    Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] f- Rm_VPR128.8H[48,16];
    Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] f- Rm_VPR128.8H[64,16];
    Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] f- Rm_VPR128.8H[80,16];
    Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] f- Rm_VPR128.8H[96,16];
    Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] f- Rm_VPR128.8H[112,16];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.174 FSUB (scalar) page C7-2409 line 140785 MATCH x1e203800/mask=xff20fc00
# CONSTRUCT x1e603800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =f-
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2
# AUNIT --inst x1e603800/mask=xffe0fc00 --rand dfp --status pass --comment "nofpround"

:fsub Rd_FPR64, Rn_FPR64, Rm_FPR64 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & Rm_FPR64 & b_1215=0x3 & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd
{
    Rd_FPR64 = Rn_FPR64 f- Rm_FPR64;
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.174 FSUB (scalar) page C7-2409 line 140785 MATCH x1e203800/mask=xff20fc00
# CONSTRUCT x1e203800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =f-
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2
# AUNIT --inst x1e203800/mask=xffe0fc00 --rand sfp --status pass --comment "nofpround"

:fsub Rd_FPR32, Rn_FPR32, Rm_FPR32 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & Rm_FPR32 & b_1215=0x3 & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd
{
    Rd_FPR32 = Rn_FPR32 f- Rm_FPR32;
    zext_zs(Zd); # zero upper 28 bytes of Zd
}

# C7.2.174 FSUB (scalar) page C7-2409 line 140785 MATCH x1e203800/mask=xff20fc00
# CONSTRUCT x1ee03800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =f-
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_fsub/2
# AUNIT --inst x1ee03800/mask=xffe0fc00 --rand hfp --status noqemu --comment "nofpround"

:fsub Rd_FPR16, Rn_FPR16, Rm_FPR16 is m=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & Rm_FPR16 & b_1215=0x3 & b_1011=2 & Rn_FPR16 & Rd_FPR16 & Zd
{
    Rd_FPR16 = Rn_FPR16 f- Rm_FPR16;
    zext_zh(Zd); # zero upper 30 bytes of Zd
}

# C7.2.189 LDNP (SIMD&FP) page C7-2456 line 143778 MATCH x2c400000/mask=x3fc00000
# CONSTRUCT x2c400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 4 +:8 =load ext
# SMACRO(pseudo) ARG1 ARG3 =NEON_ldnp1/1 ARG2 ARG3 =NEON_ldnp2/1
# AUNIT --inst x2c400000/mask=xffc00000 --status nomem

:ldnp Rt_FPR32, Rt2_FPR32, addrPairIndexed is b_3031=0b00 & b_2229=0b10110001 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt & Zt2
{
    Rt_FPR32 = * addrPairIndexed;
    zext_zs(Zt); # zero upper 28 bytes of Zt
    local tmp1:8 = addrPairIndexed + 4;
    Rt2_FPR32 = * tmp1;
    zext_zs(Zt2); # zero upper 28 bytes of Zt2
}

# C7.2.189 LDNP (SIMD&FP) page C7-2456 line 143778 MATCH x2c400000/mask=x3fc00000
# CONSTRUCT x6c400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 8 +:8 =load ext
# SMACRO(pseudo) ARG1 ARG3 =NEON_ldnp1/1 ARG2 ARG3 =NEON_ldnp2/1
# AUNIT --inst x6c400000/mask=xffc00000 --status nomem

:ldnp Rt_FPR64, Rt2_FPR64, addrPairIndexed is b_3031=0b01 & b_2229=0b10110001 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt & Zt2
{
    Rt_FPR64 = * addrPairIndexed;
    zext_zd(Zt); # zero upper 24 bytes of Zt
    local tmp1:8 = addrPairIndexed + 8;
    Rt2_FPR64 = * tmp1;
    zext_zd(Zt2); # zero upper 24 bytes of Zt2
}

# C7.2.189 LDNP (SIMD&FP) page C7-2456 line 143778 MATCH x2c400000/mask=x3fc00000
# CONSTRUCT xac400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 16 +:8 =load ext
# SMACRO(pseudo) ARG1 ARG3 =NEON_ldnp1/1 ARG2 ARG3 =NEON_ldnp2/1
# AUNIT --inst xac400000/mask=xffc00000 --status nomem

:ldnp Rt_FPR128, Rt2_FPR128, addrPairIndexed is b_3031=0b10 & b_2229=0b10110001 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & Zt & Zt2
{
    Rt_FPR128 = * addrPairIndexed;
    zext_zq(Zt); # zero upper 16 bytes of Zt
    local tmp1:8 = addrPairIndexed + 16;
    Rt2_FPR128 = * tmp1;
    zext_zq(Zt2); # zero upper 16 bytes of Zt2
}

# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2cc00000/mask=x3fc00000
# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2dc00000/mask=x3fc00000
# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2d400000/mask=x3fc00000
# C7.2.189 LDNP (SIMD&FP) page C7-2456 line 143778 MATCH x2c400000/mask=x3fc00000
# CONSTRUCT xac400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 16 +:8 =load ext
# SMACRO(pseudo) ARG1 ARG3 =NEON_ldp1/1 ARG2 ARG3 =NEON_ldp2/1
# AUNIT --inst xac400000/mask=xfe400000 --status nomem

:ldp Rt_FPR128, Rt2_FPR128, addrPairIndexed is b_3031=0b10 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128 & Zt & Zt2
{
    Rt_FPR128 = * addrPairIndexed;
    zext_zq(Zt); # zero upper 16 bytes of Zt
    local tmp1:8 = addrPairIndexed + 16;
    Rt2_FPR128 = * tmp1;
    zext_zq(Zt2); # zero upper 16 bytes of Zt2
}

# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2cc00000/mask=x3fc00000
# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2dc00000/mask=x3fc00000
# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2d400000/mask=x3fc00000
# C7.2.189 LDNP (SIMD&FP) page C7-2456 line 143778 MATCH x2c400000/mask=x3fc00000
# CONSTRUCT x2c400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 4 +:8 =load ext
# SMACRO(pseudo) ARG1 ARG3 =NEON_ldp1/1 ARG2 ARG3 =NEON_ldp2/1
# AUNIT --inst x2c400000/mask=xfe400000 --status nomem

:ldp Rt_FPR32, Rt2_FPR32, addrPairIndexed is b_3031=0b00 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 & Zt & Zt2
{
    Rt_FPR32 = * addrPairIndexed;
    zext_zs(Zt); # zero upper 28 bytes of Zt
    local tmp1:8 = addrPairIndexed + 4;
    Rt2_FPR32 = * tmp1;
    zext_zs(Zt2); # zero upper 28 bytes of Zt2
}

# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2cc00000/mask=x3fc00000
# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2dc00000/mask=x3fc00000
# C7.2.190 LDP (SIMD&FP) page C7-2458 line 143922 MATCH x2d400000/mask=x3fc00000
# C7.2.189 LDNP (SIMD&FP) page C7-2456 line 143778 MATCH x2c400000/mask=x3fc00000
# CONSTRUCT x6c400000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =load ext ARG2 ARG3 8 +:8 =load ext
# SMACRO(pseudo) ARG1 ARG3 =NEON_ldp1/1 ARG2 ARG3 =NEON_ldp2/1
# AUNIT --inst x6c400000/mask=xfe400000 --status nomem

:ldp Rt_FPR64, Rt2_FPR64, addrPairIndexed is b_3031=0b01 & b_2529=0b10110 & b_22=0b1 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64 & Zt & Zt2
{
    Rt_FPR64 = * addrPairIndexed;
    zext_zd(Zt); # zero upper 24 bytes of Zt
    local tmp1:8 = addrPairIndexed + 8;
    Rt2_FPR64 = * tmp1;
    zext_zd(Zt2); # zero upper 24 bytes of Zt2
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400400/mask=x3f600c00
# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400c00/mask=x3f600c00
# CONSTRUCT x3c400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x3c400400/mask=xffe00400 --status nomem
# Post- and Pre-index 8-bit variant when size==00 && opc==01 F=FPR8

:ldr Rt_FPR8, addrIndexed is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR8 & addrIndexed & Zt
{
    Rt_FPR8 = * addrIndexed;
    zext_zb(Zt); # zero upper 31 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400400/mask=x3f600c00
# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400c00/mask=x3f600c00
# CONSTRUCT x7c400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x7c400400/mask=xffe00400 --status nomem
# Post- and Pre-index 16-bit variant when size==01 && opc==01 F=FPR16

:ldr Rt_FPR16, addrIndexed is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR16 & addrIndexed & Zt
{
    Rt_FPR16 = * addrIndexed;
    zext_zh(Zt); # zero upper 30 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400400/mask=x3f600c00
# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400c00/mask=x3f600c00
# CONSTRUCT xbc400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst xbc400400/mask=xffe00400 --status nomem
# Post- and Pre-index 32-bit variant when size==10 && opc==01 F=FPR32

:ldr Rt_FPR32, addrIndexed is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR32 & addrIndexed & Zt
{
    Rt_FPR32 = * addrIndexed;
    zext_zs(Zt); # zero upper 28 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400400/mask=x3f600c00
# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400c00/mask=x3f600c00
# CONSTRUCT xfc400400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst xfc400400/mask=xffe00400 --status nomem
# Post- and Pre-index 64-bit variant when size==11 && opc==01 F=FPR64

:ldr Rt_FPR64, addrIndexed is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=0 & b_10=1 & Rt_FPR64 & addrIndexed & Zt
{
    Rt_FPR64 = * addrIndexed;
    zext_zd(Zt); # zero upper 24 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400400/mask=x3f600c00
# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3c400c00/mask=x3f600c00
# CONSTRUCT x3cc00400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x3cc00400/mask=xffe00400 --status nomem
# Post- and Pre-index 128-bit variant when size==00 && opc==11 F=FPR128

:ldr Rt_FPR128, addrIndexed is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=0 & b_10=1 & Rt_FPR128 & addrIndexed & Zt
{
    Rt_FPR128 = * addrIndexed;
    zext_zq(Zt); # zero upper 16 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3d400000/mask=x3f400000
# CONSTRUCT x3d400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x3d400000/mask=xffc00000 --status nomem
# Unsigned offset 8-bit variant when size == 00 && opc == 01 F=FPR8

:ldr Rt_FPR8, addrUIMM is b_3031=0b00 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR8 & addrUIMM & Zt
{
    Rt_FPR8 = * addrUIMM;
    zext_zb(Zt); # zero upper 31 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3d400000/mask=x3f400000
# CONSTRUCT x7d400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x7d400000/mask=xffc00000 --status nomem
# Unsigned offset 16-bit variant when size == 01 && opc == 01 F=FPR16

:ldr Rt_FPR16, addrUIMM is b_3031=0b01 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR16 & addrUIMM & Zt
{
    Rt_FPR16 = * addrUIMM;
    zext_zh(Zt); # zero upper 30 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3d400000/mask=x3f400000
# CONSTRUCT xbd400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst xbd400000/mask=xffc00000 --status nomem
# Unsigned offset 32-bit variant when size == 10 && opc == 01 F=FPR32

:ldr Rt_FPR32, addrUIMM is b_3031=0b10 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR32 & addrUIMM & Zt
{
    Rt_FPR32 = * addrUIMM;
    zext_zs(Zt); # zero upper 28 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3d400000/mask=x3f400000
# CONSTRUCT xfd400000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst xfd400000/mask=xffc00000 --status nomem
# Unsigned offset 64-bit variant when size == 11 && opc == 01 F=FPR64

:ldr Rt_FPR64, addrUIMM is b_3031=0b11 & b_2429=0b111101 & b_2223=0b01 & Rt_FPR64 & addrUIMM & Zt
{
    Rt_FPR64 = * addrUIMM;
    zext_zd(Zt); # zero upper 24 bytes of Zt
}

# C7.2.191 LDR (immediate, SIMD&FP) page C7-2462 line 144163 MATCH x3d400000/mask=x3f400000
# CONSTRUCT x3dc00000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x3dc00000/mask=xffc00000 --status nomem
# Unsigned offset 128-bit variant when size == 00 && opc == 11 F=FPR128

:ldr Rt_FPR128, addrUIMM is b_3031=0b00 & b_2429=0b111101 & b_2223=0b11 & Rt_FPR128 & addrUIMM & Zt
{
    Rt_FPR128 = * addrUIMM;
    zext_zq(Zt); # zero upper 16 bytes of Zt
}

# C7.2.192 LDR (literal, SIMD&FP) page C7-2466 line 144427 MATCH x1c000000/mask=x3f000000
# CONSTRUCT x5c000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load:8
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x5c000000/mask=xff000000 --status nomem

:ldr Rt_FPR64, AddrLoc19 is size.ldstr=1 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR64 & Zt
{
    Rt_FPR64 = *:8 AddrLoc19;
    zext_zd(Zt); # zero upper 24 bytes of Zt
}

# C7.2.192 LDR (literal, SIMD&FP) page C7-2466 line 144427 MATCH x1c000000/mask=x3f000000
# CONSTRUCT x9c000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load:16
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x9c000000/mask=xff000000 --status nomem

:ldr Rt_FPR128, AddrLoc19 is size.ldstr=2 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR128 & Zt
{
    Rt_FPR128 = *:16 AddrLoc19;
    zext_zq(Zt); # zero upper 16 bytes of Zt
}

# C7.2.192 LDR (literal, SIMD&FP) page C7-2466 line 144427 MATCH x1c000000/mask=x3f000000
# CONSTRUCT x1c000000/mask=xff000000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load:4
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldr/1
# AUNIT --inst x1c000000/mask=xff000000 --status nomem

:ldr Rt_FPR32, AddrLoc19 is size.ldstr=0 & b_2729=3 & v=1 & b_2425=0 & AddrLoc19 & Rt_FPR32 & Zt
{
    Rt_FPR32 = *:4 AddrLoc19;
    zext_zs(Zt); # zero upper 28 bytes of Zt
}

# C7.2.178 LDR (register, SIMD&FP) page C7-1411 line 82199 KEEPWITH

extend_amount: "" is b_3031=0b00 & b_23=0 & b_12=0 { export 0:1; }
extend_amount: " #0" is b_3031=0b00 & b_23=0 & b_12=1 { export 0:1; }
extend_amount: "" is b_3031=0b01 & b_23=0 & b_12=0 { export 0:1; }
extend_amount: " #1" is b_3031=0b01 & b_23=0 & b_12=1 { export 1:1; }
extend_amount: "" is b_3031=0b10 & b_23=0 & b_12=0 { export 0:1; }
extend_amount: " #2" is b_3031=0b10 & b_23=0 & b_12=1 { export 2:1; }
extend_amount: "" is b_3031=0b11 & b_23=0 & b_12=0 { export 0:1; }
extend_amount: " #3" is b_3031=0b11 & b_23=0 & b_12=1 { export 3:1; }
extend_amount: "" is b_3031=0b00 & b_23=1 & b_12=0 { export 0:1; }
extend_amount: " #4" is b_3031=0b00 & b_23=1 & b_12=1 { export 4:1; }

extend_spec: ", uxtw" is b_1315=0b010 & Rm_GPR32 { local tmp:8 = zext(Rm_GPR32); export tmp; }
extend_spec: ", sxtw" is b_1315=0b110 & Rm_GPR32 { local tmp:8 = sext(Rm_GPR32); export tmp; }
extend_spec: ", sxtx" is b_1315=0b111 & Rm_GPR64 { export Rm_GPR64; }
extend_spec: ", lsl" is b_1315=0b011 & b_12=1 & Rm_GPR64 { export Rm_GPR64; } # same as uxtx
extend_spec: "" is b_1315=0b011 & b_12=0 & Rm_GPR64 { export Rm_GPR64; } # same as uxtx

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x3c600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x3c600800/mask=xffe02c00 --status nomem
# 8-fsreg,LDR-8-fsreg variant when size == 00 && opc == 01 && option is not 011 bb=b_13 option=0 F=FPR8 G=GPR32

:ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR8 = * tmp2;
    zext_zb(Zt); # zero upper 31 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x3c602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x3c602800/mask=xffe02c00 --status nomem
# 8-fsreg,LDR-8-fsreg variant when size == 00 && opc == 01 && option is not 011 bb=b_13 option=1 F=FPR8 G=GPR64

:ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR8 = * tmp2;
    zext_zb(Zt); # zero upper 31 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x3c606800/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x3c606800/mask=xffe0ec00 --status nomem
# 8-fsreg,LDR-8-fsreg variant when size == 00 && opc == 01 && option is 011 bb=b_1315 option=0b011 F=FPR8 G=GPR64

:ldr Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_1315=0b011 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR8 = * tmp2;
    zext_zb(Zt); # zero upper 31 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x7c600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x7c600800/mask=xffe02c00 --status nomem
# 16-fsreg,LDR-16-fsreg variant when size == 01 && opc == 01 bb=b_13 option=0 F=FPR16 G=GPR32

:ldr Rt_FPR16, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR16 = * tmp2;
    zext_zh(Zt); # zero upper 30 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x7c602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x7c602800/mask=xffe02c00 --status nomem
# 16-fsreg,LDR-16-fsreg variant when size == 01 && opc == 01 bb=b_13 option=1 F=FPR16 G=GPR64

:ldr Rt_FPR16, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR16 = * tmp2;
    zext_zh(Zt); # zero upper 30 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT xbc600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst xbc600800/mask=xffe02c00 --status nomem
# 32-fsreg,LDR-32-fsreg variant when size == 10 && opc == 01 bb=b_13 option=0 F=FPR32 G=GPR32

:ldr Rt_FPR32, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR32 = * tmp2;
    zext_zs(Zt); # zero upper 28 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT xbc602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst xbc602800/mask=xffe02c00 --status nomem
# 32-fsreg,LDR-32-fsreg variant when size == 10 && opc == 01 bb=b_13 option=1 F=FPR32 G=GPR64

:ldr Rt_FPR32, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR32 = * tmp2;
    zext_zs(Zt); # zero upper 28 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT xfc600800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst xfc600800/mask=xffe02c00 --status nomem
# 64-fsreg,LDR-64-fsreg variant when size == 11 && opc == 01 bb=b_13 option=0 F=FPR64 G=GPR32

:ldr Rt_FPR64, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR64 = * tmp2;
    zext_zd(Zt); # zero upper 24 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT xfc602800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst xfc602800/mask=xffe02c00 --status nomem
# 64-fsreg,LDR-64-fsreg variant when size == 11 && opc == 01 bb=b_13 option=1 F=FPR64 G=GPR64

:ldr Rt_FPR64, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b01 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR64 = * tmp2;
    zext_zd(Zt); # zero upper 24 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x3ce00800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x3ce00800/mask=xffe02c00 --status nomem
# 128-fsreg,LDR-128-fsreg variant when size == 00 && opc == 11 bb=b_13 option=0 F=FPR128 G=GPR32

:ldr Rt_FPR128, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR128 = * tmp2;
    zext_zq(Zt); # zero upper 16 bytes of Zt
}

# C7.2.193 LDR (register, SIMD&FP) page C7-2468 line 144528 MATCH x3c600800/mask=x3f600c00
# CONSTRUCT x3ce02800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =load
# SMACRO(pseudo) ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_ldr/3
# AUNIT --inst x3ce02800/mask=xffe02c00 --status nomem
# 128-fsreg,LDR-128-fsreg variant when size == 00 && opc == 11 bb=b_13 option=1 F=FPR128 G=GPR64

:ldr Rt_FPR128, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b11 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zt
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    Rt_FPR128 = * tmp2;
    zext_zq(Zt); # zero upper 16 bytes of Zt
}

# C7.2.194 LDUR (SIMD&FP) page C7-2471 line 144715 MATCH x3c400000/mask=x3f600c00
# CONSTRUCT x3cc00000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1
# AUNIT --inst x3cc00000/mask=xffe00c00 --status nomem

:ldur Rt_FPR128, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR128 & Zt
{
    Rt_FPR128 = * addrIndexed;
    zext_zq(Zt); # zero upper 16 bytes of Zt
}

# C7.2.194 LDUR (SIMD&FP) page C7-2471 line 144715 MATCH x3c400000/mask=x3f600c00
# CONSTRUCT x7c400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1
# AUNIT --inst x7c400000/mask=xffe00c00 --status nomem

:ldur Rt_FPR16, addrIndexed is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR16 & Zt
{
    Rt_FPR16 = * addrIndexed;
    zext_zh(Zt); # zero upper 30 bytes of Zt
}

# C7.2.194 LDUR (SIMD&FP) page C7-2471 line 144715 MATCH x3c400000/mask=x3f600c00
# CONSTRUCT xbc400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1
# AUNIT --inst xbc400000/mask=xffe00c00 --status nomem

:ldur Rt_FPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR32 & Zt
{
    Rt_FPR32 = * addrIndexed;
    zext_zs(Zt); # zero upper 28 bytes of Zt
}

# C7.2.194 LDUR (SIMD&FP) page C7-2471 line 144715 MATCH x3c400000/mask=x3f600c00
# CONSTRUCT xfc400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1
# AUNIT --inst xfc400000/mask=xffe00c00 --status nomem

:ldur Rt_FPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR64 & Zt
{
    Rt_FPR64 = * addrIndexed;
    zext_zd(Zt); # zero upper 24 bytes of Zt
}

# C7.2.194 LDUR (SIMD&FP) page C7-2471 line 144715 MATCH x3c400000/mask=x3f600c00
# CONSTRUCT x3c400000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =load
# SMACRO(pseudo) ARG1 ARG2 =NEON_ldur/1
# AUNIT --inst x3c400000/mask=xffe00c00 --status nomem

:ldur Rt_FPR8, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=1 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR8 & Zt
{
    Rt_FPR8 = * addrIndexed;
    zext_zb(Zt); # zero upper 31 bytes of Zt
}

# C7.2.195 MLA (by element) page C7-2473 line 144842 MATCH x2f000000/mask=xbf00f400
# CONSTRUCT x2f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $* &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4
# AUNIT --inst x2f800000/mask=xffc0f400 --status pass

:mla Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & vIndex & Re_VPR128.S & b_1215=0x0 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
    # simd element Re_VPR128.S[vIndex] lane size 4
    local tmp1:4 = Re_VPR128.S.vIndex;
    # simd infix TMPD1 = Rn_VPR64.2S * tmp1 on lane size 4
    TMPD1[0,32] = Rn_VPR64.2S[0,32] * tmp1;
    TMPD1[32,32] = Rn_VPR64.2S[32,32] * tmp1;
    # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4
    Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32];
    Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.195 MLA (by element) page C7-2473 line 144842 MATCH x2f000000/mask=xbf00f400
# CONSTRUCT x2f400000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $* &=$+@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2
# AUNIT --inst x2f400000/mask=xffc0f400 --status pass

:mla Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x0 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
    # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
    local tmp1:2 = Re_VPR128Lo.H.vIndexHLM;
    # simd infix TMPD1 = Rn_VPR64.4H * tmp1 on lane size 2
    TMPD1[0,16] = Rn_VPR64.4H[0,16] * tmp1;
    TMPD1[16,16] = Rn_VPR64.4H[16,16] * tmp1;
    TMPD1[32,16] = Rn_VPR64.4H[32,16] * tmp1;
    TMPD1[48,16] = Rn_VPR64.4H[48,16] * tmp1;
    # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2
    Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16];
    Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16];
    Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16];
    Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.195 MLA (by element) page C7-2473 line 144842 MATCH x2f000000/mask=xbf00f400
# CONSTRUCT x6f800000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $* &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4
# AUNIT --inst x6f800000/mask=xffc0f400 --status pass

:mla Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x0 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
    # simd element Re_VPR128.S[vIndex] lane size 4
    local tmp1:4 = Re_VPR128.S.vIndex;
    # simd infix TMPQ1 = Rn_VPR128.4S * tmp1 on lane size 4
    TMPQ1[0,32] = Rn_VPR128.4S[0,32] * tmp1;
    TMPQ1[32,32] = Rn_VPR128.4S[32,32] * tmp1;
    TMPQ1[64,32] = Rn_VPR128.4S[64,32] * tmp1;
    TMPQ1[96,32] = Rn_VPR128.4S[96,32] * tmp1;
    # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4
    Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32];
    Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32];
    Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32];
    Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.195 MLA (by element) page C7-2473 line 144842 MATCH x2f000000/mask=xbf00f400
# CONSTRUCT x6f400000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $* &=$+@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2
# AUNIT --inst x6f400000/mask=xffc0f400 --status pass

:mla Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x0 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
    # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
    local tmp1:2 = Re_VPR128Lo.H.vIndexHLM;
    # simd infix TMPQ1 = Rn_VPR128.8H * tmp1 on lane size 2
    TMPQ1[0,16] = Rn_VPR128.8H[0,16] * tmp1;
    TMPQ1[16,16] = Rn_VPR128.8H[16,16] * tmp1;
    TMPQ1[32,16] = Rn_VPR128.8H[32,16] * tmp1;
    TMPQ1[48,16] = Rn_VPR128.8H[48,16] * tmp1;
    TMPQ1[64,16] = Rn_VPR128.8H[64,16] * tmp1;
    TMPQ1[80,16] = Rn_VPR128.8H[80,16] * tmp1;
    TMPQ1[96,16] = Rn_VPR128.8H[96,16] * tmp1;
    TMPQ1[112,16] = Rn_VPR128.8H[112,16] * tmp1;
    # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2
    Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16];
    Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16];
    Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16];
    Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16];
    Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16];
    Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16];
    Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16];
    Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.196 MLA (vector) page C7-2475 line 144975 MATCH x0e209400/mask=xbf20fc00
# CONSTRUCT x4e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $*@1 &=$+@1
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@1
# AUNIT --inst x4e209400/mask=xffe0fc00 --status pass

:mla Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x12 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd
{
    # simd infix TMPQ1 = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1
    TMPQ1[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8];
    TMPQ1[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8];
    TMPQ1[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8];
    TMPQ1[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8];
    TMPQ1[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8];
    TMPQ1[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8];
    TMPQ1[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8];
    TMPQ1[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8];
    TMPQ1[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8];
    TMPQ1[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8];
    TMPQ1[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8];
    TMPQ1[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8];
    TMPQ1[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8];
    TMPQ1[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8];
    TMPQ1[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8];
    TMPQ1[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8];
    # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1
    Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8];
    Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8];
    Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8];
    Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8];
    Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8];
    Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8];
    Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8];
    Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8];
    Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8];
    Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8];
    Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8];
    Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8];
    Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8];
    Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8];
    Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8];
    Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.196 MLA (vector) page C7-2475 line 144975 MATCH x0e209400/mask=xbf20fc00
# CONSTRUCT x0ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $*@4 &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4
# AUNIT --inst x0ea09400/mask=xffe0fc00 --status pass

:mla Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x12 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
    # simd infix TMPD1 = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4
    TMPD1[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32];
    TMPD1[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32];
    # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4
    Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32];
    Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.196 MLA (vector) page C7-2475 line 144975 MATCH x0e209400/mask=xbf20fc00
# CONSTRUCT x0e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $*@2 &=$+@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2
# AUNIT --inst x0e609400/mask=xffe0fc00 --status pass

:mla Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x12 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
    # simd infix TMPD1 = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2
    TMPD1[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16];
    TMPD1[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16];
    TMPD1[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16];
    TMPD1[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16];
    # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2
    Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16];
    Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16];
    Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16];
    Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.196 MLA (vector) page C7-2475 line 144975 MATCH x0e209400/mask=xbf20fc00
# CONSTRUCT x4ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $*@4 &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@4
# AUNIT --inst x4ea09400/mask=xffe0fc00 --status pass

:mla Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
    # simd infix TMPQ1 = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4
    TMPQ1[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32];
    TMPQ1[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32];
    TMPQ1[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32];
    TMPQ1[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32];
    # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4
    Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32];
    Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32];
    Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32];
    Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32];
    zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.196 MLA (vector) page C7-2475 line 144975 MATCH x0e209400/mask=xbf20fc00
# CONSTRUCT x0e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $*@1 &=$+@1
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@1
# AUNIT --inst x0e209400/mask=xffe0fc00 --status pass

:mla Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x12 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
    # simd infix TMPD1 = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1
    TMPD1[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8];
    TMPD1[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8];
    TMPD1[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8];
    TMPD1[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8];
    TMPD1[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8];
    TMPD1[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8];
    TMPD1[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8];
    TMPD1[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8];
    # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1
    Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8];
    Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8];
    Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8];
    Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8];
    Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8];
    Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8];
    Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8];
    Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8];
    zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.196 MLA (vector) page C7-2475 line 144975 MATCH x0e209400/mask=xbf20fc00
# CONSTRUCT 
x4e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mla/3@2 # AUNIT --inst x4e609400/mask=xffe0fc00 --status pass :mla Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.197 MLS (by element) page C7-2477 line 145080 MATCH x2f004000/mask=xbf00f400 # CONSTRUCT x2f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $* &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 # AUNIT --inst x2f804000/mask=xffc0f400 --status pass :mls Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x4 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd element 
Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix TMPD1 = Rn_VPR64.2S * tmp1 on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] * tmp1; TMPD1[32,32] = Rn_VPR64.2S[32,32] * tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.197 MLS (by element) page C7-2477 line 145080 MATCH x2f004000/mask=xbf00f400 # CONSTRUCT x2f404000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $* &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 # AUNIT --inst x2f404000/mask=xffc0f400 --status pass :mls Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x4 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPD1 = Rn_VPR64.4H * tmp1 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] * tmp1; TMPD1[16,16] = Rn_VPR64.4H[16,16] * tmp1; TMPD1[32,16] = Rn_VPR64.4H[32,16] * tmp1; TMPD1[48,16] = Rn_VPR64.4H[48,16] * tmp1; # simd infix Rd_VPR64.4H = Rd_VPR64.4H - TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] - TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] - TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] - TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] - TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.197 MLS (by element) page C7-2477 line 145080 MATCH x2f004000/mask=xbf00f400 # CONSTRUCT x6f804000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $* &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 # AUNIT --inst x6f804000/mask=xffc0f400 --status pass :mls Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & 
vIndex & b_1215=0x4 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix TMPQ1 = Rn_VPR128.4S * tmp1 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] * tmp1; TMPQ1[32,32] = Rn_VPR128.4S[32,32] * tmp1; TMPQ1[64,32] = Rn_VPR128.4S[64,32] * tmp1; TMPQ1[96,32] = Rn_VPR128.4S[96,32] * tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.197 MLS (by element) page C7-2477 line 145080 MATCH x2f004000/mask=xbf00f400 # CONSTRUCT x6f404000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $* &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 # AUNIT --inst x6f404000/mask=xffc0f400 --status pass :mls Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x4 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix TMPQ1 = Rn_VPR128.8H * tmp1 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] * tmp1; TMPQ1[16,16] = Rn_VPR128.8H[16,16] * tmp1; TMPQ1[32,16] = Rn_VPR128.8H[32,16] * tmp1; TMPQ1[48,16] = Rn_VPR128.8H[48,16] * tmp1; TMPQ1[64,16] = Rn_VPR128.8H[64,16] * tmp1; TMPQ1[80,16] = Rn_VPR128.8H[80,16] * tmp1; TMPQ1[96,16] = Rn_VPR128.8H[96,16] * tmp1; TMPQ1[112,16] = Rn_VPR128.8H[112,16] * tmp1; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - 
TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.198 MLS (vector) page C7-2479 line 145213 MATCH x2e209400/mask=xbf20fc00 # CONSTRUCT x6e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@1 &=$-@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@1 # AUNIT --inst x6e209400/mask=xffe0fc00 --status pass :mls Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x12 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 TMPQ1[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; TMPQ1[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; TMPQ1[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; TMPQ1[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; TMPQ1[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; TMPQ1[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; TMPQ1[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; TMPQ1[56,8] = Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8]; TMPQ1[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8]; TMPQ1[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8]; TMPQ1[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8]; TMPQ1[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8]; TMPQ1[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8]; TMPQ1[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8]; TMPQ1[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8]; TMPQ1[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8]; # simd infix Rd_VPR128.16B = Rd_VPR128.16B - TMPQ1 on lane size 1 Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] - TMPQ1[0,8]; Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] - TMPQ1[8,8]; Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] - TMPQ1[16,8]; 
Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] - TMPQ1[24,8]; Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] - TMPQ1[32,8]; Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] - TMPQ1[40,8]; Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] - TMPQ1[48,8]; Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] - TMPQ1[56,8]; Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] - TMPQ1[64,8]; Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] - TMPQ1[72,8]; Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] - TMPQ1[80,8]; Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] - TMPQ1[88,8]; Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] - TMPQ1[96,8]; Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] - TMPQ1[104,8]; Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] - TMPQ1[112,8]; Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] - TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.198 MLS (vector) page C7-2479 line 145213 MATCH x2e209400/mask=xbf20fc00 # CONSTRUCT x2ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 # AUNIT --inst x2ea09400/mask=xffe0fc00 --status pass :mls Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x12 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix TMPD1 = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32]; TMPD1[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32]; # simd infix Rd_VPR64.2S = Rd_VPR64.2S - TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] - TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] - TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.198 MLS (vector) page C7-2479 line 145213 MATCH x2e209400/mask=xbf20fc00 # CONSTRUCT x2e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@2 &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 # AUNIT --inst x2e609400/mask=xffe0fc00 --status pass :mls Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & 
b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x12 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16]; TMPD1[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16]; TMPD1[32,16] = Rn_VPR64.4H[32,16] * Rm_VPR64.4H[32,16]; TMPD1[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; # simd infix Rd_VPR64.4H = Rd_VPR64.4H - TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] - TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] - TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] - TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] - TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.198 MLS (vector) page C7-2479 line 145213 MATCH x2e209400/mask=xbf20fc00 # CONSTRUCT x6ea09400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@4 # AUNIT --inst x6ea09400/mask=xffe0fc00 --status pass :mls Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.198 MLS (vector) page C7-2479 line 145213 MATCH x2e209400/mask=xbf20fc00 # CONSTRUCT x2e209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@1 
&=$-@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@1 # AUNIT --inst x2e209400/mask=xffe0fc00 --status pass :mls Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x12 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix TMPD1 = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 TMPD1[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; TMPD1[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; TMPD1[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; TMPD1[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; TMPD1[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; TMPD1[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; TMPD1[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; TMPD1[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; # simd infix Rd_VPR64.8B = Rd_VPR64.8B - TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] - TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] - TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] - TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] - TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] - TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] - TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] - TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] - TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.198 MLS (vector) page C7-2479 line 145213 MATCH x2e209400/mask=xbf20fc00 # CONSTRUCT x6e609400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $*@2 &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_mls/3@2 # AUNIT --inst x6e609400/mask=xffe0fc00 --status pass :mls Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] * 
Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.200 MOV (element) page C7-2483 line 145410 MATCH x6e000400/mask=xffe08400 # C7.2.175 INS (element) page C7-2411 line 140892 MATCH x6e000400/mask=xffe08400 # CONSTRUCT x6e010400/mask=xffe18400 MATCHED 2 DOCUMENTED OPCODES # SMACRO Rd_VPR128 ARG2 imm_neon_uimm4:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm4:1 &=NEON_mov/3@1 # AUNIT --inst x6e010400/mask=xffe18400 --status pass :mov Rd_VPR128.B.imm_neon_uimm4, Rn_VPR128.B.immN_neon_uimm4 is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & Rn_VPR128.B.immN_neon_uimm4 & immN_neon_uimm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { # simd element Rn_VPR128[immN_neon_uimm4] lane size 1 local tmp1:1 = Rn_VPR128.B.immN_neon_uimm4; # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp1 (lane size 1) Rd_VPR128.B.imm_neon_uimm4 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.200 MOV (element) page C7-2483 line 145410 MATCH x6e000400/mask=xffe08400 # C7.2.175 INS (element) page C7-2411 line 140892 MATCH x6e000400/mask=xffe08400 # CONSTRUCT x6e080400/mask=xffef8400 MATCHED 2 DOCUMENTED 
OPCODES # SMACRO Rd_VPR128 ARG2 imm_neon_uimm1:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm1:1 &=NEON_mov/3@8 # AUNIT --inst x6e080400/mask=xffef8400 --status pass :mov Rd_VPR128.D.imm_neon_uimm1, Rn_VPR128.D.immN_neon_uimm1 is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & Rn_VPR128.D.immN_neon_uimm1 & immN_neon_uimm1 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { # simd element Rn_VPR128[immN_neon_uimm1] lane size 8 local tmp1:8 = Rn_VPR128.D.immN_neon_uimm1; # simd copy Rd_VPR128 element imm_neon_uimm1:1 = tmp1 (lane size 8) Rd_VPR128.D.imm_neon_uimm1 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.200 MOV (element) page C7-2483 line 145410 MATCH x6e000400/mask=xffe08400 # C7.2.175 INS (element) page C7-2411 line 140892 MATCH x6e000400/mask=xffe08400 # CONSTRUCT x6e020400/mask=xffe38400 MATCHED 2 DOCUMENTED OPCODES # SMACRO Rd_VPR128 ARG2 imm_neon_uimm3:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm3:1 &=NEON_mov/3@2 # AUNIT --inst x6e020400/mask=xffe38400 --status pass :mov Rd_VPR128.H.imm_neon_uimm3, Rn_VPR128.H.immN_neon_uimm3 is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & Rn_VPR128.H.immN_neon_uimm3 & immN_neon_uimm3 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { # simd element Rn_VPR128[immN_neon_uimm3] lane size 2 local tmp1:2 = Rn_VPR128.H.immN_neon_uimm3; # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp1 (lane size 2) Rd_VPR128.H.imm_neon_uimm3 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.200 MOV (element) page C7-2483 line 145410 MATCH x6e000400/mask=xffe08400 # C7.2.175 INS (element) page C7-2411 line 140892 MATCH x6e000400/mask=xffe08400 # CONSTRUCT x6e040400/mask=xffe78400 MATCHED 2 DOCUMENTED OPCODES # SMACRO Rd_VPR128 ARG2 imm_neon_uimm2:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm2:1 &=NEON_mov/3@4 # AUNIT --inst x6e040400/mask=xffe78400 --status pass :mov 
Rd_VPR128.S.imm_neon_uimm2, Rn_VPR128.S.immN_neon_uimm2 is b_3131=0 & q=1 & b_29=1 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & Rn_VPR128.S.immN_neon_uimm2 & immN_neon_uimm2 & Imm4 & b_1010=1 & Rn_VPR128 & Rd_VPR128 & Zd { # simd element Rn_VPR128[immN_neon_uimm2] lane size 4 local tmp1:4 = Rn_VPR128.S.immN_neon_uimm2; # simd copy Rd_VPR128 element imm_neon_uimm2:1 = tmp1 (lane size 4) Rd_VPR128.S.imm_neon_uimm2 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.201 MOV (from general) page C7-2485 line 145507 MATCH x4e001c00/mask=xffe0fc00 # C7.2.176 INS (general) page C7-2413 line 141002 MATCH x4e001c00/mask=xffe0fc00 # CONSTRUCT x4e011c00/mask=xffe1fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO Rd_VPR128 ARG2[0]:1 imm_neon_uimm4:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm4:1 &=NEON_mov/3@1 # AUNIT --inst x4e011c00/mask=xffe1fc00 --status pass :mov Rd_VPR128.B.imm_neon_uimm4, Rn_GPR32 is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd { local tmp1:1 = Rn_GPR32[0,8]; # simd copy Rd_VPR128 element imm_neon_uimm4:1 = tmp1 (lane size 1) Rd_VPR128.B.imm_neon_uimm4 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.201 MOV (from general) page C7-2485 line 145507 MATCH x4e001c00/mask=xffe0fc00 # C7.2.176 INS (general) page C7-2413 line 141002 MATCH x4e001c00/mask=xffe0fc00 # CONSTRUCT x4e081c00/mask=xffeffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO Rd_VPR128 ARG2 imm_neon_uimm1:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm1:1 &=NEON_mov/3@8 # AUNIT --inst x4e081c00/mask=xffeffc00 --status pass :mov Rd_VPR128.D.imm_neon_uimm1, Rn_GPR64 is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR64 & Rd_VPR128 & Zd { # simd copy Rd_VPR128 element imm_neon_uimm1:1 = Rn_GPR64 (lane size 8) Rd_VPR128.D.imm_neon_uimm1 = Rn_GPR64; zext_zq(Zd); # 
zero upper 16 bytes of Zd } # C7.2.201 MOV (from general) page C7-2485 line 145507 MATCH x4e001c00/mask=xffe0fc00 # C7.2.176 INS (general) page C7-2413 line 141002 MATCH x4e001c00/mask=xffe0fc00 # CONSTRUCT x4e021c00/mask=xffe3fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO Rd_VPR128 ARG2[0]:2 imm_neon_uimm3:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm3:1 &=NEON_mov/3@2 # AUNIT --inst x4e021c00/mask=xffe3fc00 --status pass :mov Rd_VPR128.H.imm_neon_uimm3, Rn_GPR32 is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd { local tmp1:2 = Rn_GPR32[0,16]; # simd copy Rd_VPR128 element imm_neon_uimm3:1 = tmp1 (lane size 2) Rd_VPR128.H.imm_neon_uimm3 = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.201 MOV (from general) page C7-2485 line 145507 MATCH x4e001c00/mask=xffe0fc00 # C7.2.176 INS (general) page C7-2413 line 141002 MATCH x4e001c00/mask=xffe0fc00 # CONSTRUCT x4e041c00/mask=xffe7fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO(force-primitive) Rd_VPR128 ARG2 imm_neon_uimm2:1 &=$copy # SMACRO(pseudo) Rd_VPR128 ARG2 imm_neon_uimm2:1 &=NEON_mov/3@2 # AUNIT --inst x4e041c00/mask=xffe7fc00 --status pass :mov Rd_VPR128.S.imm_neon_uimm2, Rn_GPR32 is b_3131=0 & q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rd_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x3 & b_1010=1 & Rn_GPR32 & Rd_VPR128 & Zd { # simd copy Rd_VPR128 element imm_neon_uimm2:1 = Rn_GPR32 (lane size 4) Rd_VPR128.S.imm_neon_uimm2 = Rn_GPR32; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.202 MOV (vector) page C7-2487 line 145604 MATCH x0ea01c00/mask=xbfe0fc00 # C7.2.213 ORR (vector, register) page C7-2509 line 146837 MATCH x0ea01c00/mask=xbfe0fc00 # CONSTRUCT x4ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@1 # AUNIT --inst x4ea01c00/mask=xffe0fc00 --status pass :mov Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & 
advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Rn=Rm & Zd { Rd_VPR128.16B = Rn_VPR128.16B; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.202 MOV (vector) page C7-2487 line 145604 MATCH x0ea01c00/mask=xbfe0fc00 # C7.2.213 ORR (vector, register) page C7-2509 line 146837 MATCH x0ea01c00/mask=xbfe0fc00 # CONSTRUCT x0ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@1 # AUNIT --inst x0ea01c00/mask=xffe0fc00 --status pass :mov Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Rn=Rm & Zd { Rd_VPR64.8B = Rn_VPR64.8B; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.203 MOV (to general) page C7-2488 line 145671 MATCH x0e003c00/mask=xbfe3fc00 # C7.2.371 UMOV page C7-2868 line 167415 MATCH x0e003c00/mask=xbfe0fc00 # CONSTRUCT x0e043c00/mask=xffe7fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@4 # AUNIT --inst x0e043c00/mask=xffe7fc00 --status pass :mov Rd_GPR32, Rn_VPR128.S.imm_neon_uimm2 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 { # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; Rd_GPR32 = tmp1; zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.203 MOV (to general) page C7-2488 line 145671 MATCH x0e003c00/mask=xbfe3fc00 # C7.2.371 UMOV page C7-2868 line 167415 MATCH x0e003c00/mask=xbfe0fc00 # CONSTRUCT x4e083c00/mask=xffeffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_mov/1@8 # AUNIT --inst x4e083c00/mask=xffeffc00 --status pass :mov Rd_GPR64, Rn_VPR128.D.imm_neon_uimm1 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.D.imm_neon_uimm1 & b_1619=0x8 & 
b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm1] lane size 8 local tmp1:8 = Rn_VPR128.D.imm_neon_uimm1; Rd_GPR64 = tmp1; } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x2f00e400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1 # AUNIT --inst x2f00e400/mask=xfff8fc00 --status pass # MOVI 64-bit scalar variant when datasize=64 q == 0 && op == 1 && cmode == 1110 :movi Rd_FPR64, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_FPR64 & Zd { Rd_FPR64 = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x4f00e400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:1 &=$dup # SMACRO(pseudo) ARG1 ARG2:1 =NEON_movi/1@1 # AUNIT --inst x4f00e400/mask=xfff8fc00 --status pass # MOVI 8-bit variant when datasize=128 q == 1 && op == 0 && cmode == 0b1110 :movi Rd_VPR128.16B, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.16B & Zd { # simd duplicate Rd_VPR128.16B = all elements Imm_neon_uimm8Shift:1 (lane size 1) Rd_VPR128.16B[0,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[8,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[16,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[24,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[32,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[40,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[48,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[56,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[64,8] = Imm_neon_uimm8Shift:1; 
Rd_VPR128.16B[72,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[80,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[88,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[96,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[104,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[112,8] = Imm_neon_uimm8Shift:1; Rd_VPR128.16B[120,8] = Imm_neon_uimm8Shift:1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x6f00e400/mask=xfff8fc00 MATCHED 3 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2 =var:8 &=$dup # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@8 # AUNIT --inst x6f00e400/mask=xfff8fc00 --status pass # MOVI 64-bit vector variant when datasize=128 q == 1 && op == 1 && cmode == 1110 :movi Rd_VPR128.2D, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=1 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.2D & Zd { local tmp1:8 = Imm_neon_uimm8Shift; # simd duplicate Rd_VPR128.2D = all elements tmp1 (lane size 8) Rd_VPR128.2D[0,64] = tmp1; Rd_VPR128.2D[64,64] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x0f00e400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@1 # AUNIT --inst x0f00e400/mask=xfff8fc00 --status pass # MOVI 8-bit variant when datasize=64 q == 0 && op == 0 && cmode == 1110 :movi Rd_VPR64.8B, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1215=0b1110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.8B & Zd { Rd_VPR64.8B = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # 
C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x0f000400/mask=xfff89c00 MATCHED 3 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@4 # AUNIT --inst x0f000400/mask=xfff89c00 --status pass # MOVI 32-bit shifted immediate variant when datasize=64 q == 0 && op == 0 && cmode == 0xx0 :movi Rd_VPR64.2S, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.2S & Zd { Rd_VPR64.2S = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x0f008400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@2 # AUNIT --inst x0f008400/mask=xfff8dc00 --status pass # MOVI 16-bit shifted immediate variant when datasize=64 q == 0 && op == 0 && cmode == 10x0 :movi Rd_VPR64.4H, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.4H & Zd { Rd_VPR64.4H = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x4f000400/mask=xfff89c00 MATCHED 3 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2:4 &=$dup # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@4 # AUNIT --inst x4f000400/mask=xfff89c00 --status pass # MOVI 32-bit shifted immediate variant when datasize=128 q == 1 && op == 0 && 
cmode == 0xx0 :movi Rd_VPR128.4S, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_15=0 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.4S & Zd { # simd duplicate Rd_VPR128.4S = all elements Imm_neon_uimm8Shift:4 (lane size 4) Rd_VPR128.4S[0,32] = Imm_neon_uimm8Shift:4; Rd_VPR128.4S[32,32] = Imm_neon_uimm8Shift:4; Rd_VPR128.4S[64,32] = Imm_neon_uimm8Shift:4; Rd_VPR128.4S[96,32] = Imm_neon_uimm8Shift:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x4f008400/mask=xfff8dc00 MATCHED 4 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:2 &=$dup # SMACRO(pseudo) ARG1 ARG2 =NEON_movi/1@2 # AUNIT --inst x4f008400/mask=xfff8dc00 --status pass # MOVI 16-bit shifted immediate variant when datasize=128 q == 1 && op == 0 && cmode == 10x0 :movi Rd_VPR128.8H, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1415=0b10 & b_12=0 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.8H & Zd { # simd duplicate Rd_VPR128.8H = all elements Imm_neon_uimm8Shift:2 (lane size 2) Rd_VPR128.8H[0,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[16,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[32,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[48,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[64,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[80,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[96,16] = Imm_neon_uimm8Shift:2; Rd_VPR128.8H[112,16] = Imm_neon_uimm8Shift:2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # CONSTRUCT x0f00c400/mask=xfff8ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 = # SMACRO(pseudo) 
ARG1 ARG2 =NEON_movi/1@4 # AUNIT --inst x0f00c400/mask=xfff8ec00 --status pass # MOVI 32-bit shifting ones variant when datasize=64 q == 0 && op == 0 && cmode == 110x :movi Rd_VPR64.2S, Imm_neon_uimm8Shift is b_31=0 & b_30=0 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR64.2S & Zd { Rd_VPR64.2S = Imm_neon_uimm8Shift:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # CONSTRUCT x4f00c400/mask=xfff8ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:4 &=$dup # SMACRO(pseudo) ARG1 ARG2:4 =NEON_movi/1@4 # AUNIT --inst x4f00c400/mask=xfff8ec00 --status pass # MOVI 32-bit shifting ones variant when datasize=128 q == 1 && op == 0 && cmode == 110x :movi Rd_VPR128.4S, Imm_neon_uimm8Shift is b_31=0 & b_30=1 & b_29=0 & b_1928=0b0111100000 & b_1315=0b110 & b_1011=0b01 & Imm_neon_uimm8Shift & Rd_VPR128.4S & Zd { # simd duplicate Rd_VPR128.4S = all elements Imm_neon_uimm8Shift:4 (lane size 4) Rd_VPR128.4S[0,32] = Imm_neon_uimm8Shift:4; Rd_VPR128.4S[32,32] = Imm_neon_uimm8Shift:4; Rd_VPR128.4S[64,32] = Imm_neon_uimm8Shift:4; Rd_VPR128.4S[96,32] = Imm_neon_uimm8Shift:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.205 MUL (by element) page C7-2493 line 145949 MATCH x0f008000/mask=xbf00f400 # CONSTRUCT x0f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 # AUNIT --inst x0f808000/mask=xffc0f400 --status pass :mul Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x8 & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix Rd_VPR64.2S = Rn_VPR64.2S * tmp1 on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] * tmp1; 
Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] * tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.205 MUL (by element) page C7-2493 line 145949 MATCH x0f008000/mask=xbf00f400 # CONSTRUCT x0f408000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 # AUNIT --inst x0f408000/mask=xffc0f400 --status pass :mul Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x8 & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix Rd_VPR64.4H = Rn_VPR64.4H * tmp1 on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] * tmp1; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] * tmp1; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] * tmp1; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] * tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.205 MUL (by element) page C7-2493 line 145949 MATCH x0f008000/mask=xbf00f400 # CONSTRUCT x4f808000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(force-primitive) ARG1 ARG2 ARG3 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 # AUNIT --inst x4f808000/mask=xffc0f400 --status pass :mul Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x8 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd element Re_VPR128.S[vIndex] lane size 4 local tmp1:4 = Re_VPR128.S.vIndex; # simd infix Rd_VPR128.4S = Rn_VPR128.4S * tmp1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] * tmp1; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] * tmp1; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] * tmp1; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] * tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.205 MUL (by element) page C7-2493 line 145949 MATCH x0f008000/mask=xbf00f400 # CONSTRUCT x4f408000/mask=xffc0f400 MATCHED 
1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 # AUNIT --inst x4f408000/mask=xffc0f400 --status pass :mul Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x8 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp1:2 = Re_VPR128Lo.H.vIndexHLM; # simd infix Rd_VPR128.8H = Rn_VPR128.8H * tmp1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] * tmp1; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] * tmp1; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] * tmp1; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] * tmp1; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] * tmp1; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] * tmp1; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] * tmp1; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] * tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.206 MUL (vector) page C7-2495 line 146079 MATCH x0e209c00/mask=xbf20fc00 # CONSTRUCT x4e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$*@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@1 # AUNIT --inst x4e209c00/mask=xffe0fc00 --status pass :mul Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x13 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B * Rm_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] * Rm_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] * Rm_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] * Rm_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] * Rm_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] * Rm_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] * Rm_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] * Rm_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = 
Rn_VPR128.16B[56,8] * Rm_VPR128.16B[56,8];
Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] * Rm_VPR128.16B[64,8];
Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] * Rm_VPR128.16B[72,8];
Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] * Rm_VPR128.16B[80,8];
Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] * Rm_VPR128.16B[88,8];
Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] * Rm_VPR128.16B[96,8];
Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] * Rm_VPR128.16B[104,8];
Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] * Rm_VPR128.16B[112,8];
Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] * Rm_VPR128.16B[120,8];
zext_zq(Zd); # zero upper 16 bytes of Zd
}

# MUL (vector), 64-bit register form, two 32-bit lanes: Rd.2S[i] = Rn.2S[i] * Rm.2S[i].
# Decode fields: q=0 selects the 64-bit register form, size=2 selects 32-bit lanes.
# Zd: presumably the SVE Z register overlapping the V destination — confirm in the base .sinc.
# C7.2.206 MUL (vector) page C7-2495 line 146079 MATCH x0e209c00/mask=xbf20fc00
# CONSTRUCT x0ea09c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$*@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4
# AUNIT --inst x0ea09c00/mask=xffe0fc00 --status pass
:mul Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x13 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
# simd infix Rd_VPR64.2S = Rn_VPR64.2S * Rm_VPR64.2S on lane size 4
Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] * Rm_VPR64.2S[0,32];
Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] * Rm_VPR64.2S[32,32];
zext_zd(Zd); # zero upper 24 bytes of Zd
}

# MUL (vector), 64-bit register form, four 16-bit lanes: Rd.4H[i] = Rn.4H[i] * Rm.4H[i].
# Decode fields: q=0 selects the 64-bit register form, size=1 selects 16-bit lanes.
# C7.2.206 MUL (vector) page C7-2495 line 146079 MATCH x0e209c00/mask=xbf20fc00
# CONSTRUCT x0e609c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 =$*@2
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2
# AUNIT --inst x0e609c00/mask=xffe0fc00 --status pass
:mul Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x13 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
# simd infix Rd_VPR64.4H = Rn_VPR64.4H * Rm_VPR64.4H on lane size 2
Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] * Rm_VPR64.4H[0,16];
Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] * Rm_VPR64.4H[16,16];
Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] *
Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] * Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.206 MUL (vector) page C7-2495 line 146079 MATCH x0e209c00/mask=xbf20fc00 # CONSTRUCT x4ea09c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$*@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@4 # AUNIT --inst x4ea09c00/mask=xffe0fc00 --status pass :mul Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rn_VPR128.4S * Rm_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] * Rm_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] * Rm_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] * Rm_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] * Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.206 MUL (vector) page C7-2495 line 146079 MATCH x0e209c00/mask=xbf20fc00 # CONSTRUCT x0e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$*@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@1 # AUNIT --inst x0e209c00/mask=xffe0fc00 --status pass :mul Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x13 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B * Rm_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] * Rm_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] * Rm_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] * Rm_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] * Rm_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] * Rm_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] * Rm_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] * Rm_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] * Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # 
C7.2.206 MUL (vector) page C7-2495 line 146079 MATCH x0e209c00/mask=xbf20fc00 # CONSTRUCT x4e609c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_mul/2@2 # AUNIT --inst x4e609c00/mask=xffe0fc00 --status pass :mul Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H * Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] * Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] * Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] * Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] * Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] * Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] * Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] * Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] * Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.207 MVN page C7-2497 line 146183 MATCH x2e205800/mask=xbffffc00 # C7.2.210 NOT page C7-2503 line 146536 MATCH x2e205800/mask=xbffffc00 # CONSTRUCT x6e205800/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$~@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_mvn/1@1 # AUNIT --inst x6e205800/mask=xfffffc00 --status pass :mvn Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd unary Rd_VPR128.16B = ~(Rn_VPR128.16B) on lane size 1 Rd_VPR128.16B[0,8] = ~(Rn_VPR128.16B[0,8]); Rd_VPR128.16B[8,8] = ~(Rn_VPR128.16B[8,8]); Rd_VPR128.16B[16,8] = ~(Rn_VPR128.16B[16,8]); Rd_VPR128.16B[24,8] = ~(Rn_VPR128.16B[24,8]); Rd_VPR128.16B[32,8] = ~(Rn_VPR128.16B[32,8]); Rd_VPR128.16B[40,8] = ~(Rn_VPR128.16B[40,8]); Rd_VPR128.16B[48,8] = ~(Rn_VPR128.16B[48,8]); Rd_VPR128.16B[56,8] = 
~(Rn_VPR128.16B[56,8]); Rd_VPR128.16B[64,8] = ~(Rn_VPR128.16B[64,8]); Rd_VPR128.16B[72,8] = ~(Rn_VPR128.16B[72,8]); Rd_VPR128.16B[80,8] = ~(Rn_VPR128.16B[80,8]); Rd_VPR128.16B[88,8] = ~(Rn_VPR128.16B[88,8]); Rd_VPR128.16B[96,8] = ~(Rn_VPR128.16B[96,8]); Rd_VPR128.16B[104,8] = ~(Rn_VPR128.16B[104,8]); Rd_VPR128.16B[112,8] = ~(Rn_VPR128.16B[112,8]); Rd_VPR128.16B[120,8] = ~(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.207 MVN page C7-2497 line 146183 MATCH x2e205800/mask=xbffffc00 # C7.2.210 NOT page C7-2503 line 146536 MATCH x2e205800/mask=xbffffc00 # CONSTRUCT x2e205800/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$~@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_mvn/1@1 # AUNIT --inst x2e205800/mask=xfffffc00 --status pass :mvn Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=16 & b_1216=5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd unary Rd_VPR64.8B = ~(Rn_VPR64.8B) on lane size 1 Rd_VPR64.8B[0,8] = ~(Rn_VPR64.8B[0,8]); Rd_VPR64.8B[8,8] = ~(Rn_VPR64.8B[8,8]); Rd_VPR64.8B[16,8] = ~(Rn_VPR64.8B[16,8]); Rd_VPR64.8B[24,8] = ~(Rn_VPR64.8B[24,8]); Rd_VPR64.8B[32,8] = ~(Rn_VPR64.8B[32,8]); Rd_VPR64.8B[40,8] = ~(Rn_VPR64.8B[40,8]); Rd_VPR64.8B[48,8] = ~(Rn_VPR64.8B[48,8]); Rd_VPR64.8B[56,8] = ~(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x2f000400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:4 ~ &=$dup # SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 # AUNIT --inst x2f000400/mask=xfff89c00 --status pass :mvni 
Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & b_1515=0 & Imm_neon_uimm8Shift & b_1012=1 & Rd_VPR64.2S & Zd { local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4) Rd_VPR64.2S[0,32] = tmp1; Rd_VPR64.2S[32,32] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x2f008400/mask=xfff8dc00 MATCHED 5 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:2 ~ &=$dup # SMACRO(pseudo) ARG1 ARG2:2 =NEON_mvni/1@2 # AUNIT --inst x2f008400/mask=xfff8dc00 --status pass :mvni Rd_VPR64.4H, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=1 & Rd_VPR64.4H & Zd { local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd duplicate Rd_VPR64.4H = all elements tmp1 (lane size 2) Rd_VPR64.4H[0,16] = tmp1; Rd_VPR64.4H[16,16] = tmp1; Rd_VPR64.4H[32,16] = tmp1; Rd_VPR64.4H[48,16] = tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x6f000400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:4 ~ &=$dup # SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 # AUNIT --inst x6f000400/mask=xfff89c00 --status pass :mvni Rd_VPR128.4S, 
Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=1 & Rd_VPR128.4S & Zd { local tmp1:4 = ~ Imm_neon_uimm8Shift:4; # simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4) Rd_VPR128.4S[0,32] = tmp1; Rd_VPR128.4S[32,32] = tmp1; Rd_VPR128.4S[64,32] = tmp1; Rd_VPR128.4S[96,32] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x6f008400/mask=xfff8dc00 MATCHED 5 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:2 ~ &=$dup # SMACRO(pseudo) ARG1 ARG2:2 =NEON_mvni/1@2 # AUNIT --inst x6f008400/mask=xfff8dc00 --status pass :mvni Rd_VPR128.8H, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=1 & Rd_VPR128.8H & Zd { local tmp1:2 = ~ Imm_neon_uimm8Shift:2; # simd duplicate Rd_VPR128.8H = all elements tmp1 (lane size 2) Rd_VPR128.8H[0,16] = tmp1; Rd_VPR128.8H[16,16] = tmp1; Rd_VPR128.8H[32,16] = tmp1; Rd_VPR128.8H[48,16] = tmp1; Rd_VPR128.8H[64,16] = tmp1; Rd_VPR128.8H[80,16] = tmp1; Rd_VPR128.8H[96,16] = tmp1; Rd_VPR128.8H[112,16] = tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00 # C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH x2f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # CONSTRUCT x2f00c400/mask=xfff8ec00 MATCHED 3 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:4 ~ &=$dup # SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4 # AUNIT --inst x2f00c400/mask=xfff8ec00 --status pass :mvni Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & 
b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1315=6 & b_1011=1 & Rd_VPR64.2S & Zd
{
local tmp1:4 = ~ Imm_neon_uimm8Shift:4;
# simd duplicate Rd_VPR64.2S = all elements tmp1 (lane size 4)
Rd_VPR64.2S[0,32] = tmp1;
Rd_VPR64.2S[32,32] = tmp1;
zext_zd(Zd); # zero upper 24 bytes of Zd
}

# MVNI, 32-bit shifting-ones variant, 128-bit register form: each of the four
# 32-bit lanes of Rd.4S is set to the bitwise NOT of the shifted immediate.
# Zd: presumably the SVE Z register overlapping the V destination — confirm in the base .sinc.
# C7.2.208 MVNI page C7-2498 line 146251 MATCH x2f000400/mask=xbff80c00
# C7.2.20 BIC (vector, immediate) page C7-2048 line 119572 MATCH x2f001400/mask=xbff81c00
# C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00
# CONSTRUCT x6f00c400/mask=xfff8ec00 MATCHED 3 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2:4 ~ &=$dup
# SMACRO(pseudo) ARG1 ARG2:4 =NEON_mvni/1@4
# AUNIT --inst x6f00c400/mask=xfff8ec00 --status pass
:mvni Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=1 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1315=6 & b_1011=1 & Rd_VPR128.4S & Zd
{
local tmp1:4 = ~ Imm_neon_uimm8Shift:4;
# simd duplicate Rd_VPR128.4S = all elements tmp1 (lane size 4)
Rd_VPR128.4S[0,32] = tmp1;
Rd_VPR128.4S[32,32] = tmp1;
Rd_VPR128.4S[64,32] = tmp1;
Rd_VPR128.4S[96,32] = tmp1;
zext_zq(Zd); # zero upper 16 bytes of Zd
}

# NEG (vector), scalar 64-bit form (size=3, u=1, scalar opcode space 0x1e):
# negate the whole 64-bit register as a single value.
# C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x7e20b800/mask=xff3ffc00
# CONSTRUCT x7ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =2comp
# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1
# AUNIT --inst x7ee0b800/mask=xfffffc00 --status pass
:neg Rd_VPR64, Rn_VPR64 is b_3131=0 & q=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64 & Rd_VPR64 & Zd
{
Rd_VPR64 = - Rn_VPR64;
zext_zd(Zd); # zero upper 24 bytes of Zd
}

# NEG (vector), 64-bit register form, eight 8-bit lanes: Rd.8B[i] = -Rn.8B[i].
# C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00
# CONSTRUCT x2e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@1
# AUNIT --inst x2e20b800/mask=xfffffc00 --status nopcodeop
:neg Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xb & b_1011=2 &
Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd unary Rd_VPR64.8B = -Rn_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = -Rn_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = -Rn_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = -Rn_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = -Rn_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = -Rn_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = -Rn_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = -Rn_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = -Rn_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00 # CONSTRUCT x6e20b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@1 # AUNIT --inst x6e20b800/mask=xfffffc00 --status nopcodeop :neg Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd unary Rd_VPR128.16B = -Rn_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = -Rn_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = -Rn_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = -Rn_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = -Rn_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = -Rn_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = -Rn_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = -Rn_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = -Rn_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = -Rn_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = -Rn_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = -Rn_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = -Rn_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = -Rn_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = -Rn_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = -Rn_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = -Rn_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00 # CONSTRUCT x2e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@2 # AUNIT --inst x2e60b800/mask=xfffffc00 --status nopcodeop :neg Rd_VPR64.4H, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & 
b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = -Rn_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = -Rn_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = -Rn_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = -Rn_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = -Rn_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00 # CONSTRUCT x6e60b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@2 # AUNIT --inst x6e60b800/mask=xfffffc00 --status nopcodeop :neg Rd_VPR128.8H, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = -Rn_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = -Rn_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = -Rn_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = -Rn_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = -Rn_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = -Rn_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = -Rn_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = -Rn_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = -Rn_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00 # CONSTRUCT x2ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@4 # AUNIT --inst x2ea0b800/mask=xfffffc00 --status nopcodeop :neg Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = -Rn_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = -Rn_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = -Rn_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00 # CONSTRUCT x6ea0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@4 # AUNIT --inst 
x6ea0b800/mask=xfffffc00 --status nopcodeop :neg Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = -Rn_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = -Rn_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = -Rn_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = -Rn_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = -Rn_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.209 NEG (vector) page C7-2501 line 146404 MATCH x2e20b800/mask=xbf3ffc00 # CONSTRUCT x6ee0b800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_neg/1@8 # AUNIT --inst x6ee0b800/mask=xfffffc00 --status nopcodeop :neg Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_1721=0x10 & b_1216=0xb & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = -Rn_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = -Rn_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = -Rn_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.211 ORN (vector) page C7-2505 line 146624 MATCH x0ee01c00/mask=xbfe0fc00 # CONSTRUCT x4ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $~@1 =$|@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_orn/2@1 # AUNIT --inst x4ee01c00/mask=xffe0fc00 --status pass :orn Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd unary TMPQ1 = ~(Rm_VPR128.16B) on lane size 1 TMPQ1[0,8] = ~(Rm_VPR128.16B[0,8]); TMPQ1[8,8] = ~(Rm_VPR128.16B[8,8]); TMPQ1[16,8] = ~(Rm_VPR128.16B[16,8]); TMPQ1[24,8] = ~(Rm_VPR128.16B[24,8]); TMPQ1[32,8] = ~(Rm_VPR128.16B[32,8]); TMPQ1[40,8] = ~(Rm_VPR128.16B[40,8]); TMPQ1[48,8] = ~(Rm_VPR128.16B[48,8]); TMPQ1[56,8] = ~(Rm_VPR128.16B[56,8]); TMPQ1[64,8] = ~(Rm_VPR128.16B[64,8]); TMPQ1[72,8] = ~(Rm_VPR128.16B[72,8]); TMPQ1[80,8] = 
~(Rm_VPR128.16B[80,8]);
TMPQ1[88,8] = ~(Rm_VPR128.16B[88,8]);
TMPQ1[96,8] = ~(Rm_VPR128.16B[96,8]);
TMPQ1[104,8] = ~(Rm_VPR128.16B[104,8]);
TMPQ1[112,8] = ~(Rm_VPR128.16B[112,8]);
TMPQ1[120,8] = ~(Rm_VPR128.16B[120,8]);
# simd infix Rd_VPR128.16B = Rn_VPR128.16B | TMPQ1 on lane size 1
Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | TMPQ1[0,8];
Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | TMPQ1[8,8];
Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | TMPQ1[16,8];
Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | TMPQ1[24,8];
Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] | TMPQ1[32,8];
Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | TMPQ1[40,8];
Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | TMPQ1[48,8];
Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | TMPQ1[56,8];
Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | TMPQ1[64,8];
Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | TMPQ1[72,8];
Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | TMPQ1[80,8];
Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | TMPQ1[88,8];
Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | TMPQ1[96,8];
Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | TMPQ1[104,8];
Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | TMPQ1[112,8];
Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | TMPQ1[120,8];
zext_zq(Zd); # zero upper 16 bytes of Zd
}

# ORN (vector), 64-bit register form, eight 8-bit lanes:
# Rd.8B[i] = Rn.8B[i] | ~Rm.8B[i], computed via temporary TMPD1 = ~Rm.8B.
# Zd: presumably the SVE Z register overlapping the V destination — confirm in the base .sinc.
# C7.2.211 ORN (vector) page C7-2505 line 146624 MATCH x0ee01c00/mask=xbfe0fc00
# CONSTRUCT x0ee01c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 ARG3 $~@1 =$|@1
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_orn/2@1
# AUNIT --inst x0ee01c00/mask=xffe0fc00 --status pass
:orn Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
# simd unary TMPD1 = ~(Rm_VPR64.8B) on lane size 1
TMPD1[0,8] = ~(Rm_VPR64.8B[0,8]);
TMPD1[8,8] = ~(Rm_VPR64.8B[8,8]);
TMPD1[16,8] = ~(Rm_VPR64.8B[16,8]);
TMPD1[24,8] = ~(Rm_VPR64.8B[24,8]);
TMPD1[32,8] = ~(Rm_VPR64.8B[32,8]);
TMPD1[40,8] = ~(Rm_VPR64.8B[40,8]);
TMPD1[48,8] =
~(Rm_VPR64.8B[48,8]); TMPD1[56,8] = ~(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR64.8B = Rn_VPR64.8B | TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] | TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] | TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] | TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] | TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] | TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] | TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] | TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] | TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x0f001400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:4 &=$|@4 # SMACRO(pseudo) ARG1 ARG2:4 &=NEON_orn/2@4 # AUNIT --inst x0f001400/mask=xfff89c00 --status pass :orr Rd_VPR64.2S, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=5 & Rd_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rd_VPR64.2S | Imm_neon_uimm8Shift:4 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] | Imm_neon_uimm8Shift:4; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] | Imm_neon_uimm8Shift:4; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x0f009400/mask=xfff8dc00 MATCHED 3 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:2 &=$|@2 # SMACRO(pseudo) 
ARG1 ARG2:2 &=NEON_orn/2@2 # AUNIT --inst x0f009400/mask=xfff8dc00 --status pass :orr Rd_VPR64.4H, Imm_neon_uimm8Shift is b_3131=0 & q=0 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=5 & Rd_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rd_VPR64.4H | Imm_neon_uimm8Shift:2 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] | Imm_neon_uimm8Shift:2; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] | Imm_neon_uimm8Shift:2; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] | Imm_neon_uimm8Shift:2; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] | Imm_neon_uimm8Shift:2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x4f001400/mask=xfff89c00 MATCHED 6 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:4 &=$| # SMACRO(pseudo) ARG1 ARG2:4 &=NEON_orn/2@4 # AUNIT --inst x4f001400/mask=xfff89c00 --status pass :orr Rd_VPR128.4S, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1515=0 & b_1012=5 & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rd_VPR128.4S | Imm_neon_uimm8Shift:4 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] | Imm_neon_uimm8Shift:4; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] | Imm_neon_uimm8Shift:4; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] | Imm_neon_uimm8Shift:4; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] | Imm_neon_uimm8Shift:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.212 ORR (vector, immediate) page C7-2507 line 146708 MATCH x0f001400/mask=xbff81c00 # C7.2.204 MOVI page C7-2490 line 145763 MATCH x0f000400/mask=x9ff80c00 # C7.2.305 SQSHRN, 
SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x4f009400/mask=xfff8dc00 MATCHED 3 DOCUMENTED OPCODES # SMACRO ARG1 ARG2:2 &=$| # SMACRO(pseudo) ARG1 ARG2:2 &=NEON_orr/2@2 # AUNIT --inst x4f009400/mask=xfff8dc00 --status pass :orr Rd_VPR128.8H, Imm_neon_uimm8Shift is b_3131=0 & q=1 & b_29=0 & b_2428=0xf & b_1923=0x0 & Imm_neon_uimm8Shift & b_1415=2 & b_1012=5 & Rd_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rd_VPR128.8H | Imm_neon_uimm8Shift:2 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] | Imm_neon_uimm8Shift:2; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] | Imm_neon_uimm8Shift:2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.213 ORR (vector, register) page C7-2509 line 146837 MATCH x0ea01c00/mask=xbfe0fc00 # C7.2.202 MOV (vector) page C7-2487 line 145604 MATCH x0ea01c00/mask=xbfe0fc00 # CONSTRUCT x4ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$|@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_orr/2@1 # AUNIT --inst x4ea01c00/mask=xffe0fc00 --status pass :orr Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.16B & b_1115=0x3 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B | Rm_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] | Rm_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] | Rm_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] | Rm_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] | Rm_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = 
Rn_VPR128.16B[32,8] | Rm_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] | Rm_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] | Rm_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] | Rm_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] | Rm_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] | Rm_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] | Rm_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] | Rm_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] | Rm_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] | Rm_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] | Rm_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] | Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.213 ORR (vector, register) page C7-2509 line 146837 MATCH x0ea01c00/mask=xbfe0fc00 # C7.2.202 MOV (vector) page C7-2487 line 145604 MATCH x0ea01c00/mask=xbfe0fc00 # CONSTRUCT x0ea01c00/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$|@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_orr/2@1 # AUNIT --inst x0ea01c00/mask=xffe0fc00 --status pass :orr Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.8B & b_1115=0x3 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B | Rm_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] | Rm_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] | Rm_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] | Rm_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] | Rm_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] | Rm_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] | Rm_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] | Rm_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] | Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.214 PMUL page C7-2511 line 146928 MATCH 
x2e209c00/mask=xbf20fc00 # CONSTRUCT x6e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmul/2@1 # AUNIT --inst x6e209c00/mask=xffe0fc00 --status nopcodeop :pmul Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x13 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_pmul(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.214 PMUL page C7-2511 line 146928 MATCH x2e209c00/mask=xbf20fc00 # CONSTRUCT x2e209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmul/2@1 # AUNIT --inst x2e209c00/mask=xffe0fc00 --status nopcodeop :pmul Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x13 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_pmul(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.215 PMULL, PMULL2 page C7-2513 line 147032 MATCH x0e20e000/mask=xbf20fc00 # CONSTRUCT x0ee0e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull/2@8 # AUNIT --inst x0ee0e000/mask=xffe0fc00 --status nopcodeop --comment "ext" :pmull Rd_VPR128.1Q, Rn_VPR64.1D, Rm_VPR64.1D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR64.1D & b_1215=0xe & b_1011=0 & Rn_VPR64.1D & Rd_VPR128.1Q & Zd { Rd_VPR128.1Q = NEON_pmull(Rn_VPR64.1D, Rm_VPR64.1D, 8:1); } # C7.2.215 PMULL, PMULL2 page C7-2513 line 147032 MATCH x0e20e000/mask=xbf20fc00 # CONSTRUCT x0e20e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull/2@1 # AUNIT --inst x0e20e000/mask=xffe0fc00 --status nopcodeop --comment "ext" :pmull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xe & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_pmull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.215 PMULL, PMULL2 
page C7-2513 line 147032 MATCH x0e20e000/mask=xbf20fc00 # CONSTRUCT x4ee0e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull2/2@8 # AUNIT --inst x4ee0e000/mask=xffe0fc00 --status nopcodeop --comment "ext" :pmull2 Rd_VPR128.1Q, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1215=0xe & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.1Q & Zd { Rd_VPR128.1Q = NEON_pmull2(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.215 PMULL, PMULL2 page C7-2513 line 147032 MATCH x0e20e000/mask=xbf20fc00 # CONSTRUCT x4e20e000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_pmull2/2@1 # AUNIT --inst x4e20e000/mask=xffe0fc00 --status nopcodeop --comment "ext" :pmull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xe & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_pmull2(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.216 RADDHN, RADDHN2 page C7-2515 line 147152 MATCH x2e204000/mask=xbf20fc00 # CONSTRUCT x6e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@2 0x80:2 &=$+@2 &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn2/3@2 # AUNIT --inst x6e204000/mask=xffe0fc00 --status pass --comment "intround" :raddhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.8H + Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] + Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] + Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] + Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] + Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] + Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] + 
Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] + Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] + Rm_VPR128.8H[112,16]; # simd infix TMPQ1 = TMPQ1 + 0x80:2 on lane size 2 TMPQ1[0,16] = TMPQ1[0,16] + 0x80:2; TMPQ1[16,16] = TMPQ1[16,16] + 0x80:2; TMPQ1[32,16] = TMPQ1[32,16] + 0x80:2; TMPQ1[48,16] = TMPQ1[48,16] + 0x80:2; TMPQ1[64,16] = TMPQ1[64,16] + 0x80:2; TMPQ1[80,16] = TMPQ1[80,16] + 0x80:2; TMPQ1[96,16] = TMPQ1[96,16] + 0x80:2; TMPQ1[112,16] = TMPQ1[112,16] + 0x80:2; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 Rd_VPR128.16B[64,8] = TMPQ1[8,8]; Rd_VPR128.16B[72,8] = TMPQ1[24,8]; Rd_VPR128.16B[80,8] = TMPQ1[40,8]; Rd_VPR128.16B[88,8] = TMPQ1[56,8]; Rd_VPR128.16B[96,8] = TMPQ1[72,8]; Rd_VPR128.16B[104,8] = TMPQ1[88,8]; Rd_VPR128.16B[112,8] = TMPQ1[104,8]; Rd_VPR128.16B[120,8] = TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.216 RADDHN, RADDHN2 page C7-2515 line 147152 MATCH x2e204000/mask=xbf20fc00 # CONSTRUCT x6ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@8 0x80000000:8 &=$+@8 &=$shuffle@1-2@3-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn2/3@8 # AUNIT --inst x6ea04000/mask=xffe0fc00 --status pass --comment "intround" :raddhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.2D + Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] + Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] + Rm_VPR128.2D[64,64]; # simd infix TMPQ1 = TMPQ1 + 0x80000000:8 on lane size 8 TMPQ1[0,64] = TMPQ1[0,64] + 0x80000000:8; TMPQ1[64,64] = TMPQ1[64,64] + 0x80000000:8; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 Rd_VPR128.4S[64,32] = TMPQ1[32,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.216 RADDHN, RADDHN2 page C7-2515 line 147152 
MATCH x2e204000/mask=xbf20fc00 # CONSTRUCT x6e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $+@4 0x8000:4 &=$+@4 &=$shuffle@1-4@3-5@5-6@7-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn2/3@4 # AUNIT --inst x6e604000/mask=xffe0fc00 --status pass --comment "intround" :raddhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.4S + Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] + Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] + Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] + Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] + Rm_VPR128.4S[96,32]; # simd infix TMPQ1 = TMPQ1 + 0x8000:4 on lane size 4 TMPQ1[0,32] = TMPQ1[0,32] + 0x8000:4; TMPQ1[32,32] = TMPQ1[32,32] + 0x8000:4; TMPQ1[64,32] = TMPQ1[64,32] + 0x8000:4; TMPQ1[96,32] = TMPQ1[96,32] + 0x8000:4; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 Rd_VPR128.8H[64,16] = TMPQ1[16,16]; Rd_VPR128.8H[80,16] = TMPQ1[48,16]; Rd_VPR128.8H[96,16] = TMPQ1[80,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.216 RADDHN, RADDHN2 page C7-2515 line 147152 MATCH x2e204000/mask=xbf20fc00 # CONSTRUCT x2ea04000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn/3@8 # AUNIT --inst x2ea04000/mask=xffe0fc00 --status nopcodeop :raddhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x4 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_raddhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.216 RADDHN, RADDHN2 page C7-2515 line 147152 MATCH x2e204000/mask=xbf20fc00 # CONSTRUCT x2e604000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn/3@4 # AUNIT --inst 
x2e604000/mask=xffe0fc00 --status nopcodeop :raddhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x4 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_raddhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.216 RADDHN, RADDHN2 page C7-2515 line 147152 MATCH x2e204000/mask=xbf20fc00 # CONSTRUCT x2e204000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_raddhn/3@2 # AUNIT --inst x2e204000/mask=xffe0fc00 --status nopcodeop :raddhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x4 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_raddhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.217 RAX1 page C7-2517 line 147279 MATCH xce608c00/mask=xffe0fc00 # CONSTRUCT xce608c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 1:8 $<<@8 =$|@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_rax1/2@8 # AUNIT --inst xce608c00/mask=xffe0fc00 --status noqemu :rax1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100011 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { # simd infix TMPQ1 = Rm_VPR128.2D << 1:8 on lane size 8 TMPQ1[0,64] = Rm_VPR128.2D[0,64] << 1:8; TMPQ1[64,64] = Rm_VPR128.2D[64,64] << 1:8; # simd infix Rd_VPR128.2D = Rn_VPR128.2D | TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] | TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] | TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.218 RBIT (vector) page C7-2518 line 147347 MATCH x2e605800/mask=xbffffc00 # CONSTRUCT x2e605800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rbit/1@1 # AUNIT --inst x2e605800/mask=xfffffc00 --status nopcodeop :rbit Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_1029=0b10111001100000010110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { Rd_VPR64.8B = 
NEON_rbit(Rn_VPR64.8B, 1:1); } # C7.2.218 RBIT (vector) page C7-2518 line 147347 MATCH x2e605800/mask=xbffffc00 # CONSTRUCT x6e605800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rbit/1@1 # AUNIT --inst x6e605800/mask=xfffffc00 --status nopcodeop :rbit Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_1029=0b10111001100000010110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { Rd_VPR128.16B = NEON_rbit(Rn_VPR128.16B, 1:1); } # C7.2.219 REV16 (vector) page C7-2520 line 147435 MATCH x0e201800/mask=xbf3ffc00 # CONSTRUCT x4e201800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev16/1@1 # AUNIT --inst x4e201800/mask=xfffffc00 --status nopcodeop :rev16 Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_rev16(Rn_VPR128.16B, 1:1); } # C7.2.219 REV16 (vector) page C7-2520 line 147435 MATCH x0e201800/mask=xbf3ffc00 # CONSTRUCT x0e201800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev16/1@1 # AUNIT --inst x0e201800/mask=xfffffc00 --status nopcodeop :rev16 Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_rev16(Rn_VPR64.8B, 1:1); } # C7.2.220 REV32 (vector) page C7-2522 line 147553 MATCH x2e200800/mask=xbf3ffc00 # CONSTRUCT x6e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@1 # AUNIT --inst x6e200800/mask=xfffffc00 --status nopcodeop :rev32 Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_rev32(Rn_VPR128.16B, 1:1); } # C7.2.220 REV32 (vector) page C7-2522 line 147553 MATCH x2e200800/mask=xbf3ffc00 # CONSTRUCT x2e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@2 # AUNIT --inst x2e600800/mask=xfffffc00 --status nopcodeop :rev32 Rd_VPR64.4H, Rn_VPR64.4H is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_rev32(Rn_VPR64.4H, 2:1); } # C7.2.220 REV32 (vector) page C7-2522 line 147553 MATCH x2e200800/mask=xbf3ffc00 # CONSTRUCT x2e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@1 # AUNIT --inst x2e200800/mask=xfffffc00 --status nopcodeop :rev32 Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & Q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_rev32(Rn_VPR64.8B, 1:1); } # C7.2.220 REV32 (vector) page C7-2522 line 147553 MATCH x2e200800/mask=xbf3ffc00 # CONSTRUCT x6e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev32/1@2 # AUNIT --inst x6e600800/mask=xfffffc00 --status nopcodeop :rev32 Rd_VPR128.8H, Rn_VPR128.8H is b_3131=0 & Q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_rev32(Rn_VPR128.8H, 2:1); } # C7.2.221 REV64 page C7-2524 line 147671 MATCH x0e200800/mask=xbf3ffc00 # CONSTRUCT x4e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@1 # AUNIT --inst x4e200800/mask=xfffffc00 --status nopcodeop :rev64 Rd_VPR128.16B, Rn_VPR128.16B is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_rev64(Rn_VPR128.16B, 1:1); } # C7.2.221 REV64 page C7-2524 line 147671 MATCH x0e200800/mask=xbf3ffc00 # CONSTRUCT x0ea00800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@4 # AUNIT --inst x0ea00800/mask=xfffffc00 --status nopcodeop :rev64 Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & Q=0 & u=0 & b_2428=0xe & 
advSIMD3.size=2 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_rev64(Rn_VPR64.2S, 4:1); } # C7.2.221 REV64 page C7-2524 line 147671 MATCH x0e200800/mask=xbf3ffc00 # CONSTRUCT x0e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@2 # AUNIT --inst x0e600800/mask=xfffffc00 --status nopcodeop :rev64 Rd_VPR64.4H, Rn_VPR64.4H is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_rev64(Rn_VPR64.4H, 2:1); } # C7.2.221 REV64 page C7-2524 line 147671 MATCH x0e200800/mask=xbf3ffc00 # CONSTRUCT x4ea00800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@4 # AUNIT --inst x4ea00800/mask=xfffffc00 --status nopcodeop :rev64 Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_rev64(Rn_VPR128.4S, 4:1); } # C7.2.221 REV64 page C7-2524 line 147671 MATCH x0e200800/mask=xbf3ffc00 # CONSTRUCT x0e200800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@1 # AUNIT --inst x0e200800/mask=xfffffc00 --status nopcodeop :rev64 Rd_VPR64.8B, Rn_VPR64.8B is b_3131=0 & Q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_rev64(Rn_VPR64.8B, 1:1); } # C7.2.221 REV64 page C7-2524 line 147671 MATCH x0e200800/mask=xbf3ffc00 # CONSTRUCT x4e600800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_rev64/1@2 # AUNIT --inst x4e600800/mask=xfffffc00 --status nopcodeop :rev64 Rd_VPR128.8H, Rn_VPR128.8H is b_3131=0 & Q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x0 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_rev64(Rn_VPR128.8H, 2:1); } # C7.2.222 RSHRN, RSHRN2 page C7-2526 line 147791 MATCH 
x0f008c00/mask=xbf80fc00 # CONSTRUCT x4f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn2/3@2 # AUNIT --inst x4f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround" :rshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_rshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.222 RSHRN, RSHRN2 page C7-2526 line 147791 MATCH x0f008c00/mask=xbf80fc00 # CONSTRUCT x0f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn/3@8 # AUNIT --inst x0f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_rshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.222 RSHRN, RSHRN2 page C7-2526 line 147791 MATCH x0f008c00/mask=xbf80fc00 # CONSTRUCT x0f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn/3@4 # AUNIT --inst x0f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround" :rshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_rshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.222 RSHRN, RSHRN2 page C7-2526 line 147791 MATCH x0f008c00/mask=xbf80fc00 # CONSTRUCT x4f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn2/3@8 # AUNIT --inst x4f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & 
Zd { Rd_VPR128.4S = NEON_rshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.222 RSHRN, RSHRN2 page C7-2526 line 147791 MATCH x0f008c00/mask=xbf80fc00 # CONSTRUCT x0f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn/3@2 # AUNIT --inst x0f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround" :rshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_rshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.222 RSHRN, RSHRN2 page C7-2526 line 147791 MATCH x0f008c00/mask=xbf80fc00 # CONSTRUCT x4f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_rshrn2/3@4 # AUNIT --inst x4f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround" :rshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_rshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.223 RSUBHN, RSUBHN2 page C7-2528 line 147915 MATCH x2e206000/mask=xbf20fc00 # CONSTRUCT x6e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn2/3@2 # AUNIT --inst x6e206000/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rsubhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_rsubhn2(Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.223 RSUBHN, RSUBHN2 page C7-2528 line 147915 MATCH x2e206000/mask=xbf20fc00 # CONSTRUCT x6ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn2/3@8 # AUNIT --inst x6ea06000/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rsubhn2 
Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_rsubhn2(Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.223 RSUBHN, RSUBHN2 page C7-2528 line 147915 MATCH x2e206000/mask=xbf20fc00 # CONSTRUCT x6e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn2/3@4 # AUNIT --inst x6e606000/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rsubhn2 Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_rsubhn2(Rd_VPR128.8H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.223 RSUBHN, RSUBHN2 page C7-2528 line 147915 MATCH x2e206000/mask=xbf20fc00 # CONSTRUCT x2ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn/3@8 # AUNIT --inst x2ea06000/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rsubhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_rsubhn(Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.223 RSUBHN, RSUBHN2 page C7-2528 line 147915 MATCH x2e206000/mask=xbf20fc00 # CONSTRUCT x2e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn/3@4 # AUNIT --inst x2e606000/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rsubhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_rsubhn(Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.223 RSUBHN, RSUBHN2 page C7-2528 line 147915 MATCH x2e206000/mask=xbf20fc00 # CONSTRUCT 
x2e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_rsubhn/3@2 # AUNIT --inst x2e206000/mask=xffe0fc00 --status nopcodeop --comment "nointround" :rsubhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_rsubhn(Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.224 SABA page C7-2530 line 148042 MATCH x0e207c00/mask=xbf20fc00 # CONSTRUCT x4e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@1 # AUNIT --inst x4e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :saba Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xf & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_saba(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.224 SABA page C7-2530 line 148042 MATCH x0e207c00/mask=xbf20fc00 # CONSTRUCT x0ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@4 # AUNIT --inst x0ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :saba Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xf & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_saba(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.224 SABA page C7-2530 line 148042 MATCH x0e207c00/mask=xbf20fc00 # CONSTRUCT x0e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@2 # AUNIT --inst x0e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :saba Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xf & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_saba(Rd_VPR64.4H, Rn_VPR64.4H, 
Rm_VPR64.4H, 2:1); } # C7.2.224 SABA page C7-2530 line 148042 MATCH x0e207c00/mask=xbf20fc00 # CONSTRUCT x4ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@4 # AUNIT --inst x4ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :saba Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xf & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_saba(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.224 SABA page C7-2530 line 148042 MATCH x0e207c00/mask=xbf20fc00 # CONSTRUCT x0e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@1 # AUNIT --inst x0e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :saba Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xf & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_saba(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.224 SABA page C7-2530 line 148042 MATCH x0e207c00/mask=xbf20fc00 # CONSTRUCT x4e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saba/3@2 # AUNIT --inst x4e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :saba Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xf & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_saba(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.225 SABAL, SABAL2 page C7-2532 line 148144 MATCH x0e205000/mask=xbf20fc00 # CONSTRUCT x0ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $-@8 $abs@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal/3@4 # AUNIT --inst x0ea05000/mask=xffe0fc00 --status pass --comment "ext abd" :sabal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & 
b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x5 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 8 TMPQ4[0,64] = MP_INT_ABS(TMPQ3[0,64]); TMPQ4[64,64] = MP_INT_ABS(TMPQ3[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.225 SABAL, SABAL2 page C7-2532 line 148144 MATCH x0e205000/mask=xbf20fc00 # CONSTRUCT x0e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $-@4 $abs@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal/3@2 # AUNIT --inst x0e605000/mask=xffe0fc00 --status pass --comment "ext abd" :sabal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x5 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; TMPQ3[64,32] = 
TMPQ1[64,32] - TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 4 TMPQ4[0,32] = MP_INT_ABS(TMPQ3[0,32]); TMPQ4[32,32] = MP_INT_ABS(TMPQ3[32,32]); TMPQ4[64,32] = MP_INT_ABS(TMPQ3[64,32]); TMPQ4[96,32] = MP_INT_ABS(TMPQ3[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.225 SABAL, SABAL2 page C7-2532 line 148144 MATCH x0e205000/mask=xbf20fc00 # CONSTRUCT x0e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $-@2 $abs@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal/3@1 # AUNIT --inst x0e205000/mask=xffe0fc00 --status pass --comment "ext abd" :sabal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x5 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; 
TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 2 TMPQ4[0,16] = MP_INT_ABS(TMPQ3[0,16]); TMPQ4[16,16] = MP_INT_ABS(TMPQ3[16,16]); TMPQ4[32,16] = MP_INT_ABS(TMPQ3[32,16]); TMPQ4[48,16] = MP_INT_ABS(TMPQ3[48,16]); TMPQ4[64,16] = MP_INT_ABS(TMPQ3[64,16]); TMPQ4[80,16] = MP_INT_ABS(TMPQ3[80,16]); TMPQ4[96,16] = MP_INT_ABS(TMPQ3[96,16]); TMPQ4[112,16] = MP_INT_ABS(TMPQ3[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ4[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ4[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ4[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ4[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ4[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ4[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ4[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.225 SABAL, SABAL2 page C7-2532 line 148144 MATCH x0e205000/mask=xbf20fc00 # CONSTRUCT x4ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $-@8 $abs@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal2/3@4 # AUNIT --inst x4ea05000/mask=xffe0fc00 --status pass --comment "ext abd" :sabal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x5 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 
= sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 8 TMPQ6[0,64] = MP_INT_ABS(TMPQ5[0,64]); TMPQ6[64,64] = MP_INT_ABS(TMPQ5[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.225 SABAL, SABAL2 page C7-2532 line 148144 MATCH x0e205000/mask=xbf20fc00 # CONSTRUCT x4e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $-@4 $abs@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal2/3@2 # AUNIT --inst x4e605000/mask=xffe0fc00 --status pass --comment "ext abd" :sabal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x5 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 4 TMPQ6[0,32] = MP_INT_ABS(TMPQ5[0,32]); TMPQ6[32,32] = MP_INT_ABS(TMPQ5[32,32]); TMPQ6[64,32] = MP_INT_ABS(TMPQ5[64,32]); TMPQ6[96,32] = 
MP_INT_ABS(TMPQ5[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.225 SABAL, SABAL2 page C7-2532 line 148144 MATCH x0e205000/mask=xbf20fc00 # CONSTRUCT x4e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $-@2 $abs@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabal2/3@1 # AUNIT --inst x4e205000/mask=xffe0fc00 --status pass --comment "ext abd" :sabal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x5 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] - 
TMPQ4[112,16]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 2 TMPQ6[0,16] = MP_INT_ABS(TMPQ5[0,16]); TMPQ6[16,16] = MP_INT_ABS(TMPQ5[16,16]); TMPQ6[32,16] = MP_INT_ABS(TMPQ5[32,16]); TMPQ6[48,16] = MP_INT_ABS(TMPQ5[48,16]); TMPQ6[64,16] = MP_INT_ABS(TMPQ5[64,16]); TMPQ6[80,16] = MP_INT_ABS(TMPQ5[80,16]); TMPQ6[96,16] = MP_INT_ABS(TMPQ5[96,16]); TMPQ6[112,16] = MP_INT_ABS(TMPQ5[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ6 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ6[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ6[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ6[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ6[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ6[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ6[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ6[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ6[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00 # CONSTRUCT x4ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@4 # AUNIT --inst x4ea07400/mask=xffe0fc00 --status nopcodeop --comment "abd" # C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00 # CONSTRUCT x4e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@1 # AUNIT --inst x4e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" :sabd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.16B- Rm_VPR128.16B on lane size 1 TMPQ1[0,8] = Rn_VPR128.16B[0,8] - Rm_VPR128.16B[0,8]; TMPQ1[8,8] = Rn_VPR128.16B[8,8] - Rm_VPR128.16B[8,8]; TMPQ1[16,8] = Rn_VPR128.16B[16,8] - Rm_VPR128.16B[16,8]; TMPQ1[24,8] = Rn_VPR128.16B[24,8] - Rm_VPR128.16B[24,8]; TMPQ1[32,8] = 
Rn_VPR128.16B[32,8] - Rm_VPR128.16B[32,8]; TMPQ1[40,8] = Rn_VPR128.16B[40,8] - Rm_VPR128.16B[40,8]; TMPQ1[48,8] = Rn_VPR128.16B[48,8] - Rm_VPR128.16B[48,8]; TMPQ1[56,8] = Rn_VPR128.16B[56,8] - Rm_VPR128.16B[56,8]; TMPQ1[64,8] = Rn_VPR128.16B[64,8] - Rm_VPR128.16B[64,8]; TMPQ1[72,8] = Rn_VPR128.16B[72,8] - Rm_VPR128.16B[72,8]; TMPQ1[80,8] = Rn_VPR128.16B[80,8] - Rm_VPR128.16B[80,8]; TMPQ1[88,8] = Rn_VPR128.16B[88,8] - Rm_VPR128.16B[88,8]; TMPQ1[96,8] = Rn_VPR128.16B[96,8] - Rm_VPR128.16B[96,8]; TMPQ1[104,8] = Rn_VPR128.16B[104,8] - Rm_VPR128.16B[104,8]; TMPQ1[112,8] = Rn_VPR128.16B[112,8] - Rm_VPR128.16B[112,8]; TMPQ1[120,8] = Rn_VPR128.16B[120,8] - Rm_VPR128.16B[120,8]; # simd infix TMPQ2 = Rm_VPR128.16B - Rn_VPR128.16B on lane size 1 TMPQ2[0,8] = Rm_VPR128.16B[0,8] - Rn_VPR128.16B[0,8]; TMPQ2[8,8] = Rm_VPR128.16B[8,8] - Rn_VPR128.16B[8,8]; TMPQ2[16,8] = Rm_VPR128.16B[16,8] - Rn_VPR128.16B[16,8]; TMPQ2[24,8] = Rm_VPR128.16B[24,8] - Rn_VPR128.16B[24,8]; TMPQ2[32,8] = Rm_VPR128.16B[32,8] - Rn_VPR128.16B[32,8]; TMPQ2[40,8] = Rm_VPR128.16B[40,8] - Rn_VPR128.16B[40,8]; TMPQ2[48,8] = Rm_VPR128.16B[48,8] - Rn_VPR128.16B[48,8]; TMPQ2[56,8] = Rm_VPR128.16B[56,8] - Rn_VPR128.16B[56,8]; TMPQ2[64,8] = Rm_VPR128.16B[64,8] - Rn_VPR128.16B[64,8]; TMPQ2[72,8] = Rm_VPR128.16B[72,8] - Rn_VPR128.16B[72,8]; TMPQ2[80,8] = Rm_VPR128.16B[80,8] - Rn_VPR128.16B[80,8]; TMPQ2[88,8] = Rm_VPR128.16B[88,8] - Rn_VPR128.16B[88,8]; TMPQ2[96,8] = Rm_VPR128.16B[96,8] - Rn_VPR128.16B[96,8]; TMPQ2[104,8] = Rm_VPR128.16B[104,8] - Rn_VPR128.16B[104,8]; TMPQ2[112,8] = Rm_VPR128.16B[112,8] - Rn_VPR128.16B[112,8]; TMPQ2[120,8] = Rm_VPR128.16B[120,8] - Rn_VPR128.16B[120,8]; # simd infix TMPQ2 = TMPQ2 * 2:1 on lane size 1 TMPQ2[0,8] = TMPQ2[0,8] * 2:1; TMPQ2[8,8] = TMPQ2[8,8] * 2:1; TMPQ2[16,8] = TMPQ2[16,8] * 2:1; TMPQ2[24,8] = TMPQ2[24,8] * 2:1; TMPQ2[32,8] = TMPQ2[32,8] * 2:1; TMPQ2[40,8] = TMPQ2[40,8] * 2:1; TMPQ2[48,8] = TMPQ2[48,8] * 2:1; TMPQ2[56,8] = TMPQ2[56,8] * 2:1; TMPQ2[64,8] = TMPQ2[64,8] * 
2:1; TMPQ2[72,8] = TMPQ2[72,8] * 2:1; TMPQ2[80,8] = TMPQ2[80,8] * 2:1; TMPQ2[88,8] = TMPQ2[88,8] * 2:1; TMPQ2[96,8] = TMPQ2[96,8] * 2:1; TMPQ2[104,8] = TMPQ2[104,8] * 2:1; TMPQ2[112,8] = TMPQ2[112,8] * 2:1; TMPQ2[120,8] = TMPQ2[120,8] * 2:1; # simd infix TMPQ3 = Rn_VPR128.16B s< Rm_VPR128.16B on lane size 1 TMPQ3[0,8] = zext(Rn_VPR128.16B[0,8] s< Rm_VPR128.16B[0,8]); TMPQ3[8,8] = zext(Rn_VPR128.16B[8,8] s< Rm_VPR128.16B[8,8]); TMPQ3[16,8] = zext(Rn_VPR128.16B[16,8] s< Rm_VPR128.16B[16,8]); TMPQ3[24,8] = zext(Rn_VPR128.16B[24,8] s< Rm_VPR128.16B[24,8]); TMPQ3[32,8] = zext(Rn_VPR128.16B[32,8] s< Rm_VPR128.16B[32,8]); TMPQ3[40,8] = zext(Rn_VPR128.16B[40,8] s< Rm_VPR128.16B[40,8]); TMPQ3[48,8] = zext(Rn_VPR128.16B[48,8] s< Rm_VPR128.16B[48,8]); TMPQ3[56,8] = zext(Rn_VPR128.16B[56,8] s< Rm_VPR128.16B[56,8]); TMPQ3[64,8] = zext(Rn_VPR128.16B[64,8] s< Rm_VPR128.16B[64,8]); TMPQ3[72,8] = zext(Rn_VPR128.16B[72,8] s< Rm_VPR128.16B[72,8]); TMPQ3[80,8] = zext(Rn_VPR128.16B[80,8] s< Rm_VPR128.16B[80,8]); TMPQ3[88,8] = zext(Rn_VPR128.16B[88,8] s< Rm_VPR128.16B[88,8]); TMPQ3[96,8] = zext(Rn_VPR128.16B[96,8] s< Rm_VPR128.16B[96,8]); TMPQ3[104,8] = zext(Rn_VPR128.16B[104,8] s< Rm_VPR128.16B[104,8]); TMPQ3[112,8] = zext(Rn_VPR128.16B[112,8] s< Rm_VPR128.16B[112,8]); TMPQ3[120,8] = zext(Rn_VPR128.16B[120,8] s< Rm_VPR128.16B[120,8]); # simd infix TMPQ2 = TMPQ2 * TMPQ3 on lane size 1 TMPQ2[0,8] = TMPQ2[0,8] * TMPQ3[0,8]; TMPQ2[8,8] = TMPQ2[8,8] * TMPQ3[8,8]; TMPQ2[16,8] = TMPQ2[16,8] * TMPQ3[16,8]; TMPQ2[24,8] = TMPQ2[24,8] * TMPQ3[24,8]; TMPQ2[32,8] = TMPQ2[32,8] * TMPQ3[32,8]; TMPQ2[40,8] = TMPQ2[40,8] * TMPQ3[40,8]; TMPQ2[48,8] = TMPQ2[48,8] * TMPQ3[48,8]; TMPQ2[56,8] = TMPQ2[56,8] * TMPQ3[56,8]; TMPQ2[64,8] = TMPQ2[64,8] * TMPQ3[64,8]; TMPQ2[72,8] = TMPQ2[72,8] * TMPQ3[72,8]; TMPQ2[80,8] = TMPQ2[80,8] * TMPQ3[80,8]; TMPQ2[88,8] = TMPQ2[88,8] * TMPQ3[88,8]; TMPQ2[96,8] = TMPQ2[96,8] * TMPQ3[96,8]; TMPQ2[104,8] = TMPQ2[104,8] * TMPQ3[104,8]; TMPQ2[112,8] = TMPQ2[112,8] * 
TMPQ3[112,8]; TMPQ2[120,8] = TMPQ2[120,8] * TMPQ3[120,8]; # simd infix Rd_VPR128.16B = TMPQ1 + TMPQ2 on lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[0,8] + TMPQ2[0,8]; Rd_VPR128.16B[8,8] = TMPQ1[8,8] + TMPQ2[8,8]; Rd_VPR128.16B[16,8] = TMPQ1[16,8] + TMPQ2[16,8]; Rd_VPR128.16B[24,8] = TMPQ1[24,8] + TMPQ2[24,8]; Rd_VPR128.16B[32,8] = TMPQ1[32,8] + TMPQ2[32,8]; Rd_VPR128.16B[40,8] = TMPQ1[40,8] + TMPQ2[40,8]; Rd_VPR128.16B[48,8] = TMPQ1[48,8] + TMPQ2[48,8]; Rd_VPR128.16B[56,8] = TMPQ1[56,8] + TMPQ2[56,8]; Rd_VPR128.16B[64,8] = TMPQ1[64,8] + TMPQ2[64,8]; Rd_VPR128.16B[72,8] = TMPQ1[72,8] + TMPQ2[72,8]; Rd_VPR128.16B[80,8] = TMPQ1[80,8] + TMPQ2[80,8]; Rd_VPR128.16B[88,8] = TMPQ1[88,8] + TMPQ2[88,8]; Rd_VPR128.16B[96,8] = TMPQ1[96,8] + TMPQ2[96,8]; Rd_VPR128.16B[104,8] = TMPQ1[104,8] + TMPQ2[104,8]; Rd_VPR128.16B[112,8] = TMPQ1[112,8] + TMPQ2[112,8]; Rd_VPR128.16B[120,8] = TMPQ1[120,8] + TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00 # CONSTRUCT x0ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@4 ARG3 ARG2 $-@4 2:4 &=$* ARG2 ARG3 $sless@4 &=$*@4 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@4 # AUNIT --inst x0ea07400/mask=xffe0fc00 --status pass --comment "abd" # This abd instruction is implemented correctly to document a correct # way to implement the signed absolute difference semantic. 
:sabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix TMPD1 = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; TMPD1[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; # simd infix TMPD2 = Rm_VPR64.2S - Rn_VPR64.2S on lane size 4 TMPD2[0,32] = Rm_VPR64.2S[0,32] - Rn_VPR64.2S[0,32]; TMPD2[32,32] = Rm_VPR64.2S[32,32] - Rn_VPR64.2S[32,32]; # simd infix TMPD2 = TMPD2 * 2:4 on lane size 4 TMPD2[0,32] = TMPD2[0,32] * 2:4; TMPD2[32,32] = TMPD2[32,32] * 2:4; # simd infix TMPD3 = Rn_VPR64.2S s< Rm_VPR64.2S on lane size 4 TMPD3[0,32] = zext(Rn_VPR64.2S[0,32] s< Rm_VPR64.2S[0,32]); TMPD3[32,32] = zext(Rn_VPR64.2S[32,32] s< Rm_VPR64.2S[32,32]); # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 4 TMPD2[0,32] = TMPD2[0,32] * TMPD3[0,32]; TMPD2[32,32] = TMPD2[32,32] * TMPD3[32,32]; # simd infix Rd_VPR64.2S = TMPD1 + TMPD2 on lane size 4 Rd_VPR64.2S[0,32] = TMPD1[0,32] + TMPD2[0,32]; Rd_VPR64.2S[32,32] = TMPD1[32,32] + TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00 # CONSTRUCT x0e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@2 # AUNIT --inst x0e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" :sabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H - Rm_VPR64.4H on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] - Rm_VPR64.4H[0,16]; TMPD1[16,16] = Rn_VPR64.4H[16,16] - Rm_VPR64.4H[16,16]; TMPD1[32,16] = Rn_VPR64.4H[32,16] - Rm_VPR64.4H[32,16]; TMPD1[48,16] = Rn_VPR64.4H[48,16] - Rm_VPR64.4H[48,16]; # simd infix TMPD2 = Rm_VPR64.4H - Rn_VPR64.4H on lane size 2 TMPD2[0,16] = Rm_VPR64.4H[0,16] - Rn_VPR64.4H[0,16]; 
TMPD2[16,16] = Rm_VPR64.4H[16,16] - Rn_VPR64.4H[16,16]; TMPD2[32,16] = Rm_VPR64.4H[32,16] - Rn_VPR64.4H[32,16]; TMPD2[48,16] = Rm_VPR64.4H[48,16] - Rn_VPR64.4H[48,16]; # simd infix TMPD2 = TMPD2 * 2:2 on lane size 2 TMPD2[0,16] = TMPD2[0,16] * 2:2; TMPD2[16,16] = TMPD2[16,16] * 2:2; TMPD2[32,16] = TMPD2[32,16] * 2:2; TMPD2[48,16] = TMPD2[48,16] * 2:2; # simd infix TMPD3 = Rn_VPR64.4H s< Rm_VPR64.4H on lane size 2 TMPD3[0,16] = zext(Rn_VPR64.4H[0,16] s< Rm_VPR64.4H[0,16]); TMPD3[16,16] = zext(Rn_VPR64.4H[16,16] s< Rm_VPR64.4H[16,16]); TMPD3[32,16] = zext(Rn_VPR64.4H[32,16] s< Rm_VPR64.4H[32,16]); TMPD3[48,16] = zext(Rn_VPR64.4H[48,16] s< Rm_VPR64.4H[48,16]); # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 2 TMPD2[0,16] = TMPD2[0,16] * TMPD3[0,16]; TMPD2[16,16] = TMPD2[16,16] * TMPD3[16,16]; TMPD2[32,16] = TMPD2[32,16] * TMPD3[32,16]; TMPD2[48,16] = TMPD2[48,16] * TMPD3[48,16]; # simd infix Rd_VPR64.4H = TMPD1 + TMPD2 on lane size 2 Rd_VPR64.4H[0,16] = TMPD1[0,16] + TMPD2[0,16]; Rd_VPR64.4H[16,16] = TMPD1[16,16] + TMPD2[16,16]; Rd_VPR64.4H[32,16] = TMPD1[32,16] + TMPD2[32,16]; Rd_VPR64.4H[48,16] = TMPD1[48,16] + TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00 # CONSTRUCT x4ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@4 # AUNIT --inst x4ea07400/mask=xffe0fc00 --status nopcodeop --comment "abd" :sabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; # simd infix TMPQ2 = Rm_VPR128.4S - Rn_VPR128.4S on lane size 4 
TMPQ2[0,32] = Rm_VPR128.4S[0,32] - Rn_VPR128.4S[0,32];
TMPQ2[32,32] = Rm_VPR128.4S[32,32] - Rn_VPR128.4S[32,32];
TMPQ2[64,32] = Rm_VPR128.4S[64,32] - Rn_VPR128.4S[64,32];
TMPQ2[96,32] = Rm_VPR128.4S[96,32] - Rn_VPR128.4S[96,32];
# simd infix TMPQ2 = TMPQ2 * 2:4 on lane size 4
# NOTE(review): fixed generated typo here - lane 2 read TMPQ2[94,32], a
# misaligned bit-field; it must read TMPQ2[64,32] like every sibling
# constructor's doubling step (see the .2S/.4H/.8B/.8H/.16B variants).
TMPQ2[0,32] = TMPQ2[0,32] * 2:4;
TMPQ2[32,32] = TMPQ2[32,32] * 2:4;
TMPQ2[64,32] = TMPQ2[64,32] * 2:4;
TMPQ2[96,32] = TMPQ2[96,32] * 2:4;
# simd infix TMPQ3 = Rn_VPR128.4S s< Rm_VPR128.4S on lane size 4
TMPQ3[0,32] = zext(Rn_VPR128.4S[0,32] s< Rm_VPR128.4S[0,32]);
TMPQ3[32,32] = zext(Rn_VPR128.4S[32,32] s< Rm_VPR128.4S[32,32]);
TMPQ3[64,32] = zext(Rn_VPR128.4S[64,32] s< Rm_VPR128.4S[64,32]);
TMPQ3[96,32] = zext(Rn_VPR128.4S[96,32] s< Rm_VPR128.4S[96,32]);
# simd infix TMPQ2 = TMPQ2 * TMPQ3 on lane size 4
TMPQ2[0,32] = TMPQ2[0,32] * TMPQ3[0,32];
TMPQ2[32,32] = TMPQ2[32,32] * TMPQ3[32,32];
TMPQ2[64,32] = TMPQ2[64,32] * TMPQ3[64,32];
TMPQ2[96,32] = TMPQ2[96,32] * TMPQ3[96,32];
# simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4
Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32];
Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32];
Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32];
Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00
# CONSTRUCT x0e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@1
# AUNIT --inst x0e207400/mask=xffe0fc00 --status nopcodeop --comment "abd"

:sabd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B
is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd
{
# simd infix TMPD1 = Rn_VPR64.8B - Rm_VPR64.8B on lane size 1
TMPD1[0,8] = Rn_VPR64.8B[0,8] - Rm_VPR64.8B[0,8];
TMPD1[8,8] = Rn_VPR64.8B[8,8] - Rm_VPR64.8B[8,8];
TMPD1[16,8] = Rn_VPR64.8B[16,8] - Rm_VPR64.8B[16,8];
TMPD1[24,8] = Rn_VPR64.8B[24,8] - Rm_VPR64.8B[24,8];
TMPD1[32,8] =
Rn_VPR64.8B[32,8] - Rm_VPR64.8B[32,8]; TMPD1[40,8] = Rn_VPR64.8B[40,8] - Rm_VPR64.8B[40,8]; TMPD1[48,8] = Rn_VPR64.8B[48,8] - Rm_VPR64.8B[48,8]; TMPD1[56,8] = Rn_VPR64.8B[56,8] - Rm_VPR64.8B[56,8]; # simd infix TMPD2 = Rm_VPR64.8B - Rn_VPR64.8B on lane size 1 TMPD2[0,8] = Rm_VPR64.8B[0,8] - Rn_VPR64.8B[0,8]; TMPD2[8,8] = Rm_VPR64.8B[8,8] - Rn_VPR64.8B[8,8]; TMPD2[16,8] = Rm_VPR64.8B[16,8] - Rn_VPR64.8B[16,8]; TMPD2[24,8] = Rm_VPR64.8B[24,8] - Rn_VPR64.8B[24,8]; TMPD2[32,8] = Rm_VPR64.8B[32,8] - Rn_VPR64.8B[32,8]; TMPD2[40,8] = Rm_VPR64.8B[40,8] - Rn_VPR64.8B[40,8]; TMPD2[48,8] = Rm_VPR64.8B[48,8] - Rn_VPR64.8B[48,8]; TMPD2[56,8] = Rm_VPR64.8B[56,8] - Rn_VPR64.8B[56,8]; # simd infix TMPD2 = TMPD2 * 2:1 on lane size 1 TMPD2[0,8] = TMPD2[0,8] * 2:1; TMPD2[8,8] = TMPD2[8,8] * 2:1; TMPD2[16,8] = TMPD2[16,8] * 2:1; TMPD2[24,8] = TMPD2[24,8] * 2:1; TMPD2[32,8] = TMPD2[32,8] * 2:1; TMPD2[40,8] = TMPD2[40,8] * 2:1; TMPD2[48,8] = TMPD2[48,8] * 2:1; TMPD2[56,8] = TMPD2[56,8] * 2:1; # simd infix TMPD3 = Rn_VPR64.8B s< Rm_VPR64.8B on lane size 1 TMPD3[0,8] = zext(Rn_VPR64.8B[0,8] s< Rm_VPR64.8B[0,8]); TMPD3[8,8] = zext(Rn_VPR64.8B[8,8] s< Rm_VPR64.8B[8,8]); TMPD3[16,8] = zext(Rn_VPR64.8B[16,8] s< Rm_VPR64.8B[16,8]); TMPD3[24,8] = zext(Rn_VPR64.8B[24,8] s< Rm_VPR64.8B[24,8]); TMPD3[32,8] = zext(Rn_VPR64.8B[32,8] s< Rm_VPR64.8B[32,8]); TMPD3[40,8] = zext(Rn_VPR64.8B[40,8] s< Rm_VPR64.8B[40,8]); TMPD3[48,8] = zext(Rn_VPR64.8B[48,8] s< Rm_VPR64.8B[48,8]); TMPD3[56,8] = zext(Rn_VPR64.8B[56,8] s< Rm_VPR64.8B[56,8]); # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 1 TMPD2[0,8] = TMPD2[0,8] * TMPD3[0,8]; TMPD2[8,8] = TMPD2[8,8] * TMPD3[8,8]; TMPD2[16,8] = TMPD2[16,8] * TMPD3[16,8]; TMPD2[24,8] = TMPD2[24,8] * TMPD3[24,8]; TMPD2[32,8] = TMPD2[32,8] * TMPD3[32,8]; TMPD2[40,8] = TMPD2[40,8] * TMPD3[40,8]; TMPD2[48,8] = TMPD2[48,8] * TMPD3[48,8]; TMPD2[56,8] = TMPD2[56,8] * TMPD3[56,8]; # simd infix Rd_VPR64.8B = TMPD1 + TMPD2 on lane size 1 Rd_VPR64.8B[0,8] = TMPD1[0,8] + TMPD2[0,8]; 
Rd_VPR64.8B[8,8] = TMPD1[8,8] + TMPD2[8,8]; Rd_VPR64.8B[16,8] = TMPD1[16,8] + TMPD2[16,8]; Rd_VPR64.8B[24,8] = TMPD1[24,8] + TMPD2[24,8]; Rd_VPR64.8B[32,8] = TMPD1[32,8] + TMPD2[32,8]; Rd_VPR64.8B[40,8] = TMPD1[40,8] + TMPD2[40,8]; Rd_VPR64.8B[48,8] = TMPD1[48,8] + TMPD2[48,8]; Rd_VPR64.8B[56,8] = TMPD1[56,8] + TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.226 SABD page C7-2534 line 148264 MATCH x0e207400/mask=xbf20fc00 # CONSTRUCT x4e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sabd/2@2 # AUNIT --inst x4e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" :sabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; # simd infix TMPQ2 = Rm_VPR128.8H - Rn_VPR128.8H on lane size 2 TMPQ2[0,16] = Rm_VPR128.8H[0,16] - Rn_VPR128.8H[0,16]; TMPQ2[16,16] = Rm_VPR128.8H[16,16] - Rn_VPR128.8H[16,16]; TMPQ2[32,16] = Rm_VPR128.8H[32,16] - Rn_VPR128.8H[32,16]; TMPQ2[48,16] = Rm_VPR128.8H[48,16] - Rn_VPR128.8H[48,16]; TMPQ2[64,16] = Rm_VPR128.8H[64,16] - Rn_VPR128.8H[64,16]; TMPQ2[80,16] = Rm_VPR128.8H[80,16] - Rn_VPR128.8H[80,16]; TMPQ2[96,16] = Rm_VPR128.8H[96,16] - Rn_VPR128.8H[96,16]; TMPQ2[112,16] = Rm_VPR128.8H[112,16] - Rn_VPR128.8H[112,16]; # simd infix TMPQ2 = TMPQ2 * 2:2 on lane size 2 TMPQ2[0,16] = TMPQ2[0,16] * 2:2; TMPQ2[16,16] = TMPQ2[16,16] * 2:2; 
TMPQ2[32,16] = TMPQ2[32,16] * 2:2; TMPQ2[48,16] = TMPQ2[48,16] * 2:2; TMPQ2[64,16] = TMPQ2[64,16] * 2:2; TMPQ2[80,16] = TMPQ2[80,16] * 2:2; TMPQ2[96,16] = TMPQ2[96,16] * 2:2; TMPQ2[112,16] = TMPQ2[112,16] * 2:2; # simd infix TMPQ3 = Rn_VPR128.8H s< Rm_VPR128.8H on lane size 2 TMPQ3[0,16] = zext(Rn_VPR128.8H[0,16] s< Rm_VPR128.8H[0,16]); TMPQ3[16,16] = zext(Rn_VPR128.8H[16,16] s< Rm_VPR128.8H[16,16]); TMPQ3[32,16] = zext(Rn_VPR128.8H[32,16] s< Rm_VPR128.8H[32,16]); TMPQ3[48,16] = zext(Rn_VPR128.8H[48,16] s< Rm_VPR128.8H[48,16]); TMPQ3[64,16] = zext(Rn_VPR128.8H[64,16] s< Rm_VPR128.8H[64,16]); TMPQ3[80,16] = zext(Rn_VPR128.8H[80,16] s< Rm_VPR128.8H[80,16]); TMPQ3[96,16] = zext(Rn_VPR128.8H[96,16] s< Rm_VPR128.8H[96,16]); TMPQ3[112,16] = zext(Rn_VPR128.8H[112,16] s< Rm_VPR128.8H[112,16]); # simd infix TMPQ2 = TMPQ2 * TMPQ3 on lane size 2 TMPQ2[0,16] = TMPQ2[0,16] * TMPQ3[0,16]; TMPQ2[16,16] = TMPQ2[16,16] * TMPQ3[16,16]; TMPQ2[32,16] = TMPQ2[32,16] * TMPQ3[32,16]; TMPQ2[48,16] = TMPQ2[48,16] * TMPQ3[48,16]; TMPQ2[64,16] = TMPQ2[64,16] * TMPQ3[64,16]; TMPQ2[80,16] = TMPQ2[80,16] * TMPQ3[80,16]; TMPQ2[96,16] = TMPQ2[96,16] * TMPQ3[96,16]; TMPQ2[112,16] = TMPQ2[112,16] * TMPQ3[112,16]; # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.227 SABDL, SABDL2 page C7-2536 line 148366 MATCH x0e207000/mask=xbf20fc00 # CONSTRUCT x0ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $-@8 =$abs@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl/3@4 # AUNIT --inst 
x0ea07000/mask=xffe0fc00 --status pass --comment "ext abd" :sabdl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x7 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ3) on lane size 8 Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ3[0,64]); Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ3[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.227 SABDL, SABDL2 page C7-2536 line 148366 MATCH x0e207000/mask=xbf20fc00 # CONSTRUCT x0e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $-@4 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl/3@2 # AUNIT --inst x0e607000/mask=xffe0fc00 --status pass --comment "ext abd" :sabdl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x7 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; 
TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ3) on lane size 4 Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ3[0,32]); Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ3[32,32]); Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ3[64,32]); Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ3[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.227 SABDL, SABDL2 page C7-2536 line 148366 MATCH x0e207000/mask=xbf20fc00 # CONSTRUCT x0e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $-@2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl/3@1 # AUNIT --inst x0e207000/mask=xffe0fc00 --status pass --comment "ext abd" :sabdl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x7 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] - 
TMPQ2[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ3) on lane size 2 Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ3[0,16]); Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ3[16,16]); Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ3[32,16]); Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ3[48,16]); Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ3[64,16]); Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ3[80,16]); Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ3[96,16]); Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ3[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.227 SABDL, SABDL2 page C7-2536 line 148366 MATCH x0e207000/mask=xbf20fc00 # CONSTRUCT x4ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $-@8 =$abs@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl2/3@4 # AUNIT --inst x4ea07000/mask=xffe0fc00 --status pass --comment "ext abd" :sabdl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x7 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ5) on lane size 8 Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ5[0,64]); Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ5[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.227 SABDL, SABDL2 page C7-2536 line 148366 MATCH x0e207000/mask=xbf20fc00 # CONSTRUCT x4e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $-@4 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl2/3@2 # AUNIT --inst x4e607000/mask=xffe0fc00 --status pass --comment 
"ext abd" :sabdl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x7 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ5) on lane size 4 Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ5[0,32]); Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ5[32,32]); Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ5[64,32]); Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ5[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.227 SABDL, SABDL2 page C7-2536 line 148366 MATCH x0e207000/mask=xbf20fc00 # CONSTRUCT x4e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $-@2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sabdl2/3@1 # AUNIT --inst x4e207000/mask=xffe0fc00 --status pass --comment "ext abd" :sabdl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x7 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = 
sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ5) on lane size 2 Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ5[0,16]); Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ5[16,16]); Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ5[32,16]); Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ5[48,16]); Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ5[64,16]); Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ5[80,16]); Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ5[96,16]); Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ5[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.228 SADALP page C7-2538 line 148486 MATCH x0e206800/mask=xbf3ffc00 # CONSTRUCT x0e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 =#+ &=$+@2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@1 # AUNIT --inst x0e206800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when 4H when size = 00 , Q = 0 Ta=VPR64.4H Tb=VPR64.8B e1=1 e2=2 s2=16 :sadalp Rd_VPR64.4H, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_VPR64.4H & Rn_VPR64.8B & Zd { TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.8B) on pairs lane size (1 to 2) local tmp2 = Rn_VPR64.8B[0,8]; local tmp4 = sext(tmp2); local tmp3 = Rn_VPR64.8B[8,8]; local tmp5 = 
sext(tmp3); TMPD1[0,16] = tmp4 + tmp5; tmp2 = Rn_VPR64.8B[16,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR64.8B[24,8]; tmp5 = sext(tmp3); TMPD1[16,16] = tmp4 + tmp5; tmp2 = Rn_VPR64.8B[32,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR64.8B[40,8]; tmp5 = sext(tmp3); TMPD1[32,16] = tmp4 + tmp5; tmp2 = Rn_VPR64.8B[48,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR64.8B[56,8]; tmp5 = sext(tmp3); TMPD1[48,16] = tmp4 + tmp5; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.228 SADALP page C7-2538 line 148486 MATCH x0e206800/mask=xbf3ffc00 # CONSTRUCT x4e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 =#+ &=$+@2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@1 # AUNIT --inst x4e206800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when 8H when size = 00 , Q = 1 Ta=VPR128.8H Tb=VPR128.16B e1=1 e2=2 s2=32 :sadalp Rd_VPR128.8H, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011010 & Rd_VPR128.8H & Rn_VPR128.16B & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.16B) on pairs lane size (1 to 2) local tmp2 = Rn_VPR128.16B[0,8]; local tmp4 = sext(tmp2); local tmp3 = Rn_VPR128.16B[8,8]; local tmp5 = sext(tmp3); TMPQ1[0,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[16,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.16B[24,8]; tmp5 = sext(tmp3); TMPQ1[16,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[32,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.16B[40,8]; tmp5 = sext(tmp3); TMPQ1[32,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[48,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.16B[56,8]; tmp5 = sext(tmp3); TMPQ1[48,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[64,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.16B[72,8]; tmp5 = sext(tmp3); TMPQ1[64,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[80,8]; tmp4 = 
sext(tmp2); tmp3 = Rn_VPR128.16B[88,8]; tmp5 = sext(tmp3); TMPQ1[80,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[96,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.16B[104,8]; tmp5 = sext(tmp3); TMPQ1[96,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[112,8]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.16B[120,8]; tmp5 = sext(tmp3); TMPQ1[112,16] = tmp4 + tmp5; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.228 SADALP page C7-2538 line 148486 MATCH x0e206800/mask=xbf3ffc00 # CONSTRUCT x0e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 =#+ &=$+@4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@2 # AUNIT --inst x0e606800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when 2S when size = 01 , Q = 0 Ta=VPR64.2S Tb=VPR64.4H e1=2 e2=4 s2=16 :sadalp Rd_VPR64.2S, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_VPR64.2S & Rn_VPR64.4H & Zd { TMPD1 = 0; # sipd infix TMPD1 = +(Rn_VPR64.4H) on pairs lane size (2 to 4) local tmp2 = Rn_VPR64.4H[0,16]; local tmp4 = sext(tmp2); local tmp3 = Rn_VPR64.4H[16,16]; local tmp5 = sext(tmp3); TMPD1[0,32] = tmp4 + tmp5; tmp2 = Rn_VPR64.4H[32,16]; tmp4 = sext(tmp2); tmp3 = Rn_VPR64.4H[48,16]; tmp5 = sext(tmp3); TMPD1[32,32] = tmp4 + tmp5; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.228 SADALP 
page C7-2538 line 148486 MATCH x0e206800/mask=xbf3ffc00 # CONSTRUCT x4e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 =#+ &=$+@4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@2 # AUNIT --inst x4e606800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when 4S when size = 01 , Q = 1 Ta=VPR128.4S Tb=VPR128.8H e1=2 e2=4 s2=32 :sadalp Rd_VPR128.4S, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011010 & Rd_VPR128.4S & Rn_VPR128.8H & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.8H) on pairs lane size (2 to 4) local tmp2 = Rn_VPR128.8H[0,16]; local tmp4 = sext(tmp2); local tmp3 = Rn_VPR128.8H[16,16]; local tmp5 = sext(tmp3); TMPQ1[0,32] = tmp4 + tmp5; tmp2 = Rn_VPR128.8H[32,16]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.8H[48,16]; tmp5 = sext(tmp3); TMPQ1[32,32] = tmp4 + tmp5; tmp2 = Rn_VPR128.8H[64,16]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.8H[80,16]; tmp5 = sext(tmp3); TMPQ1[64,32] = tmp4 + tmp5; tmp2 = Rn_VPR128.8H[96,16]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.8H[112,16]; tmp5 = sext(tmp3); TMPQ1[96,32] = tmp4 + tmp5; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.228 SADALP page C7-2538 line 148486 MATCH x0e206800/mask=xbf3ffc00 # CONSTRUCT x0ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 =#+ &=$+@8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@4 # AUNIT --inst x0ea06800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when 1D when size = 10 , Q = 0 Ta=VPR64.1D Tb=VPR64.2S e1=4 e2=8 s2=16 :sadalp Rd_VPR64.1D, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_VPR64.1D & Rn_VPR64.2S & Zd { TMPD1 = 0; # sipd infix TMPD1 = 
+(Rn_VPR64.2S) on pairs lane size (4 to 8) local tmp2 = Rn_VPR64.2S[0,32]; local tmp4 = sext(tmp2); local tmp3 = Rn_VPR64.2S[32,32]; local tmp5 = sext(tmp3); TMPD1 = tmp4 + tmp5; # simd infix Rd_VPR64.1D = Rd_VPR64.1D + TMPD1 on lane size 8 Rd_VPR64.1D = Rd_VPR64.1D + TMPD1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.228 SADALP page C7-2538 line 148486 MATCH x0e206800/mask=xbf3ffc00 # CONSTRUCT x4ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 =#+ &=$+@8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_sadalp/2@4 # AUNIT --inst x4ea06800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when 2D when size = 10 , Q = 1 Ta=VPR128.2D Tb=VPR128.4S e1=4 e2=8 s2=32 :sadalp Rd_VPR128.2D, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011010 & Rd_VPR128.2D & Rn_VPR128.4S & Zd { TMPQ1 = 0; # sipd infix TMPQ1 = +(Rn_VPR128.4S) on pairs lane size (4 to 8) local tmp2 = Rn_VPR128.4S[0,32]; local tmp4 = sext(tmp2); local tmp3 = Rn_VPR128.4S[32,32]; local tmp5 = sext(tmp3); TMPQ1[0,64] = tmp4 + tmp5; tmp2 = Rn_VPR128.4S[64,32]; tmp4 = sext(tmp2); tmp3 = Rn_VPR128.4S[96,32]; tmp5 = sext(tmp3); TMPQ1[64,64] = tmp4 + tmp5; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.229 SADDL, SADDL2 page C7-2540 line 148596 MATCH x0e200000/mask=xbf20fc00 # CONSTRUCT x0ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_saddl/2@4 # AUNIT --inst x0ea00000/mask=xffe0fc00 --status pass --comment "ext" :saddl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x0 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = 
sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 + TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] + TMPQ2[0,64]; Rd_VPR128.2D[64,64] = TMPQ1[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.229 SADDL, SADDL2 page C7-2540 line 148596 MATCH x0e200000/mask=xbf20fc00 # CONSTRUCT x0e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl/2@2 # AUNIT --inst x0e600000/mask=xffe0fc00 --status pass --comment "ext" :saddl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x0 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32]; Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32]; Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.229 SADDL, SADDL2 page C7-2540 line 148596 MATCH x0e200000/mask=xbf20fc00 # CONSTRUCT x0e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl/2@1 # AUNIT --inst x0e200000/mask=xffe0fc00 --status pass --comment "ext" :saddl Rd_VPR128.8H, 
Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x0 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.229 SADDL, SADDL2 page C7-2540 line 148596 MATCH x0e200000/mask=xbf20fc00 # CONSTRUCT x4ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl2/2@4 # AUNIT --inst x4ea00000/mask=xffe0fc00 --status pass --comment "ext" :saddl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x0 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = 
sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 + TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] + TMPQ4[0,64]; Rd_VPR128.2D[64,64] = TMPQ2[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.229 SADDL, SADDL2 page C7-2540 line 148596 MATCH x0e200000/mask=xbf20fc00 # CONSTRUCT x4e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl2/2@2 # AUNIT --inst x4e600000/mask=xffe0fc00 --status pass --comment "ext" :saddl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x0 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 + TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] + TMPQ4[0,32]; Rd_VPR128.4S[32,32] = TMPQ2[32,32] + TMPQ4[32,32]; Rd_VPR128.4S[64,32] = TMPQ2[64,32] + TMPQ4[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.229 SADDL, SADDL2 page C7-2540 line 148596 MATCH x0e200000/mask=xbf20fc00 # CONSTRUCT x4e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddl2/2@1 # AUNIT --inst 
x4e200000/mask=xffe0fc00 --status pass --comment "ext" :saddl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x0 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 + TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] + TMPQ4[0,16]; Rd_VPR128.8H[16,16] = TMPQ2[16,16] + TMPQ4[16,16]; Rd_VPR128.8H[32,16] = TMPQ2[32,16] + TMPQ4[32,16]; Rd_VPR128.8H[48,16] = TMPQ2[48,16] + TMPQ4[48,16]; Rd_VPR128.8H[64,16] = TMPQ2[64,16] + TMPQ4[64,16]; Rd_VPR128.8H[80,16] = TMPQ2[80,16] + TMPQ4[80,16]; Rd_VPR128.8H[96,16] = TMPQ2[96,16] + TMPQ4[96,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.230 SADDLP page C7-2542 line 148719 MATCH x0e202800/mask=xbf3ffc00 # CONSTRUCT x0ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#+@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@4 # AUNIT --inst x0ea02800/mask=xfffffc00 --status pass --comment "ext" :saddlp Rd_VPR64.1D, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.1D & Zd { TMPD1 = Rn_VPR64.2S; # sipd infix Rd_VPR64.1D = +(TMPD1) on pairs lane size (4 to 8) local 
tmp2 = TMPD1[0,32]; local tmp4 = sext(tmp2); local tmp3 = TMPD1[32,32]; local tmp5 = sext(tmp3); Rd_VPR64.1D = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.230 SADDLP page C7-2542 line 148719 MATCH x0e202800/mask=xbf3ffc00 # CONSTRUCT x0e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#+@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@2 # AUNIT --inst x0e602800/mask=xfffffc00 --status pass --comment "ext" :saddlp Rd_VPR64.2S, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.2S & Zd { TMPD1 = Rn_VPR64.4H; # simd infix Rd_VPR64.2S = +(TMPD1) on pairs lane size (2 to 4) local tmp2 = TMPD1[0,16]; local tmp4 = sext(tmp2); local tmp3 = TMPD1[16,16]; local tmp5 = sext(tmp3); Rd_VPR64.2S[0,32] = tmp4 + tmp5; tmp2 = TMPD1[32,16]; tmp4 = sext(tmp2); tmp3 = TMPD1[48,16]; tmp5 = sext(tmp3); Rd_VPR64.2S[32,32] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.230 SADDLP page C7-2542 line 148719 MATCH x0e202800/mask=xbf3ffc00 # CONSTRUCT x0e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#+@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@1 # AUNIT --inst x0e202800/mask=xfffffc00 --status pass --comment "ext" :saddlp Rd_VPR64.4H, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.4H & Zd { TMPD1 = Rn_VPR64.8B; # simd infix Rd_VPR64.4H = +(TMPD1) on pairs lane size (1 to 2) local tmp2 = TMPD1[0,8]; local tmp4 = sext(tmp2); local tmp3 = TMPD1[8,8]; local tmp5 = sext(tmp3); Rd_VPR64.4H[0,16] = tmp4 + tmp5; tmp2 = TMPD1[16,8]; tmp4 = sext(tmp2); tmp3 = TMPD1[24,8]; tmp5 = sext(tmp3); Rd_VPR64.4H[16,16] = tmp4 + tmp5; tmp2 = TMPD1[32,8]; tmp4 = sext(tmp2); tmp3 = TMPD1[40,8]; tmp5 = sext(tmp3); Rd_VPR64.4H[32,16] = tmp4 + tmp5; tmp2 = TMPD1[48,8]; tmp4 = sext(tmp2); tmp3 = TMPD1[56,8]; tmp5 = sext(tmp3); Rd_VPR64.4H[48,16] = tmp4 + tmp5; zext_zd(Zd); #
zero upper 24 bytes of Zd } # C7.2.230 SADDLP page C7-2542 line 148719 MATCH x0e202800/mask=xbf3ffc00 # CONSTRUCT x4ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#+@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@4 # AUNIT --inst x4ea02800/mask=xfffffc00 --status pass --comment "ext" :saddlp Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPQ1 = Rn_VPR128.4S; # simd infix Rd_VPR128.2D = +(TMPQ1) on pairs lane size (4 to 8) local tmp2 = TMPQ1[0,32]; local tmp4 = sext(tmp2); local tmp3 = TMPQ1[32,32]; local tmp5 = sext(tmp3); Rd_VPR128.2D[0,64] = tmp4 + tmp5; tmp2 = TMPQ1[64,32]; tmp4 = sext(tmp2); tmp3 = TMPQ1[96,32]; tmp5 = sext(tmp3); Rd_VPR128.2D[64,64] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.230 SADDLP page C7-2542 line 148719 MATCH x0e202800/mask=xbf3ffc00 # CONSTRUCT x4e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#+@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@2 # AUNIT --inst x4e602800/mask=xfffffc00 --status pass --comment "ext" :saddlp Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPQ1 = Rn_VPR128.8H; # simd infix Rd_VPR128.4S = +(TMPQ1) on pairs lane size (2 to 4) local tmp2 = TMPQ1[0,16]; local tmp4 = sext(tmp2); local tmp3 = TMPQ1[16,16]; local tmp5 = sext(tmp3); Rd_VPR128.4S[0,32] = tmp4 + tmp5; tmp2 = TMPQ1[32,16]; tmp4 = sext(tmp2); tmp3 = TMPQ1[48,16]; tmp5 = sext(tmp3); Rd_VPR128.4S[32,32] = tmp4 + tmp5; tmp2 = TMPQ1[64,16]; tmp4 = sext(tmp2); tmp3 = TMPQ1[80,16]; tmp5 = sext(tmp3); Rd_VPR128.4S[64,32] = tmp4 + tmp5; tmp2 = TMPQ1[96,16]; tmp4 = sext(tmp2); tmp3 = TMPQ1[112,16]; tmp5 = sext(tmp3); Rd_VPR128.4S[96,32] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.230 SADDLP page C7-2542 line 148719 MATCH x0e202800/mask=xbf3ffc00 #
CONSTRUCT x4e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#+@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlp/1@1 # AUNIT --inst x4e202800/mask=xfffffc00 --status pass --comment "ext" :saddlp Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=2 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPQ1 = Rn_VPR128.16B; # simd infix Rd_VPR128.8H = +(TMPQ1) on pairs lane size (1 to 2) local tmp2 = TMPQ1[0,8]; local tmp4 = sext(tmp2); local tmp3 = TMPQ1[8,8]; local tmp5 = sext(tmp3); Rd_VPR128.8H[0,16] = tmp4 + tmp5; tmp2 = TMPQ1[16,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[24,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[16,16] = tmp4 + tmp5; tmp2 = TMPQ1[32,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[40,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[32,16] = tmp4 + tmp5; tmp2 = TMPQ1[48,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[56,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[48,16] = tmp4 + tmp5; tmp2 = TMPQ1[64,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[72,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[64,16] = tmp4 + tmp5; tmp2 = TMPQ1[80,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[88,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[80,16] = tmp4 + tmp5; tmp2 = TMPQ1[96,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[104,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[96,16] = tmp4 + tmp5; tmp2 = TMPQ1[112,8]; tmp4 = sext(tmp2); tmp3 = TMPQ1[120,8]; tmp5 = sext(tmp3); Rd_VPR128.8H[112,16] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.231 SADDLV page C7-2544 line 148829 MATCH x0e303800/mask=xbf3ffc00 # CONSTRUCT x4eb03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@4 # AUNIT --inst x4eb03800/mask=xfffffc00 --status nopcodeop --comment "ext" :saddlv Rd_FPR64, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.4S & Rd_FPR64 & Zd { Rd_FPR64 = NEON_saddlv(Rn_VPR128.4S, 4:1); } # C7.2.231 SADDLV page C7-2544 line 148829 MATCH x0e303800/mask=xbf3ffc00 # CONSTRUCT
x4e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@1 # AUNIT --inst x4e303800/mask=xfffffc00 --status nopcodeop --comment "ext" :saddlv Rd_FPR16, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.16B & Rd_FPR16 & Zd { Rd_FPR16 = NEON_saddlv(Rn_VPR128.16B, 1:1); } # C7.2.231 SADDLV page C7-2544 line 148829 MATCH x0e303800/mask=xbf3ffc00 # CONSTRUCT x0e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@1 # AUNIT --inst x0e303800/mask=xfffffc00 --status nopcodeop --comment "ext" :saddlv Rd_FPR16, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.8B & Rd_FPR16 & Zd { Rd_FPR16 = NEON_saddlv(Rn_VPR64.8B, 1:1); } # C7.2.231 SADDLV page C7-2544 line 148829 MATCH x0e303800/mask=xbf3ffc00 # CONSTRUCT x0e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@2 # AUNIT --inst x0e703800/mask=xfffffc00 --status nopcodeop --comment "ext" :saddlv Rd_FPR32, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.4H & Rd_FPR32 & Zd { Rd_FPR32 = NEON_saddlv(Rn_VPR64.4H, 2:1); } # C7.2.231 SADDLV page C7-2544 line 148829 MATCH x0e303800/mask=xbf3ffc00 # CONSTRUCT x4e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_saddlv/1@2 # AUNIT --inst x4e703800/mask=xfffffc00 --status nopcodeop --comment "ext" :saddlv Rd_FPR32, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.8H & Rd_FPR32 & Zd { Rd_FPR32 = NEON_saddlv(Rn_VPR128.8H, 2:1); } # C7.2.232 SADDW, SADDW2 page C7-2546 line 148929 MATCH x0e201000/mask=xbf20fc00 # CONSTRUCT x0ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $sext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw/2@4 # AUNIT 
--inst x0ea01000/mask=xffe0fc00 --status pass --comment "ext" :saddw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.232 SADDW, SADDW2 page C7-2546 line 148929 MATCH x0e201000/mask=xbf20fc00 # CONSTRUCT x0e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $sext@2:16 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw/2@2 # AUNIT --inst x0e601000/mask=xffe0fc00 --status pass --comment "ext" :saddw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.232 SADDW, SADDW2 page C7-2546 line 148929 MATCH x0e201000/mask=xbf20fc00 # CONSTRUCT x0e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $sext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw/2@1 # AUNIT --inst x0e201000/mask=xffe0fc00 --status pass --comment "ext" :saddw Rd_VPR128.8H, 
Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.232 SADDW, SADDW2 page C7-2546 line 148929 MATCH x0e201000/mask=xbf20fc00 # CONSTRUCT x4ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $sext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw2/2@4 # AUNIT --inst x4ea01000/mask=xffe0fc00 --status pass --comment "ext" :saddw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.232 
SADDW, SADDW2 page C7-2546 line 148929 MATCH x0e201000/mask=xbf20fc00 # CONSTRUCT x4e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $sext@2:16 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw2/2@2 # AUNIT --inst x4e601000/mask=xffe0fc00 --status pass --comment "ext" :saddw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.232 SADDW, SADDW2 page C7-2546 line 148929 MATCH x0e201000/mask=xbf20fc00 # CONSTRUCT x4e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $sext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_saddw2/2@1 # AUNIT --inst x4e201000/mask=xffe0fc00 --status pass --comment "ext" :saddw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = 
Rn_VPR128.8H[0,16] + TMPQ2[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ2[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ2[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ2[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ2[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ2[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ2[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x5f00e400/mask=xff80fc00 # CONSTRUCT x5f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2 # AUNIT --inst x5f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" :scvtf Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_scvtf(Rn_FPR64, Imm_shr_imm64:4); } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x5f00e400/mask=xff80fc00 # CONSTRUCT x5f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2 # AUNIT --inst x5f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" :scvtf Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_3031=1 & u=0 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_scvtf(Rn_FPR32, Imm_shr_imm32:4); } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x5f00e400/mask=xff80fc00 # CONSTRUCT x5f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2 # AUNIT --inst x5f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" :scvtf Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_3031=1 & u=0 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_scvtf(Rn_FPR16, Imm_shr_imm16:4); } # C7.2.233 SCVTF (vector, 
fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x4f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@8 # AUNIT --inst x4f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" :scvtf Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_scvtf(Rn_VPR128.2D, Imm_shr_imm64:4, 8:1); } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x0f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@4 # AUNIT --inst x0f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" :scvtf Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_scvtf(Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x4f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@4 # AUNIT --inst x4f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" :scvtf Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_scvtf(Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x0f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@2 # AUNIT --inst x0f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" :scvtf Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & 
Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_scvtf(Rn_VPR64.4H, Imm_shr_imm32:4, 2:1); } # C7.2.233 SCVTF (vector, fixed-point) page C7-2548 line 149051 MATCH x0f00e400/mask=xbf80fc00 # CONSTRUCT x4f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 =NEON_scvtf/2@2 # AUNIT --inst x4f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" :scvtf Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_scvtf(Rn_VPR128.8H, Imm_shr_imm32:4, 2:1); } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x5e21d800/mask=xffbffc00 # CONSTRUCT x5e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x5e21d800/mask=xfffffc00 --status fail --comment "nofpround" :scvtf Rd_FPR32, Rn_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = int2float(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x5e21d800/mask=xffbffc00 # CONSTRUCT x5e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x5e61d800/mask=xfffffc00 --status pass --comment "nofpround" :scvtf Rd_FPR64, Rn_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = int2float(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x0e21d800/mask=xbfbffc00 # CONSTRUCT x4e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@8 # AUNIT --inst x4e61d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :scvtf Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0 & q=1 & 
u=0 & b_2428=0xe & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_scvtf(Rn_VPR128.2D, 8:1); } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x0e21d800/mask=xbfbffc00 # CONSTRUCT x0e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@4 # AUNIT --inst x0e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :scvtf Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_scvtf(Rn_VPR64.2S, 4:1); } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x0e21d800/mask=xbfbffc00 # CONSTRUCT x4e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@4 # AUNIT --inst x4e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :scvtf Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_scvtf(Rn_VPR128.4S, 4:1); } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x5e79d800/mask=xfffffc00 # CONSTRUCT x5e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x5e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Scalar half precision variant :scvtf Rd_FPR16, Rn_FPR16 is b_1031=0b0101111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = int2float(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x0e79d800/mask=xbffffc00 # CONSTRUCT x0e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@2 # AUNIT --inst x0e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Vector half precision variant when Q=0 suf=VPR64.4H :scvtf Rd_VPR64.4H, Rn_VPR64.4H is 
b_31=0 & b_30=0 & b_1029=0b00111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { Rd_VPR64.4H = NEON_scvtf(Rn_VPR64.4H, 2:1); } # C7.2.234 SCVTF (vector, integer) page C7-2551 line 149206 MATCH x0e79d800/mask=xbffffc00 # CONSTRUCT x4e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1@2 # AUNIT --inst x4e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Vector half precision variant when Q=1 suf=VPR128.8H :scvtf Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b00111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { Rd_VPR128.8H = NEON_scvtf(Rn_VPR128.8H, 2:1); } # C7.2.235 SCVTF (scalar, fixed-point) page C7-2554 line 149390 MATCH x1e020000/mask=x7f3f0000 # CONSTRUCT x1ec28000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float:2 FBits16 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_scvtf/2 # AUNIT --inst x1ec28000/mask=xffff8000 --status noqemu --comment "nofpround" # 32-bit to half-precision variant when sf == 0 && type == 11 :scvtf Rd_FPR16, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits16 & Rn_GPR32 & Rd_FPR16 & Zd { local tmp1:2 = int2float(Rn_GPR32); Rd_FPR16 = tmp1 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.235 SCVTF (scalar, fixed-point) page C7-2554 line 149390 MATCH x1e020000/mask=x7f3f0000 # CONSTRUCT x9ec20000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float:2 FBits16 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_scvtf/2 # AUNIT --inst x9ec20000/mask=xffff0000 --status noqemu --comment "nofpround" # 64-bit to half-precision variant when sf == 1 && type == 11 :scvtf Rd_FPR16, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits16 & Rn_GPR64 & Rd_FPR16 & Zd { local tmp1:2 = int2float(Rn_GPR64); Rd_FPR16 = tmp1 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.235 SCVTF (scalar, 
fixed-point) page C7-2554 line 149390 MATCH x1e020000/mask=x7f3f0000 # CONSTRUCT x1e428000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float:8 FBits64 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_scvtf/2 # AUNIT --inst x1e428000/mask=xffff8000 --status pass --comment "nofpround" :scvtf Rd_FPR64, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits64 & Rn_GPR32 & Rd_FPR64 & Zd { local tmp1:8 = int2float(Rn_GPR32); Rd_FPR64 = tmp1 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.235 SCVTF (scalar, fixed-point) page C7-2554 line 149390 MATCH x1e020000/mask=x7f3f0000 # CONSTRUCT x1e028000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float FBits32 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_scvtf/2 # AUNIT --inst x1e028000/mask=xffff8000 --status fail --comment "nofpround" :scvtf Rd_FPR32, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=2 & b_15=1 & FBitsOp & FBits32 & Rn_GPR32 & Rd_FPR32 & Zd { local tmp1:4 = int2float(Rn_GPR32); Rd_FPR32 = tmp1 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.235 SCVTF (scalar, fixed-point) page C7-2554 line 149390 MATCH x1e020000/mask=x7f3f0000 # CONSTRUCT x9e420000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float FBits64 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_scvtf/2 # AUNIT --inst x9e420000/mask=xffff0000 --status fail --comment "nofpround" :scvtf Rd_FPR64, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits64 & Rn_GPR64 & Rd_FPR64 & Zd { local tmp1:8 = int2float(Rn_GPR64); Rd_FPR64 = tmp1 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.235 SCVTF (scalar, fixed-point) page C7-2554 line 149390 MATCH x1e020000/mask=x7f3f0000 # CONSTRUCT x9e020000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 int2float:4 FBits32 =f/ # 
SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_scvtf/2 # AUNIT --inst x9e020000/mask=xffff0000 --status fail --comment "nofpround"
# NOTE(review): dropped the redundant "& Rd_FPR64" constraint that the generator
# emitted here — Rd_FPR32 already constrains the same Rd register field, and no
# sibling SCVTF (scalar, fixed-point) constructor lists two destination symbols.
:scvtf Rd_FPR32, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=2 & FBitsOp & FBits32 & Rn_GPR64 & Rd_FPR32 & Zd { local tmp1:4 = int2float(Rn_GPR64); Rd_FPR32 = tmp1 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.236 SCVTF (scalar, integer) page C7-2556 line 149525 MATCH x1e220000/mask=x7f3ffc00 # CONSTRUCT x1ee20000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x1ee20000/mask=xfffffc00 --status noqemu --comment "nofpround" :scvtf Rd_FPR16, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR16 & Zd { Rd_FPR16 = int2float(Rn_GPR32); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.236 SCVTF (scalar, integer) page C7-2556 line 149525 MATCH x1e220000/mask=x7f3ffc00 # CONSTRUCT x9ee20000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x9ee20000/mask=xfffffc00 --status noqemu --comment "nofpround" :scvtf Rd_FPR16, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR16 & Zd { Rd_FPR16 = int2float(Rn_GPR64); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.236 SCVTF (scalar, integer) page C7-2556 line 149525 MATCH x1e220000/mask=x7f3ffc00 # CONSTRUCT x1e620000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x1e620000/mask=xfffffc00 --status pass --comment "nofpround" :scvtf Rd_FPR64, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR64 & Zd { Rd_FPR64 = int2float(Rn_GPR32); zext_zd(Zd); # zero upper 24 bytes
of Zd } # C7.2.236 SCVTF (scalar, integer) page C7-2556 line 149525 MATCH x1e220000/mask=x7f3ffc00 # CONSTRUCT x9e620000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x9e620000/mask=xfffffc00 --status pass --comment "nofpround" :scvtf Rd_FPR64, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR64 & Zd { Rd_FPR64 = int2float(Rn_GPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.236 SCVTF (scalar, integer) page C7-2556 line 149525 MATCH x1e220000/mask=x7f3ffc00 # CONSTRUCT x1e220000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x1e220000/mask=xfffffc00 --status fail --comment "nofpround" :scvtf Rd_FPR32, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR32 & Rd_FPR32 & Zd { Rd_FPR32 = int2float(Rn_GPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.236 SCVTF (scalar, integer) page C7-2556 line 149525 MATCH x1e220000/mask=x7f3ffc00 # CONSTRUCT x9e220000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =int2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_scvtf/1 # AUNIT --inst x9e220000/mask=xfffffc00 --status fail --comment "nofpround" :scvtf Rd_FPR32, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=2 & b_1015=0x0 & Rn_GPR64 & Rd_FPR32 & Zd { Rd_FPR32 = int2float(Rn_GPR64); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.237 SDOT (by element) page C7-2558 line 149653 MATCH x0f00e000/mask=xbf00f400 # CONSTRUCT x0f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_sdot/2@1 # AUNIT --inst x0f80e000/mask=xffc0f400 --status noqemu # Vector variant when Q=0 Ta=64.2S Tb=64.8B :sdot Rd_VPR64.2S, Rn_VPR64.8B, Re_VPR128.B.vIndex is b_31=0 & b_30=0 & 
b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR64.2S & Rn_VPR64.8B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR64.2S = NEON_sdot(Rn_VPR64.8B, tmp1, 1:1); } # C7.2.237 SDOT (by element) page C7-2558 line 149653 MATCH x0f00e000/mask=xbf00f400 # CONSTRUCT x4f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_sdot/2@1 # AUNIT --inst x4f80e000/mask=xffc0f400 --status noqemu # Vector variant when Q=1 Ta=128.4S Tb=128.16B :sdot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.B.vIndex is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR128.4S & Rn_VPR128.16B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = NEON_sdot(Rn_VPR128.16B, tmp1, 1:1); } # C7.2.238 SDOT (vector) page C7-2560 line 149755 MATCH x0e009400/mask=xbf20fc00 # CONSTRUCT x0e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sdot/2@1 # AUNIT --inst x0e809400/mask=xffe0fc00 --status noqemu # Three registers of the same type variant when Q=0 Ta=64.2S Tb=64.8B :sdot Rd_VPR64.2S, Rn_VPR64.8B, Rm_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR64.2S & Rn_VPR64.8B & Rm_VPR64.8B & Zd { Rd_VPR64.2S = NEON_sdot(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.238 SDOT (vector) page C7-2560 line 149755 MATCH x0e009400/mask=xbf20fc00 # CONSTRUCT x4e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sdot/2@1 # AUNIT --inst x4e809400/mask=xffe0fc00 --status noqemu # Three registers of the same type variant when Q=1 Ta=128.4S Tb=128.16B :sdot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR128.4S & Rn_VPR128.16B & Rm_VPR128.16B & Zd { Rd_VPR128.4S = NEON_sdot(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.239 SHA1C 
page C7-2562 line 149854 MATCH x5e000000/mask=xffe0fc00 # CONSTRUCT x5e000000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1c/3@4 # AUNIT --inst x5e000000/mask=xffe0fc00 --status noqemu :sha1c Rd_VPR128, Rn_FPR32, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000000 & Rn_FPR32 & Rd_VPR128 & Zd { Rd_VPR128 = NEON_sha1c(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); } # C7.2.240 SHA1H page C7-2563 line 149925 MATCH x5e280800/mask=xfffffc00 # CONSTRUCT x5e280800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 30:1 =<< # SMACRO(pseudo) ARG1 ARG2 =NEON_sha1h/1 # AUNIT --inst x5e280800/mask=xfffffc00 --status noqemu
# NOTE(review): ARM ARM C7.2.240 defines SHA1H as a fixed rotate, Vd = ROL(Vn, 30).
# The generated "Rn_FPR32 << 30" was a plain shift that discarded the upper 30 bits;
# implement the 32-bit rotate as (x << 30) | (x >> 2) instead.
:sha1h Rd_FPR32, Rn_FPR32 is b_2431=0b01011110 & b_2223=0b00 & b_1721=0b10100 & b_1216=0b00000 & b_1011=0b10 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = (Rn_FPR32 << 30) | (Rn_FPR32 >> 2); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.241 SHA1M page C7-2564 line 149984 MATCH x5e002000/mask=xffe0fc00 # CONSTRUCT x5e002000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1m/3@4 # AUNIT --inst x5e002000/mask=xffe0fc00 --status noqemu :sha1m Rd_VPR128, Rn_FPR32, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001000 & Rn_FPR32 & Rd_VPR128 & Zd { Rd_VPR128 = NEON_sha1m(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); } # C7.2.242 SHA1P page C7-2565 line 150055 MATCH x5e001000/mask=xffe0fc00 # CONSTRUCT x5e001000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha1p/3@4 # AUNIT --inst x5e001000/mask=xffe0fc00 --status noqemu :sha1p Rd_VPR128, Rn_FPR32, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b000100 & Rn_FPR32 & Rd_VPR128 & Zd { Rd_VPR128 = NEON_sha1p(Rd_VPR128, Rn_FPR32, Rm_VPR128.4S, 4:1); } # C7.2.243 SHA1SU0 page C7-2566 line 150126 MATCH x5e003000/mask=xffe0fc00 # CONSTRUCT x5e003000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1
ARG2 ARG3 &=NEON_sha1su0/3@4 # AUNIT --inst x5e003000/mask=xffe0fc00 --status noqemu :sha1su0 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b001100 & Rn_VPR128.4S & Rd_VPR128.4S & Rd_VPR128 & Zd { Rd_VPR128.4S = NEON_sha1su0(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.244 SHA1SU1 page C7-2567 line 150194 MATCH x5e281800/mask=xfffffc00 # CONSTRUCT x5e281800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sha1su1/2@4 # AUNIT --inst x5e281800/mask=xfffffc00 --status noqemu :sha1su1 Rd_VPR128.4S, Rn_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b000110 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sha1su1(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); } # C7.2.245 SHA256H2 page C7-2568 line 150260 MATCH x5e005000/mask=xffe0fc00 # CONSTRUCT x5e005000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha256h2/3@4 # AUNIT --inst x5e005000/mask=xffe0fc00 --status noqemu :sha256h2 Rd_VPR128, Rn_VPR128, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010100 & Rn_VPR128 & Rd_VPR128 & Zd { Rd_VPR128 = NEON_sha256h2(Rd_VPR128, Rn_VPR128, Rm_VPR128.4S, 4:1); } # C7.2.246 SHA256H page C7-2569 line 150322 MATCH x5e004000/mask=xffe0fc00 # CONSTRUCT x5e004000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha256h/3@4 # AUNIT --inst x5e004000/mask=xffe0fc00 --status noqemu :sha256h Rd_VPR128, Rn_VPR128, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b010000 & Rn_VPR128 & Rd_VPR128 & Zd { Rd_VPR128 = NEON_sha256h(Rd_VPR128, Rn_VPR128, Rm_VPR128.4S, 4:1); } # C7.2.247 SHA256SU0 page C7-2570 line 150384 MATCH x5e282800/mask=xfffffc00 # CONSTRUCT x5e282800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sha256su0/2@4 # AUNIT --inst x5e282800/mask=xfffffc00 --status noqemu 
:sha256su0 Rd_VPR128.4S, Rn_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=1 & b_1620=0b01000 & b_1015=0b001010 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sha256su0(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); } # C7.2.248 SHA256SU1 page C7-2571 line 150452 MATCH x5e006000/mask=xffe0fc00 # CONSTRUCT x5e006000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha256su1/3@4 # AUNIT --inst x5e006000/mask=xffe0fc00 --status noqemu :sha256su1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2431=0b01011110 & b_2223=0b00 & b_2121=0 & Rm_VPR128.4S & b_1015=0b011000 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sha256su1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.249 SHA512H page C7-2573 line 150543 MATCH xce608000/mask=xffe0fc00 # CONSTRUCT xce608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha512h/3@8 # AUNIT --inst xce608000/mask=xffe0fc00 --status noqemu :sha512h Rd_VPR128, Rn_VPR128, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100000 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D & Zd { Rd_VPR128 = NEON_sha512h(Rd_VPR128, Rn_VPR128, Rm_VPR128.2D, 8:1); } # C7.2.250 SHA512H2 page C7-2575 line 150631 MATCH xce608400/mask=xffe0fc00 # CONSTRUCT xce608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha512h2/3@8 # AUNIT --inst xce608400/mask=xffe0fc00 --status noqemu :sha512h2 Rd_VPR128, Rn_VPR128, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100001 & Rd_VPR128 & Rn_VPR128 & Rm_VPR128.2D & Zd { Rd_VPR128 = NEON_sha512h2(Rd_VPR128, Rn_VPR128, Rm_VPR128.2D, 8:1); } # C7.2.251 SHA512SU0 page C7-2577 line 150719 MATCH xcec08000/mask=xfffffc00 # CONSTRUCT xcec08000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sha512su0/2@8 # AUNIT --inst xcec08000/mask=xfffffc00 --status noqemu :sha512su0 Rd_VPR128.2D, Rn_VPR128.2D is b_1031=0b1100111011000000100000 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { Rd_VPR128.2D = 
NEON_sha512su0(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); } # C7.2.252 SHA512SU1 page C7-2578 line 150789 MATCH xce608800/mask=xffe0fc00 # CONSTRUCT xce608800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sha512su1/3@8 # AUNIT --inst xce608800/mask=xffe0fc00 --status noqemu :sha512su1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_2131=0b11001110011 & b_1015=0b100010 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sha512su1(Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.253 SHADD page C7-2580 line 150875 MATCH x0e200400/mask=xbf20fc00 # CONSTRUCT x4e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@1 # AUNIT --inst x4e200400/mask=xffe0fc00 --status nopcodeop :shadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_shadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.253 SHADD page C7-2580 line 150875 MATCH x0e200400/mask=xbf20fc00 # CONSTRUCT x0ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@4 # AUNIT --inst x0ea00400/mask=xffe0fc00 --status nopcodeop :shadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_shadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.253 SHADD page C7-2580 line 150875 MATCH x0e200400/mask=xbf20fc00 # CONSTRUCT x0e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@2 # AUNIT --inst x0e600400/mask=xffe0fc00 --status nopcodeop :shadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_shadd(Rn_VPR64.4H, 
Rm_VPR64.4H, 2:1); } # C7.2.253 SHADD page C7-2580 line 150875 MATCH x0e200400/mask=xbf20fc00 # CONSTRUCT x4ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@4 # AUNIT --inst x4ea00400/mask=xffe0fc00 --status nopcodeop :shadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_shadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.253 SHADD page C7-2580 line 150875 MATCH x0e200400/mask=xbf20fc00 # CONSTRUCT x0e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@1 # AUNIT --inst x0e200400/mask=xffe0fc00 --status nopcodeop :shadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_shadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.253 SHADD page C7-2580 line 150875 MATCH x0e200400/mask=xbf20fc00 # CONSTRUCT x4e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shadd/2@2 # AUNIT --inst x4e600400/mask=xffe0fc00 --status nopcodeop :shadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_shadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.254 SHL page C7-2582 line 150977 MATCH x5f005400/mask=xff80fc00 # CONSTRUCT x5f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2 # AUNIT --inst x5f405400/mask=xffc0fc00 --status nopcodeop :shl Rd_FPR64, Rn_FPR64, Imm_imm0_63 is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_shl(Rn_FPR64, Imm_imm0_63:1); } # C7.2.254 SHL page C7-2582 line 150977 MATCH 
x0f005400/mask=xbf80fc00 # CONSTRUCT x4f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:1 =$<<@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@1 # AUNIT --inst x4f085400/mask=xfff8fc00 --status pass :shl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { local tmp1:1 = Imm_uimm3; # simd infix Rd_VPR128.16B = Rn_VPR128.16B << tmp1 on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] << tmp1; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] << tmp1; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] << tmp1; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] << tmp1; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] << tmp1; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] << tmp1; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] << tmp1; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] << tmp1; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] << tmp1; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] << tmp1; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] << tmp1; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] << tmp1; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] << tmp1; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] << tmp1; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] << tmp1; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # CONSTRUCT x4f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@8 # AUNIT --inst x4f405400/mask=xffc0fc00 --status pass :shl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = Imm_imm0_63; # simd infix Rd_VPR128.2D = Rn_VPR128.2D << tmp1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] << tmp1; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] << tmp1; 
zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # CONSTRUCT x0f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@4 # AUNIT --inst x0f205400/mask=xffe0fc00 --status pass :shl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local tmp1:4 = Imm_uimm5; # simd infix Rd_VPR64.2S = Rn_VPR64.2S << tmp1 on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] << tmp1; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] << tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # CONSTRUCT x0f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@2 # AUNIT --inst x0f105400/mask=xfff0fc00 --status pass :shl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { local tmp1:2 = Imm_uimm4; # simd infix Rd_VPR64.4H = Rn_VPR64.4H << tmp1 on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] << tmp1; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] << tmp1; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] << tmp1; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] << tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # CONSTRUCT x4f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@4 # AUNIT --inst x4f205400/mask=xffe0fc00 --status pass :shl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local tmp1:4 = Imm_uimm5; # simd infix Rd_VPR128.4S = Rn_VPR128.4S << tmp1 on lane size 
4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] << tmp1; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] << tmp1; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] << tmp1; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # CONSTRUCT x0f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:1 =$<<@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@1 # AUNIT --inst x0f085400/mask=xfff8fc00 --status pass :shl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { local tmp1:1 = Imm_uimm3; # simd infix Rd_VPR64.8B = Rn_VPR64.8B << tmp1 on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] << tmp1; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] << tmp1; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] << tmp1; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] << tmp1; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] << tmp1; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] << tmp1; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] << tmp1; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] << tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.254 SHL page C7-2582 line 150977 MATCH x0f005400/mask=xbf80fc00 # CONSTRUCT x4f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_shl/2@2 # AUNIT --inst x4f105400/mask=xfff0fc00 --status pass :shl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { local tmp1:2 = Imm_uimm4; # simd infix Rd_VPR128.8H = Rn_VPR128.8H << tmp1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] << tmp1; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] << tmp1; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] << tmp1; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] << tmp1; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] << tmp1; Rd_VPR128.8H[80,16] = 
Rn_VPR128.8H[80,16] << tmp1; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] << tmp1; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] << tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-2585 line 151125 MATCH x2e213800/mask=xbf3ffc00 # CONSTRUCT x2ea13800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 zext:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll/2@4 # AUNIT --inst x2ea13800/mask=xfffffc00 --status pass --comment "ext" :shll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm_exact32 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & Imm_uimm_exact32 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); local tmp2:8 = zext(Imm_uimm_exact32); # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-2585 line 151125 MATCH x2e213800/mask=xbf3ffc00 # CONSTRUCT x2e613800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll/2@2 # AUNIT --inst x2e613800/mask=xfffffc00 --status pass --comment "ext" :shll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm_exact16 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 << Imm_uimm_exact16:4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] << Imm_uimm_exact16:4; Rd_VPR128.4S[32,32] = TMPQ1[32,32] << Imm_uimm_exact16:4; Rd_VPR128.4S[64,32] = TMPQ1[64,32] << 
Imm_uimm_exact16:4; Rd_VPR128.4S[96,32] = TMPQ1[96,32] << Imm_uimm_exact16:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-2585 line 151125 MATCH x2e213800/mask=xbf3ffc00 # CONSTRUCT x2e213800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll/2@1 # AUNIT --inst x2e213800/mask=xfffffc00 --status pass --comment "ext" :shll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm_exact8 is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 << Imm_uimm_exact8:2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[16,16] = TMPQ1[16,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[32,16] = TMPQ1[32,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[48,16] = TMPQ1[48,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[64,16] = TMPQ1[64,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[80,16] = TMPQ1[80,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[96,16] = TMPQ1[96,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[112,16] = TMPQ1[112,16] << Imm_uimm_exact8:2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-2585 line 151125 MATCH x2e213800/mask=xbf3ffc00 # CONSTRUCT x6ea13800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 zext:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll2/2@4 # AUNIT --inst x6ea13800/mask=xfffffc00 --status pass --comment "ext" :shll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm_exact32 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & 
Imm_uimm_exact32 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); local tmp3:8 = zext(Imm_uimm_exact32); # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-2585 line 151125 MATCH x2e213800/mask=xbf3ffc00 # CONSTRUCT x6e613800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll2/2@2 # AUNIT --inst x6e613800/mask=xfffffc00 --status pass --comment "ext" :shll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm_exact16 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & Imm_uimm_exact16 & b_1721=0x10 & b_1216=0x13 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 << Imm_uimm_exact16:4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] << Imm_uimm_exact16:4; Rd_VPR128.4S[32,32] = TMPQ2[32,32] << Imm_uimm_exact16:4; Rd_VPR128.4S[64,32] = TMPQ2[64,32] << Imm_uimm_exact16:4; Rd_VPR128.4S[96,32] = TMPQ2[96,32] << Imm_uimm_exact16:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.255 SHLL, SHLL2 page C7-2585 line 151125 MATCH x2e213800/mask=xbf3ffc00 # CONSTRUCT x6e213800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shll2/2@1 # AUNIT --inst x6e213800/mask=xfffffc00 --status pass --comment "ext" :shll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm_exact8 is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & Imm_uimm_exact8 & b_1721=0x10 & 
b_1216=0x13 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 << Imm_uimm_exact8:2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[16,16] = TMPQ2[16,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[32,16] = TMPQ2[32,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[48,16] = TMPQ2[48,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[64,16] = TMPQ2[64,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[80,16] = TMPQ2[80,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[96,16] = TMPQ2[96,16] << Imm_uimm_exact8:2; Rd_VPR128.8H[112,16] = TMPQ2[112,16] << Imm_uimm_exact8:2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # CONSTRUCT x0f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 =$zext@8:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn/2@8 # AUNIT --inst x0f208400/mask=xffe0fc00 --status pass --comment "ext" :shrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { local tmp1:8 = zext(Imm_shr_imm32); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd resize Rd_VPR64.2S = zext(TMPQ1) (lane size 8 to 4) Rd_VPR64.2S[0,32] = TMPQ1[0,32]; Rd_VPR64.2S[32,32] = TMPQ1[64,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # CONSTRUCT x0f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:4 $>>@4 
=$zext@4:16 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn/2@4 # AUNIT --inst x0f108400/mask=xfff0fc00 --status pass --comment "ext" :shrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { # simd infix TMPQ1 = Rn_VPR128.4S >> Imm_shr_imm16:4 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> Imm_shr_imm16:4; TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> Imm_shr_imm16:4; TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> Imm_shr_imm16:4; TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> Imm_shr_imm16:4; # simd resize Rd_VPR64.4H = zext(TMPQ1) (lane size 4 to 2) Rd_VPR64.4H[0,16] = TMPQ1[0,16]; Rd_VPR64.4H[16,16] = TMPQ1[32,16]; Rd_VPR64.4H[32,16] = TMPQ1[64,16]; Rd_VPR64.4H[48,16] = TMPQ1[96,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # CONSTRUCT x0f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $>>@2 =$zext@2:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn/2@2 # AUNIT --inst x0f088400/mask=xfff8fc00 --status pass --comment "ext" :shrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm8:2 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm8:2; TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm8:2; TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm8:2; TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm8:2; TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm8:2; TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm8:2; TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm8:2; TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm8:2; # simd resize Rd_VPR64.8B = zext(TMPQ1) (lane size 2 to 1) Rd_VPR64.8B[0,8] = TMPQ1[0,8]; Rd_VPR64.8B[8,8] = TMPQ1[16,8]; Rd_VPR64.8B[16,8] = TMPQ1[32,8]; Rd_VPR64.8B[24,8] = TMPQ1[48,8]; 
Rd_VPR64.8B[32,8] = TMPQ1[64,8]; Rd_VPR64.8B[40,8] = TMPQ1[80,8]; Rd_VPR64.8B[48,8] = TMPQ1[96,8]; Rd_VPR64.8B[56,8] = TMPQ1[112,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # CONSTRUCT x4f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 $zext@8:8 1:1 &=$copy # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn2/2@8 # AUNIT --inst x4f208400/mask=xffe0fc00 --status pass --comment "ext" :shrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { local tmp1:8 = zext(Imm_shr_imm32); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd resize TMPD2 = zext(TMPQ1) (lane size 8 to 4) TMPD2[0,32] = TMPQ1[0,32]; TMPD2[32,32] = TMPQ1[64,32]; # simd copy Rd_VPR128.4S element 1:1 = TMPD2 (lane size 8) Rd_VPR128.4S[64,64] = TMPD2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # CONSTRUCT x4f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:4 $>>@4 $zext@4:8 1:1 &=$copy # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn2/2@4 # AUNIT --inst x4f108400/mask=xfff0fc00 --status pass --comment "ext" :shrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.4S >> Imm_shr_imm16:4 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> Imm_shr_imm16:4; TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> Imm_shr_imm16:4; TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> Imm_shr_imm16:4; TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> Imm_shr_imm16:4; # simd resize TMPD2 = zext(TMPQ1) (lane size 4 to 2) TMPD2[0,16] = TMPQ1[0,16]; TMPD2[16,16] = TMPQ1[32,16]; TMPD2[32,16] = 
TMPQ1[64,16]; TMPD2[48,16] = TMPQ1[96,16]; # simd copy Rd_VPR128.8H element 1:1 = TMPD2 (lane size 8) Rd_VPR128.8H[64,64] = TMPD2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.256 SHRN, SHRN2 page C7-2587 line 151244 MATCH x0f008400/mask=xbf80fc00 # CONSTRUCT x4f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $>>@2 $zext@2:8 1:1 &=$copy # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shrn2/2@2 # AUNIT --inst x4f088400/mask=xfff8fc00 --status pass --comment "ext" :shrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm8:2 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm8:2; TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm8:2; TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm8:2; TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm8:2; TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm8:2; TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm8:2; TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm8:2; TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm8:2; # simd resize TMPD2 = zext(TMPQ1) (lane size 2 to 1) TMPD2[0,8] = TMPQ1[0,8]; TMPD2[8,8] = TMPQ1[16,8]; TMPD2[16,8] = TMPQ1[32,8]; TMPD2[24,8] = TMPQ1[48,8]; TMPD2[32,8] = TMPQ1[64,8]; TMPD2[40,8] = TMPQ1[80,8]; TMPD2[48,8] = TMPQ1[96,8]; TMPD2[56,8] = TMPQ1[112,8]; # simd copy Rd_VPR128.16B element 1:1 = TMPD2 (lane size 8) Rd_VPR128.16B[64,64] = TMPD2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.257 SHSUB page C7-2589 line 151368 MATCH x0e202400/mask=xbf20fc00 # CONSTRUCT x4e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@1 # AUNIT --inst x4e202400/mask=xffe0fc00 --status nopcodeop :shsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & 
Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_shsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.257 SHSUB page C7-2589 line 151368 MATCH x0e202400/mask=xbf20fc00 # CONSTRUCT x0ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@4 # AUNIT --inst x0ea02400/mask=xffe0fc00 --status nopcodeop :shsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_shsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.257 SHSUB page C7-2589 line 151368 MATCH x0e202400/mask=xbf20fc00 # CONSTRUCT x0e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@2 # AUNIT --inst x0e602400/mask=xffe0fc00 --status nopcodeop :shsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_shsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.257 SHSUB page C7-2589 line 151368 MATCH x0e202400/mask=xbf20fc00 # CONSTRUCT x4ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@4 # AUNIT --inst x4ea02400/mask=xffe0fc00 --status nopcodeop :shsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_shsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.257 SHSUB page C7-2589 line 151368 MATCH x0e202400/mask=xbf20fc00 # CONSTRUCT x0e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@1 # AUNIT --inst x0e202400/mask=xffe0fc00 --status nopcodeop :shsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { 
Rd_VPR64.8B = NEON_shsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.257 SHSUB page C7-2589 line 151368 MATCH x0e202400/mask=xbf20fc00 # CONSTRUCT x4e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_shsub/2@2 # AUNIT --inst x4e602400/mask=xffe0fc00 --status nopcodeop :shsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_shsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x6f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@1 # AUNIT --inst x6f085400/mask=xfff8fc00 --status nopcodeop :sli Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sli(Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3:1, 1:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x6f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@8 # AUNIT --inst x6f405400/mask=xffc0fc00 --status nopcodeop :sli Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sli(Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63:1, 8:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x2f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@4 # AUNIT --inst x2f205400/mask=xffe0fc00 --status nopcodeop :sli Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sli(Rd_VPR64.2S, Rn_VPR64.2S, 
Imm_uimm5:1, 4:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x2f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@2 # AUNIT --inst x2f105400/mask=xfff0fc00 --status nopcodeop :sli Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sli(Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4:1, 2:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x6f205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@4 # AUNIT --inst x6f205400/mask=xffe0fc00 --status nopcodeop :sli Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sli(Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5:1, 4:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x2f085400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@1 # AUNIT --inst x2f085400/mask=xfff8fc00 --status nopcodeop :sli Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sli(Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3:1, 1:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH x2f005400/mask=xbf80fc00 # CONSTRUCT x6f105400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3@2 # AUNIT --inst x6f105400/mask=xfff0fc00 --status nopcodeop :sli Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sli(Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4:1, 2:1); } # C7.2.258 SLI page C7-2591 line 151468 MATCH 
x7f005400/mask=xff80fc00 # CONSTRUCT x7f405400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sli/3 # AUNIT --inst x7f405400/mask=xffc0fc00 --status nopcodeop :sli Rd_VPR64, Rn_VPR64, Imm_uimm5 is b_2331=0b011111110 & b_22=1 & b_1015=0b010101 & Rd_VPR64 & Rn_VPR64 & Imm_uimm5 & Zd { Rd_VPR64 = NEON_sli(Rd_VPR64, Rn_VPR64, Imm_uimm5:1); } # C7.2.259 SM3PARTW1 page C7-2594 line 151635 MATCH xce60c000/mask=xffe0fc00 # CONSTRUCT xce60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3partw1/3@4 # AUNIT --inst xce60c000/mask=xffe0fc00 --status noqemu :sm3partw1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2131=0b11001110011 & b_1015=0b110000 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sm3partw1(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.260 SM3PARTW2 page C7-2596 line 151723 MATCH xce60c400/mask=xffe0fc00 # CONSTRUCT xce60c400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3partw2/3@4 # AUNIT --inst xce60c400/mask=xffe0fc00 --status noqemu :sm3partw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2131=0b11001110011 & b_1015=0b110001 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sm3partw2(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.261 SM3SS1 page C7-2598 line 151808 MATCH xce400000/mask=xffe08000 # CONSTRUCT xce400000/mask=xffe08000 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_sm3ss1/3@4 # AUNIT --inst xce400000/mask=xffe08000 --status noqemu :sm3ss1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, Ra_VPR128.4S is b_2131=0b11001110010 & b_15=0 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Ra_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sm3ss1(Rn_VPR128.4S, Rm_VPR128.4S, Ra_VPR128.4S, 4:1); } # C7.2.247 SM3TT1A page C7-1529 line 88534 KEEPWITH sm3imm2: b_1213 is b_1213 { export *[const]:4 b_1213; } Re_VPR128.S.sm3imm2: Re_VPR128.S^"["^sm3imm2^"]" is Re_VPR128.S & 
sm3imm2 { export Re_VPR128.S; } # C7.2.262 SM3TT1A page C7-2600 line 151893 MATCH xce408000/mask=xffe0cc00 # CONSTRUCT xce408000/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt1a/3@4 # AUNIT --inst xce408000/mask=xffe0cc00 --status noqemu :sm3tt1a Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b00 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.4S = NEON_sm3tt1a(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); } # C7.2.263 SM3TT1B page C7-2602 line 151999 MATCH xce408400/mask=xffe0cc00 # CONSTRUCT xce408400/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt1b/3@4 # AUNIT --inst xce408400/mask=xffe0cc00 --status noqemu :sm3tt1b Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b01 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.4S = NEON_sm3tt1b(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); } # C7.2.264 SM3TT2A page C7-2604 line 152105 MATCH xce408800/mask=xffe0cc00 # CONSTRUCT xce408800/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt2a/3@4 # AUNIT --inst xce408800/mask=xffe0cc00 --status noqemu :sm3tt2a Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b10 & Rd_VPR128.4S & Rn_VPR128.4S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.4S = NEON_sm3tt2a(Rd_VPR128.4S, Rn_VPR128.4S, tmp1, 4:1); } # C7.2.265 SM3TT2B page C7-2606 line 152210 MATCH xce408c00/mask=xffe0cc00 # CONSTRUCT xce408c00/mask=xffe0cc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm3tt2b/3@4 # AUNIT --inst xce408c00/mask=xffe0cc00 --status noqemu :sm3tt2b Rd_VPR128.S, Rn_VPR128.S, 
Re_VPR128.S.sm3imm2 is b_2131=0b11001110010 & b_1415=0b10 & b_1011=0b11 & Rd_VPR128.S & Rn_VPR128.S & Re_VPR128.S.sm3imm2 & Re_VPR128.S & sm3imm2 & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, sm3imm2:1); Rd_VPR128.S = NEON_sm3tt2b(Rd_VPR128.S, Rn_VPR128.S, tmp1, 4:1); } # C7.2.266 SM4E page C7-2608 line 152315 MATCH xcec08400/mask=xfffffc00 # CONSTRUCT xcec08400/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sm4e/2@4 # AUNIT --inst xcec08400/mask=xfffffc00 --status noqemu :sm4e Rd_VPR128.4S, Rn_VPR128.4S is b_1031=0b1100111011000000100001 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sm4e(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); } # C7.2.267 SM4EKEY page C7-2610 line 152409 MATCH xce60c800/mask=xffe0fc00 # CONSTRUCT xce60c800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sm4ekey/3@4 # AUNIT --inst xce60c800/mask=xffe0fc00 --status noqemu :sm4ekey Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_2131=0b11001110011 & b_1015=0b110010 & Rd_VPR128.4S & Rn_VPR128.4S & Rm_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sm4ekey(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.268 SMAX page C7-2612 line 152509 MATCH x0e206400/mask=xbf20fc00 # CONSTRUCT x4e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@1 # AUNIT --inst x4e206400/mask=xffe0fc00 --status nopcodeop :smax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_smax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.268 SMAX page C7-2612 line 152509 MATCH x0e206400/mask=xbf20fc00 # CONSTRUCT x0ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@4 # AUNIT --inst x0ea06400/mask=xffe0fc00 --status nopcodeop :smax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & 
b_2121=1 & Rm_VPR64.2S & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_smax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.268 SMAX page C7-2612 line 152509 MATCH x0e206400/mask=xbf20fc00 # CONSTRUCT x0e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@2 # AUNIT --inst x0e606400/mask=xffe0fc00 --status nopcodeop :smax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_smax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.268 SMAX page C7-2612 line 152509 MATCH x0e206400/mask=xbf20fc00 # CONSTRUCT x4ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@4 # AUNIT --inst x4ea06400/mask=xffe0fc00 --status nopcodeop :smax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_smax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.268 SMAX page C7-2612 line 152509 MATCH x0e206400/mask=xbf20fc00 # CONSTRUCT x0e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@1 # AUNIT --inst x0e206400/mask=xffe0fc00 --status nopcodeop :smax Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_smax(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.268 SMAX page C7-2612 line 152509 MATCH x0e206400/mask=xbf20fc00 # CONSTRUCT x4e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smax/2@2 # AUNIT --inst x4e606400/mask=xffe0fc00 --status nopcodeop :smax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xc & b_1010=1 
& Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_smax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.269 SMAXP page C7-2614 line 152611 MATCH x0e20a400/mask=xbf20fc00 # CONSTRUCT x4e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@1 # AUNIT --inst x4e20a400/mask=xffe0fc00 --status nopcodeop :smaxp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_smaxp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.269 SMAXP page C7-2614 line 152611 MATCH x0e20a400/mask=xbf20fc00 # CONSTRUCT x0ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@4 # AUNIT --inst x0ea0a400/mask=xffe0fc00 --status nopcodeop :smaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_smaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.269 SMAXP page C7-2614 line 152611 MATCH x0e20a400/mask=xbf20fc00 # CONSTRUCT x0e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@2 # AUNIT --inst x0e60a400/mask=xffe0fc00 --status nopcodeop :smaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_smaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.269 SMAXP page C7-2614 line 152611 MATCH x0e20a400/mask=xbf20fc00 # CONSTRUCT x4ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@4 # AUNIT --inst x4ea0a400/mask=xffe0fc00 --status nopcodeop :smaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S 
& Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_smaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.269 SMAXP page C7-2614 line 152611 MATCH x0e20a400/mask=xbf20fc00 # CONSTRUCT x0e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@1 # AUNIT --inst x0e20a400/mask=xffe0fc00 --status nopcodeop :smaxp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_smaxp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.269 SMAXP page C7-2614 line 152611 MATCH x0e20a400/mask=xbf20fc00 # CONSTRUCT x4e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smaxp/2@2 # AUNIT --inst x4e60a400/mask=xffe0fc00 --status nopcodeop :smaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_smaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.270 SMAXV page C7-2616 line 152715 MATCH x0e30a800/mask=xbf3ffc00 # CONSTRUCT x4e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@1 # AUNIT --inst x4e30a800/mask=xfffffc00 --status nopcodeop :smaxv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_smaxv(Rn_VPR128.16B, 1:1); } # C7.2.270 SMAXV page C7-2616 line 152715 MATCH x0e30a800/mask=xbf3ffc00 # CONSTRUCT x0e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@1 # AUNIT --inst x0e30a800/mask=xfffffc00 --status nopcodeop :smaxv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_smaxv(Rn_VPR64.8B, 1:1); } # C7.2.270 SMAXV page C7-2616 line 152715 MATCH 
x0e30a800/mask=xbf3ffc00 # CONSTRUCT x0e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@2 # AUNIT --inst x0e70a800/mask=xfffffc00 --status nopcodeop :smaxv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_smaxv(Rn_VPR64.4H, 2:1); } # C7.2.270 SMAXV page C7-2616 line 152715 MATCH x0e30a800/mask=xbf3ffc00 # CONSTRUCT x4e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@2 # AUNIT --inst x4e70a800/mask=xfffffc00 --status nopcodeop :smaxv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_smaxv(Rn_VPR128.8H, 2:1); } # C7.2.270 SMAXV page C7-2616 line 152715 MATCH x0e30a800/mask=xbf3ffc00 # CONSTRUCT x4eb0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_smaxv/1@4 # AUNIT --inst x4eb0a800/mask=xfffffc00 --status nopcodeop :smaxv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { Rd_FPR32 = NEON_smaxv(Rn_VPR128.4S, 4:1); } # C7.2.271 SMIN page C7-2618 line 152818 MATCH x0e206c00/mask=xbf20fc00 # CONSTRUCT x4e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@1 # AUNIT --inst x4e206c00/mask=xffe0fc00 --status nopcodeop :smin Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xd & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_smin(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.271 SMIN page C7-2618 line 152818 MATCH x0e206c00/mask=xbf20fc00 # CONSTRUCT x0ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@4 # AUNIT --inst x0ea06c00/mask=xffe0fc00 
--status nopcodeop :smin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xd & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_smin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.271 SMIN page C7-2618 line 152818 MATCH x0e206c00/mask=xbf20fc00 # CONSTRUCT x0e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@2 # AUNIT --inst x0e606c00/mask=xffe0fc00 --status nopcodeop :smin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xd & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_smin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.271 SMIN page C7-2618 line 152818 MATCH x0e206c00/mask=xbf20fc00 # CONSTRUCT x4ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@4 # AUNIT --inst x4ea06c00/mask=xffe0fc00 --status nopcodeop :smin Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xd & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_smin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.271 SMIN page C7-2618 line 152818 MATCH x0e206c00/mask=xbf20fc00 # CONSTRUCT x0e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@1 # AUNIT --inst x0e206c00/mask=xffe0fc00 --status nopcodeop :smin Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xd & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_smin(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.271 SMIN page C7-2618 line 152818 MATCH x0e206c00/mask=xbf20fc00 # CONSTRUCT x4e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smin/2@2 # AUNIT --inst x4e606c00/mask=xffe0fc00 --status nopcodeop :smin Rd_VPR128.8H, 
Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xd & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_smin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.272 SMINP page C7-2620 line 152920 MATCH x0e20ac00/mask=xbf20fc00 # CONSTRUCT x4e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@1 # AUNIT --inst x4e20ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :sminp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x15 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sminp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.272 SMINP page C7-2620 line 152920 MATCH x0e20ac00/mask=xbf20fc00 # CONSTRUCT x0ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@4 # AUNIT --inst x0ea0ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :sminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x15 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.272 SMINP page C7-2620 line 152920 MATCH x0e20ac00/mask=xbf20fc00 # CONSTRUCT x0e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@2 # AUNIT --inst x0e60ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :sminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x15 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.272 SMINP page C7-2620 line 152920 MATCH x0e20ac00/mask=xbf20fc00 # CONSTRUCT x4ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@4 # AUNIT --inst 
x4ea0ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :sminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x15 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.272 SMINP page C7-2620 line 152920 MATCH x0e20ac00/mask=xbf20fc00 # CONSTRUCT x0e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@1 # AUNIT --inst x0e20ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :sminp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x15 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sminp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.272 SMINP page C7-2620 line 152920 MATCH x0e20ac00/mask=xbf20fc00 # CONSTRUCT x4e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sminp/2@2 # AUNIT --inst x4e60ac00/mask=xffe0fc00 --status nopcodeop --comment "nointround" :sminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x15 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.273 SMINV page C7-2622 line 153024 MATCH x0e31a800/mask=xbf3ffc00 # CONSTRUCT x4e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@1 # AUNIT --inst x4e31a800/mask=xfffffc00 --status nopcodeop :sminv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sminv(Rn_VPR128.16B, 1:1); } # C7.2.273 SMINV page C7-2622 line 153024 MATCH x0e31a800/mask=xbf3ffc00 # CONSTRUCT x0e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@1 # AUNIT --inst 
x0e31a800/mask=xfffffc00 --status nopcodeop :sminv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sminv(Rn_VPR64.8B, 1:1); } # C7.2.273 SMINV page C7-2622 line 153024 MATCH x0e31a800/mask=xbf3ffc00 # CONSTRUCT x0e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@2 # AUNIT --inst x0e71a800/mask=xfffffc00 --status nopcodeop :sminv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sminv(Rn_VPR64.4H, 2:1); } # C7.2.273 SMINV page C7-2622 line 153024 MATCH x0e31a800/mask=xbf3ffc00 # CONSTRUCT x4e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@2 # AUNIT --inst x4e71a800/mask=xfffffc00 --status nopcodeop :sminv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sminv(Rn_VPR128.8H, 2:1); } # C7.2.273 SMINV page C7-2622 line 153024 MATCH x0e31a800/mask=xbf3ffc00 # CONSTRUCT x4eb1a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_sminv/1@4 # AUNIT --inst x4eb1a800/mask=xfffffc00 --status nopcodeop :sminv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sminv(Rn_VPR128.4S, 4:1); } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-2624 line 153127 MATCH x0f002000/mask=xbf00f400 # CONSTRUCT x0f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@4 # AUNIT --inst x0f802000/mask=xffc0f400 --status pass --comment "ext" :smlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & 
advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 TMPQ2[0,64] = TMPQ1[0,64] * tmp3; TMPQ2[64,64] = TMPQ1[64,64] * tmp3; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-2624 line 153127 MATCH x0f002000/mask=xbf00f400 # CONSTRUCT x0f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@2 # AUNIT --inst x0f402000/mask=xffc0f400 --status pass --comment "ext" :smlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 TMPQ2[0,32] = TMPQ1[0,32] * tmp3; TMPQ2[32,32] = TMPQ1[32,32] * tmp3; TMPQ2[64,32] = TMPQ1[64,32] * tmp3; TMPQ2[96,32] = TMPQ1[96,32] * tmp3; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ2[32,32]; Rd_VPR128.4S[64,32] = 
Rd_VPR128.4S[64,32] + TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-2624 line 153127 MATCH x0f002000/mask=xbf00f400 # CONSTRUCT x4f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@4 # AUNIT --inst x4f802000/mask=xffc0f400 --status pass --comment "ext" :smlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 TMPQ3[0,64] = TMPQ2[0,64] * tmp4; TMPQ3[64,64] = TMPQ2[64,64] * tmp4; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.274 SMLAL, SMLAL2 (by element) page C7-2624 line 153127 MATCH x0f002000/mask=xbf00f400 # CONSTRUCT x4f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@2 # AUNIT --inst x4f402000/mask=xffc0f400 --status pass --comment "ext" :smlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); 
TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 TMPQ3[0,32] = TMPQ2[0,32] * tmp4; TMPQ3[32,32] = TMPQ2[32,32] * tmp4; TMPQ3[64,32] = TMPQ2[64,32] * tmp4; TMPQ3[96,32] = TMPQ2[96,32] * tmp4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-2627 line 153291 MATCH x0e208000/mask=xbf20fc00 # CONSTRUCT x0ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@4 # AUNIT --inst x0ea08000/mask=xffe0fc00 --status pass --comment "ext" :smlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x8 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-2627 line 153291 MATCH x0e208000/mask=xbf20fc00 # 
CONSTRUCT x0e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@2 # AUNIT --inst x0e608000/mask=xffe0fc00 --status pass --comment "ext" :smlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x8 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-2627 line 153291 MATCH x0e208000/mask=xbf20fc00 # CONSTRUCT x0e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $*@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal/3@1 # AUNIT --inst x0e208000/mask=xffe0fc00 --status pass --comment "ext" :smlal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x8 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 
to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ3 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ3[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ3[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ3[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ3[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ3[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ3[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ3[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-2627 line 153291 MATCH x0e208000/mask=xbf20fc00 # CONSTRUCT x4ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@4 # AUNIT --inst x4ea08000/mask=xffe0fc00 --status 
pass --comment "ext" :smlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x8 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ5 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ5[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-2627 line 153291 MATCH x0e208000/mask=xbf20fc00 # CONSTRUCT x4e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@2 # AUNIT --inst x4e608000/mask=xffe0fc00 --status pass --comment "ext" :smlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x8 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; TMPQ5[64,32] = 
TMPQ2[64,32] * TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ5 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ5[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ5[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ5[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.275 SMLAL, SMLAL2 (vector) page C7-2627 line 153291 MATCH x0e208000/mask=xbf20fc00 # CONSTRUCT x4e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $*@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlal2/3@1 # AUNIT --inst x4e208000/mask=xffe0fc00 --status pass --comment "ext" :smlal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x8 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] * 
TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ5 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ5[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ5[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ5[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ5[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ5[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ5[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ5[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2629 line 153415 MATCH x0f006000/mask=xbf00f400 # CONSTRUCT x0f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@4 # AUNIT --inst x0f806000/mask=xffc0f400 --status pass --comment "ext" :smlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 TMPQ2[0,64] = TMPQ1[0,64] * tmp3; TMPQ2[64,64] = TMPQ1[64,64] * tmp3; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2629 line 153415 MATCH x0f006000/mask=xbf00f400 # CONSTRUCT x0f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* &=$-@4 # SMACRO(pseudo) ARG1 
ARG2 ARG3 &=NEON_smlsl/3@2 # AUNIT --inst x0f406000/mask=xffc0f400 --status pass --comment "ext" :smlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 TMPQ2[0,32] = TMPQ1[0,32] * tmp3; TMPQ2[32,32] = TMPQ1[32,32] * tmp3; TMPQ2[64,32] = TMPQ1[64,32] * tmp3; TMPQ2[96,32] = TMPQ1[96,32] * tmp3; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2629 line 153415 MATCH x0f006000/mask=xbf00f400 # CONSTRUCT x4f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@4 # AUNIT --inst x4f806000/mask=xffc0f400 --status pass --comment "ext" :smlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = 
sext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 TMPQ3[0,64] = TMPQ2[0,64] * tmp4; TMPQ3[64,64] = TMPQ2[64,64] * tmp4; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.276 SMLSL, SMLSL2 (by element) page C7-2629 line 153415 MATCH x0f006000/mask=xbf00f400 # CONSTRUCT x4f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@2 # AUNIT --inst x4f406000/mask=xffc0f400 --status pass --comment "ext" :smlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 TMPQ3[0,32] = TMPQ2[0,32] * tmp4; TMPQ3[32,32] = TMPQ2[32,32] * tmp4; TMPQ3[64,32] = TMPQ2[64,32] * tmp4; TMPQ3[96,32] = TMPQ2[96,32] * tmp4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2632 line 153579 MATCH x0e20a000/mask=xbf20fc00 # CONSTRUCT x0ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 &=$-@8 # 
SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@4 # AUNIT --inst x0ea0a000/mask=xffe0fc00 --status pass --comment "ext" :smlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xa & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2632 line 153579 MATCH x0e20a000/mask=xbf20fc00 # CONSTRUCT x0e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@2 # AUNIT --inst x0e60a000/mask=xffe0fc00 --status pass --comment "ext" :smlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xa & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; 
TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2632 line 153579 MATCH x0e20a000/mask=xbf20fc00 # CONSTRUCT x0e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 $*@2 &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl/3@1 # AUNIT --inst x0e20a000/mask=xffe0fc00 --status pass --comment "ext" :smlsl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xa & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] * 
TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ3 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ3[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ3[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ3[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ3[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ3[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ3[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ3[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2632 line 153579 MATCH x0e20a000/mask=xbf20fc00 # CONSTRUCT x4ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@4 # AUNIT --inst x4ea0a000/mask=xffe0fc00 --status pass --comment "ext" :smlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xa & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ5 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ5[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2632 line 153579 MATCH x0e20a000/mask=xbf20fc00 
# NOTE(review): the constructors below appear machine-generated (each is preceded by
# SMACRO/AUNIT generator metadata and expands vector ops lane-by-lane); prefer fixing the
# generator over hand-editing. The "--status pass" / "--comment" tags record per-instruction
# test status from the AUNIT harness.
# CONSTRUCT x4e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@2 # AUNIT --inst x4e60a000/mask=xffe0fc00 --status pass --comment "ext" :smlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xa & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ5 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ5[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ5[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ5[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.277 SMLSL, SMLSL2 (vector) page C7-2632 line 153579 MATCH x0e20a000/mask=xbf20fc00 # CONSTRUCT x4e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 $*@2 &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_smlsl2/3@1 # AUNIT --inst x4e20a000/mask=xffe0fc00 --status pass --comment "ext" :smlsl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xa & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 =
Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ5 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ5[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ5[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ5[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ5[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ5[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ5[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ5[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.279 SMOV page C7-2635 line 153760 MATCH x0e002c00/mask=xbfe0fc00 # CONSTRUCT x0e012c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sext # SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 # AUNIT --inst x0e012c00/mask=xffe1fc00 --status pass :smov Rd_GPR32, Rn_VPR128.B.imm_neon_uimm4 is b_3131=0 & Q=0 & b_29=0 &
b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; Rd_GPR32 = sext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.279 SMOV page C7-2635 line 153760 MATCH x0e002c00/mask=xbfe0fc00 # CONSTRUCT x0e022c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sext # SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 # AUNIT --inst x0e022c00/mask=xffe3fc00 --status pass :smov Rd_GPR32, Rn_VPR128.H.imm_neon_uimm3 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; Rd_GPR32 = sext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.279 SMOV page C7-2635 line 153760 MATCH x0e002c00/mask=xbfe0fc00 # CONSTRUCT x4e012c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sext # SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 # AUNIT --inst x4e012c00/mask=xffe1fc00 --status pass :smov Rd_GPR64, Rn_VPR128.B.imm_neon_uimm4 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; Rd_GPR64 = sext(tmp1); } # C7.2.279 SMOV page C7-2635 line 153760 MATCH x0e002c00/mask=xbfe0fc00 # CONSTRUCT x4e022c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sext # SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 # AUNIT --inst x4e022c00/mask=xffe3fc00 --status pass :smov Rd_GPR64, Rn_VPR128.H.imm_neon_uimm3 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm3]
lane size 2 local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; Rd_GPR64 = sext(tmp1); } # C7.2.279 SMOV page C7-2635 line 153760 MATCH x0e002c00/mask=xbfe0fc00 # CONSTRUCT x4e042c00/mask=xffe7fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =sext # SMACRO(pseudo) ARG1 ARG2 =NEON_smov/1 # AUNIT --inst x4e042c00/mask=xffe7fc00 --status pass :smov Rd_GPR64, Rn_VPR128.S.imm_neon_uimm2 is b_3131=0 & Q=1 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.S.imm_neon_uimm2 & b_1618=4 & b_1515=0 & imm4=0x5 & b_1010=1 & Rn_VPR128 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm2] lane size 4 local tmp1:4 = Rn_VPR128.S.imm_neon_uimm2; Rd_GPR64 = sext(tmp1); } # C7.2.280 SMULL, SMULL2 (by element) page C7-2637 line 153881 MATCH x0f00a000/mask=xbf00f400 # CONSTRUCT x0f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@4 # AUNIT --inst x0f80a000/mask=xffc0f400 --status pass --comment "ext" :smull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); # simd infix Rd_VPR128.2D = TMPQ1 * tmp3 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] * tmp3; Rd_VPR128.2D[64,64] = TMPQ1[64,64] * tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.280 SMULL, SMULL2 (by element) page C7-2637 line 153881 MATCH x0f00a000/mask=xbf00f400 # CONSTRUCT x0f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@2 # AUNIT --inst x0f40a000/mask=xffc0f400 --status pass --comment "ext" :smull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM
is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); # simd infix Rd_VPR128.4S = TMPQ1 * tmp3 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] * tmp3; Rd_VPR128.4S[32,32] = TMPQ1[32,32] * tmp3; Rd_VPR128.4S[64,32] = TMPQ1[64,32] * tmp3; Rd_VPR128.4S[96,32] = TMPQ1[96,32] * tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.280 SMULL, SMULL2 (by element) page C7-2637 line 153881 MATCH x0f00a000/mask=xbf00f400 # CONSTRUCT x4f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@4 # AUNIT --inst x4f80a000/mask=xffc0f400 --status pass --comment "ext" :smull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = sext(tmp3); # simd infix Rd_VPR128.2D = TMPQ2 * tmp4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] * tmp4; Rd_VPR128.2D[64,64] = TMPQ2[64,64] * tmp4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.280 SMULL, SMULL2 (by element) page C7-2637 line 153881 MATCH x0f00a000/mask=xbf00f400 # CONSTRUCT x4f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 =$* #
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@2 # AUNIT --inst x4f40a000/mask=xffc0f400 --status pass --comment "ext" :smull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); # simd infix Rd_VPR128.4S = TMPQ2 * tmp4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] * tmp4; Rd_VPR128.4S[32,32] = TMPQ2[32,32] * tmp4; Rd_VPR128.4S[64,32] = TMPQ2[64,32] * tmp4; Rd_VPR128.4S[96,32] = TMPQ2[96,32] * tmp4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page C7-2640 line 154037 MATCH x0e20c000/mask=xbf20fc00 # CONSTRUCT x0ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 =$*@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@4 # AUNIT --inst x0ea0c000/mask=xffe0fc00 --status pass --comment "ext" :smull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xc & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 * TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; Rd_VPR128.2D[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page
C7-2640 line 154037 MATCH x0e20c000/mask=xbf20fc00 # CONSTRUCT x0e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 =$*@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@2 # AUNIT --inst x0e60c000/mask=xffe0fc00 --status pass --comment "ext" :smull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xc & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 * TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; Rd_VPR128.4S[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; Rd_VPR128.4S[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page C7-2640 line 154037 MATCH x0e20c000/mask=xbf20fc00 # CONSTRUCT x0e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 =$*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull/2@1 # AUNIT --inst x0e20c000/mask=xffe0fc00 --status pass --comment "ext" :smull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xc & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] =
sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 * TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; Rd_VPR128.8H[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; Rd_VPR128.8H[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page C7-2640 line 154037 MATCH x0e20c000/mask=xbf20fc00 # CONSTRUCT x4ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 =$*@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@4 # AUNIT --inst x4ea0c000/mask=xffe0fc00 --status pass --comment "ext" :smull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xc & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Rn_VPR128 & Rm_VPR128 & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 * TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; Rd_VPR128.2D[64,64] = TMPQ2[64,64] * TMPQ4[64,64];
# NOTE(review): the smull2 forms above/below read element [64,64] (the upper half) of the
# 128-bit sources, matching the "2" (second-half) mnemonic suffix.
zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page C7-2640 line 154037 MATCH x0e20c000/mask=xbf20fc00 # CONSTRUCT x4e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 =$*@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@2 # AUNIT --inst x4e60c000/mask=xffe0fc00 --status pass --comment "ext" :smull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xc & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Rn_VPR128 & Rm_VPR128 & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 * TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; Rd_VPR128.4S[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; Rd_VPR128.4S[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.281 SMULL, SMULL2 (vector) page C7-2640 line 154037 MATCH x0e20c000/mask=xbf20fc00 # CONSTRUCT x4e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 =$*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_smull2/2@1 # AUNIT --inst x4e20c000/mask=xffe0fc00 --status pass --comment "ext" :smull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xc & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Rn_VPR128 & Rm_VPR128 & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] =
sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 * TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; Rd_VPR128.8H[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; Rd_VPR128.8H[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; Rd_VPR128.8H[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; Rd_VPR128.8H[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; Rd_VPR128.8H[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; Rd_VPR128.8H[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x5e207800/mask=xff3ffc00 # CONSTRUCT x5e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =abs # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 # AUNIT --inst x5e207800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size = 00 Q = 1 aa=1 suf=FPR8 # Note: in some implemented semantics that ignore saturation (where it # makes a difference), there is an error in about 50% of the lanes.
# NOTE(review): these SQABS semantics use MP_INT_ABS without modeling saturation; the
# AUNIT "nointsat" comments (and --status fail on the byte-lane vector forms) record this
# known approximation — do not "simplify" the lane expansion without revisiting that.
:sqabs Rd_FPR8, Rn_FPR8 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR8 & Rn_FPR8 & Zd { Rd_FPR8 = MP_INT_ABS(Rn_FPR8); zext_zb(Zd); # zero upper 31 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x5e207800/mask=xff3ffc00 # CONSTRUCT x5e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =abs # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 # AUNIT --inst x5e607800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size = 01 Q = 1 aa=1 suf=FPR16 :sqabs Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = MP_INT_ABS(Rn_FPR16); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x5e207800/mask=xff3ffc00 # CONSTRUCT x5ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =abs # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 # AUNIT --inst x5ea07800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size = 10 Q = 1 aa=1 suf=FPR32 :sqabs Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = MP_INT_ABS(Rn_FPR32); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x5e207800/mask=xff3ffc00 # CONSTRUCT x5ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =abs # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1 # AUNIT --inst x5ee07800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size = 11 Q = 1 aa=1 suf=FPR64 :sqabs Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = MP_INT_ABS(Rn_FPR64); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x0e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@1 # SMACRO(pseudo)
ARG1 ARG2 =NEON_sqabs/1@1 # AUNIT --inst x0e207800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size = 00 Q = 0 aa=0 esize=1 suf=VPR64.8B :sqabs Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { # simd unary Rd_VPR64.8B = MP_INT_ABS(Rn_VPR64.8B) on lane size 1 Rd_VPR64.8B[0,8] = MP_INT_ABS(Rn_VPR64.8B[0,8]); Rd_VPR64.8B[8,8] = MP_INT_ABS(Rn_VPR64.8B[8,8]); Rd_VPR64.8B[16,8] = MP_INT_ABS(Rn_VPR64.8B[16,8]); Rd_VPR64.8B[24,8] = MP_INT_ABS(Rn_VPR64.8B[24,8]); Rd_VPR64.8B[32,8] = MP_INT_ABS(Rn_VPR64.8B[32,8]); Rd_VPR64.8B[40,8] = MP_INT_ABS(Rn_VPR64.8B[40,8]); Rd_VPR64.8B[48,8] = MP_INT_ABS(Rn_VPR64.8B[48,8]); Rd_VPR64.8B[56,8] = MP_INT_ABS(Rn_VPR64.8B[56,8]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x4e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@1 # AUNIT --inst x4e207800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size = 00 Q = 1 aa=0 esize=1 suf=VPR128.16B :sqabs Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { # simd unary Rd_VPR128.16B = MP_INT_ABS(Rn_VPR128.16B) on lane size 1 Rd_VPR128.16B[0,8] = MP_INT_ABS(Rn_VPR128.16B[0,8]); Rd_VPR128.16B[8,8] = MP_INT_ABS(Rn_VPR128.16B[8,8]); Rd_VPR128.16B[16,8] = MP_INT_ABS(Rn_VPR128.16B[16,8]); Rd_VPR128.16B[24,8] = MP_INT_ABS(Rn_VPR128.16B[24,8]); Rd_VPR128.16B[32,8] = MP_INT_ABS(Rn_VPR128.16B[32,8]); Rd_VPR128.16B[40,8] = MP_INT_ABS(Rn_VPR128.16B[40,8]); Rd_VPR128.16B[48,8] = MP_INT_ABS(Rn_VPR128.16B[48,8]); Rd_VPR128.16B[56,8] = MP_INT_ABS(Rn_VPR128.16B[56,8]); Rd_VPR128.16B[64,8] = MP_INT_ABS(Rn_VPR128.16B[64,8]); Rd_VPR128.16B[72,8] = MP_INT_ABS(Rn_VPR128.16B[72,8]); Rd_VPR128.16B[80,8] = MP_INT_ABS(Rn_VPR128.16B[80,8]);
# remaining byte lanes (11..15) of the 16B sqabs expansion
Rd_VPR128.16B[88,8] = MP_INT_ABS(Rn_VPR128.16B[88,8]); Rd_VPR128.16B[96,8] = MP_INT_ABS(Rn_VPR128.16B[96,8]); Rd_VPR128.16B[104,8] = MP_INT_ABS(Rn_VPR128.16B[104,8]); Rd_VPR128.16B[112,8] = MP_INT_ABS(Rn_VPR128.16B[112,8]); Rd_VPR128.16B[120,8] = MP_INT_ABS(Rn_VPR128.16B[120,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x0e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@2 # AUNIT --inst x0e607800/mask=xfffffc00 --status pass --comment "nointsat" # Vector variant when size = 01 Q = 0 aa=0 esize=2 suf=VPR64.4H :sqabs Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd unary Rd_VPR64.4H = MP_INT_ABS(Rn_VPR64.4H) on lane size 2 Rd_VPR64.4H[0,16] = MP_INT_ABS(Rn_VPR64.4H[0,16]); Rd_VPR64.4H[16,16] = MP_INT_ABS(Rn_VPR64.4H[16,16]); Rd_VPR64.4H[32,16] = MP_INT_ABS(Rn_VPR64.4H[32,16]); Rd_VPR64.4H[48,16] = MP_INT_ABS(Rn_VPR64.4H[48,16]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x4e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@2 # AUNIT --inst x4e607800/mask=xfffffc00 --status pass --comment "nointsat" # Vector variant when size = 01 Q = 1 aa=0 esize=2 suf=VPR128.8H :sqabs Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd unary Rd_VPR128.8H = MP_INT_ABS(Rn_VPR128.8H) on lane size 2 Rd_VPR128.8H[0,16] = MP_INT_ABS(Rn_VPR128.8H[0,16]); Rd_VPR128.8H[16,16] = MP_INT_ABS(Rn_VPR128.8H[16,16]); Rd_VPR128.8H[32,16] = MP_INT_ABS(Rn_VPR128.8H[32,16]); Rd_VPR128.8H[48,16] = MP_INT_ABS(Rn_VPR128.8H[48,16]); Rd_VPR128.8H[64,16] = MP_INT_ABS(Rn_VPR128.8H[64,16]); Rd_VPR128.8H[80,16] =
MP_INT_ABS(Rn_VPR128.8H[80,16]); Rd_VPR128.8H[96,16] = MP_INT_ABS(Rn_VPR128.8H[96,16]); Rd_VPR128.8H[112,16] = MP_INT_ABS(Rn_VPR128.8H[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x0ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@4 # AUNIT --inst x0ea07800/mask=xfffffc00 --status pass --comment "nointsat" # Vector variant when size = 10 Q = 0 aa=0 esize=4 suf=VPR64.2S :sqabs Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd unary Rd_VPR64.2S = MP_INT_ABS(Rn_VPR64.2S) on lane size 4 Rd_VPR64.2S[0,32] = MP_INT_ABS(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = MP_INT_ABS(Rn_VPR64.2S[32,32]); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x4ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@4 # AUNIT --inst x4ea07800/mask=xfffffc00 --status pass --comment "nointsat" # Vector variant when size = 10 Q = 1 aa=0 esize=4 suf=VPR128.4S :sqabs Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd unary Rd_VPR128.4S = MP_INT_ABS(Rn_VPR128.4S) on lane size 4 Rd_VPR128.4S[0,32] = MP_INT_ABS(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = MP_INT_ABS(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = MP_INT_ABS(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = MP_INT_ABS(Rn_VPR128.4S[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.282 SQABS page C7-2642 line 154155 MATCH x0e207800/mask=xbf3ffc00 # CONSTRUCT x4ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =$abs@8 # SMACRO(pseudo) ARG1 ARG2 =NEON_sqabs/1@8 # AUNIT --inst x4ee07800/mask=xfffffc00 --status pass --comment "nointsat"
# NOTE(review): SQADD below defers its saturating-add semantics to the NEON_sqadd pcodeop
# (AUNIT --status nopcodeop); the vector forms pass the lane size as a trailing constant
# (1:1, 2:1, 4:1, 8:1) while the scalar FPR forms take only the two operands.
# Vector variant when size = 11 Q = 1 aa=0 esize=8 suf=VPR128.2D :sqabs Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd unary Rd_VPR128.2D = MP_INT_ABS(Rn_VPR128.2D) on lane size 8 Rd_VPR128.2D[0,64] = MP_INT_ABS(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = MP_INT_ABS(Rn_VPR128.2D[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x5e200c00/mask=xff20fc00 # CONSTRUCT x5e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 # AUNIT --inst x5e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x1 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sqadd(Rn_FPR8, Rm_FPR8); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x5e200c00/mask=xff20fc00 # CONSTRUCT x5ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 # AUNIT --inst x5ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x1 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_sqadd(Rn_FPR64, Rm_FPR64); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x5e200c00/mask=xff20fc00 # CONSTRUCT x5e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2 # AUNIT --inst x5e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x1 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqadd(Rn_FPR16, Rm_FPR16); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x5e200c00/mask=xff20fc00 # CONSTRUCT x5ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2
ARG3 =NEON_sqadd/2 # AUNIT --inst x5ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x1 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sqadd(Rn_FPR32, Rm_FPR32); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x4e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@1 # AUNIT --inst x4e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x1 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x4ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@8 # AUNIT --inst x4ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x1 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x0ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@4 # AUNIT --inst x0ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x1 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x0e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED
OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@2 # AUNIT --inst x0e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x1 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x4ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@4 # AUNIT --inst x4ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x1 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x0e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@1 # AUNIT --inst x0e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x1 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.283 SQADD page C7-2644 line 154278 MATCH x0e200c00/mask=xbf20fc00 # CONSTRUCT x4e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqadd/2@2 # AUNIT --inst x4e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x1 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2646 line 154405 MATCH
# x5f003000/mask=xff00f400
# NOTE(review): generated SLEIGH (SMACRO/AUNIT markers). The Q-flag saturation of
# SQDMLAL is not modeled — AUNIT status is "fail" with comment "nointsat" and the
# p-code below uses plain signed multiply/add. sext-widen, multiply, double, accumulate.
# CONSTRUCT x5f803000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=+/2
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3
# AUNIT --inst x5f803000/mask=xffc0f400 --status fail --comment "nointsat"
# scalar variant, size == 10 (always part == 0)
:sqdmlal Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rd_FPR64 & Rn_FPR32 & Zd
{
local tmp1:8 = sext(Rn_FPR32);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp2:4 = Re_VPR128.S.vIndex;
local tmp3:8 = sext(tmp2);
local tmp4:8 = tmp1 * tmp3;
local tmp5:8 = tmp4 * 2:8;
Rd_FPR64 = Rd_FPR64 + tmp5;
zext_zd(Zd); # zero upper 24 bytes of Zd
}
# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2646 line 154405 MATCH x5f003000/mask=xff00f400
# CONSTRUCT x5f403000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 sext:4 ARG2 sext:4 * 2:4 * &=+
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3
# AUNIT --inst x5f403000/mask=xffc0f400 --status fail --comment "nointsat"
# scalar variant, size == 01 (always part == 0)
:sqdmlal Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rd_FPR32 & Rn_FPR16 & Zd
{
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp1:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp2:4 = sext(tmp1);
local tmp3:4 = sext(Rn_FPR16);
local tmp4:4 = tmp2 * tmp3;
local tmp5:4 = tmp4 * 2:4;
Rd_FPR32 = Rd_FPR32 + tmp5;
zext_zs(Zd); # zero upper 28 bytes of Zd
}
# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2646 line 154405 MATCH x0f003000/mask=xbf00f400
# CONSTRUCT x0f803000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$+@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@4
# AUNIT --inst x0f803000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q == 0, size == 10
:sqdmlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR128.2D & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8)
TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]);
TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp2:4 = Re_VPR128.S.vIndex;
local tmp3:8 = sext(tmp2);
# simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8
TMPQ2[0,64] = TMPQ1[0,64] * tmp3;
TMPQ2[64,64] = TMPQ1[64,64] * tmp3;
# simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8
TMPQ3[0,64] = TMPQ2[0,64] * 2:8;
TMPQ3[64,64] = TMPQ2[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2646 line 154405 MATCH x0f003000/mask=xbf00f400
# CONSTRUCT x0f403000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@2
# AUNIT --inst x0f403000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q = 0, size == 01
:sqdmlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR64.4H & Rd_VPR128.4S & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4)
TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]);
TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]);
TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]);
TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp3:4 = sext(tmp2);
# simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4
TMPQ2[0,32] = TMPQ1[0,32] * tmp3;
TMPQ2[32,32] = TMPQ1[32,32] * tmp3;
TMPQ2[64,32] = TMPQ1[64,32] * tmp3;
TMPQ2[96,32] = TMPQ1[96,32] * tmp3;
# simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4
TMPQ3[0,32] = TMPQ2[0,32] * 2:4;
TMPQ3[32,32] = TMPQ2[32,32] * 2:4;
TMPQ3[64,32] = TMPQ2[64,32] * 2:4;
TMPQ3[96,32] = TMPQ2[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2646 line 154405 MATCH x0f003000/mask=xbf00f400
# CONSTRUCT x4f803000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$+@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@4
# AUNIT --inst x4f803000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 10
# sqdmlal2 reads the upper 64 bits of Rn (second-part form)
:sqdmlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0011 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.2D & Zd
{
TMPD1 = Rn_VPR128.4S[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8)
TMPQ2[0,64] = sext(TMPD1[0,32]);
TMPQ2[64,64] = sext(TMPD1[32,32]);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp3:4 = Re_VPR128.S.vIndex;
local tmp4:8 = sext(tmp3);
# simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8
TMPQ3[0,64] = TMPQ2[0,64] * tmp4;
TMPQ3[64,64] = TMPQ2[64,64] * tmp4;
# simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8
TMPQ4[0,64] = TMPQ3[0,64] * 2:8;
TMPQ4[64,64] = TMPQ3[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.284 SQDMLAL, SQDMLAL2 (by element) page C7-2646 line 154405 MATCH
# x0f003000/mask=xbf00f400
# NOTE(review): generated SLEIGH. Saturation is not modeled for any SQDMLAL form
# here (AUNIT "--status fail --comment nointsat"); plain signed ops are used.
# CONSTRUCT x4f403000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@2
# AUNIT --inst x4f403000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 01
# sqdmlal2 reads the upper 64 bits of Rn (second-part form)
:sqdmlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0011 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR128.8H & Rd_VPR128.4S & Zd
{
TMPD1 = Rn_VPR128.8H[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4)
TMPQ2[0,32] = sext(TMPD1[0,16]);
TMPQ2[32,32] = sext(TMPD1[16,16]);
TMPQ2[64,32] = sext(TMPD1[32,16]);
TMPQ2[96,32] = sext(TMPD1[48,16]);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp3:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp4:4 = sext(tmp3);
# simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4
TMPQ3[0,32] = TMPQ2[0,32] * tmp4;
TMPQ3[32,32] = TMPQ2[32,32] * tmp4;
TMPQ3[64,32] = TMPQ2[64,32] * tmp4;
TMPQ3[96,32] = TMPQ2[96,32] * tmp4;
# simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4
TMPQ4[0,32] = TMPQ3[0,32] * 2:4;
TMPQ4[32,32] = TMPQ3[32,32] * 2:4;
TMPQ4[64,32] = TMPQ3[64,32] * 2:4;
TMPQ4[96,32] = TMPQ3[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2650 line 154623 MATCH x5e209000/mask=xff20fc00
# CONSTRUCT x5ea09000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=+
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3
# AUNIT --inst x5ea09000/mask=xffe0fc00 --status fail --comment "nointsat"
# scalar variant, size == 10 (always part == 0)
:sqdmlal Rd_FPR64, Rn_FPR32, Rm_FPR32 is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd
{
local tmp1:8 = sext(Rn_FPR32);
local tmp2:8 = sext(Rm_FPR32);
local tmp3:8 = tmp1 * tmp2;
local tmp4:8 = tmp3 * 2:8;
Rd_FPR64 = Rd_FPR64 + tmp4;
zext_zd(Zd); # zero upper 24 bytes of Zd
}
# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2650 line 154623 MATCH x5e209000/mask=xff20fc00
# CONSTRUCT x5e609000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * &=+
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3
# AUNIT --inst x5e609000/mask=xffe0fc00 --status fail --comment "nointsat"
# scalar variant, size == 01 (always part == 0)
:sqdmlal Rd_FPR32, Rn_FPR16, Rm_FPR16 is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd
{
local tmp1:4 = sext(Rn_FPR16);
local tmp2:4 = sext(Rm_FPR16);
local tmp3:4 = tmp1 * tmp2;
local tmp4:4 = tmp3 * 2:4;
Rd_FPR32 = Rd_FPR32 + tmp4;
zext_zs(Zd); # zero upper 28 bytes of Zd
}
# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2650 line 154623 MATCH x0e209000/mask=xbf20fc00
# CONSTRUCT x0ea09000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 2:8 $* &=$+@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@4
# AUNIT --inst x0ea09000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q == 0, size == 10
:sqdmlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rn_VPR64.2S & Rd_VPR128.2D & Rm_VPR64.2S & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8)
TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]);
TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]);
# simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8)
TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]);
TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]);
# simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8
TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64];
TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64];
# simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8
TMPQ4[0,64] = TMPQ3[0,64] * 2:8;
TMPQ4[64,64] = TMPQ3[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2650 line 154623 MATCH x0e209000/mask=xbf20fc00
# CONSTRUCT x0e609000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 2:4 $* &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal/3@2
# AUNIT --inst x0e609000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q = 0, size == 01
:sqdmlal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100 & Rn_VPR64.4H & Rd_VPR128.4S & Rm_VPR64.4H & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4)
TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]);
TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]);
TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]);
TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]);
# simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4)
TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]);
TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]);
TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]);
TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]);
# simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4
TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32];
TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32];
TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32];
TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32];
# simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4
TMPQ4[0,32] = TMPQ3[0,32] * 2:4;
TMPQ4[32,32] = TMPQ3[32,32] * 2:4;
TMPQ4[64,32] = TMPQ3[64,32] * 2:4;
TMPQ4[96,32] = TMPQ3[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2650 line 154623 MATCH x0e209000/mask=xbf20fc00
# CONSTRUCT x4ea09000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 2:8 $* &=$+@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@4
# AUNIT --inst x4ea09000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 10
# sqdmlal2 reads the upper 64 bits of both Rn and Rm (second-part form)
:sqdmlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b100100 & Rn_VPR128.4S & Rd_VPR128.2D & Rm_VPR128.4S & Zd
{
TMPD1 = Rn_VPR128.4S[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8)
TMPQ2[0,64] = sext(TMPD1[0,32]);
TMPQ2[64,64] = sext(TMPD1[32,32]);
TMPD3 = Rm_VPR128.4S[64,64];
# simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8)
TMPQ4[0,64] = sext(TMPD3[0,32]);
TMPQ4[64,64] = sext(TMPD3[32,32]);
# simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8
TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64];
TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64];
# simd infix TMPQ6 = TMPQ5 * 2:8 on lane size 8
TMPQ6[0,64] = TMPQ5[0,64] * 2:8;
TMPQ6[64,64] = TMPQ5[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.285 SQDMLAL, SQDMLAL2 (vector) page C7-2650 line 154623 MATCH x0e209000/mask=xbf20fc00
# CONSTRUCT x4e609000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 2:4 $* &=$+@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlal2/3@2
# AUNIT --inst x4e609000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 01
:sqdmlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b100100
& Rn_VPR128.8H & Rd_VPR128.4S & Rm_VPR128.8H & Zd
{
TMPD1 = Rn_VPR128.8H[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4)
TMPQ2[0,32] = sext(TMPD1[0,16]);
TMPQ2[32,32] = sext(TMPD1[16,16]);
TMPQ2[64,32] = sext(TMPD1[32,16]);
TMPQ2[96,32] = sext(TMPD1[48,16]);
TMPD3 = Rm_VPR128.8H[64,64];
# simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4)
TMPQ4[0,32] = sext(TMPD3[0,16]);
TMPQ4[32,32] = sext(TMPD3[16,16]);
TMPQ4[64,32] = sext(TMPD3[32,16]);
TMPQ4[96,32] = sext(TMPD3[48,16]);
# simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4
TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32];
TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32];
TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32];
TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32];
# simd infix TMPQ6 = TMPQ5 * 2:4 on lane size 4
TMPQ6[0,32] = TMPQ5[0,32] * 2:4;
TMPQ6[32,32] = TMPQ5[32,32] * 2:4;
TMPQ6[64,32] = TMPQ5[64,32] * 2:4;
TMPQ6[96,32] = TMPQ5[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# NOTE(review): SQDMLSL section below mirrors SQDMLAL above but accumulates with
# subtraction; saturation is likewise not modeled (AUNIT "nointsat").
# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2653 line 154796 MATCH x5f007000/mask=xff00f400
# CONSTRUCT x5f807000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=-
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3
# AUNIT --inst x5f807000/mask=xffc0f400 --status fail --comment "nointsat"
# scalar variant, size == 10 (always part == 0)
:sqdmlsl Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rd_FPR64 & Rn_FPR32 & Zd
{
local tmp1:8 = sext(Rn_FPR32);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp2:4 = Re_VPR128.S.vIndex;
local tmp3:8 = sext(tmp2);
local tmp4:8 = tmp1 * tmp3;
local tmp5:8 = tmp4 * 2:8;
Rd_FPR64 = Rd_FPR64 - tmp5;
zext_zd(Zd); # zero upper 24 bytes of Zd
}
# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2653 line 154796 MATCH x5f007000/mask=xff00f400
# CONSTRUCT x5f407000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * &=-
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3
# AUNIT --inst x5f407000/mask=xffc0f400 --status fail --comment "nointsat"
# scalar variant, size == 01 (always part == 0)
:sqdmlsl Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rd_FPR32 & Rn_FPR16 & Zd
{
local tmp1:4 = sext(Rn_FPR16);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp3:4 = sext(tmp2);
local tmp4:4 = tmp1 * tmp3;
local tmp5:4 = tmp4 * 2:4;
Rd_FPR32 = Rd_FPR32 - tmp5;
zext_zs(Zd); # zero upper 28 bytes of Zd
}
# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2653 line 154796 MATCH x0f007000/mask=xbf00f400
# CONSTRUCT x0f807000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$-@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@4
# AUNIT --inst x0f807000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q == 0, size == 10
:sqdmlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR64.2S & Rd_VPR128.2D & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8)
TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]);
TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp2:4 = Re_VPR128.S.vIndex;
local tmp3:8 = sext(tmp2);
# simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8
TMPQ2[0,64] = TMPQ1[0,64] * tmp3;
TMPQ2[64,64] = TMPQ1[64,64] * tmp3;
# simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8
TMPQ3[0,64] = TMPQ2[0,64] * 2:8;
TMPQ3[64,64] = TMPQ2[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2653 line 154796 MATCH x0f007000/mask=xbf00f400
# CONSTRUCT x0f407000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$-@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@2
# AUNIT --inst x0f407000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q = 0, size == 01
:sqdmlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=0 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR64.4H & Rd_VPR128.4S & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4)
TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]);
TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]);
TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]);
TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp3:4 = sext(tmp2);
# simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4
TMPQ2[0,32] = TMPQ1[0,32] * tmp3;
TMPQ2[32,32] = TMPQ1[32,32] * tmp3;
TMPQ2[64,32] = TMPQ1[64,32] * tmp3;
TMPQ2[96,32] = TMPQ1[96,32] * tmp3;
# simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4
TMPQ3[0,32] = TMPQ2[0,32] * 2:4;
TMPQ3[32,32] = TMPQ2[32,32] * 2:4;
TMPQ3[64,32] = TMPQ2[64,32] * 2:4;
TMPQ3[96,32] = TMPQ2[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2653 line 154796 MATCH x0f007000/mask=xbf00f400
# CONSTRUCT x4f807000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$-@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@4
# AUNIT --inst x4f807000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 10
# sqdmlsl2 reads the upper 64 bits of Rn (second-part form)
:sqdmlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b10 & b_1215=0b0111 & b_10=0 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Rn_VPR128.4S & Rd_VPR128.2D & Zd
{
TMPD1 = Rn_VPR128.4S[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8)
TMPQ2[0,64] = sext(TMPD1[0,32]);
TMPQ2[64,64] = sext(TMPD1[32,32]);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp3:4 = Re_VPR128.S.vIndex;
local tmp4:8 = sext(tmp3);
# simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8
TMPQ3[0,64] = TMPQ2[0,64] * tmp4;
TMPQ3[64,64] = TMPQ2[64,64] * tmp4;
# simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8
TMPQ4[0,64] = TMPQ3[0,64] * 2:8;
TMPQ4[64,64] = TMPQ3[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ4[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ4[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.286 SQDMLSL, SQDMLSL2 (by element) page C7-2653 line 154796 MATCH x0f007000/mask=xbf00f400
# CONSTRUCT x4f407000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$-@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@2
# AUNIT --inst x4f407000/mask=xffc0f400 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 01
:sqdmlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_31=0 & b_30=1 & b_2429=0b001111 & b_2223=0b01 & b_1215=0b0111 & b_10=0 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Rn_VPR128.8H & Rd_VPR128.4S & Zd
{
TMPD1 = Rn_VPR128.8H[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4)
TMPQ2[0,32] = sext(TMPD1[0,16]);
TMPQ2[32,32] = sext(TMPD1[16,16]);
TMPQ2[64,32] = sext(TMPD1[32,16]);
TMPQ2[96,32] = sext(TMPD1[48,16]);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp3:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp4:4 = sext(tmp3);
# simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4
TMPQ3[0,32] = TMPQ2[0,32] * tmp4;
TMPQ3[32,32] = TMPQ2[32,32] * tmp4;
TMPQ3[64,32] = TMPQ2[64,32] * tmp4;
TMPQ3[96,32] = TMPQ2[96,32] * tmp4;
# simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4
TMPQ4[0,32] = TMPQ3[0,32] * 2:4;
TMPQ4[32,32] = TMPQ3[32,32] * 2:4;
TMPQ4[64,32] = TMPQ3[64,32] * 2:4;
TMPQ4[96,32] = TMPQ3[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# NOTE(review): SQDMLSL (vector) forms below — saturation not modeled (AUNIT "nointsat").
# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2657 line 155015 MATCH x5e20b000/mask=xff20fc00
# CONSTRUCT x5ea0b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * &=-
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3
# AUNIT --inst x5ea0b000/mask=xffe0fc00 --status fail --comment "nointsat"
# scalar variant, size == 10 (always part == 0)
:sqdmlsl Rd_FPR64, Rn_FPR32, Rm_FPR32 is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd
{
local tmp1:8 = sext(Rn_FPR32);
local tmp2:8 = sext(Rm_FPR32);
local tmp3:8 = tmp1 * tmp2;
local tmp4:8 = tmp3 * 2:8;
Rd_FPR64 = Rd_FPR64 - tmp4;
zext_zd(Zd); # zero upper 24 bytes of Zd
}
# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2657 line 155015 MATCH x5e20b000/mask=xff20fc00
# CONSTRUCT x5e60b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * &=-
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3
# AUNIT --inst x5e60b000/mask=xffe0fc00 --status fail --comment "nointsat"
# scalar variant, size == 01 (always part == 0)
:sqdmlsl Rd_FPR32, Rn_FPR16, Rm_FPR16 is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd
{
local tmp1:4 = sext(Rn_FPR16);
local tmp2:4 = sext(Rm_FPR16);
local tmp3:4 = tmp1 * tmp2;
local tmp4:4 = tmp3 * 2:4;
Rd_FPR32 = Rd_FPR32 - tmp4;
zext_zs(Zd); # zero upper 28 bytes of Zd
}
# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2657 line 155015 MATCH x0e20b000/mask=xbf20fc00
# CONSTRUCT x0ea0b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 $*@8 2:8 $* &=$-@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3
# AUNIT --inst x0ea0b000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q == 0, size == 10
:sqdmlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rn_VPR64.2S & Rd_VPR128.2D & Rm_VPR64.2S & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8)
TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]);
TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]);
# simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8)
TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]);
TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]);
# simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8
TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64];
TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64];
# simd infix TMPQ4 = TMPQ3 * 2:8 on lane size 8
TMPQ4[0,64] = TMPQ3[0,64] * 2:8;
TMPQ4[64,64] = TMPQ3[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ4 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ4[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ4[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2657 line 155015 MATCH x0e20b000/mask=xbf20fc00
# CONSTRUCT x0e60b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 $*@4 2:4 $* &=$-@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@2
# AUNIT --inst x0e60b000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q = 0, size == 01
:sqdmlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rn_VPR64.4H & Rd_VPR128.4S & Rm_VPR64.4H & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4)
TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]);
TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]);
TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]);
TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]);
# simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4)
TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]);
TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]);
TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]);
TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]);
# simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4
TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32];
TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32];
TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32];
TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32];
# simd infix TMPQ4 = TMPQ3 * 2:4 on lane size 4
TMPQ4[0,32] = TMPQ3[0,32] * 2:4;
TMPQ4[32,32] = TMPQ3[32,32] * 2:4;
TMPQ4[64,32] = TMPQ3[64,32] * 2:4;
TMPQ4[96,32] = TMPQ3[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ4 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ4[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ4[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ4[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ4[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2657 line 155015 MATCH x0e20b000/mask=xbf20fc00
# CONSTRUCT x4ea0b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 2:8 $* &=$-@8
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@4
# AUNIT --inst x4ea0b000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 10
# sqdmlsl2 reads the upper 64 bits of both Rn and Rm (second-part form)
:sqdmlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_21=1 & b_1015=0b101100 & Rn_VPR128.4S & Rd_VPR128.2D & Rm_VPR128.4S & Zd
{
TMPD1 = Rn_VPR128.4S[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8)
TMPQ2[0,64] = sext(TMPD1[0,32]);
TMPQ2[64,64] = sext(TMPD1[32,32]);
TMPD3 = Rm_VPR128.4S[64,64];
# simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8)
TMPQ4[0,64] = sext(TMPD3[0,32]);
TMPQ4[64,64] = sext(TMPD3[32,32]);
# simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8
TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64];
TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64];
# simd infix TMPQ6 = TMPQ5 * 2:8 on lane size 8
TMPQ6[0,64] = TMPQ5[0,64] * 2:8;
TMPQ6[64,64] = TMPQ5[64,64] * 2:8;
# simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ6 on lane size 8
Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ6[0,64];
Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ6[64,64];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.287 SQDMLSL, SQDMLSL2 (vector) page C7-2657 line 155015 MATCH x0e20b000/mask=xbf20fc00
# CONSTRUCT x4e60b000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 2:4 $* &=$-@4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl2/3@2
# AUNIT --inst x4e60b000/mask=xffe0fc00 --status fail --comment "ext nointsat"
# vector variant, Q = 1, size == 01
:sqdmlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_21=1 & b_1015=0b101100 & Rn_VPR128.8H & Rd_VPR128.4S & Rm_VPR128.8H & Zd
{
TMPD1 = Rn_VPR128.8H[64,64];
# simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4)
TMPQ2[0,32] = sext(TMPD1[0,16]);
TMPQ2[32,32] = sext(TMPD1[16,16]);
TMPQ2[64,32] = sext(TMPD1[32,16]);
TMPQ2[96,32] = sext(TMPD1[48,16]);
TMPD3 = Rm_VPR128.8H[64,64];
# simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4)
TMPQ4[0,32] = sext(TMPD3[0,16]);
TMPQ4[32,32] = sext(TMPD3[16,16]);
TMPQ4[64,32] = sext(TMPD3[32,16]);
TMPQ4[96,32] = sext(TMPD3[48,16]);
# simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4
TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32];
TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32];
TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32];
TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32];
# simd infix TMPQ6 = TMPQ5 * 2:4 on lane size 4
TMPQ6[0,32] = TMPQ5[0,32] * 2:4;
TMPQ6[32,32] = TMPQ5[32,32] * 2:4;
TMPQ6[64,32] = TMPQ5[64,32] * 2:4;
TMPQ6[96,32] = TMPQ5[96,32] * 2:4;
# simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ6 on lane size 4
Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ6[0,32];
Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ6[32,32];
Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ6[64,32];
Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ6[96,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# NOTE(review): SQDMULH section — each lane is widened, multiplied, doubled, and the
# shuffle then keeps the HIGH half of every doubled product (odd sub-lanes), i.e.
# (2*a*b) >> lane-bits. Saturation is not modeled ("nointsat"). The SMACRO(pseudo)
# tag on the first variant says NEON_sqdmlsl — looks like a generator copy/paste
# slip; the semantics below are sqdmulh.
# C7.2.288 SQDMULH (by element) page C7-2660 line 155188 MATCH x0f00c000/mask=xbf00f400
# CONSTRUCT x0f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 $* &=$shuffle@1-0@3-1:4
# SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_sqdmlsl/3@4
# AUNIT --inst x0f80c000/mask=xffc0f400 --status pass --comment "ext nointsat"
:sqdmulh Rd_VPR64.2S, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xc & b_1010=0 & Rn_VPR64.2S & Rd_VPR64.2S & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8)
TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]);
TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp2:4 = Re_VPR128.S.vIndex;
local tmp3:8 = sext(tmp2);
# simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8
TMPQ2[0,64] = TMPQ1[0,64] * tmp3;
TMPQ2[64,64] = TMPQ1[64,64] * tmp3;
# simd infix TMPQ3 = TMPQ2 * 2:8 on lane size 8
TMPQ3[0,64] = TMPQ2[0,64] * 2:8;
TMPQ3[64,64] = TMPQ2[64,64] * 2:8;
# simd shuffle Rd_VPR64.2S = TMPQ3 (@1-0@3-1) lane size 4
Rd_VPR64.2S[0,32] = TMPQ3[32,32];
Rd_VPR64.2S[32,32] = TMPQ3[96,32];
zext_zd(Zd); # zero upper 24 bytes of Zd
}
# C7.2.288 SQDMULH (by element) page C7-2660 line 155188 MATCH x0f00c000/mask=xbf00f400
# CONSTRUCT x0f40c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 $* &=$shuffle@1-0@3-1@5-2@7-3:2
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2
# AUNIT --inst x0f40c000/mask=xffc0f400 --status pass --comment "ext nointsat"
:sqdmulh Rd_VPR64.4H, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xc & b_1010=0 & Rn_VPR64.4H & Rd_VPR64.4H & Zd
{
# simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4)
TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]);
TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]);
TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]);
TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp3:4 = sext(tmp2);
# simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4
TMPQ2[0,32] = TMPQ1[0,32] * tmp3;
TMPQ2[32,32] = TMPQ1[32,32] * tmp3;
TMPQ2[64,32] = TMPQ1[64,32] * tmp3;
TMPQ2[96,32] = TMPQ1[96,32] * tmp3;
# simd infix TMPQ3 = TMPQ2 * 2:4 on lane size 4
TMPQ3[0,32] = TMPQ2[0,32] * 2:4;
TMPQ3[32,32] = TMPQ2[32,32] * 2:4;
TMPQ3[64,32] = TMPQ2[64,32] * 2:4;
TMPQ3[96,32] = TMPQ2[96,32] * 2:4;
# simd shuffle Rd_VPR64.4H = TMPQ3 (@1-0@3-1@5-2@7-3) lane size 2
Rd_VPR64.4H[0,16] = TMPQ3[16,16];
Rd_VPR64.4H[16,16] = TMPQ3[48,16];
Rd_VPR64.4H[32,16] = TMPQ3[80,16];
Rd_VPR64.4H[48,16] = TMPQ3[112,16];
zext_zd(Zd); # zero upper 24 bytes of Zd
}
# C7.2.288 SQDMULH (by element) page C7-2660 line 155188 MATCH x0f00c000/mask=xbf00f400
# CONSTRUCT x4f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@4:32 ARG3 sext:8 $* 2:8 $* &=$shuffle@1-0@3-1@5-2@7-3:4
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4
# AUNIT --inst x4f80c000/mask=xffc0f400 --status pass --comment "ext nointsat"
:sqdmulh Rd_VPR128.4S, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xc & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd
{
# simd resize TMPZ1 = sext(Rn_VPR128.4S) (lane size 4 to 8)
TMPZ1[0,64] = sext(Rn_VPR128.4S[0,32]);
TMPZ1[64,64] = sext(Rn_VPR128.4S[32,32]);
TMPZ1[128,64] = sext(Rn_VPR128.4S[64,32]);
TMPZ1[192,64] = sext(Rn_VPR128.4S[96,32]);
# simd element Re_VPR128.S[vIndex] lane size 4
local tmp2:4 = Re_VPR128.S.vIndex;
local tmp3:8 = sext(tmp2);
# simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 8
TMPZ2[0,64] = TMPZ1[0,64] * tmp3;
TMPZ2[64,64] = TMPZ1[64,64] * tmp3;
TMPZ2[128,64] = TMPZ1[128,64] * tmp3;
TMPZ2[192,64] = TMPZ1[192,64] * tmp3;
# simd infix TMPZ3 = TMPZ2 * 2:8 on lane size 8
TMPZ3[0,64] = TMPZ2[0,64] * 2:8;
TMPZ3[64,64] = TMPZ2[64,64] * 2:8;
TMPZ3[128,64] = TMPZ2[128,64] * 2:8;
TMPZ3[192,64] = TMPZ2[192,64] * 2:8;
# simd shuffle Rd_VPR128.4S = TMPZ3 (@1-0@3-1@5-2@7-3) lane size 4
Rd_VPR128.4S[0,32] = TMPZ3[32,32];
Rd_VPR128.4S[32,32] = TMPZ3[96,32];
Rd_VPR128.4S[64,32] = TMPZ3[160,32];
Rd_VPR128.4S[96,32] = TMPZ3[224,32];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.288 SQDMULH (by element) page C7-2660 line 155188 MATCH x0f00c000/mask=xbf00f400
# CONSTRUCT x4f40c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 $sext@2:32 ARG3 sext:4 $* 2:4 $* &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:2
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2
# AUNIT --inst x4f40c000/mask=xffc0f400 --status pass --comment "ext nointsat"
:sqdmulh Rd_VPR128.8H, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xc & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd
{
# simd resize TMPZ1 = sext(Rn_VPR128.8H) (lane size 2 to 4)
TMPZ1[0,32] = sext(Rn_VPR128.8H[0,16]);
TMPZ1[32,32] = sext(Rn_VPR128.8H[16,16]);
TMPZ1[64,32] = sext(Rn_VPR128.8H[32,16]);
TMPZ1[96,32] = sext(Rn_VPR128.8H[48,16]);
TMPZ1[128,32] = sext(Rn_VPR128.8H[64,16]);
TMPZ1[160,32] = sext(Rn_VPR128.8H[80,16]);
TMPZ1[192,32] = sext(Rn_VPR128.8H[96,16]);
TMPZ1[224,32] = sext(Rn_VPR128.8H[112,16]);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp3:4 = sext(tmp2);
# simd infix TMPZ2 = TMPZ1 * tmp3 on lane size 4
TMPZ2[0,32] = TMPZ1[0,32] * tmp3;
TMPZ2[32,32] = TMPZ1[32,32] * tmp3;
TMPZ2[64,32] = TMPZ1[64,32] * tmp3;
TMPZ2[96,32] = TMPZ1[96,32] * tmp3;
TMPZ2[128,32] = TMPZ1[128,32] * tmp3;
TMPZ2[160,32] = TMPZ1[160,32] * tmp3;
TMPZ2[192,32] = TMPZ1[192,32] * tmp3;
TMPZ2[224,32] = TMPZ1[224,32] * tmp3;
# simd infix TMPZ3 = TMPZ2 * 2:4 on lane size 4
TMPZ3[0,32] = TMPZ2[0,32] * 2:4;
TMPZ3[32,32] = TMPZ2[32,32] * 2:4;
TMPZ3[64,32] = TMPZ2[64,32] * 2:4;
TMPZ3[96,32] = TMPZ2[96,32] * 2:4;
TMPZ3[128,32] = TMPZ2[128,32] * 2:4;
TMPZ3[160,32] = TMPZ2[160,32] * 2:4;
TMPZ3[192,32] = TMPZ2[192,32] * 2:4;
TMPZ3[224,32] = TMPZ2[224,32] * 2:4;
# simd shuffle Rd_VPR128.8H = TMPZ3 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 2
Rd_VPR128.8H[0,16] = TMPZ3[16,16];
Rd_VPR128.8H[16,16] = TMPZ3[48,16];
Rd_VPR128.8H[32,16] = TMPZ3[80,16];
Rd_VPR128.8H[48,16] = TMPZ3[112,16];
Rd_VPR128.8H[64,16] = TMPZ3[144,16];
Rd_VPR128.8H[80,16] = TMPZ3[176,16];
Rd_VPR128.8H[96,16] = TMPZ3[208,16];
Rd_VPR128.8H[112,16] = TMPZ3[240,16];
zext_zq(Zd); # zero upper 16 bytes of Zd
}
# C7.2.288 SQDMULH (by element) page C7-2660 line 155188 MATCH x5f00c000/mask=xff00f400
# CONSTRUCT x5f40c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 * 16:1 >>:4 =
# SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2
# AUNIT --inst x5f40c000/mask=xffc0f400 --status pass --comment "nointsat"
# Scalar variant when size=01 suf=FPR16 elem=Re_VPR128Lo.H.vIndexHLM p1=Re_VPR128Lo.H p2=vIndexHLM
:sqdmulh Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1100 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd
{
local tmp1:4 = sext(Rn_FPR16);
# simd element Re_VPR128Lo.H[vIndexHLM] lane size 2
local tmp2:2 = Re_VPR128Lo.H.vIndexHLM;
local tmp3:4 = sext(tmp2);
local tmp4:4 = tmp1 * tmp3;
local tmp5:4 = tmp4 * 2:4;
local tmp6:4
= tmp5 >> 16:1; Rd_FPR16 = tmp6:2; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.288 SQDMULH (by element) page C7-2660 line 155188 MATCH x5f00c000/mask=xff00f400 # CONSTRUCT x5f80c000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * 32:1 >>:8 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 # AUNIT --inst x5f80c000/mask=xffc0f400 --status pass --comment "nointsat" # Scalar variant when size=10 suf=FPR32 elem=Re_VPR128.S.vIndex p1=Re_VPR128.S p2=vIndex :sqdmulh Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1100 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:8 = sext(Rn_FPR32); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; local tmp5:8 = tmp4 * 2:8; local tmp6:8 = tmp5 >> 32:1; Rd_FPR32 = tmp6:4; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.289 SQDMULH (vector) page C7-2663 line 155365 MATCH x5e20b400/mask=xff20fc00 # CONSTRUCT x5e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 # AUNIT --inst x5e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmulh Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x16 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqdmulh(Rn_FPR16, Rm_FPR16); } # C7.2.289 SQDMULH (vector) page C7-2663 line 155365 MATCH x5e20b400/mask=xff20fc00 # CONSTRUCT x5ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2 # AUNIT --inst x5ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmulh Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x16 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sqdmulh(Rn_FPR32, Rm_FPR32); } # C7.2.289 SQDMULH (vector) page C7-2663 line 155365 
MATCH x0e20b400/mask=xbf20fc00 # CONSTRUCT x0ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4 # AUNIT --inst x0ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmulh Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x16 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqdmulh(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.289 SQDMULH (vector) page C7-2663 line 155365 MATCH x0e20b400/mask=xbf20fc00 # CONSTRUCT x0e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2 # AUNIT --inst x0e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmulh Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x16 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqdmulh(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.289 SQDMULH (vector) page C7-2663 line 155365 MATCH x0e20b400/mask=xbf20fc00 # CONSTRUCT x4ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@4 # AUNIT --inst x4ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmulh Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x16 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqdmulh(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.289 SQDMULH (vector) page C7-2663 line 155365 MATCH x0e20b400/mask=xbf20fc00 # CONSTRUCT x4e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmulh/2@2 # AUNIT --inst x4e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmulh Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x16 & b_1010=1 & Rn_VPR128.8H & 
Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqdmulh(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2665 line 155494 MATCH x0f00b000/mask=xbf00f400 # CONSTRUCT x0f80b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 sext:8 $* 2:8 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2@4 # AUNIT --inst x0f80b000/mask=xffc0f400 --status pass --comment "ext nointsat" :sqdmull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xb & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 TMPQ2[0,64] = TMPQ1[0,64] * tmp3; TMPQ2[64,64] = TMPQ1[64,64] * tmp3; # simd infix Rd_VPR128.2D = TMPQ2 * 2:8 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] * 2:8; Rd_VPR128.2D[64,64] = TMPQ2[64,64] * 2:8; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2665 line 155494 MATCH x0f00b000/mask=xbf00f400 # CONSTRUCT x4f80b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 sext:8 $* 2:8 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@4 # AUNIT --inst x4f80b000/mask=xffc0f400 --status pass --comment "ext nointsat" :sqdmull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xb & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local 
tmp4:8 = sext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 TMPQ3[0,64] = TMPQ2[0,64] * tmp4; TMPQ3[64,64] = TMPQ2[64,64] * tmp4; # simd infix Rd_VPR128.2D = TMPQ3 * 2:8 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ3[0,64] * 2:8; Rd_VPR128.2D[64,64] = TMPQ3[64,64] * 2:8; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2665 line 155494 MATCH x0f00b000/mask=xbf00f400 # CONSTRUCT x0f40b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 sext:4 $* 2:4 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@2 # AUNIT --inst x0f40b000/mask=xffc0f400 --status pass --comment "ext nointsat" :sqdmull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xb & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 TMPQ2[0,32] = TMPQ1[0,32] * tmp3; TMPQ2[32,32] = TMPQ1[32,32] * tmp3; TMPQ2[64,32] = TMPQ1[64,32] * tmp3; TMPQ2[96,32] = TMPQ1[96,32] * tmp3; # simd infix Rd_VPR128.4S = TMPQ2 * 2:4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] * 2:4; Rd_VPR128.4S[32,32] = TMPQ2[32,32] * 2:4; Rd_VPR128.4S[64,32] = TMPQ2[64,32] * 2:4; Rd_VPR128.4S[96,32] = TMPQ2[96,32] * 2:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2665 line 155494 MATCH x0f00b000/mask=xbf00f400 # CONSTRUCT x4f40b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 sext:4 $* 2:4 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@2 # AUNIT --inst 
x4f40b000/mask=xffc0f400 --status pass --comment "ext nointsat" :sqdmull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=0 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xb & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = sext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 TMPQ3[0,32] = TMPQ2[0,32] * tmp4; TMPQ3[32,32] = TMPQ2[32,32] * tmp4; TMPQ3[64,32] = TMPQ2[64,32] * tmp4; TMPQ3[96,32] = TMPQ2[96,32] * tmp4; # simd infix Rd_VPR128.4S = TMPQ3 * 2:4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ3[0,32] * 2:4; Rd_VPR128.4S[32,32] = TMPQ3[32,32] * 2:4; Rd_VPR128.4S[64,32] = TMPQ3[64,32] * 2:4; Rd_VPR128.4S[96,32] = TMPQ3[96,32] * 2:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 (by element) page C7-2665 line 155494 MATCH x5f00b000/mask=xff00f400 # CONSTRUCT x5f40b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 =* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 # AUNIT --inst x5f40b000/mask=xffc0f400 --status pass --comment "nointsat" # Scalar variant when size=01 Va=FPR32 Vb=FPR16 elem=Re_VPR128Lo.H.vIndexHLM p1=Re_VPR128Lo.H p2=vIndexHLM :sqdmull Rd_FPR32, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1011 & b_10=0 & Rd_FPR32 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { local tmp1:4 = sext(Rn_FPR16); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); local tmp4:4 = tmp1 * tmp3; Rd_FPR32 = tmp4 * 2:4; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.290 SQDMULL, SQDMULL2 
(by element) page C7-2665 line 155494 MATCH x5f00b000/mask=xff00f400 # CONSTRUCT x5f80b000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 =* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 # AUNIT --inst x5f80b000/mask=xffc0f400 --status pass --comment "nointsat" # Scalar variant when size=10 Va=FPR64 Vb=FPR32 elem=Re_VPR128.S.vIndex p1=Re_VPR128.S p2=vIndex :sqdmull Rd_FPR64, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1011 & b_10=0 & Rd_FPR64 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:8 = sext(Rn_FPR32); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; Rd_FPR64 = tmp4 * 2:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2668 line 155694 MATCH x0e20d000/mask=xbf20fc00 # CONSTRUCT x4ea0d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 $*@8 2:8 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@4 # AUNIT --inst x4ea0d000/mask=xffe0fc00 --status pass --comment "ext nointsat" :sqdmull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xd & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = TMPQ5 * 2:8 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ5[0,64] * 2:8; Rd_VPR128.2D[64,64] = TMPQ5[64,64] * 2:8; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.291 
SQDMULL, SQDMULL2 (vector) page C7-2668 line 155694 MATCH x0e20d000/mask=xbf20fc00 # CONSTRUCT x4e60d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 $*@4 2:4 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull2/2@2 # AUNIT --inst x4e60d000/mask=xffe0fc00 --status pass --comment "ext nointsat" :sqdmull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xd & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = TMPQ5 * 2:4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ5[0,32] * 2:4; Rd_VPR128.4S[32,32] = TMPQ5[32,32] * 2:4; Rd_VPR128.4S[64,32] = TMPQ5[64,32] * 2:4; Rd_VPR128.4S[96,32] = TMPQ5[96,32] * 2:4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2668 line 155694 MATCH x0e20d000/mask=xbf20fc00 # CONSTRUCT x0ea0d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2@4 # AUNIT --inst x0ea0d000/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xd & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqdmull(Rn_VPR64.2S, Rm_VPR64.2S, 
4:1); } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2668 line 155694 MATCH x0e20d000/mask=xbf20fc00 # CONSTRUCT x0e60d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2@2 # AUNIT --inst x0e60d000/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqdmull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xd & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqdmull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2668 line 155694 MATCH x5e20d000/mask=xff20fc00 # CONSTRUCT x5e60d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 sext:4 ARG3 sext:4 * 2:4 =* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 # AUNIT --inst x5e60d000/mask=xffe0fc00 --status pass --comment "nointsat" # Scalar variant when size=01 Va=FPR32 Vb=FPR16 :sqdmull Rd_FPR32, Rn_FPR16, Rm_FPR16 is b_2431=0b01011110 & b_2223=0b01 & b_21=1 & b_1015=0b110100 & Rd_FPR32 & Rn_FPR16 & Rm_FPR16 & Zd { local tmp1:4 = sext(Rn_FPR16); local tmp2:4 = sext(Rm_FPR16); local tmp3:4 = tmp1 * tmp2; Rd_FPR32 = tmp3 * 2:4; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.291 SQDMULL, SQDMULL2 (vector) page C7-2668 line 155694 MATCH x5e20d000/mask=xff20fc00 # CONSTRUCT x5ea0d000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 =* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqdmull/2 # AUNIT --inst x5ea0d000/mask=xffe0fc00 --status pass --comment "nointsat" # Scalar variant when size=10 Va=FPR64 Vb=FPR32 :sqdmull Rd_FPR64, Rn_FPR32, Rm_FPR32 is b_2431=0b01011110 & b_2223=0b10 & b_21=1 & b_1015=0b110100 & Rd_FPR64 & Rn_FPR32 & Rm_FPR32 & Zd { local tmp1:8 = sext(Rn_FPR32); local tmp2:8 = sext(Rm_FPR32); local tmp3:8 = tmp1 * tmp2; Rd_FPR64 = tmp3 * 2:8; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.292 SQNEG page C7-2671 line 155857 MATCH x7e207800/mask=xff3ffc00 # CONSTRUCT 
x7e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =2comp # SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1 # AUNIT --inst x7e207800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size=00 Q=1 aa=1 suf=FPR8 :sqneg Rd_FPR8, Rn_FPR8 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_FPR8 & Rn_FPR8 & Zd { Rd_FPR8 = - Rn_FPR8; zext_zb(Zd); # zero upper 31 bytes of Zd } # C7.2.292 SQNEG page C7-2671 line 155857 MATCH x7e207800/mask=xff3ffc00 # CONSTRUCT x7e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =2comp # SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1 # AUNIT --inst x7e607800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size=01 Q=1 aa=1 suf=FPR16 :sqneg Rd_FPR16, Rn_FPR16 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = - Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.292 SQNEG page C7-2671 line 155857 MATCH x7e207800/mask=xff3ffc00 # CONSTRUCT x7ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =2comp # SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1 # AUNIT --inst x7ea07800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size=10 Q=1 aa=1 suf=FPR32 :sqneg Rd_FPR32, Rn_FPR32 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = - Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.292 SQNEG page C7-2671 line 155857 MATCH x7e207800/mask=xff3ffc00 # CONSTRUCT x7ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =2comp # SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1 # AUNIT --inst x7ee07800/mask=xfffffc00 --status pass --comment "nointsat" # Scalar variant when size=11 Q=1 aa=1 suf=FPR64 :sqneg Rd_FPR64, Rn_FPR64 is b_31=0 & Q=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = - Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } 
# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x2e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@1
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@1
# AUNIT --inst x2e207800/mask=xfffffc00 --status fail --comment "nointsat"
# Vector variant when size = 00 , Q = 0 aa=0 esize=1 suf=VPR64.8B
# NOTE(review): per the "nointsat" markers above and below, these SQNEG
# models negate each lane with plain two's-complement negation; the
# architectural signed saturation of INT_MIN is not modeled.
:sqneg Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd
{
	# simd unary Rd_VPR64.8B = -(Rn_VPR64.8B) on lane size 1
	Rd_VPR64.8B[0,8] = -(Rn_VPR64.8B[0,8]);
	Rd_VPR64.8B[8,8] = -(Rn_VPR64.8B[8,8]);
	Rd_VPR64.8B[16,8] = -(Rn_VPR64.8B[16,8]);
	Rd_VPR64.8B[24,8] = -(Rn_VPR64.8B[24,8]);
	Rd_VPR64.8B[32,8] = -(Rn_VPR64.8B[32,8]);
	Rd_VPR64.8B[40,8] = -(Rn_VPR64.8B[40,8]);
	Rd_VPR64.8B[48,8] = -(Rn_VPR64.8B[48,8]);
	Rd_VPR64.8B[56,8] = -(Rn_VPR64.8B[56,8]);
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x6e207800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@1
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@1
# AUNIT --inst x6e207800/mask=xfffffc00 --status fail --comment "nointsat"
# Vector variant when size = 00 , Q = 1 aa=0 esize=1 suf=VPR128.16B
:sqneg Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000011110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd
{
	# simd unary Rd_VPR128.16B = -(Rn_VPR128.16B) on lane size 1
	Rd_VPR128.16B[0,8] = -(Rn_VPR128.16B[0,8]);
	Rd_VPR128.16B[8,8] = -(Rn_VPR128.16B[8,8]);
	Rd_VPR128.16B[16,8] = -(Rn_VPR128.16B[16,8]);
	Rd_VPR128.16B[24,8] = -(Rn_VPR128.16B[24,8]);
	Rd_VPR128.16B[32,8] = -(Rn_VPR128.16B[32,8]);
	Rd_VPR128.16B[40,8] = -(Rn_VPR128.16B[40,8]);
	Rd_VPR128.16B[48,8] = -(Rn_VPR128.16B[48,8]);
	Rd_VPR128.16B[56,8] = -(Rn_VPR128.16B[56,8]);
	Rd_VPR128.16B[64,8] = -(Rn_VPR128.16B[64,8]);
	Rd_VPR128.16B[72,8] = -(Rn_VPR128.16B[72,8]);
	Rd_VPR128.16B[80,8] = -(Rn_VPR128.16B[80,8]);
	Rd_VPR128.16B[88,8] = -(Rn_VPR128.16B[88,8]);
	Rd_VPR128.16B[96,8] = -(Rn_VPR128.16B[96,8]);
	Rd_VPR128.16B[104,8] = -(Rn_VPR128.16B[104,8]);
	Rd_VPR128.16B[112,8] = -(Rn_VPR128.16B[112,8]);
	Rd_VPR128.16B[120,8] = -(Rn_VPR128.16B[120,8]);
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x2e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@2
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@2
# AUNIT --inst x2e607800/mask=xfffffc00 --status pass --comment "nointsat"
# Vector variant when size = 01 , Q = 0 aa=0 esize=2 suf=VPR64.4H
:sqneg Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd
{
	# simd unary Rd_VPR64.4H = -(Rn_VPR64.4H) on lane size 2
	Rd_VPR64.4H[0,16] = -(Rn_VPR64.4H[0,16]);
	Rd_VPR64.4H[16,16] = -(Rn_VPR64.4H[16,16]);
	Rd_VPR64.4H[32,16] = -(Rn_VPR64.4H[32,16]);
	Rd_VPR64.4H[48,16] = -(Rn_VPR64.4H[48,16]);
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x6e607800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@2
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@2
# AUNIT --inst x6e607800/mask=xfffffc00 --status pass --comment "nointsat"
# Vector variant when size = 01 , Q = 1 aa=0 esize=2 suf=VPR128.8H
:sqneg Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000011110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd
{
	# simd unary Rd_VPR128.8H = -(Rn_VPR128.8H) on lane size 2
	Rd_VPR128.8H[0,16] = -(Rn_VPR128.8H[0,16]);
	Rd_VPR128.8H[16,16] = -(Rn_VPR128.8H[16,16]);
	Rd_VPR128.8H[32,16] = -(Rn_VPR128.8H[32,16]);
	Rd_VPR128.8H[48,16] = -(Rn_VPR128.8H[48,16]);
	Rd_VPR128.8H[64,16] = -(Rn_VPR128.8H[64,16]);
	Rd_VPR128.8H[80,16] = -(Rn_VPR128.8H[80,16]);
	Rd_VPR128.8H[96,16] = -(Rn_VPR128.8H[96,16]);
	Rd_VPR128.8H[112,16] = -(Rn_VPR128.8H[112,16]);
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x2ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@4
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@4
# AUNIT --inst x2ea07800/mask=xfffffc00 --status pass --comment "nointsat"
# Vector variant when size = 10 , Q = 0 aa=0 esize=4 suf=VPR64.2S
:sqneg Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & Q=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd
{
	# simd unary Rd_VPR64.2S = -(Rn_VPR64.2S) on lane size 4
	Rd_VPR64.2S[0,32] = -(Rn_VPR64.2S[0,32]);
	Rd_VPR64.2S[32,32] = -(Rn_VPR64.2S[32,32]);
	zext_zd(Zd); # zero upper 24 bytes of Zd
}

# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x6ea07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@4
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@4
# AUNIT --inst x6ea07800/mask=xfffffc00 --status pass --comment "nointsat"
# Vector variant when size = 10 , Q = 1 aa=0 esize=4 suf=VPR128.4S
:sqneg Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000011110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd
{
	# simd unary Rd_VPR128.4S = -(Rn_VPR128.4S) on lane size 4
	Rd_VPR128.4S[0,32] = -(Rn_VPR128.4S[0,32]);
	Rd_VPR128.4S[32,32] = -(Rn_VPR128.4S[32,32]);
	Rd_VPR128.4S[64,32] = -(Rn_VPR128.4S[64,32]);
	Rd_VPR128.4S[96,32] = -(Rn_VPR128.4S[96,32]);
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.292 SQNEG page C7-2671 line 155857 MATCH x2e207800/mask=xbf3ffc00
# CONSTRUCT x6ee07800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =$2comp@8
# SMACRO(pseudo) ARG1 ARG2 =NEON_sqneg/1@8
# AUNIT --inst x6ee07800/mask=xfffffc00 --status pass --comment "nointsat"
# Vector variant when size = 11 , Q = 1 aa=0 esize=8 suf=VPR128.2D
:sqneg Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & Q=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000011110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd
{
	# simd unary Rd_VPR128.2D = -(Rn_VPR128.2D) on lane size 8
	Rd_VPR128.2D[0,64] = -(Rn_VPR128.2D[0,64]);
	Rd_VPR128.2D[64,64] = -(Rn_VPR128.2D[64,64]);
	zext_zq(Zd); # zero upper 16 bytes of Zd
}

# C7.2.277 SQRDMLAH (by element) page C7-1598 line 92254 KEEPWITH
# Integer saturating instruction (not implemented)
#
# Decode helpers shared by the SQRDMLAH/SQRDMLSH family.
# sqrdml_subop selects the mnemonic suffix ("ah"/"sh") from the relevant
# encoding bit (b_11 for the b_24=0 forms, b_13 for the b_24=1 forms) and
# exports 0 (ah) or 1 (sh) so the semantics can branch on it.
sqrdml_subop: "ah" is b_24=0 & b_11=0 { export 0:1; }
sqrdml_subop: "ah" is b_24=1 & b_13=0 { export 0:1; }
sqrdml_subop: "sh" is b_24=0 & b_11=1 { export 1:1; }
sqrdml_subop: "sh" is b_24=1 & b_13=1 { export 1:1; }
# sqrdml_esize exports the element size in bits (16 for .h, 32 for .s),
# decoded from the size field b_2223.
sqrdml_esize: "h" is b_2223=0b01 { export 16:1; }
sqrdml_esize: "s" is b_2223=0b10 { export 32:1; }
# sqrdml_elements exports the lane count implied by size (b_2223) and Q (b_30).
sqrdml_elements: "4h" is b_2223=0b01 & b_30=0 { export 4:1; }
sqrdml_elements: "8h" is b_2223=0b01 & b_30=1 { export 8:1; }
sqrdml_elements: "2s" is b_2223=0b10 & b_30=0 { export 2:1; }
sqrdml_elements: "4s" is b_2223=0b10 & b_30=1 { export 4:1; }
# sqrdml_index computes the Vm element index from b_11/b_21/b_20
# (presumably the H:L:M index bits of the by-element encoding — confirm
# against the ARM encoding diagram) and exports it as a constant.
sqrdml_index: val is b_2223=0b01 & b_21 & b_20 & b_11 [ val = b_11 * 4 + b_21 * 2 + b_20; ] { export * [const]:1 val; }
sqrdml_index: val is b_2223=0b10 & b_21 & b_11 [ val = b_11 * 2 + b_21; ] { export * [const]:1 val; }
# We could be more specific about the size of the register, which
# depends on the variant and Q (b_30). For now, I've just made them
# all 128 bits.
sqrdml_vd: Rd_FPR16 is b_28=1 & b_2223=0b01 & Rd_FPR16 & Rd_VPR128 { export Rd_VPR128; } sqrdml_vd: Rd_FPR32 is b_28=1 & b_2223=0b10 & Rd_FPR32 & Rd_VPR128 { export Rd_VPR128; } sqrdml_vd: vRd_VPR128^"."^sqrdml_elements is b_28=0 & vRd_VPR128 & Rd_VPR128 & sqrdml_elements { export Rd_VPR128; } sqrdml_vn: Rn_FPR16 is b_28=1 & b_2223=0b01 & Rn_FPR16 & Rn_VPR128 { export Rn_VPR128; } sqrdml_vn: Rn_FPR32 is b_28=1 & b_2223=0b10 & Rn_FPR32 & Rn_VPR128 { export Rn_VPR128; } sqrdml_vn: vRn_VPR128^"."^sqrdml_elements is b_28=0 & vRn_VPR128 & Rn_VPR128 & sqrdml_elements { export Rn_VPR128; } # Decode Vm (in some cases) depending on size # cases 34.1, 36.1 sqrdml_vm: Rm_FPR16 is b_28=1 & b_24=0 & b_2223=0b01 & Rm_FPR16 & Rm_VPR128 { export Rm_VPR128; } sqrdml_vm: Rm_FPR32 is b_28=1 & b_24=0 & b_2223=0b10 & Rm_FPR32 & Rm_VPR128 { export Rm_VPR128; } # cases 34.2, 36.2 sqrdml_vm: vRm_VPR128^"."^sqrdml_elements is b_28=0 & b_24=0 & vRm_VPR128 & sqrdml_elements & Rm_VPR128 { export Rm_VPR128; } sqrdml_vmlo: vRm_VPR128Lo is b_2223=0b01 & vRm_VPR128Lo & Rm_VPR128Lo { export Rm_VPR128Lo; } sqrdml_vmlo: vRm_VPR128 is b_2223=0b10 & vRm_VPR128 & Rm_VPR128 { export Rm_VPR128; } # cases 33, 35 sqrdml_vm: sqrdml_vmlo^"."^sqrdml_esize[sqrdml_index] is b_24=1 & sqrdml_vmlo & sqrdml_esize & sqrdml_index { export sqrdml_vmlo; } # SQRDML(Vd, Vn, Vm, esize, elements, subop[, index]) # # performs the SQRDML operation # # Vd[e] = SignedSatQ(Vd[e]<>:4 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2 # AUNIT --inst x5f40d000/mask=xffc0f400 --status fail --comment "nointround nointsat" # Scalar variant when size=01 suf=FPR16 elem elem=Re_VPR128Lo.H.vIndexHLM p1=Re_VPR128Lo.H p2=vIndexHLM :sqrdmulh Rd_FPR16, Rn_FPR16, Re_VPR128Lo.H.vIndexHLM is b_2431=0b01011111 & b_2223=0b01 & b_1215=0b1101 & b_10=0 & Rd_FPR16 & Rn_FPR16 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & Zd { local tmp1:4 = sext(Rn_FPR16); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = 
Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = sext(tmp2); local tmp4:4 = tmp1 * tmp3; local tmp5:4 = tmp4 * 2:4; local tmp6:4 = tmp5 >> 16:4; Rd_FPR16 = tmp6:2; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.297 SQRDMULH (by element) page C7-2685 line 156680 MATCH x5f00d000/mask=xff00f400 # CONSTRUCT x5f80d000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 sext:8 ARG3 sext:8 * 2:8 * 32:8 >>:8 = # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2 # AUNIT --inst x5f80d000/mask=xffc0f400 --status fail --comment "nointround nointsat" # Scalar variant when size=10 suf=FPR32 elem=Re_VPR128.S.vIndex p1=Re_VPR128.S p2=vIndex :sqrdmulh Rd_FPR32, Rn_FPR32, Re_VPR128.S.vIndex is b_2431=0b01011111 & b_2223=0b10 & b_1215=0b1101 & b_10=0 & Rd_FPR32 & Rn_FPR32 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:8 = sext(Rn_FPR32); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = sext(tmp2); local tmp4:8 = tmp1 * tmp3; local tmp5:8 = tmp4 * 2:8; local tmp6:8 = tmp5 >> 32:8; Rd_FPR32 = tmp6:4; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.298 SQRDMULH (vector) page C7-2688 line 156859 MATCH x7e20b400/mask=xff20fc00 # CONSTRUCT x7e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2 # AUNIT --inst x7e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrdmulh Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x16 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqrdmulh(Rn_FPR16, Rm_FPR16); } # C7.2.298 SQRDMULH (vector) page C7-2688 line 156859 MATCH x7e20b400/mask=xff20fc00 # CONSTRUCT x7ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2 # AUNIT --inst x7ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrdmulh Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & 
Rm_FPR32 & b_1115=0x16 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sqrdmulh(Rn_FPR32, Rm_FPR32); } # C7.2.298 SQRDMULH (vector) page C7-2688 line 156859 MATCH x2e20b400/mask=xbf20fc00 # CONSTRUCT x2ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@4 # AUNIT --inst x2ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrdmulh Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x16 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqrdmulh(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.298 SQRDMULH (vector) page C7-2688 line 156859 MATCH x2e20b400/mask=xbf20fc00 # CONSTRUCT x2e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@2 # AUNIT --inst x2e60b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrdmulh Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x16 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqrdmulh(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.298 SQRDMULH (vector) page C7-2688 line 156859 MATCH x2e20b400/mask=xbf20fc00 # CONSTRUCT x6ea0b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@4 # AUNIT --inst x6ea0b400/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrdmulh Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x16 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqrdmulh(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.298 SQRDMULH (vector) page C7-2688 line 156859 MATCH x2e20b400/mask=xbf20fc00 # CONSTRUCT x6e60b400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrdmulh/2@2 # AUNIT --inst x6e60b400/mask=xffe0fc00 --status 
nopcodeop --comment "nointround nointsat" :sqrdmulh Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x16 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqrdmulh(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x5e205c00/mask=xff20fc00 # CONSTRUCT x5e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 # AUNIT --inst x5e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0xb & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sqrshl(Rn_FPR8, Rm_FPR8); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x5e205c00/mask=xff20fc00 # CONSTRUCT x5ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 # AUNIT --inst x5ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xb & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_sqrshl(Rn_FPR64, Rm_FPR64); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x5e205c00/mask=xff20fc00 # CONSTRUCT x5e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 # AUNIT --inst x5e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0xb & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqrshl(Rn_FPR16, Rm_FPR16); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x5e205c00/mask=xff20fc00 # CONSTRUCT x5ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2 # AUNIT --inst x5ea05c00/mask=xffe0fc00 --status nopcodeop 
--comment "nointround nointsat" :sqrshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0xb & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sqrshl(Rn_FPR32, Rm_FPR32); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x4e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@1 # AUNIT --inst x4e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xb & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqrshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x4ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@8 # AUNIT --inst x4ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xb & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqrshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x0ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@4 # AUNIT --inst x0ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xb & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqrshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x0e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) 
ARG1 ARG2 ARG3 =NEON_sqrshl/2@2 # AUNIT --inst x0e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xb & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqrshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x4ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@4 # AUNIT --inst x4ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xb & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqrshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x0e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@1 # AUNIT --inst x0e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xb & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqrshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.299 SQRSHL page C7-2690 line 156988 MATCH x0e205c00/mask=xbf20fc00 # CONSTRUCT x4e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqrshl/2@2 # AUNIT --inst x4e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xb & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqrshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page 
C7-2692 line 157131 MATCH x0f009c00/mask=xbf80fc00 # CONSTRUCT x4f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@2 # AUNIT --inst x4f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqrshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x0f009c00/mask=xbf80fc00 # CONSTRUCT x0f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3@8 # AUNIT --inst x0f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqrshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x0f009c00/mask=xbf80fc00 # CONSTRUCT x0f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3@4 # AUNIT --inst x0f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqrshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x0f009c00/mask=xbf80fc00 # CONSTRUCT x4f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@8 # AUNIT --inst x4f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & 
b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqrshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x0f009c00/mask=xbf80fc00 # CONSTRUCT x0f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3@2 # AUNIT --inst x0f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqrshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x0f009c00/mask=xbf80fc00 # CONSTRUCT x4f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn2/3@4 # AUNIT --inst x4f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqrshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x5f009c00/mask=xff80fc00 # CONSTRUCT x5f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3 # AUNIT --inst x5f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :sqrshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_sqrshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x5f009c00/mask=xff80fc00 # CONSTRUCT x5f109c00/mask=xfff0fc00 MATCHED
1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3 # AUNIT --inst x5f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :sqrshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_sqrshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); } # C7.2.300 SQRSHRN, SQRSHRN2 page C7-2692 line 157131 MATCH x5f009c00/mask=xff80fc00 # CONSTRUCT x5f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrn/3 # AUNIT --inst x5f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :sqrshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_sqrshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x2f008c00/mask=xbf80fc00 # CONSTRUCT x6f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun2/3@2 # AUNIT --inst x6f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrun2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqrshrun2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x2f008c00/mask=xbf80fc00 # CONSTRUCT x2f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3@8 # AUNIT --inst x2f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrun Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & 
b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqrshrun(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x2f008c00/mask=xbf80fc00 # CONSTRUCT x2f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3@4 # AUNIT --inst x2f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrun Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqrshrun(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x2f008c00/mask=xbf80fc00 # CONSTRUCT x6f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun2/3@8 # AUNIT --inst x6f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrun2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x11 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqrshrun2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x2f008c00/mask=xbf80fc00 # CONSTRUCT x2f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3@2 # AUNIT --inst x2f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrun Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x11 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqrshrun(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x2f008c00/mask=xbf80fc00 # CONSTRUCT x6f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun2/3@4 # AUNIT --inst x6f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" :sqrshrun2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x11 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqrshrun2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x7f008c00/mask=xff80fc00 # CONSTRUCT x7f088c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3 # AUNIT --inst x7f088c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :sqrshrun Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100011 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_sqrshrun(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x7f008c00/mask=xff80fc00 # CONSTRUCT x7f108c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3 # AUNIT --inst x7f108c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :sqrshrun Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100011 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_sqrshrun(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); } # C7.2.301 SQRSHRUN, SQRSHRUN2 page C7-2695 line 157316 MATCH x7f008c00/mask=xff80fc00 # CONSTRUCT x7f208c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqrshrun/3 # AUNIT --inst x7f208c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :sqrshrun Rd_FPR32, Rn_FPR64, 
Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100011 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_sqrshrun(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x4f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@1 # AUNIT --inst x4f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqshl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x4f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@8 # AUNIT --inst x4f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqshl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x0f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@4 # AUNIT --inst x0f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqshl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x0f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@2 # AUNIT --inst x0f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" 
:sqshl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqshl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x4f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@4 # AUNIT --inst x4f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqshl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x0f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@1 # AUNIT --inst x0f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqshl(Rn_VPR64.8B, Imm_uimm3:1, 1:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x0f007400/mask=xbf80fc00 # CONSTRUCT x4f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2@2 # AUNIT --inst x4f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqshl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x5f007400/mask=xff80fc00 # CONSTRUCT x5f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 # AUNIT --inst x5f087400/mask=xfff8fc00 --status 
nopcodeop --comment "nointsat" # Scalar variant when immh=0001 V=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :sqshl Rd_FPR8, Rn_FPR8, Imm_shr_imm8 is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_sqshl(Rn_FPR8, Imm_shr_imm8:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x5f007400/mask=xff80fc00 # CONSTRUCT x5f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 # AUNIT --inst x5f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=001x V=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :sqshl Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_sqshl(Rn_FPR16, Imm_shr_imm16:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x5f007400/mask=xff80fc00 # CONSTRUCT x5f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 # AUNIT --inst x5f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=01xx V=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :sqshl Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_sqshl(Rn_FPR32, Imm_shr_imm32:1); } # C7.2.302 SQSHL (immediate) page C7-2698 line 157500 MATCH x5f007400/mask=xff80fc00 # CONSTRUCT x5f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshl/2 # AUNIT --inst x5f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=1xxx V=FPR64 imm=Imm_shr_imm64 bb=b_22 aa=1 :sqshl Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { Rd_FPR64 = NEON_sqshl(Rn_FPR64, Imm_shr_imm64:1); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH 
x5e204c00/mask=xff20fc00 # CONSTRUCT x5e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 # AUNIT --inst x5e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x9 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sqshl(Rn_FPR8, Rm_FPR8); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x5e204c00/mask=xff20fc00 # CONSTRUCT x5ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 # AUNIT --inst x5ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x9 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_sqshl(Rn_FPR64, Rm_FPR64); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x5e204c00/mask=xff20fc00 # CONSTRUCT x5e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 # AUNIT --inst x5e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x9 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqshl(Rn_FPR16, Rm_FPR16); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x5e204c00/mask=xff20fc00 # CONSTRUCT x5ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2 # AUNIT --inst x5ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x9 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sqshl(Rn_FPR32, Rm_FPR32); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x4e204c00/mask=xffe0fc00 MATCHED 1 
DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@1 # AUNIT --inst x4e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x9 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x4ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@8 # AUNIT --inst x4ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x9 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x0ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@4 # AUNIT --inst x0ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x9 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x0e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@2 # AUNIT --inst x0e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x9 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.303 SQSHL 
(register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x4ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@4 # AUNIT --inst x4ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x9 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x0e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@1 # AUNIT --inst x0e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x9 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.303 SQSHL (register) page C7-2701 line 157665 MATCH x0e204c00/mask=xbf20fc00 # CONSTRUCT x4e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqshl/2@2 # AUNIT --inst x4e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x9 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x6f086400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@1 # AUNIT --inst x6f086400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & 
Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqshlu(Rn_VPR128.16B, Imm_uimm3:1, 1:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x6f406400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@8 # AUNIT --inst x6f406400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xc & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqshlu(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x2f206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@4 # AUNIT --inst x2f206400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqshlu(Rn_VPR64.2S, Imm_uimm5:1, 4:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x2f106400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@2 # AUNIT --inst x2f106400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqshlu(Rn_VPR64.4H, Imm_uimm4:1, 2:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x6f206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@4 # AUNIT --inst x6f206400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xc & b_1010=1 & Rn_VPR128.4S 
& Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqshlu(Rn_VPR128.4S, Imm_uimm5:1, 4:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x2f086400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@1 # AUNIT --inst x2f086400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqshlu(Rn_VPR64.8B, Imm_uimm3:1, 1:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x2f006400/mask=xbf80fc00 # CONSTRUCT x6f106400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2@2 # AUNIT --inst x6f106400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshlu Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqshlu(Rn_VPR128.8H, Imm_uimm4:1, 2:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x7f006400/mask=xff80fc00 # CONSTRUCT x7f086400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 # AUNIT --inst x7f086400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=0001 V=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :sqshlu Rd_FPR8, Rn_FPR8, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011001 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_sqshlu(Rn_FPR8, Imm_shr_imm8:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x7f006400/mask=xff80fc00 # CONSTRUCT x7f106400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 # AUNIT --inst x7f106400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=001x V=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :sqshlu Rd_FPR16, Rn_FPR16, 
Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011001 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_sqshlu(Rn_FPR16, Imm_shr_imm16:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x7f006400/mask=xff80fc00 # CONSTRUCT x7f206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 # AUNIT --inst x7f206400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=01xx V=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :sqshlu Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011001 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_sqshlu(Rn_FPR32, Imm_shr_imm32:1); } # C7.2.304 SQSHLU page C7-2703 line 157807 MATCH x7f006400/mask=xff80fc00 # CONSTRUCT x7f406400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sqshlu/2 # AUNIT --inst x7f406400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=1xxx V=FPR64 imm=Imm_shr_imm64 bb=b_22 aa=1 :sqshlu Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b011001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { Rd_FPR64 = NEON_sqshlu(Rn_FPR64, Imm_shr_imm64:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x4f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn2/3@2 # AUNIT --inst x4f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x0f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3@8 # AUNIT --inst 
x0f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x0f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3@4 # AUNIT --inst x0f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x4f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn2/3@8 # AUNIT --inst x4f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x0f009400/mask=xbf80fc00 # CONSTRUCT x0f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3@2 # AUNIT --inst x0f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH 
x0f009400/mask=xbf80fc00 # CONSTRUCT x4f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn2/3@4 # AUNIT --inst x4f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x5f009400/mask=xff80fc00 # CONSTRUCT x5f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3 # AUNIT --inst x5f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :sqshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b010111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_sqshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x5f009400/mask=xff80fc00 # CONSTRUCT x5f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3 # AUNIT --inst x5f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :sqshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b010111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_sqshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); } # C7.2.305 SQSHRN, SQSHRN2 page C7-2706 line 157972 MATCH x5f009400/mask=xff80fc00 # CONSTRUCT x5f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrn/3 # AUNIT --inst x5f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :sqshrn 
Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b010111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_sqshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # CONSTRUCT x6f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun2/3@2 # AUNIT --inst x6f088400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshrun2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqshrun2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # CONSTRUCT x2f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3@8 # AUNIT --inst x2f208400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshrun Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqshrun(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # CONSTRUCT x2f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3@4 # AUNIT --inst x2f108400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshrun Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqshrun(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # CONSTRUCT x6f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 
ARG3:1 &=NEON_sqshrun2/3@8 # AUNIT --inst x6f208400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqshrun2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqshrun2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # CONSTRUCT x2f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3@2 # AUNIT --inst x2f088400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :sqshrun Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqshrun(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x2f008400/mask=xbf80fc00 # CONSTRUCT x6f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun2/3@4 # AUNIT --inst x6f108400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :sqshrun2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqshrun2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x7f008400/mask=xff80fc00 # CONSTRUCT x7f088400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3 # AUNIT --inst x7f088400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :sqshrun Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100001 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_sqshrun(Rd_FPR8, 
Rn_FPR16, Imm_shr_imm8:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x7f008400/mask=xff80fc00 # CONSTRUCT x7f108400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3 # AUNIT --inst x7f108400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :sqshrun Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100001 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_sqshrun(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); } # C7.2.306 SQSHRUN, SQSHRUN2 page C7-2709 line 158157 MATCH x7f008400/mask=xff80fc00 # CONSTRUCT x7f208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sqshrun/3 # AUNIT --inst x7f208400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :sqshrun Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100001 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_sqshrun(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x5e202c00/mask=xff20fc00 # CONSTRUCT x5e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 # AUNIT --inst x5e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x5 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sqsub(Rn_FPR8, Rm_FPR8); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x5e202c00/mask=xff20fc00 # CONSTRUCT x5ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 # AUNIT --inst x5ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & 
b_2121=1 & Rm_FPR64 & b_1115=0x5 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_sqsub(Rn_FPR64, Rm_FPR64); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x5e202c00/mask=xff20fc00 # CONSTRUCT x5e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 # AUNIT --inst x5e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x5 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqsub(Rn_FPR16, Rm_FPR16); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x5e202c00/mask=xff20fc00 # CONSTRUCT x5ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2 # AUNIT --inst x5ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x5 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_sqsub(Rn_FPR32, Rm_FPR32); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x4e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@1 # AUNIT --inst x4e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x5 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x4ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@8 # AUNIT --inst x4ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x5 & 
b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sqsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x0ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@4 # AUNIT --inst x0ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x5 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sqsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x0e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@2 # AUNIT --inst x0e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x5 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x4ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@4 # AUNIT --inst x4ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x5 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x0e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@1 # AUNIT --inst x0e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & 
advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x5 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.307 SQSUB page C7-2712 line 158339 MATCH x0e202c00/mask=xbf20fc00 # CONSTRUCT x4e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sqsub/2@2 # AUNIT --inst x4e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :sqsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x5 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x5e214800/mask=xff3ffc00 # CONSTRUCT x5e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2 # AUNIT --inst x5e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn Rd_FPR8, Rn_FPR16 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR16 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_sqxtn(Rd_FPR8, Rn_FPR16); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x5e214800/mask=xff3ffc00 # CONSTRUCT x5e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2 # AUNIT --inst x5e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn Rd_FPR16, Rn_FPR32 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR32 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_sqxtn(Rd_FPR16, Rn_FPR32); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x5e214800/mask=xff3ffc00 # CONSTRUCT x5ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2 # AUNIT --inst x5ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn Rd_FPR32, Rn_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_FPR64 & Rd_FPR32 & 
Zd { Rd_FPR32 = NEON_sqxtn(Rd_FPR32, Rn_FPR64); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x0e214800/mask=xbf3ffc00 # CONSTRUCT x4e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn2/2@2 # AUNIT --inst x4e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn2 Rd_VPR128.16B, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sqxtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x0e214800/mask=xbf3ffc00 # CONSTRUCT x4e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn2/2@4 # AUNIT --inst x4e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sqxtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x0e214800/mask=xbf3ffc00 # CONSTRUCT x4ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn2/2@8 # AUNIT --inst x4ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sqxtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x0e214800/mask=xbf3ffc00 # CONSTRUCT x0ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2@8 # AUNIT --inst x0ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn Rd_VPR64.2S, Rn_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { 
Rd_VPR64.2S = NEON_sqxtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x0e214800/mask=xbf3ffc00 # CONSTRUCT x0e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2@4 # AUNIT --inst x0e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn Rd_VPR64.4H, Rn_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sqxtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); } # C7.2.308 SQXTN, SQXTN2 page C7-2714 line 158467 MATCH x0e214800/mask=xbf3ffc00 # CONSTRUCT x0e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtn/2@2 # AUNIT --inst x0e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" :sqxtn Rd_VPR64.8B, Rn_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x14 & b_1011=2 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sqxtn(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x7e212800/mask=xff3ffc00 # CONSTRUCT x7e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2 # AUNIT --inst x7e212800/mask=xfffffc00 --status noqemu --comment "nointsat" # Scalar variant when size=00 Q=1 bb=1 Ta=FPR16 Tb=FPR8 :sqxtun Rd_FPR8, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_FPR8 & Rn_FPR16 & Zd { Rd_FPR8 = NEON_sqxtun(Rd_FPR8, Rn_FPR16); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x7e212800/mask=xff3ffc00 # CONSTRUCT x7e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2 # AUNIT --inst x7e612800/mask=xfffffc00 --status noqemu --comment "nointsat" # Scalar variant when size=01 Q=1 bb=1 Ta=FPR32 Tb=FPR16 :sqxtun Rd_FPR16, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_FPR16 & Rn_FPR32 & 
Zd { Rd_FPR16 = NEON_sqxtun(Rd_FPR16, Rn_FPR32); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x7e212800/mask=xff3ffc00 # CONSTRUCT x7ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2 # AUNIT --inst x7ea12800/mask=xfffffc00 --status noqemu --comment "nointsat" # Scalar variant when size=10 Q=1 bb=1 Ta=FPR64 Tb=FPR32 :sqxtun Rd_FPR32, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_FPR32 & Rn_FPR64 & Zd { Rd_FPR32 = NEON_sqxtun(Rd_FPR32, Rn_FPR64); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x2e212800/mask=xbf3ffc00 # CONSTRUCT x2e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2@2 # AUNIT --inst x2e212800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=00 Q=0 bb=0 Ta=VPR128.8H Tb=VPR64.8B esize=2 :sqxtun Rd_VPR64.8B, Rn_VPR128.8H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_VPR64.8B & Rn_VPR128.8H & Zd { Rd_VPR64.8B = NEON_sqxtun(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x2e212800/mask=xbf3ffc00 # CONSTRUCT x6e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun2/2@2 # AUNIT --inst x6e212800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=00 Q=1 bb=0 Ta=VPR128.8H Tb=VPR128.16B esize=2 :sqxtun2 Rd_VPR128.16B, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001001010 & Rd_VPR128.16B & Rn_VPR128.8H & Zd { Rd_VPR128.16B = NEON_sqxtun2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x2e212800/mask=xbf3ffc00 # CONSTRUCT x2e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2@4 # AUNIT --inst x2e612800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=01 Q=0 bb=0 Ta=VPR128.4S 
Tb=VPR64.4H esize=4 :sqxtun Rd_VPR64.4H, Rn_VPR128.4S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_VPR64.4H & Rn_VPR128.4S & Zd { Rd_VPR64.4H = NEON_sqxtun(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x2e212800/mask=xbf3ffc00 # CONSTRUCT x6e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun2/2@4 # AUNIT --inst x6e612800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=01 Q=1 bb=0 Ta=VPR128.4S Tb=VPR128.8H esize=4 :sqxtun2 Rd_VPR128.8H, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001001010 & Rd_VPR128.8H & Rn_VPR128.4S & Zd { Rd_VPR128.8H = NEON_sqxtun2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x2e212800/mask=xbf3ffc00 # CONSTRUCT x2ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun/2@8 # AUNIT --inst x2ea12800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=10 Q=0 bb=0 Ta=VPR128.2D Tb=VPR64.2S esize=8 :sqxtun Rd_VPR64.2S, Rn_VPR128.2D is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_VPR64.2S & Rn_VPR128.2D & Zd { Rd_VPR64.2S = NEON_sqxtun(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); } # C7.2.309 SQXTUN, SQXTUN2 page C7-2717 line 158622 MATCH x2e212800/mask=xbf3ffc00 # CONSTRUCT x6ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_sqxtun2/2@8 # AUNIT --inst x6ea12800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=10 Q=1 bb=0 Ta=VPR128.2D Tb=VPR128.4S esize=8 :sqxtun2 Rd_VPR128.4S, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001001010 & Rd_VPR128.4S & Rn_VPR128.2D & Zd { Rd_VPR128.4S = NEON_sqxtun2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); } # C7.2.310 SRHADD page C7-2720 line 158773 MATCH x0e201400/mask=xbf20fc00 # CONSTRUCT 
x4e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@1 # AUNIT --inst x4e201400/mask=xffe0fc00 --status nopcodeop :srhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_srhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.310 SRHADD page C7-2720 line 158773 MATCH x0e201400/mask=xbf20fc00 # CONSTRUCT x0ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@4 # AUNIT --inst x0ea01400/mask=xffe0fc00 --status nopcodeop :srhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_srhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.310 SRHADD page C7-2720 line 158773 MATCH x0e201400/mask=xbf20fc00 # CONSTRUCT x0e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@2 # AUNIT --inst x0e601400/mask=xffe0fc00 --status nopcodeop :srhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_srhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.310 SRHADD page C7-2720 line 158773 MATCH x0e201400/mask=xbf20fc00 # CONSTRUCT x4ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@4 # AUNIT --inst x4ea01400/mask=xffe0fc00 --status nopcodeop :srhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_srhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.310 SRHADD page C7-2720 line 158773 MATCH x0e201400/mask=xbf20fc00 # 
CONSTRUCT x0e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@1 # AUNIT --inst x0e201400/mask=xffe0fc00 --status nopcodeop :srhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_srhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.310 SRHADD page C7-2720 line 158773 MATCH x0e201400/mask=xbf20fc00 # CONSTRUCT x4e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_srhadd/2@2 # AUNIT --inst x4e601400/mask=xffe0fc00 --status nopcodeop :srhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_srhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # CONSTRUCT x6f084400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@1 # AUNIT --inst x6f084400/mask=xfff8fc00 --status nopcodeop :sri Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sri(Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8:4, 1:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # CONSTRUCT x6f404400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@8 # AUNIT --inst x6f404400/mask=xffc0fc00 --status nopcodeop :sri Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sri(Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64:4, 8:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # 
CONSTRUCT x2f204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@4 # AUNIT --inst x2f204400/mask=xffe0fc00 --status nopcodeop :sri Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sri(Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32:4, 4:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # CONSTRUCT x2f104400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@2 # AUNIT --inst x2f104400/mask=xfff0fc00 --status nopcodeop :sri Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sri(Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16:4, 2:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # CONSTRUCT x6f204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@4 # AUNIT --inst x6f204400/mask=xffe0fc00 --status nopcodeop :sri Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sri(Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32:4, 4:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # CONSTRUCT x2f084400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@1 # AUNIT --inst x2f084400/mask=xfff8fc00 --status nopcodeop :sri Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sri(Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8:4, 1:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x2f004400/mask=xbf80fc00 # CONSTRUCT x6f104400/mask=xfff0fc00 MATCHED 
1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:4 &=NEON_sri/3@2 # AUNIT --inst x6f104400/mask=xfff0fc00 --status nopcodeop :sri Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sri(Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16:4, 2:1); } # C7.2.311 SRI page C7-2722 line 158861 MATCH x7f004400/mask=xff80fc00 # CONSTRUCT x7f404400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_sri/3 # AUNIT --inst x7f404400/mask=xffc0fc00 --status nopcodeop :sri Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b010001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { Rd_FPR64 = NEON_sri(Rd_FPR64, Rn_FPR64, Imm_shr_imm64:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x5e205400/mask=xff20fc00 # CONSTRUCT x5ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2 # AUNIT --inst x5ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_srshl(Rn_FPR64, Rm_FPR64); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT x4e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@1 # AUNIT --inst x4e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_srshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT x4ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 
=NEON_srshl/2@8 # AUNIT --inst x4ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_srshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT x0ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@4 # AUNIT --inst x0ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_srshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT x0e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@2 # AUNIT --inst x0e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_srshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT x4ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@4 # AUNIT --inst x4ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_srshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT 
x0e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@1 # AUNIT --inst x0e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_srshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.312 SRSHL page C7-2725 line 159028 MATCH x0e205400/mask=xbf20fc00 # CONSTRUCT x4e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_srshl/2@2 # AUNIT --inst x4e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_srshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x4f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@1 # AUNIT --inst x4f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_srshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); } # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x4f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@8 # AUNIT --inst x4f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_srshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); } # C7.2.313 SRSHR page 
C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x0f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@4 # AUNIT --inst x0f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_srshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); } # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x0f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@2 # AUNIT --inst x0f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_srshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); } # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x4f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@4 # AUNIT --inst x4f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_srshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); } # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x0f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@1 # AUNIT --inst x0f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_srshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); 
} # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x0f002400/mask=xbf80fc00 # CONSTRUCT x4f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2@2 # AUNIT --inst x4f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" :srshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_srshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); } # C7.2.313 SRSHR page C7-2727 line 159165 MATCH x5f002400/mask=xff80fc00 # CONSTRUCT x5f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_srshr/2 # AUNIT --inst x5f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" :srshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { Rd_FPR64 = NEON_srshr(Rn_FPR64, Imm_shr_imm64:1); } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x4f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@1 # AUNIT --inst x4f083400/mask=xfff8fc00 --status fail --comment "nointround" :srsra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 TMPQ1[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; TMPQ1[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; TMPQ1[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; TMPQ1[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; TMPQ1[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; TMPQ1[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; TMPQ1[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; TMPQ1[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; TMPQ1[64,8] = Rn_VPR128.16B[64,8] s>> 
Imm_shr_imm8:1; TMPQ1[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; TMPQ1[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; TMPQ1[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; TMPQ1[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; TMPQ1[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; TMPQ1[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; TMPQ1[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x4f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 $s>>@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@8 # AUNIT --inst x4f403400/mask=xffc0fc00 --status fail --comment "nointround" :srsra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D s>> tmp1 on lane 
size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x0f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:4 $s>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@4 # AUNIT --inst x0f203400/mask=xffe0fc00 --status fail --comment "nointround" :srsra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix TMPD1 = Rn_VPR64.2S s>> Imm_shr_imm32:4 on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] s>> Imm_shr_imm32:4; TMPD1[32,32] = Rn_VPR64.2S[32,32] s>> Imm_shr_imm32:4; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x0f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@2 # AUNIT --inst x0f103400/mask=xfff0fc00 --status fail --comment "nointround" :srsra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; TMPD1[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; TMPD1[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; TMPD1[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on 
lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x4f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:4 $s>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@4 # AUNIT --inst x4f203400/mask=xffe0fc00 --status fail --comment "nointround" :srsra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.4S s>> Imm_shr_imm32:4 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] s>> Imm_shr_imm32:4; TMPQ1[32,32] = Rn_VPR128.4S[32,32] s>> Imm_shr_imm32:4; TMPQ1[64,32] = Rn_VPR128.4S[64,32] s>> Imm_shr_imm32:4; TMPQ1[96,32] = Rn_VPR128.4S[96,32] s>> Imm_shr_imm32:4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x0f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@1 # AUNIT --inst x0f083400/mask=xfff8fc00 --status fail --comment "nointround" :srsra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix TMPD1 = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 TMPD1[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; TMPD1[8,8] = 
Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; TMPD1[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; TMPD1[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; TMPD1[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; TMPD1[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; TMPD1[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; TMPD1[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x0f003400/mask=xbf80fc00 # CONSTRUCT x4f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3@2 # AUNIT --inst x4f103400/mask=xfff0fc00 --status fail --comment "nointround" :srsra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; TMPQ1[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; TMPQ1[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; TMPQ1[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; TMPQ1[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; TMPQ1[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; TMPQ1[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; TMPQ1[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; 
Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.314 SRSRA page C7-2730 line 159316 MATCH x5f003400/mask=xff80fc00 # CONSTRUCT x5f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 s>> &=+ # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_srsra/3 # AUNIT --inst x5f403400/mask=xffc0fc00 --status fail --comment "nointround" :srsra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 s>> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.315 SSHL page C7-2733 line 159467 MATCH x5e204400/mask=xff20fc00 # CONSTRUCT x5ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2 # AUNIT --inst x5ee04400/mask=xffe0fc00 --status nopcodeop :sshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=0 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x8 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_sshl(Rn_FPR64, Rm_FPR64); } # C7.2.315 SSHL page C7-2733 line 159467 MATCH x0e204400/mask=xbf20fc00 # CONSTRUCT x4e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@1 # AUNIT --inst x4e204400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_sshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.315 SSHL 
page C7-2733 line 159467 MATCH x0e204400/mask=xbf20fc00 # CONSTRUCT x4ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@8 # AUNIT --inst x4ee04400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_sshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.315 SSHL page C7-2733 line 159467 MATCH x0e204400/mask=xbf20fc00 # CONSTRUCT x0ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@4 # AUNIT --inst x0ea04400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_sshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.315 SSHL page C7-2733 line 159467 MATCH x0e204400/mask=xbf20fc00 # CONSTRUCT x0e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@2 # AUNIT --inst x0e604400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_sshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.315 SSHL page C7-2733 line 159467 MATCH x0e204400/mask=xbf20fc00 # CONSTRUCT x4ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@4 # AUNIT --inst x4ea04400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_sshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.315 SSHL page C7-2733 line 159467 MATCH 
x0e204400/mask=xbf20fc00 # CONSTRUCT x0e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@1 # AUNIT --inst x0e204400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_sshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.315 SSHL page C7-2733 line 159467 MATCH x0e204400/mask=xbf20fc00 # CONSTRUCT x4e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sshl/2@2 # AUNIT --inst x4e604400/mask=xffe0fc00 --status nopcodeop :sshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_sshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x4f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3 =var:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll2/2@1 # AUNIT --inst x4f08a400/mask=xfff8fc00 --status pass --comment "ext" :sshll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); local tmp3:2 = Imm_uimm3; # simd infix Rd_VPR128.8H = TMPQ2 << tmp3 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] << tmp3; 
Rd_VPR128.8H[16,16] = TMPQ2[16,16] << tmp3; Rd_VPR128.8H[32,16] = TMPQ2[32,16] << tmp3; Rd_VPR128.8H[48,16] = TMPQ2[48,16] << tmp3; Rd_VPR128.8H[64,16] = TMPQ2[64,16] << tmp3; Rd_VPR128.8H[80,16] = TMPQ2[80,16] << tmp3; Rd_VPR128.8H[96,16] = TMPQ2[96,16] << tmp3; Rd_VPR128.8H[112,16] = TMPQ2[112,16] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x0f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 =var:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll/2@4 # AUNIT --inst x0f20a400/mask=xffe0fc00 --status pass --comment "ext" :sshll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); local tmp2:8 = Imm_uimm5; # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x0f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 =var:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll/2@2 # AUNIT --inst x0f10a400/mask=xfff0fc00 --status pass --comment "ext" :sshll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = 
sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); local tmp2:4 = Imm_uimm4; # simd infix Rd_VPR128.4S = TMPQ1 << tmp2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] << tmp2; Rd_VPR128.4S[32,32] = TMPQ1[32,32] << tmp2; Rd_VPR128.4S[64,32] = TMPQ1[64,32] << tmp2; Rd_VPR128.4S[96,32] = TMPQ1[96,32] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x4f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3 =var:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll2/2@4 # AUNIT --inst x4f20a400/mask=xffe0fc00 --status pass --comment "ext" :sshll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); local tmp3:8 = Imm_uimm5; # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x0f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 =var:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll/2@1 # AUNIT --inst x0f08a400/mask=xfff8fc00 --status pass --comment "ext" :sshll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); 
TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); local tmp2:2 = Imm_uimm3; # simd infix Rd_VPR128.8H = TMPQ1 << tmp2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] << tmp2; Rd_VPR128.8H[16,16] = TMPQ1[16,16] << tmp2; Rd_VPR128.8H[32,16] = TMPQ1[32,16] << tmp2; Rd_VPR128.8H[48,16] = TMPQ1[48,16] << tmp2; Rd_VPR128.8H[64,16] = TMPQ1[64,16] << tmp2; Rd_VPR128.8H[80,16] = TMPQ1[80,16] << tmp2; Rd_VPR128.8H[96,16] = TMPQ1[96,16] << tmp2; Rd_VPR128.8H[112,16] = TMPQ1[112,16] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # CONSTRUCT x4f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3 =var:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshll2/2@2 # AUNIT --inst x4f10a400/mask=xfff0fc00 --status pass --comment "ext" :sshll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); local tmp3:4 = Imm_uimm4; # simd infix Rd_VPR128.4S = TMPQ2 << tmp3 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] << tmp3; Rd_VPR128.4S[32,32] = TMPQ2[32,32] << tmp3; Rd_VPR128.4S[64,32] = TMPQ2[64,32] << tmp3; Rd_VPR128.4S[96,32] = TMPQ2[96,32] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x5f000400/mask=xff80fc00 # CONSTRUCT x5f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2 # 
AUNIT --inst x5f400400/mask=xffc0fc00 --status nopcodeop :sshr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_3031=1 & u=0 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_sshr(Rn_FPR64, Imm_shr_imm64:1); } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x4f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 =$s>>@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@1 # AUNIT --inst x4f080400/mask=xfff8fc00 --status pass :sshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x4f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 
=$s>>@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@8 # AUNIT --inst x4f400400/mask=xffc0fc00 --status pass :sshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = zext(Imm_shr_imm64); # simd infix Rd_VPR128.2D = Rn_VPR128.2D s>> tmp1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x0f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 =$s>>@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@4 # AUNIT --inst x0f200400/mask=xffe0fc00 --status pass :sshr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR64.2S = Rn_VPR64.2S s>> tmp1 on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] s>> tmp1; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] s>> tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x0f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 =$s>>@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@2 # AUNIT --inst x0f100400/mask=xfff0fc00 --status pass :sshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; zext_zd(Zd); # zero 
upper 24 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x4f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 =$s>>@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@4 # AUNIT --inst x4f200400/mask=xffe0fc00 --status pass :sshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR128.4S = Rn_VPR128.4S s>> tmp1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] s>> tmp1; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] s>> tmp1; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] s>> tmp1; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] s>> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x0f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 =$s>>@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@1 # AUNIT --inst x0f080400/mask=xfff8fc00 --status pass :sshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.317 SSHR page C7-2738 line 159757 MATCH x0f000400/mask=xbf80fc00 # CONSTRUCT x4f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 
ARG3:2 =$s>>@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_sshr/2@2 # AUNIT --inst x4f100400/mask=xfff0fc00 --status pass :sshr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x4f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@1 # AUNIT --inst x4f081400/mask=xfff8fc00 --status pass :ssra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.16B s>> Imm_shr_imm8:1 on lane size 1 TMPQ1[0,8] = Rn_VPR128.16B[0,8] s>> Imm_shr_imm8:1; TMPQ1[8,8] = Rn_VPR128.16B[8,8] s>> Imm_shr_imm8:1; TMPQ1[16,8] = Rn_VPR128.16B[16,8] s>> Imm_shr_imm8:1; TMPQ1[24,8] = Rn_VPR128.16B[24,8] s>> Imm_shr_imm8:1; TMPQ1[32,8] = Rn_VPR128.16B[32,8] s>> Imm_shr_imm8:1; TMPQ1[40,8] = Rn_VPR128.16B[40,8] s>> Imm_shr_imm8:1; TMPQ1[48,8] = Rn_VPR128.16B[48,8] s>> Imm_shr_imm8:1; TMPQ1[56,8] = Rn_VPR128.16B[56,8] s>> Imm_shr_imm8:1; TMPQ1[64,8] = Rn_VPR128.16B[64,8] s>> Imm_shr_imm8:1; TMPQ1[72,8] = Rn_VPR128.16B[72,8] s>> Imm_shr_imm8:1; TMPQ1[80,8] = 
Rn_VPR128.16B[80,8] s>> Imm_shr_imm8:1; TMPQ1[88,8] = Rn_VPR128.16B[88,8] s>> Imm_shr_imm8:1; TMPQ1[96,8] = Rn_VPR128.16B[96,8] s>> Imm_shr_imm8:1; TMPQ1[104,8] = Rn_VPR128.16B[104,8] s>> Imm_shr_imm8:1; TMPQ1[112,8] = Rn_VPR128.16B[112,8] s>> Imm_shr_imm8:1; TMPQ1[120,8] = Rn_VPR128.16B[120,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x4f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 $s>>@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@8 # AUNIT --inst x4f401400/mask=xffc0fc00 --status pass :ssra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D s>> tmp1 on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] s>> tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] s>> tmp1; # simd infix 
Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x0f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 $s>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@4 # AUNIT --inst x0f201400/mask=xffe0fc00 --status pass :ssra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix TMPD1 = Rn_VPR64.2S s>> tmp1 on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] s>> tmp1; TMPD1[32,32] = Rn_VPR64.2S[32,32] s>> tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x0f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@2 # AUNIT --inst x0f101400/mask=xfff0fc00 --status pass :ssra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H s>> Imm_shr_imm16:2 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] s>> Imm_shr_imm16:2; TMPD1[16,16] = Rn_VPR64.4H[16,16] s>> Imm_shr_imm16:2; TMPD1[32,16] = Rn_VPR64.4H[32,16] s>> Imm_shr_imm16:2; TMPD1[48,16] = Rn_VPR64.4H[48,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] 
+ TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x4f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 $s>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@4 # AUNIT --inst x4f201400/mask=xffe0fc00 --status pass :ssra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix TMPQ1 = Rn_VPR128.4S s>> tmp1 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] s>> tmp1; TMPQ1[32,32] = Rn_VPR128.4S[32,32] s>> tmp1; TMPQ1[64,32] = Rn_VPR128.4S[64,32] s>> tmp1; TMPQ1[96,32] = Rn_VPR128.4S[96,32] s>> tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x0f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $s>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@1 # AUNIT --inst x0f081400/mask=xfff8fc00 --status pass :ssra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix TMPD1 = Rn_VPR64.8B s>> Imm_shr_imm8:1 on lane size 1 TMPD1[0,8] = Rn_VPR64.8B[0,8] s>> Imm_shr_imm8:1; TMPD1[8,8] = Rn_VPR64.8B[8,8] s>> Imm_shr_imm8:1; TMPD1[16,8] = Rn_VPR64.8B[16,8] s>> Imm_shr_imm8:1; TMPD1[24,8] = Rn_VPR64.8B[24,8] s>> Imm_shr_imm8:1; TMPD1[32,8] = Rn_VPR64.8B[32,8] s>> Imm_shr_imm8:1; TMPD1[40,8] = Rn_VPR64.8B[40,8] s>> 
Imm_shr_imm8:1; TMPD1[48,8] = Rn_VPR64.8B[48,8] s>> Imm_shr_imm8:1; TMPD1[56,8] = Rn_VPR64.8B[56,8] s>> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x0f001400/mask=xbf80fc00 # CONSTRUCT x4f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $s>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3@2 # AUNIT --inst x4f101400/mask=xfff0fc00 --status pass :ssra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H s>> Imm_shr_imm16:2 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] s>> Imm_shr_imm16:2; TMPQ1[16,16] = Rn_VPR128.8H[16,16] s>> Imm_shr_imm16:2; TMPQ1[32,16] = Rn_VPR128.8H[32,16] s>> Imm_shr_imm16:2; TMPQ1[48,16] = Rn_VPR128.8H[48,16] s>> Imm_shr_imm16:2; TMPQ1[64,16] = Rn_VPR128.8H[64,16] s>> Imm_shr_imm16:2; TMPQ1[80,16] = Rn_VPR128.8H[80,16] s>> Imm_shr_imm16:2; TMPQ1[96,16] = Rn_VPR128.8H[96,16] s>> Imm_shr_imm16:2; TMPQ1[112,16] = Rn_VPR128.8H[112,16] s>> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = 
Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.318 SSRA page C7-2741 line 159921 MATCH x5f001400/mask=xff80fc00 # CONSTRUCT x5f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 s>> &=+ # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ssra/3 # AUNIT --inst x5f401400/mask=xffc0fc00 --status pass :ssra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b010111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 s>> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.319 SSUBL, SSUBL2 page C7-2744 line 160085 MATCH x0e202000/mask=xbf20fc00 # CONSTRUCT x4ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@4:16 ARG3[1]:8 $sext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl2/2@4 # AUNIT --inst x4ea02000/mask=xffe0fc00 --status pass --comment "ext" :ssubl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x2 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = sext(TMPD3[0,32]); TMPQ4[64,64] = sext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 - TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; Rd_VPR128.2D[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.319 SSUBL, SSUBL2 page C7-2744 line 160085 MATCH x0e202000/mask=xbf20fc00 # CONSTRUCT x4e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@2:16 ARG3[1]:8 $sext@2:16 =$-@4 # SMACRO(pseudo) 
ARG1 ARG2 ARG3 =NEON_ssubl2/2@2 # AUNIT --inst x4e602000/mask=xffe0fc00 --status pass --comment "ext" :ssubl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x2 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = sext(TMPD3[0,16]); TMPQ4[32,32] = sext(TMPD3[16,16]); TMPQ4[64,32] = sext(TMPD3[32,16]); TMPQ4[96,32] = sext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 - TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; Rd_VPR128.4S[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; Rd_VPR128.4S[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.319 SSUBL, SSUBL2 page C7-2744 line 160085 MATCH x0e202000/mask=xbf20fc00 # CONSTRUCT x4e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $sext@1:16 ARG3[1]:8 $sext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl2/2@1 # AUNIT --inst x4e202000/mask=xffe0fc00 --status pass --comment "ext" :ssubl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x2 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = sext(TMPD3) (lane size 1 to 2) 
TMPQ4[0,16] = sext(TMPD3[0,8]); TMPQ4[16,16] = sext(TMPD3[8,8]); TMPQ4[32,16] = sext(TMPD3[16,8]); TMPQ4[48,16] = sext(TMPD3[24,8]); TMPQ4[64,16] = sext(TMPD3[32,8]); TMPQ4[80,16] = sext(TMPD3[40,8]); TMPQ4[96,16] = sext(TMPD3[48,8]); TMPQ4[112,16] = sext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 - TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; Rd_VPR128.8H[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; Rd_VPR128.8H[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; Rd_VPR128.8H[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; Rd_VPR128.8H[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; Rd_VPR128.8H[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; Rd_VPR128.8H[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.319 SSUBL, SSUBL2 page C7-2744 line 160085 MATCH x0e202000/mask=xbf20fc00 # CONSTRUCT x0ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@4:16 ARG3 $sext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl/2@4 # AUNIT --inst x0ea02000/mask=xffe0fc00 --status pass --comment "ext" :ssubl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x2 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 - TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; Rd_VPR128.2D[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.319 SSUBL, SSUBL2 page C7-2744 line 160085 MATCH x0e202000/mask=xbf20fc00 # CONSTRUCT x0e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@2:16 ARG3 $sext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 
=NEON_ssubl/2@2 # AUNIT --inst x0e602000/mask=xffe0fc00 --status pass --comment "ext" :ssubl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x2 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 - TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; Rd_VPR128.4S[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; Rd_VPR128.4S[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.319 SSUBL, SSUBL2 page C7-2744 line 160085 MATCH x0e202000/mask=xbf20fc00 # CONSTRUCT x0e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $sext@1:16 ARG3 $sext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubl/2@1 # AUNIT --inst x0e202000/mask=xffe0fc00 --status pass --comment "ext" :ssubl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x2 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = sext(Rm_VPR64.8B[0,8]); 
TMPQ2[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 - TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; Rd_VPR128.8H[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; Rd_VPR128.8H[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.320 SSUBW, SSUBW2 page C7-2746 line 160208 MATCH x0e203000/mask=xbf20fc00 # CONSTRUCT x4ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $sext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw2/2@4 # AUNIT --inst x4ea03000/mask=xffe0fc00 --status pass --comment "ext" :ssubw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Rm_VPR128 & Zd { TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = sext(TMPD1[0,32]); TMPQ2[64,64] = sext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.320 SSUBW, SSUBW2 page C7-2746 line 160208 MATCH x0e203000/mask=xbf20fc00 # CONSTRUCT x4e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $sext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw2/2@2 # AUNIT --inst x4e603000/mask=xffe0fc00 --status pass --comment "ext" :ssubw2 
Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = sext(TMPD1[0,16]); TMPQ2[32,32] = sext(TMPD1[16,16]); TMPQ2[64,32] = sext(TMPD1[32,16]); TMPQ2[96,32] = sext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.320 SSUBW, SSUBW2 page C7-2746 line 160208 MATCH x0e203000/mask=xbf20fc00 # CONSTRUCT x4e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $sext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw2/2@1 # AUNIT --inst x4e203000/mask=xffe0fc00 --status pass --comment "ext" :ssubw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = sext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = sext(TMPD1[0,8]); TMPQ2[16,16] = sext(TMPD1[8,8]); TMPQ2[32,16] = sext(TMPD1[16,8]); TMPQ2[48,16] = sext(TMPD1[24,8]); TMPQ2[64,16] = sext(TMPD1[32,8]); TMPQ2[80,16] = sext(TMPD1[40,8]); TMPQ2[96,16] = sext(TMPD1[48,8]); TMPQ2[112,16] = sext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ2[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ2[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ2[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ2[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ2[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - 
TMPQ2[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ2[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.320 SSUBW, SSUBW2 page C7-2746 line 160208 MATCH x0e203000/mask=xbf20fc00 # CONSTRUCT x0ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $sext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw/2@4 # AUNIT --inst x0ea03000/mask=xffe0fc00 --status pass --comment "ext" :ssubw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = sext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = sext(Rm_VPR64.2S[0,32]); TMPQ1[64,64] = sext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.320 SSUBW, SSUBW2 page C7-2746 line 160208 MATCH x0e203000/mask=xbf20fc00 # CONSTRUCT x0e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $sext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw/2@2 # AUNIT --inst x0e603000/mask=xffe0fc00 --status pass --comment "ext" :ssubw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = sext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = sext(Rm_VPR64.4H[0,16]); TMPQ1[32,32] = sext(Rm_VPR64.4H[16,16]); TMPQ1[64,32] = sext(Rm_VPR64.4H[32,16]); TMPQ1[96,32] = sext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ1[64,32]; Rd_VPR128.4S[96,32] 
= Rn_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.320 SSUBW, SSUBW2 page C7-2746 line 160208 MATCH x0e203000/mask=xbf20fc00 # CONSTRUCT x0e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $sext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ssubw/2@1 # AUNIT --inst x0e203000/mask=xffe0fc00 --status pass --comment "ext" :ssubw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = sext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = sext(Rm_VPR64.8B[0,8]); TMPQ1[16,16] = sext(Rm_VPR64.8B[8,8]); TMPQ1[32,16] = sext(Rm_VPR64.8B[16,8]); TMPQ1[48,16] = sext(Rm_VPR64.8B[24,8]); TMPQ1[64,16] = sext(Rm_VPR64.8B[32,8]); TMPQ1[80,16] = sext(Rm_VPR64.8B[40,8]); TMPQ1[96,16] = sext(Rm_VPR64.8B[48,8]); TMPQ1[112,16] = sext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.329 STNP (SIMD&FP) page C7-2777 line 162166 MATCH x2c000000/mask=x3fc00000 # CONSTRUCT x2c000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG3 =store pop ARG2 ARG3 4 +:8 =store pop # SMACRO(pseudo) null ARG1 ARG3 =NEON_stnp1/2 null ARG2 ARG3 =NEON_stnp2/2 # AUNIT --inst x2c000000/mask=xffc00000 --status nomem :stnp Rt_FPR32, Rt2_FPR32, addrPairIndexed is b_3031=0b00 & b_2229=0b10110000 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32 { * addrPairIndexed = 
Rt_FPR32;
local tmp1:8 = addrPairIndexed + 4;
* tmp1 = Rt2_FPR32; }

# C7.2.329 STNP (SIMD&FP) page C7-2777 line 162166 MATCH x2c000000/mask=x3fc00000
# CONSTRUCT x6c000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 8 +:8 =store pop
# SMACRO(pseudo) null ARG1 ARG3 =NEON_stnp1/2 null ARG2 ARG3 =NEON_stnp2/2
# AUNIT --inst x6c000000/mask=xffc00000 --status nomem
# STNP (SIMD&FP), 64-bit pair: Rt is stored at the pair address, Rt2 at +8.
# NOTE(review): the non-temporal cache hint of STNP has no p-code
# representation, so the semantics here are those of a plain store pair.
:stnp Rt_FPR64, Rt2_FPR64, addrPairIndexed
is b_3031=0b01 & b_2229=0b10110000 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64
{
    * addrPairIndexed = Rt_FPR64;
    local tmp1:8 = addrPairIndexed + 8;
    * tmp1 = Rt2_FPR64;
}

# C7.2.329 STNP (SIMD&FP) page C7-2777 line 162166 MATCH x2c000000/mask=x3fc00000
# CONSTRUCT xac000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 16 +:8 =store pop
# SMACRO(pseudo) null ARG1 ARG3 =NEON_stnp1/2 null ARG2 ARG3 =NEON_stnp2/2
# AUNIT --inst xac000000/mask=xffc00000 --status nomem
# STNP (SIMD&FP), 128-bit pair: second element stored at +16.
# NOTE(review): the pattern names both Rt2_FPR64 and Rt2_FPR128, while the
# 32- and 64-bit siblings name only the matching width. Rt2_FPR64 looks
# redundant (both decode the same Rt2 field) -- confirm against the
# register attach before removing.
:stnp Rt_FPR128, Rt2_FPR128, addrPairIndexed
is b_3031=0b10 & b_2229=0b10110000 & Rt2_FPR64 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128
{
    * addrPairIndexed = Rt_FPR128;
    local tmp1:8 = addrPairIndexed + 16;
    * tmp1 = Rt2_FPR128;
}

# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2c800000/mask=x3fc00000
# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2d800000/mask=x3fc00000
# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2d000000/mask=x3fc00000
# C7.2.329 STNP (SIMD&FP) page C7-2777 line 162166 MATCH x2c000000/mask=x3fc00000
# CONSTRUCT xac000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 16 +:8 =store pop
# SMACRO(pseudo) null ARG1 ARG3 =NEON_stp1/2 null ARG2 ARG3 =NEON_stp2/2
# AUNIT --inst xac000000/mask=xfe400000 --status nomem
# 128-bit variant (post-index, pre-index, and signed offset)
# addrPairIndexed exports the effective address; it presumably also emits
# any base-register writeback for the indexed forms (defined elsewhere in
# this module -- verify). This body only performs the two stores.
:stp Rt_FPR128, Rt2_FPR128, addrPairIndexed
is b_3031=0b10 & b_2529=0b10110 & b_22=0 & Rt2_FPR128 & addrPairIndexed & Rt_FPR128
{
    * addrPairIndexed = Rt_FPR128;
    local tmp1:8 = addrPairIndexed + 16;
    * tmp1 = Rt2_FPR128;
}

# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2c800000/mask=x3fc00000
# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2d800000/mask=x3fc00000
# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2d000000/mask=x3fc00000
# C7.2.329 STNP (SIMD&FP) page C7-2777 line 162166 MATCH x2c000000/mask=x3fc00000
# CONSTRUCT x2c000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 4 +:8 =store pop
# SMACRO(pseudo) null ARG1 ARG3 =NEON_stp1/2 null ARG2 ARG3 =NEON_stp2/2
# AUNIT --inst x2c000000/mask=xfe400000 --status nomem
# 32-bit variant (post-index, pre-index, and signed offset)
:stp Rt_FPR32, Rt2_FPR32, addrPairIndexed
is b_3031=0b00 & b_2529=0b10110 & b_22=0 & Rt2_FPR32 & addrPairIndexed & Rt_FPR32
{
    * addrPairIndexed = Rt_FPR32;
    local tmp1:8 = addrPairIndexed + 4;
    * tmp1 = Rt2_FPR32;
}

# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2c800000/mask=x3fc00000
# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2d800000/mask=x3fc00000
# C7.2.330 STP (SIMD&FP) page C7-2779 line 162288 MATCH x2d000000/mask=x3fc00000
# C7.2.329 STNP (SIMD&FP) page C7-2777 line 162166 MATCH x2c000000/mask=x3fc00000
# CONSTRUCT x6c000000/mask=xfe400000 MATCHED 4 DOCUMENTED OPCODES
# SMACRO ARG1 ARG3 =store pop ARG2 ARG3 8 +:8 =store pop
# SMACRO(pseudo) null ARG1 ARG3 =NEON_stp1/2 null ARG2 ARG3 =NEON_stp2/2
# AUNIT --inst x6c000000/mask=xfe400000 --status nomem
# 64-bit variant (post-index, pre-index, and signed offset)
:stp Rt_FPR64, Rt2_FPR64, addrPairIndexed
is b_3031=0b01 & b_2529=0b10110 & b_22=0 & Rt2_FPR64 & addrPairIndexed & Rt_FPR64
{
    * addrPairIndexed = Rt_FPR64;
    local tmp1:8 = addrPairIndexed + 8;
    * tmp1 = Rt2_FPR64;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000400/mask=x3f600c00
# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000c00/mask=x3f600c00
# CONSTRUCT x3c000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# 
SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst x3c000400/mask=xffe00400 --status nomem
# Post- and Pre-offset 8-bit variant when size == 00 && opc == 00 F=FPR8
# STR (immediate, SIMD&FP): addrIndexed exports the effective address and
# presumably performs pre/post-index base writeback (defined elsewhere in
# this module -- verify); the body only stores the FP/SIMD register.
:str Rt_FPR8, addrIndexed
is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR8 & addrIndexed & Zt
{
    * addrIndexed = Rt_FPR8;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000400/mask=x3f600c00
# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000c00/mask=x3f600c00
# CONSTRUCT x7c000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst x7c000400/mask=xffe00400 --status nomem
# Post- and Pre-offset 16-bit variant when size == 01 && opc == 00 F=FPR16
:str Rt_FPR16, addrIndexed
is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR16 & addrIndexed & Zt
{
    * addrIndexed = Rt_FPR16;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000400/mask=x3f600c00
# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000c00/mask=x3f600c00
# CONSTRUCT xbc000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst xbc000400/mask=xffe00400 --status nomem
# Post- and Pre-offset 32-bit variant when size == 10 && opc == 00 F=FPR32
:str Rt_FPR32, addrIndexed
is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR32 & addrIndexed & Zt
{
    * addrIndexed = Rt_FPR32;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000400/mask=x3f600c00
# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000c00/mask=x3f600c00
# CONSTRUCT xfc000400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst xfc000400/mask=xffe00400 --status nomem
# Post- and Pre-offset 64-bit variant when size == 11 && opc == 00 F=FPR64
:str Rt_FPR64, addrIndexed
is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=0 & b_10=1 & Rt_FPR64 & addrIndexed & Zt
{
    * addrIndexed = Rt_FPR64;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000400/mask=x3f600c00
# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3c000c00/mask=x3f600c00
# CONSTRUCT x3c800400/mask=xffe00400 MATCHED 2 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst x3c800400/mask=xffe00400 --status nomem
# Post- and Pre-offset 128-bit variant when size == 00 && opc == 10 F=FPR128
:str Rt_FPR128, addrIndexed
is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=0 & b_10=1 & Rt_FPR128 & addrIndexed & Zt
{
    * addrIndexed = Rt_FPR128;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3d000000/mask=x3f400000
# CONSTRUCT x3d000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst x3d000000/mask=xffc00000 --status nomem
# Unsigned offset 8-bit variant when size == 00 && opc == 00 F=FPR8
# addrUIMM: base register plus scaled unsigned immediate, no writeback.
:str Rt_FPR8, addrUIMM
is b_3031=0b00 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR8 & addrUIMM & Zt
{
    * addrUIMM = Rt_FPR8;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3d000000/mask=x3f400000
# CONSTRUCT x7d000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst x7d000000/mask=xffc00000 --status nomem
# Unsigned offset 16-bit variant when size == 01 && opc == 00 F=FPR16
:str Rt_FPR16, addrUIMM
is b_3031=0b01 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR16 & addrUIMM & Zt
{
    * addrUIMM = Rt_FPR16;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3d000000/mask=x3f400000
# CONSTRUCT xbd000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst xbd000000/mask=xffc00000 --status nomem
# Unsigned offset 32-bit variant when size == 10 && opc == 00 F=FPR32
:str Rt_FPR32, addrUIMM
is b_3031=0b10 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR32 & addrUIMM & Zt
{
    * addrUIMM = Rt_FPR32;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3d000000/mask=x3f400000
# CONSTRUCT xfd000000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst xfd000000/mask=xffc00000 --status nomem
# Unsigned offset 64-bit variant when size == 11 && opc == 00 F=FPR64
:str Rt_FPR64, addrUIMM
is b_3031=0b11 & b_2429=0b111101 & b_2223=0b00 & Rt_FPR64 & addrUIMM & Zt
{
    * addrUIMM = Rt_FPR64;
}

# C7.2.331 STR (immediate, SIMD&FP) page C7-2782 line 162501 MATCH x3d000000/mask=x3f400000
# CONSTRUCT x3d800000/mask=xffc00000 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 ARG2 =store pop
# SMACRO(pseudo) null ARG1 ARG2 =NEON_str/2
# AUNIT --inst x3d800000/mask=xffc00000 --status nomem
# Unsigned offset 128-bit variant when size == 00 && opc == 10 F=FPR128
:str Rt_FPR128, addrUIMM
is b_3031=0b00 & b_2429=0b111101 & b_2223=0b10 & Rt_FPR128 & addrUIMM & Zt
{
    * addrUIMM = Rt_FPR128;
}

# C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00
# CONSTRUCT x3c200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES
# SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop
# SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4
# AUNIT --inst x3c200800/mask=xffe02c00 --status nomem
# 8-fsreg,STR-8-fsreg variant when size == 00 && opc == 00 && option is not 011 bb=b_13 option=0 F=FPR8 G=GPR32
# STR (register, SIMD&FP): effective address is Rn + (extended Rm << amount);
# extend_spec/extend_amount decode the option/S fields (defined elsewhere).
:str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount]
is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd
{
    local tmp1:8 = extend_spec << extend_amount;
    local tmp2:8 = Rn_GPR64xsp + tmp1;
    * tmp2 = Rt_FPR8;
}

# 
C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT x3c202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst x3c202800/mask=xffe02c00 --status nomem # 8-fsreg,STR-8-fsreg variant when size == 00 && opc == 00 && option is not 011 bb=b_13 option=1 F=FPR8 G=GPR64 :str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR8; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT x3c206800/mask=xffe0ec00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst x3c206800/mask=xffe0ec00 --status nomem # 8-fsreg,STR-8-fsreg variant when size == 00 && opc == 00 && option is 011 bb=b_1315 option=0b011 F=FPR8 G=GPR64 :str Rt_FPR8, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_1315=0b011 & b_1011=0b10 & Rt_FPR8 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR8; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT x7c200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst x7c200800/mask=xffe02c00 --status nomem # 16-fsreg,STR-16-fsreg variant when size == 01 && opc == 00 
bb=b_13 option=0 F=FPR16 G=GPR32 :str Rt_FPR16, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR16; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT x7c202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst x7c202800/mask=xffe02c00 --status nomem # 16-fsreg,STR-16-fsreg variant when size == 01 && opc == 00 bb=b_13 option=1 F=FPR16 G=GPR64 :str Rt_FPR16, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b01 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR16 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR16; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT xbc200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst xbc200800/mask=xffe02c00 --status nomem # 32-fsreg,STR-32-fsreg variant when size == 10 && opc == 00 bb=b_13 option=0 F=FPR32 G=GPR32 :str Rt_FPR32, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR32; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT 
xbc202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst xbc202800/mask=xffe02c00 --status nomem # 32-fsreg,STR-32-fsreg variant when size == 10 && opc == 00 bb=b_13 option=1 F=FPR32 G=GPR64 :str Rt_FPR32, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b10 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR32 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR32; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT xfc200800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst xfc200800/mask=xffe02c00 --status nomem # 64-fsreg,STR-64-fsreg variant when size == 11 && opc == 00 bb=b_13 option=0 F=FPR64 G=GPR32 :str Rt_FPR64, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & b_2223=0b00 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR64; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT xfc202800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst xfc202800/mask=xffe02c00 --status nomem # 64-fsreg,STR-64-fsreg variant when size == 11 && opc == 00 bb=b_13 option=1 F=FPR64 G=GPR64 :str Rt_FPR64, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b11 & b_2429=0b111100 & 
b_2223=0b00 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR64 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR64; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT x3ca00800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst x3ca00800/mask=xffe02c00 --status nomem # 128-fsreg,STR-128-fsreg variant when size == 00 && opc == 10 bb=b_13 option=0 F=FPR128 G=GPR32 :str Rt_FPR128, [Rn_GPR64xsp, Rm_GPR32^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=0 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR32 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR128; } # C7.2.332 STR (register, SIMD&FP) page C7-2786 line 162762 MATCH x3c200800/mask=x3f600c00 # CONSTRUCT x3ca02800/mask=xffe02c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 Rn_GPR64xsp extend_spec extend_amount <<:8 + =store pop # SMACRO(pseudo) null ARG1 Rn_GPR64xsp extend_spec extend_amount =NEON_str/4 # AUNIT --inst x3ca02800/mask=xffe02c00 --status nomem # 128-fsreg,STR-128-fsreg variant when size == 00 && opc == 10 bb=b_13 option=1 F=FPR128 G=GPR64 :str Rt_FPR128, [Rn_GPR64xsp, Rm_GPR64^extend_spec^extend_amount] is b_3031=0b00 & b_2429=0b111100 & b_2223=0b10 & b_21=1 & b_13=1 & b_1011=0b10 & Rt_FPR128 & Rn_GPR64xsp & Rm_GPR64 & extend_spec & extend_amount & Zd { local tmp1:8 = extend_spec << extend_amount; local tmp2:8 = Rn_GPR64xsp + tmp1; * tmp2 = Rt_FPR128; } # C7.2.333 STUR (SIMD&FP) page C7-2789 line 162949 MATCH x3c000000/mask=x3f600c00 # CONSTRUCT x3c800000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =store pop # SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 # 
AUNIT --inst x3c800000/mask=xffe00c00 --status nomem :stur Rt_FPR128, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=1 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR128 { * addrIndexed = Rt_FPR128; } # C7.2.333 STUR (SIMD&FP) page C7-2789 line 162949 MATCH x3c000000/mask=x3f600c00 # CONSTRUCT x7c000000/mask=xffc00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =store pop # SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 # AUNIT --inst x7c000000/mask=xffc00c00 --status nomem :stur Rt_FPR16, addrIndexed is size.ldstr=1 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_1011=0 & addrIndexed & Rt_FPR16 { * addrIndexed = Rt_FPR16; } # C7.2.333 STUR (SIMD&FP) page C7-2789 line 162949 MATCH x3c000000/mask=x3f600c00 # CONSTRUCT xbc000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =store pop # SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 # AUNIT --inst xbc000000/mask=xffe00c00 --status nomem :stur Rt_FPR32, addrIndexed is size.ldstr=2 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR32 { * addrIndexed = Rt_FPR32; } # C7.2.333 STUR (SIMD&FP) page C7-2789 line 162949 MATCH x3c000000/mask=x3f600c00 # CONSTRUCT xfc000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =store pop # SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 # AUNIT --inst xfc000000/mask=xffe00c00 --status nomem :stur Rt_FPR64, addrIndexed is size.ldstr=3 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR64 { * addrIndexed = Rt_FPR64; } # C7.2.333 STUR (SIMD&FP) page C7-2789 line 162949 MATCH x3c000000/mask=x3f600c00 # CONSTRUCT x3c000000/mask=xffe00c00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =store pop # SMACRO(pseudo) null ARG1 ARG2 =NEON_stur/2 # AUNIT --inst x3c000000/mask=xffe00c00 --status nomem :stur Rt_FPR8, addrIndexed is size.ldstr=0 & b_2729=7 & v=1 & b_2425=0 & b_23=0 & b_2222=0 & b_2121=0 & b_1011=0 & addrIndexed & Rt_FPR8 { * addrIndexed = Rt_FPR8; } 
# C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x7e208400/mask=xff20fc00 # CONSTRUCT x7ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =- # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2 # AUNIT --inst x7ee08400/mask=xffe0fc00 --status pass :sub Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x10 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = Rn_FPR64 - Rm_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x6e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@1 # AUNIT --inst x6e208400/mask=xffe0fc00 --status pass :sub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x10 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B - Rm_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] - Rm_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] - Rm_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] - Rm_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] - Rm_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] - Rm_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] - Rm_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] - Rm_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] - Rm_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] - Rm_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] - Rm_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] - Rm_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] - Rm_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] - Rm_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] - Rm_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = 
Rn_VPR128.16B[112,8] - Rm_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] - Rm_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x6ee08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@8 # AUNIT --inst x6ee08400/mask=xffe0fc00 --status pass :sub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x10 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd infix Rd_VPR128.2D = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x2ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@4 # AUNIT --inst x2ea08400/mask=xffe0fc00 --status pass :sub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x10 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x2e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@2 # AUNIT --inst x2e608400/mask=xffe0fc00 --status pass :sub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x10 & b_1010=1 & Rn_VPR64.4H & 
Rd_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H - Rm_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] - Rm_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] - Rm_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] - Rm_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] - Rm_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x6ea08400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@4 # AUNIT --inst x6ea08400/mask=xffe0fc00 --status pass :sub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x10 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x2e208400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@1 # AUNIT --inst x2e208400/mask=xffe0fc00 --status pass :sub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x10 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B - Rm_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] - Rm_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] - Rm_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] - Rm_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] - Rm_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = 
Rn_VPR64.8B[32,8] - Rm_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] - Rm_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] - Rm_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] - Rm_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.334 SUB (vector) page C7-2791 line 163076 MATCH x2e208400/mask=xbf20fc00 # CONSTRUCT x6e608400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@2 # AUNIT --inst x6e608400/mask=xffe0fc00 --status pass :sub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x10 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.335 SUBHN, SUBHN2 page C7-2793 line 163214 MATCH x0e206000/mask=xbf20fc00 # CONSTRUCT x4e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@2 &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@2 # AUNIT --inst x4e206000/mask=xffe0fc00 --status pass :subhn2 Rd_VPR128.16B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; 
TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 Rd_VPR128.16B[64,8] = TMPQ1[8,8]; Rd_VPR128.16B[72,8] = TMPQ1[24,8]; Rd_VPR128.16B[80,8] = TMPQ1[40,8]; Rd_VPR128.16B[88,8] = TMPQ1[56,8]; Rd_VPR128.16B[96,8] = TMPQ1[72,8]; Rd_VPR128.16B[104,8] = TMPQ1[88,8]; Rd_VPR128.16B[112,8] = TMPQ1[104,8]; Rd_VPR128.16B[120,8] = TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.335 SUBHN, SUBHN2 page C7-2793 line 163214 MATCH x0e206000/mask=xbf20fc00 # CONSTRUCT x4ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@8 &=$shuffle@1-2@3-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_sub/2@8 # AUNIT --inst x4ea06000/mask=xffe0fc00 --status pass :subhn2 Rd_VPR128.4S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { # simd infix TMPQ1 = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-2@3-3) lane size 4 Rd_VPR128.4S[64,32] = TMPQ1[32,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.335 SUBHN, SUBHN2 page C7-2793 line 163214 MATCH x0e206000/mask=xbf20fc00 # CONSTRUCT x4e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@4 &=$shuffle@1-4@3-5@5-6@7-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn2/3@4 # AUNIT --inst x4e606000/mask=xffe0fc00 --status pass :subhn2 Rd_VPR128.8H, 
Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-4@3-5@5-6@7-7) lane size 2 Rd_VPR128.8H[64,16] = TMPQ1[16,16]; Rd_VPR128.8H[80,16] = TMPQ1[48,16]; Rd_VPR128.8H[96,16] = TMPQ1[80,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.335 SUBHN, SUBHN2 page C7-2793 line 163214 MATCH x0e206000/mask=xbf20fc00 # CONSTRUCT x0ea06000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@8 &=$shuffle@1-0@3-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn/3@8 # AUNIT --inst x0ea06000/mask=xffe0fc00 --status pass :subhn Rd_VPR64.2S, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.2D & b_1215=0x6 & b_1011=0 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { # simd infix TMPQ1 = Rn_VPR128.2D - Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] - Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] - Rm_VPR128.2D[64,64]; # simd shuffle Rd_VPR64.2S = TMPQ1 (@1-0@3-1) lane size 4 Rd_VPR64.2S[0,32] = TMPQ1[32,32]; Rd_VPR64.2S[32,32] = TMPQ1[96,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.335 SUBHN, SUBHN2 page C7-2793 line 163214 MATCH x0e206000/mask=xbf20fc00 # CONSTRUCT x0e606000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@4 &=$shuffle@1-0@3-1@5-2@7-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn/3@4 # AUNIT --inst x0e606000/mask=xffe0fc00 --status pass :subhn Rd_VPR64.4H, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.4S & 
b_1215=0x6 & b_1011=0 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { # simd infix TMPQ1 = Rn_VPR128.4S - Rm_VPR128.4S on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] - Rm_VPR128.4S[0,32]; TMPQ1[32,32] = Rn_VPR128.4S[32,32] - Rm_VPR128.4S[32,32]; TMPQ1[64,32] = Rn_VPR128.4S[64,32] - Rm_VPR128.4S[64,32]; TMPQ1[96,32] = Rn_VPR128.4S[96,32] - Rm_VPR128.4S[96,32]; # simd shuffle Rd_VPR64.4H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 Rd_VPR64.4H[0,16] = TMPQ1[16,16]; Rd_VPR64.4H[16,16] = TMPQ1[48,16]; Rd_VPR64.4H[32,16] = TMPQ1[80,16]; Rd_VPR64.4H[48,16] = TMPQ1[112,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.335 SUBHN, SUBHN2 page C7-2793 line 163214 MATCH x0e206000/mask=xbf20fc00 # CONSTRUCT x0e206000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@2 &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_subhn/3@2 # AUNIT --inst x0e206000/mask=xffe0fc00 --status pass :subhn Rd_VPR64.8B, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.8H & b_1215=0x6 & b_1011=0 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { # simd infix TMPQ1 = Rn_VPR128.8H - Rm_VPR128.8H on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] - Rm_VPR128.8H[0,16]; TMPQ1[16,16] = Rn_VPR128.8H[16,16] - Rm_VPR128.8H[16,16]; TMPQ1[32,16] = Rn_VPR128.8H[32,16] - Rm_VPR128.8H[32,16]; TMPQ1[48,16] = Rn_VPR128.8H[48,16] - Rm_VPR128.8H[48,16]; TMPQ1[64,16] = Rn_VPR128.8H[64,16] - Rm_VPR128.8H[64,16]; TMPQ1[80,16] = Rn_VPR128.8H[80,16] - Rm_VPR128.8H[80,16]; TMPQ1[96,16] = Rn_VPR128.8H[96,16] - Rm_VPR128.8H[96,16]; TMPQ1[112,16] = Rn_VPR128.8H[112,16] - Rm_VPR128.8H[112,16]; # simd shuffle Rd_VPR64.8B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 Rd_VPR64.8B[0,8] = TMPQ1[8,8]; Rd_VPR64.8B[8,8] = TMPQ1[24,8]; Rd_VPR64.8B[16,8] = TMPQ1[40,8]; Rd_VPR64.8B[24,8] = TMPQ1[56,8]; Rd_VPR64.8B[32,8] = TMPQ1[72,8]; Rd_VPR64.8B[40,8] = TMPQ1[88,8]; Rd_VPR64.8B[48,8] = TMPQ1[104,8]; Rd_VPR64.8B[56,8] = TMPQ1[120,8]; 
zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x5e203800/mask=xff3ffc00 # CONSTRUCT x5e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=+ # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 # AUNIT --inst x5e203800/mask=xfffffc00 --status fail --comment "nointsat" # Scalar variant when size=00 Q=1 bb=1 V=FPR8 s2=2 :suqadd Rd_FPR8, Rn_FPR8 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_FPR8 & Rn_FPR8 & Zd { Rd_FPR8 = Rd_FPR8 + Rn_FPR8; zext_zb(Zd); # zero upper 31 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x5e203800/mask=xff3ffc00 # CONSTRUCT x5e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=+ # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 # AUNIT --inst x5e603800/mask=xfffffc00 --status fail --comment "nointsat" # Scalar variant when size=01 Q=1 bb=1 V=FPR16 s2=4 :suqadd Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = Rd_FPR16 + Rn_FPR16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x5e203800/mask=xff3ffc00 # CONSTRUCT x5ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=+ # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 # AUNIT --inst x5ea03800/mask=xfffffc00 --status fail --comment "nointsat" # Scalar variant when size=10 Q=1 bb=1 V=FPR32 s2=8 :suqadd Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = Rd_FPR32 + Rn_FPR32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x5e203800/mask=xff3ffc00 # CONSTRUCT x5ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=+ # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2 # AUNIT --inst x5ee03800/mask=xfffffc00 --status fail --comment "nointsat" # Scalar variant when size=11 Q=1 bb=1 V=FPR64 s2=16 :suqadd 
Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b011110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = Rd_FPR64 + Rn_FPR64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x0e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@1 # AUNIT --inst x0e203800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=00 Q=0 bb=0 V=VPR64.8B e1=1 s2=16 :suqadd Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rd_VPR64.8B + Rn_VPR64.8B on lane size 1 Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + Rn_VPR64.8B[0,8]; Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + Rn_VPR64.8B[8,8]; Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + Rn_VPR64.8B[16,8]; Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + Rn_VPR64.8B[24,8]; Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + Rn_VPR64.8B[32,8]; Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + Rn_VPR64.8B[40,8]; Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + Rn_VPR64.8B[48,8]; Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + Rn_VPR64.8B[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x4e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@1 # AUNIT --inst x4e203800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=00 Q=1 bb=0 V=VPR128.16B e1=1 s2=32 :suqadd Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rd_VPR128.16B + Rn_VPR128.16B on lane size 1 Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + Rn_VPR128.16B[0,8]; Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + Rn_VPR128.16B[8,8]; Rd_VPR128.16B[16,8] = 
Rd_VPR128.16B[16,8] + Rn_VPR128.16B[16,8]; Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + Rn_VPR128.16B[24,8]; Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + Rn_VPR128.16B[32,8]; Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + Rn_VPR128.16B[40,8]; Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + Rn_VPR128.16B[48,8]; Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + Rn_VPR128.16B[56,8]; Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + Rn_VPR128.16B[64,8]; Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + Rn_VPR128.16B[72,8]; Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + Rn_VPR128.16B[80,8]; Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + Rn_VPR128.16B[88,8]; Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + Rn_VPR128.16B[96,8]; Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + Rn_VPR128.16B[104,8]; Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + Rn_VPR128.16B[112,8]; Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + Rn_VPR128.16B[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x0e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@2 # AUNIT --inst x0e603800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=01 Q=0 bb=0 V=VPR64.4H e1=2 s2=16 :suqadd Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rd_VPR64.4H + Rn_VPR64.4H on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + Rn_VPR64.4H[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + Rn_VPR64.4H[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + Rn_VPR64.4H[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + Rn_VPR64.4H[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x4e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@2 # 
# --- SUQADD (vector) and SXTL/SXTL2 constructors (machine-generated) ---
# NOTE(review): this section appears auto-generated from the Arm ARM; the
# MATCH / CONSTRUCT / SMACRO / AUNIT '#' runs are generator round-trip
# metadata — confirm with the generator before hand-editing any of it.
# SUQADD: the architectural operation is a *saturating* signed+unsigned add,
# but the p-code below is a plain wrapping per-lane add; the generator's own
# AUNIT lines mark these variants --status fail --comment "nointsat"
# (no integer saturation), so the missing saturation is a known limitation.
# SXTL/SXTL2: per-lane sign-extension into double-width lanes; SXTL2 reads
# the high 64 bits of the source ([64,64]), SXTL the low half. zext_zd /
# zext_zq then zero the untouched upper 24 / 16 bytes of the full Z register.
AUNIT --inst x4e603800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=01 Q=1 bb=0 V=VPR128.8H e1=2 s2=32 :suqadd Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rd_VPR128.8H + Rn_VPR128.8H on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + Rn_VPR128.8H[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + Rn_VPR128.8H[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + Rn_VPR128.8H[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + Rn_VPR128.8H[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + Rn_VPR128.8H[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + Rn_VPR128.8H[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + Rn_VPR128.8H[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + Rn_VPR128.8H[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x0ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@4 # AUNIT --inst x0ea03800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=10 Q=0 bb=0 V=VPR64.2S e1=4 s2=16 :suqadd Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { # simd infix Rd_VPR64.2S = Rd_VPR64.2S + Rn_VPR64.2S on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + Rn_VPR64.2S[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + Rn_VPR64.2S[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x4ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@4 # AUNIT --inst x4ea03800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=10 Q=1 bb=0 V=VPR128.4S e1=4 s2=32 :suqadd 
Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { # simd infix Rd_VPR128.4S = Rd_VPR128.4S + Rn_VPR128.4S on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + Rn_VPR128.4S[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + Rn_VPR128.4S[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + Rn_VPR128.4S[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + Rn_VPR128.4S[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.337 SUQADD page C7-2797 line 163431 MATCH x0e203800/mask=xbf3ffc00 # CONSTRUCT x4ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_suqadd/2@8 # AUNIT --inst x4ee03800/mask=xfffffc00 --status fail --comment "nointsat" # Vector variant when size=11 Q=1 bb=0 V=VPR128.2D e1=8 s2=32 :suqadd Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b001110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { # simd infix Rd_VPR128.2D = Rd_VPR128.2D + Rn_VPR128.2D on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + Rn_VPR128.2D[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + Rn_VPR128.2D[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # CONSTRUCT x4f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$sext@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl2/1@1 # AUNIT --inst x4f08a400/mask=xfffffc00 --status pass --comment "ext" :sxtl2 Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize Rd_VPR128.8H = sext(TMPD1) (lane size 1 to 2) Rd_VPR128.8H[0,16] = sext(TMPD1[0,8]); Rd_VPR128.8H[16,16] = sext(TMPD1[8,8]); Rd_VPR128.8H[32,16] = sext(TMPD1[16,8]); 
Rd_VPR128.8H[48,16] = sext(TMPD1[24,8]); Rd_VPR128.8H[64,16] = sext(TMPD1[32,8]); Rd_VPR128.8H[80,16] = sext(TMPD1[40,8]); Rd_VPR128.8H[96,16] = sext(TMPD1[48,8]); Rd_VPR128.8H[112,16] = sext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # CONSTRUCT x0f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$sext@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl/1@4 # AUNIT --inst x0f20a400/mask=xfffffc00 --status pass --comment "ext" :sxtl Rd_VPR128.2D, Rn_VPR64.2S is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR64.2S; # simd resize Rd_VPR128.2D = sext(TMPD1) (lane size 4 to 8) Rd_VPR128.2D[0,64] = sext(TMPD1[0,32]); Rd_VPR128.2D[64,64] = sext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # CONSTRUCT x0f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$sext@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl/1@2 # AUNIT --inst x0f10a400/mask=xfffffc00 --status pass --comment "ext" :sxtl Rd_VPR128.4S, Rn_VPR64.4H is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR64.4H; # simd resize Rd_VPR128.4S = sext(TMPD1) (lane size 2 to 4) Rd_VPR128.4S[0,32] = sext(TMPD1[0,16]); Rd_VPR128.4S[32,32] = sext(TMPD1[16,16]); Rd_VPR128.4S[64,32] = sext(TMPD1[32,16]); Rd_VPR128.4S[96,32] = sext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # CONSTRUCT x4f20a400/mask=xfffffc00 
MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$sext@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl2/1@4 # AUNIT --inst x4f20a400/mask=xfffffc00 --status pass --comment "ext" :sxtl2 Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize Rd_VPR128.2D = sext(TMPD1) (lane size 4 to 8) Rd_VPR128.2D[0,64] = sext(TMPD1[0,32]); Rd_VPR128.2D[64,64] = sext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # CONSTRUCT x0f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$sext@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl/1@1 # AUNIT --inst x0f08a400/mask=xfffffc00 --status pass --comment "ext" :sxtl Rd_VPR128.8H, Rn_VPR64.8B is b_3131=0 & q=0 & u=0 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Rn_VPR128 & Zd { TMPD1 = Rn_VPR64.8B; # simd resize Rd_VPR128.8H = sext(TMPD1) (lane size 1 to 2) Rd_VPR128.8H[0,16] = sext(TMPD1[0,8]); Rd_VPR128.8H[16,16] = sext(TMPD1[8,8]); Rd_VPR128.8H[32,16] = sext(TMPD1[16,8]); Rd_VPR128.8H[48,16] = sext(TMPD1[24,8]); Rd_VPR128.8H[64,16] = sext(TMPD1[32,8]); Rd_VPR128.8H[80,16] = sext(TMPD1[40,8]); Rd_VPR128.8H[96,16] = sext(TMPD1[48,8]); Rd_VPR128.8H[112,16] = sext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.338 SXTL, SXTL2 page C7-2799 line 163553 MATCH x0f00a400/mask=xbf87fc00 # C7.2.316 SSHLL, SSHLL2 page C7-2736 line 159625 MATCH x0f00a400/mask=xbf80fc00 # CONSTRUCT x4f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$sext@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_sxtl2/1@2 # AUNIT --inst x4f10a400/mask=xfffffc00 --status pass --comment "ext" :sxtl2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xf & b_2023=0x1 & 
# --- TBL/TBX table-lookup constructors (machine-generated) ---
# NOTE(review): generator metadata ('#' MATCH/CONSTRUCT/SMACRO/AUNIT runs)
# is preserved verbatim; confirm with the generator before hand-editing.
# The 'tblx' subtable factors TBL vs TBX on bit 12: for "tbl" (b_12=0) it
# exports a zeroed 16-byte temp (out-of-range indices produce 0), for "tbx"
# (b_12=1) it exports Rd_VPR128 (out-of-range lanes keep the destination).
# Each variant passes the exported value plus 1-4 consecutive table
# registers and the index register to the a64_TBL pseudo-op; len (b_1314)
# selects the register count, Q (b_30) selects 8B vs 16B result width.
Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize Rd_VPR128.4S = sext(TMPD1) (lane size 2 to 4) Rd_VPR128.4S[0,32] = sext(TMPD1[0,16]); Rd_VPR128.4S[32,32] = sext(TMPD1[16,16]); Rd_VPR128.4S[64,32] = sext(TMPD1[32,16]); Rd_VPR128.4S[96,32] = sext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.322 TBL page C7-1717 line 99409 KEEPWITH tblx: "tbl" is b_12=0 { local tmp:16 = zext(0:8); export tmp; } tblx: "tbx" is b_12=1 & Rd_VPR128 { export Rd_VPR128; } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x0e000000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B ARG3 =a64_TBL/3 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B ARG3 =NEON_tblx/3@1 # AUNIT --inst x0e000000/mask=xffe0ec00 --status pass # Q == 0 && len == 00 8B, Single register table variant :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b00 & Rm_VPR64.8B & Rn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x4e000000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B ARG3 =a64_TBL/3 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B ARG3 =NEON_tblx/3@1 # AUNIT --inst x4e000000/mask=xffe0ec00 --status pass # Q == 1 && len == 00 16B, Single register table variant :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b00 & Rm_VPR128.16B & Rn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.339 
TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x0e002000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =a64_TBL/4 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =NEON_tblx/4 # AUNIT --inst x0e002000/mask=xffe0ec00 --status pass # Q == 0 && len == 01 8B, Two register table variant :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b01 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x4e002000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =a64_TBL/4 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B ARG4 =NEON_tblx/4 # AUNIT --inst x4e002000/mask=xffe0ec00 --status pass # Q == 1 && len == 01 16B, Two register table variant :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b01 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x0e004000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =a64_TBL/5 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =NEON_tblx/5 # AUNIT --inst 
x0e004000/mask=xffe0ec00 --status pass # Q == 0 && len == 10 8B, Three register table variant :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b10 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x4e004000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =a64_TBL/5 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B ARG5 =NEON_tblx/5 # AUNIT --inst x4e004000/mask=xffe0ec00 --status pass # Q == 1 && len == 10 16B, Three register table variant :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b10 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x0e006000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =a64_TBL/6 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =NEON_tblx/6 # AUNIT --inst x0e006000/mask=xffe0ec00 --status pass # Q == 0 && len == 11 8B, Four register table variant :^tblx Rd_VPR64.8B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^", 
"^Rnnnn_VPR128.16B^"}", Rm_VPR64.8B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=0 & b_1314=0b11 & Rm_VPR64.8B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rnnnn_VPR128.16B & Rd_VPR64.8B & tblx & Zd { Rd_VPR64.8B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR64.8B); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.339 TBL page C7-2801 line 163652 MATCH x0e000000/mask=xbfe09c00 # C7.2.340 TBX page C7-2803 line 163781 MATCH x0e001000/mask=xbfe09c00 # CONSTRUCT x4e006000/mask=xffe0ec00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =a64_TBL/6 # SMACRO(pseudo) ARG1 tblx Rn_VPR128.16B Rnn_VPR128.16B Rnnn_VPR128.16B Rnnnn_VPR128.16B ARG6 =NEON_tblx/6 # AUNIT --inst x4e006000/mask=xffe0ec00 --status pass # Q == 1 && len == 11 16B, Four register table variant :^tblx Rd_VPR128.16B, "{"^Rn_VPR128.16B^", "^Rnn_VPR128.16B^", "^Rnnn_VPR128.16B^", "^Rnnnn_VPR128.16B^"}", Rm_VPR128.16B is b_31=0 & b_2129=0b001110000 & b_15=0 & b_1011=0b00 & b_30=1 & b_1314=0b11 & Rm_VPR128.16B & Rn_VPR128.16B & Rnn_VPR128.16B & Rnnn_VPR128.16B & Rnnnn_VPR128.16B & Rd_VPR128.16B & tblx & Zd { Rd_VPR128.16B = a64_TBL(tblx, Rn_VPR128.16B, Rnn_VPR128.16B, Rnnn_VPR128.16B, Rnnnn_VPR128.16B, Rm_VPR128.16B); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x4e002800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2@4-4@6-6@8-8@10-10@12-12@14-14:1 swap &=$shuffle@0-1@2-3@4-5@6-7@8-9@10-11@12-13@14-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@1 # AUNIT --inst x4e002800/mask=xffe0fc00 --status pass :trn1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd 
# --- TRN1 transpose constructors (machine-generated) ---
# NOTE(review): generator metadata preserved verbatim; confirm with the
# generator before hand-editing.
# TRN1 interleaves the even-numbered lanes of the two sources: source
# operands are first copied into TMPQ/TMPD temporaries (so Rd may alias
# Rn or Rm safely), then even destination lanes come from TMPQ1/TMPD1
# (Rn) and odd destination lanes from TMPQ2/TMPD2 (Rm), all reading the
# sources' even lanes. zext_zd/zext_zq zero the rest of the Z register.
shuffle Rd_VPR128.16B = TMPQ1 (@0-0@2-2@4-4@6-6@8-8@10-10@12-12@14-14) lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[0,8]; Rd_VPR128.16B[16,8] = TMPQ1[16,8]; Rd_VPR128.16B[32,8] = TMPQ1[32,8]; Rd_VPR128.16B[48,8] = TMPQ1[48,8]; Rd_VPR128.16B[64,8] = TMPQ1[64,8]; Rd_VPR128.16B[80,8] = TMPQ1[80,8]; Rd_VPR128.16B[96,8] = TMPQ1[96,8]; Rd_VPR128.16B[112,8] = TMPQ1[112,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-1@2-3@4-5@6-7@8-9@10-11@12-13@14-15) lane size 1 Rd_VPR128.16B[8,8] = TMPQ2[0,8]; Rd_VPR128.16B[24,8] = TMPQ2[16,8]; Rd_VPR128.16B[40,8] = TMPQ2[32,8]; Rd_VPR128.16B[56,8] = TMPQ2[48,8]; Rd_VPR128.16B[72,8] = TMPQ2[64,8]; Rd_VPR128.16B[88,8] = TMPQ2[80,8]; Rd_VPR128.16B[104,8] = TMPQ2[96,8]; Rd_VPR128.16B[120,8] = TMPQ2[112,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x4ec02800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:8 swap &=$shuffle@0-1:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@8 # AUNIT --inst x4ec02800/mask=xffe0fc00 --status pass :trn1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 Rd_VPR128.2D[64,64] = TMPQ2[0,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x0e802800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:4 swap &=$shuffle@0-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@4 # AUNIT --inst x0e802800/mask=xffe0fc00 --status pass :trn1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & 
b_1214=2 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[0,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 Rd_VPR64.2S[32,32] = TMPD2[0,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x0e402800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2:2 swap &=$shuffle@0-1@2-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@2 # AUNIT --inst x0e402800/mask=xffe0fc00 --status pass :trn1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@2-2) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[0,16]; Rd_VPR64.4H[32,16] = TMPD1[32,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@0-1@2-3) lane size 2 Rd_VPR64.4H[16,16] = TMPD2[0,16]; Rd_VPR64.4H[48,16] = TMPD2[32,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x4e802800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2:4 swap &=$shuffle@0-1@2-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@4 # AUNIT --inst x4e802800/mask=xffe0fc00 --status pass :trn1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@2-2) lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32]; Rd_VPR128.4S[64,32] = TMPQ1[64,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-1@2-3) lane size 4 Rd_VPR128.4S[32,32] = TMPQ2[0,32]; Rd_VPR128.4S[96,32] = TMPQ2[64,32]; zext_zq(Zd); # zero 
upper 16 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x0e002800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2@4-4@6-6:1 swap &=$shuffle@0-1@2-3@4-5@6-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@1 # AUNIT --inst x0e002800/mask=xffe0fc00 --status pass :trn1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@2-2@4-4@6-6) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[0,8]; Rd_VPR64.8B[16,8] = TMPD1[16,8]; Rd_VPR64.8B[32,8] = TMPD1[32,8]; Rd_VPR64.8B[48,8] = TMPD1[48,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@0-1@2-3@4-5@6-7) lane size 1 Rd_VPR64.8B[8,8] = TMPD2[0,8]; Rd_VPR64.8B[24,8] = TMPD2[16,8]; Rd_VPR64.8B[40,8] = TMPD2[32,8]; Rd_VPR64.8B[56,8] = TMPD2[48,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.341 TRN1 page C7-2805 line 163910 MATCH x0e002800/mask=xbf20fc00 # CONSTRUCT x4e402800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-2@4-4@6-6:2 swap &=$shuffle@0-1@2-3@4-5@6-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn1/3@2 # AUNIT --inst x4e402800/mask=xffe0fc00 --status pass :trn1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=2 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@2-2@4-4@6-6) lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-1@2-3@4-5@6-7) lane size 2 Rd_VPR128.8H[16,16] = TMPQ2[0,16]; Rd_VPR128.8H[48,16] = TMPQ2[32,16]; Rd_VPR128.8H[80,16] = TMPQ2[64,16]; Rd_VPR128.8H[112,16] 
# --- TRN2 transpose constructors (machine-generated) ---
# NOTE(review): generator metadata preserved verbatim; confirm with the
# generator before hand-editing.
# TRN2 is the odd-lane counterpart of TRN1: sources are copied into
# TMPQ/TMPD temporaries first (alias-safe), then even destination lanes
# read the *odd* lanes of TMPQ1/TMPD1 (Rn) and odd destination lanes read
# the odd lanes of TMPQ2/TMPD2 (Rm). zext_zd/zext_zq zero the remaining
# bytes of the Z register.
= TMPQ2[96,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x4e006800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2@5-4@7-6@9-8@11-10@13-12@15-14:1 swap &=$shuffle@1-1@3-3@5-5@7-7@9-9@11-11@13-13@15-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@1 # AUNIT --inst x4e006800/mask=xffe0fc00 --status pass :trn2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-0@3-2@5-4@7-6@9-8@11-10@13-12@15-14) lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[8,8]; Rd_VPR128.16B[16,8] = TMPQ1[24,8]; Rd_VPR128.16B[32,8] = TMPQ1[40,8]; Rd_VPR128.16B[48,8] = TMPQ1[56,8]; Rd_VPR128.16B[64,8] = TMPQ1[72,8]; Rd_VPR128.16B[80,8] = TMPQ1[88,8]; Rd_VPR128.16B[96,8] = TMPQ1[104,8]; Rd_VPR128.16B[112,8] = TMPQ1[120,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@1-1@3-3@5-5@7-7@9-9@11-11@13-13@15-15) lane size 1 Rd_VPR128.16B[8,8] = TMPQ2[8,8]; Rd_VPR128.16B[24,8] = TMPQ2[24,8]; Rd_VPR128.16B[40,8] = TMPQ2[40,8]; Rd_VPR128.16B[56,8] = TMPQ2[56,8]; Rd_VPR128.16B[72,8] = TMPQ2[72,8]; Rd_VPR128.16B[88,8] = TMPQ2[88,8]; Rd_VPR128.16B[104,8] = TMPQ2[104,8]; Rd_VPR128.16B[120,8] = TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x4ec06800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:8 swap &=$shuffle@1-1:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@8 # AUNIT --inst x4ec06800/mask=xffe0fc00 --status pass :trn2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ2 = Rm_VPR128.2D; 
TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[64,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 Rd_VPR128.2D[64,64] = TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x0e806800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:4 swap &=$shuffle@1-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@4 # AUNIT --inst x0e806800/mask=xffe0fc00 --status pass :trn2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[32,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 Rd_VPR64.2S[32,32] = TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x0e406800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2:2 swap &=$shuffle@1-1@3-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@2 # AUNIT --inst x0e406800/mask=xffe0fc00 --status pass :trn2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@1-0@3-2) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[16,16]; Rd_VPR64.4H[32,16] = TMPD1[48,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@1-1@3-3) lane size 2 Rd_VPR64.4H[16,16] = TMPD2[16,16]; Rd_VPR64.4H[48,16] = TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x4e806800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2:4 swap &=$shuffle@1-1@3-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@4 # AUNIT --inst x4e806800/mask=xffe0fc00 --status pass :trn2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-0@3-2) lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[32,32]; Rd_VPR128.4S[64,32] = TMPQ1[96,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@1-1@3-3) lane size 4 Rd_VPR128.4S[32,32] = TMPQ2[32,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x0e006800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2@5-4@7-6:1 swap &=$shuffle@1-1@3-3@5-5@7-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_trn2/3@1 # AUNIT --inst x0e006800/mask=xffe0fc00 --status pass :trn2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@1-0@3-2@5-4@7-6) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[8,8]; Rd_VPR64.8B[16,8] = TMPD1[24,8]; Rd_VPR64.8B[32,8] = TMPD1[40,8]; Rd_VPR64.8B[48,8] = TMPD1[56,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@1-1@3-3@5-5@7-7) lane size 1 Rd_VPR64.8B[8,8] = TMPD2[8,8]; Rd_VPR64.8B[24,8] = TMPD2[24,8]; Rd_VPR64.8B[40,8] = TMPD2[40,8]; Rd_VPR64.8B[56,8] = TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.342 TRN2 page C7-2807 line 164028 MATCH x0e006800/mask=xbf20fc00 # CONSTRUCT x4e406800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-2@5-4@7-6:2 swap &=$shuffle@1-1@3-3@5-5@7-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 
# --- UABA constructors (machine-generated) ---
# NOTE(review): generator metadata preserved verbatim; confirm with the
# generator before hand-editing.
# UABA (unsigned absolute difference and accumulate) is not expanded into
# p-code here: each variant delegates to the NEON_uaba pseudo-op with the
# lane size as a trailing 1-byte constant (1:1 / 2:1 / 4:1). The AUNIT
# lines record --status nopcodeop --comment "abd" for these, i.e. the
# generator knows there is no inline p-code model for this opcode.
&=NEON_trn2/3@2 # AUNIT --inst x4e406800/mask=xffe0fc00 --status pass :trn2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=6 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-0@3-2@5-4@7-6) lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[112,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@1-1@3-3@5-5@7-7) lane size 2 Rd_VPR128.8H[16,16] = TMPQ2[16,16]; Rd_VPR128.8H[48,16] = TMPQ2[48,16]; Rd_VPR128.8H[80,16] = TMPQ2[80,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.343 UABA page C7-2809 line 164146 MATCH x2e207c00/mask=xbf20fc00 # CONSTRUCT x6e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@1 # AUNIT --inst x6e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :uaba Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xf & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uaba(Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.343 UABA page C7-2809 line 164146 MATCH x2e207c00/mask=xbf20fc00 # CONSTRUCT x2ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@4 # AUNIT --inst x2ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :uaba Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xf & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uaba(Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.343 UABA page C7-2809 line 164146 MATCH x2e207c00/mask=xbf20fc00 # CONSTRUCT x2e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 
&=NEON_uaba/3@2 # AUNIT --inst x2e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :uaba Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xf & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uaba(Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.343 UABA page C7-2809 line 164146 MATCH x2e207c00/mask=xbf20fc00 # CONSTRUCT x6ea07c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@4 # AUNIT --inst x6ea07c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :uaba Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xf & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uaba(Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.343 UABA page C7-2809 line 164146 MATCH x2e207c00/mask=xbf20fc00 # CONSTRUCT x2e207c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@1 # AUNIT --inst x2e207c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :uaba Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xf & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uaba(Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.343 UABA page C7-2809 line 164146 MATCH x2e207c00/mask=xbf20fc00 # CONSTRUCT x6e607c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uaba/3@2 # AUNIT --inst x6e607c00/mask=xffe0fc00 --status nopcodeop --comment "abd" :uaba Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xf & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uaba(Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.344 UABAL, UABAL2 page C7-2811 line 164248 MATCH x2e205000/mask=xbf20fc00 # 
CONSTRUCT x6ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $-@8 $abs@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal2/3@4 # AUNIT --inst x6ea05000/mask=xffe0fc00 --status pass --comment "ext abd" :uabal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x5 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 8 TMPQ6[0,64] = MP_INT_ABS(TMPQ5[0,64]); TMPQ6[64,64] = MP_INT_ABS(TMPQ5[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ6 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ6[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ6[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.344 UABAL, UABAL2 page C7-2811 line 164248 MATCH x2e205000/mask=xbf20fc00 # CONSTRUCT x6e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $-@4 $abs@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal2/3@2 # AUNIT --inst x6e605000/mask=xffe0fc00 --status pass --comment "ext abd" :uabal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x5 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); 
TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 4 TMPQ6[0,32] = MP_INT_ABS(TMPQ5[0,32]); TMPQ6[32,32] = MP_INT_ABS(TMPQ5[32,32]); TMPQ6[64,32] = MP_INT_ABS(TMPQ5[64,32]); TMPQ6[96,32] = MP_INT_ABS(TMPQ5[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ6 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ6[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ6[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ6[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ6[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.344 UABAL, UABAL2 page C7-2811 line 164248 MATCH x2e205000/mask=xbf20fc00 # CONSTRUCT x6e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $-@2 $abs@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal2/3@1 # AUNIT --inst x6e205000/mask=xffe0fc00 --status pass --comment "ext abd" :uabal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x5 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = 
zext(TMPD3[0,8]); TMPQ4[16,16] = zext(TMPD3[8,8]); TMPQ4[32,16] = zext(TMPD3[16,8]); TMPQ4[48,16] = zext(TMPD3[24,8]); TMPQ4[64,16] = zext(TMPD3[32,8]); TMPQ4[80,16] = zext(TMPD3[40,8]); TMPQ4[96,16] = zext(TMPD3[48,8]); TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary TMPQ6 = MP_INT_ABS(TMPQ5) on lane size 2 TMPQ6[0,16] = MP_INT_ABS(TMPQ5[0,16]); TMPQ6[16,16] = MP_INT_ABS(TMPQ5[16,16]); TMPQ6[32,16] = MP_INT_ABS(TMPQ5[32,16]); TMPQ6[48,16] = MP_INT_ABS(TMPQ5[48,16]); TMPQ6[64,16] = MP_INT_ABS(TMPQ5[64,16]); TMPQ6[80,16] = MP_INT_ABS(TMPQ5[80,16]); TMPQ6[96,16] = MP_INT_ABS(TMPQ5[96,16]); TMPQ6[112,16] = MP_INT_ABS(TMPQ5[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ6 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ6[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ6[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ6[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ6[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ6[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ6[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ6[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ6[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.344 UABAL, UABAL2 page C7-2811 line 164248 MATCH x2e205000/mask=xbf20fc00 # CONSTRUCT x2ea05000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $-@8 $abs@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal/3@4 # AUNIT --inst x2ea05000/mask=xffe0fc00 --status pass --comment "ext abd" :uabal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is 
# NOTE(review): UABAL (lower-half) variants 2D<-2S, 4S<-4H, 8H<-8B — zero-extend both 64-bit sources lane-wise,
# subtract, take MP_INT_ABS per lane, accumulate into Rd; zext_zq zeroes the upper 16 bytes of Zd. Code unchanged.
b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x5 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 8 TMPQ4[0,64] = MP_INT_ABS(TMPQ3[0,64]); TMPQ4[64,64] = MP_INT_ABS(TMPQ3[64,64]); # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ4[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.344 UABAL, UABAL2 page C7-2811 line 164248 MATCH x2e205000/mask=xbf20fc00 # CONSTRUCT x2e605000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $-@4 $abs@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal/3@2 # AUNIT --inst x2e605000/mask=xffe0fc00 --status pass --comment "ext abd" :uabal Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x5 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] -
TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 4 TMPQ4[0,32] = MP_INT_ABS(TMPQ3[0,32]); TMPQ4[32,32] = MP_INT_ABS(TMPQ3[32,32]); TMPQ4[64,32] = MP_INT_ABS(TMPQ3[64,32]); TMPQ4[96,32] = MP_INT_ABS(TMPQ3[96,32]); # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ4[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ4[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ4[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.344 UABAL, UABAL2 page C7-2811 line 164248 MATCH x2e205000/mask=xbf20fc00 # CONSTRUCT x2e205000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $-@2 $abs@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_uabal/3@1 # AUNIT --inst x2e205000/mask=xffe0fc00 --status pass --comment "ext abd" :uabal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x5 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 TMPQ3[0,16] =
# NOTE(review): end of UABAL 8H<-8B, then UABD (unsigned absolute difference, non-widening) variants.
# Most UABD forms defer to the NEON_uabd pcodeop (status nopcodeop); only the 2S form is open-coded below
# as reference semantics: d = (n - m) + 2*(m - n)*(n < m), i.e. |n - m| without a branch. Code unchanged.
TMPQ1[0,16] - TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary TMPQ4 = MP_INT_ABS(TMPQ3) on lane size 2 TMPQ4[0,16] = MP_INT_ABS(TMPQ3[0,16]); TMPQ4[16,16] = MP_INT_ABS(TMPQ3[16,16]); TMPQ4[32,16] = MP_INT_ABS(TMPQ3[32,16]); TMPQ4[48,16] = MP_INT_ABS(TMPQ3[48,16]); TMPQ4[64,16] = MP_INT_ABS(TMPQ3[64,16]); TMPQ4[80,16] = MP_INT_ABS(TMPQ3[80,16]); TMPQ4[96,16] = MP_INT_ABS(TMPQ3[96,16]); TMPQ4[112,16] = MP_INT_ABS(TMPQ3[112,16]); # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ4[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ4[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ4[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ4[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ4[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ4[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ4[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.345 UABD page C7-2813 line 164369 MATCH x2e207400/mask=xbf20fc00 # CONSTRUCT x6e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@1 # AUNIT --inst x6e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" :uabd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uabd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.345 UABD page C7-2813 line 164369 MATCH x2e207400/mask=xbf20fc00 # CONSTRUCT x2ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $-@4 ARG3 ARG2 $-@4 2:4
&=$* ARG2 ARG3 $less@4 &=$*@4 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@4 # AUNIT --inst x2ea07400/mask=xffe0fc00 --status pass --comment "abd" # This abd instruction is implemented correctly to document a correct # way to implement the unsigned absolute difference semantic. :uabd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { # simd infix TMPD1 = Rn_VPR64.2S - Rm_VPR64.2S on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] - Rm_VPR64.2S[0,32]; TMPD1[32,32] = Rn_VPR64.2S[32,32] - Rm_VPR64.2S[32,32]; # simd infix TMPD2 = Rm_VPR64.2S - Rn_VPR64.2S on lane size 4 TMPD2[0,32] = Rm_VPR64.2S[0,32] - Rn_VPR64.2S[0,32]; TMPD2[32,32] = Rm_VPR64.2S[32,32] - Rn_VPR64.2S[32,32]; # simd infix TMPD2 = TMPD2 * 2:4 on lane size 4 TMPD2[0,32] = TMPD2[0,32] * 2:4; TMPD2[32,32] = TMPD2[32,32] * 2:4; # simd infix TMPD3 = Rn_VPR64.2S < Rm_VPR64.2S on lane size 4 TMPD3[0,32] = zext(Rn_VPR64.2S[0,32] < Rm_VPR64.2S[0,32]); TMPD3[32,32] = zext(Rn_VPR64.2S[32,32] < Rm_VPR64.2S[32,32]); # simd infix TMPD2 = TMPD2 * TMPD3 on lane size 4 TMPD2[0,32] = TMPD2[0,32] * TMPD3[0,32]; TMPD2[32,32] = TMPD2[32,32] * TMPD3[32,32]; # simd infix Rd_VPR64.2S = TMPD1 + TMPD2 on lane size 4 Rd_VPR64.2S[0,32] = TMPD1[0,32] + TMPD2[0,32]; Rd_VPR64.2S[32,32] = TMPD1[32,32] + TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.345 UABD page C7-2813 line 164369 MATCH x2e207400/mask=xbf20fc00 # CONSTRUCT x2e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@2 # AUNIT --inst x2e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" :uabd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uabd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.345 UABD page C7-2813 line 164369 MATCH
x2e207400/mask=xbf20fc00 # CONSTRUCT x6ea07400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@4 # AUNIT --inst x6ea07400/mask=xffe0fc00 --status nopcodeop --comment "abd" :uabd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uabd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.345 UABD page C7-2813 line 164369 MATCH x2e207400/mask=xbf20fc00 # CONSTRUCT x2e207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@1 # AUNIT --inst x2e207400/mask=xffe0fc00 --status nopcodeop --comment "abd" :uabd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uabd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.345 UABD page C7-2813 line 164369 MATCH x2e207400/mask=xbf20fc00 # CONSTRUCT x6e607400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabd/2@2 # AUNIT --inst x6e607400/mask=xffe0fc00 --status nopcodeop --comment "abd" :uabd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uabd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.346 UABDL, UABDL2 page C7-2815 line 164471 MATCH x2e207000/mask=xbf20fc00 # CONSTRUCT x6ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $-@8 =$abs@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl2/2@4 # AUNIT --inst x6ea07000/mask=xffe0fc00 --status pass --comment "ext abd" :uabdl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x7 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D &
# NOTE(review): UABDL2 (upper-half widening absolute difference, no accumulate) — take the upper 64 bits of each
# 128-bit source, zero-extend lane-wise, subtract, and write MP_INT_ABS of each lane straight into Rd. Code unchanged.
Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ5) on lane size 8 Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ5[0,64]); Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ5[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.346 UABDL, UABDL2 page C7-2815 line 164471 MATCH x2e207000/mask=xbf20fc00 # CONSTRUCT x6e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $-@4 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl2/2@2 # AUNIT --inst x6e607000/mask=xffe0fc00 --status pass --comment "ext abd" :uabdl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x7 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ5) on lane size 4 Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ5[0,32]); Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ5[32,32]);
Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ5[64,32]); Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ5[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.346 UABDL, UABDL2 page C7-2815 line 164471 MATCH x2e207000/mask=xbf20fc00 # CONSTRUCT x6e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $-@2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl2/2@1 # AUNIT --inst x6e207000/mask=xffe0fc00 --status pass --comment "ext abd" :uabdl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x7 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = zext(TMPD3[0,8]); TMPQ4[16,16] = zext(TMPD3[8,8]); TMPQ4[32,16] = zext(TMPD3[16,8]); TMPQ4[48,16] = zext(TMPD3[24,8]); TMPQ4[64,16] = zext(TMPD3[32,8]); TMPQ4[80,16] = zext(TMPD3[40,8]); TMPQ4[96,16] = zext(TMPD3[48,8]); TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 - TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ5) on lane size 2 Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ5[0,16]); Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ5[16,16]); Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ5[32,16]);
# NOTE(review): UABDL (lower-half widening absolute difference) variants, then start of UADALP (pairwise add-accumulate).
# The 4S<-4H and 8H<-8B uabdl constraint sections carry extra "& Rn_VPR128 & Rm_VPR128" terms that the 2D<-2S variant
# lacks — presumably redundant operand-table joins from the generator; verify against the operand definitions upstream.
# Fixed the "sipd" -> "simd" typo in the generated pairwise comment; code tokens unchanged.
Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ5[48,16]); Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ5[64,16]); Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ5[80,16]); Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ5[96,16]); Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ5[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.346 UABDL, UABDL2 page C7-2815 line 164471 MATCH x2e207000/mask=xbf20fc00 # CONSTRUCT x2ea07000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $-@8 =$abs@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl/2@4 # AUNIT --inst x2ea07000/mask=xffe0fc00 --status pass --comment "ext abd" :uabdl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x7 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; # simd unary Rd_VPR128.2D = MP_INT_ABS(TMPQ3) on lane size 8 Rd_VPR128.2D[0,64] = MP_INT_ABS(TMPQ3[0,64]); Rd_VPR128.2D[64,64] = MP_INT_ABS(TMPQ3[64,64]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.346 UABDL, UABDL2 page C7-2815 line 164471 MATCH x2e207000/mask=xbf20fc00 # CONSTRUCT x2e607000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $-@4 =$abs@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl/2@2 # AUNIT --inst x2e607000/mask=xffe0fc00 --status pass --comment "ext abd" :uabdl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x7 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Rn_VPR128 & Rm_VPR128 & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size
2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; # simd unary Rd_VPR128.4S = MP_INT_ABS(TMPQ3) on lane size 4 Rd_VPR128.4S[0,32] = MP_INT_ABS(TMPQ3[0,32]); Rd_VPR128.4S[32,32] = MP_INT_ABS(TMPQ3[32,32]); Rd_VPR128.4S[64,32] = MP_INT_ABS(TMPQ3[64,32]); Rd_VPR128.4S[96,32] = MP_INT_ABS(TMPQ3[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.346 UABDL, UABDL2 page C7-2815 line 164471 MATCH x2e207000/mask=xbf20fc00 # CONSTRUCT x2e207000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $-@2 =$abs@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uabdl/2@1 # AUNIT --inst x2e207000/mask=xffe0fc00 --status pass --comment "ext abd" :uabdl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x7 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Rn_VPR128 & Rm_VPR128 & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]);
TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 - TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; # simd unary Rd_VPR128.8H = MP_INT_ABS(TMPQ3) on lane size 2 Rd_VPR128.8H[0,16] = MP_INT_ABS(TMPQ3[0,16]); Rd_VPR128.8H[16,16] = MP_INT_ABS(TMPQ3[16,16]); Rd_VPR128.8H[32,16] = MP_INT_ABS(TMPQ3[32,16]); Rd_VPR128.8H[48,16] = MP_INT_ABS(TMPQ3[48,16]); Rd_VPR128.8H[64,16] = MP_INT_ABS(TMPQ3[64,16]); Rd_VPR128.8H[80,16] = MP_INT_ABS(TMPQ3[80,16]); Rd_VPR128.8H[96,16] = MP_INT_ABS(TMPQ3[96,16]); Rd_VPR128.8H[112,16] = MP_INT_ABS(TMPQ3[112,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.347 UADALP page C7-2817 line 164592 MATCH x2e206800/mask=xbf3ffc00 # CONSTRUCT x6e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 =#u+ &=$+@2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@1 # AUNIT --inst x6e206800/mask=xfffffc00 --status pass --comment "ext" :uadalp Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPQ1 = 0; # simd infix TMPQ1 = +(Rn_VPR128.16B) on pairs lane size (1 to 2) local tmp2 = Rn_VPR128.16B[0,8]; local tmp4 = zext(tmp2); local tmp3 = Rn_VPR128.16B[8,8]; local tmp5 = zext(tmp3); TMPQ1[0,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[16,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.16B[24,8]; tmp5 = zext(tmp3); TMPQ1[32,16]
# NOTE(review): UADALP (unsigned add-accumulate long pairwise) variants — zero-extend each adjacent lane pair, add the
# pair into a double-width temp lane, then accumulate into Rd. Fixed the generator's "sipd" -> "simd" comment typo;
# all code tokens are byte-identical to the original.
= tmp4 + tmp5; tmp2 = Rn_VPR128.16B[48,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.16B[56,8]; tmp5 = zext(tmp3); TMPQ1[48,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[64,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.16B[72,8]; tmp5 = zext(tmp3); TMPQ1[64,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[80,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.16B[88,8]; tmp5 = zext(tmp3); TMPQ1[80,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[96,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.16B[104,8]; tmp5 = zext(tmp3); TMPQ1[96,16] = tmp4 + tmp5; tmp2 = Rn_VPR128.16B[112,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.16B[120,8]; tmp5 = zext(tmp3); TMPQ1[112,16] = tmp4 + tmp5; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.347 UADALP page C7-2817 line 164592 MATCH x2e206800/mask=xbf3ffc00 # CONSTRUCT x2ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 =#u+ &=$+@8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@4 # AUNIT --inst x2ea06800/mask=xfffffc00 --status pass --comment "ext" :uadalp Rd_VPR64.1D, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.1D & Zd { TMPD1 = 0; # simd infix TMPD1 = +(Rn_VPR64.2S) on pairs lane size (4 to 8) local tmp2 = Rn_VPR64.2S[0,32]; local tmp4 = zext(tmp2); local tmp3 = Rn_VPR64.2S[32,32]; local tmp5 = zext(tmp3); TMPD1 = tmp4 + tmp5; # simd infix Rd_VPR64.1D = Rd_VPR64.1D + TMPD1 on lane size 8 Rd_VPR64.1D = Rd_VPR64.1D + TMPD1; zext_zd(Zd); # zero upper 24 bytes of
Zd } # C7.2.347 UADALP page C7-2817 line 164592 MATCH x2e206800/mask=xbf3ffc00 # CONSTRUCT x2e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 =#u+ &=$+@4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@2 # AUNIT --inst x2e606800/mask=xfffffc00 --status pass --comment "ext" :uadalp Rd_VPR64.2S, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.2S & Zd { TMPD1 = 0; # simd infix TMPD1 = +(Rn_VPR64.4H) on pairs lane size (2 to 4) local tmp2 = Rn_VPR64.4H[0,16]; local tmp4 = zext(tmp2); local tmp3 = Rn_VPR64.4H[16,16]; local tmp5 = zext(tmp3); TMPD1[0,32] = tmp4 + tmp5; tmp2 = Rn_VPR64.4H[32,16]; tmp4 = zext(tmp2); tmp3 = Rn_VPR64.4H[48,16]; tmp5 = zext(tmp3); TMPD1[32,32] = tmp4 + tmp5; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.347 UADALP page C7-2817 line 164592 MATCH x2e206800/mask=xbf3ffc00 # CONSTRUCT x6ea06800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 =#u+ &=$+@8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@4 # AUNIT --inst x6ea06800/mask=xfffffc00 --status pass --comment "ext" :uadalp Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPQ1 = 0; # simd infix TMPQ1 = +(Rn_VPR128.4S) on pairs lane size (4 to 8) local tmp2 = Rn_VPR128.4S[0,32]; local tmp4 = zext(tmp2); local tmp3 = Rn_VPR128.4S[32,32]; local tmp5 = zext(tmp3); TMPQ1[0,64] = tmp4 + tmp5; tmp2 = Rn_VPR128.4S[64,32]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.4S[96,32]; tmp5 = zext(tmp3); TMPQ1[64,64] = tmp4 + tmp5; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd);
# zero upper 16 bytes of Zd } # C7.2.347 UADALP page C7-2817 line 164592 MATCH x2e206800/mask=xbf3ffc00 # CONSTRUCT x2e206800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:8 ARG2 =#u+ &=$+@2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@1 # AUNIT --inst x2e206800/mask=xfffffc00 --status pass --comment "ext" :uadalp Rd_VPR64.4H, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.4H & Zd { TMPD1 = 0; # simd infix TMPD1 = +(Rn_VPR64.8B) on pairs lane size (1 to 2) local tmp2 = Rn_VPR64.8B[0,8]; local tmp4 = zext(tmp2); local tmp3 = Rn_VPR64.8B[8,8]; local tmp5 = zext(tmp3); TMPD1[0,16] = tmp4 + tmp5; tmp2 = Rn_VPR64.8B[16,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR64.8B[24,8]; tmp5 = zext(tmp3); TMPD1[16,16] = tmp4 + tmp5; tmp2 = Rn_VPR64.8B[32,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR64.8B[40,8]; tmp5 = zext(tmp3); TMPD1[32,16] = tmp4 + tmp5; tmp2 = Rn_VPR64.8B[48,8]; tmp4 = zext(tmp2); tmp3 = Rn_VPR64.8B[56,8]; tmp5 = zext(tmp3); TMPD1[48,16] = tmp4 + tmp5; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.347 UADALP page C7-2817 line 164592 MATCH x2e206800/mask=xbf3ffc00 # CONSTRUCT x6e606800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 var:16 ARG2 =#u+ &=$+@4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_uadalp/2@2 # AUNIT --inst x6e606800/mask=xfffffc00 --status pass --comment "ext" :uadalp Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x6 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPQ1 = 0; # simd infix TMPQ1 = +(Rn_VPR128.8H) on pairs lane size (2 to 4) local tmp2 = Rn_VPR128.8H[0,16]; local tmp4 = zext(tmp2); local tmp3 =
# NOTE(review): tail of UADALP 4S<-8H, then UADDL2 (upper-half widening add, no accumulate) — upper 64 bits of each
# source zero-extended lane-wise and summed directly into Rd — and the start of the UADDL 2D<-2S variant. Code unchanged.
Rn_VPR128.8H[16,16]; local tmp5 = zext(tmp3); TMPQ1[0,32] = tmp4 + tmp5; tmp2 = Rn_VPR128.8H[32,16]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.8H[48,16]; tmp5 = zext(tmp3); TMPQ1[32,32] = tmp4 + tmp5; tmp2 = Rn_VPR128.8H[64,16]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.8H[80,16]; tmp5 = zext(tmp3); TMPQ1[64,32] = tmp4 + tmp5; tmp2 = Rn_VPR128.8H[96,16]; tmp4 = zext(tmp2); tmp3 = Rn_VPR128.8H[112,16]; tmp5 = zext(tmp3); TMPQ1[96,32] = tmp4 + tmp5; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.348 UADDL, UADDL2 page C7-2819 line 164702 MATCH x2e200000/mask=xbf20fc00 # CONSTRUCT x6ea00000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl2/2@4 # AUNIT --inst x6ea00000/mask=xffe0fc00 --status pass --comment "ext" :uaddl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x0 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 + TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] + TMPQ4[0,64]; Rd_VPR128.2D[64,64] = TMPQ2[64,64] + TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.348 UADDL, UADDL2 page C7-2819 line 164702 MATCH x2e200000/mask=xbf20fc00 # CONSTRUCT x6e600000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16
=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl2/2@2 # AUNIT --inst x6e600000/mask=xffe0fc00 --status pass --comment "ext" :uaddl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x0 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 + TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] + TMPQ4[0,32]; Rd_VPR128.4S[32,32] = TMPQ2[32,32] + TMPQ4[32,32]; Rd_VPR128.4S[64,32] = TMPQ2[64,32] + TMPQ4[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32] + TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.348 UADDL, UADDL2 page C7-2819 line 164702 MATCH x2e200000/mask=xbf20fc00 # CONSTRUCT x6e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl2/2@1 # AUNIT --inst x6e200000/mask=xffe0fc00 --status pass --comment "ext" :uaddl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x0 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3)
ARG2 ARG3 =NEON_uaddl/2@2 # AUNIT --inst x2e600000/mask=xffe0fc00 --status pass --comment "ext" :uaddl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x0 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 + TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] + TMPQ2[0,32]; Rd_VPR128.4S[32,32] = TMPQ1[32,32] + TMPQ2[32,32]; Rd_VPR128.4S[64,32] = TMPQ1[64,32] + TMPQ2[64,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.348 UADDL, UADDL2 page C7-2819 line 164702 MATCH x2e200000/mask=xbf20fc00 # CONSTRUCT x2e200000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddl/2@1 # AUNIT --inst x2e200000/mask=xffe0fc00 --status pass --comment "ext" :uaddl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x0 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = 
zext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 + TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] + TMPQ2[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[16,16] + TMPQ2[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16] + TMPQ2[32,16]; Rd_VPR128.8H[48,16] = TMPQ1[48,16] + TMPQ2[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16] + TMPQ2[64,16]; Rd_VPR128.8H[80,16] = TMPQ1[80,16] + TMPQ2[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16] + TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.349 UADDLP page C7-2821 line 164825 MATCH x2e202800/mask=xbf3ffc00 # CONSTRUCT x2e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#u+@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@1 # AUNIT --inst x2e202800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when size = 00 , Q = 0 s=16 e1=1 e2=2 Ta=VPR64.4H Tb=VPR64.8B :uaddlp Rd_VPR64.4H, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_VPR64.4H & Rn_VPR64.8B & Zd { TMPD1 = Rn_VPR64.8B; # simd infix Rd_VPR64.4H = +(TMPD1) on pairs lane size (1 to 2) local tmp2 = TMPD1[0,8]; local tmp4 = zext(tmp2); local tmp3 = TMPD1[8,8]; local tmp5 = zext(tmp3); Rd_VPR64.4H[0,16] = tmp4 + tmp5; tmp2 = TMPD1[16,8]; tmp4 = zext(tmp2); tmp3 = TMPD1[24,8]; tmp5 = zext(tmp3); Rd_VPR64.4H[16,16] = tmp4 + tmp5; tmp2 = TMPD1[32,8]; tmp4 = zext(tmp2); tmp3 = TMPD1[40,8]; tmp5 = zext(tmp3); Rd_VPR64.4H[32,16] = tmp4 + tmp5; tmp2 = TMPD1[48,8]; tmp4 = zext(tmp2); tmp3 = TMPD1[56,8]; tmp5 = zext(tmp3); Rd_VPR64.4H[48,16] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.349 UADDLP page C7-2821 line 164825 MATCH x2e202800/mask=xbf3ffc00 # 
CONSTRUCT x6e202800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#u+@1 # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@1 # AUNIT --inst x6e202800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when size = 00 , Q = 1 s=32 e1=1 e2=2 Ta=VPR128.8H Tb=VPR128.16B :uaddlp Rd_VPR128.8H, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001010 & Rd_VPR128.8H & Rn_VPR128.16B & Zd { TMPQ1 = Rn_VPR128.16B; # simd infix Rd_VPR128.8H = +(TMPQ1) on pairs lane size (1 to 2) local tmp2 = TMPQ1[0,8]; local tmp4 = zext(tmp2); local tmp3 = TMPQ1[8,8]; local tmp5 = zext(tmp3); Rd_VPR128.8H[0,16] = tmp4 + tmp5; tmp2 = TMPQ1[16,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[24,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[16,16] = tmp4 + tmp5; tmp2 = TMPQ1[32,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[40,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[32,16] = tmp4 + tmp5; tmp2 = TMPQ1[48,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[56,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[48,16] = tmp4 + tmp5; tmp2 = TMPQ1[64,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[72,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[64,16] = tmp4 + tmp5; tmp2 = TMPQ1[80,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[88,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[80,16] = tmp4 + tmp5; tmp2 = TMPQ1[96,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[104,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[96,16] = tmp4 + tmp5; tmp2 = TMPQ1[112,8]; tmp4 = zext(tmp2); tmp3 = TMPQ1[120,8]; tmp5 = zext(tmp3); Rd_VPR128.8H[112,16] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.349 UADDLP page C7-2821 line 164825 MATCH x2e202800/mask=xbf3ffc00 # CONSTRUCT x2e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#u+@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@2 # AUNIT --inst x2e602800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when size = 01 , Q = 0 s=16 e1=2 e2=4 Ta=VPR64.2S Tb=VPR64.4H :uaddlp Rd_VPR64.2S, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_VPR64.2S & Rn_VPR64.4H 
& Zd { TMPD1 = Rn_VPR64.4H; # simd infix Rd_VPR64.2S = +(TMPD1) on pairs lane size (2 to 4) local tmp2 = TMPD1[0,16]; local tmp4 = zext(tmp2); local tmp3 = TMPD1[16,16]; local tmp5 = zext(tmp3); Rd_VPR64.2S[0,32] = tmp4 + tmp5; tmp2 = TMPD1[32,16]; tmp4 = zext(tmp2); tmp3 = TMPD1[48,16]; tmp5 = zext(tmp3); Rd_VPR64.2S[32,32] = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.349 UADDLP page C7-2821 line 164825 MATCH x2e202800/mask=xbf3ffc00 # CONSTRUCT x6e602800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#u+@2 # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@2 # AUNIT --inst x6e602800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when size = 01 , Q = 1 s=32 e1=2 e2=4 Ta=VPR128.4S Tb=VPR128.8H :uaddlp Rd_VPR128.4S, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001010 & Rd_VPR128.4S & Rn_VPR128.8H & Zd { TMPQ1 = Rn_VPR128.8H; # simd infix Rd_VPR128.4S = +(TMPQ1) on pairs lane size (2 to 4) local tmp2 = TMPQ1[0,16]; local tmp4 = zext(tmp2); local tmp3 = TMPQ1[16,16]; local tmp5 = zext(tmp3); Rd_VPR128.4S[0,32] = tmp4 + tmp5; tmp2 = TMPQ1[32,16]; tmp4 = zext(tmp2); tmp3 = TMPQ1[48,16]; tmp5 = zext(tmp3); Rd_VPR128.4S[32,32] = tmp4 + tmp5; tmp2 = TMPQ1[64,16]; tmp4 = zext(tmp2); tmp3 = TMPQ1[80,16]; tmp5 = zext(tmp3); Rd_VPR128.4S[64,32] = tmp4 + tmp5; tmp2 = TMPQ1[96,16]; tmp4 = zext(tmp2); tmp3 = TMPQ1[112,16]; tmp5 = zext(tmp3); Rd_VPR128.4S[96,32] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.349 UADDLP page C7-2821 line 164825 MATCH x2e202800/mask=xbf3ffc00 # CONSTRUCT x2ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#u+@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@4 # AUNIT --inst x2ea02800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when size = 10 , Q = 0 s=16 e1=4 e2=8 Ta=VPR64.1D Tb=VPR64.2S :uaddlp Rd_VPR64.1D, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_VPR64.1D & 
Rn_VPR64.2S & Zd { TMPD1 = Rn_VPR64.2S; # simd infix Rd_VPR64.1D = +(TMPD1) on pairs lane size (4 to 8) local tmp2 = TMPD1[0,32]; local tmp4 = zext(tmp2); local tmp3 = TMPD1[32,32]; local tmp5 = zext(tmp3); Rd_VPR64.1D = tmp4 + tmp5; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.349 UADDLP page C7-2821 line 164825 MATCH x2e202800/mask=xbf3ffc00 # CONSTRUCT x6ea02800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =#u+@4 # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlp/1@4 # AUNIT --inst x6ea02800/mask=xfffffc00 --status pass --comment "ext" # Vector variant when size = 10 , Q = 1 s=32 e1=4 e2=8 Ta=VPR128.2D Tb=VPR128.4S :uaddlp Rd_VPR128.2D, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001010 & Rd_VPR128.2D & Rn_VPR128.4S & Zd { TMPQ1 = Rn_VPR128.4S; # simd infix Rd_VPR128.2D = +(TMPQ1) on pairs lane size (4 to 8) local tmp2 = TMPQ1[0,32]; local tmp4 = zext(tmp2); local tmp3 = TMPQ1[32,32]; local tmp5 = zext(tmp3); Rd_VPR128.2D[0,64] = tmp4 + tmp5; tmp2 = TMPQ1[64,32]; tmp4 = zext(tmp2); tmp3 = TMPQ1[96,32]; tmp5 = zext(tmp3); Rd_VPR128.2D[64,64] = tmp4 + tmp5; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.350 UADDLV page C7-2823 line 164935 MATCH x2e303800/mask=xbf3ffc00 # CONSTRUCT x6eb03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@4 # AUNIT --inst x6eb03800/mask=xfffffc00 --status nopcodeop --comment "ext" :uaddlv Rd_FPR64, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.4S & Rd_FPR64 & Zd { Rd_FPR64 = NEON_uaddlv(Rn_VPR128.4S, 4:1); } # C7.2.350 UADDLV page C7-2823 line 164935 MATCH x2e303800/mask=xbf3ffc00 # CONSTRUCT x6e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@1 # AUNIT --inst x6e303800/mask=xfffffc00 --status nopcodeop --comment "ext" :uaddlv Rd_FPR16, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & 
b_1216=0x3 & b_1011=2 & Rn_VPR128.16B & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uaddlv(Rn_VPR128.16B, 1:1); } # C7.2.350 UADDLV page C7-2823 line 164935 MATCH x2e303800/mask=xbf3ffc00 # CONSTRUCT x2e303800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@1 # AUNIT --inst x2e303800/mask=xfffffc00 --status nopcodeop --comment "ext" :uaddlv Rd_FPR16, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.8B & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uaddlv(Rn_VPR64.8B, 1:1); } # C7.2.350 UADDLV page C7-2823 line 164935 MATCH x2e303800/mask=xbf3ffc00 # CONSTRUCT x2e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@2 # AUNIT --inst x2e703800/mask=xfffffc00 --status nopcodeop --comment "ext" :uaddlv Rd_FPR32, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR64.4H & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uaddlv(Rn_VPR64.4H, 2:1); } # C7.2.350 UADDLV page C7-2823 line 164935 MATCH x2e303800/mask=xbf3ffc00 # CONSTRUCT x6e703800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uaddlv/1@2 # AUNIT --inst x6e703800/mask=xfffffc00 --status nopcodeop --comment "ext" :uaddlv Rd_FPR32, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x3 & b_1011=2 & Rn_VPR128.8H & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uaddlv(Rn_VPR128.8H, 2:1); } # C7.2.351 UADDW, UADDW2 page C7-2825 line 165035 MATCH x2e201000/mask=xbf20fc00 # CONSTRUCT x6ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $zext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw2/2@4 # AUNIT --inst x6ea01000/mask=xffe0fc00 --status pass --comment "ext" :uaddw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPD1 = 
Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.351 UADDW, UADDW2 page C7-2825 line 165035 MATCH x2e201000/mask=xbf20fc00 # CONSTRUCT x6e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $zext@2:16 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw2/2@2 # AUNIT --inst x6e601000/mask=xffe0fc00 --status pass --comment "ext" :uaddw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.351 UADDW, UADDW2 page C7-2825 line 165035 MATCH x2e201000/mask=xbf20fc00 # CONSTRUCT x6e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $zext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw2/2@1 # AUNIT --inst x6e201000/mask=xffe0fc00 --status pass --comment "ext" :uaddw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane 
size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ2[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ2[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ2[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ2[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ2[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ2[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ2[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.351 UADDW, UADDW2 page C7-2825 line 165035 MATCH x2e201000/mask=xbf20fc00 # CONSTRUCT x2ea01000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $zext@4:16 =$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw/2@4 # AUNIT --inst x2ea01000/mask=xffe0fc00 --status pass --comment "ext" :uaddw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x1 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.351 UADDW, UADDW2 page C7-2825 line 165035 MATCH x2e201000/mask=xbf20fc00 # CONSTRUCT x2e601000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $zext@2:16 =$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw/2@2 # AUNIT --inst x2e601000/mask=xffe0fc00 --status pass 
--comment "ext" :uaddw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x1 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.351 UADDW, UADDW2 page C7-2825 line 165035 MATCH x2e201000/mask=xbf20fc00 # CONSTRUCT x2e201000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $zext@1:16 =$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uaddw/2@1 # AUNIT --inst x2e201000/mask=xffe0fc00 --status pass --comment "ext" :uaddw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x1 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] + TMPQ1[64,16]; 
Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x7f00e400/mask=xff80fc00 # CONSTRUCT x7f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2 # AUNIT --inst x7f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_3031=1 & u=1 & b_2428=0x1f & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_ucvtf(Rn_FPR64, Imm_shr_imm64:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x7f00e400/mask=xff80fc00 # CONSTRUCT x7f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2 # AUNIT --inst x7f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_3031=1 & u=1 & b_2428=0x1f & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_ucvtf(Rn_FPR32, Imm_shr_imm32:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x7f00e400/mask=xff80fc00 # CONSTRUCT x7f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2 # AUNIT --inst x7f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" :ucvtf Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_3031=1 & u=1 & b_2428=0x1f & b_2023=1 & Imm_shr_imm16 & b_1115=0x1c & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_ucvtf(Rn_FPR16, Imm_shr_imm16:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x6f40e400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@8 # AUNIT --inst x6f40e400/mask=xffc0fc00 --status nopcodeop --comment "nofpround" 
:ucvtf Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x1c & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_ucvtf(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x2f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@4 # AUNIT --inst x2f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_ucvtf(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x6f20e400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@4 # AUNIT --inst x6f20e400/mask=xffe0fc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_ucvtf(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x2f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@2 # AUNIT --inst x2f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" :ucvtf Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_ucvtf(Rn_VPR64.4H, Imm_shr_imm32:1, 2:1); } # C7.2.352 UCVTF (vector, fixed-point) page C7-2827 line 165158 MATCH x2f00e400/mask=xbf80fc00 # CONSTRUCT x6f10e400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ucvtf/2@2 # AUNIT --inst x6f10e400/mask=xfff0fc00 --status noqemu --comment "nofpround" :ucvtf Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=1 & Imm_shr_imm32 & b_1115=0x1c & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_ucvtf(Rn_VPR128.8H, Imm_shr_imm32:1, 2:1); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x7e21d800/mask=xffbffc00 # CONSTRUCT x7e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x7e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_FPR32, Rn_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x10 & b_1216=0x1d & b_1011=2 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_ucvtf(Rn_FPR32); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x7e21d800/mask=xffbffc00 # CONSTRUCT x7e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x7e61d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_FPR64, Rn_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & size_high=0 & b_1722=0x30 & b_1216=0x1d & b_1011=2 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_ucvtf(Rn_FPR64); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x2e21d800/mask=xbfbffc00 # CONSTRUCT x2e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@4 # AUNIT --inst x2e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_VPR64.2S, Rn_VPR64.2S is sf=0 & q=0 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { Rd_VPR64.2S = NEON_ucvtf(Rn_VPR64.2S, 4:1); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x2e21d800/mask=xbfbffc00 # CONSTRUCT x6e21d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@4 # AUNIT --inst 
x6e21d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_VPR128.4S, Rn_VPR128.4S is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_ucvtf(Rn_VPR128.4S, 4:1); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x2e21d800/mask=xbfbffc00 # CONSTRUCT x6e61d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@8 # AUNIT --inst x6e61d800/mask=xfffffc00 --status nopcodeop --comment "nofpround" :ucvtf Rd_VPR128.2D, Rn_VPR128.2D is sf=0 & q=1 & b_2929=1 & b_2428=0x0e & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x1d & b_1011=2 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { Rd_VPR128.2D = NEON_ucvtf(Rn_VPR128.2D, 8:1); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x7e79d800/mask=xfffffc00 # CONSTRUCT x7e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x7e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Scalar half precision variant :ucvtf Rd_FPR16, Rn_FPR16 is b_1031=0b0111111001111001110110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = NEON_ucvtf(Rn_FPR16); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x2e79d800/mask=xbffffc00 # CONSTRUCT x2e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@2 # AUNIT --inst x2e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Vector half precision variant when Q=0 T=VPR64.4H :ucvtf Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_1029=0b10111001111001110110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { Rd_VPR64.4H = NEON_ucvtf(Rn_VPR64.4H, 2:1); } # C7.2.353 UCVTF (vector, integer) page C7-2830 line 165313 MATCH x2e79d800/mask=xbffffc00 # CONSTRUCT x6e79d800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1@2 # AUNIT --inst x6e79d800/mask=xfffffc00 --status noqemu --comment "nofpround" # Vector half 
precision variant when Q=1 T=VPR128.8H :ucvtf Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_1029=0b10111001111001110110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { Rd_VPR128.8H = NEON_ucvtf(Rn_VPR128.8H, 2:1); } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2833 line 165497 MATCH x1e030000/mask=x7f3f0000 # CONSTRUCT x1ec38000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:8 int2float:2 FBits16 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_ucvtf/2 # AUNIT --inst x1ec38000/mask=xffff8000 --status noqemu --comment "nofpround" # if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); :ucvtf Rd_FPR16, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits16 & Rn_GPR32 & Rd_FPR16 & Zd { local tmp1:8 = zext(Rn_GPR32); local tmp2:2 = int2float(tmp1); Rd_FPR16 = tmp2 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2833 line 165497 MATCH x1e030000/mask=x7f3f0000 # CONSTRUCT x9ec30000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:9 int2float:2 FBits16 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits16 =NEON_ucvtf/2 # AUNIT --inst x9ec30000/mask=xffff0000 --status noqemu --comment "nofpround" :ucvtf Rd_FPR16, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits16 & Rn_GPR64 & Rd_FPR16 & Zd { local tmp1:9 = zext(Rn_GPR64); local tmp2:2 = int2float(tmp1); Rd_FPR16 = tmp2 f/ FBits16; zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2833 line 165497 MATCH x1e030000/mask=x7f3f0000 # CONSTRUCT x1e438000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:8 int2float:8 FBits64 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_ucvtf/2 # AUNIT --inst x1e438000/mask=xffff8000 --status pass --comment "nofpround" # if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); :ucvtf Rd_FPR64, Rn_GPR32, FBitsOp is sf=0 & 
b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits64 & Rn_GPR32 & Rd_FPR64 & Zd { local tmp1:8 = zext(Rn_GPR32); local tmp2:8 = int2float(tmp1); Rd_FPR64 = tmp2 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2833 line 165497 MATCH x1e030000/mask=x7f3f0000 # CONSTRUCT x9e430000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:9 int2float:8 FBits64 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits64 =NEON_ucvtf/2 # AUNIT --inst x9e430000/mask=xffff0000 --status fail --comment "nofpround" # The zext:9 naively force unsigned int before conversion :ucvtf Rd_FPR64, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits64 & Rn_GPR64 & Rd_FPR64 & Zd { local tmp1:9 = zext(Rn_GPR64); local tmp2:8 = int2float(tmp1); Rd_FPR64 = tmp2 f/ FBits64; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2833 line 165497 MATCH x1e030000/mask=x7f3f0000 # CONSTRUCT x1e038000/mask=xffff8000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:8 int2float:4 FBits32 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_ucvtf/2 # AUNIT --inst x1e038000/mask=xffff8000 --status fail --comment "nofpround" # if sf == '0' && scale<5> == '0' then UnallocatedEncoding(); :ucvtf Rd_FPR32, Rn_GPR32, FBitsOp is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=3 & b_15=1 & FBitsOp & FBits32 & Rn_GPR32 & Rd_FPR32 & Zd { local tmp1:8 = zext(Rn_GPR32); local tmp2:4 = int2float(tmp1); Rd_FPR32 = tmp2 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.354 UCVTF (scalar, fixed-point) page C7-2833 line 165497 MATCH x1e030000/mask=x7f3f0000 # CONSTRUCT x9e030000/mask=xffff0000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:9 int2float:4 FBits32 =f/ # SMACRO(pseudo) ARG1 ARG2 FBits32 =NEON_ucvtf/2 # AUNIT --inst x9e030000/mask=xffff0000 --status fail --comment 
"nofpround" :ucvtf Rd_FPR32, Rn_GPR64, FBitsOp is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=0 & mode=0 & fpOpcode=3 & FBitsOp & FBits32 & Rn_GPR64 & Rd_FPR32 & Zd { local tmp1:9 = zext(Rn_GPR64); local tmp2:4 = int2float(tmp1); Rd_FPR32 = tmp2 f/ FBits32; zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.355 UCVTF (scalar, integer) page C7-2835 line 165632 MATCH x1e230000/mask=x7f3ffc00 # CONSTRUCT x1ee30000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:8 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x1ee30000/mask=xfffffc00 --status noqemu --comment "nofpround" :ucvtf Rd_FPR16, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR16 & Zd { local tmp1:8 = zext(Rn_GPR32); Rd_FPR16 = int2float(tmp1); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.355 UCVTF (scalar, integer) page C7-2835 line 165632 MATCH x1e230000/mask=x7f3ffc00 # CONSTRUCT x9ee30000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:9 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x9ee30000/mask=xfffffc00 --status noqemu --comment "nofpround" :ucvtf Rd_FPR16, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=3 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR16 & Zd { local tmp1:9 = zext(Rn_GPR64); Rd_FPR16 = int2float(tmp1); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.355 UCVTF (scalar, integer) page C7-2835 line 165632 MATCH x1e230000/mask=x7f3ffc00 # CONSTRUCT x1e630000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:8 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x1e630000/mask=xfffffc00 --status pass --comment "nofpround" :ucvtf Rd_FPR64, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR64 & Zd { local tmp1:8 = zext(Rn_GPR32); Rd_FPR64 = int2float(tmp1); zext_zd(Zd); # zero upper 24 bytes 
of Zd } # C7.2.355 UCVTF (scalar, integer) page C7-2835 line 165632 MATCH x1e230000/mask=x7f3ffc00 # CONSTRUCT x9e630000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:9 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x9e630000/mask=xfffffc00 --status fail --comment "nofpround" :ucvtf Rd_FPR64, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=1 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR64 & Zd { local tmp1:9 = zext(Rn_GPR64); Rd_FPR64 = int2float(tmp1); zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.355 UCVTF (scalar, integer) page C7-2835 line 165632 MATCH x1e230000/mask=x7f3ffc00 # CONSTRUCT x1e230000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:8 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x1e230000/mask=xfffffc00 --status fail --comment "nofpround" :ucvtf Rd_FPR32, Rn_GPR32 is sf=0 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR32 & Rd_FPR32 & Zd { local tmp1:8 = zext(Rn_GPR32); Rd_FPR32 = int2float(tmp1); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.355 UCVTF (scalar, integer) page C7-2835 line 165632 MATCH x1e230000/mask=x7f3ffc00 # CONSTRUCT x9e230000/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 zext:9 =int2float # SMACRO(pseudo) ARG1 ARG2 =NEON_ucvtf/1 # AUNIT --inst x9e230000/mask=xfffffc00 --status fail --comment "nofpround" :ucvtf Rd_FPR32, Rn_GPR64 is sf=1 & b_3030=0 & s=0 & b_2428=0x1e & ftype=0 & b_2121=1 & rmode=0 & fpOpcode=3 & b_1015=0x0 & Rn_GPR64 & Rd_FPR32 & Zd { local tmp1:9 = zext(Rn_GPR64); Rd_FPR32 = int2float(tmp1); zext_zs(Zd); # zero upper 28 bytes of Zd } # C7.2.356 UDOT (by element) page C7-2837 line 165760 MATCH x2f00e000/mask=xbf00f400 # CONSTRUCT x2f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_udot/2@1 # AUNIT --inst x2f80e000/mask=xffc0f400 --status noqemu # Vector variant when Q=0 
Ta=VPR64.2S Tb=VPR64.8B :udot Rd_VPR64.2S, Rn_VPR64.8B, Re_VPR128.B.vIndex is b_31=0 & b_30=0 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR64.2S & Rn_VPR64.8B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR64.2S = NEON_udot(Rn_VPR64.8B, tmp1, 1:1); } # C7.2.356 UDOT (by element) page C7-2837 line 165760 MATCH x2f00e000/mask=xbf00f400 # CONSTRUCT x6f80e000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 Re_VPR128.S.vIndex =NEON_udot/2@1 # AUNIT --inst x6f80e000/mask=xffc0f400 --status noqemu # Vector variant when Q=1 Ta=VPR128.4S Tb=VPR128.16B :udot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.B.vIndex is b_31=0 & b_30=1 & b_2429=0b101111 & b_2223=0b10 & b_1215=0b1110 & b_10=0 & Rd_VPR128.4S & Rn_VPR128.16B & Re_VPR128.B.vIndex & Re_VPR128.S & vIndex & Zd { local tmp1:4 = SIMD_PIECE(Re_VPR128.S, vIndex:1); Rd_VPR128.4S = NEON_udot(Rn_VPR128.16B, tmp1, 1:1); } # C7.2.357 UDOT (vector) page C7-2839 line 165862 MATCH x2e009400/mask=xbf20fc00 # CONSTRUCT x2e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_udot/2@1 # AUNIT --inst x2e809400/mask=xffe0fc00 --status noqemu # Three registers of the same type variant when Q=0 Ta=VPR64.2S Tb=VPR64.8B :udot Rd_VPR64.2S, Rn_VPR64.8B, Rm_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR64.2S & Rn_VPR64.8B & Rm_VPR64.8B & Zd { Rd_VPR64.2S = NEON_udot(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.357 UDOT (vector) page C7-2839 line 165862 MATCH x2e009400/mask=xbf20fc00 # CONSTRUCT x6e809400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_udot/2@1 # AUNIT --inst x6e809400/mask=xffe0fc00 --status noqemu # Three registers of the same type variant when Q=1 Ta=VPR128.4S Tb=VPR128.16B :udot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_21=0 & b_1015=0b100101 & Rd_VPR128.4S & 
Rn_VPR128.16B & Rm_VPR128.16B & Zd { Rd_VPR128.4S = NEON_udot(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.358 UHADD page C7-2841 line 165961 MATCH x2e200400/mask=xbf20fc00 # CONSTRUCT x6e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@1 # AUNIT --inst x6e200400/mask=xffe0fc00 --status nopcodeop :uhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.358 UHADD page C7-2841 line 165961 MATCH x2e200400/mask=xbf20fc00 # CONSTRUCT x2ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@4 # AUNIT --inst x2ea00400/mask=xffe0fc00 --status nopcodeop :uhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.358 UHADD page C7-2841 line 165961 MATCH x2e200400/mask=xbf20fc00 # CONSTRUCT x2e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@2 # AUNIT --inst x2e600400/mask=xffe0fc00 --status nopcodeop :uhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.358 UHADD page C7-2841 line 165961 MATCH x2e200400/mask=xbf20fc00 # CONSTRUCT x6ea00400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@4 # AUNIT --inst x6ea00400/mask=xffe0fc00 --status nopcodeop :uhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S 
& Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.358 UHADD page C7-2841 line 165961 MATCH x2e200400/mask=xbf20fc00 # CONSTRUCT x2e200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@1 # AUNIT --inst x2e200400/mask=xffe0fc00 --status nopcodeop :uhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.358 UHADD page C7-2841 line 165961 MATCH x2e200400/mask=xbf20fc00 # CONSTRUCT x6e600400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhadd/2@2 # AUNIT --inst x6e600400/mask=xffe0fc00 --status nopcodeop :uhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.359 UHSUB page C7-2843 line 166063 MATCH x2e202400/mask=xbf20fc00 # CONSTRUCT x6e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@1 # AUNIT --inst x6e202400/mask=xffe0fc00 --status nopcodeop :uhsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uhsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.359 UHSUB page C7-2843 line 166063 MATCH x2e202400/mask=xbf20fc00 # CONSTRUCT x2ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@4 # AUNIT --inst x2ea02400/mask=xffe0fc00 --status nopcodeop :uhsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & 
Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uhsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.359 UHSUB page C7-2843 line 166063 MATCH x2e202400/mask=xbf20fc00 # CONSTRUCT x2e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@2 # AUNIT --inst x2e602400/mask=xffe0fc00 --status nopcodeop :uhsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uhsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.359 UHSUB page C7-2843 line 166063 MATCH x2e202400/mask=xbf20fc00 # CONSTRUCT x6ea02400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@4 # AUNIT --inst x6ea02400/mask=xffe0fc00 --status nopcodeop :uhsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uhsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.359 UHSUB page C7-2843 line 166063 MATCH x2e202400/mask=xbf20fc00 # CONSTRUCT x2e202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@1 # AUNIT --inst x2e202400/mask=xffe0fc00 --status nopcodeop :uhsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uhsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.359 UHSUB page C7-2843 line 166063 MATCH x2e202400/mask=xbf20fc00 # CONSTRUCT x6e602400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uhsub/2@2 # AUNIT --inst x6e602400/mask=xffe0fc00 --status nopcodeop :uhsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { 
Rd_VPR128.8H = NEON_uhsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.360 UMAX page C7-2845 line 166163 MATCH x2e206400/mask=xbf20fc00 # CONSTRUCT x6e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@1 # AUNIT --inst x6e206400/mask=xffe0fc00 --status nopcodeop :umax Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xc & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_umax(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.360 UMAX page C7-2845 line 166163 MATCH x2e206400/mask=xbf20fc00 # CONSTRUCT x2ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@4 # AUNIT --inst x2ea06400/mask=xffe0fc00 --status nopcodeop :umax Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xc & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_umax(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.360 UMAX page C7-2845 line 166163 MATCH x2e206400/mask=xbf20fc00 # CONSTRUCT x2e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@2 # AUNIT --inst x2e606400/mask=xffe0fc00 --status nopcodeop :umax Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xc & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_umax(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.360 UMAX page C7-2845 line 166163 MATCH x2e206400/mask=xbf20fc00 # CONSTRUCT x6ea06400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@4 # AUNIT --inst x6ea06400/mask=xffe0fc00 --status nopcodeop :umax Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xc & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = 
NEON_umax(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.360 UMAX page C7-2845 line 166163 MATCH x2e206400/mask=xbf20fc00 # CONSTRUCT x2e206400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@1 # AUNIT --inst x2e206400/mask=xffe0fc00 --status nopcodeop :umax Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xc & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_umax(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.360 UMAX page C7-2845 line 166163 MATCH x2e206400/mask=xbf20fc00 # CONSTRUCT x6e606400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umax/2@2 # AUNIT --inst x6e606400/mask=xffe0fc00 --status nopcodeop :umax Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xc & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_umax(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.361 UMAXP page C7-2847 line 166265 MATCH x2e20a400/mask=xbf20fc00 # CONSTRUCT x6e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@1 # AUNIT --inst x6e20a400/mask=xffe0fc00 --status nopcodeop :umaxp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_umaxp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.361 UMAXP page C7-2847 line 166265 MATCH x2e20a400/mask=xbf20fc00 # CONSTRUCT x2ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@4 # AUNIT --inst x2ea0a400/mask=xffe0fc00 --status nopcodeop :umaxp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = 
NEON_umaxp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.361 UMAXP page C7-2847 line 166265 MATCH x2e20a400/mask=xbf20fc00 # CONSTRUCT x2e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@2 # AUNIT --inst x2e60a400/mask=xffe0fc00 --status nopcodeop :umaxp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_umaxp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.361 UMAXP page C7-2847 line 166265 MATCH x2e20a400/mask=xbf20fc00 # CONSTRUCT x6ea0a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@4 # AUNIT --inst x6ea0a400/mask=xffe0fc00 --status nopcodeop :umaxp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_umaxp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.361 UMAXP page C7-2847 line 166265 MATCH x2e20a400/mask=xbf20fc00 # CONSTRUCT x2e20a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@1 # AUNIT --inst x2e20a400/mask=xffe0fc00 --status nopcodeop :umaxp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_umaxp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.361 UMAXP page C7-2847 line 166265 MATCH x2e20a400/mask=xbf20fc00 # CONSTRUCT x6e60a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umaxp/2@2 # AUNIT --inst x6e60a400/mask=xffe0fc00 --status nopcodeop :umaxp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = 
NEON_umaxp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.362 UMAXV page C7-2849 line 166369 MATCH x2e30a800/mask=xbf3ffc00 # CONSTRUCT x6e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@1 # AUNIT --inst x6e30a800/mask=xfffffc00 --status nopcodeop :umaxv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_umaxv(Rn_VPR128.16B, 1:1); } # C7.2.362 UMAXV page C7-2849 line 166369 MATCH x2e30a800/mask=xbf3ffc00 # CONSTRUCT x2e30a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@1 # AUNIT --inst x2e30a800/mask=xfffffc00 --status nopcodeop :umaxv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_umaxv(Rn_VPR64.8B, 1:1); } # C7.2.362 UMAXV page C7-2849 line 166369 MATCH x2e30a800/mask=xbf3ffc00 # CONSTRUCT x2e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@2 # AUNIT --inst x2e70a800/mask=xfffffc00 --status nopcodeop :umaxv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_umaxv(Rn_VPR64.4H, 2:1); } # C7.2.362 UMAXV page C7-2849 line 166369 MATCH x2e30a800/mask=xbf3ffc00 # CONSTRUCT x6e70a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@2 # AUNIT --inst x6e70a800/mask=xfffffc00 --status nopcodeop :umaxv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_umaxv(Rn_VPR128.8H, 2:1); } # C7.2.362 UMAXV page C7-2849 line 166369 MATCH x2e30a800/mask=xbf3ffc00 # CONSTRUCT x6eb0a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_umaxv/1@4 # AUNIT --inst 
x6eb0a800/mask=xfffffc00 --status nopcodeop :umaxv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0xa & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { Rd_FPR32 = NEON_umaxv(Rn_VPR128.4S, 4:1); } # C7.2.363 UMIN page C7-2851 line 166472 MATCH x2e206c00/mask=xbf20fc00 # CONSTRUCT x6e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@1 # AUNIT --inst x6e206c00/mask=xffe0fc00 --status nopcodeop :umin Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xd & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_umin(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.363 UMIN page C7-2851 line 166472 MATCH x2e206c00/mask=xbf20fc00 # CONSTRUCT x2ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@4 # AUNIT --inst x2ea06c00/mask=xffe0fc00 --status nopcodeop :umin Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xd & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_umin(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.363 UMIN page C7-2851 line 166472 MATCH x2e206c00/mask=xbf20fc00 # CONSTRUCT x2e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@2 # AUNIT --inst x2e606c00/mask=xffe0fc00 --status nopcodeop :umin Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xd & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_umin(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.363 UMIN page C7-2851 line 166472 MATCH x2e206c00/mask=xbf20fc00 # CONSTRUCT x6ea06c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@4 # AUNIT --inst x6ea06c00/mask=xffe0fc00 --status nopcodeop :umin Rd_VPR128.4S, Rn_VPR128.4S, 
Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xd & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_umin(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.363 UMIN page C7-2851 line 166472 MATCH x2e206c00/mask=xbf20fc00 # CONSTRUCT x2e206c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@1 # AUNIT --inst x2e206c00/mask=xffe0fc00 --status nopcodeop :umin Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xd & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_umin(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.363 UMIN page C7-2851 line 166472 MATCH x2e206c00/mask=xbf20fc00 # CONSTRUCT x6e606c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umin/2@2 # AUNIT --inst x6e606c00/mask=xffe0fc00 --status nopcodeop :umin Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xd & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_umin(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.364 UMINP page C7-2853 line 166574 MATCH x2e20ac00/mask=xbf20fc00 # CONSTRUCT x6e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@1 # AUNIT --inst x6e20ac00/mask=xffe0fc00 --status nopcodeop :uminp Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x15 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uminp(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.364 UMINP page C7-2853 line 166574 MATCH x2e20ac00/mask=xbf20fc00 # CONSTRUCT x2ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@4 # AUNIT --inst x2ea0ac00/mask=xffe0fc00 --status nopcodeop :uminp Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is 
b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x15 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uminp(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.364 UMINP page C7-2853 line 166574 MATCH x2e20ac00/mask=xbf20fc00 # CONSTRUCT x2e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@2 # AUNIT --inst x2e60ac00/mask=xffe0fc00 --status nopcodeop :uminp Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x15 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uminp(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.364 UMINP page C7-2853 line 166574 MATCH x2e20ac00/mask=xbf20fc00 # CONSTRUCT x6ea0ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@4 # AUNIT --inst x6ea0ac00/mask=xffe0fc00 --status nopcodeop :uminp Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x15 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uminp(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.364 UMINP page C7-2853 line 166574 MATCH x2e20ac00/mask=xbf20fc00 # CONSTRUCT x2e20ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@1 # AUNIT --inst x2e20ac00/mask=xffe0fc00 --status nopcodeop :uminp Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x15 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uminp(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.364 UMINP page C7-2853 line 166574 MATCH x2e20ac00/mask=xbf20fc00 # CONSTRUCT x6e60ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uminp/2@2 # AUNIT --inst x6e60ac00/mask=xffe0fc00 --status nopcodeop :uminp Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & 
b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x15 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uminp(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.365 UMINV page C7-2855 line 166678 MATCH x2e31a800/mask=xbf3ffc00 # CONSTRUCT x6e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@1 # AUNIT --inst x6e31a800/mask=xfffffc00 --status nopcodeop :uminv Rd_FPR8, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.16B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_uminv(Rn_VPR128.16B, 1:1); } # C7.2.365 UMINV page C7-2855 line 166678 MATCH x2e31a800/mask=xbf3ffc00 # CONSTRUCT x2e31a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@1 # AUNIT --inst x2e31a800/mask=xfffffc00 --status nopcodeop :uminv Rd_FPR8, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.8B & Rd_FPR8 & Zd { Rd_FPR8 = NEON_uminv(Rn_VPR64.8B, 1:1); } # C7.2.365 UMINV page C7-2855 line 166678 MATCH x2e31a800/mask=xbf3ffc00 # CONSTRUCT x2e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@2 # AUNIT --inst x2e71a800/mask=xfffffc00 --status nopcodeop :uminv Rd_FPR16, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR64.4H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uminv(Rn_VPR64.4H, 2:1); } # C7.2.365 UMINV page C7-2855 line 166678 MATCH x2e31a800/mask=xbf3ffc00 # CONSTRUCT x6e71a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@2 # AUNIT --inst x6e71a800/mask=xfffffc00 --status nopcodeop :uminv Rd_FPR16, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.8H & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uminv(Rn_VPR128.8H, 2:1); } # C7.2.365 UMINV page C7-2855 line 166678 MATCH 
x2e31a800/mask=xbf3ffc00 # CONSTRUCT x6eb1a800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_uminv/1@4 # AUNIT --inst x6eb1a800/mask=xfffffc00 --status nopcodeop :uminv Rd_FPR32, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x18 & b_1216=0x1a & b_1011=2 & Rn_VPR128.4S & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uminv(Rn_VPR128.4S, 4:1); } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2857 line 166781 MATCH x2f002000/mask=xbf00f400 # CONSTRUCT x2f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 zext:8 $* &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@4 # AUNIT --inst x2f802000/mask=xffc0f400 --status pass --comment "ext" :umlal Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = zext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 TMPQ2[0,64] = TMPQ1[0,64] * tmp3; TMPQ2[64,64] = TMPQ1[64,64] * tmp3; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2857 line 166781 MATCH x2f002000/mask=xbf00f400 # CONSTRUCT x6f802000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 zext:8 $* &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@4 # AUNIT --inst x6f802000/mask=xffc0f400 --status pass --comment "ext" :umlal2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & 
Re_VPR128.S & vIndex & b_1215=0x2 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = zext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 TMPQ3[0,64] = TMPQ2[0,64] * tmp4; TMPQ3[64,64] = TMPQ2[64,64] * tmp4; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2857 line 166781 MATCH x2f002000/mask=xbf00f400 # CONSTRUCT x2f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 zext:4 $* &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@2 # AUNIT --inst x2f402000/mask=xffc0f400 --status pass --comment "ext" :umlal Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = zext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 TMPQ2[0,32] = TMPQ1[0,32] * tmp3; TMPQ2[32,32] = TMPQ1[32,32] * tmp3; TMPQ2[64,32] = TMPQ1[64,32] * tmp3; TMPQ2[96,32] = TMPQ1[96,32] * tmp3; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + 
TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.366 UMLAL, UMLAL2 (by element) page C7-2857 line 166781 MATCH x2f002000/mask=xbf00f400 # CONSTRUCT x6f402000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 zext:4 $* &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@2 # AUNIT --inst x6f402000/mask=xffc0f400 --status pass --comment "ext" :umlal2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x2 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = zext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 TMPQ3[0,32] = TMPQ2[0,32] * tmp4; TMPQ3[32,32] = TMPQ2[32,32] * tmp4; TMPQ3[64,32] = TMPQ2[64,32] * tmp4; TMPQ3[96,32] = TMPQ2[96,32] * tmp4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2860 line 166945 MATCH x2e208000/mask=xbf20fc00 # CONSTRUCT x6ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $*@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@4 # AUNIT --inst x6ea08000/mask=xffe0fc00 --status pass --comment "ext" :umlal2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & 
b_2121=1 & Rm_VPR128.4S & b_1215=0x8 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ5 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ5[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2860 line 166945 MATCH x2e208000/mask=xbf20fc00 # CONSTRUCT x6e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $*@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@2 # AUNIT --inst x6e608000/mask=xffe0fc00 --status pass --comment "ext" :umlal2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x8 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ5 on lane size 
4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ5[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ5[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ5[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2860 line 166945 MATCH x2e208000/mask=xbf20fc00 # CONSTRUCT x6e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $*@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal2/3@1 # AUNIT --inst x6e208000/mask=xffe0fc00 --status pass --comment "ext" :umlal2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x8 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = zext(TMPD3[0,8]); TMPQ4[16,16] = zext(TMPD3[8,8]); TMPQ4[32,16] = zext(TMPD3[16,8]); TMPQ4[48,16] = zext(TMPD3[24,8]); TMPQ4[64,16] = zext(TMPD3[32,8]); TMPQ4[80,16] = zext(TMPD3[40,8]); TMPQ4[96,16] = zext(TMPD3[48,8]); TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ5 on lane size 2 
Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ5[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ5[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ5[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ5[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ5[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ5[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ5[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2860 line 166945 MATCH x2e208000/mask=xbf20fc00 # CONSTRUCT x2ea08000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $*@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@4 # AUNIT --inst x2ea08000/mask=xffe0fc00 --status pass --comment "ext" :umlal Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x8 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2860 line 166945 MATCH x2e208000/mask=xbf20fc00 # CONSTRUCT x2e608000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $*@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@2 # AUNIT --inst x2e608000/mask=xffe0fc00 --status pass --comment "ext" :umlal 
Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x8 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.367 UMLAL, UMLAL2 (vector) page C7-2860 line 166945 MATCH x2e208000/mask=xbf20fc00 # CONSTRUCT x2e208000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $*@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlal/3@1 # AUNIT --inst x2e208000/mask=xffe0fc00 --status pass --comment "ext" :umlal Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x8 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); 
TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ3 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ3[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ3[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ3[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ3[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ3[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ3[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ3[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2862 line 167069 MATCH x2f006000/mask=xbf00f400 # CONSTRUCT x2f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 zext:8 $*@8 &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@4 # AUNIT --inst x2f806000/mask=xffc0f400 --status pass --comment "ext" :umlsl Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = 
zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = zext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 8 TMPQ2[0,64] = TMPQ1[0,64] * tmp3; TMPQ2[64,64] = TMPQ1[64,64] * tmp3; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2862 line 167069 MATCH x2f006000/mask=xbf00f400 # CONSTRUCT x6f806000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 zext:8 $*@8 &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@4 # AUNIT --inst x6f806000/mask=xffc0f400 --status pass --comment "ext" :umlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0x6 & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = zext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 8 TMPQ3[0,64] = TMPQ2[0,64] * tmp4; TMPQ3[64,64] = TMPQ2[64,64] * tmp4; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2862 line 167069 MATCH x2f006000/mask=xbf00f400 # CONSTRUCT x2f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 zext:4 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@2 # AUNIT --inst 
x2f406000/mask=xffc0f400 --status pass --comment "ext" :umlsl Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = zext(tmp2); # simd infix TMPQ2 = TMPQ1 * tmp3 on lane size 4 TMPQ2[0,32] = TMPQ1[0,32] * tmp3; TMPQ2[32,32] = TMPQ1[32,32] * tmp3; TMPQ2[64,32] = TMPQ1[64,32] * tmp3; TMPQ2[96,32] = TMPQ1[96,32] * tmp3; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.368 UMLSL, UMLSL2 (by element) page C7-2862 line 167069 MATCH x2f006000/mask=xbf00f400 # CONSTRUCT x6f406000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 zext:4 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@2 # AUNIT --inst x6f406000/mask=xffc0f400 --status pass --comment "ext" :umlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0x6 & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local 
tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = zext(tmp3); # simd infix TMPQ3 = TMPQ2 * tmp4 on lane size 4 TMPQ3[0,32] = TMPQ2[0,32] * tmp4; TMPQ3[32,32] = TMPQ2[32,32] * tmp4; TMPQ3[64,32] = TMPQ2[64,32] * tmp4; TMPQ3[96,32] = TMPQ2[96,32] * tmp4; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2865 line 167233 MATCH x2e20a000/mask=xbf20fc00 # CONSTRUCT x6ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 $*@8 &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@4 # AUNIT --inst x6ea0a000/mask=xffe0fc00 --status pass --comment "ext" :umlsl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xa & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 8 TMPQ5[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; TMPQ5[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ5 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ5[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ5[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2865 line 167233 MATCH x2e20a000/mask=xbf20fc00 # CONSTRUCT x6e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 $*@4 
&=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@2 # AUNIT --inst x6e60a000/mask=xffe0fc00 --status pass --comment "ext" :umlsl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xa & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 4 TMPQ5[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; TMPQ5[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; TMPQ5[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; TMPQ5[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ5 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] - TMPQ5[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ5[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ5[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ5[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2865 line 167233 MATCH x2e20a000/mask=xbf20fc00 # CONSTRUCT x6e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 $*@2 &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl2/3@1 # AUNIT --inst x6e20a000/mask=xffe0fc00 --status pass --comment "ext" :umlsl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xa & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = 
zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = zext(TMPD3[0,8]); TMPQ4[16,16] = zext(TMPD3[8,8]); TMPQ4[32,16] = zext(TMPD3[16,8]); TMPQ4[48,16] = zext(TMPD3[24,8]); TMPQ4[64,16] = zext(TMPD3[32,8]); TMPQ4[80,16] = zext(TMPD3[40,8]); TMPQ4[96,16] = zext(TMPD3[48,8]); TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix TMPQ5 = TMPQ2 * TMPQ4 on lane size 2 TMPQ5[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; TMPQ5[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; TMPQ5[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; TMPQ5[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; TMPQ5[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; TMPQ5[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; TMPQ5[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; TMPQ5[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ5 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ5[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ5[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ5[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ5[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ5[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ5[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ5[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ5[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2865 line 167233 MATCH x2e20a000/mask=xbf20fc00 # CONSTRUCT x2ea0a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 $*@8 &=$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@4 # AUNIT --inst x2ea0a000/mask=xffe0fc00 --status pass --comment "ext" :umlsl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & 
Rm_VPR64.2S & b_1215=0xa & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 8 TMPQ3[0,64] = TMPQ1[0,64] * TMPQ2[0,64]; TMPQ3[64,64] = TMPQ1[64,64] * TMPQ2[64,64]; # simd infix Rd_VPR128.2D = Rd_VPR128.2D - TMPQ3 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] - TMPQ3[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] - TMPQ3[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2865 line 167233 MATCH x2e20a000/mask=xbf20fc00 # CONSTRUCT x2e60a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 $*@4 &=$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@2 # AUNIT --inst x2e60a000/mask=xffe0fc00 --status pass --comment "ext" :umlsl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xa & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Rd_VPR128 & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 4 TMPQ3[0,32] = TMPQ1[0,32] * TMPQ2[0,32]; TMPQ3[32,32] = TMPQ1[32,32] * TMPQ2[32,32]; TMPQ3[64,32] = TMPQ1[64,32] * TMPQ2[64,32]; TMPQ3[96,32] = TMPQ1[96,32] * TMPQ2[96,32]; # simd infix Rd_VPR128.4S = Rd_VPR128.4S - TMPQ3 on lane size 4 Rd_VPR128.4S[0,32] = 
Rd_VPR128.4S[0,32] - TMPQ3[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] - TMPQ3[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] - TMPQ3[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] - TMPQ3[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.369 UMLSL, UMLSL2 (vector) page C7-2865 line 167233 MATCH x2e20a000/mask=xbf20fc00 # CONSTRUCT x2e20a000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 $*@2 &=$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_umlsl/3@1 # AUNIT --inst x2e20a000/mask=xffe0fc00 --status pass --comment "ext" :umlsl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xa & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Rd_VPR128 & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix TMPQ3 = TMPQ1 * TMPQ2 on lane size 2 TMPQ3[0,16] = TMPQ1[0,16] * TMPQ2[0,16]; TMPQ3[16,16] = TMPQ1[16,16] * TMPQ2[16,16]; TMPQ3[32,16] = TMPQ1[32,16] * TMPQ2[32,16]; TMPQ3[48,16] = TMPQ1[48,16] * TMPQ2[48,16]; TMPQ3[64,16] = TMPQ1[64,16] * TMPQ2[64,16]; TMPQ3[80,16] = TMPQ1[80,16] * TMPQ2[80,16]; TMPQ3[96,16] = TMPQ1[96,16] * TMPQ2[96,16]; TMPQ3[112,16] = TMPQ1[112,16] * TMPQ2[112,16]; # simd infix Rd_VPR128.8H = Rd_VPR128.8H - TMPQ3 on lane size 
2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] - TMPQ3[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] - TMPQ3[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] - TMPQ3[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] - TMPQ3[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] - TMPQ3[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] - TMPQ3[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] - TMPQ3[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] - TMPQ3[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.371 UMOV page C7-2868 line 167415 MATCH x0e003c00/mask=xbfe0fc00 # CONSTRUCT x0e013c00/mask=xffe1fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =zext:4 # SMACRO(pseudo) ARG1 ARG2 =NEON_umov/1 # AUNIT --inst x0e013c00/mask=xffe1fc00 --status pass :umov Rd_GPR32, Rn_VPR128.B.imm_neon_uimm4 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.B.imm_neon_uimm4 & b_1616=1 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 { # simd element Rn_VPR128[imm_neon_uimm4] lane size 1 local tmp1:1 = Rn_VPR128.B.imm_neon_uimm4; Rd_GPR32 = zext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.371 UMOV page C7-2868 line 167415 MATCH x0e003c00/mask=xbfe0fc00 # CONSTRUCT x0e023c00/mask=xffe3fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =zext:4 # SMACRO(pseudo) ARG1 ARG2 =NEON_umov/1 # AUNIT --inst x0e023c00/mask=xffe3fc00 --status pass :umov Rd_GPR32, Rn_VPR128.H.imm_neon_uimm3 is b_3131=0 & Q=0 & b_29=0 & b_2428=0xe & b_2123=0 & Rn_VPR128.H.imm_neon_uimm3 & b_1617=2 & b_1515=0 & imm4=0x7 & b_1010=1 & Rn_VPR128 & Rd_GPR32 & Rd_GPR64 & Rd_VPR128 { # simd element Rn_VPR128[imm_neon_uimm3] lane size 2 local tmp1:2 = Rn_VPR128.H.imm_neon_uimm3; Rd_GPR32 = zext(tmp1); zext_rs(Rd_GPR64); # zero upper 28 bytes of Rd_GPR64 } # C7.2.372 UMULL, UMULL2 (by element) page C7-2870 line 167549 MATCH x2f00a000/mask=xbf00f400 # CONSTRUCT x6f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 zext:8 
=$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@4 # AUNIT --inst x6f80a000/mask=xffc0f400 --status pass --comment "ext" :umull2 Rd_VPR128.2D, Rn_VPR128.4S, Re_VPR128.S.vIndex is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp3:4 = Re_VPR128.S.vIndex; local tmp4:8 = zext(tmp3); # simd infix Rd_VPR128.2D = TMPQ2 * tmp4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] * tmp4; Rd_VPR128.2D[64,64] = TMPQ2[64,64] * tmp4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.372 UMULL, UMULL2 (by element) page C7-2870 line 167549 MATCH x2f00a000/mask=xbf00f400 # CONSTRUCT x6f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 zext:4 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@2 # AUNIT --inst x6f40a000/mask=xffc0f400 --status pass --comment "ext" :umull2 Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=1 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp3:2 = Re_VPR128Lo.H.vIndexHLM; local tmp4:4 = zext(tmp3); # simd infix Rd_VPR128.4S = TMPQ2 * tmp4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] * tmp4; Rd_VPR128.4S[32,32] = TMPQ2[32,32] * tmp4; Rd_VPR128.4S[64,32] = TMPQ2[64,32] * tmp4; Rd_VPR128.4S[96,32] = TMPQ2[96,32] * tmp4; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.372 UMULL, UMULL2 (by element) page C7-2870 
line 167549 MATCH x2f00a000/mask=xbf00f400 # CONSTRUCT x2f80a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 zext:8 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@4 # AUNIT --inst x2f80a000/mask=xffc0f400 --status pass --comment "ext" :umull Rd_VPR128.2D, Rn_VPR64.2S, Re_VPR128.S.vIndex is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=2 & Re_VPR128.S.vIndex & Re_VPR128.S & vIndex & b_1215=0xa & b_1010=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd element Re_VPR128.S[vIndex] lane size 4 local tmp2:4 = Re_VPR128.S.vIndex; local tmp3:8 = zext(tmp2); # simd infix Rd_VPR128.2D = TMPQ1 * tmp3 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] * tmp3; Rd_VPR128.2D[64,64] = TMPQ1[64,64] * tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.372 UMULL, UMULL2 (by element) page C7-2870 line 167549 MATCH x2f00a000/mask=xbf00f400 # CONSTRUCT x2f40a000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 zext:4 =$* # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@2 # AUNIT --inst x2f40a000/mask=xffc0f400 --status pass --comment "ext" :umull Rd_VPR128.4S, Rn_VPR64.4H, Re_VPR128Lo.H.vIndexHLM is b_3131=0 & q=0 & u=1 & b_2428=0xf & advSIMD3.size=1 & Re_VPR128Lo.H.vIndexHLM & Re_VPR128Lo.H & vIndexHLM & b_1215=0xa & b_1010=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd element Re_VPR128Lo.H[vIndexHLM] lane size 2 local tmp2:2 = Re_VPR128Lo.H.vIndexHLM; local tmp3:4 = zext(tmp2); # simd infix Rd_VPR128.4S = TMPQ1 * tmp3 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] * tmp3; Rd_VPR128.4S[32,32] = TMPQ1[32,32] * tmp3; Rd_VPR128.4S[64,32] = TMPQ1[64,32] * tmp3; 
Rd_VPR128.4S[96,32] = TMPQ1[96,32] * tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.373 UMULL, UMULL2 (vector) page C7-2873 line 167705 MATCH x2e20c000/mask=xbf20fc00 # CONSTRUCT x6ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 =$*@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@4 # AUNIT --inst x6ea0c000/mask=xffe0fc00 --status pass --comment "ext" :umull2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0xc & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 * TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] * TMPQ4[0,64]; Rd_VPR128.2D[64,64] = TMPQ2[64,64] * TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.373 UMULL, UMULL2 (vector) page C7-2873 line 167705 MATCH x2e20c000/mask=xbf20fc00 # CONSTRUCT x6e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 =$*@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@2 # AUNIT --inst x6e60c000/mask=xffe0fc00 --status pass --comment "ext" :umull2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0xc & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = 
zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 * TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] * TMPQ4[0,32]; Rd_VPR128.4S[32,32] = TMPQ2[32,32] * TMPQ4[32,32]; Rd_VPR128.4S[64,32] = TMPQ2[64,32] * TMPQ4[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32] * TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.373 UMULL, UMULL2 (vector) page C7-2873 line 167705 MATCH x2e20c000/mask=xbf20fc00 # CONSTRUCT x6e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 =$*@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull2/2@1 # AUNIT --inst x6e20c000/mask=xffe0fc00 --status pass --comment "ext" :umull2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0xc & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = zext(TMPD3[0,8]); TMPQ4[16,16] = zext(TMPD3[8,8]); TMPQ4[32,16] = zext(TMPD3[16,8]); TMPQ4[48,16] = zext(TMPD3[24,8]); TMPQ4[64,16] = zext(TMPD3[32,8]); TMPQ4[80,16] = zext(TMPD3[40,8]); TMPQ4[96,16] = zext(TMPD3[48,8]); TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 * TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] * TMPQ4[0,16]; Rd_VPR128.8H[16,16] = TMPQ2[16,16] * TMPQ4[16,16]; Rd_VPR128.8H[32,16] = TMPQ2[32,16] * TMPQ4[32,16]; Rd_VPR128.8H[48,16] = TMPQ2[48,16] * TMPQ4[48,16]; Rd_VPR128.8H[64,16] = TMPQ2[64,16] * TMPQ4[64,16]; Rd_VPR128.8H[80,16] = TMPQ2[80,16] * TMPQ4[80,16]; 
Rd_VPR128.8H[96,16] = TMPQ2[96,16] * TMPQ4[96,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16] * TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.373 UMULL, UMULL2 (vector) page C7-2873 line 167705 MATCH x2e20c000/mask=xbf20fc00 # CONSTRUCT x2ea0c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@4 # AUNIT --inst x2ea0c000/mask=xffe0fc00 --status nopcodeop --comment "ext" :umull Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0xc & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_umull(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.373 UMULL, UMULL2 (vector) page C7-2873 line 167705 MATCH x2e20c000/mask=xbf20fc00 # CONSTRUCT x2e60c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@2 # AUNIT --inst x2e60c000/mask=xffe0fc00 --status nopcodeop --comment "ext" :umull Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0xc & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_umull(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.373 UMULL, UMULL2 (vector) page C7-2873 line 167705 MATCH x2e20c000/mask=xbf20fc00 # CONSTRUCT x2e20c000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_umull/2@1 # AUNIT --inst x2e20c000/mask=xffe0fc00 --status nopcodeop --comment "ext" :umull Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0xc & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_umull(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x7e200c00/mask=xff20fc00 # CONSTRUCT x7e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 # AUNIT --inst x7e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" 
:uqadd Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x1 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_uqadd(Rn_FPR8, Rm_FPR8); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x7e200c00/mask=xff20fc00 # CONSTRUCT x7ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 # AUNIT --inst x7ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x1 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_uqadd(Rn_FPR64, Rm_FPR64); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x7e200c00/mask=xff20fc00 # CONSTRUCT x7e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 # AUNIT --inst x7e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x1 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uqadd(Rn_FPR16, Rm_FPR16); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x7e200c00/mask=xff20fc00 # CONSTRUCT x7ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2 # AUNIT --inst x7ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x1 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uqadd(Rn_FPR32, Rm_FPR32); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x6e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@1 # AUNIT --inst x6e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & 
b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x1 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x6ee00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@8 # AUNIT --inst x6ee00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x1 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_uqadd(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x2ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@4 # AUNIT --inst x2ea00c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x1 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x2e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@2 # AUNIT --inst x2e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x1 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x6ea00c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@4 # AUNIT --inst x6ea00c00/mask=xffe0fc00 --status 
nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x1 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x2e200c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@1 # AUNIT --inst x2e200c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x1 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.374 UQADD page C7-2875 line 167821 MATCH x2e200c00/mask=xbf20fc00 # CONSTRUCT x6e600c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqadd/2@2 # AUNIT --inst x6e600c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x1 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x7e205c00/mask=xff20fc00 # CONSTRUCT x7e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 # AUNIT --inst x7e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0xb & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_uqrshl(Rn_FPR8, Rm_FPR8); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x7e205c00/mask=xff20fc00 # CONSTRUCT x7ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 # 
AUNIT --inst x7ee05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xb & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_uqrshl(Rn_FPR64, Rm_FPR64); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x7e205c00/mask=xff20fc00 # CONSTRUCT x7e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 # AUNIT --inst x7e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0xb & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uqrshl(Rn_FPR16, Rm_FPR16); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x7e205c00/mask=xff20fc00 # CONSTRUCT x7ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2 # AUNIT --inst x7ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0xb & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uqrshl(Rn_FPR32, Rm_FPR32); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x6e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@1 # AUNIT --inst x6e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xb & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqrshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x6ee05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@8 # AUNIT --inst x6ee05c00/mask=xffe0fc00 
--status nopcodeop --comment "nointsat" :uqrshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xb & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_uqrshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x2ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@4 # AUNIT --inst x2ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xb & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqrshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x2e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@2 # AUNIT --inst x2e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xb & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqrshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x6ea05c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@4 # AUNIT --inst x6ea05c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xb & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqrshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x2e205c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@1 # AUNIT --inst x2e205c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0xb & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqrshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.375 UQRSHL page C7-2877 line 167948 MATCH x2e205c00/mask=xbf20fc00 # CONSTRUCT x6e605c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqrshl/2@2 # AUNIT --inst x6e605c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqrshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xb & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqrshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x2f009c00/mask=xbf80fc00 # CONSTRUCT x6f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn2/2@2 # AUNIT --inst x6f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" :uqrshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqrshrn2(Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x2f009c00/mask=xbf80fc00 # CONSTRUCT x2f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@8 # AUNIT --inst x2f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqrshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqrshrn(Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.376 UQRSHRN, 
UQRSHRN2 page C7-2879 line 168091 MATCH x2f009c00/mask=xbf80fc00 # CONSTRUCT x2f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@4 # AUNIT --inst x2f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" :uqrshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqrshrn(Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x2f009c00/mask=xbf80fc00 # CONSTRUCT x6f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@8 # AUNIT --inst x6f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" :uqrshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x13 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqrshrn(Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x2f009c00/mask=xbf80fc00 # CONSTRUCT x2f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2@2 # AUNIT --inst x2f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" :uqrshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x13 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqrshrn(Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x2f009c00/mask=xbf80fc00 # CONSTRUCT x6f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn2/2@4 # AUNIT --inst x6f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" :uqrshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & 
Imm_shr_imm16 & b_1115=0x13 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqrshrn2(Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x7f009c00/mask=xff80fc00 # CONSTRUCT x7f089c00/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2 # AUNIT --inst x7f089c00/mask=xfff8fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :uqrshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100111 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_uqrshrn(Rn_FPR16, Imm_shr_imm8:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x7f009c00/mask=xff80fc00 # CONSTRUCT x7f109c00/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2 # AUNIT --inst x7f109c00/mask=xfff0fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :uqrshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100111 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_uqrshrn(Rn_FPR32, Imm_shr_imm16:1); } # C7.2.376 UQRSHRN, UQRSHRN2 page C7-2879 line 168091 MATCH x7f009c00/mask=xff80fc00 # CONSTRUCT x7f209c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqrshrn/2 # AUNIT --inst x7f209c00/mask=xffe0fc00 --status nopcodeop --comment "nointround nointsat" # Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :uqrshrn Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100111 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_uqrshrn(Rn_FPR64, Imm_shr_imm32:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # CONSTRUCT x6f087400/mask=xfff8fc00 MATCHED 1 
DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@1 # AUNIT --inst x6f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.16B, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqshl(Rn_VPR128.16B, Imm_uimm3:1, 1:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # CONSTRUCT x6f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@8 # AUNIT --inst x6f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.2D, Rn_VPR128.2D, Imm_imm0_63 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_imm0_63 & b_1115=0xe & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_uqshl(Rn_VPR128.2D, Imm_imm0_63:1, 8:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # CONSTRUCT x2f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@4 # AUNIT --inst x2f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR64.2S, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqshl(Rn_VPR64.2S, Imm_uimm5:1, 4:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # CONSTRUCT x2f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@2 # AUNIT --inst x2f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR64.4H, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqshl(Rn_VPR64.4H, Imm_uimm4:1, 2:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # 
CONSTRUCT x6f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@4 # AUNIT --inst x6f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.4S, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0xe & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqshl(Rn_VPR128.4S, Imm_uimm5:1, 4:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # CONSTRUCT x2f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@1 # AUNIT --inst x2f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR64.8B, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0xe & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqshl(Rn_VPR64.8B, Imm_uimm3:1, 1:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x2f007400/mask=xbf80fc00 # CONSTRUCT x6f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2@2 # AUNIT --inst x6f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.8H, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0xe & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqshl(Rn_VPR128.8H, Imm_uimm4:1, 2:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x7f007400/mask=xff80fc00 # CONSTRUCT x7f087400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 # AUNIT --inst x7f087400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=0001 V=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :uqshl Rd_FPR8, Rn_FPR8, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b011101 & Rd_FPR8 & Rn_FPR8 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_uqshl(Rn_FPR8, Imm_shr_imm8:1); } # C7.2.377 UQSHL 
(immediate) page C7-2882 line 168276 MATCH x7f007400/mask=xff80fc00 # CONSTRUCT x7f107400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 # AUNIT --inst x7f107400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=001x V=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :uqshl Rd_FPR16, Rn_FPR16, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b011101 & Rd_FPR16 & Rn_FPR16 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_uqshl(Rn_FPR16, Imm_shr_imm16:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x7f007400/mask=xff80fc00 # CONSTRUCT x7f207400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 # AUNIT --inst x7f207400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=01xx V=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :uqshl Rd_FPR32, Rn_FPR32, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b011101 & Rd_FPR32 & Rn_FPR32 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_uqshl(Rn_FPR32, Imm_shr_imm32:1); } # C7.2.377 UQSHL (immediate) page C7-2882 line 168276 MATCH x7f007400/mask=xff80fc00 # CONSTRUCT x7f407400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_uqshl/2 # AUNIT --inst x7f407400/mask=xffc0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=1xxx V=FPR64 imm=Imm_shr_imm64 bb=b_22 aa=1 :uqshl Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b011101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { Rd_FPR64 = NEON_uqshl(Rn_FPR64, Imm_shr_imm64:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x7e204c00/mask=xff20fc00 # CONSTRUCT x7e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 # AUNIT --inst x7e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & 
b_1115=0x9 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_uqshl(Rn_FPR8, Rm_FPR8); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x7e204c00/mask=xff20fc00 # CONSTRUCT x7ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 # AUNIT --inst x7ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x9 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_uqshl(Rn_FPR64, Rm_FPR64); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x7e204c00/mask=xff20fc00 # CONSTRUCT x7e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 # AUNIT --inst x7e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x9 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uqshl(Rn_FPR16, Rm_FPR16); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x7e204c00/mask=xff20fc00 # CONSTRUCT x7ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2 # AUNIT --inst x7ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x9 & b_1010=1 & Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uqshl(Rn_FPR32, Rm_FPR32); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x6e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@1 # AUNIT --inst x6e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x9 & b_1010=1 & Rn_VPR128.16B & 
Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x6ee04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@8 # AUNIT --inst x6ee04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x9 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_uqshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x2ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@4 # AUNIT --inst x2ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x9 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x2e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@2 # AUNIT --inst x2e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x9 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x6ea04c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@4 # AUNIT --inst x6ea04c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & 
q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x9 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x2e204c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@1 # AUNIT --inst x2e204c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x9 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.378 UQSHL (register) page C7-2885 line 168441 MATCH x2e204c00/mask=xbf20fc00 # CONSTRUCT x6e604c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqshl/2@2 # AUNIT --inst x6e604c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x9 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x6f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn2/3@2 # AUNIT --inst x6f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :uqshrn2 Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqshrn2(Rd_VPR128.16B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x2f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3@8 # AUNIT --inst 
x2f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshrn Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqshrn(Rd_VPR64.2S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x2f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3@4 # AUNIT --inst x2f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :uqshrn Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqshrn(Rd_VPR64.4H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x6f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn2/3@8 # AUNIT --inst x6f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqshrn2 Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x12 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqshrn2(Rd_VPR128.4S, Rn_VPR128.2D, Imm_shr_imm32:1, 8:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x2f009400/mask=xbf80fc00 # CONSTRUCT x2f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3@2 # AUNIT --inst x2f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" :uqshrn Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x12 & b_1010=1 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqshrn(Rd_VPR64.8B, Rn_VPR128.8H, Imm_shr_imm8:1, 2:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH 
x2f009400/mask=xbf80fc00 # CONSTRUCT x6f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn2/3@4 # AUNIT --inst x6f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" :uqshrn2 Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x12 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqshrn2(Rd_VPR128.8H, Rn_VPR128.4S, Imm_shr_imm16:1, 4:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x7f009400/mask=xff80fc00 # CONSTRUCT x7f089400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3 # AUNIT --inst x7f089400/mask=xfff8fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=0001 Va=FPR16 Vb=FPR8 imm=Imm_shr_imm8 bb=b_1922 aa=0b0001 :uqshrn Rd_FPR8, Rn_FPR16, Imm_shr_imm8 is b_2331=0b011111110 & b_1922=0b0001 & b_1015=0b100101 & Rd_FPR8 & Rn_FPR16 & Imm_shr_imm8 & Zd { Rd_FPR8 = NEON_uqshrn(Rd_FPR8, Rn_FPR16, Imm_shr_imm8:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x7f009400/mask=xff80fc00 # CONSTRUCT x7f109400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3 # AUNIT --inst x7f109400/mask=xfff0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=001x Va=FPR32 Vb=FPR16 imm=Imm_shr_imm16 bb=b_2022 aa=0b001 :uqshrn Rd_FPR16, Rn_FPR32, Imm_shr_imm16 is b_2331=0b011111110 & b_2022=0b001 & b_1015=0b100101 & Rd_FPR16 & Rn_FPR32 & Imm_shr_imm16 & Zd { Rd_FPR16 = NEON_uqshrn(Rd_FPR16, Rn_FPR32, Imm_shr_imm16:1); } # C7.2.379 UQSHRN, UQSHRN2 page C7-2887 line 168584 MATCH x7f009400/mask=xff80fc00 # CONSTRUCT x7f209400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_uqshrn/3 # AUNIT --inst x7f209400/mask=xffe0fc00 --status nopcodeop --comment "nointsat" # Scalar variant when immh=01xx Va=FPR64 Vb=FPR32 imm=Imm_shr_imm32 bb=b_2122 aa=0b01 :uqshrn 
Rd_FPR32, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_2122=0b01 & b_1015=0b100101 & Rd_FPR32 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR32 = NEON_uqshrn(Rd_FPR32, Rn_FPR64, Imm_shr_imm32:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x7e202c00/mask=xff20fc00 # CONSTRUCT x7e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 # AUNIT --inst x7e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_FPR8, Rn_FPR8, Rm_FPR8 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=0 & b_2121=1 & Rm_FPR8 & b_1115=0x5 & b_1010=1 & Rn_FPR8 & Rd_FPR8 & Zd { Rd_FPR8 = NEON_uqsub(Rn_FPR8, Rm_FPR8); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x7e202c00/mask=xff20fc00 # CONSTRUCT x7ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 # AUNIT --inst x7ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x5 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_uqsub(Rn_FPR64, Rm_FPR64); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x7e202c00/mask=xff20fc00 # CONSTRUCT x7e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 # AUNIT --inst x7e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_FPR16, Rn_FPR16, Rm_FPR16 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=1 & b_2121=1 & Rm_FPR16 & b_1115=0x5 & b_1010=1 & Rn_FPR16 & Rd_FPR16 & Zd { Rd_FPR16 = NEON_uqsub(Rn_FPR16, Rm_FPR16); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x7e202c00/mask=xff20fc00 # CONSTRUCT x7ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2 # AUNIT --inst x7ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_FPR32, Rn_FPR32, Rm_FPR32 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=2 & b_2121=1 & Rm_FPR32 & b_1115=0x5 & b_1010=1 & 
Rn_FPR32 & Rd_FPR32 & Zd { Rd_FPR32 = NEON_uqsub(Rn_FPR32, Rm_FPR32); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x6e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@1 # AUNIT --inst x6e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x5 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_uqsub(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x6ee02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@8 # AUNIT --inst x6ee02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x5 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_uqsub(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x2ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@4 # AUNIT --inst x2ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x5 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_uqsub(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x2e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@2 # AUNIT --inst x2e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & 
b_2121=1 & Rm_VPR64.4H & b_1115=0x5 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_uqsub(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x6ea02c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@4 # AUNIT --inst x6ea02c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x5 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_uqsub(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x2e202c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@1 # AUNIT --inst x2e202c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x5 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_uqsub(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.380 UQSUB page C7-2890 line 168769 MATCH x2e202c00/mask=xbf20fc00 # CONSTRUCT x6e602c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uqsub/2@2 # AUNIT --inst x6e602c00/mask=xffe0fc00 --status nopcodeop --comment "nointsat" :uqsub Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x5 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_uqsub(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x7e214800/mask=xff3ffc00 # CONSTRUCT x7e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2 # AUNIT --inst x7e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=00 Q=1 bb=1 
mnemonic=uqxtn Ta=FPR16 Tb=FPR8 :uqxtn Rd_FPR8, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_FPR8 & Rn_FPR16 & Zd { Rd_FPR8 = NEON_uqxtn(Rd_FPR8, Rn_FPR16); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x7e214800/mask=xff3ffc00 # CONSTRUCT x7e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2 # AUNIT --inst x7e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=01 Q=1 bb=1 mnemonic=uqxtn Ta=FPR32 Tb=FPR16 :uqxtn Rd_FPR16, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_FPR16 & Rn_FPR32 & Zd { Rd_FPR16 = NEON_uqxtn(Rd_FPR16, Rn_FPR32); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x7e214800/mask=xff3ffc00 # CONSTRUCT x7ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2 # AUNIT --inst x7ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=10 Q=1 bb=1 mnemonic=uqxtn Ta=FPR64 Tb=FPR32 :uqxtn Rd_FPR32, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_FPR32 & Rn_FPR64 & Zd { Rd_FPR32 = NEON_uqxtn(Rd_FPR32, Rn_FPR64); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x2e214800/mask=xbf3ffc00 # CONSTRUCT x2e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2@2 # AUNIT --inst x2e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=00 Q=0 bb=0 mnemonic=uqxtn e=2 Ta=VPR128.8H Tb=VPR64.8B :uqxtn Rd_VPR64.8B, Rn_VPR128.8H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_VPR64.8B & Rn_VPR128.8H & Zd { Rd_VPR64.8B = NEON_uqxtn(Rd_VPR64.8B, Rn_VPR128.8H, 2:1); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x2e214800/mask=xbf3ffc00 # CONSTRUCT x6e214800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn2/2@2 # AUNIT --inst 
x6e214800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=00 Q=1 bb=0 mnemonic=uqxtn2 e=2 Ta=VPR128.8H Tb=VPR128.16B :uqxtn2 Rd_VPR128.16B, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100001010010 & Rd_VPR128.16B & Rn_VPR128.8H & Zd { Rd_VPR128.16B = NEON_uqxtn2(Rd_VPR128.16B, Rn_VPR128.8H, 2:1); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x2e214800/mask=xbf3ffc00 # CONSTRUCT x2e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2@4 # AUNIT --inst x2e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=01 Q=0 bb=0 mnemonic=uqxtn e=4 Ta=VPR128.4S Tb=VPR64.4H :uqxtn Rd_VPR64.4H, Rn_VPR128.4S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_VPR64.4H & Rn_VPR128.4S & Zd { Rd_VPR64.4H = NEON_uqxtn(Rd_VPR64.4H, Rn_VPR128.4S, 4:1); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x2e214800/mask=xbf3ffc00 # CONSTRUCT x6e614800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn2/2@4 # AUNIT --inst x6e614800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=01 Q=1 bb=0 mnemonic=uqxtn2 e=4 Ta=VPR128.4S Tb=VPR128.8H :uqxtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100001010010 & Rd_VPR128.8H & Rn_VPR128.4S & Zd { Rd_VPR128.8H = NEON_uqxtn2(Rd_VPR128.8H, Rn_VPR128.4S, 4:1); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x2e214800/mask=xbf3ffc00 # CONSTRUCT x2ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn/2@8 # AUNIT --inst x2ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=10 Q=0 bb=0 mnemonic=uqxtn e=8 Ta=VPR128.2D Tb=VPR64.2S :uqxtn Rd_VPR64.2S, Rn_VPR128.2D is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_VPR64.2S & Rn_VPR128.2D & Zd { Rd_VPR64.2S 
= NEON_uqxtn(Rd_VPR64.2S, Rn_VPR128.2D, 8:1); } # C7.2.381 UQXTN, UQXTN2 page C7-2892 line 168897 MATCH x2e214800/mask=xbf3ffc00 # CONSTRUCT x6ea14800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_uqxtn2/2@8 # AUNIT --inst x6ea14800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=10 Q=1 bb=0 mnemonic=uqxtn2 e=8 Ta=VPR128.2D Tb=VPR128.4S :uqxtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100001010010 & Rd_VPR128.4S & Rn_VPR128.2D & Zd { Rd_VPR128.4S = NEON_uqxtn2(Rd_VPR128.4S, Rn_VPR128.2D, 8:1); } # C7.2.382 URECPE page C7-2895 line 169051 MATCH x0ea1c800/mask=xbfbffc00 # CONSTRUCT x0ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_urecpe/1@4 # AUNIT --inst x0ea1c800/mask=xfffffc00 --status nopcodeop # Vector variant when Q=0 T=VPR64.2S :urecpe Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { Rd_VPR64.2S = NEON_urecpe(Rn_VPR64.2S, 4:1); } # C7.2.382 URECPE page C7-2895 line 169051 MATCH x0ea1c800/mask=xbfbffc00 # CONSTRUCT x4ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_urecpe/1@4 # AUNIT --inst x4ea1c800/mask=xfffffc00 --status nopcodeop # Vector variant when Q=1 T=VPR128.4S :urecpe Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b0011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_urecpe(Rn_VPR128.4S, 4:1); } # C7.2.383 URHADD page C7-2896 line 169117 MATCH x2e201400/mask=xbf20fc00 # CONSTRUCT x6e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@1 # AUNIT --inst x6e201400/mask=xffe0fc00 --status nopcodeop :urhadd Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { 
Rd_VPR128.16B = NEON_urhadd(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.383 URHADD page C7-2896 line 169117 MATCH x2e201400/mask=xbf20fc00 # CONSTRUCT x2ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@4 # AUNIT --inst x2ea01400/mask=xffe0fc00 --status nopcodeop :urhadd Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_urhadd(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.383 URHADD page C7-2896 line 169117 MATCH x2e201400/mask=xbf20fc00 # CONSTRUCT x2e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@2 # AUNIT --inst x2e601400/mask=xffe0fc00 --status nopcodeop :urhadd Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_urhadd(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.383 URHADD page C7-2896 line 169117 MATCH x2e201400/mask=xbf20fc00 # CONSTRUCT x6ea01400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@4 # AUNIT --inst x6ea01400/mask=xffe0fc00 --status nopcodeop :urhadd Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_urhadd(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.383 URHADD page C7-2896 line 169117 MATCH x2e201400/mask=xbf20fc00 # CONSTRUCT x2e201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@1 # AUNIT --inst x2e201400/mask=xffe0fc00 --status nopcodeop :urhadd Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B
= NEON_urhadd(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.383 URHADD page C7-2896 line 169117 MATCH x2e201400/mask=xbf20fc00 # CONSTRUCT x6e601400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urhadd/2@2 # AUNIT --inst x6e601400/mask=xffe0fc00 --status nopcodeop :urhadd Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_urhadd(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x7e205400/mask=xff20fc00 # CONSTRUCT x7ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2 # AUNIT --inst x7ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0xa & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_urshl(Rn_FPR64, Rm_FPR64); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x6e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@1 # AUNIT --inst x6e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0xa & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_urshl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x6ee05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@8 # AUNIT --inst x6ee05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0xa & b_1010=1 & Rn_VPR128.2D & 
Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_urshl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x2ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@4 # AUNIT --inst x2ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0xa & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_urshl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x2e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@2 # AUNIT --inst x2e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0xa & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_urshl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x6ea05400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@4 # AUNIT --inst x6ea05400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0xa & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_urshl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x2e205400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@1 # AUNIT --inst x2e205400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & 
b_2121=1 & Rm_VPR64.8B & b_1115=0xa & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_urshl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.384 URSHL page C7-2898 line 169205 MATCH x2e205400/mask=xbf20fc00 # CONSTRUCT x6e605400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_urshl/2@2 # AUNIT --inst x6e605400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0xa & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_urshl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x6f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@1 # AUNIT --inst x6f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" :urshr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_urshr(Rn_VPR128.16B, Imm_shr_imm8:1, 1:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x6f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@8 # AUNIT --inst x6f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" :urshr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x4 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_urshr(Rn_VPR128.2D, Imm_shr_imm64:1, 8:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x2f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@4 # AUNIT --inst x2f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshr Rd_VPR64.2S, Rn_VPR64.2S, 
Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_urshr(Rn_VPR64.2S, Imm_shr_imm32:1, 4:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x2f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@2 # AUNIT --inst x2f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" :urshr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_urshr(Rn_VPR64.4H, Imm_shr_imm16:1, 2:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x6f202400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@4 # AUNIT --inst x6f202400/mask=xffe0fc00 --status nopcodeop --comment "nointround" :urshr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x4 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_urshr(Rn_VPR128.4S, Imm_shr_imm32:1, 4:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x2f082400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@1 # AUNIT --inst x2f082400/mask=xfff8fc00 --status nopcodeop --comment "nointround" :urshr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x4 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_urshr(Rn_VPR64.8B, Imm_shr_imm8:1, 1:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x2f002400/mask=xbf80fc00 # CONSTRUCT x6f102400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2@2 # AUNIT --inst x6f102400/mask=xfff0fc00 --status nopcodeop --comment "nointround" :urshr 
Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x4 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_urshr(Rn_VPR128.8H, Imm_shr_imm16:1, 2:1); } # C7.2.385 URSHR page C7-2900 line 169341 MATCH x7f002400/mask=xff80fc00 # CONSTRUCT x7f402400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_urshr/2 # AUNIT --inst x7f402400/mask=xffc0fc00 --status nopcodeop --comment "nointround" # Scalar variant :urshr Rd_FPR64, Rn_FPR64, Imm_shr_imm32 is b_2331=0b011111110 & b_22=1 & b_1015=0b001001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm32 & Zd { Rd_FPR64 = NEON_urshr(Rn_FPR64, Imm_shr_imm32:1); } # C7.2.386 URSQRTE page C7-2903 line 169492 MATCH x2ea1c800/mask=xbfbffc00 # CONSTRUCT x2ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ursqrte/1@4 # AUNIT --inst x2ea1c800/mask=xfffffc00 --status nopcodeop # Vector variant when Q=0 T=VPR64.2S :ursqrte Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { Rd_VPR64.2S = NEON_ursqrte(Rn_VPR64.2S, 4:1); } # C7.2.386 URSQRTE page C7-2903 line 169492 MATCH x2ea1c800/mask=xbfbffc00 # CONSTRUCT x6ea1c800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 =NEON_ursqrte/1@4 # AUNIT --inst x6ea1c800/mask=xfffffc00 --status nopcodeop # Vector variant when Q=1 T=VPR128.4S :ursqrte Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2329=0b1011101 & b_22=0 & b_1021=0b100001110010 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_ursqrte(Rn_VPR128.4S, 4:1); } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT x6f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@1 # AUNIT --inst x6f083400/mask=xfff8fc00 --status fail --comment "nointround" :ursra Rd_VPR128.16B, Rn_VPR128.16B,
Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 TMPQ1[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; TMPQ1[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; TMPQ1[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; TMPQ1[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; TMPQ1[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; TMPQ1[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; TMPQ1[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; TMPQ1[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; TMPQ1[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; TMPQ1[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; TMPQ1[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; TMPQ1[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; TMPQ1[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; TMPQ1[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; TMPQ1[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; TMPQ1[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; 
Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT x6f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@8 # AUNIT --inst x6f403400/mask=xffc0fc00 --status fail --comment "nointround" :ursra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x6 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT x2f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@4 # AUNIT --inst x2f203400/mask=xffe0fc00 --status fail --comment "nointround" :ursra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix TMPD1 = Rn_VPR64.2S >> tmp1 on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] >> tmp1; TMPD1[32,32] = Rn_VPR64.2S[32,32] >> tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT x2f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # 
SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@2 # AUNIT --inst x2f103400/mask=xfff0fc00 --status fail --comment "nointround" :ursra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; TMPD1[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; TMPD1[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; TMPD1[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT x6f203400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@4 # AUNIT --inst x6f203400/mask=xffe0fc00 --status fail --comment "nointround" :ursra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x6 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix TMPQ1 = Rn_VPR128.4S >> tmp1 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> tmp1; TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> tmp1; TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> tmp1; TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT
x2f083400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@1 # AUNIT --inst x2f083400/mask=xfff8fc00 --status fail --comment "nointround" :ursra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x6 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix TMPD1 = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 TMPD1[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; TMPD1[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; TMPD1[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; TMPD1[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; TMPD1[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; TMPD1[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; TMPD1[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; TMPD1[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x2f003400/mask=xbf80fc00 # CONSTRUCT x6f103400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3@2 # AUNIT --inst x6f103400/mask=xfff0fc00 --status fail --comment "nointround" :ursra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x6 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; TMPQ1[16,16] 
= Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.387 URSRA page C7-2904 line 169558 MATCH x7f003400/mask=xff80fc00 # CONSTRUCT x7f403400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 >> &=+ # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_ursra/3 # AUNIT --inst x7f403400/mask=xffc0fc00 --status fail --comment "nointround" # Scalar variant when immh=1xxx :ursra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b001101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 >> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.390 USHL page C7-2911 line 169885 MATCH x7e204400/mask=xff20fc00 # CONSTRUCT x7ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2 # AUNIT --inst x7ee04400/mask=xffe0fc00 --status nopcodeop :ushl Rd_FPR64, Rn_FPR64, Rm_FPR64 is b_3031=1 & u=1 & b_2428=0x1e & advSIMD3.size=3 & b_2121=1 & Rm_FPR64 & b_1115=0x8 & b_1010=1 & Rn_FPR64 & Rd_FPR64 & Zd { Rd_FPR64 = NEON_ushl(Rn_FPR64, Rm_FPR64); 
} # C7.2.390 USHL page C7-2911 line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x6e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@1 # AUNIT --inst x6e204400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1115=0x8 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { Rd_VPR128.16B = NEON_ushl(Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.390 USHL page C7-2911 line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x6ee04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@8 # AUNIT --inst x6ee04400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=3 & b_2121=1 & Rm_VPR128.2D & b_1115=0x8 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { Rd_VPR128.2D = NEON_ushl(Rn_VPR128.2D, Rm_VPR128.2D, 8:1); } # C7.2.390 USHL page C7-2911 line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x2ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@4 # AUNIT --inst x2ea04400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1115=0x8 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { Rd_VPR64.2S = NEON_ushl(Rn_VPR64.2S, Rm_VPR64.2S, 4:1); } # C7.2.390 USHL page C7-2911 line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x2e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@2 # AUNIT --inst x2e604400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1115=0x8 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { Rd_VPR64.4H = NEON_ushl(Rn_VPR64.4H, Rm_VPR64.4H, 2:1); } # C7.2.390 USHL page C7-2911 
line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x6ea04400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@4 # AUNIT --inst x6ea04400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1115=0x8 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { Rd_VPR128.4S = NEON_ushl(Rn_VPR128.4S, Rm_VPR128.4S, 4:1); } # C7.2.390 USHL page C7-2911 line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x2e204400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@1 # AUNIT --inst x2e204400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1115=0x8 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { Rd_VPR64.8B = NEON_ushl(Rn_VPR64.8B, Rm_VPR64.8B, 1:1); } # C7.2.390 USHL page C7-2911 line 169885 MATCH x2e204400/mask=xbf20fc00 # CONSTRUCT x6e604400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_ushl/2@2 # AUNIT --inst x6e604400/mask=xffe0fc00 --status nopcodeop :ushl Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1115=0x8 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { Rd_VPR128.8H = NEON_ushl(Rn_VPR128.8H, Rm_VPR128.8H, 2:1); } # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x6f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3 =var:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll2/2@1 # AUNIT --inst x6f08a400/mask=xfff8fc00 --status pass --comment "ext" :ushll2 Rd_VPR128.8H, Rn_VPR128.16B, Imm_uimm3 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & 
Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); local tmp3:2 = Imm_uimm3; # simd infix Rd_VPR128.8H = TMPQ2 << tmp3 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] << tmp3; Rd_VPR128.8H[16,16] = TMPQ2[16,16] << tmp3; Rd_VPR128.8H[32,16] = TMPQ2[32,16] << tmp3; Rd_VPR128.8H[48,16] = TMPQ2[48,16] << tmp3; Rd_VPR128.8H[64,16] = TMPQ2[64,16] << tmp3; Rd_VPR128.8H[80,16] = TMPQ2[80,16] << tmp3; Rd_VPR128.8H[96,16] = TMPQ2[96,16] << tmp3; Rd_VPR128.8H[112,16] = TMPQ2[112,16] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x2f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 =var:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll/2@4 # AUNIT --inst x2f20a400/mask=xffe0fc00 --status pass --comment "ext" :ushll Rd_VPR128.2D, Rn_VPR64.2S, Imm_uimm5 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); local tmp2:8 = Imm_uimm5; # simd infix Rd_VPR128.2D = TMPQ1 << tmp2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] << tmp2; Rd_VPR128.2D[64,64] = TMPQ1[64,64] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x2f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO 
ARG1 ARG2 $zext@2:16 ARG3 =var:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll/2@2 # AUNIT --inst x2f10a400/mask=xfff0fc00 --status pass --comment "ext" :ushll Rd_VPR128.4S, Rn_VPR64.4H, Imm_uimm4 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); local tmp2:4 = Imm_uimm4; # simd infix Rd_VPR128.4S = TMPQ1 << tmp2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] << tmp2; Rd_VPR128.4S[32,32] = TMPQ1[32,32] << tmp2; Rd_VPR128.4S[64,32] = TMPQ1[64,32] << tmp2; Rd_VPR128.4S[96,32] = TMPQ1[96,32] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x6f20a400/mask=xffe0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3 =var:8 =$<<@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll2/2@4 # AUNIT --inst x6f20a400/mask=xffe0fc00 --status pass --comment "ext" :ushll2 Rd_VPR128.2D, Rn_VPR128.4S, Imm_uimm5 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); local tmp3:8 = Imm_uimm5; # simd infix Rd_VPR128.2D = TMPQ2 << tmp3 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] << tmp3; Rd_VPR128.2D[64,64] = TMPQ2[64,64] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x2f08a400/mask=xfff8fc00 MATCHED 2 DOCUMENTED 
OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 =var:2 =$<<@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll/2@1 # AUNIT --inst x2f08a400/mask=xfff8fc00 --status pass --comment "ext" :ushll Rd_VPR128.8H, Rn_VPR64.8B, Imm_uimm3 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); local tmp2:2 = Imm_uimm3; # simd infix Rd_VPR128.8H = TMPQ1 << tmp2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] << tmp2; Rd_VPR128.8H[16,16] = TMPQ1[16,16] << tmp2; Rd_VPR128.8H[32,16] = TMPQ1[32,16] << tmp2; Rd_VPR128.8H[48,16] = TMPQ1[48,16] << tmp2; Rd_VPR128.8H[64,16] = TMPQ1[64,16] << tmp2; Rd_VPR128.8H[80,16] = TMPQ1[80,16] << tmp2; Rd_VPR128.8H[96,16] = TMPQ1[96,16] << tmp2; Rd_VPR128.8H[112,16] = TMPQ1[112,16] << tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # CONSTRUCT x6f10a400/mask=xfff0fc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3 =var:4 =$<<@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushll2/2@2 # AUNIT --inst x6f10a400/mask=xfff0fc00 --status pass --comment "ext" :ushll2 Rd_VPR128.4S, Rn_VPR128.8H, Imm_uimm4 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); local 
tmp3:4 = Imm_uimm4; # simd infix Rd_VPR128.4S = TMPQ2 << tmp3 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] << tmp3; Rd_VPR128.4S[32,32] = TMPQ2[32,32] << tmp3; Rd_VPR128.4S[64,32] = TMPQ2[64,32] << tmp3; Rd_VPR128.4S[96,32] = TMPQ2[96,32] << tmp3; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x6f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 =$>>@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@1 # AUNIT --inst x6f080400/mask=xfff8fc00 --status pass :ushr Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix Rd_VPR128.16B = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 Rd_VPR128.16B[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; Rd_VPR128.16B[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x6f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 
ARG3 zext:8 =$>>@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@8 # AUNIT --inst x6f400400/mask=xffc0fc00 --status pass :ushr Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x0 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = zext(Imm_shr_imm64); # simd infix Rd_VPR128.2D = Rn_VPR128.2D >> tmp1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] >> tmp1; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] >> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x2f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 =$>>@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@4 # AUNIT --inst x2f200400/mask=xffe0fc00 --status pass :ushr Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR64.2S = Rn_VPR64.2S >> tmp1 on lane size 4 Rd_VPR64.2S[0,32] = Rn_VPR64.2S[0,32] >> tmp1; Rd_VPR64.2S[32,32] = Rn_VPR64.2S[32,32] >> tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x2f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 =$>>@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@2 # AUNIT --inst x2f100400/mask=xfff0fc00 --status pass :ushr Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix Rd_VPR64.4H = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 Rd_VPR64.4H[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; Rd_VPR64.4H[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; Rd_VPR64.4H[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; Rd_VPR64.4H[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; zext_zd(Zd); # zero 
upper 24 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x6f200400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 =$>>@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@4 # AUNIT --inst x6f200400/mask=xffe0fc00 --status pass :ushr Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x0 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix Rd_VPR128.4S = Rn_VPR128.4S >> tmp1 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] >> tmp1; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] >> tmp1; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] >> tmp1; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] >> tmp1; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x2f080400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 =$>>@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@1 # AUNIT --inst x2f080400/mask=xfff8fc00 --status pass :ushr Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x0 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix Rd_VPR64.8B = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 Rd_VPR64.8B[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[16,8] = Rn_VPR64.8B[16,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; Rd_VPR64.8B[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x2f000400/mask=xbf80fc00 # CONSTRUCT x6f100400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 =$>>@2 # 
SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2@2 # AUNIT --inst x6f100400/mask=xfff0fc00 --status pass :ushr Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x0 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix Rd_VPR128.8H = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.392 USHR page C7-2916 line 170174 MATCH x7f000400/mask=xff80fc00 # CONSTRUCT x7f400400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 =>> # SMACRO(pseudo) ARG1 ARG2 ARG3:1 =NEON_ushr/2 # AUNIT --inst x7f400400/mask=xffc0fc00 --status pass # Scalar variant when immh=1xxx :ushr Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b000001 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); Rd_FPR64 = Rn_FPR64 >> tmp1; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x7e203800/mask=xff3ffc00 # CONSTRUCT x7e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2 # AUNIT --inst x7e203800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=00 Q=1 bb=1 T=FPR8 :usqadd Rd_FPR8, Rn_FPR8 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_FPR8 & Rn_FPR8 & Zd { Rd_FPR8 = NEON_usqadd(Rd_FPR8, Rn_FPR8); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x7e203800/mask=xff3ffc00 # 
CONSTRUCT x7e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2 # AUNIT --inst x7e603800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=01 Q=1 bb=1 T=FPR16 :usqadd Rd_FPR16, Rn_FPR16 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_FPR16 & Rn_FPR16 & Zd { Rd_FPR16 = NEON_usqadd(Rd_FPR16, Rn_FPR16); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x7e203800/mask=xff3ffc00 # CONSTRUCT x7ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2 # AUNIT --inst x7ea03800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=10 Q=1 bb=1 T=FPR32 :usqadd Rd_FPR32, Rn_FPR32 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_FPR32 & Rn_FPR32 & Zd { Rd_FPR32 = NEON_usqadd(Rd_FPR32, Rn_FPR32); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x7e203800/mask=xff3ffc00 # CONSTRUCT x7ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2 # AUNIT --inst x7ee03800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Scalar variant when size=11 Q=1 bb=1 T=FPR64 :usqadd Rd_FPR64, Rn_FPR64 is b_31=0 & b_30=1 & b_2429=0b111110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_FPR64 & Rn_FPR64 & Zd { Rd_FPR64 = NEON_usqadd(Rd_FPR64, Rn_FPR64); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT x2e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@1 # AUNIT --inst x2e203800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=00 Q=0 bb=0 e=1 T=VPR64.8B :usqadd Rd_VPR64.8B, Rn_VPR64.8B is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR64.8B & Rn_VPR64.8B & Zd { Rd_VPR64.8B = NEON_usqadd(Rd_VPR64.8B, Rn_VPR64.8B, 1:1); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT 
x6e203800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@1 # AUNIT --inst x6e203800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=00 Q=1 bb=0 e=1 T=VPR128.16B :usqadd Rd_VPR128.16B, Rn_VPR128.16B is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b00 & b_1021=0b100000001110 & Rd_VPR128.16B & Rn_VPR128.16B & Zd { Rd_VPR128.16B = NEON_usqadd(Rd_VPR128.16B, Rn_VPR128.16B, 1:1); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT x2e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@2 # AUNIT --inst x2e603800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=01 Q=0 bb=0 e=2 T=VPR64.4H :usqadd Rd_VPR64.4H, Rn_VPR64.4H is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR64.4H & Rn_VPR64.4H & Zd { Rd_VPR64.4H = NEON_usqadd(Rd_VPR64.4H, Rn_VPR64.4H, 2:1); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT x6e603800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@2 # AUNIT --inst x6e603800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=01 Q=1 bb=0 e=2 T=VPR128.8H :usqadd Rd_VPR128.8H, Rn_VPR128.8H is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b01 & b_1021=0b100000001110 & Rd_VPR128.8H & Rn_VPR128.8H & Zd { Rd_VPR128.8H = NEON_usqadd(Rd_VPR128.8H, Rn_VPR128.8H, 2:1); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT x2ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@4 # AUNIT --inst x2ea03800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=10 Q=0 bb=0 e=4 T=VPR64.2S :usqadd Rd_VPR64.2S, Rn_VPR64.2S is b_31=0 & b_30=0 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR64.2S & Rn_VPR64.2S & Zd { Rd_VPR64.2S = 
NEON_usqadd(Rd_VPR64.2S, Rn_VPR64.2S, 4:1); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT x6ea03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@4 # AUNIT --inst x6ea03800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=10 Q=1 bb=0 e=4 T=VPR128.4S :usqadd Rd_VPR128.4S, Rn_VPR128.4S is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b10 & b_1021=0b100000001110 & Rd_VPR128.4S & Rn_VPR128.4S & Zd { Rd_VPR128.4S = NEON_usqadd(Rd_VPR128.4S, Rn_VPR128.4S, 4:1); } # C7.2.394 USQADD page C7-2920 line 170396 MATCH x2e203800/mask=xbf3ffc00 # CONSTRUCT x6ee03800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 &=NEON_usqadd/2@8 # AUNIT --inst x6ee03800/mask=xfffffc00 --status nopcodeop --comment "nointsat" # Vector variant when size=11 Q=1 bb=0 e=8 T=VPR128.2D :usqadd Rd_VPR128.2D, Rn_VPR128.2D is b_31=0 & b_30=1 & b_2429=0b101110 & b_2223=0b11 & b_1021=0b100000001110 & Rd_VPR128.2D & Rn_VPR128.2D & Zd { Rd_VPR128.2D = NEON_usqadd(Rd_VPR128.2D, Rn_VPR128.2D, 8:1); } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x6f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@1 # AUNIT --inst x6f081400/mask=xfff8fc00 --status pass :usra Rd_VPR128.16B, Rn_VPR128.16B, Imm_shr_imm8 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { # simd infix TMPQ1 = Rn_VPR128.16B >> Imm_shr_imm8:1 on lane size 1 TMPQ1[0,8] = Rn_VPR128.16B[0,8] >> Imm_shr_imm8:1; TMPQ1[8,8] = Rn_VPR128.16B[8,8] >> Imm_shr_imm8:1; TMPQ1[16,8] = Rn_VPR128.16B[16,8] >> Imm_shr_imm8:1; TMPQ1[24,8] = Rn_VPR128.16B[24,8] >> Imm_shr_imm8:1; TMPQ1[32,8] = Rn_VPR128.16B[32,8] >> Imm_shr_imm8:1; TMPQ1[40,8] = Rn_VPR128.16B[40,8] >> Imm_shr_imm8:1; TMPQ1[48,8] = Rn_VPR128.16B[48,8] >> Imm_shr_imm8:1; 
TMPQ1[56,8] = Rn_VPR128.16B[56,8] >> Imm_shr_imm8:1; TMPQ1[64,8] = Rn_VPR128.16B[64,8] >> Imm_shr_imm8:1; TMPQ1[72,8] = Rn_VPR128.16B[72,8] >> Imm_shr_imm8:1; TMPQ1[80,8] = Rn_VPR128.16B[80,8] >> Imm_shr_imm8:1; TMPQ1[88,8] = Rn_VPR128.16B[88,8] >> Imm_shr_imm8:1; TMPQ1[96,8] = Rn_VPR128.16B[96,8] >> Imm_shr_imm8:1; TMPQ1[104,8] = Rn_VPR128.16B[104,8] >> Imm_shr_imm8:1; TMPQ1[112,8] = Rn_VPR128.16B[112,8] >> Imm_shr_imm8:1; TMPQ1[120,8] = Rn_VPR128.16B[120,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR128.16B = Rd_VPR128.16B + TMPQ1 on lane size 1 Rd_VPR128.16B[0,8] = Rd_VPR128.16B[0,8] + TMPQ1[0,8]; Rd_VPR128.16B[8,8] = Rd_VPR128.16B[8,8] + TMPQ1[8,8]; Rd_VPR128.16B[16,8] = Rd_VPR128.16B[16,8] + TMPQ1[16,8]; Rd_VPR128.16B[24,8] = Rd_VPR128.16B[24,8] + TMPQ1[24,8]; Rd_VPR128.16B[32,8] = Rd_VPR128.16B[32,8] + TMPQ1[32,8]; Rd_VPR128.16B[40,8] = Rd_VPR128.16B[40,8] + TMPQ1[40,8]; Rd_VPR128.16B[48,8] = Rd_VPR128.16B[48,8] + TMPQ1[48,8]; Rd_VPR128.16B[56,8] = Rd_VPR128.16B[56,8] + TMPQ1[56,8]; Rd_VPR128.16B[64,8] = Rd_VPR128.16B[64,8] + TMPQ1[64,8]; Rd_VPR128.16B[72,8] = Rd_VPR128.16B[72,8] + TMPQ1[72,8]; Rd_VPR128.16B[80,8] = Rd_VPR128.16B[80,8] + TMPQ1[80,8]; Rd_VPR128.16B[88,8] = Rd_VPR128.16B[88,8] + TMPQ1[88,8]; Rd_VPR128.16B[96,8] = Rd_VPR128.16B[96,8] + TMPQ1[96,8]; Rd_VPR128.16B[104,8] = Rd_VPR128.16B[104,8] + TMPQ1[104,8]; Rd_VPR128.16B[112,8] = Rd_VPR128.16B[112,8] + TMPQ1[112,8]; Rd_VPR128.16B[120,8] = Rd_VPR128.16B[120,8] + TMPQ1[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x6f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 $>>@8 &=$+@8 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@8 # AUNIT --inst x6f401400/mask=xffc0fc00 --status pass :usra Rd_VPR128.2D, Rn_VPR128.2D, Imm_shr_imm64 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2223=0b01 & Imm_shr_imm64 & b_1115=0x2 & b_1010=1 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { local tmp1:8 = 
zext(Imm_shr_imm64); # simd infix TMPQ1 = Rn_VPR128.2D >> tmp1 on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] >> tmp1; TMPQ1[64,64] = Rn_VPR128.2D[64,64] >> tmp1; # simd infix Rd_VPR128.2D = Rd_VPR128.2D + TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rd_VPR128.2D[0,64] + TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rd_VPR128.2D[64,64] + TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x2f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@4 # AUNIT --inst x2f201400/mask=xffe0fc00 --status pass :usra Rd_VPR64.2S, Rn_VPR64.2S, Imm_shr_imm32 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix TMPD1 = Rn_VPR64.2S >> tmp1 on lane size 4 TMPD1[0,32] = Rn_VPR64.2S[0,32] >> tmp1; TMPD1[32,32] = Rn_VPR64.2S[32,32] >> tmp1; # simd infix Rd_VPR64.2S = Rd_VPR64.2S + TMPD1 on lane size 4 Rd_VPR64.2S[0,32] = Rd_VPR64.2S[0,32] + TMPD1[0,32]; Rd_VPR64.2S[32,32] = Rd_VPR64.2S[32,32] + TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x2f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@2 # AUNIT --inst x2f101400/mask=xfff0fc00 --status pass :usra Rd_VPR64.4H, Rn_VPR64.4H, Imm_shr_imm16 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { # simd infix TMPD1 = Rn_VPR64.4H >> Imm_shr_imm16:2 on lane size 2 TMPD1[0,16] = Rn_VPR64.4H[0,16] >> Imm_shr_imm16:2; TMPD1[16,16] = Rn_VPR64.4H[16,16] >> Imm_shr_imm16:2; TMPD1[32,16] = Rn_VPR64.4H[32,16] >> Imm_shr_imm16:2; TMPD1[48,16] = Rn_VPR64.4H[48,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR64.4H = Rd_VPR64.4H + 
TMPD1 on lane size 2 Rd_VPR64.4H[0,16] = Rd_VPR64.4H[0,16] + TMPD1[0,16]; Rd_VPR64.4H[16,16] = Rd_VPR64.4H[16,16] + TMPD1[16,16]; Rd_VPR64.4H[32,16] = Rd_VPR64.4H[32,16] + TMPD1[32,16]; Rd_VPR64.4H[48,16] = Rd_VPR64.4H[48,16] + TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x6f201400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 =var:4 $>>@4 &=$+@4 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@4 # AUNIT --inst x6f201400/mask=xffe0fc00 --status pass :usra Rd_VPR128.4S, Rn_VPR128.4S, Imm_shr_imm32 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_shr_imm32 & b_1115=0x2 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { local tmp1:4 = Imm_shr_imm32; # simd infix TMPQ1 = Rn_VPR128.4S >> tmp1 on lane size 4 TMPQ1[0,32] = Rn_VPR128.4S[0,32] >> tmp1; TMPQ1[32,32] = Rn_VPR128.4S[32,32] >> tmp1; TMPQ1[64,32] = Rn_VPR128.4S[64,32] >> tmp1; TMPQ1[96,32] = Rn_VPR128.4S[96,32] >> tmp1; # simd infix Rd_VPR128.4S = Rd_VPR128.4S + TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = Rd_VPR128.4S[0,32] + TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rd_VPR128.4S[32,32] + TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rd_VPR128.4S[64,32] + TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rd_VPR128.4S[96,32] + TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x2f081400/mask=xfff8fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:1 $>>@1 &=$+@1 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@1 # AUNIT --inst x2f081400/mask=xfff8fc00 --status pass :usra Rd_VPR64.8B, Rn_VPR64.8B, Imm_shr_imm8 is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_shr_imm8 & b_1115=0x2 & b_1010=1 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { # simd infix TMPD1 = Rn_VPR64.8B >> Imm_shr_imm8:1 on lane size 1 TMPD1[0,8] = Rn_VPR64.8B[0,8] >> Imm_shr_imm8:1; TMPD1[8,8] = Rn_VPR64.8B[8,8] >> Imm_shr_imm8:1; TMPD1[16,8] = Rn_VPR64.8B[16,8] >> 
Imm_shr_imm8:1; TMPD1[24,8] = Rn_VPR64.8B[24,8] >> Imm_shr_imm8:1; TMPD1[32,8] = Rn_VPR64.8B[32,8] >> Imm_shr_imm8:1; TMPD1[40,8] = Rn_VPR64.8B[40,8] >> Imm_shr_imm8:1; TMPD1[48,8] = Rn_VPR64.8B[48,8] >> Imm_shr_imm8:1; TMPD1[56,8] = Rn_VPR64.8B[56,8] >> Imm_shr_imm8:1; # simd infix Rd_VPR64.8B = Rd_VPR64.8B + TMPD1 on lane size 1 Rd_VPR64.8B[0,8] = Rd_VPR64.8B[0,8] + TMPD1[0,8]; Rd_VPR64.8B[8,8] = Rd_VPR64.8B[8,8] + TMPD1[8,8]; Rd_VPR64.8B[16,8] = Rd_VPR64.8B[16,8] + TMPD1[16,8]; Rd_VPR64.8B[24,8] = Rd_VPR64.8B[24,8] + TMPD1[24,8]; Rd_VPR64.8B[32,8] = Rd_VPR64.8B[32,8] + TMPD1[32,8]; Rd_VPR64.8B[40,8] = Rd_VPR64.8B[40,8] + TMPD1[40,8]; Rd_VPR64.8B[48,8] = Rd_VPR64.8B[48,8] + TMPD1[48,8]; Rd_VPR64.8B[56,8] = Rd_VPR64.8B[56,8] + TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x2f001400/mask=xbf80fc00 # CONSTRUCT x6f101400/mask=xfff0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3:2 $>>@2 &=$+@2 # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3@2 # AUNIT --inst x6f101400/mask=xfff0fc00 --status pass :usra Rd_VPR128.8H, Rn_VPR128.8H, Imm_shr_imm16 is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_shr_imm16 & b_1115=0x2 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd infix TMPQ1 = Rn_VPR128.8H >> Imm_shr_imm16:2 on lane size 2 TMPQ1[0,16] = Rn_VPR128.8H[0,16] >> Imm_shr_imm16:2; TMPQ1[16,16] = Rn_VPR128.8H[16,16] >> Imm_shr_imm16:2; TMPQ1[32,16] = Rn_VPR128.8H[32,16] >> Imm_shr_imm16:2; TMPQ1[48,16] = Rn_VPR128.8H[48,16] >> Imm_shr_imm16:2; TMPQ1[64,16] = Rn_VPR128.8H[64,16] >> Imm_shr_imm16:2; TMPQ1[80,16] = Rn_VPR128.8H[80,16] >> Imm_shr_imm16:2; TMPQ1[96,16] = Rn_VPR128.8H[96,16] >> Imm_shr_imm16:2; TMPQ1[112,16] = Rn_VPR128.8H[112,16] >> Imm_shr_imm16:2; # simd infix Rd_VPR128.8H = Rd_VPR128.8H + TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rd_VPR128.8H[0,16] + TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rd_VPR128.8H[16,16] + TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rd_VPR128.8H[32,16] + TMPQ1[32,16]; 
Rd_VPR128.8H[48,16] = Rd_VPR128.8H[48,16] + TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rd_VPR128.8H[64,16] + TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rd_VPR128.8H[80,16] + TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rd_VPR128.8H[96,16] + TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rd_VPR128.8H[112,16] + TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.395 USRA page C7-2922 line 170519 MATCH x7f001400/mask=xff80fc00 # CONSTRUCT x7f401400/mask=xffc0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 zext:8 >> &=+ # SMACRO(pseudo) ARG1 ARG2 ARG3:1 &=NEON_usra/3 # AUNIT --inst x7f401400/mask=xffc0fc00 --status pass # Scalar variant when immh=1xxx :usra Rd_FPR64, Rn_FPR64, Imm_shr_imm64 is b_2331=0b011111110 & b_22=1 & b_1015=0b000101 & Rd_FPR64 & Rn_FPR64 & Imm_shr_imm64 & Zd { local tmp1:8 = zext(Imm_shr_imm64); local tmp2:8 = Rn_FPR64 >> tmp1; Rd_FPR64 = Rd_FPR64 + tmp2; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.396 USUBL, USUBL2 page C7-2925 line 170683 MATCH x2e202000/mask=xbf20fc00 # CONSTRUCT x6ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@4:16 ARG3[1]:8 $zext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl2/2@4 # AUNIT --inst x6ea02000/mask=xffe0fc00 --status pass --comment "ext" :usubl2 Rd_VPR128.2D, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x2 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); TMPD3 = Rm_VPR128.4S[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 4 to 8) TMPQ4[0,64] = zext(TMPD3[0,32]); TMPQ4[64,64] = zext(TMPD3[32,32]); # simd infix Rd_VPR128.2D = TMPQ2 - TMPQ4 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ2[0,64] - TMPQ4[0,64]; Rd_VPR128.2D[64,64] = TMPQ2[64,64] - TMPQ4[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.396 USUBL, USUBL2 page C7-2925 line 170683 MATCH 
x2e202000/mask=xbf20fc00 # CONSTRUCT x6e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@2:16 ARG3[1]:8 $zext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl2/2@2 # AUNIT --inst x6e602000/mask=xffe0fc00 --status pass --comment "ext" :usubl2 Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x2 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); TMPD3 = Rm_VPR128.8H[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 2 to 4) TMPQ4[0,32] = zext(TMPD3[0,16]); TMPQ4[32,32] = zext(TMPD3[16,16]); TMPQ4[64,32] = zext(TMPD3[32,16]); TMPQ4[96,32] = zext(TMPD3[48,16]); # simd infix Rd_VPR128.4S = TMPQ2 - TMPQ4 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ2[0,32] - TMPQ4[0,32]; Rd_VPR128.4S[32,32] = TMPQ2[32,32] - TMPQ4[32,32]; Rd_VPR128.4S[64,32] = TMPQ2[64,32] - TMPQ4[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32] - TMPQ4[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.396 USUBL, USUBL2 page C7-2925 line 170683 MATCH x2e202000/mask=xbf20fc00 # CONSTRUCT x6e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 $zext@1:16 ARG3[1]:8 $zext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl2/2@1 # AUNIT --inst x6e202000/mask=xffe0fc00 --status pass --comment "ext" :usubl2 Rd_VPR128.8H, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x2 & b_1011=0 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = 
zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); TMPD3 = Rm_VPR128.16B[64,64]; # simd resize TMPQ4 = zext(TMPD3) (lane size 1 to 2) TMPQ4[0,16] = zext(TMPD3[0,8]); TMPQ4[16,16] = zext(TMPD3[8,8]); TMPQ4[32,16] = zext(TMPD3[16,8]); TMPQ4[48,16] = zext(TMPD3[24,8]); TMPQ4[64,16] = zext(TMPD3[32,8]); TMPQ4[80,16] = zext(TMPD3[40,8]); TMPQ4[96,16] = zext(TMPD3[48,8]); TMPQ4[112,16] = zext(TMPD3[56,8]); # simd infix Rd_VPR128.8H = TMPQ2 - TMPQ4 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ2[0,16] - TMPQ4[0,16]; Rd_VPR128.8H[16,16] = TMPQ2[16,16] - TMPQ4[16,16]; Rd_VPR128.8H[32,16] = TMPQ2[32,16] - TMPQ4[32,16]; Rd_VPR128.8H[48,16] = TMPQ2[48,16] - TMPQ4[48,16]; Rd_VPR128.8H[64,16] = TMPQ2[64,16] - TMPQ4[64,16]; Rd_VPR128.8H[80,16] = TMPQ2[80,16] - TMPQ4[80,16]; Rd_VPR128.8H[96,16] = TMPQ2[96,16] - TMPQ4[96,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16] - TMPQ4[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.396 USUBL, USUBL2 page C7-2925 line 170683 MATCH x2e202000/mask=xbf20fc00 # CONSTRUCT x2ea02000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:16 ARG3 $zext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl/2@4 # AUNIT --inst x2ea02000/mask=xffe0fc00 --status pass --comment "ext" :usubl Rd_VPR128.2D, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x2 & b_1011=0 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rn_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rn_VPR64.2S[32,32]); # simd resize TMPQ2 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ2[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ2[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = TMPQ1 - TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] - TMPQ2[0,64]; Rd_VPR128.2D[64,64] = TMPQ1[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.396 USUBL, USUBL2 page C7-2925 line 170683 MATCH 
x2e202000/mask=xbf20fc00 # CONSTRUCT x2e602000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:16 ARG3 $zext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl/2@2 # AUNIT --inst x2e602000/mask=xffe0fc00 --status pass --comment "ext" :usubl Rd_VPR128.4S, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x2 & b_1011=0 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rn_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rn_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rn_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rn_VPR64.4H[48,16]); # simd resize TMPQ2 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ2[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ2[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ2[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ2[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = TMPQ1 - TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32] - TMPQ2[0,32]; Rd_VPR128.4S[32,32] = TMPQ1[32,32] - TMPQ2[32,32]; Rd_VPR128.4S[64,32] = TMPQ1[64,32] - TMPQ2[64,32]; Rd_VPR128.4S[96,32] = TMPQ1[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.396 USUBL, USUBL2 page C7-2925 line 170683 MATCH x2e202000/mask=xbf20fc00 # CONSTRUCT x2e202000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@1:16 ARG3 $zext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubl/2@1 # AUNIT --inst x2e202000/mask=xffe0fc00 --status pass --comment "ext" :usubl Rd_VPR128.8H, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x2 & b_1011=0 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rn_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rn_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rn_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rn_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rn_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rn_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rn_VPR64.8B[40,8]); 
TMPQ1[96,16] = zext(Rn_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rn_VPR64.8B[56,8]); # simd resize TMPQ2 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ2[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ2[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ2[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ2[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ2[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ2[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ2[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ2[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = TMPQ1 - TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16] - TMPQ2[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[16,16] - TMPQ2[16,16]; Rd_VPR128.8H[32,16] = TMPQ1[32,16] - TMPQ2[32,16]; Rd_VPR128.8H[48,16] = TMPQ1[48,16] - TMPQ2[48,16]; Rd_VPR128.8H[64,16] = TMPQ1[64,16] - TMPQ2[64,16]; Rd_VPR128.8H[80,16] = TMPQ1[80,16] - TMPQ2[80,16]; Rd_VPR128.8H[96,16] = TMPQ1[96,16] - TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ1[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.397 USUBW, USUBW2 page C7-2927 line 170806 MATCH x2e203000/mask=xbf20fc00 # CONSTRUCT x6ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $zext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw2/2@4 # AUNIT --inst x6ea03000/mask=xffe0fc00 --status pass --comment "ext" :usubw2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR128.4S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPD1 = Rm_VPR128.4S[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 4 to 8) TMPQ2[0,64] = zext(TMPD1[0,32]); TMPQ2[64,64] = zext(TMPD1[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ2 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ2[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.397 USUBW, USUBW2 page C7-2927 line 170806 MATCH x2e203000/mask=xbf20fc00 # CONSTRUCT x6e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 
ARG2 ARG3[1]:8 $zext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw2/2@2 # AUNIT --inst x6e603000/mask=xffe0fc00 --status pass --comment "ext" :usubw2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR128.8H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPD1 = Rm_VPR128.8H[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 2 to 4) TMPQ2[0,32] = zext(TMPD1[0,16]); TMPQ2[32,32] = zext(TMPD1[16,16]); TMPQ2[64,32] = zext(TMPD1[32,16]); TMPQ2[96,32] = zext(TMPD1[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ2 on lane size 4 Rd_VPR128.4S[0,32] = Rn_VPR128.4S[0,32] - TMPQ2[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ2[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ2[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.397 USUBW, USUBW2 page C7-2927 line 170806 MATCH x2e203000/mask=xbf20fc00 # CONSTRUCT x6e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3[1]:8 $zext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw2/2@1 # AUNIT --inst x6e203000/mask=xffe0fc00 --status pass --comment "ext" :usubw2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR128.16B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPD1 = Rm_VPR128.16B[64,64]; # simd resize TMPQ2 = zext(TMPD1) (lane size 1 to 2) TMPQ2[0,16] = zext(TMPD1[0,8]); TMPQ2[16,16] = zext(TMPD1[8,8]); TMPQ2[32,16] = zext(TMPD1[16,8]); TMPQ2[48,16] = zext(TMPD1[24,8]); TMPQ2[64,16] = zext(TMPD1[32,8]); TMPQ2[80,16] = zext(TMPD1[40,8]); TMPQ2[96,16] = zext(TMPD1[48,8]); TMPQ2[112,16] = zext(TMPD1[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ2 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ2[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ2[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ2[32,16]; 
Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ2[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ2[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ2[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ2[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.397 USUBW, USUBW2 page C7-2927 line 170806 MATCH x2e203000/mask=xbf20fc00 # CONSTRUCT x2ea03000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $zext@4:16 =$-@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw/2@4 # AUNIT --inst x2ea03000/mask=xffe0fc00 --status pass --comment "ext" :usubw Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=2 & b_2121=1 & Rm_VPR64.2S & b_1215=0x3 & b_1011=0 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { # simd resize TMPQ1 = zext(Rm_VPR64.2S) (lane size 4 to 8) TMPQ1[0,64] = zext(Rm_VPR64.2S[0,32]); TMPQ1[64,64] = zext(Rm_VPR64.2S[32,32]); # simd infix Rd_VPR128.2D = Rn_VPR128.2D - TMPQ1 on lane size 8 Rd_VPR128.2D[0,64] = Rn_VPR128.2D[0,64] - TMPQ1[0,64]; Rd_VPR128.2D[64,64] = Rn_VPR128.2D[64,64] - TMPQ1[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.397 USUBW, USUBW2 page C7-2927 line 170806 MATCH x2e203000/mask=xbf20fc00 # CONSTRUCT x2e603000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $zext@2:16 =$-@4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw/2@2 # AUNIT --inst x2e603000/mask=xffe0fc00 --status pass --comment "ext" :usubw Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=1 & b_2121=1 & Rm_VPR64.4H & b_1215=0x3 & b_1011=0 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { # simd resize TMPQ1 = zext(Rm_VPR64.4H) (lane size 2 to 4) TMPQ1[0,32] = zext(Rm_VPR64.4H[0,16]); TMPQ1[32,32] = zext(Rm_VPR64.4H[16,16]); TMPQ1[64,32] = zext(Rm_VPR64.4H[32,16]); TMPQ1[96,32] = zext(Rm_VPR64.4H[48,16]); # simd infix Rd_VPR128.4S = Rn_VPR128.4S - TMPQ1 on lane size 4 Rd_VPR128.4S[0,32] = 
Rn_VPR128.4S[0,32] - TMPQ1[0,32]; Rd_VPR128.4S[32,32] = Rn_VPR128.4S[32,32] - TMPQ1[32,32]; Rd_VPR128.4S[64,32] = Rn_VPR128.4S[64,32] - TMPQ1[64,32]; Rd_VPR128.4S[96,32] = Rn_VPR128.4S[96,32] - TMPQ1[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.397 USUBW, USUBW2 page C7-2927 line 170806 MATCH x2e203000/mask=xbf20fc00 # CONSTRUCT x2e203000/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $zext@1:16 =$-@2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_usubw/2@1 # AUNIT --inst x2e203000/mask=xffe0fc00 --status pass --comment "ext" :usubw Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xe & advSIMD3.size=0 & b_2121=1 & Rm_VPR64.8B & b_1215=0x3 & b_1011=0 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { # simd resize TMPQ1 = zext(Rm_VPR64.8B) (lane size 1 to 2) TMPQ1[0,16] = zext(Rm_VPR64.8B[0,8]); TMPQ1[16,16] = zext(Rm_VPR64.8B[8,8]); TMPQ1[32,16] = zext(Rm_VPR64.8B[16,8]); TMPQ1[48,16] = zext(Rm_VPR64.8B[24,8]); TMPQ1[64,16] = zext(Rm_VPR64.8B[32,8]); TMPQ1[80,16] = zext(Rm_VPR64.8B[40,8]); TMPQ1[96,16] = zext(Rm_VPR64.8B[48,8]); TMPQ1[112,16] = zext(Rm_VPR64.8B[56,8]); # simd infix Rd_VPR128.8H = Rn_VPR128.8H - TMPQ1 on lane size 2 Rd_VPR128.8H[0,16] = Rn_VPR128.8H[0,16] - TMPQ1[0,16]; Rd_VPR128.8H[16,16] = Rn_VPR128.8H[16,16] - TMPQ1[16,16]; Rd_VPR128.8H[32,16] = Rn_VPR128.8H[32,16] - TMPQ1[32,16]; Rd_VPR128.8H[48,16] = Rn_VPR128.8H[48,16] - TMPQ1[48,16]; Rd_VPR128.8H[64,16] = Rn_VPR128.8H[64,16] - TMPQ1[64,16]; Rd_VPR128.8H[80,16] = Rn_VPR128.8H[80,16] - TMPQ1[80,16]; Rd_VPR128.8H[96,16] = Rn_VPR128.8H[96,16] - TMPQ1[96,16]; Rd_VPR128.8H[112,16] = Rn_VPR128.8H[112,16] - TMPQ1[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # CONSTRUCT x6f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$zext@1:16 # SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl2/1@1 # 
AUNIT --inst x6f08a400/mask=xfffffc00 --status pass --comment "ext" :uxtl2 Rd_VPR128.8H, Rn_VPR128.16B is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.16B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR128.16B[64,64]; # simd resize Rd_VPR128.8H = zext(TMPD1) (lane size 1 to 2) Rd_VPR128.8H[0,16] = zext(TMPD1[0,8]); Rd_VPR128.8H[16,16] = zext(TMPD1[8,8]); Rd_VPR128.8H[32,16] = zext(TMPD1[16,8]); Rd_VPR128.8H[48,16] = zext(TMPD1[24,8]); Rd_VPR128.8H[64,16] = zext(TMPD1[32,8]); Rd_VPR128.8H[80,16] = zext(TMPD1[40,8]); Rd_VPR128.8H[96,16] = zext(TMPD1[48,8]); Rd_VPR128.8H[112,16] = zext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # CONSTRUCT x2f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$zext@4:16 # SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl/1@4 # AUNIT --inst x2f20a400/mask=xfffffc00 --status pass --comment "ext" :uxtl Rd_VPR128.2D, Rn_VPR64.2S is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.2S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR64.2S; # simd resize Rd_VPR128.2D = zext(TMPD1) (lane size 4 to 8) Rd_VPR128.2D[0,64] = zext(TMPD1[0,32]); Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # CONSTRUCT x2f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$zext@2:16 # SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl/1@2 # AUNIT --inst x2f10a400/mask=xfffffc00 --status pass --comment "ext" :uxtl Rd_VPR128.4S, Rn_VPR64.4H is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.4H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR64.4H; # simd resize 
Rd_VPR128.4S = zext(TMPD1) (lane size 2 to 4) Rd_VPR128.4S[0,32] = zext(TMPD1[0,16]); Rd_VPR128.4S[32,32] = zext(TMPD1[16,16]); Rd_VPR128.4S[64,32] = zext(TMPD1[32,16]); Rd_VPR128.4S[96,32] = zext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # CONSTRUCT x6f20a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$zext@4:16 # SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl2/1@4 # AUNIT --inst x6f20a400/mask=xfffffc00 --status pass --comment "ext" :uxtl2 Rd_VPR128.2D, Rn_VPR128.4S is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2123=1 & Imm_uimm5=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.4S & Rd_VPR128.2D & Zd { TMPD1 = Rn_VPR128.4S[64,64]; # simd resize Rd_VPR128.2D = zext(TMPD1) (lane size 4 to 8) Rd_VPR128.2D[0,64] = zext(TMPD1[0,32]); Rd_VPR128.2D[64,64] = zext(TMPD1[32,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.398 UXTL, UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # CONSTRUCT x2f08a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 =var =$zext@1:16 # SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl/1@1 # AUNIT --inst x2f08a400/mask=xfffffc00 --status pass --comment "ext" :uxtl Rd_VPR128.8H, Rn_VPR64.8B is b_3131=0 & q=0 & u=1 & b_2428=0xf & b_1923=0x1 & Imm_uimm3=0 & b_1115=0x14 & b_1010=1 & Rn_VPR64.8B & Rd_VPR128.8H & Zd { TMPD1 = Rn_VPR64.8B; # simd resize Rd_VPR128.8H = zext(TMPD1) (lane size 1 to 2) Rd_VPR128.8H[0,16] = zext(TMPD1[0,8]); Rd_VPR128.8H[16,16] = zext(TMPD1[8,8]); Rd_VPR128.8H[32,16] = zext(TMPD1[16,8]); Rd_VPR128.8H[48,16] = zext(TMPD1[24,8]); Rd_VPR128.8H[64,16] = zext(TMPD1[32,8]); Rd_VPR128.8H[80,16] = zext(TMPD1[40,8]); Rd_VPR128.8H[96,16] = zext(TMPD1[48,8]); Rd_VPR128.8H[112,16] = zext(TMPD1[56,8]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.398 UXTL, 
UXTL2 page C7-2929 line 170931 MATCH x2f00a400/mask=xbf87fc00 # C7.2.391 USHLL, USHLL2 page C7-2914 line 170042 MATCH x2f00a400/mask=xbf80fc00 # CONSTRUCT x6f10a400/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # SMACRO ARG1 ARG2[1]:8 =$zext@2:16 # SMACRO(pseudo) ARG1 ARG2 =NEON_uxtl2/1@2 # AUNIT --inst x6f10a400/mask=xfffffc00 --status pass --comment "ext" :uxtl2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0 & q=1 & u=1 & b_2428=0xf & b_2023=0x1 & Imm_uimm4=0 & b_1115=0x14 & b_1010=1 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPD1 = Rn_VPR128.8H[64,64]; # simd resize Rd_VPR128.4S = zext(TMPD1) (lane size 2 to 4) Rd_VPR128.4S[0,32] = zext(TMPD1[0,16]); Rd_VPR128.4S[32,32] = zext(TMPD1[16,16]); Rd_VPR128.4S[64,32] = zext(TMPD1[32,16]); Rd_VPR128.4S[96,32] = zext(TMPD1[48,16]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x4e001800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1@4-2@6-3@8-4@10-5@12-6@14-7:1 swap &=$shuffle@0-8@2-9@4-10@6-11@8-12@10-13@12-14@14-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@1 # AUNIT --inst x4e001800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@2-1@4-2@6-3@8-4@10-5@12-6@14-7) lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[0,8]; Rd_VPR128.16B[8,8] = TMPQ1[16,8]; Rd_VPR128.16B[16,8] = TMPQ1[32,8]; Rd_VPR128.16B[24,8] = TMPQ1[48,8]; Rd_VPR128.16B[32,8] = TMPQ1[64,8]; Rd_VPR128.16B[40,8] = TMPQ1[80,8]; Rd_VPR128.16B[48,8] = TMPQ1[96,8]; Rd_VPR128.16B[56,8] = TMPQ1[112,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-8@2-9@4-10@6-11@8-12@10-13@12-14@14-15) lane size 1 Rd_VPR128.16B[64,8] = TMPQ2[0,8]; Rd_VPR128.16B[72,8] = TMPQ2[16,8]; Rd_VPR128.16B[80,8] = 
TMPQ2[32,8]; Rd_VPR128.16B[88,8] = TMPQ2[48,8]; Rd_VPR128.16B[96,8] = TMPQ2[64,8]; Rd_VPR128.16B[104,8] = TMPQ2[80,8]; Rd_VPR128.16B[112,8] = TMPQ2[96,8]; Rd_VPR128.16B[120,8] = TMPQ2[112,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x4ec01800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:8 swap &=$shuffle@0-1:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@8 # AUNIT --inst x4ec01800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 Rd_VPR128.2D[64,64] = TMPQ2[0,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x0e801800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:4 swap &=$shuffle@0-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@4 # AUNIT --inst x0e801800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[0,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 Rd_VPR64.2S[32,32] = TMPD2[0,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x0e401800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1:2 swap &=$shuffle@0-2@2-3:2 # SMACRO(pseudo) 
ARG1 ARG2 ARG3 =NEON_uzp1/2@2 # AUNIT --inst x0e401800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@2-1) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[0,16]; Rd_VPR64.4H[16,16] = TMPD1[32,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@0-2@2-3) lane size 2 Rd_VPR64.4H[32,16] = TMPD2[0,16]; Rd_VPR64.4H[48,16] = TMPD2[32,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x4e801800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1:4 swap &=$shuffle@0-2@2-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@4 # AUNIT --inst x4e801800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@2-1) lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32]; Rd_VPR128.4S[32,32] = TMPQ1[64,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-2@2-3) lane size 4 Rd_VPR128.4S[64,32] = TMPQ2[0,32]; Rd_VPR128.4S[96,32] = TMPQ2[64,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x0e001800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1@4-2@6-3:1 swap &=$shuffle@0-4@2-5@4-6@6-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@1 # AUNIT --inst x0e001800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD2 = 
Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@2-1@4-2@6-3) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[0,8]; Rd_VPR64.8B[8,8] = TMPD1[16,8]; Rd_VPR64.8B[16,8] = TMPD1[32,8]; Rd_VPR64.8B[24,8] = TMPD1[48,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@0-4@2-5@4-6@6-7) lane size 1 Rd_VPR64.8B[32,8] = TMPD2[0,8]; Rd_VPR64.8B[40,8] = TMPD2[16,8]; Rd_VPR64.8B[48,8] = TMPD2[32,8]; Rd_VPR64.8B[56,8] = TMPD2[48,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.399 UZP1 page C7-2931 line 171030 MATCH x0e001800/mask=xbf20fc00 # CONSTRUCT x4e401800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@2-1@4-2@6-3:2 swap &=$shuffle@0-4@2-5@4-6@6-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp1/2@2 # AUNIT --inst x4e401800/mask=xffe0fc00 --status pass :uzp1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=1 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@2-1@4-2@6-3) lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16]; Rd_VPR128.8H[16,16] = TMPQ1[32,16]; Rd_VPR128.8H[32,16] = TMPQ1[64,16]; Rd_VPR128.8H[48,16] = TMPQ1[96,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-4@2-5@4-6@6-7) lane size 2 Rd_VPR128.8H[64,16] = TMPQ2[0,16]; Rd_VPR128.8H[80,16] = TMPQ2[32,16]; Rd_VPR128.8H[96,16] = TMPQ2[64,16]; Rd_VPR128.8H[112,16] = TMPQ2[96,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x4e005800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7:1 swap &=$shuffle@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@1 # AUNIT --inst x4e005800/mask=xffe0fc00 --status pass :uzp2 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & 
Rm_VPR128.16B & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@1-0@3-1@5-2@7-3@9-4@11-5@13-6@15-7) lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[8,8]; Rd_VPR128.16B[8,8] = TMPQ1[24,8]; Rd_VPR128.16B[16,8] = TMPQ1[40,8]; Rd_VPR128.16B[24,8] = TMPQ1[56,8]; Rd_VPR128.16B[32,8] = TMPQ1[72,8]; Rd_VPR128.16B[40,8] = TMPQ1[88,8]; Rd_VPR128.16B[48,8] = TMPQ1[104,8]; Rd_VPR128.16B[56,8] = TMPQ1[120,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@1-8@3-9@5-10@7-11@9-12@11-13@13-14@15-15) lane size 1 Rd_VPR128.16B[64,8] = TMPQ2[8,8]; Rd_VPR128.16B[72,8] = TMPQ2[24,8]; Rd_VPR128.16B[80,8] = TMPQ2[40,8]; Rd_VPR128.16B[88,8] = TMPQ2[56,8]; Rd_VPR128.16B[96,8] = TMPQ2[72,8]; Rd_VPR128.16B[104,8] = TMPQ2[88,8]; Rd_VPR128.16B[112,8] = TMPQ2[104,8]; Rd_VPR128.16B[120,8] = TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x4ec05800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:8 swap &=$shuffle@1-1:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@8 # AUNIT --inst x4ec05800/mask=xffe0fc00 --status pass :uzp2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[64,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 Rd_VPR128.2D[64,64] = TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x0e805800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:4 swap &=$shuffle@1-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@4 # AUNIT --inst x0e805800/mask=xffe0fc00 --status 
pass :uzp2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[32,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 Rd_VPR64.2S[32,32] = TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x0e405800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1:2 swap &=$shuffle@1-2@3-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@2 # AUNIT --inst x0e405800/mask=xffe0fc00 --status pass :uzp2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@1-0@3-1) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[16,16]; Rd_VPR64.4H[16,16] = TMPD1[48,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@1-2@3-3) lane size 2 Rd_VPR64.4H[32,16] = TMPD2[16,16]; Rd_VPR64.4H[48,16] = TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x4e805800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1:4 swap &=$shuffle@1-2@3-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@4 # AUNIT --inst x4e805800/mask=xffe0fc00 --status pass :uzp2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@1-0@3-1) lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[32,32]; Rd_VPR128.4S[32,32] = TMPQ1[96,32]; # simd shuffle 
Rd_VPR128.4S = TMPQ2 (@1-2@3-3) lane size 4 Rd_VPR128.4S[64,32] = TMPQ2[32,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x0e005800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1@5-2@7-3:1 swap &=$shuffle@1-4@3-5@5-6@7-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@1 # AUNIT --inst x0e005800/mask=xffe0fc00 --status pass :uzp2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@1-0@3-1@5-2@7-3) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[8,8]; Rd_VPR64.8B[8,8] = TMPD1[24,8]; Rd_VPR64.8B[16,8] = TMPD1[40,8]; Rd_VPR64.8B[24,8] = TMPD1[56,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@1-4@3-5@5-6@7-7) lane size 1 Rd_VPR64.8B[32,8] = TMPD2[8,8]; Rd_VPR64.8B[40,8] = TMPD2[24,8]; Rd_VPR64.8B[48,8] = TMPD2[40,8]; Rd_VPR64.8B[56,8] = TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.400 UZP2 page C7-2933 line 171142 MATCH x0e005800/mask=xbf20fc00 # CONSTRUCT x4e405800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0@3-1@5-2@7-3:2 swap &=$shuffle@1-4@3-5@5-6@7-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_uzp2/2@2 # AUNIT --inst x4e405800/mask=xffe0fc00 --status pass :uzp2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=5 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@1-0@3-1@5-2@7-3) lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[16,16]; Rd_VPR128.8H[16,16] = TMPQ1[48,16]; Rd_VPR128.8H[32,16] = TMPQ1[80,16]; Rd_VPR128.8H[48,16] = TMPQ1[112,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@1-4@3-5@5-6@7-7) 
lane size 2 Rd_VPR128.8H[64,16] = TMPQ2[16,16]; Rd_VPR128.8H[80,16] = TMPQ2[48,16]; Rd_VPR128.8H[96,16] = TMPQ2[80,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.401 XAR page C7-2935 line 171254 MATCH xce800000/mask=xffe00000 # CONSTRUCT xce800000/mask=xffe00000 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 ARG3 $|@8 ARG4 =var:8 =$>>@8 # SMACRO(pseudo) ARG1 ARG2 ARG3 ARG4 =NEON_xar/3@8 # AUNIT --inst xce800000/mask=xffe00000 --status noqemu # Advanced SIMD variant :xar Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D, LSB_bitfield64_imm is b_2131=0b11001110100 & Rd_VPR128.2D & Rn_VPR128.2D & Rm_VPR128.2D & LSB_bitfield64_imm & Zd { # simd infix TMPQ1 = Rn_VPR128.2D | Rm_VPR128.2D on lane size 8 TMPQ1[0,64] = Rn_VPR128.2D[0,64] | Rm_VPR128.2D[0,64]; TMPQ1[64,64] = Rn_VPR128.2D[64,64] | Rm_VPR128.2D[64,64]; local tmp2:8 = LSB_bitfield64_imm; # simd infix Rd_VPR128.2D = TMPQ1 >> tmp2 on lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64] >> tmp2; Rd_VPR128.2D[64,64] = TMPQ1[64,64] >> tmp2; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2936 line 171324 MATCH x0e212800/mask=xbf3ffc00 # CONSTRUCT x0ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@8:8 &=$shuffle@0-0@1-1:4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn/2@8 # AUNIT --inst x0ea12800/mask=xfffffc00 --status pass --comment "ext" :xtn Rd_VPR64.2S, Rn_VPR128.2D is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.2D & Rd_VPR64.2S & Zd { # simd resize TMPD1 = zext(Rn_VPR128.2D) (lane size 8 to 4) TMPD1[0,32] = Rn_VPR128.2D[0,32]; TMPD1[32,32] = Rn_VPR128.2D[64,32]; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0@1-1) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[0,32]; Rd_VPR64.2S[32,32] = TMPD1[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2936 line 171324 MATCH x0e212800/mask=xbf3ffc00 # CONSTRUCT x4ea12800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 
ARG2 $zext@8:8 &=$shuffle@0-2@1-3:4 # SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn2/2@8 # AUNIT --inst x4ea12800/mask=xfffffc00 --status pass --comment "ext" :xtn2 Rd_VPR128.4S, Rn_VPR128.2D is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=2 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.4S & Zd { # simd resize TMPD1 = zext(Rn_VPR128.2D) (lane size 8 to 4) TMPD1[0,32] = Rn_VPR128.2D[0,32]; TMPD1[32,32] = Rn_VPR128.2D[64,32]; # simd shuffle Rd_VPR128.4S = TMPD1 (@0-2@1-3) lane size 4 Rd_VPR128.4S[64,32] = TMPD1[0,32]; Rd_VPR128.4S[96,32] = TMPD1[32,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2936 line 171324 MATCH x0e212800/mask=xbf3ffc00 # CONSTRUCT x0e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:8 &=$shuffle@0-0@1-1@2-2@3-3:2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn/2@4 # AUNIT --inst x0e612800/mask=xfffffc00 --status pass --comment "ext" :xtn Rd_VPR64.4H, Rn_VPR128.4S is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.4S & Rd_VPR64.4H & Zd { # simd resize TMPD1 = zext(Rn_VPR128.4S) (lane size 4 to 2) TMPD1[0,16] = Rn_VPR128.4S[0,16]; TMPD1[16,16] = Rn_VPR128.4S[32,16]; TMPD1[32,16] = Rn_VPR128.4S[64,16]; TMPD1[48,16] = Rn_VPR128.4S[96,16]; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@1-1@2-2@3-3) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[0,16]; Rd_VPR64.4H[16,16] = TMPD1[16,16]; Rd_VPR64.4H[32,16] = TMPD1[32,16]; Rd_VPR64.4H[48,16] = TMPD1[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2936 line 171324 MATCH x0e212800/mask=xbf3ffc00 # CONSTRUCT x4e612800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@4:8 &=$shuffle@0-4@1-5@2-6@3-7:2 # SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn2/2@4 # AUNIT --inst x4e612800/mask=xfffffc00 --status pass --comment "ext" :xtn2 Rd_VPR128.8H, Rn_VPR128.4S is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=1 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.4S & 
Rd_VPR128.8H & Zd { # simd resize TMPD1 = zext(Rn_VPR128.4S) (lane size 4 to 2) TMPD1[0,16] = Rn_VPR128.4S[0,16]; TMPD1[16,16] = Rn_VPR128.4S[32,16]; TMPD1[32,16] = Rn_VPR128.4S[64,16]; TMPD1[48,16] = Rn_VPR128.4S[96,16]; # simd shuffle Rd_VPR128.8H = TMPD1 (@0-4@1-5@2-6@3-7) lane size 2 Rd_VPR128.8H[64,16] = TMPD1[0,16]; Rd_VPR128.8H[80,16] = TMPD1[16,16]; Rd_VPR128.8H[96,16] = TMPD1[32,16]; Rd_VPR128.8H[112,16] = TMPD1[48,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2936 line 171324 MATCH x0e212800/mask=xbf3ffc00 # CONSTRUCT x0e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:8 &=$shuffle@0-0@1-1@2-2@3-3@4-4@5-5@6-6@7-7:1 # SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn/2@2 # AUNIT --inst x0e212800/mask=xfffffc00 --status pass --comment "ext" :xtn Rd_VPR64.8B, Rn_VPR128.8H is b_3131=0 & q=0 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.8H & Rd_VPR64.8B & Zd { # simd resize TMPD1 = zext(Rn_VPR128.8H) (lane size 2 to 1) TMPD1[0,8] = Rn_VPR128.8H[0,8]; TMPD1[8,8] = Rn_VPR128.8H[16,8]; TMPD1[16,8] = Rn_VPR128.8H[32,8]; TMPD1[24,8] = Rn_VPR128.8H[48,8]; TMPD1[32,8] = Rn_VPR128.8H[64,8]; TMPD1[40,8] = Rn_VPR128.8H[80,8]; TMPD1[48,8] = Rn_VPR128.8H[96,8]; TMPD1[56,8] = Rn_VPR128.8H[112,8]; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@1-1@2-2@3-3@4-4@5-5@6-6@7-7) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[0,8]; Rd_VPR64.8B[8,8] = TMPD1[8,8]; Rd_VPR64.8B[16,8] = TMPD1[16,8]; Rd_VPR64.8B[24,8] = TMPD1[24,8]; Rd_VPR64.8B[32,8] = TMPD1[32,8]; Rd_VPR64.8B[40,8] = TMPD1[40,8]; Rd_VPR64.8B[48,8] = TMPD1[48,8]; Rd_VPR64.8B[56,8] = TMPD1[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.402 XTN, XTN2 page C7-2936 line 171324 MATCH x0e212800/mask=xbf3ffc00 # CONSTRUCT x4e212800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG1 ARG2 $zext@2:8 &=$shuffle@0-8@1-9@2-10@3-11@4-12@5-13@6-14@7-15:1 # SMACRO(pseudo) ARG1 ARG2 &=NEON_xtn2/2@2 # AUNIT --inst x4e212800/mask=xfffffc00 --status 
pass --comment "ext" :xtn2 Rd_VPR128.16B, Rn_VPR128.8H is b_3131=0 & q=1 & u=0 & b_2428=0xe & advSIMD3.size=0 & b_1721=0x10 & b_1216=0x12 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.16B & Zd { # simd resize TMPD1 = zext(Rn_VPR128.8H) (lane size 2 to 1) TMPD1[0,8] = Rn_VPR128.8H[0,8]; TMPD1[8,8] = Rn_VPR128.8H[16,8]; TMPD1[16,8] = Rn_VPR128.8H[32,8]; TMPD1[24,8] = Rn_VPR128.8H[48,8]; TMPD1[32,8] = Rn_VPR128.8H[64,8]; TMPD1[40,8] = Rn_VPR128.8H[80,8]; TMPD1[48,8] = Rn_VPR128.8H[96,8]; TMPD1[56,8] = Rn_VPR128.8H[112,8]; # simd shuffle Rd_VPR128.16B = TMPD1 (@0-8@1-9@2-10@3-11@4-12@5-13@6-14@7-15) lane size 1 Rd_VPR128.16B[64,8] = TMPD1[0,8]; Rd_VPR128.16B[72,8] = TMPD1[8,8]; Rd_VPR128.16B[80,8] = TMPD1[16,8]; Rd_VPR128.16B[88,8] = TMPD1[24,8]; Rd_VPR128.16B[96,8] = TMPD1[32,8]; Rd_VPR128.16B[104,8] = TMPD1[40,8]; Rd_VPR128.16B[112,8] = TMPD1[48,8]; Rd_VPR128.16B[120,8] = TMPD1[56,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x4e003800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2@2-4@3-6@4-8@5-10@6-12@7-14:1 swap &=$shuffle@0-1@1-3@2-5@3-7@4-9@5-11@6-13@7-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@1 # AUNIT --inst x4e003800/mask=xffe0fc00 --status pass :zip1 Rd_VPR128.16B, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@0-0@1-2@2-4@3-6@4-8@5-10@6-12@7-14) lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[0,8]; Rd_VPR128.16B[16,8] = TMPQ1[8,8]; Rd_VPR128.16B[32,8] = TMPQ1[16,8]; Rd_VPR128.16B[48,8] = TMPQ1[24,8]; Rd_VPR128.16B[64,8] = TMPQ1[32,8]; Rd_VPR128.16B[80,8] = TMPQ1[40,8]; Rd_VPR128.16B[96,8] = TMPQ1[48,8]; Rd_VPR128.16B[112,8] = TMPQ1[56,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@0-1@1-3@2-5@3-7@4-9@5-11@6-13@7-15) lane size 
1 Rd_VPR128.16B[8,8] = TMPQ2[0,8]; Rd_VPR128.16B[24,8] = TMPQ2[8,8]; Rd_VPR128.16B[40,8] = TMPQ2[16,8]; Rd_VPR128.16B[56,8] = TMPQ2[24,8]; Rd_VPR128.16B[72,8] = TMPQ2[32,8]; Rd_VPR128.16B[88,8] = TMPQ2[40,8]; Rd_VPR128.16B[104,8] = TMPQ2[48,8]; Rd_VPR128.16B[120,8] = TMPQ2[56,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x4ec03800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:8 swap &=$shuffle@0-1:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@8 # AUNIT --inst x4ec03800/mask=xffe0fc00 --status pass :zip1 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@0-0) lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[0,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@0-1) lane size 8 Rd_VPR128.2D[64,64] = TMPQ2[0,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x0e803800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0:4 swap &=$shuffle@0-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@4 # AUNIT --inst x0e803800/mask=xffe0fc00 --status pass :zip1 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@0-0) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[0,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@0-1) lane size 4 Rd_VPR64.2S[32,32] = TMPD2[0,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x0e403800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO 
ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2:2 swap &=$shuffle@0-1@1-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@2 # AUNIT --inst x0e403800/mask=xffe0fc00 --status pass :zip1 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@0-0@1-2) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[0,16]; Rd_VPR64.4H[32,16] = TMPD1[16,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@0-1@1-3) lane size 2 Rd_VPR64.4H[16,16] = TMPD2[0,16]; Rd_VPR64.4H[48,16] = TMPD2[16,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x4e803800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2:4 swap &=$shuffle@0-1@1-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@4 # AUNIT --inst x4e803800/mask=xffe0fc00 --status pass :zip1 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = TMPQ1 (@0-0@1-2) lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[0,32]; Rd_VPR128.4S[64,32] = TMPQ1[32,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@0-1@1-3) lane size 4 Rd_VPR128.4S[32,32] = TMPQ2[0,32]; Rd_VPR128.4S[96,32] = TMPQ2[32,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x0e003800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2@2-4@3-6:1 swap &=$shuffle@0-1@1-3@2-5@3-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@1 # AUNIT --inst x0e003800/mask=xffe0fc00 --status pass :zip1 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & 
Rm_VPR64.8B & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@0-0@1-2@2-4@3-6) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[0,8]; Rd_VPR64.8B[16,8] = TMPD1[8,8]; Rd_VPR64.8B[32,8] = TMPD1[16,8]; Rd_VPR64.8B[48,8] = TMPD1[24,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@0-1@1-3@2-5@3-7) lane size 1 Rd_VPR64.8B[8,8] = TMPD2[0,8]; Rd_VPR64.8B[24,8] = TMPD2[8,8]; Rd_VPR64.8B[40,8] = TMPD2[16,8]; Rd_VPR64.8B[56,8] = TMPD2[24,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.403 ZIP1 page C7-2938 line 171432 MATCH x0e003800/mask=xbf20fc00 # CONSTRUCT x4e403800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@0-0@1-2@2-4@3-6:2 swap &=$shuffle@0-1@1-3@2-5@3-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip1/2@2 # AUNIT --inst x4e403800/mask=xffe0fc00 --status pass :zip1 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=3 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@0-0@1-2@2-4@3-6) lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[0,16]; Rd_VPR128.8H[32,16] = TMPQ1[16,16]; Rd_VPR128.8H[64,16] = TMPQ1[32,16]; Rd_VPR128.8H[96,16] = TMPQ1[48,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@0-1@1-3@2-5@3-7) lane size 2 Rd_VPR128.8H[16,16] = TMPQ2[0,16]; Rd_VPR128.8H[48,16] = TMPQ2[16,16]; Rd_VPR128.8H[80,16] = TMPQ2[32,16]; Rd_VPR128.8H[112,16] = TMPQ2[48,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x4e007800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@8-0@9-2@10-4@11-6@12-8@13-10@14-12@15-14:1 swap &=$shuffle@8-1@9-3@10-5@11-7@12-9@13-11@14-13@15-15:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@1 # AUNIT --inst x4e007800/mask=xffe0fc00 --status pass :zip2 Rd_VPR128.16B, 
Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR128.16B & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.16B & Rd_VPR128.16B & Zd { TMPQ2 = Rm_VPR128.16B; TMPQ1 = Rn_VPR128.16B; # simd shuffle Rd_VPR128.16B = TMPQ1 (@8-0@9-2@10-4@11-6@12-8@13-10@14-12@15-14) lane size 1 Rd_VPR128.16B[0,8] = TMPQ1[64,8]; Rd_VPR128.16B[16,8] = TMPQ1[72,8]; Rd_VPR128.16B[32,8] = TMPQ1[80,8]; Rd_VPR128.16B[48,8] = TMPQ1[88,8]; Rd_VPR128.16B[64,8] = TMPQ1[96,8]; Rd_VPR128.16B[80,8] = TMPQ1[104,8]; Rd_VPR128.16B[96,8] = TMPQ1[112,8]; Rd_VPR128.16B[112,8] = TMPQ1[120,8]; # simd shuffle Rd_VPR128.16B = TMPQ2 (@8-1@9-3@10-5@11-7@12-9@13-11@14-13@15-15) lane size 1 Rd_VPR128.16B[8,8] = TMPQ2[64,8]; Rd_VPR128.16B[24,8] = TMPQ2[72,8]; Rd_VPR128.16B[40,8] = TMPQ2[80,8]; Rd_VPR128.16B[56,8] = TMPQ2[88,8]; Rd_VPR128.16B[72,8] = TMPQ2[96,8]; Rd_VPR128.16B[88,8] = TMPQ2[104,8]; Rd_VPR128.16B[104,8] = TMPQ2[112,8]; Rd_VPR128.16B[120,8] = TMPQ2[120,8]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x4ec07800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:8 swap &=$shuffle@1-1:8 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@8 # AUNIT --inst x4ec07800/mask=xffe0fc00 --status pass :zip2 Rd_VPR128.2D, Rn_VPR128.2D, Rm_VPR128.2D is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=3 & b_2121=0 & Rm_VPR128.2D & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.2D & Rd_VPR128.2D & Zd { TMPQ2 = Rm_VPR128.2D; TMPQ1 = Rn_VPR128.2D; # simd shuffle Rd_VPR128.2D = TMPQ1 (@1-0) lane size 8 Rd_VPR128.2D[0,64] = TMPQ1[64,64]; # simd shuffle Rd_VPR128.2D = TMPQ2 (@1-1) lane size 8 Rd_VPR128.2D[64,64] = TMPQ2[64,64]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x0e807800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@1-0:4 swap 
&=$shuffle@1-1:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@4 # AUNIT --inst x0e807800/mask=xffe0fc00 --status pass :zip2 Rd_VPR64.2S, Rn_VPR64.2S, Rm_VPR64.2S is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR64.2S & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.2S & Rd_VPR64.2S & Zd { TMPD2 = Rm_VPR64.2S; TMPD1 = Rn_VPR64.2S; # simd shuffle Rd_VPR64.2S = TMPD1 (@1-0) lane size 4 Rd_VPR64.2S[0,32] = TMPD1[32,32]; # simd shuffle Rd_VPR64.2S = TMPD2 (@1-1) lane size 4 Rd_VPR64.2S[32,32] = TMPD2[32,32]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x0e407800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@2-0@3-2:2 swap &=$shuffle@2-1@3-3:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@2 # AUNIT --inst x0e407800/mask=xffe0fc00 --status pass :zip2 Rd_VPR64.4H, Rn_VPR64.4H, Rm_VPR64.4H is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR64.4H & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.4H & Rd_VPR64.4H & Zd { TMPD2 = Rm_VPR64.4H; TMPD1 = Rn_VPR64.4H; # simd shuffle Rd_VPR64.4H = TMPD1 (@2-0@3-2) lane size 2 Rd_VPR64.4H[0,16] = TMPD1[32,16]; Rd_VPR64.4H[32,16] = TMPD1[48,16]; # simd shuffle Rd_VPR64.4H = TMPD2 (@2-1@3-3) lane size 2 Rd_VPR64.4H[16,16] = TMPD2[32,16]; Rd_VPR64.4H[48,16] = TMPD2[48,16]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x4e807800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@2-0@3-2:4 swap &=$shuffle@2-1@3-3:4 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@4 # AUNIT --inst x4e807800/mask=xffe0fc00 --status pass :zip2 Rd_VPR128.4S, Rn_VPR128.4S, Rm_VPR128.4S is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=2 & b_2121=0 & Rm_VPR128.4S & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.4S & Rd_VPR128.4S & Zd { TMPQ2 = Rm_VPR128.4S; TMPQ1 = Rn_VPR128.4S; # simd shuffle Rd_VPR128.4S = 
TMPQ1 (@2-0@3-2) lane size 4 Rd_VPR128.4S[0,32] = TMPQ1[64,32]; Rd_VPR128.4S[64,32] = TMPQ1[96,32]; # simd shuffle Rd_VPR128.4S = TMPQ2 (@2-1@3-3) lane size 4 Rd_VPR128.4S[32,32] = TMPQ2[64,32]; Rd_VPR128.4S[96,32] = TMPQ2[96,32]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x0e007800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@4-0@5-2@6-4@7-6:1 swap &=$shuffle@4-1@5-3@6-5@7-7:1 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@1 # AUNIT --inst x0e007800/mask=xffe0fc00 --status pass :zip2 Rd_VPR64.8B, Rn_VPR64.8B, Rm_VPR64.8B is b_3131=0 & q=0 & b_2429=0xe & advSIMD3.size=0 & b_2121=0 & Rm_VPR64.8B & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR64.8B & Rd_VPR64.8B & Zd { TMPD2 = Rm_VPR64.8B; TMPD1 = Rn_VPR64.8B; # simd shuffle Rd_VPR64.8B = TMPD1 (@4-0@5-2@6-4@7-6) lane size 1 Rd_VPR64.8B[0,8] = TMPD1[32,8]; Rd_VPR64.8B[16,8] = TMPD1[40,8]; Rd_VPR64.8B[32,8] = TMPD1[48,8]; Rd_VPR64.8B[48,8] = TMPD1[56,8]; # simd shuffle Rd_VPR64.8B = TMPD2 (@4-1@5-3@6-5@7-7) lane size 1 Rd_VPR64.8B[8,8] = TMPD2[32,8]; Rd_VPR64.8B[24,8] = TMPD2[40,8]; Rd_VPR64.8B[40,8] = TMPD2[48,8]; Rd_VPR64.8B[56,8] = TMPD2[56,8]; zext_zd(Zd); # zero upper 24 bytes of Zd } # C7.2.404 ZIP2 page C7-2940 line 171547 MATCH x0e007800/mask=xbf20fc00 # CONSTRUCT x4e407800/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO ARG3 =var ARG1 ARG2 =var &=$shuffle@4-0@5-2@6-4@7-6:2 swap &=$shuffle@4-1@5-3@6-5@7-7:2 # SMACRO(pseudo) ARG1 ARG2 ARG3 =NEON_zip2/2@2 # AUNIT --inst x4e407800/mask=xffe0fc00 --status pass :zip2 Rd_VPR128.8H, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0 & q=1 & b_2429=0xe & advSIMD3.size=1 & b_2121=0 & Rm_VPR128.8H & b_1515=0 & b_1214=7 & b_1011=2 & Rn_VPR128.8H & Rd_VPR128.8H & Zd { TMPQ2 = Rm_VPR128.8H; TMPQ1 = Rn_VPR128.8H; # simd shuffle Rd_VPR128.8H = TMPQ1 (@4-0@5-2@6-4@7-6) lane size 2 Rd_VPR128.8H[0,16] = TMPQ1[64,16]; Rd_VPR128.8H[32,16] = TMPQ1[80,16]; 
Rd_VPR128.8H[64,16] = TMPQ1[96,16]; Rd_VPR128.8H[96,16] = TMPQ1[112,16]; # simd shuffle Rd_VPR128.8H = TMPQ2 (@4-1@5-3@6-5@7-7) lane size 2 Rd_VPR128.8H[16,16] = TMPQ2[64,16]; Rd_VPR128.8H[48,16] = TMPQ2[80,16]; Rd_VPR128.8H[80,16] = TMPQ2[96,16]; Rd_VPR128.8H[112,16] = TMPQ2[112,16]; zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.13 BFCVT page C7-2037 line 118954 MATCH x1e634000/mask=xfffffc00 # C7.2.69 FCVT page C7-2172 line 126762 MATCH x1e224000/mask=xff3e7c00 # CONSTRUCT x1e634000/mask=xfffffc00 MATCHED 2 DOCUMENTED OPCODES # x1e634000/mask=xfffffc00 NOT MATCHED BY ANY CONSTRUCTOR # SMACRO ARG1 ARG2 =float2float/1 # SMACRO(pseudo) ARG1 ARG2 =NEON_bfcvt/1 # b_0031=0001111001100011010000.......... :bfcvt Rd_FPR16, Rn_FPR32 is b_1031=0b0001111001100011010000 & Rd_FPR16 & Rn_FPR32 & Zd { Rd_FPR16 = float2float(Rn_FPR32); zext_zh(Zd); # zero upper 30 bytes of Zd } # C7.2.14 BFCVTN, BFCVTN2 page C7-2038 line 119011 MATCH x0ea16800/mask=xbffffc00 # CONSTRUCT x0ea16800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # x0ea16800/mask=xbffffc00 NOT MATCHED BY ANY CONSTRUCTOR # SMACRO ARG1 ARG2 =var =$float2float@4:8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4 # b_0031=0.00111010100001011010.......... 
# BFCVTN / BFCVTN2: convert FP32 source lanes to BF16 and write the narrowed results
# into the low (bfcvtn) or high (bfcvtn2) half of the destination vector.
# NOTE(review): the operands below are written as Rd_VPR128.4S (destination) with
# Rn_VPR128.4H / Rn_VPR128.8H (source), and every lane copy is a same-width 32-bit
# float2float. The ARM manual (C7.2.14) defines BFCVTN as Vd.4H/.8H <- Vn.4S, i.e. a
# narrowing 32->16 conversion -- the views and lane widths here look inverted/approximate
# relative to the spec. Confirm against the manual before relying on this p-code for
# precise BF16 semantics.
:bfcvtn Rd_VPR128.4S, Rn_VPR128.4H is b_3131=0b0 & Q=0 & b_1029=0b00111010100001011010 & Rn_VPR128.4H & Rd_VPR128.4S & Zd { TMPQ1 = Rn_VPR128.4H; # simd resize Rd_VPR128.4S = float2float(TMPQ1) (lane size 4 to 4) Rd_VPR128.4S[0,32] = float2float(TMPQ1[0,32]); Rd_VPR128.4S[32,32] = float2float(TMPQ1[32,32]); Rd_VPR128.4S[64,32] = float2float(TMPQ1[64,32]); Rd_VPR128.4S[96,32] = float2float(TMPQ1[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.14 BFCVTN, BFCVTN2 page C7-2038 line 119011 MATCH x0ea16800/mask=xbffffc00 # CONSTRUCT x4ea16800/mask=xfffffc00 MATCHED 1 DOCUMENTED OPCODES # x0ea16800/mask=xbffffc00 NOT MATCHED BY ANY CONSTRUCTOR # SMACRO ARG1 ARG2 =var =$float2float@4:8 # SMACRO(pseudo) ARG1 ARG2 &=NEON_fcvtn/2@4 :bfcvtn2 Rd_VPR128.4S, Rn_VPR128.8H is b_3131=0b0 & Q=1 & b_1029=0b00111010100001011010 & Rn_VPR128.8H & Rd_VPR128.4S & Zd { TMPQ1 = Rn_VPR128.8H; # simd resize Rd_VPR128.4S = float2float(TMPQ1) (lane size 4 to 4) Rd_VPR128.4S[0,32] = float2float(TMPQ1[0,32]); Rd_VPR128.4S[32,32] = float2float(TMPQ1[32,32]); Rd_VPR128.4S[64,32] = float2float(TMPQ1[64,32]); Rd_VPR128.4S[96,32] = float2float(TMPQ1[96,32]); zext_zq(Zd); # zero upper 16 bytes of Zd } # C7.2.15 BFDOT (by element) page C7-2039 line 119080 MATCH x0f40f000/mask=xbfc0f400 # CONSTRUCT x0f40f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # x0f40f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.00111101......1111.0..........
# C7.2.15 BFDOT (by element): BF16 dot product of element pairs, indexed-element form.
# Fix: the Q=0 display string contained a stray empty operand ("Rn_VPR128.4H, , Re_...")
# which disassembled with a double comma ("bfdot vD.2s, vN.4h, , vM.h[i]"); the duplicate
# comma is removed. Decode pattern and p-code semantics are unchanged.
# NOTE(review): NEON_bfdot is invoked without the Rd accumulator even though the
# architectural BFDOT accumulates into Vd -- pseudo-op arity deliberately left as-is so
# existing consumers of the pseudo-op are unaffected.
:bfdot Rd_VPR128.2S, Rn_VPR128.4H, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=0 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.4H & Rd_VPR128.2S { Rd_VPR128.2S = NEON_bfdot(Rn_VPR128.4H, Re_VPR128.H.vIndexHL); }
# C7.2.15 BFDOT (by element) page C7-2039 line 119080 MATCH x0f40f000/mask=xbfc0f400
# CONSTRUCT x4f40f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
:bfdot Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=1 & b_2229=0b00111101 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfdot(Rn_VPR128.8H, Re_VPR128.H.vIndexHL); }
# C7.2.16 BFDOT (vector) page C7-2041 line 119201 MATCH x2e40fc00/mask=xbfe0fc00
# CONSTRUCT x2e40fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
# x2e40fc00/mask=xbfe0fc00 NOT MATCHED BY ANY CONSTRUCTOR
# b_0031=0.101110010.....111111..........
:bfdot Rd_VPR128.2S, Rn_VPR128.4H, Rm_VPR128.4H is b_3131=0b0 & Q=0 & b_2129=0b101110010 & Rm_VPR128.4H & b_1015=0b111111 & Rn_VPR128.4H & Rd_VPR128.2S { Rd_VPR128.2S = NEON_bfdot(Rn_VPR128.4H, Rm_VPR128.4H); }
# C7.2.16 BFDOT (vector) page C7-2041 line 119201 MATCH x2e40fc00/mask=xbfe0fc00
# CONSTRUCT x6e40fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES
:bfdot Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0b0 & Q=1 & b_2129=0b101110010 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfdot(Rn_VPR128.8H, Rm_VPR128.8H); }
# C7.2.17 BFMLALB, BFMLALT (by element) page C7-2043 line 119316 MATCH x0fc0f000/mask=xbfc0f400
# CONSTRUCT x0fc0f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES
# x0fc0f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR
# b_0031=0.00111111......1111.0..........
:bfmlalb Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0b0 & Q=0 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfmlalb(Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM); } # C7.2.17 BFMLALB, BFMLALT (by element) page C7-2043 line 119316 MATCH x0fc0f000/mask=xbfc0f400 # CONSTRUCT x4fc0f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES :bfmlalt Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM is b_3131=0b0 & Q=1 & b_2229=0b00111111 & Re_VPR128Lo.H.vIndexHLM & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfmlalt(Rd_VPR128.4S, Rn_VPR128.8H, Re_VPR128Lo.H.vIndexHLM); } # C7.2.18 BFMLALB, BFMLALT (vector) page C7-2045 line 119401 MATCH x2ec0fc00/mask=xbfe0fc00 # CONSTRUCT x2ec0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # x2ec0fc00/mask=xbfe0fc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.101110110.....111111.......... :bfmlalb Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0b0 & Q=0 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfmlalb(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); } # C7.2.18 BFMLALB, BFMLALT (vector) page C7-2045 line 119401 MATCH x2ec0fc00/mask=xbfe0fc00 # CONSTRUCT x6ec0fc00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES :bfmlalt Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_3131=0b0 & Q=1 & b_2129=0b101110110 & Rm_VPR128.8H & b_1015=0b111111 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfmlalt(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); } # C7.2.19 BFMMLA page C7-2046 line 119472 MATCH x6e40ec00/mask=xffe0fc00 # CONSTRUCT x6e40ec00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # x6e40ec00/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=01101110010.....111011.......... 
# BFMMLA: BF16 matrix multiply-accumulate; the accumulator Rd is passed as the first
# argument of the NEON_bfmmla pseudo-op (accumulating form, as the semantics show).
# NOTE(review): the frint32x constructors below implement each lane with trunc(), which in
# p-code is a float->signed-integer conversion, and store the integer bit pattern back into
# the FP lane. The architectural FRINT32X produces a floating-point value rounded to the
# int32 range (per FPCR rounding mode). This reads as a deliberate approximation by the
# generator -- confirm against the p-code operation reference if FP-exact semantics matter.
:bfmmla Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H is b_2131=0b01101110010 & Rm_VPR128.8H & b_1015=0b111011 & Rn_VPR128.8H & Rd_VPR128.4S { Rd_VPR128.4S = NEON_bfmmla(Rd_VPR128.4S, Rn_VPR128.8H, Rm_VPR128.8H); } # C7.2.147 FRINT32X (vector) page C7-2353 line 137678 MATCH x2e21e800/mask=xbfbffc00 # CONSTRUCT x2e21e800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES # x2e21e800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.1011100.100001111010.......... :frint32x Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0b0 & Q=0 & b_2329=0b1011100 & b_22=0 & b_1021=0b100001111010 & Rn_VPR64.2S & Rd_VPR64.2S { Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); } :frint32x Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0b0 & Q=1 & b_2329=0b1011100 & b_22=0 & b_1021=0b100001111010 & Rn_VPR128.4S & Rd_VPR128.4S { Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); } :frint32x Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0b0 & Q=1 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001111010 & Rn_VPR128.2D & Rd_VPR128.2D { local result:4 = trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[0,64] = zext(result); result = trunc(Rn_VPR128.2D[64,64]); Rd_VPR128.2D[64,64] = zext(result); } # C7.2.148 FRINT32X (scalar) page C7-2355 line 137767 MATCH x1e28c000/mask=xffbffc00 # CONSTRUCT x1e28c000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES # x1e28c000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=000111100.101000110000..........
:frint32x Rd_FPR32, Rn_FPR32 is b_2331=0b000111100 & b_22=0 & b_1021=0b101000110000 & Rn_FPR32 & Rd_FPR32 { Rd_FPR32 = trunc(Rn_FPR32); } :frint32x Rd_FPR64, Rn_FPR64 is b_2331=0b000111100 & b_22=1 & b_1021=0b101000110000 & Rn_FPR64 & Rd_FPR64 { local result:4 = trunc(Rn_FPR64); Rd_FPR64 = zext(result); } # C7.2.149 FRINT32Z (vector) page C7-2357 line 137862 MATCH x0e21e800/mask=xbfbffc00 # CONSTRUCT x0e21e800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES # x0e21e800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.0011100.100001111010.......... :frint32z Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0b0 & Q=0 & b_2329=0b0011100 & b_22=0 & b_1021=0b100001111010 & Rn_VPR64.2S & Rd_VPR64.2S { Rd_VPR64.2S[0,32] = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[32,32] = trunc(Rn_VPR64.2S[32,32]); } :frint32z Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0b0 & Q=1 & b_2329=0b0011100 & b_22=0 & b_1021=0b100001111010 & Rn_VPR128.4S & Rd_VPR128.4S { Rd_VPR128.4S[0,32] = trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[32,32] = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[64,32] = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[96,32] = trunc(Rn_VPR128.4S[96,32]); } :frint32z Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0b0 & Q=1 & b_2329=0b0011100 & b_22=1 & b_1021=0b100001111010 & Rn_VPR128.2D & Rd_VPR128.2D { local result:4 = trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[0,64] = zext(result); result = trunc(Rn_VPR128.2D[64,64]); Rd_VPR128.2D[64,64] = zext(result); } # C7.2.150 FRINT32Z (scalar) page C7-2359 line 137950 MATCH x1e284000/mask=xffbffc00 # CONSTRUCT x1e284000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES # x1e284000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=000111100.101000010000.......... 
:frint32z Rd_FPR32, Rn_FPR32 is b_2331=0b000111100 & b_22=0 & b_1021=0b101000010000 & Rn_FPR32 & Rd_FPR32 { Rd_FPR32 = trunc(Rn_FPR32); } :frint32z Rd_FPR64, Rn_FPR64 is b_2331=0b000111100 & b_22=1 & b_1021=0b101000010000 & Rn_FPR64 & Rd_FPR64 { local result:4 = trunc(Rn_FPR64); Rd_FPR64 = zext(result); } # C7.2.151 FRINT64X (vector) page C7-2361 line 138043 MATCH x2e21f800/mask=xbfbffc00 # CONSTRUCT x2e21f800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES # x2e21f800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.1011100.100001111110.......... :frint64x Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0b0 & Q=0 & b_2329=0b1011100 & b_22=0 & b_1021=0b100001111110 & Rn_VPR64.2S & Rd_VPR64.2S { local result:8 = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[0,32] = result[0,32]; result = trunc(Rn_VPR64.2S[32,32]); Rd_VPR64.2S[32,32] = result[0,32]; } :frint64x Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0b0 & Q=1 & b_2329=0b1011100 & b_22=0 & b_1021=0b100001111110 & Rn_VPR128.4S & Rd_VPR128.4S { local result:8 = trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[0,32] = result[0,32]; result = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[32,32] = result[0,32]; result = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[64,32] = result[0,32]; result = trunc(Rn_VPR128.4S[96,32]); Rd_VPR128.4S[96,32] = result[0,32]; } :frint64x Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0b0 & Q=1 & b_2329=0b1011100 & b_22=1 & b_1021=0b100001111110 & Rn_VPR128.2D & Rd_VPR128.2D { Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); } # C7.2.152 FRINT64X (scalar) page C7-2363 line 138132 MATCH x1e29c000/mask=xffbffc00 # CONSTRUCT x1e29c000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES # x1e29c000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=000111100.101001110000.......... 
:frint64x Rd_FPR32, Rn_FPR32 is b_2331=0b000111100 & b_22=0 & b_1021=0b101001110000 & Rn_FPR32 & Rd_FPR32 { local result:8 = trunc(Rn_FPR32); Rd_FPR32 = result[0,32]; } :frint64x Rd_FPR64, Rn_FPR64 is b_2331=0b000111100 & b_22=1 & b_1021=0b101001110000 & Rn_FPR64 & Rd_FPR64 { Rd_FPR64 = trunc(Rn_FPR64); } # C7.2.153 FRINT64Z (vector) page C7-2365 line 138227 MATCH x0e21f800/mask=xbfbffc00 # CONSTRUCT x0e21f800/mask=xbfbffc00 MATCHED 1 DOCUMENTED OPCODES # x0e21f800/mask=xbfbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.0011100.100001111110.......... :frint64z Rd_VPR64.2S, Rn_VPR64.2S is b_3131=0b0 & Q=0 & b_2329=0b0011100 & b_22=0 & b_1021=0b100001111110 & Rn_VPR64.2S & Rd_VPR64.2S { local result:8 = trunc(Rn_VPR64.2S[0,32]); Rd_VPR64.2S[0,32] = result[0,32]; result = trunc(Rn_VPR64.2S[32,32]); Rd_VPR64.2S[32,32] = result[0,32]; } :frint64z Rd_VPR128.4S, Rn_VPR128.4S is b_3131=0b0 & Q=1 & b_2329=0b0011100 & b_22=0 & b_1021=0b100001111110 & Rn_VPR128.4S & Rd_VPR128.4S { local result:8 = trunc(Rn_VPR128.4S[0,32]); Rd_VPR128.4S[0,32] = result[0,32]; result = trunc(Rn_VPR128.4S[32,32]); Rd_VPR128.4S[32,32] = result[0,32]; result = trunc(Rn_VPR128.4S[64,32]); Rd_VPR128.4S[64,32] = result[0,32]; result = trunc(Rn_VPR128.4S[96,32]); Rd_VPR128.4S[96,32] = result[0,32]; } :frint64z Rd_VPR128.2D, Rn_VPR128.2D is b_3131=0b0 & Q=1 & b_2329=0b0011100 & b_22=1 & b_1021=0b100001111110 & Rn_VPR128.2D & Rd_VPR128.2D { Rd_VPR128.2D[0,64] = trunc(Rn_VPR128.2D[0,64]); Rd_VPR128.2D[64,64] = trunc(Rn_VPR128.2D[64,64]); } # C7.2.154 FRINT64Z (scalar) page C7-2367 line 138315 MATCH x1e294000/mask=xffbffc00 # CONSTRUCT x1e294000/mask=xffbffc00 MATCHED 1 DOCUMENTED OPCODES # x1e294000/mask=xffbffc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=000111100.101001010000.......... 
:frint64z Rd_FPR32, Rn_FPR32 is b_2331=0b000111100 & b_22=0 & b_1021=0b101001010000 & Rn_FPR32 & Rd_FPR32 { local result:8 = trunc(Rn_FPR32); Rd_FPR32 = result[0,32]; } :frint64z Rd_FPR64, Rn_FPR64 is b_2331=0b000111100 & b_22=1 & b_1021=0b101001010000 & Rn_FPR64 & Rd_FPR64 { Rd_FPR64 = trunc(Rn_FPR64); } # C7.2.278 SMMLA (vector) page C7-2634 line 153703 MATCH x4e80a400/mask=xffe0fc00 # CONSTRUCT x4e80a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # x4e80a400/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=01001110100.....101001.......... :smmla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_VPR128.4S { Rd_VPR128.4S = NEON_smmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); } # C7.2.336 SUDOT (by element) page C7-2795 line 163341 MATCH x0f00f000/mask=xbfc0f400 # CONSTRUCT x0f00f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # x0f00f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.00111100......1111.0.......... :sudot Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=0 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8B & Rd_VPR128.2S { Rd_VPR128.2S = NEON_sudot(Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL); } # C7.2.336 SUDOT (by element) page C7-2795 line 163341 MATCH x0f00f000/mask=xbfc0f400 # CONSTRUCT x4f00f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES :sudot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=1 & b_2229=0b00111100 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.16B & Rd_VPR128.4S { Rd_VPR128.4S = NEON_sudot(Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL); } # C7.2.370 UMMLA (vector) page C7-2867 line 167357 MATCH x6e80a400/mask=xffe0fc00 # CONSTRUCT x6e80a400/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # x6e80a400/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_ummla/3@1 # b_0031=01101110100.....101001.......... 
# UMMLA: unsigned 8-bit integer matrix multiply-accumulate into the 4S accumulator Rd.
# NOTE(review): NEON_ummla here (and NEON_usmmla later in this file) is passed an extra
# trailing 1:1 constant operand, while the otherwise-parallel NEON_smmla pseudo-op is
# called with only three arguments -- presumably a signedness/mode flag, but that intent
# is not verifiable from this file; confirm against the pseudo-op's consumers.
# NOTE(review): in the Q=1 usdot pattern below, the Rn/Rm operand symbols appear in the
# opposite textual order from the Q=0 variant. SLEIGH '&' constraint conjunction is
# order-independent and each operand symbol carries its own fixed bit field, so this
# should be cosmetic only.
:ummla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_2131=0b01101110100 & Rm_VPR128.16B & b_1015=0b101001 & Rn_VPR128.16B & Rd_VPR128.4S { Rd_VPR128.4S = NEON_ummla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } # C7.2.388 USDOT (vector) page C7-2907 line 169709 MATCH x0e809c00/mask=xbfe0fc00 # CONSTRUCT x0e809c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # x0e809c00/mask=xbfe0fc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.001110100.....100111.......... :usdot Rd_VPR128.2S, Rn_VPR128.8B, Rm_VPR128.8B is b_3131=0b0 & Q=0 & b_2129=0b001110100 & Rm_VPR128.8B & b_1015=0b100111 & Rn_VPR128.8B & Rd_VPR128.2S { Rd_VPR128.2S = NEON_usdot(Rd_VPR128.2S, Rn_VPR128.8B, Rm_VPR128.8B); } # C7.2.388 USDOT (vector) page C7-2907 line 169709 MATCH x0e809c00/mask=xbfe0fc00 # CONSTRUCT x4e809c00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES :usdot Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_3131=0b0 & Q=1 & b_2129=0b001110100 & Rn_VPR128.16B & b_1015=0b100111 & Rm_VPR128.16B & Rd_VPR128.4S { Rd_VPR128.4S = NEON_usdot(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B); } # C7.2.389 USDOT (by element) page C7-2909 line 169795 MATCH x0f80f000/mask=xbfc0f400 # CONSTRUCT x0f80f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES # x0f80f000/mask=xbfc0f400 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=0.00111110......1111.0..........
:usdot Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=0 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.8B & Rd_VPR128.2S { Rd_VPR128.2S = NEON_usdot(Rd_VPR128.2S, Rn_VPR128.8B, Re_VPR128.H.vIndexHL); } # C7.2.389 USDOT (by element) page C7-2909 line 169795 MATCH x0f80f000/mask=xbfc0f400 # CONSTRUCT x4f80f000/mask=xffc0f400 MATCHED 1 DOCUMENTED OPCODES :usdot Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL is b_3131=0b0 & Q=1 & b_2229=0b00111110 & Re_VPR128.H.vIndexHL & b_1215=0b1111 & b_1010=0b0 & Rn_VPR128.16B & Rd_VPR128.4S { Rd_VPR128.4S = NEON_usdot(Rd_VPR128.4S, Rn_VPR128.16B, Re_VPR128.H.vIndexHL); } # C7.2.393 USMMLA (vector) page C7-2919 line 170338 MATCH x4e80ac00/mask=xffe0fc00 # CONSTRUCT x4e80ac00/mask=xffe0fc00 MATCHED 1 DOCUMENTED OPCODES # SMACRO(pseudo) ARG1 ARG2 ARG3 &=NEON_usmmla/3@1 # x4e80ac00/mask=xffe0fc00 NOT MATCHED BY ANY CONSTRUCTOR # b_0031=01001110100.....101011.......... :usmmla Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B is b_2131=0b01001110100 & Rm_VPR128.16B & b_1015=0b101011 & Rn_VPR128.16B & Rd_VPR128.4S { Rd_VPR128.4S = NEON_usmmla(Rd_VPR128.4S, Rn_VPR128.16B, Rm_VPR128.16B, 1:1); } ================================================ FILE: pypcode/processors/AARCH64/data/languages/AARCH64sve.sinc ================================================ # INFO This file automatically generated by andre on Mon Apr 30 14:51:39 2018 # INFO Direct edits to this file may be lost in future updates # INFO Command line arguments: ['../../../ProcessorTest/test/andre/scrape/sveit.py', '--sinc'] # abs_z_p_z.xml: ABS variant SVE # PATTERN x0416a000/mask=xff3fe000 :abs Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_abs(Zd.T, Pg3_m, Zn.T); } # add_z_p_zz.xml: ADD (vectors, predicated) variant SVE # PATTERN x04000000/mask=xff3fe000 :add Zd.T, 
Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_add(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# NOTE(review): reflowed machine-generated SVE constructors. In SLEIGH a '#' comment
# runs to end of line, so the previous collapsed one-line layout commented out every
# constructor that followed a comment on the same line. Tokens are unchanged; only
# whitespace/newlines are restored (one generator comment or constructor per line).

# add_z_zi.xml: ADD (immediate) variant SVE
# PATTERN x2520c000/mask=xff3fc000
:add Zd.T, Zd.T_2, sve_shf8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_add(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); }

# add_z_zz.xml: ADD (vectors, unpredicated) variant SVE
# PATTERN x04200000/mask=xff20fc00
:add Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_add(Zd.T, Zn.T, Zm.T); }

# addpl_r_ri.xml: ADDPL variant SVE
# PATTERN x04605000/mask=xffe0f800
:addpl Rd_GPR64xsp, Rm_GPR64xsp, "#"^sve_imm6_1_m32to31 is sve_b_2331=0b000001000 & sve_b_22=1 & sve_b_21=1 & sve_rn_1620 & sve_b_1115=0b01010 & sve_imm6_0510 & sve_rd_0004 & Rd_GPR64xsp & Rm_GPR64xsp & sve_imm6_1_m32to31 { Rd_GPR64xsp = SVE_addpl(Rd_GPR64xsp, Rm_GPR64xsp, sve_imm6_1_m32to31:1); }

# addvl_r_ri.xml: ADDVL variant SVE
# PATTERN x04205000/mask=xffe0f800
:addvl Rd_GPR64xsp, Rm_GPR64xsp, "#"^sve_imm6_1_m32to31 is sve_b_2331=0b000001000 & sve_b_22=0 & sve_b_21=1 & sve_rn_1620 & sve_b_1115=0b01010 & sve_imm6_0510 & sve_rd_0004 & Rd_GPR64xsp & Rm_GPR64xsp & sve_imm6_1_m32to31 { Rd_GPR64xsp = SVE_addvl(Rd_GPR64xsp, Rm_GPR64xsp, sve_imm6_1_m32to31:1); }

# adr_z_az.xml: ADR variant Packed offsets
# PATTERN x04a0a000/mask=xffa0f000
:adr Zd.T_sz, [Zn.T_sz, Zm.T_sz^sve_mod_amount] is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1010 & sve_msz_1011 & sve_zn_0509 & sve_zd_0004 & sve_mod_amount & Zm.T_sz & Zd.T_sz & Zn.T_sz { Zd.T_sz = SVE_adr(Zd.T_sz, Zn.T_sz, Zm.T_sz, sve_mod_amount:1); }

# adr_z_az.xml: ADR variant Unpacked 32-bit signed offsets
# PATTERN x0420a000/mask=xffe0f000
:adr Zd.D, [Zn.D, Zm.D, "sxtw"^sve_msz_1011] is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1010 & sve_msz_1011 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D { Zd.D = SVE_adr(Zd.D, Zn.D, Zm.D, sve_msz_1011:1); }

# adr_z_az.xml: ADR variant Unpacked 32-bit unsigned offsets
# PATTERN x0460a000/mask=xffe0f000
:adr Zd.D, [Zn.D, Zm.D, "uxtw"^sve_msz_1011] is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1010 & sve_msz_1011 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D { Zd.D = SVE_adr(Zd.D, Zn.D, Zm.D, sve_msz_1011:1); }

# and_p_p_pp.xml: AND, ANDS (predicates) variant Flag setting
# PATTERN x25404000/mask=xfff0c210
:ands Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_s_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_ands(Pd.B, Pg_z, Pn.B, Pm.B); }

# and_p_p_pp.xml: AND, ANDS (predicates) variant Not flag setting
# PATTERN x25004000/mask=xfff0c210
:and Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_s_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_and(Pd.B, Pg_z, Pn.B, Pm.B); }

# and_z_p_zz.xml: AND (vectors, predicated) variant SVE
# PATTERN x041a0000/mask=xff3fe000
:and Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_and(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# and_z_zi.xml: AND (immediate) variant SVE
# PATTERN x05800000/mask=xfffc0000
:and Zd.T_imm13,
Zd.T_imm13_2, "#"^sve_decode_bit_mask is sve_b_2431=0b00000101 & sve_b_23=1 & sve_b_22=0 & sve_b_1821=0b0000 & sve_imm13_0517 & sve_zdn_0004 & sve_decode_bit_mask & Zd.T_imm13 & Zd.T_imm13_2 { Zd.T_imm13 = SVE_and(Zd.T_imm13, Zd.T_imm13_2, sve_decode_bit_mask:1); }

# NOTE(review): reflowed machine-generated SVE constructors. SLEIGH '#' comments run
# to end of line, so the collapsed one-line layout commented out the constructors
# following each comment. Tokens unchanged; only whitespace/newlines restored.

# and_z_zz.xml: AND (vectors, unpredicated) variant SVE
# PATTERN x04203000/mask=xffe0fc00
:and Zd.D, Zn.D, Zm.D is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D { Zd.D = SVE_and(Zd.D, Zn.D, Zm.D); }

# andv_r_p_z.xml: ANDV variant SVE
# PATTERN x041a2000/mask=xff3fe000
:andv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_andv(Rd_FPR8, Pg3, Zn.T); }

# andv_r_p_z.xml: ANDV variant SVE
# PATTERN x041a2000/mask=xff3fe000
:andv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_andv(Rd_FPR32, Pg3, Zn.T); }

# andv_r_p_z.xml: ANDV variant SVE
# PATTERN x041a2000/mask=xff3fe000
:andv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_andv(Rd_FPR16, Pg3, Zn.T); }

# andv_r_p_z.xml: ANDV variant SVE
# PATTERN x041a2000/mask=xff3fe000
:andv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_andv(Rd_FPR64, Pg3, Zn.T); }

# asr_z_p_zi.xml: ASR (immediate, predicated) variant SVE
# PATTERN x04008000/mask=xff3fe000
:asr Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m { Zd.T_tszh = SVE_asr(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1); }

# asr_z_p_zw.xml: ASR (wide elements, predicated) variant SVE
# PATTERN x04188000/mask=xff3fe000
:asr Zd.T, Pg3_m, Zd.T_2, Zn.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Pg3_m & Zn.D { Zd.T = SVE_asr(Zd.T, Pg3_m, Zd.T_2, Zn.D); }

# asr_z_p_zz.xml: ASR (vectors) variant SVE
# PATTERN x04108000/mask=xff3fe000
:asr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_asr(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# asr_z_zi.xml: ASR (immediate, unpredicated) variant SVE
# PATTERN x04209000/mask=xff20fc00
:asr Zd.T_tszh, Zn.T_tszh, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_21=1 & sve_tszl_1920 & sve_imm3_1618 & sve_b_1215=0b1001 & sve_b_11=0 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & sve_imm_shift & Zd.T_tszh & Zn.T_tszh { Zd.T_tszh = SVE_asr(Zd.T_tszh, Zn.T_tszh, sve_imm_shift:1); }

# asr_z_zw.xml: ASR (wide elements, unpredicated) variant SVE
# PATTERN x04208000/mask=xff20fc00
:asr Zd.T, Zn.T, Zm.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1000 & sve_b_11=0 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Zm.D { Zd.T = SVE_asr(Zd.T, Zn.T, Zm.D); }

# asrd_z_p_zi.xml: ASRD variant SVE
# PATTERN x04048000/mask=xff3fe000
:asrd Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m { Zd.T_tszh = SVE_asrd(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1); }

# asrr_z_p_zz.xml: ASRR variant SVE
# PATTERN x04148000/mask=xff3fe000
:asrr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_asrr(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# bic_and_z_zi.xml: BIC (immediate) variant SVE
# ALIASEDBY AND ., ., #(- - 1) if Never
# PATTERN x05800000/mask=xfffc0000
# SKIPPING bic_and_z_zi.xml because x05800000/mask=xfffc0000 has already been defined

# bic_p_p_pp.xml: BIC, BICS (predicates) variant Flag setting
# PATTERN x25404010/mask=xfff0c210
:bics Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_bics(Pd.B, Pg_z, Pn.B, Pm.B); }

# bic_p_p_pp.xml: BIC, BICS (predicates) variant Not flag setting
# PATTERN x25004010/mask=xfff0c210
:bic Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_bic(Pd.B, Pg_z, Pn.B, Pm.B); }

# bic_z_p_zz.xml: BIC (vectors, predicated) variant SVE
# PATTERN x041b0000/mask=xff3fe000
:bic Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_bic(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# bic_z_zz.xml: BIC (vectors, unpredicated) variant SVE
# PATTERN x04e03000/mask=xffe0fc00
:bic Zd.D, Zn.D, Zm.D is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_zm_1620 &
sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D { Zd.D = SVE_bic(Zd.D, Zn.D, Zm.D); }

# NOTE(review): reflowed machine-generated SVE constructors. SLEIGH '#' comments run
# to end of line, so the collapsed one-line layout commented out the constructors
# following each comment. Tokens unchanged; only whitespace/newlines restored.

# brka_p_p_p.xml: BRKA, BRKAS variant Flag setting
# PATTERN x25504000/mask=xffffc210
:brkas Pd.B, Pg_z, Pn.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B { Pd.B = SVE_brkas(Pd.B, Pg_z, Pn.B); }

# brka_p_p_p.xml: BRKA, BRKAS variant Not flag setting
# PATTERN x25104000/mask=xffffc200
:brka Pd.B, Pg_zm, Pn.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_m_04 & sve_pd_0003 & Pg_zm & Pd.B & Pn.B { Pd.B = SVE_brka(Pd.B, Pg_zm, Pn.B); }

# brkb_p_p_p.xml: BRKB, BRKBS variant Flag setting
# PATTERN x25d04000/mask=xffffc210
:brkbs Pd.B, Pg_z, Pn.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B { Pd.B = SVE_brkbs(Pd.B, Pg_z, Pn.B); }

# brkb_p_p_p.xml: BRKB, BRKBS variant Not flag setting
# PATTERN x25904000/mask=xffffc200
:brkb Pd.B, Pg_zm, Pn.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1421=0b01000001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_m_04 & sve_pd_0003 & Pg_zm & Pd.B & Pn.B { Pd.B = SVE_brkb(Pd.B, Pg_zm, Pn.B); }

# brkn_p_p_pp.xml: BRKN, BRKNS variant Flag setting
# PATTERN x25584000/mask=xffffc210
:brkns Pd.B, Pg_z, Pn.B, Pd.B_2 is sve_b_2331=0b001001010 & sve_b_22=1 & sve_b_1421=0b01100001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pdm_0003 & Pd.B & Pd.B_2 & Pg_z & Pn.B { Pd.B = SVE_brkns(Pd.B, Pg_z, Pn.B, Pd.B_2); }

# brkn_p_p_pp.xml: BRKN, BRKNS variant Not flag setting
# PATTERN x25184000/mask=xffffc210
:brkn Pd.B, Pg_z, Pn.B, Pd.B_2 is sve_b_2331=0b001001010 & sve_b_22=0 & sve_b_1421=0b01100001 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pdm_0003 & Pd.B & Pd.B_2 & Pg_z & Pn.B { Pd.B = SVE_brkn(Pd.B, Pg_z, Pn.B, Pd.B_2); }

# brkpa_p_p_pp.xml: BRKPA, BRKPAS variant Flag setting
# PATTERN x2540c000/mask=xfff0c210
:brkpas Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_brkpas(Pd.B, Pg_z, Pn.B, Pm.B); }

# brkpa_p_p_pp.xml: BRKPA, BRKPAS variant Not flag setting
# PATTERN x2500c000/mask=xfff0c210
:brkpa Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_brkpa(Pd.B, Pg_z, Pn.B, Pm.B); }

# brkpb_p_p_pp.xml: BRKPB, BRKPBS variant Flag setting
# PATTERN x2540c010/mask=xfff0c210
:brkpbs Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_brkpbs(Pd.B, Pg_z, Pn.B, Pm.B); }

# brkpb_p_p_pp.xml: BRKPB, BRKPBS variant Not flag setting
# PATTERN x2500c010/mask=xfff0c210
:brkpb Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b11 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_brkpb(Pd.B, Pg_z, Pn.B, Pm.B); }

# clasta_r_p_z.xml: CLASTA (scalar) variant SVE
# PATTERN x0530a000/mask=xff3fe000
:clasta Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b11000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR64 & Rd_GPR64_2 & Pg3 { Rd_GPR64 = SVE_clasta(Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T); }

# clasta_r_p_z.xml: CLASTA (scalar) variant SVE
# PATTERN x0530a000/mask=xff3fe000
:clasta Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b11000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR32 & Rd_GPR32_2 & Pg3 { Rd_GPR32 = SVE_clasta(Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T); }

# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE
# PATTERN x052a8000/mask=xff3fe000
:clasta Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR8 & Rd_FPR8_2 & Pg3 { Rd_FPR8 = SVE_clasta(Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T); }

# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE
# PATTERN x052a8000/mask=xff3fe000
:clasta Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR32 & Rd_FPR32_2 & Pg3 { Rd_FPR32 = SVE_clasta(Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T); }

# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE
# PATTERN x052a8000/mask=xff3fe000
:clasta Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR16 & Rd_FPR16_2 & Pg3 { Rd_FPR16 = SVE_clasta(Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T); }

# clasta_v_p_z.xml: CLASTA (SIMD&FP scalar) variant SVE
# PATTERN x052a8000/mask=xff3fe000
:clasta Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR64 & Rd_FPR64_2 & Pg3 { Rd_FPR64 = SVE_clasta(Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T); }

# clasta_z_p_zz.xml: CLASTA (vectors) variant SVE
# PATTERN x05288000/mask=xff3fe000
:clasta Zd.T, Pg3, Zd.T_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1721=0b10100 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3 { Zd.T = SVE_clasta(Zd.T, Pg3, Zd.T_2, Zn.T); }

# clastb_r_p_z.xml: CLASTB (scalar) variant SVE
# PATTERN x0531a000/mask=xff3fe000
:clastb Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b11000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR64 & Rd_GPR64_2 & Pg3 { Rd_GPR64 = SVE_clastb(Rd_GPR64, Pg3, Rd_GPR64_2, Zn.T); }

# clastb_r_p_z.xml: CLASTB (scalar) variant SVE
# PATTERN x0531a000/mask=xff3fe000
:clastb Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b11000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zm_0509 & sve_rdn_0004 & Zn.T & Rd_GPR32 & Rd_GPR32_2 & Pg3 { Rd_GPR32 = SVE_clastb(Rd_GPR32, Pg3, Rd_GPR32_2, Zn.T); }

# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE
# PATTERN x052b8000/mask=xff3fe000
:clastb Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR8 & Rd_FPR8_2 & Pg3 { Rd_FPR8 = SVE_clastb(Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T); }

# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE
# PATTERN x052b8000/mask=xff3fe000
:clastb Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR32 & Rd_FPR32_2 & Pg3 { Rd_FPR32 = SVE_clastb(Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T); }

# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE
# PATTERN x052b8000/mask=xff3fe000
:clastb Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR16 & Rd_FPR16_2 & Pg3 { Rd_FPR16 = SVE_clastb(Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T); }

# clastb_v_p_z.xml: CLASTB (SIMD&FP scalar) variant SVE
# PATTERN x052b8000/mask=xff3fe000
:clastb Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T is
sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10101 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR64 & Rd_FPR64_2 & Pg3 { Rd_FPR64 = SVE_clastb(Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T); }

# NOTE(review): reflowed machine-generated SVE constructors. SLEIGH '#' comments run
# to end of line, so the collapsed one-line layout commented out the constructors
# following each comment. Tokens unchanged; only whitespace/newlines restored.

# clastb_z_p_zz.xml: CLASTB (vectors) variant SVE
# PATTERN x05298000/mask=xff3fe000
:clastb Zd.T, Pg3, Zd.T_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1721=0b10100 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3 { Zd.T = SVE_clastb(Zd.T, Pg3, Zd.T_2, Zn.T); }

# cls_z_p_z.xml: CLS variant SVE
# PATTERN x0418a000/mask=xff3fe000
:cls Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_cls(Zd.T, Pg3_m, Zn.T); }

# clz_z_p_z.xml: CLZ variant SVE
# PATTERN x0419a000/mask=xff3fe000
:clz Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_clz(Zd.T, Pg3_m, Zn.T); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Equal
# PATTERN x25008000/mask=xff20e010
:cmpeq Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 { Pd.T = SVE_cmpeq(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Greater than
# PATTERN x25000010/mask=xff20e010
:cmpgt Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 { Pd.T = SVE_cmpgt(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Greater than or equal
# PATTERN x25000000/mask=xff20e010
:cmpge Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 { Pd.T = SVE_cmpge(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Higher
# PATTERN x24200010/mask=xff202010
:cmphi Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmphi(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Higher or same
# PATTERN x24200000/mask=xff202010
:cmphs Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmphs(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Less than
# PATTERN x25002000/mask=xff20e010
:cmplt Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 { Pd.T = SVE_cmplt(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Less than or equal
# PATTERN x25002010/mask=xff20e010
:cmple Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 { Pd.T = SVE_cmple(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Lower
# PATTERN x24202000/mask=xff202010
:cmplo Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmplo(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Lower or same
# PATTERN x24202010/mask=xff202010
:cmpls Pd.T, Pg3_z, Zn.T, "#"^sve_imm7_1420 is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=1 & sve_imm7_1420 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmpls(Pd.T, Pg3_z, Zn.T, sve_imm7_1420:1); }

# cmpeq_p_p_zi.xml: CMP (immediate) variant Not equal
# PATTERN x25008010/mask=xff20e010
:cmpne Pd.T, Pg3_z, Zn.T, "#"^sve_imm5s_1620 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=0 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & sve_imm5s_1620 { Pd.T = SVE_cmpne(Pd.T, Pg3_z, Zn.T, sve_imm5s_1620:1); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Equal
# PATTERN x24002000/mask=xff20e010
:cmpeq Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmpeq(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Greater than
# PATTERN x24004010/mask=xff20e010
:cmpgt Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmpgt(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Greater than or equal
# PATTERN x24004000/mask=xff20e010
:cmpge Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmpge(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Higher
# PATTERN x2400c010/mask=xff20e010
:cmphi Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmphi(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Higher or same
# PATTERN x2400c000/mask=xff20e010
:cmphs Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmphs(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Less than
# PATTERN x24006000/mask=xff20e010
:cmplt Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmplt(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Less than or equal
# PATTERN x24006010/mask=xff20e010
:cmple Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmple(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Lower
# PATTERN x2400e000/mask=xff20e010
:cmplo Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmplo(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Lower or same
# PATTERN
# NOTE(review): reflowed machine-generated SVE constructors. SLEIGH '#' comments run
# to end of line, so the collapsed one-line layout commented out the constructors
# following each comment. Tokens unchanged except re-prefixing '#' onto the comment
# continuation fragment below (it belongs to the preceding '# PATTERN' comment).
# x2400e010/mask=xff20e010
:cmpls Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmpls(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zw.xml: CMP (wide elements) variant Not equal
# PATTERN x24002010/mask=xff20e010
:cmpne Pd.T, Pg3_z, Zn.T, Zm.D is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z & Zm.D { Pd.T = SVE_cmpne(Pd.T, Pg3_z, Zn.T, Zm.D); }

# cmpeq_p_p_zz.xml: CMP (vectors) variant Equal
# PATTERN x2400a000/mask=xff20e010
:cmpeq Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmpeq(Pd.T, Pg3_z, Zn.T, Zm.T); }

# cmpeq_p_p_zz.xml: CMP (vectors) variant Greater than
# PATTERN x24008010/mask=xff20e010
:cmpgt Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmpgt(Pd.T, Pg3_z, Zn.T, Zm.T); }

# cmpeq_p_p_zz.xml: CMP (vectors) variant Greater than or equal
# PATTERN x24008000/mask=xff20e010
:cmpge Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmpge(Pd.T, Pg3_z, Zn.T, Zm.T); }

# cmpeq_p_p_zz.xml: CMP (vectors) variant Higher
# PATTERN x24000010/mask=xff20e010
:cmphi Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmphi(Pd.T, Pg3_z, Zn.T, Zm.T); }

# cmpeq_p_p_zz.xml: CMP (vectors) variant Higher or same
# PATTERN x24000000/mask=xff20e010
:cmphs Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmphs(Pd.T, Pg3_z, Zn.T, Zm.T); }

# cmpeq_p_p_zz.xml: CMP (vectors) variant Not equal
# PATTERN x2400a010/mask=xff20e010
:cmpne Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b00100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_cmpne(Pd.T, Pg3_z, Zn.T, Zm.T); }

# cmple_cmpeq_p_p_zz.xml: CMPLE (vectors) variant Greater than or equal
# ALIASEDBY CMPGE ., /Z, ., . if Never
# PATTERN x24008000/mask=xff20e010
# SKIPPING cmple_cmpeq_p_p_zz.xml because x24008000/mask=xff20e010 has already been defined

# cmplo_cmpeq_p_p_zz.xml: CMPLO (vectors) variant Higher
# ALIASEDBY CMPHI ., /Z, ., . if Never
# PATTERN x24000010/mask=xff20e010
# SKIPPING cmplo_cmpeq_p_p_zz.xml because x24000010/mask=xff20e010 has already been defined

# cmpls_cmpeq_p_p_zz.xml: CMPLS (vectors) variant Higher or same
# ALIASEDBY CMPHS ., /Z, ., . if Never
# PATTERN x24000000/mask=xff20e010
# SKIPPING cmpls_cmpeq_p_p_zz.xml because x24000000/mask=xff20e010 has already been defined

# cmplt_cmpeq_p_p_zz.xml: CMPLT (vectors) variant Greater than
# ALIASEDBY CMPGT ., /Z, ., . if Never
# PATTERN x24008010/mask=xff20e010
# SKIPPING cmplt_cmpeq_p_p_zz.xml because x24008010/mask=xff20e010 has already been defined

# cnot_z_p_z.xml: CNOT variant SVE
# PATTERN x041ba000/mask=xff3fe000
:cnot Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_cnot(Zd.T, Pg3_m, Zn.T); }

# cnt_z_p_z.xml: CNT variant SVE
# PATTERN x041aa000/mask=xff3fe000
:cnt Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_cnt(Zd.T, Pg3_m, Zn.T); }

# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Byte
# PATTERN x0420e000/mask=xfff0fc00
:cntb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern { Rd_GPR64 = SVE_cntb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Doubleword
# PATTERN x04e0e000/mask=xfff0fc00
:cntd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern { Rd_GPR64 = SVE_cntd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Halfword
# PATTERN x0460e000/mask=xfff0fc00
:cnth Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern { Rd_GPR64 = SVE_cnth(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# cntb_r_s.xml: CNTB, CNTD, CNTH, CNTW variant Word
# PATTERN x04a0e000/mask=xfff0fc00
:cntw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rd_0004 & sve_pattern & sve_imm4_1_1to16 & Rd_GPR64 & sve_mul_pattern { Rd_GPR64 = SVE_cntw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# cntp_r_p_p.xml: CNTP variant SVE
# PATTERN x25208000/mask=xff3fc200
:cntp Rd_GPR64, Pg, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b10 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_rd_0004 & Pn.T & Rd_GPR64 & Pg { Rd_GPR64 = SVE_cntp(Rd_GPR64, Pn.T, Pg); }

# compact_z_p_z.xml: COMPACT variant SVE
# PATTERN x05a18000/mask=xffbfe000
:compact Zd.T_sz, Pg3, Zn.T_sz is sve_b_2331=0b000001011 & sve_sz_22 & sve_b_1321=0b100001100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T_sz & Zn.T_sz & Pg3 { Zd.T_sz = SVE_compact(Zd.T_sz, Pg3, Zn.T_sz); }

# cpy_z_p_i.xml: CPY (immediate) variant SVE
# PATTERN x05100000/mask=xff308000
:cpy Zd.T, Pm_zm, sve_shf8_1_m128to127 is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b01 & sve_pg_1619 & sve_b_15=0 & sve_m_14 & sve_sh_13 & sve_imm8_0512 & sve_zd_0004 & sve_shift_13 & Pm_zm & Zd.T & sve_imm8_1_m128to127 & sve_shf8_1_m128to127 { Zd.T = SVE_cpy(Zd.T, Pm_zm, sve_shf8_1_m128to127, sve_shift_13:1); }

# cpy_z_p_r.xml: CPY (scalar) variant SVE
# PATTERN x0528a000/mask=xff3fe000
:cpy Zd.T, Pg3_m, Rn_GPR64xsp is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1321=0b101000101 & sve_pg_1012 & sve_rn_0509 & sve_zd_0004 & Rn_GPR64xsp & Zd.T & Pg3_m { Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_GPR64xsp); }

# cpy_z_p_r.xml: CPY (scalar) variant SVE
# PATTERN x0528a000/mask=xff3fe000
:cpy Zd.T, Pg3_m, Rn_GPR32xsp is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1321=0b101000101 & sve_pg_1012 & sve_rn_0509 & sve_zd_0004 & Rn_GPR32xsp & Zd.T & Pg3_m { Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_GPR32xsp); }

#
# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE
# PATTERN x05208000/mask=xff3fe000
:cpy Zd.T, Pg3_m, Rn_FPR8 is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR8 & Pg3_m { Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR8); }

# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE
# PATTERN x05208000/mask=xff3fe000
:cpy Zd.T, Pg3_m, Rn_FPR32 is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR32 & Pg3_m { Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR32); }

# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE
# PATTERN x05208000/mask=xff3fe000
:cpy Zd.T, Pg3_m, Rn_FPR16 is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR16 & Pg3_m { Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR16); }

# cpy_z_p_v.xml: CPY (SIMD&FP scalar) variant SVE
# PATTERN x05208000/mask=xff3fe000
:cpy Zd.T, Pg3_m, Rn_FPR64 is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1321=0b100000100 & sve_pg_1012 & sve_vn_0509 & sve_zd_0004 & Zd.T & Rn_FPR64 & Pg3_m { Zd.T = SVE_cpy(Zd.T, Pg3_m, Rn_FPR64); }

# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Equal
# PATTERN x25a02000/mask=xffa0fc1f
:ctermeq Rn_GPR64, Rm_GPR64 is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=1 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR64 & Rm_GPR64 { SVE_ctermeq(Rn_GPR64, Rm_GPR64); }

# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Equal
# PATTERN x25a02000/mask=xffa0fc1f
:ctermeq Rn_GPR32, Rm_GPR32 is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=0 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR32 & Rm_GPR32 { SVE_ctermeq(Rn_GPR32, Rm_GPR32); }

# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Not equal
# PATTERN x25a02010/mask=xffa0fc1f
:ctermne Rn_GPR64, Rm_GPR64 is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=1 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=1 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR64 & Rm_GPR64 { SVE_ctermne(Rn_GPR64, Rm_GPR64); }

# ctermeq_rr.xml: CTERMEQ, CTERMNE variant Not equal
# PATTERN x25a02010/mask=xffa0fc1f
:ctermne Rn_GPR32, Rm_GPR32 is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=1 & sve_sz_22=0 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b001000 & sve_rn_0509 & sve_b_04=1 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Rn_GPR32 & Rm_GPR32 { SVE_ctermne(Rn_GPR32, Rm_GPR32); }

# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Byte
# PATTERN x0430e400/mask=xfff0fc00
:decb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_decb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Doubleword
# PATTERN x04f0e400/mask=xfff0fc00
:decd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_decd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Halfword
# PATTERN x0470e400/mask=xfff0fc00
:dech Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_dech(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decb_r_rs.xml: DECB, DECD, DECH, DECW (scalar) variant Word
# PATTERN x04b0e400/mask=xfff0fc00
:decw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_decw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decd_z_zs.xml: DECD, DECH, DECW (vector) variant Doubleword
# PATTERN x04f0c400/mask=xfff0fc00
:decd Zd.D^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern { Zd.D = SVE_decd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decd_z_zs.xml: DECD, DECH, DECW (vector) variant Halfword
# PATTERN x0470c400/mask=xfff0fc00
:dech Zd.H^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern { Zd.H = SVE_dech(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decd_z_zs.xml: DECD, DECH, DECW (vector) variant Word
# PATTERN x04b0c400/mask=xfff0fc00
:decw Zd.S^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern { Zd.S = SVE_decw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); }

# decp_r_p_r.xml: DECP (scalar) variant SVE
# PATTERN x252d8800/mask=xff3ffe00
:decp Rd_GPR64, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 { Rd_GPR64 = SVE_decp(Rd_GPR64, Pn.T); }

# decp_z_p_z.xml: DECP (vector) variant SVE
# PATTERN x252d8000/mask=xff3ffe00
:decp Zd.T, Pn is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn { Zd.T = SVE_decp(Zd.T, Pn); }

# dup_z_i.xml: DUP (immediate) variant SVE
# PATTERN x2538c000/mask=xff3fc000
:dup Zd.T, sve_shf8_1_m128to127 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b111 & sve_b_18=0 & sve_b_17=0 & sve_b_1416=0b011 & sve_sh_13 & sve_imm8_0512 & sve_zd_0004 & sve_shift_13 & Zd.T & sve_imm8_1_m128to127 & sve_shf8_1_m128to127 { Zd.T = SVE_dup(Zd.T, sve_shf8_1_m128to127, sve_shift_13:1); }

# dup_z_r.xml: DUP (scalar) variant SVE
# PATTERN x05203800/mask=xff3ffc00
:dup Zd.T, Rn_GPR64xsp is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1021=0b100000001110 & sve_rn_0509 & sve_zd_0004 & Rn_GPR64xsp & Zd.T { Zd.T = SVE_dup(Zd.T, Rn_GPR64xsp); }

# dup_z_r.xml: DUP (scalar) variant SVE
# PATTERN x05203800/mask=xff3ffc00
:dup Zd.T, Rn_GPR32xsp is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1021=0b100000001110 & sve_rn_0509 & sve_zd_0004 & Rn_GPR32xsp & Zd.T { Zd.T = SVE_dup(Zd.T, Rn_GPR32xsp); }

# dup_z_zi.xml: DUP (indexed) variant SVE
# PATTERN x05202000/mask=xff20fc00
:dup Zd.T_tsz, Zn.T_tsz[sve_imm2_tsz] is sve_b_2431=0b00000101 & sve_imm2_2223 & sve_b_21=1 & sve_tsz_1620 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zd.T_tsz & Zn.T_tsz & sve_imm2_tsz { Zd.T_tsz = SVE_dup(Zd.T_tsz, Zn.T_tsz, sve_imm2_tsz:1); }

# dupm_z_i.xml: DUPM variant SVE
# PATTERN x05c00000/mask=xfffc0000
:dupm Zd.T_imm13, "#"^sve_decode_bit_mask is sve_b_1831=0b00000101110000 & sve_imm13_0517 & sve_zd_0004 & sve_decode_bit_mask & Zd.T_imm13 { Zd.T_imm13 = SVE_dupm(Zd.T_imm13, sve_decode_bit_mask:1); }

# eon_eor_z_zi.xml: EON variant SVE
# ALIASEDBY EOR ., ., #(- - 1) if Never
# PATTERN x05400000/mask=xfffc0000
:eon Zd.T_imm13, Zd.T_imm13_2, "#"^sve_decode_bit_mask is sve_b_2431=0b00000101 & sve_b_23=0 & sve_b_22=1 & sve_b_1821=0b0000 & sve_imm13_0517 & sve_zdn_0004 & sve_decode_bit_mask & Zd.T_imm13 & Zd.T_imm13_2 { Zd.T_imm13 = SVE_eon(Zd.T_imm13, Zd.T_imm13_2, sve_decode_bit_mask:1); }

# eor_p_p_pp.xml: EOR, EORS (predicates) variant Flag setting
# PATTERN x25404200/mask=xfff0c210
:eors Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_eors(Pd.B, Pg_z, Pn.B, Pm.B); }

# eor_p_p_pp.xml: EOR, EORS (predicates) variant Not flag setting
# PATTERN x25004200/mask=xfff0c210
:eor Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_eor(Pd.B, Pg_z, Pn.B, Pm.B); }

# eor_z_p_zz.xml: EOR (vectors, predicated) variant SVE
# PATTERN x04190000/mask=xff3fe000
:eor Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_eor(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# eor_z_zi.xml: EOR (immediate) variant SVE
# PATTERN x05400000/mask=xfffc0000
# SKIPPING eor_z_zi.xml because x05400000/mask=xfffc0000 has already been defined

# eor_z_zz.xml: EOR (vectors, unpredicated) variant SVE
# PATTERN x04a03000/mask=xffe0fc00
:eor Zd.D, Zn.D, Zm.D is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm.D { Zd.D = SVE_eor(Zd.D, Zn.D, Zm.D); }

# eorv_r_p_z.xml: EORV variant SVE
# PATTERN x04192000/mask=xff3fe000
:eorv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_eorv(Rd_FPR8, Pg3, Zn.T); }

# eorv_r_p_z.xml: EORV variant SVE
# PATTERN x04192000/mask=xff3fe000
:eorv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_eorv(Rd_FPR32, Pg3, Zn.T); }

# eorv_r_p_z.xml: EORV variant SVE
# PATTERN x04192000/mask=xff3fe000
:eorv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_eorv(Rd_FPR16, Pg3, Zn.T); }

# eorv_r_p_z.xml: EORV variant SVE
# PATTERN x04192000/mask=xff3fe000
:eorv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_eorv(Rd_FPR64, Pg3, Zn.T); }

# ext_z_zi.xml: EXT variant SVE
# PATTERN x05200000/mask=xffe0e000
:ext Zd.B, Zd.B_2, Zn.B, "#"^sve_imm8_2_0to255 is sve_b_2131=0b00000101001 & sve_imm8h_1620 & sve_b_1315=0b000 & sve_imm8l_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.B & Zd.B_2 & Zn.B & sve_imm8_2_0to255 { Zd.B = SVE_ext(Zd.B, Zd.B_2, Zn.B, sve_imm8_2_0to255:1); }

# fabd_z_p_zz.xml: FABD variant SVE
# PATTERN x65088000/mask=xff3fe000
:fabd Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b100 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fabd(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fabs_z_p_z.xml: FABS variant SVE
# PATTERN x041ca000/mask=xff3fe000
:fabs Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_fabs(Zd.T, Pg3_m, Zn.T); }

# facge_p_p_zz.xml: FAC variant Greater than
# PATTERN x6500e010/mask=xff20e010
:facgt Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_facgt(Pd.T, Pg3_z, Zn.T, Zm.T); }

# facge_p_p_zz.xml: FAC variant Greater than or equal
# PATTERN x6500c010/mask=xff20e010
:facge Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_facge(Pd.T, Pg3_z, Zn.T, Zm.T); }

# facle_facge_p_p_zz.xml: FACLE variant Greater than or equal
# ALIASEDBY FACGE ., /Z, ., . if Never
# PATTERN x6500c010/mask=xff20e010
# SKIPPING facle_facge_p_p_zz.xml because x6500c010/mask=xff20e010 has already been defined

# faclt_facge_p_p_zz.xml: FACLT variant Greater than
# ALIASEDBY FACGT ., /Z, ., . if Never
# PATTERN x6500e010/mask=xff20e010
# SKIPPING faclt_facge_p_p_zz.xml because x6500e010/mask=xff20e010 has already been defined

# fadd_z_p_zs.xml: FADD (immediate) variant SVE
# PATTERN x65188000/mask=xff3fe3c0
:fadd Zd.T, Pg3_m, Zd.T_2, sve_float_0510 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0510 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fadd(Zd.T, Pg3_m, Zd.T_2, sve_float_0510:1); }

# fadd_z_p_zz.xml: FADD (vectors, predicated) variant SVE
# PATTERN x65008000/mask=xff3fe000
:fadd Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b000 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fadd(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fadd_z_zz.xml: FADD (vectors, unpredicated) variant SVE
# PATTERN x65000000/mask=xff20fc00
:fadd Zd.T, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_fadd(Zd.T, Zn.T, Zm.T); }

# fadda_v_p_z.xml: FADDA variant SVE
# PATTERN x65182000/mask=xff3fe000
:fadda Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR8 & Rd_FPR8_2 & Pg3 { Rd_FPR8 = SVE_fadda(Rd_FPR8, Pg3, Rd_FPR8_2, Zn.T); }

# fadda_v_p_z.xml: FADDA variant SVE
# PATTERN x65182000/mask=xff3fe000
:fadda Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR32 & Rd_FPR32_2 & Pg3 { Rd_FPR32 = SVE_fadda(Rd_FPR32, Pg3, Rd_FPR32_2, Zn.T); }

# fadda_v_p_z.xml: FADDA variant SVE
# PATTERN x65182000/mask=xff3fe000
:fadda Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR16 & Rd_FPR16_2 & Pg3 { Rd_FPR16 = SVE_fadda(Rd_FPR16, Pg3, Rd_FPR16_2, Zn.T); }

# fadda_v_p_z.xml: FADDA variant SVE
# PATTERN x65182000/mask=xff3fe000
:fadda Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zm_0509 & sve_vdn_0004 & Zn.T & Rd_FPR64 & Rd_FPR64_2 & Pg3 { Rd_FPR64 = SVE_fadda(Rd_FPR64, Pg3, Rd_FPR64_2, Zn.T); }

# faddv_v_p_z.xml: FADDV variant SVE
# PATTERN x65002000/mask=xff3fe000
:faddv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_faddv(Rd_FPR8, Pg3, Zn.T); }

# faddv_v_p_z.xml: FADDV variant SVE
# PATTERN x65002000/mask=xff3fe000
:faddv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_faddv(Rd_FPR32, Pg3, Zn.T); }

# faddv_v_p_z.xml: FADDV variant SVE
# PATTERN x65002000/mask=xff3fe000
:faddv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_faddv(Rd_FPR16, Pg3, Zn.T); }

# faddv_v_p_z.xml: FADDV variant SVE
# PATTERN x65002000/mask=xff3fe000
:faddv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_faddv(Rd_FPR64, Pg3, Zn.T); }

# fcadd_z_p_zz.xml: FCADD variant SVE
# PATTERN x64008000/mask=xff3ee000
:fcadd Zd.T, Pg3_m, Zd.T_2, Zn.T, sve_rot_16 is sve_b_2431=0b01100100 & sve_size_2223 & sve_b_1721=0b00000 & sve_rot_16 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fcadd(Zd.T, Pg3_m, Zd.T_2, Zn.T, sve_rot_16:1); }

# fcmeq_p_p_z0.xml: FCM (zero) variant Equal
# PATTERN x65122000/mask=xff3fe010
:fcmeq Pd.T, Pg3_z, Zn.T, "#0.0" is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmeq(Pd.T, Pg3_z, Zn.T); }

# fcmeq_p_p_z0.xml: FCM (zero) variant Greater than
# PATTERN x65102010/mask=xff3fe010
:fcmgt Pd.T, Pg3_z, Zn.T, "#0.0" is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmgt(Pd.T, Pg3_z, Zn.T); }

# fcmeq_p_p_z0.xml: FCM (zero) variant Greater than or equal
# PATTERN x65102000/mask=xff3fe010
:fcmge Pd.T, Pg3_z, Zn.T, "#0.0" is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmge(Pd.T, Pg3_z, Zn.T); }

# fcmeq_p_p_z0.xml: FCM (zero) variant Less than
# PATTERN x65112000/mask=xff3fe010
:fcmlt Pd.T, Pg3_z, Zn.T, "#0.0" is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmlt(Pd.T, Pg3_z, Zn.T); }

# fcmeq_p_p_z0.xml: FCM (zero) variant Less than or equal
# PATTERN x65112010/mask=xff3fe010
:fcmle Pd.T, Pg3_z, Zn.T, "#0.0" is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmle(Pd.T, Pg3_z, Zn.T); }

# fcmeq_p_p_z0.xml: FCM (zero) variant Not equal
# PATTERN x65132000/mask=xff3fe010
:fcmne Pd.T, Pg3_z, Zn.T, "#0.0" is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0100 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmne(Pd.T, Pg3_z, Zn.T); }

# fcmeq_p_p_zz.xml: FCM (vectors) variant Equal
# PATTERN x65006000/mask=xff20e010
:fcmeq Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmeq(Pd.T, Pg3_z, Zn.T, Zm.T); }

# fcmeq_p_p_zz.xml: FCM (vectors) variant Greater than
# PATTERN x65004010/mask=xff20e010
:fcmgt Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmgt(Pd.T, Pg3_z, Zn.T, Zm.T); }
# fcmeq_p_p_zz.xml: FCM (vectors) variant Greater than or equal
# PATTERN x65004000/mask=xff20e010
:fcmge Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmge(Pd.T, Pg3_z, Zn.T, Zm.T); }

# fcmeq_p_p_zz.xml: FCM (vectors) variant Not equal
# PATTERN x65006010/mask=xff20e010
:fcmne Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_b_04=1 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmne(Pd.T, Pg3_z, Zn.T, Zm.T); }

# fcmeq_p_p_zz.xml: FCM (vectors) variant Unordered
# PATTERN x6500c000/mask=xff20e010
:fcmuo Pd.T, Pg3_z, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_pd_0003 & Zm.T & Pd.T & Zn.T & Pg3_z { Pd.T = SVE_fcmuo(Pd.T, Pg3_z, Zn.T, Zm.T); }

# fcmla_z_p_zzz.xml: FCMLA (vectors) variant SVE
# PATTERN x64000000/mask=xff208000
:fcmla Zd.T, Pg3_m, Zn.T, Zm.T, sve_rot_1314 is sve_b_2431=0b01100100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_rot_1314 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_fcmla(Zd.T, Pg3_m, Zn.T, Zm.T, sve_rot_1314:1); }

# fcmla_z_zzzi.xml: FCMLA (indexed) variant Half-precision
# PATTERN x64a01000/mask=xffe0f000
:fcmla Zd.H, Zn.H, Zm3.H[sve_i2_1920], sve_rot_1011 is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1215=0b0001 & sve_rot_1011 & sve_zn_0509 & sve_zda_0004 & Zd.H & Zn.H & Zm3.H { Zd.H = SVE_fcmla(Zd.H, Zn.H, Zm3.H, sve_i2_1920:1, sve_rot_1011:1); }

# fcmla_z_zzzi.xml: FCMLA (indexed) variant Single-precision
# PATTERN x64e01000/mask=xffe0f000
:fcmla Zd.S, Zn.S, Zm4.S[sve_i1_20], sve_rot_1011 is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1215=0b0001 & sve_rot_1011 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.S & Zm4.S { Zd.S = SVE_fcmla(Zd.S, Zn.S, Zm4.S, sve_i1_20:1, sve_rot_1011:1); }

# fcmle_fcmeq_p_p_zz.xml: FCMLE (vectors) variant Greater than or equal
# ALIASEDBY FCMGE ., /Z, ., . if Never
# PATTERN x65004000/mask=xff20e010
# SKIPPING fcmle_fcmeq_p_p_zz.xml because x65004000/mask=xff20e010 has already been defined

# fcmlt_fcmeq_p_p_zz.xml: FCMLT (vectors) variant Greater than
# ALIASEDBY FCMGT ., /Z, ., . if Never
# PATTERN x65004010/mask=xff20e010
# SKIPPING fcmlt_fcmeq_p_p_zz.xml because x65004010/mask=xff20e010 has already been defined

# fcpy_z_p_i.xml: FCPY variant SVE
# PATTERN x0510c000/mask=xff30e000
:fcpy Zd.T, Pm_m, "#"^sve_float_imm8 is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b01 & sve_pg_1619 & sve_b_1315=0b110 & sve_imm8_0512 & sve_zd_0004 & sve_float_imm8 & Zd.T & Pm_m { Zd.T = SVE_fcpy(Zd.T, Pm_m, sve_float_imm8:1); }

# fcvt_z_p_z.xml: FCVT variant Half-precision to single-precision
# PATTERN x6589a000/mask=xffffe000
:fcvt Zd.S, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.S & Pg3_m { Zd.S = SVE_fcvt(Zd.S, Pg3_m, Zn.H); }

# fcvt_z_p_z.xml: FCVT variant Half-precision to double-precision
# PATTERN x65c9a000/mask=xffffe000
:fcvt Zd.D, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.D & Pg3_m { Zd.D = SVE_fcvt(Zd.D, Pg3_m, Zn.H); }

# fcvt_z_p_z.xml: FCVT variant Single-precision to half-precision
# PATTERN x6588a000/mask=xffffe000
:fcvt Zd.H, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.H & Pg3_m { Zd.H = SVE_fcvt(Zd.H, Pg3_m, Zn.S); }

# fcvt_z_p_z.xml: FCVT variant Single-precision to double-precision
# PATTERN x65cba000/mask=xffffe000
:fcvt Zd.D, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m { Zd.D = SVE_fcvt(Zd.D, Pg3_m, Zn.S); }

# fcvt_z_p_z.xml: FCVT variant Double-precision to half-precision
# PATTERN x65c8a000/mask=xffffe000
:fcvt Zd.H, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.H & Pg3_m { Zd.H = SVE_fcvt(Zd.H, Pg3_m, Zn.D); }

# fcvt_z_p_z.xml: FCVT variant Double-precision to single-precision
# PATTERN x65caa000/mask=xffffe000
:fcvt Zd.S, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1821=0b0010 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m { Zd.S = SVE_fcvt(Zd.S, Pg3_m, Zn.D); }

# fcvtzs_z_p_z.xml: FCVTZS variant Half-precision to 16-bit
# PATTERN x655aa000/mask=xffffe000
:fcvtzs Zd.H, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m { Zd.H = SVE_fcvtzs(Zd.H, Pg3_m, Zn.H); }

# fcvtzs_z_p_z.xml: FCVTZS variant Half-precision to 32-bit
# PATTERN x655ca000/mask=xffffe000
:fcvtzs Zd.S, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.S & Pg3_m { Zd.S = SVE_fcvtzs(Zd.S, Pg3_m, Zn.H); }

# fcvtzs_z_p_z.xml: FCVTZS variant Half-precision to 64-bit
# PATTERN x655ea000/mask=xffffe000
:fcvtzs Zd.D, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.D & Pg3_m { Zd.D = SVE_fcvtzs(Zd.D, Pg3_m, Zn.H); }

# fcvtzs_z_p_z.xml: FCVTZS variant Single-precision to 32-bit
# PATTERN x659ca000/mask=xffffe000
:fcvtzs Zd.S, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m { Zd.S = SVE_fcvtzs(Zd.S, Pg3_m, Zn.S); }

# fcvtzs_z_p_z.xml: FCVTZS variant Single-precision to 64-bit
# PATTERN x65dca000/mask=xffffe000
:fcvtzs Zd.D, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m { Zd.D = SVE_fcvtzs(Zd.D, Pg3_m, Zn.S); }

# fcvtzs_z_p_z.xml: FCVTZS variant Double-precision to 32-bit
# PATTERN x65d8a000/mask=xffffe000
:fcvtzs Zd.S, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m { Zd.S = SVE_fcvtzs(Zd.S, Pg3_m, Zn.D); }

# fcvtzs_z_p_z.xml: FCVTZS variant Double-precision to 64-bit
# PATTERN x65dea000/mask=xffffe000
:fcvtzs Zd.D, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_fcvtzs(Zd.D, Pg3_m, Zn.D); }

# fcvtzu_z_p_z.xml: FCVTZU variant Half-precision to 16-bit
# PATTERN x655ba000/mask=xffffe000
:fcvtzu Zd.H, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m { Zd.H = SVE_fcvtzu(Zd.H, Pg3_m, Zn.H); }

# fcvtzu_z_p_z.xml: FCVTZU variant Half-precision to
# 32-bit
# PATTERN x655da000/mask=xffffe000
:fcvtzu Zd.S, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.S & Pg3_m { Zd.S = SVE_fcvtzu(Zd.S, Pg3_m, Zn.H); }

# fcvtzu_z_p_z.xml: FCVTZU variant Half-precision to 64-bit
# PATTERN x655fa000/mask=xffffe000
:fcvtzu Zd.D, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.D & Pg3_m { Zd.D = SVE_fcvtzu(Zd.D, Pg3_m, Zn.H); }

# fcvtzu_z_p_z.xml: FCVTZU variant Single-precision to 32-bit
# PATTERN x659da000/mask=xffffe000
:fcvtzu Zd.S, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m { Zd.S = SVE_fcvtzu(Zd.S, Pg3_m, Zn.S); }

# fcvtzu_z_p_z.xml: FCVTZU variant Single-precision to 64-bit
# PATTERN x65dda000/mask=xffffe000
:fcvtzu Zd.D, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m { Zd.D = SVE_fcvtzu(Zd.D, Pg3_m, Zn.S); }

# fcvtzu_z_p_z.xml: FCVTZU variant Double-precision to 32-bit
# PATTERN x65d9a000/mask=xffffe000
:fcvtzu Zd.S, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m { Zd.S = SVE_fcvtzu(Zd.S, Pg3_m, Zn.D); }

# fcvtzu_z_p_z.xml: FCVTZU variant Double-precision to 64-bit
# PATTERN x65dfa000/mask=xffffe000
:fcvtzu Zd.D, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b011 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_fcvtzu(Zd.D, Pg3_m, Zn.D); }

# fdiv_z_p_zz.xml: FDIV variant SVE
# PATTERN x650d8000/mask=xff3fe000
:fdiv Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b110 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fdiv(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fdivr_z_p_zz.xml: FDIVR variant SVE
# PATTERN x650c8000/mask=xff3fe000
:fdivr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b110 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fdivr(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fdup_z_i.xml: FDUP variant SVE
# PATTERN x2539c000/mask=xff3fe000
:fdup Zd.T, "#"^sve_float_imm8 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b111 & sve_b_18=0 & sve_b_17=0 & sve_b_1416=0b111 & sve_b_13=0 & sve_imm8_0512 & sve_zd_0004 & sve_float_imm8 & Zd.T { Zd.T = SVE_fdup(Zd.T, sve_float_imm8:1); }

# fexpa_z_z.xml: FEXPA variant SVE
# PATTERN x0420b800/mask=xff3ffc00
:fexpa Zd.T, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_b_1720=0b0000 & sve_b_16=0 & sve_b_1015=0b101110 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T { Zd.T = SVE_fexpa(Zd.T, Zn.T); }

# fmad_z_p_zzz.xml: FMAD variant SVE
# PATTERN x65208000/mask=xff20e000
:fmad Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m { Zd.T = SVE_fmad(Zd.T, Pg3_m, Zn.T, Zm.T); }

# fmax_z_p_zs.xml: FMAX (immediate) variant SVE
# PATTERN x651e8000/mask=xff3fe3c0
:fmax Zd.T, Pg3_m, Zd.T_2, sve_float_0010 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fmax(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); }

# fmax_z_p_zz.xml: FMAX (vectors) variant SVE
# PATTERN x65068000/mask=xff3fe000
:fmax Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b011 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fmax(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fmaxnm_z_p_zs.xml: FMAXNM (immediate) variant SVE
# PATTERN x651c8000/mask=xff3fe3c0
:fmaxnm Zd.T, Pg3_m, Zd.T_2, sve_float_0010 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fmaxnm(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); }

# fmaxnm_z_p_zz.xml: FMAXNM (vectors) variant SVE
# PATTERN x65048000/mask=xff3fe000
:fmaxnm Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b010 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fmaxnm(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE
# PATTERN x65042000/mask=xff3fe000
:fmaxnmv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_fmaxnmv(Rd_FPR8, Pg3, Zn.T); }

# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE
# PATTERN x65042000/mask=xff3fe000
:fmaxnmv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_fmaxnmv(Rd_FPR32, Pg3, Zn.T); }

# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE
# PATTERN x65042000/mask=xff3fe000
:fmaxnmv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_fmaxnmv(Rd_FPR16, Pg3, Zn.T); }

# fmaxnmv_v_p_z.xml: FMAXNMV variant SVE
# PATTERN x65042000/mask=xff3fe000
:fmaxnmv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_fmaxnmv(Rd_FPR64, Pg3, Zn.T); }

# fmaxv_v_p_z.xml: FMAXV variant SVE
# PATTERN x65062000/mask=xff3fe000
:fmaxv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_fmaxv(Rd_FPR8, Pg3, Zn.T); }

# fmaxv_v_p_z.xml: FMAXV variant SVE
# PATTERN x65062000/mask=xff3fe000
:fmaxv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_fmaxv(Rd_FPR32, Pg3, Zn.T); }

# fmaxv_v_p_z.xml: FMAXV variant SVE
# PATTERN x65062000/mask=xff3fe000
:fmaxv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_fmaxv(Rd_FPR16, Pg3, Zn.T); }

# fmaxv_v_p_z.xml: FMAXV variant SVE
# PATTERN x65062000/mask=xff3fe000
:fmaxv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_fmaxv(Rd_FPR64, Pg3, Zn.T); }

# fmin_z_p_zs.xml: FMIN (immediate) variant SVE
# PATTERN x651f8000/mask=xff3fe3c0
:fmin Zd.T, Pg3_m, Zd.T_2, sve_float_0010 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fmin(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); }

# fmin_z_p_zz.xml: FMIN (vectors) variant SVE
# PATTERN x65078000/mask=xff3fe000
:fmin Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b011 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fmin(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fminnm_z_p_zs.xml: FMINNM (immediate) variant SVE
# PATTERN x651d8000/mask=xff3fe3c0
:fminnm Zd.T, Pg3_m, Zd.T_2, sve_float_0010 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0010 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fminnm(Zd.T, Pg3_m, Zd.T_2, sve_float_0010:1); }

# fminnm_z_p_zz.xml: FMINNM (vectors) variant SVE
# PATTERN x65058000/mask=xff3fe000
:fminnm Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b010 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fminnm(Zd.T, Pg3_m, Zd.T_2, Zn.T); }

# fminnmv_v_p_z.xml: FMINNMV variant SVE
# PATTERN x65052000/mask=xff3fe000
:fminnmv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_fminnmv(Rd_FPR8, Pg3, Zn.T); }

# fminnmv_v_p_z.xml: FMINNMV variant SVE
# PATTERN x65052000/mask=xff3fe000
:fminnmv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_fminnmv(Rd_FPR32, Pg3, Zn.T); }
fminnmv_v_p_z.xml: FMINNMV variant SVE # PATTERN x65052000/mask=xff3fe000 :fminnmv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_fminnmv(Rd_FPR16, Pg3, Zn.T); } # fminnmv_v_p_z.xml: FMINNMV variant SVE # PATTERN x65052000/mask=xff3fe000 :fminnmv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_fminnmv(Rd_FPR64, Pg3, Zn.T); } # fminv_v_p_z.xml: FMINV variant SVE # PATTERN x65072000/mask=xff3fe000 :fminv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b00 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_fminv(Rd_FPR8, Pg3, Zn.T); } # fminv_v_p_z.xml: FMINV variant SVE # PATTERN x65072000/mask=xff3fe000 :fminv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b10 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_fminv(Rd_FPR32, Pg3, Zn.T); } # fminv_v_p_z.xml: FMINV variant SVE # PATTERN x65072000/mask=xff3fe000 :fminv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b01 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_fminv(Rd_FPR16, Pg3, Zn.T); } # fminv_v_p_z.xml: FMINV variant SVE # PATTERN x65072000/mask=xff3fe000 :fminv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b01100101 & sve_size_2223=0b11 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_fminv(Rd_FPR64, Pg3, Zn.T); } # fmla_z_p_zzz.xml: FMLA 
(vectors) variant SVE # PATTERN x65200000/mask=xff20e000 :fmla Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_fmla(Zd.T, Pg3_m, Zn.T, Zm.T); } # fmla_z_zzzi.xml: FMLA (indexed) variant Half-precision # PATTERN x64200000/mask=xffa0fc00 :fmla Zd.H, Zn.H, Zm3.H[sve_i3h_i3l] is sve_b_2431=0b01100100 & sve_b_23=0 & sve_i3h_22 & sve_b_21=1 & sve_i3l_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.H & Zn.H & Zm3.H & sve_i3h_i3l { Zd.H = SVE_fmla(Zd.H, Zn.H, Zm3.H, sve_i3h_i3l:1); } # fmla_z_zzzi.xml: FMLA (indexed) variant Single-precision # PATTERN x64a00000/mask=xffe0fc00 :fmla Zd.S, Zn.S, Zm3.S[sve_i2_1920] is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.S & Zm3.S { Zd.S = SVE_fmla(Zd.S, Zn.S, Zm3.S, sve_i2_1920:1); } # fmla_z_zzzi.xml: FMLA (indexed) variant Double-precision # PATTERN x64e00000/mask=xffe0fc00 :fmla Zd.D, Zn.D, Zm4.D[sve_i1_20] is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.D & Zm4.D { Zd.D = SVE_fmla(Zd.D, Zn.D, Zm4.D, sve_i1_20:1); } # fmls_z_p_zzz.xml: FMLS (vectors) variant SVE # PATTERN x65202000/mask=xff20e000 :fmls Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_fmls(Zd.T, Pg3_m, Zn.T, Zm.T); } # fmls_z_zzzi.xml: FMLS (indexed) variant Half-precision # PATTERN x64200400/mask=xffa0fc00 :fmls Zd.H, Zn.H, Zm3.H[sve_i3h_i3l] is sve_b_2431=0b01100100 & sve_b_23=0 & sve_i3h_22 & sve_b_21=1 & sve_i3l_1920 & sve_zm_1618 & sve_b_1115=0b00000 & 
sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.H & Zn.H & Zm3.H & sve_i3h_i3l { Zd.H = SVE_fmls(Zd.H, Zn.H, Zm3.H, sve_i3h_i3l:1); } # fmls_z_zzzi.xml: FMLS (indexed) variant Single-precision # PATTERN x64a00400/mask=xffe0fc00 :fmls Zd.S, Zn.S, Zm3.S[sve_i2_1920] is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.S & Zm3.S { Zd.S = SVE_fmls(Zd.S, Zn.S, Zm3.S, sve_i2_1920:1); } # fmls_z_zzzi.xml: FMLS (indexed) variant Double-precision # PATTERN x64e00400/mask=xffe0fc00 :fmls Zd.D, Zn.D, Zm4.D[sve_i1_20] is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.D & Zm4.D { Zd.D = SVE_fmls(Zd.D, Zn.D, Zm4.D, sve_i1_20:1); } # fmov_cpy_z_p_i.xml: FMOV (zero, predicated) variant SVE # ALIASEDBY CPY ., /M, #0 if Never # PATTERN x05104000/mask=xff30ffe0 :fmov Zd.T, Pm_m, "#0.0" is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b01 & sve_pg_1619 & sve_b_15=0 & sve_m_14=1 & sve_sh_13=0 & sve_imm8_0512=0b00000000 & sve_zd_0004 & Zd.T & Pm_m { Zd.T = SVE_fmov(Zd.T, Pm_m); } # fmov_dup_z_i.xml: FMOV (zero, unpredicated) variant SVE # ALIASEDBY DUP ., #0 if Never # PATTERN x2538c000/mask=xff3fffe0 :fmov Zd.T, "#0.0" is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b111 & sve_b_18=0 & sve_b_17=0 & sve_b_1416=0b011 & sve_sh_13=0 & sve_imm8_0512=0b00000000 & sve_zd_0004 & Zd.T { Zd.T = SVE_fmov(Zd.T); } # fmov_fcpy_z_p_i.xml: FMOV (immediate, predicated) variant SVE # ALIASEDBY FCPY ., /M, # if Unconditionally # PATTERN x0510c000/mask=xff30e000 # SKIPPING fmov_fcpy_z_p_i.xml because x0510c000/mask=xff30e000 has already been defined # fmov_fdup_z_i.xml: FMOV (immediate, unpredicated) variant SVE # ALIASEDBY FDUP ., # if Unconditionally # PATTERN x2539c000/mask=xff3fe000 # SKIPPING fmov_fdup_z_i.xml because x2539c000/mask=xff3fe000 has already been 
defined # fmsb_z_p_zzz.xml: FMSB variant SVE # PATTERN x6520a000/mask=xff20e000 :fmsb Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m { Zd.T = SVE_fmsb(Zd.T, Pg3_m, Zn.T, Zm.T); } # fmul_z_p_zs.xml: FMUL (immediate) variant SVE # PATTERN x651a8000/mask=xff3fe3c0 :fmul Zd.T, Pg3_m, Zd.T_2, sve_float_0520 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0520 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fmul(Zd.T, Pg3_m, Zd.T_2, sve_float_0520:1); } # fmul_z_p_zz.xml: FMUL (vectors, predicated) variant SVE # PATTERN x65028000/mask=xff3fe000 :fmul Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fmul(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # fmul_z_zz.xml: FMUL (vectors, unpredicated) variant SVE # PATTERN x65000800/mask=xff20fc00 :fmul Zd.T, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b01 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_fmul(Zd.T, Zn.T, Zm.T); } # fmul_z_zzi.xml: FMUL (indexed) variant Half-precision # PATTERN x64202000/mask=xffa0fc00 :fmul Zd.H, Zn.H, Zm3.H[sve_i3h_i3l] is sve_b_2431=0b01100100 & sve_b_23=0 & sve_i3h_22 & sve_b_21=1 & sve_i3l_1920 & sve_zm_1618 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Zm3.H & sve_i3h_i3l { Zd.H = SVE_fmul(Zd.H, Zn.H, Zm3.H, sve_i3h_i3l:1); } # fmul_z_zzi.xml: FMUL (indexed) variant Single-precision # PATTERN x64a02000/mask=xffe0fc00 :fmul Zd.S, Zn.S, Zm3.S[sve_i2_1920] is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & 
sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Zm3.S { Zd.S = SVE_fmul(Zd.S, Zn.S, Zm3.S, sve_i2_1920:1); } # fmul_z_zzi.xml: FMUL (indexed) variant Double-precision # PATTERN x64e02000/mask=xffe0fc00 :fmul Zd.D, Zn.D, Zm4.D[sve_i1_20] is sve_b_2431=0b01100100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1015=0b001000 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Zm4.D { Zd.D = SVE_fmul(Zd.D, Zn.D, Zm4.D, sve_i1_20:1); } # fmulx_z_p_zz.xml: FMULX variant SVE # PATTERN x650a8000/mask=xff3fe000 :fmulx Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b101 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fmulx(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # fneg_z_p_z.xml: FNEG variant SVE # PATTERN x041da000/mask=xff3fe000 :fneg Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_fneg(Zd.T, Pg3_m, Zn.T); } # fnmad_z_p_zzz.xml: FNMAD variant SVE # PATTERN x6520c000/mask=xff20e000 :fnmad Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m { Zd.T = SVE_fnmad(Zd.T, Pg3_m, Zn.T, Zm.T); } # fnmla_z_p_zzz.xml: FNMLA variant SVE # PATTERN x65204000/mask=xff20e000 :fnmla Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_fnmla(Zd.T, Pg3_m, Zn.T, Zm.T); } # fnmls_z_p_zzz.xml: FNMLS variant SVE # PATTERN x65206000/mask=xff20e000 :fnmls Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & 
sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_fnmls(Zd.T, Pg3_m, Zn.T, Zm.T); } # fnmsb_z_p_zzz.xml: FNMSB variant SVE # PATTERN x6520e000/mask=xff20e000 :fnmsb Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=1 & sve_za_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zn.T & Zm.T & Pg3_m { Zd.T = SVE_fnmsb(Zd.T, Pg3_m, Zn.T, Zm.T); } # frecpe_z_z.xml: FRECPE variant SVE # PATTERN x650e3000/mask=xff3ffc00 :frecpe Zd.T, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b001 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T { Zd.T = SVE_frecpe(Zd.T, Zn.T); } # frecps_z_zz.xml: FRECPS variant SVE # PATTERN x65001800/mask=xff20fc00 :frecps Zd.T, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_frecps(Zd.T, Zn.T, Zm.T); } # frecpx_z_p_z.xml: FRECPX variant SVE # PATTERN x650ca000/mask=xff3fe000 :frecpx Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0011 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frecpx(Zd.T, Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Current mode # PATTERN x6507a000/mask=xff3fe000 :frinti Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frinti(Zd.T, Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Current mode signalling inexact # PATTERN x6506a000/mask=xff3fe000 :frintx Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frintx(Zd.T, 
Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Nearest with ties to away # PATTERN x6504a000/mask=xff3fe000 :frinta Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frinta(Zd.T, Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Nearest with ties to even # PATTERN x6500a000/mask=xff3fe000 :frintn Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frintn(Zd.T, Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Toward zero # PATTERN x6503a000/mask=xff3fe000 :frintz Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frintz(Zd.T, Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Toward minus infinity # PATTERN x6502a000/mask=xff3fe000 :frintm Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frintm(Zd.T, Pg3_m, Zn.T); } # frinta_z_p_z.xml: FRINT variant Toward plus infinity # PATTERN x6501a000/mask=xff3fe000 :frintp Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_frintp(Zd.T, Pg3_m, Zn.T); } # frsqrte_z_z.xml: FRSQRTE variant SVE # PATTERN x650f3000/mask=xff3ffc00 :frsqrte Zd.T, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b001 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T { Zd.T = SVE_frsqrte(Zd.T, Zn.T); } # frsqrts_z_zz.xml: FRSQRTS variant 
SVE # PATTERN x65001c00/mask=xff20fc00 :frsqrts Zd.T, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_frsqrts(Zd.T, Zn.T, Zm.T); } # fscale_z_p_zz.xml: FSCALE variant SVE # PATTERN x65098000/mask=xff3fe000 :fscale Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b100 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fscale(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # fsqrt_z_p_z.xml: FSQRT variant SVE # PATTERN x650da000/mask=xff3fe000 :fsqrt Zd.T, Pg3_m, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1821=0b0011 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_fsqrt(Zd.T, Pg3_m, Zn.T); } # fsub_z_p_zs.xml: FSUB (immediate) variant SVE # PATTERN x65198000/mask=xff3fe3c0 :fsub Zd.T, Pg3_m, Zd.T_2, sve_float_0510 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0510 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fsub(Zd.T, Pg3_m, Zd.T_2, sve_float_0510:1); } # fsub_z_p_zz.xml: FSUB (vectors, predicated) variant SVE # PATTERN x65018000/mask=xff3fe000 :fsub Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b000 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fsub(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # fsub_z_zz.xml: FSUB (vectors, unpredicated) variant SVE # PATTERN x65000400/mask=xff20fc00 :fsub Zd.T, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_fsub(Zd.T, Zn.T, Zm.T); 
} # fsubr_z_p_zs.xml: FSUBR (immediate) variant SVE # PATTERN x651b8000/mask=xff3fe3c0 :fsubr Zd.T, Pg3_m, Zd.T_2, sve_float_0510 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_b_0609=0b0000 & sve_i1_05 & sve_zdn_0004 & sve_float_0510 & Zd.T & Zd.T_2 & Pg3_m { Zd.T = SVE_fsubr(Zd.T, Pg3_m, Zd.T_2, sve_float_0510:1); } # fsubr_z_p_zz.xml: FSUBR (vectors) variant SVE # PATTERN x65038000/mask=xff3fe000 :fsubr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_2021=0b00 & sve_b_1719=0b001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_fsubr(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # ftmad_z_zzi.xml: FTMAD variant SVE # PATTERN x65108000/mask=xff38fc00 :ftmad Zd.T, Zd.T_2, Zn.T, "#"^sve_imm3_1_0to7 is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_1921=0b010 & sve_imm3_1618 & sve_b_1015=0b100000 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & sve_imm3_1_0to7 { Zd.T = SVE_ftmad(Zd.T, Zd.T_2, Zn.T, sve_imm3_1_0to7:1); } # ftsmul_z_zz.xml: FTSMUL variant SVE # PATTERN x65000c00/mask=xff20fc00 :ftsmul Zd.T, Zn.T, Zm.T is sve_b_2431=0b01100101 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b01 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_ftsmul(Zd.T, Zn.T, Zm.T); } # ftssel_z_zz.xml: FTSSEL variant SVE # PATTERN x0420b000/mask=xff20fc00 :ftssel Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1115=0b10110 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_ftssel(Zd.T, Zn.T, Zm.T); } # incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Byte # PATTERN x0430e000/mask=xfff0fc00 :incb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & 
Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_incb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Doubleword # PATTERN x04f0e000/mask=xfff0fc00 :incd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_incd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Halfword # PATTERN x0470e000/mask=xfff0fc00 :inch Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_inch(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # incb_r_rs.xml: INCB, INCD, INCH, INCW (scalar) variant Word # PATTERN x04b0e000/mask=xfff0fc00 :incw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11100 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_incw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # incd_z_zs.xml: INCD, INCH, INCW (vector) variant Doubleword # PATTERN x04f0c000/mask=xfff0fc00 :incd Zd.D^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern { Zd.D = SVE_incd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); } # incd_z_zs.xml: INCD, INCH, INCW (vector) variant Halfword # PATTERN x0470c000/mask=xfff0fc00 :inch Zd.H^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 
& sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern { Zd.H = SVE_inch(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); } # incd_z_zs.xml: INCD, INCH, INCW (vector) variant Word # PATTERN x04b0c000/mask=xfff0fc00 :incw Zd.S^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b11 & sve_imm4_1619 & sve_b_1115=0b11000 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern { Zd.S = SVE_incw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); } # incp_r_p_r.xml: INCP (scalar) variant SVE # PATTERN x252c8800/mask=xff3ffe00 :incp Rd_GPR64, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 { Rd_GPR64 = SVE_incp(Rd_GPR64, Pn.T); } # incp_z_p_z.xml: INCP (vector) variant SVE # PATTERN x252c8000/mask=xff3ffe00 :incp Zd.T, Pn is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1011 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn { Zd.T = SVE_incp(Zd.T, Pn); } # index_z_ii.xml: INDEX (immediates) variant SVE # PATTERN x04204000/mask=xff20fc00 :index Zd.T, "#"^sve_imm5s_0509, "#"^sve_imm5b_1620 is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_imm5b_1620 & sve_b_1015=0b010000 & sve_imm5_0509 & sve_zd_0004 & Zd.T & sve_imm5s_0509 { Zd.T = SVE_index(Zd.T, sve_imm5s_0509:1, sve_imm5b_1620:1); } # index_z_ir.xml: INDEX (immediate, scalar) variant SVE # PATTERN x04204800/mask=xff20fc00 :index Zd.T, "#"^sve_imm5_1_m16to15, Rm_GPR64 is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010010 & sve_imm5_0509 & sve_zd_0004 & Zd.T & Rm_GPR64 & sve_imm5_1_m16to15 { Zd.T = SVE_index(Zd.T, sve_imm5_1_m16to15:1, Rm_GPR64); } # index_z_ir.xml: INDEX (immediate, scalar) variant SVE # PATTERN x04204800/mask=xff20fc00 :index Zd.T, 
"#"^sve_imm5_1_m16to15, Rm_GPR32 is sve_b_2431=0b00000100 & (b_23=0 | b_22=0) & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010010 & sve_imm5_0509 & sve_zd_0004 & Zd.T & Rm_GPR32 & sve_imm5_1_m16to15 { Zd.T = SVE_index(Zd.T, sve_imm5_1_m16to15:1, Rm_GPR32); } # index_z_ri.xml: INDEX (scalar, immediate) variant SVE # PATTERN x04204400/mask=xff20fc00 :index Zd.T, Rn_GPR64, "#"^sve_imm5_1_m16to15 is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_21=1 & sve_imm5_1620 & sve_b_1015=0b010001 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR64 & sve_imm5_1_m16to15 { Zd.T = SVE_index(Zd.T, Rn_GPR64, sve_imm5_1_m16to15:1); } # index_z_ri.xml: INDEX (scalar, immediate) variant SVE # PATTERN x04204400/mask=xff20fc00 :index Zd.T, Rn_GPR32, "#"^sve_imm5_1_m16to15 is sve_b_2431=0b00000100 & (b_23=0 | b_22=0) & sve_b_21=1 & sve_imm5_1620 & sve_b_1015=0b010001 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR32 & sve_imm5_1_m16to15 { Zd.T = SVE_index(Zd.T, Rn_GPR32, sve_imm5_1_m16to15:1); } # index_z_rr.xml: INDEX (scalars) variant SVE # PATTERN x04204c00/mask=xff20fc00 :index Zd.T, Rn_GPR64, Rm_GPR64 is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010011 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR64 & Rm_GPR64 { Zd.T = SVE_index(Zd.T, Rn_GPR64, Rm_GPR64); } # index_z_rr.xml: INDEX (scalars) variant SVE # PATTERN x04204c00/mask=xff20fc00 :index Zd.T, Rn_GPR32, Rm_GPR32 is sve_b_2431=0b00000100 & (b_23=0 | b_22=0) & sve_b_21=1 & sve_rm_1620 & sve_b_1015=0b010011 & sve_rn_0509 & sve_zd_0004 & Zd.T & Rn_GPR32 & Rm_GPR32 { Zd.T = SVE_index(Zd.T, Rn_GPR32, Rm_GPR32); } # insr_z_r.xml: INSR (scalar) variant SVE # PATTERN x05243800/mask=xff3ffc00 :insr Zd.T, Rn_GPR64 is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1021=0b100100001110 & sve_rm_0509 & sve_zdn_0004 & Zd.T & Rn_GPR64 { Zd.T = SVE_insr(Zd.T, Rn_GPR64); } # insr_z_r.xml: INSR (scalar) variant SVE # PATTERN x05243800/mask=xff3ffc00 :insr Zd.T, Rn_GPR32 is sve_b_2431=0b00000101 & (b_23=0 | 
b_22=0) & sve_b_1021=0b100100001110 & sve_rm_0509 & sve_zdn_0004 & Zd.T & Rn_GPR32 { Zd.T = SVE_insr(Zd.T, Rn_GPR32); } # insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE # PATTERN x05343800/mask=xff3ffc00 :insr Zd.T, Rn_FPR8 is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR8 { Zd.T = SVE_insr(Zd.T, Rn_FPR8); } # insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE # PATTERN x05343800/mask=xff3ffc00 :insr Zd.T, Rn_FPR32 is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR32 { Zd.T = SVE_insr(Zd.T, Rn_FPR32); } # insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE # PATTERN x05343800/mask=xff3ffc00 :insr Zd.T, Rn_FPR16 is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR16 { Zd.T = SVE_insr(Zd.T, Rn_FPR16); } # insr_z_v.xml: INSR (SIMD&FP scalar) variant SVE # PATTERN x05343800/mask=xff3ffc00 :insr Zd.T, Rn_FPR64 is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1021=0b110100001110 & sve_vm_0509 & sve_zdn_0004 & Zd.T & Rn_FPR64 { Zd.T = SVE_insr(Zd.T, Rn_FPR64); } # lasta_r_p_z.xml: LASTA (scalar) variant SVE # PATTERN x0520a000/mask=xff3fe000 :lasta Rd_GPR64, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR64 & Pg3 { Rd_GPR64 = SVE_lasta(Rd_GPR64, Pg3, Zn.T); } # lasta_r_p_z.xml: LASTA (scalar) variant SVE # PATTERN x0520a000/mask=xff3fe000 :lasta Rd_GPR32, Pg3, Zn.T is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b10000 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR32 & Pg3 { Rd_GPR32 = SVE_lasta(Rd_GPR32, Pg3, Zn.T); } # lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE # PATTERN x05228000/mask=xff3fe000 :lasta Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10001 & 
sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_lasta(Rd_FPR8, Pg3, Zn.T); } # lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE # PATTERN x05228000/mask=xff3fe000 :lasta Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_lasta(Rd_FPR32, Pg3, Zn.T); } # lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE # PATTERN x05228000/mask=xff3fe000 :lasta Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_lasta(Rd_FPR16, Pg3, Zn.T); } # lasta_v_p_z.xml: LASTA (SIMD&FP scalar) variant SVE # PATTERN x05228000/mask=xff3fe000 :lasta Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10001 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_lasta(Rd_FPR64, Pg3, Zn.T); } # lastb_r_p_z.xml: LASTB (scalar) variant SVE # PATTERN x0521a000/mask=xff3fe000 :lastb Rd_GPR64, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR64 & Pg3 { Rd_GPR64 = SVE_lastb(Rd_GPR64, Pg3, Zn.T); } # lastb_r_p_z.xml: LASTB (scalar) variant SVE # PATTERN x0521a000/mask=xff3fe000 :lastb Rd_GPR32, Pg3, Zn.T is sve_b_2431=0b00000101 & (b_23=0 | b_22=0) & sve_b_1721=0b10000 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_rd_0004 & Zn.T & Rd_GPR32 & Pg3 { Rd_GPR32 = SVE_lastb(Rd_GPR32, Pg3, Zn.T); } # lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE # PATTERN x05238000/mask=xff3fe000 :lastb Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b00 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & 
sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_lastb(Rd_FPR8, Pg3, Zn.T); }
# NOTE(review): Auto-generated SLEIGH constructors for AArch64 SVE instructions
# (LASTB and the LD1* load family). Each entry pairs a generator comment (the
# source XML file plus the encoding PATTERN/mask it was derived from) with a
# table constructor whose bit-field constraints mirror that pattern. Semantics
# are modeled as opaque SVE_* pcodeops, not as explicit memory accesses.
# Do not hand-edit the constraint bits — regenerate from the spec sources.
# lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE # PATTERN x05238000/mask=xff3fe000 :lastb Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b10 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_lastb(Rd_FPR32, Pg3, Zn.T); } # lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE # PATTERN x05238000/mask=xff3fe000 :lastb Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b01 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_lastb(Rd_FPR16, Pg3, Zn.T); } # lastb_v_p_z.xml: LASTB (SIMD&FP scalar) variant SVE # PATTERN x05238000/mask=xff3fe000 :lastb Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000101 & sve_size_2223=0b11 & sve_b_1721=0b10001 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_lastb(Rd_FPR64, Pg3, Zn.T); } # ld1b_z_p_ai.xml: LD1B (vector plus immediate) variant 32-bit element # PATTERN x8420c000/mask=xffe0e000 :ld1b "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31 { Zd.S = SVE_ld1b(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31); } # ld1b_z_p_ai.xml: LD1B (vector plus immediate) variant 64-bit element # PATTERN xc420c000/mask=xffe0e000 :ld1b "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31 { Zd.D = SVE_ld1b(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31); } # ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 8-bit element # PATTERN
xa400a000/mask=xfff0e000 :ld1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_mul4_1_m8to7 { Zd.B = SVE_ld1b(Zd.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 16-bit element # PATTERN xa420a000/mask=xfff0e000 :ld1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ld1b(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 32-bit element # PATTERN xa440a000/mask=xfff0e000 :ld1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ld1b(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1b_z_p_bi.xml: LD1B (scalar plus immediate) variant 64-bit element # PATTERN xa460a000/mask=xfff0e000 :ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 8-bit element # PATTERN xa4004000/mask=xffe0e000 :ld1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64 { Zd.B = SVE_ld1b(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # 
ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 16-bit element # PATTERN xa4204000/mask=xffe0e000 :ld1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ld1b(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 32-bit element # PATTERN xa4404000/mask=xffe0e000 :ld1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ld1b(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1b_z_p_br.xml: LD1B (scalar plus scalar) variant 64-bit element # PATTERN xa4604000/mask=xffe0e000 :ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1b_z_p_bz.xml: LD1B (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4004000/mask=xffa0e000 :ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1b_z_p_bz.xml: LD1B (scalar plus vector) variant 32-bit unscaled offset # PATTERN x84004000/mask=xffa0e000 :ld1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = 
SVE_ld1b(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1b_z_p_bz.xml: LD1B (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc440c000/mask=xffe0e000 :ld1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1d_z_p_ai.xml: LD1D (vector plus immediate) variant SVE # PATTERN xc5a0c000/mask=xffe0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to248] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to248 { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to248); } # ld1d_z_p_bi.xml: LD1D (scalar plus immediate) variant SVE # PATTERN xa5e0a000/mask=xfff0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1d_z_p_br.xml: LD1D (scalar plus scalar) variant SVE # PATTERN xa5e04000/mask=xffe0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc5a04000/mask=xffa0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & 
sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc5804000/mask=xffa0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 64-bit scaled offset # PATTERN xc5e0c000/mask=xffe0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #3"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1d_z_p_bz.xml: LD1D (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc5c0c000/mask=xffe0e000 :ld1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1h_z_p_ai.xml: LD1H (vector plus immediate) variant 32-bit element # PATTERN x84a0c000/mask=xffe0e000 :ld1h "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62 { Zd.S = SVE_ld1h(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62); } # ld1h_z_p_ai.xml: LD1H (vector plus immediate) variant 64-bit element # PATTERN xc4a0c000/mask=xffe0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62] 
is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62 { Zd.D = SVE_ld1h(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62); } # ld1h_z_p_bi.xml: LD1H (scalar plus immediate) variant 16-bit element # PATTERN xa4a0a000/mask=xfff0e000 :ld1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ld1h(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1h_z_p_bi.xml: LD1H (scalar plus immediate) variant 32-bit element # PATTERN xa4c0a000/mask=xfff0e000 :ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1h_z_p_bi.xml: LD1H (scalar plus immediate) variant 64-bit element # PATTERN xa4e0a000/mask=xfff0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1h_z_p_br.xml: LD1H (scalar plus scalar) variant 16-bit element # PATTERN xa4a04000/mask=xffe0e000 :ld1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ld1h(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1h_z_p_br.xml: LD1H (scalar plus scalar) variant 32-bit element # 
PATTERN xa4c04000/mask=xffe0e000 :ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1h_z_p_br.xml: LD1H (scalar plus scalar) variant 64-bit element # PATTERN xa4e04000/mask=xffe0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit scaled offset # PATTERN x84a04000/mask=xffa0e000 :ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4a04000/mask=xffa0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4804000/mask=xffa0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D 
= SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 32-bit unscaled offset # PATTERN x84804000/mask=xffa0e000 :ld1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 64-bit scaled offset # PATTERN xc4e0c000/mask=xffe0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1h_z_p_bz.xml: LD1H (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc4c0c000/mask=xffe0e000 :ld1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); }
# NOTE(review): LD1R* entries below are the load-and-replicate forms; the four
# element-size variants of each share one PATTERN and differ only in the
# sve_b_13..15 constraint bits and the Zd element suffix, as visible in the
# adjacent constructors.
# ld1rb_z_p_bi.xml: LD1RB variant 8-bit element # PATTERN x84408000/mask=xffc0e000 :ld1rb "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_opt6_1_0to63 { Zd.B = SVE_ld1rb(Zd.B, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rb_z_p_bi.xml: LD1RB variant 16-bit element # PATTERN x8440a000/mask=xffc0e000 :ld1rb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & 
sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt6_1_0to63 { Zd.H = SVE_ld1rb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rb_z_p_bi.xml: LD1RB variant 32-bit element # PATTERN x8440c000/mask=xffc0e000 :ld1rb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to63 { Zd.S = SVE_ld1rb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rb_z_p_bi.xml: LD1RB variant 64-bit element # PATTERN x8440e000/mask=xffc0e000 :ld1rb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to63 { Zd.D = SVE_ld1rb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rd_z_p_bi.xml: LD1RD variant SVE # PATTERN x85c0e000/mask=xffc0e000 :ld1rd "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to504] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to504 { Zd.D = SVE_ld1rd(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to504); } # ld1rh_z_p_bi.xml: LD1RH variant 16-bit element # PATTERN x84c0a000/mask=xffc0e000 :ld1rh "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt6_1_0to126 { Zd.H = SVE_ld1rh(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); } # ld1rh_z_p_bi.xml: LD1RH variant 32-bit element # PATTERN x84c0c000/mask=xffc0e000 :ld1rh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] is sve_b_2531=0b1000010 & 
sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to126 { Zd.S = SVE_ld1rh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); } # ld1rh_z_p_bi.xml: LD1RH variant 64-bit element # PATTERN x84c0e000/mask=xffc0e000 :ld1rh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to126 { Zd.D = SVE_ld1rh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); } # ld1rqb_z_p_bi.xml: LD1RQB (scalar plus immediate) variant SVE # PATTERN xa4002000/mask=xfff0e000 :ld1rqb "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_opt4_1_m128to112 { Zd.B = SVE_ld1rqb(Zd.B, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); } # ld1rqb_z_p_br.xml: LD1RQB (scalar plus scalar) variant SVE # PATTERN xa4000000/mask=xffe0e000 :ld1rqb "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64 { Zd.B = SVE_ld1rqb(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1rqd_z_p_bi.xml: LD1RQD (scalar plus immediate) variant SVE # PATTERN xa5802000/mask=xfff0e000 :ld1rqd "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt4_1_m128to112 { Zd.D = SVE_ld1rqd(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); } # ld1rqd_z_p_br.xml: LD1RQD (scalar plus scalar) 
variant SVE # PATTERN xa5800000/mask=xffe0e000 :ld1rqd "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1rqd(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1rqh_z_p_bi.xml: LD1RQH (scalar plus immediate) variant SVE # PATTERN xa4802000/mask=xfff0e000 :ld1rqh "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt4_1_m128to112 { Zd.H = SVE_ld1rqh(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); } # ld1rqh_z_p_br.xml: LD1RQH (scalar plus scalar) variant SVE # PATTERN xa4800000/mask=xffe0e000 :ld1rqh "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ld1rqh(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1rqw_z_p_bi.xml: LD1RQW (scalar plus immediate) variant SVE # PATTERN xa5002000/mask=xfff0e000 :ld1rqw "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt4_1_m128to112] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b001 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt4_1_m128to112 { Zd.S = SVE_ld1rqw(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt4_1_m128to112); } # ld1rqw_z_p_br.xml: LD1RQW (scalar plus scalar) variant SVE # PATTERN xa5000000/mask=xffe0e000 :ld1rqw "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b000 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = 
SVE_ld1rqw(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); }
# NOTE(review): LD1S* entries below are presumably the sign-extending load
# variants, per the Arm mnemonic naming — the SVE_ld1s* pcodeops are opaque
# here, so the extension behavior is not visible in this file; confirm against
# the pcodeop definitions / Arm ARM if it matters to analysis.
# ld1rsb_z_p_bi.xml: LD1RSB variant 16-bit element # PATTERN x85c0c000/mask=xffc0e000 :ld1rsb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_opt6_1_0to63 { Zd.H = SVE_ld1rsb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rsb_z_p_bi.xml: LD1RSB variant 32-bit element # PATTERN x85c0a000/mask=xffc0e000 :ld1rsb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to63 { Zd.S = SVE_ld1rsb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rsb_z_p_bi.xml: LD1RSB variant 64-bit element # PATTERN x85c08000/mask=xffc0e000 :ld1rsb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to63] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to63 { Zd.D = SVE_ld1rsb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to63); } # ld1rsh_z_p_bi.xml: LD1RSH variant 32-bit element # PATTERN x8540a000/mask=xffc0e000 :ld1rsh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to126 { Zd.S = SVE_ld1rsh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); } # ld1rsh_z_p_bi.xml: LD1RSH variant 64-bit element # PATTERN x85408000/mask=xffc0e000 :ld1rsh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to126] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & 
sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to126 { Zd.D = SVE_ld1rsh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to126); } # ld1rsw_z_p_bi.xml: LD1RSW variant SVE # PATTERN x84c08000/mask=xffc0e000 :ld1rsw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to252] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to252 { Zd.D = SVE_ld1rsw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to252); } # ld1rw_z_p_bi.xml: LD1RW variant 32-bit element # PATTERN x8540c000/mask=xffc0e000 :ld1rw "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to252] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_opt6_1_0to252 { Zd.S = SVE_ld1rw(Zd.S, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to252); } # ld1rw_z_p_bi.xml: LD1RW variant 64-bit element # PATTERN x8540e000/mask=xffc0e000 :ld1rw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_opt6_1_0to252] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_22=1 & sve_imm6_1621 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_opt6_1_0to252 { Zd.D = SVE_ld1rw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_opt6_1_0to252); } # ld1sb_z_p_ai.xml: LD1SB (vector plus immediate) variant 32-bit element # PATTERN x84208000/mask=xffe0e000 :ld1sb "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31 { Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31); } # ld1sb_z_p_ai.xml: LD1SB (vector plus immediate) variant 64-bit element # PATTERN xc4208000/mask=xffe0e000 :ld1sb "{"^Zd.D^"}", Pg3_z, 
[Zn.D^sve_opt5_1_0to31] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31 { Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31); } # ld1sb_z_p_bi.xml: LD1SB (scalar plus immediate) variant 16-bit element # PATTERN xa5c0a000/mask=xfff0e000 :ld1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ld1sb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1sb_z_p_bi.xml: LD1SB (scalar plus immediate) variant 32-bit element # PATTERN xa5a0a000/mask=xfff0e000 :ld1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1sb_z_p_bi.xml: LD1SB (scalar plus immediate) variant 64-bit element # PATTERN xa580a000/mask=xfff0e000 :ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1sb_z_p_br.xml: LD1SB (scalar plus scalar) variant 16-bit element # PATTERN xa5c04000/mask=xffe0e000 :ld1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ld1sb(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1sb_z_p_br.xml: LD1SB (scalar plus 
scalar) variant 32-bit element # PATTERN xa5a04000/mask=xffe0e000 :ld1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1sb_z_p_br.xml: LD1SB (scalar plus scalar) variant 64-bit element # PATTERN xa5804000/mask=xffe0e000 :ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1sb_z_p_bz.xml: LD1SB (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4000000/mask=xffa0e000 :ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1sb_z_p_bz.xml: LD1SB (scalar plus vector) variant 32-bit unscaled offset # PATTERN x84000000/mask=xffa0e000 :ld1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1sb_z_p_bz.xml: LD1SB (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc4408000/mask=xffe0e000 :ld1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & 
Zd.D { Zd.D = SVE_ld1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1sh_z_p_ai.xml: LD1SH (vector plus immediate) variant 32-bit element # PATTERN x84a08000/mask=xffe0e000 :ld1sh "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62 { Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62); } # ld1sh_z_p_ai.xml: LD1SH (vector plus immediate) variant 64-bit element # PATTERN xc4a08000/mask=xffe0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62 { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62); } # ld1sh_z_p_bi.xml: LD1SH (scalar plus immediate) variant 32-bit element # PATTERN xa520a000/mask=xfff0e000 :ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1sh_z_p_bi.xml: LD1SH (scalar plus immediate) variant 64-bit element # PATTERN xa500a000/mask=xfff0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1sh_z_p_br.xml: LD1SH (scalar plus scalar) variant 32-bit element # PATTERN xa5204000/mask=xffe0e000 :ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_rm_1620 
& sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1sh_z_p_br.xml: LD1SH (scalar plus scalar) variant 64-bit element # PATTERN xa5004000/mask=xffe0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit scaled offset # PATTERN x84a00000/mask=xffa0e000 :ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4a00000/mask=xffa0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4800000/mask=xffa0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 32-bit unscaled offset # PATTERN 
x84800000/mask=xffa0e000 :ld1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 64-bit scaled offset # PATTERN xc4e08000/mask=xffe0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1sh_z_p_bz.xml: LD1SH (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc4c08000/mask=xffe0e000 :ld1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1sw_z_p_ai.xml: LD1SW (vector plus immediate) variant SVE # PATTERN xc5208000/mask=xffe0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124 { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124); } # ld1sw_z_p_bi.xml: LD1SW (scalar plus immediate) variant SVE # PATTERN xa480a000/mask=xfff0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, 
Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1sw_z_p_br.xml: LD1SW (scalar plus scalar) variant SVE # PATTERN xa4804000/mask=xffe0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc5200000/mask=xffa0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc5000000/mask=xffa0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 64-bit scaled offset # PATTERN xc5608000/mask=xffe0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1sw_z_p_bz.xml: LD1SW (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc5408000/mask=xffe0e000 :ld1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & 
sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1w_z_p_ai.xml: LD1W (vector plus immediate) variant 32-bit element # PATTERN x8520c000/mask=xffe0e000 :ld1w "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to124] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to124 { Zd.S = SVE_ld1w(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to124); } # ld1w_z_p_ai.xml: LD1W (vector plus immediate) variant 64-bit element # PATTERN xc520c000/mask=xffe0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124 { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124); } # ld1w_z_p_bi.xml: LD1W (scalar plus immediate) variant 32-bit element # PATTERN xa540a000/mask=xfff0e000 :ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1w_z_p_bi.xml: LD1W (scalar plus immediate) variant 64-bit element # PATTERN xa560a000/mask=xfff0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ld1w_z_p_br.xml: LD1W (scalar plus scalar) variant 32-bit element # PATTERN xa5404000/mask=xffe0e000 :ld1w "{"^Zd.S^"}", 
Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1w_z_p_br.xml: LD1W (scalar plus scalar) variant 64-bit element # PATTERN xa5604000/mask=xffe0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit scaled offset # PATTERN x85204000/mask=xffa0e000 :ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] is sve_b_2331=0b100001010 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc5204000/mask=xffa0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc5004000/mask=xffa0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, 
sve_mod:1); } # ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 32-bit unscaled offset # PATTERN x85004000/mask=xffa0e000 :ld1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ld1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 64-bit scaled offset # PATTERN xc560c000/mask=xffe0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld1w_z_p_bz.xml: LD1W (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc540c000/mask=xffe0e000 :ld1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ld1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ld2b_z_p_bi.xml: LD2B (scalar plus immediate) variant SVE # PATTERN xa420e000/mask=xfff0e000 :ld2b "{"^Zt.B, Ztt.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14 { Zt.B = SVE_ld2b(Zt.B, Ztt.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14); } # ld2b_z_p_br.xml: LD2B (scalar plus scalar) variant SVE # PATTERN xa420c000/mask=xffe0e000 :ld2b "{"^Zt.B, Ztt.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & 
sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.B = SVE_ld2b(Zt.B, Ztt.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld2d_z_p_bi.xml: LD2D (scalar plus immediate) variant SVE # PATTERN xa5a0e000/mask=xfff0e000 :ld2d "{"^Zt.D, Ztt.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14 { Zt.D = SVE_ld2d(Zt.D, Ztt.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14); } # ld2d_z_p_br.xml: LD2D (scalar plus scalar) variant SVE # PATTERN xa5a0c000/mask=xffe0e000 :ld2d "{"^Zt.D, Ztt.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.D = SVE_ld2d(Zt.D, Ztt.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld2h_z_p_bi.xml: LD2H (scalar plus immediate) variant SVE # PATTERN xa4a0e000/mask=xfff0e000 :ld2h "{"^Zt.H, Ztt.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14 { Zt.H = SVE_ld2h(Zt.H, Ztt.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14); } # ld2h_z_p_br.xml: LD2H (scalar plus scalar) variant SVE # PATTERN xa4a0c000/mask=xffe0e000 :ld2h "{"^Zt.H, Ztt.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.H = SVE_ld2h(Zt.H, Ztt.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld2w_z_p_bi.xml: LD2W (scalar plus immediate) variant SVE # PATTERN xa520e000/mask=xfff0e000 :ld2w "{"^Zt.S, 
Ztt.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m16to14 { Zt.S = SVE_ld2w(Zt.S, Ztt.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m16to14); } # ld2w_z_p_br.xml: LD2W (scalar plus scalar) variant SVE # PATTERN xa520c000/mask=xffe0e000 :ld2w "{"^Zt.S, Ztt.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.S = SVE_ld2w(Zt.S, Ztt.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld3b_z_p_bi.xml: LD3B (scalar plus immediate) variant SVE # PATTERN xa440e000/mask=xfff0e000 :ld3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21 { Zt.B = SVE_ld3b(Zt.B, Ztt.B, Zttt.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21); } # ld3b_z_p_br.xml: LD3B (scalar plus scalar) variant SVE # PATTERN xa440c000/mask=xffe0e000 :ld3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.B = SVE_ld3b(Zt.B, Ztt.B, Zttt.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld3d_z_p_bi.xml: LD3D (scalar plus immediate) variant SVE # PATTERN xa5c0e000/mask=xfff0e000 :ld3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D 
& Zttt.D & Zt.D & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21 { Zt.D = SVE_ld3d(Zt.D, Ztt.D, Zttt.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21); } # ld3d_z_p_br.xml: LD3D (scalar plus scalar) variant SVE # PATTERN xa5c0c000/mask=xffe0e000 :ld3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.D = SVE_ld3d(Zt.D, Ztt.D, Zttt.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld3h_z_p_bi.xml: LD3H (scalar plus immediate) variant SVE # PATTERN xa4c0e000/mask=xfff0e000 :ld3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21 { Zt.H = SVE_ld3h(Zt.H, Ztt.H, Zttt.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21); } # ld3h_z_p_br.xml: LD3H (scalar plus scalar) variant SVE # PATTERN xa4c0c000/mask=xffe0e000 :ld3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.H = SVE_ld3h(Zt.H, Ztt.H, Zttt.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld3w_z_p_bi.xml: LD3W (scalar plus immediate) variant SVE # PATTERN xa540e000/mask=xfff0e000 :ld3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m24to21 { Zt.S = SVE_ld3w(Zt.S, Ztt.S, Zttt.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m24to21); } # ld3w_z_p_br.xml: LD3W 
(scalar plus scalar) variant SVE # PATTERN xa540c000/mask=xffe0e000 :ld3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.S = SVE_ld3w(Zt.S, Ztt.S, Zttt.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld4b_z_p_bi.xml: LD4B (scalar plus immediate) variant SVE # PATTERN xa460e000/mask=xfff0e000 :ld4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28 { Zt.B = SVE_ld4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28); } # ld4b_z_p_br.xml: LD4B (scalar plus scalar) variant SVE # PATTERN xa460c000/mask=xffe0e000 :ld4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.B = SVE_ld4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld4d_z_p_bi.xml: LD4D (scalar plus immediate) variant SVE # PATTERN xa5e0e000/mask=xfff0e000 :ld4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28 { Zt.D = SVE_ld4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28); } # ld4d_z_p_br.xml: LD4D (scalar plus scalar) variant SVE # PATTERN xa5e0c000/mask=xffe0e000 :ld4d "{"^Zt.D, Ztt.D, 
Zttt.D, Ztttt.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.D = SVE_ld4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld4h_z_p_bi.xml: LD4H (scalar plus immediate) variant SVE # PATTERN xa4e0e000/mask=xfff0e000 :ld4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28 { Zt.H = SVE_ld4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28); } # ld4h_z_p_br.xml: LD4H (scalar plus scalar) variant SVE # PATTERN xa4e0c000/mask=xffe0e000 :ld4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.H = SVE_ld4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ld4w_z_p_bi.xml: LD4W (scalar plus immediate) variant SVE # PATTERN xa560e000/mask=xfff0e000 :ld4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & Pg3_z & sve_mul4_1_m32to28 { Zt.S = SVE_ld4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m32to28); } # ld4w_z_p_br.xml: LD4W (scalar plus scalar) variant SVE # PATTERN xa560c000/mask=xffe0e000 :ld4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, 
"lsl #2"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & Pg3_z & Rm_GPR64 { Zt.S = SVE_ld4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1b_z_p_ai.xml: LDFF1B (vector plus immediate) variant 32-bit element # PATTERN x8420e000/mask=xffe0e000 :ldff1b "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31 { Zd.S = SVE_ldff1b(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31); } # ldff1b_z_p_ai.xml: LDFF1B (vector plus immediate) variant 64-bit element # PATTERN xc420e000/mask=xffe0e000 :ldff1b "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31 { Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31); } # ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 8-bit element # PATTERN xa4006000/mask=xffe0e000 :ldff1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64 { Zd.B = SVE_ldff1b(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 16-bit element # PATTERN xa4206000/mask=xffe0e000 :ldff1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ldff1b(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 
32-bit element # PATTERN xa4406000/mask=xffe0e000 :ldff1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ldff1b(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1b_z_p_br.xml: LDFF1B (scalar plus scalar) variant 64-bit element # PATTERN xa4606000/mask=xffe0e000 :ldff1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1b_z_p_bz.xml: LDFF1B (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4006000/mask=xffa0e000 :ldff1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1b_z_p_bz.xml: LDFF1B (scalar plus vector) variant 32-bit unscaled offset # PATTERN x84006000/mask=xffa0e000 :ldff1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1b(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1b_z_p_bz.xml: LDFF1B (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc440e000/mask=xffe0e000 :ldff1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & 
Zd.D { Zd.D = SVE_ldff1b(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1d_z_p_ai.xml: LDFF1D (vector plus immediate) variant SVE # PATTERN xc5a0e000/mask=xffe0e000 :ldff1d "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to248] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to248 { Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to248); } # ldff1d_z_p_br.xml: LDFF1D (scalar plus scalar) variant SVE # PATTERN xa5e06000/mask=xffe0e000 :ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc5a06000/mask=xffa0e000 :ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc5806000/mask=xffa0e000 :ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 64-bit scaled offset # PATTERN xc5e0e000/mask=xffe0e000 :ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #3"] is sve_b_2531=0b1100010 & 
sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1d_z_p_bz.xml: LDFF1D (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc5c0e000/mask=xffe0e000 :ldff1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1d(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1h_z_p_ai.xml: LDFF1H (vector plus immediate) variant 32-bit element # PATTERN x84a0e000/mask=xffe0e000 :ldff1h "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62 { Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62); } # ldff1h_z_p_ai.xml: LDFF1H (vector plus immediate) variant 64-bit element # PATTERN xc4a0e000/mask=xffe0e000 :ldff1h "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62 { Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62); } # ldff1h_z_p_br.xml: LDFF1H (scalar plus scalar) variant 16-bit element # PATTERN xa4a06000/mask=xffe0e000 :ldff1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ldff1h(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1h_z_p_br.xml: LDFF1H (scalar plus scalar) variant 32-bit element # PATTERN 
xa4c06000/mask=xffe0e000 :ldff1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1h_z_p_br.xml: LDFF1H (scalar plus scalar) variant 64-bit element # PATTERN xa4e06000/mask=xffe0e000 :ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit scaled offset # PATTERN x84a06000/mask=xffa0e000 :ldff1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4a06000/mask=xffa0e000 :ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4806000/mask=xffa0e000 :ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & 
Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 32-bit unscaled offset # PATTERN x84806000/mask=xffa0e000 :ldff1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1h(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 64-bit scaled offset # PATTERN xc4e0e000/mask=xffe0e000 :ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1h_z_p_bz.xml: LDFF1H (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc4c0e000/mask=xffe0e000 :ldff1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1h(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1sb_z_p_ai.xml: LDFF1SB (vector plus immediate) variant 32-bit element # PATTERN x8420a000/mask=xffe0e000 :ldff1sb "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to31] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to31 { Zd.S = SVE_ldff1sb(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to31); } # ldff1sb_z_p_ai.xml: LDFF1SB (vector plus immediate) variant 64-bit element # PATTERN xc420a000/mask=xffe0e000 :ldff1sb "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to31] is 
sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to31 { Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to31); } # ldff1sb_z_p_br.xml: LDFF1SB (scalar plus scalar) variant 16-bit element # PATTERN xa5c06000/mask=xffe0e000 :ldff1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ldff1sb(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1sb_z_p_br.xml: LDFF1SB (scalar plus scalar) variant 32-bit element # PATTERN xa5a06000/mask=xffe0e000 :ldff1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ldff1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1sb_z_p_br.xml: LDFF1SB (scalar plus scalar) variant 64-bit element # PATTERN xa5806000/mask=xffe0e000 :ldff1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1sb_z_p_bz.xml: LDFF1SB (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4002000/mask=xffa0e000 :ldff1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1sb_z_p_bz.xml: LDFF1SB (scalar plus vector) variant 32-bit unscaled offset # 
PATTERN x84002000/mask=xffa0e000 :ldff1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1sb(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1sb_z_p_bz.xml: LDFF1SB (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc440a000/mask=xffe0e000 :ldff1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1sb(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1sh_z_p_ai.xml: LDFF1SH (vector plus immediate) variant 32-bit element # PATTERN x84a0a000/mask=xffe0e000 :ldff1sh "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to62] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to62 { Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to62); } # ldff1sh_z_p_ai.xml: LDFF1SH (vector plus immediate) variant 64-bit element # PATTERN xc4a0a000/mask=xffe0e000 :ldff1sh "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to62] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to62 { Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to62); } # ldff1sh_z_p_br.xml: LDFF1SH (scalar plus scalar) variant 32-bit element # PATTERN xa5206000/mask=xffe0e000 :ldff1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & 
Rm_GPR64 { Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1sh_z_p_br.xml: LDFF1SH (scalar plus scalar) variant 64-bit element # PATTERN xa5006000/mask=xffe0e000 :ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit scaled offset # PATTERN x84a02000/mask=xffa0e000 :ldff1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] is sve_b_2331=0b100001001 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4a02000/mask=xffa0e000 :ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc4802000/mask=xffa0e000 :ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 32-bit unscaled offset # PATTERN x84802000/mask=xffa0e000 :ldff1sh "{"^Zd.S^"}", Pg3_z, 
[Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1sh(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 64-bit scaled offset # PATTERN xc4e0a000/mask=xffe0e000 :ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #1"] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1sh_z_p_bz.xml: LDFF1SH (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc4c0a000/mask=xffe0e000 :ldff1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1sh(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1sw_z_p_ai.xml: LDFF1SW (vector plus immediate) variant SVE # PATTERN xc520a000/mask=xffe0e000 :ldff1sw "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124 { Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124); } # ldff1sw_z_p_br.xml: LDFF1SW (scalar plus scalar) variant SVE # PATTERN xa4806000/mask=xffe0e000 :ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # 
ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc5202000/mask=xffa0e000 :ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc5002000/mask=xffa0e000 :ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 64-bit scaled offset # PATTERN xc560a000/mask=xffe0e000 :ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1sw_z_p_bz.xml: LDFF1SW (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc540a000/mask=xffe0e000 :ldff1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1sw(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1w_z_p_ai.xml: LDFF1W (vector plus immediate) variant 32-bit element # PATTERN x8520e000/mask=xffe0e000 :ldff1w "{"^Zd.S^"}", Pg3_z, [Zn.S^sve_opt5_1_0to124] is sve_b_2531=0b1000010 & sve_b_24=1 & 
sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Pg3_z & Zd.S & sve_opt5_1_0to124 { Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Zn.S, sve_opt5_1_0to124); } # ldff1w_z_p_ai.xml: LDFF1W (vector plus immediate) variant 64-bit element # PATTERN xc520e000/mask=xffe0e000 :ldff1w "{"^Zd.D^"}", Pg3_z, [Zn.D^sve_opt5_1_0to124] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_imm5_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Pg3_z & Zd.D & sve_opt5_1_0to124 { Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Zn.D, sve_opt5_1_0to124); } # ldff1w_z_p_br.xml: LDFF1W (scalar plus scalar) variant 32-bit element # PATTERN xa5406000/mask=xffe0e000 :ldff1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1w_z_p_br.xml: LDFF1W (scalar plus scalar) variant 64-bit element # PATTERN xa5606000/mask=xffe0e000 :ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit scaled offset # PATTERN x85206000/mask=xffa0e000 :ldff1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] is sve_b_2331=0b100001010 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN 
xc5206000/mask=xffa0e000 :ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xc5006000/mask=xffa0e000 :ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D & sve_mod { Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D, sve_mod:1); } # ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 32-bit unscaled offset # PATTERN x85006000/mask=xffa0e000 :ldff1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_xs_22 & sve_b_21=0 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.S & Zd.S & sve_mod { Zd.S = SVE_ldff1w(Zd.S, Pg3_z, Rn_GPR64xsp, Zm.S, sve_mod:1); } # ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 64-bit scaled offset # PATTERN xc560e000/mask=xffe0e000 :ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D, "lsl #2"] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldff1w_z_p_bz.xml: LDFF1W (scalar plus vector) variant 64-bit unscaled offset # PATTERN xc540e000/mask=xffe0e000 :ldff1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & 
sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zm.D & Zd.D { Zd.D = SVE_ldff1w(Zd.D, Pg3_z, Rn_GPR64xsp, Zm.D); } # ldnf1b_z_p_bi.xml: LDNF1B variant 8-bit element # PATTERN xa410a000/mask=xfff0e000 :ldnf1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_mul4_1_m8to7 { Zd.B = SVE_ldnf1b(Zd.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1b_z_p_bi.xml: LDNF1B variant 16-bit element # PATTERN xa430a000/mask=xfff0e000 :ldnf1b "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b000 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ldnf1b(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1b_z_p_bi.xml: LDNF1B variant 32-bit element # PATTERN xa450a000/mask=xfff0e000 :ldnf1b "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ldnf1b(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1b_z_p_bi.xml: LDNF1B variant 64-bit element # PATTERN xa470a000/mask=xfff0e000 :ldnf1b "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b001 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1b(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1d_z_p_bi.xml: LDNF1D variant SVE # PATTERN xa5f0a000/mask=xfff0e000 :ldnf1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & 
sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1d(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1h_z_p_bi.xml: LDNF1H variant 16-bit element # PATTERN xa4b0a000/mask=xfff0e000 :ldnf1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ldnf1h(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1h_z_p_bi.xml: LDNF1H variant 32-bit element # PATTERN xa4d0a000/mask=xfff0e000 :ldnf1h "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ldnf1h(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1h_z_p_bi.xml: LDNF1H variant 64-bit element # PATTERN xa4f0a000/mask=xfff0e000 :ldnf1h "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b011 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1h(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1sb_z_p_bi.xml: LDNF1SB variant 16-bit element # PATTERN xa5d0a000/mask=xfff0e000 :ldnf1sb "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b111 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ldnf1sb(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1sb_z_p_bi.xml: LDNF1SB variant 32-bit element # PATTERN xa5b0a000/mask=xfff0e000 :ldnf1sb "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & 
sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ldnf1sb(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1sb_z_p_bi.xml: LDNF1SB variant 64-bit element # PATTERN xa590a000/mask=xfff0e000 :ldnf1sb "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b110 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1sb(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1sh_z_p_bi.xml: LDNF1SH variant 32-bit element # PATTERN xa530a000/mask=xfff0e000 :ldnf1sh "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ldnf1sh(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1sh_z_p_bi.xml: LDNF1SH variant 64-bit element # PATTERN xa510a000/mask=xfff0e000 :ldnf1sh "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b100 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1sh(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1sw_z_p_bi.xml: LDNF1SW variant SVE # PATTERN xa490a000/mask=xfff0e000 :ldnf1sw "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b010 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1sw(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1w_z_p_bi.xml: LDNF1W variant 32-bit element # PATTERN xa550a000/mask=xfff0e000 :ldnf1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is 
sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=0 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ldnf1w(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnf1w_z_p_bi.xml: LDNF1W variant 64-bit element # PATTERN xa570a000/mask=xfff0e000 :ldnf1w "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_2224=0b101 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnf1w(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnt1b_z_p_bi.xml: LDNT1B (scalar plus immediate) variant SVE # PATTERN xa400e000/mask=xfff0e000 :ldnt1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & sve_mul4_1_m8to7 { Zd.B = SVE_ldnt1b(Zd.B, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnt1b_z_p_br.xml: LDNT1B (scalar plus scalar) variant SVE # PATTERN xa400c000/mask=xffe0e000 :ldnt1b "{"^Zd.B^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.B & Rm_GPR64 { Zd.B = SVE_ldnt1b(Zd.B, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldnt1d_z_p_bi.xml: LDNT1D (scalar plus immediate) variant SVE # PATTERN xa580e000/mask=xfff0e000 :ldnt1d "{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & sve_mul4_1_m8to7 { Zd.D = SVE_ldnt1d(Zd.D, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnt1d_z_p_br.xml: LDNT1D (scalar plus scalar) variant SVE # PATTERN xa580c000/mask=xffe0e000 :ldnt1d 
"{"^Zd.D^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.D & Rm_GPR64 { Zd.D = SVE_ldnt1d(Zd.D, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldnt1h_z_p_bi.xml: LDNT1H (scalar plus immediate) variant SVE # PATTERN xa480e000/mask=xfff0e000 :ldnt1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & sve_mul4_1_m8to7 { Zd.H = SVE_ldnt1h(Zd.H, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnt1h_z_p_br.xml: LDNT1H (scalar plus scalar) variant SVE # PATTERN xa480c000/mask=xffe0e000 :ldnt1h "{"^Zd.H^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1010010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.H & Rm_GPR64 { Zd.H = SVE_ldnt1h(Zd.H, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldnt1w_z_p_bi.xml: LDNT1W (scalar plus immediate) variant SVE # PATTERN xa500e000/mask=xfff0e000 :ldnt1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2022=0b000 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & sve_mul4_1_m8to7 { Zd.S = SVE_ldnt1w(Zd.S, Pg3_z, Rn_GPR64xsp, sve_mul4_1_m8to7); } # ldnt1w_z_p_br.xml: LDNT1W (scalar plus scalar) variant SVE # PATTERN xa500c000/mask=xffe0e000 :ldnt1w "{"^Zd.S^"}", Pg3_z, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1010010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Pg3_z & Zd.S & Rm_GPR64 { Zd.S = SVE_ldnt1w(Zd.S, Pg3_z, Rn_GPR64xsp, Rm_GPR64); } # ldr_p_bi.xml: LDR (predicate) variant SVE # PATTERN 
x85800000/mask=xffc0e010 :ldr Pd, [Rn_GPR64xsp^sve_mul9_2_m256to255] is sve_b_2231=0b1000010110 & sve_imm9h_1621 & sve_b_1315=0b000 & sve_imm9l_1012 & sve_rn_0509 & sve_b_04=0 & sve_pt_0003 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Pd { Pd = SVE_ldr(Pd, Rn_GPR64xsp, sve_mul9_2_m256to255); } # ldr_z_bi.xml: LDR (vector) variant SVE # PATTERN x85804000/mask=xffc0e000 :ldr Zd, [Rn_GPR64xsp^sve_mul9_2_m256to255] is sve_b_2231=0b1000010110 & sve_imm9h_1621 & sve_b_1315=0b010 & sve_imm9l_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Zd { Zd = SVE_ldr(Zd, Rn_GPR64xsp, sve_mul9_2_m256to255); } # lsl_z_p_zi.xml: LSL (immediate, predicated) variant SVE # PATTERN x04038000/mask=xff3fe000 :lsl Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m { Zd.T_tszh = SVE_lsl(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1); } # lsl_z_p_zw.xml: LSL (wide elements, predicated) variant SVE # PATTERN x041b8000/mask=xff3fe000 :lsl Zd.T, Pg3_m, Zd.T_2, Zn.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Pg3_m & Zn.D { Zd.T = SVE_lsl(Zd.T, Pg3_m, Zd.T_2, Zn.D); } # lsl_z_p_zz.xml: LSL (vectors) variant SVE # PATTERN x04138000/mask=xff3fe000 :lsl Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_lsl(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # lsl_z_zi.xml: LSL (immediate, unpredicated) variant SVE # PATTERN x04209c00/mask=xff20fc00 :lsl Zd.T_tszh, Zn.T_tszh, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_21=1 & sve_tszl_1920 & sve_imm3_1618 & 
sve_b_1215=0b1001 & sve_b_11=1 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & sve_imm_shift & Zd.T_tszh & Zn.T_tszh { Zd.T_tszh = SVE_lsl(Zd.T_tszh, Zn.T_tszh, sve_imm_shift:1); } # lsl_z_zw.xml: LSL (wide elements, unpredicated) variant SVE # PATTERN x04208c00/mask=xff20fc00 :lsl Zd.T, Zn.T, Zm.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1000 & sve_b_11=1 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Zm.D { Zd.T = SVE_lsl(Zd.T, Zn.T, Zm.D); } # lslr_z_p_zz.xml: LSLR variant SVE # PATTERN x04178000/mask=xff3fe000 :lslr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_lslr(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # lsr_z_p_zi.xml: LSR (immediate, predicated) variant SVE # PATTERN x04018000/mask=xff3fe000 :lsr Zd.T_tszh, Pg3_m, Zd.T_tszh_2, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_tszl_0809 & sve_imm3_0507 & sve_zdn_0004 & sve_imm_shift & Zd.T_tszh & Zd.T_tszh_2 & Pg3_m { Zd.T_tszh = SVE_lsr(Zd.T_tszh, Pg3_m, Zd.T_tszh_2, sve_imm_shift:1); } # lsr_z_p_zw.xml: LSR (wide elements, predicated) variant SVE # PATTERN x04198000/mask=xff3fe000 :lsr Zd.T, Pg3_m, Zd.T_2, Zn.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Pg3_m & Zn.D { Zd.T = SVE_lsr(Zd.T, Pg3_m, Zd.T_2, Zn.D); } # lsr_z_p_zz.xml: LSR (vectors) variant SVE # PATTERN x04118000/mask=xff3fe000 :lsr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_lsr(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # lsr_z_zi.xml: LSR 
(immediate, unpredicated) variant SVE # PATTERN x04209400/mask=xff20fc00 :lsr Zd.T_tszh, Zn.T_tszh, "#"^sve_imm_shift is sve_b_2431=0b00000100 & sve_tszh_2223 & sve_b_21=1 & sve_tszl_1920 & sve_imm3_1618 & sve_b_1215=0b1001 & sve_b_11=0 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & sve_imm_shift & Zd.T_tszh & Zn.T_tszh { Zd.T_tszh = SVE_lsr(Zd.T_tszh, Zn.T_tszh, sve_imm_shift:1); } # lsr_z_zw.xml: LSR (wide elements, unpredicated) variant SVE # PATTERN x04208400/mask=xff20fc00 :lsr Zd.T, Zn.T, Zm.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1215=0b1000 & sve_b_11=0 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Zm.D { Zd.T = SVE_lsr(Zd.T, Zn.T, Zm.D); } # lsrr_z_p_zz.xml: LSRR variant SVE # PATTERN x04158000/mask=xff3fe000 :lsrr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_lsrr(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # mad_z_p_zzz.xml: MAD variant SVE # PATTERN x0400c000/mask=xff20e000 :mad Zd.T, Pg3_m, Zm.T, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b11 & sve_b_13=0 & sve_pg_1012 & sve_za_0509 & sve_zdn_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_mad(Zd.T, Pg3_m, Zm.T, Zn.T); } # mla_z_p_zzz.xml: MLA variant SVE # PATTERN x04004000/mask=xff20e000 :mla Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b01 & sve_b_13=0 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_mla(Zd.T, Pg3_m, Zn.T, Zm.T); } # mls_z_p_zzz.xml: MLS variant SVE # PATTERN x04006000/mask=xff20e000 :mls Zd.T, Pg3_m, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b01 & sve_b_13=1 & sve_pg_1012 & sve_zn_0509 & sve_zda_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_mls(Zd.T, Pg3_m, Zn.T, Zm.T); } # 
mov_and_p_p_pp.xml: MOV (predicate, predicated, zeroing) variant Not flag setting # ALIASEDBY AND .B, /Z, .B, .B if S == '0' && Pn == Pm # PATTERN x25004000/mask=xfff0c210 # SKIPPING mov_and_p_p_pp.xml because x25004000/mask=xfff0c210 has already been defined # mov_cpy_z_p_i.xml: MOV (immediate, predicated) variant SVE # ALIASEDBY CPY ., /, #{, } if Unconditionally # PATTERN x05100000/mask=xff308000 # SKIPPING mov_cpy_z_p_i.xml because x05100000/mask=xff308000 has already been defined # mov_cpy_z_p_r.xml: MOV (scalar, predicated) variant SVE # ALIASEDBY CPY ., /M, if Unconditionally # PATTERN x0528a000/mask=xff3fe000 # SKIPPING mov_cpy_z_p_r.xml because x0528a000/mask=xff3fe000 has already been defined # mov_cpy_z_p_v.xml: MOV (SIMD&FP scalar, predicated) variant SVE # ALIASEDBY CPY ., /M, if Unconditionally # PATTERN x05208000/mask=xff3fe000 # SKIPPING mov_cpy_z_p_v.xml because x05208000/mask=xff3fe000 has already been defined # mov_dup_z_i.xml: MOV (immediate, unpredicated) variant SVE # ALIASEDBY DUP ., #{, } if Unconditionally # PATTERN x2538c000/mask=xff3fc000 # SKIPPING mov_dup_z_i.xml because x2538c000/mask=xff3fc000 has already been defined # mov_dup_z_r.xml: MOV (scalar, unpredicated) variant SVE # ALIASEDBY DUP ., if Unconditionally # PATTERN x05203800/mask=xff3ffc00 # SKIPPING mov_dup_z_r.xml because x05203800/mask=xff3ffc00 has already been defined # mov_dup_z_zi.xml: MOV (SIMD&FP scalar, unpredicated) variant SVE # ALIASEDBY DUP ., .[0] if BitCount(imm2:tsz) == 1 # ALIASEDBY DUP ., .[] if BitCount(imm2:tsz) > 1 # PATTERN # SKIPPING mov_dup_z_zi.xml because there is a mismatch between the XML asmtemplate(4) and regdiagram(1) # mov_dupm_z_i.xml: MOV (bitmask immediate) variant SVE # ALIASEDBY DUPM ., # if SVEMoveMaskPreferred(imm13) # PATTERN x05c00000/mask=xfffc0000 # SKIPPING mov_dupm_z_i.xml because x05c00000/mask=xfffc0000 has already been defined # mov_orr_p_p_pp.xml: MOV (predicate, unpredicated) variant Not flag setting # ALIASEDBY ORR .B, /Z, .B, 
.B if S == '0' && Pn == Pm && Pm == Pg # PATTERN x25804000/mask=xfff0c210 # SKIPPING mov_orr_p_p_pp.xml because it is an alias: # mov_orr_z_zz.xml: MOV (vector, unpredicated) variant SVE # ALIASEDBY ORR .D, .D, .D if Zn == Zm # PATTERN x04603000/mask=xffe0fc00 # SKIPPING mov_orr_z_zz.xml because it is an alias: # mov_sel_p_p_pp.xml: MOV (predicate, predicated, merging) variant SVE # ALIASEDBY SEL .B, , .B, .B if Pd == Pm # PATTERN x25004210/mask=xfff0c210 # SKIPPING mov_sel_p_p_pp.xml because it is an alias: # mov_sel_z_p_zz.xml: MOV (vector, predicated) variant SVE # ALIASEDBY SEL ., , ., . if Zd == Zm # PATTERN x0520c000/mask=xff20c000 # SKIPPING mov_sel_z_p_zz.xml because it is an alias: # movprfx_z_p_z.xml: MOVPRFX (predicated) variant SVE # PATTERN x04102000/mask=xff3ee000 :movprfx Zd.T, Pg3_zm, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_m_16 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Pg3_zm & Zd.T & Zn.T { Zd.T = SVE_movprfx(Zd.T, Pg3_zm, Zn.T); } # movprfx_z_z.xml: MOVPRFX (unpredicated) variant SVE # PATTERN x0420bc00/mask=xfffffc00 :movprfx Zd, Zn is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_1720=0b0000 & sve_b_16=0 & sve_b_1015=0b101111 & sve_zn_0509 & sve_zd_0004 & Zn & Zd { Zd = SVE_movprfx(Zd, Zn); } # movs_and_p_p_pp.xml: MOVS (predicated) variant Flag setting # ALIASEDBY ANDS .B, /Z, .B, .B if S == '1' && Pn == Pm # PATTERN x25404000/mask=xfff0c210 # SKIPPING movs_and_p_p_pp.xml because x25404000/mask=xfff0c210 has already been defined # movs_orr_p_p_pp.xml: MOVS (unpredicated) variant Flag setting # ALIASEDBY ORRS .B, /Z, .B, .B if S == '1' && Pn == Pm && Pm == Pg # PATTERN x25c04000/mask=xfff0c210 # SKIPPING movs_orr_p_p_pp.xml because it is an alias: # msb_z_p_zzz.xml: MSB variant SVE # PATTERN x0400e000/mask=xff20e000 :msb Zd.T, Pg3_m, Zm.T, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=0 & sve_zm_1620 & sve_b_1415=0b11 & sve_b_13=1 & 
sve_pg_1012 & sve_za_0509 & sve_zdn_0004 & Zd.T & Zm.T & Zn.T & Pg3_m { Zd.T = SVE_msb(Zd.T, Pg3_m, Zm.T, Zn.T); } # mul_z_p_zz.xml: MUL (vectors) variant SVE # PATTERN x04100000/mask=xff3fe000 :mul Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_mul(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # mul_z_zi.xml: MUL (immediate) variant SVE # PATTERN x2530c000/mask=xff3fe000 :mul Zd.T, Zd.T_2, "#"^sve_imm8_1_m128to127 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b110 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_m128to127 { Zd.T = SVE_mul(Zd.T, Zd.T_2, sve_imm8_1_m128to127:1); } # nand_p_p_pp.xml: NAND, NANDS variant Flag setting # PATTERN x25c04210/mask=xfff0c210 :nands Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_nands(Pd.B, Pg_z, Pn.B, Pm.B); } # nand_p_p_pp.xml: NAND, NANDS variant Not flag setting # PATTERN x25804210/mask=xfff0c210 :nand Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_nand(Pd.B, Pg_z, Pn.B, Pm.B); } # neg_z_p_z.xml: NEG variant SVE # PATTERN x0417a000/mask=xff3fe000 :neg Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_neg(Zd.T, Pg3_m, Zn.T); } # nor_p_p_pp.xml: NOR, NORS variant Flag setting # PATTERN x25c04200/mask=xfff0c210 :nors Pd.B, Pg_z, Pn.B, Pm.B is 
sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_nors(Pd.B, Pg_z, Pn.B, Pm.B); } # nor_p_p_pp.xml: NOR, NORS variant Not flag setting # PATTERN x25804200/mask=xfff0c210 :nor Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_nor(Pd.B, Pg_z, Pn.B, Pm.B); } # not_eor_p_p_pp.xml: NOT (predicate) variant Not flag setting # ALIASEDBY EOR .B, /Z, .B, .B if Pm == Pg # PATTERN x25004200/mask=xfff0c210 # SKIPPING not_eor_p_p_pp.xml because x25004200/mask=xfff0c210 has already been defined # not_z_p_z.xml: NOT (vector) variant SVE # PATTERN x041ea000/mask=xff3fe000 :not Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_not(Zd.T, Pg3_m, Zn.T); } # nots_eor_p_p_pp.xml: NOTS variant Flag setting # ALIASEDBY EORS .B, /Z, .B, .B if Pm == Pg # PATTERN x25404200/mask=xfff0c210 # SKIPPING nots_eor_p_p_pp.xml because x25404200/mask=xfff0c210 has already been defined # orn_orr_z_zi.xml: ORN (immediate) variant SVE # ALIASEDBY ORR ., ., #(- - 1) if Never # PATTERN x05000000/mask=xfffc0000 :orn Zd.T_imm13, Zd.T_imm13_2, "#"^sve_decode_bit_mask is sve_b_2431=0b00000101 & sve_b_23=0 & sve_b_22=0 & sve_b_1821=0b0000 & sve_imm13_0517 & sve_zdn_0004 & sve_decode_bit_mask & Zd.T_imm13 & Zd.T_imm13_2 { Zd.T_imm13 = SVE_orn(Zd.T_imm13, Zd.T_imm13_2, sve_decode_bit_mask:1); } # orn_p_p_pp.xml: ORN, ORNS (predicates) variant Flag setting # PATTERN x25c04010/mask=xfff0c210 :orns Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & 
sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_orns(Pd.B, Pg_z, Pn.B, Pm.B); } # orn_p_p_pp.xml: ORN, ORNS (predicates) variant Not flag setting # PATTERN x25804010/mask=xfff0c210 :orn Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_orn(Pd.B, Pg_z, Pn.B, Pm.B); } # orr_p_p_pp.xml: ORR, ORRS (predicates) variant Flag setting # PATTERN x25c04000/mask=xfff0c210 :orrs Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_s_22=1 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_orrs(Pd.B, Pg_z, Pn.B, Pm.B); } # orr_p_p_pp.xml: ORR, ORRS (predicates) variant Not flag setting # PATTERN x25804000/mask=xfff0c210 :orr Pd.B, Pg_z, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=1 & sve_s_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pg_z & Pn.B & Pm.B { Pd.B = SVE_orr(Pd.B, Pg_z, Pn.B, Pm.B); } # orr_z_p_zz.xml: ORR (vectors, predicated) variant SVE # PATTERN x04180000/mask=xff3fe000 :orr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_orr(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # orr_z_zi.xml: ORR (immediate) variant SVE # PATTERN x05000000/mask=xfffc0000 # SKIPPING orr_z_zi.xml because x05000000/mask=xfffc0000 has already been defined # orr_z_zz.xml: ORR (vectors, unpredicated) variant SVE # PATTERN x04603000/mask=xffe0fc00 :orr Zd.D, Zn.D, Zm.D is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & 
sve_zd_0004 & Zn.D & Zd.D & Zm.D { Zd.D = SVE_orr(Zd.D, Zn.D, Zm.D); } # orv_r_p_z.xml: ORV variant SVE # PATTERN x04182000/mask=xff3fe000 :orv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_orv(Rd_FPR8, Pg3, Zn.T); } # orv_r_p_z.xml: ORV variant SVE # PATTERN x04182000/mask=xff3fe000 :orv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_orv(Rd_FPR32, Pg3, Zn.T); } # orv_r_p_z.xml: ORV variant SVE # PATTERN x04182000/mask=xff3fe000 :orv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_orv(Rd_FPR16, Pg3, Zn.T); } # orv_r_p_z.xml: ORV variant SVE # PATTERN x04182000/mask=xff3fe000 :orv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b011 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_orv(Rd_FPR64, Pg3, Zn.T); } # pfalse_p.xml: PFALSE variant SVE # PATTERN x2518e400/mask=xfffffff0 :pfalse Pd.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1021=0b011000111001 & sve_b_0409=0b000000 & sve_pd_0003 & Pd.B { Pd.B = SVE_pfalse(Pd.B); } # pfirst_p_p_p.xml: PFIRST variant SVE # PATTERN x2558c000/mask=xfffffe10 :pfirst Pd.B, Pn, Pd.B_2 is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1021=0b011000110000 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pdn_0003 & Pd.B & Pd.B_2 & Pn { Pd.B = SVE_pfirst(Pd.B, Pn, Pd.B_2); } # pnext_p_p_p.xml: PNEXT variant SVE # PATTERN x2519c400/mask=xff3ffe10 :pnext Pd.T, Pn, Pd.T_2 is sve_b_2431=0b00100101 & 
sve_size_2223 & sve_b_1021=0b011001110001 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pdn_0003 & Pd.T & Pd.T_2 & Pn { Pd.T = SVE_pnext(Pd.T, Pn, Pd.T_2); } # prfb_i_p_ai.xml: PRFB (vector plus immediate) variant 32-bit element # PATTERN x8400e000/mask=xffe0e010 :prfb sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to31] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to31 & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to31); } # prfb_i_p_ai.xml: PRFB (vector plus immediate) variant 64-bit element # PATTERN xc400e000/mask=xffe0e010 :prfb sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to31] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to31 & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to31); } # prfb_i_p_bi.xml: PRFB (scalar plus immediate) variant SVE # PATTERN x85c00000/mask=xffc0e010 :prfb sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31] is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31); } # prfb_i_p_br.xml: PRFB (scalar plus scalar) variant SVE # PATTERN x8400c000/mask=xffe0e010 :prfb sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64); } # prfb_i_p_bz.xml: PRFB (scalar plus vector) variant 32-bit scaled offset # PATTERN x84200000/mask=xffa0e010 :prfb sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & 
sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # prfb_i_p_bz.xml: PRFB (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4200000/mask=xffa0e010 :prfb sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # prfb_i_p_bz.xml: PRFB (scalar plus vector) variant 64-bit scaled offset # PATTERN xc4608000/mask=xffe0e010 :prfb sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D] is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3 { SVE_prfb(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D); } # prfd_i_p_ai.xml: PRFD (vector plus immediate) variant 32-bit element # PATTERN x8580e000/mask=xffe0e010 :prfd sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to248] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to248 & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to248); } # prfd_i_p_ai.xml: PRFD (vector plus immediate) variant 64-bit element # PATTERN xc580e000/mask=xffe0e010 :prfd sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to248] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to248 & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to248); } # prfd_i_p_bi.xml: PRFD (scalar plus immediate) variant SVE # PATTERN x85c06000/mask=xffc0e010 :prfd sve_prfop, Pg3, 
[Rn_GPR64xsp^sve_mul6_1_m32to31] is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31); } # prfd_i_p_br.xml: PRFD (scalar plus scalar) variant SVE # PATTERN x8580c000/mask=xffe0e010 :prfd sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64); } # prfd_i_p_bz.xml: PRFD (scalar plus vector) variant 32-bit scaled offset # PATTERN x84206000/mask=xffa0e010 :prfd sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #3"] is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # prfd_i_p_bz.xml: PRFD (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4206000/mask=xffa0e010 :prfd sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"] is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # prfd_i_p_bz.xml: PRFD (scalar plus vector) variant 64-bit scaled offset # PATTERN xc460e000/mask=xffe0e010 :prfd sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, "lsl #3"] is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3 { SVE_prfd(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D); } # prfh_i_p_ai.xml: PRFH 
(vector plus immediate) variant 32-bit element # PATTERN x8480e000/mask=xffe0e010 :prfh sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to62] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to62 & Pg3 { SVE_prfh(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to62); } # prfh_i_p_ai.xml: PRFH (vector plus immediate) variant 64-bit element # PATTERN xc480e000/mask=xffe0e010 :prfh sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to62] is sve_b_2531=0b1100010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to62 & Pg3 { SVE_prfh(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to62); } # prfh_i_p_bi.xml: PRFH (scalar plus immediate) variant SVE # PATTERN x85c02000/mask=xffc0e010 :prfh sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31] is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3 { SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31); } # prfh_i_p_br.xml: PRFH (scalar plus scalar) variant SVE # PATTERN x8480c000/mask=xffe0e010 :prfh sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1000010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64); } # prfh_i_p_bz.xml: PRFH (scalar plus vector) variant 32-bit scaled offset # PATTERN x84202000/mask=xffa0e010 :prfh sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3 { 
SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # prfh_i_p_bz.xml: PRFH (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4202000/mask=xffa0e010 :prfh sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3 { SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # prfh_i_p_bz.xml: PRFH (scalar plus vector) variant 64-bit scaled offset # PATTERN xc460a000/mask=xffe0e010 :prfh sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, "lsl #1"] is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=0 & sve_b_13=1 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3 { SVE_prfh(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D); } # prfw_i_p_ai.xml: PRFW (vector plus immediate) variant 32-bit element # PATTERN x8500e000/mask=xffe0e010 :prfw sve_prfop, Pg3, [Zn.S^sve_opt5_1_0to124] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.S & sve_opt5_1_0to124 & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Zn.S, sve_opt5_1_0to124); } # prfw_i_p_ai.xml: PRFW (vector plus immediate) variant 64-bit element # PATTERN xc500e000/mask=xffe0e010 :prfw sve_prfop, Pg3, [Zn.D^sve_opt5_1_0to124] is sve_b_2531=0b1100010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_imm5_1620 & sve_b_1315=0b111 & sve_pg_1012 & sve_zn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Zn.D & sve_opt5_1_0to124 & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Zn.D, sve_opt5_1_0to124); } # prfw_i_p_bi.xml: PRFW (scalar plus immediate) variant SVE # PATTERN x85c04000/mask=xffc0e010 :prfw sve_prfop, Pg3, [Rn_GPR64xsp^sve_mul6_1_m32to31] is sve_b_2231=0b1000010111 & sve_imm6_1621 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & 
sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & sve_mul6_1_m32to31 & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, sve_mul6_1_m32to31); } # prfw_i_p_br.xml: PRFW (scalar plus scalar) variant SVE # PATTERN x8500c000/mask=xffe0e010 :prfw sve_prfop, Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1000010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b110 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Rm_GPR64); } # prfw_i_p_bz.xml: PRFW (scalar plus vector) variant 32-bit scaled offset # PATTERN x84204000/mask=xffa0e010 :prfw sve_prfop, Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] is sve_b_2331=0b100001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.S & sve_mod & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # prfw_i_p_bz.xml: PRFW (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xc4204000/mask=xffa0e010 :prfw sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #2"] is sve_b_2331=0b110001000 & sve_xs_22 & sve_b_21=1 & sve_zm_1620 & sve_b_15=0 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & sve_mod & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # prfw_i_p_bz.xml: PRFW (scalar plus vector) variant 64-bit scaled offset # PATTERN xc460c000/mask=xffe0e010 :prfw sve_prfop, Pg3, [Rn_GPR64xsp, Zm.D, "lsl #2"] is sve_b_2131=0b11000100011 & sve_zm_1620 & sve_b_15=1 & sve_b_14=1 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_b_04=0 & sve_prfop_0003 & sve_prfop & Rn_GPR64xsp & Zm.D & Pg3 { SVE_prfw(sve_prfop:1, Pg3, Rn_GPR64xsp, Zm.D); } # ptest_p_p.xml: PTEST variant SVE # PATTERN x2550c000/mask=xffffc21f :ptest Pg, Pn.B is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b01 
& sve_b_1419=0b000011 & sve_pg_1013 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_01=0 & sve_b_00=0 & Pn.B & Pg { SVE_ptest(Pg, Pn.B); } # ptrue_p_s.xml: PTRUE, PTRUES variant Flag setting # PATTERN x2519e000/mask=xff3ffc10 :ptrues Pd.T^sve_opt_pattern is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1721=0b01100 & sve_b_16=1 & sve_b_1015=0b111000 & sve_pattern_0509 & sve_b_04=0 & sve_pd_0003 & sve_pattern & Pd.T & sve_opt_pattern { Pd.T = SVE_ptrues(Pd.T, sve_opt_pattern); } # ptrue_p_s.xml: PTRUE, PTRUES variant Not flag setting # PATTERN x2518e000/mask=xff3ffc10 :ptrue Pd.T^sve_opt_pattern is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1721=0b01100 & sve_b_16=0 & sve_b_1015=0b111000 & sve_pattern_0509 & sve_b_04=0 & sve_pd_0003 & sve_pattern & Pd.T & sve_opt_pattern { Pd.T = SVE_ptrue(Pd.T, sve_opt_pattern); } # punpkhi_p_p.xml: PUNPKHI, PUNPKLO variant High half # PATTERN x05314000/mask=xfffffe10 :punpkhi Pd.H, Pn.B is sve_b_1731=0b000001010011000 & sve_b_16=1 & sve_b_1015=0b010000 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.H & Pn.B { Pd.H = SVE_punpkhi(Pd.H, Pn.B); } # punpkhi_p_p.xml: PUNPKHI, PUNPKLO variant Low half # PATTERN x05304000/mask=xfffffe10 :punpklo Pd.H, Pn.B is sve_b_1731=0b000001010011000 & sve_b_16=0 & sve_b_1015=0b010000 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pd.H & Pn.B { Pd.H = SVE_punpklo(Pd.H, Pn.B); } # rbit_z_p_z.xml: RBIT variant SVE # PATTERN x05278000/mask=xff3fe000 :rbit Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_rbit(Zd.T, Pg3_m, Zn.T); } # rdffr_p_f.xml: RDFFR (unpredicated) variant SVE # PATTERN x2519f000/mask=xfffffff0 :rdffr Pd.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1021=0b011001111100 & sve_b_0409=0b000000 & sve_pd_0003 & Pd.B { Pd.B = SVE_rdffr(Pd.B); } # rdffr_p_p_f.xml: RDFFR, RDFFRS 
(predicated) variant Flag setting # PATTERN x2558f000/mask=xfffffe10 :rdffrs Pd.B, Pn_z is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1021=0b011000111100 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pn_z { Pd.B = SVE_rdffrs(Pd.B, Pn_z); } # rdffr_p_p_f.xml: RDFFR, RDFFRS (predicated) variant Not flag setting # PATTERN x2518f000/mask=xfffffe10 :rdffr Pd.B, Pn_z is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_1021=0b011000111100 & sve_b_09=0 & sve_pg_0508 & sve_b_04=0 & sve_pd_0003 & Pd.B & Pn_z { Pd.B = SVE_rdffr(Pd.B, Pn_z); } # rdvl_r_i.xml: RDVL variant SVE # PATTERN x04bf5000/mask=xfffff800 :rdvl Rd_GPR64, "#"^sve_imm6_1_m32to31 is sve_b_2331=0b000001001 & sve_b_22=0 & sve_b_21=1 & sve_b_1720=0b1111 & sve_b_16=1 & sve_b_1115=0b01010 & sve_imm6_0510 & sve_rd_0004 & sve_imm6_1_m32to31 & Rd_GPR64 { Rd_GPR64 = SVE_rdvl(Rd_GPR64, sve_imm6_1_m32to31:1); } # rev_p_p.xml: REV (predicate) variant SVE # PATTERN x05344000/mask=xff3ffe10 :rev Pd.T, Pn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1021=0b110100010000 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T { Pd.T = SVE_rev(Pd.T, Pn.T); } # rev_z_z.xml: REV (vector) variant SVE # PATTERN x05383800/mask=xff3ffc00 :rev Zd.T, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1021=0b111000001110 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T { Zd.T = SVE_rev(Zd.T, Zn.T); } # revb_z_z.xml: REVB, REVH, REVW variant Byte # PATTERN x05248000/mask=xff3fe000 :revb Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_revb(Zd.T, Pg3_m, Zn.T); } # revb_z_z.xml: REVB, REVH, REVW variant Halfword # PATTERN x05258000/mask=xff3fe000 :revh Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { 
Zd.T = SVE_revh(Zd.T, Pg3_m, Zn.T); } # revb_z_z.xml: REVB, REVH, REVW variant Word # PATTERN x05268000/mask=xff3fe000 :revw Zd.D, Pg3_m, Zn.D is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1001 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b100 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_revw(Zd.D, Pg3_m, Zn.D); } # sabd_z_p_zz.xml: SABD variant SVE # PATTERN x040c0000/mask=xff3fe000 :sabd Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_sabd(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # saddv_r_p_z.xml: SADDV variant SVE # PATTERN x04002000/mask=xff3fe000 :saddv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_saddv(Rd_FPR64, Zn.T, Pg3); } # scvtf_z_p_z.xml: SCVTF variant 16-bit to half-precision # PATTERN x6552a000/mask=xffffe000 :scvtf Zd.H, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m { Zd.H = SVE_scvtf(Zd.H, Pg3_m, Zn.H); } # scvtf_z_p_z.xml: SCVTF variant 32-bit to half-precision # PATTERN x6554a000/mask=xffffe000 :scvtf Zd.H, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.H & Pg3_m { Zd.H = SVE_scvtf(Zd.H, Pg3_m, Zn.S); } # scvtf_z_p_z.xml: SCVTF variant 32-bit to single-precision # PATTERN x6594a000/mask=xffffe000 :scvtf Zd.S, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & 
sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m { Zd.S = SVE_scvtf(Zd.S, Pg3_m, Zn.S); } # scvtf_z_p_z.xml: SCVTF variant 32-bit to double-precision # PATTERN x65d0a000/mask=xffffe000 :scvtf Zd.D, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m { Zd.D = SVE_scvtf(Zd.D, Pg3_m, Zn.S); } # scvtf_z_p_z.xml: SCVTF variant 64-bit to half-precision # PATTERN x6556a000/mask=xffffe000 :scvtf Zd.H, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.H & Pg3_m { Zd.H = SVE_scvtf(Zd.H, Pg3_m, Zn.D); } # scvtf_z_p_z.xml: SCVTF variant 64-bit to single-precision # PATTERN x65d4a000/mask=xffffe000 :scvtf Zd.S, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m { Zd.S = SVE_scvtf(Zd.S, Pg3_m, Zn.D); } # scvtf_z_p_z.xml: SCVTF variant 64-bit to double-precision # PATTERN x65d6a000/mask=xffffe000 :scvtf Zd.D, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_scvtf(Zd.D, Pg3_m, Zn.D); } # sdiv_z_p_zz.xml: SDIV variant SVE # PATTERN x04940000/mask=xffbfe000 :sdiv Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m { Zd.T_sz = SVE_sdiv(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); } # sdivr_z_p_zz.xml: SDIVR variant SVE # PATTERN x04960000/mask=xffbfe000 :sdivr Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz is 
sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m { Zd.T_sz = SVE_sdivr(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); } # sdot_z_zzz.xml: SDOT (vectors) variant SVE # PATTERN x44800000/mask=xffa0fc00 :sdot Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz is sve_b_2431=0b01000100 & sve_b_23=1 & sve_sz_22 & sve_b_21=0 & sve_zm_1620 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zm.Tb_sz & Zd.T_sz & Zn.Tb_sz { Zd.T_sz = SVE_sdot(Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz); } # sdot_z_zzzi.xml: SDOT (indexed) variant 32-bit # PATTERN x44a00000/mask=xffe0fc00 :sdot Zd.S, Zn.B, Zm3.B[sve_i2_1920] is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.B & Zm3.B { Zd.S = SVE_sdot(Zd.S, Zn.B, Zm3.B, sve_i2_1920:1); } # sdot_z_zzzi.xml: SDOT (indexed) variant 64-bit # PATTERN x44e00000/mask=xffe0fc00 :sdot Zd.D, Zn.H, Zm4.H[sve_i1_20] is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=0 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.H & Zm4.H { Zd.D = SVE_sdot(Zd.D, Zn.H, Zm4.H, sve_i1_20:1); } # sel_p_p_pp.xml: SEL (predicates) variant SVE # PATTERN x25004210/mask=xfff0c210 :sel Pd.B, Pg, Pn.B, Pm.B is sve_b_2431=0b00100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b00 & sve_pm_1619 & sve_b_1415=0b01 & sve_pg_1013 & sve_b_09=1 & sve_pn_0508 & sve_b_04=1 & sve_pd_0003 & Pd.B & Pn.B & Pm.B & Pg { Pd.B = SVE_sel(Pd.B, Pg, Pn.B, Pm.B); } # sel_z_p_zz.xml: SEL (vectors) variant SVE # PATTERN x0520c000/mask=xff20c000 :sel Zd.T, Pg, Zn.T, Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1415=0b11 & sve_pg_1013 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T & Pg { Zd.T = SVE_sel(Zd.T, Pg, Zn.T, Zm.T); } # setffr_f.xml: SETFFR variant SVE # PATTERN 
x252c9000/mask=xffffffff :setffr "" is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b10 & sve_b_1019=0b1100100100 & sve_b_0409=0b000000 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 unimpl # smax_z_p_zz.xml: SMAX (vectors) variant SVE # PATTERN x04080000/mask=xff3fe000 :smax Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_smax(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # smax_z_zi.xml: SMAX (immediate) variant SVE # PATTERN x2528c000/mask=xff3fe000 :smax Zd.T, Zd.T_2, "#"^sve_imm8_1_m128to127 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_m128to127 { Zd.T = SVE_smax(Zd.T, Zd.T_2, sve_imm8_1_m128to127:1); } # smaxv_r_p_z.xml: SMAXV variant SVE # PATTERN x04082000/mask=xff3fe000 :smaxv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_smaxv(Rd_FPR8, Pg3, Zn.T); } # smaxv_r_p_z.xml: SMAXV variant SVE # PATTERN x04082000/mask=xff3fe000 :smaxv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_smaxv(Rd_FPR32, Pg3, Zn.T); } # smaxv_r_p_z.xml: SMAXV variant SVE # PATTERN x04082000/mask=xff3fe000 :smaxv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_smaxv(Rd_FPR16, Pg3, Zn.T); } # smaxv_r_p_z.xml: SMAXV variant SVE # PATTERN 
x04082000/mask=xff3fe000 :smaxv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_smaxv(Rd_FPR64, Pg3, Zn.T); } # smin_z_p_zz.xml: SMIN (vectors) variant SVE # PATTERN x040a0000/mask=xff3fe000 :smin Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_smin(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # smin_z_zi.xml: SMIN (immediate) variant SVE # PATTERN x252ac000/mask=xff3fe000 :smin Zd.T, Zd.T_2, "#"^sve_imm8_1_m128to127 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_m128to127 { Zd.T = SVE_smin(Zd.T, Zd.T_2, sve_imm8_1_m128to127:1); } # sminv_r_p_z.xml: SMINV variant SVE # PATTERN x040a2000/mask=xff3fe000 :sminv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_sminv(Rd_FPR8, Pg3, Zn.T); } # sminv_r_p_z.xml: SMINV variant SVE # PATTERN x040a2000/mask=xff3fe000 :sminv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_sminv(Rd_FPR32, Pg3, Zn.T); } # sminv_r_p_z.xml: SMINV variant SVE # PATTERN x040a2000/mask=xff3fe000 :sminv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = 
SVE_sminv(Rd_FPR16, Pg3, Zn.T); } # sminv_r_p_z.xml: SMINV variant SVE # PATTERN x040a2000/mask=xff3fe000 :sminv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_sminv(Rd_FPR64, Pg3, Zn.T); } # smulh_z_p_zz.xml: SMULH variant SVE # PATTERN x04120000/mask=xff3fe000 :smulh Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=0 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_smulh(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # splice_z_p_zz.xml: SPLICE variant SVE # PATTERN x052c8000/mask=xff3fe000 :splice Zd.T, Pg3, Zd.T_2, Zn.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1321=0b101100100 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3 { Zd.T = SVE_splice(Zd.T, Pg3, Zd.T_2, Zn.T); } # sqadd_z_zi.xml: SQADD (immediate) variant SVE # PATTERN x2524c000/mask=xff3fc000 :sqadd Zd.T, Zd.T_2, sve_shf8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_sqadd(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); } # sqadd_z_zz.xml: SQADD (vectors) variant SVE # PATTERN x04201000/mask=xff20fc00 :sqadd Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b10 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_sqadd(Zd.T, Zn.T, Zm.T); } # sqdecb_r_rs.xml: SQDECB variant 32-bit # PATTERN x0420f800/mask=xfff0fc00 :sqdecb Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & 
# SVE saturating element-count decrement/increment constructors (SQDEC*, SQINC*)
# plus SQSUB. Each constructor pairs a display/constraint section ("is ...")
# with a p-code body that delegates to a SVE_* pseudo-op. The "# *.xml" and
# "# PATTERN opcode/mask" comments appear to be provenance annotations from the
# ARM machine-readable spec these entries were generated from — TODO confirm.
sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdecb(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecb_r_rs.xml: SQDECB variant 64-bit # PATTERN x0430f800/mask=xfff0fc00 :sqdecb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdecb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecd_r_rs.xml: SQDECD (scalar) variant 32-bit # PATTERN x04e0f800/mask=xfff0fc00 :sqdecd Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdecd(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecd_r_rs.xml: SQDECD (scalar) variant 64-bit # PATTERN x04f0f800/mask=xfff0fc00 :sqdecd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdecd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecd_z_zs.xml: SQDECD (vector) variant SVE # PATTERN x04e0c800/mask=xfff0fc00 :sqdecd Zd.D^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern { Zd.D = SVE_sqdecd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdech_r_rs.xml: SQDECH (scalar) variant 32-bit # PATTERN x0460f800/mask=xfff0fc00 :sqdech Rd_GPR64, 
Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdech(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdech_r_rs.xml: SQDECH (scalar) variant 64-bit # PATTERN x0470f800/mask=xfff0fc00 :sqdech Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdech(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdech_z_zs.xml: SQDECH (vector) variant SVE # PATTERN x0460c800/mask=xfff0fc00 :sqdech Zd.H^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern { Zd.H = SVE_sqdech(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecp_r_p_r.xml: SQDECP (scalar) variant 32-bit # PATTERN x252a8800/mask=xff3ffe00 :sqdecp Rd_GPR64, Pn.T, Rd_GPR32 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 & Rd_GPR64 { Rd_GPR64 = SVE_sqdecp(Rd_GPR64, Pn.T, Rd_GPR32); } # sqdecp_r_p_r.xml: SQDECP (scalar) variant 64-bit # PATTERN x252a8c00/mask=xff3ffe00 :sqdecp Rd_GPR64, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 { Rd_GPR64 = SVE_sqdecp(Rd_GPR64, Pn.T); } # sqdecp_z_p_z.xml: SQDECP (vector) variant SVE # PATTERN x252a8000/mask=xff3ffe00 :sqdecp Zd.T, Pn is 
sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=0 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn { Zd.T = SVE_sqdecp(Zd.T, Pn); } # sqdecw_r_rs.xml: SQDECW (scalar) variant 32-bit # PATTERN x04a0f800/mask=xfff0fc00 :sqdecw Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdecw(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecw_r_rs.xml: SQDECW (scalar) variant 64-bit # PATTERN x04b0f800/mask=xfff0fc00 :sqdecw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqdecw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqdecw_z_zs.xml: SQDECW (vector) variant SVE # PATTERN x04a0c800/mask=xfff0fc00 :sqdecw Zd.S^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern { Zd.S = SVE_sqdecw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincb_r_rs.xml: SQINCB variant 32-bit # PATTERN x0420f000/mask=xfff0fc00 :sqincb Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqincb(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincb_r_rs.xml: SQINCB variant 64-bit # PATTERN 
x0430f000/mask=xfff0fc00 :sqincb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqincb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincd_r_rs.xml: SQINCD (scalar) variant 32-bit # PATTERN x04e0f000/mask=xfff0fc00 :sqincd Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqincd(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincd_r_rs.xml: SQINCD (scalar) variant 64-bit # PATTERN x04f0f000/mask=xfff0fc00 :sqincd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqincd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincd_z_zs.xml: SQINCD (vector) variant SVE # PATTERN x04e0c000/mask=xfff0fc00 :sqincd Zd.D^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern { Zd.D = SVE_sqincd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqinch_r_rs.xml: SQINCH (scalar) variant 32-bit # PATTERN x0460f000/mask=xfff0fc00 :sqinch Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & 
sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqinch(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqinch_r_rs.xml: SQINCH (scalar) variant 64-bit # PATTERN x0470f000/mask=xfff0fc00 :sqinch Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqinch(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqinch_z_zs.xml: SQINCH (vector) variant SVE # PATTERN x0460c000/mask=xfff0fc00 :sqinch Zd.H^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern { Zd.H = SVE_sqinch(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincp_r_p_r.xml: SQINCP (scalar) variant 32-bit # PATTERN x25288800/mask=xff3ffe00 :sqincp Rd_GPR64, Pn.T, Rd_GPR32 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 & Rd_GPR64 { Rd_GPR64 = SVE_sqincp(Rd_GPR64, Pn.T, Rd_GPR32); } # sqincp_r_p_r.xml: SQINCP (scalar) variant 64-bit # PATTERN x25288c00/mask=xff3ffe00 :sqincp Rd_GPR64, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 { Rd_GPR64 = SVE_sqincp(Rd_GPR64, Pn.T); } # sqincp_z_p_z.xml: SQINCP (vector) variant SVE # PATTERN x25288000/mask=xff3ffe00 :sqincp Zd.T, Pn is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=0 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn { Zd.T = SVE_sqincp(Zd.T, Pn); } # sqincw_r_rs.xml: SQINCW (scalar) variant 
32-bit # PATTERN x04a0f000/mask=xfff0fc00 :sqincw Rd_GPR64, Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqincw(Rd_GPR64, Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincw_r_rs.xml: SQINCW (scalar) variant 64-bit # PATTERN x04b0f000/mask=xfff0fc00 :sqincw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_sqincw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqincw_z_zs.xml: SQINCW (vector) variant SVE # PATTERN x04a0c000/mask=xfff0fc00 :sqincw Zd.S^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=0 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern { Zd.S = SVE_sqincw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); } # sqsub_z_zi.xml: SQSUB (immediate) variant SVE # PATTERN x2526c000/mask=xff3fc000 :sqsub Zd.T, Zd.T_2, sve_shf8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b11 & sve_b_16=0 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_sqsub(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); } # sqsub_z_zz.xml: SQSUB (vectors) variant SVE # PATTERN x04201800/mask=xff20fc00 :sqsub Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_sqsub(Zd.T, Zn.T, Zm.T); } # st1b_z_p_ai.xml: ST1B 
# SVE contiguous / gather-scatter single-register store constructors
# (ST1B/ST1D/ST1H/ST1W). Addressing variants cover vector-plus-immediate,
# scalar-plus-immediate, scalar-plus-scalar, and scalar-plus-vector forms;
# the p-code body passes destination register(s), governing predicate, and
# address operands straight to the matching SVE_st1* pseudo-op. Quoted
# strings like "{" and "lsl #3" are display-only assembly text.
(vector plus immediate) variant 32-bit element # PATTERN xe460a000/mask=xffe0e000 :st1b "{"^Zd.S^"}", Pg3, [Zn.S^sve_opt5_1_0to31] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Zd.S & sve_opt5_1_0to31 & Pg3 { SVE_st1b(Zd.S, Pg3, Zn.S, sve_opt5_1_0to31); } # st1b_z_p_ai.xml: ST1B (vector plus immediate) variant 64-bit element # PATTERN xe440a000/mask=xffe0e000 :st1b "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to31] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to31 & Pg3 { SVE_st1b(Zd.D, Pg3, Zn.D, sve_opt5_1_0to31); } # st1b_z_p_bi.xml: ST1B (scalar plus immediate) variant SVE # PATTERN xe400e000/mask=xff90e000 :st1b "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & sve_mul4_1_m8to7 & Pg3 { SVE_st1b(Zd.T_size_2122, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # st1b_z_p_br.xml: ST1B (scalar plus scalar) variant SVE # PATTERN xe4004000/mask=xff80e000 :st1b "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st1b(Zd.T_size_2122, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st1b_z_p_bz.xml: ST1B (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xe4008000/mask=xffe0a000 :st1b "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1b(Zd.D, Pg3, 
Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1b_z_p_bz.xml: ST1B (scalar plus vector) variant 32-bit unscaled offset # PATTERN xe4408000/mask=xffe0a000 :st1b "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 { SVE_st1b(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # st1b_z_p_bz.xml: ST1B (scalar plus vector) variant 64-bit unscaled offset # PATTERN xe400a000/mask=xffe0e000 :st1b "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1b(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st1d_z_p_ai.xml: ST1D (vector plus immediate) variant SVE # PATTERN xe5c0a000/mask=xffe0e000 :st1d "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to248] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to248 & Pg3 { SVE_st1d(Zd.D, Pg3, Zn.D, sve_opt5_1_0to248); } # st1d_z_p_bi.xml: ST1D (scalar plus immediate) variant SVE # PATTERN xe580e000/mask=xff90e000 :st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & sve_mul4_1_m8to7 & Pg3 { SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # st1d_z_p_br.xml: ST1D (scalar plus scalar) variant SVE # PATTERN xe5804000/mask=xff80e000 :st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & Rm_GPR64 & Pg3 { SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, 
Rm_GPR64); } # st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xe5a08000/mask=xffe0a000 :st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xe5808000/mask=xffe0a000 :st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 64-bit scaled offset # PATTERN xe5a0a000/mask=xffe0e000 :st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, "lsl #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st1d_z_p_bz.xml: ST1D (scalar plus vector) variant 64-bit unscaled offset # PATTERN xe580a000/mask=xffe0e000 :st1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1d(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st1h_z_p_ai.xml: ST1H (vector plus immediate) variant 32-bit element # PATTERN xe4e0a000/mask=xffe0e000 :st1h "{"^Zd.S^"}", Pg3, [Zn.S^sve_opt5_1_0to62] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Zd.S & sve_opt5_1_0to62 & Pg3 { 
SVE_st1h(Zd.S, Pg3, Zn.S, sve_opt5_1_0to62); } # st1h_z_p_ai.xml: ST1H (vector plus immediate) variant 64-bit element # PATTERN xe4c0a000/mask=xffe0e000 :st1h "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to62] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to62 & Pg3 { SVE_st1h(Zd.D, Pg3, Zn.D, sve_opt5_1_0to62); } # st1h_z_p_bi.xml: ST1H (scalar plus immediate) variant SVE # PATTERN xe480e000/mask=xff90e000 :st1h "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & sve_mul4_1_m8to7 & Pg3 { SVE_st1h(Zd.T_size_2122, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # st1h_z_p_br.xml: ST1H (scalar plus scalar) variant SVE # PATTERN xe4804000/mask=xff80e000 :st1h "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st1h(Zd.T_size_2122, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit scaled offset # PATTERN xe4e08000/mask=xffe0a000 :st1h "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 { SVE_st1h(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xe4a08000/mask=xffe0a000 :st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod^" #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & 
sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xe4808000/mask=xffe0a000 :st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 32-bit unscaled offset # PATTERN xe4c08000/mask=xffe0a000 :st1h "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 { SVE_st1h(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 64-bit scaled offset # PATTERN xe4a0a000/mask=xffe0e000 :st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, "lsl #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st1h_z_p_bz.xml: ST1H (scalar plus vector) variant 64-bit unscaled offset # PATTERN xe480a000/mask=xffe0e000 :st1h "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1h(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st1w_z_p_ai.xml: ST1W (vector plus immediate) variant 32-bit element # PATTERN xe560a000/mask=xffe0e000 :st1w "{"^Zd.S^"}", Pg3, [Zn.S^sve_opt5_1_0to124] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 
& sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.S & Zd.S & sve_opt5_1_0to124 & Pg3 { SVE_st1w(Zd.S, Pg3, Zn.S, sve_opt5_1_0to124); } # st1w_z_p_ai.xml: ST1W (vector plus immediate) variant 64-bit element # PATTERN xe540a000/mask=xffe0e000 :st1w "{"^Zd.D^"}", Pg3, [Zn.D^sve_opt5_1_0to124] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_imm5_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zt_0004 & Zn.D & Zd.D & sve_opt5_1_0to124 & Pg3 { SVE_st1w(Zd.D, Pg3, Zn.D, sve_opt5_1_0to124); } # st1w_z_p_bi.xml: ST1W (scalar plus immediate) variant SVE # PATTERN xe500e000/mask=xff90e000 :st1w "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_size_2122 & sve_b_20=0 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & sve_mul4_1_m8to7 & Pg3 { SVE_st1w(Zd.T_size_2122, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # st1w_z_p_br.xml: ST1W (scalar plus scalar) variant SVE # PATTERN xe5004000/mask=xff80e000 :st1w "{"^Zd.T_size_2122^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_size_2122 & sve_rm_1620 & sve_b_1315=0b010 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Zd.T_size_2122 & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st1w(Zd.T_size_2122, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit scaled offset # PATTERN xe5608000/mask=xffe0a000 :st1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod^" #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 { SVE_st1w(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit unpacked scaled offset # PATTERN xe5208000/mask=xffe0a000 :st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, 
sve_mod^" #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit unpacked unscaled offset # PATTERN xe5008000/mask=xffe0a000 :st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & sve_mod & Pg3 { SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D, sve_mod:1); } # st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 32-bit unscaled offset # PATTERN xe5408000/mask=xffe0a000 :st1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Zm.S, sve_mod] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_zm_1620 & sve_b_15=1 & sve_xs_14 & sve_b_13=0 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.S & Zd.S & sve_mod & Pg3 { SVE_st1w(Zd.S, Pg3, Rn_GPR64xsp, Zm.S, sve_mod:1); } # st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 64-bit scaled offset # PATTERN xe520a000/mask=xffe0e000 :st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D, "lsl #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st1w_z_p_bz.xml: ST1W (scalar plus vector) variant 64-bit unscaled offset # PATTERN xe500a000/mask=xffe0e000 :st1w "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Zm.D] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_zm_1620 & sve_b_1315=0b101 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zm.D & Zd.D & Pg3 { SVE_st1w(Zd.D, Pg3, Rn_GPR64xsp, Zm.D); } # st2b_z_p_bi.xml: ST2B (scalar plus immediate) variant SVE # PATTERN xe430e000/mask=xfff0e000 :st2b 
# SVE multi-register structure stores (ST2*/ST3*/ST4*), non-temporal stores
# (STNT1*), and the start of the predicate STR constructor. Zt/Ztt/Zttt/Ztttt
# name the consecutive registers of the transfer list; the p-code bodies pass
# the register list, governing predicate, and address operands to the matching
# SVE_st* pseudo-op.
"{"^Zt.B, Ztt.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 { SVE_st2b(Zt.B, Ztt.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); } # st2b_z_p_br.xml: ST2B (scalar plus scalar) variant SVE # PATTERN xe4206000/mask=xffe0e000 :st2b "{"^Zt.B, Ztt.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zt.B & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st2b(Zt.B, Ztt.B, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st2d_z_p_bi.xml: ST2D (scalar plus immediate) variant SVE # PATTERN xe5b0e000/mask=xfff0e000 :st2d "{"^Zt.D, Ztt.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 { SVE_st2d(Zt.D, Ztt.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); } # st2d_z_p_br.xml: ST2D (scalar plus scalar) variant SVE # PATTERN xe5a06000/mask=xffe0e000 :st2d "{"^Zt.D, Ztt.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zt.D & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st2d(Zt.D, Ztt.D, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st2h_z_p_bi.xml: ST2H (scalar plus immediate) variant SVE # PATTERN xe4b0e000/mask=xfff0e000 :st2h "{"^Zt.H, Ztt.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 { SVE_st2h(Zt.H, Ztt.H, Pg3, Rn_GPR64xsp, 
sve_mul4_1_m16to14); } # st2h_z_p_br.xml: ST2H (scalar plus scalar) variant SVE # PATTERN xe4a06000/mask=xffe0e000 :st2h "{"^Zt.H, Ztt.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zt.H & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st2h(Zt.H, Ztt.H, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st2w_z_p_bi.xml: ST2W (scalar plus immediate) variant SVE # PATTERN xe530e000/mask=xfff0e000 :st2w "{"^Zt.S, Ztt.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m16to14] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & sve_mul4_1_m16to14 & Pg3 { SVE_st2w(Zt.S, Ztt.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m16to14); } # st2w_z_p_br.xml: ST2W (scalar plus scalar) variant SVE # PATTERN xe5206000/mask=xffe0e000 :st2w "{"^Zt.S, Ztt.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b01 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st2w(Zt.S, Ztt.S, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st3b_z_p_bi.xml: ST3B (scalar plus immediate) variant SVE # PATTERN xe450e000/mask=xfff0e000 :st3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 { SVE_st3b(Zt.B, Ztt.B, Zttt.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); } # st3b_z_p_br.xml: ST3B (scalar plus scalar) variant SVE # PATTERN xe4406000/mask=xffe0e000 :st3b "{"^Zt.B, Ztt.B, Zttt.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & 
sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st3b(Zt.B, Ztt.B, Zttt.B, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st3d_z_p_bi.xml: ST3D (scalar plus immediate) variant SVE # PATTERN xe5d0e000/mask=xfff0e000 :st3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 { SVE_st3d(Zt.D, Ztt.D, Zttt.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); } # st3d_z_p_br.xml: ST3D (scalar plus scalar) variant SVE # PATTERN xe5c06000/mask=xffe0e000 :st3d "{"^Zt.D, Ztt.D, Zttt.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st3d(Zt.D, Ztt.D, Zttt.D, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st3h_z_p_bi.xml: ST3H (scalar plus immediate) variant SVE # PATTERN xe4d0e000/mask=xfff0e000 :st3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 { SVE_st3h(Zt.H, Ztt.H, Zttt.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); } # st3h_z_p_br.xml: ST3H (scalar plus scalar) variant SVE # PATTERN xe4c06000/mask=xffe0e000 :st3h "{"^Zt.H, Ztt.H, Zttt.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st3h(Zt.H, Ztt.H, Zttt.H, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st3w_z_p_bi.xml: ST3W (scalar plus immediate) variant SVE 
# PATTERN xe550e000/mask=xfff0e000 :st3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m24to21] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & sve_mul4_1_m24to21 & Pg3 { SVE_st3w(Zt.S, Ztt.S, Zttt.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m24to21); } # st3w_z_p_br.xml: ST3W (scalar plus scalar) variant SVE # PATTERN xe5406000/mask=xffe0e000 :st3w "{"^Zt.S, Ztt.S, Zttt.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b10 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st3w(Zt.S, Ztt.S, Zttt.S, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st4b_z_p_bi.xml: ST4B (scalar plus immediate) variant SVE # PATTERN xe470e000/mask=xfff0e000 :st4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 { SVE_st4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); } # st4b_z_p_br.xml: ST4B (scalar plus scalar) variant SVE # PATTERN xe4606000/mask=xffe0e000 :st4b "{"^Zt.B, Ztt.B, Zttt.B, Ztttt.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.B & Zttt.B & Zt.B & Ztttt.B & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st4b(Zt.B, Ztt.B, Zttt.B, Ztttt.B, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st4d_z_p_bi.xml: ST4D (scalar plus immediate) variant SVE # PATTERN xe5f0e000/mask=xfff0e000 :st4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & 
sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 { SVE_st4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); } # st4d_z_p_br.xml: ST4D (scalar plus scalar) variant SVE # PATTERN xe5e06000/mask=xffe0e000 :st4d "{"^Zt.D, Ztt.D, Zttt.D, Ztttt.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.D & Zttt.D & Zt.D & Ztttt.D & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st4d(Zt.D, Ztt.D, Zttt.D, Ztttt.D, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st4h_z_p_bi.xml: ST4H (scalar plus immediate) variant SVE # PATTERN xe4f0e000/mask=xfff0e000 :st4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 { SVE_st4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); } # st4h_z_p_br.xml: ST4H (scalar plus scalar) variant SVE # PATTERN xe4e06000/mask=xffe0e000 :st4h "{"^Zt.H, Ztt.H, Zttt.H, Ztttt.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.H & Zttt.H & Zt.H & Ztttt.H & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st4h(Zt.H, Ztt.H, Zttt.H, Ztttt.H, Pg3, Rn_GPR64xsp, Rm_GPR64); } # st4w_z_p_bi.xml: ST4W (scalar plus immediate) variant SVE # PATTERN xe570e000/mask=xfff0e000 :st4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m32to28] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_b_20=1 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 
& Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & sve_mul4_1_m32to28 & Pg3 { SVE_st4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m32to28); } # st4w_z_p_br.xml: ST4W (scalar plus scalar) variant SVE # PATTERN xe5606000/mask=xffe0e000 :st4w "{"^Zt.S, Ztt.S, Zttt.S, Ztttt.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b11 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Ztt.S & Zt.S & Zttt.S & Ztttt.S & Rn_GPR64xsp & Rm_GPR64 & Pg3 { SVE_st4w(Zt.S, Ztt.S, Zttt.S, Ztttt.S, Pg3, Rn_GPR64xsp, Rm_GPR64); } # stnt1b_z_p_bi.xml: STNT1B (scalar plus immediate) variant SVE # PATTERN xe410e000/mask=xfff0e000 :stnt1b "{"^Zd.B^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.B & sve_mul4_1_m8to7 & Pg3 { SVE_stnt1b(Zd.B, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # stnt1b_z_p_br.xml: STNT1B (scalar plus scalar) variant SVE # PATTERN xe4006000/mask=xffe0e000 :stnt1b "{"^Zd.B^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.B & Rm_GPR64 & Pg3 { SVE_stnt1b(Zd.B, Pg3, Rn_GPR64xsp, Rm_GPR64); } # stnt1d_z_p_bi.xml: STNT1D (scalar plus immediate) variant SVE # PATTERN xe590e000/mask=xfff0e000 :stnt1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=1 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & sve_mul4_1_m8to7 & Pg3 { SVE_stnt1d(Zd.D, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # stnt1d_z_p_br.xml: STNT1D (scalar plus scalar) variant SVE # PATTERN xe5806000/mask=xffe0e000 :stnt1d "{"^Zd.D^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #3"] is sve_b_2531=0b1110010 & sve_b_24=1 & 
sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.D & Rm_GPR64 & Pg3 { SVE_stnt1d(Zd.D, Pg3, Rn_GPR64xsp, Rm_GPR64); } # stnt1h_z_p_bi.xml: STNT1H (scalar plus immediate) variant SVE # PATTERN xe490e000/mask=xfff0e000 :stnt1h "{"^Zd.H^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.H & sve_mul4_1_m8to7 & Pg3 { SVE_stnt1h(Zd.H, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # stnt1h_z_p_br.xml: STNT1H (scalar plus scalar) variant SVE # PATTERN xe4806000/mask=xffe0e000 :stnt1h "{"^Zd.H^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #1"] is sve_b_2531=0b1110010 & sve_b_24=0 & sve_b_23=1 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.H & Rm_GPR64 & Pg3 { SVE_stnt1h(Zd.H, Pg3, Rn_GPR64xsp, Rm_GPR64); } # stnt1w_z_p_bi.xml: STNT1W (scalar plus immediate) variant SVE # PATTERN xe510e000/mask=xfff0e000 :stnt1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp^sve_mul4_1_m8to7] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2022=0b001 & sve_imm4_1619 & sve_b_1315=0b111 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.S & sve_mul4_1_m8to7 & Pg3 { SVE_stnt1w(Zd.S, Pg3, Rn_GPR64xsp, sve_mul4_1_m8to7); } # stnt1w_z_p_br.xml: STNT1W (scalar plus scalar) variant SVE # PATTERN xe5006000/mask=xffe0e000 :stnt1w "{"^Zd.S^"}", Pg3, [Rn_GPR64xsp, Rm_GPR64, "lsl #2"] is sve_b_2531=0b1110010 & sve_b_24=1 & sve_b_23=0 & sve_b_2122=0b00 & sve_rm_1620 & sve_b_1315=0b011 & sve_pg_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & Zd.S & Rm_GPR64 & Pg3 { SVE_stnt1w(Zd.S, Pg3, Rn_GPR64xsp, Rm_GPR64); } # str_p_bi.xml: STR (predicate) variant SVE # PATTERN xe5800000/mask=xffc0e010 :str Pd, [Rn_GPR64xsp^sve_mul9_2_m256to255] is sve_b_2231=0b1110010110 & sve_imm9h_1621 & sve_b_1315=0b000 & sve_imm9l_1012 & sve_rn_0509 
& sve_b_04=0 & sve_pt_0003 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Pd { SVE_str(Pd, Rn_GPR64xsp, sve_mul9_2_m256to255); } # str_z_bi.xml: STR (vector) variant SVE # PATTERN xe5804000/mask=xffc0e000 :str Zd, [Rn_GPR64xsp^sve_mul9_2_m256to255] is sve_b_2231=0b1110010110 & sve_imm9h_1621 & sve_b_1315=0b010 & sve_imm9l_1012 & sve_rn_0509 & sve_zt_0004 & Rn_GPR64xsp & sve_mul9_2_m256to255 & Zd { SVE_str(Zd, Rn_GPR64xsp, sve_mul9_2_m256to255); } # sub_z_p_zz.xml: SUB (vectors, predicated) variant SVE # PATTERN x04010000/mask=xff3fe000 :sub Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_sub(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # sub_z_zi.xml: SUB (immediate) variant SVE # PATTERN x2521c000/mask=xff3fc000 :sub Zd.T, Zd.T_2, sve_shf8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_sub(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); } # sub_z_zz.xml: SUB (vectors, unpredicated) variant SVE # PATTERN x04200400/mask=xff20fc00 :sub Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b00 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_sub(Zd.T, Zn.T, Zm.T); } # subr_z_p_zz.xml: SUBR (vectors) variant SVE # PATTERN x04030000/mask=xff3fe000 :subr Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_subr(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # subr_z_zi.xml: SUBR (immediate) variant SVE # PATTERN x2523c000/mask=xff3fc000 :subr Zd.T, Zd.T_2, sve_shf8_1_0to255 is 
sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_subr(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); } # sunpkhi_z_z.xml: SUNPKHI, SUNPKLO variant High half # PATTERN x05313800/mask=xff3ffc00 :sunpkhi Zd.T, Zn.Tb is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=0 & sve_b_16=1 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T { Zd.T = SVE_sunpkhi(Zd.T, Zn.Tb); } # sunpkhi_z_z.xml: SUNPKHI, SUNPKLO variant Low half # PATTERN x05303800/mask=xff3ffc00 :sunpklo Zd.T, Zn.Tb is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=0 & sve_b_16=0 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T { Zd.T = SVE_sunpklo(Zd.T, Zn.Tb); } # sxtb_z_p_z.xml: SXTB, SXTH, SXTW variant Byte # PATTERN x0410a000/mask=xff3fe000 :sxtb Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_sxtb(Zd.T, Pg3_m, Zn.T); } # sxtb_z_p_z.xml: SXTB, SXTH, SXTW variant Halfword # PATTERN x0412a000/mask=xff3fe000 :sxth Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b01 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_sxth(Zd.T, Pg3_m, Zn.T); } # sxtb_z_p_z.xml: SXTB, SXTH, SXTW variant Word # PATTERN x0414a000/mask=xff3fe000 :sxtw Zd.D, Pg3_m, Zn.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=0 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_sxtw(Zd.D, Pg3_m, Zn.D); } # tbl_z_zz.xml: TBL variant SVE # PATTERN x05203000/mask=xff20fc00 :tbl Zd.T, "{"^Zn.T^"}", Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & 
sve_b_21=1 & sve_zm_1620 & sve_b_1015=0b001100 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_tbl(Zd.T, Zn.T, Zm.T); } # trn1_p_pp.xml: TRN1, TRN2 (predicates) variant Even # PATTERN x05205000/mask=xff30fe10 :trn1 Pd.T, Pn.T, Pm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=1 & sve_b_11=0 & sve_b_10=0 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T { Pd.T = SVE_trn1(Pd.T, Pn.T, Pm.T); } # trn1_p_pp.xml: TRN1, TRN2 (predicates) variant Odd # PATTERN x05205400/mask=xff30fe10 :trn2 Pd.T, Pn.T, Pm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=1 & sve_b_11=0 & sve_b_10=1 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T { Pd.T = SVE_trn2(Pd.T, Pn.T, Pm.T); } # trn1_z_zz.xml: TRN1, TRN2 (vectors) variant Even # PATTERN x05207000/mask=xff20fc00 :trn1 Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b10 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_trn1(Zd.T, Zn.T, Zm.T); } # trn1_z_zz.xml: TRN1, TRN2 (vectors) variant Odd # PATTERN x05207400/mask=xff20fc00 :trn2 Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b10 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_trn2(Zd.T, Zn.T, Zm.T); } # uabd_z_p_zz.xml: UABD variant SVE # PATTERN x040d0000/mask=xff3fe000 :uabd Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_uabd(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # uaddv_r_p_z.xml: UADDV variant SVE # PATTERN x04012000/mask=xff3fe000 :uaddv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b000 & sve_b_18=0 & 
sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_uaddv(Rd_FPR64, Zn.T, Pg3); } # ucvtf_z_p_z.xml: UCVTF variant 16-bit to half-precision # PATTERN x6553a000/mask=xffffe000 :ucvtf Zd.H, Pg3_m, Zn.H is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.H & Zd.H & Pg3_m { Zd.H = SVE_ucvtf(Zd.H, Pg3_m, Zn.H); } # ucvtf_z_p_z.xml: UCVTF variant 32-bit to half-precision # PATTERN x6555a000/mask=xffffe000 :ucvtf Zd.H, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.H & Pg3_m { Zd.H = SVE_ucvtf(Zd.H, Pg3_m, Zn.S); } # ucvtf_z_p_z.xml: UCVTF variant 32-bit to single-precision # PATTERN x6595a000/mask=xffffe000 :ucvtf Zd.S, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=0 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.S & Pg3_m { Zd.S = SVE_ucvtf(Zd.S, Pg3_m, Zn.S); } # ucvtf_z_p_z.xml: UCVTF variant 32-bit to double-precision # PATTERN x65d1a000/mask=xffffe000 :ucvtf Zd.D, Pg3_m, Zn.S is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.S & Zd.D & Pg3_m { Zd.D = SVE_ucvtf(Zd.D, Pg3_m, Zn.S); } # ucvtf_z_p_z.xml: UCVTF variant 64-bit to half-precision # PATTERN x6557a000/mask=xffffe000 :ucvtf Zd.H, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=0 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.H & Pg3_m { Zd.H = SVE_ucvtf(Zd.H, Pg3_m, Zn.D); } # ucvtf_z_p_z.xml: UCVTF variant 64-bit to single-precision # PATTERN 
x65d5a000/mask=xffffe000 :ucvtf Zd.S, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.S & Pg3_m { Zd.S = SVE_ucvtf(Zd.S, Pg3_m, Zn.D); } # ucvtf_z_p_z.xml: UCVTF variant 64-bit to double-precision # PATTERN x65d7a000/mask=xffffe000 :ucvtf Zd.D, Pg3_m, Zn.D is sve_b_2431=0b01100101 & sve_b_23=1 & sve_b_22=1 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_ucvtf(Zd.D, Pg3_m, Zn.D); } # udiv_z_p_zz.xml: UDIV variant SVE # PATTERN x04950000/mask=xffbfe000 :udiv Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m { Zd.T_sz = SVE_udiv(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); } # udivr_z_p_zz.xml: UDIVR variant SVE # PATTERN x04970000/mask=xffbfe000 :udivr Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz is sve_b_2431=0b00000100 & sve_b_23=1 & sve_sz_22 & sve_b_1921=0b010 & sve_b_18=1 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T_sz & Zd.T_sz_2 & Zn.T_sz & Pg3_m { Zd.T_sz = SVE_udivr(Zd.T_sz, Pg3_m, Zd.T_sz_2, Zn.T_sz); } # udot_z_zzz.xml: UDOT (vectors) variant SVE # PATTERN x44800400/mask=xffa0fc00 :udot Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz is sve_b_2431=0b01000100 & sve_b_23=1 & sve_sz_22 & sve_b_21=0 & sve_zm_1620 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zm.Tb_sz & Zd.T_sz & Zn.Tb_sz { Zd.T_sz = SVE_udot(Zd.T_sz, Zn.Tb_sz, Zm.Tb_sz); } # udot_z_zzzi.xml: UDOT (indexed) variant 32-bit # PATTERN x44a00400/mask=xffe0fc00 :udot Zd.S, Zn.B, Zm3.B[sve_i2_1920] is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_i2_1920 & sve_zm_1618 & sve_b_1115=0b00000 & sve_b_10=1 & 
sve_zn_0509 & sve_zda_0004 & Zd.S & Zn.B & Zm3.B { Zd.S = SVE_udot(Zd.S, Zn.B, Zm3.B, sve_i2_1920:1); } # udot_z_zzzi.xml: UDOT (indexed) variant 64-bit # PATTERN x44e00400/mask=xffe0fc00 :udot Zd.D, Zn.H, Zm4.H[sve_i1_20] is sve_b_2431=0b01000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_i1_20 & sve_zm_1619 & sve_b_1115=0b00000 & sve_b_10=1 & sve_zn_0509 & sve_zda_0004 & Zd.D & Zn.H & Zm4.H { Zd.D = SVE_udot(Zd.D, Zn.H, Zm4.H, sve_i1_20:1); } # umax_z_p_zz.xml: UMAX (vectors) variant SVE # PATTERN x04090000/mask=xff3fe000 :umax Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_umax(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # umax_z_zi.xml: UMAX (immediate) variant SVE # PATTERN x2529c000/mask=xff3fe000 :umax Zd.T, Zd.T_2, "#"^sve_imm8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 { Zd.T = SVE_umax(Zd.T, Zd.T_2, sve_imm8_1_0to255:1); } # umaxv_r_p_z.xml: UMAXV variant SVE # PATTERN x04092000/mask=xff3fe000 :umaxv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_umaxv(Rd_FPR8, Pg3, Zn.T); } # umaxv_r_p_z.xml: UMAXV variant SVE # PATTERN x04092000/mask=xff3fe000 :umaxv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_umaxv(Rd_FPR32, Pg3, Zn.T); } # umaxv_r_p_z.xml: UMAXV variant SVE # PATTERN x04092000/mask=xff3fe000 :umaxv Rd_FPR16, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & 
sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_umaxv(Rd_FPR16, Pg3, Zn.T); } # umaxv_r_p_z.xml: UMAXV variant SVE # PATTERN x04092000/mask=xff3fe000 :umaxv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=0 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_umaxv(Rd_FPR64, Pg3, Zn.T); } # umin_z_p_zz.xml: UMIN (vectors) variant SVE # PATTERN x040b0000/mask=xff3fe000 :umin Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_umin(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # umin_z_zi.xml: UMIN (immediate) variant SVE # PATTERN x252bc000/mask=xff3fe000 :umin Zd.T, Zd.T_2, "#"^sve_imm8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b101 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1415=0b11 & sve_b_13=0 & sve_imm8_0512 & sve_zdn_0004 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 { Zd.T = SVE_umin(Zd.T, Zd.T_2, sve_imm8_1_0to255:1); } # uminv_r_p_z.xml: UMINV variant SVE # PATTERN x040b2000/mask=xff3fe000 :uminv Rd_FPR8, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b00 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR8 & Pg3 { Rd_FPR8 = SVE_uminv(Rd_FPR8, Pg3, Zn.T); } # uminv_r_p_z.xml: UMINV variant SVE # PATTERN x040b2000/mask=xff3fe000 :uminv Rd_FPR32, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b10 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR32 & Pg3 { Rd_FPR32 = SVE_uminv(Rd_FPR32, Pg3, Zn.T); } # uminv_r_p_z.xml: UMINV variant SVE # PATTERN x040b2000/mask=xff3fe000 :uminv Rd_FPR16, 
Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b01 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR16 & Pg3 { Rd_FPR16 = SVE_uminv(Rd_FPR16, Pg3, Zn.T); } # uminv_r_p_z.xml: UMINV variant SVE # PATTERN x040b2000/mask=xff3fe000 :uminv Rd_FPR64, Pg3, Zn.T is sve_b_2431=0b00000100 & sve_size_2223=0b11 & sve_b_1921=0b001 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b001 & sve_pg_1012 & sve_zn_0509 & sve_vd_0004 & Zn.T & Rd_FPR64 & Pg3 { Rd_FPR64 = SVE_uminv(Rd_FPR64, Pg3, Zn.T); } # umulh_z_p_zz.xml: UMULH variant SVE # PATTERN x04130000/mask=xff3fe000 :umulh Zd.T, Pg3_m, Zd.T_2, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_18=0 & sve_b_17=1 & sve_b_16=1 & sve_b_1315=0b000 & sve_pg_1012 & sve_zm_0509 & sve_zdn_0004 & Zd.T & Zd.T_2 & Zn.T & Pg3_m { Zd.T = SVE_umulh(Zd.T, Pg3_m, Zd.T_2, Zn.T); } # uqadd_z_zi.xml: UQADD (immediate) variant SVE # PATTERN x2525c000/mask=xff3fc000 :uqadd Zd.T, Zd.T_2, sve_shf8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_uqadd(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); } # uqadd_z_zz.xml: UQADD (vectors) variant SVE # PATTERN x04201400/mask=xff20fc00 :uqadd Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b10 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_uqadd(Zd.T, Zn.T, Zm.T); } # uqdecb_r_rs.xml: UQDECB variant 32-bit # PATTERN x0420fc00/mask=xfff0fc00 :uqdecb Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern 
{ Rd_GPR32 = SVE_uqdecb(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecb_r_rs.xml: UQDECB variant 64-bit # PATTERN x0430fc00/mask=xfff0fc00 :uqdecb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqdecb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecd_r_rs.xml: UQDECD (scalar) variant 32-bit # PATTERN x04e0fc00/mask=xfff0fc00 :uqdecd Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqdecd(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecd_r_rs.xml: UQDECD (scalar) variant 64-bit # PATTERN x04f0fc00/mask=xfff0fc00 :uqdecd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqdecd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecd_z_zs.xml: UQDECD (vector) variant SVE # PATTERN x04e0cc00/mask=xfff0fc00 :uqdecd Zd.D^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern { Zd.D = SVE_uqdecd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdech_r_rs.xml: UQDECH (scalar) variant 32-bit # PATTERN x0460fc00/mask=xfff0fc00 :uqdech Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & 
sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqdech(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdech_r_rs.xml: UQDECH (scalar) variant 64-bit # PATTERN x0470fc00/mask=xfff0fc00 :uqdech Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqdech(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdech_z_zs.xml: UQDECH (vector) variant SVE # PATTERN x0460cc00/mask=xfff0fc00 :uqdech Zd.H^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern { Zd.H = SVE_uqdech(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecp_r_p_r.xml: UQDECP (scalar) variant 32-bit # PATTERN x252b8800/mask=xff3ffe00 :uqdecp Rd_GPR32, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 { Rd_GPR32 = SVE_uqdecp(Rd_GPR32, Pn.T); } # uqdecp_r_p_r.xml: UQDECP (scalar) variant 64-bit # PATTERN x252b8c00/mask=xff3ffe00 :uqdecp Rd_GPR64, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 { Rd_GPR64 = SVE_uqdecp(Rd_GPR64, Pn.T); } # uqdecp_z_p_z.xml: UQDECP (vector) variant SVE # PATTERN x252b8000/mask=xff3ffe00 :uqdecp Zd.T, Pn is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=1 & sve_b_16=1 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn { Zd.T = SVE_uqdecp(Zd.T, Pn); } # 
uqdecw_r_rs.xml: UQDECW (scalar) variant 32-bit # PATTERN x04a0fc00/mask=xfff0fc00 :uqdecw Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqdecw(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecw_r_rs.xml: UQDECW (scalar) variant 64-bit # PATTERN x04b0fc00/mask=xfff0fc00 :uqdecw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqdecw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqdecw_z_zs.xml: UQDECW (vector) variant SVE # PATTERN x04a0cc00/mask=xfff0fc00 :uqdecw Zd.S^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=1 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern { Zd.S = SVE_uqdecw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincb_r_rs.xml: UQINCB variant 32-bit # PATTERN x0420f400/mask=xfff0fc00 :uqincb Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqincb(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincb_r_rs.xml: UQINCB variant 64-bit # PATTERN x0430f400/mask=xfff0fc00 :uqincb Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & 
sve_mul_pattern { Rd_GPR64 = SVE_uqincb(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincd_r_rs.xml: UQINCD (scalar) variant 32-bit # PATTERN x04e0f400/mask=xfff0fc00 :uqincd Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqincd(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincd_r_rs.xml: UQINCD (scalar) variant 64-bit # PATTERN x04f0f400/mask=xfff0fc00 :uqincd Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqincd(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincd_z_zs.xml: UQINCD (vector) variant SVE # PATTERN x04e0c400/mask=xfff0fc00 :uqincd Zd.D^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.D & sve_imm4_1_1to16 & sve_mul_pattern { Zd.D = SVE_uqincd(Zd.D, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqinch_r_rs.xml: UQINCH (scalar) variant 32-bit # PATTERN x0460f400/mask=xfff0fc00 :uqinch Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqinch(Rd_GPR32, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqinch_r_rs.xml: UQINCH (scalar) variant 64-bit # PATTERN x0470f400/mask=xfff0fc00 :uqinch Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & 
sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqinch(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqinch_z_zs.xml: UQINCH (vector) variant SVE # PATTERN x0460c400/mask=xfff0fc00 :uqinch Zd.H^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=0 & sve_b_22=1 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.H & sve_imm4_1_1to16 & sve_mul_pattern { Zd.H = SVE_uqinch(Zd.H, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincp_r_p_r.xml: UQINCP (scalar) variant 32-bit # PATTERN x25298800/mask=xff3ffe00 :uqincp Rd_GPR32, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR32 { Rd_GPR32 = SVE_uqincp(Rd_GPR32, Pn.T); } # uqincp_r_p_r.xml: UQINCP (scalar) variant 64-bit # PATTERN x25298c00/mask=xff3ffe00 :uqincp Rd_GPR64, Pn.T is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10001 & sve_b_10=1 & sve_b_09=0 & sve_pg_0508 & sve_rdn_0004 & Pn.T & Rd_GPR64 { Rd_GPR64 = SVE_uqincp(Rd_GPR64, Pn.T); } # uqincp_z_p_z.xml: UQINCP (vector) variant SVE # PATTERN x25298000/mask=xff3ffe00 :uqincp Zd.T, Pn is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1821=0b1010 & sve_b_17=0 & sve_b_16=1 & sve_b_1115=0b10000 & sve_b_10=0 & sve_b_09=0 & sve_pg_0508 & sve_zdn_0004 & Zd.T & Pn { Zd.T = SVE_uqincp(Zd.T, Pn); } # uqincw_r_rs.xml: UQINCW (scalar) variant 32-bit # PATTERN x04a0f400/mask=xfff0fc00 :uqincw Rd_GPR32^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=0 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR32 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR32 = SVE_uqincw(Rd_GPR32, sve_mul_pattern, 
sve_imm4_1_1to16:1); } # uqincw_r_rs.xml: UQINCW (scalar) variant 64-bit # PATTERN x04b0f400/mask=xfff0fc00 :uqincw Rd_GPR64^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_21=1 & sve_b_20=1 & sve_imm4_1619 & sve_b_1215=0b1111 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_rdn_0004 & sve_pattern & Rd_GPR64 & sve_imm4_1_1to16 & sve_mul_pattern { Rd_GPR64 = SVE_uqincw(Rd_GPR64, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqincw_z_zs.xml: UQINCW (vector) variant SVE # PATTERN x04a0c400/mask=xfff0fc00 :uqincw Zd.S^sve_mul_pattern is sve_b_2431=0b00000100 & sve_b_23=1 & sve_b_22=0 & sve_b_2021=0b10 & sve_imm4_1619 & sve_b_1215=0b1100 & sve_b_11=0 & sve_b_10=1 & sve_pattern_0509 & sve_zdn_0004 & sve_pattern & Zd.S & sve_imm4_1_1to16 & sve_mul_pattern { Zd.S = SVE_uqincw(Zd.S, sve_mul_pattern, sve_imm4_1_1to16:1); } # uqsub_z_zi.xml: UQSUB (immediate) variant SVE # PATTERN x2527c000/mask=xff3fc000 :uqsub Zd.T, Zd.T_2, sve_shf8_1_0to255 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_1921=0b100 & sve_b_1718=0b11 & sve_b_16=1 & sve_b_1415=0b11 & sve_sh_13 & sve_imm8_0512 & sve_zdn_0004 & sve_shift_13 & Zd.T & Zd.T_2 & sve_imm8_1_0to255 & sve_shf8_1_0to255 { Zd.T = SVE_uqsub(Zd.T, Zd.T_2, sve_shf8_1_0to255, sve_shift_13:1); } # uqsub_z_zz.xml: UQSUB (vectors) variant SVE # PATTERN x04201c00/mask=xff20fc00 :uqsub Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b000 & sve_b_1112=0b11 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_uqsub(Zd.T, Zn.T, Zm.T); } # uunpkhi_z_z.xml: UUNPKHI, UUNPKLO variant High half # PATTERN x05333800/mask=xff3ffc00 :uunpkhi Zd.T, Zn.Tb is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=1 & sve_b_16=1 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T { Zd.T = SVE_uunpkhi(Zd.T, Zn.Tb); } # uunpkhi_z_z.xml: UUNPKHI, UUNPKLO variant Low half # PATTERN x05323800/mask=xff3ffc00 :uunpklo Zd.T, Zn.Tb is 
sve_b_2431=0b00000101 & sve_size_2223 & sve_b_1821=0b1100 & sve_b_17=1 & sve_b_16=0 & sve_b_1015=0b001110 & sve_zn_0509 & sve_zd_0004 & Zn.Tb & Zd.T { Zd.T = SVE_uunpklo(Zd.T, Zn.Tb); } # uxtb_z_p_z.xml: UXTB, UXTH, UXTW variant Byte # PATTERN x0411a000/mask=xff3fe000 :uxtb Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b00 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_uxtb(Zd.T, Pg3_m, Zn.T); } # uxtb_z_p_z.xml: UXTB, UXTH, UXTW variant Halfword # PATTERN x0413a000/mask=xff3fe000 :uxth Zd.T, Pg3_m, Zn.T is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b01 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zd.T & Zn.T & Pg3_m { Zd.T = SVE_uxth(Zd.T, Pg3_m, Zn.T); } # uxtb_z_p_z.xml: UXTB, UXTH, UXTW variant Word # PATTERN x0415a000/mask=xff3fe000 :uxtw Zd.D, Pg3_m, Zn.D is sve_b_2431=0b00000100 & sve_size_2223 & sve_b_1921=0b010 & sve_b_1718=0b10 & sve_b_16=1 & sve_b_1315=0b101 & sve_pg_1012 & sve_zn_0509 & sve_zd_0004 & Zn.D & Zd.D & Pg3_m { Zd.D = SVE_uxtw(Zd.D, Pg3_m, Zn.D); } # uzp1_p_pp.xml: UZP1, UZP2 (predicates) variant Even # PATTERN x05204800/mask=xff30fe10 :uzp1 Pd.T, Pn.T, Pm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=1 & sve_b_10=0 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T { Pd.T = SVE_uzp1(Pd.T, Pn.T, Pm.T); } # uzp1_p_pp.xml: UZP1, UZP2 (predicates) variant Odd # PATTERN x05204c00/mask=xff30fe10 :uzp2 Pd.T, Pn.T, Pm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=1 & sve_b_10=1 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T { Pd.T = SVE_uzp2(Pd.T, Pn.T, Pm.T); } # uzp1_z_zz.xml: UZP1, UZP2 (vectors) variant Even # PATTERN x05206800/mask=xff20fc00 :uzp1 Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000101 
& sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b01 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_uzp1(Zd.T, Zn.T, Zm.T); } # uzp1_z_zz.xml: UZP1, UZP2 (vectors) variant Odd # PATTERN x05206c00/mask=xff20fc00 :uzp2 Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b01 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_uzp2(Zd.T, Zn.T, Zm.T); } # whilele_p_p_rr.xml: WHILELE variant SVE # PATTERN x25200410/mask=xff20ec10 :whilele Pd.T, Rn_GPR64, Rm_GPR64 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 { Pd.T = SVE_whilele(Pd.T, Rn_GPR64, Rm_GPR64); } # whilele_p_p_rr.xml: WHILELE variant SVE # PATTERN x25200410/mask=xff20ec10 :whilele Pd.T, Rn_GPR32, Rm_GPR32 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 { Pd.T = SVE_whilele(Pd.T, Rn_GPR32, Rm_GPR32); } # whilelo_p_p_rr.xml: WHILELO variant SVE # PATTERN x25200c00/mask=xff20ec10 :whilelo Pd.T, Rn_GPR64, Rm_GPR64 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 { Pd.T = SVE_whilelo(Pd.T, Rn_GPR64, Rm_GPR64); } # whilelo_p_p_rr.xml: WHILELO variant SVE # PATTERN x25200c00/mask=xff20ec10 :whilelo Pd.T, Rn_GPR32, Rm_GPR32 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 { Pd.T = SVE_whilelo(Pd.T, Rn_GPR32, Rm_GPR32); } # whilels_p_p_rr.xml: WHILELS variant SVE # PATTERN 
x25200c10/mask=xff20ec10 :whilels Pd.T, Rn_GPR64, Rm_GPR64 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 { Pd.T = SVE_whilels(Pd.T, Rn_GPR64, Rm_GPR64); } # whilels_p_p_rr.xml: WHILELS variant SVE # PATTERN x25200c10/mask=xff20ec10 :whilels Pd.T, Rn_GPR32, Rm_GPR32 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=1 & sve_b_10=1 & sve_rn_0509 & sve_b_04=1 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 { Pd.T = SVE_whilels(Pd.T, Rn_GPR32, Rm_GPR32); } # whilelt_p_p_rr.xml: WHILELT variant SVE # PATTERN x25200400/mask=xff20ec10 :whilelt Pd.T, Rn_GPR64, Rm_GPR64 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=1 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR64 & Rm_GPR64 { Pd.T = SVE_whilelt(Pd.T, Rn_GPR64, Rm_GPR64); } # whilelt_p_p_rr.xml: WHILELT variant SVE # PATTERN x25200400/mask=xff20ec10 :whilelt Pd.T, Rn_GPR32, Rm_GPR32 is sve_b_2431=0b00100101 & sve_size_2223 & sve_b_21=1 & sve_rm_1620 & sve_b_1315=0b000 & sve_sf_12=0 & sve_b_11=0 & sve_b_10=1 & sve_rn_0509 & sve_b_04=0 & sve_pd_0003 & Pd.T & Rn_GPR32 & Rm_GPR32 { Pd.T = SVE_whilelt(Pd.T, Rn_GPR32, Rm_GPR32); } # wrffr_f_p.xml: WRFFR variant SVE # PATTERN x25289000/mask=xfffffe1f :wrffr Pn.B is sve_b_3031=0b00 & sve_b_2429=0b100101 & sve_b_23=0 & sve_b_22=0 & sve_b_2021=0b10 & sve_b_1019=0b1000100100 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_b_03=0 & sve_b_02=0 & sve_b_0001=0b00 & Pn.B { SVE_wrffr(Pn.B); } # zip1_p_pp.xml: ZIP1, ZIP2 (predicates) variant High halves # PATTERN x05204400/mask=xff30fe10 :zip2 Pd.T, Pn.T, Pm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=0 & sve_b_10=1 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & 
Pd.T & Pm.T { Pd.T = SVE_zip2(Pd.T, Pn.T, Pm.T); } # zip1_p_pp.xml: ZIP1, ZIP2 (predicates) variant Low halves # PATTERN x05204000/mask=xff30fe10 :zip1 Pd.T, Pn.T, Pm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_2021=0b10 & sve_pm_1619 & sve_b_1315=0b010 & sve_b_12=0 & sve_b_11=0 & sve_b_10=0 & sve_b_09=0 & sve_pn_0508 & sve_b_04=0 & sve_pd_0003 & Pn.T & Pd.T & Pm.T { Pd.T = SVE_zip1(Pd.T, Pn.T, Pm.T); } # zip1_z_zz.xml: ZIP1, ZIP2 (vectors) variant High halves # PATTERN x05206400/mask=xff20fc00 :zip2 Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b00 & sve_b_10=1 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_zip2(Zd.T, Zn.T, Zm.T); } # zip1_z_zz.xml: ZIP1, ZIP2 (vectors) variant Low halves # PATTERN x05206000/mask=xff20fc00 :zip1 Zd.T, Zn.T, Zm.T is sve_b_2431=0b00000101 & sve_size_2223 & sve_b_21=1 & sve_zm_1620 & sve_b_1315=0b011 & sve_b_1112=0b00 & sve_b_10=0 & sve_zn_0509 & sve_zd_0004 & Zm.T & Zd.T & Zn.T { Zd.T = SVE_zip1(Zd.T, Zn.T, Zm.T); } ================================================ FILE: pypcode/processors/AARCH64/data/languages/AppleSilicon.ldefs ================================================ AppleSilicon ARM v8.5-A LE instructions, LE data, AMX extensions ================================================ FILE: pypcode/processors/AARCH64/data/manuals/AARCH64.idx ================================================ @DDI0487H_a_a-profile_architecture_reference_manual.pdf[ARM Architecture Reference Manual - ARM A-profile architecture, December 2021 (ARM DDI 0487H.a)] abs, 2017 adc, 1144 adcs, 1146 add, 2019 addg, 1155 addhn, 2021 addp, 2025 adds, 1161 addv, 2027 adr, 1163 adrp, 1164 aesd, 2029 aese, 2030 aesimc, 2031 aesmc, 2032 and, 2033 ands, 1171 asr, 1175 asrv, 1177 at, 1179 autda, 1181 autdb, 1182 autia, 1183 autib, 1186 axflag, 1189 b, 1191 bc, 1192 bcax, 2035 bfc, 1193 bfcvt, 2037 bfcvtn, 2038 bfdot, 2041 bfi, 1195 bfm, 1197 bfmlalb, 2045 bfmmla, 2046 
bfxil, 1199 bic, 2050 bics, 1203 bif, 2052 bit, 2054 bl, 1205 blr, 1206 blraa, 1207 br, 1209 braa, 1210 brb, 1212 brk, 1213 bsl, 2056 bti, 1214 cas, 1223 casb, 1216 cash, 1218 casp, 1220 cbnz, 1226 cbz, 1227 ccmn, 1230 ccmp, 1234 cfinv, 1236 cfp, 1237 cinc, 1238 cinv, 1240 clrex, 1242 cls, 2058 clz, 2060 cmeq, 2064 cmge, 2070 cmgt, 2076 cmhi, 2079 cmhs, 2082 cmle, 2085 cmlt, 2088 cmn, 1250 cmp, 1256 cmpp, 1258 cmtst, 2090 cneg, 1259 cnt, 2092 cpp, 1261 cpyfp, 1262 cpyfpn, 1267 cpyfprn, 1272 cpyfprt, 1277 cpyfprtn, 1282 cpyfprtrn, 1287 cpyfprtwn, 1292 cpyfpt, 1297 cpyfptn, 1302 cpyfptrn, 1307 cpyfptwn, 1312 cpyfpwn, 1317 cpyfpwt, 1322 cpyfpwtn, 1327 cpyfpwtrn, 1332 cpyfpwtwn, 1337 cpyp, 1342 cpypn, 1348 cpyprn, 1354 cpyprt, 1360 cpyprtn, 1366 cpyprtrn, 1372 cpyprtwn, 1378 cpypt, 1384 cpyptn, 1390 cpyptrn, 1396 cpyptwn, 1402 cpypwn, 1408 cpypwt, 1414 cpypwtn, 1420 cpypwtrn, 1426 cpypwtwn, 1432 crc32b, 1438 crc32cb, 1440 csdb, 1442 csel, 1443 cset, 1445 csetm, 1447 csinc, 1449 csinv, 1451 csneg, 1453 dc, 1455 dcps1, 1457 dcps2, 1458 dcps3, 1459 dgh, 1460 dmb, 1461 drps, 1463 dsb, 1464 dup, 2097 dvp, 1467 eon, 1468 eor, 2099 eor3, 2101 eret, 1474 eretaa, 1475 esb, 1476 ext, 2102 extr, 1477 fabd, 2104 fabs, 2109 facge, 2111 facgt, 2115 fadd, 2121 faddp, 2125 fcadd, 2127 fccmp, 2129 fccmpe, 2131 fcmeq, 2137 fcmge, 2144 fcmgt, 2151 fcmla, 2157 fcmle, 2160 fcmlt, 2163 fcmp, 2166 fcmpe, 2168 fcsel, 2170 fcvt, 2172 fcvtas, 2177 fcvtau, 2182 fcvtl, 2184 fcvtms, 2189 fcvtmu, 2194 fcvtn, 2196 fcvtns, 2201 fcvtnu, 2206 fcvtps, 2211 fcvtpu, 2216 fcvtxn, 2218 fcvtzs, 2229 fcvtzu, 2239 fdiv, 2243 fjcvtzs, 2245 fmadd, 2246 fmax, 2250 fmaxnm, 2254 fmaxnmp, 2258 fmaxnmv, 2260 fmaxp, 2264 fmaxv, 2266 fmin, 2270 fminnm, 2274 fminnmp, 2278 fminnmv, 2280 fminp, 2284 fminv, 2286 fmla, 2292 fmlal, 2296 fmls, 2302 fmlsl, 2306 fmov, 2316 fmsub, 2318 fmul, 2326 fmulx, 2332 fneg, 2337 fnmadd, 2339 fnmsub, 2341 fnmul, 2343 frecpe, 2345 frecps, 2348 frecpx, 2351 frint32x, 2355 frint32z, 2359 
frint64x, 2363 frint64z, 2367 frinta, 2371 frinti, 2375 frintm, 2379 frintn, 2383 frintp, 2387 frintx, 2391 frintz, 2395 frsqrte, 2397 frsqrts, 2400 fsqrt, 2405 fsub, 2409 gmi, 1479 hint, 1480 hlt, 1482 hvc, 1483 ic, 1484 ins, 2413 irg, 1485 isb, 1487 ld1, 2419 ld1r, 2423 ld2, 2429 ld2r, 2433 ld3, 2439 ld3r, 2443 ld4, 2449 ld4r, 2453 ld64b, 1488 ldadd, 1493 ldaddb, 1489 ldaddh, 1491 ldapr, 1496 ldaprb, 1498 ldaprh, 1500 ldapur, 1502 ldapurb, 1504 ldapurh, 1506 ldapursb, 1508 ldapursh, 1510 ldapursw, 1512 ldar, 1514 ldarb, 1516 ldarh, 1517 ldaxp, 1518 ldaxr, 1520 ldaxrb, 1522 ldaxrh, 1523 ldclr, 1528 ldclrb, 1524 ldclrh, 1526 ldeor, 1535 ldeorb, 1531 ldeorh, 1533 ldg, 1538 ldgm, 1539 ldlar, 1542 ldlarb, 1540 ldlarh, 1541 ldnp, 2456 ldp, 2458 ldpsw, 1550 ldr, 2468 ldraa, 1560 ldrb, 1565 ldrh, 1570 ldrsb, 1576 ldrsh, 1582 ldrsw, 1588 ldset, 1594 ldsetb, 1590 ldseth, 1592 ldsmax, 1601 ldsmaxb, 1597 ldsmaxh, 1599 ldsmin, 1608 ldsminb, 1604 ldsminh, 1606 ldtr, 1611 ldtrb, 1613 ldtrh, 1615 ldtrsb, 1617 ldtrsh, 1619 ldtrsw, 1621 ldumax, 1627 ldumaxb, 1623 ldumaxh, 1625 ldumin, 1634 lduminb, 1630 lduminh, 1632 ldur, 2471 ldurb, 1639 ldurh, 1640 ldursb, 1641 ldursh, 1643 ldursw, 1645 ldxp, 1646 ldxr, 1648 ldxrb, 1650 ldxrh, 1651 lsl, 1654 lslv, 1656 lsr, 1660 lsrv, 1662 madd, 1664 mla, 2475 mls, 2479 mneg, 1666 mov, 2488 movi, 2490 movk, 1677 movn, 1679 movz, 1681 mrs, 1683 msr, 1688 msub, 1689 mul, 2495 mvn, 2497 mvni, 2498 neg, 2501 negs, 1696 ngc, 1698 ngcs, 1700 nop, 1702 not, 2503 orn, 2505 orr, 2509 pacda, 1709 pacdb, 1710 pacga, 1711 pacia, 1712 pacib, 1715 pmul, 2511 pmull, 2513 prfm, 1722 prfum, 1724 psb, 1726 pssbb, 1727 raddhn, 2515 rax1, 2517 rbit, 2518 ret, 1730 retaa, 1731 rev, 1732 rev16, 2520 rev32, 2522 rev64, 2524 rmif, 1739 ror, 1742 rorv, 1744 rshrn, 2526 rsubhn, 2528 saba, 2530 sabal, 2532 sabd, 2534 sabdl, 2536 sadalp, 2538 saddl, 2540 saddlp, 2542 saddlv, 2544 saddw, 2546 sb, 1746 sbc, 1747 sbcs, 1749 sbfiz, 1751 sbfm, 1753 sbfx, 1756 scvtf, 2556 sdiv, 
1758 sdot, 2560 setf8, 1759 setgp, 1760 setgpn, 1765 setgpt, 1770 setgptn, 1775 setp, 1780 setpn, 1784 setpt, 1788 setptn, 1792 sev, 1796 sevl, 1797 sha1c, 2562 sha1h, 2563 sha1m, 2564 sha1p, 2565 sha1su0, 2566 sha1su1, 2567 sha256h, 2569 sha256h2, 2568 sha256su0, 2570 sha256su1, 2571 sha512h, 2573 sha512h2, 2575 sha512su0, 2577 sha512su1, 2578 shadd, 2580 shl, 2582 shll, 2585 shrn, 2587 shsub, 2589 sli, 2591 sm3partw1, 2594 sm3partw2, 2596 sm3ss1, 2598 sm3tt1a, 2600 sm3tt1b, 2602 sm3tt2a, 2604 sm3tt2b, 2606 sm4e, 2608 sm4ekey, 2610 smaddl, 1798 smax, 2612 smaxp, 2614 smaxv, 2616 smc, 1800 smin, 2618 sminp, 2620 sminv, 2622 smlal, 2627 smlsl, 2632 smmla, 2634 smnegl, 1801 smov, 2635 smstart, 1802 smstop, 1804 smsubl, 1806 smulh, 1808 smull, 2640 sqabs, 2642 sqadd, 2644 sqdmlal, 2650 sqdmlsl, 2657 sqdmulh, 2663 sqdmull, 2668 sqneg, 2671 sqrdmlah, 2676 sqrdmlsh, 2682 sqrdmulh, 2688 sqrshl, 2690 sqrshrn, 2692 sqrshrun, 2695 sqshl, 2701 sqshlu, 2703 sqshrn, 2706 sqshrun, 2709 sqsub, 2712 sqxtn, 2714 sqxtun, 2717 srhadd, 2720 sri, 2722 srshl, 2725 srshr, 2727 srsra, 2730 ssbb, 1810 sshl, 2733 sshll, 2736 sshr, 2738 ssra, 2741 ssubl, 2744 ssubw, 2746 st1, 2752 st2, 2759 st2g, 1811 st3, 2766 st4, 2773 st64b, 1813 st64bv, 1814 st64bv0, 1816 stadd, 1822 staddb, 1818 staddh, 1820 stclr, 1828 stclrb, 1824 stclrh, 1826 steor, 1834 steorb, 1830 steorh, 1832 stg, 1836 stgm, 1838 stgp, 1839 stllr, 1844 stllrb, 1842 stllrh, 1843 stlr, 1846 stlrb, 1848 stlrh, 1849 stlur, 1850 stlurb, 1852 stlurh, 1854 stlxp, 1856 stlxr, 1859 stlxrb, 1862 stlxrh, 1864 stnp, 2777 stp, 2779 str, 2786 strb, 1879 strh, 1884 stset, 1890 stsetb, 1886 stseth, 1888 stsmax, 1896 stsmaxb, 1892 stsmaxh, 1894 stsmin, 1902 stsminb, 1898 stsminh, 1900 sttr, 1904 sttrb, 1906 sttrh, 1908 stumax, 1914 stumaxb, 1910 stumaxh, 1912 stumin, 1920 stuminb, 1916 stuminh, 1918 stur, 2789 sturb, 1924 sturh, 1925 stxp, 1926 stxr, 1929 stxrb, 1931 stxrh, 1933 stz2g, 1935 stzg, 1937 stzgm, 1939 sub, 2791 subg, 1947 subhn, 2793 
subp, 1948 subps, 1949 subs, 1955 sudot, 2795 suqadd, 2797 svc, 1957 swp, 1962 swpb, 1958 swph, 1960 sxtb, 1964 sxth, 1966 sxtl, 2799 sxtw, 1968 sys, 1969 sysl, 1971 tbl, 2801 tbnz, 1972 tbx, 2803 tbz, 1973 tcancel, 1974 tcommit, 1975 tlbi, 1976 trn1, 2805 trn2, 2807 tsb, 1982 tst, 1984 tstart, 1979 ttest, 1981 uaba, 2809 uabal, 2811 uabd, 2813 uabdl, 2815 uadalp, 2817 uaddl, 2819 uaddlp, 2821 uaddlv, 2823 uaddw, 2825 ubfiz, 1986 ubfm, 1988 ubfx, 1991 ucvtf, 2835 udf, 1993 udiv, 1994 udot, 2839 uhadd, 2841 uhsub, 2843 umaddl, 1995 umax, 2845 umaxp, 2847 umaxv, 2849 umin, 2851 uminp, 2853 uminv, 2855 umlal, 2860 umlsl, 2865 ummla, 2867 umnegl, 1997 umov, 2868 umsubl, 1998 umulh, 2000 umull, 2873 uqadd, 2875 uqrshl, 2877 uqrshrn, 2879 uqshl, 2885 uqshrn, 2887 uqsub, 2890 uqxtn, 2892 urecpe, 2895 urhadd, 2896 urshl, 2898 urshr, 2900 ursqrte, 2903 ursra, 2904 usdot, 2909 ushl, 2911 ushll, 2914 ushr, 2916 usmmla, 2919 usqadd, 2920 usra, 2922 usubl, 2925 usubw, 2927 uxtb, 2002 uxth, 2003 uxtl, 2929 uzp1, 2931 uzp2, 2933 wfe, 2004 wfet, 2005 wfi, 2006 wfit, 2007 xaflag, 2008 xar, 2935 xpacd, 2009 xtn, 2936 yield, 2011 zip1, 2938 zip2, 2940 ================================================ FILE: pypcode/processors/AARCH64/data/patterns/AARCH64_LE_patterns.xml ================================================ 0xc0 0x03 0x5f 0xd6 0xc0 0x03 0x5f 0xd6 0x1f 0x20 0x03 0xd5 0xc0 0x03 0x5f 0xd6 0x1f 0x20 0x03 0xd5 0x1f 0x20 0x03 0xd5 0xff 0x0f 0x5f 0xd6 ........ ........ ........ 000101.. 0x20 0x00 0x20 0xd4 0xfd 0x7b 0xbf 0xa9 0xfe .0001111 0x1. 0xf8 111..... .1....11 10...... 0xa9 11101..1 001..011 1011.... 0x6d 0xff ..000011 000..... 0xd1 0x7f 0x23 0x03 0xd5 .1011111 0x24 0x03 0xd5 111..... .1....11 10...... 0xa9 0x........ 111..... .1....11 10...... 0xa9 0xfe .0001111 0x1. 0xf8 0x........ 0xfe .0001111 0x1. 0xf8 0xfd 0x7b 0xbf 0xa9 0xfd 0x03 0x00 0x91 0x7f 0x23 0x03 0xd5 0xff ..000011 00000... 0xd1 ...10000 ........ ........ 1..10000 00010001 ........ 01...... 
0xf9 0x10 ......10 00...... 0x91 0x20 0x02 0x1f 0xd6 .1011111 0x24 0x03 0xd5 ...10000 ........ ........ 1..10000 00010001 ........ 01...... 0xf9 0x10 ......10 00...... 0x91 0x20 0x02 0x1f 0xd6 .1011111 0x24 0x03 0xd5 0xf0 0x7b 0xbf 0xa9 ...10000 ........ ........ 1..10000 00010001 ........ 01...... 0xf9 0x10 ......10 00...... 0x91 0x20 0x02 0x1f 0xd6 ================================================ FILE: pypcode/processors/AARCH64/data/patterns/AARCH64_win_patterns.xml ================================================ 0xff 0x43 0x00 0xd1 ...10001 ........ ........ 1..10000 00110001 ......10 01...... 11111001 0xf1 0x63 0x31 0xcb 0xf1 0x07 0x00 0xf9 0xc0 0x03 0x5f 0xd6 ...10001 ........ ........ 1..10000 0xf0 0x07 0x40 0xf9 00110001 ......10 01...... 11111001 0xf0 0x63 0x30 0xcb 0x1f 0x02 0x11 0xeb ...00001 ........ ........ 01010100 0xff 0x43 0x00 0x91 0xc0 0x03 0x5f 0xd6 0x1f 0x20 0x03 0xd5 ================================================ FILE: pypcode/processors/AARCH64/data/patterns/patternconstraints.xml ================================================ AARCH64_LE_patterns.xml ================================================ FILE: pypcode/processors/AARCH64/data/patterns/prepatternconstraints.xml ================================================ AARCH64_win_patterns.xml ================================================ FILE: pypcode/processors/ARM/data/languages/ARM.cspec ================================================ ; offset = *:1 (lr + r3); r3 = zext(offset); if (inbounds) goto ; offset = *:1 (lr + r12); r3 = zext(offset); r3 = r3 * 2; r12 = lr + r3; ISAModeSwitch = (r12 & 1) != 1; TB = ISAModeSwitch; pc = r12 & 0xfffffffe; goto [pc]; ]]> ================================================ FILE: pypcode/processors/ARM/data/languages/ARM.dwarf ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARM.gdis ================================================ TMode 
================================================ FILE: pypcode/processors/ARM/data/languages/ARM.ldefs ================================================ Generic ARM/Thumb v8 little endian Generic ARM/Thumb v8 little endian (Thumb is default) Generic ARM/Thumb v8 little endian instructions and big endian data Generic ARM/Thumb v8 big endian Generic ARM/Thumb v8 big endian (Thumb is default) Generic ARM/Thumb v7 little endian Generic ARM/Thumb v7 little endian instructions and big endian data Generic ARM/Thumb v7 big endian ARM Cortex / Thumb little endian ARM Cortex / Thumb big endian ARM Cortex v8-m little endian ARM Cortex v8-m big endian Generic ARM/Thumb v6 little endian Generic ARM/Thumb v6 big endian Generic ARM/Thumb v5 little endian (T-variant) Generic ARM/Thumb v5 big endian (T-variant) Generic ARM v5 little endian Generic ARM v5 big endian Generic ARM/Thumb v4 little endian (T-variant) Generic ARM/Thumb v4 big endian (T-variant) Generic ARM v4 little endian Generic ARM v4 big endian ================================================ FILE: pypcode/processors/ARM/data/languages/ARM.opinion ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARM.sinc ================================================ # Specification for the ARM Version 4, 4T, 5, 5T, 5E # The following boolean defines control specific support: T_VARIANT, VERSION_5, VERSION_5E define endian=$(ENDIAN); define alignment=2; define space ram type=ram_space size=4 default; define space register type=register_space size=4; define register offset=0x0020 size=4 [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; define register offset=0x0060 size=1 [ NG ZR CY OV tmpNG tmpZR tmpCY tmpOV shift_carry TB Q GE1 GE2 GE3 GE4 ]; define register offset=0x0070 size=4 [ cpsr spsr ]; define register offset=0x0080 size=4 [ mult_addr ]; # Special internal register for dealing with multiple stores/loads define register 
offset=0x0084 size=4 [ r14_svc r13_svc spsr_svc ]; define register offset=0x0090 size=8 [ mult_dat8 ]; # Special internal register for dealing with multiple stores/loads define register offset=0x0090 size=16 [ mult_dat16 ]; # Special internal register for dealing with multiple stores/loads define register offset=0x00A0 size=4 [ fpsr ]; # floating point state register (for FPA10 floating-point accelerator) define register offset=0x0078 size=1 [ ISAModeSwitch ]; # generic name for TB ThumbBit - set same as TB @define FPSCR_N "fpscr[31,1]" @define FPSCR_Z "fpscr[30,1]" @define FPSCR_C "fpscr[29,1]" @define FPSCR_V "fpscr[28,1]" @if defined(VFPv2) || defined(VFPv3) || defined(SIMD) define register offset=0x00B0 size=4 [ fpsid fpscr fpexc mvfr0 mvfr1 mvfr2 fpinst fpinst2 ]; @endif define register offset=0x0100 size=10 [ fp0 fp1 fp2 fp3 fp4 fp5 fp6 fp7 ]; # eight 80-bit floating registers # pseudo-registers for coprocessor calculations define register offset=0x0200 size=4 [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 cr11 cr12 cr13 cr14 cr15 ]; # Advanced SIMD and VFP extension registers @if defined(VFPv2) || defined(VFPv3) @if ENDIAN == "little" define register offset=0x0300 size=4 [ s0 s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 s12 s13 s14 s15 s16 s17 s18 s19 s20 s21 s22 s23 s24 s25 s26 s27 s28 s29 s30 s31 ]; @else # ENDIAN == "big" define register offset=0x0300 size=4 [ s31 s30 s29 s28 s27 s26 s25 s24 s23 s22 s21 s20 s19 s18 s17 s16 s15 s14 s13 s12 s11 s10 s9 s8 s7 s6 s5 s4 s3 s2 s1 s0 ]; @endif # ENDIAN = "big" @endif # VFPv2 || VFPv3 @if defined(VFPv2) @if ENDIAN == "little" define register offset=0x0300 size=8 [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; @else # ENDIAN == "big" define register offset=0x0300 size=8 [ d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0 ]; @endif # ENDIAN = "big" @endif # VFPv2 @if defined(SIMD) || defined(VFPv3) @if ENDIAN == "little" define register offset=0x0300 size=8 [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 
d15 d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 ]; @else # ENDIAN == "big" define register offset=0x0300 size=8 [ d31 d30 d29 d28 d27 d26 d25 d24 d23 d22 d21 d20 d19 d18 d17 d16 d15 d14 d13 d12 d11 d10 d9 d8 d7 d6 d5 d4 d3 d2 d1 d0 ]; @endif # ENDIAN = "big" @endif # SIMD || VFPv3 @if defined(SIMD) @if ENDIAN == "little" define register offset=0x0300 size=16 [ q0 q1 q2 q3 q4 q5 q6 q7 q8 q9 q10 q11 q12 q13 q14 q15 ]; @else # ENDIAN == "big" define register offset=0x0300 size=16 [ q15 q14 q13 q12 q11 q10 q9 q8 q7 q6 q5 q4 q3 q2 q1 q0 ]; @endif # ENDIAN = "big" @endif # SIMD @if defined(CORTEX) define register offset=0x400 size=4 [ msplim psplim ]; @endif # Define context bits # WARNING: when adjusting context keep compiler packing in mind # and make sure fields do not span a 32-bit boundary before or # after context packing define register offset=0x00 size=8 contextreg; define context contextreg @ifdef T_VARIANT TMode = (0,0) # 1 if in Thumb instruction decode mode T = (0,0) # exact copy (alias!) 
of TMode LowBitCodeMode = (0,0) # 1 if low bit of instruction address is set on a branch ISA_MODE = (0,0) # 1 for Thumb instruction decode mode @endif LRset = (1,1) noflow # 1 if the instruction right before was a mov lr,pc REToverride = (2,2) noflow # 1 if the instruction should be a branch not a return CALLoverride = (3,3) noflow # 1 if the call should actually be a jump @if defined(VERSION_6T2) || defined(VERSION_7) TEEMode = (4,4) # 1 if in ThumbEE mode, changes some instruction behavior, and makes some instructions invalid condit = (5,13) noflow # both base and shift cond_mask = (10,13) # base condition cond_full = (6,9) # full condition cond_true = (9,9) # true if this condition should be tested for true cond_base = (6,8) # shift mask for controlling shift cond_shft = (9,13) # mask and lower bit of it condition field itmode = (5,5) # true if in ITBlock mode @endif # Transient context bits counter = (14,18) # 0 to 7 counter (for building variable length register lists) # dreg = (17,21) # D register (attached, for building register lists) # sreg = (17,21) # S register (attached, for building register lists) regNum = (19,23) # D register number (see dreg) counter2 = (24,26) # 0 to 7 counter (for building variable length register lists) # dreg2 = (25,29) # 2nd D register (attached, for building register lists) # sreg2 = (25,29) # 2nd S register (attached, for building register lists) reg2Num = (27,31) # 2nd D register number (see dreg2) # --- do not allow any field to span 32-bit boundary --- regInc = (32,33) # Pair register increment ARMcond = (34,34) # ARM conditional instruction ARMcondCk = (35,35) # Finished ARM condition check phase ; define pcodeop coprocessor_function; define pcodeop coprocessor_function2; define pcodeop coprocessor_load; define pcodeop coprocessor_load2; define pcodeop coprocessor_loadlong; define pcodeop coprocessor_loadlong2; define pcodeop coprocessor_moveto; define pcodeop coprocessor_moveto2; define pcodeop coprocessor_movefromRt; 
define pcodeop coprocessor_movefromRt2; define pcodeop coprocessor_movefrom2; define pcodeop coprocessor_store; define pcodeop coprocessor_store2; define pcodeop coprocessor_storelong; define pcodeop coprocessor_storelong2; define pcodeop software_interrupt; define pcodeop software_bkpt; define pcodeop software_udf; define pcodeop software_hlt; define pcodeop software_hvc; define pcodeop software_smc; # CPS methods (Version 6) define pcodeop setUserMode; define pcodeop setFIQMode; define pcodeop setIRQMode; define pcodeop setSupervisorMode; define pcodeop setMonitorMode; define pcodeop setAbortMode; define pcodeop setUndefinedMode; define pcodeop setSystemMode; define pcodeop enableIRQinterrupts; define pcodeop enableFIQinterrupts; define pcodeop enableDataAbortInterrupts; define pcodeop disableIRQinterrupts; define pcodeop disableFIQinterrupts; define pcodeop isFIQinterruptsEnabled; define pcodeop isIRQinterruptsEnabled; define pcodeop disableDataAbortInterrupts; define pcodeop hasExclusiveAccess; define pcodeop isCurrentModePrivileged; define pcodeop setThreadModePrivileged; define pcodeop isThreadMode; define pcodeop jazelle_branch; define pcodeop ClearExclusiveLocal; define pcodeop HintDebug; define pcodeop DataMemoryBarrier; define pcodeop DataSynchronizationBarrier; define pcodeop secureMonitorCall; define pcodeop WaitForEvent; define pcodeop WaitForInterrupt; define pcodeop HintYield; define pcodeop InstructionSynchronizationBarrier; define pcodeop HintPreloadData; define pcodeop HintPreloadDataForWrite; define pcodeop HintPreloadInstruction; define pcodeop SignedSaturate; define pcodeop SignedDoesSaturate; define pcodeop UnsignedSaturate; define pcodeop UnsignedDoesSaturate; define pcodeop Absolute; define pcodeop ReverseBitOrder; define pcodeop SendEvent; define pcodeop setEndianState; # Copies ISAModeSwitch to TMode define pcodeop setISAMode; macro affectflags() { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } macro affect_resflags() { ZR = tmpZR; NG = 
tmpNG; } macro SetISAModeSwitch(value) { ISAModeSwitch = value; TB = ISAModeSwitch; } macro SetThumbMode(value) { SetISAModeSwitch(value); setISAMode(); } # # simple branch, not inter-working macro BranchWritePC(addr) { pc = addr; } # # Interworking branch, ARM<->Thumb macro BXWritePC(addr) { SetThumbMode((addr & 0x1) != 0); local tmp = addr & 0xfffffffe; pc = tmp; } # # Branch depends on version macro LoadWritePC(addr) { @if defined(VERSION_5) BXWritePC(addr); @else BranchWritePC(addr); @endif } # Branch depends on version macro ALUWritePC(addr) { @if defined(VERSION_7) BXWritePC(addr); @else BranchWritePC(addr); @endif } @if defined(T_VARIANT) ItCond: is TMode=1 { } CheckInIT_CZNO: is TMode=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } # in older, arms always affect flags CheckInIT_CZN: is TMode=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; } # in older, arms always affect flags CheckInIT_ZN: is TMode=1 { ZR = tmpZR; NG = tmpNG; } # in older, arms always affect flags @endif @if defined(VERSION_6T2) || defined(VERSION_7) # conditionals for instruction following IT Block thfcc: "eq" is cond_full=0 { local tmp:1 = (ZR!=0); export tmp; } thfcc: "ne" is cond_full=1 { local tmp:1 = (ZR==0); export tmp; } thfcc: "cs" is cond_full=2 { local tmp:1 = (CY!=0); export tmp; } thfcc: "cc" is cond_full=3 { local tmp:1 = (CY==0); export tmp; } thfcc: "mi" is cond_full=4 { local tmp:1 = (NG!=0); export tmp; } thfcc: "pl" is cond_full=5 { local tmp:1 = (NG==0); export tmp; } thfcc: "vs" is cond_full=6 { local tmp:1 = (OV!=0); export tmp; } thfcc: "vc" is cond_full=7 { local tmp:1 = (OV==0); export tmp; } thfcc: "hi" is cond_full=8 { local tmp:1 = CY && !ZR; export tmp; } thfcc: "ls" is cond_full=9 { local tmp:1 = !CY || ZR; export tmp; } thfcc: "ge" is cond_full=10 { local tmp:1 = (NG == OV); export tmp; } thfcc: "lt" is cond_full=11 { local tmp:1 = (NG != OV); export tmp; } thfcc: "gt" is cond_full=12 { local tmp:1 = !ZR && (NG == OV); export tmp; } thfcc: "le" is cond_full=13 
{ local tmp:1 = ZR || (NG != OV); export tmp; } thfcc: "al" is cond_full=14 { local tmp:1 = 1; export tmp; } #can happen #thfcc: "nv" is cond_full=15 { local tmp:1 = 0; export tmp; } #unpredictable, shouldn't happen # no ITcondition ItCond: is TMode=1 & itmode=0 & cond_mask=0 {} # ITBlock then/else case - the condition being tested is modified by the shift below ItCond: "."thfcc is TMode=1 & itmode=0 & cond_mask & thfcc [ itmode=1; globalset(inst_next,condit);] { if (!thfcc) goto inst_next; } # last ITBlock then/else case - the condition being tested is modified by the shift below ItCond: "."thfcc is TMode=1 & itmode=0 & cond_mask=8 & thfcc { if (!thfcc) goto inst_next; } # certain Thumb instructions don't affect all flags in the IT block CheckInIT_CZNO: is TMode=1 & itmode=1 & cond_mask { } # Do nothing to the flag bits CheckInIT_CZNO: is TMode=1 & itmode=0 & cond_mask { } # Do nothing to the flag bits CheckInIT_CZNO: "s" is TMode=1 & itmode=0 & cond_mask=0 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } CheckInIT_CZN: is TMode=1 & itmode=1 & cond_mask { } # Do nothing to the flag bits CheckInIT_CZN: is TMode=1 & itmode=0 & cond_mask { } # Do nothing to the flag bits CheckInIT_CZN: "s" is TMode=1 & itmode=0 & cond_mask=0 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; } CheckInIT_ZN: is TMode=1 & itmode=1 & cond_mask { } # Do nothing to the flag bits CheckInIT_ZN: is TMode=1 & itmode=0 & cond_mask { } # Do nothing to the flag bits CheckInIT_ZN: "s" is TMode=1 & itmode=0 & cond_mask=0 { ZR = tmpZR; NG = tmpNG; } :^instruction is itmode=1 & cond_mask=8 & instruction [ condit=0; ] {} :^instruction is itmode=1 & cond_mask & instruction [ cond_shft=cond_shft << 1; itmode=0; ]{} @endif # defined(VERSION_6T2) || defined(VERSION_7) @include "ARMinstructions.sinc" # THUMB instructions @ifdef T_VARIANT @include "ARMTHUMBinstructions.sinc" @endif ================================================ FILE: pypcode/processors/ARM/data/languages/ARM4_be.slaspec 
================================================
# ARMv4 big-endian: base ARM instruction set only (no Thumb, no FP/SIMD).
@define ENDIAN "big"
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM4_le.slaspec ================================================
# ARMv4 little-endian: base ARM instruction set only.
@define ENDIAN "little"
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM4t_be.slaspec ================================================
# ARMv4T big-endian: T_VARIANT enables the Thumb instruction decoder in ARM.sinc.
@define ENDIAN "big"
@define T_VARIANT ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM4t_le.slaspec ================================================
# ARMv4T little-endian: Thumb-capable variant.
@define ENDIAN "little"
@define T_VARIANT ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM5_be.slaspec ================================================
# ARMv5 big-endian: v5/v5E instruction extensions, no Thumb (note: no T_VARIANT here).
@define ENDIAN "big"
@define VERSION_5 ""
@define VERSION_5E ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM5_le.slaspec ================================================
# ARMv5 little-endian: v5/v5E extensions, no Thumb.
@define ENDIAN "little"
@define VERSION_5 ""
@define VERSION_5E ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM5t_be.slaspec ================================================
# ARMv5T big-endian: Thumb plus v5/v5E extensions.
@define ENDIAN "big"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM5t_le.slaspec ================================================
# ARMv5T little-endian: Thumb plus v5/v5E extensions.
@define ENDIAN "little"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM6_be.slaspec ================================================
# ARMv6 big-endian: adds v6/v6K/v6T2 (Thumb-2) decoding and VFPv2 floating point.
@define ENDIAN "big"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VFPv2 ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM6_le.slaspec ================================================
# ARMv6 little-endian: same feature set as ARM6_be.
@define ENDIAN "little"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VFPv2 ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM7_be.slaspec ================================================
# ARMv7 big-endian: adds v7/v7M decoding, Advanced SIMD (NEON) and VFPv3/VFPv4.
@define ENDIAN "big"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VERSION_7 ""
@define VERSION_7M ""
@define SIMD ""
@define VFPv3 ""
@define VFPv4 ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM7_le.slaspec ================================================
# ARMv7 little-endian: same feature set as ARM7_be.
@define ENDIAN "little"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VERSION_7 ""
@define VERSION_7M ""
@define SIMD ""
@define VFPv3 ""
@define VFPv4 ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM8_be.slaspec ================================================
# ARMv8 (AArch32) big-endian: ARMv7 feature set plus VERSION_8 instructions.
@define ENDIAN "big"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VERSION_7 ""
@define VERSION_7M ""
@define VERSION_8 ""
@define SIMD ""
@define VFPv3 ""
@define VFPv4 ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM8_le.slaspec ================================================
# ARMv8 (AArch32) little-endian: same feature set as ARM8_be.
@define ENDIAN "little"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VERSION_7 ""
@define VERSION_7M ""
@define VERSION_8 ""
@define SIMD ""
@define VFPv3 ""
@define VFPv4 ""
@include "ARM.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM8m_be.slaspec ================================================
# ARMv8-M big-endian: Cortex-M profile; adds CDE (Custom Datapath Extension)
# and CORTEX defines, and pulls in the extra ARM_CDE.sinc decoder tables.
@define ENDIAN "big"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VERSION_7 ""
@define VERSION_7M ""
@define VERSION_8 ""
@define SIMD ""
@define CDE ""
@define CORTEX ""
@define VFPv3 ""
@define VFPv4 ""
@include "ARM.sinc"
@include "ARM_CDE.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARM8m_le.slaspec ================================================
# ARMv8-M little-endian: same feature set as ARM8m_be.
@define ENDIAN "little"
@define T_VARIANT ""
@define VERSION_5 ""
@define VERSION_5E ""
@define VERSION_6 ""
@define VERSION_6K ""
@define VERSION_6T2 ""
@define VERSION_7 ""
@define VERSION_7M ""
@define VERSION_8 ""
@define SIMD ""
@define CDE ""
@define CORTEX ""
@define VFPv3 ""
@define VFPv4 ""
@include "ARM.sinc"
@include "ARM_CDE.sinc"
================================================ FILE: pypcode/processors/ARM/data/languages/ARMCortex.pspec ================================================
================================================ FILE: pypcode/processors/ARM/data/languages/ARMTHUMBinstructions.sinc ================================================
# Specification for the THUMB Version 2
# This closely follows
# "Architecture Reference Manual" Second Edition Edited by David Seal
#
# WARNING NOTE: Be very careful taking a subpiece or truncating a register with :# or (#)
# The LEBE hybrid language causes endian issues if you do not assign the register to a temp
# variable and then take a subpiece or truncate.
# define token instr2 (16) part2op=(11,15) # this second instruction token is needed for the part2J1=(13,13) part2J2=(11,11) part2cond=(6,9) part2imm6=(0,5) part2S=(10,10) part2imm11=(0,10) part2imm10=(0,9) part2off=(0,10) # bl and blx instructions which use 2 16-bit instructions part2off_10=(1,10) # blx instruction which switches to ARM mode part2c1415=(14,15) part2c1212=(12,12) part2c0615=(6,15) part2Rt=(12,15) part2c0011=(0,11) part2c0909=(9,9) part2c0808=(8,8) part2c0707=(7,7) part2c0505=(5,5) part2c0404=(4,4) part2Rd0003=(0,3) ; define token instrThumb (16) op4=(4,15) op6=(6,15) op7=(7,15) op8=(8,15) op9=(9,15) op11=(11,15) op12=(12,15) op13=(13,15) op0=(0,15) sop0407=(4,7) sop0507=(5,7) sop0508=(5,8) sop0003=(0,3) sop0608=(6,8) sop0610=(6,10) sopit=(0,7) Ra1215=(12,15) Rd0002=(0,2) Rd0003=(0,3) Rd0810=(8,10) Rd0811=(8,11) Rd1215hi=(12,15) Rn0002=(0,2) Rn0003=(0,3) Rd0003hi=(0,3) Rn0305=(3,5) Rn0810=(8,10) Rm0305=(3,5) Rm0306=(3,6) Rm0608=(6,8) Rm0003=(0,3) Rs0305=(3,5) Rt1215=(12,15) Rt0811=(8,11) thI9=(9,9) thP8=(8,8) thH8=(8,8) thL8=(8,8) thU7=(7,7) thB6=(6,6) thN6=(6,6) thS6=(6,6) thW5=(5,5) thL4=(4,4) thCRd=(12,15) thCRn=(0,3) thCRm=(0,3) hrn0002=(0,2) hrm0305=(3,5) rm0306=(3,6) hrd0002=(0,2) immed3=(6,8) immed5=(6,10) immed6=(0,5) immed7=(0,6) immed8=(0,7) immed12_i=(10,10) immed12_imm3=(12,14) immed12_imm8=(0,7) soffset8=(0,7) signed offset10=(0,9) offset10S=(10,10) offset11=(0,10) soffset11=(0,10) signed offset12=(0,11) thcond=(8,11) thcpn=(8,11) thcop=(8,10) thopcode1=(4,7) thop1=(4,6) thopcode2=(5,7) thop2=(7,7) thopcode3=(0,5) thop3=(4,5) l07=(7,7) l11=(11,11) h1=(7,7) h2=(6,6) R=(8,8) sbz=(0,2) thwbit=(5,5) th_psrmask=(8,11) addr_pbit=(10,10) addr_ubit=(9,9) addr_wbit=(8,8) addr_puw =(8,10) addr_puw1 =(5,8) thsrsMode=(0,4) fcond=(4,7) throt=(4,6) imm3_12=(12,14) imm3_shft=(12,14) imm2_shft=(6,7) imm5=(3,7) sysm=(0,7) sysm37=(3,7) sysm02=(0,2) thc0001=(0,1) thc0002=(0,2) thc0003=(0,3) thc0004=(0,4) thc0005=(0,5) thc0006=(0,6) thc0007=(0,7) 
thc0011=(0,11) thc0107=(1,7) thc0207=(2,7) thc0307=(3,7) thc0407=(4,7) thc0405=(4,5) thc0409=(4,9) thc0506=(5,6) thc0507=(5,7) thc0607=(6,7) thc0810=(8,10) thc0811=(8,11) thc0910=(9,10) thc1414=(14,14) thc1313=(13,13) thc1212=(12,12) thc1214=(12,14) thc1111=(11,11) thc1010=(10,10) thc0909=(9,9) thc0808=(8,8) thc0707=(7,7) thc0606=(6,6) thc0505=(5,5) thc0404=(4,4) thc0303=(3,3) thc0202=(2,2) thc0101=(1,1) thc0000=(0,0) thc0115=(1,15) thc0215=(2,15) thc0315=(3,15) thc0415=(4,15) thc0515=(5,15) thc0615=(6,15) thc0715=(7,15) thc0815=(8,15) thc0915=(9,15) thc1015=(10,15) thc1112=(11,12) thc1115=(11,15) thc1215=(12,15) thc1315=(13,15) thc1415=(14,15) thc1515=(15,15) ; attach variables [ Rd0002 Rd0810 Rn0002 Rn0305 Rn0810 Rm0305 Rm0608 Rs0305 ] [ r0 r1 r2 r3 r4 r5 r6 r7 ]; attach variables [ Rm0003 Rm0306 Rd0811 Rn0003 Rt1215 Rt0811 Ra1215 Rd0003 part2Rt part2Rd0003 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; attach variables [ Rd1215hi Rd0003hi ] [ r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc _ ]; attach variables [ thCRn thCRd thCRm ] [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 cr11 cr12 cr13 cr14 cr15 ]; attach names [ thcpn ] [ p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15 ]; attach names [ thcop ] [ p0 p1 p2 p3 p4 p5 p6 p7 ]; attach variables [ hrn0002 hrm0305 hrd0002 ] [ r8 r9 r10 r11 r12 sp lr pc ]; macro th_addflags(op1,op2) { tmpCY = carry(op1,op2); tmpOV = scarry(op1,op2); } #See ARM Architecture reference section "Pseudocode details of addition and subtraction" macro th_add_with_carry_flags(op1,op2){ local CYz = zext(CY); local result = op1 + op2; tmpCY = carry( op1, op2 ) || carry( result, CYz ); tmpOV = scarry( op1, op2) ^^ scarry( result, CYz ); } #Note: used for subtraction op1 - (op2 + !CY) #sets tmpCY if there is NO borrow macro th_sub_with_carry_flags(op1, op2){ local result = op1 - op2; tmpCY = (op1 > op2) || (result < zext(CY)); tmpOV = sborrow(op1,op2) ^^ sborrow(result,zext(!CY)); } macro th_test_flags(result){ ZR = 
(result == 0); NG = (result s< 0); CY = shift_carry; } # Note (unlike x86) carry flag is SET if there is NO borrow macro th_subflags(op1,op2) { tmpCY = op2 <= op1; tmpOV = sborrow(op1,op2); } macro th_subflags0(op2) { tmpCY = op2 == 0; tmpOV = sborrow(0,op2); } macro resflags(result) { tmpNG = result s< 0; tmpZR = result == 0; } macro th_logicflags() { tmpCY = shift_carry; tmpOV = OV; } macro th_affectflags() { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } macro readAPSR_nzcvq(r) { # TODO: GE bits have not been included r = zext( (NG<<4) | (ZR<<3) | (CY<<2) | (OV<<1) | (Q) ) << 27; } macro writeAPSR_nzcvq(r) { # TODO: GE bits have not been included local tmp = r >> 27 & 0x1f; Q = ((tmp ) & 0x1) != 0; OV = ((tmp >> 1) & 0x1) != 0; CY = ((tmp >> 2) & 0x1) != 0; ZR = ((tmp >> 3) & 0x1) != 0; NG = ((tmp >> 4) & 0x1) != 0; } macro readAPSR_nzcv(r) { r = zext( (NG<<3) | (ZR<<2) | (CY<<1) | (OV) ) << 28; } macro writeAPSR_nzcv(r) { local tmp = r >> 28 & 0xf; OV = ((tmp) & 0x1) != 0; CY = ((tmp >> 1) & 0x1) != 0; ZR = ((tmp >> 2) & 0x1) != 0; NG = ((tmp >> 3) & 0x1) != 0; } ############################################################################### # conditionals for the branch instruction thcc: "eq" is thcond=0 { tmp:1 = (ZR!=0); export tmp; } thcc: "ne" is thcond=1 { tmp:1 = (ZR==0); export tmp; } thcc: "cs" is thcond=2 { tmp:1 = (CY!=0); export tmp; } thcc: "cc" is thcond=3 { tmp:1 = (CY==0); export tmp; } thcc: "mi" is thcond=4 { tmp:1 = (NG!=0); export tmp; } thcc: "pl" is thcond=5 { tmp:1 = (NG==0); export tmp; } thcc: "vs" is thcond=6 { tmp:1 = (OV!=0); export tmp; } thcc: "vc" is thcond=7 { tmp:1 = (OV==0); export tmp; } thcc: "hi" is thcond=8 { tmp:1 = CY && !ZR; export tmp; } thcc: "ls" is thcond=9 { tmp:1 = !CY || ZR; export tmp; } thcc: "ge" is thcond=10 { tmp:1 = (NG == OV); export tmp; } thcc: "lt" is thcond=11 { tmp:1 = (NG != OV); export tmp; } thcc: "gt" is thcond=12 { tmp:1 = !ZR && (NG == OV); export tmp; } thcc: "le" is thcond=13 { tmp:1 = ZR || 
(NG != OV); export tmp; } # thcc: "AL" is thcond=14 { tmp = 1; export tmp; } # thcc: "NV" is thcond=15 { tmp = 0; export tmp; } @define THCC "thcc & (thc1515=0 | thc1414=0 | thc1313=0)" @if defined(VERSION_6T2) || defined(VERSION_7) part2thcc: "eq" is part2cond=0 { tmp:1 = (ZR!=0); export tmp; } part2thcc: "ne" is part2cond=1 { tmp:1 = (ZR==0); export tmp; } part2thcc: "cs" is part2cond=2 { tmp:1 = (CY!=0); export tmp; } part2thcc: "cc" is part2cond=3 { tmp:1 = (CY==0); export tmp; } part2thcc: "mi" is part2cond=4 { tmp:1 = (NG!=0); export tmp; } part2thcc: "pl" is part2cond=5 { tmp:1 = (NG==0); export tmp; } part2thcc: "vs" is part2cond=6 { tmp:1 = (OV!=0); export tmp; } part2thcc: "vc" is part2cond=7 { tmp:1 = (OV==0); export tmp; } part2thcc: "hi" is part2cond=8 { tmp:1 = CY && !ZR; export tmp; } part2thcc: "ls" is part2cond=9 { tmp:1 = !CY || ZR; export tmp; } part2thcc: "ge" is part2cond=10 { tmp:1 = (NG == OV); export tmp; } part2thcc: "lt" is part2cond=11 { tmp:1 = (NG != OV); export tmp; } part2thcc: "gt" is part2cond=12 { tmp:1 = !ZR && (NG == OV); export tmp; } part2thcc: "le" is part2cond=13 { tmp:1 = ZR || (NG != OV); export tmp; } # part2thcc: "AL" is part2cond=14 { tmp = 1; export tmp; } # part2thcc: "NV" is part2cond=15 { tmp = 0; export tmp; } @define PART2THCC "part2thcc & (part2c0909=0 | part2c0808=0 | part2c0707=0)" @endif # defined(VERSION_6T2) || defined(VERSION_7) @if defined(VERSION_6T2) || defined(VERSION_7) # conditionals for IT Block # Marvel at the UGLINESS: the p-code for pairs (eq,ne) (cs,cc) (mi,pl), etc. 
are the same # The IT block decoding fills in the complement (if necessary) based on the IT mask bit for the instruction it_thfcc: "eq" is fcond=0 { tmp:1 = (ZR!=0); export tmp; } it_thfcc: "ne" is fcond=1 { tmp:1 = (ZR!=0); export tmp; } it_thfcc: "cs" is fcond=2 { tmp:1 = (CY!=0); export tmp; } it_thfcc: "cc" is fcond=3 { tmp:1 = (CY!=0); export tmp; } it_thfcc: "mi" is fcond=4 { tmp:1 = (NG!=0); export tmp; } it_thfcc: "pl" is fcond=5 { tmp:1 = (NG!=0); export tmp; } it_thfcc: "vs" is fcond=6 { tmp:1 = (OV!=0); export tmp; } it_thfcc: "vc" is fcond=7 { tmp:1 = (OV!=0); export tmp; } it_thfcc: "hi" is fcond=8 { tmp:1 = CY && !ZR; export tmp; } it_thfcc: "ls" is fcond=9 { tmp:1 = CY && !ZR; export tmp; } it_thfcc: "ge" is fcond=10 { tmp:1 = (NG == OV); export tmp; } it_thfcc: "lt" is fcond=11 { tmp:1 = (NG == OV); export tmp; } it_thfcc: "gt" is fcond=12 { tmp:1 = !ZR && (NG == OV); export tmp; } it_thfcc: "le" is fcond=13 { tmp:1 = !ZR && (NG == OV); export tmp; } it_thfcc: "al" is fcond=14 { tmp:1 = 1; export tmp; } @define IT_THFCC "it_thfcc & (thc0707=0 | thc0606=0 | thc0505=0 | thc0404=0)" ByteRotate: "#"^rot is throt [rot = throt << 3; ] { export *[const]:1 rot; } thSBIT_CZNO: is thc0404=0 { } # Do nothing to the flag bits thSBIT_CZNO: "s" is thc0404=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; } thSBIT_CZN: is thc0404=0 { } # Do nothing to the flags bits thSBIT_CZN: "s" is thc0404=1 {CY = tmpCY; ZR = tmpZR; NG = tmpNG;} thSBIT_ZN: is thc0404=0 { } # Do nothing to the flag bits thSBIT_ZN: "s" is thc0404=1 { ZR = tmpZR; NG = tmpNG; } @endif # defined(VERSION_6T2) || defined(VERSION_7) # Addressing modes # The capitalized fields are raw register addressing modes Hrd0002: Rd0002 is Rd0002 & h1=0 { export Rd0002; } Hrd0002: hrd0002 is hrd0002 & h1=1 { export hrd0002; } Hrd0002: pc is pc & hrd0002=7 & h1=1 { tmp:4 = inst_start + 4; export tmp; } Hrn0002: Rn0002 is Rn0002 & h1=0 { export Rn0002; } Hrn0002: hrn0002 is hrn0002 & h1=1 { export hrn0002; } 
Hrn0002: pc is pc & hrn0002=7 & h1=1 { tmp:4 = inst_start + 4; export tmp; } Hrm0305: Rm0305 is Rm0305 & h2=0 { export Rm0305; } Hrm0305: hrm0305 is hrm0305 & h2=1 { export hrm0305; } Hrm0305: pc is pc & hrm0305=7 & h2=1 { tmp:4 = inst_start + 4; export tmp; } @if defined(VERSION_6T2) || defined(VERSION_7) Immed8_4: "#"^immval is immed8 [ immval = immed8 * 4; ] { export *[const]:4 immval; } Immed4: "#"^thc0003 is thc0003 { export *[const]:4 thc0003; } @endif Immed8: "#"^immed8 is immed8 { export *[const]:4 immed8; } Immed3: "#"^immed3 is immed3 { export *[const]:4 immed3; } Pcrel8: [reloc] is immed8 [ reloc = ((inst_start+4) $and 0xfffffffc) + 4*immed8; ] { # don't export as an address, may be PIC code, and would add spurious symbols. export *[const]:4 reloc; } @if defined(VERSION_6T2) || defined(VERSION_7) Pcrel8_s8: [reloc] is immed8 [ reloc = ((inst_start+4) $and 0xfffffffc) + 4*immed8; ] { export *:8 reloc; } @endif # defined(VERSION_6T2) || defined(VERSION_7) Sprel8: sp,"#"^immval is sp & immed8 [ immval = immed8 * 4; ] { local tmp = sp + immval; export tmp; } Immed7_4: "#"^immval is immed7 [ immval = immed7 * 4; ] { tmp:4 = immval; export tmp; } Immed5: "#"^immed5 is immed5 { export *[const]:4 immed5; } @if defined(VERSION_6T2) || defined(VERSION_7) Immed12: "#"^immed12 is immed12_i; immed12_imm3 & immed12_imm8 [ immed12=(immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8); ] { export *[const]:4 immed12; } Immed16: "#"^immed16 is immed12_i & sop0003; immed12_imm3 & immed12_imm8 [ immed16 = (sop0003 << 12) | (immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8); ] { export *[const]:2 immed16; } PcrelImmed12Addr: reloc is immed12_i; immed12_imm3 & immed12_imm8 [ reloc = ((inst_start+4) $and 0xfffffffc) + ((immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8)); ] { # don't export as an address, may be PIC code, and would add spurious symbols. 
export *[const]:4 reloc; } NegPcrelImmed12Addr: reloc is immed12_i; immed12_imm3 & immed12_imm8 [ reloc = ((inst_start+4) $and 0xfffffffc) - ((immed12_i<<11) | (immed12_imm3<<8) | (immed12_imm8)); ] { # don't export as an address, may be PIC code, and would add spurious symbols. export *[const]:4 reloc; } PcrelOffset12: [reloc] is thc0707=1; offset12 [ reloc = ((inst_start+4) $and 0xfffffffc) + offset12; ] { export *:4 reloc; } PcrelOffset12: [reloc] is thc0707=0; offset12 [ reloc = ((inst_start+4) $and 0xfffffffc) - offset12; ] { export *:4 reloc; } @endif # defined(VERSION_6T2) || defined(VERSION_7) # decode thumb immediate12 encoded value @if defined(VERSION_6T2) || defined(VERSION_7) ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=0 & immed12_imm8 [ imm32=immed12_imm8 $and 0xff; ] { tmp:4 = imm32; shift_carry = CY; export tmp; } ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=1 & immed12_imm8 [ imm32=(immed12_imm8<<16) | (immed12_imm8); ] { tmp:4 = imm32; shift_carry = CY; export tmp; } ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=2 & immed12_imm8 [ imm32=(immed12_imm8<<24) | (immed12_imm8<<8); ] { tmp:4 = imm32; shift_carry = CY; export tmp; } ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; thc1414=0 & immed12_imm3=3 & immed12_imm8 [ imm32=(immed12_imm8<<24) | (immed12_imm8<<16) | (immed12_imm8<<8) | (immed12_imm8); ] { tmp:4 = imm32; shift_carry = CY; export tmp; } ThumbExpandImm12: "#"^imm32 is immed12_i=0 ; immed12_imm3 & thc0707 & immed7 [ imm32=(((0x80+immed7)<<(32-((immed12_imm3<<1)|thc0707)))|((0x80+immed7)>>(((immed12_imm3<<1)|thc0707)))) $and 0xffffffff; ] { tmp:4 = imm32; local tmp1 = (tmp >> 31); shift_carry = tmp1(0); export tmp; } ThumbExpandImm12: "#"^imm32 is immed12_i=1 ; immed12_imm3 & thc0707 & immed7 [ imm32=(((0x80+immed7)<<(32-(16+((immed12_imm3<<1)|thc0707))))|((0x80+immed7)>>((16+((immed12_imm3<<1)|thc0707))))) $and 0xffffffff; ] { tmp:4 = imm32; local tmp1 = (tmp >> 31); 
shift_carry = tmp1(0); export tmp; } @endif # defined(VERSION_6T2) || defined(VERSION_7) @if defined(VERSION_6T2) || defined(VERSION_7) thLsbImm: "#"^lsb is imm3_shft & imm2_shft [ lsb= (imm3_shft<<2) | imm2_shft; ] { tmp:4 = lsb; export tmp; } thMsbImm: "#"^thc0004 is thc0004 { tmp:4 = thc0004; export tmp; } thWidthMinus1: "#"^width is thc0004 [ width = thc0004 + 1; ] { tmp:4 = thc0004; export tmp; } thBitWidth: "#"^w is imm3_shft & imm2_shft & thc0004 [ w = thc0004 - ((imm3_shft<<2) | imm2_shft) + 1; ] { tmp:4 = w; export tmp; } @endif # VERSION_6T2 || VERSION_7 ##################### ###### thshift2 ###### ##################### @if defined(VERSION_6T2) || defined(VERSION_7) thshift2: Rm0003 is imm3_shft=0 & imm2_shft=0 & thc0405=0 & Rm0003 { shift_carry = CY; export Rm0003; } thshift2: Rm0003, "lsl #"^shftval is imm3_shft & imm2_shft & thc0405=0 & Rm0003 [ shftval=(imm3_shft<<2) | (imm2_shft); ] { local tmp1=(Rm0003>>(32-shftval))&1; shift_carry=tmp1(0); local tmp2=Rm0003<>31); shift_carry=tmp1(0); tmp2:4=0; export tmp2; } thshift2: Rm0003, "lsr #"^shftval is imm3_shft & imm2_shft & thc0405=1 & Rm0003 [ shftval=(imm3_shft<<2) | (imm2_shft); ] { local tmp1=(Rm0003>>(shftval-1))&1; shift_carry=tmp1(0); local tmp2=Rm0003>>shftval; export tmp2; } thshift2: Rm0003, "asr #32" is imm3_shft=0 & imm2_shft=0 & thc0405=2 & Rm0003 { local tmp1=(Rm0003>>31); shift_carry=tmp1(0); local tmp2 = Rm0003 s>> 32; export tmp2; } thshift2: Rm0003, "asr #"^shftval is imm3_shft & imm2_shft & thc0405=2 & Rm0003 [ shftval=(imm3_shft<<2) | (imm2_shft); ] { local tmp1=(Rm0003>>(shftval-1))&1; shift_carry=tmp1(0); local tmp2=Rm0003 s>> shftval; export tmp2; } thshift2: Rm0003, "rrx" is imm3_shft=0 & imm2_shft=0 & thc0405=3 & Rm0003 { local tmp1=Rm0003&1; shift_carry=tmp1(0); local tmp2 = (zext(CY)<<31)|(Rm0003>>1); export tmp2; } thshift2: Rm0003, "ror #"^shftval is imm3_shft & imm2_shft & thc0405=3 & Rm0003 [ shftval=(imm3_shft<<2) | (imm2_shft); ] { local 
tmp1=(Rm0003>>shftval)|(Rm0003<<(32-shftval)); local tmp2=tmp1 >> 31; shift_carry=tmp2(0); export tmp1; } @endif # VERSION_6T2 || VERSION_7 Addr5: reloc is imm5 & thc0909 [ reloc = inst_start + 4 + ((thc0909 << 6) | (imm5 << 1)); ] { export *:4 reloc; } Addr8: reloc is soffset8 [ reloc = (inst_start+4) + 2*soffset8; ] { export *:4 reloc; } Addr11: reloc is soffset11 [ reloc = (inst_start+4) + 2*soffset11; ] { export *:4 reloc; } @if defined(VERSION_6T2) || defined(VERSION_7) ThAddr20: reloc is part2S=1 & part2imm6; part2J1 & part2J2 & part2imm11 [ reloc = inst_start + 4 + ((-1 << 20) $or (part2J2 << 19) $or (part2J1 << 18) $or (part2imm6 << 12) $or (part2imm11 << 1)); ] { export *:4 reloc; } ThAddr20: reloc is part2S=0 & part2imm6; part2J1 & part2J2 & part2imm11 [ reloc = inst_start + 4 + ((part2J2 << 19) $or (part2J1 << 18) $or (part2imm6 << 12) $or (part2imm11 << 1)); ] { export *:4 reloc; } @endif # defined(VERSION_6T2) || defined(VERSION_7) ThAddr24: reloc is offset10S=0 & offset10; part2J1 & part2J2 & part2off [ reloc = inst_start + 4 + (((part2J1 $xor 1) << 23) $or ((part2J2 $xor 1) << 22) $or (offset10 << 12) $or (part2off << 1)); ] { export *:4 reloc; } ThAddr24: reloc is offset10S=1 & offset10; part2J1 & part2J2 & part2off [ reloc = inst_start + 4 + ((-1 << 24) $or (part2J1 << 23) $or (part2J2 << 22) $or (offset10 << 12) $or (part2off << 1)); ] { export *:4 reloc; } @if defined(VERSION_5) ThArmAddr23: reloc is offset10S=0 & offset10; part2J1 & part2J2 & part2off_10 [ reloc = ((inst_start + 4) $and 0xfffffffc) + (((part2J1 $xor 1) << 23) $or ((part2J2 $xor 1) << 22) $or (offset10 << 12) $or (part2off_10 << 2)); ] { export *:4 reloc; } ThArmAddr23: reloc is offset10S=1 & offset10; part2J1 & part2J2 & part2off_10 [ reloc = ((inst_start + 4) $and 0xfffffffc) + ((-1 << 24) $or (part2J1 << 23) $or (part2J2 << 22) $or (offset10 << 12) $or (part2off_10 << 2)); ] { export *:4 reloc; } @endif # VERSION_5 Rn_exclaim: Rn0810 is Rn0810 & thc0810=0 & thc0000=1 { 
mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=1 & thc0101=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=2 & thc0202=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=3 & thc0303=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=4 & thc0404=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=5 & thc0505=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=6 & thc0606=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810 is Rn0810 & thc0810=7 & thc0707=1 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim: Rn0810! is Rn0810 & thc0810 { mult_addr = Rn0810; export Rn0810; } Rn_exclaim_WB: is Rn0810 & thc0810=0 & thc0000=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=1 & thc0101=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=2 & thc0202=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=3 & thc0303=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=4 & thc0404=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=5 & thc0505=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=6 & thc0606=1 { } Rn_exclaim_WB: is Rn0810 & thc0810=7 & thc0707=1 { } Rn_exclaim_WB: is Rn0810 & thc0810 { Rn0810 = mult_addr; } # ldlist is the list of registers to be loaded or popped LdRtype0: r0 is thc0000=1 & r0 & thc0107=0 { r0 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype0: r0, is thc0000=1 & r0 { r0 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype0: is thc0000=0 { } LdRtype1: LdRtype0^r1 is LdRtype0 & thc0101=1 & r1 & thc0207=0 { r1 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype1: LdRtype0^r1, is LdRtype0 & thc0101=1 & r1 { r1 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype1: LdRtype0 is LdRtype0 & thc0101=0 { } LdRtype2: LdRtype1^r2 is LdRtype1 & thc0202=1 & r2 & thc0307=0 { r2 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype2: LdRtype1^r2, is LdRtype1 & thc0202=1 & r2 { r2 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype2: LdRtype1 is LdRtype1 & 
thc0202=0 { } LdRtype3: LdRtype2^r3 is LdRtype2 & thc0303=1 & r3 & thc0407=0 { r3 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype3: LdRtype2^r3, is LdRtype2 & thc0303=1 & r3 { r3 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype3: LdRtype2 is LdRtype2 & thc0303=0 { } LdRtype4: LdRtype3^r4 is LdRtype3 & thc0404=1 & r4 & thc0507=0 { r4 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype4: LdRtype3^r4, is LdRtype3 & thc0404=1 & r4 { r4 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype4: LdRtype3 is LdRtype3 & thc0404=0 { } LdRtype5: LdRtype4^r5 is LdRtype4 & thc0505=1 & r5 & thc0607=0 { r5 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype5: LdRtype4^r5, is LdRtype4 & thc0505=1 & r5 { r5 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype5: LdRtype4 is LdRtype4 & thc0505=0 { } LdRtype6: LdRtype5^r6 is LdRtype5 & thc0606=1 & r6 & thc0707=0 { r6 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype6: LdRtype5^r6, is LdRtype5 & thc0606=1 & r6 { r6 = *mult_addr; mult_addr = mult_addr + 4; } LdRtype6: LdRtype5 is LdRtype5 & thc0606=0 { } ldlist: LdRtype6^r7 is LdRtype6 & thc0707=1 & r7 { r7 = *mult_addr; mult_addr = mult_addr + 4; } ldlist: LdRtype6 is LdRtype6 & thc0707=0 { } #strlist is the list of registers to be stored StrType0: r0 is thc0000=1 & r0 & thc0107=0 { *mult_addr = r0; mult_addr = mult_addr + 4; } StrType0: r0, is thc0000=1 & r0 { *mult_addr = r0; mult_addr = mult_addr + 4; } StrType0: is thc0000=0 { } StrType1: StrType0^r1 is StrType0 & thc0101=1 & r1 & thc0207=0 { *mult_addr = r1; mult_addr = mult_addr + 4; } StrType1: StrType0^r1, is StrType0 & thc0101=1 & r1 { *mult_addr = r1; mult_addr = mult_addr + 4; } StrType1: StrType0 is StrType0 & thc0101=0 { } StrType2: StrType1^r2 is StrType1 & thc0202=1 & r2 & thc0307=0 { *mult_addr = r2; mult_addr = mult_addr + 4; } StrType2: StrType1^r2, is StrType1 & thc0202=1 & r2 { *mult_addr = r2; mult_addr = mult_addr + 4; } StrType2: StrType1 is StrType1 & thc0202=0 { } StrType3: StrType2^r3 is StrType2 & thc0303=1 & r3 
& thc0407=0 { *mult_addr = r3; mult_addr = mult_addr + 4; } StrType3: StrType2^r3, is StrType2 & thc0303=1 & r3 { *mult_addr = r3; mult_addr = mult_addr + 4; } StrType3: StrType2 is StrType2 & thc0303=0 { } StrType4: StrType3^r4 is StrType3 & thc0404=1 & r4 & thc0507=0 { *mult_addr = r4; mult_addr = mult_addr + 4; } StrType4: StrType3^r4, is StrType3 & thc0404=1 & r4 { *mult_addr = r4; mult_addr = mult_addr + 4; } StrType4: StrType3 is StrType3 & thc0404=0 { } StrType5: StrType4^r5 is StrType4 & thc0505=1 & r5 & thc0607=0 { *mult_addr = r5; mult_addr = mult_addr + 4; } StrType5: StrType4^r5, is StrType4 & thc0505=1 & r5 { *mult_addr = r5; mult_addr = mult_addr + 4; } StrType5: StrType4 is StrType4 & thc0505=0 { } StrType6: StrType5^r6 is StrType5 & thc0606=1 & r6 & thc0707=0 { *mult_addr = r6; mult_addr = mult_addr + 4; } StrType6: StrType5^r6, is StrType5 & thc0606=1 & r6 { *mult_addr = r6; mult_addr = mult_addr + 4; } StrType6: StrType5 is StrType5 & thc0606=0 { } StrType7: StrType6^r7 is StrType6 & thc0707=1 & r7 { *mult_addr = r7; mult_addr = mult_addr + 4; } StrType7: StrType6 is StrType6 & thc0707=0 { } strlist: StrType7 is StrType7 { } # pshlist is the list registers to be pushed to memory # SCR 10921, fix the order in which the regs appear in the disassembled insn, to be in line with objdump # Also add commas between regs # PshType7: "" is thc0707=0 { } PshType7: r7 is thc0707=1 & r7 { mult_addr = mult_addr - 4; *mult_addr = r7; } PshType6: PshType7 is PshType7 & thc0606=0 { } PshType6: r6 is thc0606=1 & r6 & thc0707=0 { mult_addr = mult_addr - 4; *mult_addr = r6; } PshType6: r6,PshType7 is PshType7 & thc0606=1 & r6 { mult_addr = mult_addr - 4; *mult_addr = r6; } PshType5: PshType6 is PshType6 & thc0505=0 { } PshType5: r5 is thc0505=1 & r5 & thc0607=0 { mult_addr = mult_addr - 4; *mult_addr = r5; } PshType5: r5,PshType6 is PshType6 & thc0505=1 & r5 { mult_addr = mult_addr - 4; *mult_addr = r5; } PshType4: PshType5 is PshType5 & thc0404=0 { } PshType4: r4 is 
thc0404=1 & r4 & thc0507=0 { mult_addr = mult_addr - 4; *mult_addr = r4; } PshType4: r4,PshType5 is PshType5 & thc0404=1 & r4 { mult_addr = mult_addr - 4; *mult_addr = r4; } PshType3: PshType4 is PshType4 & thc0303=0 { } PshType3: r3 is thc0303=1 & r3 & thc0407=0 { mult_addr = mult_addr - 4; *mult_addr = r3; } PshType3: r3,PshType4 is PshType4 & thc0303=1 & r3 { mult_addr = mult_addr - 4; *mult_addr = r3; } PshType2: PshType3 is PshType3 & thc0202=0 { } PshType2: r2 is thc0202=1 & r2 & thc0307=0 { mult_addr = mult_addr - 4; *mult_addr = r2; } PshType2: r2,PshType3 is PshType3 & thc0202=1 & r2 { mult_addr = mult_addr - 4; *mult_addr = r2; } PshType1: PshType2 is PshType2 & thc0101=0 { } PshType1: r1 is thc0101=1 & r1 & thc0207=0 { mult_addr = mult_addr - 4; *mult_addr = r1; } PshType1: r1,PshType2 is PshType2 & thc0101=1 & r1 { mult_addr = mult_addr - 4; *mult_addr = r1; } pshlist: PshType1 is PshType1 & thc0000=0 { } pshlist: r0 is thc0000=1 & r0 & thc0107=0 { mult_addr = mult_addr - 4; *mult_addr = r0; } pshlist: r0,PshType1 is PshType1 & thc0000=1 & r0 { mult_addr = mult_addr - 4; *mult_addr = r0; } # ldlist_inc is the list of registers to be loaded for pop instructions thrlist15: r0 is thc0000=1 & r0 & thc0115=0 { r0 = * mult_addr; mult_addr = mult_addr + 4; } thrlist15: r0, is thc0000=1 & r0 { r0 = * mult_addr; mult_addr = mult_addr + 4; } thrlist15: is thc0000=0 { } thrlist14: thrlist15^r1 is thc0101=1 & thrlist15 & r1 & thc0215=0 { r1 = * mult_addr; mult_addr = mult_addr + 4; } thrlist14: thrlist15^r1, is thc0101=1 & thrlist15 & r1 { r1 = * mult_addr; mult_addr = mult_addr + 4; } thrlist14: thrlist15 is thc0101=0 & thrlist15 { } thrlist13: thrlist14^r2 is thc0202=1 & thrlist14 & r2 & thc0315=0 { r2 = * mult_addr; mult_addr = mult_addr + 4; } thrlist13: thrlist14^r2, is thc0202=1 & thrlist14 & r2 { r2 = * mult_addr; mult_addr = mult_addr + 4; } thrlist13: thrlist14 is thc0202=0 & thrlist14 { } thrlist12: thrlist13^r3 is thc0303=1 & thrlist13 & r3 & thc0415=0 { 
r3 = * mult_addr; mult_addr = mult_addr + 4; } thrlist12: thrlist13^r3, is thc0303=1 & thrlist13 & r3 { r3 = * mult_addr; mult_addr = mult_addr + 4; } thrlist12: thrlist13 is thc0303=0 & thrlist13 { } thrlist11: thrlist12^r4 is thc0404=1 & thrlist12 & r4 & thc0515=0 { r4 = * mult_addr; mult_addr = mult_addr + 4; } thrlist11: thrlist12^r4, is thc0404=1 & thrlist12 & r4 { r4 = * mult_addr; mult_addr = mult_addr + 4; } thrlist11: thrlist12 is thc0404=0 & thrlist12 { } thrlist10: thrlist11^r5 is thc0505=1 & thrlist11 & r5 & thc0615=0 { r5 = * mult_addr; mult_addr = mult_addr + 4; } thrlist10: thrlist11^r5, is thc0505=1 & thrlist11 & r5 { r5 = * mult_addr; mult_addr = mult_addr + 4; } thrlist10: thrlist11 is thc0505=0 & thrlist11 { } thrlist9: thrlist10^r6 is thc0606=1 & thrlist10 & r6 & thc0715=0 { r6 = * mult_addr; mult_addr = mult_addr + 4; } thrlist9: thrlist10^r6, is thc0606=1 & thrlist10 & r6 { r6 = * mult_addr; mult_addr = mult_addr + 4; } thrlist9: thrlist10 is thc0606=0 & thrlist10 { } thrlist8: thrlist9^r7 is thc0707=1 & thrlist9 & r7 & thc0815=0 { r7 = * mult_addr; mult_addr = mult_addr + 4; } thrlist8: thrlist9^r7, is thc0707=1 & thrlist9 & r7 { r7 = * mult_addr; mult_addr = mult_addr + 4; } thrlist8: thrlist9 is thc0707=0 & thrlist9 { } thrlist7: thrlist8^r8 is thc0808=1 & thrlist8 & r8 & thc0915=0 { r8 = * mult_addr; mult_addr = mult_addr + 4; } thrlist7: thrlist8^r8, is thc0808=1 & thrlist8 & r8 { r8 = * mult_addr; mult_addr = mult_addr + 4; } thrlist7: thrlist8 is thc0808=0 & thrlist8 { } thrlist6: thrlist7^r9 is thc0909=1 & thrlist7 & r9 & thc1015=0 { r9 = * mult_addr; mult_addr = mult_addr + 4; } thrlist6: thrlist7^r9, is thc0909=1 & thrlist7 & r9 { r9 = * mult_addr; mult_addr = mult_addr + 4; } thrlist6: thrlist7 is thc0909=0 & thrlist7 { } thrlist5: thrlist6^r10 is thc1010=1 & thrlist6 & r10 & thc1115=0 { r10 = * mult_addr; mult_addr = mult_addr + 4; } thrlist5: thrlist6^r10, is thc1010=1 & thrlist6 & r10 { r10 = * mult_addr; mult_addr = mult_addr + 
4; }
thrlist5: thrlist6 is thc1010=0 & thrlist6 { }
# NOTE(review): reconstructed from a line-collapsed copy in which inline '#'
# comments swallowed the constructors that followed on the same physical line.
# Token stream preserved; layout restored to one constructor per line.
# thrlist4..thrlist1 continue the ascending load-multiple register list:
# each register selected by its thcNNNN bit loads from *mult_addr, which then
# post-increments by 4.  The trailing-comma display variant is used when more
# registers follow in the list.
thrlist4: thrlist5^r11 is thc1111=1 & thrlist5 & r11 & thc1215=0 { r11 = * mult_addr; mult_addr = mult_addr + 4; }
thrlist4: thrlist5^r11, is thc1111=1 & thrlist5 & r11 { r11 = * mult_addr; mult_addr = mult_addr + 4; }
thrlist4: thrlist5 is thc1111=0 & thrlist5 { }
thrlist3: thrlist4^r12 is thc1212=1 & thrlist4 & r12 & thc1315=0 { r12 = * mult_addr; mult_addr = mult_addr + 4; }
thrlist3: thrlist4^r12, is thc1212=1 & thrlist4 & r12 { r12 = * mult_addr; mult_addr = mult_addr + 4; }
thrlist3: thrlist4 is thc1212=0 & thrlist4 { }
thrlist2: thrlist3^sp is thc1313=1 & thrlist3 & sp & thc1415=0 { sp = * mult_addr; mult_addr = mult_addr + 4; }
thrlist2: thrlist3^sp, is thc1313=1 & thrlist3 & sp { sp = * mult_addr; mult_addr = mult_addr + 4; }
thrlist2: thrlist3 is thc1313=0 & thrlist3 { }
thrlist1: thrlist2^lr is thc1414=1 & thrlist2 & lr & thc1515=0 { lr = * mult_addr; mult_addr = mult_addr + 4; }
thrlist1: thrlist2^lr, is thc1414=1 & thrlist2 & lr { lr = * mult_addr; mult_addr = mult_addr + 4; }
thrlist1: thrlist2 is thc1414=0 & thrlist2 { }
thldrlist_inc: {thrlist1^pc} is thc1515=1 & thrlist1 & pc { pc = * mult_addr; mult_addr = mult_addr + 4; }
thldrlist_inc: {thrlist1} is thc1515=0 & thrlist1 { }
@if defined(VERSION_6T2) || defined(VERSION_7)
# thstrlist_inc is the list of registers to be stored using IA or IB in Addressing Mode 4
thsinc15: r0 is thc0000=1 & r0 & thc0115=0 { * mult_addr = r0; mult_addr = mult_addr + 4; }
thsinc15: r0, is thc0000=1 & r0 { * mult_addr = r0; mult_addr = mult_addr + 4; }
thsinc15: is thc0000=0 { }
thsinc14: thsinc15^r1 is thc0101=1 & thsinc15 & r1 & thc0215=0 { * mult_addr = r1; mult_addr = mult_addr + 4; }
thsinc14: thsinc15^r1, is thc0101=1 & thsinc15 & r1 { * mult_addr = r1; mult_addr = mult_addr + 4; }
thsinc14: thsinc15 is thc0101=0 & thsinc15 { }
thsinc13: thsinc14^r2 is thc0202=1 & thsinc14 & r2 & thc0315=0 { * mult_addr = r2; mult_addr = mult_addr + 4; }
thsinc13: thsinc14^r2, is thc0202=1 & thsinc14 & r2 { * mult_addr = r2; mult_addr = mult_addr + 4; }
thsinc13: thsinc14 is thc0202=0 & thsinc14 { }
thsinc12: thsinc13^r3 is thc0303=1 & thsinc13 & r3 & thc0415=0 { * mult_addr = r3; mult_addr = mult_addr + 4; }
thsinc12: thsinc13^r3, is thc0303=1 & thsinc13 & r3 { * mult_addr = r3; mult_addr = mult_addr + 4; }
thsinc12: thsinc13 is thc0303=0 & thsinc13 { }
thsinc11: thsinc12^r4 is thc0404=1 & thsinc12 & r4 & thc0515=0 { * mult_addr = r4; mult_addr = mult_addr + 4; }
thsinc11: thsinc12^r4, is thc0404=1 & thsinc12 & r4 { * mult_addr = r4; mult_addr = mult_addr + 4; }
thsinc11: thsinc12 is thc0404=0 & thsinc12 { }
thsinc10: thsinc11^r5 is thc0505=1 & thsinc11 & r5 & thc0615=0 { * mult_addr = r5; mult_addr = mult_addr + 4; }
thsinc10: thsinc11^r5, is thc0505=1 & thsinc11 & r5 { * mult_addr = r5; mult_addr = mult_addr + 4; }
thsinc10: thsinc11 is thc0505=0 & thsinc11 { }
thsinc9: thsinc10^r6 is thc0606=1 & thsinc10 & r6 & thc0715=0 { * mult_addr = r6; mult_addr = mult_addr + 4; }
thsinc9: thsinc10^r6, is thc0606=1 & thsinc10 & r6 { * mult_addr = r6; mult_addr = mult_addr + 4; }
thsinc9: thsinc10 is thc0606=0 & thsinc10 { }
thsinc8: thsinc9^r7 is thc0707=1 & thsinc9 & r7 & thc0815=0 { * mult_addr = r7; mult_addr = mult_addr + 4; }
thsinc8: thsinc9^r7, is thc0707=1 & thsinc9 & r7 { * mult_addr = r7; mult_addr = mult_addr + 4; }
thsinc8: thsinc9 is thc0707=0 & thsinc9 { }
thsinc7: thsinc8^r8 is thc0808=1 & thsinc8 & r8 & thc0915=0 { * mult_addr = r8; mult_addr = mult_addr + 4; }
thsinc7: thsinc8^r8, is thc0808=1 & thsinc8 & r8 { * mult_addr = r8; mult_addr = mult_addr + 4; }
thsinc7: thsinc8 is thc0808=0 & thsinc8 { }
thsinc6: thsinc7^r9 is thc0909=1 & thsinc7 & r9 & thc1015=0 { * mult_addr = r9; mult_addr = mult_addr + 4; }
thsinc6: thsinc7^r9, is thc0909=1 & thsinc7 & r9 { * mult_addr = r9; mult_addr = mult_addr + 4; }
thsinc6: thsinc7 is thc0909=0 & thsinc7 { }
thsinc5: thsinc6^r10 is thc1010=1 & thsinc6 & r10 & thc1115=0 { * mult_addr = r10; mult_addr = mult_addr + 4; }
thsinc5: thsinc6^r10, is thc1010=1 & thsinc6 & r10 { * mult_addr = r10; mult_addr = mult_addr + 4; }
thsinc5: thsinc6 is thc1010=0 & thsinc6 { }
thsinc4: thsinc5^r11 is thc1111=1 & thsinc5 & r11 & thc1215=0 { * mult_addr = r11; mult_addr = mult_addr + 4; }
thsinc4: thsinc5^r11, is thc1111=1 & thsinc5 & r11 { * mult_addr = r11; mult_addr = mult_addr + 4; }
thsinc4: thsinc5 is thc1111=0 & thsinc5 { }
thsinc3: thsinc4^r12 is thc1212=1 & thsinc4 & r12 & thc1315=0 { * mult_addr = r12; mult_addr = mult_addr + 4; }
thsinc3: thsinc4^r12, is thc1212=1 & thsinc4 & r12 { * mult_addr = r12; mult_addr = mult_addr + 4; }
thsinc3: thsinc4 is thc1212=0 & thsinc4 { }
thsinc2: thsinc3^sp is thc1313=1 & thsinc3 & sp & thc1415=0 { * mult_addr = sp; mult_addr = mult_addr + 4; }
thsinc2: thsinc3^sp, is thc1313=1 & thsinc3 & sp { * mult_addr = sp; mult_addr = mult_addr + 4; }
thsinc2: thsinc3 is thc1313=0 & thsinc3 { }
thsinc1: thsinc2^lr is thc1414=1 & thsinc2 & lr & thc1515=0 { * mult_addr = lr; mult_addr = mult_addr + 4; }
thsinc1: thsinc2^lr, is thc1414=1 & thsinc2 & lr { * mult_addr = lr; mult_addr = mult_addr + 4; }
thsinc1: thsinc2 is thc1414=0 & thsinc2 { }
# When pc is in the store list, the value stored is inst_start+4 (not pc itself).
thstrlist_inc: {thsinc1^pc} is thc1515=1 & thsinc1 & pc { *:4 mult_addr = inst_start+4; mult_addr = mult_addr + 4; }
thstrlist_inc: {thsinc1} is thc1515=0 & thsinc1 { }
# thldrlist_dec is the list of registers to be loaded using DA or DB in Addressing Mode 4
# (descending: highest register first, mult_addr post-decrements by 4)
thrldec15: pc is thc1515=1 & pc { pc = * mult_addr; mult_addr = mult_addr - 4; }
thrldec15: is thc1515=0 { }
thrldec14: lr^thrldec15 is thc1414=1 & thrldec15 & lr & thc1515=0 { lr = * mult_addr; mult_addr = mult_addr - 4; }
thrldec14: lr,thrldec15 is thc1414=1 & thrldec15 & lr { lr = * mult_addr; mult_addr = mult_addr - 4; }
thrldec14: thrldec15 is thc1414=0 & thrldec15 { }
thrldec13: sp^thrldec14 is thc1313=1 & thrldec14 & sp & thc1415=0 { sp = * mult_addr; mult_addr = mult_addr - 4; }
thrldec13: sp,thrldec14 is thc1313=1 & thrldec14 & sp { sp = * mult_addr; mult_addr = mult_addr - 4; }
thrldec13: thrldec14 is thc1313=0 & thrldec14 { }
thrldec12: r12^thrldec13 is thc1212=1 & thrldec13 & r12 & thc1315=0 { r12 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec12: r12,thrldec13 is thc1212=1 & thrldec13 & r12 { r12 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec12: thrldec13 is thc1212=0 & thrldec13 { }
thrldec11: r11^thrldec12 is thc1111=1 & thrldec12 & r11 & thc1215=0 { r11 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec11: r11,thrldec12 is thc1111=1 & thrldec12 & r11 { r11 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec11: thrldec12 is thc1111=0 & thrldec12 { }
thrldec10: r10^thrldec11 is thc1010=1 & thrldec11 & r10 & thc1115=0 { r10 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec10: r10,thrldec11 is thc1010=1 & thrldec11 & r10 { r10 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec10: thrldec11 is thc1010=0 & thrldec11 { }
thrldec9: r9^thrldec10 is thc0909=1 & thrldec10 & r9 & thc1015=0 { r9 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec9: r9,thrldec10 is thc0909=1 & thrldec10 & r9 { r9 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec9: thrldec10 is thc0909=0 & thrldec10 { }
thrldec8: r8^thrldec9 is thc0808=1 & thrldec9 & r8 & thc0915=0 { r8 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec8: r8,thrldec9 is thc0808=1 & thrldec9 & r8 { r8 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec8: thrldec9 is thc0808=0 & thrldec9 { }
thrldec7: r7^thrldec8 is thc0707=1 & thrldec8 & r7 & thc0815=0 { r7 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec7: r7,thrldec8 is thc0707=1 & thrldec8 & r7 { r7 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec7: thrldec8 is thc0707=0 & thrldec8 { }
thrldec6: r6^thrldec7 is thc0606=1 & thrldec7 & r6 & thc0715=0 { r6 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec6: r6,thrldec7 is thc0606=1 & thrldec7 & r6 { r6 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec6: thrldec7 is thc0606=0 & thrldec7 { }
thrldec5: r5^thrldec6 is thc0505=1 & thrldec6 & r5 & thc0615=0 { r5 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec5: r5,thrldec6 is thc0505=1 & thrldec6 & r5 { r5 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec5: thrldec6 is thc0505=0 & thrldec6 { }
thrldec4: r4^thrldec5 is thc0404=1 & thrldec5 & r4 & thc0515=0 { r4 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec4: r4,thrldec5 is thc0404=1 & thrldec5 & r4 { r4 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec4: thrldec5 is thc0404=0 & thrldec5 { }
thrldec3: r3^thrldec4 is thc0303=1 & thrldec4 & r3 & thc0415=0 { r3 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec3: r3,thrldec4 is thc0303=1 & thrldec4 & r3 { r3 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec3: thrldec4 is thc0303=0 & thrldec4 { }
thrldec2: r2^thrldec3 is thc0202=1 & thrldec3 & r2 & thc0315=0 { r2 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec2: r2,thrldec3 is thc0202=1 & thrldec3 & r2 { r2 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec2: thrldec3 is thc0202=0 & thrldec3 { }
thrldec1: r1^thrldec2 is thc0101=1 & thrldec2 & r1 & thc0215=0 { r1 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec1: r1,thrldec2 is thc0101=1 & thrldec2 & r1 { r1 = * mult_addr; mult_addr = mult_addr - 4; }
thrldec1: thrldec2 is thc0101=0 & thrldec2 { }
thldrlist_dec: {r0^thrldec1} is thc0000=1 & thrldec1 & r0 & thc0115=0 { r0 = * mult_addr; mult_addr = mult_addr - 4; }
thldrlist_dec: {r0,thrldec1} is thc0000=1 & thrldec1 & r0 { r0 = * mult_addr; mult_addr = mult_addr - 4; }
thldrlist_dec: {thrldec1} is thc0000=0 & thrldec1 { }
@endif # defined(VERSION_6T2) || defined(VERSION_7)
# thstrlist_dec is the list of registers to be pushed
# (descending store; pc stores the return address inst_start+4)
thsdec15: pc is thc1515=1 & pc { *:4 mult_addr = inst_start+4; mult_addr = mult_addr - 4; }
thsdec15: is thc1515=0 { }
thsdec14: lr is thc1414=1 & thsdec15 & lr & thc1515=0 { * mult_addr=lr; mult_addr = mult_addr - 4; }
thsdec14: lr,thsdec15 is thc1414=1 & thsdec15 & lr { * mult_addr=lr; mult_addr = mult_addr - 4; }
thsdec14: thsdec15 is thc1414=0 &
thsdec15 { }
# NOTE(review): reconstructed from a line-collapsed copy (inline '#' comments
# were swallowing following constructors); token stream preserved, layout
# restored to one constructor per line.
# thsdec13..thsdec1 continue the descending store (push) register list; each
# selected register stores to *mult_addr, which post-decrements by 4.
thsdec13: sp is thc1313=1 & sp & thc1415=0 { * mult_addr=sp; mult_addr = mult_addr - 4; }
thsdec13: sp,thsdec14 is thc1313=1 & thsdec14 & sp { * mult_addr=sp; mult_addr = mult_addr - 4; }
thsdec13: thsdec14 is thc1313=0 & thsdec14 { }
thsdec12: r12 is thc1212=1 & r12 & thc1315=0 { * mult_addr=r12; mult_addr = mult_addr - 4; }
thsdec12: r12,thsdec13 is thc1212=1 & thsdec13 & r12 { * mult_addr=r12; mult_addr = mult_addr - 4; }
thsdec12: thsdec13 is thc1212=0 & thsdec13 { }
thsdec11: r11 is thc1111=1 & r11 & thc1215=0 { * mult_addr=r11; mult_addr = mult_addr - 4; }
thsdec11: r11,thsdec12 is thc1111=1 & thsdec12 & r11 { * mult_addr=r11; mult_addr = mult_addr - 4; }
thsdec11: thsdec12 is thc1111=0 & thsdec12 { }
thsdec10: r10 is thc1010=1 & r10 & thc1115=0 { * mult_addr=r10; mult_addr = mult_addr - 4; }
thsdec10: r10,thsdec11 is thc1010=1 & thsdec11 & r10 { * mult_addr=r10; mult_addr = mult_addr - 4; }
thsdec10: thsdec11 is thc1010=0 & thsdec11 { }
thsdec9: r9 is thc0909=1 & r9 & thc1015=0 { * mult_addr=r9; mult_addr = mult_addr - 4; }
thsdec9: r9,thsdec10 is thc0909=1 & thsdec10 & r9 { * mult_addr=r9; mult_addr = mult_addr - 4; }
thsdec9: thsdec10 is thc0909=0 & thsdec10 { }
thsdec8: r8 is thc0808=1 & r8 & thc0915=0 { * mult_addr=r8; mult_addr = mult_addr - 4; }
thsdec8: r8,thsdec9 is thc0808=1 & thsdec9 & r8 { * mult_addr=r8; mult_addr = mult_addr - 4; }
thsdec8: thsdec9 is thc0808=0 & thsdec9 { }
thsdec7: r7 is thc0707=1 & r7 & thc0815=0 { * mult_addr=r7; mult_addr = mult_addr - 4; }
thsdec7: r7,thsdec8 is thc0707=1 & thsdec8 & r7 { * mult_addr=r7; mult_addr = mult_addr - 4; }
thsdec7: thsdec8 is thc0707=0 & thsdec8 { }
thsdec6: r6 is thc0606=1 & r6 & thc0715=0 { * mult_addr=r6; mult_addr = mult_addr - 4; }
thsdec6: r6,thsdec7 is thc0606=1 & thsdec7 & r6 { * mult_addr=r6; mult_addr = mult_addr - 4; }
thsdec6: thsdec7 is thc0606=0 & thsdec7 { }
thsdec5: r5 is thc0505=1 & r5 & thc0615=0 { * mult_addr=r5; mult_addr = mult_addr - 4; }
thsdec5: r5,thsdec6 is thc0505=1 & thsdec6 & r5 { * mult_addr=r5; mult_addr = mult_addr - 4; }
thsdec5: thsdec6 is thc0505=0 & thsdec6 { }
thsdec4: r4 is thc0404=1 & r4 & thc0515=0 { * mult_addr=r4; mult_addr = mult_addr - 4; }
thsdec4: r4,thsdec5 is thc0404=1 & thsdec5 & r4 { * mult_addr=r4; mult_addr = mult_addr - 4; }
thsdec4: thsdec5 is thc0404=0 & thsdec5 { }
thsdec3: r3 is thc0303=1 & r3 & thc0415=0 { * mult_addr=r3; mult_addr = mult_addr - 4; }
thsdec3: r3,thsdec4 is thc0303=1 & thsdec4 & r3 { * mult_addr=r3; mult_addr = mult_addr - 4; }
thsdec3: thsdec4 is thc0303=0 & thsdec4 { }
thsdec2: r2 is thc0202=1 & r2 & thc0315=0 { * mult_addr=r2; mult_addr = mult_addr - 4; }
thsdec2: r2,thsdec3 is thc0202=1 & thsdec3 & r2 { * mult_addr=r2; mult_addr = mult_addr - 4; }
thsdec2: thsdec3 is thc0202=0 & thsdec3 { }
thsdec1: r1 is thc0101=1 & r1 & thc0215=0 { * mult_addr=r1; mult_addr = mult_addr - 4; }
thsdec1: r1,thsdec2 is thc0101=1 & thsdec2 & r1 { * mult_addr=r1; mult_addr = mult_addr - 4; }
thsdec1: thsdec2 is thc0101=0 & thsdec2 { }
thstrlist_dec: {r0} is thc0000=1 & r0 & thc0115=0 { * mult_addr=r0; mult_addr = mult_addr - 4; }
thstrlist_dec: {r0,thsdec1} is thc0000=1 & thsdec1 & r0 { * mult_addr=r0; mult_addr = mult_addr - 4; }
thstrlist_dec: {thsdec1^} is thc0000=0 & thsdec1 { }
# Brace wrappers used for display of register lists
ldbrace: {ldlist} is ldlist { }
stbrace: {strlist} is strlist { }
psbrace: {pshlist} is pshlist { }
# Some extra subconstructors for the push and pop instructions
pclbrace:{ldlist,pc} is ldlist & pc { build ldlist; pc = *mult_addr; mult_addr = mult_addr + 4; }
pclbrace:{pc} is thc0007=0 & pc { pc = *mult_addr; mult_addr = mult_addr + 4; }
pcpbrace:{pshlist,lr} is pshlist & lr { mult_addr = mult_addr - 4; *mult_addr = lr; build pshlist; }
pcpbrace:{lr} is thc0007=0 & lr { mult_addr = mult_addr - 4; *mult_addr = lr; }
@if defined(VERSION_6T2) || defined(VERSION_7)
RnIndirect12: [Rn0003,"#"^offset12] is Rn0003; offset12 { local tmp = Rn0003 + offset12; export tmp; }
RnIndirectPUW: [Rn0003],"#"^-immed8 is Rn0003;
addr_puw=1 & immed8 { local tmp = Rn0003; Rn0003=Rn0003-immed8; export tmp; }
# NOTE(review): reconstructed from a line-collapsed copy; token stream
# preserved, one constructor / preprocessor directive per line so '#'
# comments no longer swallow following definitions.
# RnIndirectPUW: [Rn,#imm8] addressing with pre/post-index and write-back
# selected by the addr_puw field; exports the effective address.
RnIndirectPUW: [Rn0003],"#"^immed8 is Rn0003; addr_puw=3 & immed8 { local tmp = Rn0003; Rn0003=Rn0003+immed8; export tmp; }
RnIndirectPUW: [Rn0003,"#"^-immed8] is Rn0003; addr_puw=4 & immed8 { local tmp = Rn0003 - immed8; export tmp; }
RnIndirectPUW: [Rn0003,"#"^-immed8]! is Rn0003; addr_puw=5 & immed8 { local tmp = Rn0003 - immed8; Rn0003=tmp; export tmp; }
RnIndirectPUW: [Rn0003,"#"^immed8]! is Rn0003; addr_puw=7 & immed8 { local tmp = Rn0003 + immed8; Rn0003=tmp; export tmp; }
@define RN_INDIRECT_PUW "(op0; (addr_puw=4 | thc0808=1)) & RnIndirectPUW" # constraint for RnIndirectPUW
# RnIndirectPUW1: same addressing forms with the immediate scaled by 4.
RnIndirectPUW1: [Rn0003],"#"^-immval is Rn0003 & addr_puw1=0x3; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003; Rn0003=Rn0003-immval; export tmp; }
RnIndirectPUW1: [Rn0003],"#"^immval is Rn0003 & addr_puw1=0x7; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003; Rn0003=Rn0003+immval; export tmp; }
RnIndirectPUW1: [Rn0003,"#"^-immval] is Rn0003 & addr_puw1=0xa; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 - immval; export tmp; }
RnIndirectPUW1: [Rn0003,"#"^-immval]! is Rn0003 & addr_puw1=0xb; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 - immval; Rn0003=tmp; export tmp; }
RnIndirectPUW1: [Rn0003,"#"^immval] is Rn0003 & addr_puw1=0xe; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 + immval; export tmp; }
RnIndirectPUW1: [Rn0003,"#"^immval]! is Rn0003 & addr_puw1=0xf; immed8 [ immval = immed8 * 4; ] { local tmp = Rn0003 + immval; Rn0003=tmp; export tmp; }
@endif # VERSION_6T2 || VERSION_7
@define RN_INDIRECT_PUW1 "((thc0808=1 | thc0505=1); op0) & RnIndirectPUW1" # constraint for RnIndirectPUW1
# Simple [Rn,#imm5] forms: immediate scaled by 4, 2, or 1.
RnIndirect4: [Rn0305,"#"^immval] is Rn0305 & immed5 [ immval = immed5 * 4; ] { local tmp = Rn0305 + immval; export tmp; }
RnIndirect2: [Rn0305,"#"^immval] is Rn0305 & immed5 [ immval = immed5 * 2; ] { local tmp = Rn0305 + immval; export tmp; }
RnIndirect1: [Rn0305,"#"^immed5] is Rn0305 & immed5 { local tmp = Rn0305 + immed5; export tmp; }
RnRmIndirect: [Rn0305,Rm0608] is Rn0305 & Rm0608 { local tmp = Rn0305 + Rm0608; export tmp; }
# PC-relative load address: word-aligned pc (inst_start+4) plus imm8*4.
Pcrel8Indirect: [reloc] is immed8 [ reloc = ((inst_start+4) $and 0xfffffffc) + 4*immed8; ] { export *:4 reloc; }
Sprel8Indirect: [sp,"#"^immval] is sp & immed8 [ immval = immed8 * 4; ] { local tmp = sp + immval; export tmp; }
@if defined(VERSION_6T2) || defined(VERSION_7)
# taddrmode5: Thumb-2 [Rn,#imm8*4] addressing; thP8/thU7/thW5 select
# pre/post indexing, offset sign, and write-back.
taddrmode5: [Rn0003,"#"^off8] is thP8=1 & thU7=1 & thW5=0 & Rn0003; immed8 [ off8=immed8*4; ] { local tmp = Rn0003 + off8; export tmp; }
taddrmode5: [Rn0003,"#"^noff8] is thP8=1 & thU7=0 & thW5=0 & Rn0003; immed8 [ noff8=-(immed8*4); ] { local tmp = Rn0003 + noff8; export tmp; }
taddrmode5: [Rn0003,"#"^off8]! is thP8=1 & thU7=1 & thW5=1 & Rn0003; immed8 [ off8=immed8*4; ] { Rn0003 = Rn0003 + off8; export Rn0003; }
taddrmode5: [Rn0003,"#"^noff8]! is thP8=1 & thU7=0 & thW5=1 & Rn0003; immed8 [ noff8=-(immed8*4); ] { Rn0003 = Rn0003 + noff8; export Rn0003; }
taddrmode5: [Rn0003],"#"^off8 is thP8=0 & thU7=1 & thW5=1 & Rn0003; immed8 [ off8=immed8*4; ] { local tmp = Rn0003; Rn0003 = Rn0003+off8; export tmp; }
taddrmode5: [Rn0003],"#"^noff8 is thP8=0 & thU7=0 & thW5=1 & Rn0003; immed8 [ noff8=-(immed8*4); ] { local tmp = Rn0003; Rn0003 = Rn0003 + noff8; export tmp; }
taddrmode5: [Rn0003],{immed8} is thP8=0 & thU7=1 & thW5=0 & Rn0003; immed8 { export Rn0003; }
@endif # VERSION_6T2 || VERSION_7
#
# Modes for SRS instructions
#
thSRSMode: "usr" is thsrsMode=8 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "fiq" is thsrsMode=9 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "irq" is thsrsMode=10 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "svc" is thsrsMode=11 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "mon" is thsrsMode=14 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "abt" is thsrsMode=15 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "und" is thsrsMode=19 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "sys" is thsrsMode=23 & thc0004 { export *[const]:1 thc0004; }
thSRSMode: "#"^thsrsMode is thsrsMode { export *[const]:1 thsrsMode; }
#
# Detect if the PC is loaded and do a GOTO
#
# TODO: this is how all detections of writes into the PC should be done.
# Instead of enumerating and splitting the case into PC loaded and non loaded, IE (add PC,#0x...)
# Should only have one base constructor and have a sub constructor and build to test if the PC was loaded and do the right thing.
#
@if defined(VERSION_6T2) || defined(VERSION_7)
RtGotoCheck: is Rt1215=15 { LoadWritePC(pc); goto [pc]; }
RtGotoCheck: is Rt1215 {}
@endif # VERSION_6T2 || VERSION_7
############################################################
# Base constructors
# NOTE(review): reconstructed from a line-collapsed copy; token stream
# preserved, one constructor per line.  Comment typo fixed below
# ("implented" -> "implemented").
# We have the following operand types:
# Type           Corresponding syntax in ARM/THUMB manual
#
# Rd0002         with Rd occupying bits 0-2
# Rd0810         with Rd occupying bits 8-10
# Rm0305         with Rm occupying bits 3-5
# Rm0608         with Rm occupying bits 6-8
# Rn0002         with Rn occupying bits 0-2
# Rn0305         with Rn occupying bits 3-5
# Rn0810         with Rn occupying bits 8-10
# Hrd0002        with H1 bit in diagram
# Hrn0002        with H1 bit in diagram
# Hrm0305        with H2 bit in diagram
# Rs0305         with Rs occupying bits 3-5
# Immed3         #
# Immed5         #
# Immed8         #
# Pcrel8         PC,#*4
# Sprel8         SP,#*4
# Immed7_4       #*4
# thcc
# Addr8          (for B)
# Addr11         (for B no condition)
# ThAddr22       (for double BL, BLX)
# immed8         (no "#" as in BKPT and SWI)
# Rn_exclaim     !
# ldbrace        (load instructions)
# strbrace       (store instructions)
# RnIndirect4    [,#*4]
# RnIndirect2    [,#*2]
# RnIndirect1    [,#]
# RnRmIndirect   [,]
# Pcrel8Indirect [PC,#*4]
# Sprel8Indirect [SP,#*4]
#
# Ensure that the condition check phase has been completed with : ARMcondCk=1 {
:adc^CheckInIT_CZNO^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x105 & Rm0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_add_with_carry_flags(Rd0002,Rm0305); Rd0002 = Rd0002 + Rm0305 + zext(CY); resflags(Rd0002); build CheckInIT_CZNO; }
@if defined(VERSION_6T2) || defined(VERSION_7)
:adc^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=0xa & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; th_add_with_carry_flags(Rn0003,ThumbExpandImm12); Rd0811 = Rn0003 + ThumbExpandImm12 + zext(CY); resflags(Rd0811); build thSBIT_CZNO; }
:adc^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=0xa & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; th_add_with_carry_flags(Rn0003,thshift2); local tmp = thshift2+zext(CY); Rd0811 = Rn0003+tmp; resflags(Rd0811); build thSBIT_CZNO; }
@endif # VERSION_6T2 || VERSION_7
:add^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Immed3 is TMode=1 & ItCond & op9=0x0e & Immed3 & Rn0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_addflags(Rn0305,Immed3); Rd0002 = Rn0305 + Immed3; resflags(Rd0002); build CheckInIT_CZNO; }
:add^CheckInIT_CZNO^ItCond Rd0810,Immed8 is TMode=1 & ItCond & op11=0x06 & Rd0810 & Immed8 & CheckInIT_CZNO { build ItCond; th_addflags(Rd0810,Immed8); Rd0810 = Rd0810 + Immed8; resflags(Rd0810); build CheckInIT_CZNO; }
@if defined(VERSION_6T2) || defined(VERSION_7)
:add^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=8 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; th_addflags(Rn0003,ThumbExpandImm12); Rd0811 = Rn0003+ThumbExpandImm12; resflags(Rd0811); build thSBIT_CZNO; }
:addw^ItCond Rd0811,Rn0003,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & Rn0003; thc1515=0 & Rd0811) & Immed12 { build ItCond; th_addflags(Rn0003,Immed12); Rd0811 = Rn0003+Immed12; resflags(Rd0811); }
:add^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=8 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; local tmp = thshift2; th_addflags(Rn0003,tmp); Rd0811 = Rn0003+tmp; resflags(Rd0811); build thSBIT_CZNO; }
:add^thSBIT_CZNO^ItCond^".w" Rd0811,sp,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=8 & thSBIT_CZNO & sp & sop0003=0xd; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; th_addflags(sp,ThumbExpandImm12); Rd0811 = sp+ThumbExpandImm12; resflags(Rd0811); build thSBIT_CZNO; }
:addw^ItCond Rd0811,sp,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & sop0003=0xd & sp; thc1515=0 & Rd0811) & Immed12 { build ItCond; th_addflags(sp,Immed12); Rd0811 = sp+Immed12; resflags(Rd0811); }
:add^thSBIT_CZNO^ItCond^".w" Rd0811,sp,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=8 & thSBIT_CZNO & sop0003=0xd & sp; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; local tmp = thshift2; th_addflags(sp,tmp); Rd0811 = sp+tmp; resflags(Rd0811); build thSBIT_CZNO; }
@endif # VERSION_6T2 || VERSION_7
:add^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Rm0608 is TMode=1 & ItCond & op9=0x0c & Rm0608 & Rn0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_addflags(Rn0305,Rm0608); Rd0002 = Rn0305 + Rm0608; resflags(Rd0002); build CheckInIT_CZNO; }
:add^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x44 & Hrd0002 & Hrm0305 { build ItCond; Hrd0002 = Hrd0002 + Hrm0305; }
# Special case: destination is pc (hrd0002=7 & h1=1) - the add is a branch.
:add^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x44 & Hrd0002 & Hrm0305 & hrd0002=7 & h1=1 { build ItCond; dest:4 = Hrd0002 + Hrm0305; BranchWritePC(dest); goto [pc]; }
:add^ItCond Rd0810,Sprel8 is TMode=1 & ItCond & op11=0x15 & Rd0810 & Sprel8 { build ItCond; Rd0810 = Sprel8; }
:add^ItCond sp,Immed7_4 is TMode=1 & ItCond & op7=0x160 & sp & Immed7_4 { build ItCond; sp = sp + Immed7_4; }
:adr^ItCond Rd0810,Pcrel8 is TMode=1 & ItCond & op11=0x14 & Rd0810 & Pcrel8 { build ItCond; Rd0810 = &Pcrel8; }
@if defined(VERSION_6T2) || defined(VERSION_7)
:adr^ItCond^".w" Rd0811,NegPcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811) & NegPcrelImmed12Addr { build ItCond; Rd0811 = &NegPcrelImmed12Addr; }
:adr^ItCond^".w" Rd0811,PcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811) & PcrelImmed12Addr { build ItCond; Rd0811 = &PcrelImmed12Addr; }
# Variants with Rd = pc (thc0811=15): writing pc makes them branches.
:adr^ItCond^".w" Rd0811,NegPcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811 & thc0811=15) & NegPcrelImmed12Addr { build ItCond; pc = &NegPcrelImmed12Addr; goto NegPcrelImmed12Addr; }
:adr^ItCond^".w" Rd0811,PcrelImmed12Addr is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=0 & thc0404=0 & sop0003=0xf; thc1515=0 & Rd0811 & thc0811=15) & PcrelImmed12Addr { build ItCond; pc = &PcrelImmed12Addr; goto PcrelImmed12Addr; }
@endif # VERSION_6T2 || VERSION_7
:and^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x100 & Rd0002 & Rm0305 & CheckInIT_ZN { build ItCond; Rd0002 = Rd0002 & Rm0305; resflags(Rd0002); build CheckInIT_ZN; }
@if defined(VERSION_6T2) || defined(VERSION_7)
:and^thSBIT_ZN^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=0 & thSBIT_ZN & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; Rd0811 = Rn0003 & ThumbExpandImm12; resflags(Rd0811); build thSBIT_ZN; }
:and^thSBIT_ZN^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=0 & thSBIT_ZN & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; Rd0811 = Rn0003 & thshift2; resflags(Rd0811); build thSBIT_ZN; }
@endif # VERSION_6T2 || VERSION_7
# Carry-out for ASR: last bit shifted out; a shift count of 0 keeps CY.
macro th_set_carry_for_asr(op1,shift_count) { local bit = (op1 s>> (shift_count-1)) & 1; tmpCY = ((shift_count == 0) && CY) || ((shift_count != 0) && (bit != 0)); }
#note that this is a special case where immed5 = 0, which corresponds to a shift amount of 32
:asr^CheckInIT_CZN^ItCond Rd0002,Rm0305,"#0x20" is TMode=1 & ItCond & op11=0x02 & Immed5 & Rm0305 & Rd0002 & immed5=0 & CheckInIT_CZN { build ItCond; th_set_carry_for_asr(Rm0305,32:1); Rd0002 = Rm0305 s>> 32; resflags(Rd0002); build CheckInIT_CZN; }
:asr^CheckInIT_CZN^ItCond Rd0002,Rm0305,Immed5 is TMode=1 & ItCond & op11=0x02 & Immed5 & Rm0305 & Rd0002 & CheckInIT_CZN { build ItCond; th_set_carry_for_asr(Rm0305,Immed5); Rd0002 = Rm0305 s>> Immed5; resflags(Rd0002); build CheckInIT_CZN; }
:asr^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x104 & Rd0002 & Rs0305 & CheckInIT_CZN { build ItCond; local shift_amount = Rs0305 & 0xff; th_set_carry_for_asr(Rd0002,shift_amount); Rd0002 = Rd0002 s>> (shift_amount); resflags(Rd0002); build CheckInIT_CZN; }
@if defined(VERSION_6T2) || defined(VERSION_7)
:asr^thSBIT_CZN^ItCond^".w" Rd0811,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & sop0003=0xf; thc1515=0 & Rd0811 & thc0405=2 & thshift2 { build ItCond; build thshift2; Rd0811 = thshift2; tmpCY = shift_carry; resflags(Rd0811); build thSBIT_CZN; }
:asr^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op11=0x1f & thc0910=1 & sop0508=2 & thSBIT_CZN & Rn0003; op12=0xf & Rd0811 & sop0407=0 & Rm0003 { build ItCond; local shift_amount = Rm0003 & 0xff; th_set_carry_for_asr(Rn0003,shift_amount); Rd0811 = Rn0003 s>> (shift_amount); resflags(Rd0811); build thSBIT_CZN; }
@endif # VERSION_6T2 || VERSION_7
# this constructor is identical to 16-bit udf instruction. it looks
# like it implemented an unconditional branch instruction (giving it a
# made up name), but the thumb 16-bit instruction does not support
# unconditional branching.
@ifdef NOT_AN_INSTRUCTION :bal Addr8 is TMode=1 & op12=0xd & thcond=14 & Addr8 { goto Addr8; } @endif :b^thcc Addr8 is TMode=1 & ItCond & op12=0b1101 & $(THCC) & Addr8 { if (thcc) goto Addr8; } :b^ItCond Addr11 is TMode=1 & ItCond & op11=0b11100 & Addr11 { goto Addr11; } @if defined(VERSION_6T2) || defined(VERSION_7) :b^part2thcc^".w" ThAddr20 is TMode=1 & (part2op=0x1e & $(PART2THCC); part2c1415=2 & part2c1212=0) & ThAddr20 { if (part2thcc) goto ThAddr20; } :b^ItCond^".w" ThAddr24 is TMode=1 & ItCond & (op11=0x1e; part2c1415=2 & part2c1212=1) & ThAddr24 { build ItCond; goto ThAddr24; } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_6T2) || defined(VERSION_7) :bfc^ItCond Rd0811,thLsbImm,thBitWidth is TMode=1 & ItCond & op0=0xf36f; thc1515=0 & Rd0811 & thc0505=0 & thLsbImm & thMsbImm & thBitWidth { build ItCond; clearMask:4 = (-1 << (thMsbImm + 1)) | (-1 >> (32 - thLsbImm)); Rd0811 = Rd0811 & clearMask; } :bfi^ItCond Rd0811,Rn0003,thLsbImm,thBitWidth is TMode=1 & ItCond & op4=0xf36 & Rn0003; thc1515=0 & Rd0811 & thc0505=0 & thLsbImm & thBitWidth { build ItCond; vmask:4 = (1 << thBitWidth) - 1; clear:4 = ~(vmask << thLsbImm); bits:4 = (Rn0003 & vmask) << thLsbImm; Rd0811 = (Rd0811 & clear) | bits; } @endif # VERSION_6T2 || VERSION_7 :bic^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10e & Rd0002 & Rm0305 & CheckInIT_ZN { build ItCond; Rd0002 = Rd0002 & (~Rm0305); resflags(Rd0002); build CheckInIT_ZN; } @if defined(VERSION_6T2) || defined(VERSION_7) :bic^thSBIT_ZN^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=1 & thSBIT_ZN & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; Rd0811 = Rn0003&(~ThumbExpandImm12); resflags(Rd0811); build thSBIT_ZN; } :bic^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=1 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; Rd0811 = 
Rn0003&(~thshift2); th_logicflags(); resflags(Rd0811); build thSBIT_CZNO; } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_5) # Exception Generation and UDF :hlt immed6 is TMode=1 & op6=0b1011101010 & immed6 { software_hlt(immed6:4); } :bkpt immed8 is TMode=1 & ItCond & op8=0xbe & immed8 { software_bkpt(immed8:4); # Not a mistake, breakpoint always unconditional even in IT Block build ItCond; } :hvc "#"^tmp is TMode=1 & op4=0xf7e & thc0003; op12=0x8 & thc0011 [tmp = thc0003 << 12 | thc0011;] { software_hvc(tmp:4); } # Requires Security Extensions :smc^ItCond "#"^thc0003 is TMode=1 & ItCond & op4=0xf7f & thc0003; op12=0x8 { build ItCond; software_smc(thc0003:1); } @ifndef NOT_AN_INSTRUCTION :udf^ItCond "#"thc0007 is TMode=1 & ItCond & op8 = 0xde & thc0007 { build ItCond; local excaddr:4 = inst_start; local target:4 = software_udf(thc0007:4, excaddr); goto [target]; } @endif :udf^ItCond "#"tmp is TMode=1 & ItCond & op4=0xf7f & thc0003; op12=0xa & thc0011 [tmp = thc0003 << 12 | thc0011;] { build ItCond; local excaddr:4 = inst_start; local target:4 = software_udf(tmp:4, excaddr); goto [target]; } @endif # VERSION_5 :bl^ItCond ThAddr24 is TMode=1 & ItCond & (op11=0x1e; part2c1415=3 & part2c1212=1) & ThAddr24 { build ItCond; lr = inst_next|1; SetThumbMode(1); call ThAddr24; } @ifndef VERSION_6T2 :bl^ItCond "#"^off is TMode=1 & ItCond & op11=0x1e & soffset11 [ off = inst_start + 4 + (soffset11 << 12); ] { build ItCond; lr = off:4; } :bl^ItCond "#"^off is TMode=1 & ItCond & op11=0x1f & offset11 [ off = offset11 << 1; ] { build ItCond; local dest = lr + off:4; lr = inst_next|1; SetThumbMode(1); call [dest]; } :bl^ItCond lr is TMode=1 & ItCond & op11=0x1f & offset11=0 & lr { build ItCond; local dest = lr; lr = inst_next|1; SetThumbMode(1); call [dest]; } :blx^ItCond "#"^off is TMode=1 & ItCond & op11=0x1d & offset11 & thc0000=0 [ off = offset11 << 1; ] { build ItCond; local dest = (lr & (~0x3)) + off:4; lr = inst_next|1; SetThumbMode(0); call [dest]; } :blx^ItCond lr 
is TMode=1 & ItCond & op11=0x1d & offset11=0 & thc0000=0 & lr { build ItCond; local dest = (lr & (~0x3)); lr = inst_next|1; SetThumbMode(0); call [dest]; } @endif :bl^ItCond ThAddr24 is TMode=1 & CALLoverride=1 & ItCond & (op11=0x1e; part2c1415=3 & part2c1212=1) & ThAddr24 { build ItCond; lr = inst_next|1; SetThumbMode(1); goto ThAddr24; } bxns: "" is thc0003 { } bxns: "ns" is thc0002=0b100 { } @if defined(VERSION_5) :blx^ItCond ThArmAddr23 is TMode=1 & ItCond & (op11=0x1e;part2op=0x1d) & ThArmAddr23 [ TMode=0; globalset(ThArmAddr23,TMode); TMode=1; ] { build ItCond; lr = inst_next|1; SetThumbMode(0); call ThArmAddr23; # Don't set this, assume return will set for emulation. Was screwing up decompiler. TB = 1; } :blx^ItCond ThArmAddr23 is TMode=1 & ItCond & CALLoverride=1 & (op11=0x1e;part2op=0x1d) & ThArmAddr23 [ TMode=0; globalset(ThArmAddr23,TMode); TMode=1; ] { build ItCond; lr = inst_next|1; SetThumbMode(0); goto ThArmAddr23; } :blx^ItCond ThArmAddr23 is TMode=1 & ItCond & (op11=0x1e; part2c1415=3 & part2c1212=0) & ThArmAddr23 [ TMode=0; globalset(ThArmAddr23,TMode); TMode=1; ] { build ItCond; lr = inst_next|1; SetThumbMode(0); call ThArmAddr23; } :blx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & op7=0x08f & Hrm0305 & bxns { build ItCond; BXWritePC(Hrm0305); lr = inst_next|1; call [pc]; # Don't set this, assume return will set for emulation. Was screwing up decompiler. TB = 1; } @endif # VERSION_5 :bx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & op7=0x08e & Hrm0305 & hrm0305=6 & h2=1 & bxns { build ItCond; BXWritePC(Hrm0305); return [pc]; } :bx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & op7=0x08e & Hrm0305 & bxns { build ItCond; BXWritePC(Hrm0305); goto [pc]; } :bx^bxns^ItCond Hrm0305 is TMode=1 & ItCond & LRset=1 & op7=0x08e & Hrm0305 & bxns [ LRset=0; TMode=1; globalset(inst_next,LRset); globalset(inst_next,TMode); ] { build ItCond; BXWritePC(Hrm0305); call [pc]; # Don't set this, assume return will set for emulation. Was screwing up decompiler. 
TB = 1; } @if defined(VERSION_6T2) || defined(VERSION_7) :bxj^ItCond Rn0003 is TMode=1 & ItCond & op4=0xf3c & Rn0003; op0=0x8f00 { build ItCond; success:1 = jazelle_branch(); if (success) goto ; SetThumbMode( (Rn0003&0x00000001)!=0 ); local tmp=Rn0003&0xfffffffe; goto [tmp]; } # Optional change to THUMB @endif # VERSION_6T2 || VERSION_7 :cbnz^ItCond Rn0002,Addr5 is TMode=1 & ItCond & op12=0xb & thc1111=1 & thc1010=0 & thc0808=1 & Rn0002 & Addr5 { build ItCond; local tmp = Rn0002 != 0; if (tmp) goto Addr5; } :cbz^ItCond Rn0002,Addr5 is TMode=1 & ItCond & op12=0xb & thc1111=0 & thc1010=0 & thc0808=1 & Rn0002 & Addr5 { build ItCond; local tmp = Rn0002 == 0; if (tmp) goto Addr5; } @if defined(VERSION_6T2) || defined(VERSION_7) @ifndef CDE :cdp^ItCond thcpn,thopcode1,thCRd,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thopcode1 & thCRn; thCRd & thcpn & thopcode2 & thc0404=0 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thopcode1; t_op2:4 = thopcode2; coprocessor_function(t_cpn,t_op1,t_op2,thCRd,thCRn,thCRm); } :cdp2^ItCond thcpn,thopcode1,thCRd,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thopcode1 & thCRn; thCRd & thcpn & thopcode2 & thc0404=0 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thopcode1; t_op2:4 = thopcode2; coprocessor_function2(t_cpn,t_op1,t_op2,thCRd,thCRn,thCRm); } @endif #CDE define pcodeop IndexCheck; :chka^ItCond Hrn0002,Rm0306 is TMode=1 & ItCond & TEEMode=1 & op8=0xca & Rm0306 & Hrn0002 { build ItCond; local tmp = Hrn0002 <= Rm0306; if (!tmp) goto inst_next; lr = inst_next|1; IndexCheck(); } :clrex^ItCond is TMode=1 & ItCond & op0=0xf3bf; op0=0x8f2f { build ItCond; ClearExclusiveLocal(); } :clz^ItCond Rd0811,Rm0003 is TMode=1 & ItCond & op4=0xfab & Rm0003; op12=15 & Rd0811 { build ItCond; Rd0811 = lzcount(Rm0003); } :cmn^ItCond Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=8 & thc0404=1 & Rn0003; thc1515=0 & thc0811=15) & ThumbExpandImm12 { build ItCond; 
  # (continuation of cmn immediate) compute add flags, result is discarded
  th_addflags(Rn0003,ThumbExpandImm12);
  local tmp = Rn0003 + ThumbExpandImm12;
  resflags(tmp);
  th_affectflags();
}

:cmn^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op4=0xeb1 & Rn0003; thc1515=0 & thc0811=15 & thshift2
{
  build ItCond;
  build thshift2;
  th_addflags(Rn0003,thshift2);
  local tmp = Rn0003+thshift2;
  resflags(tmp);
  th_affectflags();
}
@endif # VERSION_6T2 || VERSION_7

# 16-bit cmn (register)
:cmn^ItCond Rn0002,Rm0305 is TMode=1 & ItCond & op6=0x10b & Rm0305 & Rn0002
{
  build ItCond;
  th_addflags(Rn0002,Rm0305);
  local tmp = Rn0002 + Rm0305;
  resflags(tmp);
  th_affectflags();
}

# cmp: subtract and set flags, discarding the result.
:cmp^ItCond Rn0810,Immed8 is TMode=1 & ItCond & op11=5 & Rn0810 & Immed8
{
  build ItCond;
  th_subflags(Rn0810,Immed8);
  local tmp = Rn0810 - Immed8;
  resflags(tmp);
  th_affectflags();
}

@if defined(VERSION_6T2) || defined(VERSION_7)
:cmp^ItCond^".w" Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & thc0404=1 & sop0508=13 & Rn0003; thc1515=0 & thc0811=15) & ThumbExpandImm12
{
  build ItCond;
  th_subflags(Rn0003,ThumbExpandImm12);
  local tmp = Rn0003 - ThumbExpandImm12;
  resflags(tmp);
  th_affectflags();
}

:cmp^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op4=0xebb & Rn0003; thc1515=0 & thc0811=15 & thshift2
{
  build ItCond;
  th_subflags(Rn0003,thshift2);
  local tmp = Rn0003 - thshift2;
  resflags(tmp);
  th_affectflags();
}
@endif # VERSION_6T2 || VERSION_7

:cmp^ItCond Rn0002,Rm0305 is TMode=1 & ItCond & op6=0x10a & Rm0305 & Rn0002
{
  build ItCond;
  th_subflags(Rn0002,Rm0305);
  local tmp = Rn0002 - Rm0305;
  resflags(tmp);
  th_affectflags();
}

# cmp on high registers
:cmp^ItCond Hrn0002,Hrm0305 is TMode=1 & ItCond & op8=0x45 & Hrm0305 & Hrn0002
{
  build ItCond;
  th_subflags(Hrn0002,Hrm0305);
  local tmp = Hrn0002 - Hrm0305;
  resflags(tmp);
  th_affectflags();
}

@if defined(VERSION_6)
# cps interrupt-flag subtables: thc0404 selects enable (cpsie) vs disable (cpsid)
aflag: "a" is thc0202=1 & thc0404=0 { enableDataAbortInterrupts(); }
aflag: "a" is thc0202=1 { disableDataAbortInterrupts(); }
aflag: is thc0202=0 { }
iflag: "i" is thc0101=1 & thc0404=0 { enableIRQinterrupts(); } # 7M: set primask
iflag: "i" is thc0101=1 { disableIRQinterrupts(); } # 7M: clear primask
iflag: is thc0101=0 { }
fflag: "f" is thc0000=1 & thc0404=0 { enableFIQinterrupts(); } # 7M: set faultmask
fflag: "f" is thc0000=1 { disableFIQinterrupts(); } # 7M: clear faultmask
fflag: is thc0000=0 { }
iflags: aflag^iflag^fflag is aflag & iflag & fflag { }

:cpsie^ItCond iflags is TMode=1 & ItCond & op8=0xb6 & sop0507=3 & thc0303=0 & iflags & thc0404=0
{
  build ItCond;
  build iflags;
  # see iflags for semantics
}

:cpsid^ItCond iflags is TMode=1 & ItCond & op8=0xb6 & sop0507=3 & thc0303=0 & iflags & thc0404=1
{
  build ItCond;
  build iflags;
  # see iflags for semantics
}
@endif # VERSION_6

@if defined(VERSION_6T2) || defined(VERSION_7)
# For SCR 11074, implement the "Encoding T2" 32-bit Thumb-2 cps change processor state instruction
# Note the manual says there are no conditions on this insn
#
th2_aflag: "a" is thc0707=1 & thc0910=0x2 { enableDataAbortInterrupts(); }
th2_aflag: "a" is thc0707=1 { disableDataAbortInterrupts(); }
th2_aflag: is thc0707=0 { }
th2_iflag: "i" is thc0606=1 & thc0910=0x2 { enableIRQinterrupts(); } # 7M: set primask
th2_iflag: "i" is thc0606=1 { disableIRQinterrupts(); } # 7M: clear primask
th2_iflag: is thc0606=0 { }
th2_fflag: "f" is thc0505=1 & thc0910=0 { enableFIQinterrupts(); } # 7M: set faultmask
th2_fflag: "f" is thc0505=1 { disableFIQinterrupts(); } # 7M: clear faultmask
th2_fflag: is thc0505=0 { }
th2_iflags: th2_aflag^th2_iflag^th2_fflag is th2_aflag & th2_iflag & th2_fflag { }

# Target processor mode for the Thumb-2 cps encoding.
th2_SetMode: "#"^16 is thc0004=0x10 { setUserMode(); }
th2_SetMode: "#"^17 is thc0004=0x11 { setFIQMode(); }
th2_SetMode: "#"^18 is thc0004=0x12 { setIRQMode(); }
th2_SetMode: "#"^19 is thc0004=0x13 { setSupervisorMode(); }
th2_SetMode: "#"^22 is thc0004=0x16 { setMonitorMode(); }
th2_SetMode: "#"^23 is thc0004=0x17 { setAbortMode(); }
th2_SetMode: "#"^27 is thc0004=0x1b { setUndefinedMode(); }
th2_SetMode: "#"^31 is thc0004=0x1f { setSystemMode(); }

# 11110 0 1110 1 0 1111 10 0 0 0
:cpsie th2_iflags, th2_SetMode is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2imm6=0x2f ; thc0910=0x2 & th2_SetMode & op11=0x10 & th2_iflags
{
  build th2_iflags;
}

:cpsid th2_iflags, th2_SetMode is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2imm6=0x2f ; thc0910=0x3 & th2_SetMode & op11=0x10 & th2_iflags
{
  build th2_iflags;
}

:cps th2_SetMode is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2imm6=0x2f ; thc0808=0x1 & th2_SetMode & op11=0x10
{
}
@endif # (VERSION_6T2) || defined(VERSION_7)

@if defined(VERSION_6T2) || defined(VERSION_7)
# dbg hint: only VERSION_7 gives it semantics
:dbg^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3af; op4=0x80f & thc0004
{
@if defined(VERSION_7)
  HintDebug(thc0004:1);
@endif # VERSION_7
}

@if defined(VERSION_7)
:dmb^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3bf; op4=0x8f5 & thc0004
{
  DataMemoryBarrier(thc0004:1);
}

:dsb^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3bf; op4=0x8f4 & thc0004
{
  DataSynchronizationBarrier(thc0004:1);
}
@endif

# eor (immediate)
:eor^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=4 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12
{
  build ItCond;
  Rd0811 = Rn0003 ^ ThumbExpandImm12;
  th_logicflags();
  resflags(Rd0811);
  build thSBIT_CZNO;
}

# eor.w (register, shifted)
:eor^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=4 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2
{
  build ItCond;
  Rd0811 = Rn0003 ^ thshift2;
  th_logicflags();
  resflags(Rd0811);
  build thSBIT_CZNO;
}

# enterx/leavex toggle ThumbEE decode mode via the TEEMode context register.
:enterx^ItCond is TMode=1 & ItCond & op0=0xf3bf; op0=0x8f1f
  [ TEEMode=1; globalset(inst_next,TEEMode); ]
{
  build ItCond;
}

:leavex^ItCond is TMode=1 & ItCond & op0=0xf3bf; op0=0x8f0f
  [ TEEMode=0; globalset(inst_next,TEEMode); ]
{
  build ItCond;
}
@endif # VERSION_6T2 || VERSION_7

# 16-bit eor (register)
:eor^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x101 & Rm0305 & Rd0002 & CheckInIT_ZN
{
  build ItCond;
  Rd0002 = Rd0002 ^ Rm0305;
  resflags(Rd0002);
  build CheckInIT_ZN;
}

@if defined(VERSION_7)
# ThumbEE handler branch: semantics intentionally empty (hint only).
:hb^ItCond "#"^immed8 is TMode=1 & ItCond & TEEMode=1 & op9=0x61 & immed8
{
  build ItCond;
}

:isb^ItCond "#"^thc0004 is TMode=1 & ItCond & op0=0xf3bf; op4=0x8f6 & thc0004
{
  InstructionSynchronizationBarrier(thc0004:1);
}
@endif # VERSION_7

@if defined(VERSION_8)
# F5.1.178 p2969 SEVL T1 variant
:sevl is TMode=1 & op0=0b1011111101010000 & ItCond
{
  build ItCond;
  SendEvent();
}
@endif # VERSION_8

@if defined(VERSION_6T2) || defined(VERSION_7)
# IT suffix subtables: X/Y/Z print "t"/"e"/"" by comparing each mask bit
# against the condition's low bit (thc0404).
X: "t" is TMode=1 & ((thc0404=1 & thc0303=1) | (thc0404=0 & thc0303=0)) & (thc0202=1 | thc0101=1 | thc0000=1) { }
X: "e" is TMode=1 & ((thc0404=1 & thc0303=0) | (thc0404=0 & thc0303=1)) & (thc0202=1 | thc0101=1 | thc0000=1) { }
X: "" is TMode=1 & thc0404 & thc0303 & (thc0202=0 & thc0101=0 & thc0000=0) { }
Y: "t" is TMode=1 & ((thc0404=1 & thc0202=1) | (thc0404=0 & thc0202=0)) & (thc0101=1 | thc0000=1) { }
Y: "e" is TMode=1 & ((thc0404=1 & thc0202=0) | (thc0404=0 & thc0202=1)) & (thc0101=1 | thc0000=1) { }
Y: "" is TMode=1 & thc0404 & thc0202 & (thc0101=0 & thc0000=0) { }
Z: "t" is TMode=1 & ((thc0404=1 & thc0101=1) | (thc0404=0 & thc0101=0)) & (thc0000=1) { }
Z: "e" is TMode=1 & ((thc0404=1 & thc0101=0) | (thc0404=0 & thc0101=1)) & (thc0000=1) { }
Z: "" is TMode=1 & thc0404 & thc0101 & (thc0000=0) { }
XYZ: is TMode=1 & sop0003=8 { }
XYZ: X^Y^Z is TMode=1 & X & Y & Z { }

# IT instruction: only writes decode context (cond_base/cond_shft) for the
# following instructions; no runtime p-code.
:it^XYZ it_thfcc is TMode=1 & op8=0xbf & XYZ & $(IT_THFCC) & thc0507 & thc0004
  [ itmode=0; cond_base = thc0507; cond_shft=thc0004; globalset(inst_next,condit); ]
{
  # just sets up the condition and If Then/Else mask
}

@ifndef CDE
# Coprocessor loads (ldc family), modeled with opaque pcodeops.
:ldc^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=0 & thL4=1; thCRd & thcpn) & taddrmode5
{
  build ItCond;
  build taddrmode5;
  t_cpn:4 = thcpn;
  coprocessor_load(t_cpn,thCRd,taddrmode5);
}

:ldcl^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=1 & thL4=1; thCRd & thcpn) & taddrmode5
{
  build ItCond;
  build taddrmode5;
  t_cpn:4 = thcpn;
  coprocessor_loadlong(t_cpn,thCRd,taddrmode5);
}

:ldc2^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=0 & thL4=1; thCRd & thcpn) & taddrmode5
{
  build ItCond;
  build taddrmode5;
  t_cpn:4 = thcpn;
  coprocessor_load(t_cpn,thCRd,taddrmode5);
}

:ldc2l^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=1 & thL4=1; thCRd & thcpn) & taddrmode5
{
  build ItCond;
  build taddrmode5;
  t_cpn:4 = thcpn;
  coprocessor_loadlong(t_cpn,thCRd,taddrmode5);
}
@endif # CDE
@endif # VERSION_6T2 || VERSION_7

# 16-bit ldmia: register list handled by the ldbrace subtable.
:ldmia^ItCond Rn_exclaim,ldbrace is TMode=1 & ItCond & op11=0x19 & Rn_exclaim & ldbrace & Rn_exclaim_WB
{
  build ItCond;
  build Rn_exclaim;
  build ldbrace;
  build Rn_exclaim_WB;
}

@if defined(VERSION_6T2) || defined(VERSION_7)
# 32-bit ldm: mult_addr carries the running address for thldrlist_inc.
:ldm^ItCond^".w" Rn0003,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=0 & thc0404=1 & Rn0003; thc1313=0 & thldrlist_inc
{
  build ItCond;
  mult_addr = Rn0003;
  build thldrlist_inc;
}

# Writeback form (thwbit=1): base register updated afterwards.
:ldm^ItCond^".w" Rn0003!,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=1 & thc0404=1 & Rn0003; thc1313=0 & thldrlist_inc
{
  build ItCond;
  mult_addr = Rn0003;
  build thldrlist_inc;
  Rn0003 = mult_addr;
}

# Forms with pc in the list (thc1515=1) end with an indirect branch.
:ldm^ItCond Rn0003,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=0 & thc0404=1 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_inc
{
  build ItCond;
  mult_addr = Rn0003;
  build thldrlist_inc;
  LoadWritePC(pc);
  goto [pc];
}

:ldm^ItCond^".w" Rn0003,thldrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=1 & thc0404=1 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_inc
{
  build ItCond;
  mult_addr = Rn0003;
  build thldrlist_inc;
  Rn0003 = mult_addr;
  LoadWritePC(pc);
  goto [pc];
}

# ldmdb: decrement-before, so the running address starts at Rn-4.
:ldmdb^ItCond Rn0003,thldrlist_dec is TMode=1 & ItCond & op4=0xe91 & Rn0003; thc1313=0 & thldrlist_dec
{
  build ItCond;
  mult_addr = Rn0003-4;
  build thldrlist_dec;
}

:ldmdb^ItCond Rn0003!,thldrlist_dec is TMode=1 & ItCond & op4=0xe93 & Rn0003; thc1313=0 & thldrlist_dec
{
  build ItCond;
  mult_addr = Rn0003-4;
  build thldrlist_dec;
  Rn0003 = mult_addr + 4;
}
# ldmdb forms that include pc in the register list (thc1515=1).
:ldmdb^ItCond Rn0003,thldrlist_dec is TMode=1 & ItCond & op4=0xe91 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_dec
{
  build ItCond;
  mult_addr = Rn0003-4;
  build thldrlist_dec;
  LoadWritePC(pc);
  goto [pc];
}

:ldmdb^ItCond Rn0003,thldrlist_dec is TMode=1 & ItCond & op4=0xe93 & Rn0003; thc1515=1 & thc1313=0 & thldrlist_dec
{
  build ItCond;
  mult_addr = Rn0003-4;
  build thldrlist_dec;
  Rn0003 = mult_addr + 4;
  LoadWritePC(pc);
  goto [pc];
}
@endif # VERSION_6T2 || VERSION_7

# ---- 16-bit Thumb loads ----
:ldr^ItCond Rd0002,RnIndirect4 is TMode=1 & ItCond & op11=0xd & RnIndirect4 & Rd0002
{
  build ItCond;
  build RnIndirect4;
  Rd0002 = *RnIndirect4;
}

:ldr^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2c & RnRmIndirect & Rd0002
{
  build ItCond;
  build RnRmIndirect;
  Rd0002 = *RnRmIndirect;
}

# pc-relative ldr: Pcrel8Indirect already exports the loaded value.
:ldr^ItCond Rd0810,Pcrel8Indirect is TMode=1 & ItCond & op11=9 & Pcrel8Indirect & Rd0810
{
  build ItCond;
  build Pcrel8Indirect;
  Rd0810 = Pcrel8Indirect;
} # Note: NO '*' IS INTENTIONAL

:ldr^ItCond Rd0810,Sprel8Indirect is TMode=1 & ItCond & op11=0x13 & Sprel8Indirect & Rd0810
{
  build ItCond;
  build Sprel8Indirect;
  Rd0810 = *Sprel8Indirect;
}

:ldrb^ItCond Rd0002,RnIndirect1 is TMode=1 & ItCond & op11=0xf & RnIndirect1 & Rd0002
{
  build ItCond;
  build RnIndirect1;
  Rd0002 = zext( *:1 RnIndirect1 );
}

:ldrb^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2e & RnRmIndirect & Rd0002
{
  build ItCond;
  build RnRmIndirect;
  Rd0002 = zext( *:1 RnRmIndirect);
}

:ldrh^ItCond Rd0002,RnIndirect2 is TMode=1 & ItCond & op11=0x11 & RnIndirect2 & Rd0002
{
  build ItCond;
  build RnIndirect2;
  Rd0002 = zext( *:2 RnIndirect2);
}

:ldrh^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2d & RnRmIndirect & Rd0002
{
  build ItCond;
  build RnRmIndirect;
  Rd0002 = zext( *:2 RnRmIndirect);
}

:ldrsb^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2b & RnRmIndirect & Rd0002
{
  build ItCond;
  build RnRmIndirect;
  Rd0002 = sext( *:1 RnRmIndirect);
}

:ldrsh^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2f & RnRmIndirect & Rd0002
{
  build ItCond;
  build RnRmIndirect;
  Rd0002 = sext( *:2 RnRmIndirect);
}

define pcodeop ExclusiveAccess;

@if defined(VERSION_7)
# Exclusive loads: the exclusive monitor is modeled by the opaque
# ExclusiveAccess pcodeop on the address.
:ldrexb^ItCond Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; Rt1215 & thc0811=15 & thc0407=4 & thc0003=15
{
  build ItCond;
  local tmp = Rn0003;
  ExclusiveAccess(tmp);
  val:1 = *tmp;
  Rt1215 = zext(val);
}

:ldrexh^ItCond Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; Rt1215 & thc0811=15 & thc0407=5 & thc0003=15
{
  build ItCond;
  local tmp = Rn0003;
  ExclusiveAccess(tmp);
  val:2 = *tmp;
  Rt1215 = zext(val);
}

:ldrexd^ItCond Rt1215,Rt0811,[Rn0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; Rt1215 & Rt0811 & thc0407=7 & thc0003=15
{
  build ItCond;
  local tmp = Rn0003;
  ExclusiveAccess(tmp);
  val1:4 = *tmp;
  val2:4 = *(tmp + 4);
  Rt1215 = val1;
  Rt0811 = val2;
}
@endif # VERSION_7

@if defined(VERSION_6T2) || defined(VERSION_7)
:ldrex^ItCond Rt1215,[Rn0003,Immed8_4] is TMode=1 & ItCond & op4=0xe85 & Rn0003; Rt1215 & thc0811=15 & Immed8_4
{
  build ItCond;
  local tmp = Rn0003 + Immed8_4;
  ExclusiveAccess(tmp);
  Rt1215 = *tmp;
}

# overlaps patterns with the other ldr intructions when Rn==1111, therefore it must occur first
:ldr^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf85 & sop0003=15; Rt1215 & RtGotoCheck) & PcrelOffset12
{
  build ItCond;
  build PcrelOffset12;
  Rt1215 = PcrelOffset12:4;
  build RtGotoCheck;
}

:ldr^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf8d & sop0003=15; Rt1215 & RtGotoCheck) & PcrelOffset12
{
  build ItCond;
  build PcrelOffset12;
  Rt1215 = PcrelOffset12:4;
  build RtGotoCheck;
}

# RtGotoCheck supplies the indirect branch when Rt is the pc.
:ldr^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8d; Rt1215 & RtGotoCheck) & RnIndirect12
{
  build ItCond;
  build RnIndirect12;
  Rt1215 = *RnIndirect12;
  build RtGotoCheck;
}

:ldr^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf85; Rt1215 & thc1111=1 & RtGotoCheck) & $(RN_INDIRECT_PUW)
{
  build ItCond;
  build RnIndirectPUW;
  Rt1215 = *RnIndirectPUW;
  build RtGotoCheck;
}

:ldr^ItCond^".w" Rt1215,[Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xf85 & Rn0003; Rt1215 & RtGotoCheck & thc1111=0 & sop0610=0 & thc0405=0 & Rm0003
{
  build ItCond;
  local tmp = Rn0003 + Rm0003;
  Rt1215 = *tmp;
  build RtGotoCheck;
}

:ldr^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf85 & Rn0003; Rt1215 & RtGotoCheck & thc1111=0 & sop0610=0 & thc0405 & Rm0003
{
  build ItCond;
  local tmp = Rn0003 + (Rm0003 << thc0405);
  Rt1215 = *tmp;
  build RtGotoCheck;
}

:ldrb^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf81 & sop0003=15; Rt1215) & PcrelOffset12
{
  build ItCond;
  build PcrelOffset12;
  tmp:1 = PcrelOffset12:1;
  Rt1215 = zext(tmp);
}

# overlaps patterns with the other ldrb intructions when Rn==1111, therefore it must occur first
:ldrb^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf89 & sop0003=15; Rt1215) & PcrelOffset12
{
  build ItCond;
  build PcrelOffset12;
  tmp:1 = PcrelOffset12:1;
  Rt1215 = zext(tmp);
}

:ldrb^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf89; Rt1215) & RnIndirect12
{
  build ItCond;
  build RnIndirect12;
  tmp:1 = *RnIndirect12;
  Rt1215 = zext(tmp);
}

:ldrb^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf81; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW)
{
  build ItCond;
  build RnIndirectPUW;
  tmp:1 = *RnIndirectPUW;
  Rt1215 = zext(tmp);
}

:ldrb^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf81 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003
{
  build ItCond;
  local tmp = Rn0003 + (Rm0003 << thc0405);
  val:1 = *tmp;
  Rt1215 = zext(val);
}

# ldrbt (unprivileged): modeled identically to a normal byte load.
:ldrbt^ItCond^".w" Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf81 & Rn0003; Rt1215 & thc0811=14 & Immed8
{
  build ItCond;
  local tmp = Rn0003 + Immed8;
  val:1 = *tmp;
  Rt1215 = zext(val);
}

# overlaps patterns with the other ldrd intructions when Rn==1111, therefore it must occur first
:ldrd^ItCond Rt1215,Rt0811,Pcrel8_s8 is TMode=1 & ItCond & op9=0x74 & thc0606=1 & thc0404=1 & sop0003=15; Rt1215 & Rt0811 & Pcrel8_s8
{
  # (continuation of pc-relative ldrd) split the 8-byte value into two registers
  build ItCond;
  build Pcrel8_s8;
  local val = Pcrel8_s8;
  Rt1215 = val(4);
  Rt0811 = val(0);
}

:ldrd^ItCond Rt1215,Rt0811,RnIndirectPUW1 is TMode=1 & ItCond & (op9=0x74 & thc0606=1 & thc0404=1 & Rn0003; Rt1215 & Rt0811) & $(RN_INDIRECT_PUW1)
{
  build ItCond;
  build RnIndirectPUW1;
  Rt1215 = *RnIndirectPUW1;
  Rt0811 = *(RnIndirectPUW1+4);
}

# pldw must come before ldrh.w because of overlap of Rt != 1111 in ldrh.w
:pldw^ItCond Rn0003,"#"^offset12 is TMode=1 & ItCond & op6=0x3e2 & thwbit=1 & thc0404=1 & Rn0003; op12=0xf & offset12
{
  build ItCond;
  addr:4 = Rn0003 + offset12;
  HintPreloadDataForWrite(addr);
}

:pldw^ItCond Rn0003,"#-"^immed8 is TMode=1 & ItCond & op6=0x3e0 & thwbit=1 & thc0404=1 & Rn0003; op8=0xfc & immed8
{
  build ItCond;
  addr:4 = Rn0003 - immed8;
  HintPreloadDataForWrite(addr);
}

:pldw^ItCond Rn0003,Rm0003,"lsl #"^thc0405 is TMode=1 & ItCond & op6=0x3e0 & thwbit=1 & thc0404=1 & Rn0003; op8=0xf0 & thc0607=0 & thc0405 & Rm0003
{
  build ItCond;
  addr:4 = Rn0003 + (Rm0003 << thc0405);
  HintPreloadDataForWrite(addr);
}

# overlaps patterns with the other ldrh intructions when Rn==1111, therefore it must occur first
:ldrh^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf83 & sop0003=15; Rt1215) & PcrelOffset12
{
  build ItCond;
  local tmp = PcrelOffset12:2;
  Rt1215 = zext(tmp);
}

:ldrh^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op4=0xf8b & sop0003=15; Rt1215) & PcrelOffset12
{
  build ItCond;
  tmp:2 = PcrelOffset12:2;
  Rt1215 = zext(tmp);
}

# NOTE(review): display is "ldrh.w" before the IT condition here, unlike the
# sibling forms which use ldrh^ItCond^".w" — looks like an upstream
# inconsistency; left as-is to preserve output.
:ldrh.w^ItCond Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8B; Rt1215) & RnIndirect12
{
  build ItCond;
  build RnIndirect12;
  tmp:2 = *RnIndirect12;
  Rt1215 = zext(tmp);
}

:ldrh^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf83; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW)
{
  build ItCond;
  build RnIndirectPUW;
  tmp:2 = *RnIndirectPUW;
  Rt1215 = zext(tmp);
}

:ldrh^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf83 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003
{
  build ItCond;
  local tmp = Rn0003 + (Rm0003 << thc0405);
  val:2 = *tmp;
  Rt1215 = zext(val);
}

:ldrht^ItCond^".w" Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf83 & Rn0003; Rt1215 & thc0811=14 & Immed8
{
  build ItCond;
  local tmp = Rn0003 + Immed8;
  val:2 = *tmp;
  Rt1215 = zext(val);
}

# pli moevd above ldrsb to avoid conflict for ldrsb when Rt == 1111
:pli^ItCond Rn0003,"#"^offset12 is TMode=1 & ItCond & op4=0xf99 & Rn0003; op12=0xf & offset12
{
  build ItCond;
  addr:4 = Rn0003 + offset12;
  HintPreloadInstruction(addr);
}

:pli^ItCond Rn0003,"#-"^immed8 is TMode=1 & ItCond & op4=0xf91 & Rn0003; op8=0xfc & immed8
{
  build ItCond;
  addr:4 = Rn0003 - immed8;
  HintPreloadInstruction(addr);
}

:pli^ItCond PcrelOffset12 is TMode=1 & ItCond & (op8=0xf9 & thc0506=0 & thc0004=0x1f; thc1215=0xf) & PcrelOffset12
{
  build ItCond;
  HintPreloadInstruction(PcrelOffset12);
}

:pli^ItCond Rn0003,Rm0003"lsl #"^thc0405 is TMode=1 & ItCond & op4=0xf91 & Rn0003; op6=0x3c0 & thc0405 & Rm0003
{
  build ItCond;
  addr:4 = Rn0003 + (Rm0003 << thc0405);
  HintPreloadInstruction(addr);
}

# overlaps patterns with the other ldrsb intructions when Rn==1111, therefore it must occur first
:ldrsb^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op8=0xf9 & thc0506=0 & thc0404=1 & sop0003=15; Rt1215) & PcrelOffset12
{
  build ItCond;
  tmp:1 = *PcrelOffset12;
  Rt1215 = sext(tmp);
}

:ldrsb^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf99; Rt1215) & RnIndirect12
{
  build ItCond;
  tmp:1 = *RnIndirect12;
  Rt1215 = sext(tmp);
}

:ldrsb^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf91; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW)
{
  build ItCond;
  build RnIndirectPUW;
  tmp:1 = *RnIndirectPUW;
  Rt1215 = sext(tmp);
}

:ldrsb^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf91 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003
{
  build ItCond;
  local tmp = Rn0003 + (Rm0003 << thc0405);
  val:1 = *tmp;
  Rt1215 = sext(val);
}

:ldrsbt^ItCond^".w" Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf91 & Rn0003; Rt1215 & thc0811=14 & Immed8
{
  build ItCond;
  local tmp = Rn0003 + Immed8;
  val:1 = *tmp;
  Rt1215 = sext(val);
}

# overlaps patterns with the other ldr intructions when Rn==1111, therefore it must occur first
:ldrsh^ItCond^".w" Rt1215,PcrelOffset12 is TMode=1 & ItCond & (op8=0xf9 & thc0506=1 & thc0404=1 & sop0003=15; Rt1215) & PcrelOffset12
{
  build ItCond;
  build PcrelOffset12;
  tmp:2 = *PcrelOffset12;
  Rt1215 = sext(tmp);
}

:ldrsh^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf9B; Rt1215) & RnIndirect12
{
  build ItCond;
  tmp:2 = *RnIndirect12;
  Rt1215 = sext(tmp);
}

:ldrsh^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf93; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW)
{
  build ItCond;
  build RnIndirectPUW;
  tmp:2 = *RnIndirectPUW;
  Rt1215 = sext(tmp);
}

:ldrsh^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf93 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003
{
  build ItCond;
  local tmp = Rn0003 + (Rm0003 << thc0405);
  val:2 = *tmp;
  Rt1215 = sext(val);
}

:ldrsht^ItCond^".w" Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf93 & Rn0003; Rt1215 & thc0811=14 & Immed8
{
  build ItCond;
  local tmp = Rn0003 + Immed8;
  val:2 = *tmp;
  Rt1215 = sext(val);
}

# ldrt (unprivileged): modeled as a normal word load.
:ldrt^ItCond^".w" Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf85 & Rn0003; Rt1215 & thc0811=14 & Immed8
{
  build ItCond;
  local tmp = Rn0003 + Immed8;
  Rt1215 = *tmp;
}
@endif # VERSION_6T2 || VERSION_7

# Carry-out for LSL: bit shifted out last; shift_count==0 keeps the old CY.
macro th_set_carry_for_lsl(op1,shift_count)
{
  local bit = (op1 << (shift_count-1)) & 0x80000000;
  tmpCY = ((shift_count == 0) && CY) || ((shift_count != 0) && (bit != 0));
}

:lsl^CheckInIT_CZN^ItCond Rd0002,Rm0305,Immed5 is TMode=1 & ItCond & op11=0x0 & Immed5 & Rm0305 & Rd0002 & CheckInIT_CZN
{
  build ItCond;
  th_set_carry_for_lsl(Rm0305,Immed5);
  Rd0002 = Rm0305 << Immed5;
  resflags(Rd0002);
  build CheckInIT_CZN;
}

:lsl^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x102 & Rs0305 & Rd0002 & CheckInIT_CZN
{
ItCond; local shift_count = Rs0305 & 0xff; th_set_carry_for_lsl(Rd0002,shift_count); Rd0002 = Rd0002 << shift_count; resflags(Rd0002); build CheckInIT_CZN; } macro th_set_carry_for_lsr(op1,shift_count) { local bit = (op1 >> (shift_count-1)) & 1; tmpCY = ((shift_count == 0) && CY) || ((shift_count != 0) && (bit != 0)); } #note that this is a special case where immed5 = 0, which corresponds to a shift amount of 32 :lsr^CheckInIT_CZN^ItCond Rd0002,Rm0305,"#0x20" is TMode=1 & ItCond & op11=1 & Immed5 & Rm0305 & Rd0002 & immed5=0 & CheckInIT_CZN { build ItCond; th_set_carry_for_lsr(Rm0305,32:1); Rd0002 = Rm0305 >> 32; resflags(Rd0002); build CheckInIT_CZN; } :lsr^CheckInIT_CZN^ItCond Rd0002,Rm0305,Immed5 is TMode=1 & ItCond & op11=1 & Immed5 & Rm0305 & Rd0002 & CheckInIT_CZN { build ItCond; local shift_amount = Immed5; th_set_carry_for_lsr(Rm0305,shift_amount); Rd0002 = Rm0305 >> Immed5; resflags(Rd0002); build CheckInIT_CZN; } :lsr^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x103 & Rd0002 & Rs0305 & CheckInIT_CZN { build ItCond; local shift_amount = (Rs0305 & 0xff); th_set_carry_for_lsr(Rd0002,shift_amount); Rd0002 = Rd0002 >> (Rs0305 & 0xff); resflags(Rd0002); build CheckInIT_CZN; } @if defined(VERSION_6T2) || defined(VERSION_7) :lsl^thSBIT_CZN^ItCond^".w" Rd0811,Rm0003,thLsbImm is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & sop0003=15; thc1515=0 & Rd0811 & thc0405=0 & Rm0003 & thLsbImm { build ItCond; th_set_carry_for_lsl(Rm0003,thLsbImm); Rd0811 = Rm0003 << thLsbImm; resflags(Rd0811); build thSBIT_CZN; } :lsl^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op11=0x1f & thc0910=1 & sop0508=0 & thSBIT_CZN & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; local shift_amount = (Rm0003 & 0xff); th_set_carry_for_lsl(Rn0003,shift_amount); Rd0811 = Rn0003 << (shift_amount); resflags(Rd0811); build thSBIT_CZN; } :lsr^thSBIT_CZN^ItCond^".w" Rd0811,Rm0003,thLsbImm is TMode=1 & ItCond & op11=0x1d & 
thc0910=1 & sop0508=2 & thSBIT_CZN & sop0003=15; thc1515=0 & Rd0811 & thc0405=1 & Rm0003 & thLsbImm { build ItCond; th_set_carry_for_lsr(Rm0003,thLsbImm); Rd0811 = Rm0003 >> thLsbImm; resflags(Rd0811); build thSBIT_CZN; } :lsr^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op11=0x1f & thc0910=1 & sop0508=1 & thSBIT_CZN & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; local shift_amount = Rm0003 & 0xff; th_set_carry_for_lsr(Rn0003,shift_amount); Rd0811 = Rn0003 >> shift_amount; resflags(Rd0811); build thSBIT_CZN; } @endif # VERSION_6T2 || VERSION_7 @ifndef CDE :mcr^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thc0507 & thc0404=0 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thc0507; t_op2:4 = thopcode2; coprocessor_moveto(t_cpn,t_op1,t_op2,Rt1215,thCRn,thCRm); } :mcr2^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thc0507 & thc0404=0 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thc0507; t_op2:4 = thopcode2; coprocessor_moveto(t_cpn,t_op1,t_op2,Rt1215,thCRn,thCRm); } :mcrr^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xec4 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op:4 = thopcode1; coprocessor_moveto2(t_cpn,t_op,Rt1215,Rn0003,thCRm); } :mcrr^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xfc4 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op:4 = thopcode1; coprocessor_moveto2(t_cpn,t_op,Rt1215,Rn0003,thCRm); } @endif # CDE :mov^CheckInIT_ZN^ItCond Rd0810,Immed8 is TMode=1 & ItCond & op11=4 & Rd0810 & Immed8 & CheckInIT_ZN { build ItCond; Rd0810 = Immed8; resflags(Rd0810); build CheckInIT_ZN; } :mov^CheckInIT_ZN^ItCond Rd0002,Rn0305 is TMode=1 & ItCond & op6=0x000 & Rn0305 & Rd0002 & CheckInIT_ZN { build ItCond; Rd0002 = Rn0305; 
resflags(Rd0002); build CheckInIT_ZN; } :mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & Hrd0002 { build ItCond; Hrd0002 = Hrm0305; } :mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & Hrd0002 & hrd0002=7 & h1=1 { build ItCond; dest:4 = Hrm0305; BranchWritePC(dest); goto [pc]; } :mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & rm0306=14 & Hrd0002 & hrd0002=7 & h1=1 { build ItCond; dest:4 = Hrm0305; BranchWritePC(dest); return [pc]; } :mov^ItCond Hrd0002,Hrm0305 is TMode=1 & ItCond & op8=0x46 & Hrm0305 & hrm0305=7 & Hrd0002 & hrd0002=6 & h1=1 [ LRset=1; TMode=1; globalset(inst_next,LRset); globalset(inst_next,TMode); ] { build ItCond; Hrd0002 = Hrm0305; } @if defined(VERSION_6T2) || defined(VERSION_7) :mov^thSBIT_ZN^ItCond^".w" Rd0811,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=2 & thSBIT_ZN & sop0003=15; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; Rd0811 = ThumbExpandImm12; resflags(Rd0811); build thSBIT_ZN; } :movw^ItCond Rd0811,Immed16 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=2 & thc0404=0; thc1515=0 & Rd0811) & Immed16 { build ItCond; Rd0811 = zext(Immed16); resflags(Rd0811); } :mov^thSBIT_ZN^ItCond^".w" Rd0811,Rm0003 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_ZN & sop0003=15; op12=0 & Rd0811 & thc0407=0 & Rm0003 { build ItCond; Rd0811 = Rm0003; resflags(Rd0811); build thSBIT_ZN; } :movt^ItCond Rd0811,Immed16 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=6 & thc0404=0; thc1515=0 & Rd0811) & Immed16 { build ItCond; Rd0811 = (zext(Immed16) << 16) | (Rd0811 & 0xffff); } @ifndef CDE :mrc^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thc0507 & thc0404=1 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thc0507; t_op2:4 = thopcode2; Rt1215 = coprocessor_movefromRt(t_cpn,t_op1,t_op2,thCRn,thCRm); } :mrc^ItCond 
thcpn,thc0507,"APSR_nzcv",thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xee & thc0507 & thc0404=1 & thCRn; Rt1215=15 & thcpn & thopcode2 & thc0404=1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thc0507; t_op2:4 = thopcode2; local tmp:4 = coprocessor_movefromRt(t_cpn,t_op1,t_op2,thCRn,thCRm); writeAPSR_nzcv(tmp); } :mrc2^ItCond thcpn,thc0507,Rt1215,thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thc0507 & thc0404=1 & thCRn; Rt1215 & thcpn & thopcode2 & thc0404=1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thc0507; t_op2:4 = thopcode2; Rt1215 = coprocessor_movefromRt(t_cpn,t_op1,t_op2,thCRn,thCRm); } :mrc2^ItCond thcpn,thc0507,"APSR_nzcv",thCRn,thCRm,thopcode2 is TMode=1 & ItCond & op8=0xfe & thc0507 & thc0404=1 & thCRn; Rt1215=15 & thcpn & thopcode2 & thc0404=1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op1:4 = thc0507; t_op2:4 = thopcode2; local tmp:4 = coprocessor_movefromRt(t_cpn,t_op1,t_op2,thCRn,thCRm); writeAPSR_nzcv(tmp); } :mrrc^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xec5 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op:4 = thopcode1; Rt1215 = coprocessor_movefromRt(t_cpn,t_op,thCRm); Rn0003 = coprocessor_movefromRt2(t_cpn,t_op,thCRm); } :mrrc2^ItCond thcpn,thopcode1,Rt1215,Rn0003,thCRm is TMode=1 & ItCond & op4=0xfc5 & Rn0003; Rt1215 & thcpn & thopcode1 & thCRm { build ItCond; t_cpn:4 = thcpn; t_op:4 = thopcode1; Rt1215 = coprocessor_movefromRt(t_cpn,t_op,thCRm); Rn0003 = coprocessor_movefromRt2(t_cpn,t_op,thCRm); } @endif #CDE @if defined(VERSION_7M) define pcodeop getMainStackPointer; define pcodeop getProcessStackPointer; define pcodeop getBasePriority; define pcodeop getCurrentExceptionNumber; mrsipsr: "i" is thc0000=1 & Rd0811 { b:1 = isCurrentModePrivileged(); if (!b) goto ; ipsr:4 = getCurrentExceptionNumber(); Rd0811 = Rd0811 | (ipsr & 0x1f); } mrsipsr: is thc0000=0 { } mrsepsr: "e" is thc0101=1 { } mrsepsr: is thc0101=0 { } mrsapsr: is thc0202=1 { } mrsapsr: "a" 
is thc0202=0 & Rd0811 { readAPSR_nzcvq(Rd0811); } mrspsr: mrsipsr^mrsepsr^mrsapsr^"psr" is mrsipsr & mrsepsr & mrsapsr & Rd0811 { Rd0811 = 0; build mrsapsr; build mrsipsr; } mrspsr: "xpsr" is sysm02=3 & mrsipsr & mrsepsr & mrsapsr & Rd0811 { Rd0811 = 0; build mrsapsr; build mrsipsr; } :mrs^ItCond Rd0811,mrspsr is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm37=0 & mrspsr { build ItCond; build mrspsr; } msp: "msp" is epsilon {} :mrs^ItCond Rd0811,msp is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=8 & msp { build ItCond; Rd0811 = getMainStackPointer(); } psp: "psp" is epsilon {} :mrs^ItCond Rd0811,psp is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=9 & psp { build ItCond; Rd0811 = getProcessStackPointer(); } primask: "primask" is epsilon {} :mrs^ItCond Rd0811,primask is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=16 & primask { build ItCond; Rd0811 = 0; b:1 = isCurrentModePrivileged(); if (!b) goto inst_next; Rd0811 = isIRQinterruptsEnabled(); # should reflect primask register/bit } basepri: "basepri" is epsilon {} :mrs^ItCond Rd0811,basepri is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=17 & basepri { build ItCond; Rd0811 = 0; b:1 = isCurrentModePrivileged(); if (!b) goto inst_next; Rd0811 = getBasePriority(); } basepri_max: "basepri_max" is epsilon {} :mrs^ItCond Rd0811,basepri_max is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=18 & basepri_max { build ItCond; Rd0811 = 0; b:1 = isCurrentModePrivileged(); if (!b) goto inst_next; Rd0811 = getBasePriority(); } faultmask: "faultmask" is epsilon {} :mrs^ItCond Rd0811,faultmask is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=19 & faultmask { build ItCond; Rd0811 = 0; b:1 = isCurrentModePrivileged(); if (!b) goto inst_next; Rd0811 = isFIQinterruptsEnabled(); # should reflect faultmask register/bit } define pcodeop isThreadModePrivileged; define pcodeop isUsingMainStack; control: "control" is epsilon {} :mrs^ItCond Rd0811,control is TMode=1 & 
  ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=20 & control
{
  # (continuation of mrs control) pack nPRIV/SPSEL into the low two bits
  build ItCond;
  notPrivileged:1 = isThreadModePrivileged() != 1:1;
  altStackMode:1 = isUsingMainStack() != 1:1;
  Rd0811 = zext((altStackMode << 1) | notPrivileged);
}
@endif

@if defined(CORTEX)
# Stack-pointer limit registers (msplim/psplim), modeled via opaque pcodeops.
define pcodeop setMainStackPointerLimit;
:msr^ItCond msplim,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=10 & msplim
{
  build ItCond;
  setMainStackPointerLimit(Rn0003);
}

define pcodeop setProcStackPointerLimit;
:msr^ItCond psplim,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=11 & psplim
{
  build ItCond;
  setProcStackPointerLimit(Rn0003);
}

define pcodeop getMainStackPointerLimit;
:mrs^ItCond Rd0811,msplim is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=10 & msplim
{
  build ItCond;
  Rd0811 = getMainStackPointerLimit();
}

define pcodeop getProcessStackPointerLimit;
:mrs^ItCond Rd0811,psplim is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=11 & psplim
{
  build ItCond;
  Rd0811 = getProcessStackPointerLimit();
}
@endif #CORTEX

# mrs cpsr: only the nzcvq flags are materialized.
:mrs^ItCond Rd0811,cpsr is TMode=1 & ItCond & op0=0xf3ef; op12=0x8 & Rd0811 & sysm=0 & cpsr
{
  build ItCond;
  tmp:4 = 0;
  readAPSR_nzcvq(tmp);
  Rd0811 = tmp;
}

:mrs^ItCond Rd0811,spsr is TMode=1 & ItCond & op0=0xf3ff; op12=0x8 & Rd0811 & sysm=0 & spsr
{
  build ItCond;
  Rd0811 = spsr;
}

@if defined(VERSION_7M)
# msr system-register letter subtables (7M).
msripsr: "i" is thc0000=1 { }
msripsr: is thc0000=0 { }
msrepsr: "e" is thc0101=1 { }
msrepsr: is thc0101=0 { }
msrapsr: is thc0202=1 { }
msrapsr: "a" is thc0202=0 & Rn0003
{
  cpsr = cpsr | (Rn0003 & 0xf8000000);
  writeAPSR_nzcvq(cpsr);
}
msrpsr: msripsr^msrepsr^msrapsr^"psr" is msripsr & msrepsr & msrapsr { build msrapsr; }
msrpsr: "xpsr" is sysm02=3 & msrapsr { build msrapsr; }

:msr^ItCond msrpsr,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm37=0 & msrpsr
{
  build ItCond;
  build msrpsr;
}

define pcodeop setMainStackPointer;
define pcodeop setProcessStackPointer;
define pcodeop setBasePriority;

# msr to system registers: all are privileged-only, hence the guard.
:msr^ItCond msp,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=8 & msp
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  setMainStackPointer(Rn0003);
}

:msr^ItCond psp,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=9 & psp
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  setProcessStackPointer(Rn0003);
}

:msr^ItCond primask,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=16 & primask
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  enableIRQinterrupts((Rn0003 & 1) == 1); # should set/clear primask register/bit
}

:msr^ItCond basepri,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=17 & basepri
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  setBasePriority(Rn0003);
}

# basepri_max only lowers (raises priority of) basepri, never raises it.
:msr^ItCond basepri_max,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=18 & basepri_max
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  if (Rn0003 == 0) goto inst_next;
  # TODO: does the following compare need to be signed??
  cur:4 = getBasePriority();
  if (cur != 0 && Rn0003 >= cur) goto inst_next;
  setBasePriority(Rn0003);
}

:msr^ItCond faultmask,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=19 & faultmask
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  enableFIQinterrupts((Rn0003 & 1) == 1);
}

define pcodeop setStackMode;
:msr^ItCond control,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & th_psrmask=8 & sysm=20 & control
{
  build ItCond;
  b:1 = isCurrentModePrivileged();
  if (!b) goto inst_next;
  privileged:1 = (Rn0003 & 1) == 0;
  setThreadModePrivileged(privileged);
  # TODO: not sure about the following semantics
  b = isThreadMode();
  if (!b) goto inst_next;
  stackMode:1 = isUsingMainStack() == 1:1;
  setStackMode(stackMode);
  # TODO: should we set sp ?
}
@endif

# psr field-mask suffix table: exports the byte mask selected by th_psrmask.
thpsrmask: is th_psrmask=0 { export 0:4; }
thpsrmask: "_c" is th_psrmask=1 { export 0xff:4; }
thpsrmask: "_x" is th_psrmask=2 { export 0xff00:4; }
thpsrmask: "_cx" is th_psrmask=3 { export 0xffff:4; }
thpsrmask: "_s" is th_psrmask=4 { export 0xff0000:4; }
thpsrmask: "_cs" is th_psrmask=5 { export 0xff00ff:4; }
thpsrmask: "_xs" is th_psrmask=6 { export 0xffff00:4; }
thpsrmask: "_cxs" is th_psrmask=7 { export 0xffffff:4; }
thpsrmask: "_f" is th_psrmask=8 { export 0xff000000:4; }
thpsrmask: "_cf" is th_psrmask=9 { export 0xff0000ff:4; }
thpsrmask: "_xf" is th_psrmask=10 { export 0xff00ff00:4; }
thpsrmask: "_cxf" is th_psrmask=11 { export 0xff00ffff:4; }
thpsrmask: "_sf" is th_psrmask=12 { export 0xffff0000:4; }
thpsrmask: "_csf" is th_psrmask=13 { export 0xffff00ff:4; }
thpsrmask: "_xsf" is th_psrmask=14 { export 0xffffff00:4; }
thpsrmask: "_cxsf" is th_psrmask=15 { export 0xffffffff:4; }
thcpsrmask: cpsr^thpsrmask is thpsrmask & cpsr { export thpsrmask; }

# msr cpsr_<fields>: merge Rn into cpsr under the selected byte mask.
:msr^ItCond thcpsrmask,Rn0003 is TMode=1 & ItCond & op4=0xf38 & Rn0003; op12=0x8 & thcpsrmask & thc0007=0
{
  build ItCond;
  build thcpsrmask;
  cpsr = (cpsr& ~thcpsrmask) | (Rn0003 & thcpsrmask);
  writeAPSR_nzcvq(cpsr);
}
# MSR spsr_<fields>,Rn: same byte-mask merge as the cpsr form, but no NZCVQ refresh.
thspsrmask: spsr^thpsrmask is thpsrmask & spsr { export thpsrmask; }
:msr^ItCond thspsrmask,Rn0003 is TMode=1 & ItCond & op4=0xf39 & Rn0003; op12=0x8 & thspsrmask & thc0007=0 { build ItCond; build thspsrmask; spsr = (spsr& ~thspsrmask) | (Rn0003 & thspsrmask); }
# MVN (immediate / register-shifted), 32-bit Thumb-2 encodings.
:mvn^thSBIT_ZN^ItCond Rd0811,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=3 & thSBIT_ZN & thc0003=15; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; Rd0811 = ~ThumbExpandImm12; resflags(Rd0811); build thSBIT_ZN; }
:mvn^thSBIT_ZN^ItCond^".w" Rd0811,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=3 & thSBIT_ZN & thc0003=15; thc1515=0 & Rd0811 & thshift2 { build ItCond; Rd0811 = ~thshift2; resflags(Rd0811); build thSBIT_ZN; }
@endif # VERSION_6T2 || VERSION_7
# MUL (16-bit): Rd = Rm * Rd; flags only outside an IT block (CheckInIT_ZN).
:mul^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10d & Rm0305 & Rd0002 & CheckInIT_ZN { build ItCond; Rd0002 = Rm0305 * Rd0002; resflags(Rd0002); build CheckInIT_ZN; }
@if defined(VERSION_6T2) || defined(VERSION_7)
# 32-bit multiply/multiply-accumulate encodings.
:mla^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb0 & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; Rd0811 = Rn0003 * Rm0003 + Ra1215; }
:mls^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb0 & Rn0003; Ra1215 & Rd0811 & sop0407=1 & Rm0003 { build ItCond; Rd0811 = Ra1215- Rn0003 * Rm0003; }
:mul^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb0 & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; Rd0811 = Rn0003 * Rm0003; }
@endif # VERSION_6T2 || VERSION_7
:mvn^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10f & Rm0305 & Rd0002 & CheckInIT_ZN { build ItCond; Rd0002 = ~Rm0305; resflags(Rd0002); build CheckInIT_ZN; }
# NOP encodings: 16-bit hint, 32-bit hint, and the legacy mov r8,r8 form.
:nop^ItCond is TMode=1 & ItCond & op0=0xbf00 { }
:nop^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8000 { }
:nop is op0=0x46c0 # This is just like a mov r0 r0
{ }
:orr^CheckInIT_ZN^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x10c & Rm0305 & Rd0002 & CheckInIT_ZN { build ItCond; Rd0002 = Rd0002 | Rm0305; resflags(Rd0002); build CheckInIT_ZN; }
@if defined(VERSION_6T2) || defined(VERSION_7)
# ORN/ORR (immediate and register-shifted Thumb-2 forms).
:orn^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=3 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; Rd0811 = Rn0003 | ~(ThumbExpandImm12); th_logicflags(); resflags(Rd0811); build thSBIT_CZNO; }
:orn^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=3 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; Rd0811 = Rn0003 | ~(thshift2); th_logicflags(); resflags(Rd0811); build thSBIT_CZNO; }
:orr^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=2 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; Rd0811 = Rn0003 | ThumbExpandImm12; th_logicflags(); resflags(Rd0811); build thSBIT_CZNO; }
:orr^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; Rd0811 = Rn0003 | thshift2; th_logicflags(); resflags(Rd0811); build thSBIT_CZNO; }
# PKHBT/PKHTB: pack low halfword of Rn with high/low halfword of the shifted Rm.
:pkhbt^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op4=0xeac & Rn0003; thc1515=0 & Rd0811 & thc0505=0 & thc0404=0 & thshift2 { build ItCond; Rd0811 = (Rn0003 & 0x0000ffff) | (thshift2 & 0xffff0000); th_logicflags(); resflags(Rd0811); }
:pkhtb^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op4=0xeac & Rn0003; thc1515=0 & Rd0811 & thc0505=1 & thc0404=0 & thshift2 { build ItCond; Rd0811 = (Rn0003 & 0xffff0000) | (thshift2 & 0x0000ffff); th_logicflags(); resflags(Rd0811); }
# PLD (preload data hint) addressing forms: positive imm12, negative imm8, pc-relative,
# and register with left shift; all fold into the HintPreloadData pcodeop.
:pld^ItCond Rn0003,"#"^offset12 is TMode=1 & ItCond & op6=0x3e2 & thwbit=0 & thc0404=1 & Rn0003; op12=0xf & offset12 { build ItCond; addr:4 = Rn0003 + offset12; HintPreloadData(addr); }
:pld^ItCond Rn0003,"#-"^immed8 is TMode=1 & ItCond & op6=0x3e0 & thwbit=0 & thc0404=1 & Rn0003; op8=0xfc & immed8 { build ItCond; addr:4 = Rn0003 - immed8; HintPreloadData(addr); }
:pld^ItCond PcrelOffset12 is TMode=1 & ItCond & (op8=0xf8 & thc0506=0 & thc0004=0x1f; thc1215=0xf) & PcrelOffset12 { build ItCond; HintPreloadData(PcrelOffset12); }
:pld^ItCond Rn0003,Rm0003"lsl #"^thc0405 is TMode=1 & ItCond & op6=0x3e0 & thwbit=0 & thc0404=1 & Rn0003; op8=0xf0 & thc0607=0 & thc0405 & Rm0003 { build ItCond; addr:4 = Rn0003 + (Rm0003 << thc0405); HintPreloadData(addr); }
# pld.w moved above ldrh to avoid conflicts
#pli moved above ldrsb
@endif # VERSION_6T2 || VERSION_7
#
# Removed the masking of the stack pointer on push and pop to ignore the lower 2 bits.
# This isn't really needed for modeling.
# NOTE: It may need to be put back in to model correctly for nasty stack shenanigans.
#
# POP (16-bit): without and with pc in the register list (pc form returns).
:pop^ItCond ldbrace is TMode=1 & ItCond & op9=0x5e & R=0 & ldbrace
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp;
	build ldbrace;
	sp = mult_addr;
}
:pop^ItCond pclbrace is TMode=1 & ItCond & op9=0x5e & R=1 & pclbrace
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp;
	build pclbrace;
	sp = mult_addr;
	LoadWritePC(pc);
	return [pc];
}
# POP (32-bit list encoding 0xe8bd), with and without pc.
:pop^ItCond thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thldrlist_inc
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp;
	build thldrlist_inc;
	sp = mult_addr;
}
:pop^ItCond thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thldrlist_inc & thc1515=1
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp;
	build thldrlist_inc;
	sp = mult_addr;
	LoadWritePC(pc);
	return [pc];
}
@if defined(VERSION_6T2) || defined(VERSION_7)
:pop^ItCond^".w" thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thc1515=0 & thc1313=0 & thldrlist_inc { build ItCond; mult_addr = sp; build thldrlist_inc; sp = mult_addr; }
# Single-register pop.w (ldr Rt,[sp],#4 encoding).
:pop^ItCond^".w" Rt1215 is TMode=1 & ItCond & op0=0xf85d; Rt1215 & offset12=0xb04 { build ItCond; Rt1215 = *sp; sp=sp+4; }
# pop.w with pc in the list — constraint continues on the following source line.
:pop^ItCond^".w" thldrlist_inc is TMode=1 & ItCond & op0=0xe8bd; thc1515=1 & thc1313=0 &
# Continuation: completes the pop.w-with-pc constructor begun on the previous line.
thldrlist_inc { build ItCond; mult_addr = sp; build thldrlist_inc; sp = mult_addr; LoadWritePC(pc); return [pc]; }
# Single-register pop.w that loads pc: branch through LoadWritePC.
:pop^ItCond^".w" Rt1215 is TMode=1 & ItCond & op0=0xf85d; Rt1215 & op12=15 & offset12=0xb04 { build ItCond; dest:4 = *sp; sp=sp+4; LoadWritePC(dest); return [pc]; }
# PUSH (32-bit forms): store-multiple decrement-before, and single-register str Rt,[sp,#-4]!.
:push^ItCond^".w" thstrlist_dec is TMode=1 & ItCond & op0=0xe8ad; thc1515=0 & thc1313=0 & thstrlist_dec { build ItCond; mult_addr = sp-4; build thstrlist_dec; sp = mult_addr + 4; }
:push^ItCond^".w" Rt1215 is TMode=1 & ItCond & op0=0xf84d; Rt1215 & offset12=0xd04 { build ItCond; sp=sp-4; *sp = Rt1215; }
@endif # VERSION_6T2 || VERSION_7
# PUSH (16-bit): without and with lr in the register list.
:push^ItCond psbrace is TMode=1 & ItCond & op9=0x5a & R=0 & psbrace
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp;
	build psbrace;
	sp = mult_addr;
}
:push^ItCond pcpbrace is TMode=1 & ItCond & op9=0x5a & R=1 & pcpbrace
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp;
	build pcpbrace;
	sp = mult_addr;
}
:push^ItCond thstrlist_dec is TMode=1 & ItCond & op0=0xe92d; thstrlist_dec
{
	build ItCond;
	# mult_addr = sp & 0xfffffffc;
	mult_addr = sp-4;
	build thstrlist_dec;
	sp = mult_addr+4;
}
@if defined(VERSION_5E)
# QADD: 32-bit signed saturating add; Q is set when the sum saturates.
:qadd^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x8 & Rm0003 { build ItCond; local sum1 = Rm0003 + Rn0003; sum1 = SignedSaturate(sum1,32:2); Q = SignedDoesSaturate(sum1,32:2); Rd0811 = sum1; }
@endif # VERSION_5E
@if defined(VERSION_6)
# QADD16: per-halfword signed saturating add.
# FIX: uRn previously read the LOW halfword of Rn ("(Rn0003) & 0xffff", missing the
# ">> 16"), so the high-halfword sum used the wrong operand; now it matches uRm and
# the parallel qasx/qsub16 constructors.
:qadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 { build ItCond; local lRn = Rn0003 & 0xffff; local lRm = Rm0003 & 0xffff; local uRn = (Rn0003 >> 16) & 0xffff; local uRm = (Rm0003 >> 16) & 0xffff; sum1:2 = lRn:2 + lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 + uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd0811 = (zext(sum2) << 16) | zext(sum1); }
# QADD8: per-byte signed saturating add.
:qadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 { build ItCond; local rn1 = Rn0003 & 0xff; local rm1 = Rm0003 & 0xff; local rn2 = (Rn0003 >> 8) & 0xff; local rm2 = (Rm0003 >> 8) & 0xff; local rn3 = (Rn0003 >> 16) & 0xff; local rm3 = (Rm0003 >> 16) & 0xff; local rn4 = (Rn0003 >> 24) & 0xff; local rm4 = (Rm0003 >> 24) & 0xff; sum1:1 = rn1:1 + rm1:1; sum1 = SignedSaturate(sum1,8:2); sum2:1 = rn2:1 + rm2:1; sum2 = SignedSaturate(sum2,8:2); sum3:1 = rn3:1 + rm3:1; sum3 = SignedSaturate(sum3,8:2); sum4:1 = rn4:1 + rm4:1; sum4 = SignedSaturate(sum4,8:2); Rd0811 = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); }
# qaddsubx
:qasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 { build ItCond; local lRn = Rn0003 & 0xffff; local lRm = Rm0003 & 0xffff; local uRn = (Rn0003 >> 16) & 0xffff; local uRm = (Rm0003 >> 16) & 0xffff; sum1:2 = lRn:2 - lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 + uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd0811 = (zext(sum2) << 16) | zext(sum1); }
@endif # VERSION_6
@if defined(VERSION_5E)
# QDADD: Rd = sat(sat(2*Rn) + Rm); Q accumulates saturation from both steps.
:qdadd^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x9 & Rm0003 { build ItCond; tmp:4 = Rn0003 * 2; tmp = SignedSaturate(tmp,32:2); Q = SignedDoesSaturate(tmp,32:2); tmp = tmp + Rm0003; tmp = SignedSaturate(tmp,32:2); Q = Q | SignedDoesSaturate(tmp,32:2); Rd0811 = tmp; }
# QDSUB: Rd = sat(Rm - sat(2*Rn)).
# FIX: the first SignedSaturate call was missing its 32:2 width argument (present in
# qdadd and in every other SignedSaturate call in this family).
:qdsub^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0xb & Rm0003 { build ItCond; tmp:4 = Rn0003 * 2; tmp = SignedSaturate(tmp,32:2); Q = SignedDoesSaturate(tmp,32:2); tmp = Rm0003 - tmp; tmp = SignedSaturate(tmp,32:2); Q = Q | SignedDoesSaturate(tmp,32:2); Rd0811 = tmp; }
@endif # VERSION_5E
@if defined(VERSION_6)
# qsubaddx
:qsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 { build ItCond; local lRn = Rn0003 & 0xffff; local lRm = Rm0003 & 0xffff; local uRn = (Rn0003 >> 16) & 0xffff; local uRm = (Rm0003 >> 16) & 0xffff; sum1:2 = lRn:2 + lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 - uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd0811 = (zext(sum2) << 16) | zext(sum1); }
@endif # VERSION_6
@if defined(VERSION_5E)
# QSUB: 32-bit signed saturating subtract (Rm - Rn).
:qsub^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0xa & Rm0003 { build ItCond; tmp:4 = Rm0003 - Rn0003; tmp = SignedSaturate(tmp,32:2); Q = SignedDoesSaturate(tmp,32:2); Rd0811 = tmp; }
@endif # VERSION_5E
@if defined(VERSION_6)
# QSUB16 / QSUB8: per-halfword / per-byte signed saturating subtract.
:qsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 { build ItCond; local lRn = Rn0003 & 0xffff; local lRm = Rm0003 & 0xffff; local uRn = (Rn0003 >> 16) & 0xffff; local uRm = (Rm0003 >> 16) & 0xffff; sum1:2 = lRn:2 - lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 - uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd0811 = (zext(sum2) << 16) | zext(sum1); }
:qsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x1 & Rm0003 { build ItCond; local rn1 = Rn0003 & 0xff; local rm1 = Rm0003 & 0xff; local rn2 = (Rn0003 >> 8) & 0xff; local rm2 = (Rm0003 >> 8) & 0xff; local rn3 = (Rn0003 >> 16) & 0xff; local rm3 = (Rm0003 >> 16) & 0xff; local rn4 = (Rn0003 >> 24) & 0xff; local rm4 = (Rm0003 >> 24) & 0xff; sum1:1 = rn1:1 - rm1:1; sum1 = SignedSaturate(sum1,8:2); sum2:1 = rn2:1 - rm2:1; sum2 = SignedSaturate(sum2,8:2); sum3:1 = rn3:1 - rm3:1; sum3 = SignedSaturate(sum3,8:2); sum4:1 = rn4:1 - rm4:1; sum4 = SignedSaturate(sum4,8:2); Rd0811 = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); }
@endif # VERSION_6
@if defined(THUMB_2)
# WARNING Rm0003 on the first 2 bytes must be the same value as Rm0003 on the last bytes!
# but there is no easy way to check this now...
# REV (Thumb-2 32-bit): byte-reverse a word.
:rev^ItCond Rd0811,Rm0003 is TMode=1 & ItCond & op4=0xfa9; op6=0x2e8 & Rd0811 & Rm0003 { build ItCond; local tmp1 = Rm0003 & 0xff; local tmp2 = (Rm0003 >> 8) & 0xff; local tmp3 = (Rm0003 >> 16) & 0xff; local tmp4 = (Rm0003 >> 24) & 0xff; Rd0811 = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; }
@endif # THUMB_2
# RSB (16-bit): Rd = 0 - Rm (negate), flags via th_subflags0.
:rsb^CheckInIT_CZNO^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x109 & Rm0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_subflags0(Rm0305); Rd0002 = 0-Rm0305; resflags(Rd0002); build CheckInIT_CZNO; }
@if defined(VERSION_6)
# 16-bit byte-reverse family: REV (word), REV16 (each halfword), REVSH (signed halfword).
:rev^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x2e8 & Rd0002 & Rm0305 { build ItCond; local tmp1 = Rm0305 & 0xff; local tmp2 = (Rm0305 >> 8) & 0xff; local tmp3 = (Rm0305 >> 16) & 0xff; local tmp4 = (Rm0305 >> 24) & 0xff; Rd0002 = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; }
:rev16^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x2e9 & Rd0002 & Rm0305 { build ItCond; local tmp1 = Rm0305 & 0xff; local tmp2 = (Rm0305 >> 8) & 0xff; local tmp3 = (Rm0305 >> 16) & 0xff; local tmp4 = (Rm0305 >> 24) & 0xff; Rd0002 = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; }
:revsh^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x2eb & Rd0002 & Rm0305 { build ItCond; local tmp1 = Rm0305 & 0xff; local tmp2 = (Rm0305 >> 8) & 0xff; local result = (tmp1 << 8) | tmp2; Rd0002 = sext(result:2); }
@if defined(VERSION_6T2) || defined(VERSION_7)
# Reverse the 8 bits of a byte, one bit per step (unrolled).
macro BitReverse(val) { tval:1 = val; result:1 = 0; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; val = result; }
# RBIT: bit-reverse each byte then swap byte order, i.e. full 32-bit reversal.
:rbit^ItCond Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=0xa & Rn0003 { build ItCond; t:4 = Rm0003 & 0xff; b1:1 = t:1; t = (Rm0003 >> 8) & 0xff; b2:1 = t:1; t = (Rm0003 >> 16) & 0xff; b3:1 = t:1; t = (Rm0003 >> 24) & 0xff; b4:1 = t:1; BitReverse(b1); BitReverse(b2); BitReverse(b3); BitReverse(b4); Rd0811 = (zext(b1) << 24) | (zext(b2) << 16) | (zext(b3) << 8) | zext(b4); }
# 32-bit ".w" byte-reverse forms.
:rev^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=8 & Rn0003 { build ItCond; local tmp1 = Rm0003 & 0xff; local tmp2 = (Rm0003 >> 8) & 0xff; local tmp3 = (Rm0003 >> 16) & 0xff; local tmp4 = (Rm0003 >> 24) & 0xff; Rd0811 = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; }
:rev16^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=9 & Rn0003 { build ItCond; local tmp1 = Rm0003 & 0xff; local tmp2 = (Rm0003 >> 8) & 0xff; local tmp3 = (Rm0003 >> 16) & 0xff; local tmp4 = (Rm0003 >> 24) & 0xff; Rd0811 = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; }
:revsh^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rm0003; op12=0xf & Rd0811 & thc0407=0xb & Rn0003 { build ItCond; local tmp1 = Rm0003 & 0xff; local tmp2 = (Rm0003 >> 8) & 0xff; local result = (tmp1 << 8) | tmp2; Rd0811 = sext(result:2); }
# RFE instructions for Thumb-2 "Encoding T1" and "Encoding T2" on page 1574
#
:rfedb part2Rd0003 is TMode=1 & part2c0615=0x3a0 & part2c0505=0x0 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000
{
	# register list is always: pc, cpsr
	ptr:4 = part2Rd0003 - 4;
	cpsr = *ptr;
	ptr = ptr - 4;
	dest:4 = *ptr;
	BranchWritePC(dest);
	return [pc];
}
:rfedb part2Rd0003^"!" is TMode=1 & part2c0615=0x3a0 & part2c0505=0x1 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000
{
	# register list is always: pc, cpsr
	ptr:4 = part2Rd0003 - 4;
	cpsr = *ptr;
	ptr = ptr - 4;
	dest:4 = *ptr;
	part2Rd0003 = ptr;
	BranchWritePC(dest);
	return [pc];
}
:rfeia part2Rd0003 is TMode=1 & part2c0615=0x3a6 & part2c0505=0x0 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000
{
	# register list is always: pc, cpsr
	ptr:4 = part2Rd0003;
	cpsr = *ptr;
	ptr = ptr + 4;
	dest:4 = *ptr;
	BranchWritePC(dest);
	return [pc];
}
:rfeia part2Rd0003^"!" is TMode=1 & part2c0615=0x3a6 & part2c0505=0x1 & part2c0404=0x1 & part2Rd0003 ; op0=0xc000
{
	# register list is always: pc, cpsr
	ptr:4 = part2Rd0003;
	cpsr = *ptr;
	ptr = ptr + 4;
	dest:4 = *ptr;
	part2Rd0003 = ptr + 4;
	BranchWritePC(dest);
	return [pc];
}
@endif # defined(VERSION_6T2) || defined(VERSION_7)
# RSB (32-bit immediate and register-shifted forms): Rd = operand2 - Rn.
:rsb^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=14 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; th_subflags(ThumbExpandImm12,Rn0003); Rd0811 = ThumbExpandImm12 - Rn0003; resflags(Rd0811); build thSBIT_CZNO; }
:rsb^thSBIT_CZNO^ItCond Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=14 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; th_subflags(thshift2,Rn0003); Rd0811 = thshift2 - Rn0003; resflags(Rd0811); build thSBIT_CZNO; }
@endif # VERSION_6
# Carry after ROR: unchanged when the rotate count is 0, else bit 31 of the result.
macro th_set_carry_for_ror(result, count) { local bit = result & 0x80000000; tmpCY = ((count == 0) && CY) || ((count != 0) && (bit != 0)); }
:ror^CheckInIT_CZN^ItCond Rd0002,Rs0305 is TMode=1 & ItCond & op6=0x107 & Rs0305 & Rd0002 & CheckInIT_CZN { build ItCond; local shift_amount = Rs0305 & 0x1f; local tmp = (Rd0002 >> shift_amount)|(Rd0002 << (32-shift_amount)); th_set_carry_for_ror(tmp,Rs0305 & 0xff); Rd0002 = tmp; resflags(Rd0002); build CheckInIT_CZN; }
@if defined(VERSION_6T2) || defined(VERSION_7)
# ror (immediate) — constraint continues on the following source line.
:ror^thSBIT_CZN^ItCond Rd0811,thshift2 is TMode=1 & ItCond &
# Continuation: completes the ror-immediate constructor begun on the previous line;
# the rotate itself is folded into the thshift2 sub-table.
op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & thc0003=0xf; thc1515=0 & Rd0811 & thc0405=3 & thshift2 { build ItCond; Rd0811 = thshift2; tmpCY = shift_carry; resflags(Rd0811); build thSBIT_CZN; }
# ROR (register, .w) and RRX (rotate right with extend through carry).
:ror^thSBIT_CZN^ItCond^".w" Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op8=0xfa & thc0707=0 & thc0506=3 & thSBIT_CZN & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; local shift_amount = Rm0003 & 0x1f; local tmp = (Rn0003>>shift_amount)|(Rn0003<<(32-shift_amount)); th_set_carry_for_ror(tmp,Rm0003 & 0xff); Rd0811 = tmp; resflags(Rd0811); build thSBIT_CZN; }
:rrx^thSBIT_CZN^ItCond Rd0811,Rm0003 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=2 & thSBIT_CZN & thc0003=0xf; thc1515=0 & thc1214=0 & Rd0811 & thc0607=0 & thc0405=3 & Rm0003 { build ItCond; local tmp1=Rm0003&1; shift_carry=tmp1(0); local tmp2 = (zext(CY)<<31)|(Rm0003>>1); Rd0811 = tmp2; th_logicflags(); resflags(Rd0811); build thSBIT_CZN; }
@endif # defined(VERSION_6T2) || defined(VERSION_7)
@if defined(VERSION_6T2) || defined(VERSION_7)
# SADD16/SADD8/SASX: parallel signed add/sub on halfwords/bytes; GE flags record the
# sign of each lane result.
:sadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; local tmpRn0003 = Rn0003; sum1:4 = sext(tmpRn0003[ 0,16]) + sext(tmpRm0003[ 0,16]); sum2:4 = sext(tmpRn0003[16,16]) + sext(tmpRm0003[16,16]); Rd0811[ 0,16] = sum1:2; Rd0811[16,16] = sum2:2; GE1 = sum1 s>= 0; GE2 = sum1 s>= 0; GE3 = sum2 s>= 0; GE4 = sum2 s>= 0; }
:sadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; local tmpRn0003 = Rn0003; sum1:4 = sext(tmpRn0003[ 0,8]) + sext(tmpRm0003[ 0,8]); sum2:4 = sext(tmpRn0003[ 8,8]) + sext(tmpRm0003[ 8,8]); sum3:4 = sext(tmpRn0003[16,8]) + sext(tmpRm0003[16,8]); sum4:4 = sext(tmpRn0003[24,8]) + sext(tmpRm0003[24,8]); Rd0811[ 0,8] = sum1:1; Rd0811[ 8,8] = sum2:1; Rd0811[16,8] = sum3:1; Rd0811[24,8] = sum4:1; GE1 = sum1 s>= 0; GE2 = sum2 s>= 0; GE3 = sum3 s>= 0; GE4 = sum4 s>= 0; }
:sasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; local tmpRn0003 = Rn0003; diff:4 = sext(tmpRn0003[ 0,16]) - sext(tmpRm0003[16,16]); sum:4 = sext(tmpRn0003[16,16]) + sext(tmpRm0003[ 0,16]); Rd0811[ 0,16] = diff[ 0,16]; Rd0811[16,16] = sum[ 0,16]; GE1 = diff s>= 0; GE2 = diff s>= 0; GE3 = sum s>= 0; GE4 = sum s>= 0; }
@endif # defined(VERSION_6T2) || defined(VERSION_7)
# SBC: subtract with carry (borrow = !CY).
:sbc^CheckInIT_CZNO^ItCond Rd0002,Rm0305 is TMode=1 & ItCond & op6=0x106 & Rm0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_sub_with_carry_flags(Rd0002,Rm0305); Rd0002 = Rd0002 - Rm0305 - zext(!CY); resflags(Rd0002); build CheckInIT_CZNO; }
@if defined(VERSION_6T2) || defined(VERSION_7)
:sbc^thSBIT_CZNO^ItCond Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=11 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; th_sub_with_carry_flags(Rn0003,ThumbExpandImm12); Rd0811 = Rn0003 - ThumbExpandImm12 - zext(!CY); resflags(Rd0811); build thSBIT_CZNO; }
:sbc^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=11 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; th_sub_with_carry_flags(Rn0003,thshift2); Rd0811 = Rn0003 - thshift2 - zext(!CY); resflags(Rd0811); build thSBIT_CZNO; }
# SBFX: signed bit-field extract via shift-left then arithmetic shift-right.
:sbfx^ItCond Rd0811,Rn0003,thLsbImm,thWidthMinus1 is TMode=1 & ItCond & op4=0xf34 & Rn0003; thc1515=0 & Rd0811 & thLsbImm & thWidthMinus1
{
	build ItCond;
	build thLsbImm;
	build thWidthMinus1;
	shift:4 = 31 - (thLsbImm + thWidthMinus1); # thMsbImm represents widthMinus1
	Rd0811 = Rn0003 << shift;
	shift = 31 - thWidthMinus1; # msbImm represents widthMinus1
	Rd0811 = Rd0811 s>> shift;
}
# SDIV: signed divide.
:sdiv^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb9 & Rn0003; op12=0xf & Rd0811 & thc0407=0xf & Rm0003 { build ItCond; local result = Rn0003 s/ Rm0003; Rd0811 = result; }
# SEL: per-byte select between Rn and Rm based on the GE flags.
:sel^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x8 & Rm0003 { build ItCond; Rd0811[ 0,8] = ((GE1 == 1) * Rn0003[ 0,8]) + ((GE1 == 0) * Rm0003[ 0,8]); Rd0811[ 8,8] = ((GE2 == 1) * Rn0003[ 8,8]) + ((GE2 == 0) * Rm0003[ 8,8]); Rd0811[16,8] = ((GE3 == 1) * Rn0003[16,8]) + ((GE3 == 0) * Rm0003[16,8]); Rd0811[24,8] = ((GE4 == 1) * Rn0003[24,8]) + ((GE4 == 0) * Rm0003[24,8]); }
# SH* family: signed halving parallel add/sub — lane result taken as bits [1,16]/[1,8],
# i.e. the sum divided by two.
:shadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 { build ItCond; sum1:4 = sext(Rn0003[ 0,16]) + sext(Rm0003[ 0,16]); sum2:4 = sext(Rn0003[16,16]) + sext(Rm0003[16,16]); Rd0811[ 0,16] = sum1[1,16]; Rd0811[16,16] = sum2[1,16]; }
:shadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 { build ItCond; sum1:4 = sext(Rn0003[ 0,8]) + sext(Rm0003[ 0,8]); sum2:4 = sext(Rn0003[ 8,8]) + sext(Rm0003[ 8,8]); sum3:4 = sext(Rn0003[16,8]) + sext(Rm0003[16,8]); sum4:4 = sext(Rn0003[24,8]) + sext(Rm0003[24,8]); Rd0811[ 0,8] = sum1[1,8]; Rd0811[ 8,8] = sum2[1,8]; Rd0811[16,8] = sum3[1,8]; Rd0811[24,8] = sum4[1,8]; }
:shasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 { build ItCond; diff:4 = sext(Rn0003[ 0,16]) - sext(Rm0003[16,16]); sum:4 = sext(Rn0003[16,16]) + sext(Rm0003[ 0,16]); Rd0811[ 0,16] = diff[1,16]; Rd0811[16,16] = sum[1,16]; }
:shsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 { build ItCond; sum:4 = sext(Rn0003[ 0,16]) + sext(Rm0003[16,16]); diff:4 = sext(Rn0003[16,16]) - sext(Rm0003[ 0,16]); Rd0811[ 0,16] = sum[1,16]; Rd0811[16,16] = diff[1,16]; }
:shsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 { build ItCond; diff1:4 = sext(Rn0003[ 0,16]) - sext(Rm0003[ 0,16]); diff2:4 = sext(Rn0003[16,16]) - sext(Rm0003[16,16]); Rd0811[ 0,16] = diff1[1,16]; Rd0811[16,16] = diff2[1,16]; }
:shsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x2 & Rm0003 { build ItCond; diff1:4 = sext(Rn0003[ 0,8]) - sext(Rm0003[ 0,8]); diff2:4 = sext(Rn0003[ 8,8]) - sext(Rm0003[ 8,8]); diff3:4 = sext(Rn0003[16,8]) - sext(Rm0003[16,8]); diff4:4 = sext(Rn0003[24,8]) - sext(Rm0003[24,8]); Rd0811[ 0,8] = diff1[1,8]; Rd0811[ 8,8] = diff2[1,8]; Rd0811[16,8] = diff3[1,8]; Rd0811[24,8] = diff4[1,8]; }
@endif # defined(VERSION_6T2) || defined(VERSION_7)
# Halfword-selector sub-tables: "b" exports the low halfword, "t" the high halfword.
thXBIT: "b" is Rn0003 ; thc0505=0 { local tmpRn0003 = Rn0003; tmp_x:2 = tmpRn0003:2; export tmp_x; }
thXBIT: "t" is Rn0003 ; thc0505=1 { local tmpRn0003 = Rn0003; tmp_x:2 = tmpRn0003(2); export tmp_x; }
thYBIT: "b" is thc0404=0 & Rm0003 { local tmpRm0003 = Rm0003; tmp_y:2 = tmpRm0003:2; export tmp_y; }
thYBIT: "t" is thc0404=1 & Rm0003 { local tmpRm0003 = Rm0003; tmp_y:2 = tmpRm0003(2); export tmp_y; }
# SMLA<x><y>: 16x16 signed multiply-accumulate into Rt1215.
:smla^thXBIT^thYBIT^ItCond Rd0811,Rn0003,Rm0003,Rt1215 is TMode=1 & ItCond & (op4=0xfb1 & Rn0003; Rt1215 & Rd0811 & thc0607=0 & thYBIT & Rm0003) & thXBIT
{
	build ItCond;
	tmp:4 = sext(thXBIT) * sext(thYBIT);
	Q = scarry(tmp,Rt1215) || Q; #Q flag is never cleared by this instruction
	Rd0811 = tmp + Rt1215;
}
# Dual-halfword selectors for the SMLAD/SMUAD family: "X" swaps Rm's halfwords.
thdXbot: "" is thc0404=0 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003:2; export tmp; }
thdXbot: "X" is thc0404=1 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003(2); export tmp; }
thdXtop: "" is thc0404=0 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003(2); export tmp; }
thdXtop: "X" is thc0404=1 & Rm0003 { local tmpRm0003 = Rm0003; tmp:2 = tmpRm0003:2; export tmp; }
# SMLAD: dual 16x16 multiply, add both products into Ra — semantics continue on the
# following source line.
:smlad^thdXbot^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb2 & Rn0003; Ra1215 & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; rnbot:2 = tmpRn0003:2; rntop:2 = tmpRn0003(2); tmpbot:4 = sext(rnbot) * sext(thdXbot); tmptop:4 =
# Continuation: completes the smlad constructor begun on the previous line.
sext(rntop) * sext(thdXtop);
tmp:4 = sext(tmpbot) + sext(tmptop);
Q = scarry(tmp,Ra1215) || Q; #Q flag is never cleared by this instruction
Rd0811 = tmp + Ra1215; }
# SMLALD: dual 16x16 multiply, accumulate into the 64-bit Rd:Rt pair.
:smlald^thdXbot^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbc & Rn0003; Rt1215 & Rd0811 & thc0507=6 & thdXbot & thdXtop & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; rnbot:2 = tmpRn0003:2; rntop:2 = tmpRn0003(2); tmpbot:4 = sext(rnbot) * sext(thdXbot); tmptop:4 = sext(rntop) * sext(thdXtop); accum:8 = (sext(Rd0811) << 32) | zext(Rt1215); tmp:8 = sext(tmpbot) + sext(tmptop); accum = tmp + accum; Rt1215 = accum:4; Rd0811 = accum(4); }
# SMLAL: 32x32 signed multiply-accumulate-long into Rd:Rt.
:smlal^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbc & Rn0003; Rt1215 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; accum:8 = (sext(Rd0811) << 32) | zext(Rt1215); val:8 = sext(Rn0003) * sext(Rm0003) + accum; Rt1215 = val(0); Rd0811 = val(4); }
# SMLAL<x><y>: 16x16 signed multiply, accumulate into the 64-bit pair.
:smlal^thXBIT^thYBIT^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & (op4=0xfbc & Rn0003; Rt1215 & Rd0811 & thc0607=2 & thYBIT & Rm0003) & thXBIT { build ItCond; tmp:4 = sext(thXBIT) * sext(thYBIT); accum:8 = (zext(Rd0811) << 32) | zext(Rt1215); val:8 = sext(tmp) + accum; Rt1215 = val(0); Rd0811 = val(4); }
# SMLAW<y>: 32x16 multiply (48-bit intermediate), accumulate Ra<<16, keep bits [47:16].
:smlaw^thYBIT^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb3 & Rn0003; Ra1215 & Rd0811 & thc0507=0 & thYBIT & Rm0003
{
	build ItCond;
	local tmp:6 = (sext(Rn0003) * sext(thYBIT));
	local addend:6 = sext(Ra1215) << 16;
	Q = scarry(tmp,addend) || Q; #this instruction never clears the Q flag
	tmp = tmp + addend;
	Rd0811 = tmp(2);
}
# SMLSD: dual 16x16 multiply, subtract products, accumulate into Ra.
:smlsd^thdXbot^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb4 & Rn0003; Ra1215 & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003
{
	build ItCond;
	local tmpRn0003 = Rn0003;
	local rnbot:2 = tmpRn0003:2;
	local rntop:2 = tmpRn0003(2);
	local prod1:4 = sext(rnbot) * sext(thdXbot);
	local prod2:4 = sext(rntop) * sext(thdXtop);
	local diff = prod1 - prod2;
	Q = scarry(diff,Ra1215) || Q; #instruction never clears Q flag
	Rd0811 = diff + Ra1215;
}
# SMLSLD: dual 16x16 multiply, subtract, accumulate into the 64-bit pair.
:smlsld^thdXbot^ItCond Rt1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbd & Rn0003; Rt1215 & Rd0811 & thc0507=6 & thdXbot & thdXtop & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; local rnbot:2 = tmpRn0003:2; local rntop:2 = tmpRn0003(2); local tmpbot:4 = sext(rnbot) * sext(thdXbot); local tmptop:4 = sext(rntop) * sext(thdXtop); local accum:8 = (sext(Rd0811) << 32) | zext(Rt1215); local tmp:8 = sext(tmpbot) - sext(tmptop); accum = tmp + accum; Rt1215 = accum:4; Rd0811 = accum(4); }
# SMML*/SMMUL*: 32x32 most-significant-word multiply; the "r" variants add 0x80000000
# to round before taking the top word.
:smmla^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; Ra1215 & Rd0811 & thc0407=0 & Rm0003 { build ItCond; local val:8 = sext(Rn0003) * sext(Rm0003); local accum:8 = (zext(Ra1215)) << 32; val = val + accum; Rd0811 = val(4); }
:smmlar^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; Ra1215 & Rd0811 & thc0407=1 & Rm0003 { build ItCond; local val:8 = sext(Rn0003) * sext(Rm0003); local accum:8 = (zext(Ra1215)) << 32; val = val + accum + 0x80000000; Rd0811 = val(4); }
:smmls^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb6 & Rn0003; Ra1215 & Rd0811 & thc0407=0 & Rm0003 { build ItCond; local val:8 = sext(Rn0003) * sext(Rm0003); val = (zext(Ra1215) << 32) - val; Rd0811 = val(4); }
:smmlsr^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb6 & Rn0003; Ra1215 & Rd0811 & thc0407=1 & Rm0003 { build ItCond; local val:8 = sext(Rn0003) * sext(Rm0003); val = (zext(Ra1215) << 32) - val; val = val + 0x80000000; Rd0811 = val(4); }
:smmul^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; thc1215=0xf & Rd0811 & thc0407=0 & Rm0003 { build ItCond; val:8 = sext(Rn0003) * sext(Rm0003); Rd0811 = val(4); }
:smmulr^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb5 & Rn0003; thc1215=0xf & Rd0811 & thc0407=1 & Rm0003 { build ItCond; val:8 = sext(Rn0003) * sext(Rm0003); val = val + 0x80000000; Rd0811 = val(4); }
# SMUAD: dual 16x16 multiply, sum of products.
:smuad^thdXbot^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb2 & Rn0003; thc1215=0xf & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003
{
	build ItCond;
	local tmpRn0003 = Rn0003;
	local rnbot:2 = tmpRn0003:2;
	local rntop:2 = tmpRn0003(2);
	local prod1:4 = sext(rnbot) * sext(thdXbot);
	local prod2:4 = sext(rntop) * sext(thdXtop);
	Q = scarry(prod1,prod2) || Q; #instruction does not clear the Q flag
	Rd0811 = prod1 + prod2;
}
# SMUL<x><y>: 16x16 signed multiply of selected halfwords.
:smulbb^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; local tmpRm0003 = Rm0003; op1:2 = tmpRn0003:2; op2:2 = tmpRm0003:2; Rd0811 = sext(op1) * sext(op2); }
:smulbt^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=1 & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; local tmpRm0003 = Rm0003; op1:2 = tmpRn0003:2; op2:2 = tmpRm0003(2); Rd0811 = sext(op1) * sext(op2); }
:smultb^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=2 & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; local tmpRm0003 = Rm0003; op1:2 = tmpRn0003(2); op2:2 = tmpRm0003:2; Rd0811 = sext(op1) * sext(op2); }
:smultt^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb1 & Rn0003; op12=15 & Rd0811 & sop0407=3 & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; local tmpRm0003 = Rm0003; op1:2 = tmpRn0003(2); op2:2 = tmpRm0003(2); Rd0811 = sext(op1) * sext(op2); }
# SMULL: 32x32 signed multiply-long into Rd:Ra.
:smull^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb8 & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003 { build ItCond; val:8 = sext(Rn0003) * sext(Rm0003); Ra1215 = val(0); Rd0811 = val(4); }
# SMUSD: dual 16x16 multiply, difference of products — body continues past this point.
:smusd^thdXbot^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb4 & Rn0003; thc1215=0xf & Rd0811 & thc0507=0 & thdXbot & thdXtop & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; rnbot:2 = tmpRn0003:2; rntop:2 = tmpRn0003(2); tmpbot:4 = sext(rnbot) * sext(thdXbot); tmptop:4 = sext(rntop) * sext(thdXtop); tmp:8 = sext(tmpbot) - sext(tmptop); Rd0811 = tmp:4;
} :smulw^thYBIT^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb3 & Rn0003; thc1215=0xf & Rd0811 & thc0507=0 & thYBIT & Rm0003 { build ItCond; tmp:8 = (sext(Rn0003) * sext(thYBIT)) s>> 16; Rd0811 = tmp:4; } :srsdb^ItCond sp^"!",thSRSMode is TMode=1 & ItCond & op6=0x3a0 & sp & thc0505=1 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode { build ItCond; # register list is always: r14, spsr ptr:4 = sp - 4; *ptr = lr; ptr = ptr - 4; *ptr = spsr; sp = ptr; } :srsdb^ItCond sp,thSRSMode is TMode=1 & ItCond & op6=0x3a0 & sp & thc0505=0 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode { build ItCond; # register list is always: r14, spsr ptr:4 = sp - 4; *ptr = lr; ptr = ptr - 4; *ptr = spsr; } :srsib^ItCond sp^"!",thSRSMode is TMode=1 & ItCond & op6=0x3a6 & sp & thc0505=1 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode { build ItCond; # register list is always: r14, spsr ptr:4 = sp + 4; *ptr = lr; ptr = ptr + 4; *ptr = spsr; sp = ptr; } :srsia^ItCond sp,thSRSMode is TMode=1 & ItCond & op6=0x3a6 & sp & thc0505=0 & thc0004=0xd; op8=0xc0 & sop0507=0 & thSRSMode { build ItCond; # register list is always: r14, spsr ptr:4 = sp + 4; *ptr = lr; ptr = ptr + 4; *ptr = spsr; } @if defined(VERSION_6T2) || defined(VERSION_7) # ssat and ssat16 were defined elsewhere and moved here to preserve sort order # shift operands for ssat and usat: th2_shift0: is imm3_shft=0x0 & imm2_shft=0x0 { } th2_shift0: ",lsl "^thLsbImm is imm3_shft & imm2_shft & thLsbImm { } th2_shift1: ",asr "^thLsbImm is imm3_shft & imm2_shft & thLsbImm { } th2_shift1: ",asr #32" is imm3_shft=0x0 & imm2_shft=0x0 { } :ssat Rt0811, thMsbImm, part2Rd0003^th2_shift0 is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xc & part2c0505=0x0 & part2c0404=0x0 & part2Rd0003 ; thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift0 & thMsbImm & thLsbImm { # Shift bit is 0 tmpRn:4 = part2Rd0003 << thLsbImm; tmp:4 = SignedSaturate(tmpRn, thMsbImm); Q = SignedDoesSaturate(tmpRn, thMsbImm); Rt0811 = tmp; } :ssat Rt0811, thMsbImm, 
part2Rd0003^th2_shift1 is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xc & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003; thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift1 & thMsbImm & thLsbImm { # Shift bit is 1 tmpRn:4 = part2Rd0003 s>> thLsbImm; tmp:4 = SignedSaturate(tmpRn, thMsbImm); Q = SignedDoesSaturate(tmpRn, thMsbImm); Rt0811 = tmp; } :ssat16 Rt0811, Immed4, part2Rd0003 is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xc & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003; op12=0x0 & Rt0811 & thc0407=0x0 & Immed4 { tmp:4 = SignedSaturate(part2Rd0003, Immed4); Q = SignedDoesSaturate(part2Rd0003, Immed4); Rt0811 = tmp; } :ssax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; sum:4 = sext(Rn0003[ 0,16]) + sext(Rm0003[16,16]); diff:4 = sext(Rn0003[16,16]) - sext(Rm0003[ 0,16]); Rd0811[ 0,16] = sum[0,16]; Rd0811[16,16] = diff[0,16]; GE1 = sum s>= 0; GE2 = sum s>= 0; GE3 = diff s>= 0; GE4 = diff s>= 0; } :ssub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; diff1:4 = sext(Rn0003[ 0,16]) - sext(Rm0003[ 0,16]); diff2:4 = sext(Rn0003[16,16]) - sext(Rm0003[16,16]); Rd0811[ 0,16] = diff1[0,16]; Rd0811[16,16] = diff2[0,16]; GE1 = diff1 s>= 0; GE2 = diff1 s>= 0; GE3 = diff2 s>= 0; GE4 = diff2 s>= 0; } :ssub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; diff1:4 = sext(Rn0003[ 0,8]) - sext(Rm0003[ 0,8]); diff2:4 = sext(Rn0003[ 8,8]) - sext(Rm0003[ 8,8]); diff3:4 = sext(Rn0003[16,8]) - sext(Rm0003[16,8]); diff4:4 = sext(Rn0003[24,8]) - sext(Rm0003[24,8]); Rd0811[ 0,8] = diff1[0,8]; Rd0811[ 8,8] = diff2[0,8]; Rd0811[16,8] = diff3[0,8]; Rd0811[24,8] = diff4[0,8]; GE1 = diff1 s>= 0; GE2 = diff2 s>= 0; GE3 = diff3 s>= 0; GE4 = diff4 s>= 0; } :umull^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfba & Rn0003; Ra1215 & 
Rd0811 & sop0407=0 & Rm0003 { build ItCond; val:8 = zext(Rn0003) * zext(Rm0003); Ra1215 = val(0); Rd0811 = val(4); }

# UMAAL: {RdHi,RdLo} = Rn*Rm + RdLo + RdHi (unsigned multiply, double accumulate)
:umaal^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbe & Rn0003; Ra1215 & Rd0811 & sop0407=6 & Rm0003
{ build ItCond; val:8 = zext(Rn0003) * zext(Rm0003) + zext(Ra1215) + zext(Rd0811); Ra1215 = val(0); Rd0811 = val(4); }

# UMLAL: 64-bit accumulator {RdHi,RdLo} += Rn*Rm (unsigned)
:umlal^ItCond Ra1215,Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbe & Rn0003; Ra1215 & Rd0811 & sop0407=0 & Rm0003
{ build ItCond; accum:8 = (zext(Rd0811) << 32) | zext(Ra1215); val:8 = zext(Rn0003) * zext(Rm0003) + accum; Ra1215 = val(0); Rd0811 = val(4); }

@endif # defined(VERSION_6T2) || defined(VERSION_7)

@if defined(VERSION_6)

thumbEndianNess: "LE" is op0=0xb650 { export 0:1; }
thumbEndianNess: "BE" is op0=0xb658 { export 1:1; }

:setend^ItCond thumbEndianNess is TMode=1 & ItCond & (op0=0xb650 | op0=0xb658) & thumbEndianNess
{ setEndianState(thumbEndianNess); }

:sev^ItCond is TMode=1 & ItCond & op0=0xbf40 { build ItCond; }

# SEV.W second halfword is 0x8004 (Thumb-2 hint encoding; compare wfe.w 0x8002,
# wfi.w 0x8003, yield.w 0x8001 defined later in this file). The constant was
# previously the decimal literal 8004 (== 0x1f44), so this pattern could never match.
:sev^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8004 { build ItCond; }

@endif # VERSION_6

@if defined(VERSION_6T2) || defined(VERSION_7)
@ifndef CDE

# STC/STC2: coprocessor stores, forwarded to the coprocessor_store* pcode userops
:stc^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=0 & thL4=0; thCRd & thcpn) & taddrmode5
{ build ItCond; build taddrmode5; t_cpn:4 = thcpn; coprocessor_store(t_cpn,thCRd,taddrmode5); }

:stcl^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x76 & thN6=1 & thL4=0; thCRd & thcpn) & taddrmode5
{ build ItCond; build taddrmode5; t_cpn:4 = thcpn; coprocessor_storelong(t_cpn,thCRd,taddrmode5); }

:stc2^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=0 & thL4=0; thCRd & thcpn) & taddrmode5
{ build ItCond; build taddrmode5; t_cpn:4 = thcpn; coprocessor_store(t_cpn,thCRd,taddrmode5); }

:stc2l^ItCond thcpn,thCRd,taddrmode5 is (TMode=1 & ItCond & op9=0x7e & thN6=1 & thL4=0; thCRd & thcpn) & taddrmode5
{ build ItCond; build taddrmode5; t_cpn:4 = thcpn;
coprocessor_storelong(t_cpn,thCRd,taddrmode5); } @endif # CDE :stm^ItCond Rn0003,thstrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=0 & thc0404=0 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_inc { build ItCond; mult_addr = Rn0003; build thstrlist_inc; } :stm^ItCond^".w" Rn0003!,thstrlist_inc is TMode=1 & ItCond & op11=0x1d & thc0910=0 & sop0608=2 & thwbit=1 & thc0404=0 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_inc { build ItCond; mult_addr = Rn0003; build thstrlist_inc; Rn0003 = mult_addr; } :stmdb^ItCond Rn0003!,thstrlist_dec is TMode=1 & ItCond & op4=0xe92 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_dec { build ItCond; mult_addr = Rn0003-4; build thstrlist_dec; Rn0003 = mult_addr + 4; } :stmdb^ItCond Rn0003,thstrlist_dec is TMode=1 & ItCond & op4=0xe90 & Rn0003; thc1515=0 & thc1313=0 & thstrlist_dec { build ItCond; mult_addr = Rn0003-4; build thstrlist_dec; } @endif # defined(VERSION_6T2) || defined(VERSION_7) :stmia^ItCond Rn_exclaim,stbrace is TMode=1 & ItCond & op11=0x18 & Rn_exclaim & stbrace & Rn_exclaim_WB { build ItCond; build Rn_exclaim; build stbrace; build Rn_exclaim_WB; } :str^ItCond Rd0002,RnIndirect4 is TMode=1 & ItCond & op11=0xc & RnIndirect4 & Rd0002 { build ItCond; *RnIndirect4 = Rd0002; } :str^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x28 & RnRmIndirect & Rd0002 { build ItCond; *RnRmIndirect = Rd0002; } :str^ItCond Rd0810,Sprel8Indirect is TMode=1 & ItCond & op11=0x12 & Sprel8Indirect & Rd0810 { build ItCond; *Sprel8Indirect = Rd0810; } :strb^ItCond Rd0002,RnIndirect1 is TMode=1 & ItCond & op11=0xe & RnIndirect1 & Rd0002 { build ItCond; local tmpRd0002 = Rd0002; *RnIndirect1 = tmpRd0002:1; } :strb^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x2a & RnRmIndirect & Rd0002 { build ItCond; local tmpRd0002 = Rd0002; *RnRmIndirect = tmpRd0002:1; } :strh^ItCond Rd0002,RnIndirect2 is TMode=1 & ItCond & op11=0x10 & RnIndirect2 & Rd0002 { build ItCond; local tmpRd0002 = Rd0002; *RnIndirect2 = tmpRd0002:2; } 
:strh^ItCond Rd0002,RnRmIndirect is TMode=1 & ItCond & op9=0x29 & RnRmIndirect & Rd0002 { build ItCond; local tmpRd0002 = Rd0002; *RnRmIndirect = tmpRd0002:2; } :strt^ItCond^".w" Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf84 & Rn0003; Rt1215 & thc0811=14 & Immed8 { build ItCond; local tmp = Rn0003 + Immed8; *tmp = Rt1215; } @if defined(VERSION_6T2) || defined(VERSION_7) :str.w^ItCond Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8c; Rt1215) & RnIndirect12 { build ItCond; *RnIndirect12 = Rt1215; } :str.w^ItCond Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf84; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) { build ItCond; build RnIndirectPUW; *RnIndirectPUW = Rt1215; } :str^ItCond^".w" Rt1215,[Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xf84 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405=0 & Rm0003 { build ItCond; local tmp = Rn0003 + Rm0003; *tmp = Rt1215; } :str^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf84 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 { build ItCond; local tmp = Rn0003 + (Rm0003 << thc0405); *tmp = Rt1215; } :strb^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf88; Rt1215) & RnIndirect12 { build ItCond; build RnIndirect12; local tmpRt1215 = Rt1215; *RnIndirect12 = tmpRt1215:1; } :strb^ItCond^".w" Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf80; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) { build ItCond; build RnIndirectPUW; local tmpRt1215 = Rt1215; *RnIndirectPUW = tmpRt1215:1; } :strb^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf80 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 { build ItCond; local tmp = Rn0003 + (Rm0003 << thc0405); local tmpRt1215 = Rt1215; *tmp = tmpRt1215:1; } :strbt^ItCond Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf80 & Rn0003; Rt1215 & thc0811=14 & Immed8 { build ItCond; local tmp = Rn0003 + Immed8; local tmpRt1215 = Rt1215; *tmp = tmpRt1215:1; } :strd^ItCond 
Rt1215,Rt0811,RnIndirectPUW1 is TMode=1 & ItCond & (op9=0x74 & thc0910=0 & thc0606=1 & thc0404=0 & Rn0003; Rt1215 & Rt0811) & $(RN_INDIRECT_PUW1) { build ItCond; build RnIndirectPUW1; local tmp = RnIndirectPUW1; *tmp = Rt1215; tmp = tmp + 4; *tmp = Rt0811; } :strh^ItCond^".w" Rt1215,RnIndirect12 is TMode=1 & ItCond & (op4=0xf8A; Rt1215) & RnIndirect12 { build ItCond; local tmpRt1215 = Rt1215; *RnIndirect12 = tmpRt1215:2; } :strh^ItCond Rt1215,RnIndirectPUW is TMode=1 & ItCond & (op4=0xf82; Rt1215 & thc1111=1) & $(RN_INDIRECT_PUW) { build ItCond; build RnIndirectPUW; local tmpRt1215 = Rt1215; *RnIndirectPUW = tmpRt1215:2; } :strh^ItCond^".w" Rt1215,[Rn0003,Rm0003,"lsl #"^thc0405] is TMode=1 & ItCond & op4=0xf82 & Rn0003; Rt1215 & thc1111=0 & sop0610=0 & thc0405 & Rm0003 { build ItCond; local tmp = Rn0003 + (Rm0003 << thc0405); local tmpRt1215 = Rt1215; *tmp = tmpRt1215:2; } :strht^ItCond Rt1215,[Rn0003,Immed8] is TMode=1 & ItCond & op4=0xf82 & Rn0003; Rt1215 & thc0811=14 & Immed8 { build ItCond; local tmp = Rn0003 + Immed8; local tmpRt1215 = Rt1215; *tmp = tmpRt1215:2; } :strex^ItCond Rd0811,Rt1215,[Rn0003,Immed8_4] is TMode=1 & ItCond & op4=0xe84 & Rn0003; Rt1215 & Rd0811 & Immed8_4 { build ItCond; local tmp = Rn0003 + Immed8_4; local tmpRt = Rt1215; access:1 = hasExclusiveAccess(tmp); Rd0811 = 1; if (!access) goto inst_next; Rd0811 = 0; *tmp = tmpRt; } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_7) :strexb^ItCond Rd0003,Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8c & Rn0003; Rt1215 & thc0811=15 & thc0407=4 & Rd0003 { build ItCond; local tmp = Rn0003; local tmpRt = Rt1215; access:1 = hasExclusiveAccess(tmp); Rd0003 = 1; if (!access) goto inst_next; Rd0003 = 0; *tmp = tmpRt:1; } :strexh^ItCond Rd0003,Rt1215,[Rn0003] is TMode=1 & ItCond & op4=0xe8c & Rn0003; Rt1215 & thc0811=15 & thc0407=5 & Rd0003 { build ItCond; local tmp = Rn0003; local tmpRt = Rt1215; access:1 = hasExclusiveAccess(tmp); Rd0003 = 1; if (!access) goto inst_next; Rd0003 = 0; *tmp = 
tmpRt:2; } :strexd^ItCond Rd0003,Rt1215,Rt0811,[Rn0003] is TMode=1 & ItCond & op4=0xe8c & Rn0003; Rt1215 & Rt0811 & thc0407=7 & Rd0003 { build ItCond; local tmp = Rn0003; local tmpRt = Rt1215; local tmpRt2 = Rt0811; access:1 = hasExclusiveAccess(tmp); Rd0003 = 1; if (!access) goto inst_next; Rd0003 = 0; *tmp = tmpRt; tmp = tmp + 4; *tmp = tmpRt2; } @endif # VERSION_7 :sub^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Immed3 is TMode=1 & ItCond & op9=0xf & Immed3 & Rn0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_subflags(Rn0305,Immed3); Rd0002 = Rn0305 - Immed3; resflags(Rd0002); build CheckInIT_CZNO; } :sub^CheckInIT_CZNO^ItCond Rd0810,Immed8 is TMode=1 & ItCond & op11=7 & Rd0810 & Immed8 & CheckInIT_CZNO { build ItCond; th_subflags(Rd0810,Immed8); Rd0810 = Rd0810 - Immed8; resflags(Rd0810); build CheckInIT_CZNO; } :sub^CheckInIT_CZNO^ItCond Rd0002,Rn0305,Rm0608 is TMode=1 & ItCond & op9=0xd & Rm0608 & Rn0305 & Rd0002 & CheckInIT_CZNO { build ItCond; th_subflags(Rn0305,Rm0608); Rd0002 = Rn0305 - Rm0608; resflags(Rd0002); build CheckInIT_CZNO; } :sub^ItCond sp,Immed7_4 is TMode=1 & ItCond & op7=0x161 & sp & Immed7_4 { build ItCond; sp = sp - Immed7_4; } @if defined(VERSION_6T2) || defined(VERSION_7) :sub^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=13 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; th_subflags(Rn0003,ThumbExpandImm12); Rd0811 = Rn0003-ThumbExpandImm12; resflags(Rd0811); build thSBIT_CZNO; } :subw^ItCond Rd0811,Rn0003,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & Rn0003; thc1515=0 & Rd0811) & Immed12 { build ItCond; th_subflags(Rn0003,Immed12); Rd0811 = Rn0003-Immed12; resflags(Rd0811); } :sub^thSBIT_CZNO^ItCond^".w" Rd0811,Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=13 & thSBIT_CZNO & Rn0003; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; local tmp = thshift2; 
th_subflags(Rn0003,tmp); Rd0811 = Rn0003-tmp; resflags(Rd0811); build thSBIT_CZNO; } :sub^thSBIT_CZNO^ItCond^".w" Rd0811,sp,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=13 & thSBIT_CZNO & sp & sop0003=0xd; thc1515=0 & Rd0811) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; th_subflags(sp,ThumbExpandImm12); Rd0811 = sp-ThumbExpandImm12; resflags(Rd0811); build thSBIT_CZNO; } :sub^ItCond pc,lr,Immed8 is TMode=1 & ItCond & op4=0xf3d & pc & sop0003=0xe; op8=0x8f & lr & Immed8 { build ItCond; build Immed8; th_subflags(lr,Immed8); dest:4 = lr-Immed8; resflags(dest); cpsr=spsr; SetThumbMode( ((cpsr >> 5) & 1) != 0 ); pc = dest; goto [pc]; } :subw^ItCond Rd0811,sp,Immed12 is TMode=1 & ItCond & (op11=0x1e & thc0909=1 & sop0508=5 & thc0404=0 & sop0003=0xd & sp; thc1515=0 & Rd0811) & Immed12 { build ItCond; th_subflags(sp,Immed12); Rd0811 = sp-Immed12; resflags(Rd0811); } :sub^thSBIT_CZNO^ItCond^".w" Rd0811,sp,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=13 & thSBIT_CZNO & sop0003=0xd & sp; thc1515=0 & Rd0811 & thshift2 { build ItCond; build thshift2; local tmp = thshift2; th_subflags(sp,tmp); Rd0811 = sp-tmp; resflags(Rd0811); build thSBIT_CZNO; } @endif # VERSION_6T2 || VERSION_7 :svc^ItCond immed8 is TMode=1 & ItCond & op8=0xdf & immed8 { build ItCond; tmp:4 = immed8; software_interrupt(tmp); } @if defined(VERSION_6T2) || defined(VERSION_7) :sxtab^ItCond Rd0811, Rn0003, Rm0003, ByteRotate is TMode=1 & ItCond & op4=0xfa4 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = sext(tmp:1) + Rn0003; } :sxtab^ItCond Rd0811, Rn0003, Rm0003 is TMode=1 & ItCond & op4=0xfa4 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; Rd0811 = sext(tmpRm0003:1) + Rn0003; } :sxtab16^ItCond Rd0811, Rn0003, Rm0003, ByteRotate is TMode=1 & ItCond & op4=0xfa2 & Rn0003; op12=0xf & Rd0811 & 
thc0707=1 & thc0606=0 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); local tmpRn0003 = Rn0003; tmpL:2 = sext(tmp:1) + tmpRn0003:2; tmp = tmp >> 16; tmpH:2 = sext(tmp:1) + tmpRn0003(2); Rd0811 = zext(tmpL) + (zext(tmpH) << 16); } :sxtab16^ItCond Rd0811, Rn0003, Rm0003 is TMode=1 & ItCond & op4=0xfa2 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRn0003 = Rn0003; local tmpRm0003 = Rm0003; tmpL:2 = sext(tmpRm0003:1) + tmpRn0003:2; local tmp = tmpRm0003 >> 16; tmpH:2 = sext(tmp:1) + tmpRn0003(2); Rd0811 = zext(tmpL) + (zext(tmpH) << 16); } :sxtah^ItCond Rd0811, Rn0003, Rm0003, ByteRotate is TMode=1 & ItCond & op4=0xfa0 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = sext(tmp:2) + Rn0003; } :sxtah^ItCond Rd0811, Rn0003, Rm0003 is TMode=1 & ItCond & op4=0xfa0 & Rn0003; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; Rd0811 = sext(tmpRm0003:2) + Rn0003; } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_6) :sxtb^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=0 & thc0606=1 & Rm0305 & Rd0002 { build ItCond; local tmpRm0305 = Rm0305; Rd0002 = sext(tmpRm0305:1); } :sxtb^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa4f; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = sext(tmp:1); } :sxtb^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa4f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; Rd0811 = sext(tmpRm0003:1); } @endif # VERSION_6 @if defined(VERSION_6T2) || defined(VERSION_7) :sxtb16^ItCond Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa2f; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 { build ItCond; 
tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); tmpL:2 = sext(tmp:1); tmp = tmp >> 16; tmpH:2 = sext(tmp:1); Rd0811 = zext(tmpL) + (zext(tmpH) << 16); } :sxtb16^ItCond Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa2f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; tmpL:2 = sext(tmpRm0003:1); tmp:4 = tmpRm0003 >> 16; tmpH:2 = sext(tmp:1); Rd0811 = zext(tmpL) + (zext(tmpH) << 16); } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_6) :sxth^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=0 & thc0606=0 & Rm0305 & Rd0002 { build ItCond; local tmpRm0305 = Rm0305; Rd0002 = sext(tmpRm0305:2); } :sxth^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa0f; op12=0xf & Rd0811 & thc0707=1 & thc0606=0 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = sext(tmp:2); } :sxth^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa0f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; Rd0811 = sext(tmpRm0003:2); } @endif # VERSION_6 @if defined(VERSION_6T2) || defined(VERSION_7) :tbb^ItCond [Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; op8=0xf0 & thc0507=0 & thc0404=0 & Rm0003 { build ItCond; local tmp = Rn0003 + Rm0003; offs:1 = *tmp; SetThumbMode(1); pc = inst_next + (zext(offs) * 2); goto [pc]; } :tbh^ItCond [Rn0003,Rm0003] is TMode=1 & ItCond & op4=0xe8d & Rn0003; op8=0xf0 & thc0507=0 & thc0404=1 & Rm0003 { build ItCond; local tmp = Rn0003 + (Rm0003 * 2); offs:2 = *tmp; SetThumbMode(1); pc = inst_next + (zext(offs) * 2); goto [pc]; } Pcrel: [pc,Rm0003] is Rm0003 & thc0404=0 & pc { local tmp = Rm0003; tmp = inst_next + tmp; val:1 = *tmp; tmp = zext(val); export tmp; } Pcrel: [pc,Rm0003] is Rm0003 & thc0404=1 & pc { local tmp = Rm0003; tmp = inst_next + (tmp * 2); val:2 = *tmp; tmp = zext(val); export tmp; } :tbb^ItCond Pcrel is TMode=1 & ItCond & op4=0xe8d & thc0003=15; 
op8=0xf0 & thc0507=0 & thc0404=0 & Pcrel { build ItCond; SetThumbMode(1); pc = inst_next + (Pcrel * 2); goto [pc]; } :tbh^ItCond Pcrel is TMode=1 & ItCond & op4=0xe8d & thc0003=15; op8=0xf0 & thc0507=0 & thc0404=1 & Pcrel { build ItCond; SetThumbMode(1); pc = inst_next + (Pcrel * 2); goto [pc]; } @endif # VERSION_6T2 || VERSION_7 :tst^ItCond Rn0002,Rm0305 is TMode=1 & ItCond & op6=0x108 & Rm0305 & Rn0002 { build ItCond; local tmp = Rn0002 & Rm0305; ZR = (tmp == 0); NG = (tmp s< 0); } @if defined(VERSION_6T2) || defined(VERSION_7) :teq^ItCond Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=4 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; local tmp = Rn0003 ^ ThumbExpandImm12; th_test_flags(tmp); } :teq^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=4 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf & thshift2 { build ItCond; build thshift2; local tmp = Rn0003 ^ thshift2; th_test_flags(tmp); } :tst^ItCond Rn0003,ThumbExpandImm12 is TMode=1 & ItCond & (op11=0x1e & thc0909=0 & sop0508=0 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf) & ThumbExpandImm12 { build ItCond; build ThumbExpandImm12; local tmp = Rn0003 & ThumbExpandImm12; th_test_flags(tmp); } :tst^ItCond^".w" Rn0003,thshift2 is TMode=1 & ItCond & op11=0x1d & thc0910=1 & sop0508=0 & thc0404=1 & Rn0003; thc1515=0 & thc0811=0xf & thshift2 { build ItCond; build thshift2; local tmp = Rn0003 & thshift2; th_test_flags(tmp); } :uadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 { build ItCond; sum1:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[ 0,16]); sum2:4 = zext(Rn0003[16,16]) + zext(Rm0003[16,16]); GE1 = carry(Rn0003[0,16],Rm0003[0,16]); GE2 = GE1; GE3 = carry(Rn0003[16,16],Rm0003[16,16]); GE4 = GE3; Rd0811[ 0,16] = sum1[0,16]; Rd0811[16,16] = sum2[0,16]; } :uadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; 
op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 { build ItCond; sum1:4 = zext(Rn0003[ 0,8]) + zext(Rm0003[ 0,8]); sum2:4 = zext(Rn0003[ 8,8]) + zext(Rm0003[ 8,8]); sum3:4 = zext(Rn0003[16,8]) + zext(Rm0003[16,8]); sum4:4 = zext(Rn0003[24,8]) + zext(Rm0003[24,8]); GE1 = carry(Rn0003[0,8],Rm0003[0,8]); GE2 = carry(Rn0003[8,8],Rm0003[8,8]); GE3 = carry(Rn0003[16,8],Rm0003[16,8]); GE4 = carry(Rn0003[24,8],Rm0003[24,8]); Rd0811[ 0,8] = sum1[0,8]; Rd0811[ 8,8] = sum2[0,8]; Rd0811[16,8] = sum3[0,8]; Rd0811[24,8] = sum4[0,8]; } :uasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 { build ItCond; diff:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[16,16]); sum:4 = zext(Rn0003[16,16]) + zext(Rm0003[ 0,16]); GE1 = diff s>= 0; GE2 = GE1; GE3 = carry(Rn0003[16,16],Rm0003[0,16]); GE4 = GE3; Rd0811[ 0,16] = diff[0,16]; Rd0811[16,16] = sum[0,16]; } :uhadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 { build ItCond; sum1:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[ 0,16]); sum2:4 = zext(Rn0003[16,16]) + zext(Rm0003[16,16]); Rd0811[ 0,16] = sum1[1,16]; Rd0811[16,16] = sum2[1,16]; } :uhadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 { build ItCond; sum1:4 = zext(Rn0003[ 0,8]) + zext(Rm0003[ 0,8]); sum2:4 = zext(Rn0003[ 8,8]) + zext(Rm0003[ 8,8]); sum3:4 = zext(Rn0003[16,8]) + zext(Rm0003[16,8]); sum4:4 = zext(Rn0003[24,8]) + zext(Rm0003[24,8]); Rd0811[ 0,8] = sum1[1,8]; Rd0811[ 8,8] = sum2[1,8]; Rd0811[16,8] = sum3[1,8]; Rd0811[24,8] = sum4[1,8]; } :uhasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 { build ItCond; diff:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[16,16]); sum:4 = zext(Rn0003[16,16]) + zext(Rm0003[ 0,16]); Rd0811[ 0,16] = diff[1,16]; Rd0811[16,16] = sum[1,16]; } :uhsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & 
Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 { build ItCond; sum:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[16,16]); diff:4 = zext(Rn0003[16,16]) - zext(Rm0003[ 0,16]); Rd0811[ 0,16] = sum[1,16]; Rd0811[16,16] = diff[1,16]; } :uhsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[ 0,16]); diff2:4 = zext(Rn0003[16,16]) - zext(Rm0003[16,16]); Rd0811[ 0,16] = diff1[1,16]; Rd0811[16,16] = diff2[1,16]; } :uhsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x6 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); Rd0811[ 0,8] = diff1[1,8]; Rd0811[ 8,8] = diff2[1,8]; Rd0811[16,8] = diff3[1,8]; Rd0811[24,8] = diff4[1,8]; } :uqadd16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa9 & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 { build ItCond; sum1:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[ 0,16]); sum2:4 = zext(Rn0003[16,16]) + zext(Rm0003[16,16]); tmp1:4 = UnsignedSaturate(sum1, 16:2); tmp2:4 = UnsignedSaturate(sum2, 16:2); Rd0811[ 0,16] = tmp1[0,16]; Rd0811[16,16] = tmp2[0,16]; } :uqadd8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa8 & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 { build ItCond; sum1:4 = zext(Rn0003[ 0,8]) + zext(Rm0003[ 0,8]); sum2:4 = zext(Rn0003[ 8,8]) + zext(Rm0003[ 8,8]); sum3:4 = zext(Rn0003[16,8]) + zext(Rm0003[16,8]); sum4:4 = zext(Rn0003[24,8]) + zext(Rm0003[24,8]); tmp1:4 = UnsignedSaturate(sum1, 8:2); tmp2:4 = UnsignedSaturate(sum2, 8:2); tmp3:4 = UnsignedSaturate(sum3, 8:2); tmp4:4 = UnsignedSaturate(sum4, 8:2); Rd0811[ 0,8] = tmp1[0,8]; Rd0811[ 8,8] = tmp2[0,8]; Rd0811[16,8] = tmp3[0,8]; Rd0811[24,8] = tmp4[0,8]; } :uqasx^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond 
& op4=0xfaa & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 { build ItCond; diff:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[16,16]); sum:4 = zext(Rn0003[16,16]) + zext(Rm0003[ 0,16]); tmpdiff:4 = UnsignedSaturate(diff, 16:2); tmpsum:4 = UnsignedSaturate(sum, 16:2); Rd0811[ 0,16] = tmpdiff[0,16]; Rd0811[16,16] = tmpsum[0,16]; } :uqsax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 { build ItCond; sum:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[16,16]); diff:4 = zext(Rn0003[16,16]) - zext(Rm0003[ 0,16]); tmpsum:4 = UnsignedSaturate(sum, 16:2); tmpdiff:4 = UnsignedSaturate(diff, 16:2); Rd0811[ 0,16] = tmpsum[0,16]; Rd0811[16,16] = tmpdiff[0,16]; } :uqsub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[ 0,16]); diff2:4 = zext(Rn0003[16,16]) - zext(Rm0003[16,16]); tmp1:4 = UnsignedSaturate(diff1, 16:2); tmp2:4 = UnsignedSaturate(diff2, 16:2); Rd0811[ 0,16] = tmp1[0,16]; Rd0811[16,16] = tmp2[0,16]; } :uqsub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x5 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); tmp1:4 = UnsignedSaturate(diff1, 8:2); tmp2:4 = UnsignedSaturate(diff2, 8:2); tmp3:4 = UnsignedSaturate(diff3, 8:2); tmp4:4 = UnsignedSaturate(diff4, 8:2); Rd0811[ 0,8] = tmp1[0,8]; Rd0811[ 8,8] = tmp2[0,8]; Rd0811[16,8] = tmp3[0,8]; Rd0811[24,8] = tmp4[0,8]; } :usad8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfb7 & Rn0003; op12=0xf & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); diff4:4 = 
zext(Rn0003[24,8]) - zext(Rm0003[24,8]); absdiff1:4 = Absolute(diff1); absdiff2:4 = Absolute(diff2); absdiff3:4 = Absolute(diff3); absdiff4:4 = Absolute(diff4); Rd0811 = absdiff1 + absdiff2 + absdiff3 + absdiff4; } :usada8^ItCond Rd0811,Rn0003,Rm0003,Ra1215 is TMode=1 & ItCond & op4=0xfb7 & Rn0003; Ra1215 & Rd0811 & thc0407=0x0 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); absdiff1:4 = Absolute(diff1); absdiff2:4 = Absolute(diff2); absdiff3:4 = Absolute(diff3); absdiff4:4 = Absolute(diff4); # The manual specifies a zero extension of Ra to an unspecified # intermediate precision, followed by truncation to 4 bytes. In this # model, zext is retained, but it has no effect because the # intermediate precision is 4 bytes. Rd0811 = zext(Ra1215) + absdiff1 + absdiff2 + absdiff3 + absdiff4; } # usat and ussat16 were defined elsewhere and moved here to preserve sort order :usat Rt0811, thMsbImm, part2Rd0003^th2_shift0 is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2c0505=0x0 & part2c0404=0x0 & part2Rd0003 ; thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift0 & thMsbImm & thLsbImm { # Shift bit is 0 tmpRn:4 = part2Rd0003 << thLsbImm; tmp:4 = UnsignedSaturate(tmpRn, thMsbImm); Q = UnsignedDoesSaturate(tmpRn, thMsbImm); Rt0811 = tmp; } :usat Rt0811, thMsbImm, part2Rd0003^th2_shift1 is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003 ; thc1515=0x0 & Rt0811 & thc0505=0x0 & th2_shift1 & thMsbImm & thLsbImm { # Shift bit is 1 tmpRn:4 = part2Rd0003 s>> thLsbImm; tmp:4 = UnsignedSaturate(tmpRn, thMsbImm); Q = UnsignedDoesSaturate(tmpRn, thMsbImm); Rt0811 = tmp; } :usat16 Rt0811, Immed4, part2Rd0003 is TMode=1 & part2op=0x1e & part2S=0x0 & part2cond=0xe & part2c0505=0x1 & part2c0404=0x0 & part2Rd0003 ; op12=0x0 & Rt0811 & thc0407=0x0 & Immed4 { 
tmp:4 = UnsignedSaturate(part2Rd0003, Immed4); Q = UnsignedDoesSaturate(part2Rd0003, Immed4); Rt0811 = tmp; } :usax^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfae & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 { build ItCond; sum:4 = zext(Rn0003[ 0,16]) + zext(Rm0003[16,16]); diff:4 = zext(Rn0003[16,16]) - zext(Rm0003[ 0,16]); Rd0811[ 0,16] = sum[0,16]; Rd0811[16,16] = diff[0,16]; # this odd looking condition tests that the 16 bit sum overflowed, # which would have made it a negative number. That's how it's # documented, but to be consistent they might have used s< 0. GE1 = sum s>= 0x10000; GE2 = sum s>= 0x10000; GE3 = diff s>= 0; GE4 = diff s>= 0; } :usub16^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfad & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,16]) - zext(Rm0003[ 0,16]); diff2:4 = zext(Rn0003[16,16]) - zext(Rm0003[16,16]); Rd0811[ 0,16] = diff1[0,16]; Rd0811[16,16] = diff2[0,16]; GE1 = diff1 s>= 0; GE2 = diff1 s>= 0; GE3 = diff2 s>= 0; GE4 = diff2 s>= 0; } :usub8^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfac & Rn0003; op12=0xf & Rd0811 & thc0407=0x4 & Rm0003 { build ItCond; diff1:4 = zext(Rn0003[ 0,8]) - zext(Rm0003[ 0,8]); diff2:4 = zext(Rn0003[ 8,8]) - zext(Rm0003[ 8,8]); diff3:4 = zext(Rn0003[16,8]) - zext(Rm0003[16,8]); diff4:4 = zext(Rn0003[24,8]) - zext(Rm0003[24,8]); Rd0811[ 0,8] = diff1[0,8]; Rd0811[ 8,8] = diff2[0,8]; Rd0811[16,8] = diff3[0,8]; Rd0811[24,8] = diff4[0,8]; GE1 = diff1 s>= 0; GE2 = diff2 s>= 0; GE3 = diff3 s>= 0; GE4 = diff4 s>= 0; } :ubfx^ItCond Rd0811,Rn0003,thLsbImm,thWidthMinus1 is TMode=1 & ItCond & op4=0xf3c & Rn0003; thc1515=0 & Rd0811 & thLsbImm & thc0505=0 & thWidthMinus1 { build ItCond; build thLsbImm; build thWidthMinus1; shift:4 = 31 - (thLsbImm + thWidthMinus1); # thMsbImm represents widthMinus1 Rd0811 = Rn0003 << shift; shift = 31 - thWidthMinus1; # msbImm represents widthMinus1 Rd0811 = Rd0811 >> shift; } :udiv^ItCond 
Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfbb & Rn0003; op12=0xf & Rd0811 & thc0407=0xf & Rm0003 { build ItCond; result:8 = zext(Rn0003) / zext(Rm0003); Rd0811 = result(0); } :uxtab^ItCond Rd0811,Rn0003,Rm0003,ByteRotate is TMode=1 & ItCond & op4=0xfa5 & Rn0003; op12=15 & Rd0811 & thc0707=1 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = Rn0003 + zext(tmp:1); } :uxtab16^ItCond Rd0811,Rn0003,Rm0003,ByteRotate is TMode=1 & ItCond & op4=0xfa3 & Rn0003; op12=15 & Rd0811 & thc0707=1 & ByteRotate & Rm0003 { build ItCond; rotated:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); local tmp_b = rotated:1; local tmpRn0003 = Rn0003; tmpl:2 = tmpRn0003:2 + zext(tmp_b); local tmph = (rotated >> 16); tmp_b = tmph:1; tmph = (tmpRn0003 >> 16) + zext(tmp_b); Rd0811 = (tmph << 16) | zext(tmpl); } :uxtah^ItCond Rd0811,Rn0003,Rm0003 is TMode=1 & ItCond & op4=0xfa1 & Rn0003; op12=15 & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; Rd0811 = Rn0003 + zext(tmpRm0003:2); } :uxtah^ItCond Rd0811,Rn0003,Rm0003,ByteRotate is TMode=1 & ItCond & op4=0xfa1 & Rn0003; op12=15 & Rd0811 & thc0707=1 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = Rn0003 + zext(tmp:2); } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_6) :uxtb^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=1 & thc0606=1 & Rm0305 & Rd0002 { build ItCond; local tmpRm0305 = Rm0305; Rd0002 = zext(tmpRm0305:1); } @endif # VERSION_6 @if defined(VERSION_6T2) || defined(VERSION_7) :uxtb^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa5f; op12=0xf & Rd0811 & thc0707=1 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = zext(tmp:1); } :uxtb^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa5f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local 
tmpRm0003 = Rm0003; Rd0811 = zext(tmpRm0003:1); } :uxtb16^ItCond Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa3f; op12=0xf & Rd0811 & thc0707=1 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = tmp & 0x00ff00ff; } :uxtb16^ItCond Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa3f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; Rd0811 = Rm0003 & 0x00ff00ff; } @endif # VERSION_6T2 || VERSION_7 @if defined(VERSION_6) :uxth^ItCond Rd0002, Rm0305 is TMode=1 & ItCond & op8=0xb2 & thc0707=1 & thc0606=0 & Rm0305 & Rd0002 { build ItCond; local tmpRm0305 = Rm0305; Rd0002 = zext(tmpRm0305:2); } @endif # VERSION_6 @if defined(VERSION_6T2) || defined(VERSION_7) :uxth^ItCond^".w" Rd0811, Rm0003, ByteRotate is TMode=1 & ItCond & op0=0xfa1f; op12=0xf & Rd0811 & thc0707=1 & ByteRotate & Rm0003 { build ItCond; tmp:4 = (Rm0003 >> ByteRotate) | Rm0003 << ( 32 - ByteRotate); Rd0811 = zext(tmp:2); } :uxth^ItCond^".w" Rd0811, Rm0003 is TMode=1 & ItCond & op0=0xfa1f; op12=0xf & Rd0811 & thc0707=1 & throt=0 & Rm0003 { build ItCond; local tmpRm0003 = Rm0003; Rd0811 = zext(tmpRm0003:2); } @endif # VERSION_6T2 || VERSION_7 # V* see ARMneon.sinc @if defined(VERSION_6) :wfe^ItCond is TMode=1 & ItCond & op0=0xbf20 { WaitForEvent(); } :wfi^ItCond is TMode=1 & ItCond & op0=0xbf30 { WaitForInterrupt(); } :yield^ItCond is TMode=1 & ItCond & op0=0xbf10 { HintYield(); } @endif # VERSION_6 @if defined(VERSION_6T2) || defined(VERSION_7) :wfe^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8002 { WaitForEvent(); } :wfi^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8003 { WaitForInterrupt(); } :yield^ItCond^".w" is TMode=1 & ItCond & op0=0xf3af; op0=0x8001 { HintYield(); } @endif #VERSION_6T2 || VERSION_7 } # End with : ARMcondCk=1 ================================================ FILE: pypcode/processors/ARM/data/languages/ARM_CDE.sinc ================================================ @if defined(CDE) # ARMv8-M 
Custom Datapath Extension acc: "a" is thc1212=1 { local tmp:1 = 1; export *[const]:1 tmp; } acc: "" is thc1212=0 { local tmp:1 = 0; export *[const]:1 tmp; } vacc: "a" is thv_c2828=1 { local tmp:1 = 1; export *[const]:1 tmp; } vacc: "" is thv_c2828=0 { local tmp:1 = 0; export *[const]:1 tmp; } cx1_imm: val is thc0005; thop2 & thopcode3 [val=(thc0005 << 7) | (thop2 << 6) | thopcode3;] {export *[const]:4 val; } cx2_imm: val is thc0405; thop2 & thopcode3 [val=(thc0405 << 7) | (thop2 << 6) | thopcode3;] {export *[const]:4 val; } cx3_imm: val is thop1; thop2 & thop3 [val=(thop1 << 3) | (thop2 << 2) | thop3;] {export *[const]:4 val; } vcx1_imm: val is thv_c2424 & thv_c1619 & thv_c0707 & thv_c0005 [val = (thv_c2424 << 11) |(thv_c1619 << 7) | (thv_c0707 << 6) | thv_c0005;] {export *[const]:4 val; } vcx2_imm: val is thv_c2424 & thv_c1619 & thv_c0707 & thv_c0404 [val = (thv_c2424 << 6 ) |(thv_c1619 << 2) | (thv_c0707 << 1) | thv_c0404;] {export *[const]:4 val; } vcx3_imm: val is thv_c2424 & thv_c2021 & thv_c0404 [val = (thv_c2424 << 3 ) |(thv_c2021 << 1) | thv_c0404;] {export *[const]:4 val; } fvcx1_imm: val is thv_c1619 & thv_c0707 & thv_c0005 [val = (thv_c1619 << 7) | (thv_c0707 << 6) | thv_c0005;] {export *[const]:4 val; } fvcx2_imm: val is thv_c1619 & thv_c0707 & thv_c0404 [val = (thv_c1619 << 2) | (thv_c0707 << 1) | thv_c0404;] {export *[const]:4 val; } fvcx3_imm: val is thv_c2021 & thv_c0404 [val = (thv_c2021 << 1) | thv_c0404;] {export *[const]:4 val; } cx_coRd: Ra1215 is Ra1215 { export Ra1215; } cx_coRd:"APSR_nzcv" is Ra1215=15 { tmp:4 = 0; readAPSR_nzcv(tmp); export tmp; } cx_coRn: Rn0003 is Rn0003 { export Rn0003; } cx_coRn:"APSR_nzcv" is Rn0003=15 { tmp:4 = 0; readAPSR_nzcv(tmp); export tmp; } cx_coRm: Ra1215 is Ra1215 { export Ra1215; } cx_coRm:"APSR_nzcv" is Ra1215=15 { tmp:4 = 0; readAPSR_nzcv(tmp); export tmp; } cx_coRd0: Rd0003 is Rd0003 { export Rd0003; } cx_coRd0:"APSR_nzcv" is Rd0003=15 { tmp:4 = 0; readAPSR_nzcv(tmp); export tmp; } # Pseudo-ops define 
pcodeop cx1; # Rd = cx1(Coprocessor #, operation, Rd, accumulator, size) define pcodeop cx2; # Rd = cx2(Coprocessor #, operation, Rd, Rn, accumulator, size) define pcodeop cx3; # Rd = cx3(Coprocessor #, operation, Rd, Rn, Rm, accumulator, size) define pcodeop vcx1; # Rd = vcx1(Coprocessor #, operation, Rd, accumulator, size, vectored) define pcodeop vcx2; # Rd = vcx2(Coprocessor #, operation, Rd, Rn, accumulator, size, vectored) define pcodeop vcx3; # Rd = vcx3(Coprocessor #, operation, Rd, Rn, Rm, accumulator, size, vectored) :cx1^acc^ItCond thcop, cx_coRd, cx1_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0607=0; cx_coRd & thc1111=0 & thcop & thc0606=0) & cx1_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx1_imm; t_acc:1 = acc; cx_coRd = cx1(t_cpn, t_op1, cx_coRd, t_acc, 32:1); } :cx1^acc^ItCond thcop, cx_coRd, cx1_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0607=0; (cx_coRd & Ra1215=15) & thc1111=0 & thcop & thc0606=0) & cx1_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx1_imm; t_acc:1 = acc; cx_coRd = cx1(t_cpn, t_op1, cx_coRd, t_acc, 32:1); writeAPSR_nzcv(cx_coRd); } :cx1d^acc^ItCond thcop, Ra1215, Rd1215hi, cx1_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0607=0; Ra1215 & Rd1215hi & thc1111=0 & thcop & thc0606=1) & cx1_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx1_imm; t_acc:1 = acc; result:8 = cx1(t_cpn, t_op1, Ra1215, Rd1215hi, t_acc, 64:1); Ra1215 = result(0); Rd1215hi = result(4); } :cx2^acc^ItCond thcop, cx_coRd, cx_coRn, cx2_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0607=1 & cx_coRn; cx_coRd & thc1111=0 & thcop & thc0606=0) & cx2_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx2_imm; t_acc:1 = acc; cx_coRd = cx2(t_cpn, t_op1, cx_coRd, cx_coRn, t_acc, 32:1); } :cx2^acc^ItCond thcop, cx_coRd, cx_coRn, cx2_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0607=1 & cx_coRn; (cx_coRd & Ra1215=15) & thc1111=0 & thcop & thc0606=0) & cx2_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = 
cx2_imm; t_acc:1 = acc; cx_coRd = cx2(t_cpn, t_op1, cx_coRd, cx_coRn, t_acc, 32:1); writeAPSR_nzcv(cx_coRd); } :cx2d^acc^ItCond thcop, Ra1215, Rd1215hi, cx_coRn, cx2_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0607=1 & cx_coRn; Ra1215 & Rd1215hi & thc1111=0 & thcop & thc0606=1) & cx2_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx2_imm; t_acc:1 = acc; result:8 = cx2(t_cpn, t_op1, Ra1215, Rd1215hi, cx_coRn, t_acc, 64:1); Ra1215 = result(0); Rd1215hi = result(4); } :cx3^acc^ItCond thcop, cx_coRd0, cx_coRn, cx_coRm, cx3_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0707=1 & cx_coRn; cx_coRm & thc1111=0 & thcop & thc0606=0 & cx_coRd0) & cx3_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx3_imm; t_acc:1 = acc; cx_coRd0 = cx3(t_cpn, t_op1, cx_coRd0, cx_coRn, cx_coRm, t_acc, 32:1); } :cx3^acc^ItCond thcop, cx_coRd0, cx_coRn, cx_coRm, cx3_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0707=1 & cx_coRn; cx_coRm & thc1111=0 & thcop & thc0606=0 & (cx_coRd0 & Rd0003=15)) & cx3_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx3_imm; t_acc:1 = acc; cx_coRd0 = cx3(t_cpn, t_op1, cx_coRd0, cx_coRn, cx_coRm, t_acc, 32:1); writeAPSR_nzcv(cx_coRd0); } :cx3d^acc^ItCond thcop, Rd0003, Rd0003hi, cx_coRn, cx_coRm, cx3_imm is TMode=1 & ItCond & (op13=7 & acc & thc0811=0xe & thc0707=1 & cx_coRn; cx_coRm & thc1111=0 & thcop & thc0606=1 & Rd0003 & Rd0003hi) & cx3_imm { build ItCond; t_cpn:4 = thcop; t_op1:4 = cx3_imm; t_acc:1 = acc; result:8 = cx3(t_cpn, t_op1, Rd0003, Rd0003hi, cx_coRn, cx_coRm, t_acc, 64:1); Rd0003 = result(0); Rd0003hi = result(4); } # Vector CDE instructions - Requires Armv8.1-M MVE :vcx1^vacc^ItCond thv_cpn, Qd, vcx1_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2323=0 & thv_c2021=2 & thv_c1111=0 & thv_cpn & thv_c0606=1 & Qd & vcx1_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = vcx1_imm; t_acc:1 = vacc; t_vec:1 = 1; Qd = vcx1(t_cpn, t_op1, Qd, t_acc, 32:1, t_vec); } :vcx2^vacc^ItCond thv_cpn, Qd, Qm, 
vcx2_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2323=0 & thv_c2021=3 & thv_c1111=0 & thv_cpn & thv_c0606=1 & Qm & Qd & vcx2_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = vcx2_imm; t_acc:1 = vacc; t_vec:1 = 1; Qd = vcx2(t_cpn, t_op1, Qd, Qm, t_acc, 32:1, t_vec); } :vcx3^vacc^ItCond thv_cpn, Qd, Qn, Qm, vcx3_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2323=1 & thv_c1111=0 & thv_cpn & thv_c0606=1 & Qm & Qn & Qd & vcx3_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = vcx3_imm; t_acc:1 = vacc; t_vec:1 = 1; Qd = vcx3(t_cpn, t_op1, Qd, Qn, Qm, t_acc, 32:1, t_vec); } # Floating-point CDE instructions - Requires Armv8.1-M MVE :vcx1^vacc^ItCond thv_cpn, Sd, fvcx1_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2424=0 & thv_c2323=0 & thv_c2021=2 & thv_c1111=0 & thv_cpn & thv_c0606=0 & Sd & fvcx1_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = fvcx1_imm; t_acc:1 = vacc; t_vec:1 = 0; Sd = vcx1(t_cpn, t_op1, Sd, t_acc, 32:1, t_vec); } :vcx1^vacc^ItCond thv_cpn, Dd, fvcx1_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2424=1 & thv_c2323=0 & thv_c2021=2 & thv_c1111=0 & thv_cpn & thv_c0606=0 & Dd & fvcx1_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = fvcx1_imm; t_acc:1 = vacc; t_vec:1 = 0; Dd = vcx1(t_cpn, t_op1, Dd, t_acc, 64:1, t_vec); } :vcx2^vacc^ItCond thv_cpn, Sd, Sm, fvcx2_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2424=0 & thv_c2323=0 & thv_c2021=3 & thv_c1111=0 & thv_cpn & thv_c0606=0 & Sm & Sd & fvcx2_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = fvcx2_imm; t_acc:1 = vacc; t_vec:1 = 0; Sd = vcx2(t_cpn, t_op1, Sd, Sm, t_acc, 32:1, t_vec); } :vcx2^vacc^ItCond thv_cpn, Dd, Dm, fvcx2_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2424=1 & thv_c2323=0 & thv_c2021=3 & thv_c1111=0 & thv_cpn & thv_c0606=0 & Dm & Dd & fvcx2_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = fvcx2_imm; t_acc:1 = vacc; t_vec:1 = 0; Dd = vcx2(t_cpn, t_op1, Dd, Dm, t_acc, 
64:1, t_vec); } :vcx3^vacc^ItCond thv_cpn, Sd, Sn, Sm, fvcx3_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2424=0 & thv_c2323=1 & thv_c1111=0 & thv_cpn & thv_c0606=0 & Sm & Sn & Sd & fvcx3_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = fvcx3_imm; t_acc:1 = vacc; t_vec:1 = 0; Sd = vcx3(t_cpn, t_op1, Sd, Sn, Sm, t_acc, 32:1, t_vec); } :vcx3^vacc^ItCond thv_cpn, Dd, Dn, Dm, fvcx3_imm is TMode=1 & ItCond & thv_c2931=7 & vacc & thv_c2527=6 & thv_c2424=1 & thv_c2323=1 & thv_c1111=0 & thv_cpn & thv_c0606=0 & Dm & Dn & Dd & fvcx3_imm { build ItCond; t_cpn:4 = thv_cpn; t_op1:4 = fvcx3_imm; t_acc:1 = vacc; t_vec:1 = 0; Dd = vcx3(t_cpn, t_op1, Dd, Dn, Dm, t_acc, 64:1, t_vec); } @endif # CDE ================================================ FILE: pypcode/processors/ARM/data/languages/ARM_apcs.cspec ================================================ ; offset = *:1 (lr + r3); r3 = zext(offset); if (inbounds) goto ; offset = *:1 (lr + r12); r3 = zext(offset); r3 = r3 * 2; r12 = lr + r3; ISAModeSwitch = (r12 & 1) != 1; TB = ISAModeSwitch; pc = r12 & 0xfffffffe; goto [pc]; ]]> ================================================ FILE: pypcode/processors/ARM/data/languages/ARM_v45.cspec ================================================ ; offset = *:1 (lr + r3); r3 = zext(offset); if (inbounds) goto ; offset = *:1 (lr + r12); r3 = zext(offset); r3 = r3 * 2; r12 = lr + r3; ISAModeSwitch = (r12 & 1) != 1; TB = ISAModeSwitch; pc = r12 & 0xfffffffe; goto [pc]; ]]> ================================================ FILE: pypcode/processors/ARM/data/languages/ARM_v45.pspec ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARM_win.cspec ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARMinstructions.sinc ================================================ # Specification for the ARM Version 4, 4T, 
5, 5T, 5E # The following boolean defines control specific support: T_VARIANT, VERSION_5, VERSION_5E # # WARNING NOTE: Be very careful taking a subpiece or truncating a register with :# or (#) # The LEBE hybrid language causes endian issues if you do not assign the register to a temp # variable and then take a subpiece or truncate. # @if defined(SIMD) || defined(VFPv2) || defined(VFPv3) @define INCLUDE_NEON "" # Neon instructions included with SIMD, VFPv2 or VFPv3 @endif @if defined(T_VARIANT) @define AMODE "TMode=0" # T_VARIANT must restrict ARM instruction decoding and require TMode=0 @else @define AMODE "epsilon" # THUMB instructions not supported - ARM only @endif @if defined(T_VARIANT) @define VERSION_5_or_T "" @endif @if defined(VERSION_5) @define VERSION_5_or_T "" @endif define token prefix (32) pref=(0,31) ; define token instrArm (32) cond=(28,31) I25=(25,25) P24=(24,24) H24=(24,24) L24=(24,24) U23=(23,23) B22=(22,22) N22=(22,22) S22=(22,22) op=(21,24) W21=(21,21) S20=(20,20) L20=(20,20) Rn=(16,19) RnLo=(0,3) msb=(16,20) satimm5=(16,20) satimm4=(16,19) mask=(16,19) Rd=(12,15) Rd2=(12,15) CRd=(12,15) CRn=(16,19) CRm=(0,3) RdHi=(16,19) RdLo=(12,15) smRd=(16,19) smRa=(12,15) smRm=(8,11) smRn=(0,3) immed12=(8,19) Rs=(8,11) rotate=(8,11) immedH=(8,11) cpn=(8,11) opc1=(21,23) opcode1=(20,23) opc2=(5,7) opcode2=(5,7) opcode3=(4,7) lsb=(7,11) sftimm=(7,11) sh=(6,6) shft=(5,6) immed24=(0,23) addr24=(0,23) signed offset_12=(0,11) immed=(0,7) srsMode=(0,4) immedL=(0,3) immed4=(0,3) dbOption=(0,3) ibOption=(0,3) Rm=(0,3) RmHi=(8,11) Rm2=(0,3) x=(5,5) r=(5,5) y=(6,6) # Advanced SIMD and VFP instruction fields D22=(22,22) N7=(7,7) L7=(7,7) Q6=(6,6) M5=(5,5) Qn0=(16,19) Qd0=(12,15) Qm0=(0,3) Qn1=(16,19) Qd1=(12,15) Qm1=(0,3) Dn0=(16,19) Dd0=(12,15) Dd_1=(12,15) Dd_2=(12,15) Dd_3=(12,15) Dd_4=(12,15) Dd_5=(12,15) Dd_6=(12,15) Dd_7=(12,15) Dd_8=(12,15) Dd_9=(12,15) Dd_10=(12,15) Dd_11=(12,15) Dd_12=(12,15) Dd_13=(12,15) Dd_14=(12,15) Dd_15=(12,15) Dd_16=(12,15) Dm0=(0,3) 
Dn1=(16,19) Dd1=(12,15) Dm1=(0,3) Dm_3=(0,2) Dm_4=(0,3) Sn0=(16,19) Sd0=(12,15) Sm0=(0,3) Sm0next=(0,3) Sn1=(16,19) Sd1=(12,15) Sm1=(0,3) Sm1next=(0,3) Sm0_3=(0,2) Sm1_3=(0,2) cmode=(8,11) # Arbitrary bit fields bit31=(31,31) bit30=(30,30) bit29=(29,29) bit28=(28,28) c2831=(28,31) c2627=(26,27) c2531=(25,31) c2527=(25,27) c2525=(25,25) c2427=(24,27) c2424=(24,24) c2331=(23,31) c2327=(23,27) c2324=(23,24) c2323=(23,23) c2222=(22,22) c2131=(21,31) c2127=(21,27) c2124=(21,24) c2123=(21,23) c2122=(21,22) c2121=(21,21) c2027=(20,27) c2024=(20,24) c2022=(20,22) c2021=(20,21) c2020=(20,20) c1921=(19,21) c1919=(19,19) c1821=(18,21) c1819=(18,19) c1818=(18,18) c1721=(17,21) c1719=(17,19) c1718=(17,18) c1717=(17,17) c1631=(16,31) c1627=(16,27) c1621=(16,21) c1620=(16,20) c1619=(16,19) c1618=(16,18) c1617=(16,17) c1616=(16,16) c1515=(15,15) c1415=(14,15) c1414=(14,14) c1315=(13,15) c1313=(13,13) c1215=(12,15) c1212=(12,12) c1115=(11,15) c1111=(11,11) c1015=(10,15) c1011=(10,11) c1010=(10,10) c0916=(9,16) c0915=(9,15) c0911=(9,11) c0909=(9,9) c0815=(8,15) c0811=(8,11) c0809=(8,9) c0808=(8,8) c0715=(7,15) c0711=(7,11) c0709=(7,9) c0708=(7,8) c0707=(7,7) c0615=(6,15) c0611=(6,11) c0607=(6,7) c0606=(6,6) c0515=(5,15) c0508=(5,8) c0507=(5,7) c0506=(5,6) c0505=(5,5) c0431=(4,31) c0427=(4,27) c0415=(4,15) c0411=(4,11) c0409=(4,9) c0408=(4,8) c0407=(4,7) c0406=(4,6) c0405=(4,5) c0404=(4,4) c0315=(3,15) c0303=(3,3) c0215=(2,15) c0202=(2,2) c0115=(1,15) c0101=(1,1) c0031=(0,31) c0027=(0,27) c0014=(0,14) c0013=(0,13) c0012=(0,12) c0011=(0,11) c0010=(0,10) c0009=(0,9) c0008=(0,8) c0007=(0,7) c0006=(0,6) c0005=(0,5) c0004=(0,4) c0003=(0,3) c0002=(0,2) c0001=(0,1) c0000=(0,0) # # 32-bit Thumb fields which correspond closely with ARM fields for # certain coprocessor instructions # @if ENDIAN == "little" # Advanced SIMD and VFP instruction fields for 32-bit Little Endian Thumb thv_D22=(6,6) thv_N7=(23,23) thv_L7=(23,23) thv_Q6=(22,22) thv_M5=(21,21) thv_Qn0=(0,3) thv_Qd0=(28,31) 
thv_Qm0=(16,19) thv_Qn1=(0,3) thv_Qd1=(28,31) thv_Qm1=(16,19) thv_Dn0=(0,3) thv_Dd0=(28,31) thv_Dd_1=(28,31) thv_Dd_2=(28,31) thv_Dd_3=(28,31) thv_Dd_4=(28,31) thv_Dd_5=(28,31) thv_Dd_6=(28,31) thv_Dd_7=(28,31) thv_Dd_8=(28,31) thv_Dd_9=(28,31) thv_Dd_10=(28,31) thv_Dd_11=(28,31) thv_Dd_12=(28,31) thv_Dd_13=(28,31) thv_Dd_14=(28,31) thv_Dd_15=(28,31) thv_Dd_16=(28,31) thv_Dm0=(16,19) thv_Dn1=(0,3) thv_Dd1=(28,31) thv_Dm1=(16,19) thv_Dm_3=(16,18) thv_Dm_4=(16,19) thv_Sn0=(0,3) thv_Sd0=(28,31) thv_Sm0=(16,19) thv_Sm0next=(16,19) thv_Sn1=(0,3) thv_Sd1=(28,31) thv_Sm1=(16,19) thv_Sm1next=(16,19) thv_cmode=(24,27) thv_Sm0_3=(16,18) thv_Sm1_3=(16,18) thv_Rd=(28,31) thv_Rt=(28,31) thv_Rn=(0,3) thv_Rm=(16,19) thv_Rt2=(24,27) thv_immed=(16,23) thv_cpn=(8,10) # Arbitrary bit fields for 32-bit Little Endian Thumb thv_bit31=(15,15) thv_bit30=(14,14) thv_bit29=(13,13) thv_bit28=(12,12) thv_bit23=(7,7) thv_bit21=(5,5) thv_bit20=(4,4) thv_bit07=(23,23) thv_bit06=(22,22) thv_bit00=(16,16) thv_c2931=(13,15) thv_c2831=(12,15) thv_c2828=(12,12) thv_c2627=(10,11) thv_c2527=(9,11) thv_c2525=(9,9) thv_c2431=(8,15) thv_c2427=(8,11) thv_c2424=(8,8) thv_c2331=(7,15) thv_c2327=(7,11) thv_c2324=(7,8) thv_c2323=(7,7) thv_c2223=(6,7) thv_c2222=(6,6) thv_c2131=(5,15) thv_c2127=(5,11) thv_c2124=(5,8) thv_c2123=(5,7) thv_c2122=(5,6) thv_c2121=(5,5) thv_c2031=(4,15) thv_c2027=(4,11) thv_c2024=(4,8) thv_c2022=(4,6) thv_c2021=(4,5) thv_c2020=(4,4) thv_c1921=(3,5) thv_c1919=(3,3) thv_c1821=(2,5) thv_c1819=(2,3) thv_c1818=(2,2) thv_c1721=(1,5) thv_c1719=(1,3) thv_c1718=(1,2) thv_c1717=(1,1) thv_c1631=(0,15) thv_c1627=(0,11) thv_c1621=(0,5) thv_c1620=(0,4) thv_c1619=(0,3) thv_c1618=(0,2) thv_c1617=(0,1) thv_c1616=(0,0) thv_c1515=(31,31) thv_c1415=(30,31) thv_c1414=(30,30) thv_c1313=(29,29) thv_c1215=(28,31) thv_c1212=(28,28) thv_c1111=(27,27) thv_c1011=(26,27) thv_c1010=(26,26) thv_c0911=(25,27) thv_c0909=(25,25) thv_c0811=(24,27) thv_c0809=(24,25) thv_c0808=(24,24) thv_c0711=(23,27) thv_c0709=(23,25) 
thv_c0708=(23,24) thv_c0707=(23,23) thv_c0611=(22,27) thv_c0607=(22,23) thv_c0606=(22,22) thv_c0508=(21,24) thv_c0507=(21,23) thv_c0506=(21,22) thv_c0505=(21,21) thv_c0431=(4,31) thv_c0427=(4,27) thv_c0411=(20,27) thv_c0409=(20,25) thv_c0407=(20,23) thv_c0406=(20,22) thv_c0405=(20,21) thv_c0404=(20,20) thv_c0303=(19,19) thv_c0215=(18,31) thv_c0202=(18,18) thv_c0101=(17,17) thv_c0104=(17,20) thv_c0031=(0,31) thv_c0027=(0,27) thv_c0015=(16,31) thv_c0011=(16,27) thv_c0010=(16,26) thv_c0008=(16,24) thv_c0007=(16,23) thv_c0006=(16,22) thv_c0005=(16,21) thv_c0004=(16,20) thv_c0003=(16,19) thv_c0001=(16,17) thv_c0000=(16,16) thv_option=(16,19) @else # ENDIAN == "big" # Advanced SIMD and VFP instruction fields for 32-bit Big Endian Thumb thv_D22=(22,22) thv_N7=(7,7) thv_L7=(7,7) thv_Q6=(6,6) thv_M5=(5,5) thv_Qn0=(16,19) thv_Qd0=(12,15) thv_Qm0=(0,3) thv_Qn1=(16,19) thv_Qd1=(12,15) thv_Qm1=(0,3) thv_Dn0=(16,19) thv_Dd0=(12,15) thv_Dd_1=(12,15) thv_Dd_2=(12,15) thv_Dd_3=(12,15) thv_Dd_4=(12,15) thv_Dd_5=(12,15) thv_Dd_6=(12,15) thv_Dd_7=(12,15) thv_Dd_8=(12,15) thv_Dd_9=(12,15) thv_Dd_10=(12,15) thv_Dd_11=(12,15) thv_Dd_12=(12,15) thv_Dd_13=(12,15) thv_Dd_14=(12,15) thv_Dd_15=(12,15) thv_Dd_16=(12,15) thv_Dm0=(0,3) thv_Dn1=(16,19) thv_Dd1=(12,15) thv_Dm1=(0,3) thv_Dm_3=(0,2) thv_Dm_4=(0,3) thv_Sn0=(16,19) thv_Sd0=(12,15) thv_Sm0=(0,3) thv_Sm0next=(0,3) thv_Sn1=(16,19) thv_Sd1=(12,15) thv_Sm1=(0,3) thv_Sm1next=(0,3) thv_Sm0_3=(0,2) thv_Sm1_3=(0,2) thv_cmode=(8,11) thv_Rd=(12,15) thv_Rt=(12,15) thv_Rn=(16,19) thv_Rm=(0,3) thv_Rt2=(8,11) thv_immed=(0,7) thv_cpn=(24,26) # Arbitrary bit fields for 32-bit Big Endian Thumb thv_bit31=(31,31) thv_bit30=(30,30) thv_bit29=(29,29) thv_bit28=(28,28) thv_bit23=(23,23) thv_bit21=(21,21) thv_bit20=(20,20) thv_bit07=(7,7) thv_bit06=(6,6) thv_bit00=(0,0) thv_c2931=(29,31) thv_c2831=(28,31) thv_c2828=(28,28) thv_c2627=(26,27) thv_c2527=(25,27) thv_c2525=(25,25) thv_c2431=(24,31) thv_c2427=(24,27) thv_c2424=(24,24) thv_c2331=(23,31) 
thv_c2327=(23,27) thv_c2324=(23,24) thv_c2323=(23,23) thv_c2223=(22,23) thv_c2222=(22,22) thv_c2131=(21,31) thv_c2127=(21,27) thv_c2124=(21,24) thv_c2123=(21,23) thv_c2122=(21,22) thv_c2121=(21,21) thv_c2031=(20,31) thv_c2027=(20,27) thv_c2024=(20,24) thv_c2022=(20,22) thv_c2021=(20,21) thv_c2020=(20,20) thv_c1921=(19,21) thv_c1919=(19,19) thv_c1821=(18,21) thv_c1819=(18,19) thv_c1818=(18,18) thv_c1721=(17,21) thv_c1719=(17,19) thv_c1718=(17,18) thv_c1717=(17,17) thv_c1631=(16,31) thv_c1627=(16,27) thv_c1621=(16,21) thv_c1620=(16,20) thv_c1619=(16,19) thv_c1618=(16,18) thv_c1617=(16,17) thv_c1616=(16,16) thv_c1515=(15,15) thv_c1415=(14,15) thv_c1414=(14,14) thv_c1313=(13,13) thv_c1215=(12,15) thv_c1212=(12,12) thv_c1111=(11,11) thv_c1011=(10,11) thv_c1010=(10,10) thv_c0911=(9,11) thv_c0909=(9,9) thv_c0811=(8,11) thv_c0809=(8,9) thv_c0808=(8,8) thv_c0711=(7,11) thv_c0709=(7,9) thv_c0708=(7,8) thv_c0707=(7,7) thv_c0611=(6,11) thv_c0607=(6,7) thv_c0606=(6,6) thv_c0508=(5,8) thv_c0507=(5,7) thv_c0506=(5,6) thv_c0505=(5,5) thv_c0431=(4,31) thv_c0427=(4,27) thv_c0411=(4,11) thv_c0409=(4,9) thv_c0407=(4,7) thv_c0406=(4,6) thv_c0405=(4,5) thv_c0404=(4,4) thv_c0303=(3,3) thv_c0215=(2,15) thv_c0202=(2,2) thv_c0101=(1,1) thv_c0104=(1,4) thv_c0031=(0,31) thv_c0027=(0,27) thv_c0015=(0,15) thv_c0011=(0,11) thv_c0010=(0,10) thv_c0008=(0,8) thv_c0007=(0,7) thv_c0006=(0,6) thv_c0005=(0,5) thv_c0004=(0,4) thv_c0003=(0,3) thv_c0001=(0,1) thv_c0000=(0,0) thv_option=(0,3) @endif # ENDIAN = "big" ; attach variables [ Rn Rd Rs Rm RdHi RdLo smRd smRn smRm smRa RmHi RnLo ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; attach variables [ Rd2 Rm2 ] [ r1 _ r3 _ r5 _ r7 _ r9 _ r11 _ sp _ _ _ ]; # see LDREXD attach variables [ CRd CRn CRm ] [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 cr11 cr12 cr13 cr14 cr15 ]; attach variables [ thv_Rd thv_Rn thv_Rt thv_Rt2 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; attach names [ cpn ] [ p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 
p13 p14 p15 ];
# (the list above completes the cpn coprocessor-name attach begun on the previous line)

# Symbolic names for the 3-bit Thumb coprocessor field.
attach names [ thv_cpn ] [ p0 p1 p2 p3 p4 p5 p6 p7 ];

# Barrier option operand names (ISB-style and DMB/DSB-style encodings).
attach names [ ibOption ] [ opt0 opt1 opt2  opt3  opt4  opt5  opt6  opt7
                            opt8 opt9 opt10 opt11 opt12 opt13 opt14 SY ];
attach names [ dbOption ] [ opt0 opt1 OSHST OSH   opt4  opt5  NSHST NSH
                            opt8 opt9 ISHST ISH   opt12 opt13 ST    SY ];

# Compute the carry/overflow temporaries for an addition.
macro addflags(op1,op2) {
  tmpCY = carry(op1,op2);
  tmpOV = scarry(op1,op2);
}

# NOTE: unlike x86, carry flag is SET if there is NO borrow
macro subflags(op1,op2) {
  tmpCY = op2 <= op1;
  tmpOV = sborrow(op1,op2);
}

# Logical operations: carry comes from the shifter, overflow is preserved.
macro logicflags() {
  tmpCY = shift_carry;
  tmpOV = OV;
}

# Leave carry and overflow unchanged.
macro CVunaffected() {
  tmpCY = CY;
  tmpOV = OV;
}

# Derive the negative/zero temporaries from a result value.
macro resultflags(result) {
  tmpNG = result s< 0;
  tmpZR = result == 0;
}

# Register-operand sub-tables: reading pc yields the instruction address + 8
# (the ARM pipeline-visible PC value).
rn: pc is pc & c1619=15 { tmp:4 = inst_start+8; export tmp; }
rn: Rn is Rn { export Rn; }
rm: pc is pc & Rm=15 { tmp:4 = inst_start+8; export tmp; }
rm: Rm is Rm { export Rm; }
rs: pc is pc & Rs=15 { tmp:4 = inst_start+8; export tmp; }
rs: Rs is Rs { export Rs; }

# ARM condition codes: each variant exports a 1-byte boolean for the test.
cc: "eq" is cond=0  { export ZR; }
cc: "ne" is cond=1  { tmp:1 = !ZR; export tmp; }
cc: "cs" is cond=2  { export CY; }
cc: "cc" is cond=3  { tmp:1 = !CY; export tmp; }
cc: "mi" is cond=4  { export NG; }
cc: "pl" is cond=5  { tmp:1 = !NG; export tmp; }
cc: "vs" is cond=6  { export OV; }
cc: "vc" is cond=7  { tmp:1 = !OV; export tmp; }
cc: "hi" is cond=8  { tmp:1 = CY && (!ZR); export tmp; }
cc: "ls" is cond=9  { tmp:1 = (!CY) || ZR; export tmp; }
cc: "ge" is cond=10 { tmp:1 = (NG==OV); export tmp; }
cc: "lt" is cond=11 { tmp:1 = (NG!=OV); export tmp; }
cc: "gt" is cond=12 { tmp:1 = (!ZR) && (NG==OV); export tmp; }
cc: "le" is cond=13 { tmp:1 = ZR || (NG!=OV); export tmp; }

COND: cc is $(AMODE) & cc { if (!cc) goto inst_next; } # Execute conditionally
COND: is $(AMODE) & cond=14 { } # Always execute
#COND: is $(AMODE) & cond=15 { } # Always execute - deprecated, should not be used.
@if defined(INCLUDE_NEON)
# Unconditional Neon Thumb instructions share many Conditional Neon ARM constructors
COND: ItCond is TMode=1 & thv_c2831=14 & cond & ItCond { } # ItCond execute
#COND: ItCond is TMode=1 & thv_c2831=15 & cond & ItCond { } # ItCond execute
@endif

# Optional "s" suffix: commit the flag temporaries into the real flag registers.
SBIT_CZNO: is S20=0 { } # Do nothing to the flag bits
SBIT_CZNO: "s" is S20=1 { CY = tmpCY; ZR = tmpZR; NG = tmpNG; OV = tmpOV; }
SBIT_ZN: is S20=0 { } # Do nothing to the flag bits
SBIT_ZN: "s" is S20=1 { ZR = tmpZR; NG = tmpNG; }

# PC-relative target for the 24-bit branch offset field.
Addr24: reloc is addr24 [ reloc = (inst_next+4) + (4*addr24); ] { export *[ram]:4 reloc; }

# see blx(1) instruction
@if defined(T_VARIANT) && defined(VERSION_5)
# Half-word-adjusted target; marks the destination as Thumb via globalset.
HAddr24: reloc is addr24 & H24
  [ reloc = ((inst_next+4) + (4*addr24) + (2*H24)) & 0xFFFFFFFF; TMode=1; globalset(reloc,TMode); ]
  { export *[ram]:4 reloc; }
@endif # T_VARIANT && VERSION_5

@if defined(VERSION_5E)
# Half-register selectors for the signed-multiply family:
# "b" = low 16 bits, "t" = high 16 bits of the source register.
XBIT: "b" is x=0 & smRn { local tmpRn = smRn; tmp:2 = tmpRn:2; export tmp; }
XBIT: "t" is x=1 & smRn { local tmpRn = smRn; tmp:2 = tmpRn(2); export tmp; }
YBIT: "b" is y=0 & smRm { local tmpRm = smRm; tmp:2 = tmpRm:2; export tmp; }
YBIT: "t" is y=1 & smRm { local tmpRm = smRm; tmp:2 = tmpRm(2); export tmp; }
@endif # VERSION_5E

#####################
######  shift1 ######
#####################

# Rotated-immediate operand: 8-bit value rotated right by 2*rotate.
# Also computes the shifter carry-out (unchanged when rotate is zero).
shift1: "#"^value is I25=1 & immed & rotate
  [ value=((immed<<(32-rotate*2))|(immed>>(rotate*2))) $and 0xffffffff; ]
{
  local tmp:4 = (value >> 31);
  shift_carry = ((rotate == 0:1) && CY) || ((rotate != 0:1) && tmp(0));
  export *[const]:4 value;
}

####################

# System-control coprocessor register writes (MCR targets), modeled as opaque pcodeops.
define pcodeop coproc_moveto_Main_ID;
define pcodeop coproc_moveto_Cache_Type;
define pcodeop coproc_moveto_TCM_Status;
define pcodeop coproc_moveto_TLB_Type;
define pcodeop coproc_moveto_Control;
define pcodeop coproc_moveto_Auxiliary_Control;
define pcodeop coproc_moveto_Coprocessor_Access_Control;
define pcodeop coproc_moveto_Secure_Configuration;
define pcodeop coproc_moveto_Secure_Debug_Enable;
define pcodeop coproc_moveto_NonSecure_Access_Control;
# System-control coprocessor register writes, continued.
define pcodeop coproc_moveto_Translation_table_base_0;
define pcodeop coproc_moveto_Translation_table_base_1;
define pcodeop coproc_moveto_Translation_table_control;
define pcodeop coproc_moveto_Domain_Access_Control;
define pcodeop coproc_moveto_Data_Fault_Status;
define pcodeop coproc_moveto_Instruction_Fault_Status;
define pcodeop coproc_moveto_Instruction_Fault_Address;
define pcodeop coproc_moveto_Fault_Address;
define pcodeop coproc_moveto_Instruction_Fault;
define pcodeop coproc_moveto_Wait_for_interrupt;
define pcodeop coproc_moveto_Invalidate_Entire_Instruction;
define pcodeop coproc_moveto_Invalidate_Instruction_Cache_by_MVA;
define pcodeop coproc_moveto_Flush_Prefetch_Buffer;
define pcodeop coproc_moveto_Invalidate_Entire_Data_cache;
define pcodeop coproc_moveto_Invalidate_Entire_Data_by_MVA;
define pcodeop coproc_moveto_Invalidate_Entire_Data_by_Index;
define pcodeop coproc_moveto_Clean_Entire_Data_Cache;
define pcodeop coproc_moveto_Clean_Data_Cache_by_MVA;
define pcodeop coproc_moveto_Clean_Data_Cache_by_Index;
define pcodeop coproc_moveto_Data_Synchronization;
define pcodeop coproc_moveto_Data_Memory_Barrier;
define pcodeop coproc_moveto_Invalidate_Entire_Data_Cache;
define pcodeop coproc_moveto_Invalidate_Data_Cache_by_MVA;
define pcodeop coproc_moveto_Invalidate_unified_TLB_unlocked;
define pcodeop coproc_moveto_Invalidate_unified_TLB_by_MVA;
define pcodeop coproc_moveto_Invalidate_unified_TLB_by_ASID_match;
define pcodeop coproc_moveto_FCSE_PID;
define pcodeop coproc_moveto_Context_ID;
define pcodeop coproc_moveto_User_RW_Thread_and_Process_ID;
define pcodeop coproc_moveto_User_R_Thread_and_Process_ID;
define pcodeop coproc_moveto_Privileged_only_Thread_and_Process_ID;
# NOTE: "Peripherial" spelling kept — the identifier is referenced elsewhere.
define pcodeop coproc_moveto_Peripherial_Port_Memory_Remap;
define pcodeop coproc_moveto_Feature_Identification;
define pcodeop coproc_moveto_ISA_Feature_Identification;
define pcodeop coproc_moveto_Peripheral_Port_Memory_Remap;
define pcodeop coproc_moveto_Control_registers;
define pcodeop coproc_moveto_Security_world_control;
define pcodeop coproc_moveto_Translation_table;
define pcodeop coproc_moveto_Instruction_cache;
define pcodeop coproc_moveto_Data_cache_operations;
define pcodeop coproc_moveto_Identification_registers;
define pcodeop coproc_moveto_Peripheral_System;

# System-control coprocessor register reads (MRC sources), modeled as opaque pcodeops.
define pcodeop coproc_movefrom_Main_ID;
define pcodeop coproc_movefrom_Cache_Type;
define pcodeop coproc_movefrom_TCM_Status;
define pcodeop coproc_movefrom_TLB_Type;
define pcodeop coproc_movefrom_Control;
define pcodeop coproc_movefrom_Auxiliary_Control;
define pcodeop coproc_movefrom_Coprocessor_Access_Control;
define pcodeop coproc_movefrom_Secure_Configuration;
define pcodeop coproc_movefrom_Secure_Debug_Enable;
define pcodeop coproc_movefrom_NonSecure_Access_Control;
define pcodeop coproc_movefrom_Translation_table_base_0;
define pcodeop coproc_movefrom_Translation_table_base_1;
define pcodeop coproc_movefrom_Translation_table_control;
define pcodeop coproc_movefrom_Domain_Access_Control;
define pcodeop coproc_movefrom_Data_Fault_Status;
define pcodeop coproc_movefrom_Instruction_Fault;
define pcodeop coproc_movefrom_Fault_Address;
define pcodeop coproc_movefrom_Instruction_Fault_Status;
define pcodeop coproc_movefrom_Instruction_Fault_Address;
define pcodeop coproc_movefrom_Wait_for_interrupt;
define pcodeop coproc_movefrom_Invalidate_Entire_Instruction;
define pcodeop coproc_movefrom_Invalidate_Instruction_Cache_by_MVA;
define pcodeop coproc_movefrom_Flush_Prefetch_Buffer;
define pcodeop coproc_movefrom_Invalidate_Entire_Data_cache;
define pcodeop coproc_movefrom_Invalidate_Entire_Data_by_MVA;
define pcodeop coproc_movefrom_Invalidate_Entire_Data_by_Index;
define pcodeop coproc_movefrom_Clean_Entire_Data_Cache;
define pcodeop coproc_movefrom_Clean_Data_Cache_by_MVA;
define pcodeop coproc_movefrom_Clean_Data_Cache_by_Index;
define pcodeop coproc_movefrom_Data_Synchronization;
define pcodeop coproc_movefrom_Data_Memory_Barrier;
define pcodeop
coproc_movefrom_Invalidate_Entire_Data_Cache;  # completes the "define pcodeop" begun on the previous source line
define pcodeop coproc_movefrom_Invalidate_Data_Cache_by_MVA;
define pcodeop coproc_movefrom_Invalidate_unified_TLB_unlocked;
define pcodeop coproc_movefrom_Invalidate_unified_TLB_by_MVA;
define pcodeop coproc_movefrom_Invalidate_unified_TLB_by_ASID_match;
define pcodeop coproc_movefrom_FCSE_PID;
define pcodeop coproc_movefrom_Context_ID;
define pcodeop coproc_movefrom_User_RW_Thread_and_Process_ID;
define pcodeop coproc_movefrom_User_R_Thread_and_Process_ID;
define pcodeop coproc_movefrom_Privileged_only_Thread_and_Process_ID;
define pcodeop coproc_movefrom_Peripherial_Port_Memory_Remap;
define pcodeop coproc_movefrom_Feature_Identification;
define pcodeop coproc_movefrom_ISA_Feature_Identification;
define pcodeop coproc_movefrom_Peripheral_Port_Memory_Remap;
define pcodeop coproc_movefrom_Control_registers;
define pcodeop coproc_movefrom_Security_world_control;
define pcodeop coproc_movefrom_Translation_table;
define pcodeop coproc_movefrom_Instruction_cache;
define pcodeop coproc_movefrom_Data_cache_operations;
define pcodeop coproc_movefrom_Identification_registers;
define pcodeop coproc_movefrom_Peripheral_System;

# Operand bundle for generic MCR/MRC: coprocessor number, opcodes and registers.
mcrOperands: cpn,opc1,Rd,CRn,CRm,opc2 is CRm & opc2 & cpn & CRn & opc1 & Rd { }

#####################
###### shift2  ######
# Immediate-shifted register operand for data-processing instructions.
# Exports the shifted value and records the shifter carry-out in shift_carry.
# c0406 selects the shift type (0=lsl, 2=lsr, 4=asr, 6=ror/rrx); sftimm=0
# encodes the special #32 (or rrx) forms per the ARM ARM.
#####################
shift2: rm is I25=0 & sftimm=0 & c0406=0 & rm { shift_carry = CY; export rm; }
shift2: rm, "lsl #"^sftimm is I25=0 & sftimm & c0406=0 & rm { local tmp1=(rm>>(32-sftimm))&1; shift_carry=tmp1(0); local tmp2=rm<<sftimm; export tmp2; }
# FIX(extraction): the next two constructor heads were destroyed by the
# importer stripping everything between a '<' and the following '>'
# (the "<<sftimm; ... (rm>" span was eaten as if it were a markup tag).
# Reconstructed from the surviving bodies and the symmetric c0406 encoding.
shift2: rm, "lsr #32" is I25=0 & sftimm=0 & c0406=2 & rm { local tmp1=(rm>>31); shift_carry=tmp1(0); tmp2:4=0; export tmp2; }
shift2: rm, "lsr #"^sftimm is I25=0 & sftimm & c0406=2 & rm { local tmp1=(rm>>(sftimm-1))&1; shift_carry=tmp1(0); local tmp2=rm>>sftimm; export tmp2; }
shift2: rm, "asr #32" is I25=0 & sftimm=0 & c0406=4 & rm { local tmp1=(rm>>31); shift_carry=tmp1(0); local tmp2 = rm s>> 32; export tmp2; }
shift2: rm, "asr #"^sftimm is I25=0 & sftimm & c0406=4 & rm { local tmp1=(rm>>(sftimm-1))&1; shift_carry=tmp1(0); local tmp2=rm s>> sftimm; export tmp2; }
shift2: rm, "rrx" is I25=0 & c0411=6 & rm { local tmp1=rm&1; shift_carry=tmp1(0); local tmp2 = (zext(CY)<<31)|(rm>>1); export tmp2; }
shift2: rm, "ror #"^sftimm is I25=0 & sftimm & c0406=6 & rm { local tmp1=(rm>>sftimm)|(rm<<(32-sftimm)); local tmp2=(tmp1>>31)&1; shift_carry=tmp2(0); export tmp1; }

#####################
###### shift3  ######
# Register-shifted register operand; shift amount comes from the low byte of
# rs. shift_carry is CY when the amount is 0, otherwise the shifted-out bit.
# c0407 selects the shift type (1=lsl, 3=lsr, 5=asr, 7=ror).
#####################
shift3: rm, "lsl "^rs is I25=0 & rs & c0407=1 & rm { local sa=rs&0xff; local tmp1=(rm>>(32-sa))&1; shift_carry=((sa==0:4)&&CY) || ((sa!=0:4)&&tmp1(0)); local tmp2=rm<<sa; export tmp2; }
# FIX(extraction): "lsr" constructor head reconstructed (same tag-stripping
# damage as in shift2); c0407=3 follows the 1/3/5/7 register-shift encoding.
shift3: rm, "lsr "^rs is I25=0 & rs & c0407=3 & rm { local sa=rs&0xff; local tmp1=(rm>>(sa-1))&1; shift_carry=((sa==0:4)&&CY) || ((sa!=0:4)&&tmp1(0)); local tmp2=rm>>sa; export tmp2; }
shift3: rm, "asr "^rs is I25=0 & rs & c0407=5 & rm { local sa=rs&0xff; local tmp1=(rm s>>(sa-1))&1; shift_carry=((sa==0:4)&&CY) || ((sa!=0:4)&&tmp1(0)); local tmp2=rm s>> sa; export tmp2; }
shift3: rm, "ror "^rs is I25=0 & rs & c0407=7 & rm { local sa=rs&0x1f; local tmp1=(rm>>sa)|(rm<<(32-sa)); local tmp2=tmp1>>31; shift_carry=(((rs&0xff)==0:4)&&CY) || (((rs&0xff)!=0:4)&&tmp2(0)); export tmp1; }

#####################
###### shift4  ######
# Immediate-shifted operand for the v6 saturating instructions (ssat/usat):
# only LSL (sh=0) and ASR (sh=1) forms exist.
#####################
@if defined(VERSION_6)
shift4: rm is sftimm=0 & sh=0 & rm { shift_carry = CY; export rm; }
shift4: rm, "lsl #"^sftimm is sftimm & sh=0 & rm { local tmp1=(rm>>(32-sftimm))&1; shift_carry=tmp1(0); local tmp2=rm<<sftimm; export tmp2; }
# FIX(extraction): "asr #32" constructor head reconstructed (same
# tag-stripping damage); sftimm=0 & sh=1 is the ASR #32 encoding.
shift4: rm, "asr #32" is sftimm=0 & sh=1 & rm { local tmp1=(rm>>31); shift_carry=tmp1(0); local tmp2 = rm s>> 32; export tmp2; }
shift4: rm, "asr #"^sftimm is sftimm & sh=1 & rm { local tmp1=(rm>>(sftimm-1))&1; shift_carry=tmp1(0); local tmp2=rm s>> sftimm; export tmp2; }
@endif # VERSION_6

#####################
###### ror1    ######
# Byte-rotate operand used by the v6 extend instructions (sxtb/uxtb etc.);
# c1011 selects a rotation of 0, 8, 16 or 24 bits.
#####################
@if defined(VERSION_6)
ror1: rm is c1011=0 & rm { local tmp = rm; export tmp; }
ror1: rm, "ror #8" is c1011=1 & rm { local tmp = (rm <<24)| (rm >> 8); export tmp; }
ror1: rm, "ror #16" is c1011=2 & rm { local tmp = (rm << 16) | (rm >> 16); export tmp; }
ror1: rm, "ror #24" is c1011=3 & rm { local tmp = (rm << 8) | (rm >> 24); export tmp; }
@endif # VERSION_6
#####################
# addrmode2 is the resulting address for Addressing Mode 2.
# It takes care of bits 27-0, except for the B and L flags and the Rd register.
# The Rn register is taken care of, including any possible write-back.
# It returns a varnode containing the address.
#####################

# addr2shift is the register (rm) shifting portion of Addressing Mode 2.
# shft selects the shift type (0=lsl, 1=lsr, 2=asr, 3=ror/rrx); sftimm=0
# encodes the special #32 / rrx forms.  Unlike shift2/shift3 this does NOT
# touch shift_carry — addresses never affect the shifter carry-out.
addr2shift: rm is c0411=0 & rm { export rm; }
addr2shift: rm,"lsl #"^sftimm is sftimm & shft=0 & c0404=0 & rm { local tmp = rm << sftimm; export tmp; }
addr2shift: rm,"lsr #"^sftimm is sftimm & shft=1 & c0404=0 & rm { local tmp = rm >> sftimm; export tmp; }
# lsr #32 always yields 0 for a 32-bit value
addr2shift: rm,"lsr #32" is sftimm=0 & shft=1 & c0404=0 & rm { tmp:4 = 0; export tmp; }
addr2shift: rm,"asr #"^sftimm is sftimm & shft=2 & c0404=0 & rm { local tmp = rm s>> sftimm; export tmp; }
addr2shift: rm,"asr #32" is sftimm=0 & shft=2 & c0404=0 & rm { local tmp = rm s>> 32; export tmp; }
addr2shift: rm,"ror #"^sftimm is sftimm & shft=3 & c0404=0 & rm { local tmp = (rm>>sftimm) | (rm<<(32-sftimm)); export tmp; }
# rrx: rotate right with extend — carry shifts into bit 31
addr2shift: rm,"rrx" is sftimm=0 & shft=3 & c0404=0 & rm { tmp:4 = zext(CY); tmp = (tmp<<31) | (rm>>1); export tmp; }

# no writeback
# PC-relative (Rn=pc, c1619=15): address folds to a constant, inst_start+8
# accounts for the ARM pipeline's PC read-ahead.
addrmode2: [reloff] is I25=0 & P24=1 & U23=1 & W21=0 & c1619=15 & offset_12 [ reloff = inst_start + 8 + offset_12; ] { export *[const]:4 reloff; }
addrmode2: [reloff] is I25=0 & P24=1 & U23=0 & W21=0 & c1619=15 & offset_12 [ reloff = inst_start + 8 - offset_12; ] { export *[const]:4 reloff; }
# immediate offset, add (U23=1) / subtract (U23=0)
addrmode2: [rn,"#"^offset_12] is I25=0 & P24=1 & U23=1 & W21=0 & rn & offset_12 { local tmp = rn + offset_12; export tmp; }
addrmode2: [rn,"#"^noff] is I25=0 & P24=1 & U23=0 & W21=0 & rn & offset_12 [ noff = -offset_12; ] { local tmp = rn + noff; export tmp; }
# scaled-register offset (I25=1)
addrmode2: [rn,addr2shift] is I25=1 & P24=1 & U23=1 & W21=0 & rn & addr2shift { local tmp = rn + addr2shift; export tmp; }
addrmode2: [rn,-addr2shift] is I25=1 & P24=1 & U23=0 & W21=0 & rn & addr2shift { local tmp = rn - addr2shift; export tmp; }

# pre-indexed writeback (P24=1 & W21=1): Rn is updated first and the new Rn
# is used as the access address.
addrmode2: [rn,"#"^offset_12]! is I25=0 & P24=1 & U23=1 & W21=1 & rn & offset_12 { rn = rn + offset_12; export rn; }
addrmode2: [rn,"#"^noff]! is I25=0 & P24=1 & U23=0 & W21=1 & rn & offset_12 [ noff = -offset_12; ] { rn = rn + noff; export rn; }
addrmode2: [rn,addr2shift]! is I25=1 & P24=1 & U23=1 & W21=1 & rn & addr2shift { rn = rn + addr2shift; export rn; }
addrmode2: [rn,-addr2shift]! is I25=1 & P24=1 & U23=0 & W21=1 & rn & addr2shift { rn = rn - addr2shift; export rn; }

# post-indexed writeback (P24=0): the original Rn is the access address, and
# Rn is updated afterwards.
addrmode2: [rn],"#"^offset_12 is I25=0 & P24=0 & U23=1 & W21=0 & rn & offset_12 { local tmp = rn; rn = rn + offset_12; export tmp; }
addrmode2: [rn],"#"^noff is I25=0 & P24=0 & U23=0 & W21=0 & rn & offset_12 [ noff = -offset_12; ] { local tmp = rn; rn = rn + noff; export tmp; }
addrmode2: [rn],addr2shift is I25=1 & P24=0 & U23=1 & W21=0 & rn & addr2shift { local tmp = rn; rn = rn + addr2shift; export tmp; }
addrmode2: [rn],-addr2shift is I25=1 & P24=0 & U23=0 & W21=0 & rn & addr2shift { local tmp = rn; rn = rn - addr2shift; export tmp; }

# special-form post-indexed writeback (P24=0 & W21=1) for the unprivileged
# accesses: ldrbt, ldrt, strbt, etc.  Semantics here are identical to the
# normal post-indexed forms; only the instruction encoding differs.
addrmode2: [rn],"#"^offset_12 is I25=0 & P24=0 & U23=1 & W21=1 & rn & offset_12 { local tmp = rn; rn = rn + offset_12; export tmp; }
addrmode2: [rn],"#"^noff is I25=0 & P24=0 & U23=0 & W21=1 & rn & offset_12 [ noff = -offset_12; ] { local tmp = rn; rn = rn + noff; export tmp; }
addrmode2: [rn],addr2shift is I25=1 & P24=0 & U23=1 & W21=1 & rn & addr2shift { local tmp = rn; rn = rn + addr2shift; export tmp; }
addrmode2: [rn],-addr2shift is I25=1 & P24=0 & U23=0 & W21=1 & rn & addr2shift { local tmp = rn; rn = rn - addr2shift; export tmp; }

###########################
# addrmode3 is the resulting address for Addressing Mode 3.
# It takes care of bits 27-0, except for the L, S, and H flags and the Rd
# register.  The Rn register is taken care of including any possible
# write-back.  It returns a varnode containing the address.
# The 8-bit immediate is split across the encoding as immedH:immedL.
###########################
# PC-relative literal forms
addrmode3: [reloff] is P24=1 & U23=1 & c2122=2 & c1619=15 & immedH & immedL [ reloff=inst_start+8+((immedH<<4) | immedL);] { export *:4 reloff; }
addrmode3: [reloff] is P24=1 & U23=0 & c2122=2 & c1619=15 & immedH & immedL [ reloff=inst_start+8-((immedH<<4) | immedL);] { export *:4 reloff; }
# immediate offset, no writeback
addrmode3: [rn,"#"^off8] is P24=1 & U23=1 & c2122=2 & rn & immedH & immedL [ off8=(immedH<<4)|immedL;] { local tmp = rn + off8; export tmp; }
addrmode3: [rn,"#"^noff8] is P24=1 & U23=0 & c2122=2 & rn & immedH & immedL [ noff8=-((immedH<<4)|immedL);] { local tmp = rn + noff8; export tmp; }
# register offset, no writeback
addrmode3: [rn,rm] is P24=1 & U23=1 & c2122=0 & rn & c0811=0 & rm { local tmp = rn + rm; export tmp; }
addrmode3: [rn,-rm] is P24=1 & U23=0 & c2122=0 & rn & c0811=0 & rm { local tmp = rn - rm; export tmp; }
# pre-indexed writeback forms
addrmode3: [rn,"#"^off8]! is P24=1 & U23=1 & c2122=3 & rn & immedH & immedL [ off8=(immedH<<4)|immedL;] { rn=rn + off8; export rn; }
addrmode3: [rn,"#"^noff8]! is P24=1 & U23=0 & c2122=3 & rn & immedH & immedL [ noff8=-((immedH<<4)|immedL);] { rn=rn + noff8; export rn; }
addrmode3: [rn,rm]!
is P24=1 & U23=1 & c2122=1 & rn & c0811=0 & rm { rn = rn+rm; export rn; }  # completes "addrmode3: [rn,rm]!" begun on the previous line
addrmode3: [rn,-rm]! is P24=1 & U23=0 & c2122=1 & rn & c0811=0 & rm { rn = rn - rm; export rn; }
# post-indexed (P24=0): original Rn is the access address, then Rn is updated
addrmode3: [rn],"#"^off8 is P24=0 & U23=1 & c2222=1 & rn & immedH & immedL [ off8=(immedH<<4)|immedL;] { local tmp=rn; rn=rn + off8; export tmp; }
addrmode3: [rn],"#"^noff8 is P24=0 & U23=0 & c2222=1 & rn & immedH & immedL [ noff8=-((immedH<<4)|immedL);] { local tmp=rn; rn=rn + noff8; export tmp; }
addrmode3: [rn],rm is P24=0 & U23=1 & c2222=0 & rn & c0811=0 & rm { local tmp=rn; rn=rn+rm; export tmp; }
addrmode3: [rn],-rm is P24=0 & U23=0 & c2222=0 & rn & c0811=0 & rm { local tmp=rn; rn=rn-rm; export tmp; }

############################
# Addressing Mode 4. These 4 types take care of the register_list argument in
# Addressing Mode 4 (LDM/STM).  Each chain below walks bits 0-15 of the
# register list: level N handles bit (15-N) for the incrementing chains (or
# bit N for the decrementing ones).  Each level has three alternatives:
#   - this bit set and some lower level also active: chain + transfer
#   - this bit set and all remaining bits clear: transfer only (chain end)
#   - this bit clear: pass through
# mult_addr is the running transfer address, set up by the reglist forms.
############################

# ldlist_inc is the list of registers to be loaded using IA or IB in Addressing Mode 4
linc15: r0 is c0000=1 & r0 { r0 = * mult_addr; mult_addr = mult_addr + 4; }
linc15: is c0000=0 { }
linc14: linc15,r1 is c0101=1 & linc15 & r1 { r1 = * mult_addr; mult_addr = mult_addr + 4; }
linc14: r1 is c0101=1 & c0000=0 & r1 { r1 = * mult_addr; mult_addr = mult_addr + 4; }
linc14: linc15 is c0101=0 & linc15 { }
linc13: linc14,r2 is c0202=1 & linc14 & r2 { r2 = * mult_addr; mult_addr = mult_addr + 4; }
linc13: r2 is c0202=1 & c0001=0 & r2 { r2 = * mult_addr; mult_addr = mult_addr + 4; }
linc13: linc14 is c0202=0 & linc14 { }
linc12: linc13,r3 is c0303=1 & linc13 & r3 { r3 = * mult_addr; mult_addr = mult_addr + 4; }
linc12: r3 is c0303=1 & c0002=0 & r3 { r3 = * mult_addr; mult_addr = mult_addr + 4; }
linc12: linc13 is c0303=0 & linc13 { }
linc11: linc12,r4 is c0404=1 & linc12 & r4 { r4 = * mult_addr; mult_addr = mult_addr + 4; }
linc11: r4 is c0404=1 & c0003=0 & r4 { r4 = * mult_addr; mult_addr = mult_addr + 4; }
linc11: linc12 is c0404=0 & linc12 { }
linc10: linc11,r5 is c0505=1 & linc11 & r5 { r5 = * mult_addr; mult_addr = mult_addr + 4; }
linc10: r5 is c0505=1 & c0004=0 & r5 { r5 = * mult_addr; mult_addr = mult_addr + 4; }
linc10: linc11 is c0505=0 & linc11 { }
linc9: linc10,r6 is c0606=1 & linc10 & r6 { r6 = * mult_addr; mult_addr = mult_addr + 4; }
linc9: r6 is c0606=1 & c0005=0 & r6 { r6 = * mult_addr; mult_addr = mult_addr + 4; }
linc9: linc10 is c0606=0 & linc10 { }
linc8: linc9,r7 is c0707=1 & linc9 & r7 { r7 = * mult_addr; mult_addr = mult_addr + 4; }
linc8: r7 is c0707=1 & c0006=0 & r7 { r7 = * mult_addr; mult_addr = mult_addr + 4; }
linc8: linc9 is c0707=0 & linc9 { }
linc7: linc8,r8 is c0808=1 & linc8 & r8 { r8 = * mult_addr; mult_addr = mult_addr + 4; }
linc7: r8 is c0808=1 & c0007=0 & r8 { r8 = * mult_addr; mult_addr = mult_addr + 4; }
linc7: linc8 is c0808=0 & linc8 { }
linc6: linc7,r9 is c0909=1 & linc7 & r9 { r9 = * mult_addr; mult_addr = mult_addr + 4; }
linc6: r9 is c0909=1 & c0008=0 & r9 { r9 = * mult_addr; mult_addr = mult_addr + 4; }
linc6: linc7 is c0909=0 & linc7 { }
linc5: linc6,r10 is c1010=1 & linc6 & r10 { r10 = * mult_addr; mult_addr = mult_addr + 4; }
linc5: r10 is c1010=1 & c0009=0 & r10 { r10 = * mult_addr; mult_addr = mult_addr + 4; }
linc5: linc6 is c1010=0 & linc6 { }
linc4: linc5,r11 is c1111=1 & linc5 & r11 { r11 = * mult_addr; mult_addr = mult_addr + 4; }
linc4: r11 is c1111=1 & c0010=0 & r11 { r11 = * mult_addr; mult_addr = mult_addr + 4; }
linc4: linc5 is c1111=0 & linc5 { }
linc3: linc4,r12 is c1212=1 & linc4 & r12 { r12 = * mult_addr; mult_addr = mult_addr + 4; }
linc3: r12 is c1212=1 & c0011=0 & r12 { r12 = * mult_addr; mult_addr = mult_addr + 4; }
linc3: linc4 is c1212=0 & linc4 { }
linc2: linc3,sp is c1313=1 & linc3 & sp { sp = * mult_addr; mult_addr = mult_addr + 4; }
linc2: sp is c1313=1 & c0012=0 & sp { sp = * mult_addr; mult_addr = mult_addr + 4; }
linc2: linc3 is c1313=0 & linc3 { }
linc1: linc2,lr is c1414=1 & linc2 & lr { lr = * mult_addr; mult_addr = mult_addr + 4; }
linc1: lr is c1414=1 & c0013=0 & lr { lr = * mult_addr; mult_addr = mult_addr + 4; }
linc1: linc2 is c1414=0 & linc2 { }
linc0: linc1,pc is c1515=1 & linc1 & pc { pc = * mult_addr; mult_addr = mult_addr + 4; }
linc0: pc is c1515=1 & c0014=0 & pc { pc = * mult_addr; mult_addr = mult_addr + 4; }
linc0: linc1 is c1515=0 & linc1 { }
ldlist_inc: {linc0} is linc0 { }

# stlist_inc is the list of registers to be stored using IA or IB in Addressing Mode 4
sinc15: r0 is c0000=1 & r0 { * mult_addr = r0; mult_addr = mult_addr + 4; }
sinc15: is c0000=0 { }
sinc14: sinc15,r1 is c0101=1 & sinc15 & r1 { * mult_addr = r1; mult_addr = mult_addr + 4; }
sinc14: r1 is c0101=1 & c0000=0 & r1 { * mult_addr = r1; mult_addr = mult_addr + 4; }
sinc14: sinc15 is c0101=0 & sinc15 { }
sinc13: sinc14,r2 is c0202=1 & sinc14 & r2 { * mult_addr = r2; mult_addr = mult_addr + 4; }
sinc13: r2 is c0202=1 & c0001=0 & r2 { * mult_addr = r2; mult_addr = mult_addr + 4; }
sinc13: sinc14 is c0202=0 & sinc14 { }
sinc12: sinc13,r3 is c0303=1 & sinc13 & r3 { * mult_addr = r3; mult_addr = mult_addr + 4; }
sinc12: r3 is c0303=1 & c0002=0 & r3 { * mult_addr = r3; mult_addr = mult_addr + 4; }
sinc12: sinc13 is c0303=0 & sinc13 { }
sinc11: sinc12,r4 is c0404=1 & sinc12 & r4 { * mult_addr = r4; mult_addr = mult_addr + 4; }
sinc11: r4 is c0404=1 & c0003=0 & r4 { * mult_addr = r4; mult_addr = mult_addr + 4; }
sinc11: sinc12 is c0404=0 & sinc12 { }
sinc10: sinc11,r5 is c0505=1 & sinc11 & r5 { * mult_addr = r5; mult_addr = mult_addr + 4; }
sinc10: r5 is c0505=1 & c0004=0 & r5 { * mult_addr = r5; mult_addr = mult_addr + 4; }
sinc10: sinc11 is c0505=0 & sinc11 { }
sinc9: sinc10,r6 is c0606=1 & sinc10 & r6 { * mult_addr = r6; mult_addr = mult_addr + 4; }
sinc9: r6 is c0606=1 & c0005=0 & r6 { * mult_addr = r6; mult_addr = mult_addr + 4; }
sinc9: sinc10 is c0606=0 & sinc10 { }
sinc8: sinc9,r7 is c0707=1 & sinc9 & r7 { * mult_addr = r7; mult_addr = mult_addr + 4; }
sinc8: r7 is c0707=1 & c0006=0 & r7 { * mult_addr = r7; mult_addr = mult_addr + 4; }
sinc8: sinc9 is c0707=0 & sinc9 { }
sinc7: sinc8,r8 is c0808=1 & sinc8 & r8 { * mult_addr = r8; mult_addr = mult_addr + 4; }
sinc7: r8 is c0808=1 & c0007=0 & r8 { * mult_addr = r8; mult_addr = mult_addr + 4; }
sinc7: sinc8 is c0808=0 & sinc8 { }
sinc6: sinc7,r9 is c0909=1 & sinc7 & r9 { * mult_addr = r9; mult_addr = mult_addr + 4; }
sinc6: r9 is c0909=1 & c0008=0 & r9 { * mult_addr = r9; mult_addr = mult_addr + 4; }
sinc6: sinc7 is c0909=0 & sinc7 { }
sinc5: sinc6,r10 is c1010=1 & sinc6 & r10 { * mult_addr = r10; mult_addr = mult_addr + 4; }
sinc5: r10 is c1010=1 & c0009=0 & r10 { * mult_addr = r10; mult_addr = mult_addr + 4; }
sinc5: sinc6 is c1010=0 & sinc6 { }
sinc4: sinc5,r11 is c1111=1 & sinc5 & r11 { * mult_addr = r11; mult_addr = mult_addr + 4; }
sinc4: r11 is c1111=1 & c0010=0 & r11 { * mult_addr = r11; mult_addr = mult_addr + 4; }
sinc4: sinc5 is c1111=0 & sinc5 { }
sinc3: sinc4,r12 is c1212=1 & sinc4 & r12 { * mult_addr = r12; mult_addr = mult_addr + 4; }
sinc3: r12 is c1212=1 & c0011=0 & r12 { * mult_addr = r12; mult_addr = mult_addr + 4; }
sinc3: sinc4 is c1212=0 & sinc4 { }
sinc2: sinc3,sp is c1313=1 & sinc3 & sp { * mult_addr = sp; mult_addr = mult_addr + 4; }
sinc2: sp is c1313=1 & c0012=0 & sp { * mult_addr = sp; mult_addr = mult_addr + 4; }
sinc2: sinc3 is c1313=0 & sinc3 { }
sinc1: sinc2,lr is c1414=1 & sinc2 & lr { * mult_addr = lr; mult_addr = mult_addr + 4; }
sinc1: lr is c1414=1 & c0013=0 & lr { * mult_addr = lr; mult_addr = mult_addr + 4; }
sinc1: sinc2 is c1414=0 & sinc2 { }
# storing pc stores the current instruction address + 8 (pipeline value)
sinc0: sinc1,pc is c1515=1 & sinc1 & pc { *:4 mult_addr = (inst_start + 8); mult_addr = mult_addr + 4; }
sinc0: pc is c1515=1 & c0014=0 & pc { *:4 mult_addr = (inst_start + 8); mult_addr = mult_addr + 4; }
sinc0: sinc1 is c1515=0 & sinc1 { }
stlist_inc: {sinc0} is sinc0 { }

# ldlist_dec is the list of registers to be loaded using DA or DB in Addressing Mode 4
# (highest register first, address decreasing)
ldec15: pc is c1515=1 & pc { pc = * mult_addr; mult_addr = mult_addr - 4; }
ldec15: is c1515=0 { }
ldec14: lr,ldec15 is c1414=1 & ldec15 & lr { lr = * mult_addr; mult_addr = mult_addr - 4; }
ldec14: lr is c1414=1 & c1515=0 & lr { lr = * mult_addr; mult_addr = mult_addr - 4; }
ldec14: ldec15 is c1414=0 & ldec15 { }
ldec13: sp,ldec14 is c1313=1 & ldec14 & sp { sp = * mult_addr; mult_addr = mult_addr - 4; }
ldec13: sp is c1313=1 & c1415=0 & sp { sp = * mult_addr; mult_addr = mult_addr - 4; }
ldec13: ldec14 is c1313=0 & ldec14 { }
ldec12: r12,ldec13 is c1212=1 & ldec13 & r12 { r12 = * mult_addr; mult_addr = mult_addr - 4; }
ldec12: r12 is c1212=1 & c1315=0 & r12 { r12 = * mult_addr; mult_addr = mult_addr - 4; }
ldec12: ldec13 is c1212=0 & ldec13 { }
ldec11: r11,ldec12 is c1111=1 & ldec12 & r11 { r11 = * mult_addr; mult_addr = mult_addr - 4; }
ldec11: r11 is c1111=1 & c1215=0 & r11 { r11 = * mult_addr; mult_addr = mult_addr - 4; }
ldec11: ldec12 is c1111=0 & ldec12 { }
ldec10: r10,ldec11 is c1010=1 & ldec11 & r10 { r10 = * mult_addr; mult_addr = mult_addr - 4; }
ldec10: r10 is c1010=1 & c1115=0 & r10 { r10 = * mult_addr; mult_addr = mult_addr - 4; }
ldec10: ldec11 is c1010=0 & ldec11 { }
ldec9: r9,ldec10 is c0909=1 & ldec10 & r9 { r9 = * mult_addr; mult_addr = mult_addr - 4; }
ldec9: r9 is c0909=1 & c1015=0 & r9 { r9 = * mult_addr; mult_addr = mult_addr - 4; }
ldec9: ldec10 is c0909=0 & ldec10 { }
ldec8: r8,ldec9 is c0808=1 & ldec9 & r8 { r8 = * mult_addr; mult_addr = mult_addr - 4; }
ldec8: r8 is c0808=1 & c0915=0 & r8 { r8 = * mult_addr; mult_addr = mult_addr - 4; }
ldec8: ldec9 is c0808=0 & ldec9 { }
ldec7: r7,ldec8 is c0707=1 & ldec8 & r7 { r7 = * mult_addr; mult_addr = mult_addr - 4; }
ldec7: r7 is c0707=1 & c0815=0 & r7 { r7 = * mult_addr; mult_addr = mult_addr - 4; }
ldec7: ldec8 is c0707=0 & ldec8 { }
ldec6: r6,ldec7 is c0606=1 & ldec7 & r6 { r6 = * mult_addr; mult_addr = mult_addr - 4; }
ldec6: r6 is c0606=1 & c0715=0 & r6 { r6 = * mult_addr; mult_addr = mult_addr - 4; }
ldec6: ldec7 is c0606=0 & ldec7 { }
ldec5: r5,ldec6 is c0505=1 & ldec6 & r5 { r5 = * mult_addr; mult_addr = mult_addr - 4; }
ldec5: r5 is c0505=1 & c0615=0 & r5 { r5 = * mult_addr; mult_addr = mult_addr - 4; }
ldec5: ldec6 is c0505=0 & ldec6 { }
ldec4: r4,ldec5 is c0404=1 & ldec5 & r4 { r4 = * mult_addr; mult_addr = mult_addr - 4; }
ldec4: r4 is c0404=1 & c0515=0 & r4 { r4 = * mult_addr; mult_addr = mult_addr - 4; }
ldec4: ldec5 is c0404=0 & ldec5 { }
ldec3: r3,ldec4 is c0303=1 & ldec4 & r3 { r3 = * mult_addr; mult_addr = mult_addr - 4; }
ldec3: r3 is c0303=1 & c0415=0 & r3 { r3 = * mult_addr; mult_addr = mult_addr - 4; }
ldec3: ldec4 is c0303=0 & ldec4 { }
ldec2: r2,ldec3 is c0202=1 & ldec3 & r2 { r2 = * mult_addr; mult_addr = mult_addr - 4; }
ldec2: r2 is c0202=1 & c0315=0 & r2 { r2 = * mult_addr; mult_addr = mult_addr - 4; }
ldec2: ldec3 is c0202=0 & ldec3 { }
ldec1: r1,ldec2 is c0101=1 & ldec2 & r1 { r1 = * mult_addr; mult_addr = mult_addr - 4; }
ldec1: r1 is c0101=1 & c0215=0 & r1 { r1 = * mult_addr; mult_addr = mult_addr - 4; }
ldec1: ldec2 is c0101=0 & ldec2 { }
ldec0: r0,ldec1 is c0000=1 & ldec1 & r0 { r0 = * mult_addr; mult_addr = mult_addr - 4; }
ldec0: r0 is c0000=1 & c0115=0 & r0 { r0 = * mult_addr; mult_addr = mult_addr - 4; }
ldec0: ldec1 is c0000=0 & ldec1 { }
ldlist_dec: {ldec0} is ldec0 { }

# stlist_dec is the list of registers to be stored using DA or DB in Addressing Mode 4
# storing pc stores the current instruction address + 8 (pipeline value)
sdec15: pc is c1515=1 & pc { *:4 mult_addr = (inst_start + 8); mult_addr = mult_addr - 4; }
sdec15: is c1515=0 { }
sdec14: lr,sdec15 is c1414=1 & sdec15 & lr { * mult_addr=lr; mult_addr = mult_addr - 4; }
sdec14: lr is c1414=1 & c1515=0 & lr { * mult_addr=lr; mult_addr = mult_addr - 4; }
sdec14: sdec15 is c1414=0 & sdec15 { }
sdec13: sp,sdec14 is c1313=1 & sdec14 & sp { * mult_addr=sp; mult_addr = mult_addr - 4; }
sdec13: sp is c1313=1 & c1415=0 & sp { * mult_addr=sp; mult_addr = mult_addr - 4; }
sdec13: sdec14 is c1313=0 & sdec14 { }
sdec12: r12,sdec13 is c1212=1 & sdec13 & r12 { * mult_addr=r12; mult_addr = mult_addr - 4; }
sdec12: r12 is c1212=1 & c1315=0 & r12 { * mult_addr=r12; mult_addr = mult_addr - 4; }
sdec12: sdec13 is c1212=0 & sdec13 { }
sdec11: r11,sdec12 is c1111=1 & sdec12 & r11 { * mult_addr=r11; mult_addr = mult_addr - 4; }
sdec11: r11 is c1111=1 & c1215=0 & r11 { * mult_addr=r11; mult_addr = mult_addr - 4; }
sdec11: sdec12 is c1111=0 & sdec12 { }
sdec10: r10,sdec11 is c1010=1 & sdec11 & r10 { * mult_addr=r10; mult_addr = mult_addr - 4; }
sdec10: r10 is c1010=1 & c1115=0 & r10 { * mult_addr=r10; mult_addr = mult_addr - 4; }
sdec10: sdec11 is c1010=0 & sdec11 { }
sdec9: r9,sdec10 is c0909=1 & sdec10 & r9 { * mult_addr=r9; mult_addr = mult_addr - 4; }
sdec9: r9 is c0909=1 & c1015=0 & r9 { * mult_addr=r9; mult_addr = mult_addr - 4; }
sdec9: sdec10 is c0909=0 & sdec10 { }
sdec8: r8,sdec9 is c0808=1 & sdec9 & r8 { * mult_addr=r8; mult_addr = mult_addr - 4; }
sdec8: r8 is c0808=1 & c0915=0 & r8 { * mult_addr=r8; mult_addr = mult_addr - 4; }
sdec8: sdec9 is c0808=0 & sdec9 { }
sdec7: r7,sdec8 is c0707=1 & sdec8 & r7 { * mult_addr=r7; mult_addr = mult_addr - 4; }
sdec7: r7 is c0707=1 & c0815=0 & r7 { * mult_addr=r7; mult_addr = mult_addr - 4; }
sdec7: sdec8 is c0707=0 & sdec8 { }
sdec6: r6,sdec7 is c0606=1 & sdec7 & r6 { * mult_addr=r6; mult_addr = mult_addr - 4; }
sdec6: r6 is c0606=1 & c0715=0 & r6 { * mult_addr=r6; mult_addr = mult_addr - 4; }
sdec6: sdec7 is c0606=0 & sdec7 { }
sdec5: r5,sdec6 is c0505=1 & sdec6 & r5 { * mult_addr=r5; mult_addr = mult_addr - 4; }
sdec5: r5 is c0505=1 & c0615=0 & r5 { * mult_addr=r5; mult_addr = mult_addr - 4; }
sdec5: sdec6 is c0505=0 & sdec6 { }
sdec4: r4,sdec5 is c0404=1 & sdec5 & r4 { * mult_addr=r4; mult_addr = mult_addr - 4; }
sdec4: r4 is c0404=1 & c0515=0 & r4 { * mult_addr=r4; mult_addr = mult_addr - 4; }
sdec4: sdec5 is c0404=0 & sdec5 { }
sdec3: r3,sdec4 is c0303=1 & sdec4 & r3 { * mult_addr=r3; mult_addr = mult_addr - 4; }
sdec3: r3 is c0303=1 & c0415=0 & r3 { * mult_addr=r3; mult_addr = mult_addr - 4; }
sdec3: sdec4 is c0303=0 & sdec4 { }
sdec2: r2,sdec3 is c0202=1 & sdec3 & r2 { * mult_addr=r2; mult_addr = mult_addr - 4; }
sdec2: r2 is c0202=1 & c0315=0 & r2 { * mult_addr=r2; mult_addr = mult_addr - 4; }
sdec2: sdec3 is c0202=0 & sdec3 { }
sdec1: r1,sdec2 is c0101=1 & sdec2 & r1 { * mult_addr=r1; mult_addr = mult_addr - 4; }
sdec1: r1 is c0101=1 & c0215=0 & r1 { * mult_addr=r1; mult_addr = mult_addr - 4; }
sdec1: sdec2 is c0101=0 & sdec2 { }
sdec0: r0,sdec1 is c0000=1 & sdec1 & r0 { * mult_addr=r0; mult_addr = mult_addr - 4; }
sdec0: r0 is c0000=1 & c0115=0 & r0 { * mult_addr=r0; mult_addr = mult_addr - 4; }
sdec0: sdec1 is c0000=0 & sdec1 { }
stlist_dec: {sdec0} is sdec0 { }

# reglist deals with Addressing Mode 4 (LDM/STM).
# It takes care of bits 0-27.  We assume that alignment checking is turned on.
# P24/U23 select ia/ib/da/db, W21 is writeback, S22 is the "^" (user-bank /
# SPSR) form, L20 distinguishes load (1) from store (0).  The IB/DB forms
# pre-adjust mult_addr by 4 before the transfer chain runs.
reglist: rn,ldlist_inc is P24=0 & U23=1 & S22=0 & W21=0 & L20=1 & rn & ldlist_inc { mult_addr=rn; build ldlist_inc; }
reglist: rn,ldlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=0 & L20=1 & rn & ldlist_inc { mult_addr=rn; build ldlist_inc; }
reglist: rn!,ldlist_inc is P24=0 & U23=1 & S22=0 & W21=1 & L20=1 & rn & ldlist_inc { mult_addr=rn; build ldlist_inc; rn=mult_addr; }
reglist: rn!,ldlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=1 & L20=1 & rn & ldlist_inc { mult_addr=rn; build ldlist_inc; rn=mult_addr; }
reglist: rn,ldlist_inc is P24=1 & U23=1 & S22=0 & W21=0 & L20=1 & rn & ldlist_inc { mult_addr=(rn+4); build ldlist_inc; }
reglist: rn,ldlist_inc"^" is P24=1 & U23=1 & S22=1 & W21=0 & L20=1 & rn & ldlist_inc { mult_addr=(rn+4); build ldlist_inc; }
reglist: rn!,ldlist_inc is P24=1 & U23=1 & S22=0 & W21=1 & L20=1 & rn & ldlist_inc { mult_addr=(rn+4); build ldlist_inc; rn=mult_addr-4; }
reglist: rn!,ldlist_inc"^" is P24=1 & U23=1 & S22=1 & W21=1 & L20=1 & rn & ldlist_inc { mult_addr=(rn+4); build ldlist_inc; rn=mult_addr-4; }
reglist: rn,ldlist_dec is P24=0 & U23=0 & S22=0 & W21=0 & L20=1 & rn & ldlist_dec { mult_addr=rn; build ldlist_dec; }
reglist: rn,ldlist_dec"^" is P24=0 & U23=0 & S22=1 & W21=0 & L20=1 & rn & ldlist_dec { mult_addr=rn; build ldlist_dec; }
reglist: rn!,ldlist_dec is P24=0 & U23=0 & S22=0 & W21=1 & L20=1 & rn & ldlist_dec { mult_addr=rn; build ldlist_dec; rn=mult_addr; }
reglist: rn!,ldlist_dec"^" is P24=0 & U23=0 & S22=1 & W21=1 & L20=1 & rn & ldlist_dec { mult_addr=rn; build ldlist_dec; rn=mult_addr; }
reglist: rn,ldlist_dec is P24=1 & U23=0 & S22=0 & W21=0 & L20=1 & rn & ldlist_dec { mult_addr=(rn-4); build ldlist_dec; }
reglist: rn,ldlist_dec"^" is P24=1 & U23=0 & S22=1 & W21=0 & L20=1 & rn & ldlist_dec { mult_addr=(rn-4); build ldlist_dec; }
reglist: rn!,ldlist_dec is P24=1 & U23=0 & S22=0 & W21=1 & L20=1 & rn & ldlist_dec { mult_addr=(rn-4); build ldlist_dec; rn=mult_addr+4; }
reglist: rn!,ldlist_dec"^" is P24=1 & U23=0 & S22=1 & W21=1 & L20=1 & rn & ldlist_dec { mult_addr=(rn-4); build ldlist_dec; rn=mult_addr+4; }
reglist: rn,stlist_inc is P24=0 & U23=1 & S22=0 & W21=0 & L20=0 & rn & stlist_inc { mult_addr=rn; build stlist_inc; }
reglist: rn,stlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=0 & L20=0 & rn & stlist_inc { mult_addr=rn; build stlist_inc; }
## This is here to allow old versions of this instruction to decode.
## The W-Bit21 is specified as (0) in the manual, meaning it should be 0 but is unpredictable if 1.
## Some older processors did not specify that writeback was unavailable when P24=0 and S22=1,
## which is the system (user-bank) store form.
## I AM ASSUMING that the W-bit is honored on these processors and does update the register!!!!
## This is probably an arbitrary decision, but it keeps with what old processors did.
# STM forms of reglist (L20=0); see the LDM forms above for the flag layout.
reglist: rn,stlist_inc"^" is P24=0 & U23=1 & S22=1 & W21=1 & L20=0 & rn & stlist_inc { mult_addr=rn; build stlist_inc; rn=mult_addr; }
reglist: rn!,stlist_inc is P24=0 & U23=1 & S22=0 & W21=1 & L20=0 & rn & stlist_inc { mult_addr=rn; build stlist_inc; rn=mult_addr; }
reglist: rn,stlist_inc is P24=1 & U23=1 & S22=0 & W21=0 & L20=0 & rn & stlist_inc { mult_addr=(rn+4); build stlist_inc; }
reglist: rn,stlist_inc"^" is P24=1 & U23=1 & S22=1 & W21=0 & L20=0 & rn & stlist_inc { mult_addr=(rn+4); build stlist_inc; }
reglist: rn!,stlist_inc is P24=1 & U23=1 & S22=0 & W21=1 & L20=0 & rn & stlist_inc { mult_addr=(rn+4); build stlist_inc; rn=mult_addr-4; }
reglist: rn,stlist_dec is P24=0 & U23=0 & S22=0 & W21=0 & L20=0 & rn & stlist_dec { mult_addr=rn; build stlist_dec; }
reglist: rn,stlist_dec"^" is P24=0 & U23=0 & S22=1 & W21=0 & L20=0 & rn & stlist_dec { mult_addr=rn; build stlist_dec; }
reglist: rn!,stlist_dec is P24=0 & U23=0 & S22=0 & W21=1 & L20=0 & rn & stlist_dec { mult_addr=rn; build stlist_dec; rn=mult_addr; }
reglist: rn,stlist_dec is P24=1 & U23=0 & S22=0 & W21=0 & L20=0 & rn & stlist_dec { mult_addr=(rn-4); build stlist_dec; }
reglist: rn,stlist_dec"^" is P24=1 & U23=0 & S22=1 & W21=0 & L20=0 & rn & stlist_dec { mult_addr=(rn-4); build stlist_dec; }
reglist: rn!,stlist_dec is P24=1 & U23=0 & S22=0 & W21=1 & L20=0 & rn & stlist_dec { mult_addr=(rn-4); build stlist_dec; rn=mult_addr+4; }

# mdir is for attaching the load/store multiple addressing-mode mnemonic
# (ia/ib/da/db) to the instruction mnemonic.
mdir: "ia" is P24=0 & U23=1 { }
mdir: "ib" is P24=1 & U23=1 { }
mdir: "da" is P24=0 & U23=0 { }
mdir: "db" is P24=1 & U23=0 { }

# addrmode5 is the parameter in Addressing Mode 5 (coprocessor load/store).
# It takes care of bits 27-0 except for the N and L flags, CRd and cp#;
# it handles any possible writeback to Rn.  The 8-bit immediate is a word
# offset, hence the *4 scaling.
addrmode5: [rn,"#"^off8] is P24=1 & U23=1 & W21=0 & rn & immed [ off8=immed*4; ] { local tmp = rn + off8; export tmp; }
addrmode5: [rn,"#"^noff8] is P24=1 & U23=0 & W21=0 & rn & immed [ noff8=-(immed*4); ] { local tmp = rn + noff8; export tmp; }
addrmode5: [rn,"#"^off8]! is P24=1 & U23=1 & W21=1 & rn & immed [ off8=immed*4; ] { rn = rn + off8; export rn; }
addrmode5: [rn,"#"^noff8]! is P24=1 & U23=0 & W21=1 & rn & immed [ noff8=-(immed*4); ] { rn = rn + noff8; export rn; }
addrmode5: [rn],"#"^off8 is P24=0 & U23=1 & W21=1 & rn & immed [ off8=immed*4; ] { local tmp = rn; rn = rn+off8; export tmp; }
addrmode5: [rn],"#"^noff8 is P24=0 & U23=0 & W21=1 & rn & immed [ noff8=-(immed*4); ] { local tmp = rn; rn = rn + noff8; export tmp; }
# unindexed form: immed is coprocessor-defined option bits, Rn unchanged
addrmode5: [rn],{immed} is P24=0 & U23=1 & W21=0 & rn & immed { export rn; }

# cpsrmask is the resulting CPSR byte mask for the msr instruction:
# mask bit 0 = c (bits 7:0), 1 = x (15:8), 2 = s (23:16), 3 = f (31:24).
cpsrmask: is mask=0 { export 0:4; }
cpsrmask: "cpsr_c" is mask=1 { export 0xff:4; }
cpsrmask: "cpsr_x" is mask=2 { export 0xff00:4; }
cpsrmask: "cpsr_cx" is mask=3 { export 0xffff:4; }
cpsrmask: "cpsr_s" is mask=4 { export 0xff0000:4; }
cpsrmask: "cpsr_cs" is mask=5 { export 0xff00ff:4; }
cpsrmask: "cpsr_xs" is mask=6 { export 0xffff00:4; }
cpsrmask: "cpsr_cxs" is mask=7 { export 0xffffff:4; }
cpsrmask: "cpsr_f" is mask=8 { export 0xff000000:4; }
cpsrmask: "cpsr_cf" is mask=9 { export 0xff0000ff:4; }
cpsrmask: "cpsr_xf" is mask=10 { export 0xff00ff00:4; }
cpsrmask: "cpsr_cxf" is mask=11 { export 0xff00ffff:4; }
cpsrmask: "cpsr_sf" is mask=12 { export 0xffff0000:4; }
cpsrmask: "cpsr_csf" is mask=13 { export 0xffff00ff:4; }
cpsrmask: "cpsr_xsf" is mask=14 { export 0xffffff00:4; }
cpsrmask: "cpsr_cxsf" is mask=15 { export 0xffffffff:4; }

# spsrmask is the mask for SPSR in the msr instruction (same bit layout).
spsrmask: is mask=0 { export 0:4; }
spsrmask: "spsr_c" is mask=1 { export 0xff:4; }
spsrmask: "spsr_x" is mask=2 { export 0xff00:4; }
spsrmask: "spsr_cx" is mask=3 { export 0xffff:4; }
spsrmask: "spsr_s" is mask=4 { export 0xff0000:4; }
spsrmask: "spsr_cs" is mask=5 { export 0xff00ff:4; }
spsrmask: "spsr_xs" is mask=6 { export 0xffff00:4; }
spsrmask: "spsr_cxs" is mask=7 { export 0xffffff:4; }
spsrmask: "spsr_f" is mask=8 { export 0xff000000:4; } spsrmask: "spsr_cf" is mask=9 { export 0xff0000ff:4; } spsrmask: "spsr_xf" is mask=10 { export 0xff00ff00:4; } spsrmask: "spsr_cxf" is mask=11 { export 0xff00ffff:4; } spsrmask: "spsr_sf" is mask=12 { export 0xffff0000:4; } spsrmask: "spsr_csf" is mask=13 { export 0xffff00ff:4; } spsrmask: "spsr_xsf" is mask=14 { export 0xffffff00:4; } spsrmask: "spsr_cxsf" is mask=15 { export 0xffffffff:4; } ##################### ###### immediate bit-number data for unsigned/signed saturated instructions ##################### @if defined(VERSION_6) sSatImm5: "#"^satimm is satimm5 [ satimm = satimm5 + 1; ] { export *[const]:2 satimm; } sSatImm4: "#"^satimm is satimm4 [ satimm = satimm4 + 1; ] { export *[const]:2 satimm; } uSatImm5: "#"^satimm5 is satimm5 { export *[const]:2 satimm5; } uSatImm4: "#"^satimm4 is satimm4 { export *[const]:2 satimm4; } @endif # VERSION_6 @if defined(VERSION_6K) || defined(VERSION_6T2) optionImm: "#"^immed4 is immed4 { export *[const]:4 immed4; } @endif @if defined(VERSION_6T2) || defined(VERSION_7) lsbImm: "#"^lsb is lsb { export *[const]:4 lsb; } msbImm: "#"^msb is msb { export *[const]:4 msb; } widthMinus1: "#"^width is msb [ width = msb + 1; ] { export *[const]:4 msb; } bitWidth: "#"^w is lsb & msb [ w = msb - lsb + 1; ] { export *[const]:4 w; } @endif # VERSION_6T2 || VERSION_7 # # Modes for SRS instructions # @if defined(VERSION_6) SRSMode: "usr" is srsMode=8 & c0004 { export *[const]:1 c0004; } SRSMode: "fiq" is srsMode=9 & c0004 { export *[const]:1 c0004; } SRSMode: "irq" is srsMode=10 & c0004 { export *[const]:1 c0004; } SRSMode: "svc" is srsMode=11 & c0004 { export *[const]:1 c0004; } SRSMode: "mon" is srsMode=14 & c0004 { export *[const]:1 c0004; } SRSMode: "abt" is srsMode=15 & c0004 { export *[const]:1 c0004; } SRSMode: "und" is srsMode=19 & c0004 { export *[const]:1 c0004; } SRSMode: "sys" is srsMode=23 & c0004 { export *[const]:1 c0004; } SRSMode: "#"^srsMode is srsMode { export 
*[const]:1 srsMode; } @endif # VERSION_6 # Perform ARMcond check phase and set ARMcond context variable :^instruction is $(AMODE) & ARMcondCk=0 & (bit31=0|bit30=0|bit29=0|bit28=0) & instruction [ ARMcondCk=1; ARMcond=1; ] {} :^instruction is ARMcondCk=0 & instruction [ ARMcondCk=1; ARMcond=0; ] {} # Ensure that the condition check phase has been completed with : ARMcondCk=1 { ################################################# # # Include the SIMD/VFP instructions before the # other ARM instructions to avoid incorrect # constructor matching for those that use the # COND subconstructor. This also ensures # that the various VFP instructions supersede the # CDP/MCR/MRC general coprocessor instructions # ################################################# @if defined(INCLUDE_NEON) @include "ARMneon.sinc" @endif ################################################# # # Do the same now for ARMv8, which also has neon # ################################################# @if defined(VERSION_8) @include "ARMv8.sinc" @endif # VERSION_8 ################################################ # # These instructions must come first because the cond pattern match # is more specific than the subconstructor COND. 
If a base intruction # matches and then COND fails (cond=14 or cond=15) then the disassembly # will fail # ################################################ @if defined(VERSION_5) # Exception Generation and UDF # immed12_4 used in Exception Generation and Media instructions class immed12_4: "#"^tmp is $(AMODE) & immed12 & immed4 [tmp = (immed12 << 4) | immed4; ] { export *[const]:4 tmp; } :hlt immed12_4 is $(AMODE) & cond=0xe & c2027=0x10 & c0407=0x7 & immed12_4 { software_hlt(immed12_4); } :bkpt immed12_4 is $(AMODE) & cond=0xe & c2027=0x12 & c0407=0x7 & immed12_4 { software_bkpt(immed12_4); } :hvc immed12_4 is $(AMODE) & cond=0xe & c2027=0x14 & c0407=0x7 & immed12_4 { software_hvc(immed12_4); } @if defined(VERSION_6T2) || defined(VERSION_7) define pcodeop SG; :sg is TMode=1 & thv_c0031=0xe97fe97f { SG(); } @endif # Requires Security Extensions :smc^COND immed4 is $(AMODE) & COND & c2027=0x16 & c0407=0x7 & immed4 { build COND; software_smc(immed4:4); } @if defined(VERSION_6T2) || defined(VERSION_7) define pcodeop TT; :tt^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=0 & thv_bit06=0 & thv_Rt2 & thv_Rn { thv_Rt2 = TT(thv_Rn); } define pcodeop TTA; :tta^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=1 & thv_bit06=0 & thv_Rt2 & thv_Rn { thv_Rt2 = TTA(thv_Rn); } define pcodeop TTAT; :ttat^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=1 & thv_bit06=1 & thv_Rt2 & thv_Rn { thv_Rt2 = TTAT(thv_Rn); } define pcodeop TTT; :ttt^ItCond thv_Rt2, thv_Rn is TMode=1 & ItCond & thv_c2031=0b111010000100 & thv_c1215=0b1111 & thv_bit07=0 & thv_bit06=1 & thv_Rt2 & thv_Rn { thv_Rt2 = TTT(thv_Rn); } @endif :udf immed12_4 is $(AMODE) & cond=0xe & c2027=0x7f & c0407=0xf & immed12_4 { local excaddr:4 = inst_start; local target:4 = software_udf(immed12_4:4, excaddr); goto [target]; } @endif # VERSION_5 @if defined(VERSION_6) AFLAG: 
"a" is c0808=1 & c1819=2 { enableDataAbortInterrupts(); } AFLAG: "a" is c0808=1 { disableDataAbortInterrupts(); } AFLAG: is c0808=0 { } IFLAG: "i" is c0707=1 & c1819=2 { enableIRQinterrupts(); } IFLAG: "i" is c0707=1 { disableIRQinterrupts(); } IFLAG: is c0707=0 { } FFLAG: "f" is c0606=1 & c1819=2 { enableFIQinterrupts(); } FFLAG: "f" is c0606=1 { disableFIQinterrupts(); } FFLAG: is c0606=0 { } IFLAGS: AFLAG^IFLAG^FFLAG is AFLAG & IFLAG & FFLAG { } SetMode: "#"^16 is c0004=0x10 { setUserMode(); } SetMode: "#"^17 is c0004=0x11 { setFIQMode(); } SetMode: "#"^18 is c0004=0x12 { setIRQMode(); } SetMode: "#"^19 is c0004=0x13 { setSupervisorMode(); } SetMode: "#"^22 is c0004=0x16 { setMonitorMode(); } SetMode: "#"^23 is c0004=0x17 { setAbortMode(); } SetMode: "#"^27 is c0004=0x1b { setUndefinedMode(); } SetMode: "#"^31 is c0004=0x1f { setSystemMode(); } :cps SetMode is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=0 & c1717=1 & c0916=0 & c0508=0 & SetMode { } :cpsie IFLAGS is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=2 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } :cpsid IFLAGS is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=3 & c1717=0 & c0916=0 & c0505=0 & c0004=0 & IFLAGS { } :cpsie IFLAGS, SetMode is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=2 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } :cpsid IFLAGS, SetMode is $(AMODE) & ARMcond=0 & cond=15 & c2027=16 & c1819=3 & c1717=1 & c0916=0 & c0505=0 & IFLAGS & SetMode { } @endif # VERSION_6 @if defined(VERSION_5E) :pld addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=1 & c2022=5 & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { HintPreloadData(addrmode2); } # prevent literal form getting matched by pldw :pld addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=1 & c2022=5 & c1619=0xf & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { HintPreloadData(addrmode2); } @endif # VERSION_5E @if defined(VERSION_7) :pldw addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=1 & c2022=1 
& c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { HintPreloadDataForWrite(addrmode2); } :pli addrmode2 is $(AMODE) & cond=0xf & c2627=1 & c2424=0 & c2022=5 & c1215=0xf & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { HintPreloadInstruction(addrmode2); } @endif # VERSION_7 @if defined(VERSION_6) :rfeia rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn; cpsr = *ptr; ptr = ptr + 4; pc = *ptr; return [pc]; } :rfeib rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn + 4; cpsr = *ptr; ptr = ptr + 4; pc = *ptr; return [pc]; } :rfeda rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn; cpsr = *ptr; ptr = ptr - 4; pc = *ptr; return [pc]; } :rfedb rn is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=0 & L20=1 & rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = rn - 4; cpsr = *ptr; ptr = ptr - 4; pc = *ptr; return [pc]; } :rfeia Rn! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn; cpsr = *ptr; ptr = ptr + 4; pc = *ptr; Rn = ptr + 4; return [pc]; } :rfeib Rn! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn + 4; cpsr = *ptr; ptr = ptr + 4; pc = *ptr; Rn = ptr; return [pc]; } :rfeda Rn! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn; cpsr = *ptr; ptr = ptr - 4; pc = *ptr; Rn = ptr - 4; return [pc]; } :rfedb Rn! 
is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & S22=0 & W21=1 & L20=1 & Rn & c1215=0 & c0811=10 & c0007=0 { # register list is always: pc, cpsr ptr:4 = Rn - 4; cpsr = *ptr; ptr = ptr - 4; pc = *ptr; Rn = ptr; return [pc]; } :srsia SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=0 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; *ptr = lr; ptr = ptr + 4; *ptr = spsr; ptr = ptr + 4; } :srsib SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp + 4; *ptr = lr; ptr = ptr + 4; *ptr = spsr; } :srsda SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; *ptr = lr; ptr = ptr - 4; *ptr = spsr; ptr = ptr - 4; } :srsdb SRSMode is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & W21=0 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp - 4; *ptr = lr; ptr = ptr - 4; *ptr = spsr; } :srsia SRSMode! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=1 & S22=1 & W21=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; *ptr = lr; ptr = ptr + 4; *ptr = spsr; ptr = ptr + 4; sp = ptr; } :srsib SRSMode! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=1 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp + 4; *ptr = lr; ptr = ptr + 4; *ptr = spsr; sp = ptr; } :srsda SRSMode! is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=0 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; *ptr = lr; ptr = ptr - 4; *ptr = spsr; ptr = ptr - 4; sp = ptr; } :srsdb SRSMode! 
is $(AMODE) & ARMcond=0 & cond=15 & c2527=4 & P24=1 & U23=0 & W21=1 & S22=1 & L20=0 & c1215=0 & c0811=5 & c0507=0 & SRSMode { # register list is always: r14, spsr ptr:4 = sp; ptr = ptr - 4; *ptr = lr; ptr = ptr - 4; *ptr = spsr; sp = ptr; } @endif # VERSION_6 @if defined(VERSION_5) :stc2 cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 { t_cpn:4 = cpn; coprocessor_store2(t_cpn,CRd,addrmode5); } :stc2l cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0 { t_cpn:4 = cpn; coprocessor_storelong2(t_cpn,CRd,addrmode5); } @endif # VERSION_5 ################################################# # # Here are the rest of instructions in alphabetical order # ################################################# #See ARM Architecture reference section "Pseudocode details of addition and subtraction" macro add_with_carry_flags(op1,op2){ local CYz = zext(CY); local result = op1 + op2; tmpCY = carry( op1, op2) || carry( result, CYz ); tmpOV = scarry( op1, op2 ) ^^ scarry( result, CYz ); } #Note: used for subtraction op1 - (op2 + !CY) #sets tmpCY if there is NO borrow macro sub_with_carry_flags(op1, op2){ local result = op1 - op2; tmpCY = (op1 > op2) || (result < zext(CY)); tmpOV = sborrow(op1,op2) ^^ sborrow(result,zext(!CY)); } :adc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; add_with_carry_flags(rn,shift1); Rd = rn+shift1+zext(CY); resultflags(Rd); build SBIT_CZNO; } :adc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; add_with_carry_flags(rn,shift2); Rd = rn+shift2+zext(CY); resultflags(Rd); build SBIT_CZNO; } :adc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; 
add_with_carry_flags(rn,shift3); Rd = rn+shift3+zext(CY); resultflags(Rd); build SBIT_CZNO; } :adc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; add_with_carry_flags(rn,shift1); dest:4 = rn + shift1 + zext(CY); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :adc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; add_with_carry_flags(rn,shift2); dest:4 = rn + shift2 + zext(CY); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :adc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=5 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; add_with_carry_flags(rn,shift3); dest:4 = rn + shift3 + zext(CY); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } ArmPCRelImmed12: reloff is U23=1 & immed & rotate [ reloff = inst_start + 8 + ( ((immed<<(32-rotate*2))|(immed>>(rotate*2))) $and 0xffffffff); ] { export *[const]:4 reloff; } ArmPCRelImmed12: reloff is U23=0 & immed & rotate [ reloff = inst_start + 8 - ( ((immed<<(32-rotate*2))|(immed>>(rotate*2))) $and 0xffffffff); ] { export *[const]:4 reloff; } # # ADR constructors must appear before ADD constructors to give ADR parsing precedence # :adr^COND Rd,ArmPCRelImmed12 is $(AMODE) & ARMcond=1 & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd & ArmPCRelImmed12 { build COND; Rd = ArmPCRelImmed12; } :adr^COND pc,ArmPCRelImmed12 is $(AMODE) & ARMcond=1 & COND & c2527=1 & (c2024=8 | c2024=4) & Rn=15 & Rd=15 & pc & ArmPCRelImmed12 { build COND; dest:4 = ArmPCRelImmed12; ALUWritePC(dest); goto [pc]; } :add^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; addflags(rn,shift1); Rd = rn + shift1; resultflags(Rd); build 
SBIT_CZNO; } :add^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; addflags(rn,shift2); Rd = rn + shift2; resultflags(Rd); build SBIT_CZNO; } :add^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; addflags(rn,shift3); Rd = rn + shift3; resultflags(Rd); build SBIT_CZNO; } :add^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; addflags(rn,shift1); dest:4 = rn + shift1; resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :add^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; addflags(rn,shift2); dest:4 = rn + shift2; resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :add^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=4 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; addflags(rn,shift3); dest:4 = rn + shift3; resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :and^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; Rd = rn & shift1; logicflags(); resultflags(Rd); build SBIT_CZNO; } :and^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; Rd = rn & shift2; logicflags(); resultflags(Rd); build SBIT_CZNO; } :and^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; Rd = rn & shift3; logicflags(); resultflags(Rd); build SBIT_CZNO; } :and^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & 
ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; dest:4 = rn & shift1; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :and^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; dest:4 = rn & shift2; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :and^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=0 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; dest:4 = rn & shift3; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } # must match first! before conditional goto :b Addr24 is $(AMODE) & cond=14 & c2527=5 & L24=0 & Addr24 { goto Addr24; } :b^cc Addr24 is $(AMODE) & cc & c2527=5 & L24=0 & Addr24 { if (cc) goto Addr24; } @if defined(VERSION_6T2) :bfc^COND Rd,lsbImm,bitWidth is $(AMODE) & ARMcond=1 & COND & c2127=0x3e & msbImm & Rd & lsbImm & bitWidth & c0006=0x1f { build COND; build lsbImm; build msbImm; build bitWidth; clearMask:4 = (-1 << (msbImm + 1)) | (-1 >> (32 - lsbImm)); Rd = Rd & clearMask; } :bfi^COND Rd,Rm,lsbImm,bitWidth is $(AMODE) & ARMcond=1 & COND & c2127=0x3e & Rd & Rm & lsbImm & bitWidth & c0406=1 { build COND; build lsbImm; build bitWidth; vmask:4 = (1 << bitWidth) - 1; clear:4 = ~(vmask << lsbImm); bits:4 = (Rm & vmask) << lsbImm; Rd = (Rd & clear) | bits; } @endif # VERSION_6T2 :bic^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; Rd = rn&(~shift1); logicflags(); resultflags(Rd); build SBIT_CZNO; } :bic^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; Rd = rn&(~shift2); logicflags(); resultflags(Rd); build SBIT_CZNO; } :bic^COND^SBIT_CZNO 
Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; Rd = rn&(~shift3); logicflags(); resultflags(Rd); build SBIT_CZNO; } :bic^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; dest:4 = rn&(~shift1); logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :bic^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; dest:4 = rn&(~shift2); logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :bic^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=14 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; dest:4 = rn&(~shift3); logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } # bl used as a PIC instruction to get at current PC in lr :bl Addr24 is $(AMODE) & cond=14 & c2527=5 & L24=1 & immed24=0xffffff & Addr24 { lr = inst_next; goto Addr24; } # bl used as a PIC instruction to get at current PC in lr :bl^COND Addr24 is $(AMODE) & ARMcond=1 & COND & c2527=5 & L24=1 & immed24=0xffffff & Addr24 { build COND; build Addr24; lr = inst_next; goto Addr24; } :bl Addr24 is $(AMODE) & cond=14 & c2527=5 & L24=1 & Addr24 { lr = inst_next; call Addr24; } :bl^COND Addr24 is $(AMODE) & CALLoverride=0 & COND & c2527=5 & L24=1 & Addr24 { build COND; build Addr24; lr = inst_next; call Addr24; } :bl^COND Addr24 is $(AMODE) & CALLoverride=1 & COND & c2527=5 & L24=1 & Addr24 { build COND; build Addr24; lr = inst_next; goto Addr24; } # blx(1) instruction @if defined(T_VARIANT) && defined(VERSION_5) # Two forms of blx needed to distinguish from b :blx HAddr24 is $(AMODE) & CALLoverride=0 & ARMcond=0 & cond=15 & c2527=5 & H24=0 & HAddr24 { lr = inst_next; SetISAModeSwitch(1); # 
TMode done by HAddr24's globalset call HAddr24; # don't do causes decompiler trouble TB = 0; } # Always changes to THUMB mode :blx HAddr24 is $(AMODE) & CALLoverride=1 & ARMcond=0 & cond=15 & c2527=5 & H24=0 & HAddr24 { lr = inst_next; SetISAModeSwitch(1); # TMode done by HAddr24's globalset goto HAddr24; } # Always changes to THUMB mode :blx HAddr24 is $(AMODE) & ARMcond=0 & CALLoverride=0 & cond=15 & c2527=5 & H24=1 & HAddr24 { lr = inst_next; SetISAModeSwitch(1); # TMode done by HAddr24's globalset call HAddr24; # don't do causes decompiler trouble TB = 0; } # Always changes to THUMB mode :blx HAddr24 is $(AMODE) & ARMcond=0 & CALLoverride=1 & cond=15 & c2527=5 & H24=1 & HAddr24 { lr = inst_next; SetISAModeSwitch(1); # TMode done by HAddr24's globalset goto HAddr24; } # Always changes to THUMB mode @endif # T_VARIANT && VERSION_5 @if defined(VERSION_5) :blx^COND rm is $(AMODE) & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm { build COND; build rm; BXWritePC(rm); lr=inst_next; call [pc]; # don't do causes decompiler trouble TB = 0; } # Optional THUMB :blx^COND rm is $(AMODE) & CALLoverride=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=3 & rm { build COND; build rm; BXWritePC(rm); lr=inst_next; goto [pc]; } # Optional THUMB @endif # VERSION_5 @if defined(VERSION_5_or_T) # if branching using lr, assume return :bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm=14 { build COND; build rm; BXWritePC(rm); return [pc]; } # Optional change to THUMB :bx^COND rm is $(AMODE) & REToverride=0 & LRset=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm { build COND; build rm; BXWritePC(rm); goto [pc]; } # Optional change to THUMB # if lr has just been set, assume call :bx^COND rm is $(AMODE) & REToverride=0 & LRset=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm & Rm { build 
COND; build rm; BXWritePC(rm); call [pc]; } # Optional change to THUMB :bx^COND rm is $(AMODE) & REToverride=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm { build COND; build rm; BXWritePC(rm); goto [pc]; } # Optional change to THUMB #:bx^COND lr is $(AMODE) & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & Rm=14 & lr #{ # build COND; # TB=(lr&0x00000001)!=0; # tmp = lr & 0xfffffffe; # return [tmp]; #} # Optional change to THUMB @endif # VERSION_5_or_T @if defined(VERSION_6) # bxj behaves like bx except that Jazelle state is enabled if available (added with Version-5 J-variant) :bxj^COND rm is $(AMODE) & REToverride=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm { build COND; build rm; success:1 = jazelle_branch(); if (success) goto ; BXWritePC(rm); return [pc]; } # Optional change to THUMB # if branching using "ip" then is a goto :bxj^COND rm is $(AMODE) & REToverride=0 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm & Rm=12 { build COND; build rm; success:1 = jazelle_branch(); if (success) goto ; BXWritePC(rm); goto [pc]; } # Optional change to THUMB :bxj^COND rm is $(AMODE) & REToverride=1 & ARMcond=1 & COND & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=2 & rm { build COND; build rm; success:1 = jazelle_branch(); if (success) goto ; BXWritePC(rm); goto [pc]; } # Optional change to THUMB @endif # VERSION_6 @if defined(VERSION_5) :cdp2 cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & ARMcond=0 & cond=15 & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm { t_cpn:4 = cpn; t_op1:4 = opcode1; t_op2:4 = opcode2; coprocessor_function2(t_cpn,t_op1,t_op2,CRd,CRn,CRm); } @endif # VERSION_5 :cdp^COND cpn,opcode1,CRd,CRn,CRm,opcode2 is $(AMODE) & ARMcond=1 & COND & c2427=14 & opcode1 & CRn & CRd & cpn & opcode2 & c0404=0 & CRm { build COND; t_cpn:4 = cpn; t_op1:4 = opcode1; t_op2:4 = opcode2; 
coprocessor_function(t_cpn,t_op1,t_op2,CRd,CRn,CRm); } @if defined(VERSION_6K) || defined(VERSION_7) :clrex is $(AMODE) & c0031=0xf57ff01f { ClearExclusiveLocal(); } @endif # VERSION_6K @if defined(VERSION_5) :clz^COND Rd,rm is $(AMODE) & ARMcond=1 & COND & c2027=22 & c1619=15 & Rd & c0811=15 & c0407=1 & rm { build COND; build rm; Rd = lzcount(rm); } @endif # VERSION_5 :cmn^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; build shift1; addflags(rn,shift1); local tmp = rn + shift1; resultflags(tmp); affectflags(); } :cmn^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; build shift2; addflags(rn,shift2); local tmp = rn + shift2; resultflags(tmp); affectflags(); } :cmn^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=23 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; build shift3; addflags(rn,shift3); local tmp = rn + shift3; resultflags(tmp); affectflags(); } :cmp^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; build shift1; subflags(rn,shift1); local tmp = rn - shift1; resultflags(tmp); affectflags(); } :cmp^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; build shift2; subflags(rn,shift2); local tmp = rn - shift2; resultflags(tmp); affectflags(); } :cmp^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=21 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; build shift3; subflags(rn,shift3); local tmp = rn - shift3; resultflags(tmp); affectflags(); } @if defined(VERSION_6) # cpy is a pre-UAL synonym for mov :cpy^COND pc,rm is $(AMODE) & ARMcond=1 & LRset=0 & COND & pc & c2027=0x1a & c1619=0 & c0411=0 & Rd=15 & rm { build COND; build rm; BXWritePC(rm); goto [pc]; } :cpy^COND pc,lr is $(AMODE) & ARMcond=1 & LRset=0 & COND & pc & c2527=0 & S20=0 & c2124=13 & c1619=0 & Rd=15 & sftimm=0 
& c0406=0 & Rm=14 & lr { build COND; dest:4 = lr; ALUWritePC(dest); return [pc]; } :cpy^COND pc,rm is $(AMODE) & ARMcond=1 & LRset=1 & COND & pc & c2027=0x1a & c1619=0 & c0411=0 & Rd=15 & rm { build COND; build rm; BXWritePC(rm); call [pc]; } :cpy^COND lr,rm is $(AMODE) & ARMcond=1 & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd=14 & lr & rm & Rm2=15 [ LRset=1; globalset(inst_next,LRset); ] { build COND; lr = rm; } :cpy^COND Rd,rm is $(AMODE) & ARMcond=1 & COND & c2027=0x1a & c1619=0 & c0411=0 & Rd & rm { build COND; build rm; Rd = rm; } @endif # VERSION_6 @if defined(VERSION_6K) || defined(VERSION_6T2) :dbg^COND optionImm is $(AMODE) & ARMcond=1 & COND & c0427=0x320f0f & optionImm { @if defined(VERSION_7) build COND; build optionImm; HintDebug(optionImm); @endif # VERSION_7 } @endif # VERSION_6K || VERSION_6T2 @if defined(VERSION_7) :dmb dbOption is $(AMODE) & c0431=0xf57ff05 & dbOption { DataMemoryBarrier(dbOption:1); } :dsb dbOption is $(AMODE) & c0431=0xf57ff04 & dbOption { DataSynchronizationBarrier(dbOption:1); } @endif # VERSION_7 :eor^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; Rd = rn^shift1; logicflags(); resultflags(Rd); build SBIT_CZNO; } :eor^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; Rd = rn^shift2; logicflags(); resultflags(Rd); build SBIT_CZNO; } :eor^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; Rd = rn^shift3; logicflags(); resultflags(Rd); build SBIT_CZNO; } :eor^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; dest:4 = rn^shift1; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :eor^COND^SBIT_CZNO 
pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; dest:4 = rn^shift2; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :eor^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=1 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; dest:4 = rn^shift3; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } @if defined(VERSION_7) :isb ibOption is $(AMODE) & c0431=0xf57ff06 & ibOption { InstructionSynchronizationBarrier(ibOption:1); } @endif # VERSION_7 ### These must come first, because of cond=15 match @if defined(VERSION_5) :ldc2 cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 { t_cpn:4 = cpn; coprocessor_load2(t_cpn,CRd,addrmode5); } :ldc2l cpn,CRd,addrmode5 is $(AMODE) & ARMcond=0 & cond=15 & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 { t_cpn:4 = cpn; coprocessor_loadlong2(t_cpn,CRd,addrmode5); } @endif # VERSION_5 ######## cond=15 match :ldc^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=1 { build COND; build addrmode5; t_cpn:4 = cpn; coprocessor_load(t_cpn,CRd,addrmode5); } :ldcl^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=1 { build COND; build addrmode5; t_cpn:4 = cpn; coprocessor_loadlong(t_cpn,CRd,addrmode5); } :ldm^mdir^COND reglist is $(AMODE) & ARMcond=1 & COND & c2527=4 & mdir & L20=1 & c1515=0 & reglist { build COND; build reglist; } :ldm^mdir^COND reglist is $(AMODE) & ARMcond=1 & COND & c2527=4 & mdir & L20=1 & c1515=1 & reglist { build COND; build reglist; LoadWritePC(pc); return [pc]; } #:ldr^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 #{ # build COND; # build addrmode2; # tmp:4=addrmode2&0xfffffffc; # 
tmp2:4=(addrmode2&3)<<3; # Rd=*tmp; # Rd = (Rd >> tmp2) | (Rd << (32-tmp2)); #} # The following form of ldr assumes alignment checking is on :ldr^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; Rd = *addrmode2; } # Two forms of ldr with destination=pc needed to distinguish from ldrt :ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 [ LRset=0; globalset(inst_next,LRset); ] { build COND; build addrmode2; dest:4=*addrmode2; SetThumbMode((dest&0x00000001)!=0); pc=dest&0xfffffffe; call [pc]; SetThumbMode(0); } # No unaligned address :ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & LRset=1 & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 [ LRset=0; globalset(inst_next,LRset); ] { build COND; build addrmode2; dest:4=*addrmode2; SetThumbMode((dest&0x00000001)!=0); pc=dest&0xfffffffe; call [pc]; SetThumbMode(0); } # No unaligned address # Two forms of ldr with destination=pc needed to distinguish from ldrt :ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=1 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; dest:4=*addrmode2; BXWritePC(dest); goto [pc]; } # No unaligned address :ldr^COND pc,addrmode2 is $(AMODE) & pc & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & Rd=15 & P24=0 & W21=0 & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; dest:4=*addrmode2; BXWritePC(dest); goto [pc]; } # No unaligned address :ldrb^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=1 & L20=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; Rd = zext( *:1 addrmode2); } :ldrbt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=1 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & 
addrmode2 { build COND; build addrmode2; Rd = zext( *:1 addrmode2); } @if defined(VERSION_5E) :ldrd^COND Rd,Rd2,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & c0407=13 & c1212=0 & L20=0 & Rd & Rd2 & addrmode3 { build COND; build addrmode3; Rd = *(addrmode3); Rd2 = *(addrmode3+4); } @endif # VERSION_5E @if defined(VERSION_6) :ldrex^COND Rd,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x19 & Rn & Rd & c0011=0xf9f { build COND; Rd = *Rn; } @endif # VERSION_6 @if defined(VERSION_6K) :ldrexb^COND Rd,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1d & Rn & Rd & c0011=0xf9f { build COND; Rd = zext(*:1 Rn); } :ldrexd^COND Rd,Rd2,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1b & Rn & Rd & Rd2 & c0011=0xf9f { build COND; local addr:4 = Rn; Rd = *(addr); Rd2 = *(addr + 4); } :ldrexh^COND Rd,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1f & Rn & Rd & c0011=0xf9f { build COND; Rd = zext(*:2 Rn); } @endif # VERSION_6K :ldrh^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=1 & c0407=11 & Rd & addrmode3 { build COND; build addrmode3; Rd = zext( *:2 addrmode3); } @if defined(VERSION_6T2) :ldrht^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=11 & Rd & addrmode3 { build COND; build addrmode3; Rd = zext( *:2 addrmode3); } @endif # VERSION_6T2 :ldrsb^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=1 & c0407=13 & Rd & addrmode3 { build COND; build addrmode3; Rd = sext( *:1 addrmode3); } @if defined(VERSION_6T2) :ldrsbt^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=13 & Rd & addrmode3 { build COND; build addrmode3; Rd = sext( *:1 addrmode3); } @endif # VERSION_6T2 :ldrsh^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=1 & c0407=15 & Rd & addrmode3 { build COND; build addrmode3; Rd = sext( *:2 addrmode3); } @if defined(VERSION_6T2) :ldrsht^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=1 & c0407=15 & Rd & 
addrmode3 { build COND; build addrmode3; Rd = sext( *:2 addrmode3); } @endif # VERSION_6T2 # The following form of ldr assumes alignment checking is on :ldrt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=1 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; Rd = *addrmode2; } ###### must come first cond=15 @if defined(VERSION_5) :mcr2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=0 & cond=15 & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { t_cpn:4 = cpn; t_op1:4 = opc1; t_op2:4 = opc2; coprocessor_moveto(t_cpn,t_op1,t_op2,Rd,CRn,CRm); } @endif # VERSION_5 ###### must come first cond=15 # ===== START mcr :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Main_ID(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Cache_Type(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_TCM_Status(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_TLB_Type(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Control(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Auxiliary_Control(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands 
{ build COND; coproc_moveto_Coprocessor_Access_Control(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Secure_Configuration(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Secure_Debug_Enable(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_NonSecure_Access_Control(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Translation_table_base_0(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Translation_table_base_1(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Translation_table_control(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Domain_Access_Control(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Data_Fault_Status(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Instruction_Fault(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & 
c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Fault_Address(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Instruction_Fault(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Wait_for_interrupt(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Entire_Instruction(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Instruction_Cache_by_MVA(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Flush_Prefetch_Buffer(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Entire_Data_cache(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Entire_Data_by_MVA(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Entire_Data_by_Index(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Clean_Entire_Data_Cache(Rd); } :mcr^COND mcrOperands is $(AMODE) & 
CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Clean_Data_Cache_by_MVA(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Clean_Data_Cache_by_Index(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Data_Synchronization(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Data_Memory_Barrier(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Entire_Data_Cache(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_Data_Cache_by_MVA(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_unified_TLB_unlocked(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_unified_TLB_by_MVA(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Invalidate_unified_TLB_by_ASID_match(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { 
build COND; coproc_moveto_FCSE_PID(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Context_ID(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_User_RW_Thread_and_Process_ID(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_User_R_Thread_and_Process_ID(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Privileged_only_Thread_and_Process_ID(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; coproc_moveto_Peripherial_Port_Memory_Remap(Rd); } :mcr^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opt2:4=opc2; coproc_moveto_Feature_Identification(Rd,t_opt2); } :mcr^COND mcrOperands is $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opt2:4=opc2; coproc_moveto_ISA_Feature_Identification(Rd,t_opt2); } :mcr^COND mcrOperands is $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=2 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; coproc_moveto_Peripheral_Port_Memory_Remap(Rd,t_opc2); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; coproc_moveto_Control_registers(Rd, t_opc2); } 
:mcr^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; coproc_moveto_Security_world_control(Rd, t_opc2); } :mcr^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; coproc_moveto_Translation_table(Rd,t_opc2); } :mcr^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; coproc_moveto_Instruction_cache(Rd,t_opc2); } :mcr^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; coproc_moveto_Data_cache_operations(Rd,t_opc2); } :mcr^COND mcrOperands is $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=0 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; t_crm:4 = CRm; coproc_moveto_Identification_registers(Rd,t_opc2,t_crm); } :mcr^COND mcrOperands is $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=0 & opc1 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; t_crm:4 = CRm; t_op1:4 = opc1; coproc_moveto_Peripheral_System(Rd,t_opc2,t_crm,t_op1); } # ===== END mcr :mcr^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=1 & COND & c2427=14 & opc1 & c2020=0 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { build COND; t_cpn:4 = cpn; t_op1:4 = opc1; t_op2:4 = opc2; coprocessor_moveto(t_cpn,t_op1,t_op2,Rd,CRn,CRm); } ##### must come first cond=15 @if defined(VERSION_6) :mcrr2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & ARMcond=0 & cond=15 & c2027=0xc4 & cpn & opcode3 & Rd & Rn & CRm { t_cpn:4 = cpn; t_op:4 = opcode3; coprocessor_moveto2(t_cpn,t_op,Rd,Rn,CRm); } :mrrc2 cpn,opcode3,Rd,Rn,CRm is $(AMODE) & ARMcond=0 & cond=15 & c2027=0xc5 & cpn & opcode3 
& Rd & Rn & CRm { t_cpn:4 = cpn; t_op:4 = opcode3; Rd = coprocessor_movefromRt(t_cpn,t_op,CRm); Rn = coprocessor_movefromRt2(t_cpn,t_op,CRm); } @endif # VERSION_6 ##### must come first cond=15 @if defined(VERSION_5E) :mcrr^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc4 & COND & ARMcond=1 & cpn & opcode3 & Rd & Rn & CRm { build COND; t_cpn:4 = cpn; t_op:4 = opcode3; coprocessor_moveto2(t_cpn,t_op,Rd,Rn,CRm); } :mrrc^COND cpn,opcode3,Rd,Rn,CRm is $(AMODE) & c2027=0xc5 & COND & ARMcond=1 & cpn & opcode3 & Rd & Rn & CRm { build COND; t_cpn:4 = cpn; t_op:4 = opcode3; Rd = coprocessor_movefromRt(t_cpn,t_op,CRm); Rn = coprocessor_movefromRt2(t_cpn,t_op,CRm); } @endif # VERSION_5E :mla^COND^SBIT_ZN Rn,Rm,Rs,Rd is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=1 & SBIT_ZN & Rn & Rd & Rs & c0407=9 & Rm { build COND; Rn = Rm*Rs + Rd; resultflags(Rn); build SBIT_ZN; } @if defined(VERSION_6T2) :mls^COND Rn,Rm,Rs,Rd is $(AMODE) & ARMcond=1 & COND & c2027=0x06 & Rn & Rd & Rs & c0407=9 & Rm { build COND; Rn = Rd - Rm*Rs; } @endif # VERSION_6T2 :mov^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 { build COND; build shift1; Rd = shift1; resultflags(Rd); logicflags(); build SBIT_CZNO; } :mov^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 { build COND; build shift2; Rd = shift2; resultflags(Rd); logicflags(); build SBIT_CZNO; } :mov lr,pc is $(AMODE) & ARMcond=1 & c0031=0xe1a0e00f & lr & pc [ LRset=1; globalset(inst_next,LRset); ] { lr = inst_next + 4; resultflags(lr); logicflags(); } :mov^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 { build COND; build shift3; Rd = shift3; resultflags(Rd); logicflags(); build SBIT_CZNO; } :mov^COND^SBIT_CZNO pc,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift1 { build COND; build shift1; 
SetThumbMode((shift1&0x00000001)!=0); local tmp=shift1&0xfffffffe; resultflags(tmp); logicflags(); build SBIT_CZNO; ALUWritePC(tmp); goto [pc]; } :mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 { build COND; build shift2; SetThumbMode((shift2&0x00000001)!=0); local tmp=shift2&0xfffffffe; resultflags(tmp); logicflags(); build SBIT_CZNO; ALUWritePC(tmp); goto [pc]; } :mov^COND^SBIT_CZNO pc,shift2 is $(AMODE) & LRset=1 & pc & COND & ARMcond=1 & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2 { build COND; build shift2; SetThumbMode((shift2&0x00000001)!=0); local tmp=shift2&0xfffffffe; resultflags(tmp); logicflags(); build SBIT_CZNO; ALUWritePC(tmp); call [pc]; } :mov^COND^SBIT_CZNO pc,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift3 { build COND; build shift3; SetThumbMode((shift3&0x00000001)!=0); local tmp=shift3&0xfffffffe; resultflags(tmp); logicflags(); build SBIT_CZNO; ALUWritePC(tmp); goto [pc]; } :mov lr,rm is $(AMODE) & ARMcond=0 & cond=15 & c2527=0 & S20=0 & c2124=13 & c1619=0 & rm & Rm2=15 & sftimm=0 & c0406=0 & Rd=14 & lr [ LRset=1; globalset(inst_next,LRset); ] { lr = rm; } @if defined(VERSION_6T2) :movw^COND Rd,"#"^val is $(AMODE) & ARMcond=1 & COND & c2027=0x30 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { build COND; Rd = val; } :movt^COND Rd,"#"^val is $(AMODE) & ARMcond=1 & COND & c2027=0x34 & c1619 & Rd & c0011 [ val = (c1619 << 12) | c0011; ] { build COND; Rd = (val << 16) | (Rd & 0xffff); } @endif # VERSION_6T2 ###### must come before next instruction because cond=15 @if defined(VERSION_5) :mrc2 cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=0 & cond=15 & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { t_cpn:4 = cpn; t_op1:4 = opc1; t_op2:4 = opc2; Rd = coprocessor_movefromRt(t_cpn,t_op1,t_op2,CRn,CRm); } @endif # VERSION_5 # ===== Start mrc :mrc^COND mcrOperands is $(AMODE) & 
CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Main_ID(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Cache_Type(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_TCM_Status(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=3 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_TLB_Type(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Control(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Auxiliary_Control(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Coprocessor_Access_Control(); } :mrc^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Secure_Configuration(); } :mrc^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Secure_Debug_Enable(); } :mrc^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_NonSecure_Access_Control(); } :mrc^COND 
mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Translation_table_base_0(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Translation_table_base_1(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Translation_table_control(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=3 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Domain_Access_Control(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Instruction_Fault_Status(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=5 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Data_Fault_Status(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Fault_Address(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=6 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Instruction_Fault_Address(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Wait_for_interrupt(); } :mrc^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & 
mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Entire_Instruction(); } :mrc^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Instruction_Cache_by_MVA(); } :mrc^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Flush_Prefetch_Buffer(); } :mrc^COND mcrOperands is $(AMODE) & CRm=6 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Entire_Data_cache(); } :mrc^COND mcrOperands is $(AMODE) & CRm=6 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Entire_Data_by_MVA(); } :mrc^COND mcrOperands is $(AMODE) & CRm=6 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Entire_Data_by_Index(); } :mrc^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Clean_Entire_Data_Cache(); } :mrc^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Clean_Data_Cache_by_MVA(); } :mrc^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Clean_Data_Cache_by_Index(); } :mrc^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = 
coproc_movefrom_Data_Synchronization(); } :mrc^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2=5 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Data_Memory_Barrier(); } :mrc^COND mcrOperands is $(AMODE) & CRm=14 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Entire_Data_Cache(); } :mrc^COND mcrOperands is $(AMODE) & CRm=14 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_Data_Cache_by_MVA(); } :mrc^COND mcrOperands is $(AMODE) & CRm=7 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_unified_TLB_unlocked(); } :mrc^COND mcrOperands is $(AMODE) & CRm=7 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_unified_TLB_by_MVA(); } :mrc^COND mcrOperands is $(AMODE) & CRm=7 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=8 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Invalidate_unified_TLB_by_ASID_match(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=0 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_FCSE_PID(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=1 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Context_ID(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=2 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_User_RW_Thread_and_Process_ID(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & 
c0404=1 & opc2=3 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_User_R_Thread_and_Process_ID(); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=13 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Privileged_only_Thread_and_Process_ID(); } :mrc^COND mcrOperands is $(AMODE) & CRm=2 & c0404=1 & opc2=4 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; Rd = coproc_movefrom_Peripherial_Port_Memory_Remap(); } :mrc^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opt2:4=opc2; Rd = coproc_movefrom_Feature_Identification(t_opt2); } :mrc^COND mcrOperands is $(AMODE) & CRm=2 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_ISA_Feature_Identification(t_opc2); } :mrc^COND mcrOperands is $(AMODE) & CRm=4 & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=2 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_Peripheral_Port_Memory_Remap(t_opc2); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_Control_registers(t_opc2); } :mrc^COND mcrOperands is $(AMODE) & CRm=1 & c0404=1 & opc2 & cpn=15 & Rd & CRn=1 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_Security_world_control(t_opc2); } :mrc^COND mcrOperands is $(AMODE) & CRm=0 & c0404=1 & opc2 & cpn=15 & Rd & CRn=2 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_Translation_table(t_opc2); } 
:mrc^COND mcrOperands is $(AMODE) & CRm=5 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_Instruction_cache(t_opc2); } :mrc^COND mcrOperands is $(AMODE) & CRm=10 & c0404=1 & opc2 & cpn=15 & Rd & CRn=7 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; Rd = coproc_movefrom_Data_cache_operations(t_opc2); } :mrc^COND mcrOperands is $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=0 & c2020=1 & opc1=0 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; t_crm:4 = CRm; Rd = coproc_movefrom_Identification_registers(t_opc2,t_crm); } :mrc^COND mcrOperands is $(AMODE) & CRm & c0404=1 & opc2 & cpn=15 & Rd & CRn=15 & c2020=1 & opc1 & c2427=14 & COND & ARMcond=1 & mcrOperands { build COND; t_opc2:4 = opc2; t_crm:4 = CRm; t_op1:4 = opc1; Rd = coproc_movefrom_Peripheral_System(t_opc2,t_crm,t_op1); } # ===== End mrc :mrc^COND cpn,opc1,Rd,CRn,CRm,opc2 is $(AMODE) & ARMcond=1 & COND & c2427=14 & opc1 & c2020=1 & CRn & Rd & cpn & opc2 & c0404=1 & CRm { build COND; t_cpn:4 = cpn; t_op1:4 = opc1; t_opc2:4 = opc2; Rd = coprocessor_movefromRt(t_cpn,t_op1,t_opc2,CRn,CRm); } :mrs^COND Rd,cpsr is $(AMODE) & ARMcond=1 & COND & c2027=16 & c1619=15 & Rd & offset_12=0 & cpsr { # TODO: GE bits have not been included build COND; Rd = zext( (NG<<4) | (ZR<<3) | (CY<<2) | (OV<<1) | (Q) ) << 27; } :mrs^COND Rd,spsr is $(AMODE) & ARMcond=1 & COND & c2027=20 & c1619=15 & Rd & offset_12=0 & spsr { build COND; Rd = spsr; } :msr^COND cpsrmask,shift1 is $(AMODE) & ARMcond=1 & COND & c2027=50 & cpsrmask & c1215=15 & c2627=0 & shift1 { build COND; build cpsrmask; build shift1; cpsr = (cpsr& ~cpsrmask) | (shift1 & cpsrmask); } :msr^COND cpsrmask,rm is $(AMODE) & ARMcond=1 & COND & c2027=18 & cpsrmask & c1215=15 & c0811=0 & c0407=0 & rm { # TODO: GE bits have not been included build COND; build cpsrmask; cpsr = (cpsr& ~cpsrmask) | (rm & 
cpsrmask); local tmp = cpsr >> 27 & 0x1f; Q = ((tmp ) & 0x1) != 0; OV = ((tmp >> 1) & 0x1) != 0; CY = ((tmp >> 2) & 0x1) != 0; ZR = ((tmp >> 3) & 0x1) != 0; NG = ((tmp >> 4) & 0x1) != 0; } :msr^COND spsrmask,shift1 is $(AMODE) & ARMcond=1 & COND & c2027=54 & spsrmask & c1215=15 & c2627=0 & shift1 { build COND; build spsrmask; build shift1; spsr = (spsr& ~spsrmask) | (shift1 & spsrmask); } :msr^COND spsrmask,rm is $(AMODE) & ARMcond=1 & COND & c2027=22 & spsrmask & c1215=15 & c0811=0 & c0407=0 & rm { build COND; build spsrmask; spsr = (spsr& ~spsrmask) | (rm & spsrmask); } :mul^COND^SBIT_ZN rn,rm,rs is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=0 & SBIT_ZN & rn & c1215=0 & rs & c0407=9 & rm { build COND; build rm; build rs; rn = rm*rs; resultflags(rn); build SBIT_ZN; } :mvn^COND^SBIT_CZNO Rd,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift1 { build COND; build shift1; Rd=~shift1; resultflags(Rd); logicflags(); build SBIT_CZNO; } :mvn^COND^SBIT_CZNO Rd,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift2 { build COND; build shift2; Rd=~shift2; resultflags(Rd); logicflags(); build SBIT_CZNO; } :mvn^COND^SBIT_CZNO Rd,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=15 & SBIT_CZNO & c1619=0 & Rd & c2627=0 & shift3 { build COND; build shift3; Rd=~shift3; resultflags(Rd); logicflags(); build SBIT_CZNO; } :mvn^COND^SBIT_ZN pc,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift1 { build COND; build shift1; dest:4 = ~shift1; resultflags(dest); build SBIT_ZN; ALUWritePC(dest); goto [pc]; } :mvn^COND^SBIT_ZN pc,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift2 { build COND; build shift2; dest:4 = ~shift2; resultflags(dest); build SBIT_ZN; ALUWritePC(dest); goto [pc]; } :mvn^COND^SBIT_ZN pc,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=15 & SBIT_ZN & c1619=0 & Rd=15 & c2627=0 & shift3 { 
build COND; build shift3; dest:4 = ~shift3; resultflags(dest); build SBIT_ZN; ALUWritePC(dest); goto [pc]; } @if defined(VERSION_6K) || defined(VERSION_6T2) || defined(VERSION_7) :nop^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f000 { } @endif # VERSION_6K :orr^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; Rd = rn|shift1; logicflags(); resultflags(Rd); build SBIT_CZNO; } :orr^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; Rd = rn|shift2; logicflags(); resultflags(Rd); build SBIT_CZNO; } :orr^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; Rd = rn|shift3; logicflags(); resultflags(Rd); build SBIT_CZNO; } :orr^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; dest:4 = rn|shift1; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :orr^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; dest:4 = rn|shift2; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :orr^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=12 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; dest:4 = rn|shift3; logicflags(); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } @if defined(VERSION_6) :pkhbt^COND Rd,rn,shift4 is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & c0406=1 & Rd & rn & shift4 { build COND; build rn; build shift4; Rd = (rn & 0xffff) + (shift4 & 0xffff0000); } :pkhtb^COND Rd,rn,shift4 is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & c0406=5 & Rd 
& rn & shift4 { build COND; build rn; build shift4; Rd = (shift4 & 0xffff) + (rn & 0xffff0000); } @endif # VERSION_6 @if defined(VERSION_5E) :qadd^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x10 & Rn & Rd & c0811=0 & c0407=5 & Rm { build COND; local sum1 = Rm + Rn; sum1 = SignedSaturate(sum1,32:2); Q = SignedDoesSaturate(sum1,32:2); Rd = sum1; } @endif # VERSION_5E @if defined(VERSION_6) :qadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=1 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; sum1:2 = lRn:2 + lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 + uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd = (zext(sum2) << 16) | zext(sum1); } :qadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=9 & Rn & Rd & Rm { build COND; local rn1 = Rn & 0xff; local rm1 = Rm & 0xff; local rn2 = (Rn >> 8) & 0xff; local rm2 = (Rm >> 8) & 0xff; local rn3 = (Rn >> 16) & 0xff; local rm3 = (Rm >> 16) & 0xff; local rn4 = (Rn >> 24) & 0xff; local rm4 = (Rm >> 24) & 0xff; sum1:1 = rn1:1 + rm1:1; sum1 = SignedSaturate(sum1,8:2); sum2:1 = rn2:1 + rm2:1; sum2 = SignedSaturate(sum2,8:2); sum3:1 = rn3:1 + rm3:1; sum3 = SignedSaturate(sum3,8:2); sum4:1 = rn4:1 + rm4:1; sum4 = SignedSaturate(sum4,8:2); Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); } # qaddsubx :qasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=3 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; sum1:2 = lRn:2 - lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 + uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd = (zext(sum2) << 16) | zext(sum1); } @endif # VERSION_6 @if defined(VERSION_5E) :qdadd^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x14 & Rn & Rd & c0811=0 & c0407=5 & Rm { build 
COND; tmp:4 = Rn * 2; tmp = SignedSaturate(tmp,32:2); Q = SignedDoesSaturate(tmp,32:2); tmp = tmp + Rm; tmp = SignedSaturate(tmp,32:2); Q = Q | SignedDoesSaturate(tmp,32:2); Rd = tmp; }

# QDSUB: Rd = sat32(Rm - sat32(Rn*2)); Q is set if either saturation occurs.
# FIX(review): the first SignedSaturate call was missing its 32:2 width operand,
# unlike every sibling call in this file (qadd, qdadd, qsub and qdsub's own
# second call), which made the emitted CALLOTHER arity inconsistent and dropped
# the saturation-width information.
:qdsub^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x16 & Rn & Rd & c0811=0 & c0407=5 & Rm
{
  build COND;
  tmp:4 = Rn * 2;
  tmp = SignedSaturate(tmp,32:2);
  Q = SignedDoesSaturate(tmp,32:2);
  tmp = Rm - tmp;
  tmp = SignedSaturate(tmp,32:2);
  Q = Q | SignedDoesSaturate(tmp,32:2);
  Rd = tmp;
}

@endif # VERSION_5E

@if defined(VERSION_6)

# qsubaddx
# QSAX: lower halfword = sat16(Rn.lo + Rm.lo), upper halfword = sat16(Rn.hi - Rm.hi).
:qsax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=5 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; sum1:2 = lRn:2 + lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 - uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd = (zext(sum2) << 16) | zext(sum1); }

@endif # VERSION_6

@if defined(VERSION_5E)

# QSUB: Rd = sat32(Rm - Rn); Q set on saturation.
:qsub^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x12 & Rn & Rd & c0811=0 & c0407=5 & Rm { build COND; tmp:4 = Rm - Rn; tmp = SignedSaturate(tmp,32:2); Q = SignedDoesSaturate(tmp,32:2); Rd = tmp; }

@endif # VERSION_5E

@if defined(VERSION_6)

# QSUB16: per-halfword saturating subtract.
:qsub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=7 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; sum1:2 = lRn:2 - lRm:2; sum1 = SignedSaturate(sum1,16:2); sum2:2 = uRn:2 - uRm:2; sum2 = SignedSaturate(sum2,16:2); Rd = (zext(sum2) << 16) | zext(sum1); }

# QSUB8: per-byte saturating subtract (continues on the next source line).
:qsub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x62 & c0811=15 & c0407=15 & Rn & Rd & Rm { build COND; local rn1 = Rn & 0xff; local rm1 = Rm & 0xff; local rn2 = (Rn >> 8) & 0xff; local rm2 = (Rm >> 8) & 0xff; local rn3 = (Rn >> 16) & 0xff; local rm3 = (Rm >> 16) & 0xff; local rn4 = (Rn >> 24) & 0xff; local rm4 = (Rm >> 24) & 0xff; sum1:1 = rn1:1 - rm1:1; sum1 =
SignedSaturate(sum1,8:2); sum2:1 = rn2:1 - rm2:1; sum2 = SignedSaturate(sum2,8:2); sum3:1 = rn3:1 - rm3:1; sum3 = SignedSaturate(sum3,8:2); sum4:1 = rn4:1 - rm4:1; sum4 = SignedSaturate(sum4,8:2); Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1); }

@endif # VERSION_6

@if defined(VERSION_6T2)

# Reverses the bit order of a single byte in place: eight unrolled steps each
# shift the low bit of tval into result, so bit 0 ends up in bit 7, etc.
macro BitReverse_arm(val) { tval:1 = val; result:1 = 0; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; result = (result << 1) | (tval & 1); tval = tval >> 1; val = result; }

# RBIT: reverse the 32 bits of rm by bit-reversing each byte with
# BitReverse_arm and then swapping the byte order when reassembling Rd.
:rbit^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=3 & c1619=15 & c0811=15 & Rd & rm { build COND; build rm; local t:4 = rm & 0xff; local b1:1 = t:1; t = (rm >> 8) & 0xff; local b2:1 = t:1; t = (rm >> 16) & 0xff; local b3:1 = t:1; t = (rm >> 24) & 0xff; local b4:1 = t:1; BitReverse_arm(b1); BitReverse_arm(b2); BitReverse_arm(b3); BitReverse_arm(b4); Rd = (zext(b1) << 24) | (zext(b2) << 16) | (zext(b3) << 8) | zext(b4); }

@endif # VERSION_6T2

@if defined(VERSION_6)

# REV: byte-reverse the whole word (bytes 0..3 of rm become bytes 3..0 of Rd).
:rev^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=3 & c1619=15 & c0811=15 & Rd & rm { build COND; build rm; local tmp1 = rm & 0xff; local tmp2 = (rm >> 8) & 0xff; local tmp3 = (rm >> 16) & 0xff; local tmp4 = (rm >> 24) & 0xff; Rd = (tmp1 << 24) | (tmp2 << 16) | (tmp3 << 8) | tmp4; }

# REV16: byte-reverse each halfword independently (swap bytes within each half).
:rev16^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=11 & Rd & rm { build COND; build rm; local tmp1 = rm & 0xff; local tmp2 = (rm >> 8) & 0xff; local tmp3 = (rm >> 16) & 0xff; local tmp4 = (rm >> 24) & 0xff; Rd = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | tmp2; }

# REVSH: byte-reverse the low halfword and sign-extend the result into Rd
# (constructor continues on the next source line).
:revsh^COND Rd, rm is $(AMODE) & ARMcond=1 & COND & c2327=13 &
c2022=7 & c0407=11 & Rd & rm { build COND; build rm; local tmp1 = rm & 0xff; local tmp2 = (rm >> 8) & 0xff; tmp3:2 = zext(tmp1:1) << 8 | zext(tmp2:1); Rd = sext(tmp3); } @endif # VERSION_6 :rsb^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; subflags(shift1,rn); Rd = shift1-rn; resultflags(Rd); build SBIT_CZNO; } :rsb^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; subflags(shift2,rn); Rd = shift2-rn; resultflags(Rd); build SBIT_CZNO; } :rsb^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; subflags(shift3,rn); Rd = shift3-rn; resultflags(Rd); build SBIT_CZNO; } :rsb^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; subflags(shift1,rn); dest:4 = shift1-rn; resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :rsb^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; subflags(shift2,rn); dest:4 = shift2-rn; resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :rsb^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=3 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; subflags(shift3,rn); dest:4 = shift3-rn; resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :rsc^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; sub_with_carry_flags(shift1,rn); Rd=shift1-(rn+zext(!CY)); resultflags(Rd); build SBIT_CZNO; } :rsc^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=7 & 
SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; sub_with_carry_flags(shift2,rn); Rd=shift2-(rn+zext(!CY)); resultflags(Rd); build SBIT_CZNO; } :rsc^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; sub_with_carry_flags(shift3,rn); Rd=shift3-(rn+zext(!CY)); resultflags(Rd); build SBIT_CZNO; } :rsc^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; sub_with_carry_flags(shift1,rn); local dest:4=shift1-(rn+zext(!CY)); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :rsc^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; sub_with_carry_flags(shift2,rn); local dest:4=shift2-(rn + zext(!CY)); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :rsc^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=7 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; sub_with_carry_flags(shift3,rn); local dest:4=shift3-(rn + zext(!CY)); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } @if defined(VERSION_6) :sadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=1 & Rn & Rd & Rm { build COND; local tmpRn = Rn & 0xffff; local tmpRm = Rm & 0xffff; local sum1 = sext(tmpRn:2) + sext(tmpRm:2); GE1 = sum1 s>= 0; GE2 = sum1 s>= 0; tmpRn = (Rn >> 16) & 0xffff; tmpRm = (Rm >> 16) & 0xffff; local sum2 = sext(tmpRn:2) + sext(tmpRm:2); GE3 = sum2 s>= 0; GE4 = sum2 s>= 0; Rd = ((sum2 & 0xffff) << 16) | (sum1 & 0xffff); } :sadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=9 & Rn & Rd & Rm { build COND; local tmpRn = Rn & 0xff; local tmpRm = Rm & 0xff; local sum1 = sext(tmpRn:1) + sext(tmpRm:1); GE1 = sum1 
s>= 0; tmpRn = (Rn >> 8) & 0xff; tmpRm = (Rm >> 8) & 0xff; local sum2 = sext(tmpRn:1) + sext(tmpRm:1); GE2 = sum2 s>= 0; tmpRn = (Rn >> 16) & 0xff; tmpRm = (Rm >> 16) & 0xff; local sum3 = sext(tmpRn:1) + sext(tmpRm:1); GE3 = sum3 s>= 0; tmpRn = (Rn >> 24) & 0xff; tmpRm = (Rm >> 24) & 0xff; local sum4 = sext(tmpRn:1) + sext(tmpRm:1); GE4 = sum4 s>= 0; Rd = ((sum4 & 0xff) << 24) | ((sum3 & 0xff) << 16) | ((sum2 & 0xff) << 8) | (sum1 & 0xff); } # saddsubx :sasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=3 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; local sum1 = sext(uRn:2) + sext(lRm:2); GE3 = sum1 s>= 0; GE4 = sum1 s>= 0; local diff = sext(lRn:2) - sext(uRm:2); GE1 = diff s>= 0; GE2 = diff s>= 0; Rd = ((sum1 & 0xffff) << 16) | (diff & 0xffff); } @endif # VERSION_6 :sbc^SBIT_CZNO^COND Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; sub_with_carry_flags(rn,shift1); Rd = rn-(shift1+zext(!CY)); resultflags(Rd); build SBIT_CZNO; } :sbc^SBIT_CZNO^COND Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; sub_with_carry_flags(rn,shift2); Rd = rn-(shift2 + zext(!CY)); resultflags(Rd); build SBIT_CZNO; } :sbc^SBIT_CZNO^COND Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; sub_with_carry_flags(rn,shift3); Rd = rn-(shift3+zext(!CY)); resultflags(Rd); build SBIT_CZNO; } :sbc^SBIT_CZNO^COND pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; sub_with_carry_flags(rn,shift1); local dest:4 = rn-(shift1 + zext(!CY)); resultflags(dest); build SBIT_CZNO; ALUWritePC(dest); goto [pc]; } :sbc^SBIT_CZNO^COND pc,rn,shift2 is 
$(AMODE) & pc & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2
{
  build COND; build rn; build shift2;
  sub_with_carry_flags(rn,shift2);
  local dest:4 = rn-(shift2+zext(!CY));
  resultflags(dest);
  build SBIT_CZNO;
  # Destination is the PC: compute into a temp, then branch through it.
  ALUWritePC(dest);
  goto [pc];
}

# SBC with Rd = PC (shift3 operand form); same pattern as the two forms above.
:sbc^SBIT_CZNO^COND pc,rn,shift3  is $(AMODE) & pc & ARMcond=1 & COND & c2124=6 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3
{
  build COND; build rn; build shift3;
  sub_with_carry_flags(rn,shift3);
  local dest:4 = rn-(shift3 + zext(!CY));
  resultflags(dest);
  build SBIT_CZNO;
  ALUWritePC(dest);
  goto [pc];
}

@if defined(VERSION_6)
@if defined(VERSION_6T2)

# Signed bit-field extract: shift the field to the top of the word, then
# arithmetic-shift it back down so the field's top bit is sign-extended.
:sbfx^COND Rd,Rm,lsbImm,widthMinus1  is $(AMODE) & COND & ARMcond=1 & c2127=0x3d & widthMinus1 & Rd & lsbImm & c0406=5 & Rm
{
  build COND; build lsbImm; build widthMinus1;
  shift:4 = 31 - (lsbImm + widthMinus1);
  Rd = Rm << shift;
  shift = 31 - widthMinus1;
  Rd = Rd s>> shift;
}

@endif # VERSION_6T2

@if defined(VERSION_7)

# Warning: note the non-standard use of Rd, Rm, Rn
# Signed divide; the s/ result with a zero divisor is left to the p-code
# semantics of INT_SDIV (ARM SDIV returns 0 on divide-by-zero — not modeled
# explicitly here; NOTE(review): confirm downstream consumers accept this).
:sdiv^COND RdHi,RnLo,RmHi  is $(AMODE) & ARMcond=1 & COND & c2027=0x71 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo
{
  build COND;
  local result = RnLo s/ RmHi;
  RdHi = result;
}

@endif # VERSION_7

# Byte-wise select: each result byte comes from Rn when the matching GE flag
# is set, else from Rm. zext(GEx) is 0 or 1, so exactly one product survives
# per lane; the mask keeps only that lane's byte (no shift needed).
:sel^COND Rd, Rn, Rm  is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & Rn & Rd & c0811=15 & c0407=11 & Rm
{
  build COND;
  local rD1 = ((zext(GE1) * Rn) + (zext(!GE1) * Rm)) & 0x0ff;
  local rD2 = ((zext(GE2) * Rn) + (zext(!GE2) * Rm)) & 0x0ff00;
  local rD3 = ((zext(GE3) * Rn) + (zext(!GE3) * Rm)) & 0x0ff0000;
  local rD4 = ((zext(GE4) * Rn) + (zext(!GE4) * Rm)) & 0x0ff000000;
  Rd = rD1 | rD2 | rD3 | rD4;
}

@if defined(VERSION_6K)

# Send Event hint; modeled as an opaque pcodeop.
:sev^COND  is $(AMODE) & ARMcond=1 & COND & c0027=0x320f004
{
  build COND;
  SendEvent();
}

@endif # VERSION_6K

# Hopefully we never encounter this instruction since we can not change the effective endianness of the language
# Subtable exporting the requested endian state as a 1-byte constant.
armEndianNess: "LE"  is c0031=0xf1010000 { export 0:1; }
armEndianNess: "BE"  is c0031=0xf1010200 { export 1:1; }

:setend armEndianNess  is $(AMODE) & (c0031=0xf1010000 |
c0031=0xf1010200) & armEndianNess { setEndianState(armEndianNess); } :shadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=1 & Rm { build COND; local tmpRn = Rn; local tmpRm = Rm; sum1:4 = (sext(tmpRn:2) + sext(tmpRm:2)) >> 1; sum2:4 = ((tmpRn s>> 16) + (tmpRm s>> 16)) >> 1; Rd = (sum2 << 16) + (sum1 & 0xffff); } :shadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=9 & Rm { build COND; local tmpRn = Rn; local tmpRm = Rm; sum1:4 = (sext(tmpRn:1) + sext(tmpRm:1)) >> 1; local tmpn = tmpRn >> 8; local tmpm = tmpRm >> 8; sum2:4 = (sext(tmpn:1) + sext(tmpm:1)) >> 1; tmpn = tmpRn >> 16; tmpm = tmpRm >> 16; sum3:4 = (sext(tmpn:1) + sext(tmpm:1)) >> 1; tmpn = tmpRn >> 24; tmpm = tmpRm >> 24; sum4:4 = (sext(tmpn:1) + sext(tmpm:1)) >> 1; Rd = (sum4 << 24) + ((sum3 & 0xff) << 16) + ((sum2 & 0xff) << 8) + (sum1 & 0xff); } # shaddsubx :shasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=3 & Rm { build COND; local tmpRn = Rn; local tmpRm = Rm; local diff:4 = sext(tmpRn[ 0,16]) - sext(tmpRm[16,16]); local sum:4 = sext(tmpRn[16,16]) + sext(tmpRm[ 0,16]); Rd[0,16] = diff[1,16]; Rd[16,16] = sum[1,16]; } # shsubbaddx :shsax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=5 & Rm { build COND; local tmpRn = Rn; local tmpRm = Rm; local sum:4 = sext(tmpRn[ 0,16]) + sext(tmpRm[16,16]); local diff:4 = sext(tmpRn[16,16]) - sext(tmpRm[ 0,16]); Rd[ 0,16] = sum[1,16]; Rd[16,16] = diff[1,16]; } :shsub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=7 & Rm { build COND; local tmpRn = Rn; local tmpRm = Rm; sum1:4 = (sext(tmpRn:2) - sext(tmpRm:2)) >> 1; sum2:4 = ((tmpRn s>> 16) - (tmpRm s>> 16)) >> 1; Rd = (sum2 << 16) + (sum1 & 0xffff); } :shsub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x63 & Rn & Rd & c0811=15 & c0407=15 & Rm { build COND; local tmpRn = Rn; local tmpRm = 
Rm; sum1:4 = (sext(tmpRn:1) - sext(tmpRm:1)) >> 1; local tmpn = tmpRn >> 8; local tmpm = tmpRm >> 8; sum2:4 = (sext(tmpn:1) - sext(tmpm:1)) >> 1; tmpn = tmpRn >> 16; tmpm = tmpRm >> 16; sum3:4 = (sext(tmpn:1) - sext(tmpm:1)) >> 1; tmpn = tmpRn >> 24; tmpm = tmpRm >> 24; sum4:4 = (sext(tmpn:1) - sext(tmpm:1)) >> 1; Rd = (sum4 << 24) + ((sum3 & 0xff) << 16) + ((sum2 & 0xff) << 8) + (sum1 & 0xff); } @endif # VERSION_6 @if defined(VERSION_5E) :smla^XBIT^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x10 & smRd & smRn & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRa { build COND; local tmp:4 = sext(XBIT) * sext(YBIT); Q = scarry(tmp,smRa) || Q; #Q flag is sticky smRd = tmp+smRa; } @endif @if defined(VERSION_6) :smlad^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=1 & smRd & smRa & smRm & smRn { build COND; local tmpRn = smRn; local tmpRm = smRm; local tmpLRn = tmpRn:2; local tmpURn = tmpRn >> 16; local tmpLRm = tmpRm:2; local tmpURm = tmpRm >> 16; local product1 = sext(tmpLRn) * sext(tmpLRm); local product2 = sext(tmpURn:2) * sext(tmpURm:2); local tmpprod = product1 + product2; Q = scarry(smRa, tmpprod) || Q; #Q is sticky smRd = smRa + tmpprod; } :smladx^COND smRd, smRn, smRm, smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=3 & smRd & smRn & smRm & smRa { build COND; local tmpRn = smRn; local tmpRm = smRm; local tmpLRn = tmpRn:2; local tmpURn = tmpRn >> 16; local tmpLRm = tmpRm:2; local tmpURm = tmpRm >> 16; local product1 = sext(tmpLRn) * sext(tmpURm:2); local product2 = sext(tmpURn:2) * sext(tmpLRm); local tmpprod = product1 + product2; Q = scarry(smRa, tmpprod) || Q; #Q is sticky smRd = smRa + tmpprod; } @endif # VERSION_6 :smlal^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=7 & SBIT_ZN & RdLo & RdHi & smRn & c0407=9 & smRm { build COND; tmp:8 = (zext(RdHi) << 32) | zext(RdLo); rs64:8 = sext(smRm); rm64:8 = sext(smRn); tmp = rs64 * rm64 + tmp; resultflags(tmp); RdLo = 
tmp(0); RdHi = tmp(4); build SBIT_ZN; } @if defined(VERSION_5E) :smlal^XBIT^YBIT^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x14 & RdLo & RdHi & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn { build COND; local prod:8 = sext(XBIT) * sext(YBIT); local result:8 = (zext(RdHi) << 32) | zext(RdLo); result = result + prod; RdLo = result(0); RdHi = result(4); } @endif # VERSION_5E @if defined(VERSION_6) :smlald^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=1 & smRn & smRm { build COND; local tmpRn = smRn; local tmpRm = smRm; prod1:8 = sext(tmpRn:2) * sext(tmpRm:2); rmHi:2 = tmpRm(2); rnHi:2 = tmpRn(2); prod2:8 = sext(rmHi) * sext(rnHi); result:8 = zext(RdLo) + (zext(RdHi) << 32) + prod1 + prod2; RdLo = result:4; RdHi = result(4); } :smlaldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdLo & RdHi & c0607=0 & c0405=3 & smRn & smRm { build COND; local tmpRn = smRn; local tmpRm = smRm; rmHi:2 = tmpRm(2); rnHi:2 = tmpRn(2); prod1:8 = sext(tmpRn:2) * sext(rmHi); prod2:8 = sext(rnHi) * sext(tmpRm:2); result:8 = zext(RdLo) + (zext(RdHi) << 32) + prod1 + prod2; RdLo = result:4; RdHi = result(4); } @endif # VERSION_6 @if defined(VERSION_5E) :smlaw^YBIT^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x12 & smRd & smRn & smRm & c0707=1 & YBIT & x=0 & c0404=0 & smRa { build COND; local tmp64:6 = sext(smRn) * sext(YBIT); local tmp32:4 = tmp64(2); Q = scarry(tmp32, smRa) || Q; #Q flag is sticky smRd = tmp32 + smRa; } @endif # VERSION_5E @if defined(VERSION_6) :smlsd^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=0 & c0404=1 & smRm & smRa { build COND; local tmpRn = smRn; local tmpRm = smRm; prod1:4 = sext(tmpRn:2) * sext(tmpRm:2); rnHi:2 = tmpRn(2); rmHi:2 = tmpRm(2); prod2:4 = sext(rnHi) * sext(rmHi); diff:4 = prod1 - prod2; Q = scarry(diff, smRa) || Q; #Q is sticky smRd = smRa + diff; } :smlsdx^COND 
smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & smRn & c0607=1 & x=1 & c0404=1 & smRm & smRa { build COND; local tmpRn = smRn; local tmpRm = smRm; rnHi:2 = tmpRn(2); rmHi:2 = tmpRm(2); prod1:4 = sext(tmpRn:2) * sext(rmHi); prod2:4 = sext(rnHi) * sext(tmpRm:2); diff:4 = prod1 - prod2; Q = scarry(diff, smRa) || Q; #Q is sticky smRd = smRa + diff; } :smlsld^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=0 & c0404=1 & smRn { build COND; local tmpRn = smRn; local tmpRm = smRm; prod1:8 = sext(tmpRn:2) * sext(tmpRm:2); rnHi:2 = tmpRn(2); rmHi:2 = tmpRm(2); prod2:8 = sext(rnHi) * sext(rmHi); result:8 = zext(RdLo) + (zext(RdHi) << 32) + (prod1 - prod2); RdLo = result:4; RdHi = result(4); } :smlsldx^COND RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x74 & RdHi & RdLo & smRm & c0607=1 & x=1 & c0404=1 & smRn { build COND; local tmpRn = smRn; local tmpRm = smRm; rnHi:2 = tmpRn(2); rmHi:2 = tmpRm(2); prod1:8 = sext(tmpRn:2) * sext(rmHi); prod2:8 = sext(rnHi) * sext(tmpRm:2); result:8 = zext(RdLo) + (zext(RdHi) << 32) + (prod1 - prod2); RdLo = result:4; RdHi = result(4); } :smmla^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=0 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); val = (zext(smRa) << 32) + val; smRd = val(4); } :smmlar^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=0 & r=1 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); val = (zext(smRa) << 32) + val + 0x80000000; smRd = val(4); } :smmls^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=0 & c0404=1 & smRa { build COND; val:8 = sext(smRn) * sext(smRm); val = (zext(smRa) << 32) - val; smRd = val(4); } :smmlsr^COND smRd,smRn,smRm,smRa is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & smRn & smRm & c0607=3 & r=1 & c0404=1 & smRa { 
build COND; val:8 = sext(smRn) * sext(smRm); val = (zext(smRa) << 32) - val + 0x80000000; smRd = val(4); } :smmul^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=0 & c0404=1 & smRm { build COND; val:8 = sext(smRn) * sext(smRm); smRd = val(4); } :smmulr^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x75 & smRd & c1215=15 & smRn & c0607=0 & r=1 & c0404=1 & smRm { build COND; val:8 = (sext(smRn) * sext(smRm)) + 0x080000000; smRd = val(4); } :smuad^COND smRd, smRn, smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=1 & smRd & c1619=15 & smRn & smRm { build COND; local tmpRm = smRm; local tmpRn = smRn; local tmpLRm = tmpRm:2; local tmpURm = tmpRm >> 16; local tmpLRn = tmpRn:2; local tmpURn = tmpRn >> 16; local product1 = sext(tmpLRm) * sext(tmpLRn); local product2 = sext(tmpURm:2) * sext(tmpURn:2); local tmpprod = product1 + product2; Q = scarry(product1, product2); smRd = tmpprod; } :smuadx^COND smRd, smRn, smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & c0407=3 & smRd & c1619=15 & smRn & smRm { build COND; local tmpRm = smRm; local tmpRn = smRn; local tmpLRm = tmpRm:2; local tmpURm = tmpRm >> 16; local tmpLRn = tmpRn:2; local tmpURn = tmpRn >> 16; local product1 = sext(tmpLRm) * sext(tmpURn:2); local product2 = sext(tmpURm:2) * sext(tmpLRn); local tmpprod = product1 + product2; Q = scarry(product1, product2); smRd = tmpprod; } @endif # VERSION_6 @if defined(VERSION_5E) :smul^XBIT^YBIT^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x16 & smRd & c1215=0 & smRm & c0707=1 & XBIT & YBIT & c0404=0 & smRn { build COND; tmp:8 = sext(XBIT) * sext(YBIT); smRd = tmp:4; } @endif # VERSION_5E :smull^COND^SBIT_ZN RdLo,RdHi,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=6 & SBIT_ZN & RdHi & RdLo & smRn & c0407=9 & smRm { build COND; rn64:8 = sext(smRn); rm64:8 = sext(smRm); local tmp = rn64 * rm64; resultflags(tmp); RdLo = tmp(0); RdHi = tmp(4); build SBIT_ZN; } @if 
defined(VERSION_5E) :smulw^YBIT^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x12 & smRd & c1215=0 & smRn & c0707=1 & YBIT & x=1 & c0404=0 & smRm { build COND; tmp:6 = sext(smRn) * sext(YBIT); tmp = tmp >> 16; smRd = tmp:4; } @endif # VERSION_5E @if defined(VERSION_6) :smusd^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=0 & c0404=1 & smRn { build COND; local tmpRn = smRn; local tmpRm = smRm; rmHi:2 = tmpRm(2); prod1:4 = sext(tmpRn:2) * sext(tmpRm:2); rnHi:2 = tmpRn(2); prod2:4 = sext(rnHi) * sext(rmHi); smRd = prod1 - prod2; } :smusdx^COND smRd,smRn,smRm is $(AMODE) & ARMcond=1 & COND & c2027=0x70 & smRd & c1215=15 & smRm & c0607=1 & x=1 & c0404=1 & smRn { build COND; local tmpRn = smRn; local tmpRm = smRm; rmHi:2 = tmpRm(2); rnHi:2 = tmpRn(2); prod1:4 = sext(tmpRn:2) * sext(rmHi); prod2:4 = sext(rnHi) * sext(tmpRm:2); smRd = prod1 - prod2; } :ssat^COND Rd, sSatImm5, shift4 is $(AMODE) & ARMcond=1 & COND & c2127=0x35 & c0405=1 & sSatImm5 & Rd & shift4 { build COND; build shift4; tmp:4 = SignedSaturate(shift4, sSatImm5); Q = SignedDoesSaturate(shift4, sSatImm5); Rd = tmp; } :ssat16^COND Rd, sSatImm4, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x6a & c0811=15 & c0407=0x3 & sSatImm4 & Rd & Rm { build COND; build sSatImm4; local tmpl = Rm & 0xffff; tmpl = SignedSaturate(tmpl, sSatImm4); local tmpu = Rm >> 16; tmpu = SignedSaturate(tmpu, sSatImm4); Q = SignedDoesSaturate(tmpl,sSatImm4) | SignedDoesSaturate(tmpu,sSatImm4); Rd = ((tmpu & 0xffff) << 16) | (tmpl & 0xffff); } # ssubaddx :ssax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=5 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; local diff = sext(uRn:2) - sext(lRm:2); GE3 = diff s>= 0; GE4 = diff s>= 0; local sum = sext(lRn:2) + sext(uRm:2); GE1 = sum s>= 0; GE2 = sum s>= 0; Rd = ((diff & 0xffff) << 16) | (sum & 0xffff); 
} :ssub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=7 & Rn & Rd & Rm { build COND; local lRn = Rn & 0xffff; local lRm = Rm & 0xffff; local uRn = (Rn >> 16) & 0xffff; local uRm = (Rm >> 16) & 0xffff; local diffl = sext(lRn:2) - sext(lRm:2); GE1 = diffl s>= 0; GE2 = diffl s>= 0; local diffu = sext(uRn:2) - sext(uRm:2); GE3 = diffu s>= 0; GE4 = diffu s>= 0; Rd = ((diffu & 0xffff) << 16) | (diffl & 0xffff); } :ssub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x61 & c0811=15 & c0407=15 & Rn & Rd & Rm { build COND; local tmpRn = Rn & 0xff; local tmpRm = Rm & 0xff; local diff1 = sext(tmpRn:1) - sext(tmpRm:1); GE1 = diff1 s>= 0; tmpRn = (Rn >> 8) & 0xff; tmpRm = (Rm >> 8) & 0xff; local diff2 = sext(tmpRn:1) - sext(tmpRm:1); GE2 = diff2 s>= 0; tmpRn = (Rn >> 16) & 0xff; tmpRm = (Rm >> 16) & 0xff; local diff3 = sext(tmpRn:1) - sext(tmpRm:1); GE3 = diff3 s>= 0; tmpRn = (Rn >> 24) & 0xff; tmpRm = (Rm >> 24) & 0xff; local diff4 = sext(tmpRn:1) - sext(tmpRm:1); GE4 = diff4 s>= 0; Rd = ((diff4 & 0xff) << 24) | ((diff3 & 0xff) << 16) | ((diff2 & 0xff) << 8) | (diff1 & 0xff); } @endif # VERSION_6 :stc^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=0 & L20=0 { build COND; build addrmode5; t_cpn:4 = cpn; coprocessor_store(t_cpn,CRd,addrmode5); } :stcl^COND cpn,CRd,addrmode5 is $(AMODE) & ARMcond=1 & COND & c2527=6 & addrmode5 & cpn & CRd & N22=1 & L20=0 { build COND; build addrmode5; t_cpn:4 = cpn; coprocessor_storelong(t_cpn,CRd,addrmode5); } :stm^mdir^COND reglist is $(AMODE) & ARMcond=1 & COND & c2527=4 & mdir & L20=0 & reglist { build COND; build reglist; } #:str^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 #{ # build COND; # build addrmode2; # tmp=addrmode2&0xfffffffc; # *tmp = Rd; #} # The following form of str assumes alignment checking is on :str^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & 
B22=0 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; *addrmode2 = Rd; } :strb^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; local tmpRd = Rd; *addrmode2 = tmpRd:1; } :strbt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & P24=0 & B22=1 & W21=1 & L20=0 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; local tmpRd = Rd; *addrmode2 = tmpRd:1; } :strh^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & L20=0 & c0407=11 & Rd & addrmode3 { build COND; build addrmode3; local tmpRd = Rd; *addrmode3 = tmpRd:2; } @if defined(VERSION_5E) :strd^COND Rd,Rd2,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & c0407=0xf & L20=0 & Rd & Rd2 & addrmode3 { build COND; build addrmode3; local addr = addrmode3; *(addr) = Rd; addr = addr + 4; *(addr) = Rd2; } @endif # VERSION_5E @if defined(VERSION_6) :strex^COND Rd,Rm,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x18 & c0411=0xf9 & Rn & Rd & Rm { build COND; local tmp = Rn; local tmpRm = Rm; access:1 = hasExclusiveAccess(tmp); Rd = 1; if (!access) goto inst_next; Rd = 0; *tmp = tmpRm; } @endif # VERSION_6 @if defined(VERSION_6K) :strexb^COND Rd,Rm,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1c & c0411=0xf9 & Rn & Rd & Rm { build COND; local tmp = Rn; local tmpRm = Rm; access:1 = hasExclusiveAccess(tmp); Rd = 1; if (!access) goto inst_next; Rd = 0; *tmp = tmpRm:1; } :strexd^COND Rd,Rm,Rm2,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1a & Rn & Rd & c0411=0xf9 & c0003 & Rm & Rm2 { build COND; local addr = Rn; local tmpRm = Rm; local tmpRm2 = Rm2; access:1 = hasExclusiveAccess(addr); Rd = 1; if (!access) goto inst_next; Rd = 0; *(addr) = tmpRm; addr = addr + 4; *(addr) = tmpRm2; } :strexh^COND Rd,Rm,[Rn] is $(AMODE) & ARMcond=1 & COND & c2027=0x1e & c0411=0xf9 & Rn & Rd & Rm { build COND; local tmp = Rn; local tmpRm = Rm; access:1 = 
hasExclusiveAccess(tmp); Rd = 1; if (!access) goto inst_next; Rd = 0; *tmp = tmpRm:2; } :strht^COND Rd,addrmode3 is $(AMODE) & ARMcond=1 & COND & c2527=0 & P24=0 & W21=1 & L20=0 & c0407=11 & Rd & addrmode3 { build COND; *:2 addrmode3 = Rd; } @endif # VERSION_6K #:strt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & addrmode2 #{ # build COND; # build addrmode2; # tmp=addrmode2&0xfffffffc; # *tmp = Rd; #} # The following form of str assumes alignment checking is on :strt^COND Rd,addrmode2 is $(AMODE) & ARMcond=1 & COND & c2627=1 & B22=0 & L20=0 & P24=0 & W21=1 & Rd & (I25=0 | (I25=1 & c0404=0)) & addrmode2 { build COND; build addrmode2; *addrmode2 = Rd; } :sub^COND^SBIT_CZNO Rd,rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift1 { build COND; build rn; build shift1; subflags(rn,shift1); Rd = rn-shift1; resultflags(Rd); build SBIT_CZNO; } :sub^COND^SBIT_CZNO Rd,rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift2 { build COND; build rn; build shift2; subflags(rn,shift2); Rd = rn-shift2; resultflags(Rd); build SBIT_CZNO; } :sub^COND^SBIT_CZNO Rd,rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd & c2627=0 & shift3 { build COND; build rn; build shift3; subflags(rn,shift3); Rd = rn-shift3; resultflags(Rd); build SBIT_CZNO; } :sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift1 { build COND; build rn; build shift1; subflags(rn,shift1); dest:4 = rn-shift1; resultflags(dest); build SBIT_CZNO; cpsr = spsr; SetThumbMode( ((cpsr >> 5) & 1) != 0 ); pc = dest; goto [pc]; } :sub^COND^SBIT_CZNO pc,rn,shift1 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & Rn=14 & I25=1 & immed=0 & rotate=0 & c2627=0 & shift1 { build COND; build rn; build shift1; subflags(rn,shift1); dest:4 = rn-shift1; resultflags(dest); build SBIT_CZNO; cpsr = spsr; 
ALUWritePC(dest); return [pc]; } :sub^COND^SBIT_CZNO pc,rn,shift2 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift2 { build COND; build rn; build shift2; subflags(rn,shift2); dest:4 = rn-shift2; resultflags(dest); build SBIT_CZNO; cpsr = spsr; SetThumbMode( ((cpsr >> 5) & 1) != 0 ); pc = dest; goto [pc]; } :sub^COND^SBIT_CZNO pc,rn,shift3 is $(AMODE) & pc & ARMcond=1 & COND & c2124=2 & SBIT_CZNO & rn & Rd=15 & c2627=0 & shift3 { build COND; build rn; build shift3; subflags(rn,shift3); dest:4 = rn-shift3; resultflags(dest); build SBIT_CZNO; cpsr = spsr; SetThumbMode( ((cpsr >> 5) & 1) != 0 ); pc = dest; goto [pc]; } :swi^COND immed24 is $(AMODE) & ARMcond=1 & COND & c2427=15 & immed24 { build COND; tmp:4 = immed24; software_interrupt(tmp); } #:swp^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm #{ # build COND; # tmp = Rn & 0xfffffffc; # tmp2 = (Rn&3)<<3; # val:4 = *tmp; # val=(val>>tmp2) | (val << (32-tmp2)); # *tmp = Rm; # Rd = val; #} # Assuming alignment checking is enabled :swp^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=16 & Rn & Rd & c0811=0 & c0407=9 & Rm { build COND; val:4 = *Rn; *Rn = Rm; Rd = val; } :swpb^COND Rd,Rm,Rn is $(AMODE) & ARMcond=1 & COND & c2027=20 & Rn & Rd & c0811=0 & c0407=9 & Rm { build COND; local tmp = *:1 Rn; local tmpRm = Rm; *Rn = tmpRm:1; Rd = zext(tmp); } @if defined(VERSION_6) :sxtab^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=2 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; Rd = Rn + sext(ror1:1); } :sxtab16^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2027=0x68 & c0407=7 & Rn & Rd & ror1 { build COND; build ror1; b:1 = ror1:1; lo:2 = Rn:2 + sext(b); b = ror1(2); hi:2 = Rn(2) + sext(b); Rd = (zext(hi) << 16) + zext(lo); } :sxtah^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=7 & Rd & Rn & ror1 { build COND; build ror1; Rd = Rn + sext(ror1:2); } :sxtb^COND Rd,ror1 is $(AMODE) & 
ARMcond=1 & COND & c2327=13 & c2022=2 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; Rd = sext(ror1:1); } :sxtb16^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=0 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; local tmp1:1 = ror1:1; local low:2 = sext(tmp1); local tmp2:1 = ror1(2); local high:2 = sext(tmp2); Rd = (zext(high) << 16) | zext(low); } :sxth^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=3 & c0407=7 & Rd & c1619=15 & ror1 { build COND; build ror1; Rd = sext(ror1:2); } @endif # VERSION_6 :teq^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; build shift1; local tmp = rn^shift1; logicflags(); resultflags(tmp); affectflags(); } :teq^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; build shift2; local tmp = rn^shift2; logicflags(); resultflags(tmp); affectflags(); } :teq^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; build shift3; local tmp = rn^shift3; logicflags(); resultflags(tmp); affectflags(); } :teq^COND^"p" rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift1 { build COND; build rn; build shift1; local tmp = rn^shift1; logicflags(); resultflags(tmp); affectflags(); } :teq^COND^"p" rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift2 { build COND; build rn; build shift2; local tmp = rn^shift2; logicflags(); resultflags(tmp); affectflags(); } :teq^COND^"p" rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=19 & rn & c1215=15 & c2627=0 & shift3 { build COND; build rn; build shift3; local tmp = rn^shift3; logicflags(); resultflags(tmp); affectflags(); } :tst^COND rn,shift1 is $(AMODE) & ARMcond=1 & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift1 { build COND; build rn; build shift1; local tmp = rn & shift1; logicflags(); 
resultflags(tmp); affectflags(); } :tst^COND rn,shift2 is $(AMODE) & ARMcond=1 & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift2 { build COND; build rn; build shift2; local tmp = rn & shift2; logicflags(); resultflags(tmp); affectflags(); } :tst^COND rn,shift3 is $(AMODE) & ARMcond=1 & COND & c2024=17 & rn & c1215=0 & c2627=0 & shift3 { build COND; build rn; build shift3; local tmp = rn & shift3; logicflags(); resultflags(tmp); affectflags(); } @if defined(VERSION_6) :uadd16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=1 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:2 = tmpRn:2; tmp2:2 = tmpRm:2; local tcarry = carry(tmp1,tmp2); GE1 = tcarry; GE2 = tcarry; local tmpLow = tmp1 + tmp2; tmp1 = rn(2); tmp2 = rm(2); tcarry = carry(tmp1,tmp2); GE3 = tcarry; GE4 = tcarry; local tmpHigh = tmp1 + tmp2; Rd = zext(tmpHigh) << 16 | zext(tmpLow); } :uadd8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=9 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:1 = tmpRn:1; tmp2:1 = tmpRm:1; GE1 = carry(tmp1,tmp2); b1:1 = tmp1 + tmp2; tmp1 = rn(1); tmp2 = rm(1); GE2 = carry(tmp1,tmp2); b2:1 = tmp1 + tmp2; tmp1 = rn(2); tmp2 = rm(2); GE3 = carry(tmp1,tmp2); b3:1 = tmp1 + tmp2; tmp1 = rn(3); tmp2 = rm(3); GE4 = carry(tmp1,tmp2); b4:1 = tmp1 + tmp2; Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1); } # uaddsubx :uasx^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=3 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:2 = tmpRn:2; tmp2:2 = tmpRm(2); local tmpLow:4 = zext(tmp1) - zext(tmp2); GE1 = tmpLow s>= 0; GE2 = tmpLow s>= 0; tmp1 = tmpRn(2); tmp2 = tmpRm:2; tcarry:1 = carry(tmp1,tmp2); GE3 = tcarry; GE4 = tcarry; local tmpHigh = tmp1 + tmp2; Rd[0,16] = tmpLow[0,16]; Rd[16,16] = tmpHigh; } @endif # VERSION_6 @if 
defined(VERSION_6T2) :ubfx^COND Rd,Rm,lsbImm,widthMinus1 is $(AMODE) & ARMcond=1 & COND & c2127=0x3f & widthMinus1 & Rd & lsbImm & c0406=5 & Rm { build COND; build lsbImm; build widthMinus1; shift:4 = 31 - (lsbImm + widthMinus1); Rd = Rm << shift; shift = 31 - widthMinus1; Rd = Rd >> shift; } @endif # VERSION_6T2 @if defined(VERSION_7) :udiv^COND RdHi,RnLo,RmHi is $(AMODE) & ARMcond=1 & COND & c2027=0x73 & RdHi & c1215=0xf & RmHi & c0407=0x1 & RnLo { build COND; result:8 = zext(RnLo) / zext(RmHi); RdHi = result(0); } @endif # VERSION_7 @if defined(VERSION_6) :uhadd16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=1 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:4 = tmpRn & 0xffff; tmp2:4 = tmpRm & 0xffff; local tmpLow = tmp1 + tmp2; local tmpHigh = (tmpRn >> 16) + (tmpRm >> 16); Rd[0,16] = tmpLow[1,16]; Rd[16,16] = tmpHigh[1,16]; } :uhadd8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=9 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:1 = tmpRn:1; tmp2:1 = tmpRm:1; b1:2 = (zext(tmp1) + zext(tmp2)) >> 1; tmp1 = tmpRn(1); tmp2 = tmpRm(1); b2:2 = (zext(tmp1) + zext(tmp2)) >> 1; tmp1 = tmpRn(2); tmp2 = tmpRm(2); b3:2 = (zext(tmp1) + zext(tmp2)) >> 1; tmp1 = tmpRn(3); tmp2 = tmpRm(3); b4:2 = (zext(tmp1) + zext(tmp2)) >> 1; Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1); } # uhaddsubx :uhasx^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=3 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:2 = tmpRn:2; tmp2:2 = tmpRm(2); tmpLow:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; tmp1 = tmpRn(2); tmp2 = tmpRm:2; tmpHigh:4 = (zext(tmp1) + zext(tmp2)) >> 1; Rd = (tmpHigh << 16) | tmpLow; } # uhsubaddx :uhsax^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=5 & Rd & rn & rm { build COND; 
build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:2 = tmpRn:2; tmp2:2 = tmpRm(2); tmpLow:4 = (zext(tmp1) + zext(tmp2)) >> 1; tmp1 = tmpRn(2); tmp2 = tmpRm:2; tmpHigh:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; Rd = (tmpHigh << 16) | tmpLow; } :uhsub16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=7 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:2 = tmpRn:2; tmp2:2 = tmpRm:2; tmpLow:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; tmp1 = rn(2); tmp2 = rm(2); tmpHigh:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ffff; Rd = (tmpHigh << 16) | tmpLow; } :uhsub8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=7 & c0811=15 & c0407=15 & Rd & rn & rm { build COND; build rn; build rm; local tmpRn = rn; local tmpRm = rm; tmp1:1 = tmpRn:1; tmp2:1 = tmpRm:1; b1:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; tmp1 = tmpRn(1); tmp2 = tmpRm(1); b2:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; tmp1 = tmpRn(2); tmp2 = tmpRm(2); b3:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; tmp1 = tmpRn(3); tmp2 = tmpRm(3); b4:4 = ((zext(tmp1) - zext(tmp2)) >> 1) & 0x0ff; Rd = (b4 << 24) | (b3 << 16) | (b2 << 8) | b1; } :umaal^COND RdLo,RdHi,Rm,Rs is $(AMODE) & ARMcond=1 & COND & c2027=0x04 & RdHi & RdLo & Rs & c0407=9 & Rm { build COND; result:8 = (zext(Rm) * zext(Rs)) + zext(RdLo) + zext(RdHi); RdLo = result:4; RdHi = result(4); } @endif # VERSION_6 :umlal^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=5 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm { build COND; build rm; build rs; tmp:8 = (zext(Rn) << 32) | zext(Rd); rs64:8 = zext(rs); rm64:8 = zext(rm); tmp = rs64 * rm64 + tmp; resultflags(tmp); Rd = tmp(0); Rn = tmp(4); build SBIT_ZN; } :umull^COND^SBIT_ZN Rd,Rn,rm,rs is $(AMODE) & ARMcond=1 & COND & c2527=0 & c2124=4 & SBIT_ZN & Rn & Rd & rs & c0407=9 & rm { build COND; build rm; build rs; rs64:8 = zext(rs); rm64:8 = zext(rm); local tmp = rs64 * rm64; 
resultflags(tmp);
  Rd = tmp(0);
  Rn = tmp(4);
  build SBIT_ZN;
}

@if defined(VERSION_6)

# UQADD16: per-halfword unsigned saturating add.
:uqadd16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=1 & Rn & Rd & Rm
{
  build COND;
  local tmpRn = Rn;
  local tmpRm = Rm;
  tmp2Rn:2 = tmpRn:2;
  tmp2Rm:2 = tmpRm:2;
  sum1:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2);
  tmp2Rn = tmpRn(2);
  tmp2Rm = tmpRm(2);
  sum2:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2);
  Rd = (zext(sum2) << 16) | zext(sum1);
}

# UQADD8: per-byte unsigned saturating add.
# BUGFIX: byte lanes saturate at 8 bits (ARM ARM: UnsignedSat(sum, 8));
# the saturation bound was previously 16, so byte lanes never clamped.
:uqadd8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=9 & Rn & Rd & Rm
{
  build COND;
  local tmpRn = Rn;
  local tmpRm = Rm;
  tmp1Rn:1 = tmpRn:1;
  tmp1Rm:1 = tmpRm:1;
  sum1:1 = UnsignedSaturate(tmp1Rn + tmp1Rm, 8:2);
  tmp1Rn = tmpRn(1);
  tmp1Rm = tmpRm(1);
  sum2:2 = UnsignedSaturate(tmp1Rn + tmp1Rm, 8:2);
  tmp1Rn = tmpRn(2);
  tmp1Rm = tmpRm(2);
  sum3:2 = UnsignedSaturate(tmp1Rn + tmp1Rm, 8:2);
  tmp1Rn = tmpRn(3);
  tmp1Rm = tmpRm(3);
  sum4:2 = UnsignedSaturate(tmp1Rn + tmp1Rm, 8:2);
  Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1);
}

# uqaddsubx
# UQASX: low lane = sat(Rn.lo - Rm.hi), high lane = sat(Rn.hi + Rm.lo).
:uqasx^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=3 & Rn & Rd & Rm
{
  build COND;
  local tmpRn = Rn;
  local tmpRm = Rm;
  tmp2Rn:2 = tmpRn:2;
  tmp2Rm:2 = tmpRm(2);
  sum1:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2);
  tmp2Rn = tmpRn(2);
  tmp2Rm = tmpRm:2;
  sum2:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2);
  Rd = (zext(sum2) << 16) | zext(sum1);
}

# uqsubaddx
# UQSAX: low lane = sat(Rn.lo + Rm.hi), high lane = sat(Rn.hi - Rm.lo).
:uqsax^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=5 & Rn & Rd & Rm
{
  build COND;
  local tmpRn = Rn;
  local tmpRm = Rm;
  tmp2Rn:2 = tmpRn:2;
  tmp2Rm:2 = tmpRm(2);
  sum1:2 = UnsignedSaturate(tmp2Rn + tmp2Rm, 16:2);
  tmp2Rn = tmpRn(2);
  tmp2Rm = tmpRm:2;
  sum2:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2);
  Rd = (zext(sum2) << 16) | zext(sum1);
}

# UQSUB16: per-halfword unsigned saturating subtract.
:uqsub16^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=7 & Rn & Rd & Rm
{
  build COND;
  local tmpRn = Rn;
  local tmpRm = Rm;
  tmp2Rn:2 = tmpRn:2;
  tmp2Rm:2 = tmpRm:2;
  sum1:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2);
  tmp2Rn = tmpRn(2);
  tmp2Rm = tmpRm(2);
  sum2:2 = UnsignedSaturate(tmp2Rn - tmp2Rm, 16:2);
  Rd = (zext(sum2) << 16) | zext(sum1);
}

# UQSUB8: per-byte unsigned saturating subtract.
# BUGFIX: byte lanes saturate at 8 bits, not 16 (see uqadd8 above).
:uqsub8^COND Rd, Rn, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x66 & c0811=15 & c0407=15 & Rn & Rd & Rm
{
  build COND;
  local tmpRn = Rn;
  local tmpRm = Rm;
  tmp1Rn:1 = tmpRn:1;
  tmp1Rm:1 = tmpRm:1;
  sum1:1 = UnsignedSaturate(tmp1Rn - tmp1Rm, 8:2);
  tmp1Rn = tmpRn(1);
  tmp1Rm = tmpRm(1);
  sum2:2 = UnsignedSaturate(tmp1Rn - tmp1Rm, 8:2);
  tmp1Rn = tmpRn(2);
  tmp1Rm = tmpRm(2);
  sum3:2 = UnsignedSaturate(tmp1Rn - tmp1Rm, 8:2);
  tmp1Rn = tmpRn(3);
  tmp1Rm = tmpRm(3);
  sum4:2 = UnsignedSaturate(tmp1Rn - tmp1Rm, 8:2);
  Rd = (zext(sum4) << 24) | (zext(sum3) << 16) | (zext(sum2) << 8) | zext(sum1);
}

# USAD8: unsigned SUM of the four per-byte absolute differences.
# BUGFIX: ARM ARM defines Rd = absdiff1 + absdiff2 + absdiff3 + absdiff4;
# the previous semantics packed the four differences into bytes of Rd.
# NOTE(review): each lane difference is still computed in 8-bit arithmetic,
# so Absolute() of a wrapped byte may differ from the true unsigned
# absolute difference — widening the lanes would be more faithful; kept
# minimal pending confirmation of Absolute()'s semantics.
:usad8^COND Rd, Rm, Rs is $(AMODE) & ARMcond=1 & COND & c2027=0x78 & c1215=15 & c0407=1 & Rd & Rm & Rs
{
  build COND;
  local tmpRs = Rs;
  local tmpRm = Rm;
  tmp1Rs:1 = tmpRs:1;
  tmp1Rm:1 = tmpRm:1;
  sum1:1 = Absolute(tmp1Rs - tmp1Rm);
  tmp1Rs = tmpRs(1);
  tmp1Rm = tmpRm(1);
  sum2:1 = Absolute(tmp1Rs - tmp1Rm);
  tmp1Rs = tmpRs(2);
  tmp1Rm = tmpRm(2);
  sum3:1 = Absolute(tmp1Rs - tmp1Rm);
  tmp1Rs = tmpRs(3);
  tmp1Rm = tmpRm(3);
  sum4:1 = Absolute(tmp1Rs - tmp1Rm);
  Rd = zext(sum1) + zext(sum2) + zext(sum3) + zext(sum4);
}

# USADA8: Rn plus the unsigned sum of the four per-byte absolute differences.
# BUGFIX: same accumulation fix as usad8 above.
:usada8^COND Rd, Rm, Rs, Rn is $(AMODE) & ARMcond=1 & COND & c2027=0x78 & c0407=1 & Rd & Rn & Rm & Rs
{
  build COND;
  local tmpRs = Rs;
  local tmpRm = Rm;
  tmp1Rs:1 = tmpRs:1;
  tmp1Rm:1 = tmpRm:1;
  sum1:1 = Absolute(tmp1Rs - tmp1Rm);
  tmp1Rs = tmpRs(1);
  tmp1Rm = tmpRm(1);
  sum2:1 = Absolute(tmp1Rs - tmp1Rm);
  tmp1Rs = tmpRs(2);
  tmp1Rm = tmpRm(2);
  sum3:1 = Absolute(tmp1Rs - tmp1Rm);
  tmp1Rs = tmpRs(3);
  tmp1Rm = tmpRm(3);
  sum4:1 = Absolute(tmp1Rs - tmp1Rm);
  Rd = Rn + (zext(sum1) + zext(sum2) + zext(sum3) + zext(sum4));
}

# USAT: unsigned saturate a shifted value; Q set from the PRE-saturation value.
:usat^COND Rd, uSatImm5, shift4 is $(AMODE) & ARMcond=1 & COND & c2127=0x37 & c0405=0x1 & uSatImm5 & Rd & shift4
{
  build COND;
  build uSatImm5;
  build shift4;
  tmp:4 = UnsignedSaturate(shift4, uSatImm5);
  Q = UnsignedDoesSaturate(shift4, uSatImm5);
  Rd = tmp;
}

# USAT16: unsigned saturate both halfwords.
# BUGFIX: Q must be computed from the original lane values; previously the
# lanes were clamped first, so UnsignedDoesSaturate() could never report
# saturation (contrast with usat above, which tests the raw value).
:usat16^COND Rd, uSatImm4, Rm is $(AMODE) & ARMcond=1 & COND & c2027=0x6e & c0811=15 & c0407=0x3 & uSatImm4 & Rd & Rm
{
  build COND;
  build uSatImm4;
  local tmpl = Rm & 0xffff;
  local tmpu = Rm >> 16;
  Q = UnsignedDoesSaturate(tmpl, uSatImm4) | UnsignedDoesSaturate(tmpu, uSatImm4);
  tmpl = UnsignedSaturate(tmpl, uSatImm4);
  tmpu = UnsignedSaturate(tmpu, uSatImm4);
  Rd = ((tmpu & 0xffff) << 16) | (tmpl & 0xffff);
}

# usubaddx
# USAX: low lane = Rn.lo + Rm.hi (GE1/GE2 from carry),
#       high lane = Rn.hi - Rm.lo (GE3/GE4 from no-borrow).
:usax^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=5 & Rd & rn & rm
{
  build COND;
  build rn;
  build rm;
  local tmpRn = rn;
  local tmpRm = rm;
  tmp1:2 = tmpRn:2;
  tmp2:2 = tmpRm(2);
  local tcarry = carry(tmp2,tmp1);
  GE1 = tcarry;
  GE2 = tcarry;
  local tmpLow = tmp1 + tmp2;
  tmp1 = tmpRn(2);
  tmp2 = tmpRm:2;
  tcarry = tmp2 <= tmp1;
  GE3 = tcarry;
  GE4 = tcarry;
  local tmpHigh = tmp1 - tmp2;
  Rd = zext(tmpHigh) << 16 | zext(tmpLow);
}

# USUB16: per-halfword unsigned subtract; GE flags record no-borrow.
:usub16^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=7 & Rd & rn & rm
{
  build COND;
  build rn;
  build rm;
  local tmpRn = rn;
  local tmpRm = rm;
  tmp1:2 = tmpRn:2;
  tmp2:2 = tmpRm:2;
  local tcarry = tmp2 <= tmp1;
  GE1 = tcarry;
  GE2 = tcarry;
  local tmpLow = tmp1 - tmp2;
  tmp1 = tmpRn(2);
  tmp2 = tmpRm(2);
  tcarry = tmp2 <= tmp1;
  GE3 = tcarry;
  GE4 = tcarry;
  local tmpHigh = tmp1 - tmp2;
  Rd = zext(tmpHigh) << 16 | zext(tmpLow);
}

# USUB8: per-byte unsigned subtract; one GE flag per byte lane.
:usub8^COND Rd,rn,rm is $(AMODE) & ARMcond=1 & COND & c2327=12 & c2022=5 & c0811=15 & c0407=15 & Rd & rn & rm
{
  build COND;
  build rn;
  build rm;
  local tmpRn = rn;
  local tmpRm = rm;
  tmp1:1 = tmpRn:1;
  tmp2:1 = tmpRm:1;
  GE1 = tmp2 <= tmp1;
  b1:1 = tmp1 - tmp2;
  tmp1 = tmpRn(1);
  tmp2 = tmpRm(1);
  GE2 = tmp2 <= tmp1;
  b2:1 = tmp1 - tmp2;
  tmp1 = tmpRn(2);
  tmp2 = tmpRm(2);
  GE3 = tmp2 <= tmp1;
  b3:1 = tmp1 - tmp2;
  tmp1 = tmpRn(3);
  tmp2 = tmpRm(3);
  GE4 = tmp2 <= tmp1;
  b4:1 = tmp1 - tmp2;
  Rd = (zext(b4) << 24) | (zext(b3) << 16) | (zext(b2) << 8) | zext(b1);
}
# UXTAB: Rd = Rn + zero-extended low byte of the (rotated) operand.
:uxtab^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=6 & c0407=7 & Rd & Rn & ror1
{ build COND; build ror1; Rd = Rn + zext(ror1:1); }

# UXTAB16: per-halfword add of the zero-extended bytes 0 and 2 of the
# rotated operand to the corresponding halfwords of Rn.
:uxtab16^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=4 & c0407=7 & Rd & Rn & ror1
{ build COND; build ror1;
  local tmp1 = ror1 & 0xff;
  local tmp2 = (ror1 >> 16) & 0xff;
  local tmp1n = (Rn + tmp1) & 0xffff;
  local tmp2n = (Rn >> 16) + tmp2;
  Rd = (tmp2n << 16) | tmp1n; }

# UXTAH: Rd = Rn + zero-extended low halfword of the (rotated) operand.
:uxtah^COND Rd,Rn,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=7 & Rd & Rn & ror1
{ build COND; build ror1; Rd = Rn + zext(ror1:2); }

# UXTB: zero-extend low byte of the rotated operand.
:uxtb^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=6 & c0407=7 & Rd & c1619=15 & ror1
{ build COND; build ror1; Rd = ror1 & 0x0ff; }

# UXTB16: zero-extend bytes 0 and 2 of the rotated operand into the two halfwords.
:uxtb16^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=4 & c0407=7 & Rd & c1619=15 & ror1
{ build COND; build ror1; Rd = ror1 & 0x0ff00ff; }

# UXTH: zero-extend low halfword of the rotated operand.
:uxth^COND Rd,ror1 is $(AMODE) & ARMcond=1 & COND & c2327=13 & c2022=7 & c0407=7 & Rd & c1619=15 & ror1
{ build COND; build ror1; Rd = ror1 & 0x0ffff; }
@endif # VERSION_6

# :v* Advanced SIMD and VFP instructions - see ARMneon.sinc

@if defined(VERSION_6K)
# Hint instructions modeled as opaque pcodeops.
:wfe^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f002
{ build COND; WaitForEvent(); }

:wfi^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f003
{ build COND; WaitForInterrupt(); }

:yield^COND is $(AMODE) & ARMcond=1 & COND & c0027=0x320f001
{ build COND; HintYield(); }
@endif # VERSION_6K

## Some special pseudo ops for better distinguishing
## indirect calls, and returns
#:callx rm is $(AMODE) & pref=0xe1a0e00f; cond=14 & c2027=18 & c1619=15 & c1215=15 & c0811=15 & c0407=1 & rm
#{
# lr = inst_next + 8;
# TB=(rm&0x00000001)!=0;
# tmp=rm&0xfffffffe;
# call [tmp];
# TB=0;
#}
# Optional change to THUMB
#:call^COND^SBIT_CZNO shift1 is $(AMODE) & pref=0xe1a0e00f; COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift1
#{
# lr = inst_next + 8;
# build COND;
# build shift1;
# pc = shift1;
# resultflags(pc);
# logicflags();
# build SBIT_CZNO;
# call [pc];
#}
#:call^COND^SBIT_CZNO shift2 is $(AMODE) & pref=0xe1a0e00f; COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift2
#{
# lr = inst_next + 8;
# build COND;
# build shift2;
# pc = shift2;
# resultflags(pc);
# logicflags();
# build SBIT_CZNO;
# call [pc];
#}
#:call^COND^SBIT_CZNO shift3 is $(AMODE) & pref=0xe1a0e00f; COND & c2124=13 & SBIT_CZNO & c1619=0 & Rd=15 & c2627=0 & shift3
#{
# lr = inst_next + 8;
# build COND;
# build shift3;
# pc = shift3;
# resultflags(pc);
# logicflags();
# build SBIT_CZNO;
# call [pc];
#}

} # End with : ARMcondCk=1

================================================
FILE: pypcode/processors/ARM/data/languages/ARMneon.dwarf
================================================

================================================
FILE: pypcode/processors/ARM/data/languages/ARMneon.sinc
================================================
# Advanced SIMD support / NEON
# WARNING NOTE: Be very careful taking a subpiece or truncating a register with :# or (#)
# The LEBE hybrid language causes endian issues if you do not assign the register to a temp
# variable and then take a subpiece or truncate.
# @define FPSCR_RMODE "fpscr[22,2]" @define TMODE_E "TMode=1 & thv_c2831=14" # check for neon instructions in thumb mode @define TMODE_F "TMode=1 & thv_c2831=15" @define TMODE_EorF "TMode=1 & thv_c2931=7" # The RM field is bits 22 and 23 of FPSCR @define FPSCR_RMODE "fpscr[21,2]" zero: "#0" is c0000 { export 0:8; } @if defined(SIMD) attach variables [ thv_Rm ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; attach variables [ Qn0 Qd0 Qm0 thv_Qn0 thv_Qd0 thv_Qm0 ] [ q0 _ q1 _ q2 _ q3 _ q4 _ q5 _ q6 _ q7 _ ]; attach variables [ Qn1 Qd1 Qm1 thv_Qn1 thv_Qd1 thv_Qm1 ] [ q8 _ q9 _ q10 _ q11 _ q12 _ q13 _ q14 _ q15 _ ]; Qd: Qd0 is TMode=0 & Qd0 & D22=0 { export Qd0; } Qd: Qd1 is TMode=0 & Qd1 & D22=1 { export Qd1; } Qd: thv_Qd0 is TMode=1 & thv_Qd0 & thv_D22=0 { export thv_Qd0; } Qd: thv_Qd1 is TMode=1 & thv_Qd1 & thv_D22=1 { export thv_Qd1; } Qn: Qn0 is TMode=0 & Qn0 & N7=0 { export Qn0; } Qn: Qn1 is TMode=0 & Qn1 & N7=1 { export Qn1; } Qn: thv_Qn0 is TMode=1 & thv_Qn0 & thv_N7=0 { export thv_Qn0; } Qn: thv_Qn1 is TMode=1 & thv_Qn1 & thv_N7=1 { export thv_Qn1; } Qm: Qm0 is TMode=0 & Qm0 & M5=0 { export Qm0; } Qm: Qm1 is TMode=0 & Qm1 & M5=1 { export Qm1; } Qm: thv_Qm0 is TMode=1 & thv_Qm0 & thv_M5=0 { export thv_Qm0; } Qm: thv_Qm1 is TMode=1 & thv_Qm1 & thv_M5=1 { export thv_Qm1; } @endif # SIMD @if defined(SIMD) || defined(VFPv3) || defined(VFPv2) attach variables [ Dm_3 thv_Dm_3 ] [ d0 d1 d2 d3 d4 d5 d6 d7 ]; attach variables [ Dn0 Dd0 Dm0 Dm_4 thv_Dn0 thv_Dd0 thv_Dm0 thv_Dm_4 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; attach variables [ thv_Dd_1 Dd_1 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; attach variables [ thv_Dd_2 Dd_2 ] [ d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ ]; attach variables [ thv_Dd_3 Dd_3 ] [ d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ ]; attach variables [ thv_Dd_4 Dd_4 ] [ d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ ]; attach variables [ thv_Dd_5 Dd_5 ] [ d4 d5 d6 d7 d8 d9 d10 d11 
d12 d13 d14 d15 _ _ _ _ ]; attach variables [ thv_Dd_6 Dd_6 ] [ d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ ]; attach variables [ thv_Dd_7 Dd_7 ] [ d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ ]; attach variables [ thv_Dd_8 Dd_8 ] [ d7 d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_9 Dd_9 ] [ d8 d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_10 Dd_10 ] [ d9 d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_11 Dd_11 ] [ d10 d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_12 Dd_12 ] [ d11 d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_13 Dd_13 ] [ d12 d13 d14 d15 _ _ _ _ _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_14 Dd_14 ] [ d13 d14 d15 _ _ _ _ _ _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_15 Dd_15 ] [ d14 d15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; attach variables [ thv_Dd_16 Dd_16 ] [ d15 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; Dd: Dd0 is TMode=0 & Dd0 & D22=0 { export Dd0; } Dn: Dn0 is TMode=0 & Dn0 & N7=0 { export Dn0; } Dm: Dm0 is TMode=0 & Dm0 & M5=0 { export Dm0; } Dd: thv_Dd0 is TMode=1 & thv_Dd0 & thv_D22=0 { export thv_Dd0; } Dn: thv_Dn0 is TMode=1 & thv_Dn0 & thv_N7=0 { export thv_Dn0; } Dm: thv_Dm0 is TMode=1 & thv_Dm0 & thv_M5=0 { export thv_Dm0; } Dd2: Dd is Dd { export Dd; } @endif # SIMD || VFPv3 || VFPv2 @if defined(SIMD) || defined(VFPv3) attach variables [ Dn1 Dd1 Dm1 thv_Dn1 thv_Dd1 thv_Dm1 ] [ d16 d17 d18 d19 d20 d21 d22 d23 d24 d25 d26 d27 d28 d29 d30 d31 ]; Dd: Dd1 is TMode=0 & Dd1 & D22=1 { export Dd1; } Dn: Dn1 is TMode=0 & Dn1 & N7=1 { export Dn1; } Dm: Dm1 is TMode=0 & Dm1 & M5=1 { export Dm1; } Dd: thv_Dd1 is TMode=1 & thv_Dd1 & thv_D22=1 { export thv_Dd1; } Dn: thv_Dn1 is TMode=1 & thv_Dn1 & thv_N7=1 { export thv_Dn1; } Dm: thv_Dm1 is TMode=1 & thv_Dm1 & thv_M5=1 { export thv_Dm1; } attach variables [ Sm0_3 thv_Sm0_3 ][s0 s2 s4 s6 s8 s10 s12 s14]; attach variables [ Sm1_3 thv_Sm1_3][s1 s3 s5 s7 s9 s11 s13 s15]; Sm_3: Sm0_3 is 
TMode=0 & Sm0_3 & M5=0 { export Sm0_3; } Sm_3: Sm1_3 is TMode=0 & Sm1_3 & M5=1 { export Sm1_3; } Sm_3: thv_Sm0_3 is TMode=1 & thv_Sm0_3 & M5=0 { export thv_Sm0_3; } Sm_3: thv_Sm1_3 is TMode=1 & thv_Sm1_3 & M5=1 { export thv_Sm1_3; } @endif # SIMD || VFPv3 @if defined(VFPv2) || defined(VFPv3) attach variables [ Sn0 Sd0 Sm0 thv_Sn0 thv_Sd0 thv_Sm0 ] [ s0 s2 s4 s6 s8 s10 s12 s14 s16 s18 s20 s22 s24 s26 s28 s30 ]; attach variables [ Sn1 Sd1 Sm1 thv_Sn1 thv_Sd1 thv_Sm1 ] [ s1 s3 s5 s7 s9 s11 s13 s15 s17 s19 s21 s23 s25 s27 s29 s31 ]; attach variables [ Sm0next thv_Sm0next ] [ s1 s3 s5 s7 s9 s11 s13 s15 s17 s19 s21 s23 s25 s27 s29 s31 ]; attach variables [ Sm1next thv_Sm1next ] [ s2 s4 s6 s8 s10 s12 s14 s16 s18 s20 s22 s24 s26 s28 s30 _ ]; # We need to create separate constructors for each register rather than attaching # directly to a context variable @if defined (VFPv2) || defined(SIMD) Sreg: s0 is s0 & regNum=0 { export s0; } Sreg: s1 is s1 & regNum=1 { export s1; } Sreg: s2 is s2 & regNum=2 { export s2; } Sreg: s3 is s3 & regNum=3 { export s3; } Sreg: s4 is s4 & regNum=4 { export s4; } Sreg: s5 is s5 & regNum=5 { export s5; } Sreg: s6 is s6 & regNum=6 { export s6; } Sreg: s7 is s7 & regNum=7 { export s7; } Sreg: s8 is s8 & regNum=8 { export s8; } Sreg: s9 is s9 & regNum=9 { export s9; } Sreg: s10 is s10 & regNum=10 { export s10; } Sreg: s11 is s11 & regNum=11 { export s11; } Sreg: s12 is s12 & regNum=12 { export s12; } Sreg: s13 is s13 & regNum=13 { export s13; } Sreg: s14 is s14 & regNum=14 { export s14; } Sreg: s15 is s15 & regNum=15 { export s15; } Sreg: s16 is s16 & regNum=16 { export s16; } Sreg: s17 is s17 & regNum=17 { export s17; } Sreg: s18 is s18 & regNum=18 { export s18; } Sreg: s19 is s19 & regNum=19 { export s19; } Sreg: s20 is s20 & regNum=20 { export s20; } Sreg: s21 is s21 & regNum=21 { export s21; } Sreg: s22 is s22 & regNum=22 { export s22; } Sreg: s23 is s23 & regNum=23 { export s23; } Sreg: s24 is s24 & regNum=24 { export s24; } Sreg: s25 is s25 & 
regNum=25 { export s25; } Sreg: s26 is s26 & regNum=26 { export s26; } Sreg: s27 is s27 & regNum=27 { export s27; } Sreg: s28 is s28 & regNum=28 { export s28; } Sreg: s29 is s29 & regNum=29 { export s29; } Sreg: s30 is s30 & regNum=30 { export s30; } Sreg: s31 is s31 & regNum=31 { export s31; } Sreg2: s0 is s0 & reg2Num=0 { export s0; } Sreg2: s1 is s1 & reg2Num=1 { export s1; } Sreg2: s2 is s2 & reg2Num=2 { export s2; } Sreg2: s3 is s3 & reg2Num=3 { export s3; } Sreg2: s4 is s4 & reg2Num=4 { export s4; } Sreg2: s5 is s5 & reg2Num=5 { export s5; } Sreg2: s6 is s6 & reg2Num=6 { export s6; } Sreg2: s7 is s7 & reg2Num=7 { export s7; } Sreg2: s8 is s8 & reg2Num=8 { export s8; } Sreg2: s9 is s9 & reg2Num=9 { export s9; } Sreg2: s10 is s10 & reg2Num=10 { export s10; } Sreg2: s11 is s11 & reg2Num=11 { export s11; } Sreg2: s12 is s12 & reg2Num=12 { export s12; } Sreg2: s13 is s13 & reg2Num=13 { export s13; } Sreg2: s14 is s14 & reg2Num=14 { export s14; } Sreg2: s15 is s15 & reg2Num=15 { export s15; } Sreg2: s16 is s16 & reg2Num=16 { export s16; } Sreg2: s17 is s17 & reg2Num=17 { export s17; } Sreg2: s18 is s18 & reg2Num=18 { export s18; } Sreg2: s19 is s19 & reg2Num=19 { export s19; } Sreg2: s20 is s20 & reg2Num=20 { export s20; } Sreg2: s21 is s21 & reg2Num=21 { export s21; } Sreg2: s22 is s22 & reg2Num=22 { export s22; } Sreg2: s23 is s23 & reg2Num=23 { export s23; } Sreg2: s24 is s24 & reg2Num=24 { export s24; } Sreg2: s25 is s25 & reg2Num=25 { export s25; } Sreg2: s26 is s26 & reg2Num=26 { export s26; } Sreg2: s27 is s27 & reg2Num=27 { export s27; } Sreg2: s28 is s28 & reg2Num=28 { export s28; } Sreg2: s29 is s29 & reg2Num=29 { export s29; } Sreg2: s30 is s30 & reg2Num=30 { export s30; } Sreg2: s31 is s31 & reg2Num=31 { export s31; } Dreg: d0 is d0 & regNum=0 { export d0; } Dreg: d1 is d1 & regNum=1 { export d1; } Dreg: d2 is d2 & regNum=2 { export d2; } Dreg: d3 is d3 & regNum=3 { export d3; } Dreg: d4 is d4 & regNum=4 { export d4; } Dreg: d5 is d5 & regNum=5 { export 
d5; } Dreg: d6 is d6 & regNum=6 { export d6; } Dreg: d7 is d7 & regNum=7 { export d7; } Dreg: d8 is d8 & regNum=8 { export d8; } Dreg: d9 is d9 & regNum=9 { export d9; } Dreg: d10 is d10 & regNum=10 { export d10; } Dreg: d11 is d11 & regNum=11 { export d11; } Dreg: d12 is d12 & regNum=12 { export d12; } Dreg: d13 is d13 & regNum=13 { export d13; } Dreg: d14 is d14 & regNum=14 { export d14; } Dreg: d15 is d15 & regNum=15 { export d15; } Dreg2: d0 is d0 & reg2Num=0 { export d0; } Dreg2: d1 is d1 & reg2Num=1 { export d1; } Dreg2: d2 is d2 & reg2Num=2 { export d2; } Dreg2: d3 is d3 & reg2Num=3 { export d3; } Dreg2: d4 is d4 & reg2Num=4 { export d4; } Dreg2: d5 is d5 & reg2Num=5 { export d5; } Dreg2: d6 is d6 & reg2Num=6 { export d6; } Dreg2: d7 is d7 & reg2Num=7 { export d7; } Dreg2: d8 is d8 & reg2Num=8 { export d8; } Dreg2: d9 is d9 & reg2Num=9 { export d9; } Dreg2: d10 is d10 & reg2Num=10 { export d10; } Dreg2: d11 is d11 & reg2Num=11 { export d11; } Dreg2: d12 is d12 & reg2Num=12 { export d12; } Dreg2: d13 is d13 & reg2Num=13 { export d13; } Dreg2: d14 is d14 & reg2Num=14 { export d14; } Dreg2: d15 is d15 & reg2Num=15 { export d15; } @if defined(SIMD) || defined(VFPv3) Dreg: d16 is d16 & regNum=16 { export d16; } Dreg: d17 is d17 & regNum=17 { export d17; } Dreg: d18 is d18 & regNum=18 { export d18; } Dreg: d19 is d19 & regNum=19 { export d19; } Dreg: d20 is d20 & regNum=20 { export d20; } Dreg: d21 is d21 & regNum=21 { export d21; } Dreg: d22 is d22 & regNum=22 { export d22; } Dreg: d23 is d23 & regNum=23 { export d23; } Dreg: d24 is d24 & regNum=24 { export d24; } Dreg: d25 is d25 & regNum=25 { export d25; } Dreg: d26 is d26 & regNum=26 { export d26; } Dreg: d27 is d27 & regNum=27 { export d27; } Dreg: d28 is d28 & regNum=28 { export d28; } Dreg: d29 is d29 & regNum=29 { export d29; } Dreg: d30 is d30 & regNum=30 { export d30; } Dreg: d31 is d31 & regNum=31 { export d31; } Dreg2: d16 is d16 & reg2Num=16 { export d16; } Dreg2: d17 is d17 & reg2Num=17 { export d17; 
} Dreg2: d18 is d18 & reg2Num=18 { export d18; } Dreg2: d19 is d19 & reg2Num=19 { export d19; } Dreg2: d20 is d20 & reg2Num=20 { export d20; } Dreg2: d21 is d21 & reg2Num=21 { export d21; } Dreg2: d22 is d22 & reg2Num=22 { export d22; } Dreg2: d23 is d23 & reg2Num=23 { export d23; } Dreg2: d24 is d24 & reg2Num=24 { export d24; } Dreg2: d25 is d25 & reg2Num=25 { export d25; } Dreg2: d26 is d26 & reg2Num=26 { export d26; } Dreg2: d27 is d27 & reg2Num=27 { export d27; } Dreg2: d28 is d28 & reg2Num=28 { export d28; } Dreg2: d29 is d29 & reg2Num=29 { export d29; } Dreg2: d30 is d30 & reg2Num=30 { export d30; } Dreg2: d31 is d31 & reg2Num=31 { export d31; } @else # this is just a placeholder so the parse patterns will match correctly. # regNum is 31 when the base pattern matches, and incremented when # this constructor actually matches Dreg: d0 is d0 & regNum=31 { export d0; } Dreg2: d0 is d0 & reg2Num=31 { export d0; } @endif @endif VRm: Rm is TMode=0 & Rm { export Rm; } VRm: thv_Rm is TMode=1 & thv_Rm { export thv_Rm; } VRn: Rn is TMode=0 & Rn { export Rn; } VRn: thv_Rn is TMode=1 & thv_Rn { export thv_Rn; } VRd: Rd is TMode=0 & Rd { export Rd; } VRd: thv_Rd is TMode=1 & thv_Rd { export thv_Rd; } Sd: Sd0 is TMode=0 & Sd0 & D22=0 { export Sd0; } Sd: Sd1 is TMode=0 & Sd1 & D22=1 { export Sd1; } Sd: thv_Sd0 is TMode=1 & thv_Sd0 & thv_D22=0 { export thv_Sd0; } Sd: thv_Sd1 is TMode=1 & thv_Sd1 & thv_D22=1 { export thv_Sd1; } Sn: Sn0 is TMode=0 & Sn0 & N7=0 { export Sn0; } Sn: Sn1 is TMode=0 & Sn1 & N7=1 { export Sn1; } Sn: thv_Sn0 is TMode=1 & thv_Sn0 & thv_N7=0 { export thv_Sn0; } Sn: thv_Sn1 is TMode=1 & thv_Sn1 & thv_N7=1 { export thv_Sn1; } Sm: Sm0 is TMode=0 & Sm0 & M5=0 { export Sm0; } Sm: Sm1 is TMode=0 & Sm1 & M5=1 { export Sm1; } Sm: thv_Sm0 is TMode=1 & thv_Sm0 & thv_M5=0 { export thv_Sm0; } Sm: thv_Sm1 is TMode=1 & thv_Sm1 & thv_M5=1 { export thv_Sm1; } SmNext: Sm0next is TMode=0 & Sm0next & M5=0 { export Sm0next; } SmNext: Sm1next is TMode=0 & Sm1next & M5=1 { 
export Sm1next; } SmNext: thv_Sm0next is TMode=1 & thv_Sm0next & thv_M5=0 { export thv_Sm0next; } SmNext: thv_Sm1next is TMode=1 & thv_Sm1next & thv_M5=1 { export thv_Sm1next; } Sd2: Sd is Sd { export Sd; } @endif # VFPv2 || VFPv3 udt: "s" is TMode=0 & c2424=0 { export 0:1; } udt: "u" is TMode=0 & c2424=1 { export 1:1; } udt: "s" is TMode=1 & thv_c2828=0 { export 0:1; } udt: "u" is TMode=1 & thv_c2828=1 { export 1:1; } udt7: "s" is TMode=0 & c0707=0 { export 0:1; } udt7: "u" is TMode=0 & c0707=1 { export 1:1; } udt7: "s" is TMode=1 & thv_c0707=0 { export 0:1; } udt7: "u" is TMode=1 & thv_c0707=1 { export 1:1; } fdt: "u" is TMode=0 & c0808=0 { export 0:1; } fdt: "f" is TMode=0 & c0808=1 { export 1:1; } fdt: "u" is TMode=1 & thv_c0808=0 { export 0:1; } fdt: "f" is TMode=1 & thv_c0808=1 { export 1:1; } esize2021: "8" is TMode=0 & c2021=0 { export 1:4; } esize2021: "16" is TMode=0 & c2021=1 { export 2:4; } esize2021: "32" is TMode=0 & c2021=2 { export 4:4; } esize2021: "64" is TMode=0 & c2021=3 { export 8:4; } esize2021: "8" is TMode=1 & thv_c2021=0 { export 1:4; } esize2021: "16" is TMode=1 & thv_c2021=1 { export 2:4; } esize2021: "32" is TMode=1 & thv_c2021=2 { export 4:4; } esize2021: "64" is TMode=1 & thv_c2021=3 { export 8:4; } esize2021x2: "16" is TMode=0 & c2021=0 { export 2:4; } esize2021x2: "32" is TMode=0 & c2021=1 { export 4:4; } esize2021x2: "64" is TMode=0 & c2021=2 { export 8:4; } esize2021x2: "16" is TMode=1 & thv_c2021=0 { export 2:4; } esize2021x2: "32" is TMode=1 & thv_c2021=1 { export 4:4; } esize2021x2: "64" is TMode=1 & thv_c2021=2 { export 8:4; } esize1819: "8" is TMode=0 & c1819=0 { export 1:4; } esize1819: "16" is TMode=0 & c1819=1 { export 2:4; } esize1819: "32" is TMode=0 & c1819=2 { export 4:4; } esize1819: "64" is TMode=0 & c1819=3 { export 8:4; } esize1819: "8" is TMode=1 & thv_c1819=0 { export 1:4; } esize1819: "16" is TMode=1 & thv_c1819=1 { export 2:4; } esize1819: "32" is TMode=1 & thv_c1819=2 { export 4:4; } esize1819: "64" is TMode=1 
& thv_c1819=3 { export 8:4; } esize1819x2: "16" is TMode=0 & c1819=0 { export 2:4; } esize1819x2: "32" is TMode=0 & c1819=1 { export 4:4; } esize1819x2: "64" is TMode=0 & c1819=2 { export 8:4; } esize1819x2: "16" is TMode=1 & thv_c1819=0 { export 2:4; } esize1819x2: "32" is TMode=1 & thv_c1819=1 { export 4:4; } esize1819x2: "64" is TMode=1 & thv_c1819=2 { export 8:4; } esize1819x3: "8" is TMode=0 & c1819=0 { export 1:4; } esize1819x3: "16" is TMode=0 & c1819=1 { export 2:4; } esize1819x3: "32" is TMode=0 & c1819=2 { export 4:4; } esize1819x3: "8" is TMode=1 & thv_c1819=0 { export 1:4; } esize1819x3: "16" is TMode=1 & thv_c1819=1 { export 2:4; } esize1819x3: "32" is TMode=1 & thv_c1819=2 { export 4:4; } esize1011: "8" is TMode=0 & c1011=0 { export 1:4; } esize1011: "16" is TMode=0 & c1011=1 { export 2:4; } esize1011: "32" is TMode=0 & c1011=2 { export 4:4; } esize1011: "64" is TMode=0 & c1011=3 { export 8:4; } esize1011: "8" is TMode=1 & thv_c1011=0 { export 1:4; } esize1011: "16" is TMode=1 & thv_c1011=1 { export 2:4; } esize1011: "32" is TMode=1 & thv_c1011=2 { export 4:4; } esize1011: "64" is TMode=1 & thv_c1011=3 { export 8:4; } esize0607: "8" is TMode=0 & c0607=0 { export 1:4; } esize0607: "16" is TMode=0 & c0607=1 { export 2:4; } esize0607: "32" is TMode=0 & c0607=2 { export 4:4; } esize0607: "64" is TMode=0 & c0607=3 { export 8:4; } # see VLD4 (single 4-element structure to all lanes) esize0607: "8" is TMode=1 & thv_c0607=0 { export 1:4; } esize0607: "16" is TMode=1 & thv_c0607=1 { export 2:4; } esize0607: "32" is TMode=1 & thv_c0607=2 { export 4:4; } esize0607: "64" is TMode=1 & thv_c0607=3 { export 8:4; } # see VLD4 (single 4-element structure to all lanes) fesize2323: "16" is TMode=0 & c2323=1 { export 4:4; } fesize2323: "32" is TMode=0 & c2323=0 { export 2:4; } fesize2323: "16" is TMode=1 & thv_c2323=1 { export 4:4; } fesize2323: "32" is TMode=1 & thv_c2323=0 { export 2:4; } fesize2020: "16" is TMode=0 & c2020=1 { export 4:4; } fesize2020: "32" is TMode=0 
& c2020=0 { export 2:4; } fesize2020: "16" is TMode=1 & thv_c2020=1 { export 4:4; } fesize2020: "32" is TMode=1 & thv_c2020=0 { export 2:4; } fesize1819: "16" is TMode=0 & c1819=1 { export 4:4; } fesize1819: "32" is TMode=0 & c1819=2 { export 2:4; } fesize1819: "16" is TMode=1 & thv_c1819=1 { export 4:4; } fesize1819: "32" is TMode=1 & thv_c1819=2 { export 2:4; } roundType: "a" is TMode=0 & c0809=0 { export 0:1; } roundType: "a" is TMode=1 & thv_c0809=0 { export 0:1; } roundType: "n" is TMode=0 & c0809=1 { export 1:1; } roundType: "n" is TMode=1 & thv_c0809=1 { export 1:1; } roundType: "p" is TMode=0 & c0809=2 { export 2:1; } roundType: "p" is TMode=1 & thv_c0809=2 { export 2:1; } roundType: "m" is TMode=0 & c0809=3 { export 3:1; } roundType: "m" is TMode=1 & thv_c0809=3 { export 3:1; } define pcodeop VFPExpandImmediate; # float vfpExpImm_4: imm is TMode=0 & c1919 & c1818 & c1617 & c0003 [ imm = (c1919 << 31) | ((c1818 $xor 1) << 30) | ((c1818 * 0x1f) << 25) | (c1617 << 23) | (c0003 << 19); ] { export *[const]:4 imm; } # float vfpExpImm_4: imm is TMode=1 & thv_c1919 & thv_c1818 & thv_c1617 & thv_c0003 [ imm = (thv_c1919 << 31) | ((thv_c1818 $xor 1) << 30) | ((thv_c1818 * 0x1f) << 25) | (thv_c1617 << 23) | (thv_c0003 << 19); ] { export *[const]:4 imm; } # double vfpExpImm_8: imm is TMode=0 & c1919 & c1818 & c1617 & c0003 [ imm = (c1919 << 63) | ((c1818 $xor 1) << 62) | ((c1818 * 0xff) << 54) | (c1617 << 52) | (c0003 << 48); ] { export *[const]:8 imm; } # double vfpExpImm_8: imm is TMode=1 & thv_c1919 & thv_c1818 & thv_c1617 & thv_c0003 [ imm = (thv_c1919 << 63) | ((thv_c1818 $xor 1) << 62) | ((thv_c1818 * 0xff) << 54) | (thv_c1617 << 52) | (thv_c0003 << 48); ] { export *[const]:8 imm; } define pcodeop SIMDExpandImmediate; simdExpImm_8: "#0" is TMode=0 & c2424=0 & c1618=0 & c0003=0 { export 0:8; } simdExpImm_8: "simdExpand("^c0505^","^cmode^","^val^")" is TMode=0 & c2424 & c1618 & c0505 & c0003 & cmode [ val = (c2424 << 7) | (c1618 << 4) | c0003; ] { imm64:8 = 
SIMDExpandImmediate(c0505:1, cmode:1, val:1); export imm64; } simdExpImm_8: "#0" is TMode=1 & thv_c2828=0 & thv_c1618=0 & thv_c0003=0 { export 0:8; } simdExpImm_8: "simdExpand("^thv_c0505^","^thv_cmode^","^val^")" is TMode=1 & thv_c2828 & thv_c1618 & thv_c0505 & thv_c0003 & thv_cmode [ val = (thv_c2828 << 7) | (thv_c1618 << 4) | thv_c0003; ] { imm64:8 = SIMDExpandImmediate(thv_c0505:1, thv_cmode:1, val:1); export imm64; } simdExpImm_16: "#0" is TMode=0 & c2424=0 & c1618=0 & c0003=0 { tmp:8 = 0; tmp1:16 = zext(tmp); export tmp1; } simdExpImm_16: "simdExpand("^c0505^","^cmode^","^val^")" is TMode=0 & c2424 & c1618 & c0505 & c0003 & cmode [ val = (c2424 << 7) | (c1618 << 4) | c0003; ] { imm128:16 = SIMDExpandImmediate(c0505:1, cmode:1, val:1); export imm128; } simdExpImm_16: "#0" is TMode=1 & thv_c2828=0 & thv_c1618=0 & thv_c0003=0 { tmp:8 = 0; tmp1:16 = zext(tmp); export tmp1; } simdExpImm_16: "simdExpand("^thv_c0505^","^thv_cmode^","^val^")" is TMode=1 & thv_c2828 & thv_c1618 & thv_c0505 & thv_c0003 & thv_cmode [ val = (thv_c2828 << 7) | (thv_c1618 << 4) | thv_c0003; ] { imm128:16 = SIMDExpandImmediate(thv_c0505:1, thv_cmode:1, val:1); export imm128; } simdExpImmDT: "i32" is TMode=0 & c0911=0 { } simdExpImmDT: "i32" is TMode=0 & c0911=1 { } simdExpImmDT: "i32" is TMode=0 & c0911=2 { } simdExpImmDT: "i32" is TMode=0 & c0911=3 { } simdExpImmDT: "i16" is TMode=0 & c0911=4 { } simdExpImmDT: "i16" is TMode=0 & c0911=5 { } simdExpImmDT: "i32" is TMode=0 & c0811=12 { } simdExpImmDT: "i32" is TMode=0 & c0811=13 { } simdExpImmDT: "i8" is TMode=0 & c0811=14 & c0505=0 { } simdExpImmDT: "i64" is TMode=0 & c0811=14 & c0505=1 { } simdExpImmDT: "f32" is TMode=0 & c0811=15 & c0505=0 { } simdExpImmDT: "i32" is TMode=1 & thv_c0911=0 { } simdExpImmDT: "i32" is TMode=1 & thv_c0911=1 { } simdExpImmDT: "i32" is TMode=1 & thv_c0911=2 { } simdExpImmDT: "i32" is TMode=1 & thv_c0911=3 { } simdExpImmDT: "i16" is TMode=1 & thv_c0911=4 { } simdExpImmDT: "i16" is TMode=1 & thv_c0911=5 { } 
simdExpImmDT: "i32" is TMode=1 & thv_c0811=12 { }
simdExpImmDT: "i32" is TMode=1 & thv_c0811=13 { }
simdExpImmDT: "i8" is TMode=1 & thv_c0811=14 & thv_c0505=0 { }
simdExpImmDT: "i64" is TMode=1 & thv_c0811=14 & thv_c0505=1 { }
simdExpImmDT: "f32" is TMode=1 & thv_c0811=15 & thv_c0505=0 { }

# Broadcast an 8/16/32-bit value into all lanes of a 64-bit destination.
macro replicate1to8(bytes, dest) {
  local val:8 = zext(bytes);
  val = val | (val << 8);
  val = val | (val << 16);
  dest = val | (val << 32);
}

macro replicate2to8(bytes, dest) {
  local val:8 = zext(bytes);
  val = val | (val << 16);
  dest = val | (val << 32);
}

macro replicate4to8(bytes, dest) {
  local val:8 = zext(bytes);
  dest = val | (val << 32);
}

define pcodeop VectorAbsoluteDifferenceAndAccumulate;
define pcodeop VectorAbsoluteDifference;
define pcodeop FloatVectorAbsoluteDifference;
define pcodeop VectorAbsolute;
define pcodeop FloatVectorAbsolute;

@if defined(SIMD)

# CryptOp(val)
# Various crypto algorithms, too numerous for explication at
# this time
define pcodeop CryptOp;

#######
# AESD single round decryption

define pcodeop AESInvShiftRows;
define pcodeop AESInvSubBytes;

# F6.1.1 p3235 A1/T1
# AESD: state ^= round key, then inverse ShiftRows and inverse SubBytes.
:aesd.8 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001101 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001101 & thv_c0404=0)) & Qd & Qm
{
  local shiftRows:16 = AESInvShiftRows(Qd ^ Qm);
  Qd = AESInvSubBytes(shiftRows);
}

#######
# AESE single round encryption

define pcodeop AESShiftRows;
define pcodeop AESSubBytes;

# F6.1.2 p3237 A1/T1
# AESE: state ^= round key, then (forward) ShiftRows and SubBytes.
# BUGFIX: the encryption round must use the forward AESShiftRows transform;
# it previously invoked AESInvShiftRows (the decryption transform), leaving
# the AESShiftRows pcodeop defined above but unused.
:aese.8 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001100 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001100 & thv_c0404=0)) & Qd & Qm
{
  local shiftRows:16 = AESShiftRows(Qd ^ Qm);
  Qd = AESSubBytes(shiftRows);
}

#######
# AESIMC inverse mix columns
define pcodeop
AESInvMixColumns; # F6.1.3 p3239 A1/T1 :aesimc.8 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001111 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001111 & thv_c0404=0)) & Qd & Qm { Qd = AESInvMixColumns(Qm); } ####### # AESMC mix columns define pcodeop AESMixColumns; # F6.1.4 p3240 A1/T1 :aesmc.8 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b00 & c1617=0b00 & c0611=0b001110 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b00 & thv_c1617=0b00 & thv_c0611=0b001110 & thv_c0404=0)) & Qd & Qm { Qd = AESMixColumns(Qm); } ####### # SHA1C SHA1 hash update (choose) define pcodeop SHA1HashUpdateChoose; # F6.1.7 p3248 A1/T1 :sha1c.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { local X = Qd; local Y = Qn:4; local W = Qm; Qd = SHA1HashUpdateChoose(X, Y, W); } ####### # SHA1H SHA1 fixed rotate # F6.1.8 p3250 A1/T1 :sha1h.32 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b01 & c0611=0b001011 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b01 & thv_c0611=0b001011 & thv_c0404=0)) & Qd & Qm { local W:4 = Qm(0); Qd = zext(W << 30 | W >> 2); } ####### # SHA1M SHA1 hash update (majority) define pcodeop SHA1HashUpdateMajority; # F6.1.9 p3251 A1/T1 :sha1m.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { local X = Qd; local Y = Qn:4; local W = Qm; Qd = 
SHA1HashUpdateMajority(X, Y, W); } ####### # SHA1P SHA1 hash update (parity) define pcodeop SHA1HashUpdateParity; # F6.1.10 p3253 A1/T1 :sha1p.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { local X = Qd; local Y = Qn:4; local W = Qm; Qd = SHA1HashUpdateParity(X, Y, W); } ####### # SHA1SU0 SHA1 schedule update 0 # F6.1.11 p3255 A1/T1 :sha1su0.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00100 & c2021=0b11 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { local op1 = Qd; local op2 = Qn; local op3 = Qm; local op2LowerHalf = zext(op2[0,64]) << 64; local op1UpperHalf = zext(op1[64,64]); op2 = op2LowerHalf | op1UpperHalf; Qd = op1 ^ op2 ^ op3; } ####### # SHA1SU1 SHA1 schedule update 1 # F6.1.12 p3257 A1/T1 :sha1su1.32 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001110 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c0611=0b001110 & thv_c0404=0)) & Qd & Qm { local X = Qd; local Y = Qm; local Tm = X ^ (Y >> 32); local t0:4 = Tm[0, 32]; local t1:4 = Tm[32, 32]; local t2:4 = Tm[64, 32]; local t3:4 = Tm[96, 32]; local W0:4 = (t0 << 1 | t0 >> 31); local W1:4 = (t1 << 1 | t1 >> 31); local W2:4 = (t2 << 1 | t2 >> 31); local W3:4 = (t3 << 1 | t3 >> 31) ^ (t0 << 2 | t0 >> 30); Qd = (zext(W3) << 96) | (zext(W2) << 64) | (zext(W1) << 32) | zext(W0); } ####### # SHA256H SHA256 hash update part 1 define pcodeop SHA256hash; # F6.1.13 p3259 A1/T1 :sha256h.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 
& thv_c2021=0b00 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { local part1:1 = 1; Qd = SHA256hash(Qd,Qn,Qm, part1); } ####### # SHA256H2 SHA256 hash update part 2 # F6.1.14 p3260 A1/T1 :sha256h2.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { local part1:1 = 0; Qd = SHA256hash(Qd,Qn,Qm, part1); } ####### # SHA256SU0 SHA256 schedule update 0 define pcodeop SHA256ScheduleUpdate0; # F6.1.15 p3261 A1/T1 :sha256su0.32 Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c0611=0b001111 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c0611=0b001111 & thv_c0404=0)) & Qd & Qm { Qd = SHA256ScheduleUpdate0(Qd,Qm); } ####### # SHA256SU1 SHA256 schedule update 1 define pcodeop SHA256ScheduleUpdate1; # F6.1.16 p3263 A1/T1 :sha256su1.32 Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1100 & c0606=1 & c0404=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1100 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & Qm { Qd = SHA256ScheduleUpdate1(Qd,Qn,Qm); } # TODO: watch out for c2021=3 :vaba.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=1 ) ) & Dm & Dn & Dd & udt & esize2021 { Dd = VectorAbsoluteDifferenceAndAccumulate(Dn,Dm,esize2021,udt); } :vaba.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=1 & thv_c0404=1 ) ) & Qd & Qn & Qm & udt & esize2021 { Qd = 
VectorAbsoluteDifferenceAndAccumulate(Qn,Qm,esize2021,udt); } :vabal.^udt^esize2021 Qd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=5 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=5 & thv_c0606=0 & thv_c0404=0 ) ) & Qd & Dm & Dn & udt & esize2021 { Qd = VectorAbsoluteDifferenceAndAccumulate(Dn,Dm,esize2021,udt); } :vabd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dn & Dd & udt & esize2021 { Dd = VectorAbsoluteDifference(Dn,Dm,esize2021,udt); } :vabd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=7 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=7 & thv_Q6=1 & thv_c0404=0 ) ) & Qd & Qn & Qm & udt & esize2021 { Qd = VectorAbsoluteDifference(Qn,Qm,esize2021,udt); } :vabdl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=7 & Q6=0 & c0404=0 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 { Qd = VectorAbsoluteDifference(Dn,Dm,esize2021,udt); } :vabd.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=13 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=13 & thv_c0606=0 & thv_c0404=0 ) ) & fesize2020 & Dd & Dm & Dn { Dd = FloatVectorAbsoluteDifference(Dn,Dm,fesize2020); } :vabd.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=13 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=13 & thv_c0606=1 & thv_c0404=0 ) ) & fesize2020 & Qd & Qm & Qn { Qd = FloatVectorAbsoluteDifference(Qn,Qm,fesize2020); } :vabs.s^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=0 & c0404=0 ) | ($(TMODE_F) & 
thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=6 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = VectorAbsolute(Dm,esize1819); } :vabs.s^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=6 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=6 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = VectorAbsolute(Qm,esize1819); } :vabs.f^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xe & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xe & thv_c0606=0 & thv_c0404=0 ) ) & esize1819 & Dm & Dd { Dd = FloatVectorAbsolute(Dm,esize1819); } :vabs.f^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xe & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xe & thv_c0606=1 & thv_c0404=0 ) ) & esize1819 & Qd & Qm { Qd = FloatVectorAbsolute(Qm,esize1819); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vabs^COND^".f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x2b & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x2b & thv_c0404=0 ) ) & COND & Sm & Sd { build COND; build Sd; build Sm; Sd = abs(Sm); } :vabs^COND^".f64" Dd,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x2f & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x2f & thv_c0404=0 ) ) & COND & Dd & Dm { build COND; build Dd; build Dm; Dd = abs(Dm); } @endif # VFPv2 || VFPv3 define pcodeop FloatCompareGE; define pcodeop FloatCompareGT; define pcodeop VectorAbs; define pcodeop VectorAdd; define pcodeop VectorSub; define pcodeop FloatVectorAdd; define pcodeop VectorPairwiseAdd; define pcodeop VectorPairwiseMin; define 
pcodeop VectorPairwiseMax; define pcodeop FloatVectorPairwiseAdd; define pcodeop FloatVectorPairwiseMin; define pcodeop FloatVectorPairwiseMax; define pcodeop VectorPairwiseAddLong; define pcodeop VectorPairwiseAddAccumulateLong; define pcodeop VectorGetElement; @if defined(SIMD) :vacge.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=14 & thv_c0606=0 & thv_c0404=1 ) ) & fesize2020 & Dn & Dd & Dm { Dd = FloatCompareGE(Dn,Dm,fesize2020); } :vacge.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=14 & thv_c0606=1 & thv_c0404=1 ) ) & fesize2020 & Qn & Qd & Qm { Qd = FloatCompareGE(Qn,Qm,fesize2020); } :vacgt.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_c0606=0 & thv_c0404=1 ) ) & fesize2020 & Dn & Dd & Dm { Dd = FloatCompareGT(Dn,Dm,fesize2020); } :vacgt.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_c0606=1 & thv_c0404=1 ) ) & fesize2020 & Qn & Qd & Qm { Qd = FloatCompareGT(Qn,Qm,fesize2020); } :vadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dn & Dd & Dm { Dd = VectorAdd(Dn,Dm,esize2021); } :vadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd { Qd = VectorAdd(Qn,Qm,esize2021); } :vadd.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | 
($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { Dd = FloatVectorAdd(Dn,Dm,fesize2020); } :vadd.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qn & Qd & Qm { Qd = FloatVectorAdd(Qn,Qm,fesize2020); } :vpadd.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=11 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=11 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm { Dd = VectorPairwiseAdd(Dn,Dm,esize2021); } :vpadd.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021<3 & c0811=11 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=11 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd { Qd = VectorPairwiseAdd(Qn,Qm,esize2021); } :vpadd.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=13 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm& Dn & Dd { Dd = FloatVectorPairwiseAdd(Dn,Dm,fesize2020:1); } :vpmax.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=0)) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorPairwiseMax(Dn,Dm,esize2021,udt); } :vpmax.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=15 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { Dd = FloatVectorPairwiseMax(Dn,Dm,fesize2020:1); } :vpmin.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=10 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & 
thv_c0811=10 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorPairwiseMin(Dn,Dm,esize2021,udt); } :vpmin.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=15 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { Dd = FloatVectorPairwiseMin(Dn,Dm,fesize2020); } :vpadal.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=6 & thv_Q6=0 & thv_c0404=0)) & udt7 & esize1819 & Dd & Dm { Dd = VectorPairwiseAddAccumulateLong(Dm,esize1819); } :vpadal.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=6 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=6 & thv_Q6=1 & thv_c0404=0)) & udt7 & esize1819 & Qd & Qm { Qd = VectorPairwiseAddAccumulateLong(Qm,esize1819); } :vpaddl.^udt7^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=2 & thv_Q6=0 & thv_c0404=0)) & udt7 & esize1819 & Dd & Dm { Dd = VectorPairwiseAddLong(Dm,esize1819); } :vpaddl.^udt7^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=2 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=2 & thv_Q6=1 & thv_c0404=0)) & udt7 & esize1819 & Qd & Qm { Qd = VectorPairwiseAddLong(Qm,esize1819); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vadd^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=9 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sd & Sn { build COND; build Sd; build Sm; build Sn; local result:2 = Sn(0) f+ Sm(0); Sd = zext(result); } 
:vadd^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=10 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sd & Sn { build COND; build Sd; build Sm; build Sn; Sd = Sn f+ Sm; } :vadd^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=11 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & COND & Dm & Dd & Dn { build COND; build Dd; build Dm; build Dn; Dd = Dn f+ Dm; } @endif # VFPv2 || VFPv3 define pcodeop VectorAddReturnHigh; define pcodeop VectorBitwiseInsertIfFalse; define pcodeop VectorBitwiseInsertIfTrue; define pcodeop VectorBitwiseSelect; define pcodeop VectorCompareEqual; define pcodeop FloatVectorCompareEqual; define pcodeop VectorCompareGreaterThanOrEqual; define pcodeop FloatVectorCompareGreaterThanOrEqual; define pcodeop VectorCompareGreaterThan; define pcodeop FloatVectorCompareGreaterThan; define pcodeop VectorCountLeadingSignBits; define pcodeop VectorCountLeadingZeros; define pcodeop VectorComplexAdd; define pcodeop VectorComplexMultiplyAccumulate; define pcodeop VectorComplexMultiplyAccumulateByElement; @if defined(SIMD) :vaddhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021<3 & c0811=4 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=4 & thv_c0606=0 & thv_c0404=0) ) & esize2021x2 & Qn & Dd & Qm { Dd = VectorAddReturnHigh(Qn,Qm,esize2021x2); } :vaddl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Dn & Qd & Dm { Qd = VectorAdd(Dn,Dm,esize2021,udt); } :vaddw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=1 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & 
thv_c2021<3 & thv_c0811=1 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Qn & Qd & Dm { Qd = VectorAdd(Qn,Dm,esize2021,udt); } :vand Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = Dn & Dm; } :vand Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qn & Qd & Qm { Qd = Qn & Qm; } :vbic.i32 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1111=0 & c0808=1 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1111=0 & thv_c0808=1 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = Dd & ~simdExpImm_8; } :vbic.i32 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1111=0 & c0808=1 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1111=0 & thv_c0808=1 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = Qd & ~simdExpImm_16; } :vbic.i16 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=1 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=1 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = Dd & ~simdExpImm_8; } :vbic.i16 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=1 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=1 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = Qd & ~simdExpImm_16; } :vbic Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1) ) & Dm & Dn & Dd { Dd = Dn & ~Dm; } :vbic Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & 
thv_c2021=1 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = Qn & ~Qm; } :vbif Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1) ) & Dm & Dn & Dd { Dd = VectorBitwiseInsertIfFalse(Dd,Dn,Dm); } :vbif Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=3 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd { Qd = VectorBitwiseInsertIfFalse(Qd,Qn,Qm); } :vbit Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dm & Dn & Dd { Dd = VectorBitwiseInsertIfTrue(Dd,Dn,Dm); } :vbit Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=2 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd { Qd = VectorBitwiseInsertIfTrue(Qd,Qn,Qm); } :vbsl Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dm & Dn & Dd { Dd = VectorBitwiseSelect(Dd,Dn,Dm); } :vbsl Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=1 & c0811=1 & Q6=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=1 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qm & Qn & Qd { Qd = VectorBitwiseSelect(Qd,Qn,Qm); } crot2424: "#"^90 is ($(AMODE) & c2424=0 ) | (TMode=1 & thv_c2424=0) { local tmp:4 = 90; export tmp; } crot2424: "#"^270 is ($(AMODE) & c2424=1 ) | (TMode=1 & thv_c2424=1) { local tmp:4 = 270; export tmp; } :vcadd.f^fesize2020 Dd,Dn,Dm,crot2424 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2323=1 & c2121=0 & c0811=8 & Q6=0 & c0404=1 ) | ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2121=0 & thv_c0811=8 & thv_Q6=0 & 
thv_c0404=1)) & crot2424 & fesize2020 & Dm & Dn & Dd
{
	Dd = VectorComplexAdd(Dd,Dn,Dm,crot2424,fesize2020);
}

# VCADD, Q-register form.
# Fix: the Thumb pattern used thv_c2021=0, which also pins bit 20 to zero;
# bit 20 is the size field decoded by fesize2020.  The ARM pattern constrains
# only bit 21 (c2121=0), so the Thumb side must match: thv_c2121=0.
:vcadd.f^fesize2020 Qd,Qn,Qm,crot2424 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2323=1 & c2121=0 & c0811=8 & Q6=1 & c0404=1 )| ($(TMODE_F) & thv_c2527=6 & thv_c2323=1 & thv_c2121=0 & thv_c0811=8 & thv_Q6=1 & thv_c0404=1)) & crot2424 & fesize2020 & Qm & Qn & Qd
{
	Qd = VectorComplexAdd(Qd,Qn,Qm,crot2424,fesize2020);
}

:vceq.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=8 & thv_Q6=0 & thv_c0404=1) ) & esize2021 & Dm & Dn & Dd
{
	Dd = VectorCompareEqual(Dn,Dm,esize2021);
}

:vceq.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021<3 & c0811=8 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=8 & thv_Q6=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd
{
	Qd = VectorCompareEqual(Qn,Qm,esize2021);
}

:vceq.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=14 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd
{
	Dd = FloatVectorCompareEqual(Dn,Dm,fesize2020);
}

:vceq.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=14 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qm & Qn & Qd
{
	Qd = FloatVectorCompareEqual(Qn,Qm,fesize2020);
}

:vceq.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=2 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero
{
	Dd = VectorCompareEqual(Dm,zero,esize1819);
}

:vceq.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=2 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f &
thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=2 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero
{
	Qd = VectorCompareEqual(Qm,zero,esize1819);
}

:vceq.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=10 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=10 & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dm & Dd & zero
{
	Dd = FloatVectorCompareEqual(Dm,zero,fesize1819);
}

:vceq.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=1 & c0711=10 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=1 & thv_c0711=10 & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qm & Qd & zero
{
	Qd = FloatVectorCompareEqual(Qm,zero,fesize1819);
}

:vcge.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=0 & thv_c0404=1) ) & udt & esize2021 & Dm & Dn & Dd
{
	Dd = VectorCompareGreaterThanOrEqual(Dn,Dm,esize2021,udt);
}

:vcge.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd
{
	Qd = VectorCompareGreaterThanOrEqual(Qn,Qm,esize2021,udt);
}

# VCGE (register, floating-point), D and Q forms.  Two fixes:
#  1. The Thumb pattern used thv_c2021=0, which also pins the size bit (20)
#     consumed by fesize2020; the ARM pattern (and sibling vacge.f) constrain
#     only bit 21, so it must be thv_c2121=0.
#  2. The pcode passed hard-coded "2:1,32:1" instead of the decoded element
#     size; every other Float compare here passes the fesize/esize subtable.
:vcge.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd
{
	Dd = FloatVectorCompareGreaterThanOrEqual(Dn,Dm,fesize2020);
}

:vcge.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=0 & c0811=14 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qm & Qn & Qd
{
	Qd = FloatVectorCompareGreaterThanOrEqual(Qn,Qm,fesize2020);
}

:vcge.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=1 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero
{
	Dd = VectorCompareGreaterThanOrEqual(Dm,zero,esize1819);
}

:vcge.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=1 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=1 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero
{
	Qd = VectorCompareGreaterThanOrEqual(Qm,zero,esize1819);
}

:vcge.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=9 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=9 & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dm & Dd & zero
{
	Dd = FloatVectorCompareGreaterThanOrEqual(Dm,zero,fesize1819);
}

:vcge.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=9 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=9 & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qm & Qd & zero
{
	Qd = FloatVectorCompareGreaterThanOrEqual(Qm,zero,fesize1819);
}

# NOTE(review): unlike vcge.^udt above, vcgt drops the udt (signed/unsigned)
# operand from VectorCompareGreaterThan — likely an omission; left as-is here
# because the Q-form pcode body lives outside this edit's span.
:vcgt.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=0 & thv_c0404=0) ) & udt & esize2021 & Dm & Dn & Dd
{
	Dd = VectorCompareGreaterThan(Dn,Dm,esize2021);
}

:vcgt.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=3 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=3 & thv_Q6=1 & thv_c0404=0) )
& udt & esize2021 & Qm & Qn & Qd { Qd = VectorCompareGreaterThan(Qn,Qm,esize2021); } :vcgt.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_Q6=0 & thv_c0404=0) ) & fesize2020 & Dm & Dn & Dd { Dd = FloatVectorCompareGreaterThan(Dn,Dm,fesize2020); } :vcgt.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2121=1 & c0811=14 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=14 & thv_Q6=1 & thv_c0404=0) ) & fesize2020 & Qm & Qn & Qd { Qd = FloatVectorCompareGreaterThan(Qn,Qm,fesize2020); } :vcgt.i^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0 & thv_Q6=0 & thv_c0404=0 ) ) & esize1819 & Dd & Dm & zero { Dd = VectorCompareGreaterThan(Dm,zero,esize1819); } :vcgt.i^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=0 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=0 & thv_Q6=1 & thv_c0404=0 ) ) & esize1819 & Qd & Qm & zero { Qd = VectorCompareGreaterThan(Qm,zero,esize1819); } :vcgt.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=8 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=8 & thv_Q6=0 & thv_c0404=0 ) ) & fesize1819 & Dd & Dm & zero { Dd = FloatVectorCompareGreaterThan(Dm,zero,fesize1819); } :vcgt.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=8 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=8 & thv_Q6=1 & thv_c0404=0 ) ) & fesize1819 & Qd & Qm & 
zero { Qd = FloatVectorCompareGreaterThan(Qm,zero,fesize1819); } :vcle.s^esize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=3 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm & zero { Dd = VectorCompareGreaterThanOrEqual(zero,Dm,esize1819); } :vcle.s^esize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=3 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=3 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm & zero { Qd = VectorCompareGreaterThanOrEqual(zero,Qm,esize1819); } :vcle.f^fesize1819 Dd,Dm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xb & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xb & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dd & Dm & zero { Dd = FloatVectorCompareGreaterThanOrEqual(zero,Dm,fesize1819); } :vcle.f^fesize1819 Qd,Qm,zero is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=0xb & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=0xb & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qd & Qm & zero { Qd = FloatVectorCompareGreaterThanOrEqual(zero,Qm,fesize1819); } :vcls.s^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=8 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm { Dd = VectorCountLeadingSignBits(Dm,esize1819); } :vcls.s^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=8 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & 
thv_c1819<3 & thv_c1617=0 & thv_c0711=8 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm { Qd = VectorCountLeadingSignBits(Qm,esize1819); } :vclt.s^esize1819 Dd,Dm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=4 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dm & Dd & zero { Dd = VectorCompareGreaterThan(zero,Dm,esize1819); } :vclt.s^esize1819 Qd,Qm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=4 & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=4 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd & zero { Qd = VectorCompareGreaterThan(zero,Qm,esize1819); } :vclt.f^fesize1819 Dd,Dm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=12 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=12 & thv_Q6=0 & thv_c0404=0) ) & fesize1819 & Dm & Dd & zero { Dd = FloatVectorCompareGreaterThan(zero,Dm,fesize1819); } :vclt.f^fesize1819 Qd,Qm,zero is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & (c1819=1 | c1819=2) & c1617=1 & c0711=12 & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & (thv_c1819=1 | thv_c1819=2) & thv_c1617=1 & thv_c0711=12 & thv_Q6=1 & thv_c0404=0) ) & fesize1819 & Qm & Qd & zero { Qd = FloatVectorCompareGreaterThan(zero,Qm,fesize1819); } :vclz.i^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=9 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=9 & thv_Q6=0 & thv_c0404=0) ) & esize1819 & Dd & Dm { Dd = VectorCountLeadingZeros(Dm,esize1819); } :vclz.i^esize1819 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=0 & c0711=9 & Q6=1 & c0404=0) | 
( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=0 & thv_c0711=9 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qd & Qm { Qd = VectorCountLeadingZeros(Qm,esize1819); }
# Rotation subtables for VCMLA: bits 24:23 (crot2324) or 21:20 (crot2021)
# select the rotation amount in degrees, exported as a 4-byte constant.
crot2324: "#"^0 is ($(AMODE) & c2324=0 ) | (TMode=1 & thv_c2324=0) { local tmp:4 = 0; export tmp; } crot2324: "#"^90 is ($(AMODE) & c2324=1 ) | (TMode=1 & thv_c2324=1) { local tmp:4 = 90; export tmp; } crot2324: "#"^180 is ($(AMODE) & c2324=2 ) | (TMode=1 & thv_c2324=2) { local tmp:4 = 180; export tmp; } crot2324: "#"^270 is ($(AMODE) & c2324=3 ) | (TMode=1 & thv_c2324=3) { local tmp:4 = 270; export tmp; }
crot2021: "#"^0 is ($(AMODE) & c2021=0 ) | (TMode=1 & thv_c2021=0) { local tmp:4 = 0; export tmp; } crot2021: "#"^90 is ($(AMODE) & c2021=1 ) | (TMode=1 & thv_c2021=1) { local tmp:4 = 90; export tmp; } crot2021: "#"^180 is ($(AMODE) & c2021=2 ) | (TMode=1 & thv_c2021=2) { local tmp:4 = 180; export tmp; } crot2021: "#"^270 is ($(AMODE) & c2021=3 ) | (TMode=1 & thv_c2021=3) { local tmp:4 = 270; export tmp; }
# VCMLA (vector): the ARM pattern requires bit 21 set (c2121=1) and the Thumb
# pattern must mirror it.  FIX: the Q-register Thumb pattern previously used
# thv_c2021=1 (bits 21:20 == 01, i.e. bit 21 CLEAR), which disagrees with the
# ARM pattern and with the D-register twin (thv_c2121=1) and so could not
# match a valid VCMLA encoding; corrected to thv_c2121=1.
:vcmla.f^fesize2020 Dd,Dn,Dm,crot2324 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2121=1 & c0811=8 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2527=6 & thv_c2121=1 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & crot2324 & fesize2020 & Dm & Dn & Dd { Dd = VectorComplexMultiplyAccumulate(Dd,Dn,Dm,crot2324,fesize2020); }
:vcmla.f^fesize2020 Qd,Qn,Qm,crot2324 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=6 & c2121=1 & c0811=8 & Q6=1 & c0404=0 )| ($(TMODE_F) & thv_c2527=6 & thv_c2121=1 & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & crot2324 & fesize2020 & Qm & Qn & Qd { Qd = VectorComplexMultiplyAccumulate(Qd,Qn,Qm,crot2324,fesize2020); }
# VCMLA (by element): rotation comes from bits 21:20 instead.
:vcmla.f^fesize2323 Dd,Dn,Dm,crot2021 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=7 & c2424=0 & c0811=8 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2527=7 & thv_c2424=0 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & crot2021 & fesize2323 & Dm & Dn & Dd { Dd = VectorComplexMultiplyAccumulateByElement(Dd,Dn,Dm,crot2021,fesize2323); }
:vcmla.f^fesize2323 
# Tail of the by-element VCMLA Q-form, then: FloatVectorCompare macro (packs
# float N/Z/C/V compare results into FPSCR bits 31:28), VCMP/VCMPE (register
# and against-zero forms), VCNT, and VCVT between float and integer.
Qd,Qn,Qm,crot2021 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=7 & c2424=0 & c0811=8 & Q6=1 & c0404=0 )| ($(TMODE_F) & thv_c2527=7 & thv_c2424=0 & thv_c0811=8 & thv_Q6=1 & thv_c0404=0)) & crot2021 & fesize2323 & Qm & Qn & Qd { Qd = VectorComplexMultiplyAccumulateByElement(Qd,Qn,Qm,crot2021,fesize2323); } @endif # SIMD # set float register flags correctly for comparison macro FloatVectorCompare(op1,op2,nanx) { local tNG = op1 f< op2; local tZR = op1 f== op2; local tCY = op2 f<= op1; tOV:1 = nan(op1) | nan(op2); # this is really a comparison with NAN and may also raise an exception when NAN fpscr = (fpscr & 0x0fffffff) | (zext(tNG) << 31) | (zext(tZR) << 30) | (zext(tCY) << 29) | (zext(tOV) << 28); } @if defined(VFPv2) || defined(VFPv3) nanx: "e" is c0707=1 { export 1:1; } nanx: is c0707=0 { export 0:1; } :vcmp^nanx^COND^".f16" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=4 & c0811=0b1001 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=4 & thv_c0811=0b1001 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & nanx & Sm { build COND; build Sd; build Sm; local sm16:2 = Sm(0); local sd16:2 = Sd(0); FloatVectorCompare(sd16,sm16,nanx); } :vcmp^nanx^COND^".f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=4 & c0811=0b1010 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=4 & thv_c0811=0b1010 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & nanx & Sm { build COND; build Sd; build Sm; FloatVectorCompare(Sd,Sm,nanx); } :vcmp^nanx^COND^".f64" Dd,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=4 & c0811=0b1011 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=4 & thv_c0811=0b1011 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & nanx & Dm { build COND; build Dd; build Dm; FloatVectorCompare(Dd,Dm,nanx); } :vcmp^nanx^COND^".f16" Sd,zero is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=5 & c0811=0b1001 & c0006=0b1000000 ) | ( $(TMODE_E) & 
thv_c2327=0x1d & thv_c2021=3 & thv_c1619=5 & thv_c0811=0b1001 & thv_c0006=0b1000000 ) ) & COND & Sd & nanx & zero { build COND; build Sd; local Zero:2 = 0; local sd16:2 = Sd(0); FloatVectorCompare(sd16,Zero,nanx); } :vcmp^nanx^COND^".f32" Sd,zero is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=5 & c0811=0b1010 & c0006=0b1000000 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=5 & thv_c0811=0b1010 & thv_c0006=0b1000000 ) ) & COND & Sd & nanx & zero { build COND; build Sd; local Zero:4 = 0; FloatVectorCompare(Sd,Zero,nanx); } :vcmp^nanx^COND^".f64" Dd,zero is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=5 & c0811=0b1011 & c0006=0b1000000 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=5 & thv_c0811=0b1011 & thv_c0006=0b1000000 ) ) & COND & Dd & nanx & zero { build COND; build Dd; local Zero:8 = 0; FloatVectorCompare(Dd,Zero,nanx); } @endif # VFPv2 || VFPv3 define pcodeop VectorCountOneBits; @ifndef VERSION_8 #second arg to conversion function indicates rounding mode (see RMODE bits of FPSCR) define pcodeop VectorFloatToSigned; define pcodeop VectorFloatToUnsigned; define pcodeop VectorSignedToFloat; define pcodeop VectorUnsignedToFloat; @endif # VERSION_8 @if defined(SIMD) ####### # VCVT (between floating-point and integer, Advanced SIMD) # :vcnt.8 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0711=10 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0711=10 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm { Dd = VectorCountOneBits(Dm,8:1,8:1); } :vcnt.8 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819=0 & c1617=0 & c0711=10 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0711=10 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm { Qd = VectorCountOneBits(Qm,8:1,8:1); } @ifndef VERSION_8 :vcvt.s16.f16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | ( $(TMODE_F) 
# SIMD VCVT float<->integer (whole register).  NOTE(review): the small
# constant passed as the second pcodeop argument varies (0, 3, 4..11) between
# otherwise-parallel variants -- presumably a rounding-mode/size code per the
# comment above the pcodeop declarations; verify before relying on it.
& thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToSigned(Dm,3:1); } :vcvt.u16.f16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToUnsigned(Dm,0:1); } :vcvt.f16.s16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=0 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorSignedToFloat(Dm,0:1); } :vcvt.f16.u16 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorUnsignedToFloat(Dm,0:1); } :vcvt.s32.f32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToSigned(Dm,3:1); } :vcvt.u32.f32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorFloatToUnsigned(Dm,3:1); } :vcvt.f32.s32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=0 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorSignedToFloat(Dm,0:1); } :vcvt.f32.u32 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & 
thv_c0708=1 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { Dd = VectorUnsignedToFloat(Dm,0:1); } :vcvt.s16.f16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToSigned(Qm,6:1); } :vcvt.u16.f16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToUnsigned(Qm,7:1); } :vcvt.f16.s16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorSignedToFloat(Qm,4:1); } :vcvt.f16.u16 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x37 & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x37 & thv_c0911=3 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorUnsignedToFloat(Qm,5:1); } :vcvt.s32.f32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=2 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToSigned(Qm,10:1); } :vcvt.u32.f32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=3 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorFloatToUnsigned(Qm,11:1); } :vcvt.f32.s32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & 
Qm { Qd = VectorSignedToFloat(Qm,8:1); } :vcvt.f32.u32 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x3b & c0911=3 & c0708=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x3b & thv_c0911=3 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { Qd = VectorUnsignedToFloat(Qm,9:1); } @endif # ! VERSION_8 @endif # SIMD @if defined(VFPv2) || defined(VFPv3) @ifndef VERSION_8 ####### # VCVT (between floating-point and integer, VFP) # roundMode: "r" is TMode=0 & c0707=0 { tmp:1 = $(FPSCR_RMODE); export tmp; } roundMode: is TMode=0 & c0707=1 { export 3:1; } # Round towards zero roundMode: "r" is TMode=1 & thv_c0707=0 { tmp:1 = $(FPSCR_RMODE); export tmp; } roundMode: is TMode=1 & thv_c0707=1 { export 3:1; } # Round towards zero :vcvt^roundMode^COND^".s32.f16" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=5 & c0911=4 & c0808=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=5 & thv_c0911=4 & thv_c0808=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sm & roundMode { build COND; build Sd; build Sm; build roundMode; local Sm16:2 = Sm(0); Sd = trunc(Sm16);#VectorFloatToSigned(Sm16,roundMode); } :vcvt^roundMode^COND^".s32.f32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=5 & c0911=5 & c0808=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=5 & thv_c0911=5 & thv_c0808=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sm & roundMode { build COND; build Sd; build Sm; build roundMode; Sd = trunc(Sm);#VectorFloatToSigned(Sm16,roundMode); } :vcvt^roundMode^COND^".s32.f64" Sd,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=5 & c0911=5 & c0808=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=5 & thv_c0911=5 & thv_c0808=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & roundMode & Dm { build COND; build Sd; build Dm; build roundMode; Sd = VectorFloatToSigned(Dm,roundMode); } :vcvt^roundMode^COND^".u32.f16" Sd,Sm is ( 
# roundMode (defined above) exports the current FPSCR rounding mode, or 3
# (round toward zero) when instruction bit 7 is set.
($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=4 & c0911=4 & c0808=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=4 & thv_c0911=4 & thv_c0808=1 & thv_c0606=1 & thv_c0404=0) ) & COND & roundMode & Sd & Sm { build COND; build Sd; build Sm; build roundMode; local Sm16:2 = Sm(0); Sd = VectorFloatToUnsigned(Sm16,roundMode); } :vcvt^roundMode^COND^".u32.f32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=4 & c0911=5 & c0808=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=4 & thv_c0911=5 & thv_c0808=0 & thv_c0606=1 & thv_c0404=0) ) & COND & roundMode & Sd & Sm { build COND; build Sd; build Sm; build roundMode; Sd = VectorFloatToUnsigned(Sm,roundMode); } :vcvt^roundMode^COND^".u32.f64" Sd,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=4 & c0911=5 & c0808=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=4 & thv_c0911=5 & thv_c0808=1 & thv_c0606=1 & thv_c0404=0)) & COND & roundMode & Sd & Dm { build COND; build Sd; build Dm; build roundMode; Sd = VectorFloatToUnsigned(Dm,roundMode); } :vcvt^COND^".f16.s32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=4 & c0808=1 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=4 & thv_c0808=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sm { build COND; build Sd; build Sm; local mode:1 = $(FPSCR_RMODE); local Sm16:2 = Sm(0); Sd = VectorSignedToFloat(Sm16,mode); } :vcvt^COND^".f16.u32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=4 & c0808=1 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=4 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sm { build COND; build Sd; build Sm; local mode:1 = $(FPSCR_RMODE); local Sm16:2 = Sm(0); Sd = VectorUnsignedToFloat(Sm16,mode); } :vcvt^COND^".f64.s32" Dd,Sm is ( ($(AMODE) & 
ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=1 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Sm { build COND; build Dd; build Sm; mode:1 = $(FPSCR_RMODE); Dd = VectorSignedToFloat(Sm,mode); } :vcvt^COND^".f32.s32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=0 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sm { build COND; build Sd; build Sm; mode:1 = $(FPSCR_RMODE); Sd = VectorSignedToFloat(Sm,mode); } :vcvt^COND^".f32.u32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=0 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sm { build COND; build Sd; build Sm; mode:1 = $(FPSCR_RMODE); Sd = VectorUnsignedToFloat(Sm,mode); } :vcvt^COND^".f64.u32" Dd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1618=0 & c0911=5 & c0808=1 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1618=0 & thv_c0911=5 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Sm { build COND; build Dd; build Sm; mode:1 = $(FPSCR_RMODE); Dd = VectorUnsignedToFloat(Sm,mode); } @endif # ! 
VERSION_8 @endif # VFPv2 || VFPv3 @if defined(SIMD) @ifndef VERSION_8 define pcodeop VectorFloatToSignedFixed; define pcodeop VectorFloatToUnsignedFixed; define pcodeop VectorSignedFixedToFloat; define pcodeop VectorUnsignedFixedToFloat; ####### # VCVT (between floating-point and fixed-point, Advanced SIMD) # fbits: "#"val is TMode=0 & c1621 [ val = 64 - c1621; ] { tmp:1 = val; export tmp; } fbits: "#"val is TMode=1 & thv_c1621 [ val = 64 - thv_c1621; ] { tmp:1 = val; export tmp; } :vcvt.s16.f16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorFloatToSignedFixed(Dm,fbits); } :vcvt.u16.f16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorFloatToUnsignedFixed(Dm,fbits); } :vcvt.f16.s16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorSignedFixedToFloat(Dm,fbits); } :vcvt.f16.u16 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorUnsignedFixedToFloat(Dm,fbits); } :vcvt.s16.f16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & 
thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToSignedFixed(Qm,fbits); }
# SIMD VCVT between floating-point and fixed-point, Q registers.
# Encoding: field c0911 (bits 11:9) selects the element type -- 6 for f16,
# 7 for f32 -- and bit 8 (c0808) the direction (1 = float->fixed,
# 0 = fixed->float), exactly as in the D-register constructors above.
:vcvt.u16.f16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToUnsignedFixed(Qm,fbits); }
# FIX: the two f16 fixed->float Q-register forms below previously used
# c0911=0 / thv_c0911=0, which matches no VCVT fixed-point encoding and
# disagreed with their D-register twins (c0911=6 & c0808=0); corrected to 6.
:vcvt.f16.s16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorSignedFixedToFloat(Qm,fbits); }
:vcvt.f16.u16 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=6 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=6 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorUnsignedFixedToFloat(Qm,fbits); }
# f32 fixed-point conversions (D registers, then Q).
:vcvt.f32.s32 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorSignedFixedToFloat(Dm,fbits); }
:vcvt.f32.u32 Dd,Dm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=1) ) & fbits & Dd & Dm { Dd = VectorUnsignedFixedToFloat(Dm,fbits); }
:vcvt.s32.f32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & 
thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToSignedFixed(Qm,fbits); } :vcvt.u32.f32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=1 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorFloatToUnsignedFixed(Qm,fbits); } :vcvt.f32.s32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorSignedFixedToFloat(Qm,fbits); } :vcvt.f32.u32 Qd,Qm,fbits is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2121=1 & c0911=7 & c0808=0 & c0707=0 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2121=1 & thv_c0911=7 & thv_c0808=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & fbits & Qd & Qm { Qd = VectorUnsignedFixedToFloat(Qm,fbits); } @endif # ! 
VERSION_8 @endif # SIMD @if defined(VFPv3) @ifndef VERSION_8 ####### # VCVT (between floating-point and fixed-point, VFP) # fbits16: "#"^val is TMode=0 & c0505 & c0003 [ val = 16 - ((c0003 << 1) + c0505); ] { tmp:1 = val; export tmp; } fbits32: "#"^val is TMode=0 & c0505 & c0003 [ val = 32 - ((c0003 << 1) + c0505); ] { tmp:1 = val; export tmp; } fbits16: "#"^val is TMode=1 & thv_c0505 & thv_c0003 [ val = 16 - ((thv_c0003 << 1) + thv_c0505); ] { tmp:1 = val; export tmp; } fbits32: "#"^val is TMode=1 & thv_c0505 & thv_c0003 [ val = 32 - ((thv_c0003 << 1) + thv_c0505); ] { tmp:1 = val; export tmp; } :vcvt^COND^".s16.f16" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & c1011=2 & c0809=1 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0)) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorFloatToSignedFixed(Sd2,16:1,fbits16); } :vcvt^COND^".u16.f16" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & c1011=2 & c0809=1 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorFloatToUnsignedFixed(Sd2,16:1,fbits16); } :vcvt^COND^".s32.f16" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & c1011=2 & c0809=1 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorFloatToSignedFixed(Sd2,32:1,fbits32); } :vcvt^COND^".u32.f16" 
# VFP float->fixed conversions (c1818=1).  fbits16/fbits32 above compute the
# fixed-point fraction width as 16 or 32 minus the immediate assembled from
# bits 3:0 and 5; the register operand doubles as source and destination.
Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & c1011=2 & c0809=1 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorFloatToUnsignedFixed(Sd2,32:1,fbits32); } :vcvt^COND^".s16.f32" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & c1011=2 & c0809=2 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=2 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0)) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorFloatToSignedFixed(Sd2,16:1,fbits16); } :vcvt^COND^".u16.f32" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & c1011=2 & c0809=2 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=2 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorFloatToUnsignedFixed(Sd2,16:1,fbits16); } :vcvt^COND^".s32.f32" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & c1011=2 & c0809=2 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=2 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorFloatToSignedFixed(Sd2,32:1,fbits32); } :vcvt^COND^".u32.f32" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & c1011=2 & c0809=2 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & 
thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=2 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorFloatToUnsignedFixed(Sd2,32:1,fbits32); } :vcvt^COND^".s16.f64" Dd,Dd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & c1011=2 & c0809=3 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=3 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits16 { build COND; build Dd; build Dd2; build fbits16; Dd = VectorFloatToSignedFixed(Dd2,16:1,fbits16); } :vcvt^COND^".u16.f64" Dd,Dd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & c1011=2 & c0809=3 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=3 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits16 { build COND; build Dd; build Dd2; build fbits16; Dd = VectorFloatToUnsignedFixed(Dd2,16:1,fbits16); } :vcvt^COND^".s32.f64" Dd,Dd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=0 & c1011=2 & c0809=3 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=3 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits32 { build COND; build Dd; build Dd2; build fbits32; Dd = VectorFloatToSignedFixed(Dd2,32:1,fbits32); } :vcvt^COND^".u32.f64" Dd,Dd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=1 & c1717=1 & c1616=1 & c1011=2 & c0809=3 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=1 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=3 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & 
fbits32 { build COND; build Dd; build Dd2; build fbits32; Dd = VectorFloatToUnsignedFixed(Dd2,32:1,fbits32); }
# VFP VCVT, fixed-point -> floating-point direction (c1818=0).  The mnemonic
# suffix ".fA.sB"/".fA.uB" names the destination float type first and the
# source fixed-point type second, so the semantics must use the
# *FixedToFloat pcodeops.
# FIX: every unsigned variant below previously called
# VectorFloatToUnsignedFixed -- the float->fixed op used by the ".uB.fA"
# forms above -- which inverts the conversion direction.  Corrected to
# VectorUnsignedFixedToFloat, matching the signed twins
# (VectorSignedFixedToFloat) and the Advanced-SIMD vcvt.fA.uB constructors.
:vcvt^COND^".f16.s16" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c1011=2 & c0809=1 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0)) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorSignedFixedToFloat(Sd2,16:1,fbits16); }
:vcvt^COND^".f16.u16" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c1011=2 & c0809=1 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=1 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorUnsignedFixedToFloat(Sd2,16:1,fbits16); }
:vcvt^COND^".f16.s32" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c1011=2 & c0809=1 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorSignedFixedToFloat(Sd2,32:1,fbits32); }
:vcvt^COND^".f16.u32" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c1011=2 & c0809=1 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=1 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorUnsignedFixedToFloat(Sd2,32:1,fbits32); }
:vcvt^COND^".f32.s16" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c1011=2 & c0809=2 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=2 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0)) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorSignedFixedToFloat(Sd2,16:1,fbits16); }
:vcvt^COND^".f32.u16" Sd,Sd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c1011=2 & c0809=2 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=2 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits16 { build COND; build Sd; build Sd2; build fbits16; Sd = VectorUnsignedFixedToFloat(Sd2,16:1,fbits16); }
:vcvt^COND^".f32.s32" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c1011=2 & c0809=2 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=2 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorSignedFixedToFloat(Sd2,32:1,fbits32); }
:vcvt^COND^".f32.u32" Sd,Sd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c1011=2 & c0809=2 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=2 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Sd & Sd2 & fbits32 { build COND; build Sd; build Sd2; build fbits32; Sd = VectorUnsignedFixedToFloat(Sd2,32:1,fbits32); }
:vcvt^COND^".f64.s16" Dd,Dd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c1011=2 & c0809=3 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=3 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits16 { build COND; build Dd; build Dd2; build fbits16; Dd = VectorSignedFixedToFloat(Dd2,16:1,fbits16); }
:vcvt^COND^".f64.u16" Dd,Dd2,fbits16 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c1011=2 & c0809=3 & c0707=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=3 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits16 { build COND; build Dd; build Dd2; build fbits16; Dd = VectorUnsignedFixedToFloat(Dd2,16:1,fbits16); }
:vcvt^COND^".f64.s32" Dd,Dd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=0 & c1011=2 & c0809=3 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=0 & thv_c1011=2 & thv_c0809=3 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits32 { build COND; build Dd; build Dd2; build fbits32; Dd = VectorSignedFixedToFloat(Dd2,32:1,fbits32); }
:vcvt^COND^".f64.u32" Dd,Dd2,fbits32 is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1921=7 & c1818=0 & c1717=1 & c1616=1 & c1011=2 & c0809=3 & c0707=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1921=7 & thv_c1818=0 & thv_c1717=1 & thv_c1616=1 & thv_c1011=2 & thv_c0809=3 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0) ) & COND & Dd & Dd2 & fbits32 { build COND; build Dd; build Dd2; build fbits32; Dd = VectorUnsignedFixedToFloat(Dd2,32:1,fbits32); }
@endif # !
VERSION_8 @endif # VFPv3 define pcodeop VectorFloatDoubleToSingle; define pcodeop VectorFloatSingleToDouble; @if defined(VFPv2) || defined(VFPv3) @ifndef VERSION_8 ####### # VCVT (between double-precision and single-precision) # :vcvt^COND^".f32.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x37 & c0911=5 & c0808=1 & c0607=3 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x36 & thv_c0911=5 & thv_c0808=1 & thv_c0607=3 & thv_c0404=0 ) ) & COND & Sd & Dm { build COND; build Sd; build Dm; Sd = float2float(Dm); } :vcvt^COND^".f64.f32" Dd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x37 & c0911=5 & c0808=0 & c0607=3 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x36 & thv_c0911=5 & thv_c0808=0 & thv_c0607=3 & thv_c0404=0 ) ) & COND & Dd & Sm { build COND; build Dd; build Sm; Dd = float2float(Sm); } @endif # ! VERSION_8 @endif # VFPv2 || VFPv3 @if defined(SIMD) @ifndef VERSION_8 define pcodeop VectorFloatSingleToBFloat16; define pcodeop FloatSingleToBFloat16; ####### # VCVT (between single-precision and BFloat16) # :vcvt.bf16.f32 Dd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=0 & thv_c0607=1 & thv_c0404=0 ) ) & Dd & Qm { Dd = VectorFloatSingleToBFloat16(Qm, 4:1, 16:1); } # VCVTB Convert Single-precision to BFloat16 in Bottom :vcvtb^COND^".bf16.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x33 & c0611=0x25 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x33 & thv_c0611=0x25 & thv_c0404=0 ) ) & COND & Sd & Sm { build COND; build Sd; build Sm; local w:2 = FloatSingleToBFloat16(Sm); Sd[0,16] = w; } # VCVTT Convert Single-precision to BFloat16 in Top :vcvtt^COND^".bf16.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x33 & c0611=0x27 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x33 & thv_c0611=0x27 & thv_c0404=0 ) ) & COND & Sd & Sm { build COND; build Sd; 
build Sm; w:2 = FloatSingleToBFloat16(Sm); Sd[16,16] = w; } define pcodeop VectorFloatSingleToHalf; define pcodeop VectorFloatHalfToSingle; ####### # VCVT (between half-precision and single-precision) # :vcvt.f16.f32 Dd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=0 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=0 & thv_c0607=0 & thv_c0404=0 ) ) & Dd & Qm { Dd = VectorFloatSingleToHalf(Qm, 4:1, 16:1); } :vcvt.f16.f32 Qd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1621=0x36 & c0911=3 & c0808=1 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1621=0x36 & thv_c0911=3 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & Qd & Dm { Qd = VectorFloatHalfToSingle(Dm, 4:1, 16:1); } define pcodeop VectorFloatToSignedRound; define pcodeop VectorFloatToUnsignedRound; # VCVTA/M/N/P Vector convert floating-point to integer with Rounding :vcvt^roundType^".s16.f16" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=0 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToSignedRound(Dm, 0:1, roundType); } :vcvt^roundType^".u16.f16" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=1 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToUnsignedRound(Dm, 0:1, roundType); } :vcvt^roundType^".s32.f32" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=0 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=0 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToSignedRound(Dm, 1:1, roundType); } :vcvt^roundType^".u32.f32" Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & 
c0707=1 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=1 & thv_c0606=0 & thv_c0404=0 ) ) & roundType & Dd & Dm { Dd = VectorFloatToUnsignedRound(Dm, 1:1, roundType); } :vcvt^roundType^".s16.f16" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToSignedRound(Qm, 0:1, roundType); } :vcvt^roundType^".u16.f16" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xb & c1011=0 & c0707=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xb & thv_c1011=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToUnsignedRound(Qm, 0:1, roundType); } :vcvt^roundType^".s32.f32" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=0 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToSignedRound(Qm, 1:1, roundType); } :vcvt^roundType^".u32.f32" Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c1821=0xc & c1011=0 & c0707=1 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c1821=0xc & thv_c1011=0 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 ) ) & roundType & Qd & Qm { Qd = VectorFloatToUnsignedRound(Qm, 1:1, roundType); } @endif # ! 
VERSION_8 @endif # SIMD @if defined(VFPv3) @ifndef VERSION_8 define pcodeop FloatToSignedRound; define pcodeop FloatToUnsignedRound; # VCVTA/M/N/P Float convert floating-point to integer with Rounding :vcvt^roundType^".s32.f16" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=1 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & roundType & Sd & Sm { local sm16:2 = Sm(0); Sd = FloatToSignedRound(sm16, roundType); } :vcvt^roundType^".u32.f16" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=1 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=1 & thv_c0607=1 & thv_c0404=0 ) ) & roundType & Sd & Sm { local sm16:2 = Sm(0); Sd = FloatToUnsignedRound(sm16, roundType); } :vcvt^roundType^".s32.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=0 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=0 & thv_c0607=0 & thv_c0404=0 ) ) & roundType & Sd & Sm { Sd = FloatToSignedRound(Sm, roundType); } :vcvt^roundType^".u32.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=4 & c0808=0 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=4 & thv_c0808=0 & thv_c0607=1 & thv_c0404=0 ) ) & roundType & Sd & Sm { Sd = FloatToUnsignedRound(Sm, roundType); } :vcvt^roundType^".s32.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=5 & c0808=1 & c0607=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf & thv_c0911=5 & thv_c0808=1 & thv_c0607=0 & thv_c0404=0 ) ) & roundType & Sd & Dm { Sd = FloatToSignedRound(Dm, roundType); } :vcvt^roundType^".u32.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1821=0xf & c0911=5 & c0808=1 & c0607=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1d & thv_c1821=0xf 
& thv_c0911=5 & thv_c0808=1 & thv_c0607=1 & thv_c0404=0 ) ) & roundType & Sd & Dm { Sd = FloatToUnsignedRound(Dm, roundType); } # VCVTB Convert Half-precision in Bottom to Single-precision :vcvtb^COND^".f32.f16" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x32 & c0611=0x29 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x32 & thv_c0611=0x29 & thv_c0404=0 ) ) & COND & Sd & Sm { build COND; build Sd; build Sm; Sd = float2float(Sm:2); } :vcvtb^COND^".f16.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x33 & c0611=0x29 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x33 & thv_c0611=0x29 & thv_c0404=0 ) ) & COND & Sd & Sm { build COND; build Sd; build Sm; local w:2 = float2float(Sm); Sd[0,16] = w; } :vcvtb^COND^".f64.f16" Dd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x32 & c0611=0x2d & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x32 & thv_c0611=0x2d & thv_c0404=0 ) ) & COND & Dd & Sm { build COND; build Dd; build Sm; Dd = float2float(Sm:2); } :vcvtb^COND^".f16.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x33 & c0611=0x2d & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x33 & thv_c0611=0x2d & thv_c0404=0 ) ) & COND & Sd & Dm { build COND; build Sd; build Dm; local w:2 = float2float(Dm); Sd[0,16] = w; } # VCVTT Convert Half-precision in Top to Single-precision :vcvtt^COND^".f32.f16" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x32 & c0611=0x2b & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x32 & thv_c0611=0x2b & thv_c0404=0 ) ) & COND & Sd & Sm { build COND; build Sd; build Sm; w:2 = Sm(2); Sd = float2float(w); } :vcvtt^COND^".f16.f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x33 & c0611=0x2b & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x33 & thv_c0611=0x2b & thv_c0404=0 ) ) & COND & Sd & Sm { build COND; build Sd; build Sm; w:2 = float2float(Sm); Sd[16,16] = w; } :vcvtt^COND^".f64.f16" Dd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & 
c1621=0x32 & c0611=0x2f & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x32 & thv_c0611=0x2f & thv_c0404=0 ) ) & COND & Dd & Sm { build COND; build Dd; build Sm; w:2 = Sm(2); Dd = float2float(w); } :vcvtt^COND^".f16.f64" Sd,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x33 & c0611=0x2f & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x33 & thv_c0611=0x2f & thv_c0404=0 ) ) & COND & Sd & Dm { build COND; build Sd; build Dm; w:2 = float2float(Dm); Sd[16,16] = w; } @endif # ! VERSION_8 @endif # VFPv3 @if defined(VFPv2) || defined(VFPv3) :vdiv^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=0 & c0811=9 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0 ) ) & COND & Sn & Sd & Sm { build COND; build Sd; build Sm; build Sn; local sm16:2 = Sm(0); local sn16:2 = Sn(0); Sd = zext(sn16 f/ sm16); } :vdiv^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=0 & c0811=10 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0 ) ) & COND & Sn & Sd & Sm { build COND; build Sd; build Sm; build Sn; Sd = Sn f/ Sm; } :vdiv^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=0 & c0811=11 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0 ) ) & COND & Dn & Dd & Dm { build COND; build Dd; build Dm; build Dn; Dd = Dn f/ Dm; } @endif # VFPv2 || VFPv3 define pcodeop VectorHalvingAdd; define pcodeop VectorHalvingSubtract; define pcodeop VectorRoundHalvingAdd; define pcodeop VectorRoundAddAndNarrow; define pcodeop VectorDotProduct; define pcodeop vectorFusedMultiplyAccumulate; define pcodeop BfloatMultiplyAccumulate; define pcodeop VectorMultiplyAddLongVector; define pcodeop VectorMultiplyAddLongScalar; @if defined(SIMD) # F6.1.79 VDOT (vector) page F6-8052 line 467006 # xfc000d00/mask=xffb00f10 NOT MATCHED BY ANY CONSTRUCTOR # 
b_0031=111111000.00........1101...0.... :vdot.bf16 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x18 & thv_c2021=0 & thv_c0811=0xd & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Dd { Dd = VectorDotProduct(Dn,Dm); } :vdot.bf16 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0 & c0811=0xd & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x18 & thv_c2021=0 & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & Qm & Qn & Qd { Qd = VectorDotProduct(Qn,Qm); } Mindex: "["^M5^"]" is TMode=0 & M5 { local idx:1 = M5:1; export idx; } Mindex: "["^thv_M5^"]" is TMode=1 & thv_M5 { local idx:1 = thv_M5:1; export idx; } :vdot.bf16 Dd, Dn, Dm0^Mindex is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=0xd & thv_Q6=0 & thv_c0404=0 ) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorDotProduct(Dn,Dm0,Mindex); } :vdot.bf16 Qd, Qn, Qm0^Mindex is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0 & c0811=0xd & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & Qm0 & Mindex & Qn & Qd { Qd = VectorDotProduct(Qn,Qm0,Mindex); } ####### # VDUP (scalar) # vdupIndex: c1719 is TMode=0 & c1616=1 & c1719 { tmp:4 = c1719; export tmp; } vdupIndex: c1819 is TMode=0 & c1617=2 & c1819 { tmp:4 = c1819; export tmp; } vdupIndex: c1919 is TMode=0 & c1618=4 & c1919 { tmp:4 = c1919; export tmp; } vdupIndex: thv_c1719 is TMode=1 & thv_c1616=1 & thv_c1719 { tmp:4 = thv_c1719; export tmp; } vdupIndex: thv_c1819 is TMode=1 & thv_c1617=2 & thv_c1819 { tmp:4 = thv_c1819; export tmp; } vdupIndex: thv_c1919 is TMode=1 & thv_c1618=4 & thv_c1919 { tmp:4 = thv_c1919; export tmp; } vdupSize: 8 is TMode=0 & c1616=1 { } vdupSize: 16 is TMode=0 & c1617=2 { } vdupSize: 32 is TMode=0 & c1618=4 { } vdupSize: 8 is TMode=1 & thv_c1616=1 { } vdupSize: 16 is TMode=1 & thv_c1617=2 { } 
# Final vdupSize table entry: 32-bit lane size for the Thumb encoding.
vdupSize: 32 is TMode=1 & thv_c1618=4 { }

# vdupDm: read one scalar lane of Dm (selected by vdupIndex) and replicate it
# across a 64-bit value.  The three variants are the 8-, 16- and 32-bit lane
# sizes, distinguished by the same size bits that drive vdupSize/vdupIndex.
vdupDm: Dm^"["^vdupIndex^"]" is Dm & vdupIndex & ((TMode=0 & c1616=1) | (TMode=1 & thv_c1616=1)) { ptr:4 = &Dm + vdupIndex; val:8 = 0; replicate1to8(*[register]:1 ptr, val); export val; }
vdupDm: Dm^"["^vdupIndex^"]" is Dm & vdupIndex & ((TMode=0 & c1617=2) | (TMode=1 & thv_c1617=2)) { ptr:4 = &Dm + (2 * vdupIndex); val:8 = 0; replicate2to8(*[register]:2 ptr, val); export val; }
vdupDm: Dm^"["^vdupIndex^"]" is Dm & vdupIndex & ((TMode=0 & c1618=4) | (TMode=1 & thv_c1618=4)) { ptr:4 = &Dm + (4 * vdupIndex); val:8 = 0; replicate4to8(*[register]:4 ptr, val); export val; }

# Widen the replicated 64-bit value to 128 bits for the Q-register form.
vdupDm16: vdupDm is vdupDm { val:16 = zext(vdupDm); val = val | (val << 64); export val; }

# VDUP (scalar): duplicate one lane of Dm into every lane of Dd / Qd.
:vdup.^vdupSize Dd,vdupDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=0 & c0404=0 ) | ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c0711=0x18 & thv_Q6=0 & thv_c0404=0 ) ) & Dd & vdupDm { Dd = vdupDm; }
:vdup.^vdupSize Qd,vdupDm16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & vdupSize & c0711=0x18 & Q6=1 & c0404=0 ) | ($(TMODE_F) &thv_c2327=0x1f & thv_c2021=3 & thv_c0711=0x18 & thv_Q6=1 & thv_c0404=0) ) & Qd & vdupDm16 { Qd = vdupDm16; }

#######
# VDUP (ARM core register)
#

# Element size is encoded by bit 22 (b) together with bit 5 (e).
vdupSize2: 8 is TMode=0 & c2222=1 & c0505=0 { }
vdupSize2: 16 is TMode=0 & c2222=0 & c0505=1 { }
vdupSize2: 32 is TMode=0 & c2222=0 & c0505=0 { }
vdupSize2: 8 is TMode=1 & thv_c2222=1 & thv_c0505=0 { }
vdupSize2: 16 is TMode=1 & thv_c2222=0 & thv_c0505=1 { }
vdupSize2: 32 is TMode=1 & thv_c2222=0 & thv_c0505=0 { }

# vdupRd8: replicate the low 8/16/32 bits of the ARM core register VRd
# across a 64-bit value (one variant per element size, mirroring vdupSize2).
vdupRd8: VRd is VRd & ((TMode=0 & c2222=1 & c0505=0) | (TMode=1 & thv_c2222=1 & thv_c0505=0)) { val:8 = 0; local tmpRd = VRd; replicate1to8(tmpRd:1, val); export val; }
vdupRd8: VRd is VRd & ((TMode=0 & c2222=0 & c0505=1) | (TMode=1 & thv_c2222=0 & thv_c0505=1)) { val:8 = 0; local tmpRd = VRd; replicate2to8(tmpRd:2, val); export val; }
vdupRd8: VRd is VRd & ((TMode=0 & c2222=0 & c0505=0) | (TMode=1 & thv_c2222=0 & thv_c0505=0)) { val:8 = 0; local tmpRd = VRd; replicate4to8(tmpRd:4, val); export val; }

# 128-bit replication of the core-register value for the Q-register form.
vdupRd16: vdupRd8 is vdupRd8 { val:16 = zext(vdupRd8); val = val | (val << 64); export val; }

# VDUP (ARM core register): broadcast a core register into a D / Q register.
# The D and Q forms differ only in bits 21:20 (0 vs 2) and the replication width.
:vdup^COND^"."^vdupSize2 Dn,VRd is (( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=0 & c0811=11 & c0606=0 & c0004=0x10) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=11 & thv_c0606=0 & thv_c0004=0x10 ) ) & VRd & COND & Dn & vdupSize2 & vdupRd8 { build COND; build vdupRd8; build Dn; Dn = vdupRd8; }
:vdup^COND^"."^vdupSize2 Qn,VRd is (( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=2 & c0811=11 & c0606=0 & c0004=0x10) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c0811=11 & thv_c0606=0 & thv_c0004=0x10 ) ) & VRd & COND & Qn & vdupSize2 & vdupRd16 { build COND; build vdupRd16; build Qn; Qn = vdupRd16; }

# VEOR: bitwise exclusive-OR of two vector registers.
:veor Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_F) &thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = Dn ^ Dm; }
:veor Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x6 & c2021=0 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_F) &thv_c2327=0x1e & thv_c2021=0 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm { Qd = Qn ^ Qm; }

# Immediate byte offset operand for VEXT (bits 11:8).
extImm: "#"^c0811 is TMode=0 & c0811 { tmp:1 = c0811; export tmp; }
extImm: "#"^thv_c0811 is TMode=1 & thv_c0811 { tmp:1 = thv_c0811; export tmp; }

# VEXT: extract a byte-aligned window from the concatenation Dm:Dn (or Qm:Qn),
# shifting right by extImm bytes and keeping the low half for the destination.
:vext.8 Dd,Dn,Dm,extImm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021=3 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=3 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dn & Dm & extImm { val:16 = (zext(Dm) << 64) | zext(Dn); local shift = extImm * 8; val = val >> shift; Dd = val:8; }
:vext.8 Qd,Qn,Qm,extImm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021=3 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=3 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qn & Qm & extImm { val:32 = (zext(Qm) << 128) | zext(Qn); local shift = extImm * 8; val = val >> shift; Qd = val:16; }
# VFMA (vector): fused multiply-accumulate, modelled with the
# vectorFusedMultiplyAccumulate pcodeop; fesize2020 carries the element size.
:vfma.f^fesize2020 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=0xc & c0606=0 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=1) ) & fesize2020 & Dd & Dn & Dm { Dd = vectorFusedMultiplyAccumulate(Dn, Dm, fesize2020); }
:vfma.f^fesize2020 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=0xc & c0606=1 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=1) ) & fesize2020 & Qd & Qn & Qm { Qd = vectorFusedMultiplyAccumulate(Qn, Qm, fesize2020); }

# Floating-point Multiply-Accumulate BFloat (vector).
# vfmab / vfmat differ only in bit 6, passed as the final 0:1 / 1:1 flag
# (bottom vs. top element selection) to the BfloatMultiplyAccumulate pcodeop.
:vfmab.BF16 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x3 & c0811=8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & Qd & Qn & Qm { Qd = BfloatMultiplyAccumulate(Qn, Qm, 0:1); }
:vfmat.BF16 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x3 & c0811=8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qn & Qm { Qd = BfloatMultiplyAccumulate(Qn, Qm, 1:1); }

# vmfDm: indexed 2-byte element of Dm_3; the lane index is (M5 << 1) | c0303.
vmfDm: Dm_3^"["^index^"]" is TMode=0 & Dm_3 & M5 & c0303 [ index = (M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; }
vmfDm: thv_Dm_3^"["^index^"]" is TMode=1 & thv_Dm_3 & thv_M5 & thv_c0303 [ index = (thv_M5 << 1) + thv_c0303; ] { el:4 = VectorGetElement(thv_Dm_3, index:1, 2:1, 0:1); export el; }
# vmfSm: indexed 4-byte element of Sm_3, selected by the M bit.
vmfSm: Sm_3^"["^c0303^"]" is TMode=0 & c0404=1 & Sm_3 & M5 & c0303 { el:4 = VectorGetElement(Sm_3, M5:1, 4:1, 0:1); export el; }
vmfSm: Sm_3^"["^c0303^"]" is TMode=1 & thv_c0404=1 & Sm_3 & thv_M5 & c0303 { el:4 = VectorGetElement(Sm_3, thv_M5:1, 4:1, 0:1); export el; }

# vfmab/vfmat (by scalar): same semantics with an indexed element operand.
:vfmab.BF16 Qd,Qn,vmfDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x3 & c0811=8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & Qd & Qn & vmfDm { Qd = BfloatMultiplyAccumulate(Qn, vmfDm, 0:1); }
:vfmat.BF16 Qd,Qn,vmfDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x3 & c0811=8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x3 & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qn & vmfDm { Qd = BfloatMultiplyAccumulate(Qn, vmfDm, 1:1); }

# VFMAL: half-precision multiply-add-long, vector and by-scalar forms.
# Bit 6 selects narrow (D <- S) vs. wide (Q <- D) and is passed as the flag.
:vfmal.F16 Dd,Sn,Sm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x2 & c0811=0x8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x2 & thv_c0811=0x8 & thv_c0606=0 & thv_c0404=1) ) & Dd & Sn & Sm { Dd = VectorMultiplyAddLongVector(Sn, Sm, 0:1); }
:vfmal.F16 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=0x2 & c0811=0x8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=0x2 & thv_c0811=0x8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Dn & Dm { Qd = VectorMultiplyAddLongVector(Dn, Dm, 1:1); }
:vfmal.F16 Dd,Sn,vmfSm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x2 & c0811=8 & c0606=0 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x2 & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & Dd & Sn & vmfSm { Dd = VectorMultiplyAddLongScalar(Sn, vmfSm, 0:1); }
:vfmal.F16 Qd,Dn,vmfDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=0x2 & c0811=8 & c0606=1 & c0404=1 ) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=0x2 & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & Qd & Dn & vmfDm { Qd = VectorMultiplyAddLongScalar(Dn, vmfDm, 1:1); }

# VHADD: halving add, signed/unsigned per udt, element size per esize2021.
:vhadd.^udt^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=0 & thv_Q6=0 & thv_c0404=0) ) & udt & Dm & esize2021 & Dn & Dd { Dd = VectorHalvingAdd(Dn,Dm,esize2021,udt); }
:vhadd.^udt^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=0 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=0 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd { Qd = VectorHalvingAdd(Qn,Qm,esize2021,udt); }

# VRADDHN: rounding add and narrow high half (Q,Q -> D).
:vraddhn.i^esize2021x2 Dd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=4 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=4 & thv_Q6=0 & thv_c0404=0) ) & Qm & esize2021x2 & Qn & Dd { Dd = VectorRoundAddAndNarrow(Qn,Qm,esize2021x2); }

# VRHADD: rounding halving add.
:vrhadd.^udt^esize2021 Dd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=0) ) & udt & Dm & esize2021 & Dn & Dd { Dd = VectorRoundHalvingAdd(Dn,Dm,esize2021,udt); }
:vrhadd.^udt^esize2021 Qd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=1 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd { Qd = VectorRoundHalvingAdd(Qn,Qm,esize2021,udt); }

# VHSUB: halving subtract.
:vhsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=2 & thv_Q6=0 & thv_c0404=0) ) & udt & esize2021 & Dn & Dd & Dm { Dd = VectorHalvingSubtract(Dn,Dm,esize2021,udt); }
:vhsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=2 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=2 & thv_Q6=1 & thv_c0404=0) ) & udt & Qm & esize2021 & Qn & Qd { Qd = VectorHalvingSubtract(Qn,Qm,esize2021,udt); }

#######
# VFMA VFMS VFNMA and VFNMS
#
@if defined(VFPv4)
# Scalar fused multiply-accumulate family.  The .f16 forms operate on the low
# 16 bits of the S registers and zero-extend the half-precision result; the
# sign placement of Sd/Sn in each expression distinguishes the four variants.
:vfma^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=1 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=1 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext(Sd:2 f+ (Sn:2 f* Sm:2)); }
:vfma^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=2 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=2 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = Sd f+ (Sn f* Sm); }
:vfma^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=3 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=3 & thv_c0606=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = Dd f+ (Dn f* Dm); }
:vfms^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=1 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext(Sd:2 f+ ((f- Sn:2) f* Sm:2)); }
:vfms^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=2 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=2 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = Sd f+ ((f- Sn) f* Sm); }
:vfms^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=2 & c1011=2 & c0809=3 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=2 & thv_c1011=2 & thv_c0809=3 & thv_c0606=1 & thv_c0404=0)) & Dm & Dn & Dd { Dd = Dd f+ ((f- Dn) f* Dm); }
:vfnma^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=1 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=1 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext((f- Sd:2) f+ ((f- Sn:2) f* Sm:2)); }
:vfnma^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=2 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=2 & thv_c0606=1 & thv_c0404=0)) & Sm & Sn & Sd { Sd = (f- Sd) f+ ((f- Sn) f* Sm); }
:vfnma^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=3 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=3 & thv_c0606=1 & thv_c0404=0)) & Dm & Dn & Dd { Dd = (f- Dd) f+ ((f- Dn) f* Dm); }
:vfnms^COND^".f16" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=1 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=1 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = zext((f- Sd:2) f+ (Sn:2 f* Sm:2)); }
:vfnms^COND^".f32" Sd,Sn,Sm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=2 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=2 & thv_c0606=0 & thv_c0404=0)) & Sm & Sn & Sd { Sd = (f- Sd) f+ (Sn f* Sm); }
:vfnms^COND^".f64" Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=1 & COND & c2327=0x1d & c2021=1 & c1011=2 & c0809=3 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=1 & thv_c1011=2 & thv_c0809=3 & thv_c0606=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = (f- Dd) f+ (Dn f* Dm); }
@endif # VFPv4

#######
# VLD1 (multiple single elements)
#
# Recursive register-list builder: loads each D register from mult_addr,
# advancing mult_addr by 8 between registers; counter/regNum are context
# fields that drive the recursion.
buildVld1DdList: Dreg is Dreg & counter=1 [ counter=0; regNum=regNum+1; ] { Dreg = * mult_addr; }
buildVld1DdList: Dreg,buildVld1DdList is Dreg & buildVld1DdList [ counter=counter-1; regNum=regNum+1; ] { Dreg = * mult_addr; mult_addr = mult_addr + 8; build buildVld1DdList; }

# vld1DdList: register-list operand; bits 11:8 encode the list length
# (7 -> 1 reg, 10 -> 2, 6 -> 3, 2 -> 4) and the exported value is that length.
vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=7 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=1; ] { export 1:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=10 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=2; ] { export 2:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=6 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=3; ] { export 3:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=0 & c0811=2 & D22 & c1215 & buildVld1DdList [ regNum=(D22<<4)+c1215-1; counter=4; ] { export 4:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=7 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=1; ] { export 1:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=10 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=2; ] { export 2:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=6 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=3; ] { export 3:4; }
vld1DdList: "{"^buildVld1DdList^"}" is TMode=1 & thv_c0811=2 & thv_D22 & thv_c1215 & buildVld1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=4; ] { export 4:4; }

# Constraint macros restricting bits 11:8 to the valid list encodings above.
@define Vld1DdList "(c0811=2 | c0811=6 | c0811=7 | c0811=10)"
@define thv_Vld1DdList "(thv_c0811=2 | thv_c0811=6 | thv_c0811=7 | thv_c0811=10)"

# Optional alignment suffix from bits 5:4 (display only).
vldAlign45: is TMode=0 & c0405=0 { }
vldAlign45: ":64" is TMode=0 & c0405=1 { }
vldAlign45: ":128" is TMode=0 & c0405=2 { }
vldAlign45: ":256" is TMode=0 & c0405=3 { }
vldAlign45: is TMode=1 & thv_c0405=0 { }
vldAlign45: ":64" is TMode=1 & thv_c0405=1 { }
vldAlign45: ":128" is TMode=1 & thv_c0405=2 { }
vldAlign45: ":256" is TMode=1 & thv_c0405=3 { }
RnAligned45: "["^VRn^vldAlign45^"]" is TMode=0 & VRn & vldAlign45 { export VRn; }
RnAligned45: "["^VRn^vldAlign45^"]" is TMode=1 & VRn & vldAlign45 { export VRn; }

# VLD1 (multiple single elements): no write-back (Rm=15), write-back by the
# transfer size (Rm=13), and write-back by register, per bits 3:0.
:vld1.^esize0607 vld1DdList,RnAligned45 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0003=15 & $(Vld1DdList)) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0003=15 & $(thv_Vld1DdList)) ) & esize0607 & RnAligned45 & vld1DdList { mult_addr = RnAligned45; build vld1DdList; }
:vld1.^esize0607 vld1DdList,RnAligned45^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0003=13 & $(Vld1DdList)) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0003=13 & $(thv_Vld1DdList)) ) & esize0607 & RnAligned45 & vld1DdList { mult_addr = RnAligned45; build vld1DdList; RnAligned45 = RnAligned45 + (8 * vld1DdList); }
:vld1.^esize0607 vld1DdList,RnAligned45,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & $(Vld1DdList)) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & $(thv_Vld1DdList)) ) & VRm & esize0607 & RnAligned45 & vld1DdList { mult_addr = RnAligned45; build vld1DdList; RnAligned45 = RnAligned45 + VRm; }

#######
# VLD1 (single element to one lane)
#
# Lane index: bits 7:5 shifted down by the size field (bits 11:10).
vld1Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; }
vld1Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; }
# Store the loaded 1/2/4-byte element into the selected lane of Dd.
vld1DdElement2: Dd^"["^vld1Index^"]" is Dd & vld1Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dd + vld1Index; *[register]:1 ptr = *:1 mult_addr; }
vld1DdElement2: Dd^"["^vld1Index^"]" is Dd & vld1Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dd + (2 * vld1Index); *[register]:2 ptr = *:2 mult_addr; }
vld1DdElement2: Dd^"["^vld1Index^"]" is Dd & vld1Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dd + (4 * vld1Index); *[register]:4 ptr = *:4 mult_addr; }

# Valid size/index-alignment combinations for the one-lane form.
@define Vld1DdElement2 "((c1011=0 & c0404=0) | (c1011=1 & c0505=0) | (c1011=2 & (c0406=0 | c0406=3)))"
@define T_Vld1DdElement2 "((thv_c1011=0 & thv_c0404=0) | (thv_c1011=1 & thv_c0505=0) | (thv_c1011=2 & (thv_c0406=0 | thv_c0406=3)))"

# Optional alignment suffix for the one-lane form (bit 4 with the size field).
vld1Align2: is TMode=0 & c0404=0 { }
vld1Align2: ":16" is TMode=0 & c1011=1 & c0404=1 { }
vld1Align2: ":32" is TMode=0 & c1011=2 & c0404=1 { }
vld1Align2: is TMode=1 & thv_c0404=0 { }
vld1Align2: ":16" is TMode=1 & thv_c1011=1 & thv_c0404=1 { }
vld1Align2: ":32" is TMode=1 & thv_c1011=2 & thv_c0404=1 { }
RnAligned2: "["^VRn^vld1Align2^"]" is VRn & vld1Align2 { export VRn; }
# VLD1 (single element to one lane): no write-back (Rm=15), write-back by
# element size (Rm=13), and write-back by register, per bits 3:0.
:vld1.^esize1011 vld1DdElement2,RnAligned2 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=15 & $(Vld1DdElement2) ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & thv_c0003=15 & $(T_Vld1DdElement2) ) ) & RnAligned2 & esize1011 & vld1DdElement2 { mult_addr = RnAligned2; build vld1DdElement2; }
:vld1.^esize1011 vld1DdElement2,RnAligned2^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0809=0 & c0003=13 & $(Vld1DdElement2) ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & thv_c0003=13 & $(T_Vld1DdElement2) ) ) & RnAligned2 & esize1011 & vld1DdElement2 { mult_addr = RnAligned2; build vld1DdElement2; RnAligned2 = RnAligned2 + esize1011; }
:vld1.^esize1011 vld1DdElement2,RnAligned2,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0809=0 & $(Vld1DdElement2) ) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0809=0 & $(T_Vld1DdElement2) ) ) & VRm & RnAligned2 & esize1011 & vld1DdElement2 { mult_addr = RnAligned2; build vld1DdElement2; RnAligned2 = RnAligned2 + VRm; }

#######
# VLD1 (single element to all lanes)
#
# Load one 1/2/4-byte element from [Rn] and replicate it across 64 bits;
# the size comes from bits 7:6.
vld1RnReplicate: is ((TMode=0 & c0607=0) | (TMode=1 & thv_c0607=0)) & VRn { val:8 = 0; replicate1to8(*:1 VRn, val); export val; }
vld1RnReplicate: is ((TMode=0 & c0607=1) | (TMode=1 & thv_c0607=1)) & VRn { val:8 = 0; replicate2to8(*:2 VRn, val); export val; }
vld1RnReplicate: is ((TMode=0 & c0607=2) | (TMode=1 & thv_c0607=2)) & VRn { val:8 = 0; replicate4to8(*:4 VRn, val); export val; }
# Display helper: D register written with the "[]" all-lanes suffix.
vld1Dd3: Dreg^"[]" is Dreg { export Dreg; }
# Recursive list builder writing the replicated value (mult_dat8) into each
# listed D register; counter/regNum context fields drive the recursion.
buildVld1DdList3: is counter=0 { }
buildVld1DdList3: vld1Dd3 is counter=1 & vld1Dd3 [ counter=0; regNum=regNum+1; ] { vld1Dd3 = mult_dat8; }
buildVld1DdList3: vld1Dd3,buildVld1DdList3 is vld1Dd3 & buildVld1DdList3 [ counter=counter-1; regNum=regNum+1; ] { vld1Dd3 = mult_dat8; build buildVld1DdList3; }
# One-register list form (bit 5 clear); exports the list length.
vld1DdList3: "{"^buildVld1DdList3^"}" is TMode=0 & c0505=0 & D22 & c1215 & buildVld1DdList3 [ regNum=(D22<<4)+c1215-1; counter=1; ] { export 1:4; }
# Remaining vld1DdList3 forms: two registers (c0505=1) and the Thumb
# encodings; each exports the number of registers in the list.
vld1DdList3: "{"^buildVld1DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & buildVld1DdList3 [ regNum=(D22<<4)+c1215-1; counter=2; ] { export 2:4; }
vld1DdList3: "{"^buildVld1DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld1DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; counter=1; ] { export 1:4; }
vld1DdList3: "{"^buildVld1DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld1DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; counter=2; ] { export 2:4; }

# Optional alignment suffix for VLD1 (single element to all lanes).
vld1Align3: is TMode=0 & c0404=0 { }
vld1Align3: ":16" is TMode=0 & c0404=1 & c0607=1 { }
vld1Align3: ":32" is TMode=0 & c0404=1 & c0607=2 { }
vld1Align3: is TMode=1 & thv_c0404=0 { }
vld1Align3: ":16" is TMode=1 & thv_c0404=1 & thv_c0607=1 { }
vld1Align3: ":32" is TMode=1 & thv_c0404=1 & thv_c0607=2 { }

RnAligned3: "["^VRn^vld1Align3^"]" is VRn & vld1Align3 { export VRn; }

# Legal size/alignment combinations (explicit alignment only for 16/32-bit).
@define vld1Constrain "((c0607=0 & c0404=0) | c0607=1 | c0607=2)"
@define T_vld1Constrain "((thv_c0607=0 & thv_c0404=0) | thv_c0607=1 | thv_c0607=2)"

# VLD1 (single element to all lanes): replicate the element at [Rn] into
# every lane of each listed register.  No-writeback / immediate-writeback /
# register-writeback forms.
:vld1.^esize0607 vld1DdList3,RnAligned3 is ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=12 & c0003=15 & $(vld1Constrain)) |
    ($(TMODE_F) & thv_c2327=19 & thv_c2021=2 & thv_c0811=12 & thv_c0003=15 & $(T_vld1Constrain)) & esize0607 & RnAligned3 & vld1RnReplicate & vld1DdList3
{
    mult_dat8 = vld1RnReplicate;
    build vld1DdList3;
}

:vld1.^esize0607 vld1DdList3,RnAligned3^"!" is ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=12 & c0003=13 & $(vld1Constrain)) |
    ($(TMODE_F) & thv_c2327=19 & thv_c2021=2 & thv_c0811=12 & thv_c0003=13 & $(T_vld1Constrain)) & esize0607 & RnAligned3 & vld1RnReplicate & vld1DdList3
{
    mult_dat8 = vld1RnReplicate;
    build vld1DdList3;
    # writeback by one element
    RnAligned3 = RnAligned3 + esize0607;
}

:vld1.^esize0607 vld1DdList3,RnAligned3,VRm is ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=12 & $(vld1Constrain)) |
    ($(TMODE_F) & thv_c2327=19 & thv_c2021=2 & thv_c0811=12 & $(T_vld1Constrain)) & esize0607 & VRm & RnAligned3 & vld1RnReplicate & vld1DdList3
{
    mult_dat8 = vld1RnReplicate;
    build vld1DdList3;
    RnAligned3 = RnAligned3 + VRm;
}

#######
# VLD2 (multiple 2-element structures)
#

# De-interleave loop: elements alternate between Dreg and the register
# regInc positions above it.  mult_dat8 counts remaining element pairs.
# NOTE(review): the p-code loop labels were lost in extraction ("goto ;"
# had no target); restored as <loop>/<done> in each variant below.
vld2Dd: Dreg is (($(AMODE) & c0607=0) | ($(TMODE_F) & thv_c0607=0)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
@endif # ENDIAN = "big"
    mult_dat8 = 8;
    <loop>
    *[register]:1 ptr1 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    *[register]:1 ptr2 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 1;
    ptr2 = ptr2 + 1;
    goto <loop>;
    <done>
}

vld2Dd: Dreg is (($(AMODE) & c0607=1) | ($(TMODE_F) & thv_c0607=1)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
@endif # ENDIAN = "big"
    mult_dat8 = 4;
    <loop>
    *[register]:2 ptr1 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    *[register]:2 ptr2 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 2;
    ptr2 = ptr2 + 2;
    goto <loop>;
    <done>
}

vld2Dd: Dreg is (($(AMODE) & c0607=2) | ($(TMODE_F) & thv_c0607=2)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
@endif # ENDIAN = "big"
    mult_dat8 = 2;
    <loop>
    *[register]:4 ptr1 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    *[register]:4 ptr2 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 4;
    ptr2 = ptr2 + 4;
    goto <loop>;
    <done>
}

# List builders: A performs the loads (first half of the display list),
# B only prints the second half of the register list.
buildVld2DdListA: is counter=0 { }
buildVld2DdListA: vld2Dd,buildVld2DdListA is vld2Dd & buildVld2DdListA & esize0607 [ counter=counter-1; regNum=regNum+1; ] { build vld2Dd; build buildVld2DdListA; }

buildVld2DdListB: is counter2=0 { }
buildVld2DdListB: Dreg2 is Dreg2 & counter2=1 & esize0607 [ counter2=0; reg2Num=reg2Num+1; ] { }
buildVld2DdListB: Dreg2,buildVld2DdListB is Dreg2 & buildVld2DdListB & esize0607 [ counter2=counter2-1; reg2Num=reg2Num+1; ] { }

# Register-list forms (type field c0811): 8 = {Dd,Dd+1}, 9 = {Dd,Dd+2},
# 3 = {Dd,Dd+1,Dd+2,Dd+3}.  Exports the number of registers in the list.
vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=0 & c0811=8 & D22 & c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(D22<<4)+c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=0 & c0811=9 & D22 & c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=0 & c0811=3 & D22 & c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVld2DdListA; build buildVld2DdListB; export 4:4; }
vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=8 & thv_D22 & thv_c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=9 & thv_D22 & thv_c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVld2DdListA; build buildVld2DdListB; export 2:4; }
vld2DdList: "{"^buildVld2DdListA^buildVld2DdListB^"}" is TMode=1 & thv_c0811=3 & thv_D22 & thv_c1215 & buildVld2DdListA & buildVld2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVld2DdListA; build buildVld2DdListB; export 4:4; }

@define Vld2DdList "(c0811=3 | c0811=8 | c0811=9)"
@define thv_Vld2DdList "(thv_c0811=3 | thv_c0811=8 | thv_c0811=9)"

# VLD2 (multiple 2-element structures) constructors.
:vld2.^esize0607 vld2DdList,RnAligned45 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=15 & $(Vld2DdList) ) |
     ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003=15 & $(thv_Vld2DdList) ) ) & RnAligned45 & esize0607 & vld2DdList
{
    mult_addr = RnAligned45;
    build vld2DdList;
}

:vld2.^esize0607 vld2DdList,RnAligned45^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003=13 & $(Vld2DdList) ) |
     ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003=13 & $(thv_Vld2DdList) ) ) & RnAligned45 & esize0607 & vld2DdList
{
    mult_addr = RnAligned45;
    build vld2DdList;
    # writeback: 8 bytes per D register transferred
    RnAligned45 = RnAligned45 + (8 * vld2DdList);
}

:vld2.^esize0607 vld2DdList,RnAligned45,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0607<3 & c0003 & $(Vld2DdList) ) |
     ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0607<3 & thv_c0003 & $(thv_Vld2DdList) ) ) & VRm & RnAligned45 & esize0607 & vld2DdList
{
    mult_addr = RnAligned45;
    build vld2DdList;
    RnAligned45 = RnAligned45 + VRm;
}

#######
# VLD2 (single 2-element structure to one lane)
#

# Lane index from bits 5..7 shifted by the size field.
vld2Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; }
vld2Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; }

# Store one element into the selected lane (index scaled by element size).
vld2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dreg + vld2Index; *[register]:1 ptr = *:1 mult_addr; }
vld2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dreg + (vld2Index * 2); *[register]:2 ptr = *:2 mult_addr; }
vld2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dreg + (vld2Index * 4); *[register]:4 ptr = *:4 mult_addr; }

# Optional alignment suffix for the single-lane form.
vld2Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { }
vld2Align2: ":16" is TMode=0 & c1011=0 & c0404=1 { }
vld2Align2: ":32" is TMode=0 & c1011=1 & c0404=1 { }
vld2Align2: ":64" is TMode=0 & c1011=2 & c0405=1 { }
vld2Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { }
vld2Align2: ":16" is TMode=1 & thv_c1011=0 & thv_c0404=1 { }
vld2Align2: ":32" is TMode=1 & thv_c1011=1 & thv_c0404=1 { }
vld2Align2: ":64" is TMode=1 & thv_c1011=2 & thv_c0405=1 { }

vld2RnAligned2: "["^VRn^vld2Align2^"]" is VRn & vld2Align2 { export VRn; }

# Two-register list builder; mult_addr advances by one element between
# the two lane stores.
buildVld2DdList2: is counter=0 { }
buildVld2DdList2: vld2DdElement2 is counter=1 & vld2DdElement2 [ counter=0; regNum=regNum+regInc; ] { build vld2DdElement2; }
buildVld2DdList2: vld2DdElement2,buildVld2DdList2 is vld2DdElement2 & buildVld2DdList2 & esize1011 [ counter=counter-1; regNum=regNum+regInc; ] { build vld2DdElement2; mult_addr = mult_addr + esize1011; build buildVld2DdList2; }

vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=0 & D22 & c1215 & buildVld2DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=2; ] { } # Single
vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVld2DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=2; ] { } # Double
vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVld2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single
vld2DdList2: "{"^buildVld2DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVld2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double

# VLD2 (single 2-element structure to one lane) constructors.
:vld2.^esize1011 vld2DdList2,vld2RnAligned2 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=15 ) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003=15 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2
{
    mult_addr = vld2RnAligned2;
    build vld2DdList2;
}

:vld2.^esize1011 vld2DdList2,vld2RnAligned2^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003=13 ) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003=13 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2
{
    mult_addr = vld2RnAligned2;
    build vld2DdList2;
    # writeback: two elements
    vld2RnAligned2 = vld2RnAligned2 + (2 * esize1011);
}

:vld2.^esize1011 vld2DdList2,vld2RnAligned2,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=1 & c0003 ) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=1 & thv_c0003 ) ) & esize1011 & VRm & vld2RnAligned2 & vld2DdList2
{
    mult_addr = vld2RnAligned2;
    build vld2DdList2;
    vld2RnAligned2 = vld2RnAligned2 + VRm;
}

#######
# VLD2 (single 2-element structure to all lanes)
#

# Replicate the element at mult_addr across a 64-bit value; shared by the
# VLD2/VLD3/VLD4 all-lanes forms.
vld234Replicate: is ((TMode=0 & c0607=0) | (TMode=1 & thv_c0607=0)) { val:8 = 0; replicate1to8(*:1 mult_addr, val); export val; }
vld234Replicate: is ((TMode=0 & c0607=1) | (TMode=1 & thv_c0607=1)) { val:8 = 0; replicate2to8(*:2 mult_addr, val); export val; }
vld234Replicate: is ((TMode=0 & c0607=2) | (TMode=1 & thv_c0607=2)) { val:8 = 0; replicate4to8(*:4 mult_addr, val); export val; }

vld2Align3: is TMode=0 & c0404=0 { }
vld2Align3: ":16" is TMode=0 & c0404=1 & c0607=0 { }
vld2Align3: ":32" is TMode=0 & c0404=1 & c0607=1 { }
vld2Align3: ":64" is TMode=0 & c0404=1 & c0607=2 { }
vld2Align3: is TMode=1 & thv_c0404=0 { }
vld2Align3: ":16" is TMode=1 & thv_c0404=1 & thv_c0607=0 { }
vld2Align3: ":32" is TMode=1 & thv_c0404=1 & thv_c0607=1 { }
vld2Align3: ":64" is TMode=1 & thv_c0404=1 & thv_c0607=2 { }

vld2RnAligned3: "["^VRn^vld2Align3^"]" is VRn & vld2Align3 { export VRn; }
# Shared all-lanes list builder for VLD2/VLD3/VLD4: each listed register
# gets the replicated element; mult_addr advances by one element between
# registers (the structure is interleaved in memory).
buildVld234DdList3: is counter=0 { }
buildVld234DdList3: Dreg^"[]" is counter=1 & Dreg & vld234Replicate [ counter=0; regNum=regNum+regInc; ] { Dreg = vld234Replicate; }
buildVld234DdList3: Dreg^"[]",buildVld234DdList3 is Dreg & buildVld234DdList3 & vld234Replicate & esize0607 [ counter=counter-1; regNum=regNum+regInc; ] { Dreg = vld234Replicate; mult_addr = mult_addr + esize0607; build buildVld234DdList3; }

vld2DdList3: "{"^buildVld234DdList3^"}" is TMode=0 & c0505=0 & D22 & c1215 & buildVld234DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=2; ] { } # Single
vld2DdList3: "{"^buildVld234DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & buildVld234DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=2; ] { } # Double
vld2DdList3: "{"^buildVld234DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld234DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single
vld2DdList3: "{"^buildVld234DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld234DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double

# VLD2 (single 2-element structure to all lanes) constructors.
:vld2.^esize0607 vld2DdList3,vld2RnAligned3 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=15 ) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003=15 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3
{
    mult_addr = vld2RnAligned3;
    build vld2DdList3;
}

:vld2.^esize0607 vld2DdList3,vld2RnAligned3^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003=13 ) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003=13 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3
{
    mult_addr = vld2RnAligned3;
    build vld2DdList3;
    # writeback: two elements
    vld2RnAligned3 = vld2RnAligned3 + 2 * esize0607;
}

:vld2.^esize0607 vld2DdList3,vld2RnAligned3,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=13 & c0607<3 & c0003) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=13 & thv_c0607<3 & thv_c0003 ) ) & esize0607 & VRm & vld2RnAligned3 & vld2DdList3
{
    mult_addr = vld2RnAligned3;
    build vld2DdList3;
    vld2RnAligned3 = vld2RnAligned3 + VRm;
}

#######
# VLD3 (multiple 3-element structures)
#

vld3Align: is TMode=0 & c0404=0 { }
vld3Align: ":64" is TMode=0 & c0404=1 { }
vld3Align: is TMode=1 & thv_c0404=0 { }
vld3Align: ":64" is TMode=1 & thv_c0404=1 { }

vld3RnAligned: "["^VRn^vld3Align^"]" is VRn & vld3Align { export VRn; }

# De-interleave loop across three registers spaced regInc apart.
# NOTE(review): loop labels were lost in extraction ("goto ;" had no
# target); restored as <loop>/<done> in each variant below.
vld3Dd: Dreg is (($(AMODE) & c0607=0) | ($(TMODE_F) & thv_c0607=0)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
    ptr3:4 = &Dreg + (regInc * 16);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
    ptr3:4 = &Dreg - (regInc * 16);
@endif # ENDIAN = "big"
    mult_dat8 = 8;
    <loop>
    *[register]:1 ptr1 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    *[register]:1 ptr2 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    *[register]:1 ptr3 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 1;
    ptr2 = ptr2 + 1;
    ptr3 = ptr3 + 1;
    goto <loop>;
    <done>
}

vld3Dd: Dreg is (($(AMODE) & c0607=1) | ($(TMODE_F) & thv_c0607=1)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
    ptr3:4 = &Dreg + (regInc * 16);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
    ptr3:4 = &Dreg - (regInc * 16);
@endif # ENDIAN = "big"
    mult_dat8 = 4;
    <loop>
    *[register]:2 ptr1 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    *[register]:2 ptr2 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    *[register]:2 ptr3 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 2;
    ptr2 = ptr2 + 2;
    ptr3 = ptr3 + 2;
    goto <loop>;
    <done>
}

vld3Dd: Dreg is (($(AMODE) & c0607=2) | ($(TMODE_F) & thv_c0607=2)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
    ptr3:4 = &Dreg + (regInc * 16);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
    ptr3:4 = &Dreg - (regInc * 16);
@endif # ENDIAN = "big"
    mult_dat8 = 2;
    <loop>
    *[register]:4 ptr1 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    *[register]:4 ptr2 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    *[register]:4 ptr3 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 4;
    ptr2 = ptr2 + 4;
    ptr3 = ptr3 + 4;
    goto <loop>;
    <done>
}

# Have to build only once, but because Dreg depends on regNum, have to
# reset it back to what it was at the start.
buildVld3DdList: is counter=0 & vld3Dd [ regNum=regNum-3*regInc; ] { build vld3Dd; }
buildVld3DdList: Dreg^buildVld3DdList is counter=1 & Dreg & buildVld3DdList [ counter=0; regNum=regNum+regInc; ] { }
buildVld3DdList: Dreg,buildVld3DdList is Dreg & buildVld3DdList [ counter=counter-1; regNum=regNum+regInc; ] { }

vld3DdList: "{"^buildVld3DdList^"}" is TMode=0 & c0811=4 & D22 & c1215 & buildVld3DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single
vld3DdList: "{"^buildVld3DdList^"}" is TMode=0 & c0811=5 & D22 & c1215 & buildVld3DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double
vld3DdList: "{"^buildVld3DdList^"}" is TMode=1 & thv_c0811=4 & thv_D22 & thv_c1215 & buildVld3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single
vld3DdList: "{"^buildVld3DdList^"}" is TMode=1 & thv_c0811=5 & thv_D22 & thv_c1215 & buildVld3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double

# VLD3 (multiple 3-element structures) constructors.
:vld3.^esize0607 vld3DdList,vld3RnAligned is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=15 ) |
     ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 & thv_c0003=15) ) & vld3RnAligned & esize0607 & vld3DdList
{
    mult_addr = vld3RnAligned;
    build vld3DdList;
}

:vld3.^esize0607 vld3DdList,vld3RnAligned^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 & c0003=13 ) |
     ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 & thv_c0003=13) ) & vld3RnAligned & esize0607 & vld3DdList
{
    mult_addr = vld3RnAligned;
    build vld3DdList;
    # writeback: three 8-byte D registers
    vld3RnAligned = vld3RnAligned + (8 * 3);
}

:vld3.^esize0607 vld3DdList,vld3RnAligned,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & (c0811=4 | c0811=5) & c0607<3 & c0505=0 ) |
     ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & (thv_c0811=4 | thv_c0811=5) & thv_c0607<3 & thv_c0505=0 ) ) & VRm & vld3RnAligned & esize0607 & vld3DdList
{
    mult_addr = vld3RnAligned;
    build vld3DdList;
    vld3RnAligned = vld3RnAligned + VRm;
}

#######
# VLD3 (single 3-element structure to one lane)
#

vld3Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; }
vld3Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; }

# Lane store (index scaled by element size).
vld3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dreg + vld3Index; *[register]:1 ptr = *:1 mult_addr; }
vld3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dreg + (vld3Index * 2); *[register]:2 ptr = *:2 mult_addr; }
vld3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dreg + (vld3Index * 4); *[register]:4 ptr = *:4 mult_addr; }

vld3Rn: "["^VRn^"]" is VRn { export VRn; }

buildVld3DdList2: is counter=0 { }
buildVld3DdList2: vld3DdElement2 is counter=1 & vld3DdElement2 [ counter=0; regNum=regNum+regInc; ] { build vld3DdElement2; }
buildVld3DdList2: vld3DdElement2,buildVld3DdList2 is vld3DdElement2 & buildVld3DdList2 & esize1011 [ counter=counter-1; regNum=regNum+regInc; ] { build vld3DdElement2; mult_addr = mult_addr + esize1011; build buildVld3DdList2; }

vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=0 & D22 & c1215 & buildVld3DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single
vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=0 & ((c1011=1 & c0405=2) | (c1011=2 & c0406=4)) & D22 & c1215 & buildVld3DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double
vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVld3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single
vld3DdList2: "{"^buildVld3DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0405=2) | (thv_c1011=2 & thv_c0406=4)) & thv_D22 & thv_c1215 & buildVld3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double

# VLD3 (single 3-element structure to one lane) constructors.
:vld3.^esize1011 vld3DdList2,vld3Rn is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=15) |
     ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2 & thv_c0003=15) ) & vld3Rn & esize1011 & vld3DdList2
{
    mult_addr = vld3Rn;
    build vld3DdList2;
}

:vld3.^esize1011 vld3DdList2,vld3Rn^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2 & c0003=13) |
     ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2 & thv_c0003=13) ) & vld3Rn & esize1011 & vld3DdList2
{
    mult_addr = vld3Rn;
    build vld3DdList2;
    # writeback: three elements
    vld3Rn = vld3Rn + (3 * esize1011);
}

:vld3.^esize1011 vld3DdList2,vld3Rn,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=2) |
     ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=2) ) & VRm & vld3Rn & esize1011 & vld3DdList2
{
    mult_addr = vld3Rn;
    build vld3DdList2;
    vld3Rn = vld3Rn + VRm;
}

#######
# VLD3 (single 3-element structure to all lanes)
#

vld3DdList3: "{"^buildVld234DdList3^"}" is TMode=0 & c0505=0 & D22 & c1215 & buildVld234DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single
vld3DdList3: "{"^buildVld234DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & buildVld234DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double
vld3DdList3: "{"^buildVld234DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld234DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single
vld3DdList3: "{"^buildVld234DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld234DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double

:vld3.^esize0607 vld3DdList3,vld3Rn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=15) |
     ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0 & thv_c0003=15) ) & vld3Rn & esize0607 & vld3DdList3
{
    mult_addr = vld3Rn;
    build vld3DdList3;
}

:vld3.^esize0607 vld3DdList3,vld3Rn^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0 & c0003=13) |
     ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0 & thv_c0003=13) ) & vld3Rn & esize0607 & vld3DdList3
{
    mult_addr = vld3Rn;
    build vld3DdList3;
    # writeback: three elements
    vld3Rn = vld3Rn + 3 * esize0607;
}

:vld3.^esize0607 vld3DdList3,vld3Rn,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=14 & c0607<3 & c0404=0) |
     ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=14 & thv_c0404=0) ) & VRm & vld3Rn & esize0607 & vld3DdList3
{
    mult_addr = vld3Rn;
    build vld3DdList3;
    vld3Rn = vld3Rn + VRm;
}

#######
# VLD4 (single 4-element structure to one lane)
#

vld4Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; }
vld4Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; }

# Lane store.  FIX(review): the lane index must be scaled by the element
# size for the 16- and 32-bit forms, matching vld1DdElement2,
# vld2DdElement2 and vld3DdElement2 above; the unscaled index wrote to
# the wrong lane offset.
vld4DdElement2: Dreg^"["^vld4Index^"]" is Dreg & vld4Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dreg + vld4Index; *[register]:1 ptr = *:1 mult_addr; }
vld4DdElement2: Dreg^"["^vld4Index^"]" is Dreg & vld4Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dreg + (vld4Index * 2); *[register]:2 ptr = *:2 mult_addr; }
vld4DdElement2: Dreg^"["^vld4Index^"]" is Dreg & vld4Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dreg + (vld4Index * 4); *[register]:4 ptr = *:4 mult_addr; }

vld4Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { }
vld4Align2: ":32" is TMode=0 & c1011=0 & c0404=1 { }
vld4Align2: ":64" is TMode=0 & ((c1011=1 & c0404=1) | (c1011=2 & c0405=1)) { }
vld4Align2: ":128" is TMode=0 & c1011=2 & c0405=2 { }
vld4Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { }
vld4Align2: ":32" is TMode=1 & thv_c1011=0 & thv_c0404=1 { }
vld4Align2: ":64" is TMode=1 & ((thv_c1011=1 & thv_c0404=1) | (thv_c1011=2 & thv_c0405=1)) { }
vld4Align2: ":128" is TMode=1 & thv_c1011=2 & thv_c0405=2 { }

vld4RnAligned2: "["^VRn^vld4Align2^"]" is VRn & vld4Align2 { export VRn; }

buildVld4DdList2: is counter=0 { }
buildVld4DdList2: vld4DdElement2 is counter=1 & vld4DdElement2 [ counter=0; regNum=regNum+regInc; ] { build vld4DdElement2; }
buildVld4DdList2: vld4DdElement2,buildVld4DdList2 is vld4DdElement2 & buildVld4DdList2 & esize1011 [ counter=counter-1; regNum=regNum+regInc; ] { build vld4DdElement2; mult_addr = mult_addr + esize1011; build buildVld4DdList2; }

vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=0 & D22 & c1215 & buildVld4DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single
vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVld4DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double
vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVld4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single
vld4DdList2: "{"^buildVld4DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVld4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double

# VLD4 (single 4-element structure to one lane) constructors.
:vld4.^esize1011 vld4DdList2,vld4RnAligned2 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=15) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003=15 ) ) & esize1011 & vld4RnAligned2 & vld4DdList2
{
    mult_addr = vld4RnAligned2;
    build vld4DdList2;
}

:vld4.^esize1011 vld4DdList2,vld4RnAligned2^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003=13) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003=13 ) ) & esize1011 & vld4RnAligned2 & vld4DdList2
{
    mult_addr = vld4RnAligned2;
    build vld4DdList2;
    # writeback: four elements
    vld4RnAligned2 = vld4RnAligned2 + (4 * esize1011);
}

:vld4.^esize1011 vld4DdList2,vld4RnAligned2,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c1011<3 & c0809=3 & c0003) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c1011<3 & thv_c0809=3 & thv_c0003 ) ) & esize1011 & VRm & vld4RnAligned2 & vld4DdList2
{
    mult_addr = vld4RnAligned2;
    build vld4DdList2;
    vld4RnAligned2 = vld4RnAligned2 + VRm;
}

#######
# VLD4 (single 4-element structure to all lanes)
#

vld4size0607: "8" is TMode=0 & c0607=0 { export 1:4; }
vld4size0607: "16" is TMode=0 & c0607=1 { export 2:4; }
vld4size0607: "32" is TMode=0 & c0607=2 { export 4:4; }
vld4size0607: "32" is TMode=0 & c0607=3 { export 4:4; } # see VLD4 (single 4-element structure to all lanes)
vld4size0607: "8" is TMode=1 & thv_c0607=0 { export 1:4; }
vld4size0607: "16" is TMode=1 & thv_c0607=1 { export 2:4; }
vld4size0607: "32" is TMode=1 & thv_c0607=2 { export 4:4; }
vld4size0607: "32" is TMode=1 & thv_c0607=3 { export 4:4; } # see VLD4 (single 4-element structure to all lanes)

vld4Align3: is TMode=0 & c0404=0 { }
vld4Align3: ":32" is TMode=0 & c0404=1 & c0607=0 { }
vld4Align3: ":64" is TMode=0 & c0404=1 & (c0607=1 | c0607=2) { }
vld4Align3: ":128" is TMode=0 & c0404=1 & c0607=3 { }
vld4Align3: is TMode=1 & thv_c0404=0 { }
vld4Align3: ":32" is TMode=1 & thv_c0404=1 & thv_c0607=0 { }
vld4Align3: ":64" is TMode=1 & thv_c0404=1 & (thv_c0607=1 | thv_c0607=2) { }
vld4Align3: ":128" is TMode=1 & thv_c0404=1 & thv_c0607=3 { }

vld4RnAligned3: "["^VRn^vld4Align3^"]" is VRn & vld4Align3 { export VRn; }

vld4DdList3: "{"^buildVld234DdList3^"}" is TMode=0 & c0505=0 & D22 & c1215 & buildVld234DdList3 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single
vld4DdList3: "{"^buildVld234DdList3^"}" is TMode=0 & c0505=1 & D22 & c1215 & buildVld234DdList3 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double
vld4DdList3: "{"^buildVld234DdList3^"}" is TMode=1 & thv_c0505=0 & thv_D22 & thv_c1215 & buildVld234DdList3 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single
vld4DdList3: "{"^buildVld234DdList3^"}" is TMode=1 & thv_c0505=1 & thv_D22 & thv_c1215 & buildVld234DdList3 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double

# VLD4 (single 4-element structure to all lanes) constructors.
:vld4.^vld4size0607 vld4DdList3,vld4RnAligned3 is ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=0xf & c0003=0xf) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=0xf & thv_c0003=0xf) & vld4size0607 & vld4RnAligned3 & vld4DdList3
{
    mult_addr = vld4RnAligned3;
    build vld4DdList3;
}

:vld4.^vld4size0607 vld4DdList3,vld4RnAligned3^"!" is ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=0xf & c0003=0xd) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=0xf & thv_c0003=0xd) & vld4size0607 & vld4RnAligned3 & vld4DdList3
{
    mult_addr = vld4RnAligned3;
    build vld4DdList3;
    # writeback: four elements
    vld4RnAligned3 = vld4RnAligned3 + (4 * vld4size0607);
}

:vld4.^vld4size0607 vld4DdList3,vld4RnAligned3,VRm is ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=2 & c0811=0xf) |
     ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=2 & thv_c0811=0xf) & vld4size0607 & VRm & vld4RnAligned3 & vld4DdList3
{
    mult_addr = vld4RnAligned3;
    build vld4DdList3;
    vld4RnAligned3 = vld4RnAligned3 + VRm;
}

#######
# VLD4 (multiple 4-element structures)
#

vld4Align: is TMode=0 & c0405=0 { }
vld4Align: ":64" is TMode=0 & c0405=1 { }
vld4Align: ":128" is TMode=0 & c0405=2 { }
vld4Align: ":256" is TMode=0 & c0405=3 { }
vld4Align: is TMode=1 & thv_c0405=0 { }
vld4Align: ":64" is TMode=1 & thv_c0405=1 { }
vld4Align: ":128" is TMode=1 & thv_c0405=2 { }
vld4Align: ":256" is TMode=1 & thv_c0405=3 { }

vld4RnAligned: "["^VRn^vld4Align^"]" is VRn & vld4Align { export VRn; }

# De-interleave loop across four registers spaced regInc apart.
# NOTE(review): loop labels were lost in extraction ("goto ;" had no
# target); restored as <loop>/<done> in each variant below.
vld4Dd: Dreg is (($(AMODE) & c0607=0) | ($(TMODE_F) & thv_c0607=0)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
    ptr3:4 = &Dreg + (regInc * 16);
    ptr4:4 = &Dreg + (regInc * 24);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
    ptr3:4 = &Dreg - (regInc * 16);
    ptr4:4 = &Dreg - (regInc * 24);
@endif # ENDIAN = "big"
    mult_dat8 = 8;
    <loop>
    *[register]:1 ptr1 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    *[register]:1 ptr2 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    *[register]:1 ptr3 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    *[register]:1 ptr4 = *:1 mult_addr;
    mult_addr = mult_addr + 1;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 1;
    ptr2 = ptr2 + 1;
    ptr3 = ptr3 + 1;
    ptr4 = ptr4 + 1;
    goto <loop>;
    <done>
}

vld4Dd: Dreg is (($(AMODE) & c0607=1) | ($(TMODE_F) & thv_c0607=1)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
    ptr3:4 = &Dreg + (regInc * 16);
    ptr4:4 = &Dreg + (regInc * 24);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
    ptr3:4 = &Dreg - (regInc * 16);
    ptr4:4 = &Dreg - (regInc * 24);
@endif # ENDIAN = "big"
    mult_dat8 = 4;
    <loop>
    *[register]:2 ptr1 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    *[register]:2 ptr2 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    *[register]:2 ptr3 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    *[register]:2 ptr4 = *:2 mult_addr;
    mult_addr = mult_addr + 2;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 2;
    ptr2 = ptr2 + 2;
    ptr3 = ptr3 + 2;
    ptr4 = ptr4 + 2;
    goto <loop>;
    <done>
}

vld4Dd: Dreg is (($(AMODE) & c0607=2) | ($(TMODE_F) & thv_c0607=2)) & Dreg & regInc
{
    ptr1:4 = &Dreg;
@if ENDIAN == "little"
    ptr2:4 = &Dreg + (regInc * 8);
    ptr3:4 = &Dreg + (regInc * 16);
    ptr4:4 = &Dreg + (regInc * 24);
@else # ENDIAN == "big"
    ptr2:4 = &Dreg - (regInc * 8);
    ptr3:4 = &Dreg - (regInc * 16);
    ptr4:4 = &Dreg - (regInc * 24);
@endif # ENDIAN = "big"
    mult_dat8 = 2;
    <loop>
    *[register]:4 ptr1 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    *[register]:4 ptr2 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    *[register]:4 ptr3 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    *[register]:4 ptr4 = *:4 mult_addr;
    mult_addr = mult_addr + 4;
    mult_dat8 = mult_dat8 - 1;
    if(mult_dat8 == 0) goto <done>;
    ptr1 = ptr1 + 4;
    ptr2 = ptr2 + 4;
    ptr3 = ptr3 + 4;
    ptr4 = ptr4 + 4;
    goto <loop>;
    <done>
}

# Have to build only once, but because Dreg depends on regNum, have to
# reset it back to what it was at the start.
buildVld4DdList: is counter=0 & vld4Dd [ regNum=regNum-4*regInc; ] { build vld4Dd; }
buildVld4DdList: Dreg^buildVld4DdList is counter=1 & Dreg & buildVld4DdList [ counter=0; regNum=regNum+regInc; ] { }
buildVld4DdList: Dreg,buildVld4DdList is Dreg & buildVld4DdList [ counter=counter-1; regNum=regNum+regInc; ] { }

vld4DdList: "{"^buildVld4DdList^"}" is TMode=0 & c0808=0 & D22 & c1215 & buildVld4DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single
vld4DdList: "{"^buildVld4DdList^"}" is TMode=0 & c0808=1 & D22 & c1215 & buildVld4DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double
vld4DdList: "{"^buildVld4DdList^"}" is TMode=1 & thv_c0808=0 & thv_D22 & thv_c1215 & buildVld4DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single
vld4DdList: "{"^buildVld4DdList^"}" is TMode=1 & thv_c0808=1 & thv_D22 & thv_c1215 & buildVld4DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double

# VLD4 (multiple 4-element structures) constructors.
:vld4.^esize0607 vld4DdList,vld4RnAligned is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=15 ) |
     ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 & thv_c0003=15 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList
{
    mult_addr = vld4RnAligned;
    build vld4DdList;
}

:vld4.^esize0607 vld4DdList,vld4RnAligned^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3 & c0003=13 ) |
     ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 & thv_c0003=13 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList
{
    mult_addr = vld4RnAligned;
    build vld4DdList;
    # writeback: four 8-byte D registers
    vld4RnAligned = vld4RnAligned + (8 * 4);
}

:vld4.^esize0607 vld4DdList,vld4RnAligned,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=2 & c0911=0 & c0607<3) |
     ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=2 & thv_c0911=0 & thv_c0607<3 ) ) & esize0607 & VRm & vld4RnAligned & vld4DdList
{
    mult_addr = vld4RnAligned;
    build vld4DdList;
    vld4RnAligned = vld4RnAligned + VRm;
}

@endif # SIMD

@if defined(VFPv2) || defined(VFPv3) || defined(SIMD)

#######
# VLDM (A1)
#

# Base register; "!" displayed when the writeback bit (c2121) is set.
vldmRn: Rn is TMode=0 & Rn & c2121=0 { export Rn; }
vldmRn: Rn^"!" is TMode=0 & Rn & c2121=1 { export Rn; }
vldmRn: thv_Rn is TMode=1 & thv_Rn & thv_c2121=0 { export thv_Rn; }
vldmRn: thv_Rn^"!" is TMode=1 & thv_Rn & thv_c2121=1 { export thv_Rn; }

# Transfer size in bytes: the 8-bit immediate scaled by 4.
vldmOffset: value is $(AMODE) & immed [ value = immed << 2; ] { export *[const]:4 value; }
vldmOffset: value is TMode=1 & thv_immed [ value = thv_immed << 2; ] { export *[const]:4 value; }

# Base-register update: advance by 4*immed only when writeback is set.
vldmUpdate: immed is TMode=0 & vldmRn & c2121=0 & immed { }
vldmUpdate: immed is TMode=0 & vldmRn & c2121=1 & immed { vldmRn = vldmRn + (immed << 2); }
vldmUpdate: thv_immed is TMode=1 & vldmRn & thv_c2121=0 & thv_immed { }
vldmUpdate: thv_immed is TMode=1 & vldmRn & thv_c2121=1 & thv_immed { vldmRn = vldmRn + (thv_immed << 2); }

# Load successive 8-byte D registers from mult_addr.
buildVldmDdList: is counter=0 { }
buildVldmDdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+1; ] { Dreg = *mult_addr; mult_addr = mult_addr + 8; }
buildVldmDdList: Dreg,buildVldmDdList is Dreg & buildVldmDdList [ counter=counter-1; regNum=regNum+1; ] { Dreg = *mult_addr; mult_addr = mult_addr + 8; build buildVldmDdList; }

# D-register list: count is half the 8-bit immediate (two words per D reg).
vldmDdList: "{"^buildVldmDdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVldmDdList [ regNum=(D22<<4)+c1215 - 1; counter=c0007>>1; ] { }
# (next vldmDdList alternative continues past this chunk)
vldmDdList:
"{"^buildVldmDdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVldmDdList [ regNum=(thv_D22<<4)+thv_c1215 - 1; counter=thv_c0007>>1; ] { } :vldmia^COND vldmRn,vldmDdList is ( ($(AMODE) & c2327=0x19 & c2121 & c2020=1 & c0811=11 & c0000=0) | ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=1 & thv_c0811=11 & thv_c0000=0) ) & COND & vldmRn & vldmDdList & vldmOffset & vldmUpdate { mult_addr = vldmRn; build vldmDdList; build vldmUpdate; } :vldmdb^COND vldmRn,vldmDdList is ( ($(AMODE) & c2327=0x1a & c2121=1 & c2020=1 & c0811=11 & c0000=0) | ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=1 & thv_c0811=11 & thv_c0000=0 ) ) & COND & vldmRn & vldmDdList & vldmOffset { local start_addr = vldmRn - vldmOffset; mult_addr = start_addr; build vldmDdList; vldmRn = start_addr; } @endif # VFPv2 | VFPv3 | SIMD @if defined(VERSION_8) with : TMode=0 { fldmSet1: Dd_1 is Rn & Dd_1 { Dd_1 = * Rn; } fldmSet2: Dd_2 is Rn & Dd_2 & fldmSet1 { build fldmSet1; Dd_2 = *:8 (Rn + 8:4); } fldmSet3: Dd_3 is Rn & Dd_3 & fldmSet2 { build fldmSet2; Dd_3 = *:8 (Rn + 16:4); } fldmSet4: Dd_4 is Rn & Dd_4 & fldmSet3 { build fldmSet3; Dd_4 = *:8 (Rn + 24:4); } fldmSet5: Dd_5 is Rn & Dd_5 & fldmSet4 { build fldmSet4; Dd_5 = *:8 (Rn + 32:4); } fldmSet6: Dd_6 is Rn & Dd_6 & fldmSet5 { build fldmSet5; Dd_6 = *:8 (Rn + 40:4); } fldmSet7: Dd_7 is Rn & Dd_7 & fldmSet6 { build fldmSet6; Dd_7 = *:8 (Rn + 48:4); } fldmSet8: Dd_8 is Rn & Dd_8 & fldmSet7 { build fldmSet7; Dd_8 = *:8 (Rn + 56:4); } fldmSet9: Dd_9 is Rn & Dd_9 & fldmSet8 { build fldmSet8; Dd_9 = *:8 (Rn + 64:4); } fldmSet10: Dd_10 is Rn & Dd_10 & fldmSet9 { build fldmSet9; Dd_10 = *:8 (Rn + 72:4); } fldmSet11: Dd_11 is Rn & Dd_11 & fldmSet10 { build fldmSet10; Dd_11 = *:8 (Rn + 80:4); } fldmSet12: Dd_12 is Rn & Dd_12 & fldmSet11 { build fldmSet11; Dd_12 = *:8 (Rn + 88:4); } fldmSet13: Dd_13 is Rn & Dd_13 & fldmSet12 { build fldmSet12; Dd_13 = *:8 (Rn + 96:4); } fldmSet14: Dd_14 is Rn & Dd_14 & fldmSet13 { build fldmSet13; Dd_14 = 
*:8 (Rn + 104:4); } fldmSet15: Dd_15 is Rn & Dd_15 & fldmSet14 { build fldmSet14; Dd_15 = *:8 (Rn + 112:4); } fldmSet16: Dd_16 is Rn & Dd_16 & fldmSet15 { build fldmSet15; Dd_16 = *:8 (Rn + 120:4); } fldmSet: "{"^Dd_1^"}" is Dd_1 & c0007=3 & fldmSet1 { build fldmSet1; } fldmSet: "{"^Dd_1^"-"^fldmSet2^"}" is Dd_1 & c0007=5 & fldmSet2 { build fldmSet2; } fldmSet: "{"^Dd_1^"-"^fldmSet3^"}" is Dd_1 & c0007=7 & fldmSet3 { build fldmSet3; } fldmSet: "{"^Dd_1^"-"^fldmSet4^"}" is Dd_1 & c0007=9 & fldmSet4 { build fldmSet4; } fldmSet: "{"^Dd_1^"-"^fldmSet5^"}" is Dd_1 & c0007=11 & fldmSet5 { build fldmSet5; } fldmSet: "{"^Dd_1^"-"^fldmSet6^"}" is Dd_1 & c0007=13 & fldmSet6 { build fldmSet6; } fldmSet: "{"^Dd_1^"-"^fldmSet7^"}" is Dd_1 & c0007=15 & fldmSet7 { build fldmSet7; } fldmSet: "{"^Dd_1^"-"^fldmSet8^"}" is Dd_1 & c0007=17 & fldmSet8 { build fldmSet8; } fldmSet: "{"^Dd_1^"-"^fldmSet9^"}" is Dd_1 & c0007=19 & fldmSet9 { build fldmSet9; } fldmSet: "{"^Dd_1^"-"^fldmSet10^"}" is Dd_1 & c0007=21 & fldmSet10 { build fldmSet10; } fldmSet: "{"^Dd_1^"-"^fldmSet11^"}" is Dd_1 & c0007=23 & fldmSet11 { build fldmSet11; } fldmSet: "{"^Dd_1^"-"^fldmSet12^"}" is Dd_1 & c0007=25 & fldmSet12 { build fldmSet12; } fldmSet: "{"^Dd_1^"-"^fldmSet13^"}" is Dd_1 & c0007=27 & fldmSet13 { build fldmSet13; } fldmSet: "{"^Dd_1^"-"^fldmSet14^"}" is Dd_1 & c0007=29 & fldmSet14 { build fldmSet14; } fldmSet: "{"^Dd_1^"-"^fldmSet15^"}" is Dd_1 & c0007=31 & fldmSet15 { build fldmSet15; } fldmSet: "{"^Dd_1^"-"^fldmSet16^"}" is Dd_1 & c0007=33 & fldmSet16 { build fldmSet16; } fldmWback: Rn^"!" is c2121=1 & c2323=1 & c0007 & Rn { Rn = Rn + (4 * c0007:4); } fldmWback: Rn^"!" 
is c2121=1 & c2323=0 & c0007 & Rn { Rn = Rn - (4 * c0007:4); } fldmWback: Rn is c2121=0 & Rn { } } with : TMode=1 { fldmSet1: thv_Dd_1 is thv_Rn & thv_Dd_1 { thv_Dd_1 = * thv_Rn; } fldmSet2: thv_Dd_2 is thv_Rn & thv_Dd_2 & fldmSet1 { build fldmSet1; thv_Dd_2 = *:8 (thv_Rn + 8:4); } fldmSet3: thv_Dd_3 is thv_Rn & thv_Dd_3 & fldmSet2 { build fldmSet2; thv_Dd_3 = *:8 (thv_Rn + 16:4); } fldmSet4: thv_Dd_4 is thv_Rn & thv_Dd_4 & fldmSet3 { build fldmSet3; thv_Dd_4 = *:8 (thv_Rn + 24:4); } fldmSet5: thv_Dd_5 is thv_Rn & thv_Dd_5 & fldmSet4 { build fldmSet4; thv_Dd_5 = *:8 (thv_Rn + 32:4); } fldmSet6: thv_Dd_6 is thv_Rn & thv_Dd_6 & fldmSet5 { build fldmSet5; thv_Dd_6 = *:8 (thv_Rn + 40:4); } fldmSet7: thv_Dd_7 is thv_Rn & thv_Dd_7 & fldmSet6 { build fldmSet6; thv_Dd_7 = *:8 (thv_Rn + 48:4); } fldmSet8: thv_Dd_8 is thv_Rn & thv_Dd_8 & fldmSet7 { build fldmSet7; thv_Dd_8 = *:8 (thv_Rn + 56:4); } fldmSet9: thv_Dd_9 is thv_Rn & thv_Dd_9 & fldmSet8 { build fldmSet8; thv_Dd_9 = *:8 (thv_Rn + 64:4); } fldmSet10: thv_Dd_10 is thv_Rn & thv_Dd_10 & fldmSet9 { build fldmSet9; thv_Dd_10 = *:8 (thv_Rn + 72:4); } fldmSet11: thv_Dd_11 is thv_Rn & thv_Dd_11 & fldmSet10 { build fldmSet10; thv_Dd_11 = *:8 (thv_Rn + 80:4); } fldmSet12: thv_Dd_12 is thv_Rn & thv_Dd_12 & fldmSet11 { build fldmSet11; thv_Dd_12 = *:8 (thv_Rn + 88:4); } fldmSet13: thv_Dd_13 is thv_Rn & thv_Dd_13 & fldmSet12 { build fldmSet12; thv_Dd_13 = *:8 (thv_Rn + 96:4); } fldmSet14: thv_Dd_14 is thv_Rn & thv_Dd_14 & fldmSet13 { build fldmSet13; thv_Dd_14 = *:8 (thv_Rn + 104:4); } fldmSet15: thv_Dd_15 is thv_Rn & thv_Dd_15 & fldmSet14 { build fldmSet14; thv_Dd_15 = *:8 (thv_Rn + 112:4); } fldmSet16: thv_Dd_16 is thv_Rn & thv_Dd_16 & fldmSet15 { build fldmSet15; thv_Dd_16 = *:8 (thv_Rn + 120:4); } fldmSet: "{"^thv_Dd_1^"}" is thv_Dd_1 & thv_c0007=3 & fldmSet1 { build fldmSet1; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet2^"}" is thv_Dd_1 & thv_c0007=5 & fldmSet2 { build fldmSet2; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet3^"}" is thv_Dd_1 & 
thv_c0007=7 & fldmSet3 { build fldmSet3; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet4^"}" is thv_Dd_1 & thv_c0007=9 & fldmSet4 { build fldmSet4; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet5^"}" is thv_Dd_1 & thv_c0007=11 & fldmSet5 { build fldmSet5; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet6^"}" is thv_Dd_1 & thv_c0007=13 & fldmSet6 { build fldmSet6; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet7^"}" is thv_Dd_1 & thv_c0007=15 & fldmSet7 { build fldmSet7; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet8^"}" is thv_Dd_1 & thv_c0007=17 & fldmSet8 { build fldmSet8; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet9^"}" is thv_Dd_1 & thv_c0007=19 & fldmSet9 { build fldmSet9; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet10^"}" is thv_Dd_1 & thv_c0007=21 & fldmSet10 { build fldmSet10; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet11^"}" is thv_Dd_1 & thv_c0007=23 & fldmSet11 { build fldmSet11; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet12^"}" is thv_Dd_1 & thv_c0007=25 & fldmSet12 { build fldmSet12; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet13^"}" is thv_Dd_1 & thv_c0007=27 & fldmSet13 { build fldmSet13; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet14^"}" is thv_Dd_1 & thv_c0007=29 & fldmSet14 { build fldmSet14; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet15^"}" is thv_Dd_1 & thv_c0007=31 & fldmSet15 { build fldmSet15; } fldmSet: "{"^thv_Dd_1^"-"^fldmSet16^"}" is thv_Dd_1 & thv_c0007=33 & fldmSet16 { build fldmSet16; } fldmWback: thv_Rn^"!" is thv_bit21=1 & thv_bit23=1 & thv_c0007 & thv_Rn { thv_Rn = thv_Rn + (4 * thv_c0007:4); } fldmWback: thv_Rn^"!" 
is thv_bit21=1 & thv_bit23=0 & thv_c0007 & thv_Rn { thv_Rn = thv_Rn - (4 * thv_c0007:4); } fldmWback: thv_Rn is thv_bit21=0 & thv_Rn { } } :fldmdbx^COND fldmWback, fldmSet is $(AMODE) & COND & ARMcond=1 & c2327=0x1a & c2021=3 & c0811=0xb & c0000=1 & fldmWback & fldmSet { build fldmWback; build fldmSet; } :fldmiax^COND fldmWback, fldmSet is $(AMODE) & COND & ARMcond=1 & c2327=0x19 & c2020=1 & c0811=0xb & c0000=1 & fldmWback & fldmSet { build fldmSet; build fldmWback; } :fldmdbx^ItCond fldmWback, fldmSet is TMode=1 & ItCond & thv_c2331=0x1da & thv_c2021=3 & thv_c0811=0xb & fldmWback & fldmSet { build fldmWback; build fldmSet; } :fldmiax^ItCond fldmWback, fldmSet is TMode=1 & ItCond & thv_c2331=0x1d9 & thv_bit20=1 & thv_c0811=0xb & fldmWback & fldmSet { build fldmSet; build fldmWback; } with : TMode=0 { fstmSet1: Dd_1 is Rn & Dd_1 { * Rn = Dd_1; } fstmSet2: Dd_2 is Rn & Dd_2 & fstmSet1 { build fstmSet1; *:8 (Rn + 8:4) = Dd_2; } fstmSet3: Dd_3 is Rn & Dd_3 & fstmSet2 { build fstmSet2; *:8 (Rn + 16:4) = Dd_3; } fstmSet4: Dd_4 is Rn & Dd_4 & fstmSet3 { build fstmSet3; *:8 (Rn + 24:4) = Dd_4; } fstmSet5: Dd_5 is Rn & Dd_5 & fstmSet4 { build fstmSet4; *:8 (Rn + 32:4) = Dd_5; } fstmSet6: Dd_6 is Rn & Dd_6 & fstmSet5 { build fstmSet5; *:8 (Rn + 40:4) = Dd_6; } fstmSet7: Dd_7 is Rn & Dd_7 & fstmSet6 { build fstmSet6; *:8 (Rn + 48:4) = Dd_7; } fstmSet8: Dd_8 is Rn & Dd_8 & fstmSet7 { build fstmSet7; *:8 (Rn + 56:4) = Dd_8; } fstmSet9: Dd_9 is Rn & Dd_9 & fstmSet8 { build fstmSet8; *:8 (Rn + 64:4) = Dd_9; } fstmSet10: Dd_10 is Rn & Dd_10 & fstmSet9 { build fstmSet9; *:8 (Rn + 72:4) = Dd_10; } fstmSet11: Dd_11 is Rn & Dd_11 & fstmSet10 { build fstmSet10; *:8 (Rn + 80:4) = Dd_11; } fstmSet12: Dd_12 is Rn & Dd_12 & fstmSet11 { build fstmSet11; *:8 (Rn + 88:4) = Dd_12; } fstmSet13: Dd_13 is Rn & Dd_13 & fstmSet12 { build fstmSet12; *:8 (Rn + 96:4) = Dd_13; } fstmSet14: Dd_14 is Rn & Dd_14 & fstmSet13 { build fstmSet13; *:8 (Rn + 104:4) = Dd_14; } fstmSet15: Dd_15 is Rn & Dd_15 & 
fstmSet14 { build fstmSet14; *:8 (Rn + 112:4) = Dd_15; } fstmSet16: Dd_16 is Rn & Dd_16 & fstmSet15 { build fstmSet15; *:8 (Rn + 120:4) = Dd_16; } fstmSet: "{"^Dd_1^"}" is Dd_1 & c0007=3 & fstmSet1 { build fstmSet1; } fstmSet: "{"^Dd_1^"-"^fstmSet2^"}" is Dd_1 & c0007=5 & fstmSet2 { build fstmSet2; } fstmSet: "{"^Dd_1^"-"^fstmSet3^"}" is Dd_1 & c0007=7 & fstmSet3 { build fstmSet3; } fstmSet: "{"^Dd_1^"-"^fstmSet4^"}" is Dd_1 & c0007=9 & fstmSet4 { build fstmSet4; } fstmSet: "{"^Dd_1^"-"^fstmSet5^"}" is Dd_1 & c0007=11 & fstmSet5 { build fstmSet5; } fstmSet: "{"^Dd_1^"-"^fstmSet6^"}" is Dd_1 & c0007=13 & fstmSet6 { build fstmSet6; } fstmSet: "{"^Dd_1^"-"^fstmSet7^"}" is Dd_1 & c0007=15 & fstmSet7 { build fstmSet7; } fstmSet: "{"^Dd_1^"-"^fstmSet8^"}" is Dd_1 & c0007=17 & fstmSet8 { build fstmSet8; } fstmSet: "{"^Dd_1^"-"^fstmSet9^"}" is Dd_1 & c0007=19 & fstmSet9 { build fstmSet9; } fstmSet: "{"^Dd_1^"-"^fstmSet10^"}" is Dd_1 & c0007=21 & fstmSet10 { build fstmSet10; } fstmSet: "{"^Dd_1^"-"^fstmSet11^"}" is Dd_1 & c0007=23 & fstmSet11 { build fstmSet11; } fstmSet: "{"^Dd_1^"-"^fstmSet12^"}" is Dd_1 & c0007=25 & fstmSet12 { build fstmSet12; } fstmSet: "{"^Dd_1^"-"^fstmSet13^"}" is Dd_1 & c0007=27 & fstmSet13 { build fstmSet13; } fstmSet: "{"^Dd_1^"-"^fstmSet14^"}" is Dd_1 & c0007=29 & fstmSet14 { build fstmSet14; } fstmSet: "{"^Dd_1^"-"^fstmSet15^"}" is Dd_1 & c0007=31 & fstmSet15 { build fstmSet15; } fstmSet: "{"^Dd_1^"-"^fstmSet16^"}" is Dd_1 & c0007=33 & fstmSet16 { build fstmSet16; } fstmWback: Rn^"!" is c2121=1 & c2323=1 & c0007 & Rn { Rn = Rn + (4 * c0007:4); } fstmWback: Rn^"!" 
is c2121=1 & c2323=0 & c0007 & Rn { Rn = Rn - (4 * c0007:4); } fstmWback: Rn is c2121=0 & Rn { } } with : TMode=1 { fstmSet1: thv_Dd_1 is thv_Rn & thv_Dd_1 { * thv_Rn = thv_Dd_1; } fstmSet2: thv_Dd_2 is thv_Rn & thv_Dd_2 & fstmSet1 { build fstmSet1; *:8 (thv_Rn + 8:4) = thv_Dd_2; } fstmSet3: thv_Dd_3 is thv_Rn & thv_Dd_3 & fstmSet2 { build fstmSet2; *:8 (thv_Rn + 16:4) = thv_Dd_3; } fstmSet4: thv_Dd_4 is thv_Rn & thv_Dd_4 & fstmSet3 { build fstmSet3; *:8 (thv_Rn + 24:4) = thv_Dd_4; } fstmSet5: thv_Dd_5 is thv_Rn & thv_Dd_5 & fstmSet4 { build fstmSet4; *:8 (thv_Rn + 32:4) = thv_Dd_5; } fstmSet6: thv_Dd_6 is thv_Rn & thv_Dd_6 & fstmSet5 { build fstmSet5; *:8 (thv_Rn + 40:4) = thv_Dd_6; } fstmSet7: thv_Dd_7 is thv_Rn & thv_Dd_7 & fstmSet6 { build fstmSet6; *:8 (thv_Rn + 48:4) = thv_Dd_7; } fstmSet8: thv_Dd_8 is thv_Rn & thv_Dd_8 & fstmSet7 { build fstmSet7; *:8 (thv_Rn + 56:4) = thv_Dd_8; } fstmSet9: thv_Dd_9 is thv_Rn & thv_Dd_9 & fstmSet8 { build fstmSet8; *:8 (thv_Rn + 64:4) = thv_Dd_9; } fstmSet10: thv_Dd_10 is thv_Rn & thv_Dd_10 & fstmSet9 { build fstmSet9; *:8 (thv_Rn + 72:4) = thv_Dd_10; } fstmSet11: thv_Dd_11 is thv_Rn & thv_Dd_11 & fstmSet10 { build fstmSet10; *:8 (thv_Rn + 80:4) = thv_Dd_11; } fstmSet12: thv_Dd_12 is thv_Rn & thv_Dd_12 & fstmSet11 { build fstmSet11; *:8 (thv_Rn + 88:4) = thv_Dd_12; } fstmSet13: thv_Dd_13 is thv_Rn & thv_Dd_13 & fstmSet12 { build fstmSet12; *:8 (thv_Rn + 96:4) = thv_Dd_13; } fstmSet14: thv_Dd_14 is thv_Rn & thv_Dd_14 & fstmSet13 { build fstmSet13; *:8 (thv_Rn + 104:4) = thv_Dd_14; } fstmSet15: thv_Dd_15 is thv_Rn & thv_Dd_15 & fstmSet14 { build fstmSet14; *:8 (thv_Rn + 112:4) = thv_Dd_15; } fstmSet16: thv_Dd_16 is thv_Rn & thv_Dd_16 & fstmSet15 { build fstmSet15; *:8 (thv_Rn + 120:4) = thv_Dd_16; } fstmSet: "{"^thv_Dd_1^"}" is thv_Dd_1 & thv_c0007=3 & fstmSet1 { build fstmSet1; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet2^"}" is thv_Dd_1 & thv_c0007=5 & fstmSet2 { build fstmSet2; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet3^"}" is thv_Dd_1 & 
thv_c0007=7 & fstmSet3 { build fstmSet3; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet4^"}" is thv_Dd_1 & thv_c0007=9 & fstmSet4 { build fstmSet4; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet5^"}" is thv_Dd_1 & thv_c0007=11 & fstmSet5 { build fstmSet5; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet6^"}" is thv_Dd_1 & thv_c0007=13 & fstmSet6 { build fstmSet6; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet7^"}" is thv_Dd_1 & thv_c0007=15 & fstmSet7 { build fstmSet7; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet8^"}" is thv_Dd_1 & thv_c0007=17 & fstmSet8 { build fstmSet8; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet9^"}" is thv_Dd_1 & thv_c0007=19 & fstmSet9 { build fstmSet9; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet10^"}" is thv_Dd_1 & thv_c0007=21 & fstmSet10 { build fstmSet10; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet11^"}" is thv_Dd_1 & thv_c0007=23 & fstmSet11 { build fstmSet11; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet12^"}" is thv_Dd_1 & thv_c0007=25 & fstmSet12 { build fstmSet12; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet13^"}" is thv_Dd_1 & thv_c0007=27 & fstmSet13 { build fstmSet13; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet14^"}" is thv_Dd_1 & thv_c0007=29 & fstmSet14 { build fstmSet14; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet15^"}" is thv_Dd_1 & thv_c0007=31 & fstmSet15 { build fstmSet15; } fstmSet: "{"^thv_Dd_1^"-"^fstmSet16^"}" is thv_Dd_1 & thv_c0007=33 & fstmSet16 { build fstmSet16; } fstmWback: thv_Rn^"!" is thv_bit21=1 & thv_bit23=1 & thv_c0007 & thv_Rn { thv_Rn = thv_Rn + (4 * thv_c0007:4); } fstmWback: thv_Rn^"!" 
is thv_bit21=1 & thv_bit23=0 & thv_c0007 & thv_Rn { thv_Rn = thv_Rn - (4 * thv_c0007:4); } fstmWback: thv_Rn is thv_bit21=0 & thv_Rn { } } :fstmdbx^COND fstmSet, fstmWback is $(AMODE) & COND & ARMcond=1 & c2327=0x1a & c2021=2 & c0811=0xb & c0000=1 & fstmWback & fstmSet { build fstmWback; build fstmSet; } :fstmiax^COND fstmSet, fstmWback is $(AMODE) & COND & ARMcond=1 & c2327=0x19 & c2020=0 & c0811=0xb & c0000=1 & fstmWback & fstmSet { build fstmSet; build fstmWback; } :fstmdbx^ItCond fstmSet, fstmWback is TMode=1 & ItCond & thv_c2331=0x1da & thv_c2021=2 & thv_c0811=0xb & fstmWback & fstmSet { build fstmWback; build fstmSet; } :fstmiax^ItCond fstmSet, fstmWback is TMode=1 & ItCond & thv_c2331=0x1d9 & thv_bit20=0 & thv_c0811=0xb & fstmWback & fstmSet { build fstmSet; build fstmWback; } @endif @if defined(VFPv2) || defined(VFPv3) ####### # VLDM (A2) # buildVldmSdList: is counter=0 { } buildVldmSdList: Sreg is counter=1 & Sreg [ counter=0; regNum=regNum+1; ] { Sreg = *mult_addr; mult_addr = mult_addr + 4; } buildVldmSdList: Sreg,buildVldmSdList is Sreg & buildVldmSdList [ counter=counter-1; regNum=regNum+1; ] { Sreg = *mult_addr; mult_addr = mult_addr + 4; build buildVldmSdList; } vldmSdList: "{"^buildVldmSdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVldmSdList [ regNum=(c1215<<1) + D22 - 1; counter=c0007; ] { } vldmSdList: "{"^buildVldmSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVldmSdList [ regNum=(thv_c1215<<1) + thv_D22 - 1; counter=thv_c0007; ] { } :vldmia^COND vldmRn,vldmSdList is ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c2020=1 & c0811=10 ) | ($(TMODE_E) & thv_c2327=0x19 & thv_c2020=1 & thv_c0811=10 ) ) & COND & vldmRn & vldmSdList & vldmOffset & vldmUpdate { mult_addr = vldmRn; build vldmSdList; build vldmUpdate; } :vldmdb^COND vldmRn,vldmSdList is ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c2121=1 & c2020=1 & c0811=10 ) | ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=1 & thv_c0811=10 ) ) & COND & vldmRn & vldmSdList & vldmOffset { 
local start_addr = vldmRn - vldmOffset; mult_addr = start_addr; build vldmSdList; vldmRn = start_addr; } ####### # VLDR # vldrRn: "["^Rn^"]" is TMode=0 & Rn & immed=0 & c2323=0 { ptr:4 = Rn; export ptr; } vldrRn: "["^Rn^"]" is TMode=0 & Rn & immed=0 & c2323=1 { ptr:4 = Rn; export ptr; } vldrRn: "["^Rn^",#-"^vldrImm^"]" is TMode=0 & Rn & immed & c2323=0 [ vldrImm = immed * 4; ] { ptr:4 = Rn - vldrImm; export ptr; } vldrRn: "["^Rn^",#"^vldrImm^"]" is TMode=0 & Rn & immed & c2323=1 [ vldrImm = immed * 4; ] { ptr:4 = Rn + vldrImm; export ptr; } vldrRn: "["^pc^"]" is TMode=0 & Rn=15 & pc & immed=0 & c2323=0 { ptr:4 = ((inst_start + 8) & 0xfffffffc); export ptr; } vldrRn: "["^pc^"]" is TMode=0 & Rn=15 & pc & immed=0 & c2323=1 { ptr:4 = ((inst_start + 8) & 0xfffffffc); export ptr; } vldrRn: "["^pc^",#-"^vldrImm^"]" is TMode=0 & Rn=15 & pc & immed & c2323=0 [ vldrImm = immed * 4; ] { ptr:4 = ((inst_start + 8) & 0xfffffffc) - vldrImm; export ptr; } vldrRn: "["^pc^",#"^vldrImm^"]" is TMode=0 & Rn=15 & pc & immed & c2323=1 [ vldrImm = immed * 4; ] { ptr:4 = ((inst_start + 8) & 0xfffffffc) + vldrImm; export ptr; } vldrRn: "["^VRn^"]" is TMode=1 & VRn & thv_immed=0 & thv_c2323=0 { ptr:4 = VRn; export ptr; } vldrRn: "["^VRn^"]" is TMode=1 & VRn & thv_immed=0 & thv_c2323=1 { ptr:4 = VRn; export ptr; } vldrRn: "["^VRn^",#-"^vldrImm^"]" is TMode=1 & VRn & thv_immed & thv_c2323=0 [ vldrImm = thv_immed * 4; ] { ptr:4 = VRn - vldrImm; export ptr; } vldrRn: "["^VRn^",#"^vldrImm^"]" is TMode=1 & VRn & thv_immed & thv_c2323=1 [ vldrImm = thv_immed * 4; ] { ptr:4 = VRn + vldrImm; export ptr; } vldrRn: "["^pc^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed=0 & thv_c2323=0 { ptr:4 = ((inst_start + 4) & 0xfffffffc); export ptr; } vldrRn: "["^pc^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed=0 & thv_c2323=1 { ptr:4 = ((inst_start + 4) & 0xfffffffc); export ptr; } vldrRn: "["^pc^",#-"^vldrImm^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed & thv_c2323=0 [ vldrImm = thv_immed * 4; ] { ptr:4 = ((inst_start 
+ 4) & 0xfffffffc) - vldrImm; export ptr; } vldrRn: "["^pc^",#"^vldrImm^"]" is TMode=1 & thv_Rn=15 & pc & thv_immed & thv_c2323=1 [ vldrImm = thv_immed * 4; ] { ptr:4 = ((inst_start + 4) & 0xfffffffc) + vldrImm; export ptr; } :vldr^COND^".64" Dd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=1 & c0811=11) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=1 & thv_c0811=11)) & Dd & vldrRn { Dd = *:8 vldrRn; } :vldr^COND^".32" Sd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=1 & c0811=10) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=1 & thv_c0811=10)) & Sd & vldrRn { Sd = *:4 vldrRn; } @endif # VFPv2 | VFPv3 define pcodeop VectorMin; define pcodeop VectorMax; define pcodeop FloatVectorMin; define pcodeop FloatVectorMax; define pcodeop VectorMultiplyAccumulate; define pcodeop VectorMultiplySubtract; define pcodeop VectorMultiplySubtractLong; define pcodeop VectorDoubleMultiplyHighHalf; define pcodeop VectorRoundDoubleMultiplyHighHalf; define pcodeop VectorDoubleMultiplyLong; define pcodeop VectorDoubleMultiplyAccumulateLong; define pcodeop VectorDoubleMultiplySubtractLong; define pcodeop FloatVectorMultiplyAccumulate; define pcodeop FloatVectorMultiplySubtract; @if defined(SIMD) :vmax.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=0 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & udt & Dm & Dn & Dd { Dd = VectorMax(Dn,Dm,esize2021,udt); } :vmax.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=0 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & udt & Qm & Qn & Qd { Qd = VectorMax(Qn,Qm,esize2021,udt); } :vmax.f32 Dd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=15 & 
thv_Q6=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = FloatVectorMax(Dn,Dm,2:4,32:1); } :vmax.f32 Qd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=0 & c0811=15 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=15 & thv_Q6=1 & thv_c0404=0)) & Qm & Qn & Qd { Qd = FloatVectorMax(Qn,Qm,2:4,32:1); } :vmin.^udt^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=0 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=0 & thv_c0404=1 ) ) & esize2021 & udt & Dm & Dn & Dd { Dd = VectorMin(Dn,Dm,esize2021,udt); } :vmin.^udt^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c2021<3 & c0811=6 & Q6=1 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c2021<3 & thv_c0811=6 & thv_Q6=1 & thv_c0404=1 ) ) & esize2021 & udt & Qm & Qn & Qd { Qd = VectorMin(Qn,Qm,esize2021,udt); } :vmin.f32 Dd,Dn,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=15 & thv_Q6=0 & thv_c0404=0)) & Dm & Dn & Dd { Dd = FloatVectorMin(Dn,Dm,2:4,32:1); } :vmin.f32 Qd,Qn,Qm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=15 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=15 & thv_Q6=1 & thv_c0404=0)) & Qm & Qn & Qd { Qd = FloatVectorMin(Qn,Qm,2:4,32:1); } :vmla.i^esize2021 Dd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd { Dd = VectorMultiplyAccumulate(Dn,Dm,esize2021,0:1); } :vmla.i^esize2021 Qd,Qn,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021<3 & c0811=9 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd { Qd = 
VectorMultiplyAccumulate(Qn,Qm,esize2021,0:1); } :vmls.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dm & Dn & Dd { Dd = VectorMultiplySubtract(Dn,Dm,esize2021,0:1); } :vmls.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2424=1 & c2021<3 & c0811=9 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021<3 & thv_c0811=9 & thv_Q6=1 & thv_c0404=0)) & esize2021 & Qm & Qn & Qd { Qd = VectorMultiplySubtract(Qn,Qm,esize2021,0:1); } :vmlal.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=8 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=8 & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 { Qd = VectorMultiplyAccumulate(Dn,Dm,esize2021,udt); } :vmlsl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=10 & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=10 & thv_Q6=0 & thv_c0404=0 ) ) & Dm & Dn & Qd & udt & esize2021 { Qd = VectorMultiplySubtractLong(Dn,Dm,esize2021,udt); } :vmla.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2020 & Dn & Dd & Dm { Dd = FloatVectorMultiplyAccumulate(Dn,Dm,fesize2020,8:1); } :vmla.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=0 & c0811=13 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=0 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2020 & Qn & Qd & Qm { Qd = FloatVectorMultiplyAccumulate(Qn,Qm,fesize2020,16:1); } :vmls.f^fesize2020 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & 
thv_c0811=13 & thv_c0606=0 & thv_c0404=1)) & fesize2020 & Dn & Dd & Dm { Dd = FloatVectorMultiplySubtract(Dn,Dm,fesize2020,8:1); } :vmls.f^fesize2020 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_c0606=1 & thv_c0404=1)) & fesize2020 & Qn & Qd & Qm { Qd = FloatVectorMultiplySubtract(Qn,Qm,fesize2020,16:1); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vmla^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=10 & c0606=0 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0)) & COND & Sm & Sn & Sd { Sd = Sd f+ (Sn f* Sm); } :vmla^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=11 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0)) & COND & Dm & Dn & Dd { Dd = Dd f+ (Dn f* Dm); } :vmls^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0)) & COND & Sm & Sn & Sd { Sd = Sd f- (Sn f* Sm); } :vmls^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=0 & c0811=11 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=0 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0)) & COND & Dm & Dn & Dd { Dd = Dd f- (Dn f* Dm); } @endif # VFPv2 || VFPv3 @if defined(SIMD) ##### # VML* (by scalar) (A1) # vmlDm: Dm_3^"["^index^"]" is TMode=0 & c2021=1 & Dm_3 & M5 & c0303 [ index = (M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; } vmlDm: Dm_4^"["^M5^"]" is TMode=0 & c2021=2 & Dm_4 & M5 { el:4 = VectorGetElement(Dm_4, M5:1, 4:1, 0:1); export el; } vmlDm: thv_Dm_3^"["^index^"]" is TMode=1 & thv_c2021=1 & thv_Dm_3 & thv_M5 & thv_c0303 [ index = (thv_M5 << 1) + thv_c0303; ] { el:4 = VectorGetElement(thv_Dm_3, index:1, 2:1, 
0:1); export el; } vmlDm: thv_Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & thv_Dm_4 & thv_M5 { el:4 = VectorGetElement(thv_Dm_4, thv_M5:1, 4:1, 0:1); export el; } :vmla.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Dn & Dd & vmlDm { Dd = VectorMultiplyAccumulate(Dn,vmlDm,esize2021); } :vmla.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Qn & Qd & vmlDm { Qd = VectorMultiplyAccumulate(Qn,vmlDm,esize2021); } :vmla.f32 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=1 & thv_c0606=1 & thv_c0404=0)) & Dn & Dd & vmlDm { Dd = FloatVectorMultiplyAccumulate(Dn,vmlDm,2:4,32:1); } :vmla.f32 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=1 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=1 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & vmlDm { Qd = FloatVectorMultiplyAccumulate(Qn,vmlDm,2:4,32:1); } :vmls.i^esize2021 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2) & c0811=4 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=4 & thv_c0606=1 & thv_c0404=0)) & esize2021 & Dn & Dd & vmlDm { Dd = VectorMultiplySubtract(Dn,vmlDm,esize2021); } :vmls.i^esize2021 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2)& c0811=4 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=4 
& thv_c0606=1 & thv_c0404=0)) & esize2021 & Qn & Qd & vmlDm { Qd = VectorMultiplySubtract(Qn,vmlDm,esize2021); } :vmls.f32 Dd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=5 & thv_c0606=1 & thv_c0404=0)) & Dn & Dd & vmlDm { Dd = FloatVectorMultiplySubtract(Dn,vmlDm,2:4,32:1); } :vmls.f32 Qd,Qn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c2021=2 & c0811=5 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=5 & thv_c0606=1 & thv_c0404=0)) & Qn & Qd & vmlDm { Qd = FloatVectorMultiplySubtract(Qn,vmlDm,2:4,32:1); } ##### # VML* (by scalar) (A2) # :vmlal.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=2 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=2 & thv_Q6=1 & thv_c0404=0 ) ) & udt & esize2021 & Dn & Qd & vmlDm { Qd = VectorMultiplyAccumulate(Dn,vmlDm,esize2021,udt); } :vmlsl.^udt^esize2021 Qd,Dn,vmlDm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=6 & Q6=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=6 & thv_Q6=1 & thv_c0404=0 ) ) & udt & esize2021 & Dn & Qd & vmlDm { Qd = VectorMultiplySubtract(Dn,vmlDm,esize2021,udt); } # Addresses all versions of F6.1.134 except A2/T2 with Q=0 :vmov.^simdExpImmDT Dd,simdExpImm_8 is (( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=0 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0707=0 & thv_Q6=0 & thv_c0404=1 )) & Dd & simdExpImmDT & simdExpImm_8 { Dd = simdExpImm_8; } # Addresses all versions of F6.1.134 except At/T2 with Q=1 :vmov.^simdExpImmDT Qd,simdExpImm_16 is (( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0707=0 & Q6=1 & c0404=1 ) | ( $(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & 
thv_c0707=0 & thv_Q6=1 & thv_c0404=1 )) & Qd & simdExpImmDT & simdExpImm_16 { Qd = simdExpImm_16; } @endif # SIMD @if defined(VFPv3) # F6.1.134 vmov A2/T2 :vmov^COND^".f16" Sd,vfpExpImm_4 is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c0411=0x90 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c0411=0x90 ) ) & COND & Sd & vfpExpImm_4 { build COND; Sd = vfpExpImm_4; } :vmov^COND^".f32" Sd,vfpExpImm_4 is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c0411=0xa0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c0411=0xa0 ) ) & COND & Sd & vfpExpImm_4 { build COND; Sd = vfpExpImm_4; } # F6.1.134 vmov A2/T2 :vmov^COND^".f64" Dd,vfpExpImm_8 is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c0411=0xb0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c0411=0xb0 ) ) & COND & Dd & vfpExpImm_8 { build COND; Dd = vfpExpImm_8; } @endif # VFPv3 @if defined(SIMD) :vmov Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=0 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c1619=thv_c0003 & thv_c0811=1 & thv_c0707=thv_c0505 & thv_c0606=0 & thv_c0404=1) ) & Dd & Dm { Dd = Dm; } :vmov Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c1619=c0003 & c0811=1 & c0707=c0505 & Q6=1 & c0404=1 ) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c1619=thv_c0003 & thv_c0811=1 & thv_c0707=thv_c0505 & thv_c0606=1 & thv_c0404=1) ) & Qd & Qm { Qd = Qm; } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vmov^COND^".f32" Sd,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x29 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x29 & thv_c0404=0) ) & COND & Sd & Sm { Sd = Sm; } :vmov^COND^".f64" Dd,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x30 & c0611=0x2d & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x30 & thv_c0611=0x2d & thv_c0404=0) ) & COND & Dd & Dm { Dd = Dm; } @endif # VFPv2 || VFPv3 define pcodeop VectorSetElement; @if 
defined(SIMD)

# vmovIndex computes the scalar element index for VMOV (general-purpose
# register <-> D-register element) from the opc1/opc2 fields.
# 8-bit elements: index = opc1<0>:opc2 (3 bits)
vmovIndex: val is TMode=0 & c2222=1 & c2121 & c0506 [ val = (c2121 << 2) + c0506; ] { tmp:1 = val; export tmp; }
# 16-bit elements: index = opc1<0>:opc2<1> (2 bits)
vmovIndex: val is TMode=0 & c2222=0 & c2121 & c0606 & c0505=1 [ val = (c2121 << 1) + c0606; ] { tmp:1 = val; export tmp; }
vmovIndex: val is TMode=1 & thv_c2222=1 & thv_c2121 & thv_c0506 [ val = (thv_c2121 << 2) + thv_c0506; ] { tmp:1 = val; export tmp; }
vmovIndex: val is TMode=1 & thv_c2222=0 & thv_c2121 & thv_c0606 & thv_c0505=1 [ val = (thv_c2121 << 1) + thv_c0606; ] { tmp:1 = val; export tmp; }
@if defined(VFPv2) || defined(VFPv3) || defined(SIMD)
# 32-bit elements: index is just opc1<0>
vmovIndex: c2121 is TMode=0 & c2222=0 & c2121 & c0506=0 { tmp:1 = c2121; export tmp; }
vmovIndex: thv_c2121 is TMode=1 & thv_c2222=0 & thv_c2121 & thv_c0506=0 { tmp:1 = thv_c2121; export tmp; }
@endif # VFPv2 || VFPv3 || SIMD

# Display helper: prints the destination element as Dn[index]
dNvmovIndex: Dn^"["^vmovIndex^"]" is Dn & vmovIndex { }

# VMOV (general-purpose register to scalar): insert the low 8/16/32 bits of
# VRd into element vmovIndex of Dn.
# FIX: the element masks were half-width (0xf/0xff/0xffff instead of
# 0xff/0xffff/0xffffffff) and the inserted value was never shifted into the
# element position, so every element except index 0 was cleared rather than
# written.
:vmov^COND^".8" dNvmovIndex,VRd is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2222=1 & c2020=0 & c0811=11 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2222=1 & thv_c2020=0 & thv_c0811=11 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	el:1 = VRd(0);
	vmask:8 = 0xff << (vmovIndex*8);
	Dn = (Dn & ~vmask) | ((zext(el) << (vmovIndex*8)) & vmask);
}

:vmov^COND^".16" dNvmovIndex,VRd is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2222=0 & c2020=0 & c0811=11 & c0505=1 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2222=0 & thv_c2020=0 & thv_c0811=11 & thv_c0505=1 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	el:2 = VRd(0);
	vmask:8 = 0xffff << (vmovIndex*16);
	Dn = (Dn & ~vmask) | ((zext(el) << (vmovIndex*16)) & vmask);
}

:vmov^COND^".32" dNvmovIndex,VRd is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2222=0 & c2020=0 & c0811=11 & c0506=0 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2222=0 & thv_c2020=0 & thv_c0811=11 & thv_c0506=0 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	el:4 = VRd;
	vmask:8 = 0xffffffff << (vmovIndex*32);
	Dn = (Dn & ~vmask) | ((zext(el) << (vmovIndex*32)) & vmask);
}

# VMOV (scalar to general-purpose register), unsigned: zero-extend the
# selected element of Dn into VRd.
:vmov^COND^".u8" VRd,dNvmovIndex is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2222=1 & c2020=1 & c0811=11 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2222=1 & thv_c2020=1 & thv_c0811=11 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	val:8 = Dn >> (vmovIndex*8);
	result:1 = val(0);
	VRd = zext(result);
}

:vmov^COND^".u16" VRd,dNvmovIndex is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2222=0 & c2020=1 & c0811=11 & c0505=1 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2222=0 & thv_c2020=1 & thv_c0811=11 & thv_c0505=1 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	val:8 = Dn >> (vmovIndex*16);
	result:2 = val(0);
	VRd = zext(result);
}

:vmov^COND^".u32" VRd,dNvmovIndex is ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2222=0 & c2020=1 & c0811=11 & c0506=0 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2222=0 & thv_c2020=1 & thv_c0811=11 & thv_c0506=0 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	val:8 = Dn >> (vmovIndex*32);
	result:4 = val(0);
	VRd = zext(result);
}

# VMOV (scalar to general-purpose register), signed: sign-extend the
# selected element of Dn into VRd.
:vmov^COND^".s8" VRd,dNvmovIndex is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2222=1 & c2020=1 & c0811=11 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2222=1 & thv_c2020=1 & thv_c0811=11 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex
{
	val:8 = Dn >> (vmovIndex*8);
	result:1 = val(0);
	VRd = sext(result);
}

:vmov^COND^".s16" VRd,dNvmovIndex is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2222=0 & c2020=1 & c0811=11 & c0505=1 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2222=0 & thv_c2020=1
& thv_c0811=11 & thv_c0505=1 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex { val:8 = Dn >> (vmovIndex*16); result:2 = val(0); VRd = sext(result); #VRd = VectorGetElement(Dn,vmovIndex,vmovSize,0:1); } :vmov^COND^".s32" VRd,dNvmovIndex is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2222=0 & c2020=1 & c0811=11 & c0506=0 & c0404=1 & c0003=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2222=0 & thv_c2020=1 & thv_c0811=11 & thv_c0506=0 & thv_c0404=1 & thv_c0003=0 ) ) & COND & Dn & VRd & vmovIndex & dNvmovIndex { val:8 = Dn >> (vmovIndex*32); result:4 = val(0); VRd = sext(result); #VRd = VectorGetElement(Dn,vmovIndex,vmovSize,0:1); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vmov^COND Sn,VRd is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2122=0 & c2020=0 & c0811=10 & c0006=0x10) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2122=0 & thv_c2020=0 & thv_c0811=10 & thv_c0006=0x10) ) & COND & Sn & VRd { Sn = VRd; } :vmov^COND VRd,Sn is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2122=0 & c2020=1 & c0811=10 & c0006=0x10) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2122=0 & thv_c2020=1 & thv_c0811=10 & thv_c0006=0x10) ) & COND & Sn & VRd { VRd = Sn; } :vmov^COND Sm,SmNext,VRd,VRn is ( ($(AMODE) & ARMcond=1 & c2027=0xc4 & c0611=0x28 & c0404=1) | ($(TMODE_E) & thv_c2027=0xc4 & thv_c0611=0x28 & thv_c0404=1) ) & COND & VRn & VRd & Sm & SmNext { Sm = VRd; SmNext = VRn; } :vmov^COND VRd,VRn,Sm,SmNext is ( ($(AMODE) & ARMcond=1 & c2027=0xc5 & c0611=0x28 & c0404=1) | ($(TMODE_E) & thv_c2027=0xc5 & thv_c0611=0x28 & thv_c0404=1) ) & COND & VRn & VRd & Sm & SmNext { VRd = Sm; VRn = SmNext; } @endif # VFPv2 || VFPv3 @if defined(VFPv2) || defined(VFPv3) || defined(SIMD) :vmov^COND Dm,VRd,VRn is COND & ( ($(AMODE) & ARMcond=1 & c2027=0xc4 & c0611=0x2c & c0404=1) | ($(TMODE_E) & thv_c2027=0xc4 & thv_c0611=0x2c & thv_c0404=1) ) & Dm & VRn & VRd { Dm = (zext(VRn) << 32) + zext(VRd); } :vmov^COND VRd,VRn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2027=0xc5 & c0611=0x2c & c0404=1) | 
($(TMODE_E) & thv_c2027=0xc5 & thv_c0611=0x2c & thv_c0404=1) ) & Dm & VRn & VRd
{
	VRn = Dm(4);
	VRd = Dm:4;
}
@endif # VFPv2 || VFPv3 || SIMD

define pcodeop VectorCopyLong;
define pcodeop VectorCopyNarrow;

@if defined(SIMD)
# VMOVL: widen each element of Dm into Qd (signedness selected by udt)
:vmovl.^udt^esize2021 Qd,Dm is (($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c1921=1 | c1921=2 | c1921=4) & c1618=0 & c0611=0x28 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1921=1 | thv_c1921=2 | thv_c1921=4) & thv_c1618=0 & thv_c0611=0x28 & thv_c0404=1) ) & esize2021 & udt & Qd & Dm
{
	Qd = VectorCopyLong(Dm,esize2021,udt);
}

# VMOVN: narrow each element of Qm into Dd
:vmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=8 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0611=8 & thv_c0404=0) ) & esize1819x2 & Dd & Qm
{
	Dd = VectorCopyNarrow(Qm,esize1819x2);
}

# VMOVX: copy the upper half-precision value of Sm into the lower half of Sd,
# zeroing the upper half of Sd
:vmovx.F16 Sd,Sm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c1921=0x6 & c1618=0 & c0611=0x29 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1d & thv_c1921=0x6 & thv_c1618=0 & thv_c0611=0x29 & thv_c0404=0) ) & Sd & Sm
{
	local SmUpper:2 = Sm(2);
	Sd = zext(SmUpper);
}
@endif # SIMD

@if defined(VFPv2) || defined(VFPv3) || defined(SIMD)
# vmrsReg selects the floating-point system register addressed by bits 16..19
# of the VMRS/VMSR encoding.
# FIX: the fpinst and fpinst2 entries exported mvfr1/mvfr0 (copy-paste error);
# each entry now exports the register it actually decodes to.
vmrsReg: fpsid is (($(AMODE) & c1619=0) | (TMode=1 & thv_c1619=0)) & fpsid { export fpsid; }
vmrsReg: fpscr is (($(AMODE) & c1619=1) | (TMode=1 & thv_c1619=1)) & fpscr { export fpscr; }
vmrsReg: mvfr2 is (($(AMODE) & c1619=5) | (TMode=1 & thv_c1619=5)) & mvfr2 { export mvfr2; }
vmrsReg: mvfr1 is (($(AMODE) & c1619=6) | (TMode=1 & thv_c1619=6)) & mvfr1 { export mvfr1; }
vmrsReg: mvfr0 is (($(AMODE) & c1619=7) | (TMode=1 & thv_c1619=7)) & mvfr0 { export mvfr0; }
vmrsReg: fpexc is (($(AMODE) & c1619=8) | (TMode=1 & thv_c1619=8)) & fpexc { export fpexc; }
vmrsReg: fpinst is (($(AMODE) & c1619=9) | (TMode=1 & thv_c1619=9)) & fpinst { export fpinst; }
vmrsReg: fpinst2 is (($(AMODE) & c1619=0xa) | (TMode=1 & thv_c1619=0xa)) & fpinst2 { export fpinst2; }

:vmrs^COND VRd,vmrsReg is COND & ( ($(AMODE)
& ARMcond=1 & c2027=0xef & c0011=0xa10) | ($(TMODE_E) & thv_c2027=0xef & thv_c0011=0xa10)) & vmrsReg & VRd { VRd = vmrsReg; } apsr: "apsr" is epsilon {} :vmrs^COND apsr,fpscr is ( ($(AMODE) & ARMcond=1 & c1627=0xef1 & c1215=15 & c0011=0xa10) | ($(TMODE_E) & thv_c1627=0xef1 & thv_c1215=15 & thv_c0011=0xa10) ) & COND & apsr & fpscr { NG = $(FPSCR_N); ZR = $(FPSCR_Z); CY = $(FPSCR_C); OV = $(FPSCR_V); } :vmsr^COND vmrsReg,VRd is ( ($(AMODE) & ARMcond=1 & c2027=0xee & c0011=0xa10) | ($(TMODE_E) & thv_c2027=0xee & thv_c0011=0xa10) ) & COND & VRd & vmrsReg { vmrsReg = VRd; } @endif # VFPv2 || VFPv3 || SIMD @if defined(SIMD) ### # VMUL (floating Point) # define pcodeop FloatVectorMult; define pcodeop VectorMultiply; define pcodeop PolynomialMultiply; :vmul.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=0xd & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=0 & thv_c0811=0xd & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = FloatVectorMult(Dn,Dm,2:1,32:1); } :vmul.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=0 & c0811=0xd & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=0 & thv_c0811=0xd & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = FloatVectorMult(Qn,Qm,2:1,32:1); } :vmul.f16 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=1 & thv_c0811=13 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = FloatVectorMult(Dn,Dm,4:1,16:1); } :vmul.f16 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x06 & c2121=0 & c2020=1 & c0811=13 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2121=0 & thv_c2020=1 & thv_c0811=13 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = FloatVectorMult(Qn,Qm,4:1,16:1); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) || defined(SIMD) :vmul^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c 
& c2021=2 & c0811=11 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & COND & Dm & Dn & Dd { Dd = Dn f* Dm; } :vmul^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=10 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sn & Sd { Sd = Sn f* Sm; } @endif # VFPv2 || VFPv3 || SIMD @if defined(SIMD) :vmul^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=9 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sn & Sd { product:2 = Sn:2 f* Sm:2; Sd = zext(product); } ### # VMUL (Integer and polynomial) # :vmul.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=9 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=9 & thv_Q6=0 & thv_c0404=1)) & esize2021 & Dn & Dd & Dm { Dd = VectorMultiply(Dn,Dm,esize2021); } :vmul.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=9 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=9 & thv_Q6=1 & thv_c0404=1)) & esize2021 & Qm & Qn & Qd { Qd = VectorMultiply(Qn,Qm,esize2021); } :vmul.p8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=9 & thv_Q6=0 & thv_c0404=1) ) & Dn & Dd & Dm { Dd = PolynomialMultiply(Dn,Dm,1:1); } :vmul.p8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c2021=0 & c0811=9 & Q6=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=9 & thv_Q6=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = PolynomialMultiply(Qn,Qm,1:1); } :vmull.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=0xc & Q6=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0xc & thv_Q6=0 & thv_c0404=0) ) & esize2021 & Dm & Dn & Qd & 
udt { Qd = VectorMultiply(Dn,Dm,esize2021,udt); } :vmull.p8 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x5 & c2021=0 & c0811=0xe & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=0 & thv_c0811=0xe & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Qd { Qd = PolynomialMultiply(Dn,Dm,1:1); } :vmull.p64 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x5 & c2021=2 & c0811=0xe & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c2021=2 & thv_c0811=0xe & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Qd { Qd = PolynomialMultiply(Dn,Dm,8:1); } # The below is confusing but these sub-constructors are used in a combination of F6.1.148 VMUL (by scalar) and F6.1.150 VMULL (by Scalar) etype: "I" is TMode=0 & c0909=0 & c0808=0 {} etype: "F" is TMode=0 & c0909=0 & c0808=1 {} etype: "S" is TMode=0 & c0909=1 & c2424=0 {} etype: "U" is TMode=0 & c0909=1 & c2424=1 {} etype: "I" is TMode=1 & thv_c0909=0 & thv_c0808=0 {} etype: "F" is TMode=1 & thv_c0909=0 & thv_c0808=1 {} etype: "S" is TMode=1 & thv_c0909=1 & thv_c2828=0 {} etype: "U" is TMode=1 & thv_c0909=1 & thv_c2828=1 {} vmlDmA: Dm_3^"["^index^"]" is TMode=0 & c2021=1 & Dm_3 & M5 & c0303 [ index = (M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; } vmlDmA: Dm_4^"["^M5^"]" is TMode=0 & c2021=2 & Dm_4 & M5 { el:4 = VectorGetElement(Dm_4, M5:1, 4:1, 0:1); export el; } vmlDmA: Dm_3^"["^index^"]" is TMode=1 & thv_c2021=1 & Dm_3 & thv_M5 & c0303 [ index = (thv_M5 << 1) + c0303; ] { el:4 = VectorGetElement(Dm_3, index:1, 2:1, 0:1); export el; } vmlDmA: Dm_4^"["^thv_M5^"]" is TMode=1 & thv_c2021=2 & Dm_4 & thv_M5 { el:4 = VectorGetElement(Dm_4, thv_M5:1, 4:1, 0:1); export el; } :vmul.^etype^esize2021 Qd,Qn,vmlDmA is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x07 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0911=4 & thv_c0606=1 & thv_c0404=0 ) ) & etype & esize2021 & Qn & Qd & vmlDmA { Qd = 
VectorMultiply(Qn,vmlDmA,esize2021); } :vmul.^etype^esize2021 Dd,Dn,vmlDmA is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x5 & (c2021=1 | c2021=2) & c0911=4 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0911=4 & thv_c0606=1 & thv_c0404=0 ) ) & etype & esize2021 & Dn & Dd & vmlDmA { Dd = VectorMultiply(Dn,vmlDmA,esize2021); } :vmull.^etype^esize2021 Qd,Dn,vmlDmA is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=10 & thv_c0606=1 & thv_c0404=0 ) ) & Dd & Dm & esize1819 & etype & esize2021 & Dn & Qd & vmlDmA { Qd = VectorMultiply(Dn,vmlDmA,esize2021); } ### # VMVN (immediate) # :vmvn.i32 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=0 & c0808=0 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=0 & thv_c0808=0 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = ~simdExpImm_8; } :vmvn.i32 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=0 & c0808=0 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=0 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = ~simdExpImm_16; } :vmvn.i16 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=0 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=0 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = ~simdExpImm_8; } :vmvn.i16 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011=2 & c0808=0 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011=2 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = ~simdExpImm_16; } :vmvn.i32 Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0911=6 & c0808=0 & c0407=3 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & 
thv_c0911=6 & thv_c0808=0 & thv_c0407=3) ) & Dd & simdExpImm_8 { Dd = ~simdExpImm_8; } :vmvn.i32 Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c0911=6 & c0808=0 & c0407=7 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c0911=6 & thv_c0808=0 & thv_c0407=7) ) & Qd & simdExpImm_16 { Qd = ~simdExpImm_16; } ### # VMVN (register) # :vmvn Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0811=5 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0) ) & Dd & Dm { Dd = ~Dm; } :vmvn Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1619=0 & c0811=5 & c0707=1 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0 & thv_c0811=5 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & Qd & Qm { tmp1:8 = Qm:8; tmp2:8 = Qm(8); tmp1 = ~ tmp1; tmp2 = ~ tmp2; Qd = (zext(tmp2) << 64) | zext(tmp1); } define pcodeop FloatVectorNeg; :vneg.s^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=7 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = FloatVectorNeg(Dm,1:1,esize1819); } :vneg.s^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=1 & c0711=7 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=1 & thv_c0711=7 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = FloatVectorNeg(Qm,1:1,esize1819); } :vneg.f^fesize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=1 & c0711=0xf & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=1 & thv_c0711=0xf & thv_c0606=0 & thv_c0404=0 ) ) & fesize1819 & Dm & Dd { Dd = FloatVectorNeg(Dm,2:1,fesize1819); } :vneg.f^fesize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & 
cond=15 & c2327=7 & c2021=3 & c1819=2 & c1617=1 & c0711=0xf & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819=2 & thv_c1617=1 & thv_c0711=0xf & thv_c0606=1 & thv_c0404=0 ) ) & fesize1819 & Qd & Qm { Qd = FloatVectorNeg(Qm,2:1,fesize1819); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vnmla^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=11 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & COND & Dm & Dn & Dd { build COND; product:8 = Dn f* Dm; Dd = (f- Dd) f+ (f- product); } :vnmla^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { build COND; product:4 = Sn f* Sm; Sd = (f- Sd) f+ (f- product); } :vnmla^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=9 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { build COND; product:2 = Sn:2 f* Sm:2; product = (f- Sd:2) f+ (f- product); Sd = zext(product); } :vnmls^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=11 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=11 & thv_c0606=0 & thv_c0404=0) ) & COND & Dm & Dn & Dd { build COND; product:8 = Dn f* Dm; Dd = product f- Dd; } :vnmls^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=10 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=10 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sn & Sd { build COND; product:4 = Sn f* Sm; Sd = product f- Sd; } :vnmls^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=1 & c0811=9 & c0606=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=1 & thv_c0811=9 & thv_c0606=0 & thv_c0404=0) ) & COND & Sm & Sn & 
Sd { build COND; product:2 = Sn:2 f* Sm:2; product = product f- Sd:2; Sd = zext(product); } :vnmul^COND^".f64" Dd,Dn,Dm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=11 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & COND & Dm & Dn & Dd { build COND; product:8 = Dn f* Dm; Dd = f- product; } :vnmul^COND^".f32" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { product:4 = Sn f* Sm; Sd = f- product; } :vnmul^COND^".f16" Sd,Sn,Sm is ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=2 & c0811=9 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=9 & thv_c0606=1 & thv_c0404=0) ) & COND & Sm & Sn & Sd { build COND; product:2 = Sn:2 f* Sm:2; product = f- product; Sd = zext(product); } :vneg^COND^".f16" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x31 & c0611=0x25 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x31 & thv_c0611=0x25 & thv_c0404=0 ) ) & COND & Sm & Sd { build COND; build Sd; build Sm; local tmp:2 = Sm(0); Sd = zext(f- tmp); } :vneg^COND^".f32" Sd,Sm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x31 & c0611=0x29 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x31 & thv_c0611=0x29 & thv_c0404=0 ) ) & COND & Sm & Sd { build COND; build Sd; build Sm; Sd = f- Sm; } :vneg^COND^".f64" Dd,Dm is ( ( $(AMODE) & ARMcond=1 & c2327=0x1d & c1621=0x31 & c0611=0x2d & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1d & thv_c1621=0x31 & thv_c0611=0x2d & thv_c0404=0 ) ) & COND & Dd & Dm { build COND; build Dd; build Dm; Dd = f- Dm; } @endif # VFPv2 || VFPv3 @if defined(SIMD) #F6.1.141 VORR (register) 64-bit SIMD vector variant (A1 and T1) :vorr Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & 
thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = Dn | Dm; } #F6.1.141 VORR (register) 128-bit SIMD vector variant (A1 and T1) :vorr Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=2 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm { Qd = Qn | Qm; } #F6.1.140 VORR and F6.1.138 VORN (immediate) 64-bit SIMD vector variant :vorr Dd,simdExpImm_8 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=1 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=1) ) & Dd & simdExpImm_8 { Dd = Dd | simdExpImm_8; } #F6.1.140 VORR and F6.1.138 VORN (immediate) 128-bit SIMD vector variant :vorr Qd,simdExpImm_16 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1921=0 & c1011<3 & c0808=1 & c0407=5 ) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1921=0 & thv_c1011<3 & thv_c0808=1 & thv_c0407=5) ) & Qd & simdExpImm_16 { Qd = Qd | simdExpImm_16; } #F6.1.139 VORN (register) 64-bit SIMD vector variant (A1 and T1) :vorn Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=0 & thv_c0404=1)) & Dn & Dd & Dm { Dd = Dn | ~Dm; } #F6.1.139 VORN (register) 128-bit SIMD vector variant (A1 and T1) :vorn Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2021=3 & c0811=1 & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=3 & thv_c0811=1 & thv_Q6=1 & thv_c0404=1)) & Qd & Qn & Qm { Qd = Qn | ~Qm; } @endif # SIMD ####### # VPUSH (A2) # @if defined(VFPv2) || defined(VFPv3) || defined(SIMD) buildVpushSdList: Sreg is counter=0 & Sreg [ regNum=regNum+1; ] { * mult_addr = Sreg; mult_addr = mult_addr + 4; } buildVpushSdList: Sreg,buildVpushSdList is Sreg & buildVpushSdList [ counter=counter-1; regNum=regNum+1; ] { * mult_addr = Sreg; mult_addr = mult_addr + 4; } vpushSdList: "{"^buildVpushSdList^"}" is TMode=0 
& D22 & c1215 & c0007 & buildVpushSdList [ regNum=(c1215<<1)+D22-1; counter=c0007-1; ] { sp = sp - c0007 * 4; mult_addr = sp; build buildVpushSdList; } vpushSdList: "{"^buildVpushSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpushSdList [ regNum=(thv_c1215<<1)+thv_D22-1; counter=thv_c0007-1; ] { sp = sp - thv_c0007 * 4; mult_addr = sp; build buildVpushSdList; } buildVpushSd64List: Dreg is counter=0 & Dreg [ regNum=regNum+1; ] { * mult_addr = Dreg:8; mult_addr = mult_addr + 8; } buildVpushSd64List: Dreg,buildVpushSd64List is Dreg & buildVpushSd64List [ counter=counter-1; regNum=regNum+1; ] { * mult_addr = Dreg:8; mult_addr = mult_addr + 8; build buildVpushSd64List; } vpushSd64List: "{"^buildVpushSd64List^"}" is TMode=0 & D22 & c1215 & c0007 & buildVpushSd64List [ regNum=(D22<<4)+c1215-1; counter=c0007 / 2 - 1; ] { sp = sp - c0007 * 4; mult_addr = sp; build buildVpushSd64List; } vpushSd64List: "{"^buildVpushSd64List^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpushSd64List [ regNum=(thv_D22<<4)+thv_c1215-1; counter=thv_c0007 / 2 - 1; ] { sp = sp - thv_c0007 * 4; mult_addr = sp; build buildVpushSd64List; } :vpush^COND vpushSd64List is ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c1619=13 & c2021=2 & c0811=11 & c0000=0) | ($(TMODE_E) & thv_c2327=0x1a & thv_c1619=13 & thv_c2021=2 & thv_c0811=11 & thv_c0000=0) ) & COND & vpushSd64List { build vpushSd64List; } :vpush^COND vpushSdList is ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c1619=13 & c2021=2 & c0811=10) | ($(TMODE_E) & thv_c2327=0x1a & thv_c1619=13 & thv_c2021=2 & thv_c0811=10) ) & COND & vpushSdList { build vpushSdList; } buildVpopSdList: Sreg is counter=0 & Sreg [ regNum=regNum+1; ] { tmp:4 = *mult_addr; Sreg = zext(tmp); mult_addr = mult_addr + 4; } buildVpopSdList: Sreg,buildVpopSdList is Sreg & buildVpopSdList [ counter=counter-1; regNum=regNum+1; ] { tmp:4 = *mult_addr; Sreg = zext(tmp); mult_addr = mult_addr + 4; } vpopSdList: "{"^buildVpopSdList^"}" is TMode=0 & D22 & c1215 & c0007 & 
buildVpopSdList [ regNum=(c1215<<1)+D22-1; counter=c0007-1; ] { mult_addr = sp; sp = sp + c0007 * 4; build buildVpopSdList; } vpopSdList: "{"^buildVpopSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpopSdList [ regNum=(thv_c1215<<1)+thv_D22-1; counter=thv_c0007-1; ] { mult_addr = sp; sp = sp + thv_c0007 * 4; build buildVpopSdList; } buildVpopSd64List: Dreg is counter=0 & Dreg [ regNum=regNum+1; ] { Dreg = *mult_addr; mult_addr = mult_addr + 8; } buildVpopSd64List: Dreg,buildVpopSd64List is Dreg & buildVpopSd64List [ counter=counter-1; regNum=regNum+1; ] { Dreg = *mult_addr; mult_addr = mult_addr + 8; build buildVpopSd64List; } vpopSd64List: "{"^buildVpopSd64List^"}" is TMode=0 & D22 & c1215 & c0007 & buildVpopSd64List [ regNum=(D22<<4)+c1215-1; counter=c0007 / 2 - 1; ] { mult_addr = sp; sp = sp + c0007 * 4; build buildVpopSd64List; } vpopSd64List: "{"^buildVpopSd64List^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVpopSd64List [ regNum=(thv_D22<<4)+thv_c1215-1; counter=thv_c0007 / 2 - 1; ] { mult_addr = sp; sp = sp + thv_c0007 * 4; build buildVpopSd64List; } :vpop^COND vpopSd64List is ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c1619=13 & c2021=3 & c0811=11 & c0000=0) | ($(TMODE_E) & thv_c2327=0x19 & thv_c1619=13 & thv_c2021=3 & thv_c0811=11 & thv_c0000=0) ) & COND & vpopSd64List { build vpopSd64List; } :vpop^COND vpopSdList is ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c1619=13 & c2021=3 & c0811=10) | ($(TMODE_E) & thv_c2327=0x19 & thv_c1619=13 & thv_c2021=3 & thv_c0811=10) ) & COND & vpopSdList { build vpopSdList; } @endif # VFPv2 || VFPv3 || SIMD @if defined(SIMD) define pcodeop SatQ; define pcodeop SignedSatQ; :vqabs^".s"^esize1819 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=7 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=7 & thv_Q6=0 & thv_c0404=0)) & esize1819 & Dn & Dd & Dm { Dd = VectorAbs(Dn,Dm,esize1819); Dd = SatQ(Dd, esize1819, 0:1); } 
# ===========================================================================
# NEON saturating arithmetic (VQ*). Each constructor matches both the ARM
# encoding ($(AMODE), cond=15 unconditional NEON space) and the Thumb
# encoding ($(TMODE_*), thv_* fields), then models the operation with a
# pcodeop followed by SatQ for the saturation step. The trailing operand of
# SatQ / the vector ops is the signedness flag (udt, or 0:1 for signed-only
# forms). NOTE(review): comments only; no tokens changed from the original.
# ===========================================================================

# VQABS (quadword form): saturating absolute value per element.
:vqabs^".s"^esize1819 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0811=7 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0811=7 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qn & Qd
{ Qd = VectorAbs(Qn,Qm,esize1819); Qd = SatQ(Qd, esize1819, 0:1); }

# VQADD: saturating add, doubleword (Q6=0) and quadword (Q6=1) forms.
:vqadd.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=0 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm
{ Dd = VectorAdd(Dn,Dm,esize2021,udt); Dd = SatQ(Dd, esize2021, udt); }

:vqadd.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=0 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=0 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd
{ Qd = VectorAdd(Qn,Qm,esize2021,udt); Qd = SatQ(Qd, esize2021, udt); }

# VQMOVN: saturating narrow (Q source -> D destination); c0606 selects the
# signedness passed to VectorCopyNarrow in the ARM encoding.
:vqmovn.i^esize1819x2 Dd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=5 & c0606 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=5 & thv_c0404=0) ) & esize1819x2 & Dd & Qm
{ Dd = VectorCopyNarrow(Qm,esize1819x2,c0606:1); Dd = SatQ(Dd, esize1819x2,0:1); }

# VQMOVUN: saturating narrow, signed source to unsigned result.
:vqmovun.i^esize1819x2 Dd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0611=9 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0611=9 & thv_c0404=0) ) & esize1819x2 & Dd & Qm
{ Dd = VectorCopyNarrow(Qm,esize1819x2,0:1); Dd = SatQ(Dd, esize1819x2,0:1); }

# VQDMLAL: saturating doubling multiply-accumulate long; register form then
# scalar (by-element, vmlDmA) form.
:vqdmlal.S^esize2021 Qd,Dn,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x9 & c0606=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x9 & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd
{ Qd = VectorDoubleMultiplyAccumulateLong(Dn,Dm,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

:vqdmlal.S^esize2021 Qd,Dn,vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0x3 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x3 & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd
{ Qd = VectorDoubleMultiplyAccumulateLong(Dn,vmlDmA,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

# VQDMLSL: saturating doubling multiply-subtract long; register then scalar.
:vqdmlsl.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2) & c0811=0xb & c0606=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd
{ Qd = VectorDoubleMultiplySubtractLong(Dn,Dm,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

:vqdmlsl.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & (c2021=1 | c2021=2)& c0811=0x7 & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0x7 & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd
{ Qd = VectorDoubleMultiplySubtractLong(Dn,vmlDmA,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

# VQDMULH: saturating doubling multiply returning high half; D and Q
# register forms.
:vqdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Dd
{ Dd = VectorDoubleMultiplyHighHalf(Dn,Dm,esize2021,0:1); Dd = SatQ(Dd, esize2021,0:1); }

:vqdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=0 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & Qm & Qn & Qd
{ Qd = VectorDoubleMultiplyHighHalf(Qn,Qm,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

# VQDMULH scalar (by-element) forms. NOTE(review): these call
# VectorDoubleMultiplyLong rather than VectorDoubleMultiplyHighHalf —
# looks inconsistent with the register forms above; confirm against
# upstream Ghidra before changing.
:vqdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xc & c0606=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Dd
{ Dd = VectorDoubleMultiplyLong(Dn,vmlDmA,esize2021,0:1); Dd = SatQ(Dd, esize2021,0:1); }

:vqdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xc & c0606=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Qn & Qd
{ Qd = VectorDoubleMultiplyLong(Qn,vmlDmA,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

# VQDMULL: saturating doubling multiply long; register then scalar form.
:vqdmull.S^esize2021 Qd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021<3 & c0811=0xD & Q6=0 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0xD & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Qd
{ Qd = VectorDoubleMultiplyLong(Dn,Dm,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

:vqdmull.S^esize2021 Qd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c2021<3 & c0811=0xb & Q6=1 & c0404=0 ) | ( $(TMODE_E) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=0xb & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Qd
{ Qd = VectorDoubleMultiplyLong(Dn,vmlDmA,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

# VQRDMULH: saturating rounding doubling multiply high half; D/Q register
# forms, then D/Q scalar forms.
:vqrdmulh.S^esize2021 Dd, Dn, Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_Q6=0 & thv_c0404=0 ) ) & esize2021 & Dm & Dn & Dd
{ Dd = VectorRoundDoubleMultiplyHighHalf(Dn,Dm,esize2021,0:1); Dd = SatQ(Dd, esize2021,0:1); }

:vqrdmulh.S^esize2021 Qd, Qn, Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2324=2 & (c2021=1 | c2021=2) & c0811=0xb & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1e & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xb & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & Qm & Qn & Qd
{ Qd = VectorRoundDoubleMultiplyHighHalf(Qn,Qm,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

:vqrdmulh.S^esize2021 Dd, Dn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=0 & c2323=1 & (c2021=1 | c2021=2)& c0811=0xd & Q6=1 & c0404=0) | ( $(TMODE_E) & thv_c2327=0x1f & thv_c2323=1 & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Dn & Dd
{ Dd = VectorRoundDoubleMultiplyHighHalf(Dn,vmlDmA,esize2021,0:1); Dd = SatQ(Dd, esize2021,0:1); }

:vqrdmulh.S^esize2021 Qd, Qn, vmlDmA is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c2021=1 | c2021=2) & c0811=0xd & Q6=1 & c0404=0) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2323=1 & (thv_c2021=1 | thv_c2021=2) & thv_c0811=0xd & thv_Q6=1 & thv_c0404=0 ) ) & esize2021 & vmlDmA & Qn & Qd
{ Qd = VectorRoundDoubleMultiplyHighHalf(Qn,vmlDmA,esize2021,0:1); Qd = SatQ(Qd, esize2021,0:1); }

# VQSUB: saturating subtract, D and Q forms.
:vqsub.^udt^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=2 & thv_Q6=0 & thv_c0404=1)) & udt & esize2021 & Dn & Dd & Dm
{ Dd = VectorSub(Dn,Dm,esize2021,udt); Dd = SatQ(Dd, esize2021, udt); }

:vqsub.^udt^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=2 & Q6=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c2323=0 & thv_c0811=2 & thv_Q6=1 & thv_c0404=1) ) & udt & esize2021 & Qm & Qn & Qd
{ Qd = VectorSub(Qn,Qm,esize2021,udt); Qd = SatQ(Qd, esize2021, udt); }

#######
# VRECPE
# Reciprocal estimate, modeled as an opaque pcodeop.
define pcodeop VectorReciprocalEstimate;

:vrecpe.^fdt^32 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=0 & thv_Q6=1 & thv_c0404=0) ) & fdt & Qm & Qd
{ Qd = VectorReciprocalEstimate(Qm,fdt); }

:vrecpe.^fdt^32 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=0 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=0 & thv_Q6=0 & thv_c0404=0) ) & fdt & Dm & Dd
{ Dd = VectorReciprocalEstimate(Dm,fdt); }

#######
# VRECPS
# Newton-Raphson reciprocal step, modeled as an opaque pcodeop.
define pcodeop VectorReciprocalStep;

:vrecps.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=0xf & thv_Q6=1 & thv_c0404=1) ) & Qn & Qm & Qd
{ Qd = VectorReciprocalStep(Qn,Qm); }

:vrecps.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=0 & c0811=0xf & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=0 & thv_c0811=0xf & thv_Q6=0 & thv_c0404=1) ) & Dn & Dm & Dd
{ Dd = VectorReciprocalStep(Dn,Dm); }

#######
# VREV
# Element reversal within 16/32/64-bit groups (selected by c0708), modeled
# as one opaque pcodeop; Q forms first, then D forms.
define pcodeop vrev;

:vrev16.^esize1819x3 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=2 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3
{ Qd = vrev(Qm,esize1819x3); }

:vrev32.^esize1819x3 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=1 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3
{ Qd = vrev(Qm,esize1819x3); }

:vrev64.^esize1819x3 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=0 & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & esize1819x3
{ Qd = vrev(Qm,esize1819x3); }

:vrev16.^esize1819x3 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=2 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=2 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3
{ Dd = vrev(Dm,esize1819x3); }

:vrev32.^esize1819x3 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=1 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=1 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3
{ Dd = vrev(Dm,esize1819x3); }

:vrev64.^esize1819x3 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=0 & c0911=0 & c0708=0 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=0 & thv_c0911=0 & thv_c0708=0 & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & esize1819x3
{ Dd = vrev(Dm,esize1819x3); }

#######
# VSH
# Opaque pcodeops for the NEON shift family.
define pcodeop VectorShiftLeft;
define pcodeop VectorRoundShiftLeft;
define pcodeop VectorShiftRight;
define pcodeop VectorShiftLeftInsert;
define pcodeop VectorShiftRightInsert;
define pcodeop VectorShiftRightNarrow;
define pcodeop VectorShiftRightAccumulate;
define pcodeop VectorRoundShiftRight;
define pcodeop VectorRoundShiftRightNarrow;
define pcodeop VectorRoundShiftRightAccumulate;

# ShiftSize: decodes the element size (8/16/32/64) from the position of the
# leading set bit in imm6/L, for both ARM and Thumb field sets.
ShiftSize: "8" is TMode=0 & c1921=1 & L7=0 { export 8:8; }
ShiftSize: "16" is TMode=0 & c2021=1 & L7=0 { export 16:8; }
ShiftSize: "32" is TMode=0 & c2121=1 & L7=0 { export 32:8; }
ShiftSize: "64" is TMode=0 & L7=1 { export 64:8; }
ShiftSize: "8" is TMode=1 & thv_c1921=1 & thv_L7=0 { export 8:8; }
ShiftSize: "16" is TMode=1 & thv_c2021=1 & thv_L7=0 { export 16:8; }
ShiftSize: "32" is TMode=1 & thv_c2121=1 & thv_L7=0 { export 32:8; }
ShiftSize: "64" is TMode=1 & thv_L7=1 { export 64:8; }

# ShiftImmRLI: right-shift immediate, computed as (2 * element size) - imm6.
ShiftImmRLI: "#"^shift_amt is TMode=0 & c1921=1 & L7=0 & c1621 [ shift_amt = 16 - c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=0 & c2021=1 & L7=0 & c1621 [ shift_amt = 32 - c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=0 & c2121=1 & L7=0 & c1621 [ shift_amt = 64 - c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=0 & L7=1 & c1621 [ shift_amt = 64 - c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_c1921=1 & thv_L7=0 & thv_c1621 [ shift_amt = 16 - thv_c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_c2021=1 & thv_L7=0 & thv_c1621 [ shift_amt = 32 - thv_c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_c2121=1 & thv_L7=0 & thv_c1621 [ shift_amt = 64 - thv_c1621; ] { export *[const]:8 shift_amt; }
ShiftImmRLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ shift_amt = 64 - thv_c1621; ] { export *[const]:8 shift_amt; }

# ShiftImmLLI: left-shift immediate, computed as imm6 - element size.
ShiftImmLLI: "#"^shift_amt is TMode=0 & c1921=1 & L7=0 & c1621 [ shift_amt = c1621 - 8; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=0 & c2021=1 & L7=0 & c1621 [ shift_amt = c1621 - 16; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=0 & c2121=1 & L7=0 & c1621 [ shift_amt = c1621 - 32; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=0 & L7=1 & c1621 [ shift_amt = c1621 - 0; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c1921=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 8; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c2021=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 16; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_c2121=1 & thv_L7=0 & thv_c1621 [ shift_amt = thv_c1621 - 32; ] { export *[const]:8 shift_amt; }
ShiftImmLLI: "#"^shift_amt is TMode=1 & thv_L7=1 & thv_c1621 [ shift_amt = thv_c1621 - 0; ] { export *[const]:8 shift_amt; }

# VQRSHL (immediate display form): saturating rounding shift left, Q then D.
:vqrshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm
{ Qd = VectorRoundShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); }

:vqrshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm
{ Dd = VectorRoundShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); }

# VQSHRN / VQSHRUN / VQRSHRN / VQRSHRUN: saturating (rounding) shift right
# and narrow, Q source to D destination.
:vqshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x24 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x24 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm
{ Dd = VectorShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); }

:vqshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x20 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x20 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm
{ Dd = VectorShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); }

:vqrshrn.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x25 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x25 & thv_c0404=1) ) & udt & esize2021 & ShiftSize & ShiftImmRLI & Dd & Qm
{ Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); }

:vqrshrun.^udt^esize2021 Dd,Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & (c1919=1 | c2020=1 | c2121=1) & c0611=0x21 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & (thv_c1919=1 | thv_c2020=1 | thv_c2121=1) & thv_c0611=0x21 & thv_c0404=1) ) & udt & esize2021 & ShiftImmRLI & Dd & Qm
{ Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI,esize2021,udt); Dd = SatQ(Dd,esize2021,udt); }

# VQSHL / VQSHLU (immediate): saturating shift left by immediate, Q then D.
:vqshl.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1621 & thv_c0811=7 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm
{ Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); }

:vqshl.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=7 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c1621 & thv_c0811=7 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm
{ Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); }

:vqshlu.^udt^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2828=1 & thv_c2327=0x1f & thv_c1621 & thv_c0811=6 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Qm
{ Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,udt); }

:vqshlu.^udt^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c1621 & c0811=6 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2828=1 & thv_c2327=0x1f & thv_c1621 & thv_c0811=6 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Dd & Dm
{ Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,udt); }

# VQSHL (register): saturating shift left by per-element register amount.
:vqshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=1 & thv_c0404=1) ) & udt & esize2021 & Qd & Qm & Qn
{ Qd = VectorShiftLeft(Qm,Qn,esize2021,udt); }

:vqshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=0 & thv_c0404=1) ) & udt & esize2021 & Dd & Dm & Dn
{ Dd = VectorShiftLeft(Dm,Dn,esize2021,udt); }

# VSHL (immediate, quadword): plain shift left by immediate.
:vshl.I^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=5 & c0606=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Qd & Qm
{ Qd = VectorShiftLeft(Qm,ShiftImmLLI,ShiftSize,0:1); }
# ===========================================================================
# NEON shifts (continued), matrix multiply-accumulate, VSQRT, and the start
# of the VST1 store family. Comments only; no tokens changed.
# ===========================================================================

# VSHL (immediate, doubleword).
:vshl.I^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=5 & c0606=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Dd & Dm
{ Dd = VectorShiftLeft(Dm,ShiftImmLLI,ShiftSize,0:1); }

# VSHL (register): shift left by per-element register amount, Q then D.
:vshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=1 & thv_c0404=0) ) & udt & esize2021 & Qd & Qm & Qn
{ Qd = VectorShiftLeft(Qm,Qn,esize2021,udt); }

:vshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=4 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=4 & thv_c0606=0 & thv_c0404=0) ) & udt & esize2021 & Dd & Dm & Dn
{ Dd = VectorShiftLeft(Dm,Dn,esize2021,udt); }

# VSHLL: shift left long (D source widened into Q destination); immediate
# form, then the maximum-shift encoding (shift == element size).
define pcodeop VectorShiftLongLeft;

:vshll.^udt^ShiftSize Qd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=10 & c0607=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=10 & thv_c0607=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmLLI & Qd & Dm
{ Qd = VectorShiftLongLeft(Dm,ShiftImmLLI); }

:vshll.^udt^esize1819 Qd, Dm, "#"^esize1819x3 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=3 & c0607=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=3 & thv_c0607=0 & thv_c0404=0) ) & udt & esize1819 & esize1819x3 & Qd & Dm
{ Qd = VectorShiftLongLeft(Dm,esize1819x3); }

# VRSHL (register): rounding shift left, Q then D.
:vrshl.^udt^esize2021 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=1 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=1 & thv_c0404=0) ) & udt & esize2021 & Qd & Qm & Qn
{ Qd = VectorRoundShiftLeft(Qm,esize2021,Qn); }

:vrshl.^udt^esize2021 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=0 & c0811=5 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1e & thv_c0811=5 & thv_c0606=0 & thv_c0404=0) ) & udt & esize2021 & Dd & Dm & Dn
{ Dd = VectorRoundShiftLeft(Dm,esize2021,Dn); }

# VRSHR: rounding shift right by immediate, Q then D.
:vrshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=2 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm
{ Qd = VectorRoundShiftRight(Qm,ShiftImmRLI); }

:vrshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=2 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=2 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm
{ Dd = VectorRoundShiftRight(Dm,ShiftImmRLI); }

# VRSHRN: rounding shift right and narrow (Q -> D).
:vrshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=8 & c0707=0 & c0606=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=8 & thv_c0707=0 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Dd & Qm
{ Dd = VectorRoundShiftRightNarrow(Qm,ShiftImmRLI); }

# VRSRA: rounding shift right and accumulate into the destination.
:vrsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=3 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm
{ Qd = VectorRoundShiftRightAccumulate(Qd, Qm,ShiftImmRLI); }

:vrsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=3 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=3 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm
{ Dd = VectorRoundShiftRightAccumulate(Dd, Dm,ShiftImmRLI); }

# VSLI: shift left and insert (destination is both read and written).
:vsli.^ShiftSize Dd, Dm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c0811=5 & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Dd & Dm
{ Dd = VectorShiftLeftInsert(Dd, Dm,ShiftImmLLI); }

:vsli.^ShiftSize Qd, Qm, ShiftImmLLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c0811=5 & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=5 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmLLI & Qd & Qm
{ Qd = VectorShiftLeftInsert(Qd, Qm,ShiftImmLLI); }

# VSMMLA / VUMMLA / VUSMMLA: 8-bit integer matrix multiply-accumulate; the
# last argument selects signed (0), unsigned (1) or mixed (2) operands.
# NOTE(review): pcodeop name spells "Multipy" (sic) — kept as-is since it is
# used consistently below.
define pcodeop VectorWidenMultipyAccumulate;

:vsmmla.s8 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & Dn
{ Dd = VectorWidenMultipyAccumulate(Dm,Dn,0:1); }

:vsmmla.s8 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & Qn
{ Qd = VectorWidenMultipyAccumulate(Qm,Qn,0:1); }

:vummla.u8 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=1) ) & Dd & Dm & Dn
{ Dd = VectorWidenMultipyAccumulate(Dm,Dn,1:1); }

:vummla.u8 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xc & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=1) ) & Qd & Qm & Qn
{ Qd = VectorWidenMultipyAccumulate(Qm,Qn,1:1); }

:vusmmla.s8 Dd, Dm, Dn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xc & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=0 & thv_c0404=0) ) & Dd & Dm & Dn
{ Dd = VectorWidenMultipyAccumulate(Dm,Dn,2:1); }

:vusmmla.s8 Qd, Qm, Qn is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xc & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xc & thv_c0606=1 & thv_c0404=0) ) & Qd & Qm & Qn
{ Qd = VectorWidenMultipyAccumulate(Qm,Qn,2:1); }

@endif # SIMD

@if defined(VFPv2) || defined(VFPv3) || defined(SIMD)

# VSQRT: scalar VFP square root (conditional), single then double precision.
:vsqrt^COND^".f32" Sd,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=1 & c0811=10 & c0607=3 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=1 & thv_c0811=10 & thv_c0606=1 & thv_c0404=0) ) & Sm & Sd
{ build COND; build Sd; build Sm; Sd = sqrt(Sm); }

:vsqrt^COND^".f64" Dd,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1d & c2021=3 & c1619=1 & c0811=11 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1d & thv_c2021=3 & thv_c1619=1 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & Dm & Dd
{ build COND; build Dd; build Dm; Dd = sqrt(Dm); }

@endif #VFPv2 | VFPv3 | SIMD

@if defined(SIMD)

# VSRA: shift right and accumulate, Q then D.
:vsra.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=1 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm
{ Qd = VectorShiftRightAccumulate(Qd, Qm,ShiftImmRLI); }

:vsra.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=1 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=1 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm
{ Dd = VectorShiftRightAccumulate(Dd, Dm,ShiftImmRLI); }

# VSRI: shift right and insert, Q then D.
:vsri.^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=4 & thv_c0606=1 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Qd & Qm
{ Qd = VectorShiftRightInsert(Qd, Qm,ShiftImmRLI); }

:vsri.^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2424=1 & c2323=1 & c0811=4 & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1f & thv_c0811=4 & thv_c0606=0 & thv_c0404=1) ) & ShiftSize & ShiftImmRLI & Dd & Dm
{ Dd = VectorShiftRightInsert(Dd, Dm,ShiftImmRLI); }

#######
# VSHR
# Plain shift right by immediate, Q then D.
:vshr.^udt^ShiftSize Qd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=1 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=0 & thv_c0606=1 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Qd & Qm
{ Qd = VectorShiftRight(Qm,ShiftImmRLI); }

:vshr.^udt^ShiftSize Dd, Dm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c0811=0 & c0606=0 & c0404=1) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c0811=0 & thv_c0606=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Dm
{ Dd = VectorShiftRight(Dm,ShiftImmRLI); }

# VSHRN: shift right and narrow (Q -> D). Note udt is matched but not
# displayed in the mnemonic.
define pcodeop VectorShiftNarrowRight;

:vshrn.^ShiftSize Dd, Qm, ShiftImmRLI is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=8 & c0607=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=8 & thv_c0607=0 & thv_c0404=1) ) & udt & ShiftSize & ShiftImmRLI & Dd & Qm
{ Dd = VectorShiftNarrowRight(Qm,ShiftImmRLI); }

#######
# VRSQRTE
# Reciprocal square root estimate, Q then D.
define pcodeop VectorReciprocalSquareRootEstimate;

:vrsqrte.^fdt^32 Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & fdt & Qm & Qd
{ Qd = VectorReciprocalSquareRootEstimate(Qm,fdt); }

:vrsqrte.^fdt^32 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x7 & c2021=3 & c1619=0xb & c0911=2 & c0707=1 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1619=0xb & thv_c0911=2 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0) ) & fdt & Dm & Dd
{ Dd = VectorReciprocalSquareRootEstimate(Dm,fdt); }

#######
# VRSQRTS
# Newton-Raphson reciprocal square root step, Q then D.
define pcodeop VectorReciprocalSquareRootStep;

:vrsqrts.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=0xf & thv_Q6=1 & thv_c0404=1) ) & Qn & Qm & Qd
{ Qd = VectorReciprocalSquareRootStep(Qn,Qm); }

:vrsqrts.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x4 & c2021=2 & c0811=0xf & Q6=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2021=2 & thv_c0811=0xf & thv_Q6=0 & thv_c0404=1) ) & Dn & Dm & Dd
{ Dd = VectorReciprocalSquareRootStep(Dn,Dm); }

#######
# VST1 (multiple single elements)
# buildVst1DdList recursively stores consecutive D registers through
# mult_addr; the disassembly-time 'counter' drives the recursion and
# 'regNum' selects the register. vst1DdList variants map the c0811 encoding
# to the register count and export it (used for writeback).
buildVst1DdList: Dreg is Dreg & counter=1 [ counter=0; regNum=regNum+1; ] { * mult_addr = Dreg; }
buildVst1DdList: Dreg,buildVst1DdList is Dreg & buildVst1DdList [ counter=counter-1; regNum=regNum+1; ] { * mult_addr = Dreg; mult_addr = mult_addr + 8; build buildVst1DdList; }

vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=7 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=1; ] { export 1:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=10 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=2; ] { export 2:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=6 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=3; ] { export 3:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 0 & c0811=2 & D22 & c1215 & buildVst1DdList [ regNum=(D22<<4)+c1215-1; counter=4; ] { export 4:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=7 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=1; ] { export 1:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=10 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=2; ] { export 2:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=6 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=3; ] { export 3:4; }
vst1DdList: "{"^buildVst1DdList^"}" is TMode = 1 & thv_c0811=2 & thv_D22 & thv_c1215 & buildVst1DdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=4; ] { export 4:4; }

@define Vst1DdList "(c0811=2 | c0811=6 | c0811=7 | c0811=10)"
@define T_Vst1DdList "(thv_c0811=2 | thv_c0811=6 | thv_c0811=7 | thv_c0811=10)"

# VST1, no writeback (Rm == 15).
:vst1.^esize0607 vst1DdList,RnAligned45 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=15 & $(Vst1DdList)) | ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & thv_c0003=15 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & vst1DdList
{ mult_addr = RnAligned45; build vst1DdList; }
:vst1.^esize0607 vst1DdList,RnAligned45^"!" is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=13 & $(Vst1DdList)) | ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & thv_c0003=13 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & vst1DdList { mult_addr = RnAligned45; build vst1DdList; RnAligned45 = RnAligned45 + (8 * vst1DdList); } :vst1.^esize0607 vst1DdList,RnAligned45,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & $(Vst1DdList)) | ($(TMODE_F) &thv_c2327=18 & thv_c2021=0 & $(T_Vst1DdList)) ) & RnAligned45 & esize0607 & VRm & vst1DdList { mult_addr = RnAligned45; build vst1DdList; RnAligned45 = RnAligned45 + VRm; } ####### # VST1 (single element to one lane) # vst1Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; } vst1Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; } vst1DdElement2: Dd^"["^vst1Index^"]" is ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) & Dd & vst1Index { ptr:4 = &Dd + vst1Index; *:1 mult_addr = *[register]:1 ptr; } vst1DdElement2: Dd^"["^vst1Index^"]" is ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) & Dd & vst1Index { ptr:4 = &Dd + (2 * vst1Index); *:2 mult_addr = *[register]:2 ptr; } vst1DdElement2: Dd^"["^vst1Index^"]" is ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) & Dd & vst1Index { ptr:4 = &Dd + (4 * vst1Index); *:4 mult_addr = *[register]:4 ptr; } @define Vst1DdElement2 "((c1011=0 & c0404=0) | (c1011=1 & c0505=0) | (c1011=2 & (c0406=0 | c0406=3)))" @define T_Vst1DdElement2 "((thv_c1011=0 & thv_c0404=0) | (thv_c1011=1 & thv_c0505=0) | (thv_c1011=2 & (thv_c0406=0 | thv_c0406=3)))" :vst1.^esize1011 vst1DdElement2,RnAligned2 is (($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c0809=0 & c0003=15 & $(Vst1DdElement2)) | ($(TMODE_F) & thv_c2327=19 & thv_c2021=0 & thv_c0809=0 & thv_c0003=15 & $(T_Vst1DdElement2))) & RnAligned2 & esize1011 & vst1DdElement2 { mult_addr = RnAligned2; build vst1DdElement2; } 
# VST1 single lane, writeback form: advance Rn by the element size.
:vst1.^esize1011 vst1DdElement2,RnAligned2^"!" is (($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c0809=0 & c0003=13 & $(Vst1DdElement2)) | ($(TMODE_F) & thv_c2327=19 & thv_c2021=0 & thv_c0809=0 & thv_c0003=13 & $(T_Vst1DdElement2))) & RnAligned2 & esize1011 & vst1DdElement2
{ mult_addr = RnAligned2; build vst1DdElement2; RnAligned2 = RnAligned2 + esize1011; }

# VST1 single lane, register-offset writeback form.
:vst1.^esize1011 vst1DdElement2,RnAligned2,VRm is (($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c0809=0 & $(Vst1DdElement2)) | ($(TMODE_F) & thv_c2327=19 & thv_c2021=0 & thv_c0809=0 & $(T_Vst1DdElement2))) & VRm & RnAligned2 & esize1011 & vst1DdElement2
{ mult_addr = RnAligned2; build vst1DdElement2; RnAligned2 = RnAligned2 + VRm; }

#######
# VST2
#
#######
# VST2 (multiple 2-element structures)
# vst2Dd interleaves elements of Dreg and the D register regInc slots away
# into memory at mult_addr, one element pair at a time (element size picked
# by c0607: 1, 2 or 4 bytes). NOTE(review): the bare "goto ;" statements
# below appear to have lost their <label> targets during text extraction —
# the loop labels must be restored from the upstream Ghidra ARMneon.sinc
# before this text is compiled; tokens are reproduced exactly as found.
vst2Dd: Dreg is Dreg & ((TMode=0 & c0607=0) | (TMode=1 & thv_c0607=0)) & regInc
{
ptr1:4 = &Dreg;
@if ENDIAN == "little"
ptr2:4 = &Dreg + (regInc * 8);
@else # ENDIAN == "big"
ptr2:4 = &Dreg - (regInc * 8);
@endif # ENDIAN = "big"
mult_dat8 = 8;
*:1 mult_addr = *[register]:1 ptr1; mult_addr = mult_addr + 1;
*:1 mult_addr = *[register]:1 ptr2; mult_addr = mult_addr + 1;
mult_dat8 = mult_dat8 - 1;
if(mult_dat8 == 0) goto ;
ptr1 = ptr1 + 1; ptr2 = ptr2 + 1;
goto ;
}

vst2Dd: Dreg is Dreg & ((TMode=0 & c0607=1) | (TMode=1 & thv_c0607=1)) & regInc
{
ptr1:4 = &Dreg;
@if ENDIAN == "little"
ptr2:4 = &Dreg + (regInc * 8);
@else # ENDIAN == "big"
ptr2:4 = &Dreg - (regInc * 8);
@endif # ENDIAN = "big"
mult_dat8 = 4;
*:2 mult_addr = *[register]:2 ptr1; mult_addr = mult_addr + 2;
*:2 mult_addr = *[register]:2 ptr2; mult_addr = mult_addr + 2;
mult_dat8 = mult_dat8 - 1;
if(mult_dat8 == 0) goto ;
ptr1 = ptr1 + 2; ptr2 = ptr2 + 2;
goto ;
}

vst2Dd: Dreg is Dreg & ((TMode=0 & c0607=2) | (TMode=1 & thv_c0607=2)) & regInc
{
ptr1:4 = &Dreg;
@if ENDIAN == "little"
ptr2:4 = &Dreg + (regInc * 8);
@else # ENDIAN == "big"
ptr2:4 = &Dreg - (regInc * 8);
@endif # ENDIAN = "big"
mult_dat8 = 2;
*:4 mult_addr = *[register]:4 ptr1; mult_addr = mult_addr + 4;
*:4 mult_addr = *[register]:4 ptr2; mult_addr = mult_addr + 4;
mult_dat8 = mult_dat8 - 1;
if(mult_dat8 == 0) goto ;
ptr1 = ptr1 + 4; ptr2 = ptr2 + 4;
goto ;
}

# Recursive register-list builders: list A emits the interleaving stores,
# list B only advances the second-register numbering for the display.
buildVst2DdListA: is counter=0 { }
buildVst2DdListA: vst2Dd,buildVst2DdListA is vst2Dd & buildVst2DdListA & esize0607 [ counter=counter-1; regNum=regNum+1; ] { build vst2Dd; build buildVst2DdListA; }

buildVst2DdListB: is counter2=0 { }
buildVst2DdListB: Dreg2 is Dreg2 & counter2=1 & esize0607 [ counter2=0; reg2Num=reg2Num+1; ] { }
buildVst2DdListB: Dreg2,buildVst2DdListB is Dreg2 & buildVst2DdListB & esize0607 [ counter2=counter2-1; reg2Num=reg2Num+1; ] { }

# vst2DdList: maps c0811 to register spacing (regInc) and structure count;
# exports the total number of D registers stored (for writeback).
vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=0 & c0811=8 & D22 & c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(D22<<4)+c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; }
vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=0 & c0811=9 & D22 & c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; }
vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=0 & c0811=3 & D22 & c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(D22<<4)+c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVst2DdListA; build buildVst2DdListB; export 4:4; }
vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=8 & thv_D22 & thv_c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; reg2Num=regNum+1; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; }
vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=9 & thv_D22 & thv_c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=1; counter2=1; ] { build buildVst2DdListA; build buildVst2DdListB; export 2:4; }
vst2DdList: "{"^buildVst2DdListA^buildVst2DdListB^"}" is TMode=1 & thv_c0811=3 & thv_D22 & thv_c1215 & buildVst2DdListA & buildVst2DdListB [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=2; reg2Num=regNum+2; counter=2; counter2=2; ] { build buildVst2DdListA; build buildVst2DdListB; export 4:4; }

@define Vst2DdList "(c0811=3 | c0811=8 | c0811=9)"
@define T_Vst2DdList "(thv_c0811=3 | thv_c0811=8 | thv_c0811=9)"

# VST2 multiple structures: no-writeback, writeback, and register-offset.
:vst2.^esize0607 vst2DdList,RnAligned45 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=15 & $(Vst2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & thv_c0003=15 & $(T_Vst2DdList) ) ) & RnAligned45 & esize0607 & vst2DdList
{ mult_addr = RnAligned45; build vst2DdList; }

:vst2.^esize0607 vst2DdList,RnAligned45^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0607<3 & c0003=13 & $(Vst2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & thv_c0003=13 & $(T_Vst2DdList) ) ) & RnAligned45 & esize0607 & vst2DdList
{ mult_addr = RnAligned45; build vst2DdList; RnAligned45 = RnAligned45 + (8 * vst2DdList); }

:vst2.^esize0607 vst2DdList,RnAligned45,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0607<3 & $(Vst2DdList) ) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0607<3 & $(T_Vst2DdList) ) ) & RnAligned45 & VRm & esize0607 & vst2DdList
{ mult_addr = RnAligned45; build vst2DdList; RnAligned45 = RnAligned45 + VRm; }

#######
# VST2 (single 2-element structure to one lane)
# vst2DdElement2: store one 1/2/4-byte lane of Dreg to mult_addr (index
# decoded by vld2Index, shared with the VLD2 constructors).
vst2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dreg + vld2Index; *:1 mult_addr = *[register]:1 ptr; }
vst2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dreg + (vld2Index * 2); *:2 mult_addr = *[register]:2 ptr; }
vst2DdElement2: Dreg^"["^vld2Index^"]" is Dreg & vld2Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dreg + (vld2Index * 4); *:4 mult_addr = *[register]:4 ptr; }

# Alignment suffix for the VST2 single-lane base register.
vst2Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { }
vst2Align2: ":16" is TMode=0 & c1011=0 & c0404=1 { }
vst2Align2: ":32" is TMode=0 & c1011=1 & c0404=1 { }
vst2Align2: ":64" is TMode=0 & c1011=2 & c0405=1 { }
vst2Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { }
vst2Align2: ":16" is TMode=1 & thv_c1011=0 & thv_c0404=1 { }
vst2Align2: ":32" is TMode=1 & thv_c1011=1 & thv_c0404=1 { }
vst2Align2: ":64" is TMode=1 & thv_c1011=2 & thv_c0405=1 { }

vst2RnAligned2: "["^VRn^vst2Align2^"]" is VRn & vst2Align2 { export VRn; }

# Register pair for the single-lane store; regInc selects single or double
# register spacing.
buildVst2DdList2: is counter=0 { }
buildVst2DdList2: vst2DdElement2 is counter=1 & vst2DdElement2 [ counter=0; regNum=regNum+regInc; ] { build vst2DdElement2; }
buildVst2DdList2: vst2DdElement2,buildVst2DdList2 is vst2DdElement2 & buildVst2DdList2 & esize1011 [ counter=counter-1; regNum=regNum+regInc; ] { build vst2DdElement2; mult_addr = mult_addr + esize1011; build buildVst2DdList2; }

vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=0 & D22 & c1215 & buildVst2DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=2; ] { } # Single
vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVst2DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=2; ] { } # Double
vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=2; ] { } # Single
vst2DdList2: "{"^buildVst2DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst2DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=2; ] { } # Double

:vst2.^esize1011 vst2DdList2,vst2RnAligned2 is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 & thv_c0003=15 ) ) & vst2RnAligned2 & esize1011
& vst2DdList2 { mult_addr = vst2RnAligned2; build vst2DdList2; } :vst2.^esize1011 vst2DdList2,vst2RnAligned2^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 & thv_c0003=13 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 { mult_addr = vst2RnAligned2; build vst2DdList2; vst2RnAligned2 = vst2RnAligned2 + (2 * esize1011); } :vst2.^esize1011 vst2DdList2,vst2RnAligned2,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=1 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=1 ) ) & vst2RnAligned2 & esize1011 & vst2DdList2 & VRm { mult_addr = vst2RnAligned2; build vst2DdList2; vst2RnAligned2 = vst2RnAligned2 + VRm; } ####### # VST3 # ####### # VST3 (multiple 3-element structures) # vst3Align: is TMode=0 & c0404=0 { } vst3Align: ":64" is TMode=0 & c0404=1 { } vst3Align: is TMode=1 & thv_c0404=0 { } vst3Align: ":64" is TMode=1 & thv_c0404=1 { } vst3RnAligned: "["^VRn^vst3Align^"]" is VRn & vst3Align { export VRn; } vst3Dd: Dreg is Dreg & ((TMode=0 & c0607=0) | (TMode=1 & thv_c0607=0)) & regInc { ptr1:4 = &Dreg; @if ENDIAN == "little" ptr2:4 = &Dreg + (regInc * 8); ptr3:4 = &Dreg + (regInc * 16); @else # ENDIAN == "big" ptr2:4 = &Dreg - (regInc * 8); ptr3:4 = &Dreg - (regInc * 16); @endif # ENDIAN = "big" mult_dat8 = 8; *:1 mult_addr = *[register]:1 ptr1; mult_addr = mult_addr + 1; *:1 mult_addr = *[register]:1 ptr2; mult_addr = mult_addr + 1; *:1 mult_addr = *[register]:1 ptr3; mult_addr = mult_addr + 1; mult_dat8 = mult_dat8 - 1; if(mult_dat8 == 0) goto ; ptr1 = ptr1 + 1; ptr2 = ptr2 + 1; ptr3 = ptr3 + 1; goto ; } vst3Dd: Dreg is Dreg & ((TMode=0 & c0607=1) | (TMode=1 & thv_c0607=1)) & regInc { ptr1:4 = &Dreg; @if ENDIAN == "little" ptr2:4 = &Dreg + (regInc * 8); ptr3:4 = &Dreg + (regInc * 16); @else # ENDIAN == "big" ptr2:4 = &Dreg - (regInc * 8); ptr3:4 = &Dreg - (regInc * 16); @endif # ENDIAN = "big" 
mult_dat8 = 4; *:2 mult_addr = *[register]:2 ptr1; mult_addr = mult_addr + 2; *:2 mult_addr = *[register]:2 ptr2; mult_addr = mult_addr + 2; *:2 mult_addr = *[register]:2 ptr3; mult_addr = mult_addr + 2; mult_dat8 = mult_dat8 - 1; if(mult_dat8 == 0) goto ; ptr1 = ptr1 + 2; ptr2 = ptr2 + 2; ptr3 = ptr3 + 2; goto ; } vst3Dd: Dreg is Dreg & ((TMode=0 & c0607=2) | (TMode=1 & thv_c0607=2)) & regInc { ptr1:4 = &Dreg; @if ENDIAN == "little" ptr2:4 = &Dreg + (regInc * 8); ptr3:4 = &Dreg + (regInc * 16); @else # ENDIAN == "big" ptr2:4 = &Dreg - (regInc * 8); ptr3:4 = &Dreg - (regInc * 16); @endif # ENDIAN = "big" mult_dat8 = 2; *:4 mult_addr = *[register]:4 ptr1; mult_addr = mult_addr + 4; *:4 mult_addr = *[register]:4 ptr2; mult_addr = mult_addr + 4; *:4 mult_addr = *[register]:4 ptr3; mult_addr = mult_addr + 4; mult_dat8 = mult_dat8 - 1; if(mult_dat8 == 0) goto ; ptr1 = ptr1 + 4; ptr2 = ptr2 + 4; ptr3 = ptr3 + 4; goto ; } # Have to build only once, but because Dreg depends on regNum, have to reset it back to what it was to the start buildvst3DdList: is counter=0 & vst3Dd [ regNum=regNum-3*regInc; ] { build vst3Dd; } buildvst3DdList: Dreg^buildvst3DdList is counter=1 & Dreg & buildvst3DdList [ counter=0; regNum=regNum+regInc; ] { } buildvst3DdList: Dreg,buildvst3DdList is Dreg & buildvst3DdList [ counter=counter-1; regNum=regNum+regInc; ] { } vst3DdList: "{"^buildvst3DdList^"}" is TMode=0 & c0811=4 & D22 & c1215 & buildvst3DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single vst3DdList: "{"^buildvst3DdList^"}" is TMode=0 & c0811=5 & D22 & c1215 & buildvst3DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double vst3DdList: "{"^buildvst3DdList^"}" is TMode=1 & thv_c0811=4 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single vst3DdList: "{"^buildvst3DdList^"}" is TMode=1 & thv_c0811=5 & thv_D22 & thv_c1215 & buildvst3DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # 
Double :vst3.^esize0607 vst3DdList,vst3RnAligned is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0003=15 ) ) & vst3RnAligned & esize0607 & vst3DdList { mult_addr = vst3RnAligned; build vst3DdList; } :vst3.^esize0607 vst3DdList,vst3RnAligned^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0003=13 ) ) & vst3RnAligned & esize0607 & vst3DdList { mult_addr = vst3RnAligned; build vst3DdList; vst3RnAligned = vst3RnAligned + (8 * 3); } :vst3.^esize0607 vst3DdList,vst3RnAligned,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0) | ( $(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 ) ) & vst3RnAligned & esize0607 & vst3DdList & VRm { mult_addr = vst3RnAligned; build vst3DdList; vst3RnAligned = vst3RnAligned + VRm; } ####### # VST3 (single 3-element structure to one lane) # vst3Rn: "["^VRn^"]" is VRn { export VRn; } vst3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dreg + vld3Index; *:1 mult_addr = *[register]:1 ptr; } vst3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dreg + (vld3Index * 2); *:2 mult_addr = *[register]:2 ptr; } vst3DdElement2: Dreg^"["^vld3Index^"]" is Dreg & vld3Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dreg + (vld3Index * 4); *:4 mult_addr = *[register]:4 ptr; } buildVst3DdList2: is counter=0 { } buildVst3DdList2: vst3DdElement2 is counter=1 & vst3DdElement2 [ counter=0; regNum=regNum+regInc; ] { build vst3DdElement2; } buildVst3DdList2: vst3DdElement2,buildVst3DdList2 is vst3DdElement2 & buildVst3DdList2 & esize1011 [ counter=counter-1; regNum=regNum+regInc; ] { build vst3DdElement2; mult_addr = mult_addr + esize1011; build buildVst3DdList2; } vst3DdList2: "{"^buildVst3DdList2^"}" is TMode=0 & D22 & c1215 & buildVst3DdList2 [ 
regNum=(D22<<4)+c1215-1; regInc=1; counter=3; ] { } # Single vst3DdList2: "{"^buildVst3DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVst3DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=3; ] { } # Double vst3DdList2: "{"^buildVst3DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=3; ] { } # Single vst3DdList2: "{"^buildVst3DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst3DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=3; ] { } # Double :vst3.^esize1011 vst3DdList2,vst3Rn is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=15 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 & thv_c0003=15 ) ) & vst3Rn & esize1011 & vst3DdList2 { mult_addr = vst3Rn; build vst3DdList2; } :vst3.^esize1011 vst3DdList2,vst3Rn^"!" is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 & c0003=13 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 & thv_c0003=13 ) ) & vst3Rn & esize1011 & vst3DdList2 { mult_addr = vst3Rn; build vst3DdList2; vst3Rn = vst3Rn + (3 * esize1011); } :vst3.^esize1011 vst3DdList2,vst3Rn,VRm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=2 ) | ( $(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=2 ) ) & vst3Rn & esize1011 & vst3DdList2 & VRm { mult_addr = vst3Rn; build vst3DdList2; vst3Rn = vst3Rn + VRm; } ####### # VST4 (multiple 4-element structures) # vst4Align: is TMode=0 & c0405=0 { } vst4Align: ":64" is TMode=0 & c0405=1 { } vst4Align: ":128" is TMode=0 & c0405=2 { } vst4Align: ":256" is TMode=0 & c0405=3 { } vst4Align: is TMode=1 & thv_c0405=0 { } vst4Align: ":64" is TMode=1 & thv_c0405=1 { } vst4Align: ":128" is TMode=1 & thv_c0405=2 { } vst4Align: ":256" is TMode=1 & thv_c0405=3 { } vst4RnAligned: 
"["^VRn^vst4Align^"]" is VRn & vst4Align { export VRn; } vst4Dd: Dreg is Dreg & ((TMode=0 & c0607=0) | (TMode=1 & thv_c0607=0)) & regInc { ptr1:4 = &Dreg; @if ENDIAN == "little" ptr2:4 = &Dreg + (regInc * 8); ptr3:4 = &Dreg + (regInc * 16); ptr4:4 = &Dreg + (regInc * 24); @else # ENDIAN == "big" ptr2:4 = &Dreg - (regInc * 8); ptr3:4 = &Dreg - (regInc * 16); ptr4:4 = &Dreg - (regInc * 24); @endif # ENDIAN = "big" mult_dat8 = 8; *:1 mult_addr = *[register]:1 ptr1; mult_addr = mult_addr + 1; *:1 mult_addr = *[register]:1 ptr2; mult_addr = mult_addr + 1; *:1 mult_addr = *[register]:1 ptr3; mult_addr = mult_addr + 1; *:1 mult_addr = *[register]:1 ptr4; mult_addr = mult_addr + 1; mult_dat8 = mult_dat8 - 1; if(mult_dat8 == 0) goto ; ptr1 = ptr1 + 1; ptr2 = ptr2 + 1; ptr3 = ptr3 + 1; ptr4 = ptr4 + 1; goto ; } vst4Dd: Dreg is Dreg & ((TMode=0 & c0607=1) | (TMode=1 & thv_c0607=1)) & regInc { ptr1:4 = &Dreg; @if ENDIAN == "little" ptr2:4 = &Dreg + (regInc * 8); ptr3:4 = &Dreg + (regInc * 16); ptr4:4 = &Dreg + (regInc * 24); @else # ENDIAN == "big" ptr2:4 = &Dreg - (regInc * 8); ptr3:4 = &Dreg - (regInc * 16); ptr4:4 = &Dreg - (regInc * 24); @endif # ENDIAN = "big" mult_dat8 = 4; *:2 mult_addr = *[register]:2 ptr1; mult_addr = mult_addr + 2; *:2 mult_addr = *[register]:2 ptr2; mult_addr = mult_addr + 2; *:2 mult_addr = *[register]:2 ptr3; mult_addr = mult_addr + 2; *:2 mult_addr = *[register]:2 ptr4; mult_addr = mult_addr + 2; mult_dat8 = mult_dat8 - 1; if(mult_dat8 == 0) goto ; ptr1 = ptr1 + 2; ptr2 = ptr2 + 2; ptr3 = ptr3 + 2; ptr4 = ptr4 + 2; goto ; } vst4Dd: Dreg is Dreg & ((TMode=0 & c0607=2) | (TMode=1 & thv_c0607=2)) & regInc { ptr1:4 = &Dreg; @if ENDIAN == "little" ptr2:4 = &Dreg + (regInc * 8); ptr3:4 = &Dreg + (regInc * 16); ptr4:4 = &Dreg + (regInc * 24); @else # ENDIAN == "big" ptr2:4 = &Dreg - (regInc * 8); ptr3:4 = &Dreg - (regInc * 16); ptr4:4 = &Dreg - (regInc * 24); @endif # ENDIAN = "big" mult_dat8 = 2; *:4 mult_addr = *[register]:4 ptr1; mult_addr = 
mult_addr + 4; *:4 mult_addr = *[register]:4 ptr2; mult_addr = mult_addr + 4; *:4 mult_addr = *[register]:4 ptr3; mult_addr = mult_addr + 4; *:4 mult_addr = *[register]:4 ptr4; mult_addr = mult_addr + 4; mult_dat8 = mult_dat8 - 1; if(mult_dat8 == 0) goto ; ptr1 = ptr1 + 4; ptr2 = ptr2 + 4; ptr3 = ptr3 + 4; ptr4 = ptr4 + 4; goto ; } # Have to build only once, but because Dreg depends on regNum, have to reset it back to what it was to the start buildVst4DdList: is counter=0 & vst4Dd [ regNum=regNum-4*regInc; ] { build vst4Dd; } buildVst4DdList: Dreg^buildVst4DdList is counter=1 & Dreg & buildVst4DdList [ counter=0; regNum=regNum+regInc; ] { } buildVst4DdList: Dreg,buildVst4DdList is Dreg & buildVst4DdList [ counter=counter-1; regNum=regNum+regInc; ] { } vst4DdList: "{"^buildVst4DdList^"}" is TMode=0 & c0808=0 & D22 & c1215 & buildVst4DdList [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single vst4DdList: "{"^buildVst4DdList^"}" is TMode=0 & c0808=1 & D22 & c1215 & buildVst4DdList [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double vst4DdList: "{"^buildVst4DdList^"}" is TMode=1 & thv_c0808=0 & thv_D22 & thv_c1215 & buildVst4DdList [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single vst4DdList: "{"^buildVst4DdList^"}" is TMode=1 & thv_c0808=1 & thv_D22 & thv_c1215 & buildVst4DdList [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double :vst4.^esize0607 vst4DdList,vst4RnAligned is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=15) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3 & thv_c0003=15) ) & vst4RnAligned & esize0607 & vst4DdList { mult_addr = vst4RnAligned; build vst4DdList; } :vst4.^esize0607 vst4DdList,vst4RnAligned^"!" 
is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3 & c0003=13) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3 & thv_c0003=13) ) & vst4RnAligned & esize0607 & vst4DdList
{
	mult_addr = vst4RnAligned;
	build vst4DdList;
	# writeback: 4 D registers of 8 bytes each were stored
	vst4RnAligned = vst4RnAligned + (8 * 4);
}

:vst4.^esize0607 vst4DdList,vst4RnAligned,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=8 & c2021=0 & c0911=0 & c0607<3) | ($(TMODE_F) & thv_c2327=0x12 & thv_c2021=0 & thv_c0911=0 & thv_c0607<3) ) & VRm & vst4RnAligned & esize0607 & vst4DdList
{
	mult_addr = vst4RnAligned;
	build vst4DdList;
	# register-offset writeback form
	vst4RnAligned = vst4RnAligned + VRm;
}

#######
# VST4 (single 4-element structure from one lane)
#

# Lane index: c0507 holds index_align; shifting right by the size field
# (c1011) leaves the pure lane number, with the alignment bits dropped.
vst4Index: val is TMode=0 & c0507 & c1011 [ val = c0507 >> c1011; ] { tmp:4 = val; export tmp; }
vst4Index: val is TMode=1 & thv_c0507 & thv_c1011 [ val = thv_c0507 >> thv_c1011; ] { tmp:4 = val; export tmp; }

# One element store per D register in the list; vst4Index is a lane number,
# so the byte offset into the register is scaled by the element size,
# matching vst2DdElement2/vst3DdElement2 above.
# 8-bit lane
vst4DdElement2: Dreg^"["^vst4Index^"]" is Dreg & vst4Index & ((TMode=0 & c1011=0) | (TMode=1 & thv_c1011=0)) { ptr:4 = &Dreg + vst4Index; *:1 mult_addr = *[register]:1 ptr; }
# 16-bit lane.  Fixes: removed a stray leading "TMode=0 &" that made the
# Thumb alternative unreachable (the sibling variants have no such term),
# and scale the lane index by 2 bytes as vst2/vst3 do.
vst4DdElement2: Dreg^"["^vst4Index^"]" is Dreg & vst4Index & ((TMode=0 & c1011=1) | (TMode=1 & thv_c1011=1)) { ptr:4 = &Dreg + (vst4Index * 2); *:2 mult_addr = *[register]:2 ptr; }
# 32-bit lane; lane index scaled by 4 bytes (consistency fix, as above)
vst4DdElement2: Dreg^"["^vst4Index^"]" is Dreg & vst4Index & ((TMode=0 & c1011=2) | (TMode=1 & thv_c1011=2)) { ptr:4 = &Dreg + (vst4Index * 4); *:4 mult_addr = *[register]:4 ptr; }
# c1011=3: stores the whole D register.
# NOTE(review): size=0b11 looks reserved for VST4 single in the ARM ARM -- confirm.
vst4DdElement2: Dreg^"["^vst4Index^"]" is Dreg & vst4Index & ((TMode=0 & c1011=3) | (TMode=1 & thv_c1011=3)) { *mult_addr = Dreg; }

# Alignment suffix decoded from index_align low bits
vst4Align2: is TMode=0 & c0404=0 & (c1111=0 | c0505=0) { }
vst4Align2: ":32" is TMode=0 & c1011=0 & c0404=1 { }
vst4Align2: ":64" is TMode=0 & ((c1011=1 & c0404=1) | (c1011=2 & c0405=1)) { }
vst4Align2: ":128" is TMode=0 & c1011=2 & c0405=2 { }
vst4Align2: is TMode=1 & thv_c0404=0 & (thv_c1111=0 | thv_c0505=0) { }
vst4Align2: ":32" is TMode=1 & thv_c1011=0 & thv_c0404=1 { }
vst4Align2: ":64" is TMode=1 &
((thv_c1011=1 & thv_c0404=1) | (thv_c1011=2 & thv_c0405=1)) { }
vst4Align2: ":128" is TMode=1 & thv_c1011=2 & thv_c0405=2 { }

# Base-register operand with optional alignment suffix, e.g. "[r2:64]"
vst4RnAligned2: "["^VRn^vst4Align2^"]" is VRn & vst4Align2 { export VRn; }

# Recursive list builder for the 4-register single-lane store.  The context
# registers drive the recursion: "counter" counts remaining list entries and
# "regNum"/"regInc" select consecutive (or every-other) D registers.  Each
# step stores one lane element and advances mult_addr by the element size.
buildVst4DdList2: is counter=0 { }
buildVst4DdList2: vst4DdElement2 is counter=1 & vst4DdElement2 [ counter=0; regNum=regNum+regInc; ] { build vst4DdElement2; }
buildVst4DdList2: vst4DdElement2,buildVst4DdList2 is vst4DdElement2 & buildVst4DdList2 & esize1011 [ counter=counter-1; regNum=regNum+regInc; ] { build vst4DdElement2; mult_addr = mult_addr + esize1011; build buildVst4DdList2; }

# Register-list seeds: "Single" spacing uses consecutive D registers
# (regInc=1), "Double" spacing every other register (regInc=2); regNum is
# pre-decremented because the builder increments before each use.
vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=0 & D22 & c1215 & buildVst4DdList2 [ regNum=(D22<<4)+c1215-1; regInc=1; counter=4; ] { } # Single
vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=0 & ((c1011=1 & c0505=1) | (c1011=2 & c0606=1)) & D22 & c1215 & buildVst4DdList2 [ regNum=(D22<<4)+c1215-2; regInc=2; counter=4; ] { } # Double
vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=1 & thv_D22 & thv_c1215 & buildVst4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-1; regInc=1; counter=4; ] { } # Single
vst4DdList2: "{"^buildVst4DdList2^"}" is TMode=1 & ((thv_c1011=1 & thv_c0505=1) | (thv_c1011=2 & thv_c0606=1)) & thv_D22 & thv_c1215 & buildVst4DdList2 [ regNum=(thv_D22<<4)+thv_c1215-2; regInc=2; counter=4; ] { } # Double

# VST4 single-lane, no writeback (Rm field = 15)
:vst4.^esize1011 vst4DdList2,vst4RnAligned2 is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=15) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3 & thv_c0003=15) ) & vst4RnAligned2 & esize1011 & vst4DdList2 { mult_addr = vst4RnAligned2; build vst4DdList2; }

# VST4 single-lane with immediate writeback (Rm field = 13); body continues on the next line
:vst4.^esize1011 vst4DdList2,vst4RnAligned2^"!"
is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3 & c0003=13) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3 & thv_c0003=13) ) & vst4RnAligned2 & esize1011 & vst4DdList2 { mult_addr = vst4RnAligned2; build vst4DdList2; vst4RnAligned2 = vst4RnAligned2 + (4 * esize1011); } :vst4.^esize1011 vst4DdList2,vst4RnAligned2,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=9 & c2021=0 & c1011<3 & c0809=3) | ($(TMODE_F) & thv_c2327=0x13 & thv_c2021=0 & thv_c1011<3 & thv_c0809=3) ) & VRm & vst4RnAligned2 & esize1011 & vst4DdList2 { mult_addr = vst4RnAligned2; build vst4DdList2; vst4RnAligned2 = vst4RnAligned2 + VRm; } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) || defined(SIMD) ####### # VSTM (A1) # buildVstmDdList: is counter=0 { } buildVstmDdList: Dreg is counter=1 & Dreg [ counter=0; regNum=regNum+1; ] { *mult_addr = Dreg; mult_addr = mult_addr + 8; } buildVstmDdList: Dreg,buildVstmDdList is Dreg & buildVstmDdList [ counter=counter-1; regNum=regNum+1; ] { *mult_addr = Dreg; mult_addr = mult_addr + 8; build buildVstmDdList; } vstmDdList: "{"^buildVstmDdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVstmDdList [ regNum=(D22<<4)+c1215-1; counter=c0007>>1; ] { } vstmDdList: "{"^buildVstmDdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVstmDdList [ regNum=(thv_D22<<4)+thv_c1215-1; counter=thv_c0007>>1; ] { } :vstmia^COND vldmRn,vstmDdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x19 & c2121 & c2020=0 & c0811=11 & c0000=0) | ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=0 & thv_c0811=11 & thv_c0000=0) ) & vldmRn & vstmDdList & vldmOffset & vldmUpdate { mult_addr = vldmRn; build vstmDdList; build vldmUpdate; } :vstmdb^COND vldmRn,vstmDdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c2121=1 & c2020=0 & c0811=11 & c0000=0) | ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=0 & thv_c0811=11 & thv_c0000=0) ) & vldmRn & vstmDdList & vldmOffset { local start_addr = vldmRn - vldmOffset; 
mult_addr = start_addr; build vstmDdList; vldmRn = start_addr; } @endif # VFPv2 | VFPv3 | SIMD @if defined(VFPv2) || defined(VFPv3) ####### # VSTM (A2) # buildVstmSdList: is counter=0 { } buildVstmSdList: Sreg is counter=1 & Sreg [ counter=0; regNum=regNum+1; ] { *mult_addr = Sreg; mult_addr = mult_addr + 4; } buildVstmSdList: Sreg,buildVstmSdList is Sreg & buildVstmSdList [ counter=counter-1; regNum=regNum+1; ] { *mult_addr = Sreg; mult_addr = mult_addr + 4; build buildVstmSdList; } vstmSdList: "{"^buildVstmSdList^"}" is TMode=0 & D22 & c1215 & c0007 & buildVstmSdList [ regNum=(c1215<<1) + D22 -1; counter=c0007; ] { } vstmSdList: "{"^buildVstmSdList^"}" is TMode=1 & thv_D22 & thv_c1215 & thv_c0007 & buildVstmSdList [ regNum=(thv_c1215<<1) + thv_D22 -1; counter=thv_c0007; ] { } :vstmia^COND vldmRn,vstmSdList is COND & ( ( $(AMODE) & ARMcond=1 & c2327=0x19 & c2121 & c2020=0 & c0811=10 ) | ($(TMODE_E) & thv_c2327=0x19 & thv_c2121 & thv_c2020=0 & thv_c0811=10 ) ) & vldmRn & vstmSdList & vldmOffset & vldmUpdate { mult_addr = vldmRn; build vstmSdList; build vldmUpdate; } :vstmdb^COND vldmRn,vstmSdList is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1a & c2121=1 & c2020=0 & c0811=10 ) | ($(TMODE_E) & thv_c2327=0x1a & thv_c2121=1 & thv_c2020=0 & thv_c0811=10) ) & vldmRn & vstmSdList & vldmOffset { local start_addr = vldmRn - vldmOffset; mult_addr = start_addr; build vstmSdList; vldmRn = start_addr; } ####### # VSTR # :vstr^COND^".64" Dd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=0 & c0811=11) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=0 & thv_c0811=11)) & Dd & vldrRn { *vldrRn = Dd; } :vstr^COND^".32" Sd,vldrRn is COND & ( ($(AMODE) & ARMcond=1 & c2427=13 & c2021=0 & c0811=10) | ($(TMODE_E) & thv_c2427=13 & thv_c2021=0 & thv_c0811=10)) & Sd & vldrRn { *vldrRn = Sd; } @endif # VFPv2 || VFPv3 || SIMD ####### # VSUB # @if defined(SIMD) define pcodeop FloatVectorSub; define pcodeop VectorSubAndNarrow; :vsub.i^esize2021 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 
& c2327=6 & c0811=8 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1e & thv_c0811=8 & thv_Q6=0 & thv_c0404=0)) & esize2021 & Dn & Dd & Dm { Dd = VectorSub(Dn,Dm,esize2021); } :vsub.i^esize2021 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=6 & c0811=8 & Q6=1 & c0404=0) | ($(TMODE_F) &thv_c2327=0x1e & thv_c0811=8 & thv_Q6=1 & thv_c0404=0) ) & esize2021 & Qm & Qn & Qd { Qd = VectorSub(Qn,Qm,esize2021); } :vsub.f32 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_Q6=0 & thv_c0404=0) ) & Dm & Dn & Dd { Dd = FloatVectorSub(Dn,Dm,2:1,32:1); } :vsub.f32 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c2121=1 & c0811=13 & Q6=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1e & thv_c2121=1 & thv_c0811=13 & thv_Q6=1 & thv_c0404=0) ) & Qn & Qd & Qm { Qd = FloatVectorSub(Qn,Qm,2:1,32:1); } :vsubhn.i^esize2021x2 Dd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=5 & c0811=6 & Q6=0 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1f & thv_c0811=6 & thv_Q6=0 & thv_c0404=0)) & esize2021x2 & Dd & Qn & Qm { Dd = VectorSubAndNarrow(Qn,Qm,esize2021x2); } :vsubl.^udt^esize2021 Qd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=2 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=2 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Dn & Qd & Dm { Qd = VectorSub(Dn,Dm,esize2021,udt); } :vsubw.^udt^esize2021 Qd,Qn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2527=1 & c2323=1 & c2021<3 & c0811=3 & c0606=0 & c0404=0) | ($(TMODE_EorF) & thv_c2327=0x1f & thv_c2021<3 & thv_c0811=3 & thv_c0606=0 & thv_c0404=0) ) & esize2021 & udt & Qn & Qd & Dm { Qd = VectorSub(Qn,Dm,esize2021,udt); } @endif # SIMD @if defined(VFPv2) || defined(VFPv3) :vsub^COND^".f32" Sd,Sn,Sm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=10 & c0606=1 & c0404=0) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=10 & thv_c0606=1 & 
thv_c0404=0) ) & Sm & Sn & Sd { build COND; build Sd; build Sm; build Sn; Sd = Sn f- Sm; } :vsub^COND^".f64" Dd,Dn,Dm is COND & ( ($(AMODE) & ARMcond=1 & c2327=0x1c & c2021=3 & c0811=11 & c0606=1 & c0404=0 ) | ($(TMODE_E) & thv_c2327=0x1c & thv_c2021=3 & thv_c0811=11 & thv_c0606=1 & thv_c0404=0) ) & Dm & Dn & Dd { build COND; build Dd; build Dm; build Dn; Dd = Dn f- Dm; } @endif # VFPv2 || VFPv3 @if defined(SIMD) ####### # VSWP # :vswp Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=0 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm { tmp:8 = Dm; Dm = Dd; Dd = tmp; } :vswp Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=0 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=0 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm { tmp:16 = Qm; Qm = Qd; Qd = tmp; } ########### # VTBL/VTBX # define pcodeop VectorTableLookup; buildVtblDdList: is counter=0 { } buildVtblDdList: Dreg is Dreg & counter=1 [ counter=0; regNum=regNum+1; ] { } buildVtblDdList: Dreg,buildVtblDdList is Dreg & buildVtblDdList [ counter=counter-1; regNum=regNum+1; ] { build buildVtblDdList; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=0 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=1; ] { export 1:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=1 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=2; ] { export 2:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=2 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=3; ] { export 3:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=0 & c0809=3 & N7 & c1619 & buildVtblDdList [ regNum=(N7<<4)+c1619-1; counter=4; ] { export 4:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=0 & thv_N7 & thv_c1619 & buildVtblDdList [ 
regNum=(thv_N7<<4)+thv_c1619-1; counter=1; ] { export 1:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=1 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=2; ] { export 2:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=2 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=3; ] { export 3:4; } vtblDdList: "{"^buildVtblDdList^"}" is TMode=1 & thv_c0809=3 & thv_N7 & thv_c1619 & buildVtblDdList [ regNum=(thv_N7<<4)+thv_c1619-1; counter=4; ] { export 4:4; } :vtbl.8 VRd,vtblDdList,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1011=2 & thv_c0606=0 & thv_c0404=0 ) ) & VRm & VRd & VRn & vtblDdList { VRd = VectorTableLookup(VRm,VRn,vtblDdList); } :vtbx.8 VRd,vtblDdList,VRm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1011=2 & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1011=2 & thv_c0606=1 & thv_c0404=0 ) ) & VRm & VRd & VRn & vtblDdList { VRd = VectorTableLookup(VRm,VRn,vtblDdList); } ###### # VTST # define pcodeop VectorTest; :vtst.^esize2021 Qd, Qn, Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & c0606=1 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_c0606=1 & thv_c0404=1) ) & esize2021 & Qm & Qn & Qd { Qd = VectorTest(Qn, Qm); } :vtst.^esize2021 Dd, Dn, Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=4 & c0811=8 & c0606=0 & c0404=1) | ($(TMODE_E) & thv_c2327=0x1e & thv_c0811=8 & thv_c0606=0 & thv_c0404=1) ) & esize2021 & Dm & Dn & Dd { Dd = VectorTest(Dn, Dm); } define pcodeop VectorTranspose; :vtrn^"."^esize1819 Dd,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=0 & thv_c0707=1 & thv_Q6=0 & thv_c0404=0)) & esize1819 & Dd & Dm { Dd = VectorTranspose(Dm,esize1819); } :vtrn^"."^esize1819 
Qd,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1617=2 & c0811=0 & c0707=1 & Q6=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1617=2 & thv_c0811=0 & thv_c0707=1 & thv_Q6=1 & thv_c0404=0) ) & esize1819 & Qm & Qd { Qd = VectorTranspose(Qm,esize1819); } ##### # V[SU]DOT define pcodeop VectorSignedDotProduct; define pcodeop VectorUnsignedDotProduct; define pcodeop VectorSignedUnsignedDotProduct; define pcodeop VectorUnsignedSignedDotProduct; :vsdot.s8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorSignedDotProduct(Dn,Dm0,Mindex); } :vsdot.s8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Dm0 & Mindex & Qn & Qd { Qd = VectorSignedDotProduct(Qn,Dm0,Mindex); } :vsdot.s8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd { Dd = VectorSignedDotProduct(Dn,Dm); } :vsdot.s8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm & Qn & Qd { Qd = VectorSignedDotProduct(Qn,Qm); } :vudot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorUnsignedDotProduct(Dn,Dm0,Mindex); } :vudot.u8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1c & c2021=2 & c0811=0xd & c0606=1 & c0404=1) | ($(TMODE_F) & 
thv_c2327=0x1c & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Dm0 & Mindex & Qn & Qd { Qd = VectorUnsignedDotProduct(Qn,Dm0,Mindex); } :vudot.u8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm & Dn & Dd { Dd = VectorUnsignedDotProduct(Dn,Dm); } :vudot.u8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x18 & c2021=2 & c0811=0xd & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x18 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Qm & Qn & Qd { Qd = VectorUnsignedDotProduct(Qn,Qm); } :vsudot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=0 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=1) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorSignedUnsignedDotProduct(Dn,Dm0,Mindex); } :vsudot.u8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=1 & c0404=1) | ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=1) ) & Dm0 & Mindex & Qn & Qd { Qd = VectorSignedUnsignedDotProduct(Qn,Dm0,Mindex); } :vusdot.u8 Dd,Dn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm0 & Mindex & Dn & Dd { Dd = VectorUnsignedSignedDotProduct(Dn,Dm0,Mindex); } :vusdot.u8 Qd,Qn,Dm0^Mindex is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x1d & c2021=0 & c0811=0xd & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x1d & thv_c2021=0 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Dm0 & Mindex & Qn & Qd { Qd = VectorUnsignedSignedDotProduct(Qn,Dm0,Mindex); } :vusdot.u8 Dd,Dn,Dm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xd & c0606=0 & c0404=0) | ($(TMODE_F) & thv_c2327=0x19 & 
thv_c2021=2 & thv_c0811=0xd & thv_c0606=0 & thv_c0404=0) ) & Dm & Dn & Dd { Dd = VectorUnsignedSignedDotProduct(Dn,Dm); } :vusdot.u8 Qd,Qn,Qm is ( ($(AMODE) & ARMcond=0 & cond=15 & c2327=0x19 & c2021=2 & c0811=0xd & c0606=1 & c0404=0) | ($(TMODE_F) & thv_c2327=0x19 & thv_c2021=2 & thv_c0811=0xd & thv_c0606=1 & thv_c0404=0) ) & Qm & Qn & Qd { Qd = VectorUnsignedSignedDotProduct(Qn,Qm); } ####### # VUZP # define pcodeop VectorUnzip; :vuzp^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=0 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=2 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = VectorUnzip(Dm,esize1819); } :vuzp^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=2 & Q6=1 & c0404=0 ) | ( $(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=2 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = VectorUnzip(Qm,esize1819); } ####### # VZIP # define pcodeop VectorZip; :vzip^esize1819 Dd,Dm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=0 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=3 & thv_c0606=0 & thv_c0404=0 ) ) & Dd & Dm & esize1819 { Dd = VectorZip(Dm,esize1819); } :vzip^esize1819 Qd,Qm is ( ( $(AMODE) & ARMcond=0 & cond=15 & c2327=7 & c2021=3 & c1819<3 & c1617=2 & c0711=3 & Q6=1 & c0404=0 ) | ($(TMODE_F) & thv_c2327=0x1f & thv_c2021=3 & thv_c1819<3 & thv_c1617=2 & thv_c0711=3 & thv_c0606=1 & thv_c0404=0 ) ) & Qd & Qm & esize1819 { Qd = VectorZip(Qm,esize1819); } @endif # SIMD ================================================ FILE: pypcode/processors/ARM/data/languages/ARMt.pspec ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARMtTHUMB.pspec 
================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARMt_v45.pspec ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARMt_v6.pspec ================================================ ================================================ FILE: pypcode/processors/ARM/data/languages/ARMv8.sinc ================================================
# This macro is always defined in this file, but the ifdef may be
# useful if it is moved to ARMinstructions.sinc.

# crc32_type: mnemonic-suffix subtable shared by the CRC32/CRC32C
# constructors below.  In ARM mode (TMode=0) the c2122 field selects the
# operand size (00=b, 01=h, 10=w) and c0909=1 selects the CRC32C ("c")
# forms; in Thumb mode (TMode=1) thv_c0405 selects the size and the C
# form is distinguished by the opcode bits matched in the constructors.
crc32_type: "b" is TMode=0 & c2122=0b00 & c0909=0 { }
crc32_type: "h" is TMode=0 & c2122=0b01 & c0909=0 { }
crc32_type: "w" is TMode=0 & c2122=0b10 & c0909=0 { }
crc32_type: "cb" is TMode=0 & c2122=0b00 & c0909=1 { }
crc32_type: "ch" is TMode=0 & c2122=0b01 & c0909=1 { }
crc32_type: "cw" is TMode=0 & c2122=0b10 & c0909=1 { }
crc32_type: "b" is TMode=1 & thv_c0405=0b00 { }
crc32_type: "h" is TMode=1 & thv_c0405=0b01 { }
crc32_type: "w" is TMode=1 & thv_c0405=0b10 { }

# Opaque pcode operation modeling the CRC computation performed by the
# crc32/crc32c instructions (no bit-level semantics are modeled here).
define pcodeop Crc32Calc;

# F5.1.39,40 p7226,7229 CRC32,CRC32C A1
:crc32^crc32_type Rd,Rn,Rm is TMode=0 & c2831=0b1110 & c2327=0b00010 & c2020=0 & c0407=0b0100 & c1011=0b00 & c0808=0 & crc32_type & Rn & Rd & Rm
{
    Rd = Crc32Calc(Rn,Rm);
}

# F5.1.39 p7226 CRC32 T1
:crc32^crc32_type thv_Rt2,thv_Rn,thv_Rm is TMode=1 & thv_c2031=0b111110101100 & thv_c1215=0b1111 & thv_c0607=0b10 & crc32_type & thv_Rn & thv_Rt2 & thv_Rm
{
    thv_Rt2 = Crc32Calc(thv_Rn,thv_Rm);
}

# F5.1.40 p7229 CRC32C T1
:crc32c^crc32_type thv_Rt2,thv_Rn,thv_Rm is TMode=1 & thv_c2031=0b111110101101 & thv_c1215=0b1111 & thv_c0607=0b10 & crc32_type & thv_Rn & thv_Rt2 & thv_Rm
{
    thv_Rt2 = Crc32Calc(thv_Rn,thv_Rm);
}

# Opaque pcode operation used by the DCPS1/DCPS2/DCPS3 constructor below.
define pcodeop DCPSInstruction;

# dcps_lev: the target level digit (1-3) taken from Thumb bits thv_c0001;
# each alternative exports the level as a 1-byte constant.
dcps_lev:1 is TMode=1 & thv_c0001=0b01 { export 1:1; }
dcps_lev:2 is TMode=1 & thv_c0001=0b10 { export 2:1; }
dcps_lev:3 is TMode=1 & thv_c0001=0b11 { export 3:1; }

# F5.1.43 p7235
DCPS1,DCPS2,DCPS3 DSPS1 variant :dcps^dcps_lev is TMode=1 & thv_c1631=0b1111011110001111 & thv_c0215=0b10000000000000 & (thv_c0101=1 | thv_c0000=1) & dcps_lev { DCPSInstruction(dcps_lev:1); } # F5.1.57 p7268 LDA :lda^COND Rd,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x19 & Rn & Rd & c0011=0xc9f { build COND; Rd = *Rn; } # F5.1.57 p7268 LDA :lda thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1010 & ItCond & thv_Rn & thv_Rt { build ItCond; thv_Rt = *thv_Rn; } # F5.1.58 p7270 LDAB :ldab^COND Rd,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1d & Rn & Rd & c0011=0xc9f { build COND; val:1 = *Rn; Rd = zext(val); } # F5.1.58 p7270 LDAB :ldab thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1000 & ItCond & thv_Rt & thv_Rn { build ItCond; val:1 = *thv_Rn; thv_Rt = zext(val); } # F5.1.59 p7272 LDAEX :ldaex^COND Rd,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x19 & Rn & Rd & c0011=0xe9f { build COND; Rd = *Rn; } # F5.1.59 p7272 LDAEX :ldaex thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1110 & ItCond & thv_Rt & thv_Rn { build ItCond; thv_Rt = *thv_Rn; } # F5.1.60 p7274 LDAEXB :ldaexb^COND Rd,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1d & Rn & Rd & c0011=0xe9f { build COND; val:1 = *Rn; Rd = zext(val); } # F5.1.60 p7274 LDAEXB :ldaexb thv_Rt,thv_Rn is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1100 & ItCond & thv_Rt & thv_Rn { build ItCond; val:1 = *thv_Rn; thv_Rt = zext(val); } # F5.1.61 p7274 LDAEXD :ldaexd^COND Rd,Rd2,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1b & Rn & Rd & Rd2 & c0011=0xe9f { local addr:4 = Rn; build COND; @if ENDIAN == "big" Rd = *(addr + 4); Rd2 = *(addr); @else # ENDIAN == "little" Rd = *(addr); Rd2 = *(addr + 4); @endif # ENDIAN == "little" } # F5.1.61 p7274 LDAEXD :ldaexd thv_Rt,thv_Rt2,[thv_Rn] is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1111 & ItCond & thv_Rt & thv_Rt2 & thv_Rn { local addr:4 = thv_Rn; build ItCond; @if ENDIAN == "big" thv_Rt = *(addr + 4); thv_Rt2 = 
*(addr); @else # ENDIAN == "little" thv_Rt = *(addr); thv_Rt2 = *(addr + 4); @endif # ENDIAN == "little" } # F5.1.62 p7278 LDAEXH :ldaexh^COND Rd,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1f & Rn & Rd & c0011=0xe9f { build COND; val:2 = *Rn; Rd = zext(val); } # F5.1.62 p7278 LDAEXH :ldaexh thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1101 & ItCond & thv_Rt & thv_Rn { build ItCond; val:2 = *thv_Rn; thv_Rt = zext(val); } # F5.1.63 p7280 LDAH :ldah^COND Rd,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1f & Rn & Rd & c0011=0xc9f { build COND; val:2 = *Rn; Rd = zext(val); } # F5.1.63 p7280 LDAH :ldah thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001101 & thv_c0407=0b1001 & ItCond & thv_Rt & thv_Rn { build ItCond; val:2 = *thv_Rn; thv_Rt = zext(val); } # F5.1.185 p7573 SEVL A1 variant :sevl^COND is TMode=0 & ARMcond=1 & COND & c1627=0b001100100000 & c0007=0b00000101 { build COND; SendEvent(); } # F5.1.185 p7573 SEVL T2 variant :sevl.w is TMode=1 & thv_c2031=0b111100111010 & thv_c1415=0b10 & thv_c1212=0 & thv_c0010=0b00000000101 & ItCond { build ItCond; SendEvent(); } # F5.1.217 p7642 STL :stl^COND Rm,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x18 & Rn & c0415=0xfc9 & Rm { build COND; *Rn = Rm; } # F5.1.217 p7642 STL :stl thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1010 & ItCond & thv_Rt & thv_Rn { build ItCond; *thv_Rn = thv_Rt; } # F5.1.218 p7644 STLB :stlb^COND Rm,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1c & Rn & c0415=0xfc9 & Rm { build COND; *:1 Rn = Rm[0,8]; } # F5.1.218 p7644 STLB :stlb thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1000 & ItCond & thv_Rt & thv_Rn { build ItCond; *:1 thv_Rn = thv_Rt[0,8]; } # F5.1.219 p7646 STLEX :stlex^COND Rd,Rm,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x18 & Rn & Rd & c0411=0xe9 & Rm { build COND; *Rn = Rm; Rd = 0; } # F5.1.219 p7646 STLEX :stlex thv_Rm,thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1110 & ItCond & thv_Rm & thv_Rt 
& thv_Rn { build ItCond; *thv_Rn = thv_Rt; thv_Rm = 0; } # F5.1.220 p7649 STLEXB :stlexb^COND Rd,Rm,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1c & Rn & Rd & c0411=0xe9 & Rm { build COND; *:1 Rn = Rm[0,8]; Rd = 0; } # F5.1.220 p7649 STLEXB :stlexb thv_Rm,thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1100 & ItCond & thv_Rm & thv_Rt & thv_Rn { build ItCond; *:1 thv_Rn = thv_Rt[0,8]; thv_Rm = 0; } # F5.1.221 p7651 STLEXD :stlexd^COND Rd,Rm,Rm2,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1a & Rn & Rd & c0411=0xe9 & Rm & Rm2 { build COND; @if ENDIAN == "big" *Rn = Rm; *(Rn + 4) = Rm2; @else # ENDIAN == "little" *Rn = Rm2; *(Rn + 4) = Rm; @endif # ENDIAN == "little" Rd = 0; } # F5.1.221 p7651 STLEXD :stlexd thv_Rm,thv_Rt,thv_Rt2,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1111 & ItCond & thv_Rm & thv_Rt & thv_Rt2 & thv_Rn { build ItCond; @if ENDIAN == "big" *thv_Rn = thv_Rt; *(thv_Rn + 4) = thv_Rt2; @else # ENDIAN == "little" *thv_Rn = thv_Rt2; *(thv_Rn + 4) = thv_Rt; @endif # ENDIAN == "little" thv_Rm = 0; } # F5.1.222 p7654 STLEXH :stlexh^COND Rd,Rm,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1e & Rn & Rd & c0411=0xe9 & Rm { build COND; *:2 Rn = Rm[0,16]; Rd = 0; } # F5.1.222 p7654 STLEXH :stlexh thv_Rm,thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1101 & ItCond & thv_Rm & thv_Rt & thv_Rn { build ItCond; *:2 thv_Rn = thv_Rt[0,16]; thv_Rm = 0; } # F5.1.223 p7657 STLH :stlh^COND Rm,[Rn] is TMode=0 & ARMcond=1 & COND & c2027=0x1e & Rn & c0415=0xfc9 & Rm { build COND; *:2 Rn = Rm[0,16]; } # F5.1.223 p7657 STLH :stlh thv_Rt,[thv_Rn] is TMode=1 & thv_c2031=0b111010001100 & thv_c0407=0b1001 & ItCond & thv_Rt & thv_Rn { build ItCond; *:2 thv_Rn = thv_Rt[0,16]; } @ifdef INCLUDE_NEON # Advanced SIMD support / NEON in ARMv8 ####### # macro declarations # The Inexact flag is bit 4 of FPEXC @define FPEXC_IXF "fpexc[3,1]" # Rounding modes, as used in pseudocode, defined as an enumeration # '01' N @define 
FPRounding_TIEEVEN "0:1" # '10' P @define FPRounding_POSINF "1:1" # '11' M @define FPRounding_NEGINF "2:1" @define FPRounding_ZERO "3:1" # '00' A @define FPRounding_TIEAWAY "4:1" @define FPRounding_ODD "5:1" ####### # pcodeop declarations # FixedToFP(fp, M, N, fbits, unsigned, rounding) # Convert M-bit fixed point with fbits fractional bits to N-bit # floating point, controlled by unsigned flag and rounding. Can # also be used with packed "SIMD" floats. define pcodeop FixedToFP; # FPConvert(fp, M, N [, rounding]) # Convert floating point from M-bit to N-bit precision. # Can also be used with packed "SIMD" floats. Sometimes # equivalent to float2float. M, N are the input and output sizes # (16, 32, 64), implied by pseudocode, but given explicitly # here. Rounding is only required when converting to integral # type. define pcodeop FPConvert; # FPConvertInexact() # At the end of any rounding or conversion operation, the # pseudocode tests whether the converted value is identical to # the original value. If it is not identical, and if the "exact" # argument is true, then it sets the floating point exception # FPEXC.Inexact bit. This function is understood to return 0/1 # depending on whether conversion was exact (0) or inexact (1). # define pcodeop FPConvertInexact; # FPToFixed(fp, M, N, fbits, unsigned, rounding) # Convert M-bit floating point to N-bit fixed point with fbits # fractional bits, controlled by unsigned flag and rounding. # between different precisions. Can also be used with packed # "SIMD" floats. define pcodeop FPToFixed; # FPRoundInt(fp, N, rounding, exact) # Round fp to nearest integral floating point, controlled by # rounding. If exact is true, set FPSR.IXC flag. Can also be # used with packed "SIMD" floats.
define pcodeop FPRoundInt; # PolynomialMult(op1, op2) define pcodeop PolynomialMult; ####### # The VCVT instructions are a large family for converting between # floating point numbers and integers, of all sizes and combinations # F6.1.58 p7998 A1 cases size = 10 (c0809) :vcvt^COND^".f64.f32" Dd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11011 & c1616=1 & c1011=0b10 & c0707=1 & c0606=1 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11011 & thv_c1616=1 & thv_c1011=0b10 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) & COND & Dd & Sm { build COND; Dd = float2float(Sm); } # F6.1.58 p7998 A1 cases size = 11 (c0809) :vcvt^COND^".f32.f64" Sd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11011 & c1616=1 & c1011=0b10 & c0707=1 & c0606=1 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11011 & thv_c1616=1 & thv_c1011=0b10 & thv_c0707=1 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) & COND & Sd & Dm { build COND; Sd = float2float(Dm); } # F6.1.59 p8000 A1 op == 1 (c0808) :vcvt.f32.f16 Qd,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b01 & thv_c1617=0b10 & thv_c0911=0b011 & thv_c0607=0b00 & thv_c0404=0 & thv_c0808=1)) & Qd & Dm { Qd = float2float(Dm:2); } # F6.1.59 p8000 A1 op == 0 (c0808) :vcvt.f16.f32 Dd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b01 & c1617=0b10 & c0911=0b011 & c0607=0b00 & c0404=0 & c0808=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b01 & thv_c1617=0b10 & thv_c0911=0b011 & thv_c0607=0b00 & thv_c0404=0 & thv_c0808=0)) & Dd & Qm { Dd = float2float(Qm); } vcvt_56_64_dt: ".f32.s32" is ((TMode=0 & c0708=0b00) | (TMode=1 & thv_c0708=0b00)) & Dd & Dm { Dd = FixedToFP(Dm, 32:1, 32:1, 0:1, 0:1, 
$(FPRounding_TIEEVEN)); } vcvt_56_64_dt: ".f32.u32" is ((TMode=0 & c0708=0b01) | (TMode=1 & thv_c0708=0b01)) & Dd & Dm { Dd = FixedToFP(Dm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_TIEEVEN)); } vcvt_56_64_dt: ".s32.f32" is ((TMode=0 & c0708=0b10) | (TMode=1 & thv_c0708=0b10)) & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_ZERO)); } vcvt_56_64_dt: ".u32.f32" is ((TMode=0 & c0708=0b11) | (TMode=1 & thv_c0708=0b11)) & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_ZERO)); } vcvt_56_128_dt: ".f32.s32" is ((TMode=0 & c0708=0b00) | (TMode=1 & thv_c0708=0b00)) & Qd & Qm { Qd = FixedToFP(Qm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_TIEEVEN)); } vcvt_56_128_dt: ".f32.u32" is ((TMode=0 & c0708=0b01) | (TMode=1 & thv_c0708=0b01)) & Qd & Qm { Qd = FixedToFP(Qm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_TIEEVEN)); } vcvt_56_128_dt: ".s32.f32" is ((TMode=0 & c0708=0b10) | (TMode=1 & thv_c0708=0b10)) & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 0:1, $(FPRounding_ZERO)); } vcvt_56_128_dt: ".u32.f32" is ((TMode=0 & c0708=0b11) | (TMode=1 & thv_c0708=0b11)) & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 1:1, $(FPRounding_ZERO)); } # F6.1.60 p8002 A1 Q == 0 (c0606) :vcvt^vcvt_56_64_dt Dd,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c0911=0b011 & thv_c0404=0 & thv_c0606=0)) & vcvt_56_64_dt & Dd & Dm { } # F6.1.60 p8002 A1 Q == 1 (c0606) :vcvt^vcvt_56_128_dt Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c0911=0b011 & c0404=0 & c0606=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c0911=0b011 & thv_c0404=0 & thv_c0606=1)) & vcvt_56_128_dt & Qd & Qm { } # F6.1.61 p8005 A1 opc2==100 && size==10 (c1618, c0809) :vcvt^COND^".u32.f32" Sd,Sm is ((TMode=0 & 
ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b100 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b10)) & COND & Sd & Sm { build COND; Sd = zext(Sm f> 0) * (trunc(Sm)); } # F6.1.61 p8005 A1 opc2==101 && size==10 (c1618, c0809) :vcvt^COND^".s32.f32" Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b101 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b10)) & COND & Sd & Sm { build COND; Sd = trunc(Sm); } # F6.1.61 p8005 A1 opc2==100 && size==11 (c1618, c0809) :vcvt^COND^".u32.f64" Sd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b100 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b11)) & COND & Sd & Dm { build COND; local tmp:8 = zext(Dm f> 0) * (trunc(Dm)); Sd = tmp:4; } # F6.1.61 p8005 A1 opc2==101 && size==11 (c1618, c0809) :vcvt^COND^".s32.f64" Sd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b11 & c0404=0 & c1618=0b101 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b11 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b11)) & COND & Sd & Dm { build COND; local tmp:8 = trunc(Dm); Sd = tmp:4; } # The rounding mode depends on c0707=0 => FPSCR else ZERO vcvt_58_3232_dt: ".f32.u32" is ((TMode=0 & c0708=0b00) | (TMode=1 & thv_c0708=0b00)) & Sd & Sm { local tmp:8 = zext(Sm); Sd = int2float(tmp); } vcvt_58_3232_dt: ".f32.s32" is ((TMode=0 & c0708=0b01) | (TMode=1 & thv_c0708=0b01)) & Sd & Sm { local tmp:8 = sext(Sm); Sd = int2float(tmp); } vcvt_58_6432_dt: ".f64.u32" is ((TMode=0 & c0708=0b10) 
| (TMode=1 & thv_c0708=0b10)) & Dd & Sm { local tmp:8 = zext(Sm); Dd = int2float(tmp); } vcvt_58_6432_dt: ".f64.s32" is ((TMode=0 & c0708=0b11) | (TMode=1 & thv_c0708=0b11)) & Dd & Sm { local tmp:8 = sext(Sm); Dd = int2float(tmp); } # F6.1.62 p8009 A1 size == 10 (c0809) :vcvt^COND^vcvt_58_3232_dt Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11100 & c1616=0 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11100 & thv_c1616=0 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) & COND & vcvt_58_3232_dt & Sd & Sm { build COND; build vcvt_58_3232_dt; } # F6.1.62 p8009 A1 size == 11 (c0809) :vcvt^COND^vcvt_58_6432_dt Dd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11100 & c1616=0 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11100 & thv_c1616=0 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) & COND & vcvt_58_6432_dt & Dd & Sm { build COND; build vcvt_58_6432_dt; } vcvt_59_fbits_built: fbits is TMode=0 & c1621 [ fbits = 64 - c1621; ] { export * [const]:1 fbits; } vcvt_59_fbits_built: fbits is TMode=1 & thv_c1621 [ fbits = 64 - thv_c1621; ] { export * [const]:1 fbits; } vcvt_59_fbits: "#"^fbits is TMode=0 & c1621 [ fbits = 64 - c1621; ] { } vcvt_59_fbits: "#"^fbits is TMode=1 & thv_c1621 [ fbits = 64 - thv_c1621; ] { } vcvt_59_32_dt: ".f32.s32" is ((TMode=0 & c0809=2 & c2424=0) | (TMode=1 & thv_c0809=2 & thv_c2828=0)) & Dd & Dm & vcvt_59_fbits_built { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_59_32_dt: ".f32.u32" is ((TMode=0 & c0809=2 & c2424=1) | (TMode=1 & thv_c0809=2 & thv_c2828=1)) & Dd & Dm & vcvt_59_fbits_built { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_59_32_dt: ".s32.f32" is ((TMode=0 & c0809=3 & c2424=0) | (TMode=1 & thv_c0809=3 & thv_c2828=0)) & Dd & Dm & vcvt_59_fbits_built { Dd = FPToFixed(Dm, 
32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_59_32_dt: ".u32.f32" is ((TMode=0 & c0809=3 & c2424=1) | (TMode=1 & thv_c0809=3 & thv_c2828=1)) & Dd & Dm & vcvt_59_fbits_built { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); } vcvt_59_32_dt: ".f16.s16" is ((TMode=0 & c0809=0 & c2424=0) | (TMode=1 & thv_c0809=0 & thv_c2828=0)) & Dd & Dm & vcvt_59_fbits_built { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_59_32_dt: ".f16.u16" is ((TMode=0 & c0809=0 & c2424=1) | (TMode=1 & thv_c0809=0 & thv_c2828=1)) & Dd & Dm & vcvt_59_fbits_built { Dd = FixedToFP(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_59_32_dt: ".s16.f16" is ((TMode=0 & c0809=1 & c2424=0) | (TMode=1 & thv_c0809=1 & thv_c2828=0)) & Dd & Dm & vcvt_59_fbits_built { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_59_32_dt: ".u16.f16" is ((TMode=0 & c0809=1 & c2424=1) | (TMode=1 & thv_c0809=1 & thv_c2828=1)) & Dd & Dm & vcvt_59_fbits_built { Dd = FPToFixed(Dm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); } vcvt_59_64_dt: ".f32.s32" is ((TMode=0 & c0809=2 & c2424=0) | (TMode=1 & thv_c0809=2 & thv_c2828=0)) & Qd & Qm & vcvt_59_fbits_built { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_59_64_dt: ".f32.u32" is ((TMode=0 & c0809=2 & c2424=1) | (TMode=1 & thv_c0809=2 & thv_c2828=1)) & Qd & Qm & vcvt_59_fbits_built { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_59_64_dt: ".s32.f32" is ((TMode=0 & c0809=3 & c2424=0) | (TMode=1 & thv_c0809=3 & thv_c2828=0)) & Qd & Qm & vcvt_59_fbits_built { Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_59_64_dt: ".u32.f32" is ((TMode=0 & c0809=3 & c2424=1) | (TMode=1 & thv_c0809=3 & thv_c2828=1)) & Qd & Qm & vcvt_59_fbits_built { Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, 
$(FPRounding_ZERO)); } vcvt_59_64_dt: ".f16.s16" is ((TMode=0 & c0809=0 & c2424=0) | (TMode=1 & thv_c0809=0 & thv_c2828=0)) & Qd & Qm & vcvt_59_fbits_built { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_59_64_dt: ".f16.u16" is ((TMode=0 & c0809=0 & c2424=1) | (TMode=1 & thv_c0809=0 & thv_c2828=1)) & Qd & Qm & vcvt_59_fbits_built { Qd = FixedToFP(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_59_64_dt: ".s16.f16" is ((TMode=0 & c0809=1 & c2424=0) | (TMode=1 & thv_c0809=1 & thv_c2828=0)) & Qd & Qm & vcvt_59_fbits_built { Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_59_64_dt: ".u16.f16" is ((TMode=0 & c0809=1 & c2424=1) | (TMode=1 & thv_c0809=1 & thv_c2828=1)) & Qd & Qm & vcvt_59_fbits_built { Qd = FPToFixed(Qm, 32:1, 32:1, vcvt_59_fbits_built, 1:1, $(FPRounding_ZERO)); } # Should add rounding here, if dt2 is s32 or u32 then rounding is # FPRounding_ZERO otherwise FPRounding_TIEEVEN # F6.1.63 p8012 A1 Q = 0 (c0606) :vcvt^vcvt_59_32_dt Dd,Dm,vcvt_59_fbits is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c1011=0b11 & c0707=0 & c0404=1 & c0606=0) | (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c1011=0b11 & thv_c0707=0 & thv_c0404=1 & thv_c0606=0)) & vcvt_59_32_dt & vcvt_59_fbits & Dd & Dm { } # F6.1.63 p8012 A1 Q = 1 (c0606) :vcvt^vcvt_59_64_dt Qd,Qm,vcvt_59_fbits is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2527=0b001 & c2323=1 & c2121=1 & c1011=0b11 & c0707=0 & c0404=1 & c0606=1) | (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & thv_c2121=1 & thv_c1011=0b11 & thv_c0707=0 & thv_c0404=1 & thv_c0606=1)) & vcvt_59_64_dt & vcvt_59_fbits & Qd & Qm { } vcvt_60_fbits_built: fbits is TMode=0 & c0707=0 & c0505 & c0003 [fbits = 16 - ( c0003 * 2 + c0505); ] { export * [const]:1 fbits; } vcvt_60_fbits_built: fbits is TMode=1 & thv_c0707=0 & thv_c0505 & thv_c0003 [fbits = 16 - (thv_c0003 * 2 + thv_c0505); ] { export * [const]:1
fbits; } vcvt_60_fbits_built: fbits is TMode=0 & c0707=1 & c0505 & c0003 [fbits = 32 - ( c0003 * 2 + c0505); ] { export * [const]:1 fbits; } vcvt_60_fbits_built: fbits is TMode=1 & thv_c0707=1 & thv_c0505 & thv_c0003 [fbits = 32 - (thv_c0003 * 2 + thv_c0505); ] { export * [const]:1 fbits; } vcvt_60_fbits: "#"^fbits is TMode=0 & c0707=0 & c0505 & c0003 [fbits = 16 - ( c0003 * 2 + c0505); ] { } vcvt_60_fbits: "#"^fbits is TMode=1 & thv_c0707=0 & thv_c0505 & thv_c0003 [fbits = 16 - (thv_c0003 * 2 + thv_c0505); ] { } vcvt_60_fbits: "#"^fbits is TMode=0 & c0707=1 & c0505 & c0003 [fbits = 32 - ( c0003 * 2 + c0505); ] { } vcvt_60_fbits: "#"^fbits is TMode=1 & thv_c0707=1 & thv_c0505 & thv_c0003 [fbits = 32 - (thv_c0003 * 2 + thv_c0505); ] { } vcvt_60_32_dt: ".f32.s16" is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b10 & c0707=0) | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=0)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FixedToFP(Sd2, 16:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_60_32_dt: ".f32.s32" is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b10 & c0707=1) | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=1)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FixedToFP(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_60_32_dt: ".f32.u16" is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b10 & c0707=0) | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=0)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FixedToFP(Sd2, 16:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_60_32_dt: ".f32.u32" is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b10 & c0707=1) | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=1)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FixedToFP(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_60_32_dt: ".s16.f32" is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b10 & c0707=0) | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=0)) & 
Sd & Sd2 & vcvt_60_fbits_built { Sd = FPToFixed(Sd2, 32:1, 16:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_60_32_dt: ".s32.f32" is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b10 & c0707=1) | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b10 & thv_c0707=1)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FPToFixed(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_60_32_dt: ".u16.f32" is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b10 & c0707=0) | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=0)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FPToFixed(Sd2, 32:1, 16:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } vcvt_60_32_dt: ".u32.f32" is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b10 & c0707=1) | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b10 & thv_c0707=1)) & Sd & Sd2 & vcvt_60_fbits_built { Sd = FPToFixed(Sd2, 32:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } vcvt_60_64_dt: ".f64.s16" is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b11 & c0707=0) | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=0)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FixedToFP(Dd2, 16:1, 64:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_60_64_dt: ".f64.s32" is ((TMode=0 & c1818=0 & c1616=0 & c0809=0b11 & c0707=1) | (TMode=1 & thv_c1818=0 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=1)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FixedToFP(Dd2, 32:1, 64:1, vcvt_60_fbits_built, 0:1, $(FPRounding_TIEEVEN)); } vcvt_60_64_dt: ".f64.u16" is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b11 & c0707=0) | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=0)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FixedToFP(Dd2, 16:1, 64:1, vcvt_60_fbits_built, 1:1, $(FPRounding_TIEEVEN)); } vcvt_60_64_dt: ".f64.u32" is ((TMode=0 & c1818=0 & c1616=1 & c0809=0b11 & c0707=1) | (TMode=1 & thv_c1818=0 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=1)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FixedToFP(Dd2, 32:1, 64:1, vcvt_60_fbits_built, 1:1, 
$(FPRounding_TIEEVEN)); } vcvt_60_64_dt: ".s16.f64" is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b11 & c0707=0) | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=0)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FPToFixed(Dd2, 64:1, 16:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_60_64_dt: ".s32.f64" is ((TMode=0 & c1818=1 & c1616=0 & c0809=0b11 & c0707=1) | (TMode=1 & thv_c1818=1 & thv_c1616=0 & thv_c0809=0b11 & thv_c0707=1)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FPToFixed(Dd2, 64:1, 32:1, vcvt_60_fbits_built, 0:1, $(FPRounding_ZERO)); } vcvt_60_64_dt: ".u16.f64" is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b11 & c0707=0) | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=0)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FPToFixed(Dd2, 64:1, 16:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } vcvt_60_64_dt: ".u32.f64" is ((TMode=0 & c1818=1 & c1616=1 & c0809=0b11 & c0707=1) | (TMode=1 & thv_c1818=1 & thv_c1616=1 & thv_c0809=0b11 & thv_c0707=1)) & Dd & Dd2 & vcvt_60_fbits_built { Dd = FPToFixed(Dd2, 64:1, 32:1, vcvt_60_fbits_built, 1:1, $(FPRounding_ZERO)); } # F6.1.63 p8012 A1 op=0/1 sf=10 (c1818, c0809) :vcvt^COND^vcvt_60_32_dt Sd,Sd2,vcvt_60_fbits is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1717=1 & c1011=0b10 & c0606=1 & c0404=0 & c1818 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1717=1 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c1818 & thv_c0809=0b10)) & COND & vcvt_60_fbits & vcvt_60_32_dt & Sd & Sd2 { build COND; build vcvt_60_32_dt; } # F6.1.63 p8012 A1 op=0/1 sf=11 (c1818, c0809) :vcvt^COND^vcvt_60_64_dt Dd,Dd2,vcvt_60_fbits is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1717=1 & c1011=0b10 & c0606=1 & c0404=0 & c1818 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1717=1 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c1818 & thv_c0809=0b11)) & COND & vcvt_60_fbits & vcvt_60_64_dt & Dd & Dd2 { build 
COND; build vcvt_60_64_dt; } # vcvta, vcvtm, vcvtn, and vcvtp vcvt_amnp_simd_RM: "a" is ((TMode=0 & c0809=0b00) | (TMode=1 & thv_c0809=0b00)) { export $(FPRounding_TIEAWAY); } vcvt_amnp_simd_RM: "n" is ((TMode=0 & c0809=0b01) | (TMode=1 & thv_c0809=0b01)) { export $(FPRounding_TIEEVEN); } vcvt_amnp_simd_RM: "p" is ((TMode=0 & c0809=0b10) | (TMode=1 & thv_c0809=0b10)) { export $(FPRounding_POSINF); } vcvt_amnp_simd_RM: "m" is ((TMode=0 & c0809=0b11) | (TMode=1 & thv_c0809=0b11)) { export $(FPRounding_NEGINF); } # These RM values need to be converted properly vcvt_amnp_simd_64_dt: ".s32" is TMode=0 & c0707=0 & c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_64_dt: ".s32" is TMode=1 & thv_c0707=0 & thv_c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_64_dt: ".u32" is TMode=0 & c0707=1 & c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_64_dt: ".u32" is TMode=1 & thv_c0707=1 & thv_c0809 & vcvt_amnp_simd_RM & Dd & Dm { Dd = FPToFixed(Dm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_128_dt: ".s32" is TMode=0 & c0707=0 & c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_128_dt: ".s32" is TMode=1 & thv_c0707=0 & thv_c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_128_dt: ".u32" is TMode=0 & c0707=1 & c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } vcvt_amnp_simd_128_dt: ".u32" is TMode=1 & thv_c0707=1 & thv_c0809 & vcvt_amnp_simd_RM & Qd & Qm { Qd = FPToFixed(Qm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_simd_RM); } # F6.1.65,69,71,73 p8019,8028,8032,8036 A1 64-bit SIMD vector variant Q = 0 (c0606) :vcvt^vcvt_amnp_simd_RM^vcvt_amnp_simd_64_dt^".f32" Dd,Dm is ((TMode=0 & ARMcond=0 & 
c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c1011=0b00 & thv_c0404=0 & thv_c0606=0)) & vcvt_amnp_simd_RM & vcvt_amnp_simd_64_dt & Dd & Dm { } # F6.1.65,69,71,73 p8019,8028,8032,8036 A1 128-bit SIMD vector variant Q = 1(c0606) :vcvt^vcvt_amnp_simd_RM^vcvt_amnp_simd_128_dt^".f32" Qd,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00111 & c2021=0b11 & c1819=0b10 & c1617=0b11 & c1011=0b00 & c0404=0 & c0606=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b11 & thv_c1011=0b00 & thv_c0404=0 & thv_c0606=1)) & vcvt_amnp_simd_RM & vcvt_amnp_simd_128_dt & Qd & Qm { } vcvt_amnp_fp_RM: "a" is ((TMode=0 & c1617=0b00) | (TMode=1 & thv_c1617=0b00)) { export $(FPRounding_TIEAWAY); } vcvt_amnp_fp_RM: "n" is ((TMode=0 & c1617=0b01) | (TMode=1 & thv_c1617=0b01)) { export $(FPRounding_TIEEVEN); } vcvt_amnp_fp_RM: "p" is ((TMode=0 & c1617=0b10) | (TMode=1 & thv_c1617=0b10)) { export $(FPRounding_POSINF); } vcvt_amnp_fp_RM: "m" is ((TMode=0 & c1617=0b11) | (TMode=1 & thv_c1617=0b11)) { export $(FPRounding_NEGINF); } vcvt_amnp_fp_s_dt: ".u32" is TMode=0 & c0707=0 & c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_s_dt: ".u32" is TMode=1 & thv_c0707=0 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_s_dt: ".s32" is TMode=0 & c0707=1 & c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_s_dt: ".s32" is TMode=1 & thv_c0707=1 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Sm { Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_d_dt: ".u32" is TMode=0 & c0707=0 & c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_d_dt: 
".u32" is TMode=1 & thv_c0707=0 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 1:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_d_dt: ".s32" is TMode=0 & c0707=1 & c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } vcvt_amnp_fp_d_dt: ".s32" is TMode=1 & thv_c0707=1 & thv_c1617 & vcvt_amnp_fp_RM & Sd & Dm { Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, vcvt_amnp_fp_RM); } # F6.1.66,70,72,74 p8021,8030,8034,8038 Single-precision scalar variant size = 01 (c0809) :vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_s_dt^".f16" Sd,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b01) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b01)) & vcvt_amnp_fp_RM & vcvt_amnp_fp_s_dt & Sd & Sm unimpl # F6.1.66,70,72,74 p8021,8030,8034,8038 Single-precision scalar variant size = 11 (c0809) :vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_s_dt^".f32" Sd,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) & vcvt_amnp_fp_RM & vcvt_amnp_fp_s_dt & Sd & Sm { } # F6.1.66,70,72,74 p8021,8030,8034,8038 Double-precision scalar variant size = 11 (c0809) :vcvt^vcvt_amnp_fp_RM^vcvt_amnp_fp_d_dt^".f64" Sd,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b11 & c1819=0b11 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b11 & thv_c1819=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) & vcvt_amnp_fp_RM & vcvt_amnp_fp_d_dt & Sd & Dm { } # vcvtb and vcvtt vcvt_bt3216_op: "b" is ((TMode=0 & c0707=0) | (TMode=1 & thv_c0707=0)) & Sd & Sm { Sd = float2float(Sm:2); } vcvt_bt3216_op: 
"t" is ((TMode=0 & c0707=1) | (TMode=1 & thv_c0707=1)) & Sd & Sm { w:2 = Sm(2); Sd = float2float(w); } vcvt_bt6416_op: "b" is ((TMode=0 & c0707=0) | (TMode=1 & thv_c0707=0)) & Dd & Sm { Dd = float2float(Sm:2); } vcvt_bt6416_op: "t" is ((TMode=0 & c0707=1) | (TMode=1 & thv_c0707=1)) & Dd & Sm { w:2 = Sm(2); Dd = float2float(w); } vcvt_bt1632_op: "b" is ((TMode=0 & c0707=0) | (TMode=1 & thv_c0707=0)) & Sd & Sm { Sd[0,16] = float2float(Sm); } vcvt_bt1632_op: "t" is ((TMode=0 & c0707=1) | (TMode=1 & thv_c0707=1)) & Sd & Sm { tmp:2 = float2float(Sm); Sd = (zext(tmp)<<16) | zext(Sd[0,16]); } vcvt_bt1664_op: "b" is ((TMode=0 & c0707=0) | (TMode=1 & thv_c0707=0)) & Sd & Dm { Sd[0,16] = float2float(Dm); } vcvt_bt1664_op: "t" is ((TMode=0 & c0707=1) | (TMode=1 & thv_c0707=1)) & Sd & Dm { tmp:2 = float2float(Dm); Sd = (zext(tmp)<<16) | zext(Sd[0,16]); } # F6.1.67 p8023 A1 cases op:sz = 00 (c1616, c0808) # F6.1.76 p8044 A1 cases op:sz = 00 (c1616, c0808) :vcvt^vcvt_bt3216_op^COND^".f32.f16" Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=0 & c0808=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=0 & thv_c0808=0)) & COND & vcvt_bt3216_op & Sd & Sm { build COND; build vcvt_bt3216_op; } # F6.1.67 p8023 A1 cases op:sz = 01 (c1616, c0808) # F6.1.76 p8044 A1 cases op:sz = 01 (c1616, c0808) :vcvt^vcvt_bt6416_op^COND^".f64.f16" Dd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=0 & c0808=1) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=0 & thv_c0808=1)) & COND & vcvt_bt6416_op & Dd & Sm { build COND; build vcvt_bt6416_op; } # F6.1.67 p8023 A1 cases op:sz = 10 (c1616, c0808) # F6.1.76 p8044 A1 cases op:sz = 10 (c1616, c0808) :vcvt^vcvt_bt1632_op^COND^".f16.f32" Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & 
c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=1 & c0808=0) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=1 & thv_c0808=0)) & COND & vcvt_bt1632_op & Sd & Sm { build COND; build vcvt_bt1632_op; } # F6.1.67 p8023 A1 cases op:sz = 11 (c1616, c0808) # F6.1.76 p8044 A1 cases op:sz = 11 (c1616, c0808) :vcvt^vcvt_bt1664_op^COND^".f16.f64" Sd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1721=0b11001 & c0911=0b101 & c0606=1 & c0404=0 & c1616=1 & c0808=1) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1721=0b11001 & thv_c0911=0b101 & thv_c0606=1 & thv_c0404=0 & thv_c1616=1 & thv_c0808=1)) & COND & vcvt_bt1664_op & Sd & Dm { build COND; build vcvt_bt1664_op; } # vcvtr # F6.1.75 p8040 A1 case opc2=100 size=10 (c1618, c0809) :vcvtr^COND^".u32.f32" Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b100 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b10)) & COND & Sd & Sm { build COND; Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 1:1, $(FPSCR_RMODE)); } # F6.1.75 p8040 A1 case opc2=101 size=10 :vcvtr^COND^".s32.f32" Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b101 & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b10)) & COND & Sd & Sm { build COND; Sd = FPToFixed(Sm, 32:1, 32:1, 0:1, 0:1, $(FPSCR_RMODE)); } # F6.1.75 p8040 A1 case opc2=100 size=11 :vcvtr^COND^".u32.f64" Sd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b100 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b100 & thv_c0809=0b11)) & COND 
& Sd & Dm { build COND; Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 1:1, $(FPSCR_RMODE)); } # F6.1.75 p8040 A1 case opc2=101 size=11 :vcvtr^COND^".s32.f64" Sd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b111 & c1011=0b10 & c0607=0b01 & c0404=0 & c1618=0b101 & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b111 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c1618=0b101 & thv_c0809=0b11)) & COND & Sd & Dm { build COND; Sd = FPToFixed(Dm, 64:1, 32:1, 0:1, 0:1, $(FPSCR_RMODE)); } ####### # VMAXNM/VMINNM # FPMaxNum(Vn, Vm) # Return the maximum of two floating point numbers. # Includes FP and SIMD variants of all lane sizes. define pcodeop FPMaxNum; # FPMinNum(Vn, Vm) # Return the minimum of two floating point numbers. # Includes FP and SIMD variants of all lane sizes. define pcodeop FPMinNum; # F6.1.117 p8178 A1/T1 Q = 0 (c0606) :vmaxnm^".f32" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) & Dd & Dn & Dm { Dd = FPMaxNum(Dn, Dm); } # F6.1.117 p8178 A1/T1 Q = 1 (c0606) :vmaxnm^".f32" Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b00 & c0811=0b1111 & c0404=1 & c0606=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b00 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) & Qd & Qn & Qm { Qd = FPMaxNum(Qn, Qm); } # F6.1.117 p8178 A1/T1 Q = 0 (c0606) :vmaxnm^".f16" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) & Dd & Dn & Dm { Dd = FPMaxNum(Dn, Dm); } # F6.1.117 p8178 A1/T1 Q = 1 (c0606) :vmaxnm^".f16" Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b01 & c0811=0b1111 & c0404=1 & c0606=1) | (TMode=1 & 
thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b01 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) & Qd & Qn & Qm { Qd = FPMaxNum(Qn, Qm); } # F6.1.117 p8178 A2/T2 size = 01 (c0809) :vmaxnm^".f16" Sd,Sn,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b01) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b01)) & Sd & Sn & Sm { Sd = FPMaxNum(Sn, Sm); } # F6.1.117 p8178 A2/T2 size = 10 (c0809) :vmaxnm^".f32" Sd,Sn,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b10)) & Sd & Sn & Sm { Sd = FPMaxNum(Sn, Sm); } # F6.1.117 p8178 A2/T2 size = 11 (c0809) :vmaxnm^".f64" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b11)) & Dd & Dn & Dm { Dd = FPMaxNum(Dn, Dm); } # F6.1.120 p8178 A1/T1 Q = 0 (c0606) :vminnm^".f32" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) & Dd & Dn & Dm { Dd = FPMinNum(Dn, Dm); } # F6.1.120 p8178 A1/T1 Q = 1 (c0606) :vminnm^".f32" Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b10 & c0811=0b1111 & c0404=1 & c0606=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b10 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) & Qd & Qn & Qm { Qd = FPMinNum(Qn, Qm); } # F6.1.120 p8178 A1/T1 Q = 0 (c0606) :vminnm^".f16" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 
& c0404=1 & c0606=0) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=0)) & Dd & Dn & Dm { Dd = FPMinNum(Dn, Dm); } # F6.1.120 p8178 A1/T1 Q = 1 (c0606) :vminnm^".f16" Qd,Qn,Qm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b00110 & c2021=0b11 & c0811=0b1111 & c0404=1 & c0606=1) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11110 & thv_c2021=0b11 & thv_c0811=0b1111 & thv_c0404=1 & thv_c0606=1)) & Qd & Qn & Qm { Qd = FPMinNum(Qn, Qm); } # F6.1.120 p8178 A2/T2 size = 01 (c0809) :vminnm^".f16" Sd,Sn,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b01) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b01)) & Sd & Sn & Sm { Sd = FPMinNum(Sn, Sm); } # F6.1.120 p8178 A2/T2 size = 10 (c0809) :vminnm^".f32" Sd,Sn,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b10)) & Sd & Sn & Sm { Sd = FPMinNum(Sn, Sm); } # F6.1.120 p8178 A2/T2 size = 11 (c0809) :vminnm^".f64" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11101 & c2021=0b00 & c1011=0b10 & c0606=1 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11101 & thv_c2021=0b00 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & thv_c0809=0b11)) & Dd & Dn & Dm { Dd = FPMinNum(Dn, Dm); } ####### # VMULL instructions vector/polynomial multiplication vmull_dt: ".s8" is ((TMode=0 & c0909=0 & c2424=0 & c2021=0b00) | (TMode=1 & thv_c0909=0 & thv_c2828=0 & thv_c2021=0b00)) { } vmull_dt: ".s16" is ((TMode=0 & c0909=0 & c2424=0 & c2021=0b01) | (TMode=1 & thv_c0909=0 & thv_c2828=0 & thv_c2021=0b01)) { } vmull_dt: ".s32" is ((TMode=0 & c0909=0 & c2424=0 & c2021=0b10) | (TMode=1 & thv_c0909=0 & thv_c2828=0 & thv_c2021=0b10)) 
{ } vmull_dt: ".u8" is ((TMode=0 & c0909=0 & c2424=1 & c2021=0b00) | (TMode=1 & thv_c0909=0 & thv_c2828=1 & thv_c2021=0b00)) { } vmull_dt: ".u16" is ((TMode=0 & c0909=0 & c2424=1 & c2021=0b01) | (TMode=1 & thv_c0909=0 & thv_c2828=1 & thv_c2021=0b01)) { } vmull_dt: ".u32" is ((TMode=0 & c0909=0 & c2424=1 & c2021=0b10) | (TMode=1 & thv_c0909=0 & thv_c2828=1 & thv_c2021=0b10)) { } vmull_dt: ".p8" is ((TMode=0 & c0909=1 & c2424=0 & c2021=0b00) | (TMode=1 & thv_c0909=1 & thv_c2828=0 & thv_c2021=0b00)) { } vmull_dt: ".p64" is ((TMode=0 & c0909=1 & c2424=0 & c2021=0b10) | (TMode=1 & thv_c0909=1 & thv_c2828=0 & thv_c2021=0b10)) { } # F6.1.149 p8266 VMULL (-integer and +polynomial) op=1 (c0909) (with condition U!=1 and size!=0b11 and size!=01) :vmull^vmull_dt Qd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2531=0b1111001 & c2424=0 & c2323=1 & ( c2121 & c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=1) | (TMode=1 & thv_c2931=0b111 & thv_c2828=0 & thv_c2327=0b11111 & (thv_c2121 & thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=1)) & vmull_dt & Qd & Dn & Dm { Qd = PolynomialMult(Dn, Dm); } # F6.1.149 p8266 VMULL (+integer and -polynomial) op=0 (c0909) (with condition size!=0b11) :vmull^vmull_dt Qd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2531=0b1111001 & c2323=1 & ( c2121=0 | c2020=0) & c1011=0b11 & c0808=0 & c0606=0 & c0404=0 & c0909=0) | (TMode=1 & thv_c2931=0b111 & thv_c2327=0b11111 & (thv_c2121=0 | thv_c2020=0) & thv_c1011=0b11 & thv_c0808=0 & thv_c0606=0 & thv_c0404=0 & thv_c0909=0)) & vmull_dt & Qd & Dn & Dm { Qd = VectorMultiply(Dn, Dm); } ####### # The VRINT instructions round a "floating-point to an integral # floating point value of the same size", i.e. trunc. 
# The arguments are # 1: floating point value (can be 2 packed in a Q register) # 2: rounding mode # 3: boolean exact, if true then raise the Inexact exception if the # result differs from the original vrint_simd_RM: "a" is ((TMode=0 & c0709=0b010) | (TMode=1 & thv_c0709=0b010)) { export $(FPRounding_TIEAWAY); } vrint_simd_RM: "m" is ((TMode=0 & c0709=0b101) | (TMode=1 & thv_c0709=0b101)) { export $(FPRounding_NEGINF); } vrint_simd_RM: "n" is ((TMode=0 & c0709=0b000) | (TMode=1 & thv_c0709=0b000)) { export $(FPRounding_TIEEVEN); } vrint_simd_RM: "p" is ((TMode=0 & c0709=0b111) | (TMode=1 & thv_c0709=0b111)) { export $(FPRounding_POSINF); } vrint_simd_RM: "x" is ((TMode=0 & c0709=0b001) | (TMode=1 & thv_c0709=0b001)) { export $(FPRounding_TIEEVEN); } vrint_simd_RM: "z" is ((TMode=0 & c0709=0b011) | (TMode=1 & thv_c0709=0b011)) { export $(FPRounding_ZERO); } # For vrintx, the exact flag is 1, and the IXF flag is set (inexact) vrint_simd_exact: "x" is ((TMode=0 & c0709=0b001) | (TMode=1 & thv_c0709=0b001)) { export 1:1; } vrint_simd_exact: is ((TMode=0 & ( c0707=1 | c0808=1 | c0909=0)) | (TMode=1 & ( thv_c0707=1 | thv_c0808=1 | thv_c0909=0))) { export 0:1; } vrint_simd_ixf: is ((TMode=0 & c0709=0b001) | (TMode=1 & thv_c0709=0b001)) { $(FPEXC_IXF) = FPConvertInexact(); } vrint_simd_ixf: is ((TMode=0 & ( c0707=1 | c0808=1 | c0909=0)) | (TMode=1 & ( thv_c0707=1 | thv_c0808=1 | thv_c0909=0))) { } # F6.1.199,201,203,205,208,210 p8396,8400,8404,8408,8414,8420 Q = 0 (c0606) :vrint^vrint_simd_RM^".f32" Dd,Dm is ((TMode=0 & c2331=0b111100111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c1011=0b01 & (( c0707=0 & c0909=0) | ( c0707=1 & c0909=1) | ( c0707=1 & c0909=0)) & c0404=0 & c0606=0) | (TMode=1 & thv_c2331=0b111111111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c1011=0b01 & ((thv_c0707=0 & thv_c0909=0) | (thv_c0707=1 & thv_c0909=1) | (thv_c0707=1 & thv_c0909=0)) & thv_c0404=0 & thv_c0606=0)) & vrint_simd_RM & vrint_simd_exact & vrint_simd_ixf & Dd & Dm { Dd = 
FPRoundInt(Dm, 32:1, vrint_simd_RM, 0:1); build vrint_simd_ixf; } # F6.1.199,201,203,205,208,210 p8396,8400,8404,8408,8414,8420 Q = 1 (c0606) :vrint^vrint_simd_RM^".f32" Qd,Qm is ((TMode=0 & c2331=0b111100111 & c2021=0b11 & c1819=0b10 & c1617=0b10 & c1011=0b01 & c0404=0 & c0606=1) | (TMode=1 & thv_c2331=0b111111111 & thv_c2021=0b11 & thv_c1819=0b10 & thv_c1617=0b10 & thv_c1011=0b01 & thv_c0404=0 & thv_c0606=1)) & vrint_simd_RM & vrint_simd_exact & vrint_simd_ixf & Qd & Qm { Qd = FPRoundInt(Qm, 32:1, vrint_simd_RM, 0:1); build vrint_simd_ixf; } vrint_fp_RM: "a" is ((TMode=0 & c1617=0b00) | (TMode=1 & thv_c1617=0b00)) { export $(FPRounding_TIEAWAY); } vrint_fp_RM: "m" is ((TMode=0 & c1617=0b11) | (TMode=1 & thv_c1617=0b11)) { export $(FPRounding_NEGINF); } vrint_fp_RM: "n" is ((TMode=0 & c1617=0b01) | (TMode=1 & thv_c1617=0b01)) { export $(FPRounding_TIEEVEN); } vrint_fp_RM: "p" is ((TMode=0 & c1617=0b10) | (TMode=1 & thv_c1617=0b10)) { export $(FPRounding_POSINF); } # F6.1.200,202,204,206 p8398,8402,8406,8410 size = 10 (c0809) :vrint^vrint_fp_RM^".f32" Sd,Sm is ((TMode=0 & ARMcond=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2331=0b111111101 & thv_c1821=0b1110 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c0809=0b10)) & vrint_fp_RM & Sd & Sm { Sd = FPRoundInt(Sm, 32:1, vrint_fp_RM, 0:1); } # F6.1.200,202,204,206 p8398,8402,8406,8410 size = 11 (c0809) :vrint^vrint_fp_RM^".f64" Dd,Dm is ((TMode=0 & ARMcond=0 & c2331=0b111111101 & c1821=0b1110 & c1011=0b10 & c0607=0b01 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2331=0b111111101 & thv_c1821=0b1110 & thv_c1011=0b10 & thv_c0607=0b01 & thv_c0404=0 & thv_c0809=0b11)) & vrint_fp_RM & Dd & Dm { Dd = FPRoundInt(Dm, 32:1, vrint_fp_RM, 0:1); } vrint_rxz_RM: "r" is ((TMode=0 & c1616=0 & c0707=0) | (TMode=1 & thv_c1616=0 & thv_c0707=0)) { tmp:1 = $(FPSCR_RMODE); export tmp; } vrint_rxz_RM: "x" is ((TMode=0 & c1616=1 & c0707=0) | (TMode=1 & thv_c1616=1 & 
thv_c0707=0)) { tmp:1 = $(FPSCR_RMODE); export tmp; } vrint_rxz_RM: "z" is ((TMode=0 & c1616=0 & c0707=1) | (TMode=1 & thv_c1616=0 & thv_c0707=1)) { export $(FPRounding_ZERO); } # For vrintx, the exact flag is 1, and the IXF flag is set (inexact) vrint_rxz_exact: "x" is ((TMode=0 & c1616=1 & c0707=0) | (TMode=1 & thv_c1616=1 & thv_c0707=0)) { export 1:1; } vrint_rxz_exact: is ((TMode=0 & ( c1616=0 | c0707=1)) | (TMode=1 & (thv_c1616=0 | thv_c0707=1))) { export 0:1; } vrint_rxz_ixf: is ((TMode=0 & c1616=1 & c0707=0) | (TMode=1 & thv_c1616=1 & thv_c0707=0)) { $(FPEXC_IXF) = FPConvertInexact(); } vrint_rxz_ixf: is ((TMode=0 & ( c1616=0 | c0707=1)) | (TMode=1 & (thv_c1616=0 | thv_c0707=1))) { } # F6.1.207,209,211 p8412,8416,8420 A1 size = 10 (c0809) :vrint^vrint_rxz_RM^COND^".f32" Sd,Sm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b110 & c1718=0b11 & c1011=0b10 & c0606=1 & c0404=0 & (( c1616=0) | ( c1616=1 & c0707=0)) & c0809=0b10) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b110 & thv_c1718=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & ((thv_c1616=0) | (thv_c1616=1 & thv_c0707=0)) & thv_c0809=0b10)) & vrint_rxz_RM & vrint_rxz_exact & vrint_rxz_ixf & COND & Sd & Sm { build COND; Sd = FPRoundInt(Sm, 32:1, vrint_rxz_RM, vrint_rxz_exact); build vrint_rxz_ixf; } # F6.1.207,209,211 p8412,8416,8420 A1 size = 11 (c0809) :vrint^vrint_rxz_RM^COND^".f64" Dd,Dm is ((TMode=0 & ARMcond=1 & c2327=0b11101 & c1921=0b110 & c1718=0b11 & c1011=0b10 & c0606=1 & c0404=0 & (( c1616=0) | ( c1616=1 & c0707=0)) & c0809=0b11) | (TMode=1 & thv_c2831=0b1110 & thv_c2327=0b11101 & thv_c1921=0b110 & thv_c1718=0b11 & thv_c1011=0b10 & thv_c0606=1 & thv_c0404=0 & ((thv_c1616=0) | (thv_c1616=1 & thv_c0707=0)) & thv_c0809=0b11)) & vrint_rxz_RM & vrint_rxz_exact & vrint_rxz_ixf & COND & Dd & Dm { build COND; Dd = FPRoundInt(Dm, 32:1, vrint_rxz_RM, vrint_rxz_exact); build vrint_rxz_ixf; } ####### # VSEL vselcond: "eq" is ((TMode=0 & c2021=0b00) | (TMode=1 & thv_c2021=0b00)) { 
tmp:1 = ZR; export tmp; } vselcond: "ge" is ((TMode=0 & c2021=0b10) | (TMode=1 & thv_c2021=0b10)) { tmp:1 = (NG==OV); export tmp; } vselcond: "gt" is ((TMode=0 & c2021=0b11) | (TMode=1 & thv_c2021=0b11)) { tmp:1 = (!ZR && NG==OV); export tmp; } vselcond: "vs" is ((TMode=0 & c2021=0b01) | (TMode=1 & thv_c2021=0b01)) { tmp:1 = OV; export tmp; } # F6.1.223 p8447 A1/T1 size = 11 doubleprec (c0809) :vsel^vselcond^".f64" Dd,Dn,Dm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b11) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11100 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b11)) & vselcond & Dn & Dd & Dm { Dd = zext(vselcond != 0) * Dn + zext(vselcond == 0) * Dm; } # F6.1.223 p8447 A1/T1 size = 10 singleprec (c0809) :vsel^vselcond".f32" Sd,Sn,Sm is ((TMode=0 & ARMcond=0 & c2831=0b1111 & c2327=0b11100 & c1011=0b10 & c0606=0 & c0404=0 & c0809=0b10) | (TMode=1 & thv_c2831=0b1111 & thv_c2327=0b11100 & thv_c1011=0b10 & thv_c0606=0 & thv_c0404=0 & thv_c0809=0b10)) & vselcond & Sn & Sd & Sm { Sd = zext(vselcond != 0) * Sn + zext(vselcond == 0) * Sm; } @endif # INCLUDE_NEON ================================================ FILE: pypcode/processors/ARM/data/languages/old/ARMv5.lang ================================================ ARM:LE:32:DEPRECATED ARM V5 ARM ================================================ FILE: pypcode/processors/ARM/data/languages/old/ARMv5.trans ================================================ Sleigh-ARMv5 ARM:LE:32:v5 ================================================ FILE: pypcode/processors/ARM/data/languages/old/THUMBv2.lang ================================================ ARM:LE:32:DEPRECATED THUMB V2 ARM ================================================ FILE: pypcode/processors/ARM/data/languages/old/THUMBv2.trans ================================================ Sleigh-THUMBv2 ARM:LE:32:v5t ================================================ FILE: pypcode/processors/ARM/data/manuals/ARM.idx 
================================================ @DDI0487H_a_a-profile_architecture_reference_manual.pdf[ARM Architecture Reference Manual - ARM A-profile architecture, December 2021 (ARM DDI 0487H.a)] adc, 7136 add, 7155 adr, 7157 aesd, 7853 aese, 7855 aesimc, 7857 aesmc, 7859 and, 7167 asr, 7171 asrs, 7175 b, 7177 bfc, 7180 bfi, 7182 bic, 7191 bkpt, 7193 bl, 7195 blx, 7198 bx, 7200 bxj, 7202 cbnz, 7203 cbz, 7203 clrex, 7204 clz, 7205 cmn, 7212 cmp, 7219 cps, 7221 crc32, 7226 crc32c, 7229 csdb, 7232 dbg, 7234 dcps1, 7235 dcps2, 7237 dcps3, 7239 dmb, 7241 dsb, 7244 eor, 7254 eret, 7256 esb, 7258 fldm, 7861 fstmdbx, 7864 hlt, 7260 hvc, 7262 isb, 7264 it, 7266 lda, 7268 ldab, 7270 ldaex, 7272 ldaexb, 7274 ldaexd, 7276 ldaexh, 7278 ldah, 7280 ldc, 7284 ldm, 7292 ldmda, 7294 ldmdb, 7296 ldmib, 7299 ldr, 7308 ldrb, 7317 ldrbt, 7320 ldrd, 7329 ldrex, 7332 ldrexb, 7334 ldrexd, 7336 ldrexh, 7338 ldrh, 7346 ldrht, 7349 ldrsb, 7357 ldrsbt, 7360 ldrsh, 7368 ldrsht, 7371 ldrt, 7374 lsl, 7379 lsls, 7383 lsr, 7387 lsrs, 7391 mcr, 7393 mcrr, 7395 mla, 7397 mls, 7399 mov, 7411 movt, 7415 mrc, 7417 mrrc, 7419 mrs, 7423 msr, 7433 mul, 7436 mvn, 7443 nop, 7445 orn, 7449 orr, 7458 pkhbt, 7460 pld, 7468 pli, 7474 pop, 7480 pssbb, 7482 push, 7488 qadd, 7489 qadd16, 7491 qadd8, 7493 qasx, 7495 qdadd, 7497 qdsub, 7499 qsax, 7501 qsub, 7503 qsub16, 7505 qsub8, 7507 rbit, 7509 rev, 7511 rev16, 7513 revsh, 7515 rfe, 7517 ror, 7522 rors, 7526 rrx, 7528 rrxs, 7530 rsb, 7538 rsc, 7544 sadd16, 7546 sadd8, 7548 sasx, 7550 sb, 7552 sbc, 7561 sbfx, 7563 sdiv, 7565 sel, 7567 setend, 7569 setpan, 7570 sev, 7571 sevl, 7573 sha1c, 7867 sha1h, 7869 sha1m, 7871 sha1p, 7873 sha1su0, 7875 sha1su1, 7877 sha256h, 7879 sha256h2, 7881 sha256su0, 7883 sha256su1, 7885 shadd16, 7575 shadd8, 7577 shasx, 7579 shsax, 7581 shsub16, 7583 shsub8, 7585 smc, 7587 smlabb, 7589 smlad, 7591 smlal, 7593 smlalbb, 7595 smlald, 7598 smlawb, 7601 smlsd, 7603 smlsld, 7605 smmla, 7607 smmls, 7609 smmul, 7611 smuad, 7613 smulbb, 
7615 smull, 7617 smulwb, 7619 smusd, 7621 srs, 7623 ssat, 7627 ssat16, 7629 ssax, 7631 ssbb, 7633 ssub16, 7635 ssub8, 7637 stc, 7639 stl, 7642 stlb, 7644 stlex, 7646 stlexb, 7649 stlexd, 7651 stlexh, 7654 stlh, 7657 stm, 7663 stmda, 7665 stmdb, 7667 stmib, 7670 str, 7677 strb, 7684 strbt, 7687 strd, 7695 strex, 7698 strexb, 7701 strexd, 7703 strexh, 7706 strh, 7713 strht, 7716 strt, 7720 sub, 7739 svc, 7742 sxtab, 7744 sxtab16, 7746 sxtah, 7748 sxtb, 7750 sxtb16, 7752 sxth, 7754 tbb, 7756 teq, 7762 tsb, 7764 tst, 7771 uadd16, 7773 uadd8, 7775 uasx, 7777 ubfx, 7779 udf, 7781 udiv, 7783 uhadd16, 7785 uhadd8, 7787 uhasx, 7789 uhsax, 7791 uhsub16, 7793 uhsub8, 7795 umaal, 7797 umlal, 7799 umull, 7801 uqadd16, 7803 uqadd8, 7805 uqasx, 7807 uqsax, 7809 uqsub16, 7811 uqsub8, 7813 usad8, 7815 usada8, 7817 usat, 7819 usat16, 7821 usax, 7823 usub16, 7825 usub8, 7827 uxtab, 7829 uxtab16, 7831 uxtah, 7833 uxtb, 7835 uxtb16, 7837 uxth, 7839 vaba, 7887 vabal, 7890 vabd, 7894 vabdl, 7897 vabs, 7899 vacge, 7903 vacgt, 7908 vacle, 7906 vaclt, 7911 vadd, 7917 vaddhn, 7919 vaddl, 7921 vaddw, 7923 vand, 7928 vbic, 7933 vbif, 7935 vbit, 7937 vbsl, 7939 vcadd, 7941 vceq, 7946 vcge, 7953 vcgt, 7960 vcle, 7967 vcls, 7970 vclt, 7975 vclz, 7978 vcmla, 7983 vcmp, 7986 vcmpe, 7990 vcnt, 7994 vcvt, 8015 vcvta, 8021 vcvtb, 8026 vcvtm, 8030 vcvtn, 8034 vcvtp, 8038 vcvtr, 8040 vcvtt, 8047 vdiv, 8049 vdot, 8054 vdup, 8058 veor, 8060 vext, 8065 vfma, 8067 vfmab, 8073 vfmal, 8078 vfms, 8081 vfmsl, 8088 vfnma, 8091 vfnms, 8094 vhadd, 8097 vhsub, 8100 vins, 8103 vjcvt, 8105 vld1, 8115 vld2, 8131 vld3, 8145 vld4, 8157 vldm, 8161 vldr, 8169 vmax, 8175 vmaxnm, 8178 vmin, 8185 vminnm, 8188 vmla, 8198 vmlal, 8203 vmls, 8211 vmlsl, 8216 vmmla, 8218 vmov, 8241 vmovl, 8244 vmovn, 8246 vmovx, 8248 vmrs, 8250 vmsr, 8253 vmul, 8263 vmull, 8269 vmvn, 8275 vneg, 8277 vnmla, 8281 vnmls, 8284 vnmul, 8287 vorn, 8293 vorr, 8298 vpadal, 8300 vpadd, 8305 vpaddl, 8307 vpmax, 8312 vpmin, 8316 vpop, 8318 vpush, 8320 vqabs, 
8322 vqadd, 8324 vqdmlal, 8326 vqdmlsl, 8329 vqdmulh, 8332 vqdmull, 8336 vqmovn, 8339 vqneg, 8341 vqrdmlah, 8343 vqrdmlsh, 8347 vqrdmulh, 8351 vqrshl, 8355 vqrshrn, 8359 vqrshrun, 8362 vqshl, 8367 vqshrn, 8371 vqshrun, 8374 vqsub, 8376 vraddhn, 8378 vrecpe, 8380 vrecps, 8382 vrev16, 8384 vrev32, 8387 vrev64, 8390 vrhadd, 8393 vrinta, 8398 vrintm, 8402 vrintn, 8406 vrintp, 8410 vrintr, 8412 vrintx, 8416 vrintz, 8420 vrshl, 8422 vrshr, 8428 vrshrn, 8432 vrsqrte, 8434 vrsqrts, 8436 vrsra, 8438 vrsubhn, 8441 vsdot, 8445 vseleq, 8447 vshl, 8454 vshll, 8457 vshr, 8463 vshrn, 8467 vsli, 8469 vsmmla, 8472 vsqrt, 8474 vsra, 8476 vsri, 8479 vst1, 8487 vst2, 8501 vst3, 8512 vst4, 8522 vstm, 8526 vstr, 8531 vsub, 8538 vsubhn, 8540 vsubl, 8542 vsubw, 8544 vsudot, 8546 vswp, 8548 vtbl, 8550 vtrn, 8553 vtst, 8556 vudot, 8560 vummla, 8562 vusdot, 8566 vusmmla, 8568 vuzp, 8573 vzip, 8577 wfe, 7841 wfi, 7843 yield, 7845 ================================================ FILE: pypcode/processors/ARM/data/patterns/ARM_BE_patterns.xml ================================================ 0xbd .......0 0xbd .......0 0x0000 0xbd .......0 0xbf00 0xbd .......0 0x46c0 0xffff 0x46c0 0x4770 0x4770 0x0000 0x4770 0x46c0 0xb0 000..... 0xbd ....0000 0x00bf 0x8000f3af 0xe8bd 101..... ........ 0xf746 0xf8 0x5d 0xfb 0....... 0xf8 0x5d 0xfb 0x04 0xe8 0xbd 100..... ........ 0xb5 ........ 0xb0 100..... 0xb5 ........ 0x1c 00...... 0xb5 ........ 0x46 0x.. 0xb5 ........ 01.01... 0x.. 0xb5 ........ 0x68 0x.. 0xb5 ........ 01.01... 0x.. 0xb0 10...... 0xb5 1....... 0xaf.. 0xb0 100..... 0xb5 ....0000 0x1c 00...... 0xb5 ....0000 0x46 0x.. 0xb5 ....0000 01.01...0x.. 0xb5 ....0000 0x68 0x.. 0xb5 ....0000 0xe92d 0100.... ........ 0xf8 0x4d 11101101 0x04 0xe12fff1. 0xe12fff1e 0xe1a00000 0xe12fff1e 0x00000000 0xea...... 0xe8 10.11101 10.0.... 0x.. 0xe8 10.11101 10.0.... 0x.. 0xe1a00000 0xe8 10.11101 10.0.... 0x.. 0x00000000 0xe4 0x9d 0xf0 0x08 0xe1 0xa0 0xf0 0x0e 0xe320f000 0xe1a00000 0xe1a00000 0xe24dd... 
11101001 00101101 0100.... ........ 11101001 00101101 0100.... ........ 0xe24dd... 11101001 00101101 0100.... ........ 0x........ 0xe24dd... 11101001 00101101 0100.... ........ 0xe1a0 010.0000 0000000. 11101001 00101101 0100.... ........ 0xe24dd... 11100101 00101101 1110.... ........ 11101001 00101101 0000.... ........ 11100101 00101101 11100000 ......00 11100101 00101101 1110.... ........ 0xe24dd... 11100101 00101101 1110.... ........ 0x........ 0xe24dd... 0xe5 0x2d 0xe0 0x08 0xe1a0c00d 0xe9 0x2. 11...... 0x.0 0xe24dd... 11101001 00101101 0100.... ........ 0xe5 1001.... 0....... ........ 11101001 00101101 0100.... ....0000 0xe....... 11101001 00101101 0100.... ....0000 0xe....... 0xe....... 11101001 00101101 0100.... ....0000 11101001 00101101 0100.... ........ 0xe24dd... 11100101 00101101 1110.... ........ 11100101 00101101 1110.... ........ 0xe24dd... 11101001 00101101 .1...... ....0000 0x........ 0xe24dd... 11100101 00101101 1110.... ........ 0x........ 0xe24dd... 0xe1a0c00d 0xe9 0x2. 11...... 0x.0 0xb5 ....0000 0xb0 100..... 0xe92d 0100.... ........ 0xb5 ....0000 0x1c 00...... 0xb5 ....0000 0x46 0x.. 0xb5 ....0000 01.01... 0x.. 0xb5 ....0000 0x68 0x.. 0xb5 ....0000 01.01... 0x.. 0xb0 10...... 0xb5 1...0000 0xaf.. 0xbd .......0 0xbd .......0 0xbf00 0xe8 0xbd 100..... ........ 0x4770 0x4770 0xbf00 11110... ........ 10.1.... ........ 111001.. ........ 0xb5 ........ 0xb0 100..... 0xb5 ........ 0x1c 00...... 0xb5 ........ 0x46 0x.. 0xb5 ........ 01.01... 0x.. 0xb5 ........ 0x68 0x.. 0xb5 ........ 01.01... 0x.. 0xb0 10...... 0xb5 1....... 0xaf.. 0xb0 100..... 0xb5 ....0000 0x1c 00...... 0xb5 ....0000 0x46 0x.. 0xb5 ....0000 01.01...0x.. 0xb5 ....0000 0x68 0x.. 0xb5 ....0000 0xe92d 0100.... ........ 0xf8 0x4d 11101101 0x04 0xb5 ...1.... 0xb4 ...1.... 0xb5 .......0 0xe2 0x8f 1100.... ........ 0xe2 0x8c 1100.... ........ 0xe5 0xbc 0xf. 0x.. 
0xb4 0x03 0x48 0x01 0x90 0x01 0xbd 0x01 ================================================ FILE: pypcode/processors/ARM/data/patterns/ARM_LE_patterns.xml ================================================ .......0 0xbd .......0 0xbd 0x0000 .......0 0xbd 0x00bf .......0 0xbd 0xc0 0x46 0xffff 0xc046 0x7047 0x7047 0x0000 0x7047 0xc046 0x7047 0x00bf 000..... 0xb0 ....0000 0xbd 0x00bf 0xaff30080 0xbde8 ........ 1000.... 0x46f7 0x5d 0xf8 0....... 0xfb 0x5d 0xf8 0x04 0xfb 0xbd 0xe8 ........ 100..... ........ 0xb5 1....... 0xb0 ........ 0xb5 00...... 0x1c ........ 0xb5 0x.. 0x46 ........ 0xb5 0x.. 01.01... ........ 0xb5 0x.. 0x68 ........ 0xb5 0x.. 01.01... 10...... 0xb0 1....... 0xb5 0x..af 100..... 0xb0 ....0000 0xb5 00...... 0x1c ....0000 0xb5 0x.. 01.01... ....0000 0xb5 0x.. 0x68 ....0000 0xb5 0x2de9 ........ 0100.... 0x4d 0xf8 0x04 11101101 0x1.ff2fe1 0x1eff2fe1 0x00000000 0x1eff2fe1 0x0000a0e1 0x......ea 0x.. 10.0.... 10.11101 0xe8 0x.. 10.0.... 10.11101 0xe8 0x00000000 0x.. 10.0.... 10.11101 0xe8 0x0000a0e1 0x08 0xf0 0x9d 0xe4 0x0e 0xf0 0xa0 0xe1 0x00f020e3 0x0000a0e1 0x0000a0e1 0x..d.4de2 ........ .10..... 00101101 11101001 ........ 0100.... 00101101 11101001 0x..d.4de2 ........ 0100.... 00101101 11101001 0x........ 0x..d.4de2 ........ 0100.... 00101101 11101001 0000000. 010.0000 0xa0e1 ........ 0100.... 00101101 11101001 0x..d.4de2 ........ 1110.... 00101101 11100101 ........ 0000.... 00101101 11101001 ......00 11100000 00101101 11100101 ........ 1110.... 00101101 11100101 0x..d.4de2 ........ 1110.... 00101101 11100101 0x........ 0x..d.4de2 0x08 0xe0 0x2d 0xe5 0x0dc0a0e1 0x.0 11...... 0x2. 0xe9 ........ 0100.... 00101101 11101001 0x..d.4de2 ........ 0100.... 00101101 11101001 ........ 0....... 1001.... 0xe5 0000.... 0100.... 00101101 11101001 0x......e. 0000.... 0100.... 00101101 11101001 0x......e. 0x......e. 0000.... 0100.... 00101101 11101001 ........ 0100.... 00101101 11101001 ........ 0100.... 00101101 11101001 0x..d.4de2 ........ 1110.... 
00101101 11100101 ........ 1110.... 00101101 11100101 0x..d.4de2 ....0000 .1...... 00101101 11101001 0x........ 0x..d.4de2 ........ 1110.... 00101101 11100101 0x........ 0x..d.4de2 0x0dc0a0e1 0x.0 11...... 0x2. 0xe9 ....0000 0xb5 1....... 0xb0 0x2de9 ........ 010..... ....0000 0xb5 00...... 0x1c ....0000 0xb5 0x.. 0x46 ....0000 0xb5 0x.. 01.01... ....0000 0xb5 0x.. 0x68 ....0000 0xb5 0x.. 01.01... 10...... 0xb0 1...0000 0xb5 0x..af .......0 0xbd .......0 0xbd 0x00bf 0xbd 0xe8 ........ 100..... 0x7047 0x7047 0x00bf ........ 11110... ........ 10.1.... ........ 111001.. ........ 0xb5 1....... 0xb0 ........ 0xb5 00...... 0x1c ........ 0xb5 0x.. 0x46 ........ 0xb5 0x.. 01.01... ........ 0xb5 0x.. 0x68 ........ 0xb5 0x.. 01.01... 10...... 0xb0 1....... 0xb5 0x..af 100..... 0xb0 ....0000 0xb5 00...... 0x1c ....0000 0xb5 0x.. 01.01... ....0000 0xb5 0x.. 0x68 ....0000 0xb5 0x2de9 ........ 0100.... 0x4d 0xf8 0x04 11101101 ...1.... 0xb5 ...1.... 0xb4 .......0 0xb5 ........ 1100.... 0x8f 0xe2 ........ 1100.... 0x8c 0xe2 0x.. 0xf. 
0xbc 0xe5 0x03 0xb4 0x01 0x48 0x01 0x90 0x01 0xbd 0x10 0xb5 0x02 0x4c 0x24 0x68 0x01 0x94 0x10 0xbd ================================================ FILE: pypcode/processors/ARM/data/patterns/ARM_switch_patterns.xml ================================================ 0x03b4 0x7146 0x0231 0x8908 0x8000 0x8900 0x0858 0x4018 0x8646 0x03bc 0xf746 0x02b4 0x7146 0x4908 0x4900 0x095c 0x4900 0x8e44 0x02bc 0x7047 0x02b4 0x7146 0x4908 0x4900 0x0956 0x4900 0x8e44 0x02bc 0x7047 0x03b4 0x7146 0x4908 0x4000 0x4900 0x095e 0x4900 0x8e44 0x03bc 0x7047 0x03b4 0x7146 0x4908 0x4000 0x4900 0x095a 0x4900 0x8e44 0x03bc 0x7047 0x01c05ee5 0x0c0053e1 0x0330de37 0x0c30de27 0x83 11.00000 0x8ee0 000111.0 0xff2fe1 0x01c05ee5 0x0c0053e1 0x0c30de27 0x0330de37 0x83 11.00000 0x8ee0 000111.0 0xff2fe1 0x30b4 0x7446 0x641e 0x2578 0x641c 0xab42 0x00d2 0x1d46 0x635d 0x5b00 0xe318 0x30bc 0x1847 ================================================ FILE: pypcode/processors/ARM/data/patterns/patternconstraints.xml ================================================ ARM_LE_patterns.xml ARM_BE_patterns.xml ARM_LE_patterns.xml ================================================ FILE: pypcode/processors/ARM/data/patterns/prepatternconstraints.xml ================================================ ARM_switch_patterns.xml ================================================ FILE: pypcode/processors/Atmel/data/languages/atmega256.pspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32.opinion ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a.cspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a.ldefs ================================================ Generic AVR32-A big-endian ================================================ FILE: 
pypcode/processors/Atmel/data/languages/avr32a.pspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a.slaspec ================================================ define endian=big; define alignment=2; define space RAM type=ram_space size=4 default; define space register type=register_space size=2; # for the AVR32A define register offset=0x0000 size=4 [ SR EVBA ACBA CPUCR ECR RSR_SUP RSR_INT0 RSR_INT1 RSR_INT2 RSR_INT3 RSR_EX RSR_NMI RSR_DBG RAR_SUP RAR_INT0 RAR_INT1 RAR_INT2 RAR_INT3 RAR_EX RAR_NMI RAR_DBG JECR JOSP JAVA_LV0 JAVA_LV1 JAVA_LV2 JAVA_LV3 JAVA_LV4 JAVA_LV5 JAVA_LV6 JAVA_LV7 JTBA JBCR ]; define register offset=0x0100 size=4 [ CONFIG0 CONFIG1 COUNT COMPARE TLBEHI TLBELO PTBR TLBEAR MMUCR TLBARLO TLBARHI PCCNT PCNT0 PCNT1 PCCR BEAR MPUAR0 MPUAR1 MPUAR2 MPUAR3 MPUAR4 MPUAR5 MPUAR6 MPUAR7 MPUPSR0 MPUPSR1 MPUPSR2 MPUPSR3 MPUPSR4 MPUPSR5 MPUPSR6 MPUPSR7 MPUCRA MPUCRB MPUBRA MPUBRB MPUAPRA MPUAPRB MPUCR SS_STATUS SS_ADRF SS_ADRR SS_ADR0 SS_ADR1 SS_SP_SYS SS_SP_APP SS_RAR SS_RSR ]; # 103-191 reserved for future use # 192-255 implementation defined define register offset=0x1000 size=4 [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 SP LR PC ]; define register offset=0x1100 size=1 [ C Z N V Q L _ _ _ _ _ _ _ _ T R GM I0M I1M I2M I3M EM M0 M1 M2 _ D DM J H _ _ ALWAYS_TRUE ]; macro SRTOLOWFLAGS() { C = (SR & 0x1) != 0; Z = (SR & 0x2) != 0; N = (SR & 0x4) != 0; V = (SR & 0x8) != 0; } macro SRTOFLAGS() { C = (SR & 0x1) != 0; Z = (SR & 0x2) != 0; N = (SR & 0x4) != 0; V = (SR & 0x8) != 0; Q = (SR & 0x10) != 0; L = (SR & 0x20) != 0; T = (SR & 0x4000) != 0; R = (SR & 0x8000) != 0; GM = (SR & 0x10000) != 0; I0M = (SR & 0x20000) != 0; I1M = (SR & 0x40000) != 0; I2M = (SR & 0x80000) != 0; I3M = (SR & 0x100000) != 0; EM = (SR & 0x200000) != 0; M0 = (SR & 0x400000) != 0; M1 = (SR & 0x800000) != 0; M2 = (SR & 0x1000000) != 0; D = (SR & 0x4000000) != 0; DM = (SR & 0x8000000) != 0; J = 
(SR & 0x10000000) != 0; H = (SR & 0x20000000) != 0; } macro CZNVTOSR() { tmp:4 = zext(C&1) | (zext(Z&1) << 1) | (zext(N&1) << 2) | (zext(V&1) << 3); SR = (SR & 0xFFFFFFF0) | tmp; } macro CZNVQTOSR() { tmp:4 = zext(C&1) | (zext(Z&1) << 1) | (zext(N&1) << 2) | (zext(V&1) << 3) | (zext(Q&1) << 4); SR = (SR & 0xFFFFFFE0) | tmp; } macro QTOSR() { SR = (SR & 0xFFFFFFEF) | (zext(Q&1) << 4); } macro CZTOSR() { tmp:4 = zext(C&1) | (zext(Z&1) << 1); SR = (SR & 0xFFFFFFFC) | tmp; } macro JRGMTOSR() { tmp:4 = (zext(R&1) << 15) | (zext(GM&1) << 16) | (zext(J&1) << 28); SR = (SR & 0xEFFFFE7F) | tmp; } macro LTOSR() { tmp:4 = zext(L&1) << 5; SR = (SR & 0xFFFFFFDF) | tmp; } define register offset=0x1200 size=4 [ stadd ldadd ]; define register offset=0x1300 size=4 contextreg; define context contextreg ctx_rel10=(0,9) signed noflow ctx_rel8_2=(0,1) noflow ctx_rel0_8=(2,9) noflow ctx_rel21=(0,20) signed noflow ctx_rel0_16=(5,20) noflow ctx_rel16_1=(4,4) noflow ctx_rel17_4=(0,3) noflow ctx_rel3=(0,2) signed noflow ctx_savex=(0,3) noflow ctx_usex=(0,3) noflow ctx_savey=(4,7) noflow ctx_usey=(4,7) noflow ctx_useu=(4,7) noflow ctx_shift=(0,4) noflow ctx_shigh=(0,3) noflow ctx_slow=(4,4) noflow ctx_coop=(0,6) noflow ctx_cohi=(0,1) noflow ctx_comid=(2,5) noflow ctx_colow=(6,6) noflow ctx_rdplus=(8,11) noflow ctx_rdsave=(8,11) noflow ; define token instr1(16) op13_3 = (13,15) op11_5 = (11, 15) op0_3 = (0, 2) rs9 = (9,12) rp9 = (9,12) rb9 = (9,12) rx9 = (9,12) op9_4 = (9,12) op9_7 = (9,15) op9_2 = (9,10) op7_9 = (7,15) op7_2 = (7,8) op4_5 = (4,8) op4_12 = (4,15) op9_1 = (9,9) op8_1 = (8,8) op8_8 = (8,15) op0_9 = (0,8) b9 = (9,9) op10_6 = (10,15) disp4_3 = (4,6) disp4_5 = (4,8) disp4_7 = (4,10) disp4_4 = (4,7) rs0 = (0,3) rs0_hi = (1,3) rs0_low = (1,3) rd0_hi = (1,3) rd0_low = (1,3) rp0 = (0,3) rd0 = (0,3) rd9 = (9,12) ri0 = (0,3) rb0 = (0,3) ry0 = (0,3) b0 = (0,0) b02 = (2,2) b03 = (3,3) b04 = (4,4) b05 = (5,5) b06 = (6,6) b07 = (7,7) b08 = (8,8) b09 = (9,9) b10 = (10,10) b11 = (11,11) bp9_4 
= (9,12) bp4_1 = (4,4) bp4_2 = (4,5) bp4_3 = (4,6) bp4_4 = (4,7) bp4_5 = (4,8) bp4_6 = (4,9) bp4_7 = (4,10) cond4_4 = (4,7) cond0_4 = (0,3) cond0_3 = (0,2) imm4_8 = (4,11) signed imm4_6 = (4,9) signed imm4_5 = (4,8) op12_1 = (12,12) op5_4 = (5,8) imm9_4 = (9,12) signed imm4_1 = (4,4) imm0_4 = (0,3) b003 = (0,3) disp4_8 = (4,11) sdisp4_8 = (4,11) signed op3_1 = (3,3) op3_6 = (3,8) disp21part2_4_1 = (4,4) disp21part3_9_4 = (9,12) signed shift9_4 = (9,12) shift4_1 = (4,4) op12_4 = (12,15) op0_4 = (0,3) disp0_2 = (0,1) sdisp0_2 = (0,1) signed op2_2 = (2,3) op0_16 = (0,15) sa0_3 = (0,2) sa0_4 = (0,3) coh = (9,9) ; define token instr2(16) ecop13_3 = (13,15) disp_16 = (0,15) signed ddisp_16 = (0,15) signed disp_9 = (0,8) disp_8 = (0,7) disp12_4 = (12,15) disp0_11 = (0,10) signed disp0_12 = (0,11) signed edisp4_8 = (4,11) eop14_2 = (14,15) eop11_5 = (11,15) eop12_4 = (12,15) eop6_10 = (6,15) eop8_4 = (8,11) eop6_2 = (6,7) eop5_3 = (5,7) eop9_3 = (9,11) eop8_8 = (8,15) eop0_9 = (0,8) eop0_4 = (0,3) eop9_7 = (9,15) eop12_1 = (12,12) eop5_11 = (5,15) eop0_8 = (0,7) eop0_16 = (0,15) eop4_12 = (4,15) eop4_8 = (4,11) eop4_4 = (4,7) eop10_6 = (10,15) eoff5_5 = (5,9) eoff0_5 = (0,4) elen0_5 = (0,4) eop10_2 = (10,11) esa0_5 = (0,4) ebp5_5 = (5,9) crd8_4 = (8,11) crd9_3 = (9,11) crx4_4 = (4,7) cry0_4 = (0,3) altcrd8_4 = (8,11) altcrx4_4 = (4,7) altcry0_4 = (0,3) altcrd9_3 = (9,11) crd8_1 = (8,8) cp13_3 = (13,15) altcp13_3 = (13,15) shift4_2 = (4,5) shift4_5 = (4,8) shift0_5 = (0,4) selectorxy4_2 = (4,5) ers0 = (0,3) erb0 = (0,3) erd0 = (0,3) erd0a = (0,3) erp0 = (0,3) ers0_hi = (1,3) ers0_low = (1,3) erd0_hi = (1,3) erd0_low = (1,3) econd12_4 = (12, 15) econd8_4 = (8, 11) econd4_4 = (4, 7) eri8 = (8,11) eri0 = (0,3) eb0 = (0,0) eb1 = (1,1) eb2 = (2,2) eb3 = (3,3) eb4 = (4,4) eb5 = (5,5) eb6 = (6,6) eb7 = (7,7) eb8 = (8,8) eb9 = (9,9) eb10 = (10,10) eb11 = (11,11) eb12 = (12,12) eb13 = (13,13) eb14 = (14,14) eb15 = (15,15) ypart = (4,4) upart = (4,4) xpart = (5,5) imm16 = (0,15) 
simm16 = (0,15) signed simm0_8 = (0,7) signed imm0_8 = (0,7) simm0_15 = (0,14) signed imm12_2 = (12,13) sysreg = (0,7) dbgreg = (0,7) disp21part1_0_16 = (0,15) deb0 = (0,0) deb1 = (1,1) deb2 = (2,2) deb3 = (3,3) deb4 = (4,4) deb5 = (5,5) deb6 = (6,6) deb7 = (7,7) deb8 = (8,8) deb9 = (9,9) deb10 = (10,10) deb11 = (11,11) deb12 = (12,12) deb13 = (13,13) deb14 = (14,14) deb15 = (15,15) ; attach variables [ rs9 rp9 rd9 rb9 rs0 rb0 rp0 rx9 ry0 rd0 ri0 ers0 erd0 erb0 eri0 erp0 eri8 ctx_rdplus ctx_usex ctx_usey] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 SP LR PC ]; attach variables [ rs0_hi rd0_hi erd0_hi ers0_hi ] [ R1 R3 R5 R7 R9 R11 SP PC ]; attach variables [ rs0_low rd0_low erd0_low ers0_low ] [ R0 R2 R4 R6 R8 R10 R12 LR ]; attach variables [ deb0 ] [ R0 R0 ]; attach variables [ deb1 ] [ R1 R1 ]; attach variables [ deb2 ] [ R2 R2 ]; attach variables [ deb3 ] [ R3 R3 ]; attach variables [ deb4 ] [ R4 R4 ]; attach variables [ deb5 ] [ R5 R5 ]; attach variables [ deb6 ] [ R6 R6 ]; attach variables [ deb7 ] [ R7 R7 ]; attach variables [ deb8 ] [ R8 R8 ]; attach variables [ deb9 ] [ R9 R9 ]; attach variables [ deb10 ] [ R10 R10 ]; attach variables [ deb11 ] [ R11 R11 ]; attach variables [ deb12 ] [ R12 R12 ]; attach variables [ deb13 ] [ SP SP ]; attach variables [ deb14 ] [ LR LR ]; attach variables [ deb15 ] [ PC PC ]; attach variables [ sysreg] [ SR EVBA ACBA CPUCR ECR RSR_SUP RSR_INT0 RSR_INT1 RSR_INT2 RSR_INT3 RSR_EX RSR_NMI RSR_DBG RAR_SUP RAR_INT0 RAR_INT1 RAR_INT2 RAR_INT3 RAR_EX RAR_NMI RAR_DBG JECR JOSP JAVA_LV0 JAVA_LV1 JAVA_LV2 JAVA_LV3 JAVA_LV4 JAVA_LV5 JAVA_LV6 JAVA_LV7 JTBA JBCR _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ CONFIG0 CONFIG1 COUNT COMPARE TLBEHI TLBELO PTBR TLBEAR MMUCR TLBARLO TLBARHI PCCNT PCNT0 PCNT1 PCCR BEAR MPUAR0 MPUAR1 MPUAR2 MPUAR3 MPUAR4 MPUAR5 MPUAR6 MPUAR7 MPUPSR0 MPUPSR1 MPUPSR2 MPUPSR3 MPUPSR4 MPUPSR5 MPUPSR6 MPUPSR7 MPUCRA MPUCRB MPUBRA MPUBRB MPUAPRA MPUAPRB MPUCR SS_STATUS SS_ADRF SS_ADRR SS_ADR0 SS_ADR1 
SS_SP_SYS SS_SP_APP SS_RAR SS_RSR _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; # loadCoprocessorWord(CP:1, CRd:1, address:4) define pcodeop loadCoprocessorWord; # loadCoprocessorDWord(CP:1, CRd:1, address:4) define pcodeop loadCoprocessorDWord; define pcodeop CoProcessorDWordToReg; define pcodeop CoProcessorWordToReg; define pcodeop RegToCoProcessorDWord; define pcodeop RegToCoProcessorWord; define pcodeop storeCoprocessorDword; define pcodeop storeCoprocessorWord; define pcodeop CoprocessorOp; define pcodeop LoadCoProcessorWord; define pcodeop LoadCoProcessorDword; define pcodeop trap; define pcodeop cacheOp; define pcodeop CacheFetch; define pcodeop doSleep; define pcodeop CheckAndRestoreInterupt; define pcodeop CheckAndRestoreSupervisor; define pcodeop ReadTLBEntry; define pcodeop WriteTLBEntry; define pcodeop SearchTLBEntry; define pcodeop SynchMemory; define pcodeop JavaTrap; define pcodeop JavaPopContext; define pcodeop JavaPushContext; define pcodeop JavaCheckStack; define pcodeop SupervisorCallSetup; define pcodeop MoveToDebugReg; define pcodeop MoveFromDebugReg; # conditions # STATUS REGISTER MAP: (LOW) # C - CARRY # Z - ZERO # N - NEGATIVE # V - OVERFLOW # Q - SATURATION # L - LOCK # T - SCRATCH # R - REMAP # STATUS REGISTER MAP: (HIGH) # GM - Global Interrupt Mask # I0M - Interrupt Level 0 Mask # I1M - Interrupt Level 1 Mask # I2M - Interrupt Level 2 Mask # I3M - Interrupt Level 3 Mask # EM - Exception Mask # M0 - Execution Mode 0 # M1 - Execution Mode 1 # M2 - Execution Mode 2 # D - Debug State # DM - Debug State Mask # J - Java State # H - Java Handle #Cond4 Registers Extended (12,15) cc4_e12: "EQ" is econd12_4=0x0 { export Z; } cc4_e12: "NE" is econd12_4=0x1 { tmp:1 = !Z; export tmp; } cc4_e12: "HS" is 
econd12_4=0x2 { tmp:1 = !C; export tmp; } cc4_e12: "LO" is econd12_4=0x3 { export C; } cc4_e12: "GE" is econd12_4=0x4 { tmp:1 = N == V; export tmp;} cc4_e12: "LT" is econd12_4=0x5 { tmp:1 = N!=V; export tmp; } cc4_e12: "MI" is econd12_4=0x6 { export N; } cc4_e12: "PL" is econd12_4=0x7 { tmp:1 = !N; export tmp; } cc4_e12: "LS" is econd12_4=0x8 { tmp:1 = C || Z; export tmp; } cc4_e12: "GT" is econd12_4=0x9 { tmp:1 = !Z && (N == V); export tmp; } cc4_e12: "LE" is econd12_4=0xa { tmp:1 = Z || (N!=V); export tmp; } cc4_e12: "HI" is econd12_4=0xb { tmp:1 = !C && !Z; export tmp; } cc4_e12: "VS" is econd12_4=0xc { export V; } cc4_e12: "VC" is econd12_4=0xd { tmp:1 = !V; export tmp; } cc4_e12: "QS" is econd12_4=0xe { export Q; } cc4_e12: "AL" is econd12_4=0xf { export ALWAYS_TRUE; } COND_e12: cc4_e12 is cc4_e12 { if (!cc4_e12) goto inst_next; } COND_e12: cc4_e12 is cc4_e12 & econd12_4=0xf { } #Cond4 Registers Extended (4,7) cc4_4: "EQ" is cond4_4=0x0 { export Z; } cc4_4: "NE" is cond4_4=0x1 { tmp:1 = !Z; export tmp; } cc4_4: "HS" is cond4_4=0x2 { tmp:1 = !C; export tmp; } cc4_4: "LO" is cond4_4=0x3 { export C; } cc4_4: "GE" is cond4_4=0x4 { tmp:1 = N == V; export tmp;} cc4_4: "LT" is cond4_4=0x5 { tmp:1 = N!=V; export tmp; } cc4_4: "MI" is cond4_4=0x6 { export N; } cc4_4: "PL" is cond4_4=0x7 { tmp:1 = !N; export tmp; } cc4_4: "LS" is cond4_4=0x8 { tmp:1 = C || Z; export tmp; } cc4_4: "GT" is cond4_4=0x9 { tmp:1 = !Z && (N == V); export tmp; } cc4_4: "LE" is cond4_4=0xa { tmp:1 = Z || (N!=V); export tmp; } cc4_4: "HI" is cond4_4=0xb { tmp:1 = !C && !Z; export tmp; } cc4_4: "VS" is cond4_4=0xc { export V; } cc4_4: "VC" is cond4_4=0xd { tmp:1 = !V; export tmp; } cc4_4: "QS" is cond4_4=0xe { export Q; } cc4_4: "AL" is cond4_4=0xf { export ALWAYS_TRUE; } COND_4_4: cc4_4 is cc4_4 { if (!cc4_4) goto inst_next; } COND_4_4: cc4_4 is cc4_4 & cond4_4=0xf { } #Cond4 Registers Extended (4,7) ecc4_4: "EQ" is econd4_4=0x0 { export Z; } ecc4_4: "NE" is econd4_4=0x1 { tmp:1 = !Z; export 
tmp; } ecc4_4: "HS" is econd4_4=0x2 { tmp:1 = !C; export tmp; } ecc4_4: "LO" is econd4_4=0x3 { export C; } ecc4_4: "GE" is econd4_4=0x4 { tmp:1 = N == V; export tmp;} ecc4_4: "LT" is econd4_4=0x5 { tmp:1 = N!=V; export tmp; } ecc4_4: "MI" is econd4_4=0x6 { export N; } ecc4_4: "PL" is econd4_4=0x7 { tmp:1 = !N; export tmp; } ecc4_4: "LS" is econd4_4=0x8 { tmp:1 = C || Z; export tmp; } ecc4_4: "GT" is econd4_4=0x9 { tmp:1 = !Z && (N == V); export tmp; } ecc4_4: "LE" is econd4_4=0xa { tmp:1 = Z || (N!=V); export tmp; } ecc4_4: "HI" is econd4_4=0xb { tmp:1 = !C && !Z; export tmp; } ecc4_4: "VS" is econd4_4=0xc { export V; } ecc4_4: "VC" is econd4_4=0xd { tmp:1 = !V; export tmp; } ecc4_4: "QS" is econd4_4=0xe { export Q; } ecc4_4: "AL" is econd4_4=0xf { export ALWAYS_TRUE; } ECOND_4_4: ecc4_4 is ecc4_4 { if (!ecc4_4) goto inst_next; } ECOND_4_4: ecc4_4 is ecc4_4 & econd4_4=0xf { } #Cond4 Registers Extended (4,7) ecc8_4: "EQ" is econd8_4=0x0 { export Z; } ecc8_4: "NE" is econd8_4=0x1 { tmp:1 = !Z; export tmp; } ecc8_4: "HS" is econd8_4=0x2 { tmp:1 = !C; export tmp; } ecc8_4: "LO" is econd8_4=0x3 { export C; } ecc8_4: "GE" is econd8_4=0x4 { tmp:1 = N == V; export tmp;} ecc8_4: "LT" is econd8_4=0x5 { tmp:1 = N!=V; export tmp; } ecc8_4: "MI" is econd8_4=0x6 { export N; } ecc8_4: "PL" is econd8_4=0x7 { tmp:1 = !N; export tmp; } ecc8_4: "LS" is econd8_4=0x8 { tmp:1 = C || Z; export tmp; } ecc8_4: "GT" is econd8_4=0x9 { tmp:1 = !Z && (N == V); export tmp; } ecc8_4: "LE" is econd8_4=0xa { tmp:1 = Z || (N!=V); export tmp; } ecc8_4: "HI" is econd8_4=0xb { tmp:1 = !C && !Z; export tmp; } ecc8_4: "VS" is econd8_4=0xc { export V; } ecc8_4: "VC" is econd8_4=0xd { tmp:1 = !V; export tmp; } ecc8_4: "QS" is econd8_4=0xe { export Q; } ecc8_4: "AL" is econd8_4=0xf { export ALWAYS_TRUE; } ECOND_8_4: ecc8_4 is ecc8_4 { if (!ecc8_4) goto inst_next; } ECOND_8_4: ecc8_4 is ecc8_4 & econd8_4=0xf { } #Cond3 Registers(0 - 2) cc3_0: "eq" is cond0_3=0x0 { export Z; } cc3_0: "ne" is cond0_3=0x1 { 
tmp:1 = !Z; export tmp; } cc3_0: "cc/hs" is cond0_3=0x2 { tmp:1 = !C; export tmp; } cc3_0: "cc/lo" is cond0_3=0x3 { export C; } cc3_0: "ge" is cond0_3=0x4 { tmp:1 = N == V; export tmp;} cc3_0: "lt" is cond0_3=0x5 { tmp:1 = N!=V; export tmp; } cc3_0: "mi" is cond0_3=0x6 { export N; } cc3_0: "pl" is cond0_3=0x7 { tmp:1 = !N; export tmp; } COND_3: cc3_0 is cc3_0 { export cc3_0; } #Cond4 Registers(0 - 3) cc4_0: "eq" is cond0_4=0x0 { export Z; } cc4_0: "ne" is cond0_4=0x1 { tmp:1 = !Z; export tmp; } cc4_0: "cc/hs" is cond0_4=0x2 { tmp:1 = !C; export tmp; } cc4_0: "cc/lo" is cond0_4=0x3 { export C; } cc4_0: "ge" is cond0_4=0x4 { tmp:1 = N == V; export tmp;} cc4_0: "lt" is cond0_4=0x5 { tmp:1 = N!=V; export tmp; } cc4_0: "mi" is cond0_4=0x6 { export N; } cc4_0: "pl" is cond0_4=0x7 { tmp:1 = !N; export tmp; } cc4_0: "ls" is cond0_4=0x8 { tmp:1 = C || Z; export tmp; } cc4_0: "gt" is cond0_4=0x9 { tmp:1 = !Z && (N == V); export tmp; } cc4_0: "le" is cond0_4=0xa { tmp:1 = Z || (N!=V); export tmp; } cc4_0: "hi" is cond0_4=0xb { tmp:1 = !C && !Z; export tmp; } cc4_0: "vs" is cond0_4=0xc { export V; } cc4_0: "vc" is cond0_4=0xd { tmp:1 = !V; export tmp; } cc4_0: "qs" is cond0_4=0xe { export Q; } cc4_0: "al" is cond0_4=0xf { export ALWAYS_TRUE; } COND_4_0: cc4_0 is cc4_0 { if (!cc4_0) goto inst_next; } COND_4_0: cc4_0 is cc4_0 & cond0_4=0xf { } RP9bInc: rp9++ is rp9 { ptr:4 = rp9; rp9 = rp9 + 1; export ptr; } RPhInc: rp9++ is rp9 { ptr:4 = rp9; rp9 = rp9 + 2; export ptr; } RPwInc: rp9++ is rp9 { ptr:4 = rp9; rp9 = rp9 + 4; export ptr; } RPdInc: rp9++ is rp9 { ptr:4 = rp9; rp9 = rp9 + 8; export ptr; } RP9bDec: --rp9 is rp9 { rp9 = rp9 - 1; ptr:4 = rp9; export ptr; } RPhDec: --rp9 is rp9 { rp9 = rp9 - 2; ptr:4 = rp9; export ptr; } RPwDec: --rp9 is rp9 { rp9 = rp9 - 4; ptr:4 = rp9; export ptr; } RPdDec: --rp9 is rp9 { rp9 = rp9 - 8; ptr:4 = rp9; export ptr; } RPwDec0: --rp0 is rp0 { rp0 = rp0 - 4; ptr:4 = rp0; export ptr; } RPdDec0: --rp0 is rp0 { rp0 = rp0 - 8; ptr:4 = rp0; export 
ptr; } RPbDisp3: rp9[disp4_3] is rp9 & disp4_3 { ptr:4 = rp9 + disp4_3; export ptr; } RPhDisp3: rp9[disp] is rp9 & disp4_3 [ disp = disp4_3 << 1; ] { ptr:4 = rp9 + disp; export ptr; } RPwDisp4: rp9[disp] is rp9 & disp4_4 [ disp = disp4_4 << 2; ] { ptr:4 = rp9 + disp; export ptr; } RPwDisp5: rp9[disp] is rp9 & disp4_5 [ disp = disp4_5 << 2; ] { ptr:4 = rp9 + disp; export ptr; } RPwDisp8: rp0[disp] is rp0; disp_8 [ disp = disp_8 << 2; ] { ptr:4 = rp0 + disp; export ptr; } RPbDisp9: rp9[disp_9] is rp9; disp_9 { ptr:4 = rp9 + disp_9; export ptr; } RPhDisp9: rp9[disp] is rp9; disp_9 [ disp = disp_9 << 1; ] { ptr:4 = rp9 + disp; export ptr; } RPwDisp9: rp9[disp] is rp9; disp_9 [ disp = disp_9 << 2; ] { ptr:4 = rp9 + disp; export ptr; } RPwDisp12: rp0[disp] is rp0; disp12_4 & disp_8 [ disp = ((disp12_4 << 8) | disp_8) << 2; ] { ptr:4 = rp0 + disp; export ptr; } PCDisp16: loc is disp_16 [ loc = inst_start + disp_16; ] { export *[const]:4 loc; } RPDisp16: rp9[disp_16] is rp9; disp_16 { ptr:4 = rp9 + disp_16; export ptr; } RPDisp16: PC[disp_16] is rp9=15 & PC; disp_16 & PCDisp16 { export PCDisp16; } RB9Shift: rb9[ri0 "<<" shift4_2] is rb9 & ri0; shift4_2 { ptr:4 = rb9 + (ri0 << shift4_2); export ptr; } RBShift0: rb0[eri0 "<<" shift4_2] is rb0; eri0 & shift4_2 { ptr:4 = rb0 + (eri0 << shift4_2); export ptr; } RBSelector: rb9[ri0" << 2]" is rb9 & ri0; selectorxy4_2=0x0 { ptr:4 = rb9 + ((ri0 & 0xff) << 0x02); export ptr; } RBSelector: rb9[ri0" << 2]" is rb9 & ri0; selectorxy4_2=0x1 { ptr:4 = rb9 + (((ri0 >> 8) & 0xff) << 0x02); export ptr; } RBSelector: rb9[ri0" << 2]" is rb9 & ri0; selectorxy4_2=0x2 { ptr:4 = rb9 + (((ri0 >> 16) & 0xff) << 0x02); export ptr; } RBSelector: rb9[ri0" << 2]" is rb9 & ri0; selectorxy4_2=0x3 { ptr:4 = rb9 + (((ri0 >> 24) & 0xff) << 0x02); export ptr; } RS0A: rs0 is rs0 { export rs0; } RS0A: rs0 is rs0 & rs0=0xf { export *[const]:4 inst_start; } RS9A: rs9 is rs9 { export rs9; } RS9A: rs9 is rs9 & rs9=0xf { export *[const]:4 inst_start; } RX9A: rx9 is 
rx9 { export rx9; } RX9A: rx9 is rx9 & rx9=0xf { export *[const]:4 inst_start; } RY0A: ry0 is ry0 { export ry0; } RY0A: ry0 is ry0 & ry0=0xf { export *[const]:4 inst_start; } RD0A: rd0 is rd0 { export rd0; } RD0A: rd0 is rd0 & rd0=0xf { export *[const]:4 inst_start; } macro ZSTATUS(RES) { Z = RES == 0; CZNVTOSR(); } macro NZSTATUS(RES) { N = RES s< 0; ZSTATUS(RES); CZNVTOSR(); } macro addflags(OP1, OP2, RES) { ## The REAL way to do it (in the processor spec) #V = (OP1[31,1] && OP2[31,1] && !(RES[31,1])) || # (!(OP1[31,1]) && !(OP2[31,1]) && RES[31,1]); V = scarry(OP1, OP2); NZSTATUS(RES); ## The REAL way to do it (in the processor spec) #C = (OP1[31,1] && OP2[31,1]) || # (OP1[31,1] && !(RES[31,1])) || # (OP2[31,1] && !(RES[31,1])); C = carry(OP1, OP2); CZNVTOSR(); } macro subflags(OP1, OP2, RES) { ## The REAL way to do it (in the processor spec) #V = (OP1[31,1] && !(OP2[31,1]) && !(RES[31,1])) || # (!(OP1[31,1]) && OP2[31,1] && RES[31,1]); V = sborrow(OP1, OP2); NZSTATUS(RES); ## The REAL way to do it (in the processor spec) #C = (!(OP1[31,1]) && (OP2[31,1])) || # (OP2[31,1] && RES[31,1]) || # (!(OP1[31,1]) && RES[31,1]); C = OP1 < OP2; CZNVTOSR(); } @include "avr32a_arithmetic_operations.sinc" @include "avr32a_multiplication_operations.sinc" @include "avr32a_logic_operations.sinc" @include "avr32a_bit_operations.sinc" @include "avr32a_shift_operations.sinc" @include "avr32a_data_transfer.sinc" @include "avr32a_system_control.sinc" @include "avr32a_coprocessor_interface.sinc" @include "avr32a_instruction_flow.sinc" @include "avr32a_simd_operations.sinc" @include "avr32a_dsp_operations2.sinc" ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_arithmetic_operations.sinc ================================================ #--------------------------------------------------------------------- # 8.3.2 Arithmetic Operations #--------------------------------------------------------------------- macro cpcflags(OP1, OP2, RES) 
{ ## The REAL way to do it (in the processor spec) #V = (OP1[31,1] && !(OP2[31,1]) && !(RES[31,1])) || # (!(OP1[31,1]) && OP2[31,1] && RES[31,1]); V = sborrow(OP1, OP2); N = RES s< 0; Z = RES == 0 & Z; ## The REAL way to do it (in the processor spec) C = (!(OP1[31,1]) && (OP2[31,1])) || (OP2[31,1] && RES[31,1]) || (!(OP1[31,1]) && RES[31,1]); #C = OP1 < OP2; CZNVTOSR(); } # DUPLICATE CPCFLAGS TO ACCOUNT FOR OP2 == 0 macro cpcflags0(OP1, RES) { ## The REAL way to do it (in the processor spec) #V = (OP1[31,1] && !(OP2[31,1]) && !(RES[31,1])) || # (!(OP1[31,1]) && OP2[31,1] && RES[31,1]); V = sborrow(OP1, 0); N = RES s< 0; Z = RES == 0 & Z; ## The REAL way to do it (in the processor spec) #C = (!(OP1[31,1]) && (OP2[31,1])) || # (OP2[31,1] && RES[31,1]) || # (!(OP1[31,1]) && RES[31,1]); C = 0; CZNVTOSR(); } macro acrflags(OP1, tmpC, RES) { ## The REAL way to do it (in the processor spec) # V = RES[31,1] && !Rd[31,1]; V = scarry(OP1, tmpC); N = RES s< 0; Z = ((RES == 0) && Z); ## The REAL way to do it (in the processor spec) # C = RES[31,1] && Rd[31,1]; C = carry(OP1, tmpC); CZNVTOSR(); } #--------------------------------------------------------------------- # ABS - Absolute Value # I. {d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- #ABS Format I # Operation: Rd <- abs(Rd); # Syntax: abs Rd # 010 1110 00100 dddd (Opcode Form) # 0101 1100 0100 dddd (Byte half Form) :ABS rd0 is rd0 & op13_3=0x2 & op9_4=0xe & op4_5=0x4 { local ztst:1 = rd0 s< 0; rd0 = (zext(!ztst)*rd0) + (zext(ztst)*(-rd0)); ZSTATUS(rd0); } #--------------------------------------------------------------------- # ACR - Add carry to register # I. 
{d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- #ACR Format I # Operation: Rd <- Rd + C; # Syntax: acr Rd # 010 1110 00000 dddd (Opcode Form) # 0101 1100 0000 dddd (Byte half Form) :ACR rd0 is rd0 & op13_3=0x2 & op9_4=0xe & op4_5=0x0 { tmpRd0:4 = rd0; tmpC:4 = zext(C); rd0 = tmpRd0 + tmpC; acrflags(tmpRd0, tmpC, rd0); } :ACR rd0 is rd0 & op13_3=0x2 & op9_4=0xe & op4_5=0x0 & rd0=0xf { tmpRd0:4 = inst_start; tmpC:4 = zext(C); PC = tmpRd0 + tmpC; acrflags(tmpRd0, tmpC, PC); goto [PC]; } #--------------------------------------------------------------------- # ADC - Add with Carry # I. {d, x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- #ADC Format I # Operation: Rd <- Rx + Ry + C; # Syntax: adc Rd, Rx, Ry # 111 xxxx 00000 yyyy 0000 00000100 dddd (Opcode Form) # 111x xxx0 0000 yyyy 0000 0000 0100 dddd (Byte half Form) :ADC erd0, RX9A, RY0A is op13_3=0x7 & RX9A & op4_5=0x0 & RY0A; eop12_4=0x0 & eop4_8=0x4 & erd0 { erd0 = RX9A + RY0A + zext(C); } :ADC erd0, RX9A, RY0A is op13_3=0x7 & RX9A & op4_5=0x0 & RY0A; eop12_4=0x0 & eop4_8=0x4 & erd0 & erd0=0xf{ PC = RX9A + RY0A + zext(C); goto [PC]; } #--------------------------------------------------------------------- # ADD - Add without Carry # I. {d, s} -> {0, 1, ..., 15} # II. 
{d, x, y} -> {0, 1, ..., 15} # sa -> {0, 1, 2, 3} #--------------------------------------------------------------------- # ADD Format I # Operation: Rd <- Rd + Rs # Syntax: add Rd, Rs # 000s sss0 0000 dddd :ADD rd0, RS9A is op13_3=0x0 & op4_5=0x0 & rd0 & RS9A { tmpRd0:4 = rd0; rd0 = RS9A + tmpRd0; addflags(tmpRd0, RS9A, rd0); } :ADD PC, RS9A is op13_3=0x0 & op4_5=0x0 & rd0 & RS9A & rs0=0xf & PC { tmpRd0:4 = inst_start; PC = RS9A + tmpRd0; addflags(tmpRd0, RS9A, PC); goto [PC]; } # ADD Format II # Operation: Rd <- Rx + Ry << sa2 # Syntax: add Rd, Rx, Ry << sa # 111x xxx0 0000 yyyy 0000 0000 00tt dddd :ADD erd0, RX9A, RY0A^" << " shift4_2 is op13_3=7 & op4_5=0 & RX9A & RY0A; eop6_10=0 & erd0 & shift4_2 { tmp:4 = RY0A << shift4_2; erd0 = RX9A + tmp; addflags(RX9A, tmp, erd0); } :ADD erd0, RX9A, RY0A^" << " shift4_2 is op13_3=7 & op4_5=0 & RX9A & RY0A; eop6_10=0 & erd0 & shift4_2 & ers0=0xf { tmp:4 = RY0A << shift4_2; PC = RX9A + tmp; addflags(RX9A, tmp, PC); goto [PC]; } #--------------------------------------------------------------------- # ADD{cond4} - Conditional Add # I. cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # ADD{cond4} Format I # Operation: if(cond4) then # Rd <- Rx + Ry # Syntax: add{cond4} Rd, Rx, Ry # 111x xxx1 1101 yyyy 1110 cccc 0000 dddd :ADD^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=0 & ECOND_8_4 & erd0) { build ECOND_8_4; erd0 = RX9A + RY0A; } :ADD^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=0 & ECOND_8_4 & erd0 & erd0=0xf) { build ECOND_8_4; PC = RX9A + RY0A; goto [PC]; } #--------------------------------------------------------------------- # ADDABS - Add Absolute Value # I. 
{d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # ADDABS Format I # Operation: Rd <- Rx + |Ry| # Syntax: addabs Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1110 0100 dddd :ADDABS erd0, RX9A, RY0A is (op13_3=7 & op4_5=0 & RX9A & RY0A; eop4_12=0xe4 & erd0) { local ztst:1 = RY0A s< 0; local ary0:4 = (zext(!ztst)*RY0A) + (zext(ztst)*(-RY0A)); erd0 = RX9A + ary0; ZSTATUS(erd0); } :ADDABS erd0, RX9A, RY0A is (op13_3=7 & op4_5=0 & RX9A & RY0A; eop4_12=0xe4 & erd0 & erd0=0xf) { local ztst:1 = RY0A s< 0; local ary0:4 = (zext(!ztst)*RY0A) + (zext(ztst)*(-RY0A)); PC = RX9A + ary0; ZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # CP.B - Compare Byte # I. {d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # CP.B Format I # Operation: Rd[7:0] - Rs[7:0] # Syntax: cp.b Rd, Rs # 111s sss0 0000 dddd 0001 1000 0000 0000 :CP.B RD0A, RS9A is op13_3=7 & op4_5=0 & RD0A & RS9A; eop0_16=0x1800 { tmp:1 = RD0A:1 - RS9A:1; subflags(RD0A:1, RS9A:1, tmp); } #--------------------------------------------------------------------- # CP.H - Compare Halfword # I. {d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # CP.H Format I # Operation: Rd[15:0] - Rs[15:0] # Syntax: cp.h Rd, Rs # 111s sss0 0000 dddd 0001 1001 0000 0000 :CP.H RD0A, RS9A is op13_3=7 & op4_5=0 & RD0A & RS9A; eop0_16=0x1900 { tmp:2 = RD0A:2 - RS9A:2; subflags(RD0A:2, RS9A:2, tmp); } #--------------------------------------------------------------------- # CP.W - Compare Word # I. {d, s} -> {0, 1, ..., 15} # II. d -> {0, 1, ..., 15} # imm -> {-32, -31, ..., 31} # III. 
d -> {0, 1, ..., 15} # imm -> {-1048576, -1048575, ..., 1048575} #--------------------------------------------------------------------- # CP.W Format I # Operation: Rd - Rs # Syntax: cp.w Rd, Rs # 000s sss0 0011 dddd :CP.W RD0A, RS9A is op13_3=0x0 & op4_5=0x3 & RD0A & RS9A { tmp:4 = RD0A - RS9A; subflags(RD0A, RS9A, tmp); } # CP.W Format II # Operation: Rd - SE(imm6) # Syntax: cp.w Rd, imm # 0101 10ii iiii dddd :CP.W RD0A, imm4_6 is op10_6=0x16 & imm4_6 & RD0A { tmp:4 = RD0A - imm4_6; subflags(RD0A, imm4_6, tmp); } # CP.W Format III # Operation: Rd - SE(imm21) # Syntax: cp.w Rd, imm # 111i iii0 010i dddd iiii iiii iiii iiii :CP.W RD0A, imm is op13_3=0x7 & op5_4=0x2 & imm9_4 & imm4_1 & RD0A; imm16 [ imm = (imm9_4 << 17) | (imm4_1 << 16) | imm16; ] { tmp:4 = RD0A - imm; subflags(RD0A, imm, tmp); } #--------------------------------------------------------------------- # CPC - Compare with Carry # I. {d, s} -> {0, 1, ..., 15} # II. d -> {0, 1, ..., 15} #--------------------------------------------------------------------- # CPC Format I # Operation: Rd - Rs - C # Syntax: cpc Rd, Rs # 111s sss0 0000 dddd 0001 0011 0000 0000 :CPC RD0A, RS9A is op13_3=0x7 & op4_5=0x0 & RD0A & RS9A ; eop0_16=0x1300 { temp:4 = RD0A - RS9A - zext(C); cpcflags(RD0A, RS9A, temp); } # CPC Format II # Operation: Rd - C # Syntax: cpc Rd # 0101 1100 0010 dddd :CPC RD0A is op4_12=0x5c2 & RD0A { temp:4 = RD0A - zext(C); cpcflags0(RD0A, temp); } #--------------------------------------------------------------------- # MAX - Return Maximum Value # I. 
{d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MAX Format I # Operation: if Rx > Ry # Rd <- Rx; # else # Rd <- Ry; # Syntax: max Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1100 0100 dddd :MAX erd0, RX9A, RY0A is op13_3=0x7 & op4_5=0x0 & RX9A & RY0A ; eop4_12=0xc4 & erd0 { rxgt:4 = zext(RX9A s> RY0A); rxle:4 = zext(RX9A s<= RY0A); erd0 = RX9A * rxgt + RY0A * rxle; } :MAX erd0, RX9A, RY0A is op13_3=0x7 & op4_5=0x0 & RX9A & RY0A ; eop4_12=0xc4 & erd0 & erd0=0xf { rxgt:4 = zext(RX9A s> RY0A); rxle:4 = zext(RX9A s<= RY0A); PC = RX9A * rxgt + RY0A * rxle; goto [PC]; } #--------------------------------------------------------------------- # MIN - Return Minimum Value # I. {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MIN Format I # Operation: if Rx < Ry # Rd <- Rx; # else # Rd <- Ry; # Syntax: min Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1101 0100 dddd :MIN erd0, RX9A, RY0A is op13_3=0x7 & op4_5=0x0 & RX9A & RY0A ; eop4_12=0xd4 & erd0 { rxlt:4 = zext(RX9A s< RY0A); rxge:4 = zext(RX9A s>= RY0A); erd0 = RX9A * rxlt + RY0A * rxge; } :MIN erd0, RX9A, RY0A is op13_3=0x7 & op4_5=0x0 & RX9A & RY0A ; eop4_12=0xd4 & erd0 & erd0=0xf { rxlt:4 = zext(RX9A s< RY0A); rxge:4 = zext(RX9A s>= RY0A); PC = RX9A * rxlt + RY0A * rxge; goto [PC]; } #--------------------------------------------------------------------- # NEG - Compare Word # I. {d, s} -> {0, 1, ..., 15} # II. d -> {0, 1, ..., 15} # imm -> {-32, -31, ..., 31} # III. 
d -> {0, 1, ..., 15} # imm -> {-1048576, -1048575, ..., 1048575} #--------------------------------------------------------------------- # NEG Format I # Operation: Rd <- 0 - Rd # Syntax: neg Rd # 0101 1100 0011 dddd :NEG rd0 is op4_12=0x5c3 & rd0 { save:4 = rd0; rd0 = -rd0; subflags(0, save, rd0); } :NEG rd0 is op4_12=0x5c3 & rd0 & rd0=0xf { tmp:4 = inst_start; PC = -tmp; subflags(0, tmp, PC); goto [PC]; } #--------------------------------------------------------------------- # RSUB - Reverse Subtract # I. {d, s} -> {0, 1, ..., 15} # II. {d, s} -> {0, 1, ..., 15} # imm -> {-128, -127, ..., 127} #--------------------------------------------------------------------- # RSUB Format I # Operation: Rd <- Rs - Rd # Syntax: rsub Rd, Rs # 000s sss0 0010 dddd :RSUB rd0, RS9A is op13_3=0x0 & op4_5=2 & rd0 & RS9A { save:4 = rd0; rd0 = RS9A - rd0; subflags(RS9A, save, rd0); } :RSUB rd0, RS9A is op13_3=0x0 & op4_5=2 & rd0 & RS9A & rd0=0xf { PC = RS9A - inst_start; subflags(RS9A, inst_start, PC); goto [PC]; } # RSUB Format II # Operation: Rd <- SE(imm8) - Rs # Syntax: rsub Rd, Rs, imm # 111s sss0 0000 dddd 0001 0001 iiii iiii :RSUB rd0, RS9A, simm0_8 is op13_3=7 & op4_5=0 & rd0 & RS9A; eop12_4=1 & eop8_4=1 & simm0_8 { save:4 = simm0_8; tmp:4 = RS9A; rd0 = save - RS9A; subflags(save, tmp, rd0); } :RSUB rd0, RS9A, simm0_8 is op13_3=7 & op4_5=0 & rd0 & RS9A & rd0=0xf; eop12_4=1 & eop8_4=1 & simm0_8 { save:4 = simm0_8; tmp:4 = RS9A; PC = save - RS9A; subflags(save, tmp, PC); goto [PC]; } # RSUB Format II # Operation: Rd <- SE(imm8) - Rs # Syntax: rsub Rd, Rs, imm # 111s sss0 0000 dddd 0001 0001 iiii iiii #Handles Special Case where Immediate = -0x1 and treat it as a ~ (negate) :RSUB rd0, RS9A, simm0_8 is op13_3=7 & op4_5=0 & rd0 & RS9A; eop12_4=1 & eop8_4=1 & simm0_8 & imm0_8=0xff { rd0 = ~RS9A; subflags(-1:4, RS9A, rd0); } :RSUB rd0, RS9A, simm0_8 is op13_3=7 & op4_5=0 & rd0 & RS9A & rd0=0xf; eop12_4=1 & eop8_4=1 & simm0_8 & imm0_8=0xff { PC = ~RS9A; subflags(-1:4, RS9A, PC); goto 
[PC]; } #--------------------------------------------------------------------- # RSUB{cond4} - Conditional Move Register # I. d -> {0, 1, ..., 15} # cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # imm -> {-128, -127, ..., 127} #--------------------------------------------------------------------- # RSUB{cond4} Format I # Operation: if (cond4) # Rd <- SE(imm8) - Rd # Syntax: rsub{cond4} Rd, imm # 1111 1011 1011 dddd 0000 cccc iiii iiii :RSUB^{ECOND_8_4} rd0, simm0_8 is op4_12=0xfbb & rd0 ; eop12_4=0 & simm0_8 & ECOND_8_4 { build ECOND_8_4; rd0 = simm0_8 - rd0; } :RSUB^{ECOND_8_4} rd0, simm0_8 is op4_12=0xfbb & rd0 & rd0=0xf; eop12_4=0 & simm0_8 & ECOND_8_4 { build ECOND_8_4; PC = simm0_8 - inst_start; goto [PC]; } #--------------------------------------------------------------------- # SBC - Subtract with Carry # I. {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # SBC Format I # Operation: Rd <- Rx - Ry - C # Syntax: sbc Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0001 0100 dddd :SBC erd0, RX9A, RY0A is op13_3=0x7 & op4_5=0x0 & RX9A & RY0A ; eop4_12=0x14 & erd0 { tmpx:4 = RX9A; tmpy:4 = RY0A; erd0 = RX9A - RY0A - zext(C); cpcflags(tmpx, tmpy, erd0); } :SBC erd0, RX9A, RY0A is op13_3=0x7 & op4_5=0x0 & RX9A & RY0A ; eop4_12=0x14 & erd0 & erd0=0xf { tmpx:4 = RX9A; tmpy:4 = RY0A; PC = RX9A - RY0A - zext(C); cpcflags(tmpx, tmpy, PC); goto [PC]; } #--------------------------------------------------------------------- # SCR - Subtract Carry from Register # I. 
d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# SCR Format I
# Operation: Rd <- Rd - C
# Syntax: scr Rd
# 0101 1100 0001 dddd
:SCR rd0 is op4_12=0x5c1 & rd0 { save:4 = rd0; rd0 = rd0 - zext(C); V = (save s< 0) && (rd0 s>= 0); N = rd0 s< 0; Z = (rd0 == 0) && Z; C = (save s>= 0) && (rd0 s< 0); CZNVTOSR(); }
:SCR rd0 is op4_12=0x5c1 & rd0 & rd0=0xf { tmp:4 = inst_start; PC = tmp - zext(C); V = (tmp s< 0) && (PC s>= 0); N = PC s< 0; Z = (PC == 0) && Z; C = (tmp s>= 0) && (PC s< 0); CZNVTOSR(); goto [PC]; }
#---------------------------------------------------------------------
# SUB - Subtract (without Carry)
#---------------------------------------------------------------------
# SUB Format I
# 000s sss0 0001 dddd
:SUB rd0, RS9A is op13_3=0 & op4_5=1 & rd0 & RS9A { save:4 = rd0; tmp:4 = RS9A; rd0 = rd0 - RS9A; subflags(save, tmp, rd0); }
# rd0=0xf selects PC as destination; flags must be computed from the value
# actually written to PC. Was subflags(inst_start, tmp, rd0) — every other
# PC-destination variant (SUB II/III below, RSUB, SBC, NEG) passes PC here.
:SUB rd0, RS9A is op13_3=0 & op4_5=1 & rd0 & RS9A & rd0=0xf { tmp:4 = RS9A; PC = inst_start - RS9A; subflags(inst_start, tmp, PC); goto [PC]; }
# SUB Format II
# 111x xxx0 0000 yyyy 0000 0001 00tt dddd
:SUB erd0, RX9A, RY0A^" << " shift4_2 is op13_3=7 & op4_5=0 & RY0A & RX9A; eop6_10=4 & shift4_2 & erd0 { save:4 = RX9A; val:4 = RY0A << shift4_2; erd0 = RX9A - val; subflags(save, val, erd0); }
:SUB erd0, RX9A, RY0A^" << " shift4_2 is op13_3=7 & op4_5=0 & RY0A & RX9A; eop6_10=4 & shift4_2 & erd0 & erd0=0xf { save:4 = RX9A; val:4 = RY0A << shift4_2; PC = RX9A - val; subflags(save, val, PC); goto [PC]; }
# SUB Format III
# 0010 iiii iiii dddd
:SUB rd0, imm is op13_3=0x1 & op12_1=0 & imm4_8 & rd0 & b003=0xd [ imm = imm4_8 << 2; ] { save:4 = rd0; rd0 = rd0 - imm; subflags(save, imm, rd0); }
:SUB rd0, imm4_8 is op13_3=0x1 & op12_1=0 & imm4_8 & rd0 { save:4 = rd0; rd0 = rd0 - imm4_8; subflags(save, imm4_8, rd0); }
:SUB rd0, imm4_8 is op13_3=0x1 & op12_1=0 & imm4_8 & rd0 & rd0=0xf { tmp:4 = inst_start; PC = tmp - imm4_8; subflags(tmp, imm4_8, PC); goto [PC]; }
# SUB Format IV
# 111i iii0 001i dddd
iiii iiii iiii iiii :SUB rd0, imm is op13_3=0x7 & op5_4=0x1 & imm9_4 & imm4_1 & rd0; imm16 [ imm = (imm9_4 << 17) | (imm4_1 << 16) | imm16; ] { save:4 = rd0; rd0 = rd0 - imm; subflags(save, imm, rd0); } :SUB rd0, imm is op13_3=0x7 & op5_4=0x1 & imm9_4 & imm4_1 & rd0 & rd0=0xf; imm16 [ imm = (imm9_4 << 17) | (imm4_1 << 16) | imm16; ] { tmp:4 = inst_start; PC = tmp - imm; subflags(tmp, imm, PC); goto [PC]; } # SUB Format V # 111s sss0 1100 dddd iiii iiii iiii iiii :SUB rd0, RS9A, simm16 is op13_3=0x7 & op4_5=0xc & RS9A & rd0; simm16 { save:4 = RS9A; rd0 = RS9A - simm16; subflags(save, simm16, rd0); } :SUB rd0, RS9A, simm16 is op13_3=0x7 & op4_5=0xc & RS9A & rd0 & rd0=0xf; simm16 { save:4 = RS9A; PC = RS9A - simm16; subflags(save, simm16, PC); goto [PC]; } #--------------------------------------------------------------------- # SUB{cond4} - Conditional Subtract # I. cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # d -> {0, 1, ..., 15} # imm -> {-128, -127, ..., 127} # II. 
cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # SUB{cond4} Format I # Operation: if(cond4) then # Rd <- Rd - imm8 # Update flags if opcode[f] field is set # Syntax: sub{f}{cond4} Rd, imm # 1111 01f1 1011 dddd 0000 cccc iiii iiii F: "{F}" is b09=1 & rd0; simm0_8 { pre:4 = rd0 + simm0_8; subflags(pre, simm0_8, rd0); } F: is b09=0 & rd0; simm0_8 { } :SUB^F^{ECOND_8_4} rd0, simm0_8 is (op10_6=0x3d & op4_5=0x1b & rd0 ; eop12_4=0 & simm0_8 & ECOND_8_4) & F { build ECOND_8_4; rd0 = rd0 - simm0_8; build F; } :SUB^F^{ECOND_8_4} rd0, simm0_8 is (op10_6=0x3d & op4_5=0x1b & rd0 & rd0=0xf; eop12_4=0 & simm0_8 & ECOND_8_4) & F { build ECOND_8_4; PC = inst_start - simm0_8; build F; goto [PC]; } # SUB{cond4} Format II # Operation: if(cond4) then # Rd <- Rx - Ry # Syntax: sub{cond4} Rd, Rx, Ry # 111x xxx1 1101 yyyy 1110 cccc 0001 dddd :SUB^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=1 & ECOND_8_4 & erd0) { build ECOND_8_4; erd0 = RX9A - RY0A; } :SUB^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=1 & ECOND_8_4 & erd0 & erd0=0xf) { build ECOND_8_4; PC = RX9A - RY0A; goto [PC]; } #--------------------------------------------------------------------- # TNBZ - Test if No Byte is Equal to Zero # I. 
d -> {0, 1, ..., 15} #--------------------------------------------------------------------- # TNBZ Format I # Operation: if (Rd[31:24] == 0 | # Rd[23:16] == 0 | # Rd[15:8] == 0 | # Rd[7:0] == 0) # Z <- 1; # else # Z <- 0; # Syntax: tnbz Rd # 0101 1100 1110 dddd :TNBZ RD0A is op4_12=0x5ce & RD0A { Z = ((RD0A & 0xff000000) == 0 || (RD0A & 0x00ff0000) == 0 || (RD0A & 0x0000ff00) == 0 || (RD0A & 0x000000ff) == 0); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_autogen.sinc ================================================ define token gen_instr1(16) g_op13_3 = (13,15) g_op12_4 = (12,15) g_regsel11_1 = (11,11) g_op11_5 = (11,15) g_op10_6 = (10,15) g_regsel10_1 = (10,10) g_op9_7 = (9,15) g_rx9_4 = (9,12) g_opcode9_2 = (9,10) g_rs9_4 = (9,12) g_regsel9_1 = (9,9) g_offset9_4 = (9,12) g_disp9_4 = (9,12) g_rp9_4 = (9,12) g_update9_1 = (9,9) g_rb9_4 = (9,12) g_imm9_4 = (9,12) g_rd9_4 = (9,12) g_shift9_4 = (9,12) g_op8_1 = (8,8) g_op8_8 = (8,15) g_regsel8_1 = (8,8) g_op7_9 = (7,15) g_op7_2 = (7,8) g_regsel7_1 = (7,7) g_regsel6_1 = (6,6) g_regsel5_1 = (5,5) g_op5_4 = (5,8) g_imm4_1 = (4,4) g_disp4_5 = (4,8) g_imm4_6 = (4,9) g_shift4_1 = (4,4) g_imm4_8 = (4,11) g_disp4_1 = (4,4) g_offset4_1 = (4,4) g_disp4_4 = (4,7) g_op4_12 = (4,15) g_disp4_8 = (4,11) g_disp4_3 = (4,6) g_regsel4_1 = (4,4) g_imm4_3 = (4,6) g_op4_5 = (4,8) g_offset4_5 = (4,8) g_cond4_4 = (4,7) g_disp4_7 = (4,10) g_op3_1 = (3,3) g_op3_6 = (3,8) g_returnflag3_1 = (3,3) g_op2_2 = (2,3) g_rd1_3 = (1,3) g_rs1_3 = (1,3) g_ri0_4 = (0,3) g_cond0_4 = (0,3) g_shift0_4 = (0,3) g_op0_16 = (0,15) g_op0_9 = (0,8) g_op0_1 = (0,0) g_offset0_4 = (0,3) g_rs0_4 = (0,3) g_cond0_3 = (0,2) g_op0_3 = (0,2) g_opcode0_4 = (0,3) g_rd0_4 = (0,3) g_shift0_3 = (0,2) g_rp0_4 = (0,3) g_ry0_4 = (0,3) g_disp0_2 = (0,1) g_op0_4 = (0,3) ; define token gen_instr2(16) eg_regsel15_1 = (15,15) eg_offset15_1 = (15,15) eg_regsel14_1 = (14,14) eg_op14_2 = (14,15) eg_op13_3 = (13,15) eg_xpart13_1
= (13,13) eg_cop13_3 = (13,15) eg_regsel13_1 = (13,13) eg_disp12_4 = (12,15) eg_op12_4 = (12,15) eg_regsel12_1 = (12,12) eg_op12_1 = (12,12) eg_update12_1 = (12,12) eg_ypart12_1 = (12,12) eg_cond12_4 = (12,15) eg_opcode12_1 = (12,12) eg_opcode11_5 = (11,15) eg_regsel11_1 = (11,11) eg_regsel10_1 = (10,10) eg_op10_6 = (10,15) eg_regsel9_1 = (9,9) eg_op9_7 = (9,15) eg_rd9_3 = (9,11) eg_rs9_3 = (9,11) eg_op9_3 = (9,11) eg_op8_1 = (8,8) eg_op8_4 = (8,11) eg_rd8_4 = (8,11) eg_op8_8 = (8,15) eg_cond8_4 = (8,11) eg_ri8_4 = (8,11) eg_regsel8_1 = (8,8) eg_rs8_4 = (8,11) eg_regsel7_1 = (7,7) eg_op6_3 = (6,8) eg_regsel6_1 = (6,6) eg_op6_2 = (6,7) eg_op6_10 = (6,15) eg_op5_11 = (5,15) eg_regsel5_1 = (5,5) eg_offset5_5 = (5,9) eg_xpart5_1 = (5,5) eg_regsel4_1 = (4,4) eg_rx4_4 = (4,7) eg_disp4_8 = (4,11) eg_ypart4_1 = (4,4) eg_shift4_5 = (4,8) eg_cond4_4 = (4,7) eg_returnflag4_1 = (4,4) eg_op4_12 = (4,15) eg_op4_4 = (4,7) eg_shift4_2 = (4,5) eg_regsel3_1 = (3,3) eg_regsel2_1 = (2,2) eg_regsel1_1 = (1,1) eg_ri0_4 = (0,3) eg_opcode0_8 = (0,7) eg_rd0_4 = (0,3) eg_imm0_8 = (0,7) eg_shift0_5 = (0,4) eg_regsel0_1 = (0,0) eg_disp0_16 = (0,15) eg_disp0_12 = (0,11) eg_disp0_8 = (0,7) eg_bits0_5 = (0,4) eg_disp0_9 = (0,8) eg_imm0_15 = (0,14) eg_rs0_4 = (0,3) eg_rb0_4 = (0,3) eg_offset0_5 = (0,4) eg_op0_8 = (0,7) eg_imm0_16 = (0,15) eg_op0_9 = (0,8) eg_ry0_4 = (0,3) eg_op0_16 = (0,15) eg_dbgregaddr0_8 = (0,7) eg_disp0_11 = (0,10) eg_rp0_4 = (0,3) eg_op0_4 = (0,3) ; #:ABS g_rd0_4 is g_op4_12=0x5c4 & g_rd0_4 unimpl #:ACALL g_disp4_8 is g_op12_4=0xd & g_disp4_8 & g_op0_4=0x0 unimpl #:ACR g_rd0_4 is g_op4_12=0x5c0 & g_rd0_4 unimpl #:ADC g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x4 & eg_rd0_4 unimpl #:ADD g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 unimpl #:ADD g_rx9_4, g_ry0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x0 & eg_shift4_2 & eg_rd0_4 unimpl #:ADD_COND g_rx9_4, g_ry0_4, 
eg_cond8_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1d & g_ry0_4 ; eg_op12_4=0xe & eg_cond8_4 & eg_op4_4=0x0 & eg_rd0_4 unimpl #:ADDABS g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xe4 & eg_rd0_4 unimpl #:ADDHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x38 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:AND g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x6 & g_rd0_4 unimpl #:AND g_rx9_4, g_ry0_4, eg_shift4_5, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op9_7=0x0 & eg_shift4_5 & eg_rd0_4 unimpl #:AND g_rx9_4, g_ry0_4, eg_shift4_5, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op9_7=0x1 & eg_shift4_5 & eg_rd0_4 unimpl #:AND_COND g_rx9_4, g_ry0_4, eg_cond8_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1d & g_ry0_4 ; eg_op12_4=0xe & eg_cond8_4 & eg_op4_4=0x2 & eg_rd0_4 unimpl #:ANDH g_update9_1, g_rd0_4, eg_imm0_16 is g_op10_6=0x39 & g_update9_1 & g_op4_5=0x1 & g_rd0_4 ; eg_imm0_16 unimpl #:ANDL g_update9_1, g_rd0_4, eg_imm0_16 is g_op10_6=0x38 & g_update9_1 & g_op4_5=0x1 & g_rd0_4 ; eg_imm0_16 unimpl #:ANDN g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x8 & g_rd0_4 unimpl #:ASR g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x84 & eg_rd0_4 unimpl #:ASR g_shift9_4, g_shift4_1, g_rd0_4 is g_op13_3=0x5 & g_shift9_4 & g_op5_4=0xa & g_shift4_1 & g_rd0_4 unimpl #:ASR g_rs9_4, g_rd0_4, eg_shift0_5 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op5_11=0xa0 & eg_shift0_5 unimpl #:BFEXTS g_rd9_4, g_rs0_4, eg_offset5_5, eg_bits0_5 is g_op13_3=0x7 & g_rd9_4 & g_op4_5=0x1d & g_rs0_4 ; eg_op10_6=0x2c & eg_offset5_5 & eg_bits0_5 unimpl #:BFEXTU g_rd9_4, g_rs0_4, eg_offset5_5, eg_bits0_5 is g_op13_3=0x7 & g_rd9_4 & g_op4_5=0x1d & g_rs0_4 ; eg_op10_6=0x30 & eg_offset5_5 & eg_bits0_5 unimpl #:BFINS g_rd9_4, g_rs0_4, eg_offset5_5, eg_bits0_5 is 
g_op13_3=0x7 & g_rd9_4 & g_op4_5=0x1d & g_rs0_4 ; eg_op10_6=0x34 & eg_offset5_5 & eg_bits0_5 unimpl #:BLD g_rd0_4, eg_offset0_5 is g_op4_12=0xedb & g_rd0_4 ; eg_op5_11=0x0 & eg_offset0_5 unimpl #:BR_COND g_disp4_8, g_cond0_3 is g_op12_4=0xc & g_disp4_8 & g_op3_1=0x0 & g_cond0_3 unimpl #:BR_COND g_disp9_4, g_disp4_1, g_cond0_4, eg_disp0_16 is g_op13_3=0x7 & g_disp9_4 & g_op5_4=0x4 & g_disp4_1 & g_cond0_4 ; eg_disp0_16 unimpl #:BREAKPOINT is g_op0_16=0xd673 unimpl #:BREV g_rd0_4 is g_op4_12=0x5c9 & g_rd0_4 unimpl #:BST g_rd0_4, eg_offset0_5 is g_op4_12=0xefb & g_rd0_4 ; eg_op5_11=0x0 & eg_offset0_5 unimpl #:CACHE g_rp0_4, eg_opcode11_5, eg_disp0_11 is g_op4_12=0xf41 & g_rp0_4 ; eg_opcode11_5 & eg_disp0_11 unimpl #:CASTS.H g_rd0_4 is g_op4_12=0x5c8 & g_rd0_4 unimpl #:CASTS.B g_rd0_4 is g_op4_12=0x5c6 & g_rd0_4 unimpl #:CASTU.H g_rd0_4 is g_op4_12=0x5c7 & g_rd0_4 unimpl #:CASTU.B g_rd0_4 is g_op4_12=0x5c5 & g_rd0_4 unimpl #:CBR g_offset9_4, g_offset4_1, g_rd0_4 is g_op13_3=0x5 & g_offset9_4 & g_op5_4=0xe & g_offset4_1 & g_rd0_4 unimpl #:CLZ g_rs9_4, g_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op0_16=0x1200 unimpl #:COM g_rd0_4 is g_op4_12=0x5cd & g_rd0_4 unimpl #:COP g_opcode9_2, g_opcode0_4, eg_cop13_3, eg_opcode12_1, eg_rd8_4, eg_rx4_4, eg_ry0_4 is g_op11_5=0x1c & g_opcode9_2 & g_op4_5=0x1a & g_opcode0_4 ; eg_cop13_3 & eg_opcode12_1 & eg_rd8_4 & eg_rx4_4 & eg_ry0_4 unimpl #:CP.B g_rs9_4, g_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op0_16=0x1800 unimpl #:CP.H g_rs9_4, g_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op0_16=0x1900 unimpl #:CP.W g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x3 & g_rd0_4 unimpl #:CP.W g_imm4_6, g_rd0_4 is g_op10_6=0x16 & g_imm4_6 & g_rd0_4 unimpl #:CP.W g_imm9_4, g_imm4_1, g_rd0_4, eg_imm0_16 is g_op13_3=0x7 & g_imm9_4 & g_op5_4=0x2 & g_imm4_1 & g_rd0_4 ; eg_imm0_16 unimpl #:CPC g_rs9_4, g_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op0_16=0x1300 unimpl #:CPC 
g_rd0_4 is g_op4_12=0x5c2 & g_rd0_4 unimpl #:CSRF g_offset4_5 is g_op9_7=0x6a & g_offset4_5 & g_op0_4=0x3 unimpl #:CSRFCZ g_offset4_5 is g_op9_7=0x68 & g_offset4_5 & g_op0_4=0x3 unimpl #:DIVS g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xc0 & eg_rd0_4 unimpl #:DIVU g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xd0 & eg_rd0_4 unimpl #:EOR g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x5 & g_rd0_4 unimpl #:EOR g_rx9_4, g_ry0_4, eg_shift4_5, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op9_7=0x10 & eg_shift4_5 & eg_rd0_4 unimpl #:EOR g_rx9_4, g_ry0_4, eg_shift4_5, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op9_7=0x11 & eg_shift4_5 & eg_rd0_4 unimpl #:EOR_COND g_rx9_4, g_ry0_4, eg_cond8_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1d & g_ry0_4 ; eg_op12_4=0xe & eg_cond8_4 & eg_op4_4=0x4 & eg_rd0_4 unimpl #:EORH g_rd0_4, eg_imm0_16 is g_op4_12=0xee1 & g_rd0_4 ; eg_imm0_16 unimpl #:EORL g_rd0_4, eg_imm0_16 is g_op4_12=0xec1 & g_rd0_4 ; eg_imm0_16 unimpl #:FRS is g_op0_16=0xd743 unimpl #:ICALL g_rd0_4 is g_op4_12=0x5d1 & g_rd0_4 unimpl #:INCJOSP g_imm4_3 is g_op7_9=0x1ad & g_imm4_3 & g_op0_4=0x3 unimpl #:LD.D g_rp9_4, g_rd1_3 is g_op13_3=0x5 & g_rp9_4 & g_op4_5=0x10 & g_rd1_3 & g_op0_1=0x1 unimpl #:LD.D g_rp9_4, g_rd1_3 is g_op13_3=0x5 & g_rp9_4 & g_op4_5=0x11 & g_rd1_3 & g_op0_1=0x0 unimpl #:LD.D g_rp9_4, g_rd1_3 is g_op13_3=0x5 & g_rp9_4 & g_op4_5=0x10 & g_rd1_3 & g_op0_1=0x0 unimpl #:LD.D g_rp9_4, g_rd1_3, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0xe & g_rd1_3 & g_op0_1=0x0 ; eg_disp0_16 unimpl #:LD.D g_rb9_4, g_ri0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x8 & eg_shift4_2 & eg_rd0_4 unimpl #:LD.SB g_rp9_4, g_rd0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x12 & g_rd0_4 ; eg_disp0_16 unimpl #:LD.SB g_rb9_4, g_ri0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & 
g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x18 & eg_shift4_2 & eg_rd0_4 unimpl #:LD.SB_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x3 & eg_disp0_9 unimpl #:LD.UB g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x13 & g_rd0_4 unimpl #:LD.UB g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x17 & g_rd0_4 unimpl #:LD.UB g_rp9_4, g_disp4_3, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op7_2=0x3 & g_disp4_3 & g_rd0_4 unimpl #:LD.UB g_rp9_4, g_rd0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x13 & g_rd0_4 ; eg_disp0_16 unimpl #:LD.UB g_rb9_4, g_ri0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x1c & eg_shift4_2 & eg_rd0_4 unimpl #:LD.UB_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x4 & eg_disp0_9 unimpl #:LD.SH g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x11 & g_rd0_4 unimpl #:LD.SH g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x15 & g_rd0_4 unimpl #:LD.SH g_rp9_4, g_disp4_3, g_rd0_4 is g_op13_3=0x4 & g_rp9_4 & g_op7_2=0x0 & g_disp4_3 & g_rd0_4 unimpl #:LD.SH g_rp9_4, g_rd0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x10 & g_rd0_4 ; eg_disp0_16 unimpl #:LD.SH g_rb9_4, g_ri0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x10 & eg_shift4_2 & eg_rd0_4 unimpl #:LD.SH_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x1 & eg_disp0_9 unimpl #:LD.UH g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x12 & g_rd0_4 unimpl #:LD.UH g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x16 & g_rd0_4 unimpl #:LD.UH g_rp9_4, g_disp4_3, g_rd0_4 is g_op13_3=0x4 & g_rp9_4 & g_op7_2=0x1 & g_disp4_3 & g_rd0_4 unimpl #:LD.UH g_rp9_4, g_rd0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x11 & g_rd0_4 ; eg_disp0_16 unimpl #:LD.UH g_rb9_4, g_ri0_4, 
eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x14 & eg_shift4_2 & eg_rd0_4 unimpl #:LD.UH_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x2 & eg_disp0_9 unimpl #:LD.W g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x10 & g_rd0_4 unimpl #:LD.W g_rp9_4, g_rd0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0x14 & g_rd0_4 unimpl #:LD.W g_rp9_4, g_disp4_5, g_rd0_4 is g_op13_3=0x3 & g_rp9_4 & g_disp4_5 & g_rd0_4 unimpl #:LD.W g_rp9_4, g_rd0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0xf & g_rd0_4 ; eg_disp0_16 unimpl #:LD.W g_rb9_4, g_ri0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0xc & eg_shift4_2 & eg_rd0_4 unimpl #:LD.W g_rb9_4, g_ri0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x3e & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:LD.W_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x0 & eg_disp0_9 unimpl #:LDC.D g_rp0_4, eg_cop13_3, eg_rd9_3, eg_disp0_8 is g_op4_12=0xe9a & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x1 & eg_rd9_3 & eg_op8_1=0x0 & eg_disp0_8 unimpl #:LDC.D g_rp0_4, eg_cop13_3, eg_rd9_3 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rd9_3 & eg_op0_9=0x50 unimpl #:LDC.D g_rp0_4, eg_cop13_3, eg_rd9_3, eg_shift4_2, eg_ri0_4 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x1 & eg_rd9_3 & eg_op6_3=0x1 & eg_shift4_2 & eg_ri0_4 unimpl #:LDC.W g_rp0_4, eg_cop13_3, eg_rd8_4, eg_disp0_8 is g_op4_12=0xe9a & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rd8_4 & eg_disp0_8 unimpl #:LDC.W g_rp0_4, eg_cop13_3, eg_rd8_4 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rd8_4 & eg_op0_8=0x40 unimpl #:LDC.W g_rp0_4, eg_cop13_3, eg_rd8_4, eg_shift4_2, eg_ri0_4 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x1 & eg_rd8_4 & eg_op6_2=0x0 & eg_shift4_2 & 
eg_ri0_4 unimpl #:LDC0.D g_rp0_4, eg_disp12_4, eg_rd9_3, eg_disp0_8 is g_op4_12=0xf3a & g_rp0_4 ; eg_disp12_4 & eg_rd9_3 & eg_op8_1=0x0 & eg_disp0_8 unimpl #:LDC0.W g_rp0_4, eg_disp12_4, eg_rd8_4, eg_disp0_8 is g_op4_12=0xf1a & g_rp0_4 ; eg_disp12_4 & eg_rd8_4 & eg_disp0_8 unimpl #:LDCM.D g_rp0_4, eg_cop13_3, eg_update12_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xeda & g_rp0_4 ; eg_cop13_3 & eg_update12_1 & eg_op8_4=0x4 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:LDCM.W g_rp0_4, eg_cop13_3, eg_update12_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xeda & g_rp0_4 ; eg_cop13_3 & eg_update12_1 & eg_op8_4=0x1 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:LDCM.W g_rp0_4, eg_cop13_3, eg_update12_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xeda & g_rp0_4 ; eg_cop13_3 & eg_update12_1 & eg_op8_4=0x0 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:LDDPC g_disp4_7, g_rd0_4 is g_op11_5=0x9 & g_disp4_7 & g_rd0_4 unimpl #:LDDSP g_disp4_7, g_rd0_4 is g_op11_5=0x8 & g_disp4_7 & g_rd0_4 unimpl #:LDINS.B g_rp9_4, g_rd0_4, eg_xpart13_1, eg_ypart12_1, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rd0_4 ; eg_op14_2=0x1 & eg_xpart13_1 & eg_ypart12_1 & eg_disp0_12 unimpl #:LDINS.H g_rp9_4, g_rd0_4, eg_ypart12_1, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rd0_4 ; eg_op13_3=0x0 & eg_ypart12_1 & eg_disp0_12 unimpl #:LDM g_update9_1, g_rp0_4, eg_regsel15_1, eg_regsel14_1, eg_regsel13_1, eg_regsel12_1, eg_regsel11_1, eg_regsel10_1, eg_regsel9_1, eg_regsel8_1, eg_regsel7_1, 
eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op10_6=0x38 & g_update9_1 & g_op4_5=0x1c & g_rp0_4 ; eg_regsel15_1 & eg_regsel14_1 & eg_regsel13_1 & eg_regsel12_1 & eg_regsel11_1 & eg_regsel10_1 & eg_regsel9_1 & eg_regsel8_1 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:LDMTS g_update9_1, g_rp0_4, eg_regsel15_1, eg_regsel14_1, eg_regsel13_1, eg_regsel12_1, eg_regsel11_1, eg_regsel10_1, eg_regsel9_1, eg_regsel8_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op10_6=0x39 & g_update9_1 & g_op4_5=0x1c & g_rp0_4 ; eg_regsel15_1 & eg_regsel14_1 & eg_regsel13_1 & eg_regsel12_1 & eg_regsel11_1 & eg_regsel10_1 & eg_regsel9_1 & eg_regsel8_1 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:LDSWP.SH g_rp9_4, g_rd0_4, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rd0_4 ; eg_op12_4=0x2 & eg_disp0_12 unimpl #:LDSWP.UH g_rp9_4, g_rd0_4, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rd0_4 ; eg_op12_4=0x3 & eg_disp0_12 unimpl #:LDSWP.W g_rp9_4, g_rd0_4, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rd0_4 ; eg_op12_4=0x8 & eg_disp0_12 unimpl #:LSL g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x94 & eg_rd0_4 unimpl #:LSL g_shift9_4, g_shift4_1, g_rd0_4 is g_op13_3=0x5 & g_shift9_4 & g_op5_4=0xb & g_shift4_1 & g_rd0_4 unimpl #:LSL g_rs9_4, g_rd0_4, eg_shift0_5 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op5_11=0xa8 & eg_shift0_5 unimpl #:LSR g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xa4 & eg_rd0_4 unimpl #:LSR g_shift9_4, g_shift4_1, g_rd0_4 is g_op13_3=0x5 & g_shift9_4 & g_op5_4=0xc & g_shift4_1 & g_rd0_4 unimpl #:LSR g_rs9_4, g_rd0_4, eg_shift0_5 is g_op13_3=0x7 & 
g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op5_11=0xb0 & eg_shift0_5 unimpl #:MAC g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x34 & eg_rd0_4 unimpl #:MACHH.D g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x16 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MACHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x12 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MACS.D g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x54 & eg_rd0_4 unimpl #:MACSATHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x1a & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MACU.D g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x74 & eg_rd0_4 unimpl #:MACWH.D g_rx9_4, g_ry0_4, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op5_11=0x64 & eg_ypart4_1 & eg_rd0_4 unimpl #:MAX g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xc4 & eg_rd0_4 unimpl #:MCALL g_rp0_4, eg_disp0_16 is g_op4_12=0xf01 & g_rp0_4 ; eg_disp0_16 unimpl #:MEMC g_offset0_4, eg_offset15_1, eg_imm0_15 is g_op4_12=0xf61 & g_offset0_4 ; eg_offset15_1 & eg_imm0_15 unimpl #:MEMS g_offset0_4, eg_offset15_1, eg_imm0_15 is g_op4_12=0xf81 & g_offset0_4 ; eg_offset15_1 & eg_imm0_15 unimpl #:MEMT g_offset0_4, eg_offset15_1, eg_imm0_15 is g_op4_12=0xfa1 & g_offset0_4 ; eg_offset15_1 & eg_imm0_15 unimpl #:MFDR g_rd0_4, eg_dbgregaddr0_8 is g_op4_12=0xe5b & g_rd0_4 ; eg_op8_8=0x0 & eg_dbgregaddr0_8 unimpl #:MFSR g_rd0_4, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xe1b & g_rd0_4 ; eg_op8_8=0x0 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & 
eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:MIN g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xd4 & eg_rd0_4 unimpl #:MOV g_imm4_8, g_rd0_4 is g_op12_4=0x3 & g_imm4_8 & g_rd0_4 unimpl #:MOV g_imm9_4, g_imm4_1, g_rd0_4, eg_imm0_16 is g_op13_3=0x7 & g_imm9_4 & g_op5_4=0x3 & g_imm4_1 & g_rd0_4 ; eg_imm0_16 unimpl #:MOV g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x9 & g_rd0_4 unimpl #:MOV_COND g_rs9_4, g_rd0_4, eg_cond4_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op8_8=0x17 & eg_cond4_4 & eg_op0_4=0x0 unimpl #:MOV_COND g_rd0_4, eg_cond8_4, eg_imm0_8 is g_op4_12=0xf9b & g_rd0_4 ; eg_op12_4=0x0 & eg_cond8_4 & eg_imm0_8 unimpl #:MOVHI g_rd0_4, eg_imm0_16 is g_op4_12=0xfc1 & g_rd0_4 ; eg_imm0_16 unimpl #:MTDR g_rs0_4, eg_dbgregaddr0_8 is g_op4_12=0xe7b & g_rs0_4 ; eg_op8_8=0x0 & eg_dbgregaddr0_8 unimpl #:MTSR g_rs0_4, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xe3b & g_rs0_4 ; eg_op8_8=0x0 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:MUL g_rs9_4, g_rd0_4 is g_op13_3=0x5 & g_rs9_4 & g_op4_5=0x13 & g_rd0_4 unimpl #:MUL g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x24 & eg_rd0_4 unimpl #:MUL g_rs9_4, g_rd0_4, eg_imm0_8 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op8_8=0x10 & eg_imm0_8 unimpl #:MULHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x1e & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MULNHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x6 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MULNWH.D g_rx9_4, g_ry0_4, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op5_11=0x14 & eg_ypart4_1 & eg_rd0_4 unimpl 
#:MULS.D g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x44 & eg_rd0_4 unimpl #:MULSATHH.H g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x22 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MULSATHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x26 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MULSATRNDHH.H g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x2a & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:MULSATRNDWH.W g_rx9_4, g_ry0_4, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op5_11=0x5c & eg_ypart4_1 & eg_rd0_4 unimpl #:MULSATWH.W g_rx9_4, g_ry0_4, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op5_11=0x74 & eg_ypart4_1 & eg_rd0_4 unimpl #:MULU.D g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x64 & eg_rd0_4 unimpl #:MULWH.D g_rx9_4, g_ry0_4, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op5_11=0x6c & eg_ypart4_1 & eg_rd0_4 unimpl #:MUSFR g_rs0_4 is g_op4_12=0x5d3 & g_rs0_4 unimpl #:MUSTR g_rd0_4 is g_op4_12=0x5d2 & g_rd0_4 unimpl #:MVCR.D g_rd1_3, eg_cop13_3, eg_rs9_3 is g_op4_12=0xefa & g_rd1_3 & g_op0_1=0x0 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rs9_3 & eg_op0_9=0x10 unimpl #:MVCR.W g_rd0_4, eg_cop13_3, eg_rs8_4 is g_op4_12=0xefa & g_rd0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rs8_4 & eg_op0_8=0x0 unimpl #:MVRC.D g_rs1_3, eg_cop13_3, eg_rd9_3 is g_op4_12=0xefa & g_rs1_3 & g_op0_1=0x0 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rd9_3 & eg_op0_9=0x30 unimpl #:MVRC.W g_rs0_4, eg_cop13_3, eg_rd8_4 is g_op4_12=0xefa & g_rs0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rd8_4 & eg_op0_8=0x20 unimpl #:NEG g_rd0_4 is g_op4_12=0x5c3 & g_rd0_4 unimpl #:NOP is g_op0_16=0xd703 unimpl #:OR g_rs9_4, g_rd0_4 is 
g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x4 & g_rd0_4 unimpl #:OR g_rx9_4, g_ry0_4, eg_shift4_5, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op9_7=0x8 & eg_shift4_5 & eg_rd0_4 unimpl #:OR g_rx9_4, g_ry0_4, eg_shift4_5, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op9_7=0x9 & eg_shift4_5 & eg_rd0_4 unimpl #:OR_COND g_rx9_4, g_ry0_4, eg_cond8_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1d & g_ry0_4 ; eg_op12_4=0xe & eg_cond8_4 & eg_op4_4=0x3 & eg_rd0_4 unimpl #:ORH g_rd0_4, eg_imm0_16 is g_op4_12=0xea1 & g_rd0_4 ; eg_imm0_16 unimpl #:ORL g_rd0_4, eg_imm0_16 is g_op4_12=0xe81 & g_rd0_4 ; eg_imm0_16 unimpl #:PABS.SB g_rs0_4, eg_rd0_4 is g_op4_12=0xe00 & g_rs0_4 ; eg_op4_12=0x23e & eg_rd0_4 unimpl #:PABS.SH g_rs0_4, eg_rd0_4 is g_op4_12=0xe00 & g_rs0_4 ; eg_op4_12=0x23f & eg_rd0_4 unimpl #:PACKSH.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x24c & eg_rd0_4 unimpl #:PACKSH.SB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x24d & eg_rd0_4 unimpl #:PACKW.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x247 & eg_rd0_4 unimpl #:PADD.B g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x230 & eg_rd0_4 unimpl #:PADD.H g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x200 & eg_rd0_4 unimpl #:PADDH.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x236 & eg_rd0_4 unimpl #:PADDH.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x20c & eg_rd0_4 unimpl #:PADDS.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x234 & eg_rd0_4 unimpl #:PADDS.SB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x232 & eg_rd0_4 unimpl #:PADDS.UH g_rx9_4, g_ry0_4, eg_rd0_4 is 
g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x208 & eg_rd0_4 unimpl #:PADDS.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x204 & eg_rd0_4 unimpl #:PADDSUB.H g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x84 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PADDSUBH.SH g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x8a & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PADDSUBS.UH g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x88 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PADDSUBS.SH g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x86 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PADDX.H g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x202 & eg_rd0_4 unimpl #:PADDXH.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x20e & eg_rd0_4 unimpl #:PADDXS.UH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x20a & eg_rd0_4 unimpl #:PADDXS.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x206 & eg_rd0_4 unimpl #:PASR.B g_rs9_4, g_shift0_3, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op3_6=0x0 & g_shift0_3 ; eg_op4_12=0x241 & eg_rd0_4 unimpl #:PASR.H g_rs9_4, g_shift0_4, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_shift0_4 ; eg_op4_12=0x244 & eg_rd0_4 unimpl #:PAVG.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x23c & eg_rd0_4 unimpl #:PAVG.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x23d & eg_rd0_4 unimpl #:PLSL.B g_rs9_4, g_shift0_3, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & 
g_op3_6=0x0 & g_shift0_3 ; eg_op4_12=0x242 & eg_rd0_4 unimpl #:PLSL.H g_rs9_4, g_shift0_4, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_shift0_4 ; eg_op4_12=0x245 & eg_rd0_4 unimpl #:PLSR.B g_rs9_4, g_shift0_3, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op3_6=0x0 & g_shift0_3 ; eg_op4_12=0x243 & eg_rd0_4 unimpl #:PLSR.H g_rs9_4, g_shift0_4, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_shift0_4 ; eg_op4_12=0x246 & eg_rd0_4 unimpl #:PMAX.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x238 & eg_rd0_4 unimpl #:PMAX.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x239 & eg_rd0_4 unimpl #:PMIN.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x23a & eg_rd0_4 unimpl #:PMIN.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x23b & eg_rd0_4 unimpl #:POPJC is g_op0_16=0xd713 unimpl #:POPM g_regsel11_1, g_regsel10_1, g_regsel9_1, g_regsel8_1, g_regsel7_1, g_regsel6_1, g_regsel5_1, g_regsel4_1, g_returnflag3_1 is g_op12_4=0xd & g_regsel11_1 & g_regsel10_1 & g_regsel9_1 & g_regsel8_1 & g_regsel7_1 & g_regsel6_1 & g_regsel5_1 & g_regsel4_1 & g_returnflag3_1 & g_op0_3=0x2 unimpl #:PREF g_rp0_4, eg_disp0_16 is g_op4_12=0xf21 & g_rp0_4 ; eg_disp0_16 unimpl #:PSAD g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x240 & eg_rd0_4 unimpl #:PSUB.B g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x231 & eg_rd0_4 unimpl #:PSUB.H g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x201 & eg_rd0_4 unimpl #:PSUBADD.H g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x85 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PSUBADDH.SH g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & 
g_ry0_4 ; eg_op6_10=0x8b & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PSUBADDS.UH g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x89 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PSUBADDS.SH g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x87 & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:PSUBH.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x237 & eg_rd0_4 unimpl #:PSUBH.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x20d & eg_rd0_4 unimpl #:PSUBS.UB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x235 & eg_rd0_4 unimpl #:PSUBS.SB g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x233 & eg_rd0_4 unimpl #:PSUBS.UH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x209 & eg_rd0_4 unimpl #:PSUBS.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x205 & eg_rd0_4 unimpl #:PSUBX.H g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x203 & eg_rd0_4 unimpl #:PSUBXH.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x20f & eg_rd0_4 unimpl #:PSUBXS.UH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x20b & eg_rd0_4 unimpl #:PSUBXS.SH g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x207 & eg_rd0_4 unimpl #:PUNPCKUB.H g_rs9_4, eg_returnflag4_1, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op0_9=0x0 ; eg_op5_11=0x124 & eg_returnflag4_1 & eg_rd0_4 unimpl #:PUNPCKSB.H g_rs9_4, eg_returnflag4_1, eg_rd0_4 is g_op13_3=0x7 & g_rs9_4 & g_op0_9=0x0 ; eg_op5_11=0x125 & eg_returnflag4_1 & eg_rd0_4 unimpl #:PUSHJC is g_op0_16=0xd723 unimpl 
#:PUSHM g_regsel11_1, g_regsel10_1, g_regsel9_1, g_regsel8_1, g_regsel7_1, g_regsel6_1, g_regsel5_1, g_regsel4_1 is g_op12_4=0xd & g_regsel11_1 & g_regsel10_1 & g_regsel9_1 & g_regsel8_1 & g_regsel7_1 & g_regsel6_1 & g_regsel5_1 & g_regsel4_1 & g_op0_4=0x1 unimpl #:RCALL g_disp4_8, g_disp0_2 is g_op12_4=0xc & g_disp4_8 & g_op2_2=0x3 & g_disp0_2 unimpl #:RCALL g_disp9_4, g_disp4_1, eg_disp0_16 is g_op13_3=0x7 & g_disp9_4 & g_op5_4=0x5 & g_disp4_1 & g_op0_4=0x0 ; eg_disp0_16 unimpl #:RET_COND g_cond4_4, g_rs0_4 is g_op8_8=0x5e & g_cond4_4 & g_rs0_4 unimpl #:RETD is g_op0_16=0xd623 unimpl #:RETE is g_op0_16=0xd603 unimpl #:RETJ is g_op0_16=0xd633 unimpl #:RETS is g_op0_16=0xd613 unimpl #:RJMP g_disp4_8, g_disp0_2 is g_op12_4=0xc & g_disp4_8 & g_op2_2=0x2 & g_disp0_2 unimpl #:ROL g_rd0_4 is g_op4_12=0x5cf & g_rd0_4 unimpl #:ROR g_rd0_4 is g_op4_12=0x5d0 & g_rd0_4 unimpl #:RSUB g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x2 & g_rd0_4 unimpl #:RSUB g_rs9_4, g_rd0_4, eg_imm0_8 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0x0 & g_rd0_4 ; eg_op8_8=0x11 & eg_imm0_8 unimpl #:RSUB_COND g_rd0_4, eg_cond8_4, eg_imm0_8 is g_op4_12=0xfbb & g_rd0_4 ; eg_op12_4=0x0 & eg_cond8_4 & eg_imm0_8 unimpl #:SATADD.H g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x2c & eg_rd0_4 unimpl #:SATADD.W g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xc & eg_rd0_4 unimpl #:SATRNDS g_rd0_4, eg_offset5_5, eg_shift0_5 is g_op4_12=0xf3b & g_rd0_4 ; eg_op10_6=0x0 & eg_offset5_5 & eg_shift0_5 unimpl #:SATRNDU g_rd0_4, eg_offset5_5, eg_shift0_5 is g_op4_12=0xf3b & g_rd0_4 ; eg_op10_6=0x1 & eg_offset5_5 & eg_shift0_5 unimpl #:SATS g_rd0_4, eg_offset5_5, eg_shift0_5 is g_op4_12=0xf1b & g_rd0_4 ; eg_op10_6=0x0 & eg_offset5_5 & eg_shift0_5 unimpl #:SATSUB.H g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x3c & eg_rd0_4 unimpl #:SATSUB.W g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & 
g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x1c & eg_rd0_4 unimpl #:SATSUB.W g_rs9_4, g_rd0_4, eg_imm0_16 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0xd & g_rd0_4 ; eg_imm0_16 unimpl #:SATU g_rd0_4, eg_offset5_5, eg_shift0_5 is g_op4_12=0xf1b & g_rd0_4 ; eg_op10_6=0x1 & eg_offset5_5 & eg_shift0_5 unimpl #:SBC g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0x14 & eg_rd0_4 unimpl #:SBR g_offset9_4, g_offset4_1, g_rd0_4 is g_op13_3=0x5 & g_offset9_4 & g_op5_4=0xd & g_offset4_1 & g_rd0_4 unimpl #:SCALL is g_op0_16=0xd733 unimpl #:SCR g_rd0_4 is g_op4_12=0x5c1 & g_rd0_4 unimpl #:SLEEP eg_opcode0_8 is g_op0_16=0xe9b0 ; eg_op8_8=0x0 & eg_opcode0_8 unimpl #:SR_COND g_cond4_4, g_rd0_4 is g_op8_8=0x5f & g_cond4_4 & g_rd0_4 unimpl #:SSRF g_offset4_5 is g_op9_7=0x69 & g_offset4_5 & g_op0_4=0x3 unimpl #:ST.B g_rp9_4, g_rs0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0xc & g_rs0_4 unimpl #:ST.B g_rp9_4, g_rs0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0xf & g_rs0_4 unimpl #:ST.B g_rp9_4, g_disp4_3, g_rs0_4 is g_op13_3=0x5 & g_rp9_4 & g_op7_2=0x1 & g_disp4_3 & g_rs0_4 unimpl #:ST.B g_rp9_4, g_rs0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x16 & g_rs0_4 ; eg_disp0_16 unimpl #:ST.B g_rb9_4, g_ri0_4, eg_shift4_2, eg_rs0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x2c & eg_shift4_2 & eg_rs0_4 unimpl #:ST.B_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x7 & eg_disp0_9 unimpl #:ST.D g_rp9_4, g_rs1_3 is g_op13_3=0x5 & g_rp9_4 & g_op4_5=0x12 & g_rs1_3 & g_op0_1=0x0 unimpl #:ST.D g_rp9_4, g_rs1_3 is g_op13_3=0x5 & g_rp9_4 & g_op4_5=0x12 & g_rs1_3 & g_op0_1=0x1 unimpl #:ST.D g_rp9_4, g_rs1_3 is g_op13_3=0x5 & g_rp9_4 & g_op4_5=0x11 & g_rs1_3 & g_op0_1=0x1 unimpl #:ST.D g_rp9_4, g_rs1_3, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0xe & g_rs1_3 & g_op0_1=0x1 ; eg_disp0_16 unimpl #:ST.D g_rb9_4, g_ri0_4, eg_shift4_2, eg_rs0_4 is g_op13_3=0x7 & g_rb9_4 & 
g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x20 & eg_shift4_2 & eg_rs0_4 unimpl #:ST.H g_rp9_4, g_rs0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0xb & g_rs0_4 unimpl #:ST.H g_rp9_4, g_rs0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0xe & g_rs0_4 unimpl #:ST.H g_rp9_4, g_disp4_3, g_rs0_4 is g_op13_3=0x5 & g_rp9_4 & g_op7_2=0x0 & g_disp4_3 & g_rs0_4 unimpl #:ST.H g_rp9_4, g_rs0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x15 & g_rs0_4 ; eg_disp0_16 unimpl #:ST.H g_rb9_4, g_ri0_4, eg_shift4_2, eg_rs0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x28 & eg_shift4_2 & eg_rs0_4 unimpl #:ST.H_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x6 & eg_disp0_9 unimpl #:ST.W g_rp9_4, g_rs0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0xa & g_rs0_4 unimpl #:ST.W g_rp9_4, g_rs0_4 is g_op13_3=0x0 & g_rp9_4 & g_op4_5=0xd & g_rs0_4 unimpl #:ST.W g_rp9_4, g_disp4_4, g_rs0_4 is g_op13_3=0x4 & g_rp9_4 & g_op8_1=0x1 & g_disp4_4 & g_rs0_4 unimpl #:ST.W g_rp9_4, g_rs0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x14 & g_rs0_4 ; eg_disp0_16 unimpl #:ST.W g_rb9_4, g_ri0_4, eg_shift4_2, eg_rs0_4 is g_op13_3=0x7 & g_rb9_4 & g_op4_5=0x0 & g_ri0_4 ; eg_op6_10=0x24 & eg_shift4_2 & eg_rs0_4 unimpl #:ST.W_COND g_rp9_4, g_rd0_4, eg_cond12_4, eg_disp0_9 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1f & g_rd0_4 ; eg_cond12_4 & eg_op9_3=0x5 & eg_disp0_9 unimpl #:STC.D g_rp0_4, eg_cop13_3, eg_rs9_3, eg_disp0_8 is g_op4_12=0xeba & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x1 & eg_rs9_3 & eg_op8_1=0x0 & eg_disp0_8 unimpl #:STC.D g_rp0_4, eg_cop13_3, eg_rs9_3 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rs9_3 & eg_op0_9=0x70 unimpl #:STC.D g_rp0_4, eg_cop13_3, eg_rs9_3, eg_shift4_2, eg_ri0_4 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x1 & eg_rs9_3 & eg_op6_3=0x3 & eg_shift4_2 & eg_ri0_4 unimpl #:STC.W g_rp0_4, eg_cop13_3, eg_rs8_4, eg_disp0_8 is g_op4_12=0xeba & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rs8_4 & 
eg_disp0_8 unimpl #:STC.W g_rp0_4, eg_cop13_3, eg_rs8_4 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x0 & eg_rs8_4 & eg_op0_8=0x60 unimpl #:STC.W g_rp0_4, eg_cop13_3, eg_rs8_4, eg_shift4_2, eg_ri0_4 is g_op4_12=0xefa & g_rp0_4 ; eg_cop13_3 & eg_op12_1=0x1 & eg_rs8_4 & eg_op6_2=0x2 & eg_shift4_2 & eg_ri0_4 unimpl #:STC0.D g_rp0_4, eg_disp12_4, eg_rs9_3, eg_disp0_8 is g_op4_12=0xf7a & g_rp0_4 ; eg_disp12_4 & eg_rs9_3 & eg_op8_1=0x0 & eg_disp0_8 unimpl #:STC0.W g_rp0_4, eg_disp12_4, eg_rs8_4, eg_disp0_8 is g_op4_12=0xf5a & g_rp0_4 ; eg_disp12_4 & eg_rs8_4 & eg_disp0_8 unimpl #:STCM.D g_rp0_4, eg_cop13_3, eg_update12_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xeda & g_rp0_4 ; eg_cop13_3 & eg_update12_1 & eg_op8_4=0x5 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:STCM.W g_rp0_4, eg_cop13_3, eg_update12_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xeda & g_rp0_4 ; eg_cop13_3 & eg_update12_1 & eg_op8_4=0x3 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:STCM.W g_rp0_4, eg_cop13_3, eg_update12_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op4_12=0xeda & g_rp0_4 ; eg_cop13_3 & eg_update12_1 & eg_op8_4=0x2 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:STCOND g_rp9_4, g_rs0_4, eg_disp0_16 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x17 & g_rs0_4 ; eg_disp0_16 unimpl #:STDSP g_disp4_7, g_rs0_4 is g_op11_5=0xa & g_disp4_7 & g_rs0_4 unimpl #:STHH.W g_rx9_4, g_ry0_4, eg_xpart13_1, eg_ypart12_1, eg_disp4_8, eg_rp0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op14_2=0x3 & eg_xpart13_1 & 
eg_ypart12_1 & eg_disp4_8 & eg_rp0_4 unimpl #:STHH.W g_rx9_4, g_ry0_4, eg_xpart13_1, eg_ypart12_1, eg_ri8_4, eg_shift4_2, eg_rb0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1e & g_ry0_4 ; eg_op14_2=0x2 & eg_xpart13_1 & eg_ypart12_1 & eg_ri8_4 & eg_op6_2=0x0 & eg_shift4_2 & eg_rb0_4 unimpl #:STM g_update9_1, g_rp0_4, eg_regsel15_1, eg_regsel14_1, eg_regsel13_1, eg_regsel12_1, eg_regsel11_1, eg_regsel10_1, eg_regsel9_1, eg_regsel8_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op10_6=0x3a & g_update9_1 & g_op4_5=0x1c & g_rp0_4 ; eg_regsel15_1 & eg_regsel14_1 & eg_regsel13_1 & eg_regsel12_1 & eg_regsel11_1 & eg_regsel10_1 & eg_regsel9_1 & eg_regsel8_1 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:STMTS g_update9_1, g_rp0_4, eg_regsel15_1, eg_regsel14_1, eg_regsel13_1, eg_regsel12_1, eg_regsel11_1, eg_regsel10_1, eg_regsel9_1, eg_regsel8_1, eg_regsel7_1, eg_regsel6_1, eg_regsel5_1, eg_regsel4_1, eg_regsel3_1, eg_regsel2_1, eg_regsel1_1, eg_regsel0_1 is g_op10_6=0x3b & g_update9_1 & g_op4_5=0x1c & g_rp0_4 ; eg_regsel15_1 & eg_regsel14_1 & eg_regsel13_1 & eg_regsel12_1 & eg_regsel11_1 & eg_regsel10_1 & eg_regsel9_1 & eg_regsel8_1 & eg_regsel7_1 & eg_regsel6_1 & eg_regsel5_1 & eg_regsel4_1 & eg_regsel3_1 & eg_regsel2_1 & eg_regsel1_1 & eg_regsel0_1 unimpl #:STSWP.H g_rp9_4, g_rs0_4, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rs0_4 ; eg_op12_4=0x9 & eg_disp0_12 unimpl #:STSWP.W g_rp9_4, g_rs0_4, eg_disp0_12 is g_op13_3=0x7 & g_rp9_4 & g_op4_5=0x1d & g_rs0_4 ; eg_op12_4=0xa & eg_disp0_12 unimpl #:SUB g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x1 & g_rd0_4 unimpl #:SUB g_rx9_4, g_ry0_4, eg_shift4_2, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x4 & eg_shift4_2 & eg_rd0_4 unimpl #:SUB g_imm4_8, g_rd0_4 is g_op12_4=0x2 & g_imm4_8 & g_rd0_4 unimpl #:SUB g_imm9_4, g_imm4_1, 
g_rd0_4, eg_imm0_16 is g_op13_3=0x7 & g_imm9_4 & g_op5_4=0x1 & g_imm4_1 & g_rd0_4 ; eg_imm0_16 unimpl #:SUB g_rs9_4, g_rd0_4, eg_imm0_16 is g_op13_3=0x7 & g_rs9_4 & g_op4_5=0xc & g_rd0_4 ; eg_imm0_16 unimpl #:SUB_F_COND g_update9_1, g_rd0_4, eg_cond8_4, eg_imm0_8 is g_op10_6=0x3d & g_update9_1 & g_op4_5=0x1b & g_rd0_4 ; eg_op12_4=0x0 & eg_cond8_4 & eg_imm0_8 unimpl #:SUB_COND g_rx9_4, g_ry0_4, eg_cond8_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x1d & g_ry0_4 ; eg_op12_4=0xe & eg_cond8_4 & eg_op4_4=0x1 & eg_rd0_4 unimpl #:SUBHH.W g_rx9_4, g_ry0_4, eg_xpart5_1, eg_ypart4_1, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op6_10=0x3c & eg_xpart5_1 & eg_ypart4_1 & eg_rd0_4 unimpl #:SWAP.B g_rd0_4 is g_op4_12=0x5cb & g_rd0_4 unimpl #:SWAP.BH g_rd0_4 is g_op4_12=0x5cc & g_rd0_4 unimpl #:SWAP.H g_rd0_4 is g_op4_12=0x5ca & g_rd0_4 unimpl #:SYNC eg_opcode0_8 is g_op0_16=0xebb0 ; eg_op8_8=0x0 & eg_opcode0_8 unimpl #:TLBR is g_op0_16=0xd643 unimpl #:TLBS is g_op0_16=0xd653 unimpl #:TLBW is g_op0_16=0xd663 unimpl #:TNBZ g_rd0_4 is g_op4_12=0x5ce & g_rd0_4 unimpl #:TST g_rs9_4, g_rd0_4 is g_op13_3=0x0 & g_rs9_4 & g_op4_5=0x7 & g_rd0_4 unimpl #:XCHG g_rx9_4, g_ry0_4, eg_rd0_4 is g_op13_3=0x7 & g_rx9_4 & g_op4_5=0x0 & g_ry0_4 ; eg_op4_12=0xb4 & eg_rd0_4 unimpl ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_bit_operations.sinc ================================================ #--------------------------------------------------------------------- # 8.3.6 Bit Operations #--------------------------------------------------------------------- #--------------------------------------------------------------------- # BFEXTS - Bitfield extract and sign-extend # I. 
# {d,s} -> {0, 1, ..., 15}
# {bp5, w5} -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# BFEXTS Format I
# Operation: Rd <- SE(Rs[bp5+w5-1:bp5])
# Syntax: bfexts Rd, Rs, bp5, w5
# 111d ddd1 1101 ssss 1011 00ff fffw wwww
:BFEXTS rd9, RS0A, eoff5_5, elen0_5 is op13_3=0x7 & rd9 & op4_5=0x1d & RS0A ; eop10_6=0x2c & eoff5_5 & elen0_5
{
	# Shift the source right so the field starts at bit 0, then mask to w5 bits.
	shifted:4 = (RS0A >> eoff5_5);
	mask:4 = (0xffffffff >> (32 - elen0_5));
	isolated:4 = shifted & mask;
	# Technically, elen0_5 can be 0, but result is undefined
	# if that's the case so we're ok here.
	# signmask covers bits [31 : w5-1]; OR-ing it in when the field's top
	# bit is set performs the sign extension (test is 0 or 1, so
	# test * signmask is either 0 or the full mask).
	signmask:4 = (0xffffffff << (elen0_5 - 1));
	test:4 = zext((signmask & isolated) != 0);
	rd9 = (test * signmask) | isolated;
	NZSTATUS(rd9);
	C = rd9 s< 0;
	CZNVTOSR();
}

#---------------------------------------------------------------------
# BFEXTU - Bitfield extract and zero-extend
# I. {d,s} -> {0, 1, ..., 15}
#    {bp5, w5} -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# BFEXTU Format I
# Operation: Rd <- ZE(Rs[bp5+w5-1:bp5])
#   (comment fixed: zero-extend, not SE; extraction with no sign step below)
# Syntax: bfextu Rd, Rs, bp5, w5
# 111d ddd1 1101 ssss 1100 00ff fffw wwww
#   (encoding comment fixed to match eop10_6=0x30)
:BFEXTU rd9, RS0A, eoff5_5, elen0_5 is op13_3=0x7 & rd9 & op4_5=0x1d & RS0A ; eop10_6=0x30 & eoff5_5 & elen0_5
{
	shifted:4 = (RS0A >> eoff5_5);
	mask:4 = (0xffffffff >> (32 - elen0_5));
	rd9 = shifted & mask;
	NZSTATUS(rd9);
	C = rd9 s< 0;
	CZNVTOSR();
}

#---------------------------------------------------------------------
# BFINS - Bitfield insert
# I.
# {d,s} -> {0, 1, ..., 15}
# {bp5, w5} -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# BFINS Format I
# Operation: Rd[bp5+w5-1:bp5] <- Rs[w5-1:0]
# Syntax: bfins Rd, Rs, bp5, w5
# 111d ddd1 1101 ssss 1101 00ff fffw wwww
:BFINS rd9, RS0A, eoff5_5, elen0_5 is op13_3=0x7 & rd9 & op4_5=0x1d & RS0A ; eop10_6=0x34 & eoff5_5 & elen0_5
{
	# lowmask selects the low w5 bits of the source; destmask clears the
	# destination field at bits [bp5+w5-1 : bp5].
	lowmask:4 = (0xffffffff >> (32 - elen0_5));
	destmask:4 = ~(lowmask << eoff5_5);
	# BUGFIX: the inserted field must be shifted to bit position bp5
	# (eoff5_5) so it lines up with the bits cleared by destmask; the
	# previous code shifted by the field width (elen0_5), inserting the
	# field at the wrong offset.
	rd9 = (rd9 & destmask) | ((RS0A & lowmask) << eoff5_5);
	NZSTATUS(rd9);
	C = rd9 s< 0;
	CZNVTOSR();
}

#---------------------------------------------------------------------
# BLD - Bit load from register to C and Z
# I. d -> {0, 1, ..., 15}
#    bp5 -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# BLD Format I
# Operation: C <- Rd[bp5]
#            Z <- Rd[bp5]
# Syntax: bld Rd, bp5
# 1110 1101 1011 dddd 0000 0000 000f ffff
:BLD rd0, eoff0_5 is op4_12=0xedb & rd0 ; eop5_11=0x0 & eoff0_5
{
	# Copy the selected register bit into both C and Z.
	tmp:4 = (rd0 & (1 << eoff0_5));
	test:1 = (tmp != 0);
	C = test;
	Z = test;
	CZNVTOSR();
}

#---------------------------------------------------------------------
# BREV - Bit Reverse
# I. d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# BREV Format I
# Operation: Rd[31:0] <- Rd[0:31]
# Syntax: brev Rd
# 0101 1100 1001 dddd
# taken from http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel
# under the 32 bit word reverse
:BREV rd0 is op4_12=0x5c9 & rd0
{
	v:4 = rd0;
	v = ((v >> 1) & 0x55555555) | ((v & 0x55555555) << 1);
	v = ((v >> 2) & 0x33333333) | ((v & 0x33333333) << 2);
	v = ((v >> 4) & 0x0F0F0F0F) | ((v & 0x0F0F0F0F) << 4);
	v = ((v >> 8) & 0x00FF00FF) | ((v & 0x00FF00FF) << 8);
	v = (v >> 16) | (v << 16);
	rd0 = v;
	ZSTATUS(rd0);
}

#---------------------------------------------------------------------
# BST - Copy C to register bit
# I.
#    d -> {0, 1, ..., 15}
#    bp5 -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# BST Format I
# Operation: Rd[bp5] <- C
# Syntax: bst Rd, bp5
# 1110 1111 1011 dddd 0000 0000 000f ffff
:BST rd0, eoff0_5 is op4_12=0xefb & rd0 ; eop5_11=0x0 & eoff0_5
{
	# Clear the target bit, then OR it back in iff C is set
	# (cbool is 0 or 1, so cbool * destbit is either 0 or the bit).
	destbit:4 = (1 << eoff0_5);
	cbool:4 = zext(C != 0);
	rd0 = (rd0 & ~destbit) | (cbool * destbit);
}

#---------------------------------------------------------------------
# CASTS.{H,B} - Typecast to Signed Word
# I, II. d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# CASTS.H Format I
# Operation: Rd[31:16] <- Rd[15]
# Syntax: casts.h Rd
# 0101 1100 1000 dddd
:CASTS.H rd0 is op4_12=0x5c8 & rd0
{
	rd0 = sext(rd0:2);
}

# CASTS.B Format II
# Operation: Rd[31:8] <- Rd[7]
# Syntax: casts.b Rd
# 0101 1100 0110 dddd
:CASTS.B rd0 is op4_12=0x5c6 & rd0
{
	rd0 = sext(rd0:1);
}

#---------------------------------------------------------------------
# CASTU.{H,B} - Typecast to Unsigned Word
# I, II. d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# CASTU.H Format I
# Operation: Rd[31:16] <- 0
# Syntax: castu.h Rd
# 0101 1100 0111 dddd
#   (encoding comment fixed to match op4_12=0x5c7; previously repeated
#   CASTS.H's 1000 pattern)
:CASTU.H rd0 is op4_12=0x5c7 & rd0
{
	rd0 = zext(rd0:2);
}

# CASTU.B Format II
# Operation: Rd[31:8] <- 0
# Syntax: castu.b Rd
# 0101 1100 0101 dddd
#   (encoding comment fixed to match op4_12=0x5c5; previously repeated
#   CASTS.B's 0110 pattern)
:CASTU.B rd0 is op4_12=0x5c5 & rd0
{
	rd0 = zext(rd0:1);
}

#---------------------------------------------------------------------
# CBR - Clear Bit in Register
# I.
#    d -> {0, 1, ..., 15}
#    bp5 -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# CBR Format I
# Operation: Rd[bp5] <- 0
# Syntax: cbr Rd, bp5
# 101f fff1 110f dddd
# CSBRH reassembles the 5-bit bit-position field that the encoding splits
# across the word: bp5 = (bp9_4 << 1) | bp4_1.
CSBRH: off is bp9_4 & bp4_1 [ off = (bp9_4 << 1) | bp4_1; ] { tmp:4 = off; export tmp; }

:CBR rd0, CSBRH is op13_3=0x5 & op5_4=0xe & CSBRH & rd0
{
	destbit:4 = (1 << CSBRH);
	rd0 = (rd0 & ~destbit);
	ZSTATUS(rd0);
}

#---------------------------------------------------------------------
# CLZ - Count Leading Zeros
# I. {d,s} -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# CLZ Format I
# Operation: temp <- 32
#            for (i = 31; i >= 0; i--)
#                if (Rs[i] == 1) then
#                    temp <- 31 - i;
#                    break;
#            Rd <- temp;
# Syntax: clz Rd, Rs
# 111s sss0 0000 dddd 0001 0010 0000 0000
:CLZ rd0, RS9A is op13_3=0x7 & op4_5=0x0 & rd0 & RS9A ; eop0_16=0x1200
{
	rd0 = lzcount(RS9A);
	Z = (rd0 == 0);
	C = (rd0 == 32);
	CZNVTOSR();
}

# MEMSH exports the word-aligned absolute address (imm15 << 2) shared by
# the MEMC/MEMS/MEMT bit-in-memory instructions below.
MEMSH: val is simm0_15 [ val = simm0_15 << 2; ] {export *[const]:4 val; }

# MEMC - clear bit ctx_shift of the word at MEMSH.
:MEMC MEMSH, ctx_shift is op4_12=0xf61 & imm0_4 & ctx_shift; eb15 & MEMSH [ctx_shigh = imm0_4; ctx_slow = eb15;]
{
	tmp:4 = 0x00000001 << ctx_shift;
	tmp = ~tmp;
	tmpa:4 = *[RAM]:4 MEMSH;
	*[RAM]:4 MEMSH = tmpa & tmp;
}

# MEMS - set bit ctx_shift of the word at MEMSH.
:MEMS MEMSH, ctx_shift is op4_12=0xf81 & imm0_4 & ctx_shift; eb15 & MEMSH [ctx_shigh = imm0_4; ctx_slow = eb15;]
{
	tmp:4 = 0x00000001 << ctx_shift;
	tmpa:4 = *[RAM]:4 MEMSH;
	# BUGFIX: MEMS sets the selected bit, so the mask must be OR-ed in;
	# the previous "tmpa & tmp" instead cleared every bit except the
	# selected one (MEMC already handles clearing, MEMT toggling).
	*[RAM]:4 MEMSH = tmpa | tmp;
}

# MEMT - toggle bit ctx_shift of the word at MEMSH.
:MEMT MEMSH, ctx_shift is op4_12=0xfa1 & imm0_4 & ctx_shift; eb15 & MEMSH [ctx_shigh = imm0_4; ctx_slow = eb15;]
{
	tmp:4 = 0x00000001 << ctx_shift;
	tmpa:4 = *[RAM]:4 MEMSH;
	*[RAM]:4 MEMSH = tmpa ^ tmp;
}

#---------------------------------------------------------------------
# SBR - Set Bit in Register
# I.
#    d -> {0, 1, ..., 15}
#    bp5 -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# SBR Format I
# Operation: Rd[bp5] <- 1
# Syntax: sbr Rd, bp5
# 101f fff1 101f dddd
:SBR rd0, CSBRH is op13_3=0x5 & op5_4=0xd & CSBRH & rd0
{
	destbit:4 = (1 << CSBRH);
	rd0 = (rd0 | destbit);
	# A bit was just set, so the result cannot be zero.
	Z = 0;
	CZNVTOSR();
}

#---------------------------------------------------------------------
# SWAP.B - Swap Bytes
# I. d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# SWAP.B Format I
# Operation: temp <- Rd
#            Rd[31:24] <- temp[7:0]
#            Rd[23:16] <- temp[15:8]
#            Rd[15:8]  <- temp[23:16]
#            Rd[7:0]   <- temp[31:24]
# Syntax: swap.b Rd
# 0101 1100 1011 dddd
:SWAP.B rd0 is op4_12=0x5cb & rd0
{
	# Full 32-bit byte reversal via bit-range assignments.
	temp:4 = rd0;
	rd0[24,8] = temp[0,8];
	rd0[16,8] = temp[8,8];
	rd0[8,8] = temp[16,8];
	rd0[0,8] = temp[24,8];
}

#---------------------------------------------------------------------
# SWAP.BH - Swap Bytes in Halfword
# I. d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# SWAP.BH Format I
# Operation: temp <- Rd
#            Rd[31:24] <- temp[23:16]
#            Rd[23:16] <- temp[31:24]
#            Rd[15:8]  <- temp[7:0]
#            Rd[7:0]   <- temp[15:8]
# Syntax: swap.bh Rd
# 0101 1100 1100 dddd
:SWAP.BH rd0 is op4_12=0x5cc & rd0
{
	# Swap the two bytes within each 16-bit halfword independently.
	temp:4 = rd0;
	rd0[24,8] = temp[16,8];
	rd0[16,8] = temp[24,8];
	rd0[8,8] = temp[0,8];
	rd0[0,8] = temp[8,8];
}

#---------------------------------------------------------------------
# SWAP.H - Swap Halfwords
# I.
d -> {0, 1, ..., 15} #--------------------------------------------------------------------- # SWAP.H Format I # Operation: temp <- Rd # Rd[31:24] <- temp[23:16] # Rd[23:16] <- temp[31:24] # Rd[15:8] <- temp[7:0] # Rd[7:0] <- temp[15:8] # Syntax: swap.h Rd # 0101 1100 1010 dddd :SWAP.H rd0 is op4_12=0x5ca & rd0 { temp:4 = rd0; rd0[16,16] = temp[0,16]; rd0[0,16] = temp[16,16]; } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_coprocessor_interface.sinc ================================================ #--------------------------------------------------------------------- # Coprocessor Interface #--------------------------------------------------------------------- # COP decodes the coprocessor number for display purposes # because the sleigh "dec" field attribute doesn't exist COPD: "CP0" is cp13_3=0x0 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP1" is cp13_3=0x1 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP2" is cp13_3=0x2 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP3" is cp13_3=0x3 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP4" is cp13_3=0x4 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP5" is cp13_3=0x5 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP6" is cp13_3=0x6 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } COPD: "CP7" is cp13_3=0x7 & altcp13_3 { tmp:1 = altcp13_3:1; export tmp; } # CRD decodes the coprocessor number for display purposes # because the sleigh "dec" field attribute doesn't exist CRD: "CR0" is crd8_4=0x0 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR1" is crd8_4=0x1 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR2" is crd8_4=0x2 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR3" is crd8_4=0x3 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR4" is crd8_4=0x4 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR5" is crd8_4=0x5 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } 
CRD: "CR6" is crd8_4=0x6 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR7" is crd8_4=0x7 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR8" is crd8_4=0x8 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR9" is crd8_4=0x9 & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR10" is crd8_4=0xa & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR11" is crd8_4=0xb & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR12" is crd8_4=0xc & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR13" is crd8_4=0xd & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR14" is crd8_4=0xe & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRD: "CR15" is crd8_4=0xf & altcrd8_4 { tmp:1 = altcrd8_4:1; export tmp; } CRX: "CR0" is crx4_4=0x0 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR1" is crx4_4=0x1 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR2" is crx4_4=0x2 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR3" is crx4_4=0x3 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR4" is crx4_4=0x4 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR5" is crx4_4=0x5 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR6" is crx4_4=0x6 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR7" is crx4_4=0x7 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR8" is crx4_4=0x8 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR9" is crx4_4=0x9 & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR10" is crx4_4=0xa & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR11" is crx4_4=0xb & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR12" is crx4_4=0xc & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR13" is crx4_4=0xd & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR14" is crx4_4=0xe & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRX: "CR15" is crx4_4=0xf & altcrx4_4 { tmp:1 = altcrx4_4:1; export tmp; } CRY: "CR0" is cry0_4=0x0 & altcry0_4 { tmp:1 = altcry0_4:1; 
export tmp; } CRY: "CR1" is cry0_4=0x1 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR2" is cry0_4=0x2 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR3" is cry0_4=0x3 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR4" is cry0_4=0x4 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR5" is cry0_4=0x5 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR6" is cry0_4=0x6 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR7" is cry0_4=0x7 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR8" is cry0_4=0x8 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR9" is cry0_4=0x9 & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR10" is cry0_4=0xa & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR11" is cry0_4=0xb & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR12" is cry0_4=0xc & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR13" is cry0_4=0xd & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR14" is cry0_4=0xe & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CRY: "CR15" is cry0_4=0xf & altcry0_4 { tmp:1 = altcry0_4:1; export tmp; } CPLoadAddress: is rp0=0xf ; eb15 { ldadd = inst_start; } # Rp=PC CPLoadAddress: is rp0 ; eb15 { ldadd = rp0; } # Rp!=PC LDCMDinc7: ",CR14-CR15" is eb7=1 { tmp:1 = 14; tmpa:1 = 15; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc6: ",CR12-CR13" is eb6=1 { tmp:1 = 12; tmpa:1 = 13; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc5: ",CR10-CR11" is eb5=1 { tmp:1 = 10; tmpa:1 = 11; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc4: ",CR8-CR9" is eb4=1 { tmp:1 = 8; tmpa:1 = 9; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc3: ",CR6-CR7" is eb3=1 { tmp:1 = 6; tmpa:1 = 7; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc2: ",CR4-CR5" is eb2=1 { tmp:1 = 4; tmpa:1 = 5; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc1: ",CR2-CR3" is eb1=1 { tmp:1 = 2; 
tmpa:1 = 3; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMDinc0: ",CR0-CR1" is eb0=1 { tmp:1 = 0; tmpa:1 = 1; LoadCoProcessorDword(tmp,tmpa,ldadd); ldadd = ldadd + 8; } LDCMWinc15: ",CR15" is eb7=1 { tmp:1 = 15; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc14: ",CR14" is eb6=1 { tmp:1 = 14; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc13: ",CR13" is eb5=1 { tmp:1 = 13; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc12: ",CR12" is eb4=1 { tmp:1 = 12; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc11: ",CR11" is eb3=1 { tmp:1 = 11; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc10: ",CR10" is eb2=1 { tmp:1 = 10; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc9: ",CR9" is eb1=1 { tmp:1 = 9; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc8: ",CR8" is eb0=1 { tmp:1 = 8; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc7: ",CR7" is eb7=1 { tmp:1 = 7; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc6: ",CR6" is eb6=1 { tmp:1 = 6; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc5: ",CR5" is eb5=1 { tmp:1 = 5; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc4: ",CR4" is eb4=1 { tmp:1 = 4; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc3: ",CR3" is eb3=1 { tmp:1 = 3; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc2: ",CR2" is eb2=1 { tmp:1 = 2; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc1: ",CR1" is eb1=1 { tmp:1 = 1; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc0: ",CR0" is eb0=1 { tmp:1 = 0; LoadCoProcessorWord(tmp,ldadd); ldadd = ldadd + 4; } LDCMWinc15: is eb7=0 { } LDCMWinc14: is eb6=0 { } LDCMWinc13: is eb5=0 { } LDCMWinc12: is eb4=0 { } LDCMWinc11: is eb3=0 { } LDCMWinc10: is eb2=0 { } LDCMWinc9: is eb1=0 { } LDCMWinc8: is eb0=0 { } LDCMWinc7: is eb7=0 { } LDCMWinc6: is eb6=0 { } LDCMWinc5: is eb5=0 { } LDCMWinc4: is eb4=0 { } LDCMWinc3: is 
eb3=0 { } LDCMWinc2: is eb2=0 { } LDCMWinc1: is eb1=0 { } LDCMWinc0: is eb0=0 { } LDCMDinc7: is eb7=0 { } LDCMDinc6: is eb6=0 { } LDCMDinc5: is eb5=0 { } LDCMDinc4: is eb4=0 { } LDCMDinc3: is eb3=0 { } LDCMDinc2: is eb2=0 { } LDCMDinc1: is eb1=0 { } LDCMDinc0: is eb0=0 { } LDCMpp: is rd0 ; eb12=0 { } LDCMpp: "++" is rd0 ; eb12=1 { rd0 = ldadd; } LDCMDcommon: LDCMDinc0^LDCMDinc1^LDCMDinc2^LDCMDinc3^LDCMDinc4^LDCMDinc5^LDCMDinc6^LDCMDinc7 is LDCMDinc0 & LDCMDinc1 & LDCMDinc2 & LDCMDinc3 & LDCMDinc4 & LDCMDinc5 & LDCMDinc6 & LDCMDinc7 { build LDCMDinc7; build LDCMDinc6; build LDCMDinc5; build LDCMDinc4; build LDCMDinc3; build LDCMDinc2; build LDCMDinc1; build LDCMDinc0; } LDCMWLcommon: LDCMWinc0^LDCMWinc1^LDCMWinc2^LDCMWinc3^LDCMWinc4^LDCMWinc5^LDCMWinc6^LDCMWinc7 is LDCMWinc0 & LDCMWinc1 & LDCMWinc2 & LDCMWinc3 & LDCMWinc4 & LDCMWinc5 & LDCMWinc6 & LDCMWinc7 { build LDCMWinc7; build LDCMWinc6; build LDCMWinc5; build LDCMWinc4; build LDCMWinc3; build LDCMWinc2; build LDCMWinc1; build LDCMWinc0; } LDCMWHcommon: LDCMWinc8^LDCMWinc9^LDCMWinc10^LDCMWinc11^LDCMWinc12^LDCMWinc13^LDCMWinc14^LDCMWinc15 is LDCMWinc8 & LDCMWinc9 & LDCMWinc10 & LDCMWinc11 & LDCMWinc12 & LDCMWinc13 & LDCMWinc14 & LDCMWinc15 { build LDCMWinc15; build LDCMWinc14; build LDCMWinc13; build LDCMWinc12; build LDCMWinc11; build LDCMWinc10; build LDCMWinc9; build LDCMWinc8; } STCMDdec7: ",CR14-CR15" is eb7=1 { ldadd = ldadd - 8; tmp:1 = 14; tmpa:1 = 15; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec6: ",CR12-CR13" is eb6=1 { ldadd = ldadd - 8; tmp:1 = 12; tmpa:1 = 13; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec5: ",CR10-CR11" is eb5=1 { ldadd = ldadd - 8; tmp:1 = 10; tmpa:1 = 11; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec4: ",CR8-CR9" is eb4=1 { ldadd = ldadd - 8; tmp:1 = 8; tmpa:1 = 9; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec3: ",CR6-CR7" is eb3=1 { ldadd = ldadd - 8; tmp:1 = 6; tmpa:1 = 7; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec2: ",CR4-CR5" is eb2=1 { ldadd = ldadd - 
8; tmp:1 = 4; tmpa:1 = 5; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec1: ",CR2-CR3" is eb1=1 { ldadd = ldadd - 8; tmp:1 = 2; tmpa:1 = 3; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMDdec0: ",CR0-CR1" is eb0=1 { ldadd = ldadd - 8; tmp:1 = 0; tmpa:1 = 1; storeCoprocessorDword(tmp,tmpa,ldadd); } STCMWdec15: ",CR15" is eb7=1 { ldadd = ldadd - 4; tmp:1 = 15; storeCoprocessorWord(tmp,ldadd); } STCMWdec14: ",CR14" is eb6=1 { ldadd = ldadd - 4; tmp:1 = 14; storeCoprocessorWord(tmp,ldadd); } STCMWdec13: ",CR13" is eb5=1 { ldadd = ldadd - 4; tmp:1 = 13; storeCoprocessorWord(tmp,ldadd); } STCMWdec12: ",CR12" is eb4=1 { ldadd = ldadd - 4; tmp:1 = 12; storeCoprocessorWord(tmp,ldadd); } STCMWdec11: ",CR11" is eb3=1 { ldadd = ldadd - 4; tmp:1 = 11; storeCoprocessorWord(tmp,ldadd); } STCMWdec10: ",CR10" is eb2=1 { ldadd = ldadd - 4; tmp:1 = 10; storeCoprocessorWord(tmp,ldadd); } STCMWdec9: ",CR9" is eb1=1 { ldadd = ldadd - 4; tmp:1 = 9; storeCoprocessorWord(tmp,ldadd); } STCMWdec8: ",CR8" is eb0=1 { ldadd = ldadd - 4; tmp:1 = 8; storeCoprocessorWord(tmp,ldadd); } STCMWdec7: ",CR7" is eb7=1 { ldadd = ldadd - 4; tmp:1 = 7; storeCoprocessorWord(tmp,ldadd); } STCMWdec6: ",CR6" is eb6=1 { ldadd = ldadd - 4; tmp:1 = 6; storeCoprocessorWord(tmp,ldadd); } STCMWdec5: ",CR5" is eb5=1 { ldadd = ldadd - 4; tmp:1 = 5; storeCoprocessorWord(tmp,ldadd); } STCMWdec4: ",CR4" is eb4=1 { ldadd = ldadd - 4; tmp:1 = 4; storeCoprocessorWord(tmp,ldadd); } STCMWdec3: ",CR3" is eb3=1 { ldadd = ldadd - 4; tmp:1 = 3; storeCoprocessorWord(tmp,ldadd); } STCMWdec2: ",CR2" is eb2=1 { ldadd = ldadd - 4; tmp:1 = 2; storeCoprocessorWord(tmp,ldadd); } STCMWdec1: ",CR1" is eb1=1 { ldadd = ldadd - 4; tmp:1 = 1; storeCoprocessorWord(tmp,ldadd); } STCMWdec0: ",CR0" is eb0=1 { ldadd = ldadd - 4; tmp:1 = 0; storeCoprocessorWord(tmp,ldadd); } STCMWdec15: is eb7=0 { } STCMWdec14: is eb6=0 { } STCMWdec13: is eb5=0 { } STCMWdec12: is eb4=0 { } STCMWdec11: is eb3=0 { } STCMWdec10: is eb2=0 { } STCMWdec9: is eb1=0 { } 
STCMWdec8: is eb0=0 { } STCMWdec7: is eb7=0 { } STCMWdec6: is eb6=0 { } STCMWdec5: is eb5=0 { } STCMWdec4: is eb4=0 { } STCMWdec3: is eb3=0 { } STCMWdec2: is eb2=0 { } STCMWdec1: is eb1=0 { } STCMWdec0: is eb0=0 { } STCMDdec7: is eb7=0 { } STCMDdec6: is eb6=0 { } STCMDdec5: is eb5=0 { } STCMDdec4: is eb4=0 { } STCMDdec3: is eb3=0 { } STCMDdec2: is eb2=0 { } STCMDdec1: is eb1=0 { } STCMDdec0: is eb0=0 { } STCMDcommon: STCMDdec0^STCMDdec1^STCMDdec2^STCMDdec3^STCMDdec4^STCMDdec5^STCMDdec6^STCMDdec7 is STCMDdec0 & STCMDdec1 & STCMDdec2 & STCMDdec3 & STCMDdec4 & STCMDdec5 & STCMDdec6 & STCMDdec7 { build STCMDdec0; build STCMDdec1; build STCMDdec2; build STCMDdec3; build STCMDdec4; build STCMDdec5; build STCMDdec6; build STCMDdec7; } STCMWLcommon: STCMWdec0^STCMWdec1^STCMWdec2^STCMWdec3^STCMWdec4^STCMWdec5^STCMWdec6^STCMWdec7 is STCMWdec0 & STCMWdec1 & STCMWdec2 & STCMWdec3 & STCMWdec4 & STCMWdec5 & STCMWdec6 & STCMWdec7 { build STCMWdec0; build STCMWdec1; build STCMWdec2; build STCMWdec3; build STCMWdec4; build STCMWdec5; build STCMWdec6; build STCMWdec7; } STCMWHcommon: STCMWdec8^STCMWdec9^STCMWdec10^STCMWdec11^STCMWdec12^STCMWdec13^STCMWdec14^STCMWdec15 is STCMWdec8 & STCMWdec9 & STCMWdec10 & STCMWdec11 & STCMWdec12 & STCMWdec13 & STCMWdec14 & STCMWdec15 { build STCMWdec8; build STCMWdec9; build STCMWdec10; build STCMWdec11; build STCMWdec12; build STCMWdec13; build STCMWdec14; build STCMWdec15; } STCMmm: is rd0 ; eb12=0 { } STCMmm: "--" is rd0 ; eb12=1 { rd0 = ldadd; } :COP COPD,CRD,CRX,CRY,ctx_coop is op11_5=0x1c & op4_5=0x1a & op9_2 & op0_4 & ctx_coop; eop12_1 & COPD & CRD & CRX & CRY [ctx_cohi = op9_2; ctx_comid = op0_4; ctx_colow = eop12_1;] { tmp:1 = ctx_coop; CoprocessorOp(COPD,CRD,CRX,CRY,tmp); } #--------------------------------------------------------------------- # LDC.{D,W} - Load Coprocessor #--------------------------------------------------------------------- # LDC.{D,W} Format I # 1110 1001 1010 pppp CCC1 DDD0 nnnn nnnn :LDC.D COPD,CRD,RPwDisp8 is 
(op4_12=0xe9a; eop12_1=0x1 & crd8_1=0x0 & COPD & CRD) & RPwDisp8 { loadCoprocessorDWord(COPD, CRD, RPwDisp8); }

# LDC.{D,W} Format II
# 1110 1111 1010 pppp CCC0 DDD0 0101 0000
:LDC.D COPD,CRD,RPdDec0 is op4_12=0xefa & RPdDec0; eop12_1=0x0 & crd8_1=0x0 & eop0_8=0x50 & COPD & CRD { loadCoprocessorDWord(COPD, CRD, RPdDec0); }

# LDC.{D,W} Format III
# NOTE: documentation says bits 16-19 are pppp, but we assume they meant bbbb
# 1110 1111 1010 bbbb CCC1 DDD0 01tt iiii
:LDC.D COPD,CRD,RBShift0 is (op4_12=0xefa; eop12_1=0x1 & crd8_1=0x0 & eop6_2=0x1 & COPD & CRD) & RBShift0 { loadCoprocessorDWord(COPD, CRD, RBShift0); }

# LDC.{D,W} Format IV
# 1110 1001 1010 pppp CCC0 DDDD nnnn nnnn
:LDC.W COPD,CRD,RPwDisp8 is (op4_12=0xe9a; eop12_1=0x0 & COPD & CRD) & RPwDisp8 { loadCoprocessorWord(COPD, CRD, RPwDisp8); }

# LDC.{D,W} Format V
# 1110 1111 1010 pppp CCC1 DDDD 0100 0000
:LDC.W COPD,CRD,RPwDec0 is op4_12=0xefa & RPwDec0; eop12_1=0x1 & eop0_8=0x40 & COPD & CRD { loadCoprocessorWord(COPD, CRD, RPwDec0); }

# LDC.{D,W} Format VI
# NOTE: documentation says bits 16-19 are pppp, but we assume they meant bbbb
# 1110 1111 1010 bbbb CCC1 DDDD 00tt iiii
:LDC.W COPD,CRD,RBShift0 is (op4_12=0xefa; eop12_1=0x1 & eop6_2=0x0 & COPD & CRD) & RBShift0 { loadCoprocessorWord(COPD, CRD, RBShift0); }

#---------------------------------------------------------------------
# LDC0.{D,W} - Load Coprocessor 0
#---------------------------------------------------------------------

# LDC0.{D,W} Format I
# 1111 0011 1010 pppp nnnn DDD0 nnnn nnnn
:LDC0.D "CP0," CRD,RPwDisp12 is (op4_12=0xf3a; crd8_1=0x0 & CRD) & RPwDisp12 { cp:1 = 0; loadCoprocessorDWord(cp, CRD, RPwDisp12); }

# LDC0.{D,W} Format II
# 1111 0001 1010 pppp nnnn DDDD nnnn nnnn
# BUGFIX: LDC0.W loads a single coprocessor word (full 4-bit DDDD register
# field, word opcode 0xf1a), but the original body called
# loadCoprocessorDWord — a copy-paste from the LDC0.D constructor above.
# All other .W load constructors in this file call loadCoprocessorWord.
:LDC0.W "CP0," CRD,RPwDisp12 is (op4_12=0xf1a; CRD) & RPwDisp12 { cp:1 = 0; loadCoprocessorWord(cp, CRD, RPwDisp12); }

:LDCM.D ecop13_3,rp0^LDCMpp^LDCMDcommon is (op4_12=0xeda & rp0 ; ecop13_3 & eop8_4=0x4 & LDCMDcommon) & LDCMpp & CPLoadAddress { build CPLoadAddress;
build LDCMDcommon; build LDCMpp; } :LDCM.W ecop13_3,rp0^LDCMpp^LDCMWHcommon is (op4_12=0xeda & rp0 ; ecop13_3 & eop8_4=0x1 & LDCMWHcommon) & LDCMpp & CPLoadAddress { build CPLoadAddress; build LDCMWHcommon; build LDCMpp; } :LDCM.W ecop13_3,rp0^LDCMpp^LDCMWLcommon is (op4_12=0xeda & rp0 ; ecop13_3 & eop8_4=0x0 & LDCMWLcommon) & LDCMpp & CPLoadAddress { build CPLoadAddress; build LDCMWLcommon; build LDCMpp; } :MVCR.D COPD,rd0,CRD is op4_12=0xefa & b0=0x0 & rd0; COPD & eb12=0x0 & CRD & eop0_9=0x10 { CoProcessorDWordToReg(COPD, CRD, rd0); } :MVCR.W COPD,rd0,CRD is op4_12=0xefa & b0=0x0 & rd0; COPD & eb12=0x0 & CRD & eop0_8=0x0 { CoProcessorWordToReg(COPD, CRD, rd0); } :MVRC.D COPD,CRD,rs0 is op4_12=0xefa & b0=0x0 & rs0; COPD & eb12=0x0 & CRD & eop0_9=0x30 { RegToCoProcessorDWord(COPD, CRD, rs0); } :MVRC.W COPD,CRD,rs0 is op4_12=0xefa & b0=0x0 & rs0; COPD & eb12=0x0 & CRD & eop0_9=0x20 { RegToCoProcessorWord(COPD, CRD, rs0); } :STC.D COPD,RPwDisp8,CRD is (op4_12=0xeba; eop12_1=0x1 & crd8_1=0x0 & COPD & CRD) & RPwDisp8 { storeCoprocessorDword(COPD, CRD, RPwDisp8); } :STC.D COPD,RPdDec0,CRD is op4_12=0xefa & RPdDec0; eop12_1=0x0 & crd8_1=0x0 & eop0_8=0x70 & COPD & CRD { storeCoprocessorDword(COPD, CRD, RPdDec0); } :STC.D COPD,RBShift0,CRD is (op4_12=0xefa; eop12_1=0x1 & crd8_1=0x0 & eop6_2=0x3 & COPD & CRD) & RBShift0 { storeCoprocessorDword(COPD, CRD, RBShift0); } :STC.W COPD,RPwDisp8,CRD is (op4_12=0xeba; eop12_1=0x0 & COPD & CRD) & RPwDisp8 { storeCoprocessorWord(COPD, CRD, RPwDisp8); } :STC.W COPD,RPwDec0,CRD is op4_12=0xefa & RPwDec0; eop12_1=0x1 & eop0_8=0x60 & COPD & CRD { storeCoprocessorWord(COPD, CRD, RPwDec0); } :STC.W COPD,RBShift0,CRD is (op4_12=0xefa; eop12_1=0x1 & eop6_2=0x2 & COPD & CRD) & RBShift0 { storeCoprocessorWord(COPD, CRD, RBShift0); } :STC0.D "CP0," CRD,RPwDisp12 is (op4_12=0xf7a; crd8_1=0x0 & CRD) & RPwDisp12 { cp:1 = 0; storeCoprocessorDword(cp, CRD, RPwDisp12); } :STC0.W "CP0," CRD,RPwDisp12 is (op4_12=0xf5a; CRD) & RPwDisp12 { cp:1 = 0; 
storeCoprocessorWord(cp, CRD, RPwDisp12); } :STCM.D ecop13_3,STCMmm^rp0^STCMDcommon is (op4_12=0xeda & rp0 ; ecop13_3 & eop8_4=0x5 & STCMDcommon) & STCMmm & CPLoadAddress { build CPLoadAddress; build STCMDcommon; build STCMmm; } :STCM.W ecop13_3,STCMmm^rp0^STCMWHcommon is (op4_12=0xeda & rp0 ; ecop13_3 & eop8_4=0x3 & STCMWHcommon) & STCMmm & CPLoadAddress { build CPLoadAddress; build STCMWHcommon; build STCMmm; } :STCM.W ecop13_3,STCMmm^rp0^STCMWLcommon is (op4_12=0xeda & rp0 ; ecop13_3 & eop8_4=0x2 & STCMWLcommon) & STCMmm & CPLoadAddress { build CPLoadAddress; build STCMWLcommon; build STCMmm; } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_data_transfer.sinc ================================================ #--------------------------------------------------------------------- # 8.3.9 Data Transfer #--------------------------------------------------------------------- #--------------------------------------------------------------------- # 8.3.9.1 Move/Load Immediate Operations #--------------------------------------------------------------------- #--------------------------------------------------------------------- # MOV - Move Data Into Register #--------------------------------------------------------------------- # MOV Format I # 0011 iiii iiii dddd :MOV rd0, imm4_8 is op13_3=0x1 & op12_1=1 & rd0 & imm4_8 { rd0 = imm4_8; } # MOV Format II # 111i iii0 011i dddd iiii iiii iiii iiii :MOV rd0, imm is op13_3=0x7 & op5_4=0x3 & imm9_4 & imm4_1 & rd0; imm16 [ imm = (imm9_4 << 17) | (imm4_1 << 16) | imm16; ] { rd0 = imm; } # MOV Format III # 000s sss0 1001 dddd :MOV rd0, rs9 is op13_3=0x0 & op4_5=0x09 & rd0 & rs9 { rd0 = rs9; } #--------------------------------------------------------------------- # MOV{cond4} - Conditional Move Register # I. {d, s} -> {0, 1, ..., 15} # cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # II. 
d -> {0, 1, ..., 15} # cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # imm -> {-128, -127, ..., 127} #--------------------------------------------------------------------- # MOV{cond4} Format I # Operation: if (cond4) # Rd <- Rs # Syntax: mov{cond4} Rd, Rs # 111s sss0 0000 dddd 0001 0111 cccc 0000 :MOV^{ECOND_4_4} rd0, rs9 is op13_3=0x7 & op4_5=0 & rd0 & rs9; eop8_8=0x17 & eop0_4=0 & ECOND_4_4 { build ECOND_4_4; rd0 = rs9; } # MOV{cond4} Format II # Operation: if (cond4) # Rd <- SE(imm8) # Syntax: mov{cond4} Rd, imm # 1111 1001 1011 dddd 0000 cccc iiii iiii :MOV^{ECOND_8_4} rd0, simm0_8 is op4_12=0xf9b & rd0 ; eop12_4=0 & simm0_8 & ECOND_8_4 { build ECOND_8_4; rd0 = simm0_8; } :MOVH rd0, imm16 is op4_12=0xfc1 & rd0 ; imm16 { rd0 = imm16 << 16; } #--------------------------------------------------------------------- # 8.3.9.2 Load/Store Operations #--------------------------------------------------------------------- #--------------------------------------------------------------------- # LD.D - Load Doubleword # I-V. d -> {0, 2, ..., 14} # IV. disp -> {-32768, -32767, ..., 32767} # V. 
sa -=> {0, 1, 2, 3} #--------------------------------------------------------------------- # LD.D Format I # Operation: Rd+1:Rd <- *(Rp) # Rp <- Rp + 8; # Syntax: ld.d Rd, Rp++ # 101p ppp1 0000 ddd1 :LD.D rd0_low, RPdInc is op13_3=0x5 & op4_5=0x10 & b0=1 & RPdInc & rd0 & rd0_hi & rd0_low { rd0_hi = *:4 RPdInc; rd0_low = *:4 (RPdInc + 4); } # LD.D Format II # Operation: Rp <- Rp - 8; # Rd+1:Rd <- *(Rp) # Syntax: ld.d Rd, --Rp # 101p ppp1 0001 ddd0 :LD.D rd0_low, RPdDec is op13_3=0x5 & op4_5=0x11 & b0=0 & RPdDec & rd0 & rd0_hi & rd0_low { rd0_hi = *:4 RPdDec; rd0_low = *:4 (RPdDec + 4); } # LD.D Format III # Operation: Rd+1:Rd <- *(Rp) # Syntax: ld.d Rd, Rp # 101p ppp1 0000 ddd0 :LD.D rd0_low, rp9 is op13_3=0x5 & op4_5=0x10 & b0=0 & rp9 & rd0 & rd0_hi & rd0_low { rd0_hi = *:4 rp9; rd0_low = *:4 (rp9 + 4); } # LD.D Format IV # Operation: Rd+1:Rd <- *(Rp + SE(disp16)) # Syntax: ld.d Rd, Rp[disp16] # 111p ppp0 1110 ddd0 nnnn nnnn nnnn nnnn :LD.D rd0_low, RPDisp16 is (op13_3=0x7 & op4_5=0xe & b0=0 & rd0 & rd0_hi & rd0_low) ... & RPDisp16 { rd0_hi = *:4 RPDisp16; rd0_low = *:4 (RPDisp16 + 4); } # LD.D Format V # Operation: Rd+1:Rd <- *(Rb + (Ri << sa2)) # Syntax: ld.d Rd, Rb[Ri< {0, 1, ..., 15} # disp -> {0, 4, ..., 508} #--------------------------------------------------------------------- # LDDPC Format I # Operation: Rd <- *((PC && 0xfffffffc) + (ZE(disp7) << 2)) # Syntax: lddpc Rd, PC[disp] # 0100 1nnn nnnn dddd LDDPCdisp: disp is disp4_7 [ disp = (inst_start & 0xfffffffc) + (disp4_7 << 2); ] { export *:4 disp; } :LDDPC rd0, LDDPCdisp is op11_5=0x9 & LDDPCdisp & rd0 { rd0 = LDDPCdisp; } :LDDPC rd0, LDDPCdisp is op11_5=0x9 & LDDPCdisp & rd0 & rd0=0xf { PC = LDDPCdisp; goto [PC]; } #--------------------------------------------------------------------- # LDDSP - Load SP-relative with Displacement # I. 
d -> {0, 1, ..., 15} # disp -> {0, 4, ..., 508} #--------------------------------------------------------------------- # LDDSP Format I # Operation: Rd <- *((SP && 0xfffffffc) + (ZE(disp7) << 2)) # Syntax: lddsp Rd, SP[disp] # 0100 0nnn nnnn dddd :LDDSP rd0^", SP["^disp^"]" is op11_5=0x8 & disp4_7 & rd0 [ disp = (disp4_7 << 2); ] { ptr:4 = (SP & 0xfffffffc) + disp; # assuming SP was pointing into RAM... rd0 = * ptr; } :LDDSP rd0^", SP["^disp^"]" is op11_5=0x8 & disp4_7 & rd0 & rd0=0xf [ disp = (disp4_7 << 2); ] { ptr:4 = (SP & 0xfffffffc) + disp; # assuming SP was pointing into RAM... goto [ ptr ]; } LDBP: "B" is imm12_2=0 {} LDBP: "L" is imm12_2=1 {} LDBP: "U" is imm12_2=2 {} LDBP: "T" is imm12_2=3 {} :LDINS.B rd0:LDBP, rp9"["disp0_11"]" is op13_3=0x7 & op4_5=0x1d & rd0 & rp9 ; eop14_2=0x1 & LDBP & disp0_11 & imm12_2 { tmp:4 = disp0_11; tmpa:4 = rp9 + tmp; tmpb:1 = *[RAM]:1 tmpa; tmpc:4 = zext(tmpb); tmpd:4 = tmpc << (8 * imm12_2); tmpe:4 = 0xff; tmpe = tmpe << (8 * imm12_2); tmpf:4 = rp9 & ~tmpe; rp9 = tmpf | tmpd; } LDSHIFT12: val is disp0_11 [ val = disp0_11 << 1; ] { export *[const]:2 val; } LDHP: "B" is eb12=0 {} LDHP: "T" is eb12=1 {} :LDINS.H rd0:LDHP, rp9[LDSHIFT12] is op13_3=0x7 & op4_5=0x1d & rd0 & rp9 ; cp13_3=0x0 & eb12 & LDHP & LDSHIFT12 { tmp:4 = sext(LDSHIFT12); tmpa:4 = rp9 + tmp; tmpb:2 = *[RAM]:2 tmpa; tmpc:4 = zext(tmpb); tmpd:4 = tmpc << (16 * eb12); tmpe:4 = 0xffff; tmpe = tmpe << (16 * eb12); tmpf:4 = rp9 & ~tmpe; rp9 = tmpf | tmpd; } LDSTSWPH: val is disp0_12 [ val = disp0_12 << 1; ] { export *[const]:2 val; } LDSTSWPW: val is disp0_12 [ val = disp0_12 << 2; ] { export *[const]:2 val; } :LDSWP.SH rd0, rp9[LDSTSWPH] is op13_3=0x7 & op4_5=0x1d & rp9 & rd0 ; eop12_4=0x2 & LDSTSWPH { tmp:4 = sext(LDSTSWPH); tmpa:4 = rp9 + tmp; tmpb:2 = *[RAM]:2 tmpa; tmpc:2 = (tmpb << 8) | (tmpb >> 8); rd0 = sext(tmpc); } :LDSWP.UH rd0, rp9[LDSTSWPH] is op13_3=0x7 & op4_5=0x1d & rp9 & rd0 ; eop12_4=0x3 & LDSTSWPH { tmp:4 = sext(LDSTSWPH); tmpa:4 = rp9 + tmp; 
tmpb:2 = *[RAM]:2 tmpa; tmpc:2 = (tmpb << 8) | (tmpb >> 8); rd0 = zext(tmpc); } :LDSWP.W rd0, rp9[LDSTSWPW] is op13_3=0x7 & op4_5=0x1d & rp9 & rd0 ; eop12_4=0x8 & LDSTSWPW { tmp:4 = sext(LDSTSWPW); tmpa:4 = rp9 + tmp; tmpb:4 = *[RAM]:4 tmpa; rd0 = (tmpb << 24) | (tmpb >> 24) | ((tmpb & 0x0000FF00) << 8) | ((tmpb & 0x00FF0000) >> 8); } :STSWP.H rp9[LDSTSWPH], rs0 is op13_3=0x7 & op4_5=0x1d & rp9 & rs0 ; eop12_4=0x9 & LDSTSWPH { tmp:4 = sext(LDSTSWPH); tmpa:4 = rp9 + tmp; tmpb:2 = rs0:2; *[RAM]:2 tmpa = (tmpb >> 8) | (tmpb << 8); } :STSWP.W rp9[LDSTSWPW], rs0 is op13_3=0x7 & op4_5=0x1d & rp9 & rs0 ; eop12_4=0xa & LDSTSWPW { tmp:4 = sext(LDSTSWPW); tmpa:4 = rp9 + tmp; *[RAM]:4 tmpa = (rs0 << 24) | (rs0 >> 24) | ((rs0 & 0x0000FF00) << 8) | ((rs0 & 0x00FF0000) >> 8); } #--------------------------------------------------------------------- # ST.B - Store Byte #--------------------------------------------------------------------- # ST.B Format I # 000p ppp0 1100 ssss :ST.B RP9bInc, rs0 is op13_3=0x0 & op4_5=0x0c & RP9bInc & rs0 { *:1 RP9bInc = rs0:1; } # ST.B Format II # 000p ppp0 1111 ssss :ST.B RP9bDec, rs0 is op13_3=0x0 & op4_5=0x0f & RP9bDec & rs0 { *:1 RP9bDec = rs0:1; } # ST.B Format III # 101p ppp0 1nnn ssss :ST.B RPbDisp3, rs0 is op13_3=0x5 & op7_2=0x1 & rs0 & RPbDisp3 { *:1 RPbDisp3 = rs0:1; } # ST.B Format IV # 111p ppp1 0110 ssss nnnn nnnn nnnn nnnn :ST.B RPDisp16, rs0 is (op13_3=0x7 & op4_5=0x16 & rs0) ... 
& RPDisp16 { *:1 RPDisp16 = rs0:1; }

# ST.B Format V
# 111b bbb0 0000 iiii 0000 1011 00tt ssss
:ST.B RB9Shift, ers0 is (op13_3=0x7 & op4_5=0 & ri0; eop12_4=0 & eop8_4=0xb & eop6_2=0 & ers0) & RB9Shift { *:1 RB9Shift = ers0:1; }

#---------------------------------------------------------------------
# ST.B{cond4} - Conditionally Store Byte
#---------------------------------------------------------------------

# ST.B{cond4} Format I
# 111p ppp1 1111 ssss cccc 111n nnnn nnnn
# BUGFIX: a conditional store-byte must write exactly one byte, matching
# the unconditional ST.B constructors above (*:1 ... = rs0:1). The
# original used a 4-byte store (*:4) of the 1-byte value.
:ST.B^{COND_e12} RPwDisp9, rs0 is (op13_3=0x7 & op4_5=0x1f & rs0; eop9_3=0x7 & COND_e12) & RPwDisp9 { build COND_e12; *:1 RPwDisp9 = rs0:1; }

#---------------------------------------------------------------------
# ST.D - Store Doubleword
#---------------------------------------------------------------------

# ST.D Format I
# 101p ppp1 0010 sss0
:ST.D RPdInc, rs0_low is op13_3=0x5 & op4_5=0x12 & b0=0 & RPdInc & rs0 & rs0_hi & rs0_low { low:8 = zext(rs0_low); hi:8 = zext(rs0_hi) << 32; *:8 RPdInc = hi | low; }

# ST.D Format II
# 101p ppp1 0010 sss1
:ST.D RPdDec, rs0_low is op13_3=0x5 & op4_5=0x12 & b0=1 & RPdDec & rs0 & rs0_hi & rs0_low { low:8 = zext(rs0_low); hi:8 = zext(rs0_hi) << 32; *:8 RPdDec = hi | low; }

# ST.D Format III
# 101p ppp1 0001 sss1
:ST.D rp9, rs0_low is op13_3=0x5 & op4_5=0x11 & b0=1 & rp9 & rs0 & rs0_hi & rs0_low { low:8 = zext(rs0_low); hi:8 = zext(rs0_hi) << 32; *:8 rp9 = hi | low; }

# ST.D Format IV
# 111p ppp0 1110 sss1 nnnn nnnn nnnn nnnn
:ST.D RPDisp16, rs0_low is (op13_3=0x7 & op4_5=0xe & b0=1 & rs0 & rs0_hi & rs0_low) ...
& RPDisp16 { low:8 = zext(rs0_low); hi:8 = zext(rs0_hi) << 32; *:8 RPDisp16 = hi | low; } # ST.D Format V # 111b bbb0 0000 iiii 0000 1000 00tt ssss :ST.D RB9Shift, ers0_low is (op13_3=0x7 & op4_5=0 & ri0; eop12_4=0 & eop8_4=0x8 & eop6_2=0 & ers0 & ers0_hi & ers0_low) & RB9Shift { low:8 = zext(ers0_low); hi:8 = zext(ers0_hi) << 32; *:8 RB9Shift = hi | low; } #--------------------------------------------------------------------- # ST.H - Store Halfword #--------------------------------------------------------------------- # ST.H Format I # 000p ppp0 1011 ssss :ST.H RPhInc, rs0 is op13_3=0x0 & op4_5=0x0b & RPhInc & rs0 { *:2 RPhInc = rs0:2; } # ST.H Format II # 000p ppp0 1110 ssss :ST.H RPhDec, rs0 is op13_3=0x0 & op4_5=0x0e & RPhDec & rs0 { *:2 RPhDec = rs0:2; } # ST.H Format III # 101p ppp0 0nnn ssss :ST.H RPhDisp3, rs0 is op13_3=0x5 & op7_2=0x0 & rs0 & RPhDisp3 { *:2 RPhDisp3 = rs0:2; } # ST.H Format IV # 111p ppp1 0101 ssss nnnn nnnn nnnn nnnn :ST.H RPDisp16, rs0 is (op13_3=0x7 & op4_5=0x15 & rs0) ... 
& RPDisp16 { *:2 RPDisp16 = rs0:2; }

# ST.H Format V
# 111b bbb0 0000 iiii 0000 1010 00tt ssss
:ST.H RB9Shift, ers0 is (op13_3=0x7 & op4_5=0 & ri0; eop12_4=0 & eop8_4=0xa & eop6_2=0 & ers0) & RB9Shift { *:2 RB9Shift = ers0:2; }

#---------------------------------------------------------------------
# ST.H{cond4} - Conditionally Store Halfword
#---------------------------------------------------------------------

# ST.H{cond4} Format I
# 111p ppp1 1111 ssss cccc 110n nnnn nnnn
# BUGFIX: a conditional store-halfword must write exactly two bytes,
# matching the unconditional ST.H constructors above (*:2 ... = rs0:2).
# The original used a 4-byte store (*:4) of the 2-byte value.
:ST.H^{COND_e12} RPhDisp9, rs0 is (op13_3=0x7 & op4_5=0x1f & rs0; eop9_3=0x6 & COND_e12) & RPhDisp9 { build COND_e12; *:2 RPhDisp9 = rs0:2; }

# STXP/STYP select the top ("T") or bottom ("B") halfword of the Rx/Ry
# register saved in context by the STHH.W constructors below.
STXP: "T" is eb13=1 & ctx_savex { tmp:4 = ctx_savex; tmp = tmp >> 16; export *[const]:4 tmp; }
STXP: "B" is eb13=0 & ctx_savex { tmp:4 = ctx_savex; tmp = tmp & 0xFFFF; export *[const]:4 tmp; }
STYP: "T" is eb12=1 & ctx_savey { tmp:4 = ctx_savey; tmp = tmp >> 16; export *[const]:4 tmp; }
STYP: "B" is eb12=0 & ctx_savey { tmp:4 = ctx_savey; tmp = tmp & 0xFFFF; export *[const]:4 tmp; }

# STHHD scales the 8-bit displacement by 4 (word-aligned displacement).
STHHD: val is edisp4_8 [ val = edisp4_8 << 2; ] { export *[const]:2 val; }

:STHH.W erp0[STHHD],rx9:STXP,ry0:STYP is op13_3=0x7 & op4_5=0x1e & rx9 & ry0 ; eop14_2=0x3 & STXP & STYP & STHHD & erp0 [ctx_savex=rx9; ctx_savey=ry0;] { tmp:4 = zext(STHHD); tmp = erp0 + tmp; *[RAM]:4 tmp = (STXP << 16) | STYP; }

# BUGFIX: the indexed form stores to Rb + (Ri << sa), as the display
# string erb0[eri8 << shift4_2] and the displacement form above indicate;
# the original computed only (eri8 << shift4_2) and dropped the base
# register erb0 from the effective address.
:STHH.W erb0[eri8" << "shift4_2],rx9:STXP,ry0:STYP is op13_3=0x7 & op4_5=0x1e & rx9 & ry0 ; eop14_2=0x2 & STXP & STYP & eri8 & eop6_2=0x0 & shift4_2 & erb0 [ctx_savex=rx9; ctx_savey=ry0;] { tmp:4 = erb0 + (eri8 << shift4_2); *[RAM]:4 tmp = (STXP << 16) | STYP; }

#---------------------------------------------------------------------
# ST.W - Store Word
#---------------------------------------------------------------------

# ST.W Format I
# 000p ppp0 1010 ssss
:ST.W RPwInc,rs0 is op13_3=0x0 & op4_5=0x0a & RPwInc & rs0 { *:4 RPwInc = rs0; }

# ST.W Format II
# 000p ppp0 1101 ssss
:ST.W RPwDec,rs0 is op13_3=0x0 & op4_5=0x0d & RPwDec & rs0 { *:4 RPwDec = rs0; }

# ST.W Format III
# 100p ppp1
nnnn ssss :ST.W RPwDisp4,rs0 is op13_3=0x4 & op8_1=1 & RPwDisp4 & rs0 { *:4 RPwDisp4 = rs0; } # ST.W Format IV # 111p ppp1 0100 ssss nnnn nnnn nnnn nnnn :ST.W RPDisp16,rs0 is (op13_3=7 & op4_5=0x14 & rs0) ... & RPDisp16 { *:4 RPDisp16 = rs0; } # ST.W Format V # 111b bbb0 0000 iiii 0000 1001 00tt ssss :ST.W RB9Shift,ers0 is (op13_3=0x7 & op4_5=0 & ri0; eop12_4=0 & eop8_4=0x9 & eop6_2=0 & ers0) & RB9Shift { *:4 RB9Shift = ers0; } #--------------------------------------------------------------------- # ST.W - Conditionally Store Word #--------------------------------------------------------------------- # ST.W{cond4} Format I # 111p ppp1 1111 ssss cccc 101n nnnn nnnn :ST.W^{COND_e12} RPwDisp9, rs0 is (op13_3=0x7 & op4_5=0x1f & rs0; eop9_3=0x5 & COND_e12) & RPwDisp9 { build COND_e12; *:4 RPwDisp9 = rs0; } :STCOND rp9[disp_16],rs0 is op13_3=0x7 & rp9 & op4_5=0x17 & rs0 ; disp_16 { Z = L; CZTOSR(); if (!L) goto inst_next; tmp:2 = disp_16; tmpa:4 = sext(tmp); tmpa = tmpa + rp9; *[RAM]:4 tmpa = rs0; } #--------------------------------------------------------------------- # STDSP - Store Stack-Pointer Relative # I. disp -> {0, 4, ..., 508} # s -> {0, 1, ..., 15} #--------------------------------------------------------------------- # STDSP Format I # Operation: *((SP & 0xfffffffc) + (ZE(disp7) << 2)) <- Rs # Syntax: stdsp SP[disp], Rs # 0101 0nnn nnnn ssss :STDSP "SP["^disp4_7^"], "^rs0 is op11_5=0xa & disp4_7 & rs0 { ptr:4 = (SP & 0xfffffffc) + (disp4_7 << 2); #ptr:4 = (((SP >> 2) + disp4_7) << 2); # assuming SP was pointing into RAM... 
*:4 ptr = rs0; } #--------------------------------------------------------------------- # 8.3.9.3 Multiple Data #--------------------------------------------------------------------- #--------------------------------------------------------------------- # LDM - Load Multiple Registers #--------------------------------------------------------------------- # LDM Format I # 1110 00M1 1100 pppp LLLL LLLL LLLL LLLL macro status_r12() { V = 0; N = R12 s< 0; Z = R12 == 0; C = 0; CZNVTOSR(); } LoadAddress: is rp0=0xf ; eb15=1 { ldadd = SP; } # Rp=PC and Reglist16[PC]=1 LoadAddress: is rp0=0xf ; eb15 { ldadd = inst_start; } # Rp=PC and Reglist16[PC]=0 LoadAddress: is rp0 ; eb15 { ldadd = rp0; } # Rp!=PC LoadAddressTS: is rp0 ; eb15 { ldadd = rp0; } LDMinc15: ", PC" is eb15=1 { PC = *:4 ldadd; ldadd = ldadd + 4; } LDMinc14ab: ", LR" is eb14=1 { LR = *:4 ldadd; ldadd = ldadd + 4; } LDMinc13ab: ", SP" is eb13=1 { SP = *:4 ldadd; ldadd = ldadd + 4; } LDMinc12c: ", R12=0" is eb14=0 & eb12=0 { R12 = 0; } LDMinc12c: ", R12=1" is eb14=0 & eb12=1 { R12 = 1; } LDMinc12c: ", R12=-1" is eb14=1 { R12 = -1; } LDMinc12ab: ", R12" is eb12=1 { R12 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc12s: is eb15=1 { status_r12(); } LDMinc11: ", R11" is eb11=1 { R11 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc10: ", R10" is eb10=1 { R10 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc9: ", R9" is eb9=1 { R9 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc8: ", R8" is eb8=1 { R8 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc7: ", R7" is eb7=1 { R7 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc6: ", R6" is eb6=1 { R6 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc5: ", R5" is eb5=1 { R5 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc4: ", R4" is eb4=1 { R4 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc3: ", R3" is eb3=1 { R3 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc2: ", R2" is eb2=1 { R2 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc1: ", R1" is eb1=1 { R1 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc0: ", R0" is eb0=1 { R0 = *:4 ldadd; ldadd = ldadd + 4; } LDMinc15: 
is eb15=0 { } LDMinc14ab: is eb14=0 { } LDMinc13ab: is eb13=0 { } LDMinc12ab: is eb12=0 { } LDMinc12s: is eb15=0 { } LDMinc11: is eb11=0 { } LDMinc10: is eb10=0 { } LDMinc9: is eb9=0 { } LDMinc8: is eb8=0 { } LDMinc7: is eb7=0 { } LDMinc6: is eb6=0 { } LDMinc5: is eb5=0 { } LDMinc4: is eb4=0 { } LDMinc3: is eb3=0 { } LDMinc2: is eb2=0 { } LDMinc1: is eb1=0 { } LDMinc0: is eb0=0 { } LDMpp: is op9_1=0 { } LDMpp: "++" is op9_1=1 & rp0=0xf { SP = ldadd; } LDMpp: "++" is op9_1=1 & rp0 { rp0 = ldadd; } LDMTSpp: is op9_1=0 { } LDMTSpp: "++" is op9_1=1 & rp0 { rp0 = ldadd; } LDMret: is eb15=1 { return [ PC ]; } LDMret: is eb15=0 { } LDMcommon: LDMinc0^LDMinc1^LDMinc2^LDMinc3^LDMinc4^LDMinc5^LDMinc6^LDMinc7^LDMinc8^LDMinc9^LDMinc10^LDMinc11 is LDMinc0 & LDMinc1 & LDMinc2 & LDMinc3 & LDMinc4 & LDMinc5 & LDMinc6 & LDMinc7 & LDMinc8 & LDMinc9 & LDMinc10 & LDMinc11 { build LDMinc11; build LDMinc10; build LDMinc9; build LDMinc8; build LDMinc7; build LDMinc6; build LDMinc5; build LDMinc4; build LDMinc3; build LDMinc2; build LDMinc1; build LDMinc0; } LDMlistd: LDMpp^LDMcommon^LDMinc12ab^LDMinc13ab^LDMinc14ab^LDMinc15 is (LDMpp ; LDMcommon & LDMinc12ab & LDMinc13ab & LDMinc14ab & LDMinc12s & LDMinc15 & LDMret) & LoadAddressTS { build LoadAddressTS; build LDMinc15; build LDMinc14ab; build LDMinc13ab; build LDMinc12ab; build LDMcommon; build LDMpp; build LDMret; } LDMlistc: LDMpp^LDMcommon^LDMinc12c^LDMinc15 is (LDMpp ; LDMcommon & LDMinc12c & LDMinc12s & LDMinc15 & LDMret) & LoadAddress { build LoadAddress; build LDMinc15; build LDMinc12c; build LDMinc12s; build LDMcommon; build LDMpp; build LDMret; } LDMlistb: LDMpp^LDMcommon^LDMinc12ab^LDMinc13ab^LDMinc14ab^LDMinc15 is (LDMpp ; LDMcommon & LDMinc12ab & LDMinc13ab & LDMinc14ab & LDMinc12s & LDMinc15 & LDMret) & LoadAddress { build LoadAddress; build LDMinc15; build LDMinc14ab; build LDMinc13ab; build LDMinc12ab; build LDMinc12s; build LDMcommon; build LDMpp; build LDMret; } LDMlista: LDMpp^LDMcommon^LDMinc12ab^LDMinc13ab^LDMinc14ab 
is (LDMpp ; LDMcommon & LDMinc12ab & LDMinc13ab & LDMinc14ab) & LoadAddress { build LoadAddress; build LDMinc14ab; build LDMinc13ab; build LDMinc12ab; build LDMcommon; build LDMpp; } :LDM rp0^LDMlistc is (rp0 & rp0=0xf & op10_6=0x38 & op4_5=0x1c ; eb15=1) & LDMlistc { build LDMlistc; } :LDM rp0^LDMlistb is (rp0 & op10_6=0x38 & op4_5=0x1c ; eb15=1) & LDMlistb { build LDMlistb; } :LDM rp0^LDMlista is (rp0 & op10_6=0x38 & op4_5=0x1c) ... & LDMlista { build LDMlista; } :LDMTS rp0^LDMlistd is (rp0 & op10_6=0x39 & op4_5=0x1c) ... & LDMlistd { build LDMlistd; } #--------------------------------------------------------------------- # POPM - Load Multiple Registers #--------------------------------------------------------------------- # POPM Format I # 1101 RRRR RRRR k010 COM5: is bp4_1=0 {} COM5: "," is bp4_1 {} COM6: is bp4_2=0 {} COM6: "," is bp4_2 {} COM7: is bp4_3=0 {} COM7: "," is bp4_3 {} COM8: is bp4_4=0 {} COM8: "," is bp4_4 {} COM9: is bp4_5=0 {} COM9: "," is bp4_5 {} COM10: is bp4_6=0 {} COM10: "," is bp4_6 {} COM11: is bp4_7=0 {} COM11: is bp4_7=0x20 & b03=1 {} COM11: "," is bp4_7 {} POPMinc11: COM11^"PC" is b11=1 & COM11 { build COM11; PC = *:4 SP; SP = SP + 4; } POPMinc10b: is b10 { } POPMinc10a: COM10^"LR" is b10=1 & COM10 { build COM10; LR = *:4 SP; SP = SP + 4; } POPMinc9b: ",R12=0" is b11=1 & b10=0 & b09=0 { R12 = 0; } POPMinc9b: ",R12=1" is b11=1 & b10=0 & b09=1 { R12 = 1; } POPMinc9b: ",R12=-1" is b11=1 & b10=1 { R12 = -1; } POPMinc9a: COM9^"R12" is b09=1 & COM9 { build COM9; R12 = *:4 SP; SP = SP + 4; } POPMinc9s: is b11=1 { status_r12(); } POPMinc8: COM8^"R11" is b08=1 & COM8 { build COM8; R11 = *:4 SP; SP = SP + 4; } POPMinc7: COM7^"R10" is b07=1 & COM7 { build COM7; R10 = *:4 SP; SP = SP + 4; } POPMinc6: COM6^"R8-R9" is b06=1 & COM6 { build COM6; R9 = *:4 SP; SP = SP + 4; R8 = *:4 SP; SP = SP + 4; } POPMinc5: COM5^"R4-R7" is b05=1 & COM5 { build COM5; R7 = *:4 SP; SP = SP + 4; R6 = *:4 SP; SP = SP + 4; R5 = *:4 SP; SP = SP + 4; R4 = *:4 SP; SP = SP + 
4; } POPMinc4: "R0-R3" is b04=1 { R3 = *:4 SP; SP = SP + 4; R2 = *:4 SP; SP = SP + 4; R1 = *:4 SP; SP = SP + 4; R0 = *:4 SP; SP = SP + 4; } POPMinc11: is b11=0 { } POPMinc10a: is b10=0 { } POPMinc9a: is b09=0 { } POPMinc9b: is b11=0 { } POPMinc9s: is b11=0 { } POPMinc8: is b08=0 { } POPMinc7: is b07=0 { } POPMinc6: is b06=0 { } POPMinc5: is b05=0 { } POPMinc4: is b04=0 { } POPMjump: is b11=1 { return [ PC ]; } POPMjump: is b11=0 { } POPMchunk: POPMinc4^POPMinc5^POPMinc6^POPMinc7^POPMinc8 is POPMinc4 & POPMinc5 & POPMinc6 & POPMinc7 & POPMinc8 & POPMinc9s & POPMjump { build POPMinc9s; build POPMinc8; build POPMinc7; build POPMinc6; build POPMinc5; build POPMinc4; build POPMjump; } POPMdispa: POPMchunk^POPMinc9a^POPMinc10a^POPMinc11 is POPMchunk & POPMinc9a & POPMinc10a & POPMinc11 { build POPMinc11; build POPMinc10a; build POPMinc9a; build POPMchunk; } POPMdispb: POPMchunk^POPMinc10b^POPMinc11^POPMinc9b is POPMchunk & POPMinc9b & POPMinc10b & POPMinc11 { build POPMinc11; build POPMinc10b; build POPMinc9b; build POPMchunk; } :POPM POPMdispa is op12_4=0xd & op0_4=0x2 & POPMdispa { build POPMdispa; } :POPM POPMdispb is op12_4=0xd & op0_4=0xa & POPMdispb { build POPMdispb; } #--------------------------------------------------------------------- # PUSHM - Push Multiple Registers to Stack # I. 
Reglist8 -> {R0-R3, R4-R7, R8-R9, R10, R11, R12, LR, PC} #--------------------------------------------------------------------- # PUSHM Format I: # Operation: if Reglist8[0] == 1 then # *(--SP) <- R0; # *(--SP) <- R1; # *(--SP) <- R2; # *(--SP) <- R3; # if Reglist8[1] == 1 then # *(--SP) <- R4; # *(--SP) <- R5; # *(--SP) <- R6; # *(--SP) <- R7; # if Reglist8[2] == 1 then # *(--SP) <- R8; # *(--SP) <- R9; # if Reglist8[3] == 1 then # *(--SP) <- R10; # if Reglist8[4] == 1 then # *(--SP) <- R11; # if Reglist8[5] == 1 then # *(--SP) <- R12; # if Reglist8[6] == 1 then # *(--SP) <- LR; # if Reglist8[7] == 1 then # *(--SP) <- PC; # Syntax: pushm Reglist8 # 1101 RRRR RRRR 0001 PUSHMdec4: "R0-R3" is b04=1 { SP = SP - 4; *:4 SP = R0; SP = SP - 4; *:4 SP = R1; SP = SP - 4; *:4 SP = R2; SP = SP - 4; *:4 SP = R3; } PUSHMdec5: COM5^"R4-R7" is b05=1 & COM5 { build COM5; SP = SP - 4; *:4 SP = R4; SP = SP - 4; *:4 SP = R5; SP = SP - 4; *:4 SP = R6; SP = SP - 4; *:4 SP = R7; } PUSHMdec6: COM6^"R8-R9" is b06=1 & COM6{ build COM6; SP = SP - 4; *:4 SP = R8; SP = SP - 4; *:4 SP = R9; } PUSHMdec7: COM7^"R10" is b07=1 & COM7 { build COM7; SP = SP - 4; *:4 SP = R10; } PUSHMdec8: COM8^"R11" is b08=1 & COM8 { build COM8; SP = SP - 4; *:4 SP = R11; } PUSHMdec9: COM9^"R12" is b09=1 & COM9 { build COM9; SP = SP - 4; *:4 SP = R12; } PUSHMdec10: COM10^"LR" is b10=1 & COM10 { build COM10; SP = SP - 4; *:4 SP = LR; } PUSHMdec11: COM11^"PC" is b11=1 & COM11 { build COM11; SP = SP - 4; *:4 SP = inst_start; } PUSHMdec4: is b04=0 { } PUSHMdec5: is b05=0 { } PUSHMdec6: is b06=0 { } PUSHMdec7: is b07=0 { } PUSHMdec8: is b08=0 { } PUSHMdec9: is b09=0 { } PUSHMdec10: is b10=0 { } PUSHMdec11: is b11=0 { } PUSHMdisp: PUSHMdec4^PUSHMdec5^PUSHMdec6^PUSHMdec7^PUSHMdec8^PUSHMdec9^PUSHMdec10^PUSHMdec11 is PUSHMdec4 & PUSHMdec5 & PUSHMdec6 & PUSHMdec7 & PUSHMdec8 & PUSHMdec9 & PUSHMdec10 & PUSHMdec11 { build PUSHMdec4; build PUSHMdec5; build PUSHMdec6; build PUSHMdec7; build PUSHMdec8; build PUSHMdec9; build 
PUSHMdec10; build PUSHMdec11; } :PUSHM PUSHMdisp is op12_4=0xd & op0_4=0x1 & PUSHMdisp { build PUSHMdisp; } #--------------------------------------------------------------------- # STM - Store Multiple Registers #--------------------------------------------------------------------- # STM Format I # 1110 10M1 1100 pppp LLLL LLLL LLLL LLLL StoreAddress: is rp0 ; eb15 { stadd = rp0; } # Rp!=PC STMinc0: ,deb0 is rp0 ; deb0 & eb0=1 { *:4 stadd = R0; stadd = stadd + 4; } STMinc1: ,deb1 is rp0 ; deb1 & eb1=1 { *:4 stadd = R1; stadd = stadd + 4; } STMinc2: ,deb2 is rp0 ; deb2 & eb2=1 { *:4 stadd = R2; stadd = stadd + 4; } STMinc3: ,deb3 is rp0 ; deb3 & eb3=1 { *:4 stadd = R3; stadd = stadd + 4; } STMinc4: ,deb4 is rp0 ; deb4 & eb4=1 { *:4 stadd = R4; stadd = stadd + 4; } STMinc5: ,deb5 is rp0 ; deb5 & eb5=1 { *:4 stadd = R5; stadd = stadd + 4; } STMinc6: ,deb6 is rp0 ; deb6 & eb6=1 { *:4 stadd = R6; stadd = stadd + 4; } STMinc7: ,deb7 is rp0 ; deb7 & eb7=1 { *:4 stadd = R7; stadd = stadd + 4; } STMinc8: ,deb8 is rp0 ; deb8 & eb8=1 { *:4 stadd = R8; stadd = stadd + 4; } STMinc9: ,deb9 is rp0 ; deb9 & eb9=1 { *:4 stadd = R9; stadd = stadd + 4; } STMinc10: ,deb10 is rp0 ; deb10 & eb10=1 { *:4 stadd = R10; stadd = stadd + 4; } STMinc11: ,deb11 is rp0 ; deb11 & eb11=1 { *:4 stadd = R11; stadd = stadd + 4; } STMinc12: ,deb12 is rp0 ; deb12 & eb12=1 { *:4 stadd = R12; stadd = stadd + 4; } STMinc13: ,deb13 is rp0 ; deb13 & eb13=1 { *:4 stadd = SP; stadd = stadd + 4; } STMinc14: ,deb14 is rp0 ; deb14 & eb14=1 { *:4 stadd = LR; stadd = stadd + 4; } STMinc15: ,deb15 is rp0 ; deb15 & eb15=1 { *:4 stadd = inst_start; stadd = stadd + 4; } STMinc0: is rp0 ; eb0=0 { } STMinc1: is rp0 ; eb1=0 { } STMinc2: is rp0 ; eb2=0 { } STMinc3: is rp0 ; eb3=0 { } STMinc4: is rp0 ; eb4=0 { } STMinc5: is rp0 ; eb5=0 { } STMinc6: is rp0 ; eb6=0 { } STMinc7: is rp0 ; eb7=0 { } STMinc8: is rp0 ; eb8=0 { } STMinc9: is rp0 ; eb9=0 { } STMinc10: is rp0 ; eb10=0 { } STMinc11: is rp0 ; eb11=0 { } STMinc12: is 
rp0 ; eb12=0 { } STMinc13: is rp0 ; eb13=0 { } STMinc14: is rp0 ; eb14=0 { } STMinc15: is rp0 ; eb15=0 { } STMdec0: ,deb0 is rp0 ; deb0 & eb0=1 { rp0 = rp0 - 4; *:4 rp0 = R0; } STMdec1: ,deb1 is rp0 ; deb1 & eb1=1 { rp0 = rp0 - 4; *:4 rp0 = R1; } STMdec2: ,deb2 is rp0 ; deb2 & eb2=1 { rp0 = rp0 - 4; *:4 rp0 = R2; } STMdec3: ,deb3 is rp0 ; deb3 & eb3=1 { rp0 = rp0 - 4; *:4 rp0 = R3; } STMdec4: ,deb4 is rp0 ; deb4 & eb4=1 { rp0 = rp0 - 4; *:4 rp0 = R4; } STMdec5: ,deb5 is rp0 ; deb5 & eb5=1 { rp0 = rp0 - 4; *:4 rp0 = R5; } STMdec6: ,deb6 is rp0 ; deb6 & eb6=1 { rp0 = rp0 - 4; *:4 rp0 = R6; } STMdec7: ,deb7 is rp0 ; deb7 & eb7=1 { rp0 = rp0 - 4; *:4 rp0 = R7; } STMdec8: ,deb8 is rp0 ; deb8 & eb8=1 { rp0 = rp0 - 4; *:4 rp0 = R8; } STMdec9: ,deb9 is rp0 ; deb9 & eb9=1 { rp0 = rp0 - 4; *:4 rp0 = R9; } STMdec10: ,deb10 is rp0 ; deb10 & eb10=1 { rp0 = rp0 - 4; *:4 rp0 = R10; } STMdec11: ,deb11 is rp0 ; deb11 & eb11=1 { rp0 = rp0 - 4; *:4 rp0 = R11; } STMdec12: ,deb12 is rp0 ; deb12 & eb12=1 { rp0 = rp0 - 4; *:4 rp0 = R12; } STMdec13: ,deb13 is rp0 ; deb13 & eb13=1 { rp0 = rp0 - 4; *:4 rp0 = SP; } STMdec14: ,deb14 is rp0 ; deb14 & eb14=1 { rp0 = rp0 - 4; *:4 rp0 = LR; } STMdec15: ,deb15 is rp0 ; deb15 & eb15=1 { rp0 = rp0 - 4; *:4 rp0 = inst_start; } STMdec0: is rp0 ; eb0=0 { } STMdec1: is rp0 ; eb1=0 { } STMdec2: is rp0 ; eb2=0 { } STMdec3: is rp0 ; eb3=0 { } STMdec4: is rp0 ; eb4=0 { } STMdec5: is rp0 ; eb5=0 { } STMdec6: is rp0 ; eb6=0 { } STMdec7: is rp0 ; eb7=0 { } STMdec8: is rp0 ; eb8=0 { } STMdec9: is rp0 ; eb9=0 { } STMdec10: is rp0 ; eb10=0 { } STMdec11: is rp0 ; eb11=0 { } STMdec12: is rp0 ; eb12=0 { } STMdec13: is rp0 ; eb13=0 { } STMdec14: is rp0 ; eb14=0 { } STMdec15: is rp0 ; eb15=0 { } STMdecdisp: STMdec0^STMdec1^STMdec2^STMdec3^STMdec4^STMdec5^STMdec6^STMdec7^STMdec8^STMdec9^STMdec10^STMdec11^STMdec12^STMdec13^STMdec14^STMdec15 is STMdec0 & STMdec1 & STMdec2 & STMdec3 & STMdec4 & STMdec5 & STMdec6 & STMdec7 & STMdec8 & STMdec9 & STMdec10 & STMdec11 & 
STMdec12 & STMdec13 & STMdec14 & STMdec15 { build STMdec0; build STMdec1; build STMdec2; build STMdec3; build STMdec4; build STMdec5; build STMdec6; build STMdec7; build STMdec8; build STMdec9; build STMdec10; build STMdec11; build STMdec12; build STMdec13; build STMdec14; build STMdec15; } STMincdisp: STMinc0^STMinc1^STMinc2^STMinc3^STMinc4^STMinc5^STMinc6^STMinc7^STMinc8^STMinc9^STMinc10^STMinc11^STMinc12^STMinc13^STMinc14^STMinc15 is STMinc0 & STMinc1 & STMinc2 & STMinc3 & STMinc4 & STMinc5 & STMinc6 & STMinc7 & STMinc8 & STMinc9 & STMinc10 & STMinc11 & STMinc12 & STMinc13 & STMinc14 & STMinc15 & StoreAddress { build StoreAddress; build STMinc15; build STMinc14; build STMinc13; build STMinc12; build STMinc11; build STMinc10; build STMinc9; build STMinc8; build STMinc7; build STMinc6; build STMinc5; build STMinc4; build STMinc3; build STMinc2; build STMinc1; build STMinc0; } :STM "--"^rp0^STMdecdisp is (op10_6=0x3a & op4_5=0x1c & op9_1=1 & rp0) ... & STMdecdisp { } :STM rp0^STMincdisp is (op10_6=0x3a & op4_5=0x1c & op9_1=0 & rp0) ... & STMincdisp { } :STMTS "--"^rp0^STMdecdisp is (op10_6=0x3b & op4_5=0x1c & op9_1=1 & rp0) ... & STMdecdisp { } :STMTS rp0^STMincdisp is (op10_6=0x3b & op4_5=0x1c & op9_1=0 & rp0) ... 
& STMincdisp { } :XCHG rx9, ry0, erd0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0xb4 & erd0 { erd0 = *[RAM]:4 rx9; *[RAM]:4 rx9 = ry0; } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_dsp_operations.sinc ================================================ #--------------------------------------------------------------------- # 8.3.4 DSP Operations #--------------------------------------------------------------------- operand1: ":b" is rx9 ; selectorxy4_2=0x0 {b:4 = sext(rx9[0,16]); export b;} operand1: ":b" is rx9 ; selectorxy4_2=0x1 {b:4 = sext(rx9[0,16]); export b;} operand1: ":t" is rx9 ; selectorxy4_2=0x2 {t:4 = sext(rx9[16,16]); export t;} operand1: ":t" is rx9 ; selectorxy4_2=0x3 {t:4 = sext(rx9[16,16]); export t;} operand2: ":b" is ry0 ; selectorxy4_2=0x0 {b:4 = sext(ry0[0,16]); export b;} operand2: ":t" is ry0 ; selectorxy4_2=0x1 {t:4 = sext(ry0[16,16]); export t;} operand2: ":b" is ry0 ; selectorxy4_2=0x2 {b:4 = sext(ry0[0,16]); export b;} operand2: ":t" is ry0 ; selectorxy4_2=0x3 {t:4 = sext(ry0[16,16]); export t;} rdPlus1: is erd0=0x0 {export R1;} rdPlus1: is erd0=0x2 {export R3;} rdPlus1: is erd0=0x4 {export R5;} rdPlus1: is erd0=0x6 {export R7;} rdPlus1: is erd0=0x8 {export R9;} rdPlus1: is erd0=0xa {export R11;} rdPlus1: is erd0=0xc {export SP;} rdPlus1: is erd0=0xe {export *[const]:4 inst_start;}#PC register #--------------------------------------------------------------------- # ADDHH.W - Add Halfwords into Word # I. 
{d, x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # ADDHH.W Format I # Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]); # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # Rd <- operand1 + operand2; # Syntax: addhh.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1110 00XY dddd # Assumption: t = 1, b = 0 using XY :ADDHH.W erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0xe & eop6_2=0x0 & selectorxy4_2 & erd0)) & operand1 & operand2 { erd0 = operand1 + operand2; addflags(operand1, operand2, erd0); } #--------------------------------------------------------------------- # MACHH.D - Multiply Halfwords and Accumulate in Doubleword # I. d -> {0, 2, 4, ..., 14} # {x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MACHH.D Format I # Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]); # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # (Rd+1:Rd)[63:16] <- (operand1*operand2)[31:0] + (Rd+1:Rd)[63:16]; # Rd[15:0] <- 0; # Syntax: machh.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0101 10XY dddd # Assumption: t = 1, b = 0 using XY :MACHH.D erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0x5 & eop6_2=0x2 & selectorxy4_2 & erd0 & rdPlus1)) & operand1 & operand2 { multAccumulate:8 = (zext(operand1 * operand2) + ((zext(rdPlus1) << 32) | (zext(erd0[16,16]) << 16))); rdPlus1 = multAccumulate[32,32]; erd0 = ((zext(multAccumulate[16,16]) << 16) & 0xffff0000); } #--------------------------------------------------------------------- # MACHH.W - Multiply Halfwords and Accumulate in Word # I. 
{d, x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MACHH.W Format I # Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]); # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # Rd <- (operand1*operand2) + Rd; # Syntax: machh.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0100 10XY dddd # Assumption: t = 1, b = 0 using XY :MACHH.W erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0x4 & eop6_2=0x2 & selectorxy4_2 & erd0)) & operand1 & operand2 { multAccumulate:4 = ((operand1 * operand2) + erd0); erd0 = multAccumulate; } #--------------------------------------------------------------------- # MACWH.D - Multiply Word with Halfword and Accumulate in Doubleword # I. d -> {0, 2, 4, ..., 14} # {x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MACWH.D Format I # Operation: Operand1 = Rx; # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # (Rd+1:Rd)[63:16] <- (operand1*operand2)[47:0] + (Rd+1:Rd)[63:16]; # Rd[15:0] <- 0; # Syntax: macwh.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1100 100Y dddd # Assumption: t = 1, b = 0 using Y :MACWH.D erd0, rx9, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0xc & eop5_3=0x4 & selectorxy4_2 & erd0 & rdPlus1)) & operand2 { mult:6 = sext(rx9 * operand2); multAccumulate:8 = (zext(mult) + ((zext(rdPlus1) << 32) | (zext(erd0[16,16]) << 16))); rdPlus1 = multAccumulate[32,32]; erd0 = ((zext(multAccumulate[16,16]) << 16) & 0xffff0000); } #--------------------------------------------------------------------- # MULHH.W - Multiply Halfwords with Halfword # I. 
{d, x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MULHH.W Format I # Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]); # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # Rd <- operand1 * operand2; # Syntax: mulhh.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0111 10XY dddd # Assumption: t = 1, b = 0 using XY :MULHH.W erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0x7 & eop6_2=0x2 & selectorxy4_2 & erd0)) & operand1 & operand2 { erd0 = operand1 * operand2; } #--------------------------------------------------------------------- # MULWH.D - Multiply Word with Halfword # I. d -> {0, 2, 4, ..., 14} # {x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MULWH.D Format I # Operation: Operand1 = Rx; # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # (Rd+1:Rd)[63:16] <- (operand1*operand2); # Rd[15:0] <- 0; # Syntax: mulwh.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1101 100Y dddd # Assumption: t = 1, b = 0 using Y :MULWH.D erd0, rx9, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0xd & eop5_3=0x4 & selectorxy4_2 & erd0 & rdPlus1)) & operand2 { multiply:8 = (sext(rx9 * operand2)); rdPlus1 = multiply[32,32]; erd0 = ((zext(multiply[16,16]) << 16) & 0xffff0000); } #--------------------------------------------------------------------- # MULNHH.W - Multiply Halfwords with Negated Halfword # I. 
{d, x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MULNHH.W Format I # Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]); # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # Rd <- -(operand1 * operand2); # Syntax: mulnhh.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0001 10XY dddd # Assumption: t = 1, b = 0 using XY :MULNHH.W erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0x1 & eop6_2=0x2 & selectorxy4_2 & erd0)) & operand1 & operand2 { neg1:4 = 0xffffffff; erd0 = (neg1 * (operand1 * operand2)); } #--------------------------------------------------------------------- # MULNWH.D - Multiply Word with Negated Halfword # I. d -> {0, 2, 4, ..., 14} # {x, y} -> {0, 1, ..., 15} # part -> {t,b} #--------------------------------------------------------------------- # MULNWH.D Format I # Operation: Operand1 = Rx; # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # (Rd+1:Rd)[63:16] <- -(operand1*operand2); # Rd[15:0] <- 0; # Syntax: mulnwh.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0010 100Y dddd # Assumption: t = 1, b = 0 using Y :MULNWH.D erd0, rx9, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0x2 & eop5_3=0x4 & selectorxy4_2 & erd0 & rdPlus1)) & operand2 { neg1:8 = 0xffffffffffffffff; multiply:8 = (neg1 * (sext(rx9 * operand2))); rdPlus1 = multiply[32,32]; erd0 = ((zext(multiply[16,16]) << 16) & 0xffff0000); } #--------------------------------------------------------------------- # SATADD.W - Saturated Add of Words # I. 
{d, x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- macro addSatWflags(RX, RY, TEMP, RD){ #Q = (RX[31,1] && RY[31,1] && !TEMP[31,1]) || # (!RX[31,1] && !RY[31,1] && TEMP[31,1]) || Q Q = sborrow(RX, RY); #V = (RX[31,1] && RY[31,1] && !TEMP[31,1]) || # (!RX[31,1] && !RY[31,1] && TEMP[31,1]) V = sborrow(RX, RY); NZSTATUS(RD); C = 0x0; } # SATADD.W Format I # Operation: temp <- Rx + Ry; # If(Rx[31] && Ry[31] && ~temp[31]) || (~Rx[31] && ~Ry[31] && temp[31]) then # If Rx[31] == 0 then # Rd <- 0x7fffffff; # else # Rd <- 0x80000000; # else # Rd <- temp; # Syntax: satadd.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0000 1100 dddd :SATADD.W erd0, rx9, ry0 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop4_8=0xc & erd0)) { temp:4 = rx9 + ry0; local if_state_1 = ((rx9[31,1] && ry0[31,1] && ~temp[31,1]) || (~rx9[31,1] && ~ry0[31,1] && temp[31,1])); local if_state_2 = (rx9[31,1] == 0x0); local else_state_1 = !if_state_1; local else_state_2 = !if_state_2; erd0 = ((zext(if_state_1 * if_state_2) * (0x7fffffff)) + (zext(if_state_1 * else_state_2) * (0x80000000)) + (zext(else_state_1) * temp)); addSatWflags(rx9, ry0, temp, erd0); } #--------------------------------------------------------------------- # SATADD.H - Saturated Add of HalfWords # I. 
{d, x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- macro addSatHflags(RX, RY, TEMP, RD){ #Q = (RX[15,1] && RY[15,1] && !TEMP[15,1]) || # (!RX[15,1] && !RY[15,1] && TEMP[15,1]) || Q Q = sborrow(RX[0,15], RY[0,15]); #V = (RX[15,1] && RY[15,1] && !TEMP[15,1]) || # (!RX[15,1] && !RY[15,1] && TEMP[15,1]) V = sborrow(RX[0,15], RY[0,15]); NZSTATUS(RD[0,15]); C = 0x0; } # SATADD.H Format I # Operation: temp <- Rx + Ry; # If(Rx[31] && Ry[31] && ~temp[31]) || (~Rx[31] && ~Ry[31] && temp[31]) then # If Rx[31] == 0 then # Rd <- 0x00007fff; # else # Rd <- 0xffff8000; # else # Rd <- temp; # Syntax: satadd.h Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0010 1100 dddd :SATADD.H erd0, rx9, ry0 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop4_8=0x2c & erd0)) { temp:4 = zext(rx9[0,15]) + zext(ry0[0,15]); local if_state_1 = ((rx9[15,1] && ry0[15,1] && ~temp[15,1]) || (~rx9[15,1] && ~ry0[15,1] && temp[15,1])); local if_state_2 = (rx9[15,1] == 0x0); local else_state_1 = !if_state_1; local else_state_2 = !if_state_2; erd0 = ((zext(if_state_1 * if_state_2) * (0x00007fff)) + (zext(if_state_1 * else_state_2) * (0xffff8000)) + (zext(else_state_1) * zext(temp[0,15]))); addSatHflags(rx9, ry0, temp, erd0); } #--------------------------------------------------------------------- # SATSUB.W - Saturated Subtract of Words # I. {d, x, y} -> {0, 1, ..., 15} # II. 
{d, s} -> {0, 1, ..., 15} # imm -> {-32768, -32767, ..., 32767} #--------------------------------------------------------------------- macro subSatWflags(OP1, OP2, TEMP, RD){ #Q = (OP1[31,1] && !OP2[31,1] && !TEMP[31,1]) || # (!OP1[31,1] && OP2[31,1] && TEMP[31,1]) || Q Q = sborrow(OP1, OP2); #V = (OP1[31,1] && !OP2[31,1] && !TEMP[31,1]) || # (!OP1[31,1] && OP2[31,1] && TEMP[31,1]) V = sborrow(OP1, OP2); NZSTATUS(RD); C = 0x0; } # SATSUB.W Format I # Operation: OP1 = Rx, OP2 = Ry # temp <- Rx - Ry; # If(OP1[31] && ~OP2[31] && ~temp[31]) || (~OP1[31] && OP2[31] && temp[31]) then # If OP1[31] == 0 then # Rd <- 0x7fffffff; # else # Rd <- 0x80000000; # else # Rd <- temp; # Syntax: satsub.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0001 1100 dddd :SATSUB.W erd0, rx9, ry0 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop4_8=0x1c & erd0)) { temp:4 = rx9 - ry0; local if_state_1 = ((rx9[31,1] && ~ry0[31,1] && ~temp[31,1]) || (~rx9[31,1] && ry0[31,1] && temp[31,1])); local if_state_2 = (rx9[31,1] == 0x0); local else_state_1 = !if_state_1; local else_state_2 = !if_state_2; erd0 = ((zext(if_state_1 * if_state_2) * (0x7fffffff)) + (zext(if_state_1 * else_state_2) * (0x80000000)) + (zext(else_state_1) * temp)); subSatWflags(rx9, ry0, temp, erd0); } # SATSUB.W Format II # Operation: OP1 = Rx, OP2 = sext(imm16) # temp <- Rx - Ry; # If(OP1[31] && ~OP2[31] && ~temp[31]) || (~OP1[31] && OP2[31] && temp[31]) then # If OP1[31] == 0 then # Rd <- 0x7fffffff; # else # Rd <- 0x80000000; # else # Rd <- temp; # Syntax: satsub.w Rd, Rx, Ry # 111x xxx0 1101 dddd iiii iiii iiii iiii :SATSUB.W rd0, rx9, simm16 is ((op13_3=0x7 & rx9 & op4_5=0xd & rd0) ; (simm16)) { temp:4 = rx9 - simm16; simm16Masked:4 = (simm16 & 0x80000000); local if_state_1 = ((rx9[31,1] && ~simm16Masked[31,1] && ~temp[31,1]) || (~rx9[31,1] && simm16Masked[31,1] && temp[31,1])); local if_state_2 = (rx9[31,1] == 0x0); local else_state_1 = !if_state_1; local else_state_2 = !if_state_2; rd0 = ((zext(if_state_1 * 
if_state_2) * (0x7fffffff)) + (zext(if_state_1 * else_state_2) * (0x80000000)) + (zext(else_state_1) * temp)); subSatWflags(rx9, simm16, temp, rd0); } #--------------------------------------------------------------------- # SATSUB.H - Saturated Subtract of Halfwords # I. {d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- macro subSatHflags(RX, RY, TEMP, RD){ #Q = (RX[15,1] && !RY[15,1] && !TEMP[15,1]) || # (!RX[15,1] && RY[15,1] && TEMP[15,1]) || Q Q = sborrow(RX[0,15], RY[0,15]); #V = (RX[15,1] && !RY[15,1] && !TEMP[15,1]) || # (!RX[15,1] && RY[15,1] && TEMP[15,1]) V = sborrow(RX[0,15], RY[0,15]); NZSTATUS(RD[0,15]); C = 0x0; } # SATSUB.H Format I # Operation: temp <- Rx - Ry; # If(Rx[15] && ~Ry[15] && ~temp[15]) || (~Rx[15] && Ry[15] && temp[15]) then # If Rx[15] == 0 then # Rd <- 0x00007fff; # else # Rd <- 0xffff8000; # else # Rd <- temp; # Syntax: satsub.h Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0011 1100 dddd :SATSUB.H erd0, rx9, ry0 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop4_8=0x3c & erd0)) { temp:4 = zext(rx9[0,15]) - zext(ry0[0,15]); local if_state_1 = ((rx9[15,1] && ~ry0[15,1] && ~temp[15,1]) || (~rx9[15,1] && ry0[15,1] && temp[15,1])); local if_state_2 = (rx9[15,1] == 0x0); local else_state_1 = !if_state_1; local else_state_2 = !if_state_2; erd0 = ((zext(if_state_1 * if_state_2) * (0x00007fff)) + (zext(if_state_1 * else_state_2) * (0xffff8000)) + (zext(else_state_1) * sext(temp[0,15]))); subSatHflags(rx9, ry0, temp, erd0); } #--------------------------------------------------------------------- # SATRNDS - Saturate with Rounding Signed # I. {d} -> {0, 1, ..., 15} # {sa, bp} -> {0, 1, ..., 31} #--------------------------------------------------------------------- SAMINUS1_CALC: is (rd0 ; esa0_5=0x0) {RDMask:4 = (rd0 & 0x00000000); export RDMask;}#0 //sa should never be 0 for this if case! 
SAMINUS1_CALC: is (rd0 ; esa0_5=0x1) {RDMask:4 = (rd0 & 0x00000001); export RDMask;}#1 SAMINUS1_CALC: is (rd0 ; esa0_5=0x2) {RDMask:4 = (rd0 & 0x00000002); export RDMask;}#2 SAMINUS1_CALC: is (rd0 ; esa0_5=0x3) {RDMask:4 = (rd0 & 0x00000004); export RDMask;}#3 SAMINUS1_CALC: is (rd0 ; esa0_5=0x4) {RDMask:4 = (rd0 & 0x00000008); export RDMask;}#4 SAMINUS1_CALC: is (rd0 ; esa0_5=0x5) {RDMask:4 = (rd0 & 0x00000010); export RDMask;}#5 SAMINUS1_CALC: is (rd0 ; esa0_5=0x6) {RDMask:4 = (rd0 & 0x00000020); export RDMask;}#6 SAMINUS1_CALC: is (rd0 ; esa0_5=0x7) {RDMask:4 = (rd0 & 0x00000040); export RDMask;}#7 SAMINUS1_CALC: is (rd0 ; esa0_5=0x8) {RDMask:4 = (rd0 & 0x00000080); export RDMask;}#8 SAMINUS1_CALC: is (rd0 ; esa0_5=0x9) {RDMask:4 = (rd0 & 0x00000100); export RDMask;}#9 SAMINUS1_CALC: is (rd0 ; esa0_5=0xa) {RDMask:4 = (rd0 & 0x00000200); export RDMask;}#10 SAMINUS1_CALC: is (rd0 ; esa0_5=0xb) {RDMask:4 = (rd0 & 0x00000400); export RDMask;}#11 SAMINUS1_CALC: is (rd0 ; esa0_5=0xc) {RDMask:4 = (rd0 & 0x00000800); export RDMask;}#12 SAMINUS1_CALC: is (rd0 ; esa0_5=0xd) {RDMask:4 = (rd0 & 0x00001000); export RDMask;}#13 SAMINUS1_CALC: is (rd0 ; esa0_5=0xe) {RDMask:4 = (rd0 & 0x00002000); export RDMask;}#14 SAMINUS1_CALC: is (rd0 ; esa0_5=0xf) {RDMask:4 = (rd0 & 0x00004000); export RDMask;}#15 SAMINUS1_CALC: is (rd0 ; esa0_5=0x10) {RDMask:4 = (rd0 & 0x00008000); export RDMask;}#16 SAMINUS1_CALC: is (rd0 ; esa0_5=0x11) {RDMask:4 = (rd0 & 0x00010000); export RDMask;}#17 SAMINUS1_CALC: is (rd0 ; esa0_5=0x12) {RDMask:4 = (rd0 & 0x00020000); export RDMask;}#18 SAMINUS1_CALC: is (rd0 ; esa0_5=0x13) {RDMask:4 = (rd0 & 0x00040000); export RDMask;}#19 SAMINUS1_CALC: is (rd0 ; esa0_5=0x14) {RDMask:4 = (rd0 & 0x00080000); export RDMask;}#20 SAMINUS1_CALC: is (rd0 ; esa0_5=0x15) {RDMask:4 = (rd0 & 0x00100000); export RDMask;}#21 SAMINUS1_CALC: is (rd0 ; esa0_5=0x16) {RDMask:4 = (rd0 & 0x00200000); export RDMask;}#22 SAMINUS1_CALC: is (rd0 ; esa0_5=0x17) {RDMask:4 = (rd0 & 
0x00400000); export RDMask;}#23 SAMINUS1_CALC: is (rd0 ; esa0_5=0x18) {RDMask:4 = (rd0 & 0x00800000); export RDMask;}#24 SAMINUS1_CALC: is (rd0 ; esa0_5=0x19) {RDMask:4 = (rd0 & 0x01000000); export RDMask;}#25 SAMINUS1_CALC: is (rd0 ; esa0_5=0x1a) {RDMask:4 = (rd0 & 0x02000000); export RDMask;}#26 SAMINUS1_CALC: is (rd0 ; esa0_5=0x1b) {RDMask:4 = (rd0 & 0x04000000); export RDMask;}#27 SAMINUS1_CALC: is (rd0 ; esa0_5=0x1c) {RDMask:4 = (rd0 & 0x08000000); export RDMask;}#28 SAMINUS1_CALC: is (rd0 ; esa0_5=0x1d) {RDMask:4 = (rd0 & 0x10000000); export RDMask;}#29 SAMINUS1_CALC: is (rd0 ; esa0_5=0x1e) {RDMask:4 = (rd0 & 0x20000000); export RDMask;}#30 SAMINUS1_CALC: is (rd0 ; esa0_5=0x1f) {RDMask:4 = (rd0 & 0x40000000); export RDMask;}#31 GET_TEMP_FINAL_VAL: is (rd0 ; esa0_5) & SAMINUS1_CALC { sa:4 = (esa0_5 & 0xffffffff); local if_state_1 = (sa != 0x0); tempRDShiftTempVal:4 = rd0 >> esa0_5; Rnd:4 = SAMINUS1_CALC; tempRDShiftFinalVal:4 = (tempRDShiftTempVal + (zext(if_state_1) * Rnd)); export tempRDShiftFinalVal; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x0 & esa0_5)){ if_state_2_calc:1 = 0x1; export if_state_2_calc;} GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,1]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x2 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,2]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x3 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,3]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x4 & esa0_5)) & GET_TEMP_FINAL_VAL 
{ tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,4]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x5 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,5]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x6 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,6]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x7 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,7]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x8 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,8]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x9 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,9]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0xa & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,10]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0xb & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,11]); local if_state_2_calc = (tempRDShiftFinal == 
tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0xc & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,12]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0xd & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,13]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0xe & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,14]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0xf & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,15]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x10 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,16]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x11 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,17]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x12 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,18]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x13 & esa0_5)) & GET_TEMP_FINAL_VAL { 
tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,19]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x14 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,20]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x15 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 =sext(tempRDShiftFinal[0,21]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x16 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,22]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x17 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,23]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x18 & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,24]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x19 & esa0_5)) & GET_TEMP_FINAL_VAL #?NOT SURE WHY I"M GETTING "Unnecessary SEXT warning from here on? 
{ tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,25];#sext(tempRDShiftFinal[0,25]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1a & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,26];#sext(tempRDShiftFinal[0,26]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1b & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,27];#sext(tempRDShiftFinal[0,27]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1c & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,28];#sext(tempRDShiftFinal[0,28]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1d & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,29];#sext(tempRDShiftFinal[0,29]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1e & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,30];#sext(tempRDShiftFinal[0,30]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC: is (rd0 ; (ebp5_5=0x1f & esa0_5)) & GET_TEMP_FINAL_VAL { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,31]; local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } # SATRNDS Format I # Operation: temp <- Rd >> sa; # if(sa != 0) # 
# Rnd = Rd[sa - 1];
# temp = temp + Rnd;
# if((temp == sext(temp[bp - 1:0])) || (bp == 0))
#   Rd <- temp;
# else
#   if(temp[31] == 1)
#     Rd <- -2^(bp - 1);
#   else
#     Rd <- 2^(bp - 1);
# Syntax: satrnds Rd >> sa, bp
# 1111 0011 1011 dddd 0000 00bb bbbs ssss (where b = bp, s = sa)
#
# NOTE(review): the shift here is a RIGHT shift (see the operation/syntax
# above; avr32a_dsp_operations2.sinc prints " >> " for the same mnemonic).
# The display previously printed "<<".
# NOTE(review): '**' below is not a standard SLEIGH operator; this file
# appears to be superseded by the stubbed implementations in
# avr32a_dsp_operations2.sinc -- confirm whether this file is compiled.
:SATRNDS rd0^">>"^esa0_5, ebp5_5 is ((op13_3=0x7 & op9_4=0x9 & op4_5=0x1B & rd0) ; (eop12_4=0x0 & eop10_2=0x0 & ebp5_5 & esa0_5)) & GET_IF_STATE2_CALC & GET_TEMP_FINAL_VAL
{
    # bp and bp-1 widened to 32 bits
    bp:4 = (ebp5_5 & 0xffffffff);
    bpMinus1:4 = ((ebp5_5 - 1) & 0xffffffff);
    # in-range test ((temp == sext(temp[bp-1:0])) || bp == 0), per-bp table
    local if_state_2 = GET_IF_STATE2_CALC;
    # temp = (Rd >> sa) plus rounding, computed by the shared subtable
    temp:4 = GET_TEMP_FINAL_VAL;
    # sign of temp selects negative vs positive clamp value
    local if_state_3 = (temp[31,1] == 1);
    bpM1SecondPowerNeg:4 = ((-2)**bpMinus1);
    bpM1SecondPowerPos:4 = (2**bpMinus1);
    local else_state_2 = !if_state_2;
    local else_state_3 = !if_state_3;
    # branch-free select: in-range temp / negative clamp / positive clamp
    rd0 = ((zext(if_state_2) * temp) + (zext(else_state_2 * if_state_3) * bpM1SecondPowerNeg) + (zext(else_state_2 * else_state_3) * bpM1SecondPowerPos));
}

#---------------------------------------------------------------------
# SATRNDU - Saturate with Rounding Unsigned
# I. {d} -> {0, 1, ..., 15}
#    {sa, bp} -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# SATRNDU Format I
# Operation: temp <- Rd >> sa;
#   if(sa != 0)
#     Rnd = Rd[sa - 1];
#     temp = temp + Rnd;
#   if((temp == zext(temp[bp - 1:0])) || (bp == 0))
#     Rd <- temp;
#   else
#     if(temp[31] == 1)
#       Rd <- 0x00000000;
#     else
#       Rd <- 2^(bp - 1);
# Syntax: satrndu Rd >> sa, bp
# 1111 0011 1011 dddd 0000 01bb bbbs ssss (where b = bp, s = sa)
#
# NOTE(review): right shift in the display (was "<<").
# NOTE(review): the in-range test reuses the signed (sext-based)
# GET_IF_STATE2_CALC table, while the operation above calls for zext --
# verify against the AVR32 manual before relying on this semantics.
:SATRNDU rd0^">>"^esa0_5, ebp5_5 is ((op13_3=0x7 & op9_4=0x9 & op4_5=0x1B & rd0) ; (eop12_4=0x0 & eop10_2=0x1 & ebp5_5 & esa0_5)) & GET_IF_STATE2_CALC & GET_TEMP_FINAL_VAL
{
    bp:4 = (ebp5_5 & 0xffffffff);
    bpMinus1:4 = ((ebp5_5 - 1) & 0xffffffff);
    local if_state_2 = GET_IF_STATE2_CALC;
    temp:4 = GET_TEMP_FINAL_VAL;
    local if_state_3 = (temp[31,1] == 1);
    zero32BitVal:4 = 0x00000000;
    bpM1SecondPowerPos:4 = (2**bpMinus1);
    local else_state_2 = !if_state_2;
    local else_state_3 = !if_state_3;
    # branch-free select: in-range temp / 0 / positive clamp
    rd0 = ((zext(if_state_2) * temp) + (zext(else_state_2 * if_state_3) * zero32BitVal) + (zext(else_state_2 * else_state_3) * bpM1SecondPowerPos));
}

#---------------------------------------------------------------------
# SATS - Saturate Signed
# I.
{d} -> {0, 1, ..., 15} # {sa, bp} -> {0, 1, ..., 31} #--------------------------------------------------------------------- GET_TEMP_FINAL_VAL_NR: is (rd0 ; esa0_5) & SAMINUS1_CALC { tempRDShiftFinalVal:4 = rd0 >> esa0_5; export tempRDShiftFinalVal; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x0 & esa0_5)){ if_state_2_calc:1 = 0x1; export if_state_2_calc;} GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,1]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x2 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,2]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x3 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,3]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x4 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,4]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x5 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,5]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x6 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,6]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export 
if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x7 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,7]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x8 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,8]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x9 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,9]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0xa & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,10]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0xb & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,11]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0xc & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,12]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0xd & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,13]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0xe & esa0_5)) & 
GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,14]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0xf & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,15]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x10 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,16]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x11 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,17]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x12 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,18]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x13 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,19]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x14 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,20]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x15 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; 
tempRDShiftFinalMask:4 =sext(tempRDShiftFinal[0,21]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x16 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,22]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x17 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,23]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x18 & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = sext(tempRDShiftFinal[0,24]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x19 & esa0_5)) & GET_TEMP_FINAL_VAL_NR #?NOT SURE WHY I"M GETTING "Unnecessary SEXT warning from here on? 
{ tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,25];#sext(tempRDShiftFinal[0,25]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1a & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,26];#sext(tempRDShiftFinal[0,26]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1b & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,27];#sext(tempRDShiftFinal[0,27]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1c & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,28];#sext(tempRDShiftFinal[0,28]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1d & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,29];#sext(tempRDShiftFinal[0,29]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1e & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,30];#sext(tempRDShiftFinal[0,30]); local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } GET_IF_STATE2_CALC_NR: is (rd0 ; (ebp5_5=0x1f & esa0_5)) & GET_TEMP_FINAL_VAL_NR { tempRDShiftFinal:4 = GET_TEMP_FINAL_VAL_NR; tempRDShiftFinalMask:4 = tempRDShiftFinal[0,31]; local if_state_2_calc = (tempRDShiftFinal == tempRDShiftFinalMask); export if_state_2_calc; } #SATS 
# Format I
# Operation: temp <- Rd >> sa;
#   if((temp == sext(temp[bp - 1:0])) || (bp == 0))
#     Rd <- temp;
#   else
#     if(temp[31] == 1)
#       Rd <- -2^(bp - 1);
#     else
#       Rd <- 2^(bp - 1);
# Syntax: sats Rd >> sa, bp
# 1111 0001 1011 dddd 0000 00bb bbbs ssss (where b = bp, s = sa)
#
# NOTE(review): right shift in the display (was "<<", contradicting the
# operation/syntax above; avr32a_dsp_operations2.sinc prints " >> ").
# NOTE(review): '**' below is not a standard SLEIGH operator; see the note
# on SATRNDS -- this file may be superseded by avr32a_dsp_operations2.sinc.
:SATS rd0^">>"^esa0_5, ebp5_5 is ((op13_3=0x7 & op9_4=0x8 & op4_5=0x1B & rd0) ; (eop12_4=0x0 & eop10_2=0x0 & ebp5_5 & esa0_5)) & GET_IF_STATE2_CALC_NR & GET_TEMP_FINAL_VAL_NR
{
    # bp and bp-1 widened to 32 bits
    bp:4 = (ebp5_5 & 0xffffffff);
    bpMinus1:4 = ((ebp5_5 - 1) & 0xffffffff);
    # in-range test, per-bp table (no-rounding variant)
    local if_state_2 = GET_IF_STATE2_CALC_NR;
    # temp = Rd >> sa (no rounding)
    temp:4 = GET_TEMP_FINAL_VAL_NR;
    local if_state_3 = (temp[31,1] == 1);
    bpM1SecondPowerNeg:4 = ((-2)**bpMinus1);
    bpM1SecondPowerPos:4 = (2**bpMinus1);
    local else_state_2 = !if_state_2;
    local else_state_3 = !if_state_3;
    # branch-free select: in-range temp / negative clamp / positive clamp
    rd0 = ((zext(if_state_2) * temp) + (zext(else_state_2 * if_state_3) * bpM1SecondPowerNeg) + (zext(else_state_2 * else_state_3) * bpM1SecondPowerPos));
}

#---------------------------------------------------------------------
# SATU - Saturate Unsigned
# I. {d} -> {0, 1, ..., 15}
#    {sa, bp} -> {0, 1, ..., 31}
#---------------------------------------------------------------------
# SATU Format I
# Operation: temp <- Rd >> sa;
#   if((temp == zext(temp[bp - 1:0])) || (bp == 0))
#     Rd <- temp;
#   else
#     if(temp[31] == 1)
#       Rd <- 0x00000000;
#     else
#       Rd <- 2^(bp - 1);
# Syntax: satu Rd >> sa, bp
# 1111 0001 1011 dddd 0000 01bb bbbs ssss (where b = bp, s = sa)
#
# NOTE(review): header fixed -- it previously said "satrndu" and encoding
# "1111 0011 1011" (a copy/paste of SATRNDU); op9_4=0x8 encodes 0001.
# NOTE(review): right shift in the display (was "<<").
# NOTE(review): the in-range test reuses the signed (sext-based)
# GET_IF_STATE2_CALC_NR table, while the operation above calls for zext --
# verify against the AVR32 manual before relying on this semantics.
:SATU rd0^">>"^esa0_5, ebp5_5 is ((op13_3=0x7 & op9_4=0x8 & op4_5=0x1B & rd0) ; (eop12_4=0x0 & eop10_2=0x1 & ebp5_5 & esa0_5)) & GET_IF_STATE2_CALC_NR & GET_TEMP_FINAL_VAL_NR
{
    bp:4 = (ebp5_5 & 0xffffffff);
    bpMinus1:4 = ((ebp5_5 - 1) & 0xffffffff);
    local if_state_2 = GET_IF_STATE2_CALC_NR;
    temp:4 = GET_TEMP_FINAL_VAL_NR;
    local if_state_3 = (temp[31,1] == 1);
    zero32BitVal:4 = 0x00000000;
    bpM1SecondPowerPos:4 = (2**bpMinus1);
    local else_state_2 = !if_state_2;
    local else_state_3 = !if_state_3;
    # branch-free select: in-range temp / 0 / positive clamp
    rd0 = ((zext(if_state_2) * temp) + (zext(else_state_2 * if_state_3) * zero32BitVal) + (zext(else_state_2 * else_state_3) * bpM1SecondPowerPos));
}

#---------------------------------------------------------------------
# SUBHH.W - Subtract Halfwords into Word
# I. {d, x, y} -> {0, 1, ..., 15}
#    part -> {t, b}
#---------------------------------------------------------------------
# SUBHH.W Format I
# Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]);
#            If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]);
#            Rd <- operand1 - operand2;
# Syntax: subhh.w Rd, Rx, Ry
# 111x xxx0 0000 yyyy 0000 1111 00XY dddd
# Assumption: t = 1, b = 0 using XY
:SUBHH.W erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0xf & eop6_2=0x0 & selectorxy4_2 & erd0)) & operand1 & operand2
{
    erd0 = operand1 - operand2;
    subflags(operand1, operand2, erd0);
}

#---------------------------------------------------------------------
# MULSATHH.W - Multiply Halfwords with Saturation into Halfword
# I.
{d, x, y} -> {0, 1, ..., 15} # part -> {t, b} #--------------------------------------------------------------------- # MULSATHH.W Format I # Operation: If(Rx-part==t) then operand1=SE(Rx[31:16]) else operand1=SE(Rx[15:0]); # If(Ry-part==t) then operand2=SE(Ry[31:16]) else operand2=SE(Ry[15:0]); # If(operand1 == operand2 == 0x8000) # Rd <- 0x7FFF; # else # Rd <- sext((operand1 * operand2) >> 15); # Syntax: mulsahh.w Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1000 10XY dddd # Assumption: t = 1, b = 0 using XY :MULSATHH.W erd0, rx9^operand1, ry0^operand2 is ((op13_3=0x7 & rx9 & op4_5=0x0 & ry0) ; (eop12_4=0x0 & eop8_4=0x8 & eop6_2=0x2 & selectorxy4_2 & erd0)) & operand1 & operand2 { compareValx8000:4 = 0x8000; local if_state_3_a = (operand1 == compareValx8000); local if_state_3_b = (operand2 == compareValx8000); local if_state_3 = (if_state_3_a == if_state_3_b); local else_state_3 = !if_state_3; sat_clamp_val:4 = 0x7FFF; nonSatMultVal:4 = ((operand1 * operand2) >> 15); erd0 = ((zext(if_state_3) * sat_clamp_val) + (zext(else_state_3) * nonSatMultVal)); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_dsp_operations2.sinc ================================================ #--------------------------------------------------------------------- # 8.3.4 DSP Operations # # Note: all DSP operations are stubbed out with custom pcode ops, # because their literal implementations are generally too difficult. 
#--------------------------------------------------------------------- macro satdspmulh(OP1, OP2, RES, RND, TMP1) { TMP1 = (OP1 == 0x8000) & (OP2 == 0x8000); Q = Q | TMP1; RES = (zext(TMP1) * 0x3FFF8000) + (zext(TMP1 == 0) * ((sext(OP1) * sext(OP2)) + zext(RND))); } macro satdspsh(RES) { V = (RES s> 0x00007FFF) | (RES s< 0xFFFF8000); Q = V | Q; RES = (0x00007FFF * zext(RES s> 0x00007FFF)) + (RES * zext(RES s< 0x00008000) * zext(RES s>= 0xFFFF8000)) + (0x00008000 * zext(RES s< 0xFFFF8000)); } macro satdspsw(RES) { V = (RES s> 0x000000007FFFFFFF) | (RES s< 0xFFFFFFFF80000000); Q = V | Q; RES = (0x000000007FFFFFFF * zext(RES s> 0x000000007FFFFFFF)) + (RES * zext(RES s< 0x0000000080000000) * zext(RES s>= 0xFFFFFFFF80000000)) + (0x0000000080000000 * zext(RES s< 0xFFFFFFFF80000000)); } XPART: ":T" is ctx_usex & xpart=0x1 { tmp:4 = ctx_usex; tmp = tmp >> 16; tmpa:2 = tmp:2; tmpb:4 = sext(tmpa); export *:4 tmpb; } XPART: ":B" is ctx_usex & xpart=0x0 { tmp:4 = ctx_usex; tmp = tmp & 0xFFFF; tmpa:2 = tmp:2; tmpb:4 = sext(tmpa); export *:4 tmpb; } YPART: ":T" is ctx_usey & ypart=0x1 { tmp:4 = ctx_usey; tmp = tmp >> 16; tmpa:2 = tmp:2; tmpb:4 = sext(tmpa); export *:4 tmpb; } YPART: ":B" is ctx_usey & ypart=0x0 { tmp:4 = ctx_usey; tmp = tmp & 0xFFFF; tmpa:2 = tmp:2; tmpb:4 = sext(tmpa); export *:4 tmpb; } :ADDHH.W erd0, rx9^XPART, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x38 & XPART & YPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { erd0 = XPART + YPART; addflags(XPART, YPART, erd0); } :MACHH.D erd0, rx9^XPART, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x16 & XPART & YPART & erd0 & erd0a & ctx_rdplus [ctx_savex=rx9; ctx_savey=ry0; ctx_rdsave=erd0a+1; ] { tmp:4 = XPART * YPART; tmp64a:8 = zext(tmp); tmp64b:8 = zext(erd0); tmp64c:8 = zext(ctx_rdplus); tmp64b = (tmp64c << 32) | tmp64b; tmp64a = (tmp64a << 16) + tmp64b; tmp64b = tmp64a >> 32; erd0 = tmp64a:4 & 0xFFFF0000; ctx_rdplus= tmp64b:4; } :MACHH.W erd0, rx9^XPART, ry0^YPART is op13_3=0x7 & 
op4_5=0x0 & rx9 & ry0 ; eop6_10=0x12 & XPART & YPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { erd0 = erd0 + (XPART * YPART); } :MACSATHH.W erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x1a & PXPART & PYPART & erd0 { TMPX:2 = PXPART; TMPY:2 = PYPART; RES:4 = 0; RND:2 = 0; TMP:1 = 0; satdspmulh(TMPX,TMPY,RES,RND,TMP); RES = RES << 1; BIG:8 = sext(RES) + sext(erd0); satdspsw(BIG); QTOSR(); } :MACWH.D erd0, rx9, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop5_11=0x64 & YPART & erd0 & erd0a & ctx_rdplus [ctx_savey=ry0; ctx_rdsave=erd0a+1; ] { tmp64a:8 = sext(rx9); tmp64b:8 = sext(YPART); tmp64a = tmp64a * tmp64b; tmp64b = zext(ctx_rdplus); tmp64c:8 = zext(erd0); tmp64b = (tmp64b << 32) | tmp64c; tmp64b = tmp64b + (tmp64a << 16); tmp64a = tmp64b >> 32; erd0 = tmp64b:4 & 0xFFFF0000; ctx_rdplus = tmp64a:4; } :MULHH.W erd0, rx9^XPART, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x1e & XPART & YPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { erd0 = XPART * YPART; } :MULNHH.W erd0, rx9^XPART, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x6 & XPART & YPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { erd0 = XPART * YPART; if (erd0 == 0) goto inst_next; erd0 = (~erd0) + 1; } :MULNWH.D erd0, rx9, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop5_11=0x14 & YPART & erd0 & erd0a & ctx_rdplus [ctx_savey=ry0; ctx_rdsave=erd0a+1; ] { tmp64a:8 = sext(rx9); tmp64b:8 = sext(YPART); tmp64a = tmp64a * tmp64b; erd0 = 0; ctx_rdplus = 0; if (tmp64a == 0) goto inst_next; tmp64a = (~tmp64a) + 1; erd0 = tmp64a:4; erd0 = erd0 << 16; tmp64a = tmp64a s>> 16; ctx_rdplus = tmp64a:4; } :MULSATHH.H erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x22 & PXPART & PYPART & erd0 { TMPX:2 = PXPART; TMPY:2 = PYPART; RES:4 = 0; RND:2 = 0; TMP:1 = 0; satdspmulh(TMPX,TMPY,RES,RND,TMP); RES = RES >> 15; erd0 = sext(RES:2); QTOSR(); } :MULSATHH.W erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; 
eop6_10=0x26 & PXPART & PYPART & erd0 { TMPX:2 = PXPART; TMPY:2 = PYPART; RES:4 = 0; RND:2 = 0; TMP:1 = 0; satdspmulh(TMPX,TMPY,RES,RND,TMP); erd0 = RES << 1; QTOSR(); } :MULSATRNDHH.H erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop6_10=0x2a & PXPART & PYPART & erd0 { TMPX:2 = PXPART; TMPY:2 = PYPART; RES:4 = 0; RND:2 = 0x4000; TMP:1 = 0; satdspmulh(TMPX,TMPY,RES,RND,TMP); RES = RES >> 15; erd0 = sext(RES:2); QTOSR(); } :MULSATRNDWH.W erd0, rx9, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop5_11=0x5c & YPART & erd0 { SAT:1 = (rx9 == 0x80000000) && (YPART == 0xFFFF8000); TMP:8 = ((sext(rx9) * sext(YPART)) + 0x4000) s>> 15; erd0 = (zext(SAT) * 0x7FFFFFFF) + zext(SAT == 0) * TMP:4; } :MULSATWH.W erd0, rx9, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop5_11=0x74 & YPART & erd0 { SAT:1 = (rx9 == 0x80000000) && (YPART == 0xFFFF8000); TMP:8 = (sext(rx9) * sext(YPART)) s>> 15; erd0 = (zext(SAT) * 0x7FFFFFFF) + zext(SAT == 0) * TMP:4; } :MULWH.D erd0, rx9, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop5_11=0x6c & YPART & erd0 & erd0a & ctx_rdplus [ctx_savey=ry0; ctx_rdsave=erd0a+1; ] { tmp64a:8 = sext(rx9); tmp64b:8 = sext(YPART); tmp64a = tmp64a * tmp64b; erd0 = tmp64a:4; erd0 = erd0 << 16; tmp64a = tmp64a s>> 16; ctx_rdplus = tmp64a:4; } :SATADD.H erd0, rx9, ry0 is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop4_12=0x2c & erd0 { TMP:4 = zext(rx9:2) + zext(ry0:2); satdspsh(TMP); erd0 = sext(TMP:2); N = (erd0 & 0x8000) != 0; Z = (erd0 == 0); C = 0; CZNVQTOSR(); } :SATADD.W erd0, rx9, ry0 is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop4_12=0xc & erd0 { TMP:8 = zext(rx9) + zext(ry0); satdspsw(TMP); erd0 = TMP:4; N = (erd0 & 0x80000000) != 0; Z = (erd0 == 0); C = 0; CZNVQTOSR(); } SATM: is ebp5_5=0 { tmp:4 = 0x00000000; export tmp; } SATM: is ebp5_5=1 { tmp:4 = 0xFFFFFFFF; export tmp; } SATM: is ebp5_5=2 { tmp:4 = 0xFFFFFFFE; export tmp; } SATM: is ebp5_5=3 { tmp:4 = 0xFFFFFFFC; export tmp; } SATM: is ebp5_5=4 { tmp:4 = 0xFFFFFFF8; export 
tmp; }
SATM: is ebp5_5=5  { tmp:4 = 0xFFFFFFF0; export tmp; }
SATM: is ebp5_5=6  { tmp:4 = 0xFFFFFFE0; export tmp; }
SATM: is ebp5_5=7  { tmp:4 = 0xFFFFFFC0; export tmp; }
SATM: is ebp5_5=8  { tmp:4 = 0xFFFFFF80; export tmp; }
SATM: is ebp5_5=9  { tmp:4 = 0xFFFFFF00; export tmp; }
SATM: is ebp5_5=10 { tmp:4 = 0xFFFFFE00; export tmp; }
SATM: is ebp5_5=11 { tmp:4 = 0xFFFFFC00; export tmp; }
SATM: is ebp5_5=12 { tmp:4 = 0xFFFFF800; export tmp; }
SATM: is ebp5_5=13 { tmp:4 = 0xFFFFF000; export tmp; }
SATM: is ebp5_5=14 { tmp:4 = 0xFFFFE000; export tmp; }
SATM: is ebp5_5=15 { tmp:4 = 0xFFFFC000; export tmp; }
SATM: is ebp5_5=16 { tmp:4 = 0xFFFF8000; export tmp; }
SATM: is ebp5_5=17 { tmp:4 = 0xFFFF0000; export tmp; }
SATM: is ebp5_5=18 { tmp:4 = 0xFFFE0000; export tmp; }
SATM: is ebp5_5=19 { tmp:4 = 0xFFFC0000; export tmp; }
SATM: is ebp5_5=20 { tmp:4 = 0xFFF80000; export tmp; }
SATM: is ebp5_5=21 { tmp:4 = 0xFFF00000; export tmp; }
SATM: is ebp5_5=22 { tmp:4 = 0xFFE00000; export tmp; }
SATM: is ebp5_5=23 { tmp:4 = 0xFFC00000; export tmp; }
SATM: is ebp5_5=24 { tmp:4 = 0xFF800000; export tmp; }
SATM: is ebp5_5=25 { tmp:4 = 0xFF000000; export tmp; }
SATM: is ebp5_5=26 { tmp:4 = 0xFE000000; export tmp; }
SATM: is ebp5_5=27 { tmp:4 = 0xFC000000; export tmp; }
SATM: is ebp5_5=28 { tmp:4 = 0xF8000000; export tmp; }
SATM: is ebp5_5=29 { tmp:4 = 0xF0000000; export tmp; }
SATM: is ebp5_5=30 { tmp:4 = 0xE0000000; export tmp; }
SATM: is ebp5_5=31 { tmp:4 = 0xC0000000; export tmp; }

# SATRNDS: saturate (Rd >> sa), with rounding, into a signed bp-bit range.
# SATM exports the negative clamp value -2^(bp-1) for the current bp.
:SATRNDS rd0^" >> "^esa0_5, ebp5_5^SATM is op4_12=0xf3b & rd0 ; eop10_6=0x0 & esa0_5 & ebp5_5 & SATM
{
    build SATM;
    BIT:1 = ebp5_5;
    BITA:1 = esa0_5;
    TMP:4 = rd0 s>> esa0_5;
    # rounding: add the single bit Rd[sa-1] when sa != 0
    # (was adding the in-place masked value rd0 & (1 << (sa-1)), i.e. 2^(sa-1))
    TMP = TMP + (zext(BITA != 0) * ((rd0 >> (esa0_5 - 1)) & 1));
    TMPA:4 = TMP << (32-ebp5_5);
    TMPB:4 = TMPA s>> (32-ebp5_5);
    # NSAT: value already fits in bp signed bits (or bp == 0) -> no saturation
    NSAT:1 = (TMP == TMPB) || (BIT == 0x0);
    TMPC:1 = (TMP & 0x80000000) != 0;
    # positive clamp is (2^(bp-1))-1; parenthesized so the '-1' applies to the
    # shifted result, not the shift amount (SLEIGH '+'/'-' bind tighter than '<<')
    rd0 = (TMP * zext(NSAT)) + (zext(NSAT == 0) * ((zext(TMPC) * SATM) + (((1 << (ebp5_5-1)) - 1) * zext(TMPC == 0))));
    # sticky saturation flag: set when saturation occurred (was inverted)
    Q = Q || (NSAT == 0);
    QTOSR();
}

# SATRNDU: saturate (Rd >> sa), with rounding, into an unsigned bp-bit range.
:SATRNDU rd0^" >> "^esa0_5, ebp5_5 is op4_12=0xf3b & rd0 ; eop10_6=0x1 & esa0_5 & ebp5_5
{
    BIT:1 = ebp5_5;
    BITA:1 = esa0_5;
    TMP:4 = rd0 >> esa0_5;
    # rounding: add the single bit Rd[sa-1] when sa != 0 (see SATRNDS note)
    TMP = TMP + (zext(BITA != 0) * ((rd0 >> (esa0_5 - 1)) & 1));
    TMPA:4 = TMP << (32-ebp5_5);
    TMPB:4 = TMPA >> (32-ebp5_5);
    NSAT:1 = (TMP == TMPB) || (BIT == 0x0);
    # unsigned: negative inputs clamp to 0, so only TMPC (non-negative) saturates up
    TMPC:1 = (TMP & 0x80000000) == 0;
    rd0 = (TMP * zext(NSAT)) + (zext(NSAT == 0) * zext(TMPC) * ((1 << ebp5_5) - 1));
    # sticky saturation flag: set when saturation occurred (was inverted)
    Q = Q || (NSAT == 0);
    QTOSR();
}

# SATS: saturate (Rd >> sa), no rounding, into a signed bp-bit range.
:SATS rd0^" >> "^esa0_5, ebp5_5^SATM is op4_12=0xf1b & rd0 ; eop10_6=0x0 & esa0_5 & ebp5_5 & SATM
{
    build SATM;
    BIT:1 = ebp5_5;
    TMP:4 = rd0 s>> esa0_5;
    TMPA:4 = TMP << (32-ebp5_5);
    TMPB:4 = TMPA s>> (32-ebp5_5);
    NSAT:1 = (TMP == TMPB) || (BIT == 0x0);
    TMPC:1 = (TMP & 0x80000000) != 0;
    # positive clamp is (2^(bp-1))-1; parenthesized as in SATRNDS above
    rd0 = (TMP * zext(NSAT)) + (zext(NSAT == 0) * ((zext(TMPC) * SATM) + (((1 << (ebp5_5-1)) - 1) * zext(TMPC == 0))));
    # sticky saturation flag: set when saturation occurred (was inverted)
    Q = Q || (NSAT == 0);
    QTOSR();
}

:SATSUB.H erd0, rx9, ry0 is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop4_12=0x2c & erd0
{
    TMP:4 = zext(rx9:2) + zext(ry0:2);
    satdspsh(TMP);
    erd0 = sext(TMP:2);
    N = (erd0 & 0x8000) != 0;
    Z = (erd0 == 0);
    C = 0;
    CZNVQTOSR();
}

:SATSUB.W erd0, rx9, ry0 is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ; eop4_12=0x1c & erd0
{
    TMP:8 = zext(rx9) - zext(ry0);
    satdspsw(TMP);
    erd0 = TMP:4;
    N = (erd0 & 0x80000000) != 0;
    Z = (erd0 == 0);
    C = 0;
    CZNVQTOSR();
}

:SATSUB.W rd0, rs9, simm16 is op13_3=0x7 & op4_5=0xd & rd0 & rs9 ; simm16
{
    TMPY:2 = simm16;
    TMP:8 = zext(rs9) - sext(TMPY);
    satdspsw(TMP);
    rd0 = TMP:4;
    N = (rd0 & 0x80000000) != 0;
    Z = (rd0 == 0);
    C = 0;
    CZNVQTOSR();
}

# SATU: saturate (Rd >> sa), no rounding, into an unsigned bp-bit range.
:SATU rd0^" >> "^esa0_5, ebp5_5 is op4_12=0xf1b & rd0 ; eop10_6=0x1 & esa0_5 & ebp5_5
{
    BIT:1 = ebp5_5;
    TMP:4 = rd0 >> esa0_5;
    TMPA:4 = TMP << (32-ebp5_5);
    TMPB:4 = TMPA >> (32-ebp5_5);
    NSAT:1 = (TMP == TMPB) || (BIT == 0x0);
    TMPC:1 = (TMP & 0x80000000) == 0;
    rd0 = (TMP * zext(NSAT)) + (zext(NSAT == 0) * zext(TMPC) * ((1 << ebp5_5) - 1));
    # sticky saturation flag: set when saturation occurred (was inverted)
    Q = Q || (NSAT == 0);
    QTOSR();
}

:SUBHH.W erd0, rx9^XPART, ry0^YPART is op13_3=0x7 & op4_5=0x0 & rx9 & ry0 ;
eop6_10=0x3c & XPART & YPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { erd0 = XPART - YPART; subflags(XPART, YPART, erd0); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_instruction_flow.sinc ================================================ #--------------------------------------------------------------------- # 8.3.8 INSTRUCTION FLOW #--------------------------------------------------------------------- #--------------------------------------------------------------------- # ACALL - Application Call # I. disp -> {0, 4, ..., 1020} #--------------------------------------------------------------------- # ACALL Format I: # Operation: LR <- PC + 2 # Syntax: acall disp # 1101 nnnn nnnn 0000 ACALLdisp: disp is disp4_8 [ disp = ACBA + (disp4_8 << 2); ] { export *:4 disp; } :ACALL ACALLdisp is op12_4=0xd & op0_4=0 & ACALLdisp { LR = inst_next; call ACALLdisp; } #--------------------------------------------------------------------- # RET{cond4} - Conditional Return from Subroutine # I. 
cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # s -> {0, 1, ..., 15} #--------------------------------------------------------------------- retCond4Sub: rs0 is rs0 & rs0=0xf { R12 = 0x1;} #PC (0xf) retCond4Sub: rs0 is rs0 & rs0=0xe { R12 = -0x1;} #LR (0xe) retCond4Sub: rs0 is rs0 & rs0=0xd { R12 = 0x0;} #SP (0xd) retCond4Sub: rs0 is rs0 & rs0=0xc {} #Else R12 is R12 retCond4Sub: rs0 is rs0 {R12 = rs0;} #Else #RET{Cond4} Format I: #Operation: # Conditional return from subroutine with move and test of return value: # if (Rs != {LR, SP, PC}) # R12 <- Rs # PC <- LR # Conditional return from subroutine with return of false value: # else if (Rs == LR) # R12 <- -1 # PC <- LR # Conditional return from subroutine with return of false value: # else if (Rs == SP) # R12 <- 0 # PC <- LR # Conditional return from subroutine with return of true value: # else if (Rs == PC) # R12 <- 1 # PC <- LR #Syntax: ret{cond4} Rs #010 1111 0 CCCC ssss #0101 1110 CCCC ssss :RET^{COND_4_4} retCond4Sub is op13_3=0x2 & op9_4=0xf & op8_1 = 0x0 & COND_4_4 & retCond4Sub { # Test Condition build COND_4_4; build retCond4Sub; # Flags Set: V = 0x0; C = 0x0; NZSTATUS(R12); # End Operation: #PC = LR; return [ LR ]; } #--------------------------------------------------------------------- # BR{cond} - Branch if Condition Satisfied # I. cond3 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl} # disp -> {-256, -254, ..., 254} # II. 
cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # disp -> {-2097152, -2097150, ..., 2097150} #--------------------------------------------------------------------- sDisp8: sdisp is sdisp4_8 [ sdisp = inst_start + (sdisp4_8 << 1); ] { export *:4 sdisp; } sDisp21: sdisp21 is disp21part2_4_1 & disp21part3_9_4; disp21part1_0_16 [ sdisp21 = inst_start + (((disp21part3_9_4 << 17) | (disp21part2_4_1 << 16) | disp21part1_0_16) << 1); ] { export *:4 sdisp21; } #BR{cond3} Format I: #Operation: # Branch if condition satisfied: # if(cond3) # PC <- PC + (SE(disp8)<<1) # else # PC <- PC + 2; #Syntax: br{cond3}disp #110 0 nnnnnnnn 0 CCC #1100 nnnn nnnn 0CCC :BR^{COND_3} sDisp8 is op13_3=0x6 & op12_1=0x0 & op3_1 = 0x0 & COND_3 & sDisp8 { tst:1 = COND_3; if (tst) goto sDisp8; } #BR{cond4} Format II: #Operation: # Branch if condition satisfied: # if(cond4) # PC <- PC + (SE(disp21)<<1) # else # PC <- PC + 4; #Syntax: br{cond3}disp #111 nnnn 0100 n CCCC nnnnnnnnnnnnnnnn #111n nnn0 100n CCCC nnnn nnnn nnnn nnnn :BR^{COND_4_0} sDisp21 is (op13_3=0x7 & op5_4=0x4 & COND_4_0) ... & sDisp21 { build COND_4_0; goto sDisp21; } #--------------------------------------------------------------------- # RJMP - Relative Jump # I. disp -> {-1024, -1022, ..., 1022} #--------------------------------------------------------------------- # RJMP Format I: # Operation: PC <- PC + (SE(disp10)<<1); # Syntax: rjmp PC[disp] # 1100 nnnn nnnn 10nn RJMPdisp: disp is disp4_8 & sdisp0_2 [ disp = inst_start + (((sdisp0_2 << 8) | disp4_8) << 1); ] { export *:4 disp; } :RJMP RJMPdisp is op12_4=0xc & op2_2=0x2 & RJMPdisp { goto RJMPdisp; } #--------------------------------------------------------------------- # ICALL - Subroutine Call # I. 
d -> {0, 1, ..., 15} #--------------------------------------------------------------------- # ICALL Format I: # Operation: LR <- PC + 2 # PC <- Rd # Syntax: icall Rd # 0101 1101 0001 dddd :ICALL rd0 is op4_12=0x5d1 & rd0 { LR = inst_next; call [rd0]; } #--------------------------------------------------------------------- # MCALL - Subroutine Call # I. p -> {0, 1, ..., 15} # disp -> {-131072, -131068, ..., 131068} #--------------------------------------------------------------------- RP0Disp16: rp0^"["^disp^"]" is rp0; disp_16 [ disp = (disp_16 << 2); ] { val:4 = (rp0 & 0xfffffffc) + disp; export *:4 val; } RP0Disp16_2: PC[disp] is disp_16 & PC [ disp = (inst_start & 0xfffffffc) + (disp_16 << 2); ] { export *:4 disp; } # MCALL Format I: # Operation: LR <- PC + 4 # PC <- *((Rp & 0xfffffffc) + (SE(disp16) << 2)) # Syntax: mcall Rp[disp] # 1111 0000 0001 pppp nnnn nnnn nnnn nnnn IndirectPlaceHolder: " " is epsilon{} :MCALL RP0Disp16 is op4_12=0xf01 ... & RP0Disp16 { LR = inst_next; PC = RP0Disp16; call [PC]; } :MCALL RP0Disp16_2^IndirectPlaceHolder is op4_12=0xf01 & rp0=0xf ; RP0Disp16_2 & IndirectPlaceHolder { LR = inst_next; PC = RP0Disp16_2; call [PC]; } RelDisp10: val is disp4_8 & disp0_2 [ctx_rel0_8=disp4_8; ctx_rel8_2=disp0_2; val= inst_start + (ctx_rel10 << 1); ] { export *:4 val; } RelDisp21: val is imm16 [ctx_rel0_16=imm16; val=inst_start + (ctx_rel21 << 1); ] { export *:4 val; } :RCALL PC[RelDisp10] is op13_3=6 & op12_1=0 & b02=1 & b03=1 & PC & RelDisp10 { LR = inst_next; call RelDisp10; } :RCALL PC[RelDisp21] is op13_3=7 & op5_4=5 & op0_4=0 & PC & b04 & bp9_4; RelDisp21 [ctx_rel16_1=b04; ctx_rel17_4=bp9_4;] { LR = inst_next; call RelDisp21; } # The RETx instructions are somewhat complicated. The architecutre version (A/B) # determines hardware actions as well as the status of the mode bits M2:M0. For now, # we follow the "B" model as it's simpler and have a custom pcode. 
For RETE I also # picked a given interrupt level that would actually be determined by the mode bits. :RETD is op0_16=0xd623 { SR = RSR_DBG; SRTOFLAGS(); return [ RAR_DBG ]; } :RETE is op0_16=0xd603 { CheckAndRestoreInterupt(); SR = RSR_INT0; SRTOFLAGS(); L = 0; LTOSR(); return [ RAR_INT0 ]; } :RETS is op0_16=0xd613 { CheckAndRestoreSupervisor(); SR = RSR_SUP; SRTOFLAGS(); return [ RAR_SUP ]; } :RETJ is op0_16=0xd633 { JavaTrap(); J = 1; R = 0; JRGMTOSR(); return [ LR ]; } :SCALL is op0_16=0xd733 { SupervisorCallSetup(); LR = inst_next; tmp:4 = EVBA + 0x100; call [ tmp ]; } JV3: val is b06=1 & ctx_rel3 [ val = ctx_rel3 << 0; ] { export *[const]:1 val; } JV3: val is b06=0 & disp4_3 [ val = disp4_3+1; ] { export *[const]:1 val; } :INCJOSP JV3 is op7_9=0x1ad & op0_4=0x3 & disp4_3 & JV3 [ctx_rel3 = disp4_3;] { JavaCheckStack(JOSP,JV3); JOSP = JOSP + sext(JV3); } :POPJC is op0_16=0xd713 { JavaPopContext(); } :PUSHJC is op0_16=0xd723 { JavaPushContext(); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_logic_operations.sinc ================================================ #--------------------------------------------------------------------- # 8.3.5 Logic Operations #--------------------------------------------------------------------- #--------------------------------------------------------------------- # AND - Logical AND with optional logical shift # I. {d, s} -> {0, 1, ..., 15} # II, III. 
{d, x, y} -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} #--------------------------------------------------------------------- # AND Format I # Operation: Rd <- Rd & Rs # Syntax: and Rd, Rs # 000s sss0 0110 dddd :AND rd0, RS9A is op13_3=0x0 & op4_5=0x6 & rd0 & RS9A { rd0 = rd0 & RS9A; NZSTATUS(rd0); } :AND rd0, RS9A is op13_3=0x0 & op4_5=0x6 & rd0 & RS9A & rd0=0xf { PC = inst_start & RS9A; NZSTATUS(PC); goto [PC]; } # AND Format II # Operation: Rd <- Rx & Ry << sa5 # Syntax: and Rd, Rx, Ry << sa # 111x xxx1 1110 yyyy 0000 000t tttt dddd :AND erd0, RX9A, RY0A^" << " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=0 & erd0 & shift4_5 { erd0 = RX9A & (RY0A << shift4_5); NZSTATUS(erd0); } :AND erd0, RX9A, RY0A^" << " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=0 & erd0 & shift4_5 & erd0=0xf { PC = RX9A & (RY0A << shift4_5); NZSTATUS(PC); goto [PC]; } # AND Format III # Operation: Rd <- Rx & Ry >> sa5 # Syntax: and Rd, Rx, Ry >> sa # 111x xxx1 1110 yyyy 0000 001t tttt dddd :AND erd0, RX9A, RY0A^" >> " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=1 & erd0 & shift4_5 { erd0 = RX9A & (RY0A >> shift4_5); NZSTATUS(erd0); } :AND erd0, RX9A, RY0A^" >> " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=1 & erd0 & shift4_5 & erd0=0xf { PC = RX9A & (RY0A >> shift4_5); NZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # AND{cond4} - Conditional And # I. 
cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # AND{cond4} Format I # Operation: if(cond4) then # Rd <- Rx & Ry # Syntax: and{cond4} Rd, Rx, Ry # 111x xxx1 1101 yyyy 1110 cccc 0010 dddd :AND^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=2 & ECOND_8_4 & erd0) { build ECOND_8_4; erd0 = RX9A & RY0A; } :AND^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=2 & ECOND_8_4 & erd0 & erd0=0xf) { build ECOND_8_4; PC = RX9A & RY0A; goto [PC]; } #--------------------------------------------------------------------- # ANDH, ANDL - Logical AND into high or low half of register # I, II, III, IV. d -> {0, 1, ..., 15} # imm -> {0, 1, ..., 65535} #--------------------------------------------------------------------- # ANDH Format I # Operation: Rd[31:16] <- Rd[31:16] & imm16 # Syntax: andh Rd, imm # 1110 01H0 0001 dddd iiii iiii iiii iiii # H == 0 :ANDH rd0, imm16 is op10_6=0x39 & coh=0 & op4_5=1 & rd0 ; imm16 { value:4 = (imm16 << 16) | 0xffff; rd0 = rd0 & value; NZSTATUS(rd0); } :ANDH rd0, imm16 is op10_6=0x39 & coh=0 & op4_5=1 & rd0 & rd0=0xf; imm16 { value:4 = (imm16 << 16) | 0xffff; PC = inst_start & value; NZSTATUS(PC); goto [PC]; } # ANDH Format II # Operation: Rd[31:16] <- Rd[31:16] & imm16 # Rd[15:0] <- 0 # Syntax: andh Rd, imm, COH # 1110 01H0 0001 dddd iiii iiii iiii iiii # H == 1 :ANDH rd0, imm16^", COH" is op10_6=0x39 & coh=1 & op4_5=1 & rd0 ; imm16 { value:4 = imm16 << 16; rd0 = rd0 & value; NZSTATUS(rd0); } :ANDH rd0, imm16^", COH" is op10_6=0x39 & coh=1 & op4_5=1 & rd0 & rd0=0xf; imm16 { value:4 = imm16 << 16; PC = inst_start & value; NZSTATUS(PC); goto [PC]; } # ANDL Format III # Operation: Rd[15:0] <- Rd[15:0] & imm16 # Syntax: andl Rd, imm # 1110 00H0 0001 dddd iiii iiii iiii iiii # H == 0 :ANDL rd0, imm16 is op10_6=0x38 & coh=0 & op4_5=1 & rd0 
; imm16 { value:4 = imm16 | 0xffff0000; rd0 = rd0 & value; NZSTATUS(rd0); } :ANDL rd0, imm16 is op10_6=0x38 & coh=0 & op4_5=1 & rd0 & rd0=0xf ; imm16 { value:4 = imm16 | 0xffff0000; PC = inst_start & value; NZSTATUS(PC); goto [PC]; } # ANDL Format IV # Operation: Rd[15:0] <- Rd[15:0] & imm16 # Rd[31:16] <- 0 # Syntax: andl Rd, imm, COH # 1110 00H0 0001 dddd iiii iiii iiii iiii # H == 1 :ANDL rd0, imm16^", COH" is op10_6=0x38 & coh=1 & op4_5=1 & rd0 ; imm16 { value:4 = imm16; rd0 = rd0 & value; NZSTATUS(rd0); } :ANDL rd0, imm16^", COH" is op10_6=0x38 & coh=1 & op4_5=1 & rd0 & rd0=0xf; imm16 { value:4 = imm16; PC = inst_start & value; NZSTATUS(PC); } #--------------------------------------------------------------------- # ANDN - Logical AND NOT # I. {d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # ANDN Format I # Operation: Rd <- Rd & Rs # Syntax: andn Rd, Rs # 000s sss0 1000 dddd :ANDN rd0, RS9A is op13_3=0 & op4_5=8 & rd0 & RS9A { rd0 = rd0 & ~RS9A; NZSTATUS(rd0); } :ANDN rd0, RS9A is op13_3=0 & op4_5=8 & rd0 & RS9A & rd0=0xf { PC = inst_start & ~RS9A; NZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # COM - One's Compliment # I. d -> {0, 1, ..., 15} #--------------------------------------------------------------------- # COM Format I # Operation: Rd <- ~Rd # Syntax: com Rd # 0101 1100 1101 dddd :COM rd0 is op4_12=0x5cd & rd0 { rd0 = ~rd0; ZSTATUS(rd0); } #--------------------------------------------------------------------- # EOR - Logical Exclusive OR with optional logical shift # I. {d, s} -> {0, 1, ..., 15} # II, III. 
{d, x, y} -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} #--------------------------------------------------------------------- # EOR Format I # Operation: Rd <- Rd & Rs # Syntax: eor Rd, Rs # 000s sss0 0101 dddd :EOR rd0, RS9A is op13_3=0x0 & op4_5=0x5 & rd0 & RS9A { rd0 = rd0 ^ RS9A; NZSTATUS(rd0); } :EOR rd0, RS9A is op13_3=0x0 & op4_5=0x5 & rd0 & RS9A & rd0=0xf { PC = inst_start ^ RS9A; NZSTATUS(PC); goto [PC]; } # EOR Format II # Operation: Rd <- Rx & Ry << sa5 # Syntax: eor Rd, Rx, Ry << sa # 111x xxx1 1110 yyyy 0010 000t tttt dddd :EOR erd0, RX9A, RY0A^" << " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=0x10 & erd0 & shift4_5 { erd0 = RX9A ^ (RY0A << shift4_5); NZSTATUS(erd0); } :EOR erd0, RX9A, RY0A^" << " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=0x10 & erd0 & shift4_5 & erd0=0xf { PC = RX9A ^ (RY0A << shift4_5); NZSTATUS(PC); goto [PC]; } # EOR Format III # Operation: Rd <- Rx & Ry >> sa5 # Syntax: eor Rd, Rx, Ry >> sa # 111x xxx1 1110 yyyy 0010 001t tttt dddd :EOR erd0, RX9A, RY0A^" >> " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=0x11 & erd0 & shift4_5 { erd0 = RX9A ^ (RY0A >> shift4_5); NZSTATUS(erd0); } :EOR erd0, RX9A, RY0A^" >> " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=0x11 & erd0 & shift4_5 & erd0=0xf { PC = RX9A ^ (RY0A >> shift4_5); NZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # EOR{cond4} - Conditional Logical EOR # I. 
cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # EOR{cond4} Format I # Operation: if(cond4) then # Rd <- Rx ^ Ry # Syntax: eor{cond4} Rd, Rx, Ry # 111x xxx1 1101 yyyy 1110 cccc 0100 dddd :EOR^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=4 & ECOND_8_4 & erd0) { build ECOND_8_4; erd0 = RX9A ^ RY0A; } :EOR^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=4 & ECOND_8_4 & erd0 & erd0=0xf) { build ECOND_8_4; PC = RX9A ^ RY0A; goto [PC]; } #--------------------------------------------------------------------- # EORH, EORL - Logical EOR into high or low half of register # I, II. d -> {0, 1, ..., 15} # imm -> {0, 1, ..., 65535} #--------------------------------------------------------------------- # EORH Format I # Operation: Rd[31:16] <- Rd[31:16] & imm16 # Syntax: eorh Rd, imm # 1110 1110 0001 dddd iiii iiii iiii iiii :EORH rd0, imm16 is op4_12=0xee1 & rd0 ; imm16 { value:4 = imm16 << 16; rd0 = rd0 ^ value; NZSTATUS(rd0); } :EORH rd0, imm16 is op4_12=0xee1 & rd0 & rd0=0xf ; imm16 { value:4 = imm16 << 16; PC = inst_start ^ value; NZSTATUS(PC); goto [PC]; } # EORL Format II # Operation: Rd[15:0] <- Rd[15:0] & imm16 # Syntax: eorl Rd, imm # 1110 1100 0001 dddd iiii iiii iiii iiii :EORL rd0, imm16 is op4_12=0xec1 & rd0 ; imm16 { value:4 = imm16; rd0 = rd0 ^ value; NZSTATUS(rd0); } :EORL rd0, imm16 is op4_12=0xec1 & rd0 & rd0=0xf ; imm16 { value:4 = imm16; PC = inst_start ^ value; NZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # OR - Logical OR with optional logical shift # I. {d, s} -> {0, 1, ..., 15} # II, III. 
{d, x, y} -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} #--------------------------------------------------------------------- # OR Format I # Operation: Rd <- Rd & Rs # Syntax: or Rd, Rs # 000s sss0 0100 dddd :OR rd0, RS9A is op13_3=0x0 & op4_5=0x4 & rd0 & RS9A { rd0 = rd0 | RS9A; NZSTATUS(rd0); } :OR rd0, RS9A is op13_3=0x0 & op4_5=0x4 & rd0 & RS9A & rd0=0xf { PC = inst_start | RS9A; NZSTATUS(PC); goto [PC]; } # OR Format II # Operation: Rd <- Rx & Ry << sa5 # Syntax: or Rd, Rx, Ry << sa # 111x xxx1 1110 yyyy 0001 000t tttt dddd :OR erd0, RX9A, RY0A^" << " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=8 & erd0 & shift4_5 { erd0 = RX9A | (RY0A << shift4_5); NZSTATUS(erd0); } :OR erd0, RX9A, RY0A^" << " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=8 & erd0 & shift4_5 & erd0=0xf { PC = RX9A | (RY0A << shift4_5); NZSTATUS(PC); goto [PC]; } # OR Format III # Operation: Rd <- Rx & Ry >> sa5 # Syntax: or Rd, Rx, Ry >> sa # 111x xxx1 1110 yyyy 0001 001t tttt dddd :OR erd0, RX9A, RY0A^" >> " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=9 & erd0 & shift4_5 { erd0 = RX9A | (RY0A >> shift4_5); NZSTATUS(erd0); } :OR erd0, RX9A, RY0A^" >> " shift4_5 is op13_3=7 & op4_5=0x1e & RX9A & RY0A; eop9_7=9 & erd0 & shift4_5 & erd0=0xf { PC = RX9A | (RY0A >> shift4_5); NZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # OR{cond4} - Conditional Logical OR # I. 
cond4 -> {eq, ne, cc/hs, cs/lo, ge, lt, mi, pl, ls, gt, le, hi, vs, vc, qs, al} # {d,x,y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # OR{cond4} Format I # Operation: if(cond4) then # Rd <- Rx | Ry # Syntax: or{cond4} Rd, Rx, Ry # 111x xxx1 1101 yyyy 1110 cccc 0011 dddd :OR^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=3 & ECOND_8_4 & erd0) { build ECOND_8_4; erd0 = RX9A | RY0A; } :OR^{ECOND_8_4} erd0, RX9A, RY0A is (op13_3=0x7 & op4_5=0x1d & RX9A & RY0A; eop12_4=0xe & eop4_4=3 & ECOND_8_4 & erd0 & erd0=0xf) { build ECOND_8_4; PC = RX9A | RY0A; goto [PC]; } #--------------------------------------------------------------------- # ORH, ORL - Logical OR into high or low half of register # I, II. d -> {0, 1, ..., 15} # imm -> {0, 1, ..., 65535} #--------------------------------------------------------------------- # ORH Format I # Operation: Rd[31:16] <- Rd[31:16] | imm16 # Syntax: orh Rd, imm # 1110 1010 0001 dddd iiii iiii iiii iiii :ORH rd0, imm16 is op4_12=0xea1 & rd0 ; imm16 { val:4 = (imm16 << 16); rd0 = rd0 | val; NZSTATUS(rd0); } :ORH rd0, imm16 is op4_12=0xea1 & rd0 & rd0=0xf ; imm16 { val:4 = (imm16 << 16); PC = inst_start | val; NZSTATUS(PC); goto [PC]; } # ORL Format II # Operation: Rd[15:0] <- Rd[15:0] | imm16 # Syntax: orl Rd, imm # 1110 1000 0001 dddd iiii iiii iiii iiii :ORL rd0, imm16 is op4_12=0xe81 & rd0 ; imm16 { val:4 = imm16; rd0 = rd0 | val; NZSTATUS(rd0); } :ORL rd0, imm16 is op4_12=0xe81 & rd0 & rd0=0xf; imm16 { val:4 = imm16; PC = inst_start | val; NZSTATUS(PC); goto [PC]; } #--------------------------------------------------------------------- # TST - Test Register # I. 
{d, s} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # TST Format I # Operation: Rd & Rs # Syntax: tst Rd, Rs # 000s sss0 0111 dddd :TST RD0A, RS9A is op13_3=0x0 & op4_5=0x7 & RD0A & RS9A { test:4 = RD0A & RS9A; NZSTATUS(test); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_multiplication_operations.sinc ================================================ #--------------------------------------------------------------------- # 8.3.3 Multiplication Operations #--------------------------------------------------------------------- #--------------------------------------------------------------------- # DIVS - Signed Divide # I. d -> {0, 2, ..., 14) # {x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # DIVS Format I # Operation: Rd <- Rx / Ry # Rd+1 <- Rx % Ry # Syntax: divs Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1100 0000 dddd :DIVS erd0_low, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0xc0 & eb0=0 & erd0 & erd0_low & erd0_hi { tmpx:4 = RX9A; tmpy:4 = RY0A; erd0_low = tmpx s/ tmpy; erd0_hi = tmpx s% tmpy; } #--------------------------------------------------------------------- # DIVU - Unsigned Divide # I. d -> {0, 2, ..., 14) # {x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # DIVU Format I # Operation: Rd <- Rx / Ry # Rd+1 <- Rx % Ry # Syntax: divu Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1101 0000 dddd :DIVU erd0_low, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0xd0 & eb0=0 & erd0 & erd0_low & erd0_hi { tmpx:4 = RX9A; tmpy:4 = RY0A; erd0_low = tmpx / tmpy; erd0_hi = tmpx % tmpy; } #--------------------------------------------------------------------- # MAC - Multiply Accumulate # I. 
{d, x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MAC Format I # Operation: Rd <- Rx * Ry + Rd # Syntax: mac Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0011 0100 dddd :MAC erd0, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0x34 & erd0 { erd0 = RX9A * RY0A + erd0; } #--------------------------------------------------------------------- # MACS.D - Multiply Accumulate Signed # I. d -> {0, 2, ..., 14) # {x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MACS.D Format I # Operation: acc <- (Rd+1:Rd) # prod <- Rx * Ry # res <- prod + acc # (Rd+1:Rd) <- res # Syntax: macs.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0101 0100 dddd :MACS.D erd0_low, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0x54 & eb0=0 & erd0 & erd0_low & erd0_hi { acc:8 = zext(erd0_low) | zext(erd0_hi << 32); prod:8 = sext(RX9A) * sext(RY0A); res:8 = prod + acc; erd0_low = res:4; tmp:8 = (res s>> 32); erd0_hi = tmp:4; } #--------------------------------------------------------------------- # MACU.D - Multiply Accumulate Unsigned # I. d -> {0, 2, ..., 14) # {x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MACU.D Format I # Operation: acc <- (Rd+1:Rd) # prod <- Rx * Ry # res <- prod + acc # (Rd+1:Rd) <- res # Syntax: macu.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0111 0100 dddd :MACU.D erd0_low, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0x74 & eb0=0 & erd0 & erd0_low & erd0_hi { acc:8 = zext(erd0_low) | zext(erd0_hi << 32); prod:8 = zext(RX9A) * zext(RY0A); res:8 = prod + acc; erd0_low = res:4; tmp:8 = (res >> 32); erd0_hi = tmp:4; } #--------------------------------------------------------------------- # MUL - Multiply # I. {d, s} -> {0, 1, ..., 15} # II. {d, x, y} -> {0, 1, ..., 15} # III. 
{d, s} -> {0, 1, ..., 15} # imm -> {-128, -127, ..., 127} #--------------------------------------------------------------------- # MUL Format I # Operation: Rd <- Rd * Rs # Syntax: mul Rd, Rs # 101s sss1 0011 dddd :MUL rd0, RS9A is op13_3=5 & op4_5=0x13 & rd0 & RS9A { rd0 = rd0 * RS9A; } # MUL Format II # Operation: Rd <- Rx * Ry # Syntax: mul Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0010 0100 dddd :MUL erd0, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0x24 & erd0 { erd0 = RX9A * RY0A; } # MUL Format III # Operation: Rd <- Rs * SE(imm8) # Syntax: mul Rd, Rs, imm # 111s sss0 0000 dddd 0001 0000 iiii iiii :MUL rd0, RS9A, simm0_8 is op13_3=7 & op4_5=0 & rd0 & RS9A ; eop8_8=0x10 & simm0_8 { rd0 = RS9A * simm0_8; } #--------------------------------------------------------------------- # MULS.D - Multiply Signed # I. d -> {0, 2, ..., 14) # {x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MULS.D Format I # Operation: (Rd+1:Rd) <- Rx * Ry # Syntax: muls.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0100 0100 dddd :MULS.D erd0_low, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0x44 & eb0=0 & erd0 & erd0_low & erd0_hi { prod:8 = sext(RX9A) * sext(RY0A); erd0_low = prod:4; tmp:8 = (prod s>> 32); erd0_hi = tmp:4; } #--------------------------------------------------------------------- # MULU.D - Multiply Unsigned # I. 
d -> {0, 2, ..., 14) # {x, y} -> {0, 1, ..., 15} #--------------------------------------------------------------------- # MULU.D Format I # Operation: (Rd+1:Rd) <- Rx * Ry # Syntax: mulu.d Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 0110 0100 dddd :MULU.D erd0_low, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A ; eop4_12=0x64 & eb0=0 & erd0 & erd0_low & erd0_hi { prod:8 = zext(RX9A) * zext(RY0A); erd0_low = prod:4; tmp:8 = (prod >> 32); erd0_hi = tmp:4; } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_shift_operations.sinc ================================================ #--------------------------------------------------------------------- # 8.3.7 Shift Operations #--------------------------------------------------------------------- macro do_asr(VAL, SA, DEST) { tmp:8 = zext(VAL) << 32; tmp = tmp s>> SA; DEST = VAL s>> SA; C = (tmp & 0x0000000080000000) != 0; NZSTATUS(DEST); } #--------------------------------------------------------------------- # ASR - Arithmetic Shift Right # I. {d, x, y} -> {0, 1, ..., 15} # II. d -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} # III. 
{d, s} -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} #--------------------------------------------------------------------- # ASR Format I # Operation: Rd <- ASR(Rx, Ry[4:0]) # Syntax: asr Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1000 0100 dddd :ASR erd0, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A; eop4_12=0x084 & erd0 { do_asr(RX9A, RY0A, erd0); } # ASR Format II # Operation: Rd <- ASR(Rd, sa5) # Syntax: asr Rd, sa # 101t ttt1 010t dddd :ASR rd0, shift is op13_3=5 & op5_4=0xa & shift9_4 & shift4_1 & rd0 [ shift = (shift9_4 << 1) | shift4_1; ] { do_asr(rd0, shift, rd0); } # ASR Format III # Operation: Rd <- ASR(Rs, sa5) # Syntax: asr Rd, Rs, sa # 111s sss0 0000 dddd 0001 0100 000t tttt :ASR rd0, RS9A, shift0_5 is op13_3=7 & op5_4=0 & RS9A & rd0; eop5_11=0xa0 & shift0_5 { do_asr(RS9A, shift0_5, rd0); } macro do_lsl(VAL, SA, DEST) { tmp:8 = zext(VAL); tmp = tmp << SA; DEST = tmp:4; C = (tmp & 0x0000000100000000) != 0; NZSTATUS(DEST); } #--------------------------------------------------------------------- # LSL - Logical Shift Left # I. {d, x, y} -> {0, 1, ..., 15} # II. d -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} # III. 
{d, s} -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} #--------------------------------------------------------------------- # LSL Format I # Operation: Rd <- LSL(Rx, Ry[4:0]) # Syntax: lsl Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1001 0100 dddd :LSL erd0, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A; eop4_12=0x094 & erd0 { tmp:4 = RY0A & 0x0000001F; do_lsl(RX9A, tmp, erd0); } # LSL Format II # Operation: Rd <- LSL(Rd, sa5) # Syntax: lsl Rd, sa # 101t ttt1 011t dddd :LSL rd0, shift is op13_3=5 & op5_4=0xb & shift9_4 & shift4_1 & rd0 [ shift = (shift9_4 << 1) | shift4_1; ] { do_lsl(rd0, shift, rd0); } # LSL Format III # Operation: Rd <- LSL(Rs, sa5) # Syntax: lsl Rd, Rs, sa # 111s sss0 0000 dddd 0001 0101 000t tttt :LSL rd0, RS9A, shift0_5 is op13_3=7 & op5_4=0 & RS9A & rd0; eop5_11=0xa8 & shift0_5 { do_lsl(RS9A, shift0_5, rd0); } macro do_lsr(VAL, SA, DEST) { tmp:8 = zext(VAL) << 32; tmp = tmp >> SA; DEST = VAL >> SA; C = (tmp & 0x0000000080000000) != 0; NZSTATUS(DEST); } #--------------------------------------------------------------------- # LSR - Logical Shift Right # I. {d, x, y} -> {0, 1, ..., 15} # II. d -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} # III. 
{d, s} -> {0, 1, ..., 15} # sa -> {0, 1, ..., 31} #--------------------------------------------------------------------- # LSR Format I # Operation: Rd <- LSR(Rx, Ry[4:0]) # Syntax: lsr Rd, Rx, Ry # 111x xxx0 0000 yyyy 0000 1010 0100 dddd :LSR erd0, RX9A, RY0A is op13_3=7 & op4_5=0 & RY0A & RX9A; eop4_12=0x0a4 & erd0 { tmp:4 = RY0A & 0x0000001F; do_lsr(RX9A, tmp, erd0); } # LSR Format II # Operation: Rd <- LSR(Rd, sa5) # Syntax: lsr Rd, sa # 101t ttt1 100t dddd :LSR rd0, shift is op13_3=5 & op5_4=0xc & shift9_4 & shift4_1 & rd0 [ shift = (shift9_4 << 1) | shift4_1; ] { do_lsr(rd0, shift, rd0); } # LSR Format III # Operation: Rd <- LSR(Rs, sa5) # Syntax: lsr Rd, Rs, sa # 111s sss0 0000 dddd 0001 0110 000t tttt :LSR rd0, RS9A, shift0_5 is op13_3=7 & op5_4=0 & RS9A & rd0; eop5_11=0xb0 & shift0_5 { do_lsr(RS9A, shift0_5, rd0); } :ROL rd0 is op4_12=0x5cf & rd0 { tmp:4 = rd0 >> 31; tmpa:4 = zext(C); rd0 = rd0 << 1; rd0 = rd0 | tmpa; C = tmp:1; CZNVTOSR(); } :ROR rd0 is op4_12=0x5d0 & rd0 { tmp:4 = rd0 & 0x1; tmpa:4 = zext(C); tmpa = tmpa << 31; rd0 = rd0 >> 1; rd0 = rd0 | tmpa; C = tmp:1; CZNVTOSR(); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_simd_operations.sinc ================================================ macro satub(RES) { RES = (zext(RES > 0x00FF) * 0x00FF) + (zext(RES < 0x0100) * RES); } macro satsb(RES) { RES = (0x007F * zext(RES s> 0x007F)) + (RES * zext(RES s< 0x0080) * zext(RES s>= 0xFF80)) + (0x0080 * zext(RES s< 0xFF80)); } macro satsh(RES) { RES = (0x00007FFF * zext(RES s> 0x00007FFF)) + (RES * zext(RES s< 0x00008000) * zext(RES s>= 0xFFFF8000)) + (0x00008000 * zext(RES s< 0xFFFF8000)); } macro satuh(RES) { RES = (0x0000FFFF * zext(RES > 0x0000FFFF)) + (RES * zext(RES < 0x00010000)); } macro sataddub(OP1, OP2, RES) { RES = zext(OP1) + zext(OP2); satub(RES); } macro sataddsb(OP1, OP2, RES) { RES = sext(OP1) + sext(OP2); satsb(RES); } macro satsubub(OP1, OP2, RES) { RES = zext(OP1) - zext(OP2); 
satub(RES); } macro satsubsb(OP1, OP2, RES) { RES = sext(OP1) - sext(OP2); satsb(RES); } macro satadduh(OP1, OP2, RES) { RES = zext(OP1) + zext(OP2); satuh(RES); } macro satsubuh(OP1, OP2, RES) { RES = zext(OP1) - zext(OP2); satuh(RES); } macro sataddsh(OP1, OP2, RES) { RES = sext(OP1) + sext(OP2); satsh(RES); } macro satsubsh(OP1, OP2, RES) { RES = sext(OP1) - sext(OP2); satsh(RES); } PXPART: ":T" is ctx_usex & xpart=0x1 { tmp:4 = ctx_usex; tmp = tmp >> 16; export *[const]:2 tmp; } PXPART: ":B" is ctx_usex & xpart=0x0 { tmp:4 = ctx_usex; tmp = tmp & 0x0000FFFF; export *[const]:2 tmp; } PYPART: ":T" is ctx_usey & ypart=0x1 { tmp:4 = ctx_usey; tmp = tmp >> 16; export *[const]:2 tmp; } PYPART: ":B" is ctx_usey & ypart=0x0 { tmp:4 = ctx_usey; tmp = tmp & 0x0000FFFF; export *[const]:2 tmp; } PUPART: ":T" is ctx_useu & upart=0x1 { tmp:4 = ctx_useu; tmp = tmp >> 16; export *[const]:2 tmp; } PUPART: ":B" is ctx_useu & upart=0x0 { tmp:4 = ctx_useu; tmp = tmp & 0x0000FFFF; export *[const]:2 tmp; } :PABS.SB erd0, rs0 is op4_12=0xe00 & rs0 ; eop4_12=0x23e & erd0 { tmps:1 = rs0[24,8]; erd0[24,8] = abs(tmps); tmps = rs0[16,8]; erd0[16,8] = abs(tmps); tmps = rs0[8,8]; erd0[8,8] = abs(tmps); tmps = rs0[0,8]; erd0[0,8] = abs(tmps); } :PABS.SH erd0, rs0 is op4_12=0xe00 & rs0 ; eop4_12=0x23f & erd0 { tmps:2 = rs0[16,16]; erd0[16,16] = abs(tmps); tmps = rs0[0,16]; erd0[0,16] = abs(tmps); } :PACKSH.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x24c & erd0 { tmp:2 = rx9[16,16]; satub(tmp); erd0[24,8] = tmp:1; tmp = rx9[0,16]; satub(tmp); erd0[16,8] = tmp:1; tmp = ry0[16,16]; satub(tmp); erd0[8,8] = tmp:1; tmp = ry0[0,16]; satub(tmp); erd0[0,8] = tmp:1; } :PACKSH.SB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x24d & erd0 { tmp:2 = rx9[16,16]; satsb(tmp); erd0[24,8] = tmp:1; tmp = rx9[0,16]; satsb(tmp); erd0[16,8] = tmp:1; tmp = ry0[16,16]; satsb(tmp); erd0[8,8] = tmp:1; tmp = ry0[0,16]; satsb(tmp); erd0[0,8] = tmp:1; } :PACKW.SH erd0, rx9, ry0 is 
op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x247 & erd0 { tmp:4 = rx9; satsh(tmp); erd0[16,16] = tmp:2; tmp = ry0; satsh(tmp); erd0[0,16] = tmp:2; } :PADD.B erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x230 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:1 = tmpx + tmpy; erd0[24,8] = tmpd; tmpx = rx9[16,8]; tmpy = ry0[16,8]; tmpd = tmpx + tmpy; erd0[16,8] = tmpd; tmpx = rx9[8,8]; tmpy = ry0[8,8]; tmpd = tmpx + tmpy; erd0[8,8] = tmpd; tmpx = rx9[0,8]; tmpy = ry0[0,8]; tmpd = tmpx + tmpy; erd0[0,8] = tmpd; } :PADD.H erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x200 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:2 = tmpx + tmpy; erd0[16,16] = tmpd; tmpx = rx9[0,16]; tmpy = ry0[0,16]; tmpd = tmpx + tmpy; erd0[0,16] = tmpd; } :PADDH.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x236 & erd0 { tmpx:2 = zext(rx9[24,8]); tmpy:2 = zext(ry0[24,8]); tmpd:2 = tmpx + tmpy; tmpd = tmpd >> 1; erd0[24,8] = tmpd:1; tmpx = zext(rx9[16,8]); tmpy = zext(ry0[16,8]); tmpd = tmpx + tmpy; tmpd = tmpd >> 1; erd0[16,8] = tmpd:1; tmpx = zext(rx9[8,8]); tmpy = zext(ry0[8,8]); tmpd = tmpx + tmpy; tmpd = tmpd >> 1; erd0[8,8] = tmpd:1; tmpx = zext(rx9[0,8]); tmpy = zext(ry0[0,8]); tmpd = tmpx + tmpy; tmpd = tmpd >> 1; erd0[0,8] = tmpd:1; } :PADDH.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x20c & erd0 { tmpx:4 = sext(rx9[16,16]); tmpy:4 = sext(ry0[16,16]); tmpd:4 = tmpx + tmpy; tmpd = tmpd s>> 1; erd0[16,16] = tmpd:2; tmpx = sext(rx9[0,16]); tmpy = sext(ry0[0,16]); tmpd = tmpx + tmpy; tmpd = tmpd s>> 1; erd0[0,16] = tmpd:2; } :PADDS.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x234 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:2 = 0; sataddub(tmpx,tmpy,tmpd); erd0[24,8] = tmpd:1; tmpx = rx9[16,8]; tmpy = ry0[16,8]; sataddub(tmpx,tmpy,tmpd); erd0[16,8] = tmpd:1; tmpx = rx9[8,8]; tmpy = ry0[8,8]; sataddub(tmpx,tmpy,tmpd); erd0[8,8] = tmpd:1; tmpx = rx9[0,8]; tmpy = 
ry0[0,8]; sataddub(tmpx,tmpy,tmpd); erd0[0,8] = tmpd:1; } :PADDS.SB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x232 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:2 = 0; sataddsb(tmpx,tmpy,tmpd); erd0[24,8] = tmpd:1; tmpx = rx9[16,8]; tmpy = ry0[16,8]; sataddsb(tmpx,tmpy,tmpd); erd0[16,8] = tmpd:1; tmpx = rx9[8,8]; tmpy = ry0[8,8]; sataddsb(tmpx,tmpy,tmpd); erd0[8,8] = tmpd:1; tmpx = rx9[0,8]; tmpy = ry0[0,8]; sataddsb(tmpx,tmpy,tmpd); erd0[0,8] = tmpd:1; } :PADDS.UH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x208 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:4 = 0; satadduh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[0,16]; satadduh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PADDS.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x204 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:4 = 0; sataddsh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[0,16]; sataddsh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PADDSUB.H erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x84 & PXPART & PYPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { tmp:2 = PXPART + PYPART; erd0[16,16] = tmp; tmp = PXPART - PYPART; erd0[0,16] = tmp; } :PADDSUBH.SH erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x8a & PXPART & PYPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { tmp:4 = sext(PXPART) + sext(PYPART); tmp = tmp s>> 1; erd0[16,16] = tmp:2; tmp = sext(PXPART) - sext(PYPART); tmp = tmp s>> 1; erd0[0,16] = tmp:2; } :PADDX.H erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x202 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[0,16]; tmpd:2 = tmpx + tmpy; erd0[16,16] = tmpd; tmpx = rx9[0,16]; tmpy = ry0[16,16]; tmpd = tmpx + tmpy; erd0[0,16] = tmpd; } :PADDXH.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x20e & erd0 { tmpx:4 = sext(rx9[16,16]); tmpy:4 = sext(ry0[0,16]); tmpd:4 = tmpx + tmpy; 
tmpd = tmpd s>> 1; erd0[16,16] = tmpd:2; tmpx = sext(rx9[0,16]); tmpy = sext(ry0[16,16]); tmpd = tmpx + tmpy; tmpd = tmpd s>> 1; erd0[0,16] = tmpd:2; } :PASR.B erd0, rs9, sa0_3 is op13_3=0x7 & rs9 & op3_6=0x0 & sa0_3 ; eop4_12=0x241 & erd0 { tmp:1 = rs9[24,8]; tmp = tmp s>> sa0_3; erd0[24,8] = tmp; tmp = rs9[16,8]; tmp = tmp s>> sa0_3; erd0[16,8] = tmp; tmp = rs9[8,8]; tmp = tmp s>> sa0_3; erd0[8,8] = tmp; tmp = rs9[0,8]; tmp = tmp s>> sa0_3; erd0[0,8] = tmp; } :PASR.H erd0, rs9, sa0_4 is op13_3=0x7 & rs9 & op4_5=0x0 & sa0_4 ; eop4_12=0x244 & erd0 { tmp:2 = rs9[16,16]; tmp = tmp s>> sa0_4; erd0[16,16] = tmp; tmp = rs9[0,16]; tmp = tmp s>> sa0_4; erd0[0,16] = tmp; } :PAVG.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x23c & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:2 = (zext(tmpx) + zext(tmpy) + 1) >> 1; erd0[24,8] = tmpd:1; tmpx = rx9[16,8]; tmpy = ry0[16,8]; tmpd = (zext(tmpx) + zext(tmpy) + 1) >> 1; erd0[16,8] = tmpd:1; tmpx = rx9[8,8]; tmpy = ry0[8,8]; tmpd = (zext(tmpx) + zext(tmpy) + 1) >> 1; erd0[8,8] = tmpd:1; tmpx = rx9[0,8]; tmpy = ry0[0,8]; tmpd = (zext(tmpx) + zext(tmpy) + 1) >> 1; erd0[0,8] = tmpd:1; } :PAVG.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x23d & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:4 = (sext(tmpx) + sext(tmpy) + 1) s>> 1; erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[0,16]; tmpd = (sext(tmpx) + sext(tmpy) + 1) s>> 1; erd0[0,16] = tmpd:2; } :PLSL.B erd0, rs9, sa0_3 is op13_3=0x7 & rs9 & op3_6=0x0 & sa0_3 ; eop4_12=0x242 & erd0 { tmp:1 = rs9[24,8]; tmp = tmp << sa0_3; erd0[24,8] = tmp; tmp = rs9[16,8]; tmp = tmp << sa0_3; erd0[16,8] = tmp; tmp = rs9[8,8]; tmp = tmp << sa0_3; erd0[8,8] = tmp; tmp = rs9[0,8]; tmp = tmp << sa0_3; erd0[0,8] = tmp; } :PLSL.H erd0, rs9, sa0_4 is op13_3=0x7 & rs9 & op4_5=0x0 & sa0_4 ; eop4_12=0x245 & erd0 { tmp:2 = rs9[16,16]; tmp = tmp << sa0_4; erd0[16,16] = tmp; tmp = rs9[0,16]; tmp = tmp << sa0_4; erd0[0,16] = tmp; } :PLSR.B erd0, rs9, 
sa0_3 is op13_3=0x7 & rs9 & op3_6=0x0 & sa0_3 ; eop4_12=0x243 & erd0 { tmp:1 = rs9[24,8]; tmp = tmp >> sa0_3; erd0[24,8] = tmp; tmp = rs9[16,8]; tmp = tmp >> sa0_3; erd0[16,8] = tmp; tmp = rs9[8,8]; tmp = tmp >> sa0_3; erd0[8,8] = tmp; tmp = rs9[0,8]; tmp = tmp >> sa0_3; erd0[0,8] = tmp; } :PLSR.H erd0, rs9, sa0_4 is op13_3=0x7 & rs9 & op4_5=0x0 & sa0_4 ; eop4_12=0x246 & erd0 { tmp:2 = rs9[16,16]; tmp = tmp >> sa0_4; erd0[16,16] = tmp; tmp = rs9[0,16]; tmp = tmp >> sa0_4; erd0[0,16] = tmp; } :PMAX.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x238 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:1 = (tmpx * zext(tmpx > tmpy)) + (tmpy * zext(tmpy <= tmpx)); erd0[24,8] = tmpd; tmpx = rx9[16,8]; tmpy = ry0[16,8]; tmpd = (tmpx * zext(tmpx > tmpy)) + (tmpy * zext(tmpy <= tmpx)); erd0[16,8] = tmpd; tmpx = rx9[8,8]; tmpy = ry0[8,8]; tmpd = (tmpx * zext(tmpx > tmpy)) + (tmpy * zext(tmpy <= tmpx)); erd0[8,8] = tmpd; tmpx = rx9[0,8]; tmpy = ry0[0,8]; tmpd = (tmpx * zext(tmpx > tmpy)) + (tmpy * zext(tmpy <= tmpx)); erd0[0,8] = tmpd; } :PMAX.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x239 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:2 = (tmpx * zext(tmpx > tmpy)) + (tmpy * zext(tmpy <= tmpx)); erd0[16,16] = tmpd; tmpx = rx9[0,16]; tmpy = ry0[0,16]; tmpd = (tmpx * zext(tmpx > tmpy)) + (tmpy * zext(tmpy <= tmpx)); erd0[0,16] = tmpd; } :PMIN.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x23a & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:1 = (tmpx * zext(tmpx < tmpy)) + (tmpy * zext(tmpy >= tmpx)); erd0[24,8] = tmpd; tmpx = rx9[16,8]; tmpy = ry0[16,8]; tmpd = (tmpx * zext(tmpx < tmpy)) + (tmpy * zext(tmpy >= tmpx)); erd0[16,8] = tmpd; tmpx = rx9[8,8]; tmpy = ry0[8,8]; tmpd = (tmpx * zext(tmpx < tmpy)) + (tmpy * zext(tmpy >= tmpx)); erd0[8,8] = tmpd; tmpx = rx9[0,8]; tmpy = ry0[0,8]; tmpd = (tmpx * zext(tmpx < tmpy)) + (tmpy * zext(tmpy >= tmpx)); erd0[0,8] = tmpd; } :PMIN.SH erd0, rx9, ry0 is 
op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x23b & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:2 = (tmpx * zext(tmpx < tmpy)) + (tmpy * zext(tmpy >= tmpx)); erd0[16,16] = tmpd; tmpx = rx9[0,16]; tmpy = ry0[0,16]; tmpd = (tmpx * zext(tmpx < tmpy)) + (tmpy * zext(tmpy >= tmpx)); erd0[0,16] = tmpd; } :PADDSUBS.UH erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x88 & PXPART & PYPART & erd0 { tmpx:2 = PXPART; tmpy:2 = PYPART; tmpd:4 = 0; satadduh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; satsubuh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PADDSUBS.SH erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x86 & PXPART & PYPART & erd0 { tmpx:2 = PXPART; tmpy:2 = PYPART; tmpd:4 = 0; sataddsh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; satsubsh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PADDXS.UH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x20a & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[0,16]; tmpd:4 = 0; satadduh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[16,16]; satadduh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PADDXS.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x206 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[0,16]; tmpd:4 = 0; sataddsh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[16,16]; sataddsh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PSAD erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x240 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:1 = abs(tmpx - tmpy); erd0 = zext(tmpd); tmpx = rx9[16,8]; tmpy = ry0[16,8]; tmpd = abs(tmpx - tmpy); erd0 = erd0 + zext(tmpd); tmpx = rx9[8,8]; tmpy = ry0[8,8]; tmpd = abs(tmpx - tmpy); erd0 = erd0 + zext(tmpd); tmpx = rx9[0,8]; tmpy = ry0[0,8]; tmpd = abs(tmpx - tmpy); erd0 = erd0 + zext(tmpd); } :PSUB.B erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x231 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:1 = tmpx - tmpy; erd0[24,8] = tmpd; tmpx = 
rx9[16,8]; tmpy = ry0[16,8]; tmpd = tmpx - tmpy; erd0[16,8] = tmpd; tmpx = rx9[8,8]; tmpy = ry0[8,8]; tmpd = tmpx - tmpy; erd0[8,8] = tmpd; tmpx = rx9[0,8]; tmpy = ry0[0,8]; tmpd = tmpx - tmpy; erd0[0,8] = tmpd; } :PSUB.H erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x201 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:2 = tmpx - tmpy; erd0[16,16] = tmpd; tmpx = rx9[0,16]; tmpy = ry0[0,16]; tmpd = tmpx - tmpy; erd0[0,16] = tmpd; } :PSUBADD.H erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x85 & PXPART & PYPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { tmp:2 = PXPART - PYPART; erd0[16,16] = tmp; tmp = PXPART + PYPART; erd0[0,16] = tmp; } :PSUBADDH.SH erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x8b & PXPART & PYPART & erd0 [ctx_savex=rx9; ctx_savey=ry0; ] { tmp:4 = sext(PXPART) - sext(PYPART); tmp = tmp s>> 1; erd0[16,16] = tmp:2; tmp = sext(PXPART) + sext(PYPART); tmp = tmp s>> 1; erd0[0,16] = tmp:2; } :PSUBADDS.UH erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x89 & PXPART & PYPART & erd0 { tmpx:2 = PXPART; tmpy:2 = PYPART; tmpd:4 = 0; satsubuh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; satadduh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PSUBADDS.SH erd0, rx9^PXPART, ry0^PYPART is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop6_10=0x87 & PXPART & PYPART & erd0 { tmpx:2 = PXPART; tmpy:2 = PYPART; tmpd:4 = 0; satsubsh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; sataddsh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PSUBH.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x237 & erd0 { tmpx:2 = zext(rx9[24,8]); tmpy:2 = zext(ry0[24,8]); tmpd:2 = tmpx - tmpy; tmpd = tmpd >> 1; erd0[24,8] = tmpd:1; tmpx = zext(rx9[16,8]); tmpy = zext(ry0[16,8]); tmpd = tmpx - tmpy; tmpd = tmpd >> 1; erd0[16,8] = tmpd:1; tmpx = zext(rx9[8,8]); tmpy = zext(ry0[8,8]); tmpd = tmpx - tmpy; tmpd = tmpd >> 1; erd0[8,8] = tmpd:1; tmpx = zext(rx9[0,8]); tmpy = zext(ry0[0,8]); tmpd = 
tmpx - tmpy; tmpd = tmpd >> 1; erd0[0,8] = tmpd:1; } :PSUBH.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x20d & erd0 { tmpx:4 = sext(rx9[16,16]); tmpy:4 = sext(ry0[16,16]); tmpd:4 = tmpx - tmpy; tmpd = tmpd s>> 1; erd0[16,16] = tmpd:2; tmpx = sext(rx9[0,16]); tmpy = sext(ry0[0,16]); tmpd = tmpx - tmpy; tmpd = tmpd s>> 1; erd0[0,16] = tmpd:2; } :PSUBS.UB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x235 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:2 = 0; satsubub(tmpx,tmpy,tmpd); erd0[24,8] = tmpd:1; tmpx = rx9[16,8]; tmpy = ry0[16,8]; satsubub(tmpx,tmpy,tmpd); erd0[16,8] = tmpd:1; tmpx = rx9[8,8]; tmpy = ry0[8,8]; satsubub(tmpx,tmpy,tmpd); erd0[8,8] = tmpd:1; tmpx = rx9[0,8]; tmpy = ry0[0,8]; satsubub(tmpx,tmpy,tmpd); erd0[0,8] = tmpd:1; } :PSUBS.SB erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x233 & erd0 { tmpx:1 = rx9[24,8]; tmpy:1 = ry0[24,8]; tmpd:2 = 0; satsubsb(tmpx,tmpy,tmpd); erd0[24,8] = tmpd:1; tmpx = rx9[16,8]; tmpy = ry0[16,8]; satsubsb(tmpx,tmpy,tmpd); erd0[16,8] = tmpd:1; tmpx = rx9[8,8]; tmpy = ry0[8,8]; satsubsb(tmpx,tmpy,tmpd); erd0[8,8] = tmpd:1; tmpx = rx9[0,8]; tmpy = ry0[0,8]; satsubsb(tmpx,tmpy,tmpd); erd0[0,8] = tmpd:1; } :PSUBS.UH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x209 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:4 = 0; satsubuh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[0,16]; satsubuh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PSUBS.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x205 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[16,16]; tmpd:4 = 0; satsubsh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[0,16]; satsubsh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PSUBX.H erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x203 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[0,16]; tmpd:2 = tmpx - tmpy; erd0[16,16] = tmpd; tmpx = rx9[0,16]; tmpy = ry0[16,16]; tmpd = 
tmpx - tmpy; erd0[0,16] = tmpd; } :PSUBXH.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x20f & erd0 { tmpx:4 = sext(rx9[16,16]); tmpy:4 = sext(ry0[0,16]); tmpd:4 = tmpx - tmpy; tmpd = tmpd s>> 1; erd0[16,16] = tmpd:2; tmpx = sext(rx9[0,16]); tmpy = sext(ry0[16,16]); tmpd = tmpx - tmpy; tmpd = tmpd s>> 1; erd0[0,16] = tmpd:2; } :PSUBXS.UH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x20b & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[0,16]; tmpd:4 = 0; satsubuh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[16,16]; satsubuh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PSUBXS.SH erd0, rx9, ry0 is op13_3=0x7 & rx9 & op4_5=0x0 & ry0 ; eop4_12=0x207 & erd0 { tmpx:2 = rx9[16,16]; tmpy:2 = ry0[0,16]; tmpd:4 = 0; satsubsh(tmpx,tmpy,tmpd); erd0[16,16] = tmpd:2; tmpx = rx9[0,16]; tmpy = ry0[16,16]; satsubsh(tmpx,tmpy,tmpd); erd0[0,16] = tmpd:2; } :PUNPCKUB.H erd0, rs9^PUPART is op13_3=0x7 & rs9 & op0_9=0x0 ; eop5_11=0x124 & PUPART & erd0 { tmp:2 = PUPART; tmph:1 = tmp[8,8]; tmpl:1 = tmp[0,8]; tmp = zext(tmph); erd0[16,16] = tmp; tmp = zext(tmpl); erd0[0,16] = tmp; } :PUNPCKSB.H erd0, rs9^PUPART is op13_3=0x7 & rs9 & op0_9=0x0 ; eop5_11=0x125 & PUPART & erd0 { tmp:2 = PUPART; tmph:1 = tmp[8,8]; tmpl:1 = tmp[0,8]; tmp = sext(tmph); erd0[16,16] = tmp; tmp = sext(tmpl); erd0[0,16] = tmp; } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr32a_system_control.sinc ================================================ #--------------------------------------------------------------------- # 8.3.10 System/Control #--------------------------------------------------------------------- #--------------------------------------------------------------------- # SR{cond} - Set Register Conditionally # I. 
# NOTE(review): this file was flattened by the repo-dump extraction; the first
# line below is the tail of a comment block whose leading '#' markers were lost
# on the previous physical line — confirm against the original
# avr32a_system_control.sinc before compiling.
cond4 -> {eq,ne,hs,lo,ge,lt,mi,pl,ls,gt,le,hi,vs,vc,qs,al}
# d -> {0, 1, ..., 15}
#---------------------------------------------------------------------
# SR{cond} Format I
# Operation: if (cond4) Rd <- 1; else Rd <- 0;
# Syntax: SR{cond4} Rd
# 0101 1111 cccc dddd
# Set register conditionally: rd0 is cleared, then COND_4_4's p-code is built
# (presumably branching past the final store when the condition is false —
# COND_4_4 is defined elsewhere in this spec), then rd0 is set.
:SR^{COND_4_4} rd0 is op8_8=0x5f & rd0 & COND_4_4 { rd0 = 0; build COND_4_4; rd0 = 1; }
# Software breakpoint, modelled as an opaque trap() pcodeop.
:BREAKPOINT is op0_16=0xd673 { trap(); }
# Cache operation eop11_5 applied at address rd0 + disp0_11 (opaque cacheOp).
:CACHE rd0[disp0_11],eop11_5 is op4_12=0xF41 & rd0 ; disp0_11 & eop11_5 { tmpa:4 = disp0_11; tmpb:4 = eop11_5; cacheOp(rd0,tmpa,tmpb); }
# Clear status-register flag: clears bit imm4_5 of SR, then resyncs the
# individual flag variables from SR.
:CSRF imm4_5 is op9_7=0x6a & op0_4=0x3 & imm4_5 { tmp:4 = 1 << imm4_5; SR = SR & ~tmp; SRTOFLAGS(); }
# Copy SR bit imm4_5 into both C and Z, then write them back into SR.
:CSRFCZ imm4_5 is op9_7=0x68 & imm4_5 & op0_4=0x3 { tmp:1 = ((SR >> imm4_5) & 0x1) != 0; C = tmp; Z = tmp; CZTOSR(); }
# FRS: no p-code semantics modelled (empty body).
:FRS is op0_16=0xd743 { }
# Move system register to / from a general register.
:MFSR rd0,sysreg is op4_12=0xE1B & rd0 ; eop8_8=0 & sysreg { rd0 = sysreg; }
:MTSR sysreg,rs0 is op4_12=0xE3B & rs0 ; eop8_8=0 & sysreg { sysreg = rs0; }
# Debug-register moves, modelled with opaque pcodeops.
:MFDR rd0, dbgreg is op4_12=0xe5b & rd0 ; eop8_8=0x0 & dbgreg { tmp:1 = dbgreg; MoveFromDebugReg(rd0,tmp); }
:MTDR dbgreg, rs0 is op4_12=0xe7b & rs0 ; eop8_8=0x0 & dbgreg { tmp:1 = dbgreg; MoveToDebugReg(rs0,tmp); }
# MUSFR: copy low 4 bits of rs0 into SR[3:0], then resync the low flag vars.
:MUSFR rs0 is op4_12=0x5d3 & rs0 { SR = (SR & 0xFFFFFFF0) | (rs0 & 0xF); SRTOLOWFLAGS(); }
# MUSTR: read SR[3:0] into rd0.
:MUSTR rd0 is op4_12=0x5d2 & rd0 { rd0 = SR & 0xF; }
:NOP is op0_16=0xD703 {}
# I found gcc assembler will also use an add r0,r0 for a nop which is an all 0 opcode
:NOP is op0_16=0 {}
# PREF: cache-fetch hint at rp0 + sign-extended 16-bit displacement.
:PREF rp0[disp_16] is op4_12=0xf21 & rp0 ; disp_16 { tmpa:2 = disp_16; tmp:4 = rp0 + sext(tmpa); CacheFetch(tmp); }
# SLEEP mode operand; the eb7=1 variant additionally clears GM as a side effect.
SLGM: val is disp_8 [ val = disp_8 << 0; ] { export *[const]:1 val; }
SLGM: val is eb7=1 & disp_8 [ val = disp_8 << 0; ] { GM = 0; export *[const]:1 val; }
:SLEEP SLGM is op0_16=0xe9b0 ; eop8_8=0x0 & SLGM { doSleep(SLGM); }
# Set status-register flag: sets bit imm4_5 of SR, then resyncs the flag vars.
:SSRF imm4_5 is op9_7=0x69 & op0_4=0x3 & imm4_5 { tmp:4 = 1 << imm4_5; SR = SR | tmp; SRTOFLAGS(); }
# Memory-synchronisation barrier, modelled as an opaque pcodeop.
:SYNC eop0_8 is op0_16=0xebb0 ; eop8_8=0x0 & eop0_8 { tmp:1 = eop0_8; SynchMemory(tmp); }
# TLB read, modelled as an opaque pcodeop.
:TLBR is op0_16=0xd643 { ReadTLBEntry(); }
:TLBS is op0_16=0xd653 { SearchTLBEntry(); } :TLBW is op0_16=0xd663 { WriteTLBEntry(); } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8.ldefs ================================================ AVR8 with 16-bit word addressable code space AVR8 with 22-bit word addressable with EIND code space AVR8 for an Atmega 256 AVR8 for an Xmega ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8.opinion ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8.pspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8.sinc ================================================ # sleigh specification for the avr8 # # Currently designed for ATMega64 in non-ATmel103 configuration # - 0x20-0xff as IO memory, rather than 0x20-0x5f # # This is a ATMega64 with a 64k sized memory # Other parts available have a 4M sized memory so that stack # pointer would be three bytes instead define endian=little; # Declaring space to be word-sized... alternative is to do byte sized define alignment=2; # Force fusion of two byte operations in a row by decoding as words #@define FUSION "" #define where the IO space is mapped if not specified @ifndef IO_START @define IO_START "0x20" @define EIND "0x5c" @endif #define where the registers are located if not specified @ifndef REGISTER_SPACE @define REGISTER_SPACE "mem" @endif # NOTE: DATASIZE other than 2 is not supported yet # more changes to mem load/store are necessary @ifndef DATASIZE @define DATASIZE "2" @endif # mem space should really be the default, but the loading scripts will # prefer the code space as the default. By being explicit for every # instruction, we can eliminate the ambiguity for at least the # decompiler. 
None-the-less, other than when loading the binary into # Ghidra, it's still preferable to see the name of IO locations used, # rather than code addresses, so leave mem space as the default. define space code type=ram_space size=$(PCBYTESIZE) wordsize=2 default; define space register type=register_space size=2; define space mem type=ram_space size=$(DATASIZE) wordsize=1; # this is a byte address space that should be overlayed on top of the code space define space codebyte type=ram_space size=$(PCBYTESIZE) wordsize=1; # Using decimal rather than hex to match specs # General registers start at 0 in the iospace for earlier avr8 processors # In the Xmega line, they are not accessible in mem, and are in a register space # define $(REGISTER_SPACE) offset=0 size=1 [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 Xlo Xhi Ylo Yhi Zlo Zhi ]; define $(REGISTER_SPACE) offset=0 size=2 [ R1R0 R3R2 R5R4 R7R6 R9R8 R11R10 R13R12 R15R14 R17R16 R19R18 R21R20 R23R22 R25R24 X Y Z ]; define $(REGISTER_SPACE) offset=18 size=8 [ R25R24R23R22R21R20R19R18 ]; define $(REGISTER_SPACE) offset=0 size=8 [ R7R6R5R4R3R2R1R0 R15R14R13R12R11R10R9R8 ]; # Technically, the stack pointer is in the i/o space so should be addressable with the # rest of the i/o registers. However, Ghidra does not react well to the stack pointer # being indirectly addressable so we're making an exception. define register offset=0x3D size=1 [ SPL SPH ]; define register offset=0x3D size=2 [ SP ]; define register offset=0x42 size=$(PCBYTESIZE) [ PC ]; define register offset=0x80 size=1 [ Cflg Zflg Nflg Vflg Sflg Hflg Tflg Iflg SKIP ]; ##################################### # Some AVR processors may have different io layouts not just different io. # # AVR processors with more than 64 KiB of RAM make use of the RAMP- registers # to act as the high bits where the X, Y, or Z registers are used, or in direct # addressing instructions. 
# # TODO: incorporate the use of RAMPX, RAMPY, RAMPZ in the LD, ST instructions # ELPM, and LDS instructions use RAMPZ and RAMPD # # These IO registers need to be accessible to sleigh instruction PCODE # so they are defined here. The bulk of the IO registers are defined # as labels in the appropriate .pspec file. define mem offset=$(IO_START) size = 1 [ # IO_START + 0x00 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ # IO_START + 0x10 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ # IO_START + 0x20 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ # IO_START + 0x30 _ _ _ _ _ _ _ _ RAMPD RAMPX RAMPY RAMPZ _ _ _ SREG ]; # If the AVR processor has more than 128 KiB of ROM, the processor will support the EIND # register along with the EIJMP and EICALL extended instructions. @if HASEIND == "1" define mem offset=$(EIND) size=1 [ EIND ]; @endif ############################## # Define context bits define register offset=0x90 size=4 contextreg; define context contextreg useSkipCond = (0,0) noflow # =1 skip instruction if SKIP register is true # transient context phase = (1,1) # =0 check for useSkipCond, =1 parse instruction ; ## Following 8051 example rather than define bitrange # Works better as distinct variables @define Cflag "Cflg" @define Zflag "Zflg" @define Nflag "Nflg" @define Vflag "Vflg" @define Sflag "Sflg" @define Hflag "Hflg" @define Tflag "Tflg" @define Iflag "Iflg" define token opword (16) ophi16 = ( 0,15) ophi9 = ( 7,15) ophi8 = ( 8,15) ophi7 = ( 9,15) ophi6 = (10,15) ophi5 = (11,15) ophi4 = (12,15) ophi2 = (14,15) opbit13 = (13,13) opbit12 = (12,12) opbit10 = (10,10) opbit9 = ( 9, 9) opbit8 = ( 8, 8) opbit7 = ( 7, 7) opbit3 = ( 3, 3) opbit2 = ( 2, 2) opbit0 = ( 0, 0) oplow12 = ( 0,11) oplow12signed = ( 0,11) signed oplow4 = ( 0, 3) oplow3_flag = ( 0, 2) oplow3 = ( 0, 2) oplow2 = ( 0, 1) op1to3 = ( 1, 3) op2to3 = ( 2, 3) op3to7 = ( 3, 7) op4to8 = ( 4, 8) op4to6 = ( 4, 6) op4to6_flag = ( 4, 6) op6to7 = ( 6, 7) op8to10 = ( 8,10) op9to10 = ( 9,10) op10to11 = (10,11) RdHi = ( 4, 7) RdHi3 = ( 4, 6) RdFull = ( 
4, 8) RrHi = ( 0, 3) RrHi3 = ( 0, 2) RrLow = ( 0, 3) RrHiLowSel = ( 9, 9) Rdw2 = ( 4, 5) Rdw4 = ( 4, 7) Rrw4 = ( 0, 3) Rstq = ( 3, 3) RstPtr = ( 2, 3) op0to3 = ( 0, 3) op3to9signed = ( 3, 9) signed op4to7 = ( 4, 7) op8to11 = ( 8,11) ; define token immtok(16) next16 = (0,15) ; define token opfusion16(32) op1hi4 = (12,15) op2hi4 = (28,31) op1hi6 = (10,15) op2hi6 = (26,31) op1low4 = ( 0, 3) op2low4 = (16,19) op1bits0to3 = ( 0, 3) op2bits0to3 = (16,19) op1bits1to3 = ( 1, 3) op2bits1to3 = (17,19) op1bits4to8 = ( 4, 8) op2bits4to8 = (20,24) op1bits5to7 = ( 5, 7) op2bits5to7 = (21,23) op1bits5to8 = ( 5, 8) op2bits5to8 = (21,24) op1bits8to11 = ( 8,11) op2bits8to11 = (24,27) op1bit0 = ( 0, 0) op2bit0 = (16,16) op1bit4 = ( 4, 4) op2bit4 = (20,20) op1bit9 = ( 9, 9) op2bit9 = (25,25) op1RdPair = ( 5, 8) op1RdPairHi = ( 5, 7) op1RrPairLow = ( 1, 3) op1RrPairHi = ( 1, 3) op1RrPairSel = ( 9, 9) ; define token opfusion24(48) f3op1hi4 = (12,15) f3op2hi4 = (28,31) f3op3hi4 = (34,47) f3op1hi6 = (10,15) f3op2hi6 = (26,31) f3op3hi6 = (42,47) f3op1bits0to3 = ( 0, 3) f3op2bits0to3 = (16,19) f3op3bits0to3 = (32,35) f3op2bits4to7 = (20,23) f3op1bits5to7 = ( 5, 7) f3op3bits5to7 = (37,39) f3op1bits8to11 = ( 8,11) f3op2bits8to11 = (24,27) f3op1bit4 = ( 4, 4) f3op3bit4 = (36,36) f3op3bit8 = (40,40) f3op3bit9 = (41,41) f3op1RdPairHi = ( 5, 7) f3op2RdHi = (20,23) ; define token opfusionLdsw(64) # lds lds ldswop1hi7 = ( 9,15) ldswop2hi7 = (41,47) ldswop1low4 = ( 0, 3) ldswop2low4 = (32,35) ldswop1bits5to8 = ( 5, 8) ldswop2bits5to8 = (37,40) ldswop1bit4 = ( 4, 4) ldswop2bit4 = (36,36) ldswop1bit16 = (16,16) ldswop2bit16 = (48,48) ldswop1imm15 = (17,31) ldswop2imm15 = (49,63) ldswop1imm6 = (17,22) ldswop2imm6 = (49,54) ldswop1imm16 = (16,31) ldswop2imm16 = (48,63) ldswop1RdPair = ( 5, 8) stswop2RdPair = (37,40) ; attach variables [ oplow3_flag op4to6_flag ] [ Cflg Zflg Nflg Vflg Sflg Hflg Tflg Iflg ]; attach variables [ RdHi RrHi f3op2RdHi ] [ R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 Xlo Xhi Ylo Yhi 
Zlo Zhi ] ; attach variables [ RdHi3 RrHi3 ] [ R16 R17 R18 R19 R20 R21 R22 R23 ]; attach variables [ RrLow ] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 ]; attach variables [ RdFull ] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 Xlo Xhi Ylo Yhi Zlo Zhi ]; attach variables [ Rdw2 ] [ R25R24 X Y Z ]; attach variables [ Rstq ] [ Z Y ]; attach variables [ RstPtr ] [ Z _ Y X ]; attach variables [ Rdw4 Rrw4 op1RdPair ldswop1RdPair stswop2RdPair ] [ R1R0 R3R2 R5R4 R7R6 R9R8 R11R10 R13R12 R15R14 R17R16 R19R18 R21R20 R23R22 R25R24 X Y Z ]; attach variables [ op1RrPairLow ] [ R1R0 R3R2 R5R4 R7R6 R9R8 R11R10 R13R12 R15R14 ]; attach variables [ op1RrPairHi op1RdPairHi f3op1RdPairHi ] [ R17R16 R19R18 R21R20 R23R22 R25R24 X Y Z ]; RrFull: RrHi is RrHiLowSel=1 & RrHi { tmp:1 = RrHi; export tmp; } RrFull: RrLow is RrHiLowSel=0 & RrLow { tmp:1 = RrLow; export tmp; } # Alternative: try using some subcontructors op1RrPair: op1RrPairHi is op1RrPairSel=1 & op1RrPairHi { tmp:2 = op1RrPairHi; export tmp; } op1RrPair: op1RrPairLow is op1RrPairSel=0 & op1RrPairLow { tmp:2 = op1RrPairLow; export tmp; } # I'm uneasy at these... as they require the top of the stack # to know what size element to reserve before the push. # The docs should probably say that the top of the stack byte is unused... # # The processor is post-decremented, and because of the way the compiler # manipulates the stack pointer it's important to get this correct. 
@if PCBYTESIZE == "2"
# Return-address push/pop (2-byte PC).  The push decrements SP around the
# 2-byte store so SP ends up 2 below where it started (post-decrement stack);
# the pop mirrors that.
macro pushPC(val) { SP = SP - 1; *[mem]:2 SP = val; SP = SP - 1; }
macro popPC(val) { SP = SP + 1; val = *[mem]:2 SP; SP = SP + 1; }
@else # PCBYTESIZE == 3
# 3-byte PC variant: SP moves by 3 overall.
macro pushPC(val) { SP = SP - 2; *[mem]:3 SP = val; SP = SP - 1; }
macro popPC(val) { SP = SP + 1; val = *[mem]:3 SP; SP = SP + 2; }
@endif
# Byte push (store, then post-decrement) and pop (pre-increment, then load).
macro push8(val) { *[mem]:1 SP = val; SP = SP -1; }
macro pop8(val) { SP = SP + 1; val = *[mem]:1 SP; }
# .slaspec shortcoming: Hflag isn't computed for most results
# S = N xor V.
macro setSflag() { $(Sflag) = $(Nflag) ^ $(Vflag); }
# Common N/Z/S update from a result value (C/V/H are set by the callers).
macro setResultFlags(result) { $(Nflag) = (result s< 0); $(Zflag) = (result == 0x0); setSflag(); }
# res = pre - sub with full C/V/N/Z/S computation (used by cp/cpi and friends).
macro doSubtract(pre,sub,res) { local x = pre - sub; $(Vflag) = sborrow(pre,sub); $(Cflag) = (pre < sub); setResultFlags(x); $(Sflag) = pre s< sub; res = x; }
# res = pre - sub - C.  V and C are folded across both subtraction steps, and Z
# is only left set if it was already set (sticky Z, as AVR's CPC/SBC define it).
macro doSubtractWithCarry(pre,sub,res) { local partial = pre - sub; local subCarry = sub + $(Cflag); local x = pre - subCarry; local oldZflag = $(Zflag); $(Vflag) = sborrow(pre,sub) ^^ sborrow(partial, $(Cflag)); $(Cflag) = (pre < sub) || (partial < $(Cflag)); setResultFlags(x); $(Sflag) = $(Nflag)^$(Vflag); $(Zflag) = oldZflag & $(Zflag); res = x; }
# C = bit 15 of the 16-bit product, Z = (product == 0).
macro setMulFlags(res) { $(Cflag) = ((res & 0x8000) != 0); $(Zflag) = (res == 0); }
# Assemble the SREG byte from the individual flag registers and mirror it into SREG.
macro loadSREG(reg) { reg = (zext(Cflg==1) << 0) | (zext(Zflg==1) << 1) | (zext(Nflg==1) << 2) | (zext(Vflg==1) << 3) | (zext(Sflg==1) << 4) | (zext(Hflg==1) << 5) | (zext(Tflg==1)<<6) | (zext(Iflg==1) << 7); SREG = reg; }
# Scatter an SREG byte back into the individual flag registers.
macro storeSREG(val) { Cflg = ((val>> 0) & 1); Zflg = ((val>> 1) & 1); Nflg = ((val>> 2) & 1); Vflg = ((val>> 3) & 1); Sflg = ((val>> 4) & 1); Hflg = ((val>> 5) & 1); Tflg = ((val>> 6) & 1); Iflg = ((val>> 7) & 1); SREG = val; }
# Handle possible skip instruction
# This next line is a NOP except for the phase, which is never really checked.
# A better fix may be to use -l, and ensure phase=1 is checked on the base constructors.
:^instruction is phase=0 & useSkipCond=0 & instruction [ phase=1; ] { build instruction; } :^instruction is phase=0 & useSkipCond=1 & instruction [ phase=1; ] { if (SKIP) goto inst_next; build instruction; } # K8 is immediate for Rd,K8 forms K8: val is op0to3 & op8to11 [ val = (op8to11 << 4) | op0to3; ] { tmp:1 = val; export tmp; } @ifdef FUSION K16fuse: val is op1bits0to3 & op1bits8to11 & op2bits0to3 & op2bits8to11 [ val = (((op2bits8to11 << 4) | op2bits0to3) << 8) | ((op1bits8to11 << 4) | op1bits0to3); ] { tmp:2 = val; export tmp; } f3cmpK16: val is f3op1bits0to3 & f3op1bits8to11 & f3op2bits0to3 & f3op2bits8to11 [ val = (((f3op2bits8to11 << 4) | f3op2bits0to3) << 8) | ((f3op1bits8to11 << 4) | f3op1bits0to3); ] { tmp:2 = val; export tmp; } f3cmpK8: val is f3op2bits0to3 & f3op2bits8to11 [ val = (f3op2bits8to11 << 4) | f3op2bits0to3; ] { tmp:1 = val; export tmp; } @endif rel7addr: rel is op3to9signed [ rel = (op3to9signed + inst_next);] { export *[code]:2 rel; } rel7dst: byteOffset is op3to9signed & rel7addr [ byteOffset = (op3to9signed + inst_next) << 1;] { export rel7addr; } rel12addr: rel is oplow12signed [ rel = oplow12signed + inst_start + 1; ] { export *[code]:2 rel; } rel12dst: byteOffset is oplow12signed & rel12addr [ byteOffset = (oplow12signed + inst_start + 1) << 1; ] { export rel12addr; } abs22addr: loc is op4to8 & opbit0; next16 [ loc = (op4to8 << 17) | (opbit0 << 16) | next16; ] { export *[code]:2 loc; } abs22dst: byteOffset is (op4to8 & opbit0; next16) & abs22addr [ byteOffset = ((op4to8 << 17) | (opbit0 << 16) | next16) << 1; ] { export abs22addr; } next16memPtrVal1: next16 is next16 { export *[mem]:1 next16; } @if DATASIZE == "3" next24constVal: next16 is next16 { export *[const]:$(DATASIZE) next16; } @endif @ifdef FUSION ldswMemPtrVal2: ldswop1imm16 is ldswop1imm16 { export *[mem]:2 ldswop1imm16; } stswMemPtrVal2: ldswop2imm16 is ldswop2imm16 { export *[mem]:2 ldswop2imm16; } @endif # K6 is used in dword operation K6: val is oplow4 & op6to7 [ val = 
(op6to7 << 4) | oplow4; ] { tmp:1 = val; export tmp; } # K7 is used by lds K7addr: val is oplow4 & op9to10 & opbit8 [ val = ((1 ^ opbit8) << 7) | (opbit8 << 6) | (op9to10 << 4) | oplow4; ] { export *[mem]:1 val; } # Join against various spaces for dataspace... # ##################################################################################### # COMMENTING OUT BECAUSE "subtable symbol K7addr is not allowed in context block" #K7Ioaddr: val is K7addr [ val = K7addr - 0x20; ] { tmp:1 = val; export tmp; } # ##################################################################################### # COMMENTING OUT BECAUSE "Subtable symbol K7Ioaddr is not allowed in context block" #A7Ioaddr: val is K7Ioaddr [ val = (K7Ioaddr | 0x00) + 0x20 ; ] { export *[mem]:1 val; } Aio6: val is oplow4 & op9to10 [ val = ((op9to10 << 4) | oplow4) + $(IO_START); ] { export *[mem]:1 val; } Aio5: val is op3to7 [ val = (op3to7 | 0x00) + $(IO_START); ] { export *[mem]:1 val; } q6: val is oplow3 & op10to11 & opbit13 [ val = (opbit13 << 5) | (op10to11 << 3) | oplow3; ] { tmp:1 = val; export tmp; } @ifdef FUSION # Predicates to verify that fusion will be valid here. # We just want to construct these. 
The rules are not null to avoid a NOP bug with sleigh fusion16rrrrPred: val is op1bit0=0 & op2bit0=1 & op1bit4=0 & op2bit4=1 & op1bit9=op2bit9 & op1bits5to8=op2bits5to8 & op1bits1to3=op2bits1to3 [ val = 0; ] { tmp:2=val; export tmp; } fusion16rkrkPred: val is op1bits5to7=op2bits5to7 & op1bit4=0 & op2bit4=1 [ val=0; ] { tmp:2 = val; export tmp; } f3cmpPairPred: val is f3op1bits5to7=f3op3bits5to7 & f3op1bit4=0 & f3op3bit4=1 & f3op3bit8=1 [ val=0; ] { tmp:2 = val; export tmp; } f3cmpLdiPred: val is f3op3bit9=1 & f3op3bits0to3=f3op2bits4to7 [ val=0; ] { tmp:2 = val; export tmp; } ldswPairPred: val is ldswop1bit4=0 & ldswop2bit4=1 & ldswop1bits5to8=ldswop2bits5to8 [ val=0; ] { tmp:2 = val; export tmp; } stswPairPred: val is ldswop1bit4=1 & ldswop2bit4=0 & ldswop1bits5to8=ldswop2bits5to8 [ val=0; ] { tmp:2 = val; export tmp; } # would like to check this for const pair, but hangs sleigh compiler: ldswop1imm15=ldswop2imm15 # So check as a few in a row # Not any better & ldswop1imm5b=ldswop2imm5b & ldswop1imm5c=ldswop2imm5c ldswConstPairPred: val is ldswop1bit16=0 & ldswop2bit16=1 & ldswop1imm6=ldswop2imm6 [ val=0; ] { tmp:2 = val; export tmp; } stswConstPairPred: val is ldswop1bit16=1 & ldswop2bit16=0 & ldswop1imm6=ldswop2imm6 [ val=0; ] { tmp:2 = val; export tmp; } @endif define pcodeop todo; define pcodeop todoflow; define pcodeop todoflags; define pcodeop todotst; define pcodeop break; @ifdef FUSION # add followed by adc :addw op1RdPair,op1RrPair is phase=1 & op1hi6=0x3 & op2hi6=0x7 & op1RdPair & op1RrPair & fusion16rrrrPred { local pre = op1RdPair; local post = op1RdPair + op1RrPair; $(Cflag) = carry(op1RdPair,op1RrPair); $(Vflag) = scarry(pre,op1RrPair); op1RdPair = post; setResultFlags(post); } @endif # Rd,Rr :adc RdFull,RrFull is phase=1 & ophi6=0x7 & RdFull & RrFull { local res = RdFull + RrFull + $(Cflag); $(Cflag) = carry(RdFull, RrFull) || carry(RdFull + RrFull, $(Cflag)); $(Vflag) = scarry(RdFull, RrFull) ^^ scarry(RdFull + RrFull, $(Cflag)); 
setResultFlags(res); RdFull = res; } # Rd,Rr :add RdFull,RrFull is phase=1 & ophi6=0x3 & RdFull & RrFull { local res = RdFull + RrFull; $(Cflag) = carry(RdFull,RrFull); $(Vflag) = scarry(RdFull,RrFull); setResultFlags(res); RdFull = res; } # adiw Rd+1:Rd,K6 :adiw Rdw2,K6 is phase=1 & ophi8=0x96 & Rdw2 & K6 { local pre = Rdw2; Rdw2 = Rdw2 + zext(K6); $(Cflag) = carry(pre,zext(K6)); $(Vflag) = scarry(Rdw2,zext(K6)); setResultFlags(Rdw2); } # and Rd,Rr :and RdFull,RrFull is phase=1 & ophi6=8 & RdFull & RrFull { RdFull = RdFull & RrFull; $(Vflag) = 0; setResultFlags(RdFull); } # andi Rd,K :andi RdHi,K8 is phase=1 & ophi4=7 & RdHi & K8 { RdHi = RdHi & K8; $(Vflag) = 0; setResultFlags(RdHi); } # asr Rd :asr RdFull is phase=1 & ophi7=0x4a & oplow4=0x5 & RdFull { #done $(Cflag) = RdFull & 0x01; RdFull = RdFull s>> 1; $(Nflag) = (RdFull & 0x80) == 0x80; $(Vflag) = $(Nflag) ^ $(Cflag); setResultFlags(RdFull); } # bclr s :bclr op4to6_flag is phase=1 & ophi9=0x129 & oplow4=0x4 & op4to6_flag { #done op4to6_flag = 0; } # bld Rd,b :bld RdFull,oplow3 is phase=1 & ophi7=0x7c & opbit3=0 & RdFull & oplow3 { local b = $(Tflag) << oplow3; local mask = 0xff ^ (1 << oplow3); RdFull = (RdFull & mask) | b; } # brbc s,k :brbc rel7dst,oplow3_flag is phase=1 & ophi6=0x3d & rel7dst & oplow3_flag { if (!oplow3_flag) goto rel7dst; } # brbs s,k (see prev instruction) :brbs rel7dst,oplow3_flag is phase=1 & ophi6=0x3c & rel7dst & oplow3_flag { if (oplow3_flag) goto rel7dst; } # brcs and brcc seem to be special cases of brbs :break is phase=1 & ophi16=0x9598 { break(); } # Probably want to check for various decode logic for conditional branches... # ... 
specifically BRBS 1,k # breq k - really is BRBS 1,k # bset s :bset op4to6_flag is phase=1 & ophi9=(0x94<<1) & oplow4=0x8 & op4to6_flag { op4to6_flag = 1; } # bst Rd,b :bst RdFull,oplow3 is phase=1 & ophi7=0x7d & opbit3=0 & RdFull & oplow3 { $(Tflag) = (RdFull >> oplow3) & 0x01; } # call k - todo - handle upper bits for 24 bit architecture :call abs22dst is phase=1 & (ophi7=0x4a & op1to3=0x7) ... & abs22dst { tmp:$(PCBYTESIZE) = inst_next >> 1; pushPC(tmp); PC = &abs22dst; call abs22dst; } # cbi A,b :cbi Aio5,oplow3 is phase=1 & ophi8=0x98 & Aio5 & oplow3 { local x = Aio5; x = x & (0xff ^ (1 << oplow3)); Aio5 = x; } # cbr - not actual instruction # clc, clh, cli, cln ... variants on register clearing # sub bits give which bits in SREG to clear :clc is phase=1 & ophi16=0x9488 { $(Cflag) = 0; } :clh is phase=1 & ophi16=0x94d8 { $(Hflag) = 0; } :cli is phase=1 & ophi16=0x94f8 { $(Iflag) = 0; } :cln is phase=1 & ophi16=0x94a8 { $(Nflag) = 0; } :cls is phase=1 & ophi16=0x94c8 { $(Sflag) = 0; } :clt is phase=1 & ophi16=0x94e8 { $(Tflag) = 0; } :clv is phase=1 & ophi16=0x94b8 { $(Vflag) = 0; } :clz is phase=1 & ophi16=0x9498 { $(Zflag) = 0; } # clr Rd - really is EOR Rd, Rd :com RdFull is phase=1 & ophi7=0x4a & RdFull { RdFull = ~RdFull; $(Vflag) = 0; $(Cflag) = 1; setResultFlags(RdFull); } :cp RdFull,RrFull is phase=1 & ophi6=0x05 & RdFull & RrFull { local x = RdFull - RrFull; $(Cflag) = (RdFull < RrFull); $(Vflag) = sborrow(RdFull,RrFull); setResultFlags(x); # but doesn't set result into a register } :cpc RdFull,RrFull is phase=1 & ophi6=0x1 & RdFull & RrFull { local res = 0; doSubtractWithCarry(RdFull,RrFull,res); res = res; # avoid warning } :cpi RdHi,K8 is phase=1 & ophi4=0x3 & RdHi & K8 { local res = 0; doSubtract(RdHi,K8,res); res = res; # avoid warning } @ifdef FUSION # cpi; ldi; cpc sequence :cpiw f3op1RdPairHi,f3cmpK16" ;ldi "f3op2RdHi,f3cmpK8 is phase=1 & f3op1hi4=0x3 & f3op2hi4=0xe & f3op3hi6=0x1 & f3cmpPairPred & f3cmpLdiPred & f3op1RdPairHi & f3op2RdHi & 
f3cmpK16 & f3cmpK8 { local res = 3; doSubtract(f3op1RdPairHi,f3cmpK16,res); f3op2RdHi = f3cmpK8; }
# cp; cpc sequence fused into one 16-bit compare.
# FIX: the mandatory 'is' keyword was misplaced inside the pattern
# ("... op1RrPair phase=1 & is op1hi6=...").  SLEIGH requires 'is' between the
# display section and the bit-pattern section of a constructor.
:cpw op1RdPair,op1RrPair is phase=1 & op1hi6=0x5 & op2hi6=0x1 & fusion16rrrrPred & op1RdPair & op1RrPair { local res = op1RdPair - op1RrPair; $(Vflag) = sborrow(op1RdPair,op1RrPair); $(Cflag) = (op1RdPair < op1RrPair); setResultFlags(res); $(Sflag) = op1RdPair s< op1RrPair; }
@endif
# cpse: compare, and arm the skip machinery — the next instruction decodes with
# useSkipCond=1 and is conditionally skipped by the phase=0 wrapper constructors.
:cpse RdFull,RrFull is phase=1 & ophi6=0x4 & RdFull & RrFull [ useSkipCond=1; globalset(inst_next,useSkipCond); ] { SKIP = (RdFull == RrFull); }
:dec RdFull is phase=1 & ophi7=0x4a & oplow4=0xa & RdFull {
	# doesn't set the C flag
	$(Vflag) = (RdFull == 0x80);
	RdFull = RdFull - 1;
	setResultFlags(RdFull);
}
define pcodeop encrypt;
define pcodeop decrypt;
# des: DES round op4to7 on the R15..R0 block; Hflg selects decryption (set)
# vs encryption (clear).
# FIX: the local branch labels were stripped by the text extraction, leaving
# two bare "goto ;" statements; restore labels so the Hflg branch is well-formed.
:des op4to7 is phase=1 & ophi8=0x94 & oplow4=0xb & op4to7 {
	val:1 = op4to7;
	local key:8 = R15R14R13R12R11R10R9R8;
	local result:16 = 0;
	if (!Hflg) goto <do_encrypt>;
	result = decrypt(R7R6R5R4R3R2R1R0, key, val);
	goto <des_done>;
	<do_encrypt>
	result = encrypt(R7R6R5R4R3R2R1R0, key, val);
	<des_done>
	R7R6R5R4R3R2R1R0 = result(0);
	R15R14R13R12R11R10R9R8 = result(8);
}
@if HASEIND == "1"
# Extended indirect call/jump: target word address comes from EIND:Z.
:eicall is phase=1 & ophi16=0x9519 { ptr:$(PCBYTESIZE) = inst_next >> 1; pushPC(ptr); PC = zext(Z) | (zext(EIND) << 16); call [PC]; }
:eijmp is phase=1 & ophi16=0x9419 { PC = zext(Z) | (zext(EIND) << 16); goto [PC]; }
@endif
@if PCBYTESIZE == "3"
# elpm: extended load from program memory at byte address RAMPZ:Z.  The unused
# falseRead keeps a reference into the word-addressed code space for analysis.
# NOTE(review): this first variant does a 2-byte dummy read while the other two
# read 1 byte — looks inconsistent but is harmless since falseRead is unused.
:elpm is phase=1 & ophi16=0x95d8 { ptr:3 = zext(Z) | (zext(RAMPZ) << 16); local falseRead:1 = *[code]:2 (ptr >> 1); R0 = *[codebyte]:1 ptr; }
:elpm RdFull, Z is phase=1 & ophi7=0x48 & oplow4=0x6 & RdFull & Z { ptr:3 = zext(Z) | (zext(RAMPZ) << 16); local falseRead:1 = *[code]:1 (ptr >> 1); RdFull = *[codebyte]:1 ptr; }
ElpmPlus: Z^"+" is Z {}
# elpm Rd, Z+ : post-increment writes back both Z and RAMPZ.
:elpm RdFull, ElpmPlus is phase=1 & ophi7=0x48 & oplow4=0x7 & RdFull & ElpmPlus { ptr:3 = zext(Z) | (zext(RAMPZ) << 16); local falseRead:1 = *[code]:1 (ptr >> 1); RdFull = *[codebyte]:1 ptr; ptr = ptr + 1; Z = ptr:2; RAMPZ = ptr[16,8]; }
@endif
:eor RdFull,RrFull is phase=1 & ophi6=0x9 & RdFull & RrFull { 
RdFull = RdFull ^ RrFull; $(Vflag) = 0; setResultFlags(RdFull); } # Manual uses fmul. I prefer fracmul to distinguish from floating point :fracmul RdHi,RrHi is phase=1 & ophi9=0x6 & opbit3=1 & RdHi & RrHi { todo(); } :fracmuls RdHi,RrHi is phase=1 & ophi9=0x7 & opbit3=0 & RdHi & RrHi { todo(); } :fracmulsu RdHi,RrHi is phase=1 & ophi9=0x7 & opbit3=1 & RdHi & RrHi { todo(); } :icall is phase=1 & ophi16=0x9509 { ptr:$(PCBYTESIZE) = inst_next >> 1; pushPC(ptr); PC = zext(Z); call [PC]; } :ijmp is phase=1 & ophi16=0x9409 { PC = zext(Z); goto [PC]; } # in Rd,A :in RdFull,Aio6 is phase=1 & ophi5=0x16 & RdFull & Aio6 { RdFull = Aio6; } :in RdFull,SPL is phase=1 & ophi5=0x16 & RdFull & op9to10=3 & oplow4=0xd & SPL { RdFull = SPL; } :in RdFull,SPH is phase=1 & ophi5=0x16 & RdFull & op9to10=3 & oplow4=0xe & SPH { RdFull = SPH; } :in RdFull,SREG is phase=1 & ophi5=0x16 & RdFull & op9to10=3 & oplow4=0xf & SREG { loadSREG(RdFull); } :inc RdFull is phase=1 & ophi7=0x4a & oplow4=0x3 & RdFull { # inc doesn't set the C flag. $(Vflag) = RdFull == 0x7f; RdFull = RdFull + 1; setResultFlags(RdFull); } :jmp abs22dst is phase=1 & (ophi7=0x4a & op1to3=0x6) ... 
& abs22dst { PC = &abs22dst; goto abs22dst; } :lac Z,RdFull is phase=1 & ophi7=0x49 & oplow4=0x6 & Z & RdFull { tmp:1 = *[mem]:1 Z; tmp = tmp & (0xff - RdFull); *[mem]:1 Z = tmp; RdFull = tmp; } :las Z,RdFull is phase=1 & ophi7=0x49 & oplow4=0x5 & Z & RdFull { tmp:1 = *[mem]:1 Z; tmp = tmp | RdFull; *[mem]:1 Z = tmp; RdFull = tmp; } :lat Z,RdFull is phase=1 & ophi7=0x49 & oplow4=0x7 & Z & RdFull { tmp:1 = *[mem]:1 Z; tmp = tmp ^ RdFull; *[mem]:1 Z = tmp; RdFull = tmp; } # three forms, really just specifying the increment mode # ld Rd,X :ld RdFull,X is phase=1 & ophi7=0x48 & oplow4=0xc & X & RdFull { tmp:2 = X; RdFull = *[mem]:1 tmp; } # ld Rd,Y; ld Rd,Z # Special case of ldd +q below - will conflict with -i sleigh compile :ld RdFull,RstPtr is phase=1 & ophi7=0x40 & oplow3=0x0 & RdFull & RstPtr { tmp:2 = RstPtr; RdFull = *[mem]:1 tmp; } # ld Rd,Y+ ; ld Rd, X+; ld Rd, Z+ LdPlus: RstPtr^"+" is RstPtr { tmp:2 = RstPtr; RstPtr = RstPtr + 0x01; export tmp; } :ld RdFull,LdPlus is phase=1 & ophi7=0x48 & oplow2=0x01 & RdFull & LdPlus { RdFull = *[mem]:1 LdPlus; } # ld Rd,-Y ; ld Rd, -X; ld Rd, -Z LdPredec: "-"^RstPtr is RstPtr { RstPtr = RstPtr - 0x01; export RstPtr; } :ld RdFull,LdPredec is phase=1 & ophi7=0x48 & oplow2=0x02 & RdFull & LdPredec { tmp:2 = LdPredec; RdFull = *[mem]:1 tmp; } @ifndef AVTINY # ldd Rd,Y+q # ldd Rd,Z+q LddYZq: Rstq^"+"^q6 is phase=1 & Rstq & q6 { local ptr = Rstq + zext(q6); export ptr; } :ldd RdFull,LddYZq is phase=1 & ophi2=0x2 & opbit12=0 & opbit9=0 & opbit3 & LddYZq & RdFull { RdFull = *[mem]:1 LddYZq; } @endif # Rd,K :ldi RdHi,K8 is phase=1 & ophi4=0xe & RdHi & K8 { RdHi = K8; } @ifdef AVTINY # lds Rd,k :lds RdHi,K7addr is phase=1 & ophi5=0x14 & RdHi & K7addr { RdHi = K7addr; } @elif DATASIZE == "2" # lds Rd,k :lds RdFull,next16memPtrVal1 is phase=1 & ophi7=0x48 & oplow4=0 & RdFull; next16memPtrVal1 { RdFull = next16memPtrVal1; } @else :lds RdFull,next24constVal is phase=1 & ophi7=0x48 & oplow4=0 & RdFull; next24constVal { local 
loc:$(DATASIZE) = (zext(RAMPD) << 16 | next24constVal); RdFull = *[mem]:1 loc; } @endif @ifdef FUSION # Fuse together consecuitive lds ; lds # :ldsw ldswop1RdPair,ldswMemPtrVal2 is phase=1 & ldswop1hi7=0x48 & ldswop2hi7=0x48 & ldswop1low4=0 & ldswop2low4=0 & ldswMemPtrVal2 & ldswop1RdPair & ldswPairPred & ldswConstPairPred { ldswop1RdPair = ldswMemPtrVal2; } @endif # lpm R0 :lpm R0 is phase=1 & ophi16=0x95c8 & R0 { ptr:$(PCBYTESIZE) = zext(Z); local falseRead:1 = *[code]:1 (ptr >> 1); R0 = *[codebyte]:$(PCBYTESIZE) ptr; } # lpm Rd,Z :lpm RdFull,Z is phase=1 & ophi7=0x48 & op1to3=0x2 & RdFull & Z & opbit0=0 { ptr:$(PCBYTESIZE) = zext(Z); local falseRead:1 = *[code]:1 (ptr >> 1); RdFull = *[codebyte]:$(PCBYTESIZE) ptr; } # lpm Rd,Z+ LpmPlus: Z^"+" is Z {} :lpm RdFull,LpmPlus is phase=1 & ophi7=0x48 & op1to3=0x2 & RdFull & LpmPlus & opbit0=1 { ptr:$(PCBYTESIZE) = zext(Z); local falseRead:1 = *[code]:1 (ptr >> 1); RdFull = *[codebyte]:$(PCBYTESIZE) ptr; Z = Z + 1; } # lsl - just an assembly mnemonic for add :lsr RdFull is phase=1 & ophi7=0x4a & oplow4=0x6 & RdFull { $(Cflag) = RdFull & 0x01; RdFull = (RdFull >> 1); $(Vflag) = $(Cflag); setResultFlags(RdFull); } # mov Rd,Rr :mov RdFull,RrFull is phase=1 & ophi6=0xb & RdFull & RrFull { RdFull = RrFull; } # movw Rd+1:Rd,Rr+1Rr :movw Rdw4,Rrw4 is phase=1 & ophi8=0x1 & Rdw4 & Rrw4 { Rdw4 = Rrw4; } :mul RdFull,RrFull is phase=1 & ophi6=0x27 & RdFull & RrFull { a:2 = zext(RdFull); b:2 = zext(RrFull); R1R0 = a * b; setMulFlags(R1R0); } :muls RdHi,RrHi is phase=1 & ophi8=0x2 & RdHi & RrHi { a:2 = sext(RdHi); b:2 = sext(RrHi); R1R0 = a * b; setMulFlags(R1R0); } :mulsu RdHi3,RrHi3 is phase=1 & ophi8=0x3 & opbit7=0 & opbit3=0 & RdHi3 & RrHi3 { a:2 = sext(RdHi3); b:2 = zext(RrHi3); R1R0 = a * b; setMulFlags(R1R0); } :neg RdFull is phase=1 & ophi7=0x4a & oplow4=1 & RdFull { RdFull = -RdFull; $(Vflag) = (RdFull == 0x80); $(Cflag) = (RdFull != 0); setResultFlags(RdFull); } :nop is phase=1 & ophi16=0x0 { } :or RdFull,RrFull is phase=1 
& ophi6=0xa & RdFull & RrFull { RdFull = RdFull | RrFull; $(Vflag) = 0; setResultFlags(RdFull); } :ori RdHi,K8 is phase=1 & ophi4=0x6 & RdHi & K8 { RdHi = RdHi | K8; $(Vflag) = 0; setResultFlags(RdHi); } # out A,Rr # Note: Rr occupies the normal Rd position :out Aio6,RdFull is phase=1 & ophi5=0x17 & Aio6 & RdFull { Aio6 = RdFull; } :out SPL,RdFull is phase=1 & ophi5=0x17 & RdFull & op9to10=3 & oplow4=0xd & SPL { SPL = RdFull; } :out SPH,RdFull is phase=1 & ophi5=0x17 & RdFull & op9to10=3 & oplow4=0xe & SPH { SPH = RdFull; } :out SREG,RdFull is phase=1 & ophi5=0x17 & RdFull & op9to10=3 & oplow4=0xf & SREG { storeSREG(RdFull); } :pop RdFull is phase=1 & ophi7=0x48 & oplow4=0xf & RdFull { pop8(RdFull); } # push Rf # Note: Rr occupies the normal Rd position :push RdFull is phase=1 & ophi7=0x49 & oplow4=0xf & RdFull { push8(RdFull); } # rcall . is used by the compiler to create space on the stack :rcall "." is phase=1 & ophi4=0xd & oplow12=0 { ptr:$(PCBYTESIZE) = inst_next >> 1; pushPC(ptr); } :rcall rel12dst is phase=1 & ophi4=0xd & rel12dst { ptr:$(PCBYTESIZE) = inst_next >> 1; pushPC(ptr); PC = &rel12dst; call rel12dst; } :ret is phase=1 & ophi16=0x9508 { # Could also handle word size options here popPC(PC); return [PC]; } :reti is phase=1 & ophi16=0x9518 { $(Iflag) = 1; popPC(PC); return [PC]; } # rjmp k :rjmp rel12dst is phase=1 & ophi4=0xc & rel12dst { goto rel12dst; } # ROL is ADC Rd,Rd :ror RdFull is phase=1 & ophi7=0x4a & oplow4=0x7 & RdFull { local c = $(Cflag); local cnew = RdFull & 0x01; RdFull = (c << 7) | (RdFull >> 1); $(Cflag) = cnew; $(Nflag) = (RdFull & 0x80) == 0x80; $(Vflag) = $(Cflag) ^ $(Nflag); setResultFlags(RdFull); } :sbc RdFull,RrFull is phase=1 & ophi6=0x2 & RdFull & RrFull { doSubtractWithCarry(RdFull,RrFull,RdFull); } :sbci RdHi,K8 is phase=1 & ophi4=4 & RdHi & K8 { doSubtractWithCarry(RdHi,K8,RdHi); } @ifdef FUSION # subi sbci :subiw op1RdPairHi,K16fuse is phase=1 & op1hi4=0x5 & op2hi4=0x4 & K16fuse & fusion16rkrkPred & op1RdPairHi { # 
doSubtract(op1RdPairHi,K16fuse,op1RdPairHi); local res = op1RdPairHi - K16fuse; local pre = op1RdPairHi; $(Vflag) = sborrow(pre,K16fuse); $(Cflag) = (op1RdPairHi < K16fuse); op1RdPairHi = res; setResultFlags(res); $(Sflag) = pre s< K16fuse; } @endif :sbi Aio5,oplow3 is phase=1 & ophi8=0x9a & Aio5 & oplow3 { Aio5 = Aio5 | (1 << oplow3); } :sbic Aio5,oplow3 is phase=1 & ophi8=0x99 & Aio5 & oplow3 [ useSkipCond=1; globalset(inst_next,useSkipCond); ] { SKIP = ((Aio5 & (1 << oplow3)) == 0); } :sbis Aio5,oplow3 is phase=1 & ophi8=0x9b & Aio5 & oplow3 [ useSkipCond=1; globalset(inst_next,useSkipCond); ] { SKIP = ((Aio5 & (1 << oplow3)) != 0); } :sbiw Rdw2,K6 is phase=1 & ophi8=0x97 & Rdw2 & K6 { local pre = Rdw2; Rdw2 = Rdw2 - zext(K6); $(Cflag) = (pre < zext(K6)); $(Vflag) = sborrow(pre,zext(K6)); setResultFlags(Rdw2); } # sbr is an alias for ori :sbrc RdFull,oplow3 is phase=1 & ophi7=0x7e & opbit3=0 & RdFull & oplow3 [ useSkipCond=1; globalset(inst_next,useSkipCond); ] { SKIP = ((RdFull & (1 << oplow3)) == 0); } :sbrs RdFull,oplow3 is phase=1 & ophi7=0x7f & opbit3=0 & RdFull & oplow3 [ useSkipCond=1; globalset(inst_next,useSkipCond); ] { SKIP = ((RdFull & (1 << oplow3)) != 0); } # More flag setting sec, seh, sei, sen, ses, set, sev, sez # Implemented as bset :ser RdHi is phase=1 & ophi8=0xef & oplow4=0xf & RdHi { RdHi = 0xff; } define pcodeop sleep; :sleep is phase=1 & ophi16=0x9588 { sleep(); } define pcodeop store_program_mem; # make this stand out. 
:spm Z is phase=1 & ophi16=0x95e8 & Z { ptr:$(PCBYTESIZE) = zext(Z) << 1; local falseWrite:1 = *[code]:1 (ptr >> 1); *[codebyte]:$(PCBYTESIZE) ptr = R1R0; store_program_mem(); } SpmPlus: Z^"+" is Z {} :spm SpmPlus is phase=1 & ophi16=0x95f8 & SpmPlus { ptr:$(PCBYTESIZE) = zext(Z) << 1; local falseWrite:1 = *[code]:1 (ptr >> 1); *[codebyte]:$(PCBYTESIZE) ptr = R1R0; Z = Z + 1; store_program_mem(); } # For stores, see the ld code (just flip bit 9) :st X, RdFull is phase=1 & ophi7=0x49 & oplow4=0xc & X & RdFull { tmp:2 = X; *[mem]:1 tmp = RdFull; } # st Rd,Y; st Rd,Z :st RstPtr, RdFull is phase=1 & ophi7=0x41 & oplow3=0x0 & RdFull & RstPtr { tmp:2 = RstPtr; *[mem]:1 tmp = RdFull; } # st Rd,Y+ ; st Rd, X+; st Rd, Z+ StPlus: RstPtr^"+" is RstPtr { tmp:2 = RstPtr; RstPtr = RstPtr + 0x01; export tmp; } :st StPlus, RdFull is phase=1 & ophi7=0x49 & oplow2=0x01 & RdFull & StPlus { *[mem]:1 StPlus = RdFull; } # st Rd,-Y ; st Rd, -X; st Rd, -Z StPredec: "-"^RstPtr is RstPtr { RstPtr = RstPtr - 0x01; export RstPtr; } :st StPredec, RdFull is phase=1 & ophi7=0x49 & oplow2=0x02 & RdFull & StPredec { tmp:2 = StPredec; *[mem]:1 tmp = RdFull; } @ifndef AVTINY # std Rd,Y+q # std Rd,Z+q StdYZq: Rstq^"+"^q6 is Rstq & q6 { local ptr = Rstq + zext(q6); export ptr; } :std StdYZq, RdFull is phase=1 & ophi2=0x2 & opbit12=0 & opbit9=1 & RdFull & opbit3 & StdYZq { *[mem]:1 StdYZq = RdFull; } @endif @ifdef AVTINY # see manual for computation of address for 16-bit STS :sts K7addr, RdHi is phase=1 & ophi5=0x15 & RdHi & K7addr { K7addr = RdHi; } @elif DATASIZE == "2" :sts next16memPtrVal1,RdFull is phase=1 & ophi7=0x49 & oplow4=0 & RdFull; next16memPtrVal1 { next16memPtrVal1 = RdFull; } @else :sts next24constVal,RdFull is phase=1 & ophi7=0x49 & oplow4=0 & RdFull; next24constVal { local loc:3 = (zext(RAMPD) << 16) | next24constVal; *[mem]:1 loc = RdFull; } @endif @ifdef FUSION # sts ; sts emits backwards with respect to lds; lds :stsw stswMemPtrVal2,stswop2RdPair is phase=1 & ldswop1hi7=0x49 & 
ldswop2hi7=0x49 & ldswop1low4=0 & ldswop2low4=0 & stswMemPtrVal2 & stswop2RdPair & stswPairPred & stswConstPairPred { stswMemPtrVal2 = stswop2RdPair; } @endif :sub RdFull,RrFull is phase=1 & ophi6=0x6 & RdFull & RrFull { doSubtract(RdFull,RrFull,RdFull); } # Rd,K :subi RdHi,K8 is phase=1 & ophi4=5 & RdHi & K8 { doSubtract(RdHi,K8,RdHi); } :swap RdFull is phase=1 & ophi7=0x4a & oplow4=2 & RdFull { RdFull = (RdFull >> 4) | (RdFull << 4); } # tst is AND Rd,Rd define pcodeop watchdog_reset; :wdr is phase=1 & ophi16=0x95a8 { watchdog_reset(); } :xch RdFull is phase=1 & ophi7=0x49 & oplow4=0x4 & RdFull { ptr:2 = Z; local tmp = *[mem]:1 ptr; *[mem]:1 ptr = RdFull; RdFull = tmp; } ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8.slaspec ================================================ # AVR8 with 16-bit addressable code space @define PCBYTESIZE "2" @define HASEIND "0" @include "avr8.sinc" ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8e.slaspec ================================================ # AVR8 with 16-bit addressable code space and support for @define PCBYTESIZE "2" @define HASEIND "1" @include "avr8.sinc" ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8egcc.cspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8eind.slaspec ================================================ # AVR8 with 22-bit addressable code space @define PCBYTESIZE "3" @define HASEIND "1" @include "avr8.sinc" ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8gcc.cspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8iarV1.cspec ================================================ 
================================================ FILE: pypcode/processors/Atmel/data/languages/avr8imgCraftV8.cspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8xmega.pspec ================================================ ================================================ FILE: pypcode/processors/Atmel/data/languages/avr8xmega.slaspec ================================================ # AVR8 with 22-bit addressable code space @define PCBYTESIZE "3" @define HASEIND "1" @define IO_START "0" @define REGISTER_SPACE "register" @define EIND "0x3c" @include "avr8.sinc" ================================================ FILE: pypcode/processors/Atmel/data/manuals/AVR32.idx ================================================ @doc32000.pdf [Atmel AVR32 Architecture Document - 04/2011] ABS, 123 ACALL, 124 ACR, 125 ADC, 126 ADD, 127 ADD{EQ}, 128 ADD{NE}, 128 ADD{HS}, 128 ADD{LO}, 128 ADD{GE}, 128 ADD{LT}, 128 ADD{MI}, 128 ADD{PL}, 128 ADD{LS}, 128 ADD{GT}, 128 ADD{LE}, 128 ADD{HI}, 128 ADD{VS}, 128 ADD{VC}, 128 ADD{QS}, 128 ADD{AL}, 128 ADDABS, 129 ADDHH.W, 130 AND, 131 AND{EQ}, 133 AND{NE}, 133 AND{HS}, 133 ADD{LO}, 133 AND{GE}, 133 AND{LT}, 133 AND{MI}, 133 AND{PL}, 133 AND{LS}, 133 AND{GT}, 133 AND{LE}, 133 AND{HI}, 133 AND{VS}, 133 AND{VC}, 133 AND{QS}, 133 AND{AL}, 133 ANDH, 134 ANDL, 134 ANDN, 136 ASR, 137 BFEXTS, 139 BFEXTU, 140 BFINS, 141 BLD, 142 BR{EQ}, 143 BR{NE}, 143 BR{HS}, 143 BR{LO}, 143 BR{GE}, 143 BR{LT}, 143 BR{MI}, 143 BR{PL}, 143 BR{LS}, 143 BR{GT}, 143 BR{LE}, 143 BR{HI}, 143 BR{VS}, 143 BR{VC}, 143 BR{QS}, 143 BR{AL}, 143 BREAKPOINT, 145 BREV, 146 BST, 147 CACHE, 148 CASTS.H, 150 CASTS.B, 150 CASTU.H, 151 CASTU.B, 151 CBR, 152 CLZ, 153 COM, 154 COP, 155 CP.B, 156 CP.H, 157 CP.W, 158 CPC, 160 CSRF, 161 CSRFCZ, 162 DIVS, 163 DIVU, 164 EOR, 165 EOR{EQ}, 167 EOR{NE}, 167 EOR{HS}, 167 EOR{LO}, 167 EOR{GE}, 167 EOR{LT}, 167 EOR{MI}, 167 EOR{PL}, 167 EOR{LS}, 167 
EOR{GT}, 167 EOR{LE}, 167 EOR{HI}, 167 EOR{VS}, 167 EOR{VC}, 167 EOR{QS}, 167 EOR{AL}, 167 EORH, 168 EORL, 168 FRS, 169 ICALL, 170 INCJOSP, 171 LD.D, 173 LD.SB, 175 LD.SB{EQ}, 176 LD.SB{NE}, 176 LD.SB{HS}, 176 LD.SB{LO}, 176 LD.SB{GE}, 176 LD.SB{LT}, 176 LD.SB{MI}, 176 LD.SB{PL}, 176 LD.SB{LS}, 176 LD.SB{GT}, 176 LD.SB{LE}, 176 LD.SB{HI}, 176 LD.SB{VS}, 176 LD.SB{VC}, 176 LD.SB{QS}, 176 LD.SB{AL}, 176 LD.UB, 177 LD.UB{EQ}, 179 LD.UB{NE}, 179 LD.UB{HS}, 179 LD.UB{LO}, 179 LD.UB{GE}, 179 LD.UB{LT}, 179 LD.UB{MI}, 179 LD.UB{PL}, 179 LD.UB{LS}, 179 LD.UB{GT}, 179 LD.UB{LE}, 179 LD.UB{HI}, 179 LD.UB{VS}, 179 LD.UB{VC}, 179 LD.UB{QS}, 179 LD.UB{AL}, 179 LD.SH, 180 LD.SH{EQ}, 182 LD.SH{NE}, 182 LD.SH{HS}, 182 LD.SH{LO}, 182 LD.SH{GE}, 182 LD.SH{LT}, 182 LD.SH{MI}, 182 LD.SH{PL}, 182 LD.SH{LS}, 182 LD.SH{GT}, 182 LD.SH{LE}, 182 LD.SH{HI}, 182 LD.SH{VS}, 182 LD.SH{VC}, 182 LD.SH{QS}, 182 LD.SH{AL}, 182 LD.UH, 183 LD.UH{EQ}, 185 LD.UH{NE}, 185 LD.UH{HS}, 185 LD.UH{LO}, 185 LD.UH{GE}, 185 LD.UH{LT}, 185 LD.UH{MI}, 185 LD.UH{PL}, 185 LD.UH{LS}, 185 LD.UH{GT}, 185 LD.UH{LE}, 185 LD.UH{HI}, 185 LD.UH{VS}, 185 LD.UH{VC}, 185 LD.UH{QS}, 185 LD.UH{AL}, 185 LD.W, 187 LD.W{EQ}, 189 LD.W{NE}, 189 LD.W{HS}, 189 LD.W{LO}, 189 LD.W{GE}, 189 LD.W{LT}, 189 LD.W{MI}, 189 LD.W{PL}, 189 LD.W{LS}, 189 LD.W{GT}, 189 LD.W{LE}, 189 LD.W{HI}, 189 LD.W{VS}, 189 LD.W{VC}, 189 LD.W{QS}, 189 LD.W{AL}, 189 LDC.D, 190 LDC.W, 190 LDC0.D, 192 LDC0.W, 192 LDCM.D, 193 LDCM.W, 193 LDDPC, 195 LDDSP, 196 LDINS.B, 197 LDINS.H, 197 LDM, 199 LDMTS, 201 LDSWP.SH, 202 LDSWP.UH, 202 LDSWP.W, 202 LSL, 204 LSR, 206 MAC, 208 MACHH.D, 209 MACHH.W, 210 MACS.D, 211 MACSATHH.W, 212 MACU.D, 213 MACWH.D, 214 MAX, 215 MCALL, 216 MEMC, 217 MEMS, 218 MEMT, 219 MFDR, 220 MFSR, 221 MIN, 222 MOV, 223 MOV{EQ}, 225 MOV{NE}, 225 MOV{HS}, 225 MOV{LO}, 225 MOV{GE}, 225 MOV{LT}, 225 MOV{MI}, 225 MOV{PL}, 225 MOV{LS}, 225 MOV{GT}, 225 MOV{LE}, 225 MOV{HI}, 225 MOV{VS}, 225 MOV{VC}, 225 MOV{QS}, 225 MOV{AL}, 225 MOVHI, 227 MTDR, 228 MTSR, 
229 MUL, 231 MULHH.W, 233 MULNHH.W, 234 MULNWH.D, 235 MULS.D, 236 MULSATHH.H, 237 MULSATHH.W, 238 MULSATRNDHH.H, 239 MULSATRNDWH.W, 240 MULSATWH.W, 241 MULU.D, 242 MULWH.D, 243 MUSFR, 244 MUSTR, 245 MVCR.D, 246 MVCR.W, 246 MVRC.D, 247 MVRC.W, 247 NEG, 248 NOP, 249 OR, 250 OR{EQ}, 252 OR{NE}, 252 OR{HS}, 252 OR{LO}, 252 OR{GE}, 252 OR{LT}, 252 OR{MI}, 252 OR{PL}, 252 OR{LS}, 252 OR{GT}, 252 OR{LE}, 252 OR{HI}, 252 OR{VS}, 252 OR{VC}, 252 OR{QS}, 252 OR{AL}, 252 ORH, 253 ORL, 253 PABS.SB, 254 PABS.SH, 254 PACKSH.UB, 255 PACKSH.SB, 255 PACKW.SH, 257 PADD.B, ` 258 PADD.H, 258 PADDH.UB, 259 PADDH.SH, 259 PADDS.UB, 260 PADDS.SB, 260 PADDS.UH, 260 PADDS.SH, 260 PADDSUB.H, 262 PADDSUBH.SH, 263 PADDSUBS.UH, 264 PADDSUBS.SH, 264 PADDX.H, 266 PADDXH.SH, 267 PADDXS.UH, 268 PADDXS.SH, 268 PASR.B, 269 PASR.H, 269 PAVG.UB, 271 PAGV.SH, 271 PLSL.B, 273 PLSL.H, 273 PLSR.B, 275 PLSR.H, 275 PMAX.UB, 277 PMAX.SH, 277 PMIN.UB, 279 PMIN.SH, 279 POPJC, 281 POPM, 282 PREF, 284 PSAD, 285 PSUB.B, 286 PSUB.H, 286 PSUBADD.H, 287 PSUBADDH.SH, 288 PSUBADDS.UH, 289 PSUBADDS.SH, 289 PSUBS.UB, 292 PSUBS.SB, 292 PSUBS.UH, 292 PSUBS.SH, 292 PSUBX.H, 294 PSUBXH.SH, 295 PSUBXS.UH, 296 PSUBXS.SH, 296 PUNPCKSB.H 298 PUNPCKUB.H, 298 PUSHJC, 300 PUSHM, 301 RCALL, 303 RET{EQ}, 304 RET{NE}, 304 RET{HS}, 304 RET{LO}, 304 RET{GE}, 304 RET{LT}, 304 RET{MI}, 304 RET{PL}, 304 RET{LS}, 304 RET{GT}, 304 RET{LE}, 304 RET{HI}, 304 RET{VS}, 304 RET{VC}, 304 RET{QS}, 304 RET{AL}, 304 RETD, 305 RETE, 306 RETJ, 308 RETS, 309 RETTS, 310 RJMP, 311 ROL, 312 ROR, 313 RSUB, 314 RSUB{EQ}, 315 RSUB{NE}, 315 RSUB{HS}, 315 RSUB{LO}, 315 RSUB{GE}, 315 RSUB{LT}, 315 RSUB{MI}, 315 RSUB{PL}, 315 RSUB{LS}, 315 RSUB{GT}, 315 RSUB{LE}, 315 RSUB{HI}, 315 RSUB{VS}, 315 RSUB{VC}, 315 RSUB{QS}, 315 RSUB{AL}, 315 SATADD.H, 316 SATADD.W, 317 SATRNDS, 318 SATRNDU, 319 SATS, 320 SATSUB.H, 321 SATSUB.W, 322 SATU, 324 SBC, 325 SBR, 326 SCALL, 327 SCR, 328 SLEEP, 329 SR{EQ}, 330 SR{NE}, 330 SR{HS}, 330 SR{LO}, 330 SR{GE}, 330 SR{LT}, 330 SR{MI}, 
330 SR{PL}, 330 SR{LS}, 330 SR{GT}, 330 SR{LE}, 330 SR{HI}, 330 SR{VS}, 330 SR{VC}, 330 SR{QS}, 330 SR{AL}, 330 SSCALL, 331 SSRF, 332 ST.B, 333 ST.B{EQ}, 335 ST.B{NE}, 335 ST.B{HS}, 335 ST.B{LO}, 335 ST.B{GE}, 335 ST.B{LT}, 335 ST.B{MI}, 335 ST.B{PL}, 335 ST.B{LS}, 335 ST.B{GT}, 335 ST.B{LE}, 335 ST.B{HI}, 335 ST.B{VS}, 335 ST.B{VC}, 335 ST.B{QS}, 335 ST.B{AL}, 335 ST.D, 336 ST.H, 338 ST.H{EQ}, 340 ST.H{NE}, 340 ST.H{HS}, 340 ST.H{LO}, 340 ST.H{GE}, 340 ST.H{LT}, 340 ST.H{MI}, 340 ST.H{PL}, 340 ST.H{LS}, 340 ST.H{GT}, 340 ST.H{LE}, 340 ST.H{HI}, 340 ST.H{VS}, 340 ST.H{VC}, 340 ST.H{QS}, 340 ST.H{AL}, 340 ST.W, 341 ST.W{EQ}, 343 ST.W{NE}, 343 ST.W{HS}, 343 ST.W{LO}, 343 ST.W{GE}, 343 ST.W{LT}, 343 ST.W{MI}, 343 ST.W{PL}, 343 ST.W{LS}, 343 ST.W{GT}, 343 ST.W{LE}, 343 ST.W{HI}, 343 ST.W{VS}, 343 ST.W{VC}, 343 ST.W{QS}, 343 ST.W{AL}, 343 STC.D, 344 STC.W, 344 STC0.D, 346 STC0.W, 346 STCM.D, 347 STCM.W, 347 STCOND, 349 STDSP, 350 STHH.W, 351 STM, 353 STMTS, 354 STSWP.H, 355 STSWP.W, 355 SUB, 356 SUB{EQ}, 358 SUB{NE}, 358 SUB{HS}, 358 SUB{LO}, 358 SUB{GE}, 358 SUB{LT}, 358 SUB{MI}, 358 SUB{PL}, 358 SUB{LS}, 358 SUB{GT}, 358 SUB{LE}, 358 SUB{HI}, 358 SUB{VS}, 358 SUB{VC}, 358 SUB{QS}, 358 SUB{AL}, 358 SUBHH.W, 360 SWAP.B, 361 SWAP.BH, 362 SWAP.H, 363 SYNC, 364 TLBR, 365 TLBS, 366 TLBW, 368 TNBZ, 369 TST, 370 XCHG, 371 ================================================ FILE: pypcode/processors/Atmel/data/manuals/AVR8.idx ================================================ @atmel-0856-avr-instruction-set-manual.pdf [Atmel AVR Instruction Set Manual, 11/2016 (Rev. 
0856L)] ADC, 30 ADD, 32 ADIW, 33 AND, 35 ANDI, 36 ASR, 37 BCLR, 38 BLD, 39 BRBC, 40 BRBS, 41 BRCC, 42 BRCS, 43 BREAK, 44 BREQ, 45 BRGE, 46 BRHC, 47 BRHS, 48 BRID, 49 BRIE, 50 BRLO, 51 BRLT, 52 BRMI, 53 BRNE, 54 BRPL, 55 BRSH, 56 BRTC, 57 BRTS, 58 BRVC, 59 BRVS, 60 BSET, 61 BST, 62 CALL, 63 CBI, 65 CBR, 66 CLC, 67 CLH, 68 CLI, 69 CLN, 70 CLR, 71 CLS, 72 CLT, 73 CLV, 74 CLZ, 75 COM, 76 CP, 77 CPC, 79 CPI, 81 CPSE, 83 DEC, 84 DES, 86 EICALL, 87 EIJMP, 88 ELPM, 89 EOR, 91 FMUL, 92 FMULS, 94 FMULSU, 96 # Ghidra currently uses non-standard FRACMUL* mnemonics in place of FMUL* FRACMUL, 92 FRACMULS, 94 FRACMULSU, 96 ICALL, 98 IJMP, 99 IN, 100 INC, 101 JMP, 103 LAC, 104 LAS, 105 LAT, 106 LD, 107 LDD, 109 LDI, 115 LDS, 116 LPM, 118 LSL, 120 LSR, 122 MOV, 123 MOVW, 124 MUL, 125 MULS, 126 MULSU, 127 NEG, 129 NOP, 131 OR, 132 ORI, 133 OUT, 134 POP, 135 PUSH, 136 RCALL, 137 RET, 139 RETI, 140 RJMP, 142 ROL, 143 ROR, 145 SBC, 147 SBCI, 149 SBI, 151 SBIC, 152 SBIS, 153 SBIW, 154 SBR, 156 SBRC, 157 SBRS, 158 SEC, 159 SEH, 160 SEI, 161 SEN, 162 SER, 163 SES, 164 SET, 165 SEV, 166 SEZ, 167 SLEEP, 168 SPM, 169 ST, 173 STD, 175 STS, 179 SUB, 181 SUBI, 183 SWAP, 185 TST, 186 WDR, 187 XCH, 188 ================================================ FILE: pypcode/processors/Atmel/data/patterns/AVR8_patterns.xml ================================================ 0x08 0x95 0x18 0x95 ....1111 1001001. ....1111 1001001. 0x1. 0xe. 0xa. 0xe. 0xb. 0xe. 0xe. 0xe. 0xf. 0xe. 0x0. 0xe. 0x0b 0xbf 0x02 0xc0 0x07 0x90 0x0d 0x92 0xa. 0x3. 
0xb1 0x07 0xd9 0xf7 ================================================ FILE: pypcode/processors/Atmel/data/patterns/patternconstraints.xml ================================================ AVR8_patterns.xml ================================================ FILE: pypcode/processors/BPF/data/languages/BPF.cspec ================================================ ================================================ FILE: pypcode/processors/BPF/data/languages/BPF.ldefs ================================================ BPF processor 32-bit little-endian ================================================ FILE: pypcode/processors/BPF/data/languages/BPF.pspec ================================================ ================================================ FILE: pypcode/processors/BPF/data/languages/BPF.sinc ================================================ ############################################################################### # BPF Processor Specification for Ghidra ############################################################################### define space ram type=ram_space size=4 default; define space packet type=ram_space size=4; define space mem type=ram_space size=4; define space register type=register_space size=4; define register offset=0 size=4 [ A X RS R PC ]; define register offset=0 size=2 [ AH _ XH _ RSH _ RH _ PCH _ ]; define register offset=0 size=1 [ AB _ _ _ XB _ _ _ RSB _ _ _ RB _ _ _ PCB _ _ _ ]; # Instruction encoding: Insop:8, dst_reg:4, src_reg:4, off:16, imm:32 - from lsb to msb define token instr(64) imm=(32, 63) jf=(24, 31) signed jt=(16, 23) signed op_src_K_X=(3, 3) op_alu_jmp_opcode=(4, 7) op_alu_jmp_source=(3, 3) op_alu_mode=(4, 7) op_ld_st_mode=(5, 7) op_ld_st_size=(3, 4) op_insn_class=(0, 2) ; :LD imm is imm & op_ld_st_mode=0x0 & op_ld_st_size=0x0 & op_insn_class=0x0 { A=imm; } :LDH imm is imm & op_ld_st_mode=0x0 & op_ld_st_size=0x1 & op_insn_class=0x0 { AH=imm:2; A = A & 0xffff; } :LDB imm is imm & op_ld_st_mode=0x0 & op_ld_st_size=0x2 & 
op_insn_class=0x0 { AB=imm:1; A = A & 0xff;} :LDX imm is imm & op_ld_st_mode=0x0 & op_ld_st_size=0x0 & op_insn_class=0x1 { X=imm; } :LDXH imm is imm & op_ld_st_mode=0x0 & op_ld_st_size=0x1 & op_insn_class=0x1 { XH=imm:2; X = X & 0xffff; } :LDXB imm is imm & op_ld_st_mode=0x0 & op_ld_st_size=0x2 & op_insn_class=0x1 { XB=imm:1; X = X & 0xff;} :LD imm is imm & op_ld_st_mode=0x1 & op_ld_st_size=0x0 & op_insn_class=0x0 { A=*[packet]:4 imm:4; } :LDH imm is imm & op_ld_st_mode=0x1 & op_ld_st_size=0x1 & op_insn_class=0x0 { A=*[packet]:2 imm:4; A = A & 0xffff; } :LDB imm is imm & op_ld_st_mode=0x1 & op_ld_st_size=0x2 & op_insn_class=0x0 { A=*[packet]:1 imm:4; A = A & 0xff;} :LDX imm is imm & op_ld_st_mode=0x1 & op_ld_st_size=0x0 & op_insn_class=0x1 { X=*[packet]:4 imm:4; } :LDXH imm is imm & op_ld_st_mode=0x1 & op_ld_st_size=0x1 & op_insn_class=0x1 { X=*[packet]:2 imm:4; X = X & 0xffff; } :LDXB imm is imm & op_ld_st_mode=0x1 & op_ld_st_size=0x2 & op_insn_class=0x1 { X=*[packet]:1 imm:4; X = X & 0xff;} :ST imm is imm & op_insn_class=0x2 { *[mem]:4 imm:4=A:4; } :STX imm is imm & op_insn_class=0x3 { *[mem]:4 imm:4=X:4; } :LDI imm is imm & op_ld_st_mode=0x2 & op_ld_st_size=0x0 & op_insn_class=0x0 { A=*[packet]:4 (imm:4 + X); } :LDIH imm is imm & op_ld_st_mode=0x2 & op_ld_st_size=0x1 & op_insn_class=0x0 { A=*[packet]:2 (imm:4 + X); A = A & 0xffff; } :LDIB imm is imm & op_ld_st_mode=0x2 & op_ld_st_size=0x2 & op_insn_class=0x0 { A=*[packet]:1 (imm:4 + X); A = A & 0xff; } :LD imm is imm & op_ld_st_mode=0x3 & op_ld_st_size=0x0 & op_insn_class=0x0 { A=*[mem]:4 imm:4; } :LDH imm is imm & op_ld_st_mode=0x3 & op_ld_st_size=0x1 & op_insn_class=0x0 { A=*[mem]:2 imm:4; A = A & 0xffff; } :LDB imm is imm & op_ld_st_mode=0x3 & op_ld_st_size=0x2 & op_insn_class=0x0 { A=*[mem]:1 imm:4; A = A & 0xff; } :LDX imm is imm & op_ld_st_mode=0x3 & op_ld_st_size=0x0 & op_insn_class=0x1 { X=*[mem]:4 imm:4; } :LDXH imm is imm & op_ld_st_mode=0x3 & op_ld_st_size=0x1 & op_insn_class=0x1 { X=*[mem]:2 imm:4; X 
= X & 0xffff; } :LDXB imm is imm & op_ld_st_mode=0x3 & op_ld_st_size=0x2 & op_insn_class=0x1 { X=*[mem]:1 imm:4; X = X & 0xff; } # ALU :ADD imm is imm & op_alu_mode=0x0 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A + imm; } :ADD X is X & op_alu_mode=0x0 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A + X; } :SUB imm is imm & op_alu_mode=0x1 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A - imm; } :SUB X is X & op_alu_mode=0x1 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A - X; } :MUL imm is imm & op_alu_mode=0x2 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A * imm; } :MUL X is X & op_alu_mode=0x2 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A * X; } :DIV imm is imm & op_alu_mode=0x3 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A / imm; } :DIV X is X & op_alu_mode=0x3 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A / X; } :OR imm is imm & op_alu_mode=0x4 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A | imm; } :OR X is X & op_alu_mode=0x4 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A | X; } :AND imm is imm & op_alu_mode=0x5 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A & imm; } :AND X is X & op_alu_mode=0x5 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A & X; } :LSH imm is imm & op_alu_mode=0x6 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A << imm; } :LSH X is X & op_alu_mode=0x6 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A << X; } :RSH imm is imm & op_alu_mode=0x7 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A >> imm; } :RSH X is X & op_alu_mode=0x7 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A >> X; } :NEG is op_alu_mode=0x8 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= -A; } :MOD imm is imm & op_alu_mode=0x9 & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A % imm; } :MOD X is X & op_alu_mode=0x9 & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A % X; } :XOR imm is imm & op_alu_mode=0xa & op_insn_class=0x4 & op_src_K_X = 0x0 { A= A ^ imm; } :XOR X is X & op_alu_mode=0xa & op_insn_class=0x4 & op_src_K_X = 0x1 { A= A ^ X; } :TAX is op_insn_class=0x7 & op_src_K_X = 0x0 { A= X; } :TXA is 
op_insn_class=0x7 & op_src_K_X = 0x1 { X= A; } :LD_MSH imm is imm & op_ld_st_mode=0x5 & op_ld_st_size=0x0 & op_insn_class=0x0 { local t_val = *[packet]:4 imm:4; t_val = t_val&0xf; t_val = t_val << 2; A = t_val; } :LDH_MSH imm is imm & op_ld_st_mode=0x5 & op_ld_st_size=0x1 & op_insn_class=0x0 { local t_val = *[packet]:2 imm:4; t_val = t_val&0xf; t_val = t_val << 2; AH = t_val; } :LDB_MSH imm is imm & op_ld_st_mode=0x5 & op_ld_st_size=0x2 & op_insn_class=0x0 { local t_val = *[packet]:1 imm:4; t_val = t_val&0xf; t_val = t_val << 2; AB = t_val; } :LDX_MSH imm is imm & op_ld_st_mode=0x5 & op_ld_st_size=0x0 & op_insn_class=0x1 { local t_val = *[packet]:4 imm:4; t_val = t_val&0xf; t_val = t_val << 2; X = t_val; } :LDXH_MSH imm is imm & op_ld_st_mode=0x5 & op_ld_st_size=0x1 & op_insn_class=0x1 { local t_val = *[packet]:2 imm:4; t_val = t_val&0xf; t_val = t_val << 2; XH = t_val; X = X & 0xffff; } :LDXB_MSH imm is imm & op_ld_st_mode=0x5 & op_ld_st_size=0x2 & op_insn_class=0x1 { local t_val = *[packet]:1 imm:4; t_val = t_val&0xf; t_val = t_val << 2; XB = t_val; X = X & 0xff; } #Branch instructions ############################################################################### joff: reloc is imm [ reloc = inst_next + imm * 8; ] { export *:8 reloc; } jtoff: reloc is jt [ reloc = inst_next + jt * 8; ] { export *:8 reloc; } jfoff: reloc is jf [ reloc = inst_next + jf * 8; ] { export *:8 reloc; } :JA joff is joff & op_alu_jmp_opcode=0x0 & op_alu_jmp_source=0 & op_insn_class=0x5 { goto joff; } :JEQ jtoff, jfoff, imm is imm & jtoff & jfoff & op_alu_jmp_opcode=0x1 & op_alu_jmp_source=0 & op_insn_class=0x5 { if (A==imm) goto jtoff; goto jfoff; } :JEQ jtoff, jfoff, X is X & jtoff & jfoff & op_alu_jmp_opcode=0x1 & op_alu_jmp_source=1 & op_insn_class=0x5 { if (A==X) goto jtoff; goto jfoff; } :JGT jtoff, jfoff, imm is imm & jtoff & jfoff & op_alu_jmp_opcode=0x2 & op_alu_jmp_source=0 & op_insn_class=0x5 { if (A > imm) goto jtoff; goto jfoff; } :JGT jtoff, jfoff, X is X & jtoff & jfoff & 
op_alu_jmp_opcode=0x2 & op_alu_jmp_source=1 & op_insn_class=0x5 { if (A > X) goto jtoff; goto jfoff; } :JGE jtoff, jfoff, imm is imm & jtoff & jfoff & op_alu_jmp_opcode=0x3 & op_alu_jmp_source=0 & op_insn_class=0x5 { if (A >= imm) goto jtoff; goto jfoff; } :JGE jtoff, jfoff, X is X & jtoff & jfoff & op_alu_jmp_opcode=0x3 & op_alu_jmp_source=1 & op_insn_class=0x5 { if (A >= X) goto jtoff; goto jfoff; } :JSET jtoff, jfoff, imm is imm & jtoff & jfoff & op_alu_jmp_opcode=0x4 & op_alu_jmp_source=0 & op_insn_class=0x5 { if ((A&imm) != 0) goto jtoff; goto jfoff; } :JSET jtoff, jfoff, X is X & jtoff & jfoff & op_alu_jmp_opcode=0x4 & op_alu_jmp_source=1 & op_insn_class=0x5 { if ((A&X) != 0) goto jtoff; goto jfoff; } :RETW imm is imm & op_ld_st_size=0 & op_insn_class=0x6 { R = imm; return [*:8 RS]; } ================================================ FILE: pypcode/processors/BPF/data/languages/BPF_le.slaspec ================================================ define endian=little; @include "BPF.sinc" ================================================ FILE: pypcode/processors/CP1600/data/languages/CP1600.cspec ================================================ ================================================ FILE: pypcode/processors/CP1600/data/languages/CP1600.ldefs ================================================ General Instruments CP1600 ================================================ FILE: pypcode/processors/CP1600/data/languages/CP1600.opinion ================================================ ================================================ FILE: pypcode/processors/CP1600/data/languages/CP1600.pspec ================================================ ================================================ FILE: pypcode/processors/CP1600/data/languages/CP1600.slaspec ================================================ define endian=big; define alignment=2; define space ram type=ram_space wordsize=2 size=2 default; define space register type=register_space size=2; define register offset=0x00 
size=2 [ R0 R1 R2 R3 R4 R5 R6 R7 ]; define register offset=0x10 size=1 [ I C O Z S ]; define register offset=0x20 size=4 [ contextreg ]; define token opcode_word (16) target3_5 = (3, 5) reg3_5 = (3, 5) target0_2 = (0, 2) reg0_2 = (0, 2) reg0_1 = (0, 1) operation_size = (2, 2) branch_sign = (5, 5) branch_external = (4, 4) branch_condition = (0, 3) external_condition = (0, 3) opcode6_9 = (6, 9) opcode3_9 = (3, 9) opcode2_9 = (2, 9) opcode1_9 = (1, 9) opcode0_9 = (0, 9) ; define token jump_token (32) target24_25 = (24, 25) reg24_25 = (24, 25) address_hi = (18, 23) jump_type = (16, 17) address_lo = (0, 9) ; define token double16 (32) value_lo = (16, 23) value_hi = (0, 7) ; define token immediate16 (16) imm16 = (0, 15) addr16 = (0, 15) ; define context contextreg doublebyte = (0, 0) noflow ; attach variables [ reg0_1 ] [ R0 R1 R2 R3 ]; attach variables [ reg0_2 reg3_5 ] [ R0 R1 R2 R3 R4 R5 R6 R7 ]; attach variables [ reg24_25 ] [ R4 R5 R6 R7 ]; ################################################################ jmpdest16: reloc is address_hi & address_lo [ reloc = (address_hi << 10) + address_lo; ] { export *:2 reloc; } branchdest16: reloc is branch_sign=0 ; imm16 [ reloc = inst_start + 2 + imm16; ] { export *:2 reloc; } branchdest16: reloc is branch_sign=1 ; imm16 [ reloc = inst_start + 2 + (imm16 ^ 0xFFFF); ] { export *:2 reloc; } splitimm16: split is value_hi & value_lo [ split = (value_hi << 8) + value_lo; ] { local tmp:2 = split & 0xFFFF; export tmp; } impliedval16: reg3_5 is reg3_5 & (target3_5=0 | target3_5=1 | target3_5=2 | target3_5=3 | target3_5=7) & doublebyte=0 { local tmp:2 = *:2 reg3_5; export tmp; } impliedval16: reg3_5 is reg3_5 & (target3_5=4 | target3_5=5) & doublebyte=0 { local tmp:2 = *:2 reg3_5; reg3_5 = reg3_5 + 1; export tmp; } impliedval16: reg3_5 is reg3_5 & target3_5=6 & doublebyte=0 { reg3_5 = reg3_5 - 1; local tmp:2 = *:2 reg3_5; export tmp; } impliedval16: reg3_5 is reg3_5 & (target3_5=4 | target3_5=5) & doublebyte=1 { local val:4 = *:4 reg3_5; 
local low:1 = val(2); local high:1 = val(0); local tmp:2 = (zext(high) << 8) | zext(low); reg3_5 = reg3_5 + 2; export tmp; } impliedval16: reg3_5 is reg3_5 & target3_5=6 & doublebyte=1 { reg3_5 = reg3_5 - 2; local val:4 = *:4 reg3_5; local low:1 = val(2); local high:1 = val(0); local tmp:2 = (zext(high) << 8) | zext(low); export tmp; } impliedval16: reg3_5 is reg3_5 & (target3_5=0 | target3_5=1 | target3_5=2 | target3_5=3 | target3_5=7) & doublebyte=1 { local val:2 = *:1 reg3_5; val = (zext(val) << 8) | zext(val); export val; } checkbranch: is reg0_2=7 { goto [R7]; } checkbranch: is reg0_2 {} regval0_2: is reg0_2=7 { local tmp:2 = inst_next / 2; export tmp; } regval0_2: is reg0_2 { export reg0_2; } cc: "" is branch_external=0 & branch_condition=0 { local tmp:1 = 1; export tmp; } cc: "C" is branch_external=0 & branch_condition=1 { export C; } cc: "OV" is branch_external=0 & branch_condition=2 { export O; } cc: "PL" is branch_external=0 & branch_condition=3 { local tmp = !S; export tmp; } cc: "EQ" is branch_external=0 & branch_condition=4 { export Z; } cc: "LT" is branch_external=0 & branch_condition=5 { local tmp = S != O; export tmp; } cc: "LE" is branch_external=0 & branch_condition=6 { local tmp = (Z == 1) || (S != O); export tmp; } cc: "USC" is branch_external=0 & branch_condition=7 { local tmp = S != C; export tmp; } cc: "NC" is branch_external=0 & branch_condition=9 { local tmp = !C; export tmp; } cc: "NOV" is branch_external=0 & branch_condition=10 { local tmp = !O; export tmp; } cc: "MI" is branch_external=0 & branch_condition=11 { export S; } cc: "NEQ" is branch_external=0 & branch_condition=12 { local tmp = !Z; export tmp; } cc: "GE" is branch_external=0 & branch_condition=13 { local tmp = S == O; export tmp; } cc: "GT" is branch_external=0 & branch_condition=14 { local tmp = (Z == 0) || (S == O); export tmp; } cc: "ESC" is branch_external=0 & branch_condition=15 { local tmp = S == C; export tmp; } 
################################################################
# Flag-update macros.
# Flag registers (defined earlier in this file, offset 0x10, 1 byte each):
#   I = interrupt enable, C = carry, O = overflow, Z = zero, S = sign.

# Set Z and S from a result value.
macro resultFlags(value) {
    Z = value == 0;
    S = value s< 0;
}

# first_w = first_r + second, updating C (unsigned carry), O (signed
# overflow), Z and S.  Carry/overflow are computed into temporaries before
# the write so the macro is safe when first_w and first_r alias the same
# register.
macro addition(first_w, first_r, second) {
    local tmpC = carry(first_r, second);
    local tmpO = scarry(first_r, second);
    first_w = first_r + second;
    C = tmpC;
    O = tmpO;
    resultFlags(first_w);
}

# Compare first against second: subtraction flags (O, C-as-borrow, Z, S)
# with the numeric result discarded.
macro comparison(first, second) {
    local __val__ = first - second;
    O = sborrow(first, second);
    C = first < second;
    resultFlags(__val__);
}

# first_w = first_r - second, updating O, C (borrow), Z and S.
macro subtraction(first_w, first_r, second) {
    local __val__ = first_r - second;
    O = sborrow(first_r, second);
    C = first_r < second;
    resultFlags(__val__);
    first_w = __val__;
}

################################################################
# Opaque operations with no p-code model.

define pcodeop TerminateCurrentInterrupt;
define pcodeop SoftwareInterrupt;

################################################################
# ALU constructors.
#
# Conventions used below (sub-constructors defined earlier in this file):
#   regval0_2   - source value of the destination register; reads of R7
#                 yield inst_next / 2 instead of the raw register.
#   checkbranch - emitted after the write: if the destination field is R7
#                 it performs `goto [R7]`, turning the ALU op into a branch.
#   impliedval16 - 16-bit operand loaded through a pointer register, with
#                 auto-increment/decrement and SDBD double-byte handling.

# ADD direct: reg += MEM[addr16].
:ADD addr16, reg0_2 is opcode3_9=0x0058 & reg0_2 & regval0_2 & checkbranch ; addr16
{
    local ptr:2 = addr16;
    addition(reg0_2, regval0_2, *:2 ptr);
    build checkbranch;
}

# ADD@ indirect: reg += operand fetched via pointer register.
:ADD@ impliedval16, reg0_2 is opcode6_9=0x000B & reg0_2 & regval0_2 & checkbranch & impliedval16
{
    addition(reg0_2, regval0_2, impliedval16);
    build checkbranch;
}

# ADCR: add the carry flag into the register.
:ADCR reg0_2 is opcode3_9=0x0005 & reg0_2 & regval0_2 & checkbranch
{
    local oldC = zext(C);
    addition(reg0_2, regval0_2, oldC);
    build checkbranch;
}

# ADDR: reg0_2 += reg3_5 (register-to-register add).
:ADDR reg3_5, reg0_2 is opcode6_9=0x0003 & reg3_5 & reg0_2 & regval0_2 & checkbranch
{
    addition(reg0_2, regval0_2, reg3_5);
    build checkbranch;
}

# AND direct: reg &= MEM[addr16]; sets Z/S only (C and O untouched).
:AND addr16, reg0_2 is opcode3_9=0x0070 & reg0_2 & regval0_2 & checkbranch ; addr16
{
    local ptr:2 = addr16;
    reg0_2 = regval0_2 & *:2 ptr;
    resultFlags(reg0_2);
    build checkbranch;
}

# AND@ indirect.
:AND@ impliedval16, reg0_2 is opcode6_9=0x000E & reg0_2 & regval0_2 & checkbranch & impliedval16
{
    reg0_2 = regval0_2 & impliedval16;
    resultFlags(reg0_2);
    build checkbranch;
}

# ANDR: reg0_2 &= reg3_5.
:ANDR reg3_5, reg0_2 is opcode6_9=0x0006 & reg3_5 & reg0_2 & regval0_2 & checkbranch
{
    reg0_2 = regval0_2 & reg3_5;
    resultFlags(reg0_2);
    build checkbranch;
}

# Conditional branch (definition continues on the next original line).
:B^cc branchdest16 is
(opcode6_9=0x0008 & cc) ... & branchdest16 { if (cc) goto branchdest16; } :BEXT branchdest16, external_condition is (opcode6_9=0x0008 & branch_external=1 & external_condition) ... & branchdest16 { goto branchdest16; } :CLRC is opcode0_9=0x0006 { C = 0; } :CLRR reg0_2 is opcode6_9=0x0007 & reg0_2 & (target0_2=target3_5) & checkbranch { reg0_2 = 0; resultFlags(reg0_2); build checkbranch; } :CMP addr16, reg0_2 is opcode3_9=0x0068 & reg0_2 ; addr16 { local ptr:2 = addr16; comparison(reg0_2, *:2 ptr); } :CMP@ impliedval16, reg0_2 is opcode6_9=0x000D & reg0_2 & impliedval16 { comparison(reg0_2, impliedval16); } :CMPR reg3_5, reg0_2 is opcode6_9=0x0005 & reg3_5 & reg0_2 { comparison(reg0_2, reg3_5); } :COMR reg0_2 is opcode3_9=0x0003 & reg0_2 & regval0_2 & checkbranch { reg0_2 = ~regval0_2; resultFlags(reg0_2); build checkbranch; } :DECR reg0_2 is opcode3_9=0x0002 & reg0_2 & regval0_2 & checkbranch { reg0_2 = regval0_2 - 1; resultFlags(reg0_2); build checkbranch; } :DIS is opcode0_9=0x0003 { I = 0; } :EIS is opcode0_9=0x0002 { I = 1; } :GSWD reg0_1 is opcode2_9=0x000C & reg0_1 { local mask:2 = (zext(S) << 7) + (zext(Z) << 6) + (zext(O) << 5) + (zext(C) << 4); reg0_1 = (mask << 8) + mask; } :HLT is opcode0_9=0x0000 { goto inst_start; } :INCR reg0_2 is opcode3_9=0x0001 & reg0_2 & regval0_2 & checkbranch { reg0_2 = regval0_2 + 1; resultFlags(reg0_2); build checkbranch; } :J jmpdest16 is opcode0_9=0x0004 ; jump_type=0 & target24_25=3 & jmpdest16 { goto jmpdest16; } :JD jmpdest16 is opcode0_9=0x0004 ; jump_type=2 & target24_25=3 & jmpdest16 { I = 0; goto jmpdest16; } :JE jmpdest16 is opcode0_9=0x0004 ; jump_type=1 & target24_25=3 & jmpdest16 { I = 1; goto jmpdest16; } :JR reg3_5 is opcode6_9=0x0002 & reg3_5 & reg0_2 & reg0_2=7 { reg0_2 = reg3_5; resultFlags(reg0_2); return [reg0_2]; } :JSR reg24_25, jmpdest16 is opcode0_9=0x0004 ; jump_type=0 & reg24_25 & jmpdest16 { reg24_25 = inst_next; call jmpdest16; } :JSRD reg24_25, jmpdest16 is opcode0_9=0x0004 ; jump_type=2 & reg24_25 
& jmpdest16
{
    I = 0;
    reg24_25 = inst_next;
    call jmpdest16;
}

# JSRE: jump to subroutine, enabling interrupts; return address in reg24_25.
:JSRE reg24_25, jmpdest16 is opcode0_9=0x0004 ; jump_type=1 & reg24_25 & jmpdest16
{
    I = 1;
    reg24_25 = inst_next;
    call jmpdest16;
}

# MOVR: copy reg3_5 into reg0_2, setting Z/S; a move into R7 branches
# (via checkbranch's `goto [R7]`).
:MOVR reg3_5, reg0_2 is opcode6_9=0x0002 & reg0_2 & reg3_5 & checkbranch
{
    reg0_2 = reg3_5;
    resultFlags(reg0_2);
    build checkbranch;
}

# MVI direct: reg = MEM[addr16].
# BUG FIX: previously `reg0_2 = *(*:2 ptr);` — a double dereference that
# loaded MEM[MEM[addr16]].  Every other direct-mode constructor in this
# file (ADD, AND, CMP, SUB, XOR) reads the operand with a single
# `*:2 ptr`, and MVI is the plain direct load.
:MVI addr16, reg0_2 is opcode3_9=0x0050 & reg0_2 & checkbranch ; addr16
{
    local ptr:2 = addr16;
    reg0_2 = *:2 ptr;
    build checkbranch;
}

# MVI@ indirect: reg = operand fetched through a pointer register
# (impliedval16 handles auto-inc/dec and SDBD double-byte mode).
:MVI@ impliedval16, reg0_2 is opcode6_9=0x000A & reg0_2 & impliedval16 & checkbranch
{
    reg0_2 = impliedval16;
    build checkbranch;
}

# MVO direct: MEM[addr16] = reg.
:MVO reg0_2, addr16 is opcode3_9=0x0048 & reg0_2 ; addr16
{
    local ptr:2 = addr16;
    *ptr = reg0_2;
}

# MVO@ through R4/R5: store, then post-increment the pointer register.
:MVO@ reg0_2, reg3_5 is opcode6_9=0x0009 & reg0_2 & reg3_5 & (reg3_5=4 | reg3_5=5) & checkbranch
{
    local ptr:2 = reg3_5;
    *ptr = reg0_2;
    reg3_5 = reg3_5 + 1;
    build checkbranch;
}

# MVO@ through any other pointer register: plain store.
:MVO@ reg0_2, reg3_5 is opcode6_9=0x0009 & reg0_2 & reg3_5 & checkbranch
{
    local ptr:2 = reg3_5;
    *ptr = reg0_2;
    build checkbranch;
}

# MVOI: store the register into the word following the opcode
# (self-modifying immediate).  NOTE(review): uses inst_start + 2 as the
# target address — confirm against the ram space word addressing.
:MVOI reg0_2 is opcode3_9=0x004F & reg0_2 ; imm16
{
    local tmp:2 = inst_start + 2;
    *tmp = reg0_2;
}

# NEGR: two's-complement negate; carry/overflow computed as
# (~reg) + 1 before the write, then Z/S from the result.
:NEGR reg0_2 is opcode3_9=0x0004 & reg0_2 & regval0_2 & checkbranch
{
    local tmp = regval0_2 ^ 0xFFFF;
    local tmpC = carry(tmp, 1);
    local tmpO = scarry(tmp, 1);
    reg0_2 = -regval0_2;
    C = tmpC;
    O = tmpO;
    resultFlags(reg0_2);
    build checkbranch;
}

:NOP is opcode1_9=0x001A { }

# NOPP: a branch encoding with condition 8 (absent from the cc table),
# i.e. branch-never — a two-word no-op that consumes its displacement word.
:NOPP is opcode6_9=0x0008 & branch_external=0 & branch_condition=8 ; imm16 { }

# PSHR: push via R6 (stack pointer); store then post-increment
# (the CP-1600 stack grows upward).
:PSHR reg0_2 is opcode6_9=0x0009 & reg0_2 & reg3_5 & reg3_5=6
{
    local ptr:2 = reg3_5;
    *ptr = reg0_2;
    reg3_5 = reg3_5 + 1;
}

# PULR: pop via R6; impliedval16 performs the pre-decrement and load.
:PULR reg0_2 is opcode6_9=0x000A & impliedval16 & reg0_2 & reg3_5=6
{
    reg0_2 = impliedval16;
}

# RSWD: restore status word from register bits 3..6 -> C, O, Z, S.
:RSWD reg0_2 is opcode3_9=0x0007 & reg0_2
{
    C = (reg0_2 & 0b00001000) != 0;
    O = (reg0_2 & 0b00010000) != 0;
    Z = (reg0_2 & 0b00100000) != 0;
    S = (reg0_2 & 0b01000000) != 0;
}

# SDBD: set the double-byte-data context for the next instruction only.
:SDBD is opcode0_9=0x0001
[ doublebyte=1; globalset(inst_next, doublebyte); ]
{ }

:SETC is opcode0_9=0x0007 { C = 1; }

# SIN: software interrupt (body continues on the next original line).
:SIN is opcode1_9=0x001B {
SoftwareInterrupt(); } :SUB addr16, reg0_2 is opcode3_9=0x0060 & reg0_2 & regval0_2 & checkbranch ; addr16 { local ptr:2 = addr16; subtraction(reg0_2, regval0_2, *:2 ptr); build checkbranch; } :SUB@ impliedval16, reg0_2 is opcode6_9=0x000C & reg0_2 & regval0_2 & checkbranch & impliedval16 { subtraction(reg0_2, regval0_2, impliedval16); build checkbranch; } :SUBR reg3_5, reg0_2 is opcode6_9=0x0004 & reg3_5 & reg0_2 & regval0_2 & checkbranch { subtraction(reg0_2, regval0_2, reg3_5); build checkbranch; } :TCI is opcode0_9=0x0005 { TerminateCurrentInterrupt(); } :TSTR reg0_2 is opcode6_9=0x0002 & reg0_2 & (target0_2=target3_5) { resultFlags(reg0_2); } :XOR addr16, reg0_2 is opcode3_9=0x0078 & reg0_2 & regval0_2 & checkbranch ; addr16 { local ptr:2 = addr16; reg0_2 = regval0_2 ^ *:2 ptr; resultFlags(reg0_2); build checkbranch; } :XOR@ impliedval16, reg0_2 is opcode6_9=0x000F & reg0_2 & regval0_2 & checkbranch & impliedval16 { reg0_2 = regval0_2 ^ impliedval16; resultFlags(reg0_2); build checkbranch; } :XORR reg3_5, reg0_2 is opcode6_9=0x0007 & reg3_5 & reg0_2 & regval0_2 & checkbranch { reg0_2 = regval0_2 ^ reg3_5; resultFlags(reg0_2); build checkbranch; } :RLC reg0_2, 1 is opcode3_9=0x000A & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x8000) != 0; local tmpS = (regval0_2 & 0x4000) != 0; reg0_2 = (regval0_2 << 1) + zext(C); C = tmpC; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :RRC reg0_2, 1 is opcode3_9=0x000E & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x0001) != 0; local tmpS = (regval0_2 & 0x0100) != 0; reg0_2 = (regval0_2 >> 1) | (zext(C) << 15); C = tmpC; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SAR reg0_2, 1 is opcode3_9=0x000D & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x0100) != 0; reg0_2 = regval0_2 s>> 1; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SARC reg0_2, 1 is opcode3_9=0x000F & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x0001) != 0; local tmpS = (regval0_2 & 
0x0100) != 0; reg0_2 = regval0_2 s>> 1; C = tmpC; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SLL reg0_2, 1 is opcode3_9=0x0009 & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x4000) != 0; reg0_2 = regval0_2 << 1; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SLLC reg0_2, 1 is opcode3_9=0x000B & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x8000) != 0; local tmpS = (regval0_2 & 0x4000) != 0; reg0_2 = regval0_2 << 1; C = tmpC; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SLR reg0_2, 1 is opcode3_9=0x000C & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x0100) != 0; reg0_2 = regval0_2 >> 1; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SWAP reg0_2, 1 is opcode3_9=0x0008 & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x8000) != 0; local tmp = (regval0_2 << 8) & 0xFF00; reg0_2 = tmp | ((regval0_2 >> 8) & 0x00FF); S = tmpS; build checkbranch; } with : operation_size=1 { :RLC reg0_2, 2 is opcode3_9=0x000A & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x8000) != 0; local tmpO = (regval0_2 & 0x4000) != 0; local tmpS = (regval0_2 & 0x2000) != 0; reg0_2 = (regval0_2 << 2) + (zext(C) << 1) + zext(O); C = tmpC; O = tmpO; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :RRC reg0_2, 2 is opcode3_9=0x000E & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x0001) != 0; local tmpO = (regval0_2 & 0x0002) != 0; local tmpS = (regval0_2 & 0x0200) != 0; reg0_2 = (regval0_2 >> 2) | (zext(C) << 14) | (zext(O) << 15); C = tmpC; O = tmpO; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SAR reg0_2, 2 is opcode3_9=0x000D & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x0200) != 0; reg0_2 = regval0_2 s>> 2; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SARC reg0_2, 2 is opcode3_9=0x000F & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x0001) != 0; local tmpO = (regval0_2 & 0x0002) != 0; local tmpS = (regval0_2 & 0x0200) != 0; reg0_2 = regval0_2 s>> 2; 
C = tmpC; O = tmpO; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SLL reg0_2, 2 is opcode3_9=0x0009 & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x2000) != 0; reg0_2 = regval0_2 << 2; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SLLC reg0_2, 2 is opcode3_9=0x000B & reg0_2 & regval0_2 & checkbranch { local tmpC = (regval0_2 & 0x8000) != 0; local tmpO = (regval0_2 & 0x4000) != 0; local tmpS = (regval0_2 & 0x2000) != 0; reg0_2 = regval0_2 << 2; C = tmpC; O = tmpO; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SLR reg0_2, 2 is opcode3_9=0x000C & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x0200) != 0; reg0_2 = regval0_2 >> 2; S = tmpS; Z = reg0_2 == 0; build checkbranch; } :SWAP reg0_2, 2 is opcode3_9=0x0008 & reg0_2 & regval0_2 & checkbranch { local tmpS = (regval0_2 & 0x0080) != 0; reg0_2 = (regval0_2 << 8) | (regval0_2 & 0x00FF); S = tmpS; build checkbranch; } } :ADDI "#"imm16, reg0_2 is opcode3_9=0x005F & reg0_2 & regval0_2 & checkbranch ; imm16 { addition(reg0_2, regval0_2, imm16); build checkbranch; } :ANDI "#"imm16, reg0_2 is opcode3_9=0x0077 & reg0_2 & regval0_2 & checkbranch ; imm16 { reg0_2 = reg0_2 & imm16; resultFlags(reg0_2); build checkbranch; } :CMPI "#"imm16, reg0_2 is opcode3_9=0x006F & reg0_2 ; imm16 { comparison(reg0_2, imm16); } :MVII "#"imm16, reg0_2 is opcode3_9=0x0057 & reg0_2 & checkbranch ; imm16 { reg0_2 = imm16; build checkbranch; } :SUBI "#"imm16, reg0_2 is opcode3_9=0x0067 & reg0_2 & regval0_2 & checkbranch ; imm16 { subtraction(reg0_2, regval0_2, imm16); build checkbranch; } :XORI "#"imm16, reg0_2 is opcode3_9=0x007F & reg0_2 & regval0_2 & checkbranch ; imm16 { reg0_2 = regval0_2 ^ imm16; resultFlags(reg0_2); build checkbranch; } with : doublebyte=1 { :ADDI "#"splitimm16, reg0_2 is opcode3_9=0x005F & reg0_2 & regval0_2 & checkbranch ; splitimm16 { addition(reg0_2, regval0_2, splitimm16); build checkbranch; } :ANDI "#"splitimm16, reg0_2 is opcode3_9=0x0077 & reg0_2 & regval0_2 & checkbranch ; 
splitimm16 { reg0_2 = regval0_2 & splitimm16; resultFlags(reg0_2); build checkbranch; } :CMPI "#"splitimm16, reg0_2 is opcode3_9=0x006F & reg0_2 ; splitimm16 { comparison(reg0_2, splitimm16); } :MVII "#"splitimm16, reg0_2 is opcode3_9=0x0057 & reg0_2 & checkbranch ; splitimm16 { reg0_2 = splitimm16; build checkbranch; } :SUBI "#"splitimm16, reg0_2 is opcode3_9=0x0067 & reg0_2 & regval0_2 & checkbranch ; splitimm16 { subtraction(reg0_2, regval0_2, splitimm16); build checkbranch; } :XORI "#"splitimm16, reg0_2 is opcode3_9=0x007F & reg0_2 & regval0_2 & checkbranch ; splitimm16 { reg0_2 = regval0_2 ^ splitimm16; resultFlags(reg0_2); build checkbranch; } } ================================================ FILE: pypcode/processors/CR16/data/languages/CR16.cspec ================================================ ================================================ FILE: pypcode/processors/CR16/data/languages/CR16.ldefs ================================================ National Semiconductor's CompactRISC CR16C little endian ================================================ FILE: pypcode/processors/CR16/data/languages/CR16.opinion ================================================ ================================================ FILE: pypcode/processors/CR16/data/languages/CR16.pspec ================================================ ================================================ FILE: pypcode/processors/CR16/data/languages/CR16B.sinc ================================================ # CR16B # TODO: instructions not implemented # Basic ================================================================================ # define endian=big; # Defined in file that includes this file define alignment=2; define space ram type=ram_space size=3 default; define space register type=register_space size=2; # Registers ============================================================================ define register offset=0 size=2 [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_L RA_H 
SP_L _ ]; # Fields ================================================================================= define token instr(16) b0=(0,0) # bit0 op1=(1,4) # operand1 op2=(5,8) # operand2 opcode1=(9,12) # opcode1 i=(13,13) # integer operation length bit: i=0=8bit, i=1=16bit opcode2=(14,15) # opcode2 op1_b02=(1,3) # bits 0,1,2 of op1 op1_b12=(2,3) # bits 1,2 of op1 op2_b23=(7,8) # bits 2,3 of op2 op2_b12=(6,7) # bits 1,2 of op2 opcode1_b23=(11,12) # bits 2,3 of opcode1 opcode1_b13=(10,12) # bits 1,2,3 of opcode1 ; # Context variables ==================================================== # Attach variables ===================================================== # attach normal registers #attach variables [ N_0 M_0 ] [ # r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 #]; # Constructors ======================================================================= # MOVES :MOV is opcode2=1 & i & opcode1=0xc & op2 & op1 & b0=1 { } :MOV is opcode2=0 & i & opcode1=0xc & op2 & op1 & b0 { } :MOVXB is opcode2=1 & i=1 & opcode1=0x4 & op2 & op1 & b0=0 { } :MOVZB is opcode2=1 & i=1 & opcode1=0x5 & op2 & op1 & b0=0 { } :MOVD is opcode2=1 & i=1 & opcode1_b13=1 & op2 & op1 & b0=0 { } # ARITHMETIC :ADD is opcode2=1 & i & opcode1=0 & op2 & op1 & b0=1 { } :ADD is opcode2=0 & i & opcode1=0 & op2 & op1 & b0 { } :ADDU is opcode2=1 & i & opcode1=1 & op2 & op1 & b0=1 { } :ADDU is opcode2=0 & i & opcode1=1 & op2 & op1 & b0 { } :ADDC is opcode2=1 & i & opcode1=9 & op2 & op1 & b0=1 { } :ADDC is opcode2=0 & i & opcode1=9 & op2 & op1 & b0 { } :MUL is opcode2=1 & i & opcode1=3 & op2 & op1 & b0=1 { } :MUL is opcode2=0 & i & opcode1=3 & op2 & op1 & b0 { } :MULSB is opcode2=1 & i=1 & opcode1=0 & op2 & op1 & b0=0 { } :MULSW is opcode2=1 & i=1 & opcode1=1 & op2 & op1 & b0=0 { } :MULUW is opcode2=1 & i=1 & opcode1=0xf & op2 & op1_b12=0 & b0=0 { } :SUB is opcode2=1 & i & opcode1=0xf & op2 & op1 & b0=1 { } :SUB is opcode2=0 & i & opcode1=0xf & op2 & op1 & b0 { } :SUBC is opcode2=1 & i & opcode1=0xd & op2 & op1 
& b0=1 { } :SUBC is opcode2=0 & i & opcode1=0xd & op2 & op1 & b0 { } # INTEGER COMPARISON :CMP is opcode2=1 & i & opcode1=0x7 & op2 & op1 & b0=1 { } :CMP is opcode2=0 & i & opcode1=0x7 & op2 & op1 & b0 { } :BEQ0 is opcode2=0 & i & opcode1=0xa & op2_b12=0 & op1 & b0=1 { } :BEQ1 is opcode2=0 & i & opcode1=0xa & op2_b12=1 & op1 & b0=1 { } :BNE0 is opcode2=0 & i & opcode1=0xa & op2_b12=2 & op1 & b0=1 { } :BNE1 is opcode2=0 & i & opcode1=0xa & op2_b12=3 & op1 & b0=1 { } # LOGICAL / BOOLEAN :AND is opcode2=1 & i & opcode1=0x8 & op2 & op1 & b0=1 { } :AND is opcode2=0 & i & opcode1=0x8 & op2 & op1 & b0 { } :OR is opcode2=1 & i & opcode1=0xe & op2 & op1 & b0=1 { } :OR is opcode2=0 & i & opcode1=0xe & op2 & op1 & b0 { } :S is opcode2=1 & i & opcode1=0x7 & op2 & op1 & b0=0 { } :XOR is opcode2=1 & i & opcode1=0x6 & op2 & op1 & b0=1 { } :XOR is opcode2=0 & i & opcode1=0x6 & op2 & op1 & b0 { } # SHIFTS :ASHU is opcode2=1 & i & opcode1=0x4 & op2 & op1 & b0=1 { } :ASHU is opcode2=0 & i & opcode1=0x4 & op2 & op1 & b0 { } :LSH is opcode2=1 & i & opcode1=0x5 & op2 & op1 & b0=1 { } :LSH is opcode2=0 & i & opcode1=0x5 & op2 & op1 & b0 { } # BITS :TBIT is opcode2=1 & i=1 & opcode1=0xb & op2 & op1 & b0=1 { } :TBIT is opcode2=0 & i=1 & opcode1=0xb & op2 & op1 & b0 { } :TBIT is opcode2=1 & i & opcode1=2 & op2_b12=2 & op1 & b0=1 { } :TBIT is opcode2=0 & i & opcode1=2 & op2_b12=2 & op1 & b0=1 { } :TBIT is opcode2=0 & i & opcode1=2 & op2_b12=2 & op1 & b0=0 { } :CBIT is opcode2=1 & i & opcode1=2 & op2_b12=0 & op1 & b0=1 { } :CBIT is opcode2=0 & i & opcode1=2 & op2_b12=0 & op1 & b0=1 { } :CBIT is opcode2=0 & i & opcode1=2 & op2_b12=0 & op1 & b0=0 { } :SBIT is opcode2=1 & i & opcode1=2 & op2_b12=1 & op1 & b0=1 { } :SBIT is opcode2=0 & i & opcode1=2 & op2_b12=1 & op1 & b0=1 { } :SBIT is opcode2=0 & i & opcode1=2 & op2_b12=1 & op1 & b0=0 { } # PROCESSOR REGISTER MANIPULATION :LPR is opcode2=1 & i=1 & opcode1=8 & op2 & op1 & b0=0 { } :SPR is opcode2=1 & i=1 & opcode1=9 & op2 & op1 & b0=0 { } # 
JUMPS / LINKS :Bcond is opcode2=1 & i=0 & opcode1 & op2 & op1 & b0=0 { } :Bcond is opcode2=0 & i=0 & opcode1=0xa & op2 & op1_b02=7 & b0=0 { } :Bcond is opcode2=1 & i=1 & opcode1=0xa & op2 & op1 & b0=0 { } :BAL is opcode2=0 & i=1 & opcode1=0xa & op2 & op1_b02=7 & b0=0 { } :BAL is opcode2=1 & i=1 & opcode1=0xb & op2 & op1 & b0=0 { } :BR is opcode2=1 & i=0 & opcode1 & op2=0xe & op1 & b0=0 { } :BR is opcode2=0 & i=0 & opcode1=0xa & op2=0xe & op1_b02=7 & b0=0 { } :BR is opcode2=1 & i=1 & opcode1=0xa & op2=0xe & op1 & b0=0 { } :EXCP is opcode2=1 & i=1 & opcode1=0xd & op2=0xf & op1 & b0=0 { } :Jcond is opcode2=1 & i=0 & opcode1=0xa & op2 & op1 & b0=1 { } :Jcond is opcode2=0 & i=0 & opcode1=0xb & op2 & op1 & b0=1 { } :JAL is opcode2=1 & i=1 & opcode1=0xa & op2 & op1 & b0=1 { } :JAL is opcode2=0 & i=0 & opcode1=0xb & op2 & op1 & b0=0 { } :JUMP is opcode2=1 & i=0 & opcode1=0xa & op2=0xe & op1 & b0=1 { } :JUMP is opcode2=0 & i=0 & opcode1=0xb & op2=0xe & op1 & b0=1 { } :RETX is opcode2=1 & i=1 & opcode1=0xc & op2=0xf & op1=0xf & b0=0 { } :PUSH is opcode2=1 & i=1 & opcode1=0x6 & op2_b23=0 & op1 & b0=0 { } :POP is opcode2=1 & i=1 & opcode1=0x6 & op2_b23=1 & op1 & b0=0 { } :POPRET is opcode2=1 & i=1 & opcode1=0x6 & op2_b23=2 & op1 & b0=0 { } :POPRET is opcode2=1 & i=1 & opcode1=0x6 & op2_b23=3 & op1 & b0=0 { } # LOAD / STORE :LOAD is opcode2=2 & i & opcode1 & op2 & op1 & b0 { } :LOAD is opcode2=2 & i & opcode1_b23=2 & op2 & op1 & b0=1 { } :LOAD is opcode2=2 & i & opcode1_b23=3 & op2 & op1 & b0=1 { } :LOAD is opcode2=2 & i & opcode1_b23=3 & op2 & op1=0xf & b0=1 { } :LOADM is opcode2=1 & i=1 & opcode1=0xf & op2_b23=0 & op1=2 & b0=0 { } :STORE is opcode2=3 & i & opcode1 & op2 & op1 & b0 { } :STORE is opcode2=3 & i & opcode1_b23=2 & op2 & op1 & b0=1 { } :STORE is opcode2=3 & i & opcode1_b23=3 & op2 & op1 & b0=1 { } :STORE is opcode2=3 & i & opcode1_b23=3 & op2 & op1=0xf & b0=1 { } :STORE is opcode2=1 & i & opcode1=2 & op2_b12=3 & op1 & b0=1 { } :STORE is opcode2=0 & i & opcode1=2 & 
op2_b12=3 & op1 & b0=1 { } :STORE is opcode2=0 & i & opcode1=2 & op2_b12=3 & op1 & b0=0 { } :STORM is opcode2=1 & i=1 & opcode1=0xf & op2_b23=1 & op1=2 & b0=0 { } # MISC :DI is opcode2=1 & i=1 & opcode1=0xe & op2=0xe & op1=0xf & b0=0 { } :EI is opcode2=1 & i=1 & opcode1=0xe & op2=0xf & op1=0xf & b0=0 { } :NOP is opcode2=0 & i=0 & opcode1=0x1 & op2=0 & op1=0 & b0=0 { } :WAIT is opcode2=1 & i=1 & opcode1=0xf & op2=0xf & op1=0xf & b0=0 { } :EIWAIT is opcode2=1 & i=1 & opcode1=0xf & op2=0xf & op1=0x3 & b0=0 { } ================================================ FILE: pypcode/processors/CR16/data/languages/CR16B.slaspec ================================================ define endian=little; @include "CR16B.sinc" ================================================ FILE: pypcode/processors/CR16/data/languages/CR16C.sinc ================================================ # This module defines CR16C # NOTE: Have assumed for now CFG.SR = 0 # sizes: # B = byte = 1 byte # W = word = 2 bytes # D = doubleword = 4 bytes # Basic ===================================================================== define alignment=2; define space ram type=ram_space size=4 default; define space register type=register_space size=2; # Registers ================================================================== # - When CFG.SR is not set, register pairs are defined as follows: # (R1,R0), (R2,R1) ... (R11,R10), (R12_L, R11) # R12, R13, RA and SP are double-word registers for direct storage of # addresses greater than 16 bits. 
# # - The most significant 8 bits of ISP and the least significant bit of ISP # are forced to 0 # General purpose registers # NOTE: RA_L==RA and SP_L==SP # NOTE: R12_H, R13_H, RA_H don't actually exist as named registers # (name needed for push/pop instructions implementation only) define register offset=0 size=2 [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_L RA_H SP_L SP_H ]; define register offset=0 size=4 [ R1R0 R3R2 R5R4 R7R6 R9R8 R11R10 R12 R13 RA SP ]; define register offset=2 size=4 [ R2R1 R4R3 R6R5 R8R7 R10R9 R12LR11 ]; # Dedicated address registers define register offset=50 size=4 [ PC ]; define register offset=60 size=2 [ ISPH ISPL USPH USPL INTBASEH INTBASEL ]; define register offset=60 size=4 [ ISP USP INTBASE ]; # Processor Status register define register offset=80 size=2 [ PSR ]; #define register offset=90 size=1 [ C T L U F Z N E P I ]; # cond flags @define C "PSR[0,1]" @define T "PSR[1,1]" @define L "PSR[2,1]" @define U "PSR[3,1]" @define F "PSR[5,1]" @define Z "PSR[6,1]" @define N "PSR[7,1]" @define E "PSR[9,1]" @define P "PSR[10,1]" @define I "PSR[11,1]" # Configuration register define register offset=100 size=2 [ CFG ]; # Debug registers define register offset=110 size=2 [ DBS DSR DCRH DCRL CAR0H CAR0L CAR1H CAR1L ]; define register offset=114 size=4 [ DCR CAR0 CAR1 ]; # these don't actually exist - they are for prd definition (size=4) define register offset=130 size=4 [ DBS_1 DSR_1 CFG_1 PSR_1 ]; # Fields ====================================================================== define token word1(16) hi = (8,15) # opcode8 lo = (0,7) # op = (0,15) # opcode16 op12 = (4,15) # opcode12 op9 = (7,15) # opcode9 op7 = (9,15) # opcode7 op7p3 = (8,8) # p3 for opcode7 # op10 = (6,15) # opcode10 op4 = (12,15) # opcode4 lo1 = (0,3) op13 = (3,15) # opcode13 op11 = (5,15) dst = (0,3) # register src = (4,7) src1 = (0,3) reg0 = (0,3) reg1 = (0,3) reg2 = (0,3) reg3 = (0,3) reg4 = (0,3) reg5 = (0,3) reg6 = (0,3) reg7 = (0,3) dst1 = (4,7) rp_dst 
= (0,3) # register pair rp_src = (4,7) rp_dst2 = (4,7) rp_src2 = (0,3) prp_src1 = (0,3) prp_dst1 = (0,3) cond = (4,7) x4 = (12,15) # generic, nibble 4 x3 = (8,11) x2 = (4,7) x1 = (0,3) x2s = (4,6) x3s = (8,11) signed b4_8 = (4,8) b4_7 = (4,7) b4_6 = (4,6) b_ra = (7,7) # RA bit for push/pop, fmt 14 b_prp = (3,3) # prp bit, fmt 18 b0_2 = (0,2) # fmt 6 ; define token word2(16) w2 = (0,15) n4 = (12,15) # nibble 4, sometimes an opcode extension n3 = (8,11) n2 = (4,7) n1 = (0,3) w2_b0 = (0,0) w2_b1_15 = (1,15) signed w2_hi = (8,15) prd = (4,7) # processor registers pr = (4,7) rp_src3 = (0,3) # register pairs rp_dst3 = (0,3) rp_src6 = (4,7) rp_dst6 = (4,7) rp_dst7 = (8,11) src2 = (0,3) # registers dst2 = (0,3) src5 = (4,7) ; define token word3(16) w3 = (0,15) w3_b0 = (0,0) w3_b1_15 = (1,15) ; # used this token when operands spanned across 2 words # | byte1 byte0 byte3 byte2 | define token doubleword(32) dw = (0,31) dw_w1 = (0,15) signed dw_w2 = (16,31) n8 = (12,15) # nibble 4, sometimes an opcode extension n7 = (8,11) n6 = (4,7) # (cond) n5 = (0,3) n5s = (0,3) signed b1_15 = (17,31) b0_15 = (16,31) dw_lo = (0,7) signed # for fmt 5 dw_hi = (8,15) # (op8) dw_op7 = (9,15) # (op7) dw_op9 = (7,15) # (op9) op10 = (6,15) # (opcode 10) b_rs = (8,8) # for fmt 13 dw_b4_6 = (4,6) dw_b_prp = (3,3) # for fmt 17 f17a = (16,19) # for fmt 17 p3 f17b = (4,5) f17c = (30,31) f17d = (24,29) dw_n2 = (20,23) # fmt 17, p2, 4-bits dw_n2b = (20,22) # fmt 17, p2, 3-bit version rp_dst4 = (4,7) rp_src4 = (4,7) rp_dst5 = (0,3) rp_src5 = (0,3) dst3 = (4,7) src3 = (4,7) dst4 = (0,3) src4 = (0,3) prp_dst = (0,3) prp_src = (0,3) ; # Attach variables ===================================================== # normal registers attach variables [ src dst src1 dst1 src2 dst2 src3 dst3 src4 dst4 src5 ] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R13_L RA_L SP_L ]; # register pairs attach variables [ rp_src rp_dst rp_src2 rp_dst2 rp_src3 rp_dst3 rp_src4 rp_dst4 rp_src5 rp_dst5 rp_src6 rp_dst6 rp_dst7 ] [ R1R0 
R2R1 R3R2 R4R3 R5R4 R6R5 R7R6 R8R7 R9R8 R10R9 R11R10 R12LR11 R12 R13 RA SP ]; attach variables [ prp_src prp_dst prp_src1 prp_dst1 ] [ # rrp R1R0 R3R2 R5R4 R7R6 R9R8 R11R10 R4R3 R6R5 R1R0 R3R2 R5R4 R7R6 R9R8 R11R10 R4R3 R6R5 ]; # processor registers attach variables [ pr ] [ DBS DSR DCRL DCRH CAR0L CAR0H CAR1L CAR1H CFG PSR INTBASEL INTBASEH ISPL ISPH USPL USPH ]; attach variables [ prd ] [ DBS_1 DSR_1 DCR _ CAR0 _ CAR1 _ CFG_1 PSR_1 INTBASE _ ISP _ USP _ ]; # Pseudo instructions ================================================= define pcodeop trap; define pcodeop suspend; # Macros ============================================================== # Flags set by add instructions macro addflags(op1, op2) { $(C) = (carry(op1, op2)); # Check for carry $(F) = (scarry(op1, op2)); # Check for overflow } # Flags set by add instructions (3 operands) macro addflags2(op1, op2, op3) { local tmp = (carry(op1,op2)); # Check for carry $(C) = (carry(zext(tmp), op3)); tmp = (scarry(op1,op2)); # Check for overflow $(F) = (scarry(zext(tmp), op3)); } # Flags set by sub instructions macro subflags(op1, op2) { $(C) = (op1 < op2); # Check for borrow $(F) = sborrow(op1, op2); # Check for overflow } # Flags set by sub instructions (3 operands) macro subflags2(op1, op2, op3) { $(C) = (op1 < (op2 + op3)); # Check for borrow local tmp = sborrow(op1, op2); # Check for overflow $(F) = sborrow(zext(tmp), op3); } # Flags set by the compare instructions macro compflags(op1, op2) { $(Z) = (op1 == op2); # Equal comparison $(N) = (op2 s< op1); # Signed comparison $(L) = (op2 < op1); # Unsigned comparison } # Operand Display ==================================================== imm20: "$"tmp is n5 & dw_w2 [ tmp = (n5 << 16) + dw_w2; ] { export *[const]:3 tmp; } imm32: "$"tmp is dw_w1 & dw_w2 [ tmp = (dw_w1 << 16) + dw_w2; ] { export *[const]:4 tmp; } imm4: "$"x2 is x2 { export *[const]:1 x2; } # imm src imm4: "$"tmp is x2=9 [ tmp = -1; ] { export *[const]:1 tmp; } imm4a: "$"x2 is x2 { export *[const]:1 
x2; } # imm pos imm4b: "$"n6 is n6 { export *[const]:1 n6; } imm4c: "$"dw_n2 is dw_n2 { export *[const]:1 dw_n2; } imm4d: "$"b4_7 is b4_7 { export *[const]:1 b4_7; } # for ash and lsh (signed count) imm16: "$"w2 is w2 { export *[const]:2 w2; } imm5: "$"b4_8 is b4_8 { export *[const]:1 b4_8; } # for ash and lsh (signed count) imm3: "$"b4_6 is b4_6 { export *[const]:1 b4_6; } imm3b: "$"dw_b4_6 is dw_b4_6 { export *[const]:1 dw_b4_6; } imm3c: "$"dw_n2b is dw_n2b { export *[const]:1 dw_n2b; } imm3d: "$"x2s is x2s { export *[const]:1 x2s; } # for ash and lsh (signed count) cnt3: "$"tmp is b4_6 [ tmp = b4_6 + 1; ] { export *[const]:1 tmp; } cnt3b: "$"tmp is b0_2 [ tmp = b0_2 + 1; ] { export *[const]:1 tmp; } # If (abs20 > 0xEFFFF) the resulting address is logically ORed with 0xF00000 # i.e. addresses from 1M-64k to 1M are re-mapped by the core to 16M-64k to 16M. abs20: tmp is n5=0xf & dw_w2 [ tmp = 0xff0000 | dw_w2; ] { export *[const]:3 tmp; } abs20: tmp is n5 & dw_w2 [ tmp = (n5 << 16) + dw_w2; ] { export *[const]:3 tmp; } abs24: "*"tmp is n5 & n7 & b0_15 [ tmp = (n5 << 20) + (n7 << 16) + (b0_15); ] { export *[const]:4 tmp; } # NOTE: disp24 is sign extended to 25 bits - hopefully that doesn't matter here # TODO: display *+/*- relative to PC? 
# ---------------------------------------------------------------------------
# PC-relative displacement subtables.  CR16 branch displacements are encoded
# in words: the encoded value is shifted left once and added to inst_start.
# NOTE(review): fixes in this section -
#   * cc2 "lo"/"lt" used !(A && B) where the cc table (and the CR16
#     condition-code definitions: LO = L:0 and Z:0, LT = N:0 and Z:0) require
#     !A && !B; made cc2 consistent with cc.
#   * MACUW/MACSW local labels were lost (bare "goto ;"); restored.
# ---------------------------------------------------------------------------
disp8:  "*"tmp is x1 & x3s [ tmp = (x3s << 5) + (x1 << 1) + inst_start; ] { export *[ram]:4 tmp; } # fmt 21
disp16: "*"tmp is w2_b1_15 [ tmp = (w2_b1_15 << 1) + inst_start; ] { export *[ram]:4 tmp; } # fmt 5, 22
disp24: "*"tmp is n5s & n7 & b1_15 [ tmp = (n5s << 20) + (n7 << 16) + (b1_15 << 1) + inst_start; ] { export *[ram]:4 tmp; }
disp24b:"*"tmp is dw_lo & b1_15 [ tmp = (dw_lo << 16) + (b1_15 << 1) + inst_start; ] { export *[ram]:4 tmp; }
disp4c: "*"tmp is x2 [ tmp = ((x2 + 1) << 1) + inst_start; ] { export *[ram]:4 tmp; }

# disps not relative to PC
disp4:  "*"tmp is x3 [ tmp = (x3 << 1); ] { export *[const]:3 tmp; }
disp4b: "*"x3 is x3 { export *[const]:3 x3; }
disp16b:"*"w2 is w2 { export *[const]:2 w2; }
disp20: "*"tmp is n7 & dw_w2 [ tmp = (n7 << 16) + dw_w2; ] { export *[const]:3 tmp; }
disp14: "*"tmp is f17a & f17b & f17c & f17d [ tmp = (f17d << 8) + (f17c << 6) + (f17b << 4) + f17a; ] { export *[const]:3 tmp; }

# Condition-code subtable for 4-bit cond field; exports the tested predicate.
cc: "eq" is cond=0 { local tmp = $(Z); export tmp; }
cc: "ne" is cond=1 { local tmp = !$(Z); export tmp; }
cc: "cs" is cond=2 { local tmp = $(C); export tmp; }
cc: "cc" is cond=3 { local tmp = !$(C); export tmp; }
cc: "hi" is cond=4 { local tmp = $(L); export tmp; }
cc: "ls" is cond=5 { local tmp = !$(L); export tmp; }
cc: "gt" is cond=6 { local tmp = $(N); export tmp; }
cc: "le" is cond=7 { local tmp = !$(N); export tmp; }
cc: "fs" is cond=8 { local tmp = $(F); export tmp; }
cc: "fc" is cond=9 { local tmp = !$(F); export tmp; }
cc: "lo" is cond=10 { local tmp = !$(Z) && !$(L); export tmp; }
cc: "hs" is cond=11 { local tmp = $(Z) || $(L); export tmp; }
cc: "lt" is cond=12 { local tmp = !$(Z) && !$(N); export tmp; }
cc: "ge" is cond=13 { local tmp = $(Z) || $(N); export tmp; }
COND: cc is cc { if (!cc) goto inst_next; }

# Same condition table keyed on the n6 field (double-word formats).
cc2: "eq" is n6=0 { local tmp = $(Z); export tmp; }
cc2: "ne" is n6=1 { local tmp = !$(Z); export tmp; }
cc2: "cs" is n6=2 { local tmp = $(C); export tmp; }
cc2: "cc" is n6=3 { local tmp = !$(C); export tmp; }
cc2: "hi" is n6=4 { local tmp = $(L); export tmp; }
cc2: "ls" is n6=5 { local tmp = !$(L); export tmp; }
cc2: "gt" is n6=6 { local tmp = $(N); export tmp; }
cc2: "le" is n6=7 { local tmp = !$(N); export tmp; }
cc2: "fs" is n6=8 { local tmp = $(F); export tmp; }
cc2: "fc" is n6=9 { local tmp = !$(F); export tmp; }
cc2: "lo" is n6=10 { local tmp = !$(Z) && !$(L); export tmp; } # was !($(Z) && $(L)); LO = Z:0 and L:0, match cc
cc2: "hs" is n6=11 { local tmp = $(Z) || $(L); export tmp; }
cc2: "lt" is n6=12 { local tmp = !$(Z) && !$(N); export tmp; } # was !($(Z) && $(N)); LT = Z:0 and N:0, match cc
cc2: "ge" is n6=13 { local tmp = $(Z) || $(N); export tmp; }
COND2: cc2 is cc2 { if (!cc2) goto inst_next; }

ra: "RA" is epsilon { }
const1: "0x0" is epsilon { }
rs: "[R12]" is b_rs=0 { export R12; }
rs: "[R13]" is b_rs=1 { export R13; }
prp2: "[R12]" is dw_b_prp=0 { export R12; }
prp2: "[R13]" is dw_b_prp=1 { export R13; }
prp: "[R12]" is b_prp=0 { export R12; }
prp: "[R13]" is b_prp=1 { export R13; }
cinv1: "[i]" is epsilon { }
cinv2: "[i,u]" is epsilon { }
cinv3: "[d]" is epsilon { }
cinv4: "[d,u]" is epsilon { }
cinv5: "[d,i]" is epsilon { }
cinv6: "[d,i,u]" is epsilon { }

# Constructors ===============================================================

# MOVES

# MOVB - move low-order byte only
:MOVB imm4, dst is hi=0x58 & dst & imm4 { dst = (dst & 0xff00) | (zext(imm4) & 0x00ff); }
:MOVB imm16, dst is hi=0x58 & x2=11 & dst ; imm16 { dst = (dst & 0xff00) | (imm16 & 0x00ff); }
:MOVB src, dst is hi=0x59 & src & dst { dst = (dst & 0xff00) | (zext(src:1) & 0x00ff); }

# MOVD - move doubleword
:MOVD imm20, rp_dst4 is dw_hi=0x05 & rp_dst4 & imm20 { rp_dst4 = zext(imm20); }
:MOVD imm32, rp_dst is op12=0x007 & rp_dst ; imm32 { rp_dst = imm32; }
:MOVD imm4, rp_dst is hi=0x54 & rp_dst & imm4 { rp_dst = sext(imm4); }
:MOVD imm16, rp_dst is hi=0x54 & x2=11 & rp_dst ; imm16 { rp_dst = sext(imm16); }
:MOVD rp_src, rp_dst is hi=0x55 & rp_dst & rp_src { rp_dst = rp_src; }

# MOVW - move word
:MOVW imm4, dst is hi=0x5a & dst & imm4 { dst = sext(imm4); }
:MOVW imm16, dst is hi=0x5a & x2=11 & dst ; imm16 { dst = imm16; }
:MOVW src, dst is hi=0x5b & src & dst { dst = src; }

# MOVXB - move low-order byte of src to word dst with sign extension
:MOVXB src, dst is hi=0x5c & src & dst { dst = sext(src:1); }

# MOVXW - move word src to doubleword dst with sign extension
:MOVXW src, rp_dst is hi=0x5e & src & rp_dst { rp_dst = sext(src); }

# MOVZB - move low-order byte of src to word dst with zero extension
:MOVZB src, dst is hi=0x5d & src & dst { dst = zext(src:1); }

# MOVZW - move word src to doubleword dst with zero extension
:MOVZW src, rp_dst is hi=0x5f & src & rp_dst { rp_dst = zext(src); }

# ARITHMETIC

# ADDB - add low-order byte only
:ADDB imm4, dst is hi=0x30 & dst & imm4 { addflags(dst:1, imm4); tmp:1 = dst:1 + imm4; dst = (dst & 0xff00) + zext(tmp); }
:ADDB imm16, dst is hi=0x30 & x2=11 & dst ; imm16 { addflags(dst:1, imm16:1); tmp:1 = dst:1 + imm16:1; dst = (dst & 0xff00) + zext(tmp); }
:ADDB src, dst is hi=0x31 & src & dst { addflags(dst:1, src:1); tmp:1 = dst:1 + src:1; dst = (dst & 0xff00) + zext(tmp); }

# ADDCB - add with carry low-order byte only
:ADDCB imm4, dst is hi=0x34 & dst & imm4 { addflags2(dst:1, imm4, $(C)); tmp:1 = dst:1 + imm4 + $(C); dst = (dst & 0xff00) + zext(tmp); }
:ADDCB imm16, dst is hi=0x34 & x2=11 & dst ; imm16 { addflags2(dst:1, imm16:1, $(C)); tmp:1 = dst:1 + imm16:1 + $(C); dst = (dst & 0xff00) + zext(tmp); }
:ADDCB src, dst is hi=0x35 & src & dst { addflags2(dst:1, src:1, $(C)); tmp:1 = dst:1 + src:1 + $(C); dst = (dst & 0xff00) + zext(tmp); }

# ADDCW - add word with carry
:ADDCW imm4, dst is hi=0x36 & dst & imm4 { tmp2:2 = sext(imm4); tmp3:2 = zext($(C)); addflags2(dst, tmp2, tmp3); dst = dst + tmp2 + tmp3; }
:ADDCW imm16, dst is hi=0x36 & x2=11 & dst ; imm16 { tmp3:2 = zext($(C)); addflags2(dst, imm16, tmp3); dst = dst + imm16 + tmp3; }
:ADDCW src, dst is hi=0x37 & src & dst { tmp3:2 = zext($(C)); addflags2(dst, src, tmp3); dst = dst + src + tmp3; }

# ADDD - add doubleword
:ADDD imm20, rp_dst4 is dw_hi=0x04 & rp_dst4 & imm20 { tmp:4 = zext(imm20); addflags(tmp, rp_dst4); rp_dst4 = rp_dst4 + tmp; }
:ADDD imm32, rp_dst is op12=0x002 & rp_dst ; imm32 { addflags(imm32, rp_dst); rp_dst = rp_dst + imm32; }
:ADDD imm4, rp_dst is hi=0x60 & rp_dst & imm4 { tmp:4 = sext(imm4); addflags(tmp, rp_dst); rp_dst = rp_dst + tmp; }
:ADDD imm16, rp_dst is hi=0x60 & x2=11 & rp_dst ; imm16 { tmp:4 = sext(imm16); addflags(tmp, rp_dst); rp_dst = rp_dst + tmp; }
:ADDD rp_src, rp_dst is hi=0x61 & rp_dst & rp_src { addflags(rp_src, rp_dst); rp_dst = rp_dst + rp_src; }

# ADDUB - add low-order byte only, PSR flags unaffected
:ADDUB imm4, dst is hi=0x2c & dst & imm4 { tmp:1 = dst:1 + imm4; dst = (dst & 0xff00) + zext(tmp); }
:NOP is hi=0x2c & dst=0 & x2=0 { }
:ADDUB imm16, dst is hi=0x2c & x2=11 & dst ; imm16 { tmp:1 = dst:1 + imm16:1; dst = (dst & 0xff00) + zext(tmp); }
:ADDUB src, dst is hi=0x2d & src & dst { tmp:1 = dst:1 + src:1; dst = (dst & 0xff00) + zext(tmp); }

# ADDUW - add word, PSR flags unaffected
:ADDUW imm4, dst is hi=0x2e & dst & imm4 { tmp:2 = sext(imm4); dst = dst + tmp; }
:ADDUW imm16, dst is hi=0x2e & x2=11 & dst ; imm16 { dst = dst + imm16; }
:ADDUW src, dst is hi=0x2f & src & dst { dst = dst + src; }

# ADDW - add word
:ADDW imm4, dst is hi=0x32 & dst & imm4 { tmp:2 = sext(imm4); addflags(tmp, dst); dst = dst + tmp; }
:ADDW imm16, dst is hi=0x32 & x2=11 & dst ; imm16 { addflags(imm16, dst); dst = dst + imm16; }
:ADDW src, dst is hi=0x33 & src & dst { addflags(src, dst); dst = dst + src; }

# Multiply Signed Q15 Word and Accumulate Long Result
:MACQW src2, src5, rp_dst7 is op=0x0014 ; n4=13 & src2 & src5 & rp_dst7
{
    tmp:4 = sext(src2) * sext(src5); # TODO based on what Q15 is, this sext may not be right
    rp_dst7 = rp_dst7 + tmp;
    # Saturation sketch (disabled, pending Q15 semantics):
    # if (!scarry(rp_dst7, tmp)) goto <chk_neg>;
    # rp_dst7 = 0x7fffffff; # overflowed, set to max
    # goto <done>;
    # <chk_neg>
    # if (!sborrow(rp_dst7, tmp)) goto <done>;
    # rp_dst7 = 0x80000000; # underflowed, set to min
    # <done>
    # TODO: if scarry(rp_dst7, tmp) then rp_dst7 = 0x? TODO what is Q15 signed fractional format
    # if sborrow(rp_dst7, tmp) then rp_dst7 = 0x?
}

# Unsigned Multiply Word and Accumulate Long Result
:MACUW src2, src5, rp_dst7 is op=0x0014 ; n4=14 & src2 & src5 & rp_dst7
{
    tmp:4 = zext(src2) * zext(src5);
    rp_dst7 = rp_dst7 + tmp;
    # if scarry(rp_dst7, tmp) then rp_dst7 = 0xffffffff
    if (!scarry(rp_dst7, tmp)) goto <done>;
    rp_dst7 = 0xffffffff; # overflowed, set to max
    <done>
}

# Signed Multiply Word and Add Long Result
:MACSW src2, src5, rp_dst7 is op=0x0014 ; n4=15 & src2 & src5 & rp_dst7
{
    tmp:4 = sext(src2) * sext(src5);
    rp_dst7 = rp_dst7 + tmp;
    # if scarry(rp_dst7, tmp) then rp_dst7 = 0x7fffffff
    if (!scarry(rp_dst7, tmp)) goto <chk_neg>;
    rp_dst7 = 0x7fffffff; # overflowed, set to max
    goto <done>;
    <chk_neg>
    # if sborrow(rp_dst7, tmp) then rp_dst7 = 0x80000000
    if (!sborrow(rp_dst7, tmp)) goto <done>;
    rp_dst7 = 0x80000000; # underflowed, set to min
    <done>
}

# MULB - multiply byte
:MULB imm4, dst is hi=0x64 & dst & imm4 { tmp:1 = dst:1 * imm4; dst = (dst & 0xff00) + zext(tmp); }
:MULB imm16, dst is hi=0x64 & x2=11 & dst ; imm16 { tmp:1 = dst:1 * imm16:1; dst = (dst & 0xff00) + zext(tmp); }
:MULB src, dst is hi=0x65 & src & dst { tmp:1 = dst:1 * src:1; dst = (dst & 0xff00) + zext(tmp); }

# Signed Multiply Byte, Word Result
:MULSB src, dst is hi=0x0b & src & dst { dst = sext(src:1) * sext(dst:1); }

# Signed Multiply Word, Long Result
:MULSW src, rp_dst is hi=0x62 & src & rp_dst { rp_dst = sext(src) * sext(rp_dst:2); }

# Unsigned Multiply Word, Long Result
:MULUW src, rp_dst is hi=0x63 & src & rp_dst { rp_dst = zext(src) * zext(rp_dst:2); }

# MULW - multiply word
:MULW imm4, dst is hi=0x66 & dst & imm4 { tmp:2 = sext(imm4); dst = dst * tmp; }
:MULW imm16, dst is hi=0x66 & x2=11 & dst ; imm16 { dst = dst * imm16; }
:MULW src, dst is hi=0x67 & src & dst { dst = dst * src; }

# SUBB - subtract low-order byte only
:SUBB imm4, dst is hi=0x38 & dst & imm4 { subflags(dst:1, imm4); tmp:1 = dst:1 - imm4; dst = (dst & 0xff00) + zext(tmp); }
:SUBB imm16, dst is hi=0x38 & x2=11 & dst ; imm16 { subflags(dst:1, imm16:1); tmp:1 = dst:1 - imm16:1; dst = (dst & 0xff00) + zext(tmp); }
:SUBB src, dst is hi=0x39 & src & dst { subflags(dst:1, src:1); tmp:1 = dst:1 - src:1; dst = (dst & 0xff00) + zext(tmp); }

# SUBCB - subtract with carry low-order byte only
:SUBCB imm4, dst is hi=0x3c & dst & imm4 { subflags2(dst:1, imm4, $(C)); tmp:1 = dst:1 - imm4 - $(C); dst = (dst & 0xff00) + zext(tmp); }
:SUBCB imm16, dst is hi=0x3c & x2=11 & dst ; imm16 { subflags2(dst:1, imm16:1, $(C)); tmp:1 = dst:1 - imm16:1 - $(C); dst = (dst & 0xff00) + zext(tmp); }
:SUBCB src, dst is hi=0x3d & src & dst { subflags2(dst:1, src:1, $(C)); tmp:1 = dst:1 - src:1 - $(C); dst = (dst & 0xff00) + zext(tmp); }

# SUBCW - subtract word with carry
:SUBCW imm4, dst is hi=0x3e & dst & imm4 { tmp2:2 = sext(imm4); tmp3:2 = zext($(C)); subflags2(dst, tmp2, tmp3); dst = dst - tmp2 - tmp3; }
:SUBCW imm16, dst is hi=0x3e & x2=11 & dst ; imm16 { tmp3:2 = zext($(C)); subflags2(dst, imm16, tmp3); dst = dst - imm16 - tmp3; }
:SUBCW src, dst is hi=0x3f & src & dst { tmp3:2 = zext($(C)); subflags2(dst, src, tmp3); dst = dst - src - tmp3; }

# SUBD - subtract doubleword
:SUBD rp_src6, rp_dst3 is op=0x0014 ; n4=12 & n3=0 & rp_src6 & rp_dst3
{ # fmt 1
    subflags(rp_dst3, rp_src6);
    rp_dst3 = rp_dst3 - rp_src6;
}
:SUBD imm32, rp_dst is op12=0x003 & rp_dst ; imm32 { subflags(imm32, rp_dst); rp_dst = rp_dst - imm32; }

# SUBW - subtract word
:SUBW imm4, dst is hi=0x3a & dst & imm4 { tmp:2 = sext(imm4); subflags(tmp, dst); dst = dst - tmp; }
:SUBW imm16, dst is hi=0x3a & x2=11 & dst ; imm16 { subflags(imm16, dst); dst = dst - imm16; }
:SUBW src, dst is hi=0x3b & src & dst { subflags(src, dst); dst = dst - src; }

# INTEGER COMPARISON

# CMPB - compare low-order byte only
:CMPB imm4, src1 is hi=0x50 & src1 & imm4 { compflags(imm4, src1:1); }
:CMPB imm16, src1 is hi=0x50 & x2=11 & src1 ; imm16 { compflags(imm16:1, src1:1); }
:CMPB src, src1 is hi=0x51 & src & src1 { compflags(src:1, src1:1); }

# CMPD - compare doubleword
:CMPD imm32, rp_src2 is op12=0x009 & rp_src2 ; imm32 { compflags(imm32, rp_src2); } :CMPD imm4, rp_src2 is hi=0x56 & rp_src2 & imm4 { tmp:4 = sext(imm4); compflags(tmp, rp_src2); } :CMPD imm16, rp_src2 is hi=0x56 & x2=11 & rp_src2 ; imm16 { tmp:4 = sext(imm16); compflags(tmp, rp_src2); } :CMPD rp_src, rp_src2 is hi=0x57 & rp_src & rp_src2 { compflags(rp_src, rp_src2); } # CMPW - compare word :CMPW imm4, src1 is hi=0x52 & src1 & imm4 { tmp:2 = sext(imm4); compflags(tmp, src1); } :CMPW imm16, src1 is hi=0x52 & x2=11 & src1 ; imm16 { compflags(imm16, src1); } :CMPW src, src1 is hi=0x53 & src & src1 { compflags(src, src1); } # LOGICAL / BOOLEAN # ANDB - and low-order byte only :ANDB imm4, dst is hi=0x20 & dst & imm4 { tmp:1 = dst:1 & imm4; dst = (dst & 0xff00) + zext(tmp); } :ANDB imm16, dst is hi=0x20 & x2=11 & dst ; imm16 { tmp:1 = dst:1 & imm16:1; dst = (dst & 0xff00) + zext(tmp); } :ANDB src, dst is hi=0x21 & src & dst { tmp:1 = dst:1 & src:1; dst = (dst & 0xff00) + zext(tmp); } # ANDW - and word :ANDW imm4, dst is hi=0x22 & dst & imm4 { tmp:2 = sext(imm4); dst = dst & tmp; } :ANDW imm16, dst is hi=0x22 & x2=11 & dst ; imm16 { dst = dst & imm16; } :ANDW src, dst is hi=0x23 & src & dst { dst = dst & src; } # ANDD - and doubleword :ANDD imm32, rp_dst is op12=0x004 & rp_dst ; imm32 { rp_dst = rp_dst & imm32; } :ANDD rp_src6, rp_dst3 is op=0x0014 ; n4=11 & rp_src6 & rp_dst3 { # fmt 1 rp_dst3 = rp_dst3 & rp_src6; } # ORB - or low-order byte only :ORB imm4, dst is hi=0x24 & dst & imm4 { tmp:1 = dst:1 | imm4; dst = (dst & 0xff00) + zext(tmp); } :ORB imm16, dst is hi=0x24 & x2=11 & dst ; imm16 { tmp:1 = dst:1 | imm16:1; dst = (dst & 0xff00) + zext(tmp); } :ORB src, dst is hi=0x25 & src & dst { tmp:1 = dst:1 | src:1; dst = (dst & 0xff00) + zext(tmp); } # ORW - or word :ORW imm4, dst is hi=0x26 & dst & imm4 { tmp:2 = sext(imm4); dst = dst | tmp; } :ORW imm16, dst is hi=0x26 & x2=11 & dst ; imm16 { dst = dst | imm16; } :ORW src, dst is hi=0x27 & src & dst { dst = dst | src; } 
# ORD - or doubleword
:ORD imm32, rp_dst is op12=0x005 & rp_dst ; imm32 { rp_dst = rp_dst | imm32; }
:ORD rp_src6, rp_dst3 is op=0x0014 ; n4=9 & rp_src6 & rp_dst3
{ # fmt 1
    rp_dst3 = rp_dst3 | rp_src6;
}

# S - save condition as boolean
# Sets dst to 1 if the condition holds; COND skips to inst_next otherwise,
# so the "dst = 0" executes for the false case before the skip point.
:S^COND dst is hi=0x08 & COND & dst { dst = 0; build COND; dst = 1; }
:S dst is hi=0x08 & cond=14 & dst { dst = 1; }
:S dst is hi=0x08 & cond=15 & dst { dst = 1; }

# XORB - xor low-order byte only
:XORB imm4, dst is hi=0x28 & dst & imm4 { tmp:1 = dst:1 ^ imm4; dst = (dst & 0xff00) + zext(tmp); }
:XORB imm16, dst is hi=0x28 & x2=11 & dst ; imm16 { tmp:1 = dst:1 ^ imm16:1; dst = (dst & 0xff00) + zext(tmp); }
:XORB src, dst is hi=0x29 & src & dst { tmp:1 = dst:1 ^ src:1; dst = (dst & 0xff00) + zext(tmp); }

# XORW - xor word
:XORW imm4, dst is hi=0x2a & dst & imm4 { tmp:2 = sext(imm4); dst = dst ^ tmp; }
:XORW imm16, dst is hi=0x2a & x2=11 & dst ; imm16 { dst = dst ^ imm16; }
:XORW src, dst is hi=0x2b & src & dst { dst = dst ^ src; }

# XORD - xor doubleword
:XORD imm32, rp_dst is op12=0x006 & rp_dst ; imm32 { rp_dst = rp_dst ^ imm32; }
:XORD rp_src6, rp_dst3 is op=0x0014 ; n4=10 & rp_src6 & rp_dst3
{ # fmt 1
    rp_dst3 = rp_dst3 ^ rp_src6;
}

# SHIFTS
# TODO: left shift displays "+", right shift displays "-"
# Shift macros: count is a signed byte; positive shifts left, negative shifts
# right (arithmetic for do_ash, logical for do_lsh).  Exactly one of lf/rt is
# non-zero, so one of the two shifts below is by 0 and is a no-op.
macro do_ash(count, dest) { local shift = count:1; local lf = ( shift) * zext(shift s> 0); local rt = (-shift) * zext(shift s< 0); dest = dest << lf; dest = dest s>> rt; }
macro do_ashb(count, dest) { local tmp = dest & 0xff; do_ash(count, tmp); dest = (dest & 0xff00) | (tmp & 0x00ff); }
macro do_lsh(count, dest) { local shift = count:1; local lf = ( shift) * zext(shift s> 0); local rt = (-shift) * zext(shift s< 0); dest = dest << lf; dest = dest >> rt; }
macro do_lshb(count, dest) { local tmp = dest & 0xff; do_lsh(count, tmp); dest = (dest & 0xff00) | (tmp & 0x00ff); }

# ASHUB - Arithmetic shift low-order byte only
:ASHUB imm3d, dst is op9=0b010000000 & imm3d & dst { do_ashb(imm3d, dst); }
:ASHUB imm3d, dst is op9=0b010000001 & imm3d & dst { do_ashb(imm3d-8, dst); }
:ASHUB src, dst is hi=0x41 & src & dst { do_ashb(src, dst); }

# ASHUD - Arithmetic shift doubleword
:ASHUD imm5, rp_dst is op7=0b0100110 & imm5 & rp_dst { do_ash(imm5, rp_dst); }
:ASHUD imm5, rp_dst is op7=0b0100111 & imm5 & rp_dst { do_ash(imm5-32, rp_dst); }
:ASHUD src, rp_dst is hi=0x48 & src & rp_dst { do_ash(src, rp_dst); }

# ASHUW - Arithmetic shift word
:ASHUW imm4d, dst is hi=0x42 & imm4d & dst { do_ash(imm4d, dst); }
:ASHUW imm4d, dst is hi=0x43 & imm4d & dst { do_ash(imm4d-16, dst); }
:ASHUW src, dst is hi=0x45 & src & dst { do_ash(src, dst); }

# LSHB - Logical shift low-order byte only
:LSHB imm3d, dst is op9=0b000010011 & imm3d & dst { do_lshb(imm3d-8, dst); }
:LSHB src, dst is hi=0x44 & src & dst { do_lshb(src, dst); }

# LSHD - Logical shift doubleword
:LSHD imm5, rp_dst is op7=0b0100101 & imm5 & rp_dst { do_lsh(imm5-32, rp_dst); }
:LSHD src, rp_dst is hi=0x47 & src & rp_dst { do_lsh(src, rp_dst); }

# LSHW - Logical shift word
:LSHW imm4d, dst is hi=0x49 & imm4d & dst { do_lsh(imm4d-16, dst); }
:LSHW src, dst is hi=0x46 & src & dst { do_lsh(src, dst); }

# BITS
# - for bit ops, dsp(n) is always unsigned
# All CBIT/SBIT/TBIT variants follow the same pattern: compute the effective
# address, load the operand, save the selected bit into PSR.F, then (for
# CBIT/SBIT only) store the value with the bit cleared/set.

# CBITB - clear bit in low-order byte
:CBITB imm3c, prp2 disp14(prp_dst) is op10=0x1aa & imm3c & prp2 & disp14 & prp_dst
{ # fmt 17
    tmp:4 = prp_dst:4 + prp2:4 + zext(disp14);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3c;
    $(F) = (val & mask) >> imm3c; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3b, disp20(dst4) is op=0x0010 ; n8=4 & imm3b & disp20 & dst4
{ # fmt 2
    tmp:4 = zext(dst4) + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3, const1(rp_dst) is op9=0x0d4 & imm3 & rp_dst & const1
{ # fmt 9
    tmp:4 = rp_dst:4;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3;
    $(F) = (val & mask) >> imm3; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3, disp16b(rp_dst) is op9=0x0d6 & imm3 & rp_dst ; disp16b
{ # fmt 10
    tmp:4 = rp_dst:4 + zext(disp16b);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3;
    $(F) = (val & mask) >> imm3; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3b, disp20(rp_dst5) is op=0x0010 ; n8=5 & imm3b & disp20 & rp_dst5
{ # fmt 2
    tmp:4 = rp_dst5:4 + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3b, disp20(prp_dst) is op=0x0010 ; n8=6 & imm3b & disp20 & prp_dst
{ # fmt 2
    tmp:4 = prp_dst:4 + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3b, abs20 is dw_op9=0x0d7 & imm3b & abs20
{ # fmt 7
    tmp:4 = zext(abs20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3b, rs abs20 is dw_hi=0x68 & imm3b & rs & abs20
{ # fmt 8
    tmp:4 = zext(abs20) + rs:4;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}
:CBITB imm3b, abs24 is op=0x0010 ; n8=7 & imm3b & abs24
{ # fmt 3
    tmp:4 = abs24;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    mask = ~mask;
    *:1 tmp = val & mask; # clear bit, store
}

# CBITW - clear bit in word
:CBITW imm4c, prp2 disp14(prp_dst) is op10=0x1ab & imm4c & prp2 & disp14 & prp_dst
{ # fmt 17
    tmp:4 = prp_dst:4 + prp2:4 + zext(disp14);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4c;
    bit:2 = (val & mask) >> imm4c;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4b, disp20(dst4) is op=0x0011 ; n8=4 & imm4b & disp20 & dst4
{ # fmt 2
    tmp:4 = zext(dst4) + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4a, const1(rp_dst) is hi=0x6e & imm4a & rp_dst & const1
{
    tmp:4 = rp_dst:4;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4a, disp16b(rp_dst) is hi=0x69 & imm4a & rp_dst ; disp16b
{
    tmp:4 = rp_dst:4 + zext(disp16b);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4b, disp20(rp_dst5) is op=0x0011 ; n8=5 & rp_dst5 & imm4b & disp20
{
    tmp:4 = rp_dst5:4 + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4b, disp20(prp_dst) is op=0x0011 ; n8=6 & imm4b & disp20 & prp_dst
{ # fmt 2
    tmp:4 = prp_dst:4 + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4b, abs20 is dw_hi=0x6f & imm4b & abs20
{ # fmt 12
    tmp:4 = zext(abs20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4b, rs abs20 is dw_op7=0x36 & imm4b & rs & abs20
{ # fmt 13
    tmp:4 = zext(abs20) + rs:4;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}
:CBITW imm4b, abs24 is op=0x0011 ; n8=7 & imm4b & abs24
{ # fmt 3
    tmp:4 = abs24;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    mask = ~mask;
    *:2 tmp = val & mask; # clear bit, store
}

# SBITB - set bit in low-order byte
:SBITB imm3c, prp2 disp14(prp_dst) is op10=0x1ca & imm3c & prp2 & disp14 & prp_dst
{ # fmt 17
    tmp:4 = prp_dst:4 + prp2:4 + zext(disp14);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3c;
    $(F) = (val & mask) >> imm3c; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3b, disp20(dst4) is op=0x0010 ; n8=8 & imm3b & disp20 & dst4
{ # fmt 2
    tmp:4 = zext(dst4) + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3, const1(rp_dst) is op9=0x0e4 & imm3 & rp_dst & const1
{ # fmt 9
    tmp:4 = rp_dst:4;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3;
    $(F) = (val & mask) >> imm3; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3, disp16b(rp_dst) is op9=0x0e6 & imm3 & rp_dst ; disp16b
{ # fmt 10
    tmp:4 = rp_dst:4 + zext(disp16b);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3;
    $(F) = (val & mask) >> imm3; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3b, disp20(rp_dst5) is op=0x0010 ; n8=9 & imm3b & disp20 & rp_dst5
{ # fmt 2
    tmp:4 = rp_dst5:4 + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3b, disp20(prp_dst) is op=0x0010 ; n8=10 & imm3b & disp20 & prp_dst
{ # fmt 2
    tmp:4 = prp_dst:4 + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3b, abs20 is dw_op9=0x0e7 & imm3b & abs20
{ # fmt 7
    tmp:4 = zext(abs20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3b, rs abs20 is dw_hi=0x70 & imm3b & rs & abs20
{ # fmt 8
    tmp:4 = zext(abs20) + rs:4;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    *:1 tmp = val | mask; # set bit, store
}
:SBITB imm3b, abs24 is op=0x0010 ; n8=11 & imm3b & abs24
{ # fmt 3
    tmp:4 = abs24;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
    *:1 tmp = val | mask; # set bit, store
}

# SBITW - set bit in word
:SBITW imm4c, prp2 disp14(prp_dst) is op10=0x1cb & imm4c & prp2 & disp14 & prp_dst
{ # fmt 17
    tmp:4 = prp_dst:4 + prp2:4 + zext(disp14);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4c;
    bit:2 = (val & mask) >> imm4c;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4b, disp20(dst4) is op=0x0011 ; n8=8 & imm4b & disp20 & dst4
{ # fmt 2
    tmp:4 = zext(dst4) + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4a, const1(rp_dst) is hi=0x76 & imm4a & const1 & rp_dst
{ # fmt 15
    tmp:4 = rp_dst:4;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4a, disp16b(rp_dst) is hi=0x71 & imm4a & rp_dst ; disp16b
{ # fmt 16
    tmp:4 = rp_dst:4 + zext(disp16b);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4b, disp20(rp_dst5) is op=0x0011 ; n8=9 & rp_dst5 & imm4b & disp20
{
    tmp:4 = rp_dst5:4 + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4b, disp20(prp_dst) is op=0x0011 ; n8=10 & imm4b & disp20 & prp_dst
{
    tmp:4 = prp_dst:4 + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4b, abs20 is dw_hi=0x77 & imm4b & abs20
{ # fmt 12
    tmp:4 = zext(abs20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4b, rs abs20 is dw_op7=0x3a & imm4b & rs & abs20
{
    tmp:4 = zext(abs20) + rs:4;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}
:SBITW imm4b, abs24 is op=0x0011 ; n8=11 & imm4b & abs24
{ # fmt 3
    tmp:4 = abs24;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
    *:2 tmp = val | mask; # set bit, store
}

# TBIT - test bit
# NOTE(review): these treat src1 (a register value) as a memory address and
# load through it; verify against the CR16 TBIT definition, which tests a bit
# of the register operand directly.
:TBIT imm4a, src1 is hi=0x06 & imm4a & src1
{
    tmp:4 = zext(src1);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
}
:TBIT src, src1 is hi=0x07 & src & src1
{
    tmp:4 = zext(src1);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << src;
    bit:2 = (val & mask) >> src;
    $(F) = bit:1; # save bit
}

# TBITB - test bit in low-order byte
:TBITB imm3c, prp2 disp14(prp_dst) is op10=0x1ea & imm3c & prp2 & disp14 & prp_dst
{ # fmt 17 (affected by CFG.SR)
    tmp:4 = prp_dst:4 + prp2:4 + zext(disp14);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3c;
    $(F) = (val & mask) >> imm3c; # save bit
}
:TBITB imm3b, disp20(dst4) is op=0x0010 ; n8=12 & imm3b & disp20 & dst4
{ # fmt 2
    tmp:4 = zext(dst4) + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
}
:TBITB imm3, const1(rp_dst) is op9=0x0f4 & imm3 & rp_dst & const1
{ # fmt 9
    tmp:4 = rp_dst:4;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3;
    $(F) = (val & mask) >> imm3; # save bit
}
:TBITB imm3, disp16b(rp_dst) is op9=0x0f6 & imm3 & rp_dst ; disp16b
{ # fmt 10
    tmp:4 = rp_dst:4 + zext(disp16b);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3;
    $(F) = (val & mask) >> imm3; # save bit
}
:TBITB imm3b, disp20(rp_dst5) is op=0x0010 ; n8=13 & imm3b & disp20 & rp_dst5
{
    tmp:4 = rp_dst5:4 + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
}
:TBITB imm3b, disp20(prp_dst) is op=0x0010 ; n8=14 & imm3b & disp20 & prp_dst
{
    tmp:4 = prp_dst:4 + zext(disp20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
}
:TBITB imm3b, abs20 is dw_op9=0x0f7 & imm3b & abs20
{ # fmt 7
    tmp:4 = zext(abs20);
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
}
:TBITB imm3b, rs abs20 is dw_hi=0x78 & imm3b & rs & abs20
{ # fmt 8
    tmp:4 = zext(abs20) + rs:4;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
}
:TBITB imm3b, abs24 is op=0x0010 ; n8=15 & imm3b & abs24
{ # fmt 3
    tmp:4 = abs24;
    val:1 = *:1 tmp; # load dst operand
    local mask = 1 << imm3b;
    $(F) = (val & mask) >> imm3b; # save bit
}

# TBITW - test bit in word
:TBITW imm4c, prp2 disp14(prp_dst) is op10=0x1eb & imm4c & prp2 & disp14 & prp_dst
{ # fmt 17
    tmp:4 = prp_dst:4 + prp2:4 + zext(disp14);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4c;
    bit:2 = (val & mask) >> imm4c;
    $(F) = bit:1; # save bit
}
:TBITW imm4b, disp20(dst4) is op=0x0011 ; n8=12 & imm4b & disp20 & dst4
{
    tmp:4 = zext(dst4) + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
}
:TBITW imm4a, const1(rp_dst) is hi=0x7e & imm4a & const1 & rp_dst
{ # fmt 15
    tmp:4 = rp_dst:4;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
}
:TBITW imm4a, disp16b(rp_dst) is hi=0x79 & imm4a & rp_dst ; disp16b
{ # fmt 16
    tmp:4 = rp_dst:4 + zext(disp16b);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4a;
    bit:2 = (val & mask) >> imm4a;
    $(F) = bit:1; # save bit
}
:TBITW imm4b, disp20(rp_dst5) is op=0x0011 ; n8=13 & rp_dst5 & imm4b & disp20
{
    tmp:4 = rp_dst5:4 + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
}
:TBITW imm4b, disp20(prp_dst) is op=0x0011 ; n8=14 & imm4b & disp20 & prp_dst
{
    tmp:4 = prp_dst:4 + zext(disp20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
}
:TBITW imm4b, abs20 is dw_hi=0x7f & imm4b & abs20
{
    tmp:4 = zext(abs20);
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
}
:TBITW imm4b, rs abs20 is dw_op7=0x3e & imm4b & rs & abs20
{ # fmt 13
    tmp:4 = zext(abs20) + rs:4;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
}
:TBITW imm4b, abs24 is op=0x0011 ; n8=15 & imm4b & abs24
{
    tmp:4 = abs24;
    val:2 = *:2 tmp; # load dst operand
    mask:2 = 1 << imm4b;
    bit:2 = (val & mask) >> imm4b;
    $(F) = bit:1; # save bit
}

# PROCESSOR REGISTER MANIPULATION
# TODO:
# For the LPR instruction, if dest is ISPL or INTBASEL the least significant
# bit (bit 0) of the address is 0. If dest is INTBASEH, ISPH, USPH, CAR0H,
# or CAR1H bits 8 through 15 are always written as 0.
# For the LPRD instruction, if dest is ISP or INTBASE the least significant
# bit (bit 0) of the address is 0. If dest is INTBASE, ISP, USP, CAR0, or
# CAR1 bits 24 through 31 are always written as 0.
# Load Processor Register (word)
:LPR src2, pr is op=0x0014 ; n4=0 & n3=0 & pr & src2 { pr = src2; }

# Load Processor Register (doubleword)
:LPRD rp_src3, prd is op=0x0014 ; n4=1 & n3=0 & prd & rp_src3 { prd = rp_src3; }

# Store Processor Register (word)
:SPR pr, dst2 is op=0x0014 ; n4=2 & n3=0 & pr & dst2 { dst2 = pr; }

# Store Processor Register (doubleword)
:SPRD prd, rp_dst3 is op=0x0014 ; n4=3 & n3=0 & prd & rp_dst3 { rp_dst3 = prd; }

# JUMPS AND LINKS
# Return addresses are stored shifted right once (word-aligned addresses);
# jump targets are correspondingly shifted left before loading PC.

# BAL - Branch and Link
:BAL (RA), disp24b is dw_hi=0xc0 & disp24b & RA
{
    RA = inst_next >> 1; # save bits 1 to 23 in link reg RA
    PC = &disp24b; # PC = PC + sext25(disp) TODO: 25 bits?
    call disp24b;
}
:BAL rp_dst4, disp24 is op=0x0010 ; n8=2 & disp24 & rp_dst4
{ # rp link is at word2 (4,7)
    rp_dst4 = inst_next >> 1; # save bits 1 to 23 in link reg
    PC = &disp24; # PC = PC + sext25(disp) TODO: 25 bits?
    call disp24;
}

# BEQ0B - branch if low byte equals 0
:BEQ0B src1, disp4c is hi=0x0c & src1 & disp4c
{
    if (src1:1 != 0) goto inst_next; # don't branch if not equal
    PC = &disp4c; # branch
    goto disp4c;
}

# BEQ0W - branch if word equals 0
:BEQ0W src1, disp4c is hi=0x0e & src1 & disp4c
{
    if (src1 != 0) goto inst_next; # don't branch if not equal
    PC = &disp4c; # branch
    goto disp4c;
}

# BNE0B - branch if low byte does not equal 0
:BNE0B src1, disp4c is hi=0x0d & src1 & disp4c
{
    if (src1:1 == 0) goto inst_next; # don't branch if equal
    PC = &disp4c; # branch
    goto disp4c;
}
:BNE0W src1, disp4c is hi=0x0f & src1 & disp4c
{
    if (src1 == 0) goto inst_next; # don't branch if equal
    PC = &disp4c; # branch
    goto disp4c;
}

# BR - branch (conditional and unconditional)
:BR^COND disp8 is op4=1 & COND & disp8
{ # BR (conditional)
    build COND; PC = &disp8; goto disp8;
}
:BR disp8 is op4=1 & cond=14 & disp8
{ # BR cond=14 (always)
    PC = &disp8; goto disp8;
}
:BR disp8 is op4=1 & cond=15 & disp8
{ # BR cond=15 (unconditional)
    PC = &disp8; goto disp8;
}
:BR^COND disp16 is hi=0x18 & lo1=0 & COND ; disp16
{ # BR (conditional)
    build COND; PC = &disp16; goto disp16;
}
:BR disp16 is hi=0x18 & lo1=0 & cond=14 ; disp16
{ # BR cond=14 (always)
    PC = &disp16; goto disp16;
}
:BR disp16 is hi=0x18 & lo1=0 & cond=15 ; disp16
{ # BR cond=15 (unconditional)
    PC = &disp16; goto disp16;
}
:BR^COND2 disp24 is op=0x0010 ; n8=0 & COND2 & disp24
{ # BR (conditional)
    build COND2; PC = &disp24; goto disp24;
}
:BR disp24 is op=0x0010 ; n8=0 & n6=14 & disp24
{ # BR cond=14 (always)
    PC = &disp24; goto disp24;
}
:BR disp24 is op=0x0010 ; n8=0 & n6=15 & disp24
{ # BR cond=15 (unconditional)
    PC = &disp24; goto disp24;
}

# EXCP - Exception
# Pushes PSR then the return address on the interrupt stack, then traps.
# NOTE(review): stores happen at the pre-decrement ISP value - confirm
# against the CR16 exception stack layout.
:EXCP x1 is op12=0x00c & x1
{
    # *:4(ISP:3) = zext(inst_start);
    *:2(ISP:4) = PSR; # push PSR onto interrupt stack
    ISP = ISP - 2;
    *:4(ISP:4) = inst_start; # push ret addr (current instruction) onto interrupt stack
    ISP = ISP - 4;
    trap();
}

# JAL - Jump and Link
:JAL rp_dst is op12=0x00d & rp_dst
{
    RA = inst_next >> 1;
    local tmp = rp_dst << 1;
    PC = tmp:4;
    call [PC];
}
:JAL rp_dst6 is op=0x0014 ; n4=8 & n3=0 & rp_dst6 & rp_dst3
{ # rp link is at word2 (0,3)
    rp_dst3 = inst_next >> 1;
    local tmp = rp_dst6 << 1;
    PC = tmp:4;
    call [PC];
}

# J - Jump - special case jumping through link register
:J^COND RA is hi=0x0a & COND & rp_dst=14 & RA
{ # conditional
    build COND; local tmp = RA << 1; PC = tmp:4; return [PC];
}
:JUMP RA is hi=0xa & cond=14 & rp_dst=14 & RA
{ # always
    local tmp = RA << 1; PC = tmp:4; return [PC];
}
:JUSR RA is hi=0xa & cond=15 & rp_dst=14 & RA
{ # unconditional
    $(U) = 1; local tmp = RA << 1; PC = tmp:4; return [PC];
}

# J - Jump (conditional and unconditional)
:J^COND rp_dst is hi=0x0a & COND & rp_dst
{ # conditional
    build COND; local tmp = rp_dst << 1; PC = tmp:4; goto [PC];
}
:JUMP rp_dst is hi=0xa & cond=14 & rp_dst
{ # always
    local tmp = rp_dst << 1; PC = tmp:4; goto [PC];
}
:JUSR rp_dst is hi=0xa & cond=15 & rp_dst
{ # unconditional
    $(U) = 1; local tmp = rp_dst << 1; PC = tmp:4; goto [PC];
}

# Register lists for push/pop: regN is the (N+1)-th register saved, starting
# from the base register encoded in the instruction.
attach variables [ reg0 ] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R13_L RA_L SP_L ];
attach variables [ reg1 ] [ R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_H RA_H SP_H ]; attach variables [ reg2 ] [ R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L RA_L SP_L R0 ]; attach variables [ reg3 ] [ R3 R4 R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_H SP_H R1 ]; attach variables [ reg4 ] [ R4 R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_L SP_L R0 R3 ]; attach variables [ reg5 ] [ R5 R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_L RA_H SP_H R1 R4 ]; attach variables [ reg6 ] [ R6 R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_L RA_H SP_L R0 R2 R5 ]; attach variables [ reg7 ] [ R7 R8 R9 R10 R11 R12_L R12_H R13_L R13_H RA_L RA_H SP_L SP_H R1 R3 R6 ]; macro push_one(reg) { SP = SP - 2; *SP = reg; } macro pop_one(reg) { reg = *SP; SP = SP + 2; } macro push_ra() { SP = SP - 4; *SP = RA; } macro pop_ra() { RA = *SP; SP = SP + 4; } macro do_pop_ret() { tmp:4 = RA << 1; PC = tmp; return [PC]; } push_args: "$"tmp, reg0 is b_ra=0 & b4_6 & reg0 [ tmp = b4_6 + 1; ] { } push_args: "$"tmp, reg0, "ra" is b_ra=1 & b4_6 & reg0 [ tmp = b4_6 + 1; ] { push_ra(); } pop_args: "$"tmp, reg0 is b_ra=0 & b4_6 & reg0 [ tmp = b4_6 + 1; ] { } pop_args: "$"tmp, reg0, "ra" is b_ra=1 & b4_6 & reg0 [ tmp = b4_6 + 1; ] { pop_ra(); } :push push_args is hi=1 & b4_6=0 & reg0 & push_args { build push_args; push_one(reg0); } :push push_args is hi=1 & b4_6=1 & reg0 & reg1 & push_args { build push_args; push_one(reg1); push_one(reg0); } :push push_args is hi=1 & b4_6=2 & reg0 & reg1 & reg2 & push_args { build push_args; push_one(reg2); push_one(reg1); push_one(reg0); } :push push_args is hi=1 & b4_6=3 & reg0 & reg1 & reg2 & reg3 & push_args { build push_args; push_one(reg3); push_one(reg2); push_one(reg1); push_one(reg0); } :push push_args is hi=1 & b4_6=4 & reg0 & reg1 & reg2 & reg3 & reg4 & push_args { build push_args; push_one(reg4); push_one(reg3); push_one(reg2); push_one(reg1); push_one(reg0); } :push push_args is hi=1 & b4_6=5 & reg0 & reg1 & reg2 & reg3 & reg4 & reg5 & 
push_args { build push_args; push_one(reg5); push_one(reg4); push_one(reg3); push_one(reg2); push_one(reg1); push_one(reg0); } :push push_args is hi=1 & b4_6=6 & reg0 & reg1 & reg2 & reg3 & reg4 & reg5 & reg6 & push_args { build push_args; push_one(reg6); push_one(reg5); push_one(reg4); push_one(reg3); push_one(reg2); push_one(reg1); push_one(reg0); } :push push_args is hi=1 & b4_6=7 & reg0 & reg1 & reg2 & reg3 & reg4 & reg5 & reg6 & reg7 & push_args { build push_args; push_one(reg7); push_one(reg6); push_one(reg5); push_one(reg4); push_one(reg3); push_one(reg2); push_one(reg1); push_one(reg0); } pop_ret: "ret" is hi=3 { do_pop_ret(); } pop_ret: "" is hi=2 { } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=0 & reg0 { pop_one(reg0); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=1 & reg0 & reg1 { pop_one(reg0); pop_one(reg1); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=2 & reg0 & reg1 & reg2 { pop_one(reg0); pop_one(reg1); pop_one(reg2); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=3 & reg0 & reg1 & reg2 & reg3 { pop_one(reg0); pop_one(reg1); pop_one(reg2); pop_one(reg3); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=4 & reg0 & reg1 & reg2 & reg3 & reg4 { pop_one(reg0); pop_one(reg1); pop_one(reg2); pop_one(reg3); pop_one(reg4); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=5 & reg0 & reg1 & reg2 & reg3 & reg4 & reg5 { pop_one(reg0); pop_one(reg1); pop_one(reg2); pop_one(reg3); pop_one(reg4); pop_one(reg5); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & pop_args & op7=1 & b4_6=6 & reg0 & reg1 & reg2 & reg3 & reg4 & reg5 & reg6 { pop_one(reg0); pop_one(reg1); pop_one(reg2); pop_one(reg3); pop_one(reg4); pop_one(reg5); pop_one(reg6); build pop_args; build pop_ret; } :pop^pop_ret pop_args is pop_ret & 
pop_args & op7=1 & b4_6=7 & reg0 & reg1 & reg2 & reg3 & reg4 & reg5 & reg6 & reg7 { pop_one(reg0); pop_one(reg1); pop_one(reg2); pop_one(reg3); pop_one(reg4); pop_one(reg5); pop_one(reg6); pop_one(reg7); build pop_args; build pop_ret; } # RETX - Return from Exception :RETX is op=0x0003 { PC = *(ISP:4) << 1; # PC equals bits 0 to 22 popped from stack shifted left 1 ISP = ISP + 4; PSR = *(ISP:4); # PSR popped/restored from interrupt stack ISP = ISP + 2; return [PC]; } # LOAD / STORE # - for loads and stores, dsp(n) is always unsigned # LOADB - Load low-order byte only :LOADB prp const1(prp_src1), dst1 is hi=0xbe & prp & const1 & prp_src1 & dst1 { # fmt 18 tmp:4 = prp_src1:4 + prp:4; dst1 = (dst1 & 0xff00) + zext( *:1 tmp ); } :LOADB prp2 disp14(prp_src), dst3 is op10=0x219 & prp2 & disp14 & prp_src & dst3 { # fmt 17 tmp:4 = prp_src:4 + prp2:4 + zext(disp14); dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB disp20(src4), dst3 is op=0x0012 ; n8=4 & disp20 & src4 & dst3 { # fmt 2 tmp:4 = zext(src4) + zext(disp20); dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB -disp20(src4), dst3 is op=0x0018 ; n8=4 & disp20 & src4 & dst3 { tmp:4 = zext(src4) - zext(disp20); dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB disp16b(rp_src2), dst1 is hi=0xbf & rp_src2 & dst1 ; disp16b { # fmt 19 tmp:4 = rp_src2:4 + zext(disp16b); dst1 = (dst1 & 0xff00) + zext( *:1 tmp ); } :LOADB disp20(rp_src5), dst3 is op=0x0012 ; n8=5 & disp20 & rp_src5 & dst3 { # fmt 2 tmp:4 = rp_src5:4 + zext(disp20); dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB -disp20(rp_src5), dst3 is op=0x0018 ; n8=5 & disp20 & rp_src5 & dst3 { tmp:4 = rp_src5:4 - zext(disp20); dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB disp4b(rp_src2), dst1 is op4=0xb & dst1 & disp4b & rp_src2 { # fmt 18 tmp:4 = rp_src2:4 + zext(disp4b); dst1 = (dst1 & 0xff00) + zext( *:1 tmp ); } :LOADB disp20(prp_src), dst3 is op=0x0012 ; n8=6 & disp20 & prp_src & dst3 { # fmt 2 tmp:4 = prp_src:4 + zext(disp20); dst3 = (dst3 & 0xff00) + 
zext( *:1 tmp ); } :LOADB abs20, dst3 is dw_hi=0x88 & abs20 & dst3 { # fmt 12 tmp:4 = zext(abs20); dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB rs abs20, dst3 is dw_op7=0x45 & dst3 & rs & abs20 { # fmt 13 tmp:4 = zext(abs20) + rs:4; dst3 = (dst3 & 0xff00) + zext( *:1 tmp ); } :LOADB abs24, dst3 is op=0x0012 ; n8=7 & dst3 & abs24 { # fmt 3 dst3 = (dst3 & 0xff00) + zext( *:1 abs24 ); } # LOADD - load doubleword :LOADD prp const1(prp_src1), rp_dst2 is hi=0xae & prp & const1 & prp_src1 & rp_dst2 { # fmt 18 tmp:4 = prp_src1:4 + prp:4; rp_dst2 = *tmp; } :LOADD prp2 disp14(prp_src), rp_dst4 is op10=0x21a & prp2 & disp14 & prp_src & rp_dst4 { # fmt 17 tmp:4 = prp_src:4 + prp2:4 + zext(disp14); rp_dst4 = *tmp; } :LOADD disp20(src4), rp_dst4 is op=0x0012 ; n8=8 & disp20 & src4 & rp_dst4 { # fmt 2 tmp:4 = zext(src4) + zext(disp20); rp_dst4 = *tmp; } :LOADD -disp20(src4), rp_dst4 is op=0x0018 ; n8=8 & disp20 & src4 & rp_dst4 { tmp:4 = zext(src4) - zext(disp20); rp_dst4 = *tmp; } :LOADD disp16b(rp_src2), rp_dst2 is hi=0xaf & rp_dst2 & rp_src2 ; disp16b { # fmt 19 tmp:4 = rp_src2:4 + zext(disp16b); rp_dst2 = *tmp; } :LOADD disp20(rp_src5), rp_dst4 is op=0x0012 ; n8=9 & disp20 & rp_src5 & rp_dst4 { # fmt 2 tmp:4 = rp_src5:4 + zext(disp20); rp_dst4 = *tmp; } :LOADD -disp20(rp_src5), rp_dst4 is op=0x0018 ; n8=9 & disp20 & rp_src5 & rp_dst4 { tmp:4 = rp_src5:4 - zext(disp20); rp_dst4 = *tmp; } :LOADD disp4(rp_src2), rp_dst2 is op4=0xa & disp4 & rp_src2 & rp_dst2 { tmp:4 = rp_src2:4 + zext(disp4); rp_dst2 = *tmp; } :LOADD disp20(prp_src), rp_dst4 is op=0x0012 ; n8=10 & disp20 & prp_src & rp_dst4 { # fmt 2 tmp:4 = prp_src:4 + zext(disp20); rp_dst4 = *tmp; } :LOADD abs20, rp_dst4 is dw_hi=0x87 & abs20 & rp_dst4 { tmp:4 = zext(abs20); rp_dst4 = *tmp; } :LOADD rs abs20, rp_dst4 is dw_op7=0x46 & rs & abs20 & rp_dst4 { tmp:4 = zext(abs20) + rs:4; rp_dst4 = *tmp; } :LOADD abs24, rp_dst4 is op=0x0012 ; n8=11 & rp_dst4 & abs24 { # fmt 3 rp_dst4 = *abs24; } macro load_one_1(rd) { addr:4 
= zext(R0); rd = *addr; R0 = R0 + 2; } macro load_one_2(rd) { addr:4 = R1R0:4; rd = *addr; R1R0 = R1R0 + 2; } # LOADM/LOADMP - Load Multiple Registers from Memory :LOADM cnt3b is op13=0x0014 & b0_2=0 & cnt3b { load_one_1(R2); } :LOADM cnt3b is op13=0x0014 & b0_2=1 & cnt3b { load_one_1(R2); load_one_1(R3); } :LOADM cnt3b is op13=0x0014 & b0_2=2 & cnt3b { load_one_1(R2); load_one_1(R3); load_one_1(R4); } :LOADM cnt3b is op13=0x0014 & b0_2=3 & cnt3b { load_one_1(R2); load_one_1(R3); load_one_1(R4); load_one_1(R5); } :LOADM cnt3b is op13=0x0014 & b0_2=4 & cnt3b { load_one_1(R2); load_one_1(R3); load_one_1(R4); load_one_1(R5); load_one_1(R8); } :LOADM cnt3b is op13=0x0014 & b0_2=5 & cnt3b { load_one_1(R2); load_one_1(R3); load_one_1(R4); load_one_1(R5); load_one_1(R8); load_one_1(R9); } :LOADM cnt3b is op13=0x0014 & b0_2=6 & cnt3b { load_one_1(R2); load_one_1(R3); load_one_1(R4); load_one_1(R5); load_one_1(R8); load_one_1(R9); load_one_1(R10); } :LOADM cnt3b is op13=0x0014 & b0_2=7 & cnt3b { load_one_1(R2); load_one_1(R3); load_one_1(R4); load_one_1(R5); load_one_1(R8); load_one_1(R9); load_one_1(R10); load_one_1(R11); } :LOADMP cnt3b is op13=0x0015 & b0_2=0 & cnt3b { load_one_2(R2); load_one_2(R3); } :LOADMP cnt3b is op13=0x0015 & b0_2=1 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); } :LOADMP cnt3b is op13=0x0015 & b0_2=2 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); } :LOADMP cnt3b is op13=0x0015 & b0_2=3 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); load_one_2(R5); } :LOADMP cnt3b is op13=0x0015 & b0_2=4 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); load_one_2(R5); load_one_2(R8); } :LOADMP cnt3b is op13=0x0015 & b0_2=5 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); load_one_2(R5); load_one_2(R8); load_one_2(R9); } :LOADMP cnt3b is op13=0x0015 & b0_2=6 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); load_one_2(R5); load_one_2(R8); load_one_2(R9); load_one_2(R10); } :LOADMP cnt3b is op13=0x0015 & 
b0_2=7 & cnt3b { load_one_2(R2); load_one_2(R3); load_one_2(R4); load_one_2(R5); load_one_2(R8); load_one_2(R9); load_one_2(R10); load_one_2(R11); } # LOADW - load word :LOADW prp const1(prp_src1), dst1 is hi=0x9e & prp & const1 & prp_src1 & dst1 { # fmt 18 tmp:4 = prp_src1:4 + prp:4; dst1 = *tmp; } :LOADW prp2 disp14(prp_src), dst3 is op10=0x21b & prp2 & disp14 & prp_src & dst3 { # fmt 17 tmp:4 = prp_src:4 + prp2:4 + zext(disp14); dst3 = *tmp; } :LOADW disp20(src4), dst3 is op=0x0012 ; n8=12 & disp20 & src4 & dst3 { # fmt 2 tmp:4 = zext(src4) + zext(disp20); dst3 = *tmp; } :LOADW -disp20(src4), dst3 is op=0x0018 ; n8=12 & disp20 & src4 & dst3 { tmp:4 = zext(src4) - zext(disp20); dst3 = *tmp; } :LOADW disp16b(rp_src2), dst1 is hi=0x9f & rp_src2 & dst1 ; disp16b { # fmt 19 tmp:4 = rp_src2:4 + zext(disp16b); dst1 = *tmp; } :LOADW disp20(rp_src5), dst3 is op=0x0012 ; n8=13 & disp20 & rp_src5 & dst3 { # fmt 2 tmp:4 = rp_src5:4 + zext(disp20); dst3 = *tmp; } :LOADW -disp20(rp_src5), dst3 is op=0x0018 ; n8=13 & disp20 & rp_src5 & dst3 { tmp:4 = rp_src5:4 - zext(disp20); dst3 = *tmp; } :LOADW disp4(rp_src2), dst1 is op4=0x9 & disp4 & rp_src2 & dst1 { # fmt 18 tmp:4 = rp_src2:4 + zext(disp4); dst1 = *tmp; } :LOADW disp20(prp_src), dst3 is op=0x0012 ; n8=14 & disp20 & prp_src & dst3 { # fmt 2 tmp:4 = prp_src:4 + zext(disp20); dst3 = *tmp; } :LOADW abs20, dst3 is dw_hi=0x89 & abs20 & dst3 { tmp:4 = zext(abs20); dst3 = *tmp; } :LOADW rs abs20, dst3 is dw_op7=0x47 & dst3 & rs & abs20 { # fmt 13 tmp:4 = zext(abs20) + rs:4; dst3 = *tmp; } :LOADW abs24, dst3 is op=0x0012 ; n8=15 & dst3 & abs24 { # fmt 3 dst3 = *abs24; } # STORB - store low-order byte only :STORB src, prp const1(prp_dst1) is hi=0xfe & src & prp & const1 & prp_dst1 { # fmt 18 tmp:4 = prp_dst1:4 + prp:4; *tmp = src:1; } :STORB src3, prp2 disp14(prp_dst) is op10=0x319 & src3 & prp2 & disp14 & prp_dst { # fmt 17 tmp:4 = prp_dst:4 + prp2:4 + zext(disp14); *tmp = src3:1; } :STORB src3, disp20(dst4) is op=0x0013 ; n8=4 & 
src3 & disp20 & dst4 { # fmt 2 tmp:4 = zext(dst4) + zext(disp20); *tmp = src3:1; } :STORB src3, -disp20(dst4) is op=0x0019 ; n8=4 & src3 & disp20 & dst4 { tmp:4 = zext(dst4) - zext(disp20); *tmp = src3:1; } :STORB src, disp16b(rp_dst) is hi=0xff & src & rp_dst ; disp16b { # fmt 19 tmp:4 = rp_dst:4 + zext(disp16b); *tmp = src:1; } :STORB src3, disp20(rp_dst5) is op=0x0013 ; n8=5 & src3 & disp20 & rp_dst5 { # fmt 2 tmp:4 = rp_dst5:4 + zext(disp20); *tmp = src3:1; } :STORB src3, -disp20(rp_dst5) is op=0x0019 ; n8=5 & src3 & disp20 & rp_dst5 { tmp:4 = rp_dst5:4 - zext(disp20); *tmp = src3:1; } :STORB src, disp4b(rp_dst) is op4=0xf & src & disp4b & rp_dst { # fmt 18 <--- tmp:4 = rp_dst:4 + zext(disp4b); *tmp = src:1; } :STORB src3, disp20(prp_dst) is op=0x0013 ; n8=6 & src3 & disp20 & prp_dst { # fmt 2 tmp:4 = prp_dst:4 + zext(disp20); *tmp = src3:1; } :STORB src3, abs20 is dw_hi=0xc8 & abs20 & src3 { tmp:4 = zext(abs20); *tmp = src3:1; } :STORB src3, rs abs20 is dw_op7=0x65 & src3 & rs & abs20 { # fmt 13 tmp:4 = zext(abs20) + rs:4; *tmp = src3:1; } :STORB src3, abs24 is op=0x0013 ; n8=7 & src3 & abs24 { # fmt 3 *abs24 = src3:1; } :STORB imm4c, prp2 disp14(prp_dst) is op10=0x218 & imm4c & prp2 & disp14 & prp_dst { # fmt 17 tmp:4 = prp_dst:4 + prp2:4 + zext(disp14); *tmp = imm4c; } :STORB imm4c, disp20(dst4) is op=0x0012 ; n8=0 & imm4c & disp20 & dst4 { # fmt 2 tmp:4 = zext(dst4) + zext(disp20); *tmp = imm4c; } :STORB imm4a, const1(rp_dst) is hi=0x82 & imm4a & const1 & rp_dst { # fmt 15 *rp_dst:4 = imm4a; } :STORB imm4a, disp16b(rp_dst) is hi=0x83 & imm4a & rp_dst ; disp16b { # fmt 16 tmp:4 = rp_dst:4 + zext(disp16b); *tmp = imm4a; } :STORB imm4c, disp20(rp_dst5) is op=0x0012 ; n8=1 & imm4c & disp20 & rp_dst5 { # fmt 2 tmp:4 = rp_dst5:4 + zext(disp20); *tmp = imm4c; } :STORB imm4c, disp20(prp_dst) is op=0x0012 ; n8=2 & imm4c & disp20 & prp_dst { tmp:4 = prp_dst:4 + zext(disp20); *tmp = imm4c; } :STORB imm4b, abs20 is dw_hi=0x81 & imm4b & abs20 { # fmt 12 tmp:4 = 
zext(abs20); *tmp = imm4b; } :STORB imm4b, rs abs20 is dw_op7=0x42 & imm4b & rs & abs20 { # fmt 13 tmp:4 = zext(abs20) + rs:4; *tmp = imm4b; } :STORB imm4b, abs24 is op=0x0012 ; n8=3 & imm4b & abs24 { # fmt 3 *abs24 = imm4b; } # STORD - store doubleword :STORD rp_src, prp const1(prp_dst1) is hi=0xee & rp_src & prp & const1 & prp_dst1 { # fmt 18 tmp:4 = prp_dst1:4 + prp:4; *tmp = rp_src; } :STORD rp_src4, prp2 disp14(prp_dst) is op10=0x31a & rp_src4 & prp2 & disp14 & prp_dst { # fmt 17 tmp:4 = prp_dst:4 + prp2:4 + zext(disp14); *tmp = rp_src4; } :STORD rp_src4, disp20(dst4) is op=0x0013 ; n8=8 & rp_src4 & dst4 & disp20 { # fmt 2 tmp:4 = zext(dst4) + zext(disp20); *tmp = rp_src4; } :STORD rp_src4, -disp20(dst4) is op=0x0019 ; n8=8 & rp_src4 & dst4 & disp20 { tmp:4 = zext(dst4) - zext(disp20); *tmp = rp_src4; } :STORD rp_src, disp16b(rp_dst) is hi=0xef & rp_src & rp_dst ; disp16b { # fmt 19 tmp:4 = rp_dst:4 + zext(disp16b); *tmp = rp_src; } :STORD rp_src4, disp20(rp_dst5) is op=0x0013 ; n8=9 & rp_src4 & disp20 & rp_dst5 { # fmt 2 tmp:4 = rp_dst5:4 + zext(disp20); *tmp = rp_src4; } :STORD rp_src4, -disp20(rp_dst5) is op=0x0019 ; n8=9 & rp_src4 & disp20 & rp_dst5 { tmp:4 = rp_dst5:4 - zext(disp20); *tmp = rp_src4; } :STORD rp_src, disp4(rp_dst) is op4=0xe & disp4 & rp_src & rp_dst { # fmt 18 tmp:4 = rp_dst:4 + zext(disp4); *tmp = rp_src; } :STORD rp_src4, disp20(prp_dst) is op=0x0013 ; n8=10 & rp_src4 & disp20 & prp_dst { # fmt 2 tmp:4 = prp_dst:4 + zext(disp20); *tmp = rp_src4; } :STORD rp_src4, abs20 is dw_hi=0xc7 & abs20 & rp_src4 { tmp:4 = zext(abs20); *tmp = rp_src4; } :STORD rp_src4, rs abs20 is dw_op7=0x66 & rp_src4 & rs & abs20 { tmp:4 = zext(abs20) + rs:4; *tmp = rp_src4; } :STORD rp_src4, abs24 is op=0x0013 ; n8=11 & rp_src4 & abs24 { # fmt 3 *abs24 = rp_src4; } macro store_one_1(rd) { addr:4 = zext(R1); *addr = rd; R1 = R1 + 2; } macro store_one_2(rd) { addr:4 = R7R6:4; *addr = rd; R7R6 = R7R6 + 2; } # STORM/STORMP - Store Multiple Registers to Memory :STORM 
cnt3b is op13=0x0016 & b0_2=0 & cnt3b { store_one_1(R2); } :STORM cnt3b is op13=0x0016 & b0_2=1 & cnt3b { store_one_1(R2); store_one_1(R3); } :STORM cnt3b is op13=0x0016 & b0_2=2 & cnt3b { store_one_1(R2); store_one_1(R3); store_one_1(R4); } :STORM cnt3b is op13=0x0016 & b0_2=3 & cnt3b { store_one_1(R2); store_one_1(R3); store_one_1(R4); store_one_1(R5); } :STORM cnt3b is op13=0x0016 & b0_2=4 & cnt3b { store_one_1(R2); store_one_1(R3); store_one_1(R4); store_one_1(R5); store_one_1(R8); } :STORM cnt3b is op13=0x0016 & b0_2=5 & cnt3b { store_one_1(R2); store_one_1(R3); store_one_1(R4); store_one_1(R5); store_one_1(R8); store_one_1(R9); } :STORM cnt3b is op13=0x0016 & b0_2=6 & cnt3b { store_one_1(R2); store_one_1(R3); store_one_1(R4); store_one_1(R5); store_one_1(R8); store_one_1(R9); store_one_1(R10); } :STORM cnt3b is op13=0x0016 & b0_2=7 & cnt3b { store_one_1(R2); store_one_1(R3); store_one_1(R4); store_one_1(R5); store_one_1(R8); store_one_1(R9); store_one_1(R10); store_one_1(R11); } :STORMP cnt3b is op13=0x0017 & b0_2=0 & cnt3b { store_one_2(R2); } :STORMP cnt3b is op13=0x0017 & b0_2=1 & cnt3b { store_one_2(R2); store_one_2(R3); } :STORMP cnt3b is op13=0x0017 & b0_2=2 & cnt3b { store_one_2(R2); store_one_2(R3); store_one_2(R4); } :STORMP cnt3b is op13=0x0017 & b0_2=3 & cnt3b { store_one_2(R2); store_one_2(R3); store_one_2(R4); store_one_2(R5); } :STORMP cnt3b is op13=0x0017 & b0_2=4 & cnt3b { store_one_2(R2); store_one_2(R3); store_one_2(R4); store_one_2(R5); store_one_2(R8); } :STORMP cnt3b is op13=0x0017 & b0_2=5 & cnt3b { store_one_2(R2); store_one_2(R3); store_one_2(R4); store_one_2(R5); store_one_2(R8); store_one_2(R9); } :STORMP cnt3b is op13=0x0017 & b0_2=6 & cnt3b { store_one_2(R2); store_one_2(R3); store_one_2(R4); store_one_2(R5); store_one_2(R8); store_one_2(R9); store_one_2(R10); } :STORMP cnt3b is op13=0x0017 & b0_2=7 & cnt3b { store_one_2(R2); store_one_2(R3); store_one_2(R4); store_one_2(R5); store_one_2(R8); store_one_2(R9); store_one_2(R10); 
store_one_2(R11); } # STORW - store word :STORW src, prp const1(prp_dst1) is hi=0xde & src & prp & const1 & prp_dst1 { # fmt 18 tmp:4 = prp_dst1:4 + prp:4; *tmp = src; } :STORW src3, prp2 disp14(prp_dst) is op10=0x31b & prp2 & disp14 & src3 & prp_dst { # fmt 17 tmp:4 = prp_dst:4 + prp2:4 + zext(disp14); *tmp = src3; } :STORW src3, disp20(dst4) is op=0x0013 ; n8=12 & src3 & disp20 & dst4 { # fmt 2 tmp:4 = zext(dst4) + zext(disp20); *tmp = src3; } :STORW src3, -disp20(dst4) is op=0x0019 ; n8=12 & src3 & disp20 & dst4 { tmp:4 = zext(dst4) - zext(disp20); *tmp = src3; } :STORW src, disp16b(rp_dst) is hi=0xdf & src & rp_dst ; disp16b { tmp:4 = rp_dst:4 + zext(disp16b); *tmp = src; } :STORW src3, disp20(rp_dst5) is op=0x0013 ; n8=13 & src3 & disp20 & rp_dst5 { # fmt 2 tmp:4 = rp_dst5:4 + zext(disp20); *tmp = src3; } :STORW src3, -disp20(rp_dst5) is op=0x0019 ; n8=13 & src3 & disp20 & rp_dst5 { tmp:4 = rp_dst5:4 - zext(disp20); *tmp = src3; } :STORW src, disp4(rp_dst) is op4=0xd & disp4 & src & rp_dst { # fmt 18 tmp:4 = rp_dst:4 + zext(disp4); *tmp = src; } :STORW src3, disp20(prp_dst) is op=0x0013 ; n8=14 & src3 & disp20 & prp_dst { # fmt 2 tmp:4 = prp_dst:4 + zext(disp20); *tmp = src3; } :STORW src3, abs20 is dw_hi=0xc9 & abs20 & src3 { tmp:4 = zext(abs20); *tmp = src3; } :STORW src3, rs abs20 is dw_op7=0x67 & src3 & rs & abs20 { # fmt 13 tmp:4 = zext(abs20) + rs:4; *tmp = src3; } :STORW src3, abs24 is op=0x0013 ; n8=15 & src3 & abs24 { # fmt 3 *abs24 = src3; } :STORW imm4c, prp2 disp14(prp_dst) is op10=0x318 & imm4c & prp2 & disp14 & prp_dst { # fmt 17 tmp:4 = prp_dst:4 + prp2:4 + zext(disp14); *:2 tmp = sext(imm4c); } :STORW imm4c, disp20(dst4) is op=0x0013 ; n8=0 & imm4c & disp20 & dst4 { # fmt 2 tmp:4 = zext(dst4) + zext(disp20); *:2 tmp = sext(imm4c); } :STORW imm4a, const1(rp_dst) is hi=0xc2 & imm4a & const1 & rp_dst { # fmt 15 *:2 rp_dst:4 = sext(imm4a); } :STORW imm4a, disp16b(rp_dst) is hi=0xc3 & imm4a & rp_dst ; disp16b { # fmt 16 tmp:4 = rp_dst:4 + 
zext(disp16b); *:2 tmp = sext(imm4a); } :STORW imm4c, disp20(rp_dst5) is op=0x0013 ; n8=1 & imm4c & disp20 & rp_dst5 { # fmt 2 tmp:4 = rp_dst5:4 + zext(disp20); *:2 tmp = sext(imm4c); } :STORW imm4c, disp20(prp_dst) is op=0x0013 ; n8=2 & imm4c & disp20 & prp_dst { tmp:4 = prp_dst:4 + zext(disp20); *:2 tmp = sext(imm4c); } :STORW imm4b, abs20 is dw_hi=0xc1 & imm4b & abs20 { tmp:4 = zext(abs20); *:2 tmp = sext(imm4b); } :STORW imm4b, rs abs20 is dw_op7=0x62 & imm4b & rs & abs20 { # fmt 13 tmp:4 = zext(abs20) + rs:4; *:2 tmp = sext(imm4b); } :STORW imm4b, abs24 is op=0x0013 ; n8=3 & imm4b & abs24 { # fmt 3 *:2 abs24 = sext(imm4b); } # MISC # CINV - Cache Invalidate :CINV cinv1 is op=0x000a & cinv1 { } :CINV cinv2 is op=0x000b & cinv2 { } :CINV cinv3 is op=0x000c & cinv3 { } :CINV cinv4 is op=0x000d & cinv4 { } :CINV cinv5 is op=0x000e & cinv5 { } :CINV cinv6 is op=0x000f & cinv6 { } # Disable Maskable Interrupts :DI is op=0x0004 { $(E) = 0; } # Enable Maskable Interrupts :EI is op=0x0005 { $(E) = 1; } # Wait for Interrupt :WAIT is op=0x0006 { suspend(); } # Enable Interrupt and Wait for Interrupt :EIWAIT is op=0x0007 { $(E) = 1; suspend(); } # Reserved :undef is op=0x0000 { } :undef is op=0x0001 { } :undef is op=0x0002 { } :undef is op=0x0008 { } :undef is op=0x0009 { } :undef is op=0x0010 ; w2 ; w3 { } :undef is op=0x0011 ; w2 ; w3 { } :undef is op=0x0014 ; w2 { } :undef is op=0x0015 ; w2 { } :undef is op=0x0016 ; w2 { } :undef is op=0x0017 ; w2 { } :undef is op=0x0018 ; w2 ; w3 { } :undef is op=0x0019 ; w2 ; w3 { } :undef is op=0x001a ; w2 ; w3 { } :undef is op=0x001b ; w2 ; w3 { } :undef is op=0x001c ; w2 ; w3 { } :undef is op=0x001d ; w2 ; w3 { } :undef is op=0x001e ; w2 ; w3 { } :undef is op=0x001f ; w2 ; w3 { } :undef is op12=0x008 ; w2 ; w3 { } :undef is op11=0x007 { } :undef is op9=0x012 ; w2 { } :undef is hi=0x80 { } #:undef is hi=0xc0 { #} :undef is op=0xffff { } ================================================ FILE: 
pypcode/processors/CR16/data/languages/CR16C.slaspec ================================================ define endian=little; @include "CR16C.sinc" ================================================ FILE: pypcode/processors/CR16/data/manuals/CR16.idx ================================================ @prog16c.pdf [ Texas Instruments (formerly National Semiconductor), CompactRISC, CR16C Programmer’s Reference Manual. Part Number: 424521772-101. ] ADDB, 74 ADDW, 74 ADDD, 74 ADDUB, 74 ADDUW, 74 ADDCB, 75 ADDCW, 75 ANDB, 76 ANDW, 76 ANDD, 76 ASHUB, 77 ASHUW, 77 ASHUD, 77 BAL, 79 BEQ, 80 BNE, 80 BCS, 80 BCC, 80 BHI, 80 BLS, 80 BGT, 80 BLE, 80 BFS, 80 BFC, 80 BLO, 80 BHS, 80 BLT, 80 BGE, 80 BEQ0B, 83 BEQ0W, 83 BNE0B, 83 BNE0W, 83 BR, 84 CBITB, 85 CBITW, 85 CINV, 87 CMPB, 88 CMPW, 88 CMPD, 88 DI, 89 EI, 90 EIWAIT, 91 EXCP, 92 JEQ, 93 JNE, 93 JCS, 93 JCC, 93 JHI, 93 JLS, 93 JGT, 93 JLE, 93 JFS, 93 JFC, 93 JLO, 93 JHS, 93 JLT, 93 JGE, 93 JAL, 95 JUMP, 96 JUSR, 96 LOADB, 97 LOADW, 97 LOADD, 97 LOADM, 100 LOADMP, 100 LPR, 101 LPRD, 101 LSHB, 103 LSHW, 103 LSHD, 103 MACSW, 105 MACUW, 106 MACQW, 107 MOVB, 108 MOVW, 108 MOVD, 108 MOVXB, 109 MOVXW, 109 MOVZB, 110 MOVZW, 110 MULB, 111 MULW, 111 MULSB, 112 MULSW, 113 MULUW, 114 NOP, 115 ORB, 116 ORW, 116 ORD, 116 POP, 117 POPrt, 117 PUSH, 120 RETX, 122 SBITB, 123 SBITW, 123 SNE, 125 SCS, 125 SCC, 125 SHI, 125 SLS, 125 SGT, 125 SEQ, 125 SLE, 125 SFS, 125 SFC, 125 SLO, 125 SHS, 125 SLT, 125 SGE, 125 SPR, 127 SPRD, 127 STORB, 129 STORW, 129 STORD, 129 STORM, 132 STORMP, 132 SUBB, 134 SUBW, 134 SUBD, 134 SUBCB, 135 SUBCW, 135 TBIT, 136 TBITB, 136 TBITW, 136 WAIT, 138 XORB, 139 XORW, 139 XORD, 139 ================================================ FILE: pypcode/processors/DATA/data/languages/data-be-64.slaspec ================================================ @define ENDIAN "big" @define RAMSIZE "8" @include "data.sinc" ================================================ FILE: pypcode/processors/DATA/data/languages/data-le-64.slaspec
================================================ @define ENDIAN "little" @define RAMSIZE "8" @include "data.sinc" ================================================ FILE: pypcode/processors/DATA/data/languages/data-ptr16.cspec ================================================ ================================================ FILE: pypcode/processors/DATA/data/languages/data-ptr32.cspec ================================================ ================================================ FILE: pypcode/processors/DATA/data/languages/data-ptr64.cspec ================================================ ================================================ FILE: pypcode/processors/DATA/data/languages/data.ldefs ================================================ Raw Data File (Little Endian) Raw Data File (Big Endian) ================================================ FILE: pypcode/processors/DATA/data/languages/data.pspec ================================================ ================================================ FILE: pypcode/processors/DATA/data/languages/data.sinc ================================================ define endian = $(ENDIAN); define alignment = 1; define space ram type=ram_space size=8 default; define space register type=register_space size=4; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # AT LEAST ONE REGISTER, AND STACK POINTER ARE REQUIRED # # # # # # # # # # # # # # # # # # # # # # # # # # # # define register offset=0x0 size=8 [ sp r0 ]; # Define context bits define register offset=0x100 size=4 contextreg; define context contextreg test=(0,0) ; # # # # # # # # # # # # # # # # # # # # # # # # # # # # # AT LEAST ONE INSTRUCTION IS REQUIRED # # # # # # # # # # # # # # # # # # # # # # # # # # # # :nop is test=1 unimpl # # # # # # # # # # # # # # # # # # # # # # # # # # # # ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik.ldefs ================================================ Dalvik Base Dalvik Base Dalvik 
DEX KitKat Dalvik ODEX KitKat Dalvik DEX Lollipop Dalvik DEX Marshmallow Dalvik DEX Nougat Dalvik DEX Oreo Dalvik DEX Pie Dalvik DEX Android10 Dalvik DEX Android11 Dalvik DEX Android12 Dalvik DEX Android13 ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik.opinion ================================================ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_Base.cspec ================================================ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_Base.pspec ================================================ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_Base.sinc ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/[...]/runtime/dex_instruction_list.h # where [...] 
is the actual Android version name (ie "oreo-release") define endian=little; define alignment=1; @define CPOOL_METHOD "0:4" @define CPOOL_FIELD "1:4" @define CPOOL_STATIC_FIELD "2:4" @define CPOOL_STATIC_METHOD "3:4" @define CPOOL_STRING "4:4" @define CPOOL_CLASSREF "5:4" @define CPOOL_ARRAYLENGTH "6:4" @define CPOOL_SUPER "7:4" @define CPOOL_INSTANCEOF "8:4" #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ define space ram type=ram_space size=4 default; #define space object type=ram_space size=4; # object instances #define space method type=ram_space size=4; # method references #define space field type=ram_space size=4; # field references define space register type=register_space size=4; #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ define register offset=0x0 size=4 [ sp fp resultreg ]; define register offset=0x8 size=8 [ resultregw ]; define register offset=0x100 size=4 # Special input registers [ iv0 iv1 iv2 iv3 iv4 iv5 iv6 iv7 iv8 iv9 iv10 iv11 iv12 iv13 iv14 iv15 ]; define register offset=0x104 size=8 # Wide input registers ODD [ ivw1 ivw3 ivw5 ivw7 ivw9 ivw11 ivw13 ]; define register offset=0x100 size=8 # Wide input registers EVEN [ ivw0 ivw2 ivw4 ivw6 ivw8 ivw10 ivw12 ivw14 ]; define register offset=0x1000 size=4 [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 v32 v33 v34 v35 v36 v37 v38 v39 v40 v41 v42 v43 v44 v45 v46 v47 v48 v49 v50 v51 v52 v53 v54 v55 v56 v57 v58 v59 v60 v61 v62 v63 v64 v65 v66 v67 v68 v69 v70 v71 v72 v73 v74 v75 v76 v77 v78 v79 v80 v81 v82 v83 v84 v85 v86 v87 v88 v89 v90 v91 v92 v93 v94 v95 v96 v97 v98 v99 v100 v101 v102 v103 v104 v105 v106 v107 v108 v109 v110 v111 v112 v113 v114 v115 v116 v117 v118 v119 v120 v121 v122 
v123 v124 v125 v126 v127 v128 v129 v130 v131 v132 v133 v134 v135 v136 v137 v138 v139 v140 v141 v142 v143 v144 v145 v146 v147 v148 v149 v150 v151 v152 v153 v154 v155 v156 v157 v158 v159 v160 v161 v162 v163 v164 v165 v166 v167 v168 v169 v170 v171 v172 v173 v174 v175 v176 v177 v178 v179 v180 v181 v182 v183 v184 v185 v186 v187 v188 v189 v190 v191 v192 v193 v194 v195 v196 v197 v198 v199 v200 v201 v202 v203 v204 v205 v206 v207 v208 v209 v210 v211 v212 v213 v214 v215 v216 v217 v218 v219 v220 v221 v222 v223 v224 v225 v226 v227 v228 v229 v230 v231 v232 v233 v234 v235 v236 v237 v238 v239 v240 v241 v242 v243 v244 v245 v246 v247 v248 v249 v250 v251 v252 v253 v254 v255 ]; define register offset=0x1004 size=8 # ODD NUMBER WIDE REGISTERS [ vw1 vw3 vw5 vw7 vw9 vw11 vw13 vw15 vw17 vw19 vw21 vw23 vw25 vw27 vw29 vw31 vw33 vw35 vw37 vw39 vw41 vw43 vw45 vw47 vw49 vw51 vw53 vw55 vw57 vw59 vw61 vw63 vw65 vw67 vw69 vw71 vw73 vw75 vw77 vw79 vw81 vw83 vw85 vw87 vw89 vw91 vw93 vw95 vw97 vw99 vw101 vw103 vw105 vw107 vw109 vw111 vw113 vw115 vw117 vw119 vw121 vw123 vw125 vw127 vw129 vw131 vw133 vw135 vw137 vw139 vw141 vw143 vw145 vw147 vw149 vw151 vw153 vw155 vw157 vw159 vw161 vw163 vw165 vw167 vw169 vw171 vw173 vw175 vw177 vw179 vw181 vw183 vw185 vw187 vw189 vw191 vw193 vw195 vw197 vw199 vw201 vw203 vw205 vw207 vw209 vw211 vw213 vw215 vw217 vw219 vw221 vw223 vw225 vw227 vw229 vw231 vw233 vw235 vw237 vw239 vw241 vw243 vw245 vw247 vw249 vw251 vw253 ]; define register offset=0x1000 size=8 # EVEN NUMBER WIDE REGISTERS [ vw0 vw2 vw4 vw6 vw8 vw10 vw12 vw14 vw16 vw18 vw20 vw22 vw24 vw26 vw28 vw30 vw32 vw34 vw36 vw38 vw40 vw42 vw44 vw46 vw48 vw50 vw52 vw54 vw56 vw58 vw60 vw62 vw64 vw66 vw68 vw70 vw72 vw74 vw76 vw78 vw80 vw82 vw84 vw86 vw88 vw90 vw92 vw94 vw96 vw98 vw100 vw102 vw104 vw106 vw108 vw110 vw112 vw114 vw116 vw118 vw120 vw122 vw124 vw126 vw128 vw130 vw132 vw134 vw136 vw138 vw140 vw142 vw144 vw146 vw148 vw150 vw152 vw154 vw156 vw158 vw160 vw162 vw164 vw166 vw168 vw170 vw172 vw174 vw176 vw178 
vw180 vw182 vw184 vw186 vw188 vw190 vw192 vw194 vw196 vw198 vw200 vw202 vw204 vw206 vw208 vw210 vw212 vw214 vw216 vw218 vw220 vw222 vw224 vw226 vw228 vw230 vw232 vw234 vw236 vw238 vw240 vw242 vw244 vw246 vw248 vw250 vw252 vw254 ]; # TODO: # 1) test accessing register space past v255. e.g. v12345. #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ define token instruction_byte ( 8 ) inst0 = ( 0, 7 ) ; define token instruction_byte_w_padding ( 16 ) inst1 = ( 0, 7 ) inst1_padding = ( 0, 15 ) ; define token instruction_operands_4_4 ( 8 ) A_BITS_0_3 = (0,3) B_BITS_0_3 = (0,3) A_BITS_4_7 = (4,7) B_BITS_4_7 = (4,7) B_BITS_4_7_S = (4,7) signed ; define token instruction_operands_8 ( 8 ) A_BITS_0_7 = (0,7) A_BITS_0_7_S = (0,7) signed B_BITS_0_7 = (0,7) B_BITS_0_7_S = (0,7) signed C_BITS_0_7 = (0,7) C_BITS_0_7_S = (0,7) signed ; define token instruction_operands_16 ( 16 ) A_BITS_0_15 = (0,15) A_BITS_0_15_S = (0,15) signed B_BITS_0_15 = (0,15) B_BITS_0_15_S = (0,15) signed C_BITS_0_15 = (0,15) C_BITS_0_15_S = (0,15) signed ; define token instruction_operands_32 ( 32 ) A_BITS_0_31 = (0,31) A_BITS_0_31_S = (0,31) signed B_BITS_0_31 = (0,31) B_BITS_0_31_S = (0,31) signed C_BITS_0_31 = (0,31) C_BITS_0_31_S = (0,31) signed ; define token invoke_operands ( 40 ) N_PARAMS = ( 4, 7) PARAM_G = ( 0, 3) METHOD_INDEX = ( 8,23) VTABLE_OFFSET = ( 8,23) INLINE = ( 8,23) PARAM_D = (28,31) PARAM_C = (24,27) PARAM_F = (36,39) PARAM_E = (32,35) ; define token array_operands ( 40 ) N_ELEMENTS = ( 4, 7) ELEMENT_G = ( 0, 3) TYPE_INDEX = ( 8,23) ELEMENT_D = (28,31) ELEMENT_C = (24,27) ELEMENT_F = (36,39) ELEMENT_E = (32,35) ; define token CONST16 ( 16 ) # one 16 constant constant16 = ( 0,15 ) constant16s = ( 0,15 ) signed ; define token CONST32 ( 32 ) # one 32 constant constant32 = ( 0,31 ) constant32s = ( 0,31 ) signed ; define token CONST64 ( 64 ) # one 64 constant constant64 = 
( 0,63 ) ; # add "8" to skip over "fp" and "sp" !! registerA4: reg is A_BITS_0_3 [ reg = (A_BITS_0_3 * 4) + 0x1000; ] { export *[register]:4 reg; } registerA8: reg is A_BITS_0_7 [ reg = (A_BITS_0_7 * 4) + 0x1000; ] { export *[register]:4 reg; } registerA16: reg is A_BITS_0_15 [ reg = (A_BITS_0_15 * 4) + 0x1000; ] { export *[register]:4 reg; } registerA4w: reg is A_BITS_0_3 [ reg = (A_BITS_0_3 * 4) + 0x1000; ] { export *[register]:8 reg; } registerA8w: reg is A_BITS_0_7 [ reg = (A_BITS_0_7 * 4) + 0x1000; ] { export *[register]:8 reg; } registerA16w: reg is A_BITS_0_15 [ reg = (A_BITS_0_15 * 4) + 0x1000; ] { export *[register]:8 reg; } registerB4: reg is B_BITS_4_7 [ reg = (B_BITS_4_7 * 4) + 0x1000; ] { export *[register]:4 reg; } registerB8: reg is B_BITS_0_7 [ reg = (B_BITS_0_7 * 4) + 0x1000; ] { export *[register]:4 reg; } registerB16: reg is B_BITS_0_15 [ reg = (B_BITS_0_15 * 4) + 0x1000; ] { export *[register]:4 reg; } registerB4w: reg is B_BITS_4_7 [ reg = (B_BITS_4_7 * 4) + 0x1000; ] { export *[register]:8 reg; } registerB8w: reg is B_BITS_0_7 [ reg = (B_BITS_0_7 * 4) + 0x1000; ] { export *[register]:8 reg; } registerB16w: reg is B_BITS_0_15 [ reg = (B_BITS_0_15 * 4) + 0x1000; ] { export *[register]:8 reg; } registerC8: reg is C_BITS_0_7 [ reg = (C_BITS_0_7 * 4) + 0x1000; ] { export *[register]:4 reg; } registerC16: reg is C_BITS_0_15 [ reg = (C_BITS_0_15 * 4) + 0x1000; ] { export *[register]:4 reg; } registerC32: reg is C_BITS_0_31 [ reg = (C_BITS_0_31 * 4) + 0x1000; ] { export *[register]:4 reg; } registerC8w: reg is C_BITS_0_7 [ reg = (C_BITS_0_7 * 4) + 0x1000; ] { export *[register]:8 reg; } registerC16w: reg is C_BITS_0_15 [ reg = (C_BITS_0_15 * 4) + 0x1000; ] { export *[register]:8 reg; } registerC32w: reg is C_BITS_0_31 [ reg = (C_BITS_0_31 * 4) + 0x1000; ] { export *[register]:8 reg; } regParamC: reg is PARAM_C [ reg = (PARAM_C * 4) + 0x1000; ] { export *[register]:4 reg; } regParamD: reg is PARAM_D [ reg = (PARAM_D * 4) + 0x1000; ] { export 
*[register]:4 reg; }
regParamE: reg  is PARAM_E   [ reg = (PARAM_E * 4)   + 0x1000; ] { export *[register]:4 reg; }
regParamF: reg  is PARAM_F   [ reg = (PARAM_F * 4)   + 0x1000; ] { export *[register]:4 reg; }
regParamG: reg  is PARAM_G   [ reg = (PARAM_G * 4)   + 0x1000; ] { export *[register]:4 reg; }

regElemC: reg   is ELEMENT_C [ reg = (ELEMENT_C * 4) + 0x1000; ] { export *[register]:4 reg; }
regElemD: reg   is ELEMENT_D [ reg = (ELEMENT_D * 4) + 0x1000; ] { export *[register]:4 reg; }
regElemE: reg   is ELEMENT_E [ reg = (ELEMENT_E * 4) + 0x1000; ] { export *[register]:4 reg; }
regElemF: reg   is ELEMENT_F [ reg = (ELEMENT_F * 4) + 0x1000; ] { export *[register]:4 reg; }
regElemG: reg   is ELEMENT_G [ reg = (ELEMENT_G * 4) + 0x1000; ] { export *[register]:4 reg; }

#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# Branch-target subtables. All Dalvik branch offsets are counted in 16-bit
# code units, so every target is inst_start + (offset * 2).

rel16:  reloc is A_BITS_0_15_S [ reloc = inst_start + ( A_BITS_0_15_S * 2 ); ] { export *[ram]:32 reloc; }

goto8:  reloc is A_BITS_0_7_S  [ reloc = inst_start + ( A_BITS_0_7_S * 2 );  ] { export *[ram]:8 reloc; }

goto16: reloc is A_BITS_0_15_S [ reloc = inst_start + ( A_BITS_0_15_S * 2 ); ] { export *[ram]:16 reloc; }

# FIX: goto/32 offsets are in 16-bit code units like every other branch
# offset, so the offset must be scaled by 2 (previously it was added raw,
# landing the branch at half the intended distance from inst_start).
goto32: reloc is A_BITS_0_31_S [ reloc = inst_start + ( A_BITS_0_31_S * 2 ); ] { export *[ram]:32 reloc; }

#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# Special op which injects correct p-code for invoke*_range instructions
# It takes two arguments: 1) is the number of parameters for the method 2) is the starting register
define pcodeop moveRangeToIV;

define pcodeop monitorEnter;
define pcodeop monitorExit;
define pcodeop checkCast;
define pcodeop throwException;
define pcodeop getStaticFieldVolatile;
define pcodeop setStaticFieldVolatile;
define pcodeop getInstanceFieldQuick;
define pcodeop getInstanceFieldVolatile;
define
pcodeop setInstanceFieldQuick; define pcodeop setInstanceFieldVolatile; define pcodeop filledNewArray; define pcodeop filledNewArrayRange; define pcodeop invokeSuperQuick; define pcodeop invokeSuperQuickRange; define pcodeop invokeVirtualQuick; define pcodeop invokeVirtualQuickRange; define pcodeop switchAssist; define pcodeop breakpoint; #SOURCE: https://android.googlesource.com/platform/art/+/kitkat-dev/runtime/dex_instruction_list.h #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Waste cycles. :nop is inst0=0x00 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Move the contents of one non-object register to another. # # A: destination register (4 bits) # B: source register (4 bits) :move registerA4,registerB4 is inst0=0x01 ; registerA4 & registerB4 { registerA4 = registerB4; } # Move the contents of one non-object register to another. # # A: destination register (8 bits) # B: source register (16 bits) :move_from_16 registerA8,registerB16 is inst0=0x02 ; registerA8 ; registerB16 { registerA8 = registerB16; } # Move the contents of one non-object register to another. # # A: destination register (16 bits) # B: source register (16 bits) :move_16 registerA16,registerB16 is inst1=0x03 & inst1_padding ; registerA16 ; registerB16 { registerA16 = registerB16; } # Move the contents of one register-pair to another. # # A: destination register pair (4 bits) # B: source register pair (4 bits) :move_wide registerA4w,registerB4w is inst0=0x04 ; registerA4w & registerB4w { registerA4w = registerB4w; } # Move the contents of one register-pair to another. 
#
# A: destination register pair (8 bits)
# B: source register pair (16 bits)
:move_wide_from_16 registerA8w,registerB16w is inst0=0x05 ; registerA8w ; registerB16w
{
	registerA8w = registerB16w;
}

# Move the contents of one register-pair to another.
#
# A: destination register pair (16 bits)
# B: source register pair (16 bits)
#
# FIX: move-wide/16 (0x06) is Dalvik format 32x, whose first code unit is
# "00|op" — the opcode byte followed by a padding byte — just like move/16
# (0x03) and move_object_16 (0x09) below. Matching on the bare one-byte
# inst0 token decoded a 5-byte instruction instead of 6 and desynchronized
# everything after it.
:move_wide_16 registerA16w,registerB16w is inst1=0x06 & inst1_padding ; registerA16w ; registerB16w
{
	registerA16w = registerB16w;
}

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #

# Move the contents of one object-bearing register to another.
#
# A: destination register (4 bits)
# B: source register (4 bits)
:move_object registerA4,registerB4 is inst0=0x07 ; registerA4 & registerB4
{
	registerA4 = registerB4;
}

# Move the contents of one object-bearing register to another.
#
# A: destination register (8 bits)
# B: source register (16 bits)
:move_object_from_16 registerA8,registerB16 is inst0=0x08 ; registerA8 ; registerB16
{
	registerA8 = registerB16;
}

# Move the contents of one object-bearing register to another.
#
# A: destination register (16 bits)
# B: source register (16 bits)
:move_object_16 registerA16,registerB16 is inst1=0x09 & inst1_padding ; registerA16 ; registerB16
{
	registerA16 = registerB16;
}

# Move the single-word non-object result of the most recent invoke-kind
# into the indicated register. This must be done as the instruction
# immediately after an invoke-kind whose (single-word, non-object)
# result is not to be ignored; anywhere else is invalid.
#
# A: destination register (8 bits)
:move_result registerA8 is inst0=0x0a ; registerA8
{
	registerA8 = resultreg;
}

# Move the double-word result of the most recent invoke-kind into
# the indicated register pair. This must be done as the instruction
# immediately after an invoke-kind whose (double-word) result is
# not to be ignored; anywhere else is invalid.
# # A: destination register pair (8 bits) :move_result_wide registerA8w is inst0=0x0b ; registerA8w { registerA8w = resultregw; } # Move the object result of the most recent invoke-kind into # the indicated register. This must be done as the instruction # immediately after an invoke-kind or filled-new-array whose # (object) result is not to be ignored; anywhere else is invalid. # # A: destination register (8 bits) :move_result_object registerA8 is inst0=0x0c ; registerA8 { registerA8 = resultreg; } # Save a just-caught exception into the given register. This must # be the first instruction of any exception handler whose caught # exception is not to be ignored, and this instruction must only # ever occur as the first instruction of an exception handler; # anywhere else is invalid. # # A: destination register (8 bits) :move_exception registerA8 is inst0=0x0d ; registerA8 { #TODO pCode # this requires state!? } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Return from a void method. :return_void is inst1=0x0e & inst1_padding { return [sp]; } # Return from a single-width (32-bit) non-object value-returning method. # # A: return value register (8 bits) :return registerA8 is inst0=0x0f ; registerA8 { resultreg = registerA8; return [sp]; } # Return from a double-width (64-bit) value-returning method. # # A: return value register-pair (8 bits) :return_wide registerA8w is inst0=0x10 ; registerA8w { resultregw = registerA8w; return [sp]; } # Return from an object-returning method. 
# # A: return value register (8 bits) :return_object registerA8 is inst0=0x11 ; registerA8 { resultreg = registerA8; return [sp]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Move the given literal value (sign-extended to 32 bits) into the specified register. # # A: destination register (4 bits) # B: signed int (4 bits) :const_4 registerA4,B_BITS_4_7_S is inst0=0x12 ; registerA4 & B_BITS_4_7_S { registerA4 = sext( B_BITS_4_7_S:4) ; } # Move the given literal value (sign-extended to 32 bits) into the specified register. # # A: destination register (8 bits) # B: signed int (16 bits) :const_16 registerA8,B_BITS_0_15_S is inst0=0x13 ; registerA8 ; B_BITS_0_15_S { registerA8 = sext( B_BITS_0_15_S:4 ); } # Move the given literal value into the specified register. # # A: destination register (8 bits) # B: arbitrary 32-bit constant :"const" registerA8,constant32 is inst0=0x14 ; registerA8 ; constant32 { registerA8 = constant32; } # Move the given literal value (right-zero-extended to 32 bits) into the specified register. # # A: destination register (8 bits) # B: signed int (16 bits) :const_high_16 registerA8,B_BITS_0_15 is inst0=0x15 ; registerA8 ; B_BITS_0_15 { registerA8 = B_BITS_0_15:4 << 16; } # Move the given literal value (sign-extended to 64 bits) into the specified register-pair. # # A: destination register (8 bits) # B: signed int (16 bits) :const_wide_16 registerA8w,constant16s is inst0=0x16 ; registerA8w ; constant16s { registerA8w = sext( constant16s:2 ); } # Move the given literal value (sign-extended to 64 bits) into the specified register-pair. # # A: destination register (8 bits) # B: signed int (32 bits) :const_wide_32 registerA8w,constant32s is inst0=0x17 ; registerA8w ; constant32s { registerA8w = sext( constant32s:4 ); } # Move the given literal value into the specified register-pair. 
# # A: destination register (8 bits) # B: arbitrary double-width (64-bit) constant :const_wide registerA8w,constant64 is inst0=0x18 ; registerA8w ; constant64 { registerA8w = constant64; } # Move the given literal value (right-zero-extended to 64 bits) into the specified register-pair. # # A: destination register (8 bits) # B: signed int (16 bits) :const_wide_high_16 registerA8w,B_BITS_0_15_S is inst0=0x19 ; registerA8w ; B_BITS_0_15_S { registerA8w = B_BITS_0_15_S << 48; } # Move a reference to the string specified by the given index into the specified register. # # A: destination register (8 bits) # B: string index :const_string registerA8,B_BITS_0_15 is inst0=0x1a ; registerA8 ; B_BITS_0_15 { registerA8 = cpool(0:4, B_BITS_0_15:4, $(CPOOL_STRING)); } # Move a reference to the string specified by the given index into the specified register. # # A: destination register (8 bits) # B: string index :const_string_jumbo registerA8,B_BITS_0_31 is inst0=0x1b ; registerA8 ; B_BITS_0_31 { registerA8 = cpool(0:4, B_BITS_0_31:4, $(CPOOL_STRING)); } # Move a reference to the class specified by the given index into the # specified register. In the case where the indicated type is primitive, # this will store a reference to the primitive type's degenerate class. # # A: destination register (8 bits) # B: type index :const_class registerA8,B_BITS_0_15 is inst0=0x1c ; registerA8 ; B_BITS_0_15 { registerA8 = cpool( 0:4, B_BITS_0_15:4, $(CPOOL_CLASSREF)); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Acquire the monitor for the indicated object. # # A: reference-bearing register (8 bits) :monitor_enter registerA8 is inst0=0x1d ; registerA8 { monitorEnter( registerA8 ); } # Release the monitor for the indicated object. 
# # A: reference-bearing register (8 bits) :monitor_exit registerA8 is inst0=0x1e ; registerA8 { monitorExit( registerA8 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Throw a ClassCastException if the reference in the given register # cannot be cast to the indicated type. # # A: reference-bearing register (8 bits) # B: type index (16 bits) :check_cast registerA8,B_BITS_0_15 is inst0=0x1f ; registerA8 ; B_BITS_0_15 { checkCast( registerA8, cpool( 0:4, B_BITS_0_15:4, $(CPOOL_CLASSREF)) ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Store in the given destination register 1 if the indicated reference # is an instance of the given type, or 0 if not. # # A: destination register (4 bits) # B: reference-bearing register (4 bits) # C: type index (16 bits) :instance_of registerA4,registerB4,C_BITS_0_15 is inst0=0x20 ; registerA4 & registerB4 ; C_BITS_0_15 { res:1 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_INSTANCEOF) ); registerA4 = zext( res ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Store in the given destination register the length of the indicated array, in entries # # A: destination register (4 bits) # B: array reference-bearing register (4 bits) :array_length registerA4,registerB4 is inst0=0x21 ; registerA4 & registerB4 { registerA4 = cpool( registerB4, 0:4, $(CPOOL_ARRAYLENGTH) ); } # Construct a new instance of the indicated type, storing a reference # to it in the destination. The type must refer to a non-array class. 
# # A: destination register (8 bits) # B: type index :new_instance registerA8,B_BITS_0_15 is inst0=0x22 ; registerA8 ; B_BITS_0_15 { registerA8 = newobject( cpool( 0:4, B_BITS_0_15:4, $(CPOOL_CLASSREF)) ); } # Construct a new array of the indicated type and size. The type must be an array type. # # A: destination register (8 bits) # B: size register # C: type index :new_array registerA4,registerB4,C_BITS_0_15 is inst0=0x23 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = newobject( cpool( 0:4, C_BITS_0_15:4, $(CPOOL_CLASSREF)), registerB4 ); } # Construct an array of the given type and size, filling it with the supplied # contents. The type must be an array type. The array's contents must be # single-word (that is, no arrays of long or double, but reference types are # acceptable). The constructed instance is stored as a "result" in the same # way that the method invocation instructions store their results, so the # constructed instance must be moved to a register with an immediately # subsequent move-result-object instruction (if it is to be used). 
# # A: array size and argument word count (4 bits) # B: type index (16 bits) # C..G: argument registers (4 bits each) :filled_new_array TYPE_INDEX is inst0=0x24 ; N_ELEMENTS = 0 & TYPE_INDEX & regElemC & regElemD & regElemE & regElemF & regElemG { #TODO pCode filledNewArray( TYPE_INDEX:4 ); } :filled_new_array TYPE_INDEX,regElemC is inst0=0x24 ; N_ELEMENTS = 1 & TYPE_INDEX & regElemC & regElemD & regElemE & regElemF & regElemG { #TODO pCode filledNewArray( TYPE_INDEX:4, regElemC ); } :filled_new_array TYPE_INDEX,regElemC,regElemD is inst0=0x24 ; N_ELEMENTS = 2 & TYPE_INDEX & regElemC & regElemD & regElemE & regElemF & regElemG { #TODO pCode filledNewArray( TYPE_INDEX:4, regElemC, regElemD ); } :filled_new_array TYPE_INDEX,regElemC,regElemD,regElemE is inst0=0x24 ; N_ELEMENTS = 3 & TYPE_INDEX & regElemC & regElemD & regElemE & regElemF & regElemG { #TODO pCode filledNewArray( TYPE_INDEX:4, regElemC, regElemD, regElemE ); } :filled_new_array TYPE_INDEX,regElemC,regElemD,regElemE,regElemF is inst0=0x24 ; N_ELEMENTS = 4 & TYPE_INDEX & regElemC & regElemD & regElemE & regElemF & regElemG { #TODO pCode filledNewArray( TYPE_INDEX:4, regElemC, regElemD, regElemE, regElemF ); } :filled_new_array TYPE_INDEX,regElemC,regElemD,regElemE,regElemF,regElemG is inst0=0x24 ; N_ELEMENTS = 5 & TYPE_INDEX & regElemC & regElemD & regElemE & regElemF & regElemG { #TODO pCode filledNewArray( TYPE_INDEX:4, regElemC, regElemD, regElemE, regElemF, regElemG ); } # Construct an array of the given type and size, filling it with # the supplied contents. Clarifications and restrictions are the # same as filled-new-array, described above. 
# # A: array size and argument word count (8 bits) # B: type index (16 bits) # C: first argument register (16 bits) # N = A + C - 1 :filled_new_array_range A_BITS_0_7,B_BITS_0_15,registerC16 is inst0=0x25 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { #TODO pCode filledNewArrayRange( A_BITS_0_7:4, B_BITS_0_15:4, registerC16 ); } # Fill the given array with the indicated data. The reference must # be to an array of primitives, and the data table must match it in type # and must contain no more elements than will fit in the array. That is, # the array may be larger than the table, and if so, only the initial # elements of the array are set, leaving the remainder alone. # # A: array reference (8 bits) # B: signed "branch" offset to table data pseudo-instruction (32 bits) :fill_array_data registerA8,B_BITS_0_31_S is inst0=0x26 ; registerA8 ; B_BITS_0_31_S { #TODO pCode # fillArrayData } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Throw the indicated exception. # # A: exception-bearing register (8 bits) :throw registerA8 is inst0=0x27 ; registerA8 { throwException( registerA8 ); return [registerA8];#TODO is the best way to return?? } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Unconditionally jump to the indicated instruction. # # A: signed branch offset (8 bits) :goto goto8 is inst0=0x28 ; goto8 { goto goto8; } # Unconditionally jump to the indicated instruction. # # A: signed branch offset (16 bits) :goto_16 goto16 is inst1=0x29 & inst1_padding ; goto16 { goto goto16; } # Unconditionally jump to the indicated instruction. 
#
# A: signed branch offset (32 bits)
:goto_32 goto32 is inst1=0x2a & inst1_padding ; goto32
{
	goto goto32;
}

#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# Jump to a new instruction based on the value in the given register,
# using a table of offsets corresponding to each value in a particular
# integral range, or fall through to the next instruction if there is no match.
#
# A: register to test
# B: signed "branch" offset to table data pseudo-instruction (32 bits)
#
# NOTE: Offset (B) and destinations must be multiplied by 2.
# TODO use disassembly action??
:packed_switch registerA8,B_BITS_0_31_S is inst0=0x2b ; registerA8 ; B_BITS_0_31_S
{
	distance:4 = B_BITS_0_31_S * 2;
	# Payload layout: ident (ushort 0x0100), size (ushort), first_key (int),
	# then size branch targets (int each). ident is decoded only for clarity;
	# it is not checked. FIX: read it at its true 16-bit width instead of 4
	# bytes (the old read overlapped the size field).
	ident:2 = *[ram] ( inst_start + distance );
	size2:2 = *[ram] ( inst_start + distance + 2 );
	sze:4 = zext( size2 );
	first_key:4 = *[ram] ( inst_start + distance + 2 + 2 );
	# FIX: first_key and the tested value are signed 32-bit ints, so the
	# range check must use signed comparisons; unsigned </>= mis-dispatched
	# tables whose first key is negative.
	if ( registerA8 s< first_key ) goto inst_next;
	if ( registerA8 s>= ( first_key + sze ) ) goto inst_next;
	targets:4 = ( inst_start + distance + 2 + 2 + 4 );
	delta:4 = ( registerA8 ) - ( first_key );	# which index into target
	value:4 = *[ram] ( targets + ( delta * 4 ) );
	address:4 = ( inst_start + ( value * 2 ) );
	goto [ address ];
}

# Jump to a new instruction based on the value in the given register,
# using an ordered table of value-offset pairs, or fall through to the
# next instruction if there is no match.
#
# A: register to test
# B: signed "branch" offset to table data pseudo-instruction (32 bits)
#
# NOTE: Offset (B) and destinations must be multiplied by 2.
:sparse_switch registerA8,B_BITS_0_31_S is inst0=0x2c ; registerA8 ; B_BITS_0_31_S { distance:4 = B_BITS_0_31_S * 2; temp:4 = inst_start; size2:2 = *[ram] ( temp + 2 + distance); sze:4 = zext( size2 ); defaultPos:4 = inst_next; address:4 = switchAssist( registerA8, sze, defaultPos, temp, distance ); goto [ address ]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the indicated floating point or long comparison, # setting a to 0 if b == c, 1 if b > c, or -1 if b < c. # The "bias" listed for the floating point operations indicates # how NaN comparisons are treated: "gt bias" instructions return 1 for # NaN comparisons, and "lt bias" instructions return -1. # # For example, to check to see if floating point x < y it is advisable # to use cmpg-float; a result of -1 indicates that the test was true, # and the other values indicate it was false either due to a valid # comparison or because one of the values was NaN. 
#
# A: destination register (8 bits)
# B: first source register or pair
# C: second source register or pair
#
# Result: 0 if B == C, 1 if B > C, -1 if B < C.
# lt-bias (cmpl) ops yield -1 on a NaN operand; gt-bias (cmpg) ops yield 1.

:cmpl_float registerA8,registerB8,registerC8 is inst0=0x2d ; registerA8 ; registerB8 ; registerC8
{
	# f<= / f< are both false when either operand is NaN, so NaN gives 0+0-1 = -1 (lt bias).
	registerA8 = zext( registerC8 f<= registerB8) + zext( registerC8 f< registerB8) - 1;
}

:cmpg_float registerA8,registerB8,registerC8 is inst0=0x2e ; registerA8 ; registerB8 ; registerC8
{
	# FIX: this was a copy of cmpl_float and also produced -1 on NaN.
	# Using negated float comparisons keeps -1/0/1 for ordered operands
	# (B<C: 0+0-1; B==C: 0+1-1; B>C: 1+1-1) but yields 1+1-1 = 1 on NaN (gt bias).
	registerA8 = zext( !(registerB8 f<= registerC8) ) + zext( !(registerB8 f< registerC8) ) - 1;
}

:cmpl_double registerA8,registerB8w,registerC8w is inst0=0x2f ; registerA8 ; registerB8w ; registerC8w
{
	registerA8 = zext( registerC8w f<= registerB8w) + zext( registerC8w f< registerB8w) - 1;
}

:cmpg_double registerA8,registerB8w,registerC8w is inst0=0x30 ; registerA8 ; registerB8w ; registerC8w
{
	# FIX: gt bias — same negated-comparison form as cmpg_float; NaN now yields 1.
	registerA8 = zext( !(registerB8w f<= registerC8w) ) + zext( !(registerB8w f< registerC8w) ) - 1;
}

:cmp_long registerA8,registerB8w,registerC8w is inst0=0x31 ; registerA8 ; registerB8w ; registerC8w
{
	registerA8 = zext( registerC8w s<= registerB8w ) + zext( registerC8w s< registerB8w ) - 1;
}

#------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# Branch to the given destination if the given two registers' values compare as specified.
# # A: first register to test (4 bits) # B: second register to test (4 bits) # C: signed branch offset (16 bits) :if_eq registerA4,registerB4,rel16 is inst0=0x32 ; registerA4 & registerB4 ; rel16 { if ( registerA4 == registerB4 ) goto rel16; } :if_ne registerA4,registerB4,rel16 is inst0=0x33 ; registerA4 & registerB4 ; rel16 { if ( registerA4 != registerB4 ) goto rel16; } :if_lt registerA4,registerB4,rel16 is inst0=0x34 ; registerA4 & registerB4 ; rel16 { if ( registerA4 s< registerB4 ) goto rel16; } :if_ge registerA4,registerB4,rel16 is inst0=0x35 ; registerA4 & registerB4 ; rel16 { if ( registerA4 s>= registerB4 ) goto rel16; } :if_gt registerA4,registerB4,rel16 is inst0=0x36 ; registerA4 & registerB4 ; rel16 { if ( registerA4 s> registerB4 ) goto rel16; } :if_le registerA4,registerB4,rel16 is inst0=0x37 ; registerA4 & registerB4 ; rel16 { if ( registerA4 s<= registerB4 ) goto rel16; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Branch to the given destination if the given register's value compares with 0 as specified. 
# # A: register to test (8 bits) # B: signed branch offset (16 bits) :if_eqz registerA8,rel16 is inst0=0x38 ; registerA8 ; rel16 { if ( registerA8 == 0 ) goto rel16; } :if_nez registerA8,rel16 is inst0=0x39 ; registerA8 ; rel16 { if ( registerA8 != 0 ) goto rel16; } :if_ltz registerA8,rel16 is inst0=0x3a ; registerA8 ; rel16 { if ( registerA8 s< 0 ) goto rel16; } :if_gez registerA8,rel16 is inst0=0x3b ; registerA8 ; rel16 { if ( registerA8 s>= 0 ) goto rel16; } :if_gtz registerA8,rel16 is inst0=0x3c ; registerA8 ; rel16 { if ( registerA8 s> 0 ) goto rel16; } :if_lez registerA8,rel16 is inst0=0x3d ; registerA8 ; rel16 { if ( registerA8 s<= 0 ) goto rel16; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the identified array operation at the identified index # of the given array, loading or storing into the value register. # # A: value register or pair; may be source or dest (8 bits) # B: array register (8 bits) # C: index register (8 bits) :aget registerA8,registerB8,registerC8 is inst0=0x44 ; registerA8 ; registerB8 ; registerC8 { registerA8 = *( registerB8 + registerC8*4 ); } :aget_wide registerA8w,registerB8,registerC8 is inst0=0x45 ; registerA8w ; registerB8 ; registerC8 { registerA8w = *( registerB8 + registerC8*8 ); } :aget_object registerA8,registerB8,registerC8 is inst0=0x46 ; registerA8 ; registerB8 ; registerC8 { registerA8 = *( registerB8 + registerC8*4 ); } :aget_boolean registerA8,registerB8,registerC8 is inst0=0x47 ; registerA8 ; registerB8 ; registerC8 { registerA8 = zext( *:1 ( registerB8 + registerC8 )); } :aget_byte registerA8,registerB8,registerC8 is inst0=0x48 ; registerA8 ; registerB8 ; registerC8 { registerA8 = sext( *:1 (registerB8 + registerC8) ); } :aget_char registerA8,registerB8,registerC8 is inst0=0x49 ; registerA8 ; registerB8 ; registerC8 { registerA8 = zext( *:2 (registerB8 + registerC8*2 ) ); } 
:aget_short registerA8,registerB8,registerC8 is inst0=0x4a ; registerA8 ; registerB8 ; registerC8 { registerA8 = sext( *:2 (registerB8 + registerC8 * 2 ) ); } :aput registerA8,registerB8,registerC8 is inst0=0x4b ; registerA8 ; registerB8 ; registerC8 { *( registerB8 + registerC8 * 4 ) = registerA8; } :aput_wide registerA8w,registerB8,registerC8 is inst0=0x4c ; registerA8w ; registerB8 ; registerC8 { *( registerB8 + registerC8 * 8 ) = registerA8w; } :aput_object registerA8,registerB8,registerC8 is inst0=0x4d ; registerA8 ; registerB8 ; registerC8 { *( registerB8 + registerC8 * 4 ) = registerA8; } :aput_boolean registerA8,registerB8,registerC8 is inst0=0x4e ; registerA8 ; registerB8 ; registerC8 { *( registerB8 + registerC8 ) = registerA8:1; } :aput_byte registerA8,registerB8,registerC8 is inst0=0x4f ; registerA8 ; registerB8 ; registerC8 { *( registerB8 + registerC8 ) = registerA8:1; } :aput_char registerA8,registerB8,registerC8 is inst0=0x50 ; registerA8 ; registerB8 ; registerC8 { *( registerB8 + registerC8*2 ) = registerA8:2; } :aput_short registerA8,registerB8,registerC8 is inst0=0x51 ; registerA8 ; registerB8 ; registerC8 { *( registerB8 + registerC8*2 ) = registerA8:2; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the identified object instance field operation with the # identified field, loading or storing into the value register. 
# # A: value register or pair; may be source or dest (4 bits) # B: object register (4 bits) # C: instance field reference index (16 bits) :iget registerA4,[registerB4:C_BITS_0_15] is inst0=0x52 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = *ptr; } :iget_wide registerA4w,[registerB4:C_BITS_0_15] is inst0=0x53 ; registerA4w & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4w = *ptr; } :iget_object registerA4,[registerB4:C_BITS_0_15] is inst0=0x54 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = *ptr; } :iget_boolean registerA4,[registerB4:C_BITS_0_15] is inst0=0x55 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = zext( *:1 ptr ); } :iget_byte registerA4,[registerB4:C_BITS_0_15] is inst0=0x56 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = sext( *:1 ptr ); } :iget_char registerA4,[registerB4:C_BITS_0_15] is inst0=0x57 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = zext( *:2 ptr ); } :iget_short registerA4,[registerB4:C_BITS_0_15] is inst0=0x58 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = sext( *:2 ptr ); } :iput registerA4,[registerB4:C_BITS_0_15] is inst0=0x59 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4; } :iput_wide registerA4w,[registerB4:C_BITS_0_15] is inst0=0x5a ; registerA4w & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4w; } :iput_object registerA4,[registerB4:C_BITS_0_15] is inst0=0x5b ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = 
registerA4; } :iput_boolean registerA4,[registerB4:C_BITS_0_15] is inst0=0x5c ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 1; } :iput_byte registerA4,[registerB4:C_BITS_0_15] is inst0=0x5d ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 1; } :iput_char registerA4,[registerB4:C_BITS_0_15] is inst0=0x5e ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 2; } :iput_short registerA4,[registerB4:C_BITS_0_15] is inst0=0x5f ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 2; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the identified object static field operation with the identified # static field, loading or storing into the value register. 
# # A: value register or pair; may be source or dest (8 bits) # B: static field reference index (16 bits) :sget registerA8,B_BITS_0_15 is inst0=0x60 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8 = *ptr; } :sget_wide registerA8w,B_BITS_0_15 is inst0=0x61 ; registerA8w ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8w = *ptr; } :sget_object registerA8,B_BITS_0_15 is inst0=0x62 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8 = *ptr; } :sget_boolean registerA8,B_BITS_0_15 is inst0=0x63 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8 = zext(*:1 ptr); } :sget_byte registerA8,B_BITS_0_15 is inst0=0x64 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8 = sext(*:1 ptr); } :sget_char registerA8,B_BITS_0_15 is inst0=0x65 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8 = zext(*:2 ptr); } :sget_short registerA8,B_BITS_0_15 is inst0=0x66 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); registerA8 = sext(*:2 ptr); } :sput registerA8,B_BITS_0_15 is inst0=0x67 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8; } :sput_wide registerA8w,B_BITS_0_15 is inst0=0x68 ; registerA8w ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8w; } :sput_object registerA8,B_BITS_0_15 is inst0=0x69 ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8; } :sput_boolean registerA8,B_BITS_0_15 is inst0=0x6a ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8:1; } :sput_byte registerA8,B_BITS_0_15 is inst0=0x6b ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8:1; 
} :sput_char registerA8,B_BITS_0_15 is inst0=0x6c ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8:2; } :sput_short registerA8,B_BITS_0_15 is inst0=0x6d ; registerA8 ; B_BITS_0_15 { ptr:4 = cpool(0:4,B_BITS_0_15:4,$(CPOOL_STATIC_FIELD)); *ptr = registerA8:2; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Call the indicated method. The result (if any) may be stored with # an appropriate move-result* variant as the immediately subsequent # instruction. # # A: argument word count (4 bits) # B: method reference index (16 bits) # C..G: argument registers (4 bits each) # invoke-virtual is used to invoke a normal virtual method # (a method that is not private, static, or final, and is also not a constructor). :invoke_virtual METHOD_INDEX is inst0=0x6e ; N_PARAMS=0 & METHOD_INDEX { destination:4 = cpool( 0:4, METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_virtual METHOD_INDEX,regParamC is inst0=0x6e ; N_PARAMS=1 & METHOD_INDEX & regParamC { iv0 = regParamC; destination:4 = cpool( regParamC, METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_virtual METHOD_INDEX,regParamC,regParamD is inst0=0x6e ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD { iv0 = regParamC; iv1 = regParamD; destination:4 = cpool( regParamC, METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_virtual METHOD_INDEX,regParamC,regParamD,regParamE is inst0=0x6e ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; destination:4 = cpool( regParamC, METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_virtual METHOD_INDEX,regParamC,regParamD,regParamE,regParamF is inst0=0x6e ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; iv3 
= regParamF; destination:4 = cpool( regParamC, METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_virtual METHOD_INDEX,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0x6e ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; iv3 = regParamF; iv4 = regParamG; destination:4 = cpool( regParamC, METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } # invoke-super is used to invoke the closest superclass's virtual # method (as opposed to the one with the same method_id in the # calling class). The same method restrictions hold as for invoke-virtual. :invoke_super METHOD_INDEX is inst0=0x6f ; N_PARAMS=0 & METHOD_INDEX { destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_super METHOD_INDEX,regParamC is inst0=0x6f ; N_PARAMS=1 & METHOD_INDEX & regParamC { iv0 = regParamC; destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_super METHOD_INDEX,regParamC,regParamD is inst0=0x6f ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD { iv0 = regParamC; iv1 = regParamD; destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_super METHOD_INDEX,regParamC,regParamD,regParamE is inst0=0x6f ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_super METHOD_INDEX,regParamC,regParamD,regParamE,regParamF is inst0=0x6f ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; iv3 = regParamF; destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), METHOD_INDEX:4, $(CPOOL_METHOD)); call [ destination ]; } 
# invoke-super, five argument registers (vC..vG): copy the arguments into
# iv0..iv4, resolve the superclass method via the nested cpool() lookup
# (CPOOL_SUPER, then CPOOL_METHOD), and call it indirectly.
:invoke_super METHOD_INDEX,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0x6f ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	iv3 = regParamF;
	iv4 = regParamG;
	destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), METHOD_INDEX:4, $(CPOOL_METHOD));
	call [ destination ];
}

# invoke-direct is used to invoke a non-static direct
# method (that is, an instance method that is by its
# nature non-overridable, namely either a private instance
# method or a constructor).
#
# One constructor per argument count (N_PARAMS 0..5); the argument registers
# are staged into iv0..iv4 before the call, matching the invoke-virtual family.
# FIX(review): the N_PARAMS=2 variant's display list had a stray trailing comma
# ("...,regParamD,") — removed for consistency with every sibling constructor.
# NOTE(review): unlike invoke-virtual, these pass METHOD_INDEX to cpool()
# without an explicit :4 size cast — confirm the intended operand size.

:invoke_direct METHOD_INDEX is inst0=0x70 ; N_PARAMS=0 & METHOD_INDEX
{
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_METHOD));
	call [ destination ];
}

:invoke_direct METHOD_INDEX,regParamC is inst0=0x70 ; N_PARAMS=1 & METHOD_INDEX & regParamC
{
	iv0 = regParamC;
	destination:4 = cpool( regParamC, METHOD_INDEX, $(CPOOL_METHOD));
	call [ destination ];
}

:invoke_direct METHOD_INDEX,regParamC,regParamD is inst0=0x70 ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD
{
	iv0 = regParamC;
	iv1 = regParamD;
	destination:4 = cpool( regParamC, METHOD_INDEX, $(CPOOL_METHOD));
	call [ destination ];
}

:invoke_direct METHOD_INDEX,regParamC,regParamD,regParamE is inst0=0x70 ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	destination:4 = cpool( regParamC, METHOD_INDEX, $(CPOOL_METHOD));
	call [ destination ];
}

:invoke_direct METHOD_INDEX,regParamC,regParamD,regParamE,regParamF is inst0=0x70 ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	iv3 = regParamF;
	destination:4 = cpool( regParamC, METHOD_INDEX, $(CPOOL_METHOD));
	call [ destination ];
}

:invoke_direct METHOD_INDEX,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0x70 ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	iv3 = regParamF;
	iv4 = regParamG;
	destination:4 = cpool( regParamC, METHOD_INDEX, $(CPOOL_METHOD));
	call [ destination ];
}

# invoke-static is used to invoke a static method
# (which is always considered a direct method).
#
# The cpool() first argument is always 0:4 here (no receiver object);
# resolution uses CPOOL_STATIC_METHOD rather than CPOOL_METHOD.

:invoke_static METHOD_INDEX is inst0=0x71 ; N_PARAMS=0 & METHOD_INDEX
{
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_STATIC_METHOD));
	call [ destination ];
}

:invoke_static METHOD_INDEX,regParamC is inst0=0x71 ; N_PARAMS=1 & METHOD_INDEX & regParamC
{
	iv0 = regParamC;
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_STATIC_METHOD));
	call [ destination ];
}

:invoke_static METHOD_INDEX,regParamC,regParamD is inst0=0x71 ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD
{
	iv0 = regParamC;
	iv1 = regParamD;
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_STATIC_METHOD));
	call [ destination ];
}

:invoke_static METHOD_INDEX,regParamC,regParamD,regParamE is inst0=0x71 ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_STATIC_METHOD));
	call [ destination ];
}

:invoke_static METHOD_INDEX,regParamC,regParamD,regParamE,regParamF is inst0=0x71 ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	iv3 = regParamF;
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_STATIC_METHOD));
	call [ destination ];
}

:invoke_static METHOD_INDEX,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0x71 ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG
{
	iv0 = regParamC;
	iv1 = regParamD;
	iv2 = regParamE;
	iv3 = regParamF;
	iv4 = regParamG;
	destination:4 = cpool( 0:4, METHOD_INDEX, $(CPOOL_STATIC_METHOD));
	call [ destination ];
}

# invoke-interface is used to invoke an interface
# method, that is, on an object whose concrete
# class isn't known, using a method_id that refers
# to an interface.
# invoke_interface (0x72): same staging pattern as invoke-virtual (iv0..iv4,
# cpool with CPOOL_METHOD keyed on the receiver register regParamC).
# invoke_*_range (0x74-0x78): register-range call variants; moveRangeToIV()
# copies A_BITS_0_7 argument registers starting at registerC16 into the iv
# staging registers before resolving and calling the target.
:invoke_interface METHOD_INDEX is inst0=0x72 ; N_PARAMS=0 & METHOD_INDEX { destination:4 = cpool( 0:4, METHOD_INDEX,$(CPOOL_METHOD)); call [ destination ]; } :invoke_interface METHOD_INDEX,regParamC is inst0=0x72 ; N_PARAMS=1 & METHOD_INDEX & regParamC { iv0 = regParamC; destination:4 = cpool(regParamC,METHOD_INDEX,$(CPOOL_METHOD)); call [ destination ]; } :invoke_interface METHOD_INDEX,regParamC,regParamD is inst0=0x72 ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD { iv0 = regParamC; iv1 = regParamD; destination:4 = cpool(regParamC,METHOD_INDEX,$(CPOOL_METHOD)); call [ destination ]; } :invoke_interface METHOD_INDEX,regParamC,regParamD,regParamE is inst0=0x72 ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; destination:4 = cpool(regParamC,METHOD_INDEX,$(CPOOL_METHOD)); call [ destination ]; } :invoke_interface METHOD_INDEX,regParamC,regParamD,regParamE,regParamF is inst0=0x72 ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; iv3 = regParamF; destination:4 = cpool(regParamC,METHOD_INDEX,$(CPOOL_METHOD)); call [ destination ]; } :invoke_interface METHOD_INDEX,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0x72 ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG { iv0 = regParamC; iv1 = regParamD; iv2 = regParamE; iv3 = regParamF; iv4 = regParamG; destination:4 = cpool(regParamC,METHOD_INDEX,$(CPOOL_METHOD)); call [ destination ]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Call the indicated method. # See first invoke-kind description above for details, caveats, and suggestions. 
# # A: argument word count (8 bits) # B: method reference index (16 bits) # C: first argument register (16 bits) # N = A + C - 1 :invoke_virtual_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0x74 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { moveRangeToIV( A_BITS_0_7:4, registerC16 ); destination:4 = cpool(registerC16, B_BITS_0_15:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_super_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0x75 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { moveRangeToIV( A_BITS_0_7:4, registerC16 ); destination:4 = cpool( cpool( 0:4, 0:4, $(CPOOL_SUPER)), B_BITS_0_15:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_direct_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0x76 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { moveRangeToIV( A_BITS_0_7:4, registerC16 ); destination:4 = cpool(registerC16, B_BITS_0_15:4, $(CPOOL_METHOD)); call [ destination ]; } :invoke_static_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0x77 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { moveRangeToIV( A_BITS_0_7:4, registerC16 ); destination:4 = cpool(0:4, B_BITS_0_15:4, $(CPOOL_STATIC_METHOD)); call [ destination ]; } :invoke_interface_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0x78 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { moveRangeToIV( A_BITS_0_7:4, registerC16 ); destination:4 = cpool(registerC16, B_BITS_0_15:4, $(CPOOL_METHOD)); call [ destination ]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the identified unary operation on the source register, # storing the result in the destination register. 
# Unary/conversion ops (0x7b-0x8f) and three-register binops (0x90-0xaf).
# Signed variants use s/, s%, s>>; float/double use the f-prefixed operators.
# NOTE(review): neg_double (0x80) operates on 4-bit-field single registers
# (registerA4/registerB4) while the other double ops here use the wide
# registerA4w/registerB4w pairs — verify against the upstream Ghidra spec.
# NOTE(review): rem_float (0xaa) and rem_double (0xaf) use the integer s%
# operator; the inline TODOs below acknowledge floating-point remainder is
# not modeled.
# # A: destination register or pair (4 bits) # B: source register or pair (4 bits) :neg_int registerA4,registerB4 is inst0=0x7b ; registerA4 & registerB4 { registerA4 = -registerB4; } :not_int registerA4,registerB4 is inst0=0x7c ; registerA4 & registerB4 { registerA4 = ~registerB4; } :neg_long registerA4w,registerB4w is inst0=0x7d ; registerA4w & registerB4w { registerA4w = -registerB4w; } :not_long registerA4w,registerB4w is inst0=0x7e ; registerA4w & registerB4w { registerA4w = ~registerB4w; } :neg_float registerA4,registerB4 is inst0=0x7f ; registerA4 & registerB4 { registerA4 = f-registerB4; } :neg_double registerA4,registerB4 is inst0=0x80 ; registerA4 & registerB4 { registerA4 = f-registerB4; } :int_to_long registerA4w,registerB4 is inst0=0x81 ; registerA4w & registerB4 { registerA4w = sext(registerB4); } :int_to_float registerA4,registerB4 is inst0=0x82 ; registerA4 & registerB4 { registerA4 = int2float(registerB4); } :int_to_double registerA4w,registerB4 is inst0=0x83 ; registerA4w & registerB4 { registerA4w = int2float(registerB4); } :long_to_int registerA4,registerB4w is inst0=0x84 ; registerA4 & registerB4w { registerA4 = registerB4w:4; } :long_to_float registerA4,registerB4w is inst0=0x85 ; registerA4 & registerB4w { registerA4 = int2float(registerB4w); } :long_to_double registerA4w,registerB4w is inst0=0x86 ; registerA4w & registerB4w { registerA4w = int2float(registerB4w); } :float_to_int registerA4,registerB4 is inst0=0x87 ; registerA4 & registerB4 { registerA4 = trunc(registerB4); } :float_to_long registerA4w,registerB4 is inst0=0x88 ; registerA4w & registerB4 { registerA4w = trunc(registerB4); } :float_to_double registerA4w,registerB4 is inst0=0x89 ; registerA4w & registerB4 { registerA4w = float2float(registerB4); } :double_to_int registerA4,registerB4w is inst0=0x8a ; registerA4 & registerB4w { registerA4 = trunc(registerB4w); } :double_to_long registerA4w,registerB4w is inst0=0x8b ; registerA4w & registerB4w { registerA4w = trunc(registerB4w); } 
:double_to_float registerA4,registerB4w is inst0=0x8c ; registerA4 & registerB4w { registerA4 = float2float(registerB4w); } :int_to_byte registerA4,registerB4 is inst0=0x8d ; registerA4 & registerB4 { registerA4 = sext(registerB4:1); } :int_to_char registerA4,registerB4 is inst0=0x8e ; registerA4 & registerB4 { registerA4 = zext(registerB4:2); } :int_to_short registerA4,registerB4 is inst0=0x8f ; registerA4 & registerB4 { registerA4 = sext(registerB4:2); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the identified binary operation on the two source registers, # storing the result in the destination register. # # A: destination register or pair (8 bits) # B: first source register or pair (8 bits) # C: second source register or pair (8 bits) :add_int registerA8,registerB8,registerC8 is inst0=0x90 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 + registerC8; } :sub_int registerA8,registerB8,registerC8 is inst0=0x91 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 - registerC8; } :mul_int registerA8,registerB8,registerC8 is inst0=0x92 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 * registerC8; } :div_int registerA8,registerB8,registerC8 is inst0=0x93 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 s/ registerC8; } :rem_int registerA8,registerB8,registerC8 is inst0=0x94 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 s% registerC8; } :and_int registerA8,registerB8,registerC8 is inst0=0x95 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 & registerC8; } :or_int registerA8,registerB8,registerC8 is inst0=0x96 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 | registerC8; } :xor_int registerA8,registerB8,registerC8 is inst0=0x97 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 ^ registerC8; } :shl_int 
registerA8,registerB8,registerC8 is inst0=0x98 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 << registerC8; } :shr_int registerA8,registerB8,registerC8 is inst0=0x99 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 s>> registerC8; } :ushr_int registerA8,registerB8,registerC8 is inst0=0x9a ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 >> registerC8; } :add_long registerA8w,registerB8w,registerC8w is inst0=0x9b ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w + registerC8w; } :sub_long registerA8w,registerB8w,registerC8w is inst0=0x9c ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w - registerC8w; } :mul_long registerA8w,registerB8w,registerC8w is inst0=0x9d ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w * registerC8w; } :div_long registerA8w,registerB8w,registerC8w is inst0=0x9e ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w s/ registerC8w; } :rem_long registerA8w,registerB8w,registerC8w is inst0=0x9f ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w s% registerC8w; } :and_long registerA8w,registerB8w,registerC8w is inst0=0xa0 ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w & registerC8w; } :or_long registerA8w,registerB8w,registerC8w is inst0=0xa1 ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w | registerC8w; } :xor_long registerA8w,registerB8w,registerC8w is inst0=0xa2 ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w ^ registerC8w; } :shl_long registerA8w,registerB8w,registerC8 is inst0=0xa3 ; registerA8w ; registerB8w ; registerC8 { registerA8w = registerB8w << registerC8; } :shr_long registerA8w,registerB8w,registerC8 is inst0=0xa4 ; registerA8w ; registerB8w ; registerC8 { registerA8w = registerB8w s>> registerC8; } :ushr_long registerA8w,registerB8w,registerC8 is inst0=0xa5 ; registerA8w ; registerB8w ; registerC8 { registerA8w = 
registerB8w >> registerC8; } :add_float registerA8,registerB8,registerC8 is inst0=0xa6 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 f+ registerC8; } :sub_float registerA8,registerB8,registerC8 is inst0=0xa7 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 f- registerC8; } :mul_float registerA8,registerB8,registerC8 is inst0=0xa8 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 f* registerC8; } :div_float registerA8,registerB8,registerC8 is inst0=0xa9 ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 f/ registerC8; } :rem_float registerA8,registerB8,registerC8 is inst0=0xaa ; registerA8 ; registerB8 ; registerC8 { registerA8 = registerB8 s% registerC8;#TODO how to tell floating point?? } :add_double registerA8w,registerB8w,registerC8w is inst0=0xab ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w f+ registerC8w; } :sub_double registerA8w,registerB8w,registerC8w is inst0=0xac ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w f- registerC8w; } :mul_double registerA8w,registerB8w,registerC8w is inst0=0xad ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w f* registerC8w; } :div_double registerA8w,registerB8w,registerC8w is inst0=0xae ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w f/ registerC8w; } :rem_double registerA8w,registerB8w,registerC8w is inst0=0xaf ; registerA8w ; registerB8w ; registerC8w { registerA8w = registerB8w s% registerC8w;#TODO how to tell floating point?? } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the identified binary operation on the two source registers, # storing the result in the first source register. 
# Two-address binops (0xb0-0xcf, dest == first source), lit16 ops (0xd0-0xd7)
# and lit8 ops (0xd8-0xe2) with sign-extended immediate operands.
# NOTE(review): as above, rem_float_2addr/rem_double_2addr use integer s%.
# The "==== FILE: ... ====" banners below are concatenation markers from the
# repository extraction, followed by the tiny .slaspec top-level files that
# @include this base spec plus per-Android-release opcode variants.
# # A: destination and first source register or pair (4 bits) # B: second source register or pair (4 bits) :add_int_2addr registerA4,registerB4 is inst0=0xb0 ; registerA4 & registerB4 { registerA4 = registerA4 + registerB4; } :sub_int_2addr registerA4,registerB4 is inst0=0xb1 ; registerA4 & registerB4 { registerA4 = registerA4 - registerB4; } :mul_int_2addr registerA4,registerB4 is inst0=0xb2 ; registerA4 & registerB4 { registerA4 = registerA4 * registerB4; } :div_int_2addr registerA4,registerB4 is inst0=0xb3 ; registerA4 & registerB4 { registerA4 = registerA4 s/ registerB4; } :rem_int_2addr registerA4,registerB4 is inst0=0xb4 ; registerA4 & registerB4 { registerA4 = registerA4 s% registerB4; } :and_int_2addr registerA4,registerB4 is inst0=0xb5 ; registerA4 & registerB4 { registerA4 = registerA4 & registerB4; } :or_int_2addr registerA4,registerB4 is inst0=0xb6 ; registerA4 & registerB4 { registerA4 = registerA4 | registerB4; } :xor_int_2addr registerA4,registerB4 is inst0=0xb7 ; registerA4 & registerB4 { registerA4 = registerA4 ^ registerB4; } :shl_int_2addr registerA4,registerB4 is inst0=0xb8 ; registerA4 & registerB4 { registerA4 = registerA4 << registerB4; } :shr_int_2addr registerA4,registerB4 is inst0=0xb9 ; registerA4 & registerB4 { registerA4 = registerA4 s>> registerB4; } :ushr_int_2addr registerA4,registerB4 is inst0=0xba ; registerA4 & registerB4 { registerA4 = registerA4 >> registerB4; } :add_long_2addr registerA4w,registerB4w is inst0=0xbb ; registerA4w & registerB4w { registerA4w = registerA4w + registerB4w; } :sub_long_2addr registerA4w,registerB4w is inst0=0xbc ; registerA4w & registerB4w { registerA4w= registerA4w - registerB4w; } :mul_long_2addr registerA4w,registerB4w is inst0=0xbd ; registerA4w & registerB4w { registerA4w = registerA4w * registerB4w; } :div_long_2addr registerA4w,registerB4w is inst0=0xbe ; registerA4w & registerB4w { registerA4w = registerA4w s/ registerB4w; } :rem_long_2addr registerA4w,registerB4w is inst0=0xbf ; registerA4w & 
registerB4w { registerA4w = registerA4w s% registerB4w; } :and_long_2addr registerA4w,registerB4w is inst0=0xc0 ; registerA4w & registerB4w { registerA4w = registerA4w & registerB4w; } :or_long_2addr registerA4w,registerB4w is inst0=0xc1 ; registerA4w & registerB4w { registerA4w = registerA4w | registerB4w; } :xor_long_2addr registerA4w,registerB4w is inst0=0xc2 ; registerA4w & registerB4w { registerA4w = registerA4w ^ registerB4w; } :shl_long_2addr registerA4w,registerB4 is inst0=0xc3 ; registerA4w & registerB4 { registerA4w = registerA4w << registerB4; } :shr_long_2addr registerA4w,registerB4 is inst0=0xc4 ; registerA4w & registerB4 { registerA4w = registerA4w s>> registerB4; } :ushr_long_2addr registerA4w,registerB4 is inst0=0xc5 ; registerA4w & registerB4 { registerA4w = registerA4w >> registerB4; } :add_float_2addr registerA4,registerB4 is inst0=0xc6 ; registerA4 & registerB4 { registerA4 = registerA4 f+ registerB4; } :sub_float_2addr registerA4,registerB4 is inst0=0xc7 ; registerA4 & registerB4 { registerA4 = registerA4 f- registerB4; } :mul_float_2addr registerA4,registerB4 is inst0=0xc8 ; registerA4 & registerB4 { registerA4 = registerA4 f* registerB4; } :div_float_2addr registerA4,registerB4 is inst0=0xc9 ; registerA4 & registerB4 { registerA4 = registerA4 f/ registerB4; } :rem_float_2addr registerA4,registerB4 is inst0=0xca ; registerA4 & registerB4 { registerA4 = registerA4 s% registerB4; } :add_double_2addr registerA4w,registerB4w is inst0=0xcb ; registerA4w & registerB4w { registerA4w = registerA4w f+ registerB4w; } :sub_double_2addr registerA4w,registerB4w is inst0=0xcc ; registerA4w & registerB4w { registerA4w = registerA4w f- registerB4w; } :mul_double_2addr registerA4w,registerB4w is inst0=0xcd ; registerA4w & registerB4w { registerA4w = registerA4w f* registerB4w; } :div_double_2addr registerA4w,registerB4w is inst0=0xce ; registerA4w & registerB4w { registerA4w = registerA4w f/ registerB4w; } :rem_double_2addr registerA4w,registerB4w is 
inst0=0xcf ; registerA4w & registerB4w { registerA4w = registerA4w s% registerB4w; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the indicated binary op on the indicated register (first argument) # and literal value (second argument), storing the result in the destination # register. # # A: destination register (4 bits) # B: source register (4 bits) # C: signed int constant (16 bits) :add_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd0 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 + C_BITS_0_15_S; } :rsub_int registerA4,registerB4,C_BITS_0_15_S is inst0=0xd1 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = C_BITS_0_15_S - registerB4; # Twos-complement reverse subtraction. } :mul_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd2 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 * C_BITS_0_15_S; } :div_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd3 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 s/ C_BITS_0_15_S; } :rem_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd4 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 s% C_BITS_0_15_S; } :and_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd5 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 & C_BITS_0_15_S; } :or_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd6 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 | C_BITS_0_15_S; } :xor_int_lit16 registerA4,registerB4,C_BITS_0_15_S is inst0=0xd7 ; registerA4 & registerB4 ; C_BITS_0_15_S { registerA4 = registerB4 ^ C_BITS_0_15_S; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # Perform the indicated binary op on the indicated 
register (first argument) # and literal value (second argument), storing the result in the destination # register. # # A: destination register (8 bits) # B: source register (8 bits) # C: signed int constant (8 bits) :add_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xd8 ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 + C_BITS_0_7_S; } :rsub_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xd9 ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = C_BITS_0_7_S - registerB8; # Twos-complement reverse subtraction. } :mul_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xda ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 * C_BITS_0_7_S; } :div_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xdb ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 s/ C_BITS_0_7_S; } :rem_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xdc ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 s% C_BITS_0_7_S; } :and_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xdd ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 & C_BITS_0_7_S; } :or_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xde ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 | C_BITS_0_7_S; } :xor_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xdf ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 ^ C_BITS_0_7_S; } :shl_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xe0 ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 << C_BITS_0_7_S; } :shr_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xe1 ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 s>> C_BITS_0_7_S; } :ushr_int_lit8 registerA8,registerB8,C_BITS_0_7_S is inst0=0xe2 ; registerA8 ; registerB8 ; C_BITS_0_7_S { registerA8 = registerB8 >> C_BITS_0_7_S; #TODO should this value be signed??? 
} #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_Base.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM (base set of instructions) #------------------------------------------------------------------------------------ @include "Dalvik_Base.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Android10.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/android10-release/libdexfile/dex/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_no_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_iput_iget.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_FD_dex.sinc" @include "Dalvik_OpCode_FE_FF_dex.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Android11.slaspec ================================================ 
# Per-Android-release top-level .slaspec files (Android11, Android12, KitKat,
# Lollipop, Marshmallow): each pulls in the shared Dalvik_Base.sinc and then
# the release-specific opcode-range .sinc variants (unused vs. implemented).
# The "==== FILE: ... ====" banners are concatenation markers from the dump.
#------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/android11-release/libdexfile/dex/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_no_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_iput_iget.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_FD_dex.sinc" @include "Dalvik_OpCode_FE_FF_dex.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Android12.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/android12-release/libdexfile/dex/dex_instruction_list.h # https://android.googlesource.com/platform/art/+/refs/heads/android13-release/libdexfile/dex/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_unused.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_unused.sinc" @include "Dalvik_OpCode_EB_F2_unused.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include 
"Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_FD_dex.sinc" @include "Dalvik_OpCode_FE_FF_dex.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_KitKat.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/kitkat-release/runtime/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_unused.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_unused.sinc" @include "Dalvik_OpCode_FB_unused.sinc" @include "Dalvik_OpCode_FC_unused.sinc" @include "Dalvik_OpCode_FD_unused.sinc" @include "Dalvik_OpCode_FE_unused.sinc" @include "Dalvik_OpCode_FF_unused.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Lollipop.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # 
https://android.googlesource.com/platform/art/+/refs/heads/lollipop-release/runtime/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_unused.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_unused.sinc" @include "Dalvik_OpCode_FB_unused.sinc" @include "Dalvik_OpCode_FC_unused.sinc" @include "Dalvik_OpCode_FD_unused.sinc" @include "Dalvik_OpCode_FE_unused.sinc" @include "Dalvik_OpCode_FF_unused.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Marshmallow.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/marshmallow-release/runtime/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_no_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_iput_iget.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" 
@include "Dalvik_OpCode_FA_unused.sinc" @include "Dalvik_OpCode_FB_unused.sinc" @include "Dalvik_OpCode_FC_unused.sinc" @include "Dalvik_OpCode_FD_unused.sinc" @include "Dalvik_OpCode_FE_unused.sinc" @include "Dalvik_OpCode_FF_unused.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Nougat.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/nougat-release/runtime/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_no_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_iput_iget.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_FA_unused.sinc" @include "Dalvik_OpCode_FB_unused.sinc" @include "Dalvik_OpCode_FC_unused.sinc" @include "Dalvik_OpCode_FD_unused.sinc" @include "Dalvik_OpCode_FE_unused.sinc" @include "Dalvik_OpCode_FF_unused.sinc" define pcodeop invokeLamda; define token invokeLamda_operands ( 24 ) LAMBDA_vB = ( 4 , 7 ) LAMBDA_vG = ( 0 , 3 ) LAMBDA_vD = ( 12 , 15 ) LAMBDA_vC = ( 8 , 11 ) LAMBDA_vF = ( 20 , 23 ) LAMBDA_vE = ( 16 , 19 ) ; #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF3, INVOKE_LAMBDA, "invoke-lambda", k25x, false, kIndexNone, kContinue | kThrow | kInvoke | kExperimental, kVerifyRegC /*TODO: | kVerifyVarArg*/) \ # k25x, // op vC, {vD, vE, vF, vG} (B: count) # # See https://android.googlesource.com/platform/art/+/nougat-release/runtime/dex_instruction-inl.h # 
# * uint16 ||| uint16 # * 7-0 15-8 7-0 15-8 # * |------|-----|||-----|-----| # * |opcode|vB|vG|||vD|vC|vF|vE| # * |------|-----|||-----|-----| # # e.g. invoke-lambda vClosure, {vD, vE, vF, vG} -- up to 4 parameters + the closure. :invoke_lambda LAMBDA_vC,{} is inst0=0xf3 ; LAMBDA_vB=0 ;LAMBDA_vG ; LAMBDA_vD ; LAMBDA_vC ; LAMBDA_vF; LAMBDA_vE { #TODO pCode } :invoke_lambda LAMBDA_vC,{LAMBDA_vD} is inst0=0xf3 ; LAMBDA_vB=1 ;LAMBDA_vG ; LAMBDA_vD ; LAMBDA_vC ; LAMBDA_vF; LAMBDA_vE { #TODO pCode } :invoke_lambda LAMBDA_vC,{LAMBDA_vD,LAMBDA_vE} is inst0=0xf3 ; LAMBDA_vB=2 ;LAMBDA_vG ; LAMBDA_vD ; LAMBDA_vC ; LAMBDA_vF; LAMBDA_vE { #TODO pCode } :invoke_lambda LAMBDA_vC,{LAMBDA_vD,LAMBDA_vE,LAMBDA_vF} is inst0=0xf3 ; LAMBDA_vB=3 ;LAMBDA_vG ; LAMBDA_vD ; LAMBDA_vC ; LAMBDA_vF; LAMBDA_vE { #TODO pCode } :invoke_lambda LAMBDA_vC,{LAMBDA_vD,LAMBDA_vE,LAMBDA_vF,LAMBDA_vG} is inst0=0xf3 ; LAMBDA_vB=4 ;LAMBDA_vG ; LAMBDA_vD ; LAMBDA_vC ; LAMBDA_vF; LAMBDA_vE { #TODO pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF5, CAPTURE_VARIABLE, "capture-variable", k21c, false, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegBString) \ # # e.g. capture-variable v1, "foobar" :capture_variable registerA8,B_BITS_0_15 is inst0=0xf5 ; registerA8 ; B_BITS_0_15 { #TODO pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF6, CREATE_LAMBDA, "create-lambda", k21c, false_UNUSED, kIndexMethodRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegBMethod) \ # # e.g. 
create-lambda v1, "java/io/PrintStream/print(Ljava/lang/Stream;)V" :create_lambda registerA8,B_BITS_0_15 is inst0=0xf6 ; registerA8 ; B_BITS_0_15 { #TODO pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF7, LIBERATE_VARIABLE, "liberate-variable", k22c, false, kIndexStringRef, kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCString) \ # # e.g. liberate-variable v0, v1, "baz" :liberate_variable registerA4,registerB4,C_BITS_0_15 is inst0=0xf7 ; registerA4 & registerB4 ; C_BITS_0_15 { #TODO pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF8, BOX_LAMBDA, "box-lambda", k22x, true, kIndexNone, kContinue | kExperimental, kVerifyRegA | kVerifyRegB) \ :box_lambda registerA8,registerB16 is inst0=0xf8 ; registerA8 ; registerB16 { #TODO pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF9, UNBOX_LAMBDA, "unbox-lambda", k22c, true, kIndexTypeRef, kContinue | kThrow | kExperimental, kVerifyRegA | kVerifyRegB | kVerifyRegCType) \ :unbox_lambda registerA4,registerB4,C_BITS_0_15 is inst0=0xf9 ; registerA4 & registerB4 ; C_BITS_0_15 { #TODO pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Oreo.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/oreo-release/runtime/dex_instruction_list.h @include "Dalvik_Base.sinc" @include 
"Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_no_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_iput_iget.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_FD_dex.sinc" @include "Dalvik_OpCode_FE_unused.sinc" @include "Dalvik_OpCode_FF_unused.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_DEX_Pie.slaspec ================================================ #------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # Source: # https://android.googlesource.com/platform/art/+/refs/heads/pie-release/libdexfile/dex/dex_instruction_list.h @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_return_void_no_barrier.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_E3_EA_dex.sinc" @include "Dalvik_OpCode_EB_F2_iput_iget.sinc" @include "Dalvik_OpCode_F3_unused.sinc" @include "Dalvik_OpCode_F4_unused.sinc" @include "Dalvik_OpCode_F5_unused.sinc" @include "Dalvik_OpCode_F6_unused.sinc" @include "Dalvik_OpCode_F7_unused.sinc" @include "Dalvik_OpCode_F8_unused.sinc" @include "Dalvik_OpCode_F9_unused.sinc" @include "Dalvik_OpCode_FA_FD_dex.sinc" @include "Dalvik_OpCode_FE_FF_dex.sinc" ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_ODEX_KitKat.slaspec ================================================ 
#------------------------------------------------------------------------------------ # Sleigh specification file for DALVIK VM #------------------------------------------------------------------------------------ # # Source: # https://android.googlesource.com/platform/dalvik/+/refs/heads/kitkat-release/libdex/DexFile.h # https://android.googlesource.com/platform/dalvik/+/refs/heads/kitkat-release/libdex/DexOpcodes.h # @include "Dalvik_Base.sinc" @include "Dalvik_OpCode_3E_43_unused.sinc" @include "Dalvik_OpCode_73_unused.sinc" @include "Dalvik_OpCode_79_unused.sinc" @include "Dalvik_OpCode_7A_unused.sinc" @include "Dalvik_OpCode_FF_unused.sinc" #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iget_volatile registerA4,[registerB4:C_BITS_0_15] is inst0=0xe3 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = getInstanceFieldVolatile( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iput_volatile registerA4,[registerB4:C_BITS_0_15] is inst0=0xe4 ; registerA4 & registerB4 ; C_BITS_0_15 { setInstanceFieldVolatile( registerB4, C_BITS_0_15:16, registerA4 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :sget_volatile registerA8,B_BITS_0_15 is inst0=0xe5 ; registerA8 ; B_BITS_0_15 { registerA8 = getStaticFieldVolatile( B_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :sput_volatile registerA8,B_BITS_0_15 is inst0=0xe6 ; registerA8 ; B_BITS_0_15 { setStaticFieldVolatile( B_BITS_0_15:16, registerA8 ); } 
#------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iget_object_volatile registerA4,[registerB4:C_BITS_0_15] is inst0=0xe7 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = getInstanceFieldVolatile( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iget_wide_volatile registerA4w,[registerB4:C_BITS_0_15] is inst0=0xe8 ; registerA4w & registerB4 ; C_BITS_0_15 { registerA4w = getInstanceFieldVolatile( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iput_wide_volatile registerA4w,[registerB4:C_BITS_0_15] is inst0=0xe9 ; registerA4w & registerB4 ; C_BITS_0_15 { setInstanceFieldVolatile( registerB4, C_BITS_0_15:16, registerA4w ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :sget_wide_volatile registerA8w,B_BITS_0_15 is inst0=0xea ; registerA8w ; B_BITS_0_15 { registerA8w = getStaticFieldVolatile( B_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :sput_wide_volatile registerA8w,B_BITS_0_15 is inst0=0xeb ; registerA8w ; B_BITS_0_15 { setStaticFieldVolatile( B_BITS_0_15:16, registerA8w ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ #:iput_byte_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xec ; registerA4 & registerB4 ; C_BITS_0_15 #{ # 
setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4 ); #} :breakpoint is inst0=0xec { #TODO breakpoint( ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :throw_verification_error registerA8,registerB16 is inst0=0xed ; registerA8 ; registerB16 { registerA8 = registerB16; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :execute_inline INLINE,{} is inst0=0xee ; N_PARAMS=0 & INLINE { destination:4 = *[ram] ( ( 4 * INLINE:4 ) + 0xe0000000 ); call [ destination ]; } :execute_inline INLINE,{regParamC} is inst0=0xee ; N_PARAMS=1 & INLINE & regParamC { destination:4 = *[ram] ( ( 4 * INLINE:4 ) + 0xe0000000 ); call [ destination ]; } :execute_inline INLINE,{regParamC,regParamD} is inst0=0xee ; N_PARAMS=2 & INLINE & regParamC & regParamD { destination:4 = *[ram] ( ( 4 * INLINE:4 ) + 0xe0000000 ); call [ destination ]; } :execute_inline INLINE,{regParamC,regParamD,regParamE} is inst0=0xee ; N_PARAMS=3 & INLINE & regParamC & regParamD & regParamE { destination:4 = *[ram] ( ( 4 * INLINE:4 ) + 0xe0000000 ); call [ destination ]; } :execute_inline INLINE,{regParamC,regParamD,regParamE,regParamF} is inst0=0xee ; N_PARAMS=4 & INLINE & regParamC & regParamD & regParamE & regParamF { destination:4 = *[ram] ( ( 4 * INLINE:4 ) + 0xe0000000 ); call [ destination ]; } :execute_inline INLINE,{regParamC,regParamD,regParamE,regParamF,regParamG} is inst0=0xee ; N_PARAMS=5 & INLINE & regParamC & regParamD & regParamE & regParamF & regParamG { destination:4 = *[ram] ( ( 4 * INLINE:4 ) + 0xe0000000 ); call [ destination ]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :execute_inline_range 
"inline@"^B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0xef ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { destination:4 = *[ram] ( ( 4 * B_BITS_0_15:4 ) + 0xe0000000 ); call [ destination ]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # THIS ODEX INSTRUCTION WAS VALID UNTIL API version 13 (OS Version 3.2.x) # :invoke_direct_empty METHOD_INDEX is inst0=0xf0 ; N_PARAMS=0 & METHOD_INDEX # { # destination:4 = *[ram] ( ( 4 * METHOD_INDEX:4 ) + 0xe0000000 ); # call [ destination ]; # } # :invoke_direct_empty METHOD_INDEX,regParamC is inst0=0xf0 ; N_PARAMS=1 & METHOD_INDEX & regParamC # { # destination:4 = *[ram] ( ( 4 * METHOD_INDEX:4 ) + 0xe0000000 ); # call [ destination ]; # } # :invoke_direct_empty METHOD_INDEX,regParamC,regParamD is inst0=0xf0 ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD # { # destination:4 = *[ram] ( ( 4 * METHOD_INDEX:4 ) + 0xe0000000 ); # call [ destination ]; # } # :invoke_direct_empty METHOD_INDEX,regParamC,regParamD,regParamE is inst0=0xf0 ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE # { # destination:4 = *[ram] ( ( 4 * METHOD_INDEX:4 ) + 0xe0000000 ); # call [ destination ]; # } # :invoke_direct_empty METHOD_INDEX,regParamC,regParamD,regParamE,regParamF is inst0=0xf0 ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF # { # destination:4 = *[ram] ( ( 4 * METHOD_INDEX:4 ) + 0xe0000000 ); # call [ destination ]; # } # :invoke_direct_empty METHOD_INDEX,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0xf0 ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG # { # destination:4 = *[ram] ( ( 4 * METHOD_INDEX:4 ) + 0xe0000000 ); # call [ destination ]; # } :invoke_object_init_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0xf0 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { destination:4 = *[ram] ( ( 4 * B_BITS_0_15:4 ) + 0xe0000000 
); call [ destination ]; } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :return_void_barrier is inst1=0xf1 & inst1_padding { return [sp];#TODO } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iget_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xf2 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = getInstanceFieldQuick( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iget_wide_quick "field@"^C_BITS_0_15,registerA4w,registerB4 is inst0=0xf3 ; registerA4w & registerB4 ; C_BITS_0_15 { registerA4w = getInstanceFieldQuick( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iget_object_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xf4 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = getInstanceFieldQuick( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iput_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xf5 ; registerA4 & registerB4 ; C_BITS_0_15 { setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iput_wide_quick "field@"^C_BITS_0_15,registerA4w,registerB4 is inst0=0xf6 ; registerA4w & registerB4 ; C_BITS_0_15 { 
setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4w ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iput_object_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xf7 ; registerA4 & registerB4 ; C_BITS_0_15 { setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :invoke_virtual_quick "vtable@"^VTABLE_OFFSET is inst0=0xf8 ; N_PARAMS=0 & VTABLE_OFFSET { invokeVirtualQuick( VTABLE_OFFSET:4 ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC is inst0=0xf8 ; N_PARAMS=1 & VTABLE_OFFSET & regParamC { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD is inst0=0xf8 ; N_PARAMS=2 & VTABLE_OFFSET & regParamC & regParamD { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE is inst0=0xf8 ; N_PARAMS=3 & VTABLE_OFFSET & regParamC & regParamD & regParamE { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE,regParamF is inst0=0xf8 ; N_PARAMS=4 & VTABLE_OFFSET & regParamC & regParamD & regParamE & regParamF { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE, regParamF ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0xf8 ; N_PARAMS=5 & VTABLE_OFFSET & regParamC & regParamD & regParamE & regParamF & regParamG { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE, regParamF, regParamG ); } #------------------------------------------------------------------------------------ 
#------------------------------------------------------------------------------------ :invoke_virtual_quick_range "vtable@"^B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0xf9 ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { invokeVirtualQuickRange( B_BITS_0_15:4, A_BITS_0_7:4, registerC16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :invoke_super_quick "vtable@"^VTABLE_OFFSET is inst0=0xfa ; N_PARAMS=0 & VTABLE_OFFSET { invokeSuperQuick( VTABLE_OFFSET:4 ); } :invoke_super_quick "vtable@"^VTABLE_OFFSET,regParamC is inst0=0xfa ; N_PARAMS=1 & VTABLE_OFFSET & regParamC { invokeSuperQuick( VTABLE_OFFSET:4, regParamC ); } :invoke_super_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD is inst0=0xfa ; N_PARAMS=2 & VTABLE_OFFSET & regParamC & regParamD { invokeSuperQuick( VTABLE_OFFSET:4, regParamC, regParamD ); } :invoke_super_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE is inst0=0xfa ; N_PARAMS=3 & VTABLE_OFFSET & regParamC & regParamD & regParamE { invokeSuperQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE ); } :invoke_super_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE,regParamF is inst0=0xfa ; N_PARAMS=4 & VTABLE_OFFSET & regParamC & regParamD & regParamE & regParamF { invokeSuperQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE, regParamF ); } :invoke_super_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0xfa ; N_PARAMS=5 & VTABLE_OFFSET & regParamC & regParamD & regParamE & regParamF & regParamG { invokeSuperQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE, regParamF, regParamG ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :invoke_super_quick_range "vtable@"^B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0xfb ; A_BITS_0_7 ; 
B_BITS_0_15 ; registerC16 { invokeSuperQuickRange( B_BITS_0_15:4, A_BITS_0_7:4, registerC16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :iput_object_volatile registerA4,registerB4,C_BITS_0_15 is inst0=0xfc ; registerA4 & registerB4 ; C_BITS_0_15 { setInstanceFieldVolatile( registerB4, C_BITS_0_15:16, registerA4 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :sget_object_volatile registerA8,B_BITS_0_15 is inst0=0xfd ; registerA8 ; B_BITS_0_15 { registerA8 = getStaticFieldVolatile( B_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :sput_object_volatile registerA8,B_BITS_0_15 is inst0=0xfe ; registerA8 ; B_BITS_0_15 { setStaticFieldVolatile( B_BITS_0_15:16, registerA8 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # 0xff ? 
#------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_3E_43_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x3E, UNUSED_3E, "unused-3e", k10x, false, kUnknown, 0, kVerifyError) \ :unused_3e is inst0=0x3e { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x3F, UNUSED_3F, "unused-3f", k10x, false, kUnknown, 0, kVerifyError) \ :unused_3f is inst0=0x3f { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x40, UNUSED_40, "unused-40", k10x, false, kUnknown, 0, kVerifyError) \ :unused_40 is inst0=0x40 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x41, UNUSED_41, "unused-41", k10x, false, kUnknown, 0, kVerifyError) \ :unused_41 is inst0=0x41 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x42, UNUSED_42, "unused-42", k10x, false, kUnknown, 0, kVerifyError) \ :unused_42 is inst0=0x42 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x43, UNUSED_43, "unused-43", k10x, 
false, kUnknown, 0, kVerifyError) \ :unused_43 is inst0=0x43 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_73_return_void_barrier.sinc ================================================ #------------------------------------------------------------------------------------ # V(0x73, RETURN_VOID_BARRIER, "return-void-barrier", k10x, false, kNone, kReturn, kVerifyNone) \ :return_void_barrier is inst1=0x73 & inst1_padding { return [sp]; } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_73_return_void_no_barrier.sinc ================================================ #------------------------------------------------------------------------------------ # V(0x73, RETURN_VOID_NO_BARRIER, "return-void-no-barrier", k10x, false, kNone, kReturn, kVerifyNone) \ :return_void_no_barrier is inst1=0x73 & inst1_padding { return [sp]; } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_73_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ :unused_73 is inst0=0x73 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_79_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x79, UNUSED_79, "unused-79", k10x, false, kUnknown, 0, kVerifyError) \ :unused_79 is inst0=0x79 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_7A_unused.sinc ================================================ 
#------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0x7A, UNUSED_7A, "unused-7a", k10x, false, kUnknown, 0, kVerifyError) \ :unused_7a is inst0=0x7a { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_E3_EA_dex.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE3, IGET_QUICK, "iget-quick", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \ :iget_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xe3 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = getInstanceFieldQuick( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE4, IGET_WIDE_QUICK, "iget-wide-quick", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB) \ :iget_wide_quick "field@"^C_BITS_0_15,registerA4w,registerB4 is inst0=0xe4 ; registerA4w & registerB4 ; C_BITS_0_15 { registerA4w = getInstanceFieldQuick( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE5, IGET_OBJECT_QUICK, "iget-object-quick", k22c, true, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \ :iget_object_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xe5 ; registerA4 & registerB4 ; C_BITS_0_15 { registerA4 = getInstanceFieldQuick( registerB4, C_BITS_0_15:16 ); } #------------------------------------------------------------------------------------ 
#------------------------------------------------------------------------------------ # V(0xE6, IPUT_QUICK, "iput-quick", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \ :iput_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xe6 ; registerA4 & registerB4 ; C_BITS_0_15 { setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE7, IPUT_WIDE_QUICK, "iput-wide-quick", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegAWide | kVerifyRegB) \ :iput_wide_quick "field@"^C_BITS_0_15,registerA4w,registerB4 is inst0=0xe7 ; registerA4w & registerB4 ; C_BITS_0_15 { setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4w ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow, kVerifyRegA | kVerifyRegB) \ :iput_object_quick "field@"^C_BITS_0_15,registerA4,registerB4 is inst0=0xe8 ; registerA4 & registerB4 ; C_BITS_0_15 { setInstanceFieldQuick( registerB4, C_BITS_0_15:16, registerA4 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArg) \ :invoke_virtual_quick "vtable@"^VTABLE_OFFSET is inst0=0xe9 ; N_PARAMS=0 & VTABLE_OFFSET { invokeVirtualQuick( VTABLE_OFFSET:4 ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC is inst0=0xe9 ; N_PARAMS=1 & VTABLE_OFFSET & regParamC { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD is 
inst0=0xe9 ; N_PARAMS=2 & VTABLE_OFFSET & regParamC & regParamD { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE is inst0=0xe9 ; N_PARAMS=3 & VTABLE_OFFSET & regParamC & regParamD & regParamE { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE,regParamF is inst0=0xe9 ; N_PARAMS=4 & VTABLE_OFFSET & regParamC & regParamD & regParamE & regParamF { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE, regParamF ); } :invoke_virtual_quick "vtable@"^VTABLE_OFFSET,regParamC,regParamD,regParamE,regParamF,regParamG is inst0=0xe9 ; N_PARAMS=5 & VTABLE_OFFSET & regParamC & regParamD & regParamE & regParamF & regParamG { invokeVirtualQuick( VTABLE_OFFSET:4, regParamC, regParamD, regParamE, regParamF, regParamG ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRange) \ :invoke_virtual_quick_range "vtable@"^B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0xea ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { invokeVirtualQuickRange( B_BITS_0_15:4, A_BITS_0_7:4, registerC16 ); } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_E3_EA_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE3, UNUSED_E3, "unused-e3", k10x, 
false, kUnknown, 0, kVerifyError) \ :unused_e3 is inst0=0xe3 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE4, UNUSED_E4, "unused-e4", k10x, false, kUnknown, 0, kVerifyError) \ :unused_e4 is inst0=0xe4 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE5, UNUSED_E5, "unused-e5", k10x, false, kUnknown, 0, kVerifyError) \ :unused_e5 is inst0=0xe5 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE6, UNUSED_E6, "unused-e6", k10x, false, kUnknown, 0, kVerifyError) \ :unused_e6 is inst0=0xe6 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE7, UNUSED_E7, "unused-e7", k10x, false, kUnknown, 0, kVerifyError) \ :unused_e7 is inst0=0xe7 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE8, UNUSED_E8, "unused-e8", k10x, false, kUnknown, 0, kVerifyError) \ :unused_e8 is inst0=0xe8 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xE9, UNUSED_E9, "unused-e9", k10x, false, kUnknown, 0, kVerifyError) \ :unused_e9 is inst0=0xe9 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xEA, UNUSED_EA, 
"unused-ea", k10x, false, kUnknown, 0, kVerifyError) \ :unused_ea is inst0=0xea { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_EB_F2_iput_iget.sinc ================================================ #------------------------------------------------------------------------------------ # V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iput_boolean_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xeb ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 1; } #------------------------------------------------------------------------------------ # V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iput_byte_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xec ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 1; } #------------------------------------------------------------------------------------ # V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iput_char_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xed ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 2; } #------------------------------------------------------------------------------------ # V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iput_short_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xee ; registerA4 & registerB4 ; 
C_BITS_0_15 { ptr:4 = cpool(registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); *ptr = registerA4 : 2; } #------------------------------------------------------------------------------------ # V(0xEF, IGET_BOOLEAN_QUICK, "iget-boolean-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iget_boolean_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xef ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = zext( *:1 ptr ); } #------------------------------------------------------------------------------------ # V(0xF0, IGET_BYTE_QUICK, "iget-byte-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iget_byte_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xf0 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = sext( *:1 ptr ); } #------------------------------------------------------------------------------------ # V(0xF1, IGET_CHAR_QUICK, "iget-char-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iget_char_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xf1 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = zext( *:2 ptr ); } #------------------------------------------------------------------------------------ # V(0xF2, IGET_SHORT_QUICK, "iget-short-quick", k22c, true, kFieldRef, kContinue | kThrow | kLoad | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \ :iget_short_quick registerA4,[registerB4:C_BITS_0_15] is inst0=0xf2 ; registerA4 & registerB4 ; C_BITS_0_15 { ptr:4 = cpool( registerB4, C_BITS_0_15:4, $(CPOOL_FIELD)); registerA4 = sext( *:2 ptr ); } ================================================ FILE: 
pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_EB_F2_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xEB, UNUSED_EB, "unused-eb", k10x, false, kUnknown, 0, kVerifyError) \ :unused_eb is inst0=0xeb { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xEC, UNUSED_EC, "unused-ec", k10x, false, kUnknown, 0, kVerifyError) \ :unused_ec is inst0=0xec { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xED, UNUSED_ED, "unused-ed", k10x, false, kUnknown, 0, kVerifyError) \ :unused_ed is inst0=0xed { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xEE, UNUSED_EE, "unused-ee", k10x, false, kUnknown, 0, kVerifyError) \ :unused_ee is inst0=0xee { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xEF, UNUSED_EF, "unused-ef", k10x, false, kUnknown, 0, kVerifyError) \ :unused_ef is inst0=0xef { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF0, UNUSED_F0, "unused-f0", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f0 is inst0=0xf0 { #no pCode } #------------------------------------------------------------------------------------ 
#------------------------------------------------------------------------------------ # V(0xF1, UNUSED_F1, "unused-f1", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f1 is inst0=0xf1 { #no pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF2, UNUSED_F2, "unused-f2", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f2 is inst0=0xf2 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F3_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF3, UNUSED_F3, "unused-f3", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f3 is inst0=0xf3 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F4_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF4, UNUSED_F4, "unused-f4", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f4 is inst0=0xf4 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F5_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF5, UNUSED_F5, "unused-f5", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f5 is inst0=0xf5 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F6_unused.sinc 
================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF6, UNUSED_F6, "unused-f6", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f6 is inst0=0xf6 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F7_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF7, UNUSED_F7, "unused-f7", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f7 is inst0=0xf7 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F8_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF8, UNUSED_F8, "unused-f8", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f8 is inst0=0xf8 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_F9_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xF9, UNUSED_F9, "unused-f9", k10x, false, kUnknown, 0, kVerifyError) \ :unused_f9 is inst0=0xf9 { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FA_FD_dex.sinc ================================================ define token invoke_poly_operands_2 ( 8 ) POLY_ARG_COUNT = ( 4 , 7 ) POLY_PARAM_G = ( 0 , 3 ) ; define 
token invoke_poly_operands_1 ( 48 ) POLY_METHOD_INDEX = ( 0 , 15 ) POLY_PARAM_D = ( 16 , 19 ) POLY_PARAM_C = ( 20 , 23 ) POLY_PARAM_F = ( 24 , 27 ) POLY_PARAM_E = ( 28 , 31 ) POLY_PROTO_INDEX = ( 32 , 47 ) ; #define token invoke_poly_operands_1 ( 64 ) # POLY_METHOD_INDEX = ( 0 , 15 ) # POLY_METHOD_HANDLE = ( 16 , 31 ) # POLY_PARAM_D = ( 32 , 35 ) # POLY_PARAM_C = ( 36 , 39 ) # POLY_PARAM_F = ( 40 , 43 ) # POLY_PARAM_E = ( 44 , 47 ) # POLY_PROTO_INDEX = ( 48 , 63 ) #; regPolyC: reg is POLY_PARAM_C [ reg = (POLY_PARAM_C * 4) + 0x1000; ] { export *[register]:4 reg; } regPolyD: reg is POLY_PARAM_D [ reg = (POLY_PARAM_D * 4) + 0x1000; ] { export *[register]:4 reg; } regPolyE: reg is POLY_PARAM_E [ reg = (POLY_PARAM_E * 4) + 0x1000; ] { export *[register]:4 reg; } regPolyF: reg is POLY_PARAM_F [ reg = (POLY_PARAM_F * 4) + 0x1000; ] { export *[register]:4 reg; } regPolyG: reg is POLY_PARAM_G [ reg = (POLY_PARAM_G * 4) + 0x1000; ] { export *[register]:4 reg; } define token invoke_poly_range_operands ( 40 ) POLY_RANGE_ARG_COUNT = ( 0 , 7 ) POLY_RANGE_METHOD_INDEX = ( 8 , 23 ) POLY_RANGE_PROTO_INDEX = ( 24 , 39 ) ; #------------------------------------------------------------------------------------ # V(0xFA, INVOKE_POLYMORPHIC, "invoke-polymorphic", k45cc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgNonZero | kVerifyRegHPrototype) \ # # invoke-polymorphic {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH # # A: argument word count (4 bits) # B: method reference index (16 bits) # C: method handle reference to invoke (16 bits) # D..G: argument registers (4 bits each) # H: prototype reference index (16 bits) # # case Instruction::k45cc: { // op {vC, vD, vE, vF, vG}, method@BBBB, proto@HHHH # # Invoke the indicated method handle. # The result (if any) may be stored with an appropriate move-result* variant as the immediately subsequent instruction. 
# The method reference must be to java.lang.invoke.MethodHandle.invoke or java.lang.invoke.MethodHandle.invokeExact.
# The prototype reference describes the argument types provided and the expected return type.
#
# Present in Dex files from version 038 onwards.

:invoke_polymorphic "meth@"^POLY_METHOD_INDEX,"proto@"^POLY_PROTO_INDEX,{} is inst0=0xfa ; POLY_ARG_COUNT=0 ; POLY_METHOD_INDEX & POLY_PROTO_INDEX
{
	#TODO pCode
}

:invoke_polymorphic "meth@"^POLY_METHOD_INDEX,"proto@"^POLY_PROTO_INDEX,{regPolyC} is inst0=0xfa ; POLY_ARG_COUNT=1 ; POLY_METHOD_INDEX & POLY_PROTO_INDEX & regPolyC
{
	#TODO pCode
}

:invoke_polymorphic "meth@"^POLY_METHOD_INDEX,"proto@"^POLY_PROTO_INDEX,{regPolyC,regPolyD} is inst0=0xfa ; POLY_ARG_COUNT=2 ; POLY_METHOD_INDEX & POLY_PROTO_INDEX & regPolyC & regPolyD
{
	#TODO pCode
}

:invoke_polymorphic "meth@"^POLY_METHOD_INDEX,"proto@"^POLY_PROTO_INDEX,{regPolyC,regPolyD,regPolyE} is inst0=0xfa ; POLY_ARG_COUNT=3 ; POLY_METHOD_INDEX & POLY_PROTO_INDEX & regPolyC & regPolyD & regPolyE
{
	#TODO pCode
}

:invoke_polymorphic "meth@"^POLY_METHOD_INDEX,"proto@"^POLY_PROTO_INDEX,{regPolyC,regPolyD,regPolyE,regPolyF} is inst0=0xfa ; POLY_ARG_COUNT=4 ; POLY_METHOD_INDEX & POLY_PROTO_INDEX & regPolyC & regPolyD & regPolyE & regPolyF
{
	#TODO pCode
}

# NOTE(review): regPolyG decodes POLY_PARAM_G, which is defined in the same 8-bit
# token (invoke_poly_operands_2) as POLY_ARG_COUNT.  It therefore must be AND-ed
# into the same pattern section as POLY_ARG_COUNT=5.  The previous pattern ended
# "... & regPolyF ; regPolyG", which placed regPolyG in a new token section after
# the 48-bit invoke_poly_operands_1 token, lengthening the instruction by one
# byte and reading vG from the wrong position.  This matches how the 5-argument
# invoke_virtual_quick variant AND-s regParamG with N_PARAMS.
:invoke_polymorphic "meth@"^POLY_METHOD_INDEX,"proto@"^POLY_PROTO_INDEX,{regPolyC,regPolyD,regPolyE,regPolyF,regPolyG} is inst0=0xfa ; POLY_ARG_COUNT=5 & regPolyG ; POLY_METHOD_INDEX & POLY_PROTO_INDEX & regPolyC & regPolyD & regPolyE & regPolyF
{
	#TODO pCode
}

#------------------------------------------------------------------------------------
# V(0xFB, INVOKE_POLYMORPHIC_RANGE, "invoke-polymorphic/range", k4rcc, kIndexMethodAndProtoRef, kContinue | kThrow | kInvoke, kVerifyRegBMethod | kVerifyVarArgRangeNonZero | kVerifyRegHPrototype) \
#
# invoke-polymorphic/range {vCCCC .. vNNNN}, meth@BBBB, proto@HHHH
#
# A: argument word count (8 bits)
# B: method reference index (16 bits)
# C: method handle reference to invoke (16 bits)
# H: prototype reference index (16 bits)
# N = A + C - 1
#
# case Instruction::k4rcc: { // op {vCCCC .. v(CCCC+AA-1)}, method@BBBB, proto@HHHH
#
# Invoke the indicated method handle. See the invoke-polymorphic description above for details.
#
# Present in Dex files from version 038 onwards.

:invoke_polymorphic_range "meth@"^POLY_RANGE_METHOD_INDEX,"cnt@"^POLY_RANGE_ARG_COUNT,"proto@"^POLY_RANGE_PROTO_INDEX is inst0=0xfb ; POLY_RANGE_ARG_COUNT & POLY_RANGE_METHOD_INDEX & POLY_RANGE_PROTO_INDEX
{
	#TODO pCode
}

#------------------------------------------------------------------------------------
# V(0xFC, INVOKE_CUSTOM, "invoke-custom", k35c, kIndexCallSiteRef, kContinue | kThrow, kVerifyRegBCallSite | kVerifyVarArg) \
#
# invoke-custom {vC, vD, vE, vF, vG}, call_site@BBBB
#
# A: argument word count (4 bits)
# B: call site reference index (16 bits)
# C..G: argument registers (4 bits each)
#
# Resolves and invokes the indicated call site.
# The result from the invocation (if any) may be stored with an
# appropriate move-result* variant as the immediately subsequent instruction.
#
# This instruction executes in two phases: call site resolution and call site invocation.
#
# Call site resolution checks whether the indicated call site has an associated
# java.lang.invoke.CallSite instance. If not, the bootstrap linker method for the
# indicated call site is invoked using arguments present in the DEX file (see call_site_item).
# The bootstrap linker method returns a java.lang.invoke.CallSite instance that will then
# be associated with the indicated call site if no association exists. Another thread may
# have already made the association first, and if so execution of the instruction continues
# with the first associated java.lang.invoke.CallSite instance.
#
# Call site invocation is made on the java.lang.invoke.MethodHandle target of the resolved
# java.lang.invoke.CallSite instance. The target is invoked as if executing invoke-polymorphic
# (described above) using the method handle and arguments to the invoke-custom instruction as
# the arguments to an exact method handle invocation.
#
# Present in Dex files from version 038 onwards.

:invoke_custom METHOD_INDEX,{} is inst0=0xfc ; N_PARAMS=0 & METHOD_INDEX
{
	#TODO pCode -- see invoke_direct
}

:invoke_custom METHOD_INDEX,{regParamC} is inst0=0xfc ; N_PARAMS=1 & METHOD_INDEX & regParamC
{
	#TODO pCode
}

# NOTE(review): removed a stray '^' before METHOD_INDEX that only this variant had.
# In a display section '^' concatenates without whitespace, so the two-argument
# form printed with no space between the mnemonic and the method index, unlike
# the other five invoke_custom variants.
:invoke_custom METHOD_INDEX,{regParamC,regParamD} is inst0=0xfc ; N_PARAMS=2 & METHOD_INDEX & regParamC & regParamD
{
	#TODO pCode
}

:invoke_custom METHOD_INDEX,{regParamC,regParamD,regParamE} is inst0=0xfc ; N_PARAMS=3 & METHOD_INDEX & regParamC & regParamD & regParamE
{
	#TODO pCode
}

:invoke_custom METHOD_INDEX,{regParamC,regParamD,regParamE,regParamF} is inst0=0xfc ; N_PARAMS=4 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF
{
	#TODO pCode
}

:invoke_custom METHOD_INDEX,{regParamC,regParamD,regParamE,regParamF,regParamG} is inst0=0xfc ; N_PARAMS=5 & METHOD_INDEX & regParamC & regParamD & regParamE & regParamF & regParamG
{
	#TODO pCode
}

#------------------------------------------------------------------------------------
# V(0xFD, INVOKE_CUSTOM_RANGE, "invoke-custom/range", k3rc, kIndexCallSiteRef, kContinue | kThrow, kVerifyRegBCallSite | kVerifyVarArgRange) \
#
# invoke-custom/range {vCCCC .. vNNNN}, call_site@BBBB
#
# A: argument word count (8 bits)
# B: call site reference index (16 bits)
# C: first argument register (16-bits)
# N = A + C - 1
#
# Resolve and invoke a call site. See the invoke-custom description above for details.
#
# Present in Dex files from version 038 onwards.
:invoke_custom_range B_BITS_0_15,A_BITS_0_7,registerC16 is inst0=0xfd ; A_BITS_0_7 ; B_BITS_0_15 ; registerC16 { #TODO pCode -- see invoke_direct_range } #------------------------------------------------------------------------------------ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FA_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFA, UNUSED_FA, "unused-fa", k10x, false, kUnknown, 0, kVerifyError) \ :unused_fa is inst0=0xfa { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FB_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFB, UNUSED_FB, "unused-fb", k10x, false, kUnknown, 0, kVerifyError) \ :unused_fb is inst0=0xfb { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FC_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFC, UNUSED_FC, "unused-fc", k10x, false, kUnknown, 0, kVerifyError) \ :unused_fc is inst0=0xfc { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FD_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFD, UNUSED_FD, 
"unused-fd", k10x, false, kUnknown, 0, kVerifyError) \ :unused_fd is inst0=0xfd { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FE_FF_dex.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFE, CONST_METHOD_HANDLE, "const-method-handle", k21c, kIndexMethodHandleRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBMethodHandle) \ :const_method_handle registerA8,B_BITS_0_15 is inst0=0xfe ; registerA8 ; B_BITS_0_15 { #TODO pCode } #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFF, CONST_METHOD_TYPE, "const-method-type", k21c, kIndexProtoRef, kContinue | kThrow, 0, kVerifyRegA | kVerifyRegBPrototype) :const_method_type registerA8,B_BITS_0_15 is inst0=0xff ; registerA8 ; B_BITS_0_15 { #TODO pCode } #------------------------------------------------------------------------------------ ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FE_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFE, UNUSED_FE, "unused-fe", k10x, false, kUnknown, 0, kVerifyError) \ :unused_fe is inst0=0xfe { #no pCode } ================================================ FILE: pypcode/processors/Dalvik/data/languages/Dalvik_OpCode_FF_unused.sinc ================================================ #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------------ # V(0xFF, 
UNUSED_FF, "unused-ff", k10x, false, kUnknown, 0, kVerifyError) :unused_ff is inst0=0xff { #no pCode } ================================================ FILE: pypcode/processors/HCS08/data/languages/HC05-M68HC05TB.pspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HC05.cspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HC05.ldefs ================================================ HC05 (6805) Microcontroller Family HC05 (6805) Microcontroller Family - M68HC05TB ================================================ FILE: pypcode/processors/HCS08/data/languages/HC05.pspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HC05.slaspec ================================================ # sleigh specification file for Freescale HC05 (6805, 68HC05) @define HC05 "1" @include "HCS_HC.sinc" ================================================ FILE: pypcode/processors/HCS08/data/languages/HC08-MC68HC908QY4.pspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HC08.ldefs ================================================ HC08 Microcontroller Family HC08 Microcontroller Family - MC68HC908QY4 ================================================ FILE: pypcode/processors/HCS08/data/languages/HC08.pspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HC08.slaspec ================================================ # sleigh specification file for Freescale HC08 (68HC08) @define HC08 "1" @include "HCS_HC.sinc" ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS08-MC9S08GB60.pspec 
================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS08.cspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS08.ldefs ================================================ HCS08 Microcontroller Family HCS08 Microcontroller Family - MC9S08GB60 ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS08.opinion ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS08.pspec ================================================ ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS08.slaspec ================================================ # sleigh specification file for Freescale HCS08 (68HCS08) @define HCS08 "1" @include "HCS_HC.sinc" ================================================ FILE: pypcode/processors/HCS08/data/languages/HCS_HC.sinc ================================================ # common include file for HCS08, HC08 and HC05(6805) constructors define endian=big; define alignment=1; define space RAM type=ram_space size=2 default; define space register type=register_space size=1; @define VECTOR_SWI "0xFFFC" ################################################################ # Registers ################################################################ define register offset=0x00 size=1 [ A ]; @if defined(HC05) define register offset=0x10 size=1 [ X ]; @elif defined(HCS08) || defined(HC08) define register offset=0x10 size=2 [ HIX ]; # H:X in the manual define register offset=0x10 size=1 [ HI X ]; @endif define register offset=0x20 size=2 [ PC SP ]; define register offset=0x20 size=1 [ PCH PCL SPH SPL ]; define register offset=0x30 size=1 [ CCR ]; @if defined(HCS08) || defined(HC08) @define V "CCR[7,1]" # Two's 
complement overflow Flag @endif # "CCR[6,1]" # unused # "CCR[5,1]" # unused @define H "CCR[4,1]" # Half Carry Flag @define I "CCR[3,1]" # Maskable interrupt control bit @define N "CCR[2,1]" # Negative Flag @define Z "CCR[1,1]" # Zero Flag @define C "CCR[0,1]" # Carry/Borrow Flag ################################################################ # Tokens ################################################################ define token opbyte8 (8) op = (0,7) op4_7 = (4,7) op4_6 = (4,6) nIndex = (1,3) op0_0 = (0,0) ; define token opbyte16 (16) op16 = (0,15) ; define token data8 (8) imm8 = (0,7) simm8 = (0,7) signed rel = (0,7) signed ; define token data16 (16) imm16 = (0,15) ; ################################################################ # Pseudo Instructions ################################################################ @if defined(HCS08) || defined(HC08) || defined(HC05) define pcodeop readIRQ; define pcodeop stop; define pcodeop wait; @endif @if defined(HCS08) || defined(HC08) define pcodeop decimalAdjustAccumulator; define pcodeop decimalAdjustCarry; @endif @if defined(HCS08) define pcodeop backgroundDebugMode; @endif ################################################################ # Addressing tables ################################################################ @if defined(HCS08) || defined(HC08) || defined(HC05) opr8a_8: imm8 is imm8 { export *:1 imm8; } opr16a_8: imm16 is imm16 { export *:1 imm16; } iopr8i: "#"imm8 is imm8 { export *[const]:1 imm8; } @endif @if defined(HCS08) || defined(HC08) opr8a_16: imm8 is imm8 { export *:2 imm8; } iopr8is: "#"simm8 is simm8 { export *[const]:1 simm8; } iopr16i: "#"imm16 is imm16 { export *[const]:2 imm16; } oprx8: imm8 is imm8 { export *[const]:1 imm8; } oprx8_8_SP: imm8,SP is imm8 & SP { address:2 = SP + zext(imm8:1); export *:1 address; } oprx16_8_SP: imm16,SP is imm16 & SP { address:2 = SP + imm16:2; export *:1 address; } @endif @if defined(HCS08) opr16a_16: imm16 is imm16 { export *:2 imm16; } oprx8_16_SP: imm8,SP is 
imm8 & SP { address:2 = SP + imm8; export *:2 address; } @endif # X or HIX addressing @if defined(HC05) oprx8_8_X: imm8,X is imm8 & X { address:2 = zext(X) + imm8; export address; } oprx16_8_X: imm16,X is imm16 & X { address:2 = zext(X) + imm16; export address; } comma_X: ","X is X { address:2 = zext(X); export address; } @endif @if defined(HCS08) || defined(HC08) oprx8_8_X: imm8,X is imm8 & X { address:2 = HIX + imm8; export address; } oprx16_8_X: imm16,X is imm16 & X { address:2 = HIX + imm16; export address; } comma_X: ","X is X { address:2 = HIX; export address; } @endif @if defined(HCS08) oprx8_16_X: imm8,X is imm8 & X { address:2 = HIX + imm8; export *:2 address; } oprx16_16_X: imm16,X is imm16 & X { address:2 = HIX + imm16; export *:2 address; } @endif # address decoding OP1: iopr8i is op4_6=2; iopr8i { export iopr8i; } OP1: opr8a_8 is op4_6=3; opr8a_8 { export opr8a_8; } OP1: opr16a_8 is op4_6=4; opr16a_8 { export opr16a_8; } OP1: oprx16_8_X is op4_6=5; oprx16_8_X { export *:1 oprx16_8_X; } OP1: oprx8_8_X is op4_6=6; oprx8_8_X { export *:1 oprx8_8_X; } OP1: comma_X is op4_6=7 & comma_X { export *:1 comma_X; } @if defined(HCS08) || defined(HC08) op2_opr8a: imm8 is imm8 { export *:1 imm8; } @endif ADDR: opr8a_8 is op4_6=3; opr8a_8 { export opr8a_8; } ADDR: opr16a_8 is op4_6=4; opr16a_8 { export opr16a_8; } ADDRI: oprx16_8_X is op4_6=5; oprx16_8_X { export oprx16_8_X; } ADDRI: oprx8_8_X is op4_6=6; oprx8_8_X { export oprx8_8_X; } ADDRI: comma_X is op4_6=7 & comma_X { export comma_X; } REL: reloc is rel [ reloc = inst_next + rel; ] { export *:1 reloc; } NthBit: nthbit is nIndex [ nthbit = (1 << nIndex); ] { export *[const]:1 nthbit; } ################################################################ # Macros ################################################################ @if defined(HCS08) || defined(HC08) macro additionFlags(operand1, operand2, result) { local AFmask = -1 >> 4; $(H) = (((operand1 & AFmask) + (operand2 & AFmask)) & (AFmask + 1)) != 0; $(V) = 
scarry(operand1, operand2); $(N) = result s< 0; $(C) = carry(operand1, operand2); $(Z) = (result == 0); } macro additionWithCarry(operand1, operand2, result) { local Ccopy = zext($(C)); local AFmask = -1 >> 4; $(H) = (((operand1 & AFmask) + (operand2 & AFmask) + Ccopy) & (AFmask + 1)) != 0; $(V) = scarry(operand1, operand2); $(C) = carry(operand1, operand2); local tempResult = operand1 + operand2; $(C) = $(C) || carry(tempResult, Ccopy); $(V) = $(V) ^^ scarry(tempResult, Ccopy); result = tempResult + Ccopy; $(N) =result s< 0; $(Z) = (result == 0); } @elif defined(HC05) # V is not implemented in HC05 macro additionFlags(operand1, operand2, result) { local AFmask = -1 >> 4; $(H) = (((operand1 & AFmask) + (operand2 & AFmask)) & (AFmask + 1)) != 0; $(N) =result s< 0; $(Z) = (result == 0); $(C) = carry(operand1, operand2); } macro additionWithCarry(operand1, operand2, result) { local Ccopy = zext($(C)); local AFmask = -1 >> 4; $(H) = (((operand1 & AFmask) + (operand2 & AFmask) + Ccopy) & (AFmask + 1)) != 0; $(C) = carry(operand1, operand2); local tempResult = operand1 + operand2; $(C) = $(C) || carry(tempResult, Ccopy); result = tempResult + Ccopy; $(N) =result s< 0; $(Z) = (result == 0); } @endif @if defined(HCS08) || defined(HC08) macro subtractionFlags(operand1, operand2, result) { $(V) = sborrow(operand1, operand2); $(N) = (result s< 0); $(Z) = (result == 0); $(C) = operand1 < operand2; } macro subtractWithCarry(operand1, operand2, result) { local Ccopy = zext($(C)); $(V) = sborrow(operand1, operand2); $(C) = operand1 < operand2; local tempResult = operand1 - operand2; $(C) = $(C) || (tempResult < Ccopy); $(V) = $(V) ^^ sborrow(tempResult, Ccopy); result = tempResult - Ccopy; $(N) = result s< 0; } @elif defined(HC05) macro subtractionFlags(operand1, operand2, result) { # V is not implemented in HC05 $(N) = (result s< 0); $(Z) = (result == 0); $(C) = operand1 < operand2; } macro subtractWithCarry(operand1, operand2, result) { local Ccopy = zext($(C)); # V is not 
implemented in HC05 $(C) = operand1 < operand2; local tempResult = operand1 - operand2; $(C) = $(C) || (tempResult < Ccopy); result = tempResult - Ccopy; $(N) = result s< 0; } @endif @if defined(HCS08) || defined(HC08) macro V_equals_0() { $(V) = 0; } @elif defined(HC05) macro V_equals_0() {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_equals_C() { $(V) = $(C); } @elif defined(HC05) macro V_equals_C() {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_equals_N_xor_C() { $(V) = $(N) ^ $(C); } @elif defined(HC05) macro V_equals_N_xor_C() {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_CMP_flag(operand, result) { $(V) = ( ((A & ~operand & ~result) | (~A & operand & result)) & 0b10000000 ) != 0; } @elif defined(HC05) macro V_CMP_flag(operand, result) {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_CPHX_flag(operand, result) { $(V) = ( ((HIX & ~operand & ~result) | (~HIX & operand & result)) & 0x8000 ) != 0; } @elif defined(HC05) macro V_CPHX_flag(operand, result) {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_CPX_flag(operand, result) { $(V) = ( ((X & ~operand & ~result) | (~X & operand & result)) & 0b10000000 ) != 0; } @elif defined(HC05) macro V_CPX_flag(operand, result) {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_DEC_flag(operand, result) { $(V) = ( (~result & operand) & 0b10000000 ) != 0; } @elif defined(HC05) macro V_DEC_flag(operand, result) {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) macro V_INC_flag(operand, result) { $(V) = ( (~operand & result) & 0b10000000 ) != 0; } @elif defined(HC05) macro V_INC_flag(operand, result) {} # empty macro because V is not implemented in 
HC05 @endif @if defined(HCS08) || defined(HC08) macro V_NEG_flag(operand, result) { $(V) = ( (operand & result) & 0b10000000 ) != 0; } @elif defined(HC05) macro V_NEG_flag(operand, result) {} # empty macro because V is not implemented in HC05 @endif @if defined(HCS08) || defined(HC08) || defined(HC05) macro Pull1(operand) { SP = SP + 1; operand = *:1 SP; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) macro Pull2(operand) { SP = SP + 1; operand = *:2 SP; SP = SP + 1; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) macro Push1(operand) { *:1 SP = operand; SP = SP - 1; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) macro Push2(operand) { SP = SP - 1; *:2 SP = operand; SP = SP - 1; } @endif ################################################################ # Constructors ################################################################ @if defined(HCS08) || defined(HC08) || defined(HC05) :ADC OP1 is (op=0xA9 | op=0xB9 | op=0xC9 | op=0xD9 | op=0xE9 | op=0xF9) ... & OP1 { op1:1 = OP1; additionWithCarry(A, op1, A); } @endif @if defined(HCS08) || defined(HC08) :ADC oprx16_8_SP is (op16=0x9ED9); oprx16_8_SP { op1:1 = oprx16_8_SP; additionWithCarry(A, op1, A); } @endif @if defined(HCS08) || defined(HC08) :ADC oprx8_8_SP is (op16=0x9EE9); oprx8_8_SP { op1:1 = oprx8_8_SP; additionWithCarry(A, op1, A); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ADD OP1 is (op=0xAB | op=0xBB | op=0xCB | op=0xDB | op=0xEB | op=0xFB) ... 
& OP1
# ADD (general addressing modes): A <- A + operand.
# additionFlags sets H, N, Z, C (and V on HC08/HCS08; the HC05 variant of the
# macro omits V, which is not implemented on that core).
{ op1:1 = OP1; result:1 = A + op1; additionFlags(A, op1,result); A = result; }
@endif

@if defined(HCS08) || defined(HC08)
# ADD 16-bit-offset,SP addressing (0x9E prebyte).
:ADD oprx16_8_SP is (op16=0x9EDB); oprx16_8_SP
{ op1:1 = oprx16_8_SP; result:1 = A + op1; additionFlags(A, op1,result); A = result; }
@endif

@if defined(HCS08) || defined(HC08)
# ADD 8-bit-offset,SP addressing (0x9E prebyte).
:ADD oprx8_8_SP is (op16=0x9EEB); oprx8_8_SP
{ op1:1 = oprx8_8_SP; result:1 = A + op1; additionFlags(A, op1,result); A = result; }
@endif

@if defined(HCS08) || defined(HC08)
# AIS: add signed 8-bit immediate to the stack pointer. No flags affected.
:AIS iopr8is is op=0xA7; iopr8is
{ SP = SP + sext(iopr8is); }
@endif

@if defined(HCS08) || defined(HC08)
# AIX: add signed 8-bit immediate to the 16-bit H:X index register. No flags affected.
:AIX iopr8is is op=0xAF; iopr8is
{ HIX = HIX + sext(iopr8is); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# AND (general addressing modes): A <- A & operand; sets N and Z,
# clears V (V_equals_0 is an empty macro on HC05).
:AND OP1 is (op=0xA4 | op=0xB4 | op=0xC4 | op=0xD4 | op=0xE4 | op=0xF4) ... & OP1
{ A = A & OP1; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }
@endif

@if defined(HCS08) || defined(HC08)
# AND 16-bit-offset,SP addressing.
:AND oprx16_8_SP is (op16=0x9ED4); oprx16_8_SP
{ A = A & oprx16_8_SP; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }
@endif

@if defined(HCS08) || defined(HC08)
# AND 8-bit-offset,SP addressing.
:AND oprx8_8_SP is (op16=0x9EE4); oprx8_8_SP
{ A = A & oprx8_8_SP; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# ASLA: arithmetic shift left of A; bit 7 goes into C, then V = N ^ C
# (V update is a no-op on HC05).
:ASLA is op=0x48
{ $(C) = A >> 7; A = A << 1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# ASLX: arithmetic shift left of X; same flag behaviour as ASLA.
:ASLX is op=0x58
{ $(C) = X >> 7; X = X << 1; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# ASL on a memory operand (direct / indexed forms).
:ASL OP1 is (op=0x38 | op=0x68 | op=0x78) ... 
& OP1 { tmp:1 = OP1; $(C) = tmp >> 7; tmp = tmp << 1; OP1 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) :ASL oprx8_8_SP is (op16=0x9E68); oprx8_8_SP { tmp:1 = oprx8_8_SP; $(C) = tmp >> 7; tmp = tmp << 1; oprx8_8_SP = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ASRA is op=0x47 { $(C) = A & 1; A = A s>> 1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ASRX is op=0x57 { $(C) = X & 1; X = X s>> 1; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ASR OP1 is (op=0x37 | op=0x67 | op=0x77) ... & OP1 { tmp:1 = OP1; $(C) = tmp & 1; tmp = tmp s>> 1; OP1 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) :ASR oprx8_8_SP is (op16=0x9E67); oprx8_8_SP { tmp:1 = oprx8_8_SP; $(C) = tmp & 1; tmp = tmp s>> 1; oprx8_8_SP = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BCC REL is op=0x24; REL { if ($(C) == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BCLR nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=1; opr8a_8 { opr8a_8 = opr8a_8 & ~NthBit; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BCS REL is op=0x25; REL { if ($(C) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BEQ REL is op=0x27; REL { if ($(Z) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) :BGE REL is op=0x90; REL { if (($(N) ^ $(V)) == 1) goto REL; } @endif @if defined(HCS08) :BGND is op=0x82 { backgroundDebugMode(); } @endif @if defined(HCS08) || defined(HC08) :BGT REL is op=0x92; REL { if (($(Z) | ($(N) ^ $(V))) == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BHCC REL is op=0x28; 
REL { if ($(H) == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BHCS REL is op=0x29; REL { if ($(H) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BHI REL is op=0x22; REL { if (($(C) | $(Z)) == 0) goto REL; } @endif #:BHS REL is op=0x24; REL See BCC @if defined(HCS08) || defined(HC08) || defined(HC05) :BIH REL is op=0x2F; REL { tmp:1 = readIRQ(); if (tmp == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BIL REL is op=0x2E; REL { tmp:1 = readIRQ(); if (tmp == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BIT OP1 is (op=0xA5 | op=0xB5 | op=0xC5 | op=0xD5 | op=0xE5 | op=0xF5) ... & OP1 { result:1 = A & OP1; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :BIT oprx16_8_SP is (op16=0x9ED5); oprx16_8_SP { result:1 = A & oprx16_8_SP; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :BIT oprx8_8_SP is (op16=0x9EE5); oprx8_8_SP { result:1 = A & oprx8_8_SP; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :BLE REL is op=0x93; REL { if ($(Z) | ($(N) ^ $(V))) goto REL; } @endif #:BLO REL is op=0x25; REL see BCS @if defined(HCS08) || defined(HC08) || defined(HC05) :BLS REL is op=0x23; REL { if (($(C) | $(Z)) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) :BLT REL is op=0x91; REL { if (($(N) ^ $(V)) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BMC REL is op=0x2C; REL { if ($(I) == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BMI REL is op=0x2B; REL { if ($(N) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BMS REL is op=0x2D; REL { if ($(I) == 1) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BNE REL is op=0x26; REL { if ($(Z) == 0) goto REL; } @endif @if 
defined(HCS08) || defined(HC08) || defined(HC05) :BPL REL is op=0x2A; REL { if ($(N) == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BRA REL is op=0x20; REL { goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BRCLR nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=1; opr8a_8; REL { result:1 = opr8a_8 & NthBit; $(C) = (result != 0); if (result == 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) # branch never is a two-byte nop :BRN REL is op=0x21; REL { } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BRSET nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=0; opr8a_8; REL { result:1 = opr8a_8 & NthBit; $(C) = (result != 0); if (result != 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BSET nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=0; opr8a_8 { opr8a_8 = opr8a_8 | NthBit; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :BSR REL is op=0xAD; REL { tmp:2 = inst_next; Push2( tmp ); call REL; } @endif @if defined(HCS08) || defined(HC08) :CBEQ opr8a_8, REL is (op=0x31); opr8a_8; REL { if (A == opr8a_8) goto REL; } @endif @if defined(HCS08) || defined(HC08) :CBEQA iopr8i, REL is op=0x41; iopr8i; REL { if (A == iopr8i) goto REL; } @endif @if defined(HCS08) || defined(HC08) :CBEQX iopr8i, REL is op=0x51; iopr8i; REL { if (X == iopr8i) goto REL; } @endif @if defined(HCS08) || defined(HC08) :CBEQ oprx8, X"+,", REL is (op=0x61) & X; oprx8; REL { tmp:1 = *:1 (HIX + zext(oprx8)); HIX = HIX + 1; if (A == tmp) goto REL; } @endif @if defined(HCS08) || defined(HC08) :CBEQ ","X"+,", REL is (op=0x71) & X; REL { tmp:1 = *:1 (HIX); HIX = HIX + 1; if (A == tmp) goto REL; } @endif @if defined(HCS08) || defined(HC08) :CBEQ oprx8_8_SP, REL is (op16=0x9E61); oprx8_8_SP; REL { if (A == oprx8_8_SP) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :CLC is op=0x98 { $(C) = 0; } @endif @if defined(HCS08) || 
defined(HC08) || defined(HC05) :CLI is op=0x9A { $(I) = 0; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :CLRA is op=0x4F { A = 0; $(Z) = 1; $(N) = 0; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :CLRX is op=0x5F { X = 0; $(Z) = 1; $(N) = 0; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :CLRH is op=0x8C { HI = 0; $(Z) = 1; $(N) = 0; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :CLR OP1 is (op=0x3F | op=0x6F | op=0x7F) ... & OP1 { OP1 = 0; $(Z) = 1; $(N) = 0; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :CLR oprx8_8_SP is (op16=0x9E6F); oprx8_8_SP { oprx8_8_SP = 0; $(Z) = 1; $(N) = 0; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :CMP OP1 is (op=0xA1 | op=0xB1 | op=0xC1 | op=0xD1 | op=0xE1 | op=0xF1) ... & OP1 { op1:1 = OP1; tmp:1 = A - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > A); V_CMP_flag(op1, tmp); } @endif @if defined(HCS08) || defined(HC08) :CMP oprx16_8_SP is (op16=0x9ED1); oprx16_8_SP { op1:1 = oprx16_8_SP; tmp:1 = A - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > A); V_CMP_flag(op1, tmp); } @endif @if defined(HCS08) || defined(HC08) :CMP oprx8_8_SP is (op16=0x9EE1); oprx8_8_SP { op1:1 = oprx8_8_SP; tmp:1 = A - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > A); V_CMP_flag(op1, tmp); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :COMA is op=0x43 { A = ~A; $(Z) = (A == 0); $(N) = (A s< 0); $(C) = 1; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :COMX is op=0x53 { X = ~X; $(Z) = (X == 0); $(N) = (X s< 0); $(C) = 1; V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :COM OP1 is (op=0x33 | op=0x63 | op=0x73) ... 
& OP1
# COM (memory operand): one's complement; C is always set to 1,
# V cleared (no-op on HC05), N and Z from the result.
{ tmp:1 = ~OP1; OP1 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = 1; V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08)
# COM 8-bit-offset,SP addressing (0x9E prebyte).
:COM oprx8_8_SP is (op16=0x9E63); oprx8_8_SP
{ tmp:1 = ~oprx8_8_SP; oprx8_8_SP = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = 1; V_equals_0(); }
@endif

@if defined(HCS08)
# CPHX: compare H:X with a 16-bit operand (extended addressing, HCS08 only).
# Flags only; H:X is not modified. V via the dedicated V_CPHX_flag macro.
:CPHX opr16a_16 is (op=0x3E); opr16a_16
{ op1:2 = opr16a_16; tmp:2 = HIX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > HIX); V_CPHX_flag(op1, tmp); }
@endif

@if defined(HCS08) || defined(HC08)
# CPHX: compare H:X with a 16-bit immediate.
:CPHX iopr16i is (op=0x65); iopr16i
{ op1:2 = iopr16i; tmp:2 = HIX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > HIX); V_CPHX_flag(op1, tmp); }
@endif

@if defined(HCS08) || defined(HC08)
# CPHX: compare H:X with a 16-bit value loaded from a direct-page address.
:CPHX opr8a_16 is (op=0x75); opr8a_16
{ op1:2 = *:2 opr8a_16; tmp:2 = HIX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > HIX); V_CPHX_flag(op1, tmp); }
@endif

@if defined(HCS08)
# CPHX 8-bit-offset,SP addressing (HCS08 only).
:CPHX oprx8_16_SP is (op16=0x9EF3); oprx8_16_SP
{ op1:2 = oprx8_16_SP; tmp:2 = HIX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > HIX); V_CPHX_flag(op1, tmp); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# CPX: compare X with an 8-bit operand (general addressing modes). Flags only.
:CPX OP1 is (op=0xA3 | op=0xB3 | op=0xC3 | op=0xD3 | op=0xE3 | op=0xF3) ... 
& OP1 { op1:1 = OP1; tmp:1 = X - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > X); V_CPX_flag(op1, tmp); } @endif @if defined(HCS08) || defined(HC08) :CPX oprx16_8_SP is (op16=0x9ED3); oprx16_8_SP { op1:1 = oprx16_8_SP; tmp:1 = X - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > X); V_CPX_flag(op1, tmp); } @endif @if defined(HCS08) || defined(HC08) :CPX oprx8_8_SP is (op16=0x9EE3); oprx8_8_SP { op1:1 = oprx8_8_SP; tmp:1 = X - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > X); V_CPX_flag(op1, tmp); } @endif @if defined(HCS08) || defined(HC08) :DAA is op=0x72 { A = decimalAdjustAccumulator(A, $(C), $(H)); $(C) = decimalAdjustCarry(A, $(C), $(H)); $(Z) = (A == 0); $(N) = (A s< 0); # V is undefined } @endif @if defined(HCS08) || defined(HC08) :DBNZA REL is op=0x4B; REL { A = A - 1; if (A != 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) :DBNZX REL is op=0x5B; REL { X = X - 1; if (X != 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) :DBNZ OP1, REL is (op=0x3B | op=0x6B | op=0x7B) ... & OP1; REL { OP1 = OP1 - 1; if (OP1 != 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) :DBNZ oprx8_8_SP, REL is (op16=0x9E6B); oprx8_8_SP; REL { tmp:1 = oprx8_8_SP - 1; oprx8_8_SP = tmp; if (tmp != 0) goto REL; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :DECA is op=0x4A { tmp:1 = A; A = tmp - 1; $(Z) = (A == 0); $(N) = (A s< 0); V_DEC_flag(tmp, A); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :DECX is op=0x5A { tmp:1 = X; X = tmp - 1; $(Z) = (X == 0); $(N) = (X s< 0); V_DEC_flag(tmp, X); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :DEC OP1 is (op=0x3A | op=0x6A | op=0x7A) ... 
& OP1
# DEC (memory operand): decrement; N and Z from the result, V from the
# DEC-specific overflow rule (no-op on HC05). C is not affected.
{ tmp:1 = OP1; result:1 = tmp - 1; OP1 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_DEC_flag(tmp, result); }
@endif

@if defined(HCS08) || defined(HC08)
# DEC 8-bit-offset,SP addressing (0x9E prebyte).
:DEC oprx8_8_SP is (op16=0x9E6A); oprx8_8_SP
{ tmp:1 = oprx8_8_SP; result:1 = tmp - 1; oprx8_8_SP = result; $(Z) = (result == 0); $(N) = (result s< 0); V_DEC_flag(tmp, result); }
@endif

@if defined(HCS08) || defined(HC08)
# DIV: divide the 16-bit dividend H:A by X; quotient -> A, remainder -> H.
# C flags divide-by-zero or a quotient that overflows 8 bits.
# NOTE(review): when X == 0 the p-code division below is still emitted with a
# zero divisor (hardware leaves the result undefined in that case) — confirm
# downstream consumers tolerate this.
:DIV is op=0x52
{ tmp:2 = (zext(HI) << 8) | (zext(A)); resultQ:2 = tmp / zext(X); A = resultQ:1; resultR:2 = tmp % zext(X); HI = resultR:1; $(Z) = (A == 0); $(C) = (X == 0) | (resultQ > 0x00FF); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# EOR (general addressing modes): A <- A ^ operand; N and Z from result,
# V cleared (no-op on HC05).
:EOR OP1 is (op=0xA8 | op=0xB8 | op=0xC8 | op=0xD8 | op=0xE8 | op=0xF8) ... & OP1
{ op1:1 = OP1; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08)
# EOR 16-bit-offset,SP addressing.
:EOR oprx16_8_SP is (op16=0x9ED8); oprx16_8_SP
{ op1:1 = oprx16_8_SP; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08)
# EOR 8-bit-offset,SP addressing.
:EOR oprx8_8_SP is (op16=0x9EE8); oprx8_8_SP
{ op1:1 = oprx8_8_SP; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# INCA: increment A; V from the INC-specific overflow rule. C not affected.
:INCA is op=0x4C
{ tmp:1 = A; A = tmp + 1; $(Z) = (A == 0); $(N) = (A s< 0); V_INC_flag(tmp, A); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# INCX: increment X; same flag behaviour as INCA.
:INCX is op=0x5C
{ tmp:1 = X; X = tmp + 1; $(Z) = (X == 0); $(N) = (X s< 0); V_INC_flag(tmp, X); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# INC on a memory operand (direct / indexed forms).
:INC OP1 is (op=0x3C | op=0x6C | op=0x7C) ... 
& OP1
# INC (memory operand): increment; N and Z from the result, V from the
# INC-specific rule (no-op on HC05). C is not affected.
{ tmp:1 = OP1; result:1 = tmp + 1; OP1 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_INC_flag(tmp, result); }
@endif

@if defined(HCS08) || defined(HC08)
# INC 8-bit-offset,SP addressing (0x9E prebyte).
:INC oprx8_8_SP is (op16=0x9E6C); oprx8_8_SP
{ tmp:1 = oprx8_8_SP; result:1 = tmp + 1; oprx8_8_SP = result; $(Z) = (result == 0); $(N) = (result s< 0); V_INC_flag(tmp, result); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# JMP with a direct/extended target: ADDR exports the target address itself.
:JMP ADDR is (op=0xBC | op=0xCC) ... & ADDR
{ goto ADDR; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# JMP with an indexed target: ADDRI exports a computed address, so the
# branch is indirect through it.
:JMP ADDRI is (op=0xDC | op=0xEC | op=0xFC) ... & ADDRI
{ goto [ADDRI]; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# JSR, direct/extended target: push the 16-bit return address, then call.
:JSR ADDR is (op=0xBD | op=0xCD) ... & ADDR
{ tmp:2 = inst_next; Push2( tmp ); call ADDR; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# JSR, indexed target: push the return address, then call indirectly.
:JSR ADDRI is (op=0xDD | op=0xED | op=0xFD) ... & ADDRI
{ tmp:2 = inst_next; Push2( tmp ); call [ADDRI]; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# LDA (general addressing modes): load A; N and Z from the value, V cleared.
:LDA OP1 is (op=0xA6 | op=0xB6 | op=0xC6 | op=0xD6 | op=0xE6 | op=0xF6) ... 
& OP1 { A = OP1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :LDA oprx16_8_SP is (op16=0x9ED6); oprx16_8_SP { A = oprx16_8_SP; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :LDA oprx8_8_SP is (op16=0x9EE6); oprx8_8_SP { A = oprx8_8_SP; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :LDHX iopr16i is (op=0x45); iopr16i { HIX = iopr16i; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :LDHX opr8a_16 is (op=0x55); opr8a_16 { HIX = opr8a_16; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) :LDHX opr16a_16 is (op=0x32); opr16a_16 { HIX = opr16a_16; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) :LDHX ","X is (op16=0x9EAE) & X { HIX = *:2 (HIX); $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) :LDHX oprx16_16_X is (op16=0x9EBE); oprx16_16_X { HIX = oprx16_16_X; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) :LDHX oprx8_16_X is (op16=0x9ECE); oprx8_16_X { HIX = oprx8_16_X; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) :LDHX oprx8_16_SP is (op16=0x9EFE); oprx8_16_SP { HIX = oprx8_16_SP; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :LDX OP1 is (op=0xAE | op=0xBE | op=0xCE | op=0xDE | op=0xEE | op=0xFE) ... 
& OP1 { X = OP1; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :LDX oprx16_8_SP is (op16=0x9EDE); oprx16_8_SP { X = oprx16_8_SP; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :LDX oprx8_8_SP is (op16=0x9EEE); oprx8_8_SP { X = oprx8_8_SP; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif ## Logical Shift left is same as arithmetic shift left #:LSLA is op=0x48 #:LSLX is op=0x58 #:LSL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 @if defined(HCS08) || defined(HC08) || defined(HC05) :LSRA is op=0x44 { $(C) = (A & 1); A = (A >> 1); $(Z) = (A == 0); $(N) = 0; V_equals_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :LSRX is op=0x54 { $(C) = (X & 1); X = (X >> 1); $(Z) = (X == 0); $(N) = 0; V_equals_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :LSR OP1 is (op=0x34 | op=0x64 | op=0x74) ... & OP1 { tmp:1 = OP1; $(C) = tmp & 1; tmp = tmp >> 1; OP1 = tmp; $(Z) = (tmp == 0); $(N) = 0; V_equals_C(); } @endif @if defined(HCS08) || defined(HC08) :LSR oprx8_8_SP is (op16=0x9E64); oprx8_8_SP { tmp:1 = oprx8_8_SP; $(C) = tmp & 1; tmp = tmp >> 1; oprx8_8_SP = tmp; $(Z) = (tmp == 0); $(N) = 0; V_equals_C(); } @endif @if defined(HCS08) || defined(HC08) :MOV opr8a_8, op2_opr8a is (op=0x4E); opr8a_8; op2_opr8a { result:1 = opr8a_8; op2_opr8a = result; V_equals_0(); $(N) = (result s< 0); $(Z) = (result == 0); } @endif @if defined(HCS08) || defined(HC08) :MOV opr8a_8, X"+" is (op=0x5E); opr8a_8 & X { result:1 = opr8a_8; *:1 HIX = result; HIX = HIX + 1; V_equals_0(); $(N) = (result s< 0); $(Z) = (result == 0); } @endif @if defined(HCS08) || defined(HC08) :MOV iopr8i, op2_opr8a is (op=0x6E); iopr8i; op2_opr8a { result:1 = iopr8i; op2_opr8a = result; V_equals_0(); $(N) = (result s< 0); $(Z) = (result == 0); } @endif @if defined(HCS08) || defined(HC08) :MOV ","X"+," op2_opr8a is (op=0x7E) & X; op2_opr8a { result:1 = *:1 HIX; op2_opr8a = result; HIX = 
HIX + 1; V_equals_0(); $(N) = (result s< 0); $(Z) = (result == 0); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :MUL is op=0x42 { op1:2 = zext(A); op2:2 = zext(X); result:2 = op1 * op2; A = result:1; X = result(1); $(H) = 0; $(C) = 0; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :NEGA is op=0x40 { tmp:1 = A; A = -tmp; $(C) = (A != 0); $(Z) = (A == 0); $(N) = (A s< 0); V_NEG_flag(tmp, A); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :NEGX is op=0x50 { tmp:1 = X; X = -tmp; $(C) = (X != 0); $(Z) = (X == 0); $(N) = (X s< 0); V_NEG_flag(tmp, X); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :NEG OP1 is (op=0x30 | op=0x60 | op=0x70) ... & OP1 { tmp:1 = OP1; result:1 = -tmp; OP1 = result; $(C) = (result != 0); $(Z) = (result == 0); $(N) = (result s< 0); V_NEG_flag(tmp, result); } @endif @if defined(HCS08) || defined(HC08) :NEG oprx8_8_SP is (op16=0x9E60); oprx8_8_SP { tmp:1 = oprx8_8_SP; result:1 = -tmp; oprx8_8_SP = result; $(C) = (result != 0); $(Z) = (result == 0); $(N) = (result s< 0); V_NEG_flag(tmp, result); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :NOP is op = 0x9D { } @endif @if defined(HCS08) || defined(HC08) :NSA is op = 0x62 { A = (A >> 4) | (A << 4); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ORA OP1 is (op=0xAA | op=0xBA | op=0xCA | op=0xDA | op=0xEA | op=0xFA) ... 
& OP1 { A = A | OP1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :ORA oprx16_8_SP is (op16=0x9EDA); oprx16_8_SP { A = A | oprx16_8_SP; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :ORA oprx8_8_SP is (op16=0x9EEA); oprx8_8_SP { A = A | oprx8_8_SP; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :PSHA is op = 0x87 { Push1( A ); } @endif @if defined(HCS08) || defined(HC08) :PSHH is op = 0x8B { Push1( HI ); } @endif @if defined(HCS08) || defined(HC08) :PSHX is op = 0x89 { Push1( X ); } @endif @if defined(HCS08) || defined(HC08) :PULA is op = 0x86 { Pull1( A ); } @endif @if defined(HCS08) || defined(HC08) :PULH is op = 0x8A { Pull1( HI ); } @endif @if defined(HCS08) || defined(HC08) :PULX is op = 0x88 { Pull1( X ); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ROLA is op=0x49 { tmpC:1 = $(C) ; $(C) = A >> 7; A = (A << 1) | tmpC; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ROLX is op=0x59 { tmpC:1 = $(C); $(C) = X >> 7; X = (X << 1) | tmpC; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :ROL OP1 is (op=0x39 | op=0x69 | op=0x79) ... 
& OP1
# ROL (memory operand): rotate left through carry — old C enters bit 0,
# old bit 7 becomes the new C; then V = N ^ C (no-op on HC05).
{ tmpC:1 = $(C); op1:1 = OP1; $(C) = op1 >> 7; result:1 = (op1 << 1) | tmpC; OP1 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08)
# ROL 8-bit-offset,SP addressing (0x9E prebyte).
:ROL oprx8_8_SP is (op16=0x9E69); oprx8_8_SP
{ tmpC:1 = $(C); op1:1 = oprx8_8_SP; $(C) = op1 >> 7; result:1 = (op1 << 1) | tmpC; oprx8_8_SP = result; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# RORA: rotate A right through carry — old C enters bit 7, bit 0 becomes C.
:RORA is op=0x46
{ tmpC:1 = $(C) << 7; $(C) = A & 1; A = (A >> 1) | tmpC; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# RORX: rotate X right through carry; same flag behaviour as RORA.
:RORX is op=0x56
{ tmpC:1 = $(C) << 7; $(C) = X & 1; X = (X >> 1) | tmpC; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# ROR on a memory operand (direct / indexed forms).
:ROR OP1 is (op=0x36 | op=0x66 | op=0x76) ... & OP1
{ tmpC:1 = $(C) << 7; tmp:1 = OP1; $(C) = tmp & 1; tmp = (tmp >> 1) | tmpC; OP1 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08)
# ROR 8-bit-offset,SP addressing.
:ROR oprx8_8_SP is (op16=0x9E66); oprx8_8_SP
{ tmpC:1 = $(C) << 7; tmp:1 = oprx8_8_SP; $(C) = tmp & 1; tmp = (tmp >> 1) | tmpC; oprx8_8_SP = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# RSP: reset stack pointer.
# NOTE(review): this writes the whole SP register with 0x00FF. The CPU08/HCS08
# reference manuals describe RSP as setting only the low byte of SP (high byte
# unaffected) — presumably an intentional simplification here; confirm.
:RSP is op = 0x9C
{ SP = 0xff; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# RTI: restore CCR, A, X and the 16-bit return address from the stack,
# in the reverse of the SWI/interrupt push order, then return.
:RTI is op = 0x80
{ Pull1( CCR ); Pull1( A ); Pull1( X ); tmp:2 = 0; Pull2( tmp ); return [tmp]; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# RTS: pop the 16-bit return address and return through it.
:RTS is op = 0x81
{ tmp:2 = 0; Pull2( tmp ); return [tmp]; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# SBC (general addressing modes): A <- A - operand - C, flags via
# subtractWithCarry (the HC05 macro variant omits V).
:SBC OP1 is (op=0xA2 | op=0xB2 | op=0xC2 | op=0xD2 | op=0xE2 | op=0xF2) ... 
& OP1
# SBC body: subtract with carry into A; all flag updates live in the macro.
{ op1:1 = OP1; subtractWithCarry(A, op1, A); }
@endif

@if defined(HCS08) || defined(HC08)
# SBC 16-bit-offset,SP addressing (0x9E prebyte).
:SBC oprx16_8_SP is (op16=0x9ED2); oprx16_8_SP
{ op1:1 = oprx16_8_SP; subtractWithCarry(A, op1, A); }
@endif

@if defined(HCS08) || defined(HC08)
# SBC 8-bit-offset,SP addressing.
:SBC oprx8_8_SP is (op16=0x9EE2); oprx8_8_SP
{ op1:1 = oprx8_8_SP; subtractWithCarry(A, op1, A); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# SEC: set the carry flag.
:SEC is op = 0x99
{ $(C) = 1; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# SEI: set the interrupt-mask flag.
:SEI is op = 0x9B
{ $(I) = 1; }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# STA (general addressing modes, no immediate form): store A;
# N and Z from the stored value, V cleared (no-op on HC05).
:STA OP1 is (op=0xB7 | op=0xC7 | op=0xD7 | op=0xE7 | op=0xF7) ... & OP1
{ OP1 = A; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08)
# STA 16-bit-offset,SP addressing.
:STA oprx16_8_SP is (op16=0x9ED7); oprx16_8_SP
{ oprx16_8_SP = A; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08)
# STA 8-bit-offset,SP addressing.
:STA oprx8_8_SP is (op16=0x9EE7); oprx8_8_SP
{ oprx8_8_SP = A; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08)
# STHX: store the 16-bit H:X register to a direct-page address.
# N is taken from the high byte (HI) of the 16-bit value.
:STHX opr8a_16 is (op=0x35); opr8a_16
{ opr8a_16 = HIX; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); }
@endif

@if defined(HCS08)
# STHX extended addressing (HCS08 only).
:STHX opr16a_16 is (op=0x96); opr16a_16
{ opr16a_16 = HIX; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); }
@endif

@if defined(HCS08)
# STHX 8-bit-offset,SP addressing (HCS08 only).
:STHX oprx8_16_SP is (op16=0x9EFF); oprx8_16_SP
{ oprx8_16_SP = HIX; $(Z) = (HIX == 0); $(N) = (HI s< 0); V_equals_0(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# STOP: clear the interrupt mask and halt via the stop() pcodeop.
:STOP is op=0x8E
{ $(I) = 0; stop(); }
@endif

@if defined(HCS08) || defined(HC08) || defined(HC05)
# STX (general addressing modes, no immediate form): store X.
:STX OP1 is (op=0xBF | op=0xCF | op=0xDF | op=0xEF | op=0xFF) ... 
& OP1 { OP1 = X; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :STX oprx16_8_SP is (op16=0x9EDF); oprx16_8_SP { oprx16_8_SP = X; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :STX oprx8_8_SP is (op16=0x9EEF); oprx8_8_SP { oprx8_8_SP = X; $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :SUB OP1 is (op=0xA0 | op=0xB0 | op=0xC0 | op=0xD0 | op=0xE0 | op=0xF0) ... & OP1 { op1:1 = OP1; result:1 = A - op1; subtractionFlags(A, op1,result); A = result; } @endif @if defined(HCS08) || defined(HC08) :SUB oprx16_8_SP is (op16=0x9ED0); oprx16_8_SP { op1:1 = oprx16_8_SP; result:1 = A - op1; subtractionFlags(A, op1,result); A = result; } @endif @if defined(HCS08) || defined(HC08) :SUB oprx8_8_SP is (op16=0x9EE0); oprx8_8_SP { op1:1 = oprx8_8_SP; result:1 = A - op1; subtractionFlags(A, op1,result); A = result; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :SWI is op=0x83 { tmp:2 = inst_next; Push2( tmp ); Push1( X ); Push1( A ); Push1( CCR ); $(I) = 1; addr:2 = $(VECTOR_SWI); call [addr]; } @endif @if defined(HCS08) || defined(HC08) :TAP is op=0x84 { CCR = A; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :TAX is op=0x97 { X = A; } @endif @if defined(HCS08) || defined(HC08) :TPA is op=0x85 { A = CCR; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :TSTA is op=0x4D { $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :TSTX is op=0x5D { $(Z) = (X == 0); $(N) = (X s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :TST OP1 is (op=0x3D | op=0x6D | op=0x7D) ... 
& OP1 { op1:1 = OP1; $(Z) = (op1 == 0); $(N) = (op1 s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :TST oprx8_8_SP is (op16=0x9E6D); oprx8_8_SP { op1:1 = oprx8_8_SP; $(Z) = (op1 == 0); $(N) = (op1 s< 0); V_equals_0(); } @endif @if defined(HCS08) || defined(HC08) :TSX is op=0x95 { HIX = SP + 1; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :TXA is op=0x9F { A = X; } @endif @if defined(HCS08) || defined(HC08) :TXS is op=0x94 { SP = HIX - 1; } @endif @if defined(HCS08) || defined(HC08) || defined(HC05) :WAIT is op=0x8f { $(I) = 0; wait(); } @endif ================================================ FILE: pypcode/processors/HCS08/data/manuals/HC05.idx ================================================ @M68HC05TB.pdf[ Rev. 2.0 1998 M68HC05 Family, Understanding Small Microcontrollers, NXP.com ] ADC, 222 ADD, 223 AND, 224 ASL, 225 ASLA, 225 ASLX, 225 ASR, 226 ASRA, 226 ASRX, 226 BCC, 227 BCLR, 228 BCS, 229 BEQ, 230 BHCC, 231 BHCS, 232 BHI, 233 BHS, 234 BIH, 235 BIL, 236 BIT, 237 BLO, 238 BLS, 239 BMC, 240 BMI, 241 BMS, 242 BNE, 243 BPL, 244 BRA, 245 BRCLR, 246 BRN, 247 BRSET, 248 BSET, 249 BSR, 250 CLC, 251 CLI, 252 CLR, 253 CLRA, 253 CLRX, 253 CMP, 254 COM, 255 COMA, 255 COMX, 255 CPX, 256 DEC, 257 DECA, 257 DECX, 257 EOR, 258 INC, 259 INCA, 259 INCX, 259 JMP, 260 JSR, 261 LDA, 262 LDX, 263 LSL, 264 LSLA, 264 LSLX, 264 LSR, 265 LSRA, 265 LSRX, 265 MUL, 266 NEG, 267 NEGA, 267 NEGX, 267 NOP, 268 ORA, 269 ROL, 270 ROLA, 270 ROLX, 270 ROR, 271 RORA, 271 RORX, 271 RSP, 272 RTI, 273 RTS, 274 SBC, 275 SEC, 276 SEI, 277 STA, 278 STOP, 279 STX, 280 SUB, 281 SWI, 282 TAX, 283 TST, 284 TSTA, 284 TSTX, 284 TXA, 285 WAIT, 286 ================================================ FILE: pypcode/processors/HCS08/data/manuals/HC08.idx ================================================ @CPU08RM.pdf[ Rev. 
4 02/2006 M68HC08 Microcontrollers, NXP.com ] ADC, 63 ADD, 64 AIS, 65 AIX, 66 AND, 67 ASL, 68 ASLA, 68 ASLX, 68 ASR, 69 ASRA, 69 ASRX, 69 BCC, 70 BCLR, 71 BCS, 72 BEQ, 73 BGE, 74 BGT, 75 BHCC, 76 BHCS, 77 BHI, 78 BHS, 79 BIH, 80 BIL, 81 BIT, 82 BLE, 83 BLO, 84 BLS, 85 BLT, 86 BMC, 87 BMI, 88 BMS, 89 BNE, 90 BPL, 91 BRA, 92 BRCLR, 94 BRN, 95 BRSET, 96 BSET, 97 BSR, 98 CBEQ, 99 CBEQA, 99 CBEQX, 99 CLC, 100 CLI, 101 CLR, 102 CLRA, 102 CLRX, 102 CLRH, 102 CMP, 103 COM, 104 COMA, 104 COMX, 104 CPHX, 105 CPX, 106 DAA, 107 DBNZ, 109 DBNZA, 109 DBNZX, 109 DEC, 110 DECA, 110 DECX, 110 DIV, 111 EOR, 112 INC, 113 INCA, 113 INCX, 113 JMP, 114 JSR, 115 LDA, 116 LDHX, 117 LDX, 118 LSL, 119 LSLA, 119 LSLX, 119 LSR, 120 LSRA, 120 LSRX, 120 MOV, 121 MUL, 122 NEG, 123 NEGA, 123 NEGX, 123 NOP, 124 NSA, 125 ORA, 126 PSHA, 127 PSHH, 128 PSHX, 129 PULA, 130 PULH, 131 PULX, 132 ROL, 133 ROLA, 133 ROLX, 133 ROR, 134 RORA, 134 RORX, 134 RSP, 135 RTI, 136 RTS, 137 SBC, 138 SEC, 139 SEI, 140 STA, 141 STHX, 142 STOP, 143 STX, 144 SUB, 145 SWI, 146 TAP, 147 TAX, 148 TPA, 149 TST, 150 TSTA, 150 TSTX, 150 TSX, 151 TXA, 152 TXS, 153 WAIT, 154 ================================================ FILE: pypcode/processors/HCS08/data/manuals/HCS08.idx ================================================ @HCS08RMV1.pdf[ Rev. 
2 05/2007 M68HCS08 Microcontrollers, NXP.com ] ADC, 201 ADD, 202 AIS, 203 AIX, 204 AND, 205 ASL, 206 ASLA, 206 ASLX, 206 ASR, 207 ASRA, 207 ASRX, 207 BCC, 208 BCLR, 209 BCS, 210 BEQ, 211 BGE, 212 BGND, 213 BGT, 214 BHCC, 215 BHCS, 216 BHI, 217 BHS, 218 BIH, 219 BIL, 220 BIT, 221 BLE, 222 BLO, 223 BLS, 224 BLT, 225 BMC, 226 BMI, 227 BMS, 228 BNE, 229 BPL, 230 BRA, 231 BRCLR, 233 BRN, 234 BRSET, 235 BSET, 236 BSR, 237 CBEQ, 238 CBEQA, 238 CBEQX, 238 CLC, 239 CLI, 240 CLR, 241 CLRA, 241 CLRX, 241 CLRH, 241 CMP, 242 COM, 243 COMA, 243 COMX, 243 CPHX, 244 CPX, 245 DAA, 246 DBNZ, 248 DBNZA, 248 DBNZX, 248 DEC, 249 DECA, 249 DECX, 249 DIV, 250 EOR, 251 INC, 252 INCA, 252 INCX, 252 JMP, 253 JSR, 254 LDA, 255 LDHX, 256 LDX, 257 LSL, 258 LSLA, 258 LSLX, 258 LSR, 259 LSRA, 259 LSRX, 259 MOV, 260 MUL, 261 NEG, 262 NEGA, 262 NEGX, 262 NOP, 263 NSA, 264 ORA, 265 PSHA, 266 PSHH, 267 PSHX, 268 PULA, 269 PULH, 270 PULX, 271 ROL, 272 ROLA, 272 ROLX, 272 ROR, 273 RORA, 273 RORX, 273 RSP, 274 RTI, 275 RTS, 276 SBC, 277 SEC, 278 SEI, 279 STA, 280 STHX, 281 STOP, 282 STX, 283 SUB, 284 SWI, 285 TAP, 286 TAX, 287 TPA, 288 TST, 289 TSTA, 289 TSTX, 289 TSX, 290 TXA, 291 TXS, 292 WAIT, 293 ================================================ FILE: pypcode/processors/HCS08/data/test-vectors/HC05_tv.s ================================================ .hc05 .area DIRECT (PAG) ;.setdp 0, DIRECT ;low_data1: ;.ds 1 .area PROGRAM (ABS) .org 0x80 LOW_SUB_TEST: RTS .org 0x2000 HIGH_SUB_TEST: RTS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ADC OP1 is (op=0xA9 | op=0xB9 | op=0xC9 | op=0xD9 | op=0xE9 | op=0xF9) ... 
& OP1 ADC #0xFE ADC *0xFE ADC 0xFEDC ADC 0xFEDC,X ADC 0xFE,X ADC ,X ; @if defined(HCS08) || defined(HC08) ; : ADC oprx16_8_SP is (op16=0x9ED9); oprx16_8_SP ; ADC 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ADC oprx8_8_SP is (op16=0x9EE9); oprx8_8_SP ; ADC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ADD OP1 is (op=0xAB | op=0xBB | op=0xCB | op=0xDB | op=0xEB | op=0xFB) ... & OP1 ADD #0xFE ADD *0xFE ADD 0xFEDC ADD 0xFEDC,X ADD 0xFE,X ADD ,X ; @if defined(HCS08) || defined(HC08) ; : ADD oprx16_8_SP is (op16=0x9EDB); oprx16_8_SP ; ADD 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ADD oprx8_8_SP is (op16=0x9EEB); oprx8_8_SP ; ADD 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : AIS iopr8is is op=0xA7; iopr8is ; AIS #0x7F ; AIS #-0x7F ; @if defined(HCS08) || defined(HC08) ; : AIX iopr8is is op=0xAF; iopr8is ; AIX #0x7F ; AIX #-0x7F ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : AND OP1 is (op=0xA4 | op=0xB4 | op=0xC4 | op=0xD4 | op=0xE4 | op=0xF4) ... & OP1 AND #0xFE AND *0xFE AND 0xFEDC AND 0xFEDC,X AND 0xFE,X AND ,X ; @if defined(HCS08) || defined(HC08) ; : AND oprx16_8_SP is (op16=0x9ED4); oprx16_8_SP ; AND 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : AND oprx8_8_SP is (op16=0x9EE4); oprx8_8_SP ; AND 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASLA is op=0x48 ASLA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASLX is op=0x58 ASLX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 ASL *0xFE ASL 0xFE,X ASL ,X ; @if defined(HCS08) || defined(HC08) ; : ASL oprx8_8_SP is (op16=0x9E68); oprx8_8_SP ; ASL 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASRA is op=0x47 ASRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASRX is op=0x57 ASRX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASR OP1 is (op=0x37 | op=0x67 | op=0x77) ... 
& OP1 ASR *0xFE ASR 0xFE,X ASR ,X ; @if defined(HCS08) || defined(HC08) ; : ASR oprx8_8_SP is (op16=0x9E67); oprx8_8_SP ; ASR 0xFE,S BACKWARDS1: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCC REL is op=0x24; REL BCC BACKWARDS1 BCC FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCLR nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=1; opr8a_8 BCLR #0, *0xFE BCLR #1, *0xED BCLR #2, *0xDC BCLR #3, *0xCB BCLR #4, *0xBA BCLR #5, *0xA9 BCLR #6, *0x98 BCLR #7, *0x87 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCS REL is op=0x25; REL BCS BACKWARDS1 BCS FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BEQ REL is op=0x27; REL BEQ BACKWARDS1 BEQ FORWARDS1 ; @if defined(HCS08) || defined(HC08) ; : BGE REL is op=0x90; REL ; BGE BACKWARDS1 ; BGE FORWARDS1 ; @if defined(HCS08) ; : BGND is op=0x82 ; BGND ; @if defined(HCS08) || defined(HC08) ; : BGT REL is op=0x92; REL ; BGT BACKWARDS1 ; BGT FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHCC REL is op=0x28; REL BHCC BACKWARDS1 BHCC FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHCS REL is op=0x29; REL BHCS BACKWARDS1 BHCS FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHI REL is op=0x22; REL BHI BACKWARDS1 BHI FORWARDS1 ; :BHS REL is op=0x24; REL See BCC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIH REL is op=0x2F; REL BIH BACKWARDS1 BIH FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIL REL is op=0x2E; REL BIL BACKWARDS1 BIL FORWARDS1 FORWARDS1: BACKWARDS2: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIT OP1 is (op=0xA5 | op=0xB5 | op=0xC5 | op=0xD5 | op=0xE5 | op=0xF5) ... 
& OP1 BIT #0xFE BIT *0xFE BIT 0xFEDC BIT 0xFEDC,X BIT 0xFE,X BIT ,X ; @if defined(HCS08) || defined(HC08) ; : BIT oprx16_8_SP is (op16=0x9ED5); oprx16_8_SP ; BIT 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : BIT oprx8_8_SP is (op16=0x9EE5); oprx8_8_SP ; BIT 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : BLE REL is op=0x93; REL ; BLE BACKWARDS2 ; BLE FORWARDS2 ; :BLO REL is op=0x25; REL see BCS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BLS REL is op=0x23; REL BLS BACKWARDS2 BLS FORWARDS2 ; @if defined(HCS08) || defined(HC08) ; : BLT REL is op=0x91; REL ; BLT BACKWARDS2 ; BLT FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMC REL is op=0x2C; REL BMC BACKWARDS2 BMC FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMI REL is op=0x2B; REL BMI BACKWARDS2 BMI FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMS REL is op=0x2D; REL BMS BACKWARDS2 BMS FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BNE REL is op=0x26; REL BNE BACKWARDS2 BNE FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BPL REL is op=0x2A; REL BPL BACKWARDS2 BPL FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRA REL is op=0x20; REL BRA BACKWARDS2 BRA FORWARDS2 FORWARDS2: BACKWARDS3: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRCLR nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=1; opr8a_8; REL BRCLR #0, *0xFE,BACKWARDS3 BRCLR #1, *0xED,BACKWARDS3 BRCLR #2, *0xDC,BACKWARDS3 BRCLR #3, *0xCB,BACKWARDS3 BRCLR #4, *0xBA,BACKWARDS3 BRCLR #5, *0xA9,BACKWARDS3 BRCLR #6, *0x98,BACKWARDS3 BRCLR #7, *0x87,BACKWARDS3 BRCLR #0, *0xFE,FORWARDS3 BRCLR #1, *0xED,FORWARDS3 BRCLR #2, *0xDC,FORWARDS3 BRCLR #3, *0xCB,FORWARDS3 BRCLR #4, *0xBA,FORWARDS3 BRCLR #5, *0xA9,FORWARDS3 BRCLR #6, *0x98,FORWARDS3 BRCLR #7, *0x87,FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; branch never is a two-byte nop ; : BRN REL is op=0x21; REL BRN 
BACKWARDS3 BRN FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRSET nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=0; opr8a_8; REL BRSET #0, *0xFE,BACKWARDS3 BRSET #1, *0xED,BACKWARDS3 BRSET #2, *0xDC,BACKWARDS3 BRSET #3, *0xCB,BACKWARDS3 BRSET #4, *0xBA,BACKWARDS3 BRSET #5, *0xA9,BACKWARDS3 BRSET #6, *0x98,BACKWARDS3 BRSET #7, *0x87,BACKWARDS3 BRSET #0, *0xFE,FORWARDS3 BRSET #1, *0xED,FORWARDS3 BRSET #2, *0xDC,FORWARDS3 BRSET #3, *0xCB,FORWARDS3 BRSET #4, *0xBA,FORWARDS3 BRSET #5, *0xA9,FORWARDS3 BRSET #6, *0x98,FORWARDS3 BRSET #7, *0x87,FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BSET nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=0; opr8a_8 BSET #0, *0xFE BSET #1, *0xED BSET #2, *0xDC BSET #3, *0xCB BSET #4, *0xBA BSET #5, *0xA9 BSET #6, *0x98 BSET #7, *0x87 FORWARDS3: BACKWARDS4: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BSR REL is op=0xAD; REL BSR BACKWARDS4 BSR FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ opr8a_8, REL is (op=0x31); opr8a_8; REL ; CBEQ *0xFE, BACKWARDS4 ; CBEQ *0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQA iopr8i, REL is op=0x41; iopr8i; REL ; CBEQA #0xFE, BACKWARDS4 ; CBEQA #0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQX iopr8i, REL is op=0x51; iopr8i; REL ; CBEQX #0xFE, BACKWARDS4 ; CBEQX #0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ oprx8, X"+", REL is (op=0x61) & X; oprx8; REL ; CBEQ *0xFE, X+, BACKWARDS4 ; CBEQ *0xFE, X+, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ ","X"+", REL is (op=0x71) & X; REL ; CBEQ ,X+, BACKWARDS4 ; CBEQ ,X+, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ oprx8_8_SP, REL is (op16=0x9E61); oprx8_8_SP; REL ; CBEQ 0xFE,S, BACKWARDS4 ; CBEQ 0xFE,S, FORWARDS4 FORWARDS4: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLC is op=0x98 CLC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLI is op=0x9A CLI ; @if defined(HCS08) || 
defined(HC08) || defined(HC05) ; : CLRA is op=0x4F CLRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLRX is op=0x5F CLRX ; @if defined(HCS08) || defined(HC08) ; : CLRH is op=0x8C ; CLRH ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLR OP1 is (op=0x3F | op=0x6F | op=0x7F) ... & OP1 CLR *0xFE CLR 0xFE,X CLR ,X ; @if defined(HCS08) || defined(HC08) ; : CLR oprx8_8_SP is (op16=0x9E6F); oprx8_8_SP ; CLR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CMP OP1 is (op=0xA1 | op=0xB1 | op=0xC1 | op=0xD1 | op=0xE1 | op=0xF1) ... & OP1 CMP #0xFE CMP *0xFE CMP 0xFEDC CMP 0xFEDC,X CMP 0xFE,X CMP ,X ; @if defined(HCS08) || defined(HC08) ; : CMP oprx16_8_SP is (op16=0x9ED1); oprx16_8_SP ; CMP 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : CMP oprx8_8_SP is (op16=0x9EE1); oprx8_8_SP ; CMP 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COMA is op=0x43 COMA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COMX is op=0x53 COMX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COM OP1 is (op=0x33 | op=0x63 | op=0x73) ... & OP1 COM *0xFE COM 0xFE,X COM ,X ; @if defined(HCS08) || defined(HC08) ; : COM oprx8_8_SP is (op16=0x9E63); oprx8_8_SP ; COM 0xFE,S ; @if defined(HCS08) ; : CPHX opr16a_16 is (op=0x3E); opr16a_16 ; CPHX 0xFEDC ; @if defined(HCS08) || defined(HC08) ; : CPHX iopr16i is (op=0x65); iopr16i ; CPHX #0xFEDC ; @if defined(HCS08) || defined(HC08) ; : CPHX opr8a_16 is (op=0x75); opr8a_16 ; CPHX *0xFE ; @if defined(HCS08) ; : CPHX oprx8_16_SP is (op16=0x9EF3); oprx8_16_SP ; CPHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CPX OP1 is (op=0xA3 | op=0xB3 | op=0xC3 | op=0xD3 | op=0xE3 | op=0xF3) ... 
& OP1 CPX #0xFE CPX *0xFE CPX 0xFEDC CPX 0xFEDC,X CPX 0xFE,X CPX ,X ; @if defined(HCS08) || defined(HC08) ; : CPX oprx16_8_SP is (op16=0x9ED3); oprx16_8_SP ; CPX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : CPX oprx8_8_SP is (op16=0x9EE3); oprx8_8_SP ; CPX 0xFE,S BACKWARDS5: ; @if defined(HCS08) || defined(HC08) ; : DAA is op=0x72 ; DAA ; @if defined(HCS08) || defined(HC08) ; : DBNZA REL is op=0x4B; REL ; DBNZA BACKWARDS5 ; DBNZA FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZX REL is op=0x5B; REL ; DBNZX BACKWARDS5 ; DBNZX FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZ OP1, REL is (op=0x3B | op=0x6B | op=0x7B) ... & OP1; REL ; DBNZ *0xFE, BACKWARDS5 ; DBNZ 0xFE,X, BACKWARDS5 ; DBNZ ,X, BACKWARDS5 ; DBNZ *0xFE, FORWARDS5 ; DBNZ 0xFE,X, FORWARDS5 ; DBNZ ,X, FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZ oprx8_8_SP, REL is (op16=0x9E6B); oprx8_8_SP; REL ; DBNZ 0xFE,S, BACKWARDS5 ; DBNZ 0xFE,S, FORWARDS5 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DECA is op=0x4A DECA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DECX is op=0x5A DECX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DEC OP1 is (op=0x3A | op=0x6A | op=0x7A) ... & OP1 DEC *0xFE DEC 0xFE,X DEC ,X ; @if defined(HCS08) || defined(HC08) ; : DEC oprx8_8_SP is (op16=0x9E6A); oprx8_8_SP ; DEC 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : DIV is op=0x52 ; DIV FORWARDS5: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : EOR OP1 is (op=0xA8 | op=0xB8 | op=0xC8 | op=0xD8 | op=0xE8 | op=0xF8) ... 
& OP1 EOR #0xFE EOR *0xFE EOR 0xFEDC EOR 0xFEDC,X EOR 0xFE,X EOR ,X ; @if defined(HCS08) || defined(HC08) ; : EOR oprx16_8_SP is (op16=0x9ED8); oprx16_8_SP ; EOR 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : EOR oprx8_8_SP is (op16=0x9EE8); oprx8_8_SP ; EOR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INCA is op=0x4C INCA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INCX is op=0x5C INCX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INC OP1 is (op=0x3C | op=0x6C | op=0x7C) ... & OP1 INC *0xFE INC 0xFE,X INC ,X ; @if defined(HCS08) || defined(HC08) ; : INC oprx8_8_SP is (op16=0x9E6C); oprx8_8_SP ; INC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JMP ADDR is (op=0xBC | op=0xCC) ... & ADDR JMP *LOW_SUB_TEST JMP HIGH_SUB_TEST ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JMP ADDRI is (op=0xDC | op=0xEC | op=0xFC) ... & ADDRI JMP 0xFEDC,X JMP 0xFE,X JMP ,X ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JSR ADDR is (op=0xBD | op=0xCD) ... & ADDR JSR *LOW_SUB_TEST JSR HIGH_SUB_TEST ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JSR ADDRI is (op=0xDD | op=0xED | op=0xFD) ... & ADDRI JSR 0xFEDC,X JSR 0xFE,X JSR ,X ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LDA OP1 is (op=0xA6 | op=0xB6 | op=0xC6 | op=0xD6 | op=0xE6 | op=0xF6) ... 
& OP1 LDA #0xFE LDA *0xFE LDA 0xFEDC LDA 0xFEDC,X LDA 0xFE,X LDA ,X ; @if defined(HCS08) || defined(HC08) ; : LDA oprx16_8_SP is (op16=0x9ED6); oprx16_8_SP ; LDA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : LDA oprx8_8_SP is (op16=0x9EE6); oprx8_8_SP ; LDA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : LDHX iopr16i is (op=0x45); iopr16i ; LDHX #0xFEDC ; @if defined(HCS08) || defined(HC08) ; : LDHX opr8a_16 is (op=0x55); opr8a_16 ; LDHX *0xFE ; @if defined(HCS08) ; : LDHX opr16a_16 is (op=0x32); opr16a_16 ; LDHX 0xFEDC ; @if defined(HCS08) ; : LDHX ","X is (op16=0x9EAE) & X ; LDHX ,X ; @if defined(HCS08) ; : LDHX oprx16_16_X is (op16=0x9EBE); oprx16_16_X ; LDHX 0xFEDC,X ; @if defined(HCS08) ; : LDHX oprx8_16_X is (op16=0x9ECE); oprx8_16_X ; LDHX 0xFE,X ; @if defined(HCS08) ; : LDHX oprx8_16_SP is (op16=0x9EFE); oprx8_16_SP ; LDHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LDX OP1 is (op=0xAE | op=0xBE | op=0xCE | op=0xDE | op=0xEE | op=0xFE) ... & OP1 LDX #0xFE LDX *0xFE LDX 0xFEDC LDX 0xFEDC,X LDX 0xFE,X LDX ,X ; @if defined(HCS08) || defined(HC08) ; : LDX oprx16_8_SP is (op16=0x9EDE); oprx16_8_SP ; LDX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : LDX oprx8_8_SP is (op16=0x9EEE); oprx8_8_SP ; LDX 0xFE,S ; ## Logical Shift left is same as arithmetic shift left ; :LSLA is op=0x48 ; :LSLX is op=0x58 ; :LSL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSRA is op=0x44 LSRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSRX is op=0x54 LSRX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSR OP1 is (op=0x34 | op=0x64 | op=0x74) ... 
& OP1 LSR *0xFE LSR 0xFE,X LSR ,X ; @if defined(HCS08) || defined(HC08) ; : LSR oprx8_8_SP is (op16=0x9E64); oprx8_8_SP ; LSR 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : MOV opr8a_8, op2_opr8a is (op=0x4E); opr8a_8; op2_opr8a ; MOV *0xFE, *0x97 ; @if defined(HCS08) || defined(HC08) ; : MOV opr8a_8, X"+" is (op=0x5E); opr8a_8 & X ; MOV 0xFE, X+ ; @if defined(HCS08) || defined(HC08) ; : MOV iopr8i, op2_opr8a is (op=0x6E); iopr8i; op2_opr8a ; MOV #0xFE, *0x97 ; @if defined(HCS08) || defined(HC08) ; : MOV ","X"+," op2_opr8a is (op=0x7E) & X; op2_opr8a ; MOV ,X+, *0xFE ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : MUL is op=0x42 MUL ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEGA is op=0x40 NEGA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEGX is op=0x50 NEGX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEG OP1 is (op=0x30 | op=0x60 | op=0x70) ... & OP1 NEG *0xFE NEG 0xFE,X NEG ,X ; @if defined(HCS08) || defined(HC08) ; : NEG oprx8_8_SP is (op16=0x9E60); oprx8_8_SP ; NEG 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NOP is op = 0x9D NOP ; @if defined(HCS08) || defined(HC08) ; : NSA is op = 0x62 ; NSA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ORA OP1 is (op=0xAA | op=0xBA | op=0xCA | op=0xDA | op=0xEA | op=0xFA) ... 
& OP1 ORA #0xFE ORA *0xFE ORA 0xFEDC ORA 0xFEDC,X ORA 0xFE,X ORA ,X ; @if defined(HCS08) || defined(HC08) ; : ORA oprx16_8_SP is (op16=0x9EDA); oprx16_8_SP ; ORA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ORA oprx8_8_SP is (op16=0x9EEA); oprx8_8_SP ; ORA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : PSHA is op = 0x87 ; PSHA ; @if defined(HCS08) || defined(HC08) ; : PSHH is op = 0x8B ; PSHH ; @if defined(HCS08) || defined(HC08) ; : PSHX is op = 0x89 ; PSHX ; @if defined(HCS08) || defined(HC08) ; : PULA is op = 0x86 ; PULA ; @if defined(HCS08) || defined(HC08) ; : PULH is op = 0x8A ; PULH ; @if defined(HCS08) || defined(HC08) ; : PULX is op = 0x88 ; PULX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROLA is op=0x49 ROLA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROLX is op=0x59 ROLX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROL OP1 is (op=0x39 | op=0x69 | op=0x79) ... & OP1 ROL *0xFE ROL 0xFE,X ROL ,X ; @if defined(HCS08) || defined(HC08) ; : ROL oprx8_8_SP is (op16=0x9E69); oprx8_8_SP ; ROL 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RORA is op=0x46 RORA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RORX is op=0x56 RORX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROR OP1 is (op=0x36 | op=0x66 | op=0x76) ... & OP1 ROR *0xFE ROR 0xFE,X ROR ,X ; @if defined(HCS08) || defined(HC08) ; : ROR oprx8_8_SP is (op16=0x9E66); oprx8_8_SP ; ROR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RSP is op = 0x9C RSP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RTI is op = 0x80 RTI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RTS is op = 0x81 RTS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SBC OP1 is (op=0xA2 | op=0xB2 | op=0xC2 | op=0xD2 | op=0xE2 | op=0xF2) ... 
& OP1 SBC #0xFE SBC *0xFE SBC 0xFEDC SBC 0xFEDC,X SBC 0xFE,X SBC ,X ; @if defined(HCS08) || defined(HC08) ; : SBC oprx16_8_SP is (op16=0x9ED2); oprx16_8_SP ; SBC 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : SBC oprx8_8_SP is (op16=0x9EE2); oprx8_8_SP ; SBC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SEC is op = 0x99 SEC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SEI is op = 0x9B SEI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STA OP1 is (op=0xB7 | op=0xC7 | op=0xD7 | op=0xE7 | op=0xF7) ... & OP1 STA *0xFE STA 0xFEDC STA 0xFEDC,X STA 0xFE,X STA ,X ; @if defined(HCS08) || defined(HC08) ; : STA oprx16_8_SP is (op16=0x9ED7); oprx16_8_SP ; STA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : STA oprx8_8_SP is (op16=0x9EE7); oprx8_8_SP ; STA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : STHX opr8a_16 is (op=0x35); opr8a_16 ; STHX *0xFE ; @if defined(HCS08) ; : STHX opr16a_16 is (op=0x96); opr16a_16 ; STHX 0xFEDC ; @if defined(HCS08) ; : STHX oprx8_16_SP is (op16=0x9EFF); oprx8_16_SP ; STHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STOP is op=0x8E STOP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STX OP1 is (op=0xBF | op=0xCF | op=0xDF | op=0xEF | op=0xFF) ... & OP1 STX *0xFE STX 0xFEDC STX 0xFEDC,X STX 0xFE,X STX ,X ; @if defined(HCS08) || defined(HC08) ; : STX oprx16_8_SP is (op16=0x9EDF); oprx16_8_SP ; STX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : STX oprx8_8_SP is (op16=0x9EEF); oprx8_8_SP ; STX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SUB OP1 is (op=0xA0 | op=0xB0 | op=0xC0 | op=0xD0 | op=0xE0 | op=0xF0) ... 
& OP1 SUB #0xFE SUB *0xFE SUB 0xFEDC SUB 0xFEDC,X SUB 0xFE,X SUB ,X ; @if defined(HCS08) || defined(HC08) ; : SUB oprx16_8_SP is (op16=0x9ED0); oprx16_8_SP ; SUB 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : SUB oprx8_8_SP is (op16=0x9EE0); oprx8_8_SP ; SUB 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SWI is op=0x83 SWI ; @if defined(HCS08) || defined(HC08) ; : TAP is op=0x84 ; TAP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TAX is op=0x97 TAX ; @if defined(HCS08) || defined(HC08) ; : TPA is op=0x85 ; TPA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TSTA is op=0x4D TSTA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TSTX is op=0x5D TSTX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TST OP1 is (op=0x3D | op=0x6D | op=0x7D) ... & OP1 TST *0xFE TST 0xFE,X TST ,X ; @if defined(HCS08) || defined(HC08) ; : TST oprx8_8_SP is (op16=0x9E6D); oprx8_8_SP ; TST 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : TSX is op=0x95 ; TSX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TXA is op=0x9F TXA ; @if defined(HCS08) || defined(HC08) ; : TXS is op=0x94 ; TXS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : WAIT is op=0x8f WAIT HERE: BRA HERE ================================================ FILE: pypcode/processors/HCS08/data/test-vectors/HC08_tv.s ================================================ .hc08 .area DIRECT (PAG) ;.setdp 0, DIRECT ;low_data1: ;.ds 1 .area PROGRAM (ABS) .org 0x80 LOW_SUB_TEST: RTS .org 0x2000 HIGH_SUB_TEST: RTS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ADC OP1 is (op=0xA9 | op=0xB9 | op=0xC9 | op=0xD9 | op=0xE9 | op=0xF9) ... 
& OP1 ADC #0xFE ADC *0xFE ADC 0xFEDC ADC 0xFEDC,X ADC 0xFE,X ADC ,X ; @if defined(HCS08) || defined(HC08) ; : ADC oprx16_8_SP is (op16=0x9ED9); oprx16_8_SP ADC 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ADC oprx8_8_SP is (op16=0x9EE9); oprx8_8_SP ADC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ADD OP1 is (op=0xAB | op=0xBB | op=0xCB | op=0xDB | op=0xEB | op=0xFB) ... & OP1 ADD #0xFE ADD *0xFE ADD 0xFEDC ADD 0xFEDC,X ADD 0xFE,X ADD ,X ; @if defined(HCS08) || defined(HC08) ; : ADD oprx16_8_SP is (op16=0x9EDB); oprx16_8_SP ADD 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ADD oprx8_8_SP is (op16=0x9EEB); oprx8_8_SP ADD 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : AIS iopr8is is op=0xA7; iopr8is AIS #0x7F AIS #-0x7F ; @if defined(HCS08) || defined(HC08) ; : AIX iopr8is is op=0xAF; iopr8is AIX #0x7F AIX #-0x7F ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : AND OP1 is (op=0xA4 | op=0xB4 | op=0xC4 | op=0xD4 | op=0xE4 | op=0xF4) ... & OP1 AND #0xFE AND *0xFE AND 0xFEDC AND 0xFEDC,X AND 0xFE,X AND ,X ; @if defined(HCS08) || defined(HC08) ; : AND oprx16_8_SP is (op16=0x9ED4); oprx16_8_SP AND 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : AND oprx8_8_SP is (op16=0x9EE4); oprx8_8_SP AND 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASLA is op=0x48 ASLA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASLX is op=0x58 ASLX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 ASL *0xFE ASL 0xFE,X ASL ,X ; @if defined(HCS08) || defined(HC08) ; : ASL oprx8_8_SP is (op16=0x9E68); oprx8_8_SP ASL 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASRA is op=0x47 ASRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASRX is op=0x57 ASRX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASR OP1 is (op=0x37 | op=0x67 | op=0x77) ... 
& OP1 ASR *0xFE ASR 0xFE,X ASR ,X ; @if defined(HCS08) || defined(HC08) ; : ASR oprx8_8_SP is (op16=0x9E67); oprx8_8_SP ASR 0xFE,S BACKWARDS1: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCC REL is op=0x24; REL BCC BACKWARDS1 BCC FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCLR nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=1; opr8a_8 BCLR #0, *0xFE BCLR #1, *0xED BCLR #2, *0xDC BCLR #3, *0xCB BCLR #4, *0xBA BCLR #5, *0xA9 BCLR #6, *0x98 BCLR #7, *0x87 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCS REL is op=0x25; REL BCS BACKWARDS1 BCS FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BEQ REL is op=0x27; REL BEQ BACKWARDS1 BEQ FORWARDS1 ; @if defined(HCS08) || defined(HC08) ; : BGE REL is op=0x90; REL BGE BACKWARDS1 BGE FORWARDS1 ; @if defined(HCS08) ; : BGND is op=0x82 ; BGND ; @if defined(HCS08) || defined(HC08) ; : BGT REL is op=0x92; REL BGT BACKWARDS1 BGT FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHCC REL is op=0x28; REL BHCC BACKWARDS1 BHCC FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHCS REL is op=0x29; REL BHCS BACKWARDS1 BHCS FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHI REL is op=0x22; REL BHI BACKWARDS1 BHI FORWARDS1 ; :BHS REL is op=0x24; REL See BCC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIH REL is op=0x2F; REL BIH BACKWARDS1 BIH FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIL REL is op=0x2E; REL BIL BACKWARDS1 BIL FORWARDS1 FORWARDS1: BACKWARDS2: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIT OP1 is (op=0xA5 | op=0xB5 | op=0xC5 | op=0xD5 | op=0xE5 | op=0xF5) ... 
& OP1 BIT #0xFE BIT *0xFE BIT 0xFEDC BIT 0xFEDC,X BIT 0xFE,X BIT ,X ; @if defined(HCS08) || defined(HC08) ; : BIT oprx16_8_SP is (op16=0x9ED5); oprx16_8_SP BIT 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : BIT oprx8_8_SP is (op16=0x9EE5); oprx8_8_SP BIT 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : BLE REL is op=0x93; REL BLE BACKWARDS2 BLE FORWARDS2 ; :BLO REL is op=0x25; REL see BCS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BLS REL is op=0x23; REL BLS BACKWARDS2 BLS FORWARDS2 ; @if defined(HCS08) || defined(HC08) ; : BLT REL is op=0x91; REL BLT BACKWARDS2 BLT FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMC REL is op=0x2C; REL BMC BACKWARDS2 BMC FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMI REL is op=0x2B; REL BMI BACKWARDS2 BMI FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMS REL is op=0x2D; REL BMS BACKWARDS2 BMS FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BNE REL is op=0x26; REL BNE BACKWARDS2 BNE FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BPL REL is op=0x2A; REL BPL BACKWARDS2 BPL FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRA REL is op=0x20; REL BRA BACKWARDS2 BRA FORWARDS2 FORWARDS2: BACKWARDS3: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRCLR nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=1; opr8a_8; REL BRCLR #0, *0xFE,BACKWARDS3 BRCLR #1, *0xED,BACKWARDS3 BRCLR #2, *0xDC,BACKWARDS3 BRCLR #3, *0xCB,BACKWARDS3 BRCLR #4, *0xBA,BACKWARDS3 BRCLR #5, *0xA9,BACKWARDS3 BRCLR #6, *0x98,BACKWARDS3 BRCLR #7, *0x87,BACKWARDS3 BRCLR #0, *0xFE,FORWARDS3 BRCLR #1, *0xED,FORWARDS3 BRCLR #2, *0xDC,FORWARDS3 BRCLR #3, *0xCB,FORWARDS3 BRCLR #4, *0xBA,FORWARDS3 BRCLR #5, *0xA9,FORWARDS3 BRCLR #6, *0x98,FORWARDS3 BRCLR #7, *0x87,FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; branch never is a two-byte nop ; : BRN REL is op=0x21; REL BRN BACKWARDS3 BRN 
FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRSET nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=0; opr8a_8; REL BRSET #0, *0xFE,BACKWARDS3 BRSET #1, *0xED,BACKWARDS3 BRSET #2, *0xDC,BACKWARDS3 BRSET #3, *0xCB,BACKWARDS3 BRSET #4, *0xBA,BACKWARDS3 BRSET #5, *0xA9,BACKWARDS3 BRSET #6, *0x98,BACKWARDS3 BRSET #7, *0x87,BACKWARDS3 BRSET #0, *0xFE,FORWARDS3 BRSET #1, *0xED,FORWARDS3 BRSET #2, *0xDC,FORWARDS3 BRSET #3, *0xCB,FORWARDS3 BRSET #4, *0xBA,FORWARDS3 BRSET #5, *0xA9,FORWARDS3 BRSET #6, *0x98,FORWARDS3 BRSET #7, *0x87,FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BSET nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=0; opr8a_8 BSET #0, *0xFE BSET #1, *0xED BSET #2, *0xDC BSET #3, *0xCB BSET #4, *0xBA BSET #5, *0xA9 BSET #6, *0x98 BSET #7, *0x87 FORWARDS3: BACKWARDS4: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BSR REL is op=0xAD; REL BSR BACKWARDS4 BSR FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ opr8a_8, REL is (op=0x31); opr8a_8; REL CBEQ *0xFE, BACKWARDS4 CBEQ *0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQA iopr8i, REL is op=0x41; iopr8i; REL CBEQA #0xFE, BACKWARDS4 CBEQA #0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQX iopr8i, REL is op=0x51; iopr8i; REL CBEQX #0xFE, BACKWARDS4 CBEQX #0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ oprx8, X"+", REL is (op=0x61) & X; oprx8; REL CBEQ *0xFE, X+, BACKWARDS4 CBEQ *0xFE, X+, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ ","X"+", REL is (op=0x71) & X; REL CBEQ ,X+, BACKWARDS4 CBEQ ,X+, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ oprx8_8_SP, REL is (op16=0x9E61); oprx8_8_SP; REL CBEQ 0xFE,S, BACKWARDS4 CBEQ 0xFE,S, FORWARDS4 FORWARDS4: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLC is op=0x98 CLC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLI is op=0x9A CLI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLRA is 
op=0x4F CLRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLRX is op=0x5F CLRX ; @if defined(HCS08) || defined(HC08) ; : CLRH is op=0x8C CLRH ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLR OP1 is (op=0x3F | op=0x6F | op=0x7F) ... & OP1 CLR *0xFE CLR 0xFE,X CLR ,X ; @if defined(HCS08) || defined(HC08) ; : CLR oprx8_8_SP is (op16=0x9E6F); oprx8_8_SP CLR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CMP OP1 is (op=0xA1 | op=0xB1 | op=0xC1 | op=0xD1 | op=0xE1 | op=0xF1) ... & OP1 CMP #0xFE CMP *0xFE CMP 0xFEDC CMP 0xFEDC,X CMP 0xFE,X CMP ,X ; @if defined(HCS08) || defined(HC08) ; : CMP oprx16_8_SP is (op16=0x9ED1); oprx16_8_SP CMP 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : CMP oprx8_8_SP is (op16=0x9EE1); oprx8_8_SP CMP 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COMA is op=0x43 COMA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COMX is op=0x53 COMX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COM OP1 is (op=0x33 | op=0x63 | op=0x73) ... & OP1 COM *0xFE COM 0xFE,X COM ,X ; @if defined(HCS08) || defined(HC08) ; : COM oprx8_8_SP is (op16=0x9E63); oprx8_8_SP COM 0xFE,S ; @if defined(HCS08) ; : CPHX opr16a_16 is (op=0x3E); opr16a_16 ; CPHX 0xFEDC ; @if defined(HCS08) || defined(HC08) ; : CPHX iopr16i is (op=0x65); iopr16i CPHX #0xFEDC ; @if defined(HCS08) || defined(HC08) ; : CPHX opr8a_16 is (op=0x75); opr8a_16 CPHX *0xFE ; @if defined(HCS08) ; : CPHX oprx8_16_SP is (op16=0x9EF3); oprx8_16_SP ; CPHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CPX OP1 is (op=0xA3 | op=0xB3 | op=0xC3 | op=0xD3 | op=0xE3 | op=0xF3) ... 
& OP1 CPX #0xFE CPX *0xFE CPX 0xFEDC CPX 0xFEDC,X CPX 0xFE,X CPX ,X ; @if defined(HCS08) || defined(HC08) ; : CPX oprx16_8_SP is (op16=0x9ED3); oprx16_8_SP CPX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : CPX oprx8_8_SP is (op16=0x9EE3); oprx8_8_SP CPX 0xFE,S BACKWARDS5: ; @if defined(HCS08) || defined(HC08) ; : DAA is op=0x72 DAA ; @if defined(HCS08) || defined(HC08) ; : DBNZA REL is op=0x4B; REL DBNZA BACKWARDS5 DBNZA FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZX REL is op=0x5B; REL DBNZX BACKWARDS5 DBNZX FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZ OP1, REL is (op=0x3B | op=0x6B | op=0x7B) ... & OP1; REL DBNZ *0xFE, BACKWARDS5 DBNZ 0xFE,X, BACKWARDS5 DBNZ ,X, BACKWARDS5 DBNZ *0xFE, FORWARDS5 DBNZ 0xFE,X, FORWARDS5 DBNZ ,X, FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZ oprx8_8_SP, REL is (op16=0x9E6B); oprx8_8_SP; REL DBNZ 0xFE,S, BACKWARDS5 DBNZ 0xFE,S, FORWARDS5 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DECA is op=0x4A DECA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DECX is op=0x5A DECX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DEC OP1 is (op=0x3A | op=0x6A | op=0x7A) ... & OP1 DEC *0xFE DEC 0xFE,X DEC ,X ; @if defined(HCS08) || defined(HC08) ; : DEC oprx8_8_SP is (op16=0x9E6A); oprx8_8_SP DEC 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : DIV is op=0x52 DIV FORWARDS5: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : EOR OP1 is (op=0xA8 | op=0xB8 | op=0xC8 | op=0xD8 | op=0xE8 | op=0xF8) ... 
& OP1 EOR #0xFE EOR *0xFE EOR 0xFEDC EOR 0xFEDC,X EOR 0xFE,X EOR ,X ; @if defined(HCS08) || defined(HC08) ; : EOR oprx16_8_SP is (op16=0x9ED8); oprx16_8_SP EOR 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : EOR oprx8_8_SP is (op16=0x9EE8); oprx8_8_SP EOR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INCA is op=0x4C INCA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INCX is op=0x5C INCX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INC OP1 is (op=0x3C | op=0x6C | op=0x7C) ... & OP1 INC *0xFE INC 0xFE,X INC ,X ; @if defined(HCS08) || defined(HC08) ; : INC oprx8_8_SP is (op16=0x9E6C); oprx8_8_SP INC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JMP ADDR is (op=0xBC | op=0xCC) ... & ADDR JMP *LOW_SUB_TEST JMP HIGH_SUB_TEST ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JMP ADDRI is (op=0xDC | op=0xEC | op=0xFC) ... & ADDRI JMP 0xFEDC,X JMP 0xFE,X JMP ,X ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JSR ADDR is (op=0xBD | op=0xCD) ... & ADDR JSR *LOW_SUB_TEST JSR HIGH_SUB_TEST ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JSR ADDRI is (op=0xDD | op=0xED | op=0xFD) ... & ADDRI JSR 0xFEDC,X JSR 0xFE,X JSR ,X ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LDA OP1 is (op=0xA6 | op=0xB6 | op=0xC6 | op=0xD6 | op=0xE6 | op=0xF6) ... 
& OP1 LDA #0xFE LDA *0xFE LDA 0xFEDC LDA 0xFEDC,X LDA 0xFE,X LDA ,X ; @if defined(HCS08) || defined(HC08) ; : LDA oprx16_8_SP is (op16=0x9ED6); oprx16_8_SP LDA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : LDA oprx8_8_SP is (op16=0x9EE6); oprx8_8_SP LDA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : LDHX iopr16i is (op=0x45); iopr16i LDHX #0xFEDC ; @if defined(HCS08) || defined(HC08) ; : LDHX opr8a_16 is (op=0x55); opr8a_16 LDHX *0xFE ; @if defined(HCS08) ; : LDHX opr16a_16 is (op=0x32); opr16a_16 ; LDHX 0xFEDC ; @if defined(HCS08) ; : LDHX ","X is (op16=0x9EAE) & X ; LDHX ,X ; @if defined(HCS08) ; : LDHX oprx16_16_X is (op16=0x9EBE); oprx16_16_X ; LDHX 0xFEDC,X ; @if defined(HCS08) ; : LDHX oprx8_16_X is (op16=0x9ECE); oprx8_16_X ; LDHX 0xFE,X ; @if defined(HCS08) ; : LDHX oprx8_16_SP is (op16=0x9EFE); oprx8_16_SP ; LDHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LDX OP1 is (op=0xAE | op=0xBE | op=0xCE | op=0xDE | op=0xEE | op=0xFE) ... & OP1 LDX #0xFE LDX *0xFE LDX 0xFEDC LDX 0xFEDC,X LDX 0xFE,X LDX ,X ; @if defined(HCS08) || defined(HC08) ; : LDX oprx16_8_SP is (op16=0x9EDE); oprx16_8_SP LDX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : LDX oprx8_8_SP is (op16=0x9EEE); oprx8_8_SP LDX 0xFE,S ; ## Logical Shift left is same as arithmetic shift left ; :LSLA is op=0x48 ; :LSLX is op=0x58 ; :LSL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSRA is op=0x44 LSRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSRX is op=0x54 LSRX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSR OP1 is (op=0x34 | op=0x64 | op=0x74) ... 
& OP1 LSR *0xFE LSR 0xFE,X LSR ,X ; @if defined(HCS08) || defined(HC08) ; : LSR oprx8_8_SP is (op16=0x9E64); oprx8_8_SP LSR 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : MOV opr8a_8, op2_opr8a is (op=0x4E); opr8a_8; op2_opr8a MOV *0xFE, *0x97 ; @if defined(HCS08) || defined(HC08) ; : MOV opr8a_8, X"+" is (op=0x5E); opr8a_8 & X MOV 0xFE, X+ ; @if defined(HCS08) || defined(HC08) ; : MOV iopr8i, op2_opr8a is (op=0x6E); iopr8i; op2_opr8a MOV #0xFE, *0x97 ; @if defined(HCS08) || defined(HC08) ; : MOV ","X"+," op2_opr8a is (op=0x7E) & X; op2_opr8a MOV ,X+, *0xFE ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : MUL is op=0x42 MUL ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEGA is op=0x40 NEGA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEGX is op=0x50 NEGX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEG OP1 is (op=0x30 | op=0x60 | op=0x70) ... & OP1 NEG *0xFE NEG 0xFE,X NEG ,X ; @if defined(HCS08) || defined(HC08) ; : NEG oprx8_8_SP is (op16=0x9E60); oprx8_8_SP NEG 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NOP is op = 0x9D NOP ; @if defined(HCS08) || defined(HC08) ; : NSA is op = 0x62 NSA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ORA OP1 is (op=0xAA | op=0xBA | op=0xCA | op=0xDA | op=0xEA | op=0xFA) ... 
& OP1 ORA #0xFE ORA *0xFE ORA 0xFEDC ORA 0xFEDC,X ORA 0xFE,X ORA ,X ; @if defined(HCS08) || defined(HC08) ; : ORA oprx16_8_SP is (op16=0x9EDA); oprx16_8_SP ORA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ORA oprx8_8_SP is (op16=0x9EEA); oprx8_8_SP ORA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : PSHA is op = 0x87 PSHA ; @if defined(HCS08) || defined(HC08) ; : PSHH is op = 0x8B PSHH ; @if defined(HCS08) || defined(HC08) ; : PSHX is op = 0x89 PSHX ; @if defined(HCS08) || defined(HC08) ; : PULA is op = 0x86 PULA ; @if defined(HCS08) || defined(HC08) ; : PULH is op = 0x8A PULH ; @if defined(HCS08) || defined(HC08) ; : PULX is op = 0x88 PULX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROLA is op=0x49 ROLA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROLX is op=0x59 ROLX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROL OP1 is (op=0x39 | op=0x69 | op=0x79) ... & OP1 ROL *0xFE ROL 0xFE,X ROL ,X ; @if defined(HCS08) || defined(HC08) ; : ROL oprx8_8_SP is (op16=0x9E69); oprx8_8_SP ROL 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RORA is op=0x46 RORA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RORX is op=0x56 RORX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROR OP1 is (op=0x36 | op=0x66 | op=0x76) ... & OP1 ROR *0xFE ROR 0xFE,X ROR ,X ; @if defined(HCS08) || defined(HC08) ; : ROR oprx8_8_SP is (op16=0x9E66); oprx8_8_SP ROR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RSP is op = 0x9C RSP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RTI is op = 0x80 RTI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RTS is op = 0x81 RTS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SBC OP1 is (op=0xA2 | op=0xB2 | op=0xC2 | op=0xD2 | op=0xE2 | op=0xF2) ... 
& OP1 SBC #0xFE SBC *0xFE SBC 0xFEDC SBC 0xFEDC,X SBC 0xFE,X SBC ,X ; @if defined(HCS08) || defined(HC08) ; : SBC oprx16_8_SP is (op16=0x9ED2); oprx16_8_SP SBC 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : SBC oprx8_8_SP is (op16=0x9EE2); oprx8_8_SP SBC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SEC is op = 0x99 SEC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SEI is op = 0x9B SEI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STA OP1 is (op=0xB7 | op=0xC7 | op=0xD7 | op=0xE7 | op=0xF7) ... & OP1 STA *0xFE STA 0xFEDC STA 0xFEDC,X STA 0xFE,X STA ,X ; @if defined(HCS08) || defined(HC08) ; : STA oprx16_8_SP is (op16=0x9ED7); oprx16_8_SP STA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : STA oprx8_8_SP is (op16=0x9EE7); oprx8_8_SP STA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : STHX opr8a_16 is (op=0x35); opr8a_16 STHX *0xFE ; @if defined(HCS08) ; : STHX opr16a_16 is (op=0x96); opr16a_16 ; STHX 0xFEDC ; @if defined(HCS08) ; : STHX oprx8_16_SP is (op16=0x9EFF); oprx8_16_SP ; STHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STOP is op=0x8E STOP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STX OP1 is (op=0xBF | op=0xCF | op=0xDF | op=0xEF | op=0xFF) ... & OP1 STX *0xFE STX 0xFEDC STX 0xFEDC,X STX 0xFE,X STX ,X ; @if defined(HCS08) || defined(HC08) ; : STX oprx16_8_SP is (op16=0x9EDF); oprx16_8_SP STX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : STX oprx8_8_SP is (op16=0x9EEF); oprx8_8_SP STX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SUB OP1 is (op=0xA0 | op=0xB0 | op=0xC0 | op=0xD0 | op=0xE0 | op=0xF0) ... 
& OP1 SUB #0xFE SUB *0xFE SUB 0xFEDC SUB 0xFEDC,X SUB 0xFE,X SUB ,X ; @if defined(HCS08) || defined(HC08) ; : SUB oprx16_8_SP is (op16=0x9ED0); oprx16_8_SP SUB 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : SUB oprx8_8_SP is (op16=0x9EE0); oprx8_8_SP SUB 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SWI is op=0x83 SWI ; @if defined(HCS08) || defined(HC08) ; : TAP is op=0x84 TAP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TAX is op=0x97 TAX ; @if defined(HCS08) || defined(HC08) ; : TPA is op=0x85 TPA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TSTA is op=0x4D TSTA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TSTX is op=0x5D TSTX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TST OP1 is (op=0x3D | op=0x6D | op=0x7D) ... & OP1 TST *0xFE TST 0xFE,X TST ,X ; @if defined(HCS08) || defined(HC08) ; : TST oprx8_8_SP is (op16=0x9E6D); oprx8_8_SP TST 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : TSX is op=0x95 TSX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TXA is op=0x9F TXA ; @if defined(HCS08) || defined(HC08) ; : TXS is op=0x94 TXS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : WAIT is op=0x8f WAIT HERE: BRA HERE ================================================ FILE: pypcode/processors/HCS08/data/test-vectors/HCS08_tv.s ================================================ .hcs08 .area DIRECT (PAG) ;.setdp 0, DIRECT ;low_data1: ;.ds 1 .area PROGRAM (ABS) .org 0x80 LOW_SUB_TEST: RTS .org 0x2000 HIGH_SUB_TEST: RTS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ADC OP1 is (op=0xA9 | op=0xB9 | op=0xC9 | op=0xD9 | op=0xE9 | op=0xF9) ... 
& OP1 ADC #0xFE ADC *0xFE ADC 0xFEDC ADC 0xFEDC,X ADC 0xFE,X ADC ,X ; @if defined(HCS08) || defined(HC08) ; : ADC oprx16_8_SP is (op16=0x9ED9); oprx16_8_SP ADC 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ADC oprx8_8_SP is (op16=0x9EE9); oprx8_8_SP ADC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ADD OP1 is (op=0xAB | op=0xBB | op=0xCB | op=0xDB | op=0xEB | op=0xFB) ... & OP1 ADD #0xFE ADD *0xFE ADD 0xFEDC ADD 0xFEDC,X ADD 0xFE,X ADD ,X ; @if defined(HCS08) || defined(HC08) ; : ADD oprx16_8_SP is (op16=0x9EDB); oprx16_8_SP ADD 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ADD oprx8_8_SP is (op16=0x9EEB); oprx8_8_SP ADD 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : AIS iopr8is is op=0xA7; iopr8is AIS #0x7F AIS #-0x7F ; @if defined(HCS08) || defined(HC08) ; : AIX iopr8is is op=0xAF; iopr8is AIX #0x7F AIX #-0x7F ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : AND OP1 is (op=0xA4 | op=0xB4 | op=0xC4 | op=0xD4 | op=0xE4 | op=0xF4) ... & OP1 AND #0xFE AND *0xFE AND 0xFEDC AND 0xFEDC,X AND 0xFE,X AND ,X ; @if defined(HCS08) || defined(HC08) ; : AND oprx16_8_SP is (op16=0x9ED4); oprx16_8_SP AND 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : AND oprx8_8_SP is (op16=0x9EE4); oprx8_8_SP AND 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASLA is op=0x48 ASLA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASLX is op=0x58 ASLX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 ASL *0xFE ASL 0xFE,X ASL ,X ; @if defined(HCS08) || defined(HC08) ; : ASL oprx8_8_SP is (op16=0x9E68); oprx8_8_SP ASL 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASRA is op=0x47 ASRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASRX is op=0x57 ASRX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ASR OP1 is (op=0x37 | op=0x67 | op=0x77) ... 
& OP1 ASR *0xFE ASR 0xFE,X ASR ,X ; @if defined(HCS08) || defined(HC08) ; : ASR oprx8_8_SP is (op16=0x9E67); oprx8_8_SP ASR 0xFE,S BACKWARDS1: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCC REL is op=0x24; REL BCC BACKWARDS1 BCC FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCLR nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=1; opr8a_8 BCLR #0, *0xFE BCLR #1, *0xED BCLR #2, *0xDC BCLR #3, *0xCB BCLR #4, *0xBA BCLR #5, *0xA9 BCLR #6, *0x98 BCLR #7, *0x87 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BCS REL is op=0x25; REL BCS BACKWARDS1 BCS FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BEQ REL is op=0x27; REL BEQ BACKWARDS1 BEQ FORWARDS1 ; @if defined(HCS08) || defined(HC08) ; : BGE REL is op=0x90; REL BGE BACKWARDS1 BGE FORWARDS1 ; @if defined(HCS08) ; : BGND is op=0x82 BGND ; @if defined(HCS08) || defined(HC08) ; : BGT REL is op=0x92; REL BGT BACKWARDS1 BGT FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHCC REL is op=0x28; REL BHCC BACKWARDS1 BHCC FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHCS REL is op=0x29; REL BHCS BACKWARDS1 BHCS FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BHI REL is op=0x22; REL BHI BACKWARDS1 BHI FORWARDS1 ; :BHS REL is op=0x24; REL See BCC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIH REL is op=0x2F; REL BIH BACKWARDS1 BIH FORWARDS1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIL REL is op=0x2E; REL BIL BACKWARDS1 BIL FORWARDS1 FORWARDS1: BACKWARDS2: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BIT OP1 is (op=0xA5 | op=0xB5 | op=0xC5 | op=0xD5 | op=0xE5 | op=0xF5) ... 
& OP1 BIT #0xFE BIT *0xFE BIT 0xFEDC BIT 0xFEDC,X BIT 0xFE,X BIT ,X ; @if defined(HCS08) || defined(HC08) ; : BIT oprx16_8_SP is (op16=0x9ED5); oprx16_8_SP BIT 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : BIT oprx8_8_SP is (op16=0x9EE5); oprx8_8_SP BIT 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : BLE REL is op=0x93; REL BLE BACKWARDS2 BLE FORWARDS2 ; :BLO REL is op=0x25; REL see BCS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BLS REL is op=0x23; REL BLS BACKWARDS2 BLS FORWARDS2 ; @if defined(HCS08) || defined(HC08) ; : BLT REL is op=0x91; REL BLT BACKWARDS2 BLT FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMC REL is op=0x2C; REL BMC BACKWARDS2 BMC FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMI REL is op=0x2B; REL BMI BACKWARDS2 BMI FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BMS REL is op=0x2D; REL BMS BACKWARDS2 BMS FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BNE REL is op=0x26; REL BNE BACKWARDS2 BNE FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BPL REL is op=0x2A; REL BPL BACKWARDS2 BPL FORWARDS2 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRA REL is op=0x20; REL BRA BACKWARDS2 BRA FORWARDS2 FORWARDS2: BACKWARDS3: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRCLR nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=1; opr8a_8; REL BRCLR #0, *0xFE,BACKWARDS3 BRCLR #1, *0xED,BACKWARDS3 BRCLR #2, *0xDC,BACKWARDS3 BRCLR #3, *0xCB,BACKWARDS3 BRCLR #4, *0xBA,BACKWARDS3 BRCLR #5, *0xA9,BACKWARDS3 BRCLR #6, *0x98,BACKWARDS3 BRCLR #7, *0x87,BACKWARDS3 BRCLR #0, *0xFE,FORWARDS3 BRCLR #1, *0xED,FORWARDS3 BRCLR #2, *0xDC,FORWARDS3 BRCLR #3, *0xCB,FORWARDS3 BRCLR #4, *0xBA,FORWARDS3 BRCLR #5, *0xA9,FORWARDS3 BRCLR #6, *0x98,FORWARDS3 BRCLR #7, *0x87,FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; branch never is a two-byte nop ; : BRN REL is op=0x21; REL BRN BACKWARDS3 BRN 
FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BRSET nIndex, opr8a_8, REL is op4_7=0 & nIndex & NthBit & op0_0=0; opr8a_8; REL BRSET #0, *0xFE,BACKWARDS3 BRSET #1, *0xED,BACKWARDS3 BRSET #2, *0xDC,BACKWARDS3 BRSET #3, *0xCB,BACKWARDS3 BRSET #4, *0xBA,BACKWARDS3 BRSET #5, *0xA9,BACKWARDS3 BRSET #6, *0x98,BACKWARDS3 BRSET #7, *0x87,BACKWARDS3 BRSET #0, *0xFE,FORWARDS3 BRSET #1, *0xED,FORWARDS3 BRSET #2, *0xDC,FORWARDS3 BRSET #3, *0xCB,FORWARDS3 BRSET #4, *0xBA,FORWARDS3 BRSET #5, *0xA9,FORWARDS3 BRSET #6, *0x98,FORWARDS3 BRSET #7, *0x87,FORWARDS3 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BSET nIndex, opr8a_8 is op4_7=1 & nIndex & NthBit & op0_0=0; opr8a_8 BSET #0, *0xFE BSET #1, *0xED BSET #2, *0xDC BSET #3, *0xCB BSET #4, *0xBA BSET #5, *0xA9 BSET #6, *0x98 BSET #7, *0x87 FORWARDS3: BACKWARDS4: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : BSR REL is op=0xAD; REL BSR BACKWARDS4 BSR FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ opr8a_8, REL is (op=0x31); opr8a_8; REL CBEQ *0xFE, BACKWARDS4 CBEQ *0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQA iopr8i, REL is op=0x41; iopr8i; REL CBEQA #0xFE, BACKWARDS4 CBEQA #0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQX iopr8i, REL is op=0x51; iopr8i; REL CBEQX #0xFE, BACKWARDS4 CBEQX #0xFE, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ oprx8, X"+", REL is (op=0x61) & X; oprx8; REL CBEQ *0xFE, X+, BACKWARDS4 CBEQ *0xFE, X+, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ ","X"+", REL is (op=0x71) & X; REL CBEQ ,X+, BACKWARDS4 CBEQ ,X+, FORWARDS4 ; @if defined(HCS08) || defined(HC08) ; : CBEQ oprx8_8_SP, REL is (op16=0x9E61); oprx8_8_SP; REL CBEQ 0xFE,S, BACKWARDS4 CBEQ 0xFE,S, FORWARDS4 FORWARDS4: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLC is op=0x98 CLC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLI is op=0x9A CLI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLRA is 
op=0x4F CLRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLRX is op=0x5F CLRX ; @if defined(HCS08) || defined(HC08) ; : CLRH is op=0x8C CLRH ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CLR OP1 is (op=0x3F | op=0x6F | op=0x7F) ... & OP1 CLR *0xFE CLR 0xFE,X CLR ,X ; @if defined(HCS08) || defined(HC08) ; : CLR oprx8_8_SP is (op16=0x9E6F); oprx8_8_SP CLR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CMP OP1 is (op=0xA1 | op=0xB1 | op=0xC1 | op=0xD1 | op=0xE1 | op=0xF1) ... & OP1 CMP #0xFE CMP *0xFE CMP 0xFEDC CMP 0xFEDC,X CMP 0xFE,X CMP ,X ; @if defined(HCS08) || defined(HC08) ; : CMP oprx16_8_SP is (op16=0x9ED1); oprx16_8_SP CMP 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : CMP oprx8_8_SP is (op16=0x9EE1); oprx8_8_SP CMP 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COMA is op=0x43 COMA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COMX is op=0x53 COMX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : COM OP1 is (op=0x33 | op=0x63 | op=0x73) ... & OP1 COM *0xFE COM 0xFE,X COM ,X ; @if defined(HCS08) || defined(HC08) ; : COM oprx8_8_SP is (op16=0x9E63); oprx8_8_SP COM 0xFE,S ; @if defined(HCS08) ; : CPHX opr16a_16 is (op=0x3E); opr16a_16 CPHX 0xFEDC ; @if defined(HCS08) || defined(HC08) ; : CPHX iopr16i is (op=0x65); iopr16i CPHX #0xFEDC ; @if defined(HCS08) || defined(HC08) ; : CPHX opr8a_16 is (op=0x75); opr8a_16 CPHX *0xFE ; @if defined(HCS08) ; : CPHX oprx8_16_SP is (op16=0x9EF3); oprx8_16_SP CPHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : CPX OP1 is (op=0xA3 | op=0xB3 | op=0xC3 | op=0xD3 | op=0xE3 | op=0xF3) ... 
& OP1 CPX #0xFE CPX *0xFE CPX 0xFEDC CPX 0xFEDC,X CPX 0xFE,X CPX ,X ; @if defined(HCS08) || defined(HC08) ; : CPX oprx16_8_SP is (op16=0x9ED3); oprx16_8_SP CPX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : CPX oprx8_8_SP is (op16=0x9EE3); oprx8_8_SP CPX 0xFE,S BACKWARDS5: ; @if defined(HCS08) || defined(HC08) ; : DAA is op=0x72 DAA ; @if defined(HCS08) || defined(HC08) ; : DBNZA REL is op=0x4B; REL DBNZA BACKWARDS5 DBNZA FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZX REL is op=0x5B; REL DBNZX BACKWARDS5 DBNZX FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZ OP1, REL is (op=0x3B | op=0x6B | op=0x7B) ... & OP1; REL DBNZ *0xFE, BACKWARDS5 DBNZ 0xFE,X, BACKWARDS5 DBNZ ,X, BACKWARDS5 DBNZ *0xFE, FORWARDS5 DBNZ 0xFE,X, FORWARDS5 DBNZ ,X, FORWARDS5 ; @if defined(HCS08) || defined(HC08) ; : DBNZ oprx8_8_SP, REL is (op16=0x9E6B); oprx8_8_SP; REL DBNZ 0xFE,S, BACKWARDS5 DBNZ 0xFE,S, FORWARDS5 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DECA is op=0x4A DECA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DECX is op=0x5A DECX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : DEC OP1 is (op=0x3A | op=0x6A | op=0x7A) ... & OP1 DEC *0xFE DEC 0xFE,X DEC ,X ; @if defined(HCS08) || defined(HC08) ; : DEC oprx8_8_SP is (op16=0x9E6A); oprx8_8_SP DEC 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : DIV is op=0x52 DIV FORWARDS5: ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : EOR OP1 is (op=0xA8 | op=0xB8 | op=0xC8 | op=0xD8 | op=0xE8 | op=0xF8) ... 
& OP1 EOR #0xFE EOR *0xFE EOR 0xFEDC EOR 0xFEDC,X EOR 0xFE,X EOR ,X ; @if defined(HCS08) || defined(HC08) ; : EOR oprx16_8_SP is (op16=0x9ED8); oprx16_8_SP EOR 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : EOR oprx8_8_SP is (op16=0x9EE8); oprx8_8_SP EOR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INCA is op=0x4C INCA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INCX is op=0x5C INCX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : INC OP1 is (op=0x3C | op=0x6C | op=0x7C) ... & OP1 INC *0xFE INC 0xFE,X INC ,X ; @if defined(HCS08) || defined(HC08) ; : INC oprx8_8_SP is (op16=0x9E6C); oprx8_8_SP INC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JMP ADDR is (op=0xBC | op=0xCC) ... & ADDR JMP *LOW_SUB_TEST JMP HIGH_SUB_TEST ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JMP ADDRI is (op=0xDC | op=0xEC | op=0xFC) ... & ADDRI JMP 0xFEDC,X JMP 0xFE,X JMP ,X ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JSR ADDR is (op=0xBD | op=0xCD) ... & ADDR JSR *LOW_SUB_TEST JSR HIGH_SUB_TEST ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : JSR ADDRI is (op=0xDD | op=0xED | op=0xFD) ... & ADDRI JSR 0xFEDC,X JSR 0xFE,X JSR ,X ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LDA OP1 is (op=0xA6 | op=0xB6 | op=0xC6 | op=0xD6 | op=0xE6 | op=0xF6) ... 
& OP1 LDA #0xFE LDA *0xFE LDA 0xFEDC LDA 0xFEDC,X LDA 0xFE,X LDA ,X ; @if defined(HCS08) || defined(HC08) ; : LDA oprx16_8_SP is (op16=0x9ED6); oprx16_8_SP LDA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : LDA oprx8_8_SP is (op16=0x9EE6); oprx8_8_SP LDA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : LDHX iopr16i is (op=0x45); iopr16i LDHX #0xFEDC ; @if defined(HCS08) || defined(HC08) ; : LDHX opr8a_16 is (op=0x55); opr8a_16 LDHX *0xFE ; @if defined(HCS08) ; : LDHX opr16a_16 is (op=0x32); opr16a_16 LDHX 0xFEDC ; @if defined(HCS08) ; : LDHX ","X is (op16=0x9EAE) & X LDHX ,X ; @if defined(HCS08) ; : LDHX oprx16_16_X is (op16=0x9EBE); oprx16_16_X LDHX 0xFEDC,X ; @if defined(HCS08) ; : LDHX oprx8_16_X is (op16=0x9ECE); oprx8_16_X LDHX 0xFE,X ; @if defined(HCS08) ; : LDHX oprx8_16_SP is (op16=0x9EFE); oprx8_16_SP LDHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LDX OP1 is (op=0xAE | op=0xBE | op=0xCE | op=0xDE | op=0xEE | op=0xFE) ... & OP1 LDX #0xFE LDX *0xFE LDX 0xFEDC LDX 0xFEDC,X LDX 0xFE,X LDX ,X ; @if defined(HCS08) || defined(HC08) ; : LDX oprx16_8_SP is (op16=0x9EDE); oprx16_8_SP LDX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : LDX oprx8_8_SP is (op16=0x9EEE); oprx8_8_SP LDX 0xFE,S ; ## Logical Shift left is same as arithmetic shift left ; :LSLA is op=0x48 ; :LSLX is op=0x58 ; :LSL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSRA is op=0x44 LSRA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSRX is op=0x54 LSRX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : LSR OP1 is (op=0x34 | op=0x64 | op=0x74) ... 
& OP1 LSR *0xFE LSR 0xFE,X LSR ,X ; @if defined(HCS08) || defined(HC08) ; : LSR oprx8_8_SP is (op16=0x9E64); oprx8_8_SP LSR 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : MOV opr8a_8, op2_opr8a is (op=0x4E); opr8a_8; op2_opr8a MOV *0xFE, *0x97 ; @if defined(HCS08) || defined(HC08) ; : MOV opr8a_8, X"+" is (op=0x5E); opr8a_8 & X MOV 0xFE, X+ ; @if defined(HCS08) || defined(HC08) ; : MOV iopr8i, op2_opr8a is (op=0x6E); iopr8i; op2_opr8a MOV #0xFE, *0x97 ; @if defined(HCS08) || defined(HC08) ; : MOV ","X"+," op2_opr8a is (op=0x7E) & X; op2_opr8a MOV ,X+, *0xFE ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : MUL is op=0x42 MUL ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEGA is op=0x40 NEGA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEGX is op=0x50 NEGX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NEG OP1 is (op=0x30 | op=0x60 | op=0x70) ... & OP1 NEG *0xFE NEG 0xFE,X NEG ,X ; @if defined(HCS08) || defined(HC08) ; : NEG oprx8_8_SP is (op16=0x9E60); oprx8_8_SP NEG 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : NOP is op = 0x9D NOP ; @if defined(HCS08) || defined(HC08) ; : NSA is op = 0x62 NSA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ORA OP1 is (op=0xAA | op=0xBA | op=0xCA | op=0xDA | op=0xEA | op=0xFA) ... 
& OP1 ORA #0xFE ORA *0xFE ORA 0xFEDC ORA 0xFEDC,X ORA 0xFE,X ORA ,X ; @if defined(HCS08) || defined(HC08) ; : ORA oprx16_8_SP is (op16=0x9EDA); oprx16_8_SP ORA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : ORA oprx8_8_SP is (op16=0x9EEA); oprx8_8_SP ORA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : PSHA is op = 0x87 PSHA ; @if defined(HCS08) || defined(HC08) ; : PSHH is op = 0x8B PSHH ; @if defined(HCS08) || defined(HC08) ; : PSHX is op = 0x89 PSHX ; @if defined(HCS08) || defined(HC08) ; : PULA is op = 0x86 PULA ; @if defined(HCS08) || defined(HC08) ; : PULH is op = 0x8A PULH ; @if defined(HCS08) || defined(HC08) ; : PULX is op = 0x88 PULX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROLA is op=0x49 ROLA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROLX is op=0x59 ROLX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROL OP1 is (op=0x39 | op=0x69 | op=0x79) ... & OP1 ROL *0xFE ROL 0xFE,X ROL ,X ; @if defined(HCS08) || defined(HC08) ; : ROL oprx8_8_SP is (op16=0x9E69); oprx8_8_SP ROL 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RORA is op=0x46 RORA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RORX is op=0x56 RORX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : ROR OP1 is (op=0x36 | op=0x66 | op=0x76) ... & OP1 ROR *0xFE ROR 0xFE,X ROR ,X ; @if defined(HCS08) || defined(HC08) ; : ROR oprx8_8_SP is (op16=0x9E66); oprx8_8_SP ROR 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RSP is op = 0x9C RSP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RTI is op = 0x80 RTI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : RTS is op = 0x81 RTS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SBC OP1 is (op=0xA2 | op=0xB2 | op=0xC2 | op=0xD2 | op=0xE2 | op=0xF2) ... 
& OP1 SBC #0xFE SBC *0xFE SBC 0xFEDC SBC 0xFEDC,X SBC 0xFE,X SBC ,X ; @if defined(HCS08) || defined(HC08) ; : SBC oprx16_8_SP is (op16=0x9ED2); oprx16_8_SP SBC 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : SBC oprx8_8_SP is (op16=0x9EE2); oprx8_8_SP SBC 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SEC is op = 0x99 SEC ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SEI is op = 0x9B SEI ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STA OP1 is (op=0xB7 | op=0xC7 | op=0xD7 | op=0xE7 | op=0xF7) ... & OP1 STA *0xFE STA 0xFEDC STA 0xFEDC,X STA 0xFE,X STA ,X ; @if defined(HCS08) || defined(HC08) ; : STA oprx16_8_SP is (op16=0x9ED7); oprx16_8_SP STA 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : STA oprx8_8_SP is (op16=0x9EE7); oprx8_8_SP STA 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : STHX opr8a_16 is (op=0x35); opr8a_16 STHX *0xFE ; @if defined(HCS08) ; : STHX opr16a_16 is (op=0x96); opr16a_16 STHX 0xFEDC ; @if defined(HCS08) ; : STHX oprx8_16_SP is (op16=0x9EFF); oprx8_16_SP STHX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STOP is op=0x8E STOP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : STX OP1 is (op=0xBF | op=0xCF | op=0xDF | op=0xEF | op=0xFF) ... & OP1 STX *0xFE STX 0xFEDC STX 0xFEDC,X STX 0xFE,X STX ,X ; @if defined(HCS08) || defined(HC08) ; : STX oprx16_8_SP is (op16=0x9EDF); oprx16_8_SP STX 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : STX oprx8_8_SP is (op16=0x9EEF); oprx8_8_SP STX 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SUB OP1 is (op=0xA0 | op=0xB0 | op=0xC0 | op=0xD0 | op=0xE0 | op=0xF0) ... 
& OP1 SUB #0xFE SUB *0xFE SUB 0xFEDC SUB 0xFEDC,X SUB 0xFE,X SUB ,X ; @if defined(HCS08) || defined(HC08) ; : SUB oprx16_8_SP is (op16=0x9ED0); oprx16_8_SP SUB 0xFEDC,S ; @if defined(HCS08) || defined(HC08) ; : SUB oprx8_8_SP is (op16=0x9EE0); oprx8_8_SP SUB 0xFE,S ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : SWI is op=0x83 SWI ; @if defined(HCS08) || defined(HC08) ; : TAP is op=0x84 TAP ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TAX is op=0x97 TAX ; @if defined(HCS08) || defined(HC08) ; : TPA is op=0x85 TPA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TSTA is op=0x4D TSTA ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TSTX is op=0x5D TSTX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TST OP1 is (op=0x3D | op=0x6D | op=0x7D) ... & OP1 TST *0xFE TST 0xFE,X TST ,X ; @if defined(HCS08) || defined(HC08) ; : TST oprx8_8_SP is (op16=0x9E6D); oprx8_8_SP TST 0xFE,S ; @if defined(HCS08) || defined(HC08) ; : TSX is op=0x95 TSX ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : TXA is op=0x9F TXA ; @if defined(HCS08) || defined(HC08) ; : TXS is op=0x94 TXS ; @if defined(HCS08) || defined(HC08) || defined(HC05) ; : WAIT is op=0x8f WAIT HERE: BRA HERE ================================================ FILE: pypcode/processors/HCS12/data/languages/HC12.cspec ================================================ ================================================ FILE: pypcode/processors/HCS12/data/languages/HC12.pspec ================================================ ================================================ FILE: pypcode/processors/HCS12/data/languages/HC12.slaspec ================================================ # sleigh specification file for Freescale HC12 (68HC12) @define HC12 "1" @define SIZE "2" @define MAXFLASHPage "0xFF" @include "HCS_HC12.sinc" ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12.cspec ================================================ 
================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12.ldefs ================================================ HC12 Microcontroller Family HCS12 Microcontroller Family HCS12X Microcontroller Family HCS12X Microcontroller Family ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12.opinion ================================================ ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12.pspec ================================================ ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12.slaspec ================================================ # sleigh specification file for Freescale HCS12 (68HCS12) @define HCS12 "1" @define SIZE "3" @define MAXFLASHPage "0xFF" @include "HCS_HC12.sinc" ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12X.cspec ================================================ R7 = SP; ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12X.pspec ================================================ ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS12X.slaspec ================================================ # sleigh specification file for Freescale HCS12 (68HCS12) @define HCS12 "1" @define HCS12X "1" @define SIZE "3" @define MAXFLASHPage "0xFF" @include "HCS_HC12.sinc" @include "XGATE.sinc" ================================================ FILE: pypcode/processors/HCS12/data/languages/HCS_HC12.sinc ================================================ # common include file for HCS12, and HC12 constructors define endian=big; define alignment=1; define space RAM type=ram_space size=$(SIZE) default; define space register type=register_space size=2; @define VECTOR_SWI "0xFFF6" @define VECTOR_TRAP "0xFFF8" 
################################################################ # Registers ################################################################ define register offset=0x00 size=1 [ A B ]; define register offset=0x00 size=2 [ D ]; # IX also referred to as X or x; but must be distinct from X bit in CCR # IY also referred to as Y or y; made IY to be consistent with IX define register offset=0x10 size=2 [ IX IY TMP2 TMP3 TMP1 ]; define register offset=0x10 size=1 [ IXH IXL IYH IYL TMP2H TMP2L TMP3H TMP3L TMP1H TMP1L ]; #define register offset=0x20 size=3 [ _ PCE _ SPE ]; define register offset=0x20 size=3 [ _ PCE ]; define register offset=0x20 size=2 [ _ _ PC _ _ SP ]; define register offset=0x20 size=1 [ _ _ _ _ PCH PCL _ _ _ _ SPH SPL ]; define register offset=0x30 size=2 [ CCRW ]; define register offset=0x30 size=1 [ CCRH ]; define register offset=0x31 size=1 [ CCR ]; define register offset = 0x32 size=3 [physPage]; define RAM offset=0x11 size=1 [DIRECT]; @ifdef HCS12 define RAM offset=0x30 size=1 [PPAGE]; @endif @ifdef HCS12X define RAM offset=0x10 size=1 [GPAGE]; define RAM offset=0x16 size=1 [RPAGE]; define RAM offset=0x17 size=1 [EPAGE]; @endif # Define context bits # WARNING: when adjusting context keep compiler packing in mind # and make sure fields do not span a 32-bit boundary before or # after context packing define register offset=0x40 size=4 contextreg; define context contextreg Prefix18 = (0,0) # 1 if 0x18 is the first byte PrefixHCS12X = (0,0) # 1 if first byte is 0x18 so that HCS12X to use GPAGE for memory access UseGPAGE = (1,1) # 1 if should use GPAGE concatenated to lower 16-bit EA XGATE = (2,2) # 1 if in xgate instruction decode mode ; # individual status bits within CCRH @define IPL_2 "CCRH[2,1]" @define IPL_1 "CCRH[1,1]" @define IPL_0 "CCRH[0,1]" @define IPL "CCRH[0,3]" # entire IPL # individual status bits within CCR @define S "CCR[7,1]" # STOP Enable @define X "CCR[6,1]" # Non-maskable interrupt control bit @define H "CCR[5,1]" # Half Carry Flag 
@define I "CCR[4,1]"   # Maskable interrupt control bit
@define N "CCR[3,1]"   # Negative Flag
@define Z "CCR[2,1]"   # Zero Flag
@define V "CCR[1,1]"   # Two's complement overflow Flag
@define C "CCR[0,1]"   # Carry/Borrow Flag

################################################################
# Tokens
################################################################

# primary opcode byte
define token opbyte8 (8)
  op8     = (0,7)
  op7_4   = (4,7)
  op6_4   = (4,6)
  nIndex  = (1,3)
  op0_0   = (0,0)
  trapnum = (0,7)
;

# indexed-addressing postbyte (xb) fields
define token xb8 (8)
  rr7_6        = (6,7)
  rr7_6a       = (6,7)
  xb5_5        = (5,5)
  nn4_0        = (0,4) signed
  xb7_5        = (5,7)
  rr4_3        = (3,4)
  xb2_2        = (2,2)
  xb2_1        = (1,2)   # actually needed this instead of xb2_2
  z1_1         = (1,1)
  s0_0         = (0,0)
  ss0_0        = (0,0) signed
  xb2_0        = (0,2)
  p4_4         = (4,4)
  decrement3_3 = (3,3)
  nn3_0        = (0,3)
  aa1_0        = (0,1)
  aa0_0        = (0,0)   # actually needed this instead of aa1_0
;

# exchange/transfer postbyte (eb) fields
define token eb8 (8)
  notUsed7_7               = (7,7)
  abcdxys6_4               = (4,6)
  abc5_4                   = (4,5)
  dxys2_0                  = (0,2)
  abcdxys2_0               = (0,2)
  columns7_4               = (4,7)
  rows2_0                  = (0,2)
  rows3_0                  = (0,3)
  bytes_ABCl_6_4           = (4,6)
  bytes_ABClT3lBXlYlSl_6_4 = (4,6)
  bytes_ABChT3hBXhYhSh_6_4 = (4,6)
  words_CT3DXYS_6_4        = (4,6)
  words_T3DXYS_6_4         = (4,6)
  bytes_ABCl_2_0           = (0,2)
  bytes_T3lDlXlYlSl_6_4    = (4,6)
  words_T2DXYS_2_0         = (0,2)
  bytes_T2h_XhYhSh_2_0     = (0,2)
  bytes_T2l_XlYlSl_2_0     = (0,2)
  bytes_T3l_XlYlSl_6_4     = (4,6)
  bytes_T3h_XhYhSh_6_4     = (4,6)
  words_T3_XYS_6_4         = (4,6)
  bytes_T2hDhXhYhSh_2_0    = (0,2)
  bytes_T2lDlXlYlSl_2_0    = (0,2)
;

define token opbyte16 (16)
  op16       = (0,15)
  op15_13    = (13,15)
  sign12_12  = (12,12) signed
  not_used11 = (11,11)
  size10_10  = (10,10)
  byte9_8    = (8,9)
  word9_8    = (8,9)
  rr7_0      = (0,7)
;

define token data8 (8)
  imm8  = (0,7)
  simm8 = (0,7) signed
  rel   = (0,7) signed
;

define token data16 (16)
  imm16   = (0,15)
  imm16p  = (12,15)
  imm16e  = (8,15)
  imm16ev = (0,9)
  imm16rv = (0,11)
  imm16pv = (0,13)
  simm16  = (0,15) signed
;

attach variables [ rr7_6 rr4_3 ] [ IX IY SP PC ];
attach variables [ rr7_6a ] [ IX IY SP _ ];   # PC not valid choice in this case
# TODO would be great if this worked
# attach names [ rr7_6 rr4_3 ] [ "X" "Y" "SP" "PC" ];
# TODO do the negative values work?
attach values [ nn3_0 ] [ 1 2 3 4 5 6 7 8 -8 -7 -6 -5 -4 -3 -2 -1 ];
attach variables [ aa0_0 ] [ A B ];
attach variables [ byte9_8 ] [ A B _ _ ];
attach variables [ word9_8 ] [ D IX IY SP ];
attach variables [ abc5_4 ] [ A B CCR _ ];
attach variables [ dxys2_0 ] [ _ _ _ TMP2 D IX IY SP ];
attach variables [ bytes_ABCl_2_0 ] [ A B CCR _ _ _ _ _ ];
attach variables [ bytes_ABClT3lBXlYlSl_6_4 ] [ A B CCR TMP3L B IXL IYL SPL ];
attach variables [ bytes_ABChT3hBXhYhSh_6_4 ] [ A B CCRH TMP3H B IXH IYH SPH ];
attach variables [ words_T2DXYS_2_0 ] [ _ _ _ TMP2 D IX IY SP ];
attach variables [ bytes_T3lDlXlYlSl_6_4 ] [ _ _ _ TMP3L B IXL IYL SPL ];
attach variables [ words_T3DXYS_6_4 ] [ _ _ _ TMP3 D IX IY SP ];
attach variables [ words_CT3DXYS_6_4 ] [ _ _ CCRW TMP3 D IX IY SP ];
attach variables [ bytes_T2l_XlYlSl_2_0 ] [ _ _ _ TMP2L _ IXL IYL SPL ];
attach variables [ bytes_T2h_XhYhSh_2_0 ] [ _ _ _ TMP2H _ IXH IYH SPH ];
attach variables [ bytes_T3l_XlYlSl_6_4 ] [ _ _ _ TMP3L _ IXL IYL SPL ];
attach variables [ bytes_T3h_XhYhSh_6_4 ] [ _ _ _ TMP3H _ IXH IYH SPH ];
attach variables [ words_T3_XYS_6_4 ] [ _ _ _ TMP3 _ IX IY SP ];
attach variables [ bytes_ABCl_6_4 ] [ A B CCR _ _ _ _ _ ];
attach variables [ bytes_T2hDhXhYhSh_2_0 ] [ _ _ _ TMP2H A IXH IYH SPH ];
attach variables [ bytes_T2lDlXlYlSl_2_0 ] [ _ _ _ TMP2L B IXL IYL SPL ];

################################################################
# Pseudo Instructions
################################################################

define pcodeop segment;   # Define special pcodeop that calculates the RAM address
                          # given the segment selector and offset as input
define pcodeop readIRQ;
define pcodeop stop;
define pcodeop WaitForInterrupt;
define pcodeop decimalAdjustAccumulator;
define pcodeop decimalAdjustCarry;
define pcodeop EMACS;
define pcodeop ETBL;
define pcodeop ETBL_Cflag;
define pcodeop GradeOfMembership;
define pcodeop
TableLookupAndInterpolate;
define pcodeop TableLookupAndInterpolateRoundable;
define pcodeop WeightedAverageSOPHigh;
define pcodeop WeightedAverageSOPLow;
define pcodeop WeightedAverageSOW;
define pcodeop WeightedAverageResume;
define pcodeop MinMaxRuleEvaluation;
define pcodeop MinMaxRuleEvaluationCorrect;
define pcodeop MinMaxRuleEvaluationWeighted;
define pcodeop MinMaxRuleEvaluationWeightedCorrect;
define pcodeop backgroundDebugMode;

@if defined(HCS12X)
# Classify a 16-bit logical address into its memory window (register block,
# EPAGE, RPAGE or PPAGE region) and store the corresponding physical page
# base in physPage.  Each is* flag is mutually exclusive, so exactly one
# product term below contributes to the sum.
macro setHCSphysPage(addr) {
  local a3:3 = zext(addr);
  local isReg:1      = (a3 & 0xfC00) == 0x0;
  local isEpage:1    = (a3 & 0xfc00) ==0x800;
  local isEpage_FF:1 = (a3 & 0xfc00) ==0xC00;
  local isRpage:1    = (a3 & 0xf000) ==0x1000;
  local isRpage_FE:1 = (a3 & 0xf000) ==0x2000;
  local isRpage_FF:1 = (a3 & 0xf000) ==0x3000;
  local isPpage_FD:1 = (a3 & 0xc000) ==0x4000;
  local isPpage:1    = (a3 & 0xc000) ==0x8000;
  local isPpage_FF:1 = (a3 & 0xc000) ==0xC000;
  # each term XORs away the in-window offset bits of addr and ORs in the
  # page base, selected by the (0/1) classification flag
  physPage = (zext(isReg) * 0x0) +
             (zext(isEpage) * (0x100000 | ((zext(EPAGE) << 10) ^ 0x800))) +
             (zext(isEpage_FF) * ((0x4FF << 10) ^ 0xC00)) +
             (zext(isRpage) * (((zext(RPAGE) << 12) ^ 0x1000))) +
             (zext(isRpage_FE) * (((0xFE << 12) ^ 0x2000))) +
             (zext(isRpage_FF) * (((0xFF << 12) ^ 0x3000))) +
             (zext(isPpage_FD) * (0x400000 | ((0x3F4000) ^ 0x4000))) +
             (zext(isPpage) * (0x400000 | ((zext(PPAGE) << 14 ) ^ 0x8000))) +
             (zext(isPpage_FF) * (0x400000 | ((0x3FC000) ^ 0xC000))) ;
}
@elif defined(HCS12) && SIZE=="3"
# HCS12 variant: only the four 16K PPAGE windows exist; 0x3D/0x3E/0x3F are
# the fixed pages, the 0x8000 window is selected by PPAGE.
macro setHCSphysPage(addr) {
  local a3:3 = zext(addr);
  local isPpage_3D:1 = (a3 & 0xc000) ==0x0000;
  local isPpage_3E:1 = (a3 & 0xc000) ==0x4000;
  local isPpage:1    = (a3 & 0xc000) ==0x8000;
  local isPpage_3F:1 = (a3 & 0xc000) ==0xC000;
#  physPage = (zext(isPpage) * (0x000000 | ((zext(PPAGE) << 14 ) ^ 0x8000)));
  physPage = (zext(isPpage_3D) * (0x000000 | ((0xF4000) ^ 0x0000))) +
             (zext(isPpage_3E) * (0x000000 | ((0xF8000) ^ 0x4000))) +
             (zext(isPpage) * (0x000000 | ((zext(PPAGE) << 14 ) ^ 0x8000))) +
             (zext(isPpage_3F) * (0x000000 | ((0xFC000) ^ 0xC000))) ;
}
@endif
macro
GetPagedAddr(addr,paddr) {
@if SIZE=="3"
  # banked address space: translate the 16-bit logical address through the
  # current page registers
  setHCSphysPage(addr);
  paddr = segment(physPage, addr);
@else
  paddr = addr;
@endif
}

# paged 1-byte load
macro Load1(value, addr) {
  local paddr:$(SIZE);
  GetPagedAddr(addr,paddr);
  value = *:1 paddr;
}

# paged 2-byte load
macro Load2(value, addr) {
  local paddr:$(SIZE);
  GetPagedAddr(addr,paddr);
  value = *:2 paddr;
}

# paged store (size taken from value)
macro Store(addr, value) {
  local paddr:$(SIZE);
  GetPagedAddr(addr,paddr);
  *paddr = value;
}

################################################################
# Addressing tables
################################################################
#
# TODO: Paging could be added here as these are constant addresses
#       could factor the overlapping addresses here, unless the
#       page register is always added in then will have to export as a constant
#       since don't know the real address
#

# fold a constant page into addr at the given shift
macro pageCAddr(addr, shift, page, offset) {
  addr = addr | ((page << shift) | offset);
}

# fold a page-register value into addr at the given shift
macro pagePAddr(addr, shift, page, offset) {
  addr = addr | ((zext(page) << shift) | offset);
}

@if defined(HCS12X)
# known EPAGE offsets
opr16a: imm16 is imm16e=0x8 & imm16 & imm16ev { local addr:3 = 0x100000; pagePAddr(addr,10,EPAGE,imm16ev); export addr; }
opr16a: imm16 is imm16e=0x9 & imm16 & imm16ev { local addr:3 = 0x100000; pagePAddr(addr,10,EPAGE,imm16ev); export addr; }
opr16a: imm16 is imm16e=0xa & imm16 & imm16ev { local addr:3 = 0x100000; pagePAddr(addr,10,EPAGE,imm16ev); export addr; }
opr16a: imm16 is imm16e=0xb & imm16 & imm16ev { local addr:3 = 0x100000; pagePAddr(addr,10,EPAGE,imm16ev); export addr; }
opr16a: imm16 is imm16e=0xc & imm16 & imm16ev { local addr:3 = 0; pageCAddr(addr,10,0xFF,imm16ev); export addr; }
opr16a: imm16 is imm16e=0xd & imm16 & imm16ev { local addr:3 = 0; pageCAddr(addr,10,0xFF,imm16ev); export addr; }
opr16a: imm16 is imm16e=0xe & imm16 & imm16ev { local addr:3 = 0; pageCAddr(addr,10,0xFF,imm16ev); export addr; }
opr16a: imm16 is imm16e=0xf & imm16 & imm16ev { local addr:3 = 0; pageCAddr(addr,10,0xFF,imm16ev); export addr; }
# known RPAGE offsets
opr16a:
imm16 is imm16p=0x1 & imm16 & imm16rv { local addr:3 = 0; pagePAddr(addr,12,RPAGE,imm16rv); export addr; }
opr16a: imm16 is imm16p=0x2 & imm16 & imm16rv { local addr:3 = 0; pageCAddr(addr,12,0xFE,imm16rv); export addr; }
opr16a: imm16 is imm16p=0x3 & imm16 & imm16rv { local addr:3 = 0; pageCAddr(addr,12,0xFF,imm16rv); export addr; }
# known PPAGE offsets
opr16a: imm16 is imm16p=0x4 & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFD,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x5 & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFD,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x6 & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFD,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x7 & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFD,imm16pv); export addr; }
# 0x8000-0xBFFF window: page comes from the PPAGE register
opr16a: imm16 is imm16p=0x8 & imm16 & imm16pv { local addr:3 = 0x400000; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0x9 & imm16 & imm16pv { local addr:3 = 0x400000; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0xa & imm16 & imm16pv { local addr:3 = 0x400000; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0xb & imm16 & imm16pv { local addr:3 = 0x400000; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0xC & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFF,imm16pv); export addr; }
opr16a: imm16 is imm16p=0xD & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFF,imm16pv); export addr; }
opr16a: imm16 is imm16p=0xE & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFF,imm16pv); export addr; }
opr16a: imm16 is imm16p=0xF & imm16 & imm16pv { local addr:3 = 0x400000; pageCAddr(addr,14,0xFF,imm16pv); export addr; }
# fallback: unpaged 16-bit absolute address
opr16a: imm16 is imm16e & imm16 { local addr:3 = imm16; export addr; }

opr8a: imm8 is imm8 { export *[const]:3 imm8; }
opr8a_8: imm8 is UseGPAGE=0 &
imm8 { export *:1 imm8; }
# with GPAGE active, an 8-bit direct address is extended with GPAGE<<16
opr8a_8: imm8 is UseGPAGE=1 & imm8 { local addr:3 = 0; pagePAddr(addr,16,GPAGE,imm8); export *:1 addr; }
opr8a_16: imm8 is UseGPAGE=0 & imm8 { export *:2 imm8; }
opr8a_16: imm8 is UseGPAGE=1 & imm8 { local addr:3 = 0; pagePAddr(addr,16,GPAGE,imm8); export *:2 addr; }
opr16a_8: opr16a is UseGPAGE=0 & opr16a { export *:1 opr16a; }
opr16a_8: imm16 is UseGPAGE=1 & imm16 { local addr:3 = 0; pagePAddr(addr,16,GPAGE,imm16); export *:1 addr; }
opr16a_16: opr16a is UseGPAGE=0 & opr16a { export *:2 opr16a; }
opr16a_16: imm16 is UseGPAGE=1 & imm16 { local addr:3 = 0; pagePAddr(addr,16,GPAGE,imm16); export *:2 addr; }
iopr8i: "#"imm8 is imm8 { export *[const]:1 imm8; }
iopr16i: "#"imm16 is imm16 { export *[const]:2 imm16; }
msk8: imm8 is imm8 { export *[const]:1 imm8; }
page: imm8 is imm8 { export *[const]:1 imm8; }
#PageDest: dest is imm16p=0x8 & imm16 & imm16pv ; imm8 [ dest = (imm8 << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0x9 & imm16 & imm16pv ; imm8 [ dest = (imm8 << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0xa & imm16 & imm16pv ; imm8 [ dest = (imm8 << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0xb & imm16 & imm16pv ; imm8 [ dest = (imm8 << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0xc & imm16 & imm16pv ; imm8 [ dest = ($(MAXFLASHPage) << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0xd & imm16 & imm16pv ; imm8 [ dest = ($(MAXFLASHPage) << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0xe & imm16 & imm16pv ; imm8 [ dest = ($(MAXFLASHPage) << 16) | imm16; ] { export *:1 dest; }
#PageDest: dest is imm16p=0xf & imm16 & imm16pv ; imm8 [ dest = ($(MAXFLASHPage) << 16) | imm16; ] { export *:1 dest; }
# CALL-style destination: 16-bit address followed by a page byte
PageDest: opr16a is opr16a; page { export opr16a; }
@else
@if SIZE=="3"
# HCS12 (non-X) fixed/windowed PPAGE mapping
opr16a: imm16 is imm16p=0x0 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3D,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x1 & imm16 & imm16pv { local
addr:3 = 0; pageCAddr(addr,14,0x3D,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x2 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3D,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x3 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3D,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x4 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3E,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x5 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3E,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x6 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3E,imm16pv); export addr; }
opr16a: imm16 is imm16p=0x7 & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3E,imm16pv); export addr; }
# 0x8000-0xBFFF window: page comes from the PPAGE register
opr16a: imm16 is imm16p=0x8 & imm16 & imm16pv { local addr:3 = 0; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0x9 & imm16 & imm16pv { local addr:3 = 0; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0xa & imm16 & imm16pv { local addr:3 = 0; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0xb & imm16 & imm16pv { local addr:3 = 0; pagePAddr(addr,14,PPAGE,imm16pv & 0x3fff); export addr; }
opr16a: imm16 is imm16p=0xc & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3F,imm16pv); export addr; }
opr16a: imm16 is imm16p=0xd & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3F,imm16pv); export addr; }
opr16a: imm16 is imm16p=0xe & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3F,imm16pv); export addr; }
opr16a: imm16 is imm16p=0xf & imm16 & imm16pv { local addr:3 = 0; pageCAddr(addr,14,0x3F,imm16pv); export addr; }
page: imm8 is imm8 { export *[const]:1 imm8; }
PageDest: opr16a is opr16a; page { export opr16a; }
@elif SIZE=="2"
# flat 64K address space: no paging at all
opr16a: imm16 is imm16 { local addr:$(SIZE) = imm16; export addr; }
@endif
opr8a: imm8 is imm8 { local addr:$(SIZE) = imm8; export addr; }
opr8a_8: imm8 is UseGPAGE=0 & imm8 { export *:1
imm8; }
opr8a_16: imm8 is UseGPAGE=0 & imm8 { export *:2 imm8; }
opr16a_8: opr16a is opr16a { export *:1 opr16a; }
opr16a_16: opr16a is opr16a { export *:2 opr16a; }
iopr8i: "#"imm8 is imm8 { export *[const]:1 imm8; }
iopr16i: "#"imm16 is imm16 { export *[const]:2 imm16; }
msk8: imm8 is imm8 { export *[const]:1 imm8; }
@endif

#
# Postbyte Code (xb) address decoding
# for indexed addressing modes: IDX, IDX1, IDX2, [D,IDX] and [IDX2]
#
#
# per page 29, 30, and 33
#
#**********
# IDX
#**********
#
# rr0nnnnn
#
# 5-bit constant offset n = -16 to +15
# zero offset: the register itself is the effective address
IDX_a: 0,rr7_6 is rr7_6 & xb5_5=0 & nn4_0=0 { export rr7_6; }
# TODO make sure that nn4_0 is a signed extension
IDX_b: nn4_0, rr7_6 is rr7_6 & xb5_5=0 & nn4_0 { address:2 = rr7_6 + nn4_0; export address; }

# rr1pnnnn
#
# used xb7_5 in all of these to prevent rr7_6a from ever being 0b11, and overlapping the 111rr0zs decodes
#
# Auto pre-decrement (nn3_0 attaches negative values for decrement encodings)
IDX_c: nn3_0, -rr7_6a is (xb7_5=0b001 | xb7_5=0b011 | xb7_5=0b101) & rr7_6a & p4_4=0 & decrement3_3=1 & nn3_0 { rr7_6a = rr7_6a + nn3_0; address:2 = rr7_6a; export address; }
# Auto pre-increment
IDX_d: nn3_0, +rr7_6a is (xb7_5=0b001 | xb7_5=0b011 | xb7_5=0b101) & rr7_6a & p4_4=0 & decrement3_3=0 & nn3_0 { rr7_6a = rr7_6a + nn3_0; address:2 = rr7_6a; export address; }
# Auto post-decrement
IDX_e: nn3_0, rr7_6a- is (xb7_5=0b001 | xb7_5=0b011 | xb7_5=0b101) & rr7_6a & p4_4=1 & decrement3_3=1 & nn3_0 { address:2 = rr7_6a; rr7_6a = rr7_6a + nn3_0; export address; }
# Auto post-increment
IDX_f: nn3_0, rr7_6a+ is (xb7_5=0b001 | xb7_5=0b011 | xb7_5=0b101) & rr7_6a & p4_4=1 & decrement3_3=0 & nn3_0 { address:2 = rr7_6a; rr7_6a = rr7_6a + nn3_0; export address; }

# 111rr1aa
#
# Accumulator unsigned 8-bit offset indexed
IDX_g: aa0_0, rr4_3 is xb7_5=0b111 & rr4_3 & xb2_1=0b10 & aa0_0 { address:2 = rr4_3 + zext(aa0_0); export address; }
# PC-relative form uses inst_next as the base
IDX_g: aa0_0, PC is xb7_5=0b111 & rr4_3=3 & xb2_1=0b10 & aa0_0 & PC { address:2 = inst_next + zext(aa0_0); export address; }
# Accumulator 16-bit offset indexed
IDX_h: D, rr4_3 is xb7_5=0b111 & rr4_3 & xb2_0=0b110 & D { address:2 = rr4_3 + D; export address; }
IDX_h: D, PC is xb7_5=0b111 & rr4_3=3 & xb2_0=0b110 & D & PC { address:2 = inst_next + D; export address; }

#**********
# IDX1
#**********
#
# 111rr0zs imm8
#
# Constant offset (9-bit signed)
IDX_i: opr9, rr4_3 is xb7_5=0b111 & rr4_3 & xb2_2=0 & z1_1=0 & ss0_0 ; imm8 [ opr9 = (ss0_0 << 8) | imm8; ] { address:2 = rr4_3 + opr9; export address; }
# FIX: '|' binds more loosely than '+' in disassembly actions, so the original
# 'inst_next + (ss0_0 << 8) | imm8' evaluated as
# (inst_next + (ss0_0 << 8)) | imm8; parenthesize so the 9-bit offset is
# assembled first, matching the opr9 computation in IDX_i above.
IDX_i_PCRel: target is ss0_0 ; imm8 [ target = inst_next + ((ss0_0 << 8) | imm8); ] { export *[const]:2 target; }
IDX_i: opr9, PC is (xb7_5=0b111 & rr4_3=3 & xb2_2=0 & z1_1=0 & ss0_0 ; imm8) & IDX_i_PCRel & PC [ opr9 = (ss0_0 << 8) | imm8; ] { export IDX_i_PCRel; }

#**********
# IDX2
#**********
#
# 111rr0zs simm16
#
# Constant offset (16-bit signed)
IDX_k: simm16, rr4_3 is xb7_5=0b111 & rr4_3 & xb2_2=0 & z1_1=1 & s0_0=0; simm16 { address:2 = rr4_3 + simm16; export address; }
IDX_k_PCRel: target is simm16 [ target = inst_next + simm16; ] { export *[const]:2 target; }
IDX_k: simm16, PC is xb7_5=0b111 & rr4_3=3 & xb2_2=0 & z1_1=1 & s0_0=0; simm16 & PC & IDX_k_PCRel { export IDX_k_PCRel; }

#**********
# [IDX2]
#**********
#
# 111rr011 simm16
#
# 16-bit offset indexed-indirect: the pointer at (reg + offset) is fetched
IDX_l: [simm16, rr4_3] is xb7_5=0b111 & rr4_3 & xb2_2=0 & z1_1=1 & s0_0=1; simm16 { address:2 = rr4_3 + simm16; Load2(address,address); export address; }
IDX_l_PCRel: target is simm16 [ target = inst_next + simm16; ] { export *[const]:2 target; }
IDX_l: [simm16, PC] is xb7_5=0b111 & rr4_3=3 & xb2_2=0 & z1_1=1 & s0_0=1; simm16 & PC & IDX_l_PCRel { address:2 = IDX_l_PCRel; Load2(address,address); export address; }

#**********
# [D,IDX]
#**********
#
# 111rr111
#
# Accumulator D offset indexed-indirect
IDX_m: [D, rr4_3] is xb7_5=0b111 & rr4_3 & xb2_0=0b111 & D { address:2 = rr4_3 + D; Load2(address,address); export address; }
IDX_m: [D, PC] is xb7_5=0b111 & rr4_3=3 & xb2_0=0b111 & D & PC { address:2 = inst_next + D; Load2(address,address); export address; }
######################################################################
######################################################################

#
# effective address of IDX, IDX1, IDX2
# (direct-address modes only; the indirect modes are deliberately excluded)
#
indexed3: IDX_a is IDX_a { export IDX_a; }
indexed3: IDX_b is IDX_b { export IDX_b; }
indexed3: IDX_c is IDX_c { export IDX_c; }
indexed3: IDX_d is IDX_d { export IDX_d; }
indexed3: IDX_e is IDX_e { export IDX_e; }
indexed3: IDX_f is IDX_f { export IDX_f; }
indexed3: IDX_g is IDX_g { export IDX_g; }
indexed3: IDX_h is IDX_h { export IDX_h; }
indexed3: IDX_i is IDX_i { export IDX_i; }
indexed3: IDX_k is IDX_k { export IDX_k; }
# not indexed3: IDX_l is IDX_l { export IDX_l; }
# not indexed3: IDX_m is IDX_m { export IDX_m; }

#
# effective address of [IDX2], [D,IDX]
#
# not indexedindexed3: IDX_a is IDX_a { export IDX_a; }
# not indexedindexed3: IDX_b is IDX_b { export IDX_b; }
# not indexedindexed3: IDX_c is IDX_c { export IDX_c; }
# not indexedindexed3: IDX_d is IDX_d { export IDX_d; }
# not indexedindexed3: IDX_e is IDX_e { export IDX_e; }
# not indexedindexed3: IDX_f is IDX_f { export IDX_f; }
# not indexedindexed3: IDX_g is IDX_g { export IDX_g; }
# not indexedindexed3: IDX_h is IDX_h { export IDX_h; }
# not indexedindexed3: IDX_i is IDX_i { export IDX_i; }
# not indexedindexed3: IDX_k is IDX_k { export IDX_k; }
@ifdef HCS12
indexed2: IDX_l is IDX_l { export IDX_l; }
indexed2: IDX_m is IDX_m { export IDX_m; }
@endif

#
# effective address of IDX, IDX1, IDX2, [IDX2], [D,IDX]
# (all indexed modes, direct and indirect)
#
indexed5: IDX_a is IDX_a { export IDX_a; }
indexed5: IDX_b is IDX_b { export IDX_b; }
indexed5: IDX_c is IDX_c { export IDX_c; }
indexed5: IDX_d is IDX_d { export IDX_d; }
indexed5: IDX_e is IDX_e { export IDX_e; }
indexed5: IDX_f is IDX_f { export IDX_f; }
indexed5: IDX_g is IDX_g { export IDX_g; }
indexed5: IDX_h is IDX_h { export IDX_h; }
indexed5: IDX_i is IDX_i { export IDX_i; }
indexed5: IDX_k is IDX_k { export IDX_k; }
indexed5: IDX_l is IDX_l { export IDX_l; }
indexed5: IDX_m is IDX_m { export IDX_m; }

#
# effective address of IDX
#
indexed1: IDX_a is IDX_a { export IDX_a; }
indexed1: IDX_b is IDX_b { export IDX_b; }
indexed1: IDX_c is IDX_c { export IDX_c; }
indexed1: IDX_d is IDX_d { export IDX_d; }
indexed1: IDX_e is IDX_e { export IDX_e; }
indexed1: IDX_f is IDX_f { export IDX_f; }
indexed1: IDX_g is IDX_g { export IDX_g; }
indexed1: IDX_h is IDX_h { export IDX_h; }
# not indexed1: IDX_i is IDX_i { export IDX_i; }
# not indexed1: IDX_k is IDX_k { export IDX_k; }
# not indexed1: iIDX_l is iIDX_l { export iIDX_l; }
# not indexed1: IDX_m is IDX_m { export IDX_m; }

#
# indexed0_x - exports the effective address (EA)
# indexed1_x - exports a single byte at the effective address
# indexed2_x - exports a double byte at the effective address
#
@ifndef HCS12X
indexed1_1: indexed1 is indexed1 { local paddr:$(SIZE) = 0; GetPagedAddr(indexed1,paddr); export *:1 paddr; }
@endif
@ifdef HCS12
indexed2_1: indexed1 is indexed1 { local paddr:$(SIZE) = 0; GetPagedAddr(indexed1,paddr); export *:2 paddr; }
@else
indexed2_1: indexed1 is indexed1 { export *:2 indexed1; }
@endif
@ifdef HCS12
indexed0_2: indexed2 is indexed2 { local val = indexed2; export val; }
@endif
indexed0_3: indexed3 is indexed3 { local val = indexed3; export val; }
@ifdef HCS12
indexed1_3: indexed3 is indexed3 { local paddr:$(SIZE) = 0; GetPagedAddr(indexed3,paddr); export *:1 paddr; }
@else
indexed1_3: indexed3 is indexed3 { export *:1 indexed3; }
@endif
@ifdef HCS12
indexed2_3: indexed3 is indexed3 { local paddr:$(SIZE) = 0; GetPagedAddr(indexed3,paddr); export *:2 paddr; }
@endif
@ifdef HCS12
indexedA_5: indexed5 is UseGPAGE=0 & indexed5 { local paddr:$(SIZE) = 0; GetPagedAddr(indexed5,paddr); export paddr; }
@else
indexedA_5: indexed5 is UseGPAGE=0 & indexed5 { export indexed5; }
@endif
@ifdef HCS12X
# GPAGE-extended effective address (GPAGE:EA)
indexedA_5: indexed5 is UseGPAGE=1 & indexed5 { local addr:$(SIZE) = (zext(GPAGE) << 16) | zext(indexed5); export addr; }
@endif
@ifdef HCS12
indexed1_5: indexed5 is UseGPAGE=0 & indexed5 { local paddr:$(SIZE) =
0; GetPagedAddr(indexed5,paddr); export *:1 paddr; }
@else
indexed1_5: indexed5 is UseGPAGE=0 & indexed5 { export *:1 indexed5; }
@endif
@ifdef HCS12X
indexed1_5: indexed5 is UseGPAGE=1 & indexed5 { local addr:$(SIZE) = (zext(GPAGE) << 16) | zext(indexed5); export *:1 addr; }
@endif
@ifdef HCS12
indexed2_5: indexed5 is UseGPAGE=0 & indexed5 { local paddr:$(SIZE) = 0; GetPagedAddr(indexed5,paddr); export *:2 paddr; }
@else
# NOTE(review): exports the address itself rather than a *:2 dereference,
# unlike the HCS12 branch above -- verify against users of indexed2_5.
indexed2_5: indexed5 is UseGPAGE=0 & indexed5 { export indexed5; }
@endif
@ifdef HCS12X
indexed2_5: indexed5 is UseGPAGE=1 & indexed5 { local addr:$(SIZE) = (zext(GPAGE) << 16) | zext(indexed5); export *:2 addr; }
@endif

# range -128 through +127
rel8: reloc is rel [ reloc = inst_next + rel; ] { export *:1 reloc; }
# range -256 through +255
rel9: reloc is sign12_12 & rr7_0 [ reloc = inst_next + ((sign12_12 << 8) | rr7_0); ] { export *:1 reloc; }
# positive range 0 through +65535
rel16: reloc is simm16 [ reloc = inst_next + simm16; ] { export *:1 reloc; }

op2_opr16a_8: opr16a_8 is opr16a_8 { export opr16a_8; }
op2_opr16a_16: opr16a_16 is opr16a_16 { export opr16a_16; }
@if defined(HCS12X)
op2_indexed1_5: indexed1_5 is indexed1_5 { export indexed1_5; }
@else
op2_indexed1_1: indexed1_1 is indexed1_1 { export indexed1_1; }
@endif
@if defined(HCS12X)
op2_indexed2_5: indexed2_5 is indexed2_5 { export indexed2_5; }
@else
op2_indexed2_1: indexed2_1 is indexed2_1 { export indexed2_1; }
@endif

################################################################
# Macros
################################################################

# Add with carry-in: result = operand1 + operand2 + C, updating H, N, Z, V, C.
macro additionWithCarry(operand1, operand2, result) {
  local Ccopy = zext($(C));
  local AFmask = -1 >> 4;
  # half-carry out of the low nibble, including the carry-in
  $(H) = (((operand1 & AFmask) + (operand2 & AFmask) + Ccopy) & (AFmask + 1)) != 0;
  $(C) = carry(operand1, operand2);
  local tempResult = operand1 + operand2;
  # carry of a three-input add: carry of either partial sum
  $(C) = $(C) || carry(tempResult, Ccopy);
  # FIX: signed overflow of a three-input add is the XOR of the two partial
  # signed carries.  The original read the stale V flag here
  # ($(V) = $(V) ^^ scarry(...)) instead of scarry(operand1, operand2).
  $(V) = scarry(operand1, operand2) ^^ scarry(tempResult, Ccopy);
  result = tempResult + Ccopy;
  $(N) = (result s< 0);
  $(Z) = (result == 0);
}

macro
addition_flags1(operand1, operand2,result) {
  local AFmask = -1 >> 4;
  # half-carry out of the low nibble (8-bit adds only)
  $(H) = (((operand1 & AFmask) + (operand2 & AFmask)) & (AFmask + 1)) != 0;
  $(N) = (result s< 0);
  $(Z) = (result == 0);
  $(V) = scarry(operand1,operand2);
  $(C) = carry(operand1,operand2);
}

# 16-bit adds: no half-carry, only N, Z, V, C
macro addition_flags2(operand1, operand2, result) {
  $(N) = (result s< 0);
  $(Z) = (result == 0);
  $(V) = scarry(operand1,operand2);
  $(C) = carry(operand1,operand2);
}

macro subtraction_flags1(register, operand, result) {
  $(V) = sborrow(register,operand);
  $(N) = (result s< 0);
  $(Z) = (result == 0);
  $(C) = register < operand;
}

macro subtraction_flags2(register, operand, result) {
  $(V) = sborrow(register,operand);
  $(N) = (result s< 0);
  $(Z) = (result == 0);
  $(C) = register < operand;
}

macro V_equals_0() { $(V) = 0; }
macro V_equals_C() { $(V) = $(C); }
macro V_equals_N_xor_C() { $(V) = $(N) ^ $(C); }
macro V_CMP_flag(register, operand) { $(V) = sborrow(register,operand); }
macro V_DEC_flag(operand) { $(V) = sborrow(operand,1); }
macro V_DEC_flag2(operand) { $(V) = sborrow(operand,1); }
macro V_INC_flag(operand) { $(V) = scarry(operand,1); }
macro V_INC_flag2(operand) { $(V) = scarry(operand,1); }
macro V_NEG_flag(operand) { $(V) = sborrow(0,operand); }
macro V_NEG_flag2(operand) { $(V) = sborrow(0,operand); }

# Stack pop/push; on banked parts (SIZE=="3") SP is translated through
# setHCSphysPage before the memory access.
macro Pull1(operand) {
@if SIZE=="3"
  setHCSphysPage(SP);
  paddr:$(SIZE) = segment(physPage,SP);
@else
  paddr:$(SIZE) = SP;
@endif
  operand = *paddr;
  SP = SP + 1;
}

macro Pull2(operand) {
@if SIZE=="3"
  setHCSphysPage(SP);
  paddr:$(SIZE) = segment(physPage,SP);
@else
  paddr:$(SIZE) = SP;
@endif
  operand = *paddr;
  SP = SP + 2;
}

macro Push1(operand) {
  SP = SP - 1;
@if SIZE=="3"
  setHCSphysPage(SP);
  paddr:$(SIZE) = segment(physPage,SP);
@else
  paddr:$(SIZE) = SP;
@endif
  *paddr = operand;
}

macro Push2(operand) {
  SP = SP - 2;
@if SIZE=="3"
  setHCSphysPage(SP);
  paddr:$(SIZE) = segment(physPage,SP);
@else
  paddr:$(SIZE) = SP;
@endif
  *paddr = operand;
}

macro setCCR( operand ) {
  # when CCR is the destination, cannot set the X bit unless it is already set in CCR
  CCR = operand & (CCR | 0b10111111);
}

macro setCCRW( operand ) {
  # when CCRW is the destination, cannot set the X bit unless it is already set in CCRW
  CCRW = operand & (CCRW | 0b1111111110111111);
}

################################################################
# Constructors
################################################################

with : XGATE=0 {

# 0x18 prefix byte: marks the following instruction with Prefix18=1.
# NOTE(review): both preprocessor branches are identical; kept as-is for
# parity with the original source.
@ifdef HCS12X
:^instruction is op8=0x18; instruction [ Prefix18=1; ] {}
@else
:^instruction is op8=0x18; instruction [ Prefix18=1; ] {}
@endif

:ABA is (Prefix18=1 & op8=0x06) { result:1 = A + B; addition_flags1(A, B, result); A = result; }
:ABX is Prefix18=0 & (op16=0x1AE5) { IX = zext(B) + IX; }
:ABY is Prefix18=0 & (op16=0x19ED) { IY = zext(B) + IY; }

:ADCA iopr8i is Prefix18=0 & (op8=0x89); iopr8i { op1:1 = iopr8i; local result:1; additionWithCarry(A, op1, result); A = result; }
:ADCA opr8a_8 is Prefix18=0 & (op8=0x99); opr8a_8 { op1:1 = opr8a_8; local result:1; additionWithCarry(A, op1, result); A = result; }
:ADCA opr16a_8 is Prefix18=0 & (op8=0xB9); opr16a_8 { op1:1 = opr16a_8; local result:1; additionWithCarry(A, op1, result); A = result; }
# FIX: was 'result = A + op1 + C' with addition_flags1, which ignores the
# carry-in when computing H/V/C; use additionWithCarry like the other ADCA forms.
:ADCA indexed1_5 is Prefix18=0 & (op8=0xA9); indexed1_5 { op1:1 = indexed1_5; local result:1; additionWithCarry(A, op1, result); A = result; }
# FIX: flags were computed from A (additionWithCarry(A, ...)) while the
# result was stored to B; ADCB must compute its flags from B.
:ADCB iopr8i is Prefix18=0 & (op8=0xC9); iopr8i { op1:1 = iopr8i; local result:1; additionWithCarry(B, op1, result); B = result; }
# FIX: was addition_flags1 without carry-in; use additionWithCarry like the
# other ADCB forms.
:ADCB opr8a_8 is Prefix18=0 & (op8=0xD9); opr8a_8 { op1:1 = opr8a_8; local result:1; additionWithCarry(B, op1, result); B = result; }
:ADCB opr16a_8 is Prefix18=0 & (op8=0xF9); opr16a_8 { op1:1 = opr16a_8; local result:1; additionWithCarry(B, op1, result); B = result; }
:ADCB indexed1_5 is Prefix18=0 & (op8=0xE9); indexed1_5 { op1:1 = indexed1_5; local result:1; additionWithCarry(B, op1, result); B = result; }

:ADDA iopr8i is Prefix18=0 & (op8=0x8B); iopr8i { op1:1 = iopr8i; result:1 = A + op1; addition_flags1(A, op1, result); A = result; }
:ADDA
opr8a_8 is Prefix18=0 & (op8=0x9B); opr8a_8 { op1:1 = opr8a_8; result:1 = A + op1; addition_flags1(A, op1, result); A = result; }
:ADDA opr16a_8 is Prefix18=0 & (op8=0xBB); opr16a_8 { op1:1 = opr16a_8; result:1 = A + op1; addition_flags1(A, op1, result); A = result; }
:ADDA indexed1_5 is Prefix18=0 & (op8=0xAB); indexed1_5 { op1:1 = indexed1_5; result:1 = A + op1; addition_flags1(A, op1, result); A = result; }

:ADDB iopr8i is Prefix18=0 & (op8=0xCB); iopr8i { op1:1 = iopr8i; result:1 = B + op1; addition_flags1(B, op1, result); B = result; }
:ADDB opr8a_8 is Prefix18=0 & (op8=0xDB); opr8a_8 { op1:1 = opr8a_8; result:1 = B + op1; addition_flags1(B, op1, result); B = result; }
:ADDB opr16a_8 is Prefix18=0 & (op8=0xFB); opr16a_8 { op1:1 = opr16a_8; result:1 = B + op1; addition_flags1(B, op1, result); B = result; }
:ADDB indexed1_5 is Prefix18=0 & (op8=0xEB); indexed1_5 { op1:1 = indexed1_5; result:1 = B + op1; addition_flags1(B, op1, result); B = result; }

# 16-bit add to D (flags via addition_flags2 - no half-carry)
:ADDD iopr16i is Prefix18=0 & (op8=0xC3); iopr16i { op1:2 = iopr16i; result:2 = D + op1; addition_flags2(D, op1, result); D = result; }
:ADDD opr8a_16 is Prefix18=0 & (op8=0xD3); opr8a_16 { op1:2 = opr8a_16; result:2 = D + op1; addition_flags2(D, op1, result); D = result; }
:ADDD opr16a_16 is Prefix18=0 & (op8=0xF3); opr16a_16 { op1:2 = opr16a_16; result:2 = D + op1; addition_flags2(D, op1, result); D = result; }
:ADDD indexed2_5 is Prefix18=0 & (op8=0xE3); indexed2_5 { op1:2 = indexed2_5; result:2 = D + op1; addition_flags2(D, op1, result); D = result; }

# HCS12X-only 16-bit adds to IX (0x18-prefixed opcodes)
@if defined(HCS12X)
:ADDX iopr16i is Prefix18=1 & (op8=0x8B); iopr16i { op1:2 = iopr16i; result:2 = IX + op1; addition_flags2(IX, op1, result); IX = result; }
@endif
@if defined(HCS12X)
:ADDX opr8a_16 is Prefix18=1 & (op8=0x9B); opr8a_16 { op1:2 = opr8a_16; result:2 = IX + op1; addition_flags2(IX, op1, result); IX = result; }
@endif
@if defined(HCS12X)
:ADDX opr16a_16 is Prefix18=1 & (op8=0xBB); opr16a_16 { op1:2 = opr16a_16; result:2 = IX + op1; addition_flags2(IX,
op1, result); IX = result; }
@endif
@if defined(HCS12X)
:ADDX indexed2_5 is Prefix18=1 & (op8=0xAB); indexed2_5 { op1:2 = indexed2_5; result:2 = IX + op1; addition_flags2(IX, op1, result); IX = result; }
@endif
# HCS12X-only 16-bit adds to IY
@if defined(HCS12X)
:ADDY iopr16i is Prefix18=1 & (op8=0xCB); iopr16i { op1:2 = iopr16i; result:2 = IY + op1; addition_flags2(IY, op1, result); IY = result; }
@endif
@if defined(HCS12X)
:ADDY opr8a_16 is Prefix18=1 & (op8=0xDB); opr8a_16 { op1:2 = opr8a_16; result:2 = IY + op1; addition_flags2(IY, op1, result); IY = result; }
@endif
@if defined(HCS12X)
:ADDY opr16a_16 is Prefix18=1 & (op8=0xFB); opr16a_16 { op1:2 = opr16a_16; result:2 = IY + op1; addition_flags2(IY, op1, result); IY = result; }
@endif
@if defined(HCS12X)
:ADDY indexed2_5 is Prefix18=1 & (op8=0xEB); indexed2_5 { op1:2 = indexed2_5; result:2 = IY + op1; addition_flags2(IY, op1, result); IY = result; }
@endif
# ADED: add with carry-in to D (16-bit)
@if defined(HCS12X)
:ADED iopr16i is Prefix18=1 & (op8=0xC3); iopr16i { op1:2 = iopr16i; result:2 = D + op1 + zext($(C)); addition_flags2(D, op1, result); D = result; }
@endif
@if defined(HCS12X)
:ADED opr8a_16 is Prefix18=1 & (op8=0xD3); opr8a_16 { op1:2 = opr8a_16; result:2 = D + op1 + zext($(C)); addition_flags2(D, op1, result); D = result; }
@endif
@if defined(HCS12X)
:ADED opr16a_16 is Prefix18=1 & (op8=0xF3); opr16a_16 { op1:2 = opr16a_16; result:2 = D + op1 + zext($(C)); addition_flags2(D, op1, result); D = result; }
@endif
@if defined(HCS12X)
:ADED indexed2_5 is Prefix18=1 & (op8=0xE3); indexed2_5 { op1:2 = indexed2_5; result:2 = D + op1 + zext($(C)); addition_flags2(D, op1, result); D = result; }
@endif
# ADEX: add with carry-in to IX (16-bit)
@if defined(HCS12X)
:ADEX iopr16i is Prefix18=1 & (op8=0x89); iopr16i { op1:2 = iopr16i; result:2 = IX + op1 + zext($(C)); addition_flags2(IX, op1, result); IX = result; }
@endif
@if defined(HCS12X)
:ADEX opr8a_16 is Prefix18=1 & (op8=0x99); opr8a_16 { op1:2 = opr8a_16; result:2 = IX + op1 + zext($(C)); addition_flags2(IX, op1, result); IX = result; }
@endif
@if defined(HCS12X)
:ADEX opr16a_16 is Prefix18=1 & (op8=0xb9); opr16a_16 { op1:2 = opr16a_16; result:2 = IX + op1 + zext($(C)); addition_flags2(IX, op1, result); IX = result; }
@endif
@if defined(HCS12X)
:ADEX indexed2_5 is Prefix18=1 & (op8=0xa9); indexed2_5 { op1:2 = indexed2_5; result:2 = IX + op1 + zext($(C)); addition_flags2(IX, op1, result); IX = result; }
@endif
# ADEY: add with carry-in to IY (16-bit)
@if defined(HCS12X)
:ADEY iopr16i is Prefix18=1 & (op8=0xC9); iopr16i { op1:2 = iopr16i; result:2 = IY + op1 + zext($(C)); addition_flags2(IY, op1, result); IY = result; }
@endif
@if defined(HCS12X)
:ADEY opr8a_16 is Prefix18=1 & (op8=0xD9); opr8a_16 { op1:2 = opr8a_16; result:2 = IY + op1 + zext($(C)); addition_flags2(IY, op1, result); IY = result; }
@endif
@if defined(HCS12X)
:ADEY opr16a_16 is Prefix18=1 & (op8=0xF9); opr16a_16 { op1:2 = opr16a_16; result:2 = IY + op1 + zext($(C)); addition_flags2(IY, op1, result); IY = result; }
@endif
@if defined(HCS12X)
:ADEY indexed2_5 is Prefix18=1 & (op8=0xE9); indexed2_5 { op1:2 = indexed2_5; result:2 = IY + op1 + zext($(C)); addition_flags2(IY, op1, result); IY = result; }
@endif

# logical AND: sets N and Z from the result, clears V, C unaffected
:ANDA iopr8i is Prefix18=0 & (op8=0x84); iopr8i { A = A & iopr8i; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }
:ANDA opr8a_8 is Prefix18=0 & (op8=0x94); opr8a_8 { A = A & opr8a_8; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }
:ANDA opr16a_8 is Prefix18=0 & (op8=0xB4); opr16a_8 { A = A & opr16a_8; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }
:ANDA indexed1_5 is Prefix18=0 & (op8=0xA4); indexed1_5 { A = A & indexed1_5; V_equals_0(); $(Z) = (A == 0); $(N) = (A s< 0); }

:ANDB iopr8i is Prefix18=0 & (op8=0xC4); iopr8i { B = B & iopr8i; V_equals_0(); $(Z) = (B == 0); $(N) = (B s< 0); }
:ANDB opr8a_8 is Prefix18=0 & (op8=0xD4); opr8a_8 { B = B & opr8a_8; V_equals_0(); $(Z) = (B == 0); $(N) = (B s< 0); }
:ANDB opr16a_8 is Prefix18=0 & (op8=0xF4); opr16a_8 { B = B & opr16a_8; V_equals_0(); $(Z) = (B == 0); $(N) = (B s< 0); }
:ANDB indexed1_5 is Prefix18=0 & (op8=0xE4); indexed1_5 { B = B &
indexed1_5; V_equals_0(); $(Z) = (B == 0); $(N) = (B s< 0); } :ANDCC iopr8i is Prefix18=0 & (op8=0x10); iopr8i { CCR = CCR & iopr8i; } @if defined(HCS12X) :ANDX iopr16i is Prefix18=1 & (op8=0x84); iopr16i { IX = IX & iopr16i; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ANDX opr8a_16 is Prefix18=1 & (op8=0x94); opr8a_16 { IX = IX & opr8a_16; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ANDX opr16a_16 is Prefix18=1 & (op8=0xB4); opr16a_16 { IX = IX & opr16a_16; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ANDX indexed2_5 is Prefix18=1 & (op8=0xA4); indexed2_5 { IX = IX & indexed2_5; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ANDY iopr16i is Prefix18=1 & (op8=0xC4); iopr16i { IY = IY & iopr16i; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif @if defined(HCS12X) :ANDY opr8a_16 is Prefix18=1 & (op8=0xD4); opr8a_16 { IY = IY & opr8a_16; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif @if defined(HCS12X) :ANDY opr16a_16 is Prefix18=1 & (op8=0xF4); opr16a_16 { IY = IY & opr16a_16; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif @if defined(HCS12X) :ANDY indexed2_5 is Prefix18=1 & (op8=0xE4); indexed2_5 { IY = IY & indexed2_5; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif :ASL opr16a_8 is Prefix18=0 & (op8=0x78); opr16a_8 { tmp:1 = opr16a_8; $(C) = tmp[7,1]; tmp = tmp << 1; opr16a_8 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } :ASL indexed1_5 is Prefix18=0 & (op8=0x68); indexed1_5 { tmp:1 = indexed1_5; $(C) = tmp[7,1]; tmp = tmp << 1; indexed1_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } :ASLA is Prefix18=0 & op8=0x48 { $(C) = A[7,1]; A = A << 1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); } :ASLB is Prefix18=0 & op8=0x58 { $(C) = B[7,1]; B = B << 1; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_N_xor_C(); } :ASLD is Prefix18=0 & 
op8=0x59 { $(C) = D[15,1]; D = D << 1; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_N_xor_C(); }
# HCS12X 16-bit shift-left (ASLW/ASLX/ASLY) and arithmetic shift right
# (ASR/ASRA/ASRB and 16-bit ASRW/ASRX/ASRY).
# ASR: C receives bit 0 (the bit shifted out); s>> preserves the sign bit.
@if defined(HCS12X)
:ASLW opr16a_16 is Prefix18=1 & (op8=0x78); opr16a_16 { local tmp = opr16a_16; $(C) = tmp[15,1]; tmp = tmp << 1; opr16a_16 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif
@if defined(HCS12X)
:ASLW indexed2_5 is Prefix18=1 & (op8=0x68); indexed2_5 { local tmp = indexed2_5; $(C) = tmp[15,1]; tmp = tmp << 1; indexed2_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif
@if defined(HCS12X)
:ASLX is Prefix18=1 & op8=0x48 { $(C) = IX[15,1]; IX = IX << 1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_N_xor_C(); } @endif
@if defined(HCS12X)
:ASLY is Prefix18=1 & op8=0x58 { $(C) = IY[15,1]; IY = IY << 1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_N_xor_C(); } @endif
:ASR opr16a_8 is Prefix18=0 & (op8=0x77); opr16a_8 { tmp:1 = opr16a_8; $(C) = tmp[0,1]; tmp = tmp s>> 1; opr16a_8 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); }
:ASR indexed1_5 is Prefix18=0 & (op8=0x67); indexed1_5 { tmp:1 = indexed1_5; $(C) = tmp[0,1]; tmp = tmp s>> 1; indexed1_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); }
:ASRA is Prefix18=0 & op8=0x47 { $(C) = A[0,1]; A = A s>> 1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); }
:ASRB is Prefix18=0 & op8=0x57 { $(C) = B[0,1]; B = B s>> 1; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_N_xor_C(); }
@if defined(HCS12X)
:ASRW opr16a_16 is Prefix18=1 & (op8=0x77); opr16a_16 { local tmp = opr16a_16; $(C) = tmp[0,1]; tmp = tmp s>> 1; opr16a_16 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif
@if defined(HCS12X)
:ASRW indexed2_5 is Prefix18=1 & (op8=0x67); indexed2_5 { local tmp = indexed2_5; $(C) = tmp[0,1]; tmp = tmp s>> 1; indexed2_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif
@if defined(HCS12X)
:ASRX is Prefix18=1 & op8=0x47 { $(C) = IX[0,1]; IX = IX s>> 1; $(Z) = (IX == 0); $(N) = (IX s< 0);
V_equals_N_xor_C(); } @endif
@if defined(HCS12X)
:ASRY is Prefix18=1 & op8=0x57 { $(C) = IY[0,1]; IY = IY s>> 1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_N_xor_C(); } @endif
# Conditional branches, bit-clear, and bit-test instructions.
:BCC rel8 is Prefix18=0 & op8=0x24; rel8 { if ($(C) == 0) goto rel8; }
# BCLR: clear masked bits in memory; N/Z from the result, V cleared.
:BCLR opr8a_8, msk8 is Prefix18=0 & (op8=0x4D); opr8a_8; msk8 { op1:1 = opr8a_8; op1 = op1 & ~msk8; opr8a_8 = op1; $(N) = (op1 s< 0); $(Z) = (op1 == 0); V_equals_0(); }
:BCLR opr16a_8, msk8 is Prefix18=0 & (op8=0x1D); opr16a_8; msk8 { op1:1 = opr16a_8; op1 = op1 & ~msk8; opr16a_8 = op1; $(N) = (op1 s< 0); $(Z) = (op1 == 0); V_equals_0(); }
:BCLR indexed1_3, msk8 is Prefix18=0 & (op8=0x0D); indexed1_3; msk8 { op1:1 = indexed1_3; op1 = op1 & ~msk8; indexed1_3 = op1; $(N) = (op1 s< 0); $(Z) = (op1 == 0); V_equals_0(); }
:BCS rel8 is Prefix18=0 & op8=0x25; rel8 { if ($(C) == 1) goto rel8; }
:BEQ rel8 is Prefix18=0 & op8=0x27; rel8 { if ($(Z) == 1) goto rel8; }
# BGE: branch if signed greater-or-equal, i.e. N ^ V == 0 (CPU12 ref).
# FIX(review): previously tested (N ^ V) == 1, which is BLT's condition
# (see :BLT later in this file) and inverted the branch sense.
:BGE rel8 is Prefix18=0 & op8=0x2C; rel8 { if (($(N) ^ $(V)) == 0) goto rel8; }
@if defined(HCS12)
:BGND is Prefix18=0 & op8=0x00 { BDM_return:$(SIZE) = inst_next;
# this could return BDM location, or BDM_return
PCE = backgroundDebugMode(BDM_return); goto [PCE]; } @endif
:BGT rel8 is Prefix18=0 & op8=0x2E; rel8 { if (($(Z) | ($(N) ^ $(V))) == 0) goto rel8; }
:BHI rel8 is Prefix18=0 & op8=0x22; rel8 { if (($(C) | $(Z)) == 0) goto rel8; }
#:BHS rel8 is op8=0x24; rel8 See BCC
# BITA/BITB: AND for flag effect only; the operands are not modified.
:BITA iopr8i is Prefix18=0 & (op8=0x85); iopr8i { result:1 = A & iopr8i; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITA opr8a_8 is Prefix18=0 & (op8=0x95); opr8a_8 { result:1 = A & opr8a_8; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITA opr16a_8 is Prefix18=0 & (op8=0xB5); opr16a_8 { result:1 = A & opr16a_8; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITA indexed1_5 is Prefix18=0 & (op8=0xA5); indexed1_5 { result:1 = A & indexed1_5; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITB iopr8i is Prefix18=0 & (op8=0xC5); iopr8i {
result:1 = B & iopr8i; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITB opr8a_8 is Prefix18=0 & (op8=0xD5); opr8a_8 { result:1 = B & opr8a_8; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITB opr16a_8 is Prefix18=0 & (op8=0xF5); opr16a_8 { result:1 = B & opr16a_8; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
:BITB indexed1_5 is Prefix18=0 & (op8=0xE5); indexed1_5 { result:1 = B & indexed1_5; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); }
# HCS12X 16-bit bit-test against X and Y.
@if defined(HCS12X)
:BITX iopr16i is Prefix18=1 & (op8=0x85); iopr16i { local result = IX & iopr16i; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITX opr8a_16 is Prefix18=1 & (op8=0x95); opr8a_16 { local result = IX & opr8a_16; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITX opr16a_16 is Prefix18=1 & (op8=0xB5); opr16a_16 { local result = IX & opr16a_16; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITX indexed2_5 is Prefix18=1 & (op8=0xA5); indexed2_5 { local result = IX & indexed2_5; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITY iopr16i is Prefix18=1 & (op8=0xC5); iopr16i { local result = IY & iopr16i; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITY opr8a_16 is Prefix18=1 & (op8=0xD5); opr8a_16 { local result = IY & opr8a_16; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITY opr16a_16 is Prefix18=1 & (op8=0xF5); opr16a_16 { local result = IY & opr16a_16; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:BITY indexed2_5 is Prefix18=1 & (op8=0xE5); indexed2_5 { local result = IY & indexed2_5; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_0(); } @endif
:BLE rel8 is Prefix18=0 & op8=0x2F; rel8 { if ($(Z) | ($(N) ^ $(V))) goto rel8; }
#:BLO rel8 is op8=0x25; rel8 see BCS
:BLS rel8 is Prefix18=0 & op8=0x23; rel8 { if (($(C) | $(Z)) == 1) goto rel8; }
:BLT rel8 is Prefix18=0 & op8=0x2D; rel8 { if (($(N) ^ $(V)) ==1) goto rel8; }
:BMI rel8 is Prefix18=0 & op8=0x2B; rel8 { if ($(N) == 1) goto rel8; }
:BNE rel8 is Prefix18=0 & op8=0x26; rel8 { if ($(Z) == 0) goto rel8; }
:BPL rel8 is Prefix18=0 & op8=0x2A; rel8 { if ($(N) == 0) goto rel8; }
:BRA rel8 is Prefix18=0 & op8=0x20; rel8 { goto rel8; }
# BRCLR: branch if every masked bit is clear.
:BRCLR opr8a_8, msk8, rel8 is Prefix18=0 & op8=0x4F; opr8a_8; msk8; rel8 { result:1 = opr8a_8 & msk8; if (result == 0) goto rel8; }
:BRCLR opr16a_8, msk8, rel8 is Prefix18=0 & op8=0x1F; opr16a_8; msk8; rel8 { result:1 = opr16a_8 & msk8; if (result == 0) goto rel8; }
:BRCLR indexed1_3, msk8, rel8 is Prefix18=0 & op8=0x0F; indexed1_3; msk8; rel8 { result:1 = indexed1_3 & msk8; if (result == 0) goto rel8; }
# branch never is a two-byte nop
SkipNextInstr: dest is epsilon [ dest = inst_next + 1; ] { export *[RAM]:1 dest; }
:BRN SkipNextInstr is Prefix18=0 & op8=0x21 & SkipNextInstr { goto SkipNextInstr; }
# BRSET: branch if every masked bit is set (test via complement).
:BRSET opr8a_8, msk8, rel8 is Prefix18=0 & op8=0x4E; opr8a_8; msk8; rel8 { result:1 = ~opr8a_8 & msk8; if (result != 0) goto rel8; }
:BRSET opr16a_8, msk8, rel8 is Prefix18=0 & op8=0x1E; opr16a_8; msk8; rel8 { result:1 = ~opr16a_8 & msk8; if (result != 0) goto rel8; }
:BRSET indexed1_3, msk8, rel8 is Prefix18=0 & op8=0x0E; indexed1_3; msk8; rel8 { result:1 = ~indexed1_3 & msk8; if (result != 0) goto rel8; }
:BSET opr8a_8, msk8 is Prefix18=0 & op8=0x4C; opr8a_8; msk8 { result:1 = opr8a_8 | msk8; opr8a_8 = result; $(N) = (result s< 0); $(Z) = (result == 0); V_equals_0(); }
:BSET opr16a_8, msk8 is Prefix18=0 & op8=0x1C; opr16a_8; msk8 { result:1 = opr16a_8 | msk8; opr16a_8 = result; $(N) = (result s< 0); $(Z) = (result == 0); V_equals_0(); }
:BSET indexed1_3, msk8 is Prefix18=0 & op8=0x0C; indexed1_3; msk8 { result:1 = indexed1_3 | msk8; indexed1_3 = result; $(N) = (result s< 0); $(Z) = (result == 0); V_equals_0(); }
# BSR pushes the return address, then calls.
:BSR rel8 is Prefix18=0 & op8=0x07; rel8 { tmp:2 = inst_next; Push2( tmp ); call rel8; }
# BTAS: test bits then set them (HCS12X).
@if defined(HCS12X)
:BTAS opr8a_8, msk8 is Prefix18=1 & (op8=0x35); opr8a_8; msk8 { op1:1 = opr8a_8; tmp:1 = op1 & msk8; $(N) = tmp[7,1]; $(Z) = (tmp == 0); $(V) = 0; opr8a_8 = op1 | msk8; } @endif
@if defined(HCS12X)
:BTAS opr16a_8, msk8 is Prefix18=1 & (op8=0x36); opr16a_8; msk8 { op1:1 = opr16a_8; tmp:1 = op1 & msk8; $(N) = tmp[7,1]; $(Z) = (tmp == 0); $(V) = 0; opr16a_8 = op1 | msk8; } @endif
@if defined(HCS12X)
:BTAS indexed1_3, msk8 is Prefix18=1 & (op8=0x37); indexed1_3; msk8 { op1:1 = indexed1_3; tmp:1 = op1 & msk8; $(N) = tmp[7,1]; $(Z) = (tmp == 0); $(V) = 0; indexed1_3 = op1 | msk8; } @endif
:BVC rel8 is Prefix18=0 & op8=0x28; rel8 { if ($(V) == 0) goto rel8; }
:BVS rel8 is Prefix18=0 & op8=0x29; rel8 { if ($(V) == 1) goto rel8; }
# CALL: far call — pushes return address and PPAGE, installs the new
# page, then calls through the paged address; PPAGE is restored so the
# decompiler sees it unchanged across the call.
@ifdef HCS12
CallDest: PageDest, imm8 is (imm16; imm8) & PageDest { PPAGE = imm8; build PageDest; export PageDest; }
:CALL CallDest is Prefix18=0 & op8=0x4A; CallDest { tmp:2 = inst_next; Push2( tmp ); local ppage_tmp:1 = PPAGE; Push1( PPAGE ); build CallDest; local dest:$(SIZE) = CallDest; call [dest]; PPAGE = ppage_tmp; }
:CALL indexed2_3, page is Prefix18=0 & (op8=0x4B); indexed2_3; page { tmp:2 = inst_next; Push2( tmp ); local ppage_tmp:1 = PPAGE; Push1( PPAGE ); PPAGE = page; build indexed2_3; local dest:$(SIZE); GetPagedAddr(indexed2_3,dest); call [dest]; PPAGE = ppage_tmp; }
:CALL indexed0_2 is Prefix18=0 & (op8=0x4B); indexed0_2 { tmp:2 = inst_next; Push2( tmp ); local ppage_tmp:1 = PPAGE; Push1( PPAGE ); Load1(PPAGE, indexed0_2 + 2); local addr:2; Load2(addr,indexed0_2); local dest:$(SIZE); GetPagedAddr(addr,dest); call [dest]; PPAGE = ppage_tmp; }
@endif
:CBA is (Prefix18=1 & op8=0x17) { tmp:1 = A - B; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(A, B); $(C) = (B > A); }
# CLC/CLI: dedicated mnemonics for the ANDCC immediate encodings.
:CLC is Prefix18=0 & op16=0x10FE { $(C) = 0; }
:CLI is Prefix18=0 & op16=0x10EF { $(I) = 0; }
:CLR opr16a_8 is Prefix18=0 & (op8=0x79); opr16a_8 { opr16a_8 = 0; $(N) = 0; $(Z) = 1;
V_equals_0(); $(C) = 0; }
# CLR family: zero the operand/register; N=0, Z=1, V=0, C=0.
:CLR indexed1_5 is Prefix18=0 & (op8=0x69); indexed1_5 { indexed1_5 = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; }
:CLRA is Prefix18=0 & op8=0x87 { A = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; }
:CLRB is Prefix18=0 & op8=0xC7 { B = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; }
@if defined(HCS12X)
:CLRW opr16a_16 is Prefix18=1 & (op8=0x79); opr16a_16 { opr16a_16 = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; } @endif
@if defined(HCS12X)
:CLRW indexed2_5 is Prefix18=1 & (op8=0x69); indexed2_5 { indexed2_5 = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; } @endif
@if defined(HCS12X)
:CLRX is Prefix18=1 & op8=0x87 { IX = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; } @endif
@if defined(HCS12X)
:CLRY is Prefix18=1 & op8=0xC7 { IY = 0; $(N) = 0; $(Z) = 1; V_equals_0(); $(C) = 0; } @endif
:CLV is Prefix18=0 & op16=0x10FD { $(V) = 0; }
# CMP: compare via subtraction; C is the unsigned borrow (op1 > reg).
:CMPA iopr8i is Prefix18=0 & (op8=0x81); iopr8i { op1:1 = iopr8i; tmp:1 = A - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(A, op1); $(C) = (op1 > A); }
:CMPA opr8a_8 is Prefix18=0 & (op8=0x91); opr8a_8 { op1:1 = opr8a_8; tmp:1 = A - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(A, op1); $(C) = (op1 > A); }
:CMPA opr16a_8 is Prefix18=0 & (op8=0xB1); opr16a_8 { op1:1 = opr16a_8; tmp:1 = A - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(A, op1); $(C) = (op1 > A); }
:CMPA indexed1_5 is Prefix18=0 & (op8=0xA1); indexed1_5 { op1:1 = indexed1_5; tmp:1 = A - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(A, op1); $(C) = (op1 > A); }
:CMPB iopr8i is Prefix18=0 & (op8=0xC1); iopr8i { op1:1 = iopr8i; tmp:1 = B - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(B, op1); $(C) = (op1 > B); }
:CMPB opr8a_8 is Prefix18=0 & (op8=0xD1); opr8a_8 { op1:1 = opr8a_8; tmp:1 = B - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(B, op1); $(C) = (op1 > B); }
:CMPB opr16a_8 is Prefix18=0 & (op8=0xF1); opr16a_8 { op1:1 = opr16a_8; tmp:1 = B - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(B, op1); $(C) = (op1 > B); }
:CMPB indexed1_5 is Prefix18=0 & (op8=0xE1); indexed1_5 { op1:1 = indexed1_5; tmp:1 = B - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); V_CMP_flag(B, op1); $(C) = (op1 > B); }
# COM: one's complement; C is forced to 1 per CPU12.
:COM opr16a_8 is Prefix18=0 & (op8=0x71); opr16a_8 { tmp:1 = ~opr16a_8; opr16a_8 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = 1; V_equals_0(); }
:COM indexed1_5 is Prefix18=0 & (op8=0x61); indexed1_5 { tmp:1 = ~indexed1_5; indexed1_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = 1; V_equals_0(); }
:COMA is Prefix18=0 & op8=0x41 { A = ~A; $(Z) = (A == 0); $(N) = (A s< 0); $(C) = 1; V_equals_0(); }
:COMB is Prefix18=0 & op8=0x51 { B = ~B; $(Z) = (B == 0); $(N) = (B s< 0); $(C) = 1; V_equals_0(); }
@if defined(HCS12X)
:COMW opr16a_16 is Prefix18=1 & (op8=0x71); opr16a_16 { local tmp = ~opr16a_16; opr16a_16 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = 1; V_equals_0(); } @endif
@if defined(HCS12X)
:COMW indexed2_5 is Prefix18=1 & (op8=0x61); indexed2_5 { local tmp = ~indexed2_5; indexed2_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = 1; V_equals_0(); } @endif
@if defined(HCS12X)
:COMX is Prefix18=1 & op8=0x41 { IX = ~IX; $(Z) = (IX == 0); $(N) = (IX s< 0); $(C) = 1; V_equals_0(); } @endif
@if defined(HCS12X)
:COMY is Prefix18=1 & op8=0x51 { IY = ~IY; $(Z) = (IY == 0); $(N) = (IY s< 0); $(C) = 1; V_equals_0(); } @endif
# CPD: 16-bit compare of D against memory/immediate.
:CPD iopr16i is Prefix18=0 & (op8=0x8C); iopr16i { op1:2 = iopr16i; tmp:2 = D - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); }
:CPD opr8a_16 is Prefix18=0 & (op8=0x9C); opr8a_16 { op1:2 = opr8a_16; tmp:2 = D - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); }
:CPD opr16a_16 is Prefix18=0 & (op8=0xBC); opr16a_16 { op1:2 = opr16a_16; tmp:2 = D - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); }
:CPD indexed2_5 is Prefix18=0 & (op8=0xAC); indexed2_5 { op1:2 = indexed2_5; tmp:2 = D - op1; $(Z) = (tmp == 0); $(N) = (tmp s<
0); $(C) = (op1 > D); V_CMP_flag(D, op1); }
# CPED/CPES/CPEX/CPEY: HCS12X compare with borrow — the C flag is
# zero-extended into the subtrahend before the compare.
@if defined(HCS12X)
:CPED iopr16i is Prefix18=1 & (op8=0x8C); iopr16i { op1:2 = iopr16i; tmp:2 = D - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); } @endif
@if defined(HCS12X)
:CPED opr8a_16 is Prefix18=1 & (op8=0x9C); opr8a_16 { op1:2 = opr8a_16; tmp:2 = D - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); } @endif
@if defined(HCS12X)
:CPED opr16a_16 is Prefix18=1 & (op8=0xBC); opr16a_16 { op1:2 = opr16a_16; tmp:2 = D - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); } @endif
@if defined(HCS12X)
:CPED indexed2_5 is Prefix18=1 & (op8=0xAC); indexed2_5 { op1:2 = indexed2_5; tmp:2 = D - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > D); V_CMP_flag(D, op1); } @endif
@if defined(HCS12X)
:CPES iopr16i is Prefix18=1 & (op8=0x8F); iopr16i { op1:2 = iopr16i; tmp:2 = SP - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); } @endif
@if defined(HCS12X)
:CPES opr8a_16 is Prefix18=1 & (op8=0x9F); opr8a_16 { op1:2 = opr8a_16; tmp:2 = SP - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); } @endif
@if defined(HCS12X)
:CPES opr16a_16 is Prefix18=1 & (op8=0xBF); opr16a_16 { op1:2 = opr16a_16; tmp:2 = SP - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); } @endif
@if defined(HCS12X)
:CPES indexed2_5 is Prefix18=1 & (op8=0xAF); indexed2_5 { op1:2 = indexed2_5; tmp:2 = SP - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); } @endif
@if defined(HCS12X)
:CPEX iopr16i is Prefix18=1 & (op8=0x8E); iopr16i { op1:2 = iopr16i; tmp:2 = IX - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); } @endif
@if defined(HCS12X)
:CPEX opr8a_16 is Prefix18=1 & (op8=0x9E); opr8a_16 { op1:2 = opr8a_16; tmp:2 = IX - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); } @endif
@if defined(HCS12X)
:CPEX opr16a_16 is Prefix18=1 & (op8=0xBE); opr16a_16 { op1:2 = opr16a_16; tmp:2 = IX - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); } @endif
@if defined(HCS12X)
:CPEX indexed2_5 is Prefix18=1 & (op8=0xAE); indexed2_5 { op1:2 = indexed2_5; tmp:2 = IX - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); } @endif
@if defined(HCS12X)
:CPEY iopr16i is Prefix18=1 & (op8=0x8D); iopr16i { op1:2 = iopr16i; tmp:2 = IY - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); } @endif
@if defined(HCS12X)
:CPEY opr8a_16 is Prefix18=1 & (op8=0x9D); opr8a_16 { op1:2 = opr8a_16; tmp:2 = IY - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); } @endif
@if defined(HCS12X)
:CPEY opr16a_16 is Prefix18=1 & (op8=0xBD); opr16a_16 { op1:2 = opr16a_16; tmp:2 = IY - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); } @endif
@if defined(HCS12X)
:CPEY indexed2_5 is Prefix18=1 & (op8=0xAD); indexed2_5 { op1:2 = indexed2_5; tmp:2 = IY - (op1 + zext($(C))); $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); } @endif
# Helper: address of the instruction after the next two bytes — used to
# model CPS immediate/extended forms that consume an inline operand.
SkipNext2Bytes: dest is epsilon [ dest = inst_next + 2; ] { export *[RAM]:1 dest; }
:CPS loc is Prefix18=0 & (op8=0x8F) & SkipNext2Bytes [ loc = (inst_next & 0xffffff); ] { local addr:$(SIZE) = inst_next; local op1:2 = *[RAM]:2 addr; tmp:2 = SP - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); goto SkipNext2Bytes; }
:CPS opr8a_16 is Prefix18=0 & (op8=0x9F); opr8a_16 { op1:2 = opr8a_16; tmp:2 = SP - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); }
:CPS loc is Prefix18=0 & (op8=0xBF) & SkipNext2Bytes [ loc = (inst_next & 0xffffff); ] { local addr:$(SIZE) = inst_next; local op1:2 = *[RAM]:2 addr; Load2(op1, op1); tmp:2 = SP - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); goto SkipNext2Bytes; }
:CPS indexed2_5 is Prefix18=0 & (op8=0xAF); indexed2_5 { op1:2 = indexed2_5; tmp:2 = SP - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > SP); V_CMP_flag(SP, op1); }
:CPX iopr16i is Prefix18=0 & (op8=0x8E); iopr16i { op1:2 = iopr16i; tmp:2 = IX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); }
:CPX opr8a_16 is Prefix18=0 & (op8=0x9E); opr8a_16 { op1:2 = opr8a_16; tmp:2 = IX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); }
:CPX opr16a_16 is Prefix18=0 & (op8=0xBE); opr16a_16 { op1:2 = opr16a_16; tmp:2 = IX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); }
:CPX indexed2_5 is Prefix18=0 & (op8=0xAE); indexed2_5 { op1:2 = indexed2_5; tmp:2 = IX - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IX); V_CMP_flag(IX, op1); }
:CPY iopr16i is Prefix18=0 & (op8=0x8D); iopr16i { op1:2 = iopr16i; tmp:2 = IY - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); }
:CPY opr8a_16 is Prefix18=0 & (op8=0x9D); opr8a_16 { op1:2 = opr8a_16; tmp:2 = IY - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); }
:CPY opr16a_16 is Prefix18=0 & (op8=0xBD); opr16a_16 { op1:2 = opr16a_16; tmp:2 = IY - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); }
:CPY indexed2_5 is Prefix18=0 & (op8=0xAD); indexed2_5 { op1:2 = indexed2_5; tmp:2 = IY - op1; $(Z) = (tmp == 0); $(N) = (tmp s< 0); $(C) = (op1 > IY); V_CMP_flag(IY, op1); }
# DAA: BCD adjust via opaque pcodeops.
# NOTE(review): decimalAdjustCarry receives the already-adjusted A —
# confirm the intended operand order against the upstream spec.
:DAA is Prefix18=1 & op8=0x07 { A = decimalAdjustAccumulator(A, $(C), $(H)); $(C) = decimalAdjustCarry(A, $(C), $(H)); $(Z) = (A == 0); $(N) = (A s< 0);
#V is undefined
}
:DBEQ byte9_8, rel9 is
Prefix18=0 & op8=0x04; op15_13=0x0 & size10_10=0 & byte9_8 & rel9 { byte9_8 = byte9_8 - 1; if (byte9_8 == 0) goto rel9; }
# DBEQ/DBNE: decrement counter then branch on zero / nonzero; the
# size10_10 field selects byte vs word counter registers.
:DBEQ word9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x0 & size10_10=1 & word9_8 & rel9 { word9_8 = word9_8 - 1; if (word9_8 == 0) goto rel9; }
:DBNE byte9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x1 & size10_10=0 & byte9_8 & rel9 { byte9_8 = byte9_8 - 1; if (byte9_8 != 0) goto rel9; }
:DBNE word9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x1 & size10_10=1 & word9_8 & rel9 { word9_8 = word9_8 - 1; if (word9_8 != 0) goto rel9; }
# DEC: V flag computed from the pre-decrement value (overflow at 0x80).
:DEC opr16a_8 is Prefix18=0 & (op8=0x73); opr16a_8 { tmp:1 = opr16a_8; result:1 = tmp - 1; opr16a_8 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_DEC_flag(tmp); }
:DEC indexed1_5 is Prefix18=0 & (op8=0x63); indexed1_5 { tmp:1 = indexed1_5; result:1 = tmp - 1; indexed1_5 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_DEC_flag(tmp); }
:DECA is Prefix18=0 & op8=0x43 { tmp:1 = A; A = tmp - 1; $(Z) = (A == 0); $(N) = (A s< 0); V_DEC_flag(tmp); }
:DECB is Prefix18=0 & op8=0x53 { tmp:1 = B; B = tmp - 1; $(Z) = (B == 0); $(N) = (B s< 0); V_DEC_flag(tmp); }
@if defined(HCS12X)
:DECW opr16a_16 is Prefix18=1 & (op8=0x73); opr16a_16 { local tmp = opr16a_16; local result = tmp - 1; opr16a_16 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_DEC_flag2(tmp); } @endif
@if defined(HCS12X)
:DECW indexed2_5 is Prefix18=1 & (op8=0x63); indexed2_5 { local tmp = indexed2_5; local result = tmp - 1; indexed2_5 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_DEC_flag2(tmp); } @endif
@if defined(HCS12X)
:DECX is Prefix18=1 & op8=0x43 { local tmp = IX; IX = tmp - 1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_DEC_flag2(tmp); } @endif
@if defined(HCS12X)
:DECY is Prefix18=1 & op8=0x53 { local tmp = IY; IY = tmp - 1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_DEC_flag2(tmp); } @endif
# DES affects no flags; DEX/DEY affect only Z (per CPU12).
:DES is Prefix18=0 & op16=0x1B9F { SP = SP - 1; }
:DEX is Prefix18=0 & op8=0x09 { IX = IX - 1; $(Z) = (IX == 0); }
:DEY is Prefix18=0 & op8=0x03
{ IY = IY - 1; $(Z) = (IY == 0); }
# EDIV: unsigned 32/16 divide, Y:D / X -> quotient Y, remainder D.
# V set when the quotient exceeds 16 bits; C flags division by zero.
:EDIV is Prefix18=0 & op8=0x11 { tmp:4 = (zext(IY) << 16) | (zext(D)); resultQ:4 = tmp / zext(IX); resultR:4 = tmp % zext(IX); IY = resultQ:2; D = resultR:2; $(N) = (IY s< 0); $(Z) = (IY == 0); $(V) = (resultQ > 0x0000FFFF); $(C) = (IX == 0); }
# EDIVS: signed 32/16 divide. V is set when the quotient does not fit
# in a signed 16-bit value.
# FIX(review): the lower bound was (resultQ s< 0x00008000); combined
# with the upper bound that made V unconditionally true for any 32-bit
# value. The intended bound is -0x8000, i.e. 0xFFFF8000 as a 32-bit
# signed constant.
:EDIVS is Prefix18=1 & op8=0x14 { tmp:4 = (zext(IY) << 16) | (zext(D)); resultQ:4 = tmp s/ sext(IX); resultR:4 = tmp s% sext(IX); IY = resultQ:2; D = resultR:2; $(N) = (IY s< 0); $(Z) = (IY == 0); $(V) = (resultQ s> 0x00007FFF) | (resultQ s< 0xFFFF8000); $(C) = (IX == 0); }
# EMACS: signed 16x16 multiply of M(X) and M(Y), stored at opr16a.
# NOTE(review): per the CPU12 reference EMACS accumulates into M~M+3
# ((M(X) x M(Y)) + M~M+3 -> M~M+3); this model stores the bare product.
# Also the V/C comparisons can never be true for a 4-byte result (a
# wider intermediate would be required). Left as-is pending confirmation
# against the upstream Ghidra spec.
:EMACS opr16a is Prefix18=1 & op8=0x12; opr16a { local valx:2 = 0; local valy:2 = 0; Load2(valx,IX); Load2(valy,IY); result:4 = sext(valx) * sext(valy); Store(opr16a, result); $(N) = (result s< 0); $(Z) = (result == 0); $(V) = (result s> 0x000000007FFFFFFF) | (result s< 0x0000000080000000); $(C) = (result > 0x00000000FFFFFFFF); }
# EMAXD/EMAXM/EMIND/EMINM: unsigned 16-bit max/min of D and M; flags
# come from the subtraction D - M (C is the unsigned borrow).
# FIX(review): the '<skip>' goto targets were missing (a bare 'goto ;',
# which is not valid SLEIGH); restored. The V lower bound 0x00008000 is
# corrected to 0xFFFF8000 (-0x8000), as in EDIVS above.
:EMAXD indexed2_5 is Prefix18=1 & op8=0x1A; indexed2_5 { result:4 = zext(D) - zext(indexed2_5); if (D > indexed2_5) goto <skip>; D = indexed2_5; <skip> $(N) = (result:2 s< 0); $(Z) = (result:2 == 0); $(V) = (result s> 0x00007FFF) | (result s< 0xFFFF8000); $(C) = (result > 0x0000FFFF); }
:EMAXM indexed2_5 is Prefix18=1 & op8=0x1E; indexed2_5 { result:4 = zext(D) - zext(indexed2_5); if (D > indexed2_5) goto <skip>; indexed2_5 = D; <skip> $(N) = (result:2 s< 0); $(Z) = (result:2 == 0); $(V) = (result s> 0x00007FFF) | (result s< 0xFFFF8000); $(C) = (result > 0x0000FFFF); }
:EMIND indexed2_5 is Prefix18=1 & op8=0x1B; indexed2_5 { result:4 = zext(D) - zext(indexed2_5); if (D < indexed2_5) goto <skip>; D = indexed2_5; <skip> $(N) = (result:2 s< 0); $(Z) = (result:2 == 0); $(V) = (result s> 0x00007FFF) | (result s< 0xFFFF8000); $(C) = (result > 0x0000FFFF); }
:EMINM indexed2_5 is Prefix18=1 & op8=0x1F; indexed2_5 { result:4 = zext(D) - zext(indexed2_5); if (D < indexed2_5) goto <skip>; indexed2_5 = D; <skip> $(N) = (result:2 s< 0); $(Z) = (result:2 == 0); $(V) = (result s> 0x00007FFF) | (result s< 0xFFFF8000); $(C) = (result > 0x0000FFFF); }
:EMUL is
Prefix18=0 & op8=0x13 { result:4 = zext(D) * zext(IY); IY = result(2); D = result:2; $(N) = result[31,1]; $(Z) = (result == 0); $(C) = result[15,1]; }
# EMULS: signed DxY -> Y:D. result(2) takes the high word, result:2 the
# low word; C mirrors bit 15 of the 32-bit product.
:EMULS is Prefix18=1 & op8=0x13 { result:4 = sext(D) * sext(IY); IY = result(2); D = result:2; $(N) = result[31,1]; $(Z) = (result == 0); $(C) = result[15,1]; }
# EOR: exclusive OR; N/Z from result, V cleared.
:EORA iopr8i is Prefix18=0 & (op8=0x88); iopr8i { op1:1 = iopr8i; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
:EORA opr8a_8 is Prefix18=0 & (op8=0x98); opr8a_8 { op1:1 = opr8a_8; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
:EORA opr16a_8 is Prefix18=0 & (op8=0xB8); opr16a_8 { op1:1 = opr16a_8; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
:EORA indexed1_5 is Prefix18=0 & (op8=0xA8); indexed1_5 { op1:1 = indexed1_5; A = A ^ op1; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); }
:EORB iopr8i is Prefix18=0 & (op8=0xC8); iopr8i { op1:1 = iopr8i; B = B ^ op1; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); }
:EORB opr8a_8 is Prefix18=0 & (op8=0xD8); opr8a_8 { op1:1 = opr8a_8; B = B ^ op1; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); }
:EORB opr16a_8 is Prefix18=0 & (op8=0xF8); opr16a_8 { op1:1 = opr16a_8; B = B ^ op1; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); }
:EORB indexed1_5 is Prefix18=0 & (op8=0xE8); indexed1_5 { op1:1 = indexed1_5; B = B ^ op1; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); }
@if defined(HCS12X)
:EORX iopr16i is Prefix18=1 & (op8=0x88); iopr16i { local op1 = iopr16i; IX = IX ^ op1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORX opr8a_16 is Prefix18=1 & (op8=0x98); opr8a_16 { local op1 = opr8a_16; IX = IX ^ op1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORX opr16a_16 is Prefix18=1 & (op8=0xB8); opr16a_16 { local op1 = opr16a_16; IX = IX ^ op1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORX indexed2_5 is Prefix18=1 & (op8=0xA8); indexed2_5 { local op1 = indexed2_5; IX = IX ^ op1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORY iopr16i is Prefix18=1 & (op8=0xC8); iopr16i { local op1 = iopr16i; IY = IY ^ op1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORY opr8a_16 is Prefix18=1 & (op8=0xD8); opr8a_16 { local op1 = opr8a_16; IY = IY ^ op1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORY opr16a_16 is Prefix18=1 & (op8=0xF8); opr16a_16 { local op1 = opr16a_16; IY = IY ^ op1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } @endif
@if defined(HCS12X)
:EORY indexed2_5 is Prefix18=1 & (op8=0xE8); indexed2_5 { local op1 = indexed2_5; IY = IY ^ op1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } @endif
# ETBL: table lookup/interpolation modeled with opaque pcodeops.
:ETBL indexed2_1 is Prefix18=1 & op8=0x3F; indexed2_1 { D = ETBL( indexed2_1, B ); $(N) = (D s< 0); $(Z) = (D == 0); $(C) = ETBL_Cflag( indexed2_1, B ); }
# EXG special cases: the post byte (rows3_0/columns7_4) selects the
# register pair; these encodings deviate from the regular row/column
# behavior and are modeled individually.
# this case 'C0' or 'C8', does not display similarly to other members of either its row or column
:EXG D, A is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x0 & ( columns7_4=0xC ) ) | ( rows3_0=0x8 & ( columns7_4=0xC ) ) ) & D & A { tmp:1 = B; B = A; A = tmp; }
# this case 'C1' or 'C9', does not work similarly to other members of either its row or column
:EXG D, B is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x1 & ( columns7_4=0xC ) ) | ( rows3_0=0x9 & ( columns7_4=0xC ) ) ) & D & B { B = B; A = 0xFF; }
# this case '84' or '8C', does not work similarly to other members of either its row or column
:EXG A, D is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x4 & ( columns7_4=0x8 ) ) | ( rows3_0=0xC & ( columns7_4=0x8 ) ) ) & A & D { D = zext(A); }
# this case '94' or '9C', does not work similarly to other members of either its row or column
:EXG B, D is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x4 & ( columns7_4=0x9 ) ) | ( rows3_0=0xC & ( columns7_4=0x9 ) ) ) & B & D { D = zext(B); }
# this case 'A8', does not work the same as 'A0'
:EXG CCRH, A is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x8 & ( columns7_4=0xA ) ) ) & CCRH & A { tmp:1 = CCRH; CCRH = A; A = tmp; }
# this case '8A', does not work the same as '82'
:EXG A, CCRH is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xA & ( columns7_4=0x8 ) ) ) & A & CCRH { tmp:1 = A; A = CCRH; CCRH = tmp; }
# this case 'AA', does not display the same as 'A2'
:EXG CCRW, "CCRW" is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xA & ( columns7_4=0xA ) ) ) & CCRW { CCRW = CCRW; }
:EXG bytes_ABCl_6_4, bytes_ABCl_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x0 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0x1 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0x8 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0x9 & ( columns7_4=0x8 | columns7_4=0x9 ) ) ) & bytes_ABCl_6_4 & bytes_ABCl_2_0 { tmp:1 = bytes_ABCl_2_0; bytes_ABCl_2_0 = bytes_ABCl_6_4; bytes_ABCl_6_4 = tmp; }
:EXG bytes_ABCl_6_4, CCR is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x2 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0xA & ( columns7_4=0x9 ) ) ) & bytes_ABCl_6_4 & CCR { tmp:1 = bytes_ABCl_6_4; bytes_ABCl_6_4 = CCR; setCCR( tmp ); }
:EXG CCR, bytes_ABCl_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x0 & ( columns7_4=0xA ) ) | ( rows3_0=0x1 & ( columns7_4=0xA ) ) | ( rows3_0=0x2 & ( columns7_4=0xA ) ) | ( rows3_0=0x9 & ( columns7_4=0xA ) ) ) & CCR & bytes_ABCl_2_0 { tmp:1 = bytes_ABCl_2_0; bytes_ABCl_2_0 = CCR; setCCR( tmp ); }
:EXG bytes_T3l_XlYlSl_6_4, A is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x0 & ( columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & bytes_T3l_XlYlSl_6_4 & words_T3_XYS_6_4 & A { tmp:2 = zext(A); A = bytes_T3l_XlYlSl_6_4; words_T3_XYS_6_4 = tmp; }
:EXG bytes_T3h_XhYhSh_6_4, A is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x8 & ( columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & bytes_T3h_XhYhSh_6_4 & words_T3_XYS_6_4 & A { tmp:2 = zext(A); A = bytes_T3h_XhYhSh_6_4; words_T3_XYS_6_4 = tmp; }
:EXG bytes_T3l_XlYlSl_6_4, B is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x1 & (
columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & bytes_T3l_XlYlSl_6_4 & words_T3_XYS_6_4 & B { tmp:2 = 0xFF00 | zext(B); B = bytes_T3l_XlYlSl_6_4; words_T3_XYS_6_4 = tmp; }
:EXG bytes_T3l_XlYlSl_6_4, B is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x9 & ( columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & bytes_T3l_XlYlSl_6_4 & B { tmp:1 = B; B = bytes_T3l_XlYlSl_6_4; bytes_T3l_XlYlSl_6_4 = tmp; }
:EXG bytes_T3lDlXlYlSl_6_4, CCR is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x2 & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & bytes_T3lDlXlYlSl_6_4 & words_T3DXYS_6_4 & CCR { tmp:2 = 0xFF00 | zext(CCR);
# when CCR is the destination, cannot set the X bit unless it is already set in CCR
setCCR( bytes_T3lDlXlYlSl_6_4 ); words_T3DXYS_6_4 = tmp; }
:EXG words_T3DXYS_6_4, CCRW is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xA & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & words_T3DXYS_6_4 & CCRW { tmp:2 = CCRW; setCCRW( words_T3DXYS_6_4 ); words_T3DXYS_6_4 = tmp; }
# this case 'CB', does not work similarly to other members of either its row or column
:EXG D, TMP1 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0xC ) ) ) & D & TMP1 { tmp:2 = D; D = TMP1; TMP1 = tmp; }
# this case 'BC', does not work similarly to other members of either its row or column
:EXG TMP1, D is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xC & ( columns7_4=0xB ) ) ) & TMP1 & D { tmp:2 = TMP1; TMP1 = D; D = tmp; }
# General word-to-word register exchange across the remaining post-byte
# row/column combinations.
:EXG words_T3DXYS_6_4, words_T2DXYS_2_0 is Prefix18=0 & ( op8=0xB7 ); (
# Case "C5" is handled by XGDX
# Case "C6" is handled by XGDY
( rows3_0=0x3 & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0x4 & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0x5 & ( columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0x6 & ( columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0x7 & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0xB & ( columns7_4=0xB | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0xC & ( columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0xD & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0xE & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) | ( rows3_0=0xF & ( columns7_4=0xB | columns7_4=0xC | columns7_4=0xD | columns7_4=0xE | columns7_4=0xF ) ) ) & words_T3DXYS_6_4 & words_T2DXYS_2_0 { tmp:2 = words_T3DXYS_6_4; words_T3DXYS_6_4 = words_T2DXYS_2_0; words_T2DXYS_2_0 = tmp; }
:EXG bytes_ABCl_6_4, words_T2DXYS_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x3 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0x5 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0x6 & ( columns7_4=0x8 | columns7_4=0x9 ) ) | ( rows3_0=0x7 & ( columns7_4=0x8 | columns7_4=0x9 ) ) ) & bytes_ABCl_6_4 & words_T2DXYS_2_0 & bytes_T2lDlXlYlSl_2_0 { tmp:2 = zext(bytes_ABCl_6_4); bytes_ABCl_6_4 = bytes_T2lDlXlYlSl_2_0; words_T2DXYS_2_0 = tmp; }
:EXG bytes_ABCl_6_4, bytes_T2hDhXhYhSh_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0x8 ) ) | ( rows3_0=0xD & ( columns7_4=0x8 ) ) | ( rows3_0=0xE & ( columns7_4=0x8 ) ) | ( rows3_0=0xF & ( columns7_4=0x8 ) ) ) & bytes_ABCl_6_4 & bytes_T2hDhXhYhSh_2_0 { tmp:1 = bytes_ABCl_6_4; bytes_ABCl_6_4 = bytes_T2hDhXhYhSh_2_0; bytes_T2hDhXhYhSh_2_0 = tmp; }
# only column 9 with rows B, (skip C), D, E, and F
:EXG bytes_ABCl_6_4, bytes_T2lDlXlYlSl_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0x9 ) ) | ( rows3_0=0xD & ( columns7_4=0x9 ) ) | ( rows3_0=0xE & ( columns7_4=0x9 ) ) | ( rows3_0=0xF & ( columns7_4=0x9 ) ) ) & bytes_ABCl_6_4 & bytes_T2lDlXlYlSl_2_0 { tmp:1 = bytes_ABCl_6_4; bytes_ABCl_6_4 = bytes_T2lDlXlYlSl_2_0; bytes_T2lDlXlYlSl_2_0 = tmp; }
:EXG CCR, words_T2DXYS_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x3 & ( columns7_4=0xA ) ) | ( rows3_0=0x4 & ( columns7_4=0xA ) ) | ( rows3_0=0x5 & ( columns7_4=0xA ) ) | ( rows3_0=0x6 & ( columns7_4=0xA ) ) | ( rows3_0=0x7 & ( columns7_4=0xA ) ) ) & CCR & words_T2DXYS_2_0 & bytes_T2lDlXlYlSl_2_0 { tmp:2 = zext(CCR); setCCR( bytes_T2lDlXlYlSl_2_0 ); words_T2DXYS_2_0 = tmp; }
:EXG CCRW, words_T2DXYS_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0xA ) ) | ( rows3_0=0xC & ( columns7_4=0xA ) ) | ( rows3_0=0xD & ( columns7_4=0xA ) ) | ( rows3_0=0xE & ( columns7_4=0xA ) ) | ( rows3_0=0xF & ( columns7_4=0xA ) ) ) & CCRW & words_T2DXYS_2_0 { tmp:2 = CCRW; setCCRW( words_T2DXYS_2_0 ); words_T2DXYS_2_0 = tmp; }
# FDIV: 16/16 fractional divide (D << 16) / X; V flags X <= D, C flags
# division by zero. Flags are computed before the quotient is written.
:FDIV is Prefix18=1 & op8=0x11 { $(V) = (IX <= D); $(C) = (IX == 0); tmp:4 = (zext(D) << 16); resultQ:4 = tmp / zext(IX); resultR:4 = tmp % zext(IX); IX = resultQ:2; D = resultR:2; $(Z) = (IX == 0); }
# GPAGE-extended load/store aliases are decoded by the base mnemonics;
# listed here for cross-reference only.
#:GLDAA is op16=0x1896 See GPAGE extended LDAA
#:GLDAA is op16=0x18B6 See GPAGE extended LDAA
#:GLDAA is op16=0x18A6 See GPAGE extended LDAA
#:GLDAB is op16=0x18D6 See GPAGE extended LDAB
#:GLDAB is op16=0x18F6 See GPAGE extended LDAB
#:GLDAB is op16=0x18E6 See GPAGE extended LDAB
#:GLDD is op16=0x18DC See GPAGE extended LDD
#:GLDD is op16=0x18FC See GPAGE extended LDD
#:GLDD is op16=0x18EC See GPAGE extended LDD
#:GLDS is op16=0x18DF See GPAGE extended LDS
#:GLDS is op16=0x18FF See GPAGE extended LDS
#:GLDS is op16=0x18EF See GPAGE extended LDS
#:GLDX is op16=0x18DE See GPAGE extended LDX
#:GLDX is op16=0x18FE See GPAGE extended LDX
#:GLDX is op16=0x18EE See GPAGE extended LDX
#:GLDY is op16=0x18DD See GPAGE extended LDY
#:GLDY is op16=0x18FD See GPAGE extended LDY
#:GLDY is op16=0x18ED See GPAGE extended LDY
#:GSTAA is op16=0x185A See GPAGE extended STAA
#:GSTAA is op16=0x187A See GPAGE extended STAA
#:GSTAA is op16=0x186A See GPAGE extended STAA
#:GSTAB is op16=0x185B See GPAGE extended STAB
#:GSTAB is op16=0x187B See GPAGE extended STAB
#:GSTAB is op16=0x186B See GPAGE extended STAB
#:GSTD is op16=0x185C See GPAGE extended STD
#:GSTD is op16=0x187C See GPAGE extended STD
#:GSTD is op16=0x186C See GPAGE extended STD
#:GSTS is op16=0x185F See GPAGE extended STS
#:GSTS is op16=0x187F See GPAGE extended STS
#:GSTS is op16=0x186F See GPAGE extended STS
#:GSTX is op16=0x185E See GPAGE extended STX
#:GSTX is op16=0x187E See GPAGE extended STX
#:GSTX is op16=0x186E See GPAGE extended STX
#:GSTY is op16=0x185D See GPAGE extended STY
#:GSTY is op16=0x187D See GPAGE extended STY
#:GSTY is op16=0x186D See GPAGE extended STY

# IBEQ/IBNE: increment the counter operand, then branch on (non-)zero.
:IBEQ byte9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x4 & size10_10=0 & byte9_8 & rel9
{
	byte9_8 = byte9_8 + 1;
	if (byte9_8 == 0) goto rel9;
}

:IBEQ word9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x4 & size10_10=1 & word9_8 & rel9
{
	word9_8 = word9_8 + 1;
	if (word9_8 == 0) goto rel9;
}

:IBNE byte9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x5 & size10_10=0 & byte9_8 & rel9
{
	byte9_8 = byte9_8 + 1;
	if (byte9_8 != 0) goto rel9;
}

:IBNE word9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x5 & size10_10=1 & word9_8 & rel9
{
	word9_8 = word9_8 + 1;
	if (word9_8 != 0) goto rel9;
}

# IDIV: unsigned 16/16 divide; quotient -> IX, remainder -> D.
# NOTE(review): division by zero (IX == 0) is not special-cased; the
# hardware yields quotient 0xFFFF in that case -- confirm if that matters
# for analysis before changing the p-code here.
:IDIV is Prefix18=1 & op8=0x10
{
	$(C) = (IX == 0);
	resultQ:2 = D / IX;
	resultR:2 = D % IX;
	IX = resultQ;
	D = resultR;
	$(Z) = (IX == 0);
	$(V) = 0;
}

# IDIVS: signed 16/16 divide; quotient -> IX, remainder -> D.
:IDIVS is Prefix18=1 & op8=0x15
{
	$(C) = (IX == 0);
	resultQ:4 = sext(D) s/ sext(IX);
	resultR:4 = sext(D) s% sext(IX);
	IX = resultQ:2;
	D = resultR:2;
	$(N) = (IX s< 0);
	$(Z) = (IX == 0);
	# Overflow when the 32-bit quotient is outside the signed 16-bit range
	# [-0x8000, 0x7FFF].  The lower bound must be the sign-extended constant
	# 0xFFFF8000 (-0x8000); the previous comparison against 0x00008000
	# (+32768) made this test true for almost every quotient.
	$(V) = (resultQ s> 0x00007FFF) | (resultQ s< 0xFFFF8000);
}

:INC opr16a_8 is Prefix18=0 & (op8=0x72); opr16a_8
{
	tmp:1 = opr16a_8;
	result:1 = tmp + 1;
	opr16a_8 = result;
	$(Z) = (result == 0);
	$(N) = (result s< 0);
	V_INC_flag(tmp);
}

:INC indexed1_5 is Prefix18=0 & (op8=0x62); indexed1_5
{
	tmp:1 = indexed1_5;
	result:1 = tmp + 1;
	indexed1_5 = result;
	$(Z) = (result == 0);
	$(N) = (result s< 0);
	V_INC_flag(tmp);
}

:INCA is Prefix18=0 & op8=0x42 { tmp:1
= A; A = tmp + 1; $(Z) = (A == 0); $(N) = (A s< 0); V_INC_flag(tmp); } :INCB is Prefix18=0 & op8=0x52 { tmp:1 = B; B = tmp + 1; $(Z) = (B == 0); $(N) = (B s< 0); V_INC_flag(tmp); } @if defined(HCS12X) :INCW opr16a_16 is Prefix18=1 & (op8=0x72); opr16a_16 { tmp:2 = opr16a_16; result:2 = tmp + 1; opr16a_16 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_INC_flag2(tmp); } @endif @if defined(HCS12X) :INCW indexed2_5 is Prefix18=1 & (op8=0x62); indexed2_5 { tmp:2 = indexed2_5; result:2 = tmp + 1; indexed2_5 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_INC_flag2(tmp); } @endif @if defined(HCS12X) :INCX is Prefix18=1 & op8=0x42 { local tmp = IX; IX = tmp + 1; $(Z) = (IX == 0); $(N) = (IX s< 0); V_INC_flag2(tmp); } @endif @if defined(HCS12X) :INCY is Prefix18=1 & op8=0x52 { local tmp = IY; IY = tmp + 1; $(Z) = (IY == 0); $(N) = (IY s< 0); V_INC_flag2(tmp); } @endif :INS is Prefix18=0 & op16=0x1B81 { SP = SP + 1; } :INX is Prefix18=0 & op8=0x08 { IX = IX + 1; $(Z) = (IX == 0); } :INY is Prefix18=0 & op8=0x02 { IY = IY + 1; $(Z) = (IY == 0); } :JMP opr16a is Prefix18=0 & (op8=0x06); opr16a { goto [opr16a]; } :JMP indexedA_5 is Prefix18=0 & (op8=0x05); indexedA_5 { goto [indexedA_5]; } :JSR opr8a is Prefix18=0 & (op8=0x17); opr8a { tmp:2 = inst_next; Push2( tmp ); call [opr8a]; } :JSR opr16a is Prefix18=0 & (op8=0x16); opr16a { tmp:2 = inst_next; Push2( tmp ); call [opr16a]; } :JSR indexedA_5 is Prefix18=0 & (op8=0x15); indexedA_5 { tmp:2 = inst_next; Push2( tmp ); call [indexedA_5]; } :LBCC rel16 is Prefix18=1 & op8=0x24; rel16 { if ($(C) == 0) goto rel16; } :LBCS rel16 is Prefix18=1 & op8=0x25; rel16 { if ($(C) == 1) goto rel16; } :LBEQ rel16 is Prefix18=1 & op8=0x27; rel16 { if ($(Z) == 1) goto rel16; } :LBGE rel16 is Prefix18=1 & op8=0x2C; rel16 { if (($(N) ^ $(V)) == 1) goto rel16; } :LBGT rel16 is Prefix18=1 & op8=0x2E; rel16 { if (($(Z) | ($(N) ^ $(V))) == 0) goto rel16; } :LBHI rel16 is Prefix18=1 & op8=0x22; rel16 { if (($(C) | $(Z)) == 0) goto 
rel16; } #:LBHS rel16 is Prefix18=1 & op8=0x24; rel16 See LBCC :LBLE rel16 is Prefix18=1 & op8=0x2F; rel16 { if ($(Z) | ($(N) ^ $(V))) goto rel16; } #:LBLO rel16 is Prefix18=1 & op8=0x25; rel16 see LBCS :LBLS rel16 is Prefix18=1 & op8=0x23; rel16 { if (($(C) | $(Z)) == 1) goto rel16; } :LBLT rel16 is Prefix18=1 & op8=0x2D; rel16 { if (($(N) ^ $(V)) == 1) goto rel16; } :LBMI rel16 is Prefix18=1 & op8=0x2B; rel16 { if ($(N) == 1) goto rel16; } :LBNE rel16 is Prefix18=1 & op8=0x26; rel16 { if ($(Z) == 0) goto rel16; } :LBPL rel16 is Prefix18=1 & op8=0x2A; rel16 { if ($(N) == 0) goto rel16; } :LBRA rel16 is Prefix18=1 & op8=0x20; rel16 { goto rel16; } # branch never is a four-byte nop :LBRN rel16 is Prefix18=1 & op8=0x21; rel16 { } :LBVC rel16 is Prefix18=1 & op8=0x28; rel16 { if ($(V) == 0) goto rel16; } :LBVS rel16 is Prefix18=1 & op8=0x29; rel16 { if ($(V) == 1) goto rel16; } :LDAA iopr8i is Prefix18=0 & (op8=0x86); iopr8i { A = iopr8i; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } GPaged: "G" is Prefix18=1 [ UseGPAGE=1; ] {} GPaged: is Prefix18=0 [ UseGPAGE=0; ] {} :^GPaged^"LDAA" opr8a_8 is GPaged & (op8=0x96); opr8a_8 [ UseGPAGE=Prefix18; ] { build GPaged; A = opr8a_8; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :^GPaged^"LDAA" opr16a_8 is GPaged & (op8=0xB6); opr16a_8 [ UseGPAGE=Prefix18; ] { build GPaged; A = opr16a_8; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :^GPaged^"LDAA" indexed1_5 is GPaged & (op8=0xA6); indexed1_5 [ UseGPAGE=Prefix18; ] { build GPaged; A = indexed1_5; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :LDAB iopr8i is Prefix18=0 & (op8=0xC6); iopr8i { B = iopr8i; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :^GPaged^"LDAB" opr8a_8 is GPaged & (op8=0xD6); opr8a_8 [ UseGPAGE=Prefix18; ] { build GPaged; B = opr8a_8; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :^GPaged^"LDAB" opr16a_8 is GPaged & (op8=0xF6); opr16a_8 [ UseGPAGE=Prefix18; ] { build GPaged; B = opr16a_8; $(Z) = (B == 0); $(N) = (B s< 0); 
V_equals_0(); } :^GPaged^"LDAB" indexed1_5 is GPaged & (op8=0xE6); indexed1_5 [ UseGPAGE=Prefix18; ] { build GPaged; B = indexed1_5; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :LDD iopr16i is Prefix18=0 & (op8=0xCC); iopr16i { D = iopr16i; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } :^GPaged^"LDD" opr8a_16 is GPaged & (op8=0xDC); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; D = opr8a_16; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } :^GPaged^"LDD" opr16a_16 is GPaged & (op8=0xFC); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; D = opr16a_16; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } :^GPaged^"LDD" indexed2_5 is GPaged & (op8=0xEC); indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; D = indexed2_5; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } define pcodeop LoadStack; :LDS iopr16i is Prefix18=0 & (op8=0xCF); iopr16i { SP = LoadStack(iopr16i); $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :^GPaged^"LDS" opr8a_16 is GPaged & (op8=0xDF); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; SP = LoadStack(opr8a_16); $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :^GPaged^"LDS" opr16a_16 is GPaged & (op8=0xFF); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; SP = LoadStack(opr16a_16); $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :^GPaged^"LDS" indexed2_5 is GPaged & (op8=0xEF); indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; SP = LoadStack(indexed2_5); $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :LDX iopr16i is Prefix18=0 & (op8=0xCE); iopr16i { IX = iopr16i; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :^GPaged^"LDX" opr8a_16 is GPaged & (op8=0xDE); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; IX = opr8a_16; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :^GPaged^"LDX" opr16a_16 is GPaged & (op8=0xFE); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; IX = opr16a_16; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :^GPaged^"LDX" indexed2_5 is GPaged & (op8=0xEE); indexed2_5 [ UseGPAGE=Prefix18; 
] { build GPaged; IX = indexed2_5; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :LDY iopr16i is Prefix18=0 & (op8=0xCD); iopr16i { IY = iopr16i; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :^GPaged^"LDY" opr8a_16 is GPaged & (op8=0xDD); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; IY = opr8a_16; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :^GPaged^"LDY" opr16a_16 is GPaged & (op8=0xFD); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; IY = opr16a_16; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :^GPaged^"LDY" indexed2_5 is GPaged & (op8=0xED); indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; IY = indexed2_5; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :LEAS indexed0_3 is Prefix18=0 & (op8=0x1B); indexed0_3 { SP = indexed0_3; } :LEAX indexed0_3 is Prefix18=0 & (op8=0x1A); indexed0_3 { IX = indexed0_3; } :LEAY indexed0_3 is Prefix18=0 & (op8=0x19); indexed0_3 { IY = indexed0_3; } ## Logical Shift left is same as arithmetic shift left #:LSL is (op8=0x68 | op8=0x78) #:LSLA is op8=0x48 #:LSLB is op8=0x58 #:LSLD is op8=0x59 #:LSLW is op16=0x1878 | op16=0x1868 see ASLW #:LSLX is op16=0x1848 see ASLX #:LSLY is op16=0x1858 see ASLY :LSR opr16a_8 is Prefix18=0 & (op8=0x74); opr16a_8 { tmp:1 = opr16a_8; $(C) = tmp & 1; tmp = tmp >> 1; opr16a_8 = tmp; $(Z) = (tmp == 0); $(N) = 0; V_equals_C(); } :LSR indexed1_5 is Prefix18=0 & (op8=0x64); indexed1_5 { tmp:1 = indexed1_5; $(C) = tmp & 1; tmp = tmp >> 1; indexed1_5 = tmp; $(Z) = (tmp == 0); $(N) = 0; V_equals_C(); } :LSRA is Prefix18=0 & op8=0x44 { $(C) = A[0,1]; A = (A >> 1); $(Z) = (A == 0); $(N) = 0; V_equals_C(); } :LSRB is Prefix18=0 & op8=0x54 { $(C) = B[0,1]; B = (B >> 1); $(Z) = (B == 0); $(N) = 0; V_equals_C(); } :LSRD is Prefix18=0 & op8=0x49 { $(C) = D[0,1]; D = (D >> 1); $(Z) = (D == 0); $(N) = 0; V_equals_C(); } @if defined(HCS12X) :LSRW opr16a_16 is Prefix18=1 & (op8=0x74); opr16a_16 { local tmp = opr16a_16; $(C) = tmp[0,1]; tmp = tmp >> 1; opr16a_16 = tmp; $(Z) = (tmp == 
0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif

@if defined(HCS12X)
:LSRW indexed2_5 is Prefix18=1 & (op8=0x64); indexed2_5
{
	local tmp = indexed2_5;
	$(C) = tmp[0,1];
	tmp = tmp >> 1;
	indexed2_5 = tmp;
	$(Z) = (tmp == 0);
	$(N) = (tmp s< 0);
	V_equals_N_xor_C();
}
@endif

@if defined(HCS12X)
:LSRX is Prefix18=1 & op8=0x44
{
	$(C) = IX[0,1];
	IX = IX >> 1;
	$(Z) = (IX == 0);
	$(N) = (IX s< 0);
	V_equals_N_xor_C();
}
@endif

@if defined(HCS12X)
:LSRY is Prefix18=1 & op8=0x54
{
	$(C) = IY[0,1];
	IY = IY >> 1;
	$(Z) = (IY == 0);
	$(N) = (IY s< 0);
	V_equals_N_xor_C();
}
@endif

# MAXA/MAXM/MINA/MINM: per the CPU12 reference manual, N/Z/V/C always
# reflect the internal compare (A) - (M), regardless of which operand wins;
# only the data move is conditional.  The flags are computed with the same
# subtraction_flags1 macro used by SUBA/SBA, replacing hand-rolled range
# checks whose lower bound "(result s< 0x0080)" compared against +128
# instead of -128 and so left V set for almost every operand pair.  The
# local <..._done> branch labels, lost in a previous mangling of this file
# ("goto ;"), are restored here.

:MAXA indexed1_5 is Prefix18=1 & op8=0x18; indexed1_5
{
	tmp:1 = indexed1_5;
	result:1 = A - tmp;
	subtraction_flags1(A, tmp, result);
	if (A > tmp) goto <maxa_done>;
	A = tmp;
	<maxa_done>
}

:MAXM indexed1_5 is Prefix18=1 & op8=0x1C; indexed1_5
{
	tmp:1 = indexed1_5;
	result:1 = A - tmp;
	subtraction_flags1(A, tmp, result);
	if (tmp > A) goto <maxm_done>;
	indexed1_5 = A;
	<maxm_done>
}

# MEM: fuzzy-logic membership evaluation; GradeOfMembership is an opaque
# helper op declared elsewhere in this file.
:MEM is Prefix18=0 & op8=0x01
{
	local val:1 = GradeOfMembership(A, IX, IY);
	Store(IY, val);
	IY = IY + 1;
	IX = IX + 4;
}

:MINA indexed1_5 is Prefix18=1 & op8=0x19; indexed1_5
{
	tmp:1 = indexed1_5;
	result:1 = A - tmp;
	subtraction_flags1(A, tmp, result);
	if (A < tmp) goto <mina_done>;
	A = tmp;
	<mina_done>
}

:MINM indexed1_5 is Prefix18=1 & op8=0x1D; indexed1_5
{
	tmp:1 = indexed1_5;
	result:1 = A - tmp;
	subtraction_flags1(A, tmp, result);
	if (tmp < A) goto <minm_done>;
	indexed1_5 = A;
	<minm_done>
}

:MOVB iopr8i, opr16a_8 is Prefix18=1 & op8=0x0B; iopr8i; opr16a_8 { opr16a_8 = iopr8i; }

@if defined(HCS12X)
:MOVB iopr8i, indexed1_5 is Prefix18=1 & op8=0x08; indexed1_5; iopr8i { indexed1_5 = iopr8i; }
@else
:MOVB iopr8i, indexed1_1 is Prefix18=1 &
op8=0x08; indexed1_1; iopr8i { indexed1_1 = iopr8i; } @endif :MOVB opr16a_8, op2_opr16a_8 is Prefix18=1 & op8=0x0C; opr16a_8; op2_opr16a_8 { build opr16a_8; local tmp = opr16a_8; build op2_opr16a_8; op2_opr16a_8 = tmp; } @if defined(HCS12X) :MOVB opr16a_8, indexed1_5 is Prefix18=1 & op8=0x09; indexed1_5; opr16a_8 { indexed1_5 = opr16a_8; } @else :MOVB opr16a_8, indexed1_1 is Prefix18=1 & op8=0x09; indexed1_1; opr16a_8 { indexed1_1 = opr16a_8; } @endif @if defined(HCS12X) :MOVB indexed1_5, opr16a_8 is Prefix18=1 & op8=0x0D; indexed1_5; opr16a_8 { opr16a_8 = indexed1_5; } @else :MOVB indexed1_1, opr16a_8 is Prefix18=1 & op8=0x0D; indexed1_1; opr16a_8 { opr16a_8 = indexed1_1; } @endif @if defined(HCS12X) :MOVB indexed1_5, op2_indexed1_5 is Prefix18=1 & op8=0x0A; indexed1_5; op2_indexed1_5 { # two operands share a lower level subconstructor # MUST do the builds and store the value, or the first operands results will be overwritten build indexed1_5; local tmp = indexed1_5; build op2_indexed1_5; op2_indexed1_5 = tmp; } @else :MOVB indexed1_1, op2_indexed1_1 is Prefix18=1 & op8=0x0A; indexed1_1; op2_indexed1_1 { # two operands share a lower level subconstructor # MUST do the builds and store the value, or the first operands results will be overwritten build indexed1_1; local tmp = indexed1_1; build op2_indexed1_1; op2_indexed1_1 = tmp; } @endif :MOVW iopr16i, opr16a_16 is Prefix18=1 & op8=0x03; iopr16i; opr16a_16 { opr16a_16 = iopr16i; } @if defined(HCS12X) :MOVW iopr16i, indexed2_5 is Prefix18=1 & op8=0x00; indexed2_5; iopr16i { indexed2_5 = iopr16i; } @else :MOVW iopr16i, indexed2_1 is Prefix18=1 & op8=0x00; indexed2_1; iopr16i { indexed2_1 = iopr16i; } @endif :MOVW opr16a_16, op2_opr16a_16 is Prefix18=1 & op8=0x04; opr16a_16; op2_opr16a_16 { # two operands share a lower level subconstructor # MUST do the builds and store the value, or the first operands results will be overwritten build opr16a_16; local tmp = opr16a_16; build op2_opr16a_16; op2_opr16a_16 = tmp; } @if 
defined(HCS12X) :MOVW opr16a_16, indexed2_5 is Prefix18=1 & op8=0x01; indexed2_5; opr16a_16 { indexed2_5 = opr16a_16; } @else :MOVW opr16a_16, indexed2_1 is Prefix18=1 & op8=0x01; indexed2_1; opr16a_16 { indexed2_1 = opr16a_16; } @endif @if defined(HCS12X) :MOVW indexed2_5, opr16a_16 is Prefix18=1 & op8=0x05; indexed2_5; opr16a_16 { opr16a_16 = indexed2_5; } @else :MOVW indexed2_1, opr16a_16 is Prefix18=1 & op8=0x05; indexed2_1; opr16a_16 { opr16a_16 = indexed2_1; } @endif @if defined(HCS12X) :MOVW indexed2_5, op2_indexed2_5 is Prefix18=1 & op8=0x02; indexed2_5; op2_indexed2_5 { # two operands share a lower level subconstructor # MUST do the builds and store the value, or the first operands results will be overwritten build indexed2_5; local tmp = indexed2_5; build op2_indexed2_5; op2_indexed2_5 = tmp; } @else :MOVW indexed2_1, op2_indexed2_1 is Prefix18=1 & op8=0x02; indexed2_1; op2_indexed2_1 { # two operands share a lower level subconstructor # MUST do the builds and store the value, or the first operands results will be overwritten build indexed2_1; local tmp = indexed2_1; build op2_indexed2_1; op2_indexed2_1 = tmp; } @endif :MUL is Prefix18=0 & op8=0x12 { D = zext(A) * zext(B); $(C) = B[7,1]; } :NEG opr16a_8 is Prefix18=0 & (op8=0x70); opr16a_8 { tmp:1 = opr16a_8; result:1 = -tmp; opr16a_8 = result; $(C) = (result != 0); $(Z) = (result == 0); $(N) = (result s< 0); V_NEG_flag(tmp); } :NEG indexed1_5 is Prefix18=0 & (op8=0x60); indexed1_5 { tmp:1 = indexed1_5; result:1 = -tmp; indexed1_5 = result; $(C) = (result != 0); $(Z) = (result == 0); $(N) = (result s< 0); V_NEG_flag(tmp); } :NEGA is Prefix18=0 & op8=0x40 { tmp:1 = A; A = -tmp; $(C) = (A != 0); $(Z) = (A == 0); $(N) = (A s< 0); V_NEG_flag(tmp); } :NEGB is Prefix18=0 & op8=0x50 { tmp:1 = B; B = -tmp; $(C) = (B != 0); $(Z) = (B == 0); $(N) = (B s< 0); V_NEG_flag(tmp); } @if defined(HCS12X) :NEGW opr16a_16 is Prefix18=1 & (op8=0x70); opr16a_16 { tmp:2 = opr16a_16; result:2 = -tmp; opr16a_16 = result; $(C) = 
(result != 0); $(Z) = (result == 0); $(N) = (result s< 0); V_NEG_flag2(tmp); } @endif @if defined(HCS12X) :NEGW indexed2_5 is Prefix18=1 & (op8=0x60); indexed2_5 { tmp:2 = indexed2_5; result:2 = -tmp; indexed2_5 = result; $(C) = (result != 0); $(Z) = (result == 0); $(N) = (result s< 0); V_NEG_flag2(tmp); } @endif @if defined(HCS12X) :NEGX is Prefix18=1 & op8=0x40 { tmp:2 = IX; IX = -tmp; $(C) = (IX != 0); $(Z) = (IX == 0); $(N) = (IX s< 0); V_NEG_flag2(tmp); } @endif @if defined(HCS12X) :NEGY is Prefix18=1 & op8=0x50 { tmp:2 = IY; IY = -tmp; $(C) = (IY != 0); $(Z) = (IY == 0); $(N) = (IY s< 0); V_NEG_flag2(tmp); } @endif :NOP is Prefix18=0 & op8=0xA7 { } :ORAA iopr8i is Prefix18=0 & (op8=0x8A); iopr8i { A = A | iopr8i; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :ORAA opr8a_8 is Prefix18=0 & (op8=0x9A); opr8a_8 { A = A | opr8a_8; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :ORAA opr16a_8 is Prefix18=0 & (op8=0xBA); opr16a_8 { A = A | opr16a_8; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :ORAA indexed1_5 is Prefix18=0 & (op8=0xAA); indexed1_5 { A = A | indexed1_5; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :ORAB iopr8i is Prefix18=0 & (op8=0xCA); iopr8i { B = B | iopr8i; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :ORAB opr8a_8 is Prefix18=0 & (op8=0xDA); opr8a_8 { B = B | opr8a_8; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :ORAB opr16a_8 is Prefix18=0 & (op8=0xFA); opr16a_8 { B = B | opr16a_8; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :ORAB indexed1_5 is Prefix18=0 & (op8=0xEA); indexed1_5 { B = B | indexed1_5; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :ORCC iopr8i is Prefix18=0 & (op8=0x14); iopr8i { CCR = CCR | (iopr8i & 0b10111111); } @if defined(HCS12X) :ORX iopr16i is Prefix18=1 & (op8=0x8A); iopr16i { IX = IX | iopr16i; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ORX opr8a_16 is Prefix18=1 & (op8=0x9A); opr8a_16 { IX = IX | opr8a_16; V_equals_0(); $(Z) = (IX == 0); $(N) 
= (IX s< 0); } @endif @if defined(HCS12X) :ORX opr16a_16 is Prefix18=1 & (op8=0xBA); opr16a_16 { IX = IX | opr16a_16; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ORX indexed2_5 is Prefix18=1 & (op8=0xAA); indexed2_5 { IX = IX | indexed2_5; V_equals_0(); $(Z) = (IX == 0); $(N) = (IX s< 0); } @endif @if defined(HCS12X) :ORY iopr16i is Prefix18=1 & (op8=0xCA); iopr16i { IY = IY | iopr16i; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif @if defined(HCS12X) :ORY opr8a_16 is Prefix18=1 & (op8=0xDA); opr8a_16 { IY = IY | opr8a_16; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif @if defined(HCS12X) :ORY opr16a_16 is Prefix18=1 & (op8=0xFA); opr16a_16 { IY = IY | opr16a_16; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif @if defined(HCS12X) :ORY indexed2_5 is Prefix18=1 & (op8=0xEA); indexed2_5 { IY = IY | indexed2_5; V_equals_0(); $(Z) = (IY == 0); $(N) = (IY s< 0); } @endif :PSHA is Prefix18=0 & op8=0x36 { Push1( A ); } :PSHB is Prefix18=0 & op8=0x37 { Push1( B ); } :PSHC is Prefix18=0 & op8=0x39 { Push1( CCR ); } @if defined(HCS12X) :PSHCW is Prefix18=1 & op8=0x39 { Push2( CCRW ); } @endif :PSHD is Prefix18=0 & op8=0x3B { Push2( D ); } :PSHX is Prefix18=0 & op8=0x34 { Push2( IX ); } :PSHY is Prefix18=0 & op8=0x35 { Push2( IY ); } :PULA is Prefix18=0 & op8=0x32 { Pull1( A ); } :PULB is Prefix18=0 & op8=0x33 { Pull1( B ); } :PULC is Prefix18=0 & op8=0x38 { Pull1( CCR ); } @if defined(HCS12X) :PULCW is Prefix18=1 & op8=0x38 { Pull2( CCRW ); } @endif :PULD is Prefix18=0 & op8=0x3A { Pull2( D ); } :PULX is Prefix18=0 & op8=0x30 { Pull2( IX ); } :PULY is Prefix18=0 & op8=0x31 { Pull2( IY ); } :REV is Prefix18=1 & op8=0x3A { tempIX:2 = MinMaxRuleEvaluation(IX, IY, A, $(V)); $(V) = MinMaxRuleEvaluationCorrect(IX, IY, A, $(V)); IX = tempIX; } :REVW is Prefix18=1 & op8=0x3B { tempIX:2 = MinMaxRuleEvaluationWeighted(IX, IY, A, $(V), $(C)); tempIY:2 = MinMaxRuleEvaluationWeighted(IX, IY, A, $(V), $(C)); $(V) = 
MinMaxRuleEvaluationWeightedCorrect(IX, IY, A, $(V), $(C)); IX = tempIX; IY = tempIY; } :ROL opr16a_8 is Prefix18=0 & (op8=0x75); opr16a_8 { tmpC:1 = $(C); op1:1 = opr16a_8; $(C) = op1 >> 7; result:1 = op1 << 1; result = result | tmpC; opr16a_8 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_N_xor_C(); } :ROL indexed1_5 is Prefix18=0 & (op8=0x65); indexed1_5 { tmpC:1 = $(C); op1:1 = indexed1_5; $(C) = op1 >> 7; result:1 = op1 << 1; result = result | tmpC; indexed1_5 = result; $(Z) = (result == 0); $(N) = (result s< 0); V_equals_N_xor_C(); } :ROLA is Prefix18=0 & op8=0x45 { tmpC:1 = $(C) ; $(C) = A >> 7; A = A << 1; A = A | tmpC; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); } :ROLB is Prefix18=0 & op8=0x55 { tmpC:1 = $(C) ; $(C) = B >> 7; B = B << 1; B = B | tmpC; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_N_xor_C(); } @if defined(HCS12X) :ROLW opr16a_16 is Prefix18=1 & (op8=0x75); opr16a_16 { local tmp = opr16a_16; local tmpC = $(C); $(C) = tmp[15,1]; tmp = tmp << 1; tmp = tmp | zext(tmpC); opr16a_16 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS12X) :ROLW indexed2_5 is Prefix18=1 & (op8=0x65); indexed2_5 { local tmp = indexed2_5; local tmpC = $(C); $(C) = tmp[15,1]; tmp = tmp << 1; tmp = tmp | zext(tmpC); indexed2_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS12X) :ROLX is Prefix18=1 & op8=0x45 { local tmpC = $(C); $(C) = IX[15,1]; IX = IX << 1; IX = IX | zext(tmpC); $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS12X) :ROLY is Prefix18=1 & op8=0x55 { local tmpC = $(C); $(C) = IY[15,1]; IY = IY << 1; IY = IY | zext(tmpC); $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_N_xor_C(); } @endif :ROR opr16a_8 is Prefix18=0 & (op8=0x76); opr16a_8 { tmpC:1 = $(C) << 7; tmp:1 = opr16a_8; $(C) = tmp & 1; tmp = tmp >> 1; tmp = tmp | tmpC; opr16a_8 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } :ROR indexed1_5 is 
Prefix18=0 & (op8=0x66); indexed1_5 { tmpC:1 = $(C) << 7; tmp:1 = indexed1_5; $(C) = tmp & 1; tmp = tmp >> 1; tmp = tmp | tmpC; indexed1_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } :RORA is Prefix18=0 & op8=0x46 { tmpC:1 = $(C) << 7; $(C) = A & 1; A = A >> 1; A = A | tmpC; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_N_xor_C(); } :RORB is Prefix18=0 & op8=0x56 { tmpC:1 = $(C) << 7; $(C) = B & 1; B = B >> 1; B = B | tmpC; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_N_xor_C(); } @if defined(HCS12X) :RORW opr16a_16 is Prefix18=1 & (op8=0x76); opr16a_16 { local tmp = opr16a_16; local tmpC = $(C); $(C) = tmp[0,1]; tmp = tmp >> 1; tmp = tmp | (zext(tmpC) << 15); opr16a_16 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS12X) :RORW indexed2_5 is Prefix18=1 & (op8=0x66); indexed2_5 { local tmp = indexed2_5; local tmpC = $(C); $(C) = tmp[0,1]; tmp = tmp >> 1; tmp = tmp | (zext(tmpC) << 15); indexed2_5 = tmp; $(Z) = (tmp == 0); $(N) = (tmp s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS12X) :RORX is Prefix18=1 & op8=0x46 { local tmpC = $(C); $(C) = IX[0,1]; IX = IX >> 1; IX = IX | (zext(tmpC) << 15); $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_N_xor_C(); } @endif @if defined(HCS12X) :RORY is Prefix18=1 & op8=0x56 { local tmpC = $(C); $(C) = IY[0,1]; IY = IY >> 1; IY = IY | (zext(tmpC) << 15); $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_N_xor_C(); } @endif @ifdef HCS12 :RTC is Prefix18=0 & op8=0x0A { Pull1( PPAGE ); tmp:2 = 0; Pull2( tmp ); return [tmp]; } @endif :RTI is Prefix18=0 & op8=0x0B { tmp:2 = 0; Pull1( CCR ); Pull1( B ); Pull1( A ); Pull2( IX ); Pull2( IY ); Pull2( tmp ); # as ordered on page 289, not as documented in RTI description return [tmp]; } :RTS is Prefix18=0 & op8=0x3D { tmp:2 = 0; Pull2( tmp ); return [tmp]; } :SBA is Prefix18=1 & (op8=0x16) { result:1 = A - B; subtraction_flags1(A, B, result); A = result; } :SBCA iopr8i is Prefix18=0 & (op8=0x82); iopr8i { op1:1 = iopr8i; result:1 = A - 
op1 - $(C); subtraction_flags1(A, op1, result); A = result; } :SBCA opr8a_8 is Prefix18=0 & (op8=0x92); opr8a_8 { op1:1 = opr8a_8; result:1 = A - op1 - $(C); subtraction_flags1(A, op1, result); A = result; } :SBCA opr16a_8 is Prefix18=0 & (op8=0xB2); opr16a_8 { op1:1 = opr16a_8; result:1 = A - op1 - $(C); subtraction_flags1(A, op1, result); A = result; } :SBCA indexed1_5 is Prefix18=0 & (op8=0xA2); indexed1_5 { op1:1 = indexed1_5; result:1 = A - op1 - $(C); subtraction_flags1(A, op1, result); A = result; } :SBCB iopr8i is Prefix18=0 & (op8=0xC2); iopr8i { op1:1 = iopr8i; result:1 = B - op1 - $(C); subtraction_flags1(B, op1, result); B = result; } :SBCB opr8a_8 is Prefix18=0 & (op8=0xD2); opr8a_8 { op1:1 = opr8a_8; result:1 = B - op1 - $(C); subtraction_flags1(B, op1, result); B = result; } :SBCB opr16a_8 is Prefix18=0 & (op8=0xF2); opr16a_8 { op1:1 = opr16a_8; result:1 = B - op1 - $(C); subtraction_flags1(B, op1, result); B = result; } :SBCB indexed1_5 is Prefix18=0 & (op8=0xE2); indexed1_5 { op1:1 = indexed1_5; result:1 = B - op1 - $(C); subtraction_flags1(B, op1, result); B = result; } @if defined(HCS12X) :SBED iopr16i is Prefix18=1 & (op8=0x83); iopr16i { op1:2 = iopr16i; result:2 = D - op1 - zext($(C)); subtraction_flags2(D, op1, result); D = result; } @endif @if defined(HCS12X) :SBED opr8a_16 is Prefix18=1 & (op8=0x93); opr8a_16 { op1:2 = opr8a_16; result:2 = D - op1 - zext($(C)); subtraction_flags2(D, op1, result); D = result; } @endif @if defined(HCS12X) :SBED opr16a_16 is Prefix18=1 & (op8=0xB3); opr16a_16 { op1:2 = opr16a_16; result:2 = D - op1 - zext($(C)); subtraction_flags2(D, op1, result); D = result; } @endif @if defined(HCS12X) :SBED indexed2_5 is Prefix18=1 & (op8=0xA3); indexed2_5 { op1:2 = indexed2_5; result:2 = D - op1 - zext($(C)); subtraction_flags2(D, op1, result); D = result; } @endif @if defined(HCS12X) :SBEX iopr16i is Prefix18=1 & (op8=0x82); iopr16i { op1:2 = iopr16i; result:2 = IX - op1 - zext($(C)); subtraction_flags2(IX, op1, result); 
IX = result; } @endif @if defined(HCS12X) :SBEX opr8a_16 is Prefix18=1 & (op8=0x92); opr8a_16 { op1:2 = opr8a_16; result:2 = IX - op1 - zext($(C)); subtraction_flags2(IX, op1, result); IX = result; } @endif @if defined(HCS12X) :SBEX opr16a_16 is Prefix18=1 & (op8=0xB2); opr16a_16 { op1:2 = opr16a_16; result:2 = IX - op1 - zext($(C)); subtraction_flags2(IX, op1, result); IX = result; } @endif @if defined(HCS12X) :SBEX indexed2_5 is Prefix18=1 & (op8=0xA2); indexed2_5 { op1:2 = indexed2_5; result:2 = IX - op1 - zext($(C)); subtraction_flags2(IX, op1, result); IX = result; } @endif @if defined(HCS12X) :SBEY iopr16i is Prefix18=1 & (op8=0xC2); iopr16i { op1:2 = iopr16i; result:2 = IY - op1 - zext($(C)); subtraction_flags2(IY, op1, result); IY = result; } @endif @if defined(HCS12X) :SBEY opr8a_16 is Prefix18=1 & (op8=0xD2); opr8a_16 { op1:2 = opr8a_16; result:2 = IY - op1 - zext($(C)); subtraction_flags2(IY, op1, result); IY = result; } @endif @if defined(HCS12X) :SBEY opr16a_16 is Prefix18=1 & (op8=0xF2); opr16a_16 { op1:2 = opr16a_16; result:2 = IY - op1 - zext($(C)); subtraction_flags2(IY, op1, result); IY = result; } @endif @if defined(HCS12X) :SBEY indexed2_5 is Prefix18=1 & (op8=0xE2); indexed2_5 { op1:2 = indexed2_5; result:2 = IY - op1 - zext($(C)); subtraction_flags2(IY, op1, result); IY = result; } @endif :SEC is Prefix18=0 & op16=0x1401 { $(C) = 1; } :SEI is Prefix18=0 & op16=0x1410 { $(I) = 1; } :SEV is Prefix18=0 & op16=0x1402 { $(V) = 1; } @if defined(HCS12X) :SEX A, D is Prefix18=0 & op8=0xB7; ( ( rows3_0=0xC & ( columns7_4=0x0 ) ) ) & A & D { D = sext( A ); } @endif @if defined(HCS12X) :SEX B, D is Prefix18=0 & op8=0xB7; ( ( rows3_0=0xC & ( columns7_4=0x1 ) ) ) & B & D { D = sext( B ); } @endif @if defined(HCS12X) :SEX D, IX is Prefix18=0 & op8=0xB7; ( ( rows3_0=0xD & ( columns7_4=0x4 ) ) ) & D & IX { # generate the sign extension upper word and assign it to destination local tmp:4 = sext( D ); IX = tmp(2); } @endif @if defined(HCS12X) :SEX D, IY is 
Prefix18=0 & op8=0xB7; ( ( rows3_0=0xE & ( columns7_4=0x4 ) ) ) & D & IY { # generate the sign extension upper word and assign it to destination local tmp:4 = sext( D ); IY = tmp(2); } @endif :SEX abc5_4, dxys2_0 is Prefix18=0 & op8=0xB7; ( ( rows3_0=0x3 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 ) ) | ( rows3_0=0x4 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 ) ) | ( rows3_0=0x5 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 ) ) | ( rows3_0=0x6 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 ) ) | ( rows3_0=0x7 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 ) ) ) & abc5_4 & dxys2_0 { dxys2_0 = sext(abc5_4); } :^GPaged^"STAA" opr8a_8 is GPaged & (op8=0x5A); opr8a_8 [ UseGPAGE=Prefix18; ] { build GPaged; opr8a_8 = A; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :^GPaged^"STAA" opr16a_8 is GPaged & (op8=0x7A); opr16a_8 [ UseGPAGE=Prefix18; ] { build GPaged; opr16a_8 = A; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :^GPaged^"STAA" indexed1_5 is GPaged & (op8=0x6A); indexed1_5 [ UseGPAGE=Prefix18; ] { build GPaged; indexed1_5 = A; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :^GPaged^"STAB" opr8a_8 is GPaged & (op8=0x5B); opr8a_8 [ UseGPAGE=Prefix18; ] { build GPaged; opr8a_8 = B; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :^GPaged^"STAB" opr16a_8 is GPaged & (op8=0x7B); opr16a_8 [ UseGPAGE=Prefix18; ] { build GPaged; opr16a_8 = B; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :^GPaged^"STAB" indexed1_5 is GPaged & (op8=0x6B); indexed1_5 [ UseGPAGE=Prefix18; ] { build GPaged; indexed1_5 = B; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :^GPaged^"STD" opr8a_16 is GPaged & (op8=0x5C); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr8a_16 = D; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } :^GPaged^"STD" opr16a_16 is GPaged & (op8=0x7C); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr16a_16 = D; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } :^GPaged^"STD" indexed2_5 is GPaged & (op8=0x6C); 
indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; indexed2_5 = D; $(Z) = (D == 0); $(N) = (D s< 0); V_equals_0(); } :STOP is Prefix18=1 & op8=0x3E { if ($(S) == 0) goto ; tmp:2 = inst_next; Push2( tmp ); Push2( IY ); Push2( IX ); Push1( A ); Push1( B ); Push1( CCR ); stop(); } :^GPaged^"STS" opr8a_16 is GPaged & (op8=0x5F); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr8a_16 = SP; $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :^GPaged^"STS" opr16a_16 is GPaged & (op8=0x7F); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr16a_16 = SP; $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :^GPaged^"STS" indexed2_5 is GPaged & (op8=0x6F); indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; indexed2_5 = SP; $(Z) = (SP == 0); $(N) = (SP s< 0); V_equals_0(); } :^GPaged^"STX" opr8a_16 is GPaged & (op8=0x5E); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr8a_16 = IX; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :^GPaged^"STX" opr16a_16 is GPaged & (op8=0x7E); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr16a_16 = IX; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :^GPaged^"STX" indexed2_5 is GPaged & (op8=0x6E); indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; indexed2_5 = IX; $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :^GPaged^"STY" opr8a_16 is GPaged & (op8=0x5D); opr8a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr8a_16 = IY; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :^GPaged^"STY" opr16a_16 is GPaged & (op8=0x7D); opr16a_16 [ UseGPAGE=Prefix18; ] { build GPaged; opr16a_16 = IY; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :^GPaged^"STY" indexed2_5 is GPaged & (op8=0x6D); indexed2_5 [ UseGPAGE=Prefix18; ] { build GPaged; indexed2_5 = IY; $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :SUBA iopr8i is Prefix18=0 & (op8=0x80); iopr8i { op1:1 = iopr8i; result:1 = A - op1; subtraction_flags1(A, op1, result); A = result; } :SUBA opr8a_8 is Prefix18=0 & (op8=0x90); opr8a_8 { op1:1 = opr8a_8; result:1 = A - op1; 
subtraction_flags1(A, op1, result); A = result; } :SUBA opr16a_8 is Prefix18=0 & (op8=0xB0); opr16a_8 { op1:1 = opr16a_8; result:1 = A - op1; subtraction_flags1(A, op1, result); A = result; } :SUBA indexed1_5 is Prefix18=0 & (op8=0xA0); indexed1_5 { op1:1 = indexed1_5; result:1 = A - op1; subtraction_flags1(A, op1, result); A = result; } :SUBB iopr8i is Prefix18=0 & (op8=0xC0); iopr8i { op1:1 = iopr8i; result:1 = B - op1; subtraction_flags1(B, op1, result); B = result; } :SUBB opr8a_8 is Prefix18=0 & (op8=0xD0); opr8a_8 { op1:1 = opr8a_8; result:1 = B - op1; subtraction_flags1(B, op1, result); B = result; } :SUBB opr16a_8 is Prefix18=0 & (op8=0xF0); opr16a_8 { op1:1 = opr16a_8; result:1 = B - op1; subtraction_flags1(B, op1, result); B = result; } :SUBB indexed1_5 is Prefix18=0 & (op8=0xE0); indexed1_5 { op1:1 = indexed1_5; result:1 = B - op1; subtraction_flags1(B, op1, result); B = result; } :SUBD iopr16i is Prefix18=0 & (op8=0x83); iopr16i { op1:2 = iopr16i; result:2 = D - op1; subtraction_flags2(D, op1, result); D = result; } :SUBD opr8a_16 is Prefix18=0 & (op8=0x93); opr8a_16 { op1:2 = opr8a_16; result:2 = D - op1; subtraction_flags2(D, op1, result); D = result; } :SUBD opr16a_16 is Prefix18=0 & (op8=0xB3); opr16a_16 { op1:2 = opr16a_16; result:2 = D - op1; subtraction_flags2(D, op1, result); D = result; } :SUBD indexed2_5 is Prefix18=0 & (op8=0xA3); indexed2_5 { op1:2 = indexed2_5; result:2 = D - op1; subtraction_flags2(D, op1, result); D = result; } @if defined(HCS12X) :SUBX iopr16i is Prefix18=1 & (op8=0x80); iopr16i { op1:2 = iopr16i; result:2 = IX - op1; subtraction_flags2(IX, op1, result); IX = result; } @endif @if defined(HCS12X) :SUBX opr8a_16 is Prefix18=1 & (op8=0x90); opr8a_16 { op1:2 = opr8a_16; result:2 = IX - op1; subtraction_flags2(IX, op1, result); IX = result; } @endif @if defined(HCS12X) :SUBX opr16a_16 is Prefix18=1 & (op8=0xB0); opr16a_16 { op1:2 = opr16a_16; result:2 = IX - op1; subtraction_flags2(IX, op1, result); IX = result; } @endif @if 
defined(HCS12X) :SUBX indexed2_5 is Prefix18=1 & (op8=0xA0); indexed2_5 { op1:2 = indexed2_5; result:2 = IX - op1; subtraction_flags2(IX, op1, result); IX = result; } @endif @if defined(HCS12X) :SUBY iopr16i is Prefix18=1 & (op8=0xC0); iopr16i { op1:2 = iopr16i; result:2 = IY - op1; subtraction_flags2(IY, op1, result); IY = result; } @endif @if defined(HCS12X) :SUBY opr8a_16 is Prefix18=1 & (op8=0xD0); opr8a_16 { op1:2 = opr8a_16; result:2 = IY - op1; subtraction_flags2(IY, op1, result); IY = result; } @endif @if defined(HCS12X) :SUBY opr16a_16 is Prefix18=1 & (op8=0xF0); opr16a_16 { op1:2 = opr16a_16; result:2 = IY - op1; subtraction_flags2(IY, op1, result); IY = result; } @endif @if defined(HCS12X) :SUBY indexed2_5 is Prefix18=1 & (op8=0xE0); indexed2_5 { op1:2 = indexed2_5; result:2 = IY - op1; subtraction_flags2(IY, op1, result); IY = result; } @endif :SWI is Prefix18=0 & op8=0x3F { tmp:2 = inst_next; Push2( tmp ); Push2( IY ); Push2( IX ); Push1( A ); Push1( B ); Push1( CCR ); $(I) = 1; addr:2 = $(VECTOR_SWI); call [addr]; } :TAB is Prefix18=1 & op8=0x0E { B = A; $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); } :TAP is Prefix18=0 & op16=0xB702 { setCCR( A ); } :TBA is Prefix18=1 & op8=0x0F { A = B; $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); } :TBEQ byte9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x2 & size10_10=0 & byte9_8 & rel9 { if (byte9_8 == 0) goto rel9; } :TBEQ word9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x2 & size10_10=1 & word9_8 & rel9 { if (word9_8 == 0) goto rel9; } :TBL indexed1_3 is Prefix18=1 & op8=0x3D; indexed1_3 { A = TableLookupAndInterpolate(indexed1_3, B); $(Z) = (A == 0); $(N) = (A s< 0); @if defined(HC12) $(C) = TableLookupAndInterpolateRoundable(indexed1_3, B); @endif } :TBNE byte9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x3 & size10_10=0 & byte9_8 & rel9 { if (byte9_8 != 0) goto rel9; } :TBNE word9_8, rel9 is Prefix18=0 & op8=0x04; op15_13=0x3 & size10_10=1 & word9_8 & rel9 { if (word9_8 != 0) goto rel9; } :TFR 
bytes_ABClT3lBXlYlSl_6_4, bytes_ABCl_2_0 is Prefix18=0 & ( op8=0xB7 ); ( # The case "20" is covered by TPA ( rows3_0=0x0 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0x1 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 | columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0x8 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x4 ) ) | ( rows3_0=0x9 & ( columns7_4=0x0 | columns7_4=0x1 | columns7_4=0x2 | columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) ) & bytes_ABClT3lBXlYlSl_6_4 & bytes_ABCl_2_0 { bytes_ABCl_2_0 = bytes_ABClT3lBXlYlSl_6_4; } :TFR bytes_ABClT3lBXlYlSl_6_4, CCR is Prefix18=0 & ( op8=0xB7 ); ( # The case "02" is covered by TAP ( rows3_0=0x2 & ( columns7_4=0x1 | columns7_4=0x2 | columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0xA & ( columns7_4=0x1 ) ) ) & bytes_ABClT3lBXlYlSl_6_4 & CCR { setCCR( bytes_ABClT3lBXlYlSl_6_4 ); } :TFR bytes_ABChT3hBXhYhSh_6_4, A is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0x8 & ( columns7_4=0x2 | columns7_4=0x3 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) ) & bytes_ABChT3hBXhYhSh_6_4 & A { A = bytes_ABChT3hBXhYhSh_6_4; } :TFR A, CCRH is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xA & ( columns7_4=0x0 ) ) ) & A & CCRH { CCRH = A; } :TFR words_CT3DXYS_6_4, CCRW is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xA & ( columns7_4=0x2 | columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) ) & words_CT3DXYS_6_4 & CCRW { setCCRW( words_CT3DXYS_6_4 ); } :TFR words_T3DXYS_6_4, words_T2DXYS_2_0 is Prefix18=0 & ( op8=0xB7 ); ( # The case "57" is covered by TXS # The case "67" is covered by TYS # The case "75" is covered by TSX # The case "76" is covered by TSY ( rows3_0=0x3 & ( columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( 
rows3_0=0x4 & ( columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0x5 & ( columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 ) ) | ( rows3_0=0x6 & ( columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 ) ) | ( rows3_0=0x7 & ( columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x7 ) ) | ( rows3_0=0xB & ( columns7_4=0x3 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0xC & ( columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0xD & ( columns7_4=0x3 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0xE & ( columns7_4=0x3 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) | ( rows3_0=0xF & ( columns7_4=0x3 | columns7_4=0x4 | columns7_4=0x5 | columns7_4=0x6 | columns7_4=0x7 ) ) ) & words_T3DXYS_6_4 & words_T2DXYS_2_0 { words_T2DXYS_2_0 = words_T3DXYS_6_4; } :TFR D, TMP1 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0x4 ) ) ) & D & TMP1 { TMP1 = D; } :TFR TMP1, D is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xC & ( columns7_4=0x3 ) ) ) & TMP1 & D { D = TMP1; } :TFR CCRW, words_T2DXYS_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0x2 ) ) | ( rows3_0=0xC & ( columns7_4=0x2 ) ) | ( rows3_0=0xD & ( columns7_4=0x2 ) ) | ( rows3_0=0xE & ( columns7_4=0x2 ) ) | ( rows3_0=0xF & ( columns7_4=0x2 ) ) ) & CCRW & words_T2DXYS_2_0 { words_T2DXYS_2_0 = CCRW; } :TFR A, bytes_T2h_XhYhSh_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0x0 ) ) | ( rows3_0=0xD & ( columns7_4=0x0 ) ) | ( rows3_0=0xE & ( columns7_4=0x0 ) ) | ( rows3_0=0xF & ( columns7_4=0x0 ) ) ) & A & bytes_T2h_XhYhSh_2_0 { bytes_T2h_XhYhSh_2_0 = A; } :TFR A, bytes_T2l_XlYlSl_2_0 is Prefix18=0 & ( op8=0xB7 ); ( ( rows3_0=0xB & ( columns7_4=0x1 ) ) | ( rows3_0=0xD & ( columns7_4=0x1 ) ) | ( rows3_0=0xE & ( columns7_4=0x1 ) ) | ( rows3_0=0xF & ( columns7_4=0x1 ) ) ) & A & bytes_T2l_XlYlSl_2_0 { bytes_T2l_XlYlSl_2_0 = A; } :TPA is Prefix18=0 
& op16=0xB720 { A = CCR; } # TODO Not working properly with context regis ter for Prefix18 :TRAP trapnum is Prefix18=1 & op8=0x30 & trapnum { tmp:2 = inst_next; Push2( tmp ); Push2( IY ); Push2( IX ); Push1( A ); Push1( B ); Push1( CCR ); $(I) = 1; addr:2 = $(VECTOR_TRAP); call [addr]; } :TST opr16a_8 is Prefix18=0 & (op8=0xF7); opr16a_8 { op1:1 = opr16a_8; $(Z) = (op1 == 0); $(N) = (op1 s< 0); V_equals_0(); $(C) = 0; } :TST indexed1_5 is Prefix18=0 & (op8=0xE7); indexed1_5 { op1:1 = indexed1_5; $(Z) = (op1 == 0); $(N) = (op1 s< 0); V_equals_0(); $(C) = 0; } :TSTA is Prefix18=0 & op8=0x97 { $(Z) = (A == 0); $(N) = (A s< 0); V_equals_0(); $(C) = 0; } :TSTB is Prefix18=0 & op8=0xD7 { $(Z) = (B == 0); $(N) = (B s< 0); V_equals_0(); $(C) = 0; } @if defined(HCS12X) :TSTW opr16a_16 is Prefix18=1 & (op8=0xF7); opr16a_16 { op1:2 = opr16a_16; $(Z) = (op1 == 0); $(N) = (op1 s< 0); V_equals_0(); $(C) = 0; } @endif @if defined(HCS12X) :TSTW indexed2_5 is Prefix18=1 & (op8=0xE7); indexed2_5 { op1:2 = indexed2_5; $(Z) = (op1 == 0); $(N) = (op1 s< 0); V_equals_0(); $(C) = 0; } @endif :TSTX is Prefix18=1 & op8=0x97 { $(Z) = (IX == 0); $(N) = (IX s< 0); V_equals_0(); } :TSTY is Prefix18=1 & op8=0xD7 { $(Z) = (IY == 0); $(N) = (IY s< 0); V_equals_0(); } :TSX is Prefix18=0 & op16=0xB775 { IX = SP; } :TSY is Prefix18=0 & op16=0xB776 { IY = SP; } :TXS is Prefix18=0 & op16=0xB757 { SP = IX; } :TYS is Prefix18=0 & op16=0xB767 { SP = IY; } :WAI is Prefix18=0 & op8=0x3E { tmp:2 = inst_next; Push2( tmp ); Push2( IY ); Push2( IX ); Push1( A ); Push1( B ); Push1( CCR ); WaitForInterrupt(); } :WAV is Prefix18=1 & op8=0x3C { tempIY:2 = WeightedAverageSOPHigh(B, IY, IX); tempD:2 = WeightedAverageSOPLow (B, IY, IX); tempIX:2 = WeightedAverageSOW(B, IY, IX); B = 0; IY = tempIY; D = tempD; IX = tempIX; } :WAVR is Prefix18=0 & op8=0x3C { WeightedAverageResume(); } :XGDX is Prefix18=0 & op16=0xB7C5 { tmp:2 = IX; IX = D; D = tmp; } :XGDY is Prefix18=0 & op16=0xB7C6 { tmp:2 = IY; IY = D; D = tmp; } } # 
End with : XGATE=0 ================================================ FILE: pypcode/processors/HCS12/data/languages/XGATE.sinc ================================================ # sleigh specification file for XGATE MCU peripheral co-processor ################################################################ # Registers ################################################################ # register R0 always contains the value 0 define register offset=0x100 size=2 [R0 R1 R2 R3 R4 R5 R6 R7]; define register offset=0x100 size=1 [R0.H R0.L R1.H R1.L R2.H R2.L R3.H R3.L R4.H R4.L R5.H R5.L R6.H R6.L R7.H R7.L]; define register offset=0x110 size=2 [XPC XCCR]; define register offset=0x120 size=1 [XC XV XZ XN]; # Individual status bits within the XCCR @define XN "XN" # XCCR[3,1] # Negative Flag @define XZ "XZ" # XCCR[2,1] # Zero Flag @define XV "XV" # XCCR[1,1] # Overflow Flag @define XC "XC" # XCCR[0,1] # Carry Flag ################################################################ # Tokens ################################################################ define token XOpWord16 (16) xop16 = (0,15) opcode = (11,15) reg8 = (8,10) reg8_lo = (8,10) reg8_hi = (8,10) imm3 = (8,10) op9_10 = (9,10) bit_10 = (10,10) immrel9 = (0,9) signed immrel8 = (0,8) signed xop8 = (0,7) reg5 = (5,7) ximm4 = (4,7) ximm8 = (0,7) op4 = (0,4) op3 = (0,3) offs5 = (0,5) reg2 = (2,4) op2 = (0,1) ; ################################################################ # Attach variables ################################################################ attach variables [reg8 reg5 reg2] [R0 R1 R2 R3 R4 R5 R6 R7]; attach variables [reg8_lo ] [R0.L R1.L R2.L R3.L R4.L R5.L R6.L R7.L]; attach variables [reg8_hi ] [R0.H R1.H R2.H R3.H R4.H R5.H R6.H R7.H]; ################################################################ # Pseudo Instructions ################################################################ define pcodeop leftShiftCarry; define pcodeop rightShiftCarry; define pcodeop parity; define pcodeop clearSemaphore; 
define pcodeop setSemaphore;
define pcodeop setInterruptFlag;
define pcodeop TerminateThread;

################################################################
# Macros Instructions
################################################################

# Set N/Z from a 16-bit result and clear V.  Used by the plain
# logical/move operations.
macro default_flags(result) {
	$(XZ) = (result == 0);
	$(XN) = (result s< 0);
	$(XV) = 0;
	#$(XC) not affected
}

# CCR update for 16-bit additions (operand1 + operand2 = result).
# NOTE(review): Z is computed "sticky" (result == 0 AND previous Z set),
# the multi-precision form appropriate for ADC; the plain ADD constructor
# also calls this macro -- confirm against the XGATE manual.
macro addition_flags(operand1, operand2, result) {
	$(XN) = (result s< 0);
	$(XZ) = ((result == 0) & ($(XZ)==1));
	# signed overflow derived from the sign bits of operands and result
	$(XV) = (((operand1 & operand2 & ~result) | (~operand1 & ~operand2 & result)) & 0x8000) != 0;
	# carry out of bit 15
	$(XC) = (((operand1 & operand2) | (operand2 & ~result) | (~result & operand1)) & 0x8000) != 0;
}

# CCR update for 16-bit subtractions (register - operand = result).
macro subtraction_flags(register, operand, result) {
	$(XN) = (result s< 0);
	$(XZ) = (result == 0);
	$(XV) = ( ((register & ~operand & ~result) | (~register & operand & result)) & 0x8000 ) != 0;
	# borrow out of bit 15
	$(XC) = ( ((~register & operand) | (operand & result) | (~register & result)) & 0x8000 ) != 0;
}

# 8-bit (byte) variant of subtraction_flags; sign/carry taken from bit 7.
macro subtraction_flagsB(register, operand, result) {
	$(XN) = (result s< 0);
	$(XZ) = (result == 0);
	$(XV) = ( ((register & ~operand & ~result) | (~register & operand & result)) & 0x80 ) != 0;
	$(XC) = ( ((~register & operand) | (operand & result) | (result & ~register)) & 0x80 ) != 0;
}

# 16-bit subtraction flags with "sticky" Z, used by the with-carry
# forms (e.g. SBC).
macro subtraction_flagsC(register, operand, result) {
	$(XN) = (result s< 0);
	$(XZ) = ( (result == 0) & ($(XZ) == 1));
	$(XV) = ( ((register & ~operand & ~result) | (~register & operand & result)) & 0x8000 ) != 0;
	$(XC) = ( ((~register & operand) | (operand & result) | (~register & result)) & 0x8000 ) != 0;
}

# N/Z from the shifted result; V set when the sign bit changed.
macro shiftFlags(result,old) {
	$(XN) = (result s< 0);
	$(XZ) = (result == 0);
	tmp:2 = (old >> 15) ^ (result >> 15);
	$(XV) = tmp(1);
}

# res := bit "bitnum" of "in", as a boolean.
macro getbit(res,in,bitnum) {
	res = ((in >> bitnum) & 1) != 0;
}

#
# computes a fake PPAGE page mapping based on the 16 bit input address
# The XGATE memory is mapped to the pages of physical memory
# Warning: This might not be the correct mapping on all XGATE processors
#
# 0000-07ff = 0x00_0000 - 0x00_07ff
# 0800-7fff
= 0x78_0800 - XGFLASH_HIGH # 8000-ffff = 0x0f_0800 - 0x0f_ffff # macro computePage(addr) { local isReg:1 = addr < 0x800; local isFlash:1 = addr >= 0x800 & addr < 0x7fff; local isRam:1 = addr >= 0x8000; physPage = (zext(isReg) * 0x0)+ (zext(isFlash) * (0x78 << 16)) + (zext(isRam) * (0xf<<16)); } ################################################################ # Constructors ################################################################ #rel9 defined in HCS_HC12.sinc # range -256 through +255 with : XGATE=1 { rel9: reloc is immrel8 [ reloc = inst_next + (immrel8 * 2); ] { export *:1 reloc; } # range -512 through +512 rel10: reloc is immrel9 [ reloc = inst_next + (immrel9 * 2); ] { export *:1 reloc; } rd : reg8 is reg8 { export reg8; } rs1: reg5 is reg5 & reg5=0 { export 0:2; } rs1: reg5 is reg5 { export reg5; } rs2: reg2 is reg2 & reg2=0 { export 0:2; } rs2: reg2 is reg2 { export reg2; } rd_lo: reg8 is reg8 & reg8_lo { export reg8_lo; } rd_hi: reg8 is reg8 & reg8_hi { export reg8_hi; } # Add with carry :ADC rd, rs1, rs2 is opcode=0x3 & rd & rs1 & rs2 & op2=0x3 { local result:2 = rs1 + rs2 + zext($(XC)); rd = result; addition_flags(rs1, rs2, result); } # Add without carry :ADD rd, rs1, rs2 is opcode=0x3 & rd & rs1 & rs2 & op2=0x2 { local result:2 = rs1 + rs2; rd = result; addition_flags(rs1, rs2, result); } # Add immediate 8-bit constant (high byte) :ADDH rd, ximm8 is opcode=0x1d & rd & ximm8 { local val:2 = ximm8 << 8; local result:2 = rd + val; addition_flags(rd, val, result); rd = result; } # Add immediate 8-bit constant (low byte) :ADDL rd, ximm8 is opcode=0x1c & rd & ximm8 { local result:2 = rd + ximm8; $(XN) = (result s< 0); $(XZ) = ((result == 0) & ($(XZ)==1)); $(XV) = ((~rd & result) & 0x8000) != 0; $(XC) = ((rd & ~result) & 0x8000) != 0; rd = result; } # Logical AND :AND rd, rs1, rs2 is opcode=0x2 & rd & rs1 & rs2 & op2=0x0 { rd = rs1 & rs2; default_flags(rd); } # Logical AND immediate 8-bit constant (high byte) :ANDH rd, ximm8 is opcode=0x11 & rd & ximm8 & 
rd_hi { rd_hi = rd_hi & ximm8; default_flags(rd_hi); } # Logical AND immediate 8-bit constant (low byte) :ANDL rd, ximm8 is opcode=0x10 & rd & ximm8 & rd_lo { rd_lo = rd_lo & ximm8; default_flags(rd_lo); } # Arithmetic Shift Right :ASR rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0x9 { getbit($(XC), rd, ximm4-1); rd = rd s>> ximm4; default_flags(rd); } :ASR rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x11 { getbit($(XC), rd, rs1-1); rd = rd s>> rs1; default_flags(rd); } # Branch if Carry Cleared :BCC rel9 is opcode=0x4 & op9_10=0x0 & rel9 { if ($(XC) == 0) goto rel9; } # Branch if Carry Set :BCS rel9 is opcode=0x4 & op9_10=0x1 & rel9 { if ($(XC) == 1) goto rel9; } # Branch of Equal :BEQ rel9 is opcode=0x4 & op9_10=0x3 & rel9 { if ($(XZ) == 1) goto rel9; } # Bit Field Extract :BFEXT rd, rs1, rs2 is opcode=0xc & rd & rs1 & rs2 & op2=0x3 { local origin:2 = rs2 & 0xf; local width:2 = (rs2 >> 4) & 0xf; local mask:2 = (0xffff >> (16-(width + 1))) << origin; local result:2 = (rs1 & mask) >> origin; rd = result; default_flags(rd); } # Bit Field Find First One :BFFO rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x10 { # 15 - count leading zeros tmp:2 = rs1; $(XC) = (rs1 == 0); rd = zext(tmp != 0) * (15 - lzcount(tmp)); default_flags(rd); } # Bit Field Insert :BFINS rd, rs1, rs2 is opcode=0xd & rd & rs1 & rs2 & op2=0x3 { local origin:2 = rs2 & 0xf; local width:2 = (rs2 >> 4) & 0xf; local mask:2 = (0xffff >> (16-(width + 1))) << origin; local result:2 = (rs1 & mask); rd = (rd & ~mask) | result; default_flags(rd); } # Bit Field Insert and Invert :BFINSI rd, rs1, rs2 is opcode=0xe & rd & rs1 & rs2 & op2=0x3 { local origin:2 = rs2 & 0xf; local width:2 = (rs2 >> 4) & 0xf; local mask:2 = (0xffff >> (16-(width + 1))) << origin; local result:2 = (~rs1 & mask); rd = (rd & ~mask) | result; default_flags(rd); } # Bit Field Insert and XNOR :BFINSX rd, rs1, rs2 is opcode=0xf & rd & rs1 & rs2 & op2=0x3 { local origin:2 = rs2 & 0xf; local width:2 = (rs2 >> 4) & 0xf; local mask:2 = (0xffff >> (16-(width 
+ 1))) << origin; local result:2 = (~(rs1 ^ rd) & mask); rd = (rd & ~mask) | result; default_flags(rd); }

# Branch if Greater than or Equal to Zero
:BGE rel9 is opcode=0x6 & op9_10=0x2 & rel9 { if (($(XN) ^ $(XV)) == 0) goto rel9; }

# Branch if Greater than Zero
:BGT rel9 is opcode=0x7 & op9_10=0x0 & rel9 { if (($(XZ) | ($(XN) ^ $(XV))) == 0) goto rel9; }

# Branch if Higher
:BHI rel9 is opcode=0x6 & op9_10=0x0 & rel9 { if (($(XC) | $(XZ)) == 0) goto rel9; }

#:BHS rel9 is opcode=0x4 & op9_10=0x0 & rel9 see BCC

# Bit Test immediate 8-bit constant (high byte)
:BITH rd, ximm8 is opcode=0x13 & rd & ximm8 & rd_hi { local val = rd_hi & ximm8; default_flags(val); }

# Bit Test immediate 8-bit constant (low byte)
:BITL reg8, ximm8 is opcode=0x12 & reg8 & ximm8 & rd_lo { local val = rd_lo & ximm8; default_flags(val); }

# Branch if Less or Equal to Zero
:BLE rel9 is opcode=0x7 & op9_10=0x1 & rel9 { if ($(XZ) | ($(XN) ^ $(XV))) goto rel9; }

#:BLO rel9 is opcode=0x4 & op9_10=0x1 & rel9 See BCS

# Branch if Lower or Same
:BLS rel9 is opcode=0x6 & op9_10=0x1 & rel9 { if (($(XC) | $(XZ)) == 1) goto rel9; }

# Branch of Lower than Zero
:BLT rel9 is opcode=0x6 & op9_10=0x3 & rel9 { if (($(XN) ^ $(XV)) == 1) goto rel9; }

# Branch if Minus
:BMI rel9 is opcode=0x5 & op9_10=0x1 & rel9 { if ($(XN) == 1) goto rel9; }

# Branch if Not Equal
:BNE rel9 is opcode=0x4 & op9_10=0x2 & rel9 { if ($(XZ) == 0) goto rel9; }

# Branch if Plus
:BPL rel9 is opcode=0x5 & op9_10=0x0 & rel9 { if ($(XN) == 0) goto rel9; }

# Branch Always
:BRA rel10 is opcode=0x7 & bit_10=0x1 & rel10 { goto rel10; }

# Break
:BRK is xop16=0x0 {
	# put xgate into debug mode and set breakpoint
	goto inst_next;
}

# Branch if Overflow Cleared
:BVC rel9 is opcode=0x5 & op9_10=0x2 & rel9 { if ($(XV) == 0) goto rel9; }

# Branch if Overflow Set
# BUGFIX: XV is a boolean flag register holding only 0 or 1; the original
# test "$(XV) == 2" could never be true, so BVS never branched.  Compare
# against 1, matching BVC/BCS/BMI above.
:BVS rel9 is opcode=0x5 & op9_10=0x3 & rel9 { if ($(XV) == 1) goto rel9; }

# Compare
# synonym for SUB R0, RS1, RS2
:CMP rs1, rs2 is opcode=0x3 & reg8=0x0 & rs1 & rs2 & op2=0x0 { tmp:2 = rs1 - rs2;
subtraction_flags(rs1, rs2, tmp); }

# Compare Immediate 8-bit constant (low byte)
:CMPL rd, ximm8 is opcode=0x1a & rd & ximm8 {
	local val:1 = rd:1;
	local tmp:1 = val - ximm8;
	local xtmp:1 = ximm8;
	subtraction_flagsB(val, xtmp, tmp);
}

# One's Complement
:COM rd, rs2 is opcode=0x2 & rd & reg5=0x0 & rs2 & op2=0x3 {
	local val:2 = ~rs2;
	rd = val;
	default_flags(rd);
}
:COM rd is opcode=0x2 & rd & reg5=0x0 & rs2 & reg8=reg2 & op2=0x3 {
	local val:2 = ~rs2;
	rd = val;
	default_flags(rd);
}

# Compare with Carry
:CPC rs1, rs2 is opcode=0x3 & reg8=0x0 & rs1 & rs2 & op2=0x1 {
	local tmp:2 = rs1 - rs2 - zext($(XC));
	subtraction_flags(rs1, rs2, tmp);
}

# Compare Immediate 8-bit constant with carry (high byte)
:CPCH rd, ximm8 is opcode=0x1b & rd & ximm8 {
	local val:2 = rd >> 8;
	local tmp:1 = val(1) - ximm8 - $(XC);
	local xtmp:1 = ximm8;
	subtraction_flagsB(val(1), xtmp, tmp);
}

# Clear Semaphore
:CSEM rd is opcode=0x0 & rd & xop8=0xf0 {
	# treat as NOP
	clearSemaphore(rd);
}
:CSEM imm3 is opcode=0x0 & imm3 & xop8=0xf1 {
	local sem:1 = imm3;
	clearSemaphore(sem);
}

# Logical Shift Left with Carry
:CSL rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0xa {
	local Ctmp:2 = zext($(XC));
	local shift:2 = ((ximm4-1)%16+1);
	local oldRd:2 = rd >> 15;
	getbit($(XC), rd, 16-shift);
	leftShiftCarry(rd,Ctmp,shift,rd);
	shiftFlags(rd,oldRd);
}
:CSL rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x12 {
	local Ctmp:2 = zext($(XC));
	# if rs1 > 16, then rs1 = 16
	local rsgt:2 = zext(rs1>16);
	local rslt:2 = zext(rs1<=16);
	# BUGFIX: select rs1 when rs1 <= 16 and clamp to 16 otherwise, as the
	# comment above intends; the original had the two products swapped
	# (shift was rs1 when rs1 > 16, 16 when rs1 < 16, and 0 when rs1 == 16).
	local shift:2 = rs1*rslt + 16*rsgt;
	local oldRd:2 = rd >> 15;
	getbit($(XC), rd, 16-shift);
	leftShiftCarry(rd,Ctmp,shift,rd);
	shiftFlags(rd,oldRd);
}

# Logical Shift Right with Carry
:CSR rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0xb {
	local Ctmp:2 = zext($(XC));
	local shift:2 = ((ximm4-1)%16+1);
	local oldRd:2 = rd >> 15;
	getbit($(XC), rd, shift-1);
	rightShiftCarry(rd,Ctmp,shift,rd);
	shiftFlags(rd,oldRd);
}
:CSR rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x13 {
	local Ctmp:2 = zext($(XC));
	# if rs1 > 16, then rs1 = 16
	local rsgt:2 = zext(rs1>16);
	local rslt:2 = zext(rs1<=16);
	# BUGFIX: same clamp correction as CSL above.
	local shift:2 = rs1*rslt + 16*rsgt;
	local oldRd:2 = rd >> 15;
	getbit($(XC), rd, shift-1);
	rightShiftCarry(rd,Ctmp,shift,rd);
	shiftFlags(rd,oldRd);
}
# Shift by R0 (always 0): only the flags are updated.
:CSR rd, rs1 is opcode=0x1 & rd & rs1 & reg5=0 & op4=0x13 {
	$(XN) = (rd s< 0);
	$(XZ) = (rd == 0);
	$(XV) = 0;
	# $(XC) is unaffected
}

# Jump and Link
:JAL rd is opcode=0x0 & rd & xop8=0xf6 {
	local dest:2 = rd;
	rd = inst_next;
	call [dest];
}

# Load byte from memory (low byte)
:LDB rd, (rs1, offs5) is opcode=0x8 & rd & rs1 & offs5 {
	local addr = rs1 + offs5;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:1 = *:1 (dst);
	rd = (rd & 0xff00) | zext(val);
}
:LDB rd, (rs1, rs2) is opcode=0xc & rd & rs1 & rs2 & op2=0x0 {
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:1 = *:1 (dst);
	rd = (rd & 0xff00) | zext(val);
}
:LDB rd, (rs1, rs2+) is opcode=0xc & rd & rs1 & rs2 & op2=0x1 {
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:1 = *:1 (dst);
	rd = (rd & 0xff00) | zext(val);
	# BUGFIX: the post-increment addressing mode (RB, RI+) updates the
	# index register rs2 (as in the STB/STW forms below), not the base rs1.
	rs2 = rs2 + 1;
}
:LDB rd, (rs1, -rs2) is opcode=0xc & rd & rs1 & rs2 & op2=0x2 {
	rs2 = rs2 - 1;
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:1 = *:1 (dst);
	rd = (rd & 0xff00) | zext(val);
}

# Load Immediate 8-bit constant (high byte)
:LDH rd, ximm8 is opcode=0x1f & rd & ximm8 & rd_hi {
	rd_hi = ximm8;
}

# Load Immediate 8-bit constant (low byte)
# NOTE(review): this zero-extends into the full register (clears RD.H) --
# confirm against the manual whether LDL should leave RD.H untouched.
:LDL rd, ximm8 is opcode=0x1e & rd & ximm8 {
	rd = ximm8;
}

# Load Word from Memory
:LDW rd, (rs1, offs5) is opcode=0x9 & rd & rs1 & offs5 {
	local addr = rs1 + offs5;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = *:2 (dst);
	rd = val;
}
:LDW rd, (rs1, rs2) is opcode=0xd & rd & rs1 & rs2 & op2=0x0 {
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = *:2 (dst);
	rd = val;
}
:LDW rd, (rs1, rs2+) is opcode=0xd & rd & rs1 & rs2 & op2=0x1 {
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = *:2 (dst);
	rd = val;
	# BUGFIX: word post-increment updates the index register rs2, not rs1.
	rs2 = rs2 + 2;
}
:LDW rd, (rs1, -rs2) is opcode=0xd & rd & rs1 & rs2 & op2=0x2 {
	rs2 = rs2 - 2;
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = *:2 (dst);
	rd = val;
}

# Logical Shift Left
:LSL rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0xc {
	local shift:2 = ((ximm4-1)%16+1);
	getbit($(XC), rd, 16-shift);
	local oldRd:2 = rd >> 15;
	rd = rd << shift;
	shiftFlags(rd,oldRd);
}
:LSL rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x14 {
	getbit($(XC), rd, 16-rs1);
	local oldRd:2 = rd >> 15;
	rd = rd << rs1;
	shiftFlags(rd,oldRd);
}

# Logical Shift Right
:LSR rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0xd {
	getbit($(XC), rd, ximm4-1);
	local oldRd:2 = rd >> 15;
	rd = rd >> ximm4;
	shiftFlags(rd,oldRd);
}
:LSR rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x15 {
	getbit($(XC), rd, rs1-1);
	local oldRd:2 = (rd >> 15);
	rd = rd >> rs1;
	shiftFlags(rd,oldRd);
}

# Move Register Content
# Synonym for OR RD, R0, RS
:MOV rd, rs2 is opcode=0x2 & rd & reg5=0 & rs2 & op2=0x2 {
	rd = rs2;
	default_flags(rd);
}

# Two's Complement
:NEG rd, rs2 is opcode=0x3 & rd & reg8!=0 & reg5=0x0 & rs2 & op2=0x0 {
	local tmp:2 = -rs2;
	rd = tmp;
	$(XN) = (rd s< 0);
	$(XZ) = (rd == 0);
	$(XV) = (((rs2 & rd) & 0x8000) != 0);
	$(XC) = (((rs2 | rd) & 0x8000) != 0);
}
:NEG rd is opcode=0x3 & rd & reg5=0x0 & rs2 & reg2=reg8 & op2=0x0 {
	local tmp:2 = -rs2;
	rd = tmp;
	$(XN) = (rd s< 0);
	$(XZ) = (rd == 0);
	$(XV) = (((rs2 & rd) & 0x8000) != 0);
	$(XC) = (((rs2 | rd) & 0x8000) != 0);
}

# No Op
:NOP is xop16=0x100 {}

# Logical OR
:OR rd, rs1, rs2 is opcode=0x2 & rd & rs1 & rs2 & op2=0x2 {
	local result:2 = rs1 | rs2;
	rd = result;
	default_flags(result);
}

# Logical OR Immediate 8-bit Constant (high byte)
:ORH rd, ximm8 is opcode=0x15 & rd & ximm8 & rd_hi {
	rd_hi = rd_hi | ximm8;
	default_flags(rd_hi);
}

# Logical OR Immediate 8-bit Constant (low byte)
:ORL rd, ximm8 is opcode=0x14 & rd & ximm8 & rd_lo { rd_lo = rd_lo |
ximm8; default_flags(rd_lo); } # Calculate Parity :PAR rd is opcode=0x0 & rd & xop8=0xf5 { parity(rd, $(XC)); default_flags(rd); } # Rotate Left :ROL rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0xe { local cnt:2 = ximm4; rd = (rd << cnt) | (rd >> (16 - cnt)); default_flags(rd); } :ROL rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x16 { local cnt:2 = rs1 & 0xf; rd = (rd << cnt) | (rd >> (16 - cnt)); default_flags(rd); } # Rotate Right :ROR rd, ximm4 is opcode=0x1 & rd & ximm4 & op3=0xf { local cnt:2 = ximm4; rd = (rd >> cnt) | (rd << (16 - cnt)); default_flags(rd); } :ROR rd, rs1 is opcode=0x1 & rd & rs1 & op4=0x17 { local cnt:2 = rs1 & 0xf; rd = (rd >> cnt) | (rd << (16 - cnt)); default_flags(rd); } # Return to Scheduler # Implement as NOP for now :RTS is xop16=0x0200 { XPC = TerminateThread(); return [XPC]; } # Subtract with Carry :SBC rd, rs1, rs2 is opcode=0x3 & rd & rs1 & rs2 & op2=0x1 { local result:2 = rs1 - rs2 - zext($(XC)); rd = result; subtraction_flagsC(rs1, rs2, result); } # Sign Extent Byte to Word :SEX rd is opcode=0x0 & rd & xop8=0xf4 { local result:1 = rd:1 & 0xff; rd = sext(result); default_flags(rd); } # Set Interrupt Flag # TODO: implement interrupt flags :SIF is xop16=0x0300 { setInterruptFlag(); } :SIF rd is opcode=0x0 & rd & xop8=0xf7 { setInterruptFlag(); } # Set Semaphore # TODO: implement semaphores :SSEM imm3 is opcode=0x0 & imm3 & xop8=0xf2 { local sem:1 = imm3; setSemaphore(sem); } :SSEM rd is opcode=0x0 & rd & xop8=0xf3 { setSemaphore(rd); } # Store Byte to Memory (low byte) :STB rd, (rs1, offs5) is opcode=0xa & rd & rs1 & offs5 { local addr = rs1 + offs5; computePage(addr); local dst:3 = segment(PPAGE,addr); local val:1 = rd:1; *dst = val; } :STB rd, (rs1, rs2) is opcode=0xe & rd & rs1 & rs2 & op2=0x0 { local addr = rs1 + rs2; computePage(addr); local dst:3 = segment(PPAGE,addr); local val:1 = rd:1; *dst = val; } :STB rd, (rs1, rs2+) is opcode=0xe & rd & rs1 & rs2 & op2=0x1 { local addr = rs1 + rs2; computePage(addr); local dst:3 = 
segment(PPAGE,addr); local val:1 = rd:1; *dst = val; rs2 = rs2 + 1; }
:STB rd, (rs1, -rs2) is opcode=0xe & rd & rs1 & rs2 & op2=0x2 {
	rs2 = rs2 - 1;
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:1 = rd:1;
	*dst = val;
}

# Store Word to Memory
:STW rd, (rs1, offs5) is opcode=0xb & rd & rs1 & offs5 {
	local addr = rs1 + offs5;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = rd;
	*dst = val;
}
:STW rd, (rs1, rs2) is opcode=0xf & rd & rs1 & rs2 & op2=0x0 {
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = rd;
	*dst = val;
	# BUGFIX: the plain register-indexed store must not modify rs2; the
	# original contained a stray "rs2 = rs2 + 1" here (the post-increment
	# form below is op2=0x1 and adds 2 for a word access).
}
:STW rd, (rs1, rs2+) is opcode=0xf & rd & rs1 & rs2 & op2=0x1 {
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = rd;
	*dst = val;
	rs2 = rs2 + 2;
}
:STW rd, (rs1, -rs2) is opcode=0xf & rd & rs1 & rs2 & op2=0x2 {
	rs2 = rs2 - 2;
	local addr = rs1 + rs2;
	computePage(addr);
	local dst:3 = segment(PPAGE,addr);
	local val:2 = rd;
	*dst = val;
}

# Subtract without Carry
:SUB rd, rs1, rs2 is opcode=0x3 & rd & rs1 & rs2 & op2=0x0 {
	local result:2 = rs1 - rs2;
	rd = result;
	subtraction_flags(rs1, rs2, result);
}

# Subtract Immediate 8-bit constant (high byte)
:SUBH rd, ximm8 is opcode=0x19 & rd & ximm8 {
	local val:2 = ximm8 << 8;
	local result:2 = rd - val;
	subtraction_flags(rd, val, result);
	rd = result;
}

# Subtract Immediate 8-bit constant (low byte)
:SUBL rd, ximm8 is opcode=0x18 & rd & ximm8 {
	local val:2 = ximm8;
	local result:2 = rd - val;
	$(XN) = (result s< 0);
	$(XZ) = ((result == 0) & ($(XZ)==1));
	$(XV) = ((~rd & result) & 0x8000) != 0;
	$(XC) = ((rd & ~result) & 0x8000) != 0;
	rd = result;
}

# Transfer from and to Special Registers
:TFR rd, XCCR is opcode=0x0 & rd & xop8=0xf8 & XCCR {
	# BUGFIX: pack the flags as N:Z:V:C into bits 3..0, mirroring the
	# unpacking in "TFR XCCR, rd" below; the original shifted XN, XZ and
	# XV all by 1, collapsing them into the same bit.
	local val:1 = ($(XN) << 3) | ($(XZ) << 2) | ($(XV) << 1) | $(XC);
	rd = zext(val);
}
:TFR XCCR, rd is opcode=0x0 & rd & xop8=0xf9 & XCCR {
	XCCR = rd & 0xf;
	$(XN) = rd[3,1];
	$(XZ) = rd[2,1];
	$(XV) = rd[1,1];
	$(XC) = rd[0,1];
}
:TFR rd, XPC is opcode=0x0 & rd & xop8=0xfa & XPC { rd = inst_next + 2; } # Test Register # Synonym for SUB R0, RS, R0 :TST rs1 is opcode=0x3 & reg8=0x0 & rs1 & reg2=0x0 & op2=0x0 { local result:2 = rs1; subtraction_flags(rs1,0,result); } # Logical Exclusive NOR :XNOR rd, rs1, rs2 is opcode=0x2 & rd & rs1 & rs2 & op2=0x3 { local result:2 = ~(rs1 ^ rs2); rd = result; default_flags(result); } # Logical Exclusive NOR Immediate 8-bit constant (high byte) :XNORH rd, ximm8 is opcode=0x17 & rd & ximm8 & rd_hi { rd_hi = ~(rd_hi ^ ximm8); default_flags(rd_hi); } # Logical Exclusive NOR Immediate 8-bit constant (low byte) :XNORL rd, ximm8 is opcode=0x16 & rd & ximm8 & rd_lo { rd_lo= ~(rd_lo^ ximm8); default_flags(rd_lo); } } ================================================ FILE: pypcode/processors/HCS12/data/manuals/HCS12.idx ================================================ @S12XCPUV2.pdf[ CPU12/CPU12X Reference Manual, Rev. v01.04 21 Apr. 2016, nxp.com ] ABA, 84 ABX, 85 ABY, 86 ADCA, 87 ADCB, 88 ADDA, 89 ADDB, 90 ADDD, 91 ADDX, 92 ADDY, 93 ADED, 94 ADEX, 95 ADEY, 96 ANDA, 97 ANDB, 98 ANDCC, 99 ANDX, 100 ANDY, 101 ASL, 102 ASLA, 103 ASLB, 104 ASLD, 105 ASLW, 106 ASLX, 107 ASLY, 108 ASR, 109 ASRA, 110 ASRB, 111 ASRW, 112 ASRX, 113 ASRY, 114 BCC, 115 BCLR, 116 BCS, 117 BEQ, 118 BGE, 119 BGND, 120 BGT, 121 BHI, 122 BHS, 123 BITA, 124 BITB, 125 BITX, 126 BITY, 127 BLE, 128 BLO, 129 BLS, 130 BLT, 131 BMI, 132 BNE, 133 BPL, 134 BRA, 135 BRCLR, 136 BRN, 137 BRSET, 138 BSET, 139 BSR, 140 BTAS, 141 BVC, 142 BVS, 143 CALL, 144 CBA, 145 CLC, 146 CLI, 147 CLR, 148 CLRA, 149 CLRB, 150 CLRW, 151 CLRX, 152 CLRY, 153 CLV, 154 CMPA, 155 CMPB, 156 COM, 157 COMA, 158 COMB, 159 COMW, 160 COMX, 161 COMY, 162 CPD, 163 CPED, 164 CPES, 165 CPEX, 166 CPEY, 167 CPS, 168 CPX, 169 CPY, 170 DAA, 171 DBEQ, 172 DBNE, 173 DEC, 174 DECA, 175 DECB, 176 DECW, 177 DECX, 178 DECY, 179 DES, 180 DEX, 181 DEY, 182 EDIV, 183 EDIVS, 184 EMACS, 185 EMAXD, 186 EMAXM, 187 EMIND, 188 EMINM, 189 EMUL, 190 EMULS, 191 
EORA, 192 EORB, 193 EORX, 194 EORY, 195 ETBL, 196 EXG, 197 FDIV, 199 GLDAA, 200 GLDAB, 201 GLDD, 202 GLDS, 203 GLDX, 204 GLDY, 205 GSTAA, 206 GSTAB, 207 GSTD, 208 GSTS, 209 GSTX, 210 GSTY, 211 IBEQ, 212 IBNE, 213 IDIV, 214 IDIVS, 215 INC, 216 INCA, 217 INCB, 218 INCW, 219 INCX, 220 INCY, 221 INS, 222 INX, 223 INY, 224 JMP, 225 JSR, 226 LBCC, 227 LBCS, 228 LBEQ, 229 LBGE, 230 LBGT, 231 LBHI, 232 LBHS, 233 LBLE, 234 LBLO, 235 LBLS, 236 LBLT, 237 LBMI, 238 LBNE, 239 LBPL, 240 LBRA, 241 LBRN, 242 LBVC, 243 LBVS, 244 LDAA, 245 LDAB, 246 LDD, 247 LDS, 248 LDX, 249 LDY, 250 LEAS, 251 LEAX, 252 LEAY, 253 LSL, 254 LSLA, 255 LSLB, 256 LSLD, 257 LSLW, 258 LSLX, 259 LSLY, 260 LSR, 261 LSRA, 262 LSRB, 263 LSRD, 264 LSRW, 265 LSRX, 266 LSRY, 267 MAXA, 268 MAXM, 269 MEM, 270 MINA, 271 MINM, 272 MOVB, 273 MOVW, 280 MUL, 287 NEG, 288 NEGA, 289 NEGB, 290 NEGW, 291 NEGX, 292 NEGY, 293 NOP, 294 ORAA, 295 ORAB, 296 ORCC, 297 ORX, 298 ORY, 299 PSHA, 300 PSHB, 301 PSHC, 302 PSHCW, 303 PSHD, 304 PSHX, 305 PSHY, 306 PULA, 307 PULB, 308 PULC, 309 PULCW, 310 PULD, 311 PULX, 312 PULY, 313 REV, 314 REVW, 316 ROL, 318 ROLA, 319 ROLB, 320 ROLW, 321 ROLX, 322 ROLY, 323 ROR, 324 RORA, 325 RORB, 326 RORW, 327 RORX, 328 RORY, 329 RTC, 330 RTI, 331 RTS, 332 SBA, 333 SBCA, 334 SBCB, 335 SBED, 336 SBEX, 337 SBEY, 338 SEC, 339 SEI, 340 SEV, 341 SEX, 342 STAA, 343 STAB, 344 STD, 345 STOP, 346 STS, 348 STX, 349 STY, 350 SUBA, 351 SUBB, 352 SUBD, 353 SUBX, 354 SUBY, 355 SWI, 356 SYS, 357 TAB, 358 TAP, 359 TBA, 360 TBEQ, 361 TBL, 362 TBNE, 363 TFR, 364 TPA, 366 TRAP, 367 TST, 368 TSTA, 369 TSTB, 370 TSTW, 371 TSTX, 372 TSTY, 373 TSX, 374 TSY, 375 TXS, 376 TYS, 377 WAI, 378 WAV, 379 XGDX, 380 XGDY, 381 @MC9S12XEP100RMV1.pdf[ MC9S12XEP100 Reference Manual, Rev. 
1.25 02/2013, nxp.com ] ADC, 389 ADD, 390 ADDH, 391 ADDL, 392 AND, 393 ANDH, 394 ANDL, 395 ASR, 396 BCC, 397 BCS, 398 BEQ, 399 BFEXT, 400 BFFO, 401 BFINS, 402 BFINSI, 403 BFINSX, 404 BGE, 405 BGT, 406 BHI, 407 BHS, 408 BITH, 409 BITL, 410 BLE, 411 BLO, 412 BLS, 413 BLT, 414 BMI, 415 BNE, 416 BPL, 417 BRA, 418 BRK, 419 BVC, 420 BVS, 421 CMP, 422 CMPL, 423 COM, 424 CPC, 425 CPCH, 426 CSEM, 427 CSL, 428 CSR, 429 JAL, 430 LDB, 431 LDH, 432 LDL, 433 LDW, 434 LSL, 435 LSR, 436 MOV, 437 NEG, 438 NOP, 439 OR, 440 ORH, 441 ORL, 442 PAR, 443 ROL, 444 ROR, 445 RTS, 446 SBC, 447 SEX, 448 SIF, 449 SSEM, 450 STB, 451 STW, 452 SUB, 453 SUBH, 454 SUBL, 455 TFR, 456 TST, 457 XNOR, 458 XNORH, 459 XNORL, 460 ================================================ FILE: pypcode/processors/JVM/data/languages/JVM.cspec ================================================ ================================================ FILE: pypcode/processors/JVM/data/languages/JVM.ldefs ================================================ Generic JVM ================================================ FILE: pypcode/processors/JVM/data/languages/JVM.opinion ================================================ ================================================ FILE: pypcode/processors/JVM/data/languages/JVM.pspec ================================================ ================================================ FILE: pypcode/processors/JVM/data/languages/JVM.slaspec ================================================ # Stack Convention: # 1) Stack grows to smaller addresses (Subtract to push) # 2) Stack pointer points to a valid item on the stack # A Java "word" can hold a boolean, byte, short, float, int, reference, or returnAddress. 
# Two "words" hold a long or a double @define SIZE "4" # Number of bytes in the Java Word @define DOUBLE_SIZE "8" @define INT_SUPPORT "1" ############################################################################### # Basic Definitions ############################################################################### define endian=big; define alignment=1; define space ram type=ram_space size=$(SIZE) default; define space register type=register_space size=$(SIZE); define space constantPool type=ram_space size=$(SIZE); define space localVariableArray type=ram_space size=$(SIZE); define space parameterSpace type=ram_space size=$(SIZE); define register offset = 0x0 size=$(DOUBLE_SIZE) [cat2_return_value]; define register offset = 0x0 size=$(SIZE) [_ return_value SP PC switch_target return_address call_target LVA]; #define register offset = 0x0 size=$(DOUBLE_SIZE) [cat2_return_value]; #define register offset = 0x8 size=$(SIZE) [return_value SP PC switch_target return_address call_target LVA]; define register offset=0x100 size=16 [ switch_ctrl ]; @define CPOOL_ANEWARRAY "0:4" @define CPOOL_CHECKCAST "1:4" @define CPOOL_GETFIELD "2:4" @define CPOOL_GETSTATIC "3:4" #also used for ldc_w @define CPOOL_LDC "4:4" @define CPOOL_LDC2_W "5:4" @define CPOOL_INSTANCEOF "6:4" @define CPOOL_INVOKEDYNAMIC "7:4" @define CPOOL_INVOKEINTERFACE "8:4" @define CPOOL_INVOKESPECIAL "9:4" @define CPOOL_INVOKESTATIC "10:4" @define CPOOL_INVOKEVIRTUAL "11:4" @define CPOOL_MULTIANEWARRAY "12:4" @define CPOOL_NEW "13:4" @define CPOOL_NEWARRAY "14:4" @define CPOOL_PUTSTATIC "15:4" @define CPOOL_PUTFIELD "16:4" @define CPOOL_ARRAYLENGTH "17:4" # #defined ops ending in "CallOther" are used for pcode injection # define pcodeop getFieldCallOther; define pcodeop getStaticCallOther; define pcodeop ldcCallOther; define pcodeop ldc_wCallOther; define pcodeop ldc2_wCallOther; define pcodeop invokedynamicCallOther; define pcodeop invokeinterfaceCallOther; define pcodeop invokespecialCallOther; define pcodeop 
invokestaticCallOther; define pcodeop invokevirtualCallOther; define pcodeop multianewarrayCallOther; define pcodeop putStaticCallOther; define pcodeop putFieldCallOther; # # defined ops ending in "Op" are black-box instructions # define pcodeop athrowOp; define pcodeop checkcastOp; define pcodeop dremOp; define pcodeop fremOp; define pcodeop monitorenterOp; define pcodeop monitorexitOp; define pcodeop multianewarrayOp; define pcodeop multianewarrayProcessAdditionalDimensionsOp; define pcodeop throwExceptionOp; ############################################################################### # Context ############################################################################### define context switch_ctrl switch_low = (0,31) noflow switch_high = (32,63) noflow switch_num = (64,95) noflow in_table_switch = (96,97) noflow in_lookup_switch = (98,99) noflow alignmentPad = (100,101) noflow padVal = (100,101) noflow ; ############################################################################### # TOKENS ############################################################################### define token opcode (8) op = (0,7) ; define token w_opcode (16) w_op = (0,15) ; define token data8 (8) n = (0,3) m = (4,7) atype = (0,7) byte = (0,7) byte1 = (0,7) byte2 = (0,7) byte3 = (0,7) byte4 = (0,7) sbyte = (0,7) signed branch = (0,7) signed branchbyte1 = (0,7) signed branchbyte2 = (0,7) branchbyte3 = (0,7) branchbyte4 = (0,7) index = (0,7) indexbyte1 = (0,7) indexbyte2 = (0,7) constant = (0,7) constantbyte1= (0,7) constantbyte2= (0,7) nargs = (0,7) method = (0,7) defaultbyte1 = (0,7) defaultbyte2 = (0,7) defaultbyte3 = (0,7) defaultbyte4 = (0,7) highbyte1 = (0,7) highbyte2 = (0,7) highbyte3 = (0,7) highbyte4 = (0,7) lowbyte1 = (0,7) lowbyte2 = (0,7) lowbyte3 = (0,7) lowbyte4 = (0,7) npairsbyte1 = (0,7) npairsbyte2 = (0,7) npairsbyte3 = (0,7) npairsbyte4 = (0,7) dimensions = (0,7) blank1 = (0,7) blank2 = (0,7) count = (0,7) pad = (0,7) pad1 = (0,7) pad2 = (0,7) pad3 = (0,7) wide_op = 
(0,7) ; define token switch (8) matchbyte1 = (0,7) matchbyte2 = (0,7) matchbyte3 = (0,7) matchbyte4 = (0,7) offsetbyte1 = (0,7) offsetbyte2 = (0,7) offsetbyte3 = (0,7) offsetbyte4 = (0,7) ; ############################################################################### # Macros ############################################################################### macro push(x) { SP = SP - $(SIZE); *:$(SIZE) SP = x; } macro pop(x) { x = *:$(SIZE) SP; SP = SP + $(SIZE); } macro push2(x) { SP = SP - $(DOUBLE_SIZE); *:$(DOUBLE_SIZE) SP = x:$(DOUBLE_SIZE); } macro pop2(x) { x = *:$(DOUBLE_SIZE) SP; SP = SP + $(DOUBLE_SIZE); } ############################################################################### # Pseudo Instructions ############################################################################### Branch:addr is branchbyte1; branchbyte2 [ addr = inst_start + (branchbyte1<<8 | branchbyte2); ] { export *:$(SIZE) addr; } Branch_w:addr is branchbyte1; branchbyte2; branchbyte3; branchbyte4 [ addr = inst_start + ((branchbyte1 << 24) | (branchbyte2 << 16) | (branchbyte3 << 8) | branchbyte4); ] { export *:$(SIZE) addr; } Default:"default" addr is defaultbyte1; defaultbyte2; defaultbyte3; defaultbyte4 [ addr = inst_start + ((defaultbyte1<<24) | (defaultbyte2<<16) | (defaultbyte3 << 8) | defaultbyte4); ] { export *:$(SIZE) addr; } ############################################################################### # Constructors ############################################################################### :aaload is (in_table_switch=0 & in_lookup_switch=0 & op=0x32) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; pop(_index); pop(_arrayref); _offset:$(SIZE) = _arrayref + $(SIZE) * _index; _value:$(SIZE) = *[ram] _offset; push(_value); } :aastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x53) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value :$(SIZE) = 0; pop(_value); pop(_index); pop(_arrayref); _offset:$(SIZE) = _arrayref + $(SIZE) * _index; *[ram]:$(SIZE) _offset = 
_value; } :aconst_null is (in_table_switch=0 & in_lookup_switch=0 & op=0x01) { push(0:4); } :aload index is (in_table_switch=0 & in_lookup_switch=0 & op=0x19); index { LVA = $(SIZE) * index; _objectref :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_objectref); } :aload_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x2a) { LVA = 0; _objectref :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_objectref); } :aload_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x2b) { LVA = 1 * $(SIZE); _objectref :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_objectref); } :aload_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x2c) { LVA = 2 * $(SIZE); _objectref :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_objectref); } :aload_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x2d) { LVA = 3 * $(SIZE); _objectref :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_objectref); } :anewarray index is (in_table_switch=0 & in_lookup_switch=0 & op=0xbd); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { _count :$(SIZE) = 0; _arrayref :$(SIZE) = 0; pop(_count); _ref: $(SIZE) = cpool(0:4,index,$(CPOOL_ANEWARRAY)); _arrayref = newobject(_ref,_count); push(_arrayref); } :areturn is (in_table_switch=0 & in_lookup_switch=0 & op=0xb0) { pop(return_value); return [return_address]; } :arraylength is (in_table_switch=0 & in_lookup_switch=0 & op=0xbe) { _arrayref :$(SIZE) = 0; _length :$(SIZE) = 0; pop(_arrayref); _length = cpool(_arrayref, 0:4, $(CPOOL_ARRAYLENGTH)); push(_length); } :astore index is (in_table_switch=0 & in_lookup_switch=0 & op=0x3a); index { _value :$(SIZE) = 0; pop(_value); LVA = index*$(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :astore_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x4b) { _value :$(SIZE) = 0; pop(_value); LVA = 0; *[localVariableArray]:$(SIZE) (LVA) = _value; } :astore_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x4c) { _value :$(SIZE) = 0; pop(_value); LVA = 1 * $(SIZE); 
*[localVariableArray]:$(SIZE) (LVA) = _value; } :astore_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x4d) { _value :$(SIZE) = 0; pop(_value); LVA = 2 * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :astore_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x4e) { _value :$(SIZE) = 0; pop(_value); LVA = 3 * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :athrow is (in_table_switch=0 & in_lookup_switch=0 & op=0xbf) { _objectref :$(SIZE) = 0; pop(_objectref); athrowOp(_objectref); SP = SP; goto ; } :baload is (in_table_switch=0 & in_lookup_switch=0 & op=0x33) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value:1 = 0; pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + _index; _value = *[ram]:1 _offset; _valueSignExtended:$(SIZE) = sext(_value); push(_valueSignExtended); } :bastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x54) { _value :$(SIZE) = 0; _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; pop(_value); pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + _index; *[ram]:1 _offset = _value:1; } :bipush sbyte is (in_table_switch=0 & in_lookup_switch=0 & op=0x10); sbyte { _value:$(SIZE) = sext(sbyte:1); push(_value); } :caload is (in_table_switch=0 & in_lookup_switch=0 & op=0x34) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value:2 = 0; pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + 2 * _index; _value = *[ram]:2 _offset; _valueZeroExtended:$(SIZE) = zext(_value); push(_valueZeroExtended); } :castore is (in_table_switch=0 & in_lookup_switch=0 & op = 0x55) { _value :$(SIZE) = 0; _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; pop(_value); pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + 2 * _index; *[ram]:2 _offset = _value:2; } :checkcast index is (in_table_switch=0 & in_lookup_switch=0 & op=0xc0); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { #_object: $(SIZE) = *:$(SIZE) SP; #throwExceptionOp(_object); #_res:1 = cpool(_object,index,$(CPOOL_CHECKCAST)); 
#throwExceptionOp(_res); #_ref: $(SIZE) = cpool(0:4,index,$(CPOOL_CHECKCAST)); #checkcastOp(_object,_ref); _object:$(SIZE) = 0; pop(_object); _object = cpool(_object,index,$(CPOOL_CHECKCAST)); push(_object); #_res:1 = cpool(_object,index,$(CPOOL_CHECKCAST)); #_result:$(SIZE) = zext(_res); #push(_result); } :d2f is (in_table_switch=0 & in_lookup_switch=0 & op=0x90) { _double :$(DOUBLE_SIZE) = 0; _float :$(SIZE) = 0; pop2(_double); _float = float2float(_double); push(_float); } #this is not exactly the algorithm that the JVM uses to convert doubles to ints #should be good enough :d2i is (in_table_switch=0 & in_lookup_switch=0 & op=0x8e) { _double :$(DOUBLE_SIZE) = 0; _int :$(DOUBLE_SIZE) = 0; pop2(_double); _int = round(_double); push(_int:$(SIZE)); } #this is not exactly the algorithm that the JVM uses to convert doubles to longs :d2l is (in_table_switch=0 & in_lookup_switch=0 & op=0x8f) { _double :$(DOUBLE_SIZE) = 0; _long :$(DOUBLE_SIZE) = 0; pop2(_double); _long = round(_double); push2(_long); } :dadd is (in_table_switch=0 & in_lookup_switch=0 & op=0x63) { _value1 :$(DOUBLE_SIZE) = 0; _value2 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 f+ _value2; push2(_result); } :daload is (in_table_switch=0 & in_lookup_switch=0 & op=0x31) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value :$(DOUBLE_SIZE) = 0; pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + $(DOUBLE_SIZE) * _index; _value = *[ram]:$(DOUBLE_SIZE) _offset; push2(_value); } :dastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x52) { _value :$(DOUBLE_SIZE) = 0; _index:$(SIZE) = 0; _arrayref:$(SIZE) = 0; pop2(_value); pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + $(DOUBLE_SIZE) * _index; *[ram]:$(DOUBLE_SIZE) _offset = _value; } :dcmpg is (in_table_switch=0 & in_lookup_switch=0 & op=0x98) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(SIZE) = 0; pop2(_value2); pop2(_value1); _result = zext(_value1 f> 
_value2) + zext(_value1 f>= _value2) - 1; push(_result); } :dcmpl is (in_table_switch=0 & in_lookup_switch=0 & op=0x97) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(SIZE) = 0; pop2(_value2); pop2(_value1); _result = zext(_value1 f> _value2) + zext(_value1 f>= _value2) - 1; push(_result); } :dconst_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x0e) { _int :$(DOUBLE_SIZE) = 0; _double :$(DOUBLE_SIZE) = int2float(_int); push2(_double); } :dconst_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x0f) { _int :$(DOUBLE_SIZE) = 1; _double :$(DOUBLE_SIZE) = int2float(_int); push2(_double); } :ddiv is (in_table_switch=0 & in_lookup_switch=0 & op=0x6f) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 f/ _value2; push2(_result); } :dload index is (in_table_switch=0 & in_lookup_switch=0 & op=0x18); index { LVA = index*$(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :dload_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x26) { LVA = 0; _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :dload_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x27) { LVA = 1 * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :dload_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x28) { LVA = 2 * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :dload_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x29) { LVA = 3 * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :dmul is (in_table_switch=0 & in_lookup_switch=0 & op=0x6b) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 f* _value2; push2(_result); } :dneg is (in_table_switch=0 & in_lookup_switch=0 & op=0x77) { _value 
:$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value); _result = f- _value; push2(_result); } :drem is (in_table_switch=0 & in_lookup_switch=0 & op=0x73) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = dremOp(_value1, _value2); push2(_result); } :dreturn is (in_table_switch=0 & in_lookup_switch=0 & op=0xaf) { pop2(cat2_return_value); return [return_address]; } :dstore index is (in_table_switch=0 & in_lookup_switch=0 & op=0x39); index { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = index*$(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :dstore_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x47) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 0; *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :dstore_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x48) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 1 * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :dstore_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x49) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 2 * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :dstore_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x4a) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 3 * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :dsub is (in_table_switch=0 & in_lookup_switch=0 & op=0x67) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 f- _value2; push2(_result); } :dup is (in_table_switch=0 & in_lookup_switch=0 & op=0x59) { local word = *:$(SIZE) SP; push(word); } :dup_x1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x5a) { _value1 :$(SIZE) = 0; _value2 :$(SIZE) = 0; pop(_value1); pop(_value2); push(_value1); push(_value2); push(_value1); } :dup_x2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x5b) { _value1 :$(SIZE) = 0; _value2 :$(SIZE) = 0; _value3 
:$(SIZE) = 0; pop(_value1); pop(_value2); pop(_value3); push(_value1); push(_value3); push(_value2); push(_value1); } :dup2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x5c) { _value1 :$(SIZE) = 0; _value2 :$(SIZE) = 0; pop(_value1); pop(_value2); push(_value2); push(_value1); push(_value2); push(_value1); } :dup2_x1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x5d) { _value1 :$(SIZE) = 0; _value2 :$(SIZE) = 0; _value3 :$(SIZE) = 0; pop(_value1); pop(_value2); pop(_value3); push(_value2); push(_value1); push(_value3); push(_value2); push(_value1); } :dup2_x2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x5e) { _value1 :$(SIZE) = 0; _value2 :$(SIZE) = 0; _value3 :$(SIZE) = 0; _value4 :$(SIZE) = 0; pop(_value1); pop(_value2); pop(_value3); pop(_value4); push(_value2); push(_value1); push(_value4); push(_value3); push(_value2); push(_value1); } :f2d is (in_table_switch=0 & in_lookup_switch=0 & op=0x8d) { _value :$(SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop(_value); _result = float2float(_value); push2(_result); } #not exactly how the JVM converts floats to ints but close enough :f2i is (in_table_switch=0 & in_lookup_switch=0 & op=0x8b) { _value :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value); _result = round(_value); push(_result); } #not exactly how the JVM converts floats to longs :f2l is (in_table_switch=0 & in_lookup_switch=0 & op=0x8c) { _value :$(SIZE) = 0; _value2 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop(_value); _value2 = float2float(_value); _result = round(_value2); push2(_result); } :fadd is (in_table_switch=0 & in_lookup_switch=0 & op=0x62) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 f+ _value2; push(_result); } :faload is (in_table_switch=0 & in_lookup_switch=0 & op=0x30) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value :$(SIZE) = 0; pop(_index); pop(_arrayref); local _offset = _arrayref + ($(SIZE) * _index); _value = *[ram] (_offset); push(_value); } 
# ---- JVM constructors: fastore .. iaload ----

# fastore: pop value, index, arrayref; store the one-word float into the ram array.
:fastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x51) {
    _value :$(SIZE) = 0;
    _index:$(SIZE) = 0;
    _arrayref:$(SIZE) = 0;
    pop(_value);
    pop(_index);
    pop(_arrayref);
    local _offset = _arrayref + ($(SIZE) * _index);
    *[ram] _offset = _value;
}

# fcmpg / fcmpl: push 1, 0 or -1 for >, ==, <.
# NOTE(review): the g/l variants are modeled identically (NaN ordering not modeled).
:fcmpg is (in_table_switch=0 & in_lookup_switch=0 & op=0x96) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = zext(_value1 f> _value2) + zext(_value1 f>= _value2) - 1;
    push(_result);
}

:fcmpl is (in_table_switch=0 & in_lookup_switch=0 & op=0x95) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = zext(_value1 f> _value2) + zext(_value1 f>= _value2) - 1;
    push(_result);
}

# fconst_<n>: push the float constant n.
:fconst_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x0b) {
    f :$(SIZE) = 0;
    fAsFloat:$(SIZE) = int2float(f);
    push(fAsFloat);
}

:fconst_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x0c) {
    f :$(SIZE) = 1;
    fAsFloat:$(SIZE) = int2float(f);
    push(fAsFloat);
}

:fconst_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x0d) {
    f :$(SIZE) = 2;
    fAsFloat:$(SIZE) = int2float(f);
    push(fAsFloat);
}

:fdiv is (in_table_switch=0 & in_lookup_switch=0 & op=0x6e) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = _value1 f/ _value2;
    push(_result);
}

# fload / fload_<n>: push the one-word float from a local variable slot.
# FIX(review): the loads read $(DOUBLE_SIZE) (8) bytes from the local variable
# array into a $(SIZE) (4) byte temporary.  A float occupies a single word
# (JVMS sect. 2.6.1), and every other one-word access (iload, aload, fstore)
# uses $(SIZE); the load size is changed to $(SIZE) accordingly.
:fload index is (in_table_switch=0 & in_lookup_switch=0 & op=0x17); index {
    LVA = $(SIZE) * index;
    _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA);
    push(_value);
}

:fload_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x22) {
    LVA = 0;
    _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA);
    push(_value);
}

:fload_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x23) {
    LVA = 1 * $(SIZE);
    _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA);
    push(_value);
}

:fload_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x24) {
    LVA = 2 * $(SIZE);
    _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA);
    push(_value);
}

:fload_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x25) {
    LVA = 3 * $(SIZE);
    _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA);
    push(_value);
}

:fmul is (in_table_switch=0 & in_lookup_switch=0 & op=0x6a) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = _value1 f* _value2;
    push(_result);
}

:fneg is (in_table_switch=0 & in_lookup_switch=0 & op=0x76) {
    _value :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value);
    _result = f- _value;
    push(_result);
}

# frem: remainder delegated to the black-box fremOp pcodeop.
:frem is (in_table_switch=0 & in_lookup_switch=0 & op=0x72) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = fremOp(_value1, _value2);
    push(_result);
}

:freturn is (in_table_switch=0 & in_lookup_switch=0 & op=0xae) {
    pop(return_value);
    return [return_address];
}

# fstore / fstore_<n>: pop one word into a local variable slot.
:fstore index is (in_table_switch=0 & in_lookup_switch=0 & op=0x38); index {
    _value :$(SIZE) = 0;
    pop(_value);
    LVA = index*$(SIZE);
    *[localVariableArray]:$(SIZE) (LVA) = _value;
}

:fstore_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x43) {
    _value :$(SIZE) = 0;
    pop(_value);
    LVA = 0;
    *[localVariableArray]:$(SIZE) (LVA) = _value;
}

:fstore_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x44) {
    _value :$(SIZE) = 0;
    pop(_value);
    LVA = 1 * $(SIZE);
    *[localVariableArray]:$(SIZE) (LVA) = _value;
}

:fstore_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x45) {
    _value :$(SIZE) = 0;
    pop(_value);
    LVA = 2 * $(SIZE);
    *[localVariableArray]:$(SIZE) (LVA) = _value;
}

:fstore_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x46) {
    _value :$(SIZE) = 0;
    pop(_value);
    LVA = 3 * $(SIZE);
    *[localVariableArray]:$(SIZE) (LVA) = _value;
}

:fsub is (in_table_switch=0 & in_lookup_switch=0 & op=0x66) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = _value1 f- _value2;
    push(_result);
}

# getfield / getstatic: constant-pool access is injected via *CallOther pcodeops.
:getfield index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb4 ); indexbyte1; indexbyte2
    [ index = indexbyte1<<8 | indexbyte2; ] {
    getFieldCallOther(index:2);
}

:getstatic index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb2); indexbyte1; indexbyte2
    [index = indexbyte1 << 8 | indexbyte2;] {
    getStaticCallOther(index:2);
}

:goto Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa7); Branch {
    goto Branch;
}

:goto_w Branch_w is (in_table_switch=0 & in_lookup_switch=0 & op=0xc8); Branch_w {
    goto Branch_w;
}

# i2b / i2c / i2s: narrow, then re-extend (sign- or zero-) back to one word.
:i2b is (in_table_switch=0 & in_lookup_switch=0 & op=0x91) {
    _value :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value);
    local _byte = _value:1;
    _result = sext(_byte);
    push(_result);
}

:i2c is (in_table_switch=0 & in_lookup_switch=0 & op=0x92) {
    _value :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value);
    local _char = _value:2;
    _result = zext(_char);
    push(_result);
}

# i2d: sign-extend to 64 bits, then convert to double (category-2 push).
:i2d is (in_table_switch=0 & in_lookup_switch=0 & op=0x87) {
    _value :$(SIZE) = 0;
    _resultAsLong :$(DOUBLE_SIZE) = 0;
    _resultAsDouble :$(DOUBLE_SIZE) = 0;
    pop(_value);
    _resultAsLong = sext(_value);
    _resultAsDouble = int2float(_resultAsLong);
    push2(_resultAsDouble);
}

:i2f is (in_table_switch=0 & in_lookup_switch=0 & op=0x86) {
    _value :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value);
    _result = int2float(_value);
    push(_result);
}

:i2l is (in_table_switch=0 & in_lookup_switch=0 & op=0x85) {
    _value :$(SIZE) = 0;
    _result :$(DOUBLE_SIZE) = 0;
    pop(_value);
    _result = sext(_value);
    push2(_result);
}

:i2s is (in_table_switch=0 & in_lookup_switch=0 & op=0x93) {
    _value :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value);
    local _short = _value:2;
    _result = sext(_short);
    push(_result);
}

:iadd is (in_table_switch=0 & in_lookup_switch=0 & op=0x60) {
    _value2 :$(SIZE) = 0;
    _value1 :$(SIZE) = 0;
    _result :$(SIZE) = 0;
    pop(_value2);
    pop(_value1);
    _result = _value1 + _value2;
    push(_result);
}

:iaload is (in_table_switch=0 & in_lookup_switch=0 & op=0x2e) {
    _index :$(SIZE) = 0;
    _arrayref :$(SIZE) = 0;
    _value :$(SIZE) = 0;
    pop(_index);
    pop(_arrayref);
    local _offset = _arrayref + ($(SIZE) * _index);
    _value = *[ram] (_offset);
    push(_value);
}
:iand is (in_table_switch=0 & in_lookup_switch=0 & op=0x7e) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 & _value2; push(_result); } :iastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x4f) { _value :$(SIZE) = 0; _index:$(SIZE) = 0; _arrayref:$(SIZE) = 0; pop(_value); pop(_index); pop(_arrayref); local _offset = _arrayref + ($(SIZE) * _index); *[ram] _offset = _value; } :iconst_m1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x02) { _i :$(SIZE) = -1; push(_i); } :iconst_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x03) { _i :$(SIZE) = 0; push(_i); } :iconst_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x04) { _i :$(SIZE) = 1; push(_i); } :iconst_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x05) { _i :$(SIZE) = 2; push(_i); } :iconst_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x06) { _i :$(SIZE) = 3; push(_i); } :iconst_4 is (in_table_switch=0 & in_lookup_switch=0 & op=0x07) { _i :$(SIZE) = 4; push(_i); } :iconst_5 is (in_table_switch=0 & in_lookup_switch=0 & op=0x08) { _i :$(SIZE) = 5; push(_i); } :idiv is (in_table_switch=0 & in_lookup_switch=0 & op=0x6c) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 s/ _value2; push(_result); } :if_acmpeq Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa5); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 == _value2 ) goto Branch; } :if_acmpne Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa6); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 != _value2 ) goto Branch; } :if_icmpeq Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x9f); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 == _value2 ) goto Branch; } :if_icmpne Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa0); Branch { _value2 :$(SIZE) 
= 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 != _value2 ) goto Branch; } :if_icmplt Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa1); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 s< _value2 ) goto Branch; } :if_icmpge Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa2); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 s>= _value2 ) goto Branch; } :if_icmpgt Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa3); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 s> _value2 ) goto Branch; } :if_icmple Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa4); Branch { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; pop(_value2); pop(_value1); if( _value1 s<= _value2 ) goto Branch; } :ifeq Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x99); Branch { _value :$(SIZE) = 0; pop(_value); if( _value == 0 ) goto Branch; } :ifne Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x9a); Branch { _value :$(SIZE) = 0; pop(_value); if( _value != 0 ) goto Branch; } :iflt Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x9b); Branch { _value :$(SIZE) = 0; pop(_value); if( _value s< 0 ) goto Branch; } :ifge Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x9c); Branch { _value :$(SIZE) = 0; pop(_value); if( _value s>= 0 ) goto Branch; } :ifgt Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x9d); Branch { _value :$(SIZE) = 0; pop(_value); if( _value s> 0 ) goto Branch; } :ifle Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0x9e); Branch { _value :$(SIZE) = 0; pop(_value); if( _value s<= 0 ) goto Branch; } :ifnonnull Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xc7); Branch { _value :$(SIZE) = 0; pop(_value); if ( _value != 0) goto Branch; } :ifnull Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xc6); Branch { _value :$(SIZE) = 0; 
pop(_value); if ( _value == 0) goto Branch; } :iinc index, constant is (in_table_switch=0 & in_lookup_switch=0 & op=0x84); index; constant { _const :$(SIZE) = 0; LVA = index*$(SIZE); _const = sext(constant:1); *[localVariableArray]:$(SIZE) (LVA) = (*[localVariableArray]:$(SIZE) (LVA)) + _const; } :iload index is (in_table_switch=0 & in_lookup_switch=0 & op=0x15); index { LVA = index*$(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :iload_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x1a) { LVA = 0; _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :iload_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x1b) { LVA = 1 * $(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :iload_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x1c) { LVA = 2 * $(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :iload_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x1d) { LVA = 3 * $(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :imul is (in_table_switch=0 & in_lookup_switch=0 & op=0x68) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 * _value2; push(_result); } :ineg is (in_table_switch=0 & in_lookup_switch=0 & op=0x74) { _value :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value); _result = -_value; push(_result); } :instanceof index is (in_table_switch=0 & in_lookup_switch=0 & op=0xc1); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { _object:$(SIZE) = 0; pop(_object); _res:1 = cpool(_object,index,$(CPOOL_INSTANCEOF)); _result:$(SIZE) = zext(_res); push(_result); } :invokedynamic index, blank1, blank2 is (in_table_switch=0 & in_lookup_switch=0 & op=0xba); indexbyte1; indexbyte2; blank1; blank2 [ index = indexbyte1<<8 | indexbyte2; ] { invokedynamicCallOther(index:2); } :invokeinterface index, count, blank1 is (in_table_switch=0 & in_lookup_switch=0 & 
op=0xb9); indexbyte1; indexbyte2; count; blank1 [ index = indexbyte1<<8 | indexbyte2; ] { invokeinterfaceCallOther(index:2); } :invokespecial index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb7); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { invokespecialCallOther(index:2); } :invokestatic index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb8); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { invokestaticCallOther(index:2); } :invokevirtual index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb6); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { invokevirtualCallOther(index:2); } :ior is (in_table_switch=0 & in_lookup_switch=0 & op=0x80) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 | _value2; push(_result); } :irem is (in_table_switch=0 & in_lookup_switch=0 & op=0x70) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 s% _value2; push(_result); } :ireturn is (in_table_switch=0 & in_lookup_switch=0 & op=0xac) { pop(return_value); return [return_address]; } :ishl is (in_table_switch=0 & in_lookup_switch=0 & op=0x78) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 << (_value2 & 0x1f); push(_result); } :ishr is (in_table_switch=0 & in_lookup_switch=0 & op=0x7a) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 s>> (_value2 & 0x1f); push(_result); } :istore index is (in_table_switch=0 & in_lookup_switch=0 & op=0x36); index { _value :$(SIZE) = 0; pop(_value); LVA = index*$(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :istore_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x3b) { _value :$(SIZE) = 0; pop(_value); LVA = 0; *[localVariableArray]:$(SIZE) (LVA) =_value; } :istore_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x3c) { 
_value :$(SIZE) = 0; pop(_value); LVA = 1 * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) =_value; } :istore_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x3d) { _value :$(SIZE) = 0; pop(_value); LVA = 2 * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) =_value; } :istore_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x3e) { _value :$(SIZE) = 0; pop(_value); LVA = 3 * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) =_value; } :isub is (in_table_switch=0 & in_lookup_switch=0 & op=0x64) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 - _value2; push(_result); } :iushr is (in_table_switch=0 & in_lookup_switch=0 & op=0x7c) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 >> (_value2 & 0x1f); push(_result); } :ixor is (in_table_switch=0 & in_lookup_switch=0 & op=0x82) { _value2 :$(SIZE) = 0; _value1 :$(SIZE) = 0; _result :$(SIZE) = 0; pop(_value2); pop(_value1); _result = _value1 ^ _value2; push(_result); } :jsr Branch is (in_table_switch=0 & in_lookup_switch=0 & op=0xa8); Branch { retAddr :$(SIZE) = inst_next; push(retAddr); goto Branch; } :jsr_w Branch_w is (in_table_switch=0 & in_lookup_switch=0 & op=0xc9); Branch_w { retAddr :$(SIZE) = inst_next; push(retAddr); goto Branch_w; } :l2d is (in_table_switch=0 & in_lookup_switch=0 & op=0x8a) { _value :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value); _result = int2float(_value); push2(_result); } :l2f is (in_table_switch=0 & in_lookup_switch=0 & op=0x89) { _value :$(DOUBLE_SIZE) = 0; _result :$(SIZE) = 0; pop2(_value); _result = int2float(_value); push(_result); } :l2i is (in_table_switch=0 & in_lookup_switch=0 & op=0x88) { _value :$(DOUBLE_SIZE) = 0; _result :$(SIZE) = 0; pop2(_value); _result = _value:$(SIZE); push(_result); } :ladd is (in_table_switch=0 & in_lookup_switch=0 & op=0x61) { _value1 :$(DOUBLE_SIZE) = 0; _value2 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; 
pop2(_value2); pop2(_value1); _result = _value1 + _value2; push2(_result); } :laload is (in_table_switch=0 & in_lookup_switch=0 & op=0x2f) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value :$(DOUBLE_SIZE) = 0; pop(_index); pop(_arrayref); local _offset = _arrayref + ($(DOUBLE_SIZE) * _index); _value = *[ram]:$(DOUBLE_SIZE) (_offset); push2(_value); } :land is (in_table_switch=0 & in_lookup_switch=0 & op=0x7f) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 & _value2; push2(_result); } :lastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x50) { _value :$(DOUBLE_SIZE) = 0; _index:$(SIZE) = 0; _arrayref:$(SIZE) = 0; pop2(_value); pop(_index); pop(_arrayref); local _offset = _arrayref + ($(DOUBLE_SIZE) * _index); *[ram]:$(DOUBLE_SIZE) _offset = _value; } :lcmp is (in_table_switch=0 & in_lookup_switch=0 & op=0x94) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(SIZE) = 0; pop2(_value2); pop2(_value1); _result = zext(_value1 s> _value2) + zext(_value1 s>= _value2) - 1; push(_result); } :lconst_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x09) { _l :$(DOUBLE_SIZE) = 0; push2(_l); } :lconst_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x0a) { _l :$(DOUBLE_SIZE) = 1; push2(_l); } :ldc index is (in_table_switch=0 & in_lookup_switch=0 & op=0x12); index { ldcCallOther(index:1); } :ldc_w index is (in_table_switch=0 & in_lookup_switch=0 & op=0x13); indexbyte1; indexbyte2 [ index = indexbyte1 << 8 | indexbyte2; ] { ldc_wCallOther(index:2); } :ldc2_w index is (in_table_switch=0 & in_lookup_switch=0 & op=0x14); indexbyte1; indexbyte2 [ index = indexbyte1 << 8 | indexbyte2; ] { ldc2_wCallOther(index:2); } :ldiv is (in_table_switch=0 & in_lookup_switch=0 & op=0x6d) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 s/ _value2; push2(_result); } :lload index is 
(in_table_switch=0 & in_lookup_switch=0 & op=0x16); index { LVA = index * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :lload_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x1e) { LVA = 0; _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :lload_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x1f) { LVA = 1 * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :lload_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x20) { LVA = 2 * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :lload_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x21) { LVA = 3 * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :lmul is (in_table_switch=0 & in_lookup_switch=0 & op=0x69) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 * _value2; push2(_result); } :lneg is (in_table_switch=0 & in_lookup_switch=0 & op=0x75) { _value :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value); _result= -_value; push2(_result); } ################################################################################################## # lookupswitch ################################################################################################## #compute and display one match,offset pair LookupSwitch_match:match _offset is matchbyte1; matchbyte2; matchbyte3; matchbyte4; offsetbyte1; offsetbyte2; offsetbyte3; offsetbyte4 [match = (matchbyte1 << 24) | (matchbyte2 << 16) | (matchbyte3 << 8) | (matchbyte4); _offset = inst_start + ((offsetbyte1 << 24) | (offsetbyte2 << 16) | (offsetbyte3 << 8) | (offsetbyte4)); ] { } #consume one match,offset pair and decrement the switch number :^LookupSwitch_match, instruction is (in_lookup_switch=1 & in_table_switch=0); LookupSwitch_match; instruction 
[switch_num = switch_num - 1;] { } #leave the switch statement :""LookupSwitch_match is (in_lookup_switch=1 & switch_num=1 & in_table_switch=0); LookupSwitch_match [in_lookup_switch=0;] { } define pcodeop switchAssist; padSwitch: "" is alignmentPad = 3 & padVal & op ; pad1; pad2; pad3 { export *[const]:1 padVal; } padSwitch: "" is alignmentPad = 2 & padVal & op ; pad1; pad2 { export *[const]:1 padVal; } padSwitch: "" is alignmentPad = 1 & padVal & op ; pad1 { export *[const]:1 padVal; } padSwitch: "" is alignmentPad = 0 & padVal & op { export *[const]:1 padVal; } #Note: "Default" constructor does not play nice with switchAssist injection... dolookupswitch: _default, npairs is alignmentPad & defaultbyte1; defaultbyte2; defaultbyte3; defaultbyte4; npairsbyte1; npairsbyte2; npairsbyte3; npairsbyte4 [ npairs = (npairsbyte1 << 24) | (npairsbyte2 << 16) | (npairsbyte3 << 8) | npairsbyte4; _default = inst_start + ((defaultbyte1 << 24) | (defaultbyte2 << 16) | (defaultbyte3 << 8) | defaultbyte4); switch_num = npairs; in_lookup_switch = 1;] { local _padding:1 = alignmentPad; local _opcodeAddr:$(SIZE) = inst_start; local _key:$(SIZE) = 0; pop(_key); local _address:$(SIZE) = switchAssist(_key,_opcodeAddr,_padding,_default:$(SIZE),npairs:$(SIZE)); goto [ _address ]; } :lookupswitch dolookupswitch, instruction is in_lookup_switch=0 & in_table_switch=0 & op=0xab & alignmentPad = 0 ; dolookupswitch; instruction { build dolookupswitch; } :lookupswitch dolookupswitch, instruction is in_lookup_switch=0 & in_table_switch=0 & op=0xab & alignmentPad = 1 ; pad1; dolookupswitch; instruction { build dolookupswitch; } :lookupswitch dolookupswitch, instruction is in_lookup_switch=0 & in_table_switch=0 & op=0xab & alignmentPad = 2 ; pad1; pad2; dolookupswitch; instruction { build dolookupswitch; } :lookupswitch dolookupswitch, instruction is in_lookup_switch=0 & in_table_switch=0 & op=0xab & alignmentPad = 3 ; pad1; pad2; pad3; dolookupswitch; instruction { build dolookupswitch; } :lor is 
(in_table_switch=0 & in_lookup_switch=0 & op=0x81) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 | _value2; push2(_result); } :lrem is (in_table_switch=0 & in_lookup_switch=0 & op=0x71) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 s% _value2; push2(_result); } :lreturn is (in_table_switch=0 & in_lookup_switch=0 & op=0xad) { pop2(cat2_return_value); return [return_address]; } :lshl is (in_table_switch=0 & in_lookup_switch=0 & op=0x79) { _value2 :$(SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop(_value2); pop2(_value1); _result = _value1 << (_value2 & 0x3f); push2(_result); } :lshr is (in_table_switch=0 & in_lookup_switch=0 & op=0x7b) { _value2 :$(SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop(_value2); pop2(_value1); _result = _value1 s>> (_value2 & 0x3f); push2(_result); } :lstore index is (in_table_switch=0 & in_lookup_switch=0 & op=0x37); index { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = index * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :lstore_0 is (in_table_switch=0 & in_lookup_switch=0 & op=0x3f) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 0; *[localVariableArray]:$(DOUBLE_SIZE) (LVA) =_value; } :lstore_1 is (in_table_switch=0 & in_lookup_switch=0 & op=0x40) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 1 * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) =_value; } :lstore_2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x41) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 2 * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) =_value; } :lstore_3 is (in_table_switch=0 & in_lookup_switch=0 & op=0x42) { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = 3 * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) =_value; } :lsub is (in_table_switch=0 & in_lookup_switch=0 & op=0x65) { _value2 
:$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 - _value2; push2(_result); }
# ^ completes the :lsub constructor begun on the previous line: long subtract, value1 - value2.

# lushr (0x7d): logical (unsigned) right shift of a long by the low 6 bits of an int shift count.
:lushr is (in_table_switch=0 & in_lookup_switch=0 & op=0x7d) { _value2 :$(SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop(_value2); pop2(_value1); _result = _value1 >> (_value2 & 0x3f); push2(_result); }

# lxor (0x83): bitwise xor of two longs.
:lxor is (in_table_switch=0 & in_lookup_switch=0 & op=0x83) { _value2 :$(DOUBLE_SIZE) = 0; _value1 :$(DOUBLE_SIZE) = 0; _result :$(DOUBLE_SIZE) = 0; pop2(_value2); pop2(_value1); _result = _value1 ^ _value2; push2(_result); }

# monitorenter (0xc2): pop the object reference and delegate locking to the monitorenterOp pcodeop.
:monitorenter is (in_table_switch=0 & in_lookup_switch=0 & op=0xc2) { _objectref :$(SIZE) = 0; pop(_objectref); monitorenterOp(_objectref); }

# monitorexit (0xc3): pop the object reference and delegate unlocking to the monitorexitOp pcodeop.
:monitorexit is (in_table_switch=0 & in_lookup_switch=0 & op=0xc3) { _objectref :$(SIZE) = 0; pop(_objectref); monitorexitOp(_objectref); }

# multianewarray (0xc5): 16-bit constant-pool index plus a dimensions byte; delegated to a pcodeop.
:multianewarray index is (in_table_switch=0 & in_lookup_switch=0 & op=0xc5); indexbyte1; indexbyte2; dimensions [index = indexbyte1<<8 | indexbyte2;] { multianewarrayCallOther(index:2,dimensions:1); }

# new (0xbb): resolve the class via cpool(CPOOL_NEW), allocate with newobject, push the reference.
:new index is (in_table_switch=0 & in_lookup_switch=0 & op=0xbb); indexbyte1; indexbyte2 [ index = indexbyte1<<8 | indexbyte2; ] { local _className = cpool(0:4,index,$(CPOOL_NEW)); _ref: $(SIZE) = newobject(_className); push(_ref); }

# newarray (0xbc): pop the element count, resolve the primitive type tag via cpool(CPOOL_NEWARRAY),
# allocate with newobject, push the array reference.
:newarray atype is (in_table_switch=0 & in_lookup_switch=0 & op=0xbc); atype { _count:$(SIZE)=0; _arrayref:$(SIZE)=0; pop(_count); _ref:$(SIZE) = cpool(0:4,atype:1,$(CPOOL_NEWARRAY)); _arrayref = newobject(_ref, _count); push(_arrayref); }

# nop (0x00): no operation.
# FIX: the constraint list previously repeated "in_lookup_switch=0" twice (copy-paste error);
# the duplicate is removed. Matching behavior is unchanged (A & A == A), and the constructor
# now reads the same as every sibling constructor in this file.
:nop is (in_table_switch=0 & in_lookup_switch=0 & op=0x00) { }

# pop (0x57): discard one stack slot (operand stack grows downward, so SP moves up by one slot).
:pop is (in_table_switch=0 & in_lookup_switch=0 & op=0x57) { SP = SP + $(SIZE); }

# pop2 (0x58): discard two stack slots (one category-2 value or two category-1 values).
:pop2 is (in_table_switch=0 & in_lookup_switch=0 & op=0x58) { SP = SP + $(DOUBLE_SIZE); }

# putfield (0xb5): 16-bit constant-pool index; field store delegated to the putFieldCallOther pcodeop.
:putfield index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb5); indexbyte1; indexbyte2 [ index = indexbyte1 << 8 | indexbyte2; ] { putFieldCallOther(index:2); }
# putstatic (0xb3): 16-bit constant-pool index; static field store delegated to the putStaticCallOther pcodeop.
:putstatic index is (in_table_switch=0 & in_lookup_switch=0 & op=0xb3); indexbyte1; indexbyte2 [index = indexbyte1 << 8 | indexbyte2; ] { putStaticCallOther(index:2); }

# ret (0xa9): read a return address out of local-variable slot `index` and branch to it indirectly
# (companion of jsr, which pushed the address).
:ret index is (in_table_switch=0 & in_lookup_switch=0 & op=0xa9); index { LVA = index * $(SIZE); _value:$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); goto [_value]; }

# return (0xb1): void return to the caller via return_address.
:return is (in_table_switch=0 & in_lookup_switch=0 & op=0xb1) { return [return_address]; }

# saload (0x35): short array load. Pops index then arrayref, reads a 2-byte element from
# ram at arrayref + 2*index, sign-extends it to a full stack slot, and pushes it.
:saload is (in_table_switch=0 & in_lookup_switch=0 & op=0x35) { _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; _value:2 = 0; pop(_index); pop(_arrayref); _offset: $(SIZE) = _arrayref + _index * 2; _value = *[ram]:2 _offset; _valueSignExtended:$(SIZE) = sext(_value); push(_valueSignExtended); }

# sastore (0x56): short array store. Pops value, index, arrayref and writes to ram at arrayref + 2*index.
# NOTE(review): the store width is $(SIZE) while the value is truncated to 2 bytes (_value:2) —
# looks inconsistent with the 2-byte element size used by saload above; verify against the
# intended jshort store semantics before relying on the written width.
:sastore is (in_table_switch=0 & in_lookup_switch=0 & op=0x56) { _value :$(SIZE) = 0; _index :$(SIZE) = 0; _arrayref :$(SIZE) = 0; pop(_value); pop(_index); pop(_arrayref); local _offset = _arrayref + _index * 2; *[ram]:$(SIZE) _offset = _value:2; }

# sipush (0x11): push a sign-extended 16-bit immediate (byte1:byte2).
:sipush short is (in_table_switch=0 & in_lookup_switch=0 & op=0x11); byte1; byte2 [ short = byte1<<8 | byte2; ] { _result:$(SIZE) = sext(short:2); push(_result); }

# swap (0x5f): exchange the top two (category-1) operand-stack values.
:swap is (in_table_switch=0 & in_lookup_switch=0 & op=0x5f) { _value1 :$(SIZE) = 0; _value2 :$(SIZE) = 0; pop(_value1); pop(_value2); push(_value1); push(_value2); }

########################################################################################################################
# tableswitch
########################################################################################################################

# Compute and display one switch offset: the branch target is inst_start plus the
# 32-bit big-endian offset assembled from the four offset bytes.
Switch_offset:_offset is offsetbyte1; offsetbyte2; offsetbyte3; offsetbyte4 [ _offset = inst_start + ((offsetbyte1<<24) | (offsetbyte2<<16) | (offsetbyte3<<8) | offsetbyte4); ] { }

# Switch entry that's not the last one.
# no pcode def - this construction is just for consuming all of the bytes of a tableswitch instructions
# decrements the switch number
# (While the in_table_switch context is set, each recursive match of this constructor eats one
# 4-byte table entry and decrements the switch_num context register.)
:^Switch_offset, instruction is (in_table_switch=1 & in_lookup_switch=0); Switch_offset; instruction [switch_num = switch_num - 1;] { }

#Last switch entry. Get out of switch context.
:""Switch_offset is (in_table_switch=1 & in_lookup_switch=0 &switch_num=0); Switch_offset [ in_table_switch=0; ] { }

# dotableswitch: parses the default/low/high words of a tableswitch, records low/high/entry-count
# in context registers, and enters the in_table_switch context so the entry list above is consumed.
# Runtime semantics: pop the switch key, bounds-check it against [low, high] (out of range -> Default),
# then index the jump table at inst_start + 1 (opcode byte) + padding + 4+4+4 (default/low/high words)
# + 4*(idx-low), and branch indirectly through the fetched relative offset.
dotableswitch: Default, low, high is alignmentPad & Default;lowbyte1; lowbyte2; lowbyte3; lowbyte4; highbyte1; highbyte2; highbyte3; highbyte4 [ low = (lowbyte1 << 24) | (lowbyte2 << 16) | (lowbyte3 << 8) | lowbyte4; high = (highbyte1 << 24) | (highbyte2 << 16) | (highbyte3 << 8) | highbyte4; switch_low = low; switch_num = high - low; switch_high = high; in_table_switch = 1; ] { local _offset :$(SIZE) = 0; local idx :$(SIZE) = 0; local padding:$(SIZE) = alignmentPad; pop(idx); if (idx s< low) goto Default; if (idx s> high) goto Default;
#opcode_address + byte_for_opcode + padding + default + low + high = start of table
_offset = inst_start + 1 + padding + 4 + 4 + 4 + 4*(idx-low); switch_target = inst_start + *:$(SIZE)(_offset); goto [switch_target]; }

# Four alignment variants: tableswitch operands are padded so the default word starts on a
# 4-byte boundary; alignmentPad selects how many pad bytes (0-3) to consume before dotableswitch.
:tableswitch dotableswitch, instruction is in_table_switch=0 & in_lookup_switch=0 & op=0xaa & alignmentPad=0; dotableswitch; instruction { build dotableswitch; }
:tableswitch dotableswitch, instruction is in_table_switch=0 & in_lookup_switch=0 & op=0xaa & alignmentPad=1; pad1; dotableswitch; instruction { build dotableswitch; }
:tableswitch dotableswitch, instruction is in_table_switch=0 & in_lookup_switch=0 & op=0xaa & alignmentPad=2; pad1; pad2; dotableswitch; instruction { build dotableswitch; }
:tableswitch dotableswitch, instruction is in_table_switch=0 & in_lookup_switch=0 & op=0xaa & alignmentPad=3; pad1; pad2; pad3; dotableswitch; instruction { build dotableswitch; }

#wide loads
# wide_iload (wide prefix 0xc4 + 0x15): iload with a 16-bit local-variable index.
# (Constructor continues on the next source line.)
:wide_iload index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc415); indexbyte1; indexbyte2 [index
= (indexbyte1 << 8) | indexbyte2; ] { LVA = index * $(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :wide_fload index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc417); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2; ] { LVA = index * $(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :wide_aload index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc419); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2; ] { LVA = index * $(SIZE); _value :$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); push(_value); } :wide_lload index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc416); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2; ] { LVA = index * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } :wide_dload index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc418); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2; ] { LVA = index * $(SIZE); _value :$(DOUBLE_SIZE) = *[localVariableArray]:$(DOUBLE_SIZE) (LVA); push2(_value); } #wide stores :wide_istore index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc436); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2;] { _value :$(SIZE) = 0; pop(_value); LVA = index * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :wide_fstore index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc438); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2;] { _value :$(SIZE) = 0; pop(_value); LVA = index * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :wide_astore index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc43a); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2;] { _value :$(SIZE) = 0; pop(_value); LVA = index * $(SIZE); *[localVariableArray]:$(SIZE) (LVA) = _value; } :wide_lstore index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc437); indexbyte1; indexbyte2 [index = 
(indexbyte1 << 8) | indexbyte2;] { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = index * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } :wide_dstore index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc439); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2;] { _value :$(DOUBLE_SIZE) = 0; pop2(_value); LVA = index * $(SIZE); *[localVariableArray]:$(DOUBLE_SIZE) (LVA) = _value; } #wide ret :wide_ret index is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc4a9); indexbyte1; indexbyte2 [index = (indexbyte1 << 8) | indexbyte2;] { LVA = index * $(SIZE); _value:$(SIZE) = *[localVariableArray]:$(SIZE) (LVA); goto [_value]; } #wide inc - instruction is called wide format 2 by JVM specification but iinc_w by javap :iinc_w index, constant is (in_table_switch=0 & in_lookup_switch=0 & w_op=0xc484); indexbyte1; indexbyte2; constantbyte1; constantbyte2 [ index = (indexbyte1 << 8) | indexbyte2; constant = (constantbyte1 << 8) | constantbyte2;] { fullIndex: $(SIZE) = 4*zext(index:2); fullConstant: $(SIZE) = sext(constant:2); intVal:$(SIZE) = *[localVariableArray]:$(SIZE) (fullIndex); *[localVariableArray]:$(SIZE) (fullIndex) = (intVal + fullConstant); } ================================================ FILE: pypcode/processors/JVM/data/manuals/JVM.idx ================================================ @jvms8.pdf [The Java Virtual Machine Specification - Jave SE 8 Edition] aaload, 381 aastore, 382 aconst_null, 384 aload, 385 aload_0, 386 aload_1, 386 aload_2, 386 aload_3, 386 anewarray, 387 areturn, 388 arraylength, 389 astore, 390 astore_0, 391 astore_1, 391 astore_2, 391 astore_3, 391 athrow, 392 baload, 394 bastore, 395 bipush, 396 caload, 397 castore, 398 checkcast, 399 d2f, 401 d2i, 402 d2l, 403 dadd, 404 daload, 406 dastore, 407 dcmpg, 408 dcmpl, 408 dconst_0, 410 dconst_1, 410 ddiv, 411 dload, 413 dload_0, 414 dload_1, 414 dload_2, 414 dload_3, 414 dmul, 415 dneg, 417 drem, 418 dreturn, 420 dstore, 421 dstore_0, 422 dstore_1, 422 
dstore_2, 422 dstore_3, 422 dsub, 423 dup, 424 dup_x1, 425 dup_x2, 426 dup2, 427 dup2_x1, 428 dup2_x2, 429 f2d, 431 f2i, 432 f2l, 433 fadd, 434 faload, 436 fastore, 437 fcmpg, 438 fcmpl, 438 fconst_0, 440 fconst_1, 440 fconst_2, 440 fdiv, 441 fload, 443 fload_0, 444 fload_1, 444 fload_2, 444 fload_3, 444 fmul, 445 fneg, 447 frem, 448 freturn, 450 fstore, 451 fstore_0, 452 fstore_1, 452 fstore_2, 452 fstore_3, 452 fsub, 453 getfield, 454 getstatic, 456 goto, 458 goto_w, 459 i2b, 460 i2c, 461 i2d, 462 i2f, 463 i2l, 464 i2s, 465 iadd, 466 iaload, 467 iand, 468 iastore, 469 iconst_m1, 470 iconst_0, 470 iconst_1, 470 iconst_2, 470 iconst_3, 470 iconst_4, 470 iconst_5, 470 idiv, 471 if_acmpeq, 472 if_acmpne, 472 if_icmpeq, 473 if_icmpne, 473 if_icmplt, 473 if_icmple, 473 if_icmpgt, 473 if_icmpge, 473 ifeq, 475 ifne, 475 iflt, 475 ifge, 475 ifgt, 475 ifle, 475 ifnonnull, 477 ifnull, 478 iinc, 479 iload, 480 iload_0, 481 iload_1, 481 iload_2, 481 iload_3, 481 imul, 482 ineg, 483 instanceof, 484 invokedynamic, 486 invokeinterface, 491 invokespecial, 495 invokestatic, 500 invokevirtual, 503 ior, 508 irem, 509 ireturn, 510 ishl, 511 ishr, 512 istore, 513 istore_0, 514 istore_1, 514 istore_2, 514 istore_3, 514 isub, 515 iushr, 516 ixor, 517 jsr, 518 jsr_w, 519 l2d, 520 l2f, 521 l2i, 522 ladd, 523 laload, 524 land, 525 lastore, 526 lcmp, 527 lconst_0, 528 lconst_1, 528 ldc, 529 ldc_w, 531 ldc2_w, 533 ldiv, 534 lload, 535 lload_0, 536 lload_1, 536 lload_2, 536 lload_3, 536 lmul, 537 lneg, 538 lookupswitch, 539 lor, 541 lrem, 542 lreturn, 543 lshl, 544 lshr, 545 lstore, 546 lstore_0, 547 lstore_1, 547 lstore_2, 547 lstore_3, 547 lsub, 548 lushr, 549 lxor, 550 monitorenter, 551 monitorexit, 553 multianewarray, 555 new, 557 newarray, 559 nop, 561 pop, 562 pop2, 563 putfield, 564 putstatic, 566 ret, 568 return, 569 saload, 570 sastore, 571 sipush, 572 swap, 573 tableswitch, 574 wide, 576 ================================================ FILE: 
# ============================================================================
# LASX (LoongArch 256-bit Advanced SIMD) SLEIGH definitions — lasx.sinc.
# Machine-generated: each instruction gets (1) a user-defined pcodeop
# declaration, (2) a generator-metadata comment ("#lasx.txt NAME mask=..."
# followed by "#match mask operand-fields [token names]"), and (3) a SLEIGH
# constructor whose opNN_31 opcode constraint equals mask >> NN
# (e.g. 0x0a100000 >> 20 == 0xa1 for xvfmadd.s).
# Semantics are opaque stubs: the pcodeop is handed the old xrD plus the
# source registers and its result is assigned back to xrD; no per-lane
# behavior is modeled.
# NOTE(review): this chunk is a flattened extraction — the original .sinc
# line breaks were lost, so the inline '#' comments above each constructor
# would swallow the constructor text if this were compiled as-is; the
# upstream file keeps them on separate lines.
# --- FP fused multiply-add/sub, 4-register format (xrD, xrJ, xrK, xrA) ---
pypcode/processors/Loongarch/data/languages/ilp32d.cspec ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/ilp32f.cspec ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/lasx.sinc ================================================ define pcodeop xvfmadd.s; #lasx.txt xvfmadd.s mask=0x0a100000 #0x0a100000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfmadd.s xrD, xrJ, xrK, xrA is op20_31=0xa1 & xrD & xrJ & xrK & xrA { xrD = xvfmadd.s(xrD, xrJ, xrK, xrA); } define pcodeop xvfmadd.d; #lasx.txt xvfmadd.d mask=0x0a200000 #0x0a200000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfmadd.d xrD, xrJ, xrK, xrA is op20_31=0xa2 & xrD & xrJ & xrK & xrA { xrD = xvfmadd.d(xrD, xrJ, xrK, xrA); } define pcodeop xvfmsub.s; #lasx.txt xvfmsub.s mask=0x0a500000 #0x0a500000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfmsub.s xrD, xrJ, xrK, xrA is op20_31=0xa5 & xrD & xrJ & xrK & xrA { xrD = xvfmsub.s(xrD, xrJ, xrK, xrA); } define pcodeop xvfmsub.d; #lasx.txt xvfmsub.d mask=0x0a600000 #0x0a600000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfmsub.d xrD, xrJ, xrK, xrA is op20_31=0xa6 & xrD & xrJ & xrK & xrA { xrD = xvfmsub.d(xrD, xrJ, xrK, xrA); } define pcodeop xvfnmadd.s; #lasx.txt xvfnmadd.s mask=0x0a900000 #0x0a900000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfnmadd.s xrD, xrJ, xrK, xrA is op20_31=0xa9 & xrD & xrJ & xrK & xrA { xrD = xvfnmadd.s(xrD, xrJ, xrK, xrA); } define pcodeop xvfnmadd.d; #lasx.txt xvfnmadd.d mask=0x0aa00000 #0x0aa00000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfnmadd.d xrD, 
# --- negated FMA variants, then single-precision vector FP compares.
# xvfcmp.cond.s uses a 17-bit opcode (op15_31); the cond values are the
# hardware cond-field encodings (caf=0, saf=1, clt=2, slt=3, ceq=4, ...),
# which is why the mnemonic order below is not alphabetical.
xrJ, xrK, xrA is op20_31=0xaa & xrD & xrJ & xrK & xrA { xrD = xvfnmadd.d(xrD, xrJ, xrK, xrA); } define pcodeop xvfnmsub.s; #lasx.txt xvfnmsub.s mask=0x0ad00000 #0x0ad00000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfnmsub.s xrD, xrJ, xrK, xrA is op20_31=0xad & xrD & xrJ & xrK & xrA { xrD = xvfnmsub.s(xrD, xrJ, xrK, xrA); } define pcodeop xvfnmsub.d; #lasx.txt xvfnmsub.d mask=0x0ae00000 #0x0ae00000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvfnmsub.d xrD, xrJ, xrK, xrA is op20_31=0xae & xrD & xrJ & xrK & xrA { xrD = xvfnmsub.d(xrD, xrJ, xrK, xrA); } define pcodeop xvfcmp.caf.s; #lasx.txt xvfcmp.caf.s mask=0x0c900000 #0x0c900000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.caf.s xrD, xrJ, xrK is op15_31=0x1920 & xrD & xrJ & xrK { xrD = xvfcmp.caf.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.saf.s; #lasx.txt xvfcmp.saf.s mask=0x0c908000 #0x0c908000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.saf.s xrD, xrJ, xrK is op15_31=0x1921 & xrD & xrJ & xrK { xrD = xvfcmp.saf.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.clt.s; #lasx.txt xvfcmp.clt.s mask=0x0c910000 #0x0c910000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.clt.s xrD, xrJ, xrK is op15_31=0x1922 & xrD & xrJ & xrK { xrD = xvfcmp.clt.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.slt.s; #lasx.txt xvfcmp.slt.s mask=0x0c918000 #0x0c918000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.slt.s xrD, xrJ, xrK is op15_31=0x1923 & xrD & xrJ & xrK { xrD = xvfcmp.slt.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.ceq.s; #lasx.txt xvfcmp.ceq.s mask=0x0c920000 #0x0c920000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.ceq.s xrD, xrJ, xrK is op15_31=0x1924 & xrD & xrJ & xrK { xrD = xvfcmp.ceq.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.seq.s; #lasx.txt xvfcmp.seq.s mask=0x0c928000 
# (continuation of the xvfcmp.*.s compare family)
#0x0c928000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.seq.s xrD, xrJ, xrK is op15_31=0x1925 & xrD & xrJ & xrK { xrD = xvfcmp.seq.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cle.s; #lasx.txt xvfcmp.cle.s mask=0x0c930000 #0x0c930000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cle.s xrD, xrJ, xrK is op15_31=0x1926 & xrD & xrJ & xrK { xrD = xvfcmp.cle.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sle.s; #lasx.txt xvfcmp.sle.s mask=0x0c938000 #0x0c938000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sle.s xrD, xrJ, xrK is op15_31=0x1927 & xrD & xrJ & xrK { xrD = xvfcmp.sle.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cun.s; #lasx.txt xvfcmp.cun.s mask=0x0c940000 #0x0c940000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cun.s xrD, xrJ, xrK is op15_31=0x1928 & xrD & xrJ & xrK { xrD = xvfcmp.cun.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sun.s; #lasx.txt xvfcmp.sun.s mask=0x0c948000 #0x0c948000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sun.s xrD, xrJ, xrK is op15_31=0x1929 & xrD & xrJ & xrK { xrD = xvfcmp.sun.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cult.s; #lasx.txt xvfcmp.cult.s mask=0x0c950000 #0x0c950000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cult.s xrD, xrJ, xrK is op15_31=0x192a & xrD & xrJ & xrK { xrD = xvfcmp.cult.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sult.s; #lasx.txt xvfcmp.sult.s mask=0x0c958000 #0x0c958000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sult.s xrD, xrJ, xrK is op15_31=0x192b & xrD & xrJ & xrK { xrD = xvfcmp.sult.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cueq.s; #lasx.txt xvfcmp.cueq.s mask=0x0c960000 #0x0c960000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cueq.s xrD, xrJ, xrK is op15_31=0x192c & xrD & xrJ & xrK { xrD = xvfcmp.cueq.s(xrD, xrJ, xrK); } define pcodeop 
# --- remaining xvfcmp.*.s conditions (unordered/not-equal/ordered forms),
# then the double-precision xvfcmp.*.d family at op15_31=0x194x/0x195x.
# Gaps in the op15_31 sequence (e.g. no 0x1932/0x1933) mirror reserved
# cond-field encodings. All compares are opaque pcodeop stubs writing xrD.
xvfcmp.sueq.s; #lasx.txt xvfcmp.sueq.s mask=0x0c968000 #0x0c968000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sueq.s xrD, xrJ, xrK is op15_31=0x192d & xrD & xrJ & xrK { xrD = xvfcmp.sueq.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cule.s; #lasx.txt xvfcmp.cule.s mask=0x0c970000 #0x0c970000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cule.s xrD, xrJ, xrK is op15_31=0x192e & xrD & xrJ & xrK { xrD = xvfcmp.cule.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sule.s; #lasx.txt xvfcmp.sule.s mask=0x0c978000 #0x0c978000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sule.s xrD, xrJ, xrK is op15_31=0x192f & xrD & xrJ & xrK { xrD = xvfcmp.sule.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cne.s; #lasx.txt xvfcmp.cne.s mask=0x0c980000 #0x0c980000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cne.s xrD, xrJ, xrK is op15_31=0x1930 & xrD & xrJ & xrK { xrD = xvfcmp.cne.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sne.s; #lasx.txt xvfcmp.sne.s mask=0x0c988000 #0x0c988000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sne.s xrD, xrJ, xrK is op15_31=0x1931 & xrD & xrJ & xrK { xrD = xvfcmp.sne.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cor.s; #lasx.txt xvfcmp.cor.s mask=0x0c9a0000 #0x0c9a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cor.s xrD, xrJ, xrK is op15_31=0x1934 & xrD & xrJ & xrK { xrD = xvfcmp.cor.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sor.s; #lasx.txt xvfcmp.sor.s mask=0x0c9a8000 #0x0c9a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sor.s xrD, xrJ, xrK is op15_31=0x1935 & xrD & xrJ & xrK { xrD = xvfcmp.sor.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.cune.s; #lasx.txt xvfcmp.cune.s mask=0x0c9c0000 #0x0c9c0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cune.s xrD, xrJ, xrK is op15_31=0x1938 & xrD & xrJ & xrK { xrD 
# (xvfcmp.sune.s, then start of the double-precision compare family)
= xvfcmp.cune.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.sune.s; #lasx.txt xvfcmp.sune.s mask=0x0c9c8000 #0x0c9c8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sune.s xrD, xrJ, xrK is op15_31=0x1939 & xrD & xrJ & xrK { xrD = xvfcmp.sune.s(xrD, xrJ, xrK); } define pcodeop xvfcmp.caf.d; #lasx.txt xvfcmp.caf.d mask=0x0ca00000 #0x0ca00000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.caf.d xrD, xrJ, xrK is op15_31=0x1940 & xrD & xrJ & xrK { xrD = xvfcmp.caf.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.saf.d; #lasx.txt xvfcmp.saf.d mask=0x0ca08000 #0x0ca08000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.saf.d xrD, xrJ, xrK is op15_31=0x1941 & xrD & xrJ & xrK { xrD = xvfcmp.saf.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.clt.d; #lasx.txt xvfcmp.clt.d mask=0x0ca10000 #0x0ca10000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.clt.d xrD, xrJ, xrK is op15_31=0x1942 & xrD & xrJ & xrK { xrD = xvfcmp.clt.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.slt.d; #lasx.txt xvfcmp.slt.d mask=0x0ca18000 #0x0ca18000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.slt.d xrD, xrJ, xrK is op15_31=0x1943 & xrD & xrJ & xrK { xrD = xvfcmp.slt.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.ceq.d; #lasx.txt xvfcmp.ceq.d mask=0x0ca20000 #0x0ca20000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.ceq.d xrD, xrJ, xrK is op15_31=0x1944 & xrD & xrJ & xrK { xrD = xvfcmp.ceq.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.seq.d; #lasx.txt xvfcmp.seq.d mask=0x0ca28000 #0x0ca28000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.seq.d xrD, xrJ, xrK is op15_31=0x1945 & xrD & xrJ & xrK { xrD = xvfcmp.seq.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cle.d; #lasx.txt xvfcmp.cle.d mask=0x0ca30000 #0x0ca30000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cle.d xrD, xrJ, xrK is 
# (continuation of the xvfcmp.*.d compare family)
op15_31=0x1946 & xrD & xrJ & xrK { xrD = xvfcmp.cle.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sle.d; #lasx.txt xvfcmp.sle.d mask=0x0ca38000 #0x0ca38000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sle.d xrD, xrJ, xrK is op15_31=0x1947 & xrD & xrJ & xrK { xrD = xvfcmp.sle.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cun.d; #lasx.txt xvfcmp.cun.d mask=0x0ca40000 #0x0ca40000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cun.d xrD, xrJ, xrK is op15_31=0x1948 & xrD & xrJ & xrK { xrD = xvfcmp.cun.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sun.d; #lasx.txt xvfcmp.sun.d mask=0x0ca48000 #0x0ca48000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sun.d xrD, xrJ, xrK is op15_31=0x1949 & xrD & xrJ & xrK { xrD = xvfcmp.sun.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cult.d; #lasx.txt xvfcmp.cult.d mask=0x0ca50000 #0x0ca50000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cult.d xrD, xrJ, xrK is op15_31=0x194a & xrD & xrJ & xrK { xrD = xvfcmp.cult.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sult.d; #lasx.txt xvfcmp.sult.d mask=0x0ca58000 #0x0ca58000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sult.d xrD, xrJ, xrK is op15_31=0x194b & xrD & xrJ & xrK { xrD = xvfcmp.sult.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cueq.d; #lasx.txt xvfcmp.cueq.d mask=0x0ca60000 #0x0ca60000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cueq.d xrD, xrJ, xrK is op15_31=0x194c & xrD & xrJ & xrK { xrD = xvfcmp.cueq.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sueq.d; #lasx.txt xvfcmp.sueq.d mask=0x0ca68000 #0x0ca68000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sueq.d xrD, xrJ, xrK is op15_31=0x194d & xrD & xrJ & xrK { xrD = xvfcmp.sueq.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cule.d; #lasx.txt xvfcmp.cule.d mask=0x0ca70000 #0x0ca70000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 
# --- tail of xvfcmp.*.d, then the 4-register select/shuffle ops
# (xvbitsel.v, xvshuf.b) and the vector memory instructions.
# Memory ops take a GPR base (RJsrc) and a signed immediate; the
# [@orig_fmt=...] tags and the "so10:N<<S" field notes record that the
# xvldrepl/xvstelm offsets are stored pre-scaled by the element size.
# NOTE(review): stores (xvst, xvstelm.*) are also modeled as "xrD = op(...)";
# this writes the destination register for a store — an artifact of the
# uniform generated stub pattern, not real architectural behavior.
'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cule.d xrD, xrJ, xrK is op15_31=0x194e & xrD & xrJ & xrK { xrD = xvfcmp.cule.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sule.d; #lasx.txt xvfcmp.sule.d mask=0x0ca78000 #0x0ca78000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sule.d xrD, xrJ, xrK is op15_31=0x194f & xrD & xrJ & xrK { xrD = xvfcmp.sule.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cne.d; #lasx.txt xvfcmp.cne.d mask=0x0ca80000 #0x0ca80000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cne.d xrD, xrJ, xrK is op15_31=0x1950 & xrD & xrJ & xrK { xrD = xvfcmp.cne.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sne.d; #lasx.txt xvfcmp.sne.d mask=0x0ca88000 #0x0ca88000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sne.d xrD, xrJ, xrK is op15_31=0x1951 & xrD & xrJ & xrK { xrD = xvfcmp.sne.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cor.d; #lasx.txt xvfcmp.cor.d mask=0x0caa0000 #0x0caa0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cor.d xrD, xrJ, xrK is op15_31=0x1954 & xrD & xrJ & xrK { xrD = xvfcmp.cor.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sor.d; #lasx.txt xvfcmp.sor.d mask=0x0caa8000 #0x0caa8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sor.d xrD, xrJ, xrK is op15_31=0x1955 & xrD & xrJ & xrK { xrD = xvfcmp.sor.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.cune.d; #lasx.txt xvfcmp.cune.d mask=0x0cac0000 #0x0cac0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.cune.d xrD, xrJ, xrK is op15_31=0x1958 & xrD & xrJ & xrK { xrD = xvfcmp.cune.d(xrD, xrJ, xrK); } define pcodeop xvfcmp.sune.d; #lasx.txt xvfcmp.sune.d mask=0x0cac8000 #0x0cac8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfcmp.sune.d xrD, xrJ, xrK is op15_31=0x1959 & xrD & xrJ & xrK { xrD = xvfcmp.sune.d(xrD, xrJ, xrK); } define pcodeop xvbitsel.v; #lasx.txt xvbitsel.v mask=0x0d200000 #0x0d200000 
# (xvbitsel.v/xvshuf.b 4-register ops, then xvld/xvst with 12-bit signed
# offsets and the element-replicating loads xvldrepl.{d,w,h,b})
0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvbitsel.v xrD, xrJ, xrK, xrA is op20_31=0xd2 & xrD & xrJ & xrK & xrA { xrD = xvbitsel.v(xrD, xrJ, xrK, xrA); } define pcodeop xvshuf.b; #lasx.txt xvshuf.b mask=0x0d600000 #0x0d600000 0xfff00000 x0:5,x5:5,x10:5,x15:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0', 'xreg15_5_s0'] :xvshuf.b xrD, xrJ, xrK, xrA is op20_31=0xd6 & xrD & xrJ & xrK & xrA { xrD = xvshuf.b(xrD, xrJ, xrK, xrA); } define pcodeop xvld; #lasx.txt xvld mask=0x2c800000 #0x2c800000 0xffc00000 x0:5, r5:5,so10:12 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :xvld xrD, RJsrc,simm10_12 is op22_31=0xb2 & xrD & RJsrc & simm10_12 { xrD = xvld(xrD, RJsrc, simm10_12:$(REGSIZE)); } define pcodeop xvst; #lasx.txt xvst mask=0x2cc00000 #0x2cc00000 0xffc00000 x0:5, r5:5,so10:12 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :xvst xrD, RJsrc,simm10_12 is op22_31=0xb3 & xrD & RJsrc & simm10_12 { xrD = xvst(xrD, RJsrc, simm10_12:$(REGSIZE)); } define pcodeop xvldrepl.d; #lasx.txt xvldrepl.d mask=0x32100000 [@orig_fmt=XdJSk9ps3] #0x32100000 0xfff80000 x0:5, r5:5,so10:9<<3 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_9_s0'] :xvldrepl.d xrD, RJsrc,simm10_9 is op19_31=0x642 & xrD & RJsrc & simm10_9 { xrD = xvldrepl.d(xrD, RJsrc, simm10_9:$(REGSIZE)); } define pcodeop xvldrepl.w; #lasx.txt xvldrepl.w mask=0x32200000 [@orig_fmt=XdJSk10ps2] #0x32200000 0xfff00000 x0:5, r5:5,so10:10<<2 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_10_s0'] :xvldrepl.w xrD, RJsrc,simm10_10 is op20_31=0x322 & xrD & RJsrc & simm10_10 { xrD = xvldrepl.w(xrD, RJsrc, simm10_10:$(REGSIZE)); } define pcodeop xvldrepl.h; #lasx.txt xvldrepl.h mask=0x32400000 [@orig_fmt=XdJSk11ps1] #0x32400000 0xffe00000 x0:5, r5:5,so10:11<<1 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_11_s0'] :xvldrepl.h xrD, RJsrc,simm10_11 is op21_31=0x192 & xrD & RJsrc & simm10_11 { xrD = xvldrepl.h(xrD, RJsrc, simm10_11:$(REGSIZE)); } define pcodeop xvldrepl.b; #lasx.txt xvldrepl.b mask=0x32800000 #0x32800000 
# (xvldrepl.b, element stores xvstelm.{d,w,h,b} with a lane-index immediate,
# and register-indexed xvldx/xvstx)
0xffc00000 x0:5, r5:5,so10:12 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :xvldrepl.b xrD, RJsrc,simm10_12 is op22_31=0xca & xrD & RJsrc & simm10_12 { xrD = xvldrepl.b(xrD, RJsrc, simm10_12:$(REGSIZE)); } define pcodeop xvstelm.d; #lasx.txt xvstelm.d mask=0x33100000 [@orig_fmt=XdJSk8ps3Un2] #0x33100000 0xfff00000 x0:5, r5:5,so10:8<<3,u18:2 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_2_s0'] :xvstelm.d xrD, RJsrc,simm10_8, imm18_2 is op20_31=0x331 & xrD & RJsrc & simm10_8 & imm18_2 { xrD = xvstelm.d(xrD, RJsrc, simm10_8:$(REGSIZE), imm18_2:$(REGSIZE)); } define pcodeop xvstelm.w; #lasx.txt xvstelm.w mask=0x33200000 [@orig_fmt=XdJSk8ps2Un3] #0x33200000 0xffe00000 x0:5, r5:5,so10:8<<2,u18:3 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_3_s0'] :xvstelm.w xrD, RJsrc,simm10_8, imm18_3 is op21_31=0x199 & xrD & RJsrc & simm10_8 & imm18_3 { xrD = xvstelm.w(xrD, RJsrc, simm10_8:$(REGSIZE), imm18_3:$(REGSIZE)); } define pcodeop xvstelm.h; #lasx.txt xvstelm.h mask=0x33400000 [@orig_fmt=XdJSk8ps1Un4] #0x33400000 0xffc00000 x0:5, r5:5,so10:8<<1,u18:4 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_4_s0'] :xvstelm.h xrD, RJsrc,simm10_8, imm18_4 is op22_31=0xcd & xrD & RJsrc & simm10_8 & imm18_4 { xrD = xvstelm.h(xrD, RJsrc, simm10_8:$(REGSIZE), imm18_4:$(REGSIZE)); } define pcodeop xvstelm.b; #lasx.txt xvstelm.b mask=0x33800000 #0x33800000 0xff800000 x0:5, r5:5,so10:8,u18:5 ['xreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_5_s0'] :xvstelm.b xrD, RJsrc,simm10_8, imm18_5 is op23_31=0x67 & xrD & RJsrc & simm10_8 & imm18_5 { xrD = xvstelm.b(xrD, RJsrc, simm10_8:$(REGSIZE), imm18_5:$(REGSIZE)); } define pcodeop xvldx; #lasx.txt xvldx mask=0x38480000 #0x38480000 0xffff8000 x0:5, r5:5, r10:5 ['xreg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :xvldx xrD, RJsrc, RKsrc is op15_31=0x7090 & xrD & RJsrc & RKsrc { xrD = xvldx(xrD, RJsrc, RKsrc); } define pcodeop xvstx; #lasx.txt xvstx mask=0x384c0000 #0x384c0000 0xffff8000 x0:5, r5:5, r10:5 ['xreg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] 
# --- xvstx tail, then the 3-register integer ops at major opcode 0x74:
# per-lane compares xvseq/xvsle/xvslt (signed .b/.h/.w/.d and unsigned
# .bu/.hu/.wu/.du variants) and plain xvadd/xvsub. op15_31 values advance
# by one per element-size variant (e.g. xvseq.b=0xe800 ... xvseq.d=0xe803),
# matching the 0x8000 step in the mask column.
:xvstx xrD, RJsrc, RKsrc is op15_31=0x7098 & xrD & RJsrc & RKsrc { xrD = xvstx(xrD, RJsrc, RKsrc); } define pcodeop xvseq.b; #lasx.txt xvseq.b mask=0x74000000 #0x74000000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvseq.b xrD, xrJ, xrK is op15_31=0xe800 & xrD & xrJ & xrK { xrD = xvseq.b(xrD, xrJ, xrK); } define pcodeop xvseq.h; #lasx.txt xvseq.h mask=0x74008000 #0x74008000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvseq.h xrD, xrJ, xrK is op15_31=0xe801 & xrD & xrJ & xrK { xrD = xvseq.h(xrD, xrJ, xrK); } define pcodeop xvseq.w; #lasx.txt xvseq.w mask=0x74010000 #0x74010000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvseq.w xrD, xrJ, xrK is op15_31=0xe802 & xrD & xrJ & xrK { xrD = xvseq.w(xrD, xrJ, xrK); } define pcodeop xvseq.d; #lasx.txt xvseq.d mask=0x74018000 #0x74018000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvseq.d xrD, xrJ, xrK is op15_31=0xe803 & xrD & xrJ & xrK { xrD = xvseq.d(xrD, xrJ, xrK); } define pcodeop xvsle.b; #lasx.txt xvsle.b mask=0x74020000 #0x74020000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.b xrD, xrJ, xrK is op15_31=0xe804 & xrD & xrJ & xrK { xrD = xvsle.b(xrD, xrJ, xrK); } define pcodeop xvsle.h; #lasx.txt xvsle.h mask=0x74028000 #0x74028000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.h xrD, xrJ, xrK is op15_31=0xe805 & xrD & xrJ & xrK { xrD = xvsle.h(xrD, xrJ, xrK); } define pcodeop xvsle.w; #lasx.txt xvsle.w mask=0x74030000 #0x74030000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.w xrD, xrJ, xrK is op15_31=0xe806 & xrD & xrJ & xrK { xrD = xvsle.w(xrD, xrJ, xrK); } define pcodeop xvsle.d; #lasx.txt xvsle.d mask=0x74038000 #0x74038000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.d xrD, xrJ, xrK is op15_31=0xe807 & xrD & xrJ & xrK { xrD = xvsle.d(xrD, xrJ, xrK); } define pcodeop xvsle.bu; 
# (unsigned xvsle variants, then signed xvslt)
#lasx.txt xvsle.bu mask=0x74040000 #0x74040000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.bu xrD, xrJ, xrK is op15_31=0xe808 & xrD & xrJ & xrK { xrD = xvsle.bu(xrD, xrJ, xrK); } define pcodeop xvsle.hu; #lasx.txt xvsle.hu mask=0x74048000 #0x74048000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.hu xrD, xrJ, xrK is op15_31=0xe809 & xrD & xrJ & xrK { xrD = xvsle.hu(xrD, xrJ, xrK); } define pcodeop xvsle.wu; #lasx.txt xvsle.wu mask=0x74050000 #0x74050000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.wu xrD, xrJ, xrK is op15_31=0xe80a & xrD & xrJ & xrK { xrD = xvsle.wu(xrD, xrJ, xrK); } define pcodeop xvsle.du; #lasx.txt xvsle.du mask=0x74058000 #0x74058000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsle.du xrD, xrJ, xrK is op15_31=0xe80b & xrD & xrJ & xrK { xrD = xvsle.du(xrD, xrJ, xrK); } define pcodeop xvslt.b; #lasx.txt xvslt.b mask=0x74060000 #0x74060000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.b xrD, xrJ, xrK is op15_31=0xe80c & xrD & xrJ & xrK { xrD = xvslt.b(xrD, xrJ, xrK); } define pcodeop xvslt.h; #lasx.txt xvslt.h mask=0x74068000 #0x74068000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.h xrD, xrJ, xrK is op15_31=0xe80d & xrD & xrJ & xrK { xrD = xvslt.h(xrD, xrJ, xrK); } define pcodeop xvslt.w; #lasx.txt xvslt.w mask=0x74070000 #0x74070000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.w xrD, xrJ, xrK is op15_31=0xe80e & xrD & xrJ & xrK { xrD = xvslt.w(xrD, xrJ, xrK); } define pcodeop xvslt.d; #lasx.txt xvslt.d mask=0x74078000 #0x74078000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.d xrD, xrJ, xrK is op15_31=0xe80f & xrD & xrJ & xrK { xrD = xvslt.d(xrD, xrJ, xrK); } define pcodeop xvslt.bu; #lasx.txt xvslt.bu mask=0x74080000 #0x74080000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 
# (unsigned xvslt variants, then element-wise xvadd/xvsub)
'xreg10_5_s0'] :xvslt.bu xrD, xrJ, xrK is op15_31=0xe810 & xrD & xrJ & xrK { xrD = xvslt.bu(xrD, xrJ, xrK); } define pcodeop xvslt.hu; #lasx.txt xvslt.hu mask=0x74088000 #0x74088000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.hu xrD, xrJ, xrK is op15_31=0xe811 & xrD & xrJ & xrK { xrD = xvslt.hu(xrD, xrJ, xrK); } define pcodeop xvslt.wu; #lasx.txt xvslt.wu mask=0x74090000 #0x74090000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.wu xrD, xrJ, xrK is op15_31=0xe812 & xrD & xrJ & xrK { xrD = xvslt.wu(xrD, xrJ, xrK); } define pcodeop xvslt.du; #lasx.txt xvslt.du mask=0x74098000 #0x74098000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvslt.du xrD, xrJ, xrK is op15_31=0xe813 & xrD & xrJ & xrK { xrD = xvslt.du(xrD, xrJ, xrK); } define pcodeop xvadd.b; #lasx.txt xvadd.b mask=0x740a0000 #0x740a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadd.b xrD, xrJ, xrK is op15_31=0xe814 & xrD & xrJ & xrK { xrD = xvadd.b(xrD, xrJ, xrK); } define pcodeop xvadd.h; #lasx.txt xvadd.h mask=0x740a8000 #0x740a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadd.h xrD, xrJ, xrK is op15_31=0xe815 & xrD & xrJ & xrK { xrD = xvadd.h(xrD, xrJ, xrK); } define pcodeop xvadd.w; #lasx.txt xvadd.w mask=0x740b0000 #0x740b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadd.w xrD, xrJ, xrK is op15_31=0xe816 & xrD & xrJ & xrK { xrD = xvadd.w(xrD, xrJ, xrK); } define pcodeop xvadd.d; #lasx.txt xvadd.d mask=0x740b8000 #0x740b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadd.d xrD, xrJ, xrK is op15_31=0xe817 & xrD & xrJ & xrK { xrD = xvadd.d(xrD, xrJ, xrK); } define pcodeop xvsub.b; #lasx.txt xvsub.b mask=0x740c0000 #0x740c0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsub.b xrD, xrJ, xrK is op15_31=0xe818 & xrD & xrJ & xrK { xrD = xvsub.b(xrD, xrJ, xrK); } 
# --- xvsub tail, then the widening arithmetic families: xvaddwev/xvsubwev
# operate on even-numbered source lanes, xvaddwod/xvsubwod on odd lanes;
# result lanes are double the source element width (h.b = byte sources into
# halfword results, up to q.d). Signed forms first, then the .bu/.hu/.wu/.du
# unsigned forms beginning at op15_31=0xe85c.
define pcodeop xvsub.h; #lasx.txt xvsub.h mask=0x740c8000 #0x740c8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsub.h xrD, xrJ, xrK is op15_31=0xe819 & xrD & xrJ & xrK { xrD = xvsub.h(xrD, xrJ, xrK); } define pcodeop xvsub.w; #lasx.txt xvsub.w mask=0x740d0000 #0x740d0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsub.w xrD, xrJ, xrK is op15_31=0xe81a & xrD & xrJ & xrK { xrD = xvsub.w(xrD, xrJ, xrK); } define pcodeop xvsub.d; #lasx.txt xvsub.d mask=0x740d8000 #0x740d8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsub.d xrD, xrJ, xrK is op15_31=0xe81b & xrD & xrJ & xrK { xrD = xvsub.d(xrD, xrJ, xrK); } define pcodeop xvaddwev.h.b; #lasx.txt xvaddwev.h.b mask=0x741e0000 #0x741e0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.h.b xrD, xrJ, xrK is op15_31=0xe83c & xrD & xrJ & xrK { xrD = xvaddwev.h.b(xrD, xrJ, xrK); } define pcodeop xvaddwev.w.h; #lasx.txt xvaddwev.w.h mask=0x741e8000 #0x741e8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.w.h xrD, xrJ, xrK is op15_31=0xe83d & xrD & xrJ & xrK { xrD = xvaddwev.w.h(xrD, xrJ, xrK); } define pcodeop xvaddwev.d.w; #lasx.txt xvaddwev.d.w mask=0x741f0000 #0x741f0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.d.w xrD, xrJ, xrK is op15_31=0xe83e & xrD & xrJ & xrK { xrD = xvaddwev.d.w(xrD, xrJ, xrK); } define pcodeop xvaddwev.q.d; #lasx.txt xvaddwev.q.d mask=0x741f8000 #0x741f8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.q.d xrD, xrJ, xrK is op15_31=0xe83f & xrD & xrJ & xrK { xrD = xvaddwev.q.d(xrD, xrJ, xrK); } define pcodeop xvsubwev.h.b; #lasx.txt xvsubwev.h.b mask=0x74200000 #0x74200000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.h.b xrD, xrJ, xrK is op15_31=0xe840 & xrD & xrJ & xrK { xrD = xvsubwev.h.b(xrD, xrJ, xrK); } define pcodeop 
# (xvsubwev tail, then the odd-lane xvaddwod/xvsubwod signed forms)
xvsubwev.w.h; #lasx.txt xvsubwev.w.h mask=0x74208000 #0x74208000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.w.h xrD, xrJ, xrK is op15_31=0xe841 & xrD & xrJ & xrK { xrD = xvsubwev.w.h(xrD, xrJ, xrK); } define pcodeop xvsubwev.d.w; #lasx.txt xvsubwev.d.w mask=0x74210000 #0x74210000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.d.w xrD, xrJ, xrK is op15_31=0xe842 & xrD & xrJ & xrK { xrD = xvsubwev.d.w(xrD, xrJ, xrK); } define pcodeop xvsubwev.q.d; #lasx.txt xvsubwev.q.d mask=0x74218000 #0x74218000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.q.d xrD, xrJ, xrK is op15_31=0xe843 & xrD & xrJ & xrK { xrD = xvsubwev.q.d(xrD, xrJ, xrK); } define pcodeop xvaddwod.h.b; #lasx.txt xvaddwod.h.b mask=0x74220000 #0x74220000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.h.b xrD, xrJ, xrK is op15_31=0xe844 & xrD & xrJ & xrK { xrD = xvaddwod.h.b(xrD, xrJ, xrK); } define pcodeop xvaddwod.w.h; #lasx.txt xvaddwod.w.h mask=0x74228000 #0x74228000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.w.h xrD, xrJ, xrK is op15_31=0xe845 & xrD & xrJ & xrK { xrD = xvaddwod.w.h(xrD, xrJ, xrK); } define pcodeop xvaddwod.d.w; #lasx.txt xvaddwod.d.w mask=0x74230000 #0x74230000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.d.w xrD, xrJ, xrK is op15_31=0xe846 & xrD & xrJ & xrK { xrD = xvaddwod.d.w(xrD, xrJ, xrK); } define pcodeop xvaddwod.q.d; #lasx.txt xvaddwod.q.d mask=0x74238000 #0x74238000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.q.d xrD, xrJ, xrK is op15_31=0xe847 & xrD & xrJ & xrK { xrD = xvaddwod.q.d(xrD, xrJ, xrK); } define pcodeop xvsubwod.h.b; #lasx.txt xvsubwod.h.b mask=0x74240000 #0x74240000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.h.b xrD, xrJ, xrK is op15_31=0xe848 & xrD & xrJ & xrK { xrD = 
# (xvsubwod tail, then the unsigned even-lane widening forms)
xvsubwod.h.b(xrD, xrJ, xrK); } define pcodeop xvsubwod.w.h; #lasx.txt xvsubwod.w.h mask=0x74248000 #0x74248000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.w.h xrD, xrJ, xrK is op15_31=0xe849 & xrD & xrJ & xrK { xrD = xvsubwod.w.h(xrD, xrJ, xrK); } define pcodeop xvsubwod.d.w; #lasx.txt xvsubwod.d.w mask=0x74250000 #0x74250000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.d.w xrD, xrJ, xrK is op15_31=0xe84a & xrD & xrJ & xrK { xrD = xvsubwod.d.w(xrD, xrJ, xrK); } define pcodeop xvsubwod.q.d; #lasx.txt xvsubwod.q.d mask=0x74258000 #0x74258000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.q.d xrD, xrJ, xrK is op15_31=0xe84b & xrD & xrJ & xrK { xrD = xvsubwod.q.d(xrD, xrJ, xrK); } define pcodeop xvaddwev.h.bu; #lasx.txt xvaddwev.h.bu mask=0x742e0000 #0x742e0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.h.bu xrD, xrJ, xrK is op15_31=0xe85c & xrD & xrJ & xrK { xrD = xvaddwev.h.bu(xrD, xrJ, xrK); } define pcodeop xvaddwev.w.hu; #lasx.txt xvaddwev.w.hu mask=0x742e8000 #0x742e8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.w.hu xrD, xrJ, xrK is op15_31=0xe85d & xrD & xrJ & xrK { xrD = xvaddwev.w.hu(xrD, xrJ, xrK); } define pcodeop xvaddwev.d.wu; #lasx.txt xvaddwev.d.wu mask=0x742f0000 #0x742f0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.d.wu xrD, xrJ, xrK is op15_31=0xe85e & xrD & xrJ & xrK { xrD = xvaddwev.d.wu(xrD, xrJ, xrK); } define pcodeop xvaddwev.q.du; #lasx.txt xvaddwev.q.du mask=0x742f8000 #0x742f8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.q.du xrD, xrJ, xrK is op15_31=0xe85f & xrD & xrJ & xrK { xrD = xvaddwev.q.du(xrD, xrJ, xrK); } define pcodeop xvsubwev.h.bu; #lasx.txt xvsubwev.h.bu mask=0x74300000 #0x74300000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.h.bu xrD, 
# --- unsigned widening even/odd add/sub, then the mixed-sign forms
# (e.g. xvaddwev.h.bu.b: unsigned lanes from xrJ, signed lanes from xrK),
# and the start of the saturating adds (xvsadd.*) at op15_31=0xe88c.
xrJ, xrK is op15_31=0xe860 & xrD & xrJ & xrK { xrD = xvsubwev.h.bu(xrD, xrJ, xrK); } define pcodeop xvsubwev.w.hu; #lasx.txt xvsubwev.w.hu mask=0x74308000 #0x74308000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.w.hu xrD, xrJ, xrK is op15_31=0xe861 & xrD & xrJ & xrK { xrD = xvsubwev.w.hu(xrD, xrJ, xrK); } define pcodeop xvsubwev.d.wu; #lasx.txt xvsubwev.d.wu mask=0x74310000 #0x74310000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.d.wu xrD, xrJ, xrK is op15_31=0xe862 & xrD & xrJ & xrK { xrD = xvsubwev.d.wu(xrD, xrJ, xrK); } define pcodeop xvsubwev.q.du; #lasx.txt xvsubwev.q.du mask=0x74318000 #0x74318000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwev.q.du xrD, xrJ, xrK is op15_31=0xe863 & xrD & xrJ & xrK { xrD = xvsubwev.q.du(xrD, xrJ, xrK); } define pcodeop xvaddwod.h.bu; #lasx.txt xvaddwod.h.bu mask=0x74320000 #0x74320000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.h.bu xrD, xrJ, xrK is op15_31=0xe864 & xrD & xrJ & xrK { xrD = xvaddwod.h.bu(xrD, xrJ, xrK); } define pcodeop xvaddwod.w.hu; #lasx.txt xvaddwod.w.hu mask=0x74328000 #0x74328000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.w.hu xrD, xrJ, xrK is op15_31=0xe865 & xrD & xrJ & xrK { xrD = xvaddwod.w.hu(xrD, xrJ, xrK); } define pcodeop xvaddwod.d.wu; #lasx.txt xvaddwod.d.wu mask=0x74330000 #0x74330000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.d.wu xrD, xrJ, xrK is op15_31=0xe866 & xrD & xrJ & xrK { xrD = xvaddwod.d.wu(xrD, xrJ, xrK); } define pcodeop xvaddwod.q.du; #lasx.txt xvaddwod.q.du mask=0x74338000 #0x74338000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.q.du xrD, xrJ, xrK is op15_31=0xe867 & xrD & xrJ & xrK { xrD = xvaddwod.q.du(xrD, xrJ, xrK); } define pcodeop xvsubwod.h.bu; #lasx.txt xvsubwod.h.bu mask=0x74340000 #0x74340000 0xffff8000 x0:5,x5:5,x10:5 
# (unsigned odd-lane subtracts, then mixed unsigned+signed even-lane adds)
['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.h.bu xrD, xrJ, xrK is op15_31=0xe868 & xrD & xrJ & xrK { xrD = xvsubwod.h.bu(xrD, xrJ, xrK); } define pcodeop xvsubwod.w.hu; #lasx.txt xvsubwod.w.hu mask=0x74348000 #0x74348000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.w.hu xrD, xrJ, xrK is op15_31=0xe869 & xrD & xrJ & xrK { xrD = xvsubwod.w.hu(xrD, xrJ, xrK); } define pcodeop xvsubwod.d.wu; #lasx.txt xvsubwod.d.wu mask=0x74350000 #0x74350000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.d.wu xrD, xrJ, xrK is op15_31=0xe86a & xrD & xrJ & xrK { xrD = xvsubwod.d.wu(xrD, xrJ, xrK); } define pcodeop xvsubwod.q.du; #lasx.txt xvsubwod.q.du mask=0x74358000 #0x74358000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsubwod.q.du xrD, xrJ, xrK is op15_31=0xe86b & xrD & xrJ & xrK { xrD = xvsubwod.q.du(xrD, xrJ, xrK); } define pcodeop xvaddwev.h.bu.b; #lasx.txt xvaddwev.h.bu.b mask=0x743e0000 #0x743e0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.h.bu.b xrD, xrJ, xrK is op15_31=0xe87c & xrD & xrJ & xrK { xrD = xvaddwev.h.bu.b(xrD, xrJ, xrK); } define pcodeop xvaddwev.w.hu.h; #lasx.txt xvaddwev.w.hu.h mask=0x743e8000 #0x743e8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.w.hu.h xrD, xrJ, xrK is op15_31=0xe87d & xrD & xrJ & xrK { xrD = xvaddwev.w.hu.h(xrD, xrJ, xrK); } define pcodeop xvaddwev.d.wu.w; #lasx.txt xvaddwev.d.wu.w mask=0x743f0000 #0x743f0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.d.wu.w xrD, xrJ, xrK is op15_31=0xe87e & xrD & xrJ & xrK { xrD = xvaddwev.d.wu.w(xrD, xrJ, xrK); } define pcodeop xvaddwev.q.du.d; #lasx.txt xvaddwev.q.du.d mask=0x743f8000 #0x743f8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwev.q.du.d xrD, xrJ, xrK is op15_31=0xe87f & xrD & xrJ & xrK { xrD = xvaddwev.q.du.d(xrD, xrJ, xrK); } define 
# (mixed unsigned+signed odd-lane adds, then saturating signed adds)
pcodeop xvaddwod.h.bu.b; #lasx.txt xvaddwod.h.bu.b mask=0x74400000 #0x74400000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.h.bu.b xrD, xrJ, xrK is op15_31=0xe880 & xrD & xrJ & xrK { xrD = xvaddwod.h.bu.b(xrD, xrJ, xrK); } define pcodeop xvaddwod.w.hu.h; #lasx.txt xvaddwod.w.hu.h mask=0x74408000 #0x74408000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.w.hu.h xrD, xrJ, xrK is op15_31=0xe881 & xrD & xrJ & xrK { xrD = xvaddwod.w.hu.h(xrD, xrJ, xrK); } define pcodeop xvaddwod.d.wu.w; #lasx.txt xvaddwod.d.wu.w mask=0x74410000 #0x74410000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.d.wu.w xrD, xrJ, xrK is op15_31=0xe882 & xrD & xrJ & xrK { xrD = xvaddwod.d.wu.w(xrD, xrJ, xrK); } define pcodeop xvaddwod.q.du.d; #lasx.txt xvaddwod.q.du.d mask=0x74418000 #0x74418000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvaddwod.q.du.d xrD, xrJ, xrK is op15_31=0xe883 & xrD & xrJ & xrK { xrD = xvaddwod.q.du.d(xrD, xrJ, xrK); } define pcodeop xvsadd.b; #lasx.txt xvsadd.b mask=0x74460000 #0x74460000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.b xrD, xrJ, xrK is op15_31=0xe88c & xrD & xrJ & xrK { xrD = xvsadd.b(xrD, xrJ, xrK); } define pcodeop xvsadd.h; #lasx.txt xvsadd.h mask=0x74468000 #0x74468000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.h xrD, xrJ, xrK is op15_31=0xe88d & xrD & xrJ & xrK { xrD = xvsadd.h(xrD, xrJ, xrK); } define pcodeop xvsadd.w; #lasx.txt xvsadd.w mask=0x74470000 #0x74470000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.w xrD, xrJ, xrK is op15_31=0xe88e & xrD & xrJ & xrK { xrD = xvsadd.w(xrD, xrJ, xrK); } define pcodeop xvsadd.d; #lasx.txt xvsadd.d mask=0x74478000 #0x74478000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.d xrD, xrJ, xrK is op15_31=0xe88f & xrD & xrJ & xrK { xrD = xvsadd.d(xrD, 
xrJ, xrK); } define pcodeop xvssub.b; #lasx.txt xvssub.b mask=0x74480000 #0x74480000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.b xrD, xrJ, xrK is op15_31=0xe890 & xrD & xrJ & xrK { xrD = xvssub.b(xrD, xrJ, xrK); } define pcodeop xvssub.h; #lasx.txt xvssub.h mask=0x74488000 #0x74488000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.h xrD, xrJ, xrK is op15_31=0xe891 & xrD & xrJ & xrK { xrD = xvssub.h(xrD, xrJ, xrK); } define pcodeop xvssub.w; #lasx.txt xvssub.w mask=0x74490000 #0x74490000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.w xrD, xrJ, xrK is op15_31=0xe892 & xrD & xrJ & xrK { xrD = xvssub.w(xrD, xrJ, xrK); } define pcodeop xvssub.d; #lasx.txt xvssub.d mask=0x74498000 #0x74498000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.d xrD, xrJ, xrK is op15_31=0xe893 & xrD & xrJ & xrK { xrD = xvssub.d(xrD, xrJ, xrK); } define pcodeop xvsadd.bu; #lasx.txt xvsadd.bu mask=0x744a0000 #0x744a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.bu xrD, xrJ, xrK is op15_31=0xe894 & xrD & xrJ & xrK { xrD = xvsadd.bu(xrD, xrJ, xrK); } define pcodeop xvsadd.hu; #lasx.txt xvsadd.hu mask=0x744a8000 #0x744a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.hu xrD, xrJ, xrK is op15_31=0xe895 & xrD & xrJ & xrK { xrD = xvsadd.hu(xrD, xrJ, xrK); } define pcodeop xvsadd.wu; #lasx.txt xvsadd.wu mask=0x744b0000 #0x744b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.wu xrD, xrJ, xrK is op15_31=0xe896 & xrD & xrJ & xrK { xrD = xvsadd.wu(xrD, xrJ, xrK); } define pcodeop xvsadd.du; #lasx.txt xvsadd.du mask=0x744b8000 #0x744b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsadd.du xrD, xrJ, xrK is op15_31=0xe897 & xrD & xrJ & xrK { xrD = xvsadd.du(xrD, xrJ, xrK); } define pcodeop xvssub.bu; #lasx.txt xvssub.bu mask=0x744c0000 
# Generated LASX stubs continue: unsigned saturating subtract (xvssub.*u), horizontal widening
# add/subtract (xvhaddw.*, xvhsubw.*, signed and unsigned variants), and absolute-value add (xvadda.*).
# Same template throughout: opaque pcodeop + constructor on op15_31 (= mask >> 15) writing xrD.
#0x744c0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.bu xrD, xrJ, xrK is op15_31=0xe898 & xrD & xrJ & xrK { xrD = xvssub.bu(xrD, xrJ, xrK); } define pcodeop xvssub.hu; #lasx.txt xvssub.hu mask=0x744c8000 #0x744c8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.hu xrD, xrJ, xrK is op15_31=0xe899 & xrD & xrJ & xrK { xrD = xvssub.hu(xrD, xrJ, xrK); } define pcodeop xvssub.wu; #lasx.txt xvssub.wu mask=0x744d0000 #0x744d0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.wu xrD, xrJ, xrK is op15_31=0xe89a & xrD & xrJ & xrK { xrD = xvssub.wu(xrD, xrJ, xrK); } define pcodeop xvssub.du; #lasx.txt xvssub.du mask=0x744d8000 #0x744d8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssub.du xrD, xrJ, xrK is op15_31=0xe89b & xrD & xrJ & xrK { xrD = xvssub.du(xrD, xrJ, xrK); } define pcodeop xvhaddw.h.b; #lasx.txt xvhaddw.h.b mask=0x74540000 #0x74540000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.h.b xrD, xrJ, xrK is op15_31=0xe8a8 & xrD & xrJ & xrK { xrD = xvhaddw.h.b(xrD, xrJ, xrK); } define pcodeop xvhaddw.w.h; #lasx.txt xvhaddw.w.h mask=0x74548000 #0x74548000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.w.h xrD, xrJ, xrK is op15_31=0xe8a9 & xrD & xrJ & xrK { xrD = xvhaddw.w.h(xrD, xrJ, xrK); } define pcodeop xvhaddw.d.w; #lasx.txt xvhaddw.d.w mask=0x74550000 #0x74550000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.d.w xrD, xrJ, xrK is op15_31=0xe8aa & xrD & xrJ & xrK { xrD = xvhaddw.d.w(xrD, xrJ, xrK); } define pcodeop xvhaddw.q.d; #lasx.txt xvhaddw.q.d mask=0x74558000 #0x74558000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.q.d xrD, xrJ, xrK is op15_31=0xe8ab & xrD & xrJ & xrK { xrD = xvhaddw.q.d(xrD, xrJ, xrK); } define pcodeop xvhsubw.h.b; #lasx.txt xvhsubw.h.b mask=0x74560000 #0x74560000 0xffff8000
x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.h.b xrD, xrJ, xrK is op15_31=0xe8ac & xrD & xrJ & xrK { xrD = xvhsubw.h.b(xrD, xrJ, xrK); } define pcodeop xvhsubw.w.h; #lasx.txt xvhsubw.w.h mask=0x74568000 #0x74568000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.w.h xrD, xrJ, xrK is op15_31=0xe8ad & xrD & xrJ & xrK { xrD = xvhsubw.w.h(xrD, xrJ, xrK); } define pcodeop xvhsubw.d.w; #lasx.txt xvhsubw.d.w mask=0x74570000 #0x74570000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.d.w xrD, xrJ, xrK is op15_31=0xe8ae & xrD & xrJ & xrK { xrD = xvhsubw.d.w(xrD, xrJ, xrK); } define pcodeop xvhsubw.q.d; #lasx.txt xvhsubw.q.d mask=0x74578000 #0x74578000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.q.d xrD, xrJ, xrK is op15_31=0xe8af & xrD & xrJ & xrK { xrD = xvhsubw.q.d(xrD, xrJ, xrK); } define pcodeop xvhaddw.hu.bu; #lasx.txt xvhaddw.hu.bu mask=0x74580000 #0x74580000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.hu.bu xrD, xrJ, xrK is op15_31=0xe8b0 & xrD & xrJ & xrK { xrD = xvhaddw.hu.bu(xrD, xrJ, xrK); } define pcodeop xvhaddw.wu.hu; #lasx.txt xvhaddw.wu.hu mask=0x74588000 #0x74588000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.wu.hu xrD, xrJ, xrK is op15_31=0xe8b1 & xrD & xrJ & xrK { xrD = xvhaddw.wu.hu(xrD, xrJ, xrK); } define pcodeop xvhaddw.du.wu; #lasx.txt xvhaddw.du.wu mask=0x74590000 #0x74590000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.du.wu xrD, xrJ, xrK is op15_31=0xe8b2 & xrD & xrJ & xrK { xrD = xvhaddw.du.wu(xrD, xrJ, xrK); } define pcodeop xvhaddw.qu.du; #lasx.txt xvhaddw.qu.du mask=0x74598000 #0x74598000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhaddw.qu.du xrD, xrJ, xrK is op15_31=0xe8b3 & xrD & xrJ & xrK { xrD = xvhaddw.qu.du(xrD, xrJ, xrK); } define pcodeop xvhsubw.hu.bu; #lasx.txt xvhsubw.hu.bu
mask=0x745a0000 #0x745a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.hu.bu xrD, xrJ, xrK is op15_31=0xe8b4 & xrD & xrJ & xrK { xrD = xvhsubw.hu.bu(xrD, xrJ, xrK); } define pcodeop xvhsubw.wu.hu; #lasx.txt xvhsubw.wu.hu mask=0x745a8000 #0x745a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.wu.hu xrD, xrJ, xrK is op15_31=0xe8b5 & xrD & xrJ & xrK { xrD = xvhsubw.wu.hu(xrD, xrJ, xrK); } define pcodeop xvhsubw.du.wu; #lasx.txt xvhsubw.du.wu mask=0x745b0000 #0x745b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.du.wu xrD, xrJ, xrK is op15_31=0xe8b6 & xrD & xrJ & xrK { xrD = xvhsubw.du.wu(xrD, xrJ, xrK); } define pcodeop xvhsubw.qu.du; #lasx.txt xvhsubw.qu.du mask=0x745b8000 #0x745b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvhsubw.qu.du xrD, xrJ, xrK is op15_31=0xe8b7 & xrD & xrJ & xrK { xrD = xvhsubw.qu.du(xrD, xrJ, xrK); } define pcodeop xvadda.b; #lasx.txt xvadda.b mask=0x745c0000 #0x745c0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadda.b xrD, xrJ, xrK is op15_31=0xe8b8 & xrD & xrJ & xrK { xrD = xvadda.b(xrD, xrJ, xrK); } define pcodeop xvadda.h; #lasx.txt xvadda.h mask=0x745c8000 #0x745c8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadda.h xrD, xrJ, xrK is op15_31=0xe8b9 & xrD & xrJ & xrK { xrD = xvadda.h(xrD, xrJ, xrK); } define pcodeop xvadda.w; #lasx.txt xvadda.w mask=0x745d0000 #0x745d0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadda.w xrD, xrJ, xrK is op15_31=0xe8ba & xrD & xrJ & xrK { xrD = xvadda.w(xrD, xrJ, xrK); } define pcodeop xvadda.d; #lasx.txt xvadda.d mask=0x745d8000 #0x745d8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadda.d xrD, xrJ, xrK is op15_31=0xe8bb & xrD & xrJ & xrK { xrD = xvadda.d(xrD, xrJ, xrK); } define pcodeop xvabsd.b; #lasx.txt xvabsd.b mask=0x74600000
# Generated LASX stubs continue: absolute-difference (xvabsd.*, signed/unsigned) and averaging (xvavg.*)
# families. Template unchanged: opaque pcodeop + op15_31 constructor (op15_31 = documented mask >> 15).
#0x74600000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.b xrD, xrJ, xrK is op15_31=0xe8c0 & xrD & xrJ & xrK { xrD = xvabsd.b(xrD, xrJ, xrK); } define pcodeop xvabsd.h; #lasx.txt xvabsd.h mask=0x74608000 #0x74608000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.h xrD, xrJ, xrK is op15_31=0xe8c1 & xrD & xrJ & xrK { xrD = xvabsd.h(xrD, xrJ, xrK); } define pcodeop xvabsd.w; #lasx.txt xvabsd.w mask=0x74610000 #0x74610000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.w xrD, xrJ, xrK is op15_31=0xe8c2 & xrD & xrJ & xrK { xrD = xvabsd.w(xrD, xrJ, xrK); } define pcodeop xvabsd.d; #lasx.txt xvabsd.d mask=0x74618000 #0x74618000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.d xrD, xrJ, xrK is op15_31=0xe8c3 & xrD & xrJ & xrK { xrD = xvabsd.d(xrD, xrJ, xrK); } define pcodeop xvabsd.bu; #lasx.txt xvabsd.bu mask=0x74620000 #0x74620000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.bu xrD, xrJ, xrK is op15_31=0xe8c4 & xrD & xrJ & xrK { xrD = xvabsd.bu(xrD, xrJ, xrK); } define pcodeop xvabsd.hu; #lasx.txt xvabsd.hu mask=0x74628000 #0x74628000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.hu xrD, xrJ, xrK is op15_31=0xe8c5 & xrD & xrJ & xrK { xrD = xvabsd.hu(xrD, xrJ, xrK); } define pcodeop xvabsd.wu; #lasx.txt xvabsd.wu mask=0x74630000 #0x74630000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.wu xrD, xrJ, xrK is op15_31=0xe8c6 & xrD & xrJ & xrK { xrD = xvabsd.wu(xrD, xrJ, xrK); } define pcodeop xvabsd.du; #lasx.txt xvabsd.du mask=0x74638000 #0x74638000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvabsd.du xrD, xrJ, xrK is op15_31=0xe8c7 & xrD & xrJ & xrK { xrD = xvabsd.du(xrD, xrJ, xrK); } define pcodeop xvavg.b; #lasx.txt xvavg.b mask=0x74640000 #0x74640000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0',
'xreg10_5_s0'] :xvavg.b xrD, xrJ, xrK is op15_31=0xe8c8 & xrD & xrJ & xrK { xrD = xvavg.b(xrD, xrJ, xrK); } define pcodeop xvavg.h; #lasx.txt xvavg.h mask=0x74648000 #0x74648000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.h xrD, xrJ, xrK is op15_31=0xe8c9 & xrD & xrJ & xrK { xrD = xvavg.h(xrD, xrJ, xrK); } define pcodeop xvavg.w; #lasx.txt xvavg.w mask=0x74650000 #0x74650000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.w xrD, xrJ, xrK is op15_31=0xe8ca & xrD & xrJ & xrK { xrD = xvavg.w(xrD, xrJ, xrK); } define pcodeop xvavg.d; #lasx.txt xvavg.d mask=0x74658000 #0x74658000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.d xrD, xrJ, xrK is op15_31=0xe8cb & xrD & xrJ & xrK { xrD = xvavg.d(xrD, xrJ, xrK); } define pcodeop xvavg.bu; #lasx.txt xvavg.bu mask=0x74660000 #0x74660000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.bu xrD, xrJ, xrK is op15_31=0xe8cc & xrD & xrJ & xrK { xrD = xvavg.bu(xrD, xrJ, xrK); } define pcodeop xvavg.hu; #lasx.txt xvavg.hu mask=0x74668000 #0x74668000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.hu xrD, xrJ, xrK is op15_31=0xe8cd & xrD & xrJ & xrK { xrD = xvavg.hu(xrD, xrJ, xrK); } define pcodeop xvavg.wu; #lasx.txt xvavg.wu mask=0x74670000 #0x74670000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.wu xrD, xrJ, xrK is op15_31=0xe8ce & xrD & xrJ & xrK { xrD = xvavg.wu(xrD, xrJ, xrK); } define pcodeop xvavg.du; #lasx.txt xvavg.du mask=0x74678000 #0x74678000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavg.du xrD, xrJ, xrK is op15_31=0xe8cf & xrD & xrJ & xrK { xrD = xvavg.du(xrD, xrJ, xrK); } define pcodeop xvavgr.b; #lasx.txt xvavgr.b mask=0x74680000 #0x74680000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.b xrD, xrJ, xrK is op15_31=0xe8d0 & xrD & xrJ & xrK { xrD = xvavgr.b(xrD, xrJ, xrK); }
# Generated LASX stubs continue: rounding average (xvavgr.*), min/max (xvmax.*, xvmin.*, signed/unsigned),
# multiply (xvmul.*, high-part xvmuh.*), widening multiply even/odd lanes (xvmulwev.*/xvmulwod.*),
# multiply-add/subtract (xvmadd.*, xvmsub.*), widening multiply-add (xvmaddwev.*/xvmaddwod.*), and
# divide/modulo (xvdiv.*, xvmod.*). All are opaque pcodeops; constructors select on op15_31 (= mask >> 15).
define pcodeop xvavgr.h; #lasx.txt xvavgr.h mask=0x74688000 #0x74688000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.h xrD, xrJ, xrK is op15_31=0xe8d1 & xrD & xrJ & xrK { xrD = xvavgr.h(xrD, xrJ, xrK); } define pcodeop xvavgr.w; #lasx.txt xvavgr.w mask=0x74690000 #0x74690000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.w xrD, xrJ, xrK is op15_31=0xe8d2 & xrD & xrJ & xrK { xrD = xvavgr.w(xrD, xrJ, xrK); } define pcodeop xvavgr.d; #lasx.txt xvavgr.d mask=0x74698000 #0x74698000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.d xrD, xrJ, xrK is op15_31=0xe8d3 & xrD & xrJ & xrK { xrD = xvavgr.d(xrD, xrJ, xrK); } define pcodeop xvavgr.bu; #lasx.txt xvavgr.bu mask=0x746a0000 #0x746a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.bu xrD, xrJ, xrK is op15_31=0xe8d4 & xrD & xrJ & xrK { xrD = xvavgr.bu(xrD, xrJ, xrK); } define pcodeop xvavgr.hu; #lasx.txt xvavgr.hu mask=0x746a8000 #0x746a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.hu xrD, xrJ, xrK is op15_31=0xe8d5 & xrD & xrJ & xrK { xrD = xvavgr.hu(xrD, xrJ, xrK); } define pcodeop xvavgr.wu; #lasx.txt xvavgr.wu mask=0x746b0000 #0x746b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.wu xrD, xrJ, xrK is op15_31=0xe8d6 & xrD & xrJ & xrK { xrD = xvavgr.wu(xrD, xrJ, xrK); } define pcodeop xvavgr.du; #lasx.txt xvavgr.du mask=0x746b8000 #0x746b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvavgr.du xrD, xrJ, xrK is op15_31=0xe8d7 & xrD & xrJ & xrK { xrD = xvavgr.du(xrD, xrJ, xrK); } define pcodeop xvmax.b; #lasx.txt xvmax.b mask=0x74700000 #0x74700000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.b xrD, xrJ, xrK is op15_31=0xe8e0 & xrD & xrJ & xrK { xrD = xvmax.b(xrD, xrJ, xrK); } define pcodeop xvmax.h; #lasx.txt xvmax.h mask=0x74708000 #0x74708000 0xffff8000
x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.h xrD, xrJ, xrK is op15_31=0xe8e1 & xrD & xrJ & xrK { xrD = xvmax.h(xrD, xrJ, xrK); } define pcodeop xvmax.w; #lasx.txt xvmax.w mask=0x74710000 #0x74710000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.w xrD, xrJ, xrK is op15_31=0xe8e2 & xrD & xrJ & xrK { xrD = xvmax.w(xrD, xrJ, xrK); } define pcodeop xvmax.d; #lasx.txt xvmax.d mask=0x74718000 #0x74718000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.d xrD, xrJ, xrK is op15_31=0xe8e3 & xrD & xrJ & xrK { xrD = xvmax.d(xrD, xrJ, xrK); } define pcodeop xvmin.b; #lasx.txt xvmin.b mask=0x74720000 #0x74720000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.b xrD, xrJ, xrK is op15_31=0xe8e4 & xrD & xrJ & xrK { xrD = xvmin.b(xrD, xrJ, xrK); } define pcodeop xvmin.h; #lasx.txt xvmin.h mask=0x74728000 #0x74728000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.h xrD, xrJ, xrK is op15_31=0xe8e5 & xrD & xrJ & xrK { xrD = xvmin.h(xrD, xrJ, xrK); } define pcodeop xvmin.w; #lasx.txt xvmin.w mask=0x74730000 #0x74730000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.w xrD, xrJ, xrK is op15_31=0xe8e6 & xrD & xrJ & xrK { xrD = xvmin.w(xrD, xrJ, xrK); } define pcodeop xvmin.d; #lasx.txt xvmin.d mask=0x74738000 #0x74738000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.d xrD, xrJ, xrK is op15_31=0xe8e7 & xrD & xrJ & xrK { xrD = xvmin.d(xrD, xrJ, xrK); } define pcodeop xvmax.bu; #lasx.txt xvmax.bu mask=0x74740000 #0x74740000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.bu xrD, xrJ, xrK is op15_31=0xe8e8 & xrD & xrJ & xrK { xrD = xvmax.bu(xrD, xrJ, xrK); } define pcodeop xvmax.hu; #lasx.txt xvmax.hu mask=0x74748000 #0x74748000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.hu xrD, xrJ, xrK is op15_31=0xe8e9 & xrD & xrJ & xrK {
xrD = xvmax.hu(xrD, xrJ, xrK); } define pcodeop xvmax.wu; #lasx.txt xvmax.wu mask=0x74750000 #0x74750000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.wu xrD, xrJ, xrK is op15_31=0xe8ea & xrD & xrJ & xrK { xrD = xvmax.wu(xrD, xrJ, xrK); } define pcodeop xvmax.du; #lasx.txt xvmax.du mask=0x74758000 #0x74758000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmax.du xrD, xrJ, xrK is op15_31=0xe8eb & xrD & xrJ & xrK { xrD = xvmax.du(xrD, xrJ, xrK); } define pcodeop xvmin.bu; #lasx.txt xvmin.bu mask=0x74760000 #0x74760000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.bu xrD, xrJ, xrK is op15_31=0xe8ec & xrD & xrJ & xrK { xrD = xvmin.bu(xrD, xrJ, xrK); } define pcodeop xvmin.hu; #lasx.txt xvmin.hu mask=0x74768000 #0x74768000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.hu xrD, xrJ, xrK is op15_31=0xe8ed & xrD & xrJ & xrK { xrD = xvmin.hu(xrD, xrJ, xrK); } define pcodeop xvmin.wu; #lasx.txt xvmin.wu mask=0x74770000 #0x74770000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.wu xrD, xrJ, xrK is op15_31=0xe8ee & xrD & xrJ & xrK { xrD = xvmin.wu(xrD, xrJ, xrK); } define pcodeop xvmin.du; #lasx.txt xvmin.du mask=0x74778000 #0x74778000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmin.du xrD, xrJ, xrK is op15_31=0xe8ef & xrD & xrJ & xrK { xrD = xvmin.du(xrD, xrJ, xrK); } define pcodeop xvmul.b; #lasx.txt xvmul.b mask=0x74840000 #0x74840000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmul.b xrD, xrJ, xrK is op15_31=0xe908 & xrD & xrJ & xrK { xrD = xvmul.b(xrD, xrJ, xrK); } define pcodeop xvmul.h; #lasx.txt xvmul.h mask=0x74848000 #0x74848000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmul.h xrD, xrJ, xrK is op15_31=0xe909 & xrD & xrJ & xrK { xrD = xvmul.h(xrD, xrJ, xrK); } define pcodeop xvmul.w; #lasx.txt xvmul.w mask=0x74850000 #0x74850000
0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmul.w xrD, xrJ, xrK is op15_31=0xe90a & xrD & xrJ & xrK { xrD = xvmul.w(xrD, xrJ, xrK); } define pcodeop xvmul.d; #lasx.txt xvmul.d mask=0x74858000 #0x74858000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmul.d xrD, xrJ, xrK is op15_31=0xe90b & xrD & xrJ & xrK { xrD = xvmul.d(xrD, xrJ, xrK); } define pcodeop xvmuh.b; #lasx.txt xvmuh.b mask=0x74860000 #0x74860000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.b xrD, xrJ, xrK is op15_31=0xe90c & xrD & xrJ & xrK { xrD = xvmuh.b(xrD, xrJ, xrK); } define pcodeop xvmuh.h; #lasx.txt xvmuh.h mask=0x74868000 #0x74868000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.h xrD, xrJ, xrK is op15_31=0xe90d & xrD & xrJ & xrK { xrD = xvmuh.h(xrD, xrJ, xrK); } define pcodeop xvmuh.w; #lasx.txt xvmuh.w mask=0x74870000 #0x74870000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.w xrD, xrJ, xrK is op15_31=0xe90e & xrD & xrJ & xrK { xrD = xvmuh.w(xrD, xrJ, xrK); } define pcodeop xvmuh.d; #lasx.txt xvmuh.d mask=0x74878000 #0x74878000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.d xrD, xrJ, xrK is op15_31=0xe90f & xrD & xrJ & xrK { xrD = xvmuh.d(xrD, xrJ, xrK); } define pcodeop xvmuh.bu; #lasx.txt xvmuh.bu mask=0x74880000 #0x74880000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.bu xrD, xrJ, xrK is op15_31=0xe910 & xrD & xrJ & xrK { xrD = xvmuh.bu(xrD, xrJ, xrK); } define pcodeop xvmuh.hu; #lasx.txt xvmuh.hu mask=0x74888000 #0x74888000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.hu xrD, xrJ, xrK is op15_31=0xe911 & xrD & xrJ & xrK { xrD = xvmuh.hu(xrD, xrJ, xrK); } define pcodeop xvmuh.wu; #lasx.txt xvmuh.wu mask=0x74890000 #0x74890000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.wu xrD, xrJ, xrK is op15_31=0xe912 &
xrD & xrJ & xrK { xrD = xvmuh.wu(xrD, xrJ, xrK); } define pcodeop xvmuh.du; #lasx.txt xvmuh.du mask=0x74898000 #0x74898000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmuh.du xrD, xrJ, xrK is op15_31=0xe913 & xrD & xrJ & xrK { xrD = xvmuh.du(xrD, xrJ, xrK); } define pcodeop xvmulwev.h.b; #lasx.txt xvmulwev.h.b mask=0x74900000 #0x74900000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.h.b xrD, xrJ, xrK is op15_31=0xe920 & xrD & xrJ & xrK { xrD = xvmulwev.h.b(xrD, xrJ, xrK); } define pcodeop xvmulwev.w.h; #lasx.txt xvmulwev.w.h mask=0x74908000 #0x74908000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.w.h xrD, xrJ, xrK is op15_31=0xe921 & xrD & xrJ & xrK { xrD = xvmulwev.w.h(xrD, xrJ, xrK); } define pcodeop xvmulwev.d.w; #lasx.txt xvmulwev.d.w mask=0x74910000 #0x74910000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.d.w xrD, xrJ, xrK is op15_31=0xe922 & xrD & xrJ & xrK { xrD = xvmulwev.d.w(xrD, xrJ, xrK); } define pcodeop xvmulwev.q.d; #lasx.txt xvmulwev.q.d mask=0x74918000 #0x74918000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.q.d xrD, xrJ, xrK is op15_31=0xe923 & xrD & xrJ & xrK { xrD = xvmulwev.q.d(xrD, xrJ, xrK); } define pcodeop xvmulwod.h.b; #lasx.txt xvmulwod.h.b mask=0x74920000 #0x74920000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.h.b xrD, xrJ, xrK is op15_31=0xe924 & xrD & xrJ & xrK { xrD = xvmulwod.h.b(xrD, xrJ, xrK); } define pcodeop xvmulwod.w.h; #lasx.txt xvmulwod.w.h mask=0x74928000 #0x74928000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.w.h xrD, xrJ, xrK is op15_31=0xe925 & xrD & xrJ & xrK { xrD = xvmulwod.w.h(xrD, xrJ, xrK); } define pcodeop xvmulwod.d.w; #lasx.txt xvmulwod.d.w mask=0x74930000 #0x74930000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.d.w xrD, xrJ, xrK is
op15_31=0xe926 & xrD & xrJ & xrK { xrD = xvmulwod.d.w(xrD, xrJ, xrK); } define pcodeop xvmulwod.q.d; #lasx.txt xvmulwod.q.d mask=0x74938000 #0x74938000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.q.d xrD, xrJ, xrK is op15_31=0xe927 & xrD & xrJ & xrK { xrD = xvmulwod.q.d(xrD, xrJ, xrK); } define pcodeop xvmulwev.h.bu; #lasx.txt xvmulwev.h.bu mask=0x74980000 #0x74980000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.h.bu xrD, xrJ, xrK is op15_31=0xe930 & xrD & xrJ & xrK { xrD = xvmulwev.h.bu(xrD, xrJ, xrK); } define pcodeop xvmulwev.w.hu; #lasx.txt xvmulwev.w.hu mask=0x74988000 #0x74988000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.w.hu xrD, xrJ, xrK is op15_31=0xe931 & xrD & xrJ & xrK { xrD = xvmulwev.w.hu(xrD, xrJ, xrK); } define pcodeop xvmulwev.d.wu; #lasx.txt xvmulwev.d.wu mask=0x74990000 #0x74990000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.d.wu xrD, xrJ, xrK is op15_31=0xe932 & xrD & xrJ & xrK { xrD = xvmulwev.d.wu(xrD, xrJ, xrK); } define pcodeop xvmulwev.q.du; #lasx.txt xvmulwev.q.du mask=0x74998000 #0x74998000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.q.du xrD, xrJ, xrK is op15_31=0xe933 & xrD & xrJ & xrK { xrD = xvmulwev.q.du(xrD, xrJ, xrK); } define pcodeop xvmulwod.h.bu; #lasx.txt xvmulwod.h.bu mask=0x749a0000 #0x749a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.h.bu xrD, xrJ, xrK is op15_31=0xe934 & xrD & xrJ & xrK { xrD = xvmulwod.h.bu(xrD, xrJ, xrK); } define pcodeop xvmulwod.w.hu; #lasx.txt xvmulwod.w.hu mask=0x749a8000 #0x749a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.w.hu xrD, xrJ, xrK is op15_31=0xe935 & xrD & xrJ & xrK { xrD = xvmulwod.w.hu(xrD, xrJ, xrK); } define pcodeop xvmulwod.d.wu; #lasx.txt xvmulwod.d.wu mask=0x749b0000 #0x749b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0',
'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.d.wu xrD, xrJ, xrK is op15_31=0xe936 & xrD & xrJ & xrK { xrD = xvmulwod.d.wu(xrD, xrJ, xrK); } define pcodeop xvmulwod.q.du; #lasx.txt xvmulwod.q.du mask=0x749b8000 #0x749b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.q.du xrD, xrJ, xrK is op15_31=0xe937 & xrD & xrJ & xrK { xrD = xvmulwod.q.du(xrD, xrJ, xrK); } define pcodeop xvmulwev.h.bu.b; #lasx.txt xvmulwev.h.bu.b mask=0x74a00000 #0x74a00000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.h.bu.b xrD, xrJ, xrK is op15_31=0xe940 & xrD & xrJ & xrK { xrD = xvmulwev.h.bu.b(xrD, xrJ, xrK); } define pcodeop xvmulwev.w.hu.h; #lasx.txt xvmulwev.w.hu.h mask=0x74a08000 #0x74a08000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.w.hu.h xrD, xrJ, xrK is op15_31=0xe941 & xrD & xrJ & xrK { xrD = xvmulwev.w.hu.h(xrD, xrJ, xrK); } define pcodeop xvmulwev.d.wu.w; #lasx.txt xvmulwev.d.wu.w mask=0x74a10000 #0x74a10000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.d.wu.w xrD, xrJ, xrK is op15_31=0xe942 & xrD & xrJ & xrK { xrD = xvmulwev.d.wu.w(xrD, xrJ, xrK); } define pcodeop xvmulwev.q.du.d; #lasx.txt xvmulwev.q.du.d mask=0x74a18000 #0x74a18000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwev.q.du.d xrD, xrJ, xrK is op15_31=0xe943 & xrD & xrJ & xrK { xrD = xvmulwev.q.du.d(xrD, xrJ, xrK); } define pcodeop xvmulwod.h.bu.b; #lasx.txt xvmulwod.h.bu.b mask=0x74a20000 #0x74a20000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.h.bu.b xrD, xrJ, xrK is op15_31=0xe944 & xrD & xrJ & xrK { xrD = xvmulwod.h.bu.b(xrD, xrJ, xrK); } define pcodeop xvmulwod.w.hu.h; #lasx.txt xvmulwod.w.hu.h mask=0x74a28000 #0x74a28000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.w.hu.h xrD, xrJ, xrK is op15_31=0xe945 & xrD & xrJ & xrK { xrD = xvmulwod.w.hu.h(xrD, xrJ, xrK); } define
pcodeop xvmulwod.d.wu.w; #lasx.txt xvmulwod.d.wu.w mask=0x74a30000 #0x74a30000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.d.wu.w xrD, xrJ, xrK is op15_31=0xe946 & xrD & xrJ & xrK { xrD = xvmulwod.d.wu.w(xrD, xrJ, xrK); } define pcodeop xvmulwod.q.du.d; #lasx.txt xvmulwod.q.du.d mask=0x74a38000 #0x74a38000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmulwod.q.du.d xrD, xrJ, xrK is op15_31=0xe947 & xrD & xrJ & xrK { xrD = xvmulwod.q.du.d(xrD, xrJ, xrK); } define pcodeop xvmadd.b; #lasx.txt xvmadd.b mask=0x74a80000 #0x74a80000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmadd.b xrD, xrJ, xrK is op15_31=0xe950 & xrD & xrJ & xrK { xrD = xvmadd.b(xrD, xrJ, xrK); } define pcodeop xvmadd.h; #lasx.txt xvmadd.h mask=0x74a88000 #0x74a88000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmadd.h xrD, xrJ, xrK is op15_31=0xe951 & xrD & xrJ & xrK { xrD = xvmadd.h(xrD, xrJ, xrK); } define pcodeop xvmadd.w; #lasx.txt xvmadd.w mask=0x74a90000 #0x74a90000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmadd.w xrD, xrJ, xrK is op15_31=0xe952 & xrD & xrJ & xrK { xrD = xvmadd.w(xrD, xrJ, xrK); } define pcodeop xvmadd.d; #lasx.txt xvmadd.d mask=0x74a98000 #0x74a98000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmadd.d xrD, xrJ, xrK is op15_31=0xe953 & xrD & xrJ & xrK { xrD = xvmadd.d(xrD, xrJ, xrK); } define pcodeop xvmsub.b; #lasx.txt xvmsub.b mask=0x74aa0000 #0x74aa0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmsub.b xrD, xrJ, xrK is op15_31=0xe954 & xrD & xrJ & xrK { xrD = xvmsub.b(xrD, xrJ, xrK); } define pcodeop xvmsub.h; #lasx.txt xvmsub.h mask=0x74aa8000 #0x74aa8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmsub.h xrD, xrJ, xrK is op15_31=0xe955 & xrD & xrJ & xrK { xrD = xvmsub.h(xrD, xrJ, xrK); } define pcodeop xvmsub.w; #lasx.txt xvmsub.w
mask=0x74ab0000 #0x74ab0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmsub.w xrD, xrJ, xrK is op15_31=0xe956 & xrD & xrJ & xrK { xrD = xvmsub.w(xrD, xrJ, xrK); } define pcodeop xvmsub.d; #lasx.txt xvmsub.d mask=0x74ab8000 #0x74ab8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmsub.d xrD, xrJ, xrK is op15_31=0xe957 & xrD & xrJ & xrK { xrD = xvmsub.d(xrD, xrJ, xrK); } define pcodeop xvmaddwev.h.b; #lasx.txt xvmaddwev.h.b mask=0x74ac0000 #0x74ac0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.h.b xrD, xrJ, xrK is op15_31=0xe958 & xrD & xrJ & xrK { xrD = xvmaddwev.h.b(xrD, xrJ, xrK); } define pcodeop xvmaddwev.w.h; #lasx.txt xvmaddwev.w.h mask=0x74ac8000 #0x74ac8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.w.h xrD, xrJ, xrK is op15_31=0xe959 & xrD & xrJ & xrK { xrD = xvmaddwev.w.h(xrD, xrJ, xrK); } define pcodeop xvmaddwev.d.w; #lasx.txt xvmaddwev.d.w mask=0x74ad0000 #0x74ad0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.d.w xrD, xrJ, xrK is op15_31=0xe95a & xrD & xrJ & xrK { xrD = xvmaddwev.d.w(xrD, xrJ, xrK); } define pcodeop xvmaddwev.q.d; #lasx.txt xvmaddwev.q.d mask=0x74ad8000 #0x74ad8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.q.d xrD, xrJ, xrK is op15_31=0xe95b & xrD & xrJ & xrK { xrD = xvmaddwev.q.d(xrD, xrJ, xrK); } define pcodeop xvmaddwod.h.b; #lasx.txt xvmaddwod.h.b mask=0x74ae0000 #0x74ae0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.h.b xrD, xrJ, xrK is op15_31=0xe95c & xrD & xrJ & xrK { xrD = xvmaddwod.h.b(xrD, xrJ, xrK); } define pcodeop xvmaddwod.w.h; #lasx.txt xvmaddwod.w.h mask=0x74ae8000 #0x74ae8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.w.h xrD, xrJ, xrK is op15_31=0xe95d & xrD & xrJ & xrK { xrD = xvmaddwod.w.h(xrD, xrJ, xrK); } define pcodeop
xvmaddwod.d.w; #lasx.txt xvmaddwod.d.w mask=0x74af0000 #0x74af0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.d.w xrD, xrJ, xrK is op15_31=0xe95e & xrD & xrJ & xrK { xrD = xvmaddwod.d.w(xrD, xrJ, xrK); } define pcodeop xvmaddwod.q.d; #lasx.txt xvmaddwod.q.d mask=0x74af8000 #0x74af8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.q.d xrD, xrJ, xrK is op15_31=0xe95f & xrD & xrJ & xrK { xrD = xvmaddwod.q.d(xrD, xrJ, xrK); } define pcodeop xvmaddwev.h.bu; #lasx.txt xvmaddwev.h.bu mask=0x74b40000 #0x74b40000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.h.bu xrD, xrJ, xrK is op15_31=0xe968 & xrD & xrJ & xrK { xrD = xvmaddwev.h.bu(xrD, xrJ, xrK); } define pcodeop xvmaddwev.w.hu; #lasx.txt xvmaddwev.w.hu mask=0x74b48000 #0x74b48000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.w.hu xrD, xrJ, xrK is op15_31=0xe969 & xrD & xrJ & xrK { xrD = xvmaddwev.w.hu(xrD, xrJ, xrK); } define pcodeop xvmaddwev.d.wu; #lasx.txt xvmaddwev.d.wu mask=0x74b50000 #0x74b50000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.d.wu xrD, xrJ, xrK is op15_31=0xe96a & xrD & xrJ & xrK { xrD = xvmaddwev.d.wu(xrD, xrJ, xrK); } define pcodeop xvmaddwev.q.du; #lasx.txt xvmaddwev.q.du mask=0x74b58000 #0x74b58000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.q.du xrD, xrJ, xrK is op15_31=0xe96b & xrD & xrJ & xrK { xrD = xvmaddwev.q.du(xrD, xrJ, xrK); } define pcodeop xvmaddwod.h.bu; #lasx.txt xvmaddwod.h.bu mask=0x74b60000 #0x74b60000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.h.bu xrD, xrJ, xrK is op15_31=0xe96c & xrD & xrJ & xrK { xrD = xvmaddwod.h.bu(xrD, xrJ, xrK); } define pcodeop xvmaddwod.w.hu; #lasx.txt xvmaddwod.w.hu mask=0x74b68000 #0x74b68000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.w.hu xrD, xrJ, xrK is
op15_31=0xe96d & xrD & xrJ & xrK { xrD = xvmaddwod.w.hu(xrD, xrJ, xrK); } define pcodeop xvmaddwod.d.wu; #lasx.txt xvmaddwod.d.wu mask=0x74b70000 #0x74b70000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.d.wu xrD, xrJ, xrK is op15_31=0xe96e & xrD & xrJ & xrK { xrD = xvmaddwod.d.wu(xrD, xrJ, xrK); } define pcodeop xvmaddwod.q.du; #lasx.txt xvmaddwod.q.du mask=0x74b78000 #0x74b78000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.q.du xrD, xrJ, xrK is op15_31=0xe96f & xrD & xrJ & xrK { xrD = xvmaddwod.q.du(xrD, xrJ, xrK); } define pcodeop xvmaddwev.h.bu.b; #lasx.txt xvmaddwev.h.bu.b mask=0x74bc0000 #0x74bc0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.h.bu.b xrD, xrJ, xrK is op15_31=0xe978 & xrD & xrJ & xrK { xrD = xvmaddwev.h.bu.b(xrD, xrJ, xrK); } define pcodeop xvmaddwev.w.hu.h; #lasx.txt xvmaddwev.w.hu.h mask=0x74bc8000 #0x74bc8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.w.hu.h xrD, xrJ, xrK is op15_31=0xe979 & xrD & xrJ & xrK { xrD = xvmaddwev.w.hu.h(xrD, xrJ, xrK); } define pcodeop xvmaddwev.d.wu.w; #lasx.txt xvmaddwev.d.wu.w mask=0x74bd0000 #0x74bd0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.d.wu.w xrD, xrJ, xrK is op15_31=0xe97a & xrD & xrJ & xrK { xrD = xvmaddwev.d.wu.w(xrD, xrJ, xrK); } define pcodeop xvmaddwev.q.du.d; #lasx.txt xvmaddwev.q.du.d mask=0x74bd8000 #0x74bd8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwev.q.du.d xrD, xrJ, xrK is op15_31=0xe97b & xrD & xrJ & xrK { xrD = xvmaddwev.q.du.d(xrD, xrJ, xrK); } define pcodeop xvmaddwod.h.bu.b; #lasx.txt xvmaddwod.h.bu.b mask=0x74be0000 #0x74be0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.h.bu.b xrD, xrJ, xrK is op15_31=0xe97c & xrD & xrJ & xrK { xrD = xvmaddwod.h.bu.b(xrD, xrJ, xrK); } define pcodeop xvmaddwod.w.hu.h; #lasx.txt
xvmaddwod.w.hu.h mask=0x74be8000 #0x74be8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.w.hu.h xrD, xrJ, xrK is op15_31=0xe97d & xrD & xrJ & xrK { xrD = xvmaddwod.w.hu.h(xrD, xrJ, xrK); } define pcodeop xvmaddwod.d.wu.w; #lasx.txt xvmaddwod.d.wu.w mask=0x74bf0000 #0x74bf0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.d.wu.w xrD, xrJ, xrK is op15_31=0xe97e & xrD & xrJ & xrK { xrD = xvmaddwod.d.wu.w(xrD, xrJ, xrK); } define pcodeop xvmaddwod.q.du.d; #lasx.txt xvmaddwod.q.du.d mask=0x74bf8000 #0x74bf8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmaddwod.q.du.d xrD, xrJ, xrK is op15_31=0xe97f & xrD & xrJ & xrK { xrD = xvmaddwod.q.du.d(xrD, xrJ, xrK); } define pcodeop xvdiv.b; #lasx.txt xvdiv.b mask=0x74e00000 #0x74e00000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.b xrD, xrJ, xrK is op15_31=0xe9c0 & xrD & xrJ & xrK { xrD = xvdiv.b(xrD, xrJ, xrK); } define pcodeop xvdiv.h; #lasx.txt xvdiv.h mask=0x74e08000 #0x74e08000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.h xrD, xrJ, xrK is op15_31=0xe9c1 & xrD & xrJ & xrK { xrD = xvdiv.h(xrD, xrJ, xrK); } define pcodeop xvdiv.w; #lasx.txt xvdiv.w mask=0x74e10000 #0x74e10000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.w xrD, xrJ, xrK is op15_31=0xe9c2 & xrD & xrJ & xrK { xrD = xvdiv.w(xrD, xrJ, xrK); } define pcodeop xvdiv.d; #lasx.txt xvdiv.d mask=0x74e18000 #0x74e18000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.d xrD, xrJ, xrK is op15_31=0xe9c3 & xrD & xrJ & xrK { xrD = xvdiv.d(xrD, xrJ, xrK); } define pcodeop xvmod.b; #lasx.txt xvmod.b mask=0x74e20000 #0x74e20000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.b xrD, xrJ, xrK is op15_31=0xe9c4 & xrD & xrJ & xrK { xrD = xvmod.b(xrD, xrJ, xrK); } define pcodeop xvmod.h; #lasx.txt xvmod.h mask=0x74e28000
#0x74e28000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.h xrD, xrJ, xrK is op15_31=0xe9c5 & xrD & xrJ & xrK { xrD = xvmod.h(xrD, xrJ, xrK); } define pcodeop xvmod.w; #lasx.txt xvmod.w mask=0x74e30000 #0x74e30000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.w xrD, xrJ, xrK is op15_31=0xe9c6 & xrD & xrJ & xrK { xrD = xvmod.w(xrD, xrJ, xrK); } define pcodeop xvmod.d; #lasx.txt xvmod.d mask=0x74e38000 #0x74e38000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.d xrD, xrJ, xrK is op15_31=0xe9c7 & xrD & xrJ & xrK { xrD = xvmod.d(xrD, xrJ, xrK); } define pcodeop xvdiv.bu; #lasx.txt xvdiv.bu mask=0x74e40000 #0x74e40000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.bu xrD, xrJ, xrK is op15_31=0xe9c8 & xrD & xrJ & xrK { xrD = xvdiv.bu(xrD, xrJ, xrK); } define pcodeop xvdiv.hu; #lasx.txt xvdiv.hu mask=0x74e48000 #0x74e48000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.hu xrD, xrJ, xrK is op15_31=0xe9c9 & xrD & xrJ & xrK { xrD = xvdiv.hu(xrD, xrJ, xrK); } define pcodeop xvdiv.wu; #lasx.txt xvdiv.wu mask=0x74e50000 #0x74e50000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.wu xrD, xrJ, xrK is op15_31=0xe9ca & xrD & xrJ & xrK { xrD = xvdiv.wu(xrD, xrJ, xrK); } define pcodeop xvdiv.du; #lasx.txt xvdiv.du mask=0x74e58000 #0x74e58000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvdiv.du xrD, xrJ, xrK is op15_31=0xe9cb & xrD & xrJ & xrK { xrD = xvdiv.du(xrD, xrJ, xrK); } define pcodeop xvmod.bu; #lasx.txt xvmod.bu mask=0x74e60000 #0x74e60000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.bu xrD, xrJ, xrK is op15_31=0xe9cc & xrD & xrJ & xrK { xrD = xvmod.bu(xrD, xrJ, xrK); } define pcodeop xvmod.hu; #lasx.txt xvmod.hu mask=0x74e68000 #0x74e68000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.hu xrD, xrJ, 
xrK is op15_31=0xe9cd & xrD & xrJ & xrK { xrD = xvmod.hu(xrD, xrJ, xrK); } define pcodeop xvmod.wu; #lasx.txt xvmod.wu mask=0x74e70000 #0x74e70000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.wu xrD, xrJ, xrK is op15_31=0xe9ce & xrD & xrJ & xrK { xrD = xvmod.wu(xrD, xrJ, xrK); } define pcodeop xvmod.du; #lasx.txt xvmod.du mask=0x74e78000 #0x74e78000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvmod.du xrD, xrJ, xrK is op15_31=0xe9cf & xrD & xrJ & xrK { xrD = xvmod.du(xrD, xrJ, xrK); } define pcodeop xvsll.b; #lasx.txt xvsll.b mask=0x74e80000 #0x74e80000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsll.b xrD, xrJ, xrK is op15_31=0xe9d0 & xrD & xrJ & xrK { xrD = xvsll.b(xrD, xrJ, xrK); } define pcodeop xvsll.h; #lasx.txt xvsll.h mask=0x74e88000 #0x74e88000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsll.h xrD, xrJ, xrK is op15_31=0xe9d1 & xrD & xrJ & xrK { xrD = xvsll.h(xrD, xrJ, xrK); } define pcodeop xvsll.w; #lasx.txt xvsll.w mask=0x74e90000 #0x74e90000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsll.w xrD, xrJ, xrK is op15_31=0xe9d2 & xrD & xrJ & xrK { xrD = xvsll.w(xrD, xrJ, xrK); } define pcodeop xvsll.d; #lasx.txt xvsll.d mask=0x74e98000 #0x74e98000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsll.d xrD, xrJ, xrK is op15_31=0xe9d3 & xrD & xrJ & xrK { xrD = xvsll.d(xrD, xrJ, xrK); } define pcodeop xvsrl.b; #lasx.txt xvsrl.b mask=0x74ea0000 #0x74ea0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrl.b xrD, xrJ, xrK is op15_31=0xe9d4 & xrD & xrJ & xrK { xrD = xvsrl.b(xrD, xrJ, xrK); } define pcodeop xvsrl.h; #lasx.txt xvsrl.h mask=0x74ea8000 #0x74ea8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrl.h xrD, xrJ, xrK is op15_31=0xe9d5 & xrD & xrJ & xrK { xrD = xvsrl.h(xrD, xrJ, xrK); } define pcodeop xvsrl.w; #lasx.txt xvsrl.w 
mask=0x74eb0000 #0x74eb0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrl.w xrD, xrJ, xrK is op15_31=0xe9d6 & xrD & xrJ & xrK { xrD = xvsrl.w(xrD, xrJ, xrK); } define pcodeop xvsrl.d; #lasx.txt xvsrl.d mask=0x74eb8000 #0x74eb8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrl.d xrD, xrJ, xrK is op15_31=0xe9d7 & xrD & xrJ & xrK { xrD = xvsrl.d(xrD, xrJ, xrK); } define pcodeop xvsra.b; #lasx.txt xvsra.b mask=0x74ec0000 #0x74ec0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsra.b xrD, xrJ, xrK is op15_31=0xe9d8 & xrD & xrJ & xrK { xrD = xvsra.b(xrD, xrJ, xrK); } define pcodeop xvsra.h; #lasx.txt xvsra.h mask=0x74ec8000 #0x74ec8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsra.h xrD, xrJ, xrK is op15_31=0xe9d9 & xrD & xrJ & xrK { xrD = xvsra.h(xrD, xrJ, xrK); } define pcodeop xvsra.w; #lasx.txt xvsra.w mask=0x74ed0000 #0x74ed0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsra.w xrD, xrJ, xrK is op15_31=0xe9da & xrD & xrJ & xrK { xrD = xvsra.w(xrD, xrJ, xrK); } define pcodeop xvsra.d; #lasx.txt xvsra.d mask=0x74ed8000 #0x74ed8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsra.d xrD, xrJ, xrK is op15_31=0xe9db & xrD & xrJ & xrK { xrD = xvsra.d(xrD, xrJ, xrK); } define pcodeop xvrotr.b; #lasx.txt xvrotr.b mask=0x74ee0000 #0x74ee0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvrotr.b xrD, xrJ, xrK is op15_31=0xe9dc & xrD & xrJ & xrK { xrD = xvrotr.b(xrD, xrJ, xrK); } define pcodeop xvrotr.h; #lasx.txt xvrotr.h mask=0x74ee8000 #0x74ee8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvrotr.h xrD, xrJ, xrK is op15_31=0xe9dd & xrD & xrJ & xrK { xrD = xvrotr.h(xrD, xrJ, xrK); } define pcodeop xvrotr.w; #lasx.txt xvrotr.w mask=0x74ef0000 #0x74ef0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvrotr.w xrD, 
xrJ, xrK is op15_31=0xe9de & xrD & xrJ & xrK { xrD = xvrotr.w(xrD, xrJ, xrK); } define pcodeop xvrotr.d; #lasx.txt xvrotr.d mask=0x74ef8000 #0x74ef8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvrotr.d xrD, xrJ, xrK is op15_31=0xe9df & xrD & xrJ & xrK { xrD = xvrotr.d(xrD, xrJ, xrK); } define pcodeop xvsrlr.b; #lasx.txt xvsrlr.b mask=0x74f00000 #0x74f00000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlr.b xrD, xrJ, xrK is op15_31=0xe9e0 & xrD & xrJ & xrK { xrD = xvsrlr.b(xrD, xrJ, xrK); } define pcodeop xvsrlr.h; #lasx.txt xvsrlr.h mask=0x74f08000 #0x74f08000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlr.h xrD, xrJ, xrK is op15_31=0xe9e1 & xrD & xrJ & xrK { xrD = xvsrlr.h(xrD, xrJ, xrK); } define pcodeop xvsrlr.w; #lasx.txt xvsrlr.w mask=0x74f10000 #0x74f10000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlr.w xrD, xrJ, xrK is op15_31=0xe9e2 & xrD & xrJ & xrK { xrD = xvsrlr.w(xrD, xrJ, xrK); } define pcodeop xvsrlr.d; #lasx.txt xvsrlr.d mask=0x74f18000 #0x74f18000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlr.d xrD, xrJ, xrK is op15_31=0xe9e3 & xrD & xrJ & xrK { xrD = xvsrlr.d(xrD, xrJ, xrK); } define pcodeop xvsrar.b; #lasx.txt xvsrar.b mask=0x74f20000 #0x74f20000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrar.b xrD, xrJ, xrK is op15_31=0xe9e4 & xrD & xrJ & xrK { xrD = xvsrar.b(xrD, xrJ, xrK); } define pcodeop xvsrar.h; #lasx.txt xvsrar.h mask=0x74f28000 #0x74f28000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrar.h xrD, xrJ, xrK is op15_31=0xe9e5 & xrD & xrJ & xrK { xrD = xvsrar.h(xrD, xrJ, xrK); } define pcodeop xvsrar.w; #lasx.txt xvsrar.w mask=0x74f30000 #0x74f30000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrar.w xrD, xrJ, xrK is op15_31=0xe9e6 & xrD & xrJ & xrK { xrD = xvsrar.w(xrD, xrJ, xrK); } define pcodeop 
xvsrar.d; #lasx.txt xvsrar.d mask=0x74f38000 #0x74f38000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrar.d xrD, xrJ, xrK is op15_31=0xe9e7 & xrD & xrJ & xrK { xrD = xvsrar.d(xrD, xrJ, xrK); } define pcodeop xvsrln.b.h; #lasx.txt xvsrln.b.h mask=0x74f48000 #0x74f48000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrln.b.h xrD, xrJ, xrK is op15_31=0xe9e9 & xrD & xrJ & xrK { xrD = xvsrln.b.h(xrD, xrJ, xrK); } define pcodeop xvsrln.h.w; #lasx.txt xvsrln.h.w mask=0x74f50000 #0x74f50000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrln.h.w xrD, xrJ, xrK is op15_31=0xe9ea & xrD & xrJ & xrK { xrD = xvsrln.h.w(xrD, xrJ, xrK); } define pcodeop xvsrln.w.d; #lasx.txt xvsrln.w.d mask=0x74f58000 #0x74f58000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrln.w.d xrD, xrJ, xrK is op15_31=0xe9eb & xrD & xrJ & xrK { xrD = xvsrln.w.d(xrD, xrJ, xrK); } define pcodeop xvsran.b.h; #lasx.txt xvsran.b.h mask=0x74f68000 #0x74f68000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsran.b.h xrD, xrJ, xrK is op15_31=0xe9ed & xrD & xrJ & xrK { xrD = xvsran.b.h(xrD, xrJ, xrK); } define pcodeop xvsran.h.w; #lasx.txt xvsran.h.w mask=0x74f70000 #0x74f70000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsran.h.w xrD, xrJ, xrK is op15_31=0xe9ee & xrD & xrJ & xrK { xrD = xvsran.h.w(xrD, xrJ, xrK); } define pcodeop xvsran.w.d; #lasx.txt xvsran.w.d mask=0x74f78000 #0x74f78000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsran.w.d xrD, xrJ, xrK is op15_31=0xe9ef & xrD & xrJ & xrK { xrD = xvsran.w.d(xrD, xrJ, xrK); } define pcodeop xvsrlrn.b.h; #lasx.txt xvsrlrn.b.h mask=0x74f88000 #0x74f88000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlrn.b.h xrD, xrJ, xrK is op15_31=0xe9f1 & xrD & xrJ & xrK { xrD = xvsrlrn.b.h(xrD, xrJ, xrK); } define pcodeop xvsrlrn.h.w; #lasx.txt xvsrlrn.h.w 
mask=0x74f90000 #0x74f90000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlrn.h.w xrD, xrJ, xrK is op15_31=0xe9f2 & xrD & xrJ & xrK { xrD = xvsrlrn.h.w(xrD, xrJ, xrK); } define pcodeop xvsrlrn.w.d; #lasx.txt xvsrlrn.w.d mask=0x74f98000 #0x74f98000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrlrn.w.d xrD, xrJ, xrK is op15_31=0xe9f3 & xrD & xrJ & xrK { xrD = xvsrlrn.w.d(xrD, xrJ, xrK); } define pcodeop xvsrarn.b.h; #lasx.txt xvsrarn.b.h mask=0x74fa8000 #0x74fa8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrarn.b.h xrD, xrJ, xrK is op15_31=0xe9f5 & xrD & xrJ & xrK { xrD = xvsrarn.b.h(xrD, xrJ, xrK); } define pcodeop xvsrarn.h.w; #lasx.txt xvsrarn.h.w mask=0x74fb0000 #0x74fb0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrarn.h.w xrD, xrJ, xrK is op15_31=0xe9f6 & xrD & xrJ & xrK { xrD = xvsrarn.h.w(xrD, xrJ, xrK); } define pcodeop xvsrarn.w.d; #lasx.txt xvsrarn.w.d mask=0x74fb8000 #0x74fb8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsrarn.w.d xrD, xrJ, xrK is op15_31=0xe9f7 & xrD & xrJ & xrK { xrD = xvsrarn.w.d(xrD, xrJ, xrK); } define pcodeop xvssrln.b.h; #lasx.txt xvssrln.b.h mask=0x74fc8000 #0x74fc8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrln.b.h xrD, xrJ, xrK is op15_31=0xe9f9 & xrD & xrJ & xrK { xrD = xvssrln.b.h(xrD, xrJ, xrK); } define pcodeop xvssrln.h.w; #lasx.txt xvssrln.h.w mask=0x74fd0000 #0x74fd0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrln.h.w xrD, xrJ, xrK is op15_31=0xe9fa & xrD & xrJ & xrK { xrD = xvssrln.h.w(xrD, xrJ, xrK); } define pcodeop xvssrln.w.d; #lasx.txt xvssrln.w.d mask=0x74fd8000 #0x74fd8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrln.w.d xrD, xrJ, xrK is op15_31=0xe9fb & xrD & xrJ & xrK { xrD = xvssrln.w.d(xrD, xrJ, xrK); } define pcodeop xvssran.b.h; #lasx.txt xvssran.b.h 
mask=0x74fe8000 #0x74fe8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssran.b.h xrD, xrJ, xrK is op15_31=0xe9fd & xrD & xrJ & xrK { xrD = xvssran.b.h(xrD, xrJ, xrK); } define pcodeop xvssran.h.w; #lasx.txt xvssran.h.w mask=0x74ff0000 #0x74ff0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssran.h.w xrD, xrJ, xrK is op15_31=0xe9fe & xrD & xrJ & xrK { xrD = xvssran.h.w(xrD, xrJ, xrK); } define pcodeop xvssran.w.d; #lasx.txt xvssran.w.d mask=0x74ff8000 #0x74ff8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssran.w.d xrD, xrJ, xrK is op15_31=0xe9ff & xrD & xrJ & xrK { xrD = xvssran.w.d(xrD, xrJ, xrK); } define pcodeop xvssrlrn.b.h; #lasx.txt xvssrlrn.b.h mask=0x75008000 #0x75008000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrlrn.b.h xrD, xrJ, xrK is op15_31=0xea01 & xrD & xrJ & xrK { xrD = xvssrlrn.b.h(xrD, xrJ, xrK); } define pcodeop xvssrlrn.h.w; #lasx.txt xvssrlrn.h.w mask=0x75010000 #0x75010000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrlrn.h.w xrD, xrJ, xrK is op15_31=0xea02 & xrD & xrJ & xrK { xrD = xvssrlrn.h.w(xrD, xrJ, xrK); } define pcodeop xvssrlrn.w.d; #lasx.txt xvssrlrn.w.d mask=0x75018000 #0x75018000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrlrn.w.d xrD, xrJ, xrK is op15_31=0xea03 & xrD & xrJ & xrK { xrD = xvssrlrn.w.d(xrD, xrJ, xrK); } define pcodeop xvssrarn.b.h; #lasx.txt xvssrarn.b.h mask=0x75028000 #0x75028000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrarn.b.h xrD, xrJ, xrK is op15_31=0xea05 & xrD & xrJ & xrK { xrD = xvssrarn.b.h(xrD, xrJ, xrK); } define pcodeop xvssrarn.h.w; #lasx.txt xvssrarn.h.w mask=0x75030000 #0x75030000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrarn.h.w xrD, xrJ, xrK is op15_31=0xea06 & xrD & xrJ & xrK { xrD = xvssrarn.h.w(xrD, xrJ, xrK); } define pcodeop xvssrarn.w.d; 
#lasx.txt xvssrarn.w.d mask=0x75038000 #0x75038000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrarn.w.d xrD, xrJ, xrK is op15_31=0xea07 & xrD & xrJ & xrK { xrD = xvssrarn.w.d(xrD, xrJ, xrK); } define pcodeop xvssrln.bu.h; #lasx.txt xvssrln.bu.h mask=0x75048000 #0x75048000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrln.bu.h xrD, xrJ, xrK is op15_31=0xea09 & xrD & xrJ & xrK { xrD = xvssrln.bu.h(xrD, xrJ, xrK); } define pcodeop xvssrln.hu.w; #lasx.txt xvssrln.hu.w mask=0x75050000 #0x75050000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrln.hu.w xrD, xrJ, xrK is op15_31=0xea0a & xrD & xrJ & xrK { xrD = xvssrln.hu.w(xrD, xrJ, xrK); } define pcodeop xvssrln.wu.d; #lasx.txt xvssrln.wu.d mask=0x75058000 #0x75058000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrln.wu.d xrD, xrJ, xrK is op15_31=0xea0b & xrD & xrJ & xrK { xrD = xvssrln.wu.d(xrD, xrJ, xrK); } define pcodeop xvssran.bu.h; #lasx.txt xvssran.bu.h mask=0x75068000 #0x75068000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssran.bu.h xrD, xrJ, xrK is op15_31=0xea0d & xrD & xrJ & xrK { xrD = xvssran.bu.h(xrD, xrJ, xrK); } define pcodeop xvssran.hu.w; #lasx.txt xvssran.hu.w mask=0x75070000 #0x75070000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssran.hu.w xrD, xrJ, xrK is op15_31=0xea0e & xrD & xrJ & xrK { xrD = xvssran.hu.w(xrD, xrJ, xrK); } define pcodeop xvssran.wu.d; #lasx.txt xvssran.wu.d mask=0x75078000 #0x75078000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssran.wu.d xrD, xrJ, xrK is op15_31=0xea0f & xrD & xrJ & xrK { xrD = xvssran.wu.d(xrD, xrJ, xrK); } define pcodeop xvssrlrn.bu.h; #lasx.txt xvssrlrn.bu.h mask=0x75088000 #0x75088000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrlrn.bu.h xrD, xrJ, xrK is op15_31=0xea11 & xrD & xrJ & xrK { xrD = xvssrlrn.bu.h(xrD, xrJ, 
xrK); } define pcodeop xvssrlrn.hu.w; #lasx.txt xvssrlrn.hu.w mask=0x75090000 #0x75090000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrlrn.hu.w xrD, xrJ, xrK is op15_31=0xea12 & xrD & xrJ & xrK { xrD = xvssrlrn.hu.w(xrD, xrJ, xrK); } define pcodeop xvssrlrn.wu.d; #lasx.txt xvssrlrn.wu.d mask=0x75098000 #0x75098000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrlrn.wu.d xrD, xrJ, xrK is op15_31=0xea13 & xrD & xrJ & xrK { xrD = xvssrlrn.wu.d(xrD, xrJ, xrK); } define pcodeop xvssrarn.bu.h; #lasx.txt xvssrarn.bu.h mask=0x750a8000 #0x750a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrarn.bu.h xrD, xrJ, xrK is op15_31=0xea15 & xrD & xrJ & xrK { xrD = xvssrarn.bu.h(xrD, xrJ, xrK); } define pcodeop xvssrarn.hu.w; #lasx.txt xvssrarn.hu.w mask=0x750b0000 #0x750b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrarn.hu.w xrD, xrJ, xrK is op15_31=0xea16 & xrD & xrJ & xrK { xrD = xvssrarn.hu.w(xrD, xrJ, xrK); } define pcodeop xvssrarn.wu.d; #lasx.txt xvssrarn.wu.d mask=0x750b8000 #0x750b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvssrarn.wu.d xrD, xrJ, xrK is op15_31=0xea17 & xrD & xrJ & xrK { xrD = xvssrarn.wu.d(xrD, xrJ, xrK); } define pcodeop xvbitclr.b; #lasx.txt xvbitclr.b mask=0x750c0000 #0x750c0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitclr.b xrD, xrJ, xrK is op15_31=0xea18 & xrD & xrJ & xrK { xrD = xvbitclr.b(xrD, xrJ, xrK); } define pcodeop xvbitclr.h; #lasx.txt xvbitclr.h mask=0x750c8000 #0x750c8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitclr.h xrD, xrJ, xrK is op15_31=0xea19 & xrD & xrJ & xrK { xrD = xvbitclr.h(xrD, xrJ, xrK); } define pcodeop xvbitclr.w; #lasx.txt xvbitclr.w mask=0x750d0000 #0x750d0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitclr.w xrD, xrJ, xrK is op15_31=0xea1a & xrD & xrJ & xrK 
{ xrD = xvbitclr.w(xrD, xrJ, xrK); } define pcodeop xvbitclr.d; #lasx.txt xvbitclr.d mask=0x750d8000 #0x750d8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitclr.d xrD, xrJ, xrK is op15_31=0xea1b & xrD & xrJ & xrK { xrD = xvbitclr.d(xrD, xrJ, xrK); } define pcodeop xvbitset.b; #lasx.txt xvbitset.b mask=0x750e0000 #0x750e0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitset.b xrD, xrJ, xrK is op15_31=0xea1c & xrD & xrJ & xrK { xrD = xvbitset.b(xrD, xrJ, xrK); } define pcodeop xvbitset.h; #lasx.txt xvbitset.h mask=0x750e8000 #0x750e8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitset.h xrD, xrJ, xrK is op15_31=0xea1d & xrD & xrJ & xrK { xrD = xvbitset.h(xrD, xrJ, xrK); } define pcodeop xvbitset.w; #lasx.txt xvbitset.w mask=0x750f0000 #0x750f0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitset.w xrD, xrJ, xrK is op15_31=0xea1e & xrD & xrJ & xrK { xrD = xvbitset.w(xrD, xrJ, xrK); } define pcodeop xvbitset.d; #lasx.txt xvbitset.d mask=0x750f8000 #0x750f8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitset.d xrD, xrJ, xrK is op15_31=0xea1f & xrD & xrJ & xrK { xrD = xvbitset.d(xrD, xrJ, xrK); } define pcodeop xvbitrev.b; #lasx.txt xvbitrev.b mask=0x75100000 #0x75100000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitrev.b xrD, xrJ, xrK is op15_31=0xea20 & xrD & xrJ & xrK { xrD = xvbitrev.b(xrD, xrJ, xrK); } define pcodeop xvbitrev.h; #lasx.txt xvbitrev.h mask=0x75108000 #0x75108000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitrev.h xrD, xrJ, xrK is op15_31=0xea21 & xrD & xrJ & xrK { xrD = xvbitrev.h(xrD, xrJ, xrK); } define pcodeop xvbitrev.w; #lasx.txt xvbitrev.w mask=0x75110000 #0x75110000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitrev.w xrD, xrJ, xrK is op15_31=0xea22 & xrD & xrJ & xrK { xrD = xvbitrev.w(xrD, xrJ, 
xrK); } define pcodeop xvbitrev.d; #lasx.txt xvbitrev.d mask=0x75118000 #0x75118000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvbitrev.d xrD, xrJ, xrK is op15_31=0xea23 & xrD & xrJ & xrK { xrD = xvbitrev.d(xrD, xrJ, xrK); } define pcodeop xvpackev.b; #lasx.txt xvpackev.b mask=0x75160000 #0x75160000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackev.b xrD, xrJ, xrK is op15_31=0xea2c & xrD & xrJ & xrK { xrD = xvpackev.b(xrD, xrJ, xrK); } define pcodeop xvpackev.h; #lasx.txt xvpackev.h mask=0x75168000 #0x75168000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackev.h xrD, xrJ, xrK is op15_31=0xea2d & xrD & xrJ & xrK { xrD = xvpackev.h(xrD, xrJ, xrK); } define pcodeop xvpackev.w; #lasx.txt xvpackev.w mask=0x75170000 #0x75170000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackev.w xrD, xrJ, xrK is op15_31=0xea2e & xrD & xrJ & xrK { xrD = xvpackev.w(xrD, xrJ, xrK); } define pcodeop xvpackev.d; #lasx.txt xvpackev.d mask=0x75178000 #0x75178000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackev.d xrD, xrJ, xrK is op15_31=0xea2f & xrD & xrJ & xrK { xrD = xvpackev.d(xrD, xrJ, xrK); } define pcodeop xvpackod.b; #lasx.txt xvpackod.b mask=0x75180000 #0x75180000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackod.b xrD, xrJ, xrK is op15_31=0xea30 & xrD & xrJ & xrK { xrD = xvpackod.b(xrD, xrJ, xrK); } define pcodeop xvpackod.h; #lasx.txt xvpackod.h mask=0x75188000 #0x75188000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackod.h xrD, xrJ, xrK is op15_31=0xea31 & xrD & xrJ & xrK { xrD = xvpackod.h(xrD, xrJ, xrK); } define pcodeop xvpackod.w; #lasx.txt xvpackod.w mask=0x75190000 #0x75190000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackod.w xrD, xrJ, xrK is op15_31=0xea32 & xrD & xrJ & xrK { xrD = xvpackod.w(xrD, xrJ, xrK); } define pcodeop 
xvpackod.d; #lasx.txt xvpackod.d mask=0x75198000 #0x75198000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpackod.d xrD, xrJ, xrK is op15_31=0xea33 & xrD & xrJ & xrK { xrD = xvpackod.d(xrD, xrJ, xrK); } define pcodeop xvilvl.b; #lasx.txt xvilvl.b mask=0x751a0000 #0x751a0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvl.b xrD, xrJ, xrK is op15_31=0xea34 & xrD & xrJ & xrK { xrD = xvilvl.b(xrD, xrJ, xrK); } define pcodeop xvilvl.h; #lasx.txt xvilvl.h mask=0x751a8000 #0x751a8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvl.h xrD, xrJ, xrK is op15_31=0xea35 & xrD & xrJ & xrK { xrD = xvilvl.h(xrD, xrJ, xrK); } define pcodeop xvilvl.w; #lasx.txt xvilvl.w mask=0x751b0000 #0x751b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvl.w xrD, xrJ, xrK is op15_31=0xea36 & xrD & xrJ & xrK { xrD = xvilvl.w(xrD, xrJ, xrK); } define pcodeop xvilvl.d; #lasx.txt xvilvl.d mask=0x751b8000 #0x751b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvl.d xrD, xrJ, xrK is op15_31=0xea37 & xrD & xrJ & xrK { xrD = xvilvl.d(xrD, xrJ, xrK); } define pcodeop xvilvh.b; #lasx.txt xvilvh.b mask=0x751c0000 #0x751c0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvh.b xrD, xrJ, xrK is op15_31=0xea38 & xrD & xrJ & xrK { xrD = xvilvh.b(xrD, xrJ, xrK); } define pcodeop xvilvh.h; #lasx.txt xvilvh.h mask=0x751c8000 #0x751c8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvh.h xrD, xrJ, xrK is op15_31=0xea39 & xrD & xrJ & xrK { xrD = xvilvh.h(xrD, xrJ, xrK); } define pcodeop xvilvh.w; #lasx.txt xvilvh.w mask=0x751d0000 #0x751d0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvh.w xrD, xrJ, xrK is op15_31=0xea3a & xrD & xrJ & xrK { xrD = xvilvh.w(xrD, xrJ, xrK); } define pcodeop xvilvh.d; #lasx.txt xvilvh.d mask=0x751d8000 #0x751d8000 0xffff8000 x0:5,x5:5,x10:5 
['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvilvh.d xrD, xrJ, xrK is op15_31=0xea3b & xrD & xrJ & xrK { xrD = xvilvh.d(xrD, xrJ, xrK); } define pcodeop xvpickev.b; #lasx.txt xvpickev.b mask=0x751e0000 #0x751e0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickev.b xrD, xrJ, xrK is op15_31=0xea3c & xrD & xrJ & xrK { xrD = xvpickev.b(xrD, xrJ, xrK); } define pcodeop xvpickev.h; #lasx.txt xvpickev.h mask=0x751e8000 #0x751e8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickev.h xrD, xrJ, xrK is op15_31=0xea3d & xrD & xrJ & xrK { xrD = xvpickev.h(xrD, xrJ, xrK); } define pcodeop xvpickev.w; #lasx.txt xvpickev.w mask=0x751f0000 #0x751f0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickev.w xrD, xrJ, xrK is op15_31=0xea3e & xrD & xrJ & xrK { xrD = xvpickev.w(xrD, xrJ, xrK); } define pcodeop xvpickev.d; #lasx.txt xvpickev.d mask=0x751f8000 #0x751f8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickev.d xrD, xrJ, xrK is op15_31=0xea3f & xrD & xrJ & xrK { xrD = xvpickev.d(xrD, xrJ, xrK); } define pcodeop xvpickod.b; #lasx.txt xvpickod.b mask=0x75200000 #0x75200000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickod.b xrD, xrJ, xrK is op15_31=0xea40 & xrD & xrJ & xrK { xrD = xvpickod.b(xrD, xrJ, xrK); } define pcodeop xvpickod.h; #lasx.txt xvpickod.h mask=0x75208000 #0x75208000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickod.h xrD, xrJ, xrK is op15_31=0xea41 & xrD & xrJ & xrK { xrD = xvpickod.h(xrD, xrJ, xrK); } define pcodeop xvpickod.w; #lasx.txt xvpickod.w mask=0x75210000 #0x75210000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvpickod.w xrD, xrJ, xrK is op15_31=0xea42 & xrD & xrJ & xrK { xrD = xvpickod.w(xrD, xrJ, xrK); } define pcodeop xvpickod.d; #lasx.txt xvpickod.d mask=0x75218000 #0x75218000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 
'xreg10_5_s0'] :xvpickod.d xrD, xrJ, xrK is op15_31=0xea43 & xrD & xrJ & xrK { xrD = xvpickod.d(xrD, xrJ, xrK); } define pcodeop xvreplve.b; #lasx.txt xvreplve.b mask=0x75220000 #0x75220000 0xffff8000 x0:5,x5:5, r10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'reg10_5_s0'] :xvreplve.b xrD, xrJ, RKsrc is op15_31=0xea44 & xrD & xrJ & RKsrc { xrD = xvreplve.b(xrD, xrJ, RKsrc); } define pcodeop xvreplve.h; #lasx.txt xvreplve.h mask=0x75228000 #0x75228000 0xffff8000 x0:5,x5:5, r10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'reg10_5_s0'] :xvreplve.h xrD, xrJ, RKsrc is op15_31=0xea45 & xrD & xrJ & RKsrc { xrD = xvreplve.h(xrD, xrJ, RKsrc); } define pcodeop xvreplve.w; #lasx.txt xvreplve.w mask=0x75230000 #0x75230000 0xffff8000 x0:5,x5:5, r10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'reg10_5_s0'] :xvreplve.w xrD, xrJ, RKsrc is op15_31=0xea46 & xrD & xrJ & RKsrc { xrD = xvreplve.w(xrD, xrJ, RKsrc); } define pcodeop xvreplve.d; #lasx.txt xvreplve.d mask=0x75238000 #0x75238000 0xffff8000 x0:5,x5:5, r10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'reg10_5_s0'] :xvreplve.d xrD, xrJ, RKsrc is op15_31=0xea47 & xrD & xrJ & RKsrc { xrD = xvreplve.d(xrD, xrJ, RKsrc); } define pcodeop xvand.v; #lasx.txt xvand.v mask=0x75260000 #0x75260000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvand.v xrD, xrJ, xrK is op15_31=0xea4c & xrD & xrJ & xrK { xrD = xvand.v(xrD, xrJ, xrK); } define pcodeop xvor.v; #lasx.txt xvor.v mask=0x75268000 #0x75268000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvor.v xrD, xrJ, xrK is op15_31=0xea4d & xrD & xrJ & xrK { xrD = xvor.v(xrD, xrJ, xrK); } define pcodeop xvxor.v; #lasx.txt xvxor.v mask=0x75270000 #0x75270000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvxor.v xrD, xrJ, xrK is op15_31=0xea4e & xrD & xrJ & xrK { xrD = xvxor.v(xrD, xrJ, xrK); } define pcodeop xvnor.v; #lasx.txt xvnor.v mask=0x75278000 #0x75278000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvnor.v xrD, xrJ, xrK is 
op15_31=0xea4f & xrD & xrJ & xrK { xrD = xvnor.v(xrD, xrJ, xrK); } define pcodeop xvandn.v; #lasx.txt xvandn.v mask=0x75280000 #0x75280000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvandn.v xrD, xrJ, xrK is op15_31=0xea50 & xrD & xrJ & xrK { xrD = xvandn.v(xrD, xrJ, xrK); } define pcodeop xvorn.v; #lasx.txt xvorn.v mask=0x75288000 #0x75288000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvorn.v xrD, xrJ, xrK is op15_31=0xea51 & xrD & xrJ & xrK { xrD = xvorn.v(xrD, xrJ, xrK); } define pcodeop xvfrstp.b; #lasx.txt xvfrstp.b mask=0x752b0000 #0x752b0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfrstp.b xrD, xrJ, xrK is op15_31=0xea56 & xrD & xrJ & xrK { xrD = xvfrstp.b(xrD, xrJ, xrK); } define pcodeop xvfrstp.h; #lasx.txt xvfrstp.h mask=0x752b8000 #0x752b8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfrstp.h xrD, xrJ, xrK is op15_31=0xea57 & xrD & xrJ & xrK { xrD = xvfrstp.h(xrD, xrJ, xrK); } define pcodeop xvadd.q; #lasx.txt xvadd.q mask=0x752d0000 #0x752d0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvadd.q xrD, xrJ, xrK is op15_31=0xea5a & xrD & xrJ & xrK { xrD = xvadd.q(xrD, xrJ, xrK); } define pcodeop xvsub.q; #lasx.txt xvsub.q mask=0x752d8000 #0x752d8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsub.q xrD, xrJ, xrK is op15_31=0xea5b & xrD & xrJ & xrK { xrD = xvsub.q(xrD, xrJ, xrK); } define pcodeop xvsigncov.b; #lasx.txt xvsigncov.b mask=0x752e0000 #0x752e0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsigncov.b xrD, xrJ, xrK is op15_31=0xea5c & xrD & xrJ & xrK { xrD = xvsigncov.b(xrD, xrJ, xrK); } define pcodeop xvsigncov.h; #lasx.txt xvsigncov.h mask=0x752e8000 #0x752e8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsigncov.h xrD, xrJ, xrK is op15_31=0xea5d & xrD & xrJ & xrK { xrD = xvsigncov.h(xrD, xrJ, xrK); } define 
pcodeop xvsigncov.w; #lasx.txt xvsigncov.w mask=0x752f0000 #0x752f0000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsigncov.w xrD, xrJ, xrK is op15_31=0xea5e & xrD & xrJ & xrK { xrD = xvsigncov.w(xrD, xrJ, xrK); } define pcodeop xvsigncov.d; #lasx.txt xvsigncov.d mask=0x752f8000 #0x752f8000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvsigncov.d xrD, xrJ, xrK is op15_31=0xea5f & xrD & xrJ & xrK { xrD = xvsigncov.d(xrD, xrJ, xrK); } define pcodeop xvfadd.s; #lasx.txt xvfadd.s mask=0x75308000 #0x75308000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfadd.s xrD, xrJ, xrK is op15_31=0xea61 & xrD & xrJ & xrK { xrD = xvfadd.s(xrD, xrJ, xrK); } define pcodeop xvfadd.d; #lasx.txt xvfadd.d mask=0x75310000 #0x75310000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfadd.d xrD, xrJ, xrK is op15_31=0xea62 & xrD & xrJ & xrK { xrD = xvfadd.d(xrD, xrJ, xrK); } define pcodeop xvfsub.s; #lasx.txt xvfsub.s mask=0x75328000 #0x75328000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfsub.s xrD, xrJ, xrK is op15_31=0xea65 & xrD & xrJ & xrK { xrD = xvfsub.s(xrD, xrJ, xrK); } define pcodeop xvfsub.d; #lasx.txt xvfsub.d mask=0x75330000 #0x75330000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfsub.d xrD, xrJ, xrK is op15_31=0xea66 & xrD & xrJ & xrK { xrD = xvfsub.d(xrD, xrJ, xrK); } define pcodeop xvfmul.s; #lasx.txt xvfmul.s mask=0x75388000 #0x75388000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfmul.s xrD, xrJ, xrK is op15_31=0xea71 & xrD & xrJ & xrK { xrD = xvfmul.s(xrD, xrJ, xrK); } define pcodeop xvfmul.d; #lasx.txt xvfmul.d mask=0x75390000 #0x75390000 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0'] :xvfmul.d xrD, xrJ, xrK is op15_31=0xea72 & xrD & xrJ & xrK { xrD = xvfmul.d(xrD, xrJ, xrK); } define pcodeop xvfdiv.s; #lasx.txt xvfdiv.s mask=0x753a8000 #0x753a8000 
# 0xffff8000 x0:5,x5:5,x10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'xreg10_5_s0']
:xvfdiv.s xrD, xrJ, xrK is op15_31=0xea75 & xrD & xrJ & xrK { xrD = xvfdiv.s(xrD, xrJ, xrK); }
# ---------------------------------------------------------------------------
# Auto-generated LoongArch LASX (256-bit SIMD) constructors, derived from
# lasx.txt.  Each instruction is modeled as an opaque pcodeop: the
# constructor matches the fixed opcode field (op15_31 or op10_31) together
# with the register/immediate operand subconstructors and assigns the
# pcodeop's result to the destination (xrD, or fccD for the vector
# set-condition forms).  NOTE(review): the destination is also passed as the
# first input argument, so the old xrD/fccD value reaches the black-box op.
# The per-instruction "mask=" comments preserve the fixed encoding bits from
# the generator; the full decode masks were 0xffff8000 (op15_31 forms) and
# 0xfffffc00 / 0xfffffc18 (op10_31 forms) per the original comments.
# ---------------------------------------------------------------------------
define pcodeop xvfdiv.d; #lasx.txt xvfdiv.d mask=0x753b0000
:xvfdiv.d xrD, xrJ, xrK is op15_31=0xea76 & xrD & xrJ & xrK { xrD = xvfdiv.d(xrD, xrJ, xrK); }
define pcodeop xvfmax.s; #lasx.txt xvfmax.s mask=0x753c8000
:xvfmax.s xrD, xrJ, xrK is op15_31=0xea79 & xrD & xrJ & xrK { xrD = xvfmax.s(xrD, xrJ, xrK); }
define pcodeop xvfmax.d; #lasx.txt xvfmax.d mask=0x753d0000
:xvfmax.d xrD, xrJ, xrK is op15_31=0xea7a & xrD & xrJ & xrK { xrD = xvfmax.d(xrD, xrJ, xrK); }
define pcodeop xvfmin.s; #lasx.txt xvfmin.s mask=0x753e8000
:xvfmin.s xrD, xrJ, xrK is op15_31=0xea7d & xrD & xrJ & xrK { xrD = xvfmin.s(xrD, xrJ, xrK); }
define pcodeop xvfmin.d; #lasx.txt xvfmin.d mask=0x753f0000
:xvfmin.d xrD, xrJ, xrK is op15_31=0xea7e & xrD & xrJ & xrK { xrD = xvfmin.d(xrD, xrJ, xrK); }
define pcodeop xvfmaxa.s; #lasx.txt xvfmaxa.s mask=0x75408000
:xvfmaxa.s xrD, xrJ, xrK is op15_31=0xea81 & xrD & xrJ & xrK { xrD = xvfmaxa.s(xrD, xrJ, xrK); }
define pcodeop xvfmaxa.d; #lasx.txt xvfmaxa.d mask=0x75410000
:xvfmaxa.d xrD, xrJ, xrK is op15_31=0xea82 & xrD & xrJ & xrK { xrD = xvfmaxa.d(xrD, xrJ, xrK); }
define pcodeop xvfmina.s; #lasx.txt xvfmina.s mask=0x75428000
:xvfmina.s xrD, xrJ, xrK is op15_31=0xea85 & xrD & xrJ & xrK { xrD = xvfmina.s(xrD, xrJ, xrK); }
define pcodeop xvfmina.d; #lasx.txt xvfmina.d mask=0x75430000
:xvfmina.d xrD, xrJ, xrK is op15_31=0xea86 & xrD & xrJ & xrK { xrD = xvfmina.d(xrD, xrJ, xrK); }
define pcodeop xvfcvt.h.s; #lasx.txt xvfcvt.h.s mask=0x75460000
:xvfcvt.h.s xrD, xrJ, xrK is op15_31=0xea8c & xrD & xrJ & xrK { xrD = xvfcvt.h.s(xrD, xrJ, xrK); }
define pcodeop xvfcvt.s.d; #lasx.txt xvfcvt.s.d mask=0x75468000
:xvfcvt.s.d xrD, xrJ, xrK is op15_31=0xea8d & xrD & xrJ & xrK { xrD = xvfcvt.s.d(xrD, xrJ, xrK); }
define pcodeop xvffint.s.l; #lasx.txt xvffint.s.l mask=0x75480000
:xvffint.s.l xrD, xrJ, xrK is op15_31=0xea90 & xrD & xrJ & xrK { xrD = xvffint.s.l(xrD, xrJ, xrK); }
define pcodeop xvftint.w.d; #lasx.txt xvftint.w.d mask=0x75498000
:xvftint.w.d xrD, xrJ, xrK is op15_31=0xea93 & xrD & xrJ & xrK { xrD = xvftint.w.d(xrD, xrJ, xrK); }
define pcodeop xvftintrm.w.d; #lasx.txt xvftintrm.w.d mask=0x754a0000
:xvftintrm.w.d xrD, xrJ, xrK is op15_31=0xea94 & xrD & xrJ & xrK { xrD = xvftintrm.w.d(xrD, xrJ, xrK); }
define pcodeop xvftintrp.w.d; #lasx.txt xvftintrp.w.d mask=0x754a8000
:xvftintrp.w.d xrD, xrJ, xrK is op15_31=0xea95 & xrD & xrJ & xrK { xrD = xvftintrp.w.d(xrD, xrJ, xrK); }
define pcodeop xvftintrz.w.d; #lasx.txt xvftintrz.w.d mask=0x754b0000
:xvftintrz.w.d xrD, xrJ, xrK is op15_31=0xea96 & xrD & xrJ & xrK { xrD = xvftintrz.w.d(xrD, xrJ, xrK); }
define pcodeop xvftintrne.w.d; #lasx.txt xvftintrne.w.d mask=0x754b8000
:xvftintrne.w.d xrD, xrJ, xrK is op15_31=0xea97 & xrD & xrJ & xrK { xrD = xvftintrne.w.d(xrD, xrJ, xrK); }
define pcodeop xvshuf.h; #lasx.txt xvshuf.h mask=0x757a8000
:xvshuf.h xrD, xrJ, xrK is op15_31=0xeaf5 & xrD & xrJ & xrK { xrD = xvshuf.h(xrD, xrJ, xrK); }
define pcodeop xvshuf.w; #lasx.txt xvshuf.w mask=0x757b0000
:xvshuf.w xrD, xrJ, xrK is op15_31=0xeaf6 & xrD & xrJ & xrK { xrD = xvshuf.w(xrD, xrJ, xrK); }
define pcodeop xvshuf.d; #lasx.txt xvshuf.d mask=0x757b8000
:xvshuf.d xrD, xrJ, xrK is op15_31=0xeaf7 & xrD & xrJ & xrK { xrD = xvshuf.d(xrD, xrJ, xrK); }
define pcodeop xvperm.w; #lasx.txt xvperm.w mask=0x757d0000
:xvperm.w xrD, xrJ, xrK is op15_31=0xeafa & xrD & xrJ & xrK { xrD = xvperm.w(xrD, xrJ, xrK); }
# -- vector compare / arithmetic with 5-bit signed (simm10_5) or unsigned
# -- (imm10_5) immediate; immediate is zero/sign-extended to $(REGSIZE) --
define pcodeop xvseqi.b; #lasx.txt xvseqi.b mask=0x76800000
:xvseqi.b xrD, xrJ,simm10_5 is op15_31=0xed00 & xrD & xrJ & simm10_5 { xrD = xvseqi.b(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvseqi.h; #lasx.txt xvseqi.h mask=0x76808000
:xvseqi.h xrD, xrJ,simm10_5 is op15_31=0xed01 & xrD & xrJ & simm10_5 { xrD = xvseqi.h(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvseqi.w; #lasx.txt xvseqi.w mask=0x76810000
:xvseqi.w xrD, xrJ,simm10_5 is op15_31=0xed02 & xrD & xrJ & simm10_5 { xrD = xvseqi.w(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvseqi.d; #lasx.txt xvseqi.d mask=0x76818000
:xvseqi.d xrD, xrJ,simm10_5 is op15_31=0xed03 & xrD & xrJ & simm10_5 { xrD = xvseqi.d(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslei.b; #lasx.txt xvslei.b mask=0x76820000
:xvslei.b xrD, xrJ,simm10_5 is op15_31=0xed04 & xrD & xrJ & simm10_5 { xrD = xvslei.b(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslei.h; #lasx.txt xvslei.h mask=0x76828000
:xvslei.h xrD, xrJ,simm10_5 is op15_31=0xed05 & xrD & xrJ & simm10_5 { xrD = xvslei.h(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslei.w; #lasx.txt xvslei.w mask=0x76830000
:xvslei.w xrD, xrJ,simm10_5 is op15_31=0xed06 & xrD & xrJ & simm10_5 { xrD = xvslei.w(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslei.d; #lasx.txt xvslei.d mask=0x76838000
:xvslei.d xrD, xrJ,simm10_5 is op15_31=0xed07 & xrD & xrJ & simm10_5 { xrD = xvslei.d(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslei.bu; #lasx.txt xvslei.bu mask=0x76840000
:xvslei.bu xrD, xrJ, imm10_5 is op15_31=0xed08 & xrD & xrJ & imm10_5 { xrD = xvslei.bu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslei.hu; #lasx.txt xvslei.hu mask=0x76848000
:xvslei.hu xrD, xrJ, imm10_5 is op15_31=0xed09 & xrD & xrJ & imm10_5 { xrD = xvslei.hu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslei.wu; #lasx.txt xvslei.wu mask=0x76850000
:xvslei.wu xrD, xrJ, imm10_5 is op15_31=0xed0a & xrD & xrJ & imm10_5 { xrD = xvslei.wu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslei.du; #lasx.txt xvslei.du mask=0x76858000
:xvslei.du xrD, xrJ, imm10_5 is op15_31=0xed0b & xrD & xrJ & imm10_5 { xrD = xvslei.du(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslti.b; #lasx.txt xvslti.b mask=0x76860000
:xvslti.b xrD, xrJ,simm10_5 is op15_31=0xed0c & xrD & xrJ & simm10_5 { xrD = xvslti.b(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslti.h; #lasx.txt xvslti.h mask=0x76868000
:xvslti.h xrD, xrJ,simm10_5 is op15_31=0xed0d & xrD & xrJ & simm10_5 { xrD = xvslti.h(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslti.w; #lasx.txt xvslti.w mask=0x76870000
:xvslti.w xrD, xrJ,simm10_5 is op15_31=0xed0e & xrD & xrJ & simm10_5 { xrD = xvslti.w(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslti.d; #lasx.txt xvslti.d mask=0x76878000
:xvslti.d xrD, xrJ,simm10_5 is op15_31=0xed0f & xrD & xrJ & simm10_5 { xrD = xvslti.d(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvslti.bu; #lasx.txt xvslti.bu mask=0x76880000
:xvslti.bu xrD, xrJ, imm10_5 is op15_31=0xed10 & xrD & xrJ & imm10_5 { xrD = xvslti.bu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslti.hu; #lasx.txt xvslti.hu mask=0x76888000
:xvslti.hu xrD, xrJ, imm10_5 is op15_31=0xed11 & xrD & xrJ & imm10_5 { xrD = xvslti.hu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslti.wu; #lasx.txt xvslti.wu mask=0x76890000
:xvslti.wu xrD, xrJ, imm10_5 is op15_31=0xed12 & xrD & xrJ & imm10_5 { xrD = xvslti.wu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvslti.du; #lasx.txt xvslti.du mask=0x76898000
:xvslti.du xrD, xrJ, imm10_5 is op15_31=0xed13 & xrD & xrJ & imm10_5 { xrD = xvslti.du(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvaddi.bu; #lasx.txt xvaddi.bu mask=0x768a0000
:xvaddi.bu xrD, xrJ, imm10_5 is op15_31=0xed14 & xrD & xrJ & imm10_5 { xrD = xvaddi.bu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvaddi.hu; #lasx.txt xvaddi.hu mask=0x768a8000
:xvaddi.hu xrD, xrJ, imm10_5 is op15_31=0xed15 & xrD & xrJ & imm10_5 { xrD = xvaddi.hu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvaddi.wu; #lasx.txt xvaddi.wu mask=0x768b0000
:xvaddi.wu xrD, xrJ, imm10_5 is op15_31=0xed16 & xrD & xrJ & imm10_5 { xrD = xvaddi.wu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvaddi.du; #lasx.txt xvaddi.du mask=0x768b8000
:xvaddi.du xrD, xrJ, imm10_5 is op15_31=0xed17 & xrD & xrJ & imm10_5 { xrD = xvaddi.du(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvsubi.bu; #lasx.txt xvsubi.bu mask=0x768c0000
:xvsubi.bu xrD, xrJ, imm10_5 is op15_31=0xed18 & xrD & xrJ & imm10_5 { xrD = xvsubi.bu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvsubi.hu; #lasx.txt xvsubi.hu mask=0x768c8000
:xvsubi.hu xrD, xrJ, imm10_5 is op15_31=0xed19 & xrD & xrJ & imm10_5 { xrD = xvsubi.hu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvsubi.wu; #lasx.txt xvsubi.wu mask=0x768d0000
:xvsubi.wu xrD, xrJ, imm10_5 is op15_31=0xed1a & xrD & xrJ & imm10_5 { xrD = xvsubi.wu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvsubi.du; #lasx.txt xvsubi.du mask=0x768d8000
:xvsubi.du xrD, xrJ, imm10_5 is op15_31=0xed1b & xrD & xrJ & imm10_5 { xrD = xvsubi.du(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvbsll.v; #lasx.txt xvbsll.v mask=0x768e0000
:xvbsll.v xrD, xrJ, imm10_5 is op15_31=0xed1c & xrD & xrJ & imm10_5 { xrD = xvbsll.v(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvbsrl.v; #lasx.txt xvbsrl.v mask=0x768e8000
:xvbsrl.v xrD, xrJ, imm10_5 is op15_31=0xed1d & xrD & xrJ & imm10_5 { xrD = xvbsrl.v(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.b; #lasx.txt xvmaxi.b mask=0x76900000
:xvmaxi.b xrD, xrJ,simm10_5 is op15_31=0xed20 & xrD & xrJ & simm10_5 { xrD = xvmaxi.b(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.h; #lasx.txt xvmaxi.h mask=0x76908000
:xvmaxi.h xrD, xrJ,simm10_5 is op15_31=0xed21 & xrD & xrJ & simm10_5 { xrD = xvmaxi.h(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.w; #lasx.txt xvmaxi.w mask=0x76910000
:xvmaxi.w xrD, xrJ,simm10_5 is op15_31=0xed22 & xrD & xrJ & simm10_5 { xrD = xvmaxi.w(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.d; #lasx.txt xvmaxi.d mask=0x76918000
:xvmaxi.d xrD, xrJ,simm10_5 is op15_31=0xed23 & xrD & xrJ & simm10_5 { xrD = xvmaxi.d(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmini.b; #lasx.txt xvmini.b mask=0x76920000
:xvmini.b xrD, xrJ,simm10_5 is op15_31=0xed24 & xrD & xrJ & simm10_5 { xrD = xvmini.b(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmini.h; #lasx.txt xvmini.h mask=0x76928000
:xvmini.h xrD, xrJ,simm10_5 is op15_31=0xed25 & xrD & xrJ & simm10_5 { xrD = xvmini.h(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmini.w; #lasx.txt xvmini.w mask=0x76930000
:xvmini.w xrD, xrJ,simm10_5 is op15_31=0xed26 & xrD & xrJ & simm10_5 { xrD = xvmini.w(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmini.d; #lasx.txt xvmini.d mask=0x76938000
:xvmini.d xrD, xrJ,simm10_5 is op15_31=0xed27 & xrD & xrJ & simm10_5 { xrD = xvmini.d(xrD, xrJ, simm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.bu; #lasx.txt xvmaxi.bu mask=0x76940000
:xvmaxi.bu xrD, xrJ, imm10_5 is op15_31=0xed28 & xrD & xrJ & imm10_5 { xrD = xvmaxi.bu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.hu; #lasx.txt xvmaxi.hu mask=0x76948000
:xvmaxi.hu xrD, xrJ, imm10_5 is op15_31=0xed29 & xrD & xrJ & imm10_5 { xrD = xvmaxi.hu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.wu; #lasx.txt xvmaxi.wu mask=0x76950000
:xvmaxi.wu xrD, xrJ, imm10_5 is op15_31=0xed2a & xrD & xrJ & imm10_5 { xrD = xvmaxi.wu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmaxi.du; #lasx.txt xvmaxi.du mask=0x76958000
:xvmaxi.du xrD, xrJ, imm10_5 is op15_31=0xed2b & xrD & xrJ & imm10_5 { xrD = xvmaxi.du(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmini.bu; #lasx.txt xvmini.bu mask=0x76960000
:xvmini.bu xrD, xrJ, imm10_5 is op15_31=0xed2c & xrD & xrJ & imm10_5 { xrD = xvmini.bu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmini.hu; #lasx.txt xvmini.hu mask=0x76968000
:xvmini.hu xrD, xrJ, imm10_5 is op15_31=0xed2d & xrD & xrJ & imm10_5 { xrD = xvmini.hu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmini.wu; #lasx.txt xvmini.wu mask=0x76970000
:xvmini.wu xrD, xrJ, imm10_5 is op15_31=0xed2e & xrD & xrJ & imm10_5 { xrD = xvmini.wu(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvmini.du; #lasx.txt xvmini.du mask=0x76978000
:xvmini.du xrD, xrJ, imm10_5 is op15_31=0xed2f & xrD & xrJ & imm10_5 { xrD = xvmini.du(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvfrstpi.b; #lasx.txt xvfrstpi.b mask=0x769a0000
:xvfrstpi.b xrD, xrJ, imm10_5 is op15_31=0xed34 & xrD & xrJ & imm10_5 { xrD = xvfrstpi.b(xrD, xrJ, imm10_5:$(REGSIZE)); }
define pcodeop xvfrstpi.h; #lasx.txt xvfrstpi.h mask=0x769a8000
:xvfrstpi.h xrD, xrJ, imm10_5 is op15_31=0xed35 & xrD & xrJ & imm10_5 { xrD = xvfrstpi.h(xrD, xrJ, imm10_5:$(REGSIZE)); }
# -- two-operand forms (op10_31, 22-bit opcode field) --
define pcodeop xvclo.b; #lasx.txt xvclo.b mask=0x769c0000
:xvclo.b xrD, xrJ is op10_31=0x1da700 & xrD & xrJ { xrD = xvclo.b(xrD, xrJ); }
define pcodeop xvclo.h; #lasx.txt xvclo.h mask=0x769c0400
:xvclo.h xrD, xrJ is op10_31=0x1da701 & xrD & xrJ { xrD = xvclo.h(xrD, xrJ); }
define pcodeop xvclo.w; #lasx.txt xvclo.w mask=0x769c0800
:xvclo.w xrD, xrJ is op10_31=0x1da702 & xrD & xrJ { xrD = xvclo.w(xrD, xrJ); }
define pcodeop xvclo.d; #lasx.txt xvclo.d mask=0x769c0c00
:xvclo.d xrD, xrJ is op10_31=0x1da703 & xrD & xrJ { xrD = xvclo.d(xrD, xrJ); }
define pcodeop xvclz.b; #lasx.txt xvclz.b mask=0x769c1000
:xvclz.b xrD, xrJ is op10_31=0x1da704 & xrD & xrJ { xrD = xvclz.b(xrD, xrJ); }
define pcodeop xvclz.h; #lasx.txt xvclz.h mask=0x769c1400
:xvclz.h xrD, xrJ is op10_31=0x1da705 & xrD & xrJ { xrD = xvclz.h(xrD, xrJ); }
define pcodeop xvclz.w; #lasx.txt xvclz.w mask=0x769c1800
:xvclz.w xrD, xrJ is op10_31=0x1da706 & xrD & xrJ { xrD = xvclz.w(xrD, xrJ); }
define pcodeop xvclz.d; #lasx.txt xvclz.d mask=0x769c1c00
:xvclz.d xrD, xrJ is op10_31=0x1da707 & xrD & xrJ { xrD = xvclz.d(xrD, xrJ); }
define pcodeop xvpcnt.b; #lasx.txt xvpcnt.b mask=0x769c2000
:xvpcnt.b xrD, xrJ is op10_31=0x1da708 & xrD & xrJ { xrD = xvpcnt.b(xrD, xrJ); }
define pcodeop xvpcnt.h; #lasx.txt xvpcnt.h mask=0x769c2400
:xvpcnt.h xrD, xrJ is op10_31=0x1da709 & xrD & xrJ { xrD = xvpcnt.h(xrD, xrJ); }
define pcodeop xvpcnt.w; #lasx.txt xvpcnt.w mask=0x769c2800
:xvpcnt.w xrD, xrJ is op10_31=0x1da70a & xrD & xrJ { xrD = xvpcnt.w(xrD, xrJ); }
define pcodeop xvpcnt.d; #lasx.txt xvpcnt.d mask=0x769c2c00
:xvpcnt.d xrD, xrJ is op10_31=0x1da70b & xrD & xrJ { xrD = xvpcnt.d(xrD, xrJ); }
define pcodeop xvneg.b; #lasx.txt xvneg.b mask=0x769c3000
:xvneg.b xrD, xrJ is op10_31=0x1da70c & xrD & xrJ { xrD = xvneg.b(xrD, xrJ); }
define pcodeop xvneg.h; #lasx.txt xvneg.h mask=0x769c3400
:xvneg.h xrD, xrJ is op10_31=0x1da70d & xrD & xrJ { xrD = xvneg.h(xrD, xrJ); }
define pcodeop xvneg.w; #lasx.txt xvneg.w mask=0x769c3800
:xvneg.w xrD, xrJ is op10_31=0x1da70e & xrD & xrJ { xrD = xvneg.w(xrD, xrJ); }
define pcodeop xvneg.d; #lasx.txt xvneg.d mask=0x769c3c00
:xvneg.d xrD, xrJ is op10_31=0x1da70f & xrD & xrJ { xrD = xvneg.d(xrD, xrJ); }
define pcodeop xvmskltz.b; #lasx.txt xvmskltz.b mask=0x769c4000
:xvmskltz.b xrD, xrJ is op10_31=0x1da710 & xrD & xrJ { xrD = xvmskltz.b(xrD, xrJ); }
define pcodeop xvmskltz.h; #lasx.txt xvmskltz.h mask=0x769c4400
:xvmskltz.h xrD, xrJ is op10_31=0x1da711 & xrD & xrJ { xrD = xvmskltz.h(xrD, xrJ); }
define pcodeop xvmskltz.w; #lasx.txt xvmskltz.w mask=0x769c4800
:xvmskltz.w xrD, xrJ is op10_31=0x1da712 & xrD & xrJ { xrD = xvmskltz.w(xrD, xrJ); }
define pcodeop xvmskltz.d; #lasx.txt xvmskltz.d mask=0x769c4c00
:xvmskltz.d xrD, xrJ is op10_31=0x1da713 & xrD & xrJ { xrD = xvmskltz.d(xrD, xrJ); }
define pcodeop xvmskgez.b; #lasx.txt xvmskgez.b mask=0x769c5000
:xvmskgez.b xrD, xrJ is op10_31=0x1da714 & xrD & xrJ { xrD = xvmskgez.b(xrD, xrJ); }
define pcodeop xvmsknz.b; #lasx.txt xvmsknz.b mask=0x769c6000
:xvmsknz.b xrD, xrJ is op10_31=0x1da718 & xrD & xrJ { xrD = xvmsknz.b(xrD, xrJ); }
# -- vector set-condition forms: destination is a condition flag (fccD) --
define pcodeop xvseteqz.v; #lasx.txt xvseteqz.v mask=0x769c9800
:xvseteqz.v fccD, xrJ is op10_31=0x1da726 & fccD & xrJ { fccD = xvseteqz.v(fccD, xrJ); }
define pcodeop xvsetnez.v; #lasx.txt xvsetnez.v mask=0x769c9c00
:xvsetnez.v fccD, xrJ is op10_31=0x1da727 & fccD & xrJ { fccD = xvsetnez.v(fccD, xrJ); }
define pcodeop xvsetanyeqz.b; #lasx.txt xvsetanyeqz.b mask=0x769ca000
:xvsetanyeqz.b fccD, xrJ is op10_31=0x1da728 & fccD & xrJ { fccD = xvsetanyeqz.b(fccD, xrJ); }
define pcodeop xvsetanyeqz.h; #lasx.txt xvsetanyeqz.h mask=0x769ca400
:xvsetanyeqz.h fccD, xrJ is op10_31=0x1da729 & fccD & xrJ { fccD = xvsetanyeqz.h(fccD, xrJ); }
define pcodeop xvsetanyeqz.w; #lasx.txt xvsetanyeqz.w mask=0x769ca800
:xvsetanyeqz.w fccD, xrJ is op10_31=0x1da72a & fccD & xrJ { fccD = xvsetanyeqz.w(fccD, xrJ); }
define pcodeop xvsetanyeqz.d; #lasx.txt xvsetanyeqz.d mask=0x769cac00
:xvsetanyeqz.d fccD, xrJ is op10_31=0x1da72b & fccD & xrJ { fccD = xvsetanyeqz.d(fccD, xrJ); }
define pcodeop xvsetallnez.b; #lasx.txt xvsetallnez.b mask=0x769cb000
:xvsetallnez.b fccD, xrJ is op10_31=0x1da72c & fccD & xrJ { fccD = xvsetallnez.b(fccD, xrJ); }
define pcodeop xvsetallnez.h; #lasx.txt xvsetallnez.h mask=0x769cb400
:xvsetallnez.h fccD, xrJ is op10_31=0x1da72d & fccD & xrJ { fccD = xvsetallnez.h(fccD, xrJ); }
define pcodeop xvsetallnez.w; #lasx.txt xvsetallnez.w mask=0x769cb800
:xvsetallnez.w fccD, xrJ is op10_31=0x1da72e & fccD & xrJ { fccD = xvsetallnez.w(fccD, xrJ); }
define pcodeop xvsetallnez.d; #lasx.txt xvsetallnez.d mask=0x769cbc00
:xvsetallnez.d fccD, xrJ is op10_31=0x1da72f & fccD & xrJ { fccD = xvsetallnez.d(fccD, xrJ); }
define pcodeop xvflogb.s; #lasx.txt xvflogb.s mask=0x769cc400
:xvflogb.s xrD, xrJ is op10_31=0x1da731 & xrD & xrJ { xrD = xvflogb.s(xrD, xrJ); }
define pcodeop xvflogb.d; #lasx.txt xvflogb.d mask=0x769cc800
:xvflogb.d xrD, xrJ is op10_31=0x1da732 & xrD & xrJ { xrD = xvflogb.d(xrD, xrJ); }
define pcodeop xvfclass.s; #lasx.txt xvfclass.s mask=0x769cd400
:xvfclass.s xrD, xrJ is op10_31=0x1da735 & xrD & xrJ { xrD = xvfclass.s(xrD, xrJ); }
define pcodeop xvfclass.d; #lasx.txt xvfclass.d mask=0x769cd800
:xvfclass.d xrD, xrJ is op10_31=0x1da736 & xrD & xrJ { xrD = xvfclass.d(xrD, xrJ); }
define pcodeop xvfsqrt.s; #lasx.txt xvfsqrt.s mask=0x769ce400
:xvfsqrt.s xrD, xrJ is op10_31=0x1da739 & xrD & xrJ { xrD = xvfsqrt.s(xrD, xrJ); }
define pcodeop xvfsqrt.d; #lasx.txt xvfsqrt.d mask=0x769ce800
:xvfsqrt.d xrD, xrJ is op10_31=0x1da73a & xrD & xrJ { xrD = xvfsqrt.d(xrD, xrJ); }
define pcodeop xvfrecip.s; #lasx.txt xvfrecip.s mask=0x769cf400
:xvfrecip.s xrD, xrJ is op10_31=0x1da73d & xrD & xrJ { xrD = xvfrecip.s(xrD, xrJ); }
define pcodeop xvfrecip.d; #lasx.txt xvfrecip.d mask=0x769cf800
:xvfrecip.d xrD, xrJ is op10_31=0x1da73e & xrD & xrJ { xrD = xvfrecip.d(xrD, xrJ); }
define pcodeop xvfrsqrt.s; #lasx.txt xvfrsqrt.s mask=0x769d0400
:xvfrsqrt.s xrD, xrJ is op10_31=0x1da741 & xrD & xrJ { xrD = xvfrsqrt.s(xrD, xrJ); }
define pcodeop xvfrsqrt.d; #lasx.txt xvfrsqrt.d mask=0x769d0800
:xvfrsqrt.d xrD, xrJ is op10_31=0x1da742 & xrD & xrJ { xrD = xvfrsqrt.d(xrD, xrJ); }
define pcodeop xvfrint.s; #lasx.txt xvfrint.s mask=0x769d3400
:xvfrint.s xrD, xrJ is op10_31=0x1da74d & xrD & xrJ { xrD = xvfrint.s(xrD, xrJ); }
define pcodeop xvfrint.d; #lasx.txt xvfrint.d mask=0x769d3800
:xvfrint.d xrD, xrJ is op10_31=0x1da74e & xrD & xrJ { xrD = xvfrint.d(xrD, xrJ); }
define pcodeop xvfrintrm.s; #lasx.txt xvfrintrm.s mask=0x769d4400
:xvfrintrm.s xrD, xrJ is op10_31=0x1da751 & xrD & xrJ { xrD = xvfrintrm.s(xrD, xrJ); }
define pcodeop xvfrintrm.d; #lasx.txt xvfrintrm.d mask=0x769d4800
:xvfrintrm.d xrD, xrJ is op10_31=0x1da752 & xrD & xrJ { xrD = xvfrintrm.d(xrD, xrJ); }
define pcodeop xvfrintrp.s; #lasx.txt xvfrintrp.s mask=0x769d5400
:xvfrintrp.s xrD, xrJ is op10_31=0x1da755 & xrD & xrJ { xrD = xvfrintrp.s(xrD, xrJ); }
define pcodeop xvfrintrp.d; #lasx.txt xvfrintrp.d mask=0x769d5800
:xvfrintrp.d xrD, xrJ is op10_31=0x1da756 & xrD & xrJ { xrD = xvfrintrp.d(xrD, xrJ); }
define pcodeop xvfrintrz.s; #lasx.txt xvfrintrz.s mask=0x769d6400
:xvfrintrz.s xrD, xrJ is op10_31=0x1da759 & xrD & xrJ { xrD = xvfrintrz.s(xrD, xrJ); }
define pcodeop xvfrintrz.d; #lasx.txt xvfrintrz.d mask=0x769d6800
:xvfrintrz.d xrD, xrJ is op10_31=0x1da75a & xrD & xrJ { xrD = xvfrintrz.d(xrD, xrJ); }
define pcodeop xvfrintrne.s; #lasx.txt xvfrintrne.s mask=0x769d7400
:xvfrintrne.s xrD, xrJ is op10_31=0x1da75d & xrD & xrJ { xrD = xvfrintrne.s(xrD, xrJ); }
define pcodeop xvfrintrne.d; #lasx.txt xvfrintrne.d mask=0x769d7800
:xvfrintrne.d xrD, xrJ is op10_31=0x1da75e & xrD & xrJ { xrD = xvfrintrne.d(xrD, xrJ); }
define pcodeop xvfcvtl.s.h; #lasx.txt xvfcvtl.s.h mask=0x769de800
:xvfcvtl.s.h xrD, xrJ is op10_31=0x1da77a & xrD & xrJ { xrD = xvfcvtl.s.h(xrD, xrJ); }
define pcodeop xvfcvth.s.h; #lasx.txt xvfcvth.s.h mask=0x769dec00
:xvfcvth.s.h xrD, xrJ is op10_31=0x1da77b & xrD & xrJ { xrD = xvfcvth.s.h(xrD, xrJ); }
define pcodeop xvfcvtl.d.s; #lasx.txt xvfcvtl.d.s mask=0x769df000
:xvfcvtl.d.s xrD, xrJ is op10_31=0x1da77c & xrD & xrJ { xrD = xvfcvtl.d.s(xrD, xrJ); }
define pcodeop xvfcvth.d.s; #lasx.txt xvfcvth.d.s mask=0x769df400
:xvfcvth.d.s xrD, xrJ is op10_31=0x1da77d & xrD & xrJ { xrD = xvfcvth.d.s(xrD, xrJ); }
define pcodeop xvffint.s.w; #lasx.txt xvffint.s.w mask=0x769e0000
:xvffint.s.w xrD, xrJ is op10_31=0x1da780 & xrD & xrJ { xrD = xvffint.s.w(xrD, xrJ); }
define pcodeop xvffint.s.wu; #lasx.txt xvffint.s.wu mask=0x769e0400
:xvffint.s.wu xrD, xrJ is op10_31=0x1da781 & xrD & xrJ { xrD = xvffint.s.wu(xrD, xrJ); }
define pcodeop xvffint.d.l; #lasx.txt xvffint.d.l mask=0x769e0800
:xvffint.d.l xrD, xrJ is op10_31=0x1da782 & xrD & xrJ { xrD = xvffint.d.l(xrD, xrJ); }
define pcodeop xvffint.d.lu; #lasx.txt xvffint.d.lu mask=0x769e0c00
:xvffint.d.lu xrD, xrJ is op10_31=0x1da783 & xrD & xrJ { xrD = xvffint.d.lu(xrD, xrJ); }
define pcodeop xvffintl.d.w; #lasx.txt xvffintl.d.w mask=0x769e1000
:xvffintl.d.w xrD, xrJ is op10_31=0x1da784 & xrD & xrJ { xrD = xvffintl.d.w(xrD, xrJ); }
define pcodeop xvffinth.d.w; #lasx.txt xvffinth.d.w mask=0x769e1400
:xvffinth.d.w xrD, xrJ is op10_31=0x1da785 & xrD & xrJ { xrD = xvffinth.d.w(xrD, xrJ); }
define pcodeop xvftint.w.s; #lasx.txt xvftint.w.s mask=0x769e3000
:xvftint.w.s xrD, xrJ is op10_31=0x1da78c & xrD & xrJ { xrD = xvftint.w.s(xrD, xrJ); }
define pcodeop xvftint.l.d; #lasx.txt xvftint.l.d mask=0x769e3400
:xvftint.l.d xrD, xrJ is op10_31=0x1da78d & xrD & xrJ { xrD = xvftint.l.d(xrD, xrJ); }
define pcodeop xvftintrm.w.s; #lasx.txt xvftintrm.w.s mask=0x769e3800
:xvftintrm.w.s xrD, xrJ is op10_31=0x1da78e & xrD & xrJ { xrD = xvftintrm.w.s(xrD, xrJ); }
define pcodeop xvftintrm.l.d; #lasx.txt xvftintrm.l.d mask=0x769e3c00
:xvftintrm.l.d xrD, xrJ is op10_31=0x1da78f & xrD & xrJ { xrD = xvftintrm.l.d(xrD, xrJ); }
define pcodeop xvftintrp.w.s; #lasx.txt xvftintrp.w.s mask=0x769e4000
:xvftintrp.w.s xrD, xrJ is op10_31=0x1da790 & xrD & xrJ { xrD = xvftintrp.w.s(xrD, xrJ); }
define pcodeop xvftintrp.l.d; #lasx.txt xvftintrp.l.d mask=0x769e4400
:xvftintrp.l.d xrD, xrJ is op10_31=0x1da791 & xrD & xrJ { xrD = xvftintrp.l.d(xrD, xrJ); }
define pcodeop xvftintrz.w.s; #lasx.txt xvftintrz.w.s mask=0x769e4800
:xvftintrz.w.s xrD, xrJ is op10_31=0x1da792 & xrD & xrJ { xrD = xvftintrz.w.s(xrD, xrJ); }
define pcodeop xvftintrz.l.d; #lasx.txt xvftintrz.l.d mask=0x769e4c00
:xvftintrz.l.d xrD, xrJ is op10_31=0x1da793 & xrD & xrJ { xrD = xvftintrz.l.d(xrD, xrJ); }
define pcodeop xvftintrne.w.s; #lasx.txt xvftintrne.w.s mask=0x769e5000
:xvftintrne.w.s xrD, xrJ is op10_31=0x1da794 & xrD & xrJ { xrD = xvftintrne.w.s(xrD, xrJ); }
define pcodeop xvftintrne.l.d; #lasx.txt xvftintrne.l.d mask=0x769e5400
:xvftintrne.l.d xrD, xrJ is op10_31=0x1da795 & xrD & xrJ { xrD = xvftintrne.l.d(xrD, xrJ); }
define pcodeop xvftint.wu.s; #lasx.txt xvftint.wu.s mask=0x769e5800
:xvftint.wu.s xrD, xrJ is op10_31=0x1da796 & xrD & xrJ { xrD = xvftint.wu.s(xrD, xrJ); }
define pcodeop xvftint.lu.d; #lasx.txt xvftint.lu.d mask=0x769e5c00
:xvftint.lu.d xrD, xrJ is op10_31=0x1da797 & xrD & xrJ { xrD = xvftint.lu.d(xrD, xrJ); }
define pcodeop xvftintrz.wu.s; #lasx.txt xvftintrz.wu.s mask=0x769e7000
:xvftintrz.wu.s xrD, xrJ is op10_31=0x1da79c & xrD & xrJ { xrD =
xvftintrz.wu.s(xrD, xrJ); }
# Auto-generated LASX constructors continued (from lasx.txt): float-to-int
# conversions on the low/high half and signed/unsigned element widening
# (xvexth.*).  Same opaque-pcodeop pattern as above; "mask=" comments keep
# the generator's fixed encoding bits (decode mask 0xfffffc00).
define pcodeop xvftintrz.lu.d; #lasx.txt xvftintrz.lu.d mask=0x769e7400
:xvftintrz.lu.d xrD, xrJ is op10_31=0x1da79d & xrD & xrJ { xrD = xvftintrz.lu.d(xrD, xrJ); }
define pcodeop xvftintl.l.s; #lasx.txt xvftintl.l.s mask=0x769e8000
:xvftintl.l.s xrD, xrJ is op10_31=0x1da7a0 & xrD & xrJ { xrD = xvftintl.l.s(xrD, xrJ); }
define pcodeop xvftinth.l.s; #lasx.txt xvftinth.l.s mask=0x769e8400
:xvftinth.l.s xrD, xrJ is op10_31=0x1da7a1 & xrD & xrJ { xrD = xvftinth.l.s(xrD, xrJ); }
define pcodeop xvftintrml.l.s; #lasx.txt xvftintrml.l.s mask=0x769e8800
:xvftintrml.l.s xrD, xrJ is op10_31=0x1da7a2 & xrD & xrJ { xrD = xvftintrml.l.s(xrD, xrJ); }
define pcodeop xvftintrmh.l.s; #lasx.txt xvftintrmh.l.s mask=0x769e8c00
:xvftintrmh.l.s xrD, xrJ is op10_31=0x1da7a3 & xrD & xrJ { xrD = xvftintrmh.l.s(xrD, xrJ); }
define pcodeop xvftintrpl.l.s; #lasx.txt xvftintrpl.l.s mask=0x769e9000
:xvftintrpl.l.s xrD, xrJ is op10_31=0x1da7a4 & xrD & xrJ { xrD = xvftintrpl.l.s(xrD, xrJ); }
define pcodeop xvftintrph.l.s; #lasx.txt xvftintrph.l.s mask=0x769e9400
:xvftintrph.l.s xrD, xrJ is op10_31=0x1da7a5 & xrD & xrJ { xrD = xvftintrph.l.s(xrD, xrJ); }
define pcodeop xvftintrzl.l.s; #lasx.txt xvftintrzl.l.s mask=0x769e9800
:xvftintrzl.l.s xrD, xrJ is op10_31=0x1da7a6 & xrD & xrJ { xrD = xvftintrzl.l.s(xrD, xrJ); }
define pcodeop xvftintrzh.l.s; #lasx.txt xvftintrzh.l.s mask=0x769e9c00
:xvftintrzh.l.s xrD, xrJ is op10_31=0x1da7a7 & xrD & xrJ { xrD = xvftintrzh.l.s(xrD, xrJ); }
define pcodeop xvftintrnel.l.s; #lasx.txt xvftintrnel.l.s mask=0x769ea000
:xvftintrnel.l.s xrD, xrJ is op10_31=0x1da7a8 & xrD & xrJ { xrD = xvftintrnel.l.s(xrD, xrJ); }
define pcodeop xvftintrneh.l.s; #lasx.txt xvftintrneh.l.s mask=0x769ea400
:xvftintrneh.l.s xrD, xrJ is op10_31=0x1da7a9 & xrD & xrJ { xrD = xvftintrneh.l.s(xrD, xrJ); }
define pcodeop xvexth.h.b; #lasx.txt xvexth.h.b mask=0x769ee000
:xvexth.h.b xrD, xrJ is op10_31=0x1da7b8 & xrD & xrJ { xrD = xvexth.h.b(xrD, xrJ); }
define pcodeop xvexth.w.h; #lasx.txt xvexth.w.h mask=0x769ee400
:xvexth.w.h xrD, xrJ is op10_31=0x1da7b9 & xrD & xrJ { xrD = xvexth.w.h(xrD, xrJ); }
define pcodeop xvexth.d.w; #lasx.txt xvexth.d.w mask=0x769ee800
:xvexth.d.w xrD, xrJ is op10_31=0x1da7ba & xrD & xrJ { xrD = xvexth.d.w(xrD, xrJ); }
define pcodeop xvexth.q.d; #lasx.txt xvexth.q.d mask=0x769eec00
:xvexth.q.d xrD, xrJ is op10_31=0x1da7bb & xrD & xrJ { xrD = xvexth.q.d(xrD, xrJ); }
define pcodeop xvexth.hu.bu; #lasx.txt xvexth.hu.bu mask=0x769ef000
:xvexth.hu.bu xrD, xrJ is op10_31=0x1da7bc & xrD & xrJ { xrD = xvexth.hu.bu(xrD, xrJ); }
define pcodeop xvexth.wu.hu; #lasx.txt xvexth.wu.hu mask=0x769ef400
:xvexth.wu.hu xrD, xrJ is op10_31=0x1da7bd & xrD & xrJ { xrD = xvexth.wu.hu(xrD, xrJ); }
define pcodeop xvexth.du.wu; #lasx.txt xvexth.du.wu mask=0x769ef800
:xvexth.du.wu xrD, xrJ is op10_31=0x1da7be & xrD & xrJ { xrD
= xvexth.du.wu(xrD, xrJ); } define pcodeop xvexth.qu.du; #lasx.txt xvexth.qu.du mask=0x769efc00 #0x769efc00 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvexth.qu.du xrD, xrJ is op10_31=0x1da7bf & xrD & xrJ { xrD = xvexth.qu.du(xrD, xrJ); } define pcodeop xvreplgr2vr.b; #lasx.txt xvreplgr2vr.b mask=0x769f0000 #0x769f0000 0xfffffc00 x0:5, r5:5 ['xreg0_5_s0', 'reg5_5_s0'] :xvreplgr2vr.b xrD, RJsrc is op10_31=0x1da7c0 & xrD & RJsrc { xrD = xvreplgr2vr.b(xrD, RJsrc); } define pcodeop xvreplgr2vr.h; #lasx.txt xvreplgr2vr.h mask=0x769f0400 #0x769f0400 0xfffffc00 x0:5, r5:5 ['xreg0_5_s0', 'reg5_5_s0'] :xvreplgr2vr.h xrD, RJsrc is op10_31=0x1da7c1 & xrD & RJsrc { xrD = xvreplgr2vr.h(xrD, RJsrc); } define pcodeop xvreplgr2vr.w; #lasx.txt xvreplgr2vr.w mask=0x769f0800 #0x769f0800 0xfffffc00 x0:5, r5:5 ['xreg0_5_s0', 'reg5_5_s0'] :xvreplgr2vr.w xrD, RJsrc is op10_31=0x1da7c2 & xrD & RJsrc { xrD = xvreplgr2vr.w(xrD, RJsrc); } define pcodeop xvreplgr2vr.d; #lasx.txt xvreplgr2vr.d mask=0x769f0c00 #0x769f0c00 0xfffffc00 x0:5, r5:5 ['xreg0_5_s0', 'reg5_5_s0'] :xvreplgr2vr.d xrD, RJsrc is op10_31=0x1da7c3 & xrD & RJsrc { xrD = xvreplgr2vr.d(xrD, RJsrc); } define pcodeop vext2xv.h.b; #lasx.txt vext2xv.h.b mask=0x769f1000 #0x769f1000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.h.b xrD, xrJ is op10_31=0x1da7c4 & xrD & xrJ { xrD = vext2xv.h.b(xrD, xrJ); } define pcodeop vext2xv.w.b; #lasx.txt vext2xv.w.b mask=0x769f1400 #0x769f1400 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.w.b xrD, xrJ is op10_31=0x1da7c5 & xrD & xrJ { xrD = vext2xv.w.b(xrD, xrJ); } define pcodeop vext2xv.d.b; #lasx.txt vext2xv.d.b mask=0x769f1800 #0x769f1800 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.d.b xrD, xrJ is op10_31=0x1da7c6 & xrD & xrJ { xrD = vext2xv.d.b(xrD, xrJ); } define pcodeop vext2xv.w.h; #lasx.txt vext2xv.w.h mask=0x769f1c00 #0x769f1c00 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.w.h xrD, xrJ is op10_31=0x1da7c7 & xrD & xrJ { xrD = 
vext2xv.w.h(xrD, xrJ); } define pcodeop vext2xv.d.h; #lasx.txt vext2xv.d.h mask=0x769f2000 #0x769f2000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.d.h xrD, xrJ is op10_31=0x1da7c8 & xrD & xrJ { xrD = vext2xv.d.h(xrD, xrJ); } define pcodeop vext2xv.d.w; #lasx.txt vext2xv.d.w mask=0x769f2400 #0x769f2400 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.d.w xrD, xrJ is op10_31=0x1da7c9 & xrD & xrJ { xrD = vext2xv.d.w(xrD, xrJ); } define pcodeop vext2xv.hu.bu; #lasx.txt vext2xv.hu.bu mask=0x769f2800 #0x769f2800 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.hu.bu xrD, xrJ is op10_31=0x1da7ca & xrD & xrJ { xrD = vext2xv.hu.bu(xrD, xrJ); } define pcodeop vext2xv.wu.bu; #lasx.txt vext2xv.wu.bu mask=0x769f2c00 #0x769f2c00 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.wu.bu xrD, xrJ is op10_31=0x1da7cb & xrD & xrJ { xrD = vext2xv.wu.bu(xrD, xrJ); } define pcodeop vext2xv.du.bu; #lasx.txt vext2xv.du.bu mask=0x769f3000 #0x769f3000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.du.bu xrD, xrJ is op10_31=0x1da7cc & xrD & xrJ { xrD = vext2xv.du.bu(xrD, xrJ); } define pcodeop vext2xv.wu.hu; #lasx.txt vext2xv.wu.hu mask=0x769f3400 #0x769f3400 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.wu.hu xrD, xrJ is op10_31=0x1da7cd & xrD & xrJ { xrD = vext2xv.wu.hu(xrD, xrJ); } define pcodeop vext2xv.du.hu; #lasx.txt vext2xv.du.hu mask=0x769f3800 #0x769f3800 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.du.hu xrD, xrJ is op10_31=0x1da7ce & xrD & xrJ { xrD = vext2xv.du.hu(xrD, xrJ); } define pcodeop vext2xv.du.wu; #lasx.txt vext2xv.du.wu mask=0x769f3c00 #0x769f3c00 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :vext2xv.du.wu xrD, xrJ is op10_31=0x1da7cf & xrD & xrJ { xrD = vext2xv.du.wu(xrD, xrJ); } define pcodeop xvrotri.b; #lasx.txt xvrotri.b mask=0x76a02000 #0x76a02000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvrotri.b xrD, xrJ, imm10_3 is op13_31=0x3b501 & xrD & xrJ & 
imm10_3 { xrD = xvrotri.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvrotri.h; #lasx.txt xvrotri.h mask=0x76a04000 #0x76a04000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvrotri.h xrD, xrJ, imm10_4 is op14_31=0x1da81 & xrD & xrJ & imm10_4 { xrD = xvrotri.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvrotri.w; #lasx.txt xvrotri.w mask=0x76a08000 #0x76a08000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvrotri.w xrD, xrJ, imm10_5 is op15_31=0xed41 & xrD & xrJ & imm10_5 { xrD = xvrotri.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvrotri.d; #lasx.txt xvrotri.d mask=0x76a10000 #0x76a10000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvrotri.d xrD, xrJ, imm10_6 is op16_31=0x76a1 & xrD & xrJ & imm10_6 { xrD = xvrotri.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrlri.b; #lasx.txt xvsrlri.b mask=0x76a42000 #0x76a42000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsrlri.b xrD, xrJ, imm10_3 is op13_31=0x3b521 & xrD & xrJ & imm10_3 { xrD = xvsrlri.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsrlri.h; #lasx.txt xvsrlri.h mask=0x76a44000 #0x76a44000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrlri.h xrD, xrJ, imm10_4 is op14_31=0x1da91 & xrD & xrJ & imm10_4 { xrD = xvsrlri.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrlri.w; #lasx.txt xvsrlri.w mask=0x76a48000 #0x76a48000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrlri.w xrD, xrJ, imm10_5 is op15_31=0xed49 & xrD & xrJ & imm10_5 { xrD = xvsrlri.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrlri.d; #lasx.txt xvsrlri.d mask=0x76a50000 #0x76a50000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrlri.d xrD, xrJ, imm10_6 is op16_31=0x76a5 & xrD & xrJ & imm10_6 { xrD = xvsrlri.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrari.b; #lasx.txt xvsrari.b mask=0x76a82000 #0x76a82000 0xffffe000 
x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsrari.b xrD, xrJ, imm10_3 is op13_31=0x3b541 & xrD & xrJ & imm10_3 { xrD = xvsrari.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsrari.h; #lasx.txt xvsrari.h mask=0x76a84000 #0x76a84000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrari.h xrD, xrJ, imm10_4 is op14_31=0x1daa1 & xrD & xrJ & imm10_4 { xrD = xvsrari.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrari.w; #lasx.txt xvsrari.w mask=0x76a88000 #0x76a88000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrari.w xrD, xrJ, imm10_5 is op15_31=0xed51 & xrD & xrJ & imm10_5 { xrD = xvsrari.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrari.d; #lasx.txt xvsrari.d mask=0x76a90000 #0x76a90000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrari.d xrD, xrJ, imm10_6 is op16_31=0x76a9 & xrD & xrJ & imm10_6 { xrD = xvsrari.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvinsgr2vr.w; #lasx.txt xvinsgr2vr.w mask=0x76ebc000 #0x76ebc000 0xffffe000 x0:5, r5:5,u10:3 ['xreg0_5_s0', 'reg5_5_s0', 'imm10_3_s0'] :xvinsgr2vr.w xrD, RJsrc, imm10_3 is op13_31=0x3b75e & xrD & RJsrc & imm10_3 { xrD = xvinsgr2vr.w(xrD, RJsrc, imm10_3:$(REGSIZE)); } define pcodeop xvinsgr2vr.d; #lasx.txt xvinsgr2vr.d mask=0x76ebe000 #0x76ebe000 0xfffff000 x0:5, r5:5,u10:2 ['xreg0_5_s0', 'reg5_5_s0', 'imm10_2_s0'] :xvinsgr2vr.d xrD, RJsrc, imm10_2 is op12_31=0x76ebe & xrD & RJsrc & imm10_2 { xrD = xvinsgr2vr.d(xrD, RJsrc, imm10_2:$(REGSIZE)); } define pcodeop xvpickve2gr.w; #lasx.txt xvpickve2gr.w mask=0x76efc000 #0x76efc000 0xffffe000 r0:5,x5:5,u10:3 ['reg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvpickve2gr.w RD, xrJ, imm10_3 is op13_31=0x3b77e & RD & xrJ & imm10_3 { RD = xvpickve2gr.w(RD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvpickve2gr.d; #lasx.txt xvpickve2gr.d mask=0x76efe000 #0x76efe000 0xfffff000 r0:5,x5:5,u10:2 ['reg0_5_s0', 'xreg5_5_s0', 'imm10_2_s0'] :xvpickve2gr.d RD, xrJ, imm10_2 
is op12_31=0x76efe & RD & xrJ & imm10_2 { RD = xvpickve2gr.d(RD, xrJ, imm10_2:$(REGSIZE)); } define pcodeop xvpickve2gr.wu; #lasx.txt xvpickve2gr.wu mask=0x76f3c000 #0x76f3c000 0xffffe000 r0:5,x5:5,u10:3 ['reg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvpickve2gr.wu RD, xrJ, imm10_3 is op13_31=0x3b79e & RD & xrJ & imm10_3 { RD = xvpickve2gr.wu(RD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvpickve2gr.du; #lasx.txt xvpickve2gr.du mask=0x76f3e000 #0x76f3e000 0xfffff000 r0:5,x5:5,u10:2 ['reg0_5_s0', 'xreg5_5_s0', 'imm10_2_s0'] :xvpickve2gr.du RD, xrJ, imm10_2 is op12_31=0x76f3e & RD & xrJ & imm10_2 { RD = xvpickve2gr.du(RD, xrJ, imm10_2:$(REGSIZE)); } define pcodeop xvrepl128vei.b; #lasx.txt xvrepl128vei.b mask=0x76f78000 #0x76f78000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvrepl128vei.b xrD, xrJ, imm10_4 is op14_31=0x1dbde & xrD & xrJ & imm10_4 { xrD = xvrepl128vei.b(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvrepl128vei.h; #lasx.txt xvrepl128vei.h mask=0x76f7c000 #0x76f7c000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvrepl128vei.h xrD, xrJ, imm10_3 is op13_31=0x3b7be & xrD & xrJ & imm10_3 { xrD = xvrepl128vei.h(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvrepl128vei.w; #lasx.txt xvrepl128vei.w mask=0x76f7e000 #0x76f7e000 0xfffff000 x0:5,x5:5,u10:2 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_2_s0'] :xvrepl128vei.w xrD, xrJ, imm10_2 is op12_31=0x76f7e & xrD & xrJ & imm10_2 { xrD = xvrepl128vei.w(xrD, xrJ, imm10_2:$(REGSIZE)); } define pcodeop xvrepl128vei.d; #lasx.txt xvrepl128vei.d mask=0x76f7f000 #0x76f7f000 0xfffff800 x0:5,x5:5,u10:1 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_1_s0'] :xvrepl128vei.d xrD, xrJ, imm10_1 is op11_31=0xedefe & xrD & xrJ & imm10_1 { xrD = xvrepl128vei.d(xrD, xrJ, imm10_1:$(REGSIZE)); } define pcodeop xvinsve0.w; #lasx.txt xvinsve0.w mask=0x76ffc000 #0x76ffc000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvinsve0.w xrD, xrJ, imm10_3 is op13_31=0x3b7fe & xrD & xrJ 
& imm10_3 { xrD = xvinsve0.w(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvinsve0.d; #lasx.txt xvinsve0.d mask=0x76ffe000 #0x76ffe000 0xfffff000 x0:5,x5:5,u10:2 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_2_s0'] :xvinsve0.d xrD, xrJ, imm10_2 is op12_31=0x76ffe & xrD & xrJ & imm10_2 { xrD = xvinsve0.d(xrD, xrJ, imm10_2:$(REGSIZE)); } define pcodeop xvpickve.w; #lasx.txt xvpickve.w mask=0x7703c000 #0x7703c000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvpickve.w xrD, xrJ, imm10_3 is op13_31=0x3b81e & xrD & xrJ & imm10_3 { xrD = xvpickve.w(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvpickve.d; #lasx.txt xvpickve.d mask=0x7703e000 #0x7703e000 0xfffff000 x0:5,x5:5,u10:2 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_2_s0'] :xvpickve.d xrD, xrJ, imm10_2 is op12_31=0x7703e & xrD & xrJ & imm10_2 { xrD = xvpickve.d(xrD, xrJ, imm10_2:$(REGSIZE)); } define pcodeop xvreplve0.b; #lasx.txt xvreplve0.b mask=0x77070000 #0x77070000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvreplve0.b xrD, xrJ is op10_31=0x1dc1c0 & xrD & xrJ { xrD = xvreplve0.b(xrD, xrJ); } define pcodeop xvreplve0.h; #lasx.txt xvreplve0.h mask=0x77078000 #0x77078000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvreplve0.h xrD, xrJ is op10_31=0x1dc1e0 & xrD & xrJ { xrD = xvreplve0.h(xrD, xrJ); } define pcodeop xvreplve0.w; #lasx.txt xvreplve0.w mask=0x7707c000 #0x7707c000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvreplve0.w xrD, xrJ is op10_31=0x1dc1f0 & xrD & xrJ { xrD = xvreplve0.w(xrD, xrJ); } define pcodeop xvreplve0.d; #lasx.txt xvreplve0.d mask=0x7707e000 #0x7707e000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvreplve0.d xrD, xrJ is op10_31=0x1dc1f8 & xrD & xrJ { xrD = xvreplve0.d(xrD, xrJ); } define pcodeop xvreplve0.q; #lasx.txt xvreplve0.q mask=0x7707f000 #0x7707f000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvreplve0.q xrD, xrJ is op10_31=0x1dc1fc & xrD & xrJ { xrD = xvreplve0.q(xrD, xrJ); } define pcodeop xvsllwil.h.b; #lasx.txt xvsllwil.h.b 
mask=0x77082000 #0x77082000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsllwil.h.b xrD, xrJ, imm10_3 is op13_31=0x3b841 & xrD & xrJ & imm10_3 { xrD = xvsllwil.h.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsllwil.w.h; #lasx.txt xvsllwil.w.h mask=0x77084000 #0x77084000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsllwil.w.h xrD, xrJ, imm10_4 is op14_31=0x1dc21 & xrD & xrJ & imm10_4 { xrD = xvsllwil.w.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsllwil.d.w; #lasx.txt xvsllwil.d.w mask=0x77088000 #0x77088000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsllwil.d.w xrD, xrJ, imm10_5 is op15_31=0xee11 & xrD & xrJ & imm10_5 { xrD = xvsllwil.d.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvextl.q.d; #lasx.txt xvextl.q.d mask=0x77090000 #0x77090000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvextl.q.d xrD, xrJ is op10_31=0x1dc240 & xrD & xrJ { xrD = xvextl.q.d(xrD, xrJ); } define pcodeop xvsllwil.hu.bu; #lasx.txt xvsllwil.hu.bu mask=0x770c2000 #0x770c2000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsllwil.hu.bu xrD, xrJ, imm10_3 is op13_31=0x3b861 & xrD & xrJ & imm10_3 { xrD = xvsllwil.hu.bu(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsllwil.wu.hu; #lasx.txt xvsllwil.wu.hu mask=0x770c4000 #0x770c4000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsllwil.wu.hu xrD, xrJ, imm10_4 is op14_31=0x1dc31 & xrD & xrJ & imm10_4 { xrD = xvsllwil.wu.hu(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsllwil.du.wu; #lasx.txt xvsllwil.du.wu mask=0x770c8000 #0x770c8000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsllwil.du.wu xrD, xrJ, imm10_5 is op15_31=0xee19 & xrD & xrJ & imm10_5 { xrD = xvsllwil.du.wu(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvextl.qu.du; #lasx.txt xvextl.qu.du mask=0x770d0000 #0x770d0000 0xfffffc00 x0:5,x5:5 ['xreg0_5_s0', 'xreg5_5_s0'] :xvextl.qu.du xrD, xrJ is 
op10_31=0x1dc340 & xrD & xrJ { xrD = xvextl.qu.du(xrD, xrJ); } define pcodeop xvbitclri.b; #lasx.txt xvbitclri.b mask=0x77102000 #0x77102000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvbitclri.b xrD, xrJ, imm10_3 is op13_31=0x3b881 & xrD & xrJ & imm10_3 { xrD = xvbitclri.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvbitclri.h; #lasx.txt xvbitclri.h mask=0x77104000 #0x77104000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvbitclri.h xrD, xrJ, imm10_4 is op14_31=0x1dc41 & xrD & xrJ & imm10_4 { xrD = xvbitclri.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvbitclri.w; #lasx.txt xvbitclri.w mask=0x77108000 #0x77108000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvbitclri.w xrD, xrJ, imm10_5 is op15_31=0xee21 & xrD & xrJ & imm10_5 { xrD = xvbitclri.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvbitclri.d; #lasx.txt xvbitclri.d mask=0x77110000 #0x77110000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvbitclri.d xrD, xrJ, imm10_6 is op16_31=0x7711 & xrD & xrJ & imm10_6 { xrD = xvbitclri.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvbitseti.b; #lasx.txt xvbitseti.b mask=0x77142000 #0x77142000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvbitseti.b xrD, xrJ, imm10_3 is op13_31=0x3b8a1 & xrD & xrJ & imm10_3 { xrD = xvbitseti.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvbitseti.h; #lasx.txt xvbitseti.h mask=0x77144000 #0x77144000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvbitseti.h xrD, xrJ, imm10_4 is op14_31=0x1dc51 & xrD & xrJ & imm10_4 { xrD = xvbitseti.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvbitseti.w; #lasx.txt xvbitseti.w mask=0x77148000 #0x77148000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvbitseti.w xrD, xrJ, imm10_5 is op15_31=0xee29 & xrD & xrJ & imm10_5 { xrD = xvbitseti.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvbitseti.d; 
#lasx.txt xvbitseti.d mask=0x77150000 #0x77150000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvbitseti.d xrD, xrJ, imm10_6 is op16_31=0x7715 & xrD & xrJ & imm10_6 { xrD = xvbitseti.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvbitrevi.b; #lasx.txt xvbitrevi.b mask=0x77182000 #0x77182000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvbitrevi.b xrD, xrJ, imm10_3 is op13_31=0x3b8c1 & xrD & xrJ & imm10_3 { xrD = xvbitrevi.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvbitrevi.h; #lasx.txt xvbitrevi.h mask=0x77184000 #0x77184000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvbitrevi.h xrD, xrJ, imm10_4 is op14_31=0x1dc61 & xrD & xrJ & imm10_4 { xrD = xvbitrevi.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvbitrevi.w; #lasx.txt xvbitrevi.w mask=0x77188000 #0x77188000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvbitrevi.w xrD, xrJ, imm10_5 is op15_31=0xee31 & xrD & xrJ & imm10_5 { xrD = xvbitrevi.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvbitrevi.d; #lasx.txt xvbitrevi.d mask=0x77190000 #0x77190000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvbitrevi.d xrD, xrJ, imm10_6 is op16_31=0x7719 & xrD & xrJ & imm10_6 { xrD = xvbitrevi.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsat.b; #lasx.txt xvsat.b mask=0x77242000 #0x77242000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsat.b xrD, xrJ, imm10_3 is op13_31=0x3b921 & xrD & xrJ & imm10_3 { xrD = xvsat.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsat.h; #lasx.txt xvsat.h mask=0x77244000 #0x77244000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsat.h xrD, xrJ, imm10_4 is op14_31=0x1dc91 & xrD & xrJ & imm10_4 { xrD = xvsat.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsat.w; #lasx.txt xvsat.w mask=0x77248000 #0x77248000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsat.w 
xrD, xrJ, imm10_5 is op15_31=0xee49 & xrD & xrJ & imm10_5 { xrD = xvsat.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsat.d; #lasx.txt xvsat.d mask=0x77250000 #0x77250000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsat.d xrD, xrJ, imm10_6 is op16_31=0x7725 & xrD & xrJ & imm10_6 { xrD = xvsat.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsat.bu; #lasx.txt xvsat.bu mask=0x77282000 #0x77282000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsat.bu xrD, xrJ, imm10_3 is op13_31=0x3b941 & xrD & xrJ & imm10_3 { xrD = xvsat.bu(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsat.hu; #lasx.txt xvsat.hu mask=0x77284000 #0x77284000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsat.hu xrD, xrJ, imm10_4 is op14_31=0x1dca1 & xrD & xrJ & imm10_4 { xrD = xvsat.hu(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsat.wu; #lasx.txt xvsat.wu mask=0x77288000 #0x77288000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsat.wu xrD, xrJ, imm10_5 is op15_31=0xee51 & xrD & xrJ & imm10_5 { xrD = xvsat.wu(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsat.du; #lasx.txt xvsat.du mask=0x77290000 #0x77290000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsat.du xrD, xrJ, imm10_6 is op16_31=0x7729 & xrD & xrJ & imm10_6 { xrD = xvsat.du(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvslli.b; #lasx.txt xvslli.b mask=0x772c2000 #0x772c2000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvslli.b xrD, xrJ, imm10_3 is op13_31=0x3b961 & xrD & xrJ & imm10_3 { xrD = xvslli.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvslli.h; #lasx.txt xvslli.h mask=0x772c4000 #0x772c4000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvslli.h xrD, xrJ, imm10_4 is op14_31=0x1dcb1 & xrD & xrJ & imm10_4 { xrD = xvslli.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvslli.w; #lasx.txt xvslli.w mask=0x772c8000 #0x772c8000 
0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvslli.w xrD, xrJ, imm10_5 is op15_31=0xee59 & xrD & xrJ & imm10_5 { xrD = xvslli.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvslli.d; #lasx.txt xvslli.d mask=0x772d0000 #0x772d0000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvslli.d xrD, xrJ, imm10_6 is op16_31=0x772d & xrD & xrJ & imm10_6 { xrD = xvslli.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrli.b; #lasx.txt xvsrli.b mask=0x77302000 #0x77302000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsrli.b xrD, xrJ, imm10_3 is op13_31=0x3b981 & xrD & xrJ & imm10_3 { xrD = xvsrli.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsrli.h; #lasx.txt xvsrli.h mask=0x77304000 #0x77304000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrli.h xrD, xrJ, imm10_4 is op14_31=0x1dcc1 & xrD & xrJ & imm10_4 { xrD = xvsrli.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrli.w; #lasx.txt xvsrli.w mask=0x77308000 #0x77308000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrli.w xrD, xrJ, imm10_5 is op15_31=0xee61 & xrD & xrJ & imm10_5 { xrD = xvsrli.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrli.d; #lasx.txt xvsrli.d mask=0x77310000 #0x77310000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrli.d xrD, xrJ, imm10_6 is op16_31=0x7731 & xrD & xrJ & imm10_6 { xrD = xvsrli.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrai.b; #lasx.txt xvsrai.b mask=0x77342000 #0x77342000 0xffffe000 x0:5,x5:5,u10:3 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_3_s0'] :xvsrai.b xrD, xrJ, imm10_3 is op13_31=0x3b9a1 & xrD & xrJ & imm10_3 { xrD = xvsrai.b(xrD, xrJ, imm10_3:$(REGSIZE)); } define pcodeop xvsrai.h; #lasx.txt xvsrai.h mask=0x77344000 #0x77344000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrai.h xrD, xrJ, imm10_4 is op14_31=0x1dcd1 & xrD & xrJ & imm10_4 { xrD = xvsrai.h(xrD, xrJ, 
imm10_4:$(REGSIZE)); } define pcodeop xvsrai.w; #lasx.txt xvsrai.w mask=0x77348000 #0x77348000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrai.w xrD, xrJ, imm10_5 is op15_31=0xee69 & xrD & xrJ & imm10_5 { xrD = xvsrai.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrai.d; #lasx.txt xvsrai.d mask=0x77350000 #0x77350000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrai.d xrD, xrJ, imm10_6 is op16_31=0x7735 & xrD & xrJ & imm10_6 { xrD = xvsrai.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrlni.b.h; #lasx.txt xvsrlni.b.h mask=0x77404000 #0x77404000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrlni.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd01 & xrD & xrJ & imm10_4 { xrD = xvsrlni.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrlni.h.w; #lasx.txt xvsrlni.h.w mask=0x77408000 #0x77408000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrlni.h.w xrD, xrJ, imm10_5 is op15_31=0xee81 & xrD & xrJ & imm10_5 { xrD = xvsrlni.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrlni.w.d; #lasx.txt xvsrlni.w.d mask=0x77410000 #0x77410000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrlni.w.d xrD, xrJ, imm10_6 is op16_31=0x7741 & xrD & xrJ & imm10_6 { xrD = xvsrlni.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrlni.d.q; #lasx.txt xvsrlni.d.q mask=0x77420000 #0x77420000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvsrlni.d.q xrD, xrJ, imm10_7 is op17_31=0x3ba1 & xrD & xrJ & imm10_7 { xrD = xvsrlni.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvsrlrni.b.h; #lasx.txt xvsrlrni.b.h mask=0x77444000 #0x77444000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrlrni.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd11 & xrD & xrJ & imm10_4 { xrD = xvsrlrni.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrlrni.h.w; #lasx.txt xvsrlrni.h.w mask=0x77448000 #0x77448000 0xffff8000 
x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrlrni.h.w xrD, xrJ, imm10_5 is op15_31=0xee89 & xrD & xrJ & imm10_5 { xrD = xvsrlrni.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrlrni.w.d; #lasx.txt xvsrlrni.w.d mask=0x77450000 #0x77450000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrlrni.w.d xrD, xrJ, imm10_6 is op16_31=0x7745 & xrD & xrJ & imm10_6 { xrD = xvsrlrni.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrlrni.d.q; #lasx.txt xvsrlrni.d.q mask=0x77460000 #0x77460000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvsrlrni.d.q xrD, xrJ, imm10_7 is op17_31=0x3ba3 & xrD & xrJ & imm10_7 { xrD = xvsrlrni.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrlni.b.h; #lasx.txt xvssrlni.b.h mask=0x77484000 #0x77484000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrlni.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd21 & xrD & xrJ & imm10_4 { xrD = xvssrlni.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrlni.h.w; #lasx.txt xvssrlni.h.w mask=0x77488000 #0x77488000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrlni.h.w xrD, xrJ, imm10_5 is op15_31=0xee91 & xrD & xrJ & imm10_5 { xrD = xvssrlni.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrlni.w.d; #lasx.txt xvssrlni.w.d mask=0x77490000 #0x77490000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrlni.w.d xrD, xrJ, imm10_6 is op16_31=0x7749 & xrD & xrJ & imm10_6 { xrD = xvssrlni.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrlni.d.q; #lasx.txt xvssrlni.d.q mask=0x774a0000 #0x774a0000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrlni.d.q xrD, xrJ, imm10_7 is op17_31=0x3ba5 & xrD & xrJ & imm10_7 { xrD = xvssrlni.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrlni.bu.h; #lasx.txt xvssrlni.bu.h mask=0x774c4000 #0x774c4000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] 
:xvssrlni.bu.h xrD, xrJ, imm10_4 is op14_31=0x1dd31 & xrD & xrJ & imm10_4 { xrD = xvssrlni.bu.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrlni.hu.w; #lasx.txt xvssrlni.hu.w mask=0x774c8000 #0x774c8000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrlni.hu.w xrD, xrJ, imm10_5 is op15_31=0xee99 & xrD & xrJ & imm10_5 { xrD = xvssrlni.hu.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrlni.wu.d; #lasx.txt xvssrlni.wu.d mask=0x774d0000 #0x774d0000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrlni.wu.d xrD, xrJ, imm10_6 is op16_31=0x774d & xrD & xrJ & imm10_6 { xrD = xvssrlni.wu.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrlni.du.q; #lasx.txt xvssrlni.du.q mask=0x774e0000 #0x774e0000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrlni.du.q xrD, xrJ, imm10_7 is op17_31=0x3ba7 & xrD & xrJ & imm10_7 { xrD = xvssrlni.du.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrlrni.b.h; #lasx.txt xvssrlrni.b.h mask=0x77504000 #0x77504000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrlrni.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd41 & xrD & xrJ & imm10_4 { xrD = xvssrlrni.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrlrni.h.w; #lasx.txt xvssrlrni.h.w mask=0x77508000 #0x77508000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrlrni.h.w xrD, xrJ, imm10_5 is op15_31=0xeea1 & xrD & xrJ & imm10_5 { xrD = xvssrlrni.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrlrni.w.d; #lasx.txt xvssrlrni.w.d mask=0x77510000 #0x77510000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrlrni.w.d xrD, xrJ, imm10_6 is op16_31=0x7751 & xrD & xrJ & imm10_6 { xrD = xvssrlrni.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrlrni.d.q; #lasx.txt xvssrlrni.d.q mask=0x77520000 #0x77520000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrlrni.d.q xrD, xrJ, imm10_7 is 
# LASX (LoongArch 256-bit SIMD) instruction stubs: saturating/rounding
# shift-right-and-narrow immediate families (xvssrlrni.*, xvsrani.*,
# xvsrarni.*, xvssrani.*, xvssrarni.*), element insert/extract and shuffle
# (xvextrins.*, xvshuf4i.*), bitwise-immediate ops (xvbitseli.b, xvandi.b,
# xvori.b, xvxori.b, xvnori.b), xvldi, and the start of the xvpermi family.
# Each instruction is an opaque user-defined pcodeop: the constructor matches
# a fixed opcode field (opN_31=...) plus the xrD/xrJ vector registers and an
# unsigned immediate, and the semantic section just calls the pcodeop with
# xrD updated in place — no bit-accurate vector model is provided.
# The "#lasx.txt NAME mask=..." / "#0x... 0x... fields [...]" text is
# generator provenance (encoding word, decode mask, operand field layout).
# NOTE(review): the original line breaks appear to have been lost in
# extraction — in SLEIGH a '#' comment runs to end of line, so as laid out
# here the provenance comments would swallow the constructors that follow
# them on the same physical line.  Confirm formatting against the upstream
# lasx.sinc before relying on this chunk.  (The first fragment below is the
# tail of an xvssrlrni.d.q constructor whose start precedes this chunk.)
op17_31=0x3ba9 & xrD & xrJ & imm10_7 { xrD = xvssrlrni.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrlrni.bu.h; #lasx.txt xvssrlrni.bu.h mask=0x77544000 #0x77544000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrlrni.bu.h xrD, xrJ, imm10_4 is op14_31=0x1dd51 & xrD & xrJ & imm10_4 { xrD = xvssrlrni.bu.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrlrni.hu.w; #lasx.txt xvssrlrni.hu.w mask=0x77548000 #0x77548000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrlrni.hu.w xrD, xrJ, imm10_5 is op15_31=0xeea9 & xrD & xrJ & imm10_5 { xrD = xvssrlrni.hu.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrlrni.wu.d; #lasx.txt xvssrlrni.wu.d mask=0x77550000 #0x77550000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrlrni.wu.d xrD, xrJ, imm10_6 is op16_31=0x7755 & xrD & xrJ & imm10_6 { xrD = xvssrlrni.wu.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrlrni.du.q; #lasx.txt xvssrlrni.du.q mask=0x77560000 #0x77560000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrlrni.du.q xrD, xrJ, imm10_7 is op17_31=0x3bab & xrD & xrJ & imm10_7 { xrD = xvssrlrni.du.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvsrani.b.h; #lasx.txt xvsrani.b.h mask=0x77584000 #0x77584000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrani.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd61 & xrD & xrJ & imm10_4 { xrD = xvsrani.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrani.h.w; #lasx.txt xvsrani.h.w mask=0x77588000 #0x77588000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrani.h.w xrD, xrJ, imm10_5 is op15_31=0xeeb1 & xrD & xrJ & imm10_5 { xrD = xvsrani.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrani.w.d; #lasx.txt xvsrani.w.d mask=0x77590000 #0x77590000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrani.w.d xrD, xrJ, imm10_6 is op16_31=0x7759 & xrD & xrJ & imm10_6 { xrD 
= xvsrani.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrani.d.q; #lasx.txt xvsrani.d.q mask=0x775a0000 #0x775a0000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvsrani.d.q xrD, xrJ, imm10_7 is op17_31=0x3bad & xrD & xrJ & imm10_7 { xrD = xvsrani.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvsrarni.b.h; #lasx.txt xvsrarni.b.h mask=0x775c4000 #0x775c4000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvsrarni.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd71 & xrD & xrJ & imm10_4 { xrD = xvsrarni.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvsrarni.h.w; #lasx.txt xvsrarni.h.w mask=0x775c8000 #0x775c8000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvsrarni.h.w xrD, xrJ, imm10_5 is op15_31=0xeeb9 & xrD & xrJ & imm10_5 { xrD = xvsrarni.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvsrarni.w.d; #lasx.txt xvsrarni.w.d mask=0x775d0000 #0x775d0000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvsrarni.w.d xrD, xrJ, imm10_6 is op16_31=0x775d & xrD & xrJ & imm10_6 { xrD = xvsrarni.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvsrarni.d.q; #lasx.txt xvsrarni.d.q mask=0x775e0000 #0x775e0000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvsrarni.d.q xrD, xrJ, imm10_7 is op17_31=0x3baf & xrD & xrJ & imm10_7 { xrD = xvsrarni.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrani.b.h; #lasx.txt xvssrani.b.h mask=0x77604000 #0x77604000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrani.b.h xrD, xrJ, imm10_4 is op14_31=0x1dd81 & xrD & xrJ & imm10_4 { xrD = xvssrani.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrani.h.w; #lasx.txt xvssrani.h.w mask=0x77608000 #0x77608000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrani.h.w xrD, xrJ, imm10_5 is op15_31=0xeec1 & xrD & xrJ & imm10_5 { xrD = xvssrani.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop 
xvssrani.w.d; #lasx.txt xvssrani.w.d mask=0x77610000 #0x77610000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrani.w.d xrD, xrJ, imm10_6 is op16_31=0x7761 & xrD & xrJ & imm10_6 { xrD = xvssrani.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrani.d.q; #lasx.txt xvssrani.d.q mask=0x77620000 #0x77620000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrani.d.q xrD, xrJ, imm10_7 is op17_31=0x3bb1 & xrD & xrJ & imm10_7 { xrD = xvssrani.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrani.bu.h; #lasx.txt xvssrani.bu.h mask=0x77644000 #0x77644000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrani.bu.h xrD, xrJ, imm10_4 is op14_31=0x1dd91 & xrD & xrJ & imm10_4 { xrD = xvssrani.bu.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrani.hu.w; #lasx.txt xvssrani.hu.w mask=0x77648000 #0x77648000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrani.hu.w xrD, xrJ, imm10_5 is op15_31=0xeec9 & xrD & xrJ & imm10_5 { xrD = xvssrani.hu.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrani.wu.d; #lasx.txt xvssrani.wu.d mask=0x77650000 #0x77650000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrani.wu.d xrD, xrJ, imm10_6 is op16_31=0x7765 & xrD & xrJ & imm10_6 { xrD = xvssrani.wu.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrani.du.q; #lasx.txt xvssrani.du.q mask=0x77660000 #0x77660000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrani.du.q xrD, xrJ, imm10_7 is op17_31=0x3bb3 & xrD & xrJ & imm10_7 { xrD = xvssrani.du.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrarni.b.h; #lasx.txt xvssrarni.b.h mask=0x77684000 #0x77684000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrarni.b.h xrD, xrJ, imm10_4 is op14_31=0x1dda1 & xrD & xrJ & imm10_4 { xrD = xvssrarni.b.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrarni.h.w; #lasx.txt xvssrarni.h.w 
mask=0x77688000 #0x77688000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrarni.h.w xrD, xrJ, imm10_5 is op15_31=0xeed1 & xrD & xrJ & imm10_5 { xrD = xvssrarni.h.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrarni.w.d; #lasx.txt xvssrarni.w.d mask=0x77690000 #0x77690000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrarni.w.d xrD, xrJ, imm10_6 is op16_31=0x7769 & xrD & xrJ & imm10_6 { xrD = xvssrarni.w.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrarni.d.q; #lasx.txt xvssrarni.d.q mask=0x776a0000 #0x776a0000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrarni.d.q xrD, xrJ, imm10_7 is op17_31=0x3bb5 & xrD & xrJ & imm10_7 { xrD = xvssrarni.d.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvssrarni.bu.h; #lasx.txt xvssrarni.bu.h mask=0x776c4000 #0x776c4000 0xffffc000 x0:5,x5:5,u10:4 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_4_s0'] :xvssrarni.bu.h xrD, xrJ, imm10_4 is op14_31=0x1ddb1 & xrD & xrJ & imm10_4 { xrD = xvssrarni.bu.h(xrD, xrJ, imm10_4:$(REGSIZE)); } define pcodeop xvssrarni.hu.w; #lasx.txt xvssrarni.hu.w mask=0x776c8000 #0x776c8000 0xffff8000 x0:5,x5:5,u10:5 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_5_s0'] :xvssrarni.hu.w xrD, xrJ, imm10_5 is op15_31=0xeed9 & xrD & xrJ & imm10_5 { xrD = xvssrarni.hu.w(xrD, xrJ, imm10_5:$(REGSIZE)); } define pcodeop xvssrarni.wu.d; #lasx.txt xvssrarni.wu.d mask=0x776d0000 #0x776d0000 0xffff0000 x0:5,x5:5,u10:6 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_6_s0'] :xvssrarni.wu.d xrD, xrJ, imm10_6 is op16_31=0x776d & xrD & xrJ & imm10_6 { xrD = xvssrarni.wu.d(xrD, xrJ, imm10_6:$(REGSIZE)); } define pcodeop xvssrarni.du.q; #lasx.txt xvssrarni.du.q mask=0x776e0000 #0x776e0000 0xfffe0000 x0:5,x5:5,u10:7 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_7_s0'] :xvssrarni.du.q xrD, xrJ, imm10_7 is op17_31=0x3bb7 & xrD & xrJ & imm10_7 { xrD = xvssrarni.du.q(xrD, xrJ, imm10_7:$(REGSIZE)); } define pcodeop xvextrins.d; #lasx.txt xvextrins.d mask=0x77800000 
#0x77800000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvextrins.d xrD, xrJ, imm10_8 is op18_31=0x1de0 & xrD & xrJ & imm10_8 { xrD = xvextrins.d(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvextrins.w; #lasx.txt xvextrins.w mask=0x77840000 #0x77840000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvextrins.w xrD, xrJ, imm10_8 is op18_31=0x1de1 & xrD & xrJ & imm10_8 { xrD = xvextrins.w(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvextrins.h; #lasx.txt xvextrins.h mask=0x77880000 #0x77880000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvextrins.h xrD, xrJ, imm10_8 is op18_31=0x1de2 & xrD & xrJ & imm10_8 { xrD = xvextrins.h(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvextrins.b; #lasx.txt xvextrins.b mask=0x778c0000 #0x778c0000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvextrins.b xrD, xrJ, imm10_8 is op18_31=0x1de3 & xrD & xrJ & imm10_8 { xrD = xvextrins.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvshuf4i.b; #lasx.txt xvshuf4i.b mask=0x77900000 #0x77900000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvshuf4i.b xrD, xrJ, imm10_8 is op18_31=0x1de4 & xrD & xrJ & imm10_8 { xrD = xvshuf4i.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvshuf4i.h; #lasx.txt xvshuf4i.h mask=0x77940000 #0x77940000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvshuf4i.h xrD, xrJ, imm10_8 is op18_31=0x1de5 & xrD & xrJ & imm10_8 { xrD = xvshuf4i.h(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvshuf4i.w; #lasx.txt xvshuf4i.w mask=0x77980000 #0x77980000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvshuf4i.w xrD, xrJ, imm10_8 is op18_31=0x1de6 & xrD & xrJ & imm10_8 { xrD = xvshuf4i.w(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvshuf4i.d; #lasx.txt xvshuf4i.d mask=0x779c0000 #0x779c0000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvshuf4i.d xrD, xrJ, 
imm10_8 is op18_31=0x1de7 & xrD & xrJ & imm10_8 { xrD = xvshuf4i.d(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvbitseli.b; #lasx.txt xvbitseli.b mask=0x77c40000 #0x77c40000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvbitseli.b xrD, xrJ, imm10_8 is op18_31=0x1df1 & xrD & xrJ & imm10_8 { xrD = xvbitseli.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvandi.b; #lasx.txt xvandi.b mask=0x77d00000 #0x77d00000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvandi.b xrD, xrJ, imm10_8 is op18_31=0x1df4 & xrD & xrJ & imm10_8 { xrD = xvandi.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvori.b; #lasx.txt xvori.b mask=0x77d40000 #0x77d40000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvori.b xrD, xrJ, imm10_8 is op18_31=0x1df5 & xrD & xrJ & imm10_8 { xrD = xvori.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvxori.b; #lasx.txt xvxori.b mask=0x77d80000 #0x77d80000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvxori.b xrD, xrJ, imm10_8 is op18_31=0x1df6 & xrD & xrJ & imm10_8 { xrD = xvxori.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvnori.b; #lasx.txt xvnori.b mask=0x77dc0000 #0x77dc0000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvnori.b xrD, xrJ, imm10_8 is op18_31=0x1df7 & xrD & xrJ & imm10_8 { xrD = xvnori.b(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvldi; #lasx.txt xvldi mask=0x77e00000 #0x77e00000 0xfffc0000 x0:5,s5:13 ['xreg0_5_s0', 'simm5_13_s0'] :xvldi xrD,simm5_13 is op18_31=0x1df8 & xrD & simm5_13 { xrD = xvldi(xrD, simm5_13:$(REGSIZE)); } define pcodeop xvpermi.w; #lasx.txt xvpermi.w mask=0x77e40000 #0x77e40000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvpermi.w xrD, xrJ, imm10_8 is op18_31=0x1df9 & xrD & xrJ & imm10_8 { xrD = xvpermi.w(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvpermi.d; #lasx.txt xvpermi.d mask=0x77e80000 #0x77e80000 0xfffc0000 x0:5,x5:5,u10:8 
# End of the LASX section (xvpermi.d tail, xvpermi.q), followed by a repo
# extraction artifact ("==== FILE: ...lbt.sinc ====") that marks the start of
# the LBT (Loongson Binary Translation) extension specs:
#  - scratch-register moves (movgr2scr / movscr2gr) between GPRs and the
#    lbtrD/lbtrJ scratch registers,
#  - x86 emulation-state helpers (x86mttop / x86mftop, setx86loope /
#    setx86loopne, x86inc.* / x86dec.*, x86settm / x86inctop / x86clrtm /
#    x86dectop),
#  - sub-word rotates (rotr.b / rotr.h), addu12i.{w,d},
#  - add-with-carry / subtract-with-borrow / rotate-through-carry
#    (adc.* / sbc.* / rcr.*),
#  - ARM flag-emulation ALU ops (armmove, setx86j / setarmj, armadd.w ...
#    armrotri.w) where the 4-bit immediate selects behavior per the opaque op,
#  - the x86mul.* family (op15_31=0x7d with op0_4 selecting the width/sign
#    variant: b=0, h=1, w=2, d=3, bu=4 ... du=7).
# All constructors are decode-only stubs calling user-defined pcodeops; the
# "[@lbt]"/"@orig_name="/"@orig_fmt=" bracket text is generator metadata.
# NOTE(review): original line breaks appear lost in extraction — SLEIGH '#'
# comments run to end of line, so the provenance comments here would swallow
# the constructors sharing their physical line; confirm against upstream
# lbt.sinc.
['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvpermi.d xrD, xrJ, imm10_8 is op18_31=0x1dfa & xrD & xrJ & imm10_8 { xrD = xvpermi.d(xrD, xrJ, imm10_8:$(REGSIZE)); } define pcodeop xvpermi.q; #lasx.txt xvpermi.q mask=0x77ec0000 #0x77ec0000 0xfffc0000 x0:5,x5:5,u10:8 ['xreg0_5_s0', 'xreg5_5_s0', 'imm10_8_s0'] :xvpermi.q xrD, xrJ, imm10_8 is op18_31=0x1dfb & xrD & xrJ & imm10_8 { xrD = xvpermi.q(xrD, xrJ, imm10_8:$(REGSIZE)); } ================================================ FILE: pypcode/processors/Loongarch/data/languages/lbt.sinc ================================================ define pcodeop movgr2scr; #lbt.txt movgr2scr mask=0x00000800 [@lbt] #0x00000800 0xfffffc1c cr0:2, r5:5 ['scr0_2_s0', 'reg5_5_s0'] :movgr2scr lbtrD, RJ is op10_31=0x2 & op2_4=0x0 & lbtrD & RJ { movgr2scr(lbtrD:1, RJ); } define pcodeop movscr2gr; #lbt.txt movscr2gr mask=0x00000c00 [@lbt] #0x00000c00 0xffffff80 r0:5,cr5:2 ['reg0_5_s0', 'scr5_2_s0'] :movscr2gr RD, lbtrJ is op7_31=0x18 & RD & lbtrJ { RD = movscr2gr(RD, lbtrJ:1); } define pcodeop x86mttop; #lbt.txt x86mttop mask=0x00007000 [@lbt] #0x00007000 0xffffff1f u5:3 ['imm5_3_s0'] :x86mttop imm5_3 is op8_31=0x70 & op0_4=0x0 & imm5_3 { x86mttop(imm5_3:$(REGSIZE)); } define pcodeop x86mftop; #lbt.txt x86mftop mask=0x00007400 [@lbt] #0x00007400 0xffffffe0 r0:5 ['reg0_5_s0'] :x86mftop RD is op5_31=0x3a0 & RD { RD = x86mftop(RD); } define pcodeop setx86loope; #lbt.txt x86setloope mask=0x00007800 [@lbt, @orig_name=setx86loope] #0x00007800 0xfffffc00 r0:5, r5:5 ['reg0_5_s0', 'reg5_5_s0'] :setx86loope RD, RJ is op10_31=0x1e & RD & RJ { RD = setx86loope(RD, RJ); } define pcodeop setx86loopne; #lbt.txt x86setloopne mask=0x00007c00 [@lbt, @orig_name=setx86loopne] #0x00007c00 0xfffffc00 r0:5, r5:5 ['reg0_5_s0', 'reg5_5_s0'] :setx86loopne RD, RJ is op10_31=0x1f & RD & RJ { RD = setx86loopne(RD, RJ); } define pcodeop x86inc.b; #lbt.txt x86inc.b mask=0x00008000 [@lbt] #0x00008000 0xfffffc1f r5:5 ['reg5_5_s0'] :x86inc.b RJ is op10_31=0x20 & op0_4=0x0 & RJ { 
RJ = x86inc.b(RJ); } define pcodeop x86inc.h; #lbt.txt x86inc.h mask=0x00008001 [@lbt] #0x00008001 0xfffffc1f r5:5 ['reg5_5_s0'] :x86inc.h RJ is op10_31=0x20 & op0_4=0x1 & RJ { RJ = x86inc.h(RJ); } define pcodeop x86inc.w; #lbt.txt x86inc.w mask=0x00008002 [@lbt] #0x00008002 0xfffffc1f r5:5 ['reg5_5_s0'] :x86inc.w RJ is op10_31=0x20 & op0_4=0x2 & RJ { RJ = x86inc.w(RJ); } define pcodeop x86inc.d; #lbt.txt x86inc.d mask=0x00008003 [@lbt] #0x00008003 0xfffffc1f r5:5 ['reg5_5_s0'] :x86inc.d RJ is op10_31=0x20 & op0_4=0x3 & RJ { RJ = x86inc.d(RJ); } define pcodeop x86dec.b; #lbt.txt x86dec.b mask=0x00008004 [@lbt] #0x00008004 0xfffffc1f r5:5 ['reg5_5_s0'] :x86dec.b RJ is op10_31=0x20 & op0_4=0x4 & RJ { RJ = x86dec.b(RJ); } define pcodeop x86dec.h; #lbt.txt x86dec.h mask=0x00008005 [@lbt] #0x00008005 0xfffffc1f r5:5 ['reg5_5_s0'] :x86dec.h RJ is op10_31=0x20 & op0_4=0x5 & RJ { RJ = x86dec.h(RJ); } define pcodeop x86dec.w; #lbt.txt x86dec.w mask=0x00008006 [@lbt] #0x00008006 0xfffffc1f r5:5 ['reg5_5_s0'] :x86dec.w RJ is op10_31=0x20 & op0_4=0x6 & RJ { RJ = x86dec.w(RJ); } define pcodeop x86dec.d; #lbt.txt x86dec.d mask=0x00008007 [@lbt] #0x00008007 0xfffffc1f r5:5 ['reg5_5_s0'] :x86dec.d RJ is op10_31=0x20 & op0_4=0x7 & RJ { RJ = x86dec.d(RJ); } define pcodeop x86settm; #lbt.txt x86settm mask=0x00008008 [@lbt] #0x00008008 0xffffffff :x86settm is instword=0x00008008 { x86settm(); } define pcodeop x86inctop; #lbt.txt x86inctop mask=0x00008009 [@lbt] #0x00008009 0xffffffff :x86inctop is instword=0x00008009 { x86inctop(); } define pcodeop x86clrtm; #lbt.txt x86clrtm mask=0x00008028 [@lbt] #0x00008028 0xffffffff :x86clrtm is instword=0x00008028 { x86clrtm(); } define pcodeop x86dectop; #lbt.txt x86dectop mask=0x00008029 [@lbt] #0x00008029 0xffffffff :x86dectop is instword=0x00008029 { x86dectop(); } define pcodeop rotr.b; #lbt.txt rotr.b mask=0x001a0000 [@lbt] #0x001a0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rotr.b RD, RJ, RK is op15_31=0x34 
& RD & RJ & RK { RD = rotr.b(RD, RJ, RK); } define pcodeop rotr.h; #lbt.txt rotr.h mask=0x001a8000 [@lbt] #0x001a8000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rotr.h RD, RJ, RK is op15_31=0x35 & RD & RJ & RK { RD = rotr.h(RD, RJ, RK); } define pcodeop addu12i.w; #lbt.txt addu12i.w mask=0x00290000 [@lbt] #0x00290000 0xffff8000 r0:5, r5:5, s10:5 ['reg0_5_s0', 'reg5_5_s0', 'simm10_5_s0'] :addu12i.w RD, RJ, simm10_5 is op15_31=0x52 & RD & RJ & simm10_5 { RD = addu12i.w(RD, RJ, simm10_5:$(REGSIZE)); } define pcodeop addu12i.d; #lbt.txt addu12i.d mask=0x00298000 [@lbt] #0x00298000 0xffff8000 r0:5, r5:5, s10:5 ['reg0_5_s0', 'reg5_5_s0', 'simm10_5_s0'] :addu12i.d RD, RJ, simm10_5 is op15_31=0x53 & RD & RJ & simm10_5 { RD = addu12i.d(RD, RJ, simm10_5:$(REGSIZE)); } define pcodeop adc.b; #lbt.txt adc.b mask=0x00300000 [@lbt] #0x00300000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :adc.b RD, RJ, RK is op15_31=0x60 & RD & RJ & RK { RD = adc.b(RD, RJ, RK); } define pcodeop adc.h; #lbt.txt adc.h mask=0x00308000 [@lbt] #0x00308000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :adc.h RD, RJ, RK is op15_31=0x61 & RD & RJ & RK { RD = adc.h(RD, RJ, RK); } define pcodeop adc.w; #lbt.txt adc.w mask=0x00310000 [@lbt] #0x00310000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :adc.w RD, RJ, RK is op15_31=0x62 & RD & RJ & RK { RD = adc.w(RD, RJ, RK); } define pcodeop adc.d; #lbt.txt adc.d mask=0x00318000 [@lbt] #0x00318000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :adc.d RD, RJ, RK is op15_31=0x63 & RD & RJ & RK { RD = adc.d(RD, RJ, RK); } define pcodeop sbc.b; #lbt.txt sbc.b mask=0x00320000 [@lbt] #0x00320000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sbc.b RD, RJ, RK is op15_31=0x64 & RD & RJ & RK { RD = sbc.b(RD, RJ, RK); } define pcodeop sbc.h; #lbt.txt sbc.h mask=0x00328000 [@lbt] #0x00328000 0xffff8000 r0:5, r5:5, r10:5 
['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sbc.h RD, RJ, RK is op15_31=0x65 & RD & RJ & RK { RD = sbc.h(RD, RJ, RK); } define pcodeop sbc.w; #lbt.txt sbc.w mask=0x00330000 [@lbt] #0x00330000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sbc.w RD, RJ, RK is op15_31=0x66 & RD & RJ & RK { RD = sbc.w(RD, RJ, RK); } define pcodeop sbc.d; #lbt.txt sbc.d mask=0x00338000 [@lbt] #0x00338000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sbc.d RD, RJ, RK is op15_31=0x67 & RD & RJ & RK { RD = sbc.d(RD, RJ, RK); } define pcodeop rcr.b; #lbt.txt rcr.b mask=0x00340000 [@lbt] #0x00340000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rcr.b RD, RJ, RK is op15_31=0x68 & RD & RJ & RK { RD = rcr.b(RD, RJ, RK); } define pcodeop rcr.h; #lbt.txt rcr.h mask=0x00348000 [@lbt] #0x00348000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rcr.h RD, RJ, RK is op15_31=0x69 & RD & RJ & RK { RD = rcr.h(RD, RJ, RK); } define pcodeop rcr.w; #lbt.txt rcr.w mask=0x00350000 [@lbt] #0x00350000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rcr.w RD, RJ, RK is op15_31=0x6a & RD & RJ & RK { RD = rcr.w(RD, RJ, RK); } define pcodeop rcr.d; #lbt.txt rcr.d mask=0x00358000 [@lbt] #0x00358000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rcr.d RD, RJ, RK is op15_31=0x6b & RD & RJ & RK { RD = rcr.d(RD, RJ, RK); } define pcodeop armmove; #lbt.txt armmove mask=0x00364000 [@lbt] #0x00364000 0xffffc000 r0:5, r5:5,u10:4 ['reg0_5_s0', 'reg5_5_s0', 'imm10_4_s0'] :armmove RD, RJ, imm10_4 is op14_31=0xd9 & RD & RJ & imm10_4 { RD = armmove(RD, RJ, imm10_4:$(REGSIZE)); } define pcodeop setx86j; #lbt.txt x86setj mask=0x00368000 [@lbt, @orig_name=setx86j] #0x00368000 0xffffc3e0 r0:5,u10:4 ['reg0_5_s0', 'imm10_4_s0'] :setx86j RD, imm10_4 is op14_31=0xda & op5_9=0x0 & RD & imm10_4 { RD = setx86j(RD, imm10_4:$(REGSIZE)); } define pcodeop setarmj; #lbt.txt armsetj mask=0x0036c000 [@lbt, 
@orig_name=setarmj] #0x0036c000 0xffffc3e0 r0:5,u10:4 ['reg0_5_s0', 'imm10_4_s0'] :setarmj RD, imm10_4 is op14_31=0xdb & op5_9=0x0 & RD & imm10_4 { RD = setarmj(RD, imm10_4:$(REGSIZE)); } define pcodeop armadd.w; #lbt.txt armadd.w mask=0x00370010 [@lbt] #0x00370010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armadd.w RJ, RK, imm0_4 is op15_31=0x6e & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armadd.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armsub.w; #lbt.txt armsub.w mask=0x00378010 [@lbt] #0x00378010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armsub.w RJ, RK, imm0_4 is op15_31=0x6f & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armsub.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armadc.w; #lbt.txt armadc.w mask=0x00380010 [@lbt] #0x00380010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armadc.w RJ, RK, imm0_4 is op15_31=0x70 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armadc.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armsbc.w; #lbt.txt armsbc.w mask=0x00388010 [@lbt] #0x00388010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armsbc.w RJ, RK, imm0_4 is op15_31=0x71 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armsbc.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armand.w; #lbt.txt armand.w mask=0x00390010 [@lbt] #0x00390010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armand.w RJ, RK, imm0_4 is op15_31=0x72 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armand.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armor.w; #lbt.txt armor.w mask=0x00398010 [@lbt] #0x00398010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armor.w RJ, RK, imm0_4 is op15_31=0x73 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armor.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armxor.w; #lbt.txt armxor.w mask=0x003a0010 [@lbt] #0x003a0010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armxor.w RJ, RK, imm0_4 is op15_31=0x74 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = 
armxor.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armsll.w; #lbt.txt armsll.w mask=0x003a8010 [@lbt] #0x003a8010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armsll.w RJ, RK, imm0_4 is op15_31=0x75 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armsll.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armsrl.w; #lbt.txt armsrl.w mask=0x003b0010 [@lbt] #0x003b0010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armsrl.w RJ, RK, imm0_4 is op15_31=0x76 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armsrl.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armsra.w; #lbt.txt armsra.w mask=0x003b8010 [@lbt] #0x003b8010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armsra.w RJ, RK, imm0_4 is op15_31=0x77 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armsra.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armrotr.w; #lbt.txt armrotr.w mask=0x003c0010 [@lbt] #0x003c0010 0xffff8010 r5:5, r10:5,u0:4 ['reg5_5_s0', 'reg10_5_s0', 'imm0_4_s0'] :armrotr.w RJ, RK, imm0_4 is op15_31=0x78 & op4_4=0x1 & RJ & RK & imm0_4 { RJ = armrotr.w(RJ, RK, imm0_4:$(REGSIZE)); } define pcodeop armslli.w; #lbt.txt armslli.w mask=0x003c8010 [@lbt, @orig_fmt=JUk5Ud4] #0x003c8010 0xffff8010 r5:5,u10:5,u0:4 ['reg5_5_s0', 'imm10_5_s0', 'imm0_4_s0'] :armslli.w RJ, imm0_4, imm10_5 is op15_31=0x79 & op4_4=0x1 & RJ & imm0_4 & imm10_5 { RJ = armslli.w(RJ, imm0_4:$(REGSIZE), imm10_5:$(REGSIZE)); } define pcodeop armsrli.w; #lbt.txt armsrli.w mask=0x003d0010 [@lbt, @orig_fmt=JUk5Ud4] #0x003d0010 0xffff8010 r5:5,u10:5,u0:4 ['reg5_5_s0', 'imm10_5_s0', 'imm0_4_s0'] :armsrli.w RJ, imm0_4, imm10_5 is op15_31=0x7a & op4_4=0x1 & RJ & imm0_4 & imm10_5 { RJ = armsrli.w(RJ, imm0_4:$(REGSIZE), imm10_5:$(REGSIZE)); } define pcodeop armsrai.w; #lbt.txt armsrai.w mask=0x003d8010 [@lbt, @orig_fmt=JUk5Ud4] #0x003d8010 0xffff8010 r5:5,u10:5,u0:4 ['reg5_5_s0', 'imm10_5_s0', 'imm0_4_s0'] :armsrai.w RJ, imm0_4, imm10_5 is op15_31=0x7b & op4_4=0x1 & RJ & imm0_4 & imm10_5 { RJ = armsrai.w(RJ, 
imm0_4:$(REGSIZE), imm10_5:$(REGSIZE)); } define pcodeop armrotri.w; #lbt.txt armrotri.w mask=0x003e0010 [@lbt, @orig_fmt=JUk5Ud4] #0x003e0010 0xffff8010 r5:5,u10:5,u0:4 ['reg5_5_s0', 'imm10_5_s0', 'imm0_4_s0'] :armrotri.w RJ, imm0_4, imm10_5 is op15_31=0x7c & op4_4=0x1 & RJ & imm0_4 & imm10_5 { RJ = armrotri.w(RJ, imm0_4:$(REGSIZE), imm10_5:$(REGSIZE)); } define pcodeop x86mul.b; #lbt.txt x86mul.b mask=0x003e8000 [@lbt] #0x003e8000 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.b RJ, RK is op15_31=0x7d & op0_4=0x0 & RJ & RK { RJ = x86mul.b(RJ, RK); } define pcodeop x86mul.h; #lbt.txt x86mul.h mask=0x003e8001 [@lbt] #0x003e8001 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.h RJ, RK is op15_31=0x7d & op0_4=0x1 & RJ & RK { RJ = x86mul.h(RJ, RK); } define pcodeop x86mul.w; #lbt.txt x86mul.w mask=0x003e8002 [@lbt] #0x003e8002 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.w RJ, RK is op15_31=0x7d & op0_4=0x2 & RJ & RK { RJ = x86mul.w(RJ, RK); } define pcodeop x86mul.d; #lbt.txt x86mul.d mask=0x003e8003 [@lbt] #0x003e8003 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.d RJ, RK is op15_31=0x7d & op0_4=0x3 & RJ & RK { RJ = x86mul.d(RJ, RK); } define pcodeop x86mul.bu; #lbt.txt x86mul.bu mask=0x003e8004 [@lbt] #0x003e8004 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.bu RJ, RK is op15_31=0x7d & op0_4=0x4 & RJ & RK { RJ = x86mul.bu(RJ, RK); } define pcodeop x86mul.hu; #lbt.txt x86mul.hu mask=0x003e8005 [@lbt] #0x003e8005 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.hu RJ, RK is op15_31=0x7d & op0_4=0x5 & RJ & RK { RJ = x86mul.hu(RJ, RK); } define pcodeop x86mul.wu; #lbt.txt x86mul.wu mask=0x003e8006 [@lbt] #0x003e8006 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.wu RJ, RK is op15_31=0x7d & op0_4=0x6 & RJ & RK { RJ = x86mul.wu(RJ, RK); } define pcodeop x86mul.du; #lbt.txt x86mul.du mask=0x003e8007 [@lbt] #0x003e8007 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86mul.du RJ, RK is 
# LBT x86-flag-emulation ALU stubs, continued (all decode-only calls to
# opaque user-defined pcodeops, updating RJ or RD in place):
#  - x86mul.du tail, then the op15_31=0x7e group: x86add.wu/du, x86sub.wu/du
#    (op0_4 0x0-0x3), x86add.* (0x4-0x7), x86sub.* (0x8-0xb), x86adc.*
#    (0xc-0xf), x86sbc.* (0x10-0x13), x86sll.* (0x14-0x17), x86srl.*
#    (0x18-0x1b), x86sra.* (0x1c-0x1f);
#  - the op15_31=0x7f group: x86rotr.*, x86rotl.*, x86rcr.*, x86rcl.*,
#    x86and.*, x86or.*, x86xor.*, plus armnot.w/armmov.w/armmov.d/armrrx.w;
#  - immediate forms: rotri.b/h, rcri.b/h/w/d, x86slli.b/x86srli.b/x86srai.b.
# NOTE(review): in the x86rotr family the .d variant is encoded at op0_4=0x2
# and .w at op0_4=0x3, unlike the b,h,w,d ordering used by every sibling
# family here — this may faithfully reproduce the documented LBT encoding,
# so confirm against the LoongArch LBT manual before "fixing" it.
# NOTE(review): original line breaks appear lost in extraction — SLEIGH '#'
# comments run to end of line, so the provenance comments here would swallow
# the constructors sharing their physical line; confirm against upstream
# lbt.sinc.
op15_31=0x7d & op0_4=0x7 & RJ & RK { RJ = x86mul.du(RJ, RK); } define pcodeop x86add.wu; #lbt.txt x86add.wu mask=0x003f0000 [@lbt] #0x003f0000 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86add.wu RJ, RK is op15_31=0x7e & op0_4=0x0 & RJ & RK { RJ = x86add.wu(RJ, RK); } define pcodeop x86add.du; #lbt.txt x86add.du mask=0x003f0001 [@lbt] #0x003f0001 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86add.du RJ, RK is op15_31=0x7e & op0_4=0x1 & RJ & RK { RJ = x86add.du(RJ, RK); } define pcodeop x86sub.wu; #lbt.txt x86sub.wu mask=0x003f0002 [@lbt] #0x003f0002 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sub.wu RJ, RK is op15_31=0x7e & op0_4=0x2 & RJ & RK { RJ = x86sub.wu(RJ, RK); } define pcodeop x86sub.du; #lbt.txt x86sub.du mask=0x003f0003 [@lbt] #0x003f0003 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sub.du RJ, RK is op15_31=0x7e & op0_4=0x3 & RJ & RK { RJ = x86sub.du(RJ, RK); } define pcodeop x86add.b; #lbt.txt x86add.b mask=0x003f0004 [@lbt] #0x003f0004 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86add.b RJ, RK is op15_31=0x7e & op0_4=0x4 & RJ & RK { RJ = x86add.b(RJ, RK); } define pcodeop x86add.h; #lbt.txt x86add.h mask=0x003f0005 [@lbt] #0x003f0005 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86add.h RJ, RK is op15_31=0x7e & op0_4=0x5 & RJ & RK { RJ = x86add.h(RJ, RK); } define pcodeop x86add.w; #lbt.txt x86add.w mask=0x003f0006 [@lbt] #0x003f0006 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86add.w RJ, RK is op15_31=0x7e & op0_4=0x6 & RJ & RK { RJ = x86add.w(RJ, RK); } define pcodeop x86add.d; #lbt.txt x86add.d mask=0x003f0007 [@lbt] #0x003f0007 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86add.d RJ, RK is op15_31=0x7e & op0_4=0x7 & RJ & RK { RJ = x86add.d(RJ, RK); } define pcodeop x86sub.b; #lbt.txt x86sub.b mask=0x003f0008 [@lbt] #0x003f0008 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sub.b RJ, RK is op15_31=0x7e & op0_4=0x8 & RJ & RK { RJ = x86sub.b(RJ, RK); } define pcodeop 
x86sub.h; #lbt.txt x86sub.h mask=0x003f0009 [@lbt] #0x003f0009 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sub.h RJ, RK is op15_31=0x7e & op0_4=0x9 & RJ & RK { RJ = x86sub.h(RJ, RK); } define pcodeop x86sub.w; #lbt.txt x86sub.w mask=0x003f000a [@lbt] #0x003f000a 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sub.w RJ, RK is op15_31=0x7e & op0_4=0xa & RJ & RK { RJ = x86sub.w(RJ, RK); } define pcodeop x86sub.d; #lbt.txt x86sub.d mask=0x003f000b [@lbt] #0x003f000b 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sub.d RJ, RK is op15_31=0x7e & op0_4=0xb & RJ & RK { RJ = x86sub.d(RJ, RK); } define pcodeop x86adc.b; #lbt.txt x86adc.b mask=0x003f000c [@lbt] #0x003f000c 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86adc.b RJ, RK is op15_31=0x7e & op0_4=0xc & RJ & RK { RJ = x86adc.b(RJ, RK); } define pcodeop x86adc.h; #lbt.txt x86adc.h mask=0x003f000d [@lbt] #0x003f000d 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86adc.h RJ, RK is op15_31=0x7e & op0_4=0xd & RJ & RK { RJ = x86adc.h(RJ, RK); } define pcodeop x86adc.w; #lbt.txt x86adc.w mask=0x003f000e [@lbt] #0x003f000e 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86adc.w RJ, RK is op15_31=0x7e & op0_4=0xe & RJ & RK { RJ = x86adc.w(RJ, RK); } define pcodeop x86adc.d; #lbt.txt x86adc.d mask=0x003f000f [@lbt] #0x003f000f 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86adc.d RJ, RK is op15_31=0x7e & op0_4=0xf & RJ & RK { RJ = x86adc.d(RJ, RK); } define pcodeop x86sbc.b; #lbt.txt x86sbc.b mask=0x003f0010 [@lbt] #0x003f0010 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sbc.b RJ, RK is op15_31=0x7e & op0_4=0x10 & RJ & RK { RJ = x86sbc.b(RJ, RK); } define pcodeop x86sbc.h; #lbt.txt x86sbc.h mask=0x003f0011 [@lbt] #0x003f0011 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sbc.h RJ, RK is op15_31=0x7e & op0_4=0x11 & RJ & RK { RJ = x86sbc.h(RJ, RK); } define pcodeop x86sbc.w; #lbt.txt x86sbc.w mask=0x003f0012 [@lbt] #0x003f0012 0xffff801f r5:5, r10:5 
['reg5_5_s0', 'reg10_5_s0'] :x86sbc.w RJ, RK is op15_31=0x7e & op0_4=0x12 & RJ & RK { RJ = x86sbc.w(RJ, RK); } define pcodeop x86sbc.d; #lbt.txt x86sbc.d mask=0x003f0013 [@lbt] #0x003f0013 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sbc.d RJ, RK is op15_31=0x7e & op0_4=0x13 & RJ & RK { RJ = x86sbc.d(RJ, RK); } define pcodeop x86sll.b; #lbt.txt x86sll.b mask=0x003f0014 [@lbt] #0x003f0014 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sll.b RJ, RK is op15_31=0x7e & op0_4=0x14 & RJ & RK { RJ = x86sll.b(RJ, RK); } define pcodeop x86sll.h; #lbt.txt x86sll.h mask=0x003f0015 [@lbt] #0x003f0015 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sll.h RJ, RK is op15_31=0x7e & op0_4=0x15 & RJ & RK { RJ = x86sll.h(RJ, RK); } define pcodeop x86sll.w; #lbt.txt x86sll.w mask=0x003f0016 [@lbt] #0x003f0016 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sll.w RJ, RK is op15_31=0x7e & op0_4=0x16 & RJ & RK { RJ = x86sll.w(RJ, RK); } define pcodeop x86sll.d; #lbt.txt x86sll.d mask=0x003f0017 [@lbt] #0x003f0017 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sll.d RJ, RK is op15_31=0x7e & op0_4=0x17 & RJ & RK { RJ = x86sll.d(RJ, RK); } define pcodeop x86srl.b; #lbt.txt x86srl.b mask=0x003f0018 [@lbt] #0x003f0018 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86srl.b RJ, RK is op15_31=0x7e & op0_4=0x18 & RJ & RK { RJ = x86srl.b(RJ, RK); } define pcodeop x86srl.h; #lbt.txt x86srl.h mask=0x003f0019 [@lbt] #0x003f0019 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86srl.h RJ, RK is op15_31=0x7e & op0_4=0x19 & RJ & RK { RJ = x86srl.h(RJ, RK); } define pcodeop x86srl.w; #lbt.txt x86srl.w mask=0x003f001a [@lbt] #0x003f001a 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86srl.w RJ, RK is op15_31=0x7e & op0_4=0x1a & RJ & RK { RJ = x86srl.w(RJ, RK); } define pcodeop x86srl.d; #lbt.txt x86srl.d mask=0x003f001b [@lbt] #0x003f001b 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86srl.d RJ, RK is op15_31=0x7e & op0_4=0x1b & RJ & RK { RJ = 
x86srl.d(RJ, RK); } define pcodeop x86sra.b; #lbt.txt x86sra.b mask=0x003f001c [@lbt] #0x003f001c 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sra.b RJ, RK is op15_31=0x7e & op0_4=0x1c & RJ & RK { RJ = x86sra.b(RJ, RK); } define pcodeop x86sra.h; #lbt.txt x86sra.h mask=0x003f001d [@lbt] #0x003f001d 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sra.h RJ, RK is op15_31=0x7e & op0_4=0x1d & RJ & RK { RJ = x86sra.h(RJ, RK); } define pcodeop x86sra.w; #lbt.txt x86sra.w mask=0x003f001e [@lbt] #0x003f001e 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sra.w RJ, RK is op15_31=0x7e & op0_4=0x1e & RJ & RK { RJ = x86sra.w(RJ, RK); } define pcodeop x86sra.d; #lbt.txt x86sra.d mask=0x003f001f [@lbt] #0x003f001f 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86sra.d RJ, RK is op15_31=0x7e & op0_4=0x1f & RJ & RK { RJ = x86sra.d(RJ, RK); } define pcodeop x86rotr.b; #lbt.txt x86rotr.b mask=0x003f8000 [@lbt] #0x003f8000 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotr.b RJ, RK is op15_31=0x7f & op0_4=0x0 & RJ & RK { RJ = x86rotr.b(RJ, RK); } define pcodeop x86rotr.h; #lbt.txt x86rotr.h mask=0x003f8001 [@lbt] #0x003f8001 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotr.h RJ, RK is op15_31=0x7f & op0_4=0x1 & RJ & RK { RJ = x86rotr.h(RJ, RK); } define pcodeop x86rotr.d; #lbt.txt x86rotr.d mask=0x003f8002 [@lbt] #0x003f8002 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotr.d RJ, RK is op15_31=0x7f & op0_4=0x2 & RJ & RK { RJ = x86rotr.d(RJ, RK); } define pcodeop x86rotr.w; #lbt.txt x86rotr.w mask=0x003f8003 [@lbt] #0x003f8003 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotr.w RJ, RK is op15_31=0x7f & op0_4=0x3 & RJ & RK { RJ = x86rotr.w(RJ, RK); } define pcodeop x86rotl.b; #lbt.txt x86rotl.b mask=0x003f8004 [@lbt] #0x003f8004 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotl.b RJ, RK is op15_31=0x7f & op0_4=0x4 & RJ & RK { RJ = x86rotl.b(RJ, RK); } define pcodeop x86rotl.h; #lbt.txt x86rotl.h 
mask=0x003f8005 [@lbt] #0x003f8005 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotl.h RJ, RK is op15_31=0x7f & op0_4=0x5 & RJ & RK { RJ = x86rotl.h(RJ, RK); } define pcodeop x86rotl.w; #lbt.txt x86rotl.w mask=0x003f8006 [@lbt] #0x003f8006 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotl.w RJ, RK is op15_31=0x7f & op0_4=0x6 & RJ & RK { RJ = x86rotl.w(RJ, RK); } define pcodeop x86rotl.d; #lbt.txt x86rotl.d mask=0x003f8007 [@lbt] #0x003f8007 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rotl.d RJ, RK is op15_31=0x7f & op0_4=0x7 & RJ & RK { RJ = x86rotl.d(RJ, RK); } define pcodeop x86rcr.b; #lbt.txt x86rcr.b mask=0x003f8008 [@lbt] #0x003f8008 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcr.b RJ, RK is op15_31=0x7f & op0_4=0x8 & RJ & RK { RJ = x86rcr.b(RJ, RK); } define pcodeop x86rcr.h; #lbt.txt x86rcr.h mask=0x003f8009 [@lbt] #0x003f8009 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcr.h RJ, RK is op15_31=0x7f & op0_4=0x9 & RJ & RK { RJ = x86rcr.h(RJ, RK); } define pcodeop x86rcr.w; #lbt.txt x86rcr.w mask=0x003f800a [@lbt] #0x003f800a 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcr.w RJ, RK is op15_31=0x7f & op0_4=0xa & RJ & RK { RJ = x86rcr.w(RJ, RK); } define pcodeop x86rcr.d; #lbt.txt x86rcr.d mask=0x003f800b [@lbt] #0x003f800b 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcr.d RJ, RK is op15_31=0x7f & op0_4=0xb & RJ & RK { RJ = x86rcr.d(RJ, RK); } define pcodeop x86rcl.b; #lbt.txt x86rcl.b mask=0x003f800c [@lbt] #0x003f800c 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcl.b RJ, RK is op15_31=0x7f & op0_4=0xc & RJ & RK { RJ = x86rcl.b(RJ, RK); } define pcodeop x86rcl.h; #lbt.txt x86rcl.h mask=0x003f800d [@lbt] #0x003f800d 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcl.h RJ, RK is op15_31=0x7f & op0_4=0xd & RJ & RK { RJ = x86rcl.h(RJ, RK); } define pcodeop x86rcl.w; #lbt.txt x86rcl.w mask=0x003f800e [@lbt] #0x003f800e 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] 
:x86rcl.w RJ, RK is op15_31=0x7f & op0_4=0xe & RJ & RK { RJ = x86rcl.w(RJ, RK); } define pcodeop x86rcl.d; #lbt.txt x86rcl.d mask=0x003f800f [@lbt] #0x003f800f 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86rcl.d RJ, RK is op15_31=0x7f & op0_4=0xf & RJ & RK { RJ = x86rcl.d(RJ, RK); } define pcodeop x86and.b; #lbt.txt x86and.b mask=0x003f8010 [@lbt] #0x003f8010 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86and.b RJ, RK is op15_31=0x7f & op0_4=0x10 & RJ & RK { RJ = x86and.b(RJ, RK); } define pcodeop x86and.h; #lbt.txt x86and.h mask=0x003f8011 [@lbt] #0x003f8011 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86and.h RJ, RK is op15_31=0x7f & op0_4=0x11 & RJ & RK { RJ = x86and.h(RJ, RK); } define pcodeop x86and.w; #lbt.txt x86and.w mask=0x003f8012 [@lbt] #0x003f8012 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86and.w RJ, RK is op15_31=0x7f & op0_4=0x12 & RJ & RK { RJ = x86and.w(RJ, RK); } define pcodeop x86and.d; #lbt.txt x86and.d mask=0x003f8013 [@lbt] #0x003f8013 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86and.d RJ, RK is op15_31=0x7f & op0_4=0x13 & RJ & RK { RJ = x86and.d(RJ, RK); } define pcodeop x86or.b; #lbt.txt x86or.b mask=0x003f8014 [@lbt] #0x003f8014 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86or.b RJ, RK is op15_31=0x7f & op0_4=0x14 & RJ & RK { RJ = x86or.b(RJ, RK); } define pcodeop x86or.h; #lbt.txt x86or.h mask=0x003f8015 [@lbt] #0x003f8015 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86or.h RJ, RK is op15_31=0x7f & op0_4=0x15 & RJ & RK { RJ = x86or.h(RJ, RK); } define pcodeop x86or.w; #lbt.txt x86or.w mask=0x003f8016 [@lbt] #0x003f8016 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86or.w RJ, RK is op15_31=0x7f & op0_4=0x16 & RJ & RK { RJ = x86or.w(RJ, RK); } define pcodeop x86or.d; #lbt.txt x86or.d mask=0x003f8017 [@lbt] #0x003f8017 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86or.d RJ, RK is op15_31=0x7f & op0_4=0x17 & RJ & RK { RJ = x86or.d(RJ, RK); } define pcodeop x86xor.b; 
#lbt.txt x86xor.b mask=0x003f8018 [@lbt] #0x003f8018 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86xor.b RJ, RK is op15_31=0x7f & op0_4=0x18 & RJ & RK { RJ = x86xor.b(RJ, RK); } define pcodeop x86xor.h; #lbt.txt x86xor.h mask=0x003f8019 [@lbt] #0x003f8019 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86xor.h RJ, RK is op15_31=0x7f & op0_4=0x19 & RJ & RK { RJ = x86xor.h(RJ, RK); } define pcodeop x86xor.w; #lbt.txt x86xor.w mask=0x003f801a [@lbt] #0x003f801a 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86xor.w RJ, RK is op15_31=0x7f & op0_4=0x1a & RJ & RK { RJ = x86xor.w(RJ, RK); } define pcodeop x86xor.d; #lbt.txt x86xor.d mask=0x003f801b [@lbt] #0x003f801b 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0'] :x86xor.d RJ, RK is op15_31=0x7f & op0_4=0x1b & RJ & RK { RJ = x86xor.d(RJ, RK); } define pcodeop armnot.w; #lbt.txt armnot.w mask=0x003fc01c [@lbt] #0x003fc01c 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :armnot.w RJ, imm10_4 is op14_31=0xff & op0_4=0x1c & RJ & imm10_4 { RJ = armnot.w(RJ, imm10_4:$(REGSIZE)); } define pcodeop armmov.w; #lbt.txt armmov.w mask=0x003fc01d [@lbt] #0x003fc01d 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :armmov.w RJ, imm10_4 is op14_31=0xff & op0_4=0x1d & RJ & imm10_4 { RJ = armmov.w(RJ, imm10_4:$(REGSIZE)); } define pcodeop armmov.d; #lbt.txt armmov.d mask=0x003fc01e [@lbt] #0x003fc01e 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :armmov.d RJ, imm10_4 is op14_31=0xff & op0_4=0x1e & RJ & imm10_4 { RJ = armmov.d(RJ, imm10_4:$(REGSIZE)); } define pcodeop armrrx.w; #lbt.txt armrrx.w mask=0x003fc01f [@lbt] #0x003fc01f 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :armrrx.w RJ, imm10_4 is op14_31=0xff & op0_4=0x1f & RJ & imm10_4 { RJ = armrrx.w(RJ, imm10_4:$(REGSIZE)); } define pcodeop rotri.b; #lbt.txt rotri.b mask=0x004c2000 [@lbt] #0x004c2000 0xffffe000 r0:5, r5:5,u10:3 ['reg0_5_s0', 'reg5_5_s0', 'imm10_3_s0'] :rotri.b RD, RJ, imm10_3 is op13_31=0x261 & RD & RJ & imm10_3 { RD = rotri.b(RD, 
RJ, imm10_3:$(REGSIZE)); } define pcodeop rotri.h; #lbt.txt rotri.h mask=0x004c4000 [@lbt] #0x004c4000 0xffffc000 r0:5, r5:5,u10:4 ['reg0_5_s0', 'reg5_5_s0', 'imm10_4_s0'] :rotri.h RD, RJ, imm10_4 is op14_31=0x131 & RD & RJ & imm10_4 { RD = rotri.h(RD, RJ, imm10_4:$(REGSIZE)); } define pcodeop rcri.b; #lbt.txt rcri.b mask=0x00502000 [@lbt] #0x00502000 0xffffe000 r0:5, r5:5,u10:3 ['reg0_5_s0', 'reg5_5_s0', 'imm10_3_s0'] :rcri.b RD, RJ, imm10_3 is op13_31=0x281 & RD & RJ & imm10_3 { RD = rcri.b(RD, RJ, imm10_3:$(REGSIZE)); } define pcodeop rcri.h; #lbt.txt rcri.h mask=0x00504000 [@lbt] #0x00504000 0xffffc000 r0:5, r5:5,u10:4 ['reg0_5_s0', 'reg5_5_s0', 'imm10_4_s0'] :rcri.h RD, RJ, imm10_4 is op14_31=0x141 & RD & RJ & imm10_4 { RD = rcri.h(RD, RJ, imm10_4:$(REGSIZE)); } define pcodeop rcri.w; #lbt.txt rcri.w mask=0x00508000 [@lbt] #0x00508000 0xffff8000 r0:5, r5:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm10_5_s0'] :rcri.w RD, RJ, imm10_5 is op15_31=0xa1 & RD & RJ & imm10_5 { RD = rcri.w(RD, RJ, imm10_5:$(REGSIZE)); } define pcodeop rcri.d; #lbt.txt rcri.d mask=0x00510000 [@lbt] #0x00510000 0xffff0000 r0:5, r5:5,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm10_6_s0'] :rcri.d RD, RJ, imm10_6 is op16_31=0x51 & RD & RJ & imm10_6 { RD = rcri.d(RD, RJ, imm10_6:$(REGSIZE)); } define pcodeop x86slli.b; #lbt.txt x86slli.b mask=0x00542000 [@lbt] #0x00542000 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86slli.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0x0 & RJ & imm10_3 { RJ = x86slli.b(RJ, imm10_3:$(REGSIZE)); } define pcodeop x86srli.b; #lbt.txt x86srli.b mask=0x00542004 [@lbt] #0x00542004 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86srli.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0x4 & RJ & imm10_3 { RJ = x86srli.b(RJ, imm10_3:$(REGSIZE)); } define pcodeop x86srai.b; #lbt.txt x86srai.b mask=0x00542008 [@lbt] #0x00542008 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86srai.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0x8 & RJ & imm10_3 { RJ = x86srai.b(RJ, imm10_3:$(REGSIZE)); } 
define pcodeop x86rotri.b; #lbt.txt x86rotri.b mask=0x0054200c [@lbt] #0x0054200c 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86rotri.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0xc & RJ & imm10_3 { RJ = x86rotri.b(RJ, imm10_3:$(REGSIZE)); } define pcodeop x86rcri.b; #lbt.txt x86rcri.b mask=0x00542010 [@lbt] #0x00542010 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86rcri.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0x10 & RJ & imm10_3 { RJ = x86rcri.b(RJ, imm10_3:$(REGSIZE)); } define pcodeop x86rotli.b; #lbt.txt x86rotli.b mask=0x00542014 [@lbt] #0x00542014 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86rotli.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0x14 & RJ & imm10_3 { RJ = x86rotli.b(RJ, imm10_3:$(REGSIZE)); } define pcodeop x86rcli.b; #lbt.txt x86rcli.b mask=0x00542018 [@lbt] #0x00542018 0xffffe01f r5:5,u10:3 ['reg5_5_s0', 'imm10_3_s0'] :x86rcli.b RJ, imm10_3 is op13_31=0x2a1 & op0_4=0x18 & RJ & imm10_3 { RJ = x86rcli.b(RJ, imm10_3:$(REGSIZE)); } define pcodeop x86slli.h; #lbt.txt x86slli.h mask=0x00544001 [@lbt] #0x00544001 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86slli.h RJ, imm10_4 is op14_31=0x151 & op0_4=0x1 & RJ & imm10_4 { RJ = x86slli.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86srli.h; #lbt.txt x86srli.h mask=0x00544005 [@lbt] #0x00544005 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86srli.h RJ, imm10_4 is op14_31=0x151 & op0_4=0x5 & RJ & imm10_4 { RJ = x86srli.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86srai.h; #lbt.txt x86srai.h mask=0x00544009 [@lbt] #0x00544009 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86srai.h RJ, imm10_4 is op14_31=0x151 & op0_4=0x9 & RJ & imm10_4 { RJ = x86srai.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86rotri.h; #lbt.txt x86rotri.h mask=0x0054400d [@lbt] #0x0054400d 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86rotri.h RJ, imm10_4 is op14_31=0x151 & op0_4=0xd & RJ & imm10_4 { RJ = x86rotri.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86rcri.h; #lbt.txt x86rcri.h 
mask=0x00544011 [@lbt] #0x00544011 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86rcri.h RJ, imm10_4 is op14_31=0x151 & op0_4=0x11 & RJ & imm10_4 { RJ = x86rcri.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86rotli.h; #lbt.txt x86rotli.h mask=0x00544015 [@lbt] #0x00544015 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86rotli.h RJ, imm10_4 is op14_31=0x151 & op0_4=0x15 & RJ & imm10_4 { RJ = x86rotli.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86rcli.h; #lbt.txt x86rcli.h mask=0x00544019 [@lbt] #0x00544019 0xffffc01f r5:5,u10:4 ['reg5_5_s0', 'imm10_4_s0'] :x86rcli.h RJ, imm10_4 is op14_31=0x151 & op0_4=0x19 & RJ & imm10_4 { RJ = x86rcli.h(RJ, imm10_4:$(REGSIZE)); } define pcodeop x86slli.w; #lbt.txt x86slli.w mask=0x00548002 [@lbt] #0x00548002 0xffff801f r5:5,u10:5 ['reg5_5_s0', 'imm10_5_s0'] :x86slli.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0x2 & RJ & imm10_5 { RJ = x86slli.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86srli.w; #lbt.txt x86srli.w mask=0x00548006 [@lbt] #0x00548006 0xffff801f r5:5,u10:5 ['reg5_5_s0', 'imm10_5_s0'] :x86srli.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0x6 & RJ & imm10_5 { RJ = x86srli.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86srai.w; #lbt.txt x86srai.w mask=0x0054800a [@lbt] #0x0054800a 0xffff801f r5:5,u10:5 ['reg5_5_s0', 'imm10_5_s0'] :x86srai.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0xa & RJ & imm10_5 { RJ = x86srai.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86rotri.w; #lbt.txt x86rotri.w mask=0x0054800e [@lbt] #0x0054800e 0xffff801f r5:5,u10:5 ['reg5_5_s0', 'imm10_5_s0'] :x86rotri.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0xe & RJ & imm10_5 { RJ = x86rotri.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86rcri.w; #lbt.txt x86rcri.w mask=0x00548012 [@lbt] #0x00548012 0xffff801f r5:5,u10:5 ['reg5_5_s0', 'imm10_5_s0'] :x86rcri.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0x12 & RJ & imm10_5 { RJ = x86rcri.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86rotli.w; #lbt.txt x86rotli.w mask=0x00548016 [@lbt] #0x00548016 0xffff801f r5:5,u10:5 
['reg5_5_s0', 'imm10_5_s0'] :x86rotli.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0x16 & RJ & imm10_5 { RJ = x86rotli.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86rcli.w; #lbt.txt x86rcli.w mask=0x0054801a [@lbt] #0x0054801a 0xffff801f r5:5,u10:5 ['reg5_5_s0', 'imm10_5_s0'] :x86rcli.w RJ, imm10_5 is op15_31=0xa9 & op0_4=0x1a & RJ & imm10_5 { RJ = x86rcli.w(RJ, imm10_5:$(REGSIZE)); } define pcodeop x86slli.d; #lbt.txt x86slli.d mask=0x00550003 [@lbt] #0x00550003 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86slli.d RJ, imm10_6 is op16_31=0x55 & op0_4=0x3 & RJ & imm10_6 { RJ = x86slli.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86srli.d; #lbt.txt x86srli.d mask=0x00550007 [@lbt] #0x00550007 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86srli.d RJ, imm10_6 is op16_31=0x55 & op0_4=0x7 & RJ & imm10_6 { RJ = x86srli.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86srai.d; #lbt.txt x86srai.d mask=0x0055000b [@lbt] #0x0055000b 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86srai.d RJ, imm10_6 is op16_31=0x55 & op0_4=0xb & RJ & imm10_6 { RJ = x86srai.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86rotri.d; #lbt.txt x86rotri.d mask=0x0055000f [@lbt] #0x0055000f 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86rotri.d RJ, imm10_6 is op16_31=0x55 & op0_4=0xf & RJ & imm10_6 { RJ = x86rotri.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86rcri.d; #lbt.txt x86rcri.d mask=0x00550013 [@lbt] #0x00550013 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86rcri.d RJ, imm10_6 is op16_31=0x55 & op0_4=0x13 & RJ & imm10_6 { RJ = x86rcri.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86rotli.d; #lbt.txt x86rotli.d mask=0x00550017 [@lbt] #0x00550017 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86rotli.d RJ, imm10_6 is op16_31=0x55 & op0_4=0x17 & RJ & imm10_6 { RJ = x86rotli.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86rcli.d; #lbt.txt x86rcli.d mask=0x0055001b [@lbt] #0x0055001b 0xffff001f r5:5,u10:6 ['reg5_5_s0', 'imm10_6_s0'] :x86rcli.d RJ, imm10_6 is 
op16_31=0x55 & op0_4=0x1b & RJ & imm10_6 { RJ = x86rcli.d(RJ, imm10_6:$(REGSIZE)); } define pcodeop x86settag; #lbt.txt x86settag mask=0x00580000 [@lbt] #0x00580000 0xfffc0000 r0:5,u5:5,u10:8 ['reg0_5_s0', 'imm5_5_s0', 'imm10_8_s0'] :x86settag RD, imm5_5, imm10_8 is op18_31=0x16 & RD & imm5_5 & imm10_8 { RD = x86settag(RD, imm5_5:$(REGSIZE), imm10_8:$(REGSIZE)); } define pcodeop x86mfflag; #lbt.txt x86mfflag mask=0x005c0000 [@lbt] #0x005c0000 0xfffc03e0 r0:5,u10:8 ['reg0_5_s0', 'imm10_8_s0'] :x86mfflag RD, imm10_8 is op18_31=0x17 & op5_9=0x0 & RD & imm10_8 { RD = x86mfflag(RD, imm10_8:$(REGSIZE)); } define pcodeop x86mtflag; #lbt.txt x86mtflag mask=0x005c0020 [@lbt] #0x005c0020 0xfffc03e0 r0:5,u10:8 ['reg0_5_s0', 'imm10_8_s0'] :x86mtflag RD, imm10_8 is op18_31=0x17 & op5_9=0x1 & RD & imm10_8 { RD = x86mtflag(RD, imm10_8:$(REGSIZE)); } define pcodeop armmfflag; #lbt.txt armmfflag mask=0x005c0040 [@lbt] #0x005c0040 0xfffc03e0 r0:5,u10:8 ['reg0_5_s0', 'imm10_8_s0'] :armmfflag RD, imm10_8 is op18_31=0x17 & op5_9=0x2 & RD & imm10_8 { RD = armmfflag(RD, imm10_8:$(REGSIZE)); } define pcodeop armmtflag; #lbt.txt armmtflag mask=0x005c0060 [@lbt] #0x005c0060 0xfffc03e0 r0:5,u10:8 ['reg0_5_s0', 'imm10_8_s0'] :armmtflag RD, imm10_8 is op18_31=0x17 & op5_9=0x3 & RD & imm10_8 { RD = armmtflag(RD, imm10_8:$(REGSIZE)); } define pcodeop fcvt.ld.d; #lbt.txt fcvt.ld.d mask=0x0114e000 [@lbt] #0x0114e000 0xfffffc00 f0:5,f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fcvt.ld.d drD,drJ is op10_31=0x4538 & drD & drJ { drD = fcvt.ld.d(drD, drJ); } define pcodeop fcvt.ud.d; #lbt.txt fcvt.ud.d mask=0x0114e400 [@lbt] #0x0114e400 0xfffffc00 f0:5,f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fcvt.ud.d drD,drJ is op10_31=0x4539 & drD & drJ { drD = fcvt.ud.d(drD, drJ); } define pcodeop fcvt.d.ld; #lbt.txt fcvt.d.ld mask=0x01150000 [@lbt] #0x01150000 0xffff8000 f0:5,f5:5,f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fcvt.d.ld drD,drJ,drK is op16_31=0x115 & drD & drJ & drK { drD = fcvt.d.ld(drD, drJ, drK); } define 
pcodeop ldl.w; #lbt.txt ldl.w mask=0x2e000000 [@lbt] #0x2e000000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ldl.w RD, RJ, simm10_12 is op22_31=0xb8 & RD & RJ & simm10_12 { RD = ldl.w(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop ldr.w; #lbt.txt ldr.w mask=0x2e400000 [@lbt] #0x2e400000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ldr.w RD, RJ, simm10_12 is op22_31=0xb9 & RD & RJ & simm10_12 { RD = ldr.w(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop ldl.d; #lbt.txt ldl.d mask=0x2e800000 [@lbt] #0x2e800000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ldl.d RD, RJ, simm10_12 is op22_31=0xba & RD & RJ & simm10_12 { RD = ldl.d(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop ldr.d; #lbt.txt ldr.d mask=0x2ec00000 [@lbt] #0x2ec00000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ldr.d RD, RJ, simm10_12 is op22_31=0xbb & RD & RJ & simm10_12 { RD = ldr.d(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop stl.w; #lbt.txt stl.w mask=0x2f000000 [@lbt] #0x2f000000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :stl.w RD, RJ, simm10_12 is op22_31=0xbc & RD & RJ & simm10_12 { RD = stl.w(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop str.w; #lbt.txt str.w mask=0x2f400000 [@lbt] #0x2f400000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :str.w RD, RJ, simm10_12 is op22_31=0xbd & RD & RJ & simm10_12 { RD = str.w(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop stl.d; #lbt.txt stl.d mask=0x2f800000 [@lbt] #0x2f800000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :stl.d RD, RJ, simm10_12 is op22_31=0xbe & RD & RJ & simm10_12 { RD = stl.d(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop str.d; #lbt.txt str.d mask=0x2fc00000 [@lbt] #0x2fc00000 0xffc00000 r0:5, r5:5, so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :str.d RD, RJ, simm10_12 is op22_31=0xbf & RD & RJ & simm10_12 { RD 
= str.d(RD, RJ, simm10_12:$(REGSIZE)); } define pcodeop jiscr0; #lbt.txt jiscr0 mask=0x48000200 [@lbt, @orig_fmt=Sd5k16ps2] #0x48000200 0xfc0003e0 s0:5|10:16<<2 ['simm0_0_s2'] :jiscr0 Rel26 is op26_31=0x12 & op5_9=0x10 & Rel26 { jiscr0(Rel26); } define pcodeop jiscr1; #lbt.txt jiscr1 mask=0x48000300 [@lbt, @orig_fmt=Sd5k16ps2] #0x48000300 0xfc0003e0 s0:5|10:16<<2 ['simm0_0_s2'] :jiscr1 Rel26 is op26_31=0x12 & op5_9=0x18 & Rel26 { jiscr1(Rel26); } ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch.ldefs ================================================ Loongson 3 32-bit with 32-bit FP Loongson 3 32-bit with 64-bit FP Loongson 3 64-bit with 32-bit FP Loongson 3 64-bit with 64-bit FP ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch.opinion ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch32.pspec ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch32_f32.slaspec ================================================ @define REGSIZE 4 @define FREGSIZE 4 @define ADDRSIZE 4 @include "loongarch_main.sinc" @include "loongarch32_instructions.sinc" @include "loongarch_float.sinc" @include "loongarch_double.sinc" @include "lasx.sinc" @include "lbt.sinc" @include "lsx.sinc" @include "lvz.sinc" ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch32_f64.slaspec ================================================ @define REGSIZE 4 @define FREGSIZE 8 @define ADDRSIZE 4 @include "loongarch_main.sinc" @include "loongarch32_instructions.sinc" @include "loongarch_float.sinc" @include "loongarch_double.sinc" #@include "lasx.sinc" #@include "lbt.sinc" #@include "lsx.sinc" #@include "lvz.sinc" 
================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch32_instructions.sinc ================================================ #################### # Base Instructions #################### #la-base-32.txt add.w mask=0x00100000 [@la32, @primary, @qemu] #0x00100000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :add.w RD, RJ32src, RK32src is op15_31=0x20 & RD & RJ32src & RK32src { local add1:4 = RJ32src; local add2:4 = RK32src; local result = add1 + add2; RD = sext(result); } #la-base-32.txt addi.w mask=0x02800000 [@la32, @primary, @qemu] #0x02800000 0xffc00000 r0:5,r5:5,s10:12 ['reg0_5_s0', 'reg5_5_s0', 'simm10_12_s0'] :addi.w RD, RJ32src, simm10_12 is op22_31=0xa & RD & RJ32src & simm10_12 { local add1:4 = RJ32src; local add2:4 = simm10_12; local result = add1 + add2; RD = sext(result); } #la-bitops-32.txt sladd.w mask=0x00040000 [@orig_name=alsl.w, @orig_fmt=DJKUa2pp1, @la32] #0x00040000 0xfffe0000 r0:5,r5:5,r10:5,u15:2+1 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0', 'imm15_2+1_s0'] :alsl.w RD, RJ32src, RK32src, alsl_shift is op17_31=0x2 & RD & RJ32src & RK32src & alsl_shift { local result:4 = (RJ32src << alsl_shift) + RK32src; RD = sext(result); } #la-base-32.txt and mask=0x00148000 [@la32, @primary, @qemu] #0x00148000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :and RD, RJsrc, RKsrc is op15_31=0x29 & RD & RJsrc & RKsrc { RD = RJsrc & RKsrc; } #la-base-32.txt andi mask=0x03400000 [@la32, @primary, @qemu] #0x03400000 0xffc00000 r0:5,r5:5,u10:12 ['reg0_5_s0', 'reg5_5_s0', 'imm10_12_s0'] :andi RD, RJsrc, imm10_12 is op22_31=0xd & RD & RJsrc & imm10_12 { RD = RJsrc & imm10_12; } # alias of andi r0, r0, 0 :nop is op22_31=0xd & rD=0 & rJ=0 & imm10_12=0 { } #la-base-32.txt andn mask=0x00168000 [@la32, @primary, @qemu] #0x00168000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :andn RD, RJsrc, RKsrc is op15_31=0x2d & RD & RJsrc & RKsrc { RD = RJsrc & ~(RKsrc); 
} #la-base-32.txt break mask=0x002a0000 [@la32, @primary] #0x002a0000 0xffff8000 u0:15 ['imm0_15_s0'] :break imm0_15 is op15_31=0x54 & imm0_15 { local code:2 = imm0_15; local addr:$(REGSIZE) = break(code); goto [addr]; } #la-base-32.txt cpucfg mask=0x00006c00 [@la32] #0x00006c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :cpucfg RD, RJ32src is op10_31=0x1b & RD & RJ32src { RD = cpucfg(RJ32src); } #la-base-32.txt dbgcall mask=0x002a8000 [@orig_name=dbcl] #0x002a8000 0xffff8000 u0:15 ['imm0_15_s0'] :dbcl imm0_15 is op15_31=0x55 & imm0_15 { local code:2 = imm0_15; dbcl(code); } #la-mul-32.txt div.w mask=0x00200000 [@la32, @primary, @qemu] #0x00200000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :div.w RD, RJ32src, RK32src is op15_31=0x40 & RD & RJ32src & RK32src { tmp:4 = RJ32src s/ RK32src; RD = sext(tmp); } #la-mul-32.txt div.wu mask=0x00210000 [@la32, @primary, @qemu] #0x00210000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :div.wu RD, RJ32src, RK32src is op15_31=0x42 & RD & RJ32src & RK32src { tmp:4 = RJ32src / RK32src; RD = sext(tmp); } #la-base-32.txt sext.b mask=0x00005c00 [@orig_name=ext.w.b, @la32, @qemu] #0x00005c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :ext.w.b RD, RJsrc is op10_31=0x17 & RD & RJsrc { local tmp:1 = RJsrc(0); RD = sext(tmp); } #la-base-32.txt sext.h mask=0x00005800 [@orig_name=ext.w.h, @la32, @qemu] #0x00005800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :ext.w.h RD, RJsrc is op10_31=0x16 & RD & RJsrc { local tmp:2 = RJsrc(0); RD = sext(tmp); } #la-base-32.txt lu12i.w mask=0x14000000 [@la32, @primary, @qemu] #0x14000000 0xfe000000 r0:5,s5:20 ['reg0_5_s0', 'simm5_20_s0'] :lu12i.w RD, simm12i is op25_31=0xa & RD & simm12i { RD = sext(simm12i); } #la-base-32.txt maskeqz mask=0x00130000 [@la32, @qemu] #0x00130000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :maskeqz RD, RJsrc, RKsrc is op15_31=0x26 & RD & RJsrc & RKsrc { local test = (RKsrc == 0); RD = 
(zext(test) * 0) + (zext(!test) * RJsrc); } #la-base-32.txt masknez mask=0x00138000 [@la32, @qemu] #0x00138000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :masknez RD, RJsrc, RKsrc is op15_31=0x27 & RD & RJsrc & RKsrc { local test = (RKsrc != 0); RD = (zext(test) * 0) + (zext(!test) * RJsrc); } #la-mul-32.txt mod.w mask=0x00208000 [@la32, @primary, @qemu] #0x00208000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mod.w RD, RJ32src, RK32src is op15_31=0x41 & RD & RJ32src & RK32src { tmp:4 = RJ32src s% RK32src; RD = sext(tmp); } #la-mul-32.txt mod.wu mask=0x00218000 [@la32, @primary, @qemu] #0x00218000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mod.wu RD, RJ32src, RK32src is op15_31=0x43 & RD & RJ32src & RK32src { tmp:4 = RJ32src % RK32src; RD = sext(tmp); } #la-mul-32.txt mul.w mask=0x001c0000 [@la32, @primary, @qemu] #0x001c0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mul.w RD, RJ32src, RK32src is op15_31=0x38 & RD & RJ32src & RK32src { tmp1:8 = sext( RJ32src ); tmp2:8 = sext( RK32src ); prod:8 = tmp1 * tmp2; RD = sext( prod:4 ); } #la-mul-32.txt mulh.w mask=0x001c8000 [@la32, @primary, @qemu] #0x001c8000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mulh.w RD, RJ32src, RK32src is op15_31=0x39 & RD & RJ32src & RK32src { tmp1:8 = sext( RJ32src ); tmp2:8 = sext( RK32src ); prod:8 = tmp1 * tmp2; prod = prod >> 32; RD = sext( prod:4 ); } #la-mul-32.txt mulh.wu mask=0x001d0000 [@la32, @primary, @qemu] #0x001d0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mulh.wu RD, RJ32src, RK32src is op15_31=0x3a & RD & RJ32src & RK32src { tmp1:8 = zext( RJ32src ); tmp2:8 = zext( RK32src ); prod:8 = tmp1 * tmp2; prod = prod >> 32; RD = sext( prod:4 ); } #la-base-32.txt nor mask=0x00140000 [@la32, @primary, @qemu] #0x00140000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :nor RD, RJsrc, RKsrc is op15_31=0x28 & RD & 
RJsrc & RKsrc { RD = ~(RJsrc | RKsrc); } #la-base-32.txt or mask=0x00150000 [@la32, @primary, @qemu] #0x00150000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :or RD, RJsrc, RKsrc is op15_31=0x2a & RD & RJsrc & RKsrc { RD = RJsrc | RKsrc; } # alias of or rd, rj, zero :move RD, RJsrc is op15_31=0x2a & RD & RJsrc & rK=0 { RD = RJsrc; } #la-base-32.txt ori mask=0x03800000 [@la32, @primary, @qemu] #0x03800000 0xffc00000 r0:5,r5:5,u10:12 ['reg0_5_s0', 'reg5_5_s0', 'imm10_12_s0'] :ori RD, RJsrc, imm10_12 is op22_31=0xe & RD & RJsrc & imm10_12 { RD = RJsrc | imm10_12; } #la-base-32.txt orn mask=0x00160000 [@la32, @primary, @qemu] #0x00160000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :orn RD, RJsrc, RKsrc is op15_31=0x2c & RD & RJsrc & RKsrc { RD = RJsrc | ~(RKsrc); } #la-base-32.txt pcaddu2i mask=0x18000000 [@orig_name=pcaddi, @la32, @primary, @qemu] #0x18000000 0xfe000000 r0:5,s5:20 ['reg0_5_s0', 'simm5_20_s0'] :pcaddi RD,pcadd2 is op25_31=0xc & RD & pcadd2 { RD = pcadd2; } #la-base-32.txt pcalau12i mask=0x1a000000 [@la32, @qemu] #0x1a000000 0xfe000000 r0:5,s5:20 ['reg0_5_s0', 'simm5_20_s0'] :pcalau12i RD, pcala12 is op25_31=0xd & RD & pcala12 { RD = pcala12; } #la-base-32.txt pcaddu12i mask=0x1c000000 [@la32, @primary, @qemu] #0x1c000000 0xfe000000 r0:5,s5:20 ['reg0_5_s0', 'simm5_20_s0'] :pcaddu12i RD, pcadd12 is op25_31=0xe & RD & pcadd12 { RD = pcadd12; } #la-base-32.txt pcaddu18i mask=0x1e000000 [@qemu] #0x1e000000 0xfe000000 r0:5,s5:20 ['reg0_5_s0', 'simm5_20_s0'] :pcaddu18i RD, pcadd18 is op25_31=0xf & RD & pcadd18 { RD = pcadd18; } #la-base-32.txt rdtimel.w mask=0x00006000 [@la32, @primary] #0x00006000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :rdtimel.w RD32, RJ32 is op10_31=0x18 & RD32 & RJ32 { local tmp:1 = 0; RD32 = rdtime.counter(tmp); RJ32 = rdtime.counterid(tmp); } #la-base-32.txt rdtimeh.w mask=0x00006400 [@la32, @primary] #0x00006400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :rdtimeh.w RD32, RJ32 
is op10_31=0x19 & RD32 & RJ32 { local tmp:1 = 1; RD32 = rdtime.counter(tmp); RJ32 = rdtime.counterid(tmp); } #la-base-32.txt rotr.w mask=0x001b0000 [@la32, @qemu] #0x001b0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :rotr.w RD, RJ32src, RK32src is op15_31=0x36 & RD & RJ32src & RK32src { local shift:1 = RK32src(0) & 0x1f; local tmp1:4 = RJ32src s>> shift; local tmp2:4 = RJ32src << (32 - shift); local result = tmp1 + tmp2; RD = sext(result); } #la-base-32.txt rotri.w mask=0x004c8000 [@la32, @qemu] #0x004c8000 0xffff8000 r0:5,r5:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm10_5_s0'] :rotri.w RD, RJ32src, imm10_5 is op15_31=0x99 & RD & RJ32src & imm10_5 { local shift:1 = imm10_5; local tmp1:4 = RJ32src s>> shift; local tmp2:4 = RJ32src << (32 - shift); local result = tmp1 + tmp2; RD = sext(result); } #la-base-32.txt sll.w mask=0x00170000 [@la32, @primary, @qemu] #0x00170000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sll.w RD, RJ32src, RK32src is op15_31=0x2e & RD & RJ32src & RK32src { local shift:1 = RK32src(0) & 0x1f; local result:4 = RJ32src << shift; RD = sext(result); } #la-base-32.txt slli.w mask=0x00408000 [@la32, @primary, @qemu] #0x00408000 0xffff8000 r0:5,r5:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm10_5_s0'] :slli.w RD, RJ32src, imm10_5 is op15_31=0x81 & RD & RJ32src& imm10_5 { local shift:1 = imm10_5 & 0x1f; local result:4 = RJ32src << shift; RD = sext(result); } #la-base-32.txt slt mask=0x00120000 [@la32, @primary, @qemu] #0x00120000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :slt RD, RJsrc, RKsrc is op15_31=0x24 & RD & RJsrc & RKsrc { RD = zext( RJsrc s< RKsrc ); } #la-base-32.txt sltu mask=0x00128000 [@la32, @primary, @qemu] #0x00128000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sltu RD, RJsrc, RKsrc is op15_31=0x25 & RD & RJsrc & RKsrc { RD = zext( RJsrc < RKsrc ); } #la-base-32.txt slti mask=0x02000000 [@la32, @primary, @qemu] #0x02000000 0xffc00000 
r0:5,r5:5,s10:12 ['reg0_5_s0', 'reg5_5_s0', 'simm10_12_s0'] :slti RD, RJsrc, simm10_12 is op22_31=0x8 & RD & RJsrc & simm10_12 { RD = zext( RJsrc s< simm10_12 ); } #la-base-32.txt sltui mask=0x02400000 [@la32, @primary, @qemu] #0x02400000 0xffc00000 r0:5,r5:5,s10:12 ['reg0_5_s0', 'reg5_5_s0', 'simm10_12_s0'] :sltui RD, RJsrc, simm10_12 is op22_31=0x9 & RD & RJsrc & simm10_12 { RD = zext( RJsrc < simm10_12 ); } #la-base-32.txt srl.w mask=0x00178000 [@la32, @primary, @qemu] #0x00178000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :srl.w RD, RJ32src, RK32src is op15_31=0x2f & RD & RJ32src & RK32src { local shift:1 = RK32src(0) & 0x1f; local result:4 = RJ32src >> shift; RD = sext(result); } #la-base-32.txt srli.w mask=0x00448000 [@la32, @primary, @qemu] #0x00448000 0xffff8000 r0:5,r5:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm10_5_s0'] :srli.w RD, RJ32src, imm10_5 is op15_31=0x89 & RD & RJ32src & imm10_5 { local shift:1 = imm10_5 & 0x1f; local result:4 = RJ32src >> shift; RD = sext(result); } #la-base-32.txt sra.w mask=0x00180000 [@la32, @primary, @qemu] #0x00180000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sra.w RD, RJ32src, RK32src is op15_31=0x30 & RD & RJ32src & RK32src { local shift:1 = RK32src(0) & 0x1f; local result:4 = RJ32src s>> shift; RD = sext(result); } #la-base-32.txt srai.w mask=0x00488000 [@la32, @primary, @qemu] #0x00488000 0xffff8000 r0:5,r5:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm10_5_s0'] :srai.w RD, RJ32src, imm10_5 is op15_31=0x91 & RD & RJ32src & imm10_5 { local shift:1 = imm10_5 & 0x1f; local result:4 = RJ32src s>> shift; RD = sext(result); } #la-base-32.txt sub.w mask=0x00110000 [@la32, @primary, @qemu] #0x00110000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :sub.w RD, RJ32src, RK32src is op15_31=0x22 & RD & RJ32src & RK32src { local sub1:4 = RJ32src; local sub2:4 = RK32src; local result = sub1 - sub2; RD = sext(result); } #la-base-32.txt syscall mask=0x002b0000 [@la32, 
@primary] #0x002b0000 0xffff8000 u0:15 ['imm0_15_s0'] :syscall imm0_15 is op15_31=0x56 & imm0_15 { local code:2 = imm0_15; syscall(code); } #la-base-32.txt xor mask=0x00158000 [@la32, @primary, @qemu] #0x00158000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :xor RD, RJsrc, RKsrc is op15_31=0x2b & RD & RJsrc & RKsrc { RD = RJsrc ^ RKsrc; } #la-base-32.txt xori mask=0x03c00000 [@la32, @primary, @qemu] #0x03c00000 0xffc00000 r0:5,r5:5,u10:12 ['reg0_5_s0', 'reg5_5_s0', 'imm10_12_s0'] :xori RD, RJsrc, imm10_12 is op22_31=0xf & RD & RJsrc & imm10_12 { RD = RJsrc ^ imm10_12; } ########################## # Load/Store Instructions ########################## #la-base-32.txt ldox4.w mask=0x24000000 [@orig_name=ldptr.w, @orig_fmt=DJSk14ps2] #0x24000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0'] :ldptr.w RD, ldstptr_addr is op24_31=0x24 & RD & RJsrc & ldstptr_addr { local data:4 = *[ram]:4 ldstptr_addr; RD = sext(data); } #la-base-32.txt stox4.w mask=0x25000000 [@orig_name=stptr.w, @orig_fmt=DJSk14ps2] #0x25000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0'] :stptr.w RD, ldstptr_addr is op24_31=0x25 & RD32 & RD & ldstptr_addr { *[ram]:4 ldstptr_addr = RD32; } #la-base-32.txt ld.b mask=0x28000000 [@la32, @primary, @qemu] #0x28000000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.b RD, ldst_addr is op22_31=0xa0 & RD & RJsrc & ldst_addr { RD = sext(*[ram]:1 ldst_addr); } #la-base-32.txt ld.h mask=0x28400000 [@la32, @primary, @qemu] #0x28400000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.h RD, ldst_addr is op22_31=0xa1 & RD & RJsrc & ldst_addr { RD = sext(*[ram]:2 ldst_addr); } #la-base-32.txt ld.w mask=0x28800000 [@la32, @primary, @qemu] #0x28800000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.w RD, ldst_addr is op22_31=0xa2 & RD & RJsrc & ldst_addr { local data:4 = *[ram]:4 ldst_addr; RD = sext(data); 
} #la-base-32.txt st.b mask=0x29000000 [@la32, @primary, @qemu] #0x29000000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :st.b RDsrc, ldst_addr is op22_31=0xa4 & RDsrc & ldst_addr { *[ram]:1 ldst_addr = RDsrc:1; } #la-base-32.txt st.h mask=0x29400000 [@la32, @primary, @qemu] #0x29400000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :st.h RDsrc, ldst_addr is op22_31=0xa5 & RDsrc & ldst_addr { *[ram]:2 ldst_addr = RDsrc:2; } #la-base-32.txt st.w mask=0x29800000 [@la32, @primary, @qemu] #0x29800000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :st.w RDsrc, ldst_addr is op22_31=0xa6 & RDsrc & ldst_addr { *[ram]:4 ldst_addr = RDsrc:4; } #la-base-32.txt ld.bu mask=0x2a000000 [@la32, @primary, @qemu] #0x2a000000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.bu RD, ldst_addr is op22_31=0xa8 & RD & RJsrc & ldst_addr { RD = zext(*[ram]:1 ldst_addr); } #la-base-32.txt ld.hu mask=0x2a400000 [@la32, @primary, @qemu] #0x2a400000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.hu RD, ldst_addr is op22_31=0xa9 & RD & RJsrc & ldst_addr { RD = zext(*[ram]:2 ldst_addr); } #la-base-32.txt preld mask=0x2ac00000 [@orig_fmt=Ud5JSk12, @la32, @primary] #0x2ac00000 0xffc00000 u0:5,r5:5,so10:12 ['imm0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :preld imm0_5, ldst_addr is op22_31=0xab & imm0_5 & op0_4=0 & ldst_addr { preld_loadl1cache(0:1, ldst_addr); } :preld imm0_5, ldst_addr is op22_31=0xab & imm0_5 & op0_4=8 & ldst_addr { local hint:1 = imm0_5; preld_storel1cache(8:1, ldst_addr); } :preld imm0_5, ldst_addr is op22_31=0xab & imm0_5 & ldst_addr { preld_nop(); } #la-base-32.txt preldx mask=0x382c0000 [@orig_fmt=Ud5JK] #0x382c0000 0xffff8000 u0:5,r5:5,r10:5 ['imm0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :preldx imm0_5, RJsrc, RKsrc is op15_31=0x7058 & RJsrc & RKsrc & imm0_5 & op0_4=0 { preldx_loadl1cache(0:1, RJsrc, RKsrc); } :preldx imm0_5, RJsrc, RKsrc is 
op15_31=0x7058 & RJsrc & RKsrc & imm0_5 & op0_4=8 { preldx_storel1cache(8:1, RJsrc, RKsrc); } :preldx imm0_5, RJsrc, RKsrc is op15_31=0x7058 & RJsrc & RKsrc & imm0_5 { preldx_nop(imm0_5:1, RJsrc, RJsrc); } #la-base-32.txt ldx.b mask=0x38000000 [@qemu] #0x38000000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.b RD, ldstx_addr is op15_31=0x7000 & RD & ldstx_addr { RD = sext(*[ram]:1 ldstx_addr); } #la-base-32.txt ldx.h mask=0x38040000 [@qemu] #0x38040000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.h RD, ldstx_addr is op15_31=0x7008 & RD & ldstx_addr { RD = sext(*[ram]:2 ldstx_addr); } #la-base-32.txt ldx.w mask=0x38080000 [@qemu] #0x38080000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.w RD, ldstx_addr is op15_31=0x7010 & RD & ldstx_addr { local data:4 = *[ram]:4 ldstx_addr; RD = sext(data); } #la-base-32.txt stx.b mask=0x38100000 [@qemu] #0x38100000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stx.b RDsrc, ldstx_addr is op15_31=0x7020 & RDsrc & ldstx_addr { *[ram]:1 ldstx_addr = RDsrc:1; } #la-base-32.txt stx.h mask=0x38140000 [@qemu] #0x38140000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stx.h RDsrc, ldstx_addr is op15_31=0x7028 & RDsrc & ldstx_addr { *[ram]:2 ldstx_addr = RDsrc:2; } #la-base-32.txt stx.w mask=0x38180000 [@qemu] #0x38180000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stx.w RDsrc, ldstx_addr is op15_31=0x7030 & RDsrc & RD32src & ldstx_addr { *[ram]:4 ldstx_addr = RD32src; } #la-base-32.txt ldx.bu mask=0x38200000 [@qemu] #0x38200000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.bu RD, ldstx_addr is op15_31=0x7040 & RD & ldstx_addr { RD = zext(*[ram]:1 ldstx_addr); } #la-base-32.txt ldx.hu mask=0x38240000 [@qemu] #0x38240000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.hu RD, ldstx_addr is op15_31=0x7048 & RD & ldstx_addr { RD = 
zext(*[ram]:2 ldstx_addr); } #la-base-32.txt dbar mask=0x38720000 [@la32, @primary, @qemu] #0x38720000 0xffff8000 u0:15 ['imm0_15_s0'] :dbar imm0_15 is op15_31=0x70e4 & imm0_15 { local code:2 = imm0_15; dbar(code); } #la-base-32.txt ibar mask=0x38728000 [@la32, @primary] #0x38728000 0xffff8000 u0:15 ['imm0_15_s0'] :ibar imm0_15 is op15_31=0x70e5 & imm0_15 { local code:2 = imm0_15; ibar(code); } ###################### # Branch Instructions ###################### #la-base-32.txt b mask=0x50000000 [@orig_fmt=Sd10k16ps2, @la32, @primary, @qemu] #0x50000000 0xfc000000 sb0:10|10:16<<2 ['sbranch0_0_s2'] :b Rel26 is op26_31=0x14 & Rel26 { goto Rel26; } #la-base-32.txt bl mask=0x54000000 [@orig_fmt=Sd10k16ps2, @la32, @primary, @qemu] #0x54000000 0xfc000000 sb0:10|10:16<<2 ['sbranch0_0_s2'] :bl Rel26 is op26_31=0x15 & Rel26 { ra = inst_next; call Rel26; } :bl Rel26 is op26_31=0x15 & Rel26 & imm10_16=1 & simm0_10=0 { ra = inst_next; goto Rel26; } #la-base-32.txt beq mask=0x58000000 [@orig_fmt=JDSk16ps2, @la32, @primary, @qemu] #0x58000000 0xfc000000 r5:5,r0:5,sb10:16<<2 ['reg5_5_s0', 'reg0_5_s0', 'sbranch10_16_s0'] :beq RDsrc, RJsrc, Rel16 is op26_31=0x16 & RDsrc & RJsrc & Rel16 { if(RJsrc == RDsrc) goto Rel16; } #la-base-32.txt beqz mask=0x40000000 [@orig_fmt=JSd5k16ps2, @la32] #0x40000000 0xfc000000 r5:5,sb0:5|10:16<<2 ['reg5_5_s0', 'sbranch0_0_s2'] :beqz RJsrc, Rel21 is op26_31=0x10 & RJsrc & Rel21 { if (RJsrc == 0) goto Rel21; } #la-base-32.txt ble mask=0x64000000 [@orig_name=bge, @orig_fmt=JDSk16ps2, @la32, @primary, @qemu] #0x64000000 0xfc000000 r5:5,r0:5,sb10:16<<2 ['reg5_5_s0', 'reg0_5_s0', 'sbranch10_16_s0'] :bge RDsrc, RJsrc, Rel16 is op26_31=0x19 & RDsrc & RJsrc & Rel16 { if(RJsrc s>= RDsrc) goto Rel16; } :bgez RJsrc, Rel16 is op26_31=0x19 & rD=0 & RJsrc & Rel16 { if(RJsrc s>= 0) goto Rel16; } :blez RDsrc, Rel16 is op26_31=0x19 & RDsrc & rJ=0 & Rel16 { if(0 s>= RDsrc) goto Rel16; } #la-base-32.txt bleu mask=0x6c000000 [@orig_name=bgeu, @orig_fmt=JDSk16ps2, @la32, 
@primary, @qemu] #0x6c000000 0xfc000000 r5:5,r0:5,sb10:16<<2 ['reg5_5_s0', 'reg0_5_s0', 'sbranch10_16_s0'] :bgeu RDsrc, RJsrc, Rel16 is op26_31=0x1b & RDsrc & RJsrc & Rel16 { if(RJsrc >= RDsrc) goto Rel16; } #la-base-32.txt bgt mask=0x60000000 [@orig_name=blt, @orig_fmt=JDSk16ps2, @la32, @primary, @qemu] #0x60000000 0xfc000000 r5:5,r0:5,sb10:16<<2 ['reg5_5_s0', 'reg0_5_s0', 'sbranch10_16_s0'] :blt RDsrc, RJsrc, Rel16 is op26_31=0x18 & RDsrc & RJsrc & Rel16 { if(RJsrc s< RDsrc) goto Rel16; } :bltz RJsrc, Rel16 is op26_31=0x18 & rD=0 & RJsrc & Rel16 { if(RJsrc s< 0) goto Rel16; } :bgtz RDsrc, Rel16 is op26_31=0x18 & RDsrc & rJ=0 & Rel16 { if(0 s< RDsrc) goto Rel16; } #la-base-32.txt bgtu mask=0x68000000 [@orig_name=bltu, @orig_fmt=JDSk16ps2, @la32, @primary, @qemu] #0x68000000 0xfc000000 r5:5,r0:5,sb10:16<<2 ['reg5_5_s0', 'reg0_5_s0', 'sbranch10_16_s0'] :bltu RDsrc, RJsrc, Rel16 is op26_31=0x1a & RDsrc & RJsrc & Rel16 { if(RJsrc < RDsrc) goto Rel16; } #la-base-32.txt bne mask=0x5c000000 [@orig_fmt=JDSk16ps2, @la32, @primary, @qemu] #0x5c000000 0xfc000000 r5:5,r0:5,sb10:16<<2 ['reg5_5_s0', 'reg0_5_s0', 'sbranch10_16_s0'] :bne RDsrc, RJsrc, Rel16 is op26_31=0x17 & RDsrc & RJsrc & Rel16 { if(RJsrc != RDsrc) goto Rel16; } #la-base-32.txt bnez mask=0x44000000 [@orig_fmt=JSd5k16ps2, @la32] #0x44000000 0xfc000000 r5:5,sb0:5|10:16<<2 ['reg5_5_s0', 'sbranch0_0_s2'] :bnez RJsrc, Rel21 is op26_31=0x11 & RJsrc & Rel21 { if (RJsrc != 0) goto Rel21; } #la-base-32.txt jirl mask=0x4c000000 [@orig_fmt=DJSk16ps2, @la32, @primary, @qemu] #0x4c000000 0xfc000000 r0:5,r5:5,so10:16<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_16_s0'] :jirl RD, RelJ16 is op26_31=0x13 & RD & RJsrc & RelJ16 { RD = inst_next; call [RelJ16]; } # alias of jirl zero, ra, 0 :ret is op26_31=0x13 & rD=0x0 & rJ=0x1 & RJsrc & simm10_16=0 { local retAddr = RJsrc; return [retAddr]; } # alias of jirl zero, rj, 0 :jr RJsrc is op26_31=0x13 & rD=0x0 & RJsrc & simm10_16=0 { pc = RJsrc; goto [pc]; } ###################### # Atomic 
Instructions ###################### #la-atomics-32.txt ll.w mask=0x20000000 [@orig_fmt=DJSk14ps2, @la32, @primary] #0x20000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0'] :ll.w RD, ldstptr_addr is op24_31=0x20 & RD & ldstptr_addr { local data:4 = *[ram]:4 ldstptr_addr; RD = sext(data); } #la-atomics-32.txt sc.w mask=0x21000000 [@orig_fmt=DJSk14ps2, @la32, @primary] #0x21000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0'] :sc.w RD, ldstptr_addr is op24_31=0x21 & RD & ldstptr_addr { *[ram]:4 ldstptr_addr = RD:4; } #la-atomics-32.txt amswap.w mask=0x38600000 [@orig_fmt=DKJ] #0x38600000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amswap.w RD, RJsrc, RK32src is op15_31=0x70c0 & RD & RJsrc & RK32src { local val:4 = *[ram]:4 RJsrc; RD = sext(val); *[ram]:4 RJsrc = RK32src; } #la-atomics-32.txt amadd.w mask=0x38610000 [@orig_fmt=DKJ] #0x38610000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amadd.w RD, RJsrc, RK32src is op15_31=0x70c2 & RD & RJsrc & RK32src { local val:4 = *[ram]:4 RJsrc; RD = sext(val); *[ram]:4 RJsrc = (RK32src + val); } #la-atomics-32.txt amand.w mask=0x38620000 [@orig_fmt=DKJ] #0x38620000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amand.w RD, RJsrc, RK32src is op15_31=0x70c4 & RD & RJsrc & RK32src { local val:4 = *[ram]:4 RJsrc; RD = sext(val); *[ram]:4 RJsrc = (RK32src & val); } #la-atomics-32.txt amor.w mask=0x38630000 [@orig_fmt=DKJ] #0x38630000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amor.w RD, RJsrc, RK32src is op15_31=0x70c6 & RD & RJsrc & RK32src { local val:4 = *[ram]:4 RJsrc; RD = sext(val); *[ram]:4 RJsrc = (RK32src | val); } #la-atomics-32.txt amxor.w mask=0x38640000 [@orig_fmt=DKJ] #0x38640000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amxor.w RD, RJsrc, RK32src is op15_31=0x70c8 & RD & RJsrc & RK32src { local val:4 = *[ram]:4 RJsrc; RD = sext(val); 
*[ram]:4 RJsrc = (RK32src ^ val); }

#la-atomics-32.txt ammax.w mask=0x38650000 [@orig_fmt=DKJ]
#0x38650000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0']
:ammax.w RD, RJsrc, RK32src is op15_31=0x70ca & RD & RJsrc & RK32src {
    local val1:4 = *[ram]:4 RJsrc;
    local val2:4 = RK32src;
    local test = (val1 s>= val2);
    RD = sext(val1);
    *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2);
}

#la-atomics-32.txt ammin.w mask=0x38660000 [@orig_fmt=DKJ]
#0x38660000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0']
:ammin.w RD, RJsrc, RK32src is op15_31=0x70cc & RD & RJsrc & RK32src {
    local val1:4 = *[ram]:4 RJsrc;
    local val2:4 = RK32src;
    local test = (val1 s<= val2);
    RD = sext(val1);
    *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2);
}

#la-atomics-32.txt amswap_db.w mask=0x38690000 [@orig_fmt=DKJ]
#0x38690000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0']
# FIX: the old value of the word is sign-extended into RD, matching amswap.w
# and every other am*.w / am*_db.w constructor in this file (was zext here).
:amswap_db.w RD, RJsrc, RK32src is op15_31=0x70d2 & RD & RJsrc & RK32src {
    dbar(0:1);
    local val:4 = *[ram]:4 RJsrc;
    RD = sext(val);
    *[ram]:4 RJsrc = RK32src;
}

#la-atomics-32.txt amadd_db.w mask=0x386a0000 [@orig_fmt=DKJ]
#0x386a0000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0']
:amadd_db.w RD, RJsrc, RK32src is op15_31=0x70d4 & RD & RJsrc & RK32src {
    dbar(0:1);
    local val:4 = *[ram]:4 RJsrc;
    RD = sext(val);
    *[ram]:4 RJsrc = (RK32src + val);
}

#la-atomics-32.txt amand_db.w mask=0x386b0000 [@orig_fmt=DKJ]
#0x386b0000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0']
:amand_db.w RD, RJsrc, RK32src is op15_31=0x70d6 & RD & RJsrc & RK32src {
    dbar(0:1);
    local val:4 = *[ram]:4 RJsrc;
    RD = sext(val);
    *[ram]:4 RJsrc = (RK32src & val);
}

#la-atomics-32.txt amor_db.w mask=0x386c0000 [@orig_fmt=DKJ]
#0x386c0000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0']
:amor_db.w RD, RJsrc, RK32src is op15_31=0x70d8 & RD & RJsrc & RK32src {
    dbar(0:1);
    local val:4 = *[ram]:4 RJsrc;
    RD = sext(val);
    *[ram]:4 RJsrc =
(RK32src | val); } #la-atomics-32.txt amxor_db.w mask=0x386d0000 [@orig_fmt=DKJ] #0x386d0000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amxor_db.w RD, RJsrc, RK32src is op15_31=0x70da & RD & RJsrc & RK32src { dbar(0:1); local val:4 = *[ram]:4 RJsrc; RD = sext(val); *[ram]:4 RJsrc = (RK32src ^ val); } #la-atomics-32.txt ammax_db.w mask=0x386e0000 [@orig_fmt=DKJ] #0x386e0000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax_db.w RD, RJsrc, RK32src is op15_31=0x70dc & RD & RJsrc & RK32src { dbar(0:1); local val1:4 = *[ram]:4 RJsrc; local val2:4 = RK32src; local test = (val1 s>= val2); RD = sext(val1); *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-32.txt ammin_db.w mask=0x386f0000 [@orig_fmt=DKJ] #0x386f0000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin_db.w RD, RJsrc, RK32src is op15_31=0x70de & RD & RJsrc & RK32src { dbar(0:1); local val1:4 = *[ram]:4 RJsrc; local val2:4 = RK32src; local test = (val1 s<= val2); RD = sext(val1); *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } ################################ # Bit-manipulation Instructions ################################ #la-bitops-32.txt clo.w mask=0x00001000 [@la32] #0x00001000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :clo.w RD, RJ32src is op10_31=0x4 & RD & RJ32src { RD = lzcount( ~RJ32src ); } #la-bitops-32.txt clz.w mask=0x00001400 [@la32, @qemu] #0x00001400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :clz.w RD, RJ32src is op10_31=0x5 & RD & RJ32src { RD = lzcount( RJ32src ); } #define pcodeop tzcount; #la-bitops-32.txt cto.w mask=0x00001800 [@la32] #0x00001800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :cto.w RD, RJ32src is op10_31=0x6 & RD & RJ32src { local tmp:4 = 0; tzcount32(~RJ32src, tmp); RD = zext(tmp); } #la-bitops-32.txt ctz.w mask=0x00001c00 [@la32, @qemu] #0x00001c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :ctz.w RD, RJ32src is op10_31=0x7 & RD & RJ32src 
{ local tmp:4 = 0; tzcount32(RJ32src, tmp); RD = zext(tmp); }

#la-bitops-32.txt revb.2h mask=0x00003000 [@la32, @qemu]
#0x00003000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0']
:revb.2h RD, RJ32src is op10_31=0xc & RD & RJ32src {
    tmp0:4 = (zext(RJ32src[0,8]) << 8) + zext(RJ32src[8,8]);
    tmp1:4 = (zext(RJ32src[16,8]) << 8) + zext(RJ32src[24,8]);
    RD = sext((tmp1 << 16) + tmp0);
}

#la-bitops-32.txt revbit.4b mask=0x00004800 [@orig_name=bitrev.4b, @la32]
#0x00004800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0']
:bitrev.4b RD, RJ32src is op10_31=0x12 & RD & RJ32src {
    local v:4 = 0;
    byterev32(RJ32src, v);
    RD = sext(v);
}

#la-bitops-32.txt revbit.w mask=0x00005000 [@orig_name=bitrev.w, @la32]
#0x00005000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0']
:bitrev.w RD, RJ32src is op10_31=0x14 & RD & RJ32src {
    local v:4 = 0;
    bitrev32(RJ32src, v);
    RD = sext(v);
}

#la-bitops-32.txt catpick.w mask=0x00080000 [@orig_name=bytepick.w, @la32]
#0x00080000 0xfffe0000 r0:5,r5:5,r10:5,u15:2 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0', 'imm15_2_s0']
# NOTE(review): the tmp_lo expression parses as
# RJ32src & ((mask << k) >> k) because shifts bind tighter than "&" -- verify
# the intended grouping against the architectural bytepick.w definition
# before changing it; left byte-identical here.
:bytepick.w RD, RJ32src, RK32src, imm15_2 is op17_31=0x4 & RD & RJ32src & RK32src & imm15_2 {
    local bitstop:1 = 8 * (4 - imm15_2);
    local mask:4 = (1 << bitstop) - 1;
    local tmp_hi:4 = RK32src & ~mask;
    local tmp_lo:4 = (RJ32src & (mask << (32-bitstop)) >> (32-bitstop));
    RD = sext(tmp_hi + tmp_lo);
}

define pcodeop crc.w.b.w;
#la-bitops-32.txt crc.w.b.w mask=0x00240000
#0x00240000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
# FIX: the message width for the byte variant is 8 bits, not 16.  This now
# mirrors the widths used by the sibling constructors: crcc.w.b.w (8:1),
# crc.w.h.w / crcc.w.h.w (16:1) and crc.w.w.w / crcc.w.w.w (32:1).
:crc.w.b.w RD, RJ32src, RK32src is op15_31=0x48 & RD & RJ32src & RK32src {
    local val:1 = RJ32src(0);
    RD = crc_ieee802.3(RK32src, val, 8:1, 0xedb88320:4);
}

define pcodeop crc.w.h.w;
#la-bitops-32.txt crc.w.h.w mask=0x00248000
#0x00248000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:crc.w.h.w RD, RJ32src, RK32src is op15_31=0x49 & RD & RJ32src & RK32src {
    local val:2 = RJ32src(0);
    RD = crc_ieee802.3(RK32src, val, 16:1, 0xedb88320:4);
}

define pcodeop crc.w.w.w;
#la-bitops-32.txt
crc.w.w.w mask=0x00250000 #0x00250000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :crc.w.w.w RD, RJ32src, RK32src is op15_31=0x4a & RD & RJ32src & RK32src { RD = crc_ieee802.3(RK32src, RJ32src, 32:1, 0xedb88320:4); } define pcodeop crcc.w.b.w; #la-bitops-32.txt crcc.w.b.w mask=0x00260000 #0x00260000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :crcc.w.b.w RD, RJ32src, RK32src is op15_31=0x4c & RD & RJ32src & RK32src { local val:1 = RJ32src(0); RD = crc_castagnoli(RK32src, val, 8:1, 0x82f63b78:4); } define pcodeop crcc.w.h.w; #la-bitops-32.txt crcc.w.h.w mask=0x00268000 #0x00268000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :crcc.w.h.w RD, RJ32src, RK32src is op15_31=0x4d & RD & RJ32src & RK32src { local val:2 = RJ32src(0); RD = crc_castagnoli(RK32src, val, 16:1, 0x82f63b78:4); } define pcodeop crcc.w.w.w; #la-bitops-32.txt crcc.w.w.w mask=0x00270000 #0x00270000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :crcc.w.w.w RD, RJ32src, RK32src is op15_31=0x4e & RD & RJ32src & RK32src { RD = crc_castagnoli(RK32src, RJ32src, 32:1, 0x82f63b78:4); } define pcodeop bstrins.w; #la-bitops-32.txt bstrins.w mask=0x00600000 [@orig_fmt=DJUm5Uk5, @la32, @qemu] #0x00600000 0xffe08000 r0:5,r5:5,u16:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm16_5_s0', 'imm10_5_s0'] :bstrins.w RD, RJ32src, imm16_5, imm10_5 is op21_31=0x3 & op15_15=0x0 & RD & RD32 & RJ32src & imm10_5 & imm16_5 { local msb:1 = imm16_5; local lsb:1 = imm10_5; local len:1 = msb + 1 - lsb; local mask:4 = (1 << len) - 1; local repl:4 = (RJ32src & (mask << lsb)) >> lsb; RD = sext((RD32 & (~mask)) | repl); } #la-bitops-32.txt bstrpick.w mask=0x00608000 [@orig_fmt=DJUm5Uk5, @la32, @qemu] #0x00608000 0xffe08000 r0:5,r5:5,u16:5,u10:5 ['reg0_5_s0', 'reg5_5_s0', 'imm16_5_s0', 'imm10_5_s0'] :bstrpick.w RD, RJ32src, imm16_5, imm10_5, is op21_31=0x3 & op15_15=0x1 & RD & RJ32src & imm10_5 & imm16_5 { local msb:1 = imm16_5; local lsb:1 = imm10_5; 
local len:1 = msb + 1 - lsb; local mask:4 = (1 << len) - 1; local repl:4 = (RJ32src & (mask << lsb)) >> lsb; RD = sext(repl); }

###############################
# Bounds-checking Instructions
###############################

#la-bound.txt asrtle mask=0x00010000 [@orig_name=asrtle.d]
#0x00010000 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0']
:asrtle.d RJsrc, RKsrc is op15_31=0x2 & op0_4=0x0 & RJsrc & RKsrc {
    if (RJsrc <= RKsrc) goto inst_next;
    addr_bound_exception(RJsrc, RKsrc);
}

#la-bound.txt asrtgt mask=0x00018000 [@orig_name=asrtgt.d]
#0x00018000 0xffff801f r5:5, r10:5 ['reg5_5_s0', 'reg10_5_s0']
:asrtgt.d RJsrc, RKsrc is op15_31=0x3 & op0_4=0x0 & RJsrc & RKsrc {
    if (RJsrc > RKsrc) goto inst_next;
    addr_bound_exception(RJsrc, RKsrc);
}

# FIX (all ldgt/ldle/stgt/stle below): the bodies read "... goto ;" with no
# target, which is not valid SLEIGH -- the local label token (an <...> word)
# was evidently dropped during text extraction.  Restored as a local <ok>
# label placed before the memory access, so the access is skipped only on
# the bound_check_exception path.
#la-bound.txt ldgt.b mask=0x38780000
#0x38780000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:ldgt.b RD, RJsrc, RKsrc is op15_31=0x70f0 & RD & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr > RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    RD = sext(*[ram]:1 vaddr);
}

#la-bound.txt ldgt.h mask=0x38788000
#0x38788000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:ldgt.h RD, RJsrc, RKsrc is op15_31=0x70f1 & RD & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr > RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    RD = sext(*[ram]:2 vaddr);
}

#la-bound.txt ldgt.w mask=0x38790000
#0x38790000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:ldgt.w RD, RJsrc, RKsrc is op15_31=0x70f2 & RD & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr > RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    RD = sext(*[ram]:4 vaddr);
}

#la-bound.txt ldle.b mask=0x387a0000
#0x387a0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:ldle.b RD, RJsrc, RKsrc is op15_31=0x70f4 & RD & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr <= RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    RD = sext(*[ram]:1 vaddr);
}

#la-bound.txt ldle.h mask=0x387a8000
#0x387a8000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:ldle.h RD, RJsrc, RKsrc is op15_31=0x70f5 & RD & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr <= RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    RD = sext(*[ram]:2 vaddr);
}

#la-bound.txt ldle.w mask=0x387b0000
#0x387b0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:ldle.w RD, RJsrc, RKsrc is op15_31=0x70f6 & RD & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr <= RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    RD = sext(*[ram]:4 vaddr);
}

#la-bound.txt stgt.b mask=0x387c0000
#0x387c0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:stgt.b RDsrc, RJsrc, RKsrc is op15_31=0x70f8 & RDsrc & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr > RKsrc) goto <ok>;
    bound_check_exception(vaddr, RKsrc);
    goto inst_next;
    <ok>
    *[ram]:1 RJsrc = RDsrc:1;
}

#la-bound.txt stgt.h mask=0x387c8000
#0x387c8000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:stgt.h RDsrc, RJsrc, RKsrc is op15_31=0x70f9 & RDsrc & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr > RKsrc) goto <ok>;
    bound_check_exception(vaddr, RKsrc);
    goto inst_next;
    <ok>
    *[ram]:2 vaddr = RDsrc:2;
}

#la-bound.txt stgt.w mask=0x387d0000
#0x387d0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:stgt.w RDsrc, RJsrc, RKsrc is op15_31=0x70fa & RDsrc & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr > RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    *[ram]:4 vaddr = RDsrc:4;
}

#la-bound.txt stle.b mask=0x387e0000
#0x387e0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:stle.b RDsrc, RJsrc, RKsrc is op15_31=0x70fc & RDsrc & RJsrc & RKsrc {
    local vaddr = RJsrc;
    if (vaddr <= RKsrc) goto <ok>;
    bound_check_exception(RJsrc, RKsrc);
    goto inst_next;
    <ok>
    *[ram]:1 vaddr = RDsrc:1;
}

#la-bound.txt stle.h mask=0x387e8000
#0x387e8000 0xffff8000 r0:5, r5:5, r10:5
['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stle.h RDsrc, RJsrc, RKsrc is op15_31=0x70fd & RDsrc & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:2 vaddr = RDsrc:2; } #la-bound.txt stle.w mask=0x387f0000 #0x387f0000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stle.w RDsrc, RJsrc, RKsrc is op15_31=0x70fe & RDsrc & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:4 vaddr = RDsrc:4; } ######################### # PRIVILEGED INSTRUCTIONS ######################### #la-privileged-32.txt csrxchg mask=0x04000000 [@primary] #0x04000000 0xff000000 r0:5,r5:5,u10:14 ['reg0_5_s0', 'reg5_5_s0', 'imm10_14_s0'] csr: csr is imm10_14 [csr = $(CSR_OFFSET) + imm10_14 * $(REGSIZE);] { export *[register]:$(REGSIZE) csr; } :csrxchg RD, RJsrc, csr is op24_31=0x4 & RD & RJsrc & csr { local csrval:$(REGSIZE) = csr; local mask = RJsrc; csr = (RD & mask) | (csrval & ~mask); RD = csrval; } :csrrd RD, csr is op24_31=0x4 & RD & op5_9=0 & csr { RD = csr; } :csrrw RD, csr is op24_31=0x4 & RD & op5_9=1 & csr { local csrval:$(REGSIZE) = csr; csr = RD; RD = csrval; } define pcodeop cacop; #la-privileged-32.txt cacop mask=0x06000000 [@orig_fmt=Ud5JSk12, @primary] #0x06000000 0xffc00000 u0:5,r5:5,s10:12 ['imm0_5_s0', 'reg5_5_s0', 'simm10_12_s0'] cache_obj: op0_2 is op0_2 { local tmp:1 = op0_2; export *[const]:1 tmp; } op_type: "initialization" is op3_4=0 { export 0:1; } op_type: "consistency" is op3_4=1 { export 1:1; } op_type: "coherency" is op3_4=2 { export 2:1; } op_type: "Custom" is op3_4=3 { export 3:1; } :cacop op_type^"("^cache_obj^")", ldst_addr is op22_31=0x18 & cache_obj & op_type & ldst_addr { cacop(op_type, cache_obj, ldst_addr); } define pcodeop lddir; level: imm10_8 is imm10_8 { export *[const]:1 imm10_8; } #la-privileged-32.txt lddir mask=0x06400000 #0x06400000 0xfffc0000 r0:5,r5:5,u10:8 ['reg0_5_s0', 'reg5_5_s0', 
'imm10_8_s0'] :lddir RD, RJsrc, level is op18_31=0x190 & RD & RJsrc & level { RD = lddir(RJsrc, level); } define pcodeop ldpte; seq: imm10_8 is imm10_8 { export *[const]:1 imm10_8; } #la-privileged-32.txt ldpte mask=0x06440000 #0x06440000 0xfffc001f r5:5,u10:8 ['reg5_5_s0', 'imm10_8_s0'] :ldpte RJsrc, seq is op18_31=0x191 & op0_4=0x0 & RJsrc & seq { ldpte(RJsrc, seq); } #la-privileged-32.txt iocsrrd.b mask=0x06480000 #0x06480000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrrd.b RD, RJsrc is op10_31=0x19200 & RD & RJsrc { local val:1 = *[iocsr]:1 RJsrc; RD = sext(val); } #la-privileged-32.txt iocsrrd.h mask=0x06480400 #0x06480400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrrd.h RD, RJsrc is op10_31=0x19201 & RD & RJsrc { local val:2 = *[iocsr]:2 RJsrc; RD = sext(val); } #la-privileged-32.txt iocsrrd.w mask=0x06480800 #0x06480800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrrd.w RD, RJsrc is op10_31=0x19202 & RD & RJsrc { local val:4 = *[iocsr]:4 RJsrc; RD = sext(val); } #la-privileged-32.txt iocsrwr.b mask=0x06481000 #0x06481000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrwr.b RDsrc, RJsrc is op10_31=0x19204 & RDsrc & RJsrc { local val:1 = RDsrc:1; *[iocsr]:1 RJsrc = val; } #la-privileged-32.txt iocsrwr.h mask=0x06481400 #0x06481400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrwr.h RDsrc, RJsrc is op10_31=0x19205 & RDsrc & RJsrc { local val:2= RDsrc:2; *[iocsr]:2 RJsrc = val; } #la-privileged-32.txt iocsrwr.w mask=0x06481800 #0x06481800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrwr.w RDsrc, RJsrc is op10_31=0x19206 & RDsrc & RJsrc { local val:4= RDsrc:4; *[iocsr]:4 RJsrc = val; } define pcodeop tlbclr; #la-privileged-32.txt tlbclr mask=0x06482000 #0x06482000 0xffffffff :tlbclr is instword=0x06482000 { tlbclr(); } define pcodeop tlbflush; #la-privileged-32.txt tlbflush mask=0x06482400 #0x06482400 0xffffffff :tlbflush is instword=0x06482400 { tlbflush(); } define pcodeop tlbsrch; #la-privileged-32.txt tlbsrch 
mask=0x06482800 [@primary] #0x06482800 0xffffffff :tlbsrch is instword=0x06482800 { tlbsrch(); } define pcodeop tlbrd; #la-privileged-32.txt tlbrd mask=0x06482c00 [@primary] #0x06482c00 0xffffffff :tlbrd is instword=0x06482c00 { tlbrd(); } define pcodeop tlbwr; #la-privileged-32.txt tlbwr mask=0x06483000 [@primary] #0x06483000 0xffffffff :tlbwr is instword=0x06483000 { tlbwr(); } define pcodeop tlbfill; #la-privileged-32.txt tlbfill mask=0x06483400 [@primary] #0x06483400 0xffffffff :tlbfill is instword=0x06483400 { tlbfill(); } define pcodeop ertn; #la-privileged-32.txt eret mask=0x06483800 [@orig_name=ertn, @primary] #0x06483800 0xffffffff :ertn is instword=0x06483800 { local ret:$(REGSIZE) = ertn(); return [ret]; } define pcodeop idle; #la-privileged-32.txt idle mask=0x06488000 [@primary] #0x06488000 0xffff8000 u0:15 ['imm0_15_s0'] :idle imm0_15 is op15_31=0xc91 & imm0_15 { idle(imm0_15:2); } define pcodeop invtlb; #la-privileged-32.txt tlbinv mask=0x06498000 [@orig_name=invtlb, @orig_fmt=Ud5JK, @primary] #0x06498000 0xffff8000 u0:5,r5:5,r10:5 ['imm0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :invtlb RJsrc, RKsrc, imm0_5 is op15_31=0xc93 & RJsrc & RKsrc & imm0_5 { invtlb(RJsrc, RKsrc, imm0_5:1); } ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch64.pspec ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch64_f32.slaspec ================================================ @define LA64 "" @define REGSIZE 8 @define FREGSIZE 4 @define ADDRSIZE 8 @include "loongarch_main.sinc" @include "loongarch32_instructions.sinc" @include "loongarch64_instructions.sinc" @include "loongarch_float.sinc" @include "loongarch_double.sinc" @include "lasx.sinc" @include "lbt.sinc" @include "lsx.sinc" @include "lvz.sinc" ================================================ FILE: 
pypcode/processors/Loongarch/data/languages/loongarch64_f64.slaspec ================================================ @define LA64 "" @define REGSIZE 8 @define FREGSIZE 8 @define ADDRSIZE 8 @include "loongarch_main.sinc" @include "loongarch32_instructions.sinc" @include "loongarch64_instructions.sinc" @include "loongarch_float.sinc" @include "loongarch_double.sinc" @include "lasx.sinc" @include "lbt.sinc" @include "lsx.sinc" @include "lvz.sinc" ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch64_instructions.sinc ================================================ ################### # Base Instructions ################### #la-base-64.txt add.d mask=0x00108000 [@qemu] #0x00108000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :add.d RD, RJsrc, RKsrc is op15_31=0x21 & RD & RJsrc & RKsrc { RD = RJsrc + RKsrc; } #la-base-64.txt addi.d mask=0x02c00000 [@qemu] #0x02c00000 0xffc00000 r0:5,r5:5,s10:12 ['reg0_5_s0', 'reg5_5_s0', 'simm10_12_s0'] :addi.d RD, RJsrc,simm10_12 is op22_31=0xb & RD & RJsrc & simm10_12 { RD = RJsrc + simm10_12; } #la-base-64.txt addu16i.d mask=0x10000000 [@qemu] #0x10000000 0xfc000000 r0:5,r5:5,s10:16 ['reg0_5_s0', 'reg5_5_s0', 'simm10_16_s0'] :addu16i.d RD, RJsrc, addu16_imm is op26_31=0x4 & RD & RJsrc & addu16_imm { RD = RJsrc + addu16_imm; } #la-bitops-64.txt sladd.d mask=0x002c0000 [@orig_name=alsl.d, @orig_fmt=DJKUa2pp1] #0x002c0000 0xfffe0000 r0:5,r5:5,r10:5,u15:2+1 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0', 'imm15_2+1_s0'] :alsl.d RD, RJsrc, RKsrc, alsl_shift is op17_31=0x16 & RD & RJsrc & RKsrc & alsl_shift { RD = (RJsrc << alsl_shift) + RKsrc; } #la-bitops-64.txt sladd.wu mask=0x00060000 [@orig_name=alsl.wu, @orig_fmt=DJKUa2pp1] #0x00060000 0xfffe0000 r0:5,r5:5,r10:5,u15:2+1 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0', 'imm15_2+1_s0'] :alsl.wu RD, RJ32src, RK32src, alsl_shift is op17_31=0x3 & RD & RJ32src & RK32src & alsl_shift { local result:4 = (RJ32src << alsl_shift) 
+ RK32src; RD = zext(result); } #la-mul-64.txt div.d mask=0x00220000 [@qemu] #0x00220000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :div.d RD, RJsrc, RKsrc is op15_31=0x44 & RD & RJsrc & RKsrc { RD = RJsrc s/ RKsrc; } #la-mul-64.txt div.du mask=0x00230000 [@qemu] #0x00230000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :div.du RD, RJsrc, RKsrc is op15_31=0x46 & RD & RJsrc & RKsrc { RD = RJsrc / RKsrc; } #la-mul-64.txt mod.d mask=0x00228000 [@qemu] #0x00228000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mod.d RD, RJsrc, RKsrc is op15_31=0x45 & RD & RJsrc & RKsrc { RD = RJsrc s% RKsrc; } #la-mul-64.txt mod.du mask=0x00238000 [@qemu] #0x00238000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mod.du RD, RJsrc, RKsrc is op15_31=0x47 & RD & RJsrc & RKsrc { RD = RJsrc % RKsrc; } #la-mul-64.txt mul.d mask=0x001d8000 [@qemu] #0x001d8000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mul.d RD, RJsrc, RKsrc is op15_31=0x3b & RD & RJsrc & RKsrc { tmp1:16 = sext( RJsrc ); tmp2:16 = sext( RKsrc ); prod:16 = tmp1 * tmp2; RD = prod:8; } #la-mul-64.txt mulh.d mask=0x001e0000 [@qemu] #0x001e0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mulh.d RD, RJsrc, RKsrc is op15_31=0x3c & RD & RJsrc & RKsrc { tmp1:16 = sext( RJsrc ); tmp2:16 = sext( RKsrc ); prod:16 = tmp1 * tmp2; prod = prod >> 64; RD = prod:8; } #la-mul-64.txt mulh.du mask=0x001e8000 [@qemu] #0x001e8000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mulh.du RD, RJsrc, RKsrc is op15_31=0x3d & RD & RJsrc & RKsrc { tmp1:16 = zext( RJsrc ); tmp2:16 = zext( RKsrc ); prod:16 = tmp1 * tmp2; prod = prod >> 64; RD = prod:8; } #la-mul-64.txt mulw.d.w mask=0x001f0000 #0x001f0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :mulw.d.w RD, RJ32src, RK32src is op15_31=0x3e & RD & RJ32src & RK32src { tmp1:8 = sext( RJ32src ); tmp2:8 = sext( RK32src ); 
prod:8 = tmp1 * tmp2; RD = prod; }

#la-mul-64.txt mulw.d.wu mask=0x001f8000
#0x001f8000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:mulw.d.wu RD, RJ32src, RK32src is op15_31=0x3f & RD & RJ32src & RK32src {
    tmp1:8 = zext( RJ32src );
    tmp2:8 = zext( RK32src );
    prod:8 = tmp1 * tmp2;
    RD = prod;
}

#la-base-64.txt rdtime.d mask=0x00006800
#0x00006800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0']
:rdtime.d RD, RJ is op10_31=0x1a & RD & RJ {
    local tmp:1 = 2;
    RD = rdtime.counter(tmp);
    RJ = rdtime.counterid(tmp);
}

#la-base-64.txt rotr.d mask=0x001b8000 [@qemu]
#0x001b8000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
# FIX: the low half of a rotate must come from a LOGICAL right shift; the
# original used "s>>", which smears the sign bit across the high bits and
# (since the halves are combined with "+") corrupts the result whenever
# RJsrc is negative.
:rotr.d RD, RJsrc, RKsrc is op15_31=0x37 & RD & RJsrc & RKsrc {
    local shift:1 = RKsrc(0) & 0x3f;
    local tmp1:8 = RJsrc >> shift;
    local tmp2:8 = RJsrc << (64 - shift);
    RD = tmp1 + tmp2;
}

#la-base-64.txt rotri.d mask=0x004d0000 [@qemu]
#0x004d0000 0xffff0000 r0:5,r5:5,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm10_6_s0']
# FIX: same "s>>" -> ">>" correction as rotr.d above.
:rotri.d RD, RJsrc, imm10_6 is op16_31=0x4d & RD & RJsrc & imm10_6 {
    local shift:1 = imm10_6 & 0x3f;
    local tmp1:8 = RJsrc >> shift;
    local tmp2:8 = RJsrc << (64 - shift);
    RD = tmp1 + tmp2;
}

#la-base-64.txt sll.d mask=0x00188000 [@qemu]
#0x00188000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:sll.d RD, RJsrc, RKsrc is op15_31=0x31 & RD & RJsrc & RKsrc {
    local shift:1 = RKsrc(0) & 0x3f;
    RD = RJsrc << shift;
}

#la-base-64.txt slli.d mask=0x00410000 [@qemu]
#0x00410000 0xffff0000 r0:5,r5:5,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm10_6_s0']
# FIX: the 64-bit immediate shift amount (u10:6) is 6 bits wide; masking
# with 0x1f silently decoded shift counts 32..63 as 0..31.  The register
# forms (sll.d/sra.d/srl.d) and rotri.d already mask with 0x3f.
:slli.d RD, RJsrc, imm10_6 is op16_31=0x41 & RD & RJsrc & imm10_6 {
    local shift:1 = imm10_6 & 0x3f;
    RD = RJsrc << shift;
}

#la-base-64.txt sra.d mask=0x00198000 [@qemu]
#0x00198000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:sra.d RD, RJsrc, RKsrc is op15_31=0x33 & RD & RJsrc & RKsrc {
    local shift:1 = RKsrc(0) & 0x3f;
    RD = RJsrc s>> shift;
}

#la-base-64.txt srai.d mask=0x00490000 [@qemu]
#0x00490000 0xffff0000 r0:5,r5:5,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm10_6_s0']
# FIX: 0x1f -> 0x3f (6-bit shift amount; see slli.d above).
:srai.d RD, RJsrc, imm10_6 is op16_31=0x49 & RD & RJsrc & imm10_6 {
    local shift:1 = imm10_6 & 0x3f;
    RD = RJsrc s>> shift;
}

#la-base-64.txt srl.d mask=0x00190000 [@qemu]
#0x00190000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:srl.d RD, RJsrc, RKsrc is op15_31=0x32 & RD & RJsrc & RKsrc {
    local shift:1 = RKsrc(0) & 0x3f;
    RD = RJsrc >> shift;
}

#la-base-64.txt srli.d mask=0x00450000 [@qemu]
#0x00450000 0xffff0000 r0:5,r5:5,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm10_6_s0']
# FIX: 0x1f -> 0x3f (6-bit shift amount; see slli.d above).
:srli.d RD, RJsrc, imm10_6 is op16_31=0x45 & RD & RJsrc & imm10_6 {
    local shift:1 = imm10_6 & 0x3f;
    RD = RJsrc >> shift;
}

#la-base-64.txt sub.d mask=0x00118000 [@qemu]
#0x00118000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0']
:sub.d RD, RJsrc, RKsrc is op15_31=0x23 & RD & RJsrc & RKsrc {
    RD = RJsrc - RKsrc;
}

##########################
# Load/Store Instructions
##########################

#la-base-64.txt cu52i.d mask=0x03000000 [@orig_name=lu52i.d, @qemu]
#0x03000000 0xffc00000 r0:5,r5:5,s10:12 ['reg0_5_s0', 'reg5_5_s0', 'simm10_12_s0']
:lu52i.d RD, RJsrc, simm52i is op22_31=0xc & RD & RJsrc & simm52i {
    RD = simm52i + (RJsrc & 0xfffffffffffff);
}

#la-base-64.txt cu32i.d mask=0x16000000 [@orig_name=lu32i.d, @qemu]
#0x16000000 0xfe000000 r0:5,s5:20 ['reg0_5_s0', 'simm5_20_s0']
:lu32i.d RD, simm32i is op25_31=0xb & RD & RD32 & simm32i {
    RD = simm32i + zext(RD32);
}

#la-base-64.txt ldox4.d mask=0x26000000 [@orig_name=ldptr.d, @orig_fmt=DJSk14ps2]
#0x26000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0']
:ldptr.d RD, ldstptr_addr is op24_31=0x26 & RD & ldstptr_addr {
    RD = *[ram]:8 ldstptr_addr;
}

#la-base-64.txt stox4.d mask=0x27000000 [@orig_name=stptr.d, @orig_fmt=DJSk14ps2]
#0x27000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0']
:stptr.d RDsrc, ldstptr_addr is op24_31=0x27 & RDsrc & ldstptr_addr {
    *[ram]:8 ldstptr_addr = RDsrc;
}

#la-base-64.txt ld.d
mask=0x28c00000 [@qemu] #0x28c00000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.d RD, ldst_addr is op22_31=0xa3 & RD & ldst_addr { RD = *[ram]:8 ldst_addr; } #la-base-64.txt st.d mask=0x29c00000 [@qemu] #0x29c00000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :st.d RDsrc, ldst_addr is op22_31=0xa7 & RDsrc & ldst_addr { *[ram]:8 ldst_addr = RDsrc; } #la-base-64.txt ld.wu mask=0x2a800000 [@qemu] #0x2a800000 0xffc00000 r0:5,r5:5,so10:12 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :ld.wu RD, ldst_addr is op22_31=0xaa & RD & ldst_addr { RD = zext(*[ram]:4 ldst_addr); } #la-base-64.txt ldx.d mask=0x380c0000 [@qemu] #0x380c0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.d RD, ldstx_addr is op15_31=0x7018 & RD & ldstx_addr { RD = *[ram]:8 ldstx_addr; } #la-base-64.txt stx.d mask=0x381c0000 [@qemu] #0x381c0000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stx.d RDsrc, ldstx_addr is op15_31=0x7038 & RDsrc & ldstx_addr { *[ram]:8 ldstx_addr = RDsrc; } #la-base-64.txt ldx.wu mask=0x38280000 [@qemu] #0x38280000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldx.wu RD, ldstx_addr is op15_31=0x7050 & RD & ldstx_addr { RD = zext(*[ram]:4 ldstx_addr); } ###################### # Atomic Instructions ###################### #la-atomics-64.txt ll.d mask=0x22000000 [@orig_fmt=DJSk14ps2] #0x22000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0'] :ll.d RD, ldstptr_addr is op24_31=0x22 & RD & ldstptr_addr { RD = *[ram]:8 ldstptr_addr; } #la-atomics-64.txt sc.d mask=0x23000000 [@orig_fmt=DJSk14ps2] #0x23000000 0xff000000 r0:5,r5:5,so10:14<<2 ['reg0_5_s0', 'reg5_5_s0', 'soffs10_14_s0'] :sc.d RD, ldstptr_addr is op24_31=0x23 & RD & ldstptr_addr { *[ram]:8 ldstptr_addr = RD; } #la-atomics-64.txt amswap.d mask=0x38608000 [@orig_fmt=DKJ] #0x38608000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amswap.d RD, 
RJsrc, RKsrc is op15_31=0x70c1 & RD & RJsrc & RKsrc { local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = RKsrc; } #la-atomics-64.txt amadd.d mask=0x38618000 [@orig_fmt=DKJ] #0x38618000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amadd.d RD, RJsrc, RKsrc is op15_31=0x70c3 & RD & RJsrc & RKsrc { local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc + val); } #la-atomics-64.txt amand.d mask=0x38628000 [@orig_fmt=DKJ] #0x38628000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amand.d RD, RJsrc, RKsrc is op15_31=0x70c5 & RD & RJsrc & RKsrc { local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc & val); } #la-atomics-64.txt amor.d mask=0x38638000 [@orig_fmt=DKJ] #0x38638000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amor.d RD, RJsrc, RKsrc is op15_31=0x70c7 & RD & RJsrc & RKsrc { local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc | val); } #la-atomics-64.txt amxor.d mask=0x38648000 [@orig_fmt=DKJ] #0x38648000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amxor.d RD, RJsrc, RKsrc is op15_31=0x70c9 & RD & RJsrc & RKsrc { local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc ^ val); } #la-atomics-64.txt ammax.d mask=0x38658000 [@orig_fmt=DKJ] #0x38658000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax.d RD, RJsrc, RKsrc is op15_31=0x70cb & RD & RJsrc & RKsrc { local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 s>= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammin.d mask=0x38668000 [@orig_fmt=DKJ] #0x38668000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin.d RD, RJsrc, RKsrc is op15_31=0x70cd & RD & RJsrc & RKsrc { local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 s<= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammax.wu 
mask=0x38670000 [@orig_fmt=DKJ] #0x38670000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax.wu RD, RJsrc, RK32src is op15_31=0x70ce & RD & RJsrc & RK32src { local val1:4 = *[ram]:4 RJsrc; local val2:4 = RK32src; local test = (val1 >= val2); RD = sext(val1); *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammax.du mask=0x38678000 [@orig_fmt=DKJ] #0x38678000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax.du RD, RJsrc, RKsrc is op15_31=0x70cf & RD & RJsrc & RKsrc { local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 >= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammin.wu mask=0x38680000 [@orig_fmt=DKJ] #0x38680000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin.wu RD, RJsrc, RK32src is op15_31=0x70d0 & RD & RJsrc & RK32src { local val1:4 = *[ram]:4 RJsrc; local val2:4 = RK32src; local test = (val1 <= val2); RD = sext(val1); *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammin.du mask=0x38688000 [@orig_fmt=DKJ] #0x38688000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin.du RD, RJsrc, RKsrc is op15_31=0x70d1 & RD & RJsrc & RKsrc { local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 <= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt amswap_db.d mask=0x38698000 [@orig_fmt=DKJ] #0x38698000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amswap_db.d RD, RJsrc, RKsrc is op15_31=0x70d3 & RD & RJsrc & RKsrc { dbar(0:1); local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = RKsrc; } #la-atomics-64.txt amadd_db.d mask=0x386a8000 [@orig_fmt=DKJ] #0x386a8000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amadd_db.d RD, RJsrc, RKsrc is op15_31=0x70d5 & RD & RJsrc & RKsrc { dbar(0:1); local val:8 = *[ram]:8 RJsrc; RD 
= val; *[ram]:8 RJsrc = (RKsrc + val); } #la-atomics-64.txt amand_db.d mask=0x386b8000 [@orig_fmt=DKJ] #0x386b8000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amand_db.d RD, RJsrc, RKsrc is op15_31=0x70d7 & RD & RJsrc & RKsrc { dbar(0:1); local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc & val); } #la-atomics-64.txt amor_db.d mask=0x386c8000 [@orig_fmt=DKJ] #0x386c8000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amor_db.d RD, RJsrc, RKsrc is op15_31=0x70d9 & RD & RJsrc & RKsrc { dbar(0:1); local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc | val); } #la-atomics-64.txt amxor_db.d mask=0x386d8000 [@orig_fmt=DKJ] #0x386d8000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :amxor_db.d RD, RJsrc, RKsrc is op15_31=0x70db & RD & RJsrc & RKsrc { dbar(0:1); local val:8 = *[ram]:8 RJsrc; RD = val; *[ram]:8 RJsrc = (RKsrc ^ val); } #la-atomics-64.txt ammax_db.d mask=0x386e8000 [@orig_fmt=DKJ] #0x386e8000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax_db.d RD, RJsrc, RKsrc is op15_31=0x70dd & RD & RJsrc & RKsrc { dbar(0:1); local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 s>= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammin_db.d mask=0x386f8000 [@orig_fmt=DKJ] #0x386f8000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin_db.d RD, RJsrc, RKsrc is op15_31=0x70df & RD & RJsrc & RKsrc { dbar(0:1); local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 s<= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammax_db.wu mask=0x38700000 [@orig_fmt=DKJ] #0x38700000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax_db.wu RD, RJsrc, RK32src is op15_31=0x70e0 & RD & RJsrc & RK32src { dbar(0:1); local val1:4 = *[ram]:4 RJsrc; local val2:4 = RK32src; local test = (val1 >= 
val2); RD = sext(val1); *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammax_db.du mask=0x38708000 [@orig_fmt=DKJ] #0x38708000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammax_db.du RD, RJsrc, RKsrc is op15_31=0x70e1 & RD & RJsrc & RKsrc { dbar(0:1); local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 >= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammin_db.wu mask=0x38710000 [@orig_fmt=DKJ] #0x38710000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin_db.wu RD, RJsrc, RK32src is op15_31=0x70e2 & RD & RJsrc & RK32src { dbar(0:1); local val1:4 = *[ram]:4 RJsrc; local val2:4 = RK32src; local test = (val1 <= val2); RD = sext(val1); *[ram]:4 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } #la-atomics-64.txt ammin_db.du mask=0x38718000 [@orig_fmt=DKJ] #0x38718000 0xffff8000 r0:5,r10:5,r5:5 ['reg0_5_s0', 'reg10_5_s0', 'reg5_5_s0'] :ammin_db.du RD, RJsrc, RKsrc is op15_31=0x70e3 & RD & RJsrc & RKsrc { dbar(0:1); local val1:8 = *[ram]:8 RJsrc; local val2:8 = RKsrc; local test = (val1 <= val2); RD = val1; *[ram]:8 RJsrc = (zext(test) * val1) + (zext(!test) * val2); } ################################ # Bit-manipulation Instructions ################################ #la-bitops-64.txt clo.d mask=0x00002000 #0x00002000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :clo.d RD, RJsrc is op10_31=0x8 & RD & RJsrc { RD = lzcount( ~RJsrc ); } #la-bitops-64.txt clz.d mask=0x00002400 [@qemu] #0x00002400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :clz.d RD, RJsrc is op10_31=0x9 & RD & RJsrc { RD = lzcount( RJsrc ); } #la-bitops-64.txt cto.d mask=0x00002800 #0x00002800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :cto.d RD, RJsrc is op10_31=0xa & RD & RJsrc { local tmp:8 = 0; tzcount64(~RJsrc, tmp); RD = tmp; } #la-bitops-64.txt ctz.d mask=0x00002c00 [@qemu] #0x00002c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 
'reg5_5_s0'] :ctz.d RD, RJsrc is op10_31=0xb & RD & RJsrc { local tmp:8 = 0; tzcount64(RJsrc, tmp); RD = tmp; } #la-bitops-64.txt revb.4h mask=0x00003400 #0x00003400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :revb.4h RD, RJsrc is op10_31=0xd & RD & RJsrc { tmp0:8 = (zext(RJsrc[0,8]) << 8) + zext(RJsrc[8,8]); tmp1:8 = (zext(RJsrc[16,8]) << 8) + zext(RJsrc[24,8]); tmp2:8 = (zext(RJsrc[32,8]) << 8) + zext(RJsrc[40,8]); tmp3:8 = (zext(RJsrc[48,8]) << 8) + zext(RJsrc[56,8]); RD = (tmp3 << 48) + (tmp2 << 32) + (tmp1 << 16) + tmp0; } #la-bitops-64.txt revb.2w mask=0x00003800 [@qemu] #0x00003800 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :revb.2w RD, RJsrc is op10_31=0xe & RD & RJsrc { tmp0:8 = (zext(RJsrc[0,8]) << 24) + (zext(RJsrc[8,8]) << 16) + (zext(RJsrc[16,8]) << 8) + zext(RJsrc[24,8]); tmp1:8 = (zext(RJsrc[32,8]) << 24) + (zext(RJsrc[40,8]) << 16) + (zext(RJsrc[48,8]) << 8) + zext(RJsrc[56,8]); RD = (tmp1 << 32) + tmp0; } #la-bitops-64.txt revb.d mask=0x00003c00 [@qemu] #0x00003c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :revb.d RD, RJsrc is op10_31=0xf & RD & RJsrc { tmp0:8 = zext(RJsrc[0,8]); tmp1:8 = zext(RJsrc[8,8]); tmp2:8 = zext(RJsrc[16,8]); tmp3:8 = zext(RJsrc[24,8]); tmp4:8 = zext(RJsrc[32,8]); tmp5:8 = zext(RJsrc[40,8]); tmp6:8 = zext(RJsrc[48,8]); tmp7:8 = zext(RJsrc[56,8]); RD = (tmp0 << 56) + (tmp1 << 48) + (tmp2 << 40) + (tmp3 << 32) + (tmp4 << 24) + (tmp5 << 16) + (tmp6 << 8) + tmp7; } #la-bitops-64.txt revh.2w mask=0x00004000 #0x00004000 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :revh.2w RD, RJsrc is op10_31=0x10 & RD & RJsrc { tmp0:8 = (zext(RJsrc[0,16]) << 16) + zext(RJsrc[16,16]); tmp1:8 = (zext(RJsrc[32,16]) << 8) + zext(RJsrc[48,16]); RD = (tmp1 << 32) + tmp0; } #la-bitops-64.txt revh.d mask=0x00004400 #0x00004400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :revh.d RD, RJsrc is op10_31=0x11 & RD & RJsrc { tmp0:8 = zext(RJsrc[0,16]); tmp1:8 = zext(RJsrc[16,16]); tmp2:8 = zext(RJsrc[32,16]); tmp3:8 = 
zext(RJsrc[48,16]); RD = (tmp3 << 48) + (tmp2 << 32) + (tmp1 << 16) + tmp0; } #la-bitops-64.txt revbit.8b mask=0x00004c00 [@orig_name=bitrev.8b] #0x00004c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :bitrev.8b RD, RJsrc is op10_31=0x13 & RD & RJsrc { local v:8 = 0; byterev64(RJsrc, v); RD = v; } #la-bitops-64.txt revbit.d mask=0x00005400 [@orig_name=bitrev.d] #0x00005400 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :bitrev.d RD, RJsrc is op10_31=0x15 & RD & RJsrc { local v:8 = 0; bitrev64(RJsrc, v); RD = v; } define pcodeop bytepick.d; #la-bitops-64.txt catpick.d mask=0x000c0000 [@orig_name=bytepick.d] #0x000c0000 0xfffc0000 r0:5,r5:5,r10:5,u15:3 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0', 'imm15_3_s0'] :bytepick.d RD, RJsrc, RKsrc, imm15_3 is op18_31=0x3 & RD & RJsrc & RKsrc & imm15_3 { local bitstop:1 = 8 * (8 - imm15_3); local mask:8 = (1 << bitstop) - 1; local tmp_hi:8 = RKsrc & ~mask; local tmp_lo:8 = (RJsrc & (mask << (64-bitstop)) >> (64-bitstop)); RD = tmp_hi + tmp_lo; } define pcodeop crc.w.d.w; #la-bitops-64.txt crc.w.d.w mask=0x00258000 #0x00258000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :crc.w.d.w RD, RJsrc, RKsrc is op15_31=0x4b & RD & RJsrc & RKsrc { RD = crc_ieee802.3(RKsrc, RJsrc, 64:1, 0xedb88320:4); } define pcodeop crcc.w.d.w; #la-bitops-64.txt crcc.w.d.w mask=0x00278000 #0x00278000 0xffff8000 r0:5,r5:5,r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :crcc.w.d.w RD, RJsrc, RKsrc is op15_31=0x4f & RD & RJsrc & RKsrc { RD = crc_castagnoli(RKsrc, RJsrc, 64:1, 0x82f63b78:4); } #la-bitops-64.txt bstrins.d mask=0x00800000 [@orig_fmt=DJUm6Uk6, @qemu] #0x00800000 0xffc00000 r0:5,r5:5,u16:6,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm16_6_s0', 'imm10_6_s0'] :bstrins.d RD, RJsrc, imm16_6, imm10_6 is op22_31=0x2 & RD & RJsrc & imm10_6 & imm16_6 { local msb:1 = imm16_6; local lsb:1 = imm10_6; local len:1 = msb + 1 - lsb; local mask:8 = (1 << len) - 1; local repl:8 = (RJsrc & (mask << lsb)) >> lsb; RD = (RD & (~mask)) | repl; } 
#la-bitops-64.txt bstrpick.d mask=0x00c00000 [@orig_fmt=DJUm6Uk6, @qemu] #0x00c00000 0xffc00000 r0:5,r5:5,u16:6,u10:6 ['reg0_5_s0', 'reg5_5_s0', 'imm16_6_s0', 'imm10_6_s0'] :bstrpick.d RD, RJsrc, imm16_6, imm10_6 is op22_31=0x3 & RD & RJsrc & imm10_6 & imm16_6 { local msb:1 = imm16_6; local lsb:1 = imm10_6; local len:1 = msb + 1 - lsb; local mask:8 = (1 << len) - 1; local repl:8 = (RJsrc & (mask << lsb)) >> lsb; RD = repl; } ############################### # Bounds-checking Instructions ############################### #la-bound-64.txt ldgt.d mask=0x38798000 #0x38798000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldgt.d RD, RJsrc, RKsrc is op15_31=0x70f3 & RD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr > RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; RD = sext(*[ram]:8 vaddr); } #la-bound-64.txt ldle.d mask=0x387b8000 #0x387b8000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :ldle.d RD, RJsrc, RKsrc is op15_31=0x70f7 & RD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; RD = sext(*[ram]:1 vaddr); } #la-bound-64.txt stgt.d mask=0x387d8000 #0x387d8000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stgt.d RDsrc, RJsrc, RKsrc is op15_31=0x70fb & RDsrc & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr > RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:8 vaddr = RDsrc:8; } #la-bound-64.txt stle.d mask=0x387f8000 #0x387f8000 0xffff8000 r0:5, r5:5, r10:5 ['reg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :stle.d RDsrc, RJsrc, RKsrc is op15_31=0x70ff & RDsrc & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:8 vaddr = RDsrc:8; } ######################### # PRIVILEGED INSTRUCTIONS ######################### #la-privileged-64.txt iocsrrd.d mask=0x06480c00 #0x06480c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] 
:iocsrrd.d RD, RJsrc is op10_31=0x19203 & RD & RJsrc { RD = *[iocsr]:8 RJsrc; } #la-privileged-64.txt iocsrwr.d mask=0x06481c00 #0x06481c00 0xfffffc00 r0:5,r5:5 ['reg0_5_s0', 'reg5_5_s0'] :iocsrwr.d RDsrc, RJsrc is op10_31=0x19207 & RDsrc & RJsrc { *[iocsr]:8 RJsrc = RDsrc; } ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch_double.sinc ================================================ #la-fp-d.txt fadd.d mask=0x01010000 #0x01010000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fadd.d drD, drJ, drK is op15_31=0x202 & drD & drJ & drK { drD = drJ f+ drK; } #la-fp-d.txt fsub.d mask=0x01030000 #0x01030000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fsub.d drD, drJ, drK is op15_31=0x206 & drD & drJ & drK { drD = drJ f- drK; } #la-fp-d.txt fmul.d mask=0x01050000 #0x01050000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmul.d drD, drJ, drK is op15_31=0x20a & drD & drJ & drK { drD = drJ f* drK; } #la-fp-d.txt fdiv.d mask=0x01070000 #0x01070000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fdiv.d drD, drJ, drK is op15_31=0x20e & drD & drJ & drK { drD = drJ f/ drK; } #la-fp-d.txt fmax.d mask=0x01090000 #0x01090000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmax.d drD, drJ, drK is op15_31=0x212 & drD & drJ & drK { local jval = drJ; local kval = drK; local test = jval f>= kval; drD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-d.txt fmin.d mask=0x010b0000 #0x010b0000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmin.d drD, drJ, drK is op15_31=0x216 & drD & drJ & drK { local jval = drJ; local kval = drK; local test = jval f<= kval; drD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-d.txt fmaxa.d mask=0x010d0000 #0x010d0000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmaxa.d drD, drJ, drK is 
op15_31=0x21a & drD & drJ & drK { local jval = drJ; local kval = drK; local test = (abs(jval) f>= abs(kval)); drD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-d.txt fmina.d mask=0x010f0000 #0x010f0000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmina.d drD, drJ, drK is op15_31=0x21e & drD & drJ & drK { local jval = drJ; local kval = drK; local test = (abs(jval) f<= abs(kval)); drD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-d.txt fscaleb.d mask=0x01110000 #0x01110000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fscaleb.d drD, drJ, drK is op15_31=0x222 & drD & drJ & drK { drD = f_scaleb(drJ, drK); } #la-fp-d.txt fcopysign.d mask=0x01130000 #0x01130000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fcopysign.d drD, drJ, drK is op15_31=0x226 & drD & drJ & drK { local kval = drK & 0x8000000000000000; local jval = drJ & 0x7fffffffffffffff; drD = kval | jval ; } #la-fp-d.txt fabs.d mask=0x01140800 #0x01140800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fabs.d drD, drJ is op10_31=0x4502 & drD & drJ { drD = abs(drJ); } #la-fp-d.txt fneg.d mask=0x01141800 #0x01141800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fneg.d drD, drJ is op10_31=0x4506 & drD & drJ { drD = f- drJ; } #la-fp-d.txt flogb.d mask=0x01142800 #0x01142800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :flogb.d drD, drJ is op10_31=0x450a & drD & drJ { drD = f_logb(drJ); } #la-fp-d.txt fclass.d mask=0x01143800 #0x01143800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fclass.d drD, drJ is op10_31=0x450e & drD & drJ { drD = f_class(drD, drJ); } #la-fp-d.txt fsqrt.d mask=0x01144800 #0x01144800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fsqrt.d drD, drJ is op10_31=0x4512 & drD & drJ { drD = sqrt(drJ); } #la-fp-d.txt frecip.d mask=0x01145800 #0x01145800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :frecip.d drD, drJ is op10_31=0x4516 & drD & drJ { local one:4 = 1; drD = 
int2float(one) f/ drJ; } #la-fp-d.txt frsqrt.d mask=0x01146800 #0x01146800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :frsqrt.d drD, drJ is op10_31=0x451a & drD & drJ { local one:4 = 1; drD = int2float(one) f/ sqrt(drJ); } #la-fp-d.txt fmov.d mask=0x01149800 #0x01149800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fmov.d drD, drJ is op10_31=0x4526 & drD & drJ { drD = drJ; } @ifdef LA64 #la-fp-d.txt movgr2fr.d mask=0x0114a800 #0x0114a800 0xfffffc00 f0:5, r5:5 ['freg0_5_s0', 'reg5_5_s0'] :movgr2fr.d drD, RJsrc is op10_31=0x452a & drD & RJsrc { drD = RJsrc; } #la-fp-d.txt movfr2gr.d mask=0x0114b800 #0x0114b800 0xfffffc00 r0:5, f5:5 ['reg0_5_s0', 'freg5_5_s0'] :movfr2gr.d RD, drJ is op10_31=0x452e & RD & drJ { RD = drJ; } @endif #la-fp-d.txt fcvt.s.d mask=0x01191800 #0x01191800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fcvt.s.d drD, drJ is op10_31=0x4646 & drD & drJ & frD { frD = float2float(drJ); } #la-fp-d.txt fcvt.d.s mask=0x01192400 #0x01192400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fcvt.d.s drD, drJ is op10_31=0x4649 & drD & drJ & frJ { drD = float2float(frJ); } #la-fp-d.txt ftintrm.w.d mask=0x011a0800 #0x011a0800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrm.w.d drD, drJ is op10_31=0x4682 & drD & drJ { local val:4 = trunc(drJ); drD = zext(val); } #la-fp-d.txt ftintrm.l.d mask=0x011a2800 #0x011a2800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrm.l.d drD, drJ is op10_31=0x468a & drD & drJ { drD = trunc(drJ); } #la-fp-d.txt ftintrp.w.d mask=0x011a4800 #0x011a4800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrp.w.d drD, drJ is op10_31=0x4692 & drD & drJ { local val:4 = trunc(drJ); drD = zext(val); } #la-fp-d.txt ftintrp.l.d mask=0x011a6800 #0x011a6800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrp.l.d drD, drJ is op10_31=0x469a & drD & drJ { drD = trunc(drJ); } #la-fp-d.txt ftintrz.w.d mask=0x011a8800 #0x011a8800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrz.w.d 
drD, drJ is op10_31=0x46a2 & drD & drJ { local val:4 = trunc(drJ); drD = zext(val); } #la-fp-d.txt ftintrz.l.d mask=0x011aa800 #0x011aa800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrz.l.d drD, drJ is op10_31=0x46aa & drD & drJ { drD = trunc(drJ); } #la-fp-d.txt ftintrne.w.d mask=0x011ac800 #0x011ac800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrne.w.d drD, drJ is op10_31=0x46b2 & drD & drJ { local val:4 = round_even(drJ); drD = zext(val); } #la-fp-d.txt ftintrne.l.d mask=0x011ae800 #0x011ae800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrne.l.d drD, drJ is op10_31=0x46ba & drD & drJ { drD = round_even(drJ); } #la-fp-d.txt ftint.w.d mask=0x011b0800 #0x011b0800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftint.w.d drD, drJ is op10_31=0x46c2 & drD & drJ { local val:4 = trunc(drJ); drD = zext(val); } #la-fp-d.txt ftint.l.d mask=0x011b2800 #0x011b2800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftint.l.d drD, drJ is op10_31=0x46ca & drD & drJ { drD = trunc(drJ); } #la-fp-d.txt ffint.d.w mask=0x011d2000 #0x011d2000 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ffint.d.w drD, drJ is op10_31=0x4748 & drD & drJ & frJ { drD = int2float(frJ); } #la-fp-d.txt ffint.d.l mask=0x011d2800 #0x011d2800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ffint.d.l drD, drJ is op10_31=0x474a & drD & drJ { drD = int2float(drJ); } #la-fp-d.txt frint.d mask=0x011e4800 #0x011e4800 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :frint.d drD, drJ is op10_31=0x4792 & drD & drJ { local val:8 = trunc(drJ); drD = int2float(val); } #la-fp-d.txt fmadd.d mask=0x08200000 #0x08200000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fmadd.d drD, drJ, drK, drA is op20_31=0x82 & drD & drJ & drK & drA { drD = (drJ f* drK) f+ drA; } #la-fp-d.txt fmsub.d mask=0x08600000 #0x08600000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fmsub.d drD, drJ, 
drK, drA is op20_31=0x86 & drD & drJ & drK & drA { drD = (drJ f* drK) f- drA; } #la-fp-d.txt fnmadd.d mask=0x08a00000 #0x08a00000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fnmadd.d drD, drJ, drK, drA is op20_31=0x8a & drD & drJ & drK & drA { drD = f- ((drJ f* drK) f+ drA); } #la-fp-d.txt fnmsub.d mask=0x08e00000 #0x08e00000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fnmsub.d drD, drJ, drK, drA is op20_31=0x8e & drD & drJ & drK & drA { drD = f- ((drJ f* drK) f- drA); } dSNaN: "c" is ccf_s = 0 { } dSNaN: "s" is ccf_s = 1 { } dcond: dSNaN^"af" is ccf=0x0 & dSNaN { DCMPR = 0; } dcond: dSNaN^"lt" is ccf=0x1 & dSNaN { DCMPR = DCMP1 f< DCMP2; } dcond: dSNaN^"eq" is ccf=0x2 & dSNaN { DCMPR = DCMP1 f== DCMP2; } dcond: dSNaN^"le" is ccf=0x3 & dSNaN { DCMPR = DCMP1 f<= DCMP2; } dcond: dSNaN^"un" is ccf=0x4 & dSNaN { DCMPR = nan(DCMP1) || nan(DCMP2); } dcond: dSNaN^"ult" is ccf=0x5 & dSNaN { DCMPR = (nan(DCMP1) || nan(DCMP2)) || (DCMP1 f< DCMP2); } dcond: dSNaN^"ueq" is ccf=0x6 & dSNaN { DCMPR = (nan(DCMP1) || nan(DCMP2)) || (DCMP1 f== DCMP2); } dcond: dSNaN^"ule" is ccf=0x7 & dSNaN { DCMPR = (nan(DCMP1) || nan(DCMP2)) || (DCMP1 f<= DCMP2); } dcond: dSNaN^"ne" is ccf=0x8 & dSNaN { DCMPR = DCMP1 f!= DCMP2; } dcond: dSNaN^"or" is ccf=0xa & dSNaN { DCMPR = !(nan(DCMP1) || nan(DCMP2)); } dcond: dSNaN^"une" is ccf=0xc & dSNaN { DCMPR = (nan(DCMP1) || nan(DCMP2)) || (DCMP1 f!= DCMP2); } #la-fp-d.txt fcmp.caf.d mask=0x0c200000 #0x0c200000 0xffff8018 c0:3, f5:5, f10:5 ['fcc0_3_s0', 'freg5_5_s0', 'freg10_5_s0'] :fcmp.^dcond^".d" fccD, drJ, drK is op20_31=0xc2 & dcond & op3_4 = 0 & fccD & drJ & drK { DCMP1 = drJ; DCMP2 = drK; build dcond; fccD = DCMPR; } #la-fp-d.txt fld.d mask=0x2b800000 #0x2b800000 0xffc00000 f0:5, r5:5, so10:12 ['freg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :fld.d drD, ldst_addr is op22_31=0xae & drD & ldst_addr { drD = sext(*[ram]:8 ldst_addr); } #la-fp-d.txt 
fst.d mask=0x2bc00000 #0x2bc00000 0xffc00000 f0:5, r5:5, so10:12 ['freg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :fst.d drD, ldst_addr is op22_31=0xaf & drD & ldst_addr { *[ram]:8 ldst_addr = drD; } #la-fp-d.txt fldx.d mask=0x38340000 #0x38340000 0xffff8000 f0:5, r5:5, r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fldx.d drD, ldstx_addr is op15_31=0x7068 & drD & ldstx_addr { drD = *[ram]:8 ldstx_addr; } #la-fp-d.txt fstx.d mask=0x383c0000 #0x383c0000 0xffff8000 f0:5, r5:5, r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fstx.d drD, ldstx_addr is op15_31=0x7078 & drD & ldstx_addr { *[ram]:8 ldstx_addr = drD; } #la-bound-fp-d.txt fldgt.d mask=0x38748000 #0x38748000 0xffff8000 f0:5,r5:5,r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fldgt.d drD, RJsrc, RKsrc is op15_31=0x70e9 & drD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr > RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; drD = sext(*[ram]:8 vaddr); } #la-bound-fp-d.txt fldle.d mask=0x38758000 #0x38758000 0xffff8000 f0:5,r5:5,r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fldle.d drD, RJsrc, RKsrc is op15_31=0x70eb & drD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; drD = sext(*[ram]:8 vaddr); } define pcodeop fstgt.d; #la-bound-fp-d.txt fstgt.d mask=0x38768000 #0x38768000 0xffff8000 f0:5,r5:5,r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fstgt.d drD, RJsrc, RKsrc is op15_31=0x70ed & drD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr > RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:8 vaddr = drD; } define pcodeop fstle.d; #la-bound-fp-d.txt fstle.d mask=0x38778000 #0x38778000 0xffff8000 f0:5,r5:5,r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fstle.d drD, RJsrc, RKsrc is op15_31=0x70ef & drD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:8 vaddr = drD; } ================================================ 
FILE: pypcode/processors/Loongarch/data/languages/loongarch_float.sinc ================================================ #### # General Floating-Point Instructions #### #la-fp.txt fcsrwr mask=0x0114c000 [@orig_name=movgr2fcsr, @orig_fmt=DJ] #0x0114c000 0xfffffc1c fc0:2,r5:5 ['fcreg0_2_s0', 'reg5_5_s0'] :movgr2fcsr fcsr, RJ32 is op10_31=0x4530 & RJ32 & RJ32src & fcsr & imm0_5=0 { fcsr = RJ32src; } :movgr2fcsr fcsr^".enables", RJ32 is op10_31=0x4530 & RJ32 & RJ32src & fcsr & imm0_5=1 { local mask:4 = 0x1f; fcsr = (fcsr & ~mask) + (RJ32src & mask); } :movgr2fcsr fcsr^".flags_cause", RJ32 is op10_31=0x4530 & RJ32 & RJ32src & fcsr & imm0_5=2 { local mask:4 = 0x1f1f0000; fcsr = (fcsr & ~mask) + (RJ32src & mask); } :movgr2fcsr fcsr^".rm", RJ32 is op10_31=0x4530 & RJ32 & RJ32src & fcsr & imm0_5=3 { local mask:4 = 0x300; fcsr = (fcsr & ~mask) + (RJ32src & mask); } define pcodeop uncertain_fcsr; # per the manual: if the fcsr does not exist, the result is uncertain :movgr2fcsr fcsr, RJ32 is op10_31=0x4530 & RJ32 & RJ32src & fcsr & imm0_5 { uncertain_fcsr(imm0_5:1); fcsr = RJ32src; } #la-fp.txt fcsrrd mask=0x0114c800 [@orig_name=movfcsr2gr, @orig_fmt=DJ] #0x0114c800 0xffffff80 r0:5,fc5:2 ['reg0_5_s0', 'fcreg5_2_s0'] :movfcsr2gr RD, fcsr is op10_31=0x4532 & RD & fcsr & imm5_5=0 { RD = sext(fcsr); } :movfcsr2gr RD, fcsr is op10_31=0x4532 & RD & fcsr & imm5_5=1 { local mask:4 = 0x1f; RD = sext(fcsr & mask); } :movfcsr2gr RD, fcsr is op10_31=0x4532 & RD & fcsr & imm5_5=2 { local mask:4 = 0x1f1f0000; RD = sext(fcsr & mask); } :movfcsr2gr RD, fcsr is op10_31=0x4532 & RD & fcsr & imm5_5=3 { local mask:4 = 0x300; RD = sext(fcsr & mask); } # per the manual: if the fcsr does not exist, the result is uncertain :movfcsr2gr RD, fcsr is op10_31=0x4532 & RD & fcsr & imm5_5 { uncertain_fcsr(imm5_5:1); RD = sext(fcsr); } #la-fp.txt movfr2fcc mask=0x0114d000 [@orig_name=movfr2cf] #0x0114d000 0xfffffc18 c0:3,f5:5 ['fcc0_3_s0', 'freg5_5_s0'] :movfr2cf fccD, FRJ is op11_31=0x229a & fccD & FRJ { 
fccD = FRJ[0,1]; } #la-fp.txt movfcc2fr mask=0x0114d400 [@orig_name=movcf2fr] #0x0114d400 0xffffff00 f0:5,c5:3 ['freg0_5_s0', 'fcc5_3_s0'] :movcf2fr FRD, fccJ is op10_31=0x4535 & FRD & fccJ { FRD[0,1] = fccJ[0,1]; } #la-fp.txt movgr2fcc mask=0x0114d800 [@orig_name=movgr2cf] #0x0114d800 0xfffffc18 c0:3,r5:5 ['fcc0_3_s0', 'reg5_5_s0'] :movgr2cf fccD, RJsrc is op10_31=0x4536 & fccD & RJsrc { fccD = RJsrc[0,1]; } #la-fp.txt movfcc2gr mask=0x0114dc00 [@orig_name=movcf2gr] #0x0114dc00 0xffffff00 r0:5,c5:3 ['reg0_5_s0', 'fcc5_3_s0'] :movcf2gr RD, fccJ is op10_31=0x4537 & RD & fccJ { RD[0,1] = fccJ[0,1]; } #la-fp.txt fsel mask=0x0d000000 #0x0d000000 0xfffc0000 f0:5,f5:5,f10:5,c15:3 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'fcc15_3_s0'] :fsel FRD, FRJ, FRK, fccA is op20_31=0xd0 & FRD & FRJ & FRK & fccA { local test:1 = (fccA == 0); FRD = (zext(!test) * FRK) + (zext(test) * FRJ); } #la-fp.txt bceqz mask=0x48000000 [@orig_fmt=CjSd5k16ps2] #0x48000000 0xfc000300 c5:3,sb0:5|10:16<<2 ['fcc5_3_s0', 'sbranch0_0_s2'] :bceqz fccJ, Rel21 is op26_31=0x12 & fccJ & op8_9=0 & Rel21 { if(fccJ == 0) goto Rel21; } #la-fp.txt bcnez mask=0x48000100 [@orig_fmt=CjSd5k16ps2] #0x48000100 0xfc000300 c5:3,sb0:5|10:16<<2 ['fcc5_3_s0', 'sbranch0_0_s2'] :bcnez fccJ, Rel21 is op26_31=0x12 & fccJ & op8_9=1 & Rel21 { if(fccJ != 0) goto Rel21; } ##################################### # Floating-Point Single Instructions ##################################### #la-fp-s.txt fadd.s mask=0x01008000 #0x01008000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fadd.s frD, frJ, frK is op15_31=0x201 & frD & frJ & frK { frD = frJ f+ frK; } #la-fp-s.txt fsub.s mask=0x01028000 #0x01028000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fsub.s frD, frJ, frK is op15_31=0x205 & frD & frJ & frK { frD = frJ f- frK; } #la-fp-s.txt fmul.s mask=0x01048000 #0x01048000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmul.s frD, frJ, frK is 
op15_31=0x209 & frD & frJ & frK { frD = frJ f* frK; } #la-fp-s.txt fdiv.s mask=0x01068000 #0x01068000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fdiv.s frD, frJ, frK is op15_31=0x20d & frD & frJ & frK { frD = frJ f/ frK; } #la-fp-s.txt fmadd.s mask=0x08100000 #0x08100000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fmadd.s frD, frJ, frK, frA is op20_31=0x81 & frD & frJ & frK & frA { frD = (frJ f* frK) f+ frA; } #la-fp-s.txt fmsub.s mask=0x08500000 #0x08500000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fmsub.s frD, frJ, frK, frA is op20_31=0x85 & frD & frJ & frK & frA { frD = (frJ f* frK) f- frA; } #la-fp-s.txt fnmadd.s mask=0x08900000 #0x08900000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fnmadd.s frD, frJ, frK, frA is op20_31=0x89 & frD & frJ & frK & frA { frD = f- ((frJ f* frK) f+ frA); } #la-fp-s.txt fnmsub.s mask=0x08d00000 #0x08d00000 0xfff00000 f0:5, f5:5, f10:5, f15:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0', 'freg15_5_s0'] :fnmsub.s frD, frJ, frK, frA is op20_31=0x8d & frD & frJ & frK & frA { frD = f- ((frJ f* frK) f- frA); } #la-fp-s.txt fmax.s mask=0x01088000 #0x01088000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmax.s frD, frJ, frK is op15_31=0x211 & frD & frJ & frK { local jval = frJ; local kval = frK; local test = (jval f>= kval); frD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-s.txt fmin.s mask=0x010a8000 #0x010a8000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmin.s frD, frJ, frK is op15_31=0x215 & frD & frJ & frK { local jval = frJ; local kval = frK; local test = (jval f<= kval); frD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-s.txt fmaxa.s mask=0x010c8000 #0x010c8000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmaxa.s frD, frJ, frK is op15_31=0x219 & 
# -----------------------------------------------------------------------------
# Tail of la-fp-s.sinc: LoongArch single-precision FP instructions.
# fmaxa/fmina pick the larger/smaller magnitude branchlessly: compare abs()
# values, then blend via zext(test)*a + zext(!test)*b. fcopysign.s builds the
# result from frK's sign bit (0x80000000) and frJ's magnitude (0x7fffffff).
# frecip/frsqrt compute 1/x and 1/sqrt(x) from int2float(1). fscaleb/flogb/
# fclass defer to user pcodeops (f_scaleb/f_logb/f_class). movgr2frh.w and
# movfrh2gr.s access the high 32 bits of a double register (drD/drJ[32,32]).
# NOTE(review): ftintrm (round toward -inf), ftintrp (round toward +inf),
# ftintrz and ftint are all modeled with trunc(), and frint.s with
# trunc+int2float — p-code has no rounding-mode-aware conversion, so only
# ftintrne (round_even) is distinguished; confirm this simplification is
# intended. The ".l" variants widen through an 8-byte local and keep the low
# half via val(0).
# NOTE(review): this chunk is line-collapsed by extraction — in SLEIGH a '#'
# comments to end-of-line, so text after the first '#' on each physical line
# below is lexically comment here; bytes are preserved unchanged.
frD & frJ & frK { local jval = frJ; local kval = frK; local test = (abs(jval) f>= abs(kval)); frD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-s.txt fmina.s mask=0x010e8000 #0x010e8000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fmina.s frD, frJ, frK is op15_31=0x21d & frD & frJ & frK { local jval = frJ; local kval = frK; local test = (abs(jval) f<= abs(kval)); frD = (zext(test) * jval) + (zext(!test) * kval); } #la-fp-s.txt fabs.s mask=0x01140400 #0x01140400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fabs.s frD, frJ is op10_31=0x4501 & frD & frJ { frD = abs(frJ); } #la-fp-s.txt fneg.s mask=0x01141400 #0x01141400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fneg.s frD, frJ is op10_31=0x4505 & frD & frJ { frD = f- frJ; } #la-fp-s.txt fsqrt.s mask=0x01144400 #0x01144400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fsqrt.s frD, frJ is op10_31=0x4511 & frD & frJ { frD = sqrt(frJ); } #la-fp-s.txt frecip.s mask=0x01145400 #0x01145400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :frecip.s frD, frJ is op10_31=0x4515 & frD & frJ { local one:4 = 1; frD = int2float(one) f/ frJ; } #la-fp-s.txt frsqrt.s mask=0x01146400 #0x01146400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :frsqrt.s frD, frJ is op10_31=0x4519 & frD & frJ { local one:4 = 1; frD = int2float(one) f/ sqrt(frJ); } #la-fp-s.txt fscaleb.s mask=0x01108000 #0x01108000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fscaleb.s frD, frJ, frK is op15_31=0x221 & frD & frJ & frK { frD = f_scaleb(frJ, frK); } #la-fp-s.txt flogb.s mask=0x01142400 #0x01142400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :flogb.s frD, frJ is op10_31=0x4509 & frD & frJ { frD = f_logb(frJ); } #la-fp-s.txt fcopysign.s mask=0x01128000 #0x01128000 0xffff8000 f0:5, f5:5, f10:5 ['freg0_5_s0', 'freg5_5_s0', 'freg10_5_s0'] :fcopysign.s frD, frJ, frK is op15_31=0x225 & frD & frJ & frK { local kval = frK & 0x80000000; local jval = frJ & 0x7fffffff;
frD = kval | jval ; } #la-fp-s.txt fclass.s mask=0x01143400 #0x01143400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fclass.s frD, frJ is op10_31=0x450d & frD & frJ { frD = f_class(frJ); } #la-fp-s.txt fmov.s mask=0x01149400 #0x01149400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :fmov.s frD, frJ is op10_31=0x4525 & frD & frJ { frD = frJ; } #la-fp-s.txt movgr2fr.w mask=0x0114a400 #0x0114a400 0xfffffc00 f0:5,r5:5 ['freg0_5_s0', 'reg5_5_s0'] :movgr2fr.w frD, RJ32src is op10_31=0x4529 & frD & RJ32src { frD = RJ32src; } #la-fp-s.txt movgr2frh.w mask=0x0114ac00 #0x0114ac00 0xfffffc00 f0:5,r5:5 ['freg0_5_s0', 'reg5_5_s0'] :movgr2frh.w drD, RJ32src is op10_31=0x452b & drD & RJ32src { drD = (zext(RJ32src) << 32) | (drD & 0xffffffff); } #la-fp-s.txt movfr2gr.s mask=0x0114b400 #0x0114b400 0xfffffc00 r0:5, f5:5 ['reg0_5_s0', 'freg5_5_s0'] :movfr2gr.s RD, frJ is op10_31=0x452d & RD & frJ { RD = sext(frJ); } #la-fp-s.txt movfrh2gr.s mask=0x0114bc00 #0x0114bc00 0xfffffc00 r0:5, f5:5 ['reg0_5_s0', 'freg5_5_s0'] :movfrh2gr.s RD, drJ is op10_31=0x452f & RD & drJ { RD = sext(drJ[32,32]); } #la-fp-s.txt ftintrm.w.s mask=0x011a0400 #0x011a0400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrm.w.s frD, frJ is op10_31=0x4681 & frD & frJ { frD = trunc(frJ); } #la-fp-s.txt ftintrm.l.s mask=0x011a2400 #0x011a2400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrm.l.s frD, frJ is op10_31=0x4689 & frD & frJ { local val:8 = trunc(frJ); frD = val(0); } #la-fp-s.txt ftintrp.w.s mask=0x011a4400 #0x011a4400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrp.w.s frD, frJ is op10_31=0x4691 & frD & frJ { frD = trunc(frJ); } #la-fp-s.txt ftintrp.l.s mask=0x011a6400 #0x011a6400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrp.l.s frD, frJ is op10_31=0x4699 & frD & frJ { local val:8 = trunc(frJ); frD = val(0); } #la-fp-s.txt ftintrz.w.s mask=0x011a8400 #0x011a8400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrz.w.s frD, frJ is
op10_31=0x46a1 & frD & frJ { frD = trunc(frJ); } #la-fp-s.txt ftintrz.l.s mask=0x011aa400 #0x011aa400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrz.l.s frD, frJ is op10_31=0x46a9 & frD & frJ { local val:8 = trunc(frJ); frD = val(0); } #la-fp-s.txt ftintrne.w.s mask=0x011ac400 #0x011ac400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrne.w.s frD, frJ is op10_31=0x46b1 & frD & frJ { frD = round_even(frJ); } #la-fp-s.txt ftintrne.l.s mask=0x011ae400 #0x011ae400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftintrne.l.s frD, frJ is op10_31=0x46b9 & frD & frJ { local val:8 = round_even(frJ); frD = val(0); } #la-fp-s.txt ftint.w.s mask=0x011b0400 #0x011b0400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftint.w.s frD, frJ is op10_31=0x46c1 & frD & frJ { frD = trunc(frJ); } #la-fp-s.txt ftint.l.s mask=0x011b2400 #0x011b2400 0xfffffc00 f0:5, f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ftint.l.s frD, frJ is op10_31=0x46c9 & frD & frJ { local val:8 = trunc(frJ); frD = val(0); } #la-fp-s.txt ffint.s.w mask=0x011d1000 #0x011d1000 0xfffffc00 f0:5,f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ffint.s.w frD,frJ is op10_31=0x4744 & frD & frJ { frD =int2float(frJ); } #la-fp-s.txt ffint.s.l mask=0x011d1800 #0x011d1800 0xfffffc00 f0:5,f5:5 ['freg0_5_s0', 'freg5_5_s0'] :ffint.s.l frD, drD is op10_31=0x4746 & frD & drD { frD = int2float(drD); } #la-fp-s.txt frint.s mask=0x011e4400 #0x011e4400 0xfffffc00 f0:5,f5:5 ['freg0_5_s0', 'freg5_5_s0'] :frint.s frD,frJ is op10_31=0x4791 & frD & frJ { local val:4 = trunc(frJ); frD = int2float(val); } #la-fp-s.txt fld.s mask=0x2b000000 #0x2b000000 0xffc00000 f0:5,r5:5,so10:12 ['freg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :fld.s frD, ldst_addr is op22_31=0xac & frD & ldst_addr { frD = *[ram]:4 ldst_addr; } #la-fp-s.txt fst.s mask=0x2b400000 #0x2b400000 0xffc00000 f0:5,r5:5,so10:12 ['freg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :fst.s frD, ldst_addr is op22_31=0xad & frD & ldst_addr { *[ram]:4 ldst_addr = frD:4; } #la-fp-s.txt fldx.s
# fldx.s/fstx.s: register-indexed (base + index) FP load/store via ldstx_addr.
# The SNaN sub-table selects the "c" (quiet) vs "s" (signaling) mnemonic
# suffix from ccf_s; the fcond table enumerates FP compare conditions
# (af/lt/eq/le/un/ult/ueq/ule/ne/or/une) and computes FCMPR from the dummy
# compare registers FCMP1/FCMP2 (the u* forms also succeed when either
# operand is NaN). fcmp.^fcond^.s loads FCMP1/FCMP2, builds fcond, and
# writes the result into the condition-flag register fccD.
# fldgt/fldle/fstgt/fstle are bound-checked accesses: when the bound check
# fails they call bound_check_exception and skip the memory access.
# NOTE(review): the bare "goto ;" in the bound-checked forms looks like an
# extraction artifact — upstream Ghidra SLEIGH uses a local label such as
# "goto <load>;" ... "<load>"; the angle-bracketed labels appear to have
# been stripped when this repository snapshot was flattened. Verify against
# the original la-bound-fp-s source before editing.
# After the FILE banner: head of loongarch_main.sinc — endianness/alignment,
# the ram and iocsr address spaces, pc/scratch registers, the fcc0-7
# condition flags and fcsr, then the 32 GPRs (zero..s8); under LA64 each GPR
# also gets 4-byte _lo/_hi overlay halves at the same storage offsets.
mask=0x38300000 #0x38300000 0xffff8000 f0:5,r5:5,r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fldx.s frD, ldstx_addr is op15_31=0x7060 & frD & ldstx_addr { frD = *[ram]:4 ldstx_addr; } #la-fp-s.txt fstx.s mask=0x38380000 #0x38380000 0xffff8000 f0:5,r5:5,r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fstx.s frD, ldstx_addr is op15_31=0x7070 & frD & ldstx_addr { *[ram]:4 ldstx_addr = frD; } SNaN: "c" is ccf_s = 0 { } SNaN: "s" is ccf_s = 1 { } fcond: SNaN^"af" is ccf=0x0 & SNaN { FCMPR = 0; } fcond: SNaN^"lt" is ccf=0x1 & SNaN { FCMPR = FCMP1 f< FCMP2; } fcond: SNaN^"eq" is ccf=0x2 & SNaN { FCMPR = FCMP1 f== FCMP2; } fcond: SNaN^"le" is ccf=0x3 & SNaN { FCMPR = FCMP1 f<= FCMP2; } fcond: SNaN^"un" is ccf=0x4 & SNaN { FCMPR = nan(FCMP1) || nan(FCMP2); } fcond: SNaN^"ult" is ccf=0x5 & SNaN { FCMPR = (nan(FCMP1) || nan(FCMP2)) || (FCMP1 f< FCMP2); } fcond: SNaN^"ueq" is ccf=0x6 & SNaN { FCMPR = (nan(FCMP1) || nan(FCMP2)) || (FCMP1 f== FCMP2); } fcond: SNaN^"ule" is ccf=0x7 & SNaN { FCMPR = (nan(FCMP1) || nan(FCMP2)) || (FCMP1 f<= FCMP2); } fcond: SNaN^"ne" is ccf=0x8 & SNaN { FCMPR = FCMP1 f!= FCMP2; } fcond: SNaN^"or" is ccf=0xa & SNaN { FCMPR = !(nan(FCMP1) || nan(FCMP2)); } fcond: SNaN^"une" is ccf=0xc & SNaN { FCMPR = (nan(FCMP1) || nan(FCMP2)) || (FCMP1 f!= FCMP2); } #la-fp-s.txt fcmp.caf.s mask=0x0c100000 #0x0c100000 0xffff8018 c0:3, f5:5, f10:5 ['fcc0_3_s0', 'freg5_5_s0', 'freg10_5_s0'] :fcmp.^fcond^".s" fccD, frJ, frK is op20_31=0xc1 & fcond & op3_4 = 0 & fccD & frJ & frK { FCMP1 = frJ; FCMP2 = frK; build fcond; fccD = FCMPR; } #la-bound-fp-s.txt fldgt.s mask=0x38740000 #0x38740000 0xffff8000 f0:5, r5:5, r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fldgt.s frD, RJsrc, RKsrc is op15_31=0x70e8 & frD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr > RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; frD = sext(*[ram]:4 vaddr); } #la-bound-fp-s.txt fldle.s mask=0x38750000 #0x38750000 0xffff8000 f0:5, r5:5,
'reg10_5_s0'] :fldle.s frD, RJsrc, RKsrc is op15_31=0x70ea & frD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; frD = sext(*[ram]:4 vaddr); } #la-bound-fp-s.txt fstgt.s mask=0x38760000 #0x38760000 0xffff8000 f0:5, r5:5, r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fstgt.s frD, RJsrc, RKsrc is op15_31=0x70ec & frD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr > RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:4 vaddr = frD; } #la-bound-fp-s.txt fstle.s mask=0x38770000 #0x38770000 0xffff8000 f0:5, r5:5, r10:5 ['freg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :fstle.s frD, RJsrc, RKsrc is op15_31=0x70ee & frD & RJsrc & RKsrc { local vaddr = RJsrc; if (vaddr <= RKsrc) goto ; bound_check_exception(RJsrc, RKsrc); goto inst_next; *[ram]:4 vaddr = frD; } ================================================ FILE: pypcode/processors/Loongarch/data/languages/loongarch_main.sinc ================================================ define endian=little; define alignment=4; define space ram type=ram_space size=$(REGSIZE) default; define space iocsr type=ram_space size=$(REGSIZE); define space register type=register_space size=4; define register offset=0x0 size=$(REGSIZE) [ pc scr0 scr1 scr2 scr3 ]; define register offset=0x40 size=1 [ fcc0 fcc1 fcc2 fcc3 fcc4 fcc5 fcc6 fcc7 ]; define register offset=0x48 size=4 [ fcsr ]; # ABI names: # "$zero", "$ra", "$tp", "$sp", "$a0", "$a1", "$a2", "$a3", # "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2", "$t3", # "$t4", "$t5", "$t6", "$t7", "$t8", "$x", "$fp", "$s0", # "$s1", "$s2", "$s3", "$s4", "$s5", "$s6", "$s7", "$s8", # GPR General Purpose Registers define register offset=0x100 size=$(REGSIZE) [ zero ra tp sp a0 a1 a2 a3 a4 a5 a6 a7 t0 t1 t2 t3 t4 t5 t6 t7 t8 r21 fp s0 s1 s2 s3 s4 s5 s6 s7 s8 ]; @ifdef LA64 define register offset=0x100 size=4 [ r0_lo r0_hi ra_lo ra_hi tp_lo tp_hi sp_lo sp_hi a0_lo a0_hi a1_lo a1_hi a2_lo a2_hi a3_lo a3_hi a4_lo
# Register storage layout, continued. The LA64 GPR _lo/_hi overlays finish
# here. The FP bank at offset 0x1000 is laid out so all views alias the same
# storage: under FREGSIZE==4 the 4-byte singles sit 8 bytes apart with paired
# 8-byte doubles (fa0_1 etc.) overlaying them; otherwise 4-byte _lo halves
# overlay the 8-byte doubles. The 16-byte LSX (v0..v31) and 32-byte LASX
# (x0..x31) vector registers overlay the same 0x1000 region. The CSR bank at
# $(CSR_OFFSET) names the architecturally defined control/status registers
# (crmd, prmd, estat, era, badv, tlb*, pgd*, save0-15, timer regs, llbctl,
# merr*, ctag, ...) with csrNN placeholders filling unassigned numbers so
# csrxchg/csrrd/csrwr can index the bank uniformly.
a4_hi a5_lo a5_hi a6_lo a6_hi a7_lo a7_hi t0_lo t0_hi t1_lo t1_hi t2_lo t2_hi t3_lo t3_hi t4_lo t4_hi t5_lo t5_hi t6_lo t6_hi t7_lo t7_hi t8_lo t8_hi r21_lo r21_hi fp_lo fp_hi s0_lo s0_hi s1_lo s1_hi s2_lo s2_hi s3_lo s3_hi s4_lo s4_hi s5_lo s5_hi s6_lo s6_hi s7_lo s7_hi s8_lo s8_hi ]; @endif # Floating Point registers (either 32- or 64-bit) @if FREGSIZE == "4" define register offset=0x1000 size=4 [ fa0 _ _ _ _ _ _ _ fa1 _ _ _ _ _ _ _ fa2 _ _ _ _ _ _ _ fa3 _ _ _ _ _ _ _ fa4 _ _ _ _ _ _ _ fa5 _ _ _ _ _ _ _ fa6 _ _ _ _ _ _ _ fa7 _ _ _ _ _ _ _ ft0 _ _ _ _ _ _ _ ft1 _ _ _ _ _ _ _ ft2 _ _ _ _ _ _ _ ft3 _ _ _ _ _ _ _ ft4 _ _ _ _ _ _ _ ft5 _ _ _ _ _ _ _ ft6 _ _ _ _ _ _ _ ft7 _ _ _ _ _ _ _ ft8 _ _ _ _ _ _ _ ft9 _ _ _ _ _ _ _ ft10 _ _ _ _ _ _ _ ft11 _ _ _ _ _ _ _ ft12 _ _ _ _ _ _ _ ft13 _ _ _ _ _ _ _ ft14 _ _ _ _ _ _ _ ft15 _ _ _ _ _ _ _ fs0 _ _ _ _ _ _ _ fs1 _ _ _ _ _ _ _ fs2 _ _ _ _ _ _ _ fs3 _ _ _ _ _ _ _ fs4 _ _ _ _ _ _ _ fs5 _ _ _ _ _ _ _ fs6 _ _ _ _ _ _ _ fs7 _ _ _ _ _ _ _ ]; define register offset=0x1000 size=8 [ fa0_1 _ _ _ fa2_3 _ _ _ fa4_5 _ _ _ fa6_7 _ _ _ ft8_9 _ _ _ ft10_11 _ _ _ ft12_13 _ _ _ ft14_15 _ _ _ ft16_17 _ _ _ ft18_19 _ _ _ ft20_21 _ _ _ ft22_23 _ _ _ fs24_25 _ _ _ fs26_27 _ _ _ fs28_29 _ _ _ fs30_31 _ _ _ ]; @else define register offset=0x1000 size=4 [ fa0_lo _ _ _ _ _ _ _ fa1_lo _ _ _ _ _ _ _ fa2_lo _ _ _ _ _ _ _ fa3_lo _ _ _ _ _ _ _ fa4_lo _ _ _ _ _ _ _ fa5_lo _ _ _ _ _ _ _ fa6_lo _ _ _ _ _ _ _ fa7_lo _ _ _ _ _ _ _ ft0_lo _ _ _ _ _ _ _ ft1_lo _ _ _ _ _ _ _ ft2_lo _ _ _ _ _ _ _ ft3_lo _ _ _ _ _ _ _ ft4_lo _ _ _ _ _ _ _ ft5_lo _ _ _ _ _ _ _ ft6_lo _ _ _ _ _ _ _ ft7_lo _ _ _ _ _ _ _ ft8_lo _ _ _ _ _ _ _ ft9_lo _ _ _ _ _ _ _ ft10_lo _ _ _ _ _ _ _ ft11_lo _ _ _ _ _ _ _ ft12_lo _ _ _ _ _ _ _ ft13_lo _ _ _ _ _ _ _ ft14_lo _ _ _ _ _ _ _ ft15_lo _ _ _ _ _ _ _ fs0_lo _ _ _ _ _ _ _ fs1_lo _ _ _ _ _ _ _ fs2_lo _ _ _ _ _ _ _ fs3_lo _ _ _ _ _ _ _ fs4_lo _ _ _ _ _ _ _ fs5_lo _ _ _ _ _ _ _ fs6_lo _ _ _ _ _ _ _ fs7_lo _ _ _ _ _ _ _ ]; define register offset=0x1000
size=8 [ fa0 _ _ _ fa1 _ _ _ fa2 _ _ _ fa3 _ _ _ fa4 _ _ _ fa5 _ _ _ fa6 _ _ _ fa7 _ _ _ ft0 _ _ _ ft1 _ _ _ ft2 _ _ _ ft3 _ _ _ ft4 _ _ _ ft5 _ _ _ ft6 _ _ _ ft7 _ _ _ ft8 _ _ _ ft9 _ _ _ ft10 _ _ _ ft11 _ _ _ ft12 _ _ _ ft13 _ _ _ ft14 _ _ _ ft15 _ _ _ fs0 _ _ _ fs1 _ _ _ fs2 _ _ _ fs3 _ _ _ fs4 _ _ _ fs5 _ _ _ fs6 _ _ _ fs7 _ _ _ ]; @endif #FREGSIZE == 32 # SIMD eXtension 256-bit registers (lsx) # overlaps the floating point registers above define register offset=0x1000 size=16 [ v0 _ v1 _ v2 _ v3 _ v4 _ v5 _ v6 _ v7 _ v8 _ v9 _ v10 _ v11 _ v12 _ v13 _ v14 _ v15 _ v16 _ v17 _ v18 _ v19 _ v20 _ v21 _ v22 _ v23 _ v24 _ v25 _ v26 _ v27 _ v28 _ v29 _ v30 _ v31 _ ]; # AdVanced SIMD eXtension 256-bit registers (lasx) # overlaps the floating point registers above define register offset=0x1000 size=32 [ x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 ]; @define CSR_OFFSET "0x2000" #used for the csr instructions csrxchg/cssrd/cssrw define register offset=$(CSR_OFFSET) size=$(REGSIZE) [ crmd prmd euen misc ecfg estat era badv badi csr9 csr10 csr11 eentry csr13 csr14 csr15 tlbidx tlbehi tlbelo0 tlbelo1 csr20 csr21 csr22 csr23 asid pgdl pgdh pgd pwcl pwch stlbps rvacfg cpuid prcfg1 prcfg2 prcfg3 csr36 csr37 csr38 csr39 csr40 csr41 csr42 csr43 csr44 csr45 csr46 csr47 save0 save1 save2 save3 save4 save5 save6 save7 save8 save9 save10 save11 save12 save13 save14 save15 tid tcfg tval cntc ticlr csr69 csr70 csr71 csr72 csr73 csr74 csr75 csr76 csr77 csr78 csr79 csr80 csr81 csr82 csr83 csr84 csr85 csr86 csr87 csr88 csr89 csr90 csr91 csr92 csr93 csr94 csr95 llbctl csr97 csr98 csr99 csr100 csr101 csr102 csr103 csr104 csr105 csr106 csr107 csr108 csr109 csr110 csr111 csr112 csr113 csr114 csr115 csr116 csr117 csr118 csr119 csr120 csr121 csr122 csr123 csr124 csr125 csr126 csr127 impctl1 impctl2 csr130 csr131 csr132 csr133 csr134 csr135 tlbrentry tlbrbadv tlbrera tlbrsave tlbrelo0 tlbrelo1 tlbrehi tlbrprmd merrctl
merrinfo1 merrinfo2 merrentry merrera merrsave csr150 csr151 ctag csr153 csr154 csr155 csr156 csr157 csr158 csr159 csr160 csr161 csr162 csr163 csr164 csr165 csr166 csr167 csr168 csr169 csr170 csr171 csr172 csr173 csr174 csr175 csr176 csr177 csr178 csr179 csr180 csr181 csr182 csr183 csr184 csr185 csr186 csr187 csr188 csr189 csr190 csr191 csr192 csr193 csr194 csr195 csr196 csr197 csr198 csr199 csr200 csr201 csr202 csr203 csr204 csr205 csr206 csr207 csr208 csr209 csr210 csr211 csr212 csr213 csr214 csr215 csr216 csr217 csr218 csr219 csr220 csr221 csr222 csr223 csr224 csr225 csr226 csr227 csr228 csr229 csr230 csr231 csr232 csr233 csr234 csr235 csr236 csr237 csr238 csr239 csr240 csr241 csr242 csr243 csr244 csr245 csr246 csr247 csr248 csr249 csr250 csr251 csr252 csr253 csr254 csr255 csr256 csr257 csr258 csr259 csr260 csr261 csr262 csr263 ]; # Dummy registers for floating point comparison define register offset=0x5000 size=4 [ FCMP1 FCMP2 ]; define register offset=0x5008 size=1 [ FCMPR ]; define register offset=0x5100 size=8 [ DCMP1 DCMP2 ]; define register offset=0x5110 size=1 [ DCMPR ]; define register offset=0x50 size=4 contextreg; define context contextreg phase = (0,1) ; define token instr(32) instword = ( 0,31) op26_31 = (26,31) op25_31 = (25,31) op24_31 = (24,31) op23_31 = (23,31) op22_31 = (22,31) op21_31 = (21,31) op20_31 = (20,31) op19_31 = (19,31) op18_31 = (18,31) op18_19 = (18,19) op17_31 = (17,31) op16_31 = (16,31) op15_31 = (15,31) op15_15 = (15,15) op14_31 = (14,31) op13_31 = (13,31) op12_31 = (12,31) op11_31 = (11,31) op10_31 = (10,31) op8_31 = ( 8,31) op8_9 = ( 8, 9) op7_31 = ( 7,31) op5_9 = ( 5, 9) op5_31 = ( 5,31) op4_4 = ( 4, 4) op3_4 = ( 3, 4) op2_4 = ( 2, 4) op0_2 = ( 0, 2) op0_4 = ( 0, 4) op0_31 = ( 0,31) ccf = (16,19) ccf_s = (15,15) simm5_20 = ( 5,24) signed simm5_13 = ( 5,17) signed simm10_9 = (10,18) signed simm10_8 = (10,17) signed simm10_5 = (10,14) signed simm10_14 = (10,23) signed simm10_16 = (10,25) signed simm10_12 = (10,21) signed
# Remaining instruction token fields: signed immediates, the register-number
# fields for GPRs (rD/rJ/rK and 32-bit rD32/rJ32/rK32 aliases), LASX (xr*),
# LSX (vr*), LBT (lbtr*), single-precision FP (fr*), double-precision FP
# (dr*), FP condition flags (fcc*), and assorted unsigned immediates.
# The "attach variables" statements then bind each field to its register
# bank; under LA64 the *32 fields map to the _lo overlay halves, otherwise
# they alias the full GPRs. FP fields map to _lo halves (FREGSIZE==8) or to
# the paired-double overlays (FREGSIZE==4) so that fr*/dr* always name the
# correct-width view of the same storage.
# Register sub-constructors: the *src variants carry a second pattern for
# register number 0 that exports the constant 0, modeling the hard-wired
# zero register ($zero) on reads while plain RD/RJ/RK are used as write
# destinations.
simm10_11 = (10,20) signed simm10_10 = (10,19) signed simm0_5 = ( 0, 4) signed simm0_10 = ( 0, 9) signed rK = (10,14) rK32 = (10,14) rJ = ( 5, 9) rJ32 = ( 5, 9) rD = ( 0, 4) rD32 = ( 0, 4) xrK = (10,14) xrJ = ( 5, 9) xrD = ( 0, 4) xrA = (15,19) vrK = (10,14) vrJ = ( 5, 9) vrD = ( 0, 4) vrA = (15,19) lbtrJ = ( 5, 6) lbtrD = ( 0, 1) frK = (10,14) frJ = ( 5, 9) frD = ( 0, 4) frA = (15,19) drK = (10,14) drJ = ( 5, 9) drD = ( 0, 4) drA = (15,19) fccJ = ( 5, 7) fccD = ( 0, 2) fccA = (15,17) imm5_5 = ( 5, 9) imm5_3 = ( 5, 7) imm18_5 = (18,22) imm18_4 = (18,21) imm18_3 = (18,20) imm18_2 = (18,19) imm18_1 = (18,18) imm16_6 = (16,21) imm16_5 = (16,20) imm15_3 = (15,17) imm15_2 = (15,16) imm10_8 = (10,17) imm10_7 = (10,16) imm10_6 = (10,15) imm10_5 = (10,14) imm10_4 = (10,13) imm10_3 = (10,12) imm10_2 = (10,11) imm10_16 = (10,25) imm10_14 = (10,23) imm10_12 = (10,21) imm10_1 = (10,10) imm0_5 = ( 0, 4) imm0_4 = ( 0, 3) imm0_15 = ( 0,14) ; attach variables [ rD rJ rK ] [ zero ra tp sp a0 a1 a2 a3 a4 a5 a6 a7 t0 t1 t2 t3 t4 t5 t6 t7 t8 r21 fp s0 s1 s2 s3 s4 s5 s6 s7 s8 ]; @ifdef LA64 attach variables [ rD32 rJ32 rK32 ] [ r0_lo ra_lo tp_lo sp_lo a0_lo a1_lo a2_lo a3_lo a4_lo a5_lo a6_lo a7_lo t0_lo t1_lo t2_lo t3_lo t4_lo t5_lo t6_lo t7_lo t8_lo r21_lo fp_lo s0_lo s1_lo s2_lo s3_lo s4_lo s5_lo s6_lo s7_lo s8_lo ]; @else # For LA32 these are the same as rD, rJ, rK attach variables [ rD32 rJ32 rK32 ] [ zero ra tp sp a0 a1 a2 a3 a4 a5 a6 a7 t0 t1 t2 t3 t4 t5 t6 t7 t8 r21 fp s0 s1 s2 s3 s4 s5 s6 s7 s8 ]; @endif @if FREGSIZE == "8" # For 64-bit floating point single instruction operands use only the low part attach variables [ frD frJ frK ] [ fa0_lo fa1_lo fa2_lo fa3_lo fa4_lo fa5_lo fa6_lo fa7_lo ft0_lo ft1_lo ft2_lo ft3_lo ft4_lo ft5_lo ft6_lo ft7_lo ft8_lo ft9_lo ft10_lo ft11_lo ft12_lo ft13_lo ft14_lo ft15_lo fs0_lo fs1_lo fs2_lo fs3_lo fs4_lo fs5_lo fs6_lo fs7_lo ]; attach variables [ drD drJ drK ] [ fa0 fa1 fa2 fa3 fa4 fa5 fa6 fa7 ft0 ft1 ft2 ft3 ft4 ft5 ft6 ft7 ft8 ft9 ft10
ft11 ft12 ft13 ft14 ft15 fs0 fs1 fs2 fs3 fs4 fs5 fs6 fs7 ]; @else attach variables [ frD frJ frK ] [ fa0 fa1 fa2 fa3 fa4 fa5 fa6 fa7 ft0 ft1 ft2 ft3 ft4 ft5 ft6 ft7 ft8 ft9 ft10 ft11 ft12 ft13 ft14 ft15 fs0 fs1 fs2 fs3 fs4 fs5 fs6 fs7 ]; # For 64-bit floating point Double instruction operands need to bond two 32-bit FPRs attach variables [ drD drJ drK ] [ fa0_1 _ fa2_3 _ fa4_5 _ fa6_7 _ ft8_9 _ ft10_11 _ ft12_13 _ ft14_15 _ ft16_17 _ ft18_19 _ ft20_21 _ ft22_23 _ fs24_25 _ fs26_27 _ fs28_29 _ fs30_31 _ ]; @endif attach variables [vrD vrJ vrK vrA] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; attach variables [xrD xrJ xrK xrA] [ x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 ]; attach variables [ fccD fccJ fccA] [ fcc0 fcc1 fcc2 fcc3 fcc4 fcc5 fcc6 fcc7 ]; # Register subconstructors RD: rD is rD { export rD; } RDsrc: rD is rD { export rD; } RDsrc: rD is rD & rD=0 { export 0:$(REGSIZE); } RJ: rJ is rJ { export rJ; } RJsrc: rJ is rJ { export rJ; } RJsrc: rJ is rJ & rJ=0 { export 0:$(REGSIZE); } RK: rK is rK { export rK; } RKsrc: rK is rK { export rK; } RKsrc: rK is rK & rK=0 { export 0:$(REGSIZE); } RD32: rD is rD & rD32 { export rD32; } RD32src: rD is rD & rD32 { export rD32; } RD32src: rD is rD & rD32=0 { export 0:4; } RJ32: rJ is rJ & rJ32 { export rJ32; } RJ32src: rJ is rJ & rJ32 { export rJ32; } RJ32src: rJ is rJ & rJ32=0 { export 0:4; } RK32: rK is rK & rK32 { export rK32; } RK32src: rK is rK & rK32 { export rK32; } RK32src: rK is rK & rK32=0 { export 0:4; } @if FREGSIZE == "8" FRD: drD is drD { export drD; } FRJ: drJ is drJ { export drJ; } FRK: drK is drK { export drK; } @else FRD: frD is frD { export frD; } FRJ: frJ is frJ { export frJ; } FRK: frK is frK { export frK; } @endif # Immediate operand sub-constructors addu16_imm: val is simm10_16 [val = simm10_16 << 16;] { export *[const]:$(REGSIZE) val; } alsl_shift:
# Operand sub-constructors: scaled load/store addresses (ldst_addr adds a
# signed 12-bit offset, ldstptr_addr a 14-bit offset shifted left 2,
# ldstx_addr a register index), pc-relative address builders (pcadd2/12/18
# add shifted simm5_20 to inst_start; pcala12 first clears the low 12 bits
# of pc), branch targets Rel16/Rel21/Rel26 assembled from the split
# immediate fields (high bits in simm0_5/simm0_10, low 16 in imm10_16,
# scaled by 4), the register-relative RelJ16, and shifted-immediate forms
# simm12i/simm32i/simm52i used by the lu*i instruction family.
# User pcodeop declarations follow for operations with no p-code
# equivalent: break/syscall/dbcl traps, cpucfg, bound/address exceptions,
# CRC helpers, dbar/ibar barriers, iocsr access, preld cache hints, rdtime
# counters, and the FP helpers f_scaleb/f_logb/f_class/round_even.
# Bit-manipulation macros: bitrev32/64 and byterev/byterev32/byterev64
# reverse bits with the classic shift-and-mask halving network;
# tzcount32/64 count trailing zeros by isolating the lowest set bit
# (v & -v) and summing mask-membership tests (count starts at the width
# and each zext(...) subtracts the matched weight).
# After the FILE banners (lp64d.cspec and lp64f.cspec are empty in this
# snapshot): head of lsx.sinc — LSX 128-bit SIMD instructions, each modeled
# as an opaque user pcodeop of the same name applied to the vr operands.
sa2 is imm15_2 [sa2 = imm15_2 + 1;] { export *[const]:1 sa2; } ldst_addr: RJsrc(simm10_12) is RJsrc & simm10_12 { local vaddr:$(REGSIZE) = RJsrc + simm10_12; export vaddr; } ldstptr_addr: RJsrc(voffs) is RJsrc & simm10_14 [voffs = (simm10_14 << 2);] { local vaddr:$(REGSIZE) = RJsrc + voffs; export vaddr; } ldstx_addr: RJsrc(RKsrc) is RJsrc & RKsrc { local vaddr:$(REGSIZE) = RJsrc + RKsrc; export vaddr; } pcadd2: reloffs is simm5_20 [reloffs = inst_start + (simm5_20 << 2);] { export *[const]:$(REGSIZE) reloffs; } pcadd12: reloffs is simm5_20 [reloffs = inst_start + (simm5_20 << 12);] { export *[const]:$(REGSIZE) reloffs; } pcala12: reloffs is simm5_20 [reloffs = (inst_start & ~0xfff) + (simm5_20 << 12);] { export *[const]:$(REGSIZE) reloffs; } pcadd18: reloffs is simm5_20 [reloffs = inst_start + (simm5_20 << 18);] { export *[const]:$(REGSIZE) reloffs; } Rel16: reloc is simm10_16 [ reloc = inst_start + (simm10_16 << 2); ] { export *:$(ADDRSIZE) reloc; } Rel21: reloc is imm10_16 & simm0_5 [ reloc = inst_start + (((simm0_5 << 16) + imm10_16) << 2); ] { export *:$(ADDRSIZE) reloc; } Rel26: reloc is imm10_16 & simm0_10 [ reloc = inst_start + (((simm0_10 << 16) | imm10_16) << 2); ] { export *:$(ADDRSIZE) reloc; } RelJ16: RJsrc, simm10_16 is RJsrc & simm10_16 { local tmp:$(ADDRSIZE) = RJsrc + (simm10_16 << 2); export tmp; } simm12i: immed is simm5_20 [immed = simm5_20 << 12; ] { export *[const]:$(REGSIZE) immed; } simm32i: immed is simm5_20 [immed = simm5_20 << 32; ] { export *[const]:$(REGSIZE) immed; } simm52i: immed is simm10_12 [immed = simm10_12 << 52; ] { export *[const]:$(REGSIZE) immed; } # general pcodeops define pcodeop break; define pcodeop cpucfg; define pcodeop addr_bound_exception; define pcodeop bound_check_exception; define pcodeop crc_ieee802.3; define pcodeop crc_castagnoli; define pcodeop dbcl; define pcodeop dbar; define pcodeop ibar; define pcodeop iocsrrd; define pcodeop iocsrwr; define pcodeop preld_loadl1cache; define pcodeop preld_storel1cache;
define pcodeop preld_nop; define pcodeop preldx_loadl1cache; define pcodeop preldx_storel1cache; define pcodeop preldx_nop; # param: 0 = low word, 1 = high word, 2 = both (for rdtime.d) define pcodeop rdtime.counter; define pcodeop rdtime.counterid; define pcodeop syscall; define pcodeop f_scaleb; define pcodeop f_logb; define pcodeop f_class; define pcodeop round_even; # # MACROS # macro bitrev32(input, output) { local v = input; v = ((v & 0xffff0000) >> 16) | ((v & 0x0000ffff) << 16); v = ((v & 0xff00ff00) >> 8) | ((v & 0x00ff00ff) << 8); v = ((v & 0xf0f0f0f0) >> 4) | ((v & 0x0f0f0f0f) << 4); v = ((v & 0xcccccccc) >> 2) | ((v & 0x33333333) << 2); v = ((v & 0xaaaaaaaa) >> 1) | ((v & 0x55555555) << 1); output = v; } macro bitrev64(input, output) { local v = input; v = ((v & 0xffffffff00000000) >> 32) | ((v & 0x00000000ffffffff) << 32); v = ((v & 0xffff0000ffff0000) >> 16) | ((v & 0x0000ffff0000ffff) << 16); v = ((v & 0xff00ff00ff00ff00) >> 8) | ((v & 0x00ff00ff00ff00ff) << 8); v = ((v & 0xf0f0f0f0f0f0f0f0) >> 4) | ((v & 0x0f0f0f0f0f0f0f0f) << 4); v = ((v & 0xcccccccccccccccc) >> 2) | ((v & 0x3333333333333333) << 2); v = ((v & 0xaaaaaaaaaaaaaaaa) >> 1) | ((v & 0x5555555555555555) << 1); output = v; } macro byterev(input, output) { local v = input; v = ((v & 0xf0) >> 4) | ((v & 0x0f) << 4); v = ((v & 0xcc) >> 2) | ((v & 0x33) << 2); v = ((v & 0xaa) >> 1) | ((v & 0x55) << 1); output = v; } macro byterev32(input, output) { local v = input; v = ((v & 0xf0f0f0f0) >> 4) | ((v & 0x0f0f0f0f) << 4); v = ((v & 0xcccccccc) >> 2) | ((v & 0x33333333) << 2); v = ((v & 0xaaaaaaaa) >> 1) | ((v & 0x55555555) << 1); output = v; } macro byterev64(input, output) { local v = input; v = ((v & 0xf0f0f0f0f0f0f0f0) >> 4) | ((v & 0x0f0f0f0f0f0f0f0f) << 4); v = ((v & 0xcccccccccccccccc) >> 2) | ((v & 0x3333333333333333) << 2); v = ((v & 0xaaaaaaaaaaaaaaaa) >> 1) | ((v & 0x5555555555555555) << 1); output = v; } macro tzcount32(input, count) { count = 32; local v = input & (-input); count =
count - zext(v != 0); count = count - 16 * zext((v & 0x0000ffff) != 0); count = count - 8 * zext((v & 0x00ff00ff) != 0); count = count - 4 * zext((v & 0x0f0f0f0f) != 0); count = count - 2 * zext((v & 0x33333333) != 0); count = count - 1 * zext((v & 0x55555555) != 0); } macro tzcount64(input, count) { count = 64; local v:8 = input & (-input); count = count - 1 * zext(v != 0); count = count - 32 * zext((v & 0x00000000ffffffff) != 0); count = count - 16 * zext((v & 0x0000ffff0000ffff) != 0); count = count - 8 * zext((v & 0x00ff00ff00ff00ff) != 0); count = count - 4 * zext((v & 0x0f0f0f0f0f0f0f0f) != 0); count = count - 2 * zext((v & 0x3333333333333333) != 0); count = count - 1 * zext((v & 0x5555555555555555) != 0); } ================================================ FILE: pypcode/processors/Loongarch/data/languages/lp64d.cspec ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/lp64f.cspec ================================================ ================================================ FILE: pypcode/processors/Loongarch/data/languages/lsx.sinc ================================================ define pcodeop vfmadd.s; #lsx.txt vfmadd.s mask=0x09100000 #0x09100000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfmadd.s vrD, vrJ, vrK, vrA is op20_31=0x91 & vrD & vrJ & vrK & vrA { vrD = vfmadd.s(vrD, vrJ, vrK, vrA); } define pcodeop vfmadd.d; #lsx.txt vfmadd.d mask=0x09200000 #0x09200000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfmadd.d vrD, vrJ, vrK, vrA is op20_31=0x92 & vrD & vrJ & vrK & vrA { vrD = vfmadd.d(vrD, vrJ, vrK, vrA); } define pcodeop vfmsub.s; #lsx.txt vfmsub.s mask=0x09500000 #0x09500000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfmsub.s vrD, vrJ, vrK, vrA is op20_31=0x95 & vrD & vrJ & vrK & vrA { vrD = vfmsub.s(vrD,
# LSX fused multiply-add family continued (vfmsub/vfnmadd/vfnmsub in .s and
# .d widths), then the vfcmp.*.s single-precision vector compare family:
# condition pairs caf/saf, clt/slt, ceq/seq, cle/sle, cun/sun, cult/sult,
# cueq/sueq, cule/sule, cne/sne, cor/sor, cune/sune — the 'c' prefix is the
# quiet form, 's' the signaling form, mirroring the scalar fcond table.
# Every instruction here is modeled opaquely: a user pcodeop with the same
# name is declared and the constructor forwards (vrD, vrJ, vrK[, vrA]) to
# it, so the decompiler shows a call rather than element-wise semantics.
vrJ, vrK, vrA); } define pcodeop vfmsub.d; #lsx.txt vfmsub.d mask=0x09600000 #0x09600000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfmsub.d vrD, vrJ, vrK, vrA is op20_31=0x96 & vrD & vrJ & vrK & vrA { vrD = vfmsub.d(vrD, vrJ, vrK, vrA); } define pcodeop vfnmadd.s; #lsx.txt vfnmadd.s mask=0x09900000 #0x09900000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfnmadd.s vrD, vrJ, vrK, vrA is op20_31=0x99 & vrD & vrJ & vrK & vrA { vrD = vfnmadd.s(vrD, vrJ, vrK, vrA); } define pcodeop vfnmadd.d; #lsx.txt vfnmadd.d mask=0x09a00000 #0x09a00000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfnmadd.d vrD, vrJ, vrK, vrA is op20_31=0x9a & vrD & vrJ & vrK & vrA { vrD = vfnmadd.d(vrD, vrJ, vrK, vrA); } define pcodeop vfnmsub.s; #lsx.txt vfnmsub.s mask=0x09d00000 #0x09d00000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfnmsub.s vrD, vrJ, vrK, vrA is op20_31=0x9d & vrD & vrJ & vrK & vrA { vrD = vfnmsub.s(vrD, vrJ, vrK, vrA); } define pcodeop vfnmsub.d; #lsx.txt vfnmsub.d mask=0x09e00000 #0x09e00000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vfnmsub.d vrD, vrJ, vrK, vrA is op20_31=0x9e & vrD & vrJ & vrK & vrA { vrD = vfnmsub.d(vrD, vrJ, vrK, vrA); } define pcodeop vfcmp.caf.s; #lsx.txt vfcmp.caf.s mask=0x0c500000 #0x0c500000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.caf.s vrD, vrJ, vrK is op15_31=0x18a0 & vrD & vrJ & vrK { vrD = vfcmp.caf.s(vrD, vrJ, vrK); } define pcodeop vfcmp.saf.s; #lsx.txt vfcmp.saf.s mask=0x0c508000 #0x0c508000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.saf.s vrD, vrJ, vrK is op15_31=0x18a1 & vrD & vrJ & vrK { vrD = vfcmp.saf.s(vrD, vrJ, vrK); } define pcodeop vfcmp.clt.s; #lsx.txt vfcmp.clt.s mask=0x0c510000 #0x0c510000 0xffff8000 v0:5,v5:5,v10:5
['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.clt.s vrD, vrJ, vrK is op15_31=0x18a2 & vrD & vrJ & vrK { vrD = vfcmp.clt.s(vrD, vrJ, vrK); } define pcodeop vfcmp.slt.s; #lsx.txt vfcmp.slt.s mask=0x0c518000 #0x0c518000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.slt.s vrD, vrJ, vrK is op15_31=0x18a3 & vrD & vrJ & vrK { vrD = vfcmp.slt.s(vrD, vrJ, vrK); } define pcodeop vfcmp.ceq.s; #lsx.txt vfcmp.ceq.s mask=0x0c520000 #0x0c520000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.ceq.s vrD, vrJ, vrK is op15_31=0x18a4 & vrD & vrJ & vrK { vrD = vfcmp.ceq.s(vrD, vrJ, vrK); } define pcodeop vfcmp.seq.s; #lsx.txt vfcmp.seq.s mask=0x0c528000 #0x0c528000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.seq.s vrD, vrJ, vrK is op15_31=0x18a5 & vrD & vrJ & vrK { vrD = vfcmp.seq.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cle.s; #lsx.txt vfcmp.cle.s mask=0x0c530000 #0x0c530000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cle.s vrD, vrJ, vrK is op15_31=0x18a6 & vrD & vrJ & vrK { vrD = vfcmp.cle.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sle.s; #lsx.txt vfcmp.sle.s mask=0x0c538000 #0x0c538000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sle.s vrD, vrJ, vrK is op15_31=0x18a7 & vrD & vrJ & vrK { vrD = vfcmp.sle.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cun.s; #lsx.txt vfcmp.cun.s mask=0x0c540000 #0x0c540000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cun.s vrD, vrJ, vrK is op15_31=0x18a8 & vrD & vrJ & vrK { vrD = vfcmp.cun.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sun.s; #lsx.txt vfcmp.sun.s mask=0x0c548000 #0x0c548000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sun.s vrD, vrJ, vrK is op15_31=0x18a9 & vrD & vrJ & vrK { vrD = vfcmp.sun.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cult.s; #lsx.txt vfcmp.cult.s mask=0x0c550000 #0x0c550000 0xffff8000 v0:5,v5:5,v10:5
['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cult.s vrD, vrJ, vrK is op15_31=0x18aa & vrD & vrJ & vrK { vrD = vfcmp.cult.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sult.s; #lsx.txt vfcmp.sult.s mask=0x0c558000 #0x0c558000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sult.s vrD, vrJ, vrK is op15_31=0x18ab & vrD & vrJ & vrK { vrD = vfcmp.sult.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cueq.s; #lsx.txt vfcmp.cueq.s mask=0x0c560000 #0x0c560000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cueq.s vrD, vrJ, vrK is op15_31=0x18ac & vrD & vrJ & vrK { vrD = vfcmp.cueq.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sueq.s; #lsx.txt vfcmp.sueq.s mask=0x0c568000 #0x0c568000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sueq.s vrD, vrJ, vrK is op15_31=0x18ad & vrD & vrJ & vrK { vrD = vfcmp.sueq.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cule.s; #lsx.txt vfcmp.cule.s mask=0x0c570000 #0x0c570000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cule.s vrD, vrJ, vrK is op15_31=0x18ae & vrD & vrJ & vrK { vrD = vfcmp.cule.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sule.s; #lsx.txt vfcmp.sule.s mask=0x0c578000 #0x0c578000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sule.s vrD, vrJ, vrK is op15_31=0x18af & vrD & vrJ & vrK { vrD = vfcmp.sule.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cne.s; #lsx.txt vfcmp.cne.s mask=0x0c580000 #0x0c580000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cne.s vrD, vrJ, vrK is op15_31=0x18b0 & vrD & vrJ & vrK { vrD = vfcmp.cne.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sne.s; #lsx.txt vfcmp.sne.s mask=0x0c588000 #0x0c588000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sne.s vrD, vrJ, vrK is op15_31=0x18b1 & vrD & vrJ & vrK { vrD = vfcmp.sne.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cor.s; #lsx.txt vfcmp.cor.s mask=0x0c5a0000 #0x0c5a0000 0xffff8000
v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cor.s vrD, vrJ, vrK is op15_31=0x18b4 & vrD & vrJ & vrK { vrD = vfcmp.cor.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sor.s; #lsx.txt vfcmp.sor.s mask=0x0c5a8000 #0x0c5a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sor.s vrD, vrJ, vrK is op15_31=0x18b5 & vrD & vrJ & vrK { vrD = vfcmp.sor.s(vrD, vrJ, vrK); } define pcodeop vfcmp.cune.s; #lsx.txt vfcmp.cune.s mask=0x0c5c0000 #0x0c5c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cune.s vrD, vrJ, vrK is op15_31=0x18b8 & vrD & vrJ & vrK { vrD = vfcmp.cune.s(vrD, vrJ, vrK); } define pcodeop vfcmp.sune.s; #lsx.txt vfcmp.sune.s mask=0x0c5c8000 #0x0c5c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sune.s vrD, vrJ, vrK is op15_31=0x18b9 & vrD & vrJ & vrK { vrD = vfcmp.sune.s(vrD, vrJ, vrK); } define pcodeop vfcmp.caf.d; #lsx.txt vfcmp.caf.d mask=0x0c600000 #0x0c600000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.caf.d vrD, vrJ, vrK is op15_31=0x18c0 & vrD & vrJ & vrK { vrD = vfcmp.caf.d(vrD, vrJ, vrK); } define pcodeop vfcmp.saf.d; #lsx.txt vfcmp.saf.d mask=0x0c608000 #0x0c608000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.saf.d vrD, vrJ, vrK is op15_31=0x18c1 & vrD & vrJ & vrK { vrD = vfcmp.saf.d(vrD, vrJ, vrK); } define pcodeop vfcmp.clt.d; #lsx.txt vfcmp.clt.d mask=0x0c610000 #0x0c610000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.clt.d vrD, vrJ, vrK is op15_31=0x18c2 & vrD & vrJ & vrK { vrD = vfcmp.clt.d(vrD, vrJ, vrK); } define pcodeop vfcmp.slt.d; #lsx.txt vfcmp.slt.d mask=0x0c618000 #0x0c618000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.slt.d vrD, vrJ, vrK is op15_31=0x18c3 & vrD & vrJ & vrK { vrD = vfcmp.slt.d(vrD, vrJ, vrK); } define pcodeop vfcmp.ceq.d; #lsx.txt vfcmp.ceq.d mask=0x0c620000 #0x0c620000 0xffff8000
# The vfcmp.*.d double-precision vector compare family (same quiet/signaling
# condition set as the .s forms, sub-opcodes 0x18c0-0x18d9), the four-operand
# vbitsel.v (bit select) and vshuf.b (byte shuffle), and the LSX memory
# instructions: vld/vst (12-bit signed offset), vldrepl.{b,h,w,d}
# (load-and-replicate, offset field width shrinking as the element scale
# grows: so10:12, <<1, <<2, <<3) and vstelm.{d,w,h} (store one element,
# with a scaled offset plus an element-index immediate imm18_*).
# All are modeled as opaque user pcodeops taking the raw operands.
# NOTE(review): the store-flavored ops (vst, vstelm.*) are also written as
# "vrD = pcodeop(...)" — the vrD assignment looks like a modeling
# convenience rather than an architectural register write, and no memory
# side effect is expressed in p-code here; confirm this is the intended
# abstraction before relying on it for data-flow analysis.
v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.ceq.d vrD, vrJ, vrK is op15_31=0x18c4 & vrD & vrJ & vrK { vrD = vfcmp.ceq.d(vrD, vrJ, vrK); } define pcodeop vfcmp.seq.d; #lsx.txt vfcmp.seq.d mask=0x0c628000 #0x0c628000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.seq.d vrD, vrJ, vrK is op15_31=0x18c5 & vrD & vrJ & vrK { vrD = vfcmp.seq.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cle.d; #lsx.txt vfcmp.cle.d mask=0x0c630000 #0x0c630000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cle.d vrD, vrJ, vrK is op15_31=0x18c6 & vrD & vrJ & vrK { vrD = vfcmp.cle.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sle.d; #lsx.txt vfcmp.sle.d mask=0x0c638000 #0x0c638000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sle.d vrD, vrJ, vrK is op15_31=0x18c7 & vrD & vrJ & vrK { vrD = vfcmp.sle.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cun.d; #lsx.txt vfcmp.cun.d mask=0x0c640000 #0x0c640000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cun.d vrD, vrJ, vrK is op15_31=0x18c8 & vrD & vrJ & vrK { vrD = vfcmp.cun.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sun.d; #lsx.txt vfcmp.sun.d mask=0x0c648000 #0x0c648000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sun.d vrD, vrJ, vrK is op15_31=0x18c9 & vrD & vrJ & vrK { vrD = vfcmp.sun.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cult.d; #lsx.txt vfcmp.cult.d mask=0x0c650000 #0x0c650000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cult.d vrD, vrJ, vrK is op15_31=0x18ca & vrD & vrJ & vrK { vrD = vfcmp.cult.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sult.d; #lsx.txt vfcmp.sult.d mask=0x0c658000 #0x0c658000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sult.d vrD, vrJ, vrK is op15_31=0x18cb & vrD & vrJ & vrK { vrD = vfcmp.sult.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cueq.d; #lsx.txt vfcmp.cueq.d mask=0x0c660000 #0x0c660000
0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cueq.d vrD, vrJ, vrK is op15_31=0x18cc & vrD & vrJ & vrK { vrD = vfcmp.cueq.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sueq.d; #lsx.txt vfcmp.sueq.d mask=0x0c668000 #0x0c668000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sueq.d vrD, vrJ, vrK is op15_31=0x18cd & vrD & vrJ & vrK { vrD = vfcmp.sueq.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cule.d; #lsx.txt vfcmp.cule.d mask=0x0c670000 #0x0c670000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cule.d vrD, vrJ, vrK is op15_31=0x18ce & vrD & vrJ & vrK { vrD = vfcmp.cule.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sule.d; #lsx.txt vfcmp.sule.d mask=0x0c678000 #0x0c678000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sule.d vrD, vrJ, vrK is op15_31=0x18cf & vrD & vrJ & vrK { vrD = vfcmp.sule.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cne.d; #lsx.txt vfcmp.cne.d mask=0x0c680000 #0x0c680000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cne.d vrD, vrJ, vrK is op15_31=0x18d0 & vrD & vrJ & vrK { vrD = vfcmp.cne.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sne.d; #lsx.txt vfcmp.sne.d mask=0x0c688000 #0x0c688000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sne.d vrD, vrJ, vrK is op15_31=0x18d1 & vrD & vrJ & vrK { vrD = vfcmp.sne.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cor.d; #lsx.txt vfcmp.cor.d mask=0x0c6a0000 #0x0c6a0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cor.d vrD, vrJ, vrK is op15_31=0x18d4 & vrD & vrJ & vrK { vrD = vfcmp.cor.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sor.d; #lsx.txt vfcmp.sor.d mask=0x0c6a8000 #0x0c6a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sor.d vrD, vrJ, vrK is op15_31=0x18d5 & vrD & vrJ & vrK { vrD = vfcmp.sor.d(vrD, vrJ, vrK); } define pcodeop vfcmp.cune.d; #lsx.txt vfcmp.cune.d mask=0x0c6c0000
#0x0c6c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.cune.d vrD, vrJ, vrK is op15_31=0x18d8 & vrD & vrJ & vrK { vrD = vfcmp.cune.d(vrD, vrJ, vrK); } define pcodeop vfcmp.sune.d; #lsx.txt vfcmp.sune.d mask=0x0c6c8000 #0x0c6c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcmp.sune.d vrD, vrJ, vrK is op15_31=0x18d9 & vrD & vrJ & vrK { vrD = vfcmp.sune.d(vrD, vrJ, vrK); } define pcodeop vbitsel.v; #lsx.txt vbitsel.v mask=0x0d100000 #0x0d100000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vbitsel.v vrD, vrJ, vrK, vrA is op20_31=0xd1 & vrD & vrJ & vrK & vrA { vrD = vbitsel.v(vrD, vrJ, vrK, vrA); } define pcodeop vshuf.b; #lsx.txt vshuf.b mask=0x0d500000 #0x0d500000 0xfff00000 v0:5,v5:5,v10:5,v15:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0', 'vreg15_5_s0'] :vshuf.b vrD, vrJ, vrK, vrA is op20_31=0xd5 & vrD & vrJ & vrK & vrA { vrD = vshuf.b(vrD, vrJ, vrK, vrA); } define pcodeop vld; #lsx.txt vld mask=0x2c000000 #0x2c000000 0xffc00000 v0:5, r5:5, so10:12 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :vld vrD, RJsrc, simm10_12 is op22_31=0xb0 & vrD & RJsrc & simm10_12 { vrD = vld(vrD, RJsrc, simm10_12:$(REGSIZE)); } define pcodeop vst; #lsx.txt vst mask=0x2c400000 #0x2c400000 0xffc00000 v0:5, r5:5, so10:12 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :vst vrD, RJsrc, simm10_12 is op22_31=0xb1 & vrD & RJsrc & simm10_12 { vrD = vst(vrD, RJsrc, simm10_12:$(REGSIZE)); } define pcodeop vldrepl.d; #lsx.txt vldrepl.d mask=0x30100000 [@orig_fmt=VdJSk9ps3] #0x30100000 0xfff80000 v0:5, r5:5, so10:9<<3 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_9_s0'] :vldrepl.d vrD, RJsrc, simm10_9 is op19_31=0x602 & vrD & RJsrc & simm10_9 { vrD = vldrepl.d(vrD, RJsrc, simm10_9:$(REGSIZE)); } define pcodeop vldrepl.w; #lsx.txt vldrepl.w mask=0x30200000 [@orig_fmt=VdJSk10ps2] #0x30200000 0xfff00000 v0:5, r5:5, so10:10<<2 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_10_s0'] :vldrepl.w vrD, RJsrc,
simm10_10 is op20_31=0x302 & vrD & RJsrc & simm10_10 { vrD = vldrepl.w(vrD, RJsrc, simm10_10:$(REGSIZE)); } define pcodeop vldrepl.h; #lsx.txt vldrepl.h mask=0x30400000 [@orig_fmt=VdJSk11ps1] #0x30400000 0xffe00000 v0:5, r5:5, so10:11<<1 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_11_s0'] :vldrepl.h vrD, RJsrc, simm10_11 is op21_31=0x182 & vrD & RJsrc & simm10_11 { vrD = vldrepl.h(vrD, RJsrc, simm10_11:$(REGSIZE)); } define pcodeop vldrepl.b; #lsx.txt vldrepl.b mask=0x30800000 #0x30800000 0xffc00000 v0:5, r5:5, so10:12 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_12_s0'] :vldrepl.b vrD, RJsrc, simm10_12 is op22_31=0xc2 & vrD & RJsrc & simm10_12 { vrD = vldrepl.b(vrD, RJsrc, simm10_12:$(REGSIZE)); } define pcodeop vstelm.d; #lsx.txt vstelm.d mask=0x31100000 [@orig_fmt=VdJSk8ps3Un1] #0x31100000 0xfff80000 v0:5, r5:5, so10:8<<3,u18:1 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_1_s0'] :vstelm.d vrD, RJsrc, simm10_8, imm18_1 is op19_31=0x622 & vrD & RJsrc & simm10_8 & imm18_1 { vrD = vstelm.d(vrD, RJsrc, simm10_8:$(REGSIZE), imm18_1:$(REGSIZE)); } define pcodeop vstelm.w; #lsx.txt vstelm.w mask=0x31200000 [@orig_fmt=VdJSk8ps2Un2] #0x31200000 0xfff00000 v0:5, r5:5, so10:8<<2,u18:2 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_2_s0'] :vstelm.w vrD, RJsrc, simm10_8, imm18_2 is op20_31=0x312 & vrD & RJsrc & simm10_8 & imm18_2 { vrD = vstelm.w(vrD, RJsrc, simm10_8:$(REGSIZE), imm18_2:$(REGSIZE)); } define pcodeop vstelm.h; #lsx.txt vstelm.h mask=0x31400000 [@orig_fmt=VdJSk8ps1Un3] #0x31400000 0xffe00000 v0:5, r5:5, so10:8<<1,u18:3 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_3_s0'] :vstelm.h vrD, RJsrc, simm10_8, imm18_3 is op21_31=0x18a & vrD & RJsrc & simm10_8 & imm18_3 { vrD = vstelm.h(vrD, RJsrc, simm10_8:$(REGSIZE), imm18_3:$(REGSIZE)); } define pcodeop vstelm.b; #lsx.txt vstelm.b mask=0x31800000 #0x31800000 0xffc00000 v0:5, r5:5, so10:8,u18:4 ['vreg0_5_s0', 'reg5_5_s0', 'soffs10_8_s0', 'imm18_4_s0'] :vstelm.b vrD, RJsrc, simm10_8, imm18_4 is op22_31=0xc6 & vrD &
RJsrc & simm10_8 & imm18_4 { vrD = vstelm.b(vrD, RJsrc, simm10_8:$(REGSIZE), imm18_4:$(REGSIZE)); } define pcodeop vldx; #lsx.txt vldx mask=0x38400000 #0x38400000 0xffff8000 v0:5, r5:5, r10:5 ['vreg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :vldx vrD, RJsrc, RKsrc is op15_31=0x7080 & vrD & RJsrc & RKsrc { vrD = vldx(vrD, RJsrc, RKsrc); } define pcodeop vstx; #lsx.txt vstx mask=0x38440000 #0x38440000 0xffff8000 v0:5, r5:5, r10:5 ['vreg0_5_s0', 'reg5_5_s0', 'reg10_5_s0'] :vstx vrD, RJsrc, RKsrc is op15_31=0x7088 & vrD & RJsrc & RKsrc { vrD = vstx(vrD, RJsrc, RKsrc); } define pcodeop vseq.b; #lsx.txt vseq.b mask=0x70000000 #0x70000000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vseq.b vrD, vrJ, vrK is op15_31=0xe000 & vrD & vrJ & vrK { vrD = vseq.b(vrD, vrJ, vrK); } define pcodeop vseq.h; #lsx.txt vseq.h mask=0x70008000 #0x70008000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vseq.h vrD, vrJ, vrK is op15_31=0xe001 & vrD & vrJ & vrK { vrD = vseq.h(vrD, vrJ, vrK); } define pcodeop vseq.w; #lsx.txt vseq.w mask=0x70010000 #0x70010000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vseq.w vrD, vrJ, vrK is op15_31=0xe002 & vrD & vrJ & vrK { vrD = vseq.w(vrD, vrJ, vrK); } define pcodeop vseq.d; #lsx.txt vseq.d mask=0x70018000 #0x70018000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vseq.d vrD, vrJ, vrK is op15_31=0xe003 & vrD & vrJ & vrK { vrD = vseq.d(vrD, vrJ, vrK); } define pcodeop vsle.b; #lsx.txt vsle.b mask=0x70020000 #0x70020000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.b vrD, vrJ, vrK is op15_31=0xe004 & vrD & vrJ & vrK { vrD = vsle.b(vrD, vrJ, vrK); } define pcodeop vsle.h; #lsx.txt vsle.h mask=0x70028000 #0x70028000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.h vrD, vrJ, vrK is op15_31=0xe005 & vrD & vrJ & vrK { vrD = vsle.h(vrD, vrJ, vrK); } define pcodeop vsle.w; #lsx.txt vsle.w mask=0x70030000 
#0x70030000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.w vrD, vrJ, vrK is op15_31=0xe006 & vrD & vrJ & vrK { vrD = vsle.w(vrD, vrJ, vrK); } define pcodeop vsle.d; #lsx.txt vsle.d mask=0x70038000 #0x70038000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.d vrD, vrJ, vrK is op15_31=0xe007 & vrD & vrJ & vrK { vrD = vsle.d(vrD, vrJ, vrK); } define pcodeop vsle.bu; #lsx.txt vsle.bu mask=0x70040000 #0x70040000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.bu vrD, vrJ, vrK is op15_31=0xe008 & vrD & vrJ & vrK { vrD = vsle.bu(vrD, vrJ, vrK); } define pcodeop vsle.hu; #lsx.txt vsle.hu mask=0x70048000 #0x70048000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.hu vrD, vrJ, vrK is op15_31=0xe009 & vrD & vrJ & vrK { vrD = vsle.hu(vrD, vrJ, vrK); } define pcodeop vsle.wu; #lsx.txt vsle.wu mask=0x70050000 #0x70050000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.wu vrD, vrJ, vrK is op15_31=0xe00a & vrD & vrJ & vrK { vrD = vsle.wu(vrD, vrJ, vrK); } define pcodeop vsle.du; #lsx.txt vsle.du mask=0x70058000 #0x70058000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsle.du vrD, vrJ, vrK is op15_31=0xe00b & vrD & vrJ & vrK { vrD = vsle.du(vrD, vrJ, vrK); } define pcodeop vslt.b; #lsx.txt vslt.b mask=0x70060000 #0x70060000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.b vrD, vrJ, vrK is op15_31=0xe00c & vrD & vrJ & vrK { vrD = vslt.b(vrD, vrJ, vrK); } define pcodeop vslt.h; #lsx.txt vslt.h mask=0x70068000 #0x70068000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.h vrD, vrJ, vrK is op15_31=0xe00d & vrD & vrJ & vrK { vrD = vslt.h(vrD, vrJ, vrK); } define pcodeop vslt.w; #lsx.txt vslt.w mask=0x70070000 #0x70070000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.w vrD, vrJ, vrK is op15_31=0xe00e & vrD & vrJ & vrK { vrD = 
vslt.w(vrD, vrJ, vrK); } define pcodeop vslt.d; #lsx.txt vslt.d mask=0x70078000 #0x70078000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.d vrD, vrJ, vrK is op15_31=0xe00f & vrD & vrJ & vrK { vrD = vslt.d(vrD, vrJ, vrK); } define pcodeop vslt.bu; #lsx.txt vslt.bu mask=0x70080000 #0x70080000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.bu vrD, vrJ, vrK is op15_31=0xe010 & vrD & vrJ & vrK { vrD = vslt.bu(vrD, vrJ, vrK); } define pcodeop vslt.hu; #lsx.txt vslt.hu mask=0x70088000 #0x70088000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.hu vrD, vrJ, vrK is op15_31=0xe011 & vrD & vrJ & vrK { vrD = vslt.hu(vrD, vrJ, vrK); } define pcodeop vslt.wu; #lsx.txt vslt.wu mask=0x70090000 #0x70090000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.wu vrD, vrJ, vrK is op15_31=0xe012 & vrD & vrJ & vrK { vrD = vslt.wu(vrD, vrJ, vrK); } define pcodeop vslt.du; #lsx.txt vslt.du mask=0x70098000 #0x70098000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vslt.du vrD, vrJ, vrK is op15_31=0xe013 & vrD & vrJ & vrK { vrD = vslt.du(vrD, vrJ, vrK); } define pcodeop vadd.b; #lsx.txt vadd.b mask=0x700a0000 #0x700a0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadd.b vrD, vrJ, vrK is op15_31=0xe014 & vrD & vrJ & vrK { vrD = vadd.b(vrD, vrJ, vrK); } define pcodeop vadd.h; #lsx.txt vadd.h mask=0x700a8000 #0x700a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadd.h vrD, vrJ, vrK is op15_31=0xe015 & vrD & vrJ & vrK { vrD = vadd.h(vrD, vrJ, vrK); } define pcodeop vadd.w; #lsx.txt vadd.w mask=0x700b0000 #0x700b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadd.w vrD, vrJ, vrK is op15_31=0xe016 & vrD & vrJ & vrK { vrD = vadd.w(vrD, vrJ, vrK); } define pcodeop vadd.d; #lsx.txt vadd.d mask=0x700b8000 #0x700b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 
'vreg10_5_s0'] :vadd.d vrD, vrJ, vrK is op15_31=0xe017 & vrD & vrJ & vrK { vrD = vadd.d(vrD, vrJ, vrK); } define pcodeop vsub.b; #lsx.txt vsub.b mask=0x700c0000 #0x700c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsub.b vrD, vrJ, vrK is op15_31=0xe018 & vrD & vrJ & vrK { vrD = vsub.b(vrD, vrJ, vrK); } define pcodeop vsub.h; #lsx.txt vsub.h mask=0x700c8000 #0x700c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsub.h vrD, vrJ, vrK is op15_31=0xe019 & vrD & vrJ & vrK { vrD = vsub.h(vrD, vrJ, vrK); } define pcodeop vsub.w; #lsx.txt vsub.w mask=0x700d0000 #0x700d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsub.w vrD, vrJ, vrK is op15_31=0xe01a & vrD & vrJ & vrK { vrD = vsub.w(vrD, vrJ, vrK); } define pcodeop vsub.d; #lsx.txt vsub.d mask=0x700d8000 #0x700d8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsub.d vrD, vrJ, vrK is op15_31=0xe01b & vrD & vrJ & vrK { vrD = vsub.d(vrD, vrJ, vrK); } define pcodeop vaddwev.h.b; #lsx.txt vaddwev.h.b mask=0x701e0000 #0x701e0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.h.b vrD, vrJ, vrK is op15_31=0xe03c & vrD & vrJ & vrK { vrD = vaddwev.h.b(vrD, vrJ, vrK); } define pcodeop vaddwev.w.h; #lsx.txt vaddwev.w.h mask=0x701e8000 #0x701e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.w.h vrD, vrJ, vrK is op15_31=0xe03d & vrD & vrJ & vrK { vrD = vaddwev.w.h(vrD, vrJ, vrK); } define pcodeop vaddwev.d.w; #lsx.txt vaddwev.d.w mask=0x701f0000 #0x701f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.d.w vrD, vrJ, vrK is op15_31=0xe03e & vrD & vrJ & vrK { vrD = vaddwev.d.w(vrD, vrJ, vrK); } define pcodeop vaddwev.q.d; #lsx.txt vaddwev.q.d mask=0x701f8000 #0x701f8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.q.d vrD, vrJ, vrK is op15_31=0xe03f & vrD & vrJ & vrK { vrD = 
vaddwev.q.d(vrD, vrJ, vrK); } define pcodeop vsubwev.h.b; #lsx.txt vsubwev.h.b mask=0x70200000 #0x70200000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.h.b vrD, vrJ, vrK is op15_31=0xe040 & vrD & vrJ & vrK { vrD = vsubwev.h.b(vrD, vrJ, vrK); } define pcodeop vsubwev.w.h; #lsx.txt vsubwev.w.h mask=0x70208000 #0x70208000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.w.h vrD, vrJ, vrK is op15_31=0xe041 & vrD & vrJ & vrK { vrD = vsubwev.w.h(vrD, vrJ, vrK); } define pcodeop vsubwev.d.w; #lsx.txt vsubwev.d.w mask=0x70210000 #0x70210000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.d.w vrD, vrJ, vrK is op15_31=0xe042 & vrD & vrJ & vrK { vrD = vsubwev.d.w(vrD, vrJ, vrK); } define pcodeop vsubwev.q.d; #lsx.txt vsubwev.q.d mask=0x70218000 #0x70218000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.q.d vrD, vrJ, vrK is op15_31=0xe043 & vrD & vrJ & vrK { vrD = vsubwev.q.d(vrD, vrJ, vrK); } define pcodeop vaddwod.h.b; #lsx.txt vaddwod.h.b mask=0x70220000 #0x70220000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.h.b vrD, vrJ, vrK is op15_31=0xe044 & vrD & vrJ & vrK { vrD = vaddwod.h.b(vrD, vrJ, vrK); } define pcodeop vaddwod.w.h; #lsx.txt vaddwod.w.h mask=0x70228000 #0x70228000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.w.h vrD, vrJ, vrK is op15_31=0xe045 & vrD & vrJ & vrK { vrD = vaddwod.w.h(vrD, vrJ, vrK); } define pcodeop vaddwod.d.w; #lsx.txt vaddwod.d.w mask=0x70230000 #0x70230000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.d.w vrD, vrJ, vrK is op15_31=0xe046 & vrD & vrJ & vrK { vrD = vaddwod.d.w(vrD, vrJ, vrK); } define pcodeop vaddwod.q.d; #lsx.txt vaddwod.q.d mask=0x70238000 #0x70238000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.q.d vrD, vrJ, vrK is op15_31=0xe047 & vrD & vrJ & vrK { vrD = 
vaddwod.q.d(vrD, vrJ, vrK); } define pcodeop vsubwod.h.b; #lsx.txt vsubwod.h.b mask=0x70240000 #0x70240000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.h.b vrD, vrJ, vrK is op15_31=0xe048 & vrD & vrJ & vrK { vrD = vsubwod.h.b(vrD, vrJ, vrK); } define pcodeop vsubwod.w.h; #lsx.txt vsubwod.w.h mask=0x70248000 #0x70248000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.w.h vrD, vrJ, vrK is op15_31=0xe049 & vrD & vrJ & vrK { vrD = vsubwod.w.h(vrD, vrJ, vrK); } define pcodeop vsubwod.d.w; #lsx.txt vsubwod.d.w mask=0x70250000 #0x70250000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.d.w vrD, vrJ, vrK is op15_31=0xe04a & vrD & vrJ & vrK { vrD = vsubwod.d.w(vrD, vrJ, vrK); } define pcodeop vsubwod.q.d; #lsx.txt vsubwod.q.d mask=0x70258000 #0x70258000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.q.d vrD, vrJ, vrK is op15_31=0xe04b & vrD & vrJ & vrK { vrD = vsubwod.q.d(vrD, vrJ, vrK); } define pcodeop vaddwev.h.bu; #lsx.txt vaddwev.h.bu mask=0x702e0000 #0x702e0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.h.bu vrD, vrJ, vrK is op15_31=0xe05c & vrD & vrJ & vrK { vrD = vaddwev.h.bu(vrD, vrJ, vrK); } define pcodeop vaddwev.w.hu; #lsx.txt vaddwev.w.hu mask=0x702e8000 #0x702e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.w.hu vrD, vrJ, vrK is op15_31=0xe05d & vrD & vrJ & vrK { vrD = vaddwev.w.hu(vrD, vrJ, vrK); } define pcodeop vaddwev.d.wu; #lsx.txt vaddwev.d.wu mask=0x702f0000 #0x702f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.d.wu vrD, vrJ, vrK is op15_31=0xe05e & vrD & vrJ & vrK { vrD = vaddwev.d.wu(vrD, vrJ, vrK); } define pcodeop vaddwev.q.du; #lsx.txt vaddwev.q.du mask=0x702f8000 #0x702f8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.q.du vrD, vrJ, vrK is op15_31=0xe05f & vrD & vrJ & vrK 
{ vrD = vaddwev.q.du(vrD, vrJ, vrK); } define pcodeop vsubwev.h.bu; #lsx.txt vsubwev.h.bu mask=0x70300000 #0x70300000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.h.bu vrD, vrJ, vrK is op15_31=0xe060 & vrD & vrJ & vrK { vrD = vsubwev.h.bu(vrD, vrJ, vrK); } define pcodeop vsubwev.w.hu; #lsx.txt vsubwev.w.hu mask=0x70308000 #0x70308000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.w.hu vrD, vrJ, vrK is op15_31=0xe061 & vrD & vrJ & vrK { vrD = vsubwev.w.hu(vrD, vrJ, vrK); } define pcodeop vsubwev.d.wu; #lsx.txt vsubwev.d.wu mask=0x70310000 #0x70310000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.d.wu vrD, vrJ, vrK is op15_31=0xe062 & vrD & vrJ & vrK { vrD = vsubwev.d.wu(vrD, vrJ, vrK); } define pcodeop vsubwev.q.du; #lsx.txt vsubwev.q.du mask=0x70318000 #0x70318000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwev.q.du vrD, vrJ, vrK is op15_31=0xe063 & vrD & vrJ & vrK { vrD = vsubwev.q.du(vrD, vrJ, vrK); } define pcodeop vaddwod.h.bu; #lsx.txt vaddwod.h.bu mask=0x70320000 #0x70320000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.h.bu vrD, vrJ, vrK is op15_31=0xe064 & vrD & vrJ & vrK { vrD = vaddwod.h.bu(vrD, vrJ, vrK); } define pcodeop vaddwod.w.hu; #lsx.txt vaddwod.w.hu mask=0x70328000 #0x70328000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.w.hu vrD, vrJ, vrK is op15_31=0xe065 & vrD & vrJ & vrK { vrD = vaddwod.w.hu(vrD, vrJ, vrK); } define pcodeop vaddwod.d.wu; #lsx.txt vaddwod.d.wu mask=0x70330000 #0x70330000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.d.wu vrD, vrJ, vrK is op15_31=0xe066 & vrD & vrJ & vrK { vrD = vaddwod.d.wu(vrD, vrJ, vrK); } define pcodeop vaddwod.q.du; #lsx.txt vaddwod.q.du mask=0x70338000 #0x70338000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.q.du vrD, vrJ, vrK is 
op15_31=0xe067 & vrD & vrJ & vrK { vrD = vaddwod.q.du(vrD, vrJ, vrK); } define pcodeop vsubwod.h.bu; #lsx.txt vsubwod.h.bu mask=0x70340000 #0x70340000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.h.bu vrD, vrJ, vrK is op15_31=0xe068 & vrD & vrJ & vrK { vrD = vsubwod.h.bu(vrD, vrJ, vrK); } define pcodeop vsubwod.w.hu; #lsx.txt vsubwod.w.hu mask=0x70348000 #0x70348000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.w.hu vrD, vrJ, vrK is op15_31=0xe069 & vrD & vrJ & vrK { vrD = vsubwod.w.hu(vrD, vrJ, vrK); } define pcodeop vsubwod.d.wu; #lsx.txt vsubwod.d.wu mask=0x70350000 #0x70350000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.d.wu vrD, vrJ, vrK is op15_31=0xe06a & vrD & vrJ & vrK { vrD = vsubwod.d.wu(vrD, vrJ, vrK); } define pcodeop vsubwod.q.du; #lsx.txt vsubwod.q.du mask=0x70358000 #0x70358000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsubwod.q.du vrD, vrJ, vrK is op15_31=0xe06b & vrD & vrJ & vrK { vrD = vsubwod.q.du(vrD, vrJ, vrK); } define pcodeop vaddwev.h.bu.b; #lsx.txt vaddwev.h.bu.b mask=0x703e0000 #0x703e0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.h.bu.b vrD, vrJ, vrK is op15_31=0xe07c & vrD & vrJ & vrK { vrD = vaddwev.h.bu.b(vrD, vrJ, vrK); } define pcodeop vaddwev.w.hu.h; #lsx.txt vaddwev.w.hu.h mask=0x703e8000 #0x703e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.w.hu.h vrD, vrJ, vrK is op15_31=0xe07d & vrD & vrJ & vrK { vrD = vaddwev.w.hu.h(vrD, vrJ, vrK); } define pcodeop vaddwev.d.wu.w; #lsx.txt vaddwev.d.wu.w mask=0x703f0000 #0x703f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.d.wu.w vrD, vrJ, vrK is op15_31=0xe07e & vrD & vrJ & vrK { vrD = vaddwev.d.wu.w(vrD, vrJ, vrK); } define pcodeop vaddwev.q.du.d; #lsx.txt vaddwev.q.du.d mask=0x703f8000 #0x703f8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 
'vreg5_5_s0', 'vreg10_5_s0'] :vaddwev.q.du.d vrD, vrJ, vrK is op15_31=0xe07f & vrD & vrJ & vrK { vrD = vaddwev.q.du.d(vrD, vrJ, vrK); } define pcodeop vaddwod.h.bu.b; #lsx.txt vaddwod.h.bu.b mask=0x70400000 #0x70400000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.h.bu.b vrD, vrJ, vrK is op15_31=0xe080 & vrD & vrJ & vrK { vrD = vaddwod.h.bu.b(vrD, vrJ, vrK); } define pcodeop vaddwod.w.hu.h; #lsx.txt vaddwod.w.hu.h mask=0x70408000 #0x70408000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.w.hu.h vrD, vrJ, vrK is op15_31=0xe081 & vrD & vrJ & vrK { vrD = vaddwod.w.hu.h(vrD, vrJ, vrK); } define pcodeop vaddwod.d.wu.w; #lsx.txt vaddwod.d.wu.w mask=0x70410000 #0x70410000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.d.wu.w vrD, vrJ, vrK is op15_31=0xe082 & vrD & vrJ & vrK { vrD = vaddwod.d.wu.w(vrD, vrJ, vrK); } define pcodeop vaddwod.q.du.d; #lsx.txt vaddwod.q.du.d mask=0x70418000 #0x70418000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vaddwod.q.du.d vrD, vrJ, vrK is op15_31=0xe083 & vrD & vrJ & vrK { vrD = vaddwod.q.du.d(vrD, vrJ, vrK); } define pcodeop vsadd.b; #lsx.txt vsadd.b mask=0x70460000 #0x70460000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.b vrD, vrJ, vrK is op15_31=0xe08c & vrD & vrJ & vrK { vrD = vsadd.b(vrD, vrJ, vrK); } define pcodeop vsadd.h; #lsx.txt vsadd.h mask=0x70468000 #0x70468000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.h vrD, vrJ, vrK is op15_31=0xe08d & vrD & vrJ & vrK { vrD = vsadd.h(vrD, vrJ, vrK); } define pcodeop vsadd.w; #lsx.txt vsadd.w mask=0x70470000 #0x70470000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.w vrD, vrJ, vrK is op15_31=0xe08e & vrD & vrJ & vrK { vrD = vsadd.w(vrD, vrJ, vrK); } define pcodeop vsadd.d; #lsx.txt vsadd.d mask=0x70478000 #0x70478000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 
'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.d vrD, vrJ, vrK is op15_31=0xe08f & vrD & vrJ & vrK { vrD = vsadd.d(vrD, vrJ, vrK); } define pcodeop vssub.b; #lsx.txt vssub.b mask=0x70480000 #0x70480000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.b vrD, vrJ, vrK is op15_31=0xe090 & vrD & vrJ & vrK { vrD = vssub.b(vrD, vrJ, vrK); } define pcodeop vssub.h; #lsx.txt vssub.h mask=0x70488000 #0x70488000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.h vrD, vrJ, vrK is op15_31=0xe091 & vrD & vrJ & vrK { vrD = vssub.h(vrD, vrJ, vrK); } define pcodeop vssub.w; #lsx.txt vssub.w mask=0x70490000 #0x70490000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.w vrD, vrJ, vrK is op15_31=0xe092 & vrD & vrJ & vrK { vrD = vssub.w(vrD, vrJ, vrK); } define pcodeop vssub.d; #lsx.txt vssub.d mask=0x70498000 #0x70498000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.d vrD, vrJ, vrK is op15_31=0xe093 & vrD & vrJ & vrK { vrD = vssub.d(vrD, vrJ, vrK); } define pcodeop vsadd.bu; #lsx.txt vsadd.bu mask=0x704a0000 #0x704a0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.bu vrD, vrJ, vrK is op15_31=0xe094 & vrD & vrJ & vrK { vrD = vsadd.bu(vrD, vrJ, vrK); } define pcodeop vsadd.hu; #lsx.txt vsadd.hu mask=0x704a8000 #0x704a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.hu vrD, vrJ, vrK is op15_31=0xe095 & vrD & vrJ & vrK { vrD = vsadd.hu(vrD, vrJ, vrK); } define pcodeop vsadd.wu; #lsx.txt vsadd.wu mask=0x704b0000 #0x704b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.wu vrD, vrJ, vrK is op15_31=0xe096 & vrD & vrJ & vrK { vrD = vsadd.wu(vrD, vrJ, vrK); } define pcodeop vsadd.du; #lsx.txt vsadd.du mask=0x704b8000 #0x704b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsadd.du vrD, vrJ, vrK is op15_31=0xe097 & vrD & vrJ & vrK { vrD = vsadd.du(vrD, vrJ, vrK); 
} define pcodeop vssub.bu; #lsx.txt vssub.bu mask=0x704c0000 #0x704c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.bu vrD, vrJ, vrK is op15_31=0xe098 & vrD & vrJ & vrK { vrD = vssub.bu(vrD, vrJ, vrK); } define pcodeop vssub.hu; #lsx.txt vssub.hu mask=0x704c8000 #0x704c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.hu vrD, vrJ, vrK is op15_31=0xe099 & vrD & vrJ & vrK { vrD = vssub.hu(vrD, vrJ, vrK); } define pcodeop vssub.wu; #lsx.txt vssub.wu mask=0x704d0000 #0x704d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.wu vrD, vrJ, vrK is op15_31=0xe09a & vrD & vrJ & vrK { vrD = vssub.wu(vrD, vrJ, vrK); } define pcodeop vssub.du; #lsx.txt vssub.du mask=0x704d8000 #0x704d8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssub.du vrD, vrJ, vrK is op15_31=0xe09b & vrD & vrJ & vrK { vrD = vssub.du(vrD, vrJ, vrK); } define pcodeop vhaddw.h.b; #lsx.txt vhaddw.h.b mask=0x70540000 #0x70540000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.h.b vrD, vrJ, vrK is op15_31=0xe0a8 & vrD & vrJ & vrK { vrD = vhaddw.h.b(vrD, vrJ, vrK); } define pcodeop vhaddw.w.h; #lsx.txt vhaddw.w.h mask=0x70548000 #0x70548000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.w.h vrD, vrJ, vrK is op15_31=0xe0a9 & vrD & vrJ & vrK { vrD = vhaddw.w.h(vrD, vrJ, vrK); } define pcodeop vhaddw.d.w; #lsx.txt vhaddw.d.w mask=0x70550000 #0x70550000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.d.w vrD, vrJ, vrK is op15_31=0xe0aa & vrD & vrJ & vrK { vrD = vhaddw.d.w(vrD, vrJ, vrK); } define pcodeop vhaddw.q.d; #lsx.txt vhaddw.q.d mask=0x70558000 #0x70558000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.q.d vrD, vrJ, vrK is op15_31=0xe0ab & vrD & vrJ & vrK { vrD = vhaddw.q.d(vrD, vrJ, vrK); } define pcodeop vhsubw.h.b; #lsx.txt vhsubw.h.b mask=0x70560000 
#0x70560000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.h.b vrD, vrJ, vrK is op15_31=0xe0ac & vrD & vrJ & vrK { vrD = vhsubw.h.b(vrD, vrJ, vrK); } define pcodeop vhsubw.w.h; #lsx.txt vhsubw.w.h mask=0x70568000 #0x70568000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.w.h vrD, vrJ, vrK is op15_31=0xe0ad & vrD & vrJ & vrK { vrD = vhsubw.w.h(vrD, vrJ, vrK); } define pcodeop vhsubw.d.w; #lsx.txt vhsubw.d.w mask=0x70570000 #0x70570000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.d.w vrD, vrJ, vrK is op15_31=0xe0ae & vrD & vrJ & vrK { vrD = vhsubw.d.w(vrD, vrJ, vrK); } define pcodeop vhsubw.q.d; #lsx.txt vhsubw.q.d mask=0x70578000 #0x70578000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.q.d vrD, vrJ, vrK is op15_31=0xe0af & vrD & vrJ & vrK { vrD = vhsubw.q.d(vrD, vrJ, vrK); } define pcodeop vhaddw.hu.bu; #lsx.txt vhaddw.hu.bu mask=0x70580000 #0x70580000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.hu.bu vrD, vrJ, vrK is op15_31=0xe0b0 & vrD & vrJ & vrK { vrD = vhaddw.hu.bu(vrD, vrJ, vrK); } define pcodeop vhaddw.wu.hu; #lsx.txt vhaddw.wu.hu mask=0x70588000 #0x70588000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.wu.hu vrD, vrJ, vrK is op15_31=0xe0b1 & vrD & vrJ & vrK { vrD = vhaddw.wu.hu(vrD, vrJ, vrK); } define pcodeop vhaddw.du.wu; #lsx.txt vhaddw.du.wu mask=0x70590000 #0x70590000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.du.wu vrD, vrJ, vrK is op15_31=0xe0b2 & vrD & vrJ & vrK { vrD = vhaddw.du.wu(vrD, vrJ, vrK); } define pcodeop vhaddw.qu.du; #lsx.txt vhaddw.qu.du mask=0x70598000 #0x70598000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhaddw.qu.du vrD, vrJ, vrK is op15_31=0xe0b3 & vrD & vrJ & vrK { vrD = vhaddw.qu.du(vrD, vrJ, vrK); } define pcodeop vhsubw.hu.bu; #lsx.txt vhsubw.hu.bu mask=0x705a0000 
#0x705a0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.hu.bu vrD, vrJ, vrK is op15_31=0xe0b4 & vrD & vrJ & vrK { vrD = vhsubw.hu.bu(vrD, vrJ, vrK); } define pcodeop vhsubw.wu.hu; #lsx.txt vhsubw.wu.hu mask=0x705a8000 #0x705a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.wu.hu vrD, vrJ, vrK is op15_31=0xe0b5 & vrD & vrJ & vrK { vrD = vhsubw.wu.hu(vrD, vrJ, vrK); } define pcodeop vhsubw.du.wu; #lsx.txt vhsubw.du.wu mask=0x705b0000 #0x705b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.du.wu vrD, vrJ, vrK is op15_31=0xe0b6 & vrD & vrJ & vrK { vrD = vhsubw.du.wu(vrD, vrJ, vrK); } define pcodeop vhsubw.qu.du; #lsx.txt vhsubw.qu.du mask=0x705b8000 #0x705b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vhsubw.qu.du vrD, vrJ, vrK is op15_31=0xe0b7 & vrD & vrJ & vrK { vrD = vhsubw.qu.du(vrD, vrJ, vrK); } define pcodeop vadda.b; #lsx.txt vadda.b mask=0x705c0000 #0x705c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadda.b vrD, vrJ, vrK is op15_31=0xe0b8 & vrD & vrJ & vrK { vrD = vadda.b(vrD, vrJ, vrK); } define pcodeop vadda.h; #lsx.txt vadda.h mask=0x705c8000 #0x705c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadda.h vrD, vrJ, vrK is op15_31=0xe0b9 & vrD & vrJ & vrK { vrD = vadda.h(vrD, vrJ, vrK); } define pcodeop vadda.w; #lsx.txt vadda.w mask=0x705d0000 #0x705d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadda.w vrD, vrJ, vrK is op15_31=0xe0ba & vrD & vrJ & vrK { vrD = vadda.w(vrD, vrJ, vrK); } define pcodeop vadda.d; #lsx.txt vadda.d mask=0x705d8000 #0x705d8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadda.d vrD, vrJ, vrK is op15_31=0xe0bb & vrD & vrJ & vrK { vrD = vadda.d(vrD, vrJ, vrK); } define pcodeop vabsd.b; #lsx.txt vabsd.b mask=0x70600000 #0x70600000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 
'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.b vrD, vrJ, vrK is op15_31=0xe0c0 & vrD & vrJ & vrK { vrD = vabsd.b(vrD, vrJ, vrK); } define pcodeop vabsd.h; #lsx.txt vabsd.h mask=0x70608000 #0x70608000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.h vrD, vrJ, vrK is op15_31=0xe0c1 & vrD & vrJ & vrK { vrD = vabsd.h(vrD, vrJ, vrK); } define pcodeop vabsd.w; #lsx.txt vabsd.w mask=0x70610000 #0x70610000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.w vrD, vrJ, vrK is op15_31=0xe0c2 & vrD & vrJ & vrK { vrD = vabsd.w(vrD, vrJ, vrK); } define pcodeop vabsd.d; #lsx.txt vabsd.d mask=0x70618000 #0x70618000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.d vrD, vrJ, vrK is op15_31=0xe0c3 & vrD & vrJ & vrK { vrD = vabsd.d(vrD, vrJ, vrK); } define pcodeop vabsd.bu; #lsx.txt vabsd.bu mask=0x70620000 #0x70620000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.bu vrD, vrJ, vrK is op15_31=0xe0c4 & vrD & vrJ & vrK { vrD = vabsd.bu(vrD, vrJ, vrK); } define pcodeop vabsd.hu; #lsx.txt vabsd.hu mask=0x70628000 #0x70628000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.hu vrD, vrJ, vrK is op15_31=0xe0c5 & vrD & vrJ & vrK { vrD = vabsd.hu(vrD, vrJ, vrK); } define pcodeop vabsd.wu; #lsx.txt vabsd.wu mask=0x70630000 #0x70630000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.wu vrD, vrJ, vrK is op15_31=0xe0c6 & vrD & vrJ & vrK { vrD = vabsd.wu(vrD, vrJ, vrK); } define pcodeop vabsd.du; #lsx.txt vabsd.du mask=0x70638000 #0x70638000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vabsd.du vrD, vrJ, vrK is op15_31=0xe0c7 & vrD & vrJ & vrK { vrD = vabsd.du(vrD, vrJ, vrK); } define pcodeop vavg.b; #lsx.txt vavg.b mask=0x70640000 #0x70640000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vavg.b vrD, vrJ, vrK is op15_31=0xe0c8 & vrD & vrJ & vrK { vrD = vavg.b(vrD, vrJ, vrK); } 
# LSX 3R-form vector ops (vrD, vrJ, vrK), modeled as opaque pcodeops.
# Each entry: a pcodeop declaration, then its constructor keyed on op15_31.
# Mask comments (from lsx.txt) give encoding mask / fixed-bit mask / operand fields.

# --- Vector average (signed / unsigned) ---
define pcodeop vavg.h; #lsx.txt vavg.h mask=0x70648000 0xffff8000 v0:5,v5:5,v10:5
:vavg.h vrD, vrJ, vrK is op15_31=0xe0c9 & vrD & vrJ & vrK { vrD = vavg.h(vrD, vrJ, vrK); }
define pcodeop vavg.w; #lsx.txt vavg.w mask=0x70650000 0xffff8000 v0:5,v5:5,v10:5
:vavg.w vrD, vrJ, vrK is op15_31=0xe0ca & vrD & vrJ & vrK { vrD = vavg.w(vrD, vrJ, vrK); }
define pcodeop vavg.d; #lsx.txt vavg.d mask=0x70658000 0xffff8000 v0:5,v5:5,v10:5
:vavg.d vrD, vrJ, vrK is op15_31=0xe0cb & vrD & vrJ & vrK { vrD = vavg.d(vrD, vrJ, vrK); }
define pcodeop vavg.bu; #lsx.txt vavg.bu mask=0x70660000 0xffff8000 v0:5,v5:5,v10:5
:vavg.bu vrD, vrJ, vrK is op15_31=0xe0cc & vrD & vrJ & vrK { vrD = vavg.bu(vrD, vrJ, vrK); }
define pcodeop vavg.hu; #lsx.txt vavg.hu mask=0x70668000 0xffff8000 v0:5,v5:5,v10:5
:vavg.hu vrD, vrJ, vrK is op15_31=0xe0cd & vrD & vrJ & vrK { vrD = vavg.hu(vrD, vrJ, vrK); }
define pcodeop vavg.wu; #lsx.txt vavg.wu mask=0x70670000 0xffff8000 v0:5,v5:5,v10:5
:vavg.wu vrD, vrJ, vrK is op15_31=0xe0ce & vrD & vrJ & vrK { vrD = vavg.wu(vrD, vrJ, vrK); }
define pcodeop vavg.du; #lsx.txt vavg.du mask=0x70678000 0xffff8000 v0:5,v5:5,v10:5
:vavg.du vrD, vrJ, vrK is op15_31=0xe0cf & vrD & vrJ & vrK { vrD = vavg.du(vrD, vrJ, vrK); }

# --- Vector average with rounding (signed / unsigned) ---
define pcodeop vavgr.b; #lsx.txt vavgr.b mask=0x70680000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.b vrD, vrJ, vrK is op15_31=0xe0d0 & vrD & vrJ & vrK { vrD = vavgr.b(vrD, vrJ, vrK); }
define pcodeop vavgr.h; #lsx.txt vavgr.h mask=0x70688000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.h vrD, vrJ, vrK is op15_31=0xe0d1 & vrD & vrJ & vrK { vrD = vavgr.h(vrD, vrJ, vrK); }
define pcodeop vavgr.w; #lsx.txt vavgr.w mask=0x70690000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.w vrD, vrJ, vrK is op15_31=0xe0d2 & vrD & vrJ & vrK { vrD = vavgr.w(vrD, vrJ, vrK); }
define pcodeop vavgr.d; #lsx.txt vavgr.d mask=0x70698000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.d vrD, vrJ, vrK is op15_31=0xe0d3 & vrD & vrJ & vrK { vrD = vavgr.d(vrD, vrJ, vrK); }
define pcodeop vavgr.bu; #lsx.txt vavgr.bu mask=0x706a0000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.bu vrD, vrJ, vrK is op15_31=0xe0d4 & vrD & vrJ & vrK { vrD = vavgr.bu(vrD, vrJ, vrK); }
define pcodeop vavgr.hu; #lsx.txt vavgr.hu mask=0x706a8000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.hu vrD, vrJ, vrK is op15_31=0xe0d5 & vrD & vrJ & vrK { vrD = vavgr.hu(vrD, vrJ, vrK); }
define pcodeop vavgr.wu; #lsx.txt vavgr.wu mask=0x706b0000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.wu vrD, vrJ, vrK is op15_31=0xe0d6 & vrD & vrJ & vrK { vrD = vavgr.wu(vrD, vrJ, vrK); }
define pcodeop vavgr.du; #lsx.txt vavgr.du mask=0x706b8000 0xffff8000 v0:5,v5:5,v10:5
:vavgr.du vrD, vrJ, vrK is op15_31=0xe0d7 & vrD & vrJ & vrK { vrD = vavgr.du(vrD, vrJ, vrK); }

# --- Vector maximum / minimum (signed) ---
define pcodeop vmax.b; #lsx.txt vmax.b mask=0x70700000 0xffff8000 v0:5,v5:5,v10:5
:vmax.b vrD, vrJ, vrK is op15_31=0xe0e0 & vrD & vrJ & vrK { vrD = vmax.b(vrD, vrJ, vrK); }
define pcodeop vmax.h; #lsx.txt vmax.h mask=0x70708000 0xffff8000 v0:5,v5:5,v10:5
:vmax.h vrD, vrJ, vrK is op15_31=0xe0e1 & vrD & vrJ & vrK { vrD = vmax.h(vrD, vrJ, vrK); }
define pcodeop vmax.w; #lsx.txt vmax.w mask=0x70710000 0xffff8000 v0:5,v5:5,v10:5
:vmax.w vrD, vrJ, vrK is op15_31=0xe0e2 & vrD & vrJ & vrK { vrD = vmax.w(vrD, vrJ, vrK); }
define pcodeop vmax.d; #lsx.txt vmax.d mask=0x70718000 0xffff8000 v0:5,v5:5,v10:5
:vmax.d vrD, vrJ, vrK is op15_31=0xe0e3 & vrD & vrJ & vrK { vrD = vmax.d(vrD, vrJ, vrK); }
define pcodeop vmin.b; #lsx.txt vmin.b mask=0x70720000 0xffff8000 v0:5,v5:5,v10:5
:vmin.b vrD, vrJ, vrK is op15_31=0xe0e4 & vrD & vrJ & vrK { vrD = vmin.b(vrD, vrJ, vrK); }
define pcodeop vmin.h; #lsx.txt vmin.h mask=0x70728000 0xffff8000 v0:5,v5:5,v10:5
:vmin.h vrD, vrJ, vrK is op15_31=0xe0e5 & vrD & vrJ & vrK { vrD = vmin.h(vrD, vrJ, vrK); }
define pcodeop vmin.w; #lsx.txt vmin.w mask=0x70730000 0xffff8000 v0:5,v5:5,v10:5
:vmin.w vrD, vrJ, vrK is op15_31=0xe0e6 & vrD & vrJ & vrK { vrD = vmin.w(vrD, vrJ, vrK); }
define pcodeop vmin.d; #lsx.txt vmin.d mask=0x70738000 0xffff8000 v0:5,v5:5,v10:5
:vmin.d vrD, vrJ, vrK is op15_31=0xe0e7 & vrD & vrJ & vrK { vrD = vmin.d(vrD, vrJ, vrK); }

# --- Vector maximum / minimum (unsigned) ---
define pcodeop vmax.bu; #lsx.txt vmax.bu mask=0x70740000 0xffff8000 v0:5,v5:5,v10:5
:vmax.bu vrD, vrJ, vrK is op15_31=0xe0e8 & vrD & vrJ & vrK { vrD = vmax.bu(vrD, vrJ, vrK); }
define pcodeop vmax.hu; #lsx.txt vmax.hu mask=0x70748000 0xffff8000 v0:5,v5:5,v10:5
:vmax.hu vrD, vrJ, vrK is op15_31=0xe0e9 & vrD & vrJ & vrK { vrD = vmax.hu(vrD, vrJ, vrK); }
define pcodeop vmax.wu; #lsx.txt vmax.wu mask=0x70750000 0xffff8000 v0:5,v5:5,v10:5
:vmax.wu vrD, vrJ, vrK is op15_31=0xe0ea & vrD & vrJ & vrK { vrD = vmax.wu(vrD, vrJ, vrK); }
define pcodeop vmax.du; #lsx.txt vmax.du mask=0x70758000 0xffff8000 v0:5,v5:5,v10:5
:vmax.du vrD, vrJ, vrK is op15_31=0xe0eb & vrD & vrJ & vrK { vrD = vmax.du(vrD, vrJ, vrK); }
define pcodeop vmin.bu; #lsx.txt vmin.bu mask=0x70760000 0xffff8000 v0:5,v5:5,v10:5
:vmin.bu vrD, vrJ, vrK is op15_31=0xe0ec & vrD & vrJ & vrK { vrD = vmin.bu(vrD, vrJ, vrK); }
define pcodeop vmin.hu; #lsx.txt vmin.hu mask=0x70768000 0xffff8000 v0:5,v5:5,v10:5
:vmin.hu vrD, vrJ, vrK is op15_31=0xe0ed & vrD & vrJ & vrK { vrD = vmin.hu(vrD, vrJ, vrK); }
define pcodeop vmin.wu; #lsx.txt vmin.wu mask=0x70770000 0xffff8000 v0:5,v5:5,v10:5
:vmin.wu vrD, vrJ, vrK is op15_31=0xe0ee & vrD & vrJ & vrK { vrD = vmin.wu(vrD, vrJ, vrK); }
define pcodeop vmin.du; #lsx.txt vmin.du mask=0x70778000 0xffff8000 v0:5,v5:5,v10:5
:vmin.du vrD, vrJ, vrK is op15_31=0xe0ef & vrD & vrJ & vrK { vrD = vmin.du(vrD, vrJ, vrK); }

# --- Vector multiply (low part) ---
define pcodeop vmul.b; #lsx.txt vmul.b mask=0x70840000 0xffff8000 v0:5,v5:5,v10:5
:vmul.b vrD, vrJ, vrK is op15_31=0xe108 & vrD & vrJ & vrK { vrD = vmul.b(vrD, vrJ, vrK); }
define pcodeop vmul.h; #lsx.txt vmul.h mask=0x70848000 0xffff8000 v0:5,v5:5,v10:5
:vmul.h vrD, vrJ, vrK is op15_31=0xe109 & vrD & vrJ & vrK { vrD = vmul.h(vrD, vrJ, vrK); }
define pcodeop vmul.w; #lsx.txt vmul.w mask=0x70850000 0xffff8000 v0:5,v5:5,v10:5
:vmul.w vrD, vrJ, vrK is op15_31=0xe10a & vrD & vrJ & vrK { vrD = vmul.w(vrD, vrJ, vrK); }
define pcodeop vmul.d; #lsx.txt vmul.d mask=0x70858000 0xffff8000 v0:5,v5:5,v10:5
:vmul.d vrD, vrJ, vrK is op15_31=0xe10b & vrD & vrJ & vrK { vrD = vmul.d(vrD, vrJ, vrK); }

# --- Vector multiply (high part, signed / unsigned) ---
define pcodeop vmuh.b; #lsx.txt vmuh.b mask=0x70860000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.b vrD, vrJ, vrK is op15_31=0xe10c & vrD & vrJ & vrK { vrD = vmuh.b(vrD, vrJ, vrK); }
define pcodeop vmuh.h; #lsx.txt vmuh.h mask=0x70868000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.h vrD, vrJ, vrK is op15_31=0xe10d & vrD & vrJ & vrK { vrD = vmuh.h(vrD, vrJ, vrK); }
define pcodeop vmuh.w; #lsx.txt vmuh.w mask=0x70870000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.w vrD, vrJ, vrK is op15_31=0xe10e & vrD & vrJ & vrK { vrD = vmuh.w(vrD, vrJ, vrK); }
define pcodeop vmuh.d; #lsx.txt vmuh.d mask=0x70878000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.d vrD, vrJ, vrK is op15_31=0xe10f & vrD & vrJ & vrK { vrD = vmuh.d(vrD, vrJ, vrK); }
define pcodeop vmuh.bu; #lsx.txt vmuh.bu mask=0x70880000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.bu vrD, vrJ, vrK is op15_31=0xe110 & vrD & vrJ & vrK { vrD = vmuh.bu(vrD, vrJ, vrK); }
define pcodeop vmuh.hu; #lsx.txt vmuh.hu mask=0x70888000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.hu vrD, vrJ, vrK is op15_31=0xe111 & vrD & vrJ & vrK { vrD = vmuh.hu(vrD, vrJ, vrK); }
define pcodeop vmuh.wu; #lsx.txt vmuh.wu mask=0x70890000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.wu vrD, vrJ, vrK is op15_31=0xe112 & vrD & vrJ & vrK { vrD = vmuh.wu(vrD, vrJ, vrK); }
define pcodeop vmuh.du; #lsx.txt vmuh.du mask=0x70898000 0xffff8000 v0:5,v5:5,v10:5
:vmuh.du vrD, vrJ, vrK is op15_31=0xe113 & vrD & vrJ & vrK { vrD = vmuh.du(vrD, vrJ, vrK); }

# --- Widening multiply, even/odd elements (signed) ---
define pcodeop vmulwev.h.b; #lsx.txt vmulwev.h.b mask=0x70900000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.h.b vrD, vrJ, vrK is op15_31=0xe120 & vrD & vrJ & vrK { vrD = vmulwev.h.b(vrD, vrJ, vrK); }
define pcodeop vmulwev.w.h; #lsx.txt vmulwev.w.h mask=0x70908000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.w.h vrD, vrJ, vrK is op15_31=0xe121 & vrD & vrJ & vrK { vrD = vmulwev.w.h(vrD, vrJ, vrK); }
define pcodeop vmulwev.d.w; #lsx.txt vmulwev.d.w mask=0x70910000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.d.w vrD, vrJ, vrK is op15_31=0xe122 & vrD & vrJ & vrK { vrD = vmulwev.d.w(vrD, vrJ, vrK); }
define pcodeop vmulwev.q.d; #lsx.txt vmulwev.q.d mask=0x70918000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.q.d vrD, vrJ, vrK is op15_31=0xe123 & vrD & vrJ & vrK { vrD = vmulwev.q.d(vrD, vrJ, vrK); }
define pcodeop vmulwod.h.b; #lsx.txt vmulwod.h.b mask=0x70920000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.h.b vrD, vrJ, vrK is op15_31=0xe124 & vrD & vrJ & vrK { vrD = vmulwod.h.b(vrD, vrJ, vrK); }
define pcodeop vmulwod.w.h; #lsx.txt vmulwod.w.h mask=0x70928000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.w.h vrD, vrJ, vrK is op15_31=0xe125 & vrD & vrJ & vrK { vrD = vmulwod.w.h(vrD, vrJ, vrK); }
define pcodeop vmulwod.d.w; #lsx.txt vmulwod.d.w mask=0x70930000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.d.w vrD, vrJ, vrK is op15_31=0xe126 & vrD & vrJ & vrK { vrD = vmulwod.d.w(vrD, vrJ, vrK); }
define pcodeop vmulwod.q.d; #lsx.txt vmulwod.q.d mask=0x70938000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.q.d vrD, vrJ, vrK is op15_31=0xe127 & vrD & vrJ & vrK { vrD = vmulwod.q.d(vrD, vrJ, vrK); }

# --- Widening multiply, even/odd elements (unsigned) ---
define pcodeop vmulwev.h.bu; #lsx.txt vmulwev.h.bu mask=0x70980000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.h.bu vrD, vrJ, vrK is op15_31=0xe130 & vrD & vrJ & vrK { vrD = vmulwev.h.bu(vrD, vrJ, vrK); }
define pcodeop vmulwev.w.hu; #lsx.txt vmulwev.w.hu mask=0x70988000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.w.hu vrD, vrJ, vrK is op15_31=0xe131 & vrD & vrJ & vrK { vrD = vmulwev.w.hu(vrD, vrJ, vrK); }
define pcodeop vmulwev.d.wu; #lsx.txt vmulwev.d.wu mask=0x70990000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.d.wu vrD, vrJ, vrK is op15_31=0xe132 & vrD & vrJ & vrK { vrD = vmulwev.d.wu(vrD, vrJ, vrK); }
define pcodeop vmulwev.q.du; #lsx.txt vmulwev.q.du mask=0x70998000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.q.du vrD, vrJ, vrK is op15_31=0xe133 & vrD & vrJ & vrK { vrD = vmulwev.q.du(vrD, vrJ, vrK); }
define pcodeop vmulwod.h.bu; #lsx.txt vmulwod.h.bu mask=0x709a0000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.h.bu vrD, vrJ, vrK is op15_31=0xe134 & vrD & vrJ & vrK { vrD = vmulwod.h.bu(vrD, vrJ, vrK); }
define pcodeop vmulwod.w.hu; #lsx.txt vmulwod.w.hu mask=0x709a8000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.w.hu vrD, vrJ, vrK is op15_31=0xe135 & vrD & vrJ & vrK { vrD = vmulwod.w.hu(vrD, vrJ, vrK); }
define pcodeop vmulwod.d.wu; #lsx.txt vmulwod.d.wu mask=0x709b0000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.d.wu vrD, vrJ, vrK is op15_31=0xe136 & vrD & vrJ & vrK { vrD = vmulwod.d.wu(vrD, vrJ, vrK); }
define pcodeop vmulwod.q.du; #lsx.txt vmulwod.q.du mask=0x709b8000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.q.du vrD, vrJ, vrK is op15_31=0xe137 & vrD & vrJ & vrK { vrD = vmulwod.q.du(vrD, vrJ, vrK); }

# --- Widening multiply, even/odd elements (unsigned x signed) ---
define pcodeop vmulwev.h.bu.b; #lsx.txt vmulwev.h.bu.b mask=0x70a00000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.h.bu.b vrD, vrJ, vrK is op15_31=0xe140 & vrD & vrJ & vrK { vrD = vmulwev.h.bu.b(vrD, vrJ, vrK); }
define pcodeop vmulwev.w.hu.h; #lsx.txt vmulwev.w.hu.h mask=0x70a08000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.w.hu.h vrD, vrJ, vrK is op15_31=0xe141 & vrD & vrJ & vrK { vrD = vmulwev.w.hu.h(vrD, vrJ, vrK); }
define pcodeop vmulwev.d.wu.w; #lsx.txt vmulwev.d.wu.w mask=0x70a10000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.d.wu.w vrD, vrJ, vrK is op15_31=0xe142 & vrD & vrJ & vrK { vrD = vmulwev.d.wu.w(vrD, vrJ, vrK); }
define pcodeop vmulwev.q.du.d; #lsx.txt vmulwev.q.du.d mask=0x70a18000 0xffff8000 v0:5,v5:5,v10:5
:vmulwev.q.du.d vrD, vrJ, vrK is op15_31=0xe143 & vrD & vrJ & vrK { vrD = vmulwev.q.du.d(vrD, vrJ, vrK); }
define pcodeop vmulwod.h.bu.b; #lsx.txt vmulwod.h.bu.b mask=0x70a20000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.h.bu.b vrD, vrJ, vrK is op15_31=0xe144 & vrD & vrJ & vrK { vrD = vmulwod.h.bu.b(vrD, vrJ, vrK); }
define pcodeop vmulwod.w.hu.h; #lsx.txt vmulwod.w.hu.h mask=0x70a28000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.w.hu.h vrD, vrJ, vrK is op15_31=0xe145 & vrD & vrJ & vrK { vrD = vmulwod.w.hu.h(vrD, vrJ, vrK); }
define pcodeop vmulwod.d.wu.w; #lsx.txt vmulwod.d.wu.w mask=0x70a30000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.d.wu.w vrD, vrJ, vrK is op15_31=0xe146 & vrD & vrJ & vrK { vrD = vmulwod.d.wu.w(vrD, vrJ, vrK); }
define pcodeop vmulwod.q.du.d; #lsx.txt vmulwod.q.du.d mask=0x70a38000 0xffff8000 v0:5,v5:5,v10:5
:vmulwod.q.du.d vrD, vrJ, vrK is op15_31=0xe147 & vrD & vrJ & vrK { vrD = vmulwod.q.du.d(vrD, vrJ, vrK); }

# --- Vector multiply-add / multiply-subtract ---
define pcodeop vmadd.b; #lsx.txt vmadd.b mask=0x70a80000 0xffff8000 v0:5,v5:5,v10:5
:vmadd.b vrD, vrJ, vrK is op15_31=0xe150 & vrD & vrJ & vrK { vrD = vmadd.b(vrD, vrJ, vrK); }
define pcodeop vmadd.h; #lsx.txt vmadd.h mask=0x70a88000 0xffff8000 v0:5,v5:5,v10:5
:vmadd.h vrD, vrJ, vrK is op15_31=0xe151 & vrD & vrJ & vrK { vrD = vmadd.h(vrD, vrJ, vrK); }
define pcodeop vmadd.w; #lsx.txt vmadd.w mask=0x70a90000 0xffff8000 v0:5,v5:5,v10:5
:vmadd.w vrD, vrJ, vrK is op15_31=0xe152 & vrD & vrJ & vrK { vrD = vmadd.w(vrD, vrJ, vrK); }
define pcodeop vmadd.d; #lsx.txt vmadd.d mask=0x70a98000 0xffff8000 v0:5,v5:5,v10:5
:vmadd.d vrD, vrJ, vrK is op15_31=0xe153 & vrD & vrJ & vrK { vrD = vmadd.d(vrD, vrJ, vrK); }
define pcodeop vmsub.b; #lsx.txt vmsub.b mask=0x70aa0000 0xffff8000 v0:5,v5:5,v10:5
:vmsub.b vrD, vrJ, vrK is op15_31=0xe154 & vrD & vrJ & vrK { vrD = vmsub.b(vrD, vrJ, vrK); }
define pcodeop vmsub.h; #lsx.txt vmsub.h mask=0x70aa8000 0xffff8000 v0:5,v5:5,v10:5
:vmsub.h vrD, vrJ, vrK is op15_31=0xe155 & vrD & vrJ & vrK { vrD = vmsub.h(vrD, vrJ, vrK); }
define pcodeop vmsub.w; #lsx.txt vmsub.w mask=0x70ab0000 0xffff8000 v0:5,v5:5,v10:5
:vmsub.w vrD, vrJ, vrK is op15_31=0xe156 & vrD & vrJ & vrK { vrD = vmsub.w(vrD, vrJ, vrK); }
define pcodeop vmsub.d; #lsx.txt vmsub.d mask=0x70ab8000 0xffff8000 v0:5,v5:5,v10:5
:vmsub.d vrD, vrJ, vrK is op15_31=0xe157 & vrD & vrJ & vrK { vrD = vmsub.d(vrD, vrJ, vrK); }

# --- Widening multiply-add, even/odd elements (signed) ---
define pcodeop vmaddwev.h.b; #lsx.txt vmaddwev.h.b mask=0x70ac0000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwev.h.b vrD, vrJ, vrK is op15_31=0xe158 & vrD & vrJ & vrK { vrD = vmaddwev.h.b(vrD, vrJ, vrK); }
define pcodeop vmaddwev.w.h; #lsx.txt vmaddwev.w.h mask=0x70ac8000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwev.w.h vrD, vrJ, vrK is op15_31=0xe159 & vrD & vrJ & vrK { vrD = vmaddwev.w.h(vrD, vrJ, vrK); }
define pcodeop vmaddwev.d.w; #lsx.txt vmaddwev.d.w mask=0x70ad0000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwev.d.w vrD, vrJ, vrK is op15_31=0xe15a & vrD & vrJ & vrK { vrD = vmaddwev.d.w(vrD, vrJ, vrK); }
define pcodeop vmaddwev.q.d; #lsx.txt vmaddwev.q.d mask=0x70ad8000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwev.q.d vrD, vrJ, vrK is op15_31=0xe15b & vrD & vrJ & vrK { vrD = vmaddwev.q.d(vrD, vrJ, vrK); }
define pcodeop vmaddwod.h.b; #lsx.txt vmaddwod.h.b mask=0x70ae0000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwod.h.b vrD, vrJ, vrK is op15_31=0xe15c & vrD & vrJ & vrK { vrD = vmaddwod.h.b(vrD, vrJ, vrK); }
define pcodeop vmaddwod.w.h; #lsx.txt vmaddwod.w.h mask=0x70ae8000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwod.w.h vrD, vrJ, vrK is op15_31=0xe15d & vrD & vrJ & vrK { vrD = vmaddwod.w.h(vrD, vrJ, vrK); }
define pcodeop vmaddwod.d.w; #lsx.txt vmaddwod.d.w mask=0x70af0000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwod.d.w vrD, vrJ, vrK is op15_31=0xe15e & vrD & vrJ & vrK { vrD = vmaddwod.d.w(vrD, vrJ, vrK); }
define pcodeop vmaddwod.q.d; #lsx.txt vmaddwod.q.d mask=0x70af8000 0xffff8000 v0:5,v5:5,v10:5
:vmaddwod.q.d vrD, vrJ, vrK is op15_31=0xe15f & vrD & vrJ & vrK { vrD = vmaddwod.q.d(vrD, vrJ, vrK); }
define pcodeop vmaddwev.h.bu; #lsx.txt vmaddwev.h.bu mask=0x70b40000 #0x70b40000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.h.bu vrD, vrJ, vrK is op15_31=0xe168 & vrD & vrJ & vrK { vrD = vmaddwev.h.bu(vrD, vrJ, vrK); } define pcodeop vmaddwev.w.hu; #lsx.txt vmaddwev.w.hu mask=0x70b48000 #0x70b48000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.w.hu vrD, vrJ, vrK is op15_31=0xe169 & vrD & vrJ & vrK { vrD = vmaddwev.w.hu(vrD, vrJ, vrK); } define pcodeop vmaddwev.d.wu; #lsx.txt vmaddwev.d.wu mask=0x70b50000 #0x70b50000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.d.wu vrD, vrJ, vrK is op15_31=0xe16a & vrD & vrJ & vrK { vrD = vmaddwev.d.wu(vrD, vrJ, vrK); } define pcodeop vmaddwev.q.du; #lsx.txt vmaddwev.q.du mask=0x70b58000 #0x70b58000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.q.du vrD, vrJ, vrK is op15_31=0xe16b & vrD & vrJ & vrK { vrD = vmaddwev.q.du(vrD, vrJ, vrK); } define pcodeop vmaddwod.h.bu; #lsx.txt vmaddwod.h.bu mask=0x70b60000 #0x70b60000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.h.bu vrD, vrJ, vrK is op15_31=0xe16c & vrD & vrJ & vrK { vrD = vmaddwod.h.bu(vrD, vrJ, vrK); } define pcodeop vmaddwod.w.hu; #lsx.txt vmaddwod.w.hu mask=0x70b68000 #0x70b68000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.w.hu vrD, vrJ, vrK is op15_31=0xe16d & vrD & vrJ & vrK { vrD = vmaddwod.w.hu(vrD, vrJ, vrK); } define pcodeop vmaddwod.d.wu; #lsx.txt vmaddwod.d.wu mask=0x70b70000 #0x70b70000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.d.wu vrD, vrJ, vrK is op15_31=0xe16e & vrD & vrJ & vrK { vrD = vmaddwod.d.wu(vrD, vrJ, vrK); } define pcodeop vmaddwod.q.du; #lsx.txt vmaddwod.q.du mask=0x70b78000 #0x70b78000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.q.du vrD, vrJ, vrK is op15_31=0xe16f & 
vrD & vrJ & vrK { vrD = vmaddwod.q.du(vrD, vrJ, vrK); } define pcodeop vmaddwev.h.bu.b; #lsx.txt vmaddwev.h.bu.b mask=0x70bc0000 #0x70bc0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.h.bu.b vrD, vrJ, vrK is op15_31=0xe178 & vrD & vrJ & vrK { vrD = vmaddwev.h.bu.b(vrD, vrJ, vrK); } define pcodeop vmaddwev.w.hu.h; #lsx.txt vmaddwev.w.hu.h mask=0x70bc8000 #0x70bc8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.w.hu.h vrD, vrJ, vrK is op15_31=0xe179 & vrD & vrJ & vrK { vrD = vmaddwev.w.hu.h(vrD, vrJ, vrK); } define pcodeop vmaddwev.d.wu.w; #lsx.txt vmaddwev.d.wu.w mask=0x70bd0000 #0x70bd0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.d.wu.w vrD, vrJ, vrK is op15_31=0xe17a & vrD & vrJ & vrK { vrD = vmaddwev.d.wu.w(vrD, vrJ, vrK); } define pcodeop vmaddwev.q.du.d; #lsx.txt vmaddwev.q.du.d mask=0x70bd8000 #0x70bd8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwev.q.du.d vrD, vrJ, vrK is op15_31=0xe17b & vrD & vrJ & vrK { vrD = vmaddwev.q.du.d(vrD, vrJ, vrK); } define pcodeop vmaddwod.h.bu.b; #lsx.txt vmaddwod.h.bu.b mask=0x70be0000 #0x70be0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.h.bu.b vrD, vrJ, vrK is op15_31=0xe17c & vrD & vrJ & vrK { vrD = vmaddwod.h.bu.b(vrD, vrJ, vrK); } define pcodeop vmaddwod.w.hu.h; #lsx.txt vmaddwod.w.hu.h mask=0x70be8000 #0x70be8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.w.hu.h vrD, vrJ, vrK is op15_31=0xe17d & vrD & vrJ & vrK { vrD = vmaddwod.w.hu.h(vrD, vrJ, vrK); } define pcodeop vmaddwod.d.wu.w; #lsx.txt vmaddwod.d.wu.w mask=0x70bf0000 #0x70bf0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.d.wu.w vrD, vrJ, vrK is op15_31=0xe17e & vrD & vrJ & vrK { vrD = vmaddwod.d.wu.w(vrD, vrJ, vrK); } define pcodeop vmaddwod.q.du.d; #lsx.txt vmaddwod.q.du.d mask=0x70bf8000 #0x70bf8000 
0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmaddwod.q.du.d vrD, vrJ, vrK is op15_31=0xe17f & vrD & vrJ & vrK { vrD = vmaddwod.q.du.d(vrD, vrJ, vrK); } define pcodeop vdiv.b; #lsx.txt vdiv.b mask=0x70e00000 #0x70e00000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.b vrD, vrJ, vrK is op15_31=0xe1c0 & vrD & vrJ & vrK { vrD = vdiv.b(vrD, vrJ, vrK); } define pcodeop vdiv.h; #lsx.txt vdiv.h mask=0x70e08000 #0x70e08000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.h vrD, vrJ, vrK is op15_31=0xe1c1 & vrD & vrJ & vrK { vrD = vdiv.h(vrD, vrJ, vrK); } define pcodeop vdiv.w; #lsx.txt vdiv.w mask=0x70e10000 #0x70e10000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.w vrD, vrJ, vrK is op15_31=0xe1c2 & vrD & vrJ & vrK { vrD = vdiv.w(vrD, vrJ, vrK); } define pcodeop vdiv.d; #lsx.txt vdiv.d mask=0x70e18000 #0x70e18000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.d vrD, vrJ, vrK is op15_31=0xe1c3 & vrD & vrJ & vrK { vrD = vdiv.d(vrD, vrJ, vrK); } define pcodeop vmod.b; #lsx.txt vmod.b mask=0x70e20000 #0x70e20000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.b vrD, vrJ, vrK is op15_31=0xe1c4 & vrD & vrJ & vrK { vrD = vmod.b(vrD, vrJ, vrK); } define pcodeop vmod.h; #lsx.txt vmod.h mask=0x70e28000 #0x70e28000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.h vrD, vrJ, vrK is op15_31=0xe1c5 & vrD & vrJ & vrK { vrD = vmod.h(vrD, vrJ, vrK); } define pcodeop vmod.w; #lsx.txt vmod.w mask=0x70e30000 #0x70e30000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.w vrD, vrJ, vrK is op15_31=0xe1c6 & vrD & vrJ & vrK { vrD = vmod.w(vrD, vrJ, vrK); } define pcodeop vmod.d; #lsx.txt vmod.d mask=0x70e38000 #0x70e38000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.d vrD, vrJ, vrK is op15_31=0xe1c7 & vrD & vrJ & vrK { vrD = vmod.d(vrD, 
vrJ, vrK); } define pcodeop vdiv.bu; #lsx.txt vdiv.bu mask=0x70e40000 #0x70e40000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.bu vrD, vrJ, vrK is op15_31=0xe1c8 & vrD & vrJ & vrK { vrD = vdiv.bu(vrD, vrJ, vrK); } define pcodeop vdiv.hu; #lsx.txt vdiv.hu mask=0x70e48000 #0x70e48000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.hu vrD, vrJ, vrK is op15_31=0xe1c9 & vrD & vrJ & vrK { vrD = vdiv.hu(vrD, vrJ, vrK); } define pcodeop vdiv.wu; #lsx.txt vdiv.wu mask=0x70e50000 #0x70e50000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.wu vrD, vrJ, vrK is op15_31=0xe1ca & vrD & vrJ & vrK { vrD = vdiv.wu(vrD, vrJ, vrK); } define pcodeop vdiv.du; #lsx.txt vdiv.du mask=0x70e58000 #0x70e58000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vdiv.du vrD, vrJ, vrK is op15_31=0xe1cb & vrD & vrJ & vrK { vrD = vdiv.du(vrD, vrJ, vrK); } define pcodeop vmod.bu; #lsx.txt vmod.bu mask=0x70e60000 #0x70e60000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.bu vrD, vrJ, vrK is op15_31=0xe1cc & vrD & vrJ & vrK { vrD = vmod.bu(vrD, vrJ, vrK); } define pcodeop vmod.hu; #lsx.txt vmod.hu mask=0x70e68000 #0x70e68000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.hu vrD, vrJ, vrK is op15_31=0xe1cd & vrD & vrJ & vrK { vrD = vmod.hu(vrD, vrJ, vrK); } define pcodeop vmod.wu; #lsx.txt vmod.wu mask=0x70e70000 #0x70e70000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.wu vrD, vrJ, vrK is op15_31=0xe1ce & vrD & vrJ & vrK { vrD = vmod.wu(vrD, vrJ, vrK); } define pcodeop vmod.du; #lsx.txt vmod.du mask=0x70e78000 #0x70e78000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vmod.du vrD, vrJ, vrK is op15_31=0xe1cf & vrD & vrJ & vrK { vrD = vmod.du(vrD, vrJ, vrK); } define pcodeop vsll.b; #lsx.txt vsll.b mask=0x70e80000 #0x70e80000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 
'vreg10_5_s0'] :vsll.b vrD, vrJ, vrK is op15_31=0xe1d0 & vrD & vrJ & vrK { vrD = vsll.b(vrD, vrJ, vrK); } define pcodeop vsll.h; #lsx.txt vsll.h mask=0x70e88000 #0x70e88000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsll.h vrD, vrJ, vrK is op15_31=0xe1d1 & vrD & vrJ & vrK { vrD = vsll.h(vrD, vrJ, vrK); } define pcodeop vsll.w; #lsx.txt vsll.w mask=0x70e90000 #0x70e90000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsll.w vrD, vrJ, vrK is op15_31=0xe1d2 & vrD & vrJ & vrK { vrD = vsll.w(vrD, vrJ, vrK); } define pcodeop vsll.d; #lsx.txt vsll.d mask=0x70e98000 #0x70e98000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsll.d vrD, vrJ, vrK is op15_31=0xe1d3 & vrD & vrJ & vrK { vrD = vsll.d(vrD, vrJ, vrK); } define pcodeop vsrl.b; #lsx.txt vsrl.b mask=0x70ea0000 #0x70ea0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrl.b vrD, vrJ, vrK is op15_31=0xe1d4 & vrD & vrJ & vrK { vrD = vsrl.b(vrD, vrJ, vrK); } define pcodeop vsrl.h; #lsx.txt vsrl.h mask=0x70ea8000 #0x70ea8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrl.h vrD, vrJ, vrK is op15_31=0xe1d5 & vrD & vrJ & vrK { vrD = vsrl.h(vrD, vrJ, vrK); } define pcodeop vsrl.w; #lsx.txt vsrl.w mask=0x70eb0000 #0x70eb0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrl.w vrD, vrJ, vrK is op15_31=0xe1d6 & vrD & vrJ & vrK { vrD = vsrl.w(vrD, vrJ, vrK); } define pcodeop vsrl.d; #lsx.txt vsrl.d mask=0x70eb8000 #0x70eb8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrl.d vrD, vrJ, vrK is op15_31=0xe1d7 & vrD & vrJ & vrK { vrD = vsrl.d(vrD, vrJ, vrK); } define pcodeop vsra.b; #lsx.txt vsra.b mask=0x70ec0000 #0x70ec0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsra.b vrD, vrJ, vrK is op15_31=0xe1d8 & vrD & vrJ & vrK { vrD = vsra.b(vrD, vrJ, vrK); } define pcodeop vsra.h; #lsx.txt vsra.h mask=0x70ec8000 
#0x70ec8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsra.h vrD, vrJ, vrK is op15_31=0xe1d9 & vrD & vrJ & vrK { vrD = vsra.h(vrD, vrJ, vrK); } define pcodeop vsra.w; #lsx.txt vsra.w mask=0x70ed0000 #0x70ed0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsra.w vrD, vrJ, vrK is op15_31=0xe1da & vrD & vrJ & vrK { vrD = vsra.w(vrD, vrJ, vrK); } define pcodeop vsra.d; #lsx.txt vsra.d mask=0x70ed8000 #0x70ed8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsra.d vrD, vrJ, vrK is op15_31=0xe1db & vrD & vrJ & vrK { vrD = vsra.d(vrD, vrJ, vrK); } define pcodeop vrotr.b; #lsx.txt vrotr.b mask=0x70ee0000 #0x70ee0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vrotr.b vrD, vrJ, vrK is op15_31=0xe1dc & vrD & vrJ & vrK { vrD = vrotr.b(vrD, vrJ, vrK); } define pcodeop vrotr.h; #lsx.txt vrotr.h mask=0x70ee8000 #0x70ee8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vrotr.h vrD, vrJ, vrK is op15_31=0xe1dd & vrD & vrJ & vrK { vrD = vrotr.h(vrD, vrJ, vrK); } define pcodeop vrotr.w; #lsx.txt vrotr.w mask=0x70ef0000 #0x70ef0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vrotr.w vrD, vrJ, vrK is op15_31=0xe1de & vrD & vrJ & vrK { vrD = vrotr.w(vrD, vrJ, vrK); } define pcodeop vrotr.d; #lsx.txt vrotr.d mask=0x70ef8000 #0x70ef8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vrotr.d vrD, vrJ, vrK is op15_31=0xe1df & vrD & vrJ & vrK { vrD = vrotr.d(vrD, vrJ, vrK); } define pcodeop vsrlr.b; #lsx.txt vsrlr.b mask=0x70f00000 #0x70f00000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlr.b vrD, vrJ, vrK is op15_31=0xe1e0 & vrD & vrJ & vrK { vrD = vsrlr.b(vrD, vrJ, vrK); } define pcodeop vsrlr.h; #lsx.txt vsrlr.h mask=0x70f08000 #0x70f08000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlr.h vrD, vrJ, vrK is op15_31=0xe1e1 & vrD & vrJ & vrK { 
vrD = vsrlr.h(vrD, vrJ, vrK); } define pcodeop vsrlr.w; #lsx.txt vsrlr.w mask=0x70f10000 #0x70f10000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlr.w vrD, vrJ, vrK is op15_31=0xe1e2 & vrD & vrJ & vrK { vrD = vsrlr.w(vrD, vrJ, vrK); } define pcodeop vsrlr.d; #lsx.txt vsrlr.d mask=0x70f18000 #0x70f18000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlr.d vrD, vrJ, vrK is op15_31=0xe1e3 & vrD & vrJ & vrK { vrD = vsrlr.d(vrD, vrJ, vrK); } define pcodeop vsrar.b; #lsx.txt vsrar.b mask=0x70f20000 #0x70f20000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrar.b vrD, vrJ, vrK is op15_31=0xe1e4 & vrD & vrJ & vrK { vrD = vsrar.b(vrD, vrJ, vrK); } define pcodeop vsrar.h; #lsx.txt vsrar.h mask=0x70f28000 #0x70f28000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrar.h vrD, vrJ, vrK is op15_31=0xe1e5 & vrD & vrJ & vrK { vrD = vsrar.h(vrD, vrJ, vrK); } define pcodeop vsrar.w; #lsx.txt vsrar.w mask=0x70f30000 #0x70f30000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrar.w vrD, vrJ, vrK is op15_31=0xe1e6 & vrD & vrJ & vrK { vrD = vsrar.w(vrD, vrJ, vrK); } define pcodeop vsrar.d; #lsx.txt vsrar.d mask=0x70f38000 #0x70f38000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrar.d vrD, vrJ, vrK is op15_31=0xe1e7 & vrD & vrJ & vrK { vrD = vsrar.d(vrD, vrJ, vrK); } define pcodeop vsrln.b.h; #lsx.txt vsrln.b.h mask=0x70f48000 #0x70f48000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrln.b.h vrD, vrJ, vrK is op15_31=0xe1e9 & vrD & vrJ & vrK { vrD = vsrln.b.h(vrD, vrJ, vrK); } define pcodeop vsrln.h.w; #lsx.txt vsrln.h.w mask=0x70f50000 #0x70f50000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrln.h.w vrD, vrJ, vrK is op15_31=0xe1ea & vrD & vrJ & vrK { vrD = vsrln.h.w(vrD, vrJ, vrK); } define pcodeop vsrln.w.d; #lsx.txt vsrln.w.d mask=0x70f58000 #0x70f58000 0xffff8000 
v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrln.w.d vrD, vrJ, vrK is op15_31=0xe1eb & vrD & vrJ & vrK { vrD = vsrln.w.d(vrD, vrJ, vrK); } define pcodeop vsran.b.h; #lsx.txt vsran.b.h mask=0x70f68000 #0x70f68000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsran.b.h vrD, vrJ, vrK is op15_31=0xe1ed & vrD & vrJ & vrK { vrD = vsran.b.h(vrD, vrJ, vrK); } define pcodeop vsran.h.w; #lsx.txt vsran.h.w mask=0x70f70000 #0x70f70000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsran.h.w vrD, vrJ, vrK is op15_31=0xe1ee & vrD & vrJ & vrK { vrD = vsran.h.w(vrD, vrJ, vrK); } define pcodeop vsran.w.d; #lsx.txt vsran.w.d mask=0x70f78000 #0x70f78000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsran.w.d vrD, vrJ, vrK is op15_31=0xe1ef & vrD & vrJ & vrK { vrD = vsran.w.d(vrD, vrJ, vrK); } define pcodeop vsrlrn.b.h; #lsx.txt vsrlrn.b.h mask=0x70f88000 #0x70f88000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlrn.b.h vrD, vrJ, vrK is op15_31=0xe1f1 & vrD & vrJ & vrK { vrD = vsrlrn.b.h(vrD, vrJ, vrK); } define pcodeop vsrlrn.h.w; #lsx.txt vsrlrn.h.w mask=0x70f90000 #0x70f90000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlrn.h.w vrD, vrJ, vrK is op15_31=0xe1f2 & vrD & vrJ & vrK { vrD = vsrlrn.h.w(vrD, vrJ, vrK); } define pcodeop vsrlrn.w.d; #lsx.txt vsrlrn.w.d mask=0x70f98000 #0x70f98000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrlrn.w.d vrD, vrJ, vrK is op15_31=0xe1f3 & vrD & vrJ & vrK { vrD = vsrlrn.w.d(vrD, vrJ, vrK); } define pcodeop vsrarn.b.h; #lsx.txt vsrarn.b.h mask=0x70fa8000 #0x70fa8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrarn.b.h vrD, vrJ, vrK is op15_31=0xe1f5 & vrD & vrJ & vrK { vrD = vsrarn.b.h(vrD, vrJ, vrK); } define pcodeop vsrarn.h.w; #lsx.txt vsrarn.h.w mask=0x70fb0000 #0x70fb0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 
'vreg10_5_s0'] :vsrarn.h.w vrD, vrJ, vrK is op15_31=0xe1f6 & vrD & vrJ & vrK { vrD = vsrarn.h.w(vrD, vrJ, vrK); } define pcodeop vsrarn.w.d; #lsx.txt vsrarn.w.d mask=0x70fb8000 #0x70fb8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsrarn.w.d vrD, vrJ, vrK is op15_31=0xe1f7 & vrD & vrJ & vrK { vrD = vsrarn.w.d(vrD, vrJ, vrK); } define pcodeop vssrln.b.h; #lsx.txt vssrln.b.h mask=0x70fc8000 #0x70fc8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrln.b.h vrD, vrJ, vrK is op15_31=0xe1f9 & vrD & vrJ & vrK { vrD = vssrln.b.h(vrD, vrJ, vrK); } define pcodeop vssrln.h.w; #lsx.txt vssrln.h.w mask=0x70fd0000 #0x70fd0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrln.h.w vrD, vrJ, vrK is op15_31=0xe1fa & vrD & vrJ & vrK { vrD = vssrln.h.w(vrD, vrJ, vrK); } define pcodeop vssrln.w.d; #lsx.txt vssrln.w.d mask=0x70fd8000 #0x70fd8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrln.w.d vrD, vrJ, vrK is op15_31=0xe1fb & vrD & vrJ & vrK { vrD = vssrln.w.d(vrD, vrJ, vrK); } define pcodeop vssran.b.h; #lsx.txt vssran.b.h mask=0x70fe8000 #0x70fe8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssran.b.h vrD, vrJ, vrK is op15_31=0xe1fd & vrD & vrJ & vrK { vrD = vssran.b.h(vrD, vrJ, vrK); } define pcodeop vssran.h.w; #lsx.txt vssran.h.w mask=0x70ff0000 #0x70ff0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssran.h.w vrD, vrJ, vrK is op15_31=0xe1fe & vrD & vrJ & vrK { vrD = vssran.h.w(vrD, vrJ, vrK); } define pcodeop vssran.w.d; #lsx.txt vssran.w.d mask=0x70ff8000 #0x70ff8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssran.w.d vrD, vrJ, vrK is op15_31=0xe1ff & vrD & vrJ & vrK { vrD = vssran.w.d(vrD, vrJ, vrK); } define pcodeop vssrlrn.b.h; #lsx.txt vssrlrn.b.h mask=0x71008000 #0x71008000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrlrn.b.h vrD, vrJ, 
vrK is op15_31=0xe201 & vrD & vrJ & vrK { vrD = vssrlrn.b.h(vrD, vrJ, vrK); } define pcodeop vssrlrn.h.w; #lsx.txt vssrlrn.h.w mask=0x71010000 #0x71010000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrlrn.h.w vrD, vrJ, vrK is op15_31=0xe202 & vrD & vrJ & vrK { vrD = vssrlrn.h.w(vrD, vrJ, vrK); } define pcodeop vssrlrn.w.d; #lsx.txt vssrlrn.w.d mask=0x71018000 #0x71018000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrlrn.w.d vrD, vrJ, vrK is op15_31=0xe203 & vrD & vrJ & vrK { vrD = vssrlrn.w.d(vrD, vrJ, vrK); } define pcodeop vssrarn.b.h; #lsx.txt vssrarn.b.h mask=0x71028000 #0x71028000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrarn.b.h vrD, vrJ, vrK is op15_31=0xe205 & vrD & vrJ & vrK { vrD = vssrarn.b.h(vrD, vrJ, vrK); } define pcodeop vssrarn.h.w; #lsx.txt vssrarn.h.w mask=0x71030000 #0x71030000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrarn.h.w vrD, vrJ, vrK is op15_31=0xe206 & vrD & vrJ & vrK { vrD = vssrarn.h.w(vrD, vrJ, vrK); } define pcodeop vssrarn.w.d; #lsx.txt vssrarn.w.d mask=0x71038000 #0x71038000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrarn.w.d vrD, vrJ, vrK is op15_31=0xe207 & vrD & vrJ & vrK { vrD = vssrarn.w.d(vrD, vrJ, vrK); } define pcodeop vssrln.bu.h; #lsx.txt vssrln.bu.h mask=0x71048000 #0x71048000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrln.bu.h vrD, vrJ, vrK is op15_31=0xe209 & vrD & vrJ & vrK { vrD = vssrln.bu.h(vrD, vrJ, vrK); } define pcodeop vssrln.hu.w; #lsx.txt vssrln.hu.w mask=0x71050000 #0x71050000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrln.hu.w vrD, vrJ, vrK is op15_31=0xe20a & vrD & vrJ & vrK { vrD = vssrln.hu.w(vrD, vrJ, vrK); } define pcodeop vssrln.wu.d; #lsx.txt vssrln.wu.d mask=0x71058000 #0x71058000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrln.wu.d vrD, vrJ, vrK is 
op15_31=0xe20b & vrD & vrJ & vrK { vrD = vssrln.wu.d(vrD, vrJ, vrK); } define pcodeop vssran.bu.h; #lsx.txt vssran.bu.h mask=0x71068000 #0x71068000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssran.bu.h vrD, vrJ, vrK is op15_31=0xe20d & vrD & vrJ & vrK { vrD = vssran.bu.h(vrD, vrJ, vrK); } define pcodeop vssran.hu.w; #lsx.txt vssran.hu.w mask=0x71070000 #0x71070000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssran.hu.w vrD, vrJ, vrK is op15_31=0xe20e & vrD & vrJ & vrK { vrD = vssran.hu.w(vrD, vrJ, vrK); } define pcodeop vssran.wu.d; #lsx.txt vssran.wu.d mask=0x71078000 #0x71078000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssran.wu.d vrD, vrJ, vrK is op15_31=0xe20f & vrD & vrJ & vrK { vrD = vssran.wu.d(vrD, vrJ, vrK); } define pcodeop vssrlrn.bu.h; #lsx.txt vssrlrn.bu.h mask=0x71088000 #0x71088000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrlrn.bu.h vrD, vrJ, vrK is op15_31=0xe211 & vrD & vrJ & vrK { vrD = vssrlrn.bu.h(vrD, vrJ, vrK); } define pcodeop vssrlrn.hu.w; #lsx.txt vssrlrn.hu.w mask=0x71090000 #0x71090000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrlrn.hu.w vrD, vrJ, vrK is op15_31=0xe212 & vrD & vrJ & vrK { vrD = vssrlrn.hu.w(vrD, vrJ, vrK); } define pcodeop vssrlrn.wu.d; #lsx.txt vssrlrn.wu.d mask=0x71098000 #0x71098000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrlrn.wu.d vrD, vrJ, vrK is op15_31=0xe213 & vrD & vrJ & vrK { vrD = vssrlrn.wu.d(vrD, vrJ, vrK); } define pcodeop vssrarn.bu.h; #lsx.txt vssrarn.bu.h mask=0x710a8000 #0x710a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrarn.bu.h vrD, vrJ, vrK is op15_31=0xe215 & vrD & vrJ & vrK { vrD = vssrarn.bu.h(vrD, vrJ, vrK); } define pcodeop vssrarn.hu.w; #lsx.txt vssrarn.hu.w mask=0x710b0000 #0x710b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrarn.hu.w vrD, 
vrJ, vrK is op15_31=0xe216 & vrD & vrJ & vrK { vrD = vssrarn.hu.w(vrD, vrJ, vrK); } define pcodeop vssrarn.wu.d; #lsx.txt vssrarn.wu.d mask=0x710b8000 #0x710b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vssrarn.wu.d vrD, vrJ, vrK is op15_31=0xe217 & vrD & vrJ & vrK { vrD = vssrarn.wu.d(vrD, vrJ, vrK); } define pcodeop vbitclr.b; #lsx.txt vbitclr.b mask=0x710c0000 #0x710c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitclr.b vrD, vrJ, vrK is op15_31=0xe218 & vrD & vrJ & vrK { vrD = vbitclr.b(vrD, vrJ, vrK); } define pcodeop vbitclr.h; #lsx.txt vbitclr.h mask=0x710c8000 #0x710c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitclr.h vrD, vrJ, vrK is op15_31=0xe219 & vrD & vrJ & vrK { vrD = vbitclr.h(vrD, vrJ, vrK); } define pcodeop vbitclr.w; #lsx.txt vbitclr.w mask=0x710d0000 #0x710d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitclr.w vrD, vrJ, vrK is op15_31=0xe21a & vrD & vrJ & vrK { vrD = vbitclr.w(vrD, vrJ, vrK); } define pcodeop vbitclr.d; #lsx.txt vbitclr.d mask=0x710d8000 #0x710d8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitclr.d vrD, vrJ, vrK is op15_31=0xe21b & vrD & vrJ & vrK { vrD = vbitclr.d(vrD, vrJ, vrK); } define pcodeop vbitset.b; #lsx.txt vbitset.b mask=0x710e0000 #0x710e0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitset.b vrD, vrJ, vrK is op15_31=0xe21c & vrD & vrJ & vrK { vrD = vbitset.b(vrD, vrJ, vrK); } define pcodeop vbitset.h; #lsx.txt vbitset.h mask=0x710e8000 #0x710e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitset.h vrD, vrJ, vrK is op15_31=0xe21d & vrD & vrJ & vrK { vrD = vbitset.h(vrD, vrJ, vrK); } define pcodeop vbitset.w; #lsx.txt vbitset.w mask=0x710f0000 #0x710f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitset.w vrD, vrJ, vrK is op15_31=0xe21e & vrD & vrJ & vrK { vrD = 
vbitset.w(vrD, vrJ, vrK); } define pcodeop vbitset.d; #lsx.txt vbitset.d mask=0x710f8000 #0x710f8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitset.d vrD, vrJ, vrK is op15_31=0xe21f & vrD & vrJ & vrK { vrD = vbitset.d(vrD, vrJ, vrK); } define pcodeop vbitrev.b; #lsx.txt vbitrev.b mask=0x71100000 #0x71100000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitrev.b vrD, vrJ, vrK is op15_31=0xe220 & vrD & vrJ & vrK { vrD = vbitrev.b(vrD, vrJ, vrK); } define pcodeop vbitrev.h; #lsx.txt vbitrev.h mask=0x71108000 #0x71108000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitrev.h vrD, vrJ, vrK is op15_31=0xe221 & vrD & vrJ & vrK { vrD = vbitrev.h(vrD, vrJ, vrK); } define pcodeop vbitrev.w; #lsx.txt vbitrev.w mask=0x71110000 #0x71110000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitrev.w vrD, vrJ, vrK is op15_31=0xe222 & vrD & vrJ & vrK { vrD = vbitrev.w(vrD, vrJ, vrK); } define pcodeop vbitrev.d; #lsx.txt vbitrev.d mask=0x71118000 #0x71118000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vbitrev.d vrD, vrJ, vrK is op15_31=0xe223 & vrD & vrJ & vrK { vrD = vbitrev.d(vrD, vrJ, vrK); } define pcodeop vpackev.b; #lsx.txt vpackev.b mask=0x71160000 #0x71160000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackev.b vrD, vrJ, vrK is op15_31=0xe22c & vrD & vrJ & vrK { vrD = vpackev.b(vrD, vrJ, vrK); } define pcodeop vpackev.h; #lsx.txt vpackev.h mask=0x71168000 #0x71168000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackev.h vrD, vrJ, vrK is op15_31=0xe22d & vrD & vrJ & vrK { vrD = vpackev.h(vrD, vrJ, vrK); } define pcodeop vpackev.w; #lsx.txt vpackev.w mask=0x71170000 #0x71170000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackev.w vrD, vrJ, vrK is op15_31=0xe22e & vrD & vrJ & vrK { vrD = vpackev.w(vrD, vrJ, vrK); } define pcodeop vpackev.d; #lsx.txt 
vpackev.d mask=0x71178000 #0x71178000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackev.d vrD, vrJ, vrK is op15_31=0xe22f & vrD & vrJ & vrK { vrD = vpackev.d(vrD, vrJ, vrK); } define pcodeop vpackod.b; #lsx.txt vpackod.b mask=0x71180000 #0x71180000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackod.b vrD, vrJ, vrK is op15_31=0xe230 & vrD & vrJ & vrK { vrD = vpackod.b(vrD, vrJ, vrK); } define pcodeop vpackod.h; #lsx.txt vpackod.h mask=0x71188000 #0x71188000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackod.h vrD, vrJ, vrK is op15_31=0xe231 & vrD & vrJ & vrK { vrD = vpackod.h(vrD, vrJ, vrK); } define pcodeop vpackod.w; #lsx.txt vpackod.w mask=0x71190000 #0x71190000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackod.w vrD, vrJ, vrK is op15_31=0xe232 & vrD & vrJ & vrK { vrD = vpackod.w(vrD, vrJ, vrK); } define pcodeop vpackod.d; #lsx.txt vpackod.d mask=0x71198000 #0x71198000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpackod.d vrD, vrJ, vrK is op15_31=0xe233 & vrD & vrJ & vrK { vrD = vpackod.d(vrD, vrJ, vrK); } define pcodeop vilvl.b; #lsx.txt vilvl.b mask=0x711a0000 #0x711a0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvl.b vrD, vrJ, vrK is op15_31=0xe234 & vrD & vrJ & vrK { vrD = vilvl.b(vrD, vrJ, vrK); } define pcodeop vilvl.h; #lsx.txt vilvl.h mask=0x711a8000 #0x711a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvl.h vrD, vrJ, vrK is op15_31=0xe235 & vrD & vrJ & vrK { vrD = vilvl.h(vrD, vrJ, vrK); } define pcodeop vilvl.w; #lsx.txt vilvl.w mask=0x711b0000 #0x711b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvl.w vrD, vrJ, vrK is op15_31=0xe236 & vrD & vrJ & vrK { vrD = vilvl.w(vrD, vrJ, vrK); } define pcodeop vilvl.d; #lsx.txt vilvl.d mask=0x711b8000 #0x711b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 
'vreg10_5_s0'] :vilvl.d vrD, vrJ, vrK is op15_31=0xe237 & vrD & vrJ & vrK { vrD = vilvl.d(vrD, vrJ, vrK); } define pcodeop vilvh.b; #lsx.txt vilvh.b mask=0x711c0000 #0x711c0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvh.b vrD, vrJ, vrK is op15_31=0xe238 & vrD & vrJ & vrK { vrD = vilvh.b(vrD, vrJ, vrK); } define pcodeop vilvh.h; #lsx.txt vilvh.h mask=0x711c8000 #0x711c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvh.h vrD, vrJ, vrK is op15_31=0xe239 & vrD & vrJ & vrK { vrD = vilvh.h(vrD, vrJ, vrK); } define pcodeop vilvh.w; #lsx.txt vilvh.w mask=0x711d0000 #0x711d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvh.w vrD, vrJ, vrK is op15_31=0xe23a & vrD & vrJ & vrK { vrD = vilvh.w(vrD, vrJ, vrK); } define pcodeop vilvh.d; #lsx.txt vilvh.d mask=0x711d8000 #0x711d8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vilvh.d vrD, vrJ, vrK is op15_31=0xe23b & vrD & vrJ & vrK { vrD = vilvh.d(vrD, vrJ, vrK); } define pcodeop vpickev.b; #lsx.txt vpickev.b mask=0x711e0000 #0x711e0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickev.b vrD, vrJ, vrK is op15_31=0xe23c & vrD & vrJ & vrK { vrD = vpickev.b(vrD, vrJ, vrK); } define pcodeop vpickev.h; #lsx.txt vpickev.h mask=0x711e8000 #0x711e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickev.h vrD, vrJ, vrK is op15_31=0xe23d & vrD & vrJ & vrK { vrD = vpickev.h(vrD, vrJ, vrK); } define pcodeop vpickev.w; #lsx.txt vpickev.w mask=0x711f0000 #0x711f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickev.w vrD, vrJ, vrK is op15_31=0xe23e & vrD & vrJ & vrK { vrD = vpickev.w(vrD, vrJ, vrK); } define pcodeop vpickev.d; #lsx.txt vpickev.d mask=0x711f8000 #0x711f8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickev.d vrD, vrJ, vrK is op15_31=0xe23f & vrD & vrJ & vrK { vrD = vpickev.d(vrD, vrJ, 
vrK); } define pcodeop vpickod.b; #lsx.txt vpickod.b mask=0x71200000 #0x71200000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickod.b vrD, vrJ, vrK is op15_31=0xe240 & vrD & vrJ & vrK { vrD = vpickod.b(vrD, vrJ, vrK); } define pcodeop vpickod.h; #lsx.txt vpickod.h mask=0x71208000 #0x71208000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickod.h vrD, vrJ, vrK is op15_31=0xe241 & vrD & vrJ & vrK { vrD = vpickod.h(vrD, vrJ, vrK); } define pcodeop vpickod.w; #lsx.txt vpickod.w mask=0x71210000 #0x71210000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickod.w vrD, vrJ, vrK is op15_31=0xe242 & vrD & vrJ & vrK { vrD = vpickod.w(vrD, vrJ, vrK); } define pcodeop vpickod.d; #lsx.txt vpickod.d mask=0x71218000 #0x71218000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vpickod.d vrD, vrJ, vrK is op15_31=0xe243 & vrD & vrJ & vrK { vrD = vpickod.d(vrD, vrJ, vrK); } define pcodeop vreplve.b; #lsx.txt vreplve.b mask=0x71220000 #0x71220000 0xffff8000 v0:5,v5:5, r10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'reg10_5_s0'] :vreplve.b vrD, vrJ, RKsrc is op15_31=0xe244 & vrD & vrJ & RKsrc { vrD = vreplve.b(vrD, vrJ, RKsrc); } define pcodeop vreplve.h; #lsx.txt vreplve.h mask=0x71228000 #0x71228000 0xffff8000 v0:5,v5:5, r10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'reg10_5_s0'] :vreplve.h vrD, vrJ, RKsrc is op15_31=0xe245 & vrD & vrJ & RKsrc { vrD = vreplve.h(vrD, vrJ, RKsrc); } define pcodeop vreplve.w; #lsx.txt vreplve.w mask=0x71230000 #0x71230000 0xffff8000 v0:5,v5:5, r10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'reg10_5_s0'] :vreplve.w vrD, vrJ, RKsrc is op15_31=0xe246 & vrD & vrJ & RKsrc { vrD = vreplve.w(vrD, vrJ, RKsrc); } define pcodeop vreplve.d; #lsx.txt vreplve.d mask=0x71238000 #0x71238000 0xffff8000 v0:5,v5:5, r10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'reg10_5_s0'] :vreplve.d vrD, vrJ, RKsrc is op15_31=0xe247 & vrD & vrJ & RKsrc { vrD = vreplve.d(vrD, vrJ, RKsrc); } define pcodeop vand.v; #lsx.txt vand.v 
mask=0x71260000 #0x71260000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vand.v vrD, vrJ, vrK is op15_31=0xe24c & vrD & vrJ & vrK { vrD = vand.v(vrD, vrJ, vrK); } define pcodeop vor.v; #lsx.txt vor.v mask=0x71268000 #0x71268000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vor.v vrD, vrJ, vrK is op15_31=0xe24d & vrD & vrJ & vrK { vrD = vor.v(vrD, vrJ, vrK); } define pcodeop vxor.v; #lsx.txt vxor.v mask=0x71270000 #0x71270000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vxor.v vrD, vrJ, vrK is op15_31=0xe24e & vrD & vrJ & vrK { vrD = vxor.v(vrD, vrJ, vrK); } define pcodeop vnor.v; #lsx.txt vnor.v mask=0x71278000 #0x71278000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vnor.v vrD, vrJ, vrK is op15_31=0xe24f & vrD & vrJ & vrK { vrD = vnor.v(vrD, vrJ, vrK); } define pcodeop vandn.v; #lsx.txt vandn.v mask=0x71280000 #0x71280000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vandn.v vrD, vrJ, vrK is op15_31=0xe250 & vrD & vrJ & vrK { vrD = vandn.v(vrD, vrJ, vrK); } define pcodeop vorn.v; #lsx.txt vorn.v mask=0x71288000 #0x71288000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vorn.v vrD, vrJ, vrK is op15_31=0xe251 & vrD & vrJ & vrK { vrD = vorn.v(vrD, vrJ, vrK); } define pcodeop vfrstp.b; #lsx.txt vfrstp.b mask=0x712b0000 #0x712b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfrstp.b vrD, vrJ, vrK is op15_31=0xe256 & vrD & vrJ & vrK { vrD = vfrstp.b(vrD, vrJ, vrK); } define pcodeop vfrstp.h; #lsx.txt vfrstp.h mask=0x712b8000 #0x712b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfrstp.h vrD, vrJ, vrK is op15_31=0xe257 & vrD & vrJ & vrK { vrD = vfrstp.h(vrD, vrJ, vrK); } define pcodeop vadd.q; #lsx.txt vadd.q mask=0x712d0000 #0x712d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vadd.q vrD, vrJ, vrK is op15_31=0xe25a & vrD & vrJ 
& vrK { vrD = vadd.q(vrD, vrJ, vrK); } define pcodeop vsub.q; #lsx.txt vsub.q mask=0x712d8000 #0x712d8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsub.q vrD, vrJ, vrK is op15_31=0xe25b & vrD & vrJ & vrK { vrD = vsub.q(vrD, vrJ, vrK); } define pcodeop vsigncov.b; #lsx.txt vsigncov.b mask=0x712e0000 #0x712e0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsigncov.b vrD, vrJ, vrK is op15_31=0xe25c & vrD & vrJ & vrK { vrD = vsigncov.b(vrD, vrJ, vrK); } define pcodeop vsigncov.h; #lsx.txt vsigncov.h mask=0x712e8000 #0x712e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsigncov.h vrD, vrJ, vrK is op15_31=0xe25d & vrD & vrJ & vrK { vrD = vsigncov.h(vrD, vrJ, vrK); } define pcodeop vsigncov.w; #lsx.txt vsigncov.w mask=0x712f0000 #0x712f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsigncov.w vrD, vrJ, vrK is op15_31=0xe25e & vrD & vrJ & vrK { vrD = vsigncov.w(vrD, vrJ, vrK); } define pcodeop vsigncov.d; #lsx.txt vsigncov.d mask=0x712f8000 #0x712f8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vsigncov.d vrD, vrJ, vrK is op15_31=0xe25f & vrD & vrJ & vrK { vrD = vsigncov.d(vrD, vrJ, vrK); } define pcodeop vfadd.s; #lsx.txt vfadd.s mask=0x71308000 #0x71308000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfadd.s vrD, vrJ, vrK is op15_31=0xe261 & vrD & vrJ & vrK { vrD = vfadd.s(vrD, vrJ, vrK); } define pcodeop vfadd.d; #lsx.txt vfadd.d mask=0x71310000 #0x71310000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfadd.d vrD, vrJ, vrK is op15_31=0xe262 & vrD & vrJ & vrK { vrD = vfadd.d(vrD, vrJ, vrK); } define pcodeop vfsub.s; #lsx.txt vfsub.s mask=0x71328000 #0x71328000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfsub.s vrD, vrJ, vrK is op15_31=0xe265 & vrD & vrJ & vrK { vrD = vfsub.s(vrD, vrJ, vrK); } define pcodeop vfsub.d; #lsx.txt vfsub.d 
mask=0x71330000 #0x71330000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfsub.d vrD, vrJ, vrK is op15_31=0xe266 & vrD & vrJ & vrK { vrD = vfsub.d(vrD, vrJ, vrK); } define pcodeop vfmul.s; #lsx.txt vfmul.s mask=0x71388000 #0x71388000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmul.s vrD, vrJ, vrK is op15_31=0xe271 & vrD & vrJ & vrK { vrD = vfmul.s(vrD, vrJ, vrK); } define pcodeop vfmul.d; #lsx.txt vfmul.d mask=0x71390000 #0x71390000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmul.d vrD, vrJ, vrK is op15_31=0xe272 & vrD & vrJ & vrK { vrD = vfmul.d(vrD, vrJ, vrK); } define pcodeop vfdiv.s; #lsx.txt vfdiv.s mask=0x713a8000 #0x713a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfdiv.s vrD, vrJ, vrK is op15_31=0xe275 & vrD & vrJ & vrK { vrD = vfdiv.s(vrD, vrJ, vrK); } define pcodeop vfdiv.d; #lsx.txt vfdiv.d mask=0x713b0000 #0x713b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfdiv.d vrD, vrJ, vrK is op15_31=0xe276 & vrD & vrJ & vrK { vrD = vfdiv.d(vrD, vrJ, vrK); } define pcodeop vfmax.s; #lsx.txt vfmax.s mask=0x713c8000 #0x713c8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmax.s vrD, vrJ, vrK is op15_31=0xe279 & vrD & vrJ & vrK { vrD = vfmax.s(vrD, vrJ, vrK); } define pcodeop vfmax.d; #lsx.txt vfmax.d mask=0x713d0000 #0x713d0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmax.d vrD, vrJ, vrK is op15_31=0xe27a & vrD & vrJ & vrK { vrD = vfmax.d(vrD, vrJ, vrK); } define pcodeop vfmin.s; #lsx.txt vfmin.s mask=0x713e8000 #0x713e8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmin.s vrD, vrJ, vrK is op15_31=0xe27d & vrD & vrJ & vrK { vrD = vfmin.s(vrD, vrJ, vrK); } define pcodeop vfmin.d; #lsx.txt vfmin.d mask=0x713f0000 #0x713f0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmin.d vrD, vrJ, vrK is 
op15_31=0xe27e & vrD & vrJ & vrK { vrD = vfmin.d(vrD, vrJ, vrK); } define pcodeop vfmaxa.s; #lsx.txt vfmaxa.s mask=0x71408000 #0x71408000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmaxa.s vrD, vrJ, vrK is op15_31=0xe281 & vrD & vrJ & vrK { vrD = vfmaxa.s(vrD, vrJ, vrK); } define pcodeop vfmaxa.d; #lsx.txt vfmaxa.d mask=0x71410000 #0x71410000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmaxa.d vrD, vrJ, vrK is op15_31=0xe282 & vrD & vrJ & vrK { vrD = vfmaxa.d(vrD, vrJ, vrK); } define pcodeop vfmina.s; #lsx.txt vfmina.s mask=0x71428000 #0x71428000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmina.s vrD, vrJ, vrK is op15_31=0xe285 & vrD & vrJ & vrK { vrD = vfmina.s(vrD, vrJ, vrK); } define pcodeop vfmina.d; #lsx.txt vfmina.d mask=0x71430000 #0x71430000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfmina.d vrD, vrJ, vrK is op15_31=0xe286 & vrD & vrJ & vrK { vrD = vfmina.d(vrD, vrJ, vrK); } define pcodeop vfcvt.h.s; #lsx.txt vfcvt.h.s mask=0x71460000 #0x71460000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcvt.h.s vrD, vrJ, vrK is op15_31=0xe28c & vrD & vrJ & vrK { vrD = vfcvt.h.s(vrD, vrJ, vrK); } define pcodeop vfcvt.s.d; #lsx.txt vfcvt.s.d mask=0x71468000 #0x71468000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vfcvt.s.d vrD, vrJ, vrK is op15_31=0xe28d & vrD & vrJ & vrK { vrD = vfcvt.s.d(vrD, vrJ, vrK); } define pcodeop vffint.s.l; #lsx.txt vffint.s.l mask=0x71480000 #0x71480000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vffint.s.l vrD, vrJ, vrK is op15_31=0xe290 & vrD & vrJ & vrK { vrD = vffint.s.l(vrD, vrJ, vrK); } define pcodeop vftint.w.d; #lsx.txt vftint.w.d mask=0x71498000 #0x71498000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vftint.w.d vrD, vrJ, vrK is op15_31=0xe293 & vrD & vrJ & vrK { vrD = vftint.w.d(vrD, vrJ, vrK); } define 
pcodeop vftintrm.w.d; #lsx.txt vftintrm.w.d mask=0x714a0000 #0x714a0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vftintrm.w.d vrD, vrJ, vrK is op15_31=0xe294 & vrD & vrJ & vrK { vrD = vftintrm.w.d(vrD, vrJ, vrK); } define pcodeop vftintrp.w.d; #lsx.txt vftintrp.w.d mask=0x714a8000 #0x714a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vftintrp.w.d vrD, vrJ, vrK is op15_31=0xe295 & vrD & vrJ & vrK { vrD = vftintrp.w.d(vrD, vrJ, vrK); } define pcodeop vftintrz.w.d; #lsx.txt vftintrz.w.d mask=0x714b0000 #0x714b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vftintrz.w.d vrD, vrJ, vrK is op15_31=0xe296 & vrD & vrJ & vrK { vrD = vftintrz.w.d(vrD, vrJ, vrK); } define pcodeop vftintrne.w.d; #lsx.txt vftintrne.w.d mask=0x714b8000 #0x714b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vftintrne.w.d vrD, vrJ, vrK is op15_31=0xe297 & vrD & vrJ & vrK { vrD = vftintrne.w.d(vrD, vrJ, vrK); } define pcodeop vshuf.h; #lsx.txt vshuf.h mask=0x717a8000 #0x717a8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vshuf.h vrD, vrJ, vrK is op15_31=0xe2f5 & vrD & vrJ & vrK { vrD = vshuf.h(vrD, vrJ, vrK); } define pcodeop vshuf.w; #lsx.txt vshuf.w mask=0x717b0000 #0x717b0000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vshuf.w vrD, vrJ, vrK is op15_31=0xe2f6 & vrD & vrJ & vrK { vrD = vshuf.w(vrD, vrJ, vrK); } define pcodeop vshuf.d; #lsx.txt vshuf.d mask=0x717b8000 #0x717b8000 0xffff8000 v0:5,v5:5,v10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'vreg10_5_s0'] :vshuf.d vrD, vrJ, vrK is op15_31=0xe2f7 & vrD & vrJ & vrK { vrD = vshuf.d(vrD, vrJ, vrK); } define pcodeop vseqi.b; #lsx.txt vseqi.b mask=0x72800000 #0x72800000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0'] :vseqi.b vrD, vrJ, simm10_5 is op15_31=0xe500 & vrD & vrJ & simm10_5 { vrD = vseqi.b(vrD, vrJ, simm10_5:$(REGSIZE)); } define pcodeop vseqi.h; 
# LSX vector-immediate compare (vseqi/vslei/vslti), immediate add/sub
# (vaddi/vsubi), byte-shift (vbsll/vbsrl), min/max-immediate
# (vmaxi/vmini) and vfrstpi instructions, modeled as opaque pcodeop
# stubs. Each constructor matches the op15_31 opcode field and forwards
# the 5-bit immediate (signed simm10_5 or unsigned imm10_5, extended to
# $(REGSIZE)) to the pcodeop. The "#lsx.txt" comment lines carry the
# generator's encoding metadata and are kept verbatim.
#lsx.txt vseqi.h mask=0x72808000 #0x72808000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vseqi.h vrD, vrJ, simm10_5 is op15_31=0xe501 & vrD & vrJ & simm10_5 { vrD = vseqi.h(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vseqi.w;
#lsx.txt vseqi.w mask=0x72810000 #0x72810000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vseqi.w vrD, vrJ, simm10_5 is op15_31=0xe502 & vrD & vrJ & simm10_5 { vrD = vseqi.w(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vseqi.d;
#lsx.txt vseqi.d mask=0x72818000 #0x72818000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vseqi.d vrD, vrJ, simm10_5 is op15_31=0xe503 & vrD & vrJ & simm10_5 { vrD = vseqi.d(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslei.b;
#lsx.txt vslei.b mask=0x72820000 #0x72820000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslei.b vrD, vrJ, simm10_5 is op15_31=0xe504 & vrD & vrJ & simm10_5 { vrD = vslei.b(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslei.h;
#lsx.txt vslei.h mask=0x72828000 #0x72828000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslei.h vrD, vrJ, simm10_5 is op15_31=0xe505 & vrD & vrJ & simm10_5 { vrD = vslei.h(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslei.w;
#lsx.txt vslei.w mask=0x72830000 #0x72830000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslei.w vrD, vrJ, simm10_5 is op15_31=0xe506 & vrD & vrJ & simm10_5 { vrD = vslei.w(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslei.d;
#lsx.txt vslei.d mask=0x72838000 #0x72838000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslei.d vrD, vrJ, simm10_5 is op15_31=0xe507 & vrD & vrJ & simm10_5 { vrD = vslei.d(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslei.bu;
#lsx.txt vslei.bu mask=0x72840000 #0x72840000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslei.bu vrD, vrJ, imm10_5 is op15_31=0xe508 & vrD & vrJ & imm10_5 { vrD = vslei.bu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslei.hu;
#lsx.txt vslei.hu mask=0x72848000 #0x72848000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslei.hu vrD, vrJ, imm10_5 is op15_31=0xe509 & vrD & vrJ & imm10_5 { vrD = vslei.hu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslei.wu;
#lsx.txt vslei.wu mask=0x72850000 #0x72850000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslei.wu vrD, vrJ, imm10_5 is op15_31=0xe50a & vrD & vrJ & imm10_5 { vrD = vslei.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslei.du;
#lsx.txt vslei.du mask=0x72858000 #0x72858000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslei.du vrD, vrJ, imm10_5 is op15_31=0xe50b & vrD & vrJ & imm10_5 { vrD = vslei.du(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslti.b;
#lsx.txt vslti.b mask=0x72860000 #0x72860000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslti.b vrD, vrJ, simm10_5 is op15_31=0xe50c & vrD & vrJ & simm10_5 { vrD = vslti.b(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslti.h;
#lsx.txt vslti.h mask=0x72868000 #0x72868000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslti.h vrD, vrJ, simm10_5 is op15_31=0xe50d & vrD & vrJ & simm10_5 { vrD = vslti.h(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslti.w;
#lsx.txt vslti.w mask=0x72870000 #0x72870000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslti.w vrD, vrJ, simm10_5 is op15_31=0xe50e & vrD & vrJ & simm10_5 { vrD = vslti.w(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslti.d;
#lsx.txt vslti.d mask=0x72878000 #0x72878000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vslti.d vrD, vrJ, simm10_5 is op15_31=0xe50f & vrD & vrJ & simm10_5 { vrD = vslti.d(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vslti.bu;
#lsx.txt vslti.bu mask=0x72880000 #0x72880000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslti.bu vrD, vrJ, imm10_5 is op15_31=0xe510 & vrD & vrJ & imm10_5 { vrD = vslti.bu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslti.hu;
#lsx.txt vslti.hu mask=0x72888000 #0x72888000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslti.hu vrD, vrJ, imm10_5 is op15_31=0xe511 & vrD & vrJ & imm10_5 { vrD = vslti.hu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslti.wu;
#lsx.txt vslti.wu mask=0x72890000 #0x72890000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslti.wu vrD, vrJ, imm10_5 is op15_31=0xe512 & vrD & vrJ & imm10_5 { vrD = vslti.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vslti.du;
#lsx.txt vslti.du mask=0x72898000 #0x72898000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslti.du vrD, vrJ, imm10_5 is op15_31=0xe513 & vrD & vrJ & imm10_5 { vrD = vslti.du(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vaddi.bu;
#lsx.txt vaddi.bu mask=0x728a0000 #0x728a0000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vaddi.bu vrD, vrJ, imm10_5 is op15_31=0xe514 & vrD & vrJ & imm10_5 { vrD = vaddi.bu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vaddi.hu;
#lsx.txt vaddi.hu mask=0x728a8000 #0x728a8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vaddi.hu vrD, vrJ, imm10_5 is op15_31=0xe515 & vrD & vrJ & imm10_5 { vrD = vaddi.hu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vaddi.wu;
#lsx.txt vaddi.wu mask=0x728b0000 #0x728b0000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vaddi.wu vrD, vrJ, imm10_5 is op15_31=0xe516 & vrD & vrJ & imm10_5 { vrD = vaddi.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vaddi.du;
#lsx.txt vaddi.du mask=0x728b8000 #0x728b8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vaddi.du vrD, vrJ, imm10_5 is op15_31=0xe517 & vrD & vrJ & imm10_5 { vrD = vaddi.du(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vsubi.bu;
#lsx.txt vsubi.bu mask=0x728c0000 #0x728c0000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsubi.bu vrD, vrJ, imm10_5 is op15_31=0xe518 & vrD & vrJ & imm10_5 { vrD = vsubi.bu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vsubi.hu;
#lsx.txt vsubi.hu mask=0x728c8000 #0x728c8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsubi.hu vrD, vrJ, imm10_5 is op15_31=0xe519 & vrD & vrJ & imm10_5 { vrD = vsubi.hu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vsubi.wu;
#lsx.txt vsubi.wu mask=0x728d0000 #0x728d0000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsubi.wu vrD, vrJ, imm10_5 is op15_31=0xe51a & vrD & vrJ & imm10_5 { vrD = vsubi.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vsubi.du;
#lsx.txt vsubi.du mask=0x728d8000 #0x728d8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsubi.du vrD, vrJ, imm10_5 is op15_31=0xe51b & vrD & vrJ & imm10_5 { vrD = vsubi.du(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vbsll.v;
#lsx.txt vbsll.v mask=0x728e0000 #0x728e0000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vbsll.v vrD, vrJ, imm10_5 is op15_31=0xe51c & vrD & vrJ & imm10_5 { vrD = vbsll.v(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vbsrl.v;
#lsx.txt vbsrl.v mask=0x728e8000 #0x728e8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vbsrl.v vrD, vrJ, imm10_5 is op15_31=0xe51d & vrD & vrJ & imm10_5 { vrD = vbsrl.v(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmaxi.b;
#lsx.txt vmaxi.b mask=0x72900000 #0x72900000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmaxi.b vrD, vrJ, simm10_5 is op15_31=0xe520 & vrD & vrJ & simm10_5 { vrD = vmaxi.b(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmaxi.h;
#lsx.txt vmaxi.h mask=0x72908000 #0x72908000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmaxi.h vrD, vrJ, simm10_5 is op15_31=0xe521 & vrD & vrJ & simm10_5 { vrD = vmaxi.h(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmaxi.w;
#lsx.txt vmaxi.w mask=0x72910000 #0x72910000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmaxi.w vrD, vrJ, simm10_5 is op15_31=0xe522 & vrD & vrJ & simm10_5 { vrD = vmaxi.w(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmaxi.d;
#lsx.txt vmaxi.d mask=0x72918000 #0x72918000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmaxi.d vrD, vrJ, simm10_5 is op15_31=0xe523 & vrD & vrJ & simm10_5 { vrD = vmaxi.d(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmini.b;
#lsx.txt vmini.b mask=0x72920000 #0x72920000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmini.b vrD, vrJ, simm10_5 is op15_31=0xe524 & vrD & vrJ & simm10_5 { vrD = vmini.b(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmini.h;
#lsx.txt vmini.h mask=0x72928000 #0x72928000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmini.h vrD, vrJ, simm10_5 is op15_31=0xe525 & vrD & vrJ & simm10_5 { vrD = vmini.h(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmini.w;
#lsx.txt vmini.w mask=0x72930000 #0x72930000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmini.w vrD, vrJ, simm10_5 is op15_31=0xe526 & vrD & vrJ & simm10_5 { vrD = vmini.w(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmini.d;
#lsx.txt vmini.d mask=0x72938000 #0x72938000 0xffff8000 v0:5,v5:5, s10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'simm10_5_s0']
:vmini.d vrD, vrJ, simm10_5 is op15_31=0xe527 & vrD & vrJ & simm10_5 { vrD = vmini.d(vrD, vrJ, simm10_5:$(REGSIZE)); }
define pcodeop vmaxi.bu;
#lsx.txt vmaxi.bu mask=0x72940000 #0x72940000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmaxi.bu vrD, vrJ, imm10_5 is op15_31=0xe528 & vrD & vrJ & imm10_5 { vrD = vmaxi.bu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmaxi.hu;
#lsx.txt vmaxi.hu mask=0x72948000 #0x72948000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmaxi.hu vrD, vrJ, imm10_5 is op15_31=0xe529 & vrD & vrJ & imm10_5 { vrD = vmaxi.hu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmaxi.wu;
#lsx.txt vmaxi.wu mask=0x72950000 #0x72950000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmaxi.wu vrD, vrJ, imm10_5 is op15_31=0xe52a & vrD & vrJ & imm10_5 { vrD = vmaxi.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmaxi.du;
#lsx.txt vmaxi.du mask=0x72958000 #0x72958000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmaxi.du vrD, vrJ, imm10_5 is op15_31=0xe52b & vrD & vrJ & imm10_5 { vrD = vmaxi.du(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmini.bu;
#lsx.txt vmini.bu mask=0x72960000 #0x72960000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmini.bu vrD, vrJ, imm10_5 is op15_31=0xe52c & vrD & vrJ & imm10_5 { vrD = vmini.bu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmini.hu;
#lsx.txt vmini.hu mask=0x72968000 #0x72968000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmini.hu vrD, vrJ, imm10_5 is op15_31=0xe52d & vrD & vrJ & imm10_5 { vrD = vmini.hu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmini.wu;
#lsx.txt vmini.wu mask=0x72970000 #0x72970000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmini.wu vrD, vrJ, imm10_5 is op15_31=0xe52e & vrD & vrJ & imm10_5 { vrD = vmini.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vmini.du;
#lsx.txt vmini.du mask=0x72978000 #0x72978000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vmini.du vrD, vrJ, imm10_5 is op15_31=0xe52f & vrD & vrJ & imm10_5 { vrD = vmini.du(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vfrstpi.b;
#lsx.txt vfrstpi.b mask=0x729a0000 #0x729a0000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vfrstpi.b vrD, vrJ, imm10_5 is op15_31=0xe534 & vrD & vrJ & imm10_5 { vrD = vfrstpi.b(vrD, vrJ, imm10_5:$(REGSIZE)); }
define pcodeop vfrstpi.h;
#lsx.txt vfrstpi.h mask=0x729a8000 #0x729a8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vfrstpi.h vrD, vrJ, imm10_5 is op15_31=0xe535 & vrD & vrJ & imm10_5 { vrD = vfrstpi.h(vrD, vrJ, imm10_5:$(REGSIZE)); }
# Two-operand forms below match the wider op10_31 opcode field
# (no immediate; vrJ is the sole source).
define pcodeop vclo.b;
#lsx.txt vclo.b mask=0x729c0000 #0x729c0000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclo.b vrD, vrJ is op10_31=0x1ca700 & vrD & vrJ { vrD = vclo.b(vrD, vrJ); }
define pcodeop vclo.h;
#lsx.txt vclo.h mask=0x729c0400 #0x729c0400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclo.h vrD, vrJ is op10_31=0x1ca701 & vrD & vrJ { vrD = vclo.h(vrD, vrJ); }
define pcodeop vclo.w;
#lsx.txt vclo.w mask=0x729c0800 #0x729c0800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclo.w vrD, vrJ is op10_31=0x1ca702 & vrD & vrJ { vrD = vclo.w(vrD, vrJ); }
define pcodeop vclo.d;
#lsx.txt vclo.d mask=0x729c0c00 #0x729c0c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclo.d vrD, vrJ is op10_31=0x1ca703 & vrD & vrJ { vrD = vclo.d(vrD, vrJ); }
define pcodeop vclz.b;
#lsx.txt vclz.b mask=0x729c1000 #0x729c1000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclz.b vrD, vrJ is op10_31=0x1ca704 & vrD & vrJ { vrD = vclz.b(vrD, vrJ); }
define pcodeop vclz.h;
#lsx.txt vclz.h mask=0x729c1400 #0x729c1400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclz.h vrD, vrJ is op10_31=0x1ca705 & vrD & vrJ { vrD = vclz.h(vrD, vrJ); }
define pcodeop vclz.w;
#lsx.txt vclz.w mask=0x729c1800 #0x729c1800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclz.w vrD, vrJ is op10_31=0x1ca706 & vrD & vrJ { vrD = vclz.w(vrD, vrJ); }
define pcodeop vclz.d;
#lsx.txt vclz.d mask=0x729c1c00 #0x729c1c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vclz.d vrD, vrJ is op10_31=0x1ca707 & vrD & vrJ { vrD = vclz.d(vrD, vrJ); }
define pcodeop vpcnt.b;
#lsx.txt vpcnt.b mask=0x729c2000 #0x729c2000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vpcnt.b vrD, vrJ is op10_31=0x1ca708 & vrD & vrJ { vrD = vpcnt.b(vrD, vrJ); }
define pcodeop vpcnt.h;
# LSX two-operand vector instructions (vpcnt/vneg/vmskltz/vmskgez/
# vmsknz), condition-flag setters (vset*z, destination is an fcc flag
# register), FP unary ops, FP<->integer conversions and vexth sign/zero
# widenings, all modeled as opaque pcodeop stubs keyed on the op10_31
# opcode field. The "#lsx.txt" comment lines carry the generator's
# encoding metadata and are kept verbatim.
#lsx.txt vpcnt.h mask=0x729c2400 #0x729c2400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vpcnt.h vrD, vrJ is op10_31=0x1ca709 & vrD & vrJ { vrD = vpcnt.h(vrD, vrJ); }
define pcodeop vpcnt.w;
#lsx.txt vpcnt.w mask=0x729c2800 #0x729c2800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vpcnt.w vrD, vrJ is op10_31=0x1ca70a & vrD & vrJ { vrD = vpcnt.w(vrD, vrJ); }
define pcodeop vpcnt.d;
#lsx.txt vpcnt.d mask=0x729c2c00 #0x729c2c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vpcnt.d vrD, vrJ is op10_31=0x1ca70b & vrD & vrJ { vrD = vpcnt.d(vrD, vrJ); }
define pcodeop vneg.b;
#lsx.txt vneg.b mask=0x729c3000 #0x729c3000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vneg.b vrD, vrJ is op10_31=0x1ca70c & vrD & vrJ { vrD = vneg.b(vrD, vrJ); }
define pcodeop vneg.h;
#lsx.txt vneg.h mask=0x729c3400 #0x729c3400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vneg.h vrD, vrJ is op10_31=0x1ca70d & vrD & vrJ { vrD = vneg.h(vrD, vrJ); }
define pcodeop vneg.w;
#lsx.txt vneg.w mask=0x729c3800 #0x729c3800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vneg.w vrD, vrJ is op10_31=0x1ca70e & vrD & vrJ { vrD = vneg.w(vrD, vrJ); }
define pcodeop vneg.d;
#lsx.txt vneg.d mask=0x729c3c00 #0x729c3c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vneg.d vrD, vrJ is op10_31=0x1ca70f & vrD & vrJ { vrD = vneg.d(vrD, vrJ); }
define pcodeop vmskltz.b;
#lsx.txt vmskltz.b mask=0x729c4000 #0x729c4000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vmskltz.b vrD, vrJ is op10_31=0x1ca710 & vrD & vrJ { vrD = vmskltz.b(vrD, vrJ); }
define pcodeop vmskltz.h;
#lsx.txt vmskltz.h mask=0x729c4400 #0x729c4400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vmskltz.h vrD, vrJ is op10_31=0x1ca711 & vrD & vrJ { vrD = vmskltz.h(vrD, vrJ); }
define pcodeop vmskltz.w;
#lsx.txt vmskltz.w mask=0x729c4800 #0x729c4800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vmskltz.w vrD, vrJ is op10_31=0x1ca712 & vrD & vrJ { vrD = vmskltz.w(vrD, vrJ); }
define pcodeop vmskltz.d;
#lsx.txt vmskltz.d mask=0x729c4c00 #0x729c4c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vmskltz.d vrD, vrJ is op10_31=0x1ca713 & vrD & vrJ { vrD = vmskltz.d(vrD, vrJ); }
define pcodeop vmskgez.b;
#lsx.txt vmskgez.b mask=0x729c5000 #0x729c5000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vmskgez.b vrD, vrJ is op10_31=0x1ca714 & vrD & vrJ { vrD = vmskgez.b(vrD, vrJ); }
define pcodeop vmsknz.b;
#lsx.txt vmsknz.b mask=0x729c6000 #0x729c6000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vmsknz.b vrD, vrJ is op10_31=0x1ca718 & vrD & vrJ { vrD = vmsknz.b(vrD, vrJ); }
# vset* entries write a 3-bit condition-flag register (fccD) instead of
# a vector register.
define pcodeop vseteqz.v;
#lsx.txt vseteqz.v mask=0x729c9800 #0x729c9800 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vseteqz.v fccD, vrJ is op10_31=0x1ca726 & fccD & vrJ { fccD = vseteqz.v(fccD, vrJ); }
define pcodeop vsetnez.v;
#lsx.txt vsetnez.v mask=0x729c9c00 #0x729c9c00 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetnez.v fccD, vrJ is op10_31=0x1ca727 & fccD & vrJ { fccD = vsetnez.v(fccD, vrJ); }
define pcodeop vsetanyeqz.b;
#lsx.txt vsetanyeqz.b mask=0x729ca000 #0x729ca000 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetanyeqz.b fccD, vrJ is op10_31=0x1ca728 & fccD & vrJ { fccD = vsetanyeqz.b(fccD, vrJ); }
define pcodeop vsetanyeqz.h;
#lsx.txt vsetanyeqz.h mask=0x729ca400 #0x729ca400 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetanyeqz.h fccD, vrJ is op10_31=0x1ca729 & fccD & vrJ { fccD = vsetanyeqz.h(fccD, vrJ); }
define pcodeop vsetanyeqz.w;
#lsx.txt vsetanyeqz.w mask=0x729ca800 #0x729ca800 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetanyeqz.w fccD, vrJ is op10_31=0x1ca72a & fccD & vrJ { fccD = vsetanyeqz.w(fccD, vrJ); }
define pcodeop vsetanyeqz.d;
#lsx.txt vsetanyeqz.d mask=0x729cac00 #0x729cac00 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetanyeqz.d fccD, vrJ is op10_31=0x1ca72b & fccD & vrJ { fccD = vsetanyeqz.d(fccD, vrJ); }
define pcodeop vsetallnez.b;
#lsx.txt vsetallnez.b mask=0x729cb000 #0x729cb000 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetallnez.b fccD, vrJ is op10_31=0x1ca72c & fccD & vrJ { fccD = vsetallnez.b(fccD, vrJ); }
define pcodeop vsetallnez.h;
#lsx.txt vsetallnez.h mask=0x729cb400 #0x729cb400 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetallnez.h fccD, vrJ is op10_31=0x1ca72d & fccD & vrJ { fccD = vsetallnez.h(fccD, vrJ); }
define pcodeop vsetallnez.w;
#lsx.txt vsetallnez.w mask=0x729cb800 #0x729cb800 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetallnez.w fccD, vrJ is op10_31=0x1ca72e & fccD & vrJ { fccD = vsetallnez.w(fccD, vrJ); }
define pcodeop vsetallnez.d;
#lsx.txt vsetallnez.d mask=0x729cbc00 #0x729cbc00 0xfffffc18 c0:3,v5:5 ['fcc0_3_s0', 'vreg5_5_s0']
:vsetallnez.d fccD, vrJ is op10_31=0x1ca72f & fccD & vrJ { fccD = vsetallnez.d(fccD, vrJ); }
define pcodeop vflogb.s;
#lsx.txt vflogb.s mask=0x729cc400 #0x729cc400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vflogb.s vrD, vrJ is op10_31=0x1ca731 & vrD & vrJ { vrD = vflogb.s(vrD, vrJ); }
define pcodeop vflogb.d;
#lsx.txt vflogb.d mask=0x729cc800 #0x729cc800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vflogb.d vrD, vrJ is op10_31=0x1ca732 & vrD & vrJ { vrD = vflogb.d(vrD, vrJ); }
define pcodeop vfclass.s;
#lsx.txt vfclass.s mask=0x729cd400 #0x729cd400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfclass.s vrD, vrJ is op10_31=0x1ca735 & vrD & vrJ { vrD = vfclass.s(vrD, vrJ); }
define pcodeop vfclass.d;
#lsx.txt vfclass.d mask=0x729cd800 #0x729cd800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfclass.d vrD, vrJ is op10_31=0x1ca736 & vrD & vrJ { vrD = vfclass.d(vrD, vrJ); }
define pcodeop vfsqrt.s;
#lsx.txt vfsqrt.s mask=0x729ce400 #0x729ce400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfsqrt.s vrD, vrJ is op10_31=0x1ca739 & vrD & vrJ { vrD = vfsqrt.s(vrD, vrJ); }
define pcodeop vfsqrt.d;
#lsx.txt vfsqrt.d mask=0x729ce800 #0x729ce800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfsqrt.d vrD, vrJ is op10_31=0x1ca73a & vrD & vrJ { vrD = vfsqrt.d(vrD, vrJ); }
define pcodeop vfrecip.s;
#lsx.txt vfrecip.s mask=0x729cf400 #0x729cf400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrecip.s vrD, vrJ is op10_31=0x1ca73d & vrD & vrJ { vrD = vfrecip.s(vrD, vrJ); }
define pcodeop vfrecip.d;
#lsx.txt vfrecip.d mask=0x729cf800 #0x729cf800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrecip.d vrD, vrJ is op10_31=0x1ca73e & vrD & vrJ { vrD = vfrecip.d(vrD, vrJ); }
define pcodeop vfrsqrt.s;
#lsx.txt vfrsqrt.s mask=0x729d0400 #0x729d0400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrsqrt.s vrD, vrJ is op10_31=0x1ca741 & vrD & vrJ { vrD = vfrsqrt.s(vrD, vrJ); }
define pcodeop vfrsqrt.d;
#lsx.txt vfrsqrt.d mask=0x729d0800 #0x729d0800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrsqrt.d vrD, vrJ is op10_31=0x1ca742 & vrD & vrJ { vrD = vfrsqrt.d(vrD, vrJ); }
define pcodeop vfrint.s;
#lsx.txt vfrint.s mask=0x729d3400 #0x729d3400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrint.s vrD, vrJ is op10_31=0x1ca74d & vrD & vrJ { vrD = vfrint.s(vrD, vrJ); }
define pcodeop vfrint.d;
#lsx.txt vfrint.d mask=0x729d3800 #0x729d3800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrint.d vrD, vrJ is op10_31=0x1ca74e & vrD & vrJ { vrD = vfrint.d(vrD, vrJ); }
define pcodeop vfrintrm.s;
#lsx.txt vfrintrm.s mask=0x729d4400 #0x729d4400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrm.s vrD, vrJ is op10_31=0x1ca751 & vrD & vrJ { vrD = vfrintrm.s(vrD, vrJ); }
define pcodeop vfrintrm.d;
#lsx.txt vfrintrm.d mask=0x729d4800 #0x729d4800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrm.d vrD, vrJ is op10_31=0x1ca752 & vrD & vrJ { vrD = vfrintrm.d(vrD, vrJ); }
define pcodeop vfrintrp.s;
#lsx.txt vfrintrp.s mask=0x729d5400 #0x729d5400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrp.s vrD, vrJ is op10_31=0x1ca755 & vrD & vrJ { vrD = vfrintrp.s(vrD, vrJ); }
define pcodeop vfrintrp.d;
#lsx.txt vfrintrp.d mask=0x729d5800 #0x729d5800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrp.d vrD, vrJ is op10_31=0x1ca756 & vrD & vrJ { vrD = vfrintrp.d(vrD, vrJ); }
define pcodeop vfrintrz.s;
#lsx.txt vfrintrz.s mask=0x729d6400 #0x729d6400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrz.s vrD, vrJ is op10_31=0x1ca759 & vrD & vrJ { vrD = vfrintrz.s(vrD, vrJ); }
define pcodeop vfrintrz.d;
#lsx.txt vfrintrz.d mask=0x729d6800 #0x729d6800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrz.d vrD, vrJ is op10_31=0x1ca75a & vrD & vrJ { vrD = vfrintrz.d(vrD, vrJ); }
define pcodeop vfrintrne.s;
#lsx.txt vfrintrne.s mask=0x729d7400 #0x729d7400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrne.s vrD, vrJ is op10_31=0x1ca75d & vrD & vrJ { vrD = vfrintrne.s(vrD, vrJ); }
define pcodeop vfrintrne.d;
#lsx.txt vfrintrne.d mask=0x729d7800 #0x729d7800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfrintrne.d vrD, vrJ is op10_31=0x1ca75e & vrD & vrJ { vrD = vfrintrne.d(vrD, vrJ); }
define pcodeop vfcvtl.s.h;
#lsx.txt vfcvtl.s.h mask=0x729de800 #0x729de800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfcvtl.s.h vrD, vrJ is op10_31=0x1ca77a & vrD & vrJ { vrD = vfcvtl.s.h(vrD, vrJ); }
define pcodeop vfcvth.s.h;
#lsx.txt vfcvth.s.h mask=0x729dec00 #0x729dec00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfcvth.s.h vrD, vrJ is op10_31=0x1ca77b & vrD & vrJ { vrD = vfcvth.s.h(vrD, vrJ); }
define pcodeop vfcvtl.d.s;
#lsx.txt vfcvtl.d.s mask=0x729df000 #0x729df000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfcvtl.d.s vrD, vrJ is op10_31=0x1ca77c & vrD & vrJ { vrD = vfcvtl.d.s(vrD, vrJ); }
define pcodeop vfcvth.d.s;
#lsx.txt vfcvth.d.s mask=0x729df400 #0x729df400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vfcvth.d.s vrD, vrJ is op10_31=0x1ca77d & vrD & vrJ { vrD = vfcvth.d.s(vrD, vrJ); }
define pcodeop vffint.s.w;
#lsx.txt vffint.s.w mask=0x729e0000 #0x729e0000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vffint.s.w vrD, vrJ is op10_31=0x1ca780 & vrD & vrJ { vrD = vffint.s.w(vrD, vrJ); }
define pcodeop vffint.s.wu;
#lsx.txt vffint.s.wu mask=0x729e0400 #0x729e0400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vffint.s.wu vrD, vrJ is op10_31=0x1ca781 & vrD & vrJ { vrD = vffint.s.wu(vrD, vrJ); }
define pcodeop vffint.d.l;
#lsx.txt vffint.d.l mask=0x729e0800 #0x729e0800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vffint.d.l vrD, vrJ is op10_31=0x1ca782 & vrD & vrJ { vrD = vffint.d.l(vrD, vrJ); }
define pcodeop vffint.d.lu;
#lsx.txt vffint.d.lu mask=0x729e0c00 #0x729e0c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vffint.d.lu vrD, vrJ is op10_31=0x1ca783 & vrD & vrJ { vrD = vffint.d.lu(vrD, vrJ); }
define pcodeop vffintl.d.w;
#lsx.txt vffintl.d.w mask=0x729e1000 #0x729e1000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vffintl.d.w vrD, vrJ is op10_31=0x1ca784 & vrD & vrJ { vrD = vffintl.d.w(vrD, vrJ); }
define pcodeop vffinth.d.w;
#lsx.txt vffinth.d.w mask=0x729e1400 #0x729e1400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vffinth.d.w vrD, vrJ is op10_31=0x1ca785 & vrD & vrJ { vrD = vffinth.d.w(vrD, vrJ); }
define pcodeop vftint.w.s;
#lsx.txt vftint.w.s mask=0x729e3000 #0x729e3000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftint.w.s vrD, vrJ is op10_31=0x1ca78c & vrD & vrJ { vrD = vftint.w.s(vrD, vrJ); }
define pcodeop vftint.l.d;
#lsx.txt vftint.l.d mask=0x729e3400 #0x729e3400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftint.l.d vrD, vrJ is op10_31=0x1ca78d & vrD & vrJ { vrD = vftint.l.d(vrD, vrJ); }
define pcodeop vftintrm.w.s;
#lsx.txt vftintrm.w.s mask=0x729e3800 #0x729e3800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrm.w.s vrD, vrJ is op10_31=0x1ca78e & vrD & vrJ { vrD = vftintrm.w.s(vrD, vrJ); }
define pcodeop vftintrm.l.d;
#lsx.txt vftintrm.l.d mask=0x729e3c00 #0x729e3c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrm.l.d vrD, vrJ is op10_31=0x1ca78f & vrD & vrJ { vrD = vftintrm.l.d(vrD, vrJ); }
define pcodeop vftintrp.w.s;
#lsx.txt vftintrp.w.s mask=0x729e4000 #0x729e4000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrp.w.s vrD, vrJ is op10_31=0x1ca790 & vrD & vrJ { vrD = vftintrp.w.s(vrD, vrJ); }
define pcodeop vftintrp.l.d;
#lsx.txt vftintrp.l.d mask=0x729e4400 #0x729e4400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrp.l.d vrD, vrJ is op10_31=0x1ca791 & vrD & vrJ { vrD = vftintrp.l.d(vrD, vrJ); }
define pcodeop vftintrz.w.s;
#lsx.txt vftintrz.w.s mask=0x729e4800 #0x729e4800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrz.w.s vrD, vrJ is op10_31=0x1ca792 & vrD & vrJ { vrD = vftintrz.w.s(vrD, vrJ); }
define pcodeop vftintrz.l.d;
#lsx.txt vftintrz.l.d mask=0x729e4c00 #0x729e4c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrz.l.d vrD, vrJ is op10_31=0x1ca793 & vrD & vrJ { vrD = vftintrz.l.d(vrD, vrJ); }
define pcodeop vftintrne.w.s;
#lsx.txt vftintrne.w.s mask=0x729e5000 #0x729e5000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrne.w.s vrD, vrJ is op10_31=0x1ca794 & vrD & vrJ { vrD = vftintrne.w.s(vrD, vrJ); }
define pcodeop vftintrne.l.d;
#lsx.txt vftintrne.l.d mask=0x729e5400 #0x729e5400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrne.l.d vrD, vrJ is op10_31=0x1ca795 & vrD & vrJ { vrD = vftintrne.l.d(vrD, vrJ); }
define pcodeop vftint.wu.s;
#lsx.txt vftint.wu.s mask=0x729e5800 #0x729e5800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftint.wu.s vrD, vrJ is op10_31=0x1ca796 & vrD & vrJ { vrD = vftint.wu.s(vrD, vrJ); }
define pcodeop vftint.lu.d;
#lsx.txt vftint.lu.d mask=0x729e5c00 #0x729e5c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftint.lu.d vrD, vrJ is op10_31=0x1ca797 & vrD & vrJ { vrD = vftint.lu.d(vrD, vrJ); }
define pcodeop vftintrz.wu.s;
#lsx.txt vftintrz.wu.s mask=0x729e7000 #0x729e7000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrz.wu.s vrD, vrJ is op10_31=0x1ca79c & vrD & vrJ { vrD = vftintrz.wu.s(vrD, vrJ); }
define pcodeop vftintrz.lu.d;
#lsx.txt vftintrz.lu.d mask=0x729e7400 #0x729e7400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrz.lu.d vrD, vrJ is op10_31=0x1ca79d & vrD & vrJ { vrD = vftintrz.lu.d(vrD, vrJ); }
define pcodeop vftintl.l.s;
#lsx.txt vftintl.l.s mask=0x729e8000 #0x729e8000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintl.l.s vrD, vrJ is op10_31=0x1ca7a0 & vrD & vrJ { vrD = vftintl.l.s(vrD, vrJ); }
define pcodeop vftinth.l.s;
#lsx.txt vftinth.l.s mask=0x729e8400 #0x729e8400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftinth.l.s vrD, vrJ is op10_31=0x1ca7a1 & vrD & vrJ { vrD = vftinth.l.s(vrD, vrJ); }
define pcodeop vftintrml.l.s;
#lsx.txt vftintrml.l.s mask=0x729e8800 #0x729e8800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrml.l.s vrD, vrJ is op10_31=0x1ca7a2 & vrD & vrJ { vrD = vftintrml.l.s(vrD, vrJ); }
define pcodeop vftintrmh.l.s;
#lsx.txt vftintrmh.l.s mask=0x729e8c00 #0x729e8c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrmh.l.s vrD, vrJ is op10_31=0x1ca7a3 & vrD & vrJ { vrD = vftintrmh.l.s(vrD, vrJ); }
define pcodeop vftintrpl.l.s;
#lsx.txt vftintrpl.l.s mask=0x729e9000 #0x729e9000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrpl.l.s vrD, vrJ is op10_31=0x1ca7a4 & vrD & vrJ { vrD = vftintrpl.l.s(vrD, vrJ); }
define pcodeop vftintrph.l.s;
#lsx.txt vftintrph.l.s mask=0x729e9400 #0x729e9400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrph.l.s vrD, vrJ is op10_31=0x1ca7a5 & vrD & vrJ { vrD = vftintrph.l.s(vrD, vrJ); }
define pcodeop vftintrzl.l.s;
#lsx.txt vftintrzl.l.s mask=0x729e9800 #0x729e9800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrzl.l.s vrD, vrJ is op10_31=0x1ca7a6 & vrD & vrJ { vrD = vftintrzl.l.s(vrD, vrJ); }
define pcodeop vftintrzh.l.s;
#lsx.txt vftintrzh.l.s mask=0x729e9c00 #0x729e9c00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrzh.l.s vrD, vrJ is op10_31=0x1ca7a7 & vrD & vrJ { vrD = vftintrzh.l.s(vrD, vrJ); }
define pcodeop vftintrnel.l.s;
#lsx.txt vftintrnel.l.s mask=0x729ea000 #0x729ea000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrnel.l.s vrD, vrJ is op10_31=0x1ca7a8 & vrD & vrJ { vrD = vftintrnel.l.s(vrD, vrJ); }
define pcodeop vftintrneh.l.s;
#lsx.txt vftintrneh.l.s mask=0x729ea400 #0x729ea400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vftintrneh.l.s vrD, vrJ is op10_31=0x1ca7a9 & vrD & vrJ { vrD = vftintrneh.l.s(vrD, vrJ); }
define pcodeop vexth.h.b;
#lsx.txt vexth.h.b mask=0x729ee000 #0x729ee000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.h.b vrD, vrJ is op10_31=0x1ca7b8 & vrD & vrJ { vrD = vexth.h.b(vrD, vrJ); }
define pcodeop vexth.w.h;
#lsx.txt vexth.w.h mask=0x729ee400 #0x729ee400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.w.h vrD, vrJ is op10_31=0x1ca7b9 & vrD & vrJ { vrD = vexth.w.h(vrD, vrJ); }
define pcodeop vexth.d.w;
#lsx.txt vexth.d.w mask=0x729ee800 #0x729ee800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.d.w vrD, vrJ is op10_31=0x1ca7ba & vrD & vrJ { vrD = vexth.d.w(vrD, vrJ); }
define pcodeop vexth.q.d;
#lsx.txt vexth.q.d mask=0x729eec00 #0x729eec00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.q.d vrD, vrJ is op10_31=0x1ca7bb & vrD & vrJ { vrD = vexth.q.d(vrD, vrJ); }
define pcodeop vexth.hu.bu;
#lsx.txt vexth.hu.bu mask=0x729ef000 #0x729ef000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.hu.bu vrD, vrJ is op10_31=0x1ca7bc & vrD & vrJ { vrD = vexth.hu.bu(vrD, vrJ); }
define pcodeop vexth.wu.hu;
#lsx.txt vexth.wu.hu mask=0x729ef400 #0x729ef400 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.wu.hu vrD, vrJ is op10_31=0x1ca7bd & vrD & vrJ { vrD = vexth.wu.hu(vrD, vrJ); }
define pcodeop vexth.du.wu;
#lsx.txt vexth.du.wu mask=0x729ef800 #0x729ef800 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.du.wu vrD, vrJ is op10_31=0x1ca7be & vrD & vrJ { vrD = vexth.du.wu(vrD, vrJ); }
define pcodeop vexth.qu.du;
#lsx.txt vexth.qu.du mask=0x729efc00 #0x729efc00 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vexth.qu.du vrD, vrJ is op10_31=0x1ca7bf & vrD & vrJ { vrD = vexth.qu.du(vrD, vrJ); }
define pcodeop vreplgr2vr.b;
#lsx.txt vreplgr2vr.b mask=0x729f0000 #0x729f0000 0xfffffc00 v0:5, r5:5 ['vreg0_5_s0', 'reg5_5_s0'] :vreplgr2vr.b vrD, RJsrc is op10_31=0x1ca7c0 & vrD & RJsrc { vrD = vreplgr2vr.b(vrD, RJsrc); } define pcodeop vreplgr2vr.h; #lsx.txt vreplgr2vr.h mask=0x729f0400 #0x729f0400 0xfffffc00 v0:5, r5:5 ['vreg0_5_s0', 'reg5_5_s0'] :vreplgr2vr.h vrD, RJsrc is op10_31=0x1ca7c1 & vrD & RJsrc { vrD = vreplgr2vr.h(vrD, RJsrc); } define pcodeop vreplgr2vr.w; #lsx.txt vreplgr2vr.w mask=0x729f0800 #0x729f0800 0xfffffc00 v0:5, r5:5 ['vreg0_5_s0', 'reg5_5_s0'] :vreplgr2vr.w vrD, RJsrc is op10_31=0x1ca7c2 & vrD & RJsrc { vrD = vreplgr2vr.w(vrD, RJsrc); } define pcodeop vreplgr2vr.d; #lsx.txt vreplgr2vr.d mask=0x729f0c00 #0x729f0c00 0xfffffc00 v0:5, r5:5 ['vreg0_5_s0', 'reg5_5_s0'] :vreplgr2vr.d vrD, RJsrc is op10_31=0x1ca7c3 & vrD & RJsrc { vrD = vreplgr2vr.d(vrD, RJsrc); } define pcodeop vrotri.b; #lsx.txt vrotri.b mask=0x72a02000 #0x72a02000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0'] :vrotri.b vrD, vrJ, imm10_3 is op13_31=0x39501 & vrD & vrJ & imm10_3 { vrD = vrotri.b(vrD, vrJ, imm10_3:$(REGSIZE)); } define pcodeop vrotri.h; #lsx.txt vrotri.h mask=0x72a04000 #0x72a04000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0'] :vrotri.h vrD, vrJ, imm10_4 is op14_31=0x1ca81 & vrD & vrJ & imm10_4 { vrD = vrotri.h(vrD, vrJ, imm10_4:$(REGSIZE)); } define pcodeop vrotri.w; #lsx.txt vrotri.w mask=0x72a08000 #0x72a08000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0'] :vrotri.w vrD, vrJ, imm10_5 is op15_31=0xe541 & vrD & vrJ & imm10_5 { vrD = vrotri.w(vrD, vrJ, imm10_5:$(REGSIZE)); } define pcodeop vrotri.d; #lsx.txt vrotri.d mask=0x72a10000 #0x72a10000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0'] :vrotri.d vrD, vrJ, imm10_6 is op16_31=0x72a1 & vrD & vrJ & imm10_6 { vrD = vrotri.d(vrD, vrJ, imm10_6:$(REGSIZE)); } define pcodeop vsrlri.b; #lsx.txt vsrlri.b mask=0x72a42000 #0x72a42000 0xffffe000 
v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0'] :vsrlri.b vrD, vrJ, imm10_3 is op13_31=0x39521 & vrD & vrJ & imm10_3 { vrD = vsrlri.b(vrD, vrJ, imm10_3:$(REGSIZE)); } define pcodeop vsrlri.h; #lsx.txt vsrlri.h mask=0x72a44000 #0x72a44000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0'] :vsrlri.h vrD, vrJ, imm10_4 is op14_31=0x1ca91 & vrD & vrJ & imm10_4 { vrD = vsrlri.h(vrD, vrJ, imm10_4:$(REGSIZE)); } define pcodeop vsrlri.w; #lsx.txt vsrlri.w mask=0x72a48000 #0x72a48000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0'] :vsrlri.w vrD, vrJ, imm10_5 is op15_31=0xe549 & vrD & vrJ & imm10_5 { vrD = vsrlri.w(vrD, vrJ, imm10_5:$(REGSIZE)); } define pcodeop vsrlri.d; #lsx.txt vsrlri.d mask=0x72a50000 #0x72a50000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0'] :vsrlri.d vrD, vrJ, imm10_6 is op16_31=0x72a5 & vrD & vrJ & imm10_6 { vrD = vsrlri.d(vrD, vrJ, imm10_6:$(REGSIZE)); } define pcodeop vsrari.b; #lsx.txt vsrari.b mask=0x72a82000 #0x72a82000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0'] :vsrari.b vrD, vrJ, imm10_3 is op13_31=0x39541 & vrD & vrJ & imm10_3 { vrD = vsrari.b(vrD, vrJ, imm10_3:$(REGSIZE)); } define pcodeop vsrari.h; #lsx.txt vsrari.h mask=0x72a84000 #0x72a84000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0'] :vsrari.h vrD, vrJ, imm10_4 is op14_31=0x1caa1 & vrD & vrJ & imm10_4 { vrD = vsrari.h(vrD, vrJ, imm10_4:$(REGSIZE)); } define pcodeop vsrari.w; #lsx.txt vsrari.w mask=0x72a88000 #0x72a88000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0'] :vsrari.w vrD, vrJ, imm10_5 is op15_31=0xe551 & vrD & vrJ & imm10_5 { vrD = vsrari.w(vrD, vrJ, imm10_5:$(REGSIZE)); } define pcodeop vsrari.d; #lsx.txt vsrari.d mask=0x72a90000 #0x72a90000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0'] :vsrari.d vrD, vrJ, imm10_6 is op16_31=0x72a9 & vrD & vrJ & imm10_6 { vrD = vsrari.d(vrD, vrJ, imm10_6:$(REGSIZE)); } 
# ---------------------------------------------------------------------------
# Auto-generated LSX table, continued: element insert/extract/replicate,
# widening shifts, bit set/clear/reverse, saturate, immediate shifts,
# narrowing shifts, and byte-wise logic/shuffle with immediates.
# "#lsx.txt ..." comments are generator provenance (mnemonic, opcode match
# value, opcode mask, operand bit-fields). All operations are opaque pcodeops.
# NOTE(review): the element-index immediates narrow as the element widens
# (u10:4 for .b down to u10:1 for .d in the *gr2vr/*ve2gr/*vei groups) —
# verify any edit against the LoongArch LSX encoding tables.
# ---------------------------------------------------------------------------

# vinsgr2vr.* — insert GPR RJsrc into element [imm] of vrD.
define pcodeop vinsgr2vr.b;
#lsx.txt vinsgr2vr.b mask=0x72eb8000
#0x72eb8000 0xffffc000 v0:5, r5:5,u10:4 ['vreg0_5_s0', 'reg5_5_s0', 'imm10_4_s0']
:vinsgr2vr.b vrD, RJsrc, imm10_4 is op14_31=0x1cbae & vrD & RJsrc & imm10_4 { vrD = vinsgr2vr.b(vrD, RJsrc, imm10_4:$(REGSIZE)); }

define pcodeop vinsgr2vr.h;
#lsx.txt vinsgr2vr.h mask=0x72ebc000
#0x72ebc000 0xffffe000 v0:5, r5:5,u10:3 ['vreg0_5_s0', 'reg5_5_s0', 'imm10_3_s0']
:vinsgr2vr.h vrD, RJsrc, imm10_3 is op13_31=0x3975e & vrD & RJsrc & imm10_3 { vrD = vinsgr2vr.h(vrD, RJsrc, imm10_3:$(REGSIZE)); }

define pcodeop vinsgr2vr.w;
#lsx.txt vinsgr2vr.w mask=0x72ebe000
#0x72ebe000 0xfffff000 v0:5, r5:5,u10:2 ['vreg0_5_s0', 'reg5_5_s0', 'imm10_2_s0']
:vinsgr2vr.w vrD, RJsrc, imm10_2 is op12_31=0x72ebe & vrD & RJsrc & imm10_2 { vrD = vinsgr2vr.w(vrD, RJsrc, imm10_2:$(REGSIZE)); }

define pcodeop vinsgr2vr.d;
#lsx.txt vinsgr2vr.d mask=0x72ebf000
#0x72ebf000 0xfffff800 v0:5, r5:5,u10:1 ['vreg0_5_s0', 'reg5_5_s0', 'imm10_1_s0']
:vinsgr2vr.d vrD, RJsrc, imm10_1 is op11_31=0xe5d7e & vrD & RJsrc & imm10_1 { vrD = vinsgr2vr.d(vrD, RJsrc, imm10_1:$(REGSIZE)); }

# vpickve2gr.* — extract element [imm] of vrJ into GPR RD (signed forms).
define pcodeop vpickve2gr.b;
#lsx.txt vpickve2gr.b mask=0x72ef8000
#0x72ef8000 0xffffc000 r0:5,v5:5,u10:4 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vpickve2gr.b RD, vrJ, imm10_4 is op14_31=0x1cbbe & RD & vrJ & imm10_4 { RD = vpickve2gr.b(RD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vpickve2gr.h;
#lsx.txt vpickve2gr.h mask=0x72efc000
#0x72efc000 0xffffe000 r0:5,v5:5,u10:3 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vpickve2gr.h RD, vrJ, imm10_3 is op13_31=0x3977e & RD & vrJ & imm10_3 { RD = vpickve2gr.h(RD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vpickve2gr.w;
#lsx.txt vpickve2gr.w mask=0x72efe000
#0x72efe000 0xfffff000 r0:5,v5:5,u10:2 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_2_s0']
:vpickve2gr.w RD, vrJ, imm10_2 is op12_31=0x72efe & RD & vrJ & imm10_2 { RD = vpickve2gr.w(RD, vrJ, imm10_2:$(REGSIZE)); }

define pcodeop vpickve2gr.d;
#lsx.txt vpickve2gr.d mask=0x72eff000
#0x72eff000 0xfffff800 r0:5,v5:5,u10:1 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_1_s0']
:vpickve2gr.d RD, vrJ, imm10_1 is op11_31=0xe5dfe & RD & vrJ & imm10_1 { RD = vpickve2gr.d(RD, vrJ, imm10_1:$(REGSIZE)); }

# vpickve2gr.* unsigned forms.
define pcodeop vpickve2gr.bu;
#lsx.txt vpickve2gr.bu mask=0x72f38000
#0x72f38000 0xffffc000 r0:5,v5:5,u10:4 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vpickve2gr.bu RD, vrJ, imm10_4 is op14_31=0x1cbce & RD & vrJ & imm10_4 { RD = vpickve2gr.bu(RD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vpickve2gr.hu;
#lsx.txt vpickve2gr.hu mask=0x72f3c000
#0x72f3c000 0xffffe000 r0:5,v5:5,u10:3 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vpickve2gr.hu RD, vrJ, imm10_3 is op13_31=0x3979e & RD & vrJ & imm10_3 { RD = vpickve2gr.hu(RD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vpickve2gr.wu;
#lsx.txt vpickve2gr.wu mask=0x72f3e000
#0x72f3e000 0xfffff000 r0:5,v5:5,u10:2 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_2_s0']
:vpickve2gr.wu RD, vrJ, imm10_2 is op12_31=0x72f3e & RD & vrJ & imm10_2 { RD = vpickve2gr.wu(RD, vrJ, imm10_2:$(REGSIZE)); }

define pcodeop vpickve2gr.du;
#lsx.txt vpickve2gr.du mask=0x72f3f000
#0x72f3f000 0xfffff800 r0:5,v5:5,u10:1 ['reg0_5_s0', 'vreg5_5_s0', 'imm10_1_s0']
:vpickve2gr.du RD, vrJ, imm10_1 is op11_31=0xe5e7e & RD & vrJ & imm10_1 { RD = vpickve2gr.du(RD, vrJ, imm10_1:$(REGSIZE)); }

# vreplvei.* — replicate element [imm] of vrJ across vrD.
define pcodeop vreplvei.b;
#lsx.txt vreplvei.b mask=0x72f78000
#0x72f78000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vreplvei.b vrD, vrJ, imm10_4 is op14_31=0x1cbde & vrD & vrJ & imm10_4 { vrD = vreplvei.b(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vreplvei.h;
#lsx.txt vreplvei.h mask=0x72f7c000
#0x72f7c000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vreplvei.h vrD, vrJ, imm10_3 is op13_31=0x397be & vrD & vrJ & imm10_3 { vrD = vreplvei.h(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vreplvei.w;
#lsx.txt vreplvei.w mask=0x72f7e000
#0x72f7e000 0xfffff000 v0:5,v5:5,u10:2 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_2_s0']
:vreplvei.w vrD, vrJ, imm10_2 is op12_31=0x72f7e & vrD & vrJ & imm10_2 { vrD = vreplvei.w(vrD, vrJ, imm10_2:$(REGSIZE)); }

define pcodeop vreplvei.d;
#lsx.txt vreplvei.d mask=0x72f7f000
#0x72f7f000 0xfffff800 v0:5,v5:5,u10:1 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_1_s0']
:vreplvei.d vrD, vrJ, imm10_1 is op11_31=0xe5efe & vrD & vrJ & imm10_1 { vrD = vreplvei.d(vrD, vrJ, imm10_1:$(REGSIZE)); }

# vsllwil.* / vextl.* — widening shift-left / extend of the low half.
define pcodeop vsllwil.h.b;
#lsx.txt vsllwil.h.b mask=0x73082000
#0x73082000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vsllwil.h.b vrD, vrJ, imm10_3 is op13_31=0x39841 & vrD & vrJ & imm10_3 { vrD = vsllwil.h.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vsllwil.w.h;
#lsx.txt vsllwil.w.h mask=0x73084000
#0x73084000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsllwil.w.h vrD, vrJ, imm10_4 is op14_31=0x1cc21 & vrD & vrJ & imm10_4 { vrD = vsllwil.w.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsllwil.d.w;
#lsx.txt vsllwil.d.w mask=0x73088000
#0x73088000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsllwil.d.w vrD, vrJ, imm10_5 is op15_31=0xe611 & vrD & vrJ & imm10_5 { vrD = vsllwil.d.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vextl.q.d;
#lsx.txt vextl.q.d mask=0x73090000
#0x73090000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vextl.q.d vrD, vrJ is op10_31=0x1cc240 & vrD & vrJ { vrD = vextl.q.d(vrD, vrJ); }

define pcodeop vsllwil.hu.bu;
#lsx.txt vsllwil.hu.bu mask=0x730c2000
#0x730c2000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vsllwil.hu.bu vrD, vrJ, imm10_3 is op13_31=0x39861 & vrD & vrJ & imm10_3 { vrD = vsllwil.hu.bu(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vsllwil.wu.hu;
#lsx.txt vsllwil.wu.hu mask=0x730c4000
#0x730c4000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsllwil.wu.hu vrD, vrJ, imm10_4 is op14_31=0x1cc31 & vrD & vrJ & imm10_4 { vrD = vsllwil.wu.hu(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsllwil.du.wu;
#lsx.txt vsllwil.du.wu mask=0x730c8000
#0x730c8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsllwil.du.wu vrD, vrJ, imm10_5 is op15_31=0xe619 & vrD & vrJ & imm10_5 { vrD = vsllwil.du.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vextl.qu.du;
#lsx.txt vextl.qu.du mask=0x730d0000
#0x730d0000 0xfffffc00 v0:5,v5:5 ['vreg0_5_s0', 'vreg5_5_s0']
:vextl.qu.du vrD, vrJ is op10_31=0x1cc340 & vrD & vrJ { vrD = vextl.qu.du(vrD, vrJ); }

# vbitclri/vbitseti/vbitrevi — clear/set/flip bit [imm] in each element.
define pcodeop vbitclri.b;
#lsx.txt vbitclri.b mask=0x73102000
#0x73102000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vbitclri.b vrD, vrJ, imm10_3 is op13_31=0x39881 & vrD & vrJ & imm10_3 { vrD = vbitclri.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vbitclri.h;
#lsx.txt vbitclri.h mask=0x73104000
#0x73104000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vbitclri.h vrD, vrJ, imm10_4 is op14_31=0x1cc41 & vrD & vrJ & imm10_4 { vrD = vbitclri.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vbitclri.w;
#lsx.txt vbitclri.w mask=0x73108000
#0x73108000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vbitclri.w vrD, vrJ, imm10_5 is op15_31=0xe621 & vrD & vrJ & imm10_5 { vrD = vbitclri.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vbitclri.d;
#lsx.txt vbitclri.d mask=0x73110000
#0x73110000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vbitclri.d vrD, vrJ, imm10_6 is op16_31=0x7311 & vrD & vrJ & imm10_6 { vrD = vbitclri.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vbitseti.b;
#lsx.txt vbitseti.b mask=0x73142000
#0x73142000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vbitseti.b vrD, vrJ, imm10_3 is op13_31=0x398a1 & vrD & vrJ & imm10_3 { vrD = vbitseti.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vbitseti.h;
#lsx.txt vbitseti.h mask=0x73144000
#0x73144000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vbitseti.h vrD, vrJ, imm10_4 is op14_31=0x1cc51 & vrD & vrJ & imm10_4 { vrD = vbitseti.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vbitseti.w;
#lsx.txt vbitseti.w mask=0x73148000
#0x73148000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vbitseti.w vrD, vrJ, imm10_5 is op15_31=0xe629 & vrD & vrJ & imm10_5 { vrD = vbitseti.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vbitseti.d;
#lsx.txt vbitseti.d mask=0x73150000
#0x73150000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vbitseti.d vrD, vrJ, imm10_6 is op16_31=0x7315 & vrD & vrJ & imm10_6 { vrD = vbitseti.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vbitrevi.b;
#lsx.txt vbitrevi.b mask=0x73182000
#0x73182000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vbitrevi.b vrD, vrJ, imm10_3 is op13_31=0x398c1 & vrD & vrJ & imm10_3 { vrD = vbitrevi.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vbitrevi.h;
#lsx.txt vbitrevi.h mask=0x73184000
#0x73184000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vbitrevi.h vrD, vrJ, imm10_4 is op14_31=0x1cc61 & vrD & vrJ & imm10_4 { vrD = vbitrevi.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vbitrevi.w;
#lsx.txt vbitrevi.w mask=0x73188000
#0x73188000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vbitrevi.w vrD, vrJ, imm10_5 is op15_31=0xe631 & vrD & vrJ & imm10_5 { vrD = vbitrevi.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vbitrevi.d;
#lsx.txt vbitrevi.d mask=0x73190000
#0x73190000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vbitrevi.d vrD, vrJ, imm10_6 is op16_31=0x7319 & vrD & vrJ & imm10_6 { vrD = vbitrevi.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

# vsat.* — saturate each element to [imm+1] bits (signed, then unsigned).
define pcodeop vsat.b;
#lsx.txt vsat.b mask=0x73242000
#0x73242000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vsat.b vrD, vrJ, imm10_3 is op13_31=0x39921 & vrD & vrJ & imm10_3 { vrD = vsat.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vsat.h;
#lsx.txt vsat.h mask=0x73244000
#0x73244000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsat.h vrD, vrJ, imm10_4 is op14_31=0x1cc91 & vrD & vrJ & imm10_4 { vrD = vsat.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsat.w;
#lsx.txt vsat.w mask=0x73248000
#0x73248000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsat.w vrD, vrJ, imm10_5 is op15_31=0xe649 & vrD & vrJ & imm10_5 { vrD = vsat.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsat.d;
#lsx.txt vsat.d mask=0x73250000
#0x73250000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsat.d vrD, vrJ, imm10_6 is op16_31=0x7325 & vrD & vrJ & imm10_6 { vrD = vsat.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsat.bu;
#lsx.txt vsat.bu mask=0x73282000
#0x73282000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vsat.bu vrD, vrJ, imm10_3 is op13_31=0x39941 & vrD & vrJ & imm10_3 { vrD = vsat.bu(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vsat.hu;
#lsx.txt vsat.hu mask=0x73284000
#0x73284000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsat.hu vrD, vrJ, imm10_4 is op14_31=0x1cca1 & vrD & vrJ & imm10_4 { vrD = vsat.hu(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsat.wu;
#lsx.txt vsat.wu mask=0x73288000
#0x73288000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsat.wu vrD, vrJ, imm10_5 is op15_31=0xe651 & vrD & vrJ & imm10_5 { vrD = vsat.wu(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsat.du;
#lsx.txt vsat.du mask=0x73290000
#0x73290000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsat.du vrD, vrJ, imm10_6 is op16_31=0x7329 & vrD & vrJ & imm10_6 { vrD = vsat.du(vrD, vrJ, imm10_6:$(REGSIZE)); }

# Immediate element shifts: logical left, logical right, arithmetic right.
define pcodeop vslli.b;
#lsx.txt vslli.b mask=0x732c2000
#0x732c2000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vslli.b vrD, vrJ, imm10_3 is op13_31=0x39961 & vrD & vrJ & imm10_3 { vrD = vslli.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vslli.h;
#lsx.txt vslli.h mask=0x732c4000
#0x732c4000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vslli.h vrD, vrJ, imm10_4 is op14_31=0x1ccb1 & vrD & vrJ & imm10_4 { vrD = vslli.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vslli.w;
#lsx.txt vslli.w mask=0x732c8000
#0x732c8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vslli.w vrD, vrJ, imm10_5 is op15_31=0xe659 & vrD & vrJ & imm10_5 { vrD = vslli.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vslli.d;
#lsx.txt vslli.d mask=0x732d0000
#0x732d0000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vslli.d vrD, vrJ, imm10_6 is op16_31=0x732d & vrD & vrJ & imm10_6 { vrD = vslli.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsrli.b;
#lsx.txt vsrli.b mask=0x73302000
#0x73302000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vsrli.b vrD, vrJ, imm10_3 is op13_31=0x39981 & vrD & vrJ & imm10_3 { vrD = vsrli.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vsrli.h;
#lsx.txt vsrli.h mask=0x73304000
#0x73304000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsrli.h vrD, vrJ, imm10_4 is op14_31=0x1ccc1 & vrD & vrJ & imm10_4 { vrD = vsrli.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsrli.w;
#lsx.txt vsrli.w mask=0x73308000
#0x73308000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsrli.w vrD, vrJ, imm10_5 is op15_31=0xe661 & vrD & vrJ & imm10_5 { vrD = vsrli.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsrli.d;
#lsx.txt vsrli.d mask=0x73310000
#0x73310000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsrli.d vrD, vrJ, imm10_6 is op16_31=0x7331 & vrD & vrJ & imm10_6 { vrD = vsrli.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsrai.b;
#lsx.txt vsrai.b mask=0x73342000
#0x73342000 0xffffe000 v0:5,v5:5,u10:3 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_3_s0']
:vsrai.b vrD, vrJ, imm10_3 is op13_31=0x399a1 & vrD & vrJ & imm10_3 { vrD = vsrai.b(vrD, vrJ, imm10_3:$(REGSIZE)); }

define pcodeop vsrai.h;
#lsx.txt vsrai.h mask=0x73344000
#0x73344000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsrai.h vrD, vrJ, imm10_4 is op14_31=0x1ccd1 & vrD & vrJ & imm10_4 { vrD = vsrai.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsrai.w;
#lsx.txt vsrai.w mask=0x73348000
#0x73348000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsrai.w vrD, vrJ, imm10_5 is op15_31=0xe669 & vrD & vrJ & imm10_5 { vrD = vsrai.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsrai.d;
#lsx.txt vsrai.d mask=0x73350000
#0x73350000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsrai.d vrD, vrJ, imm10_6 is op16_31=0x7335 & vrD & vrJ & imm10_6 { vrD = vsrai.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

# Narrowing shift families (.X.Y = result element .X from source element .Y);
# the .d.q forms shift a 128-bit source, hence the u10:7 immediate.
define pcodeop vsrlni.b.h;
#lsx.txt vsrlni.b.h mask=0x73404000
#0x73404000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsrlni.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd01 & vrD & vrJ & imm10_4 { vrD = vsrlni.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsrlni.h.w;
#lsx.txt vsrlni.h.w mask=0x73408000
#0x73408000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsrlni.h.w vrD, vrJ, imm10_5 is op15_31=0xe681 & vrD & vrJ & imm10_5 { vrD = vsrlni.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsrlni.w.d;
#lsx.txt vsrlni.w.d mask=0x73410000
#0x73410000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsrlni.w.d vrD, vrJ, imm10_6 is op16_31=0x7341 & vrD & vrJ & imm10_6 { vrD = vsrlni.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsrlni.d.q;
#lsx.txt vsrlni.d.q mask=0x73420000
#0x73420000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vsrlni.d.q vrD, vrJ, imm10_7 is op17_31=0x39a1 & vrD & vrJ & imm10_7 { vrD = vsrlni.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vsrlrni.b.h;
#lsx.txt vsrlrni.b.h mask=0x73444000
#0x73444000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsrlrni.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd11 & vrD & vrJ & imm10_4 { vrD = vsrlrni.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsrlrni.h.w;
#lsx.txt vsrlrni.h.w mask=0x73448000
#0x73448000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsrlrni.h.w vrD, vrJ, imm10_5 is op15_31=0xe689 & vrD & vrJ & imm10_5 { vrD = vsrlrni.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsrlrni.w.d;
#lsx.txt vsrlrni.w.d mask=0x73450000
#0x73450000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsrlrni.w.d vrD, vrJ, imm10_6 is op16_31=0x7345 & vrD & vrJ & imm10_6 { vrD = vsrlrni.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsrlrni.d.q;
#lsx.txt vsrlrni.d.q mask=0x73460000
#0x73460000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vsrlrni.d.q vrD, vrJ, imm10_7 is op17_31=0x39a3 & vrD & vrJ & imm10_7 { vrD = vsrlrni.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrlni.b.h;
#lsx.txt vssrlni.b.h mask=0x73484000
#0x73484000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrlni.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd21 & vrD & vrJ & imm10_4 { vrD = vssrlni.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrlni.h.w;
#lsx.txt vssrlni.h.w mask=0x73488000
#0x73488000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrlni.h.w vrD, vrJ, imm10_5 is op15_31=0xe691 & vrD & vrJ & imm10_5 { vrD = vssrlni.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrlni.w.d;
#lsx.txt vssrlni.w.d mask=0x73490000
#0x73490000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrlni.w.d vrD, vrJ, imm10_6 is op16_31=0x7349 & vrD & vrJ & imm10_6 { vrD = vssrlni.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrlni.d.q;
#lsx.txt vssrlni.d.q mask=0x734a0000
#0x734a0000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrlni.d.q vrD, vrJ, imm10_7 is op17_31=0x39a5 & vrD & vrJ & imm10_7 { vrD = vssrlni.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrlni.bu.h;
#lsx.txt vssrlni.bu.h mask=0x734c4000
#0x734c4000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrlni.bu.h vrD, vrJ, imm10_4 is op14_31=0x1cd31 & vrD & vrJ & imm10_4 { vrD = vssrlni.bu.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrlni.hu.w;
#lsx.txt vssrlni.hu.w mask=0x734c8000
#0x734c8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrlni.hu.w vrD, vrJ, imm10_5 is op15_31=0xe699 & vrD & vrJ & imm10_5 { vrD = vssrlni.hu.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrlni.wu.d;
#lsx.txt vssrlni.wu.d mask=0x734d0000
#0x734d0000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrlni.wu.d vrD, vrJ, imm10_6 is op16_31=0x734d & vrD & vrJ & imm10_6 { vrD = vssrlni.wu.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrlni.du.q;
#lsx.txt vssrlni.du.q mask=0x734e0000
#0x734e0000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrlni.du.q vrD, vrJ, imm10_7 is op17_31=0x39a7 & vrD & vrJ & imm10_7 { vrD = vssrlni.du.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrlrni.b.h;
#lsx.txt vssrlrni.b.h mask=0x73504000
#0x73504000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrlrni.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd41 & vrD & vrJ & imm10_4 { vrD = vssrlrni.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrlrni.h.w;
#lsx.txt vssrlrni.h.w mask=0x73508000
#0x73508000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrlrni.h.w vrD, vrJ, imm10_5 is op15_31=0xe6a1 & vrD & vrJ & imm10_5 { vrD = vssrlrni.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrlrni.w.d;
#lsx.txt vssrlrni.w.d mask=0x73510000
#0x73510000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrlrni.w.d vrD, vrJ, imm10_6 is op16_31=0x7351 & vrD & vrJ & imm10_6 { vrD = vssrlrni.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrlrni.d.q;
#lsx.txt vssrlrni.d.q mask=0x73520000
#0x73520000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrlrni.d.q vrD, vrJ, imm10_7 is op17_31=0x39a9 & vrD & vrJ & imm10_7 { vrD = vssrlrni.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrlrni.bu.h;
#lsx.txt vssrlrni.bu.h mask=0x73544000
#0x73544000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrlrni.bu.h vrD, vrJ, imm10_4 is op14_31=0x1cd51 & vrD & vrJ & imm10_4 { vrD = vssrlrni.bu.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrlrni.hu.w;
#lsx.txt vssrlrni.hu.w mask=0x73548000
#0x73548000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrlrni.hu.w vrD, vrJ, imm10_5 is op15_31=0xe6a9 & vrD & vrJ & imm10_5 { vrD = vssrlrni.hu.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrlrni.wu.d;
#lsx.txt vssrlrni.wu.d mask=0x73550000
#0x73550000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrlrni.wu.d vrD, vrJ, imm10_6 is op16_31=0x7355 & vrD & vrJ & imm10_6 { vrD = vssrlrni.wu.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrlrni.du.q;
#lsx.txt vssrlrni.du.q mask=0x73560000
#0x73560000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrlrni.du.q vrD, vrJ, imm10_7 is op17_31=0x39ab & vrD & vrJ & imm10_7 { vrD = vssrlrni.du.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vsrani.b.h;
#lsx.txt vsrani.b.h mask=0x73584000
#0x73584000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsrani.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd61 & vrD & vrJ & imm10_4 { vrD = vsrani.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsrani.h.w;
#lsx.txt vsrani.h.w mask=0x73588000
#0x73588000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsrani.h.w vrD, vrJ, imm10_5 is op15_31=0xe6b1 & vrD & vrJ & imm10_5 { vrD = vsrani.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsrani.w.d;
#lsx.txt vsrani.w.d mask=0x73590000
#0x73590000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsrani.w.d vrD, vrJ, imm10_6 is op16_31=0x7359 & vrD & vrJ & imm10_6 { vrD = vsrani.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsrani.d.q;
#lsx.txt vsrani.d.q mask=0x735a0000
#0x735a0000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vsrani.d.q vrD, vrJ, imm10_7 is op17_31=0x39ad & vrD & vrJ & imm10_7 { vrD = vsrani.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vsrarni.b.h;
#lsx.txt vsrarni.b.h mask=0x735c4000
#0x735c4000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vsrarni.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd71 & vrD & vrJ & imm10_4 { vrD = vsrarni.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vsrarni.h.w;
#lsx.txt vsrarni.h.w mask=0x735c8000
#0x735c8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vsrarni.h.w vrD, vrJ, imm10_5 is op15_31=0xe6b9 & vrD & vrJ & imm10_5 { vrD = vsrarni.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vsrarni.w.d;
#lsx.txt vsrarni.w.d mask=0x735d0000
#0x735d0000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vsrarni.w.d vrD, vrJ, imm10_6 is op16_31=0x735d & vrD & vrJ & imm10_6 { vrD = vsrarni.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vsrarni.d.q;
#lsx.txt vsrarni.d.q mask=0x735e0000
#0x735e0000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vsrarni.d.q vrD, vrJ, imm10_7 is op17_31=0x39af & vrD & vrJ & imm10_7 { vrD = vsrarni.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrani.b.h;
#lsx.txt vssrani.b.h mask=0x73604000
#0x73604000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrani.b.h vrD, vrJ, imm10_4 is op14_31=0x1cd81 & vrD & vrJ & imm10_4 { vrD = vssrani.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrani.h.w;
#lsx.txt vssrani.h.w mask=0x73608000
#0x73608000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrani.h.w vrD, vrJ, imm10_5 is op15_31=0xe6c1 & vrD & vrJ & imm10_5 { vrD = vssrani.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrani.w.d;
#lsx.txt vssrani.w.d mask=0x73610000
#0x73610000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrani.w.d vrD, vrJ, imm10_6 is op16_31=0x7361 & vrD & vrJ & imm10_6 { vrD = vssrani.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrani.d.q;
#lsx.txt vssrani.d.q mask=0x73620000
#0x73620000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrani.d.q vrD, vrJ, imm10_7 is op17_31=0x39b1 & vrD & vrJ & imm10_7 { vrD = vssrani.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrani.bu.h;
#lsx.txt vssrani.bu.h mask=0x73644000
#0x73644000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrani.bu.h vrD, vrJ, imm10_4 is op14_31=0x1cd91 & vrD & vrJ & imm10_4 { vrD = vssrani.bu.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrani.hu.w;
#lsx.txt vssrani.hu.w mask=0x73648000
#0x73648000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrani.hu.w vrD, vrJ, imm10_5 is op15_31=0xe6c9 & vrD & vrJ & imm10_5 { vrD = vssrani.hu.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrani.wu.d;
#lsx.txt vssrani.wu.d mask=0x73650000
#0x73650000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrani.wu.d vrD, vrJ, imm10_6 is op16_31=0x7365 & vrD & vrJ & imm10_6 { vrD = vssrani.wu.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrani.du.q;
#lsx.txt vssrani.du.q mask=0x73660000
#0x73660000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrani.du.q vrD, vrJ, imm10_7 is op17_31=0x39b3 & vrD & vrJ & imm10_7 { vrD = vssrani.du.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrarni.b.h;
#lsx.txt vssrarni.b.h mask=0x73684000
#0x73684000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrarni.b.h vrD, vrJ, imm10_4 is op14_31=0x1cda1 & vrD & vrJ & imm10_4 { vrD = vssrarni.b.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrarni.h.w;
#lsx.txt vssrarni.h.w mask=0x73688000
#0x73688000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrarni.h.w vrD, vrJ, imm10_5 is op15_31=0xe6d1 & vrD & vrJ & imm10_5 { vrD = vssrarni.h.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrarni.w.d;
#lsx.txt vssrarni.w.d mask=0x73690000
#0x73690000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrarni.w.d vrD, vrJ, imm10_6 is op16_31=0x7369 & vrD & vrJ & imm10_6 { vrD = vssrarni.w.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrarni.d.q;
#lsx.txt vssrarni.d.q mask=0x736a0000
#0x736a0000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrarni.d.q vrD, vrJ, imm10_7 is op17_31=0x39b5 & vrD & vrJ & imm10_7 { vrD = vssrarni.d.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

define pcodeop vssrarni.bu.h;
#lsx.txt vssrarni.bu.h mask=0x736c4000
#0x736c4000 0xffffc000 v0:5,v5:5,u10:4 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_4_s0']
:vssrarni.bu.h vrD, vrJ, imm10_4 is op14_31=0x1cdb1 & vrD & vrJ & imm10_4 { vrD = vssrarni.bu.h(vrD, vrJ, imm10_4:$(REGSIZE)); }

define pcodeop vssrarni.hu.w;
#lsx.txt vssrarni.hu.w mask=0x736c8000
#0x736c8000 0xffff8000 v0:5,v5:5,u10:5 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_5_s0']
:vssrarni.hu.w vrD, vrJ, imm10_5 is op15_31=0xe6d9 & vrD & vrJ & imm10_5 { vrD = vssrarni.hu.w(vrD, vrJ, imm10_5:$(REGSIZE)); }

define pcodeop vssrarni.wu.d;
#lsx.txt vssrarni.wu.d mask=0x736d0000
#0x736d0000 0xffff0000 v0:5,v5:5,u10:6 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_6_s0']
:vssrarni.wu.d vrD, vrJ, imm10_6 is op16_31=0x736d & vrD & vrJ & imm10_6 { vrD = vssrarni.wu.d(vrD, vrJ, imm10_6:$(REGSIZE)); }

define pcodeop vssrarni.du.q;
#lsx.txt vssrarni.du.q mask=0x736e0000
#0x736e0000 0xfffe0000 v0:5,v5:5,u10:7 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_7_s0']
:vssrarni.du.q vrD, vrJ, imm10_7 is op17_31=0x39b7 & vrD & vrJ & imm10_7 { vrD = vssrarni.du.q(vrD, vrJ, imm10_7:$(REGSIZE)); }

# 8-bit-immediate group (u10:8, op18_31): extract-insert, shuffle, bit-select,
# byte-wise logic with immediate, vector load-immediate, and word permute.
define pcodeop vextrins.d;
#lsx.txt vextrins.d mask=0x73800000
#0x73800000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vextrins.d vrD, vrJ, imm10_8 is op18_31=0x1ce0 & vrD & vrJ & imm10_8 { vrD = vextrins.d(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vextrins.w;
#lsx.txt vextrins.w mask=0x73840000
#0x73840000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vextrins.w vrD, vrJ, imm10_8 is op18_31=0x1ce1 & vrD & vrJ & imm10_8 { vrD = vextrins.w(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vextrins.h;
#lsx.txt vextrins.h mask=0x73880000
#0x73880000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vextrins.h vrD, vrJ, imm10_8 is op18_31=0x1ce2 & vrD & vrJ & imm10_8 { vrD = vextrins.h(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vextrins.b;
#lsx.txt vextrins.b mask=0x738c0000
#0x738c0000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vextrins.b vrD, vrJ, imm10_8 is op18_31=0x1ce3 & vrD & vrJ & imm10_8 { vrD = vextrins.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vshuf4i.b;
#lsx.txt vshuf4i.b mask=0x73900000
#0x73900000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vshuf4i.b vrD, vrJ, imm10_8 is op18_31=0x1ce4 & vrD & vrJ & imm10_8 { vrD = vshuf4i.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vshuf4i.h;
#lsx.txt vshuf4i.h mask=0x73940000
#0x73940000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vshuf4i.h vrD, vrJ, imm10_8 is op18_31=0x1ce5 & vrD & vrJ & imm10_8 { vrD = vshuf4i.h(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vshuf4i.w;
#lsx.txt vshuf4i.w mask=0x73980000
#0x73980000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vshuf4i.w vrD, vrJ, imm10_8 is op18_31=0x1ce6 & vrD & vrJ & imm10_8 { vrD = vshuf4i.w(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vshuf4i.d;
#lsx.txt vshuf4i.d mask=0x739c0000
#0x739c0000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vshuf4i.d vrD, vrJ, imm10_8 is op18_31=0x1ce7 & vrD & vrJ & imm10_8 { vrD = vshuf4i.d(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vbitseli.b;
#lsx.txt vbitseli.b mask=0x73c40000
#0x73c40000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vbitseli.b vrD, vrJ, imm10_8 is op18_31=0x1cf1 & vrD & vrJ & imm10_8 { vrD = vbitseli.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vandi.b;
#lsx.txt vandi.b mask=0x73d00000
#0x73d00000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vandi.b vrD, vrJ, imm10_8 is op18_31=0x1cf4 & vrD & vrJ & imm10_8 { vrD = vandi.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vori.b;
#lsx.txt vori.b mask=0x73d40000
#0x73d40000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vori.b vrD, vrJ, imm10_8 is op18_31=0x1cf5 & vrD & vrJ & imm10_8 { vrD = vori.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vxori.b;
#lsx.txt vxori.b mask=0x73d80000
#0x73d80000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vxori.b vrD, vrJ, imm10_8 is op18_31=0x1cf6 & vrD & vrJ & imm10_8 { vrD = vxori.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

define pcodeop vnori.b;
#lsx.txt vnori.b mask=0x73dc0000
#0x73dc0000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vnori.b vrD, vrJ, imm10_8 is op18_31=0x1cf7 & vrD & vrJ & imm10_8 { vrD = vnori.b(vrD, vrJ, imm10_8:$(REGSIZE)); }

# vldi takes a signed 13-bit immediate (s5:13) rather than u10:8.
define pcodeop vldi;
#lsx.txt vldi mask=0x73e00000
#0x73e00000 0xfffc0000 v0:5, s5:13 ['vreg0_5_s0', 'simm5_13_s0']
:vldi vrD, simm5_13 is op18_31=0x1cf8 & vrD & simm5_13 { vrD = vldi(vrD, simm5_13:$(REGSIZE)); }

define pcodeop vpermi.w;
#lsx.txt vpermi.w mask=0x73e40000
#0x73e40000 0xfffc0000 v0:5,v5:5,u10:8 ['vreg0_5_s0', 'vreg5_5_s0', 'imm10_8_s0']
:vpermi.w vrD, vrJ, imm10_8 is op18_31=0x1cf9 & vrD & vrJ & imm10_8 { vrD = vpermi.w(vrD, vrJ, imm10_8:$(REGSIZE)); }
================================================ FILE: pypcode/processors/Loongarch/data/languages/lvz.sinc
================================================
# ------------------------------------------------------------------
# LoongArch LVZ (virtualization) extension instructions.
#
# Each instruction follows the generator pattern used throughout these
# Loongarch spec files: a `define pcodeop` introducing an opaque p-code
# operation, one or two `#` metadata comment lines (opcode mask and
# operand-field layout, presumably emitted from lvz.txt by a generator
# -- keep them in sync if any encoding is ever edited), then the SLEIGH
# constructor itself.  All semantics are opaque pcodeop calls; no
# register state other than RD (where present) is modeled.
# ------------------------------------------------------------------

# Guest CSR exchange: reads/writes guest CSR number imm10_14 via RJsrc
# mask, result into RD.  Bits 24-31 == 0x05 select this encoding.
define pcodeop gcsrxchg;
#lvz.txt gcsrxchg mask=0x05000000 [@lvz]
#0x05000000 0xff000000 r0:5, r5:5,u10:14 ['reg0_5_s0', 'reg5_5_s0', 'imm10_14_s0']
:gcsrxchg RD, RJsrc,imm10_14 is op24_31=0x5 & RD & RJsrc & imm10_14 { RD = gcsrxchg(RD, RJsrc, imm10_14:$(REGSIZE)); }

# Guest TLB maintenance operations: each is a fixed 32-bit encoding
# (matched against the full instruction word) with no operands, modeled
# as a side-effecting opaque p-code op.
define pcodeop gtlbclr;
#lvz.txt gtlbclr mask=0x06482001 [@lvz]
:gtlbclr is instword=0x06482001 { gtlbclr(); }

define pcodeop gtlbflush;
#lvz.txt gtlbflush mask=0x06482401 [@lvz]
#0x06482401 0xffffffff ['']
:gtlbflush is instword=0x06482401 { gtlbflush(); }

define pcodeop gtlbsrch;
#lvz.txt gtlbsrch mask=0x06482801 [@lvz]
:gtlbsrch is instword=0x06482801 { gtlbsrch(); }

define pcodeop gtlbrd;
#lvz.txt gtlbrd mask=0x06482c01 [@lvz]
:gtlbrd is instword=0x06482c01 { gtlbrd(); }

define pcodeop gtlbwr;
#lvz.txt gtlbwr mask=0x06483001 [@lvz]
:gtlbwr is instword=0x06483001 { gtlbwr(); }

define pcodeop gtlbfill;
#lvz.txt gtlbfill mask=0x06483401 [@lvz]
:gtlbfill is instword=0x06483401 { gtlbfill(); }

# Hypervisor call with a 15-bit immediate code.  Manual name is
# "hypcall"; disassembled here under its alias hvcl (@orig_name note).
define pcodeop hvcl;
#lvz.txt hypcall mask=0x002b8000 [@lvz, @orig_name=hvcl]
#0x002b8000 0xffff8000 u0:15 ['imm0_15_s0']
:hvcl imm0_15 is op15_31=0x57 & imm0_15 { hvcl(imm0_15:$(REGSIZE)); }
================================================
FILE: pypcode/processors/Loongarch/data/manuals/loongarch.idx
================================================
@LoongArch-Vol1-EN.pdf[LoongArch Reference Manual - Volume 1: Basic Architecture] add, 23 sub, 23 addi, 24 addu16id, 24 alsl, 25 lu12i.w, 26 lu32i.d, 26 lu52i.d, 26 slt, 26 sltu, 26 slti, 27 sltui, 27 pcaddi, 28 pcaddu12i, 28 pcaddu18i, 28 pcalau12i, 28 and, 29 or, 29 nor, 29 xor, 29 andn, 29 orn, 29 andi, 30 ori, 30 xori, 30 mul, 31 mulh, 31 mulw, 32 div, 32 mod, 32 sll.w, 34 srl.w, 34 sra.w, 34 rotr.w, 34 slli.w, 35 srli.w, 35 srai.w, 35 rotri.w, 35 sll.d, 36 srl.d, 36 sra.d, 36 rotr.d, 36 slli.d, 37 srli.d, 37 srai.d, 37 rotri.d, 37 ext.w.b, 38 ext.w.h, 38 clo, 38 clz, 38 cto, 38 ctz, 38 bytepick, 40 revb, 40 revh, 41
bitrev.4b, 42 bitrev.8b, 42 bitrev.w, 43 bitrev.d, 43 bstrins, 43 bstrpick, 44 maskeqz, 44 masknez, 44 beq, 45 bne, 45 blt, 45 bge, 45 bltu, 45 bgeu, 45 beqz, 46 bnez, 46 b, 47 bl, 47 jirl, 48 ld, 49 st, 49 ldx, 51 stx, 51 ldptr, 53 stptr, 53 preld, 54 preldx, 55 ldgt, 56 ldle, 56 stgt, 56 stle, 56 amswap, 60 amadd, 60 amand, 60 amor, 60 amxor, 60 ammax, 60 ammin, 60 ll, 62 sc, 62 dbar, 62 ibar, 63 crc, 63 crcc, 63 syscall, 64 break, 64 asrtle, 65 asrtgt, 65 rdtime, 65 rdtimel, 65 rdtimeh, 65 cpucfg, 65 fadd, 78 fsub, 78 fmul, 78 fmadd, 80 fmsub, 80 fnmadd, 80 fnmsub, 80 fmax, 81 fmin, 81 fmaxa, 82 fmina, 82 fabs, 83 fneg, 83 fsqrt, 83 frecip, 83 frsqrt, 83 fscaleb, 85 flogb, 85 fcopysign, 85 fclass, 86 fcmp, 86 fcvt, 88 ffint, 88 ftint, 88 ftintrm, 90 ftintrp, 90 ftintrz, 90 ftintrne, 90 frint, 92 fmov, 93 fsel, 94 movgr2fr, 94 movgr2frh, 94 movfr2gr, 95 movfrh2gr, 95 movgr2fcsr, 95 movfcsr2gr, 95 movfr2cf, 96 movcf2fr, 96 movgr2cf, 96 movcf2gr, 96 bceqz, 97 bcnez, 97 fld, 97 fst, 97 fldx, 98 fstx, 98 fldgt, 100 fldle, 100 fstgt, 100 fstle, 100 csrrd, 103 csrwr, 103 csrxchg, 103 iocsrrd, 104 iocsrwr, 104 cacop, 104 tlbsrch, 105 tlbrd, 106 tlbwr, 106 tlbfill, 106 tlbclr, 107 tlbflush, 107 invtlb, 107 lddir, 108 ldpte, 108 ertn, 109 dbcl, 110 idle, 110 ================================================ FILE: pypcode/processors/Loongarch/data/patterns/loongarch_patterns.xml ================================================ 0x20 0x00 0x00 0x4c 0x63 ......00 111..... 0x02 01100001 .....000 11...... 0x29 11111111 ......11 ........ 01010011 0x80 0x01 0x00 0x4c 0x63 ......00 111..... 0x02 0x20 0x00 0x00 0x4c 0x63 ......00 111.....
0x02 ================================================ FILE: pypcode/processors/Loongarch/data/patterns/patternconstraints.xml ================================================ loongarch_patterns.xml ================================================ FILE: pypcode/processors/M16C/data/languages/M16C_60.cspec ================================================ ================================================ FILE: pypcode/processors/M16C/data/languages/M16C_60.ldefs ================================================ Renesas M16C/60 16-Bit MicroComputer ================================================ FILE: pypcode/processors/M16C/data/languages/M16C_60.pspec ================================================ ================================================ FILE: pypcode/processors/M16C/data/languages/M16C_60.slaspec ================================================ # Renesas M16C/60 16-Bit MicroComputer # # Memory Architecture # define endian=little; define alignment=1; define space RAM type=ram_space size=3 default; define space register type=register_space size=2; # # General Registers # define register offset=0x0000 size=2 [ R1 R3 R0 R2 A0 A1 ]; define register offset=0x0000 size=1 [ R1L R1H _ _ R0L R0H ]; define register offset=0x0000 size=4 [ R3R1 R2R0 A1A0 ]; define register offset=0x1000 size=3 [ PC # Program Counter ]; define register offset=0x2000 size=3 [ INTB # Interrupt Table Register ]; define register offset=0x2000 size=2 [ INTBL INTBH ]; define register offset=0x3000 size=2 [ SP # Current Stack Pointer (Represents active stack pointer: ISP or USP) FB # Frame Base Register SB # Static Base Register FLG # Flag Register ISP # Interrupt Stack Pointer ]; # Flag Register Contents (FLG) # # b15 - Reserved area # b14:b12 - Processor interrupt priority level # b11:b8 - Reserved area # b7 - (U) Stack pointer select flag # b6 - (I) Interrupt enable flag # b5 - (O) Overflow flag # b4 - (B) Register bank select flag # b3 - (S) Sign flag # b2 - (Z) Zero flag # b1 - (D) Debug 
flag # b0 - (C) Carry flag @define CARRY "FLG[0,1]" @define DEBUG "FLG[1,1]" @define ZERO "FLG[2,1]" @define SIGN "FLG[3,1]" @define REG_BANK "FLG[4,1]" @define OVERFLOW "FLG[5,1]" @define INTERRUPT "FLG[6,1]" @define STACK_SEL "FLG[7,1]" @define IPL "FLG[12,3]" # Define context bits define register offset=0xA000 size=4 contextreg; define context contextreg dstFollowsSrc = (0,1) # =1 destination add-on data follows 4-bit encoded source add-on data # =2 destination add-on data follows 8-bit data ; define token b0(8) b0_0007 = (0,7) ; define token b1(8) b1_d2 = (0,1) b1_d3 = (0,2) b1_d3_2 = (2,2) b1_2_reg8 = (2,2) b1_2_regAx = (2,2) b1_3_regAx = (3,3) b1_3_reg8 = (3,3) b1_size_0 = (0,0) b1_0407 = (4,7) b1_0307 = (3,7) b1_0107 = (1,7) b1_0007 = (0,7) b1_0002 = (0,2) b1_bit = (0,2) ; define token b2(8) b2_d4_reg8 = (0,1) b2_s4_reg8 = (4,5) b2_d4_reg16 = (0,1) b2_s4_reg16 = (4,5) b2_d4_reg32 = (0,0) b2_s4_reg32 = (4,4) b2_reg32 = (4,4) b2_d4_regAxSF = (0,1) # selects A0, A1, SB or FB b2_s4_regAxSF = (4,5) # selects A0, A1, SB or FB b2_d4_regAx = (0,0) b2_s4_regAx = (4,4) b2_reg16 = (4,6) b2_creg16 = (4,6) b2_d4 = (0,3) b2_d4_3 = (3,3) b2_d4_23 = (2,3) b2_d4_13 = (1,3) b2_s4 = (4,7) b2_s4_23 = (6,7) b2_s4_13 = (5,7) b2_shiftSign_7 = (7,7) b2_shiftSign_3 = (3,3) b2_0707 = (7,7) b2_0607 = (6,7) b2_0507 = (5,7) b2_0407 = (4,7) b2_0406 = (4,6) b2_0307 = (3,7) b2_0303 = (3,3) b2_0007 = (0,7) b2_0003 = (0,3) b2_0002 = (0,2) b2_simm4_0407 = (4,7) signed b2_simm4_0003 = (0,3) signed ; define token b3(8) b3_0407 = (4,7) b3_0007 = (0,7) b3_0003 = (0,3) ; define token b4(8) b4_0007 = (0,7) ; define token b5(8) b5_0007 = (0,7) ; define token b6(8) b6_0007 = (0,7) ; define token imm8(8) simm8_dat = (0,7) signed imm8_dat = (0,7) imm8_base = (3,7) # bit,base byte displacement imm8_bit = (0,2) # bit,base bit number simm8_base = (3,7) signed # bit,base signed byte displacement simm8_bit = (0,2) # bit,base signed bit number imm6_dat = (0,5) # int number cnd8_dat = (0,7) imm8_0607 = (6,7) 
imm8_0407 = (4,7) imm8_0003 = (0,3) regBit7 = (7,7) regBit6 = (6,6) regBit5 = (5,5) regBit4 = (4,4) regBit3 = (3,3) regBit2 = (2,2) regBit1 = (1,1) regBit0 = (0,0) ; define token imm16(16) simm16_dat = (0,15) signed imm16_dat = (0,15) imm16_base = (3,15) # bit,base byte displacement imm16_bit = (0, 2) # bit,base bit number ; define token imm24(24) simm24_dat = (0,23) signed imm24_dat = (0,23) simm20_dat = (0,19) imm20_dat = (0,19) ; define token imm32(32) simm32_dat = (0,31) signed imm32_dat = (0,31) ; attach variables [ b2_s4_reg16 b2_d4_reg16 ] [ R0 R1 R2 R3 ]; attach variables [ b2_s4_reg8 b2_d4_reg8 ] [ R0L R0H R1L R1H ]; attach variables [ b1_2_reg8 b1_3_reg8 ] [ R0L R0H ]; attach variables [ b2_s4_regAx b2_d4_regAx b1_3_regAx b1_2_regAx ] [ A0 A1 ]; attach variables [ b2_s4_regAxSF b2_d4_regAxSF ] [ A0 A1 SB FB ]; attach variables [ b2_reg16 ] [ R0 R1 R2 R3 A0 A1 _ _ ]; attach variables [ b2_creg16 ] [ _ INTBL INTBH FLG ISP SP SB FB ]; attach variables [ b2_reg32 b2_d4_reg32 ] [ R2R0 R3R1 ]; # # PCode Op # define pcodeop Break; # BRK define pcodeop DecimalAdd; # DADD define pcodeop DecimalAddWithCarry; # DADC define pcodeop DecimalSubtractWithBorrow; # DSBB define pcodeop DecimalSubtract; # DSUB define pcodeop Wait; # WAIT # # FLAG MACROS... 
# # Set zero and sign flags from result macro setResultFlags(result) { $(SIGN) = (result s< 0x0); $(ZERO) = (result == 0x0); } # Set carry and overflow flags for addition macro setAdd3Flags(v1, v2, v3) { local add13 = v1 + v3; $(CARRY) = carry(v1,v3) || carry(v2,add13); $(OVERFLOW) = scarry(v1,v3) || scarry(v2,add13); } # Set carry and overflow flags for addition macro setAddFlags(v1, v2) { $(CARRY) = carry(v1, v2); $(OVERFLOW) = scarry(v1, v2); } # Set overflow flags for subtraction of op3,op2 from op1 (op1-op2-op3) macro setSubtract3Flags(v1, v2, v3) { local add12 = v1 - v2; $(CARRY) = (v1 >= v2) || (add12 >= v3); $(OVERFLOW) = sborrow(v1, v2) || sborrow(add12, v3); } # Set overflow flags for subtraction of op2 from op1 (op1-op2) macro setSubtractFlags(v1, v2) { $(CARRY) = (v1 s>= v2); $(OVERFLOW) = sborrow(v1, v2); } macro push1(val) { SP = SP - 1; ptr:3 = zext(SP); *:1 ptr = val; } macro push2(val) { SP = SP - 2; ptr:3 = zext(SP); *:2 ptr = val; } macro push3(val) { SP = SP - 3; ptr:3 = zext(SP); *:3 ptr = val; } macro push4(val) { SP = SP - 4; ptr:3 = zext(SP); *:4 ptr = val; } macro pop1(val) { ptr:3 = zext(SP); val = *:1 ptr; SP = SP + 1; } macro pop2(val) { ptr:3 = zext(SP); val = *:2 ptr; SP = SP + 2; } macro pop3(val) { ptr:3 = zext(SP); val = *:3 ptr; SP = SP + 3; } macro pop4(val) { ptr:3 = zext(SP); val = *:4 ptr; SP = SP + 4; } # # Source operand location data # # Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement src4dsp8: imm8_dat^":8" is b1_0007; b2_s4; imm8_dat { export *[const]:2 imm8_dat; } src4dsp8: simm8_dat^":8" is b1_0007; b2_s4=0xb; simm8_dat { export *[const]:2 simm8_dat; } src4dsp16: imm16_dat^":16" is b1_0007; b2_s4; imm16_dat { export *[const]:2 imm16_dat; } # src4... 
Handle 4-bit encoded Source specified by b2_s4(4-bits) # Variable length pattern starting at instruction byte b1 # associated src4 add-on data immediately follows instruction byte b2 # abs16 cases are broken out differently to facilitate export of constant addresses in certain cases # 1-Byte source value/location specified by 4-bit encoding (b2_d4) src4B: b2_s4_reg8 is b1_0007; b2_s4_23=0x0 & b2_s4_reg8 { export b2_s4_reg8; } # Rx src4B: b2_s4_regAx is b1_0007; b2_s4_13=0x2 & b2_s4_regAx { tmp:1 = b2_s4_regAx:1; export tmp; } # Ax src4B: [b2_s4_regAx] is b1_0007; b2_s4_13=0x3 & b2_s4_regAx { ptr:3 = zext(b2_s4_regAx); export *:1 ptr; } # [Ax] src4B: src4dsp8^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x2 & b2_s4_regAxSF) ... & src4dsp8 { ptr:3 = zext(b2_s4_regAxSF + src4dsp8); export *:1 ptr; } # dsp:8[Ax|SB|FB] src4B: src4dsp16^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x3 & b2_s4_regAxSF) ... & src4dsp16 { ptr:3 = zext(b2_s4_regAxSF + src4dsp16); export *:1 ptr; } # dsp:16[Ax|SB|FB] src4B: imm16_dat is b1_0007; b2_s4=0xf; imm16_dat { export *:1 imm16_dat; } # abs16 (special constant address case) # 2-Byte source value/location specified by 2-bit encoding (b2_d4) src4W: b2_s4_reg16 is b1_0007; b2_s4_23=0x0 & b2_s4_reg16 { export b2_s4_reg16; } # Rx src4W: b2_s4_regAx is b1_0007; b2_s4_13=0x2 & b2_s4_regAx { export b2_s4_regAx; } # Ax src4W: [b2_s4_regAx] is b1_0007; b2_s4_13=0x3 & b2_s4_regAx { ptr:3 = zext(b2_s4_regAx); export *:2 ptr; } # [Ax] src4W: src4dsp8^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x2 & b2_s4_regAxSF) ... & src4dsp8 { ptr:3 = zext(b2_s4_regAxSF + src4dsp8); export *:2 ptr; } # dsp:8[Ax|SB|FB] src4W: src4dsp16^[b2_s4_regAxSF] is (b1_0007; b2_s4_23=0x3 & b2_s4_regAxSF) ... 
& src4dsp16 { ptr:3 = zext(b2_s4_regAxSF + src4dsp16); export *:2 ptr; } # dsp:16[Ax|SB|FB] src4W: imm16_dat is b1_0007; b2_s4=0xf; imm16_dat { export *:2 imm16_dat; } # abs16 (special constant address case) # # Destination operand location data (may also be used as a source in certain cases) # # Skip instruction and source add-on bytes which occur before destination add-on bytes # Starting position is at b1 skipBytesBeforeDst4: is b1_0007; b2_s4 { } skipBytesBeforeDst4: is dstFollowsSrc=1 & b1_0007; b2_s4_23=0x2; imm8_dat { } # src4: dsp8 skipBytesBeforeDst4: is dstFollowsSrc=1 & b1_0007; b2_s4_23=0x3; imm16_dat { } # src4: dsp16/abs16 skipBytesBeforeDst4: is dstFollowsSrc=2 & b1_0007; b2_d4; imm8_dat { } # dsp8 # Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement dst4dsp8: imm8_dat^":8" is (skipBytesBeforeDst4; imm8_dat) { export *[const]:2 imm8_dat; } dst4dsp8: simm8_dat^":8" is (b1_0007; b2_d4=0xb) ... & (skipBytesBeforeDst4; simm8_dat) { export *[const]:2 simm8_dat; } dst4dsp16: imm16_dat^":16" is (skipBytesBeforeDst4; imm16_dat) { export *[const]:2 imm16_dat; } # dst4... Handle 4-bit encoded Destination specified by b2_d4(4-bits) # Ax direct case is read-only! Instruction must use dst4Ax for write/update case # Variable length pattern starting at instruction byte b1 # abs16 cases are broken out differently to facilitate export of constant addresses in certain cases # 1-Byte destination value/location specified by 4-bit encoding (b2_d4) dst4B: b2_d4_reg8 is b1_0007; b2_d4_23=0x0 & b2_d4_reg8 { export b2_d4_reg8; } # Rx dst4B: b2_d4_regAx is b1_0007; b2_d4_13=0x2 & b2_d4_regAx { tmp:1 = b2_d4_regAx:1; export tmp; } # Ax - read-only use ! dst4B: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:1 ptr; } # [Ax] dst4B: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... 
& dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:1 ptr; } # dsp:8[Ax|SB|FB] dst4B: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:1 ptr; } # dsp:16[Ax|SB] dst4B: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *:1 imm16_dat; } # abs16 (special constant address case) # 2-Byte destination value/location specified by 4-bit encoding (b2_d4) dst4W: b2_d4_reg16 is b1_0007; b2_d4_23=0x0 & b2_d4_reg16 { export b2_d4_reg16; } # Rx dst4W: b2_d4_regAx is b1_0007; b2_d4_13=0x2 & b2_d4_regAx { export b2_d4_regAx; } # Ax dst4W: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:2 ptr; } # [Ax] dst4W: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:2 ptr; } # dsp:8[Ax|SB|FB] dst4W: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:2 ptr; } # dsp:16[Ax|SB] dst4W: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *:2 imm16_dat; } # abs16 (special constant address case) # 4-Byte destination value/location specified by 4-bit encoding (b2_d4) dst4L: b2_d4_reg32 is b1_0007; b2_d4_13=0x0 & b2_d4_reg32 { export b2_d4_reg32; } # Rx dst4L: A1A0 is A1A0 & b1_0007; b2_d4=0x4 { export A1A0; } # A1A0 dst4L: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:4 ptr; } # [Ax] dst4L: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:4 ptr; } # dsp:8[Ax|SB|FB] dst4L: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:4 ptr; } # dsp:16[Ax|SB] dst4L: imm16_dat is (b1_0007; b2_d4=0xf) ... 
& (skipBytesBeforeDst4; imm16_dat) { export *:4 imm16_dat; } # abs16 (special constant address case) # 3-Byte destination value/location specified by 4-bit encoding (b2_d4) - use DST4L to constrain, and dst4L for register Ax/Rx non-memory cases dst4T: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { ptr:3 = zext(b2_d4_regAx); export *:3 ptr; } # [Ax] dst4T: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export *:3 ptr; } # dsp:8[Ax|SB|FB] dst4T: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export *:3 ptr; } # dsp:16[Ax|SB] dst4T: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *:3 imm16_dat; } # abs16 (special constant address case) # 3-Byte effective address specified by 4-bit encoding (b2_d4) dst4A: dst4dsp8^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF) ... & dst4dsp8 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp8); export ptr; } # dsp:8[Ax|SB|FB] dst4A: dst4dsp16^[b2_d4_regAxSF] is (b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF) ... & dst4dsp16 { ptr:3 = zext(b2_d4_regAxSF + dst4dsp16); export ptr; } # dsp:16[Ax|SB] dst4A: imm16_dat is (b1_0007; b2_d4=0xf) ... & (skipBytesBeforeDst4; imm16_dat) { export *[const]:3 imm16_dat; } # abs16 (special constant address case) # Ax destination specified by 4-bit encoding (b2_d4) # NOTE! 
Ax destination is special case and must be handled separately by each instruction # Starting position is at instruction b1 dst4Ax: b2_d4_regAx is b1_0007; b2_d4_regAx { export b2_d4_regAx; } # 1/2-Byte destination value/location specified by 4-bit encoding (b2_d4) # This handles the case for dst4B, dst4W and dst4L where 5-bit encoded Source (src4) add-on bytes may exist before Destination add-on bytes # Variable length pattern starting at instruction byte b1 dst4B_afterSrc4: dst4B is dst4B [ dstFollowsSrc=1; ] { export dst4B; } dst4W_afterSrc4: dst4W is dst4W [ dstFollowsSrc=1; ] { export dst4W; } dst4L_afterSrc4: dst4L is dst4L [ dstFollowsSrc=1; ] { export dst4L; } # # The following macros are used to constrain bit patterns when using dst4 # These should be used by constructor pattern matching instead of the corresponding dst4 sub-constructor # @define DST4AX "((b1_0007; b2_d4_13=0x2) & dst4Ax)" @define DST4A "((b1_0007; b2_d4_3=1) ... & dst4A)" @define DST4T "((b1_0007; (b2_d4_3=1 | b2_d4_13=3)) ... & dst4T)" # Skip instruction and source add-on bytes which occur before destination add-on bytes # Starting position is at b1 skipBytesBeforeDst2: is b1_d2 { } skipBytesBeforeDst2: is dstFollowsSrc=2 & b1_d2; imm8_dat { } # dsp8 # # destination value/location specified by 2/3-bit encoding, R0H/R0L choice controlled by destination-bit (b1_0002) # dst2B: R0L is (R0L & b1_d3=0x4) { export R0L; } dst2B: R0H is (R0H & b1_d3=0x0) { export R0H; } dst2B: imm8_dat^[SB] is (SB & b1_d2=0x1) ... & (skipBytesBeforeDst2; imm8_dat) { ptr:3 = zext(SB + imm8_dat); export *:1 ptr; } dst2B: simm8_dat^[FB] is (FB & b1_d2=0x2) ... & (skipBytesBeforeDst2; simm8_dat) { ptr:3 = zext(FB + simm8_dat); export *:1 ptr; } dst2B: imm16_dat is (b1_d2=0x3) ... 
& (skipBytesBeforeDst2; imm16_dat) { export *:1 imm16_dat; } # # destination value/location specified by 3-bit encoding (must be constrained by DST3B or DST3B_AFTER_DSP8) # dst3B: R0L is (R0L & b1_d3=0x4) { export R0L; } dst3B: R0H is (R0H & b1_d3=0x3) { export R0H; } dst3B: imm8_dat^[SB] is (SB & b1_d3=0x5) ... & (skipBytesBeforeDst2; imm8_dat) { ptr:3 = zext(SB + imm8_dat); export *:1 ptr; } dst3B: simm8_dat^[FB] is (FB & b1_d3=0x6) ... & (skipBytesBeforeDst2; simm8_dat) { ptr:3 = zext(FB + simm8_dat); export *:1 ptr; } dst3B: imm16_dat is (b1_d3=0x7) ... & (skipBytesBeforeDst2; imm16_dat) { export *:1 imm16_dat; } # 1-Byte destination value/location specified by 3-bit encoding (b2_d3) # This handles the case for dst3B where Dsp8 add-on bytes always exist before Destination add-on bytes # Variable length pattern starting at instruction byte b1 dst3B_afterDsp8: dst3B is dst3B [ dstFollowsSrc=2; ] { export dst3B; } # # The following macros are used to constrain bit patterns when using dst2 for a 3-bit src/dest # These should be used by constructor pattern matching instead of the corresponding dst4 sub-constructor # @define DST3B "((b1_d3=3 | b1_d3_2=1) ... & dst3B)" @define DST3B_AFTER_DSP8 "((b1_d3=3 | b1_d3_2=1) ... 
& dst3B_afterDsp8)" # Special dsp8[SP] source/destination - starting point is on dsp8 data dsp8spB: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = zext(SP + simm8_dat); export *:1 ptr; } dsp8spW: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = zext(SP + simm8_dat); export *:2 ptr; } # Special dsp20[A0] source/destination - starting point is on dsp20 data dsp20A0B: simm20_dat^":20["^A0^"]" is A0 & simm20_dat { ptr:3 = zext(A0 + simm20_dat); export *:1 ptr; } dsp20A0W: simm20_dat^":20["^A0^"]" is A0 & simm20_dat { ptr:3 = zext(A0 + simm20_dat); export *:2 ptr; } # # Bit base - associated add-on data immediately follows instruction byte b2 # There are three cases which must be broken-out by instruction (regBase, memBaseAx, memBase) # # bit-base is bit,byte specified by [Ax] (constrain instruction pattern using b2_d4_13=0x3) - contexts of Ax are exported memBaseAx: [b2_d4_regAx] is b1_0007; b2_d4_13=0x3 & b2_d4_regAx { export b2_d4_regAx; } # [Ax] (special case! bit operand does not appear) # bit-base is 16-bit register: Rx or Ax (constrain instruction pattern using b2_d4_3=0) regBase: b2_d4_reg16 is b1_0007; b2_d4_23=0x0 & b2_d4_reg16 { export b2_d4_reg16; } # Rx regBase: b2_d4_regAx is b1_0007; b2_d4_13=0x2 & b2_d4_regAx { export b2_d4_regAx; } # Ax # bit-base is byte location within memory memBase: imm8_base^":8"^[b2_d4_regAxSF] is b1_0007; b2_d4_23=0x2 & b2_d4_regAxSF; imm8_base { ptr:3 = zext(b2_d4_regAxSF + imm8_base); export *:1 ptr; } # dsp:8[Ax|SB] memBase: simm8_base^":8"^[FB] is b1_0007; b2_d4_23=0x2 & b2_d4=0xb & FB; simm8_base { ptr:3 = zext(FB + simm8_base); export *:1 ptr; } # dsp:8[FB] memBase: imm16_base^":16"^[b2_d4_regAxSF] is b1_0007; b2_d4_23=0x3 & b2_d4_regAxSF; imm16_base { ptr:3 = zext(b2_d4_regAxSF + imm16_base); export *:1 ptr; } # dsp:16[Ax|SB] memBase: imm16_base^":16" is b1_0007; b2_d4=0xf; imm16_base { export *:1 imm16_base; } # abs16 (special constant address case) memBase11: imm8_dat^":11"^[SB] is SB & b1_0007; imm8_dat { ptr:3 = 
zext(SB + imm8_dat); export *:1 ptr; } # dsp:11[SB] # Bit operand associated with regBase operand # TODO: imm8_0407=0 constraint removed due to sleigh compiler issue regBit: imm8_0003 is b1_0007; b2_d4; imm8_0003 { export *[const]:1 imm8_0003; } # Rx, Ax # Bit operand associated with memBase operand memBit: imm8_bit is b1_0007; b2_d4; imm8_bit { export *[const]:1 imm8_bit; } # dsp:8[Ax|SB|FB] memBit: imm16_bit is b1_0007; b2_d4_23=3; imm16_bit { export *[const]:1 imm16_bit; } # dsp:16[Ax|SB], base:16 # # Immediate data operand # Fixed length - current position is at start of immediate data # srcImm3: "#"^b2_0002 is b2_0002 { export *[const]:1 b2_0002; } srcImm8: "#"^imm8_dat is imm8_dat { export *[const]:1 imm8_dat; } srcImm16: "#"^imm16_dat is imm16_dat { export *[const]:2 imm16_dat; } srcSimm8: "#"^simm8_dat is simm8_dat { export *[const]:1 simm8_dat; } srcSimm16: "#"^simm16_dat is simm16_dat { export *[const]:2 simm16_dat; } # Signed immediate data from 4-bit value: -8 <= value <= 7 # NOTE! There are two different cases based upon the bits used from b2 srcSimm4_0407: "#"^b2_simm4_0407 is b2_simm4_0407 { export *[const]:1 b2_simm4_0407; } srcSimm4_0003: "#"^b2_simm4_0003 is b2_simm4_0003 { export *[const]:1 b2_simm4_0003; } # Signed immediate shift amount from 4-bit value: -8 <= value <= -1 || 1 <= value <= 8 # NOTE! 
There are two different cases based upon the bits used from b2 srcSimm4Shift_0407: "#"^val is b2_shiftSign_7=0 & b2_0406 [ val = b2_0406 + 1; ] { export *[const]:1 val; } srcSimm4Shift_0407: "#"^val is b2_shiftSign_7=1 & b2_0406 [ val = -(b2_0406 + 1); ] { export *[const]:1 val; } srcSimm4Shift_0003: "#"^val is b2_shiftSign_3=0 & b2_0002 [ val = b2_0002 + 1; ] { export *[const]:1 val; } srcSimm4Shift_0003: "#"^val is b2_shiftSign_3=1 & b2_0002 [ val = -(b2_0002 + 1); ] { export *[const]:1 val; } srcZero8: "#0" is b1_0007 { export 0:1; } # special 6-bit immediate for INT number srcIntNum: "#"^imm6_dat is imm6_dat { export *[const]:1 imm6_dat; } # # Offset label operand # abs20offset: imm20_dat is imm20_dat { export *:1 imm20_dat; } abs20offsetW: imm20_dat is imm20_dat { export *:2 imm20_dat; } abs16offset: imm16_dat is imm16_dat { export *:1 imm16_dat; } # Relative address offsets rel16offset1: offs is simm16_dat [ offs = inst_start + 1 + simm16_dat; ] { export *:1 offs; } rel8offset1: offs is simm8_dat [ offs = inst_start + 1 + simm8_dat; ] { export *:1 offs; } rel8offset2: offs is simm8_dat [ offs = inst_start + 2 + simm8_dat; ] { export *:1 offs; } rel3offset2: offs is b1_0002 [ offs = inst_start + 2 + b1_0002; ] { export *:1 offs; } reloffset_dst4W: dst4W is dst4W { local reladdr = inst_start + dst4W; export *:3 reladdr; } reloffset_dst4L: dst4L is dst4L { local reladdr = inst_start + dst4L; export *:3 reladdr; } reloffset_dst4T: dst4T is $(DST4T) { local reladdr = inst_start + dst4T; export *:3 reladdr; } # # Conditionals # cnd8: "GEU" is cnd8_dat=0x00 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1 cnd8: "GTU" is cnd8_dat=0x01 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<) cnd8: "EQ" is cnd8_dat=0x02 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1 cnd8: "N" is cnd8_dat=0x03 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>) cnd8: "LE" is 
cnd8_dat=0x04 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=)
# cnd8 / b2cnd4 / b2cnd3 / b1cnd3: condition-code subtables. Each alternative
# computes a 1-byte boolean (tstCnd) from the C/Z/S/O flag macros and exports it
# for use by conditional instructions (BMcnd, JCnd).
cnd8: "O" is cnd8_dat=0x05 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1
cnd8: "GE" is cnd8_dat=0x06 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=)
cnd8: "LTU" is cnd8_dat=0xf8 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0
cnd8: "LEU" is cnd8_dat=0xf9 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=)
cnd8: "NE" is cnd8_dat=0xfa { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0
cnd8: "PZ" is cnd8_dat=0xfb { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=)
cnd8: "GT" is cnd8_dat=0xfc { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<)
cnd8: "NO" is cnd8_dat=0xfd { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0
cnd8: "LT" is cnd8_dat=0xfe { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=)
b2cnd4: "GEU" is b2_0003=0x0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1
b2cnd4: "GTU" is b2_0003=0x1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<)
b2cnd4: "EQ" is b2_0003=0x2 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1
b2cnd4: "N" is b2_0003=0x3 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>)
b2cnd4: "LTU" is b2_0003=0x4 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0
b2cnd4: "LEU" is b2_0003=0x5 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=)
b2cnd4: "NE" is b2_0003=0x6 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0
b2cnd4: "PZ" is b2_0003=0x7 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=)
b2cnd4: "LE" is b2_0003=0x8 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=)
b2cnd4: "O" is b2_0003=0x9 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1
b2cnd4: "GE" is b2_0003=0xa { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=)
b2cnd4: "GT" is b2_0003=0xc { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<)
b2cnd4: "NO" is b2_0003=0xd { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0
b2cnd4: "LT" is b2_0003=0xe { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=)
# Special case of b2cnd4 where b2_0003=1 (see JCnd)
b2cnd3: "LE" is b2_0002=0x0 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=)
b2cnd3: "O" is b2_0002=0x1 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1
b2cnd3: "GE" is b2_0002=0x2 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=)
b2cnd3: "GT" is b2_0002=0x4 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<)
b2cnd3: "NO" is b2_0002=0x5 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0
b2cnd3: "LT" is b2_0002=0x6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=)
b1cnd3: "LTU" is b1_0002=4 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0
b1cnd3: "LEU" is b1_0002=5 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=)
b1cnd3: "NE" is b1_0002=6 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0
b1cnd3: "PZ" is b1_0002=7 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=)
b1cnd3: "GEU" is b1_0002=0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1
b1cnd3: "GTU" is b1_0002=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<)
b1cnd3: "EQ" is b1_0002=2 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1
b1cnd3: "N" is b1_0002=3 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>)
#
# Flag bit operand
#
# flagBit exports the bit position (0..7) within the FLG register selected by
# the b2_0406 field; used by FCLR/FSET below.
flagBit: "C" is b2_0406=0 { export 0:2; }
flagBit: "D" is b2_0406=1 { export 1:2; }
flagBit: "Z" is b2_0406=2 { export 2:2; }
flagBit: "S" is b2_0406=3 { export 3:2; }
flagBit: "B" is b2_0406=4 { export 4:2; }
flagBit: "O" is b2_0406=5 { export 5:2; }
flagBit: "I" is b2_0406=6 { export 6:2; }
flagBit: "U" is b2_0406=7 { export 7:2; }
#
# Instruction Constructors
#
### ABS ###
# ABS negates the destination when it is negative; O is set for the most
# negative value (0x80 / 0x8000), whose negation does not change it.
:ABS.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xf) ... & dst4B { local tmp = dst4B; $(OVERFLOW) = (tmp == 0x80); local ztst = (tmp s< 0); tmp = (zext(ztst) * -tmp) + (zext(!ztst) * tmp); dst4B = tmp; setResultFlags(tmp); }
# 0111 0110 1111 0100 ABS.B A0
# 0111 0110 1111 0001 ABS.B R0H
:ABS.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xf) & $(DST4AX) { local tmp = dst4Ax:1; $(OVERFLOW) = (tmp == 0x80); local ztst = (tmp s< 0); tmp = (zext(ztst) * -tmp) + (zext(!ztst) * tmp); dst4Ax = zext(tmp); setResultFlags(tmp); }
:ABS.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xf) ... & dst4W { local tmp = dst4W; $(OVERFLOW) = (tmp == 0x8000); local ztst = (tmp s< 0); tmp = (zext(ztst) * -tmp) + (zext(!ztst) * tmp); dst4W = tmp; setResultFlags(tmp); }
### ADC ###
# (1) ADC.B #simm, dst
:ADC.B srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x6) ...
& dst4B); srcSimm8 { tmp:1 = dst4B; c:1 = $(CARRY); setAdd3Flags(tmp, srcSimm8, c); tmp = tmp + srcSimm8 + c; dst4B = tmp; setResultFlags(tmp); }
# NOTE(review): setAdd3Flags/setAddFlags/setSubtractFlags/setResultFlags are
# helper macros — presumably defined earlier in this spec (not visible in this
# section); flag side effects depend on their definitions.
# (1) ADC.B #simm, Ax
:ADC.B srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x6) & $(DST4AX)); srcSimm8 { tmp:1 = dst4Ax:1; c:1 = $(CARRY); setAdd3Flags(tmp, srcSimm8, c); tmp = tmp + srcSimm8 + c; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (1) ADC.W #simm, dst
:ADC.W srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x6) ... & dst4W); srcSimm16 { tmp:2 = dst4W; c:2 = zext($(CARRY)); setAdd3Flags(tmp, srcSimm16, c); tmp = tmp + srcSimm16 + c; dst4W = tmp; setResultFlags(tmp); }
# (2) ADC.B src, dst
:ADC.B src4B, dst4B_afterSrc4 is (b1_0107=0x58 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4; src:1 = src4B; c:1 = $(CARRY); setAdd3Flags(tmp, src, c); tmp = tmp + src + c; dst4B_afterSrc4 = tmp; setResultFlags(tmp); }
# (2) ADC.B src, Ax
:ADC.B src4B, dst4Ax is (b1_0107=0x58 & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1; src:1 = src4B; c:1 = $(CARRY); setAdd3Flags(tmp, src, c); tmp = tmp + src + c; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (2) ADC.W src, dst
:ADC.W src4W, dst4W_afterSrc4 is (b1_0107=0x58 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4; src:2 = src4W; c:2 = zext($(CARRY)); setAdd3Flags(tmp, src, c); tmp = tmp + src + c; dst4W_afterSrc4 = tmp; setResultFlags(tmp); }
### ADCF ###
# ADCF adds only the carry flag to the destination.
:ADCF.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xe) ... & dst4B { tmp:1 = dst4B; c:1 = $(CARRY); setAddFlags(tmp, c); tmp = tmp + c; dst4B = tmp; setResultFlags(tmp); }
:ADCF.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xe) & $(DST4AX) { tmp:1 = dst4Ax:1; c:1 = $(CARRY); setAddFlags(tmp, c); tmp = tmp + c; dst4Ax = zext(tmp); setResultFlags(tmp); }
:ADCF.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xe) ... & dst4W { tmp:2 = dst4W; c:2 = zext($(CARRY)); setAddFlags(tmp, c); tmp = tmp + c; dst4W = tmp; setResultFlags(tmp); }
### ADD ###
# (1) ADD.B:G #simm, dst
:ADD^".B:G" srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x4) ... & dst4B); srcSimm8 { tmp:1 = dst4B; setAddFlags(tmp, srcSimm8); tmp = tmp + srcSimm8; dst4B = tmp; setResultFlags(tmp); }
# (1) ADD.B:G #simm, Ax
:ADD^".B:G" srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x4) & $(DST4AX)); srcSimm8 { tmp:1 = dst4Ax:1; setAddFlags(tmp, srcSimm8); tmp = tmp + srcSimm8; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (1) ADD.W:G #simm, dst
:ADD^".W:G" srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x4) ... & dst4W); srcSimm16 { tmp:2 = dst4W; setAddFlags(tmp, srcSimm16); tmp = tmp + srcSimm16; dst4W = tmp; setResultFlags(tmp); }
# (2) ADD.B:Q #simm4, dst
:ADD^".B:Q" srcSimm4_0407, dst4B is (b1_0107=0x64 & b1_size_0=0; srcSimm4_0407) ... & dst4B { tmp:1 = dst4B; setAddFlags(tmp, srcSimm4_0407); tmp = tmp + srcSimm4_0407; dst4B = tmp; setResultFlags(tmp); }
# (2) ADD.B:Q #simm4, Ax
:ADD^".B:Q" srcSimm4_0407, dst4Ax is (b1_0107=0x64 & b1_size_0=0; srcSimm4_0407) & $(DST4AX) { tmp:1 = dst4Ax:1; setAddFlags(tmp, srcSimm4_0407); tmp = tmp + srcSimm4_0407; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (2) ADD.W:Q #simm4, dst
:ADD^".W:Q" srcSimm4_0407, dst4W is (b1_0107=0x64 & b1_size_0=1; srcSimm4_0407) ... & dst4W { tmp:2 = dst4W; imm:2 = sext(srcSimm4_0407); setAddFlags(tmp, imm); tmp = tmp + imm; dst4W = tmp; setResultFlags(tmp); }
# (3) ADD.B:S #imm, dst
:ADD^".B:S" srcSimm8, dst3B_afterDsp8 is (b1_0307=0x10; srcSimm8) ... & $(DST3B_AFTER_DSP8) { tmp:1 = dst3B_afterDsp8; setAddFlags(tmp, srcSimm8); tmp = tmp + srcSimm8; dst3B_afterDsp8 = tmp; setResultFlags(tmp); }
# (4) ADD.B:G src, dst
:ADD^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x50 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4; src:1 = src4B; setAddFlags(tmp, src); tmp = tmp + src; dst4B_afterSrc4 = tmp; setResultFlags(tmp); }
# (4) ADD.B:G src, Ax
:ADD^".B:G" src4B, dst4Ax is (b1_0107=0x50 & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1; src:1 = src4B; setAddFlags(tmp, src); tmp = tmp + src; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (4) ADD.W:G src, dst
:ADD^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x50 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4; src:2 = src4W; setAddFlags(tmp, src); tmp = tmp + src; dst4W_afterSrc4 = tmp; setResultFlags(tmp); }
# (5) ADD.B:S src, R0H/R0L
:ADD^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x4 & b1_2_reg8) ... & dst2B { src:1 = dst2B; setAddFlags(b1_2_reg8, src); b1_2_reg8 = b1_2_reg8 + src; setResultFlags(b1_2_reg8); }
# (6) ADD.B:G #simm, SP
:ADD^".B:G" srcSimm8, SP is SP & b1_0107=0x3e & b1_size_0=0; b2_0007=0xeb; srcSimm8 { imm:2 = sext(srcSimm8); setAddFlags(SP, imm); SP = SP + imm; setResultFlags(SP); }
# (6) ADD.W:G #simm, SP
:ADD^".W:G" srcSimm16, SP is SP & b1_0107=0x3e & b1_size_0=1; b2_0007=0xeb; srcSimm16 { setAddFlags(SP, srcSimm16); SP = SP + srcSimm16; setResultFlags(SP); }
# (7) ADD.W:Q #simm, SP
# NOTE(review): the comment above says ADD.W:Q but the mnemonic below prints
# ".B:Q" — confirm the intended suffix against the M16C software manual.
:ADD^".B:Q" srcSimm4_0003, SP is SP & b1_0007=0x7d; b2_0407=0xb & srcSimm4_0003 { imm:2 = sext(srcSimm4_0003); setAddFlags(SP, imm); SP = SP + imm; setResultFlags(SP); }
### ADJNZ ###
# Add the 4-bit immediate to dst, then branch if the result is non-zero.
:ADJNZ.B srcSimm4_0407, dst4B is ((b1_0107=0x7c & b1_size_0=0; srcSimm4_0407) ... & dst4B); rel8offset2 { tmp:1 = dst4B + srcSimm4_0407; dst4B = tmp; if (tmp != 0) goto rel8offset2; }
:ADJNZ.B srcSimm4_0407, dst4Ax is ((b1_0107=0x7c & b1_size_0=0; srcSimm4_0407) & $(DST4AX)); rel8offset2 { tmp:1 = dst4Ax:1 + srcSimm4_0407; dst4Ax = zext(tmp); if (tmp != 0) goto rel8offset2; }
:ADJNZ.W srcSimm4_0407, dst4W is ((b1_0107=0x7c & b1_size_0=1; srcSimm4_0407) ... & dst4W); rel8offset2 { tmp:2 = dst4W + sext(srcSimm4_0407); dst4W = tmp; if (tmp != 0) goto rel8offset2; }
### AND ###
# (1) AND.B:G #imm, dst
:AND^".B:G" srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x2) ... & dst4B); srcImm8 { tmp:1 = dst4B & srcImm8; dst4B = tmp; setResultFlags(tmp); }
# (1) AND.B:G #imm, Ax
:AND^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x2) & $(DST4AX)); srcImm8 { tmp:1 = dst4Ax:1 & srcImm8; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (1) AND.W:G #imm, dst
:AND^".W:G" srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x2) ... & dst4W); srcImm16 { tmp:2 = dst4W & srcImm16; dst4W = tmp; setResultFlags(tmp); }
# (2) AND.B:S #imm, dst
:AND^".B:S" srcImm8, dst3B_afterDsp8 is (b1_0307=0x12; srcImm8) ... & $(DST3B_AFTER_DSP8) { tmp:1 = dst3B_afterDsp8 & srcImm8; dst3B_afterDsp8 = tmp; setResultFlags(tmp); }
# (3) AND.B:G src, dst
:AND^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x48 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4 & src4B; dst4B_afterSrc4 = tmp; setResultFlags(tmp); }
# (3) AND.B:G src, Ax
:AND^".B:G" src4B, dst4Ax is (b1_0107=0x48 & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1 & src4B; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (3) AND.W:G src, dst
:AND^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x48 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4 & src4W; dst4W_afterSrc4 = tmp; setResultFlags(tmp); }
# (4) AND.B:S src, R0L/R0H
:AND^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x2 & b1_2_reg8) ... & dst2B { tmp:1 = dst2B & b1_2_reg8; b1_2_reg8 = tmp; setResultFlags(tmp); }
### BAND ###
# BAND bit,Rx/Ax
:BAND regBit, regBase is (b1_0007=0x7e; b2_0407=0x4 & b2_d4_3=0) ... & regBase ... & regBit { bitValue:2 = (regBase >> regBit) & 1; $(CARRY) = $(CARRY) & bitValue:1; }
# BAND [Ax]
:BAND memBaseAx is (b1_0007=0x7e; b2_0407=0x4 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; bitValue:1 = (val >> bit) & 1; $(CARRY) = $(CARRY) & bitValue; }
# BAND bit,base
:BAND memBit, memBase is (b1_0007=0x7e; b2_0407=0x4) ... & memBase & memBit { bitValue:1 = (memBase >> memBit) & 1; $(CARRY) = $(CARRY) & bitValue; }
### BCLR ###
# (1) BCLR:G bit,Rx/Ax
:BCLR^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0x8 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = ~(1 << regBit); regBase = regBase & mask; }
# (1) BCLR:G [Ax]
:BCLR^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0x8 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = ~(1 << bit); *:1 ptr = val & mask; }
# (1) BCLR:G bit,base
:BCLR^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0x8) ... & memBase & memBit { mask:1 = ~(1 << memBit); memBase = memBase & mask; }
# (2) BCLR:S bit,base:11[SB]
:BCLR^":S" b1_bit, memBase11 is (b1_0307=0x08 & b1_bit) ... & memBase11 { mask:1 = ~(1 << b1_bit); memBase11 = memBase11 & mask; }
### BMcnd ###
# (1) BMcnd bit,Rx/Ax
:BM^cnd8 regBit, regBase is ((b1_0007=0x7e; b2_0407=0x2 & b2_d4_3=0) ... & regBase ... & regBit); cnd8 { mask:2 = ~(1 << regBit); regBase = ((zext(cnd8) << regBit) | (regBase & mask)); }
# (1) BMcnd [Ax]
:BM^cnd8 memBaseAx is ((b1_0007=0x7e; b2_0407=0x2 & b2_d4_13=0x3) & memBaseAx); cnd8 { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = ~(1 << bit); *:1 ptr = ((cnd8 << bit) | (val & mask)); }
# (1) BMcnd bit,base
:BM^cnd8 memBit, memBase is ((b1_0007=0x7e; b2_0407=0x2) ...
& memBase & memBit); cnd8 { mask:1 = ~(1 << memBit); memBase = ((cnd8 << memBit) | (memBase & mask)); }
# (2) BMcnd C
:BM^b2cnd4 "C" is b1_0007=0x7d; b2_0407=0xd & b2cnd4 { $(CARRY) = b2cnd4; }
### BNAND ###
# Bit operations below come in three addressing forms: bit within a register
# (regBit/regBase), bit address packed in Ax (memBaseAx: byte address = Ax>>3,
# bit index = Ax low 3 bits), and bit within a memory base (memBit/memBase).
# BNAND bit,Rx/Ax
:BNAND regBit, regBase is (b1_0007=0x7e; b2_0407=0x5 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); $(CARRY) = $(CARRY) && (bitValue == 0); }
# BNAND [Ax]
:BNAND memBaseAx is (b1_0007=0x7e; b2_0407=0x5 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); $(CARRY) = $(CARRY) && (bitValue == 0); }
# BNAND bit,base
:BNAND memBit, memBase is (b1_0007=0x7e; b2_0407=0x5) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); $(CARRY) = $(CARRY) && (bitValue == 0); }
### BNOR ###
# BNOR bit,Rx/Ax
:BNOR regBit, regBase is (b1_0007=0x7e; b2_0407=0x7 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); $(CARRY) = $(CARRY) || (bitValue == 0); }
# BNOR [Ax]
:BNOR memBaseAx is (b1_0007=0x7e; b2_0407=0x7 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); $(CARRY) = $(CARRY) || (bitValue == 0); }
# BNOR bit,base
:BNOR memBit, memBase is (b1_0007=0x7e; b2_0407=0x7) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); $(CARRY) = $(CARRY) || (bitValue == 0); }
### BNOT ###
# (1) BNOT:G bit,Rx/Ax
:BNOT^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0xa & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (~regBase & mask); regBase = (regBase & ~mask) | bitValue; }
# (1) BNOT:G [Ax]
:BNOT^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0xa & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (~val & mask); *:1 ptr = (val & ~mask) | bitValue; }
# (1) BNOT:G bit,base
:BNOT^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0xa) ... & memBase & memBit { mask:1 = (1 << memBit); val:1 = memBase; bitValue:1 = (~val & mask); memBase = (val & ~mask) | bitValue; }
# (2) BNOT:S bit,base:11[SB]
:BNOT^":S" b1_bit, memBase11 is (b1_0307=0x0a & b1_bit) ... & memBase11 { mask:1 = (1 << b1_bit); val:1 = memBase11; bitValue:1 = (~val & mask); memBase11 = (val & ~mask) | bitValue; }
### BNTST ###
# BNTST bit,Rx/Ax
:BNTST regBit, regBase is (b1_0007=0x7e; b2_0407=0x3 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); z:1 = (bitValue == 0); $(CARRY) = z; $(ZERO) = z; }
# BNTST [Ax]
:BNTST memBaseAx is (b1_0007=0x7e; b2_0407=0x3 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = z; $(ZERO) = z; }
# BNTST bit,base
:BNTST memBit, memBase is (b1_0007=0x7e; b2_0407=0x3) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); z:1 = (bitValue == 0); $(CARRY) = z; $(ZERO) = z; }
### BNXOR ###
# BNXOR bit,Rx/Ax
:BNXOR regBit, regBase is (b1_0007=0x7e; b2_0407=0xd & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); $(CARRY) = $(CARRY) ^ (bitValue == 0); }
# BNXOR [Ax]
:BNXOR memBaseAx is (b1_0007=0x7e; b2_0407=0xd & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); $(CARRY) = $(CARRY) ^ (bitValue == 0); }
# BNXOR bit,base
:BNXOR memBit, memBase is (b1_0007=0x7e; b2_0407=0xd) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); $(CARRY) = $(CARRY) ^ (bitValue == 0); }
### BOR ###
# BOR bit,Rx/Ax
:BOR regBit, regBase is (b1_0007=0x7e; b2_0407=0x6 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); $(CARRY) = $(CARRY) || (bitValue != 0); }
# BOR [Ax]
:BOR memBaseAx is (b1_0007=0x7e; b2_0407=0x6 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); $(CARRY) = $(CARRY) || (bitValue != 0); }
# BOR bit,base
:BOR memBit, memBase is (b1_0007=0x7e; b2_0407=0x6) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); $(CARRY) = $(CARRY) || (bitValue != 0); }
### BRK ###
:BRK is b1_0007=0x0 {
	# most likely not necessary to model break behavior
	Break();
}
### BSET ###
# (1) BSET:G bit,Rx/Ax
:BSET^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0x9 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); regBase = regBase | mask; }
# (1) BSET:G [Ax]
:BSET^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0x9 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); *:1 ptr = val | mask; }
# (1) BSET:G bit,base
:BSET^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0x9) ... & memBase & memBit { mask:1 = (1 << memBit); memBase = memBase | mask; }
# (2) BSET:S bit,base:11[SB]
:BSET^":S" b1_bit, memBase11 is (b1_0307=0x09 & b1_bit) ... & memBase11 { mask:1 = (1 << b1_bit); memBase11 = memBase11 | mask; }
### BTST ###
# (1) BTST:G bit,Rx/Ax
:BTST^":G" regBit, regBase is (b1_0007=0x7e; b2_0407=0xb & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; }
# (1) BTST:G [Ax]
:BTST^":G" memBaseAx is (b1_0007=0x7e; b2_0407=0xb & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; }
# (1) BTST:G bit,base
:BTST^":G" memBit, memBase is (b1_0007=0x7e; b2_0407=0xb) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; }
# (2) BTST:S bit,base:11[SB]
:BTST^":S" b1_bit, memBase11 is (b1_0307=0x0b & b1_bit) ... & memBase11 { mask:1 = (1 << b1_bit); bitValue:1 = (memBase11 & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; }
### BTSTC ###
# BTSTC: test the bit into C/Z, then clear it.
# BTSTC bit,Rx/Ax
:BTSTC regBit, regBase is (b1_0007=0x7e; b2_0407=0x0 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; regBase = regBase & ~mask; }
# BTSTC [Ax]
:BTSTC memBaseAx is (b1_0007=0x7e; b2_0407=0x0 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; *:1 ptr = val & ~mask; }
# BTSTC bit,base
:BTSTC memBit, memBase is (b1_0007=0x7e; b2_0407=0x0) ... & memBase & memBit { mask:1 = (1 << memBit); val:1 = memBase; bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; memBase = val & ~mask; }
### BTSTS ###
# BTSTS: test the bit into C/Z, then set it.
# BTSTS bit,Rx/Ax
:BTSTS regBit, regBase is (b1_0007=0x7e; b2_0407=0x1 & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; regBase = regBase | mask; }
# BTSTS [Ax]
:BTSTS memBaseAx is (b1_0007=0x7e; b2_0407=0x1 & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; *:1 ptr = val | mask; }
# BTSTS bit,base
:BTSTS memBit, memBase is (b1_0007=0x7e; b2_0407=0x1) ... & memBase & memBit { mask:1 = (1 << memBit); val:1 = memBase; bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; memBase = val | mask; }
### BXOR ###
# BXOR bit,Rx/Ax
:BXOR regBit, regBase is (b1_0007=0x7e; b2_0407=0xc & b2_d4_3=0) ... & regBase ... & regBit { mask:2 = (1 << regBit); bitValue:2 = (regBase & mask); $(CARRY) = $(CARRY) ^ (bitValue != 0); }
# BXOR [Ax]
:BXOR memBaseAx is (b1_0007=0x7e; b2_0407=0xc & b2_d4_13=0x3) & memBaseAx { ptr:3 = zext(memBaseAx >> 3); bit:1 = memBaseAx:1 & 0x7; val:1 = *:1 ptr; mask:1 = (1 << bit); bitValue:1 = (val & mask); $(CARRY) = $(CARRY) ^ (bitValue != 0); }
# BXOR bit,base
:BXOR memBit, memBase is (b1_0007=0x7e; b2_0407=0xc) ... & memBase & memBit { mask:1 = (1 << memBit); bitValue:1 = (memBase & mask); $(CARRY) = $(CARRY) ^ (bitValue != 0); }
### CMP ###
# CMP performs the subtraction only for flag effects; no destination is written.
# (1) CMP.B:G #simm, dst
:CMP^".B:G" srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x8) ... & dst4B); srcSimm8 { tmp:1 = dst4B; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; setResultFlags(tmp); }
# (1) CMP.B:G #simm, Ax
:CMP^".B:G" srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x8) & $(DST4AX)); srcSimm8 { tmp:1 = dst4Ax:1; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; setResultFlags(tmp); }
# (1) CMP.W:G #simm, dst
:CMP^".W:G" srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x8) ...
& dst4W); srcSimm16 { tmp:2 = dst4W; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; setResultFlags(tmp); }
# (2) CMP.B:Q #simm4, dst
:CMP^".B:Q" srcSimm4_0407, dst4B is (b1_0107=0x68 & b1_size_0=0; srcSimm4_0407) ... & dst4B { tmp:1 = dst4B; setSubtractFlags(tmp, srcSimm4_0407); tmp = tmp - srcSimm4_0407; setResultFlags(tmp); }
# (2) CMP.B:Q #simm4, Ax
:CMP^".B:Q" srcSimm4_0407, dst4Ax is (b1_0107=0x68 & b1_size_0=0; srcSimm4_0407) & $(DST4AX) { tmp:1 = dst4Ax:1; setSubtractFlags(tmp, srcSimm4_0407); tmp = tmp - srcSimm4_0407; setResultFlags(tmp); }
# (2) CMP.W:Q #simm4, dst
:CMP^".W:Q" srcSimm4_0407, dst4W is (b1_0107=0x68 & b1_size_0=1; srcSimm4_0407) ... & dst4W { tmp:2 = dst4W; imm:2 = sext(srcSimm4_0407); setSubtractFlags(tmp, imm); tmp = tmp - imm; setResultFlags(tmp); }
# (3) CMP.B:S #imm, dst
:CMP^".B:S" srcSimm8, dst3B_afterDsp8 is (b1_0307=0x1c; srcSimm8) ... & $(DST3B_AFTER_DSP8) { tmp:1 = dst3B_afterDsp8; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; setResultFlags(tmp); }
# (4) CMP.B:G src, dst
:CMP^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x60 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4; src:1 = src4B; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); }
# (4) CMP.B:G src, Ax
:CMP^".B:G" src4B, dst4Ax is (b1_0107=0x60 & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1; src:1 = src4B; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); }
# (4) CMP.W:G src, dst
:CMP^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x60 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4; src:2 = src4W; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); }
# (5) CMP.B:S src, R0H/R0L
:CMP^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x7 & b1_2_reg8) ... & dst2B { src:1 = dst2B; setSubtractFlags(b1_2_reg8, src); b1_2_reg8 = b1_2_reg8 - src; setResultFlags(b1_2_reg8); }
### DADC ###
# Decimal (BCD) add with carry; DecimalAddWithCarry is a pcodeop/macro defined
# elsewhere in this spec. Carry is set when the widened BCD sum exceeds the
# largest valid BCD value for the operand size (0x99 / 0x9999).
# (1) DADC.B #imm, R0L
:DADC.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xee; srcImm8 { src:2 = zext(srcImm8); dst:2 = zext(R0L); tmp:2 = DecimalAddWithCarry(src, dst); R0L = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); }
# (2) DADC.W #imm, R0
:DADC.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xee; srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(R0); tmp:4 = DecimalAddWithCarry(src, dst); R0 = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); }
# (3) DADC.B R0H, R0L
:DADC.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe6 { src:2 = zext(R0H); dst:2 = zext(R0L); tmp:2 = DecimalAddWithCarry(src, dst); R0L = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); }
# (4) DADC.W R1, R0
:DADC.W R1, R0 is R1 & R0 & b1_0007=0x7d; b2_0007=0xe6 { src:4 = zext(R1); dst:4 = zext(R0); tmp:4 = DecimalAddWithCarry(src, dst); R0 = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); }
### DADD ###
# (1) DADD.B #imm, R0L
:DADD.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xec; srcImm8 { src:2 = zext(srcImm8); dst:2 = zext(R0L); tmp:2 = DecimalAdd(src, dst); R0L = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); }
# (2) DADD.W #imm, R0
:DADD.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xec; srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(R0); tmp:4 = DecimalAdd(src, dst); R0 = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); }
# (3) DADD.B R0H, R0L
:DADD.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe4 { src:2 = zext(R0H); dst:2 = zext(R0L); tmp:2 = DecimalAdd(src, dst); R0L = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); }
# (4) DADD.W R1, R0
:DADD.W R1, R0 is R1 & R0 & b1_0007=0x7d; b2_0007=0xe4 { src:4 = zext(R1); dst:4 = zext(R0); tmp:4 = DecimalAdd(src, dst); R0 = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); }
### DEC ###
# (1) DEC.B dst
:DEC.B dst3B is b1_0307=0x15 ... & $(DST3B) { dst:1 = dst3B; setSubtractFlags(dst, 1); dst = dst - 1; dst3B = dst; setResultFlags(dst); }
# (2) DEC.W dst
:DEC.W b1_3_regAx is b1_0407=0xf & b1_0002=0x2 & b1_3_regAx { dst:2 = b1_3_regAx; setSubtractFlags(dst, 1); dst = dst - 1; b1_3_regAx = dst; setResultFlags(dst); }
### DIV ###
# Signed divide: quotient goes to R0L/R0, remainder to R0H/R2. O is set on
# divide-by-zero or when the quotient does not fit the low half.
# (1) DIV.B #imm
:DIV.B srcSimm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe1; srcSimm8 {
	d:2 = sext(srcSimm8); q:2 = R0 s/ d;
	r:2 = R0 s% d; # remainder has same sign as R0 (dividend)
	R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
# (1) DIV.W #imm
:DIV.W srcSimm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe1; srcSimm16 {
	d:4 = sext(srcSimm16); q:4 = R2R0 s/ d;
	r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend)
	R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
# (2) DIV.B src
:DIV.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xd) ... & dst4B {
	d:2 = sext(dst4B); q:2 = R0 s/ d;
	r:2 = R0 s% d; # remainder has same sign as R0 (dividend)
	R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
# (2) DIV.W src
:DIV.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xd) ... & dst4W {
	d:4 = sext(dst4W); q:4 = R2R0 s/ d;
	r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend)
	R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
### DIVU ###
# (1) DIVU.B #imm
:DIVU.B srcImm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe0; srcImm8 { d:2 = zext(srcImm8); q:2 = R0 / d; r:2 = R0 % d; R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); }
# (1) DIVU.W #imm
:DIVU.W srcImm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe0; srcImm16 { d:4 = zext(srcImm16); q:4 = R2R0 / d; r:4 = R2R0 % d; R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); }
# (2) DIVU.B src
:DIVU.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xc) ... & dst4B { d:2 = zext(dst4B); q:2 = R0 / d; r:2 = R0 % d; R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); }
# (2) DIVU.W src
:DIVU.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xc) ... & dst4W { d:4 = zext(dst4W); q:4 = R2R0 / d; r:4 = R2R0 % d; R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); }
### DIVX ###
# (1) DIVX.B #imm
:DIVX.B srcSimm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe3; srcSimm8 {
	d:2 = sext(srcSimm8); q:2 = R0 s/ d;
	r:2 = R0 s% d; #according to the manual the remainder has the same sign as the quotient
	differ:1 = (r s< 0) != (d s< 0); r = (zext(differ) * (-r)) + (zext(!differ) * r);
	R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
# (1) DIVX.W #imm
:DIVX.W srcSimm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe3; srcSimm16 {
	d:4 = sext(srcSimm16); q:4 = R2R0 s/ d;
	r:4 = R2R0 s% d; #according to the manual the remainder has the same sign as the quotient
	differ:1 = (r s< 0) != (d s< 0); r = (zext(differ) * (-r)) + (zext(!differ) * r);
	R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
# (2) DIVX.B src
:DIVX.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0x9) ... & dst4B {
	d:2 = sext(dst4B); q:2 = R0 s/ d;
	r:2 = R0 s% d; #according to the manual the remainder has the same sign as the quotient
	differ:1 = (r s< 0) != (d s< 0); r = (zext(differ) * (-r)) + (zext(!differ) * r);
	R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
# (2) DIVX.W src
:DIVX.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0x9) ...
& dst4W {
	d:4 = sext(dst4W); q:4 = R2R0 s/ d;
	r:4 = R2R0 s% d; #according to the manual the remainder has the same sign as the quotient
	differ:1 = (r s< 0) != (d s< 0); r = (zext(differ) * (-r)) + (zext(!differ) * r);
	R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}
### DSBB ###
# Decimal (BCD) subtract with borrow; DecimalSubtractWithBorrow is a
# pcodeop/macro defined elsewhere in this spec. C is computed from the widened
# operands before the subtraction, taking the incoming carry into account.
# (1) DSBB.B #imm8, R0L
:DSBB.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xef; srcImm8 { src:2 = zext(srcImm8); dst:2 = zext(R0L); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:2 = DecimalSubtractWithBorrow(dst, src); R0L = tmp:1; setResultFlags(tmp:1); }
# (2) DSBB.W #imm16, R0
:DSBB.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xef; srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(R0); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:4 = DecimalSubtractWithBorrow(dst, src); R0 = tmp:2; setResultFlags(tmp:2); }
# (3) DSBB.B R0H, R0L
:DSBB.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe7 { src:2 = zext(R0H); dst:2 = zext(R0L); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:2 = DecimalSubtractWithBorrow(dst, src); R0L = tmp:1; setResultFlags(tmp:1); }
# (4) DSBB.W R1, R0
:DSBB.W R1, R0 is R0 & R1 & b1_0007=0x7d; b2_0007=0xe7 { src:4 = zext(R1); dst:4 = zext(R0); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:4 = DecimalSubtractWithBorrow(dst, src); R0 = tmp:2; setResultFlags(tmp:2); }
### DSUB ###
# (1) DSUB.B #imm8, R0L
:DSUB.B srcImm8, R0L is R0L & b1_0007=0x7c; b2_0007=0xed; srcImm8 { src:2 = zext(srcImm8); dst:2 = zext(R0L); $(CARRY) = (dst >= src); tmp:2 = DecimalSubtract(dst, src); R0L = tmp:1; setResultFlags(tmp:1); }
# (2) DSUB.W #imm16, R0
:DSUB.W srcImm16, R0 is R0 & b1_0007=0x7d; b2_0007=0xed; srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(R0); $(CARRY) = (dst >= src); tmp:4 = DecimalSubtract(dst, src); R0 = tmp:2; setResultFlags(tmp:2); }
# (3) DSUB.B R0H, R0L
:DSUB.B R0H, R0L is R0H & R0L & b1_0007=0x7c; b2_0007=0xe5 { src:2 = zext(R0H); dst:2 = zext(R0L); $(CARRY) = (dst >= src); tmp:2 = DecimalSubtract(dst, src); R0L = tmp:1; setResultFlags(tmp:1); }
# (4) DSUB.W R1, R0
:DSUB.W R1, R0 is R0 & R1 & b1_0007=0x7d; b2_0007=0xe5 { src:4 = zext(R1); dst:4 = zext(R0); $(CARRY) = (dst >= src); tmp:4 = DecimalSubtract(dst, src); R0 = tmp:2; setResultFlags(tmp:2); }
### ENTER ###
# Build stack frame: save FB, point FB at the new frame, allocate imm8 bytes.
:ENTER srcImm8 is b1_0007=0x7c; b2_0007=0xf2; srcImm8 { push2(FB); FB = SP; SP = SP - zext(srcImm8); }
### EXITD ###
# Tear down frame and return: restore FB, pop 3-byte return address.
:EXITD is b1_0007=0x7d; b2_0007=0xf2 { SP = FB; pop2(FB); pc:3 = 0; pop3(pc); return [pc]; }
### EXTS ###
# (1) EXTS.B dst
:EXTS.B dst4B is (b1_0007=0x7c; b2_0407=0x6) ... & dst4B & dst4W { tmp:2 = sext(dst4B); dst4W = tmp; setResultFlags(tmp); }
# (1) EXTS.B Ax
:EXTS.B dst4Ax is (b1_0007=0x7c; b2_0407=0x6) & $(DST4AX) { tmp:2 = sext(dst4Ax:1); dst4Ax = tmp; setResultFlags(tmp); }
# (2) EXTS.W R0
:EXTS.W R0 is R0 & b1_0007=0x7c; b2_0007=0xf3 { tmp:4 = sext(R0); R2R0 = tmp; setResultFlags(tmp); }
### FCLR ###
# Clear the FLG bit selected by flagBit (see flagBit operand table above).
:FCLR flagBit is b1_0007=0xeb; b2_0707=0 & flagBit & b2_0003=0x5 { mask:2 = ~(1 << flagBit); FLG = FLG & mask; }
### FSET ###
:FSET flagBit is b1_0007=0xeb; b2_0707=0 & flagBit & b2_0003=0x4 { mask:2 = (1 << flagBit); FLG = FLG | mask; }
### INC ###
# (1) INC.B dst
:INC.B dst3B is b1_0307=0x14 ... & $(DST3B) { tmp:1 = dst3B + 1; dst3B = tmp; setResultFlags(tmp); }
# (2) INC.W dst
:INC.W b1_3_regAx is b1_0407=0xb & b1_0002=0x2 & b1_3_regAx { tmp:2 = b1_3_regAx + 1; b1_3_regAx = tmp; setResultFlags(tmp); }
### INT ###
# Software interrupt: push FLG and return address, fetch vector from
# INTB + 4*number; interrupts > 0x1f keep the current stack selection.
:INT srcIntNum is b1_0007=0xeb; imm8_0607=3 & srcIntNum { push1(FLG:1); next:3 = inst_next; push3(next); ptr3:3 = (INTB + (zext(srcIntNum) * 0x4)); pc:3 = *:3 ptr3; $(STACK_SEL) = ((srcIntNum > 0x1f) * $(STACK_SEL)); $(INTERRUPT) = 0x0; $(DEBUG) = 0x0; call [pc]; }
##### INTO #####
# Overflow interrupt: taken only when O is set; fixed vector 0x0fffe0.
:INTO is b1_0007=0xf6 { if ($(OVERFLOW) == 0) goto inst_next; push1(FLG:1); next:3 = inst_next; push3(next); $(STACK_SEL) = 0; $(INTERRUPT) = 0x0; $(DEBUG) = 0x0; call 0x0fffe0; }
### JCnd ###
# (1) JCnd3 dsp8
:J^b1cnd3 rel8offset1 is b1_0307=0x0d & b1cnd3; rel8offset1 { if (b1cnd3) goto rel8offset1; }
# (2) JCnd4 dsp8
:J^b2cnd3 rel8offset2 is b1_0007=0x7d; b2_0407=0xc & b2_0303=1 & b2cnd3; rel8offset2 { if (b2cnd3) goto rel8offset2; }
### JMP ###
# (1) JMP.S dsp3
:JMP.S rel3offset2 is b1_0307=0x0c & rel3offset2 { goto rel3offset2; }
# (2) JMP.B dsp8
:JMP.B rel8offset1 is b1_0007=0xfe; rel8offset1 { goto rel8offset1; }
# (3) JMP.W dsp16
:JMP.W rel16offset1 is b1_0007=0xf4; rel16offset1 { goto rel16offset1; }
# (4) JMP.A abs20
:JMP.A abs20offset is b1_0007=0xfc; abs20offset { goto abs20offset; }
### JMPI ###
# JMPI.W dst
:JMPI.W reloffset_dst4W is (b1_0007=0x7d; b2_0407=0x2) ... & reloffset_dst4W { goto reloffset_dst4W; }
# JMPI.A dst (dst=register)
:JMPI.A reloffset_dst4L is (b1_0007=0x7d; b2_0407=0x0) ... & reloffset_dst4L { goto reloffset_dst4L; }
# JMPI.A dst (dst=memory)
:JMPI.A reloffset_dst4T is (b1_0007=0x7d; b2_0407=0x0) ... & reloffset_dst4T { goto reloffset_dst4T; }
### JMPS ###
# Special-page jump: 2-byte vector read from the special-page table growing
# down from 0x0ffffe; target is in the 0x0fxxxx segment.
:JMPS srcImm8 is b1_0007=0xee; srcImm8 {
	# 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match)
	ptr:3 = 0x0ffffe - (zext(srcImm8) << 1); pc:3 = 0x0f0000 | zext(*:2 ptr); goto [pc];
}
### JSR ###
:JSR.W rel16offset1 is b1_0007=0xf5; rel16offset1 { next:3 = inst_next; push3(next); call rel16offset1; }
:JSR.A abs20offset is b1_0007=0xfd; abs20offset { next:3 = inst_next; push3(next); call abs20offset; }
### JSRI ###
# JSRI.W dst
:JSRI.W reloffset_dst4W is (b1_0007=0x7d; b2_0407=0x3) ... & reloffset_dst4W { next:3 = inst_next; push3(next); call reloffset_dst4W; }
# JSRI.A dst (dst=register)
:JSRI.A dst4L is (b1_0007=0x7d; b2_0407=0x1) ... & dst4L { next:3 = inst_next; push3(next); pc:3 = dst4L:3; call [pc]; }
# JSRI.A dst (dst=memory)
:JSRI.A dst4T is (b1_0007=0x7d; b2_0407=0x1) ... & $(DST4T) { next:3 = inst_next; push3(next); pc:3 = dst4T; call [pc]; }
### JSRS ###
# Special-page subroutine call; same vector-table scheme as JMPS.
:JSRS srcImm8 is b1_0007=0xef; srcImm8 {
	# 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match)
	next:3 = inst_next; push3(next); ptr:3 = 0x0ffffe - (zext(srcImm8) << 1); pc:3 = 0x0f0000 | zext(*:2 ptr); call [pc];
}
### LDC ###
:LDC srcImm16, b2_creg16 is b1_0007=0xeb; b2_0707=0 & b2_creg16 & b2_0003=0x0; srcImm16 { b2_creg16 = srcImm16; }
:LDC dst4W, b2_creg16 is (b1_0007=0x7a; b2_0707=1 & b2_creg16) ...
& dst4W { b2_creg16 = dst4W; } ### LDCTX ### :LDCTX abs16offset, abs20offset is b1_0007=0x7c; b2_0007=0xf0; abs16offset; imm20_dat & abs20offset { taskNum:1 = abs16offset; # load task number stored at abs16 ptr:3 = imm20_dat + (zext(taskNum) * 2); # compute table entry address relative to abs20 regInfo:1 = *:1 ptr; ptr = ptr + 1; spCorrect:1 = *:1 ptr; ptr = zext(SP); if ((regInfo & 1) == 0) goto ; R0 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; R1 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; R2 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; R3 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; A0 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; A1 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; SB = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; FB = *:2 ptr; ptr = ptr + 2; SP = SP + zext(spCorrect); } ### LDE ### # (1) LDE.B abs20, dst :LDE.B abs20offset, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x8) ... & dst4B); abs20offset { tmp:1 = abs20offset; dst4B = tmp; setResultFlags(tmp); } # (1) LDE.B abs20, Ax :LDE.B abs20offset, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x8) & $(DST4AX)); abs20offset { tmp:1 = abs20offset; dst4Ax = zext(tmp); setResultFlags(tmp); } # (1) LDE.W abs20, dst :LDE.W abs20offsetW, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x8) ... & dst4W); abs20offsetW { tmp:2 = abs20offsetW; dst4W = tmp; setResultFlags(tmp); } # (2) LDE.B dsp20, dst :LDE.B dsp20A0B, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x9) ... 
& dst4B); dsp20A0B { tmp:1 = dsp20A0B; dst4B = tmp; setResultFlags(tmp); } # (2) LDE.B dsp20, Ax :LDE.B dsp20A0B, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x9) & $(DST4AX)); dsp20A0B { tmp:1 = dsp20A0B; dst4Ax = zext(tmp); setResultFlags(tmp); } # (2) LDE.W dsp20, dst :LDE.W dsp20A0W, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x9) ... & dst4W); dsp20A0W { tmp:2 = dsp20A0W; dst4W = tmp; setResultFlags(tmp); } # (3) LDE.B [A1A0], dst :LDE.B [A1A0], dst4B is (A1A0 & b1_0107=0x3a & b1_size_0=0; b2_0407=0xa) ... & dst4B { ptr:3 = A1A0:3; tmp:1 = *:1 ptr; dst4B = tmp; setResultFlags(tmp); } # (3) LDE.B [A1A0], Ax :LDE.B [A1A0], dst4Ax is (A1A0 & b1_0107=0x3a & b1_size_0=0; b2_0407=0xa) & $(DST4AX) { ptr:3 = A1A0:3; tmp:1 = *:1 ptr; dst4Ax = zext(tmp); setResultFlags(tmp); } # (3) LDE.W [A1A0], dst :LDE.W [A1A0], dst4W is (A1A0 & b1_0107=0x3a & b1_size_0=1; b2_0407=0xa) ... & dst4W { ptr:3 = A1A0:3; tmp:2 = *:2 ptr; dst4W = tmp; setResultFlags(tmp); } ### LDINTB ### # LDINTB operand value ldIntbVal: "#"^val is b1_0007; b2_0007; b3_0003; b4_0007; b5_0007; b6_0007; imm16_dat [ val = (b3_0003 << 16) + imm16_dat; ] { export *[const]:3 val; } # NOTE: Although this is documented as a macro for two LDE instructions, the encoding is different ?? :LDINTB ldIntbVal is (b1_0007=0xeb; b2_0007=0x20; b3_0407=0x0; b4_0007=0x0; b5_0007=0xeb; b6_0007=0x10) ... & ldIntbVal { INTB = ldIntbVal; } ### LDIPL ### :LDIPL srcImm3 is b1_0007=0x7d; b2_0307=0x14 & srcImm3 { $(IPL) = srcImm3; } ### MOV ### # (1) MOV.B:G #imm, dst :MOV^".B:G" srcImm8, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xc) ... & dst4B); srcImm8 { val:1 = srcImm8; dst4B = val; setResultFlags(val); } # (1) MOV.B:G #imm, Ax :MOV^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xc) & $(DST4AX)); srcImm8 { val:1 = srcImm8; dst4Ax = zext(val); setResultFlags(val); } # (1) MOV.W:G #imm, dst :MOV^".W:G" srcImm16, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0xc) ... 
& dst4W); srcImm16 { val:2 = srcImm16; dst4W = val; setResultFlags(val); } # (2) MOV.B:Q #simm4, dst :MOV^".B:Q" srcSimm4_0407, dst4B is (b1_0107=0x6c & b1_size_0=0; srcSimm4_0407) ... & dst4B { val:1 = srcSimm4_0407; dst4B = val; setResultFlags(val); } # (2) MOV.B:Q #simm4, Ax :MOV^".B:Q" srcSimm4_0407, dst4Ax is (b1_0107=0x6c & b1_size_0=0; srcSimm4_0407) & $(DST4AX) { val:1 = srcSimm4_0407; dst4Ax = zext(val); setResultFlags(val); } # (2) MOV.W:Q #simm4, dst :MOV^".W:Q" srcSimm4_0407, dst4W is (b1_0107=0x6c & b1_size_0=1; srcSimm4_0407) ... & dst4W { val:2 = sext(srcSimm4_0407); dst4W = val; setResultFlags(val); } # (3) MOV.B:S #imm, dst :MOV^".B:S" srcImm8, dst3B_afterDsp8 is (b1_0307=0x18; srcImm8) ... & $(DST3B_AFTER_DSP8) { val:1 = srcImm8; dst3B_afterDsp8 = val; setResultFlags(val); } # (4) MOV.B:S #imm, dst :MOV^".B:S" srcImm8, b1_3_regAx is b1_0407=0xe & b1_3_regAx & b1_0002=0x2; srcImm8 { val:1 = srcImm8; b1_3_regAx = zext(val); setResultFlags(val); } # (4) MOV.W:S #imm, Ax :MOV^".W:S" srcImm16, b1_3_regAx is b1_0407=0xa & b1_3_regAx & b1_0002=0x2; srcImm16 { val:2 = srcImm16; b1_3_regAx = val; setResultFlags(val); } # (5) MOV.B:Z #0, dst :MOV^".B:Z" srcZero8, dst3B is (srcZero8 & b1_0307=0x16) ... & $(DST3B) { dst3B = 0; $(SIGN) = 0; $(ZERO) = 1; } # (6) MOV.B:G src, dst :MOV^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x39 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { val:1 = src4B; dst4B_afterSrc4 = val; setResultFlags(val); } # (6) MOV.B:G src, Ax :MOV^".B:G" src4B, dst4Ax is (b1_0107=0x39 & b1_size_0=0) ... & src4B & $(DST4AX) ... { val:1 = src4B; dst4Ax = zext(val); setResultFlags(val); } # (6) MOV.W:G src, dst :MOV^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x39 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { val:2 = src4W; dst4W_afterSrc4 = val; setResultFlags(val); } # (7) MOV.B:S src, Ax :MOV^".B:S" dst2B, b1_2_regAx is (b1_0307=0x06 & b1_2_regAx) ... 
& dst2B { val:1 = dst2B; b1_2_regAx = zext(val); setResultFlags(val); } # (8) MOV.B:S R0H/R0L, dst # TODO: Is it really necessary to exclude R0H/R0L as valid destination ?? :MOV^".B:S" b1_2_reg8, dst2B is (b1_0307=0x0 & b1_2_reg8) ... & dst2B { val:1 = b1_2_reg8; dst2B = val; setResultFlags(val); } # (9) MOV.B:S src, R0H/R0L :MOV^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x1 & b1_2_reg8) ... & dst2B { val:1 = dst2B; b1_2_reg8 = val; setResultFlags(val); } # (10) MOV.B:G dsp:8[SP], dst :MOV^".B:G" dsp8spB, dst4B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xb) ... & dst4B); dsp8spB { val:1 = dsp8spB; dst4B = val; setResultFlags(val); } # (10) MOV.B:G dsp:8[SP], Ax :MOV^".B:G" dsp8spB, dst4Ax is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0xb) & $(DST4AX)); dsp8spB { val:1 = dsp8spB; dst4Ax = zext(val); setResultFlags(val); } # (10) MOV.W:G dsp:8[SP], dst :MOV^".W:G" dsp8spW, dst4W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0xb) ... & dst4W); dsp8spW { val:2 = dsp8spW; dst4W = val; setResultFlags(val); } # (11) MOV.B:G src, dsp:8[SP] :MOV^".B:G" dst4B, dsp8spB is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x3) ... & dst4B); dsp8spB { val:1 = dst4B; dsp8spB = val; setResultFlags(val); } # (11) MOV.W:G src, dsp:8[SP] :MOV^".W:G" dst4W, dsp8spW is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x3) ... & dst4W); dsp8spW { val:2 = dst4W; dsp8spW = val; setResultFlags(val); } ### MOVA ### :MOVA dst4A, b2_reg16 is (b1_0007=0xeb; b2_0707=0 & b2_reg16) ... & $(DST4A) { b2_reg16 = dst4A:2; } ### MOVDir ### # TODO: dst4B=Ax/R0L cases will parse but are not valid # (1) MOVDir R0L, dst :MOVLL R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0x8) ... & dst4B { dst4B = (R0L & 0x0f) | (dst4B & 0xf0); } :MOVHL R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0x9) ... & dst4B { dst4B = ((R0L & 0xf0) >> 4) | (dst4B & 0xf0); } :MOVLH R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0xa) ... & dst4B { dst4B = ((R0L & 0x0f) << 4) | (dst4B & 0x0f); } :MOVHH R0L, dst4B is (R0L & b1_0007=0x7c; b2_0407=0xb) ... 
& dst4B { dst4B = (R0L & 0xf0) | (dst4B & 0x0f); } # (1) MOVDir dst, R0L :MOVLL dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x0) ... & dst4B { R0L = (dst4B & 0x0f) | (R0L & 0xf0); } :MOVHL dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x1) ... & dst4B { R0L = ((dst4B & 0xf0) >> 4) | (R0L & 0xf0); } :MOVLH dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x2) ... & dst4B { R0L = ((dst4B & 0x0f) << 4) | (R0L & 0x0f); } :MOVHH dst4B, R0L is (R0L & b1_0007=0x7c; b2_0407=0x3) ... & dst4B { R0L = (dst4B & 0xf0) | (R0L & 0x0f); } ### MUL ### # TODO: Illegal MUL destination cases will parse but are not valid (e.g., R0H, R2, R1H, R3) # (1) MUL.B #imm, dst :MUL.B srcSimm8, dst4B is ((b1_0107=0x3e & b1_size_0=0; b2_0407=0x5) ... & dst4B & dst4W); srcSimm8 { dst4W = sext(srcSimm8) * sext(dst4B); } # (1) MUL.W #imm, dst :MUL.W srcSimm16, dst4W is ((b1_0107=0x3e & b1_size_0=1; b2_0407=0x5) ... & dst4W & dst4L); srcSimm16 { dst4L = sext(srcSimm16) * sext(dst4W); } # (2) MUL.B src, dst :MUL.B src4B, dst4B_afterSrc4 is (b1_0107=0x3c & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... & dst4W_afterSrc4 ... { dst4W_afterSrc4 = sext(src4B) * sext(dst4B_afterSrc4); } # (2) MUL.W src, dst :MUL.W src4W, dst4W_afterSrc4 is (b1_0107=0x3c & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... & dst4L_afterSrc4 ... { dst4L_afterSrc4 = sext(src4W) * sext(dst4W_afterSrc4); } ### MULU ### # TODO: Illegal MULU destination cases will parse but are not valid (e.g., R0H, R2, R1H, R3) # (1) MULU.B #imm, dst :MULU.B srcImm8, dst4B is ((b1_0107=0x3e & b1_size_0=0; b2_0407=0x4) ... & dst4B & dst4W); srcImm8 { dst4W = zext(srcImm8) * zext(dst4B); } # (1) MULU.W #imm, dst :MULU.W srcImm16, dst4W is ((b1_0107=0x3e & b1_size_0=1; b2_0407=0x4) ... & dst4W & dst4L); srcImm16 { dst4L = zext(srcImm16) * zext(dst4W); } # (2) MULU.B src, dst :MULU.B src4B, dst4B_afterSrc4 is (b1_0107=0x38 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... & dst4W_afterSrc4 ... 
{ dst4W_afterSrc4 = zext(src4B) * zext(dst4B_afterSrc4); } # (2) MULU.W src, dst :MULU.W src4W, dst4W_afterSrc4 is (b1_0107=0x38 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... & dst4L_afterSrc4 ... { dst4L_afterSrc4 = zext(src4W) * zext(dst4W_afterSrc4); } ### NEG ### # (1) NEG.B dst :NEG.B dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0x5) ... & dst4B { tmp:1 = dst4B; setSubtractFlags(0:1, tmp); tmp = -tmp; dst4B = tmp; setResultFlags(tmp); } # (1) NEG.W dst :NEG.W dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0x5) ... & dst4W { tmp:2 = dst4W; setSubtractFlags(0:2, tmp); tmp = -tmp; dst4W = tmp; setResultFlags(tmp); } ### NOP ### :NOP is b1_0007=0x04 { } ### NOT ### # (1) NOT.B dst :NOT.B dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0x7) ... & dst4B { tmp:1 = ~dst4B; dst4B = tmp; setResultFlags(tmp); } # (1) NOT.W dst :NOT.W dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0x7) ... & dst4W { tmp:2 = ~dst4W; dst4W = tmp; setResultFlags(tmp); } # (2) NOT.B:S dst :NOT^".B:S" dst3B is (b1_0307=0x17) ... & $(DST3B) { tmp:1 = ~dst3B; dst3B = tmp; setResultFlags(tmp); } ### OR ### # (1) OR.B:G #imm, dst :OR^".B:G" srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x3) ... & dst4B); srcImm8 { tmp:1 = dst4B | srcImm8; dst4B = tmp; setResultFlags(tmp); } # (1) OR.B:G #imm, Ax :OR^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x3) & $(DST4AX)); srcImm8 { tmp:1 = dst4Ax:1 | srcImm8; dst4Ax = zext(tmp); setResultFlags(tmp); } # (1) OR.W:G #imm, dst :OR^".W:G" srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x3) ... & dst4W); srcImm16 { tmp:2 = dst4W | srcImm16; dst4W = tmp; setResultFlags(tmp); } # (2) OR.B:S #imm, dst :OR^".B:S" srcImm8, dst3B_afterDsp8 is (b1_0307=0x13; srcImm8) ... & $(DST3B_AFTER_DSP8) { tmp:1 = dst3B_afterDsp8 | srcImm8; dst3B_afterDsp8 = tmp; setResultFlags(tmp); } # (3) OR.B:G src, dst :OR^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x4c & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... 
{ tmp:1 = dst4B_afterSrc4 | src4B; dst4B_afterSrc4 = tmp; setResultFlags(tmp); } # (3) OR.B:G src, Ax :OR^".B:G" src4B, dst4Ax is (b1_0107=0x4c & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1 | src4B; dst4Ax = zext(tmp); setResultFlags(tmp); } # (3) OR.W:G src, dst :OR^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x4c & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4 | src4W; dst4W_afterSrc4 = tmp; setResultFlags(tmp); } # (4) OR.B:S src, R0L/R0H :OR^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x3 & b1_2_reg8) ... & dst2B { tmp:1 = dst2B | b1_2_reg8; b1_2_reg8 = tmp; setResultFlags(tmp); } ### POP ### # (1) POP.B:G dst :POP^".B:G" dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0xd) ... & dst4B { pop1(dst4B); } # (1) POP.B:G Ax :POP^".B:G" dst4Ax is (b1_0107=0x3a & b1_size_0=0; b2_0407=0xd) & $(DST4AX) { val:1 = 0; pop1(val); dst4Ax = zext(val); } # (1) POP.W:G dst :POP^".W:G" dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0xd) ... & dst4W { pop2(dst4W); } # (2) POP.B:S R0L/R0H :POP^".B:S" b1_3_reg8 is b1_0407=0x9 & b1_3_reg8 & b1_0002=0x2 { pop1(b1_3_reg8); } # (3) POP.W:S Ax :POP^".W:S" b1_3_regAx is b1_0407=0xd & b1_3_regAx & b1_0002=0x2 { pop2(b1_3_regAx); } ### POPC ### :POPC b2_creg16 is b1_0007=0xeb; b2_0707=0 & b2_creg16 & b2_0003=0x3 { pop2(b2_creg16); } ### POPM ### popRegFB: FB is regBit7=1 & FB { pop2(FB); } popRegFB: is regBit7=0 { } popRegSB: SB popRegFB is regBit6=1 & popRegFB & SB { pop2(SB); build popRegFB; } popRegSB: popRegFB is popRegFB { build popRegFB; } popRegA1: A1 popRegSB is regBit5=1 & popRegSB & A1 { pop2(A1); build popRegSB; } popRegA1: popRegSB is popRegSB { build popRegSB; } popRegA0: A0 popRegA1 is regBit4=1 & popRegA1 & A0 { pop2(A0); build popRegA1; } popRegA0: popRegA1 is popRegA1 { build popRegA1; } popRegR3: R3 popRegA0 is regBit3=1 & popRegA0 & R3 { pop2(R3); build popRegA0; } popRegR3: popRegA0 is popRegA0 { build popRegA0; } popRegR2: R2 popRegR3 is regBit2=1 & popRegR3 & R2 { pop2(R2); build 
popRegR3; } popRegR2: popRegR3 is popRegR3 { build popRegR3; } popRegR1: R1 popRegR2 is regBit1=1 & popRegR2 & R1 { pop2(R1); build popRegR2; } popRegR1: popRegR2 is popRegR2 { build popRegR2; } popRegR0: R0 popRegR1 is regBit0=1 & popRegR1 & R0 { pop2(R0); build popRegR1; } popRegR0: popRegR1 is popRegR1 { build popRegR1; } popRegList: "( "^popRegR0^")" is popRegR0 { build popRegR0; } :POPM popRegList is b1_0007=0xed; popRegList { build popRegList; } ### PUSH ### # (1) PUSH.B:G #imm :PUSH^".B:G" srcImm8 is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe2; srcImm8 { push1(srcImm8); } # (1) PUSH.W:G #imm :PUSH^".W:G" srcImm16 is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe2; srcImm16 { push2(srcImm16); } # (2) PUSH.B:G src :PUSH^".B:G" dst4B is (b1_0107=0x3a & b1_size_0=0; b2_0407=0x4) ... & dst4B { push1(dst4B); } # (2) PUSH.W:G src :PUSH^".W:G" dst4W is (b1_0107=0x3a & b1_size_0=1; b2_0407=0x4) ... & dst4W { push2(dst4W); } # (3) PUSH.B:S R0H/R0L :PUSH^".B:S" b1_3_reg8 is b1_0407=0x8 & b1_3_reg8 & b1_0002=0x2 { push1(b1_3_reg8); } # (4) PUSH.W:S Ax :PUSH^".W:S" b1_3_regAx is b1_0407=0xc & b1_3_regAx & b1_0002=0x2 { push2(b1_3_regAx); } ### PUSHA ### :PUSHA dst4A is (b1_0007=0x7d; b2_0407=0x9) ... 
& $(DST4A) { push2(dst4A:2); } ### PUSHC ### :PUSHC b2_creg16 is b1_0007=0xeb; b2_0707=0 & b2_creg16 & b2_0003=0x2 { push2(b2_creg16); } ### PUSHM ### pushRegR0: R0 is regBit7=1 & R0 { push2(R0); } pushRegR0: is regBit7=0 { } pushRegR1: pushRegR0 R1 is regBit6=1 & pushRegR0 & R1 { push2(R1); build pushRegR0; } pushRegR1: pushRegR0 is pushRegR0 { build pushRegR0; } pushRegR2: pushRegR1 R2 is regBit5=1 & pushRegR1 & R2 { push2(R2); build pushRegR1; } pushRegR2: pushRegR1 is pushRegR1 { build pushRegR1; } pushRegR3: pushRegR2 R3 is regBit4=1 & pushRegR2 & R3 { push2(R3); build pushRegR2; } pushRegR3: pushRegR2 is pushRegR2 { build pushRegR2; } pushRegA0: pushRegR3 A0 is regBit3=1 & pushRegR3 & A0 { push3(A0); build pushRegR3; } pushRegA0: pushRegR3 is pushRegR3 { build pushRegR3; } pushRegA1: pushRegA0 A1 is regBit2=1 & pushRegA0 & A1 { push3(A1); build pushRegA0; } pushRegA1: pushRegA0 is pushRegA0 { build pushRegA0; } pushRegSB: pushRegA1 SB is regBit1=1 & pushRegA1 & SB { push3(SB); build pushRegA1; } pushRegSB: pushRegA1 is pushRegA1 { build pushRegA1; } pushRegFB: pushRegSB FB is regBit0=1 & pushRegSB & FB { push3(FB); build pushRegSB; } pushRegFB: pushRegSB is pushRegSB { build pushRegSB; } pushRegList: "("^pushRegFB^" )" is pushRegFB { build pushRegFB; } :PUSHM pushRegList is b1_0007=0xec; pushRegList { build pushRegList; } ### REIT ### :REIT is b1_0007=0xfb { pc:3 = 0; pop3(pc); f:1 = 0; pop1(f); FLG = zext(f); # TODO: Not sure what state upper FLG bits should be in ?? 
return [pc]; } ### RMPA ### :RMPA.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xf1 { if (R3 == 0) goto inst_next; ptr0:3 = zext(A0); ptr1:3 = zext(A1); a:1 = *:1 ptr0; b:1 = *:1 ptr1; A0 = A0 + 1; A1 = A1 + 1; prod:2 = sext(a) * sext(b); o:1 = scarry(R0, prod); $(OVERFLOW) = o | $(OVERFLOW); R0 = R0 + prod; R3 = R3 - 1; goto inst_start; } :RMPA.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xf1 { if (R3 == 0) goto inst_next; ptr0:3 = zext(A0); ptr1:3 = zext(A1); a:2 = *:2 ptr0; b:2 = *:2 ptr1; A0 = A0 + 2; A1 = A1 + 2; prod:4 = sext(a) * sext(b); o:1 = scarry(R2R0, prod); $(OVERFLOW) = o | $(OVERFLOW); R2R0 = R2R0 + prod; R3 = R3 - 1; goto inst_start; } ### ROLC ### :ROLC.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xa) ... & dst4B { c:1 = $(CARRY); tmp:1 = dst4B; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst4B = tmp; setResultFlags(tmp); } :ROLC.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xa) & $(DST4AX) { c:1 = $(CARRY); tmp:1 = dst4Ax:1; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst4Ax = zext(tmp); setResultFlags(tmp); } :ROLC.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xa) ... & dst4W { c:2 = zext($(CARRY)); tmp:2 = dst4W; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst4W = tmp; setResultFlags(tmp); } ### RORC ### :RORC.B dst4B is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xb) ... & dst4B { c:1 = $(CARRY); tmp:1 = dst4B; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 7); dst4B = tmp; setResultFlags(tmp); } :RORC.B dst4Ax is (b1_0107=0x3b & b1_size_0=0; b2_0407=0xb) & $(DST4AX) { c:1 = $(CARRY); tmp:1 = dst4Ax:1; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 7); dst4Ax = zext(tmp); setResultFlags(tmp); } :RORC.W dst4W is (b1_0107=0x3b & b1_size_0=1; b2_0407=0xb) ... 
& dst4W { c:2 = zext($(CARRY)); tmp:2 = dst4W; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 15); dst4W = tmp; setResultFlags(tmp); } ### ROT ### # (1) ROT.B #imm, dst (right) :ROT.B srcSimm4Shift_0407, dst4B is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4B { rightShift:1 = -srcSimm4Shift_0407; tmp:1 = dst4B; $(CARRY) = (tmp >> (rightShift - 1)) & 1; tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); dst4B = tmp; setResultFlags(tmp); } # (1) ROT.B #imm, Ax (right) :ROT.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) & $(DST4AX) { rightShift:1 = -srcSimm4Shift_0407; tmp:1 = dst4Ax:1; $(CARRY) = (tmp >> (rightShift - 1)) & 1; tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); dst4Ax = zext(tmp); setResultFlags(tmp); } # (1) ROT.W #imm, dst (right) :ROT.W srcSimm4Shift_0407, dst4W is (b1_0107=0x70 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4W { rightShift:1 = -srcSimm4Shift_0407; tmp:2 = dst4W; c:2 = (tmp >> (rightShift - 1)); $(CARRY) = c:1 & 1; tmp = (tmp >> rightShift) | (tmp << (16 - rightShift)); dst4W = tmp; setResultFlags(tmp); } # (1) ROT.B #imm, dst (left) :ROT.B srcSimm4Shift_0407, dst4B is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... & dst4B { leftShift:1 = srcSimm4Shift_0407; tmp:1 = dst4B; $(CARRY) = (tmp >> (8 - leftShift)) & 1; tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); dst4B = tmp; setResultFlags(tmp); } # (1) ROT.B #imm, Ax (left) :ROT.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x70 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) & $(DST4AX) { leftShift:1 = srcSimm4Shift_0407; tmp:1 = dst4Ax:1; $(CARRY) = (tmp >> (8 - leftShift)) & 1; tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); dst4Ax = zext(tmp); setResultFlags(tmp); } # (1) ROT.W #imm, dst (left) :ROT.W srcSimm4Shift_0407, dst4W is (b1_0107=0x70 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... 
& dst4W { leftShift:1 = srcSimm4Shift_0407; tmp:2 = dst4W; c:2 = (tmp >> (16 - leftShift)); $(CARRY) = c:1 & 1; tmp = (tmp << leftShift) | (tmp >> (16 - leftShift)); dst4W = tmp; setResultFlags(tmp); } # (2) ROT.B R1H, dst :ROT.B R1H, dst4B is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0x6) ... & dst4B { if (R1H == 0) goto inst_next; shift:1 = R1H s% 8; tmp:1 = dst4B; if (shift s>= 0) goto ; shift = -shift; $(CARRY) = (tmp >> (shift - 1)) & 1; tmp = (tmp >> shift) | (tmp << (8 - shift)); goto ; $(CARRY) = (tmp >> (8 - shift)) & 1; tmp = (tmp << shift) | (tmp >> (8 - shift)); dst4B = tmp; setResultFlags(tmp); } # (2) ROT.B R1H, Ax :ROT.B R1H, dst4Ax is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0x6) & $(DST4AX) { if (R1H == 0) goto inst_next; shift:1 = R1H s% 8; tmp:1 = dst4Ax:1; if (shift s>= 0) goto ; shift = -shift; $(CARRY) = (tmp >> (shift - 1)) & 1; tmp = (tmp >> shift) | (tmp << (8 - shift)); goto ; $(CARRY) = (tmp >> (8 - shift)) & 1; tmp = (tmp << shift) | (tmp >> (8 - shift)); dst4Ax = zext(tmp); setResultFlags(tmp); } # (2) ROT.W R1H, dst :ROT.W R1H, dst4W is (R1H & b1_0107=0x3a & b1_size_0=1; b2_0407=0x6) ... & dst4W { if (R1H == 0) goto inst_next; shift:1 = R1H s% 16; tmp:2 = dst4W; if (shift s>= 0) goto ; shift = -shift; c:2 = (tmp >> (shift - 1)); tmp = (tmp >> shift) | (tmp << (16 - shift)); goto ; c = (tmp >> (16 - shift)); tmp = (tmp << shift) | (tmp >> (16 - shift)); $(CARRY) = c:1 & 1; dst4W = tmp; setResultFlags(tmp); } ### RTS ### :RTS is b1_0007=0xf3 { pc:3 = 0; pop3(pc); return [pc]; } ### SBB ### # (1) SBB.B #imm, dst :SBB.B srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x7) ... 
& dst4B); srcSimm8 { tmp:1 = dst4B; c:1 = $(CARRY); setSubtract3Flags(tmp, srcSimm8, c); tmp = tmp - srcSimm8 - c; dst4B = tmp; setResultFlags(tmp); } # (1) SBB.B #imm, Ax :SBB.B srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x7) & $(DST4AX)); srcSimm8 { tmp:1 = dst4Ax:1; c:1 = $(CARRY); setSubtract3Flags(tmp, srcSimm8, c); tmp = tmp - srcSimm8 - c; dst4Ax = zext(tmp); setResultFlags(tmp); } # (1) SBB.W #imm, dst :SBB.W srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x7) ... & dst4W); srcSimm16 { tmp:2 = dst4W; c:2 = zext($(CARRY)); setSubtract3Flags(tmp, srcSimm16, c); tmp = tmp - srcSimm16 - c; dst4W = tmp; setResultFlags(tmp); } # (2) SBB.B src, dst :SBB.B src4B, dst4B_afterSrc4 is (b1_0107=0x5c & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4; s:1 = src4B; c:1 = $(CARRY); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst4B_afterSrc4 = tmp; setResultFlags(tmp); } # (2) SBB.B src, Ax :SBB.B src4B, dst4Ax is (b1_0107=0x5c & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1; s:1 = src4B; c:1 = $(CARRY); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst4Ax = zext(tmp); setResultFlags(tmp); } # (2) SBB.W src, dst :SBB.W src4W, dst4W_afterSrc4 is (b1_0107=0x5c & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4; s:2 = src4W; c:2 = zext($(CARRY)); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst4W_afterSrc4 = tmp; setResultFlags(tmp); } ##### SBJNZ - PSUEDO-OP! 
SAME AS ADJNZ ##### ### SHA ### macro SHAsetShiftRightFlags(val,shift,result) { local c = (val >> (shift - 1)) & 1; $(CARRY) = c:1; local mask = ~(-(1 << shift)); allOnes:1 = (mask & val) == mask; allZeros:1 = (mask & val) == 0; $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); setResultFlags(result); } macro SHAsetShiftLeftFlags(val,shift,result,sze) { local c = (val >> (sze - shift)) & 1; $(CARRY) = c:1; local mask = -(1 << shift); allOnes:1 = (mask & val) == mask; allZeros:1 = (mask & val) == 0; $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); setResultFlags(result); } # (1) SHA.B #imm4, dst (right) :SHA.B srcSimm4Shift_0407, dst4B is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4B { val:1 = dst4B; shift:1 = -srcSimm4Shift_0407; tmp:1 = val s>> shift; dst4B = tmp; SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.B #imm4, Ax (right) :SHA.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) & $(DST4AX) { val:1 = dst4Ax:1; shift:1 = -srcSimm4Shift_0407; tmp:1 = val s>> shift; dst4Ax = zext(tmp); SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.W #imm4, dst (right) :SHA.W srcSimm4Shift_0407, dst4W is (b1_0107=0x78 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=1) ... & dst4W { val:2 = dst4W; shift:1 = -srcSimm4Shift_0407; tmp:2 = val s>> shift; dst4W = tmp; SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.B #imm4, dst (left) :SHA.B srcSimm4Shift_0407, dst4B is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... 
& dst4B { val:1 = dst4B; shift:1 = srcSimm4Shift_0407; tmp:1 = val << shift; dst4B = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (1) SHA.B #imm4, Ax (left) :SHA.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x78 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) & $(DST4AX) { val:1 = dst4Ax:1; shift:1 = srcSimm4Shift_0407; tmp:1 = val << shift; dst4Ax = zext(tmp); SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (1) SHA.W #imm4, dst (left) :SHA.W srcSimm4Shift_0407, dst4W is (b1_0107=0x78 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=0) ... & dst4W { val:2 = dst4W; shift:1 = srcSimm4Shift_0407; tmp:2 = val << shift; dst4W = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 16); } # (2) SHA.B R1H, dst :SHA.B R1H, dst4B is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xf) ... & dst4B { if (R1H == 0) goto inst_next; shift:1 = R1H; val:1 = dst4B; if (shift s> 0) goto ; shift = -shift; tmp:1 = val s>> shift; dst4B = tmp; SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst4B = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (2) SHA.B R1H, Ax :SHA.B R1H, dst4Ax is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xf) & $(DST4AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:1 = dst4Ax:1; if (shift s> 0) goto ; shift = -shift; tmp:1 = val s>> shift; dst4Ax = zext(tmp); SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst4Ax = zext(tmp); SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (2) SHA.W R1H, dst :SHA.W R1H, dst4W is (R1H & b1_0107=0x3a & b1_size_0=1; b2_0407=0xf) ... 
# ---- SHA (continued): tail of SHA.W R1H,dst plus SHA.L forms ----
# NOTE(review): this dump stripped the angle-bracket local labels, leaving
# bare "goto ;" statements in every dynamic-shift constructor. The labels
# are restored below: a negative count in R1H takes the (arithmetic) right
# shift path, a positive count takes the left-shift path.
& dst4W
{
	if (R1H == 0) goto inst_next;
	shift:1 = R1H;
	val:2 = dst4W;
	if (shift s> 0) goto <shift_left>;
	shift = -shift;
	tmp:2 = val s>> shift;
	dst4W = tmp;
	SHAsetShiftRightFlags(val, shift, tmp);
	goto inst_next;
	<shift_left>
	tmp = val << shift;
	dst4W = tmp;
	SHAsetShiftLeftFlags(val, shift, tmp, 16);
}

# (3) SHA.L #imm4, R2R0/R3R1 (right)
:SHA.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x5 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=1
{
	val:4 = b2_reg32;
	shift:1 = -srcSimm4Shift_0003;
	tmp:4 = val s>> shift;
	b2_reg32 = tmp;
	SHAsetShiftRightFlags(val, shift, tmp);
}

# (3) SHA.L #imm4, R2R0/R3R1 (left)
:SHA.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x5 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=0
{
	val:4 = b2_reg32;
	shift:1 = srcSimm4Shift_0003;
	tmp:4 = val << shift;
	b2_reg32 = tmp;
	SHAsetShiftLeftFlags(val, shift, tmp, 32);
}

# (4) SHA.L R1H, R2R0/R3R1
:SHA.L R1H, b2_reg32 is R1H & b1_0007=0xeb; b2_0507=0x1 & b2_reg32 & b2_0003=0x1
{
	if (R1H == 0) goto inst_next;
	shift:1 = R1H;
	val:4 = b2_reg32;
	if (shift s> 0) goto <shift_left>;
	shift = -shift;
	tmp:4 = val s>> shift;
	b2_reg32 = tmp;
	SHAsetShiftRightFlags(val, shift, tmp);
	goto inst_next;
	<shift_left>
	tmp = val << shift;
	b2_reg32 = tmp;
	SHAsetShiftLeftFlags(val, shift, tmp, 32);
}

### SHL ###

# Logical-shift flag helper: CARRY receives the last bit shifted out (right).
macro SHLsetShiftRightFlags(val,shift,result) {
	local c = (val >> (shift - 1)) & 1;
	$(CARRY) = c:1;
	setResultFlags(result);
}

# Logical-shift flag helper: CARRY receives the last bit shifted out (left);
# sze is the operand width in bits.
macro SHLsetShiftLeftFlags(val,shift,result,sze) {
	local c = (val >> (sze - shift)) & 1;
	$(CARRY) = c:1;
	setResultFlags(result);
}

# (1) SHL.B #imm4, dst (right)
:SHL.B srcSimm4Shift_0407, dst4B is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) ...
& dst4B
{
	val:1 = dst4B;
	shift:1 = -srcSimm4Shift_0407;
	tmp:1 = val >> shift;
	dst4B = tmp;
	SHLsetShiftRightFlags(val, shift, tmp);
}

# (1) SHL.B #imm4, Ax (right)
:SHL.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=1) & $(DST4AX)
{
	val:1 = dst4Ax:1;
	shift:1 = -srcSimm4Shift_0407;
	tmp:1 = val >> shift;
	dst4Ax = zext(tmp);
	SHLsetShiftRightFlags(val, shift, tmp);
}

# (1) SHL.W #imm4, dst (right)
:SHL.W srcSimm4Shift_0407, dst4W is (b1_0107=0x74 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=1) ...
& dst4W
{
	val:2 = dst4W;
	shift:1 = -srcSimm4Shift_0407;
	tmp:2 = val >> shift;
	dst4W = tmp;
	SHLsetShiftRightFlags(val, shift, tmp);
}

# (1) SHL.B #imm4, dst (left)
:SHL.B srcSimm4Shift_0407, dst4B is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) ...
& dst4B
{
	val:1 = dst4B;
	shift:1 = srcSimm4Shift_0407;
	tmp:1 = val << shift;
	dst4B = tmp;
	SHLsetShiftLeftFlags(val, shift, tmp, 8);
}

# (1) SHL.B #imm4, Ax (left)
:SHL.B srcSimm4Shift_0407, dst4Ax is (b1_0107=0x74 & b1_size_0=0; srcSimm4Shift_0407 & b2_shiftSign_7=0) & $(DST4AX)
{
	val:1 = dst4Ax:1;
	shift:1 = srcSimm4Shift_0407;
	tmp:1 = val << shift;
	dst4Ax = zext(tmp);
	SHLsetShiftLeftFlags(val, shift, tmp, 8);
}

# (1) SHL.W #imm4, dst (left)
:SHL.W srcSimm4Shift_0407, dst4W is (b1_0107=0x74 & b1_size_0=1; srcSimm4Shift_0407 & b2_shiftSign_7=0) ...
& dst4W
{
	val:2 = dst4W;
	shift:1 = srcSimm4Shift_0407;
	tmp:2 = val << shift;
	dst4W = tmp;
	SHLsetShiftLeftFlags(val, shift, tmp, 16);
}

# (2) SHL.B R1H, dst
:SHL.B R1H, dst4B is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xe) ...
& dst4B
{
	if (R1H == 0) goto inst_next;
	shift:1 = R1H;
	val:1 = dst4B;
	if (shift s> 0) goto <shift_left>;
	shift = -shift;
	tmp:1 = val >> shift;
	dst4B = tmp;
	SHLsetShiftRightFlags(val, shift, tmp);
	goto inst_next;
	<shift_left>
	tmp = val << shift;
	dst4B = tmp;
	SHLsetShiftLeftFlags(val, shift, tmp, 8);
}

# (2) SHL.B R1H, Ax
:SHL.B R1H, dst4Ax is (R1H & b1_0107=0x3a & b1_size_0=0; b2_0407=0xe) & $(DST4AX)
{
	if (R1H == 0) goto inst_next;
	shift:1 = R1H;
	val:1 = dst4Ax:1;
	if (shift s> 0) goto <shift_left>;
	shift = -shift;
	tmp:1 = val >> shift;
	dst4Ax = zext(tmp);
	SHLsetShiftRightFlags(val, shift, tmp);
	goto inst_next;
	<shift_left>
	tmp = val << shift;
	dst4Ax = zext(tmp);
	SHLsetShiftLeftFlags(val, shift, tmp, 8);
}

# (2) SHL.W R1H, dst
:SHL.W R1H, dst4W is (R1H & b1_0107=0x3a & b1_size_0=1; b2_0407=0xe) ...
& dst4W
{
	if (R1H == 0) goto inst_next;
	shift:1 = R1H;
	val:2 = dst4W;
	if (shift s> 0) goto <shift_left>;
	shift = -shift;
	tmp:2 = val >> shift;
	dst4W = tmp;
	SHLsetShiftRightFlags(val, shift, tmp);
	goto inst_next;
	<shift_left>
	tmp = val << shift;
	dst4W = tmp;
	SHLsetShiftLeftFlags(val, shift, tmp, 16);
}

# (3) SHL.L #imm4, R2R0/R3R1 (right)
:SHL.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x4 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=1
{
	val:4 = b2_reg32;
	shift:1 = -srcSimm4Shift_0003;
	tmp:4 = val >> shift;
	b2_reg32 = tmp;
	SHLsetShiftRightFlags(val, shift, tmp);
}

# (3) SHL.L #imm4, R2R0/R3R1 (left)
:SHL.L srcSimm4Shift_0003, b2_reg32 is b1_0007=0xeb; b2_0507=0x4 & b2_reg32 & srcSimm4Shift_0003 & b2_shiftSign_3=0
{
	val:4 = b2_reg32;
	shift:1 = srcSimm4Shift_0003;
	tmp:4 = val << shift;
	b2_reg32 = tmp;
	SHLsetShiftLeftFlags(val, shift, tmp, 32);
}

# (4) SHL.L R1H, R2R0/R3R1
:SHL.L R1H, b2_reg32 is R1H & b1_0007=0xeb; b2_0507=0x0 & b2_reg32 & b2_0003=0x1
{
	if (R1H == 0) goto inst_next;
	shift:1 = R1H;
	val:4 = b2_reg32;
	if (shift s> 0) goto <shift_left>;
	shift = -shift;
	tmp:4 = val >> shift;
	b2_reg32 = tmp;
	SHLsetShiftRightFlags(val, shift, tmp);
	goto inst_next;
	<shift_left>
	tmp = val << shift;
	b2_reg32 = tmp;
	SHLsetShiftLeftFlags(val, shift, tmp, 32);
}

###
### SMOVB ###

# String move backward: copy R3 elements from [R1H:A0] downward into [A1].
:SMOVB.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe9
{
	if (R3 == 0) goto inst_next;
	ptr0:3 = (zext(R1H) << 16) + zext(A0);
	ptr1:3 = zext(A1);
	*:1 ptr1 = *:1 ptr0;
	A1 = A1 - 1;
	ptr0 = ptr0 - 1;
	A0 = ptr0:2;
	R1H = ptr0(2);
	R3 = R3 - 1;
	goto inst_start;
}

:SMOVB.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe9
{
	if (R3 == 0) goto inst_next;
	ptr0:3 = (zext(R1H) << 16) + zext(A0);
	ptr1:3 = zext(A1);
	*:2 ptr1 = *:2 ptr0;
	A1 = A1 - 2;
	ptr0 = ptr0 - 2;
	A0 = ptr0:2;
	R1H = ptr0(2);
	R3 = R3 - 1;
	goto inst_start;
}

### SMOVF ###

# String move forward: copy R3 elements from [R1H:A0] upward into [A1].
:SMOVF.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xe8
{
	if (R3 == 0) goto inst_next;
	ptr0:3 = (zext(R1H) << 16) + zext(A0);
	ptr1:3 = zext(A1);
	*:1 ptr1 = *:1 ptr0;
	A1 = A1 + 1;
	ptr0 = ptr0 + 1;
	A0 = ptr0:2;
	R1H = ptr0(2);
	R3 = R3 - 1;
	goto inst_start;
}

:SMOVF.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xe8
{
	if (R3 == 0) goto inst_next;
	ptr0:3 = (zext(R1H) << 16) + zext(A0);
	ptr1:3 = zext(A1);
	*:2 ptr1 = *:2 ptr0;
	A1 = A1 + 2;
	ptr0 = ptr0 + 2;
	A0 = ptr0:2;
	R1H = ptr0(2);
	R3 = R3 - 1;
	goto inst_start;
}

### SSTR ###

# String store: fill R3 elements at [A1] with R0L / R0.
:SSTR.B is b1_0107=0x3e & b1_size_0=0; b2_0007=0xea
{
	if (R3 == 0) goto inst_next;
	ptr1:3 = zext(A1);
	*:1 ptr1 = R0L;
	A1 = A1 + 1;
	R3 = R3 - 1;
	goto inst_start;
}

:SSTR.W is b1_0107=0x3e & b1_size_0=1; b2_0007=0xea
{
	if (R3 == 0) goto inst_next;
	ptr1:3 = zext(A1);
	*:2 ptr1 = R0;
	A1 = A1 + 2;
	R3 = R3 - 1;
	goto inst_start;
}

### STC ###
# (1) STC src, dst
:STC b2_creg16, dst4W is (b1_0007=0x7b; b2_0707=1 & b2_creg16) ... & dst4W
{
	dst4W = b2_creg16;
}

# (2) STC PC, dst (dst=register)
:STC PC, dst4L is (PC & b1_0007=0x7c; b2_0407=0xc) ... & dst4L
{
	dst4L = zext(PC);
}

# (2) STC PC, dst (dst=memory)
:STC PC, dst4T is (PC & b1_0007=0x7c; b2_0407=0xc) ... & $(DST4T)
{
	# PC value refers to next instruction address
	dst4T = inst_next;
}

### STCTX ###
# Save context: the task table entry selects which registers to push.
# NOTE(review): the dump stripped the local labels here ("goto ;"); they are
# restored so each register push is correctly skipped when its regInfo bit
# (tested at bit 7 after successive left shifts) is clear.
:STCTX abs16offset, abs20offset is b1_0007=0xb6; b2_0007=0xd3; abs16offset; imm20_dat & abs20offset
{
	taskNum:1 = abs16offset;                  # load task number stored at abs16
	ptr:3 = imm20_dat + (zext(taskNum) * 2);  # compute table entry address relative to abs20
	regInfo:1 = *:1 ptr;
	ptr = ptr + 1;
	spCorrect:1 = *:1 ptr;
	ptr = zext(SP);
	if ((regInfo & 0x80) == 0) goto <noFB>;
	ptr = ptr - 2;
	*:2 ptr = FB;
	<noFB>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noSB>;
	ptr = ptr - 2;
	*:2 ptr = SB;
	<noSB>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noA1>;
	ptr = ptr - 2;
	*:2 ptr = A1;
	<noA1>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noA0>;
	ptr = ptr - 2;
	*:2 ptr = A0;
	<noA0>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noR3>;
	ptr = ptr - 2;
	*:2 ptr = R3;
	<noR3>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noR2>;
	ptr = ptr - 2;
	*:2 ptr = R2;
	<noR2>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noR1>;
	ptr = ptr - 2;
	*:2 ptr = R1;
	<noR1>
	regInfo = regInfo << 1;
	if ((regInfo & 0x80) == 0) goto <noR0>;
	ptr = ptr - 2;
	*:2 ptr = R0;
	<noR0>
	SP = SP - zext(spCorrect);
}

### STE ###
# (1) STE.B src, abs20
:STE.B dst4B, abs20offset is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0) ... & dst4B); abs20offset
{
	val:1 = dst4B;
	abs20offset = val;
	setResultFlags(val);
}

# (1) STE.W src, abs20
:STE.W dst4W, abs20offsetW is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0) ... & dst4W); abs20offsetW
{
	val:2 = dst4W;
	abs20offsetW = val;
	setResultFlags(val);
}

# (2) STE.B src, dsp:20[A0]
:STE.B dst4B, dsp20A0B is ((b1_0107=0x3a & b1_size_0=0; b2_0407=0x1) ... & dst4B); dsp20A0B
{
	val:1 = dst4B;
	dsp20A0B = val;
	setResultFlags(val);
}

# (2) STE.W src, dsp:20[A0]
:STE.W dst4W, dsp20A0W is ((b1_0107=0x3a & b1_size_0=1; b2_0407=0x1) ...
# --- STE (continued): body of STE.W to dsp:20[A0] ---
& dst4W); dsp20A0W { val:2 = dst4W; dsp20A0W = val; setResultFlags(val); }

# [A1A0] store targets: the A1A0 register pair truncated to a 3-byte address
steA1A0B: "["^A1A0^"]" is A1A0 { ptr:3 = A1A0:3; export *:1 ptr; }
steA1A0W: "["^A1A0^"]" is A1A0 { ptr:3 = A1A0:3; export *:2 ptr; }

# (3) STE.B src, [A1A0]
:STE.B dst4B, steA1A0B is (steA1A0B & b1_0107=0x3a & b1_size_0=0; b2_0407=0x2) ... & dst4B { val:1 = dst4B; steA1A0B = val; setResultFlags(val); }
# (3) STE.W src, [A1A0]
:STE.W dst4W, steA1A0W is (steA1A0W & b1_0107=0x3a & b1_size_0=1; b2_0407=0x2) ... & dst4W { val:2 = dst4W; steA1A0W = val; setResultFlags(val); }

### STNZ ###
# Store immediate when the Z flag is clear.
:STNZ srcImm8, dst3B_afterDsp8 is (b1_0307=0x1a; srcImm8) ... & $(DST3B_AFTER_DSP8) { if ($(ZERO) != 0) goto inst_next; dst3B_afterDsp8 = srcImm8; }

### STZ ###
# Store immediate when the Z flag is set.
:STZ srcImm8, dst3B_afterDsp8 is (b1_0307=0x19; srcImm8) ... & $(DST3B_AFTER_DSP8) { if ($(ZERO) == 0) goto inst_next; dst3B_afterDsp8 = srcImm8; }

### STZX ###
# Skip the bytes that precede the second immediate operand.
skipBytesBeforeImm82: is b1_0007; imm8_dat { } # imm81
skipBytesBeforeImm82: is b1_d3=0x5; imm16_dat { } # imm81; dsp8
skipBytesBeforeImm82: is b1_d3=0x6; imm16_dat { } # imm81; dsp8
skipBytesBeforeImm82: is b1_d3=0x7; imm24_dat { } # imm81; abs16
stzxImm82: "#"^imm8_dat is skipBytesBeforeImm82; imm8_dat { export *[const]:1 imm8_dat; }
# Branchless select between the two immediates on the Z flag.
:STZX srcImm8, stzxImm82, dst3B_afterDsp8 is (b1_0307=0x1b; srcImm8) ... & $(DST3B_AFTER_DSP8) ... & stzxImm82 { z:1 = $(ZERO); dst3B_afterDsp8 = (z * srcImm8) + (!z * stzxImm82); }

### SUB ###
# (1) SUB.B:G #simm, dst
:SUB^".B:G" srcSimm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x5) ... & dst4B); srcSimm8 { tmp:1 = dst4B; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; dst4B = tmp; setResultFlags(tmp); }
# (1) SUB.B:G #simm, Ax
:SUB^".B:G" srcSimm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x5) & $(DST4AX)); srcSimm8 { tmp:1 = dst4Ax:1; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (1) SUB.W:G #simm, dst
:SUB^".W:G" srcSimm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x5) ... & dst4W); srcSimm16 { tmp:2 = dst4W; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; dst4W = tmp; setResultFlags(tmp); }
# (2) SUB.B:S #simm, dst
:SUB^".B:S" srcSimm8, dst3B_afterDsp8 is (b1_0307=0x11; srcSimm8) ... & $(DST3B_AFTER_DSP8) { tmp:1 = dst3B_afterDsp8; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; dst3B_afterDsp8 = tmp; setResultFlags(tmp); }
# (3) SUB.B:G src, dst
:SUB^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x54 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4; src:1 = src4B; setSubtractFlags(tmp, src); tmp = tmp - src; dst4B_afterSrc4 = tmp; setResultFlags(tmp); }
# (3) SUB.B:G src, Ax
:SUB^".B:G" src4B, dst4Ax is (b1_0107=0x54 & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1; src:1 = src4B; setSubtractFlags(tmp, src); tmp = tmp - src; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (3) SUB.W:G src, dst
:SUB^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x54 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4; src:2 = src4W; setSubtractFlags(tmp, src); tmp = tmp - src; dst4W_afterSrc4 = tmp; setResultFlags(tmp); }
# (4) SUB.B:S src, R0H/R0L
:SUB^".B:S" dst2B, b1_2_reg8 is (b1_0307=0x5 & b1_2_reg8) ... & dst2B { tmp:1 = b1_2_reg8; src:1 = dst2B; setSubtractFlags(tmp, src); tmp = tmp - src; b1_2_reg8 = tmp; setResultFlags(tmp); }

### TST ###
# TST ands source into destination but discards the result; only flags change.
# (1) TST.B #imm, dst
:TST.B srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x0) ... & dst4B); srcImm8 { tmp:1 = dst4B & srcImm8; setResultFlags(tmp); }
# (1) TST.W #imm, dst
:TST.W srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x0) ... & dst4W); srcImm16 { tmp:2 = dst4W & srcImm16; setResultFlags(tmp); }
# (2) TST.B src, dst
:TST.B src4B, dst4B_afterSrc4 is (b1_0107=0x40 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4 & src4B; setResultFlags(tmp); }
# (2) TST.W src, dst
:TST.W src4W, dst4W_afterSrc4 is (b1_0107=0x40 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ... { tmp:2 = dst4W_afterSrc4 & src4W; setResultFlags(tmp); }

##### UND #####
# Don't implement this "Undefined" instruction
# :UND is b1_0007=0xff

### WAIT ###
:WAIT is b1_0007=0x7d; b2_0007=0xf3 { Wait(); }

### XCHG ###
# Exchange a bank register with the destination operand (no flags touched).
:XCHG.B b2_s4_reg8, dst4B is (b1_0107=0x3d & b1_size_0=0; b2_0607=0 & b2_s4_reg8) ... & dst4B { tmp:1 = dst4B; dst4B = b2_s4_reg8; b2_s4_reg8 = tmp; }
:XCHG.B b2_s4_reg8, dst4Ax is (b1_0107=0x3d & b1_size_0=0; b2_0607=0 & b2_s4_reg8) & $(DST4AX) { tmp:1 = dst4Ax:1; dst4Ax = zext(b2_s4_reg8); b2_s4_reg8 = tmp; }
:XCHG.W b2_s4_reg16, dst4W is (b1_0107=0x3d & b1_size_0=1; b2_0607=0 & b2_s4_reg16) ... & dst4W { tmp:2 = dst4W; dst4W = b2_s4_reg16; b2_s4_reg16 = tmp; }

### XOR ###
# (1) XOR.B:G #imm, dst
:XOR^".B:G" srcImm8, dst4B is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x1) ... & dst4B); srcImm8 { tmp:1 = dst4B ^ srcImm8; dst4B = tmp; setResultFlags(tmp); }
# (1) XOR.B:G #imm, Ax
:XOR^".B:G" srcImm8, dst4Ax is ((b1_0107=0x3b & b1_size_0=0; b2_0407=0x1) & $(DST4AX)); srcImm8 { tmp:1 = dst4Ax:1 ^ srcImm8; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (1) XOR.W:G #imm, dst
:XOR^".W:G" srcImm16, dst4W is ((b1_0107=0x3b & b1_size_0=1; b2_0407=0x1) ... & dst4W); srcImm16 { tmp:2 = dst4W ^ srcImm16; dst4W = tmp; setResultFlags(tmp); }
# (2) XOR.B:G src, dst
:XOR^".B:G" src4B, dst4B_afterSrc4 is (b1_0107=0x44 & b1_size_0=0) ... & src4B ... & dst4B_afterSrc4 ... { tmp:1 = dst4B_afterSrc4 ^ src4B; dst4B_afterSrc4 = tmp; setResultFlags(tmp); }
# (2) XOR.B:G src, Ax
:XOR^".B:G" src4B, dst4Ax is (b1_0107=0x44 & b1_size_0=0) ... & src4B & $(DST4AX) ... { tmp:1 = dst4Ax:1 ^ src4B; dst4Ax = zext(tmp); setResultFlags(tmp); }
# (2) XOR.W:G src, dst
:XOR^".W:G" src4W, dst4W_afterSrc4 is (b1_0107=0x44 & b1_size_0=1) ... & src4W ... & dst4W_afterSrc4 ...
# --- XOR (continued): body of XOR.W:G src, dst ---
{ tmp:2 = dst4W_afterSrc4 ^ src4W; dst4W_afterSrc4 = tmp; setResultFlags(tmp); }

================================================
FILE: pypcode/processors/M16C/data/languages/M16C_80.cspec
================================================

================================================
FILE: pypcode/processors/M16C/data/languages/M16C_80.ldefs
================================================
Renesas M16C/80 16-Bit MicroComputer

================================================
FILE: pypcode/processors/M16C/data/languages/M16C_80.pspec
================================================

================================================
FILE: pypcode/processors/M16C/data/languages/M16C_80.slaspec
================================================
#
# Renesas M16C/80 16-Bit MicroComputer
#

#
# Memory Architecture
#
define endian=little;
define alignment=1;
define space RAM type=ram_space size=3 default;
define space register type=register_space size=2;

#
# General Registers
#
define register offset=0x0000 size=2 [ R0 R2 R1 R3 ];
define register offset=0x0000 size=1 [ R0L R0H _ _ R1L R1H _ _ ];
define register offset=0x0000 size=4 [ R2R0 R3R1 ];
define register offset=0x0000 size=6 [ R1R2R0 ];
define register offset=0x2000 size=3 [ A0 A1 ];
define register offset=0x3000 size=3 [
	PC              # Program Counter
	SVP             # Save PC Register
	VCT             # Vector Register
	byteIndexOffset # Byte offset for memory (see useByteIndex)
	bitIndex        # Index offset for bit operations (see useBitIndex)
];
define register offset=0x4000 size=3 [ INTB ];
define register offset=0x4000 size=2 [ INTBL INTBH ];
define register offset=0x5000 size=3 [
	SP  # Stack Pointer (Represents active stack pointer: ISP or USP)
	FB  # Frame Base Register
	SB  # Static Base Register
	ISP # Interrupt Stack Pointer
];
define register offset=0x6000 size=2 [
	FLG # Flag Register
	SVF # Save Flag Register
];

# FLG bit fields
@define CARRY "FLG[0,1]"
@define DEBUG "FLG[1,1]"
@define ZERO "FLG[2,1]"
@define SIGN "FLG[3,1]"
@define REG_BANK "FLG[4,1]"
@define OVERFLOW "FLG[5,1]"
@define INTERRUPT "FLG[6,1]"
@define STACK_SEL "FLG[7,1]"
@define IPL "FLG[12,3]"

define register offset=0x7000 size=2 [
	# These are really 1-Byte registers
	DMD0 # DMA mode register
	DMD1 # DMA mode register
];
define register offset=0x8000 size=2 [
	DCT0 # DMA transfer count register
	DCT1 # DMA transfer count register
	DRC0 # DMA transfer count reload register
	DRC1 # DMA transfer count reload register
];
define register offset=0x9000 size=3 [
	DMA0 # DMA memory address register
	DMA1 # DMA memory address register
	DSA0 # DMA SFR address register
	DSA1 # DMA SFR address register
	DRA0 # DMA memory address reload register
	DRA1 # DMA memory address reload register
];

# Define context bits
define register offset=0xA000 size=4 contextreg;
define context contextreg
	useBitIndex = (0, 0) noflow # =1 use bitIndex instead of bit specified by instruction
	useByteIndexOffset = (1, 2) noflow
	useSrcByteIndexOffset = (1, 1) noflow
	useDstByteIndexOffset = (2, 2) noflow
	# transient context:
	phase = (3, 4) # guard for saving off modes before starting instructions
	indDst = (5, 5) # =1 indirect destination
	indSrc = (6, 6) # =1 indirect source
	dstFollowsSrc = (7, 8) # =1 destination add-on data follows 5-bit encoded source add-on data
	                       # =2 destination add-on data follows 8-bit data
;

define token b0(8)
	b0_0007 = (0,7)
;
define token b1(8)
	b1_s5 = (4,6)
	b1_s5_4 = (6,6)
	b1_d5 = (1,3)
	b1_d5_4 = (3,3)
	b1_d2 = (4,5)
	b1_d1_regAx = (0,0)
	b1_size_5 = (5,5)
	b1_size_4 = (4,4)
	b1_size_0 = (0,0)
	b1_0707 = (7,7)
	b1_0607 = (6,7)
	b1_0507 = (5,7)
	b1_0505 = (5,5)
	b1_0407 = (4,7)
	b1_0406 = (4,6)
	b1_0405 = (4,5)
	b1_0104 = (1,4)
	b1_0103 = (1,3)
	b1_0007 = (0,7)
	b1_0000 = (0,0)
;
define token b2(8)
	b2_d5_reg8 = (6,7)
	b2_s5_reg8 = (4,5)
	b2_d5_reg16 = (6,7)
	b2_s5_reg16 = (4,5)
	b2_d5_reg32 = (6,6) # only d0 used to select double register
	b2_s5_reg32 = (4,4) # only s0 used to select double register
	b2_d5_regAxSF = (6,7) # selects A0, A1, SB or FB
	b2_s5_regAxSF = (4,5) # selects A0, A1, SB or FB
	b2_d5_regAx = (6,6)
	b2_s5_regAx = (4,4)
	b2_d5 = (6,7)
	b2_s5 = (4,5)
	b2_d5_1 = (7,7)
	b2_d5_0 = (6,6)
	b2_s5_1 = (5,5)
	b2_s5_0 = (4,4)
	b2_0707 = (7,7)
	b2_0606 = (6,6)
	b2_0405 = (4,5)
	b2_0307 = (3,7)
	b2_0305 = (3,5)
	b2_0105 = (1,5)
	b2_0102 = (1,2)
	b2_0101 = (1,1)
	b2_0007 = (0,7)
	b2_0005 = (0,5)
	b2_0003 = (0,3)
	b2_0002 = (0,2)
	b2_simm4 = (0,3) signed
	b2_shiftSign = (3,3)
	b2_bit = (0,2)
	b2_reg8 = (0,2)
	b2_reg16 = (0,2)
	b2_creg16 = (0,2)
	b2_creg24 = (0,2)
	b2_dreg24 = (0,2)
	b2_reg32 = (0,0)
	b2_regAx = (0,0)
;
define token imm8(8)
	simm8_dat = (0,7) signed
	imm8_dat = (0,7)
	imm6_dat = (2,7)
	cnd_dat = (0,3)
	imm8_0001 = (0,1)
	regBit7 = (7,7)
	regBit6 = (6,6)
	regBit5 = (5,5)
	regBit4 = (4,4)
	regBit3 = (3,3)
	regBit2 = (2,2)
	regBit1 = (1,1)
	regBit0 = (0,0)
;
define token imm16(16)
	simm16_dat = (0,15) signed
	imm16_dat = (0,15)
;
define token imm24(24)
	simm24_dat = (0,23) signed
	imm24_dat = (0,23)
;
define token imm32(32)
	simm32_dat = (0,31) signed
	imm32_dat = (0,31)
;

attach variables [ b2_s5_reg32 b2_d5_reg32 ] [ R2R0 R3R1 ];
attach variables [ b2_s5_reg16 b2_d5_reg16 ] [ R2 R3 R0 R1 ];
attach variables [ b2_s5_reg8 b2_d5_reg8 ] [ R0H R1H R0L R1L ];
attach variables [ b2_s5_regAx b2_d5_regAx b1_d1_regAx b2_regAx ] [ A0 A1 ];
attach variables [ b2_s5_regAxSF b2_d5_regAxSF ] [ A0 A1 SB FB ];
attach variables [ b2_creg16 ] [ DCT0 DCT1 FLG SVF DRC0 DRC1 DMD0 DMD1 ];
attach variables [ b2_creg24 ] [ INTB SP SB FB SVP VCT _ ISP ];
attach variables [ b2_dreg24 ] [ _ _ DMA0 DMA1 DRA0 DRA1 DSA0 DSA1 ];
attach variables [ b2_reg32 ] [ R2R0 R3R1 ];
# XCHG register attach
attach variables [ b2_reg8 ] [ R0L R1L _ _ R0H R1H _ _ ];
attach variables [ b2_reg16 ] [ R0 R1 _ _ R2 R3 _ _ ];

#
# PCode Op
#
define pcodeop Break; # BRK
define pcodeop Break2; # BRK2
define pcodeop DecimalAdd; # DADD
define pcodeop DecimalAddWithCarry; # DADC
define pcodeop DecimalSubtractWithBorrow; # DSBB
define pcodeop DecimalSubtract; # DSUB
define pcodeop Wait; # WAIT

#
# FLAG MACROS...
#
# Set zero and sign flags from result
macro setResultFlags(result) { $(SIGN) = (result s< 0x0); $(ZERO) = (result == 0x0); }

# Set carry and overflow flags for addition
macro setAdd3Flags(v1, v2, v3) { local add13 = v1 + v3; $(CARRY) = carry(v1,v3) || carry(v2,add13); $(OVERFLOW) = scarry(v1,v3) || scarry(v2,add13); }

# Set carry and overflow flags for addition
macro setAddFlags(v1, v2) { $(CARRY) = carry(v1, v2); $(OVERFLOW) = scarry(v1, v2); }

# Set overflow flags for subtraction of op3,op2 from op1 (op1-op2-op3)
macro setSubtract3Flags(v1, v2, v3) { local add12 = v1 - v2; $(CARRY) = (v1 >= v2) || (add12 >= v3); $(OVERFLOW) = sborrow(v1, v2) || sborrow(add12, v3); }

# Set overflow flags for subtraction of op2 from op1 (op1-op2)
# NOTE(review): CARRY here uses a signed compare (s>=) while setSubtract3Flags
# above uses an unsigned compare (>=) for the same flag; hardware borrow is
# conventionally unsigned — confirm against the M16C software manual before
# changing either one.
macro setSubtractFlags(v1, v2) { $(CARRY) = (v1 s>= v2); $(OVERFLOW) = sborrow(v1, v2); }

# Stack helpers: SP moves by 2 for 1/2-byte values and by 4 for 3/4-byte values.
macro push1(val) { SP = SP - 2; *:1 SP = val; }
macro push2(val) { SP = SP - 2; *:2 SP = val; }
macro push3(val) { SP = SP - 4; *:3 SP = val; }
macro push4(val) { SP = SP - 4; *:4 SP = val; }
macro pop1(val) { val = *:1 SP; SP = SP + 2; }
macro pop2(val) { val = *:2 SP; SP = SP + 2; }
macro pop3(val) { val = *:3 SP; SP = SP + 4; }
macro pop4(val) { val = *:4 SP; SP = SP + 4; }

# Two-phase decode: phase 0 consumes optional indirect-addressing prefix bytes,
# setting the indSrc/indDst context bits before re-matching the instruction.
:^instruction is phase=0 & b0_0007 & instruction [ phase=1; ] {}
:^instruction is phase=0 & b0_0007=0x09; instruction [ indDst=1; phase=1; ] {} # indirect destination prefix
:^instruction is phase=0 & b0_0007=0x41; instruction [ indSrc=1; phase=1; ] {} # indirect source prefix
:^instruction is phase=0 & b0_0007=0x49; instruction [ indDst=1; indSrc=1; phase=1; ] {} # indirect source and destination prefix

#
# Source operand location data
#

# Obtain additional source byte offset as a result of an INDEX instruction (flagged by useSrcByteIndexOffset context bit)
srcIndexOffset: is useSrcByteIndexOffset=0 { export 0:3; }
srcIndexOffset: is useSrcByteIndexOffset=1 { export byteIndexOffset; }

# Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned
# displacements, FB uses signed displacement
src5dsp8: imm8_dat^":8" is b1_s5; b2_s5; imm8_dat { export *[const]:3 imm8_dat; }
src5dsp8: simm8_dat^":8" is b1_s5; b2_s5=0x3; simm8_dat { export *[const]:3 simm8_dat; }
src5dsp16: imm16_dat^":16" is b1_s5; b2_s5; imm16_dat { export *[const]:3 imm16_dat; }
src5dsp16: simm16_dat^":16" is b1_s5; b2_s5=0x3; simm16_dat { export *[const]:3 simm16_dat; }
src5dsp24: imm24_dat^":24" is b1_s5; b2_s5; imm24_dat { export *[const]:3 imm24_dat; }
src5dsp24: simm24_dat^":24" is b1_s5; b2_s5=0x3; simm24_dat { export *[const]:3 simm24_dat; }

# src5... Handle 5-bit encoded Source specified by b1_s(3-bits) and b2_s(2-bits)
# Variable length pattern starting at instruction byte b1
# associated src5 add-on data immediately follows instruction byte b2
# abs16 and abs24 cases are broken out differently to facilitate export of constant addresses in certain cases

# 1-Byte source value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset
src5B: b2_s5_reg8 is b1_s5=0x4; b2_s5_reg8 { export b2_s5_reg8; } # Rx
src5B: b2_s5_regAx is b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { tmp:1 = b2_s5_regAx:1; export tmp; } # Ax
src5B: [b2_s5_regAx] is indSrc=1 & b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { ptr:3 = b2_s5_regAx; export *:1 ptr; } # [Ax] - w/ indirect prefix
src5B: [b2_s5_regAx] is srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; export *:1 ptr; } # [Ax]
src5B: [[b2_s5_regAx]] is indSrc=1 & srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [[Ax]]
src5B: src5dsp8^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; export *:1 ptr; } # dsp:8[Ax|SB|FB]
src5B: [src5dsp8^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:8[Ax|SB|FB]]
src5B: src5dsp16^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; export *:1 ptr; } # dsp:16[Ax|SB|FB]
src5B: [src5dsp16^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:16[Ax|SB|FB]]
src5B: src5dsp24^[b2_s5_regAx] is (srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; export *:1 ptr; } # dsp:24[Ax]
src5B: [src5dsp24^[b2_s5_regAx]] is (indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:24[Ax]]
src5B: imm16_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + byteIndexOffset; export *:1 ptr; } # abs16 (+byteIndexOffset)
src5B: imm16_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x3; imm16_dat { export *:1 imm16_dat; } # abs16 (special constant address case)
src5B: [imm16_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs16]
src5B: imm24_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + byteIndexOffset; export *:1 ptr; } # abs24 (+byteIndexOffset)
src5B: imm24_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x2; imm24_dat { export *:1 imm24_dat; } # abs24 (special constant address case)
src5B: [imm24_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + srcIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs24]

# 2-Byte source value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset
src5W: b2_s5_reg16 is b1_s5=0x4; b2_s5_reg16 { export b2_s5_reg16; } # Rx
src5W: b2_s5_regAx is b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { tmp:2 = b2_s5_regAx:2; export tmp; } # Ax
src5W: [b2_s5_regAx] is indSrc=1 & b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { ptr:3 = b2_s5_regAx; export *:2 ptr; } # [Ax] - w/ indirect prefix
src5W: [b2_s5_regAx] is srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; export *:2 ptr; } # [Ax]
src5W: [[b2_s5_regAx]] is indSrc=1 & srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [[Ax]]
src5W: src5dsp8^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; export *:2 ptr; } # dsp:8[Ax|SB|FB]
src5W: [src5dsp8^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:8[Ax|SB|FB]]
src5W: src5dsp16^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; export *:2 ptr; } # dsp:16[Ax|SB|FB]
src5W: [src5dsp16^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:16[Ax|SB|FB]]
src5W: src5dsp24^[b2_s5_regAx] is (srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; export *:2 ptr; } # dsp:24[Ax]
src5W: [src5dsp24^[b2_s5_regAx]] is (indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:24[Ax]]
src5W: imm16_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + byteIndexOffset; export *:2 ptr; } # abs16 (+byteIndexOffset)
src5W: imm16_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x3; imm16_dat { export *:2 imm16_dat; } # abs16 (special constant address case)
src5W: [imm16_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs16]
src5W: imm24_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + byteIndexOffset; export *:2 ptr; } # abs24 (+byteIndexOffset)
src5W: imm24_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x2; imm24_dat { export *:2 imm24_dat; } # abs24 (special constant address case)
src5W: [imm24_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + srcIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs24]

# 4-Byte source value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset
src5L: b2_s5_reg32 is b1_s5=0x4; b2_s5_1=1 & b2_s5_reg32 { export b2_s5_reg32; } # Rx
src5L: b2_s5_regAx is b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { tmp:4 = zext(b2_s5_regAx); export tmp; } # Ax
src5L: [b2_s5_regAx] is indSrc=1 & b1_s5=0x0; b2_s5_1=1 & b2_s5_regAx { ptr:3 = b2_s5_regAx; export *:4 ptr; } # [Ax] - w/ indirect prefix
src5L: [b2_s5_regAx] is srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; export *:4 ptr; } # [Ax]
src5L: [[b2_s5_regAx]] is indSrc=1 & srcIndexOffset & b1_s5=0x0; b2_s5_1=0 & b2_s5_regAx { ptr:3 = b2_s5_regAx + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [[Ax]]
src5L: src5dsp8^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; export *:4 ptr; } # dsp:8[Ax|SB|FB]
src5L: [src5dsp8^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x1; b2_s5_regAxSF) ... & src5dsp8 { ptr:3 = b2_s5_regAxSF + src5dsp8 + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:8[Ax|SB|FB]]
src5L: src5dsp16^[b2_s5_regAxSF] is (srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; export *:4 ptr; } # dsp:16[Ax|SB|FB]
src5L: [src5dsp16^[b2_s5_regAxSF]] is (indSrc=1 & srcIndexOffset & b1_s5=0x2; b2_s5_regAxSF) ... & src5dsp16 { ptr:3 = b2_s5_regAxSF + src5dsp16 + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:16[Ax|SB|FB]]
src5L: src5dsp24^[b2_s5_regAx] is (srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; export *:4 ptr; } # dsp:24[Ax]
src5L: [src5dsp24^[b2_s5_regAx]] is (indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5_1=0 & b2_s5_regAx) ... & src5dsp24 { ptr:3 = b2_s5_regAx + src5dsp24 + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:24[Ax]]
src5L: imm16_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + byteIndexOffset; export *:4 ptr; } # abs16 (+byteIndexOffset)
src5L: imm16_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x3; imm16_dat { export *:4 imm16_dat; } # abs16 (special constant address case)
src5L: [imm16_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x3; imm16_dat { ptr:3 = imm16_dat + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs16]
src5L: imm24_dat is indSrc=0 & useSrcByteIndexOffset=1 & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + byteIndexOffset; export *:4 ptr; } # abs24 (+byteIndexOffset)
src5L: imm24_dat is indSrc=0 & b1_s5=0x3; b2_s5=0x2; imm24_dat { export *:4 imm24_dat; } # abs24 (special constant address case)
src5L: [imm24_dat] is indSrc=1 & srcIndexOffset & b1_s5=0x3; b2_s5=0x2; imm24_dat { ptr:3 = imm24_dat + srcIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs24]

#
# The following macros are used to eliminate illegal bit patterns when using src5
# These should be used by constructor pattern matching instead of the corresponding src5 subconstructor
#
# NOTE(review): the SRC5W/SRC5L defines appear truncated in this dump
# (the closing of SRC5W and the opening of SRC5L are missing between the
# wrapped lines) — verify against the upstream slaspec.
@define SRC5B "((b1_s5=4 | b1_s5_4=0) ... & src5B)"
@define SRC5W "((b1_s5=4 | b1_s5_4=0) ...
& src5L)" # # Destination operand location data (may also be used as a source in certain cases) # # Skip instruction and source add-on bytes which occur before destination add-on bytes # Starting position is at b1 skipBytesBeforeDst5: is b1_s5; b2_s5 { } skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=1; b2_s5; imm8_dat { } # src5: dsp8 skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=2; b2_s5; imm16_dat { } # src5: dsp16 skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=3; b2_s5; imm24_dat { } # src5: dsp24/abs24 skipBytesBeforeDst5: is dstFollowsSrc=1 & b1_s5=3; b2_s5=3; imm16_dat { } # src5: abs16 skipBytesBeforeDst5: is dstFollowsSrc=2 & b1_d5; b2_d5; imm8_dat { } # dsp8 # Obtain additional destination byte offset as a result of an INDEX instruction (flagged by useDstByteIndexOffset context bit) dstIndexOffset: is useDstByteIndexOffset=0 { export 0:3; } dstIndexOffset: is useDstByteIndexOffset=1 { export byteIndexOffset; } # Obtain base offset displacement for [AX | SB | FB] - AX and SB uses unsigned displacements, FB uses signed displacement dst5dsp8: imm8_dat^":8" is (skipBytesBeforeDst5; imm8_dat) { export *[const]:3 imm8_dat; } dst5dsp8: simm8_dat^":8" is (b1_d5; b2_d5=0x3) ... & (skipBytesBeforeDst5; simm8_dat) { export *[const]:3 simm8_dat; } dst5dsp16: imm16_dat^":16" is (skipBytesBeforeDst5; imm16_dat) { export *[const]:3 imm16_dat; } dst5dsp16: simm16_dat^":16" is (b1_d5; b2_d5=0x3) ... & (skipBytesBeforeDst5; simm16_dat) { export *[const]:3 simm16_dat; } dst5dsp24: imm24_dat^":24" is (skipBytesBeforeDst5; imm24_dat) { export *[const]:3 imm24_dat; } # dst5... Handle 5-bit encoded Destination specified by b1_d5(3-bits) and b2_d5(2-bits) # Ax direct case is read-only! 
Instruction must use dst5Ax for write/update case # Variable length pattern starting at instruction byte b1 # abs16 and abs24 cases are broken out differently to facilitate export of constant addresses in certain cases # 1-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset dst5B: b2_d5_reg8 is b1_d5=0x4; b2_d5_reg8 { export b2_d5_reg8; } # Rx dst5B: b2_d5_regAx is b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:1 = b2_d5_regAx:1; export tmp; } # Ax - read-only use ! dst5B: [b2_d5_regAx] is indDst=1 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:1 ptr; } # [Ax] - w/ indirect prefix dst5B: [b2_d5_regAx] is dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; export *:1 ptr; } # [Ax] dst5B: [[b2_d5_regAx]] is indDst=1 & dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [[Ax]] dst5B: dst5dsp8^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; export *:1 ptr; } # dsp:8[Ax|SB|FB] dst5B: [dst5dsp8^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:8[Ax|SB|FB]] dst5B: dst5dsp16^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; export *:1 ptr; } # dsp:16[Ax|SB|FB] dst5B: [dst5dsp16^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:16[Ax|SB|FB]] dst5B: dst5dsp24^[b2_d5_regAx] is (dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... 
& dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; export *:1 ptr; } # dsp:24[Ax]
dst5B: [dst5dsp24^[b2_d5_regAx]] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [dsp:24[Ax]]
dst5B: imm16_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + byteIndexOffset; export *:1 ptr; } # abs16 (+byteIndexOffset)
dst5B: imm16_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { export *:1 imm16_dat; } # abs16 (special constant address case)
dst5B: [imm16_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs16]
dst5B: imm24_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + byteIndexOffset; export *:1 ptr; } # abs24
dst5B: imm24_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *:1 imm24_dat; } # abs24 (special constant address case)
dst5B: [imm24_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + dstIndexOffset; ptr = *:3 ptr; export *:1 ptr; } # [abs24]

# 2-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset
# Same addressing-mode table as dst5B but with a 2-byte access size
dst5W: b2_d5_reg16 is b1_d5=0x4; b2_d5_reg16 { export b2_d5_reg16; } # Rx
dst5W: b2_d5_regAx is b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:2 = b2_d5_regAx:2; export tmp; } # Ax - read-only use !
dst5W: [b2_d5_regAx] is indDst=1 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:2 ptr; } # [Ax] - w/ indirect prefix
dst5W: [b2_d5_regAx] is dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; export *:2 ptr; } # [Ax]
dst5W: [[b2_d5_regAx]] is indDst=1 & dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [[Ax]]
dst5W: dst5dsp8^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; export *:2 ptr; } # dsp:8[Ax|SB|FB]
dst5W: [dst5dsp8^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:8[Ax|SB|FB]]
dst5W: dst5dsp16^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; export *:2 ptr; } # dsp:16[Ax|SB|FB]
dst5W: [dst5dsp16^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:16[Ax|SB|FB]]
dst5W: dst5dsp24^[b2_d5_regAx] is (dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; export *:2 ptr; } # dsp:24[Ax]
dst5W: [dst5dsp24^[b2_d5_regAx]] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [dsp:24[Ax]]
dst5W: imm16_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + byteIndexOffset; export *:2 ptr; } # abs16 (+byteIndexOffset)
dst5W: imm16_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x3) ...
& (skipBytesBeforeDst5; imm16_dat) { export *:2 imm16_dat; } # abs16 (special constant address case)
dst5W: [imm16_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs16]
dst5W: imm24_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + byteIndexOffset; export *:2 ptr; } # abs24
dst5W: imm24_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *:2 imm24_dat; } # abs24 (special constant address case)
dst5W: [imm24_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + dstIndexOffset; ptr = *:3 ptr; export *:2 ptr; } # [abs24]

# 4-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5) - supports indirect prefix and byteIndexOffset
# Same addressing-mode table as dst5B/dst5W but with a 4-byte access size; the Ax direct case is zero-extended from 3 bytes
dst5L: b2_d5_reg32 is b1_d5=0x4; b2_d5_1=1 & b2_d5_reg32 { export b2_d5_reg32; } # Rx
dst5L: b2_d5_regAx is b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:4 = zext(b2_d5_regAx); export tmp; } # Ax - read-only use !
dst5L: [b2_d5_regAx] is indDst=1 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:4 ptr; } # [Ax] - w/ indirect prefix
dst5L: [b2_d5_regAx] is dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; export *:4 ptr; } # [Ax]
dst5L: [[b2_d5_regAx]] is indDst=1 & dstIndexOffset & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [[Ax]]
dst5L: dst5dsp8^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; export *:4 ptr; } # dsp:8[Ax|SB|FB]
dst5L: [dst5dsp8^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8 + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:8[Ax|SB|FB]]
dst5L: dst5dsp16^[b2_d5_regAxSF] is (dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; export *:4 ptr; } # dsp:16[Ax|SB|FB]
dst5L: [dst5dsp16^[b2_d5_regAxSF]] is (indDst=1 & dstIndexOffset & b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16 + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:16[Ax|SB|FB]]
dst5L: dst5dsp24^[b2_d5_regAx] is (dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; export *:4 ptr; } # dsp:24[Ax]
dst5L: [dst5dsp24^[b2_d5_regAx]] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24 + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [dsp:24[Ax]]
dst5L: imm16_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + byteIndexOffset; export *:4 ptr; } # abs16 (+byteIndexOffset)
dst5L: imm16_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { export *:4 imm16_dat; } # abs16 (special constant address case)
dst5L: [imm16_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { ptr:3 = imm16_dat + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs16]
dst5L: imm24_dat is (indDst=0 & useDstByteIndexOffset=1 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + byteIndexOffset; export *:4 ptr; } # abs24
dst5L: imm24_dat is (indDst=0 & b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *:4 imm24_dat; } # abs24 (special constant address case)
dst5L: [imm24_dat] is (indDst=1 & dstIndexOffset & b1_d5=0x3; b2_d5=0x2) ...
& (skipBytesBeforeDst5; imm24_dat) { ptr:3 = imm24_dat + dstIndexOffset; ptr = *:3 ptr; export *:4 ptr; } # [abs24]

# 3-Byte destination effective address specified by 5-bit encoding (b1_d5/b2_d5)
# Exports the address itself (or a constant address for abs16/abs24), not the value at the address
dst5A: dst5dsp8^[b2_d5_regAxSF] is (b1_d5=0x1; b2_d5_regAxSF) ... & dst5dsp8 { ptr:3 = b2_d5_regAxSF + dst5dsp8; export ptr; } # dsp:8[Ax|SB|FB]
dst5A: dst5dsp16^[b2_d5_regAxSF] is (b1_d5=0x2; b2_d5_regAxSF) ... & dst5dsp16 { ptr:3 = b2_d5_regAxSF + dst5dsp16; export ptr; } # dsp:16[Ax|SB|FB]
dst5A: dst5dsp24^[b2_d5_regAx] is (b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & dst5dsp24 { ptr:3 = b2_d5_regAx + dst5dsp24; export ptr; } # dsp:24[Ax]
dst5A: imm16_dat is (b1_d5=0x3; b2_d5=0x3) ... & (skipBytesBeforeDst5; imm16_dat) { export *[const]:3 imm16_dat; } # abs16 (special constant address case)
dst5A: imm24_dat is (b1_d5=0x3; b2_d5=0x2) ... & (skipBytesBeforeDst5; imm24_dat) { export *[const]:3 imm24_dat; } # abs24 (special constant address case)

# Ax destination specified by 5-bit encoding (b1_d5/b2_d5)
# NOTE! Ax destination is special case and must be handled separately by each instruction
# Starting position is at instruction b1
dst5Ax: b2_d5_regAx is b1_d5; b2_d5_regAx { export b2_d5_regAx; }

# 1/2/4-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5)
# This handles the case for dst5B, dst5W and dst5L where 5-bit encoded Source (src5) add-on bytes may exist before Destination add-on bytes
# Variable length pattern starting at instruction byte b1
dst5B_afterSrc5: dst5B is dst5B [ dstFollowsSrc=1; ] { export dst5B; }
dst5W_afterSrc5: dst5W is dst5W [ dstFollowsSrc=1; ] { export dst5W; }
dst5L_afterSrc5: dst5L is dst5L [ dstFollowsSrc=1; ] { export dst5L; }

# 1/2/4-Byte destination value/location specified by 5-bit encoding (b1_d5/b2_d5)
# This handles the case for dst5B, dst5W and dst5L where Dsp8 add-on bytes always exist before Destination add-on bytes
# Variable length pattern starting at instruction byte b1
dst5B_afterDsp8: dst5B is dst5B [ dstFollowsSrc=2; ] { export dst5B; }
dst5W_afterDsp8: dst5W is dst5W [ dstFollowsSrc=2; ] { export dst5W; }

#
# The following macros are used to eliminate illegal bit patterns when using dst5
# These should be used by constructor pattern matching instead of the corresponding dst5 subconstructor
#
# NOTE(review): DST5L_AFTER_DSP8 references dst5L_afterDsp8 but no such constructor is visible in this
# section (only the B and W variants are defined above) - presumably defined elsewhere or unused; verify
@define DST5B "((b1_d5=4 | b1_d5_4=0) ... & dst5B)"
@define DST5W "((b1_d5=4 | b1_d5_4=0) ... & dst5W)"
@define DST5L "((b1_d5=4 | b1_d5_4=0) ... & dst5L)"
@define DST5A "((b1_d5_4=0) ... & dst5A)"
@define DST5AX "((b1_d5=0x0; b2_d5_1=1) & dst5Ax)"
@define DST5B_AFTER_SRC5 "((b1_d5=4 | b1_d5_4=0) ... & dst5B_afterSrc5)"
@define DST5W_AFTER_SRC5 "((b1_d5=4 | b1_d5_4=0) ... & dst5W_afterSrc5)"
@define DST5L_AFTER_SRC5 "((b1_d5=4 | b1_d5_4=0) ... & dst5L_afterSrc5)"
@define DST5B_AFTER_DSP8 "((b1_d5=4 | b1_d5_4=0) ... & dst5B_afterDsp8)"
@define DST5W_AFTER_DSP8 "((b1_d5=4 | b1_d5_4=0) ... & dst5W_afterDsp8)"
@define DST5L_AFTER_DSP8 "((b1_d5=4 | b1_d5_4=0) ... & dst5L_afterDsp8)"

# dst2... Handle 2-bit encoded Destination specified by b1_d2
# Variable length pattern starting at instruction byte b1
# TODO? Certain uses of dst2 should exclude the R0 case (b1_d2=0)

# 1-Byte destination value/location specified by 2-bit encoding (b1_d2)
dst2B: R0L is b1_d2=0 & R0L { export R0L; }
dst2B: imm16_dat is b1_d2=1; imm16_dat { export *:1 imm16_dat; }
dst2B: [imm16_dat] is indDst=1 & b1_d2=1; imm16_dat { ptr:3 = imm16_dat; ptr = *:3 ptr; export *:1 ptr; }
dst2B: imm8_dat^":8"^[SB] is b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; export *:1 ptr; }
dst2B: [imm8_dat^":8"^[SB]] is indDst=1 & b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; ptr = *:3 ptr; export *:1 ptr; }
dst2B: simm8_dat^":8"^[FB] is b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; export *:1 ptr; }
dst2B: [simm8_dat^":8"^[FB]] is indDst=1 & b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; ptr = *:3 ptr; export *:1 ptr; }

# 2-Byte destination value/location specified by 2-bit encoding (b1_d2)
dst2W: R0 is b1_d2=0 & R0 { export R0; }
dst2W: imm16_dat is b1_d2=1; imm16_dat { export *:2 imm16_dat; }
dst2W: [imm16_dat] is indDst=1 & b1_d2=1; imm16_dat { ptr:3 = imm16_dat; ptr = *:3 ptr; export *:2 ptr; }
dst2W: imm8_dat^":8"^[SB] is b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; export *:2 ptr; }
dst2W: [imm8_dat^":8"^[SB]] is indDst=1 & b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; ptr = *:3 ptr; export *:2 ptr; }
dst2W: simm8_dat^":8"^[FB] is b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; export *:2 ptr; }
dst2W: [simm8_dat^":8"^[FB]] is indDst=1 & b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; ptr = *:3 ptr; export *:2 ptr; }

# 4-Byte destination value/location specified by 2-bit encoding (b1_d2)
dst2L: R2R0 is b1_d2=0 & R2R0 { export R2R0; }
dst2L: imm16_dat is b1_d2=1; imm16_dat { export *:4 imm16_dat; }
dst2L: [imm16_dat] is indDst=1 & b1_d2=1; imm16_dat { ptr:3 = imm16_dat; ptr = *:3 ptr; export *:4 ptr; }
dst2L: imm8_dat^":8"^[SB] is b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat;
export *:4 ptr; }
dst2L: [imm8_dat^":8"^[SB]] is indDst=1 & b1_d2=2 & SB; imm8_dat { ptr:3 = SB + imm8_dat; ptr = *:3 ptr; export *:4 ptr; }
dst2L: simm8_dat^":8"^[FB] is b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; export *:4 ptr; }
dst2L: [simm8_dat^":8"^[FB]] is indDst=1 & b1_d2=3 & FB; simm8_dat { ptr:3 = FB + simm8_dat; ptr = *:3 ptr; export *:4 ptr; }

# SP-relative 1-byte and 2-byte locations with signed 8-bit displacement
dsp8spB: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = SP + simm8_dat; export *:1 ptr; }
dsp8spW: simm8_dat^":8"^[SP] is simm8_dat & SP { ptr:3 = SP + simm8_dat; export *:2 ptr; }

#
# Bit base - associated add-on data immediately follows instruction byte b2
# (Ax destination case must be handled separately)
#

# Obtain bitbase offset displacement for [AX | SB | FB] - AX and SB use unsigned displacements, FB uses a signed displacement
bitbaseDsp8: imm8_dat^":11" is b1_d5; b2_d5; imm8_dat { export *[const]:3 imm8_dat; }
bitbaseDsp8: simm8_dat^":11" is b1_d5; b2_d5=0x3; simm8_dat { export *[const]:3 simm8_dat; }
bitbaseDsp16: imm16_dat^":19" is b1_d5; b2_d5; imm16_dat { export *[const]:3 imm16_dat; }
bitbaseDsp16: simm16_dat^":19" is b1_d5; b2_d5=0x3; simm16_dat { export *[const]:3 simm16_dat; }
bitbaseDsp24: imm24_dat^":27" is b1_d5; b2_d5; imm24_dat { export *[const]:3 imm24_dat; }
bitbaseDsp24: simm24_dat^":27" is b1_d5; b2_d5=0x3; simm24_dat { export *[const]:3 simm24_dat; }

# Byte containing the target bit; when useBitIndex=1 (set by BINDEX) the byte offset bitIndex/8 is added
bitbase: b2_d5_reg8 is useBitIndex=0 & b1_d5=0x4; b2_d5_reg8 { export b2_d5_reg8; } # Rx
bitbase: b2_d5_regAx is useBitIndex=0 & b1_d5=0x0; b2_d5_1=1 & b2_d5_regAx { tmp:1 = b2_d5_regAx:1; export tmp; } # Ax - read-only case
bitbase: [b2_d5_regAx] is useBitIndex=0 & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx; export *:1 ptr; } # [Ax]
bitbase: bitbaseDsp8^[b2_d5_regAxSF] is (useBitIndex=0 & b1_d5=0x1; b2_d5_regAxSF) ... & bitbaseDsp8 { ptr:3 = b2_d5_regAxSF + bitbaseDsp8; export *:1 ptr; } # base:11[Ax|SB|FB]
bitbase: bitbaseDsp16^[b2_d5_regAxSF] is (useBitIndex=0 & b1_d5=0x2; b2_d5_regAxSF) ... & bitbaseDsp16 { ptr:3 = b2_d5_regAxSF + bitbaseDsp16; export *:1 ptr; } # base:19[Ax|SB|FB]
bitbase: bitbaseDsp24^[b2_d5_regAx] is (useBitIndex=0 & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & bitbaseDsp24 { ptr:3 = b2_d5_regAx + bitbaseDsp24; export *:1 ptr; } # base:27[Ax]
bitbase: imm16_dat^":19" is useBitIndex=0 & b1_d5=0x3; b2_d5=0x3; imm16_dat { export *:1 imm16_dat; } # base:19
bitbase: imm24_dat^":27" is useBitIndex=0 & b1_d5=0x3; b2_d5=0x2; imm24_dat { export *:1 imm24_dat; } # base:27
bitbase: [b2_d5_regAx] is useBitIndex=1 & b1_d5=0x0; b2_d5_1=0 & b2_d5_regAx { ptr:3 = b2_d5_regAx + (bitIndex / 8); export *:1 ptr; } # [Ax] w/bitIndex
bitbase: bitbaseDsp8^[b2_d5_regAxSF] is (useBitIndex=1 & b1_d5=0x1; b2_d5_regAxSF) ... & bitbaseDsp8 { ptr:3 = b2_d5_regAxSF + bitbaseDsp8 + (bitIndex / 8); export *:1 ptr; } # base:11[Ax|SB|FB] w/bitIndex
bitbase: bitbaseDsp16^[b2_d5_regAxSF] is (useBitIndex=1 & b1_d5=0x2; b2_d5_regAxSF) ... & bitbaseDsp16 { ptr:3 = b2_d5_regAxSF + bitbaseDsp16 + (bitIndex / 8); export *:1 ptr; } # base:19[Ax|SB|FB] w/bitIndex
bitbase: bitbaseDsp24^[b2_d5_regAx] is (useBitIndex=1 & b1_d5=0x3; b2_d5_1=0 & b2_d5_regAx) ... & bitbaseDsp24 { ptr:3 = b2_d5_regAx + bitbaseDsp24 + (bitIndex / 8); export *:1 ptr; } # base:27[Ax] w/bitIndex
bitbase: imm16_dat^":19" is useBitIndex=1 & b1_d5=0x3; b2_d5=0x3; imm16_dat { ptr:3 = imm16_dat + (bitIndex / 8); export *:1 ptr; } # base:19 w/bitIndex
bitbase: imm24_dat^":27" is useBitIndex=1 & b1_d5=0x3; b2_d5=0x2; imm24_dat { ptr:3 = imm24_dat + (bitIndex / 8); export *:1 ptr; } # base:27 w/bitIndex

# Ax bitbase destination specified by 5-bit encoding (b1_d5/b2_d5)
# NOTE! Ax destination is special case and must be handled separately by each instruction
# Starting position is at instruction b1
bitbaseAx: b2_d5_regAx is b1_d5; b2_d5_regAx { export b2_d5_regAx; }
bitbaseAbs16: imm16_dat is imm16_dat { export *:1 imm16_dat; }

#
# The following macros are used to eliminate illegal bit patterns when using dst5
# These should be used by constructor pattern matching instead of the corresponding dst5 subconstructor
#
@define BITBASE "((b1_d5=4 | b1_d5_4=0) ... & bitbase)"
@define BITBASE_AX "((b1_d5=0x0; b2_d5_1=1) & bitbaseAx)"

# Bit identifier (may be overridden if useBitIndex has been set by BINDEX instruction)
bit: b2_bit is useBitIndex=0 & b2_bit { export *[const]:1 b2_bit; }
bit: [bitIndex] is useBitIndex=1 & bitIndex { val:3 = bitIndex % 8; b:1 = val:1; export b; }

#
# Immediate data operand
# Fixed length - current position is at start of immediate data
#
srcImm3: "#"^b2_0002 is b2_0002 { export *[const]:1 b2_0002; }
srcImm8: "#"^imm8_dat is imm8_dat { export *[const]:1 imm8_dat; }
srcImm8a: "#"^imm8_dat is imm8_dat { export *[const]:1 imm8_dat; } # used when two imm8 are needed
srcImm16: "#"^imm16_dat is imm16_dat { export *[const]:2 imm16_dat; }
srcImm16a: "#"^imm16_dat is imm16_dat { export *[const]:2 imm16_dat; } # used when two imm16 are needed
srcImm24: "#"^imm24_dat is imm24_dat { export *[const]:3 imm24_dat; }
srcImm32: "#"^imm32_dat is imm32_dat { export *[const]:4 imm32_dat; }

# Unsigned immediate data from 1-bit value: 1 <= value <= 2 (1 added to unsigned bit value)
srcImm1p: "#"^val is b1_0505 [ val = b1_0505 + 1; ] { export *[const]:1 val; }

# Unsigned immediate data from 2-bit value: 1 <= value <= 8 (1 added to unsigned bit value)
srcImm3p: "#"^val is b1_0405 & b1_0000 [ val = (b1_0405 << 1) + b1_0000 + 1; ] { export *[const]:1 val; }

srcSimm8: "#"^simm8_dat is simm8_dat { export *[const]:1 simm8_dat; }
srcSimm16: "#"^simm16_dat is simm16_dat { export *[const]:2 simm16_dat; }
srcSimm32: "#"^simm32_dat is simm32_dat {
export *[const]:4 simm32_dat; }

# Signed immediate data from signed 4-bit value: -8 <= value <= 7
srcSimm4: "#"^b2_simm4 is b2_simm4 { export *[const]:1 b2_simm4; }
srcSimm8a: srcSimm8 is srcSimm8 { export srcSimm8; }
srcSimm16a: srcSimm16 is srcSimm16 { export srcSimm16; }

# Signed immediate shift amount from 4-bit value: -8 <= value <= -1 || 1 <= value <= 8
srcSimm4Shift: "#"^val is b2_shiftSign=0 & b2_0002 [ val = b2_0002 + 1; ] { export *[const]:1 val; }
srcSimm4Shift: "#"^val is b2_shiftSign=1 & b2_0002 [ val = -(b2_0002 + 1); ] { export *[const]:1 val; }

srcZero8: "#0" is b1_0007 { export 0:1; }
srcZero16: "#0" is b1_0007 { export 0:2; }

# special 6-bit immediate for INT number
srcIntNum: "#"^imm6_dat is imm6_dat { export *[const]:1 imm6_dat; }

#
# Offset label operand
#
abs24offset: imm24_dat is imm24_dat { export *:1 imm24_dat; }
abs16offset: imm16_dat is imm16_dat { export *:1 imm16_dat; }

# Relative address offsets - offset is computed from inst_start plus the fixed byte position of the displacement
rel16offset1: offs is simm16_dat [ offs = inst_start + 1 + simm16_dat; ] { export *:1 offs; }
rel8offset1: offs is simm8_dat [ offs = inst_start + 1 + simm8_dat; ] { export *:1 offs; }
rel8offset2: offs is simm8_dat [ offs = inst_start + 2 + simm8_dat; ] { export *:1 offs; }
rel3offset2: offs is b1_0405 & b1_0000 [ offs = inst_start + 2 + ((b1_0405 << 1) + b1_0000); ] { export *:1 offs; }
reloffset_dst5W: dst5W is $(DST5W) { local reladdr = inst_start + dst5W; export *:3 reladdr; }
reloffset_dst5L: dst5L is $(DST5L) { local reladdr = inst_start + dst5L; export *:3 reladdr; }
reloffset_dst5Ax: dst5Ax is $(DST5AX) { local reladdr = inst_start + dst5Ax; export *:3 reladdr; }

#
# Conditionals (see BMcnd)
#
# TODO!! Need to verify conditional logic pulled from old slaspec
# TODO: the 'cnd' subconstructor should really constrain the bits 4-7 to 0x0, however this exposes a sleigh compiler problem
# Each exports a 1-byte boolean test result computed from the C/Z/S/O flags
cnd: "LTU" is cnd_dat=0x0 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0
cnd: "LEU" is cnd_dat=0x1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=)
cnd: "NE" is cnd_dat=0x2 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0
cnd: "PZ" is cnd_dat=0x3 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=)
cnd: "NO" is cnd_dat=0x4 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0
cnd: "GT" is cnd_dat=0x5 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<)
cnd: "GE" is cnd_dat=0x6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=)
cnd: "GEU" is cnd_dat=0x8 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1
cnd: "GTU" is cnd_dat=0x9 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<)
cnd: "EQ" is cnd_dat=0xa { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1
cnd: "N" is cnd_dat=0xb { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>)
cnd: "O" is cnd_dat=0xc { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1
cnd: "LE" is cnd_dat=0xd { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=)
cnd: "LT" is cnd_dat=0xe { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=)

# Same condition tests keyed from byte-2 fields (b2_0606 selects the upper/lower half of the table)
b2cnd: "LTU" is b2_0606=0 & b2_0002=0 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0
b2cnd: "LEU" is b2_0606=0 & b2_0002=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=)
b2cnd: "NE" is b2_0606=0 & b2_0002=2 {
tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0
b2cnd: "PZ" is b2_0606=0 & b2_0002=3 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=)
b2cnd: "NO" is b2_0606=0 & b2_0002=4 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0
b2cnd: "GT" is b2_0606=0 & b2_0002=5 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<)
b2cnd: "GE" is b2_0606=0 & b2_0002=6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=)
b2cnd: "GEU" is b2_0606=1 & b2_0002=0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1
b2cnd: "GTU" is b2_0606=1 & b2_0002=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<)
b2cnd: "EQ" is b2_0606=1 & b2_0002=2 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1
b2cnd: "N" is b2_0606=1 & b2_0002=3 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>)
b2cnd: "O" is b2_0606=1 & b2_0002=4 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1
b2cnd: "LE" is b2_0606=1 & b2_0002=5 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=)
b2cnd: "LT" is b2_0606=1 & b2_0002=6 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=)

# Same condition tests keyed from byte-1 fields (b1_0406 / b1_0000)
b1cnd: "LTU" is b1_0406=0 & b1_0000=0 { tstCnd:1 = ($(CARRY) == 0); export tstCnd; } # less than (>), C flag is 0
b1cnd: "LEU" is b1_0406=0 & b1_0000=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 0); export tstCnd; } # Equal to or less than (>=)
b1cnd: "NE" is b1_0406=1 & b1_0000=0 { tstCnd:1 = ($(ZERO) == 0); export tstCnd; } # Not Equal to (=), Z flag is 0
b1cnd: "PZ" is b1_0406=1 & b1_0000=1 { tstCnd:1 = ($(SIGN) == 0); export tstCnd; } # Positive or zero (0<=)
b1cnd: "NO" is b1_0406=2 & b1_0000=0 { tstCnd:1 = ($(OVERFLOW) == 0); export tstCnd; } # O flag is 0
b1cnd: "GT" is b1_0406=2 & b1_0000=1 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 0); export tstCnd; } # Greater than (signed value) (<)
b1cnd: "GE" is b1_0406=3 & b1_0000=0 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 0); export tstCnd; } # Equal to or greater than (signed value) (<=)
b1cnd: "GEU" is b1_0406=4 & b1_0000=0 { tstCnd:1 = ($(CARRY) == 1); export tstCnd; } # Equal to or greater than (<=), C flag is 1
b1cnd: "GTU" is b1_0406=4 & b1_0000=1 { tstCnd:1 = (($(CARRY) & (!$(ZERO))) == 1); export tstCnd; } # Greater than (<)
b1cnd: "EQ" is b1_0406=5 & b1_0000=0 { tstCnd:1 = ($(ZERO) == 1); export tstCnd; } # Equal to (=), Z flag is 1
b1cnd: "N" is b1_0406=5 & b1_0000=1 { tstCnd:1 = ($(SIGN) == 1); export tstCnd; } # Negative (0>)
b1cnd: "O" is b1_0406=6 & b1_0000=0 { tstCnd:1 = ($(OVERFLOW) == 1); export tstCnd; } # O flag is 1
b1cnd: "LE" is b1_0406=6 & b1_0000=1 { tstCnd:1 = ((($(SIGN) ^ $(OVERFLOW)) | $(ZERO)) == 1); export tstCnd; } # Equal to or less than (signed value) (>=)
b1cnd: "LT" is b1_0406=7 & b1_0000=0 { tstCnd:1 = (($(SIGN) ^ $(OVERFLOW)) == 1); export tstCnd; } # less than (signed value) (<=)

#
# Flag bit operand
#
# Exports the bit position of the named FLG flag as a 2-byte constant
flagBit: "C" is b2_0002=0 { export 0:2; }
flagBit: "D" is b2_0002=1 { export 1:2; }
flagBit: "Z" is b2_0002=2 { export 2:2; }
flagBit: "S" is b2_0002=3 { export 3:2; }
flagBit: "B" is b2_0002=4 { export 4:2; }
flagBit: "O" is b2_0002=5 { export 5:2; }
flagBit: "I" is b2_0002=6 { export 6:2; }
flagBit: "U" is b2_0002=7 { export 7:2; }

with: phase=1 {

#
# Instruction Constructors
#

##### ABS #####

# (1) ABS.B dst
# 1010 0100 1001 1111 0011 0100 0001 0010 ABS.B 0x1234:16[SB]
# 0000 1001 1010 0100 1001 1111 0011 0100 0001 0010 ABS.B [0x1234:16[SB]]
# NOTE(review): the local branch label was lost from this text (bare "goto ;");
# restored as <done> placed after the negation so the store and flag update always execute - verify against upstream
:ABS.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1f) ... & $(DST5B) ...
{
	tmp:1 = dst5B;
	$(OVERFLOW) = (tmp == 0x80);
	if (tmp s>= 0) goto <done>;
	tmp = -tmp;
	<done>
	dst5B = tmp;
	setResultFlags(tmp);
}

# (1) ABS.B Ax
:ABS.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1f) ... & $(DST5AX) ...
{
	tmp:1 = dst5Ax:1;
	$(OVERFLOW) = (tmp == 0x80);
	# NOTE(review): local branch label restored (text showed bare "goto ;") - verify against upstream
	if (tmp s>= 0) goto <done>;
	tmp = -tmp;
	<done>
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

# (1) ABS.W dst
# 1010 0101 1001 1111 0011 0100 0001 0010 ABS.W 0x1234:16[SB]
# 0000 1001 1010 0101 1001 1111 0011 0100 0001 0010 ABS.W [0x1234:16[SB]]
:ABS.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1f) ... & $(DST5W) ...
{
	tmp:2 = dst5W;
	$(OVERFLOW) = (tmp == 0x8000);
	if (tmp s>= 0) goto <done>;
	tmp = -tmp;
	<done>
	dst5W = tmp;
	setResultFlags(tmp);
}

# (1) ABS.W Ax
:ABS.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1f) ... & $(DST5AX) ...
{
	tmp:2 = dst5Ax:2;
	$(OVERFLOW) = (tmp == 0x8000);
	if (tmp s>= 0) goto <done>;
	tmp = -tmp;
	<done>
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

##### ADC #####

# (1) ADC.B #simm, dst
# 0000 0001 1000 0100 1010 1110 0011 0100 0001 0010 0101 0110 ADC.B 0x56, 0x1234:16[SB]
# 0000 1001 0000 0001 1000 0100 1010 1110 0011 0100 0001 0010 0101 0110 ADC.B 0x56, [0x1234:16[SB]]
:ADC.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)); srcSimm8 { tmp:1 = dst5B; c:1 = $(CARRY); setAdd3Flags(tmp, srcSimm8, c); tmp = tmp + srcSimm8 + c; dst5B = tmp; setResultFlags(tmp); }

# (1) ADC.B #simm, Ax
:ADC.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { tmp:1 = dst5Ax:1; c:1 = $(CARRY); setAdd3Flags(tmp, srcSimm8, c); tmp = tmp + srcSimm8 + c; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (1) ADC.W #simm, dst
:ADC.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { tmp:2 = dst5W; c:2 = zext($(CARRY)); setAdd3Flags(tmp, srcSimm16, c); tmp = tmp + srcSimm16 + c; dst5W = tmp; setResultFlags(tmp); }

# (1) ADC.W #simm, Ax
:ADC.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { tmp:2 = dst5Ax:2; c:2 = zext($(CARRY)); setAdd3Flags(tmp, srcSimm16, c); tmp = tmp + srcSimm16 + c; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (2) ADC.B src5, dst5
:ADC.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { tmp:1 = dst5B_afterSrc5; s:1 = src5B; c:1 = $(CARRY); setAdd3Flags(tmp, s, c); tmp = tmp + s + c; dst5B_afterSrc5 = tmp; setResultFlags(tmp); }

# (2) ADC.B src5, Ax
:ADC.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) & $(DST5AX) ...) { tmp:1 = dst5Ax:1; s:1 = src5B; c:1 = $(CARRY); setAdd3Flags(tmp, s, c); tmp = tmp + s + c; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (2) ADC.W src5, dst5
:ADC.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { tmp:2 = dst5W_afterSrc5; s:2 = src5W; c:2 = zext($(CARRY)); setAdd3Flags(tmp, s, c); tmp = tmp + s + c; dst5W_afterSrc5 = tmp; setResultFlags(tmp); }

# (2) ADC.W src5, Ax
:ADC.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) & $(DST5AX) ...) { tmp:2 = dst5Ax:2; s:2 = src5W; c:2 = zext($(CARRY)); setAdd3Flags(tmp, s, c); tmp = tmp + s + c; dst5Ax = zext(tmp); setResultFlags(tmp); }

##### ADCF #####

# (1) ADCF.B dst
:ADCF.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x1e) ...
& $(DST5B) { tmp:1 = dst5B; c:1 = $(CARRY); setAddFlags(tmp, c); tmp = tmp + c; dst5B = tmp; setResultFlags(tmp); }

# (1) ADCF.B Ax
:ADCF.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x1e) & $(DST5AX) { tmp:1 = dst5Ax:1; c:1 = $(CARRY); setAddFlags(tmp, c); tmp = tmp + c; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (1) ADCF.W dst
:ADCF.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W) { tmp:2 = dst5W; c:2 = zext($(CARRY)); setAddFlags(tmp, c); tmp = tmp + c; dst5W = tmp; setResultFlags(tmp); }

# (1) ADCF.W Ax
:ADCF.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x1e) & $(DST5AX) { tmp:2 = dst5Ax:2; c:2 = zext($(CARRY)); setAddFlags(tmp, c); tmp = tmp + c; dst5Ax = zext(tmp); setResultFlags(tmp); }

##### ADD #####

# (1) ADD.B:G #simm, dst
:ADD^".B:G" srcSimm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)); srcSimm8 { tmp:1 = dst5B; setAddFlags(tmp, srcSimm8); tmp = tmp + srcSimm8; dst5B = tmp; setResultFlags(tmp); }

# (1) ADD.B:G #simm, Ax
:ADD^".B:G" srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { tmp:1 = dst5Ax:1; setAddFlags(tmp, srcSimm8); tmp = tmp + srcSimm8; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (1) ADD.W:G #simm, dst
:ADD^".W:G" srcSimm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { tmp:2 = dst5W; setAddFlags(tmp, srcSimm16); tmp = tmp + srcSimm16; dst5W = tmp; setResultFlags(tmp); }

# (1) ADD.W:G #simm, Ax
:ADD^".W:G" srcSimm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { tmp:2 = dst5Ax:2; setAddFlags(tmp, srcSimm16); tmp = tmp + srcSimm16; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (2) ADD.L:G #simm, dst
:ADD^".L:G" srcSimm32, dst5L is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcSimm32 { tmp:4 = dst5L; setAddFlags(tmp, srcSimm32); tmp = tmp + srcSimm32; dst5L = tmp; setResultFlags(tmp); }

# (2) ADD.L:G #simm, Ax
:ADD^".L:G" srcSimm32, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcSimm32 { tmp:4 = zext(dst5Ax); setAddFlags(tmp, srcSimm32); tmp = tmp + srcSimm32; dst5Ax = tmp:3; setResultFlags(tmp); }

# (3) ADD.B:G #simm4, dst
:ADD^".B:G" srcSimm4, dst5B is (b1_0507=0x7 & b1_size_4=0 & b1_size_0=0; b2_0405=3 & srcSimm4) ... & $(DST5B) { tmp:1 = dst5B; setAddFlags(tmp, srcSimm4); tmp = tmp + srcSimm4; dst5B = tmp; setResultFlags(tmp); }

# (3) ADD.B:G #simm4, Ax
:ADD^".B:G" srcSimm4, dst5Ax is (b1_0507=0x7 & b1_d5=0x0 & b1_size_4=0 & b1_size_0=0; b2_0405=3 & srcSimm4) & $(DST5AX) { tmp:1 = dst5Ax:1; setAddFlags(tmp, srcSimm4); tmp = tmp + srcSimm4; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (3) ADD.W:Q #simm4, dst
:ADD^".W:Q" srcSimm4, dst5W is (b1_0507=0x7 & b1_size_4=0 & b1_size_0=1; b2_0405=3 & srcSimm4) ... & $(DST5W) { tmp:2 = dst5W; imm:2 = sext(srcSimm4); setAddFlags(tmp, imm); tmp = tmp + imm; dst5W = tmp; setResultFlags(tmp); }

# (3) ADD.W:Q #simm4, Ax
:ADD^".W:Q" srcSimm4, dst5Ax is (b1_0507=0x7 & b1_d5=0x0 & b1_size_4=0 & b1_size_0=1; b2_0405=3 & srcSimm4) & $(DST5AX) { tmp:2 = dst5Ax:2; imm:2 = sext(srcSimm4); setAddFlags(tmp, imm); tmp = tmp + imm; dst5Ax = zext(tmp); setResultFlags(tmp); }

# (3) ADD.L:Q #simm4, dst
:ADD^".L:Q" srcSimm4, dst5L is (b1_0507=0x7 & b1_size_4=1 & b1_size_0=0; b2_0405=3 & srcSimm4) ...
& $(DST5L) { tmp:4 = dst5L; imm:4 = sext(srcSimm4); setAddFlags(tmp, imm); tmp = tmp + imm; dst5L = tmp; setResultFlags(tmp); } # (3) ADD.L:Q #simm4, Ax :ADD^".L:Q" srcSimm4, dst5Ax is (b1_0507=0x7 & b1_d5=0x0 & b1_size_4=1 & b1_size_0=0; b2_0405=3 & srcSimm4) & $(DST5AX) { tmp:4 = sext(dst5Ax); imm:4 = sext(srcSimm4); setAddFlags(tmp, imm); tmp = tmp + imm; dst5Ax = tmp:3; setResultFlags(tmp); } # (4) ADD.B:S #simm, dst :ADD^".B:S" srcSimm8, dst2B is ((b1_0607=0 & b1_0103=3 & b1_size_0=0) ... & dst2B); srcSimm8 { tmp:1 = dst2B; setAddFlags(tmp, srcSimm8); tmp = tmp + srcSimm8; dst2B = tmp; setResultFlags(tmp); } # (4) ADD.W:S #simm, dst # 0010 0111 0101 0110 0011 0100 0001 0010 ADD.W:S #0x1234, 0x56:8[SB] :ADD^".W:S" srcSimm16, dst2W is ((b1_0607=0 & b1_0103=3 & b1_size_0=1) ... & dst2W); srcSimm16 { tmp:2 = dst2W; setAddFlags(tmp, srcSimm16); tmp = tmp + srcSimm16; dst2W = tmp; setResultFlags(tmp); } # (5) ADD.L:S #imm1, Ax :ADD^".L:S" srcImm1p, b1_d1_regAx is b1_0607=2 & srcImm1p & b1_0104=0x6 & b1_d1_regAx { tmp:4 = sext(b1_d1_regAx); imm:4 = zext(srcImm1p); setAddFlags(tmp, imm); tmp = tmp + imm; b1_d1_regAx = tmp:3; setResultFlags(tmp); } # (6) ADD.B:G src, dst # 1011 0110 0001 1000 0101 0110 0011 0100 0001 0010 0011 0011 0010 0010 0001 0001 ADD.B:G 0x123456:24[A0], 112233[A1] # 1100 0101 1111 1000 0011 0100 0001 0010 ADD.W:G R1, 0x1234:16[FB] :ADD^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { tmp:1 = dst5B_afterSrc5; src:1 = src5B; setAddFlags(tmp, src); tmp = tmp + src; dst5B_afterSrc5 = tmp; setResultFlags(tmp); } # (6) ADD.B:G src, Ax - Ax destination case :ADD^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) & $(DST5AX) ... { tmp:1 = dst5Ax:1; src:1 = src5B; setAddFlags(tmp, src); tmp = tmp + src; dst5Ax = zext(tmp); setResultFlags(tmp); } # (6) ADD.W:G src, dst :ADD^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... 
& $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { tmp:2 = dst5W_afterSrc5; src:2 = src5W; setAddFlags(tmp, src); tmp = tmp + src; dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (6) ADD.W:G src, Ax - Ax destination case :ADD^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) & $(DST5AX) ... { tmp:2 = dst5Ax:2; src:2 = src5W; setAddFlags(tmp, src); tmp = tmp + src; dst5Ax = zext(tmp); setResultFlags(tmp); } # (7) ADD.L:G src, dst :ADD^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... { tmp:4 = dst5L_afterSrc5; src:4 = src5L; setAddFlags(tmp, src); tmp = tmp + src; dst5L_afterSrc5 = tmp; setResultFlags(tmp); } # (7) ADD.L:G src, Ax - Ax destination case :ADD^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5L) & $(DST5AX) ... { tmp:4 = zext(dst5Ax); src:4 = src5L; setAddFlags(tmp, src); tmp = tmp + src; dst5Ax = tmp:3; setResultFlags(tmp); } # (8) ADD.l:G #simm16, SP :ADD^".L:G" srcSimm16, SP is b1_0007=0xb6 & SP; b2_0007=0x13; srcSimm16 { # not done as 32-bit calculation to simplify stack analysis imm:3 = sext(srcSimm16); setAddFlags(SP, imm); SP = SP + imm; setResultFlags(SP); } # (9) ADD.L:Q #imm3, SP :ADD^".L:Q" srcImm3p, SP is b1_0607=1 & srcImm3p & b1_0103=1 & SP { # not done as 32-bit calculation to simplify stack analysis imm:3 = zext(srcImm3p); setAddFlags(SP, imm); SP = SP + imm; setResultFlags(SP); } # (10) ADD.L:S #simm8, SP :ADD^".L:S" srcSimm8, SP is b1_0007=0xb6 & SP; b2_0007=0x03; srcSimm8 { # not done as 32-bit calculation to simplify stack analysis imm:3 = sext(srcSimm8); setAddFlags(SP, imm); SP = SP + imm; setResultFlags(SP); } ##### ADDX ##### # (1) ADDX #simm, dst5 :ADDX srcSimm8, dst5L is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x11) ... 
& $(DST5L)); srcSimm8 { tmp:4 = dst5L; src:4 = sext(srcSimm8); setAddFlags(tmp, src); tmp = tmp + src; dst5L = tmp; setResultFlags(tmp); } # (1) ADDX #simm, Ax :ADDX srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { tmp:4 = zext(dst5Ax); src:4 = sext(srcSimm8); setAddFlags(tmp, src); tmp = tmp + src; dst5Ax = tmp:3; setResultFlags(tmp); } # (2) ADDX src5, dst5 :ADDX src5B, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) ... & $(DST5L_AFTER_SRC5) ... { tmp:4 = dst5L_afterSrc5; src:4 = sext(src5B); setAddFlags(tmp, src); tmp = tmp + src; dst5L_afterSrc5 = tmp; setResultFlags(tmp); } # (2) ADDX src5, Ax :ADDX src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) & $(DST5AX) ... { tmp:4 = zext(dst5Ax); src:4 = sext(src5B); setAddFlags(tmp, src); tmp = tmp + src; dst5Ax = tmp:3; setResultFlags(tmp); } ##### ADJNZ ##### # ADJNZ.B #simm4, dst, rel8offset2 # 1111 1000 1001 1111 0000 0110 ADJNZ #-0x1,R0L, :ADJNZ.B srcSimm4, dst5B, rel8offset2 is ((b1_0407=0xf & b1_size_0=0; b2_0405=1 & srcSimm4) ... & $(DST5B)); rel8offset2 { tmp:1 = dst5B + srcSimm4; dst5B = tmp; if (tmp != 0) goto rel8offset2; } # ADJNZ.B #simm4, Ax, , rel8offset2 :ADJNZ.B srcSimm4, dst5Ax, rel8offset2 is ((b1_0407=0xf & b1_size_0=0; b2_0405=1 & srcSimm4) & $(DST5AX)); rel8offset2 { tmp:1 = dst5Ax:1 + srcSimm4; dst5Ax = zext(tmp); if (tmp != 0) goto rel8offset2; } # ADJNZ.W #simm4, dst, rel8offset2 :ADJNZ.W srcSimm4, dst5W, rel8offset2 is ((b1_0407=0xf & b1_size_0=1; b2_0405=1 & srcSimm4) ... 
& $(DST5W)); rel8offset2 { tmp:2 = dst5W + sext(srcSimm4); dst5W = tmp; if (tmp != 0) goto rel8offset2; } # ADJNZ.W #simm4, Ax, rel8offset2 :ADJNZ.W srcSimm4, dst5Ax, rel8offset2 is ((b1_0407=0xf & b1_size_0=1; b2_0405=1 & srcSimm4) & $(DST5AX)); rel8offset2 { tmp:2 = dst5Ax:2 + sext(srcSimm4); dst5Ax = zext(tmp); if (tmp != 0) goto rel8offset2; } ##### AND ##### # (1) AND.B:G #imm, dst :AND^".B:G" srcImm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) ... & $(DST5B)); srcImm8 { tmp:1 = dst5B & srcImm8; dst5B = tmp; setResultFlags(tmp); } # (1) AND.B:G #imm, Ax :AND^".B:G" srcImm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) & $(DST5AX)); srcImm8 { tmp:1 = dst5Ax:1 & srcImm8; dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) AND.W:G #imm, dst :AND^".W:G" srcImm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) ... & $(DST5W)); srcImm16 { tmp:2 = dst5W & srcImm16; dst5W = tmp; setResultFlags(tmp); } # (1) AND.W:G #imm, Ax :AND^".W:G" srcImm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) & $(DST5AX)); srcImm16 { tmp:2 = dst5Ax:2 & srcImm16; dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) AND.B:S #imm, dst :AND^".B:S" srcImm8, dst2B is ((b1_0607=1 & b1_0103=6 & b1_size_0=0) ... & dst2B); srcImm8 { tmp:1 = dst2B & srcImm8; dst2B = tmp; setResultFlags(tmp); } # (2) AND.W:S #imm, dst :AND^".W:S" srcImm16, dst2W is ((b1_0607=1 & b1_0103=6 & b1_size_0=1) ... & dst2W); srcImm16 { tmp:2 = dst2W & srcImm16; dst2W = tmp; setResultFlags(tmp); } # (3) AND.B:G src5, dst5 :AND^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { tmp:1 = dst5B_afterSrc5 & src5B; dst5B_afterSrc5 = tmp; setResultFlags(tmp); } # (3) AND.B:G src5, Ax :AND^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) & $(DST5AX) ... 
{ tmp:1 = dst5Ax:1 & src5B; dst5Ax = zext(tmp); setResultFlags(tmp); } # (3) AND.W:G src5, dst5 :AND^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { tmp:2 = dst5W_afterSrc5 & src5W; dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (3) AND.W:G src5, Ax :AND^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) & $(DST5AX) ... { tmp:2 = dst5Ax:2 & src5W; dst5Ax = zext(tmp); setResultFlags(tmp); } ##### BAND ##### # BAND bit,bitbase # 0000 0001 1101 0110 0000 1011 0101 0110 0011 0100 0001 0010 BAND 0x3,0x123456[A0] :BAND bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x1 & bit) ... & $(BITBASE)) { bitValue:1 = (bitbase >> bit) & 1; $(CARRY) = $(CARRY) & bitValue; } ##### BCLR ##### # BCLR bit,bitbase # 1101 0110 0011 0011 0101 0110 0011 0100 0001 0010 BCLR 0x3,0x123456[A0] :BCLR bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & bit) ... & $(BITBASE) { mask:1 = ~(1 << bit); bitbase = bitbase & mask; } # BCLR bit,Ax :BCLR b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & b2_bit) & $(BITBASE_AX) { mask:3 = ~(1 << b2_bit); bitbaseAx = bitbaseAx & mask; } ##### BITINDEX ##### # BITINDEX.B src -- dst5B used as source # 1100 1000 1010 1110 BINDEX.B R0L :BITINDEX.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B) [ useBitIndex=1; globalset(inst_next,useBitIndex); useBitIndex=0; ] { bitIndex = zext(dst5B); } # BITINDEX.W src -- dst5W used as source :BITINDEX.W dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W) [ useBitIndex=1; globalset(inst_next,useBitIndex); useBitIndex=0; ] { bitIndex = zext(dst5W); } ##### BMCnd ##### # (1) BMcnd bit, bitbase :BM^cnd bit, bitbase is ((b1_0407=0xd & b1_size_0=0; b2_0305=0x2 & bit) ... 
& $(BITBASE)); cnd { mask:1 = ~(1 << bit); bitbase = ((cnd << bit) | (bitbase & mask)); } # (1) BMcnd bit, Ax :BM^cnd b2_bit, bitbaseAx is ((b1_0407=0xd & b1_size_0=0; b2_0305=0x2 & b2_bit) & $(BITBASE_AX)); cnd { mask:3 = ~(1 << b2_bit); bitbaseAx = ((zext(cnd) << b2_bit) | (bitbaseAx & mask)); } # (2) BMcnd C :BM^b2cnd "C" is b1_0007=0xd9; b2_0707=0 & b2_0305=5 & b2cnd { $(CARRY) = b2cnd; } ##### BNAND ##### :BNAND bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & bit) ... & $(BITBASE)) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); $(CARRY) = $(CARRY) && (bitValue == 0); } :BNAND b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & b2_bit) & $(BITBASE_AX)) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); $(CARRY) = $(CARRY) && (bitValue == 0); } ##### BNOR ##### :BNOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & bit) ... & $(BITBASE)) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); $(CARRY) = $(CARRY) || (bitValue == 0); } :BNOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x6 & b2_bit) & $(BITBASE_AX)) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); $(CARRY) = $(CARRY) || (bitValue == 0); } ##### BNOT ##### # BNOT bit,bitbase :BNOT bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & bit) ... & $(BITBASE) { mask:1 = (1 << bit); val:1 = bitbase; bitValue:1 = (~val & mask); bitbase = (val & ~mask) | bitValue; } # BNOT bit,Ax :BNOT b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x3 & b2_bit) & $(BITBASE_AX) { mask:3 = (1 << b2_bit); bitValue:3 = (~bitbaseAx & mask); bitbaseAx = (bitbaseAx & ~mask) | bitValue; } ##### BNTST ##### :BNTST bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & bit) ... 
& $(BITBASE)) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); z:1 = (bitValue == 0); $(CARRY) = z; $(ZERO) = z; } :BNTST b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & b2_bit) & $(BITBASE_AX)) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); z:1 = (bitValue == 0); $(CARRY) = z; $(ZERO) = z; } ##### BNXOR ##### :BNXOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & bit) ... & $(BITBASE)) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); $(CARRY) = $(CARRY) ^ (bitValue == 0); } :BNXOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & b2_bit) & $(BITBASE_AX)) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); $(CARRY) = $(CARRY) ^ (bitValue == 0); } ##### BOR ##### :BOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & bit) ... & $(BITBASE)) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); $(CARRY) = $(CARRY) || (bitValue != 0); } :BOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & b2_bit) & $(BITBASE_AX)) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); $(CARRY) = $(CARRY) || (bitValue != 0); } ##### BRK ##### :BRK is b1_0007=0x0 { # I don't think it is necessary to model break behavior Break(); } ##### BRK2 ##### :BRK2 is b1_0007=0x8 { # I don't think it is necessary to model break behavior Break2(); } ##### BSET ##### :BSET bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & bit) ... & $(BITBASE) { mask:1 = (1 << bit); bitbase = bitbase | mask; } :BSET b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x7 & b2_bit) & $(BITBASE_AX) { mask:3 = (1 << b2_bit); bitbaseAx = bitbaseAx | mask; } ##### BTST ##### # (1) BTST bit, bitbase :BTST bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & bit) ... 
& $(BITBASE) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; } # (1) BTST bit, Ax :BTST b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x0 & b2_bit) & $(BITBASE_AX) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; } # (2) BTST bit, bitbase :BTST b, bitbaseAbs16 is b1_0607=0 & b1_0405 & b1_0103=5 & b1_0000; bitbaseAbs16 [ b = (b1_0405 << 1) + b1_0000; ] { mask:1 = (1 << b); bitValue:1 = (bitbaseAbs16 & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; } ##### BTSTC ##### # (1) BTSTC bit, bitbase :BTSTC bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & bit) ... & $(BITBASE) { mask:1 = (1 << bit); val:1 = bitbase; bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; bitbase = val & ~mask; } # (1) BTSTC bit, Ax :BTSTC b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x4 & b2_bit) & $(BITBASE_AX) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; bitbaseAx = bitbaseAx & ~mask; } ##### BTSTS ##### # (1) BTSTS bit, bitbase :BTSTS bit, bitbase is (b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & bit) ... & $(BITBASE) { mask:1 = (1 << bit); val:1 = bitbase; bitValue:1 = (val & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; bitbase = val | mask; } # (1) BTSTS bit, Ax :BTSTS b2_bit, bitbaseAx is (b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & b2_bit) & $(BITBASE_AX) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); z:1 = (bitValue == 0); $(CARRY) = !z; $(ZERO) = z; bitbaseAx = bitbaseAx | mask; } ##### BXOR ##### :BXOR bit, bitbase is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & bit) ... 
& $(BITBASE)) { mask:1 = (1 << bit); bitValue:1 = (bitbase & mask); $(CARRY) = $(CARRY) ^ (bitValue != 0); } :BXOR b2_bit, bitbaseAx is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=0; b2_0305=0x5 & b2_bit) & $(BITBASE_AX)) { mask:3 = (1 << b2_bit); bitValue:3 = (bitbaseAx & mask); $(CARRY) = $(CARRY) ^ (bitValue != 0); } ##### CLIP ##### # CLIP.B #simm, #simm, dst5 :CLIP.B srcSimm8, srcSimm8a, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)); srcSimm8; srcSimm8a { val:1 = dst5B; cmp1:1 = srcSimm8 s> val; cmp2:1 = srcSimm8a s< val; dst5B = (cmp1 * srcSimm8) + (cmp2 * srcSimm8a) + ((!cmp1 * !cmp2) * val); } # CLIP.B #simm, #simm, Ax :CLIP.B srcSimm8, srcSimm8a, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) & $(DST5AX)); srcSimm8; srcSimm8a { val:1 = dst5Ax:1; cmp1:1 = srcSimm8 s> val; cmp2:1 = srcSimm8a s< val; dst5Ax = zext((cmp1 * srcSimm8) + (cmp2 * srcSimm8a) + ((!cmp1 * !cmp2) * val)); } # CLIP.W #simm, #simm, dst5 :CLIP.W srcSimm16, srcSimm16a, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W)); srcSimm16; srcSimm16a { val:2 = dst5W; cmp1:1 = srcSimm16 s> val; cmp2:1 = srcSimm16a s< val; dst5W = (zext(cmp1) * srcSimm16) + (zext(cmp2) * srcSimm16a) + (zext(!cmp1 * !cmp2) * val); } # CLIP.W #simm, #simm, Ax :CLIP.W srcSimm16, srcSimm16a, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) & $(DST5AX)); srcSimm16; srcSimm16a { val:2 = dst5Ax:2; cmp1:1 = srcSimm16 s> val; cmp2:1 = srcSimm16a s< val; dst5Ax = zext((zext(cmp1) * srcSimm16) + (zext(cmp2) * srcSimm16a) + (zext(!cmp1 * !cmp2) * val)); } ##### CMP ##### # (1) CMP.B:G #simm, dst5 :CMP^".B:G" srcSimm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) ... 
& $(DST5B)); srcSimm8 { tmp:1 = dst5B; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; setResultFlags(tmp); } # (1) CMP.B:G #simm, Ax :CMP^".B:G" srcSimm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { tmp:1 = dst5Ax:1; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; setResultFlags(tmp); } # (1) CMP.W:G #simm, dst5 :CMP^".W:G" srcSimm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { tmp:2 = dst5W; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; setResultFlags(tmp); } # (1) CMP.W:G #simm, Ax :CMP^".W:G" srcSimm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { tmp:2 = dst5Ax:2; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; setResultFlags(tmp); } # (2) CMP.L:G #simm, dst5 :CMP^".L:G" srcSimm32, dst5L is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcSimm32 { tmp:4 = dst5L; setSubtractFlags(tmp, srcSimm32); tmp = tmp - srcSimm32; setResultFlags(tmp); } # (2) CMP.L:G #simm, Ax :CMP^".L:G" srcSimm32, dst5Ax is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcSimm32 { tmp:4 = zext(dst5Ax); setSubtractFlags(tmp, srcSimm32); tmp = tmp - srcSimm32; setResultFlags(tmp); } # (3) CMP.B:Q #simm4, dst5 :CMP^".B:Q" srcSimm4, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=1 & srcSimm4) ... & $(DST5B) { tmp:1 = dst5B; setSubtractFlags(tmp, srcSimm4); tmp = tmp - srcSimm4; setResultFlags(tmp); } # (3) CMP.B:Q #simm4, Ax :CMP^".B:Q" srcSimm4, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=1 & srcSimm4) & $(DST5AX) { tmp:1 = dst5Ax:1; setSubtractFlags(tmp, srcSimm4); tmp = tmp - srcSimm4; setResultFlags(tmp); } # (3) CMP.W:Q #simm4, dst5 :CMP^".W:Q" srcSimm4, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=1 & srcSimm4) ... 
& $(DST5W) { tmp:2 = dst5W; imm:2 = sext(srcSimm4); setSubtractFlags(tmp, imm); tmp = tmp - imm; setResultFlags(tmp); } # (3) CMP.W:Q #simm4, Ax :CMP^".W:Q" srcSimm4, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=1 & srcSimm4) & $(DST5AX) { tmp:2 = dst5Ax:2; imm:2 = sext(srcSimm4); setSubtractFlags(tmp, imm); tmp = tmp - imm; setResultFlags(tmp); } # (4) CMP.B:S #simm, dst2 :CMP^".B:S" srcSimm8, dst2B is ((b1_0607=1 & b1_0103=3 & b1_size_0=0) ... & dst2B); srcSimm8 { tmp:1 = dst2B; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; setResultFlags(tmp); } # (4) CMP.W:S #simm, dst2 :CMP^".W:S" srcSimm16, dst2W is ((b1_0607=1 & b1_0103=3 & b1_size_0=1) ... & dst2W); srcSimm16 { tmp:2 = dst2W; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; setResultFlags(tmp); } # (5) CMP.B:G src5, dst5 :CMP^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) { tmp:1 = dst5B_afterSrc5; src:1 = src5B; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); } # (5) CMP.B:G src5, Ax :CMP^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) & $(DST5AX) ... { tmp:1 = dst5Ax:1; src:1 = src5B; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); } # (5) CMP.W:G src5, dst5 :CMP^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) { tmp:2 = dst5W_afterSrc5; src:2 = src5W; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); } # (5) CMP.W:G src5, Ax :CMP^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) & $(DST5AX) ... { tmp:2 = dst5Ax:2; src:2 = src5W; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); } # (6) CMP.L:G src5, dst5 :CMP^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=1) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... 
{ tmp:4 = dst5L_afterSrc5; src:4 = src5L; setSubtractFlags(tmp, src); tmp = tmp - src; setResultFlags(tmp); }

# (6) CMP.L:G src5, Ax
:CMP^".L:G" src5L, dst5Ax  is (b1_0707=1 & b1_size_0=1; b2_0003=1) ... & $(SRC5L) & $(DST5AX) ...
{
    tmp:4 = zext(dst5Ax);
    src:4 = src5L;
    setSubtractFlags(tmp, src);
    tmp = tmp - src;
    setResultFlags(tmp);
}

# (7) CMP.B:S src2, R0L
# CMP compares dest - src; here dest is R0L and src is the dst2B operand.
# FIX(review): the difference fed to setResultFlags was previously computed
# as "tmp - R0L" (src - dest), which inverted the sign flag relative to the
# carry/overflow set by setSubtractFlags(R0L, tmp). Now computes R0L - tmp,
# matching every other CMP variant in this file.
:CMP^".B:S" dst2B, R0L  is (b1_0607=1 & b1_0103=0 & b1_size_0=0 & R0L) ... & dst2B
{
    tmp:1 = dst2B;
    setSubtractFlags(R0L, tmp);
    tmp = R0L - tmp;
    setResultFlags(tmp);
}

# (7) CMP.W:S src2, R0
# Same fix as the byte form above: result flags now reflect R0 - src.
:CMP^".W:S" dst2W, R0  is (b1_0607=1 & b1_0103=0 & b1_size_0=1 & R0) ... & dst2W
{
    tmp:2 = dst2W;
    setSubtractFlags(R0, tmp);
    tmp = R0 - tmp;
    setResultFlags(tmp);
}

##### CMPX #####

# CMPX #simm, dst5 -- sign-extended 8-bit immediate compared against 32-bit dest
:CMPX srcSimm8, dst5L  is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x11) ... & $(DST5L)); srcSimm8
{
    tmp:4 = dst5L;
    imm:4 = sext(srcSimm8);
    setSubtractFlags(tmp, imm);
    tmp = tmp - imm;
    setResultFlags(tmp);
}

# CMPX #simm, Ax
:CMPX srcSimm8, dst5Ax  is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8
{
    tmp:4 = zext(dst5Ax);
    imm:4 = sext(srcSimm8);
    setSubtractFlags(tmp, imm);
    tmp = tmp - imm;
    setResultFlags(tmp);
}

##### DADC #####

# (1) DADC.B #imm, dst5 -- BCD add with carry; carry set when result exceeds 0x99
:DADC.B srcImm8, dst5B  is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)); srcImm8
{
    src:2 = zext(srcImm8);
    dst:2 = zext(dst5B);
    tmp:2 = DecimalAddWithCarry(src, dst);
    dst5B = tmp:1;
    $(CARRY) = (tmp > 0x99);
    setResultFlags(tmp:1);
}

# (1) DADC.B #imm, Ax
:DADC.B srcImm8, dst5Ax  is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0e) & $(DST5AX)); srcImm8
{
    src:2 = zext(srcImm8);
    dst:2 = zext(dst5Ax:1);
    tmp:2 = DecimalAddWithCarry(src, dst);
    dst5Ax = zext(tmp:1);
    $(CARRY) = (tmp > 0x99);
    setResultFlags(tmp:1);
}

# (1) DADC.W #imm, dst5
:DADC.W srcImm16, dst5W  is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0e) ...
& $(DST5W)); srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(dst5W); tmp:4 = DecimalAddWithCarry(src, dst); dst5W = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } # (1) DADC.W #imm, Ax :DADC.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0e) & $(DST5AX)); srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(dst5Ax:2); tmp:4 = DecimalAddWithCarry(src, dst); dst5Ax = zext(tmp:2); $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } # (2) DADC.B src5, dst5 :DADC.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { src:2 = zext(src5B); dst:2 = zext(dst5B_afterSrc5); tmp:2 = DecimalAddWithCarry(src, dst); dst5B_afterSrc5 = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); } # (2) DADC.B src5, Ax :DADC.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x8) ... & $(SRC5B) & $(DST5AX) ...) { src:2 = zext(src5B); dst:2 = zext(dst5Ax:1); tmp:2 = DecimalAddWithCarry(src, dst); dst5Ax = zext(tmp:1); $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); } # (2) DADC.W src5, dst5 :DADC.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { src:4 = zext(src5W); dst:4 = zext(dst5W_afterSrc5); tmp:4 = DecimalAddWithCarry(src, dst); dst5W_afterSrc5 = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } # (2) DADC.W src5, Ax :DADC.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x8) ... & $(SRC5W) & $(DST5AX) ...) { src:4 = zext(src5W); dst:4 = zext(dst5Ax:2); tmp:4 = DecimalAddWithCarry(src, dst); dst5Ax = zext(tmp:2); $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } ##### DADD ##### # (1) DADD.B #imm, dst5 :DADD.B srcImm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1e) ... 
& $(DST5B)); srcImm8 { src:2 = zext(srcImm8); dst:2 = zext(dst5B); tmp:2 = DecimalAdd(src, dst); dst5B = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); } # (1) DADD.B #imm, Ax :DADD.B srcImm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1e) & $(DST5AX)); srcImm8 { src:2 = zext(srcImm8); dst:2 = zext(dst5Ax:1); tmp:2 = DecimalAdd(src, dst); dst5Ax = zext(tmp:1); $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); } # (1) DADD.W #imm, dst5 :DADD.W srcImm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W)); srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(dst5W); tmp:4 = DecimalAdd(src, dst); dst5W = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } # (1) DADD.W #imm, Ax :DADD.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1e) & $(DST5AX)); srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(dst5Ax:2); tmp:4 = DecimalAdd(src, dst); dst5Ax = zext(tmp:2); $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } # (2) DADD.B src5, dst5 :DADD.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { src:2 = zext(src5B); dst:2 = zext(dst5B_afterSrc5); tmp:2 = DecimalAdd(src, dst); dst5B_afterSrc5 = tmp:1; $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); } # (2) DADD.B src5, Ax :DADD.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) & $(DST5AX) ...) { src:2 = zext(src5B); dst:2 = zext(dst5Ax:1); tmp:2 = DecimalAdd(src, dst); dst5Ax = zext(tmp:1); $(CARRY) = (tmp > 0x99); setResultFlags(tmp:1); } # (2) DADD.W src5, dst5 :DADD.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) 
{ src:4 = zext(src5W); dst:4 = zext(dst5W_afterSrc5); tmp:4 = DecimalAdd(src, dst); dst5W_afterSrc5 = tmp:2; $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } # (2) DADD.W src5, Ax :DADD.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5W) & $(DST5AX) ...) { src:4 = zext(src5W); dst:4 = zext(dst5Ax:2); tmp:4 = DecimalAdd(src, dst); dst5Ax = zext(tmp:2); $(CARRY) = (tmp > 0x9999); setResultFlags(tmp:2); } ##### DEC ##### # DEC.B dst5 :DEC.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B) { tmp:1 = dst5B - 1; dst5B = tmp; setResultFlags(tmp); } # DEC.B Ax :DEC.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0e) & $(DST5AX) { tmp:1 = dst5Ax:1 - 1; dst5Ax = zext(tmp); setResultFlags(tmp); } # DEC.W dst5 :DEC.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W) { tmp:2 = dst5W - 1; dst5W = tmp; setResultFlags(tmp); } # DEC.W Ax :DEC.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0e) & $(DST5AX) { tmp:2 = dst5Ax:2 - 1; dst5Ax = zext(tmp); setResultFlags(tmp); } ##### DIV ##### # (1) DIV.B #imm :DIV.B srcSimm8 is b1_0007=0xb0; b2_0007=0x43; srcSimm8 { d:2 = sext(srcSimm8); q:2 = R0 s/ d; r:2 = R0 s% d; # remainder has same sign as R0 (dividend) R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); } # (1) DIV.W #imm :DIV.W srcSimm16 is b1_0007=0xb0; b2_0007=0x53; srcSimm16 { d:4 = sext(srcSimm16); q:4 = R2R0 s/ d; r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend) R0 = q:2; R2 = r:2; q = q s>> 16; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); } # (2) DIV.B src5 :DIV.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B) { d:2 = sext(dst5B); q:2 = R0 s/ d; r:2 = R0 s% d; # remainder has same sign as R0 (dividend) R0L = q:1; R0H = r:1; q = q s>> 8; $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1)); } # (2) DIV.W src5 :DIV.W dst5W is (b1_0407=0x8 & b1_size_0=1; b2_0005=0x1e) ... 
& $(DST5W)
{
    d:4 = sext(dst5W);
    q:4 = R2R0 s/ d;
    r:4 = R2R0 s% d; # remainder has same sign as R0 (dividend)
    R0 = q:2;
    R2 = r:2;
    q = q s>> 16;
    $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}

##### DIVU #####
# FIX(review): these unsigned divides previously reused the signed-DIV
# overflow test (arithmetic shift plus a "q != -1" exemption).  For DIVU a
# quotient whose upper half is all-ones IS an overflow, so that exemption
# wrongly cleared the O flag.  Overflow now tests the upper half directly
# with a logical shift.

# (1) DIVU.B #imm
:DIVU.B srcImm8  is b1_0007=0xb0; b2_0007=0x03; srcImm8
{
    d:2 = zext(srcImm8);
    q:2 = R0 / d;
    r:2 = R0 % d;
    R0L = q:1;
    R0H = r:1;
    q = q >> 8;
    $(OVERFLOW) = (d == 0) || (q != 0);
}

# (1) DIVU.W #imm
:DIVU.W srcImm16  is b1_0007=0xb0; b2_0007=0x13; srcImm16
{
    d:4 = zext(srcImm16);
    q:4 = R2R0 / d;
    r:4 = R2R0 % d;
    R0 = q:2;
    R2 = r:2;
    q = q >> 16;
    $(OVERFLOW) = (d == 0) || (q != 0);
}

# (2) DIVU.B src5
:DIVU.B dst5B  is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)
{
    d:2 = zext(dst5B);
    q:2 = R0 / d;
    r:2 = R0 % d;
    R0L = q:1;
    R0H = r:1;
    q = q >> 8;
    $(OVERFLOW) = (d == 0) || (q != 0);
}

# (2) DIVU.W src5
:DIVU.W dst5W  is (b1_0407=0x8 & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W)
{
    d:4 = zext(dst5W);
    q:4 = R2R0 / d;
    r:4 = R2R0 % d;
    R0 = q:2;
    R2 = r:2;
    q = q >> 16;
    $(OVERFLOW) = (d == 0) || (q != 0);
}

##### DIVX #####

# (1) DIVX.B #imm
:DIVX.B srcSimm8  is b1_0007=0xb2; b2_0007=0x43; srcSimm8
{
    d:2 = sext(srcSimm8);
    q:2 = R0 s/ d;
    r:2 = R0 s% d;
    # according to the manual the remainder has the same sign as the quotient
    differ:1 = (r s< 0) != (d s< 0);
    r = (zext(differ) * (-r)) + (zext(!differ) * r);
    R0L = q:1;
    R0H = r:1;
    q = q s>> 8;
    $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}

# (1) DIVX.W #imm
:DIVX.W srcSimm16  is b1_0007=0xb2; b2_0007=0x53; srcSimm16
{
    d:4 = sext(srcSimm16);
    q:4 = R2R0 s/ d;
    r:4 = R2R0 s% d;
    # according to the manual the remainder has the same sign as the quotient
    differ:1 = (r s< 0) != (d s< 0);
    r = (zext(differ) * (-r)) + (zext(!differ) * r);
    R0 = q:2;
    R2 = r:2;
    q = q s>> 16;
    $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}

# (2) DIVX.B src5
:DIVX.B dst5B  is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x1e) ...
& $(DST5B)
{
    d:2 = sext(dst5B);
    q:2 = R0 s/ d;
    r:2 = R0 s% d;
    # according to the manual the remainder has the same sign as the quotient
    differ:1 = (r s< 0) != (d s< 0);
    r = (zext(differ) * (-r)) + (zext(!differ) * r);
    R0L = q:1;
    R0H = r:1;
    q = q s>> 8;
    $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}

# (2) DIVX.W src5
:DIVX.W dst5W  is (b1_0407=0x9 & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W)
{
    d:4 = sext(dst5W);
    q:4 = R2R0 s/ d;
    r:4 = R2R0 s% d;
    # according to the manual the remainder has the same sign as the quotient
    # FIX(review): the sign-correction below was missing from this variant
    # (the comment was present but the code had been dropped); restored to
    # match DIVX.W #imm and the DIVX.B variants.
    differ:1 = (r s< 0) != (d s< 0);
    r = (zext(differ) * (-r)) + (zext(!differ) * r);
    R0 = q:2;
    R2 = r:2;
    q = q s>> 16;
    $(OVERFLOW) = (d == 0) || ((q != 0) && (q != -1));
}

##### DSBB #####

# (1) DSBB.B #imm, dst5 -- BCD subtract with borrow (borrow = !CARRY)
:DSBB.B srcImm8, dst5B  is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)); srcImm8
{
    src:2 = zext(srcImm8);
    dst:2 = zext(dst5B);
    c:1 = $(CARRY);
    $(CARRY) = (c && (dst > src)) || (!c && (dst >= src));
    tmp:2 = DecimalSubtractWithBorrow(dst, src);
    dst5B = tmp:1;
    setResultFlags(tmp:1);
}

# (1) DSBB.B #imm, Ax
:DSBB.B srcImm8, dst5Ax  is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) & $(DST5AX)); srcImm8
{
    src:2 = zext(srcImm8);
    dst:2 = zext(dst5Ax:1);
    c:1 = $(CARRY);
    $(CARRY) = (c && (dst > src)) || (!c && (dst >= src));
    tmp:2 = DecimalSubtractWithBorrow(dst, src);
    dst5Ax = zext(tmp:1);
    setResultFlags(tmp:1);
}

# (1) DSBB.W #imm, dst5
:DSBB.W srcImm16, dst5W  is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) ...
& $(DST5W)); srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(dst5W); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:4 = DecimalSubtractWithBorrow(dst, src); dst5W = tmp:2; setResultFlags(tmp:2); } # (1) DSBB.W #imm, Ax :DSBB.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) & $(DST5AX)); srcImm16 { src:4 = zext(srcImm16); dst:4 = zext(dst5Ax:2); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:4 = DecimalSubtractWithBorrow(dst, src); dst5Ax = zext(tmp:2); setResultFlags(tmp:2); } # (2) DSBB.B src5, dst5 :DSBB.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { src:2 = zext(src5B); dst:2 = zext(dst5B_afterSrc5); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:2 = DecimalSubtractWithBorrow(dst, src); dst5B_afterSrc5 = tmp:1; setResultFlags(tmp:1); } # (2) DSBB.B src5, Ax :DSBB.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) & $(DST5AX) ...) { src:2 = zext(src5B); dst:2 = zext(dst5Ax:1); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:2 = DecimalSubtractWithBorrow(dst, src); dst5Ax = zext(tmp:1); setResultFlags(tmp:1); } # (2) DSBB.W src5, dst5 :DSBB.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { src:4 = zext(src5W); dst:4 = zext(dst5W_afterSrc5); c:1 = $(CARRY); $(CARRY) = (c && (dst > src)) || (!c && (dst >= src)); tmp:4 = DecimalSubtractWithBorrow(dst, src); dst5W_afterSrc5 = tmp:2; setResultFlags(tmp:2); } # (2) DSBB.W src5, Ax :DSBB.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) & $(DST5AX) ...) 
{
	src:4 = zext(src5W);
	dst:4 = zext(dst5Ax:2);
	c:1 = $(CARRY);
	# C=1 (no incoming borrow): carry out when dst >= src; C=0: only when dst > src.
	# The two conditions were previously swapped.
	$(CARRY) = (c && (dst >= src)) || (!c && (dst > src));
	tmp:4 = DecimalSubtractWithBorrow(dst, src);
	dst5Ax = zext(tmp:2);
	setResultFlags(tmp:2);
}

##### DSUB #####

# (1) DSUB.B #imm, dst5
:DSUB.B srcImm8, dst5B is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)); srcImm8
{
	src:2 = zext(srcImm8);
	dst:2 = zext(dst5B);
	$(CARRY) = (dst >= src);
	tmp:2 = DecimalSubtract(dst, src);
	dst5B = tmp:1;
	setResultFlags(tmp:1);
}

# (1) DSUB.B #imm, Ax
:DSUB.B srcImm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1e) & $(DST5AX)); srcImm8
{
	src:2 = zext(srcImm8);
	dst:2 = zext(dst5Ax:1);
	$(CARRY) = (dst >= src);
	tmp:2 = DecimalSubtract(dst, src);
	dst5Ax = zext(tmp:1);
	setResultFlags(tmp:1);
}

# (1) DSUB.W #imm, dst5
:DSUB.W srcImm16, dst5W is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W)); srcImm16
{
	src:4 = zext(srcImm16);
	dst:4 = zext(dst5W);
	$(CARRY) = (dst >= src);
	tmp:4 = DecimalSubtract(dst, src);
	dst5W = tmp:2;
	setResultFlags(tmp:2);
}

# (1) DSUB.W #imm, Ax
:DSUB.W srcImm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1e) & $(DST5AX)); srcImm16
{
	src:4 = zext(srcImm16);
	dst:4 = zext(dst5Ax:2);
	$(CARRY) = (dst >= src);
	tmp:4 = DecimalSubtract(dst, src);
	dst5Ax = zext(tmp:2);
	setResultFlags(tmp:2);
}

# (2) DSUB.B src5, dst5
:DSUB.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...)
{
	src:2 = zext(src5B);
	dst:2 = zext(dst5B_afterSrc5);
	$(CARRY) = (dst >= src);
	tmp:2 = DecimalSubtract(dst, src);
	dst5B_afterSrc5 = tmp:1;
	setResultFlags(tmp:1);
}

# (2) DSUB.B src5, Ax
:DSUB.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x2) ... & $(SRC5B) & $(DST5AX) ...)
{ src:2 = zext(src5B); dst:2 = zext(dst5Ax:1); $(CARRY) = (dst >= src); tmp:2 = DecimalSubtract(dst, src); dst5Ax = zext(tmp:1); setResultFlags(tmp:1); } # (2) DSUB.W src5, dst5 :DSUB.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { src:4 = zext(src5W); dst:4 = zext(dst5W_afterSrc5); $(CARRY) = (dst >= src); tmp:4 = DecimalSubtract(dst, src); dst5W_afterSrc5 = tmp:2; setResultFlags(tmp:2); } # (2) DSUB.W src5, Ax :DSUB.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x2) ... & $(SRC5W) & $(DST5AX) ...) { src:4 = zext(src5W); dst:4 = zext(dst5Ax:2); $(CARRY) = (dst >= src); tmp:4 = DecimalSubtract(dst, src); dst5Ax = zext(tmp:2); setResultFlags(tmp:2); } ##### ENTER ##### :ENTER srcImm8 is b1_0007=0xec; srcImm8 { push3(FB); FB = SP; SP = SP - zext(srcImm8); } ##### EXITD ##### :EXITD is b1_0007=0xfc { SP = FB; pop3(FB); pc:3 = 0; pop3(pc); return [pc]; } ##### EXTS ##### # (1) EXTS.B dst5 :EXTS.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B) & $(DST5W) { tmp:2 = sext(dst5B); dst5W = tmp; setResultFlags(tmp); } # (1) EXTS.B Ax :EXTS.B dst5Ax is (b1_0407=0xc & b1_size_0=0; b2_0005=0x1e) & $(DST5AX) { tmp:2 = sext(dst5Ax:1); dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) EXTS.W dst5 :EXTS.W dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W) & $(DST5L) { tmp:4 = sext(dst5W); dst5L = tmp; setResultFlags(tmp); } # (1) EXTS.W Ax :EXTS.W dst5Ax is (b1_0407=0xc & b1_size_0=1; b2_0005=0x1e) & $(DST5AX) { tmp:4 = sext(dst5Ax:2); dst5Ax = tmp:3; setResultFlags(tmp); } # (2) EXTS.B src5, dst5 :EXTS.B src5B, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x7) ... & $(SRC5B) ... & $(DST5W_AFTER_SRC5) ...) { tmp:2 = sext(src5B); dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (2) EXTS.B src5, Ax :EXTS.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x7) ... & $(SRC5B) & $(DST5AX) ...) 
{ tmp:2 = sext(src5B); dst5Ax = zext(tmp); setResultFlags(tmp); } ##### EXTZ ##### # (1) EXTZ.B src5, dst5 :EXTZ.B src5B, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) ... & $(DST5W_AFTER_SRC5) ...) { tmp:2 = zext(src5B); dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (1) EXTZ.B src5, Ax :EXTZ.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) & $(DST5AX) ...) { tmp:2 = zext(src5B); dst5Ax = zext(tmp); setResultFlags(tmp); } ##### FCLR ##### :FCLR flagBit is b1_0007=0xd3; b2_0307=0x1d & flagBit { mask:2 = ~(1 << flagBit); FLG = FLG & mask; } ##### FREIT ##### :FREIT is b1_0007=0x9f { FLG = SVF; return [SVP]; } ##### FSET ##### :FSET flagBit is b1_0007=0xd1; b2_0307=0x1d & flagBit { mask:2 = (1 << flagBit); FLG = FLG | mask; } ##### INC ##### # INC.B dst5 :INC.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B) { tmp:1 = dst5B + 1; dst5B = tmp; setResultFlags(tmp); } # INC.B Ax :INC.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x0e) & $(DST5AX) { tmp:1 = dst5Ax:1 + 1; dst5Ax = zext(tmp); setResultFlags(tmp); } # INC.W dst5 :INC.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W) { tmp:2 = dst5W + 1; dst5W = tmp; setResultFlags(tmp); } # INC.W Ax :INC.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x0e) & $(DST5AX) { tmp:2 = dst5Ax:2 + 1; dst5Ax = zext(tmp); setResultFlags(tmp); } ##### INDEXB ##### # 1000 1000 0100 0011 INDEXB.B R1H :INDEXB.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B); } :INDEXB.W dst5W is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x13) ... & $(DST5W) [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W); } ##### INDEXBD ##### :INDEXBD.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x03) ... 
& $(DST5B) [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B); } :INDEXBD.W dst5W is (b1_0407=0xa & b1_size_0=0; b2_0005=0x13) ... & $(DST5W) [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W); } ##### INDEXBS ##### :INDEXBS.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B); } :INDEXBS.W dst5W is (b1_0407=0xc & b1_size_0=0; b2_0005=0x13) ... & $(DST5W) [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W); } ##### INDEXL ##### :INDEXL.B dst5B is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B) * 4; } :INDEXL.W dst5W is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W) * 4; } ##### INDEXLD ##### :INDEXLD.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B) * 4; } :INDEXLD.W dst5W is (b1_0407=0xb & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W) * 4; } ##### INDEXLS ##### :INDEXLS.B dst5B is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x03) ... & $(DST5B) [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B) * 4; } :INDEXLS.W dst5W is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x13) ... 
& $(DST5W) [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W) * 4; } ##### INDEXW ##### :INDEXW.B dst5B is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B) * 2; } :INDEXW.W dst5W is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) [ useByteIndexOffset=3; globalset(inst_next,useByteIndexOffset); useByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W) * 2; } ##### INDEXWD ##### :INDEXWD.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B) * 2; } :INDEXWD.W dst5W is (b1_0407=0xa & b1_size_0=0; b2_0005=0x33) ... & $(DST5W) [ useDstByteIndexOffset=3; globalset(inst_next,useDstByteIndexOffset); useDstByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W) * 2; } ##### INDEXWS ##### :INDEXWS.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x23) ... & $(DST5B) [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { byteIndexOffset = zext(dst5B) * 2; } :INDEXWS.W dst5W is (b1_0407=0xc & b1_size_0=0; b2_0005=0x33) ... 
& $(DST5W) [ useSrcByteIndexOffset=3; globalset(inst_next,useSrcByteIndexOffset); useSrcByteIndexOffset=0; ] { byteIndexOffset = zext(dst5W) * 2; } ##### INT ##### :INT srcIntNum is b1_0007=0xbe; imm8_0001=0 & srcIntNum { push2(FLG); next:3 = inst_next; push3(next); ptr3:3 = (INTB + (zext(srcIntNum) * 0x4)); pc:3 = *:3 ptr3; $(STACK_SEL) = ((srcIntNum > 0x1f) * $(STACK_SEL)); $(INTERRUPT) = 0x0; $(DEBUG) = 0x0; call [pc]; } ##### INTO ##### :INTO is b1_0007=0xbf { if ($(OVERFLOW) == 0) goto inst_next; push2(FLG); next:3 = inst_next; push3(next); $(STACK_SEL) = 0; $(INTERRUPT) = 0x0; $(DEBUG) = 0x0; call 0x0ffffe0; } ##### JCnd ##### :J^b1cnd rel8offset1 is b1_0707=1 & b1_0103=5 & b1cnd; rel8offset1 { if (b1cnd) goto rel8offset1; } ##### JMP ##### :JMP.S rel3offset2 is b1_0607=1 & b1_0103=5 & rel3offset2 { goto rel3offset2; } :JMP.B rel8offset1 is b1_0007=0xbb; rel8offset1 { goto rel8offset1; } :JMP.W rel16offset1 is b1_0007=0xce; rel16offset1 { goto rel16offset1; } :JMP.A abs24offset is b1_0007=0xcc; abs24offset { goto abs24offset; } ##### JMPI ##### :JMPI.W reloffset_dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x0f) ... & reloffset_dst5W { goto reloffset_dst5W; } :JMPI.A reloffset_dst5L is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x01) ... & reloffset_dst5L { goto reloffset_dst5L; } :JMPI.A reloffset_dst5Ax is (b1_0407=0x8 & b1_size_0=0; b2_0005=0x01) & reloffset_dst5Ax { goto reloffset_dst5Ax; } ##### JMPS ##### :JMPS srcImm8 is b1_0007=0xdc; srcImm8 { # 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match) ptr:3 = 0x0fffe - (zext(srcImm8) << 1); pc:3 = 0xff0000 | zext(*:2 ptr); goto [pc]; } ##### JSR ##### :JSR.W rel16offset1 is b1_0007=0xcf; rel16offset1 { next:3 = inst_next; push3(next); call rel16offset1; } :JSR.A abs24offset is b1_0007=0xcd; abs24offset { next:3 = inst_next; push3(next); call abs24offset; } ##### JSRI ##### :JSRI.W reloffset_dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x1f) ... 
& reloffset_dst5W { next:3 = inst_next; push3(next); call reloffset_dst5W; } :JSRI.A dst5L is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x01) ... & $(DST5L) { next:3 = inst_next; push3(next); pc:3 = dst5L:3; call [pc]; } :JSRI.A dst5Ax is (b1_0407=0x9 & b1_size_0=0; b2_0005=0x01) & $(DST5AX) { next:3 = inst_next; push3(next); call [dst5Ax]; } ##### JSRS ##### :JSRS srcImm8 is b1_0007=0xdd; srcImm8 { # 18 <= srcImm8 <= 255 (range restriction not enforced by pattern match) next:3 = inst_next; push3(next); ptr:3 = 0x0fffe - (zext(srcImm8) << 1); pc:3 = 0xff0000 | zext(*:2 ptr); call [pc]; } ##### LDC ##### # (1) LDC #imm16, b2_creg16 :LDC srcImm16, b2_creg16 is b1_0007=0xd5; b2_0307=0x15 & b2_creg16; srcImm16 { b2_creg16 = srcImm16; } # (2) LDC #imm24, b2_creg24 :LDC srcImm24, b2_creg24 is b1_0007=0xd5; b2_0307=0x05 & b2_creg24; srcImm24 { b2_creg24 = srcImm24; } # (3) LDC #imm24, b2_dreg24 :LDC srcImm24, b2_dreg24 is b1_0007=0xd5; b2_0307=0x0d & b2_dreg24; srcImm24 { b2_dreg24 = srcImm24; } # (4) LDC dst5, b2_creg16 :LDC dst5W, b2_creg16 is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_creg16) ... & $(DST5W)) { b2_creg16 = dst5W; } # (5) LDC dst5, b2_creg24 :LDC dst5L, b2_creg24 is (b1_0407=0xd & b1_size_0=1; b2_0305=0 & b2_creg24) ... & $(DST5L) { b2_creg24 = dst5L:3; } # (6) LDC dst5, b2_dreg24 :LDC dst5L, b2_dreg24 is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0 & b2_dreg24) ... 
& $(DST5L)) { b2_dreg24 = dst5L:3; } ##### LDCTX ##### :LDCTX abs16offset, abs24offset is b1_0007=0xb6; b2_0007=0xc3; abs16offset; imm24_dat & abs24offset { taskNum:1 = abs16offset; # load task number stored at abs16 ptr:3 = imm24_dat + (zext(taskNum) * 2); # compute table entry address relative to abs24 regInfo:1 = *:1 ptr; ptr = ptr + 1; spCorrect:1 = *:1 ptr; ptr = SP; if ((regInfo & 1) == 0) goto ; R0 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; R1 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; R2 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; R3 = *:2 ptr; ptr = ptr + 2; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; tmp:4 = *:4 ptr; A0 = tmp:3; ptr = ptr + 4; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; tmp = *:4 ptr; A1 = tmp:3; ptr = ptr + 4; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; tmp = *:4 ptr; SB = tmp:3; ptr = ptr + 4; regInfo = regInfo >> 1; if ((regInfo & 1) == 0) goto ; tmp = *:4 ptr; FB = tmp:3; ptr = ptr + 4; SP = SP + zext(spCorrect); } ##### LDIPL ##### :LDIPL srcImm3 is b1_0007=0xd5; b2_0307=0x1d & srcImm3 { $(IPL) = srcImm3; } ##### MAX ##### # (1) MAX.B #imm, dst5 :MAX.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) ... & $(DST5B)); srcSimm8 { if (srcSimm8 s<= dst5B) goto inst_next; dst5B = srcSimm8; } # (1) MAX.B #imm, Ax :MAX.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3f) & $(DST5AX)); srcSimm8 { if (srcSimm8 s<= dst5Ax:1) goto inst_next; dst5Ax = zext(srcSimm8); } # (1) MAX.W #imm, dst5 :MAX.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) ... 
& $(DST5W)); srcSimm16
{
	if (srcSimm16 s<= dst5W) goto inst_next;
	dst5W = srcSimm16;
}

# (1) MAX.W #imm, Ax
:MAX.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3f) & $(DST5AX)); srcSimm16
{
	if (srcSimm16 s<= dst5Ax:2) goto inst_next;
	dst5Ax = zext(srcSimm16);
}

# (2) MAX.B src5, dst5
:MAX.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...)
{
	val:1 = src5B;
	if (val s<= dst5B_afterSrc5) goto inst_next;
	dst5B_afterSrc5 = val;
}

# (2) MAX.B src5, Ax
:MAX.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xd) ... & $(SRC5B) & $(DST5AX) ...)
{
	val:1 = src5B;
	if (val s<= dst5Ax:1) goto inst_next;
	dst5Ax = zext(val);
}

# (2) MAX.W src5, dst5
:MAX.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...)
{
	val:2 = src5W;
	if (val s<= dst5W_afterSrc5) goto inst_next;
	dst5W_afterSrc5 = val;
}

# (2) MAX.W src5, Ax
# Display mnemonic was ":MAX.B" by mistake; this is the word-sized form.
:MAX.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xd) ... & $(SRC5W) & $(DST5AX) ...)
{
	val:2 = src5W;
	if (val s<= dst5Ax:2) goto inst_next;
	dst5Ax = zext(val);
}

##### MIN #####

# (1) MIN.B #imm, dst5
:MIN.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B)); srcSimm8
{
	if (srcSimm8 s>= dst5B) goto inst_next;
	dst5B = srcSimm8;
}

# (1) MIN.B #imm, Ax
:MIN.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)); srcSimm8
{
	if (srcSimm8 s>= dst5Ax:1) goto inst_next;
	dst5Ax = zext(srcSimm8);
}

# (1) MIN.W #imm, dst5
:MIN.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) ...
& $(DST5W)); srcSimm16
{
	if (srcSimm16 s>= dst5W) goto inst_next;
	dst5W = srcSimm16;
}

# (1) MIN.W #imm, Ax
:MIN.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)); srcSimm16
{
	if (srcSimm16 s>= dst5Ax:2) goto inst_next;
	dst5Ax = zext(srcSimm16);
}

# (2) MIN.B src5, dst5
:MIN.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...)
{
	val:1 = src5B;
	if (val s>= dst5B_afterSrc5) goto inst_next;
	dst5B_afterSrc5 = val;
}

# (2) MIN.B src5, Ax
:MIN.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) & $(DST5AX) ...)
{
	val:1 = src5B;
	if (val s>= dst5Ax:1) goto inst_next;
	dst5Ax = zext(val);
}

# (2) MIN.W src5, dst5
:MIN.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...)
{
	val:2 = src5W;
	if (val s>= dst5W_afterSrc5) goto inst_next;
	dst5W_afterSrc5 = val;
}

# (2) MIN.W src5, Ax
# Display mnemonic was ":MIN.B" by mistake; this is the word-sized form.
:MIN.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) & $(DST5AX) ...)
{
	val:2 = src5W;
	if (val s>= dst5Ax:2) goto inst_next;
	dst5Ax = zext(val);
}

##### MOV #####

# (1) MOV.B:G #imm, dst5
:MOV^".B:G" srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B)); srcImm8
{
	val:1 = srcImm8;
	dst5B = val;
	setResultFlags(val);
}

# (1) MOV.B:G #imm, Ax
:MOV^".B:G" srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)); srcImm8
{
	val:1 = srcImm8;
	dst5Ax = zext(val);
	setResultFlags(val);
}

# (1) MOV.W:G #imm, dst5
:MOV^".W:G" srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2f) ...
& $(DST5W)); srcImm16 { val:2 = srcImm16; dst5W = val; setResultFlags(val); } # (1) MOV.W:G #imm, Ax :MOV^".W:G" srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)); srcImm16 { val:2 = srcImm16; dst5Ax = zext(val); setResultFlags(val); } # (2) MOV.L:G #imm, dst5 :MOV^".L:G" srcImm32, dst5L is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcImm32 { val:4 = srcImm32; dst5L = val; setResultFlags(val); } # (2) MOV.L:G #imm, Ax :MOV^".L:G" srcImm32, dst5Ax is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcImm32 { val:4 = srcImm32; dst5Ax = val:3; setResultFlags(val); } # (3) MOV.B:Q #imm4, dst5 :MOV^".B:Q" srcSimm4, dst5B is (b1_0407=0xf & b1_size_0=0; b2_0405=2 & srcSimm4) ... & $(DST5B) { val:1 = srcSimm4; dst5B = val; setResultFlags(val); } # (3) MOV.B:Q #imm4, Ax :MOV^".B:Q" srcSimm4, dst5Ax is (b1_0407=0xf & b1_size_0=0; b2_0405=2 & srcSimm4) & $(DST5AX) { val:1 = srcSimm4; dst5Ax = zext(val); setResultFlags(val); } # (3) MOV.W:Q #imm4, dst5 :MOV^".W:Q" srcSimm4, dst5W is (b1_0407=0xf & b1_size_0=1; b2_0405=2 & srcSimm4) ... & $(DST5W) { val:2 = sext(srcSimm4); dst5W = val; setResultFlags(val); } # (3) MOV.W:Q #imm4, Ax :MOV^".W:Q" srcSimm4, dst5Ax is (b1_0407=0xf & b1_size_0=1; b2_0405=2 & srcSimm4) & $(DST5AX) { val:2 = sext(srcSimm4); dst5Ax = zext(val); setResultFlags(val); } # (4) MOV.B:S #imm, dst2 :MOV^".B:S" srcImm8, dst2B is ((b1_0607=0 & b1_0103=2 & b1_size_0=0) ... & dst2B); srcImm8 { val:1 = srcImm8; dst2B = val; setResultFlags(val); } # (4) MOV.W:S #imm, dst2 :MOV^".W:S" srcImm16, dst2W is ((b1_0607=0 & b1_0103=2 & b1_size_0=1) ... 
& dst2W); srcImm16 { val:2 = srcImm16; dst2W = val; setResultFlags(val); } # (5) MOV.W:S #imm16, Ax :MOV^".W:S" srcImm16, b1_d1_regAx is b1_0607=2 & b1_size_5=0 & b1_0104=0xe & b1_d1_regAx; srcImm16 { val:2 = srcImm16; b1_d1_regAx = zext(val); setResultFlags(val); } # (5) MOV.L:S #imm24, Ax :MOV^".L:S" srcImm24, b1_d1_regAx is b1_0607=2 & b1_size_5=1 & b1_0104=0xe & b1_d1_regAx; srcImm24 { val:3 = srcImm24; b1_d1_regAx = val; setResultFlags(val); } # (6) MOV.B:Z #0, dst2 :MOV^".B:Z" srcZero8, dst2B is (b1_0607=0 & b1_0103=1 & b1_size_0=0 & srcZero8) ... & dst2B { dst2B = 0; $(SIGN) = 0; $(ZERO) = 1; } # (6) MOV.W:Z #0, dst2 :MOV^".W:Z" srcZero16, dst2W is (b1_0607=0 & b1_0103=1 & b1_size_0=1 & srcZero16) ... & dst2W { dst2W = 0; $(SIGN) = 0; $(ZERO) = 1; } # (7) MOV.B:G src5, dst5 :MOV^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { val:1 = src5B; dst5B_afterSrc5 = val; setResultFlags(val); } # (7) MOV.B:G src5, Ax :MOV^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xb) ... & $(SRC5B) & $(DST5AX) ... { val:1 = src5B; dst5Ax = zext(val); setResultFlags(val); } # (7) MOV.W:G src5, dst5 :MOV^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xb) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { val:2 = src5W; dst5W_afterSrc5 = val; setResultFlags(val); } # (7) MOV.W:G src5, Ax :MOV^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xb) ... & $(SRC5W) & $(DST5AX) ... { val:2 = src5W; dst5Ax = zext(val); setResultFlags(val); } # (8) MOV.L:G src5, dst5 :MOV^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x3) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... { val:4 = src5L; dst5L_afterSrc5 = val; setResultFlags(val); } # (8) MOV.L:G src5, Ax :MOV^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x3) ... & $(SRC5L) & $(DST5AX) ... 
{ val:4 = src5L; dst5Ax = val:3; setResultFlags(val); } # (9) MOV.B:S src2, R0L :MOV^".B:S" dst2B, R0L is (R0L & b1_0607=0 & b1_0103=4 & b1_size_0=0) ... & dst2B { val:1 = dst2B; R0L = val; setResultFlags(val); } # (9) MOV.W:S src2, R0 :MOV^".W:S" dst2W, R0 is (R0 & b1_0607=0 & b1_0103=4 & b1_size_0=1) ... & dst2W { val:2 = dst2W; R0 = val; setResultFlags(val); } # (10) MOV.B:S src2, R1L :MOV^".B:S" dst2B, R1L is (R1L & b1_0607=1 & b1_0103=7 & b1_size_0=0) ... & dst2B { val:1 = dst2B; R1L = val; setResultFlags(val); } # (10) MOV.W:S src2, R1 :MOV^".W:S" dst2W, R1 is (R1 & b1_0607=1 & b1_0103=7 & b1_size_0=1) ... & dst2W { val:2 = dst2W; R1 = val; setResultFlags(val); } # (11) MOV.B:S R0L, dst2 :MOV^".B:S" R0L, dst2B is (R0L & b1_0607=0 & b1_0103=0 & b1_size_0=0) ... & dst2B { val:1 = R0L; dst2B = val; setResultFlags(val); } # (11) MOV.W:S R0, dst2 :MOV^".W:S" R0, dst2W is (R0 & b1_0607=0 & b1_0103=0 & b1_size_0=1) ... & dst2W { val:2 = R0; dst2W = val; setResultFlags(val); } # (12) MOV.L:S src2L, Ax :MOV^".L:S" dst2L, b1_d1_regAx is (b1_0607=1 & b1_0103=4 & b1_d1_regAx) ... & dst2L { val:4 = dst2L; b1_d1_regAx = val:3; setResultFlags(val); } # (13) MOV.B:G dsp:8[SP], dst5 # 1011 0110 1000 1111 0001 0010 1110 1111 1100 1101 1010 1011 MOV.G:G 0x12(SP),0xabcdef :MOV^".B:G" dsp8spB, dst5B_afterDsp8 is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0f; dsp8spB) ... & $(DST5B_AFTER_DSP8) { val:1 = dsp8spB; dst5B_afterDsp8 = val; setResultFlags(val); } # (13) MOV.B:G dsp:8[SP], Ax :MOV^".B:G" dsp8spB, dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x0f; dsp8spB) & $(DST5AX) ... { val:1 = dsp8spB; dst5Ax = zext(val); setResultFlags(val); } # (13) MOV.W:G dsp:8[SP], dst5 :MOV^".W:G" dsp8spW, dst5W_afterDsp8 is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0f; dsp8spW) ... & $(DST5W_AFTER_DSP8) { val:2 = dsp8spW; dst5W_afterDsp8 = val; setResultFlags(val); } # (13) MOV.W:G dsp:8[SP], Ax :MOV^".W:G" dsp8spW, dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x0f; dsp8spW) & $(DST5AX) ... 
{ val:2 = dsp8spW; dst5Ax = zext(val); setResultFlags(val); } # (14) MOV.B:G src5, dsp:8[SP] :MOV^".B:G" dst5B, dsp8spB is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x0f) ... & $(DST5B)); dsp8spB { val:1 = dst5B; dsp8spB = val; setResultFlags(val); } # (14) MOV.W:G src5, dsp:8[SP] :MOV^".W:G" dst5W, dsp8spW is ((b1_0407=0xa & b1_size_0=1; b2_0005=0x0f) ... & $(DST5W)); dsp8spW { val:2 = dst5W; dsp8spW = val; setResultFlags(val); } ##### MOVA ##### # MOVA dst5A, RxRx :MOVA dst5A, b2_reg32 is (b1_0407=0xd & b1_size_0=1; b2_0105=0xc & b2_reg32) ... & $(DST5A) { b2_reg32 = zext(dst5A); } # MOVA dst5A, Ax :MOVA dst5A, b2_regAx is (b1_0407=0xd & b1_size_0=1; b2_0105=0xd & b2_regAx) ... & $(DST5A) { b2_regAx = dst5A; } ##### MOVDir ##### # TODO: dst5B=Ax case will parse but is not valid # (1) MOVDir R0L, dst :MOVLL R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)) { dst5B = (R0L & 0x0f) | (dst5B & 0xf0); } :MOVHL R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)) { dst5B = ((R0L & 0xf0) >> 4) | (dst5B & 0xf0); } :MOVLH R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)) { dst5B = ((R0L & 0x0f) << 4) | (dst5B & 0x0f); } :MOVHH R0L, dst5B is R0L & b0_0007=0x1; ((b1_0407=0xb & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)) { dst5B = (R0L & 0xf0) | (dst5B & 0x0f); } # (2) MOVDir dst, R0L :MOVLL dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x0e) ... & $(DST5B)) { R0L = (dst5B & 0x0f) | (R0L & 0xf0); } :MOVHL dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)) { R0L = ((dst5B & 0xf0) >> 4) | (R0L & 0xf0); } :MOVLH dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B)) { R0L = ((dst5B & 0x0f) << 4) | (R0L & 0x0f); } :MOVHH dst5B, R0L is R0L & b0_0007=0x1; ((b1_0407=0xa & b1_size_0=0; b2_0005=0x3e) ... 
& $(DST5B)) { R0L = (dst5B & 0xf0) | (R0L & 0x0f); } ##### MOVX ##### :MOVX srcSimm8, dst5L is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x11) ... & $(DST5L)); srcSimm8 { val:4 = sext(srcSimm8); dst5L = val; setResultFlags(val); } :MOVX srcSimm8, dst5Ax is ((b1_0407=0xb & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { val:3 = sext(srcSimm8); dst5Ax = val; setResultFlags(val); } ##### MUL ##### # TODO: Illegal MUL destination cases will parse but are not valid (e.g., R0H/R2, R1H/R3) # (1) MUL.B #imm, dst5 :MUL.B srcSimm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1f) ... & $(DST5W) & $(DST5B)); srcSimm8 { dst5W = sext(srcSimm8) * sext(dst5B); } # (1) MUL.B #imm, Ax :MUL.B srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x1f) & $(DST5AX)); srcSimm8 { val:2 = sext(srcSimm8) * sext(dst5Ax:1); dst5Ax = zext(val); } # (1) MUL.W #imm, dst5 :MUL.W srcSimm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1f) ... & $(DST5L) & $(DST5W)); srcSimm16 { dst5L = sext(srcSimm16) * sext(dst5W); } # (1) MUL.W #imm, Ax :MUL.W srcSimm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x1f) & $(DST5AX)); srcSimm16 { val:4 = sext(srcSimm16) * sext(dst5Ax:2); dst5Ax = val:3; } # (2) MUL.B src5, dst5 :MUL.B src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... & $(DST5W_AFTER_SRC5) ... { dst5W_afterSrc5 = sext(src5B) * sext(dst5B_afterSrc5); } # (2) MUL.B src5, Ax :MUL.B src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xc) ... & $(SRC5B) & $(DST5AX) ... { val:2 = sext(src5B) * sext(dst5Ax:1); dst5Ax = zext(val); } # (2) MUL.W src5, dst5 :MUL.W src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... & $(DST5L_AFTER_SRC5) ... { dst5L_afterSrc5 = sext(src5W) * sext(dst5W_afterSrc5); } # (2) MUL.W src5, Ax :MUL.W src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xc) ... & $(SRC5W) & $(DST5AX) ... 
{ val:4 = sext(src5W) * sext(dst5Ax:2); dst5Ax = val:3; } ##### MULEX ##### :MULEX dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W) { R1R2R0 = sext(R2R0) * sext(dst5W); } ##### MULU ##### # TODO: Illegal MULU destination cases will parse but are not valid (e.g., R0H/R2, R1H/R3) # (1) MULU.B #imm, dst5 :MULU.B srcImm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0f) ... & $(DST5B) & $(DST5W)); srcImm8 { dst5W = zext(srcImm8) * zext(dst5B); } # (1) MULU.B #imm, Ax :MULU.B srcImm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x0f) & $(DST5AX)); srcImm8 { val:2 = zext(srcImm8) * zext(dst5Ax:1); dst5Ax = zext(val); } # (1) MULU.W #imm, dst5 :MULU.W srcImm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0f) ... & $(DST5W) & $(DST5L)); srcImm16 { dst5L = zext(srcImm16) * zext(dst5W); } # (1) MULU.W #imm, Ax :MULU.W srcImm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x0f) & $(DST5AX)); srcImm16 { val:4 = zext(srcImm16) * zext(dst5Ax:2); dst5Ax = val:3; } # (2) MULU.B src5, dst5 :MULU.B src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... & $(DST5W_AFTER_SRC5) ... { dst5W_afterSrc5 = zext(src5B) * zext(dst5B_afterSrc5); } # (2) MULU.B src5, Ax :MULU.B src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x4) ... & $(SRC5B) & $(DST5AX) ... { val:2 = zext(src5B) * zext(dst5Ax:1); dst5Ax = zext(val); } # (2) MULU.W src5, dst5 :MULU.W src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... & $(DST5L_AFTER_SRC5) ... { dst5L_afterSrc5 = zext(src5W) * zext(dst5W_afterSrc5); } # (2) MULU.W src5, Ax :MULU.W src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x4) ... & $(SRC5W) & $(DST5AX) ... { val:4 = zext(src5W) * zext(dst5Ax:2); dst5Ax = val:3; } ##### NEG ##### # NEG.B dst5 :NEG.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2f) ... 
& $(DST5B)
{
	tmp:1 = dst5B;
	setSubtractFlags(0:1, tmp);
	tmp = -tmp;
	dst5B = tmp;
	setResultFlags(tmp);
}

# NEG.B Ax
:NEG.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)
{
	tmp:1 = dst5Ax:1;
	setSubtractFlags(0:1, tmp);
	tmp = -tmp;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

# NEG.W dst5
:NEG.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2f) ... & $(DST5W)
{
	tmp:2 = dst5W;
	setSubtractFlags(0:2, tmp);
	tmp = -tmp;
	dst5W = tmp;
	setResultFlags(tmp);
}

# NEG.W Ax
:NEG.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)
{
	tmp:2 = dst5Ax:2;
	setSubtractFlags(0:2, tmp);
	tmp = -tmp;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

##### NOP #####

:NOP is b1_0007=0xde
{
}

##### NOT #####

# NOT.B dst5
:NOT.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1e) ... & $(DST5B)
{
	tmp:1 = ~dst5B;
	dst5B = tmp;
	setResultFlags(tmp);
}

# NOT.B Ax
:NOT.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x1e) & $(DST5AX)
{
	tmp:1 = ~dst5Ax:1;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

# NOT.W dst5
:NOT.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1e) ... & $(DST5W)
{
	tmp:2 = ~dst5W;
	dst5W = tmp;
	setResultFlags(tmp);
}

# NOT.W Ax
:NOT.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x1e) & $(DST5AX)
{
	tmp:2 = ~dst5Ax:2;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

##### OR #####
# OR must use bitwise-or; the semantics previously used '&' (AND) by mistake.

# (1) OR.B:G #imm, dst
:OR^".B:G" srcImm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B)); srcImm8
{
	tmp:1 = dst5B | srcImm8;
	dst5B = tmp;
	setResultFlags(tmp);
}

# (1) OR.B:G #imm, Ax
:OR^".B:G" srcImm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)); srcImm8
{
	tmp:1 = dst5Ax:1 | srcImm8;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

# (1) OR.W:G #imm, dst
:OR^".W:G" srcImm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) ...
& $(DST5W)); srcImm16
{
	# OR must use bitwise-or; the semantics previously used '&' (AND) by mistake.
	tmp:2 = dst5W | srcImm16;
	dst5W = tmp;
	setResultFlags(tmp);
}

# (1) OR.W:G #imm, Ax
:OR^".W:G" srcImm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x2f) & $(DST5AX)); srcImm16
{
	tmp:2 = dst5Ax:2 | srcImm16;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

# (2) OR.B:S #imm, dst
:OR^".B:S" srcImm8, dst2B is ((b1_0607=1 & b1_0103=2 & b1_size_0=0) ... & dst2B); srcImm8
{
	tmp:1 = dst2B | srcImm8;
	dst2B = tmp;
	setResultFlags(tmp);
}

# (2) OR.W:S #imm, dst
:OR^".W:S" srcImm16, dst2W is ((b1_0607=1 & b1_0103=2 & b1_size_0=1) ... & dst2W); srcImm16
{
	tmp:2 = dst2W | srcImm16;
	dst2W = tmp;
	setResultFlags(tmp);
}

# (3) OR.B:G src5, dst5
:OR^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x5) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...
{
	tmp:1 = dst5B_afterSrc5 | src5B;
	dst5B_afterSrc5 = tmp;
	setResultFlags(tmp);
}

# (3) OR.B:G src5, Ax
:OR^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x5) ... & $(SRC5B) & $(DST5AX) ...
{
	tmp:1 = dst5Ax:1 | src5B;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

# (3) OR.W:G src5, dst5
:OR^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x5) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...
{
	tmp:2 = dst5W_afterSrc5 | src5W;
	dst5W_afterSrc5 = tmp;
	setResultFlags(tmp);
}

# (3) OR.W:G src5, Ax
:OR^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x5) ... & $(SRC5W) & $(DST5AX) ...
{
	tmp:2 = dst5Ax:2 | src5W;
	dst5Ax = zext(tmp);
	setResultFlags(tmp);
}

##### POP #####

# POP.B dst5
:POP.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2f) ... & $(DST5B)
{
	pop1(dst5B);
}

# POP.B Ax
:POP.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2f) & $(DST5AX)
{
	val:1 = 0;
	pop1(val);
	dst5Ax = zext(val);
}

# POP.W dst5
:POP.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2f) ...
& $(DST5W) { pop2(dst5W); } # POP.W Ax :POP.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2f) & $(DST5AX) { val:2 = 0; pop2(val); dst5Ax = zext(val); } ##### POPC ##### # (1) POPC reg16 :POPC b2_creg16 is b1_0007=0xd3; b2_0307=0x15 & b2_creg16 { pop2(b2_creg16); } # (2) POPC reg24 :POPC b2_creg24 is b1_0007=0xd3; b2_0307=0x05 & b2_creg24 { pop3(b2_creg24); } ##### POPM ##### popRegFB: FB is regBit7=1 & FB { pop3(FB); } popRegFB: is regBit7=0 { } popRegSB: SB popRegFB is regBit6=1 & popRegFB & SB { pop3(SB); build popRegFB; } popRegSB: popRegFB is popRegFB { build popRegFB; } popRegA1: A1 popRegSB is regBit5=1 & popRegSB & A1 { pop3(A1); build popRegSB; } popRegA1: popRegSB is popRegSB { build popRegSB; } popRegA0: A0 popRegA1 is regBit4=1 & popRegA1 & A0 { pop3(A0); build popRegA1; } popRegA0: popRegA1 is popRegA1 { build popRegA1; } popRegR3: R3 popRegA0 is regBit3=1 & popRegA0 & R3 { pop2(R3); build popRegA0; } popRegR3: popRegA0 is popRegA0 { build popRegA0; } popRegR2: R2 popRegR3 is regBit2=1 & popRegR3 & R2 { pop2(R2); build popRegR3; } popRegR2: popRegR3 is popRegR3 { build popRegR3; } popRegR1: R1 popRegR2 is regBit1=1 & popRegR2 & R1 { pop2(R1); build popRegR2; } popRegR1: popRegR2 is popRegR2 { build popRegR2; } popRegR0: R0 popRegR1 is regBit0=1 & popRegR1 & R0 { pop2(R0); build popRegR1; } popRegR0: popRegR1 is popRegR1 { build popRegR1; } popRegList: "( "^popRegR0^")" is popRegR0 { build popRegR0; } :POPM popRegList is b1_0007=0x8e; popRegList { build popRegList; } ##### PUSH ##### # (1) PUSH.B #imm :PUSH.B srcImm8 is b1_0007=0xae; srcImm8 { push1(srcImm8); #tmp:2 = zext(srcImm8); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack #push2(tmp); } # (1) PUSH.W #imm :PUSH.B srcImm16 is b1_0007=0xaf; srcImm16 { push2(srcImm16); } # (2) PUSH.B src5 :PUSH.B dst5B is (b1_0407=0xc & b1_size_0=0; b2_0005=0x0e) ... 
& $(DST5B) { push1(dst5B); #tmp:2 = zext(dst5B); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack #push2(tmp); } # (2) PUSH.W src5 :PUSH.W dst5W is (b1_0407=0xc & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W) { push2(dst5W); } # (3) PUSH.L #imm :PUSH.L srcImm32 is b1_0007=0xb6; b2_0007=0x53; srcImm32 { push4(srcImm32); } # (4) PUSH.L src5 :PUSH.L dst5L is (b1_0407=0xa & b1_size_0=0; b2_0005=0x01) ... & $(DST5L) { push4(dst5L); } ##### PUSHA ##### :PUSHA dst5A is (b1_0407=0xb & b1_size_0=0; b2_0005=0x01) ... & $(DST5A) { push3(dst5A); #tmp:4 = zext(dst5A); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack #push4(tmp); } ##### PUSHC ##### # (1) PUSHC reg16 :PUSHC b2_creg16 is b1_0007=0xd1; b2_0307=0x15 & b2_creg16 { push2(b2_creg16); } # (2) PUSHC reg24 :PUSHC b2_creg24 is b1_0007=0xd1; b2_0307=0x05 & b2_creg24 { push3(b2_creg24); #tmp:4 = zext(b2_creg24); # This differs from what really happens - decompiler tries to resolve source of unknown byte on stack #push4(tmp); } ##### PUSHM ##### pushRegR0: R0 is regBit7=1 & R0 { push2(R0); } pushRegR0: is regBit7=0 { } pushRegR1: pushRegR0 R1 is regBit6=1 & pushRegR0 & R1 { push2(R1); build pushRegR0; } pushRegR1: pushRegR0 is pushRegR0 { build pushRegR0; } pushRegR2: pushRegR1 R2 is regBit5=1 & pushRegR1 & R2 { push2(R2); build pushRegR1; } pushRegR2: pushRegR1 is pushRegR1 { build pushRegR1; } pushRegR3: pushRegR2 R3 is regBit4=1 & pushRegR2 & R3 { push2(R3); build pushRegR2; } pushRegR3: pushRegR2 is pushRegR2 { build pushRegR2; } pushRegA0: pushRegR3 A0 is regBit3=1 & pushRegR3 & A0 { push3(A0); build pushRegR3; } pushRegA0: pushRegR3 is pushRegR3 { build pushRegR3; } pushRegA1: pushRegA0 A1 is regBit2=1 & pushRegA0 & A1 { push3(A1); build pushRegA0; } pushRegA1: pushRegA0 is pushRegA0 { build pushRegA0; } pushRegSB: pushRegA1 SB is regBit1=1 & pushRegA1 & SB { push3(SB); build pushRegA1; } pushRegSB: pushRegA1 is 
pushRegA1 { build pushRegA1; } pushRegFB: pushRegSB FB is regBit0=1 & pushRegSB & FB { push3(FB); build pushRegSB; } pushRegFB: pushRegSB is pushRegSB { build pushRegSB; } pushRegList: "("^pushRegFB^" )" is pushRegFB { build pushRegFB; } :PUSHM pushRegList is b1_0007=0x8f; pushRegList { build pushRegList; } ##### REIT ##### :REIT is b1_0007=0x9e { pc:3 = 0; pop3(pc); pop2(FLG); return [pc]; } ##### RMPA ##### :RMPA.B is b1_0007=0xb8; b2_0007=0x43 { if (R3 == 0) goto inst_next; a:1 = *:1 A0; b:1 = *:1 A1; A0 = A0 + 1; A1 = A1 + 1; prod:6 = sext(a) * sext(b); o:1 = scarry(R1R2R0, prod); $(OVERFLOW) = o | $(OVERFLOW); R1R2R0 = R1R2R0 + prod; R3 = R3 - 1; goto inst_start; } :RMPA.W is b1_0007=0xb8; b2_0007=0x53 { if (R3 == 0) goto inst_next; a:2 = *:2 A0; b:2 = *:2 A1; A0 = A0 + 2; A1 = A1 + 2; prod:6 = sext(a) * sext(b); o:1 = scarry(R1R2R0, prod); $(OVERFLOW) = o | $(OVERFLOW); R1R2R0 = R1R2R0 + prod; R3 = R3 - 1; goto inst_start; } ##### ROLC ##### :ROLC.B dst5B is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2e) ... & $(DST5B) { c:1 = $(CARRY); tmp:1 = dst5B; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst5B = tmp; setResultFlags(tmp); } :ROLC.B dst5Ax is (b1_0407=0xb & b1_size_0=0; b2_0005=0x2e) & $(DST5AX) { c:1 = $(CARRY); tmp:1 = dst5Ax:1; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst5Ax = zext(tmp); setResultFlags(tmp); } :ROLC.W dst5W is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W) { c:2 = zext($(CARRY)); tmp:2 = dst5W; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst5W = tmp; setResultFlags(tmp); } :ROLC.W dst5Ax is (b1_0407=0xb & b1_size_0=1; b2_0005=0x2e) & $(DST5AX) { c:2 = zext($(CARRY)); tmp:2 = dst5Ax:2; $(CARRY) = tmp s< 0; tmp = (tmp << 1) | c; dst5Ax = zext(tmp); setResultFlags(tmp); } ##### RORC ##### :RORC.B dst5B is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2e) ... 
& $(DST5B) { c:1 = $(CARRY); tmp:1 = dst5B; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 7); dst5B = tmp; setResultFlags(tmp); } :RORC.B dst5Ax is (b1_0407=0xa & b1_size_0=0; b2_0005=0x2e) & $(DST5AX) { c:1 = $(CARRY); tmp:1 = dst5Ax:1; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 7); dst5Ax = zext(tmp); setResultFlags(tmp); } :RORC.W dst5W is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W) { c:2 = zext($(CARRY)); tmp:2 = dst5W; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 15); dst5W = tmp; setResultFlags(tmp); } :RORC.W dst5Ax is (b1_0407=0xa & b1_size_0=1; b2_0005=0x2e) & $(DST5AX) { c:2 = zext($(CARRY)); tmp:2 = dst5Ax:2; $(CARRY) = (tmp & 1) == 1; tmp = (tmp >> 1) | (c << 15); dst5Ax = zext(tmp); setResultFlags(tmp); } ##### ROT ##### # (1) ROT.B #imm4, dst5 (right) :ROT.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5B) { rightShift:1 = -srcSimm4Shift; tmp:1 = dst5B; $(CARRY) = (tmp >> (rightShift - 1)) & 1; tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); dst5B = tmp; setResultFlags(tmp); } # (1) ROT.B #imm4, Ax (right) :ROT.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { rightShift:1 = -srcSimm4Shift; tmp:1 = dst5Ax:1; $(CARRY) = (tmp >> (rightShift - 1)) & 1; tmp = (tmp >> rightShift) | (tmp << (8 - rightShift)); dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) ROT.W #imm4, dst5 (right) :ROT.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) ... 
& $(DST5W) { rightShift:1 = -srcSimm4Shift; tmp:2 = dst5W; c:2 = (tmp >> (rightShift - 1)); $(CARRY) = c:1 & 1; tmp = (tmp >> rightShift) | (tmp << (16 - rightShift)); dst5W = tmp; setResultFlags(tmp); } # (1) ROT.W #imm4, Ax (right) :ROT.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { rightShift:1 = -srcSimm4Shift; tmp:2 = dst5Ax:2; c:2 = (tmp >> (rightShift - 1)); $(CARRY) = c:1 & 1; tmp = (tmp >> rightShift) | (tmp << (16 - rightShift)); dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) ROT.B #imm4, dst5 (left) :ROT.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5B) { leftShift:1 = srcSimm4Shift; tmp:1 = dst5B; $(CARRY) = (tmp >> (8 - leftShift)) & 1; tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); dst5B = tmp; setResultFlags(tmp); } # (1) ROT.B #imm4, Ax (left) :ROT.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { leftShift:1 = srcSimm4Shift; tmp:1 = dst5Ax:1; $(CARRY) = (tmp >> (8 - leftShift)) & 1; tmp = (tmp << leftShift) | (tmp >> (8 - leftShift)); dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) ROT.W #imm4, dst5 (left) :ROT.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5W) { leftShift:1 = srcSimm4Shift; tmp:2 = dst5W; c:2 = (tmp >> (16 - leftShift)); $(CARRY) = c:1 & 1; tmp = (tmp << leftShift) | (tmp >> (16 - leftShift)); dst5W = tmp; setResultFlags(tmp); } # (1) ROT.W #imm4, Ax (left) :ROT.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=2 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { leftShift:1 = srcSimm4Shift; tmp:2 = dst5Ax:2; c:2 = (tmp >> (16 - leftShift)); $(CARRY) = c:1 & 1; tmp = (tmp << leftShift) | (tmp >> (16 - leftShift)); dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) ROT.B R1H, dst5 :ROT.B R1H, dst5B is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3f) ... 
& $(DST5B) { if (R1H == 0) goto inst_next; shift:1 = R1H s% 8; tmp:1 = dst5B; if (shift s>= 0) goto ; shift = -shift; $(CARRY) = (tmp >> (shift - 1)) & 1; tmp = (tmp >> shift) | (tmp << (8 - shift)); goto ; $(CARRY) = (tmp >> (8 - shift)) & 1; tmp = (tmp << shift) | (tmp >> (8 - shift)); dst5B = tmp; setResultFlags(tmp); } # (2) ROT.B R1H, Ax :ROT.B R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3f) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H s% 8; tmp:1 = dst5Ax:1; if (shift s>= 0) goto ; shift = -shift; $(CARRY) = (tmp >> (shift - 1)) & 1; tmp = (tmp >> shift) | (tmp << (8 - shift)); goto ; $(CARRY) = (tmp >> (8 - shift)) & 1; tmp = (tmp << shift) | (tmp >> (8 - shift)); dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) ROT.W R1H, dst5 :ROT.W R1H, dst5W is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3f) ... & $(DST5W) { if (R1H == 0) goto inst_next; shift:1 = R1H s% 16; tmp:2 = dst5W; if (shift s>= 0) goto ; shift = -shift; c:2 = (tmp >> (shift - 1)); tmp = (tmp >> shift) | (tmp << (16 - shift)); goto ; c = (tmp >> (16 - shift)); tmp = (tmp << shift) | (tmp >> (16 - shift)); $(CARRY) = c:1 & 1; dst5W = tmp; setResultFlags(tmp); } # (2) ROT.W R1H, Ax :ROT.W R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3f) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H s% 16; tmp:2 = dst5Ax:2; if (shift s>= 0) goto ; shift = -shift; c:2 = (tmp >> (shift - 1)); tmp = (tmp >> shift) | (tmp << (16 - shift)); goto ; c = (tmp >> (16 - shift)); tmp = (tmp << shift) | (tmp >> (16 - shift)); $(CARRY) = c:1 & 1; dst5Ax = zext(tmp); setResultFlags(tmp); } ##### RTS ##### :RTS is b1_0007=0xdf { pc:3 = 0; pop3(pc); return [pc]; } ##### SBB ##### # (1) SBB.B #simm, dst :SBB.B srcSimm8, dst5B is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) ... 
& $(DST5B)); srcSimm8 { tmp:1 = dst5B; c:1 = $(CARRY); setSubtract3Flags(tmp, srcSimm8, c); tmp = tmp - srcSimm8 - c; dst5B = tmp; setResultFlags(tmp); } # (1) SBB.B #simm, Ax :SBB.B srcSimm8, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x2e) & $(DST5AX)); srcSimm8 { tmp:1 = dst5Ax:1; c:1 = $(CARRY); setSubtract3Flags(tmp, srcSimm8, c); tmp = tmp - srcSimm8 - c; dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) SBB.W #simm, dst :SBB.W srcSimm16, dst5W is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) ... & $(DST5W)); srcSimm16 { tmp:2 = dst5W; c:2 = zext($(CARRY)); setSubtract3Flags(tmp, srcSimm16, c); tmp = tmp - srcSimm16 - c; dst5W = tmp; setResultFlags(tmp); } # (1) SBB.B #simm, Ax :SBB.W srcSimm16, dst5Ax is b0_0007=0x1; ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x2e) & $(DST5AX)); srcSimm16 { tmp:2 = dst5Ax:2; c:2 = zext($(CARRY)); setSubtract3Flags(tmp, srcSimm16, c); tmp = tmp - srcSimm16 - c; dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) SBB.B src5, dst5 :SBB.B src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { tmp:1 = dst5B_afterSrc5; s:1 = src5B; c:1 = $(CARRY); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst5B_afterSrc5 = tmp; setResultFlags(tmp); } # (2) SBB.B src5, Ax :SBB.B src5B, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x6) ... & $(SRC5B) & $(DST5AX) ...) { tmp:1 = dst5Ax:1; s:1 = src5B; c:1 = $(CARRY); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) SBB.W src5, dst5 :SBB.W src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) { tmp:2 = dst5W_afterSrc5; s:2 = src5W; c:2 = zext($(CARRY)); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (2) SBB.W src5, Ax :SBB.W src5W, dst5Ax is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x6) ... & $(SRC5W) & $(DST5AX) ...) 
{ tmp:2 = dst5Ax:2; s:2 = src5W; c:2 = zext($(CARRY)); setSubtract3Flags(tmp, s, c); tmp = tmp - s - c; dst5Ax = zext(tmp); setResultFlags(tmp); } ##### SBJNZ - PSUEDO-OP! SAME AS ADJNZ ##### ##### SCCnd ##### :SC^b2cnd dst5W is (b1_0407=0xd & b1_size_0=1; b2_0405=3 & b2cnd) ... & $(DST5W) { dst5W = zext(b2cnd); } :SC^b2cnd dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0405=3 & b2cnd) & $(DST5AX) { dst5Ax = zext(b2cnd); } ##### SCMPU ##### :SCMPU.B is b1_0007=0xb8; b2_0007=0xc3 { tmp0:1 = *:1 A0; tmp2:1 = *:1 A1; setSubtractFlags(tmp0, tmp2); tmp:1 = tmp0 - tmp2; setResultFlags(tmp); A0 = A0 + 1; A1 = A1 + 1; if ((tmp0 != 0) && (tmp0 == tmp2)) goto inst_start; } :SCMPU.W is b1_0007=0xb8; b2_0007=0xd3 { # TODO: The symantic description looks suspicious - manual may be incorrect ?? tmp0:1 = *:1 A0; tmp2:1 = *:1 A1; setSubtractFlags(tmp0, tmp2); setResultFlags(tmp0 - tmp2); A0 = A0 + 1; A1 = A1 + 1; tmp1:1 = *:1 A0; tmp3:1 = *:1 A1; A0 = A0 + 1; A1 = A1 + 1; if (tmp0 == 0 || tmp0 != tmp2) goto ; setSubtractFlags(tmp1, tmp3); setResultFlags(tmp1 - tmp3); if ((tmp0 != 0) && (tmp1 != 0) && (tmp0 == tmp2) && (tmp1 == tmp3)) goto inst_start; } ##### SHA ##### macro SHAsetShiftRightFlags(val,shift,result) { local c = (val >> (shift - 1)) & 1; $(CARRY) = c:1; local mask = ~(-(1 << shift)); allOnes:1 = (mask & val) == mask; allZeros:1 = (mask & val) == 0; $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); setResultFlags(result); } macro SHAsetShiftLeftFlags(val,shift,result,sze) { local c = (val >> (sze - shift)) & 1; $(CARRY) = c:1; local mask = -(1 << shift); allOnes:1 = (mask & val) == mask; allZeros:1 = (mask & val) == 0; $(OVERFLOW) = (result s< 0 && allOnes) || (result s>= 0 && allZeros); setResultFlags(result); } # (1) SHA.B #imm4, dst5 (right) :SHA.B srcSimm4Shift, dst5B is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... 
& $(DST5B) { val:1 = dst5B; shift:1 = -srcSimm4Shift; tmp:1 = val s>> shift; dst5B = tmp; SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.B #imm4, Ax (right) :SHA.B srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { val:1 = dst5Ax:1; shift:1 = -srcSimm4Shift; tmp:1 = val s>> shift; dst5Ax = zext(tmp); SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.W #imm4, dst5 (right) :SHA.W srcSimm4Shift, dst5W is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5W) { val:2 = dst5W; shift:1 = -srcSimm4Shift; tmp:2 = val s>> shift; dst5W = tmp; SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.W #imm4, Ax (right) :SHA.W srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { val:2 = dst5Ax:2; shift:1 = -srcSimm4Shift; tmp:2 = val s>> shift; dst5Ax = zext(tmp); SHAsetShiftRightFlags(val, shift, tmp); } # (1) SHA.B #imm4, dst5 (left) :SHA.B srcSimm4Shift, dst5B is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5B) { val:1 = dst5B; shift:1 = srcSimm4Shift; tmp:1 = val << shift; dst5B = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (1) SHA.B #imm4, Ax (left) :SHA.B srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { val:1 = dst5Ax:1; shift:1 = srcSimm4Shift; tmp:1 = val << shift; dst5Ax = zext(tmp); SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (1) SHA.W #imm4, dst5 (left) :SHA.W srcSimm4Shift, dst5W is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... 
& $(DST5W) { val:2 = dst5W; shift:1 = srcSimm4Shift; tmp:2 = val << shift; dst5W = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 16); } # (1) SHA.W #imm4, Ax (left) :SHA.W srcSimm4Shift, dst5Ax is (b1_0407=0xf & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { val:2 = dst5Ax:2; shift:1 = srcSimm4Shift; tmp:2 = val << shift; dst5Ax = zext(tmp); SHAsetShiftLeftFlags(val, shift, tmp, 16); } # (2) SHA.L #imm, dst5 :SHA.L srcSimm8, dst5L is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x21) ... & $(DST5L)); srcSimm8 { # Unable to pattern match on sign bit due to interior ellipses shift:1 = srcSimm8; val:4 = dst5L; if (shift s> 0) goto ; shift = -shift; tmp:4 = val s>> shift; dst5L = tmp; SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5L = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 32); } # (2) SHA.L #imm, Ax :SHA.L srcSimm8, dst5Ax is ((b1_0407=0xa & b1_size_0=0; b2_0005=0x21) & $(DST5AX)); srcSimm8 { # Unable to pattern match on sign bit due to interior ellipses shift:1 = srcSimm8; val:4 = zext(dst5Ax); if (shift s> 0) goto ; shift = -shift; tmp:4 = val s>> shift; dst5Ax = tmp:3; goto inst_next; tmp = val << shift; dst5Ax = tmp:3; # No flags set } # (3) SHA.B R1H, dst5 :SHA.B R1H, dst5B is (R1H & b1_0407=0xb & b1_size_0=0; b2_0005=0x3e) ... 
& $(DST5B) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:1 = dst5B; if (shift s> 0) goto ; shift = -shift; tmp:1 = val s>> shift; dst5B = tmp; SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5B = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (3) SHA.B R1H, Ax :SHA.B R1H, dst5Ax is (R1H & b1_0407=0xb & b1_size_0=0; b2_0005=0x3e) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:1 = dst5Ax:1; if (shift s> 0) goto ; shift = -shift; tmp:1 = val s>> shift; dst5Ax = zext(tmp); SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5Ax = zext(tmp); SHAsetShiftLeftFlags(val, shift, tmp, 8); } # (3) SHA.W R1H, dst5 :SHA.W R1H, dst5W is (R1H & b1_0407=0xb & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:2 = dst5W; if (shift s> 0) goto ; shift = -shift; tmp:2 = val s>> shift; dst5W = tmp; SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5W = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 16); } # (3) SHA.W R1H, Ax :SHA.W R1H, dst5Ax is (R1H & b1_0407=0xb & b1_size_0=1; b2_0005=0x3e) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:2 = dst5Ax:2; if (shift s> 0) goto ; shift = -shift; tmp:2 = val s>> shift; dst5Ax = zext(tmp); SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5Ax = zext(tmp); SHAsetShiftLeftFlags(val, shift, tmp, 16); } # (4) SHA.L R1H, dst5 :SHA.L R1H, dst5L is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x11) ... 
& $(DST5L) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:4 = dst5L; if (shift s> 0) goto ; shift = -shift; tmp:4 = val s>> shift; dst5L = tmp; SHAsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5L = tmp; SHAsetShiftLeftFlags(val, shift, tmp, 32); } # (4) SHA.L R1H, Ax :SHA.L R1H, dst5Ax is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x11) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:4 = zext(dst5Ax); if (shift s> 0) goto ; shift = -shift; tmp:4 = val s>> shift; dst5Ax = tmp:3; goto inst_next; tmp = val << shift; dst5Ax = tmp:3; # No flags set } ##### SHL ##### macro SHLsetShiftRightFlags(val,shift,result) { local c = (val >> (shift - 1)) & 1; $(CARRY) = c:1; setResultFlags(result); } macro SHLsetShiftLeftFlags(val,shift,result,sze) { local c = (val >> (sze - shift)) & 1; $(CARRY) = c:1; setResultFlags(result); } # (1) SHL.B #imm4, dst5 (right) :SHL.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... & $(DST5B) { val:1 = dst5B; shift:1 = -srcSimm4Shift; tmp:1 = val >> shift; dst5B = tmp; SHLsetShiftRightFlags(val, shift, tmp); } # (1) SHL.B #imm4, Ax (right) :SHL.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { val:1 = dst5Ax:1; shift:1 = -srcSimm4Shift; tmp:1 = val >> shift; dst5Ax = zext(tmp); SHLsetShiftRightFlags(val, shift, tmp); } # (1) SHL.W #imm4, dst5 (right) :SHL.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) ... 
& $(DST5W) { val:2 = dst5W; shift:1 = -srcSimm4Shift; tmp:2 = val >> shift; dst5W = tmp; SHLsetShiftRightFlags(val, shift, tmp); } # (1) SHL.W #imm4, Ax (right) :SHL.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=1 & srcSimm4Shift) & $(DST5AX) { val:2 = dst5Ax:2; shift:1 = -srcSimm4Shift; tmp:2 = val >> shift; dst5Ax = zext(tmp); SHLsetShiftRightFlags(val, shift, tmp); } # (1) SHL.B #imm4, dst5 (left) :SHL.B srcSimm4Shift, dst5B is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5B) { val:1 = dst5B; shift:1 = srcSimm4Shift; tmp:1 = val << shift; dst5B = tmp; SHLsetShiftLeftFlags(val, shift, tmp, 8); } # (1) SHL.B #imm4, Ax (left) :SHL.B srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=0; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { val:1 = dst5Ax:1; shift:1 = srcSimm4Shift; tmp:1 = val << shift; dst5Ax = zext(tmp); SHLsetShiftLeftFlags(val, shift, tmp, 8); } # (1) SHL.W #imm4, dst5 (left) :SHL.W srcSimm4Shift, dst5W is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) ... & $(DST5W) { val:2 = dst5W; shift:1 = srcSimm4Shift; tmp:2 = val << shift; dst5W = tmp; SHLsetShiftLeftFlags(val, shift, tmp, 16); } # (1) SHL.W #imm4, Ax (left) :SHL.W srcSimm4Shift, dst5Ax is (b1_0407=0xe & b1_size_0=1; b2_0405=0 & b2_shiftSign=0 & srcSimm4Shift) & $(DST5AX) { val:2 = dst5Ax:2; shift:1 = srcSimm4Shift; tmp:2 = val << shift; dst5Ax = zext(tmp); SHLsetShiftLeftFlags(val, shift, tmp, 16); } # (2) SHL.L #imm, dst5 :SHL.L srcSimm8, dst5L is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x21) ... 
& $(DST5L)); srcSimm8 { # Unable to pattern match on sign bit due to interior ellipses shift:1 = srcSimm8; val:4 = dst5L; if (shift s> 0) goto ; shift = -shift; tmp:4 = val >> shift; dst5L = tmp; SHLsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5L = tmp; SHLsetShiftLeftFlags(val, shift, tmp, 32); } # (2) SHL.L #imm, Ax :SHL.L srcSimm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x21) & $(DST5AX)); srcSimm8 { # Unable to pattern match on sign bit due to interior ellipses shift:1 = srcSimm8; val:4 = zext(dst5Ax); if (shift s> 0) goto ; shift = -shift; tmp:4 = val >> shift; dst5Ax = tmp:3; goto inst_next; tmp = val << shift; dst5Ax = tmp:3; # No flags set } # (3) SHL.B R1H, dst5 :SHL.B R1H, dst5B is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:1 = dst5B; if (shift s> 0) goto ; shift = -shift; tmp:1 = val >> shift; dst5B = tmp; SHLsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5B = tmp; SHLsetShiftLeftFlags(val, shift, tmp, 8); } # (3) SHL.B R1H, Ax :SHL.B R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=0; b2_0005=0x3e) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:1 = dst5Ax:1; if (shift s> 0) goto ; shift = -shift; tmp:1 = val >> shift; dst5Ax = zext(tmp); SHLsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5Ax = zext(tmp); SHLsetShiftLeftFlags(val, shift, tmp, 8); } # (3) SHL.W R1H, dst5 :SHL.W R1H, dst5W is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3e) ... 
& $(DST5W) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:2 = dst5W; if (shift s> 0) goto ; shift = -shift; tmp:2 = val >> shift; dst5W = tmp; SHLsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5W = tmp; SHLsetShiftLeftFlags(val, shift, tmp, 16); } # (3) SHL.W R1H, Ax :SHL.W R1H, dst5Ax is (R1H & b1_0407=0xa & b1_size_0=1; b2_0005=0x3e) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:2 = dst5Ax:2; if (shift s> 0) goto ; shift = -shift; tmp:2 = val >> shift; dst5Ax = zext(tmp); SHLsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5Ax = zext(tmp); SHLsetShiftLeftFlags(val, shift, tmp, 16); } # (4) SHL.L R1H, dst5 :SHL.L R1H, dst5L is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x01) ... & $(DST5L) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:4 = dst5L; if (shift s> 0) goto ; shift = -shift; tmp:4 = val >> shift; dst5L = tmp; SHLsetShiftRightFlags(val, shift, tmp); goto inst_next; tmp = val << shift; dst5L = tmp; SHLsetShiftLeftFlags(val, shift, tmp, 32); } # (4) SHL.L R1H, Ax :SHL.L R1H, dst5Ax is (R1H & b1_0407=0xc & b1_size_0=0; b2_0005=0x01) & $(DST5AX) { if (R1H == 0) goto inst_next; shift:1 = R1H; val:4 = zext(dst5Ax); if (shift s> 0) goto ; shift = -shift; tmp:4 = val >> shift; dst5Ax = tmp:3; goto inst_next; tmp = val << shift; dst5Ax = tmp:3; # No flags set } ##### SIN ##### :SIN.B is b1_0007=0xb2; b2_0007=0x83 { if (R3 == 0) goto inst_next; *:1 A1 = *:1 A0; A1 = A1 + 1; R3 = R3 - 1; goto inst_start; } :SIN.W is b1_0007=0xb2; b2_0007=0x93 { if (R3 == 0) goto inst_next; *:2 A1 = *:2 A0; A1 = A1 + 2; R3 = R3 - 1; goto inst_start; } ##### SMOVB ##### :SMOVB.B is b1_0007=0xb6; b2_0007=0x83 { if (R3 == 0) goto inst_next; *:1 A1 = *:1 A0; A1 = A1 - 1; A0 = A0 - 1; R3 = R3 - 1; goto inst_start; } :SMOVB.W is b1_0007=0xb6; b2_0007=0x93 { if (R3 == 0) goto inst_next; *:2 A1 = *:2 A0; A1 = A1 - 2; A0 = A0 - 2; R3 = R3 - 1; goto inst_start; } ##### SMOVF ##### :SMOVF.B is b1_0007=0xb0; 
b2_0007=0x83 { if (R3 == 0) goto inst_next; *:1 A1 = *:1 A0; A1 = A1 + 1; A0 = A0 + 1; R3 = R3 - 1; goto inst_start; } :SMOVF.W is b1_0007=0xb0; b2_0007=0x93 { if (R3 == 0) goto inst_next; *:2 A1 = *:2 A0; A1 = A1 + 2; A0 = A0 + 2; R3 = R3 - 1; goto inst_start; } ##### SMOVU ##### :SMOVU.B is b1_0007=0xb8; b2_0007=0x83 { local tmp:1 = *:1 A0; *:1 A1 = tmp; A0 = A0 + 1; A1 = A1 + 1; if (tmp != 0) goto inst_start; } :SMOVU.W is b1_0007=0xb8; b2_0007=0x93 { local tmp:2 = *:2 A0; *:2 A1 = tmp; A0 = A0 + 2; A1 = A1 + 2; local tmp0:2 = tmp & 0xff; local tmp1:2 = tmp & 0xff00; if ((tmp0 != 0) && (tmp1 != 0)) goto inst_start; } ##### SOUT ##### :SOUT.B is b1_0007=0xb4; b2_0007=0x83 { if (R3 == 0) goto inst_next; *:1 A1 = *:1 A0; A0 = A0 + 1; R3 = R3 - 1; goto inst_start; } :SOUT.W is b1_0007=0xb4; b2_0007=0x93 { if (R3 == 0) goto inst_next; *:2 A1 = *:2 A0; A0 = A0 + 2; R3 = R3 - 1; goto inst_start; } ##### SSTR ##### :SSTR.B is b1_0007=0xb8; b2_0007=0x03 { if (R3 == 0) goto inst_next; *:1 A1 = R0L; A1 = A1 + 1; R3 = R3 - 1; goto inst_start; } :SSTR.W is b1_0007=0xb8; b2_0007=0x13 { if (R3 == 0) goto inst_next; *:2 A1 = R0; A1 = A1 + 2; R3 = R3 - 1; goto inst_start; } ##### STC ##### # (1) STC dreg24, dst5 :STC b2_dreg24, dst5L is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_dreg24) ... & $(DST5L)) { dst5L = zext(b2_dreg24); } # (1) STC dreg24, Ax :STC b2_dreg24, dst5Ax is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_dreg24) & $(DST5AX)) { dst5Ax = b2_dreg24; } # (2) STC reg16, dst5 :STC b2_creg16, dst5W is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x3 & b2_creg16) ... & $(DST5W)) { dst5W = b2_creg16; } # (2) STC reg16, Ax :STC b2_creg16, dst5Ax is b0_0007=0x1; ((b1_0407=0xd & b1_size_0=1; b2_0305=0x3 & b2_creg16) & $(DST5AX)) { dst5Ax = zext(b2_creg16); } # (3) STC reg24, dst5L :STC b2_creg24, dst5L is (b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_creg24) ... 
& $(DST5L) { dst5L = zext(b2_creg24); } # (3) STC reg24, Ax :STC b2_creg24, dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0305=0x2 & b2_creg24) & $(DST5AX) { dst5Ax = b2_creg24; } ##### STCTX ##### :STCTX abs16offset, abs24offset is b1_0007=0xb6; b2_0007=0xd3; abs16offset; imm24_dat & abs24offset { taskNum:1 = abs16offset; # load task number stored at abs16 ptr:3 = imm24_dat + (zext(taskNum) * 2); # compute table entry address relative to abs24 regInfo:1 = *:1 ptr; ptr = ptr + 1; spCorrect:1 = *:1 ptr; ptr = SP; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 4; *:4 ptr = FB; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 4; *:4 ptr = SB; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 4; *:4 ptr = A1; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 4; *:4 ptr = A0; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 2; *:2 ptr = R3; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 2; *:2 ptr = R2; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 2; *:2 ptr = R1; regInfo = regInfo << 1; if ((regInfo & 0x80) == 0) goto ; ptr = ptr - 2; *:2 ptr = R0; SP = SP - zext(spCorrect); } ##### STNZ ##### # (1) STNZ.B #imm, dst5 :STNZ.B srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1f) ... & $(DST5B)); srcImm8 { if ($(ZERO) != 0) goto inst_next; dst5B = srcImm8; } # (1) STNZ.B #imm, Ax :STNZ.B srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x1f) & $(DST5AX)); srcImm8 { if ($(ZERO) != 0) goto inst_next; dst5Ax = zext(srcImm8); } # (1) STNZ.W #imm, dst5 :STNZ.W srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1f) ... 
& $(DST5W)); srcImm16 { if ($(ZERO) != 0) goto inst_next; dst5W = srcImm16; } # (1) STNZ.W #imm, Ax :STNZ.W srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x1f) & $(DST5AX)); srcImm16 { if ($(ZERO) != 0) goto inst_next; dst5Ax = zext(srcImm16); } ##### STZ ##### # (1) STZ.B #imm, dst5 :STZ.B srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0f) ... & $(DST5B)); srcImm8 { if ($(ZERO) == 0) goto inst_next; dst5B = srcImm8; } # (1) STZ.B #imm, Ax :STZ.B srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0f) & $(DST5AX)); srcImm8 { if ($(ZERO) == 0) goto inst_next; dst5Ax = zext(srcImm8); } # (1) STZ.W #imm, dst5 :STZ.W srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0f) ... & $(DST5W)); srcImm16 { if ($(ZERO) == 0) goto inst_next; dst5W = srcImm16; } # (1) STZ.W #imm, Ax :STZ.W srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0f) & $(DST5AX)); srcImm16 { if ($(ZERO) == 0) goto inst_next; dst5Ax = zext(srcImm16); } ##### STZX ##### # STZX.B #imm, #imm, dst5 :STZX.B srcImm8, srcImm8a, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x3f) ... & $(DST5B)); srcImm8; srcImm8a { z:1 = $(ZERO); dst5B = (z * srcImm8) + (!z * srcImm8a); } # STZX.B #imm, #imm, Ax :STZX.B srcImm8, srcImm8a, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x3f) & $(DST5AX)); srcImm8; srcImm8a { z:1 = $(ZERO); dst5Ax = zext((z * srcImm8) + (!z * srcImm8a)); } # STZX.W #imm, #imm, dst5 :STZX.W srcImm16, srcImm16a, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x3f) ... & $(DST5W)); srcImm16; srcImm16a { z:1 = $(ZERO); dst5W = (zext(z) * srcImm16) + (zext(!z) * srcImm16a); } # STZX.W #imm, #imm, Ax :STZX.W srcImm16, srcImm16a, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x3f) & $(DST5AX)); srcImm16; srcImm16a { z:1 = $(ZERO); dst5Ax = zext((zext(z) * srcImm16) + (zext(!z) * srcImm16a)); } ##### SUB ##### # (1) SUB.B:G #simm, dst :SUB^".B:G" srcSimm8, dst5B is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) ... 
& $(DST5B)); srcSimm8 { tmp:1 = dst5B; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; dst5B = tmp; setResultFlags(tmp); } # (1) SUB.B:G #simm, Ax :SUB^".B:G" srcSimm8, dst5Ax is ((b1_0407=0x8 & b1_size_0=0; b2_0005=0x3e) & $(DST5AX)); srcSimm8 { tmp:1 = dst5Ax:1; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) SUB.W:G #simm, dst :SUB^".W:G" srcSimm16, dst5W is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W)); srcSimm16 { tmp:2 = dst5W; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; dst5W = tmp; setResultFlags(tmp); } # (1) SUB.W:G #simm, Ax :SUB^".W:G" srcSimm16, dst5Ax is ((b1_0407=0x8 & b1_size_0=1; b2_0005=0x3e) & $(DST5AX)); srcSimm16 { tmp:2 = dst5Ax:2; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) SUB.L:G #simm, dst :SUB^".L:G" srcSimm32, dst5L is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x31) ... & $(DST5L)); srcSimm32 { tmp:4 = dst5L; setSubtractFlags(tmp, srcSimm32); tmp = tmp - srcSimm32; dst5L = tmp; setResultFlags(tmp); } # (2) SUB.L:G #simm, Ax :SUB^".L:G" srcSimm32, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x31) & $(DST5AX)); srcSimm32 { tmp:4 = zext(dst5Ax); setSubtractFlags(tmp, srcSimm32); tmp = tmp - srcSimm32; dst5Ax = tmp:3; setResultFlags(tmp); } # (3) SUB.B:S #simm, dst :SUB^".B:S" srcSimm8, dst2B is ((b1_0607=0 & b1_0103=7 & b1_size_0=0) ... & dst2B); srcSimm8 { tmp:1 = dst2B; setSubtractFlags(tmp, srcSimm8); tmp = tmp - srcSimm8; dst2B = tmp; setResultFlags(tmp); } # (3) SUB.W:S #simm, dst :SUB^".W:S" srcSimm16, dst2W is ((b1_0607=0 & b1_0103=7 & b1_size_0=1) ... & dst2W); srcSimm16 { tmp:2 = dst2W; setSubtractFlags(tmp, srcSimm16); tmp = tmp - srcSimm16; dst2W = tmp; setResultFlags(tmp); } # (4) SUB.B:G src, dst :SUB^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... 
{ tmp:1 = dst5B_afterSrc5; src:1 = src5B; setSubtractFlags(tmp, src); tmp = tmp - src; dst5B_afterSrc5 = tmp; setResultFlags(tmp); } # (4) SUB.B:G src, Ax - Ax destination case :SUB^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0xa) ... & $(SRC5B) & $(DST5AX) ... { tmp:1 = dst5Ax:1; src:1 = src5B; setSubtractFlags(tmp, src); tmp = tmp - src; dst5Ax = zext(tmp); setResultFlags(tmp); } # (4) SUB.W:G src, dst :SUB^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { tmp:2 = dst5W_afterSrc5; src:2 = src5W; setSubtractFlags(tmp, src); tmp = tmp - src; dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (4) SUB.W:G src, Ax - Ax destination case :SUB^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0xa) ... & $(SRC5W) & $(DST5AX) ... { tmp:2 = dst5Ax:2; src:2 = src5W; setSubtractFlags(tmp, src); tmp = tmp - src; dst5Ax = zext(tmp); setResultFlags(tmp); } # (5) SUB.L:G src, dst :SUB^".L:G" src5L, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5L) ... & $(DST5L_AFTER_SRC5) ... { tmp:4 = dst5L_afterSrc5; src:4 = src5L; setSubtractFlags(tmp, src); tmp = tmp - src; dst5L_afterSrc5 = tmp; setResultFlags(tmp); } # (5) SUB.L:G src, Ax - Ax destination case :SUB^".L:G" src5L, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x0) ... & $(SRC5L) & $(DST5AX) ... { tmp:4 = zext(dst5Ax); src:4 = src5L; setSubtractFlags(tmp, src); tmp = tmp - src; dst5Ax = tmp:3; setResultFlags(tmp); } ##### SUBX ##### # (1) SUBX #simm, dst5 :SUBX srcSimm8, dst5L is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x11) ... 
& $(DST5L)); srcSimm8 { tmp:4 = dst5L; src:4 = sext(srcSimm8); setSubtractFlags(tmp, src); tmp = tmp - src; dst5L = tmp; setResultFlags(tmp); } # (1) SUBX #simm, Ax :SUBX srcSimm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x11) & $(DST5AX)); srcSimm8 { tmp:4 = zext(dst5Ax); src:4 = sext(srcSimm8); setSubtractFlags(tmp, src); tmp = tmp - src; dst5Ax = tmp:3; setResultFlags(tmp); } # (2) SUBX src5, dst5 :SUBX src5B, dst5L_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) ... & $(DST5L_AFTER_SRC5) ... { tmp:4 = dst5L_afterSrc5; src:4 = sext(src5B); setSubtractFlags(tmp, src); tmp = tmp - src; dst5L_afterSrc5 = tmp; setResultFlags(tmp); } # (2) SUBX src5, Ax :SUBX src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x0) ... & $(SRC5B) & $(DST5AX) ... { tmp:4 = zext(dst5Ax); src:4 = sext(src5B); setSubtractFlags(tmp, src); tmp = tmp - src; dst5Ax = tmp:3; setResultFlags(tmp); } ##### TST ##### # (1) TST.B:G #imm, dst :TST^".B:G" srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x3e) ... & $(DST5B)); srcImm8 { tmp:1 = dst5B & srcImm8; setResultFlags(tmp); } # (1) TST.W:G #imm, dst :TST^".W:G" srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x3e) ... & $(DST5W)); srcImm16 { tmp:2 = dst5W & srcImm16; setResultFlags(tmp); } # (2) TST.B:S #imm, dst :TST^".B:S" srcImm8, dst2B is ((b1_0607=0 & b1_0103=6 & b1_size_0=0) ... & dst2B); srcImm8 { tmp:1 = dst2B & srcImm8; setResultFlags(tmp); } # (2) TST.W:S #imm, dst :TST^".W:S" srcImm16, dst2W is ((b1_0607=0 & b1_0103=6 & b1_size_0=1) ... & dst2W); srcImm16 { tmp:2 = dst2W & srcImm16; setResultFlags(tmp); } # (3) TST.B:G src5, dst5 :TST^".B:G" src5B, dst5B_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=0; b2_0003=0x9) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ...) { tmp:1 = dst5B_afterSrc5 & src5B; setResultFlags(tmp); } # (3) TST.W:G src5, dst5 :TST^".W:G" src5W, dst5W_afterSrc5 is b0_0007=0x1; ((b1_0707=1 & b1_size_0=1; b2_0003=0x9) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ...) 
{ tmp:2 = dst5W_afterSrc5 & src5W; setResultFlags(tmp); }

##### UND #####
# Don't implement this "Undefined" instruction
# :UND is b1_0007=0xff

##### WAIT #####
# WAIT stops instruction execution until an interrupt; modeled as an opaque pcodeop.
:WAIT is b1_0007=0xb2; b2_0007=0x03 { Wait(); }

##### XCHG #####
# Exchange register with memory/register operand.  The 24-bit address
# registers (Ax) are widened/narrowed with zext()/truncation as needed.
# XCHG.B reg8, dst5
:XCHG.B b2_reg8, dst5B is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0101=0 & b2_reg8) ... & $(DST5B) { tmp:1 = dst5B; dst5B = b2_reg8; b2_reg8 = tmp; }
# XCHG.B Ax, dst5
:XCHG.B b2_regAx, dst5B is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0102=1 & b2_regAx) ... & $(DST5B) { tmp:1 = dst5B; dst5B = b2_regAx:1; b2_regAx = zext(tmp); }
# XCHG.B reg8, Ax
:XCHG.B b2_reg8, dst5Ax is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0101=0 & b2_reg8) & $(DST5AX) { tmp:1 = dst5Ax:1; dst5Ax = zext(b2_reg8); b2_reg8 = tmp; }
# XCHG.B Ax, Ax
:XCHG.B b2_regAx, dst5Ax is (b1_0407=0xd & b1_size_0=0; b2_0305=1 & b2_0102=1 & b2_regAx) & $(DST5AX) { tmp:1 = dst5Ax:1; dst5Ax = zext(b2_regAx:1); b2_regAx = zext(tmp); }
# XCHG.W reg16, dst5
:XCHG.W b2_reg16, dst5W is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0101=0 & b2_reg16) ... & $(DST5W) { tmp:2 = dst5W; dst5W = b2_reg16; b2_reg16 = tmp; }
# XCHG.W Ax, dst5
:XCHG.W b2_regAx, dst5W is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0102=1 & b2_regAx) ... & $(DST5W) { tmp:2 = dst5W; dst5W = b2_regAx:2; b2_regAx = zext(tmp); }
# XCHG.W reg16, Ax
:XCHG.W b2_reg16, dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0101=0 & b2_reg16) & $(DST5AX) { tmp:2 = dst5Ax:2; dst5Ax = zext(b2_reg16); b2_reg16 = tmp; }
# XCHG.W Ax, Ax
:XCHG.W b2_regAx, dst5Ax is (b1_0407=0xd & b1_size_0=1; b2_0305=1 & b2_0102=1 & b2_regAx) & $(DST5AX)
{
	tmp:3 = dst5Ax;
	dst5Ax = zext(b2_regAx:2);	# dest Ax receives low 16-bits of src Ax zero extended
	b2_regAx = tmp;			# src Ax receives all 24-bits of dest Ax
}

##### XOR #####
# (1) XOR.B #imm, dst
:XOR^".B:G" srcImm8, dst5B is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) ...
& $(DST5B)); srcImm8 { tmp:1 = dst5B ^ srcImm8; dst5B = tmp; setResultFlags(tmp); } # (1) XOR.B #imm, Ax :XOR^".B:G" srcImm8, dst5Ax is ((b1_0407=0x9 & b1_size_0=0; b2_0005=0x0e) & $(DST5AX)); srcImm8 { tmp:1 = dst5Ax:1 ^ srcImm8; dst5Ax = zext(tmp); setResultFlags(tmp); } # (1) XOR.W #imm, dst :XOR^".W:G" srcImm16, dst5W is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) ... & $(DST5W)); srcImm16 { tmp:2 = dst5W ^ srcImm16; dst5W = tmp; setResultFlags(tmp); } # (1) XOR.W #imm, Ax :XOR^".W:G" srcImm16, dst5Ax is ((b1_0407=0x9 & b1_size_0=1; b2_0005=0x0e) & $(DST5AX)); srcImm16 { tmp:2 = dst5Ax:2 ^ srcImm16; dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) XOR.B src5, dst5 :XOR^".B:G" src5B, dst5B_afterSrc5 is (b1_0707=1 & b1_size_0=0; b2_0003=0x9) ... & $(SRC5B) ... & $(DST5B_AFTER_SRC5) ... { tmp:1 = dst5B_afterSrc5 ^ src5B; dst5B_afterSrc5 = tmp; setResultFlags(tmp); } # (2) XOR.B src5, Ax :XOR^".B:G" src5B, dst5Ax is (b1_0707=1 & b1_size_0=0; b2_0003=0x9) ... & $(SRC5B) & $(DST5AX) ... { tmp:1 = dst5Ax:1 ^ src5B; dst5Ax = zext(tmp); setResultFlags(tmp); } # (2) XOR.W src5, dst5 :XOR^".W:G" src5W, dst5W_afterSrc5 is (b1_0707=1 & b1_size_0=1; b2_0003=0x9) ... & $(SRC5W) ... & $(DST5W_AFTER_SRC5) ... { tmp:2 = dst5W_afterSrc5 ^ src5W; dst5W_afterSrc5 = tmp; setResultFlags(tmp); } # (2) XOR.W src5, Ax :XOR^".W:G" src5W, dst5Ax is (b1_0707=1 & b1_size_0=1; b2_0003=0x9) ... & $(SRC5W) & $(DST5AX) ... 
{ tmp:2 = dst5Ax:2 ^ src5W; dst5Ax = zext(tmp); setResultFlags(tmp); } } # end phase=1 ================================================ FILE: pypcode/processors/M16C/data/manuals/M16C_60.idx ================================================ @m16csm.pdf ABS, 55 ADC, 56 ADCF, 57 ADD, 58 ADJNZ, 60 AND, 61 BAND, 63 BCLR, 64 BM, 65 BNAND, 66 BNOR, 67 BNOT, 68 BNTST, 69 BNXOR, 70 BOR, 71 BRK, 72 BSET, 73 BTST, 74 BTSTC, 75 BTSTS, 76 BXOR, 77 CMP, 78 DADC, 80 DADD, 81 DEC, 82 DIV, 83 DIVU, 84 DIVX, 85 DSBB, 86 DSUB, 87 ENTER, 88 EXITD, 89 EXTS, 90 FCLR, 91 FSET, 92 INC, 93 INT, 94 INTO, 95 J, 96 JMP, 97 JMPI, 98 JMPS, 99 JSR, 100 JSRI, 101 JSRS, 102 LDC, 103 LDCTX, 104 LDE, 105 LDINTB, 106 LDIPL, 107 MOV, 108 MOVA, 110 MOVHH, 111 MOVHL, 111 MOVLH, 111 MOVLL, 111 MUL, 112 MULU, 113 NEG, 114 NOP, 115 NOT, 116 OR, 117 POP, 119 POPC, 120 POPM, 121 PUSH, 122 PUSHA, 123 PUSHC, 124 PUSHM, 125 REIT, 126 RMPA, 127 ROLC, 128 RORC, 129 ROT, 130 RTS, 131 SBB, 132 SBJNZ, 133 SHA, 134 SHL, 135 SMOVB, 136 SMOVF, 137 SSTR, 138 STC, 139 STCTX, 140 STE, 141 STNZ, 142 STZ, 143 STZX, 144 SUB, 145 TST, 147 UND, 148 WAIT, 149 XCHG, 150 XOR, 151 ================================================ FILE: pypcode/processors/M16C/data/manuals/M16C_80.idx ================================================ @m16c80.pdf ABS, 60 ADC, 61 ADCF, 62 ADD, 63 ADDX, 65 ADJNZ, 66 AND, 67 BAND, 69 BCLR, 70 BITINDEX, 71 BM, 72 BNAND, 73 BNOR, 74 BNOT, 75 BNTST, 76 BNXOR, 77 BOR, 78 BRK, 79 BRK2, 80 BSET, 81 BTST, 82 BTSTC, 83 BTSTS, 84 BXOR, 85 CLIP, 86 CMP, 87 CMPX, 89 DADC, 90 DADD, 91 DEC, 92 DIV, 93 DIVU, 94 DIVX, 95 DSBB, 96 DSUB, 97 ENTER, 98 EXITD, 99 EXTS, 100 EXTZ, 101 FLCR, 102 FREIT, 103 FSET, 104 INC, 105 INDEX, 175 INT, 107 INTO, 108 J, 109 JMP, 110 JMPI, 111 JMPS, 112 JSR, 113 JSRI, 114 JSRS, 115 LDC, 116 LDCTX, 117 LDIPL, 118 MAX, 119 MIN, 120 MOV, 121 MOVA, 123 MOVHH, 124 MOVHL, 124 MOVLH, 124 MOVLL, 124 MOVX, 125 MUL, 126 MULEX, 127 MULU, 128 NEG, 129 NOP, 130 NOT, 131 OR, 132 POP, 134 POPC, 135 POPM, 
136 PUSH, 137 PUSHA, 138 PUSHC, 139 PUSHM, 140 REIT, 141 RMPA, 142 ROLC, 143 RORC, 144 ROT, 145 RTS, 146 SBB, 147 SBJNZ, 148 SC, 149 SCMPU, 150 SHA, 151 SHL, 153 SIN, 155 SMOVB, 156 SMOVF, 157 SMOVU, 158 SOUT, 159 SSTR, 160 STC, 161 STCTX, 162 STNZ, 163 STZ, 164 STZX, 165 SUB, 166 SUBX, 168 TST, 169 UND, 171 WAIT, 172 XCHG, 173 XOR, 174 ================================================ FILE: pypcode/processors/M8C/data/languages/m8c.cspec ================================================ ================================================ FILE: pypcode/processors/M8C/data/languages/m8c.ldefs ================================================ Cypress M8C Microcontroller Family ================================================ FILE: pypcode/processors/M8C/data/languages/m8c.opinion ================================================ ================================================ FILE: pypcode/processors/M8C/data/languages/m8c.pspec ================================================ ================================================ FILE: pypcode/processors/M8C/data/languages/m8c.slaspec ================================================ # sleigh specification file for Cypress M8C define endian=big; define alignment=1; define space CODE type=ram_space size=2 default; define space RAM type=ram_space size=1; define space BANK0 type=ram_space size=1; define space BANK1 type=ram_space size=1; define space register type=register_space size=1; ################################################################ # Registers ################################################################ define register offset=0x00 size=1 [ A X SP F]; define register offset=0x10 size=2 [ PC ]; define register offset=0x30 size=4 [ contextreg ]; # individual bits within Flags Register F @define XIO "F[4,1]" # Extend I/O bank select @define S "F[3,1]" # Supervisor code @define C "F[2,1]" # Carry @define Z "F[1,1]" # Zero @define IE "F[0,1]" # Global Interrupt Enable define context contextreg regbank=(0,0) ; 
################################################################ # Tokens ################################################################ define token opcode (8) op8 = (0,7) op71 = (1,7) op73 = (3,7) op74 = (4,7) op20 = (0,2) op10 = (0,1) op0 = (0,0) ; define token data8 (8) addr8 = (0,7) imm8 = (0,7) simm8 = (0,7) signed rel = (0,7) signed rsb = (4,4) sign = (7,7) ; define token data16 (16) imm16 = (0,15) simm16 = (0,15) signed addr16 = (0,15) ; define token relinstr (16) op4 = (12,15) srel12 = (0,11) signed ; ################################################################ # Pseudo Instructions ################################################################ define pcodeop halt; define pcodeop nop; define pcodeop syscall; macro push8(val) { *[RAM]:1 SP = val; SP = SP + 1; } macro push16(val) { *[RAM]:2 SP = val; SP = SP + 2; } macro pop8(val) { SP = SP - 1; val = *[RAM]:1 SP; } macro pop16(val) { SP = SP - 2; val = *[RAM]:2 SP; } macro compflags(op1, op2) { tmp1:1 = op1; tmp2:1 = op2; $(C) = (tmp1 < tmp2); $(Z) = (tmp1 == tmp2); } macro testflags(op1, op2) { $(Z) = ((op1 & op2) == 0); } macro addflags(op1, op2) { t1:2 = zext(op1); t2:2 = zext(op2); tmp:2 = t1 + t2; $(C) = tmp > 255; $(Z) = (((op1 + op2) & 0xFF) == 0); } macro resultflags(result) { $(C) = result s< 0; $(Z) = result == 0; } macro zeroflag(result) { $(Z) = result == 0; } ################################################################ # Addressing tables ################################################################ regAorX: A is op0=0 & A { export A; } regAorX: X is op0=1 & X { export X; } Addr8: addr8 is addr8 { export *[RAM]:1 addr8; } SAddr8: addr8 is addr8 { export *[RAM]:1 addr8; } XAddr8: [X+simm8] is X & simm8 & sign=0 { ptr:1 = X + simm8; export *[RAM]:1 ptr; } XAddr8: [X+simm8] is X & simm8 & sign=1 { ptr:1 = X - ~simm8; export *[RAM]:1 ptr; } Addr8Incr: [Addr8]"++" is Addr8 { export Addr8; } RAddr8: addr8 is addr8 & regbank=0 { export *[BANK0]:1 addr8; } RAddr8: addr8 is addr8 & 
regbank=1 { export *[BANK1]:1 addr8; } RXAddr8: [X+simm8] is X & simm8 & regbank=0 { ptr:1 = X + simm8; export *[BANK0]:1 ptr; } RXAddr8: [X+simm8] is X & simm8 & regbank=1 { ptr:1 = X + simm8; export *[BANK1]:1 ptr; } Imm8: "#"imm8 is imm8 { export *[const]:1 imm8; } Addr16: addr16 is addr16 { export *:2 addr16; } RelAddr: reladdr is srel12 [ reladdr = inst_start + 1 + srel12; ] { export *:2 reladdr; } CallAddr: calladdr is srel12 [ calladdr = inst_start + 2 + srel12; ] { export *:2 calladdr; } IndexAddr: indexaddr is srel12 [ indexaddr = inst_start + 2 + srel12;] { export *[CODE]:2 indexaddr; } ################################################################ # Constructors ################################################################ :ADC A, Imm8 is op73=0x01 & op20=0x01 & A; Imm8 { A = A + Imm8 + $(C); resultflags(A); } :ADC A, Addr8 is op73=0x01 & op20=0x02 & A; Addr8 { A = A + Addr8 + $(C); resultflags(A); } :ADC A, XAddr8 is op73=0x01 & op20=0x03 & A; XAddr8 { A = A + XAddr8 + $(C); resultflags(A); } :ADC Addr8, A is op73=0x01 & op20=0x04 & A; Addr8 { tmp:1 = Addr8 + A + $(C); Addr8 = tmp; resultflags(tmp); } :ADC XAddr8, A is op73=0x01 & op20=0x05 & A; XAddr8 { tmp:1 = XAddr8 + A + $(C); XAddr8 = tmp; resultflags(tmp); } :ADC Addr8, Imm8 is op73=0x01 & op20=0x06; Addr8; Imm8 { tmp:1 = Addr8 + Imm8 + $(C); Addr8 = tmp; resultflags(tmp); } :ADC XAddr8, Imm8 is op73=0x01 & op20=0x07; XAddr8; Imm8 { tmp:1 = XAddr8 + Imm8 + $(C); XAddr8 = tmp; resultflags(tmp); } :ADD A, Imm8 is op73=0x00 & op20=0x01 & A; Imm8 { addflags(A, Imm8); A = A + Imm8; } :ADD A, Addr8 is op73=0x00 & op20=0x02 & A; Addr8 { addflags(A, Addr8); A = A + Addr8; } :ADD A, XAddr8 is op73=0x00 & op20=0x03 & A; XAddr8 { addflags(A, XAddr8); A = A + XAddr8; } :ADD Addr8, A is op73=0x00 & op20=0x04 & A; Addr8 { addflags(Addr8, A); Addr8 = Addr8 + A; } :ADD XAddr8, A is op73=0x00 & op20=0x05 & A; XAddr8 { addflags(XAddr8, A); XAddr8 = XAddr8 + A; } :ADD Addr8, Imm8 is op73=0x00 & op20=0x06; 
Addr8; Imm8 { addflags(Addr8, Imm8); Addr8 = Addr8 + Imm8; }
:ADD XAddr8, Imm8 is op73=0x00 & op20=0x07; XAddr8; Imm8 { addflags(XAddr8, Imm8); XAddr8 = XAddr8 + Imm8; }
# ADD SP, #imm: stack-pointer adjust; no flags written.
:ADD SP, simm8 is op8=0x38 & SP; simm8 { SP = SP + simm8; }

# AND: bitwise AND; Z is set from the result via zeroflag().
:AND A, Imm8 is op73=0x04 & op20=0x01 & A; Imm8 { A = A & Imm8; zeroflag(A); }
:AND A, Addr8 is op73=0x04 & op20=0x02 & A; Addr8 { A = A & Addr8; zeroflag(A); }
:AND A, XAddr8 is op73=0x04 & op20=0x03 & A; XAddr8 { A = A & XAddr8; zeroflag(A); }
:AND Addr8, A is op73=0x04 & op20=0x04 & A; Addr8 { tmp:1 = Addr8 & A; Addr8 = tmp; zeroflag(tmp); }
:AND XAddr8, A is op73=0x04 & op20=0x05 & A; XAddr8 { tmp:1 = XAddr8 & A; XAddr8 = tmp; zeroflag(tmp); }
:AND Addr8, Imm8 is op73=0x04 & op20=0x06; Addr8; Imm8 { tmp:1 = Addr8 & Imm8; Addr8 = tmp; zeroflag(tmp); }
:AND XAddr8, Imm8 is op73=0x04 & op20=0x07; XAddr8; Imm8 { tmp:1 = XAddr8 & Imm8; XAddr8 = tmp; zeroflag(tmp); }
:AND RAddr8, Imm8 is op8=0x41; RAddr8; Imm8 { tmp:1 = RAddr8 & Imm8; RAddr8 = tmp; zeroflag(tmp); }
:AND RXAddr8, Imm8 is op8=0x42; RXAddr8; Imm8 { tmp:1 = RXAddr8 & Imm8; RXAddr8 = tmp; zeroflag(tmp); }
# AND F, #imm may clear the register-bank select bit (F[4], XIO), so the
# disassembly context 'regbank' is masked with the immediate's bit 4 (rsb)
# and propagated to the following instruction via globalset.
:AND F, imm8 is op8=0x70 & F; imm8 & rsb [ regbank = regbank & rsb; globalset(inst_next, regbank); ] { F = F & imm8; }

# ASL: shift left one bit.
# NOTE(review): the M8C hardware also updates CF/ZF on ASL/ASR; flags are
# not modeled here - confirm against the M8C TRM before relying on them.
:ASL A is op8=0x64 & A { A = A << 1:1; }
:ASL Addr8 is op8=0x65; Addr8 { Addr8 = Addr8 << 1:1; }
:ASL XAddr8 is op8=0x66; XAddr8 { XAddr8 = XAddr8 << 1:1; }
# ASR: *arithmetic* shift right - bit 7 retains its value.
# BUGFIX: the original used the unsigned '>>' operator, which cleared the
# sign bit; 's>>' performs the sign-replicating shift ASR is defined to do.
:ASR A is op8=0x67 & A { A = A s>> 1:1; }
:ASR Addr8 is op8=0x68; Addr8 { Addr8 = Addr8 s>> 1:1; }
:ASR XAddr8 is op8=0x69; XAddr8 { XAddr8 = XAddr8 s>> 1:1; }

# CALL: push 16-bit return address, then transfer control.
:CALL CallAddr is op4=0x9 & CallAddr { ret:2 = inst_next; push16(ret); call CallAddr; }

# CMP: subtract without storing; compflags() sets C (borrow) and Z.
:CMP A, Imm8 is op8=0x39 & A; Imm8 { compflags(A, Imm8); }
:CMP A, Addr8 is op8=0x3A & A; Addr8 { compflags(A, Addr8); }
:CMP A, XAddr8 is op8=0x3B & A; XAddr8 { compflags(A, XAddr8); }
:CMP Addr8, Imm8 is op8=0x3C; Addr8; Imm8 { compflags(Addr8, Imm8); }
:CMP XAddr8, Imm8 is op8=0x3D; XAddr8; Imm8 { temp:1 = XAddr8; compflags(temp, Imm8); }

# CPL: one's-complement of A.
:CPL A is op8=0x73 & A { A
= ~A; } :DEC regAorX is op71=0x3C & regAorX { regAorX = regAorX - 1:1; resultflags(regAorX); } :DEC Addr8 is op71=0x3D & op0=0; Addr8 { Addr8 = Addr8 - 1:1; resultflags(Addr8); } :DEC XAddr8 is op71=0x3D & op0=1; XAddr8 { XAddr8 = XAddr8 - 1:1; resultflags(XAddr8); } :HALT is op8=0x30 { halt(); } :INC regAorX is op71=0x3A & regAorX { addflags(regAorX, 1:1); regAorX = regAorX + 1:1; } :INC Addr8 is op71=0x3B & op0=0; Addr8 { addflags(Addr8, 1:1); Addr8 = Addr8 + 1:1; } :INC XAddr8 is op71=0x3B & op0=1; XAddr8 { addflags(XAddr8, 1:1); XAddr8 = XAddr8 + 1:1; } :INDEX IndexAddr is op4=0xF & IndexAddr & srel12 { ptr:2 = inst_start + 2 + srel12 + zext(A); A = *[CODE]:1 ptr; } :JACC RelAddr is op4=0xE & RelAddr { tmp:2 = sext(A); target:2 = RelAddr + sext(A); goto [target]; } :JC RelAddr is op4=0xC & RelAddr { if ($(C) != 0) goto RelAddr; } :JMP RelAddr is op4=0x8 & RelAddr { goto RelAddr; } :JNC RelAddr is op4=0xD & RelAddr { if ($(C) == 0) goto RelAddr; } :JNZ RelAddr is op4=0xB & RelAddr { if ($(Z) == 0) goto RelAddr; } :JZ RelAddr is op4=0xA & RelAddr { if ($(Z) == 1) goto RelAddr; } :LCALL Addr16 is op8=0x7C; Addr16 { ret:2 = inst_next; push16(ret); call Addr16; } :LJMP Addr16 is op8=0x7D; Addr16 { goto Addr16; } :MOV X, SP is op8=0x4F & X & SP { X = SP; } :MOV A, Imm8 is op8=0x50 & A; Imm8 { A = Imm8; zeroflag(A); } :MOV A, Addr8 is op8=0x51 & A; Addr8 { A = Addr8; zeroflag(A); } :MOV A, XAddr8 is op8=0x52 & A; XAddr8 { A = XAddr8; zeroflag(A); } :MOV Addr8, A is op8=0x53 & A; Addr8 { Addr8 = A; } :MOV XAddr8, A is op8=0x54 & A; XAddr8 { XAddr8 = A; } :MOV Addr8, Imm8 is op8=0x55; Addr8; Imm8 { Addr8 = Imm8; } :MOV XAddr8, Imm8 is op8=0x56; XAddr8; Imm8 { XAddr8 = Imm8; } :MOV X, Imm8 is op8=0x57 & X; Imm8 { X = Imm8; } :MOV X, Addr8 is op8=0x58 & X; Addr8 { X = Addr8; } :MOV X, XAddr8 is op8=0x59 & X; XAddr8 { X = XAddr8; } :MOV Addr8, X is op8=0x5A & X; Addr8 { Addr8 = X; } :MOV A, X is op8=0x5B & A & X { A = X; zeroflag(A); } :MOV X, A is op8=0x5C & A & X { X = 
A; } :MOV A, RAddr8 is op8=0x5D & A; RAddr8 { A = RAddr8; zeroflag(A); } :MOV A, RXAddr8 is op8=0x5E & A; RXAddr8 { A = RXAddr8; zeroflag(A); } :MOV Addr8, SAddr8 is op8=0x5F; Addr8; SAddr8 { Addr8 = SAddr8; } :MOV RAddr8, A is op8=0x60 & A; RAddr8 { RAddr8 = A; } :MOV RXAddr8, A is op8=0x61 & A; RXAddr8 { RXAddr8 = A; } :MOV RAddr8, Imm8 is op8=0x62; RAddr8; Imm8 { RAddr8 = Imm8; } :MOV RXAddr8, Imm8 is op8=0x63; RXAddr8; Imm8 { RXAddr8 = Imm8; } :MVI A, Addr8Incr is op8=0x3E & A; Addr8Incr { ptr:1 = Addr8Incr; A = *[RAM]:1 ptr; zeroflag(A); Addr8Incr = ptr + 1:1; } :MVI Addr8Incr, A is op8=0x3F & A; Addr8Incr { ptr:1 = Addr8Incr; *[RAM]:1 ptr = A; Addr8Incr = ptr + 1:1; } :NOP is op8=0x40 { nop(); } :OR A, Imm8 is op73=0x05 & op20=0x01 & A; Imm8 { A = A | Imm8; zeroflag(A); } :OR A, Addr8 is op73=0x05 & op20=0x02 & A; Addr8 { A = A | Addr8; zeroflag(A); } :OR A, XAddr8 is op73=0x05 & op20=0x03 & A; XAddr8 { A = A | XAddr8; zeroflag(A); } :OR Addr8, A is op73=0x05 & op20=0x04 & A; Addr8 { tmp:1 = Addr8 | A; zeroflag(tmp); Addr8 = tmp; } :OR XAddr8, A is op73=0x05 & op20=0x05 & A; XAddr8 { tmp:1 = XAddr8 | A; zeroflag(tmp); XAddr8 = tmp; } :OR Addr8, Imm8 is op73=0x05 & op20=0x06; Addr8; Imm8 { tmp:1 = Addr8 | Imm8; zeroflag(tmp); Addr8 = tmp; } :OR XAddr8, Imm8 is op73=0x05 & op20=0x07; XAddr8; Imm8 { tmp:1 = XAddr8 | Imm8; zeroflag(tmp); XAddr8 = tmp; } :OR RAddr8, Imm8 is op8=0x43; RAddr8; Imm8 { tmp:1 = RAddr8 | Imm8; zeroflag(tmp); RAddr8 = tmp; } :OR RXAddr8, Imm8 is op8=0x44; RXAddr8; Imm8 { tmp:1 = RXAddr8 | Imm8; zeroflag(tmp); RXAddr8 = tmp; } :OR F, imm8 is op8=0x71 & F; imm8 & rsb [ regbank = regbank | rsb; globalset(inst_next, regbank); ] { F = F | imm8; } :POP X is op8=0x20 & X { pop8(X); } :POP A is op8=0x18 & A { pop8(A); } :PUSH X is op8=0x10 & X { push8(X); } :PUSH A is op8=0x08 & A { push8(A); } :RETI is op8=0x7E { pc:2 = 0; pop16(pc); return[pc]; } :RET is op8=0x7F { pc:2 = 0; pop16(pc); return[pc]; } :RLC A is op8=0x6A & A { c:1 = (A & 0x80) >> 
7:1; A = (A << 1:1) | $(C); $(C) = c; } :RLC Addr8 is op8=0x6B; Addr8 { tmp:1 = Addr8; c:1 = (tmp & 0x80) >> 7:1; Addr8 = (tmp << 1) | $(C); $(C) = c; } :RLC XAddr8 is op8=0x6C; XAddr8 { tmp:1 = XAddr8; c:1 = (tmp & 0x80) >> 7:1; XAddr8 = (tmp << 1) | $(C); $(C) = c; } :ROMX is op8=0x28 { msb:2 = zext(A) << 8:1; ptr:2 = msb | zext(X); A = *[CODE]:1 ptr; zeroflag(A); } :RRC A is op8=0x6D & A { c:1 = A & 0x01:1; A = (A >> 1) | ($(C) << 7:1); $(C) = c; } :RRC Addr8 is op8=0x6E; Addr8 { tmp:1 = Addr8; c:1 = tmp & 0x01:1; Addr8 = (tmp >> 1:1) | ($(C) << 7:1); $(C) = c; } :RRC XAddr8 is op8=0x6F; XAddr8 { tmp:1 = XAddr8; c:1 = tmp & 0x01; XAddr8 = (tmp >> 1:1) | ($(C) << 7:1); $(C) = c; } :SBB A, Imm8 is op73 = 0x03 & op20=0x01 & A; Imm8 { A = A - (Imm8 + $(C)); resultflags(A); } :SBB A, Addr8 is op73 = 0x03 & op20=0x02 & A; Addr8 { A = A - (Addr8 + $(C)); resultflags(A); } :SBB A, XAddr8 is op73 = 0x03 & op20=0x03 & A; XAddr8 { A = A - (XAddr8 + $(C)); resultflags(A); } :SBB Addr8, A is op73 = 0x03 & op20=0x04 & A; Addr8 { tmp:1 = Addr8 - (A + $(C)); resultflags(tmp); Addr8 = tmp; } :SBB XAddr8, A is op73 = 0x03 & op20=0x05 & A; XAddr8 { tmp:1 = XAddr8 - (A + $(C)); resultflags(tmp); XAddr8 = tmp; } :SBB Addr8, Imm8 is op73 = 0x03 & op20=0x06; Addr8; Imm8 { tmp:1 = Addr8 - (Imm8 + $(C)); resultflags(tmp); Addr8 = tmp; } :SBB XAddr8, Imm8 is op73 = 0x03 & op20=0x07; XAddr8; Imm8 { local tmp = XAddr8 - (Imm8 + $(C)); resultflags(tmp); XAddr8 = tmp; } :SSC is op8=0x00 { syscall(A); } :SUB A, Imm8 is op73 = 0x02 & op20=0x01 & A; Imm8 { A = A - Imm8; resultflags(A); } :SUB A, Addr8 is op73 = 0x02 & op20=0x02 & A; Addr8 { A = A - Addr8; resultflags(A); } :SUB A, XAddr8 is op73 = 0x02 & op20=0x03 & A; XAddr8 { A = A - XAddr8; resultflags(A); } :SUB Addr8, A is op73 = 0x02 & op20=0x04 & A; Addr8 { tmp:1 = Addr8 - A; resultflags(tmp); Addr8 = tmp; } :SUB XAddr8, A is op73 = 0x02 & op20=0x05 & A; XAddr8 { tmp:1 = XAddr8 - A; resultflags(tmp); XAddr8 = tmp; } :SUB Addr8, Imm8 is 
op73 = 0x02 & op20=0x06; Addr8; Imm8 { tmp:1 = Addr8 - Imm8; resultflags(tmp); Addr8 = tmp; }
:SUB XAddr8, Imm8 is op73 = 0x02 & op20=0x07; XAddr8; Imm8 { tmp:1 = XAddr8 - Imm8; resultflags(tmp); XAddr8 = tmp; }

# SWAP: exchange the two operands; no flags written.
:SWAP A, X is op8=0x4B & A & X { tmp:1 = A; A = X; X = tmp; }
:SWAP regAorX, Addr8 is op71=0x26 & regAorX; Addr8 { tmp:1 = regAorX; regAorX = Addr8; Addr8 = tmp; }
:SWAP A, SP is op8=0x4E & A & SP { tmp:1 = A; A = SP; SP = tmp; }

# TST: AND operand with immediate, set Z from the result, discard the result.
:TST Addr8, Imm8 is op8=0x47; Addr8; Imm8 { tmp:1 = Addr8; testflags(tmp, Imm8); }
:TST XAddr8, Imm8 is op8=0x48; XAddr8; Imm8 { tmp:1 = XAddr8; testflags(tmp, Imm8); }
:TST RAddr8, Imm8 is op8=0x49; RAddr8; Imm8 { tmp:1 = RAddr8; testflags(tmp, Imm8); }
:TST RXAddr8, Imm8 is op8=0x4A; RXAddr8; Imm8 { tmp:1 = RXAddr8; testflags(tmp, Imm8); }

# XOR: bitwise exclusive-OR; Z is set from the result via zeroflag().
:XOR A, Imm8 is op73=0x06 & op20=0x01 & A; Imm8 { A = A ^ Imm8; zeroflag(A); }
:XOR A, Addr8 is op73=0x06 & op20=0x02 & A; Addr8 { A = A ^ Addr8; zeroflag(A); }
:XOR A, XAddr8 is op73=0x06 & op20=0x03 & A; XAddr8 { A = A ^ XAddr8; zeroflag(A); }
# BUGFIX: Z must be computed from the XOR result (tmp), not from the
# pre-update destination value - every sibling AND/OR/XOR read-modify-write
# form in this file calls zeroflag() on the result.
:XOR Addr8, A is op73=0x06 & op20=0x04 & A; Addr8 { tmp:1 = Addr8 ^ A; zeroflag(tmp); Addr8 = tmp; }
:XOR XAddr8, A is op73=0x06 & op20=0x05 & A; XAddr8 { tmp:1 = XAddr8 ^ A; zeroflag(tmp); XAddr8 = tmp; }
:XOR Addr8, Imm8 is op73=0x06 & op20=0x06; Addr8; Imm8 { tmp:1 = Addr8 ^ Imm8; zeroflag(tmp); Addr8 = tmp; }
:XOR XAddr8, Imm8 is op73=0x06 & op20=0x07; XAddr8; Imm8 { tmp:1 = XAddr8 ^ Imm8; zeroflag(tmp); XAddr8 = tmp; }
:XOR RAddr8, Imm8 is op8=0x45; RAddr8; Imm8 { tmp:1 = RAddr8 ^ Imm8; zeroflag(tmp); RAddr8 = tmp; }
:XOR RXAddr8, Imm8 is op8=0x46; RXAddr8; Imm8 { tmp:1 = RXAddr8 ^ Imm8; zeroflag(tmp); RXAddr8 = tmp; }
# XOR into the flag register may toggle the register-bank select bit (F[4]),
# so the disassembly context 'regbank' is XORed with the immediate's bit 4
# (rsb) and propagated to the following instruction via globalset.
:XOR F, imm8 is op8=0x72 & F; imm8 & rsb [ regbank = regbank ^ rsb; globalset(inst_next, regbank); ] { tmp:1 = F ^ imm8; resultflags(tmp); F = tmp; }
================================================ FILE: pypcode/processors/MC6800/data/languages/6800.ldefs ================================================
6809 Microprocessor Hitachi
6309 Microprocessor, extension of 6809, 6309 addressing modes, missing many instructions ================================================ FILE: pypcode/processors/MC6800/data/languages/6805.cspec ================================================ ================================================ FILE: pypcode/processors/MC6800/data/languages/6805.ldefs ================================================ 6805 Microcontroller Family ================================================ FILE: pypcode/processors/MC6800/data/languages/6805.pspec ================================================ ================================================ FILE: pypcode/processors/MC6800/data/languages/6805.slaspec ================================================ # sleigh specification file for Motorola 6805 define endian=big; define alignment=1; define space RAM type=ram_space size=2 default; define space register type=register_space size=1; define register offset=0x00 size=1 [ A X ]; define register offset=0x20 size=2 [ PC SP]; define register offset=0x30 size=1 [H I N Z C]; # status bits define RAM offset=0x3ffc size=2 [ SWI_VECTOR ]; #TOKENS define token opbyte (8) op = (0,7) op4_7 = (4,7) op4_6 = (4,6) n = (1,3) bit_0 = (0,0) ; define token data8 (8) imm8 = (0,7) rel = (0,7) signed ; define token data (16) imm16 = (0,15) ; ################################################################ # Pseudo Instructions ################################################################ define pcodeop readIRQ; ################################################################ REL: reloc is rel [ reloc = inst_next + rel; ] { export *:2 reloc; } OP1: "#"imm8 is op4_6=2; imm8 { tmp:1 = imm8; export tmp; } OP1: imm8 is op4_6=3; imm8 { export *:1 imm8; } OP1: imm16 is op4_6=4; imm16 { export *:1 imm16; } OP1: imm16,X is op4_6=5 & X; imm16 { tmp:2 = imm16 + zext(X); export *:1 tmp; } OP1: imm8,X is op4_6=6 & X; imm8 { tmp:2 = imm8 + zext(X); export *:1 tmp; } OP1: ","X is op4_6=7 & X { tmp:2 = zext(X); export *:1 
tmp; } ADDR: imm8 is op4_6=3; imm8 { export *:1 imm8; } ADDR: imm16 is op4_6=4; imm16 { export *:1 imm16; } ADDRI: imm16,X is op4_6=5 & X; imm16 { tmp:2 = imm16 + zext(X); export *:1 tmp; } ADDRI: imm8,X is op4_6=6 & X; imm8 { tmp:2 = imm8 + zext(X); export *:1 tmp; } ADDRI: ","X is op4_6=7 & X { tmp:2 = zext(X); export *:1 tmp; } DIRECT: imm8 is imm8 { export *:1 imm8; } :ADC OP1 is (op=0xA9 | op=0xB9 | op=0xC9 | op=0xD9 | op=0xE9 | op=0xF9) ... & OP1 { local op1 = OP1; # compute half carry local halfop1 = op1 & 0xF; local halfA = A & 0xF; local halfresult = halfop1 + halfA + C; H = (halfresult >> 4) & 1; local result = A + op1; local tmpC = carry(A, op1); A = result + C; C = carry(result, C); C = C | tmpC; Z = (A == 0); N = (A s< 0); } :ADD OP1 is (op=0xAB | op=0xBB | op=0xCB | op=0xDB | op=0xEB | op=0xFB) ... & OP1 { local op1 = OP1; # compute half carry local halfop1 = op1 & 0xF; local halfA = A & 0xF; local halfresult = halfop1 + halfA; H = (halfresult >> 4) & 1; C = carry(A, op1); A = A + op1; Z = (A == 0); N = (A s< 0); } :AND OP1 is (op=0xA4 | op=0xB4 | op=0xC4 | op=0xD4 | op=0xE4 | op=0xF4) ... & OP1 { A = A & OP1; Z = (A == 0); N = (A s< 0); } :ASLA is op=0x48 { C = A >> 7; A = A << 1; Z = (A == 0); N = (A s< 0); } :ASLX is op=0x58 { C = X >> 7; X = X << 1; Z = (X == 0); N = (X s< 0); } :ASL OP1 is (op=0x38 | op=0x68 | op=0x78) ... & OP1 { local tmp = OP1; C = tmp >> 7; tmp = tmp << 1; OP1 = tmp; Z = (tmp == 0); N = (tmp s< 0); } :ASRA is op=0x47 { C = A & 1; A = A s>> 1; Z = (A == 0); N = (A s< 0); } :ASRX is op=0x57 { C = X & 1; X = X s>> 1; Z = (X == 0); N = (X s< 0); } :ASR OP1 is (op=0x37 | op=0x67 | op=0x77) ... 
& OP1 { local tmp = OP1; C = tmp & 1; tmp = tmp s>> 1; OP1 = tmp; Z = (tmp == 0); N = (tmp s< 0); } :BCC REL is op=0x24;REL { if (C == 0) goto REL; } :BCLR n,DIRECT is op4_7=1 & bit_0=1 & n; DIRECT { local mask = ~(1 << n); DIRECT = DIRECT & mask; } :BCS REL is op=0x25;REL { if (C) goto REL; } :BEQ REL is op=0x27;REL { if (Z) goto REL; } :BHCC REL is op=0x28;REL { if (H == 0) goto REL; } :BHCS REL is op=0x29;REL { if (H) goto REL; } :BHI REL is op=0x22;REL { local tmp = C || Z; if (tmp == 0) goto REL; } #:BHS REL is op=0x24;REL See BCC :BIH REL is op=0x2F;REL { tmp:1 = readIRQ(); if (tmp) goto REL; } :BIL REL is op=0x2E;REL { tmp:1 = readIRQ(); if (tmp == 0) goto REL; } :BIT OP1 is (op=0xA5 | op=0xB5 | op=0xC5 | op=0xD5 | op=0xE5 | op=0xF5) ... & OP1 { local result = A & OP1; Z = (result == 0); N = (result s< 0); } #:BLO REL is op=0x25;REL see BCS :BLS REL is op=0x23;REL { local tmp = C || Z; if (tmp) goto REL; } :BMC REL is op=0x2C;REL { if (I == 0) goto REL; } :BMI REL is op=0x2B;REL { if (N) goto REL; } :BMS REL is op=0x2D;REL { if (I) goto REL; } :BNE REL is op=0x26;REL { if (Z == 0) goto REL; } :BPL REL is op=0x2A;REL { if (N == 0) goto REL; } :BRA REL is op=0x20;REL { goto REL; } :BRN REL is op=0x21;REL { } :BRCLR n,DIRECT,REL is op4_7=0 & bit_0=1 & n; DIRECT; REL { local mask = (1 << n); local result = DIRECT & mask; if (result == 0) goto REL; } :BRSET n,DIRECT,REL is op4_7=0 & bit_0=0 & n; DIRECT; REL { local mask = (1 << n); local result = DIRECT & mask; if (result != 0) goto REL; } :BSET n,DIRECT is op4_7=1 & bit_0=0 & n; DIRECT { local mask = (1 << n); DIRECT = DIRECT | mask; } :BSR REL is op=0xAD; REL { SP=SP-1; *:2 SP = inst_next; SP=SP-1; call REL; } :CLC is op=0x98 { C = 0; } :CLI is op=0x9A { I = 0; } :CLRA is op=0x4F { A = 0; Z = 1; N = 0; } :CLRX is op=0x5F { X = 0; Z = 1; N = 0; } :CLR OP1 is (op=0x3F | op=0x6F | op=0x7F) ... & OP1 { OP1 = 0; Z = 1; N = 0; } :CMP OP1 is (op=0xA1 | op=0xB1 | op=0xC1 | op=0xD1 | op=0xE1 | op=0xF1) ... 
& OP1 { local op1 = OP1; local tmp = A - op1; Z = tmp == 0; N = tmp s< 0; C = (A < op1); } :COMA is op=0x43 { A = ~A; Z = (A == 0); N = (A s< 0); C = 1; } :COMX is op=0x53 { X = ~X; Z = (X == 0); N = (X s< 0); C = 1; } :COM OP1 is (op=0x33 | op=0x63 | op=0x73) ... & OP1 { local tmp = ~OP1; OP1 = tmp; Z = (tmp == 0); N = (tmp s< 0); C = 1; } :CPX OP1 is (op=0xA3 | op=0xB3 | op=0xC3 | op=0xD3 | op=0xE3 | op=0xF3) ... & OP1 { local op1 = OP1; local tmp = X - op1; Z = tmp == 0; N = tmp s< 0; C = (X < op1); } :DECA is op=0x4A { A = A - 1; Z = (A == 0); N = (A s< 0); } :DECX is op=0x5A { X = X - 1; Z = (X == 0); N = (X s< 0); } :DEC OP1 is (op=0x3A | op=0x6A | op=0x7A) ... & OP1 { local tmp = OP1 - 1; OP1 = tmp; Z = (tmp == 0); N = (tmp s< 0); } :EOR OP1 is (op=0xA8 | op=0xB8 | op=0xC8 | op=0xD8 | op=0xE8 | op=0xF8) ... & OP1 { local op1 = OP1; A = A ^ op1; Z = A == 0; N = A s< 0; } :INCA is op=0x4C { A = A + 1; Z = (A == 0); N = (A s< 0); } :INCX is op=0x5C { X = X + 1; Z = (X == 0); N = (X s< 0); } :INC OP1 is (op=0x3C | op=0x6C | op=0x7C) ... & OP1 { local tmp = OP1 + 1; OP1 = tmp; Z = (tmp == 0); N = (tmp s< 0); } :JMP ADDR is (op=0xBC | op=0xCC) ... & ADDR { goto ADDR; } :JMP ADDRI is (op=0xDC | op=0xEC | op=0xFC) ... & ADDRI { goto [ADDRI]; } :JSR ADDR is (op=0xBD | op=0xCD) ... & ADDR { *:2 (SP-1) = inst_next; SP=SP-2; call ADDR; } :JSR ADDRI is (op=0xDD | op=0xED | op=0xFD) ... & ADDRI { *:2 (SP-1) = inst_next; SP=SP-2; call [ADDRI]; } :LDA OP1 is (op=0xA6 | op=0xB6 | op=0xC6 | op=0xD6 | op=0xE6 | op=0xF6) ... & OP1 { A = OP1; Z = A == 0; N = A s< 0; } :LDX OP1 is (op=0xAE | op=0xBE | op=0xCE | op=0xDE | op=0xEE | op=0xFE) ... & OP1 { X = OP1; Z = X == 0; N = X s< 0; } ## Logical Shift left is same as arithmetic shift left #:LSLA is op=0x48 #:LSLX is op=0x58 #:LSL OP1 is (op=0x38 | op=0x68 | op=0x78) ... 
& OP1 :LSRA is op=0x44 { C = A & 1; A = A >> 1; Z = (A == 0); N = 0; } :LSRX is op=0x54 { C = X & 1; X = X >> 1; Z = (X == 0); N = 0; } :LSR OP1 is (op=0x34 | op=0x64 | op=0x74) ... & OP1 { local tmp = OP1; C = tmp & 1; tmp = tmp >> 1; OP1 = tmp; Z = (tmp == 0); N = 0; } :MUL is op=0x42 { op1:2 = zext(A); op2:2 = zext(X); local result = op1 * op2; A = result:1; result = result >> 8; X = result:1; } :NEGA is op=0x40 { C = A != 0; A = -A; Z = (A == 0); N = (A s< 0); } :NEGX is op=0x50 { C = X != 0; X = -X; Z = (X == 0); N = (X s< 0); } :NEG OP1 is (op=0x30 | op=0x60 | op=0x70) ... & OP1 { local op1 = OP1; C = op1 != 0; OP1 = -op1; Z = (op1 == 0); N = (op1 s< 0); } :NOP is op = 0x9D { } :ORA OP1 is (op=0xAA | op=0xBA | op=0xCA | op=0xDA | op=0xEA | op=0xFA) ... & OP1 { A = A | OP1; Z = (A == 0); N = (A s< 0); } :ROLA is op=0x49 { local tmp = C ; C = A >> 7; A = A << 1; A = A | tmp; Z = (A == 0); N = (A s< 0); } :ROLX is op=0x59 { local tmp = C; C = X >> 7; X = X << 1; X = X | tmp; Z = (X == 0); N = (X s< 0); } :ROL OP1 is (op=0x39 | op=0x69 | op=0x79) ... & OP1 { local tmpC = C; local op1 = OP1; C = op1 >> 7; local result = op1 << 1; result = result | tmpC; OP1 = result; Z = (result == 0); N = (result s< 0); } :RORA is op=0x46 { local tmpC = C << 7; C = A & 1; A = A s>> 1; A = A | tmpC; Z = (A == 0); N = (A s< 0); } :RORX is op=0x56 { local tmpC = C << 7; C = X & 1; X = X s>> 1; X = X | tmpC; Z = (X == 0); N = (X s< 0); } :ROR OP1 is (op=0x36 | op=0x66 | op=0x76) ... 
& OP1 { local tmpC = C << 7; local tmp = OP1; C = tmp & 1; tmp = tmp s>> 1; tmp = tmp | tmpC; OP1 = tmp; Z = (tmp == 0); N = (tmp s< 0); } :RSP is op = 0x9C { SP = 0xff; } :RTI is op = 0x80 { SP = SP+1; local ccr = *:1 SP; H = ccr[4,1]; I = ccr[3,1]; N = ccr[2,1]; Z = ccr[1,1]; C = ccr[0,1]; SP = SP+1; A = *:1 SP; SP = SP+1; X = *:1 SP; SP = SP+1; tmp:2 = *:2 SP; SP = SP+1; return [tmp]; } :RTS is op = 0x81 { SP = SP+1; tmp:2 = *:2 SP; SP = SP+1; return [tmp]; } :SBC OP1 is (op=0xA2 | op=0xB2 | op=0xC2 | op=0xD2 | op=0xE2 | op=0xF2) ... & OP1 { local op1 = OP1; local tmp = A - op1 - C; Z = tmp == 0; N = tmp s< 0; C = ((A <= op1) * C) | (A < op1); A = tmp; } :SEC is op = 0x99 { C = 1; } :SEI is op = 0x9B { I = 1; } :STA OP1 is (op=0xB7 | op=0xC7 | op=0xD7 | op=0xE7 | op=0xF7) ... & OP1 { OP1 = A; Z = A == 0; N = A s< 0; } :STOP is op=0x8E { I = 0; } :STX OP1 is (op=0xBF | op=0xCF | op=0xDF | op=0xEF | op=0xFF) ... & OP1 { OP1 = X; Z = X == 0; N = X s< 0; } :SUB OP1 is (op=0xA0 | op=0xB0 | op=0xC0 | op=0xD0 | op=0xE0 | op=0xF0) ... & OP1 { local op1 = OP1; C = (A < op1); A = A - op1; Z = A == 0; N = A s< 0; A = A; } :SWI is op=0x83 { SP=SP-1; *:2 SP = inst_next; SP=SP-1; *:1 SP = X; SP=SP-1; *:1 SP = A; tmp:1 = 0b11100000 | (H << 4) | (I << 3) | (N << 2) | ( Z << 1) | C; SP=SP-1; *:1 SP = tmp; I = 1; call [SWI_VECTOR]; } :TAX is op=0x97 { X = A; } :TSTA is op=0x4D { Z = (A == 0); N = (A s< 0); } :TSTX is op=0x5D { Z = (X == 0); N = (X s< 0); } :TST OP1 is (op=0x3D | op=0x6D | op=0x7D) ... 
# Tail of the MC6805 ":TST OP1" constructor from the previous line.
& OP1 { local op1 = OP1; Z = (op1 == 0); N = (op1 s< 0); }
:TXA is op=0x9F { A = X; }
:WAIT is op=0x8f { I = 0; }
================================================ FILE: pypcode/processors/MC6800/data/languages/6809.cspec ================================================
================================================ FILE: pypcode/processors/MC6800/data/languages/6809.pspec ================================================
================================================ FILE: pypcode/processors/MC6800/data/languages/6809.slaspec ================================================
# sleigh specification file for Motorola 6809
@define M6809 ""
@include "6x09.sinc"
@include "6x09_push.sinc"
@include "6x09_pull.sinc"
@include "6x09_exg_tfr.sinc"
================================================ FILE: pypcode/processors/MC6800/data/languages/6x09.sinc ================================================
# sleigh specification file for Motorola 6809/Hitachi 6309
define endian=big;
define alignment=1;
# Interrupt/reset vector locations at the top of the address space.
@define SWI3_VECTOR "0xFFF2"
@define SWI2_VECTOR "0xFFF4"
@define FIRQ_VECTOR "0xFFF6"
@define IRQ_VECTOR "0xFFF8"
@define SWI_VECTOR "0xFFFA"
@define NMI_VECTOR "0xFFFC"
@define RST_VECTOR "0xFFFE"
define space RAM type=ram_space size=2 default;
define space register type=register_space size=1;
@ifdef H6309
# 8-bit registers A, B, E, F, MD
define register offset=0 size=1 [ A B E F MD ];
# 16-bit registers D, W
define register offset=0 size=2 [ D W ];
# 16-bit register V
define register offset=12 size=2 [ V ];
# 32-bit register Q
define register offset=0 size=4 [ Q ];
@else
# 8-bit registers A, B
define register offset=0 size=1 [ A B ];
# 16-bit register D
define register offset=0 size=2 [ D ];
@endif
# 8-bit condition code register, direct page register
define register offset=8 size=1 [ CC DP ];
# 16-bit registers:
# PC: Program counter
# S: Stack pointer
# U: alternate stack pointer/index register
# X,Y: index register
define register offset=16 size=2 [ PC X Y U S ];
# Pseudo registers used for EXG instruction.
define register offset=32 size=2 [ exg16_r0 exg16_r1 ];
define register offset=32 size=1 [ exg8h_r0 exg8l_r0 exg8h_r1 exg8l_r1 ];
# define status bits: (See also 8051/z80).
@define C "CC[0,1]" # C: Carry (or borrow) flag
@define V "CC[1,1]" # V: Overflow flag
@define Z "CC[2,1]" # Z: Zero result
@define N "CC[3,1]" # N: Negative result (twos complement)
@define I "CC[4,1]" # I: IRQ interrupt masked
@define H "CC[5,1]" # H: Half carry flag
@define F "CC[6,1]" # F: FIRQ interrupt masked
@define E "CC[7,1]" # E: Entire register state stacked
# Instruction tokens: opcode byte, postbyte/operand byte, 16-bit operand.
define token opbyte (8)
   op = (0,7)
   op45 = (4,5)
   op47 = (4,7)
;
define token data8 (8)
   imm8 = (0,7)
   simm8 = (0,7) signed
   simm5 = (0,4) signed
   idxMode = (0,4)
   noOffset5 = (7,7)
   idxReg = (5,6)
   imm80 = (0,0)
   imm81 = (1,1)
   imm82 = (2,2)
   imm83 = (3,3)
   imm84 = (4,4)
   imm85 = (5,5)
   imm86 = (6,6)
   imm87 = (7,7)
   reg0_exg = (4,7)
   reg1_exg = (0,3)
;
define token data (16)
   imm16 = (0,15)
   simm16 = (0,15) signed
;
attach variables [ idxReg ] [ X Y U S ];
# ---------------------------------------------------------------
# EA: effective-address table for the 6809/6309 indexed postbyte.
# Each alternative decodes one idxMode encoding and exports the
# computed 16-bit address (indirect forms dereference it once).
# ---------------------------------------------------------------
EA: simm5,idxReg is simm5 & idxReg & noOffset5=0 { local offs:1 = simm5; local addr:2 = idxReg + sext(offs); export addr; }
EA: ","^idxReg is idxReg & noOffset5=1 & idxMode=0b00100 # no offset
{ local addr:2 = idxReg; export addr; }
EA: simm8,idxReg is idxReg & noOffset5=1 & idxMode=0b01000; simm8 # 8-bit offset
{ local addr:2 = idxReg + simm8; export addr; }
EA: simm16,idxReg is idxReg & noOffset5=1 & idxMode=0b01001; simm16 # 16-bit offset
{ local addr:2 = idxReg + simm16; export addr; }
@ifdef H6309
EA: ","^W is idxReg=0b00 & noOffset5=1 & idxMode=0b01111 & W # no offset
{ local addr:2 = W; export addr; }
EA: simm16,W is idxReg=0b01 & noOffset5=1 & idxMode=0b01111; simm16 & W # 16-bit offset
{ local addr:2 = W + simm16; export addr; }
@endif # H6309
EA: A,idxReg is idxReg & noOffset5=1 & idxMode=0b00110 & A # A,R
{ local addr:2 = idxReg + sext(A); export addr; }
EA: B,idxReg is idxReg & noOffset5=1 & idxMode=0b00101 & B # B,R
{ local addr:2 = idxReg + sext(B); export addr; }
EA: D,idxReg is idxReg & noOffset5=1 & idxMode=0b01011 & D # D,R
{ local addr:2 = idxReg + D; export addr; }
@ifdef H6309
EA: E,idxReg is idxReg & noOffset5=1 & idxMode=0b00111 & E # E,R
{ local addr:2 = idxReg + sext(E); export addr; }
EA: F,idxReg is idxReg & noOffset5=1 & idxMode=0b01010 & F # F,R
{ local addr:2 = idxReg + sext(F); export addr; }
EA: W,idxReg is idxReg & noOffset5=1 & idxMode=0b01110 & W # W,R
{ local addr:2 = idxReg + W; export addr; }
@endif # H6309
# Auto-increment/decrement forms (side effect on the index register).
EA: ","^idxReg^"+" is idxReg & noOffset5=1 & idxMode=0b00000 # ,R+
{ addr:2 = idxReg; idxReg = idxReg + 1; export addr; }
EA: ","^idxReg^"++" is idxReg & noOffset5=1 & idxMode=0b00001 # ,R++
{ local addr:2 = idxReg; idxReg = idxReg + 2; export addr; }
EA: ",-"^idxReg is idxReg & noOffset5=1 & idxMode=0b00010 # ,-R
{ idxReg = idxReg - 1; local addr:2 = idxReg; export addr; }
EA: ",--"^idxReg is idxReg & noOffset5=1 & idxMode=0b00011 # ,--R
{ idxReg = idxReg - 2; local addr:2 = idxReg; export addr; }
@ifdef H6309
EA: ","^W^"++" is idxReg=0b10 & noOffset5=1 & idxMode=0b01111 & W # ,W++
{ local addr:2 = W; W = W + 2; export addr; }
EA: ",--"^W is idxReg=0b11 & noOffset5=1 & idxMode=0b01111 & W # ,--W
{ W = W - 2; local addr:2 = W; export addr; }
@endif # H6309
# PC-relative forms; the offset is resolved at disassembly time.
EA: addr,"PCR" is noOffset5=1 & idxMode=0b01100; simm8 [ addr = inst_next + simm8; ] { export *[const]:2 addr; }
EA: addr,"PCR" is noOffset5=1 & idxMode=0b01101; simm16 [ addr = inst_next + simm16; ] { export *[const]:2 addr; }
# Indirect forms: compute the address, then read the pointer stored there.
EA: "[,"idxReg"]" is idxReg & noOffset5=1 & idxMode=0b10100 { local addr:2 = *:2 idxReg; export addr; }
EA: "["simm8,idxReg"]" is idxReg & noOffset5=1 & idxMode=0b11000; simm8 { local offs:1 = simm8; local addr:2 = idxReg + sext(offs); addr = *:2 addr; export addr; }
EA: "["simm16,idxReg"]" is idxReg & noOffset5=1 & idxMode=0b11001; simm16 { local addr:2 = idxReg + simm16; addr = *:2 addr; export addr; }
@ifdef H6309
EA: "[,"W"]" is idxReg=0b00 & noOffset5=1 & idxMode=0b10000 & W { local addr:2 = *:2 W; export addr; }
EA: "["simm16,W"]" is idxReg=0b01 & noOffset5=1 & idxMode=0b10000; simm16 & W { local addr:2 = W + simm16; addr = *:2 addr; export addr; }
@endif # H6309
EA: "["^A,idxReg"]" is A & idxReg & noOffset5=1 & idxMode=0b10110 { local addr:2 = idxReg + sext(A); addr = *:2 addr; export addr; }
EA: "["^B,idxReg"]" is B & idxReg & noOffset5=1 & idxMode=0b10101 { local addr:2 = idxReg + sext(B); addr = *:2 addr; export addr; }
EA: "["^D,idxReg"]" is D & idxReg & noOffset5=1 & idxMode=0b11011 { local addr:2 = idxReg + D; addr = *:2 addr; export addr; }
@ifdef H6309
EA: "["^E,idxReg"]" is E & idxReg & noOffset5=1 & idxMode=0b10111 { local addr:2 = idxReg + sext(E); addr = *:2 addr; export addr; }
EA: "["^F,idxReg"]" is F & idxReg & noOffset5=1 & idxMode=0b11010 { local addr:2 = idxReg + sext(F); addr = *:2 addr; export addr; }
EA: "["^W,idxReg"]" is W & idxReg & noOffset5=1 & idxMode=0b11110 { local addr:2 = idxReg + W; addr = *:2 addr; export addr; }
@endif # H6309
EA: "[,"idxReg"++]" is idxReg & noOffset5=1 & idxMode=0b10001 { local addr:2 = idxReg; addr = *:2 addr; idxReg = idxReg + 2; export addr; }
EA: "[,--"idxReg"]" is idxReg & noOffset5=1 & idxMode=0b10011 { idxReg = idxReg - 2; local addr:2 = idxReg; addr = *:2 addr; export addr; }
@ifdef H6309
EA: "[,"^W^"++]" is W & idxReg=0b10 & noOffset5=1 & idxMode=0b10000 { local addr:2 = W; addr = *:2 addr; W = W + 2; export addr; }
EA: "[,--"^W^"]" is W & idxReg=0b11 & noOffset5=1 & idxMode=0b10000 { W = W - 2; local addr:2 = W; addr = *:2 addr; export addr; }
@endif # H6309
EA: "["addr",PCR]" is noOffset5=1 & idxMode=0b11100; simm8 [ addr = inst_next + simm8; ] { local eaddr:2 = inst_next + simm8; eaddr = *:2 eaddr; export eaddr; }
EA: "["addr",PCR]" is noOffset5=1 & idxMode=0b11101; simm16 [ addr = inst_next + simm16; ] { local eaddr:2 = inst_next + simm16; eaddr = *:2 eaddr; export eaddr; }
# Extended indirect: [nnnn].  Continues on the following line.
EA: "["imm16"]" is noOffset5=1 & idxReg=0b00 & idxMode=0b11111; imm16 { local eaddr:2 = imm16; eaddr = *:2 eaddr;
export eaddr; } ################################################################ # Constructors ################################################################ PAGE2: is op=0x10 { } # PAGE2 opcode prefix (0x10) PAGE3: is op=0x11 { } # PAGE3 opcode prefix (0x11) IMMED1: "#"imm8 is imm8 { export *[const]:1 imm8; } REL: addr is simm8 [ addr = inst_next + simm8; ] { export *:2 addr; } REL2: addr is simm16 [ addr = inst_next + simm16; ] { export *:2 addr; } # 1-byte operand, immediate/direct/indexed/extended addressing mode OP1: "#"imm8 is op45=0; imm8 { export *[const]:1 imm8; } OP1: "<"imm8 is (op47=0 | op47=9 | op47=0xD); imm8 { local tmp:2 = (zext(DP) << 8) + imm8; export *:1 tmp; } OP1: EA is op45=2; EA { local tmp:2 = EA; export *:1 tmp; } OP1: imm16 is op45=3; imm16 { export *:1 imm16; } # 2-byte operand, direct/indexed/extended addressing mode OP2: "#"imm16 is (op47=8 | op47=0xC); imm16 { export *[const]:2 imm16; } OP2: "<"imm8 is (op47=0 | op47=9 | op47=0xD); imm8 { local tmp:2 = (zext(DP) << 8) + imm8; export *:2 tmp; } OP2: EA is (op47=3 | op47=6 | op47=0xA | op47=0xE); EA { local tmp:2 = EA; export *:2 tmp; } OP2: imm16 is (op47=7 | op47=0xB | op47=0xF); imm16 { export *:2 imm16; } #JMP and JSR treat the direct/indexed/extended address modes differently OP2J: "<"imm8 is (op47=0 | op47=9); imm8 { local tmp:2 = (zext(DP) << 8) + imm8; export tmp; } OP2J: EA is (op47=6 | op47=0xA); EA { export EA; } OP2J: imm16 is (op47=7 | op47=0xB ); imm16 { export *[const]:2 imm16; } ################################################################ # Macros ################################################################ macro setNZFlags(result) { $(Z) = (result == 0); $(N) = (result s< 0); } macro setHFlag(reg, op) { local mask = 0x0F; # Low nibble mask $(H) = (((reg & mask) + (op & mask)) >> 4) & 1; } # Negate twos complement value in op. # P-code INT_2COMP. macro negate(op) { $(V) = (op == 0x80); $(C) = (op != 0); op = -op; setNZFlags(op); } # Logical complement of op. 
(0 => 1; 1 => 0) # P-code INT_NEGATE. macro complement(op) { $(V) = 0; $(C) = 1; op = ~op; setNZFlags(op); } # Signed shift right. # P-code INT_SRIGHT. macro arithmeticShiftRight(op) { $(C) = op & 1; op = (op s>> 1); setNZFlags(op); } macro logicalShiftRight(op) { $(C) = op & 1; op = op >> 1; $(Z) = (op == 0); $(N) = 0; } macro rotateRightWithCarry(op) { local carryOut = $(C) << 7; $(C) = op & 1; op = (op s>> 1) | carryOut; setNZFlags(op); } macro logicalShiftLeft(op) { local tmp = (op >> 7); $(C) = tmp; op = op << 1; $(V) = tmp ^ (op >> 7); setNZFlags(op); } macro rotateLeftWithCarry(op) { local carryIn = $(C); local tmp = (op >> 7); $(C) = tmp; op = (op << 1) | carryIn; $(V) = tmp ^ (op >> 7); setNZFlags(op); } macro increment(op) { $(V) = (op == 0x7F); op = op + 1; setNZFlags(op); } macro decrement(op) { $(V) = (op == 0x80); op = op - 1; setNZFlags(op); } macro test(op) { $(V) = 0; setNZFlags(op); } macro clear(op) { $(V) = 0; op = 0; $(Z) = 1; $(N) = 0; $(C) = 0; } macro addition(reg, op) { local tmp = reg; local val = op; $(C) = carry(tmp, val); $(V) = scarry(tmp, val); tmp = tmp + val; setNZFlags(tmp); reg = tmp; } macro additionWithCarry(reg, op) { local carryIn = zext($(C)); local tmp = reg; local val = op; local mask = 0x0F; # Low nibble mask local result = tmp + val; $(H) = (((tmp & mask) + (val & mask) + carryIn) >> 4) & 1; $(C) = carry(tmp, val) || carry(result, carryIn); $(V) = scarry(tmp, val) ^^ scarry(result, carryIn); tmp = result + carryIn; setNZFlags(tmp); reg = tmp; } macro subtraction(reg, op) { local tmp = reg; local val = op; $(V) = sborrow(tmp, val); $(C) = (tmp < val); tmp = tmp - val; setNZFlags(tmp); reg = tmp; } macro subtractionWithCarry(reg, op) { local carryIn = zext($(C)); local tmp = reg; local val = op; local tmpResult = tmp - val; $(C) = (tmp < val) || (tmpResult < carryIn); $(V) = sborrow(tmp, val) ^^ sborrow(tmpResult, carryIn); tmp = tmpResult - carryIn; setNZFlags(tmp); reg = tmp; } macro compare(reg, op) { local tmp = reg; 
local val = op; $(V) = sborrow(tmp, val); $(C) = (tmp < val); tmp = tmp - val; setNZFlags(tmp); } macro logicalAnd(reg, op) { reg = reg & op; setNZFlags(reg); $(V) = 0; } macro logicalOr(reg, op) { reg = reg | op; setNZFlags(reg); $(V) = 0; } macro logicalExclusiveOr(reg, op) { reg = reg ^ op; setNZFlags(reg); $(V) = 0; } macro bitTest(reg, op) { local tmp = reg & op; setNZFlags(tmp); $(V) = 0; } macro loadRegister(reg, op) { reg = op; setNZFlags(reg); $(V) = 0; } macro storeRegister(reg, op) { op = reg; setNZFlags(reg); $(V) = 0; } # Push 1 byte operand op1 macro Push1(reg, op) { reg = reg - 1; *:1 reg = op; } # Push 2 byte operand op2 macro Push2(reg, op) { reg = reg - 2; *:2 reg = op; } # Pull 1 byte operand op1 macro Pull1(reg, op) { op = *:1 reg; reg = reg + 1; } # Pull 2 byte operand op2 macro Pull2(reg, op) { op = *:2 reg; reg = reg + 2; } macro PushUYXDpD() { Push2(S, U); Push2(S, Y); Push2(S, X); Push1(S, DP); Push2(S, D); } macro PullDDpXYU() { Pull2(S, D); Pull1(S, DP); Pull2(S, X); Pull2(S, Y); Pull2(S, U); } macro PushEntireState() { local tmp:2 = inst_next; $(E) = 1; Push2(S, tmp); # return PC address PushUYXDpD(); Push1(S, CC); } ################################################################ # Instructions ################################################################ ################################################################ # Opcode 0x00 - 0x0F, relative addressing # Opcode 0x40 - 0x4F, register A addressing # Opcode 0x50 - 0x5F, register B addressing # Opcode 0x60 - 0x6F, indexed addressing # Opcode 0x70 - 0x7F, extended addressing ################################################################ :NEGA is op=0x40 { negate(A); } :NEGB is op=0x50 { negate(B); } :NEG OP1 is (op=0x00 | op=0x60 | op=0x70) ... & OP1 { negate(OP1); } :COMA is op=0x43 { complement(A); } :COMB is op=0x53 { complement(B); } :COM OP1 is (op=0x03 | op=0x63 | op=0x73) ... 
& OP1 { complement(OP1); } :LSRA is op=0x44 { logicalShiftRight(A); } :LSRB is op=0x54 { logicalShiftRight(B); } :LSR OP1 is (op=0x04 | op=0x64 | op=0x74) ... & OP1 { logicalShiftRight(OP1); } :RORA is op=0x46 { rotateRightWithCarry(A); } :RORB is op=0x56 { rotateRightWithCarry(B); } :ROR OP1 is (op=0x06 | op=0x66 | op=0x76) ... & OP1 { rotateRightWithCarry(OP1); } :ASRA is op=0x47 { arithmeticShiftRight(A); } :ASRB is op=0x57 { arithmeticShiftRight(B); } :ASR OP1 is (op=0x07 | op=0x67 | op=0x77) ... & OP1 { arithmeticShiftRight(OP1); } :LSLA is op=0x48 { logicalShiftLeft(A); } :LSLB is op=0x58 { logicalShiftLeft(B); } :LSL OP1 is (op=0x08 | op=0x68 | op=0x78) ... & OP1 { logicalShiftLeft(OP1); } :ROLA is op=0x49 { rotateLeftWithCarry(A); } :ROLB is op=0x59 { rotateLeftWithCarry(B); } :ROL OP1 is (op=0x09 | op=0x69 | op=0x79) ... & OP1 { rotateLeftWithCarry(OP1); } :DECA is op=0x4A { decrement(A); } :DECB is op=0x5A { decrement(B); } :DEC OP1 is (op=0x0A | op=0x6A | op=0x7A) ... & OP1 { decrement(OP1); } :INCA is op=0x4C { increment(A); } :INCB is op=0x5C { increment(B); } :INC OP1 is (op=0x0C | op=0x6C | op=0x7C) ... & OP1 { increment(OP1); } :TSTA is op=0x4D { test(A); } :TSTB is op=0x5D { test(B); } :TST OP1 is (op=0x0D | op=0x6D | op=0x7D) ... & OP1 { test(OP1); } :JMP OP2J is (op=0x0E | op=0x6E | op=0x7E) ... & OP2J { local target = OP2J; goto [target]; } :CLRA is op=0x4F { clear(A); } :CLRB is op=0x5F { clear(B); } :CLR OP1 is (op=0x0F | op=0x6F | op=0x7F) ... & OP1 { clear(OP1); } ################################################################ # Opcode 0x10 - 0x1F, misc. 
addressing ################################################################ :NOP is op=0x12 { } :SYNC is op=0x13 { } :LBRA REL2 is op=0x16; REL2 { goto REL2; } :LBSR REL2 is op=0x17; REL2 { local tmp:2 = inst_next; Push2(S, tmp); call REL2; } :DAA is op=0x19 { local highA:1 = A >> 4; local lowA:1 = A & 0x0F; local cc1 = ($(C) == 1 | highA > 9 | (highA > 8) & (lowA > 9)); local cc2 = ($(H) == 1 | lowA > 9); if ( cc1 & cc2 ) goto ; if ( cc1 ) goto ; if ( cc2 ) goto ; goto ; $(C) = carry(A, 0x66); A = A + 0x66; goto ; $(C) = carry(A, 0x60); A = A + 0x60; goto ; $(C) = carry(A, 0x06); A = A + 0x06; goto ; setNZFlags(A); } :ORCC IMMED1 is op=0x1A; IMMED1 { CC = CC | IMMED1; } :ANDCC IMMED1 is op=0x1C; IMMED1 { CC = CC & IMMED1; } :SEX is op=0x1D { D = sext(B); } ################################################################ # Opcode 0x20 - 0x2F, relative addressing ################################################################ :BRA REL is op=0x20; REL { goto REL; } :BRN REL is op=0x21; REL { } :BHI REL is op=0x22; REL { local tmp = $(C) + $(Z); if (tmp == 0) goto REL; } :BLS REL is op=0x23; REL { local tmp = $(C) + $(Z); if (tmp) goto REL; } #:BHS REL is op=0x24; REL # See BCC :BCC REL is op=0x24; REL { if ($(C) == 0) goto REL; } #:BLO REL is op=0x25; REL # see BCS :BCS REL is op=0x25; REL { if ($(C)) goto REL; } :BNE REL is op=0x26; REL { if ($(Z) == 0) goto REL; } :BEQ REL is op=0x27; REL { if ($(Z)) goto REL; } :BVC REL is op=0x28; REL { if ($(V) == 0) goto REL; } :BVS REL is op=0x29; REL { if ($(V)) goto REL; } :BPL REL is op=0x2A; REL { if ($(N) == 0) goto REL; } :BMI REL is op=0x2B; REL { if ($(N)) goto REL; } :BGE REL is op=0x2C; REL { if ($(N) == $(V)) goto REL; } :BLT REL is op=0x2D; REL { local tmp = $(C) ^ $(Z); if (tmp) goto REL; } :BGT REL is op=0x2E; REL { if (($(N) == $(V)) & $(C)) goto REL; } :BLE REL is op=0x2F; REL { local tmp = $(N) ^ $(V); if (tmp | $(Z)) goto REL; } ################################################################ # Opcode 0x30 - 
0x3F, misc. addressing ################################################################ :LEAX EA is op=0x30; EA { local tmp = EA; X = tmp; $(Z) = (tmp == 0); } :LEAY EA is op=0x31; EA { local tmp = EA; Y = tmp; $(Z) = (tmp == 0); } :LEAS EA is op=0x32; EA { S = EA; } :LEAU EA is op=0x33; EA { U = EA; } :RTS is op=0x39 { local addr:2; Pull2(S, addr); return [addr]; } :ABX is op=0x3A { X = X + zext(B); } :RTI is op=0x3B { local addr:2; Pull1(S, CC); if ($(E)==0) goto ; PullDDpXYU(); Pull2(S, addr); return [addr]; } :CWAI IMMED1 is op=0x3C; IMMED1 { CC = CC & IMMED1; PushEntireState(); } :MUL is op=0x3D { D = zext(A) * zext(B); $(Z) = (D == 0); $(C) = B >> 7; } :SWI is op=0x3F { PushEntireState(); $(I) = 1; $(F) = 1; tmp:2 = $(SWI_VECTOR); call [tmp]; } ################################################################ # Opcode 0x80 - 0x8F, immediate addressing # Opcode 0x90 - 0x9F, direct addressing # Opcode 0xA0 - 0xAF, indexed addressing # Opcode 0xB0 - 0xBF, extended addressing # Opcode 0xC0 - 0xCF, immediate addressing # Opcode 0xD0 - 0xDF, direct addressing # Opcode 0xE0 - 0xEF, indexed addressing # Opcode 0xF0 - 0xFF, extended addressing ################################################################ :SUBA OP1 is (op=0x80 | op=0x90 | op=0xA0 | op=0xB0) ... & OP1 { subtraction(A, OP1); } :SUBB OP1 is (op=0xC0 | op=0xD0 | op=0xE0 | op=0xF0) ... & OP1 { subtraction(B, OP1); } :CMPA OP1 is (op=0x81 | op=0x91 | op=0xA1 | op=0xB1) ... & OP1 { compare(A, OP1); } :CMPB OP1 is (op=0xC1 | op=0xD1 | op=0xE1 | op=0xF1) ... & OP1 { compare(B, OP1); } :SBCA OP1 is (op=0x82 | op=0x92 | op=0xA2 | op=0xB2) ... & OP1 { subtractionWithCarry(A, OP1); } :SBCB OP1 is (op=0xC2 | op=0xD2 | op=0xE2 | op=0xF2) ... & OP1 { subtractionWithCarry(B, OP1); } :SUBD OP2 is (op=0x83 | op=0x93 | op=0xA3 | op=0xB3) ... & OP2 { subtraction(D, OP2); } :ADDD OP2 is (op=0xC3 | op=0xD3 | op=0xE3 | op=0xF3) ... & OP2 { addition(D, OP2); } :ANDA OP1 is (op=0x84 | op=0x94 | op=0xA4 | op=0xB4) ... 
# Tail of the ":ANDA OP1" constructor from the previous line; the rest of
# the 0x80 - 0xFF register/memory operations all delegate to the flag
# macros defined earlier in this file.
& OP1 { logicalAnd(A, OP1); }
:ANDB OP1 is (op=0xC4 | op=0xD4 | op=0xE4 | op=0xF4) ... & OP1 { logicalAnd(B, OP1); }
:BITA OP1 is (op=0x85 | op=0x95 | op=0xA5 | op=0xB5) ... & OP1 { bitTest(A, OP1); }
:BITB OP1 is (op=0xC5 | op=0xD5 | op=0xE5 | op=0xF5) ... & OP1 { bitTest(B, OP1); }
:LDA OP1 is (op=0x86 | op=0x96 | op=0xA6 | op=0xB6) ... & OP1 { loadRegister(A, OP1); }
:LDB OP1 is (op=0xC6 | op=0xD6 | op=0xE6 | op=0xF6) ... & OP1 { loadRegister(B, OP1); }
:STA OP1 is (op=0x97 | op=0xA7 | op=0xB7) ... & OP1 { storeRegister(A, OP1); }
:STB OP1 is (op=0xD7 | op=0xE7 | op=0xF7) ... & OP1 { storeRegister(B, OP1); }
:EORA OP1 is (op=0x88 | op=0x98 | op=0xA8 | op=0xB8) ... & OP1 { logicalExclusiveOr(A, OP1); }
:EORB OP1 is (op=0xC8 | op=0xD8 | op=0xE8 | op=0xF8) ... & OP1 { logicalExclusiveOr(B, OP1); }
:ADCA OP1 is (op=0x89 | op=0x99 | op=0xA9 | op=0xB9) ... & OP1 { additionWithCarry(A, OP1); }
:ADCB OP1 is (op=0xC9 | op=0xD9 | op=0xE9 | op=0xF9) ... & OP1 { additionWithCarry(B, OP1); }
:ORA OP1 is (op=0x8A | op=0x9A | op=0xAA | op=0xBA) ... & OP1 { logicalOr(A, OP1); }
:ORB OP1 is (op=0xCA | op=0xDA | op=0xEA | op=0xFA) ... & OP1 { logicalOr(B, OP1); }
# ADD updates H (half-carry) in addition to the NZVC flags.
:ADDA OP1 is (op=0x8B | op=0x9B | op=0xAB | op=0xBB) ... & OP1 { setHFlag(A, OP1); addition(A, OP1); }
:ADDB OP1 is (op=0xCB | op=0xDB | op=0xEB | op=0xFB) ... & OP1 { setHFlag(B, OP1); addition(B, OP1); }
:CMPX OP2 is (op=0x8C | op=0x9C | op=0xAC | op=0xBC) ... & OP2 { compare(X, OP2); }
:LDD OP2 is (op=0xCC | op=0xDC | op=0xEC | op=0xFC) ... & OP2 { loadRegister(D, OP2); }
:BSR REL is op=0x8D; REL { local addr:2 = inst_next; Push2(S, addr); call REL; }
:JSR OP2J is (op=0x9D | op=0xAD | op=0xBD) ... & OP2J { local addr:2 = inst_next; Push2(S, addr); local target = OP2J; call [target]; }
:STD OP2 is (op=0xDD | op=0xED | op=0xFD) ... & OP2 { storeRegister(D, OP2); }
:LDX OP2 is (op=0x8E | op=0x9E | op=0xAE | op=0xBE) ... & OP2 { loadRegister(X, OP2); }
:LDU OP2 is (op=0xCE | op=0xDE | op=0xEE | op=0xFE) ...
& OP2 { loadRegister(U, OP2); } :STX OP2 is (op=0x9F | op=0xAF | op=0xBF) ... & OP2 { storeRegister(X, OP2); } :STU OP2 is (op=0xDF | op=0xEF | op=0xFF) ... & OP2 { storeRegister(U, OP2); } ################################################################ # Page 2 Opcodes (prefix 0x10) ################################################################ :LBRN REL2 is PAGE2; op=0x21; REL2 { } :LBHI REL2 is PAGE2; op=0x22; REL2 { local tmp = $(C) + $(Z); if (tmp == 0) goto REL2; } :LBLS REL2 is PAGE2; op=0x23; REL2 { local tmp = $(C) + $(Z); if (tmp) goto REL2; } :LBCC REL2 is PAGE2; op=0x24; REL2 { if ($(C) == 0) goto REL2; } #:LBLO REL2 is PAGE2; op=0x25; REL2 # see LBCS :LBCS REL2 is PAGE2; op=0x25; REL2 { if ($(C)) goto REL2; } :LBNE REL2 is PAGE2; op=0x26; REL2 { if ($(Z) == 0) goto REL2; } :LBEQ REL2 is PAGE2; op=0x27; REL2 { if ($(Z)) goto REL2; } :LBVC REL2 is PAGE2; op=0x28; REL2 { if ($(V) == 0) goto REL2; } :LBVS REL2 is PAGE2; op=0x29; REL2 { if ($(V)) goto REL2; } :LBPL REL2 is PAGE2; op=0x2A; REL2 { if ($(N) == 0) goto REL2; } :LBMI REL2 is PAGE2; op=0x2B; REL2 { if ($(N)) goto REL2; } :LBGE REL2 is PAGE2; op=0x2C; REL2 { if ($(N) == $(V)) goto REL2; } :LBLT REL2 is PAGE2; op=0x2D; REL2 { local tmp = $(C) ^ $(Z); if (tmp) goto REL2; } :LBGT REL2 is PAGE2; op=0x2E; REL2 { if (($(N) == $(V)) & $(C)) goto REL2; } :LBLE REL2 is PAGE2; op=0x2F; REL2 { local tmp = $(N) ^ $(V); if (tmp | $(Z)) goto REL2; } :SWI2 is PAGE2; op=0x3F { PushEntireState(); tmp:2 = $(SWI2_VECTOR); call [tmp]; } :CMPD OP2 is PAGE2; (op=0x83 | op=0x93 | op=0xA3 | op=0xB3) ... & OP2 { compare(D, OP2); } :CMPY OP2 is PAGE2; (op=0x8C | op=0x9C | op=0xAC | op=0xBC) ... & OP2 { compare(Y, OP2); } :LDY OP2 is PAGE2; (op=0x8E | op=0x9E | op=0xAE | op=0xBE) ... & OP2 { loadRegister(Y, OP2); } :STY OP2 is PAGE2; (op=0x9F | op=0xAF | op=0xBF) ... & OP2 { storeRegister(Y, OP2); } :LDS OP2 is PAGE2; (op=0xCE | op=0xDE | op=0xEE | op=0xFE) ... 
& OP2 { loadRegister(S, OP2); } :STS OP2 is PAGE2; (op=0xDF | op=0xEF | op=0xFF) ... & OP2 { storeRegister(S, OP2); } ################################################################ # Page 3 Opcodes (prefix 0x11) ################################################################ :SWI3 is PAGE3; op=0x3F { PushEntireState(); tmp:2 = $(SWI3_VECTOR); call [tmp]; } :CMPU OP2 is PAGE3; (op=0x83 | op=0x93 | op=0xA3 | op=0xB3) ... & OP2 { compare(U, OP2); } :CMPS OP2 is PAGE3; (op=0x8C | op=0x9C | op=0xAC | op=0xBC) ... & OP2 { compare(S, OP2); } ================================================ FILE: pypcode/processors/MC6800/data/languages/6x09_exg_tfr.sinc ================================================ # sleigh specification file for Motorola 6809/Hitachi 6309 ################################################################ # EXG, TFR helper ################################################################ @ifdef H6309 EXG_r0Tmp: D is reg0_exg=0 & D { exg16_r0 = D; } EXG_r0Tmp: X is reg0_exg=1 & X { exg16_r0 = X; } EXG_r0Tmp: Y is reg0_exg=2 & Y { exg16_r0 = Y; } EXG_r0Tmp: U is reg0_exg=3 & U { exg16_r0 = U; } EXG_r0Tmp: S is reg0_exg=4 & S { exg16_r0 = S; } EXG_r0Tmp: PC is reg0_exg=5 & PC { exg16_r0 = inst_next; } EXG_r0Tmp: W is reg0_exg=6 & W { exg16_r0 = 0x0; } EXG_r0Tmp: V is reg0_exg=7 & V { exg16_r0 = 0x0; } EXG_r0Tmp: A is reg0_exg=8 & A { exg8l_r0 = A; exg8h_r0 = A; } EXG_r0Tmp: B is reg0_exg=9 & B { exg8l_r0 = B; exg8h_r0 = B; } EXG_r0Tmp: CC is reg0_exg=10 & CC { exg8l_r0 = CC; exg8h_r0 = CC;} EXG_r0Tmp: DP is reg0_exg=11 & DP { exg8l_r0 = DP; exg8h_r0 = DP;} EXG_r0Tmp: 0 is reg0_exg=12 { exg16_r0 = 0x0; } EXG_r0Tmp: 0 is reg0_exg=13 { exg16_r0 = 0x0; } EXG_r0Tmp: E is reg0_exg=14 & E { exg8l_r0 = E; exg8h_r0 = E; } EXG_r0Tmp: F is reg0_exg=15 & F { exg8l_r0 = F; exg8h_r0 = F; } EXG_r1Tmp: D is reg1_exg=0 & D { exg16_r1 = D; } EXG_r1Tmp: X is reg1_exg=1 & X { exg16_r1 = X; } EXG_r1Tmp: Y is reg1_exg=2 & Y { exg16_r1 = Y; } EXG_r1Tmp: U is reg1_exg=3 & U { 
exg16_r1 = U; } EXG_r1Tmp: S is reg1_exg=4 & S { exg16_r1 = S; } EXG_r1Tmp: PC is reg1_exg=5 & PC { exg16_r1 = inst_next; } EXG_r1Tmp: W is reg1_exg=6 & W { exg16_r1 = 0x0; } EXG_r1Tmp: V is reg1_exg=7 & V { exg16_r1 = 0x0; } EXG_r1Tmp: A is reg1_exg=8 & A { exg8l_r1 = A; exg8h_r1 = A; } EXG_r1Tmp: B is reg1_exg=9 & B { exg8l_r1 = B; exg8h_r1 = B; } EXG_r1Tmp: CC is reg1_exg=10 & CC { exg8l_r1 = CC; exg8h_r1 = CC;} EXG_r1Tmp: DP is reg1_exg=11 & DP { exg8l_r1 = DP; exg8h_r1 = DP;} EXG_r1Tmp: 0 is reg1_exg=12 { exg16_r1 = 0x0; } EXG_r1Tmp: 0 is reg1_exg=13 { exg16_r1 = 0x0; } EXG_r1Tmp: E is reg1_exg=14 & E { exg8l_r1 = E; exg8h_r1 = E; } EXG_r1Tmp: F is reg1_exg=15 & F { exg8l_r1 = F; exg8h_r1 = F; } EXG_r0Set: D is reg0_exg=0 & D { D = exg16_r1; } EXG_r0Set: X is reg0_exg=1 & X { X = exg16_r1; } EXG_r0Set: Y is reg0_exg=2 & Y { Y = exg16_r1; } EXG_r0Set: U is reg0_exg=3 & U { U = exg16_r1; } EXG_r0Set: S is reg0_exg=4 & S { S = exg16_r1; } EXG_r0Set: PC is reg0_exg=5 & PC { PC = exg16_r1; } # must GOTO EXG_r0Set: W is reg0_exg=6 & W { W = exg16_r1; } EXG_r0Set: V is reg0_exg=7 & V { V = exg16_r1; } EXG_r0Set: A is reg0_exg=8 & A { A = exg8h_r1; } EXG_r0Set: B is reg0_exg=9 & B { B = exg8l_r1; } EXG_r0Set: CC is reg0_exg=10 & CC { CC = exg8l_r1; } EXG_r0Set: DP is reg0_exg=11 & DP { DP = exg8h_r1; } EXG_r0Set: 0 is reg0_exg=12 { } EXG_r0Set: 0 is reg0_exg=13 { } EXG_r0Set: E is reg0_exg=14 & E { E = exg8h_r1; } EXG_r0Set: F is reg0_exg=15 & F { F = exg8l_r1; } EXG_r1Set: D is reg1_exg=0 & D { D = exg16_r0; } EXG_r1Set: X is reg1_exg=1 & X { X = exg16_r0; } EXG_r1Set: Y is reg1_exg=2 & Y { Y = exg16_r0; } EXG_r1Set: U is reg1_exg=3 & U { U = exg16_r0; } EXG_r1Set: S is reg1_exg=4 & S { S = exg16_r0; } EXG_r1Set: PC is reg1_exg=5 & PC { PC = exg16_r0; } # must GOTO EXG_r1Set: W is reg1_exg=6 & W { W = exg16_r0; } EXG_r1Set: V is reg1_exg=7 & V { V = exg16_r0; } EXG_r1Set: A is reg1_exg=8 & A { A = exg8h_r0; } EXG_r1Set: B is reg1_exg=9 & B { B = exg8l_r0; } 
# H6309 EXG_r1Set table (tail): write the value saved from operand 0 into operand 1.
EXG_r1Set: CC is reg1_exg=10 & CC { CC = exg8l_r0; }
EXG_r1Set: DP is reg1_exg=11 & DP { DP = exg8h_r0; }
EXG_r1Set: 0 is reg1_exg=12 { }
EXG_r1Set: 0 is reg1_exg=13 { }
EXG_r1Set: E is reg1_exg=14 & E { E = exg8h_r0; }
EXG_r1Set: F is reg1_exg=15 & F { F = exg8l_r0; }
@endif

@ifdef M6809
# EXG/TFR postbyte decoding for the plain MC6809.
# EXG_r0Tmp: snapshot the first operand into the exg16_r0 / exg8l_r0 /
# exg8h_r0 temporaries before any register is overwritten.  Invalid
# register codes read as the constant 0xFFFF.
EXG_r0Tmp: D is reg0_exg=0 & D { exg16_r0 = D; }
EXG_r0Tmp: X is reg0_exg=1 & X { exg16_r0 = X; }
EXG_r0Tmp: Y is reg0_exg=2 & Y { exg16_r0 = Y; }
EXG_r0Tmp: U is reg0_exg=3 & U { exg16_r0 = U; }
EXG_r0Tmp: S is reg0_exg=4 & S { exg16_r0 = S; }
# PC reads as the address of the next instruction.
EXG_r0Tmp: PC is reg0_exg=5 & PC { exg16_r0 = inst_next; }
EXG_r0Tmp: "inv" is reg0_exg=6 { exg16_r0 = 0xFFFF; }
EXG_r0Tmp: "inv" is reg0_exg=7 { exg16_r0 = 0xFFFF; }
# 8-bit sources: A/B supply 0xFF as the high byte of the 16-bit value.
EXG_r0Tmp: A is reg0_exg=8 & A { exg8l_r0 = A; exg8h_r0 = 0xFF; }
EXG_r0Tmp: B is reg0_exg=9 & B { exg8l_r0 = B; exg8h_r0 = 0xFF; }
# NOTE(review): CC/DP are duplicated into BOTH halves here, while the
# EXG_r1Tmp table below forces 0xFF into the high byte for the same
# registers.  The asymmetry may mirror a hardware quirk of the 6809
# EXG instruction - confirm against the MC6809 programming manual.
EXG_r0Tmp: CC is reg0_exg=10 & CC { exg8l_r0 = CC; exg8h_r0 = CC; }
EXG_r0Tmp: DP is reg0_exg=11 & DP { exg8l_r0 = DP; exg8h_r0 = DP; }
EXG_r0Tmp: "inv" is reg0_exg=12 { exg16_r0 = 0xFFFF; }
EXG_r0Tmp: "inv" is reg0_exg=13 { exg16_r0 = 0xFFFF; }
EXG_r0Tmp: "inv" is reg0_exg=14 { exg16_r0 = 0xFFFF; }
EXG_r0Tmp: "inv" is reg0_exg=15 { exg16_r0 = 0xFFFF; }

# EXG_r1Tmp: snapshot the second operand into the *_r1 temporaries.
EXG_r1Tmp: D is reg1_exg=0 & D { exg16_r1 = D; }
EXG_r1Tmp: X is reg1_exg=1 & X { exg16_r1 = X; }
EXG_r1Tmp: Y is reg1_exg=2 & Y { exg16_r1 = Y; }
EXG_r1Tmp: U is reg1_exg=3 & U { exg16_r1 = U; }
EXG_r1Tmp: S is reg1_exg=4 & S { exg16_r1 = S; }
EXG_r1Tmp: PC is reg1_exg=5 & PC { exg16_r1 = inst_next; }
EXG_r1Tmp: "inv" is reg1_exg=6 { exg16_r1 = 0xFFFF; }
EXG_r1Tmp: "inv" is reg1_exg=7 { exg16_r1 = 0xFFFF; }
EXG_r1Tmp: A is reg1_exg=8 & A { exg8l_r1 = A; exg8h_r1 = 0xFF; }
EXG_r1Tmp: B is reg1_exg=9 & B { exg8l_r1 = B; exg8h_r1 = 0xFF; }
EXG_r1Tmp: CC is reg1_exg=10 & CC { exg8l_r1 = CC; exg8h_r1 = 0xFF; }
EXG_r1Tmp: DP is reg1_exg=11 & DP { exg8l_r1 = DP; exg8h_r1 = 0xFF; }
EXG_r1Tmp: "inv" is reg1_exg=12 { exg16_r1 = 0xFFFF; }
EXG_r1Tmp: "inv" is reg1_exg=13 { exg16_r1 = 0xFFFF; }
EXG_r1Tmp: "inv" is reg1_exg=14 { exg16_r1 = 0xFFFF; }
EXG_r1Tmp: "inv" is reg1_exg=15 { exg16_r1 = 0xFFFF; }

# EXG_r0Set: write the value saved from operand 1 into operand 0.
# Invalid codes and the constant-only slots are no-ops on write.
EXG_r0Set: D is reg0_exg=0 & D { D = exg16_r1; }
EXG_r0Set: X is reg0_exg=1 & X { X = exg16_r1; }
EXG_r0Set: Y is reg0_exg=2 & Y { Y = exg16_r1; }
EXG_r0Set: U is reg0_exg=3 & U { U = exg16_r1; }
EXG_r0Set: S is reg0_exg=4 & S { S = exg16_r1; }
EXG_r0Set: PC is reg0_exg=5 & PC { PC = exg16_r1; } # must GOTO
EXG_r0Set: "inv" is reg0_exg=6 { }
EXG_r0Set: "inv" is reg0_exg=7 { }
EXG_r0Set: A is reg0_exg=8 & A { A = exg8l_r1; }
EXG_r0Set: B is reg0_exg=9 & B { B = exg8l_r1; }
EXG_r0Set: CC is reg0_exg=10 & CC { CC = exg8l_r1; }
EXG_r0Set: DP is reg0_exg=11 & DP { DP = exg8l_r1; }
EXG_r0Set: "inv" is reg0_exg=12 { }
EXG_r0Set: "inv" is reg0_exg=13 { }
EXG_r0Set: "inv" is reg0_exg=14 { }
EXG_r0Set: "inv" is reg0_exg=15 { }

# EXG_r1Set: write the value saved from operand 0 into operand 1.
# r1 must be set before r0 so that e.g. EXG A,D swaps A and B correctly.
EXG_r1Set: D is reg1_exg=0 & D { D = exg16_r0; }
EXG_r1Set: X is reg1_exg=1 & X { X = exg16_r0; }
EXG_r1Set: Y is reg1_exg=2 & Y { Y = exg16_r0; }
EXG_r1Set: U is reg1_exg=3 & U { U = exg16_r0; }
EXG_r1Set: S is reg1_exg=4 & S { S = exg16_r0; }
EXG_r1Set: PC is reg1_exg=5 & PC { PC = exg16_r0; } # must GOTO
EXG_r1Set: "inv" is reg1_exg=6 { }
EXG_r1Set: "inv" is reg1_exg=7 { }
EXG_r1Set: A is reg1_exg=8 & A { A = exg8l_r0; }
EXG_r1Set: B is reg1_exg=9 & B { B = exg8l_r0; }
EXG_r1Set: CC is reg1_exg=10 & CC { CC = exg8l_r0; }
EXG_r1Set: DP is reg1_exg=11 & DP { DP = exg8l_r0; }
EXG_r1Set: "inv" is reg1_exg=12 { }
EXG_r1Set: "inv" is reg1_exg=13 { }
EXG_r1Set: "inv" is reg1_exg=14 { }
EXG_r1Set: "inv" is reg1_exg=15 { }
@endif

# If either operand is PC, the exchange/transfer is a control transfer:
# emit an indirect branch through the newly-written PC.
EXG_GOTO: is reg0_exg=5 | reg1_exg=5 { goto [PC]; }
EXG_GOTO: is reg0_exg & reg1_exg { } # PC not set
TFR_GOTO: is reg1_exg=5 { goto [PC]; }
TFR_GOTO: is reg1_exg { } # PC not set

# Exchange two registers
:EXG EXG_r0Set,EXG_r1Set is op=0x1E; EXG_r0Set & EXG_r1Set & EXG_r0Tmp & EXG_r1Tmp & EXG_GOTO { build EXG_r0Tmp; build EXG_r1Tmp; build EXG_r1Set; build EXG_r0Set; build EXG_GOTO; }
# Transfer
# register to another register
:TFR EXG_r0Tmp,EXG_r1Set is op=0x1F; EXG_r1Set & EXG_r0Tmp & TFR_GOTO { build EXG_r0Tmp; build EXG_r1Set; build TFR_GOTO; }
================================================ FILE: pypcode/processors/MC6800/data/languages/6x09_pull.sinc ================================================
# sleigh specification file for Motorola 6809/Hitachi 6309
#################################################################
# PULS helper
################################################################
# Each pulsN level decodes one bit of the register-list postbyte
# (imm80..imm87); when the bit is set, the corresponding register is
# pulled from the hardware (S) stack.  Registers are pulled in
# low-bit-first order: CC, A, B, DP, X, Y, U, PC.
puls0: CC is CC & imm80=1 { Pull1(S, CC); }
puls0: is imm80=0 { }
puls1: puls0" "A is A & imm81=1 & puls0 { Pull1(S, A); }
puls1: puls0 is imm81=0 & puls0 { }
puls2: puls1" "B is B & imm82=1 & puls1 { Pull1(S, B); }
puls2: puls1 is imm82=0 & puls1 { }
puls3: puls2" "DP is DP & imm83=1 & puls2 { Pull1(S, DP); }
puls3: puls2 is imm83=0 & puls2 { }
puls4: puls3" "X is X & imm84=1 & puls3 { Pull2(S, X); }
puls4: puls3 is imm84=0 & puls3 { }
puls5: puls4" "Y is Y & imm85=1 & puls4 { Pull2(S, Y); }
puls5: puls4 is imm85=0 & puls4 { }
puls6: puls5" "U is U & imm86=1 & puls5 { Pull2(S, U); }
puls6: puls5 is imm86=0 & puls5 { }
# Pulling PC is an indirect return-style control transfer.
puls7: puls6" "PC is PC & imm87=1 & puls6 { local t:2 = 0; Pull2(S, t); goto [t]; }
puls7: puls6 is imm87=0 & puls6 { }
:PULS puls7 is op=0x35; puls7 { }
################################################################
# PULU helper
################################################################
# Same postbyte layout as PULS, but pulls from the user (U) stack;
# bit 6 therefore names S instead of U.
pulu0: CC is CC & imm80=1 { Pull1(U, CC); }
pulu0: is imm80=0 { }
pulu1: pulu0" "A is A & imm81=1 & pulu0 { Pull1(U, A); }
pulu1: pulu0 is imm81=0 & pulu0 { }
pulu2: pulu1" "B is B & imm82=1 & pulu1 { Pull1(U, B); }
pulu2: pulu1 is imm82=0 & pulu1 { }
pulu3: pulu2" "DP is DP & imm83=1 & pulu2 { Pull1(U, DP); }
pulu3: pulu2 is imm83=0 & pulu2 { }
pulu4: pulu3" "X is X & imm84=1 & pulu3 { Pull2(U, X); }
pulu4: pulu3 is imm84=0 & pulu3 { }
pulu5: pulu4" "Y is Y & imm85=1 & pulu4 { Pull2(U, Y); }
pulu5: pulu4 is imm85=0 & pulu4 { }
pulu6: pulu5" "S is S & imm86=1 & pulu5 { Pull2(U, S); }
pulu6: pulu5 is imm86=0 & pulu5 { }
pulu7: pulu6" "PC is PC & imm87=1 & pulu6 { local t:2 = 0; Pull2(U, t); goto [t]; }
pulu7: pulu6 is imm87=0 & pulu6 { }
:PULU pulu7 is op=0x37; pulu7 { }
================================================ FILE: pypcode/processors/MC6800/data/languages/6x09_push.sinc ================================================
# sleigh specification file for Motorola 6809/Hitachi 6309
################################################################
# PSHS helper
################################################################
# Push decodes the postbyte high-bit-first (PC down to CC) so registers
# are stacked in the reverse of pull order.  The chain is rooted at
# pshs0, so display order is PC, U, Y, X, DP, B, A, CC.
pshs7: " "PC is PC & imm87=1 { local t:2 = inst_next; Push2(S, t); }
pshs7: is imm87=0 { }
pshs6: pshs7" "U is U & imm86=1 & pshs7 { Push2(S, U); }
pshs6: pshs7 is imm86=0 & pshs7 { }
pshs5: pshs6" "Y is Y & imm85=1 & pshs6 { Push2(S, Y); }
pshs5: pshs6 is imm85=0 & pshs6 { }
pshs4: pshs5" "X is X & imm84=1 & pshs5 { Push2(S, X); }
pshs4: pshs5 is imm84=0 & pshs5 { }
pshs3: pshs4" "DP is DP & imm83=1 & pshs4 { Push1(S, DP); }
pshs3: pshs4 is imm83=0 & pshs4 { }
pshs2: pshs3" "B is B & imm82=1 & pshs3 { Push1(S, B); }
pshs2: pshs3 is imm82=0 & pshs3 { }
pshs1: pshs2" "A is A & imm81=1 & pshs2 { Push1(S, A); }
pshs1: pshs2 is imm81=0 & pshs2 { }
pshs0: pshs1" "CC is CC & imm80=1 & pshs1 { Push1(S, CC); }
pshs0: pshs1 is imm80=0 & pshs1 { }
:PSHS pshs0 is op=0x34; pshs0 { }
################################################################
# PSHU helper
################################################################
# FIX: pshu7 displayed bare "PC" while its PSHS counterpart displays
# " "PC; the missing separator made PSHU disassembly inconsistent with
# PSHS.  Semantics are unchanged.
pshu7: " "PC is PC & imm87=1 { local t:2 = inst_next; Push2(U, t); }
pshu7: is imm87=0 { }
pshu6: pshu7" "S is S & imm86=1 & pshu7 { Push2(U, S); }
pshu6: pshu7 is imm86=0 & pshu7 { }
pshu5: pshu6" "Y is Y & imm85=1 & pshu6 { Push2(U, Y); }
pshu5: pshu6 is imm85=0 & pshu6 { }
pshu4: pshu5" "X is X & imm84=1 & pshu5 { Push2(U, X); }
pshu4: pshu5 is imm84=0 & pshu5 { }
pshu3: pshu4" "DP is DP & imm83=1 & pshu4 { Push1(U, DP); }
pshu3: pshu4 is imm83=0 & pshu4 { }
pshu2: pshu3" "B is B &
imm82=1 & pshu3 { Push1(U, B); } pshu2: pshu3 is imm82=0 & pshu3 { } pshu1: pshu2" "A is A & imm81=1 & pshu2 { Push1(U, A); } pshu1: pshu2 is imm81=0 & pshu2 { } pshu0: pshu1" "CC is CC & imm80=1 & pshu1 { Push1(U, CC); } pshu0: pshu1 is imm80=0 & pshu1 { } :PSHU pshu0 is op=0x36; pshu0 { } ================================================ FILE: pypcode/processors/MC6800/data/languages/H6309.slaspec ================================================ # sleigh specification file for Hitachi 6309 # Compatible with MC6809 with some extended instructions # and addressing modes @define H6309 "" @include "6x09.sinc" @include "6x09_push.sinc" @include "6x09_pull.sinc" @include "6x09_exg_tfr.sinc" ================================================ FILE: pypcode/processors/MC6800/data/manuals/6809.idx ================================================ @M6809PM.rev0_May83.pdf [MC6809-MC6809E Microprocessor Programming Manual, May 1983 (M6809PM/AD)] ABX, 51 ADCA, 52 ADCB, 52 ADDA, 53 ADDB, 53 ADDD, 54 ANDA, 55 ANDB, 55 ANDCC, 56 ASL, 57 ASLA, 57 ASLB, 57 ASR, 58 ASRA, 58 ASRB, 58 BCC, 59 LBCC, 59 BCS, 60 LBCS, 60 BEQ, 61 LBEQ, 61 BGE, 62 LBGE, 62 BGT, 63 LBGT, 63 BHI, 64 LBHI, 64 BHS, 65 LBHS, 65 BITA, 66 BITB, 66 BLE, 67 LBLE, 67 BLO, 68 LBLO, 68 BLS, 69 LBLS, 69 BLT, 70 LBLT, 70 BMI, 71 LBMI, 71 BNE, 72 LBNE, 72 BPL, 73 LBPL, 73 BRA, 74 LBRA, 74 BRN, 75 LBRN, 75 BSR, 76 LBSR, 76 BVC, 77 LBVC, 77 BVS, 78 LBVS, 78 CLR, 79 CMPA, 80 CMPB, 80 CMPD, 81 CMPX, 81 CMPY, 81 CMPU, 81 CMPS, 81 COM, 82 COMA, 82 COMB, 82 CWAI, 83 DDA, 84 DEC, 85 DECA, 85 DECB, 85 EORA, 86 EORB, 86 EXG, 87 INC, 88 INCA, 88 INCB, 88 JMP, 89 JSR, 90 LDA, 91 LDB, 91 LDD, 92 LDX, 92 LDY, 92 LDS, 92 LDU, 92 LEAX, 93 LEAY, 93 LEAS, 93 LEAU, 93 LSL, 94 LSLA, 94 LSLB, 94 LSR, 95 LSRA, 95 LSRB, 95 MUL, 96 NEG, 97 NEGA, 97 NEGB, 97 NOP, 98 ORA, 99 ORB, 99 ORCC, 100 PSHS, 101 PSHU, 102 PULS, 103 PULU, 104 ROL, 105 ROLA, 105 ROLB, 105 ROR, 106 RORA, 106 RORB, 106 RTI, 107 RTS, 108 SBCA, 109 SBCB, 109 SEX, 110 STA, 111 STB, 
111 STD, 112 STX, 112 STY, 112 STS, 112 STU, 112 SUBA, 113 SUBB, 113 SUBD, 114 SWI, 115 SWI2, 116 SWI3, 117 SYNC, 118 TFR, 119 TST, 120 TSTA, 120 TSTB, 120 ================================================ FILE: pypcode/processors/MCS96/data/languages/MCS96.cspec ================================================ ================================================ FILE: pypcode/processors/MCS96/data/languages/MCS96.ldefs ================================================ Intel MCS-96 Microcontroller Family ================================================ FILE: pypcode/processors/MCS96/data/languages/MCS96.pspec ================================================ ================================================ FILE: pypcode/processors/MCS96/data/languages/MCS96.sinc ================================================ define endian=little; define alignment=1; define space RAM type=ram_space size=2 wordsize=1 default; define space register type=register_space size=1; ################################################################ # Registers ################################################################ define register offset=0x00 size=2 [ PSW ]; define register offset=0x10 size=2 [ PC ]; define register offset=0x18 size=2 [ SP ]; # Special registers define RAM offset=0x00 size=1 [ ZRlo ZRhi AD_resultlo AD_resulthi HSI_timelo HSI_timehi HSI_status SBUF INT_MASK INT_PEND TIMER1lo TIMER1hi TIMER2lo TIMER2hi PORT0 PORT1 PORT2 SP_STAT INT_PEND1 INT_MASK1 WSR IOS0 IOS1 IOS2 ]; define RAM offset=0x00 size=2 [ ZR AD_result HSI_time HSI_SBUF INTERRUPT TIMER1 TIMER2 PORT01 PORT2_SPS INT1 WSR_IOS0 IOS12 ]; define RAM offset=0x00 size=4 [ ZR_AD HSI INT_TIMER1 TIMER2_PORT01 PORT2_INT1 WSR_IOS012 ]; # Stack pointer define RAM offset=0x18 size=1 [ SPlo SPhi]; define RAM offset=0x18 size=2 [ SPR ]; define RAM offset=0x18 size=4 [ SPR1A ]; # Byte registers define RAM offset=0x1a size=1 [ R1A R1B R1C R1D R1E R1F R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 R2A R2B R2C R2D R2E R2F R30 R31 R32 R33 R34 
R35 R36 R37 R38 R39 R3A R3B R3C R3D R3E R3F R40 R41 R42 R43 R44 R45 R46 R47 R48 R49 R4A R4B R4C R4D R4E R4F R50 R51 R52 R53 R54 R55 R56 R57 R58 R59 R5A R5B R5C R5D R5E R5F R60 R61 R62 R63 R64 R65 R66 R67 R68 R69 R6A R6B R6C R6D R6E R6F R70 R71 R72 R73 R74 R75 R76 R77 R78 R79 R7A R7B R7C R7D R7E R7F R80 R81 R82 R83 R84 R85 R86 R87 R88 R89 R8A R8B R8C R8D R8E R8F R90 R91 R92 R93 R94 R95 R96 R97 R98 R99 R9A R9B R9C R9D R9E R9F RA0 RA1 RA2 RA3 RA4 RA5 RA6 RA7 RA8 RA9 RAA RAB RAC RAD RAE RAF RB0 RB1 RB2 RB3 RB4 RB5 RB6 RB7 RB8 RB9 RBA RBB RBC RBD RBE RBF RC0 RC1 RC2 RC3 RC4 RC5 RC6 RC7 RC8 RC9 RCA RCB RCC RCD RCE RCF RD0 RD1 RD2 RD3 RD4 RD5 RD6 RD7 RD8 RD9 RDA RDB RDC RDD RDE RDF RE0 RE1 RE2 RE3 RE4 RE5 RE6 RE7 RE8 RE9 REA REB REC RED REE REF RF0 RF1 RF2 RF3 RF4 RF5 RF6 RF7 RF8 RF9 RFA RFB RFC RFD RFE RFF R100 R101 R102 R103 R104 R105 R106 R107 R108 R109 R10A R10B R10C R10D R10E R10F R110 R111 R112 R113 R114 R115 R116 R117 R118 R119 R11A R11B R11C R11D R11E R11F R120 R121 R122 R123 R124 R125 R126 R127 R128 R129 R12A R12B R12C R12D R12E R12F R130 R131 R132 R133 R134 R135 R136 R137 R138 R139 R13A R13B R13C R13D R13E R13F R140 R141 R142 R143 R144 R145 R146 R147 R148 R149 R14A R14B R14C R14D R14E R14F R150 R151 R152 R153 R154 R155 R156 R157 R158 R159 R15A R15B R15C R15D R15E R15F R160 R161 R162 R163 R164 R165 R166 R167 R168 R169 R16A R16B R16C R16D R16E R16F R170 R171 R172 R173 R174 R175 R176 R177 R178 R179 R17A R17B R17C R17D R17E R17F R180 R181 R182 R183 R184 R185 R186 R187 R188 R189 R18A R18B R18C R18D R18E R18F R190 R191 R192 R193 R194 R195 R196 R197 R198 R199 R19A R19B R19C R19D R19E R19F R1A0 R1A1 R1A2 R1A3 R1A4 R1A5 R1A6 R1A7 R1A8 R1A9 R1AA R1AB R1AC R1AD R1AE R1AF R1B0 R1B1 R1B2 R1B3 R1B4 R1B5 R1B6 R1B7 R1B8 R1B9 R1BA R1BB R1BC R1BD R1BE R1BF R1C0 R1C1 R1C2 R1C3 R1C4 R1C5 R1C6 R1C7 R1C8 R1C9 R1CA R1CB R1CC R1CD R1CE R1CF R1D0 R1D1 R1D2 R1D3 R1D4 R1D5 R1D6 R1D7 R1D8 R1D9 R1DA R1DB R1DC R1DD R1DE R1DF R1E0 R1E1 R1E2 R1E3 R1E4 R1E5 R1E6 R1E7 R1E8 R1E9 R1EA R1EB R1EC 
R1ED R1EE R1EF R1F0 R1F1 R1F2 R1F3 R1F4 R1F5 R1F6 R1F7 R1F8 R1F9 R1FA R1FB R1FC R1FD R1FE R1FF ]; # Word registers define RAM offset=0x1a size=2 [ RW1A RW1C RW1E RW20 RW22 RW24 RW26 RW28 RW2A RW2C RW2E RW30 RW32 RW34 RW36 RW38 RW3A RW3C RW3E RW40 RW42 RW44 RW46 RW48 RW4A RW4C RW4E RW50 RW52 RW54 RW56 RW58 RW5A RW5C RW5E RW60 RW62 RW64 RW66 RW68 RW6A RW6C RW6E RW70 RW72 RW74 RW76 RW78 RW7A RW7C RW7E RW80 RW82 RW84 RW86 RW88 RW8A RW8C RW8E RW90 RW92 RW94 RW96 RW98 RW9A RW9C RW9E RWA0 RWA2 RWA4 RWA6 RWA8 RWAA RWAC RWAE RWB0 RWB2 RWB4 RWB6 RWB8 RWBA RWBC RWBE RWC0 RWC2 RWC4 RWC6 RWC8 RWCA RWCC RWCE RWD0 RWD2 RWD4 RWD6 RWD8 RWDA RWDC RWDE RWE0 RWE2 RWE4 RWE6 RWE8 RWEA RWEC RWEE RWF0 RWF2 RWF4 RWF6 RWF8 RWFA RWFC RWFE RW100 RW102 RW104 RW106 RW108 RW10A RW10C RW10E RW110 RW112 RW114 RW116 RW118 RW11A RW11C RW11E RW120 RW122 RW124 RW126 RW128 RW12A RW12C RW12E RW130 RW132 RW134 RW136 RW138 RW13A RW13C RW13E RW140 RW142 RW144 RW146 RW148 RW14A RW14C RW14E RW150 RW152 RW154 RW156 RW158 RW15A RW15C RW15E RW160 RW162 RW164 RW166 RW168 RW16A RW16C RW16E RW170 RW172 RW174 RW176 RW178 RW17A RW17C RW17E RW180 RW182 RW184 RW186 RW188 RW18A RW18C RW18E RW190 RW192 RW194 RW196 RW198 RW19A RW19C RW19E RW1A0 RW1A2 RW1A4 RW1A6 RW1A8 RW1AA RW1AC RW1AE RW1B0 RW1B2 RW1B4 RW1B6 RW1B8 RW1BA RW1BC RW1BE RW1C0 RW1C2 RW1C4 RW1C6 RW1C8 RW1CA RW1CC RW1CE RW1D0 RW1D2 RW1D4 RW1D6 RW1D8 RW1DA RW1DC RW1DE RW1E0 RW1E2 RW1E4 RW1E6 RW1E8 RW1EA RW1EC RW1EE RW1F0 RW1F2 RW1F4 RW1F6 RW1F8 RW1FA RW1FC RW1FE ]; # Double-word registers define RAM offset=0x1c size=4 [ RL1C RL20 RL24 RL28 RL2C RL30 RL34 RL38 RL3C RL40 RL44 RL48 RL4C RL50 RL54 RL58 RL5C RL60 RL64 RL68 RL6C RL70 RL74 RL78 RL7C RL80 RL84 RL88 RL8C RL90 RL94 RL98 RL9C RLA0 RLA4 RLA8 RLAC RLB0 RLB4 RLB8 RLBC RLC0 RLC4 RLC8 RLCC RLD0 RLD4 RLD8 RLDC RLE0 RLE4 RLE8 RLEC RLF0 RLF4 RLF8 RLFC RL100 RL104 RL108 RL10C RL110 RL114 RL118 RL11C RL120 RL124 RL128 RL12C RL130 RL134 RL138 RL13C RL140 RL144 RL148 RL14C RL150 RL154 RL158 RL15C RL160 RL164 RL168 
RL16C RL170 RL174 RL178 RL17C RL180 RL184 RL188 RL18C RL190 RL194 RL198 RL19C RL1A0 RL1A4 RL1A8 RL1AC RL1B0 RL1B4 RL1B8 RL1BC RL1C0 RL1C4 RL1C8 RL1CC RL1D0 RL1D4 RL1D8 RL1DC RL1E0 RL1E4 RL1E8 RL1EC RL1F0 RL1F4 RL1F8 RL1FC ]; # Individual status bits within the Program Status Word @define Z "PSW[7,1]" # Zero Flag @define N "PSW[6,1]" # Negative Flag @define V "PSW[5,1]" # Overflow Flag @define VT "PSW[4,1]" # Overflow Trap Flag @define C "PSW[3,1]" # Carry Flag @define PSE "PSW[2,1]" # Peripheral Transaction Server flag @define I "PSW[1,1]" # global Interrupt disable bit @define ST "PSW[0,1]" # STicky bit Flag ################################################################ # Tokens ################################################################ # All instructions have a single-byte opcode except for a # handful of multiplication/division instructions which have # a two-byte op-code with the first byte as 'FE' define token opbyte (8) op8 = (0,7) op6 = (2,7) op5 = (3,7) cond = (0,3) op4 = (4,7) aa = (0,1) bitno = (0,2) highb = (4,7) imm8 = (0,7) signed # immediate simm8 = (0,7) signed # immediate baop = (0,7) # byte register breg8 = (0,7) # byte register dbreg = (0,7) # byte register waop = (0,7) # word register wreg8 = (0,7) # word register dwreg = (0,7) # word register lreg = (0,7) # long/double register dlreg = (0,7) # long/double register imm7 = (1,7) iwreg7 = (1,7) addbit8 = (0,0) ; define token opword (16) imm16 = ( 0, 15 ) disp16 = (0,15) signed op16 = (3,7) jmp11_hi = (0,2) signed #relative offset jmp11_lo = (8,15) ; attach variables [ baop breg8 dbreg ] [ ZRlo ZRhi AD_resultlo AD_resulthi HSI_timelo HSI_timehi HSI_status SBUF INT_MASK INT_PEND TIMER1lo TIMER1hi TIMER2lo TIMER2hi PORT0 PORT1 PORT2 SP_STAT INT_PEND1 INT_MASK1 WSR IOS0 IOS1 IOS2 SPlo SPhi R1A R1B R1C R1D R1E R1F R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 R2A R2B R2C R2D R2E R2F R30 R31 R32 R33 R34 R35 R36 R37 R38 R39 R3A R3B R3C R3D R3E R3F R40 R41 R42 R43 R44 R45 R46 R47 R48 R49 R4A R4B R4C R4D 
R4E R4F R50 R51 R52 R53 R54 R55 R56 R57 R58 R59 R5A R5B R5C R5D R5E R5F R60 R61 R62 R63 R64 R65 R66 R67 R68 R69 R6A R6B R6C R6D R6E R6F R70 R71 R72 R73 R74 R75 R76 R77 R78 R79 R7A R7B R7C R7D R7E R7F R80 R81 R82 R83 R84 R85 R86 R87 R88 R89 R8A R8B R8C R8D R8E R8F R90 R91 R92 R93 R94 R95 R96 R97 R98 R99 R9A R9B R9C R9D R9E R9F RA0 RA1 RA2 RA3 RA4 RA5 RA6 RA7 RA8 RA9 RAA RAB RAC RAD RAE RAF RB0 RB1 RB2 RB3 RB4 RB5 RB6 RB7 RB8 RB9 RBA RBB RBC RBD RBE RBF RC0 RC1 RC2 RC3 RC4 RC5 RC6 RC7 RC8 RC9 RCA RCB RCC RCD RCE RCF RD0 RD1 RD2 RD3 RD4 RD5 RD6 RD7 RD8 RD9 RDA RDB RDC RDD RDE RDF RE0 RE1 RE2 RE3 RE4 RE5 RE6 RE7 RE8 RE9 REA REB REC RED REE REF RF0 RF1 RF2 RF3 RF4 RF5 RF6 RF7 RF8 RF9 RFA RFB RFC RFD RFE RFF ]; attach variables [ waop wreg8 dwreg ] [ ZR _ AD_result _ HSI_time _ HSI_SBUF _ INTERRUPT _ TIMER1 _ TIMER2 _ PORT01 _ PORT2_SPS _ INT1 _ WSR_IOS0 _ IOS12 _ SP _ RW1A _ RW1C _ RW1E _ RW20 _ RW22 _ RW24 _ RW26 _ RW28 _ RW2A _ RW2C _ RW2E _ RW30 _ RW32 _ RW34 _ RW36 _ RW38 _ RW3A _ RW3C _ RW3E _ RW40 _ RW42 _ RW44 _ RW46 _ RW48 _ RW4A _ RW4C _ RW4E _ RW50 _ RW52 _ RW54 _ RW56 _ RW58 _ RW5A _ RW5C _ RW5E _ RW60 _ RW62 _ RW64 _ RW66 _ RW68 _ RW6A _ RW6C _ RW6E _ RW70 _ RW72 _ RW74 _ RW76 _ RW78 _ RW7A _ RW7C _ RW7E _ RW80 _ RW82 _ RW84 _ RW86 _ RW88 _ RW8A _ RW8C _ RW8E _ RW90 _ RW92 _ RW94 _ RW96 _ RW98 _ RW9A _ RW9C _ RW9E _ RWA0 _ RWA2 _ RWA4 _ RWA6 _ RWA8 _ RWAA _ RWAC _ RWAE _ RWB0 _ RWB2 _ RWB4 _ RWB6 _ RWB8 _ RWBA _ RWBC _ RWBE _ RWC0 _ RWC2 _ RWC4 _ RWC6 _ RWC8 _ RWCA _ RWCC _ RWCE _ RWD0 _ RWD2 _ RWD4 _ RWD6 _ RWD8 _ RWDA _ RWDC _ RWDE _ RWE0 _ RWE2 _ RWE4 _ RWE6 _ RWE8 _ RWEA _ RWEC _ RWEE _ RWF0 _ RWF2 _ RWF4 _ RWF6 _ RWF8 _ RWFA _ RWFC _ RWFE _ ]; attach variables [iwreg7] [ ZR AD_result HSI_time HSI_SBUF INTERRUPT TIMER1 TIMER2 PORT01 PORT2_SPS INT1 WSR_IOS0 IOS12 SP RW1A RW1C RW1E RW20 RW22 RW24 RW26 RW28 RW2A RW2C RW2E RW30 RW32 RW34 RW36 RW38 RW3A RW3C RW3E RW40 RW42 RW44 RW46 RW48 RW4A RW4C RW4E RW50 RW52 RW54 RW56 RW58 RW5A RW5C RW5E RW60 RW62 RW64 
RW66 RW68 RW6A RW6C RW6E RW70 RW72 RW74 RW76 RW78 RW7A RW7C RW7E RW80 RW82 RW84 RW86 RW88 RW8A RW8C RW8E RW90 RW92 RW94 RW96 RW98 RW9A RW9C RW9E RWA0 RWA2 RWA4 RWA6 RWA8 RWAA RWAC RWAE RWB0 RWB2 RWB4 RWB6 RWB8 RWBA RWBC RWBE RWC0 RWC2 RWC4 RWC6 RWC8 RWCA RWCC RWCE RWD0 RWD2 RWD4 RWD6 RWD8 RWDA RWDC RWDE RWE0 RWE2 RWE4 RWE6 RWE8 RWEA RWEC RWEE RWF0 RWF2 RWF4 RWF6 RWF8 RWFA RWFC RWFE ]; attach variables [ lreg dlreg ] [ ZR_AD _ _ _ HSI _ _ _ INT_TIMER1 _ _ _ TIMER2_PORT01 _ _ _ PORT2_INT1 _ _ _ WSR_IOS012 _ _ _ SPR1A _ _ _ RL1C _ _ _ RL20 _ _ _ RL24 _ _ _ RL28 _ _ _ RL2C _ _ _ RL30 _ _ _ RL34 _ _ _ RL38 _ _ _ RL3C _ _ _ RL40 _ _ _ RL44 _ _ _ RL48 _ _ _ RL4C _ _ _ RL50 _ _ _ RL54 _ _ _ RL58 _ _ _ RL5C _ _ _ RL60 _ _ _ RL64 _ _ _ RL68 _ _ _ RL6C _ _ _ RL70 _ _ _ RL74 _ _ _ RL78 _ _ _ RL7C _ _ _ RL80 _ _ _ RL84 _ _ _ RL88 _ _ _ RL8C _ _ _ RL90 _ _ _ RL94 _ _ _ RL98 _ _ _ RL9C _ _ _ RLA0 _ _ _ RLA4 _ _ _ RLA8 _ _ _ RLAC _ _ _ RLB0 _ _ _ RLB4 _ _ _ RLB8 _ _ _ RLBC _ _ _ RLC0 _ _ _ RLC4 _ _ _ RLC8 _ _ _ RLCC _ _ _ RLD0 _ _ _ RLD4 _ _ _ RLD8 _ _ _ RLDC _ _ _ RLE0 _ _ _ RLE4 _ _ _ RLE8 _ _ _ RLEC _ _ _ RLF0 _ _ _ RLF4 _ _ _ RLF8 _ _ _ RLFC _ _ _ ]; immed8: imm8 is imm8 { export *[const]:1 imm8; } simmed8: simm8 is simm8 { export *[const]:1 simm8; } immed16: imm16 is imm16 { export *[const]:2 imm16; } baop8: baop is baop & baop=0 { local tmp:1 = 0:1; export tmp; } baop8: baop is baop & baop=1 { local tmp:1 = 0:1; export tmp; } baop8: baop is baop { export baop; } waop16: waop is waop & waop=0 { local tmp:2 = 0:2; export tmp; } waop16: waop is waop { export waop; } iwreg: iwreg7 is iwreg7 & iwreg7=0 { local tmp:2 = 0:2; export tmp; } iwreg: iwreg7 is iwreg7 { export iwreg7; } breg: breg8 is breg8 & breg8=0 { local tmp:1 = 0:1; export tmp; } breg: breg8 is breg8 & breg8=1 { local tmp:1 = 0:1; export tmp; } breg: breg8 is breg8 { export breg8; } wreg: wreg8 is wreg8 & wreg8=0 { local tmp:2 = 0:2; export tmp; } wreg: wreg8 is wreg8 { export wreg8; } # See reference manual pp. 
29-30 and note 1 on page 113 # 1-byte operands oper8: baop8 is aa=0x0; baop8 { export baop8; } #direct oper8: "#"immed8 is aa=0x1; immed8 { export immed8; } #immediate oper8: "["iwreg"]" is aa=0x2; iwreg & addbit8=0 { local tmp = iwreg; export *[RAM]:1 tmp; } #indirect oper8: "["iwreg"]+" is aa=0x2; iwreg & addbit8=1 { local tmp = iwreg; iwreg = iwreg + 1; export *[RAM]:1 tmp; } #indirect+ oper8: simmed8"["iwreg"]" is aa=0x3; iwreg & addbit8=0; simmed8 { local tmp = iwreg; tmp = tmp + sext(simmed8); export *[RAM]:1 tmp; } #indexed short oper8: immed16", LOOKUP["iwreg"]" is aa=0x3; iwreg & addbit8=1; immed16 { local tmp = iwreg; tmp = tmp + immed16; export *[RAM]:1 tmp; } #indexed long # 2-byte operands oper16: waop16 is aa=0x0; waop16 { export waop16; } #direct oper16: "#"immed16 is aa=0x1; immed16 { export immed16; } #immediate oper16: "["iwreg"]" is aa=0x2; iwreg & addbit8=0 { local tmp = iwreg; export *[RAM]:2 tmp; } #indirect oper16: "["iwreg"]+" is aa=0x2; iwreg & addbit8=1 { local tmp = iwreg; iwreg = iwreg + 2; export *[RAM]:2 tmp; } #indirect oper16: simmed8"["iwreg"]" is aa=0x3; iwreg & addbit8=0; simmed8 { local tmp = iwreg; tmp = tmp + sext(simmed8); export *[RAM]:2 tmp; } #indexed short oper16: immed16", TABLE["iwreg"]" is aa=0x3 ; iwreg & addbit8=1; immed16 { local tmp = iwreg; tmp = tmp + immed16; export *[RAM]:2 tmp; } #indexed long # range -128 through +127 jmpdest: reloc is simm8 [ reloc = inst_next + simm8; ] { export *:1 reloc; } # range -1023 through 1024 jmpdest11: reloc is jmp11_hi & jmp11_lo [reloc = inst_next + ((jmp11_hi << 8) | jmp11_lo);] { export *:2 reloc; } jmpdest16: reloc is disp16 [reloc = inst_next + disp16;] { export *:2 reloc; } ################################################################ # Pseudo Instructions ################################################################ #define pcodeop blockMove; define pcodeop idlePowerdown; define pcodeop normalize; ################################################################ # 
Macros ################################################################ macro push(val) { SP = SP - 2; *:2 SP = val; } macro pop(val) { val = *:2 SP; SP = SP + 2; } macro resetFlags() { PSW = 0; } macro resultFlags(res) { $(Z) = (res == 0); $(N) = (res s< 0); $(C) = 0; $(V) = 0; } macro additionFlags(op1, op2, res) { $(Z) = (res == 0); $(N) = (res s< 0); $(C) = carry(op1, op2); $(V) = scarry(op1, op2); $(VT) = $(V) | $(VT); } macro subtractFlags(op1, op2, res) { $(N) = (res s< 0); $(Z) = (res == 0); $(C) = (op1 < op2) == 0; $(V) = sborrow(op1, op2); $(VT) = $(VT) | $(V); } macro stickyFlagW(source, shift) { local mask:2 = -1; mask = (mask >> (16 - shift + 1)) * zext(shift > 1); local result:2 = source & mask; $(ST) = (result != 0); } macro stickyFlagB(source, shift) { local mask:1 = -1; mask = (mask >> (8 - shift + 1)) * (shift > 1); local result:1 = source & mask; $(ST) = (result != 0); } macro stickyFlagL(source, shift) { local mask:4 = -1; mask = (mask >> (32 - shift + 1)) * zext(shift > 1); local result:4 = source & mask; $(ST) = (result != 0); } macro blockMove(ptrs, cntreg) { local tsptr:4 = (ptrs & 0xffff0000) >> 0x10; local srcPtr:2 = tsptr(2); local dstPtr:2 = ptrs(2); local cnt = cntreg; local data:2 = *srcPtr; *dstPtr = data; srcPtr = srcPtr + 2; dstPtr = dstPtr + 2; cnt = cnt - 1; if cnt != 0 goto ; ptrs = zext(dstPtr) | (zext(srcPtr) << 0x10); } macro setShiftLeftCarryFlag(shiftee,amount) { shifting:1 = amount != 0; local tmp = ((shiftee >> (16-amount)) & 1); $(C) = (shifting & tmp:1); } macro setShiftRightCarryFlag(shiftee,amount) { shifting:1 = amount != 0; local tmp = (shiftee >> (amount-1)) & 1; $(C) = (shifting & tmp:1); } macro setSignedShiftRightCarryFlag(shiftee,amount) { shifting:1 = amount != 0; local tmp = (shiftee s>> (amount-1)) & 1; $(C) = (shifting & tmp:1); } ################################################################ # Constructors ################################################################ # 2-op :ADD wreg, oper16 is 
op6=0x19 ... & oper16; wreg { local tmpD = wreg + oper16; additionFlags(wreg, oper16, tmpD); wreg = tmpD; } # 3 op :ADD dwreg, wreg, oper16 is op6=0x11 ... & oper16; wreg; dwreg { local tmpD = wreg + oper16; dwreg = tmpD; additionFlags(wreg, oper16, tmpD); } # 2-op :ADDB breg, oper8 is op6=0x1d ... & oper8; breg { local tmp = breg + oper8; additionFlags(breg, oper8, tmp); breg = tmp; } :ADDB dbreg, breg, oper8 is op6=0x15 ... & oper8; breg; dbreg { local tmp = breg + oper8; dbreg = tmp; additionFlags(breg, oper8, tmp); } :ADDC wreg, oper16 is op6=0x29 ... & oper16; wreg { local tmp = oper16 + zext($(C)); local res = wreg + tmp; local oldz = $(Z); additionFlags(wreg, tmp, res); $(Z) = oldz & $(Z); # only cleared, not set wreg = res; } :ADDCB breg, oper8 is op6=0x2d ... & oper8; breg { local tmp = oper8 + $(C); local res =breg + tmp; local oldz = $(Z); additionFlags(breg, tmp, res); $(Z) = oldz & $(Z); # only cleared, not set breg = res; } # 2-op :AND wreg, oper16 is op6=0x18 ... & oper16; wreg { wreg = wreg & oper16; resultFlags(wreg); } # 3-op :AND dwreg, wreg, oper16 is op6=0x10 ... & oper16; wreg; dwreg { dwreg = wreg & oper16; resultFlags(dwreg); } :ANDB breg, oper8 is op6=0x1c ... & oper8; breg { breg = breg & oper8; resultFlags(breg); } :ANDB dbreg, breg, oper8 is op6=0x14 ... & oper8; breg; dbreg { dbreg = breg & oper8; resultFlags(dbreg); } :BMOV lreg, wreg is op8=0xc1; wreg ; lreg { blockMove(lreg, wreg); } :BMOVI lreg, wreg is op8=0xcd; wreg; lreg { blockMove(lreg, wreg); } :BR [ wreg ] is op8=0xe3; wreg { goto [wreg]; } :CLR wreg is op8=0x1; wreg { wreg = 0; $(Z) = 1; $(N) = 0; $(C) = 0; $(V) = 0; } :CLRB breg is op8=0x11; breg { breg = 0; $(Z) = 1; $(N) = 0; $(C) = 0; $(V) = 0; } :CLRC is op8=0xf8 { $(C) = 0; } :CLRVT is op8=0xfc { $(VT) = 0; } :CMP wreg, oper16 is op6=0x22 ... & oper16; wreg { op1:2 = oper16; tmp:2 = wreg - op1; subtractFlags(wreg, op1, tmp); } :CMPB breg, oper8 is op6=0x26 ... 
& oper8; breg { op1:1 = oper8; tmp:1 = breg - op1; subtractFlags(breg, op1, tmp); }
@if defined(C196KB) || defined(C196KC)
# Compare long registers (flags only).
# NOTE(review): $(C) is computed with scarry() here, unlike
# subtractFlags() which derives carry from an unsigned compare - confirm
# against the 8XC196 user's manual.
:CMPL dlreg, lreg is op8=0xc5; lreg; dlreg { op1:4 = lreg; tmp:4 = dlreg - op1; $(N) = (tmp s< 0); $(Z) = (tmp == 0); $(C) = scarry(dlreg, op1); $(V) = (((dlreg & ~op1 & ~tmp) | (~dlreg & op1 & tmp)) & 0x80000000 ) != 0; $(VT) = $(VT) | $(V); }
@endif
:DEC wreg is op8=0x05; wreg { local tmp = wreg - 1; subtractFlags(wreg, 1, tmp); wreg = tmp; }
:DECB breg is op8=0x15; breg { local tmp = breg - 1; subtractFlags(breg, 1, tmp); breg = tmp; }
:DI is op8=0xfa { $(I) = 0; }
# Signed divide: 32-bit dividend in lreg, 16-bit divisor.  Result packs
# the 16-bit quotient into the HIGH word of lreg and the 16-bit
# remainder into the LOW word.
# FIX: the original wrote "lreg = zext(div:2 << 16) | rem" - shifting a
# 2-byte value left by 16 always yields 0, and OR-ing the raw 4-byte
# (sign-extended) remainder clobbered the quotient word.  Widen the
# quotient first, then shift, and truncate the remainder to its field.
:DIV lreg, oper16 is op8=0xfe; op6=0x23 ... & oper16; lreg
{
    local num = lreg;
    local den = oper16;
    local div = num s/ sext(den);
    local rem = num s% sext(den);
    lreg = (zext(div:2) << 16) | zext(rem:2);
@if defined(C196KB) || defined(C196KC)
    # NOTE(review): the lower bound "div s< 0x8001" compares against a
    # positive constant; -0x8000 was probably intended - confirm.
    $(V) = ((div s> 0x7fff) | (div s< 0x8001));
@endif
    $(VT) = $(VT) | $(V);
}
# Signed byte divide: quotient in the high byte of wreg, remainder in
# the low byte.  Same widen-then-shift fix as :DIV.
:DIVB wreg, oper8 is op8=0xfe; op6=0x27 ... & oper8; wreg
{
    local num = wreg;
    local den = oper8;
    local div = num s/ sext(den);
    local rem = num s% sext(den);
    wreg = (zext(div:1) << 8) | zext(rem:1);
@if defined(C196KB) || defined(C196KC)
    # NOTE(review): "div s< 0x81" - lower bound was probably meant to be
    # -0x80; confirm.
    $(V) = ((div s> 0x7f) | (div s< 0x81));
@endif
    $(VT) = $(VT) | $(V);
}
# Unsigned divide: quotient in the high word, remainder in the low word.
# Same widen-then-shift fix as :DIV.
:DIVU lreg, oper16 is op6=0x23 ... & oper16; lreg
{
    local num = lreg;
    local den = oper16;
    local div = num / zext(den);
    local rem = num % zext(den);
    lreg = (zext(div:2) << 16) | zext(rem:2);
    $(V) = (div > 0xffff);
    $(VT) = $(VT) | $(V);
}
# Unsigned byte divide: quotient in the high byte, remainder in the low
# byte.  Same widen-then-shift fix as :DIV.
:DIVUB wreg, oper8 is op6=0x27 ... & oper8; wreg
{
    local num = wreg;
    local den = oper8;
    local div = num / zext(den);
    local rem = num % zext(den);
    wreg = (zext(div:1) << 8) | zext(rem:1);
    $(V) = (div > 0xff);
    $(VT) = $(VT) | $(V);
}
:DJNZ breg, jmpdest is op8=0xe0; breg; jmpdest { breg = breg - 1; if (breg != 0) goto jmpdest; }
@if defined(C196KB) || defined(C196KC)
:DJNZW wreg, jmpdest is op8=0xe1; wreg; jmpdest { wreg = wreg - 1; if (wreg != 0) goto jmpdest; }
@endif
@if defined(C196KC)
:DPTS is op8=0xec { $(PSE) = 0; }
@endif
:EI is op8=0xfb { $(I) = 1; }
@if defined(C196KC)
:EPTS is op8=0xed { $(PSE) = 1; }
@endif
# Sign-extend the low word of lreg into all 32 bits (integer -> long).
# FIX: the original computed "tmp:4 = lreg & 0xffff; lreg = sext(tmp);"
# - sign-extending a 4-byte value into a 4-byte register extends
# nothing, so negative words ended up zero-extended.  Truncate to the
# source width first, then sign-extend.
:EXT lreg is op8=0x6; lreg { lreg = sext(lreg:2); resultFlags(lreg); }
# Sign-extend the low byte of wreg into 16 bits (byte -> integer).
# Same fix as :EXT.
:EXTB wreg is op8=0x16; wreg { wreg = sext(wreg:1); resultFlags(wreg); }
:INC wreg is op8=0x7; wreg { local tmp = wreg + 1; additionFlags(wreg, 1, tmp); wreg = tmp; }
:INCB breg is op8=0x17; breg { local tmp = breg + 1; additionFlags(breg, 1, tmp); breg = tmp; }
@if defined(C196KB) || defined(C196KC)
:IDLPD "#"immed8 is op8=0xf6; immed8 { idlePowerdown(); $(Z) = 0; $(N) = 0; $(C) = 0; $(V) = 0; $(VT) = 0; $(ST) = 0; }
@endif
# Conditional jumps on a single bit of a byte register.
:JBC breg, bitno, jmpdest is op5=0x6 & bitno; breg; jmpdest { local bit = (breg >> bitno) & 0x1; if (bit == 0) goto jmpdest; }
:JBS breg, bitno, jmpdest is op5=0x7 & bitno; breg; jmpdest { local bit = (breg >> bitno) & 0x1; if (bit == 1) goto jmpdest; }
# Condition-code subtable for the J<cc> family; exports a 1-byte boolean.
# Note JNVT/JVT clear the VT flag as a side effect of testing it.
cc: "NST" is cond=0 { tmp:1 = ($(ST) == 0); export tmp; }
cc: "NH" is cond=1 { tmp:1 = (($(C) == 0) | ($(Z) == 1)); export tmp; }
cc: "GT" is cond=2 { tmp:1 = (($(N) == 0) & ($(Z) == 0)); export tmp; }
cc: "NC" is cond=3 { tmp:1 = ($(C) == 0); export tmp; }
cc: "NVT" is cond=4 { tmp:1 = ($(VT) == 0); $(VT) = 0; export tmp; }
cc: "NV" is cond=5 { tmp:1 = ($(V) == 0); export tmp; }
cc: "GE" is cond=6 { tmp:1 = ($(N) == 0); export tmp; }
cc: "NE" is cond=7 { tmp:1 = ($(Z) == 0); export tmp; }
cc: "ST" is cond=8 { tmp:1 = ($(ST) == 1); export tmp; }
cc: "H" is cond=9 { tmp:1 = (($(C) == 1) & ($(Z) == 0)); export tmp; }
cc:
"LE" is cond=10 { tmp:1 = (($(N) == 1) | ($(Z) == 1)); export tmp; } cc: "C" is cond=11 { tmp:1 = ($(C) == 1); export tmp; } cc: "VT" is cond=12 { tmp:1 = ($(VT) == 1); $(VT) = 0; export tmp; } cc: "V" is cond=13 { tmp:1 = ($(V) == 1); export tmp; } cc: "LT" is cond=14 { tmp:1 = ($(N) == 1); export tmp; } cc: "E" is cond=15 { tmp:1 = ($(Z) == 1); export tmp; } :J^cc jmpdest is op4=0xd & cc; jmpdest {if (cc) goto jmpdest;} :LCALL jmpdest16 is op8=0xef; jmpdest16 { ret:2 = inst_next; push(ret); call jmpdest16; } :LD wreg, oper16 is op6=0x28 ... & oper16; wreg { wreg = oper16; } :LDB breg, oper8 is op6=0x2c ... & oper8; breg { breg = oper8; } :LDBSE wreg, oper8 is op6=0x2f ... & oper8; wreg { wreg = sext(oper8); } :LDBZE wreg, oper8 is op6=0x2b ... & oper8; wreg { wreg = zext(oper8); } :LJMP jmpdest16 is op8=0xe7; jmpdest16 { goto jmpdest16; } # 2-op :MUL lreg, oper16 is op8=0xfe; op6=0x1b ... & oper16; lreg { tmpD:4 = sext(lreg:2); tmpS:4 = sext(oper16); tmpD = tmpD * tmpS; lreg = tmpD; } # 3-op :MUL lreg, wreg, oper16 is op8=0xfe; op6=0x13 ... & oper16; wreg; lreg { tmpD:4 = sext(wreg); tmpS:4 = sext(oper16); tmpD = tmpD * tmpS; lreg = tmpD; } # 2-op :MULB wreg, oper8 is op8=0xfe; op6=0x1f ... & oper8; wreg { tmpD:2 = sext(wreg:1); tmpS:2 = sext(oper8); tmpD = tmpD * tmpS; wreg = tmpD; } # 3-op :MULB wreg, breg, oper8 is op8=0xfe; op6=0x17 ... & oper8; breg; wreg { tmpD:2 = sext(breg); tmpS:2 = sext(oper8); tmpD = tmpD * tmpS; wreg = tmpD; } # 2-op :MULU lreg, oper16 is op6=0x1b ... & oper16; lreg { tmpD:4 = zext(lreg:2); tmpS:4 = zext(oper16); tmpD = tmpD * tmpS; lreg = tmpD; } # 3-op :MULU lreg, wreg, oper16 is op6=0x13 ... & oper16; wreg; lreg { tmpD:4 = zext(wreg); tmpS:4 = zext(oper16); tmpD = tmpD * tmpS; lreg = tmpD; } # 2-op :MULUB wreg, oper8 is op6=0x1f ... & oper8; wreg { tmpD:2 = zext(wreg:1); tmpS:2 = zext(oper8); tmpD = tmpD * tmpS; wreg = tmpD; } # 3-op :MULUB wreg, breg, oper8 is op6=0x17 ... 
& oper8; breg; wreg { local tmpD:2 = zext(breg); local tmpS:2 = zext(oper8); tmpD = tmpD * tmpS; wreg = tmpD; } :NEG wreg is op8=0x03; wreg { local tmp:2 = -wreg; local zero:2 = 0; subtractFlags(zero, wreg, tmp); wreg = tmp; } :NEGB breg is op8=0x13; breg { local tmp:1 = -breg; local zero:1 = 0; subtractFlags(zero, breg, tmp); breg = tmp; } :NOP is op8=0xfd { } #NOP :NORML lreg, breg is op8=0xf; breg; lreg { #The LONG-INTEGER operand is normalized; i.e., it is shifted to the left until its #most significant bit is 1. If the most significant bit is still 0 after 31 shifts, the #process stops and the zero flag is set. The number of shifts actually performed #is stored in the second operand. normalize(lreg, breg); $(Z) = (lreg == 0); $(C) = 0; # $(N) is undefined } :NOT wreg is op8 = 0x2; wreg { wreg = ~wreg; resultFlags(wreg); } :NOTB breg is op8=0x12; breg { breg = ~breg; resultFlags(breg); } :OR wreg, oper16 is op6=0x20 ... & oper16; wreg { tmpD:2 = wreg; tmpS:2 = oper16; tmpD = tmpD | tmpS; wreg = tmpD; resultFlags(wreg); } :ORB breg, oper8 is op6=0x24 ... & oper8; breg { tmpD:1 = breg; tmpS:1 = oper8; tmpD = tmpD | tmpS; breg = tmpD; resultFlags(breg); } :POP oper16 is op6=0x33 ... & oper16 { local result:2 = 0; pop(result); oper16 = result; } @if defined(C196KB) || defined(C196KC) :POPA is op8=0xf5 { local result:2 = 0; pop(result); WSR = result:1 & 0xff; local resultHi = result >> 8; INT_MASK1 = resultHi:1; pop(result); INT_MASK = result:1 & 0xff; resultHi = result >> 8; PSW = resultHi:1; } @endif :POPF is op8=0xf3 { local result:2 = 0; pop(result); PSW = zext(result:1); local resultHi = result >> 8; INT_MASK = resultHi:1; } :PUSH oper16 is op6=0x32 ... 
& oper16 { val:2 = oper16; push(val); }

@if defined(C196KB) || defined(C196KC)
# PUSHA: save PSW/INT_MASK and INT_MASK1/WSR as two words, then clear flags.
# Fixed to mirror POPA, which pops INT_MASK1:WSR first and PSW:INT_MASK second;
# the previous code pushed INT_MASK in both words and never saved INT_MASK1,
# so a PUSHA/POPA pair corrupted WSR, INT_MASK1, INT_MASK and PSW.
:PUSHA is op8=0xf4 {
	local val:2 = (zext(PSW) << 8) | zext(INT_MASK);
	push(val);
	val = (zext(INT_MASK1) << 8) | zext(WSR);
	push(val);
	resetFlags();
}
@endif
:PUSHF is op8=0xf2 { val:2 = (zext(INT_MASK) << 8) | zext(PSW); push(val); resetFlags(); }
:RET is op8=0xf0 { local retDest:2 = 0; pop(retDest); return [retDest]; }
:RST is op8=0xff { resetFlags(); PC = 0x2080; goto [PC]; }
:SCALL jmpdest11 is op16=0x5 & jmpdest11 { ret:2 = inst_next; push(ret); call jmpdest11; }
:SETC is op8=0xf9 { $(C) = 1; }
:SHL wreg, "#"immed8 is op8=0x09; immed8 & (highb = 0x0); wreg { local source = wreg; local shift = immed8; setShiftLeftCarryFlag(source, shift); local res = source << shift; $(Z) = (res == 0); $(V) = 0; $(VT) = $(VT) | $(V); wreg = res; }
:SHL wreg, breg is op8=0x09; breg & (highb != 0x0); wreg { local source = wreg; local shift = breg; setShiftLeftCarryFlag(source, shift); local res = source << shift; $(Z) = (res == 0); $(V) = 0; $(VT) = $(VT) | $(V); wreg = res; }
:SHLB dbreg, "#"immed8 is op8=0x19; immed8 & (highb = 0x0); dbreg { local source = dbreg; local shift = immed8; setShiftLeftCarryFlag(source, shift); local res = source << shift; $(Z) = (res == 0); $(V) = 0; $(VT) = $(VT) | $(V); dbreg = res; }
:SHLB dbreg, breg is op8=0x19; breg & (highb != 0x0); dbreg { local source = dbreg; local shift = breg; setShiftLeftCarryFlag(source, shift); local res = source << shift; $(Z) = (res == 0); $(V) = 0; $(VT) = $(VT) | $(V); dbreg = res; }
:SHLL lreg, "#"immed8 is op8=0x0d; immed8 & (highb = 0x0); lreg { local source = lreg; local shift = immed8; setShiftLeftCarryFlag(source, shift); local res = source << shift; $(Z) = (res == 0); $(V) = 0; $(VT) = $(VT) | $(V); lreg = res; }
:SHLL lreg, breg is op8=0x0d; breg & (highb != 0x0); lreg { local source = lreg; local shift = breg; setShiftLeftCarryFlag(source, shift); local res = source << shift; $(Z) = (res == 0); $(V) = 0; $(VT) = $(VT) | $(V);
lreg = res; } :SHR wreg, "#"immed8 is op8=0x08; immed8 & (highb = 0x0); wreg { local source = wreg; local shift = immed8; $(ST) = 0; setShiftRightCarryFlag(source, shift); local res = source >> shift; $(Z) = (res == 0); $(N) = 0; $(V) = 0; wreg = res; stickyFlagW(source, shift); } :SHR wreg, breg is op8=0x08; breg & (highb != 0x0); wreg { local source = wreg; local shift = breg; setShiftRightCarryFlag(source, shift); local res = source >> shift; $(Z) = (res == 0); $(N) = 0; $(V) = 0; wreg = res; stickyFlagW(source, shift); } :SHRA wreg, "#"immed8 is op8=0x0a; immed8 & (highb = 0x0); wreg { local source = wreg; local shift = immed8; setSignedShiftRightCarryFlag(source, shift); local res = source s>> shift; $(Z) = (res == 0); $(N) = (res s< 0); $(V) = 0; wreg = res; stickyFlagW(source, shift); } :SHRA wreg, breg is op8=0x0a; breg & (highb != 0x0); wreg { local source = wreg; local shift = breg; setSignedShiftRightCarryFlag(source, shift); local res = source s>> shift; $(Z) = (res == 0); $(N) = (res s< 0); $(V) = 0; wreg = res; stickyFlagW(source, shift); } :SHRAB dbreg, "#"immed8 is op8=0x1a; immed8 & (highb = 0x0); dbreg { local source = dbreg; local shift = immed8; setSignedShiftRightCarryFlag(source, shift); local res = source s>> shift; $(Z) = (res == 0); $(N) = (res s< 0); $(V) = 0; dbreg = res; stickyFlagB(source, shift); } :SHRAB dbreg, breg is op8=0x1a; breg & (highb != 0x0); dbreg { local source = dbreg; local shift = breg; setSignedShiftRightCarryFlag(source, shift); local res = source s>> shift; $(Z) = (res == 0); $(N) = (res s< 0); $(V) = 0; dbreg = res; stickyFlagB(source, shift); } :SHRAL lreg, "#"immed8 is op8=0x0e; immed8 & (highb = 0x0); lreg { local source = lreg; local shift = immed8; setSignedShiftRightCarryFlag(source, shift); local res = source s>> shift; $(Z) = (res == 0); $(N) = (res s< 0); $(V) = 0; lreg = res; stickyFlagL(source, shift); } :SHRAL lreg, breg is op8=0x0e; breg & (highb != 0x0); lreg { local source = lreg; local shift = breg; 
setSignedShiftRightCarryFlag(source, shift); local res = source s>> shift; $(Z) = (res == 0); $(N) = (res s< 0); $(V) = 0; lreg = res; stickyFlagL(source, shift); }
:SHRB dbreg, "#"immed8 is op8=0x18; immed8 & (highb = 0x0); dbreg { local source = dbreg; local shift = immed8; setShiftRightCarryFlag(source, shift); local res = source >> shift; $(Z) = (res == 0); $(N) = 0; $(V) = 0; dbreg = res; stickyFlagB(source, shift); }
:SHRB dbreg, breg is op8=0x18; breg & (highb != 0x0); dbreg { local source = dbreg; local shift = breg; setShiftRightCarryFlag(source, shift); local res = source >> shift; $(Z) = (res == 0); $(N) = 0; $(V) = 0; dbreg = res; stickyFlagB(source, shift); }
:SHRL lreg, "#"immed8 is op8=0x0c; immed8 & (highb = 0x0); lreg { local source = lreg; local shift = immed8; setShiftRightCarryFlag(source, shift); local res = source >> shift; $(Z) = (res == 0); $(N) = 0; $(V) = 0; lreg = res; stickyFlagL(source, shift); }
:SHRL lreg, breg is op8=0x0c; breg & (highb != 0x0); lreg { local source = lreg; local shift = breg; setShiftRightCarryFlag(source, shift); local res = source >> shift; $(Z) = (res == 0); $(N) = 0; $(V) = 0; lreg = res; stickyFlagL(source, shift); }
:SJMP jmpdest11 is op16=0x4 & jmpdest11 { goto jmpdest11; }
:SKIP is op8=0x00 { local tmp:1 = 0; tmp = tmp; # avoid warning
} #2-byte NOP
:ST wreg, oper16 is op6=0x30 ... & oper16; wreg { oper16 = wreg; }
:STB breg, oper8 is op6=0x31 ... & oper8; breg { oper8 = breg; }
# 2-op
:SUB wreg, oper16 is op6=0x1a ... & oper16; wreg { local tmp = wreg - oper16; subtractFlags(wreg, oper16, tmp); wreg = tmp; }
# 3 op
:SUB dwreg, wreg, oper16 is op6=0x12 ... & oper16; wreg; dwreg { dwreg = wreg - oper16; subtractFlags(wreg, oper16, dwreg); }
# 2-op
# Fixed: the 2-operand byte subtract previously set the flags but never wrote
# the result back to breg, unlike the word-sized SUB above. SUBB stores
# breg - oper8 into breg.
:SUBB breg, oper8 is op6=0x1e ... & oper8; breg { local tmp = breg - oper8; subtractFlags(breg, oper8, tmp); breg = tmp; }
:SUBB dbreg, breg, oper8 is op6=0x16 ...
& oper8; breg; dbreg { dbreg = breg - oper8; subtractFlags(breg, oper8, dbreg); } :SUBC wreg, oper16 is op6=0x2a ... & oper16; wreg { local tmp = wreg - oper16 - zext(1 - $(C)); local oldz = $(Z); subtractFlags(wreg, oper16-zext(1-$(C)), tmp); wreg = tmp; $(Z) = oldz & $(Z); } :SUBCB breg, oper8 is op6=0x2e ... & oper8; breg { local tmp = breg - oper8 - zext(1 - $(C)); local oldz = $(Z); subtractFlags(breg, oper8-zext(1-$(C)), tmp); breg = tmp; $(Z) = oldz & $(Z); } @if defined(C196KC) :TIJMP dwreg, "["wreg"]" "#"immed8 is op8=0xe2; wreg; immed8; dwreg { local index = *wreg; local jmpOffset = zext(index & immed8); local tBase = dwreg; local destX = (jmpOffset << 1) + tBase; local jmpDest = *:2 destX; goto [jmpDest]; } @endif :TRAP is op8=0xf7 { ret:2 = inst_next; push(ret); trapv:2 = 0x2010; PC = *trapv; goto [PC]; } :XOR wreg, oper16 is op6=0x21 ... & oper16; wreg { tmpD:2 = wreg; tmpS:2 = oper16; tmpD = tmpD ^ tmpS; wreg = tmpD; resultFlags(wreg); } @if defined(C196KC) :XCH wreg, waop16 is op8=0x04; waop16; wreg { local tmp = wreg; wreg = waop16; waop16 = tmp; } :XCH wreg, immed8", TABLE["iwreg"]" is op8=0x0b; iwreg & addbit8=0; immed8; wreg { local tmp = iwreg; tmp = tmp + sext(immed8); local val = *[RAM]:2 tmp; local wreg_tmp = wreg; wreg = val; *[RAM]:2 tmp = wreg_tmp; } :XCH wreg, immed16", TABLE["iwreg"]" is op8=0x0b; iwreg & addbit8=1; immed16; wreg { local tmp = iwreg; tmp = tmp + immed16; local val = *[RAM]:2 tmp; local wreg_tmp = wreg; wreg = val; *[RAM]:2 tmp = wreg_tmp; } @endif @if defined(C196KC) :XCHB breg, baop8 is op8=0x14 ; baop8; breg { local tmp = breg; breg = baop8; baop8 = tmp; } :XCHB breg, immed8", TABLE["iwreg"]" is op8=0x1b ; iwreg & addbit8=0; immed8; breg { local tmp = iwreg; tmp = tmp + sext(immed8); local val = *[RAM]:1 tmp; local wreg_tmp = breg; breg = val; *[RAM]:1 tmp = wreg_tmp; } :XCHB breg, immed16", TABLE["iwreg"]" is op8=0x1b ; iwreg & addbit8=1; immed16; breg { local tmp = iwreg; tmp = tmp + sext(immed16); local val = 
*[RAM]:1 tmp; local wreg_tmp = breg; breg = val; *[RAM]:1 tmp = wreg_tmp; } @endif :XORB breg, oper8 is op6=0x25 ... & oper8; breg { tmpD:1 = breg; tmpS:1 = oper8; tmpD = tmpD ^ tmpS; breg = tmpD; resultFlags(breg); } ================================================ FILE: pypcode/processors/MCS96/data/languages/MCS96.slaspec ================================================ @define C196KB "1" @define C196KC "1" @include "MCS96.sinc" ================================================ FILE: pypcode/processors/MCS96/data/manuals/MCS96.idx ================================================ @1991_Intel_16-Bit_Embedded_Controller_Handbook.pdf[1991 Intel 16-Bit Embedded Controller Handbook] ADD, 175 ADDB, 176 ADDC, 177 ADDCB, 177 AND, 178 ANDB, 179 BMOV, 180 BMOVI, 181 BR, 182 CLR, 182 CLRB, 183 CLRC, 183 CLRVT, 184 CMP, 184 CMPB, 185 CMPL, 185 DEC, 186 DECB, 186 DI, 186 DIV, 187 DIVB 187 DIVU, 188 DIVUB, 188 DJNZ, 189 DJNZWM 189 DPTS, 190 EI, 190 EPTS, 191 EXT, 191 EXTB, 192 INC, 192 INCB, 193 IDLPD, 193 JBC, 194 JBS, 195 JC, 195 JE, 196 JGE, 196 JGT, 197 JH, 197 JLE, 198 JLT, 198 JNC, 199 JNE, 199 JNH, 200 JNST, 200 JNV, 201 JNVT, 201 JST, 202 JV, 202 JVT, 203 LCALL, 203 LD, 204 LDB, 204 LDBSE, 205 LDBZE, 205 LJMP, 206 MUL, 206 MULB, 207 MULU, 208 MULUB, 209 NEG, 210 NEGB, 211 NOP, 211 NORML, 212 NOT, 212 NOTB, 213 OR, 213 ORB, 214 POP, 214 POPA, 215 POPF, 215 PUSH, 216 PUSHA, 216 PUSHF, 217 RET, 217 RST, 218 SCALL, 218 SETC, 219 SHL, 219 SHLB, 220 SHLL, 221 SHR, 222 SHRA, 223 SHRAB, 224 SHRAL, 225 SHRB, 226 SHRL, 227 SJMP, 228 SKIP, 228 ST, 229 STB, 229 SUB, 230 SUBB, 231 SUBC, 232 SUBCB, 232 TIJMP, 233 TRAP, 234 XOR, 234 XCH, 235 XCHB, 235 XORB, 236 ================================================ FILE: pypcode/processors/MIPS/data/languages/MIPS.opinion ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips.dwarf ================================================ 
================================================ FILE: pypcode/processors/MIPS/data/languages/mips.ldefs ================================================ MIPS32 32-bit addresses, big endian, with mips16e MIPS32 32-bit addresses, little endian, with mips16e MIPS32 Release-6 32-bit addresses, big endian, with microMIPS MIPS32 Release-6 32-bit addresses, little endian, with microMIPS MIPS64 64-bit addresses, big endian, with mips16e MIPS64 64-bit addresses, little endian, with mips16e MIPS64 64-bit addresses, big endian, with microMIPS MIPS64 64-bit addresses, little endian, with microMIPS MIPS64 Release-6 64-bit addresses, big endian, with microMIPS MIPS64 Release-6 64-bit addresses, little endian, with microMIPS MIPS64 32-bit addresses, big endian, with mips16e MIPS64 32-bit addresses, little endian, with mips16e MIPS64 32-bit addresses, little endian, with microMIPS MIPS64 32-bit addresses, big endian, with microMIPS MIPS64 Release-6 big endian with 32 bit addressing and microMIPS MIPS64 Release-6 with 32-bit addresses, little endian, with microMIPS MIPS32 32-bit addresses, big endian, with microMIPS MIPS32 32-bit addresses, little endian, with microMIPS ================================================ FILE: pypcode/processors/MIPS/data/languages/mips.sinc ================================================ # MIPS Common specification file for 32 and 64-bit processors # Appropriate defines (MIPS32, MIPS64, MIPS64_32ADDRS) must be # specified before including this file # The following Coprocessor configuration is supported: # COP0: integrated CPU virtual memory and exception handler # COP1: Floating-point Unit (FPU) # COP2: # COP3: Used for MIPS64 FPU extended instructions (see COP1X) # # ################################ # # Notes for the elf header e_flags "secondary" field in the MIPS.opinion file.
# # "-" indicates don't care # # 0x5------- MIPS32 Release 1, 32-bit addresses, ABI is o32, FREGSIZE = 4, gcc -mips32 # Example: 0x50001001, secondary="1342181377" # # 0x6-----2- MIPS64 Release 1, 32-bit addresses, ABI is n32, FREGSIZE = 8, gcc -mips64 # # 0x6-----0- MIPS64 Release 1, 64-bit addresses, ABI is n64, FREGSIZE = 8, gcc -mips64 -mabi-64 # # 0x7------- MIPS32 Release 2, ABI is o32, gcc -mips32r2 to r5 # if 0x00000200 is set then FREGSIZE = 8 else FREGSIZE = 4 # 0x70001001, secondary="1879052289" # with mips16: 0x74001001 # # 0x8-----2- MIPS64 Release 2, 32-bit addresses, ABI is n32, FREGSIZE = 8, gcc -mips64r2 to r5 # 0x80000021, secondary="2147483681" # # 0x8-----0- MIPS64 Release 2, 64-bit addresses, ABI is n64, gcc -mips64r2 to r5 -mabi-64 # 0x80000001 # # 0x9------- MIPS32 Release 6, ABI is o32, gcc -mips32r6 # if 0x00000200 is set then FREGSIZE = 8 else FREGSIZE = 4 # # 0xa-----2- MIPS64 Release 6, 32-bit addresses, ABI is n32, FREGSIZE = 8, gcc -mips64r6 # 0xa0000421 # # 0xa-----0- MIPS64 Release 6, 64-bit addresses, ABI is n64, FREGSIZE = 8, gcc -mips64r6 -mabi-64 # # Masks: # # 0x04000000 MIPS-16 # # 0x02000000 MicroMIPS # # ################################ # # Notes about register names and function args: # # Function args are passed in a0 - a3, which are the same as $4 - $7, other args are on the stack # Floating point args are in f12 and f14 # # Function return values are stored in v0 (and v1 if the regs are 32-bit and return type is 64-bits) # v0 and v1 are the same as general purpose regs $2 and $3 # Floating point return values are in f0 (and f1 if needed for binding) # # $29 is the stack pointer sp # $30 is the frame pointer fp also called s8 # $31 is the return address ra # # ################################ # There is now support for single and double floating point instructions, with the following limitations: # # The PS paired single fmt1 format is not implemented. 
In paired single instructions, # the specified 64-bit floating point register operands are each considered as two separate # 32-bit floating point numbers, and they are processed in parallel with the same instruction. # (This is supposed to be the first microprocessor implementation of SIMD.) # # Only COP1 Floating point coprocessor unit 1 is supported (there is no support for unit 2) # # Some notes about MIPS Floating Point, derived from the FPU table on page 87 of the MIPS # Architecture Volume 1, 2014 # # FPU configuration is stored in a 32-bit read-only Floating Point Implementation Register (FIR), # this is also known as CP1 Control Register 0. # Bit 22, called F64 or also called the FR bit, is = 1 when you have 64-bit FPRs, # else if the FR bit = 0 then you have 32-bit FPRs # # The macro FREGSIZE = 4 for 32-bit FPRs, and = 8 for 64-bit FPRs # # Other info is in the FCSR register, CP1 Register 31. # Bit 23 is one condition code, and bits 25-31 is the FCC with other condition codes, # which are set after a compare. Note this FCRE register is removed in Release 6.
# # MIPS32 Release 1: # The FPU (floating point unit) has 32 32-bit FPRs # 64-bit floating point doubles are stored in even-odd pairs of FPRs # F64 = 0 # # MIPS32 Release 2 and later: # FPU has 32 64-bit FPRs # F64 = 1 # Use these gcc options to get 64-bit floating point instructions: -mips32r2 -mfp64 # (gcc default is 32-bit FPRs) # # MIPS64 Release 1 - 5 # The FPU has 32 64-bit FPRs # F64 = 1 # # MIPS32 Release 6: # In "strictly 32-bit" mode there are 32 32-bit FPRs, and bonding of two 32-bit FPRs to support # 64-bit floating point is not allowed # F64 = 0 # # MIPS64 Release 6: # FPU has 32 64-bit FPRs, when F64 = 1 # In "strictly 32-bit" mode there are 32 32-bit FPRs, and bonding of two 32-bit FPRs is not allowed # F64 = 0 # # Release 6: # Floating point condition codes are removed, and replaced by a new CMP.condn.fmt instruction # The PS paired-single format in floating point instructions is removed - this was when # two 32-bit floats were stuffed into one 64-bit FPR, and supported SIMD. # # In function calls, floating point args are passed in FPRs f12 to f15, and the return value is in f0 # # When you have 32-bit FPRs, then 64-bit double floats are created by bonding a pair of 32-bit FPRs. # f0 is the low half or has the LSB or also called lower word of the double, and # f1 is the high half or has the MSB or also called upper word of the double # # When you have a 64-bit longword integer: # For function return longword values: # $2 (same as v0) holds the MSB top half of the longword # $3 (same as v1) holds the LSB bottom half of the longword # # Note that when FREGSIZE = 8 (ie, 64-bit FPRs) and you have a single 32-bit float, then the # 32-bit float data is stored in bits 0-31 of the FPR. (Bits 32-63 are "unused".) # # Note that when conversions are done to/from floats, doubles, ints, and longs (64-bit) that rounding # errors can occur in the simulator, so equality comparisons should be done carefully. 
# # ################################ # #----- @ifdef MIPS64 @ifdef MIPS64_32ADDRS # used for 64-bit mips restricted to 32-bit addresses @define REGSIZE "8" # General purpose register size (8 or 4) @define FREGSIZE "8" # Floating point register size (8 or 4) @define ADDRSIZE "4" # Memory address size (8 or 4, virtual and physical) @define DREGSIZE "16" # 2x REGSIZE used for accumulators @define SIZETO4 "4" # In 32-bit mode, no truncation needed @define ADDRCAST ":4" # need to down cast to pointer size @define NEEDCAST "1" @else # full 64 bit ptrs @define REGSIZE "8" # General purpose register size (8 or 4) @define FREGSIZE "8" # Floating point register size (8 or 4) @define ADDRSIZE "8" # Memory address size (8 or 4, virtual and physical) @define DREGSIZE "16" # 2x REGSIZE used for accumulators @define SIZETO4 "4" # In 64-bit mode, use when need to do 32-bit operation @define ADDRCAST "" # no need to down cast to pointer size @endif #MIPS64_32ADDRS @else # MIPS32 @define REGSIZE "4" # General purpose register size (8 or 4) # FREGSIZE for mips32 is set in slaspec file @define ADDRSIZE "4" # Memory address size (8 or 4, virtual and physical) @define DOUBLE "8" # 2x REGSIZE used for accumulators @define DREGSIZE "8" @define SIZETO4 "4" @define ADDRCAST "" # no need to down cast to pointer size @endif #----- define endian=$(ENDIAN); define alignment=2; define space ram type=ram_space size=$(ADDRSIZE) default; define space register type=register_space size=4; # General purpose registers define register offset=0 size=$(REGSIZE) [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 s0 s1 s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra pc ]; @ifdef MIPS64 # We need the 32-bit pieces of the main registers for the 32-bit instructions @if ENDIAN == "big" define register offset=0 size=4 [ zero_hi zero_lo at_hi at_lo v0_hi v0_lo v1_hi v1_lo a0_hi a0_lo a1_hi a1_lo a2_hi a2_lo a3_hi a3_lo t0_hi t0_lo t1_hi t1_lo t2_hi t2_lo t3_hi t3_lo t4_hi t4_lo t5_hi t5_lo t6_hi t6_lo t7_hi t7_lo 
s0_hi s0_lo s1_hi s1_lo s2_hi s2_lo s3_hi s3_lo s4_hi s4_lo s5_hi s5_lo s6_hi s6_lo s7_hi s7_lo t8_hi t8_lo t9_hi t9_lo k0_hi k0_lo k1_hi k1_lo gp_hi gp_lo sp_hi sp_lo s8_hi s8_lo ra_hi ra_lo pc_hi pc_lo ]; @else define register offset=0 size=4 [ zero_lo zero_hi at_lo at_hi v0_lo v0_hi v1_lo v1_hi a0_lo a0_hi a1_lo a1_hi a2_lo a2_hi a3_lo a3_hi t0_lo t0_hi t1_lo t1_hi t2_lo t2_hi t3_lo t3_hi t4_lo t4_hi t5_lo t5_hi t6_lo t6_hi t7_lo t7_hi s0_lo s0_hi s1_lo s1_hi s2_lo s2_hi s3_lo s3_hi s4_lo s4_hi s5_lo s5_hi s6_lo s6_hi s7_lo s7_hi t8_lo t8_hi t9_lo t9_hi k0_lo k0_hi k1_lo k1_hi gp_lo gp_hi sp_lo sp_hi s8_lo s8_hi ra_lo ra_hi pc_lo pc_hi ]; @endif # ENDIAN @endif # MIPS64 # Floating point registers @if FREGSIZE == "4" @if ENDIAN == "big" # For 64-bit Double floating point operands need to bond two 32-bit FPRs define register offset=0x1000 size=4 [ f1 f0 f3 f2 f5 f4 f7 f6 f9 f8 f11 f10 f13 f12 f15 f14 f17 f16 f19 f18 f21 f20 f23 f22 f25 f24 f27 f26 f29 f28 f31 f30 ]; @else define register offset=0x1000 size=4 [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ]; @endif # ENDIAN # Note ftD, fsD, and fdD and frD have been added to support 64-bit double floats define register offset=0x1000 size=8 [ f0_1 f2_3 f4_5 f6_7 f8_9 f10_11 f12_13 f14_15 f16_17 f18_19 f20_21 f22_23 f24_25 f26_27 f28_29 f30_31 ]; @else # FREGSIZE == "8" define register offset=0x1000 size=8 [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ]; @endif # FREGSIZE # Floating point control registers (common to both MIPS32 and MIPS64) define register offset=0x1200 size=4 [ fir fccr fexr fenr fcsr ]; # COP-0 control registers, sel=0 define register offset=0x2000 size=$(REGSIZE) [ Index Random EntryLo0 EntryLo1 Context PageMask Wired HWREna BadVAddr Count EntryHi Compare Status Cause EPC PRId Config LLAddr WatchLo WatchHi XContext cop0_reg21 cop0_reg22 Debug DEPC 
PerfCnt ErrCtl CacheErr TagLo TagHi ErrorEPC DESAVE ]; # COP-0 control registers, sel=1 define register offset=0x2100 size=$(REGSIZE) [ MVPControl VPEControl TCStatus cop0_reg3.1 ContextConfig PageGrain SRSConf0 cop0_reg7.1 cop0_reg8.1 cop0_reg9.1 cop0_reg10.1 cop0_reg11.1 IntCtl cop0_reg13.1 cop0_reg14.1 EBase Config1 cop0_reg17.1 WatchLo.1 WatchHi.1 cop0_reg20.1 cop0_reg21.1 cop0_reg22.1 TraceControl cop0_reg24.1 PerfCnt.1 cop0_reg26.1 CacheErr.1 DataLo.1 DataHi.1 cop0_reg30.1 cop0_reg31.1 ]; # COP-0 control registers, sel=2 define register offset=0x2200 size=$(REGSIZE) [ MVPConf0 VPEConf0 TCBind cop0_reg3.2 cop0_reg4.2 cop0_reg5.2 SRSConf1 cop0_reg7.2 cop0_reg8.2 cop0_reg9.2 cop0_reg10.2 cop0_reg11.2 SRSCtl cop0_reg13.2 cop0_reg14.2 cop0_reg15.2 Config2 cop0_reg17.2 WatchLo.2 WatchHi.2 cop0_reg20.2 cop0_reg21.2 cop0_reg22.2 TraceControl2 cop0_reg24.2 PerfCnt.2 cop0_reg26.2 CacheErr.2 TagLo.2 TagHi.2 cop0_reg30.2 cop0_reg31.2 ]; # COP-0 control registers, sel=3 define register offset=0x2300 size=$(REGSIZE) [ MVPConf1 VPEConf1 TCRestart cop0_reg3.3 cop0_reg4.3 cop0_reg5.3 SRSConf2 cop0_reg7.3 cop0_reg8.3 cop0_reg9.3 cop0_reg10.3 cop0_reg11.3 SRSMap cop0_reg13.3 cop0_reg14.3 cop0_reg15.3 Config3 cop0_reg17.3 WatchLo.3 WatchHi.3 cop0_reg20.3 cop0_reg21.3 cop0_reg22.3 UserTraceData cop0_reg24.3 PerfCnt.3 cop0_reg26.3 CacheErr.3 DataLo.3 DataHi.3 cop0_reg30.3 cop0_reg31.3 ]; # COP-0 control registers, sel=4 define register offset=0x2400 size=$(REGSIZE) [ cop0_reg0.4 YQMask TCHalt cop0_reg3.4 cop0_reg4.4 cop0_reg5.4 SRSConf3 cop0_reg7.4 cop0_reg8.4 cop0_reg9.4 cop0_reg10.4 cop0_reg11.4 cop0_reg12.4 cop0_reg13.4 cop0_reg14.4 cop0_reg15.4 cop0_reg16.4 cop0_reg17.4 WatchLo.4 WatchHi.4 cop0_reg20.4 cop0_reg21.4 cop0_reg22.4 TraceBPC cop0_reg24.4 PerfCnt.4 cop0_reg26.4 CacheErr.4 TagLo.4 TagHi.4 cop0_reg30.4 cop0_reg31.4 ]; # COP-0 control registers, sel=5 define register offset=0x2500 size=$(REGSIZE) [ cop0_reg0.5 VPESchedule TCContext cop0_reg3.5 cop0_reg4.5 cop0_reg5.5 
SRSConf4 cop0_reg7.5 cop0_reg8.5 cop0_reg9.5 cop0_reg10.5 cop0_reg11.5 cop0_reg12.5 cop0_reg13.5 cop0_reg14.5 cop0_reg15.5 cop0_reg16.5 cop0_reg17.5 WatchLo.5 WatchHi.5 cop0_reg20.5 cop0_reg21.5 cop0_reg22.5 cop0_reg23.5 cop0_reg24.5 PerfCnt.5 cop0_reg26.5 CacheErr.5 DataLo.5 DataHi.5 cop0_reg30.5 cop0_reg31.5 ]; # COP-0 control registers, sel=6 define register offset=0x2600 size=$(REGSIZE) [ cop0_reg0.6 VPEScheFBack TCSchedule cop0_reg3.6 cop0_reg4.6 cop0_reg5.6 cop0_reg6.6 cop0_reg7.6 cop0_reg8.6 cop0_reg9.6 cop0_reg10.6 cop0_reg11.6 cop0_reg12.6 cop0_reg13.6 cop0_reg14.6 cop0_reg15.6 cop0_reg16.6 cop0_reg17.6 WatchLo.6 WatchHi.6 cop0_reg20.6 cop0_reg21.6 cop0_reg22.6 cop0_reg23.6 cop0_reg24.6 PerfCnt.6 cop0_reg26.6 CacheErr.6 TagLo.6 TagHi.6 cop0_reg30.6 cop0_reg31.6 ]; # COP-0 control registers, sel=7 define register offset=0x2700 size=$(REGSIZE) [ cop0_reg0.7 VPEOpt TCScheFBack cop0_reg3.7 cop0_reg4.7 cop0_reg5.7 cop0_reg6.7 cop0_reg7.7 cop0_reg8.7 cop0_reg9.7 cop0_reg10.7 cop0_reg11.7 cop0_reg12.7 cop0_reg13.7 cop0_reg14.7 cop0_reg15.7 cop0_reg16.7 cop0_reg17.7 WatchLo.7 WatchHi.7 cop0_reg20.7 cop0_reg21.7 cop0_reg22.7 cop0_reg23.7 cop0_reg24.7 PerfCnt.7 cop0_reg26.7 CacheErr.7 DataLo.7 DataHi.7 cop0_reg30.7 cop0_reg31.7 ]; # Some other internal registers define register offset=0x3000 size=$(REGSIZE) [ @if ENDIAN == "big" hi lo hi1 lo1 hi2 lo2 hi3 lo3 @else lo hi lo1 hi1 lo2 hi2 lo3 hi3 @endif # ENDIAN tsp ]; # MIPS dsp lo/hi combined registers define register offset=0x3000 size=$(DREGSIZE) [ ac0 ac1 ac2 ac3 ]; define register offset=0x3100 size=$(REGSIZE) [ DSPControl ]; define register offset=0x3200 size=$(REGSIZE) [ HW_CPUNUM HW_SYNCI_STEP HW_CC HW_CCRe HW_PerfCtr HW_XNP HW_RES6 HW_RES7 HW_RES8 HW_RES9 HW_RES10 HW_RES11 HW_RES12 HW_RES13 HW_RES14 HW_RES15 HW_RES16 HW_RES17 HW_RES18 HW_RES19 HW_RES20 HW_RES21 HW_RES22 HW_RES23 HW_RES24 HW_RES25 HW_RES26 HW_RES27 HW_RES28 HW_ULR HW_RESIM30 HW_RESIM31 ]; @ifdef ISA_VARIANT define register offset=0x3F00 size=1 
[ ISAModeSwitch ]; @endif # Dummy registers for multi-threading define register offset=0x3300 size=$(REGSIZE) [ thread_zero thread_at thread_v0 thread_v1 thread_a0 thread_a1 thread_a2 thread_a3 thread_t0 thread_t1 thread_t2 thread_t3 thread_t4 thread_t5 thread_t6 thread_t7 thread_s0 thread_s1 thread_s2 thread_s3 thread_s4 thread_s5 thread_s6 thread_s7 thread_t8 thread_t9 thread_k0 thread_k1 thread_gp thread_sp thread_s8 thread_ra thread_f0 thread_f1 thread_f2 thread_f3 thread_f4 thread_f5 thread_f6 thread_f7 thread_f8 thread_f9 thread_f10 thread_f11 thread_f12 thread_f13 thread_f14 thread_f15 thread_f16 thread_f17 thread_f18 thread_f19 thread_f20 thread_f21 thread_f22 thread_f23 thread_f24 thread_f25 thread_f26 thread_f27 thread_f28 thread_f29 thread_f30 thread_f31 thread_lo0 thread_hi0 thread_acx0 _ thread_lo1 thread_hi1 thread_acx1 _ thread_lo2 thread_hi2 thread_acx2 _ thread_lo3 thread_hi3 thread_acx3 _ thread_fir thread_fccr thread_fexr thread_fenr thread_fcsr ]; # Define context bits define register offset=0x4000 size=4 contextreg; define context contextreg PAIR_INSTRUCTION_FLAG=(0,0) noflow # =1 paired instruction REL6=(31,31) # =1 Release 6, =0 Pre release 6, (Fixed, set via pspec) RELP=(30,30) # =1 Mips16e, =0 MicroMips. REL6, RELP can't both be 1 (Fixed, set via pspec) @ifdef ISA_VARIANT ISA_MODE=(1,1) # =1 Decode using alternate ISA, variable. LowBitCodeMode = (1,1) # =1 if low bit of instruction address is set on a branch #below here is for mips16e. 
Overlaps with micromips ext_isjal=(2,2) noflow ext_value=(3,13) noflow ext_value_select=(3,5) noflow ext_value_1005=(3,8) noflow ext_value_1004=(3,9) noflow ext_value_sa40=(3,7) noflow ext_value_xreg=(3,5) noflow ext_value_frame=(6,9) noflow ext_value_areg=(10,13) noflow ext_value_b0=(13,13) noflow ext_value_b1=(12,12) noflow ext_value_b2=(11,11) noflow ext_value_b3=(10,10) noflow ext_value_saz=(8,13) noflow ext_value_1511=(9,13) noflow ext_value_1511s=(9,13) signed noflow ext_value_1411=(10,13) noflow ext_value_1411s=(10,13) signed noflow ext_tgt_2521=(3,7) noflow ext_tgt_2016=(8,12) noflow ext_tgt_x=(13,13) noflow ext_is_ext=(14,14) noflow ext_m16r32=(15,19) noflow ext_m16r32a=(15,19) noflow ext_reg_high=(15,16) noflow ext_reg_low=(17,19) noflow ext_svrs_sreg=(20,24) noflow ext_svrs_xs=(20,22) noflow ext_svrs_s1=(23,23) noflow ext_svrs_s0=(24,24) noflow ext_done=(25,25) noflow ext_delay=(26,27) noflow # 16e2 ext_rb=(11,13) noflow ext_imm_2426=(3, 5) ext_imm_2526=(3,4) noflow ext_imm_1620=(9,13) noflow ext_imm_1920=(9,10) noflow ext_imm_2124=(5, 8) noflow ext_imm_2123=(6, 8) noflow ext_imm_21=(8,8) noflow ext_imm_2226=(3,7) noflow #below here is for micromips. 
Overlaps with mips16e ext_t4_name=(2,5) noflow ext_t4=(2,5) noflow ext_tra=(6,6) noflow ext_32_code=(7,16) noflow ext_32_codes=(7,16) signed noflow ext_32_addim=(10,16) noflow ext_32_addims=(10,16) signed noflow ext_32_imm2=(15,16) noflow ext_32_imm2s=(15,16) signed noflow ext_32_imm3=(14,16) noflow ext_32_imm3s=(14,16) signed noflow ext_32_imm5=(12,16) noflow ext_32_imm5s=(12,16) signed noflow ext_32_imm6=(11,16) noflow ext_32_rlist=(7,11) noflow ext_32_base=(12,16) noflow ext_32_basea=(12,16) noflow ext_32_rd=(7,11) noflow ext_32_rdset=(7,11) noflow ext_32_rs1=(7,11) noflow ext_32_rs1lo=(7,11) noflow ext_32_rs1set=(7,11) noflow ext_16_rs=(7,9) noflow ext_16_rslo=(7,8) noflow ext_16_rshi=(9,9) noflow ext_off16_s=(7,22) signed noflow ext_off16_u=(7,22) noflow @endif # ISA_VARIANT ; # Instruction fields define token instr(32) prime = (26,31) bit25 = (25,25) zero2425 = (24,25) zero2325 = (23,25) zero1 = (22,25) rs32 = (21,25) frD = (21,25) rs = (21,25) fr = (21,25) base = (21,25) format = (21,25) copop = (21,25) mfmc0 = (21,25) zero21 = (21,25) jsub = (21,25) sa_dsp2 = (21,25) shift21 = (21,25) sz = (21,25) acf = (21,22) acflo = (21,22) acfhi = (21,22) shift20 = (20,25) breakcode = (6,25) off26 = (0,25) signed # 26 bit signed offset, e.g. balc, bc ind26 = (0,25) # 26 bit unsigned index, e.g. 
jal copfill = (6,24) cofun = (0,24) off21 = (0,20) signed # 21 bit signed offset in conditional branch/link off16 = (0,15) signed # 16 bit signed offset in conditional branch/link bit21 = (21,21) bitz19 = (19,20) pcrel = (19,20) pcrel2 = (18,20) cc = (18,20) immed1625 = (16,25) signed immed1623 = (16,23) signed rt32 = (16,20) rt = (16,20) rtmtdsp = (16,20) RT0thread = (16,20) RTthread = (16,20) FTthread = (16,20) FCTthread = (16,20) ftD = (16,20) ft = (16,20) index = (16,20) hint = (16,20) cop1code = (16,20) synci = (16,20) cond = (16,20) op = (16,20) zero1620 = (16,20) zero1619 = (16,19) lohiacx = (16,19) nd = (17,17) tf = (16,16) zero1320 = (13,20) zero1315 = (13,15) szero = (11,25) mask = (11,20) baser6 = (11,15) rd32 = (11,15) rd = (11,15) rdmtdsp = (11,15) rd0_0 = (11,15) rd0_1 = (11,15) rd0_2 = (11,15) rd0_3 = (11,15) rd0_4 = (11,15) rd0_5 = (11,15) rd0_6 = (11,15) rd0_7 = (11,15) rd_hw = (11,15) cp2cprSel0 = (11,15) cp2cprSel1 = (11,15) cp2cprSel2 = (11,15) cp2cprSel3 = (11,15) cp2cprSel4 = (11,15) cp2cprSel5 = (11,15) cp2cprSel6 = (11,15) cp2cprSel7 = (11,15) fsD = (11,15) fs = (11,15) RD0thread = (11,15) RDthread = (11,15) FDthread = (11,15) FCRthread = (16,20) sa_dsp = (11,15) fs_unk = (11,15) fs_fcr = (11,15) zero4 = (11,15) msbd = (11,15) lohiacx2 = (11,14) aclo = (11,12) achi = (11,12) ac = (11,12) bp = (11,12) bit10 = (10,10) spec2 = (9,10) spec3 = (8,10) simmed9 = (7,15) zero2 = (7,10) code = (6,15) fdD = (6,10) fd = (6,10) stype = (6,10) sa = (6,10) lsb = (6,10) fct2 = (6,10) zero5 = (6,10) wsbh = (6,10) bp3 = (6,8) sel_0608 = (6,8) sa2 = (6,7) bp2 = (6,7) zero6 = (3,10) bigfunct = (0,10) fct = (0,5) bshfl = (0,5) bit6 = (6,6) zero3 = (0,4) bit5 = (5,5) h = (4,4) op4 = (3,5) bit3 = (3,3) sel = (0,2) format1X = (0,2) simmed19 = (0,18) signed simmed18 = (0,17) signed immed = (0,15) simmed = (0,15) signed simmseq = (6,15) signed simmed11 = (0,10) ; attach variables [ rs rt rd base index baser6 ] [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 s0 s1 
s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra ];

# Hardware registers selectable by rdhwr. Fixed index 16: it previously
# mapped to HW_RES26 (duplicating index 26) instead of HW_RES16, contradicting
# the HW_* register definitions above.
attach variables [ rd_hw ] [ HW_CPUNUM HW_SYNCI_STEP HW_CC HW_CCRe HW_PerfCtr HW_XNP HW_RES6 HW_RES7
	HW_RES8 HW_RES9 HW_RES10 HW_RES11 HW_RES12 HW_RES13 HW_RES14 HW_RES15
	HW_RES16 HW_RES17 HW_RES18 HW_RES19 HW_RES20 HW_RES21 HW_RES22 HW_RES23
	HW_RES24 HW_RES25 HW_RES26 HW_RES27 HW_RES28 HW_ULR HW_RESIM30 HW_RESIM31 ];

@ifdef MIPS64
attach variables [ rs32 rt32 rd32 ] [ zero_lo at_lo v0_lo v1_lo a0_lo a1_lo a2_lo a3_lo t0_lo t1_lo t2_lo t3_lo t4_lo t5_lo t6_lo t7_lo s0_lo s1_lo s2_lo s3_lo s4_lo s5_lo s6_lo s7_lo t8_lo t9_lo k0_lo k1_lo gp_lo sp_lo s8_lo ra_lo ];
@else
# For MIPS32 these are the same as rs, rt, and rd
attach variables [ rs32 rt32 rd32 ] [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 s0 s1 s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra ];
@endif
attach variables [ fs ft fd fr ] [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ];
@if FREGSIZE == "4"
# For 64-bit floating point Double instruction operands need to bond two 32-bit FPRs
attach variables [ fsD ftD fdD frD ] [ f0_1 _ f2_3 _ f4_5 _ f6_7 _ f8_9 _ f10_11 _ f12_13 _ f14_15 _ f16_17 _ f18_19 _ f20_21 _ f22_23 _ f24_25 _ f26_27 _ f28_29 _ f30_31 _ ];
@else # FREGSIZE == "8"
attach variables [ fsD ftD fdD frD ] [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ];
@endif
# Only a few Floating Point Control (FCR) registers are defined
attach variables [ fs_fcr ] [ fir _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ fccr fexr _ fenr _ _ fcsr ];
attach variables [ rd0_0 ] [ Index Random EntryLo0 EntryLo1 Context PageMask Wired HWREna BadVAddr Count EntryHi Compare Status Cause EPC PRId Config LLAddr WatchLo WatchHi XContext cop0_reg21 cop0_reg22 Debug DEPC PerfCnt ErrCtl CacheErr TagLo TagHi ErrorEPC DESAVE ];
attach variables [ rd0_1 ] [ MVPControl VPEControl TCStatus cop0_reg3.1 ContextConfig PageGrain SRSConf0
cop0_reg7.1 cop0_reg8.1 cop0_reg9.1 cop0_reg10.1 cop0_reg11.1 IntCtl cop0_reg13.1 cop0_reg14.1 EBase Config1 cop0_reg17.1 WatchLo.1 WatchHi.1 cop0_reg20.1 cop0_reg21.1 cop0_reg22.1 TraceControl cop0_reg24.1 PerfCnt.1 cop0_reg26.1 CacheErr.1 DataLo.1 DataHi.1 cop0_reg30.1 cop0_reg31.1 ]; attach variables [ rd0_2 ] [ MVPConf0 VPEConf0 TCBind cop0_reg3.2 cop0_reg4.2 cop0_reg5.2 SRSConf1 cop0_reg7.2 cop0_reg8.2 cop0_reg9.2 cop0_reg10.2 cop0_reg11.2 SRSCtl cop0_reg13.2 cop0_reg14.2 cop0_reg15.2 Config2 cop0_reg17.2 WatchLo.2 WatchHi.2 cop0_reg20.2 cop0_reg21.2 cop0_reg22.2 TraceControl2 cop0_reg24.2 PerfCnt.2 cop0_reg26.2 CacheErr.2 TagLo.2 TagHi.2 cop0_reg30.2 cop0_reg31.2 ]; attach variables [ rd0_3 ] [ MVPConf1 VPEConf1 TCRestart cop0_reg3.3 cop0_reg4.3 cop0_reg5.3 SRSConf2 cop0_reg7.3 cop0_reg8.3 cop0_reg9.3 cop0_reg10.3 cop0_reg11.3 SRSMap cop0_reg13.3 cop0_reg14.3 cop0_reg15.3 Config3 cop0_reg17.3 WatchLo.3 WatchHi.3 cop0_reg20.3 cop0_reg21.3 cop0_reg22.3 UserTraceData cop0_reg24.3 PerfCnt.3 cop0_reg26.3 CacheErr.3 DataLo.3 DataHi.3 cop0_reg30.3 cop0_reg31.3 ]; attach variables [ rd0_4 ] [ cop0_reg0.4 YQMask TCHalt cop0_reg3.4 cop0_reg4.4 cop0_reg5.4 SRSConf3 cop0_reg7.4 cop0_reg8.4 cop0_reg9.4 cop0_reg10.4 cop0_reg11.4 cop0_reg12.4 cop0_reg13.4 cop0_reg14.4 cop0_reg15.4 cop0_reg16.4 cop0_reg17.4 WatchLo.4 WatchHi.4 cop0_reg20.4 cop0_reg21.4 cop0_reg22.4 TraceBPC cop0_reg24.4 PerfCnt.4 cop0_reg26.4 CacheErr.4 TagLo.4 TagHi.4 cop0_reg30.4 cop0_reg31.4 ]; attach variables [ rd0_5 ] [ cop0_reg0.5 VPESchedule TCContext cop0_reg3.5 cop0_reg4.5 cop0_reg5.5 SRSConf4 cop0_reg7.5 cop0_reg8.5 cop0_reg9.5 cop0_reg10.5 cop0_reg11.5 cop0_reg12.5 cop0_reg13.5 cop0_reg14.5 cop0_reg15.5 cop0_reg16.5 cop0_reg17.5 WatchLo.5 WatchHi.5 cop0_reg20.5 cop0_reg21.5 cop0_reg22.5 cop0_reg23.5 cop0_reg24.5 PerfCnt.5 cop0_reg26.5 CacheErr.5 DataLo.5 DataHi.5 cop0_reg30.5 cop0_reg31.5 ]; attach variables [ rd0_6 ] [ cop0_reg0.6 VPEScheFBack TCSchedule cop0_reg3.6 cop0_reg4.6 cop0_reg5.6 
cop0_reg6.6 cop0_reg7.6 cop0_reg8.6 cop0_reg9.6 cop0_reg10.6 cop0_reg11.6 cop0_reg12.6 cop0_reg13.6 cop0_reg14.6 cop0_reg15.6 cop0_reg16.6 cop0_reg17.6 WatchLo.6 WatchHi.6 cop0_reg20.6 cop0_reg21.6 cop0_reg22.6 cop0_reg23.6 cop0_reg24.6 PerfCnt.6 cop0_reg26.6 CacheErr.6 TagLo.6 TagHi.6 cop0_reg30.6 cop0_reg31.6 ]; attach variables [ rd0_7 ] [ cop0_reg0.7 VPEOpt TCScheFBack cop0_reg3.7 cop0_reg4.7 cop0_reg5.7 cop0_reg6.7 cop0_reg7.7 cop0_reg8.7 cop0_reg9.7 cop0_reg10.7 cop0_reg11.7 cop0_reg12.7 cop0_reg13.7 cop0_reg14.7 cop0_reg15.7 cop0_reg16.7 cop0_reg17.7 WatchLo.7 WatchHi.7 cop0_reg20.7 cop0_reg21.7 cop0_reg22.7 cop0_reg23.7 cop0_reg24.7 PerfCnt.7 cop0_reg26.7 CacheErr.7 DataLo.7 DataHi.7 cop0_reg30.7 cop0_reg31.7 ]; attach variables [ aclo acflo ] [ lo lo1 lo2 lo3 ]; attach variables [ achi acfhi ] [ hi hi1 hi2 hi3 ]; attach variables [ ac acf ] [ ac0 ac1 ac2 ac3 ]; attach names hint [ "load" "store" "hint2" "hint3" "load_streamed" "store_streamed" "load_retained" "store_retained" "hint8" "hint9" "hint10" "hint11" "hint12" "hint13" "hint14" "hint15" "hint16" "hint17" "hint18" "hint19" "hint20" "hint21" "hint22" "hint23" "hint24" "writeback_invalidate" "hint26" "hint27" "hint28" "hint29" "PrepareForStore" "hint31" ]; attach variables [RTthread RDthread] [ thread_zero thread_at thread_v0 thread_v1 thread_a0 thread_a1 thread_a2 thread_a3 thread_t0 thread_t1 thread_t2 thread_t3 thread_t4 thread_t5 thread_t6 thread_t7 thread_s0 thread_s1 thread_s2 thread_s3 thread_s4 thread_s5 thread_s6 thread_s7 thread_t8 thread_t9 thread_k0 thread_k1 thread_gp thread_sp thread_s8 thread_ra ]; attach variables [ FTthread FDthread ] [ thread_f0 thread_f1 thread_f2 thread_f3 thread_f4 thread_f5 thread_f6 thread_f7 thread_f8 thread_f9 thread_f10 thread_f11 thread_f12 thread_f13 thread_f14 thread_f15 thread_f16 thread_f17 thread_f18 thread_f19 thread_f20 thread_f21 thread_f22 thread_f23 thread_f24 thread_f25 thread_f26 thread_f27 thread_f28 thread_f29 thread_f30 thread_f31 ]; attach 
variables [ rtmtdsp rdmtdsp ] [ thread_lo0 thread_hi0 thread_acx0 _ thread_lo1 thread_hi1 thread_acx1 _ thread_lo2 thread_hi2 thread_acx2 _ thread_lo3 thread_hi3 thread_acx3 _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; attach variables [ FCTthread FCRthread ] [ thread_fir _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ thread_fccr thread_fexr _ thread_fenr _ _ thread_fcsr ]; # Subconstructors RD0: rd0_0 is rd0_0 & sel=0 { export rd0_0; } RD0: rd0_1 is rd0_1 & sel=1 { export rd0_1; } RD0: rd0_2 is rd0_2 & sel=2 { export rd0_2; } RD0: rd0_3 is rd0_3 & sel=3 { export rd0_3; } RD0: rd0_4 is rd0_4 & sel=4 { export rd0_4; } RD0: rd0_5 is rd0_5 & sel=5 { export rd0_5; } RD0: rd0_6 is rd0_6 & sel=6 { export rd0_6; } RD0: rd0_7 is rd0_7 & sel=7 { export rd0_7; } RD: rd is rd { export rd; } RDsrc: rd is rd { export rd; } RDsrc: rd is rd & rd=0 { export 0:$(REGSIZE); } RS: rs is rs { export rs; } RSsrc: rs is rs { export rs; } RSsrc: rs is rs & rs=0 { export 0:$(REGSIZE); } RT: rt is rt { export rt; } RTsrc: rt is rt { export rt; } RTsrc: rt is rt & rt=0 { export 0:$(REGSIZE); } RD32: rd is rd & rd32 { export rd32; } RS32src: rs is rs & rs32 { export rs32; } RS32src: rs is rs & rs32=0 { export 0:4; } RT32: rt is rt & rt32 { export rt32; } RT32src: rt is rt & rt32 { export rt32; } RT32src: rt is rt & rt32=0 { export 0:4; } @ifdef NEEDCAST macro MemSrcCast(dest,src) { dest = *(src:$(ADDRSIZE)); } macro MemDestCast(dest,src) { *(dest:$(ADDRSIZE)) = src; } macro ValCast(dest,src) { dest = src:$(ADDRSIZE); } @else macro MemSrcCast(dest,src) { dest = *(src); } macro MemDestCast(dest,src) { *(dest) = src; } macro ValCast(dest,src) { dest = src; } @endif OFF_BASE: simmed(base) is simmed & base { tmp:$(REGSIZE) = base + simmed; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } INDEX_BASE: index(base) is index & base { tmp:$(REGSIZE) = base + index; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_BASER6: simmed(base) is REL6=0 & simmed & base { 
tmp:$(REGSIZE) = base + simmed; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_BASER6: simmed9(base) is REL6=1 & simmed9 & base { tmp:$(REGSIZE) = base + simmed9; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } S18L3: val is simmed18 [ val = simmed18 << 3; ] { export *[const]:4 val; } S19L2: val is simmed19 [ val = simmed19 << 2; ] { export *[const]:4 val; } S16L16: val is simmed [ val = simmed << 16; ] { export *[const]:4 val; } S16L32: val is simmed [ val = simmed << 32; ] { export *[const]:8 val; } S16L48: val is simmed [ val = simmed << 48; ] { export *[const]:8 val; } SAV: val is sa2 [ val = sa2+1; ] { export *[const]:1 val; } Rel16: reloc is off16 [ reloc=inst_start+4+4*off16; ] { export *:$(ADDRSIZE) reloc; } Rel21: reloc is off21 [ reloc=inst_start+4+4*off21; ] { export *:$(ADDRSIZE) reloc; } Rel26: reloc is off26 [ reloc=inst_start+4+4*off26; ] { export *:$(ADDRSIZE) reloc; } Abs26: reloc is ind26 [ reloc=((inst_start+4) $and 0xfffffffff0000000) | 4*ind26; ] { export *:$(ADDRSIZE) reloc; } InsSize: mysize is msbd & lsb [ mysize = msbd - lsb + 1; ] { tmp:1 = mysize; export tmp; } ExtSize: mysize is msbd [ mysize = msbd + 1; ] { tmp:1 = mysize; export tmp; } @ifdef MIPS64 DextmSize: mysize is msbd [ mysize = msbd + 1 + 32; ] { tmp:1 = mysize; export tmp; } DXuPos: pos is lsb [ pos = lsb + 32; ] { tmp:1 = pos; export tmp; } DinsXSize: mysize is msbd & lsb [ mysize = msbd - lsb + 1 + 32; ] { tmp:1 = mysize; export tmp; } @endif macro JXWritePC(addr) { @ifdef ISA_VARIANT ISAModeSwitch = (addr & 0x1) != 0; tmp:$(REGSIZE) = -2; tmp = tmp & addr; pc = tmp; @else pc=addr; @endif } # Floating point formats #fmt: "S" is format=0x10 { } #fmt: "D" is format=0x11 { } #fmt: "W" is format=0x14 { } #fmt: "L" is format=0x15 { } #fmt: "PS" is format=0x16 { } fmt1: "S" is format=0x10 { } fmt1: "D" is format=0x11 { } fmt1: "PS" is format=0x16 { } fmt2: "S" is format=0x10 { } fmt2: "D" is format=0x11 { } fmt3: "S" is 
format=0x10 { } fmt3: "W" is format=0x14 { } fmt3: "L" is format=0x15 { } fmt4: "D" is format=0x11 { } fmt4: "W" is format=0x14 { } fmt4: "L" is format=0x15 { } fmt5: "S" is format1X=0x0 { } fmt5: "D" is format1X=0x1 { } fmt5: "PS" is format1X=0x6 { } # Release 6 and later: fmt6: "S" is format=0x14 { } fmt6: "D" is format=0x15 { } # Custom Pcode Operations # # To add a new pcodeop op that is implemented in Java code: # # In this directory: # ./ghidra/Ghidra/Processors/MIPS/src/main/java/ghidra/program/emulation # Edit this file to register a new Java method that implements the pcodeop: # MIPSEmulateInstructionStateModifier.java # (Be sure to also import the new class) # # The mips.pspec file must have this key set (this has already been done): # # # Add the Java class file for the new pcodeop here: # ./ghidra/Ghidra/Framework/SoftwareModeling/src/main/java/ghidra/pcode/emulate/callother # define pcodeop break; define pcodeop trap; define pcodeop wait; define pcodeop syscall; define pcodeop cacheOp; define pcodeop signalDebugBreakpointException; define pcodeop disableInterrupts; define pcodeop enableInterrupts; define pcodeop hazzard; define pcodeop lockload; define pcodeop lockwrite; define pcodeop synch; define pcodeop tlbop; define pcodeop bitSwap; define pcodeop disableProcessor; define pcodeop enableProcessor; define pcodeop signalReservedInstruction; define pcodeop TLB_invalidate; define pcodeop TLB_invalidate_flush; define pcodeop TLB_probe_for_matching_entry; define pcodeop TLB_read_indexed_entryHi; define pcodeop TLB_read_indexed_entryLo0; define pcodeop TLB_read_indexed_entryLo1; define pcodeop TLB_read_indexed_entryPageMask; define pcodeop TLB_write_indexed_entry; define pcodeop TLB_write_random_entry; # prefetch(vaddr, hint); define pcodeop prefetch; # getFpCondition(cc) define pcodeop getFpCondition; # getCopCondition(cop_num, cc) define pcodeop getCopCondition; # setCopControlWord(cop_num, reg_num, value) define pcodeop setCopControlWord; # 
getCopControlWord(cop_num, reg_num) define pcodeop getCopControlWord; # copFunction(cop_num, func) define pcodeop copFunction; # getCopReg(cop_num, reg_num) define pcodeop getCopReg; define pcodeop getCopRegH; # setCopReg(cop_num, reg_num, value) define pcodeop setCopReg; define pcodeop setCopRegH; # extractField(value, msbd, lsb) define pcodeop extractField; # setShadow(sgpr, value) define pcodeop setShadow; # getHWRegister(reg, sel) define pcodeop getHWRegister; # gpr = getShadow(sgpr) define pcodeop getShadow; ================================================ FILE: pypcode/processors/MIPS/data/languages/mips16.sinc ================================================ ################ # # MIPS16e # # From MIPS32 Architecture for Programmers Volume IV-a: # The MIPS16e Application Specific Extension to the MIPS32 # Architecture. # # Document #: MD00076 Rev 2.63 July 16, 2013 # ################ define token m16instr (16) m16_op=(11,15) m16_i_imm=(0,4) m16_rx=(8,10) m16_rxa=(8,10) m16_ri_imm=(0,4) m16_ri_z=(5,7) m16_ry=(5,7) m16_rya=(5,7) m16_rr_f=(0,4) m16_rr_nd=(7,7) m16_rr_l=(6,6) m16_rr_ra=(5,5) m16_rr_z=(5,7) m16_rri_imm=(0,4) m16_rz=(2,4) m16_rza=(2,4) m16_rrr_f=(0,1) m16_rria_f=(4,4) m16_rria_imm=(0,3) m16_rria_simm=(0,3) signed m16_shft_sa=(2,4) m16_shft_f=(0,1) m16_i8_f=(8,10) m16_i8_imm=(0,4) m16_is8_imm=(0,7) signed m16_mv_rz=(0,2) m16_mv_rza=(0,2) m16_i8_rz=(5,7) m16_i8_r32=(0,4) m16_i8_r32a=(0,4) m16_i8_r32_20=(5,7) m16_i8_r32_43=(3,4) m16_i8_svrs=(8,10) m16_i8_sw=(8,10) m16_iu8_imm=(0,7) m16_b_imm=(0,4) m16_b_z=(5,10) m16_cb_z=(5,7) m16_b_off=(0,10) signed m16_cb_off=(0,7) signed m16_svrs_s=(7,7) m16_svrs_ra=(6,6) m16_svrs_s0=(5,5) m16_svrs_s1=(4,4) m16_svrs_frame=(0,3) m16_ext_val=(0,10) m16_tgt_1500=(0,15) m16_tgt_2521=(0,4) m16_tgt_2016=(5,9) m16_jal=(10,10) m16_code=(5,10) ; attach variables [ m16_rx m16_ry m16_rz m16_mv_rz ext_rb ] [ s0 s1 v0 v1 a0 a1 a2 a3 ]; attach variables [ ext_m16r32 m16_i8_r32 ] [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 
s0 s1 s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra ]; @ifdef MIPS64 attach variables [ m16_rxa m16_rya m16_rza m16_mv_rza] [ s0_lo s1_lo v0_lo v1_lo a0_lo a1_lo a2_lo a3_lo ]; attach variables [ ext_m16r32a m16_i8_r32a ] [ zero_lo at_lo v0_lo v1_lo a0_lo a1_lo a2_lo a3_lo t0_lo t1_lo t2_lo t3_lo t4_lo t5_lo t6_lo t7_lo s0_lo s1_lo s2_lo s3_lo s4_lo s5_lo s6_lo s7_lo t8_lo t9_lo k0_lo k1_lo gp_lo sp_lo s8_lo ra_lo ]; RZ: m16_rz is m16_rz { export m16_rz; } @else # !MIPS64 attach variables [ m16_rxa m16_rya m16_rza m16_mv_rza ] [ s0 s1 v0 v1 a0 a1 a2 a3 ]; attach variables [ ext_m16r32a m16_i8_r32a ] [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 s0 s1 s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra ]; RZ: is epsilon {} @endif # MIPS64 RX: m16_rx is m16_rx { export m16_rx; } RX32: m16_rx is m16_rx & m16_rxa { export m16_rxa; } RY32: m16_ry is m16_ry & m16_rya { export m16_rya; } RZ32: m16_rz is m16_rz & m16_rza { export m16_rza; } attach names [ ext_svrs_sreg][ _ "s0" "s1" "s0-s1" "s2" "s0,s2" "s1-s2" "s0-s2" "s2-s3" "s0,s2-s3" "s1-s3" "s0-s3" "s2-s4" "s0,s2-s4" "s1-s4" "s0-s4" "s2-s5" "s0,s2-s5" "s1-s5" "s0-s5" "s2-s6" "s0,s2-s6" "s1-s6" "s0-s6" "s2-s7" "s0,s2-s7" "s1-s7" "s0-s7" "s2-s8" "s0,s2-s8" "s1-s8" "s0-s8" ]; Abs26_m16: reloc is m16_tgt_1500 & ext_tgt_2016 & ext_tgt_2521 [ reloc=((inst_start+4) $and 0xfffffffff0000000)+4*(m16_tgt_1500 | (ext_tgt_2016 << 16) | (ext_tgt_2521 << 21)); ] { export *:$(ADDRSIZE) reloc; } Rel16_m16: reloc is ext_is_ext=1 & m16_b_z=0 & m16_b_imm & ext_value_1005 & ext_value_1511s [ reloc=inst_start+4+2*((ext_value_1511s << 11) | (ext_value_1005 << 5) | m16_b_imm); ] { export *:$(ADDRSIZE) reloc; } Rel16_m16: reloc is ext_is_ext=0 & m16_b_off [ reloc=inst_start+2+2*m16_b_off; ] { export *:$(ADDRSIZE) reloc; } CRel16_m16: reloc is ext_is_ext=1 & m16_cb_z=0 & m16_b_imm & ext_value_1005 & ext_value_1511s [ reloc=inst_start+4+2*((ext_value_1511s << 11) | (ext_value_1005 << 5) | m16_b_imm); ] { export *:$(ADDRSIZE) reloc; } CRel16_m16: reloc 
is ext_is_ext=0 & m16_cb_off [ reloc=inst_start+2+2*m16_cb_off; ] { export *:$(ADDRSIZE) reloc; } :^instruction is ISA_MODE=1 & RELP=1 & m16_op=0b11110 & ext_done=0 & ext_isjal=0 & m16_ext_val; instruction [ext_value=m16_ext_val; ext_is_ext=1; ext_done=1; ] { build instruction; } :^instruction is ISA_MODE=1 & RELP=1 & m16_op=0b00011 & ext_done=0 & ext_isjal=0 & m16_jal & m16_tgt_2016 & m16_tgt_2521; instruction [ext_tgt_2016=m16_tgt_2016; ext_tgt_2521=m16_tgt_2521; ext_tgt_x=m16_jal; ext_isjal=1; ext_done=1; ] { build instruction; } EXT_I: val is m16_i_imm [ val = m16_i_imm << 2; ] { export *[const]:2 val; } EXT_IS0: val is m16_i_imm [ val = m16_i_imm << 0; ] { export *[const]:2 val; } EXT_IS1: val is m16_i_imm [ val = m16_i_imm << 1; ] { export *[const]:2 val; } EXT_RI: val is ext_value_1511 & ext_value_1005 & m16_ri_imm [val = (ext_value_1511 << 11) | (ext_value_1005 << 5) | m16_ri_imm; ] { export *[const]:2 val; } EXT_RRIA: val is ext_is_ext=1 & ext_value_1411s & ext_value_1004 & m16_rria_imm [ val=(ext_value_1411s << 11) | (ext_value_1004 << 4) | m16_rria_imm; ] { export *[const]:2 val; } EXT_RRIA: m16_rria_simm is ext_is_ext=0 & m16_rria_simm { export *[const]:2 m16_rria_simm; } EXT_IS8: val is ext_is_ext=1 & ext_value_1511s & ext_value_1005 & m16_i8_imm [val=(ext_value_1511s << 11) | (ext_value_1005 << 5) | m16_i8_imm; ] { export *[const]:2 val; } EXT_IS8: m16_is8_imm is ext_is_ext=0 & m16_is8_imm { export *[const]:2 m16_is8_imm; } EXT_IS8L3: val is ext_is_ext=0 & ext_value_1511 & ext_value_1005 & m16_is8_imm [val = m16_is8_imm << 3; ] { export *[const]:2 val; } EXT_IU8: val is ext_is_ext=1 & ext_value_1511 & ext_value_1005 & m16_i8_imm [val = (ext_value_1511 << 11) | (ext_value_1005 << 5) | m16_i8_imm; ] { export *[const]:2 val; } EXT_IU8: val is ext_is_ext=0 & m16_iu8_imm [val = m16_iu8_imm << 2; ] { export *[const]:2 val; } EXT_LIU8: val is ext_is_ext=1 & ext_value_1511 & ext_value_1005 & m16_i8_imm [val = (ext_value_1511 << 11) | (ext_value_1005 << 5) | 
m16_i8_imm; ] { export *[const]:2 val; } EXT_LIU8: m16_iu8_imm is ext_is_ext=0 & m16_iu8_imm { export *[const]:2 m16_iu8_imm; } EXT_SHIFT: ext_value_sa40 is ext_is_ext=1 & ext_value_saz=0 & m16_shft_sa=0 & ext_value_sa40 { export *[const]:1 ext_value_sa40;} EXT_SHIFT: val is ext_is_ext=0 & m16_shft_sa=0 [val = 8; ] { export *[const]:1 val;} EXT_SHIFT: m16_shft_sa is ext_is_ext=0 & m16_shft_sa { export *[const]:1 m16_shft_sa;} EXT_SET: val is ext_is_ext=1 & m16_ri_z=0 & ext_value_1511 & ext_value_1005 & m16_i8_imm [val = (ext_value_1511 << 11) | (ext_value_1005 << 5) | m16_i8_imm; ] { export *[const]:4 val; } EXT_SET: m16_iu8_imm is ext_is_ext=0 & m16_iu8_imm { export *[const]:4 m16_iu8_imm; } OFF_M16: EXT_IS8(m16_rx) is ext_is_ext=1 & EXT_IS8 & m16_rx { tmp:$(REGSIZE) = m16_rx + sext(EXT_IS8); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16: EXT_I(m16_rx) is ext_is_ext=0 & EXT_I & m16_rx { tmp:$(REGSIZE) = m16_rx + zext(EXT_I); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16S0: EXT_IS8(m16_rx) is ext_is_ext=1 & EXT_IS8 & m16_rx { tmp:$(REGSIZE) = m16_rx + sext(EXT_IS8); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16S0: EXT_IS0(m16_rx) is ext_is_ext=0 & EXT_IS0 & m16_rx { tmp:$(REGSIZE) = m16_rx + zext(EXT_IS0); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16S1: EXT_IS8(m16_rx) is ext_is_ext=1 & EXT_IS8 & m16_rx { tmp:$(REGSIZE) = m16_rx + sext(EXT_IS8); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16S1: EXT_IS1(m16_rx) is ext_is_ext=0 & EXT_IS1 & m16_rx { tmp:$(REGSIZE) = m16_rx + zext(EXT_IS1); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16PC: EXT_IS8(pc) is ext_is_ext=1 & m16_i8_rz=0 & EXT_IS8 & pc { tmp:$(REGSIZE) = (inst_start + sext(EXT_IS8)) & 0xFFFFFFFC; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16PC: val(pc) is ext_is_ext=0 & m16_iu8_imm & 
pc & ext_delay [ val = m16_iu8_imm << 2; ] { tmp:$(REGSIZE) = (inst_start + val - (ext_delay << 1)) & 0xFFFFFFFC; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16SP: EXT_IS8(sp) is ext_is_ext=1 & m16_i8_rz=0 & EXT_IS8 & sp { tmp:$(REGSIZE) = sp + sext(EXT_IS8); tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } OFF_M16SP: val(sp) is ext_is_ext=0 & m16_iu8_imm & sp [ val = m16_iu8_imm << 2; ] { tmp:$(REGSIZE) = sp + val; tmpscaled:$(ADDRSIZE) = 0; ValCast(tmpscaled,tmp); export tmpscaled; } EXT_FRAME: val is ext_value_frame=0 & m16_svrs_frame=0 [val = 128; ] {export *[const]:2 val;} EXT_FRAME: val is ext_value_frame & m16_svrs_frame [val = ((ext_value_frame << 4) | m16_svrs_frame) << 3;] {export *[const]:2 val;} REGRS_STAT: is ext_value_areg {} REGRS_STAT: ",a3" is (ext_value_areg=1 | ext_value_areg=5 | ext_value_areg=9 |ext_value_areg=0xd) { tsp = tsp-4; MemSrcCast(a3,tsp); } REGRS_STAT: ",a2-a3" is (ext_value_areg=2 | ext_value_areg=6 | ext_value_areg=0xa) { tsp = tsp-4; MemSrcCast(a3,tsp); tsp = tsp-4; MemSrcCast(a2,tsp); } REGRS_STAT: ",a1-a3" is (ext_value_areg=3 | ext_value_areg=7) { tsp = tsp-4; MemSrcCast(a3,tsp); tsp = tsp-4; MemSrcCast(a2,tsp); tsp = tsp-4; MemSrcCast(a1,tsp); } REGRS_STAT: ",a0-a3" is ext_value_areg=0xB { tsp = tsp-4; MemSrcCast(a3,tsp); tsp = tsp-4; MemSrcCast(a2,tsp); tsp = tsp-4; MemSrcCast(a1,tsp); tsp = tsp-4; MemSrcCast(a0,tsp); } REST_STAT: is ext_value_areg=0 | ext_value_areg=4 | ext_value_areg=8 | ext_value_areg=0xc | ext_value_areg=0xe {} REST_STAT: REGRS_STAT is REGRS_STAT { build REGRS_STAT; } REGSV_STAT: is ext_value_areg {} REGSV_STAT: ",a3" is (ext_value_areg=1 | ext_value_areg=5 | ext_value_areg=9 | ext_value_areg=0xd) { tsp = tsp-4; MemDestCast(tsp,a3); } REGSV_STAT: ",a2-a3" is (ext_value_areg=2 | ext_value_areg=6 | ext_value_areg=0xa) { tsp = tsp-4; MemDestCast(tsp,a3); tsp = tsp-4; MemDestCast(tsp,a2); } REGSV_STAT: ",a1-a3" is (ext_value_areg=3 | ext_value_areg=7) { 
tsp = tsp-4; MemDestCast(tsp,a3); tsp = tsp-4; MemDestCast(tsp,a2); tsp = tsp-4; MemDestCast(tsp,a1); } REGSV_STAT: ",a0-a3" is ext_value_areg=0xb { tsp = tsp-4; MemDestCast(tsp,a3); tsp = tsp-4; MemDestCast(tsp,a2); tsp = tsp-4; MemDestCast(tsp,a1); tsp = tsp-4; MemDestCast(tsp,a0); } SAVE_STAT: is ext_value_areg=0 | ext_value_areg=4 | ext_value_areg=8 | ext_value_areg=0xc | ext_value_areg=0xe {} SAVE_STAT: REGSV_STAT is REGSV_STAT { build REGSV_STAT; } REGSV_BLD1: is ext_value_b2=0 {} REGSV_BLD1: "a0," is ext_value_b2=1 { ptr:$(REGSIZE) = sp; MemDestCast(ptr,a0); } REGSV_BLD2: REGSV_BLD1 is REGSV_BLD1 { build REGSV_BLD1; } REGSV_BLD2: "a0-a1," is ext_value_b3=1 & ext_value_b2=0 & (ext_value_b1=0 | ext_value_b0=0) { ptr:$(REGSIZE) = sp; MemDestCast(ptr,a0); ptr = sp+4; MemDestCast(ptr,a1); } REGSV_BLD3: REGSV_BLD2 is REGSV_BLD2 { build REGSV_BLD2; } REGSV_BLD3: "a0-a2," is ext_value_b3=1 & ext_value_b2=1 & ext_value_b1=0 { ptr:$(REGSIZE) = sp; MemDestCast(ptr,a0); ptr = sp+4; MemDestCast(ptr,a1); ptr = sp+8; MemDestCast(ptr,a2); } REGSV_BLD4: REGSV_BLD3 is REGSV_BLD3 { build REGSV_BLD3; } REGSV_BLD4: "a0-a3," is ext_value_areg=0b1110 { ptr:$(REGSIZE) = sp; MemDestCast(ptr,a0); ptr = sp+4; MemDestCast(ptr,a1); ptr = sp+8; MemDestCast(ptr,a2); ptr = sp+12; MemDestCast(ptr,a3); } SAVE_ARG: is ext_value_areg=0 | ext_value_areg=1 | ext_value_areg=2 | ext_value_areg=3 | ext_value_areg=0xb | ext_value_areg=0xf {} SAVE_ARG: REGSV_BLD4 is REGSV_BLD4 { build REGSV_BLD4; } REGRS_S0: is m16_svrs_s0 {} REGRS_S0: is m16_svrs_s0=1 { tsp = tsp-$(REGSIZE); MemSrcCast(s0,tsp); } REGRS_S1: is m16_svrs_s1 {} REGRS_S1: is m16_svrs_s1=1 { tsp = tsp-$(REGSIZE); MemSrcCast(s1,tsp); } REGRS_S8: is ext_value_xreg=6 {} REGRS_S8: is ext_value_xreg { tsp = tsp-$(REGSIZE); MemSrcCast(s8,tsp); } REGRS_S7: is ext_value_xreg=5 {} REGRS_S7: is REGRS_S8 { build REGRS_S8; tsp = tsp-$(REGSIZE); MemSrcCast(s7,tsp); } REGRS_S6: is ext_value_xreg=4 {} REGRS_S6: is REGRS_S7 { build REGRS_S7; tsp = 
tsp-$(REGSIZE); MemSrcCast(s6,tsp); } REGRS_S5: is ext_value_xreg=3 {} REGRS_S5: is REGRS_S6 { build REGRS_S6; tsp = tsp-$(REGSIZE); MemSrcCast(s5,tsp); } REGRS_S4: is ext_value_xreg=2 {} REGRS_S4: is REGRS_S5 { build REGRS_S5; tsp = tsp-$(REGSIZE); MemSrcCast(s4,tsp); } REGRS_S3: is ext_value_xreg=1 {} REGRS_S3: is REGRS_S4 { build REGRS_S4; tsp = tsp-$(REGSIZE); MemSrcCast(s3,tsp); } REGRS_S2: is ext_value_xreg=0 {} REGRS_S2: is REGRS_S3 { build REGRS_S3; tsp = tsp-$(REGSIZE); MemSrcCast(s2,tsp); } REST_SREG: is m16_svrs_s0=0 & m16_svrs_s1=0 & ext_value_xreg=0 {} REST_SREG: ","ext_svrs_sreg is m16_svrs_s0 & m16_svrs_s1 & ext_value_xreg & ext_svrs_sreg & REGRS_S0 & REGRS_S1 & REGRS_S2 [ext_svrs_s0=m16_svrs_s0;ext_svrs_s1=m16_svrs_s1;ext_svrs_xs=ext_value_xreg;] { build REGRS_S2; build REGRS_S1; build REGRS_S0; } REGSV_S0: is m16_svrs_s0 {} REGSV_S0: is m16_svrs_s0=1 { tsp = tsp-$(REGSIZE); MemDestCast(tsp,s0);} REGSV_S1: is m16_svrs_s1 {} REGSV_S1: is m16_svrs_s1=1 { tsp = tsp-$(REGSIZE); MemDestCast(tsp,s1); } REGSV_S8: is ext_value_xreg=6 {} REGSV_S8: is ext_value_xreg { tsp = tsp-$(REGSIZE); MemDestCast(tsp,s8); } REGSV_S7: is ext_value_xreg=5 {} REGSV_S7: is REGSV_S8 { build REGSV_S8; tsp = tsp-$(REGSIZE); MemDestCast(tsp,s7); } REGSV_S6: is ext_value_xreg=4 {} REGSV_S6: is REGSV_S7 { build REGSV_S7; tsp = tsp-$(REGSIZE); MemDestCast(tsp,s6); } REGSV_S5: is ext_value_xreg=3 {} REGSV_S5: is REGSV_S6 { build REGSV_S6; tsp = tsp-$(REGSIZE); MemDestCast(tsp,s5); } REGSV_S4: is ext_value_xreg=2 {} REGSV_S4: is REGSV_S5 { build REGSV_S5; tsp = tsp-$(REGSIZE); MemDestCast(tsp,s4); } REGSV_S3: is ext_value_xreg=1 {} REGSV_S3: is REGSV_S4 { build REGSV_S4; tsp = tsp-$(REGSIZE); MemDestCast(tsp,s3); } REGSV_S2: is ext_value_xreg=0 {} REGSV_S2: is REGSV_S3 { build REGSV_S3; tsp = tsp-$(REGSIZE); MemDestCast(tsp,s2); } SAVE_SREG: is m16_svrs_s0=0 & m16_svrs_s1=0 & ext_value_xreg=0 {} SAVE_SREG: ","ext_svrs_sreg is m16_svrs_s0 & m16_svrs_s1 & ext_value_xreg & ext_svrs_sreg 
& REGSV_S0 & REGSV_S1 & REGSV_S2 [ext_svrs_s0=m16_svrs_s0;ext_svrs_s1=m16_svrs_s1;ext_svrs_xs=ext_value_xreg;] { build REGSV_S2; build REGSV_S1; build REGSV_S0; } REST_RA: is m16_svrs_ra=0 {} REST_RA: ",ra" is m16_svrs_ra=1 { tsp = tsp-$(REGSIZE); MemSrcCast(ra,tsp); } SAVE_RA: is m16_svrs_ra=0 {} SAVE_RA: ",ra" is m16_svrs_ra=1 { tsp = tsp-$(REGSIZE); MemDestCast(tsp,ra); } REST_TOP: EXT_FRAME^REST_RA^REST_SREG^REST_STAT is EXT_FRAME & REST_RA & REST_SREG & REST_STAT { build EXT_FRAME; tmp:2 = EXT_FRAME; tsp = sp+zext(tmp); build REST_RA; build REST_SREG; build REST_STAT; sp = sp+zext(tmp); } SAVE_TOP: SAVE_ARG^EXT_FRAME^SAVE_RA^SAVE_SREG^SAVE_STAT is EXT_FRAME & SAVE_RA & SAVE_SREG & SAVE_ARG & SAVE_STAT { tsp = sp; build SAVE_ARG; build SAVE_RA; build SAVE_SREG; build SAVE_STAT; build EXT_FRAME; tmp:2 = EXT_FRAME; sp = sp - zext(tmp); } # The non-extended PC relative clears the lower 2 bits *after* the add. # The extended version clears the lower 2 bits *before* the add. The difference in how they do it is correct :addiu RX32, pc, EXT_IU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00001 & ext_is_ext=0 & RX32 & EXT_IU8 & pc & ext_delay & RX { tmp:4 = zext(EXT_IU8); tmpa:4 = ext_delay; RX32 = (inst_start + tmp - (tmpa << 1)) & 0xFFFFFFFC; @ifdef MIPS64 RX = sext(RX32); @endif } :addiu RX32, pc, EXT_IS8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00001 & ext_is_ext=1 & RX32 & EXT_IS8 & pc & RX { tmp:4 = sext(EXT_IS8); RX32 = (inst_start & 0xFFFFFFFC) + tmp; @ifdef MIPS64 RX = sext(RX32); @endif } :addiu RX32, EXT_IS8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01001 & RX32 & EXT_IS8 & RX { tmp:4 = sext(EXT_IS8); RX32 = RX32 + tmp; @ifdef MIPS64 RX = sext(RX32); @endif } :addiu RY32, RX32, EXT_RRIA is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01000 & m16_rria_f=0 & RX32 & RY32 & EXT_RRIA & RX { tmp:4 = sext(EXT_RRIA); RY32 = RX32 + tmp; @ifdef MIPS64 RX = sext(RX32); @endif } :addiu sp, EXT_IS8L3 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 
& m16_i8_f=0b011 & ext_is_ext=0 & sp & EXT_IS8L3 { tmp:$(REGSIZE) = sext(EXT_IS8L3); sp = sp + tmp; } :addiu sp, EXT_IS8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b011 & ext_is_ext=1 & sp & EXT_IS8 { tmp:$(REGSIZE) = sext(EXT_IS8); sp = sp + tmp; } :addiu RX32, sp, EXT_IU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00000 & ext_is_ext=0 & sp & RX32 & EXT_IU8 & RX { tmp:4 = zext(EXT_IU8); RX32 = sp:4 + tmp; @ifdef MIPS64 RX = sext(RX32); @endif } :addiu RX32, sp, EXT_IS8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00000 & ext_is_ext=1 & sp & RX32 & EXT_IS8 & RX { tmp:4 = sext(EXT_IS8); RX32 = sp:4 + tmp; @ifdef MIPS64 RX = sext(RX32); @endif } :addu RZ32, RX32, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=0 & m16_op=0b11100 & m16_rrr_f=0b01 & RX32 & RY32 & RZ32 & RZ { RZ32 = RX32 + RY32; @ifdef MIPS64 RZ = sext(RZ32); @endif } :and m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b01100 & m16_rx & m16_ry { m16_rx = m16_rx & m16_ry; } :asmacro ext_value_select is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & m16_op=0b11100 & ext_value_select { } :b Rel16_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00010 & Rel16_m16 { goto Rel16_m16; } :beqz RX32, CRel16_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00100 & RX32 & CRel16_m16 { if (RX32 == 0) goto CRel16_m16; } :bnez RX32, CRel16_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00101 & RX32 & CRel16_m16 { if (RX32 != 0) goto CRel16_m16; } :break m16_code is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00101 & m16_code { tmp:4=m16_code; trap(tmp); } :bteqz CRel16_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b000 & CRel16_m16 { if (t8 == 0) goto CRel16_m16; } :btnez CRel16_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b001 & CRel16_m16 { if (t8 != 0) goto CRel16_m16; } :cmp m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b01010 
& m16_rx & m16_ry { t8 = m16_rx ^ m16_ry; } :cmpi m16_rx, m16_iu8_imm is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01110 & ext_is_ext=0 & m16_rx & m16_iu8_imm { tmpa:1 = m16_iu8_imm; tmp:$(REGSIZE) = zext(tmpa); t8 = m16_rx ^ tmp; } :cmpi m16_rx, EXT_RI is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01110 & ext_is_ext=1 & m16_rx & EXT_RI { tmp:$(REGSIZE) = zext(EXT_RI); t8 = m16_rx ^ tmp; } :div RX32, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b11010 & RX32 & RY32 { if (RY32 == 0) goto ; lo = sext(RX32 s/ RY32); hi = sext(RX32 s% RY32); } :divu RX32, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b11011 & RX32 & RY32 { if (RY32 == 0) goto ; lo = sext(RX32 / RY32); hi = sext(RX32 % RY32); } :jal Abs26_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=1 & ext_tgt_x=0 & Abs26_m16 [ ext_delay=0b10; globalset(inst_next, ext_delay);] { ra = inst_next | 0x1; delayslot( 1 ); call Abs26_m16; #double check gpr31 bit } :jalr m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_nd=0 & m16_rr_l=1 & m16_rr_ra=0 & m16_rr_f=0b00000 & m16_rx [ ext_delay=0b01; globalset(inst_next, ext_delay); globalset(inst_start, ext_delay); ] { JXWritePC(m16_rx); ra = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jalrc m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_nd=1 & m16_rr_l=1 & m16_rr_ra=0 & m16_rr_f=0b00000 & m16_rx { JXWritePC(m16_rx); ra = inst_next | 0x1; call [pc]; } :jalx Abs26_m16 is ISA_MODE=1 & RELP=1 & ext_isjal=1 & ext_tgt_x=1 & Abs26_m16 [ ext_delay=0b10; ISA_MODE = 0; globalset(Abs26_m16, ISA_MODE); globalset(inst_next, ext_delay); globalset(inst_start, ext_delay); ] { ra = inst_next | 0x1; delayslot( 1 ); ISAModeSwitch = 0; call Abs26_m16; } :jr ra is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_nd=0 & m16_rr_l=0 & m16_rr_ra=1 & ra & m16_rr_f=0b00000 & m16_rx=0 [ ext_delay=0b01; globalset(inst_next, ext_delay); globalset(inst_start, ext_delay); ] { JXWritePC(ra); delayslot( 1 
); return [pc]; } :jr m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_nd=0 & m16_rr_l=0 & m16_rr_ra=0 & m16_rr_f=0b00000 & m16_rx [ ext_delay=0b01; globalset(inst_next, ext_delay); globalset(inst_start, ext_delay); ] { JXWritePC(m16_rx); delayslot( 1 ); goto [pc]; } :jrc ra is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_nd=1 & m16_rr_l=0 & m16_rr_ra=1 & ra & m16_rr_f=0b00000 & m16_rx=0 { JXWritePC(ra); return [pc]; } :jrc m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_nd=1 & m16_rr_l=0 & m16_rr_ra=0 & m16_rr_f=0b00000 & m16_rx { JXWritePC(m16_rx); goto [pc]; } :lb m16_ry, OFF_M16S0 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10000 & m16_ry & OFF_M16S0 { m16_ry = sext(*[ram]:1 OFF_M16S0); } :lbu m16_ry, OFF_M16S0 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10100 & m16_ry & OFF_M16S0 { m16_ry = zext(*[ram]:1 OFF_M16S0); } :lh m16_ry, OFF_M16S1 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10001 & m16_ry & OFF_M16S1 { m16_ry = sext(*[ram]:2 OFF_M16S1); } :lhu m16_ry, OFF_M16S1 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10101 & m16_ry & OFF_M16S1 { m16_ry = zext(*[ram]:2 OFF_M16S1); } :li m16_rx, EXT_LIU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01101 & m16_rx & EXT_LIU8 { m16_rx = zext(EXT_LIU8); } :lw m16_ry, OFF_M16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10011 & m16_ry & OFF_M16 { m16_ry = sext(*[ram]:4 OFF_M16); } :lw m16_rx, OFF_M16PC is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10110 & m16_rx & OFF_M16PC { m16_rx = sext(*[ram]:4 OFF_M16PC); } :lw m16_rx, OFF_M16SP is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b10010 & m16_rx & OFF_M16SP { m16_rx = sext(*[ram]:4 OFF_M16SP); } :mfhi m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b10000 & m16_rr_z=0 & m16_rx { m16_rx = hi; } :mflo m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b10010 & m16_rr_z=0 & m16_rx { m16_rx = lo; } :move ext_m16r32, m16_mv_rz is ISA_MODE=1 & 
RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b101 & ext_m16r32 & m16_mv_rz & m16_i8_r32_20 & m16_i8_r32_43 [ ext_reg_low = m16_i8_r32_20; ext_reg_high = m16_i8_r32_43 ; ] { ext_m16r32 = m16_mv_rz; } :move m16_ry, m16_i8_r32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b111 & m16_ry & m16_i8_r32 { m16_ry = m16_i8_r32; } :mult RX32, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b11000 & RX32 & RY32 { tmp1:8 = sext( RX32 ); tmp2:8 = sext( RY32 ); prod:8 = tmp1 * tmp2; lo = sext(prod:4); prod = prod >> 32; hi = sext(prod:4); } :multu RX32, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b11001 & RX32 & RY32 { tmp1:8 = zext( RX32 ); tmp2:8 = zext( RY32 ); prod:8 = tmp1 * tmp2; lo = sext(prod:4); prod = prod >> 32; hi = sext(prod:4); } :neg RX, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b01011 & RX & RY32 { RX = sext(0-RY32); } :not m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b01111 & m16_rx & m16_ry { m16_rx = ~m16_ry; } # nop is technically a special case of a move instruction that moves 0 to 0 :nop is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b101 & m16_i8_imm=0 & m16_ri_z=0 {} :or m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b01101 & m16_rx & m16_ry { m16_rx = m16_rx | m16_ry; } # save/restore argument format #,,,, :restore REST_TOP is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b100 & m16_svrs_s=0 & REST_TOP { build REST_TOP; } :save SAVE_TOP is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_f=0b100 & m16_svrs_s=1 & SAVE_TOP { build SAVE_TOP; } :sb RY32, OFF_M16S0 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11000 & RY32 & OFF_M16S0 { *[ram]:1 OFF_M16S0 = RY32:1; } :sdbbp m16_code is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00001 & m16_code { signalDebugBreakpointException(); } :seb m16_rx is ISA_MODE=1 & RELP=1 & 
ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b10001 & m16_rr_z=0b100 & m16_rx { tmp:1 = m16_rx:1; tmpb:$(REGSIZE) = sext(tmp); m16_rx = tmpb; } :seh m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b10001 & m16_rr_z=0b101 & m16_rx { tmp:2 = m16_rx:2; tmpb:$(REGSIZE) = sext(tmp); m16_rx = tmpb; } :sh m16_ry, OFF_M16S1 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11001 & m16_ry & OFF_M16S1 { *[ram]:2 OFF_M16S1 = m16_ry:2; } :sll m16_rx, m16_ry, EXT_SHIFT is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00110 & m16_shft_f=0b00 & m16_rx & m16_ry & EXT_SHIFT { m16_rx = m16_ry << EXT_SHIFT; } :sllv m16_ry, m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00100 & m16_rx & m16_ry { m16_ry = m16_ry << (m16_rx & 0x1f); } :slt m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00010 & m16_rx & m16_ry { t8 = zext( m16_rx s< m16_ry ); } :slti m16_rx, EXT_SET is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01010 & m16_rx & EXT_SET { tmpa:4 = EXT_SET; tmp:$(REGSIZE) = sext(tmpa); t8 = zext( m16_rx s< tmp); } :sltiu m16_rx, EXT_SET is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01011 & m16_rx & EXT_SET { tmpa:4 = EXT_SET; tmp:$(REGSIZE) = sext(tmpa); t8 = zext( m16_rx < tmp); } :sltu m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00011 & m16_rx & m16_ry { t8 = zext( m16_rx < m16_ry ); } :sra m16_rx, m16_ry, EXT_SHIFT is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00110 & m16_shft_f=0b11 & m16_rx & m16_ry & EXT_SHIFT { m16_rx = m16_ry s>> EXT_SHIFT; } :srav m16_ry, m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00111 & m16_rx & m16_ry { m16_ry = m16_ry s>> (m16_rx & 0x1f); } :srl m16_rx, m16_ry, EXT_SHIFT is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b00110 & m16_shft_f=0b10 & m16_rx & m16_ry & EXT_SHIFT { m16_rx = m16_ry >> EXT_SHIFT; } :srlv m16_ry, m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b00110 & m16_rx & 
m16_ry { m16_ry = m16_ry >> (m16_rx & 0x1f); } :subu RZ32, RX32, RY32 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=0 & m16_op=0b11100 & m16_rrr_f=0b11 & RX32 & RY32 & RZ32 & RZ { RZ32 = RX32 - RY32; @ifdef MIPS64 RZ = sext(RZ32); @endif } :sw m16_ry, OFF_M16 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11011 & m16_ry & OFF_M16 { *[ram]:4 OFF_M16 = m16_ry:4; } :sw m16_rx, OFF_M16SP is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11010 & m16_rx & OFF_M16SP { *[ram]:4 OFF_M16SP = m16_rx:4; } :sw ra, OFF_M16SP is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01100 & m16_i8_sw=0b010 & ra & OFF_M16SP { *[ram]:4 OFF_M16SP = ra:4; } :xor m16_rx, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_f=0b01110 & m16_rx & m16_ry { m16_rx = m16_rx ^ m16_ry; } :zeb m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_z=0b000 & m16_rr_f=0b10001 & m16_rx { tmp:1 = m16_rx:1; m16_rx = zext(tmp); } :zeh m16_rx is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b11101 & m16_rr_z=0b001 & m16_rr_f=0b10001 & m16_rx { tmp:2 = m16_rx:2; m16_rx = zext(tmp); } ################ # # MIPS16e2 # # MIPS16e2 Application Specific Extension # Technical Reference Manual # # Document #: MD01172 Rev 1.00 April 26, 2016 # ################ E2_REGOFF: imm is ext_imm_2124 & m16_i_imm [ imm = m16_i_imm | (ext_imm_2124 << 5);] { export *[const]:2 imm; } :addiu m16_rx, gp, EXT_IS8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & m16_op=0b00000 & ext_is_ext=1 & gp & m16_rx & m16_ri_z=1 & EXT_IS8 { m16_rx = gp + sext(EXT_IS8); } :andi m16_rx, EXT_LIU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & m16_op=0b01101 & m16_rx & m16_ri_z=3 & EXT_LIU8 { m16_rx = m16_rx & zext(EXT_LIU8); } :cache ext_imm_1620, E2_REGOFF(m16_rx) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1620 & m16_op=0b11010 & m16_rx & m16_ri_z=5 & E2_REGOFF { local tmp:$(REGSIZE) = m16_rx + sext(E2_REGOFF); cacheOp(ext_imm_1620:1, tmp); } :di is ISA_MODE=1 & RELP=1 & 
ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0 & ext_imm_1620=0b00110 & m16_op=0b01100 & m16_rx=0b111 & m16_ry=0 & m16_i_imm=0b01100 { Status = Status & ~1; } :di m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0 & ext_imm_1620=0b00010 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm=0b01100 { m16_ry = Status; Status = Status & ~1; # clearing last bit (ffff..fffe == -2 signed) } :dmt is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0b001 & ext_imm_1620=0b00110 & m16_op=0b01100 & m16_rx=0b111 & m16_ry=0 & m16_i_imm=1 { # Clear VPEControl IE bit (bit 15) VPEControl = VPEControl & ~0x8000; #VPEControl[15,1] = 0; } :dmt m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0b001 & ext_imm_1620=0b00010 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm=1 { # Clear VPEControl IE bit (bit 15) m16_ry = VPEControl; VPEControl = VPEControl & ~0x8000; #VPEControl[15,1] = 0; } :dvpe m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0b001 & ext_imm_1620=0b00010 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm=0 { # Clear MVPControl EVP bit (bit 0) m16_ry = MVPControl; MVPControl = MVPControl & ~0x1; } :dvpe is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0b001 & ext_imm_1620=0b00110 & m16_op=0b01100 & m16_rx=0b111 & m16_ry=0 & m16_i_imm=0 { # Clear MVPControl EVP bit (bit 0) MVPControl = MVPControl & ~0x1; } :ehb is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0b00011 & ext_imm_21=0 & ext_imm_1620=0 & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=4 & m16_shft_f=0 { } :ei is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=0 & ext_imm_1620=0b00111 & m16_op=0b01100 & m16_rx=0b111 & m16_ry=0 & m16_i_imm=0b01100 { Status = Status | 1; } :ei m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & 
ext_imm_2123=0 & ext_imm_1620=0b00011 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm=0b01100
{
    m16_ry = Status;
    Status = Status | 1;
}

:emt is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=1 & ext_imm_1620=0b00111 & m16_op=0b01100 & m16_rx=0b111 & m16_ry=0 & m16_i_imm=1
{
    # Set VPEControl TE bit (bit 15)
    VPEControl = VPEControl | 0x8000; # VPEControl[15,1] = 1;
}

:emt m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=1 & ext_imm_1620=0b00011 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm=1
{
    # Set VPEControl TE bit (bit 15)
    m16_ry = VPEControl;
    VPEControl = VPEControl | 0x8000; # VPEControl[15,1] = 1;
}

:evpe is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=1 & ext_imm_1620=0b00111 & m16_op=0b01100 & m16_rx=0b111 & m16_ry=0 & m16_i_imm=0
{
    # Set MVPControl EVP bit (bit 0)
    MVPControl = MVPControl | 0x1;
}

:evpe m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123=1 & ext_imm_1620=0b00011 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm=0
{
    # Set MVPControl EVP bit (bit 0)
    m16_ry = MVPControl;
    MVPControl = MVPControl | 0x1;
}

:ext m16_ry, m16_rx, ext_imm_2226, ext_size is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226 & ext_imm_21=1 & ext_imm_1620 & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=2 & m16_shft_f=0 [ ext_size = ext_imm_1620+1; ]
{
    local rs_tmp:$(REGSIZE) = m16_rx << ($(REGSIZE) * 8 - (ext_size + ext_imm_2226));
    rs_tmp = rs_tmp >> ($(REGSIZE) * 8 - ext_size);
    m16_ry = zext(rs_tmp);
}

:ins m16_ry, m16_rx, ext_imm_2226, ins_size is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226 & ext_imm_21=1 & ext_imm_1620 & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=1 & m16_shft_f=0 [ ins_size = ext_imm_1620 - ext_imm_2226 + 1; ]
{
    local tmpa:$(REGSIZE) = -1;
    tmpa = tmpa >> ($(REGSIZE) * 8 - ins_size);
    local tmpb:$(REGSIZE) = m16_rx & tmpa;
    tmpa = tmpa << ext_imm_2226;
tmpa = ~tmpa; tmpb = tmpb << ext_imm_2226; m16_ry = (m16_ry & tmpa) | tmpb; } :ins m16_ry, zero, ext_imm_2226, ins_size is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226 & ext_imm_21=0 & ext_imm_1620 & m16_op=0b00110 & m16_rx=0 & m16_ry & m16_shft_sa=1 & m16_shft_f=0 & zero [ ins_size = ext_imm_1620 - ext_imm_2226 + 1; ] { local tmpa:$(REGSIZE) = -1; tmpa = tmpa >> ($(REGSIZE) * 8 - ins_size); tmpa = tmpa << ext_imm_2226; tmpa = ~tmpa; m16_ry = (m16_ry & tmpa); } # LB/LBU/LH/LHU/LW - handled by mips16 :ll m16_rx, E2_REGOFF(ext_rb) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1920=0 & ext_rb & m16_op=0b10010 & m16_rx & m16_ri_z=6 & E2_REGOFF { local tmp:$(REGSIZE) = sext(E2_REGOFF); tmp = tmp + ext_rb; local tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); m16_rx = sext(*[ram]:4 tmpa); lockload(tmp); } :lui m16_rx, EXT_LIU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & m16_op=0b01101 & m16_rx & m16_ri_z=1 & EXT_LIU8 { m16_rx = zext(EXT_LIU8) << 16; } :lwl m16_rx, E2_REGOFF(ext_rb) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1920=0 & ext_rb & m16_op=0b10010 & m16_rx & m16_ri_z=7 & E2_REGOFF { local tmp:$(REGSIZE) = sext(E2_REGOFF); tmp = tmp + ext_rb; local shft:$(REGSIZE) = tmp & 0x3; local addr:$(REGSIZE) = tmp - shft; local valOrig:4 = m16_rx:$(SIZETO4) & (0xffffffff >> ((4-shft) * 8)); local valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad << (shft * 8); m16_rx = sext( valLoad | valOrig ); } :lwr m16_rx, E2_REGOFF(ext_rb) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1920=0b10 & ext_rb & m16_op=0b10010 & m16_rx & m16_ri_z=7 & E2_REGOFF { local tmp:$(REGSIZE) = sext(E2_REGOFF); tmp = tmp + ext_rb; local shft:$(REGSIZE) = tmp & 0x3; local addr:$(REGSIZE) = tmp - shft; local valOrig:4 = m16_rx:$(SIZETO4) & (0xffffffff << ((shft+1) * 8)); local valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad >> ((3-shft) * 8); m16_rx = sext( valOrig | valLoad 
); } :mfc0 m16_ry, m16_i_imm, ext_imm_2123 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123 & ext_imm_1620=0 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm { m16_ry = getCopReg(0:1,m16_i_imm:1,ext_imm_2123:1); } :mtc0 m16_ry, m16_i_imm, ext_imm_2123 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2426=0 & ext_imm_2123 & ext_imm_1620=1 & m16_op=0b01100 & m16_rx=0b111 & m16_ry & m16_i_imm { setCopReg(0:1,m16_ry,m16_i_imm:1,ext_imm_2123:1); } :movz m16_rx, m16_ry, ext_rb is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=1 & ext_imm_1920=0 & ext_rb & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=1 & m16_shft_f=0b10 { if(m16_ry != 0) goto inst_next; m16_rx = ext_rb; } :movz m16_rx, zero, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=0 & ext_imm_1920=0 & ext_rb=0 & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=1 & m16_shft_f=0b10 & zero { if(m16_ry != 0) goto inst_next; m16_rx = 0; } :movn m16_rx, m16_ry, ext_rb is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=1 & ext_imm_1920=0 & ext_rb & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=2 & m16_shft_f=0b10 { if(m16_ry == 0) goto inst_next; m16_rx = ext_rb; } :movn m16_rx, zero, m16_ry is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=0 & ext_imm_1920=0 & ext_rb=0 & m16_op=0b00110 & m16_rx & m16_ry & m16_shft_sa=2 & m16_shft_f=0b10 & zero { if(m16_ry == 0) goto inst_next; m16_rx = 0; } :movtn m16_rx, zero is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=0 & ext_imm_1920=0 & ext_rb & m16_op=0b00110 & m16_rx & m16_rr_z=0 & m16_shft_sa=6 & m16_shft_f=0b10 & zero { if(t8 == 0) goto inst_next; m16_rx = 0; } :movtn m16_rx, ext_rb is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=1 & ext_imm_1920=0 & ext_rb & m16_op=0b00110 & m16_rx & m16_rr_z=0 & m16_shft_sa=6 & 
m16_shft_f=0b10 { if(t8 == 0) goto inst_next; m16_rx = ext_rb; } :movtz m16_rx, zero is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=0 & ext_imm_1920=0 & ext_rb & m16_op=0b00110 & m16_rx & m16_rr_z=0 & m16_shft_sa=5 & m16_shft_f=0b10 & zero { if(t8 != 0) goto inst_next; m16_rx = 0; } :movtz m16_rx, ext_rb is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=1 & ext_imm_1920=0 & ext_rb & m16_op=0b00110 & m16_rx & m16_rr_z=0 & m16_shft_sa=5 & m16_shft_f=0b10 { if(t8 != 0) goto inst_next; m16_rx = ext_rb; } :pause is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0b00101 & ext_imm_21=0 & ext_imm_1620=0 & m16_op=0b00110 & m16_rx=0 & m16_rr_z=0 & m16_shft_sa=6 & m16_shft_f=0 { wait(); } :pref ext_imm_1620, E2_REGOFF(m16_rx) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1620 & ext_rb & m16_op=0b11010 & m16_rx & m16_ri_z=4 & E2_REGOFF { local tmp:$(REGSIZE) = m16_rx + sext(E2_REGOFF); prefetch(tmp, ext_imm_1620:1); } :ori m16_rx, EXT_LIU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & m16_op=0b01101 & m16_rx & m16_ri_z=2 & EXT_LIU8 { m16_rx = m16_rx | zext(EXT_LIU8); } :rdhwr m16_ry, ext_imm_1620 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226=0 & ext_imm_21=0 & ext_imm_1620 & m16_op=0b00110 & m16_rx=0 & m16_ry & m16_shft_sa=3 & m16_shft_f=0 { m16_ry = getHWRegister(ext_imm_1620:1); } # SB/SH/SW - handled by mips16 :sc m16_rx, E2_REGOFF(ext_rb) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1920=0 & ext_rb & m16_op=0b11010 & m16_rx & m16_ri_z=6 & E2_REGOFF{ local tmp:$(REGSIZE) = sext(E2_REGOFF); tmp = tmp + ext_rb; lockwrite(tmp); local tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = m16_rx; m16_rx = 1; } :swl m16_rx, E2_REGOFF(ext_rb) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1920=0b00 & ext_rb & m16_op=0b11010 & m16_rx & m16_ri_z=7 & E2_REGOFF{ local 
tmp:$(REGSIZE) = sext(E2_REGOFF); tmp = tmp + ext_rb; local tmpRT:4 = m16_rx:$(SIZETO4); local shft:$(REGSIZE) = tmp & 0x3; local addr:$(REGSIZE) = tmp - shft; local valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff << ((4-shft) * 8)); local valStore:4 = (tmpRT >> (shft * 8)) | valOrig; MemDestCast(addr,valStore); } :swr m16_rx, E2_REGOFF(ext_rb) is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2526=0 & ext_imm_1920=0b10 & ext_rb & m16_op=0b11010 & m16_rx & m16_ri_z=7 & E2_REGOFF { local tmp:$(REGSIZE) = sext(E2_REGOFF); tmp = tmp + ext_rb; local tmpRT:4 = m16_rx:$(SIZETO4); local shft:$(REGSIZE) = tmp & 0x3; local addr:$(REGSIZE) = tmp - shft; local valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff >> ((shft+1) * 8)); local valStore:4 = (tmpRT << ((3-shft)*8)) | valOrig; MemDestCast(addr,valStore); } :sync ext_imm_2226 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & ext_imm_2226 & ext_imm_21=0 & ext_imm_1620=0 & m16_op=0b00110 & m16_rx=0 & m16_ry=0 & m16_shft_sa=5 & m16_shft_f=0b00 { SYNC(ext_imm_2226:1); } :xori m16_rx, EXT_LIU8 is ISA_MODE=1 & RELP=1 & ext_isjal=0 & ext_is_ext=1 & m16_op=0b01101 & m16_rx & m16_ri_z=4 & EXT_LIU8 { m16_rx = m16_rx ^ zext(EXT_LIU8); } ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32.pspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32Instructions.sinc ================================================ ############################ # # MIPS32 # Basic and FP (COP1) instructions # # (See bottom of file for MIPS64 instructions included with MIPS32) # ############################ @if defined(ISA_VARIANT) @define AMODE "ISA_MODE=0" # ISA_MODE must restrict MIPS instruction decoding and require ISA_MODE=0 @else @define AMODE "epsilon" # Mips16 instructions not supported - Mips32 only @endif # 0000 00ss ssst tttt dddd d000 
0010 0000 :add RD32, RS32src, RT32src is $(AMODE) & prime=0 & sa=0 & fct=0x20 & RD32 & RS32src & RT32src & RD { RD32 = RS32src + RT32src; @ifdef MIPS64 RD = sext(RD32); @endif } # 0010 01ss ssst tttt iiii iiii iiii iiii :addiu RT32, RS32src, simmed is $(AMODE) & prime=9 & RT32 & RS32src & simmed & RT { RT32 = RS32src + simmed; @ifdef MIPS64 RT = sext(RT32); @endif } # 0000 00ss ssst tttt dddd d000 0010 0001 :addu RD32, RS32src, RT32src is $(AMODE) & prime=0 & fct=0x21 & RS32src & RT32src & RD32 & sa=0 & RD { RD32 = RS32src + RT32src; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0010 0100 :and RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x24 & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc & RTsrc; } # 0011 00ss ssst tttt iiii iiii iiii iiii :andi RT, RSsrc, immed is $(AMODE) & prime=0xC & RSsrc & RT & immed { RT = RSsrc & immed; } # 0001 0000 0000 0000 iiii iiii iiii iiii :b Rel16 is $(AMODE) & prime=4 & rs=0 & rt=0 & Rel16 { delayslot(1); goto Rel16; } # 0001 00ss ssst tttt iiii iiii iiii iiii :beq RSsrc, RTsrc, Rel16 is $(AMODE) & prime=4 & RSsrc & RTsrc & Rel16 { delayflag:1 = ( RSsrc == RTsrc ); delayslot( 1 ); if delayflag goto Rel16; } # 0000 01ss sss0 0001 iiii iiii iiii iiii :bgez RSsrc, Rel16 is $(AMODE) & prime=1 & cond=1 & RSsrc & Rel16 { delayflag:1 = ( RSsrc s>= 0 ); delayslot( 1 ); if delayflag goto Rel16; } # 0001 11ss sss0 0000 iiii iiii iiii iiii :bgtz RSsrc, Rel16 is $(AMODE) & prime=7 & cond=0 & RSsrc & Rel16 { delayflag:1 = ( RSsrc s> 0 ); delayslot( 1 ); if delayflag goto Rel16; } # 0001 10ss sss0 0000 iiii iiii iiii iiii :blez RSsrc, Rel16 is $(AMODE) & prime=6 & cond=0 & RSsrc & Rel16 { delayflag:1 = ( RSsrc s<= 0 ); delayslot( 1 ); if delayflag goto Rel16; } # 0000 01ss sss0 0000 iiii iiii iiii iiii :bltz RSsrc, Rel16 is $(AMODE) & prime=1 & cond=0 & RSsrc & Rel16 { delayflag:1 = ( RSsrc s< 0 ); delayslot( 1 ); if delayflag goto Rel16; } # 0001 01ss ssst tttt iiii iiii iiii iiii :bne RSsrc, RTsrc, Rel16 is $(AMODE) & 
prime=5 & RSsrc & RTsrc & Rel16 { delayflag:1 = ( RSsrc != RTsrc ); delayslot( 1 ); if delayflag goto Rel16; } # 0000 00cc cccc cccc cccc cccc cc00 1101 :break breakcode is $(AMODE) & prime=0 & fct=0xD & breakcode { tmp:4=breakcode; trap(tmp); } # 1011 11bb bbbo oooo iiii iiii iiii iiii :cache op, OFF_BASER6 is $(AMODE) & ((prime=0x2F & REL6=0) | (prime=0x1F & REL6=1 & fct=0x25 & bit6=0)) & OFF_BASER6 & op { cacheOp(op:1, OFF_BASER6); } :cachee op, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x1B & bit6=0 & OFF_BASER6 & op { cacheOp(op:1, OFF_BASER6); } :cfc0 RT, RD0 is $(AMODE) & prime=0x10 & copop=2 & RT & RD0 & bigfunct=0 { RT = sext( RD0:$(SIZETO4) ); } # 0100 1000 010t tttt ssss s000 0000 0000 :cfc2 RT, immed is $(AMODE) & prime=0x12 & copop=2 & RT & immed { tmp:4 = getCopControlWord( 2:1, immed:4 ); RT = sext(tmp); } # Special case of ADDU # 0000 0000 0000 0000 dddd d000 0010 0001 :clear RD is $(AMODE) & prime=0 & fct=0x21 & rs=0 & rt=0 & RD & sa=0 { RD = 0; } define pcodeop special2; # 0111 00ss ssst tttt dddd daaa aaxx xyyy # valid values of x and y: # x: 0 y: 3,6,7 # x: 1 y: 0-7 # x: 2 y: 0-7 # x: 3 y: 0-7 # x: 4 y: 2,3,6,7 # x: 5 y: 0-7 # x: 6 y: 0-7 # x: 7 y: 0-6 :SPECIAL2 RD, RSsrc, RTsrc, sa, fct is $(AMODE) & prime=0x1C & sa & RD & RSsrc & RTsrc & fct { tmp:1 = fct; tmp2:1 = sa; RD = special2(RSsrc, RTsrc, tmp2, tmp); } # 0100 101c cccc cccc cccc cccc cccc cccc :cop2 cofun is $(AMODE) & prime=0x12 & bit25=1 & cofun { arg:4 = cofun; copFunction(2:1, arg); } :ctc0 RTsrc, RD0 is $(AMODE) & prime=0x10 & copop=6 & RTsrc & RD0 & bigfunct=0 { RD0 = RTsrc; } # 0100 1000 110t tttt iiii iiii iiii iiii :ctc2 RTsrc, immed is $(AMODE) & prime=0x12 & copop=6 & RTsrc & immed { setCopControlWord( 2:1, immed:4, RTsrc ); } # 0100 0010 0000 0000 0000 0000 0001 1111 :deret is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x0 & fct=0x1F { return[DEPC]; } # 0100 0001 011t tttt 0110 0000 0000 0000 :di is $(AMODE) & prime=0x10 & mfmc0=0x0B & rd=0x0C & fct2=0x0 & bit5=0x0 & 
zero3=0x0 & rt=0x0 { Status = Status & -2; # clearing last bit (ffff..fffe == -2 signed) } :di RT is $(AMODE) & prime=0x10 & mfmc0=0x0B & rd=0x0C & fct2=0x0 & bit5=0x0 & zero3=0x0 & RT { RT = Status; Status = Status & -2; # clearing last bit (ffff..fffe == -2 signed) } # 0000 0000 0000 0000 0000 0000 1100 0000 :ehb is $(AMODE) & prime=0x0 & rs=0x0 & rt=0x0 & rd=0x0 & fct2=0x3 & fct=0x0 { } # 0100 0001 011t tttt 0110 0000 0010 0000 :ei is $(AMODE) & prime=0x10 & mfmc0=0x0B & rd=0x0C & fct2=0x0 & bit5=0x01 & zero3=0x0 & rt=0x0 { Status = Status | 1; } :ei RT is $(AMODE) & prime=0x10 & mfmc0=0x0B & rd=0x0C & fct2=0x0 & bit5=0x01 & zero3=0x0 & RT { RT = Status; Status = Status | 1; } # MIPS R3000 and prior only, replaced with ERET in R4000 and later # 0100 0010 0000 0000 0000 0000 0001 0000 :rfe is $(AMODE) & prime=0x10 & fct=0x10 & bit25=1 & copfill=0 { local currentStatus = Status; Status = (currentStatus & 0xfffffff0) | ((currentStatus & 0x3c) >> 2); } # 0100 0010 0000 0000 0000 0000 0001 1000 :eret is $(AMODE) & prime=0x10 & fct=0x18 & bit25=1 & copfill=0 { return[EPC]; } :eretnc is $(AMODE) & prime=0x10 & fct=0x18 & bit25=1 & copfill=1 { return[EPC]; } # 0111 11ss ssst tttt mmmm mLLL LL00 0000 :ext RT, RSsrc, lsb, ExtSize is $(AMODE) & prime=0x1F & fct=0x0 & RT & RSsrc & lsb & msbd & ExtSize { # Extract Bit Field # RT = extractField(RSsrc, msbd:1, lsb:1); # Note that msbd = size - 1 rs_tmp:$(REGSIZE) = RSsrc << ($(REGSIZE) * 8 - (msbd + lsb + 1)); rs_tmp = rs_tmp >> ($(REGSIZE) * 8 - (msbd + 1)); RT = zext(rs_tmp); } # 0111 11ss ssst tttt mmmm mLLL LL00 0100 :ins RT, RSsrc, lsb, InsSize is $(AMODE) & prime=0x1F & fct=0x04 & RT & RTsrc & RSsrc & lsb & msbd & InsSize { tmpa:$(REGSIZE) = -1; tmpa = tmpa >> ($(REGSIZE) * 8 - InsSize); tmpb:$(REGSIZE) = RSsrc & tmpa; tmpa = tmpa << lsb; tmpa = ~tmpa; tmpb = tmpb << lsb; RT = (RT & tmpa) | tmpb; } # 0000 10aa aaaa aaaa aaaa aaaa aaaa aaaa :j Abs26 is $(AMODE) & prime=2 & Abs26 { delayslot( 1 ); goto Abs26; } # 0000 11aa 
aaaa aaaa aaaa aaaa aaaa aaaa :jal Abs26 is $(AMODE) & prime=3 & Abs26 { ra = inst_next; delayslot( 1 ); call Abs26; } @ifdef ISA_VARIANT # 0000 00ss sss0 0000 dddd dhhh hh00 1001 :jalr RD, RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & RD { build RD; build RSsrc; JXWritePC(RSsrc); RD = inst_next; delayslot( 1 ); call [pc]; } :jalr RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & rd=0x1F { build RSsrc; JXWritePC(RSsrc); ra = inst_next; delayslot( 1 ); call [pc]; } @else # 0000 00ss sss0 0000 dddd dhhh hh00 1001 :jalr RD, RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & RD { RD = inst_next; delayslot( 1 ); tmp:$(ADDRSIZE) = 0; ValCast(tmp,RSsrc); call [tmp]; } :jalr RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & rd=0x1F { ra = inst_next; delayslot( 1 ); tmp:$(ADDRSIZE) = 0; ValCast(tmp,RSsrc); call [tmp]; } @endif @ifdef ISA_VARIANT # 0000 00ss sss0 0000 dddd d1hh hh00 1001 :jalr.hb RD, RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & RD & bit10=1 { build RD; build RSsrc; JXWritePC(RSsrc); RD = inst_next; delayslot( 1 ); call [pc]; } :jalr.hb RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & rd=0x1F & bit10=1 { build RSsrc; JXWritePC(RSsrc); ra = inst_next; delayslot( 1 ); call [pc]; } @else # 0000 00ss sss0 0000 dddd d1hh hh00 1001 :jalr.hb RD, RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & RD & bit10=1 { RD = inst_next; delayslot( 1 ); tmp:$(ADDRSIZE) = 0; ValCast(tmp,RSsrc); call [tmp]; } :jalr.hb RSsrc is $(AMODE) & prime=0 & fct=9 & RSsrc & rt=0 & rd=0x1F & bit10=1 { ra = inst_next; delayslot( 1 ); tmp:$(ADDRSIZE) = 0; ValCast(tmp,RSsrc); call [tmp]; } @endif @ifdef ISA_VARIANT # 0000 00ss sss0 0000 0000 0hhh hh00 1000 :jr RSsrc is $(AMODE) & prime=0 & ((REL6=0 & fct=8) | (REL6=1 & fct=0x09)) & RSsrc & rt=0 & rd=0 { build RSsrc; JXWritePC(RSsrc); delayslot(1); goto [pc]; } @else # 0000 00ss sss0 0000 0000 0hhh hh00 1000 :jr RSsrc is $(AMODE) & prime=0 & ((REL6=0 & fct=8) | (REL6=1 & fct=0x09)) & RSsrc & rt=0 & rd=0 { 
delayslot(1); tmp:$(ADDRSIZE) = 0; ValCast(tmp,RSsrc); goto [tmp]; } @endif @ifdef ISA_VARIANT # 0000 00ss sss0 0000 0000 01hh hh00 1000 :jr.hb RSsrc is $(AMODE) & prime=0 & ((REL6=0 & fct=8) | (REL6=1 & fct=0x09)) & RSsrc & rt=0 & rd=0 & bit10=1 { build RSsrc; JXWritePC(RSsrc); delayslot(1); goto [pc]; } @else # 0000 00ss sss0 0000 0000 01hh hh00 1000 :jr.hb RSsrc is $(AMODE) & prime=0 & ((REL6=0 & fct=8) | (REL6=1 & fct=0x09)) & RSsrc & rt=0 & rd=0 & bit10=1 { delayslot(1); tmp:$(ADDRSIZE) = 0; ValCast(tmp,RSsrc); goto [tmp]; } @endif # Special case of JR # 0000 0011 1110 0000 0000 0hhh hh00 1000 @ifdef ISA_VARIANT :jr ra is $(AMODE) & prime=0 & ((REL6=0 & fct=8) | (REL6=1 & fct=0x09)) & rs=0x1F & ra & rt=0 & rd=0 & sa=0 { JXWritePC(ra); delayslot(1); return[pc]; } @else :jr ra is $(AMODE) & prime=0 & ((REL6=0 & fct=8) | (REL6=1 & fct=0x09)) & rs=0x1F & ra & rt=0 & rd=0 & sa=0 { delayslot(1); return[ra]; } @endif # 1000 00bb bbbt tttt iiii iiii iiii iiii :lb RT, OFF_BASE is $(AMODE) & prime=0x20 & OFF_BASE & RT { RT = sext(*[ram]:1 OFF_BASE); } :lbe RT, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x2C & bit6=0 & OFF_BASER6 & RT { RT = sext(*[ram]:1 OFF_BASER6); } # 1001 00bb bbbt tttt iiii iiii iiii iiii :lbu RT, OFF_BASE is $(AMODE) & prime=0x24 & OFF_BASE & RT { RT = zext( *[ram]:1 OFF_BASE ); } :lbue RT, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x28 & bit6=0 & OFF_BASER6 & RT { RT = zext( *[ram]:1 OFF_BASER6 ); } # 1000 01bb bbbt tttt iiii iiii iiii iiii :lh RT, OFF_BASE is $(AMODE) & prime=0x21 & OFF_BASE & RT { RT = sext( *[ram]:2 OFF_BASE ); } :lhe RT, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x2D & bit6=0 & OFF_BASER6 & RT { RT = sext( *[ram]:2 OFF_BASER6 ); } # 1001 01bb bbbt tttt iiii iiii iiii iiii :lhu RT, OFF_BASE is $(AMODE) & prime=0x25 & OFF_BASE & RT { RT = zext( *[ram]:2 OFF_BASE ); } :lhue RT, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x29 & bit6=0 & OFF_BASER6 & RT { RT = zext( *[ram]:2 OFF_BASER6 ); } :lle RT, OFF_BASER6 is $(AMODE) & 
prime=0x1F & fct=0x2E & bit6=0 & OFF_BASER6 & RT { RT = sext(*[ram]:4 OFF_BASER6); } # 1000 11bb bbbt tttt iiii iiii iiii iiii :lw RT, OFF_BASE is $(AMODE) & prime=0x23 & OFF_BASE & RT { RT = sext( *[ram]:4 OFF_BASE ); } :lwe RT, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x2F & bit6=0 & OFF_BASER6 & RT { RT = sext( *[ram]:4 OFF_BASER6 ); } :lbx RD, INDEX_BASE is $(AMODE) & prime=0x1F & RD & fct=10 & fct2=22 & INDEX_BASE { RD = sext(*[ram]:1 INDEX_BASE); } :lhux RD, INDEX_BASE is $(AMODE) & prime=0x1F & RD & fct=10 & fct2=20 & INDEX_BASE { RD = zext(*[ram]:2 INDEX_BASE); } @ifdef MIPS64 :lwux RD, INDEX_BASE is $(AMODE) & prime=0x1F & RD & fct=10 & fct2=16 & INDEX_BASE { RD = zext(*[ram]:4 INDEX_BASE); } @endif # 0100 0000 000t tttt dddd d000 0000 0sss :mfc0 RT, RD0 is $(AMODE) & prime=0x10 & copop=0 & RT & RD0 & zero6=0 { RT = sext( RD0:$(SIZETO4) ); } # 0100 1000 000t tttt iiii iiii iiii iiii :mfc2 RT, immed is $(AMODE) & prime=0x12 & copop=0 & RT & immed { tmp:$(REGSIZE) = getCopReg(2:1, immed:4); RT = sext( tmp ); } # 0100 1000 011t tttt iiii iiii iiii iiii :mfhc2 RT, immed is $(AMODE) & prime=0x12 & copop=3 & RT & fs & immed { tmp:$(REGSIZE) = getCopReg(2:1, immed:4); RT = sext(tmp >> 32); } # Special case of ADDIU # 0010 0100 000t tttt iiii iiii iiii iiii :li RT, simmed is $(AMODE) & prime=9 & rs=0 & RT & simmed { RT = simmed; } # Special case of ADDU # 0000 0000 000t tttt dddd d000 0010 0001 :move RD, RTsrc is $(AMODE) & prime=0 & fct=0x21 & rs=0 & RD & RTsrc & sa=0 { RD = RTsrc; } # Special case of ADDU # 0000 00ss sss0 0000 dddd d000 0010 0001 :move RD, RSsrc is $(AMODE) & prime=0 & fct=0x21 & RSsrc & rt=0 & RD & sa=0 { RD = RSsrc; } # 0100 0000 100t tttt dddd d000 0000 0sss :mtc0 RTsrc, RD0, sel is $(AMODE) & prime=0x10 & copop=4 & RTsrc & RD0 & zero6=0 & sel { setCopReg(0:1, RD0, RTsrc, sel:1); } # 0100 1000 100t tttt iiii iiii iiii iiii :mtc2 RTsrc, immed is $(AMODE) & prime=0x12 & copop=4 & RTsrc & immed { setCopReg(2:1, immed:4, RTsrc); } :mthc0 RTsrc, 
RD0, sel is $(AMODE) & prime=0x10 & copop=6 & RTsrc & RD0 & zero6=0 & sel { setCopReg(0:1, RD0, RTsrc, sel:1); } # 0100 1000 111t tttt iiii iiii iiii iiii :mthc2 RTsrc, immed is $(AMODE) & prime=0x12 & copop=0x07 & RTsrc & immed { arg:4 = immed; tmp:4 = RTsrc:$(SIZETO4); low:4 = getCopReg(2:1, arg); val:8 = (zext(tmp) << 32) + zext(low); setCopReg(2:1, arg, val); } :nal is $(AMODE) & REL6=0 & prime=1 & cond=0x10 & zero21=0 { delayslot(1); ra = inst_next; } # 0000 0000 0000 0000 0000 0000 0000 0000 :nop is $(AMODE) & prime=0 & rs=0 & rt=0 & rd=0 & sa=0 & fct=0 { } # 0000 00ss ssst tttt dddd d000 0010 0111 :nor RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x27 & RSsrc & RTsrc & RD & sa=0 { RD = ~(RSsrc | RTsrc); } # 0000 00ss ssst tttt dddd d000 0010 0101 :or RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x25 & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc | RTsrc; } # 0011 01ss ssst tttt iiii iiii iiii iiii :ori RT, RSsrc, immed is $(AMODE) & prime=0xD & RSsrc & RT & immed { RT = RSsrc | immed; } :pause is $(AMODE) & prime=0 & szero=0 & fct=0 & fct2=0x05 { wait(); } :pref hint, OFF_BASE is $(AMODE) & prime=0x33 & hint & OFF_BASE { prefetch(OFF_BASE, hint:1); } :prefe hint, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x23 & bit6=0 & OFF_BASER6 & hint { prefetch(OFF_BASER6, hint:1); } # 0111 1100 000t tttt dddd d000 0011 1011 :rdhwr RT, rd_hw is $(AMODE) & prime=0x1F & rs=0 & fct2=0 & fct=0x3B & RT & rd_hw & rd!=4 { RT = getHWRegister(rd_hw); } # 0111 1100 000t tttt dddd d000 0011 1011 :rdhwr RT, rd_hw, sel_0608 is $(AMODE) & REL6=1 & prime=0x1F & rs=0 & spec2=0 & fct=0x3B & RT & rd_hw & rd=4 & sel_0608 { RT = getHWRegister(rd_hw, sel_0608:1); } # 0100 0001 010t tttt dddd d000 0000 0000 :rdpgpr RD, RT is $(AMODE) & prime=0x10 & rs=10 & bigfunct=0 & RD & RT { RD = getShadow(RT); } # 0000 0000 001t tttt dddd daaa aa00 0010 :rotr RD32, RT32src, sa is $(AMODE) & prime=0 & zero1=0 & bit21=1 & fct=2 & RD32 & RT32src & sa & RD { tmp1:4 = RT32src >> sa; tmp2:4 = RT32src << (32 - sa); 
RD32 = tmp1 + tmp2; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0100 0110 :rotrv RD32, RT32src, RS32src is $(AMODE) & prime=0 & zero2=0 & bit6=1 & fct=6 & RD32 & RT32src & RS32src & RD { shift:4 = RS32src & 0x1f; tmp1:4 = RT32src >> shift; tmp2:4 = RT32src << (32 - shift); RD32 = tmp1 + tmp2; @ifdef MIPS64 RD = sext(RD32); @endif } # 1010 00bb bbbt tttt iiii iiii iiii iiii :sb RTsrc, OFF_BASE is $(AMODE) & prime=0x28 & OFF_BASE & RTsrc { *[ram]:1 OFF_BASE = RTsrc:1; } :sbe RTsrc, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x1C & bit6=0 & OFF_BASER6 & RTsrc { *[ram]:1 OFF_BASER6 = RTsrc:1; } :sce RTsrc, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x1E & bit6=0 & OFF_BASER6 & RTsrc { *[ram]:4 OFF_BASER6 = RTsrc:$(SIZETO4); RTsrc = 1; } # 0111 00cc cccc cccc cccc cccc cc11 1111 :sdbbp breakcode is $(AMODE) & prime=0x1C & fct=0x3F & breakcode { signalDebugBreakpointException(); } @ifndef COPR_C # 1111 10bb bbbt tttt iiii iiii iiii iiii :sdc2 RTsrc, OFF_BASE is $(AMODE) & prime=0x3E & OFF_BASE & RTsrc { *[ram]:8 OFF_BASE = getCopReg(2:1, RTsrc); } @endif # 0111 1100 000t tttt dddd d100 0010 0000 :seb RD, RTsrc is $(AMODE) & prime=0x1F & rs=0 & fct2=0x10 & fct=0x20 & RD & RTsrc { RD = sext( RTsrc:1 ); } # 0111 1100 000t tttt dddd d110 0010 0000 :seh RD, RTsrc is $(AMODE) & prime=0x1F & rs=0 & fct2=0x18 & fct=0x20 & RD & RTsrc { RD = sext( RTsrc:2 ); } # 1010 01bb bbbt tttt iiii iiii iiii iiii :sh RTsrc, OFF_BASE is $(AMODE) & prime=0x29 & OFF_BASE & RTsrc { *[ram]:2 OFF_BASE = RTsrc:2; } :she RTsrc, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x1D & bit6=0 & OFF_BASER6 & RTsrc { *[ram]:2 OFF_BASER6 = RTsrc:2; } # 0000 0000 000t tttt dddd daaa aa00 0000 :sll RD32, RT32src, sa is $(AMODE) & prime=0 & fct=0 & rs=0 & RD32 & RT32src & sa & RD { RD32 = RT32src << sa; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0000 0100 :sllv RD32, RT32src, RS32src is $(AMODE) & prime=0 & fct=4 & RS32src & RT32src & RD32 & sa=0 & RD { shift:4 
= RS32src & 0x1f; RD32 = RT32src << shift; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0010 1010 :slt RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x2A & RSsrc & RTsrc & RD & sa=0 { RD = zext( RSsrc s< RTsrc ); } # 0010 10ss ssst tttt iiii iiii iiii iiii :slti RT, RSsrc, simmed is $(AMODE) & prime=10 & RSsrc & RT & simmed { RT = zext( RSsrc s< simmed ); } # 0010 11ss ssst tttt iiii iiii iiii iiii :sltiu RT, RSsrc, simmed is $(AMODE) & prime=0xB & RSsrc & RT & simmed { RT = zext( RSsrc < simmed ); } # 0000 00ss ssst tttt dddd d000 0010 1011 :sltu RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x2B & RSsrc & RTsrc & RD & sa=0 { RD = zext( RSsrc < RTsrc ); } # 0000 0000 000t tttt dddd daaa aa00 0011 :sra RD32, RT32src, sa is $(AMODE) & prime=0 & fct=3 & rs=0 & RT32src & RD32 & sa & RD { RD32 = RT32src s>> sa; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0000 0111 :srav RD32, RT32src, RS32src is $(AMODE) & prime=0 & fct=7 & RS32src & RT32src & RD32 & sa=0 & RD { shift:4 = RS32src & 0x1f; RD32 = RT32src s>> shift; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 0000 000t tttt dddd daaa aa00 0010 :srl RD32, RT32src, sa is $(AMODE) & prime=0 & fct=2 & rs=0 & RT32src & RD32 & sa & RD { RD32 = RT32src >> sa; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0000 0110 :srlv RD32, RT32src, RS32src is $(AMODE) & prime=0 & fct=6 & RS32src & RT32src & RD32 & sa=0 & RD { shift:4 = RS32src & 0x1f; RD32 = RT32src >> shift; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 0000 0000 0000 0000 0000 0100 0000 :ssnop is $(AMODE) & prime=0 & rs=0 & rt=0 & rd=0 & sa=1 & fct=0 { } # 0000 00ss ssst tttt dddd d000 0010 0010 :sub RD32, RS32src, RT32src is $(AMODE) & prime=0 & fct=0x22 & RS32src & RT32src & RD32 & sa=0 & RD { RD32 = RS32src - RT32src; @ifdef MIPS64 RD = sext(RD32); @endif } # 0000 00ss ssst tttt dddd d000 0010 0011 :subu RD32, RS32src, RT32src is $(AMODE) & prime=0 & fct=0x23 & RS32src & RT32src & RD32 & 
sa=0 & RD { RD32 = RS32src - RT32src; @ifdef MIPS64 RD = sext(RD32); @endif } # 1010 11bb bbbt tttt iiii iiii iiii iiii :sw RTsrc, OFF_BASE is $(AMODE) & prime=0x2B & OFF_BASE & RTsrc { *[ram]:4 OFF_BASE = RTsrc:$(SIZETO4); } @ifndef COPR_C # 1110 10bb bbbt tttt iiii iiii iiii iiii :swc2 hint, OFF_BASE is $(AMODE) & prime=0x3A & OFF_BASE & hint { tmp:4 = getCopReg(2:1, hint:4); *[ram]:4 OFF_BASE = tmp; } @endif :swe RTsrc, OFF_BASER6 is $(AMODE) & prime=0x1F & fct=0x1F & bit6=0 & OFF_BASER6 & RTsrc { *[ram]:4 OFF_BASER6 = RTsrc:$(SIZETO4); } define pcodeop SYNC; # 0000 0000 0000 0000 0000 0yyy yy00 1111 :sync scalar is $(AMODE) & prime=0 & fct=0xF & szero=0 & stype [ scalar = stype + 0; ] { SYNC(scalar:1); } # 0000 01bb bbb1 1111 iiii iiii iiii iiii :synci OFF_BASE is $(AMODE) & prime=1 & OFF_BASE & synci=0x1F { } # 0000 00cc cccc cccc cccc cccc cc00 1100 :syscall is $(AMODE) & prime=0 & fct=0xC & breakcode { tmp:4=breakcode; syscall(tmp); } # 0000 0000 0000 0000 cccc cccc cc11 0100 # trap always :teq RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x34 & RSsrc & RTsrc & code & rs=0 & rt=0 { tmp:2 = code; local dest:$(ADDRSIZE) = trap(tmp); goto [dest]; } # 0000 00ss ssst tttt cccc cccc cc11 0100 :teq RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x34 & RSsrc & RTsrc & code { if (RSsrc != RTsrc) goto ; tmp:2=code; trap(tmp); } # 0000 00ss ssst tttt cccc cccc cc11 0000 :tge RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x30 & RSsrc & RTsrc & code { if (RSsrc < RTsrc) goto ; tmp:2=code; trap(tmp); } # 0000 00ss ssst tttt cccc cccc cc11 0001 :tgeu RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x31 & RSsrc & RTsrc & code { if (RSsrc < RTsrc) goto ; tmp:2=code; trap(tmp); } :tlbinv is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x00 & fct=0x03 { TLB_invalidate(Index, EntryHi); } :tlbinvf is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x00 & fct=0x04 { TLB_invalidate_flush(Index); } :tlbp is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x00 & fct=0x08 { Index = TLB_probe_for_matching_entry(EntryHi); 
} :tlbr is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x00 & fct=0x01 { EntryHi = TLB_read_indexed_entryHi(Index); EntryLo0 = TLB_read_indexed_entryLo0(Index); EntryLo1 = TLB_read_indexed_entryLo1(Index); PageMask = TLB_read_indexed_entryPageMask(Index); } :tlbwi is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x00 & fct=0x02 { TLB_write_indexed_entry(Index, EntryHi, EntryLo0, EntryLo1, PageMask); } :tlbwr is $(AMODE) & prime=0x10 & bit25=1 & copfill=0x00 & fct=0x06 { TLB_write_random_entry(Random, EntryHi, EntryLo0, EntryLo1, PageMask); } # 0000 00ss ssst tttt cccc cccc cc11 0010 :tlt RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x32 & RSsrc & RTsrc & code { if (RSsrc s>= RTsrc) goto ; tmp:2=code; trap(tmp); } # 0000 00ss ssst tttt cccc cccc cc11 0011 :tltu RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x33 & RSsrc & RTsrc & code { if (RSsrc >= RTsrc) goto ; tmp:2=code; trap(tmp); } # 0000 00ss ssst tttt cccc cccc cc11 0110 :tne RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x36 & RSsrc & RTsrc & code { if (RSsrc == RTsrc) goto ; tmp:2=code; trap(tmp); } # 0100 001c cccc cccc cccc cccc cc10 0000 :wait is $(AMODE) & prime=0x10 & fct=0x20 & copfill & bit25=1 { tmp:4 = copfill; wait(tmp); } # 0100 0001 110t tttt dddd d000 0000 0000 :wrpgpr RD, RTsrc is $(AMODE) & prime=0x10 & format=0xE & RTsrc & RD & bigfunct=0 { setShadow(RD, RTsrc); } # 0111 1100 000t tttt dddd d000 1010 0000 :wsbh RD, RTsrc is $(AMODE) & prime=0x1F & format=0 & RTsrc & RD & wsbh=2 & bshfl=0x20 { tmp1:$(REGSIZE) = RTsrc & 0xff; tmp2:$(REGSIZE) = (RTsrc >> 8) & 0xff; tmp3:$(REGSIZE) = (RTsrc >> 16) & 0xff; tmp4:$(REGSIZE) = (RTsrc >> 24) & 0xff; RD = (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | (tmp2); } # 0000 00ss ssst tttt dddd d000 0010 0110 :xor RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x26 & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc ^ RTsrc; } # 0011 10ss ssst tttt iiii iiii iiii iiii :xori RT, RSsrc, immed is $(AMODE) & prime=0xE & RSsrc & RT & immed { RT = RSsrc ^ immed; } ############################ # # 
MIPS64 Instructions to be included with all MIPS32 processors # ############################ ## Allow MIPS 64 instructions below for compilers ## using a 64-bit chip, but really keeping things to 32-bits # Special case of daddu # 0000 00ss ssst tttt dddd d000 0010 1101 :clear RD is $(AMODE) & prime=0 & fct=0x2D & rs=0 & rt=0 & RD & sa=0 { RD = 0; } # 0000 00ss ssst tttt dddd d000 0010 1100 :dadd RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x2C & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc + RTsrc; } # 0110 01ss ssst tttt iiii iiii iiii iiii :daddiu RT, RSsrc, simmed is $(AMODE) & prime=0x19 & RSsrc & RT & simmed { RT = RSsrc + simmed; } # 0000 00ss ssst tttt dddd d000 0010 1101 :daddu RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=0x2D & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc + RTsrc; } #### # # Pre-6 semantics # #### # 0010 00ss ssst tttt iiii iiii iiii iiii :addi RT32, RS32src, simmed is $(AMODE) & REL6=0 & prime=8 & RT32 & RS32src & simmed & RT { RT32 = RS32src + simmed; @ifdef MIPS64 RT = sext(RT32); @endif } # 0000 01ss sss1 0001 iiii iiii iiii iiii :bal Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=0x11 & rs=0 & Rel16 { ra = inst_next; delayslot( 1 ); call Rel16; } # Special case PIC :bal Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=0x11 & rs=0 & off16=1 & Rel16 { ra = inst_next; delayslot( 1 ); goto Rel16; } # 0100 1001 000c cc00 iiii iiii iiii iiii :bc2f Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc=0 & nd=0 & tf=0 & Rel16 { tmp:1 = getCopCondition(2:1, 0:1); delayslot(1); if (tmp != 0) goto inst_next; goto Rel16; } :bc2f cc,Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc & nd=0 & tf=0 & Rel16 { tmp:1 = getCopCondition(2:1, cc:1); delayslot(1); if (tmp != 0) goto inst_next; goto Rel16; } # 0100 1001 000c cc10 iiii iiii iiii iiii :bc2fl Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc=0 & nd=1 & tf=0 & Rel16 { tmp:1 = getCopCondition(2:1, 0:1); if (tmp != 0) goto inst_next; delayslot(1); goto Rel16; } :bc2fl cc,Rel16 is $(AMODE) & REL6=0 & 
prime=0x12 & copop=8 & cc & nd=1 & tf=0 & Rel16 { tmp:1 = getCopCondition(2:1, cc:1); if (tmp != 0) goto inst_next; delayslot(1); goto Rel16; } # 0100 1001 000c cc01 iiii iiii iiii iiii :bc2t Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc=0 & nd=0 & tf=1 & Rel16 { tmp:1 = getCopCondition(2:1, 0:1); delayslot(1); if (tmp == 0) goto inst_next; goto Rel16; } :bc2t cc,Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc & nd=0 & tf=1 & Rel16 { tmp:1 = getCopCondition(2:1, cc:1); delayslot(1); if (tmp == 0) goto inst_next; goto Rel16; } # 0100 1001 000c cc11 iiii iiii iiii iiii :bc2tl Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc=0 & nd=1 & tf=1 & Rel16 { tmp:1 = getCopCondition(2:1, 0:1); if (tmp == 0) goto inst_next; delayslot(1); goto Rel16; } :bc2tl cc,Rel16 is $(AMODE) & REL6=0 & prime=0x12 & copop=8 & cc & nd=1 & tf=1 & Rel16 { tmp:1 = getCopCondition(2:1, cc:1); if (tmp == 0) goto inst_next; delayslot(1); goto Rel16; } # 0101 00ss ssst tttt iiii iiii iiii iiii :beql RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=0 & prime=0x14 & RSsrc & RTsrc & Rel16 { if (!(RSsrc==RTsrc)) goto inst_next; delayslot(1); goto Rel16; } :bgezal RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=0x11 & RSsrc & Rel16 { ra = inst_next; delayflag:1 = ( RSsrc s>= 0 ); delayslot( 1 ); if (!delayflag) goto inst_next; call Rel16; } # 0000 01ss sss1 0011 iiii iiii iiii iiii :bgezall RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=0x13 & RSsrc & Rel16 { ra = inst_next; if (!(RSsrc s>= 0)) goto inst_next; delayslot( 1 ); call Rel16; } # 0000 01ss sss0 0011 iiii iiii iiii iiii :bgezl RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=3 & RSsrc & Rel16 { if (!(RSsrc s>= 0)) goto inst_next; delayslot(1); goto Rel16; } # 0101 11ss sss0 0000 iiii iiii iiii iiii :bgtzl RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=0x17 & cond=0 & RSsrc & Rel16 { if (!(RSsrc s> 0)) goto inst_next; delayslot(1); goto Rel16; } # 0101 10ss sss0 0000 iiii iiii iiii iiii :blezl RSsrc, Rel16 is $(AMODE) & REL6=0 & 
prime=0x16 & cond=0 & RSsrc & Rel16 { if (!(RSsrc s<= 0)) goto inst_next; delayslot(1); goto Rel16; } # 0000 01ss sss1 0000 iiii iiii iiii iiii :bltzal RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=0x10 & RSsrc & Rel16 { ra = inst_next; delayflag:1 = ( RSsrc s< 0 ); delayslot( 1 ); if (!delayflag) goto inst_next; call Rel16; } # 0000 01ss sss1 0010 iiii iiii iiii iiii :bltzall RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=0x12 & RSsrc & Rel16 { ra = inst_next; if (!(RSsrc s< 0)) goto inst_next; delayslot(1); call Rel16; } # 0000 01ss sss0 0010 iiii iiii iiii iiii :bltzl RSsrc, Rel16 is $(AMODE) & REL6=0 & prime=1 & cond=2 & RSsrc & Rel16 { if (!(RSsrc s< 0)) goto inst_next; delayslot(1); goto Rel16; } # 0101 01ss ssst tttt iiii iiii iiii iiii :bnel RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=0 & prime=0x15 & RSsrc & RTsrc & Rel16 { if (!(RSsrc!=RTsrc)) goto inst_next; delayslot(1); goto Rel16; } # 0111 00ss ssst tttt dddd d000 0010 0001 :clo RD, RSsrc is $(AMODE) & REL6=0 & prime=0x1C & sa=0x0 & fct=0x21 & RD & RSsrc { # Count leading ones in a word RD = lzcount( ~RSsrc ); } # 0111 00ss ssst tttt dddd d000 0010 0000 :clz RD, RSsrc is $(AMODE) & REL6=0 & prime=0x1C & sa=0x0 & fct=0x20 & RD & RSsrc { # Count leading zeros in a word RD = lzcount( RSsrc ); } # 0000 00ss ssst tttt 0000 0000 0001 1010 :div RS32src, RT32src is $(AMODE) & REL6=0 & prime=0 & fct=0x1A & RS32src & RT32src & rd=0 & sa=0 { lo = sext(RS32src s/ RT32src); hi = sext(RS32src s% RT32src); } # 0000 00ss ssst tttt 0000 0000 0001 1011 :divu RS32src, RT32src is $(AMODE) & REL6=0 & prime=0 & fct=0x1B & RS32src & RT32src & rd=0 & sa=0 { lo = sext(RS32src / RT32src); hi = sext(RS32src % RT32src); } @ifdef ISA_VARIANT # 0111 01aa aaaa aaaa aaaa aaaa aaaa aaaa :jalx Abs26 is $(AMODE) & REL6=0 & prime=0x1D & Abs26 [ ISA_MODE = 1; globalset(Abs26, ISA_MODE);] { ra = inst_next; delayslot( 1 ); ISAModeSwitch = 1; call Abs26; } @endif @ifndef COPR_C # 1101 10bb bbbt tttt iiii iiii iiii iiii :ldc2 rt, 
OFF_BASE is $(AMODE) & REL6=0 & prime=0x36 & OFF_BASE & rt { setCopReg(2:1, rt, *[ram]:8 OFF_BASE); } @endif # 1100 00bb bbbt tttt iiii iiii iiii iiii :ll RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x30 & OFF_BASE & RT { RT = sext(*[ram]:4 OFF_BASE); } # 0011 1100 000t tttt iiii iiii iiii iiii :lui RT, immed is $(AMODE) & REL6=0 & prime=0xF & rs=0 & RT & immed { tmp:4 = immed << 16; RT = sext(tmp); } @ifndef COPR_C # 1100 10bb bbbt tttt iiii iiii iiii iiii :lwc2 rt, OFF_BASE is $(AMODE) & REL6=0 & prime=0x32 & OFF_BASE & rt { setCopReg( 2:1, rt, *[ram]:4 OFF_BASE ); } @endif @if ENDIAN == "big" # 1000 10bb bbbt tttt iiii iiii iiii iiii :lwl RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x22 & OFF_BASE & RT & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff >> ((4-shft) * 8)); valLoad:4 = *(addr) << (shft * 8); RT = sext( valLoad | valOrig ); } # 1001 10bb bbbt tttt iiii iiii iiii iiii :lwr RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x26 & OFF_BASE & RT & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff << ((shft+1) * 8)); valLoad:4 = *(addr) >> ((3-shft) * 8); RT = sext( valOrig | valLoad ); } :lwle RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x19 & bit6=0 & OFF_BASER6 & RTsrc & RT { shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff >> ((4-shft) * 8)); valLoad:4 = *(addr) << (shft * 8); RT = sext( valLoad | valOrig ); } :lwre RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x1A & bit6=0 & OFF_BASER6 & RTsrc & RT { shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff << ((shft+1) * 8)); valLoad:4 = *(addr) >> ((3-shft) * 8); RT = sext( valOrig | valLoad ); } @else :lwl RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x22 & OFF_BASE & RT & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x3; 
addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff >> ((shft+1)* 8)); valLoad:4 = *(addr) << ((3-shft) * 8); RT = sext( valLoad | valOrig ); } # 1001 10bb bbbt tttt iiii iiii iiii iiii :lwr RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x26 & OFF_BASE & RT & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff << ((4-shft)* 8)); valLoad:4 = *(addr) >> (shft * 8); RT = sext( valOrig | valLoad ); } :lwle RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x19 & bit6=0 & OFF_BASER6 & RTsrc & RT { shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff >> ((shft+1)* 8)); valLoad:4 = *(addr) << ((3-shft) * 8); RT = sext( valLoad | valOrig ); } :lwre RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x1A & bit6=0 & OFF_BASER6 & RTsrc & RT { shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = RTsrc:$(SIZETO4) & (0xffffffff << ((4-shft)* 8)); valLoad:4 = *(addr) >> (shft * 8); RT = sext( valOrig | valLoad ); } @endif # lwl and lwr almost always come in pairs. 
# When the analyzer finds a matching lwl/lwr pair, the pcode is simplified so that # lwl does all the loading while lwr is a no-op @if ENDIAN == "big" :lwl RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x22 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { RT = sext( *[ram]:4 OFF_BASE ); } :lwr RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x26 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ] { } @else :lwl RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x22 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { } :lwr RT, OFF_BASE is $(AMODE) & REL6=0 & prime=0x26 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ] { RT = sext( *[ram]:4 OFF_BASE ); } @endif # 0111 00ss ssst tttt 000a a000 0000 0000 :madd RS32src, RT32src is $(AMODE) & REL6=0 & prime=0x1C & zero1315=0x0 & fct2=0x0 & fct=0x0 & RS32src & RT32src & ac=0 & achi & aclo { tmp1:8 = sext(RS32src); tmp2:8 = sext(RT32src); prod:8 = tmp1 * tmp2; aclo = aclo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(achi) << 32) + zext(aclo) + prod; aclo = sext(sum:4); sum = sum >> 32; achi = sext(sum:4); } # 0111 00ss ssst tttt 000a a000 0000 0001 :maddu RS32src, RT32src is $(AMODE) & REL6=0 & prime=0x1C & zero1315=0x0 & fct2=0x0 & fct=0x01 & RS32src & RT32src & ac=0 & achi & aclo { tmp1:8 = zext(RS32src); tmp2:8 = zext(RT32src); prod:8 = tmp1 * tmp2; aclo = aclo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(achi) << 32) + zext(aclo) + prod; aclo = sext(sum:4); sum = sum >> 32; achi = sext(sum:4); } # 0000 0000 0aa0 0000 dddd d000 0001 0000 :mfhi RD is $(AMODE) & REL6=0 & prime=0 & fct=0x10 & RD & zero5=0 & zero1620=0 & zero2325=0 & acf=0 & acfhi { RD = acfhi; } # 0000 0000 0aa0 0000 dddd d000 0001 0010 :mflo RD is $(AMODE) & REL6=0 & prime=0 & fct=0x12 & RD & zero5=0 &
zero1620=0 & zero2325=0 & acf=0 & acflo { RD = acflo; } # 0000 00ss ssst tttt dddd d000 0000 1011 :movn RD, RSsrc, RTsrc is $(AMODE) & REL6=0 & prime=0 & zero5=0 & fct=0xB & RD & RSsrc & RTsrc { if (RTsrc == 0) goto ; RD = RSsrc; } # 0000 00ss ssst tttt dddd d000 0000 1010 :movz RD, RSsrc, RTsrc is $(AMODE) & REL6=0 & prime=0 & zero5=0 & fct=10 & RD & RSsrc & RTsrc { if (RTsrc != 0) goto ; # We can't use goto inst_next because it fails if we are in a delay slot RD = RSsrc; } # 0111 00ss ssst tttt 000a a000 0000 0100 :msub RS32src, RT32src is $(AMODE) & REL6=0 & prime=0x1C & fct2=0 & fct=0x04 & RS32src & RT32src & zero1315=0 & aclo & achi { tmp1:8 = sext(RS32src); tmp2:8 = sext(RT32src); prod:8 = tmp1 * tmp2; aclo = aclo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(achi) << 32) + zext(aclo) - prod; aclo = sext(sum:4); sum = sum >> 32; achi = sext(sum:4); } # 0111 00ss ssst tttt 000a a000 0000 0101 :msubu RS32src, RT32src is $(AMODE) & REL6=0 & prime=0x1C & fct2=0 & fct=0x05 & RS32src & RT32src & zero1315=0 & ac=0 & aclo & achi { tmp1:8 = zext(RS32src); tmp2:8 = zext(RT32src); prod:8 = tmp1 * tmp2; aclo = aclo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(achi) << 32) + zext(aclo) - prod; aclo = sext(sum:4); sum = sum >> 32; achi = sext(sum:4); } # 0000 00ss sss0 0000 000a a000 0001 0001 :mthi RSsrc is $(AMODE) & REL6=0 & prime=0 & fct=0x11 & RSsrc & zero5=0 & zero1320=0 & ac=0 & achi { achi = RSsrc; } # 0000 00ss sss0 0000 000a a000 0001 0011 :mtlo RSsrc is $(AMODE) & REL6=0 & prime=0 & fct=0x13 & RSsrc & zero5=0 & zero1320=0 & ac=0 & aclo { aclo = RSsrc; } # 0111 00ss ssst tttt dddd d000 0000 0010 :mul RD, RS32src, RT32src is $(AMODE) & REL6=0 & prime=0x1C & sa=0x0 & fct=0x02 & RD & RS32src & RT32src { tmp1:8 = sext( RS32src ); tmp2:8 = sext( RT32src ); prod:8 = tmp1 * tmp2; RD = sext( prod:4 ); } # 0000 00ss ssst tttt 000a a000 0001 1000 :mult RS32src, RT32src is $(AMODE) & REL6=0 & 
prime=0 & fct=0x18 & RS32src & RT32src & zero5=0 & zero1315=0 & aclo & achi { tmp1:8 = sext( RS32src ); tmp2:8 = sext( RT32src ); prod:8 = tmp1 * tmp2; aclo = sext(prod:4); prod = prod >> 32; achi = sext(prod:4); } # 0000 00ss ssst tttt 000a a000 0001 1001 :multu RS32src, RT32src is $(AMODE) & REL6=0 & prime=0 & fct=0x19 & RS32src & RT32src & zero5=0 & zero1315=0 & aclo & achi { tmp1:8 = zext( RS32src ); tmp2:8 = zext( RT32src ); prod:8 = tmp1 * tmp2; aclo = sext(prod:4); prod = prod >> 32; achi = sext(prod:4); } # 0100 0110 110t tttt ssss sddd dd10 1100 :pll.ps fd, fs, ft is $(AMODE) & REL6=0 & prime=0x11 & format=0x16 & fct=0x2C & ft & fs & fd unimpl # 0100 0110 110t tttt ssss sddd dd10 1101 :plu.ps fd, fs, ft is $(AMODE) & REL6=0 & prime=0x11 & format=0x16 & fct=0x2D & ft & fs & fd unimpl #:prefx # 0100 0110 110t tttt ssss sddd dd10 1110 :pul.ps fd, fs, ft is $(AMODE) & REL6=0 & prime=0x11 & format=0x16 & fct=0x2E & fd & fs & ft unimpl # 0100 0110 110t tttt ssss sddd dd10 1111 :puu.ps fd, fs, ft is $(AMODE) & REL6=0 & prime=0x11 & format=0x16 & fct=0x2F & fd & fs & ft unimpl # 1110 00bb bbbt tttt iiii iiii iiii iiii :sc RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x38 & OFF_BASE & RT & RTsrc { *[ram]:4 OFF_BASE = RTsrc:$(SIZETO4); RT = 1; } @if ENDIAN == "big" # 1010 10bb bbbt tttt iiii iiii iiii iiii :swl RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2A & OFF_BASE & RTsrc { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = *(addr) & (0xffffffff << ((4-shft) * 8)); valStore:4 = tmpRT >> (shft * 8); *(addr) = valOrig | valStore; } # 1011 10bb bbbt tttt iiii iiii iiii iiii :swr RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2E & OFF_BASE & RTsrc { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = *(addr) & (0xffffffff >> ((shft+1) * 8)); valStore:4 = tmpRT << ((3-shft)*8); *(addr) = valOrig | valStore; } :swle RTsrc, OFF_BASER6 is $(AMODE) & 
REL6=0 & prime=0x1F & fct=0x21 & bit6=0 & OFF_BASER6 & RTsrc & RT { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = *(addr) & (0xffffffff << ((4-shft) * 8)); valStore:4 = tmpRT >> (shft * 8); *(addr) = valOrig | valStore; } :swre RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x22 & bit6=0 & OFF_BASER6 & RTsrc & RT { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = *(addr) & (0xffffffff >> ((shft+1) * 8)); valStore:4 = tmpRT << ((3-shft)*8); *(addr) = valOrig | valStore; } @else # 1010 10bb bbbt tttt iiii iiii iiii iiii :swl RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2A & OFF_BASE & RTsrc { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = *(addr) & (0xffffffff << ((shft+1) * 8)); valStore:4 = tmpRT >> ((3-shft) * 8); *(addr) = valOrig | valStore; } # 1011 10bb bbbt tttt iiii iiii iiii iiii :swr RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2E & OFF_BASE & RTsrc { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASE & 0x3; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:4 = *(addr) & (0xffffffff >> ((4-shft) * 8)); valStore:4 = tmpRT << (shft*8); *(addr) = valOrig | valStore; } :swle RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x21 & bit6=0 & OFF_BASER6 & RTsrc & RT { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = *(addr) & (0xffffffff << ((shft+1) * 8)); valStore:4 = tmpRT >> ((3-shft) * 8); *(addr) = valOrig | valStore; } :swre RTsrc, OFF_BASER6 is $(AMODE) & REL6=0 & prime=0x1F & fct=0x22 & bit6=0 & OFF_BASER6 & RTsrc & RT { tmpRT:4 = RTsrc:$(SIZETO4); shft:$(ADDRSIZE) = OFF_BASER6 & 0x3; addr:$(ADDRSIZE) = OFF_BASER6 - shft; valOrig:4 = *(addr) & (0xffffffff >> ((4-shft) * 8)); valStore:4 = tmpRT << (shft*8); *(addr) = valOrig | valStore; } @endif # When the analyzer finds a 
matching swl/swr pair, the pcode is simplified so that # swl does all the storing while swr is a no-op @if ENDIAN == "big" :swl RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2A & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { *[ram]:4 OFF_BASE = RTsrc:$(SIZETO4); } :swr RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2E & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ]{ } @else :swl RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2A & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { } :swr RTsrc, OFF_BASE is $(AMODE) & REL6=0 & prime=0x2E & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ]{ *[ram]:4 OFF_BASE = RTsrc:$(SIZETO4); } @endif # 0000 01ss sss0 1100 iiii iiii iiii iiii :teqi RSsrc, simmed is $(AMODE) & REL6=0 & prime=1 & cond=0xC & RSsrc & simmed { if (RSsrc != simmed) goto ; trap(); } # 0000 01ss sss0 1000 iiii iiii iiii iiii :tgei RSsrc, simmed is $(AMODE) & REL6=0 & prime=1 & cond=8 & RSsrc & simmed { if (RSsrc s< simmed) goto ; trap(); } # 0000 01ss sss0 1001 iiii iiii iiii iiii :tgeiu RSsrc, simmed is $(AMODE) & REL6=0 & prime=1 & cond=9 & RSsrc & simmed { if (RSsrc < simmed) goto ; trap(); } # 0000 01ss sss0 1010 iiii iiii iiii iiii :tlti RSsrc, simmed is $(AMODE) & REL6=0 & prime=1 & cond=10 & RSsrc & simmed { if (RSsrc s>= simmed) goto ; trap(); } # 0000 01ss sss0 1011 iiii iiii iiii iiii :tltiu RSsrc, simmed is $(AMODE) & REL6=0 & prime=1 & cond=0xB & RSsrc & simmed { if (RSsrc >= simmed) goto ; trap(); } # 0000 01ss sss0 1110 iiii iiii iiii iiii :tnei RSsrc, simmed is $(AMODE) & REL6=0 & prime=1 & cond=0xE & RSsrc & simmed { if (RSsrc == simmed) goto ; trap(); } ############################ # # MIPS64 Instructions to be included with all MIPS32 processors # ############################ ## Allow MIPS 64 instructions below for compilers ## using a 64-bit 
chip, but really keeping things to 32-bits # 0110 00ss ssst tttt iiii iiii iiii iiii :daddi RT, RSsrc, simmed is $(AMODE) & REL6=0 & prime=0x18 & RSsrc & RT & simmed { RT = RSsrc + simmed; } #### # # Release 6 semantics # #### :addiupc RSsrc, S19L2 is $(AMODE) & REL6=1 & prime=0x3B & bitz19=0 & RSsrc & S19L2 { RSsrc = inst_start + sext(S19L2); } :align RD, RS32src, RT32src, bp2 is $(AMODE) & REL6=1 & prime=0x1F & spec3=0x2 & fct=0x20 & bp2 & RS32src & RT32src & RD { tmp:4 = RT32src << (8 * bp2); tmp = tmp | (RS32src >> (32 - (8 * bp2))); RD = sext(tmp); } :aluipc RSsrc, S16L16 is $(AMODE) & REL6=1 & prime=0x3B & op=0x1F & RSsrc & S16L16 { RSsrc = inst_start + sext(S16L16); RSsrc = RSsrc & ~0xFFFF; } :aui RTsrc, RSsrc, S16L16 is $(AMODE) & REL6=1 & prime=0x0F & RSsrc & RTsrc & S16L16 { RTsrc = RSsrc + sext(S16L16); } :auipc RSsrc, S16L16 is $(AMODE) & REL6=1 & prime=0x3B & op=0x1E & RSsrc & S16L16 { RSsrc = inst_start + sext(S16L16); } # 0000 0100 0001 0001 iiii iiii iiii iiii :bal Rel16 is $(AMODE) & REL6=1 & prime=0x01 & cond=0x11 & rs=0 & Rel16 { ra = inst_next; delayslot(1); call Rel16; } :bal Rel16 is $(AMODE) & REL6=1 & prime=0x01 & cond=0x11 & rs=0 & off16=1 & Rel16 { ra = inst_next; delayslot(1); goto Rel16; } :balc Rel26 is $(AMODE) & REL6=1 & prime=0x3A & Rel26 { ra = inst_next; call Rel26; } :bc Rel26 is $(AMODE) & REL6=1 & prime=0x32 & Rel26 { goto Rel26; } :bc2eqz op, Rel16 is $(AMODE) & REL6=1 & prime=0x12 & copop=0x09 & op & Rel16 { tmp:1 = getCopCondition(2:1, op:1); if (tmp == 0) goto inst_next; goto Rel16; } :bc2nez op, Rel16 is $(AMODE) & REL6=1 & prime=0x12 & copop=0x0D & op & Rel16 { tmp:1 = getCopCondition(2:1, op:1); if (tmp != 0) goto inst_next; goto Rel16; } :bad1 is $(AMODE) & REL6=1 & prime=0x06 & rs=0 & rt=0 unimpl :blezalc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x06 & rs=0 & RTsrc & Rel16 { if (RTsrc s> 0) goto inst_next; ra = inst_next; call Rel16; } :bgezalc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x06 & rs=rt & rt!=0 & RTsrc & 
Rel16 { if (RTsrc s< 0) goto inst_next; ra = inst_next; call Rel16; } :bgeuc RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x06 & rt!=0 & rs!=rt & RSsrc & RTsrc & Rel16 { if (RSsrc >= RTsrc) goto Rel16; } :bad2 is $(AMODE) & REL6=1 & prime=0x07 & rs=0 & rt=0 unimpl :bgtzalc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x07 & rs=0 & RTsrc & Rel16 { if (RTsrc s<= 0) goto inst_next; ra = inst_next; call Rel16; } :bltzalc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x07 & rs=rt & rt!=0 & RTsrc & Rel16 { if (RTsrc s>= 0) goto inst_next; ra = inst_next; call Rel16; } :bltuc RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x07 & rt!=0 & rs!=rt & RSsrc & RTsrc & Rel16 { if (RSsrc < RTsrc) goto Rel16; } :beqzalc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x08 & rs=0 & rt!=0 & RTsrc & Rel16 { if (RTsrc s> 0) goto inst_next; ra = inst_next; call Rel16; } :beqc RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x08 & rs!=0 & rs 0x7FFFFFFF) || (tmpS s< -2147483648); @if REGSIZE == "8" tmpF = tmpF || (RTsrc s> 0x7FFFFFFF) || (RTsrc s< -2147483648) || (RSsrc s> 0x7FFFFFFF) || (RSsrc s< -2147483648); @endif if (tmpF == 1) goto Rel16; } :bnezalc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x18 & rs=0 & rt!=0 & RTsrc & Rel16 { if (RTsrc == 0) goto inst_next; ra = inst_next; call Rel16; } :bnec RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x18 & rs!=0 & rs 0x7FFFFFFF) || (tmpS s< -2147483648); @if REGSIZE == "8" tmpF = tmpF || (RTsrc s> 0x7FFFFFFF) || (RTsrc s< -2147483648) || (RSsrc s> 0x7FFFFFFF) || (RSsrc s< -2147483648); @endif if (tmpF == 0) goto Rel16; } :bad3 is $(AMODE) & REL6=1 & prime=0x16 & rs=0 & rt=0 unimpl :blezc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x16 & rs=0 & rt!=0 & RTsrc & Rel16 { if (RTsrc s<= 0) goto Rel16; } :bgezc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x16 & rs=rt & rt!=0 & RTsrc & Rel16 { if (RTsrc s>= 0) goto Rel16; } :bgec RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x16 & RSsrc & RTsrc & Rel16 { if (RSsrc s>= RTsrc) goto Rel16; } :bad4 is 
$(AMODE) & REL6=1 & prime=0x17 & rs=0 & rt=0 unimpl :bgtzc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x17 & rs=0 & rt!=0 & RTsrc & Rel16 { if (RTsrc s> 0) goto Rel16; } :bltzc RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x17 & rs=rt & rt!=0 & RTsrc & Rel16 { if (RTsrc s< 0) goto Rel16; } :bltc RSsrc, RTsrc, Rel16 is $(AMODE) & REL6=1 & prime=0x17 & RSsrc & RTsrc & Rel16 { if (RSsrc s< RTsrc) goto Rel16; } # The jic instruction takes care of the 'bad' case here :beqzc RSsrc, Rel21 is $(AMODE) & REL6=1 & prime=0x36 & RSsrc & Rel21 { if (RSsrc == 0) goto Rel21; } # The jialc instruction takes care of the 'bad' case here :bnezc RSsrc, Rel21 is $(AMODE) & REL6=1 & prime=0x3E & RSsrc & Rel21 { if (RSsrc != 0) goto Rel21; } :bitswap RD, RT32src is $(AMODE) & REL6=1 & prime=0x1F & zero21=0 & fct2=0 & bshfl=0x20 & RT32src & RD { tmp:4 = bitSwap(RT32src); RD = sext(tmp); } :clo RD, RSsrc is $(AMODE) & REL6=1 & prime=0x00 & op=0 & sa=0x1 & fct=0x11 & RD & RSsrc { RD = lzcount( ~RSsrc ); } :clz RD, RSsrc is $(AMODE) & REL6=1 & prime=0x00 & op=0 & sa=0x1 & fct=0x10 & RD & RSsrc { RD = lzcount( RSsrc ); } :div RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1A & fct2=0x02 & RD & RS32src & RT32src { tmp:4 = RS32src s/ RT32src; RD = sext(tmp); } :mod RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1A & fct2=0x03 & RD & RS32src & RT32src { tmp:4 = RS32src s% RT32src; RD = sext(tmp); } :divu RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1B & fct2=0x02 & RD & RS32src & RT32src { tmp:4 = RS32src / RT32src; RD = sext(tmp); } :modu RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1B & fct2=0x03 & RD & RS32src & RT32src { tmp:4 = RS32src % RT32src; RD = sext(tmp); } :dvp RT is $(AMODE) & REL6=1 & prime=0x10 & mfmc0=0x0B & fct=0x24 & RT & RD0 & zero5=0 & zero4=0 { disableProcessor(RT); } :evp RT is $(AMODE) & REL6=1 & prime=0x10 & mfmc0=0x0B & fct=0x04 & RT & RD0 & zero5=0 & zero4=0 { enableProcessor(RT); } # NOTE: Unlike almost 
every other branch/jump that has an immediate, the immediate is *IS NOT* shifted. This allows # this instruction to serve same function as jalx in pre-6. :jialc RTsrc, simmed is $(AMODE) & REL6=1 & prime=0x3E & jsub=0x00 & RTsrc & simmed { build RTsrc; tmp:$(REGSIZE) = sext(simmed:2) + RTsrc; JXWritePC(tmp); ra = inst_next; call [pc]; } :jic RTsrc, simmed is $(AMODE) & REL6=1 & prime=0x36 & jsub=0x00 & RTsrc & simmed { build RTsrc; tmp:$(REGSIZE) = sext(simmed:2) + RTsrc; JXWritePC(tmp); goto [pc]; } :jic RTsrc, simmed is $(AMODE) & REL6=1 & prime=0x36 & jsub=0x00 & RTsrc & simmed & immed=0x00 & rt=0x1f { build RTsrc; JXWritePC(ra); return [pc]; } @ifndef COPR_C :ldc2 RTsrc, simmed11(baser6) is $(AMODE) & REL6=1 & prime=0x12 & copop=0x0E & simmed11 & baser6 & RTsrc { tmp:$(REGSIZE) = simmed11; tmp = tmp + baser6; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); setCopReg(2:1, RTsrc, *[ram]:8 tmpa); } @endif :ll RT, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1F & fct=0x36 & bit6=0 & OFF_BASER6 & RT { RT = sext(*[ram]:4 OFF_BASER6); } :llx RT, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1F & fct=0x36 & bit6=1 & OFF_BASER6 & RT { RT = sext(*[ram]:4 OFF_BASER6); } :llxe RT, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1F & fct=0x27 & bit6=1 & OFF_BASER6 & RT { RT = sext(*[ram]:4 OFF_BASER6); } :lsa RD, RS32src, RT32src, SAV is $(AMODE) & REL6=1 & prime=0x00 & fct=0x05 & spec3=0 & SAV & RD & RS32src & RT32src { tmp:4 = (RS32src << SAV) + RT32src; RD = sext(tmp); } @ifndef COPR_C :lwc2 RTsrc, simmed11(baser6) is $(AMODE) & REL6=1 & prime=0x12 & copop=0x0A & simmed11 & baser6 & RTsrc { tmp:$(REGSIZE) = simmed11; tmp = tmp + baser6; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); setCopReg( 2:1, RTsrc, *[ram]:4 tmpa); } @endif :lwpc RS, S19L2 is $(AMODE) & REL6=1 & prime=0x3B & pcrel=0x1 & RS & S19L2 { tmp:$(REGSIZE) = inst_start + sext(S19L2); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); RS = sext(*[ram]:4 tmpa); } :mul RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x18 & fct2=0x02 
& RD & RS32src & RT32src { tmpS:8 = sext(RS32src); tmpT:8 = sext(RT32src); tmpS = tmpS * tmpT; tmp:4 = tmpS[0,32]; RD = sext(tmp); } :muh RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x18 & fct2=0x03 & RD & RS32src & RT32src { tmpS:8 = sext(RS32src); tmpT:8 = sext(RT32src); tmpS = tmpS * tmpT; tmp:4 = tmpS[32,32]; RD = sext(tmp); } :mulu RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x19 & fct2=0x02 & RD & RS32src & RT32src { tmpS:8 = zext(RS32src); tmpT:8 = zext(RT32src); tmpS = tmpS * tmpT; tmp:4 = tmpS[0,32]; RD = sext(tmp); } :muhu RD, RS32src, RT32src is $(AMODE) & REL6=1 & prime=0x00 & fct=0x19 & fct2=0x03 & RD & RS32src & RT32src { tmpS:8 = zext(RS32src); tmpT:8 = zext(RT32src); tmpS = tmpS * tmpT; tmp:4 = tmpS[32,32]; RD = sext(tmp); } :scx RTsrc, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1F & fct=0x26 & bit6=1 & OFF_BASER6 & RTsrc { *[ram]:4 OFF_BASER6 = RTsrc:$(SIZETO4); } :scxe RTsrc, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1F & fct=0x1E & bit6=1 & OFF_BASER6 & RTsrc { *[ram]:4 OFF_BASER6 = RTsrc:$(SIZETO4); RTsrc = 1; } :seleqz RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x35 & fct2=0x00 & RD & RSsrc & RTsrc { # We use tmp to cover case where rs and rd are the same reg tmps:$(REGSIZE) = RSsrc; tmpt:$(REGSIZE) = RTsrc; RD = 0; if (tmpt != 0) goto ; RD = tmps; } :selnez RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x37 & fct2=0x00 & RD & RSsrc & RTsrc { # We use tmp to cover case where rs and rd are the same reg tmps:$(REGSIZE) = RSsrc; tmpt:$(REGSIZE) = RTsrc; RD = 0; if (tmpt == 0) goto ; RD = tmps; } :sigrie immed is $(AMODE) & REL6=1 & prime=0x01 & zero21=0 & cond=0x17 & immed { signalReservedInstruction(immed:2); } @include "mipsfloat.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32R6.pspec ================================================ ================================================ FILE: 
pypcode/processors/MIPS/data/languages/mips32R6be.slaspec ================================================ # SLA specification file for MIPS32 big endian @define ENDIAN "big" @define FREGSIZE "8" @define ISA_VARIANT "" @include "mips.sinc" @include "mips32Instructions.sinc" @include "mips16.sinc" @include "mipsmicro.sinc" @include "mips_dsp.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32R6le.slaspec ================================================ # SLA specification file for MIPS32 little endian @define ENDIAN "little" @define FREGSIZE "8" @define ISA_VARIANT "" @include "mips.sinc" @include "mips32Instructions.sinc" @include "mips16.sinc" @include "mipsmicro.sinc" @include "mips_dsp.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32_eabi.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32_fp64.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32be.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32be.slaspec ================================================ # SLA specification file for MIPS32 big endian @define ENDIAN "big" @define FREGSIZE "4" @define ISA_VARIANT "" @include "mips.sinc" @include "mips32Instructions.sinc" @include "mips16.sinc" @include "mipsmicro.sinc" @include "mips_mt.sinc" @include "mips_dsp.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32le.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32le.slaspec ================================================ # SLA 
specification file for MIPS32 little endian @define ENDIAN "little" @define FREGSIZE "4" @define ISA_VARIANT "" @include "mips.sinc" @include "mips32Instructions.sinc" @include "mips16.sinc" @include "mipsmicro.sinc" @include "mips_mt.sinc" @include "mips_dsp.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips32micro.pspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64.pspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64Instructions.sinc ================================================ ############################ # # MIPS64 Instructions # ############################ # 0111 00ss ssst tttt dddd d000 0010 0101 :dclo RD, RSsrc is $(AMODE) & ((REL6=0 & prime=0x1C & sa=0x0 & fct=0x25) | (REL6=1 & prime=0x00 & sa=0x1 & fct=0x13 & op=0)) & RD & RSsrc { RD = lzcount( ~RSsrc ); } # 0111 00ss ssst tttt dddd d000 0010 0100 :dclz RD, RSsrc is $(AMODE) & ((REL6=0 & prime=0x1C & sa=0x0 & fct=0x24) | (REL6=1 & prime=0x00 & sa=0x1 & fct=0x12 & op=0)) & RD & RSsrc { RD = lzcount( RSsrc ); } # 0111 11ss ssst tttt mmmm mLLL LL00 0011 :dext RT, RSsrc, lsb, ExtSize is $(AMODE) & prime=0x1F & fct=0x03 & RT & RSsrc & lsb & msbd & ExtSize { val:8 = (RSsrc >> lsb); val = val & (0xffffffff >> (32 - ExtSize)); RT = zext(val); } # 0111 11ss ssst tttt mmmm mLLL LL00 0001 :dextm RT, RSsrc, lsb, DextmSize is $(AMODE) & prime=0x1F & fct=0x01 & RT & RSsrc & lsb & msbd & DextmSize { val:8 = (RSsrc >> lsb); val = val & (0xffffffffffffffff >> (64 - DextmSize)); RT = zext(val); } # 0111 11ss ssst tttt mmmm mLLL LL00 0010 :dextu RT, RSsrc, DXuPos, ExtSize is $(AMODE) & prime=0x1F & fct=0x02 & RT & RSsrc & lsb & msbd & DXuPos & ExtSize { val:8 = (RSsrc >> DXuPos); val = val & (0xffffffff >> (32 - ExtSize)); RT = zext(val); } # 0111 11ss ssst tttt 
mmmm mLLL LL00 0111 :dins RT, RSsrc, lsb, InsSize is $(AMODE) & prime=0x1F & fct=0x07 & RT & RTsrc & RSsrc & lsb & msbd & InsSize { tmpa:$(REGSIZE) = -1; tmpa = tmpa >> ($(REGSIZE)*8 - InsSize); tmpb:$(REGSIZE) = RSsrc & tmpa; tmpa = tmpa << lsb; tmpa = ~tmpa; tmpb = tmpb << lsb; RT = (RT & tmpa) | tmpb; } # 0111 11ss ssst tttt mmmm mLLL LL00 0101 :dinsm RT, RSsrc, lsb, DinsXSize is $(AMODE) & prime=0x1F & fct=0x05 & RT & RTsrc & RSsrc & lsb & msbd & DinsXSize { tmpa:$(REGSIZE) = -1; tmpa = tmpa >> ($(REGSIZE)*8 - DinsXSize); tmpb:$(REGSIZE) = RSsrc & tmpa; tmpa = tmpa << lsb; tmpa = ~tmpa; tmpb = tmpb << lsb; RT = (RT & tmpa) | tmpb; } # 0111 11ss ssst tttt mmmm mLLL LL00 0110 :dinsu RT, RSsrc, DXuPos, InsSize is $(AMODE) & prime=0x1F & fct=0x06 & RT & RTsrc & RSsrc & lsb & msbd & DXuPos & InsSize { tmpa:$(REGSIZE) = -1; tmpa = tmpa >> ($(REGSIZE)*8 - InsSize); tmpb:$(REGSIZE) = RSsrc & tmpa; tmpa = tmpa << DXuPos; tmpa = ~tmpa; tmpb = tmpb << DXuPos; RT = (RT & tmpa) | tmpb; } # 0100 0000 001t tttt dddd d000 0000 0eee :dmfc0 RT, RD0 is $(AMODE) & prime=16 & copop=1 & RT & RD0 & zero6=0 { RT = RD0; } :dmfc1 RT, fs is $(AMODE) & prime=17 & copop=1 & RT & fs & bigfunct=0 { RT = fs; } :dmfc2 RT, immed is $(AMODE) & prime=18 & copop=1 & RT & immed { RT = getCopReg(2:1, immed:4); } # 0100 0000 101t tttt dddd d000 0000 0eee :dmtc0 RTsrc, RD0 is $(AMODE) & prime=16 & copop=5 & RTsrc & RD0 & zero6=0 { RD0 = RTsrc; } # 0100 0100 101t tttt ssss s000 0000 0000 :dmtc1 RTsrc, fs is $(AMODE) & prime=17 & copop=5 & RTsrc & fs & bigfunct=0 { fs = RTsrc; } :dmtc2 RTsrc, immed is $(AMODE) & prime=18 & copop=5 & RTsrc & immed { setCopReg(2:1, immed:4, RTsrc); } # 0000 0000 001t tttt dddd daaa aa11 1010 :drotr RD, RTsrc, sa is $(AMODE) & prime=0x0 & zero1=0x0 & bit21=0x1 & fct=0x3A & RD & RTsrc & sa { tmp:8 = RTsrc; tmp1:8 = tmp >> sa; tmp2:8 = tmp << (64 - sa); RD = tmp1 + tmp2; } # 0000 0000 001t tttt dddd daaa aa11 1110 :drotr32 RD, RTsrc, sa is $(AMODE) & prime=0x0 & zero1=0x0 & 
bit21=0x1 & fct=0x3E & RD & RTsrc & sa
{
    # tail of :drotr32 (constructor header is on the previous line):
    # rotate right doubleword by (sa + 32)
    shift:1 = sa + 32;
    tmp:8 = RTsrc;
    tmp1:8 = tmp >> shift;
    tmp2:8 = tmp << (64 - shift);
    RD = tmp1 + tmp2;
}

# 0000 00ss ssst tttt dddd d000 0101 0110
# DROTRV: doubleword rotate right variable; rotate amount is rs[5:0]
:drotrv RD, RTsrc, RSsrc is $(AMODE) & prime=0x0 & zero2=0x0 & bit6=0x1 & fct=0x16 & RD & RTsrc & RSsrc
{
    shift:8 = RSsrc & 0x3f;
    tmp:8 = RTsrc;
    tmp1:8 = tmp >> shift;
    # FIX: the complement shift of a 64-bit rotate is (64 - shift), matching
    # drotr/drotr32 above; the previous (32 - shift) produced a wrong result
    # for every nonzero rotate amount.
    tmp2:8 = tmp << (64 - shift);
    RD = tmp1 + tmp2;
}

# 0111 1100 000t tttt dddd d000 1010 0100
# DSBH: swap the two bytes within each of the four halfwords
:dsbh RD, RTsrc is $(AMODE) & prime=0x1F & rs=0x0 & fct2=0x02 & fct=0x24 & RD & RTsrc
{
    tmp1:8 = RTsrc & 0xff;
    tmp2:8 = (RTsrc >> 8) & 0xff;
    tmp3:8 = (RTsrc >> 16) & 0xff;
    tmp4:8 = (RTsrc >> 24) & 0xff;
    tmp5:8 = (RTsrc >> 32) & 0xff;
    tmp6:8 = (RTsrc >> 40) & 0xff;
    tmp7:8 = (RTsrc >> 48) & 0xff;
    tmp8:8 = (RTsrc >> 56) & 0xff;
    RD = (tmp7 << 56) | (tmp8 << 48) | (tmp5 << 40) | (tmp6 << 32) | (tmp3 << 24) | (tmp4 << 16) | (tmp1 << 8) | (tmp2);
}

# 0111 1100 000t tttt dddd d001 0110 0100
# DSHD: reverse the order of the four halfwords
# (also fixed "is$(AMODE)" -> "is $(AMODE)": the fused token would not
#  expand correctly, unlike every other constructor in this file)
:dshd RD, RTsrc is $(AMODE) & prime=0x1F & rs=0x0 & fct2=0x05 & fct=0x24 & RD & RTsrc
{
    tmp1:8 = RTsrc & 0xffff;
    tmp2:8 = (RTsrc >> 16) & 0xffff;
    tmp3:8 = (RTsrc >> 32) & 0xffff;
    tmp4:8 = (RTsrc >> 48) & 0xffff;
    RD = (tmp1 << 48) | (tmp2 << 32) | (tmp3 << 16) | tmp4;
}

# 0000 0000 000t tttt dddd daaa aa11 1000
:dsll RD, RTsrc, sa is $(AMODE) & prime=0 & fct=56 & rs=0 & RTsrc & RD & sa
{
    RD = RTsrc << sa;
}

# 0000 0000 000t tttt dddd daaa aa11 1100
:dsll32 RD, RTsrc, sa is $(AMODE) & prime=0 & fct=60 & rs=0 & RTsrc & RD & sa
{
    RD = RTsrc << (sa + 32);
}

# 0000 00ss ssst tttt dddd d000 0001 0100
:dsllv RD, RTsrc, RSsrc is $(AMODE) & prime=0 & fct=20 & RSsrc & RTsrc & RD & sa=0
{
    RD = RTsrc << RSsrc;
}

# 0000 0000 000t tttt dddd daaa aa11 1011
:dsra RD, RTsrc, sa is $(AMODE) & prime=0 & fct=59 & rs=0 & RTsrc & RD & sa
{
    RD = RTsrc s>> sa;
}

# 0000 0000 000t tttt dddd daaa aa11 1111
:dsra32 RD, RTsrc, sa is $(AMODE) & prime=0 & fct=63 & rs=0 & RTsrc & RD & sa
{
    RD = RTsrc s>> (sa + 32);
}

# 0000 00ss ssst tttt dddd d000 0001 0111
:dsrav RD, RTsrc,
RSsrc is $(AMODE) & prime=0 & fct=23 & RSsrc & RTsrc & RD & sa=0 { RD = RTsrc s>> RSsrc; } # 0000 0000 000t tttt dddd daaa aa11 1010 :dsrl RD, RTsrc, sa is $(AMODE) & prime=0 & fct=58 & rs=0 & RTsrc & RD & sa { RD = RTsrc >> sa; } # 0000 0000 000t tttt dddd daaa aa11 1110 :dsrl32 RD, RTsrc, sa is $(AMODE) & prime=0 & fct=62 & rs=0 & RTsrc & RD & sa { RD = RTsrc >> (sa + 32); } # 0000 00ss ssst tttt dddd d000 0101 0110 :dsrlv RD, RTsrc, RSsrc is $(AMODE) & prime=0 & fct=22 & RSsrc & RTsrc & RD & sa=0 { RD = RTsrc >> RSsrc; } # 0000 00ss ssst tttt dddd d000 0010 1110 :dsub RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=46 & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc - RTsrc; } # 0000 00ss ssst tttt dddd d000 0010 1111 :dsubu RD, RSsrc, RTsrc is $(AMODE) & prime=0 & fct=47 & RSsrc & RTsrc & RD & sa=0 { RD = RSsrc - RTsrc; } # 1101 11bb bbbt tttt iiii iiii iiii iiii :ld RT, OFF_BASE is $(AMODE) & prime=55 & OFF_BASE & RT { RT = *[ram]:8 OFF_BASE; } @if ENDIAN == "big" # 0110 10bb bbbt tttt iiii iiii iiii iiii :ldl RT, OFF_BASE is $(AMODE) & prime=26 & OFF_BASE & RT { shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = RT & (0xffffffffffffffff >> ((8-shft) * 8)); valLoad:8 = *(addr) << (shft * 8); RT = valLoad | valOrig; } # 0110 11bb bbbt tttt iiii iiii iiii iiii :ldr RT, OFF_BASE is $(AMODE) & prime=27 & OFF_BASE & RT { # no-op # see ldl instruction shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = RT & (0xffffffffffffffff << ((shft+1) * 8)); valLoad:8 = *(addr) >> ((7-shft) * 8); RT = valOrig | valLoad; } @else # ENDIAN == "little # 0110 10bb bbbt tttt iiii iiii iiii iiii :ldl RT, OFF_BASE is $(AMODE) & prime=26 & OFF_BASE & RT { shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = RT & (0xffffffffffffffff >> ((shft+1) * 8)); valLoad:8 = *(addr) << ((7-shft) * 8); RT = valLoad | valOrig; } # 0110 11bb bbbt tttt iiii iiii iiii iiii :ldr RT, OFF_BASE is $(AMODE) & prime=27 & 
OFF_BASE & RT { # no-op # see ldl instruction shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = RT & (0xffffffffffffffff << ((8-shft) * 8)); valLoad:8 = *(addr) >> (shft * 8); RT = valOrig | valLoad; } @endif # ENDIAN # ldl and ldr almost always come in pairs. # When the analyzer does finds a matching ldl/ldr pair, the pcode is simplified so that # ldl does all the loading while ldr is a no-op @if ENDIAN == "big" :ldl RT, OFF_BASE is $(AMODE) & prime=26 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { RT = *[ram]:8 OFF_BASE; } :ldr RT, OFF_BASE is $(AMODE) & prime=27 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ] { } @else :ldl RT, OFF_BASE is $(AMODE) & prime=26 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { } :ldr RT, OFF_BASE is $(AMODE) & prime=27 & OFF_BASE & RT & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ] { RT = *[ram]:8 OFF_BASE; } @endif # 1101 00bb bbbt tttt iiii iiii iiii iiii :lld RT, OFF_BASE is $(AMODE) & prime=52 & OFF_BASE & RT { RT = *[ram]:8 OFF_BASE; } # 1001 11bb bbbt tttt iiii iiii iiii iiii :lwu RT, OFF_BASE is $(AMODE) & prime=39 & OFF_BASE & RT { RT = zext( *[ram]:4 OFF_BASE ); } # 1111 00bb bbbt tttt iiii iiii iiii iiii :scd RTsrc, OFF_BASE is $(AMODE) & prime=60 & OFF_BASE & RT & RTsrc { *[ram]:8 OFF_BASE = RTsrc; RT = 1; } # 1111 11bb bbbt tttt iiii iiii iiii iiii :sd RTsrc, OFF_BASE is $(AMODE) & prime=63 & OFF_BASE & RTsrc { *[ram]:8 OFF_BASE = RTsrc; } @if ENDIAN == "big" # 1011 00bb bbbt tttt iiii iiii iiii iiii :sdl RTsrc, OFF_BASE is $(AMODE) & prime=44 & OFF_BASE & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = *(addr) & (0xffffffffffffffff << ((8-shft) * 8)); valStore:8 = RTsrc >> (shft * 8); *(addr) = valOrig | valStore; } # 1011 01bb bbbt tttt iiii iiii iiii iiii :sdr RTsrc, 
OFF_BASE is $(AMODE) & prime=45 & OFF_BASE & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = *(addr) & (0xffffffffffffffff >> ((shft+1) * 8)); valStore:8 = RTsrc << ((7-shft)*8); *(addr) = valStore | valOrig; } @else # ENDIAN == "little # 1011 00bb bbbt tttt iiii iiii iiii iiii :sdl RTsrc, OFF_BASE is $(AMODE) & prime=44 & OFF_BASE & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = *(addr) & (0xffffffffffffffff << ((shft+1) * 8)); valStore:8 = RTsrc >> ((7-shft) * 8); *(addr) = valOrig | valStore; } # 1011 01bb bbbt tttt iiii iiii iiii iiii :sdr RTsrc, OFF_BASE is $(AMODE) & prime=45 & OFF_BASE & RTsrc { shft:$(ADDRSIZE) = OFF_BASE & 0x7; addr:$(ADDRSIZE) = OFF_BASE - shft; valOrig:8 = *(addr) & (0xffffffffffffffff >> ((8-shft) * 8)); valStore:8 = RTsrc << (shft*8); *(addr) = valStore | valOrig; } @endif # ENDIAN # When the analyzer finds a matching sdl/sdr pair, the pcode is simplified so that # sdl does all the storing while sdr is a no-op @if ENDIAN == "big" :sdl RTsrc, OFF_BASE is $(AMODE) & prime=44 & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { *[ram]:8 OFF_BASE = RTsrc; } :sdr RTsrc, OFF_BASE is $(AMODE) & prime=45 & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ] { } @else :sdl RTsrc, OFF_BASE is $(AMODE) & prime=44 & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { } :sdr RTsrc, OFF_BASE is $(AMODE) & prime=45 & OFF_BASE & RTsrc & PAIR_INSTRUCTION_FLAG=1 [ PAIR_INSTRUCTION_FLAG = 0; ] { *[ram]:8 OFF_BASE = RTsrc; } @endif #### # # Pre-6 semantics # #### # 0000 00ss ssst tttt 0000 0000 0001 1110 :ddiv RSsrc, RTsrc is $(AMODE) & REL6=0 & prime=0 & fct=30 & RSsrc & RTsrc & rd=0 & sa=0 { lo = RSsrc s/ RTsrc; hi = RSsrc s% RTsrc; } # 0000 00ss ssst tttt 0000 0000 0001 1111 :ddivu RSsrc, RTsrc is $(AMODE) & 
REL6=0 & prime=0 & fct=31 & RSsrc & RTsrc & rd=0 & sa=0 { lo = RSsrc / RTsrc; hi = RSsrc % RTsrc; } # 0000 00ss ssst tttt 0000 0000 0001 1100 :dmult RSsrc, RTsrc is $(AMODE) & REL6=0 & prime=0 & fct=28 & RSsrc & RTsrc & rd=0 & sa=0 { prod:16 = sext( RSsrc ) * sext( RTsrc ); lo = prod(0); hi = prod(8); } # 0000 00ss ssst tttt 0000 0000 0001 1101 :dmultu RSsrc, RTsrc is $(AMODE) & REL6=0 & prime=0 & fct=29 & RSsrc & RTsrc & rd=0 & sa=0 { prod:16 = zext( RSsrc ) * zext( RTsrc ); lo = prod(0); hi = prod(8); } #### # # Release 6 semantics # #### :dalign RD, RSsrc, RTsrc, bp3 is $(AMODE) & REL6=1 & prime=0x1F & spec2=0x1 & fct=0x24 & bp3 & RSsrc & RTsrc & RD { tmp:8 = RTsrc << (8 * bp3); tmp = tmp | (RSsrc >> (64 - (8 * bp3))); RD = sext(tmp); } :daui RTsrc, RSsrc, S16L16 is $(AMODE) & REL6=1 & prime=0x1D & rs!=0 & RSsrc & RTsrc & S16L16 { RTsrc = RSsrc + sext(S16L16); } :dahi RSsrc, S16L32 is $(AMODE) & REL6=1 & prime=0x01 & op=0x06 & RSsrc & S16L32 { RSsrc = RSsrc + sext(S16L32); } :dati RSsrc, S16L48 is $(AMODE) & REL6=1 & prime=0x01 & op=0x1E & RSsrc & S16L48 { RSsrc = RSsrc + sext(S16L48); } :dbitswap RD, RTsrc is $(AMODE) & REL6=1 & prime=0x1F & zero21=0 & fct2=0 & bshfl=0x24 & RTsrc & RD { RD = bitSwap(RTsrc); } :ddiv RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1E & fct2=0x02 & RD & RSsrc & RTsrc { RD = RSsrc s/ RTsrc; } :dmod RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1E & fct2=0x03 & RD & RSsrc & RTsrc { RD = RSsrc s% RTsrc; } :ddivu RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1F & fct2=0x02 & RD & RSsrc & RTsrc { RD = RSsrc / RTsrc; } :dmodu RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1F & fct2=0x03 & RD & RSsrc & RTsrc { RD = RSsrc % RTsrc; } :dmul RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1C & fct2=0x02 & RD & RSsrc & RTsrc { tmpS:16 = sext(RSsrc); tmpT:16 = sext(RTsrc); tmpS = tmpS * tmpT; RD = tmpS[0,64]; } :dmuh RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1C & 
fct2=0x03 & RD & RSsrc & RTsrc { tmpS:16 = sext(RSsrc); tmpT:16 = sext(RTsrc); tmpS = tmpS * tmpT; RD = tmpS[64,64]; } :dmulu RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1D & fct2=0x02 & RD & RSsrc & RTsrc { tmpS:16 = zext(RSsrc); tmpT:16 = zext(RTsrc); tmpS = tmpS * tmpT; RD = tmpS[0,64]; } :dmuhu RD, RSsrc, RTsrc is $(AMODE) & REL6=1 & prime=0x00 & fct=0x1D & fct2=0x03 & RD & RSsrc & RTsrc { tmpS:16 = zext(RSsrc); tmpT:16 = zext(RTsrc); tmpS = tmpS * tmpT; RD = tmpS[64,64]; } :dlsa RD, RSsrc, RTsrc, SAV is $(AMODE) & REL6=1 & prime=0x00 & fct=0x15 & spec3=0 & SAV & RD & RSsrc & RTsrc { RD = (RSsrc << SAV) + RTsrc; } :ldpc RS, S18L3 is $(AMODE) & REL6=1 & prime=0x3B & pcrel2=0x6 & RS & S18L3 { tmp:8 = inst_start + sext(S18L3); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); RS = sext(*[ram]:8 tmpa); } :lldx RT, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1F & fct=0x37 & bit6=1 & OFF_BASER6 & RT { RT = *[ram]:8 OFF_BASER6; } :lwupc RS, S19L2 is $(AMODE) & REL6=1 & prime=0x3B & pcrel=0x2 & RS & S19L2 { tmp:8 = inst_start + sext(S19L2); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); RS = zext(*[ram]:4 tmpa); } :sdcx RTsrc, OFF_BASER6 is $(AMODE) & REL6=1 & prime=0x1E & fct=0x27 & bit6=1 & OFF_BASER6 & RTsrc { *[ram]:8 OFF_BASER6 = RTsrc; } ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64R6.pspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64_32_n32.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64_32_o32.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64_32_o64.cspec ================================================ ================================================ FILE: 
pypcode/processors/MIPS/data/languages/mips64be.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64be.slaspec ================================================ # SLA specification file for MIPS r5000 big endian @define ENDIAN "big" @define MIPS64 "" @define ISA_VARIANT "" @include "mips.sinc" @include "mips32Instructions.sinc" @include "mips16.sinc" @include "mipsmicro.sinc" @include "mips64Instructions.sinc" @include "mips_dsp.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64le.cspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64le.slaspec ================================================ # SLA specification file for MIPS r5000 little endian @define ENDIAN "little" @define MIPS64 "" @define ISA_VARIANT "" @include "mips.sinc" @include "mips32Instructions.sinc" @include "mips16.sinc" @include "mipsmicro.sinc" @include "mips64Instructions.sinc" @include "mips_dsp.sinc" ================================================ FILE: pypcode/processors/MIPS/data/languages/mips64micro.pspec ================================================ ================================================ FILE: pypcode/processors/MIPS/data/languages/mips_dsp.sinc ================================================ define pcodeop ABSQ_S.PH; define pcodeop ABSQ_S.QB; define pcodeop ABSQ_S.W; define pcodeop ADDQ.PH; define pcodeop ADDQ_S.W; define pcodeop ADDQH.PH; define pcodeop ADDQH.W; define pcodeop ADDSC; define pcodeop ADDU.PH; define pcodeop ADDU.QB; define pcodeop ADDWC; define pcodeop ADDUH.QB; define pcodeop BITREV; define pcodeop DPA.W.PH; define pcodeop DPAQ_S.W.PH; define pcodeop DPAQ_SA.L.W; define pcodeop DPAQX_S.W.PH; define pcodeop DPAQX_SA.W.PH; define pcodeop DPAU.H.QBL; define pcodeop DPAU.H.QBR; define pcodeop DPAX.W.PH; 
define pcodeop DPS.W.PH; define pcodeop DPSQ_S.W.PH; define pcodeop DPSQ_SA.L.W; define pcodeop DPSQX_S.W.PH; define pcodeop DPSQX_SA.W.PH; define pcodeop DPSU.H.QBL; define pcodeop DPSU.H.QBR; define pcodeop DPSX.W.PH; define pcodeop EXTP; define pcodeop EXTPDP; define pcodeop EXTPDPV; define pcodeop EXTPV; define pcodeop EXTR.W; define pcodeop EXTR_S.H; define pcodeop EXTRV.W; define pcodeop EXTRV_S.H; define pcodeop INSV; define pcodeop MAQ_S.W.PHL; define pcodeop MAQ_S.W.PHR; define pcodeop MUL.PH; define pcodeop MULEQ_S.W.PHL; define pcodeop MULEQ_S.W.PHR; define pcodeop MULEU_S.PH.QBL; define pcodeop MULEU_S.PH.QBR; define pcodeop MULQ_RS.PH; define pcodeop MULQ_RS.W; define pcodeop MULQ_S.PH; define pcodeop MULQ_S.W; define pcodeop MULSA.W.PH; define pcodeop MULSAQ_S.W.PH; define pcodeop PRECEQ.W.PHL; define pcodeop PRECEQ.W.PHR; define pcodeop PRECEQU.PH.QBL; define pcodeop PRECEQU.PH.QBLA; define pcodeop PRECEQU.PH.QBR; define pcodeop PRECEQU.PH.QBRA; define pcodeop PRECEU.PH.QBL; define pcodeop PRECEU.PH.QBLA; define pcodeop PRECEU.PH.QBR; define pcodeop PRECEU.PH.QBRA; define pcodeop PRECR.QB.PH; define pcodeop PRECR_SRA.PH.W; define pcodeop PRECRQ.PH.W; define pcodeop PRECRQ.QB.PH; define pcodeop PRECRQU_S.QB.PH; define pcodeop PRECRQ_RS.PH.W; define pcodeop RADDU.W.QB; define pcodeop REPLV.PH; define pcodeop REPLV.QB; define pcodeop SHLL.PH; define pcodeop SHLL.QB; define pcodeop SHLLV.PH; define pcodeop SHLLV.QB; define pcodeop SHLLV_S.W; define pcodeop SHLL_S.W; define pcodeop SHRA.QB; define pcodeop SHRA.PH; define pcodeop SHRAV.PH; define pcodeop SHRAV.QB; define pcodeop SHRAV_R.W; define pcodeop SHRA_R.W; define pcodeop SHRL.PH; define pcodeop SHRL.QB; define pcodeop SHRLV.PH; define pcodeop SHRLV.QB; define pcodeop SUBQ.PH; define pcodeop SUBQ_S.W; define pcodeop SUBQH.PH; define pcodeop SUBQH.W; define pcodeop SUBU.PH; define pcodeop SUBU.QB; define pcodeop SUBUH.QB; # ABSQ_S.PH Purpose: Find Absolute Value of Two Fractional Halfwords :absq_s.ph 
RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x9 & fct=0x12 { RD = ABSQ_S.PH(RTsrc); } # ABSQ_S.QB Purpose: Find Absolute Value of Four Fractional Byte Values :absq_s.qb RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x1 & fct=0x12 { RD = ABSQ_S.QB(RTsrc); } # ABSQ_S.W Purpose: Find Absolute Value of Fractional Word :absq_s.w RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x11 & fct=0x12 { RD = ABSQ_S.W(RTsrc); } # ADDQ[_S].PH Purpose: Add Fractional Halfword Vectors :addq.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xa & fct=0x10 { RD = ADDQ.PH(RSsrc, RTsrc); } :addq_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xe & fct=0x10 { RD = ADDQ.PH(RSsrc, RTsrc); } # ADDQ_S.W Purpose: Add Fractional Words :addq_s.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x16 & fct=0x10 { RD = ADDQ_S.W(RSsrc, RTsrc); } # ADDQH[_R].PH Purpose: Add Fractional Halfword Vectors And Shift Right to Halve Results :addqh.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x8 & fct=0x18 { RD = ADDQH.PH(RSsrc, RTsrc); } :addqh_r.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xa & fct=0x18 { RD = ADDQH.PH(RSsrc, RTsrc); } # ADDQH[_R].W Purpose: Add Fractional Words And Shift Right to Halve Results :addqh.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x10 & fct=0x18 { RD = ADDQH.W(RSsrc, RTsrc); } :addqh_r.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x12 & fct=0x18 { RD = ADDQH.W(RSsrc, RTsrc); } # ADDSC Purpose: Add Signed Word and Set Carry Bit :addsc RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x10 & fct=0x10 { RD = ADDSC(RSsrc, RTsrc); } # ADDU[_S].PH Purpose: Unsigned Add Integer Halfwords :addu.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x8 & fct=0x10 { RD = ADDU.PH(RSsrc, RTsrc); } 
:addu_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xc & fct=0x10 { RD = ADDU.PH(RSsrc, RTsrc); } # ADDU[_S].QB Purpose: Unsigned Add Quad Byte Vectors :addu.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x0 & fct=0x10 { RD = ADDU.QB(RSsrc, RTsrc); } :addu_s.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x4 & fct=0x10 { RD = ADDU.QB(RSsrc, RTsrc); } # ADDWC Purpose: Add Word with Carry Bit :addwc RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x11 & fct=0x10 { RD = ADDWC(RSsrc, RTsrc); } # ADDUH[_R].QB Purpose: Unsigned Add Vector Quad-Bytes And Right Shift to Halve Results :adduh.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x0 & fct=0x18 { RD = ADDUH.QB(RSsrc, RTsrc); } :adduh_r.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x2 & fct=0x18 { RD = ADDUH.QB(RSsrc, RTsrc); } # APPEND Purpose: Left Shift and Append Bits to the LSB :append RTsrc, RSsrc, sa_dsp is $(AMODE) & prime=0x1f & RSsrc & RTsrc & sa_dsp & fct2=0x0 & fct=0x31 { RSval:$(REGSIZE) = RSsrc & (2^sa_dsp-1); RTsrc = (RTsrc << sa_dsp) | (RSval); } # BALIGN Purpose: Byte Align Contents from Two Registers :balign RTsrc, RSsrc, bp is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & bp & fct2=0x10 & fct=0x31 { RTsrc = (RTsrc << 8*bp) | (RSsrc >> 8*(4-bp)); } # BITREV Purpose: Bit-Reverse Halfword :bitrev RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x1b & fct=0x12 { RD = BITREV(RTsrc); } # BPOSGE32 Purpose: Branch on Greater Than or Equal To Value 32 in DSPControl Pos Field :bposge32 Rel16 is $(AMODE) & prime=0x1 & zero21=0x0 & op=0x1c & Rel16 { dsp_pos:$(REGSIZE) = DSPControl & 0x1f; if (dsp_pos < 32) goto inst_next; delayslot(1); goto Rel16; } # BPOSGE32C Purpose: Branch on Greater Than or Equal To Value 32 in DSPControl Pos Field Compact # no branch delay :bposge32c Rel16 is $(AMODE) & prime=0x1 & zero21=0x0 & op=0x1a 
& Rel16 { dsp_pos:$(REGSIZE) = DSPControl & 0x1f; if (dsp_pos < 32) goto inst_next; goto Rel16; } # CMP.cond.PH Purpose: Compare Vectors of Signed Integer Halfword Values :cmp.eq.ph RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0x8 & fct=0x11 { tmp_rs:2 = RSsrc(0) & 0xffff; tmp_rt:2 = RTsrc(0) & 0xffff; cca:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(2) & 0xffff; tmp_rt = RTsrc(2) & 0xffff; ccb:1 = (tmp_rs == tmp_rt); flags:$(REGSIZE) = 0xfcffffff; DSPControl = (DSPControl & flags) | zext(cca << 24) | zext(ccb << 25); } :cmp.lt.ph RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0x9 & fct=0x11 { tmp_rs:2 = RSsrc(0) & 0xffff; tmp_rt:2 = RTsrc(0) & 0xffff; cca:1 = (tmp_rs s< tmp_rt); tmp_rs = RSsrc(2) & 0xffff; tmp_rt = RTsrc(2) & 0xffff; ccb:1 = (tmp_rs s< tmp_rt); flags:$(REGSIZE) = 0xfcffffff; DSPControl = (DSPControl & flags) | zext(cca << 24) | zext(ccb << 25); } :cmp.le.ph RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0xa & fct=0x11 { tmp_rs:2 = RSsrc(0) & 0xffff; tmp_rt:2 = RTsrc(0) & 0xffff; cca:1 = (tmp_rs s<= tmp_rt); tmp_rs = RSsrc(2) & 0xffff; tmp_rt = RTsrc(2) & 0xffff; ccb:1 = (tmp_rs s<= tmp_rt); flags:$(REGSIZE) = 0xfcffffff; DSPControl = (DSPControl & flags) | zext(cca << 24) | zext(ccb << 25); } # CMPGDU.cond.QB Purpose: Compare Unsigned Vector of Four Bytes and Write Result to GPR and DSPControl :cmpgdu.eq.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x18 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:$(REGSIZE) = zext(tmp_rs == tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:$(REGSIZE) = zext(tmp_rs == tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:$(REGSIZE) = zext(tmp_rs == tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:$(REGSIZE) = zext(tmp_rs == tmp_rt); flags:$(REGSIZE) = 0xf0ffffff; DSPControl = (DSPControl & flags) | (ccd << 27) | (ccc << 26) | (ccb << 25) | (cca 
<< 24); RD = (ccd << 3) | (ccc << 2) | (ccb << 1) | (cca); } :cmpgdu.lt.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x19 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:$(REGSIZE) = zext(tmp_rs < tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:$(REGSIZE) = zext(tmp_rs < tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:$(REGSIZE) = zext(tmp_rs < tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:$(REGSIZE) = zext(tmp_rs < tmp_rt); flags:$(REGSIZE) = 0xf0ffffff; DSPControl = (DSPControl & flags) | (cca << 24) | (ccb << 25) | (ccc << 26) | (ccd << 27); RD = (cca) | (ccb << 1) | (ccc << 2) | (ccd << 3); } :cmpgdu.le.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1a & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:$(REGSIZE) = zext(tmp_rs <= tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:$(REGSIZE) = zext(tmp_rs <= tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:$(REGSIZE) = zext(tmp_rs <= tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:$(REGSIZE) = zext(tmp_rs <= tmp_rt); flags:$(REGSIZE) = 0xf0ffffff; DSPControl = (DSPControl & flags) | (cca << 24) | (ccb << 25) | (ccc << 26) | (ccd << 27); RD = (cca) | (ccb << 1) | (ccc << 2) | (ccd << 3); } # CMPGU.cond.QB Purpose: Compare Vectors of Unsigned Byte Values and Write Results to a GPR :cmpgu.eq.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x4 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:1 = (tmp_rs == tmp_rt); RD = zext(cca) | zext(ccb << 1) | zext(ccc << 2) | zext(ccd << 3); } :cmpgu.lt.qb RD, RTsrc, RSsrc is $(AMODE) & 
prime=0x1f & RSsrc & RTsrc & RD & fct2=0x5 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:1 = (tmp_rs < tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:1 = (tmp_rs < tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:1 = (tmp_rs < tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:1 = (tmp_rs < tmp_rt); RD = zext(cca) | zext(ccb << 1) | zext(ccc << 2) | zext(ccd << 3); } :cmpgu.le.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x6 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:1 = (tmp_rs <= tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:1 = (tmp_rs <= tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:1 = (tmp_rs <= tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:1 = (tmp_rs <= tmp_rt); RD = zext(cca) | zext(ccb << 1) | zext(ccc << 2) | zext(ccd << 3); } # CMPU.cond.QB Purpose: Compare Vectors of Unsigned Byte Values :cmpu.eq.qb RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0x0 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:1 = (tmp_rs == tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:1 = (tmp_rs == tmp_rt); flags:$(REGSIZE) = 0xf0ffffff; DSPControl = (DSPControl & flags) | zext(cca << 24) | zext(ccb << 25) | zext(ccc << 26) | zext(ccd << 27); } :cmpu.lt.qb RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0x1 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:1 = (tmp_rs < tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:1 = (tmp_rs < tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:1 = (tmp_rs < tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:1 = (tmp_rs < tmp_rt); 
# ---------------------------------------------------------------------------
# MIPS DSP ASE: quad-byte compare tail, dot-product accumulate/subtract,
# accumulator bit-field extract, indexed loads, and multiply/accumulate
# constructors. Most operations are modeled as opaque pcodeops named after
# the instruction; only the simple ones get explicit p-code semantics.
# NOTE(review): the extraction collapsed the original newlines in this chunk;
# content below is re-flowed one comment/constructor per line, tokens unchanged.
# ---------------------------------------------------------------------------
# Tail of the preceding quad-byte unsigned-compare constructor: pack the four
# per-lane condition bits into DSPControl ccond bits 24..27 (the 0xf0ffffff
# mask clears those four bits before OR-ing in the new values).
flags:$(REGSIZE) = 0xf0ffffff; DSPControl = (DSPControl & flags) | zext(cca << 24) | zext(ccb << 25) | zext(ccc << 26) | zext(ccd << 27); }
# CMPU.LE.QB: unsigned per-byte <= compare; per-lane results are written to
# DSPControl ccond bits 24..27.
:cmpu.le.qb RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0x2 & fct=0x11 { tmp_rs:1 = RSsrc(0) & 0xff; tmp_rt:1 = RTsrc(0) & 0xff; cca:1 = (tmp_rs <= tmp_rt); tmp_rs = RSsrc(1) & 0xff; tmp_rt = RTsrc(1) & 0xff; ccb:1 = (tmp_rs <= tmp_rt); tmp_rs = RSsrc(2) & 0xff; tmp_rt = RTsrc(2) & 0xff; ccc:1 = (tmp_rs <= tmp_rt); tmp_rs = RSsrc(3) & 0xff; tmp_rt = RTsrc(3) & 0xff; ccd:1 = (tmp_rs <= tmp_rt); flags:$(REGSIZE) = 0xf0ffffff; DSPControl = (DSPControl & flags) | zext(cca << 24) | zext(ccb << 25) | zext(ccc << 26) | zext(ccd << 27); }
# DPA.W.PH Purpose: Dot Product with Accumulate on Vector Integer Halfword Elements
:dpa.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x0 & fct=0x30 { ac = DPA.W.PH(ac, RSsrc, RTsrc); }
# DPAQ_S.W.PH Purpose: Dot Product with Accumulation on Fractional Halfword Elements
:dpaq_s.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x4 & fct=0x30 { ac = DPAQ_S.W.PH(ac, RSsrc, RTsrc); }
# DPAQ_SA.L.W Purpose: Dot Product with Accumulate on Fractional Word Element
:dpaq_sa.l.w ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0xc & fct=0x30 { ac = DPAQ_SA.L.W(ac, RSsrc, RTsrc); }
# DPAQX_S.W.PH Purpose: Cross Dot Product with Accumulation on Fractional Halfword Elements
:dpaqx_s.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x18 & fct=0x30 { ac = DPAQX_S.W.PH(ac, RSsrc, RTsrc); }
# DPAQX_SA.W.PH Purpose: Cross Dot Product with Accumulation on Fractional Halfword Elements
:dpaqx_sa.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x1a & fct=0x30 { ac = DPAQX_SA.W.PH(ac, RSsrc, RTsrc); }
# DPAU.H.QBL Purpose: Dot Product with Accumulate on Vector Unsigned Byte Elements
:dpau.h.qbl ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x3 & fct=0x30 { ac = DPAU.H.QBL(ac, RSsrc, RTsrc); }
# DPAU.H.QBR Purpose: Dot Product with Accumulate on Vector Unsigned Byte Elements
:dpau.h.qbr ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x7 & fct=0x30 { ac = DPAU.H.QBR(ac, RSsrc, RTsrc); }
# DPAX.W.PH Purpose: Cross Dot Product with Accumulate on Vector Integer Halfword Elements
:dpax.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x8 & fct=0x30 { ac = DPAX.W.PH(ac, RSsrc, RTsrc); }
# DPS.W.PH Purpose: Dot Product with Subtract on Vector Integer Half-Word Elements
:dps.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x1 & fct=0x30 { ac = DPS.W.PH(ac, RSsrc, RTsrc); }
# DPSQ_S.W.PH Purpose: Dot Product with Subtraction on Fractional Halfword Elements
:dpsq_s.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x5 & fct=0x30 { ac = DPSQ_S.W.PH(ac, RSsrc, RTsrc); }
# DPSQ_SA.L.W Purpose: Dot Product with Subtraction on Fractional Word Element
:dpsq_sa.l.w ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0xd & fct=0x30 { ac = DPSQ_SA.L.W(ac, RSsrc, RTsrc); }
# DPSQX_S.W.PH Purpose: Cross Dot Product with Subtraction on Fractional Halfword Elements
:dpsqx_s.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x19 & fct=0x30 { ac = DPSQX_S.W.PH(ac, RSsrc, RTsrc); }
# DPSQX_SA.W.PH Purpose: Cross Dot Product with Subtraction on Fractional Halfword Elements
:dpsqx_sa.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x1b & fct=0x30 { ac = DPSQX_SA.W.PH(ac, RSsrc, RTsrc); }
# DPSU.H.QBL Purpose: Dot Product with Subtraction on Vector Unsigned Byte Elements
:dpsu.h.qbl ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0xb & fct=0x30 { ac = DPSU.H.QBL(ac, RSsrc, RTsrc); }
# DPSU.H.QBR Purpose: Dot Product with Subtraction on Vector Unsigned Byte Elements
:dpsu.h.qbr ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0xf & fct=0x30 { ac = DPSU.H.QBR(ac, RSsrc, RTsrc); }
# DPSX.W.PH Purpose: Cross Dot Product with Subtract on Vector Integer Halfword Elements
:dpsx.w.ph ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x9 & fct=0x30 { ac = DPSX.W.PH(ac, RSsrc, RTsrc); }
# EXTP Purpose: Extract Fixed Bitfield From Arbitrary Position in Accumulator to GPR
:extp RT, ac, sz is $(AMODE) & prime=0x1f & sz & RT & zero1315=0x0 & ac & fct2=0x2 & fct=0x38 { RT = EXTP(ac, sz:1); }
# EXTPDP Purpose: Extract Fixed Bitfield From Arbitrary Position in Accumulator to GPR and Decrement Pos
:extpdp RT, ac, sz is $(AMODE) & prime=0x1f & sz & RT & zero1315=0x0 & ac & fct2=0xa & fct=0x38 { RT = EXTPDP(ac, sz:1); }
# EXTPDPV Purpose: Extract Variable Bitfield From Arbitrary Position in Accumulator to GPR and Decrement Pos
:extpdpv RT, ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RT & zero1315=0x0 & ac & fct2=0xb & fct=0x38 { RT = EXTPDPV(ac, RSsrc); }
# EXTPV Purpose: Extract Variable Bitfield From Arbitrary Position in Accumulator to GPR
:extpv RT, ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RT & zero1315=0x0 & ac & fct2=0x3 & fct=0x38 { RT = EXTPV(ac, RSsrc); }
# EXTR[_RS].W Purpose: Extract Word Value With Right Shift From Accumulator to GPR
# The _r/_rs rounding/saturating variants pass a mode flag (1 or 2) to the
# shared EXTR.W pcodeop; the plain variant is modeled directly.
:extr.w RT, ac, shift21 is $(AMODE) & prime=0x1f & shift21 & RT & zero1315=0x0 & ac & fct2=0x0 & fct=0x38 { val:$(DREGSIZE) = ac >> shift21:1; result:4 = val(0); RT = zext(result); }
:extr_r.w RT, ac, shift21 is $(AMODE) & prime=0x1f & shift21 & RT & zero1315=0x0 & ac & fct2=0x4 & fct=0x38 { val:$(DREGSIZE) = ac >> shift21:1; result:4 = val(0); RT = EXTR.W(result, 1:1); }
:extr_rs.w RT, ac, shift21 is $(AMODE) & prime=0x1f & shift21 & RT & zero1315=0x0 & ac & fct2=0x6 & fct=0x38 { val:$(DREGSIZE) = ac >> shift21:1; result:4 = val(0); RT = EXTR.W(result, 2:1); }
# EXTR_S.H Purpose: Extract Halfword Value From Accumulator to GPR With Right Shift and Saturate
:extr_s.h RT, ac, shift21 is $(AMODE) & prime=0x1f & shift21 & RT & zero1315=0x0 & ac & fct2=0xe & fct=0x38 { val:$(DREGSIZE) = ac >> shift21:1; result:2 = val(0); RT = EXTR_S.H(result); }
# EXTRV[_RS].W Purpose: Extract Word Value With Variable Right Shift From Accumulator to GPR
# Shift amount comes from the low 6 bits of rs.
:extrv.w RT, ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RT & zero1315=0x0 & ac & fct2=0x1 & fct=0x38 { shift:1 = RSsrc(0) & 0x3f; val:$(DREGSIZE) = ac >> shift; result:4 = val(0); RT = EXTRV.W(result, 0:1); }
:extrv_r.w RT, ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RT & zero1315=0x0 & ac & fct2=0x5 & fct=0x38 { shift:1 = RSsrc(0) & 0x3f; val:$(DREGSIZE) = ac >> shift; result:4 = val(0); RT = EXTRV.W(result, 1:1); }
:extrv_rs.w RT, ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RT & zero1315=0x0 & ac & fct2=0x7 & fct=0x38 { shift:1 = RSsrc(0) & 0x3f; val:$(DREGSIZE) = ac >> shift; result:4 = val(0); RT = EXTRV.W(result, 2:1); }
# EXTRV_S.H Purpose: Extract Halfword Value Variable From Accumulator to GPR With Right Shift and Saturate
:extrv_s.h RT, ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RT & zero1315=0x0 & ac & fct2=0xf & fct=0x38 { shift:1 = RSsrc(0) & 0x3f; val:$(DREGSIZE) = ac >> shift; result:2 = val(0); RT = EXTR_S.H(result); }
# INSV Purpose: Insert Bit Field Variable
:insv RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero4=0x0 & fct2=0x0 & fct=0xc { RTsrc = INSV(RTsrc, RSsrc); }
# LBUX Purpose: Load Unsigned Byte Indexed
:lbux RD, INDEX_BASE is $(AMODE) & prime=0x1f & INDEX_BASE & RD & fct2=0x6 & fct=0xa { RD = zext(*[ram]:1 INDEX_BASE); }
@ifdef MIPS64
# LDX Load Doubleword Indexed
:ldx RD, INDEX_BASE is $(AMODE) & prime=0x1F & RD & fct=10 & fct2=8 & INDEX_BASE { RD = *[ram]:8 INDEX_BASE; }
@endif
# LHX Purpose: Load Halfword Indexed
:lhx RD, INDEX_BASE is $(AMODE) & prime=0x1f & INDEX_BASE & RD & fct2=0x4 & fct=0xa { RD = sext(*[ram]:2 INDEX_BASE); }
# LWX Purpose: Load Word Indexed
# On MIPS64 the loaded word is sign-extended into the 64-bit register.
:lwx RD, INDEX_BASE is $(AMODE) & prime=0x1f & INDEX_BASE & RD & fct2=0x0 & fct=0xa {
@ifdef MIPS64
RD = sext(*[ram]:4 INDEX_BASE);
@else
RD = *[ram]:4 INDEX_BASE;
@endif
}
# MADD Purpose: Multiply Word and Add to Accumulator
# NOTE(review): MADD is a signed multiply in the ISA but both operands are
# zero-extended here (MSUB below uses sext) -- verify against upstream.
:madd ac, RS32src, RT32src is $(AMODE) & prime=0x1c & RS32src & RT32src & zero1315=0x0 & ac & fct2=0x0 & fct=0x0 { tmp1:$(DREGSIZE) = zext(RS32src); tmp2:$(DREGSIZE) = zext(RT32src); prod:$(DREGSIZE) = tmp1 * tmp2; ac = ac + prod; }
# MADDU Purpose: Multiply Unsigned Word and Add to Accumulator
:maddu ac, RS32src, RT32src is $(AMODE) & prime=0x1c & RS32src & RT32src & zero1315=0x0 & ac & fct2=0x0 & fct=0x1 { tmp1:$(DREGSIZE) = zext(RS32src); tmp2:$(DREGSIZE) = zext(RT32src); prod:$(DREGSIZE) = tmp1 * tmp2; ac = ac + prod; }
# MAQ_S[A].W.PHL Purpose: Multiply with Accumulate Single Vector Fractional Halfword Element
# The saturating _sa variant reuses the MAQ_S pcodeop (common simplification
# throughout this file: saturating/rounding variants share the base pcodeop).
:maq_s.w.phl ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x14 & fct=0x30 { ac = MAQ_S.W.PHL(RSsrc, RTsrc); }
:maq_sa.w.phl ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x10 & fct=0x30 { ac = MAQ_S.W.PHL(RSsrc, RTsrc); }
# MAQ_S[A].W.PHR Purpose: Multiply with Accumulate Single Vector Fractional Halfword Element
:maq_s.w.phr ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x16 & fct=0x30 { ac = MAQ_S.W.PHR(RSsrc, RTsrc); }
:maq_sa.w.phr ac, RSsrc, RTsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x12 & fct=0x30 { ac = MAQ_S.W.PHR(RSsrc, RTsrc); }
# MFHI Purpose: Move from HI register
:mfhi RD, acfhi is $(AMODE) & prime=0x0 & zero2325=0x0 & acfhi & zero1620=0x0 & RD & fct2=0x0 & fct=0x10 { RD = acfhi; }
# MFLO Purpose: Move from LO register
:mflo RD, acflo is $(AMODE) & prime=0x0 & zero2325=0x0 & acflo & zero1620=0x0 & RD & fct2=0x0 & fct=0x12 { RD = acflo; }
# MODSUB Purpose: Modular Subtraction on an Index Value
# rt packs decrement (byte 0) and lastindex (bytes 1-2); when rs == 0 the
# result wraps to lastindex, otherwise rs - decrement.
:modsub RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x12 & fct=0x10 { decr:1 = RTsrc(0); lastIdx:2 = RTsrc(1); equals:1 = (RSsrc == 0); RD = (zext(equals) * zext(lastIdx)) + (zext(!equals) * (RSsrc - zext(decr))); }
# MSUB Purpose: Multiply Word and Subtract from Accumulator
:msub ac, RS32src, RT32src is $(AMODE) & prime=0x1c & RS32src & RT32src & zero1315=0x0 & ac & aclo & achi & fct2=0x0 & fct=0x4 { tmp1:$(DREGSIZE) = sext(RS32src); tmp2:$(DREGSIZE) = sext(RT32src); prod:$(DREGSIZE) = tmp1 * tmp2; ac = ac - prod; }
# MSUBU Purpose: Multiply Unsigned Word and Add to Accumulator
:msubu ac, RS32src, RT32src is $(AMODE) & prime=0x1c & RS32src & RT32src & zero1315=0x0 & ac & fct2=0x0 & fct=0x5 { tmp1:$(DREGSIZE) = zext(RS32src); tmp2:$(DREGSIZE) = zext(RT32src); prod:$(DREGSIZE) = tmp1 * tmp2; ac = ac - prod; }
# MTHI Purpose: Move to HI register
# NOTE(review): assignment direction looks inverted for a move-TO-HI
# (expected achi = RS) -- verify against upstream Ghidra.
:mthi RS, achi is $(AMODE) & prime=0x0 & RS & zero1320=0x0 & achi & fct2=0x0 & fct=0x11 { RS = achi; }
# MTHLIP Purpose: Copy LO to HI and a GPR to LO and Increment Pos by 32
:mthlip RS, ac is $(AMODE) & prime=0x1f & RS & zero1320=0x0 & ac & aclo & achi & fct2=0x1f & fct=0x38 {
achi = aclo;
aclo = RS;
# increment DSPControl pos field by 32
}
# MTLO Purpose: Move to LO register
# NOTE(review): same inverted-direction concern as MTHI above -- verify.
:mtlo RS, aclo is $(AMODE) & prime=0x0 & RS & zero1320=0x0 & aclo & fct2=0x0 & fct=0x13 { RS=aclo; }
# MUL[_S].PH Purpose: Multiply Vector Integer HalfWords to Same Size Products
:mul.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xc & fct=0x18 { RD = MUL.PH(RSsrc, RTsrc); }
:mul_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xe & fct=0x18 { RD = MUL.PH(RSsrc, RTsrc); }
# MULEQ_S.W.PHL Purpose: Multiply Vector Fractional Left Halfwords to Expanded Width Products
:muleq_s.w.phl RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1c & fct=0x10 { RD = MULEQ_S.W.PHL(RSsrc, RTsrc); }
# MULEQ_S.W.PHR Purpose: Multiply Vector Fractional Right Halfwords to Expanded Width Products
:muleq_s.w.phr RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1d & fct=0x10 { RD = MULEQ_S.W.PHR(RSsrc, RTsrc); }
# MULEU_S.PH.QBL Purpose: Multiply Unsigned Vector Left Bytes by Halfwords to Halfword Products
:muleu_s.ph.qbl RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x6 & fct=0x10 { RD = MULEU_S.PH.QBL(RSsrc, RTsrc); }
# MULEU_S.PH.QBR Purpose: Multiply Unsigned Vector Right Bytes with halfwords to Half Word Products
:muleu_s.ph.qbr RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x7 & fct=0x10 { RD = MULEU_S.PH.QBR(RSsrc, RTsrc); }
# MULQ_RS.PH Purpose: Multiply Vector Fractional Halfwords to Fractional Halfword Products
:mulq_rs.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1f & fct=0x10 { RD = MULQ_RS.PH(RSsrc, RTsrc); }
# MULQ_RS.W Purpose: Multiply Fractional Words to Same Size Product with Saturation and Rounding
:mulq_rs.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x17 & fct=0x18 { RD = MULQ_RS.W(RSsrc, RTsrc); }
# MULQ_S.PH Purpose: Multiply Vector Fractional Half-Words to Same Size Products
:mulq_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1e & fct=0x10 { RD = MULQ_S.PH(RSsrc, RTsrc); }
# MULQ_S.W Purpose: Multiply Fractional Words to Same Size Product with Saturation
:mulq_s.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x16 & fct=0x18 { RD = MULQ_S.W(RSsrc, RTsrc); }
# MULSA.W.PH Purpose: Multiply and Subtract Vector Integer Halfword Elements and Accumulate
:mulsa.w.ph ac, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x2 & fct=0x30 { ac = MULSA.W.PH(RSsrc, RTsrc); }
# MULSAQ_S.W.PH Purpose: Multiply And Subtract Vector Fractional Halfwords And Accumulate
:mulsaq_s.w.ph ac, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & zero1315=0x0 & ac & fct2=0x6 & fct=0x30 { ac = MULSAQ_S.W.PH(RSsrc, RTsrc); }
# ---------------------------------------------------------------------------
# MIPS DSP ASE (continued): MULT/MULTU into a DSP accumulator, vector
# pack/pick, precision expand/reduce (PRECE*/PRECR*), PREPEND, DSPControl
# access (RDDSP), replicate, and the SHLL/SHRA/SHRL shift and SUBQ/SUBU
# subtract families. Re-flowed one comment/constructor per line; tokens
# unchanged from the original.
# ---------------------------------------------------------------------------
# MULT Purpose: Multiply Word
:mult ac, RS32src, RT32src is $(AMODE) & prime=0x0 & RS32src & RT32src & zero1315=0x0 & ac & fct2=0x0 & fct=0x18 { tmp1:$(DREGSIZE) = sext( RS32src ); tmp2:$(DREGSIZE) = sext( RT32src ); ac = tmp1 * tmp2; }
# MULTU Purpose: Multiply Unsigned Word
:multu ac, RS32src, RT32src is $(AMODE) & prime=0x0 & RS32src & RT32src & zero1315=0x0 & ac & fct2=0x0 & fct=0x19 { tmp1:$(DREGSIZE) = zext( RS32src ); tmp2:$(DREGSIZE) = zext( RT32src ); ac = tmp1 * tmp2; }
# PACKRL.PH Purpose: Pack a Vector of Halfwords from Vector Halfword Sources
# NOTE(review): src1 is a 2-byte temp, so "src1 << 16" shifts out all bits
# before the zext -- this looks like it should be "zext(src1) << 16".
# The same sub-width-shift pattern appears in PICK.PH/PICK.QB/REPL.QB below;
# verify against upstream Ghidra before relying on these semantics.
:packrl.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xe & fct=0x11 { src1:2 = RSsrc(0); src2:2 = RTsrc(2); RD = zext(src1 << 16) + zext(src2); }
# PICK.PH Purpose: Pick a Vector of Halfword Values Based on Condition Code Bits
# Selects each halfword lane from rs or rt depending on DSPControl ccond
# bits 24/25, using multiply-by-boolean in place of branches.
:pick.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xb & fct=0x11 { cc24:1 = DSPControl[24,1]; cc25:1 = DSPControl[25,1]; val1:2 = RSsrc(0); val2:2 = RTsrc(0); tmp1:2 = (zext(cc24 == 1) * val1) + ((zext(cc24==0)) * val2); val1 = RSsrc(2); val2 = RTsrc(2); tmp2:2 = (zext(cc25 == 1) * val1) + ((zext(cc25==0)) * val2); RD = zext(tmp1) + zext(tmp2 << 16); }
# PICK.QB Purpose: Pick a Vector of Byte Values Based on Condition Code Bits
# Byte-lane analogue of PICK.PH, keyed on DSPControl ccond bits 24..27.
:pick.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x3 & fct=0x11 { local cc1:1 = DSPControl[24,1]; local cc2:1 = DSPControl[25,1]; local cc3:1 = DSPControl[26,1]; local cc4:1 = DSPControl[27,1]; local val1:1 = RSsrc(0); local val2:1 = RTsrc(0); local tmp1:1 = ((cc1 == 1) * val1) + (((cc1==0)) * val2); val1 = RSsrc(1); val2 = RTsrc(1); local tmp2:1 = ((cc2 == 1) * val1) + (((cc2==0)) * val2); val1 = RSsrc(2); val2 = RTsrc(2); local tmp3:1 = ((cc3 == 1) * val1) + (((cc3==0)) * val2); val1 = RSsrc(3); val2 = RTsrc(3); local tmp4:1 = ((cc4 == 1) * val1) + (((cc4==0)) * val2); RD = zext(tmp1) + zext(tmp2 << 8) + zext(tmp3 << 16) + zext(tmp4 << 24); }
# PRECEQ.W.PHL Purpose: Precision Expand Fractional Halfword to Fractional Word Value
:preceq.w.phl RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0xc & fct=0x12 { RD = PRECEQ.W.PHL(RTsrc); }
# PRECEQ.W.PHR Purpose: Precision Expand Fractional Halfword to Fractional Word Value
:preceq.w.phr RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0xd & fct=0x12 { RD = PRECEQ.W.PHR(RTsrc); }
# PRECEQU.PH.QBL Purpose: Precision Expand two Unsigned Bytes to Fractional Halfword Values
:precequ.ph.qbl RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x4 & fct=0x12 { RD = PRECEQU.PH.QBL(RTsrc); }
# PRECEQU.PH.QBLA Purpose: Precision Expand two Unsigned Bytes to Fractional Halfword Values
:precequ.ph.qbla RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x6 & fct=0x12 { RD = PRECEQU.PH.QBLA(RTsrc); }
# PRECEQU.PH.QBR Purpose: Precision Expand two Unsigned Bytes to Fractional Halfword Values
:precequ.ph.qbr RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x5 & fct=0x12 { RD = PRECEQU.PH.QBR(RTsrc); }
# PRECEQU.PH.QBRA Purpose: Precision Expand two Unsigned Bytes to Fractional Halfword Values
:precequ.ph.qbra RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x7 & fct=0x12 { RD = PRECEQU.PH.QBRA(RTsrc); }
# PRECEU.PH.QBL Purpose: Precision Expand Two Unsigned Bytes to Unsigned Halfword Values
:preceu.ph.qbl RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x1c & fct=0x12 { RD = PRECEU.PH.QBL(RTsrc); }
# PRECEU.PH.QBLA Purpose: Precision Expand Two Unsigned Bytes to Unsigned Halfword Values
:preceu.ph.qbla RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x1e & fct=0x12 { RD = PRECEU.PH.QBLA(RTsrc); }
# PRECEU.PH.QBR Purpose: Precision Expand two Unsigned Bytes to Unsigned Halfword Values
:preceu.ph.qbr RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x1d & fct=0x12 { RD = PRECEU.PH.QBR(RTsrc); }
# PRECEU.PH.QBRA Purpose: Precision Expand Two Unsigned Bytes to Unsigned Halfword Values
:preceu.ph.qbra RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x1f & fct=0x12 { RD = PRECEU.PH.QBRA(RTsrc); }
# PRECR.QB.PH Purpose: Precision Reduce Four Integer Halfwords to Four Bytes
:precr.qb.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xd & fct=0x11 { RD = PRECR.QB.PH(RSsrc, RTsrc); }
# PRECR_SRA[_R].PH.W Purpose: Precision Reduce Two Integer Words to Halfwords after a Right Shift
# NOTE(review): the pcodeop is invoked with no operands and its result is
# discarded, so rt is never written -- looks like a modeling gap; verify.
:precr_sra.ph.w rt, rs, sa_dsp is $(AMODE) & prime=0x1f & rs & rt & sa_dsp & fct2=0x1e & fct=0x11 { PRECR_SRA.PH.W(); }
:precr_sra_r.ph.w rt, rs, sa_dsp is $(AMODE) & prime=0x1f & rs & rt & sa_dsp & fct2=0x1f & fct=0x11 { PRECR_SRA.PH.W(); }
# PRECRQ.PH.W Purpose: Precision Reduce Fractional Words to Fractional Halfwords
:precrq.ph.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x14 & fct=0x11 { RD = PRECRQ.PH.W(RSsrc, RTsrc); }
# PRECRQ.QB.PH Purpose: Precision Reduce Four Fractional Halfwords to Four Bytes
:precrq.qb.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xc & fct=0x11 { RD = PRECRQ.QB.PH(RSsrc, RTsrc); }
# PRECRQU_S.QB.PH Purpose: Precision Reduce Fractional Halfwords to Unsigned Bytes With Saturation
:precrqu_s.qb.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xf & fct=0x11 { RD = PRECRQU_S.QB.PH(RSsrc, RTsrc); }
# PRECRQ_RS.PH.W Purpose: Precision Reduce Fractional Words to Halfwords With Rounding and Saturation
:precrq_rs.ph.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x15 & fct=0x11 { RD = PRECRQ_RS.PH.W(RSsrc, RTsrc); }
# PREPEND Purpose: Right Shift and Prepend Bits to the MSB
# Branch-free select: keep rt unchanged when the shift amount is zero.
:prepend RT, RSsrc, sa_dsp is $(AMODE) & prime=0x1f & RSsrc & RT & sa_dsp & fct2=0x1 & fct=0x31 { shift_val:1 = sa_dsp; trunc_val:1 = ($(REGSIZE) * 8) - shift_val; temp:$(REGSIZE) = (RSsrc << shift_val) + (RT >> trunc_val); RT = (zext(shift_val == 0) * RT) + (zext(shift_val != 0) * temp); }
# RADDU.W.QB Purpose: Unsigned Reduction Add Vector Quad Bytes
:raddu.w.qb RD, RSsrc is $(AMODE) & prime=0x1f & RSsrc & zero1620=0x0 & RD & fct2=0x14 & fct=0x10 { RD = RADDU.W.QB(RSsrc); }
# RDDSP Purpose: Read DSPControl Register Fields to a GPR
# Second form: mask 0x1f selects every field, so the whole register is read.
:rddsp RD, immed1625 is $(AMODE) & prime=0x1f & immed1625 & RD & fct2=0x12 & fct=0x38 { RD = DSPControl & immed1625:$(REGSIZE); }
:rddsp RD is $(AMODE) & prime=0x1f & immed1625=0x1f & RD & fct2=0x12 & fct=0x38 { RD = DSPControl; }
# REPL.PH Purpose: Replicate Immediate Integer into all Vector Element Positions
:repl.ph RD, immed1625 is $(AMODE) & prime=0x1f & immed1625 & RD & fct2=0xa & fct=0x12 { val:2 = immed1625; repl:$(REGSIZE) = sext(val) << 16 | zext(val); RD = repl; }
# REPL.QB Purpose: Replicate Immediate Integer into all Vector Element Positions
:repl.qb RD, immed1623 is $(AMODE) & prime=0x1f & zero2425=0x0 & immed1623 & RD & fct2=0x2 & fct=0x12 { byte:1 = immed1623; RD = sext((byte << 24) | (byte << 16) | (byte << 8) | (byte)); }
# REPLV.PH Purpose: Replicate a Halfword into all Vector Element Positions
:replv.ph RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0xb & fct=0x12 { RD = REPLV.PH(RTsrc); }
# REPLV.QB Purpose: Replicate Byte into all Vector Element Positions
:replv.qb RD, RTsrc is $(AMODE) & prime=0x1f & zero21=0x0 & RTsrc & RD & fct2=0x3 & fct=0x12 { RD = REPLV.QB(RTsrc); }
# SHILO Purpose: Shift an Accumulator Value Leaving the Result in the Same Accumulator
# Signed shift amount: positive shifts right, negative shifts left.
:shilo ac, shift20 is $(AMODE) & prime=0x1f & shift20 & zero1619=0x0 & zero1315=0x0 & ac & fct2=0x1a & fct=0x38 { shift_val:1 = shift20; ac = (zext(shift_val s>= 0) * (ac >> shift_val)) + (zext(shift_val s< 0) * (ac << (-shift_val))); }
# SHILOV Purpose: Variable Shift of Accumulator Value Leaving the Result in the Same Accumulator
:shilov ac, RSsrc is $(AMODE) & prime=0x1f & RSsrc & zero1620=0x0 & zero1315=0x0 & ac & fct2=0x1b & fct=0x38 { shift_val:1 = RSsrc(1) & 0x7f; ac = (zext(shift_val s>= 0) * (ac >> shift_val)) + (zext(shift_val s< 0) * (ac << (-shift_val))); }
# SHLL[_S].PH Purpose: Shift Left Logical Vector Pair Halfwords
# As elsewhere, the saturating _s variant shares the base pcodeop.
:shll.ph RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & bit25=0x0 & sa_dsp2 & RTsrc & RD & fct2=0x8 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHLL.PH(RTsrc, shift_val); }
:shll_s.ph RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & bit25=0x0 & sa_dsp2 & RTsrc & RD & fct2=0xc & fct=0x13 { shift_val:1 = sa_dsp2; RD= SHLL.PH(RTsrc, shift_val); }
# SHLL.QB Purpose: Shift Left Logical Vector Quad Bytes
:shll.qb RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & sa_dsp2 & RTsrc & RD & fct2=0x0 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHLL.QB(RTsrc, shift_val); }
# SHLLV[_S].PH Purpose: Shift Left Logical Variable Vector Pair Halfwords
:shllv.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xa & fct=0x13 { RD = SHLLV.PH(RTsrc, RSsrc); }
:shllv_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xe & fct=0x13 { RD = SHLLV.PH(RTsrc, RSsrc); }
# SHLLV.QB Purpose: Shift Left Logical Variable Vector Quad Bytes
:shllv.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x2 & fct=0x13 { RD = SHLLV.QB(RTsrc, RSsrc); }
# SHLLV_S.W Purpose: Shift Left Logical Variable Vector Word
:shllv_s.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x16 & fct=0x13 { RD = SHLLV_S.W(RTsrc, RSsrc); }
# SHLL_S.W Purpose: Shift Left Logical Word with Saturation
:shll_s.w RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & sa_dsp2 & RTsrc & RD & fct2=0x14 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHLL_S.W(RTsrc, shift_val); }
# SHRA[_R].QB Purpose: Shift Right Arithmetic Vector of Four Bytes
:shra.qb RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & sa_dsp2 & RTsrc & RD & fct2=0x4 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRA.QB(RTsrc, shift_val); }
:shra_r.qb RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & sa_dsp2 & RTsrc & RD & fct2=0x5 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRA.QB(RTsrc, shift_val); }
# SHRA[_R].PH Purpose: Shift Right Arithmetic Vector Pair Halfwords
:shra.ph RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & bit25=0x0 & sa_dsp2 & RTsrc & RD & fct2=0x9 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRA.PH(RTsrc, shift_val); }
:shra_r.ph RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & bit25=0x0 & sa_dsp2 & RTsrc & RD & fct2=0xd & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRA.PH(RTsrc, shift_val); }
# SHRAV[_R].PH Purpose: Shift Right Arithmetic Variable Vector Pair Halfwords
:shrav.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xb & fct=0x13 { RD = SHRAV.PH(RTsrc, RSsrc); }
:shrav_r.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xf & fct=0x13 { RD = SHRAV.PH(RTsrc, RSsrc); }
# SHRAV[_R].QB Purpose: Shift Right Arithmetic Variable Vector of Four Bytes
:shrav.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x6 & fct=0x13 { RD = SHRAV.QB(RTsrc, RSsrc); }
:shrav_r.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x7 & fct=0x13 { RD = SHRAV.QB(RTsrc, RSsrc); }
# SHRAV_R.W Purpose: Shift Right Arithmetic Variable Word with Rounding
:shrav_r.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x17 & fct=0x13 { RD = SHRAV_R.W(RTsrc, RSsrc); }
# SHRA_R.W Purpose: Shift Right Arithmetic Word with Rounding
:shra_r.w RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & sa_dsp2 & RTsrc & RD & fct2=0x15 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRA_R.W(RTsrc, shift_val); }
# SHRL.PH Purpose: Shift Right Logical Two Halfwords
:shrl.ph RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & bit25=0x0 & sa_dsp2 & RTsrc & RD & fct2=0x19 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRL.PH(RTsrc, shift_val); }
# SHRL.QB Purpose: Shift Right Logical Vector Quad Bytes
:shrl.qb RD, RTsrc, sa_dsp2 is $(AMODE) & prime=0x1f & sa_dsp2 & RTsrc & RD & fct2=0x1 & fct=0x13 { shift_val:1 = sa_dsp2; RD = SHRL.QB(RTsrc, shift_val); }
# SHRLV.PH Purpose: Shift Variable Right Logical Pair of Halfwords
:shrlv.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1b & fct=0x13 { RD = SHRLV.PH(RTsrc, RSsrc); }
# SHRLV.QB Purpose: Shift Right Logical Variable Vector Quad Bytes
:shrlv.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x3 & fct=0x13 { RD = SHRLV.QB(RTsrc, RSsrc); }
# SUBQ[_S].PH Purpose: Subtract Fractional Halfword Vector
:subq.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xb & fct=0x10 { RD = SUBQ.PH(RSsrc, RTsrc); }
:subq_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xf & fct=0x10 { RD = SUBQ.PH(RSsrc, RTsrc); }
# SUBQ_S.W Purpose: Subtract Fractional Word
:subq_s.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x17 & fct=0x10 { RD = SUBQ_S.W(RSsrc, RTsrc); }
# SUBQH[_R].PH Purpose: Subtract Fractional Halfword Vectors And Shift Right to Halve Results
:subqh.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x9 & fct=0x18 { RD = SUBQH.PH(RSsrc, RTsrc); }
:subqh_r.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xb & fct=0x18 { RD = SUBQH.PH(RSsrc, RTsrc); }
# SUBQH[_R].W Purpose: Subtract Fractional Words And Shift Right to Halve Results
:subqh.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x11 & fct=0x18 { RD = SUBQH.W(RSsrc, RTsrc); }
:subqh_r.w RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x13 & fct=0x18 { RD = SUBQH.W(RSsrc, RTsrc); }
# SUBU[_S].PH Purpose: Subtract Unsigned Integer Halfwords
:subu.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x9 & fct=0x10 { RD = SUBU.PH(RSsrc, RTsrc); }
:subu_s.ph RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0xd & fct=0x10 { RD = SUBU.PH(RSsrc, RTsrc); }
# SUBU[_S].QB Purpose: Subtract Unsigned Quad Byte Vector
:subu.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1 & fct=0x10 { RD = SUBU.QB(RSsrc, RTsrc); }
# ---------------------------------------------------------------------------
# Tail of the MIPS DSP file (SUBU_S.QB, SUBUH, WRDSP), then the whole
# mips_mt.sinc MT-ASE module, then the head of mipsfloat.sinc. The
# "==== FILE: ====" lines are extraction-dump separators, preserved as-is.
# Re-flowed one comment/constructor per line; tokens unchanged.
# ---------------------------------------------------------------------------
:subu_s.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x5 & fct=0x10 { RD = SUBU.QB(RSsrc, RTsrc); }
# SUBUH[_R].QB Purpose: Subtract Unsigned Bytes And Right Shift to Halve Results
:subuh.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x1 & fct=0x18 { RD = SUBUH.QB(RSsrc, RTsrc); }
:subuh_r.qb RD, RTsrc, RSsrc is $(AMODE) & prime=0x1f & RSsrc & RTsrc & RD & fct2=0x3 & fct=0x18 { RD = SUBUH.QB(RSsrc, RTsrc); }
# WRDSP Purpose: Write Fields to DSPControl Register from a GPR
# Second form: mask 0x1f selects every field, so the whole register is written.
:wrdsp RSsrc, mask is $(AMODE) & prime=0x1f & RSsrc & mask & fct2=0x13 & fct=0x38 { DSPControl = (RSsrc & mask); }
:wrdsp RSsrc is $(AMODE) & prime=0x1f & RSsrc & mask=0x1f & fct2=0x13 & fct=0x38 { DSPControl = RSsrc; }
================================================
FILE: pypcode/processors/MIPS/data/languages/mips_mt.sinc
================================================
# MIPS MT (multi-threading) ASE. Thread-context transfers are modeled as
# opaque pcodeops since the per-thread register files are not modeled.
define pcodeop fork;
define pcodeop move_from_thread_context;
define pcodeop move_from_thread_cp0;
define pcodeop move_from_thread_gpr;
define pcodeop move_from_thread_dsp;
define pcodeop move_from_thread_fpr;
define pcodeop move_from_thread_fpcr;
define pcodeop move_from_thread_cop2_data;
define pcodeop move_from_thread_cop2_control;
define pcodeop move_to_thread_context;
define pcodeop move_to_thread_cp0;
define pcodeop move_to_thread_gpr;
define pcodeop move_to_thread_dsp;
define pcodeop move_to_thread_fpr;
define pcodeop move_to_thread_fpcr;
define pcodeop move_to_thread_cop2_data;
define pcodeop move_to_thread_cop2_control;
define pcodeop yield;
# Disable multi-threaded execution. Returns VPEControl
:dmt RT is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xB & rd32=0x1 & zero5=0xF & fct=0x1 & RT {
# Clear VPEControl IE bit (bit 15)
RT = VPEControl;
VPEControl = VPEControl & ~0x8000;
#VPEControl[15,1] = 0;
}
# Disable Virtual Processor Execution. Returns VPEControl
:dvpe RT is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xB & rd32=0x0 & zero5=0x0 & fct=0x1 & RT {
# Clear MVPControl EVP bit (bit 0)
RT = MVPControl;
MVPControl = MVPControl & ~0x1;
}
# Enable multi-threaded execution. Returns VPEControl
:emt RT is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xB & rd32=0x1 & zero5=0xF & fct=0x21 & RT {
# Set VPEControl TE bit (bit 15)
RT = VPEControl;
VPEControl = VPEControl | 0x8000;
# VPEControl[15,1] = 1;
}
# Enable Virtual Processor Execution. Returns VPEControl
:evpe RT is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xB & rd32=0x0 & zero5=0x0 & fct=0x21 & RT {
# Set MVPControl EVP bit (bit 0)
RT = MVPControl;
MVPControl = MVPControl | 0x1;
}
:fork "Thread_GPR["^RDsrc^"]", RSsrc, RTsrc is $(AMODE) & REL6=0 & prime=0x1F & zero5=0x0 & fct=0x8 & RDsrc & RSsrc & RTsrc { fork(RDsrc, RSsrc, RTsrc); }
# Move From Thread Context
# MFTR general instruction
:mftr RD, RTsrc, bit5, sel, h is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5 & h & bit3=0 & sel & RD & RTsrc { tmp:$(REGSIZE) = move_from_thread_context(RTsrc, bit5:1, sel:1, h:1); RD = tmp; }
# MFTR instructions have many idioms for sub-decodings
# Move from coprocessor 0 register rt, sel=sel
:mftc0 RD, "Thread_Co0["^RT0thread^"]", sel is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=0 & bit3=0 & RD & RT0thread & sel { RD = move_from_thread_cp0(RT0thread:1, sel:1); }
:mftc0 RD, "Thread_Co0["^RT0thread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=0 & bit3=0 & RD & RT0thread & sel=0 { RD = move_from_thread_cp0(RT0thread:1, 0:1); }
# Move from GPR[rt]
:mftgpr RD, "Thread_GPR["^RTthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=1 & bit3=0 & sel=0x0 & RD & RTthread { RD = move_from_thread_gpr(RTthread); }
# Sub-table picking the DSP-side source name for mftlo/mfthi/mftacx/mftdsp.
RtDSP: "lo" is lohiacx=0 { }
RtDSP: "hi" is lohiacx=1 { }
RtDSP: "acx" is lohiacx=2 { }
RtDSP: "dsp" is rtmtdsp=16 { }
:mft^RtDSP RD, rtmtdsp is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=1 & bit3=0 & sel=0x1 & RD & RtDSP & rtmtdsp { RD = move_from_thread_dsp(rtmtdsp); }
# Move from FPR[rt]
:mftc1 RD, "Thread_FPR["^FTthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=1 & h=0 & bit3=0 & sel=0x2 & RD & FTthread { RD = move_from_thread_fpr(FTthread, 0:1); }
:mfthc1 RD, "Thread_FPR["^FTthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=1 & h=1 & bit3=0 & sel=0x2 & RD & FTthread { RD = move_from_thread_fpr(FTthread, 1:1); }
# Move from FPCR[rt]
:cftc1 RD, "Thread_FPCR["^FCTthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0x8 & bit5=1 & bit3=0 & sel=0x3 & RD & FCTthread { RD = move_from_thread_fpcr(FCTthread); }
# Skipping for now: MFTR for C0P2 Data and C0P2 Control (bit5=1, sel=4/5)
# Move to Thread Context
# MTTR general instruction
:mttr RDsrc, RTsrc, bit5, sel, h is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xc & bit5 & h & bit3=0 & sel & RDsrc & RTsrc { move_to_thread_context(RDsrc, RTsrc, bit5:1, sel:1, h:1); }
# MTTR instructions have many idioms for sub-decodings
# Move rt to coprocessor 0 register rd, sel=sel
:mttc0 RTsrc, "Thread_Co0["^RD0thread^"]", sel is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xc & bit5=0 & bit3=0 & RTsrc & RD0thread & sel { move_to_thread_cp0(RD0thread:1, RTsrc, sel:1); }
:mttc0 RTsrc, "Thread_Co0["^RD0thread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xc & bit5=0 & bit3=0 & RTsrc & RD0thread & sel=0 { move_to_thread_cp0(RD0thread:1, RTsrc, 0:1); }
# Move to GPR[rd]
:mttgpr RTsrc, "Thread_GPR["^RDthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xc & bit5=1 & bit3=0 & sel=0x0 & RTsrc & RDthread { move_to_thread_gpr(RDthread, RTsrc); }
# Sub-table picking the DSP-side destination name for mttlo/mtthi/mttacx/mttdsp.
RdDSP: "lo" is lohiacx2=0 { }
RdDSP: "hi" is lohiacx2=1 { }
RdDSP: "acx" is lohiacx2=2 { }
RdDSP: "dsp" is rdmtdsp=16 { }
# Move to DSP[rd]
:mtt^RdDSP RTsrc, rdmtdsp is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xc & bit5=1 & bit3=0 & sel=0x1 & RTsrc & RdDSP & rdmtdsp { move_to_thread_dsp(rdmtdsp, RTsrc); }
# Move to FPR[rd]
:mttc1 RTsrc, "Thread_FPR["^FDthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xC & bit5=1 & h=0 & bit3=0 & sel=0x2 & RTsrc & FDthread { move_to_thread_fpr(FDthread, RTsrc, 0:1); }
:mtthc1 RTsrc, "Thread_FPR["^FDthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xC & bit5=1 & h=1 & bit3=0 & sel=0x2 & RTsrc & FDthread { move_to_thread_fpr(FDthread, RTsrc, 1:1); }
# Move to FPCR[rd]
:cttc1 RTsrc, "Thread_FPCR["^FCRthread^"]" is $(AMODE) & REL6=0 & prime=0x10 & mfmc0=0xC & bit5=1 & bit3=0 & sel=0x3 & RTsrc & FCRthread { move_to_thread_fpcr(FCRthread, RTsrc); }
# Skipping for now: MTTR for C0P2 Data and C0P2 control (bit5=1, sel=4/5)
# Conditionally Deschedule or Deallocate the Current Thread
:yield RD, RSsrc is $(AMODE) & REL6=0 & prime=0x1F & op=0 & zero5=0x0 & fct=0x9 & RD & RSsrc { yield(RSsrc); RD = RSsrc & YQMask; }
:yield RSsrc is $(AMODE) & REL6=0 & prime=0x1F & op=0 & zero5=0x0 & fct=0x9 & rd=0 & RSsrc { yield(RSsrc); }
================================================
FILE: pypcode/processors/MIPS/data/languages/mipsfloat.sinc
================================================
############################
#
# MIPS Floating Point (COP1 - coprocessor 1) instructions
# includes arithmetic, compares, branch on FP condition flag, conversions
# Also Coprocessor 0 instructions
#
# Note a MIPS word is 32-bits, and a long is a 64-bit integer
#
# mipsP6float.sinc contains floating point instructions that are in pre-Release 6 but not in Release 6
#
# mipsR6float.sinc contains floating point instructions that are in Release 6 and later
#
############################
# Paired-single (.PS) operations are modeled with this opaque pcodeop.
define pcodeop mipsFloatPS;
# 0100 01ff fff0 0000 ssss sddd dd00 0101
:abs.S fd, fs is $(AMODE) & prime=17 & fct=5 & fmt1 & format=0x10 & fs & fd { fd[0,32] = abs( fs:4 ); }
:abs.D fd, fs is $(AMODE) & prime=17 & fct=5 & fmt1 & format=0x11 & fs & fd & fsD & fdD { fdD = abs(fsD); }
:abs.PS fd, fs is $(AMODE) & REL6=0 & prime=17 & fct=5 & fmt1 & fs & fd & format=0x16 & fdD & fsD { fdD = mipsFloatPS(fsD); }
# 0100 01ff ffft tttt ssss sddd dd00 0000
:add.S fd, fs, ft is $(AMODE) & prime=17 & fct=0 & fmt1 & format=0x10 & ft & fs & fd { fd[0,32] = fs:4 f+ ft:4; }
:add.D fd, fs, ft is $(AMODE) & prime=17 & fct=0 & fmt1 & format=0x11 & ft & fs & fd & ftD & fsD & fdD { fdD = fsD f+ ftD; }
:add.PS fd, fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=0 & fmt1 & ft & fs & fd & format=0x16 & ftD & fsD & fdD { fdD = mipsFloatPS(fsD, ftD); }
# 0100 01ff fff0 0000 ssss sddd dd00 1010
:ceil.l.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0A & fmt2 & fd & fs & format=0x10 & fdD {
# Note this instruction is in release 2 and later
fd_tmp:4 = ceil(fs:4); # Note that ceil returns a float the same size as its argument
fdD = trunc(fd_tmp); # Note that trunc converts a float to an integer
}
:ceil.l.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0A & fmt2 & fd & fs & format=0x11 & fsD & fdD {
# Note this instruction is in release 2 and later
fsD_tmp:8 = ceil(fsD); # Note that ceil returns a float the same size as its argument
fdD = trunc(fsD_tmp); # Convert to 64-bit integer
}
# 0100 01ff fff0 0000 ssss sddd dd00 1110
:ceil.w.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0E & fmt2 & fd & fs & format=0x10 {
fs_ceil_tmp:4 = ceil(fs:4); # Note that ceil returns a float the same size as its argument
fd[0,32] = trunc(fs_ceil_tmp);
}
:ceil.w.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0E & fmt2 & fd & fs & format=0x11 & fsD {
fs_tmp:8 = ceil(fsD); # Note that ceil returns a float the same size as its argument
fd[0,32] = trunc( fs_tmp ); # Need to set only 32 bits of fd
}
# 0100 0100 010t tttt ssss s000 0000 0000
:cfc1 RT, fs_unk is $(AMODE) & prime=17 & copop=2 & RT & fs_unk & bigfunct=0 { tmp:4 = getCopControlWord( 1:1, fs_unk:4 ); RT = sext(tmp); }
:cfc1 RT, fs_fcr is $(AMODE) & prime=17 & copop=2 & RT & fs_fcr & (fs=0 | fs=25 | fs=26 | fs=28 | fs=31) & bigfunct=0 { RT = sext(fs_fcr); }
# Since we don't track the state of the FCSR bits, no sense in introducing complex code
#:cfc1 RT, fs_fcr is $(AMODE) & prime=17 & copop=2 & RT & fs_fcr & fs=25 & bigfunct=0 {
#
# --- continuation of the commented-out cfc1 idioms begun above; kept disabled
# --- because the FCSR bit state is not tracked (the simple sext form is used)
#    tmp1:4 = (fcsr & 0x00800000) >> 23;
#    tmp2:4 = (fcsr & 0xfe000000) >> 24;
#    RT = sext(tmp2 + tmp1);
#}
#:cfc1 RT, fs_fcr is $(AMODE) & prime=17 & copop=2 & RT & fs_fcr & fs=26 & bigfunct=0 {
#    tmp1:4 = fcsr & 0x0003f07c;
#    RT = sext(tmp1);
#}
#:cfc1 RT, fs_fcr is $(AMODE) & prime=17 & copop=2 & RT & fs_fcr & fs=28 & bigfunct=0 {
#    tmp1:4 = fcsr & 0x00000f83;
#    tmp2:4 = (fcsr & 0x01000000) >> 24;
#    RT = sext(tmp1 + tmp2);
#}
#:cfc1 RT, fs_fcr is $(AMODE) & prime=17 & copop=2 & RT & fs_fcr & fs=31 & bigfunct=0 {
#    RT = sext(fcsr);
#}

# CTC1: move GPR rt to FP control register fs
# 0100 0100 110t tttt ssss s000 0000 0000
:ctc1 RTsrc, fs_unk is $(AMODE) & prime=17 & copop=6 & RTsrc & fs_unk & bigfunct=0 {
    setCopControlWord( 1:1, fs_unk:4, RTsrc );
}

:ctc1 RTsrc, fs_fcr is $(AMODE) & prime=17 & copop=6 & RTsrc & fs_fcr & (fs=0 | fs=25 | fs=26 | fs=28 | fs=31) & bigfunct=0 {
    fs_fcr = RTsrc:$(SIZETO4);
}

# CVT.D.fmt: convert to double-precision float
# 0100 01ff fff0 0000 ssss sddd dd10 0001
:cvt.d.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x21 & fmt3 & fd & fs & format=0x10 & fdD {
    # Convert from single float to double float
    fdD = float2float(fs:4);
}

:cvt.d.W fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x21 & fmt3 & fd & fs & format=0x14 & fdD {
    # Convert from 32-bit int word source to double float
    fs_tmp:4 = fs:4;
    fdD = int2float(fs_tmp);
}

:cvt.d.L fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x21 & fmt3 & fd & fs & format=0x15 & fdD & fsD {
    # Note this instruction is in release 2 and later
    # Convert from 64-bit long source to double float
    fdD = int2float(fsD);
}

# CVT.L.fmt: convert to 64-bit integer, honoring the FCSR RM rounding-mode bits.
# FIX(extraction): the original <round>/<done> local labels were stripped by text
# extraction, leaving invalid "goto ;" statements; the label structure is restored
# here (RM == 0 means round-to-nearest, otherwise floor is used as an approximation).
# 0100 01ff fff0 0000 ssss sddd dd10 0101
:cvt.l.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x25 & fmt2 & fd & fs & format=0x10 & fdD {
    # Note this instruction is in release 2 and later
    # Convert from single float source to 64-bit long integer, using the fcsr RM rounding mode bits
    rm_tmp:1 = fcsr[0,2];            # Get RM rounding mode bits
    fs_tmp:4 = fs:4;                 # Get the lower 32-bits as a floating point single
    fs_cvt_tmp:4 = 0;
    if (rm_tmp == 0) goto <round>;
    fs_cvt_tmp = floor(fs_tmp);      # RM is 1, no rounding, and floor returns a float
    goto <done>;
    <round>
    fs_cvt_tmp = round(fs_tmp);      # round returns a float
    <done>
    fdD = trunc(fs_cvt_tmp);         # trunc returns an integer
}

:cvt.l.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x25 & fmt2 & fd & fs & format=0x11 & fsD & fdD {
    # Note this instruction is in release 2 and later
    # Convert from double float to 64-bit long integer, using fcsr RM rounding mode bits
    rm_tmp:1 = fcsr[0,2];            # Get RM rounding mode bits
    if (rm_tmp == 0) goto <round>;
    fd_tmp:8 = floor(fsD);           # RM is 1, no rounding
    goto <done>;
    <round>
    fd_tmp = round(fsD);
    <done>
    fdD = trunc(fd_tmp);
}

# 0100 0110 000t tttt ssss sddd dd10 0110
:cvt.PS.S fd, fs, ft is $(AMODE) & REL6=0 & prime=0x11 & format=0x10 & fct=0x26 & fd & fs & ft & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# CVT.S.fmt: convert to single-precision float
# 0100 01ff fff0 0000 ssss sddd dd10 0000
:cvt.s.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x20 & fmt4 & fd & fs & format=0x11 & fsD {
    fd[0,32] = float2float(fsD);
}

:cvt.s.W fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x20 & fmt4 & fd & fs & format=0x14 {
    fd[0,32] = int2float(fs:4);
}

:cvt.s.L fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x20 & fmt4 & fd & fs & format=0x15 & fsD {
    # Note this instruction is in release 2 and later
    fd[0,32] = int2float(fsD);
}

# 0100 0110 1100 0000 ssss sddd dd10 1000
:cvt.s.pl fd, fs is $(AMODE) & REL6=0 & prime=0x11 & format=0x16 & ft=0x0 & fct=0x28 & fd & fs & fdD & fsD {
    fdD = mipsFloatPS(fsD);
}

# 0100 0110 1100 0000 ssss sddd dd10 0000
:cvt.s.pu fd, fs is $(AMODE) & REL6=0 & prime=0x11 & format=0x16 & ft=0x0 & fct=0x20 & fd & fs & fdD & fsD {
    fdD = mipsFloatPS(fsD);
}

# CVT.W.fmt: convert to 32-bit integer word, honoring the FCSR RM bits.
# FIX(extraction): <round>/<done> labels restored, as in cvt.l.* above.
# 0100 01ff fff0 0000 ssss sddd dd10 0100
:cvt.w.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x24 & fmt2 & fd & fs & format=0x10 {
    # Convert from single float source to 32-bit integer word, using the fcsr RM rounding mode bits
    rm_tmp:1 = fcsr[0,2];            # Get RM rounding mode bits
    fs_tmp:4 = fs:4;
    fs_cvt_tmp:4 = 0;
    if (rm_tmp == 0) goto <round>;
    fs_cvt_tmp = floor(fs_tmp);      # RM is 1, no rounding, and floor returns a float
    goto <done>;
    <round>
    fs_cvt_tmp = round(fs_tmp);      # round returns a float
    <done>
    fd[0,32] = trunc(fs_cvt_tmp);    # trunc returns an integer
}

:cvt.w.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x24 & fmt2 & fd & fs & format=0x11 & fsD {
    # Convert from double float source to 32-bit integer word, using the fcsr RM rounding mode bits
    rm_tmp:1 = fcsr[0,2];            # Get RM rounding mode bits
    if (rm_tmp == 0) goto <round>;
    fs_tmp:8 = floor(fsD);           # RM is 1, no rounding
    goto <done>;
    <round>
    fs_tmp = round(fsD);
    <done>
    fd[0,32] = trunc(fs_tmp);
}

# 0100 01ff ffft tttt ssss sddd dd00 0011
:div.S fd, fs, ft is $(AMODE) & prime=17 & fct=3 & fmt2 & ft & fs & fd & format=0x10 {
    fd[0,32] = fs:4 f/ ft:4;
}

:div.D fd, fs, ft is $(AMODE) & prime=17 & fct=3 & fmt2 & ft & fs & fd & format=0x11 & fdD & fsD & ftD {
    fdD = fsD f/ ftD;
}

# 0100 01ff fff0 0000 ssss sddd dd00 1011
:floor.l.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0B & fmt2 & fd & fs & format=0x10 & fdD {
    # Note this instruction is in release 2 and later
    # Convert floor of single float to a 64-bit long integer
    fd_tmp:4 = floor(fs:4);          # returns a float
    fdD = trunc(fd_tmp);             # converts float to int
}

:floor.l.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0B & fmt2 & fd & fs & format=0x11 & fdD & fsD {
    # Note this instruction is in release 2 and later
    fsD_tmp:8 = floor(fsD);
    fdD = trunc(fsD_tmp);
}

# 0100 01ff fff0 0000 ssss sddd dd00 1111
:floor.w.S fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0F & fmt2 & fd & fs & format=0x10 {
    # Floor of single float copied to a 32-bit integer word
    fd_tmp:4 = floor(fs:4);          # returns a float
    fd = trunc(fd_tmp);              # converts float to int
}

:floor.w.D fd, fs is $(AMODE) & prime=0x11 & ft=0x0 & fct=0x0F & fmt2 & fd & fs & format=0x11 & fsD {
    # Floor of double float to a 32-bit integer word
    fd[0,32] = trunc(floor(fsD));
}

# LDC1: load doubleword to FPR
# 1101 01bb bbbt tttt iiii iiii iiii iiii
:ldc1 ft, OFF_BASE is $(AMODE) & prime=53 & OFF_BASE & ft & ftD {
    ftD = *[ram]:8 OFF_BASE;
}

# SDC1: store doubleword from FPR
# 1111 01bb bbbt tttt iiii iiii iiii iiii
:sdc1 ft, OFF_BASE is $(AMODE) & prime=61 & OFF_BASE & ft & ftD {
    *[ram]:8 OFF_BASE = ftD;
}

# LWC1: load word to FPR
# 1100 01bb bbbt tttt iiii iiii iiii iiii
:lwc1 ft, OFF_BASE is $(AMODE) & prime=49 & OFF_BASE & ft {
    ft[0,32] = *[ram]:4 OFF_BASE;
}

# 0100 0100 000t tttt ssss s000 0000 0000
:mfc1 RT, fs is $(AMODE) & prime=17 & copop=0 & RT & fs & bigfunct=0 {
    # Move just a word, 32-bits
    RT = sext( fs:$(SIZETO4) );
}

# 0100 0100 011t tttt ssss s000 0000 0000
:mfhc1 RT, fs is $(AMODE) & prime=17 & copop=3 & bigfunct=0 & RT & fs & fsD {
    RT = sext(fsD[32,32]);
}

# 0100 01ff fff0 0000 ssss sddd dd00 0110
:mov.S fd, fs is $(AMODE) & prime=17 & fct=6 & fmt1 & fs & fd & format=0x10 {
    fd[0,32] = fs:4;
}

:mov.D fd, fs is $(AMODE) & prime=17 & fct=6 & fmt1 & fs & fd & format=0x11 & fdD & fsD {
    fdD = fsD;
}

:mov.PS fd, fs is $(AMODE) & REL6=0 & prime=17 & fct=6 & fmt1 & fs & fd & format=0x16 & fsD & fdD {
    fdD = mipsFloatPS(fsD);
}

# 0100 0100 100t tttt dddd d000 0000 0000
:mtc1 RTsrc, fs is $(AMODE) & prime=17 & copop=4 & RTsrc & fs & bigfunct=0 {
    # Move 32-bits of RTsrc to Low Half of FPR fs
    fs[0,32] = RTsrc:$(SIZETO4);
}

# 0100 0100 111t tttt ssss s000 0000 0000
:mthc1 RTsrc, fs is $(AMODE) & prime=17 & copop=0x07 & bigfunct=0x0 & RTsrc & fs & fsD {
    # Move 32-bits of RTsrc to High Half of FPR fs
    fsD[32,32] = RTsrc:4;
}

# 0100 01ff ffft tttt ssss sddd dd00 0010
:mul.S fd, fs, ft is $(AMODE) & prime=17 & fct=2 & fmt1 & ft & fs & fd & format=0x10 {
    fd[0,32] = fs:4 f* ft:4;         # need to only get the single float 32-bit (fs might be 64-bits)
}

:mul.D fd, fs, ft is $(AMODE) & prime=17 & fct=2 & fmt1 & ft & fs & fd & format=0x11 & fdD & fsD & ftD {
    fdD = fsD f* ftD;
}

:mul.PS fd, fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=2 & fmt1 & ft & fs & fd & format=0x16 & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# 0100 01ff fff0 0000 ssss sddd dd00 0111
:neg.S fd, fs is $(AMODE) & prime=17 & fct=7 & fmt1 & fs & fd & format=0x10 {
    fd[0,32] = f- fs:4;
}

:neg.D fd, fs is $(AMODE) & prime=17 & fct=7 & fmt1 & fs & fd & format=0x11 & fdD & fsD {
    fdD = f- fsD;
}

:neg.PS fd, fs is $(AMODE) & REL6=0 & prime=17 & fct=7 & fmt1 & fs & fd & format=0x16 & fsD & fdD {
    fdD = mipsFloatPS(fsD);
}

# 0100 01ff fff0 0000 ssss sddd dd01 0101
# NOTE(review): 1:4 / 1:8 below are the integer bit patterns, not 1.0 encodings;
# this matches the surrounding style but looks suspect -- confirm against upstream.
:recip.S fd, fs is $(AMODE) & prime=17 & ft=0 & fct=21 & fmt2 & fd & fs & format=0x10 {
    fd[0,32] = 1:4 f/ fs:4;
}

:recip.D fd, fs is $(AMODE) & prime=17 & ft=0 & fct=21 & fmt2 & fd & fs & format=0x11 & fdD & fsD {
    fdD = 1:8 f/ fsD;
}

# 0100 01ff fff0 0000 ssss sddd dd00 1000
:round.l.S fd, fs is $(AMODE) & prime=17 & ft=0 & fct=8 & fmt2 & fd & fs & format=0x10 & fdD {
    # Note this instruction is in release 2 and later
    fd_tmp:4 = round(fs:4);          # round returns a float of the same size as the arg
    fdD = trunc(fd_tmp);             # trunc converts to any size integer
}

:round.l.D fd, fs is $(AMODE) & prime=17 & ft=0 & fct=8 & fmt2 & fd & fs & format=0x11 & fsD & fdD {
    # Note this instruction is in release 2 and later
    fsD_tmp:8 = round(fsD);
    fdD = trunc(fsD_tmp);
}

# 0100 01ff fff0 0000 ssss sddd dd00 1100
:round.w.S fd, fs is $(AMODE) & prime=17 & ft=0 & fct=12 & fmt2 & fd & fs & format=0x10 {
    fd_tmp:4 = round(fs:4);
    fd = trunc(fd_tmp);
}

:round.w.D fd, fs is $(AMODE) & prime=17 & ft=0 & fct=12 & fmt2 & fd & fs & format=0x11 & fsD {
    fdD_tmp:8 = round(fsD);          # round returns a float, not an int
    fd[0,32] = trunc(fdD_tmp);       # We need only a 32-bit integer
}

# 0100 01ff fff0 0000 ssss sddd dd01 0110
:rsqrt.S fd, fs is $(AMODE) & prime=17 & ft=0 & fct=22 & fmt2 & fd & fs & format=0x10 {
    fd[0,32] = 1:4 f/ sqrt(fs:4);
}

:rsqrt.D fd, fs is $(AMODE) & prime=17 & ft=0 & fct=22 & fmt2 & fd & fs & format=0x11 & fdD & fsD {
    fdD = 1:8 f/ sqrt(fsD);
}

# 0100 01ff fff0 0000 ssss sddd dd00 0100
:sqrt.S fd, fs is $(AMODE) & prime=17 & ft=0 & fct=4 & fmt2 & fd & fs & format=0x10 {
    fd[0,32] = sqrt(fs:4);
}

:sqrt.D fd, fs is $(AMODE) & prime=17 & ft=0 & fct=4 & fmt2 & fd & fs & format=0x11 & fsD & fdD {
    fdD = sqrt(fsD);
}

# 0100 01ff ffft tttt ssss sddd dd00 0001
:sub.S fmt1 fd, fs, ft is $(AMODE) & prime=17 & fct=1 & fmt1 & ft & fs & fd & format=0x10 {
    fd[0,32] = fs:4 f- ft:4;
}

:sub.D fmt1 fd, fs, ft is $(AMODE) & prime=17 & fct=1 & fmt1 & ft & fs & fd & format=0x11 & fdD & fsD & ftD {
    fdD = fsD f- ftD;
}

:sub.PS fmt1 fd, fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=1 & fmt1 & ft & fs & fd & format=0x16 & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# SWC1: store word from FPR
# 1110 01bb bbbt tttt iiii iiii iiii iiii
:swc1 ft, OFF_BASE is $(AMODE) & prime=57 & OFF_BASE & ft {
    *[ram]:4 OFF_BASE = ft:$(SIZETO4);
}

# 0100 01ff fff0 0000 ssss sddd dd00 1001
:trunc.l.S fd, fs is $(AMODE) & prime=17 & cop1code=0 & fmt2 & fs & fd & fct=9 & format=0x10 & fdD {
    # Note this instruction is in release 2 and later
    fd = trunc(fs:4);
}

:trunc.l.D fd, fs is $(AMODE) & prime=17 & cop1code=0 & fmt2 & fs & fd & fct=9 & format=0x11 & fdD & fsD {
    # Note this instruction is in release 2 and later
    fdD = trunc(fsD);
}

# 0100 01ff fff0 0000 ssss sddd dd00 1101
:trunc.w.S fd, fs is $(AMODE) & prime=17 & cop1code=0 & fmt2 & fs & fd & fct=13 & format=0x10 {
    fd[0,32] = trunc(fs:4);
}

:trunc.w.D fd, fs is $(AMODE) & prime=17 & cop1code=0 & fmt2 & fs & fd & fct=13 & format=0x11 & fsD {
    fd[0,32] = trunc(fsD);
}

############################
#
# COP1X (Extended FP)
#
############################

# 0100 11ss ssst tttt ssss sddd dd01 1110
:alnv.PS fd, fs, ft, rs is $(AMODE) & REL6=0 & prime=0x13 & fct=0x1E & rs & ft & fs & fd & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# 0100 11bb bbbi iiii 0000 0ddd dd00 0001
:ldxc1 fd, INDEX_BASE is $(AMODE) & REL6=0 & prime=0x13 & zero4=0 & fct=0x01 & INDEX_BASE & fd & fdD {
    fdD = *[ram]:8 INDEX_BASE;
}

# LUXC1: load doubleword indexed, unaligned (address forced to 16-byte boundary)
# 0100 11bb bbbi iiii 0000 0ddd dd00 0101
:luxc1 fd, INDEX_BASE is $(AMODE) & REL6=0 & prime=0x13 & zero4=0 & fct=0x05 & INDEX_BASE & fd & fdD {
    ptr:$(ADDRSIZE) = INDEX_BASE & -16:$(ADDRSIZE);
    fdD = *[ram]:8 ptr;
}

# 0100 11bb bbbi iiii 0000 0ddd dd00 0000
:lwxc1 fd, INDEX_BASE is $(AMODE) & REL6=0 & prime=0x13 & zero4=0 & fct=0x0 & INDEX_BASE & fd {
    fd[0,32] = *[ram]:4 INDEX_BASE;
}

# 0100 11rr
# rrrt tttt ssss sddd dd10 0fff   (tail of the MADD opcode bit-pattern comment above)

# MADD.fmt: fused multiply-add, fd = (fs * ft) + fr
:madd.S fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x04 & fmt5 & fd & fr & fs & ft & format1X=0x0 {
    fd[0,32] = (fs:4 f* ft:4) f+ fr:4;   # must do floating arithmetic in 32 bit
}

:madd.D fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x04 & fmt5 & fd & fr & fs & ft & format1X=0x1 & fdD & fsD & frD & ftD {
    fdD = (fsD f* ftD) f+ frD;
}

:madd.PS fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x04 & fmt5 & fd & fr & fs & ft & format1X=0x6 & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# MSUB.fmt: fused multiply-subtract, fd = (fs * ft) - fr
# 0100 11rr rrrt tttt ssss sddd dd10 1fff
:msub.S fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x05 & fmt5 & fd & fr & fs & ft & format1X=0x0 {
    fd[0,32] = (fs:4 f* ft:4) f- fr:4;   # must do floating arithmetic in 32 bit
}

:msub.D fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x05 & fmt5 & fd & fr & fs & ft & format1X=0x1 & fdD & fsD & ftD & frD {
    fdD = (fsD f* ftD) f- frD;
}

:msub.PS fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x05 & fmt5 & fd & fr & fs & ft & format1X=0x6 & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# NMADD.fmt: negated fused multiply-add
# 0100 11rr rrrt tttt ssss sddd dd11 0fff
:nmadd.S fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x06 & fmt5 & fd & fr & fs & ft & format1X=0x0 {
    fd[0,32] = f- (fs:4 f* ft:4) f+ fr:4;   # must do floating arithmetic in 32 bit
}

:nmadd.D fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x06 & fmt5 & fd & fr & fs & ft & format1X=0x1 & fdD & fsD & ftD & frD {
    fdD = f- ((fsD f* ftD) f+ frD);
}

:nmadd.PS fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x06 & fmt5 & fd & fr & fs & ft & format1X=0x6 & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# NMSUB.fmt: negated fused multiply-subtract
# 0100 11rr rrrt tttt ssss sddd dd11 1fff
:nmsub.S fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x07 & fmt5 & fd & fr & fs & ft & format1X=0x0 {
    fd[0,32] = f- (fs:4 f* ft:4) f- fr:4;   # must do floating arithmetic in 32 bit
}

:nmsub.D fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x07 & fmt5 & fd & fr & fs & ft & format1X=0x1 & fdD & fsD & ftD & frD {
    fdD = f- ((fsD f* ftD) f- frD);
}

:nmsub.PS fd, fr, fs, ft is $(AMODE) & REL6=0 & prime=0x13 & op4=0x07 & fmt5 & fd & fr & fs & ft & format1X=0x6 & ftD & fsD & fdD {
    fdD = mipsFloatPS(fsD, ftD);
}

# PREFX: prefetch indexed
# 0100 11bb bbbi iiii hhhh h000 0000 1111
:prefx hint, INDEX_BASE is $(AMODE) & REL6=0 & prime=0x13 & zero5=0x0 & fct=0x0F & hint & INDEX_BASE {
    prefetch(INDEX_BASE, hint:1);
}

# SDXC1: store doubleword indexed from FPR
# 0100 11bb bbbi iiii ssss s000 0000 1001
:sdxc1 fs, INDEX_BASE is $(AMODE) & REL6=0 & prime=0x13 & zero5=0x0 & fct=0x09 & fs & fsD & INDEX_BASE {
    *[ram]:8 INDEX_BASE = fsD;
}

# SUXC1: store doubleword indexed unaligned (address forced to 16-byte boundary)
# 0100 11bb bbbi iiii ssss s000 0000 1101
:suxc1 fs, INDEX_BASE is $(AMODE) & REL6=0 & prime=19 & fct=13 & INDEX_BASE & fs & fsD {
    INDEX_BASE = INDEX_BASE & 0xfffffffffffffff0;   # align the effective address down
    *[ram]:8 INDEX_BASE = fsD;
}

# SWXC1: store word indexed from FPR
# 0100 11bb bbbi iiii ssss s000 0000 1000
:swxc1 fs, INDEX_BASE is $(AMODE) & REL6=0 & prime=19 & INDEX_BASE & fs & fd=0 & fct=8 {
    *[ram]:4 INDEX_BASE = fs:$(SIZETO4);
}

####
#
# Pre-6 semantics
#
####

# BC1F: branch (with delay slot) when the FP condition bit is false
# 0100 0101 000c cc00 iiii iiii iiii iiii
:bc1f Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc=0 & nd=0 & tf=0 & Rel16 {
    tmp:1 = fcsr[23,1];   # The floating point condition bit
    delayslot(1);
    if (tmp != 0) goto inst_next;
    goto Rel16;
}

:bc1f cc,Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc & nd=0 & tf=0 & Rel16 {
#    tmp:1 = getFpCondition(cc:1);
    # Note that other cc conditions are not implemented
    tmp:1 = fcsr[23,1];   # The floating point condition bit
    delayslot(1);
    if (tmp != 0) goto inst_next;
    goto Rel16;
}

# BC1FL: branch likely -- the delay slot is executed only when the branch is taken
# 0100 0101 000c cc10 iiii iiii iiii iiii
:bc1fl Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc=0 & nd=1 & tf=0 & Rel16 {
    tmp:1 = fcsr[23,1];
    if (tmp != 0) goto inst_next;
    delayslot(1);
    goto Rel16;
}

:bc1fl cc,Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc & nd=1 & tf=0 & Rel16 {
#    tmp:1 = getFpCondition(cc:1);
    tmp:1 = fcsr[23,1];   # The floating point condition bit
    if (tmp != 0) goto inst_next;
    delayslot(1);
    goto Rel16;
}

# 0100 0101 000c cc01 iiii iiii
iiii iiii :bc1t Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc=0 & nd=0 & tf=1 & Rel16 { tmp:1 = fcsr[23,1]; delayslot(1); if (tmp == 0) goto inst_next; goto Rel16; } :bc1t cc,Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc & nd=0 & tf=1 & Rel16 { # tmp:1 = getFpCondition(cc:1); tmp:1 = fcsr[23,1]; # The floating point condition bit delayslot(1); if (tmp == 0) goto inst_next; goto Rel16; } # 0100 0101 000c cc11 iiii iiii iiii iiii :bc1tl Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc=0 & nd=1 & tf=1 & Rel16 { tmp:1 = fcsr[23,1]; if (tmp == 0) goto inst_next; delayslot(1); goto Rel16; } :bc1tl cc,Rel16 is $(AMODE) & REL6=0 & prime=17 & copop=8 & cc & nd=1 & tf=1 & Rel16 { # tmp:1 = getFpCondition(cc:1); tmp:1 = fcsr[23,1]; # The floating point condition bit if (tmp == 0) goto inst_next; delayslot(1); goto Rel16; } # The pre-release 6 floating point compare instructions, c.condn.S or .D, set the fcsr bit 23 macro trapIfNaN(x1, x2) { } macro trapIfSNaN(x1, x2) { } :c.f.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=48 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); # Trap if either operand is a Signaling NaN fcsr[23,1] = 0; # Always false } :c.f.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=48 & fmt1 & format=0x11 & fs & ft & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = 0; } :c.f.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=48 & fmt1 & fs & ft & format=0x16 & ftD & fsD & fdD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.un.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=49 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = nan(fs:4) || nan(ft:4); # True if operands are NaN } :c.un.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=49 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = nan(fsD) || nan(ftD); } :c.un.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=49 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.eq.S fs, ft is $(AMODE) & REL6=0 & prime=17 & 
fct=50 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f== ft:4); } :c.eq.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=50 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = (fsD f== ftD); } :c.eq.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=50 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.ueq.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=51 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f== ft:4); } :c.ueq.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=51 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = (fsD f== ftD); } :c.ueq.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=51 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.olt.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=52 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f< ft:4); } :c.olt.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=52 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = (fsD f< ftD); } :c.olt.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=52 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.ult.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=53 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f< ft:4) || nan(fs:4) || nan(ft:4); # Less than or NaN } :c.ult.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=53 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = (fsD f< ftD) || nan(fsD) || nan(ftD); } :c.ult.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=53 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.ole.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=54 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f<= ft:4); } :c.ole.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=54 & fmt1 & fs & ft & 
format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = (fsD f<= ftD); } :c.ole.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=54 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.ule.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=55 & fmt1 & format=0x10 & fs & ft { trapIfSNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f<= ft:4) || nan(fs:4) || nan(ft:4); # Less than or equal or NaN } :c.ule.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=55 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfSNaN(fsD, ftD); fcsr[23,1] = (fsD f<= ftD) || nan(fsD) || nan(ftD); } :c.ule.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=55 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } # The pre-release 6 floating point compare instructions that trap if either operand is NaN (either QNaN or SNaN) :c.sf.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=56 & fmt1 & fs & ft & format=0x10 { trapIfNaN(fs:4, ft:4); fcsr[23,1] = 0; # Always false } :c.sf.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=56 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = 0; } :c.sf.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=56 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.ngle.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=57 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = nan(fs:4) || nan(ft:4); } :c.ngle.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=57 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = nan(fsD) || nan(ftD); } :c.ngle.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=57 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.seq.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=58 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f== ft:4); } :c.seq.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=58 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); 
fcsr[23,1] = (fsD f== ftD); } :c.seq.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=58 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.ngl.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=59 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f== ft:4) || nan(fs:4) || nan(ft:4); } :c.ngl.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=59 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = (fsD f== ftD) || nan(fsD) || nan(ftD); } :c.ngl.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=59 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.lt.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=60 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f< ft:4); } :c.lt.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=60 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = (fsD f< ftD); } :c.lt.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=60 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.nge.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=61 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f< ft:4) || nan(fs:4) || nan(ft:4); } :c.nge.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=61 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = (fsD f< ftD) || nan(fsD) || nan(ftD); } :c.nge.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=61 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } :c.le.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=62 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f<= ft:4); } :c.le.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=62 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = (fsD f<= ftD); } :c.le.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=62 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = 
mipsFloatPS(fsD, ftD); } :c.ngt.S fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=63 & fmt1 & format=0x10 & fs & ft { trapIfNaN(fs:4, ft:4); fcsr[23,1] = (fs:4 f<= ft:4) || nan(fs:4) || nan(ft:4); # Less than or equal or NaN } :c.ngt.D fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=63 & fmt1 & fs & ft & format=0x11 & fsD & ftD { trapIfNaN(fsD, ftD); fcsr[23,1] = (fsD f<= ftD) || nan(fsD) || nan(ftD); } :c.ngt.PS fs, ft is $(AMODE) & REL6=0 & prime=17 & fct=63 & fmt1 & fs & ft & format=0x16 & ftD & fsD { fcsr[23,1] = mipsFloatPS(fsD, ftD); } # 0000 00ss sssc cc00 dddd d000 0000 0001 :movf RD, RSsrc, cc is $(AMODE) & REL6=0 & prime=0 & nd=0 & tf=0 & zero5=0 & fct=1 & RD & RSsrc & cc { # Move if FP condition flag is false tmp:1 = fcsr[23,1]; if (tmp != 0) goto ; RD = RSsrc; } # 0100 01ff fffc cc00 ssss sddd dd01 0001 :movf.S fd, fs, cc is $(AMODE) & REL6=0 & prime=17 & nd=0 & tf=0 & fct=17 & fmt1 & fd & fs & cc & format=0x10 { # Move if FP condition flag is false tmp:1 = fcsr[23,1]; if (tmp != 0) goto ; fd[0,32] = fs:4; } :movf.D fd, fs, cc is $(AMODE) & REL6=0 & prime=17 & nd=0 & tf=0 & fct=17 & fmt1 & fd & fs & cc & format=0x11 & fdD & fsD { # Move if FP condition flag is false tmp:1 = fcsr[23,1]; if (tmp != 0) goto ; fdD = fsD; } :movf.PS fd, fs, cc is $(AMODE) & REL6=0 & prime=17 & nd=0 & tf=0 & fct=17 & fmt1 & fd & fs & cc & format=0x16 & fsD & fdD { fdD = mipsFloatPS(fcsr[23,1], fsD); } # 0100 01ff ffft tttt ssss sddd dd01 0011 :movn.S fd, fs, RTsrc is $(AMODE) & REL6=0 & prime=17 & fct=19 & fmt1 & fd & fs & RTsrc & format=0x10 { if (RTsrc == 0) goto ; fd[0,32] = fs:4; } :movn.D fd, fs, RTsrc is $(AMODE) & REL6=0 & prime=17 & fct=19 & fmt1 & fd & fs & RTsrc & format=0x11 & fdD & fsD { if (RTsrc == 0) goto ; fdD = fsD; } :movn.PS fd, fs, RTsrc is $(AMODE) & REL6=0 & prime=17 & fct=19 & fmt1 & fd & fs & RTsrc & format=0x16 & fsD & fdD { fdD = mipsFloatPS(fcsr[23,1], fsD); } # 0000 00ss sssc cc01 dddd d000 0000 0001 :movt RD, RSsrc, cc is $(AMODE) & REL6=0 & prime=0 & 
nd=0 & tf=1 & zero5=0 & fct=1 & RD & RSsrc & cc { # Move if FP condition flag is true tmp:1 = fcsr[23,1]; if (tmp != 1) goto ; RD = RSsrc; } # 0100 01ff fffc cc01 ssss sddd dd01 0001 :movt.S fd, fs, cc is $(AMODE) & REL6=0 & prime=17 & nd=0 & tf=1 & fct=17 & fmt1 & fd & fs & cc & format=0x10 { # Move if FP condition flag is true tmp:1 = fcsr[23,1]; if (tmp != 1) goto ; fd[0,32] = fs:4; } :movt.D fd, fs, cc is $(AMODE) & REL6=0 & prime=17 & nd=0 & tf=1 & fct=17 & fmt1 & fd & fs & cc & format=0x11 & fdD & fsD { # Move if FP condition flag is true tmp:1 = fcsr[23,1]; if (tmp != 1) goto ; fdD = fsD; } :movt.PS fd, fs, cc is $(AMODE) & REL6=0 & prime=17 & nd=0 & tf=1 & fct=17 & fmt1 & fd & fs & cc & format=0x16 & fsD & ftD & fdD { fdD = mipsFloatPS(fsD, ftD); } # 0100 01ff ffft tttt ssss sddd dd01 0010 :movz.S fd, fs, RTsrc is $(AMODE) & REL6=0 & prime=17 & fct=18 & fmt1 & fd & fs & RTsrc & format=0x10 { if (RTsrc != 0) goto ; fd[0,32] = fs:4; } :movz.D fd, fs, RTsrc is $(AMODE) & REL6=0 & prime=17 & fct=18 & fmt1 & fd & fs & RTsrc & format=0x11 & fdD & fsD { if (RTsrc != 0) goto ; fdD = fsD; } :movz.PS fd, fs, RTsrc is $(AMODE) & REL6=0 & prime=17 & fct=18 & fmt1 & fd & fs & RTsrc & format=0x16 & fsD & fdD { fdD = mipsFloatPS(RTsrc, fsD); } #### # # Release 6 semantics # #### :bc1eqz ft,Rel16 is $(AMODE) & REL6=1 & prime=0x11 & format=0x09 & Rel16 & ft { # Branch if FPR ft LSB equals 0 (false) (This insn replaces bc1f) tmp:1 = ft[0,8] & 0x01; # Only need to check the LSB delayslot(1); if (tmp == 0x00) goto Rel16; } :bc1nez ft,Rel16 is $(AMODE) & REL6=1 & prime=0x11 & format=0x0d & Rel16 & ft & ftD { # Branch if FPR ft LSB equals 1 (true) (This insn replaces bc1t) tmp:1 = ft[0,8] & 0x01; # Only need to check the LSB delayslot(1); if (tmp == 0x01) goto Rel16; } :class.S fd,fs is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & op=0x00 & fct=0x1b & fd & fs { # Set fd to the 10-bit mask that is the IEEE floating point class of the value in fs # Bit 0: signaling SNaN # Bit 1: 
quiet QNaN # Bit 2: negative infinity # Bit 3: negative normalized number # Bit 4: negative subnormal, ie denormalized # Bit 5: negative zero # Bit 6: positive infinity # Bit 7: positive normal # Bit 8: positive subnormal # Bit 9: positive zero # Bits 31-10 are set to 0 tmp_fs:4 = fs:4; # Get just the 4 byte single floating point value tmp_exponent:4 = zext(tmp_fs[23,8]); tmp_fraction:4 = zext(tmp_fs[0,23]); tmp_sign:4 = zext(tmp_fs[31,1]); tmp_b1:4 = zext(tmp_fs[22,1]); # High order bit of fraction, used for NaN tmp_SNaN:4 = zext((tmp_exponent == 0x0ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x0)); tmp_QNaN:4 = zext((tmp_exponent == 0x0ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x01)); tmp_Neg_Infinity:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0ff) && (tmp_fraction == 0x0)); tmp_Neg_Normal:4 = zext((tmp_sign == 0x01) && (tmp_exponent != 0x0) && (tmp_exponent != 0x0ff)); tmp_Neg_Subnormal:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Neg_Zero:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_Pos_Infinity:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0ff) && (tmp_fraction == 0x0)); tmp_Pos_Normal:4 = zext((tmp_sign == 0x0) && (tmp_exponent != 0x0) && (tmp_exponent != 0x0ff)); tmp_Pos_Subnormal:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Pos_Zero:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_fd:4 = 0; tmp_fd = tmp_SNaN | (tmp_QNaN << 1) | (tmp_Neg_Infinity << 2) | (tmp_Neg_Normal << 3) | (tmp_Neg_Subnormal << 4) | (tmp_Neg_Zero << 5) | (tmp_Pos_Infinity << 6) | (tmp_Pos_Normal << 7) | (tmp_Pos_Subnormal << 8) | (tmp_Pos_Zero << 9); fd = zext(tmp_fd); } :class.D fd,fs is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & op=0x00 & fct=0x1b & fd & fs & fdD & fsD { # Set fd to the 10-bit mask that is the IEEE floating point class of the value in fs # Bit 0: signaling SNaN # Bit 1: quiet QNaN # Bit 2: negative infinity 
# Bit 3: negative normalized number # Bit 4: negative subnormal, ie denormalized # Bit 5: negative zero # Bit 6: positive infinity # Bit 7: positive normal # Bit 8: positive subnormal # Bit 9: positive zero # Bits 31-10 are set to 0 tmp_fs:8 = fsD; tmp_sign:4 = zext(tmp_fs[63,1]); tmp_exponent:4 = zext(tmp_fs[52,11]); tmp_fraction:8 = zext(tmp_fs[0,51]); tmp_b1:4 = zext(tmp_fs[51,1]); # High order bit of fraction, used for NaN tmp_SNaN:4 = zext((tmp_exponent == 0x07ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x0)); tmp_QNaN:4 = zext((tmp_exponent == 0x07ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x01)); tmp_Neg_Infinity:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x07ff) && (tmp_fraction == 0x0)); tmp_Neg_Normal:4 = zext((tmp_sign == 0x01) && (tmp_exponent != 0x0) && (tmp_exponent != 0x07ff)); tmp_Neg_Subnormal:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Neg_Zero:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_Pos_Infinity:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x07ff) && (tmp_fraction == 0x0)); tmp_Pos_Normal:4 = zext((tmp_sign == 0x0) && (tmp_exponent != 0x0) && (tmp_exponent != 0x07ff)); tmp_Pos_Subnormal:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Pos_Zero:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_fd:4 = 0; tmp_fd = tmp_SNaN | (tmp_QNaN << 1) | (tmp_Neg_Infinity << 2) | (tmp_Neg_Normal << 3) | (tmp_Neg_Subnormal << 4) | (tmp_Neg_Zero << 5) | (tmp_Pos_Infinity << 6) | (tmp_Pos_Normal << 7) | (tmp_Pos_Subnormal << 8) | (tmp_Pos_Zero << 9); fdD = zext(tmp_fd); } :sel.S fd,fs,ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & fct=0x10 & fd & fs & ft { # Floating point select, if LSB of fd == 1 then fd = ft, else fd = fs # Note that the data in the FPRs might be 32-bit ints, ie there's no interpretation of the values tmp:1 = (fd[0,1] == 0x01); fd = (zext(tmp) * ft) | (zext(tmp == 0x0) * fs); } :sel.D 
fd,fs,ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & fct=0x10 & fd & fs & ft & fdD & fsD & ftD { # Floating point select, if LSB of fd == 1 then fd = ft, else fd = fs # Note that the data in the FPRs might be 64-bit ints, ie there's no interpretation of the values tmp:1 = (fdD[0,1] == 0x01); fdD = (zext(tmp) * ftD) | (zext(tmp == 0x0) * fsD); } # # The R6 floating point compare cmp instruction is described on page 146 of the # MIPS64 Architecture Volume II ISA manual # # Note that when the condition is true the target FPR is set to all ones, and to 0 if false. # If the target is 32-bit and you have 64-bit FPRs, then setting the top word is optional # # Note that when the format is Single and you have 64-bit FPRS, # then you need to first pull out the 32-bit float word and then pass that to the correct p-code # # TBD: Note that when fct bits 5 and 4 (left most 2 bits) are 01 then the condition is negated # :cmp.af.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x00 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfSNaN(fs:4, ft:4); # Trap if either operand is a Signaling NaN fd = 0x0; # Always false } :cmp.af.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x00 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & ft & fsD & ftD { trapIfSNaN(fsD, ftD); fdD = 0x0; } :cmp.un.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x01 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfSNaN(fs:4, ft:4); fd[0,32] = sext((nan(fs:4) || nan(ft:4)) * 0xff); # True if operands are NaN } :cmp.un.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x01 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext((nan(fsD) || nan(ftD)) * 0xff); } :cmp.or.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x11 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfSNaN(fs:4, ft:4); fd[0,32] = sext( (!(nan(fs:4) || nan(ft:4))) * 0xff); # The negated predicate of "c.un" } :cmp.or.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & 
fct=0x11 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext( (!(nan(fsD) || nan(ftD))) * 0xff); } :cmp.eq.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x02 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfSNaN(fs:4, ft:4); fd[0,32] = sext((fs:4 f== ft:4) * 0xff); } :cmp.eq.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x02 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext((fsD f== ftD) * 0xff); } :cmp.une.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x12 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # The negated predicate of cmp.eq trapIfSNaN(fs:4, ft:4); fd[0,32] = sext((fs:4 f!= ft:4) * 0xff); } :cmp.une.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x12 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext((fsD f!= ftD) * 0xff); } :cmp.ueq.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x03 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # NaN or equal trapIfSNaN(fs:4, ft:4); fd[0,32] = sext( ( nan(fs:4) || nan(ft:4) || (fs:4 f== ft:4) ) * 0xff); } :cmp.ueq.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x03 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext( ( nan(fsD) || nan(ftD) || (fsD f== ftD) ) * 0xff); } :cmp.ne.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x13 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # The negated predicate of cmp.ueq trapIfSNaN(fs:4, ft:4); fd[0,32] = sext( (!( ( nan(fs:4) || nan(ft:4) || (fs:4 f== ft:4) ))) * 0xff); } :cmp.ne.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x13 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext( (!( ( nan(fsD) || nan(ftD) || (fsD f== ftD) ))) * 0xff); } :cmp.lt.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x04 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfSNaN(fs:4, ft:4); fd[0,32] = sext((fs:4 
f< ft:4) * 0xff); } :cmp.lt.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x04 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext((fsD f< ftD) * 0xff); } :cmp.ult.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x05 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # NaN or less than trapIfSNaN(fs:4, ft:4); fd[0,32] = sext( ( nan(fs:4) || nan(ft:4) || (fs:4 f< ft:4) ) * 0xff); } :cmp.ult.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x05 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext( ( nan(fsD) || nan(ftD) || (fsD f< ftD) ) * 0xff); } :cmp.le.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x06 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # Less than or equal trapIfSNaN(fs:4, ft:4); fd[0,32] = sext((fs:4 f<= ft:4) * 0xff); } :cmp.le.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x06 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext((fsD f<= ftD) * 0xff); } :cmp.ule.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x07 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # NaN or less than or equal trapIfSNaN(fs:4, ft:4); fd[0,32] = sext( ( nan(fs:4) || nan(ft:4) || (fs:4 f<= ft:4) ) * 0xff); } :cmp.ule.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x07 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfSNaN(fsD, ftD); fdD = sext( ( nan(fsD) || nan(ftD) || (fsD f<= ftD) ) * 0xff); } # The cmp instructions that signal (ie trap) if either of the operands are NaN :cmp.saf.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x08 & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd = 0x0; # Always false } :cmp.saf.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x08 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = 0x0; } :cmp.sun.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x09 & bit5=0 & fmt6 & 
format=0x14 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fs:4, ft:4); fd[0,32] = sext((nan(fs:4) || nan(ft:4)) * 0xff); } :cmp.sun.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x09 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext((nan(fsD) || nan(ftD)) * 0xff); } :cmp.sor.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x19 & bit5=0 & fmt6 & format=0x14 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fs:4, ft:4); fd[0,32] = sext( (!(nan(fs:4) || nan(ft:4))) * 0xff); # negate of cmp.sun } :cmp.sor.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x19 & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( (!(nan(fsD) || nan(ftD))) * 0xff); } :cmp.seq.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0a & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd[0,32] = sext((fs:4 f== ft:4) * 0xff); } :cmp.seq.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0a & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext((fsD f== ftD) * 0xff); } :cmp.sune.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x1a & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd[0,32] = sext((fs:4 f!= ft:4) * 0xff); } :cmp.sune.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x1a & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext((fsD f!= ftD) * 0xff); } :cmp.sueq.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0b & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { # NaN or equal trapIfNaN(fs:4, ft:4); fd[0,32] = sext( ( nan(fs:4) || nan(ft:4) || (fs:4 f== ft:4) ) * 0xff); } :cmp.sueq.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0b & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( ( nan(fsD) || nan(ftD) || (fsD f== ftD) ) * 0xff); } :cmp.sne.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & 
fct=0x1b & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); # negate of cmp.sueq fd[0,32] = sext( (! ( nan(fs:4) || nan(ft:4) || (fs:4 f== ft:4) )) * 0xff); } :cmp.sne.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x1b & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( (! ( nan(fsD) || nan(ftD) || (fsD f== ftD) )) * 0xff); } :cmp.slt.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0c & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd[0,32] = sext( (fs:4 f< ft:4) * 0xff); } :cmp.slt.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0c & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( (fsD f< ftD) * 0xff); } :cmp.sult.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0d & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd[0,32] = sext( ( nan(fs:4) || nan(ft:4) || (fs:4 f< ft:4) ) * 0xff); } :cmp.sult.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0d & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( ( nan(fsD) || nan(ftD) || (fsD f< ftD) ) * 0xff); } :cmp.sle.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0e & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd[0,32] = sext( (fs:4 f<= ft:4) * 0xff); } :cmp.sle.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0e & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( (fsD f<= ftD) * 0xff); } :cmp.sule.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0f & bit5=0 & fmt6 & format=0x14 & fd & fs & ft { trapIfNaN(fs:4, ft:4); fd[0,32] = sext( ( nan(fs:4) || nan(ft:4) || (fs:4 f<= ft:4) ) * 0xff); } :cmp.sule.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & fct=0x0f & bit5=0 & fmt6 & format=0x15 & fd & fdD & fs & fsD & ft & ftD { trapIfNaN(fsD, ftD); fdD = sext( ( nan(fsD) || nan(ftD) || (fsD f<= ftD) ) * 0xff); } 
# ==================================================================
# NOTE(review): This is SLEIGH (Ghidra processor spec), not Python.
# The extracted text had lost its original line breaks, and the
# angle-bracketed local labels appear to have been stripped (the bare
# "goto ;" statements in rint.S / rint.D should read "goto <label>;")
# -- confirm against the upstream Ghidra MIPS sources before building.
# Code tokens below are unchanged; only whitespace and comment lines
# were added to restore readability.
# ==================================================================

# --- MIPS R6 FPU round-to-integral: rounding chosen by fcsr RM bits ---
:rint.S fd, fs is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & fct=0x1a
{
    # floating point round to integral floating point
    rm_tmp:1 = fcsr[0,2];   # Get RM rounding mode bits
    fs_tmp:4 = fs:4;
    if (rm_tmp == 0) goto ;
    fd[0,32] = floor(fs_tmp);   # RM is 1, no rounding, and floor returns a float
    goto ;
    fd[0,32] = round(fs_tmp);   # round returns a float
}

:rint.D fd, fs is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & fsD & fdD & fct=0x1a
{
    # floating point round to integral floating point
    rm_tmp:1 = fcsr[0,2];   # Get RM rounding mode bits
    if (rm_tmp == 0) goto ;
    fdD = floor(fsD);   # RM is 1, no rounding, and floor returns a float
    goto ;
    fdD = round(fsD);   # round returns a float
}

# --- MIPS R6 FPU min/max family (selection implemented branch-free
#     via multiply-by-condition; NaN handling marked TBD upstream) ---
:min.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x1c
{
    # set floating point fd to the min of fs and ft, TBD special case for NaN
    tmp_cond:1 = fs:4 f< ft:4;
    fd[0,32] = (fs:4 * zext(tmp_cond == 1)) | (ft:4 * zext(tmp_cond == 0) );
}

:min.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x1c & fsD & fdD & ftD
{
    tmp_cond:1 = fsD f< ftD;
    fdD = zext( (fsD * zext(tmp_cond == 1)) | (ftD * zext(tmp_cond == 0) ) );
}

:max.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x1d
{
    # set floating point fd to the max of fs and ft, TBD special case for NaN
    tmp_cond:1 = fs:4 f> ft:4;
    fd[0,32] = (fs:4 * zext(tmp_cond == 1)) | (ft:4 * zext(tmp_cond == 0) );
}

:max.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x1d & fsD & fdD & ftD
{
    tmp_cond:1 = fsD f> ftD;
    fdD = zext( (fsD * zext(tmp_cond == 1)) | (ftD * zext(tmp_cond == 0) ) );
}

:mina.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x1e
{
    # set floating point fd to the min of absolute values of fs and ft, TBD special case for NaN
    tmp_cond:1 = abs(fs:4) f< abs(ft:4);
    fd[0,32] = (fs:4 * zext(tmp_cond == 1)) | (ft:4 * zext(tmp_cond == 0) );
}

:mina.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x1e & fsD & fdD & ftD
{
    tmp_cond:1 = abs(fsD) f< abs(ftD);
    fdD = zext( (fsD * zext(tmp_cond == 1)) | (ftD * zext(tmp_cond == 0) ) );
}

:maxa.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x1f
{
    # set floating point fd to the max of absolute values of fs and ft, TBD special case for NaN
    tmp_cond:1 = abs(fs:4) f> abs(ft:4);
    fd[0,32] = (fs:4 * zext(tmp_cond == 1)) | (ft:4 * zext(tmp_cond == 0) );
}

:maxa.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x1f & fsD & fdD & ftD
{
    tmp_cond:1 = abs(fsD) f> abs(ftD);
    fdD = zext( (fsD * zext(tmp_cond == 1)) | (ftD * zext(tmp_cond == 0) ) );
}

# --- MIPS R6 fused multiply-add/subtract ---
:maddf.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x18
{
    # set floating point fd = fd + fs * ft, using 32-bit floating values
    fd[0,32] = fd:4 f+ (fs:4 f* ft:4);
}

:maddf.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x18 & fsD & fdD & ftD
{
    fdD = fdD f+ (fsD f* ftD);
}

:msubf.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x19
{
    # set floating point fd = fd - fs * ft, using 32-bit floating values
    fd[0,32] = fd:4 f- (fs:4 f* ft:4);
}

:msubf.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x19 & fsD & fdD & ftD
{
    fdD = fdD f- (fsD f* ftD);
}

# --- MIPS R6 FPR select-on-zero/nonzero of ft's low bit ---
:seleqz.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x14
{
    # Set floating point register fd to fs if ft[0] == 0, else if == 1 then set fd to 0, TBD special case for NaN
    # Note that the description of these select instructions in the MIPS manual does not properly use the C conditional operator
    fd = zext(fs * zext(ft[0,1] == 0));
}

:seleqz.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x14 & fsD & fdD & ftD
{
    fdD = zext(fsD * zext(ftD[0,1] == 0));
}

:selnez.S fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x10 & cop1code=0x0 & fs & fd & ft & fct=0x17
{
    # set floating point register fd to fs if ft[0] == 1, else if == 0 then set fd to 0, TBD special case for NaN
    fd = zext(fs * zext(ft[0,1] == 1));
}

:selnez.D fd, fs, ft is $(AMODE) & REL6=1 & prime=0x11 & format=0x11 & cop1code=0x0 & fs & fd & ft & fct=0x17 & fsD & fdD & ftD
{
    fdD = zext(fsD * zext(ftD[0,1] == 1));
}

================================================
FILE: pypcode/processors/MIPS/data/languages/mipsmicro.sinc
================================================

# --- microMIPS first-halfword token: opcode + 16-bit-form operand fields ---
define token micinstr (16)
    mic_op=(10,15) mic_code=(0,9) mic_code4=(0,3) mic_code4s=(0,3) signed mic_code4r6=(6,9) mic_off4r6=(4,7)
    mic_base0=(0,4) mic_base4=(4,6) mic_index=(5,9)
    mic_rd7=(7,9) mic_rd7lo=(7,9) mic_rd1=(1,3) mic_rd1lo=(7,9)
    mic_rs0=(0,2) mic_rs0lo=(0,2) mic_rs1=(1,3) mic_rs1lo=(1,3) mic_rs4=(4,6) mic_rs4lo=(4,6) mic_rs7=(7,9) mic_rs7lo=(7,9)
    mic_rt3=(3,5) mic_rt3lo=(3,5) mic_rt4=(4,6) mic_rt4lo=(4,6) mic_rt7=(7,9) mic_rt7lo=(7,9)
    mic_rd32_5=(5,9) mic_rd32_5lo=(5,9) mic_rd32_11=(11,15) mic_rd32_0=(0,4)
    mic_rs32_0=(0,4) mic_rs32_0a=(0,4) mic_rs32_0b=(0,4) mic_rs32_0lo=(0,4) mic_rs32_hw=(0,4) mic_rs32_5=(5,9)
    mic_rt32_0=(0,4) mic_rt32_5=(5,9) mic_rt32_5a=(5,9) mic_rt32_5lo=(5,9)
    mic_fd=(0,4) mic_fdD=(0,4) mic_fs=(0,4) mic_fsD=(0,4) mic_fs_5=(5,9) mic_fsD_5=(5,9) mic_ft_0=(0,4) mic_ft_5=(5,9) mic_ftD_5=(5,9)
    mic_ct=(0,4) mic_stype=(0,4) mic_funci=(5,9) mic_cop5=(5,9) mic_impl=(0,4)
    mic_pcf=(0,4) mic_pcz=(3,4) mic_cc=(2,4) mic_pcf2=(2,4) mic_cp2z=(0,1) mic_rlist=(5,9)
    mic_imm10=(0,9) mic_imm9=(1,9) mic_imm9s=(1,9) signed mic_imm9e=(0,8) mic_imm7=(0,6) mic_imm6=(1,6) mic_imm6r6=(0,5)
    mic_imm5=(0,4) mic_imm5s=(0,4) signed mic_imm5r6=(5,9) mic_imm4=(1,4) mic_imm4s=(1,4) signed mic_imm3=(1,3)
    mic_imm03=(0,3) mic_imm02=(0,2) mic_imm01=(0,1)
    mic_bit0=(0,0) mic_bit01=(0,1) mic_bit3=(3,3) mic_bit10=(10,10)
    mic_sub2=(5,9) mic_csub=(6,9) mic_csubr6=(0,3) mic_jalr=(5,9) mic_jalrr6=(0,4)
    mic_off12=(0,11) mic_off10=(0,9) mic_soff10=(0,9) signed mic_off7=(0,6) mic_soff7=(0,6) signed mic_off4=(0,3)
    mic_break=(4,9) mic_breakr6=(0,5) mic_ja32=(6,15) mic_list=(4,5) mic_listr6=(8,9) mic_cofun=(3,15)
    mic_encrs=(1,3) mic_encrt=(4,6) mic_encrd=(7,9) mic_encre=(7,9) mic_encrt2=(7,9) mic_sa=(1,3)
;

# --- microMIPS second-halfword token for 32-bit instruction forms ---
define token micinstrb (16)
    micb_imm16=(0,15) micb_simm16=(0,15) signed
    micb_poolax=(0,5) micb_poolfx=(0,5) micb_bp=(9,10) micb_bp8=(8,10) micb_flt6=(6,12)
    micb_fmt14=(14,14) micb_fmt=(13,14) micb_fmt8=(8,9) micb_fmt9=(9,10) micb_fmt10=(10,11)
    micb_spec=(0,5) micb_axf=(6,15) micb_axf2=(0,9) micb_axf3=(6,11) micb_code10=(6,15) micb_asel=(6,8)
    micb_fxf=(6,15) micb_fxf2=(0,10) micb_fxf3=(0,7) micb_fxf4=(6,13) micb_fxf5=(0,8)
    micb_bit10=(10,10) micb_bit11=(11,11) micb_bit12=(12,12) micb_bit15=(15,15) micb_cc=(13,15)
    micb_rd32=(11,15) micb_rd32lo=(11,15) micb_rs32=(6,10)
    micb_fd=(11,15) micb_fdD=(11,15) micb_fr=(6,10) micb_frD=(6,10) micb_rx=(6,10)
    micb_pos=(6,10) micb_size=(11,15) micb_sa=(11,15) micb_sa9=(9,10) micb_hint=(11,15)
    micb_offset12=(0,11) micb_offset12s=(0,11) signed micb_offset11=(0,10) micb_offset11s=(0,10) signed micb_offset9=(0,8) micb_offset9s=(0,8) signed
    micb_func12=(12,15) micb_trap=(6,11) micb_cond=(6,9) micb_cond2=(6,10) micb_sub9=(9,11)
    micb_cop=(0,2) micb_cofun=(3,15)
    micb_z14=(14,15) micb_z12=(12,15) micb_z11=(11,12) micb_z9=(9,10) micb_z68=(6,8) micb_z67=(6,7)
    micb_sel=(11,13) micb_cpf=(6,10)
;

# --- Register bindings for the 16-bit (3-bit) register fields ---
attach variables [ mic_rd7 mic_rd1 mic_rt4 mic_rs1 mic_rs7 mic_rs0 mic_rs4 mic_rt3 mic_rt7 mic_base4 ] [ s0 s1 v0 v1 a0 a1 a2 a3 ];

# --- rdhwr hardware-register selector bindings ---
attach variables [ mic_rs32_hw ] [ HW_CPUNUM HW_SYNCI_STEP HW_CC HW_CCRe HW_PerfCtr HW_XNP HW_RES6 HW_RES7 HW_RES8 HW_RES9 HW_RES10 HW_RES11 HW_RES12 HW_RES13 HW_RES14 HW_RES15 HW_RES16 HW_RES17 HW_RES18 HW_RES19 HW_RES20 HW_RES21 HW_RES22 HW_RES23 HW_RES24 HW_RES25 HW_RES26 HW_RES27 HW_RES28 HW_ULR HW_RESIM30 HW_RESIM31 ];

@ifdef MIPS64
# MIPS64: the *lo fields map to the low 32-bit halves of the 64-bit GPRs
attach variables [ mic_rd7lo mic_rd1lo mic_rt4lo mic_rs1lo mic_rs7lo mic_rs0lo mic_rs4lo mic_rt3lo mic_rt7lo ] [ s0_lo s1_lo v0_lo v1_lo a0_lo a1_lo a2_lo a3_lo ];
attach variables [ mic_rs32_0lo mic_rt32_5lo micb_rd32lo mic_rd32_5lo ext_32_rs1lo] [ zero_lo at_lo v0_lo v1_lo a0_lo a1_lo a2_lo a3_lo t0_lo t1_lo t2_lo t3_lo t4_lo t5_lo t6_lo t7_lo s0_lo s1_lo s2_lo s3_lo s4_lo s5_lo s6_lo s7_lo t8_lo t9_lo k0_lo k1_lo gp_lo sp_lo s8_lo ra_lo ];
@else
# MIPS32: the *lo fields are the full 32-bit GPRs themselves
attach variables [ mic_rd7lo mic_rd1lo mic_rt4lo mic_rs1lo mic_rs7lo mic_rs0lo mic_rs4lo mic_rt3lo mic_rt7lo ] [ s0 s1 v0 v1 a0 a1 a2 a3 ];
attach variables [ mic_rs32_0lo mic_rt32_5lo micb_rd32lo mic_rd32_5lo ext_32_rs1lo] [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 s0 s1 s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra ];
@endif

# --- Special encoded-register bindings used by the 16-bit mov/ext forms ---
attach variables [ mic_encrs mic_encrt ext_16_rs] [ zero s1 v0 v1 s0 s2 s3 s4 ];
attach variables [ mic_encrt2] [ zero s1 v0 v1 a0 a1 a2 a3 ];
attach variables [ mic_encrd] [ a1 a1 a2 a0 a0 a0 a0 a0 ];
attach variables [ mic_encre] [ a2 a3 a3 s5 s6 a1 a2 a3 ];

# --- Full 5-bit GPR bindings ---
attach variables [ mic_rd32_5 mic_rs32_0 mic_rs32_5 mic_rt32_5 mic_rd32_11 mic_rd32_0 ext_32_base micb_rd32 ext_32_rd micb_rs32 mic_base0 ext_32_rs1 mic_index] [ zero at v0 v1 a0 a1 a2 a3 t0 t1 t2 t3 t4 t5 t6 t7 s0 s1 s2 s3 s4 s5 s6 s7 t8 t9 k0 k1 gp sp s8 ra ];

# --- FPR bindings (single precision) ---
attach variables [ mic_fs mic_ft_5 micb_fd micb_fr mic_fs_5 mic_fd] [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ];

@if FREGSIZE == "4"
# For 64-bit floating point Double instruction operands need to bond two 32-bit FPRs
attach variables [ mic_fsD mic_ftD_5 micb_fdD micb_frD mic_fsD_5 mic_fdD] [ f0_1 _ f2_3 _ f4_5 _ f6_7 _ f8_9 _ f10_11 _ f12_13 _ f14_15 _ f16_17 _ f18_19 _ f20_21 _ f22_23 _ f24_25 _ f26_27 _ f28_29 _ f30_31 _ ];
@else # FREGSIZE == "8"
attach variables [ mic_fsD mic_ftD_5 micb_fdD micb_frD mic_fsD_5 mic_fdD] [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ];
@endif

# --- Display names for the save/restore register-list field ---
attach names [ ext_t4_name][ _ "s0" "s0-s1" "s0-s2" "s0-s3" "s0-s4" "s0-s5" "s0-s6" "s0-s7" "s0-s8" _ _ _ _ _ _ ];

# --- Register-operand subconstructors; the REL6 variants select between
#     pre-R6 and R6 field encodings, and the *=0 variants read $zero as 0 ---
RD5L: mic_rd32_5 is mic_rd32_5 & mic_rd32_5lo { export mic_rd32_5lo; }
RD5L: mic_rd32_5 is mic_rd32_5 & mic_rd32_5lo=0 { tmp:4 = 0; export tmp; }
RD7R1: mic_rd7 is mic_rd7 & REL6=0 { export mic_rd7; }
RD7R1: mic_rd1 is mic_rd1 & REL6=1 { export mic_rd1; }
RSEXTL: ext_32_rs1 is ext_32_rs1 & ext_32_rs1lo { export ext_32_rs1lo; }
RSEXTL: ext_32_rs1 is ext_32_rs1 & ext_32_rs1lo=0 { tmp:4 = 0; export tmp; }
RS0L: mic_rs32_0 is mic_rs32_0 & mic_rs32_0lo { export mic_rs32_0lo; }
RS0L: mic_rs32_0 is mic_rs32_0 & mic_rs32_0lo=0 { tmp:4 = 0; export tmp; }
RS4L: mic_rs4 is mic_rs4 & mic_rs4lo { export mic_rs4lo; }
RS0R4: mic_rs0 is mic_rs0 & REL6=0 { export mic_rs0; }
RS0R4: mic_rs4 is mic_rs4 & REL6=1 { export mic_rs4; }
RS0R5: mic_rs32_0 is mic_rs32_0 & REL6=0 { export mic_rs32_0; }
RS0R5: mic_rs32_0 is mic_rs32_0 & mic_rs32_0=0 & REL6=0 { tmp:$(REGSIZE) = 0; export tmp; }
RS0R5: mic_rs32_5 is mic_rs32_5 & REL6=1 { export mic_rs32_5; }
RS0R5: mic_rs32_5 is mic_rs32_5 & mic_rs32_5=0 & REL6=1 { tmp:$(REGSIZE) = 0; export tmp; }
RS1R7L: mic_rs1 is mic_rs1 & mic_rs1lo & REL6=0 { export mic_rs1lo; }
RS1R7L: mic_rs7 is mic_rs7 & mic_rs7lo & REL6=1 { export mic_rs7lo; }
RT4L: mic_rt4 is mic_rt4 & mic_rt4lo { export mic_rt4lo; }
RT5L: mic_rt32_5 is mic_rt32_5 & mic_rt32_5lo { export mic_rt32_5lo; }
RT5L: mic_rt32_5 is mic_rt32_5 & mic_rt32_5lo=0 { tmp:4 = 0; export tmp; }
RT3R7: mic_rt3 is mic_rt3 & REL6=0 { export mic_rt3; }
RT3R7: mic_rt7 is mic_rt7 & REL6=1 { export mic_rt7; }
RST7R5: mic_rs7 is mic_rs7 & REL6=0 { export mic_rs7; }
RST7R5: mic_rt32_5 is mic_rt32_5 & REL6=1 { export mic_rt32_5; }
RST7R5: mic_rt32_5 is mic_rt32_5 & mic_rt32_5=0 & REL6=1 { tmp:$(REGSIZE) = 0; export tmp; }
RT5RD5: ext_32_rd is ext_32_rd & REL6=0 { export ext_32_rd; }
RT5RD5: ext_32_rd is ext_32_rd & ext_32_rd=0 & REL6=0 { tmp:$(REGSIZE) = 0; export tmp; }
RT5RD5: micb_rd32 is micb_rd32 & REL6=1 { export micb_rd32; }
RT5RD5: micb_rd32 is micb_rd32 & micb_rd32=0 & REL6=1 { tmp:$(REGSIZE) = 0; export tmp; }
RS0RT5: mic_rs32_0 is mic_rs32_0 & REL6=0 { export mic_rs32_0; }
RS0RT5: mic_rs32_0 is mic_rs32_0 & mic_rs32_0=0 & REL6=0 { tmp:$(REGSIZE) = 0; export tmp; }
RS0RT5: mic_rt32_5 is mic_rt32_5 & REL6=1 { export mic_rt32_5; }
RS0RT5: mic_rt32_5 is mic_rt32_5 & mic_rt32_5=0 & REL6=1 { tmp:$(REGSIZE) = 0; export tmp; }
ENCRS: mic_encrs is mic_encrs & REL6=0 { export mic_encrs; }
ENCRS: mic_encrs is mic_encrs & mic_encrs=0 & REL6=0 { tmp:$(REGSIZE) = 0; export tmp; }
ENCRS: ext_16_rs is ext_16_rs & REL6=1 { export ext_16_rs; }
ENCRS: ext_16_rs is ext_16_rs & ext_16_rs=0 & REL6=1 { tmp:$(REGSIZE) = 0; export tmp; }

# --- Branch/jump target address subconstructors (computed at disassembly
#     time in the [...] context section, exported as a memory reference) ---
Abs26_mic1: reloc is ext_32_code & micb_imm16 [ reloc=((inst_start+4) $and 0xfffffffff8000000)+2*(micb_imm16 | (ext_32_code << 16)); ] { export *:$(ADDRSIZE) reloc; }
Abs26_mic2: reloc is ext_32_code & micb_imm16 [ reloc=((inst_start+4) $and 0xfffffffff0000000)+4*(micb_imm16 | (ext_32_code << 16)); ] { export *:$(ADDRSIZE) reloc; }
Rel26_mic: reloc is micb_imm16 [ reloc=inst_start+4+2*((ext_32_codes << 16) | micb_imm16); ] { export *:$(ADDRSIZE) reloc; }
Rel21_mic: reloc is micb_imm16 [ reloc=inst_start+4+2*((ext_32_imm5s << 16) | micb_imm16); ] { export *:$(ADDRSIZE) reloc; }
Rel16_mic: reloc is micb_simm16 [ reloc=inst_start+4+2*micb_simm16; ] { export *:$(ADDRSIZE) reloc; }
Rel10_mic: reloc is mic_soff10 [ reloc=inst_start+2+2*mic_soff10; ] { export *:$(ADDRSIZE) reloc; }
Rel7_mic: reloc is mic_soff7 [ reloc=inst_start+2+2*mic_soff7; ] { export *:$(ADDRSIZE) reloc; }

# --- Immediate decode tables for the compact 16-bit encodings; special
#     field values map to non-linear immediates per the microMIPS spec ---
EXT_CODE3: val is mic_imm3=0 [ext_off16_s = 0x0001; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE3: val is mic_imm3=7 [ext_off16_s = 0xFFFF; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE3: val is mic_imm3 [ext_off16_s = mic_imm3 << 2; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x0 [ext_off16_u = 0x80; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x1 [ext_off16_u = 0x1; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x2 [ext_off16_u = 0x2; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x3 [ext_off16_u = 0x3; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x4 [ext_off16_u = 0x4; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x5 [ext_off16_u = 0x7; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x6 [ext_off16_u = 0x8; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x7 [ext_off16_u = 0xf; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x8 [ext_off16_u = 0x10; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0x9 [ext_off16_u = 0x1f; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0xa [ext_off16_u = 0x20; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0xb [ext_off16_u = 0x3f; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0xc [ext_off16_u = 0x40; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0xd [ext_off16_u = 0xff; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0xe [ext_off16_u = 0x8000; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4A: val is mic_imm03=0xf [ext_off16_u = 0xffff; val = ext_off16_u; ] { export *[const]:2 val; }
EXT_CODE4B: val is mic_code4=0xf [ ext_off16_s = 0xffff; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE4B: val is mic_code4 [ ext_off16_s = mic_code4; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE4C: val is mic_code4 [ val = mic_code4 << 1; ] { export *[const]:2 val; }
EXT_CODE4D: val is mic_code4s [ val = mic_code4s << 2; ] { export *[const]:2 val; }
EXT_CODE4E: val is mic_code4 & REL6=0 [ val = mic_code4 << 2; ] { export *[const]:2 val; }
EXT_CODE4E: val is mic_off4r6 & REL6=1 [ val = mic_off4r6 << 2; ] { export *[const]:2 val; }
EXT_CODE5R6: val is mic_imm5r6 [ val = mic_imm5r6 << 2; ] { export *[const]:2 val; }
EXT_CODE5: val is mic_imm5 [ val = mic_imm5 << 2; ] { export *[const]:2 val; }
EXT_CODE7: val is mic_imm7=0x7f [ ext_off16_s = 0xffff; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE7: val is mic_imm7 [ ext_off16_s = mic_imm7; val = ext_off16_s; ] { export *[const]:2 val; }
EXT_CODE7A: val is mic_soff7 [ val = mic_soff7 << 2; ] { export *[const]:2 val; }
EXT_CODE9: val is mic_imm9=0x000 [ext_off16_s = 0x0100; val = ext_off16_s << 2; ] { export *[const]:2 val; }
EXT_CODE9: val is mic_imm9=0x001 [ext_off16_s = 0x0101; val = ext_off16_s << 2; ] { export *[const]:2 val; }
EXT_CODE9: val is mic_imm9=0x1fe [ext_off16_s = 0xfefe; val = ext_off16_s << 2; ] { export *[const]:2 val; }
EXT_CODE9: val is mic_imm9=0x1ff [ext_off16_s = 0xfeff; val = ext_off16_s << 2; ] { export *[const]:2 val; }
EXT_CODE9: val is mic_imm9s [ val = mic_imm9s << 2; ] { export *[const]:2 val; }
EXT_CODE9E: val is micb_offset9s [ val = micb_offset9s << 0; ] { export *[const]:2 val; }
EXT_CODE12: val is micb_offset12s [ val = micb_offset12s << 0; ] { export *[const]:2 val; }
EXT_CODE16: val is micb_code10 [ val = (ext_32_imm6 << 10) | micb_code10; ] { export *[const]:2 val; }
EXT_MS16: val is micb_simm16 [ val = micb_simm16 << 0; ] { export *[const]:2 val; }
EXT_MS18: val is micb_imm16 [ val = (ext_32_imm2s << 19) | (micb_imm16 << 3); ] { export *[const]:4 val; }
EXT_MS19: val is micb_imm16 [ val = (ext_32_imm3s << 18) | (micb_imm16 << 2); ] { export *[const]:4 val; }
EXT_MS32: val is micb_simm16 [ val = micb_simm16 << 16; ] { export *[const]:4 val; }
EXT_MS48: val is micb_simm16 [ val = micb_simm16 << 32; ] { export *[const]:8 val; }
EXT_MS64: val is micb_simm16 [ val = micb_simm16 << 48; ] { export *[const]:8 val; } EXT_MSPC: val is ext_32_imm3s & micb_imm16 & REL6=1 [val = (ext_32_imm3s << 18) | (micb_imm16 << 2); ] { export *[const]:4 val; } EXT_MSPC: val is ext_32_addims & micb_imm16 & REL6=0 [val = (ext_32_addims << 18) | (micb_imm16 << 2); ] { export *[const]:4 val; } EXT_MU23: val is ext_32_code & micb_cofun [val = (ext_32_code << 13) | micb_cofun; ] { export *[const]:4 val; } EXT_MU6: val is mic_imm6 [val = mic_imm6 << 2; ] { export *[const]:1 val; } EXT_SA: val is mic_sa=0 [ val = 8; ] { export *[const]:1 val; } EXT_SA: val is mic_sa [ val = mic_sa << 0; ] {export *[const]:1 val; } EXT_SA9: val is micb_sa9 [ val = micb_sa9+1; ] { export *[const]:1 val; } DIDISP: is mic_rs32_0=0 {} DIDISP: mic_rs32_0 is mic_rs32_0 {} RTIMP: is mic_rt32_5=31 {} RTIMP: mic_rt32_5", " is mic_rt32_5 {} SIZEP: val is micb_size [ val = micb_size+1; ] { export *[const]:1 val; } SIZEPLG: val is micb_size [ val = micb_size+1+32; ] { export *[const]:1 val; } SIZEQ: val is micb_size & micb_pos [ val = micb_size + 1 - micb_pos; ] { export *[const]:1 val; } SIZEQLG: val is micb_size & micb_pos [ val = micb_size + 1 - micb_pos + 32; ] { export *[const]:1 val; } POSHI: val is micb_pos [ val = micb_pos+32; ] { export *[const]:1 val; } CPSEL: is micb_sel=0 {} CPSEL: ", "micb_sel is micb_sel {} STYPE: is mic_stype=0 {} STYPE: val is mic_stype [ val = mic_stype << 0; ] {} SDB16: mic_code4 is mic_code4 & REL6=0 { export *[const]:1 mic_code4; } SDB16: mic_code4r6 is mic_code4r6 & REL6=1 { export *[const]:1 mic_code4r6; } SA32: val is micb_sa [ val = micb_sa+32; ] { export *[const]:1 val; } COP2CC: is mic_cc=0 {} COP2CC: val", " is mic_cc [ val = mic_cc << 0; ] {} LOAD_S8: is ext_t4=8 {} LOAD_S8: is ext_t4 { MemSrcCast(s8,tsp); tsp = tsp+$(REGSIZE); } LOAD_S7: is ext_t4=7 {} LOAD_S7: is LOAD_S8 { MemSrcCast(s7,tsp); tsp = tsp+$(REGSIZE); build LOAD_S8; } LOAD_S6: is ext_t4=6 {} LOAD_S6: is LOAD_S7 { MemSrcCast(s6,tsp); tsp = 
tsp+$(REGSIZE); build LOAD_S7; } LOAD_S5: is ext_t4=5 {} LOAD_S5: is LOAD_S6 {MemSrcCast(s5,tsp); tsp = tsp+$(REGSIZE); build LOAD_S6; } LOAD_S4: is ext_t4=4 {} LOAD_S4: is LOAD_S5 { MemSrcCast(s4,tsp); tsp = tsp+$(REGSIZE); build LOAD_S5; } LOAD_S3: is ext_t4=3 {} LOAD_S3: is LOAD_S4 { MemSrcCast(s3,tsp); tsp = tsp+$(REGSIZE); build LOAD_S4; } LOAD_S2: is ext_t4=2 {} LOAD_S2: is LOAD_S3 { MemSrcCast(s2,tsp); tsp = tsp+$(REGSIZE); build LOAD_S3; } LOAD_S1: is ext_t4=1 {} LOAD_S1: is LOAD_S2 { MemSrcCast(s1,tsp); tsp = tsp+$(REGSIZE); build LOAD_S2; } LOAD_S0: is ext_t4=0 {} LOAD_S0: is LOAD_S1 { MemSrcCast(s0,tsp); tsp = tsp+$(REGSIZE); build LOAD_S1; } LOAD_SREG: is ext_t4=0 {} LOAD_SREG: ext_t4_name"," is LOAD_S0 & ext_t4_name { build LOAD_S0; } LOAD_RA: is ext_tra=0 {} LOAD_RA: "ra," is ext_tra=1 { MemSrcCast(ra,tsp); tsp = tsp+$(REGSIZE); } LOAD_TOP: LOAD_SREG^LOAD_RA EXT_CODE12(ext_32_base) is LOAD_SREG & LOAD_RA & ext_32_base & ext_32_rlist & EXT_CODE12 [ext_t4 = ext_32_rlist $and 0xf; ext_tra = ext_32_rlist >> 4; ] { build EXT_CODE12; tmp:$(REGSIZE) = sext(EXT_CODE12); tsp = ext_32_base + tmp; build LOAD_SREG; build LOAD_RA; } LOAD_TOP16: LOAD_SREG^ra,EXT_CODE4E(sp) is mic_list & REL6=0 & LOAD_SREG & EXT_CODE4E & ra & sp [ext_t4 = mic_list+1;] { build EXT_CODE4E; tmp:$(REGSIZE) = zext(EXT_CODE4E); tsp = sp + tmp; build LOAD_SREG; MemSrcCast(ra,tsp); } LOAD_TOP16: LOAD_SREG^ra,EXT_CODE4E(sp) is mic_listr6 & REL6=1 & LOAD_SREG & EXT_CODE4E & ra & sp [ext_t4 = mic_listr6+1;] { build EXT_CODE4E; tmp:$(REGSIZE) = zext(EXT_CODE4E); tsp = sp + tmp; build LOAD_SREG; MemSrcCast(ra,tsp); } STORE_S8: is ext_t4=8 {} STORE_S8: is ext_t4 { MemDestCast(tsp,s8); tsp = tsp+$(REGSIZE); } STORE_S7: is ext_t4=7 {} STORE_S7: is STORE_S8 { MemDestCast(tsp,s7); tsp = tsp+$(REGSIZE); build STORE_S8; } STORE_S6: is ext_t4=6 {} STORE_S6: is STORE_S7 { MemDestCast(tsp,s6); tsp = tsp+$(REGSIZE); build STORE_S7; } STORE_S5: is ext_t4=5 {} STORE_S5: is STORE_S6 { MemDestCast(tsp,s5); tsp 
= tsp+$(REGSIZE); build STORE_S6; } STORE_S4: is ext_t4=4 {} STORE_S4: is STORE_S5 { MemDestCast(tsp,s4); tsp = tsp+$(REGSIZE); build STORE_S5; } STORE_S3: is ext_t4=3 {} STORE_S3: is STORE_S4 { MemDestCast(tsp,s3); tsp = tsp+$(REGSIZE); build STORE_S4; } STORE_S2: is ext_t4=2 {} STORE_S2: is STORE_S3 { MemDestCast(tsp,s2); tsp = tsp+$(REGSIZE); build STORE_S3; } STORE_S1: is ext_t4=1 {} STORE_S1: is STORE_S2 { MemDestCast(tsp,s1); tsp = tsp+$(REGSIZE); build STORE_S2; } STORE_S0: is ext_t4=0 {} STORE_S0: is STORE_S1 { MemDestCast(tsp,s0); tsp = tsp+$(REGSIZE); build STORE_S1; } STORE_SREG: is ext_t4=0 {} STORE_SREG: ext_t4_name"," is STORE_S0 & ext_t4_name { build STORE_S0; } STORE_RA: is ext_tra=0 {} STORE_RA: "ra," is ext_tra=1 { MemDestCast(tsp,ra); tsp = tsp+$(REGSIZE); } STORE_TOP: STORE_SREG^STORE_RA EXT_CODE12(ext_32_base) is STORE_SREG & STORE_RA & ext_32_base & ext_32_rlist & EXT_CODE12 [ext_t4 = ext_32_rlist $and 0xf; ext_tra = ext_32_rlist >> 4; ] { build EXT_CODE12; tmp:$(REGSIZE) = sext(EXT_CODE12); tsp = ext_32_base + tmp; build STORE_SREG; build STORE_RA; } STORE_TOP16: STORE_SREG^ra,EXT_CODE4E(sp) is mic_list & REL6=0 & STORE_SREG & EXT_CODE4E & ra & sp [ext_t4 = mic_list+1;] { build EXT_CODE4E; tmp:$(REGSIZE) = zext(EXT_CODE4E); tsp = sp + tmp; build STORE_SREG; MemDestCast(tsp,ra); } STORE_TOP16: STORE_SREG^ra,EXT_CODE4E(sp) is mic_listr6 & REL6=1 & STORE_SREG & EXT_CODE4E & ra & sp [ext_t4 = mic_listr6+1;] { build EXT_CODE4E; tmp:$(REGSIZE) = zext(EXT_CODE4E); tsp = sp + tmp; build STORE_SREG; MemDestCast(tsp,ra); } #### # # Common semantics # #### :abs.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt=0 & micb_poolfx=0b111011 & micb_flt6=0b0001101 { fs_tmp:4 = mic_fs:4; fd_tmp:4 = abs(fs_tmp); mic_ft_5 = zext(fd_tmp); } :abs.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5; micb_bit15=0 & micb_fmt=1 & micb_poolfx=0b111011 & 
micb_flt6=0b0001101 { mic_ftD_5 = abs(mic_fsD); } :add micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0100010000 & micb_bit10=0 & micb_rd32 { tmps:8 = zext(RS0L); tmpt:8 = zext(RT5L); tmps = tmps + tmpt; tmpt = tmps >> 32; if (tmpt != 0) goto ; micb_rd32 = sext(tmps:4); } :add.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_fd & micb_bit10=0 & micb_fmt8=0 & micb_fxf3=0b00110000 { fd_tmp:4 = mic_fs:4 f+ mic_ft_5:4; micb_fd = zext(fd_tmp); } :add.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=1 & micb_fxf3=0b00110000 { micb_fdD = mic_fsD f+ mic_ftD_5; } :addiu mic_rt32_5, RS0L, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b001100 & RS0L & mic_rt32_5 ; EXT_MS16 { tmp:4 = sext(EXT_MS16); tmp = tmp + RS0L; mic_rt32_5 = sext(tmp); } :addiupc RST7R5, EXT_MSPC is ISA_MODE=1 & RELP=0 & mic_op=0b011110 & RST7R5 & mic_imm7 & (REL6=0 | (REL6=1 & mic_pcz=0)); EXT_MSPC [ ext_32_addim=mic_imm7; ] { tmpa:$(REGSIZE) = inst_start & ~3; tmpb:$(REGSIZE) = sext(EXT_MSPC); RST7R5 = tmpa + tmpb; } :addiur1sp mic_rd7, EXT_MU6 is ISA_MODE=1 & RELP=0 & mic_op=0b011011 & mic_bit0=1 & mic_rd7 & EXT_MU6 { @if REGSIZE == "4" tmp:4 = sp + zext(EXT_MU6); @else tmp:4 = sp_lo + zext(EXT_MU6); @endif mic_rd7 = sext(tmp); } :addiur2 mic_rd7, RS4L, EXT_CODE3 is ISA_MODE=1 & RELP=0 & mic_op=0b011011 & mic_bit0=0 & RS4L & mic_rd7 & EXT_CODE3 { tmp:4 = RS4L + sext(EXT_CODE3); mic_rd7 = sext(tmp); } :addiusp EXT_CODE9 is ISA_MODE=1 & RELP=0 & mic_op=0b010011 & mic_bit0=1 & EXT_CODE9 { @if REGSIZE == "4" tmp:4 = sp + sext(EXT_CODE9); @else tmp:4 = sp_lo + sext(EXT_CODE9); @endif sp = sext(tmp); } :addius5 RD5L, mic_imm4s is ISA_MODE=1 & RELP=0 & mic_op=0b010011 & mic_bit0=0 & mic_rd32_5 & RD5L & mic_imm4s { tmp:4 = mic_imm4s; tmp = tmp + RD5L; mic_rd32_5 = sext(tmp); } :addu micb_rd32, RS0L, RT5L is 
# addu .. cvt.d.W: integer add/logical forms, break/cache, count-leading
# ops, coprocessor control-word moves, and the start of the FP convert group.
ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0101010000 & micb_bit10=0 & micb_rd32 { tmp:4 = RS0L + RT5L; micb_rd32 = sext(tmp); } :addu16 RD7R1, RT4L, RS1R7L is ISA_MODE=1 & RELP=0 & mic_op=0b000001 & mic_bit0=0 & RD7R1 & RT4L & RS1R7L { tmp:4 = RT4L + RS1R7L; RD7R1 = sext(tmp); } :and micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b1001010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = mic_rs32_0 & mic_rt32_5; } :and16 RT3R7, RS0R4 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & RT3R7 & RS0R4 & ((mic_csub=0b0010 & REL6=0) | (mic_csubr6=0b0001 & REL6=1)) { RT3R7 = RT3R7 & RS0R4; } :andi mic_rt32_5, mic_rs32_0, micb_imm16 is ISA_MODE=1 & RELP=0 & mic_op=0b110100 & mic_rs32_0 & mic_rt32_5 ; micb_imm16 { tmp:$(REGSIZE) = micb_imm16; mic_rt32_5 = mic_rs32_0 & tmp; } :andi16 mic_rd7, mic_rs4, EXT_CODE4A is ISA_MODE=1 & RELP=0 & mic_op=0b001011 & mic_rd7 & mic_rs4 & EXT_CODE4A { mic_rd7 = mic_rs4 & zext(EXT_CODE4A); } :break is ISA_MODE=1 & RELP=0 & mic_op=0b000000 ; micb_poolax=0b000111 { break(); } :break16 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & SDB16 & ((mic_break=0b101000 & REL6=0) | (mic_breakr6=0b011011 & REL6=1)) { break(); } :cachee mic_cop5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_cop5 & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b011 & EXT_CODE9E { cacheOp(); } :ceil.l.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b01001100 { fd_tmp:4 = ceil(mic_fs:4); mic_ftD_5 = trunc(fd_tmp); } :ceil.l.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b01001100 { fsD_tmp:8 = ceil(mic_fsD); mic_ftD_5 = trunc(fsD_tmp); } :ceil.w.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 &
micb_poolfx=0b111011 & micb_fxf4=0b01101100 { fs_ceil_tmp:4 = ceil(mic_fs:4); fd_tmp:4 = trunc(fs_ceil_tmp); mic_ft_5 = zext(fd_tmp); } :ceil.w.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b01101100 { fs_tmp:8 = ceil(mic_fsD); fd_tmp:4 = trunc(fs_tmp); mic_ft_5 = zext(fd_tmp); } :cfc1 mic_rt32_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_rt32_5 & mic_fs ; micb_poolfx=0b111011 & micb_fxf=0b0001000000 { mic_rt32_5 = getCopControlWord( 1:1, mic_fs:4 ); } :cfc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolfx=0b111100 & micb_axf=0b1100110100 { tmpa:1 = 2; tmpb:1 = mic_impl; mic_rt32_5 = getCopControlWord(tmpa,tmpb); } :clo mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L ; micb_poolax=0b111100 & micb_axf=0b0100101100 { mic_rt32_5 = lzcount( ~RS0L ); } :clz mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L ; micb_poolax=0b111100 & micb_axf=0b0101101100 { mic_rt32_5 = lzcount( RS0L ); } :cop2 EXT_MU23 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code ; micb_cop=0b010 & EXT_MU23 [ ext_32_code=mic_code; ] { tmp:1 = 2; copFunction(tmp,EXT_MU23); } :ctc1 RT5L, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & RT5L & mic_fs ; micb_poolfx=0b111011 & micb_fxf=0b0001100000 { setCopControlWord( 1:1, mic_fs:4, RT5L ); } :ctc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b111100 & micb_axf=0b1101110100 { tmpa:1 = 2; tmpb:1 = mic_impl; setCopControlWord(tmpa,tmpb,mic_rt32_5); } :cvt.d.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5 ; micb_bit15=0 & micb_fmt=0 & micb_poolfx=0b111011 & micb_flt6=0b1001101 { fs_tmp:4 = mic_fs:4; mic_ftD_5 = float2float(fs_tmp); } :cvt.d.W mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5
; micb_bit15=0 & micb_fmt=1 & micb_poolfx=0b111011 & micb_flt6=0b1001101
{
    fs_tmp:4 = mic_fs:4;
    mic_ftD_5 = int2float(fs_tmp);
}

# NOTE(review): the angle-bracketed jump labels in the cvt.l/cvt.w
# constructors below had been lost (bare "goto ;"); they are restored as
# <rm0>/<done>.

:cvt.d.L mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt=2 & micb_poolfx=0b111011 & micb_flt6=0b1001101
{
    mic_ftD_5 = int2float(mic_fsD);
}

# cvt.l.S: single float -> 64-bit integer, honoring the fcsr RM field
# (RM==0: round-to-nearest via round(); otherwise approximated with floor()).
:cvt.l.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b00000100
{
    rm_tmp:1 = fcsr[0,2];
    fs_tmp:4 = mic_fs:4;
    fs_cvt_tmp:4 = 0;
    if (rm_tmp == 0) goto <rm0>;
    fs_cvt_tmp = floor(fs_tmp);   # RM is 1, no rounding, and floor returns a float
    goto <done>;
    <rm0>
    fs_cvt_tmp = round(fs_tmp);   # round returns a float
    <done>
    mic_ftD_5 = trunc(fs_cvt_tmp);
}

:cvt.l.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b00000100
{
    rm_tmp:1 = fcsr[0,2];         # Get RM rounding mode bits
    if (rm_tmp == 0) goto <rm0>;
    fd_tmp:8 = floor(mic_fsD);    # RM is 1, no rounding
    goto <done>;
    <rm0>
    fd_tmp = round(mic_fsD);
    <done>
    mic_ftD_5 = trunc(fd_tmp);
}

:cvt.s.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD ; micb_bit15=0 & micb_fmt=0 & micb_poolfx=0b111011 & micb_flt6=0b1101101
{
    fd_tmp:4 = float2float(mic_fsD);
    mic_ft_5 = zext(fd_tmp);
}

:cvt.s.W mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt=1 & micb_poolfx=0b111011 & micb_flt6=0b1101101
{
    fs_tmp:4 = mic_fs:4;
    fd_tmp:4 = int2float(fs_tmp);
    mic_ft_5 = zext(fd_tmp);
}

:cvt.s.L mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD ; micb_bit15=0 & micb_fmt=2 & micb_poolfx=0b111011 & micb_flt6=0b1101101
{
    fd_tmp:4 = int2float(mic_fsD);
    mic_ft_5 = zext(fd_tmp);
}

:cvt.w.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b00100100
{
    rm_tmp:1 = fcsr[0,2];         # Get RM rounding mode bits
    fs_tmp:4 = mic_fs:4;
    fs_cvt_tmp:4 = 0;
    if (rm_tmp == 0) goto <rm0>;
    fs_cvt_tmp = floor(fs_tmp);   # RM is 1, no rounding, and floor returns a float
    goto <done>;
    <rm0>
    fs_cvt_tmp = round(fs_tmp);   # round returns a float
    <done>
    fd_tmp:4 = trunc(fs_cvt_tmp);
    mic_ft_5 = zext(fd_tmp);      # trunc returns an integer
}

:cvt.w.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b00100100
{
    rm_tmp:1 = fcsr[0,2];         # Get RM rounding mode bits
    if (rm_tmp == 0) goto <rm0>;
    fs_tmp:8 = floor(mic_fsD);    # RM is 1, no rounding
    goto <done>;
    <rm0>
    fs_tmp = round(mic_fsD);
    <done>
    fd_tmp:4 = trunc(fs_tmp);
    mic_ft_5 = zext(fd_tmp);      # In 64-bit FPUs, fd might be 64-bits, so need to set top half to something
}

:deret is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b1110001101 {}

:di DIDISP is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5=0 & DIDISP ; micb_poolax=0b111100 & micb_axf=0b0100011101
{
    disableInterrupts(DIDISP);
}

:div.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_fd & micb_bit10=0 & micb_fmt8=0 & micb_fxf3=0b11110000
{
    fs_tmp:4 = mic_fs:4;          # need to only get the single float 32-bit (mic_fs might be 64-bits)
    ft_tmp:4 = mic_ft_5:4;
    fd_tmp:4 = fs_tmp f/ ft_tmp;
    micb_fd = zext(fd_tmp);
}

:div.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_fdD & micb_fd & micb_bit10=0 & micb_fmt8=1 & micb_fxf3=0b11110000
{
    micb_fdD = mic_fsD f/ mic_ftD_5;
}

:ehb is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0b0000000000 ; micb_poolax=0 & micb_rx=0 & micb_rd32=3
{
    hazzard();
}

:ei DIDISP is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5=0 & DIDISP ; micb_poolax=0b111100 & micb_axf=0b0101011101
{
    enableInterrupts(DIDISP);
}

:eret is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 &
# eret .. lw: exception return, bit-field extract/insert, and the byte/half/
# word load family (plain, EVA "e", and 16-bit compressed forms).  Inline
# comments below are kept on their own lines so they do not swallow code.
micb_axf=0b1111001101 {} :eretnc is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=1 ; micb_poolax=0b111100 & micb_axf=0b1111001101 {} :ext mic_rt32_5, RS0L, micb_pos, SIZEP is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L ; micb_poolax=0b101100 & micb_pos & SIZEP { tmpa:4 = 0xFFFFFFFF; tmpa = tmpa >> (32 - SIZEP); tmpb:4 = RS0L; tmpb = (tmpb >> micb_pos) & tmpa; mic_rt32_5 = sext(tmpb); } :floor.l.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b00001100 { fs_tmp:4 = mic_fs:4; fd_tmp:4 = floor(fs_tmp); # returns a float
mic_ftD_5 = trunc(fd_tmp); # converts float to int
} :floor.l.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b00001100 { fsD_tmp:8 = floor(mic_fsD); mic_ftD_5 = trunc(fsD_tmp); } :floor.w.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b00101100 { fs_tmp:4 = mic_fs:4; fd_tmp:4 = floor(fs_tmp); # returns a float
mic_ft_5 = trunc(fd_tmp); # converts float to int
} :floor.w.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b00101100 { fs_tmp:8 = floor(mic_fsD); fd_tmp:4 = trunc(fs_tmp); mic_ft_5 = zext(fd_tmp); } :ins RT5L, RS0L, micb_pos, SIZEQ is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L & RT5L ; micb_poolax=0b001100 & micb_pos & SIZEQ { tmpa:4 = 0xFFFFFFFF; tmpa = tmpa >> (32 - SIZEQ); tmpb:4 = RS0L & tmpa; tmpa = tmpa << micb_pos; tmpa = ~tmpa; tmpb = tmpb << micb_pos; RT5L = (RT5L & tmpa) | tmpb; mic_rt32_5 = sext(RT5L); } :lb mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b000111 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0;
tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:1 tmpa); } :lbe mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b100 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:1 tmpa); } :lbu mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b000101 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = zext(*[ram]:1 tmpa); } :lbue mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = zext(*[ram]:1 tmpa); } :lbu16 mic_rt7, EXT_CODE4B(mic_base4) is ISA_MODE=1 & RELP=0 & mic_op=0b000010 & mic_rt7 & mic_base4 & EXT_CODE4B { tmp:$(REGSIZE) = sext(EXT_CODE4B); tmp = tmp + mic_base4; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt7 = zext(*[ram]:1 tmpa); } :ldc1 mic_ft_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b101111 & mic_base0 & mic_ft_5 & mic_ftD_5; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_ftD_5 = *[ram]:8 tmpa; } :ldc2 mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0010 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); setCopReg(2:1,mic_rt32_5,*[ram]:8 tmpa); } :lh mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001111 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:2 tmpa); } :lhe mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 &
RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b101 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:2 tmpa); } :lhu mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001101 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = zext(*[ram]:2 tmpa); } :lhue mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b001 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = zext(*[ram]:2 tmpa); } :lhu16 mic_rt7, EXT_CODE4C(mic_base4) is ISA_MODE=1 & RELP=0 & mic_op=0b001010 & mic_rt7 & mic_base4 & EXT_CODE4C { tmp:$(REGSIZE) = sext(EXT_CODE4C); tmp = tmp + mic_base4; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt7 = zext(*[ram]:2 tmpa); } :li16 mic_rd7, EXT_CODE7 is ISA_MODE=1 & RELP=0 & mic_op=0b111011 & mic_rd7 & EXT_CODE7 { mic_rd7 = sext(EXT_CODE7); } :ll mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0011 & micb_sub9=0 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); lockload(tmp); } :lle mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b110 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); lockload(tmp); } :lui RS0RT5, micb_imm16 is ISA_MODE=1 & RELP=0 & RS0RT5 & ((mic_op=0b010000 & mic_sub2=0b01101 & REL6=0) | (mic_op=0b000100 & mic_rs32_0=0 & REL6=1)); micb_imm16 { tmp:4 = micb_imm16; tmp = tmp << 16; RS0RT5 = sext(tmp); } :lw
# lw .. mthc2: word loads (plain/EVA/16-bit/scaled), load-multiple via the
# LOAD_TOP tables, FP/coprocessor loads, and the mfc/mtc register-move group.
mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b111111 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); } :lwe mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b111 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); } :lw16 mic_rt7, EXT_CODE4D(mic_base4) is ISA_MODE=1 & RELP=0 & mic_op=0b011010 & mic_rt7 & mic_base4 & EXT_CODE4D { tmp:$(REGSIZE) = sext(EXT_CODE4D); tmp = tmp + mic_base4; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt7 = sext(*[ram]:4 tmpa); } :lwm16 LOAD_TOP16 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & LOAD_TOP16 & ((mic_csub=0b0100 & REL6=0) | (mic_csubr6=0b0010 & REL6=1)) { build LOAD_TOP16; } :lwm32 LOAD_TOP is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & mic_base0 & mic_rlist ; micb_func12=0b0101 & LOAD_TOP [ ext_32_basea=mic_base0; ext_32_rlist=mic_rlist; ] { build LOAD_TOP; } :lwc1 mic_ft_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b100111 & mic_base0 & mic_ft_5 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_ft_5 = sext( *[ram]:4 tmpa); } :lwc2 mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0000 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); setCopReg(2:1,mic_rt32_5,*[ram]:4 tmpa); } :lwp mic_rd32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & mic_rd32_5 & mic_base0 & ext_32_rd ; micb_func12=0b0001 & EXT_CODE12 [ext_32_rdset = mic_rd32_5+1;] { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rd32_5 = sext( *[ram]:4 tmpa); tmp = tmp + 4;
ValCast(tmpa,tmp); ext_32_rd = sext( *[ram]:4 tmpa); } :lwgp mic_rt7, EXT_CODE7A(gp) is ISA_MODE=1 & RELP=0 & mic_op=0b011001 & mic_rt7 & gp & EXT_CODE7A { tmp:$(REGSIZE) = sext(EXT_CODE7A); tmp = tmp + gp; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt7 = sext( *[ram]:4 tmpa); } :lwsp mic_rt32_5, EXT_CODE5(sp) is ISA_MODE=1 & RELP=0 & mic_op=0b010010 & mic_rt32_5 & sp & EXT_CODE5 { tmp:$(REGSIZE) = zext(EXT_CODE5); tmp = tmp + sp; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext( *[ram]:4 tmpa); } :lwxs micb_rd32, mic_index(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_index & mic_base0 ; micb_bit10=0 & micb_rd32 & ((micb_axf2=0b0100011000 & REL6=0) | (micb_axf2=0b0100000000 & REL6=1)) { tmp:$(REGSIZE) = mic_base0 + (mic_index << 2); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); micb_rd32 = sext( *[ram]:4 tmpa); } :mfc0 mic_rt32_5, mic_rs32_0, CPSEL is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_z14=0 & micb_cpf=0b00011 & micb_poolax=0b111100 & CPSEL { mic_rt32_5 = getCopReg(0:1,mic_rs32_0,CPSEL); } :mfc1 mic_rt32_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_rt32_5 & mic_fs ; micb_poolfx=0b111011 & micb_fxf=0b0010000000 { mic_rt32_5 = sext(mic_fs:4); } :mfc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b111100 & micb_axf=0b0100110100 { mic_rt32_5 = getCopReg(2:1,mic_impl:1); } :mfhc0 mic_rt32_5, mic_rs32_0, CPSEL is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_z14=0 & micb_cpf=0b00011 & micb_poolax=0b110100 & CPSEL { mic_rt32_5 = getCopRegH(0:1,mic_rs32_0,CPSEL); } :mfhc1 mic_rt32_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_rt32_5 & mic_fs & mic_fsD ; micb_poolfx=0b111011 & micb_fxf=0b0011000000 { tmp:4 = mic_fsD[32,32]; mic_rt32_5 = sext(tmp); } :mfhc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b111100 & micb_axf=0b1000110100 { mic_rt32_5 =
getCopRegH(2:1,mic_impl:1); } :mov.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt=0 & micb_poolfx=0b111011 & micb_flt6=0b0000001 { mic_ft_5 = zext(mic_fs:4); } :mov.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt=1 & micb_poolfx=0b111011 & micb_flt6=0b0000001 { mic_ftD_5 = mic_fsD; } :move16 mic_rd32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000011 & mic_rd32_5 & mic_rs32_0 { mic_rd32_5 = mic_rs32_0; }
# The docs are not clear if this format is pre and/or post R6.
:movep mic_encrd, mic_encre, ENCRS, mic_encrt is ISA_MODE=1 & RELP=0 & mic_op=0b100001 & mic_bit0=0 & mic_encrd & mic_encre & ENCRS & mic_encrt & mic_bit3 & mic_bit01 [ext_16_rshi=mic_bit3; ext_16_rslo=mic_bit01;] { mic_encrd = ENCRS; mic_encre = mic_encrt; } :mtc0 mic_rt32_5, mic_rs32_0, CPSEL is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_z14=0 & micb_cpf=0b01011 & micb_poolax=0b111100 & CPSEL { setCopReg(0:1,mic_rs32_0,mic_rt32_5,CPSEL); } :mtc1 RT5L, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & RT5L & mic_fs ; micb_poolfx=0b111011 & micb_fxf=0b0010100000 { mic_fs[0,32] = RT5L; } :mtc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b111100 & micb_axf=0b0101110100 { setCopReg(2:1,mic_rt32_5,mic_impl:1); } :mthc0 mic_rt32_5, mic_rs32_0, CPSEL is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_z14=0 & micb_cpf=0b01011 & micb_poolax=0b110100 & CPSEL { setCopRegH(0:1,mic_rs32_0,mic_rt32_5,CPSEL); } :mthc1 RT5L, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & RT5L & mic_fs & mic_fsD ; micb_poolfx=0b111011 & micb_fxf=0b0011100000 { mic_fsD[32,32] = RT5L; } :mthc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b111100 & micb_axf=0b1001110100 { setCopRegH(2:1,mic_rt32_5,mic_impl:1); } :mul micb_rd32, RS0L,
# mul .. sbe: multiply, negate, logical ops, prefetch, reciprocal, rotate,
# round, rsqrt, and the start of the byte-store group.  Inline comments are
# kept on their own lines so they do not swallow the code that follows.
RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_rd32 & micb_bit10=0 & ((micb_axf2=0b1000010000 & REL6=0) | (micb_axf2=0b0000011000 & REL6=1)) { tmp:4 = RS0L * RT5L; micb_rd32 = sext(tmp); } :mul.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_fd & micb_bit10=0 & micb_fmt8=0 & micb_fxf3=0b10110000 { fd_tmp:4 = mic_fs:4 f* mic_ft_5:4; micb_fd = zext(fd_tmp); } :mul.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=1 & micb_fxf3=0b10110000 { micb_fdD = mic_fsD f* mic_ftD_5; } :neg.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt=0 & micb_poolfx=0b111011 & micb_flt6=0b0101101 { fs_tmp:4 = mic_fs:4; fd_tmp:4 = f- fs_tmp; mic_ft_5 = zext(fd_tmp); } :neg.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5; micb_bit15=0 & micb_fmt=1 & micb_poolfx=0b111011 & micb_flt6=0b0101101 { mic_ftD_5 = f- mic_fsD; }
# This is a special case of sll
:nop is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0=0 & mic_rt32_5=0 ; micb_sa=0 & micb_axf2=0b0000000000 & micb_bit10=0 { } :nor micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b1011010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = ~(mic_rs32_0 | mic_rt32_5); } :not16 RT3R7, RS0R4 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & RT3R7 & RS0R4 & ((mic_csub=0b0000 & REL6=0) | (mic_csubr6=0b0000 & REL6=1)) { RT3R7 = ~RS0R4; } :or micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b1010010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = mic_rs32_0 | mic_rt32_5; } :ori mic_rt32_5, mic_rs32_0, micb_imm16 is ISA_MODE=1 & RELP=0 & mic_op=0b010100 & mic_rs32_0 & mic_rt32_5 ; micb_imm16 { tmp:$(REGSIZE) = micb_imm16; mic_rt32_5 = mic_rs32_0 | tmp; }
:or16 RT3R7, RS0R4 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & RT3R7 & RS0R4 & ((mic_csub=0b0011 & REL6=0) | (mic_csubr6=0b1001 & REL6=1)) { RT3R7 = RT3R7 | RS0R4; } :pause is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0=0 & mic_rt32_5=0 ; micb_sa=0b00101 & micb_axf2=0b0000000000 & micb_bit10=0 { wait(); } :pref mic_cop5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_cop5 & mic_base0 ; micb_func12=0b0100 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = zext(EXT_CODE9E); tmp = tmp + mic_base0; prefetch(mic_cop5:1,tmp); } :prefe mic_cop5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_cop5 & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b010 & EXT_CODE9E { tmp:$(REGSIZE) = zext(EXT_CODE9E); tmp = tmp + mic_base0; prefetch(mic_cop5:1,tmp); } :rdpgpr mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b1110000101 { mic_rt32_5 = getShadow(mic_rs32_0); } :recip.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b01001000 { fd_tmp:4 = 1:4 f/ mic_fs:4; mic_ft_5 = zext(fd_tmp); } :recip.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5 & mic_fsD ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b01001000 { mic_ftD_5 = 1:8 f/ mic_fsD; } :rotr mic_rt32_5, RS0L, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & mic_rt32_5 ; micb_sa & micb_axf2=0b0011000000 & micb_bit10=0 { tmpa:4 = RS0L >> micb_sa; tmpb:4 = RS0L << (32 - micb_sa); tmpa = tmpa | tmpb; mic_rt32_5 = sext(tmpa); } :rotrv micb_rd32, RT5L, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_rd32 & micb_axf2=0b0011010000 & micb_bit10=0 { tmpr:1 = RS0L[0,5]; tmpa:4 = RT5L >> tmpr; tmpb:4 = RT5L << (32 - tmpr); tmpa = tmpa | tmpb; micb_rd32 = sext(tmpa); } :round.l.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 &
mic_ft_5 & mic_fs & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b11001100 { fd_tmp:4 = round(mic_fs:4); # round returns a float of the same size as the arg
mic_ftD_5 = trunc(fd_tmp); # trunc converts to any size integer
} :round.l.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b11001100 { fsD_tmp:8 = round(mic_fsD); mic_ftD_5 = trunc(fsD_tmp); } :round.w.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b11101100 { fd_tmp:4 = round(mic_fs:4); mic_ft_5 = trunc(fd_tmp); } :round.w.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b11101100 { fdD_tmp:8 = round(mic_fsD); # round returns a float, not an int
fd_tmp:4 = trunc(fdD_tmp); # We need only a 32-bit integer
mic_ft_5 = zext(fd_tmp); # But fill the top half with 0s if we have a 64-bit FPU
} :rsqrt.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b00001000 { fd_tmp:4 = 1:4 f/ sqrt(mic_fs:4); mic_ft_5 = zext(fd_tmp); } :rsqrt.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b00001000 { mic_ftD_5 = 1:8 f/ sqrt( mic_fsD ); } :sb RT5L, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b000110 & RT5L & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:1 tmpa = RT5L:1; } :sbe RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & RT5L & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b100 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp +
# sbe tail .. sub head: byte/half stores, store-conditional, sdbbp, FP/cop
# stores, sign-extend, shifts, set-on-less-than, sqrt, ssnop.
mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:1 tmpa = RT5L:1; } :sb16 mic_encrt2, mic_off4(mic_base4) is ISA_MODE=1 & RELP=0 & mic_op=0b100010 & mic_encrt2 & mic_base4 & mic_off4 { tmp:$(REGSIZE) = mic_off4; tmp = tmp + mic_base4; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:1 tmpa = mic_encrt2:1; } :sc RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & RT5L & mic_base0 ; micb_func12=0b1011 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; lockwrite(tmp); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = RT5L; RT5L = 1; } :sce RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & RT5L & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b110 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = RT5L; lockwrite(tmp); } :sdbbp mic_code is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code ; micb_poolax=0b111100 & micb_axf=0b1101101101 { break(mic_code:2); } :sdbbp16 SDB16 is ISA_MODE=1 & RELP=0 & mic_op=0b100010 & SDB16 & ((mic_break=0b101100 & REL6=0) | (mic_breakr6=0b111011 & REL6=1)) { break(SDB16); } :sdc1 mic_ft_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b101110 & mic_base0 & mic_ft_5 & mic_ftD_5 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:8 tmpa = mic_ftD_5; } :sdc2 mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b1010 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:8 tmpa = getCopReg(2:1,mic_rt32_5); } :seb mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L ; micb_poolax=0b111100 & micb_axf=0b0010101100 { mic_rt32_5 = sext(RS0L:1); } :seh mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L ; micb_poolax=0b111100 &
micb_axf=0b0011101100 { mic_rt32_5 = sext(RS0L:2); } :sh RT5L, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001110 & RT5L & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:2 tmpa = RT5L:2; } :she RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & RT5L & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b101 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:2 tmpa = RT5L:2; } :sh16 mic_encrt2, EXT_CODE4C(mic_base4) is ISA_MODE=1 & RELP=0 & mic_op=0b101010 & mic_encrt2 & mic_base4 & EXT_CODE4C { tmp:$(REGSIZE) = sext(EXT_CODE4C); tmp = tmp + mic_base4; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:2 tmpa = mic_encrt2:2; } :sll mic_rt32_5, RS0L, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & mic_rt32_5 ; micb_sa & micb_axf2=0b0000000000 & micb_bit10=0 { mic_rt32_5 = sext(RS0L << micb_sa); } :sllv micb_rd32, RT5L, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0000010000 & micb_bit10=0 & micb_rd32 { tmp:1 = RS0L[0,5]; micb_rd32 = sext(RT5L << tmp); } :sll16 mic_rd7, RT4L, EXT_SA is ISA_MODE=1 & RELP=0 & mic_op=0b001001 & mic_bit0=0 & mic_rd7 & RT4L & EXT_SA { mic_rd7 = sext(RT4L << EXT_SA); } :slt micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b1101010000 & micb_bit10=0 & micb_rd32 { tmp:1 = mic_rs32_0 s< mic_rt32_5; micb_rd32 = zext(tmp); } :slti mic_rt32_5, mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b100100 & mic_rs32_0 & mic_rt32_5 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmpa:1 = mic_rs32_0 s< tmp; mic_rt32_5 = zext(tmpa); } :sltiu mic_rt32_5, mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b101100 & mic_rs32_0 & mic_rt32_5 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmpa:1 = mic_rs32_0 < tmp; mic_rt32_5 = zext(tmpa); } :sltu micb_rd32, mic_rs32_0, mic_rt32_5 is
ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b1110010000 & micb_bit10=0 & micb_rd32 { tmp:1 = mic_rs32_0 < mic_rt32_5; micb_rd32 = zext(tmp); } :sqrt.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b00101000 { fd_tmp:4 = sqrt(mic_fs:4); mic_ft_5 = zext(fd_tmp); } :sqrt.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b00101000 { mic_ftD_5 = sqrt(mic_fsD); } :sra mic_rt32_5, RS0L, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & mic_rt32_5 ; micb_sa & micb_axf2=0b0010000000 & micb_bit10=0 { mic_rt32_5 = sext(RS0L s>> micb_sa); } :srav micb_rd32, RT5L, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0010010000 & micb_bit10=0 & micb_rd32 { tmp:1 = RS0L[0,5]; micb_rd32 = sext(RT5L s>> tmp); } :srl mic_rt32_5, RS0L, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & mic_rt32_5 ; micb_sa & micb_axf2=0b0001000000 & micb_bit10=0 { mic_rt32_5 = sext(RS0L >> micb_sa); } :srlv micb_rd32, RT5L, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0001010000 & micb_bit10=0 & micb_rd32 { tmp:1 = RS0L[0,5]; micb_rd32 = sext(RT5L >> tmp); } :srl16 mic_rd7, RT4L, EXT_SA is ISA_MODE=1 & RELP=0 & mic_op=0b001001 & mic_bit0=1 & mic_rd7 & RT4L & EXT_SA { mic_rd7 = sext(RT4L >> EXT_SA); } :ssnop is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0=0 & mic_rt32_5=0 ; micb_sa=1 & micb_axf2=0b0000000000 & micb_bit10=0 {} :sub micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0110010000 & micb_bit10=0 & micb_rd32 { # Do we want this check?
#tmpa:8 = sext(RS0L); #tmpb:8 = sext(RT5L); #tmpa = tmpa - tmpb; #if (tmpa[31,1] != tmpa[32,1]) goto ; micb_rd32 = sext(RS0L - RT5L); # } :sub.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_fd & micb_bit10=0 & micb_fmt8=0 & micb_fxf3=0b01110000 { fd_tmp:4 = mic_fs:4 f- mic_ft_5:4; micb_fd = zext(fd_tmp); } :sub.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_fdD & micb_fd & micb_bit10=0 & micb_fmt8=1 & micb_fxf3=0b01110000 { micb_fdD = mic_fsD f- mic_ftD_5; } :subu micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & RS0L & RT5L ; micb_axf2=0b0111010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = sext(RS0L - RT5L); } :subu16 RD7R1, RS1R7L, RT4L is ISA_MODE=1 & RELP=0 & mic_op=0b000001 & mic_bit0=1 & RD7R1 & RT4L & RS1R7L { RD7R1 = sext(RS1R7L - RT4L); } :sw RT5L, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b111110 & RT5L & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = RT5L:4; } :swe RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & RT5L & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b111 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = RT5L:4; } :sw16 mic_encrt2, EXT_CODE4E(mic_base4) is ISA_MODE=1 & RELP=0 & mic_op=0b111010 & mic_encrt2 & mic_base4 & EXT_CODE4E { tmp:$(REGSIZE) = sext(EXT_CODE4E); tmp = tmp + mic_base4; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = mic_encrt2:4; } :swc1 mic_ft_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b100110 & mic_base0 & mic_ft_5 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = mic_ft_5:4; } :swc2 mic_rt32_5, micb_offset11s(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & mic_rt32_5 & mic_base0 ; 
# --- microMIPS32: swc2 tail, stack/paired/multi-word stores, sync/syscall,
# trap-on-condition, TLB maintenance, FP truncate, and xor-family
# constructors (Ghidra SLEIGH).
# NOTE(review): this region was collapsed onto long physical lines by
# extraction; it is reflowed here one constructor per line (SLEIGH is
# whitespace-insensitive, so this does not change decoding or p-code).
# Two fixes are applied:
#   1. The trap constructors had lost the "<done>" local-label targets
#      ("goto ;" is not valid SLEIGH); the standard
#      "goto <done>; trap(); <done>" skip-over-trap pattern is restored.
#   2. The "trunk.*" mnemonics are corrected to "trunc.*" to match the MIPS
#      TRUNC.L.fmt / TRUNC.W.fmt instructions (decode fields unchanged).
# Tail of :swc2 (its header is on the preceding line):
micb_func12=0b1000 & micb_bit11=0 & micb_offset11s { tmp:$(REGSIZE) = micb_offset11s; tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = getCopReg(2:1,mic_rt32_5); }
:swsp RT5L, EXT_CODE5(sp) is ISA_MODE=1 & RELP=0 & mic_op=0b110010 & RT5L & sp & EXT_CODE5 { tmp:$(REGSIZE) = zext(EXT_CODE5); tmp = tmp + sp; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = RT5L; }
# swp stores rt and the next register (RSEXTL) to two consecutive words.
:swp RT5L, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & RT5L & mic_base0 & RSEXTL & mic_rs32_5; micb_func12=0b1001 & EXT_CODE12 [ext_32_rs1set = mic_rs32_5+1;] { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = RT5L; tmp = tmp + 4; ValCast(tmpa,tmp); *[ram]:4 tmpa = RSEXTL; }
:swm16 STORE_TOP16 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & STORE_TOP16 & ((mic_csub=0b0101 & REL6=0) | (mic_csubr6=0b1010 & REL6=1)) { build STORE_TOP16; }
:swm32 STORE_TOP is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & mic_base0 & mic_rlist ; micb_func12=0b1101 & STORE_TOP [ ext_32_basea=mic_base0; ext_32_rlist=mic_rlist; ] { build STORE_TOP; }
:sync STYPE is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_funci=0 & STYPE ; micb_poolax=0b111100 & micb_axf=0b0110101101 { synch(STYPE); }
:synci EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & mic_base0 & ((mic_funci=0b10000 & REL6=0) | (mic_funci=0b01100 & REL6=1)); EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; synch(tmp); }
:syscall is ISA_MODE=1 & RELP=0 & mic_op=0b000000 ; micb_poolax=0b111100 & micb_axf=0b1000101101 { syscall(); }
# Trap-on-condition: branch over trap() when the negated condition holds.
:teq mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_trap=0b000000 { if (mic_rs32_0 != mic_rt32_5) goto <done>; trap(); <done> }
:tge mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_trap=0b001000 { if (mic_rt32_5 s< mic_rs32_0) goto <done>; trap(); <done> }
:tgeu mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_trap=0b010000 { if (mic_rt32_5 < mic_rs32_0) goto <done>; trap(); <done> }
# TLB maintenance, dispatched through the tlbop() user-op with an opcode id.
:tlbinv is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b0100001101 { tlbop(0:1); }
:tlbinvf is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b0101001101 { tlbop(1:1); }
:tlbp is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b0000001101 { tlbop(2:1); }
:tlbr is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b0001001101 { tlbop(3:1); }
:tlbwi is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b0010001101 { tlbop(4:1); }
:tlbwr is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_code=0 ; micb_poolax=0b111100 & micb_axf=0b0011001101 { tlbop(5:1); }
:tlt mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_trap=0b100000 { if (mic_rt32_5 s>= mic_rs32_0) goto <done>; trap(); <done> }
:tltu mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_trap=0b101000 { if (mic_rt32_5 >= mic_rs32_0) goto <done>; trap(); <done> }
:tne mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_trap=0b110000 { if (mic_rt32_5 == mic_rs32_0) goto <done>; trap(); <done> }
# FP truncate-to-integer (mnemonics corrected from "trunk.*" to "trunc.*").
:trunc.l.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b10001100 { mic_ft_5 = trunc(mic_fs:4); }
:trunc.l.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b10001100 { mic_ftD_5 = trunc(mic_fsD); }
:trunc.w.S mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs ; micb_bit15=0 & micb_fmt14=0 & micb_poolfx=0b111011 & micb_fxf4=0b10101100 { fd_tmp:4 = trunc(mic_fs:4); mic_ft_5 = zext(fd_tmp); }
:trunc.w.D mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD ; micb_bit15=0 & micb_fmt14=1 & micb_poolfx=0b111011 & micb_fxf4=0b10101100 { fd_tmp:4 = trunc(mic_fsD); mic_ft_5 = zext(fd_tmp); }
:wait is ISA_MODE=1 & RELP=0 & mic_op=0b000000 ; micb_poolax=0b111100 & micb_axf=0b1001001101 { wait(); }
:wrpgpr mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b1111000101 { setShadow(mic_rt32_5,mic_rs32_0); }
# wsbh: swap the bytes within each halfword of the low 32 bits.
:wsbh mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & RS0L ; micb_poolax=0b111100 & micb_axf=0b0111101100 { tmp1:4 = zext(RS0L[24,8]); tmp2:4 = zext(RS0L[16,8]); tmp3:4 = zext(RS0L[8,8]); tmp4:4 = zext(RS0L[0,8]); tmp5:4 = (tmp2 << 24) | (tmp1 << 16) | (tmp4 << 8) | tmp3; mic_rt32_5 = sext(tmp5); }
:xor micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b1100010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = mic_rs32_0 ^ mic_rt32_5; }
:xori mic_rt32_5, mic_rs32_0, micb_imm16 is ISA_MODE=1 & RELP=0 & mic_op=0b011100 & mic_rs32_0 & mic_rt32_5 ; micb_imm16 { tmp:$(REGSIZE) = micb_imm16; mic_rt32_5 = mic_rs32_0 ^ tmp; }
:xor16 RT3R7, RS0R4 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & RT3R7 & RS0R4 & ((mic_csub=0b0001 & REL6=0) | (mic_csubr6=0b1000 & REL6=1)) { RT3R7 = RT3R7 ^ RS0R4; }
@ifdef MIPS64
# Head of :dadd (its body continues on the next line with the authors'
# commented-out overflow check):
:dadd micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0100010000 & micb_bit10=0 & micb_rd32 { # The dest doesn't get modified if there's an overflow. As our sleigh max size is 64bits, checking for this is a little complicated.
# Should we include this check or not?
# Remainder of the authors' commented-out 64-bit overflow check for :dadd,
# followed by the live assignment; then the MIPS64-only (@ifdef MIPS64)
# doubleword arithmetic constructors.  NOTE(review): text is collapsed onto
# long lines by extraction — trailing "#" comments visually swallow the code
# after them; kept byte-identical.
# tmpsl:8 = zext(mic_rs32_0:4); # tmptl:8 = zext(mic_rt32_5:4); # tmpsh:8 = zext(mic_rs32_0[32,32]); # tmpth:8 = zext(mic_rt32_5[32,32]); # tmpres:8 = tmpsl + tmptl; # tmpres = tmpres >> 32; # tmpres = tmpres + tmpsh + tmpth; # tmpres = tmpres >> 32; # if (tmpres == 1) goto ; micb_rd32 = mic_rs32_0 + mic_rt32_5; # } :daddiu mic_rt32_5, mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010111 & mic_rs32_0 & mic_rt32_5 ; EXT_MS16 { tmp:8 = sext(EXT_MS16); mic_rt32_5 = mic_rs32_0 + tmp; } :daddu micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0101010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = mic_rs32_0 + mic_rt32_5; } :dclo mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0100101100 { mic_rt32_5 = lzcount( ~mic_rs32_0 ); } :dclz mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0101101100 { mic_rt32_5 = lzcount( mic_rs32_0 ); } :dext mic_rt32_5, mic_rs32_0, micb_pos, SIZEP is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b101100 & micb_pos & SIZEP { tmpa:8 = 0xFFFFFFFFFFFFFFFF; tmpa = tmpa >> (64 - SIZEP); tmpb:8 = mic_rs32_0; tmpb = (tmpb >> micb_pos) & tmpa; mic_rt32_5 = tmpb; } :dextm mic_rt32_5, mic_rs32_0, micb_pos, SIZEPLG is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b100100 & micb_pos & SIZEPLG { tmpa:8 = 0xFFFFFFFFFFFFFFFF; tmpa = tmpa >> (64 - SIZEPLG); tmpb:8 = mic_rs32_0; tmpb = (tmpb >> micb_pos) & tmpa; mic_rt32_5 = tmpb; } :dextu mic_rt32_5, mic_rs32_0, POSHI, SIZEP is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b010100 & POSHI & SIZEP { tmpa:8 = 0xFFFFFFFFFFFFFFFF; tmpa = tmpa >> (64 - SIZEP); tmpb:8 = mic_rs32_0; tmpb = (tmpb >> POSHI) & tmpa; mic_rt32_5 = tmpb; } :dins mic_rt32_5, mic_rs32_0,
# Doubleword bit-field insert (dins family): build a SIZE*-wide mask, clear
# the target field in rt, and merge in the shifted source bits.
micb_pos, SIZEQ is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b001100 & micb_pos & SIZEQ { tmpa:8 = 0xFFFFFFFFFFFFFFFF; tmpa = tmpa >> (64 - SIZEQ); tmpb:8 = mic_rs32_0 & tmpa; tmpa = tmpa << micb_pos; tmpa = ~tmpa; tmpb = tmpb << micb_pos; mic_rt32_5 = (mic_rt32_5 & tmpa) | tmpb; } :dinsm mic_rt32_5, mic_rs32_0, micb_pos, SIZEQLG is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b000100 & micb_pos & SIZEQLG { tmpa:8 = 0xFFFFFFFFFFFFFFFF; tmpa = tmpa >> (64 - SIZEQLG); tmpb:8 = mic_rs32_0 & tmpa; tmpa = tmpa << micb_pos; tmpa = ~tmpa; tmpb = tmpb << micb_pos; mic_rt32_5 = (mic_rt32_5 & tmpa) | tmpb; } :dinsu mic_rt32_5, mic_rs32_0, POSHI, SIZEQ is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b110100 & POSHI & SIZEQ { tmpa:8 = 0xFFFFFFFFFFFFFFFF; tmpa = tmpa >> (64 - SIZEQ); tmpb:8 = mic_rs32_0 & tmpa; tmpa = tmpa << POSHI; tmpa = ~tmpa; tmpb = tmpb << POSHI; mic_rt32_5 = (mic_rt32_5 & tmpa) | tmpb; } :dmfc0 mic_rt32_5, mic_rs32_0, CPSEL is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_z14=0 & micb_cpf=0b00011 & micb_poolax=0b111100 & CPSEL { mic_rt32_5 = getCopReg(0:1,mic_rs32_0,CPSEL); } :dmfc1 mic_rt32_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_rt32_5 & mic_fs & mic_fsD ; micb_poolfx=0b111011 & micb_fxf=0b0010010000 { mic_rt32_5 = mic_fsD; } :dmfc2 mic_rt32_5, mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b111100 & micb_axf=0b0110110100 { mic_rt32_5 = getCopReg(2:1,mic_impl:1); } :dmtc0 mic_rt32_5, mic_rs32_0, CPSEL is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_z14=0 & micb_cpf=0b01011 & micb_poolax=0b111100 & CPSEL { setCopReg(0:1,mic_rs32_0,mic_rt32_5,CPSEL); } :dmtc1 mic_rt32_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_rt32_5 & mic_fs & mic_fsD ; micb_poolfx=0b111011 & micb_fxf=0b0010110000 { mic_fsD = mic_rt32_5; } :dmtc2 mic_rt32_5,
# Doubleword rotates and byte/halfword shuffles (drotr/dsbh/dshd), built
# from zext'd sub-fields; variable rotate takes its amount from RS0L[0,6].
# NOTE(review): :dmtc2 uses micb_poolax=0b000011 while every sibling uses
# 0b111100 — possibly intentional encoding, but worth confirming upstream.
mic_impl is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_rt32_5 & mic_impl ; micb_poolax=0b000011 & micb_axf=0b0111110100 { setCopReg(2:1,mic_rt32_5,mic_impl:1); } :drotr mic_rt32_5, mic_rs32_0, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_sa & micb_axf2=0b0011000000 & micb_bit10=0 { tmpa:8 = mic_rs32_0 >> micb_sa; tmpb:8 = mic_rs32_0 << (64 - micb_sa); tmpa = tmpa | tmpb; mic_rt32_5 = tmpa; } :drotr32 mic_rt32_5, mic_rs32_0, SA32 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; SA32 & micb_axf2=0b0011001000 & micb_bit10=0 { tmpa:8 = mic_rs32_0 >> SA32; tmpb:8 = mic_rs32_0 << (64 - SA32); tmpa = tmpa | tmpb; mic_rt32_5 = tmpa; } :drotrv micb_rd32, mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & RS0L & mic_rt32_5 ; micb_rd32 & micb_axf2=0b0011010000 & micb_bit10=0 { tmpr:1 = RS0L[0,6]; tmpa:8 = mic_rt32_5 >> tmpr; tmpb:8 = mic_rt32_5 << (64 - tmpr); tmpa = tmpa | tmpb; micb_rd32 = tmpa; } :dsbh mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0111101100 { tmp1:8 = zext(mic_rs32_0[56,8]); tmp2:8 = zext(mic_rs32_0[48,8]); tmp3:8 = zext(mic_rs32_0[40,8]); tmp4:8 = zext(mic_rs32_0[32,8]); tmp5:8 = zext(mic_rs32_0[24,8]); tmp6:8 = zext(mic_rs32_0[16,8]); tmp7:8 = zext(mic_rs32_0[8,8]); tmp8:8 = zext(mic_rs32_0[0,8]); tmp9:8 = (tmp2 << 56) | (tmp1 << 48) | (tmp4 << 40) | (tmp3 << 32) | (tmp6 << 24) | (tmp5 << 16) | (tmp8 << 8) | tmp7; mic_rt32_5 = tmp9; } :dshd mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b1111101100 { tmp1:8 = zext(mic_rs32_0[48,16]); tmp2:8 = zext(mic_rs32_0[32,16]); tmp3:8 = zext(mic_rs32_0[16,16]); tmp4:8 = zext(mic_rs32_0[0,16]); tmp5:8 = (tmp4 << 48) | (tmp3 << 32) | (tmp2 << 16) | tmp1; mic_rt32_5 = tmp5; } :dsll mic_rt32_5, mic_rs32_0, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_sa &
# Doubleword shifts: dsll/dsrl/dsra plus "+32" immediate and variable (v)
# forms; variable forms take the shift amount from RS0L[0,6].
micb_axf2=0b0000000000 & micb_bit10=0 { mic_rt32_5 = mic_rs32_0 << micb_sa; } :dsll32 mic_rt32_5, mic_rs32_0, SA32 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; SA32 & micb_axf2=0b0000001000 & micb_bit10=0 { mic_rt32_5 = mic_rs32_0 << SA32; } :dsllv micb_rd32, mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & RS0L & mic_rt32_5 ; micb_axf2=0b0000010000 & micb_bit10=0 & micb_rd32 { tmps:1 = RS0L[0,6]; micb_rd32 = mic_rt32_5 << tmps; } :dsra mic_rt32_5, mic_rs32_0, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_sa & micb_axf2=0b0010000000 & micb_bit10=0 { mic_rt32_5 = mic_rs32_0 s>> micb_sa; } :dsra32 mic_rt32_5, mic_rs32_0, SA32 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; SA32 & micb_axf2=0b0010000100 & micb_bit10=0 { mic_rt32_5 = mic_rs32_0 s>> SA32; } :dsrav micb_rd32, mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & RS0L & mic_rt32_5 ; micb_axf2=0b0010010000 & micb_bit10=0 & micb_rd32 { tmps:1 = RS0L[0,6]; micb_rd32 = mic_rt32_5 s>> tmps; } :dsrl mic_rt32_5, mic_rs32_0, micb_sa is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_sa & micb_axf2=0b0001000000 & micb_bit10=0 { mic_rt32_5 = mic_rs32_0 >> micb_sa; } :dsrl32 mic_rt32_5, mic_rs32_0, SA32 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; SA32 & micb_axf2=0b0001001000 & micb_bit10=0 { mic_rt32_5 = mic_rs32_0 >> SA32; } :dsrlv micb_rd32, mic_rt32_5, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & RS0L & mic_rt32_5 ; micb_axf2=0b0001010000 & micb_bit10=0 & micb_rd32 { tmps:1 = RS0L[0,6]; micb_rd32 = mic_rt32_5 >> tmps; } :dsub micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0110010000 & micb_bit10=0 & micb_rd32 { # Do we want to test for this?
# :dsub's commented-out overflow test, dsubu, and the MIPS64 doubleword
# load/store constructors (ld/lld/lwu/scd/sd); "@endif" closes the MIPS64
# section, after which the pre-Release-6 constructors begin.
#tmpt:1 = zext(mic_rt32_5[63,1]); #tmps:1 = zext(mic_rs32_0[63,1]); #tmp:8 = mic_rs32_0 - mic_rt32_5; #tmpa:1 = zext(tmp[63,1]); #if ((tmpa ^ tmps) & (tmpt ^ tmps)) goto ; micb_rd32 = mic_rs32_0 - mic_rt32_5; # } :dsubu micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0111010000 & micb_bit10=0 & micb_rd32 { micb_rd32 = mic_rs32_0 - mic_rt32_5; } :ld mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b110111 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = *[ram]:8 tmpa; } :lld mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b0111 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; lockload(tmp); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = *[ram]:8 tmpa; } :lwu mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b1110 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); tmpb:4 = *[ram]:4 tmpa; mic_rt32_5 = zext(tmpb); } :scd mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & mic_rt32_5 & mic_base0 ; micb_func12=0b1111 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; lockwrite(tmp); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:8 tmpa = mic_rt32_5; mic_rt32_5 = 1; } :sd mic_rt32_5, EXT_MS16(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b110110 & mic_rt32_5 & mic_base0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:8 tmpa = mic_rt32_5; } @endif #### # # Pre-6 semantics # #### :abs.PS mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_fmt=2 &
# Pre-Release-6 (REL6=0) constructors: paired-single FP forms (modelled via
# the mipsFloatPS user-op rather than true PS arithmetic) and the classic
# branch family.  Branches evaluate the condition first, run the delay slot
# via delayslot(1), then branch; the "-al" link forms set
# ra = inst_next | 0x1 (the low bit presumably keeps the microMIPS ISA mode
# on return — see ISA_MODE).  Code kept byte-identical.
micb_poolfx=0b111011 & micb_flt6=0b0001101 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :add.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=2 & micb_fxf3=0b00110000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :addi mic_rt32_5, RS0L, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b000100 & REL6=0 & RS0L & mic_rt32_5; EXT_MS16 { tmp:4 = sext(EXT_MS16); tmp = tmp + RS0L; mic_rt32_5 = sext(tmp); } :alnv.ps micb_fd, mic_fs, mic_ft_5, micb_rs32 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_rs32 & micb_poolfx=0b011001 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :b Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_rs32_0=0 & mic_rt32_5=0 ; Rel16_mic { delayslot( 1 ); goto Rel16_mic; } :b16 Rel10_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110011 & REL6=0 & Rel10_mic { delayslot( 1 ); goto Rel10_mic; } :bal Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00011 & mic_rs32_0=0 ; Rel16_mic { ra = inst_next | 0x1; delayslot( 1 ); call Rel16_mic; } :bc1f COP2CC^Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b11100 & mic_cp2z=0 & COP2CC ; Rel16_mic { # tmp:1 = getFpCondition(cc:1); # Note that other cc conditions are not implemented tmp:1 = fcsr[23,1]; # The floating point condition bit delayslot(1); if (tmp != 0) goto inst_next; goto Rel16_mic; } :bc1t COP2CC^Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b11101 & mic_cp2z=0 & COP2CC ; Rel16_mic { # tmp:1 = getFpCondition(cc:1); tmp:1 = fcsr[23,1]; # The floating point condition bit delayslot(1); if (tmp == 0) goto inst_next; goto Rel16_mic; } :bc2f COP2CC^Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b10100 & mic_cp2z=0 & COP2CC ; Rel16_mic { tmp:1 = getCopCondition(2:1, COP2CC); delayslot(1); if (tmp != 0) goto inst_next; goto
# bc1f/bc1t test fcsr bit 23 (the FP condition flag; other cc fields are not
# modelled — see the authors' note above).  Compact branches (beqzc/bnezc)
# below have no delay slot.
Rel16_mic; } :bc2t COP2CC^Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b10101 & mic_cp2z=0 & COP2CC ; Rel16_mic { tmp:1 = getCopCondition(2:1, COP2CC); delayslot(1); if (tmp == 0) goto inst_next; goto Rel16_mic; } :beq mic_rs32_0, mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b100101 & REL6=0 & mic_rs32_0 & mic_rt32_5; Rel16_mic { delayflag:1 = ( mic_rs32_0 == mic_rt32_5 ); delayslot( 1 ); if (delayflag) goto Rel16_mic; } :beqz16 mic_rs7, Rel7_mic is ISA_MODE=1 & RELP=0 & mic_op=0b100011 & REL6=0 & mic_rs7 & Rel7_mic { delayflag:1 = ( mic_rs7 == 0 ); delayslot( 1 ); if (delayflag) goto Rel7_mic; } :beqzc mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00111 & mic_rs32_0 ; Rel16_mic { if (mic_rs32_0 == 0) goto Rel16_mic; } :bgez mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00010 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s>= 0); delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bgezal mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00011 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s>= 0); ra = inst_next | 0x1; delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bgezals mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b10011 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s>= 0); ra = inst_next | 0x1; delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bgtz mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00110 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s> 0); delayslot( 1 ); if (delayflag) goto Rel16_mic; } :blez mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00100 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s<= 0); delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bltz mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00000 & mic_rs32_0 ;
# Remaining conditional branches, then the C.cond.fmt FP compare family
# begins: each writes its predicate into fcsr[23,1]; the cond<8 forms call
# trapIfSNaN (signal only on signaling NaNs).
Rel16_mic { delayflag:1 = (mic_rs32_0 s< 0); delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bltzal mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00001 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s< 0); ra = inst_next | 0x1; delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bltzals mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b10001 & mic_rs32_0 ; Rel16_mic { delayflag:1 = (mic_rs32_0 s< 0); ra = inst_next | 0x1; delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bne mic_rs32_0, mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b101101 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; Rel16_mic { delayflag:1 = (mic_rs32_0 != mic_rt32_5); delayslot( 1 ); if (delayflag) goto Rel16_mic; } :bnez16 mic_rs7, Rel7_mic is ISA_MODE=1 & RELP=0 & mic_op=0b101011 & REL6=0 & mic_rs7 & Rel7_mic { delayflag:1 = (mic_rs7 != 0); delayslot( 1 ); if (delayflag) goto Rel7_mic; } :bnezc mic_rs32_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_sub2=0b00101 & mic_rs32_0 ; Rel16_mic { if (mic_rs32_0 != 0) goto Rel16_mic; } :c.f.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=0 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN fcsr[23,1] = 0; # Always false, no trap } :c.f.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=0 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = 0; # Always false, no trap } :c.f.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=0 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.un.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 &
# --- C.cond.fmt FP compares, cond = 1 (UN) .. 5 (ULT) ---
# Each constructor writes its predicate into fcsr[23,1] (the FP condition
# bit); these cond<8 forms call trapIfSNaN (signal only on signaling NaNs).
# ".PS" paired-single variants are stubbed through the mipsFloatPS user-op.
# Reflowed one constructor per line; extraction had fused the trailing "#"
# comments with the code that followed them, so the original line breaks
# after those comments are restored.
# FIX(review): c.ueq ("unordered or equal", cond=3) must also be true when
# either operand is NaN — the nan() disjuncts were missing (compare c.ult,
# cond=5, and c.un, cond=1, which include them).
# Tail of :c.un.S (its header is on the preceding line):
RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=1 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN
fcsr[23,1] = nan(mic_fs:4) || nan(mic_ft_5:4); # True if an operand is NaN, no trap
}
:c.un.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=1 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = nan(mic_fsD) || nan(mic_ftD_5); # True if an operand is NaN, no trap
}
:c.un.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=1 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); }
:c.eq.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=2 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN
fcsr[23,1] = (mic_fs:4 f== mic_ft_5:4); # No trap
}
:c.eq.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=2 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f== mic_ftD_5); }
:c.eq.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=2 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); }
:c.ueq.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=3 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN
fcsr[23,1] = (mic_fs:4 f== mic_ft_5:4) || nan(mic_fs:4) || nan(mic_ft_5:4); # Equal or unordered, no trap
}
:c.ueq.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=3 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f== mic_ftD_5) || nan(mic_fsD) || nan(mic_ftD_5); # Equal or unordered, no trap
}
:c.ueq.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=3 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); }
:c.olt.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=4 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN
fcsr[23,1] = (mic_fs:4 f< mic_ft_5:4); # No trap
}
:c.olt.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=4 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f< mic_ftD_5); # No trap
}
:c.olt.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=4 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); }
:c.ult.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=5 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN
fcsr[23,1] = (mic_fs:4 f< mic_ft_5:4) || nan(mic_fs:4) || nan(mic_ft_5:4); # Less than or NaN No trap
}
# Head of :c.ult.D (its constraint list continues on the next line):
:c.ult.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=5 &
# C.cond.fmt continued (cond = 5..14).  Conditions with cond >= 8 (sf, ngle,
# seq, ngl, lt, nge, le) call trapIfNaN — they signal on any NaN operand —
# while the lower conditions use trapIfSNaN.  "u"-prefixed conditions
# include a nan() disjunct (true when unordered); ".PS" paired-single
# variants are stubbed through mipsFloatPS.  Code kept byte-identical;
# line breaks below fall mid-constructor (extraction artifact).
micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f< mic_ftD_5) || nan(mic_fsD) || nan(mic_ftD_5); # No trap } :c.ult.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=5 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.ole.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=6 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN fcsr[23,1] = (mic_fs:4 f<= mic_ft_5:4); # No trap } :c.ole.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=6 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f<= mic_ftD_5); # No trap } :c.ole.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=6 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.ule.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=7 & micb_poolfx=0b111100 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN fcsr[23,1] = (mic_fs:4 f<= mic_ft_5:4) || nan(mic_fs:4) || nan(mic_ft_5:4); # Less than or equal or NaN No trap } :c.ule.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=7 & micb_poolfx=0b111100 { trapIfSNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f<= mic_ftD_5) || nan(mic_fsD) || nan(mic_ftD_5); # No trap } :c.ule.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 &
RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=7 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.sf.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=8 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = 0; # Always false, trap if either operand is NaN } :c.sf.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=8 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = 0; # Always false, trap } :c.sf.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=8 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.ngle.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=9 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = nan(mic_fs:4) || nan(mic_ft_5:4); # True if an operand is NaN, trap } :c.ngle.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=9 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = nan(mic_fsD) || nan(mic_ftD_5); # True if an operand is NaN, trap } :c.ngle.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=9 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.seq.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ;
# seq/ngl ("signaling equal" / "not greater or less than") and lt/nge:
# the "# trap" trailing comments mark conditions that signal on NaN.
micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=10 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = (mic_fs:4 f== mic_ft_5:4); # trap } :c.seq.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=10 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f== mic_ftD_5); # trap } :c.seq.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=10 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.ngl.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=11 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = (mic_fs:4 f== mic_ft_5:4) || nan(mic_fs:4) || nan(mic_ft_5:4); # trap } :c.ngl.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=11 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f== mic_ftD_5) || nan(mic_fsD) || nan(mic_ftD_5); # trap } :c.ngl.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=11 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.lt.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=12 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = (mic_fs:4 f< mic_ft_5:4); # trap } :c.lt.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 &
# lt.PS / nge / le forms; :c.le.PS continues past the end of this extract.
micb_fmt10=1 & micb_cond=12 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f< mic_ftD_5); # trap } :c.lt.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=12 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.nge.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=13 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = (mic_fs:4 f< mic_ft_5:4) || nan(mic_fs:4) || nan(mic_ft_5:4); # trap } :c.nge.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=13 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f< mic_ftD_5) || nan(mic_fsD) || nan(mic_ftD_5); # trap } :c.nge.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=13 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.le.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=14 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = (mic_fs:4 f<= mic_ft_5:4); # Less than or equal trap } :c.le.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=14 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f<= mic_ftD_5); # trap } :c.le.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 &
micb_cond=14 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :c.ngt.S micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs ; micb_cc & micb_bit12=0 & micb_fmt10=0 & micb_cond=15 & micb_poolfx=0b111100 { trapIfNaN(mic_fs:4, mic_ft_5:4); fcsr[23,1] = (mic_fs:4 f<= mic_ft_5:4) || nan(mic_fs:4) || nan(mic_ft_5:4); # Less than or equal or NaN trap } :c.ngt.D micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=1 & micb_cond=15 & micb_poolfx=0b111100 { trapIfNaN(mic_fsD, mic_ftD_5); fcsr[23,1] = (mic_fsD f<= mic_ftD_5) || nan(mic_fsD) || nan(mic_ftD_5); # trap } :c.ngt.PS micb_cc, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_cc & micb_bit12=0 & micb_fmt10=2 & micb_cond=15 & micb_poolfx=0b111100 { fcsr[23,1] = mipsFloatPS(mic_fsD, mic_ftD_5); } :cache mic_cop5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & REL6=0 & mic_cop5 & mic_base0 ; micb_func12=0b0110 & EXT_CODE12 { cacheOp(); } :cvt.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fxf2=0b00110000000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :cvt.s.PL mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_poolfx=0b111011 & micb_fxf4=0b10000100 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :cvt.s.PU mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5 ; micb_bit15=0 & micb_poolfx=0b111011 & micb_fxf4=0b10100100 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :div RT5L, RS0L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RS0L & RT5L ; micb_poolax=0b111100 & micb_axf=0b1010101100 { lo = sext(RS0L s/ RT5L); hi = sext(RS0L s% RT5L); } :divu 
RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RS0L & RT5L ; micb_poolax=0b111100 & micb_axf=0b1011101100 { lo = sext(RS0L / RT5L); hi = sext(RS0L % RT5L); } :j Abs26_mic1 is ISA_MODE=1 & RELP=0 & mic_op=0b110101 & REL6=0 & mic_code ; Abs26_mic1 [ ext_32_code = mic_code; ] { delayslot( 1 ); goto Abs26_mic1; } :jal Abs26_mic1 is ISA_MODE=1 & RELP=0 & mic_op=0b111101 & REL6=0 & mic_code ; Abs26_mic1 [ ext_32_code = mic_code; ] { ra = inst_next | 0x1; delayslot( 1 ); call Abs26_mic1; } :jalr RTIMP^mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5 & RTIMP ; micb_axf=0b0000111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); mic_rt32_5 = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jalr.hb RTIMP^mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5 & RTIMP ; micb_axf=0b0001111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); mic_rt32_5 = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jalr16 mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b01110 & mic_rs32_0 { JXWritePC(mic_rs32_0); ra = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jalrs16 mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b01111 & mic_rs32_0 { JXWritePC(mic_rs32_0); ra = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jalrs RTIMP^mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5 & RTIMP ; micb_axf=0b0100111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); mic_rt32_5 = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jalrs.hb RTIMP^mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5 & RTIMP ; micb_axf=0b0101111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); mic_rt32_5 = inst_next | 0x1; delayslot( 1 ); call [pc]; } :jals Abs26_mic1 is ISA_MODE=1 & RELP=0 & mic_op=0b011101 & REL6=0 & mic_code ; Abs26_mic1 [ ext_32_code = mic_code; ] { ra = inst_next | 0x1; delayslot( 1 ); call Abs26_mic1; } 
:jalx Abs26_mic2 is ISA_MODE=1 & RELP=0 & mic_op=0b111100 & REL6=0 & mic_code ; Abs26_mic2 [ ext_32_code = mic_code; ISA_MODE = 0; globalset(Abs26_mic2, ISA_MODE);] { ra = inst_next | 0x1; delayslot( 1 ); ISAModeSwitch = 0; call Abs26_mic2; } :jr mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5=0 ; micb_axf=0b0000111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); delayslot( 1 ); goto [pc]; } :jr.hb mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5=0 ; micb_axf=0b0001111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); delayslot( 1 ); goto [pc]; } :jr16 mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b01100 & mic_rs32_0 { JXWritePC(mic_rs32_0); delayslot( 1 ); goto [pc]; } :jr16 ra is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b01100 & mic_rs32_0=31 & ra { JXWritePC(ra); delayslot( 1 ); return [pc]; } :jraddiusp EXT_CODE5 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b11000 & EXT_CODE5 { tmp:4 = zext(EXT_CODE5); @if REGSIZE == "4" sp = sp + tmp; @else sp_lo = sp_lo + tmp; sp = sext(sp_lo); @endif JXWritePC(ra); return [pc]; } :jrc mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b01101 & mic_rs32_0 { JXWritePC(mic_rs32_0); goto [pc]; } :jrc ra is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_jalr=0b01101 & mic_rs32_0=31 & ra { JXWritePC(ra); return [pc]; } @if ENDIAN == "big" :lwl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0000 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff >> ((4-shft) * 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad << (shft * 8); mic_rt32_5 = sext( valLoad | valOrig ); } :lwle mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & 
REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b010 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff >> ((4-shft) * 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad << (shft * 8); mic_rt32_5 = sext( valLoad | valOrig ); } :lwr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0001 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff << ((shft+1) * 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad >> ((3-shft) * 8); mic_rt32_5 = sext( valOrig | valLoad ); } :lwre mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b011 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff << ((shft+1) * 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad >> ((3-shft) * 8); mic_rt32_5 = sext( valOrig | valLoad ); } @else :lwl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0000 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff >> ((shft+1)* 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad << ((3-shft) * 8); mic_rt32_5 = sext( valLoad | valOrig ); } :lwle mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b010 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + 
mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff >> ((shft+1)* 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad << ((3-shft) * 8); mic_rt32_5 = sext( valLoad | valOrig ); } :lwr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0001 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff << ((4-shft)* 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad >> (shft * 8); mic_rt32_5 = sext( valOrig | valLoad ); } :lwre mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b011 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = mic_rt32_5:$(SIZETO4) & (0xffffffff << ((4-shft)* 8)); valLoad:4 = 0; MemSrcCast(valLoad,addr); valLoad = valLoad >> (shft * 8); mic_rt32_5 = sext( valOrig | valLoad ); } @endif # lwl and lwr almost always come in pairs. 
# When the analyzer does finds a matching lwl/lwr pair, the pcode is simplified so that # lwl does all the loading while lwr is a no-op @if ENDIAN == "big" :lwl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b0000 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext( *[ram]:4 tmpa ); } :lwr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b0001 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 0; ] { } @else :lwl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b0000 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { } :lwr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b0001 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 0; ] { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext( *[ram]:4 tmpa ); } @endif :lwxc1 micb_fd, mic_index(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_index & mic_base0 ; micb_fd & micb_fxf2=0b00001001000 { tmpa:$(REGSIZE) = mic_index + mic_base0; tmpb:$(ADDRSIZE) = 0; ValCast(tmpb,tmpa); tmp:4 = *[ram]:4 tmpb; micb_fd = (micb_fd ^ 0xffffffff) + zext(tmp); } :madd RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RT5L & RS0L ; micb_poolax=0b111100 & micb_axf=0b1100101100 { tmp1:8 = sext(RS0L); tmp2:8 = sext(RT5L); prod:8 = tmp1 * tmp2; lo = lo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(hi) << 32) + zext(lo) + prod; lo = 
sext(sum:4); sum = sum >> 32; hi = sext(sum:4); } :madd.S micb_fd, micb_fr, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 ; micb_fd & micb_fr & micb_poolfx=0b000001 { fd_tmp:4 = (mic_fs:4 f* mic_ft_5:4) f+ micb_fr:4; micb_fd = zext(fd_tmp); } :madd.D micb_fd, micb_fr, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_ftD_5 & mic_fsD; micb_fd & micb_fdD & micb_fr &micb_frD & micb_poolfx=0b001001 { micb_fdD = (mic_fsD f* mic_ftD_5) f+ micb_frD; } :madd.PS micb_fd, micb_fr, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_ftD_5 & mic_fsD ; micb_fd & micb_fdD & micb_fr & micb_poolfx=0b010001 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :maddu RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RT5L & RS0L ; micb_poolax=0b111100 & micb_axf=0b1101101100 { tmp1:8 = zext(RS0L); tmp2:8 = zext(RT5L); prod:8 = tmp1 * tmp2; lo = lo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(hi) << 32) + zext(lo) + prod; lo = sext(sum:4); sum = sum >> 32; hi = sext(sum:4); } :mfhi mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rt32_5=0 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0000110101 { mic_rs32_0 = hi; } :mfhi16 mic_rd32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_sub2=0b10000 & mic_rd32_0 { mic_rd32_0 = hi; } :mflo mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rt32_5=0 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0001110101 { mic_rs32_0 = lo; } :mflo16 mic_rd32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=0 & mic_sub2=0b10010 & mic_rd32_0 { mic_rd32_0 = lo; } :mov.PS mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_ftD_5 & mic_fsD ; micb_bit15=0 & micb_fmt=2 & micb_poolfx=0b111011 & micb_flt6=0b0000001 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :movf mic_rt32_5, mic_rs32_0, micb_cc is ISA_MODE=1 & 
RELP=0 & mic_op=0b010101 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; micb_cc & micb_poolfx=0b111011 & micb_flt6=0b0000101 { tmp:1 = fcsr[23,1]; # was getFpCondition(cc:1); if (tmp != 0) goto ; mic_rt32_5 = mic_rs32_0; } :movf.S mic_ft_5, mic_fs, micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 ; micb_cc & micb_fmt9=0 & micb_z11=0 & micb_fxf5=0b000100000 { tmp:1 = fcsr[23,1]; # was getFpCondition(cc:1); if (tmp != 0) goto ; fs_tmp:4 = mic_fs:4; mic_ft_5 = zext(fs_tmp); } :movf.D mic_ft_5, mic_fs, micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_cc & micb_fmt9=1 & micb_z11=0 & micb_fxf5=0b000100000 { tmp:1 = fcsr[23,1]; # was getFpCondition(cc:1); if (tmp != 0) goto ; mic_ftD_5 = mic_fsD; } :movf.PS mic_ft_5, mic_fs, micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_ftD_5 & mic_fsD ; micb_cc & micb_fmt9=2 & micb_z11=0 & micb_fxf5=0b000100000 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :movn micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0000011000 & micb_bit10=0 & micb_rd32 { if (mic_rt32_5 == 0) goto ; micb_rd32 = mic_rs32_0; } :movn.S micb_fd, mic_fs, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rt32_5 & mic_fs ; micb_fd & micb_bit10=0 & micb_fmt8=0 & micb_fxf3=0b00111000 { if (mic_rt32_5 == 0) goto ; fs_tmp:4 = mic_fs:4; micb_fd = zext(fs_tmp); } :movn.D micb_fd, mic_fs, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rt32_5 & mic_fs & mic_fsD ; micb_fdD & micb_fd & micb_bit10=0 & micb_fmt8=1 & micb_fxf3=0b00111000 { if (mic_rt32_5 == 0) goto ; micb_fdD = mic_fsD; } :movn.PS micb_fd, mic_fs, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rt32_5 & mic_fs & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=2 & micb_fxf3=0b00111000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :movt mic_rt32_5, mic_rs32_0, 
micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; micb_cc & micb_poolfx=0b111011 & micb_flt6=0b0100101 { tmp:1 = fcsr[23,1]; # was getFpCondition(cc:1); if (tmp == 0) goto ; mic_rt32_5 = mic_rs32_0; } :movt.S mic_ft_5, mic_fs, micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 ; micb_cc & micb_fmt9=0 & micb_z11=0 & micb_fxf5=0b001100000 { tmp:1 = fcsr[23,1]; # was getFpCondition(cc:1); if (tmp == 0) goto ; fs_tmp:4 = mic_fs:4; mic_ft_5 = zext(fs_tmp); } :movt.D mic_ft_5, mic_fs, micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_cc & micb_fmt9=1 & micb_z11=0 & micb_fxf5=0b001100000 { tmp:1 = fcsr[23,1]; # was getFpCondition(cc:1); if (tmp == 0) goto ; mic_ftD_5 = mic_fsD; } :movt.PS mic_ft_5, mic_fs, micb_cc is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_ftD_5 & mic_fsD ; micb_cc & micb_fmt9=2 & micb_z11=0 & micb_fxf5=0b001100000 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :movz micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0001011000 & micb_bit10=0 & micb_rd32 { if (mic_rt32_5 != 0) goto ; micb_rd32 = mic_rs32_0; } :movz.S micb_fd, mic_fs, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rt32_5 & mic_fs ; micb_fd & micb_bit10=0 & micb_fmt8=0 & micb_fxf3=0b01111000 { if (mic_rt32_5 != 0) goto ; fs_tmp:4 = mic_fs:4; micb_fd = zext(fs_tmp); } :movz.D micb_fd, mic_fs, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rt32_5 & mic_fs & mic_fsD ; micb_fdD & micb_fd & micb_bit10=0 & micb_fmt8=1 & micb_fxf3=0b01111000 { if (mic_rt32_5 != 0) goto ; micb_fdD = mic_fsD; } :movz.PS micb_fd, mic_fs, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_rt32_5 & mic_fs & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=2 & micb_fxf3=0b01111000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :msub 
RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RT5L & RS0L ; micb_poolax=0b111100 & micb_axf=0b1110101100 { tmp1:8 = sext(RS0L); tmp2:8 = sext(RT5L); prod:8 = tmp1 * tmp2; lo = lo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(hi) << 32) + zext(lo) - prod; lo = sext(sum:4); sum = sum >> 32; hi = sext(sum:4); } :msub.S micb_fd, micb_fr, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 ; micb_fd & micb_fr & micb_poolfx=0b100001 { fd_tmp:4 = (mic_fs:4 f* mic_ft_5:4) f- micb_fr:4; micb_fd = zext(fd_tmp); } :msub.D micb_fd, micb_fr, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fdD & micb_frD & micb_fd & micb_fr & micb_poolfx=0b101001 { micb_fdD = (mic_fsD f* mic_ftD_5) f- micb_frD; } :msub.PS micb_fd, micb_fr, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_fr & micb_poolfx=0b110001 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :msubu RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RT5L & RS0L ; micb_poolax=0b111100 & micb_axf=0b1111101100 { tmp1:8 = zext(RS0L); tmp2:8 = zext(RT5L); prod:8 = tmp1 * tmp2; lo = lo & 0xffffffff; # Make sure any upper bits of lo don't contribute to sum sum:8 = (zext(hi) << 32) + zext(lo) - prod; lo = sext(sum:4); sum = sum >> 32; hi = sext(sum:4); } :mthi mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rt32_5=0 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0010110101 { hi = mic_rs32_0; } :mtlo mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rt32_5=0 & mic_rs32_0 ; micb_poolax=0b111100 & micb_axf=0b0011110101 { lo = mic_rs32_0; } :mul.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=2 & micb_fxf3=0b10110000 { micb_fdD = 
mipsFloatPS(mic_fsD, mic_ftD_5); } :mult RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RT5L & RS0L ; micb_poolax=0b111100 & micb_axf=0b1000101100 { tmps:8 = sext(RS0L); tmpt:8 = sext(RT5L); tmpr:8= tmps * tmpt; lo = sext(tmpr[0,32]); hi = sext(tmpr[32,32]); } :multu RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & RT5L & RS0L ; micb_poolax=0b111100 & micb_axf=0b1001101100 { tmps:8 = zext(RS0L); tmpt:8 = zext(RT5L); tmpr:8= tmps * tmpt; lo = sext(tmpr[0,32]); hi = sext(tmpr[32,32]); } :neg.PS mic_ft_5, mic_fs is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_ft_5 & mic_fs & mic_ftD_5 & mic_fsD ; micb_bit15=0 & micb_fmt=2 & micb_poolfx=0b111011 & micb_flt6=0b0101101 { mic_ftD_5 = mipsFloatPS(mic_fsD); } :pll.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_fxf2=0b00010000000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :plu.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_fxf2=0b00011000000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :prefx micb_hint, mic_index(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_index & mic_base0 ; micb_axf2=0b0110100000 & micb_bit10=0 & micb_hint { tmp:$(REGSIZE) = mic_index + mic_base0; prefetch(tmp,micb_hint:1); } :pul.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_fxf2=0b00100000000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :puu.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_fxf2=0b00101000000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } :rdhwr mic_rt32_5, mic_rs32_hw is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=0 & mic_rt32_5 & mic_rs32_hw ; micb_poolax=0b111100 & 
micb_axf=0b0110101100 { mic_rt32_5 = getHWRegister(mic_rs32_hw); } :sub.PS micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & mic_ft_5 & mic_fs & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_bit10=0 & micb_fmt8=2 & micb_fxf3=0b01110000 { micb_fdD = mipsFloatPS(mic_fsD, mic_ftD_5); } @if ENDIAN == "big" :swl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1000 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff << ((4-shft) * 8)); valStore:4 = (tmpRT >> (shft * 8)) | valOrig; MemDestCast(addr,valStore); } :swle mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff << ((4-shft) * 8)); valStore:4 = (tmpRT >> (shft * 8)) | valOrig; MemDestCast(addr,valStore); } :swr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1001 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff >> ((shft+1) * 8)); valStore:4 = (tmpRT << ((3-shft)*8)) | valOrig; MemDestCast(addr,valStore); } :swre mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b001 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpRT:4 = 
mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff >> ((shft+1) * 8)); valStore:4 = (tmpRT << ((3-shft)*8)) | valOrig; MemDestCast(addr,valStore); } @else :swl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1000 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff << ((shft+1) * 8)); valStore:4 = (tmpRT >> ((3-shft) * 8)) | valOrig; MemDestCast(addr,valStore); } :swle mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff << ((shft+1) * 8)); valStore:4 = (tmpRT >> ((3-shft) * 8)) | valOrig; MemDestCast(addr,valStore); } :swr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1001 & EXT_CODE12 { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff >> ((4-shft) * 8)); valStore:4 = (tmpRT << (shft*8)) | valOrig; MemDestCast(addr,valStore); } :swre mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b001 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpRT:4 = mic_rt32_5:$(SIZETO4); shft:$(REGSIZE) = tmp & 0x3; addr:$(REGSIZE) = 
tmp - shft; valOrig:4 = 0; MemSrcCast(valOrig,addr); valOrig = valOrig & (0xffffffff >> ((4-shft) * 8)); valStore:4 = (tmpRT << (shft*8)) | valOrig; MemDestCast(addr,valStore); } @endif # When the analyzer finds a matching swl/swr pair, the pcode is simplified so that # swl does all the storing while swr is a no-op @if ENDIAN == "big" :swl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b1000 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = mic_rt32_5:$(SIZETO4); } :swr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b1001 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 0; ] { } @else :swl mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b1000 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 1; globalset(inst_next, PAIR_INSTRUCTION_FLAG);] { } :swr mic_rt32_5, EXT_CODE12(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=0 & mic_rt32_5 & mic_base0 & PAIR_INSTRUCTION_FLAG=1 ; micb_func12=0b1001 & EXT_CODE12 [ PAIR_INSTRUCTION_FLAG = 0; ] { tmp:$(REGSIZE) = sext(EXT_CODE12); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = mic_rt32_5:$(SIZETO4); } @endif :sdxc1 micb_fd, mic_index(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_index & mic_base0 ; micb_fd & micb_fdD & micb_fxf2=0b00100001000 { tmp:$(REGSIZE) = mic_index + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:8 tmpa = micb_fdD; } :swxc1 micb_fd, mic_index(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_index & mic_base0 ; micb_fd & micb_fxf2=0b00010001000 { tmp:$(REGSIZE) = mic_index + 
mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:4 tmpa = micb_fd:4; } :teqi mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_funci=0b01110 & mic_rs32_0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); if (mic_rs32_0 != tmp) goto ; trap(); } :tgei mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_funci=0b01001 & mic_rs32_0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); if (tmp s< mic_rs32_0) goto ; trap(); } :tgeiu mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_funci=0b01011 & mic_rs32_0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); if (tmp < mic_rs32_0) goto ; trap(); } :tlti mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_funci=0b01000 & mic_rs32_0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); if (tmp s>= mic_rs32_0) goto ; trap(); } :tltiu mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_funci=0b01010 & mic_rs32_0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); if (tmp >= mic_rs32_0) goto ; trap(); } :tnei mic_rs32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=0 & mic_funci=0b01100 & mic_rs32_0 ; EXT_MS16 { tmp:$(REGSIZE) = sext(EXT_MS16); if (mic_rs32_0 == tmp) goto ; trap(); } @ifdef MIPS64 :ddiv mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; micb_poolax=0b111100 & micb_axf=0b1010101100 { lo = mic_rs32_0 s/ mic_rt32_5; hi = mic_rs32_0 s% mic_rt32_5; } :ddivu mic_rt32_5, mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=0 & mic_rs32_0 & mic_rt32_5 ; micb_poolax=0b111100 & micb_axf=0b1011101100 { lo = mic_rs32_0 / mic_rt32_5; hi = mic_rs32_0 % mic_rt32_5; } :luxc1 micb_fd, mic_index(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_index & mic_base0 ; micb_fd & micb_fxf2=0b00101001000 { tmp:$(REGSIZE) = mic_index + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); ptr:$(ADDRSIZE) = tmpa; micb_fd = *[ram]:8 ptr; } @endif #### 
# # Release 6 semantics # #### :align micb_rd32, RS0L, RT5L, micb_bp is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RT5L & RS0L ; micb_poolax=0b011111 & micb_z68=0 & micb_rd32 & micb_bp { tmp:4 = RT5L << (8 * micb_bp); tmp = tmp | (RS0L >> (32 - (8 * micb_bp))); micb_rd32 = sext(tmp); } :aluipc mic_rt32_5, EXT_MS32 is ISA_MODE=1 & RELP=0 & mic_op=0b011110 & REL6=1 & mic_pcf=0b11111 & mic_rt32_5 ; EXT_MS32 { tmp:$(REGSIZE) = sext(EXT_MS32); tmp = tmp + inst_start; tmp = tmp & ~0xFFFF; mic_rt32_5 = tmp; } :aui mic_rt32_5, RS0L, EXT_MS32 is ISA_MODE=1 & RELP=0 & mic_op=0b000100 & REL6=1 & mic_rt32_5 & RS0L ; EXT_MS32 { tmp:4 = RS0L + EXT_MS32; mic_rt32_5 = sext(tmp); } :auipc mic_rt32_5, EXT_MS32 is ISA_MODE=1 & RELP=0 & mic_op=0b011110 & REL6=1 & mic_pcf=0b11110 & mic_rt32_5 ; EXT_MS32 { tmp:$(REGSIZE) = sext(EXT_MS32); tmp = tmp + inst_start; mic_rt32_5 = tmp; } # Check this. Says left shift by 1 bit, but then says 4 byte alligned. BC instruction is simlar though it says left shift by 2 bits. 
# Either way, there is a mistake in the documentation.

# BALC: compact branch-and-link (no delay slot).  ra receives the return
# address with bit 0 set (|0x1) to record microMIPS ISA mode.
:balc Rel26_mic is ISA_MODE=1 & RELP=0 & mic_op=0b101101 & REL6=1 ; Rel26_mic
{
    ra = inst_next | 0x1;
    call Rel26_mic;
}

# BC: compact unconditional branch, 26-bit offset.
:bc Rel26_mic is ISA_MODE=1 & RELP=0 & mic_op=0b100101 & REL6=1 ; Rel26_mic
{
    goto Rel26_mic;
}

# BC16: 16-bit encoding of the compact unconditional branch, 10-bit offset.
:bc16 Rel10_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110011 & REL6=1 & Rel10_mic
{
    goto Rel10_mic;
}

# BC1EQZC: branch if bit 0 of FPR ft is zero.
:bc1eqzc mic_ft_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=1 & mic_funci=0b01000 & mic_ft_0; Rel16_mic
{
    tmp:1 = mic_ft_0[0,8] & 0x01; # Only need to check the LSB
    if (tmp == 0x00) goto Rel16_mic;
}

# BC1NEZC: branch if bit 0 of FPR ft is non-zero.
:bc1nezc mic_ft_0, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=1 & mic_funci=0b01001 & mic_ft_0; Rel16_mic
{
    tmp:1 = mic_ft_0[0,8] & 0x01; # Only need to check the LSB
    if (tmp == 0x01) goto Rel16_mic;
}

# BC2EQZC / BC2NEZC: branch on coprocessor-2 condition.
# NOTE(review): the branch sense below (fall through when the condition
# is 0 for "eqz", when 1 for "nez") looks inverted relative to the
# instruction names -- confirm the return convention of the
# getCopCondition() pcodeop against its definition.
:bc2eqzc mic_ct, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=1 & mic_funci=0b01010 & mic_ct; Rel16_mic
{
    tmp:1 = getCopCondition(2:1, mic_ct:1);
    if (tmp == 0) goto inst_next;
    goto Rel16_mic;
}

:bc2nezc mic_ct, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=1 & mic_funci=0b01011 & mic_ct; Rel16_mic
{
    tmp:1 = getCopCondition(2:1, mic_ct:1);
    if (tmp == 1) goto inst_next;
    goto Rel16_mic;
}

# BEQZC16 / BNEZC16: 16-bit compact branch on (non-)zero register,
# 7-bit offset.
:beqzc16 mic_rs7, Rel7_mic is ISA_MODE=1 & RELP=0 & mic_op=0b100011 & REL6=1 & mic_rs7 & Rel7_mic
{
    if (mic_rs7 == 0) goto Rel7_mic;
}

:bnezc16 mic_rs7, Rel7_mic is ISA_MODE=1 & RELP=0 & mic_op=0b101011 & REL6=1 & mic_rs7 & Rel7_mic
{
    if (mic_rs7 != 0) goto Rel7_mic;
}

# Some of the branch instructions have a != in the pattern description
# in the MIPS documentation.
# In order to avoid pattern blowup in sleigh, I use some fake
# instructions as sinks when possible.
# It was not possible to avoid all instances of != in the constructors.

# Sink constructor for the rs=rt=0 encoding of the 0b110000 opcode group
# (no architecturally defined instruction there).
:bad1 is ISA_MODE=1 & RELP=0 & mic_op=0b110000 & REL6=1 & mic_code=0; Rel16_mic unimpl

# BLEZALC: compact branch-and-link if rt <= 0 (encoded with rs=0).
# The guard is the inverted condition: fall through when rt > 0.
# ra gets inst_next with bit 0 set (|0x1) to record microMIPS ISA mode.
:blezalc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110000 & REL6=1 & mic_rs32_0=0 & mic_rt32_5; Rel16_mic
{
    if (mic_rt32_5 s> 0) goto inst_next;
    ra = inst_next | 0x1;
    call Rel16_mic;
}

# BGEZALC: compact branch-and-link if rt >= 0; encoded with rs==rt!=0
# (the mic_rt32_5=mic_rs32_0 and mic_rt32_5a!=0 terms pin that encoding).
:bgezalc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110000 & REL6=1 & mic_rt32_5 & mic_rt32_5=mic_rs32_0 & mic_rt32_5a!=0; Rel16_mic
{
    if (mic_rt32_5 s< 0) goto inst_next;
    ra = inst_next | 0x1;
    call Rel16_mic;
}

# BGEUC: compact branch (no link) if rs >= rt, unsigned.  Catch-all for
# the remaining rs/rt combinations of this opcode.
:bgeuc mic_rs32_0,mic_rt32_5,Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110000 & REL6=1 & mic_rt32_5 & mic_rs32_0; Rel16_mic
{
    if (mic_rs32_0 >= mic_rt32_5) goto Rel16_mic;
}

# Sink constructor for the rs=rt=0 encoding of the 0b111000 opcode group.
:bad2 is ISA_MODE=1 & RELP=0 & mic_op=0b111000 & REL6=1 & mic_code=0; Rel16_mic unimpl

# BGTZALC: compact branch-and-link if rt > 0 (encoded with rs=0).
:bgtzalc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b111000 & REL6=1 & mic_rs32_0=0 & mic_rt32_5; Rel16_mic
{
    if (mic_rt32_5 s<= 0) goto inst_next;
    ra = inst_next | 0x1;
    call Rel16_mic;
}

# BLTZALC: compact branch-and-link if rt < 0; encoded with rs==rt!=0.
:bltzalc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b111000 & REL6=1 & mic_rt32_5 & mic_rt32_5=mic_rs32_0 & mic_rt32_5a!=0; Rel16_mic
{
    if (mic_rt32_5 s>= 0) goto inst_next;
    ra = inst_next | 0x1;
    call Rel16_mic;
}

# BLTUC: compact branch (no link) if rs < rt, unsigned.  Catch-all for
# the remaining rs/rt combinations of this opcode.
:bltuc mic_rs32_0,mic_rt32_5,Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b111000 & REL6=1 & mic_rt32_5 & mic_rs32_0; Rel16_mic
{
    if (mic_rs32_0 < mic_rt32_5) goto Rel16_mic;
}

# for this case, bovc is the catch-all.
:beqzalc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b011101 & REL6=1 & mic_rs32_0=0 & mic_rt32_5 & mic_rt32_5a!=0; Rel16_mic { if (mic_rt32_5 != 0) goto inst_next; ra = inst_next | 0x1; call Rel16_mic; } :beqc mic_rs32_0, mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b011101 & REL6=1 & mic_rt32_5 & mic_rs32_0 & mic_rs32_0a!=0 & mic_rs32_0b 0x7FFFFFFF) || (tmpS s< -2147483648); @if REGSIZE == "8" tmpF = tmpF || (mic_rt32_5 s> 0x7FFFFFFF) || (mic_rt32_5 s< -2147483648) || (mic_rs32_0 s> 0x7FFFFFFF) || (mic_rs32_0 s< -2147483648); @endif if (tmpF == 1) goto Rel16_mic; } # for this case, bnvc is the catch-all. :bnezalc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b011111 & REL6=1 & mic_rs32_0=0 & mic_rt32_5 & mic_rt32_5a!=0; Rel16_mic { if (mic_rt32_5 == 0) goto inst_next; ra = inst_next | 0x1; call Rel16_mic; } :bnec mic_rs32_0, mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b011111 & REL6=1 & mic_rt32_5 & mic_rs32_0 & mic_rs32_0a!=0 & mic_rs32_0b 0x7FFFFFFF) || (tmpS s< -2147483648); @if FREGSIZE == "8" tmpF = tmpF || (mic_rt32_5 s> 0x7FFFFFFF) || (mic_rt32_5 s< -2147483648) || (mic_rs32_0 s> 0x7FFFFFFF) || (mic_rs32_0 s< -2147483648); @endif if (tmpF == 0) goto Rel16_mic; } :bad3 is ISA_MODE=1 & RELP=0 & mic_op=0b111001 & REL6=1 & mic_code=0; Rel16_mic unimpl :blezc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b111001 & REL6=1 & mic_rs32_0=0 & mic_rt32_5; Rel16_mic { if (mic_rt32_5 s<= 0) goto Rel16_mic; } :bgezc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b111001 & REL6=1 & mic_rt32_5 & mic_rt32_5=mic_rs32_0 & mic_rt32_5a!=0; Rel16_mic { if (mic_rt32_5 s>= 0) goto Rel16_mic; } :bgec mic_rs32_0,mic_rt32_5,Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b111001 & REL6=1 & mic_rt32_5 & mic_rs32_0; Rel16_mic { if (mic_rs32_0 s>= mic_rt32_5) goto Rel16_mic; } :bad4 is ISA_MODE=1 & RELP=0 & mic_op=0b110001 & REL6=1 & mic_code=0; Rel16_mic unimpl :bgtzc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110001 & 
REL6=1 & mic_rs32_0=0 & mic_rt32_5; Rel16_mic { if (mic_rt32_5 s> 0) goto Rel16_mic; } :bltzc mic_rt32_5, Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110001 & REL6=1 & mic_rt32_5 & mic_rt32_5=mic_rs32_0 & mic_rt32_5a!=0; Rel16_mic { if (mic_rt32_5 s< 0) goto Rel16_mic; } :bltc mic_rs32_0,mic_rt32_5,Rel16_mic is ISA_MODE=1 & RELP=0 & mic_op=0b110001 & REL6=1 & mic_rt32_5 & mic_rs32_0; Rel16_mic { if (mic_rs32_0 s< mic_rt32_5) goto Rel16_mic; } :beqzc mic_rs32_5, Rel21_mic is ISA_MODE=1 & RELP=0 & mic_op=0b100000 & REL6=1 & mic_rs32_5 & mic_imm5; Rel21_mic [ext_32_imm5 = mic_imm5;] { if (mic_rs32_5 == 0) goto Rel21_mic; } :bad6 is ISA_MODE=1 & RELP=0 & mic_op=0b10100 & REL6=1 & mic_rs32_5=0; Rel21_mic unimpl :bnezc mic_rs32_5, Rel21_mic is ISA_MODE=1 & RELP=0 & mic_op=0b10100 & REL6=1 & mic_rs32_5 & mic_imm5; Rel21_mic [ext_32_imm5 = mic_imm5;] { if (mic_rs32_5 != 0) goto Rel21_mic; } :bitswap mic_rd32_0, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RT5L & mic_rd32_0 ; micb_poolax=0b111100 & micb_axf3=0b101100 & micb_z12=0 { tmp:4 = bitSwap(RT5L); mic_rd32_0 = sext(tmp); } :cache mic_cop5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b001000 & REL6=1 & mic_cop5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0 & EXT_CODE9E { cacheOp(); } :class.S mic_fd, mic_fs_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs_5 & mic_fd ; micb_size=0 & micb_fmt9=0 & micb_fxf5=0b001100000 { tmp_fs:4 = mic_fs_5:4; # Get just the 4 byte single floating point value tmp_exponent:4 = zext(tmp_fs[23,8]); tmp_fraction:4 = zext(tmp_fs[0,23]); tmp_sign:4 = zext(tmp_fs[31,1]); tmp_b1:4 = zext(tmp_fs[22,1]); # High order bit of fraction, used for NaN tmp_SNaN:4 = zext((tmp_exponent == 0x0ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x0)); tmp_QNaN:4 = zext((tmp_exponent == 0x0ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x01)); tmp_Neg_Infinity:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0ff) && (tmp_fraction == 0x0)); tmp_Neg_Normal:4 = zext((tmp_sign == 0x01) 
&& (tmp_exponent != 0x0) && (tmp_exponent != 0x0ff)); tmp_Neg_Subnormal:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Neg_Zero:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_Pos_Infinity:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0ff) && (tmp_fraction == 0x0)); tmp_Pos_Normal:4 = zext((tmp_sign == 0x0) && (tmp_exponent != 0x0) && (tmp_exponent != 0x0ff)); tmp_Pos_Subnormal:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Pos_Zero:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_fd:4 = 0; tmp_fd = tmp_SNaN | (tmp_QNaN << 1) | (tmp_Neg_Infinity << 2) | (tmp_Neg_Normal << 3) | (tmp_Neg_Subnormal << 4) | (tmp_Neg_Zero << 5) | (tmp_Pos_Infinity << 6) | (tmp_Pos_Normal << 7) | (tmp_Pos_Subnormal << 8) | (tmp_Pos_Zero << 9); mic_fd = zext(tmp_fd); } :class.D mic_fd, mic_fs_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs_5 & mic_fd & mic_fsD_5 & mic_fdD ; micb_size=0 & micb_fmt9=1 & micb_fxf5=0b001100000 { tmp_fs:8 = mic_fsD_5; tmp_sign:4 = zext(tmp_fs[63,1]); tmp_exponent:4 = zext(tmp_fs[52,11]); tmp_fraction:8 = zext(tmp_fs[0,51]); tmp_b1:4 = zext(tmp_fs[51,1]); # High order bit of fraction, used for NaN tmp_SNaN:4 = zext((tmp_exponent == 0x07ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x0)); tmp_QNaN:4 = zext((tmp_exponent == 0x07ff) && (tmp_fraction != 0x0) && (tmp_b1 == 0x01)); tmp_Neg_Infinity:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x07ff) && (tmp_fraction == 0x0)); tmp_Neg_Normal:4 = zext((tmp_sign == 0x01) && (tmp_exponent != 0x0) && (tmp_exponent != 0x07ff)); tmp_Neg_Subnormal:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Neg_Zero:4 = zext((tmp_sign == 0x01) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_Pos_Infinity:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x07ff) && (tmp_fraction == 0x0)); tmp_Pos_Normal:4 = zext((tmp_sign == 0x0) && 
(tmp_exponent != 0x0) && (tmp_exponent != 0x07ff)); tmp_Pos_Subnormal:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction != 0x0)); tmp_Pos_Zero:4 = zext((tmp_sign == 0x0) && (tmp_exponent == 0x0) && (tmp_fraction == 0x0)); tmp_fd:4 = 0; tmp_fd = tmp_SNaN | (tmp_QNaN << 1) | (tmp_Neg_Infinity << 2) | (tmp_Neg_Normal << 3) | (tmp_Neg_Subnormal << 4) | (tmp_Neg_Zero << 5) | (tmp_Pos_Infinity << 6) | (tmp_Pos_Normal << 7) | (tmp_Pos_Subnormal << 8) | (tmp_Pos_Zero << 9); mic_fdD = zext(tmp_fd); } #:cmp.condn.fmt :cmp.af.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x00 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = 0x0; } :cmp.af.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x00 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = 0x0; } :cmp.un.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x01 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext((nan(mic_fs:4) || nan(mic_ft_5:4)) * 0xff); } :cmp.un.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_cond2=0x01 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((nan(mic_fsD) || nan(mic_ftD_5)) * 0xff); } :cmp.eq.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x02 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext((mic_fs:4 f== mic_ft_5:4) * 0xff); } :cmp.eq.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & 
mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x02 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((mic_fsD f== mic_ftD_5) * 0xff); } :cmp.ueq.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x03 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f== mic_ft_5:4) ) * 0xff); } :cmp.ueq.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x03 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( ( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f== mic_ftD_5) ) * 0xff); } :cmp.lt.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x04 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext((mic_fs:4 f< mic_ft_5:4) * 0xff); } :cmp.lt.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x04 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((mic_fsD f< mic_ftD_5) * 0xff); } :cmp.ult.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x05 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f< mic_ft_5:4) ) * 0xff); } :cmp.ult.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x05 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( ( nan(mic_fsD) || nan(mic_ftD_5) || 
(mic_fsD f< mic_ftD_5) ) * 0xff); } :cmp.le.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x06 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext((mic_fs:4 f<= mic_ft_5:4) * 0xff); } :cmp.le.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x06 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((mic_fsD f<= mic_ftD_5) * 0xff); } :cmp.ule.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x07 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); # Trap if either operand is a Signaling NaN micb_fd = sext( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f<= mic_ft_5:4) ) * 0xff); } :cmp.ule.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x07 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( ( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f<= mic_ftD_5) ) * 0xff); } :cmp.saf.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x08 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = 0x0; } :cmp.saf.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x08 & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = 0x0; } :cmp.sun.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x09 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext((nan(mic_fs:4) || nan(mic_ft_5:4)) * 0xff); } :cmp.sun.D micb_fd, mic_fs, mic_ft_5 is 
ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x09 & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((nan(mic_fsD) || nan(mic_ftD_5)) * 0xff); } :cmp.seq.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x0A & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext((mic_fs:4 f== mic_ft_5:4) * 0xff); } :cmp.seq.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x0A & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((mic_fsD f== mic_ftD_5) * 0xff); } :cmp.sueq.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x0B & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f== mic_ft_5:4) ) * 0xff); } :cmp.sueq.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x0B & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( ( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f== mic_ftD_5) ) * 0xff); } :cmp.slt.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x0C & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( (mic_fs:4 f< mic_ft_5:4) * 0xff); } :cmp.slt.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x0C & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( (mic_fsD f< mic_ftD_5) * 0xff); } :cmp.sult.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; 
micb_fd & micb_cond2=0x0D & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f< mic_ft_5:4) ) * 0xff); } :cmp.sult.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x0D & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( ( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f< mic_ftD_5) ) * 0xff); } :cmp.sle.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x0E & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( (mic_fs:4 f<= mic_ft_5:4) * 0xff); } :cmp.sle.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x0E & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( (mic_fsD f<= mic_ftD_5) * 0xff); } :cmp.sule.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x0F & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f<= mic_ft_5:4) ) * 0xff); } :cmp.sule.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x0F & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( ( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f<= mic_ftD_5) ) * 0xff); } :cmp.or.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x11 & micb_poolfx=0b000101 { trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( (!(nan(mic_fs:4) || nan(mic_ft_5:4))) * 0xff); # The negated predicate of "c.un" } :cmp.or.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & 
mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x11 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( (!(nan(mic_fsD) || nan(mic_ftD_5))) * 0xff); } :cmp.une.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x12 & micb_poolfx=0b000101 { # The negated predicate of cmp.eq trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext((mic_fs:4 f!= mic_ft_5:4) * 0xff); } :cmp.une.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x12 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((mic_fsD f!= mic_ftD_5) * 0xff); } :cmp.ne.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x13 & micb_poolfx=0b000101 { # The negated predicate of cmp.ueq trapIfSNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( (!( ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f== mic_ft_5:4) ))) * 0xff); } :cmp.ne.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x13 & micb_poolfx=0b010101 { trapIfSNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( (!( ( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f== mic_ftD_5) ))) * 0xff); } :cmp.sor.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x19 & micb_poolfx=0b000101 { trapIfNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext( (!(nan(mic_fs:4) || nan(mic_ft_5:4))) * 0xff); # negate of cmp.sun } :cmp.sor.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x19 & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( (!(nan(mic_fsD) || nan(mic_ftD_5))) * 0xff); } :cmp.sune.S micb_fd, mic_fs, 
mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x1A & micb_poolfx=0b000101 { trapIfNaN(mic_fs:4, mic_ft_5:4); micb_fd = sext((mic_fs:4 f!= mic_ft_5:4) * 0xff); } :cmp.sune.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x1A & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext((mic_fsD f!= mic_ftD_5) * 0xff); } :cmp.sne.S micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_cond2=0x1B & micb_poolfx=0b000101 { trapIfNaN(mic_fs:4, mic_ft_5:4); # negate of cmp.sueq micb_fd = sext( (! ( nan(mic_fs:4) || nan(mic_ft_5:4) || (mic_fs:4 f== mic_ft_5:4) )) * 0xff); } :cmp.sne.D micb_fd, mic_fs, mic_ft_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_cond2=0x1B & micb_poolfx=0b010101 { trapIfNaN(mic_fsD, mic_ftD_5); micb_fdD = sext( (! 
( nan(mic_fsD) || nan(mic_ftD_5) || (mic_fsD f== mic_ftD_5) )) * 0xff); } :div micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0100011000 & micb_bit10=0 & micb_rd32 { tmp:4 = RS0L s/ RT5L; micb_rd32 = sext(tmp); } :divu micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0110011000 & micb_bit10=0 & micb_rd32 { tmp:4 = RS0L / RT5L; micb_rd32 = sext(tmp); } :dvp STYPE is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_funci=0 & STYPE ; micb_poolax=0b111100 & micb_axf=0b0001100101 { disableProcessor(STYPE); } :evp STYPE is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & mic_funci=0 & STYPE ; micb_poolax=0b111100 & micb_axf=0b0011100101 { enableProcessor(STYPE); } :jalrc RTIMP^mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_rt32_5 & mic_rs32_0 & RTIMP ; micb_axf=0b0000111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); mic_rt32_5 = inst_next | 0x1; call [pc]; } :jalrc.hb RTIMP^mic_rs32_0 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_rt32_5 & mic_rs32_0 & RTIMP ; micb_axf=0b0001111100 & micb_poolax=0b111100 { JXWritePC(mic_rs32_0); mic_rt32_5 = inst_next | 0x1; call [pc]; } :jalrc16 mic_rs32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=1 & mic_jalrr6=0b01110 & mic_rs32_5 { JXWritePC(mic_rs32_5); ra = inst_next | 0x1; call [pc]; } :jialc mic_rt32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b101000 & REL6=1 & mic_index=0 & mic_rt32_0 ; EXT_MS16 { tmp:$(REGSIZE) = mic_rt32_0 + sext(EXT_MS16); JXWritePC(tmp); ra = inst_next | 0x1; goto [pc]; } :jic mic_rt32_0, EXT_MS16 is ISA_MODE=1 & RELP=0 & mic_op=0b100000 & REL6=1 & mic_index=0 & mic_rt32_0 ; EXT_MS16 { tmp:$(REGSIZE) = mic_rt32_0 + sext(EXT_MS16); JXWritePC(tmp); goto [pc]; } :jrcaddiusp EXT_CODE5R6 is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=1 & mic_jalrr6=0b10011 & EXT_CODE5R6 { tmp:$(REGSIZE) = zext(EXT_CODE5R6); sp = sp + tmp; JXWritePC(ra); goto [pc]; } :jrc16 mic_rs32_5 is ISA_MODE=1 & 
RELP=0 & mic_op=0b010001 & REL6=1 & mic_jalrr6=0b01100 & mic_rs32_5 { JXWritePC(mic_rs32_5); goto [pc]; } :jrc16 ra is ISA_MODE=1 & RELP=0 & mic_op=0b010001 & REL6=1 & mic_jalrr6=0b01100 & mic_rs32_5=31 & ra { JXWritePC(ra); return [pc]; } :llx mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=1 & mic_rt32_5 & mic_base0 ; micb_func12=0b0001 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); lockload(tmp); } :llxe mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=1 & mic_rt32_5 & mic_base0 ; micb_func12=0b0110 & micb_sub9=0b010 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); lockload(tmp); } :lsa micb_rd32, RS0L, RT5L, EXT_SA9 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_poolax=0b001111 & micb_asel=0b000 & EXT_SA9 & micb_rd32 { tmp:4 = (RS0L << EXT_SA9) + RT5L; micb_rd32 = sext(tmp); } :lwpc mic_rt32_5, EXT_MS19 is ISA_MODE=1 & RELP=0 & mic_op=0b011110 & REL6=1 & mic_pcz=0b01 & mic_rt32_5 & mic_imm02 ; EXT_MS19 [ ext_32_imm3=mic_imm02; ] { tmp:$(REGSIZE) = inst_start + sext(EXT_MS19); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); tmpl:4 = *[ram]:4 tmpa; mic_rt32_5 = sext(tmpl); } :lwupc mic_rt32_5, EXT_MS19 is ISA_MODE=1 & RELP=0 & mic_op=0b011110 & REL6=1 & mic_pcz=0b10 & mic_rt32_5 & mic_imm02 ; EXT_MS19 [ ext_32_imm3=mic_imm02; ] { tmp:$(REGSIZE) = inst_start + sext(EXT_MS19); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); tmpl:4 = *[ram]:4 tmpa; mic_rt32_5 = zext(tmpl); } :maddf.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b110111000 { tmp:4 = micb_fd:4 f+ (mic_fs:4 f* mic_ft_5:4); micb_fd = zext(tmp); } :maddf.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & 
REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b110111000 { micb_fdD = micb_fdD f+ (mic_fsD f* mic_ftD_5); } :max.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b000001011 { # set floating point fd to the max of fs and ft, TBD special case for NaN tmp_cond:1 = mic_fs:4 f> mic_ft_5:4; micb_fd = zext( (mic_fs:4 * zext(tmp_cond == 1)) | (mic_ft_5:4 * zext(tmp_cond == 0) ) ); } :max.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b000001011 { tmp_cond:1 = mic_fsD f> mic_ftD_5; micb_fdD = zext( (mic_fsD * zext(tmp_cond == 1)) | (mic_ftD_5 * zext(tmp_cond == 0) ) ); } :maxa.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b000101011 { # set floating point fd to the max of absolute values of fs and ft, TBD special case for NaN tmp_cond:1 = abs(mic_fs:4) f> abs(mic_ft_5:4); micb_fd = zext( (mic_fs:4 * zext(tmp_cond == 1)) | (mic_ft_5:4 * zext(tmp_cond == 0) ) ); } :maxa.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b000101011 { tmp_cond:1 = abs(mic_fsD) f> abs(mic_ftD_5); micb_fdD = zext( (mic_fsD * zext(tmp_cond == 1)) | (mic_ftD_5 * zext(tmp_cond == 0) ) ); } :min.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b000000011 { # set floating point fd to the min of fs and ft, TBD special case for NaN tmp_cond:1 = mic_fs:4 f< mic_ft_5:4; micb_fd = zext( (mic_fs:4 * zext(tmp_cond == 1)) | (mic_ft_5:4 * zext(tmp_cond == 0) ) ); } :min.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & 
mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b000000011 { tmp_cond:1 = mic_fsD f< mic_ftD_5; micb_fdD = zext( (mic_fsD * zext(tmp_cond == 1)) | (mic_ftD_5 * zext(tmp_cond == 0) ) ); } :mina.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b000100011 { # set floating point fd to the min of absolute values of fs and ft, TBD special case for NaN tmp_cond:1 = abs(mic_fs:4) f< abs(mic_ft_5:4); micb_fd = zext( (mic_fs:4 * zext(tmp_cond == 1)) | (mic_ft_5:4 * zext(tmp_cond == 0) ) ); } :mina.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b000100011 { tmp_cond:1 = abs(mic_fsD) f< abs(mic_ftD_5); micb_fdD = zext( (mic_fsD * zext(tmp_cond == 1)) | (mic_ftD_5 * zext(tmp_cond == 0) ) ); } :mod micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0101011000 & micb_bit10=0 & micb_rd32 { tmp:4 = RS0L s% RT5L; micb_rd32 = sext(tmp); } :modu micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0111011000 & micb_bit10=0 & micb_rd32 { tmp:4 = RS0L % RT5L; micb_rd32 = sext(tmp); } :msubf.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b111111000 { # set floating point fd = fd - fs * ft, using 32-bit floating values tmp:4 = micb_fd:4 f- (mic_fs:4 f* mic_ft_5:4); micb_fd = zext(tmp); } :msubf.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b111111000 { micb_fdD = micb_fdD f- (mic_fsD f* mic_ftD_5); } :muh micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0001011000 & micb_bit10=0 & micb_rd32 { tmpS:8 = sext(RS0L); 
tmpT:8 = sext(RT5L); tmpS = tmpS * tmpT; tmp:4 = tmpS[32,32]; micb_rd32 = sext(tmp); } :mulu micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0010011000 & micb_bit10=0 & micb_rd32 { tmpS:8 = zext(RS0L); tmpT:8 = zext(RT5L); tmpS = tmpS * tmpT; tmp:4 = tmpS[0,32]; micb_rd32 = sext(tmp); } :muhu micb_rd32, RS0L, RT5L is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & RS0L & RT5L ; micb_axf2=0b0011011000 & micb_bit10=0 & micb_rd32 { tmpS:8 = zext(RS0L); tmpT:8 = zext(RT5L); tmpS = tmpS * tmpT; tmp:4 = tmpS[32,32]; micb_rd32 = sext(tmp); } #sel only valid for PerfCtr :rdhwr mic_rt32_5, mic_rs32_hw, micb_sel is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_rt32_5 & mic_rs32_hw & mic_rs32_0=4; micb_sel & micb_z14=0 & micb_bit10=0 & micb_axf2=0b0111000000 { mic_rt32_5 = getHWRegister(mic_rs32_hw, micb_sel:1); } :rdhwr mic_rt32_5, mic_rs32_hw is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_rt32_5 & mic_rs32_hw & mic_rs32_0!=4; micb_sel=0 & micb_z14=0 & micb_bit10=0 & micb_axf2=0b0111000000 { mic_rt32_5 = getHWRegister(mic_rs32_hw); } :rint.S mic_fd, mic_fs_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs_5 & mic_fd ; micb_fd=0 & micb_fmt9=0 & micb_fxf5=0b000100000 { # floating point round to integral floating point rm_tmp:1 = fcsr[0,2]; # Get RM rounding mode bits fs_tmp:4 = mic_fs_5:4; fs_cvt_tmp:4 = 0; if (rm_tmp == 0) goto ; fs_cvt_tmp = floor(fs_tmp); # RM is 1, no rounding, and floor returns a float goto ; fs_cvt_tmp = round(fs_tmp); # round returns a float mic_fd = zext(fs_cvt_tmp); } :rint.D mic_fd, mic_fs_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs_5 & mic_fd & mic_fsD_5 & mic_fdD; micb_fd=0 & micb_fmt9=1 & micb_fxf5=0b000100000 { # floating point round to integral floating point rm_tmp:1 = fcsr[0,2]; # Get RM rounding mode bits if (rm_tmp == 0) goto ; mic_fdD = floor(mic_fsD_5); # RM is 1, no rounding, and floor returns a float goto ; mic_fdD = round(mic_fsD_5); # round 
returns a float }

# scx: store-conditional word with extended (9-bit, sign-extended) register offset.
# Writes rt to [base+offset] after the lockwrite pseudo-op; this model always
# reports success by setting rt = 1 afterwards.
:scx RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=1 & RT5L & mic_base0 ; micb_func12=0b1001 & micb_sub9=0b000 & EXT_CODE9E
{
  tmp:$(REGSIZE) = sext(EXT_CODE9E);
  tmp = tmp + mic_base0;
  lockwrite(tmp);
  tmpa:$(ADDRSIZE) = 0;
  ValCast(tmpa,tmp);
  *[ram]:4 tmpa = RT5L;
  RT5L = 1;
}

# scxe: same addressing as scx ("e" presumably the EVA variant — confirm).
# NOTE(review): unlike :scx above, rt is NOT set to 1 here and lockwrite comes
# after the store — confirm whether that asymmetry is intended.
:scxe RT5L, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=1 & RT5L & mic_base0 ; micb_func12=0b1010 & micb_sub9=0b000 & EXT_CODE9E
{
  tmp:$(REGSIZE) = sext(EXT_CODE9E);
  tmp = tmp + mic_base0;
  tmpa:$(ADDRSIZE) = 0;
  ValCast(tmpa,tmp);
  *[ram]:4 tmpa = RT5L;
  lockwrite(tmp);
}

# seleqz: rd = (rt == 0) ? rs : 0
:seleqz micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0101000000 & micb_bit10=0 & micb_rd32
{
  # We use tmp to cover case where rs and rd are the same reg
  tmps:$(REGSIZE) = mic_rs32_0;
  tmpt:$(REGSIZE) = mic_rt32_5;
  micb_rd32 = 0;
  if (tmpt != 0) goto inst_next;
  micb_rd32 = tmps;
}

# selnez: rd = (rt != 0) ? rs : 0
:selnez micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0110000000 & micb_bit10=0 & micb_rd32
{
  # We use tmp to cover case where rs and rd are the same reg
  tmps:$(REGSIZE) = mic_rs32_0;
  tmpt:$(REGSIZE) = mic_rt32_5;
  micb_rd32 = 0;
  if (tmpt == 0) goto inst_next;
  micb_rd32 = tmps;
}

# sel.S: fd = bit0(fd) ? ft : fs, expressed branch-free via 0/1 multiply masks.
:sel.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b010111000
{
  tmp:1 = (micb_fd[0,1] == 0x01);
  micb_fd = (zext(tmp) * mic_ft_5) | (zext(tmp == 0x0) * mic_fs);
}

# sel.D: 64-bit variant of sel.S, selecting on bit0 of fd.
:sel.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=0 & mic_fs & mic_fsD & mic_ft_5 & mic_ftD_5; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b010111000
{
  tmp:1 = (micb_fdD[0,1] == 0x01);
  micb_fdD = (zext(tmp) * mic_ftD_5) | (zext(tmp == 0x0) * mic_fsD);
}

:seleqz.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd &
micb_fmt9=0 & micb_fxf5=0b000111000 { micb_fd = zext(mic_fs * zext(mic_ft_5[0,1] == 0)); }

# seleqz.D: fd = (bit0(ft) == 0) ? fs : 0, via multiply-by-0/1 mask on the raw bits.
:seleqz.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b000111000
{
  micb_fdD = zext(mic_fsD * zext(mic_ftD_5[0,1] == 0));
}

# selnez.S: fd = (bit0(ft) == 1) ? fs : 0
:selnez.S mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 ; micb_fd & micb_fmt9=0 & micb_fxf5=0b001111000
{
  micb_fd = zext(mic_fs * zext(mic_ft_5[0,1] == 1));
}

# selnez.D: 64-bit variant of selnez.S.
:selnez.D mic_ft_5, mic_fs, micb_fd is ISA_MODE=1 & RELP=0 & mic_op=0b010101 & REL6=1 & mic_fs & mic_ft_5 & mic_fsD & mic_ftD_5 ; micb_fd & micb_fdD & micb_fmt9=1 & micb_fxf5=0b001111000
{
  micb_fdD = zext(mic_fsD * zext(mic_ftD_5[0,1] == 1));
}

# sigrie: signal a Reserved Instruction exception with the 16-bit code operand.
:sigrie EXT_CODE16 is ISA_MODE=1 & RELP=0 & mic_op=0b000000 & REL6=1 & mic_code4r6=0 & mic_imm6r6; micb_poolax=0b111111 & EXT_CODE16 [ ext_32_imm6 = mic_imm6r6; ] { signalReservedInstruction(EXT_CODE16); }

@ifdef MIPS64

# dalign: doubleword align — rd = (rt << 8*bp) | (rs >> (64 - 8*bp)),
# i.e. extract 8 bytes from the concatenation rt:rs at byte offset bp.
:dalign micb_rd32, mic_rs32_0, mic_rt32_5, micb_bp8 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rt32_5 & mic_rs32_0 ; micb_poolax=0b011100 & micb_z67=0 & micb_rd32 & micb_bp8
{
  tmp:8 = mic_rt32_5 << (8 * micb_bp8);
  micb_rd32 = tmp | (mic_rs32_0 >> (64 - (8 * micb_bp8)));
}

# dahi/dati/daui: add an immediate to rs; the EXT_MS48/EXT_MS64/EXT_MS32
# subconstructors (defined elsewhere) presumably carry the immediate already
# shifted into the target bit position — confirm against their definitions.
:dahi mic_rs32_0, EXT_MS48 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=1 & mic_funci=0b10001 & mic_rs32_0 ; EXT_MS48
{
  mic_rs32_0 = mic_rs32_0 + EXT_MS48;
}

:dati mic_rs32_0, EXT_MS64 is ISA_MODE=1 & RELP=0 & mic_op=0b010000 & REL6=1 & mic_funci=0b10000 & mic_rs32_0 ; EXT_MS64
{
  mic_rs32_0 = mic_rs32_0 + EXT_MS64;
}

:daui mic_rt32_5, mic_rs32_0, EXT_MS32 is ISA_MODE=1 & RELP=0 & mic_op=0b111100 & REL6=1 & mic_rt32_5 & mic_rs32_0 ; EXT_MS32
{
  mic_rt32_5 = mic_rs32_0 + sext(EXT_MS32);
}

# dbitswap: reverse the bits within each byte of rt (bitSwap pseudo-op).
:dbitswap mic_rd32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rt32_5 & mic_rd32_0 ; micb_poolax=0b111100 & micb_axf3=0b101100 & micb_z12=0
{
  mic_rd32_0 = bitSwap(mic_rt32_5);
}

:ddiv
micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0100011000 & micb_bit10=0 & micb_rd32 { micb_rd32 = mic_rs32_0 s/ mic_rt32_5; }

# ddivu: rd = rs / rt (unsigned 64-bit divide).
:ddivu micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0110011000 & micb_bit10=0 & micb_rd32
{
  micb_rd32 = mic_rs32_0 / mic_rt32_5;
}

# dlsa: rd = (rs << sa) + rt (shift-and-add for scaled addressing).
:dlsa micb_rd32, mic_rs32_0, mic_rt32_5, EXT_SA9 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_poolax=0b000100 & micb_asel=0b100 & EXT_SA9 & micb_rd32
{
  micb_rd32 = (mic_rs32_0 << EXT_SA9) + mic_rt32_5;
}

# dmod: rd = rs s% rt (signed 64-bit remainder).
:dmod micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0101011000 & micb_bit10=0 & micb_rd32
{
  micb_rd32 = mic_rs32_0 s% mic_rt32_5;
}

# dmodu: rd = rs % rt (unsigned 64-bit remainder).
:dmodu micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0111011000 & micb_bit10=0 & micb_rd32
{
  micb_rd32 = mic_rs32_0 % mic_rt32_5;
}

# dmul: rd = low 64 bits of rs * rt (low half is the same signed or unsigned).
:dmul micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0000011000 & micb_bit10=0 & micb_rd32
{
  micb_rd32 = mic_rs32_0 * mic_rt32_5;
}

# dmuh: rd = high 64 bits of the signed 128-bit product; tmp(8) takes the
# subpiece starting at byte offset 8 (bits 127:64).
:dmuh micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0001011000 & micb_bit10=0 & micb_rd32
{
  tmp:16 = sext(mic_rs32_0) * sext(mic_rt32_5);
  micb_rd32 = tmp(8);
}

# dmulu: rd = low 64 bits of rs * rt (same low-half result as dmul).
:dmulu micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0010011000 & micb_bit10=0 & micb_rd32
{
  micb_rd32 = mic_rs32_0 * mic_rt32_5;
}

# dmuhu: rd = high 64 bits of the unsigned 128-bit product.
:dmuhu micb_rd32, mic_rs32_0, mic_rt32_5 is ISA_MODE=1 & RELP=0 & mic_op=0b010110 & REL6=1 & mic_rs32_0 & mic_rt32_5 ; micb_axf2=0b0011011000 & micb_bit10=0 & micb_rd32
{
  tmp:16 = zext(mic_rs32_0) * zext(mic_rt32_5);
  micb_rd32 = tmp(8);
}

:ldpc mic_rt32_5, EXT_MS18
is ISA_MODE=1 & RELP=0 & mic_op=0b011110 & REL6=1 & mic_pcf2=0b110 & mic_rt32_5 & mic_imm01 ; EXT_MS18 [ ext_32_imm2=mic_imm01; ] { tmp:8 = inst_start + sext(EXT_MS18); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = *[ram]:8 tmpa; } :lldx mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=1 & mic_rt32_5 & mic_base0 ; micb_func12=0b0101 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); mic_rt32_5 = sext(*[ram]:4 tmpa); lockload(tmp); } # The documentation is partly wrong for this instruction and generates a conflict (same pattern) as lldx instruction. The op-code map section of the document # does have the difference between this and the lldx so I went with that. :scdx mic_rt32_5, EXT_CODE9E(mic_base0) is ISA_MODE=1 & RELP=0 & mic_op=0b011000 & REL6=1 & mic_rt32_5 & mic_base0 ; micb_func12=0b1101 & micb_sub9=0b000 & EXT_CODE9E { tmp:$(REGSIZE) = sext(EXT_CODE9E); tmp = tmp + mic_base0; lockwrite(tmp); tmpa:$(ADDRSIZE) = 0; ValCast(tmpa,tmp); *[ram]:8 tmpa = mic_rt32_5; mic_rt32_5 = 1; } @endif ================================================ FILE: pypcode/processors/MIPS/data/manuals/MIPS.idx ================================================ @mips64v2.pdf[MIPS64 Architecture For Programmers - Volume II, July 2005 (MD00087)] abs. , 49 add , 50 add. , 51 addi , 52 addiu , 53 addu , 54 alnv.ps , 55 and , 58 andi , 59 b , 60 bal , 61 bc1f , 62 bc1fl , 64 bc1t , 66 bc1tl , 68 bc2f , 70 bc2fl , 71 bc2t , 73 bc2tl , 74 beq , 76 beql , 77 bgez , 79 bgezal , 80 bgezall , 81 bgezl , 83 bgtz , 85 bgtzl , 86 blez , 88 blezl , 89 bltz , 91 bltzal , 92 bltzall , 93 bltzl , 95 bne , 97 bnel , 98 break , 100 c. , 101 cache , 106 ceil.l. , 113 ceil.w. , 115 cfc1 , 116 cfc2 , 118 clear , 54 clo , 119 clz , 121 cop2 , 120 ctc1 , 122 ctc2 , 124 cvt.d. , 125 cvt.l. , 126 cvt.ps.s , 128 cvt.s.d , 130 cvt.s.l , 130 cvt.s.pl , 131 cvt.s.pu , 132 cvt.s.w , 130 cvt.w. 
, 133 dadd , 134 daddi , 135 daddiu , 136 daddu , 137 dclo , 138 dclz , 139 ddiv , 140 ddivu , 141 deret , 142 dext , 144 dextm , 146 dextu , 148 di , 150 dins , 152 dinsm , 154 dinsu , 156 div , 158 div. , 160 divu , 161 dmfc0 , 162 dmfc1 , 163 dmfc2 , 164 dmtc0 , 165 dmtc1 , 166 dmtc2 , 167 dmult , 168 dmultu , 169 drotr , 170 drotrv , 172 drotr32 , 171 dsbh , 173 dshd , 175 dsll , 177 dsllv , 179 dsll32 , 178 dsra , 180 dsrav , 182 dsra32 , 181 dsrl , 183 dsrlv , 185 dsrl32 , 184 dsub , 186 dsubu , 187 ehb , 188 ei , 189 eret , 191 ext , 193 floor.l. , 195 floor.w. , 197 ins , 198 j , 200 jal , 201 jalr , 202 jalr.hb , 204 jr , 207 jr.hb , 209 lb , 212 lbu , 213 ld , 214 ldc1 , 215 ldc2 , 216 ldl , 217 ldr , 219 ldxc1 , 222 lh , 223 lhu , 224 ll , 225 lld , 227 lui , 229 luxc1 , 230 lw , 231 lwc1 , 232 lwc2 , 233 lwl , 234 lwr , 237 lwu , 241 lwxc1 , 242 madd , 243 madd.d , 244 madd.ps , 244 madd.s , 244 maddu , 246 mfc0 , 247 mfc1 , 248 mfc2 , 249 mfhc1 , 250 mfhc2 , 251 mfhi , 252 mflo , 253 mov. , 254 move , 254 movf , 255 movf. , 256 movn , 258 movn. , 259 movt , 261 movt. , 262 movz , 264 movz. , 265 msub , 267 msub. , 268 msubu , 270 mtc0 , 271 mtc1 , 272 mtc2 , 273 mthc1 , 274 mthc2 , 275 mthi , 276 mtlo , 277 mul , 278 mul. , 279 mult , 280 multu , 281 neg. , 282 nmadd. , 283 nmsub. , 285 nop , 287 nor , 288 or , 289 ori , 290 pll.ps , 291 plu.ps , 292 pref , 293 prefx , 297 pul.ps , 298 puu.ps , 299 rdhwr , 300 rdpgpr , 302 recip. , 303 retn , 207 rotr , 305 rotrv , 306 round.l. , 307 round.w. , 309 rsqrt. , 311 sb , 313 sc , 314 scd , 317 sd , 320 sdbbp , 321 sdc1 , 322 sdc2 , 323 sdl , 324 sdr , 327 sdxc1 , 330 seb , 331 seh , 332 sh , 334 sll , 335 sllv , 336 slt , 337 slti , 338 sltiu , 339 sltu , 340 sqrt. , 341 sra , 342 srav , 343 srl , 344 srlv , 345 ssnop , 346 sub , 347 sub. 
, 348 subu , 349 suxc1 , 350 sw , 351 swc1 , 352 swcw , 353 swl , 354 swr , 356 swxc1 , 358 sync , 359 synci , 363 syscall , 366 teq , 367 teqi , 368 tge , 369 tgei , 370 tgeiu , 371 tgeu , 372 tlbp , 373 tlbr , 374 tlbwi , 376 tlbwr , 378 tlt , 380 tlti , 381 tltiu , 382 tltu , 383 tne , 384 tnei , 385 trunc.l. , 386 trunc.w. , 388 wait , 390 wrpgpr , 392 wsbh , 393 xor , 394 xori , 395 ================================================ FILE: pypcode/processors/MIPS/data/manuals/mipsM16.idx ================================================ @MD00087-2B-MIPS64BIS-AFP-6.06.pdf [MIPS Architecture For Programmers Volume II-A: The MIPS64 Instruction Set Reference Manual, MD00087, 6.06, December 15, 2016] abs.d,44 abs.ps,44 abs.s,44 add,45 add.d,46 add.ps,46 add.s,46 addi,47 addiu,48 addiupc,49 addu,50 align,51 alnv.ps,53 aluipc,55 and,56 andi,57 aui,58 auipc,60 b,61 b.af.c,94 b.at.c,94 b.eq.c,94 b.f.c,94 b.ge.c,94 b.gl.c,94 b.gle.c,94 b.gt.c,94 b.le.c,94 b.lt.c,94 b.ne.c,94 b.neq.c,94 b.nge.c,94 b.ngl.c,94 b.ngle.c,94 b.ngt.c,94 b.nle.c,94 b.nlt.c,94 b.oge.c,94 b.ogl.c,94 b.ogt.c,94 b.ole.c,94 b.olt.c,94 b.or.c,94 b.saf.c,94 b.sat.c,94 b.seq.c,94 b.sf.c,94 b.sle.c,94 b.slt.c,94 b.sne.c,94 b.soge.c,94 b.sogt.c,94 b.sor.c,94 b.st.c,94 b.sueq.c,94 b.suge.c,94 b.sugt.c,94 b.sule.c,94 b.sult.c,94 b.sun.c,94 b.sune.c,94 b.t.c,94 b.ueq.c,94 b.uge.c,94 b.ugt.c,94 b.ule.c,94 b.ult.c,94 b.un.c,94 b.une.c,94 bal,62 balc,64 bc,65 bc1eqz,66 bc1f,68 bc1fl,70 bc1nez,66 bc1t,72 bc1tl,74 bc2eqz,76 bc2f,78 bc2fl,79 bc2nez,76 bc2t,81 bc2tl,82 beq,84 beql,85 beqzalc,89 bgez,87 bgezal,88 bgezalc,89 bgezall,92 bgezl,98 bgtz,100 bgtzalc,89 bgtzl,101 bitswap,103 blez,105 blezalc,89 blezl,106 bltz,108 bltzal,109 bltzalc,89 bltzall,110 bltzl,112 bne,114 bnel,115 bnezalc,89 bnvc,117 bovc,117 break,119 c.af.d,120 c.af.ps,120 c.af.s,120 c.at.d,120 c.at.ps,120 c.at.s,120 c.eq.d,120 c.eq.ps,120 c.eq.s,120 c.f.d,120 c.f.ps,120 c.f.s,120 c.ge.d,120 c.ge.ps,120 c.ge.s,120 c.gl.d,120 c.gl.ps,120 c.gl.s,120 
c.gle.d,120 c.gle.ps,120 c.gle.s,120 c.gt.d,120 c.gt.ps,120 c.gt.s,120 c.le.d,120 c.le.ps,120 c.le.s,120 c.lt.d,120 c.lt.ps,120 c.lt.s,120 c.ne.d,120 c.ne.ps,120 c.ne.s,120 c.neq.d,120 c.neq.ps,120 c.neq.s,120 c.nge.d,120 c.nge.ps,120 c.nge.s,120 c.ngl.d,120 c.ngl.ps,120 c.ngl.s,120 c.ngle.d,120 c.ngle.ps,120 c.ngle.s,120 c.ngt.d,120 c.ngt.ps,120 c.ngt.s,120 c.nle.d,120 c.nle.ps,120 c.nle.s,120 c.nlt.d,120 c.nlt.ps,120 c.nlt.s,120 c.oge.d,120 c.oge.ps,120 c.oge.s,120 c.ogl.d,120 c.ogl.ps,120 c.ogl.s,120 c.ogt.d,120 c.ogt.ps,120 c.ogt.s,120 c.ole.d,120 c.ole.ps,120 c.ole.s,120 c.olt.d,120 c.olt.ps,120 c.olt.s,120 c.or.d,120 c.or.ps,120 c.or.s,120 c.saf.d,120 c.saf.ps,120 c.saf.s,120 c.sat.d,120 c.sat.ps,120 c.sat.s,120 c.seq.d,120 c.seq.ps,120 c.seq.s,120 c.sf.d,120 c.sf.ps,120 c.sf.s,120 c.sle.d,120 c.sle.ps,120 c.sle.s,120 c.slt.d,120 c.slt.ps,120 c.slt.s,120 c.sne.d,120 c.sne.ps,120 c.sne.s,120 c.soge.d,120 c.soge.ps,120 c.soge.s,120 c.sogt.d,120 c.sogt.ps,120 c.sogt.s,120 c.sor.d,120 c.sor.ps,120 c.sor.s,120 c.st.d,120 c.st.ps,120 c.st.s,120 c.sueq.d,120 c.sueq.ps,120 c.sueq.s,120 c.suge.d,120 c.suge.ps,120 c.suge.s,120 c.sugt.d,120 c.sugt.ps,120 c.sugt.s,120 c.sule.d,120 c.sule.ps,120 c.sule.s,120 c.sult.d,120 c.sult.ps,120 c.sult.s,120 c.sun.d,120 c.sun.ps,120 c.sun.s,120 c.sune.d,120 c.sune.ps,120 c.sune.s,120 c.t.d,120 c.t.ps,120 c.t.s,120 c.ueq.d,120 c.ueq.ps,120 c.ueq.s,120 c.uge.d,120 c.uge.ps,120 c.uge.s,120 c.ugt.d,120 c.ugt.ps,120 c.ugt.s,120 c.ule.d,120 c.ule.ps,120 c.ule.s,120 c.ult.d,120 c.ult.ps,120 c.ult.s,120 c.un.d,120 c.un.ps,120 c.un.s,120 c.une.d,120 c.une.ps,120 c.une.s,120 cache,124 cache,125 cachee,130 ceil.l.d,136 ceil.l.s,136 ceil.w.d,137 ceil.w.s,137 cfc1,138 cfc2,140 class.d,141 class.s,141 clo,143 clz,144 cmp.af.d,145 cmp.af.s,145 cmp.at.d,145 cmp.at.s,145 cmp.eq.d,145 cmp.eq.s,145 cmp.f.d,145 cmp.f.s,145 cmp.ge.d,145 cmp.ge.s,145 cmp.gl.d,145 cmp.gl.s,145 cmp.gle.d,145 cmp.gle.s,145 cmp.gt.d,145 cmp.gt.s,145 cmp.le.d,145 cmp.le.s,145 
cmp.lt.d,145 cmp.lt.s,145 cmp.ne.d,145 cmp.ne.s,145 cmp.neq.d,145 cmp.neq.s,145 cmp.nge.d,145 cmp.nge.s,145 cmp.ngl.d,145 cmp.ngl.s,145 cmp.ngle.d,145 cmp.ngle.s,145 cmp.ngt.d,145 cmp.ngt.s,145 cmp.nle.d,145 cmp.nle.s,145 cmp.nlt.d,145 cmp.nlt.s,145 cmp.oge.d,145 cmp.oge.s,145 cmp.ogl.d,145 cmp.ogl.s,145 cmp.ogt.d,145 cmp.ogt.s,145 cmp.ole.d,145 cmp.ole.s,145 cmp.olt.d,145 cmp.olt.s,145 cmp.or.d,145 cmp.or.s,145 cmp.saf.d,145 cmp.saf.s,145 cmp.sat.d,145 cmp.sat.s,145 cmp.seq.d,145 cmp.seq.s,145 cmp.sf.d,145 cmp.sf.s,145 cmp.sle.d,145 cmp.sle.s,145 cmp.slt.d,145 cmp.slt.s,145 cmp.sne.d,145 cmp.sne.s,145 cmp.soge.d,145 cmp.soge.s,145 cmp.sogt.d,145 cmp.sogt.s,145 cmp.sor.d,145 cmp.sor.s,145 cmp.st.d,145 cmp.st.s,145 cmp.sueq.d,145 cmp.sueq.s,145 cmp.suge.d,145 cmp.suge.s,145 cmp.sugt.d,145 cmp.sugt.s,145 cmp.sule.d,145 cmp.sule.s,145 cmp.sult.d,145 cmp.sult.s,145 cmp.sun.d,145 cmp.sun.s,145 cmp.sune.d,145 cmp.sune.s,145 cmp.t.d,145 cmp.t.s,145 cmp.ueq.d,145 cmp.ueq.s,145 cmp.uge.d,145 cmp.uge.s,145 cmp.ugt.d,145 cmp.ugt.s,145 cmp.ule.d,145 cmp.ule.s,145 cmp.ult.d,145 cmp.ult.s,145 cmp.un.d,145 cmp.un.s,145 cmp.une.d,145 cmp.une.s,145 cop2,150 crc32b,152 crc32cb,155 crc32cd,155 crc32ch,155 crc32cw,155 crc32d,152 crc32h,152 crc32w,152 ctc1,158 ctc2,161 cvt.d.l,162 cvt.d.s,162 cvt.d.w,162 cvt.l.d,163 cvt.l.s,163 cvt.ps.s,164 cvt.s.d,168 cvt.s.l,168 cvt.s.pl,166 cvt.s.pu,167 cvt.s.w,168 cvt.w.d,169 cvt.w.s,169 dadd,170 daddi,171 daddiu,172 daddu,173 dahi,58 dalign,51 dati,58 daui,58 dbitswap,103 dclo,174 dclz,175 ddiv,176 ddivu,177 deret,178 dext,179 dextm,181 dextu,183 di,185 dins,186 dinsm,188 dinsu,190 div,192 div.d,197 div.s,197 divu,194 dlsa,294 dmfc0,199 dmfc1,200 dmfc2,201 dmod,194 dmodu,194 dmtc0,202 dmtc1,203 dmtc2,204 dmuh,363 dmuhu,363 dmul,363 dmult,205 dmultu,206 dmulu,363 drotr,207 drotr32,208 drotrv,209 dsbh,210 dshd,211 dsll,212 dsll32,213 dsllv,214 dsra,215 dsra32,216 dsrav,217 dsrl,218 dsrl32,219 dsrlv,220 dsub,221 dsubu,222 dvp,223 ehb,226 ei,227 
eret,228 eretnc,230 evp,232 ext,234 floor.l.d,236 floor.l.s,236 floor.w.d,237 floor.w.s,237 ginvi,239 ginvt,241 ins,244 j,246 jal,247 jalr,248 jalr.hb,250 jalx,253 jialc,255 jic,257 jr,258 jr.hb,260 lb,263 lbe,264 lbu,265 lbue,266 ld,267 ldc1,268 ldc2,269 ldl,271 ldpc,273 ldr,274 ldxc1,276 lh,277 lhe,278 lhu,279 lhue,280 ll,281 lld,283 lldp,288 lle,286 llwp,290 llwpe,292 lsa,294 lui,295 luxc1,296 lw,297 lwc1,298 lwc2,299 lwe,300 lwl,301 lwle,304 lwpc,307 lwr,308 lwre,311 lwre,312 lwu,315 lwupc,316 lwxc1,317 madd,318 madd.d,319 madd.ps,319 madd.s,319 maddf.d,321 maddf.s,321 maddu,323 max.d,324 max.s,324 maxa.d,324 maxa.s,324 mfc0,328 mfc1,329 mfc2,330 mfhc0,331 mfhc1,333 mfhc2,334 mfhi,335 mflo,336 min.d,324 min.s,324 mina.d,324 mina.s,324 mod,194 modu,194 mov.d,337 mov.ps,337 mov.s,337 movf,338 movf.d,339 movf.ps,339 movf.s,339 movn,341 movn.d,342 movn.ps,342 movn.s,342 movt,343 movt.d,344 movt.ps,344 movt.s,344 movz,346 movz.d,347 movz.ps,347 movz.s,347 msub,348 msub.d,349 msub.ps,349 msub.s,349 msubf.d,321 msubf.s,321 msubu,351 mtc0,352 mtc1,354 mtc2,355 mthc0,356 mthc1,358 mthc2,359 mthi,360 mtlo,361 muh,363 muhu,363 mul,362 mul.d,366 mul.ps,366 mul.s,366 mult,367 multu,368 mulu,363 nal,369 neg.d,370 neg.ps,370 neg.s,370 nmadd.d,371 nmadd.ps,371 nmadd.s,371 nmsub.d,373 nmsub.ps,373 nmsub.s,373 nop,375 nor,376 or,377 ori,378 pause,379 pll.ps,381 plu.ps,382 pref,383 prefe,387 prefx,391 pul.ps,392 puu.ps,393 rdhwr,394 rdpgpr,397 recip.d,398 recip.s,398 rint.d,399 rint.s,399 rotr,401 rotrv,402 round.l.d,403 round.l.s,403 round.w.d,404 round.w.s,404 rsqrt.d,405 rsqrt.s,405 sb,406 sbe,407 sc,408 scd,412 scdp,415 sce,418 scwp,422 scwpe,426 sd,429 sdbbp,430 sdc1,431 sdc2,432 sdl,433 sdr,435 sdxc1,437 seb,438 seh,439 sel.d,441 sel.s,441 seleqz,442 seleqz.d,444 seleqz.s,444 selnez,442 sh,446 she,447 sigrie,448 sll,449 sllv,450 slt,451 slti,452 sltiu,453 sltu,454 sqrt.d,455 sqrt.s,455 sra,456 srav,457 srl,458 srlv,459 ssnop,460 sub,461 sub.d,462 sub.ps,462 sub.s,462 
subu,463 suxc1,464 sw,465 swc2,467 swe,468 swl,469 swle,471 swr,473 swre,475 swxc1,477 sync,478 synci,483 syscall,486 teq,487 teqi,488 tge,489 tgei,490 tgeiu,491 tgeu,492 tlbinv,493 tlbinvf,495 tlbp,497 tlbr,498 tlbwi,500 tlbwr,502 tlt,504 tlti,505 tltiu,506 tltu,507 tne,508 tnei,509 trunc.l.d,510 trunc.l.s,510 trunc.w.d,511 trunc.w.s,511 wait,512 wrpgpr,514 wsbh,515 xor,516 xori,517 @MD00076-2B-MIPS1632-AFP-02.63.pdf [MIPS32 Architecture for Programmers Volume IV-a: The MIPS16e Application-Specific Extension to the MIPS32 Architecture, MD00076 2.63, July 16, 2013] asmacro,65 beqz,68 bnez,70 bteqz,73 btnez,75 cmp,77 cmpi,78 jrc,90 retnc,90 neq,114 restore,118 save,123 zeb,159 zeh,160 ================================================ FILE: pypcode/processors/MIPS/data/manuals/mipsMic.idx ================================================ @MD00087-2B-MIPS64BIS-AFP-6.06.pdf [MIPS Architecture For Programmers Volume II-A: The MIPS64 Instruction Set Reference Manual, MD00087, 6.06, December 15, 2016] abs.d,44 abs.ps,44 abs.s,44 add,45 add.d,46 add.ps,46 add.s,46 addi,47 addiu,48 addiupc,49 addu,50 align,51 alnv.ps,53 aluipc,55 and,56 andi,57 aui,58 auipc,60 b,61 b.af.c,94 b.at.c,94 b.eq.c,94 b.f.c,94 b.ge.c,94 b.gl.c,94 b.gle.c,94 b.gt.c,94 b.le.c,94 b.lt.c,94 b.ne.c,94 b.neq.c,94 b.nge.c,94 b.ngl.c,94 b.ngle.c,94 b.ngt.c,94 b.nle.c,94 b.nlt.c,94 b.oge.c,94 b.ogl.c,94 b.ogt.c,94 b.ole.c,94 b.olt.c,94 b.or.c,94 b.saf.c,94 b.sat.c,94 b.seq.c,94 b.sf.c,94 b.sle.c,94 b.slt.c,94 b.sne.c,94 b.soge.c,94 b.sogt.c,94 b.sor.c,94 b.st.c,94 b.sueq.c,94 b.suge.c,94 b.sugt.c,94 b.sule.c,94 b.sult.c,94 b.sun.c,94 b.sune.c,94 b.t.c,94 b.ueq.c,94 b.uge.c,94 b.ugt.c,94 b.ule.c,94 b.ult.c,94 b.un.c,94 b.une.c,94 bal,62 balc,64 bc,65 bc1eqz,66 bc1f,68 bc1fl,70 bc1nez,66 bc1t,72 bc1tl,74 bc2eqz,76 bc2f,78 bc2fl,79 bc2nez,76 bc2t,81 bc2tl,82 beq,84 beql,85 beqzalc,89 bgez,87 bgezal,88 bgezalc,89 bgezall,92 bgezl,98 bgtz,100 bgtzalc,89 bgtzl,101 bitswap,103 blez,105 blezalc,89 blezl,106 bltz,108 
bltzal,109 bltzalc,89 bltzall,110 bltzl,112 bne,114 bnel,115 bnezalc,89 bnvc,117 bovc,117 break,119 c.af.d,120 c.af.ps,120 c.af.s,120 c.at.d,120 c.at.ps,120 c.at.s,120 c.eq.d,120 c.eq.ps,120 c.eq.s,120 c.f.d,120 c.f.ps,120 c.f.s,120 c.ge.d,120 c.ge.ps,120 c.ge.s,120 c.gl.d,120 c.gl.ps,120 c.gl.s,120 c.gle.d,120 c.gle.ps,120 c.gle.s,120 c.gt.d,120 c.gt.ps,120 c.gt.s,120 c.le.d,120 c.le.ps,120 c.le.s,120 c.lt.d,120 c.lt.ps,120 c.lt.s,120 c.ne.d,120 c.ne.ps,120 c.ne.s,120 c.neq.d,120 c.neq.ps,120 c.neq.s,120 c.nge.d,120 c.nge.ps,120 c.nge.s,120 c.ngl.d,120 c.ngl.ps,120 c.ngl.s,120 c.ngle.d,120 c.ngle.ps,120 c.ngle.s,120 c.ngt.d,120 c.ngt.ps,120 c.ngt.s,120 c.nle.d,120 c.nle.ps,120 c.nle.s,120 c.nlt.d,120 c.nlt.ps,120 c.nlt.s,120 c.oge.d,120 c.oge.ps,120 c.oge.s,120 c.ogl.d,120 c.ogl.ps,120 c.ogl.s,120 c.ogt.d,120 c.ogt.ps,120 c.ogt.s,120 c.ole.d,120 c.ole.ps,120 c.ole.s,120 c.olt.d,120 c.olt.ps,120 c.olt.s,120 c.or.d,120 c.or.ps,120 c.or.s,120 c.saf.d,120 c.saf.ps,120 c.saf.s,120 c.sat.d,120 c.sat.ps,120 c.sat.s,120 c.seq.d,120 c.seq.ps,120 c.seq.s,120 c.sf.d,120 c.sf.ps,120 c.sf.s,120 c.sle.d,120 c.sle.ps,120 c.sle.s,120 c.slt.d,120 c.slt.ps,120 c.slt.s,120 c.sne.d,120 c.sne.ps,120 c.sne.s,120 c.soge.d,120 c.soge.ps,120 c.soge.s,120 c.sogt.d,120 c.sogt.ps,120 c.sogt.s,120 c.sor.d,120 c.sor.ps,120 c.sor.s,120 c.st.d,120 c.st.ps,120 c.st.s,120 c.sueq.d,120 c.sueq.ps,120 c.sueq.s,120 c.suge.d,120 c.suge.ps,120 c.suge.s,120 c.sugt.d,120 c.sugt.ps,120 c.sugt.s,120 c.sule.d,120 c.sule.ps,120 c.sule.s,120 c.sult.d,120 c.sult.ps,120 c.sult.s,120 c.sun.d,120 c.sun.ps,120 c.sun.s,120 c.sune.d,120 c.sune.ps,120 c.sune.s,120 c.t.d,120 c.t.ps,120 c.t.s,120 c.ueq.d,120 c.ueq.ps,120 c.ueq.s,120 c.uge.d,120 c.uge.ps,120 c.uge.s,120 c.ugt.d,120 c.ugt.ps,120 c.ugt.s,120 c.ule.d,120 c.ule.ps,120 c.ule.s,120 c.ult.d,120 c.ult.ps,120 c.ult.s,120 c.un.d,120 c.un.ps,120 c.un.s,120 c.une.d,120 c.une.ps,120 c.une.s,120 cache,124 cache,125 cachee,130 ceil.l.d,136 ceil.l.s,136 ceil.w.d,137 
ceil.w.s,137 cfc1,138 cfc2,140 class.d,141 class.s,141 clo,143 clz,144 cmp.af.d,145 cmp.af.s,145 cmp.at.d,145 cmp.at.s,145 cmp.eq.d,145 cmp.eq.s,145 cmp.f.d,145 cmp.f.s,145 cmp.ge.d,145 cmp.ge.s,145 cmp.gl.d,145 cmp.gl.s,145 cmp.gle.d,145 cmp.gle.s,145 cmp.gt.d,145 cmp.gt.s,145 cmp.le.d,145 cmp.le.s,145 cmp.lt.d,145 cmp.lt.s,145 cmp.ne.d,145 cmp.ne.s,145 cmp.neq.d,145 cmp.neq.s,145 cmp.nge.d,145 cmp.nge.s,145 cmp.ngl.d,145 cmp.ngl.s,145 cmp.ngle.d,145 cmp.ngle.s,145 cmp.ngt.d,145 cmp.ngt.s,145 cmp.nle.d,145 cmp.nle.s,145 cmp.nlt.d,145 cmp.nlt.s,145 cmp.oge.d,145 cmp.oge.s,145 cmp.ogl.d,145 cmp.ogl.s,145 cmp.ogt.d,145 cmp.ogt.s,145 cmp.ole.d,145 cmp.ole.s,145 cmp.olt.d,145 cmp.olt.s,145 cmp.or.d,145 cmp.or.s,145 cmp.saf.d,145 cmp.saf.s,145 cmp.sat.d,145 cmp.sat.s,145 cmp.seq.d,145 cmp.seq.s,145 cmp.sf.d,145 cmp.sf.s,145 cmp.sle.d,145 cmp.sle.s,145 cmp.slt.d,145 cmp.slt.s,145 cmp.sne.d,145 cmp.sne.s,145 cmp.soge.d,145 cmp.soge.s,145 cmp.sogt.d,145 cmp.sogt.s,145 cmp.sor.d,145 cmp.sor.s,145 cmp.st.d,145 cmp.st.s,145 cmp.sueq.d,145 cmp.sueq.s,145 cmp.suge.d,145 cmp.suge.s,145 cmp.sugt.d,145 cmp.sugt.s,145 cmp.sule.d,145 cmp.sule.s,145 cmp.sult.d,145 cmp.sult.s,145 cmp.sun.d,145 cmp.sun.s,145 cmp.sune.d,145 cmp.sune.s,145 cmp.t.d,145 cmp.t.s,145 cmp.ueq.d,145 cmp.ueq.s,145 cmp.uge.d,145 cmp.uge.s,145 cmp.ugt.d,145 cmp.ugt.s,145 cmp.ule.d,145 cmp.ule.s,145 cmp.ult.d,145 cmp.ult.s,145 cmp.un.d,145 cmp.un.s,145 cmp.une.d,145 cmp.une.s,145 cop2,150 crc32b,152 crc32cb,155 crc32cd,155 crc32ch,155 crc32cw,155 crc32d,152 crc32h,152 crc32w,152 ctc1,158 ctc2,161 cvt.d.l,162 cvt.d.s,162 cvt.d.w,162 cvt.l.d,163 cvt.l.s,163 cvt.ps.s,164 cvt.s.d,168 cvt.s.l,168 cvt.s.pl,166 cvt.s.pu,167 cvt.s.w,168 cvt.w.d,169 cvt.w.s,169 dadd,170 daddi,171 daddiu,172 daddu,173 dahi,58 dalign,51 dati,58 daui,58 dbitswap,103 dclo,174 dclz,175 ddiv,176 ddivu,177 deret,178 dext,179 dextm,181 dextu,183 di,185 dins,186 dinsm,188 dinsu,190 div,192 div.d,197 div.s,197 divu,194 dlsa,294 dmfc0,199 dmfc1,200 
dmfc2,201 dmod,194 dmodu,194 dmtc0,202 dmtc1,203 dmtc2,204 dmuh,363 dmuhu,363 dmul,363 dmult,205 dmultu,206 dmulu,363 drotr,207 drotr32,208 drotrv,209 dsbh,210 dshd,211 dsll,212 dsll32,213 dsllv,214 dsra,215 dsra32,216 dsrav,217 dsrl,218 dsrl32,219 dsrlv,220 dsub,221 dsubu,222 dvp,223 ehb,226 ei,227 eret,228 eretnc,230 evp,232 ext,234 floor.l.d,236 floor.l.s,236 floor.w.d,237 floor.w.s,237 ginvi,239 ginvt,241 ins,244 j,246 jal,247 jalr,248 jalr.hb,250 jalx,253 jialc,255 jic,257 jr,258 jr.hb,260 lb,263 lbe,264 lbu,265 lbue,266 ld,267 ldc1,268 ldc2,269 ldl,271 ldpc,273 ldr,274 ldxc1,276 lh,277 lhe,278 lhu,279 lhue,280 ll,281 lld,283 lldp,288 lle,286 llwp,290 llwpe,292 lsa,294 lui,295 luxc1,296 lw,297 lwc1,298 lwc2,299 lwe,300 lwl,301 lwle,304 lwpc,307 lwr,308 lwre,311 lwre,312 lwu,315 lwupc,316 lwxc1,317 madd,318 madd.d,319 madd.ps,319 madd.s,319 maddf.d,321 maddf.s,321 maddu,323 max.d,324 max.s,324 maxa.d,324 maxa.s,324 mfc0,328 mfc1,329 mfc2,330 mfhc0,331 mfhc1,333 mfhc2,334 mfhi,335 mflo,336 min.d,324 min.s,324 mina.d,324 mina.s,324 mod,194 modu,194 mov.d,337 mov.ps,337 mov.s,337 movf,338 movf.d,339 movf.ps,339 movf.s,339 movn,341 movn.d,342 movn.ps,342 movn.s,342 movt,343 movt.d,344 movt.ps,344 movt.s,344 movz,346 movz.d,347 movz.ps,347 movz.s,347 msub,348 msub.d,349 msub.ps,349 msub.s,349 msubf.d,321 msubf.s,321 msubu,351 mtc0,352 mtc1,354 mtc2,355 mthc0,356 mthc1,358 mthc2,359 mthi,360 mtlo,361 muh,363 muhu,363 mul,362 mul.d,366 mul.ps,366 mul.s,366 mult,367 multu,368 mulu,363 nal,369 neg.d,370 neg.ps,370 neg.s,370 nmadd.d,371 nmadd.ps,371 nmadd.s,371 nmsub.d,373 nmsub.ps,373 nmsub.s,373 nop,375 nor,376 or,377 ori,378 pause,379 pll.ps,381 plu.ps,382 pref,383 prefe,387 prefx,391 pul.ps,392 puu.ps,393 rdhwr,394 rdpgpr,397 recip.d,398 recip.s,398 rint.d,399 rint.s,399 rotr,401 rotrv,402 round.l.d,403 round.l.s,403 round.w.d,404 round.w.s,404 rsqrt.d,405 rsqrt.s,405 sb,406 sbe,407 sc,408 scd,412 scdp,415 sce,418 scwp,422 scwpe,426 sd,429 sdbbp,430 sdc1,431 sdc2,432 
sdl,433 sdr,435 sdxc1,437 seb,438 seh,439 sel.d,441 sel.s,441 seleqz,442 seleqz.d,444 seleqz.s,444 selnez,442 sh,446 she,447 sigrie,448 sll,449 sllv,450 slt,451 slti,452 sltiu,453 sltu,454 sqrt.d,455 sqrt.s,455 sra,456 srav,457 srl,458 srlv,459 ssnop,460 sub,461 sub.d,462 sub.ps,462 sub.s,462 subu,463 suxc1,464 sw,465 swc2,467 swe,468 swl,469 swle,471 swr,473 swre,475 swxc1,477 sync,478 synci,483 syscall,486 teq,487 teqi,488 tge,489 tgei,490 tgeiu,491 tgeu,492 tlbinv,493 tlbinvf,495 tlbp,497 tlbr,498 tlbwi,500 tlbwr,502 tlt,504 tlti,505 tltiu,506 tltu,507 tne,508 tnei,509 trunc.l.d,510 trunc.l.s,510 trunc.w.d,511 trunc.w.s,511 wait,512 wrpgpr,514 wsbh,515 xor,516 xori,517 @MD00594-2B-microMIPS64-AFP-6.05.pdf [MIPS Architecture for Programmers Volume II-B: microMIPS64 Instruction Set, MD00594, 6.05, December 15, 2016] abs.d,128 abs.s,128 add,129 add.d,130 add.s,130 addiu,131 addiupc,132 addiur1sp,69 addiur2,70 addius5,72 addiusp,74 addu,133 addu16,76 align,134 aluipc,136 and,137 and16,77 andi,138 andi16,78 aui,139 auipc,141 b.af.c,149 b.at.c,149 b.eq.c,149 b.f.c,149 b.ge.c,149 b.gl.c,149 b.gle.c,149 b.gt.c,149 b.le.c,149 b.lt.c,149 b.ne.c,149 b.neq.c,149 b.nge.c,149 b.ngl.c,149 b.ngle.c,149 b.ngt.c,149 b.nle.c,149 b.nlt.c,149 b.oge.c,149 b.ogl.c,149 b.ogt.c,149 b.ole.c,149 b.olt.c,149 b.or.c,149 b.saf.c,149 b.sat.c,149 b.seq.c,149 b.sf.c,149 b.sle.c,149 b.slt.c,149 b.sne.c,149 b.soge.c,149 b.sogt.c,149 b.sor.c,149 b.st.c,149 b.sueq.c,149 b.suge.c,149 b.sugt.c,149 b.sule.c,149 b.sult.c,149 b.sun.c,149 b.sune.c,149 b.t.c,149 b.ueq.c,149 b.uge.c,149 b.ugt.c,149 b.ule.c,149 b.ult.c,149 b.un.c,149 b.une.c,149 balc,142 bc,152 bc16,79 bc1eqzc,143 bc1nezc,143 bc2eqzc,145 bc2nezc,145 beqzalc,147 beqzc16,80 bgezalc,147 bgtzalc,147 bitswap,154 blezalc,147 bltzalc,147 bnezalc,147 bnezc16,81 bnvc,156 bovc,156 break,153 break16,82 cache,158 cachee,164 ceil.l.d,170 ceil.l.s,170 ceil.w.d,171 ceil.w.s,171 cfc1,172 cfc2,174 class.d,175 class.s,175 clo,177 clz,178 cmp.af.d,179 
cmp.af.s,179 cmp.at.d,179 cmp.at.s,179 cmp.eq.d,179 cmp.eq.s,179 cmp.f.d,179 cmp.f.s,179 cmp.ge.d,179 cmp.ge.s,179 cmp.gl.d,179 cmp.gl.s,179 cmp.gle.d,179 cmp.gle.s,179 cmp.gt.d,179 cmp.gt.s,179 cmp.le.d,179 cmp.le.s,179 cmp.lt.d,179 cmp.lt.s,179 cmp.ne.d,179 cmp.ne.s,179 cmp.neq.d,179 cmp.neq.s,179 cmp.nge.d,179 cmp.nge.s,179 cmp.ngl.d,179 cmp.ngl.s,179 cmp.ngle.d,179 cmp.ngle.s,179 cmp.ngt.d,179 cmp.ngt.s,179 cmp.nle.d,179 cmp.nle.s,179 cmp.nlt.d,179 cmp.nlt.s,179 cmp.oge.d,179 cmp.oge.s,179 cmp.ogl.d,179 cmp.ogl.s,179 cmp.ogt.d,179 cmp.ogt.s,179 cmp.ole.d,179 cmp.ole.s,179 cmp.olt.d,179 cmp.olt.s,179 cmp.or.d,179 cmp.or.s,179 cmp.saf.d,179 cmp.saf.s,179 cmp.sat.d,179 cmp.sat.s,179 cmp.seq.d,179 cmp.seq.s,179 cmp.sf.d,179 cmp.sf.s,179 cmp.sle.d,179 cmp.sle.s,179 cmp.slt.d,179 cmp.slt.s,179 cmp.sne.d,179 cmp.sne.s,179 cmp.soge.d,179 cmp.soge.s,179 cmp.sogt.d,179 cmp.sogt.s,179 cmp.sor.d,179 cmp.sor.s,179 cmp.st.d,179 cmp.st.s,179 cmp.sueq.d,179 cmp.sueq.s,179 cmp.suge.d,179 cmp.suge.s,179 cmp.sugt.d,179 cmp.sugt.s,179 cmp.sule.d,179 cmp.sule.s,179 cmp.sult.d,179 cmp.sult.s,179 cmp.sun.d,179 cmp.sun.s,179 cmp.sune.d,179 cmp.sune.s,179 cmp.t.d,179 cmp.t.s,179 cmp.ueq.d,179 cmp.ueq.s,179 cmp.uge.d,179 cmp.uge.s,179 cmp.ugt.d,179 cmp.ugt.s,179 cmp.ule.d,179 cmp.ule.s,179 cmp.ult.d,179 cmp.ult.s,179 cmp.un.d,179 cmp.un.s,179 cmp.une.d,179 cmp.une.s,179 cop2,184 crc32b,186 crc32cb,189 crc32cd,189 crc32ch,189 crc32cw,189 crc32d,186 crc32h,186 crc32w,186 ctc1,192 ctc2,195 cvt.d.l,196 cvt.d.s,196 cvt.d.w,196 cvt.l.d,197 cvt.l.s,197 cvt.s.d,198 cvt.s.l,198 cvt.s.w,198 cvt.w.d,199 cvt.w.s,199 dadd,200 daddiu,201 daddu,202 dahi,139 dalign,134 dati,139 daui,139 dbitswap,154 dclo,203 dclz,204 ddiv,220 ddivu,220 deret,205 dext,206 dextm,208 dextu,210 di,212 dins,213 dinsm,215 dinsu,217 div,220 div.d,219 div.s,219 divu,220 dlsa,300 dmfc0,224 dmfc1,225 dmfc2,226 dmod,220 dmodu,220 dmtc0,227 dmtc1,228 dmtc2,229 dmuh,330 dmuhu,330 dmul,330 dmulu,330 drotr,230 drotr32,231 drotrv,232 
dsbh,233 dshd,234 dsll,235 dsll32,236 dsllv,237 dsra,238 dsra32,239 dsrav,240 dsrl,241 dsrl32,242 dsrlv,243 dsub,244 dsubu,245 dvp,246 ehb,249 ei,250 eret,251 eretnc,252 evp,254 ext,256 floor.l.d,258 floor.l.s,258 floor.w.d,259 floor.w.s,259 ginvi,260 ginvt,262 ins,265 jalrc,267 jalrc.hb,269 jalrc16,83 jialc,272 jic,274 jrc16,87 jrcaddiusp,85 lb,276 lbe,277 lbu,278 lbu16,88 lbue,279 ld,280 ldc1,281 ldc2,282 ldm,90 ldp,92 ldpc,283 lh,284 lhe,285 lhu,286 lhu16,94 lhue,287 li16,95 ll,288 lld,290 lldp,294 lle,291 llwp,296 llwpe,298 lsa,300 lui,301 lw,302 lw16,97 lwc1,303 lwc2,304 lwe,305 lwgp,102 lwm16,100 lwm32,98 lwp,96 lwpc,306 lwsp,103 lwu,308 lwupc,307 maddf.d,309 maddf.s,309 max.d,311 max.s,311 maxa.d,311 maxa.s,311 mfc0,315 mfc1,316 mfc2,317 mfhc0,318 mfhc1,320 mfhc2,321 min.d,311 min.s,311 mina.d,311 mina.s,311 mod,220 modu,220 mov.d,322 mov.s,322 move16,104 movep,105 msubf.d,309 msubf.s,309 mtc0,323 mtc1,324 mtc2,325 mthc0,326 mthc1,328 mthc2,329 muh,330 muhu,330 mul,330 mul.d,333 mul.s,333 mulu,330 neg.d,334 neg.s,334 nop,335 nor,336 not16,107 or,337 or16,108 ori,338 pause,339 pref,341 prefe,345 rdhwr,349 rdpgpr,352 recip.d,353 recip.s,353 rotr,356 rotrv,357 round.l.d,358 round.l.s,358 round.w.d,359 round.w.s,359 rsqrt.d,360 rsqrt.s,360 sb,361 sb16,109 sbe,362 sc,363 scd,370 scdp,373 sce,367 scwp,376 scwpe,380 sd,383 sdbbp,384 sdbbp16,110 sdc1,385 sdc2,386 sdm,111 sdp,113 seb,387 seh,388 seleqz,391 seleqz.d,393 seleqz.s,393 selnez,391 sh,395 sh16,114 she,396 sigrie,397 sll,398 sll16,115 sllv,399 slt,400 slti,401 sltiu,402 sltu,403 sqrt.d,404 sqrt.s,404 sra,405 srav,406 srl,407 srl16,116 srlv,408 ssnop,409 sub,410 sub.d,411 sub.s,411 subu,412 subu16,118 sw,413 sw16,119 swc2,416 swe,414 swm16,121 swm32,123 swp,125 swsp,120 sync,417 synci,422 syscall,425 teq,426 tge,427 tgeu,428 tlbinv,430 tlbinvf,433 tlbp,435 tlbr,436 tlbwi,438 tlbwr,440 tlt,442 tltu,443 tne,444 trunc.l.d,445 trunc.l.s,445 trunc.w.d,446 trunc.w.s,446 wait,447 wrpgpr,449 wsbh,450 xor,451 
xor16,126 xori,452 @MIPS_Architecture_microMIPS32_InstructionSet_AFP_P_MD00582_06.04.pdf [MIPS Architecture for Programmers Volume II-B: microMIPS32 Instruction Set, MD00582, 6.04, June 6, 2016] addiur1sp,65 addiur2,66 addius5,67 addiusp,69 addu16,71 and16,72 andi16,73 bc16,74 beqzc16,75 bnezc16,76 break16,77 jalrc16,78 jrcaddiusp,80 jrc16,82 lbu16,83 lhu16,85 li16,86 lwp,87 lw16,88 lwm32,89 lwm16,91 lwgp,93 lwsp,94 move16,95 movep,96 not16,98 or16,99 sb16,100 sdbbp16,101 sh16,102 sll16,103 srl16,104 subu16,105 sw16,106 swsp,107 swm16,108 swm32,110 swp,112 xor16,113 abs.s,115 abs.d,115 add,116 add.s,117 add.d,117 addiu,118 addiupc,119 addu,120 align,121 aluipc,123 and,124 andi,125 aui,126 auipc,127 balc,128 bc1eqzc,129 bc1nezc,129 bc2eqzc,131 bc2nezc,131 blezalc,133 bgezalc,133 bgtzalc,133 bltzalc,133 beqzalc,133 bnezalc,133 b.f.c,135 b.un.c,135 b.eq.c,135 b.ueq.c,135 b.olt.c,135 b.ult.c,135 b.ole.c,135 b.ule.c,135 b.sf.c,135 b.ngle.c,135 b.seq.c,135 b.ngl.c,135 b.lt.c,135 b.nge.c,135 b.le.c,135 b.ngt.c,135 bc,138 break,139 bitswap,140 bovc,142 bnvc,142 cache,144 cachee,150 ceil.l.s,156 ceil.l.d,156 ceil.w.s,157 ceil.w.d,157 cfc1,158 cfc2,160 class.s,161 class.d,161 clo,163 clz,164 cmp.f.s,165 cmp.un.s,165 cmp.eq.s,165 cmp.ueq.s,165 cmp.olt.s,165 cmp.ult.s,165 cmp.ole.s,165 cmp.ule.s,165 cmp.sf.s,165 cmp.ngle.s,165 cmp.seq.s,165 cmp.ngl.s,165 cmp.lt.s,165 cmp.nge.s,165 cmp.le.s,165 cmp.ngt.s,165 cmp.f.d,165 cmp.un.d,165 cmp.eq.d,165 cmp.ueq.d,165 cmp.olt.d,165 cmp.ult.d,165 cmp.ole.d,165 cmp.ule.d,165 cmp.sf.d,165 cmp.ngle.d,165 cmp.seq.d,165 cmp.ngl.d,165 cmp.lt.d,165 cmp.nge.d,165 cmp.le.d,165 cmp.ngt.d,165 cop2,170 ctc1,171 ctc2,174 cvt.d.s,175 cvt.d.w,175 cvt.d.l,175 cvt.l.s,176 cvt.l.d,176 cvt.s.d,177 cvt.s.w,177 cvt.s.l,177 cvt.w.s,178 cvt.w.d,178 deret,179 di,180 div.s,181 div.d,181 div,182 mod,182 divu,182 modu,182 dvp,185 ehb,188 ei,189 eret,190 eretnc,191 ext,193 evp,196 floor.l.s,198 floor.l.d,198 floor.w.s,199 floor.w.d,199 ins,200 jalrc,202 
jalrc.hb,204 jialc,207 jic,209 lb,211 lbe,212 lbu,213 lbue,214 ldc1,215 ldc2,216 lh,217 lhe,218 lhu,219 lhue,220 ll,221 lle,223 llwp,226 llwpe,228 lsa,230 lui,231 lw,232 lwc1,233 lwc2,234 lwe,235 lwpc,236 maddf.s,237 maddf.d,237 msubf.s,237 msubf.d,237 max.s,239 max.d,239 min.s,239 min.d,239 maxa.s,239 maxa.d,239 mina.s,239 mina.d,239 mfc0,243 mfc1,244 mfc2,245 mfhc0,246 mfhc1,247 mfhc2,248 mov.s,249 mov.d,249 mtc0,250 mtc1,251 mtc2,252 mthc0,253 mthc1,254 mthc2,255 mul,256 muh,256 mulu,256 muhu,256 mul.s,258 mul.d,258 neg.s,259 neg.d,259 nop,260 nor,261 or,262 ori,263 pause,264 pref,266 prefe,270 rdhwr,274 rdpgpr,277 recip.s,278 recip.d,278 rotr,281 rotrv,282 round.l.s,283 round.l.d,283 round.w.s,284 round.w.d,284 rsqrt.s,285 rsqrt.d,285 sb,286 sbe,287 sc,288 sce,291 scwp,294 scwpe,298 sdbbp,301 sdc1,302 sdc2,303 seb,304 seh,305 seleqz,307 selnez,307 seleqz.s,309 seleqz.d,309 sh,311 she,312 sigrie,313 sll,314 sllv,315 slt,316 slti,317 sltiu,318 sltu,319 sqrt.s,320 sqrt.d,320 sra,321 srav,322 srl,323 srlv,324 ssnop,325 sub,326 sub.s,327 sub.d,327 subu,328 sw,329 swe,330 swc2,332 sync,333 synci,338 syscall,341 teq,342 tge,343 tgeu,344 tlbinv,346 tlbinvf,349 tlbp,351 tlbr,352 tlbwi,354 tlbwr,356 tlt,358 tltu,359 tne,360 trunc.l.s,361 trunc.l.d,361 trunc.w.s,362 trunc.w.d,362 wait,363 wrpgpr,365 wsbh,366 xor,367 xori,368 ================================================ FILE: pypcode/processors/MIPS/data/manuals/r4000.idx ================================================ @r4000.pdf[MIPS R4000 Microprocessor User's Manual, Second Edition, July 2005] add , 479 addi , 480 addiu , 481 addu , 482 and , 483 andi , 484 bczf , 485 bczfl , 487 bczt , 489 bcztl , 491 beq , 493 beql , 494 bgez , 495 bgezal , 496 bgezall , 497 bgezl , 498 bgtz , 499 bgtzl , 500 blez , 501 blezl , 502 bltz , 503 bltzal , 504 bltzall , 505 bltzl , 506 bne , 507 bnel , 508 break , 509 cache , 510 cfc , 516 cop , 517 ctc , 518 dadd , 519 daddi , 520 daddiu , 521 daddu , 522 ddiv , 523 ddivu , 524 div , 
525 divu , 527 dmfc0 , 529 dmtc0 , 530 dmult , 531 dmultu , 532 dsll , 533 dsllv , 534 dsll32 , 535 dsra , 536 dsrav , 537 dsra32 , 538 dsrl , 539 dsrlv , 540 dsrl32 , 541 dsub , 542 dsubu , 543 eret , 544 j , 545 jal , 546 jalr , 547 jr , 548 lb , 549 lbu , 550 ld , 551 ldc , 552 ldl , 554 ldr , 557 lh , 560 lhu , 561 ll , 562 lld , 564 lui , 566 lw , 567 lwc , 568 lwl , 570 lwr , 573 lwu , 576 mfc0 , 577 mfcz , 578 mfhi , 580 mflo , 581 mtc0 , 582 mtcz , 583 mthi , 584 mtlo , 585 mult , 586 multu , 588 nor , 590 or , 591 ori , 592 sb , 593 sc , 594 scd , 596 sd , 598 sdcz , 599 sdl , 601 sdr , 604 sh , 607 sll , 608 sllv , 609 slt , 610 slti , 611 sltiu , 612 sltu , 613 sra , 614 srav , 615 srl , 616 srlv , 617 sub , 618 subu , 619 sw , 620 swcz , 621 swl , 623 swr , 626 sync , 629 syscall , 630 teq , 631 teqi , 632 tge , 633 tgei , 634 tgeiu , 635 tgeu , 636 tlbp , 637 tlbr , 638 tlbwi , 639 tlbwr , 640 tlt , 641 tlti , 642 tltiu , 643 tltu , 644 tne , 645 tnei , 646 xor , 647 xori , 648 abs , 663 add , 664 bc1f , 665 bc1fl , 666 bc1t , 667 bc1tl , 668 c. , 669 ceil.l. , 671 ceil.W. , 673 cfc1 , 675 ctc1 , 676 cvt. , 677 div. , 681 dmfc1 , 682 dmtc1 , 683 floor. , 684 ldc1 , 688 lwc1 , 690 mfc1 , 692 mov. , 693 mtc1 , 694 mul. , 695 neg. , 696 round. , 697 sdc1 , 701 sqrt , 703 sub , 704 swc1 , 705 trunc , 707 ================================================ FILE: pypcode/processors/MIPS/data/patterns/MIPS_BE_patterns.xml ================================================ 0x03e00008 0x........ 0x03e00008 0x........ 0x00000000 0x03e00008 0x........ 0x00000000 0x00000000 0x03e00008 0x........ 0x00000000 0x00000000 0x00000000 000010.. 0x...... 0.100111 0xbd 0....... ......00 000010.. 0x...... 0.100111 0xbd 0....... ......00 0x00000000 0x1000.... 0.100111 0xbd 0....... ......00 0x1000.... 0.100111 0xbd 0....... ......00 0x00000000 0x03 0x20 00000... ..001000 0.100111 0xbd 0x0. 0x.. 0.100111 10111101 1....... ......00 0x3c...... 0.100111 0xbd 1....... ......00 100011.. 
0x...... 0.100111 0xbd 1....... ......00 0x3c...... 100011.. 0x...... 0.100111 0xbd 1....... ......00 0x3c1c.... 0.100111 0x9c.... 0x03e00008 0x........ 0x03e00008 0x........ 0x00000000 0x03e00008 0x........ 0x00000000 0x00000000 0x03e00008 0x........ 0x00000000 0x00000000 0x00000000 000010.. 0x...... 0.100111 0xbd 0....... ......00 0x1000.... 0.100111 0xbd 0....... ......00 0x03 0x20 00000... ..001000 0.100111 0xbd 0x0. 0x.. 0x3c06.... 01100111 10111101 1....... ......00 0xff 0xbc 0....... ......00 00100111 10111101 1....... ......00 0xaf 0xbc 0....... ......00 0x3c 0x0f 0x.. 0x.. 0x8d 0xf9 0x.. 0x.. 0x03 0x20 00000... 0x08 0x25 0xf8 0x.. 0x.. 0xb2 0x03 0x9a 0x60 0x65 .....010 0xeb 0x00 0x65 .....011 ================================================ FILE: pypcode/processors/MIPS/data/patterns/MIPS_LE_patterns.xml ================================================ 0x0800e003 0x........ 0x0800e003 0x........ 0x00000000 0x0800e003 0x........ 0x00000000 0x00000000 0x0800e003 0x........ 0x00000000 0x00000000 0x00000000 0x...... 000010.. ......00 0....... 0xbd 0.100111 0x....0010 ......00 0....... 0xbd 0.100111 ..001000 00000... 0x20 0x03 0x0. 0x.. 0xbd 0.100111 ......00 1....... 10111101 00100111 0x......3c ......00 1....... 0xbd 0.100111 0x...... 100011.. ......00 1....... 0xbd 0.100111 0x......3c 0x...... 100011.. ......00 1....... 0xbd 0.100111 0x....1c3c 0x....9c 0.100111 0x0800e003 0x........ 0x0800e003 0x........ 0x00000000 0x0800e003 0x........ 0x00000000 0x00000000 0x0800e003 0x........ 0x00000000 0x00000000 0x00000000 0x...... 000010.. ......00 0....... 0xbd 0.100111 0x....0010 ......00 0....... 0xbd 0.100111 ..001000 00000... 0x20 0x03 0x0. 0x.. 0xbd 0.100111 0x....063c ......00 1....... 10111101 01100111 ......00 0....... 0xbc 0xff ......00 1....... 10111101 00100111 ......00 0....... 0xbc 0xaf 0x.. 0x.. 0x0f 0x3c 0x.. 0x.. 0xf9 0x8d 0x08 00000... 0x20 0x03 0x.. 0x.. 
0xf8 0x25 0x03 0xb2 0x60 0x9a .....010 0x65 0x00 0xeb .....011 0x65 ================================================ FILE: pypcode/processors/MIPS/data/patterns/patternconstraints.xml ================================================ MIPS_BE_patterns.xml MIPS_LE_patterns.xml ================================================ FILE: pypcode/processors/NDS32/data/languages/lsmw.sinc ================================================ Dreg: a0 is a0 & regNum=0 { export a0; } Dreg: a1 is a1 & regNum=1 { export a1; } Dreg: a2 is a2 & regNum=2 { export a2; } Dreg: a3 is a3 & regNum=3 { export a3; } Dreg: a4 is a4 & regNum=4 { export a4; } Dreg: a5 is a5 & regNum=5 { export a5; } Dreg: s0 is s0 & regNum=6 { export s0; } Dreg: s1 is s1 & regNum=7 { export s1; } Dreg: s2 is s2 & regNum=8 { export s2; } Dreg: s3 is s3 & regNum=9 { export s3; } Dreg: s4 is s4 & regNum=10 { export s4; } Dreg: s5 is s5 & regNum=11 { export s5; } Dreg: s6 is s6 & regNum=12 { export s6; } Dreg: s7 is s7 & regNum=13 { export s7; } Dreg: s8 is s8 & regNum=14 { export s8; } Dreg: ta is ta & regNum=15 { export ta; } Dreg: t0 is t0 & regNum=16 { export t0; } Dreg: t1 is t1 & regNum=17 { export t1; } Dreg: t2 is t2 & regNum=18 { export t2; } Dreg: t3 is t3 & regNum=19 { export t3; } Dreg: t4 is t4 & regNum=20 { export t4; } Dreg: t5 is t5 & regNum=21 { export t5; } Dreg: t6 is t6 & regNum=22 { export t6; } Dreg: t7 is t7 & regNum=23 { export t7; } Dreg: t8 is t8 & regNum=24 { export t8; } Dreg: t9 is t9 & regNum=25 { export t9; } Dreg: p0 is p0 & regNum=26 { export p0; } Dreg: p1 is p1 & regNum=27 { export p1; } Dreg: fp is fp & regNum=28 { export fp; } Dreg: gp is gp & regNum=29 { export gp; } Dreg: lp is lp & regNum=30 { export lp; } Dreg: sp is sp & regNum=31 { export sp; } macro Smwad(reg) { mult_addr = mult_addr - 4; *mult_addr = reg; } macro LmwOp(reg) { reg = *mult_addr; } macro SmwOp(reg) { *mult_addr = reg; } macro MwDec() { mult_addr = mult_addr - 4; } macro MwInc() { mult_addr = mult_addr + 4; } 
Lsmw_id: is LsmwId=0 { MwInc(); } Lsmw_id: is LsmwId=1 { MwDec(); } Lmw.fp: fp is Lsmw_id & LsmwBa=0 & Enable4_fp=1 & fp { LmwOp(fp); build Lsmw_id; } Lmw.fp: fp is Lsmw_id & LsmwBa=1 & Enable4_fp=1 & fp { build Lsmw_id; LmwOp(fp); } Lmw.fp: is Enable4_fp=0 { } Lmw.gp: gp is Lsmw_id & LsmwBa=0 & Enable4_gp=1 & gp { LmwOp(gp); build Lsmw_id; } Lmw.gp: gp is Lsmw_id & LsmwBa=1 & Enable4_gp=1 & gp { build Lsmw_id; LmwOp(gp); } Lmw.gp: is Enable4_gp=0 { } Lmw.lp: lp is Lsmw_id & LsmwBa=0 & Enable4_lp=1 & lp { LmwOp(lp); build Lsmw_id; } Lmw.lp: lp is Lsmw_id & LsmwBa=1 & Enable4_lp=1 & lp { build Lsmw_id; LmwOp(lp); } Lmw.lp: is Enable4_lp=0 { } Lmw.sp: sp is Lsmw_id & LsmwBa=0 & Enable4_sp=1 & sp { LmwOp(sp); build Lsmw_id; } Lmw.sp: sp is Lsmw_id & LsmwBa=1 & Enable4_sp=1 & sp { build Lsmw_id; LmwOp(sp); } Lmw.sp: is Enable4_sp=0 { } # Terminating condition LmwReg: Dreg is LsmwId=0 & LsmwBa=0 & Dreg & counter=1 [regNum=regNum+1;] { build Dreg; LmwOp(Dreg); MwInc(); } LmwReg: Dreg is LsmwId=1 & LsmwBa=0 & Dreg & counter=1 [regNum=regNum-1;] { build Dreg; LmwOp(Dreg); MwDec(); } LmwReg: Dreg is LsmwId=0 & LsmwBa=1 & Dreg & counter=1 [regNum=regNum+1;] { build Dreg; MwInc(); LmwOp(Dreg); } LmwReg: Dreg is LsmwId=1 & LsmwBa=1 & Dreg & counter=1 [regNum=regNum-1;] { build Dreg; MwDec(); LmwOp(Dreg); } LmwReg: Dreg, LmwReg is LsmwId=0 & LsmwBa=0 & Dreg & LmwReg [ counter = counter-1; regNum=regNum+1;] { LmwOp(Dreg); MwInc(); build LmwReg; } LmwReg: Dreg, LmwReg is LsmwId=1 & LsmwBa=0 & Dreg & LmwReg [ counter = counter-1; regNum=regNum-1;] { LmwOp(Dreg); MwDec(); build LmwReg; } LmwReg: Dreg, LmwReg is LsmwId=0 & LsmwBa=1 & Dreg & LmwReg [ counter = counter-1; regNum=regNum+1;] { MwInc(); LmwOp(Dreg); build LmwReg; } LmwReg: Dreg, LmwReg is LsmwId=1 & LsmwBa=1 & Dreg & LmwReg [ counter = counter-1; regNum=regNum-1;] { MwDec(); LmwOp(Dreg); build LmwReg; } # Initial conditions Lmw.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=0 & Lmw.fp & Lmw.gp & Lmw.lp & Lmw.sp) ... 
& LmwReg [ regNum=LsmwRb_-1; counter=LsmwRe_-LsmwRb_+1; ] { build LmwReg; build Lmw.fp; build Lmw.gp; build Lmw.lp; build Lmw.sp; } Lmw.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=1 & Lmw.sp & Lmw.lp & Lmw.gp & Lmw.fp) ... & LmwReg [ regNum=LsmwRe_+1; counter=LsmwRe_-LsmwRb_+1; ] { build Lmw.sp; build Lmw.lp; build Lmw.gp; build Lmw.fp; build LmwReg; } Lmw.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=0 & Lmw.fp & Lmw.gp & Lmw.lp & Lmw.sp { build Lmw.fp; build Lmw.gp; build Lmw.lp; build Lmw.sp; } Lmw.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=1 & Lmw.sp & Lmw.lp & Lmw.gp & Lmw.fp { build Lmw.sp; build Lmw.lp; build Lmw.gp; build Lmw.fp; } Lmwa.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=0 & Lmw.fp & Lmw.gp & Lmw.lp & Lmw.sp) ... & LmwReg [ regNum=LsmwRb_-1; counter=LsmwRe_-LsmwRb_+1; ] { build LmwReg; build Lmw.sp; build Lmw.lp; build Lmw.gp; build Lmw.fp; } Lmwa.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=1 & Lmw.sp & Lmw.lp & Lmw.gp & Lmw.fp) ... & LmwReg [ regNum=LsmwRe_+1; counter=LsmwRe_-LsmwRb_+1; ] { build Lmw.fp; build Lmw.gp; build Lmw.lp; build Lmw.sp; build LmwReg; } Lmwa.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=0 & Lmw.fp & Lmw.gp & Lmw.lp & Lmw.sp { build Lmw.sp; build Lmw.lp; build Lmw.gp; build Lmw.fp; } Lmwa.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=1 & Lmw.sp & Lmw.lp & Lmw.gp & Lmw.fp { build Lmw.fp; build Lmw.gp; build Lmw.lp; build Lmw.sp; } Smw.fp: fp is Lsmw_id & LsmwBa=0 & Enable4_fp=1 & fp { SmwOp(fp); build Lsmw_id; } Smw.fp: fp is Lsmw_id & LsmwBa=1 & Enable4_fp=1 & fp { build Lsmw_id; SmwOp(fp); } Smw.fp: is Enable4_fp=0 { } Smw.gp: gp is Lsmw_id & LsmwBa=0 & Enable4_gp=1 & gp { SmwOp(gp); build Lsmw_id; } Smw.gp: gp is Lsmw_id & LsmwBa=1 & Enable4_gp=1 & gp { build Lsmw_id; SmwOp(gp); } Smw.gp: is Enable4_gp=0 { } Smw.lp: lp is Lsmw_id & LsmwBa=0 & Enable4_lp=1 & lp { SmwOp(lp); build Lsmw_id; } Smw.lp: lp is Lsmw_id & LsmwBa=1 & Enable4_lp=1 & lp { build Lsmw_id; SmwOp(lp); } Smw.lp: is Enable4_lp=0 { } Smw.sp: sp is Lsmw_id & LsmwBa=0 & 
Enable4_sp=1 & sp { SmwOp(sp); build Lsmw_id; } Smw.sp: sp is Lsmw_id & LsmwBa=1 & Enable4_sp=1 & sp { build Lsmw_id; SmwOp(sp); } Smw.sp: is Enable4_sp=0 { } # Terminating condition SmwReg: Dreg is LsmwId=0 & LsmwBa=0 & Dreg & counter=1 [regNum=regNum+1;] { build Dreg; SmwOp(Dreg); MwInc(); } SmwReg: Dreg is LsmwId=1 & LsmwBa=0 & Dreg & counter=1 [regNum=regNum-1;] { build Dreg; SmwOp(Dreg); MwDec(); } SmwReg: Dreg is LsmwId=0 & LsmwBa=1 & Dreg & counter=1 [regNum=regNum+1;] { build Dreg; MwInc(); SmwOp(Dreg); } SmwReg: Dreg is LsmwId=1 & LsmwBa=1 & Dreg & counter=1 [regNum=regNum-1;] { build Dreg; MwDec(); SmwOp(Dreg); } SmwReg: Dreg, SmwReg is LsmwId=0 & LsmwBa=0 & Dreg & SmwReg [ counter = counter-1; regNum=regNum+1;] { build Dreg; SmwOp(Dreg); MwInc(); build SmwReg; } SmwReg: Dreg, SmwReg is LsmwId=1 & LsmwBa=0 & Dreg & SmwReg [ counter = counter-1; regNum=regNum-1;] { build Dreg; SmwOp(Dreg); MwDec(); build SmwReg; } SmwReg: Dreg, SmwReg is LsmwId=0 & LsmwBa=1 & Dreg & SmwReg [ counter = counter-1; regNum=regNum+1;] { build Dreg; MwInc(); SmwOp(Dreg); build SmwReg; } SmwReg: Dreg, SmwReg is LsmwId=1 & LsmwBa=1 & Dreg & SmwReg [ counter = counter-1; regNum=regNum-1;] { build Dreg; MwDec(); SmwOp(Dreg); build SmwReg; } # Initial conditions Smw.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=0 & Smw.fp & Smw.gp & Smw.lp & Smw.sp) ... & SmwReg [ regNum=LsmwRb_-1; counter=LsmwRe_-LsmwRb_+1; ] { build SmwReg; build Smw.fp; build Smw.gp; build Smw.lp; build Smw.sp; } Smw.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=1 & Smw.sp & Smw.lp & Smw.gp & Smw.fp) ... 
& SmwReg [ regNum=LsmwRe_+1; counter=LsmwRe_-LsmwRb_+1; ] { build Smw.sp; build Smw.lp; build Smw.gp; build Smw.fp; build SmwReg; } Smw.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=0 & Smw.fp & Smw.gp & Smw.lp & Smw.sp { build Smw.fp; build Smw.gp; build Smw.lp; build Smw.sp; } Smw.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=1 & Smw.sp & Smw.lp & Smw.gp & Smw.fp { build Smw.sp; build Smw.lp; build Smw.gp; build Smw.fp; } Smwa.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=0 & Smw.fp & Smw.gp & Smw.lp & Smw.sp) ... & SmwReg [ regNum=LsmwRb_-1; counter=LsmwRe_-LsmwRb_+1; ] { build SmwReg; build Smw.sp; build Smw.lp; build Smw.gp; build Smw.fp; } Smwa.regs: is (LsmwRe_ & LsmwRb_ & LsmwId=1 & Smw.sp & Smw.lp & Smw.gp & Smw.fp) ... & SmwReg [ regNum=LsmwRe_+1; counter=LsmwRe_-LsmwRb_+1; ] { build Smw.fp; build Smw.gp; build Smw.lp; build Smw.sp; build SmwReg; } Smwa.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=0 & Smw.fp & Smw.gp & Smw.lp & Smw.sp { build Smw.sp; build Smw.lp; build Smw.gp; build Smw.fp; } Smwa.regs: is LsmwRe_=0x1f & LsmwRb_=0x1f & LsmwId=1 & Smw.sp & Smw.lp & Smw.gp & Smw.fp { build Smw.fp; build Smw.gp; build Smw.lp; build Smw.sp; } ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32.cspec ================================================ ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32.dwarf ================================================ ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32.ldefs ================================================ NDS32 default processor 32-bit big-endian NDS32 default processor 32-bit little-endian ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32.opinion ================================================ ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32.pspec 
================================================ ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32.sinc ================================================ ### General ### define endian=big; define alignment=2; define space ram type=ram_space size=4 wordsize=1 default; define space register type=register_space size=4; define space csreg type=ram_space size=2 wordsize=4; @define CSR_REG_START "0x0000" define register offset=0 size=4 [a0 a1 a2 a3 a4 a5 s0 s1 s2 s3 s4 s5 s6 s7 s8 ta t0 t1 t2 t3 t4 t5 t6 t7 t8 t9 p0 p1 fp gp lp sp]; define register offset=0x90 size=4 [pc mult_addr mult_inc]; define register offset=0x100 size=8 [d0 d1]; define register offset=0x100 size=4 [d0.hi d0.lo d1.hi d1.lo]; define register offset=0x200 size=4 [ itb lb lc le ifc_lp fpcsr fpcfg ]; define register offset=0x1000 size=4 [ fs0 fs1 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 fs12 fs13 fs14 fs15 fs16 fs17 fs18 fs19 fs20 fs21 fs22 fs23 fs24 fs25 fs26 fs27 fs28 fs29 fs30 fs31 ]; define register offset=0x1000 size=8 [ fd0 fd1 fd2 fd3 fd4 fd5 fd6 fd7 fd8 fd9 fd10 fd11 fd12 fd13 fd14 fd15 fd16 fd17 fd18 fd19 fd20 fd21 fd22 fd23 fd24 fd25 fd26 fd27 fd28 fd29 fd30 fd31 ]; define csreg offset=0x0a9 size=4 [ ipc ]; define register offset=0x300 size=8 contextreg; define context contextreg counter = (22,26) regNum = (27,31) # register for load/store multiple instructions ; define token instr32(32) OpSz = (31, 31) Opc = (25, 30) Rt = (20, 24) Fst = (20, 24) Fdt = (20, 24) Rth = (21, 24) Rtl = (21, 24) Ra = (15, 19) Fsa = (15, 19) Fda = (15, 19) Rb = (10, 14) Fsb = (10, 14) Fdb = (10, 14) Rd = (5, 9) Rs = (5, 9) Sub5 = (0, 4) Sub6 = (0, 5) Sub7 = (0, 6) Sub8 = (0, 7) Sub3 = (7, 9) fop4 = (6, 9) cop4 = (0, 3) f2op = (10, 14) fcnd = (7, 9) cmpe = (6, 6) fbi = (7, 7) cpn = (13, 14) fsbi = (12, 12) Imm8u = (7,14) Imm5u = (10, 14) Imm5s = (10, 14) signed Br1t = (14, 14) Br2t = (16, 19) Alu2Mod = (6, 9) Dtl = (22, 24) Dt = (21, 21) Dtlow = (21, 21) Dthigh = 
(21, 21) Dtr = (20, 20) JIt = (24, 24) Imm19s = (0, 18) signed Imm18s = (0, 17) signed Imm17s = (0, 16) signed Imm16s = (0, 15) signed Imm14s = (0, 13) signed Imm15u = (0, 14) Imm15s = (0, 14) signed Imm20u = (0, 19) Imm20s = (0, 19) signed Imm24s = (0, 23) signed Imm12s = (0, 11) signed Imm11s = (8, 18) signed Imm8s = (0, 7) signed sv = (8, 9) SrIdx = (10, 19) Swid = (5, 19) CctlZ = (11, 14) CctlLevel = (10, 10) CctlSub = (5, 9) MsyncZ = (8, 19) MsyncSub = (5, 7) DtIt = (8, 9) Jz = (6, 7) JrHint = (5, 5) ToggleL = (21, 24) Toggle = (20, 20) Usr = (15, 19) Group = (10, 14) DprefD = (24, 24) DprefSub = (20, 23) TlbopSub = (5, 9) StandbyZ = (7, 9) StandbySub = (5, 6) GpSub1 = (19, 19) GpSub2 = (18, 19) GpSub3 = (17, 19) sh = (5, 9) Bxxc = (19, 19) LsmwRa = (15, 19) LsmwRb = (20, 24) LsmwRb_ = (20, 24) LsmwRe = (10, 14) LsmwRe_ = (10, 14) Enable4 = (6, 9) Enable4_fp = (9, 9) Enable4_gp = (8, 8) Enable4_lp = (7, 7) Enable4_sp = (6, 6) LsmwLs = (5, 5) LsmwBa = (4, 4) LsmwId = (3, 3) LsmwM = (2, 2) LsmwSub = (0, 1) ; attach variables [Rt Rs Ra Rb Rd LsmwRa LsmwRb LsmwRe] [ a0 a1 a2 a3 a4 a5 s0 s1 s2 s3 s4 s5 s6 s7 s8 ta t0 t1 t2 t3 t4 t5 t6 t7 t8 t9 p0 p1 fp gp lp sp ]; attach variables [Rtl] [ a0 a2 a4 s0 s2 s4 s6 s8 t0 t2 t4 t6 t8 p0 fp lp ]; attach variables [Rth] [ a1 a3 a5 s1 s3 s5 s7 ta t1 t3 t5 t7 t9 p1 gp sp ]; attach variables [Dt] [ d0 d1 ]; attach variables [Dtlow] [ d0.lo d1.lo ]; attach variables [Dthigh] [ d0.hi d1.hi ]; attach variables [ Fst Fsa Fsb ] [ fs0 fs1 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 fs12 fs13 fs14 fs15 fs16 fs17 fs18 fs19 fs20 fs21 fs22 fs23 fs24 fs25 fs26 fs27 fs28 fs29 fs30 fs31 ]; attach variables [ Fdt Fda Fdb ] [ fd0 fd1 fd2 fd3 fd4 fd5 fd6 fd7 fd8 fd9 fd10 fd11 fd12 fd13 fd14 fd15 fd16 fd17 fd18 fd19 fd20 fd21 fd22 fd23 fd24 fd25 fd26 fd27 fd28 fd29 fd30 fd31 ]; @define I32 "(OpSz=0)" @define LBGP "(Opc=0b010111)" @define LWC "(Opc=0b011000)" @define SWC "(Opc=0b011001)" @define LDC "(Opc=0b011010)" @define SDC "(Opc=0b011011)" 
@define LSMW "(Opc=0b011101)" @define MEM "(Opc=0b011100)" @define HWGP "(Opc=0b011110)" @define SBGP "(Opc=0b011111)" @define ALU_1 "(Opc=0b100000)" @define ALU_2 "(Opc=0b100001)" @define JI "(Opc=0b100100)" @define JREG "(Opc=0b100101)" @define BR1 "(Opc=0b100110)" @define BR2 "(Opc=0b100111)" @define BR3 "(Opc=0b101101)" @define MISC "(Opc=0b110010)" @define COP "(Opc=0b110101)" @define SIMD "(Opc=0b111000)" @define ALU2Z "(Alu2Mod=0b0000)" @define GPR "(Alu2Mod=0b0001)" ### ALU Instruction with Immediate ### :addi Rt, Ra, Imm15s is $(I32) & Opc=0b101000 & Rt & Ra & Imm15s { Rt = Ra + Imm15s; } :subri Rt, Ra, Imm15s is $(I32) & Opc=0b101001 & Rt & Ra & Imm15s { Rt = Imm15s - Ra; } :andi Rt, Ra, Imm15u is $(I32) & Opc=0b101010 & Rt & Ra & Imm15u { Rt = Ra & Imm15u; } :ori Rt, Ra, Imm15u is $(I32) & Opc=0b101100 & Rt & Ra & Imm15u { Rt = Ra | Imm15u; } :xori Rt, Ra, Imm15u is $(I32) & Opc=0b101011 & Rt & Ra & Imm15u { Rt = Ra ^ Imm15u; } :slti Rt, Ra, Imm15s is $(I32) & Opc=0b101110 & Rt & Ra & Imm15s { Rt = zext(Ra < Imm15s); } :sltsi Rt, Ra, Imm15s is $(I32) & Opc=0b101111 & Rt & Ra & Imm15s { Rt = zext(Ra s< Imm15s); } :movi Rt, Imm20s is $(I32) & Opc=0b100010 & Rt & Imm20s { Rt = Imm20s; } :sethi Rt, Imm20u is $(I32) & Opc=0b100011 & Rt & Imm20u { Rt = Imm20u << 12;} ### ALU Instruction ### :add Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00000 { Rt = Ra + Rb; } :sub Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00001 { Rt = Ra - Rb; } :and Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00010 { Rt = Ra & Rb; } :xor Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00011 { Rt = Ra ^ Rb; } :or Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00100 { Rt = Ra | Rb; } :nor Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00101 { Rt = ~(Ra | Rb); } :slt Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00110 { Rt = zext(Ra < Rb); } :slts Rt, Ra, Rb is $(I32) & 
$(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b00111 { Rt = zext(Ra s< Rb); } :sva Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b11000 { Rt = zext(scarry(Ra, Rb)); } :svs Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b11001 { Rt = zext(sborrow(Ra, Rb)); } :seb Rt, Ra is $(I32) & $(ALU_1) & Rt & Ra & Rb=0b00000 & Rd=0 & Sub5=0b10000 { local tmp = Ra; Rt = sext(tmp:1); } :seh Rt, Ra is $(I32) & $(ALU_1) & Rt & Ra & Rb=0b00000 & Rd=0 & Sub5=0b10001 { local tmp = Ra; Rt = sext(tmp:2); } :zeb Rt, Ra is $(I32) & Opc=0b101010 & Rt & Ra & Imm15u=0xff { local tmp = Ra; Rt = zext(tmp:1); } :zeh Rt, Ra is $(I32) & $(ALU_1) & Rt & Ra & Rb=0b00000 & Rd=0 & Sub5=0b10011 { local tmp = Ra; Rt = zext(tmp:2); } :wsbh Rt, Ra is $(I32) & $(ALU_1) & Rt & Ra & Rb=0b00000 & Rd=0 & Sub5=0b10100 { Rt = ((Ra & 0x000000ff) << 8) | ((Ra & 0x0000ff00) >> 8) | ((Ra & 0x00ff0000) << 8) | ((Ra & 0xff000000) >> 8); } ### Shifter Instruction ### :slli Rt, Ra, Imm5u is $(I32) & $(ALU_1) & Rt & Ra & Imm5u & Rd=0 & Sub5=0b01000 { Rt = Ra << Imm5u; } :srli Rt, Ra, Imm5u is $(I32) & $(ALU_1) & Rt & Ra & Imm5u & Rd=0 & Sub5=0b01001 { Rt = Ra >> Imm5u; } :srai Rt, Ra, Imm5u is $(I32) & $(ALU_1) & Rt & Ra & Imm5u & Rd=0 & Sub5=0b01010 { Rt = Ra s>> Imm5u; } :rotri Rt, Ra, Imm5u is $(I32) & $(ALU_1) & Rt & Ra & Imm5u & Rd=0 & Sub5=0b01011 { Rt = (Ra >> Imm5u) | (Ra << (32 - Imm5u)); } :sll Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b01100 { tmp:4 = Rb & 0b11111; Rt = Ra << tmp; } :srl Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b01101 { tmp:4 = Rb & 0b11111; Rt = Ra >> tmp; } :sra Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b01110 { tmp:4 = Rb & 0b11111; Rt = Ra s>> tmp; } :rotr Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b01111 { tmp:4 = Rb & 0b11111; Rt = (Ra >> tmp) | (Ra << (32 - tmp)); } ### Multiply Instruction ### :mul Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(ALU2Z) & Sub6=0b100100 { Rt = Ra * 
Rb; } :mults64 Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101000 { Dt = sext(Ra) * sext(Rb); } :mult64 Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101001 { Dt = zext(Ra) * zext(Rb); } :madds64 Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101010 { Dt = Dt + (sext(Ra) * sext(Rb)); } :madd64 Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101011 { Dt = Dt + (zext(Ra) * zext(Rb)); } :msubs64 Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101100 { Dt = Dt - (sext(Ra) * sext(Rb)); } :msub64 Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101101 { Dt = Dt - (zext(Ra) * zext(Rb)); } :mult32 Dtlow, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dtlow & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b110001 { Dtlow = Ra * Rb; } :madd32 Dtlow, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dtlow & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b110011 { Dtlow = Dtlow + (Ra * Rb); } :msub32 Dtlow, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dtlow & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b110101 { Dtlow = Dtlow - (Ra * Rb); } # Group 0 UsrName: d0.lo is Group=0 & Usr=0 & d0.lo { export d0.lo; } UsrName: d0.hi is Group=0 & Usr=1 & d0.hi { export d0.hi; } UsrName: d1.lo is Group=0 & Usr=2 & d1.lo { export d1.lo; } UsrName: d1.hi is Group=0 & Usr=3 & d1.hi { export d1.hi; } UsrName: lb is Group=0 & Usr=25 & lb { export lb; } UsrName: le is Group=0 & Usr=26 & le { export le; } UsrName: lc is Group=0 & Usr=27 & lc { export lc; } UsrName: itb is Group=0 & Usr=28 & itb { export itb; } UsrName: ifc_lp is Group=0 & Usr=29 & ifc_lp { export ifc_lp; } #UsrName: pc is Group=0 & Usr=31 & pc { export pc; } # handled separately # Group 1 UsrName: "dma_cfg" is Group=1 & Usr=0 { tmp:2 = 0x280; export *[csreg]:4 tmp; } UsrName: "dma_gcsw" is Group=1 & Usr=1 { tmp:2 = 0x288; export *[csreg]:4 tmp; } UsrName: "dma_chnsel" is 
Group=1 & Usr=2 { tmp:2 = 0x290; export *[csreg]:4 tmp; } UsrName: "dma_act" is Group=1 & Usr=3 { tmp:2 = 0x298; export *[csreg]:4 tmp; } UsrName: "dma_setup" is Group=1 & Usr=4 { tmp:2 = 0x2a0; export *[csreg]:4 tmp; } UsrName: "dma_isaddr" is Group=1 & Usr=5 { tmp:2 = 0x2a8; export *[csreg]:4 tmp; } UsrName: "dma_esaddr" is Group=1 & Usr=6 { tmp:2 = 0x2b0; export *[csreg]:4 tmp; } UsrName: "dma_tcnt" is Group=1 & Usr=7 { tmp:2 = 0x2b8; export *[csreg]:4 tmp; } UsrName: "dma_status" is Group=1 & Usr=8 { tmp:2 = 0x2c0; export *[csreg]:4 tmp; } UsrName: "dma_2dset" is Group=1 & Usr=9 { tmp:2 = 0x2c8; export *[csreg]:4 tmp; } UsrName: "dma_rcnt" is Group=1 & Usr=23 { tmp:2 = 0x2b9; export *[csreg]:4 tmp; } UsrName: "dma_hstatus" is Group=1 & Usr=24 { tmp:2 = 0x2c1; export *[csreg]:4 tmp; } UsrName: "dma_2dsctl" is Group=1 & Usr=25 { tmp:2 = 0x2c9; export *[csreg]:4 tmp; } # Group 2 UsrName: "pfmc0" is Group=2 & Usr=0 { tmp:2 = 0x200; export *[csreg]:4 tmp; } UsrName: "pfmc1" is Group=2 & Usr=1 { tmp:2 = 0x201; export *[csreg]:4 tmp; } UsrName: "pfmc2" is Group=2 & Usr=2 { tmp:2 = 0x202; export *[csreg]:4 tmp; } UsrName: "pfm_ctl" is Group=2 & Usr=4 { tmp:2 = 0x208; export *[csreg]:4 tmp; } :mfusr Rt, UsrName is $(I32) & $(ALU_2) & Rt & UsrName & $(ALU2Z) & Sub6=0b100000 { Rt = UsrName; } :mfusr Rt, pc is $(I32) & $(ALU_2) & Rt & Group=0 & Usr=0b11111 & $(ALU2Z) & Sub6=0b100000 & pc { Rt = inst_next; } :mtusr Rt, UsrName is $(I32) & $(ALU_2) & Rt & UsrName & $(ALU2Z) & Sub6=0b100001 { UsrName = Rt; } :mtusr Rt, pc is $(I32) & $(ALU_2) & Rt & Group=0 & Usr=0b11111 & $(ALU2Z) & Sub6=0b100001 & pc { pc = Rt; goto[pc]; } # Not sure this works correctly ### Divide Instructions ### :div Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtlow & Dthigh & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101111 { Dtlow = Ra / Rb; Dthigh = Ra % Rb; } :divs Dt, Ra, Rb is $(I32) & $(ALU_2) & Dtl=0 & Dt & Dtlow & Dthigh & Dtr=0 & Ra & Rb & $(ALU2Z) & Sub6=0b101110 { Dtlow = Ra s/ Rb; Dthigh = Ra 
s% Rb; } ### Load / Store Instruction (immediate) ### ByteOffset: off is Imm15s [ off = Imm15s << 0; ] { export *[const]:4 off; } HalfOffset: off is Imm15s [ off = Imm15s << 1; ] { export *[const]:4 off; } WordOffset: off is Imm15s [ off = Imm15s << 2; ] { export *[const]:4 off; } AddrByteRaImm15s: [Ra + ByteOffset] is Ra & ByteOffset { addr:4 = Ra + ByteOffset; export addr; } AddrHalfRaImm15s: [Ra + HalfOffset] is Ra & HalfOffset { addr:4 = Ra + HalfOffset; export addr; } AddrWordRaImm15s: [Ra + WordOffset] is Ra & WordOffset { addr:4 = Ra + WordOffset; export addr; } :lwi Rt, AddrWordRaImm15s is $(I32) & Opc=0b000010 & Rt & AddrWordRaImm15s { Rt = *AddrWordRaImm15s; } :lhi Rt, AddrHalfRaImm15s is $(I32) & Opc=0b000001 & Rt & AddrHalfRaImm15s { local tmp:2 = *AddrHalfRaImm15s; Rt = zext(tmp); } :lhsi Rt, AddrHalfRaImm15s is $(I32) & Opc=0b010001 & Rt & AddrHalfRaImm15s { local tmp:2 = *AddrHalfRaImm15s; Rt = sext(tmp); } :lbi Rt, AddrByteRaImm15s is $(I32) & Opc=0b000000 & Rt & AddrByteRaImm15s { local tmp:1 = *AddrByteRaImm15s; Rt = zext(tmp); } :lbsi Rt, AddrByteRaImm15s is $(I32) & Opc=0b010000 & Rt & AddrByteRaImm15s { local tmp:1 = *AddrByteRaImm15s; Rt = sext(tmp); } :swi Rt, AddrWordRaImm15s is $(I32) & Opc=0b001010 & Rt & AddrWordRaImm15s { *AddrWordRaImm15s = Rt; } :shi Rt, AddrHalfRaImm15s is $(I32) & Opc=0b001001 & Rt & AddrHalfRaImm15s { local tmp = Rt; *AddrHalfRaImm15s = tmp:2; } :sbi Rt, AddrByteRaImm15s is $(I32) & Opc=0b001000 & Rt & AddrByteRaImm15s { local tmp = Rt; *AddrByteRaImm15s = tmp:1; } ### Load / Store Instruction (immediate, postincr) ### :lwi.bi Rt, [Ra], WordOffset is $(I32) & Opc=0b000110 & Rt & Ra & WordOffset { Rt = *Ra; Ra = Ra + WordOffset; } :lhi.bi Rt, [Ra], HalfOffset is $(I32) & Opc=0b000101 & Rt & Ra & HalfOffset { local tmp:2 = *Ra; Rt = zext(tmp); Ra = Ra + HalfOffset; } :lhsi.bi Rt, [Ra], HalfOffset is $(I32) & Opc=0b010101 & Rt & Ra & HalfOffset { local tmp:2 = *Ra; Rt = sext(tmp); Ra = Ra + HalfOffset; } :lbi.bi Rt, 
[Ra], ByteOffset is $(I32) & Opc=0b000100 & Rt & Ra & ByteOffset { local tmp:1 = *Ra; Rt = zext(tmp); Ra = Ra + ByteOffset; } :lbsi.bi Rt, [Ra], ByteOffset is $(I32) & Opc=0b010100 & Rt & Ra & ByteOffset { local tmp:1 = *Ra; Rt = sext(tmp); Ra = Ra + ByteOffset; } :swi.bi Rt, [Ra], WordOffset is $(I32) & Opc=0b001110 & Rt & Ra & WordOffset { *Ra = Rt; Ra = Ra + WordOffset; } :shi.bi Rt, [Ra], HalfOffset is $(I32) & Opc=0b001101 & Rt & Ra & HalfOffset { local tmp = Rt; *Ra = tmp:2; Ra = Ra + HalfOffset; } :sbi.bi Rt, [Ra], ByteOffset is $(I32) & Opc=0b001100 & Rt & Ra & ByteOffset { local tmp = Rt; *Ra = tmp:1; Ra = Ra + ByteOffset; } ### Load / Store Instruction (register) ### OffsetRbsv: (Rb "<<" sv) is Rb & sv { off:4 = Rb << sv; export off; } AddrRaRbsv: [Ra + OffsetRbsv] is Ra & OffsetRbsv { addr:4 = Ra + OffsetRbsv; export addr; } :lw Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00000010 { Rt = *AddrRaRbsv; } :lh Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00000001 { local tmp:2 = *AddrRaRbsv; Rt = zext(tmp); } :lhs Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00010001 { local tmp:2 = *AddrRaRbsv; Rt = sext(tmp); } :lb Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00000000 { local tmp:1 = *AddrRaRbsv; Rt = zext(tmp); } :lbs Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00010000 { local tmp:1 = *AddrRaRbsv; Rt = sext(tmp); } :sw Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00001010 { *AddrRaRbsv = Rt; } :sh Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00001001 { local tmp = Rt; *AddrRaRbsv = tmp:2; } :sb Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00001000 { local tmp = Rt; *AddrRaRbsv = tmp:1; } ### Load / Store Instruction (register, postincr) ### :lw.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00000110 { Rt = *Ra; Ra = Ra + OffsetRbsv; } :lh.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & 
OffsetRbsv & Sub8=0b00000101 { local tmp:2 = *Ra; Rt = zext(tmp); Ra = Ra + OffsetRbsv; } :lhs.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00010101 { local tmp:2 = *Ra; Rt = sext(tmp); Ra = Ra + OffsetRbsv; } :lb.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00000100 { local tmp:1 = *Ra; Rt = zext(tmp); Ra = Ra + OffsetRbsv; } :lbs.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00010100 { local tmp:1 = *Ra; Rt = sext(tmp); Ra = Ra + OffsetRbsv; } :sw.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00001110 { *Ra = Rt; Ra = Ra + OffsetRbsv; } :sh.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00001101 { local tmp = Rt; *Ra = tmp:2; Ra = Ra + OffsetRbsv; } :sb.bi Rt, [Ra], OffsetRbsv is $(I32) & $(MEM) & Rt & Ra & OffsetRbsv & Sub8=0b00001100 { local tmp = Rt; *Ra = tmp:1; Ra = Ra + OffsetRbsv; } ### Load / Store Multiple Word Instruction ### @include "lsmw.sinc" LsmwBa_: "b" is LsmwBa=0 { } LsmwBa_: "a" is LsmwBa=1 { } LsmwId_: "i" is LsmwId=0 { } LsmwId_: "d" is LsmwId=1 { } LsmwM_: "" is LsmwRa & LsmwM=0 { } LsmwM_: "m" is LsmwRa & LsmwM=1 { LsmwRa = mult_addr; } :lmw.^LsmwBa_^LsmwId_^LsmwM_ LsmwRb, [LsmwRa], LsmwRe, Enable4 is ($(I32) & $(LSMW) & LsmwRb & LsmwRa & LsmwRe & Enable4 & LsmwLs=0 & LsmwBa_ & LsmwId_ & LsmwM_ & LsmwSub=0b00) ... & Lmw.regs { mult_addr = LsmwRa; build Lmw.regs; build LsmwM_; } :smw.^LsmwBa_^LsmwId_^LsmwM_ LsmwRb, [LsmwRa], LsmwRe, Enable4 is ($(I32) & $(LSMW) & LsmwRb & LsmwRa & LsmwRe & Enable4 & LsmwLs=1 & LsmwBa_ & LsmwId_ & LsmwM_ & LsmwSub=0b00) ... 
& Smw.regs { mult_addr = LsmwRa; build Smw.regs; build LsmwM_; } ### Load / Store Instruction for Atomic Updates ### :llw Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00011000 { Rt = *AddrRaRbsv; } :scw Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00011001 { *AddrRaRbsv = Rt; } ### Load / Store Instructions with User-mode Privilege ### # TODO : special constraint (user-mode address translation) :lwup Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00100010 { Rt = *AddrRaRbsv; } :swup Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00101010 { *AddrRaRbsv = Rt; } ### Jump Instruction ### Rel24: addr is Imm24s [ addr = inst_start + (Imm24s << 1); ] { export *:4 addr; } :j Rel24 is $(I32) & $(JI) & JIt=0 & Rel24 { goto Rel24; } :jal Rel24 is $(I32) & $(JI) & JIt=1 & Rel24 { lp = inst_next; call Rel24; } :jr Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 & Rb & DtIt=0b00 & Jz=0 & JrHint=0 & Sub5=0b00000 { goto [Rb]; } :ret Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 & Rb & DtIt=0b00 & Jz=0 & JrHint=1 & Sub5=0b00000 { return [Rb]; } :jral Rt,Rb is $(I32) & $(JREG) & Rt & Ra=0 & Rb & DtIt=0b00 & Jz=0 & JrHint=0 & Sub5=0b00001 { Rt = inst_next; call [Rb]; } ### Branch Instruction ### Rel14: addr is Imm14s [ addr = inst_start + (Imm14s << 1); ] { export *:4 addr; } Rel16: addr is Imm16s [ addr = inst_start + (Imm16s << 1); ] { export *:4 addr; } :beq Rt, Ra, Rel14 is $(I32) & $(BR1) & Rt & Ra & Br1t=0 & Rel14 { if(Rt == Ra) goto Rel14; } :bne Rt, Ra, Rel14 is $(I32) & $(BR1) & Rt & Ra & Br1t=1 & Rel14 { if(Rt != Ra) goto Rel14; } :beqz Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b0010 & Rel16 { if(Rt == 0) goto Rel16; } :bnez Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b0011 & Rel16 { if(Rt != 0) goto Rel16; } :bgez Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b0100 & Rel16 { if(Rt s>= 0) goto Rel16; } :bltz Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b0101 & Rel16 { if(Rt s< 0) goto Rel16; } :bgtz Rt, Rel16 is $(I32) & $(BR2) & Rt & 
Br2t=0b0110 & Rel16 { if(Rt s> 0) goto Rel16; } :blez Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b0111 & Rel16 { if(Rt s<= 0) goto Rel16; } ### Branch with link Instruction ### :bgezal Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b1100 & Rel16 { lp = inst_next; if(Rt s>= 0) goto ; call Rel16; } :bltzal Rt, Rel16 is $(I32) & $(BR2) & Rt & Br2t=0b1101 & Rel16 { lp = inst_next; if(Rt s< 0) goto ; call Rel16; } ### Read / Write System Registers ### # TODO : special instruction, do we create the system registers ? define pcodeop mfsr; define pcodeop mtsr; csr: csr_reg is SrIdx [ csr_reg = $(CSR_REG_START) + SrIdx; ] { export *[csreg]:4 csr_reg; } :mfsr Rt, csr is $(I32) & $(MISC) & Rt & csr & Rd=0 & Sub5=0b00010 { Rt = csr; } :mtsr Rt, csr is $(I32) & $(MISC) & Rt & csr & Rd=0 & Sub5=0b00011 { csr = Rt; } ### Jump Register with System Register Update ### # TODO : special constraint (address translation off) :jr.itoff Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 & Rb & DtIt=0b01 & Jz=0 & JrHint=0 & Sub5=0b00000 { goto [Rb]; } :jr.toff Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 & Rb & DtIt=0b11 & Jz=0 & JrHint=0 & Sub5=0b00000 { goto [Rb]; } :jral.iton Rt,Rb is $(I32) & $(JREG) & Rt & Ra=0 & Rb & DtIt=0b01 & Jz=0 & JrHint=0 & Sub5=0b00001 { Rt = inst_next; call [Rb]; } :jral.ton Rt,Rb is $(I32) & $(JREG) & Rt & Ra=0 & Rb & DtIt=0b11 & Jz=0 & JrHint=0 & Sub5=0b00001 { Rt = inst_next; call [Rb]; } ### MMU Instruction ### define pcodeop TLB_TargetRead; define pcodeop TLB_TargetWrite; define pcodeop TLB_RWrite; define pcodeop TLB_RWriteLock; define pcodeop TLB_Unlock; define pcodeop TLB_Probe; define pcodeop TLB_Invalidate; define pcodeop TLB_FlushAll; :tlbop Ra,"TargetRead" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=0 & Sub5=0b01110 { TLB_TargetRead(Ra:4); } :tlbop Ra,"TargetWrite" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=1 & Sub5=0b01110 { TLB_TargetWrite(Ra:4); } :tlbop Ra,"RWrite" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=2 & Sub5=0b01110 { TLB_RWrite(Ra:4); } :tlbop 
Ra,"RWriteLock" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=3 & Sub5=0b01110 { TLB_RWriteLock(Ra:4); } :tlbop Ra,"Unlock" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=4 & Sub5=0b01110 { TLB_Unlock(Ra:4); } :tlbop Rt,Ra,"Probe" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=5 & Sub5=0b01110 { TLB_Probe(Rt:4, Ra:4); } :tlbop Ra,"Invalidate" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=6 & Sub5=0b01110 { TLB_Invalidate(Ra:4); } :tlbop "FlushAll" is $(I32) & $(MISC) & Rt & Ra & Rb=0 & TlbopSub=7 & Sub5=0b01110 { TLB_FlushAll(); } ### Conditional Move ### :cmovz Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b11010 { if(Rb != 0) goto ; Rt = Ra; } :cmovn Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b11011 { if(Rb == 0) goto ; Rt = Ra; } ### Synchronization Instruction ### # TODO : special function, and subfunctions define pcodeop msync; define pcodeop isync; :msync MsyncSub is $(I32) & $(MISC) & Rt=0 & MsyncZ=0 & MsyncSub & Sub5=0b01100 { msync(MsyncSub:1); } :isync Rt is $(I32) & $(MISC) & Rt & Ra=0 & Rb=0 & Rd=0 & Sub5=0b01101 { isync(Rt:4); } ### Prefetch Instruction ### define pcodeop dpref; OffsetRbsv2: (Rb "<<" sv) is Rb & sv { off:4 = Rb << (sv + 1); export off; } AddrRaRbsv2: [Ra + OffsetRbsv2] is Ra & OffsetRbsv2 { addr:4 = Ra + OffsetRbsv2; export addr; } :dpref DprefSub, AddrRaRbsv2 is $(I32) & $(MEM) & DprefD=0 & DprefSub & AddrRaRbsv2 & Sub8=0b00010011 { dpref(DprefSub:1, AddrRaRbsv2:4); } DprefD_: "w" is DprefD=0 { } DprefD_: "d" is DprefD=1 { } DprefiAddr: [Ra + Offset] is DprefD=0 & Ra & Imm15s [ Offset = Imm15s << 2; ] { export *[const]:4 Offset; } DprefiAddr: [Ra + Offset] is DprefD=1 & Ra & Imm15s [ Offset = Imm15s << 3; ] { export *[const]:4 Offset; } :dprefi.^DprefD_ DprefSub, DprefiAddr is $(I32) & Opc=0b010011 & DprefD_ & DprefSub & DprefiAddr { dpref(DprefSub:1, DprefiAddr:4); } ### NOP Instruction ### :nop is $(I32) & $(ALU_1) & Rt=0 & Ra=0 & Imm5u=0 & Rd=0 & Sub5=0b01001 { } ### Serialization 
Instruction ### define pcodeop dsb; define pcodeop isb; :dsb is $(I32) & $(MISC) & Rt=0 & Ra=0 & Rb=0 & Rd=0 & Sub5=0b01000 { dsb(); } :isb is $(I32) & $(MISC) & Rt=0 & Ra=0 & Rb=0 & Rd=0 & Sub5=0b01001 { isb(); } ### Exception Generation Instruction ### define pcodeop break; define pcodeop syscall; define pcodeop trap; :break Swid is $(I32) & $(MISC) & Rt=0 & Swid & Sub5=0b01010 { break(Swid:4); } :syscall Swid is $(I32) & $(MISC) & Rt=0 & Swid & Sub5=0b01011 { syscall(Swid:4); } :trap Swid is $(I32) & $(MISC) & Rt=0 & Swid & Sub5=0b00101 { trap(Swid:4); } :teqz Rt, Swid is $(I32) & $(MISC) & Rt & Swid & Sub5=0b00110 { if(Rt != 0) goto ; trap(Swid:4); } :tnez Rt, Swid is $(I32) & $(MISC) & Rt & Swid & Sub5=0b00111 { if(Rt == 0) goto ; trap(Swid:4); } ### Special Return Instruction ### :iret is $(I32) & $(MISC) & Rt=0 & Ra=0 & Rb=0 & Rd=0 & Sub5=0b00100 { return [ipc]; } # TODO : special constraint (address translation off) :ret.itoff Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 & Rb & DtIt=0b01 & Jz=0 & JrHint=1 & Sub5=0b00000 { return [Rb]; } :ret.toff Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 & Rb & DtIt=0b11 & Jz=0 & JrHint=1 & Sub5=0b00000 { return [Rb]; } ### Cache Control Instruction ### # TODO : special function, with subfunctions define pcodeop cctl; :cctl Rt, Ra, CctlLevel, CctlSub is $(I32) & $(MISC) & Rt & Ra & CctlZ=0 & CctlLevel & CctlSub & Sub5=0b00001 { cctl(Rt:4, Ra:4, CctlLevel:1, CctlSub:1); } # Miscellaneous Instructions (Baseline) # TODO : special function. Not sure if we use context or registers for this. 
define pcodeop setgie; SetgieEN: "d" is Toggle=0 { setgie(0:1); } SetgieEN: "e" is Toggle=1 { setgie(1:1); } :setgie.^SetgieEN is $(I32) & $(MISC) & ToggleL=0 & SetgieEN & SrIdx=0b0010000000 & Rd=0b00010 & Sub5=0b00011 { } define pcodeop setend; SetendBE: "l" is Toggle=0 { setend(0:1); } SetendBE: "b" is Toggle=1 { setend(1:1); } :setend.^SetendBE is $(I32) & $(MISC) & ToggleL=0 & SetendBE & SrIdx=0b0010000000 & Rd=0b00001 & Sub5=0b00011 { } :standby StandbySub is $(I32) & $(MISC) & Rt=0 & Ra=0 & Rb=0 & StandbyZ=0 & StandbySub & Sub5=0b00000 { goto inst_start; } ### 32-bit Baseline V2 instructions ### ### ALU Instructions ### :addi.gp is $(I32) & $(SBGP) & Rt & GpSub1=0b1 & Imm19s { Rt = gp + Imm19s; } ### Multiply and Divide Instructions (V2) ### :mulr64 Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(GPR) & Sub6=0b101001 & Rtl & Rth { res:8 = zext(Ra) * zext(Rb); Rtl = res[32,32]; Rth = res[0,32]; } :mulsr64 Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(GPR) & Sub6=0b101000 & Rtl & Rth { res:8 = sext(Ra) * sext(Rb); Rtl = res[32,32]; Rth = res[0,32]; } :maddr32 Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(GPR) & Sub6=0b110011 { Rt = Rt + (Ra * Rb); } :msubr32 Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(GPR) & Sub6=0b110101 { Rt = Rt - (Ra * Rb); } :divr Rt, Rs, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rs & Sub5=0b10111 { local div = Ra / Rb; local mod = Ra % Rb; Rs = mod; Rt = div; } :divsr Rt, Rs, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rs & Sub5=0b10110 { local div = Ra s/ Rb; local mod = Ra s% Rb; Rs = mod; Rt = div; } ### Load/Store Instructions ### GpByteAddress: [+ off] is Imm19s [ off = Imm19s << 0; ] { addr:4 = gp + off; export addr; } GpHalfAddress: [+ off] is Imm18s [ off = Imm18s << 1; ] { addr:4 = gp + off; export addr; } GpWordAddress: [+ off] is Imm17s [ off = Imm17s << 2; ] { addr:4 = gp + off; export addr; } :lbi.gp Rt, GpByteAddress is $(I32) & $(LBGP) & Rt & GpSub1=0b0 & GpByteAddress { local tmp:1 = *GpByteAddress; 
Rt = zext(tmp); } :lbsi.gp Rt, GpByteAddress is $(I32) & $(LBGP) & Rt & GpSub1=0b1 & GpByteAddress { local tmp:1 = *GpByteAddress; Rt = sext(tmp); } :lhi.gp Rt, GpHalfAddress is $(I32) & $(HWGP) & Rt & GpSub2=0b00 & GpHalfAddress { local tmp:2 = *GpHalfAddress; Rt = zext(tmp); } :lhsi.gp Rt, GpHalfAddress is $(I32) & $(HWGP) & Rt & GpSub2=0b01 & GpHalfAddress { local tmp:2 = *GpHalfAddress; Rt = sext(tmp); } :lwi.gp Rt, GpWordAddress is $(I32) & $(HWGP) & Rt & GpSub3=0b110 & GpWordAddress { Rt = *GpWordAddress; } :sbi.gp Rt, GpByteAddress is $(I32) & $(SBGP) & Rt & GpSub1=0b0 & GpByteAddress { local tmp = Rt; *GpByteAddress = tmp:1; } :shi.gp Rt, GpHalfAddress is $(I32) & $(HWGP) & Rt & GpSub2=0b10 & GpHalfAddress { local tmp = Rt; *GpHalfAddress = tmp:2; } :swi.gp Rt, GpWordAddress is $(I32) & $(HWGP) & Rt & GpSub3=0b111 & GpWordAddress { *GpWordAddress = Rt; } :lmwa.^LsmwBa_^LsmwId_^LsmwM_ LsmwRb, [LsmwRa], LsmwRe, Enable4 is ($(I32) & $(LSMW) & LsmwRb & LsmwRa & LsmwRe & Enable4 & LsmwLs=0 & LsmwBa_ & LsmwId_ & LsmwM_ & LsmwSub=0b01) ... & Lmwa.regs { mult_addr = LsmwRa; build Lmwa.regs; build LsmwM_; } :smwa.^LsmwBa_^LsmwId_^LsmwM_ LsmwRb, [LsmwRa], LsmwRe, Enable4 is ($(I32) & $(LSMW) & LsmwRb & LsmwRa & LsmwRe & Enable4 & LsmwLs=1 & LsmwBa_ & LsmwId_ & LsmwM_ & LsmwSub=0b01) ... 
# Continuation of smwa (store-multiple word, alternate): store the register
# list, then update the base register when the ".m" (modify) form is used.
& Smwa.regs
{
    mult_addr = LsmwRa;
    build Smwa.regs;
    build LsmwM_;
}

# Load/store byte with user-mode privilege (register-offset addressing).
:lbup Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00100000
{
    local tmp:1 = *AddrRaRbsv;
    Rt = zext(tmp);
}

:sbup Rt, AddrRaRbsv is $(I32) & $(MEM) & Rt & AddrRaRbsv & Sub8=0b00101000
{
    local tmp = Rt;
    *AddrRaRbsv = tmp:1;
}

### 32-bit Baseline V3 instructions ###

### ALU Instructions with Shift Operation (v3) ###
# *_slli: second operand is Rb shifted LEFT by the immediate sh.
:add_slli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b00000 { Rt = Ra + (Rb << sh); }
:and_slli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b00010 { Rt = Ra & (Rb << sh); }
:or_slli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b00100 { Rt = Ra | (Rb << sh); }
:sub_slli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b00001 { Rt = Ra - (Rb << sh); }
:xor_slli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b00011 { Rt = Ra ^ (Rb << sh); }
# *_srli: second operand is Rb logically shifted RIGHT by sh.
# BUGFIX: these previously used "<<" (copy/paste from the *_slli family),
# which made e.g. add_srli semantically identical to add_slli.
:add_srli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b11100 { Rt = Ra + (Rb >> sh); }
:and_srli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b11110 { Rt = Ra & (Rb >> sh); }
:or_srli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b10101 { Rt = Ra | (Rb >> sh); }
:sub_srli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b11101 { Rt = Ra - (Rb >> sh); }
:xor_srli Rt, Ra, Rb, sh is $(I32) & $(ALU_1) & Rt & Ra & Rb & sh & Sub5=0b11111 { Rt = Ra ^ (Rb >> sh); }

### Conditional Branch and Jump Instructions (V3) ###
# PC-relative target: signed 8-bit immediate scaled by 2 (halfword-aligned).
Rel8: addr is Imm8s [ addr = inst_start + (Imm8s << 1); ] { export *:4 addr; }
:beqc Rt, Imm11s, Rel8 is $(I32) & $(BR3) & Rt & Bxxc=0 & Imm11s & Rel8 { if(Rt == Imm11s) goto Rel8; }
:bnec Rt, Imm11s, Rel8 is $(I32) & $(BR3) & Rt & Bxxc=1 & Imm11s & Rel8 { if(Rt != Imm11s) goto Rel8; }
# Call through Rb only when Rb is non-zero.
# BUGFIX: restore the skip label ("goto ;" with no target is not valid SLEIGH;
# the label token was lost in extraction).
:jralnez Rt,Rb is $(I32) & $(JREG) & Rt & Ra=0 & Rb & DtIt=0b00 & Jz=0 & JrHint=0 & Sub5=0b00011
{
    if(Rb == 0) goto <done>;
    Rt = inst_next;
    call [Rb];
    <done>
}
:jrnez Rb is $(I32) & $(JREG) & Rt=0 & Ra=0 &
Rb & DtIt=0b00 & Jz=0 & JrHint=0 & Sub5=0b00010
{
    # Jump through Rb only when Rb is non-zero.
    # BUGFIX: restore the skip label lost in extraction ("goto ;" is invalid).
    if(Rb == 0) goto <done>;
    goto [Rb];
    <done>
}

### Bit Manipulation Instructions (V3) ###
:bitc Rt, Ra, Rb is $(I32) & $(ALU_1) & Rt & Ra & Rb & Rd=0 & Sub5=0b10010 { Rt = Ra & (~Rb); }
:bitci Rt, Ra, Imm15u is $(I32) & Opc=0b110011 & Rt & Ra & Imm15u { Rt = Ra & (~Imm15u); }

### Cache Control Instruction (V3) ###
# TODO: Add CCTL L1D_WBALL, level

### 32-bit ISA extension ###

### ALU Instruction (Performance) ###
# abs: branch-free |Ra| using 0/1 select masks (gez/ltz).
:abs Rt, Ra is $(I32) & $(ALU_2) & Rt & Ra & Rb=0 & $(ALU2Z) & Sub6=0b000011 { gez:4 = zext(Ra s>= 0); ltz:4 = zext(Ra s< 0); Rt = (Ra * gez) | ((-Ra) * ltz); }
# ave: rounded average of Ra and Rb.
# BUGFIX: was "s>> 2" (divide by 4); the average is (Ra + Rb + 1)
# arithmetically shifted right by ONE.
:ave Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(ALU2Z) & Sub6=0b000010 { Rt = (Ra + Rb + 1) s>> 1; }
# max/min: branch-free signed select via 0/1 masks.
:max Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(ALU2Z) & Sub6=0b000000 { altb:4 = zext(Ra s< Rb); ageb:4 = zext(Ra s>= Rb); Rt = (Ra * ageb) | (Rb * altb); }
:min Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(ALU2Z) & Sub6=0b000001 { altb:4 = zext(Ra s< Rb); ageb:4 = zext(Ra s>= Rb); Rt = (Ra * altb) | (Rb * ageb); }
:bset Rt, Ra, Imm5u is $(I32) & $(ALU_2) & Rt & Ra & Imm5u & $(ALU2Z) & Sub6=0b001000 { Rt = Ra | (1 << Imm5u); }
:bclr Rt, Ra, Imm5u is $(I32) & $(ALU_2) & Rt & Ra & Imm5u & $(ALU2Z) & Sub6=0b001001 { Rt = Ra & ~(1 << Imm5u); }
:btgl Rt, Ra, Imm5u is $(I32) & $(ALU_2) & Rt & Ra & Imm5u & $(ALU2Z) & Sub6=0b001010 { Rt = Ra ^ (1 << Imm5u); }
:btst Rt, Ra, Imm5u is $(I32) & $(ALU_2) & Rt & Ra & Imm5u & $(ALU2Z) & Sub6=0b001011 { Rt = (Ra >> Imm5u) & 1; }
# clips: clamp Ra into the signed range [-(2^Imm5u), 2^Imm5u - 1].
# BUGFIX: restore the branch labels lost in extraction (the three-way
# compare structure is recoverable from the stripped "goto ;" sequence).
:clips Rt, Ra, Imm5u is $(I32) & $(ALU_2) & Rt & Ra & Imm5u & $(ALU2Z) & Sub6=0b000100
{
    local upper:4 = (1 << Imm5u) - 1;
    local lower:4 = -(1 << Imm5u);
    if(Ra s<= upper) goto <chk_lower>;
    Rt = upper;
    goto <done>;
    <chk_lower>
    if(Ra s>= lower) goto <in_range>;
    Rt = lower;
    goto <done>;
    <in_range>
    Rt = Ra;
    <done>
}
# clip: clamp Ra into the range [0, 2^Imm5u - 1].
:clip Rt, Ra, Imm5u is $(I32) & $(ALU_2) & Rt & Ra & Imm5u & $(ALU2Z) & Sub6=0b000101
{
    local upper:4 = (1 << Imm5u) - 1;
    if(Ra s<= upper) goto <chk_lower>;
    Rt = upper;
    goto <done>;
    <chk_lower>
    if(Ra s>= 0) goto <in_range>;
    Rt = 0;
    goto <done>;
    <in_range>
    Rt = Ra;
    <done>
}
:clz Rt, Ra is $(I32) &
$(ALU_2) & Rt & Ra & Imm5u=0 & $(ALU2Z) & Sub6=0b000111
{
    # Count leading zeros: shift Ra left (filling with 1s so the loop always
    # terminates, yielding clz(0) = 32) until bit 31 is set.
    # BUGFIX: restore the loop/exit labels lost in extraction
    # ("goto ;" is not valid SLEIGH and the loop structure was lost).
    countTmp:4 = 0;
    inputTmp:4 = Ra;
    <loop>
    if ((inputTmp & 0x80000000) != 0) goto <done>;
    countTmp = countTmp + 1;
    inputTmp = (inputTmp << 1) | 1;
    goto <loop>;
    <done>
    Rt = countTmp;
}
# Count leading ones: same loop, exiting when bit 31 is clear.
:clo Rt, Ra is $(I32) & $(ALU_2) & Rt & Ra & Imm5u=0 & $(ALU2Z) & Sub6=0b000110
{
    countTmp:4 = 0;
    inputTmp:4 = Ra;
    <loop>
    if ((inputTmp & 0x80000000) == 0) goto <done>;
    countTmp = countTmp + 1;
    inputTmp = (inputTmp << 1) | 1;
    goto <loop>;
    <done>
    Rt = countTmp;
}

### Performance Extension V2 ###
# TODO : arithmetic functions: bs*
:bse is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(ALU2Z) & Sub6=0b001100 unimpl
:bsp is $(I32) & $(ALU_2) & Rt & Ra & Rb & $(ALU2Z) & Sub6=0b001101 unimpl

# Accumulate |byte(src1 >> shift) - byte(src2 >> shift)| into dst
# (branch-free byte absolute difference via 0/1 select masks).
macro add_abs_diff(dst, src1, src2, shift)
{
    local src1_ = src1 >> shift;
    local src2_ = src2 >> shift;
    local src1__ = src1_:1;
    local src2__ = src2_:1;
    local a:1 = src1__ - src2__;
    local agez:1 = zext(a s>= 0);
    local altz:1 = zext(a s< 0);
    local aabs:1 = (a * agez) | ((-a) * altz);
    dst = dst + zext(aabs);
}

# Parallel byte sum-of-absolute-differences; pbsada accumulates into Rt.
:pbsad Rt, Ra, Rb is $(I32) & $(SIMD) & Rt & Ra & Rb & Rd=0 & Sub5=0b0000 { Rt = 0; add_abs_diff(Rt, Ra, Rb, 0); add_abs_diff(Rt, Ra, Rb, 8); add_abs_diff(Rt, Ra, Rb, 16); add_abs_diff(Rt, Ra, Rb, 24); }
:pbsada Rt, Ra, Rb is $(I32) & $(SIMD) & Rt & Ra & Rb & Rd=0 & Sub5=0b0001 { add_abs_diff(Rt, Ra, Rb, 0); add_abs_diff(Rt, Ra, Rb, 8); add_abs_diff(Rt, Ra, Rb, 16); add_abs_diff(Rt, Ra, Rb, 24); }

# 32-bit String Extension
# ffb: find first byte of Ra equal to the low byte of Rb; Rt gets -4..-1
# (negated distance from the end for the first match) or 0 when none match.
:ffb Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & Sub3=0 & Sub7=0b0001110
{
    match:1 = Rb[0,8];
    m1:1 = (Ra[0,8] == match);
    m2:1 = (Ra[8,8] == match);
    m3:1 = (Ra[16,8] == match);
    m4:1 = (Ra[24,8] == match);
    Rt = -4;
    if (m1) goto inst_next;
    Rt = -3;
    if (m2) goto inst_next;
    Rt = -2;
    if (m3) goto inst_next;
    Rt = -1;
    if (m4) goto inst_next;
    Rt = 0;
    # choosery method
    # rd = 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1);
}
:ffbi Rt, Ra, Imm8u is $(I32) & $(ALU_2) & Rt & Ra & Imm8u & Sub7=0b1001110 { match:1 = Imm8u; m1:1 = (Ra[0,8] == match); m2:1 = (Ra[8,8] == match); m3:1
= (Ra[16,8] == match); m4:1 = (Ra[24,8] == match); Rt = -4; if (m1) goto inst_next; Rt = -3; if (m2) goto inst_next; Rt = -2; if (m3) goto inst_next; Rt = -1; if (m4) goto inst_next; Rt = 0; } :ffmism Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & Sub3=0 & Sub7=0b0001111 { match:1 = Rb[0,8]; m1:1 = (Ra[0,8] != Rb[0,8]); m2:1 = (Ra[8,8] != Rb[8,8]); m3:1 = (Ra[16,8] != Rb[16,8]); m4:1 = (Ra[24,8] != Rb[24,8]); @if ENDIAN == "little" Rt = -4; if (m1) goto inst_next; Rt = -3; if (m2) goto inst_next; Rt = -2; if (m3) goto inst_next; Rt = -1; if (m4) goto inst_next; Rt = 0; # choosery method # rd = 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1); @else Rt = -4; if (m4) goto inst_next; Rt = -3; if (m3) goto inst_next; Rt = -2; if (m2) goto inst_next; Rt = -1; if (m1) goto inst_next; Rt = 0; @endif } :flmism Rt, Ra, Rb is $(I32) & $(ALU_2) & Rt & Ra & Rb & Sub3=0 & Sub7=0b1001111 { match:1 = Rb[0,8]; m1:1 = (Ra[0,8] != Rb[0,8]); m2:1 = (Ra[8,8] != Rb[8,8]); m3:1 = (Ra[16,8] != Rb[16,8]); m4:1 = (Ra[24,8] != Rb[24,8]); @if ENDIAN == "little" Rt = -1; if (m4) goto inst_next; Rt = -2; if (m3) goto inst_next; Rt = -3; if (m2) goto inst_next; Rt = -4; if (m1) goto inst_next; Rt = 0; # choosery method # rd = 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1); @else Rt = -1; if (m1) goto inst_next; Rt = -2; if (m2) goto inst_next; Rt = -3; if (m3) goto inst_next; Rt = -4; if (m4) goto inst_next; Rt = 0; @endif } ########### 16b ############ define token instr16(16) opsz = (15, 15) opc4 = (11, 14) opc5 = (10, 14) opc6 = (9, 14) opc7 = (8, 14) opc8 = (7, 14) opc10 = (5, 14) re2 = (5, 6) rt5 = (5, 9) ra4 = (5, 8) rt4 = (5, 8) ra5 = (0, 4) rb5 = (0, 4) rt5b = (0, 4) rt3 = (6, 8) rt3b = (8, 10) ra3 = (3, 5) rb3 = (0, 2) bit5 = (5,5) bit6 = (6,6) bit7 = (7,7) bit8 = (8,8) imm3u = (0, 2) imm3ub = (3, 5) imm4u = (5, 8) imm5u = (0, 4) imm5s = (0, 4) signed imm6u = (0, 5) imm7u = (0, 6) imm8s = (0, 7) signed imm10s = (0, 9) signed xwi37_ls = (7, 7) swid9 
= (0, 8) rt5e1 = (4, 7) rt5e2 = (4, 7) ra5e1 = (0, 3) ra5e2 = (0, 3) ; attach variables [rt5 ra5 rb5 rt5b] [ a0 a1 a2 a3 a4 a5 s0 s1 s2 s3 s4 s5 s6 s7 s8 ta t0 t1 t2 t3 t4 t5 t6 t7 t8 t9 p0 p1 fp gp lp sp ]; attach variables [ra4 rt4] [ a0 a1 a2 a3 a4 a5 s0 s1 s2 s3 s4 s5 t0 t1 t2 t3 ]; attach variables [rt3 ra3 rt3b rb3] [ a0 a1 a2 a3 a4 a5 s0 s1 ]; attach variables [ra5e1 rt5e1] [ a0 a2 a4 s0 s2 s4 s6 s8 t0 t2 t4 t6 t8 p0 fp lp ]; attach variables [ra5e2 rt5e2] [ a1 a3 a5 s1 s3 s5 s7 ta t1 t3 t5 t7 t9 p1 gp sp ]; attach variables [re2] [ s0 s2 s4 s8 ]; @define I16 "(opsz=1)" @define BFMI333 "(opc6=0b001011)" @define XWI37 "(opc4=0b0111)" @define XWI37SP "(opc4=0b1110)" @define MISC33 "(opc6=0b111111)" ### Move Instruction ### :movi55 rt5, imm5s is $(I16) & opc5=0b00001 & rt5 & imm5s { rt5 = imm5s; } :mov55 rt5, ra5 is $(I16) & opc5=0b00000 & rt5 & ra5 { rt5 = ra5; } ### Add/Sub Instruction with Immediate ### :addi45 rt4, imm5u is $(I16) & opc6=0b000110 & rt4 & imm5u { rt4 = rt4 + imm5u; } :addi333 rt3, ra3, imm3u is $(I16) & opc6=0b001110 & rt3 & ra3 & imm3u { rt3 = ra3 + imm3u; } :subi45 rt4, imm5u is $(I16) & opc6=0b000111 & rt4 & imm5u { rt4 = rt4 - imm5u; } :subi333 rt3, ra3, imm3u is $(I16) & opc6=0b001111 & rt3 & ra3 & imm3u { rt3 = ra3 - imm3u; } ### Add/Sub Instruction ### :add45 rt4, rb5 is $(I16) & opc6=0b000100 & rt4 & rb5 { rt4 = rt4 + rb5; } :add333 rt3, ra3, rb3 is $(I16) & opc6=0b001100 & rt3 & ra3 & rb3 { rt3 = ra3 + rb3; } :sub45 rt4, rb5 is $(I16) & opc6=0b000101 & rt4 & rb5 { rt4 = rt4 - rb5; } :sub333 rt3, ra3, rb3 is $(I16) & opc6=0b001101 & rt3 & ra3 & rb3 { rt3 = ra3 - rb3; } ### Shift Instruction with Immediate ### :srai45 rt4, imm5u is $(I16) & opc6=0b001000 & rt4 & imm5u { rt4 = rt4 s>> imm5u; } :srli45 rt4, imm5u is $(I16) & opc6=0b001001 & rt4 & imm5u { rt4 = rt4 >> imm5u; } :slli333 rt3, ra3, imm3u is $(I16) & opc6=0b001010 & rt3 & ra3 & imm3u { rt3 = ra3 << imm3u; } ### Bit Field Mask Instruction with Immediate ### :zeb33 rt3, ra3 is 
$(I16) & $(BFMI333) & rt3 & ra3 & imm3u=0b000 { local tmp = ra3; rt3 = zext(tmp:1); } :zeh33 rt3, ra3 is $(I16) & $(BFMI333) & rt3 & ra3 & imm3u=0b001 { local tmp = ra3; rt3 = zext(tmp:2); } :seb33 rt3, ra3 is $(I16) & $(BFMI333) & rt3 & ra3 & imm3u=0b010 { local tmp = ra3; rt3 = sext(tmp:1); } :seh33 rt3, ra3 is $(I16) & $(BFMI333) & rt3 & ra3 & imm3u=0b011 { local tmp = ra3; rt3 = sext(tmp:2); } :xlsb33 rt3, ra3 is $(I16) & $(BFMI333) & rt3 & ra3 & imm3u=0b100 { rt3 = ra3 & 1; } :x11b33 rt3, ra3 is $(I16) & $(BFMI333) & rt3 & ra3 & imm3u=0b101 { rt3 = ra3 & 0x7ff; } ### Load / Store Instruction ### :lwi450 rt4,[ra5] is $(I16) & opc6=0b011010 & rt4 & ra5 { rt4 = *ra5; } rel3w: off is imm3u [ off = imm3u << 2; ] { export *[const]:4 off; } rel3h: off is imm3u [ off = imm3u << 1; ] { export *[const]:4 off; } rel3b: off is imm3u [ off = imm3u << 0 ; ] { export *[const]:4 off; } ra3_rel3w: [ra3 + rel3w] is ra3 & rel3w { addr:4 = ra3 + rel3w; export addr; } ra3_rel3h: [ra3 + rel3h] is ra3 & rel3h { addr:4 = ra3 + rel3h; export addr; } ra3_rel3b: [ra3 + rel3b] is ra3 & rel3b { addr:4 = ra3 + rel3b; export addr; } :lwi333 rt3, ra3_rel3w is $(I16) & opc6=0b010000 & rt3 & ra3_rel3w { rt3 = *ra3_rel3w; } :lwi333.bi rt3, [ra3], rel3w is $(I16) & opc6=0b010001 & rt3 & ra3 & rel3w { rt3 = *ra3; ra3 = ra3 + rel3w; } :lhi333 rt3, ra3_rel3h is $(I16) & opc6=0b010010 & rt3 & ra3_rel3h { local tmp:2 = *ra3_rel3h; rt3 = zext(tmp); } :lbi333 rt3, ra3_rel3b is $(I16) & opc6=0b010011 & rt3 & ra3_rel3b { local tmp:1 = *ra3_rel3b; rt3 = zext(tmp); } :swi450 rt4, [ra5] is $(I16) & opc6=0b011011 & rt4 & ra5 { *ra5 = rt4; } :swi333 rt3, ra3_rel3w is $(I16) & opc6=0b010100 & rt3 & ra3_rel3w { *ra3_rel3w = rt3; } :swi333.bi rt3, [ra3], rel3w is $(I16) & opc6=0b010101 & rt3 & ra3 & rel3w { *ra3 = rt3; ra3 = ra3 + rel3w; } :shi333 rt3, ra3_rel3h is $(I16) & opc6=0b010110 & rt3 & ra3_rel3h { local tmp = rt3; *ra3_rel3h = tmp:2; } :sbi333 rt3, ra3_rel3b is $(I16) & opc6=0b010111 & rt3 & ra3_rel3b 
{ local tmp = rt3; *ra3_rel3b = tmp:1; } ### Load/Store Instruction with Implied FP ### rel7w: off is imm7u [ off = imm7u << 2; ] { export *[const]:4 off; } fp_rel7w: [fp + rel7w] is fp & rel7w { addr:4 = fp + rel7w; export addr; } :lwi37 rt3b, fp_rel7w is $(I16) & rt3b & $(XWI37) & xwi37_ls=0 & fp_rel7w { rt3b = *fp_rel7w; } :swi37 rt3b, fp_rel7w is $(I16) & rt3b & $(XWI37) & xwi37_ls=1 & fp_rel7w { *fp_rel7w = rt3b; } ### Branch and Jump Instruction ### rel8: addr is imm8s [ addr = inst_start + (imm8s << 1); ] { export *:4 addr; } :beqs38 rt3b,rel8 is $(I16) & opc4=0b1010 & rt3b & rel8 { if(a5 == rt3b) goto rel8; } :bnes38 rt3b,rel8 is $(I16) & opc4=0b1011 & rt3b & rel8 { if(a5 != rt3b) goto rel8; } :beqz38 rt3b,rel8 is $(I16) & opc4=0b1000 & rt3b & rel8 { if(rt3b == 0) goto rel8; } :bnez38 rt3b,rel8 is $(I16) & opc4=0b1001 & rt3b & rel8 { if(rt3b != 0) goto rel8; } :j8 rel8 is $(I16) & opc7=0b1010101 & rel8 { goto rel8; } :jr5 rb5 is $(I16) & opc10=0b1011101000 & rb5 { goto [rb5]; } :ret5 rb5 is $(I16) & opc10=0b1011101100 & rb5 { return [rb5]; } :jral5 rb5 is $(I16) & opc10=0b1011101001 & rb5 { lp = inst_next; call [rb5]; } ### Compare and Branch Instruction ### :slti45 ra4, imm5u is $(I16) & opc6=0b110011 & ra4 & imm5u { ta = zext(ra4 < imm5u); } :sltsi45 ra4, imm5u is $(I16) & opc6=0b110010 & ra4 & imm5u { ta = zext(ra4 s< imm5u); } :slt45 ra4, rb5 is $(I16) & opc6=0b110001 & ra4 & rb5 { ta = zext(ra4 < rb5); } :slts45 ra4, rb5 is $(I16) & opc6=0b110000 & ra4 & rb5 { ta = zext(ra4 s< rb5); } :beqzs8 rel8 is $(I16) & opc7=0b1101000 & rel8 { if(ta == 0) goto rel8; } :bnezs8 rel8 is $(I16) & opc7=0b1101001 & rel8 { if(ta != 0) goto rel8; } ### Misc Instruction ### # V3 doesn't allow break16 with SWID greater than 31 :break16 swid9 is $(I16) & opc6=0b110101 & imm4u=0 & swid9 { break(swid9:4); } :nop16 is $(I16) & opc6=0b001001 & rt4=0 & imm5u=0 { } ### ALU Instructions (V2) ### :addi10.sp imm10s is $(I16) & opc5=0b11011 & imm10s { sp = sp + imm10s; } ### 
Load/Store Instruction (V2) ### sp_rel7w: [+ rel7w] is rel7w { addr:4 = sp + rel7w; export addr; } :lwi37.sp rt3b, sp_rel7w is $(I16) & rt3b & $(XWI37SP) & xwi37_ls=0 & sp_rel7w { rt3b = *sp_rel7w; } :swi37.sp rt3b, sp_rel7w is $(I16) & rt3b & $(XWI37SP) & xwi37_ls=1 & sp_rel7w { *sp_rel7w = rt3b; } ### 16-bit Baseline V3 instructions ### ### ALU Instructions (V3 16-bit) ### imm6u_: imm8 is imm6u [ imm8 = imm6u << 2; ] { export *[const]:4 imm8; } :addri36.sp rt3, imm6u_ is $(I16) & opc6=0b011000 & rt3 & imm6u_ { rt3 = sp + imm6u_; } :add5.pc rt5b is $(I16) & opc10=0b1011101101 & rt5b { rt5b = pc + rt5b; } :and33 rt3, ra3 is $(I16) & $(MISC33) & rt3 & ra3 & imm3u=0b110 { rt3 = rt3 & ra3; } :neg33 rt3, ra3 is $(I16) & $(MISC33) & rt3 & ra3 & imm3u=0b010 { rt3 = -ra3; } :not33 rt3, ra3 is $(I16) & $(MISC33) & rt3 & ra3 & imm3u=0b011 { rt3 = ~ra3; } :or33 rt3, ra3 is $(I16) & $(MISC33) & rt3 & ra3 & imm3u=0b111 { rt3 = rt3 | ra3; } :xor33 rt3, ra3 is $(I16) & $(MISC33) & rt3 & ra3 & imm3u=0b101 { rt3 = rt3 ^ ra3; } ### Bit Manipulation Instructions (V3 16-bit) ### :bmski33 rt3, imm3ub is $(I16) & opc6=0b001011 & rt3 & imm3ub & imm3u=0b110 { rt3 = (rt3 >> imm3ub) & 1; } :fexti33 rt3, imm3ub is $(I16) & opc6=0b001011 & rt3 & imm3ub & imm3u=0b111 { rt3 = rt3 & ((1 << (imm3ub + 1)) - 1); } ### Misc. Instructions (V3 16-bit) ### imm7n: off is imm5u [ off = -((32 - imm5u) << 2); ] { export *[const]:4 off; } :lwi45.fe rt4, [imm7n] is $(I16) & opc6=0b011001 & rt4 & imm7n { addr:4 = s2 + imm7n; rt4 = *addr; } :movd44 rt5e1, ra5e1 is $(I16) & opc7=0b1111101 & rt5e1 & rt5e2 & ra5e1 & ra5e2 { rt5e1 = ra5e1; rt5e2 = ra5e2; } imm5u_: imm6 is imm5u [ imm6 = imm5u + 16; ] { export *[const]:4 imm6; } :movpi45 rt4, imm5u_ is $(I16) & opc6=0b111101 & rt4 & imm5u_ { rt4 = imm5u_; } :mul33 rt3, ra3 is $(I16) & $(MISC33) & rt3 & ra3 & imm3u=0b100 { rt3 = rt3 * ra3; } # Note: POP25 and PUSH25 are highly untested ! 
And they just look messy :/ imm5u__: imm8 is imm5u [ imm8 = imm5u << 3; ] { export *[const]:4 imm8; } macro push25_special() { Smwad(lp); Smwad(gp); Smwad(fp); } macro push25_s0() { Smwad(s0); } macro push25_s2() { Smwad(s2); Smwad(s1); push25_s0(); } macro push25_s4() { Smwad(s4); Smwad(s3); push25_s2(); } macro push25_s8() { Smwad(s8); Smwad(s7); Smwad(s6); Smwad(s5); push25_s4(); } push25_re: re2 is re2 & re2=0 { push25_s0(); } push25_re: re2 is re2 & re2=1 { push25_s2(); } push25_re: re2 is re2 & re2=2 { push25_s4(); } push25_re: re2 is re2 & re2=3 { push25_s8(); } :push25 push25_re, imm5u__ is $(I16) & opc8=0b11111000 & re2 & push25_re & imm5u__ { mult_addr = sp; push25_special(); build push25_re; sp = mult_addr - imm5u__; if(re2 < 1) goto ; s2 = pc & 0xfffffffc; } macro pop25_special() { LmwOp(fp); LmwOp(gp); LmwOp(lp); } macro pop25_s0() { LmwOp(s0); } macro pop25_s2() { pop25_s0(); LmwOp(s1); LmwOp(s2); } macro pop25_s4() { pop25_s2(); LmwOp(s3); LmwOp(s4); } macro pop25_s8() { pop25_s4(); LmwOp(s5); LmwOp(s6); LmwOp(s7); LmwOp(s8); } pop25_re: re2 is re2 & re2=0 { pop25_s0(); } pop25_re: re2 is re2 & re2=1 { pop25_s2(); } pop25_re: re2 is re2 & re2=2 { pop25_s4(); } pop25_re: re2 is re2 & re2=3 { pop25_s8(); } :pop25 pop25_re, imm5u__ is $(I16) & opc8=0b11111001 & re2 & pop25_re & imm5u__ { mult_addr = sp; build pop25_re; pop25_special(); sp = mult_addr + imm5u__; return [lp]; } # EX9.IT imm9u_: imm9 is imm5u & imm4u [ imm9 = (imm4u << 5) | imm5u; ] { export *[const]:4 imm9; } define pcodeop ex9; # TODO: Depending on the value of ITB.HW the address is either set by hardware or set by ITB.Addr :ex9.it imm9u_ is $(I16) & opc6=0b110101 & (bit5=1 | bit6=1 | bit7=1 | bit8=1) & imm9u_ { ex9(imm9u_); } :ex9.it imm5u is $(I16) & opc10=0b1011101010 & imm5u { ex9(imm5u:4); } ########################## # Floating Point Extension # FPU_FS1 define pcodeop fadds; :fadds Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x0 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fadds(Fsa, Fsb); } 
define pcodeop fsubs; :fsubs Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x1 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fsubs(Fsa, Fsb); } define pcodeop fcpynss; :fcpynss Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x2 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fcpynss(Fsa, Fsb); } define pcodeop fcpyss; :fcpyss Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x3 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fcpyss(Fsa, Fsb); } define pcodeop fmadds; :fmadds Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x4 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fmadds(Fsa, Fsb); } define pcodeop fmsubs; :fmsubs Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x5 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fmsubs(Fsa, Fsb); } define pcodeop fcmovns; :fcmovns Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x6 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fcmovns(Fsa, Fsb); } define pcodeop fcmovzs; :fcmovzs Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x7 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fcmovzs(Fsa, Fsb); } define pcodeop fnmadds; :fnmadds Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x8 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fnmadds(Fsa, Fsb); } define pcodeop fnmsubs; :fnmsubs Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0x9 & Fst & Fsa & Fsb & cop4=0x0 { Fst = fnmsubs(Fsa, Fsb); } define pcodeop fmuls; :fmuls Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0xa & Fst & Fsa & Fsb & cop4=0x0 { Fst = fmuls(Fsa, Fsb); } define pcodeop fdivs; :fdivs Fst, Fsa, Fsb is $(I32) & $(COP) & fop4=0xb & Fst & Fsa & Fsb & cop4=0x0 { Fst = fdivs(Fsa, Fsb); } # FPU_FS1_F2OP define pcodeop fs2d; :fs2d Fdt, Fsa is $(I32) & $(COP) & fop4=0xf & Fdt & Fsa & f2op=0 & cop4=0x0 { Fdt = fs2d(Fsa); } define pcodeop fsqrts; :fsqrts Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=1 & cop4=0x0 { Fst = fsqrts(Fsa); } define pcodeop fabss; :fabss Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0x5 & cop4=0x0 { Fst = fabss(Fsa); } define pcodeop fui2s; :fui2s Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0x8 & cop4=0x0 { Fst = fui2s(Fsa); } define pcodeop fsi2s; :fsi2s Fst, 
Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0xc & cop4=0x0 { Fst = fsi2s(Fsa); } define pcodeop fs2ui; :fs2ui Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0x10 & cop4=0x0 { Fst = fs2ui(Fsa); } define pcodeop fs2ui.z; :fs2ui.z Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0x14 & cop4=0x0 { Fst = fs2ui.z(Fsa); } define pcodeop fs2si; :fs2si Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0x18 & cop4=0x0 { Fst = fs2si(Fsa); } define pcodeop fs2si.z; :fs2si.z Fst, Fsa is $(I32) & $(COP) & fop4=0xf & Fst & Fsa & f2op=0x1c & cop4=0x0 { Fst = fs2si.z(Fsa); } # FPU_FS2 fcond: "eq" is fcnd=0 { local tmp:1 = 0; export *[const]:1 tmp; } fcond: "lt" is fcnd=1 { local tmp:1 = 1; export *[const]:1 tmp; } fcond: "le" is fcnd=2 { local tmp:1 = 2; export *[const]:1 tmp; } fcond: "un" is fcnd=3 { local tmp:1 = 3; export *[const]:1 tmp; } fcmpe: "" is cmpe=0 { local tmp:1 = 0; export *[const]:1 tmp; } fcmpe: ".e" is cmpe=1 { local tmp:1 = 1; export *[const]:1 tmp; } define pcodeop fcmps; :fcmp^fcond^"s"^fcmpe Fst, Fsa, Fsb is $(I32) & $(COP) & Fst & Fsa & Fsb & cop4=0x4 & fcond & fcmpe { Fst = fcmps(Fsa, Fsb, fcond, fcmpe); } # FPU_FD1 define pcodeop faddd; :faddd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x0 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = faddd(Fda, Fdb); } define pcodeop fsubd; :fsubd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x1 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fsubd(Fda, Fdb); } define pcodeop fcpynsd; :fcpynsd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x2 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fcpynsd(Fda, Fdb); } define pcodeop fcpysd; :fcpysd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x3 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fcpysd(Fda, Fdb); } define pcodeop fmaddd; :fmaddd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x4 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fmaddd(Fda, Fdb); } define pcodeop fmsubd; :fmsubd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x5 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fmsubd(Fda, Fdb); } define pcodeop fcmovnd; 
:fcmovnd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x6 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fcmovnd(Fda, Fdb); }
define pcodeop fcmovzd;
:fcmovzd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x7 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fcmovzd(Fda, Fdb); }
define pcodeop fnmaddd;
:fnmaddd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x8 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fnmaddd(Fda, Fdb); }
define pcodeop fnmsubd;
:fnmsubd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0x9 & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fnmsubd(Fda, Fdb); }
define pcodeop fmuld;
:fmuld Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0xa & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fmuld(Fda, Fdb); }
define pcodeop fdivd;
:fdivd Fdt, Fda, Fdb is $(I32) & $(COP) & fop4=0xb & Fdt & Fda & Fdb & cop4=0x8 { Fdt = fdivd(Fda, Fdb); }

# FPU_FD1_F2OP -- double-precision single-operand conversions/ops (cop4=0x8, fop4=0xf, selected by f2op)
define pcodeop fd2s;
:fd2s Fst, Fda is $(I32) & $(COP) & fop4=0xf & Fst & Fda & f2op=0 & cop4=0x8 { Fst = fd2s(Fda); }
define pcodeop fsqrtd;
:fsqrtd Fdt, Fda is $(I32) & $(COP) & fop4=0xf & Fdt & Fda & f2op=1 & cop4=0x8 { Fdt = fsqrtd(Fda); }
define pcodeop fabsd;
:fabsd Fdt, Fda is $(I32) & $(COP) & fop4=0xf & Fdt & Fda & f2op=0x5 & cop4=0x8 { Fdt = fabsd(Fda); }
define pcodeop fui2d;
:fui2d Fdt, Fsa is $(I32) & $(COP) & fop4=0xf & Fdt & Fsa & f2op=0x8 & cop4=0x8 { Fdt = fui2d(Fsa); }
define pcodeop fsi2d;
:fsi2d Fdt, Fsa is $(I32) & $(COP) & fop4=0xf & Fdt & Fsa & f2op=0xc & cop4=0x8 { Fdt = fsi2d(Fsa); }
define pcodeop fd2ui;
:fd2ui Fst, Fda is $(I32) & $(COP) & fop4=0xf & Fst & Fda & f2op=0x10 & cop4=0x8 { Fst = fd2ui(Fda); }
define pcodeop fd2ui.z;
# BUGFIX: semantics previously called the single-precision op fs2ui.z(Fda); this is the
# double-precision truncating convert, so it must use the fd2ui.z op defined above.
:fd2ui.z Fst, Fda is $(I32) & $(COP) & fop4=0xf & Fst & Fda & f2op=0x14 & cop4=0x8 { Fst = fd2ui.z(Fda); }
define pcodeop fd2si;
:fd2si Fst, Fda is $(I32) & $(COP) & fop4=0xf & Fst & Fda & f2op=0x18 & cop4=0x8 { Fst = fd2si(Fda); }
define pcodeop fd2si.z;
# BUGFIX: semantics previously called the single-precision op fs2si.z(Fda); this is the
# double-precision truncating convert, so it must use the fd2si.z op defined above.
:fd2si.z Fst, Fda is $(I32) & $(COP) & fop4=0xf & Fst & Fda & f2op=0x1c & cop4=0x8 { Fst = fd2si.z(Fda); }

# FPU_FS2
define pcodeop fcmpd;
:fcmp^fcond^"d"^fcmpe Fst, Fda, Fdb is $(I32) &
$(COP) & Fst & Fda & Fdb & cop4=0xc & fcond & fcmpe { Fst = fcmpd(Fda, Fdb, fcond, fcmpe); } # FPU_MFCP define pcodeop fmfsr; :fmfsr Rt, Fsa is $(I32) & $(COP) & fop4=0x0 & Rt & Fsa & f2op=0x0 & cop4=0x1 { Rt = fmfsr(Fsa); } define pcodeop fmfdr; :fmfdr Rt, Fda is $(I32) & $(COP) & fop4=0x1 & Rt & Fda & f2op=0x0 & cop4=0x1 { Rt = fmfdr(Fda); } # FPU_MTCP define pcodeop fmtsr; :fmtsr Rt, Fsa is $(I32) & $(COP) & fop4=0x0 & Rt & Fsa & f2op=0x0 & cop4=0x9 { Fsa = fmtsr(Rt); } define pcodeop fmtdr; :fmtdr Rt, Fda is $(I32) & $(COP) & fop4=0x1 & Rt & Fda & f2op=0x0 & cop4=0x9 { Fda = fmtdr(Rt); } # FPU_FLS :fls Fst, AddrRaRbsv is $(I32) & $(COP) & Fst & AddrRaRbsv & fbi=0 & cop4=0x2 { Fst = *AddrRaRbsv; } :fls.bi Fst [Ra], OffsetRbsv is $(I32) & $(COP) & Fst & Ra & OffsetRbsv & fbi=1 & cop4=2 { Fst = *Ra; Ra = Ra + OffsetRbsv; } # FPU_FLD :fld Fdt, AddrRaRbsv is $(I32) & $(COP) & Fdt & AddrRaRbsv & fbi=0 & cop4=0x3 { Fdt = *AddrRaRbsv; } :fld.bi Fdt [Ra], OffsetRbsv is $(I32) & $(COP) & Fdt & Ra & OffsetRbsv & fbi=1 & cop4=3 { Fdt = *Ra; Ra = Ra + OffsetRbsv; } # FPU_FSS :fss Fst, AddrRaRbsv is $(I32) & $(COP) & Fst & AddrRaRbsv & fbi=0 & cop4=0xa { *AddrRaRbsv = Fst; } :fss.bi Fst [Ra], OffsetRbsv is $(I32) & $(COP) & Fst & Ra & OffsetRbsv & fbi=1 & cop4=0xa { *Ra = Fst; Ra = Ra + OffsetRbsv; } # FPU_FSD :fsd Fdt, AddrRaRbsv is $(I32) & $(COP) & Fdt & AddrRaRbsv & fbi=0 & cop4=0xb { *AddrRaRbsv = Fdt; } :fsd.bi Fdt [Ra], OffsetRbsv is $(I32) & $(COP) & Fdt & Ra & OffsetRbsv & fbi=1 & cop4=0xb { *Ra = Fdt; Ra = Ra + OffsetRbsv; } # LWC0 AddrRaImm12s: [Ra + offs] is Ra & Imm12s [ offs = Imm12s << 2; ] { addr:4 = Ra + offs; export addr; } OffImm12s: (offs) is Imm12s [ offs = Imm12s << 2; ] { export *[const]:4 offs; } :flsi Fst, AddrRaImm12s is $(I32) & $(LWC) & Fst & cpn=0 & fsbi=0 & AddrRaImm12s { Fst = *AddrRaImm12s; } :flsi.bi Fst [Ra], OffImm12s is $(I32) & $(LWC) & Fst & Ra & cpn=0 & fsbi=1 & OffImm12s { Fst = *Ra; Ra = Ra + OffImm12s; } # LDC0 :fldi Fdt, 
AddrRaImm12s is $(I32) & $(LDC) & Fdt & cpn=0 & fsbi=0 & AddrRaImm12s { Fdt = *AddrRaImm12s; } :fldi.bi Fdt [Ra], OffImm12s is $(I32) & $(LDC) & Fdt & Ra & cpn=0 & fsbi=1 & OffImm12s { Fdt = *Ra; Ra = Ra + OffImm12s; } # SWC0 :fssi Fst, AddrRaImm12s is $(I32) & $(SWC) & Fst & cpn=0 & fsbi=0 & AddrRaImm12s { *AddrRaImm12s = Fst; } :fssi.bi Fst [Ra], OffImm12s is $(I32) & $(SWC) & Fst & Ra & cpn=0 & fsbi=1 & OffImm12s { *Ra = Fst; Ra = Ra + OffImm12s; } # SDC0 :fsdi Fdt, AddrRaImm12s is $(I32) & $(SDC) & Fdt & cpn=0 & fsbi=0 & AddrRaImm12s { *AddrRaImm12s = Fdt; } :fsdi.bi Fdt [Ra], OffImm12s is $(I32) & $(SDC) & Fdt & Ra & cpn=0 & fsbi=1 & OffImm12s { *Ra = Fdt; Ra = Ra + OffImm12s; } :fmfcfg Rt is $(I32) & $(COP) & fop4=0xc & Rt & f2op=0x0 & cop4=0x1 { Rt = fpcfg; } :fmfcsr Rt is $(I32) & $(COP) & fop4=0xc & Rt & f2op=0x1 & cop4=0x1 { Rt = fpcsr; } :fmtcsr Rt is $(I32) & $(COP) & fop4=0xc & Rt & Fsa & f2op=0x1 & cop4=0x9 { fpcsr = Rt; } ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32be.slaspec ================================================ @define ENDIAN "big" @include "nds32.sinc" ================================================ FILE: pypcode/processors/NDS32/data/languages/nds32le.slaspec ================================================ @define ENDIAN "little" @include "nds32.sinc" ================================================ FILE: pypcode/processors/NDS32/data/patterns/nds32_patterns.xml ================================================ 0xfc 1....... 0xdd 0x9e 0xfc 0....... 
================================================ FILE: pypcode/processors/NDS32/data/patterns/patternconstraints.xml ================================================ nds32_patterns.xml ================================================ FILE: pypcode/processors/PA-RISC/data/languages/pa-risc.ldefs ================================================ Generic PA-RISC 32-bit big endian ================================================ FILE: pypcode/processors/PA-RISC/data/languages/pa-risc.opinion ================================================ ================================================ FILE: pypcode/processors/PA-RISC/data/languages/pa-risc.sinc ================================================ # PA-RISC common specification file for 32 and 64-bit processors # Appropriate defines (PARISC32, PARISC64) must be # specified before including this file # Known Issues: # The branch target is annotated onto the line following the branch, not the branch itself # There may still be issues with condition codes and instructions like INST,=,N r2,r3,r2 that write back into one of the input argument registers. # The condition uses the final value in r2, when it should use the value passed in to r2 before the operation. # The implementation of space registers is incorrect. There should really be a 64 bit address space with space register in the upper 64 bits. # Right now, the space register is either ignored or else just added to the base address. # Some of the pcode could be simplified. # Attention- Sometimes the source and destination registers can be the same registers. # When you write to a destination reg, it's important that your pcode always keeps a temp copy # of the original if you later refer to a source reg. 
# Register conventions # # r0 is always 0 # r1 scratch register, caller saved # r2 is rp is the return pointer # r3 to r18 callee saves # r19 to r22 caller saves # r23 is arg3 # r24 is arg2 # r25 is arg1 # r26 is arg0 # r27 is the global data pointer dp and must be set to the .data symbol in the elf header # r28 is ret0 the subroutine return value, high order word if a 64-bit return value # r29 is ret1 the low order word of a 64-bit return value # r30 is sp the stack pointer # r31 is the millicode return pointer # # fr0 is farg0 if floating arg is 32-bits, 64-bit args are in farg1 high and low # fr1 is farg0 if floating arg is 32-bits # fr2 is farg0 if floating arg is 32-bits, 64-bit args are in farg3 high and low # fr0 is farg0 if floating arg is 32-bits # fr4 is fret floating point return (8 bytes) # # Note- only two double arguments can be passed in registers. # If a function return value is larger than 64 bits, then the caller passes the address in r28 # # Note- function calls and returns are usually through relocation stubs # #----- @ifdef PARISC64 @define REGSIZE "8" # General purpose register size (8 or 4) @define ADDRSIZE "8" # Memory address size (8 bytes in 64 bit mode) @else # PARISC32 @define REGSIZE "4" # General purpose register size (8 or 4) @define ADDRSIZE "4" # Memory address size (8 bytes, using the space registers as the upper 32 bits and the regular base and offset as the lower 32 bits) @endif #----- define endian=$(ENDIAN); define alignment=4; # I'm not sure what to set for the sizes of these spaces -- TODO set these correctly define space ram type=ram_space size=$(ADDRSIZE) default; define space register type=register_space size=4; # this is a large enough address space to address all the registers. 
Two bytes would probably be enough # General purpose registers define register offset=0 size=$(REGSIZE) [ r0 r1 rp r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 dp r28 r29 sp r31 ]; # Floating point registers define register offset=0x1000 size=8 [ fr0 fpe23 fpe45 fpe67 fr4 fr5 fr6 fr7 fr8 fr9 fr10 fr11 fr12 fr13 fr14 fr15 fr16 fr17 fr18 fr19 fr20 fr21 fr22 fr23 fr24 fr25 fr26 fr27 fr28 fr29 fr30 fr31 ]; define register offset=0x1000 size=4 [ fr0R fr0L fpe2 fpe3 fpe4 fpe5 fpe6 fpe7 fr4L fr4R fr5L fr5R fr6L fr6R fr7L fr7R fr8L fr8R fr9L fr9R fr10L fr10R fr11L fr11R fr12L fr12R fr13L fr13R fr14L fr14R fr15L fr15R fr16L fr16R fr17L fr17R fr18L fr18R fr19L fr19R fr20L fr20R fr21L fr21R fr22L fr22R fr23L fr23R fr24L fr24R fr25L fr25R fr26L fr26R fr27L fr27R fr28L fr28R fr29L fr29R fr30L fr30R fr31L fr31R ]; # Floating Point Status Register # Only the compare bit and the compare queue are implemented here # and the Condition Queue is only 8 bits, rather than 10 define register offset=0x1100 size=1 [ compareBit compareQueue ]; # Shadow Registers define register offset=0x2000 size=$(REGSIZE) [ shr0 shr1 shr2 shr3 shr4 shr5 shr6 ]; # Space Registers define register offset=0x3000 size=4 [ sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 ]; # Processor Status Word define register offset=0x4000 size=1 [ pswY pswZ pswE pswS pswT pswH pswL pswN pswX pswB pswC pswV pswM pswCB pswG pswF pswR pswQ pswP pswD pswI ]; # Control Registers define register offset=0x5000 size=4 [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 sar cr12 cr13 cr14 cr15 cr16 cr17 cr18 cr19 cr20 cr21 cr22 cr23 cr24 cr25 cr26 cr27 cr28 cr29 cr30 cr31 ]; # instruction address offset and instruction address space queues # these are used for branching, to compute target addresses and deal with branch delay slots define register offset=0x6000 size=4 [ iasq_front iasq_back iaoq_front iaoq_back ]; # special hidden registers to support the nullify and delay slot features of PA-RISC define 
register offset=0x7000 size=1 [nullifyCond nullifyNextCond branchCond branchExecuted]; define register offset=0x7100 size=4 [branchIndDest nullifyCondResult]; define register offset=0x7200 size=12 contextreg; define context contextreg # transient context phase = (0,2) noflow temp1 = (3,3) # stored context nullifyEnable = (4,4) noflow branchCouldBeNullified = (5,5) noflow branchEnable = (6,6) noflow branchType = (7,9) noflow # These do not appear to be used: # branchIsInd = (7,7) noflow # branchIsCond = (8,8) noflow # branchIsCall = (9,9) noflow branchIsReturn = (10,10) noflow # used for returns padding = (10,31) noflow branchImmDest = (32,63) noflow temp32 = (64,95) noflow ; @define COMMON "phase=1 " # Instruction fields define token instr(32) opfam = (26,31) cr = (21,25) crname2 = (21,25) freg2 = (21,25) freg2sgl = (21,25) fr2half = (21,25) fusedr2 = (21,25) reg2 = (21,25) b = (21,25) bboffset = (21,25) crname1 = (21,25) bit20 = (20,20) signed fpc1sub2 = (17,20) highIm10 = (16,25) reg1 = (16,20) fusedr1 = (16,20) freg1 = (16,20) freg1sgl = (16,20) fr1half = (16,20) r = (16,20) highIm5 = (16,20) signed highIm5less16 = (17,20) tr = (16,20) x = (16,20) w1 = (16,20) bit16 = (16,16) fpc1sub = (15,16) srbit1 = (15,15) s = (14,15) srbit0 = (14,14) im13 = (13,25) signed SEDCondSym = (13,15) RegUnitCondSym = (13,15) InvUnitCondSym = (13,15) RegLogicCondSym = (13,15) InvLogicCondSym = (13,15) RegAddCondSym = (13,15) InvAddCondSym = (13,15) RegCSCondSym = (13,15) InvCSCondSym = (13,15) c = (13,15) fpsub = (13,15) srbit2 = (13,13) fpdf = (13,14) fixeddf = (13,14) fpdfraw = (13,14) fixedsf = (13,14) a = (13,13) u = (13,13) fpr1x = (12,12) f = (12,12) fv = (12,12) zero = (12,12) one = (12,12) subop1012 = (10,12) im15 = (11,25) sopim10 = (11,20) signed sopim5 = (11,15) signed fpta = (11,15) fusedta = (11,15) fpsf = (11,12) fpsfraw = (11,12) fpfmt = (11,12) fpfmt1bit = (11,11) bit11 = (11,11) cc = (10,11) ldcc = (10,11) stcc = (10,11) ldcwcc = (10,11) bit10 = (10,10) sopim17 = 
(9,25) pmuop = (9,13) specop = (9,10) fpclass = (9,10) bit9 = (9,9) bit8 = (8,8) fpx = (8,8) bits78 = (7,8) fpr2x = (7,7) fpra = (6,10) fusedra = (6,10) ext4 = (6,9) C = (6,9) subop = (6,9) sysopshifted= (6,13) sysopshiftedshort = (6,12) op = (6,11) sfu = (6,8) fp0czero = (6,8) fptx = (6,6) bit6 = (6,6) sysop = (5,12) i2 = (5,11) bits59 = (5,9) cp = (5,9) fusedfmt = (5,5) bit5 = (5,5) m = (5,5) spn = (5,5) w2 = (2,12) w2_2 = (2,2) w2less2 = (3,12) n = (1,1) im26 = (0,25) signed sim21 = (0,20) signed sim14 = (0,13) signed im21less0 = (1,20) im21_1_12 = (1,11) im21_12_14 = (12,13) im21_14_16 = (14,15) im21_16_21 = (16,20) im14 = (0,13) im14less0 = (1,13) im11 = (0,10) signed im11less0 = (1,11) sim5 = (0,4) signed im5 = (0,4) im5less0 = (1,4) fpcond = (0,4) fptest = (0,4) fpt = (0,4) fptsgl = (0,4) fusedrt = (0,4) fpthalf = (0,4) crnamet = (0,4) t = (0,4) bit0 = (0,0) w = (0,0) ; # general purpose registers attach variables [reg1 reg2 t r b x] [ r0 r1 rp r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 dp r28 r29 sp r31 ]; # fp registers - 64 bit doubles attach variables [ freg2 freg1 fpt fpra fpta ] [ fr0 fpe23 fpe45 fpe67 fr4 fr5 fr6 fr7 fr8 fr9 fr10 fr11 fr12 fr13 fr14 fr15 fr16 fr17 fr18 fr19 fr20 fr21 fr22 fr23 fr24 fr25 fr26 fr27 fr28 fr29 fr30 fr31 ]; # 64 bit fp registers when used as 32 bit (.sgl completer) # This mapping goes from bit pattern N -> register NL attach variables [ freg2sgl freg1sgl fptsgl ] [ fr0L fpe2 fpe4 fpe6 fr4L fr5L fr6L fr7L fr8L fr9L fr10L fr11L fr12L fr13L fr14L fr15L fr16L fr17L fr18L fr19L fr20L fr21L fr22L fr23L fr24L fr25L fr26L fr27L fr28L fr29L fr30L fr31L ]; # 32 bit single precision mode fpra fpta for mult-add and mult-sub instructions attach variables [ fusedra fusedta fusedr2 fusedr1 fusedrt ] [ fr16L fr17L fr18L fr19L fr20L fr21L fr22L fr23L fr24L fr25L fr26L fr27L fr28L fr29L fr30L fr31L fr16R fr17R fr18R fr19R fr20R fr21R fr22R fr23R fr24R fr25R fr26R fr27R fr28R fr29R fr30R fr31R ]; 
# control registers attach variables [ cr ] [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 cr8 cr9 cr10 sar cr12 cr13 cr14 cr15 cr16 cr17 cr18 cr19 cr20 cr21 cr22 cr23 cr24 cr25 cr26 cr27 cr28 cr29 cr30 cr31 ]; # control registers by their purpose names attach names [ crname2 crname1 crnamet ] [ RCTR CR1 CR2 CR3 CR4 CR5 CR6 CR7 PID1 PID2 CCR SAR PID3 PID4 IVA EIEM ITMR IIASQ IIAOQ IIR ISR IOR IPSW EIRR TMP0 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 TMP7 ]; attach names [ ldcc ] [ none resv sl resv ]; attach names [ stcc ] [ none BC SL resv ]; attach names [ ldcwcc ] [ none CO resv resv ]; # Table 5-7 attach names [ SEDCondSym ] [ "" ",=" ",<" ",OD" ",TR" ",<>" ",>=" ",EV" ]; attach names [ RegUnitCondSym ] [ "" _ ",SBZ" ",SHZ" ",SDC" _ ",SBC" ",SHC" ]; attach names [ InvUnitCondSym ] [ "" _ ",NBZ" ",NHZ" ",NDC" _ ",NBC" ",NHC" ]; attach names [ RegLogicCondSym ] [ "" ",=" ",<" ",<=" _ _ _ ",OD" ]; attach names [ InvLogicCondSym ] [ "" ",<>" ",>=" ",>" _ _ _ ",EV" ]; # Table 5-4 attach names [ RegAddCondSym ] [ "" ",=" ",<" ",<=" ",NUV" ",ZNV" ",SV" ",OD" ]; attach names [ InvAddCondSym ] [ "" ",<>" ",>=" ",>" ",UV" ",VNZ" ",NSV" ",EV" ]; attach names [ RegCSCondSym ] [ "" ",=" ",<" ",<=" ",<<" ",<<=" ",SV" ",OD" ]; attach names [ InvCSCondSym ] [ "" ",<>" ",>=" ",>" ",>>=" ",>>" ",NSV" ",EV" ]; # the different tests used by the FTEST instruction attach names [ fptest ] [ "" "ACC" "REJ" _ _ "ACC8" "REJ8" _ _ "ACC6" _ _ _ "ACC4" _ _ _ "ACC2" _ _ _ _ _ _ _ _ _ _ _ _ _ _ ]; # the floating point number types attach names [ fpsf fpdf fpfmt ] [ ",SGL" ",DBL" ",QUAD" _ ]; attach names [ fpfmt1bit ] [ ",SGL" ",DBL" ]; # the fixed point number types attach names [ fixedsf fixeddf ] [ ",UW" ",UD" ",UQ" _ ]; # the floating point number types for fused ops attach names [ fusedfmt ] [ ",DBL" ",SGL" ]; ###################################################################################### # caret instruction builders for nullify and branch 
######################################################################################
# These all assume a fall through from a preceding branch
# If there is a branch into the delay slot of a different branch,
# then the branch logic will still be present from the preceding instruction
# and the control flow will branch to that destination.
# This case should result in a red bookmark due to the disassembly context difference
# These all also assume that the instruction following a branch (in the delay slot)
# does not nullify its succeeding instruction
#######################################################################################

# no branch, no nullify
# previous instruction was not a branch and did not nullify this instruction
:^instruction is phase=0 & branchEnable=0 & nullifyEnable=0 & instruction [ phase=1; ] { nullifyNextCond=0; build instruction; nullifyCond=nullifyNextCond; }

# previous instruction was not a branch, but may have conditionally nullified this instruction
:^instruction is phase=0 & branchEnable=0 & nullifyEnable=1 & instruction [ phase=1; ] {
	local wasNullified = nullifyCond;
	nullifyCond = 0;
	nullifyNextCond=0;
	# BUGFIX(extraction): the local label was lost, leaving a dangling "goto ;".
	# Skip execution of a nullified instruction, but still propagate the nullify
	# state it would have produced into nullifyCond for the next instruction.
	if (wasNullified) goto <done>;
	build instruction;
	<done>
	nullifyCond=nullifyNextCond;
}

#
# Handle branches that don't nullify their branch delay slot instructions
# These instructions themselves may have been nullified, so we need to
# check and conditionally execute the branch based on that.
#
# Need to reset branchCond = 0 before build instruction because the instruction does not reset it,
# and if branchCond is not reset, then it persists to subsequent instructions.
immediateDest: is branchImmDest { export *:$(ADDRSIZE) branchImmDest; } # previous instruction was a branch, but this instruction was not nullified # but branch instruction could have been nullified # branchType = 0, unconditional immediate branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=0 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchCond = 0; branchExecuted = 0; build instruction; if ( previousBranchExecuted ) goto immediateDest; nullifyCond=nullifyNextCond; } # previous instruction was a branch, but this instruction was not nullified # branchType = 0, unconditional immediate branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=0 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; branchCond = 0; branchExecuted=0; build instruction; goto immediateDest; } # branchType = 1, unconditional immediate call # this doesn't handle the case where the inst in the delay slot nullifies the next instruction :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=1 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchCond = 0; branchExecuted = 0; build instruction; if ( ! 
previousBranchExecuted ) goto ; call immediateDest; nullifyCond=nullifyNextCond; } # branchType = 1, unconditional immediate call # this doesn't handle the case where the inst in the delay slot nullifies the next instruction # this is the case where the branch could not have been nullified :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=1 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; build instruction; call immediateDest; } # branchType = 2, conditional immediate branch # the preceding instruction was a branch that may or may not have been taken and may or may not have been nullified # but was not nullifying itself (the delay slot must execute) # this doesn't handle the case where the instruction in the branch delay slot nullifies the next instruction :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=2 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { nullifyCond = 0; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; build instruction; if (previousBranchCond && previousBranchExecuted) goto immediateDest; nullifyCond=nullifyNextCond; } # branchType = 2, conditional immediate branch # the preceding instruction was a branch that may or may not have been taken and may or may not have been nullified # but was not nullifying itself (the delay slot must execute) # this doesn't handle the case where the instruction in the branch delay slot nullifies the next instruction :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=2 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; build instruction; if (previousBranchCond) goto immediateDest; nullifyCond=nullifyNextCond;} # branchType = 
3, conditional immediate call, (currently not used, may not exist in PA-RISC) :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=3 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { nullifyNextCond=0; local previousBranchExecuted = branchExecuted; local previousBranchCond = branchCond; branchCond = 0; branchExecuted = 0; build instruction; if ( ! (previousBranchCond && previousBranchExecuted) ) goto ; call immediateDest; nullifyCond=nullifyNextCond; } # branchType = 3, conditional immediate call, (currently not used, may not exist in PA-RISC) :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=3 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { nullifyNextCond=0; local previousBranchCond = branchCond; branchCond = 0; branchExecuted = 0; build instruction; if ( ! (previousBranchCond) ) goto ; call immediateDest; nullifyCond=nullifyNextCond; } # branchType = 4, unconditional indirect branch # the preceding instruction was a branch that did not nullify its branch delay slot inst # The branch is indirect and may have been nullified :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchCouldBeNullified=1 & branchType=4 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; build instruction; if ( ! 
previousBranchExecuted ) goto ; goto [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 4, unconditional indirect branch and also return # the preceding instruction was a branch that did not nullify its branch delay slot inst # The branch is indirect and WAS NOT nullified # :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchCouldBeNullified=0 & branchType=4 & instruction & branchIsReturn=0 [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; build instruction; goto [branchIndDest]; } :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchCouldBeNullified=0 & branchType=4 & instruction & branchIsReturn=1 [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; build instruction; return [branchIndDest]; } # branchType = 5, unconditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=5 & branchCouldBeNullified=1 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; build instruction; if ( ! 
previousBranchExecuted ) goto ; call [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 5, unconditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=5 & branchCouldBeNullified=0 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; build instruction; call [branchIndDest]; } # branchType = 6, conditional indirect branch # previous instruction was a conditional branch that may or may not be taken and may have been nullified # however, the branch does not nullify the inst in its branch delay slot :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=6 & branchCouldBeNullified=1 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; local previousBranchCond = branchCond; branchCond = 0; branchExecuted = 0; build instruction; if ( ! previousBranchExecuted || ! previousBranchCond) goto ; goto [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 6, conditional indirect branch # previous instruction was a conditional branch that may or may not be taken and may have been nullified # however, the branch does not nullify the inst in its branch delay slot :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=6 & branchCouldBeNullified=0 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond=0; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; build instruction; if ( ! previousBranchCond ) goto ; goto [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 7, conditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=7 & branchCouldBeNullified=1 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; local previousBranchCond = branchCond; branchCond = 0; branchExecuted = 0; build instruction; if ( ! previousBranchExecuted || ! 
previousBranchCond) goto ; call [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 7, conditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=0 & branchType=7 & branchCouldBeNullified=0 & instruction [ phase=1; ] { nullifyCond = 0; nullifyNextCond=0; branchExecuted = 0; local previousBranchCond= branchCond; branchCond = 0; build instruction; if ( ! previousBranchCond) goto ; call [branchIndDest]; nullifyCond=nullifyNextCond; } # # Handle branches with nullification -- the branch may nullify its branch delay slot # # branchType = 0, unconditional immediate branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=0 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; if (nullify) goto ; build instruction; if ( previousBranchExecuted ) goto immediateDest; nullifyCond=nullifyNextCond; } # branchType = 0, unconditional immediate branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=0 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; if (nullify) goto ; build instruction; goto immediateDest; } # branchType = 1, unconditional immediate call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=1 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; if (nullify) goto ; build instruction; if ( ! 
previousBranchExecuted ) goto ; call immediateDest; nullifyCond=nullifyNextCond; } # branchType = 1, unconditional immediate call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=1 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; if (nullify) goto ; build instruction; call immediateDest; } # branchType = 2, conditional immediate branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=2 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; if (nullify) goto ; build instruction; if (previousBranchCond && previousBranchExecuted) goto immediateDest; nullifyCond=nullifyNextCond; } # branchType = 2, conditional immediate branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=2 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond=0; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; # Need to reset branchCond if (nullify) goto ; build instruction; if (previousBranchCond) goto immediateDest; nullifyCond=nullifyNextCond; } # branchType = 3, conditional immediate call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=3 & branchCouldBeNullified=1 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; local previousBranchCond = branchCond; branchCond = 0; branchExecuted = 0; if (nullify) goto ; build instruction; if ( ! 
# NOTE(review): formatting in this region is machine-collapsed (many constructors
# per physical line; lines may begin/end mid-constructor).  Comment-only lines
# are lexically transparent to the SLEIGH tokenizer and are used as region notes.
# NOTE(review): several "goto ;" statements below have lost their <label>
# target (likely "<done>") to angle-bracket stripping during extraction —
# verify against the upstream pa-risc.sinc before relying on this text.
# --- Phase-1 delay-slot branch wrappers, continued: conditional immediate call
# (branchType=3) and indirect branch/call variants (branchType=4..7).  Each
# variant snapshots the previous branch state (branchCond/branchExecuted),
# clears the globals, optionally nullifies the delay-slot instruction via
# nullifyCond, builds the delay-slot instruction, then performs the deferred
# control transfer through immediateDest or branchIndDest.
(previousBranchCond && previousBranchExecuted) ) goto ; call immediateDest; nullifyCond=nullifyNextCond; } # branchType = 3, conditional immediate call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=3 & branchCouldBeNullified=0 & instruction & immediateDest [ phase=1; ] { local nullify = nullifyCond; nullifyNextCond=0; local previousBranchCond = branchCond; branchCond = 0; branchExecuted = 0; if (nullify) goto ; build instruction; if ( ! previousBranchCond ) goto ; call immediateDest; nullifyCond=nullifyNextCond; } # branchType = 4, unconditional indirect branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=4 & branchCouldBeNullified=1 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; if (nullify) goto ; build instruction; if ( ! previousBranchExecuted ) goto ; goto [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 4, unconditional indirect branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=4 & branchCouldBeNullified=0 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; if (nullify) goto ; build instruction; goto [branchIndDest]; } # branchType = 5, unconditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=5 & branchCouldBeNullified=1 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; if (nullify) goto ; build instruction; if ( ! 
# NOTE(review): the branchType=7 variants below clear nullifyNextCond but,
# unlike the branchType=4/5/6 variants, do NOT clear nullifyCond — confirm
# against upstream whether this asymmetry is intentional.
previousBranchExecuted ) goto ; call [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 5, unconditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=5 & branchCouldBeNullified=0 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond = 0; branchExecuted = 0; if (nullify) goto ; build instruction; call [branchIndDest]; } # branchType = 6, conditional indirect branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=6 & branchCouldBeNullified=1 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; if (nullify) goto ; build instruction; if ( ! previousBranchExecuted || ! previousBranchCond) goto ; goto [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 6, conditional indirect branch :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=6 & branchCouldBeNullified=0 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyCond = 0; nullifyNextCond=0; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; if (nullify) goto ; build instruction; if ( ! previousBranchCond) goto ; goto [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 7, conditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=7 & branchCouldBeNullified=1 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyNextCond=0; local previousBranchExecuted = branchExecuted; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; if (nullify) goto ; build instruction; if ( ! previousBranchExecuted || ! 
# --- General-purpose register subconstructors follow: R1/R2/RB/RR export the
# zero constant when the encoded register field is 0 (r0 reads as zero on
# PA-RISC), otherwise export the register varnode.  Then the 64-bit FP
# register accessors (FPR164/FPR264/FPRT64), the fused-op register fields,
# and the start of the 32-bit FP half-register table FPR132 (fpr1x=0 selects
# the left/"L" half, fr1half enumerates fr0L..fr31L, with fr1L..fr3L aliased
# onto the fpe2/fpe4/fpe6 exception registers).
previousBranchCond) goto ; call [branchIndDest]; nullifyCond=nullifyNextCond; } # branchType = 7, conditional indirect call :^instruction is phase=0 & branchEnable=1 & nullifyEnable=1 & branchType=7 & branchCouldBeNullified=0 & instruction [ phase=1; ] { local nullify = nullifyCond; nullifyNextCond=0; branchExecuted = 0; local previousBranchCond = branchCond; branchCond = 0; if (nullify) goto ; build instruction; if ( ! previousBranchCond ) goto ; call [branchIndDest]; nullifyCond=nullifyNextCond; } ############################################################ # Subconstructors ############################################################ ############################# # general purpose registers selected # by different fields for different purposes (index, base, general, src, target, ...) ############################# R1: reg1 is reg1 & reg1=0 { export 0:$(REGSIZE); } R1: reg1 is reg1 { export reg1; } R1dst: reg1 is reg1 { export reg1; } R2: reg2 is reg2 & reg2=0 { export 0:$(REGSIZE); } R2: reg2 is reg2 { export reg2; } R2dst: reg2 is reg2 { export reg2; } RT: t is t { export t; } RB: b is b & b=0 { export 0:$(REGSIZE); } RB: b is b { export b; } RX: is x=0 { export 0:$(REGSIZE); } RX: x is x { export x; } RR: r is r & r=0 { export 0:$(REGSIZE); } RR: r is r { export r; } SAR: "SAR" is epsilon { export sar; } SR0: sr0 is sr0 { export sr0; } R31: r31 is r31 { export r31; } # 64 bit fp register access FPR164: freg1 is freg1 { export freg1; } FPR264: freg2 is freg2 { export freg2; } FPRT64: fpt is fpt { export fpt; } # register encoding for fused ops (fmpyadd, fmpysub) FUSEDR1: fusedr1 is fusedr1 { export fusedr1; } FUSEDR2: fusedr2 is fusedr2 { export fusedr2; } FUSEDRA: fusedra is fusedra { export fusedra; } FUSEDTA: fusedta is fusedta { export fusedta; } FUSEDRT: fusedrt is fusedrt { export fusedrt; } # 32 bit fp register access, lower half (L) and upper half (R) FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 0 { export fr0L; } FPR132: freg1^"L" is fpr1x = 0 & 
freg1 & fr1half = 1 { export fpe2; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 2 { export fpe4; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 3 { export fpe6; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 4 { export fr4L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 5 { export fr5L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 6 { export fr6L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 7 { export fr7L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 8 { export fr8L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 9 { export fr9L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 10 { export fr10L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 11 { export fr11L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 12 { export fr12L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 13 { export fr13L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 14 { export fr14L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 15 { export fr15L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 16 { export fr16L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 17 { export fr17L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 18 { export fr18L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 19 { export fr19L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 20 { export fr20L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 21 { export fr21L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 22 { export fr22L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 23 { export fr23L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 24 { export fr24L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 25 { export fr25L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 26 { export fr26L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 27 { export fr27L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 28 { export fr28L; } FPR132: freg1^"L" is fpr1x = 0 & 
# --- FPR132 right-half ("R") entries: fpr1x=1 selects the upper word of each
# 64-bit FP register; fr1half=1..3 alias onto fpe3/fpe5/fpe7.
freg1 & fr1half = 29 { export fr29L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 30 { export fr30L; } FPR132: freg1^"L" is fpr1x = 0 & freg1 & fr1half = 31 { export fr31L; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 0 { export fr0R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 1 { export fpe3; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 2 { export fpe5; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 3 { export fpe7; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 4 { export fr4R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 5 { export fr5R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 6 { export fr6R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 7 { export fr7R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 8 { export fr8R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 9 { export fr9R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 10 { export fr10R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 11 { export fr11R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 12 { export fr12R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 13 { export fr13R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 14 { export fr14R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 15 { export fr15R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 16 { export fr16R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 17 { export fr17R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 18 { export fr18R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 19 { export fr19R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 20 { export fr20R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 21 { export fr21R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 22 { export fr22R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 23 { export fr23R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 24 { export fr24R; } FPR132: freg1^"R" is freg1 & fpr1x = 
# --- FPR232: identical half-register table keyed on the second FP operand
# fields (fpr2x / freg2 / fr2half) — must stay in lock-step with FPR132.
1 & fr1half = 25 { export fr25R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 26 { export fr26R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 27 { export fr27R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 28 { export fr28R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 29 { export fr29R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 30 { export fr30R; } FPR132: freg1^"R" is freg1 & fpr1x = 1 & fr1half = 31 { export fr31R; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 0 { export fr0L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 1 { export fpe2; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 2 { export fpe4; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 3 { export fpe6; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 4 { export fr4L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 5 { export fr5L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 6 { export fr6L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 7 { export fr7L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 8 { export fr8L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 9 { export fr9L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 10 { export fr10L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 11 { export fr11L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 12 { export fr12L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 13 { export fr13L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 14 { export fr14L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 15 { export fr15L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 16 { export fr16L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 17 { export fr17L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 18 { export fr18L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 19 { export fr19L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 20 { export fr20L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & 
fr2half = 21 { export fr21L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 22 { export fr22L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 23 { export fr23L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 24 { export fr24L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 25 { export fr25L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 26 { export fr26L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 27 { export fr27L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 28 { export fr28L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 29 { export fr29L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 30 { export fr30L; } FPR232: freg2^"L" is fpr2x = 0 & freg2 & fr2half = 31 { export fr31L; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 0 { export fr0R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 1 { export fpe3; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 2 { export fpe5; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 3 { export fpe7; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 4 { export fr4R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 5 { export fr5R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 6 { export fr6R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 7 { export fr7R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 8 { export fr8R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 9 { export fr9R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 10 { export fr10R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 11 { export fr11R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 12 { export fr12R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 13 { export fr13R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 14 { export fr14R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 15 { export fr15R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 16 { export fr16R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & 
# --- FPRT32: half-register table for the FP target operand
# (fptx / fpt / fpthalf), same left/right layout as FPR132/FPR232.
fr2half = 17 { export fr17R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 18 { export fr18R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 19 { export fr19R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 20 { export fr20R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 21 { export fr21R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 22 { export fr22R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 23 { export fr23R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 24 { export fr24R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 25 { export fr25R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 26 { export fr26R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 27 { export fr27R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 28 { export fr28R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 29 { export fr29R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 30 { export fr30R; } FPR232: freg2^"R" is freg2 & fpr2x = 1 & fr2half = 31 { export fr31R; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 0 { export fr0L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 1 { export fpe2; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 2 { export fpe4; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 3 { export fpe6; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 4 { export fr4L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 5 { export fr5L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 6 { export fr6L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 7 { export fr7L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 8 { export fr8L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 9 { export fr9L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 10 { export fr10L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 11 { export fr11L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 12 { export fr12L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 13 { export fr13L; } FPRT32: fpt^"L" is fptx = 0 & fpt & 
fpthalf = 14 { export fr14L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 15 { export fr15L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 16 { export fr16L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 17 { export fr17L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 18 { export fr18L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 19 { export fr19L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 20 { export fr20L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 21 { export fr21L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 22 { export fr22L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 23 { export fr23L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 24 { export fr24L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 25 { export fr25L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 26 { export fr26L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 27 { export fr27L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 28 { export fr28L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 29 { export fr29L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 30 { export fr30L; } FPRT32: fpt^"L" is fptx = 0 & fpt & fpthalf = 31 { export fr31L; } # 32 bit fp register access, upper half (R) FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 0 { export fr0R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 1 { export fpe3; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 2 { export fpe5; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 3 { export fpe7; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 4 { export fr4R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 5 { export fr5R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 6 { export fr6R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 7 { export fr7R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 8 { export fr8R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 9 { export fr9R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 10 { export fr10R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 11 { export fr11R; } 
# Remainder of the FPRT32 right-half ("R") table (fpthalf = 12..31), followed
# by the floating-point compare condition tables.
#
# fpcmp / fpcmp64: decode the 5-bit fpcond completer of FCMP into a 1-byte
# boolean `result` (single- vs double-precision operand variants).  Entries
# 0-3 are constant-false, 28-31 constant-true; the rest compare FPR2 against
# FPR1 with the f==/f</f<=/f>/f>=/f!= p-code float comparisons.  The two
# tables must display identical completer strings for the same fpcond value.
#
# FIX(review): removed a stray ';' from two display strings so the tables
# agree with each other and with the PA-RISC FCMP condition-completer table:
#   fpcmp   fpcond=23 was ",!<;"   -> ",!<"   (matches fpcmp64 fpcond=23)
#   fpcmp64 fpcond=3  was ",!<=>;" -> ",!<=>" (matches fpcmp   fpcond=3)
FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 12 { export fr12R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 13 { export fr13R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 14 { export fr14R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 15 { export fr15R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 16 { export fr16R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 17 { export fr17R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 18 { export fr18R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 19 { export fr19R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 20 { export fr20R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 21 { export fr21R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 22 { export fr22R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 23 { export fr23R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 24 { export fr24R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 25 { export fr25R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 26 { export fr26R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 27 { export fr27R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 28 { export fr28R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 29 { export fr29R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 30 { export fr30R; } FPRT32: fpt^"R" is fptx = 1 & fpt & fpthalf = 31 { export fr31R; } fpcmp: ",FALSE?" is fpcond=0 { result:1 = 0; export result; } fpcmp: ",false" is fpcond=1 { result:1 = 0; export result; } fpcmp: ",?" 
is fpcond=2 { result:1 = 0; export result; } fpcmp: ",!<=>" is fpcond=3 { result:1 = 0; export result; } fpcmp: ",=" is fpcond=4 & FPR132 & FPR232 { result:1 = FPR232 f== FPR132; export result; } fpcmp: ",=T" is fpcond=5 & FPR132 & FPR232 { result:1 = FPR232 f== FPR132; export result; } fpcmp: ",?=" is fpcond=6 & FPR132 & FPR232 { result:1 = FPR232 f== FPR132; export result; } fpcmp: ",!<>" is fpcond=7 & FPR132 & FPR232 { result:1 = FPR232 f== FPR132; export result; } fpcmp: ",!?>=" is fpcond=8 & FPR132 & FPR232 { result:1 = FPR232 f< FPR132; export result; } fpcmp: ",<" is fpcond=9 & FPR132 & FPR232 { result:1 = FPR232 f< FPR132; export result; } fpcmp: ",?<" is fpcond=10 & FPR132 & FPR232 { result:1 = FPR232 f< FPR132; export result; } fpcmp: ",!>=" is fpcond=11 & FPR132 & FPR232 { result:1 = FPR232 f< FPR132; export result; } fpcmp: ",!?>" is fpcond=12 & FPR132 & FPR232 { result:1 = FPR232 f<= FPR132; export result; } fpcmp: ",<=" is fpcond=13 & FPR132 & FPR232 { result:1 = FPR232 f<= FPR132; export result; } fpcmp: ",?<=" is fpcond=14 & FPR132 & FPR232 { result:1 = FPR232 f<= FPR132; export result; } fpcmp: ",!>" is fpcond=15 & FPR132 & FPR232 { result:1 = FPR232 f<= FPR132; export result; } fpcmp: ",!?<=" is fpcond=16 & FPR132 & FPR232 { result:1 = FPR232 f> FPR132; export result; } fpcmp: ",>" is fpcond=17 & FPR132 & FPR232 { result:1 = FPR232 f> FPR132; export result; } fpcmp: ",?>" is fpcond=18 & FPR132 & FPR232 { result:1 = FPR232 f> FPR132; export result; } fpcmp: ",!<=" is fpcond=19 & FPR132 & FPR232 { result:1 = FPR232 f> FPR132; export result; } fpcmp: ",!?<" is fpcond=20 & FPR132 & FPR232 { result:1 = FPR232 f>= FPR132; export result; } fpcmp: ",>=" is fpcond=21 & FPR132 & FPR232 { result:1 = FPR232 f>= FPR132; export result; } fpcmp: ",?>=" is fpcond=22 & FPR132 & FPR232 { result:1 = FPR232 f>= FPR132; export result; } fpcmp: ",!<" is fpcond=23 & FPR132 & FPR232 { result:1 = FPR232 f>= FPR132; export result; } fpcmp: ",!?=" is fpcond=24 & FPR132 & 
FPR232 { result:1 = FPR232 f!= FPR132; export result; } fpcmp: ",<>" is fpcond=25 & FPR132 & FPR232 { result:1 = FPR232 f!= FPR132; export result; } fpcmp: ",!=" is fpcond=26 & FPR132 & FPR232 { result:1 = FPR232 f!= FPR132; export result; } fpcmp: ",!=T" is fpcond=27 & FPR132 & FPR232 { result:1 = FPR232 f!= FPR132; export result; } fpcmp: ",!?" is fpcond=28 { result:1 = 1; export result; } fpcmp: ",<=>" is fpcond=29 { result:1 = 1; export result; } fpcmp: ",TRUE?" is fpcond=30 { result:1 = 1; export result; } fpcmp: ",TRUE" is fpcond=31 { result:1 = 1; export result; } fpcmp64: ",FALSE?" is fpcond=0 { result:1 = 0; export result; } fpcmp64: ",false" is fpcond=1 { result:1 = 0; export result; } fpcmp64: ",?" is fpcond=2 { result:1 = 0; export result; } fpcmp64: ",!<=>" is fpcond=3 { result:1 = 0; export result; } fpcmp64: ",=" is fpcond=4 & FPR164 & FPR264 { result:1 = FPR264 f== FPR164; export result; } fpcmp64: ",=T" is fpcond=5 & FPR164 & FPR264 { result:1 = FPR264 f== FPR164; export result; } fpcmp64: ",?=" is fpcond=6 & FPR164 & FPR264 { result:1 = FPR264 f== FPR164; export result; } fpcmp64: ",!<>" is fpcond=7 & FPR164 & FPR264 { result:1 = FPR264 f== FPR164; export result; } fpcmp64: ",!?>=" is fpcond=8 & FPR164 & FPR264 { result:1 = FPR264 f< FPR164; export result; } fpcmp64: ",<" is fpcond=9 & FPR164 & FPR264 { result:1 = FPR264 f< FPR164; export result; } fpcmp64: ",?<" is fpcond=10 & FPR164 & FPR264 { result:1 = FPR264 f< FPR164; export result; } fpcmp64: ",!>=" is fpcond=11 & FPR164 & FPR264 { result:1 = FPR264 f< FPR164; export result; } fpcmp64: ",!?>" is fpcond=12 & FPR164 & FPR264 { result:1 = FPR264 f<= FPR164; export result; } fpcmp64: ",<=" is fpcond=13 & FPR164 & FPR264 { result:1 = FPR264 f<= FPR164; export result; } fpcmp64: ",?<=" is fpcond=14 & FPR164 & FPR264 { result:1 = FPR264 f<= FPR164; export result; } fpcmp64: ",!>" is fpcond=15 & FPR164 & FPR264 { result:1 = FPR264 f<= FPR164; export result; } fpcmp64: ",!?<=" is fpcond=16 & FPR164 
& FPR264 { result:1 = FPR264 f> FPR164; export result; } fpcmp64: ",>" is fpcond=17 & FPR164 & FPR264 { result:1 = FPR264 f> FPR164; export result; } fpcmp64: ",?>" is fpcond=18 & FPR164 & FPR264 { result:1 = FPR264 f> FPR164; export result; } fpcmp64: ",!<=" is fpcond=19 & FPR164 & FPR264 { result:1 = FPR264 f> FPR164; export result; } fpcmp64: ",!?<" is fpcond=20 & FPR164 & FPR264 { result:1 = FPR264 f>= FPR164; export result; } fpcmp64: ",>=" is fpcond=21 & FPR164 & FPR264 { result:1 = FPR264 f>= FPR164; export result; } fpcmp64: ",?>=" is fpcond=22 & FPR164 & FPR264 { result:1 = FPR264 f>= FPR164; export result; } fpcmp64: ",!<" is fpcond=23 & FPR164 & FPR264 { result:1 = FPR264 f>= FPR164; export result; } fpcmp64: ",!?=" is fpcond=24 & FPR164 & FPR264 { result:1 = FPR264 f!= FPR164; export result; } fpcmp64: ",<>" is fpcond=25 & FPR164 & FPR264 { result:1 = FPR264 f!= FPR164; export result; } fpcmp64: ",!=" is fpcond=26 & FPR164 & FPR264 { result:1 = FPR264 f!= FPR164; export result; } fpcmp64: ",!=T" is fpcond=27 & FPR164 & FPR264 { result:1 = FPR264 f!= FPR164; export result; } fpcmp64: ",!?" is fpcond=28 { result:1 = 1; export result; } fpcmp64: ",<=>" is fpcond=29 { result:1 = 1; export result; } fpcmp64: ",TRUE?" 
is fpcond=30 { result:1 = 1; export result; } fpcmp64: ",TRUE" is fpcond=31 { result:1 = 1; export result; } ################################# # space register subconstructors ################################# SR: "srN" is s = 0 { export 0:4; } SR: sr1 is s = 1 & sr1 { export sr1; } SR: sr2 is s = 2 & sr2 { export sr2; } SR: sr3 is s = 3 & sr3 { export sr3; } SR3bit: sr0 is srbit2=0 & srbit1=0 & srbit0=0 & sr0 { export sr0; } SR3bit: sr1 is srbit2=0 & srbit1=0 & srbit0=1 & sr1 { export sr1; } SR3bit: sr2 is srbit2=0 & srbit1=1 & srbit0=0 & sr2 { export sr2; } SR3bit: sr3 is srbit2=0 & srbit1=1 & srbit0=1 & sr3 { export sr3; } SR3bit: sr4 is srbit2=1 & srbit1=0 & srbit0=0 & sr4 { export sr4; } SR3bit: sr5 is srbit2=1 & srbit1=0 & srbit0=1 & sr5 { export sr5; } SR3bit: sr6 is srbit2=1 & srbit1=1 & srbit0=0 & sr6 { export sr6; } SR3bit: sr7 is srbit2=1 & srbit1=1 & srbit0=1 & sr7 { export sr7; } # These two are the cosmetic constructors for printing purposes # They print out SR,RB or else just RB if the space register is determined by the lower bits of RB # s=0 means use the least significant two bits of the address in RB as the space register selection (added to 4) SRRB: (RB) is RB & s=0 { } # s=1-3 means use SR1 - SR3 SRRB: (SR,RB) is SR & RB { } # BE uses three bits for the space register SRRB3bit: (SR3bit,RB) is SR3bit & RB { } # these are the semantic constructors dealing with space registers # this first one gets the value in the appropriate space register and returns it. # it is used by LDSID. 
# --- SRVAL: semantic value of the selected space register.  For s=0 the top
# two bits of RB index sr4..sr7 via a computed register-space pointer;
# otherwise sr1..sr3 are exported directly.  SPCBASE currently exports only
# the RB offset (space-register combination is commented out as a known TODO).
# displacement2W / branchTarget2W / displacement3W / branchTarget3W implement
# the PA-RISC assemble_12 / assemble_17 scrambled-displacement decodes in
# disassembly actions; the live branchTarget* constructors also publish the
# computed destination into branchImmDest via globalset for the phase-1
# wrappers.  Commented-out alternates of earlier revisions are retained.
# NOTE(review): the line "#branchTarget2W: target is w & w2 [ branchTarget2W:
# target is w & w2less2 & w2_2 [" shows a commented-out header running directly
# into the live constructor header — an artifact of collapsed formatting, not
# a nested definition.
SRVAL: SR is SR & RB & s=0 { local selbits = (RB >> 30); srreg:4 = &sr4 + 4 * selbits; spc:4 = *[register] srreg; export spc; } SRVAL: sr1 is sr1 & s=1 { export sr1; } SRVAL: sr2 is sr2 & s=2 { export sr2; } SRVAL: sr3 is sr3 & s=3 { export sr3; } # TODO This is broken until we decide on how to handle space registers and a 64 bit extended address space SPCBASE: is SRVAL & RB { # space:$(ADDRSIZE) = zext(SRVAL); off:$(ADDRSIZE) = zext(RB); # need to decide whether to remove the lower bits to hide privilege-- TODO & 0xFFFFFFFFFFFFFFFC; # address:$(ADDRSIZE) = (space << 32) | offset; # export address; export off; } ############################################### # encodings for branches ############################################### # 12 bit displacement encoded using assemble_12 displacement2W: target is w & w2 [ target = (1-(w*2)) * ( ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { temp:$(ADDRSIZE) = target; export temp; } #branchTarget2W: target is w & w2 [ target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFFF800) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { temp:$(ADDRSIZE) = target; export temp; } # this has the space register added #branchTarget2W: target is w & w2 [ target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFFF800) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { temp:$(ADDRSIZE) = target; temp = temp + (iasq_front<<32); export *:$(ADDRSIZE) temp; } ##### good before caret branchTarget2W: target is w & w2 [ target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFFF800) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { export *:$(ADDRSIZE) target; } #branchTarget2W: target is w & w2 [ branchTarget2W: target is w & w2less2 & w2_2 [ # target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFFF800) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); target = inst_start + 8 + 4 * ( ((-1 * w) << 11) | (w2_2 << 10) | w2less2 ); temp32 = branchImmDest; branchImmDest = target; globalset(inst_next, branchImmDest); branchImmDest = temp32; ] { export *:$(ADDRSIZE) target; } # 17 bit 
displacement encoded using assemble_17 #displacement3W: target is w & w1 & w2 [ target = 4 * ( (w * 0xFFFFFFFFFFFF0000) | (w1 << 11) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { temp:$(ADDRSIZE) = target; export temp; } displacement3W: target is w & w1 & w2less2 & w2_2 [ target = 4 * ( ((-1 * w) << 16) | (w1 << 11) | (w2_2 << 10) | w2less2 );] { temp:$(ADDRSIZE) = target; export temp; } #branchTarget3W: target is w & w1 & w2 [ target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFF0000) | (w1 << 11) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { temp:$(ADDRSIZE) = target; export temp; } #branchTarget3W: target is w & w1 & w2 [ target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFF0000) | (w1 << 11) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { temp:$(ADDRSIZE) = target; target = target + (iasq_front << 32); export temp; } #####good before caret branchTarget3W: target is w & w1 & w2 [ target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFF0000) | (w1 << 11) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); ] { export *:$(ADDRSIZE) target; } #branchTarget3W: target is w & w1 & w2 branchTarget3W: target is w & w1 & w2less2 & w2_2 [ # target = inst_start + 8 + 4 * ( (w * 0xFFFFFFFFFFFF0000) | (w1 << 11) | ((w2 & 0x1) << 10) | ((w2>>1) & 0x3FF) ); target = inst_start + 8 + 4 * ( ((-1 * w) << 16) | (w1 << 11) | (w2_2 << 10) | w2less2 ); temp32 = branchImmDest; branchImmDest = target; globalset(inst_next, branchImmDest); branchImmDest = temp32; ] { export *:$(ADDRSIZE) target; } # simple IP relative branch IPRelativeIndexedTarget: is RX { target:$(ADDRSIZE) = inst_start + (RX<<3) + 8; branchIndDest = target; export target; } # vectored (base register plus index register) branch IndexedTarget: is RX & RB { target:$(ADDRSIZE) = (RX<<3) + RB; branchIndDest = target; export target; } # vectored (base register plus index register -- but index register is r0 so skip it since this is a return) branch ReturnTarget: is RB { branchIndDest = RB; export RB; } # generate the target address for 
an external branch externalTarget: displacement3W^SRRB3bit is RB & displacement3W & SRRB3bit { # spaceID:$(ADDRSIZE) = zext(SRVAL); # spaceID = spaceID << 32; # spaceID:$(ADDRSIZE) = 0; # currently ignoring the spaceID TODO FIX THIS? # target = spaceID + sext(RB) + sext(displacement3W); target:$(ADDRSIZE) = sext(displacement3W) + sext(RB); branchIndDest = target; export target; } shiftC: shift is cp [ shift=31-cp; ] { amount:4 = shift; export amount; } shiftCLen: shift is im5 [ shift=32-im5; ] { amount:4 = shift; export amount; } #lse14: offset is sim14 & bit0 [ offset = (-1 * bit0) * ( (sim14 >> 1) & 0x1FFF ); ] { temp:4 = offset; export temp; } #lse14: offset is sim14 & bit0 [ offset = (-1 * 0x2000 * bit0) | ( (sim14 >> 1) & 0x1FFF ); ] { temp:4 = sext(offset:4); export temp; } lse14: off is im14less0 & bit0 [ off = ((-1 * bit0) << 13) | im14less0; ] { temp:4 = sext(off:4); export temp; } ####lse14: offset is sim14 & bit0 [ offset = (0xFFFFFFFFFFFFE000 * bit0) | ( (sim14 >> 1) & 0x1FFF ); ] { temp:4 = offset; export temp; } #lse5: offset is sim5 & bit0 [ offset = (-1 * 0x10 * bit0) | ( (sim5 >> 1) & 0xF ); ] { temp:1 = sext(offset:1); export temp; } lse5: off is im5less0 & bit0 [ off = ((-1 * bit0) << 4) | im5less0; ] { temp:1 = sext(off:1); export temp; } #highlse5: offset is highIm5 & bit16 [ offset = (-1 * 0x10 * bit16) | ( (highIm5 >> 1) & 0xF ); ] { temp:1 = offset; export temp; } highlse5: off is highIm5less16 & bit16 [ off = ((-1 * bit16) << 4) | highIm5less16; ] { temp:1 = off; export temp; } #lse21: offset is sim21 [ offset = ( ((sim21 & 0x1) * 0xFFFFFFFFFFF00000) | ((sim21 & 0xFFE) << 8) | ((sim21 & 0xC000) >> 7) | ((sim21 & 0x1F0000) >> 14) | ((sim21 & 0x3000) >> 12) ) << 11 ; ] { temp:$(REGSIZE) = offset; export temp; } lse21: off is im21less0 & bit0 & im21_1_12 & im21_12_14 & im21_14_16 & im21_16_21 [ off = ( ((-1 * bit0) << 20) | (im21_1_12 << 9) | (im21_14_16 << 7) | (im21_16_21 << 2) | im21_12_14 ) << 11; ] { temp:$(REGSIZE) = off; export temp; } 
# --- lse11: sign-extended 11-bit immediate (sign in bit 0, magnitude in bits
# 1..10); also mirrors the value into the temp32 context field as a
# workaround (see original note about propagation).  OFF_BASE_14 forms the
# effective address SPCBASE + sext(lse14).  ShiftCond decodes the 3-bit `c`
# field of shift/extract ops into a 1-byte boolean over nullifyCondResult
# (never / ==0 / msb set / lsb set / always / !=0 / msb clear / lsb clear);
# ShiftCondNullify arms delay-slot nullification via nullifyEnable+globalset
# when c!=0.
# Note for the im11 11-bit immediate, the sign is in bit 0, and the rest of the value is in bit 1 to 10. # Negative numbers are stored 2s complement, with bit0 set to 1. # # Need to set temp32 to lse11 since the value of lse11 does not propogate so well to uplevel tables, maybe a compiler bug # #lse11: immed is im11 & bit0 lse11: immed is im11less0 & bit0 [ immed = ((-1*bit0) << 10) | im11less0; temp32 = immed; ] # [ immed = (bit0 * 0xFFFFFFFFFFFFFC00) | ( (im11 >> 1) & 0x3FF ); temp32 = immed; ] { temp:4 = immed; export temp; } OFF_BASE_14: lse14^SRRB is lse14 & SRRB & SPCBASE { temp:$(ADDRSIZE) = SPCBASE + sext(lse14); export temp; } ############################# # condition codes ############################# # shift conditions. There are no inverted forms of these conditions. ShiftCond: is c=0 { export 0:1; } # never ShiftCond: is c=1 & RT { tmp:1 = (nullifyCondResult == 0) ; export tmp; } # equal ShiftCond: is c=2 & RT { tmp:1 = (((nullifyCondResult >> ($(REGSIZE)*8 - 1)) & 1) != 0) ; export tmp; } # leftmost bit is one ShiftCond: is c=3 & RT { tmp:1 = ((nullifyCondResult & 0x1) != 0) ; export tmp; } # rightmost bit is 1 (odd) ShiftCond: is c=4 { export 1:1; } # always ShiftCond: is c=5 & RT { tmp:1 = (nullifyCondResult != 0) ; export tmp; } # some bits are one ShiftCond: is c=6 & RT { tmp:1 = (((nullifyCondResult >> ($(REGSIZE)*8 - 1)) & 1) == 0) ; export tmp; } # leftmost bit is zero ShiftCond: is c=7 & RT { tmp:1 = ((nullifyCondResult & 0x1) == 0) ; export tmp; } # rightmost bit is zero (even) ShiftCondNullify: is c=0 { } ShiftCondNullify: is ShiftCond [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = ShiftCond; } # deposit conditions. There are no inverted forms of these conditions. 
# --- DepCond / ExtrCond: deposit and extract condition tables over
# nullifyCondResult (same 8-way decode as ShiftCond; DepCond uses signed
# compares s< / s>= for the leftmost-bit tests).  *CondNullify wrappers arm
# delay-slot nullification.  BVBCond / BBCond test a single bit of R1
# selected by SAR or by the bboffset field.  RegUnitCond implements the
# UNIT sub-word conditions (some-byte-zero / some-halfword-zero); the BCD
# carry variants (c=4,6,7) are stubbed to constant false per the TODOs.
# RegAddCond (start of table) decodes ADD conditions per PA-RISC Table 5-4.
DepCond: is c=0 { export 0:1; } # never DepCond: is c=1 { tmp:1 = (nullifyCondResult == 0) ; export tmp; } # equal DepCond: is c=2 { tmp:1 = (nullifyCondResult s< 0) ; export tmp; } # leftmost bit is one (< 0) DepCond: is c=3 { tmp:1 = ((nullifyCondResult & 0x1) == 1) ; export tmp; } # rightmost bit is 1 (odd) DepCond: is c=4 { export 1:1; } # always DepCond: is c=5 { tmp:1 = (nullifyCondResult != 0) ; export tmp; } # some bits are one DepCond: is c=6 { tmp:1 = (nullifyCondResult s>= 0) ; export tmp; } # leftmost bit is zero (>=) DepCond: is c=7 { tmp:1 = ((nullifyCondResult & 0x1) == 0) ; export tmp; } # rightmost bit is zero (even) DepCondNullify: is c=0 { } DepCondNullify: is DepCond [nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = DepCond; } # extract conditions. The extract ops target R1, not R2 like deposit does ExtrCond: is c=0 { export 0:1; } # never ExtrCond: is c=1 { tmp:1 = (nullifyCondResult == 0); export tmp; } # equal ExtrCond: is c=2 { tmp:1 = (((nullifyCondResult >> ($(REGSIZE)*8 - 1)) & 1) != 0); export tmp; } # leftmost bit is one ExtrCond: is c=3 { tmp:1 = ((nullifyCondResult & 0x1) != 0) ; export tmp; } # rightmost bit is 1 (odd) ExtrCond: is c=4 { export 1:1; } # always ExtrCond: is c=5 { tmp:1 = (nullifyCondResult != 0); export tmp; } # some bits are one ExtrCond: is c=6 { tmp:1 = (((nullifyCondResult >> ($(REGSIZE)*8 - 1)) & 1) == 0); export tmp; } # leftmost bit is zero ExtrCond: is c=7 { tmp:1 = ((nullifyCondResult & 0x1) == 0); export tmp; } # rightmost bit is zero (even) ExtrCondNullify: is c=0 { } ExtrCondNullify: is ExtrCond [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = ExtrCond; } # a subset of the SED conditions used for bit tests BVBCond: is c=2 & R1 { tmp:1 = ((R1 >> (31-sar)) & 0x1) == 1 ; export tmp; } # target bit is one BVBCond: is c=6 & R1 { tmp:1 = ((R1 >> (31-sar)) & 0x1) == 0 ; export tmp; } # target bit is zero BBCond: is c=2 & R1 & bboffset { tmp:1 = ((R1 >> 
(31-bboffset)) & 0x1) == 1 ; export tmp; } # target bit is one BBCond: is c=6 & R1 & bboffset { tmp:1 = ((R1 >> (31-bboffset)) & 0x1) == 0 ; export tmp; } # target bit is zero # # unit conditions for checking byte ranges within a word # RegUnitCond: is c=0 { export 0:1; } # never RegUnitCond: is c=2 & RT { tmp:1 = (RT:1 == 0) || ((RT:2 & 0xFF00) == 0) || ((RT:3 & 0xFF0000) == 0) || ((RT & 0xFF000000) == 0) ; export tmp; } # some byte zero RegUnitCond: is c=3 & RT { tmp:1 = ((RT:2 & 0xFFFF) == 0) || ((RT & 0xFFFF0000) == 0) ; export tmp; } # some halfword zero RegUnitCond: is c=4 { export 0:1; } # some digit carry -- TODO FIGURE OUT BCD RegUnitCond: is c=6 { export 0:1; } # some byte carry -- TODO BCD RegUnitCond: is c=7 { export 0:1; } # some halfword carry -- TODO BCD UnitCond: "" is RegUnitCond & fv=0 { export RegUnitCond; } UnitCond: "" is RegUnitCond & fv=1 { tmp:1 = ! RegUnitCond; export tmp; } UnitCondNullify: is c=0 & fv=0 { } UnitCondNullify: is UnitCond [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = UnitCond; } UnitCondSym: RegUnitCondSym is RegUnitCondSym & fv=0 { } UnitCondSym: InvUnitCondSym is InvUnitCondSym & fv=1 { } # ##### The Add Conditions from table 5-4 on page 5-5 # RegAddCond: is c = 0 { export 0:1; } # never RegAddCond: is c = 1 & R1 & R2 { tmp:1 = (R1 == R2) ; export tmp; } # equal RegAddCond: is c = 2 & R1 & R2 { tmp:1 = (R1 s< -R2) ; export tmp; } # signed less than negative of R2 RegAddCond: is c = 3 & R1 & R2 { tmp:1 = (R1 s<= R2) ; export tmp; } # signed less than or equal to R2 RegAddCond: is c = 4 & R1 & R2 { tmp:1 = ! carry(R1,R2) ; export tmp; } # unsigned sum does not overflow RegAddCond: is c = 5 & R1 & R2 { tmp:1 = (R1 + R2) == 0 ; export tmp; } # sum is zero or no overflow (why two definitions??) 
RegAddCond: is c = 6 & R1 & R2 { tmp:1 = scarry(R1,R2); export tmp; } # signed sum overflows RegAddCond: is c = 7 & R1 & R2 { tmp:1 = ((R1+R2) & 0x1) == 0x1 ; export tmp; } # sum is odd AddCond: is RegAddCond & fv=0 { export RegAddCond; } AddCond: is RegAddCond & fv=1 { tmp:1 = ! RegAddCond; export tmp; } AddCondNullify: is c=0 & fv=0 { } AddCondNullify: is AddCond [ nullifyEnable=1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = AddCond; } AddCondSym: RegAddCondSym is RegAddCondSym & fv=0 { } AddCondSym: InvAddCondSym is InvAddCondSym & fv=1 { } RegAddCondI: is c = 0 { export 0:1; } # never RegAddCondI: is c = 1 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = (val == -R2) ; export tmp; } # equal to negated RegAddCondI: is c = 2 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = (val s< -R2) ; export tmp; } # signed less than negated R2 RegAddCondI: is c = 3 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = (val s<= -R2) ; export tmp; } # signed less than or equal to R2 RegAddCondI: is c = 4 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = ! carry(val,R2) ; export tmp; } # unsigned sum does not overflow RegAddCondI: is c = 5 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = (val + R2) == 0 ; export tmp; } # sum is zero or no overflow (why two definitions??) 
RegAddCondI: is c = 6 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = scarry(val,R2); export tmp; } # signed sum overflows RegAddCondI: is c = 7 & highlse5 & R2 { val:$(REGSIZE) = sext(highlse5:1); tmp:1 = ((val+R2) & 0x1) == 0x1 ; export tmp; } # sum is odd # Some notes- # lse11 is derived from an 11-bit immediate, so we need to take 2 bytes, not 1 byte as in the original coding # R2 must sometimes be negated before the comparison, as per the manual, Table 5-4 # The arithmetic comparison must be performed on a larger size temp than the original, so that negating 0x80000000 works correctly # temp32 is used because the value of lse11 in the pcode does not flow so well to here - maybe a compiler bug? # RegAddCondI11: is c = 0 { export 0:1; } # never RegAddCondI11: is temp32 & c = 1 & lse11 & R2 { val:8 = sext(temp32:2); tmp_R2:8 = sext(R2); tmp:1 = (val == -tmp_R2) ; export tmp; } # equal RegAddCondI11: is temp32 & c = 2 & lse11 & R2 { val:8 = sext(temp32:2); tmp_R2:8 = sext(R2); tmp:1 = (val s< -tmp_R2) ; export tmp; } # signed less than negative of R2 RegAddCondI11: is temp32 & c = 3 & lse11 & R2 { val:8 = sext(temp32:2); tmp_R2:8 = sext(R2); tmp:1 = (val s<= -tmp_R2) ; export tmp; } # signed less than or equal to R2 RegAddCondI11: is temp32 & c = 4 & lse11 & R2 { val:$(REGSIZE) = sext(temp32:2); tmp:1 = ! carry(val,R2) ; export tmp; } # unsigned sum does not overflow RegAddCondI11: is temp32 & c = 5 & lse11 & R2 { # Don't need 64-bit arithmetic here val:$(REGSIZE) = sext(temp32:2); tmp:1 = (val + R2) == 0 ; export tmp; } # sum is zero or no overflow (why two definitions??) 
RegAddCondI11: is temp32 & c = 6 & lse11 & R2 { val:$(REGSIZE) = sext(temp32:2); tmp:1 = scarry(val,R2); export tmp; } # signed sum overflows RegAddCondI11: is temp32 & c = 7 & lse11 & R2 { val:8 = sext(temp32:2); tmp_R2:8 = sext(R2); tmp:1 = ((val+tmp_R2) & 0x1) == 0x1 ; export tmp; } # sum is odd AddCondI11: is RegAddCondI11 & fv=0 { export RegAddCondI11; } AddCondI11: is RegAddCondI11 & fv=1 { temp:1 = ! RegAddCondI11; export temp; } AddCondI11Nullify: is c=0 & fv=0 { } AddCondI11Nullify: is AddCondI11 [ nullifyEnable=1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = AddCondI11; } # #### Compare / Subtract Instructions # RegCSCond: is c=0 & R1 & R2 { export 0:1; } # never RegCSCond: is c=1 & R1 & R2 { tmp:1 = (R1 == R2) ; export tmp; } # equal RegCSCond: is c=2 & R1 & R2 { tmp:1 = (R1 s< R2) ; export tmp; } # signed less than RegCSCond: is c=3 & R1 & R2 { tmp:1 = (R1 s<= R2) ; export tmp; } # signed less than equal RegCSCond: is c=4 & R1 & R2 { tmp:1 = (R1 < R2) ; export tmp; } # unsigned less than RegCSCond: is c=5 & R1 & R2 { tmp:1 = (R1 <= R2) ; export tmp; } # unsigned less than equal RegCSCond: is c=6 & R1 & R2 { tmp:1 = sborrow(R1,R2) ; export tmp; } # signed minus overflows (borrows) RegCSCond: is c=7 & R1 & R2 { tmp:1 = ((R1 - R2) & 0x1) == 1 ; export tmp; } # odd CSCond: is fv=0 & RegCSCond { export RegCSCond; } CSCond: is fv=1 & RegCSCond { tmp:1 = ! RegCSCond; export tmp; } CSCondNullify: is c=0 & fv=0 { } CSCondNullify: is CSCond [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = CSCond; } CSCondSym: RegCSCondSym is RegCSCondSym & fv=0 { } CSCondSym: InvCSCondSym is InvCSCondSym & fv=1 { } # The Compare or Subtract conditions compared with 5 bit immediates # This is used in the COMIB[TF] instructions. The inverted versions are never used. 
RegCSCondI: is c=0 & R2 & highlse5 { export 0:1; } # never RegCSCondI: is c=1 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); tmp:1 = (val == R2) ; export tmp; } # equal RegCSCondI: is c=2 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); tmp:1 = (val s< R2) ; export tmp; } # signed less than RegCSCondI: is c=3 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); tmp:1 = (val s<= R2) ; export tmp; } # signed less than equal RegCSCondI: is c=4 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); tmp:1 = (val < R2) ; export tmp; } # unsigned less than RegCSCondI: is c=5 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); tmp:1 = (val <= R2) ; export tmp; } # unsigned less than equal RegCSCondI: is c=6 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); local diff = (val - R2); tmp:1 = (val s> 0 && R2 s> 0 && diff s< 0) || (val s< 0 && R2 s< 0 && diff s> 0) ; export tmp; } # overflow RegCSCondI: is c=7 & R2 & highlse5 { val:$(REGSIZE) = sext(highlse5); tmp:1 = ((val - R2) & 0x1) == 1 ; export tmp; } # odd # The Compare or Subtract conditions compared with 11 bit immediates. These are used with the SUBI[O] and COMICLR instructions. Both regular and inverted forms are used. 
# Compare/Subtract conditions with a sign-extended 11-bit immediate on the left.
RegCSCondI11: is c=0 & R2 & lse11 { export 0:1; } # never
RegCSCondI11: is c=1 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); tmp:1 = (val == R2) ; export tmp; } # equal
RegCSCondI11: is c=2 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); tmp:1 = (val s< R2) ; export tmp; } # signed less than
RegCSCondI11: is c=3 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); tmp:1 = (val s<= R2) ; export tmp; } # signed less than equal
RegCSCondI11: is c=4 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); tmp:1 = (val < R2) ; export tmp; } # unsigned less than
RegCSCondI11: is c=5 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); tmp:1 = (val <= R2) ; export tmp; } # unsigned less than equal
# NOTE(review): same questionable overflow formula as RegCSCondI c=6 (fires on
# same-sign operands) -- confirm against sborrow semantics before changing.
RegCSCondI11: is c=6 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); local diff = (val - R2); tmp:1 = (val s> 0 && R2 s> 0 && diff s< 0) || (val s< 0 && R2 s< 0 && diff s> 0) ; export tmp; } # overflow
RegCSCondI11: is c=7 & R2 & lse11 { val:$(REGSIZE) = sext(lse11); tmp:1 = ((val - R2) & 0x1) == 1 ; export tmp; } # odd
CSCondI11: is RegCSCondI11 & fv=0 { export RegCSCondI11; }
CSCondI11: is RegCSCondI11 & fv=1 { temp:1 = ! RegCSCondI11; export temp; }
CSCondI11Nullify: is c=0 & fv=0 { }
CSCondI11Nullify: is CSCondI11 [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = CSCondI11; }
#
#### Logical Conditions
#
# Conditions on the result RT of a logical op; only c=0,1,2,3,7 are defined.
RegLogicCond: is c=0 { export 0:1; } # never
RegLogicCond: is c=1 & RT { tmp:1 = (RT == 0) ; export tmp; } # equal, all zeros
RegLogicCond: is c=2 & RT { tmp:1 = (RT & 0x80000000) != 0 ; export tmp; } # <, leftmost bit is 1
RegLogicCond: is c=3 & RT { tmp:1 = ((RT & 0x80000000) != 0) || RT == 0 ; export tmp; } # <=, leftmost bit is 1 or all bits are zero
RegLogicCond: is c=7 & RT { tmp:1 = (RT & 0x1) == 0x1; export tmp; } # odd, rightmost bit is 1
LogicCond: is fv=0 & RegLogicCond { tmp:1 = RegLogicCond; export tmp; } # non-inverted cases
LogicCond: is fv=1 & RegLogicCond { tmp:1 = ! RegLogicCond; export tmp; } # inverted cases
LogicCondSym: RegLogicCondSym is RegLogicCondSym & fv=0 { }
LogicCondSym: InvLogicCondSym is InvLogicCondSym & fv=1 { }
LogicCondNullify: is c=0 & fv=0 { }
LogicCondNullify: is LogicCond [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { nullifyNextCond = LogicCond; }
##########################################
# Completers from the tables in chapter 5
##########################################
# Each *Cmplt subtable exports the effective address `off` and, for the
# modifying (",M"/",SM") forms, updates the base register RB as a side effect.
# Table 5-11 on page 5-22
# The shifted form for byte doesn't shift, as byte addressing is single byte aligned
indexedByteAccessCmplt: is u=0 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(RX); export off; } # none
indexedByteAccessCmplt: ",M" is u=0 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + RX; export off; } # M, modify, post inc by RX
indexedByteAccessCmplt: ",S" is u=1 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(RX); export off; } # S, no shift for bytes
indexedByteAccessCmplt: ",SM" is u=1 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + sext(RX); export off; } # SM, shift and modify
# The shifted form for halfword shifts by 2 bytes, since that is the size of a halfword
indexedHalfwordAccessCmplt: is u=0 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(RX); export off; } # none
indexedHalfwordAccessCmplt: ",M" is u=0 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + RX; export off; } # M, modify, post inc by RX
indexedHalfwordAccessCmplt: ",S" is u=1 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext((RX << 1)); export off; } # S, shift left by 1
indexedHalfwordAccessCmplt: ",SM" is u=1 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + sext(RX << 1); export off; } # SM, shift and modify
# The shifted form for words shifts by 2 (x4), since words are aligned on 4 and increment by 4
indexedWordAccessCmplt: is u=0 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(RX); export off; } # none
indexedWordAccessCmplt: ",M" is u=0 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + RX; export off; } # M, modify, post inc by RX
indexedWordAccessCmplt: ",S" is u=1 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext((RX << 2)); export off; } # S, shift left by 2
indexedWordAccessCmplt: ",SM" is u=1 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + sext(RX << 2); export off; } # SM, shift and modify
# same as above, but shifts by 3 bits. Used for the LDCWX instruction
indexedDoublewordAccessCmplt: is u=0 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(RX); export off; } # none
indexedDoublewordAccessCmplt: ",M" is u=0 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + RX; export off; } # M, modify, post inc by RX
indexedDoublewordAccessCmplt: ",S" is u=1 & m=0 & RX & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext((RX << 3)); export off; } # S, shift left by 3 NOTE YES THIS IS 3
indexedDoublewordAccessCmplt: ",SM" is u=1 & m=1 & RX & RB & SPCBASE { off:$(ADDRSIZE) = SPCBASE; RB = RB + sext(RX << 3); export off; } # SM, shift and modify
# Table 5-12 on Page 5-24
# these are for loads, e.g. ldws
shortDispCmplt: is m=0 & highlse5 & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(highlse5); export off; } # no modification
shortDispCmplt: ",MA" is u=0 & m=1 & RB & RX & SPCBASE & highlse5 { off:$(ADDRSIZE) = SPCBASE; RB = RB + sext(highlse5); export off; } # modify after
shortDispCmplt: ",MB" is u=1 & m=1 & RB & RX & SPCBASE & highlse5 { local lse = sext(highlse5); off:$(ADDRSIZE) = SPCBASE + sext(lse); RB = RB + lse; export off; } # modify before
# short displacement for stores, e.g. stws
storeShortDispCmplt: is m=0 & lse5 & SPCBASE { off:$(ADDRSIZE) = SPCBASE + sext(lse5); export off; } # no modification
storeShortDispCmplt: ",MA" is u=0 & m=1 & RB & RX & SPCBASE & lse5 { off:$(ADDRSIZE) = SPCBASE; RB = RB + sext(lse5); export off; } # modify after
storeShortDispCmplt: ",MB" is u=1 & m=1 & RB & RX & SPCBASE & lse5 { local lse = sext(lse5); off:$(ADDRSIZE) = SPCBASE + sext(lse); RB = RB + lse; export off; } # modify before
# Table 5-13 on page 5-26
# The modifying forms word-align the updated base (mask 0xFFFFFFFC).
storeBytesShortCmplt: is u=0 & m=0 & SPCBASE & lse5 { off:$(ADDRSIZE) = SPCBASE + sext(lse5); export off; } # none / beginning, don't modify base register
storeBytesShortCmplt: ",BM" is u=0 & m=1 & SPCBASE & RR & lse5 { off:$(ADDRSIZE) = SPCBASE + sext(lse5); RR = (RR + sext(lse5)) & 0xFFFFFFFC; export off; } # beginning, modify base register
storeBytesShortCmplt: ",E" is u=1 & m=0 & SPCBASE & lse5 { off:$(ADDRSIZE) = SPCBASE + sext(lse5); export off; } # ending, don't modify
storeBytesShortCmplt: ",EM" is u=1 & m=1 & SPCBASE & RR & lse5 { off:$(ADDRSIZE) = SPCBASE + sext(lse5); RR = (RR + sext(lse5)) & 0xFFFFFFFC; export off; } # ending, modify
# u fixed at 0, Table 5-11 on page 5-22, for LPA and related system level opcodes
sysCmplt: is m=0 { }
sysCmplt: ",M" is m=1 { }
# Table 5-8 on page 5-17
loadCC: is cc=0 { }
loadCC: ",SL" is cc=2 { }
# Table 5-9 on page 5-18
storeCC: is cc=0 { }
storeCC: ",BC" is cc=1 { }
storeCC: ",SL" is cc=2 { }
# Table 5-10 on page 5-18
loadClearCC: is cc=0 { }
loadClearCC: ",CO" is cc=1 { }
# nullification as used with branches
#####nullifyForBranch: is n=0 { export 0:1; }
#####nullifyForBranch: ",N" is n=1 { export 1:1; }
# caret versions
nullifyForBranch: is n=0 { export 0:1; }
nullifyForBranch: ",N" is n=1 [ nullifyEnable = 1; globalset(inst_next, nullifyEnable); ] { export 1:1; }
nullifySymForBranch: is n=0 { }
nullifySymForBranch: ",N" is n=1 { }
# nullification as used with special function unit ops
nullifyForSpecial: is spn=0 { }
nullifyForSpecial: ",N" is spn=1 { }
# Floating point completers
#csize: "SGL" is fpsize=0 & opfam=0x0C { }
#csize: "DBL" is fpsize=1 & opfam=0x0C { }
#csize: "QUAD" is fpsize=3 & opfam=0x0C { }
#esize: "SGL" is fpsize=0 & opfam=0x0E { }
#esize: "DBL" is fpsize=1 & opfam=0x0E { }
SFU: sfu is sfu { }
================================================
FILE: pypcode/processors/PA-RISC/data/languages/pa-risc32.cspec
================================================
================================================
FILE: pypcode/processors/PA-RISC/data/languages/pa-risc32.pspec
================================================
================================================
FILE: pypcode/processors/PA-RISC/data/languages/pa-risc32be.slaspec
================================================
# SLA specification file for PA-RISC 32 bit big endian
@define ENDIAN "big"
@define BITS "32"
@include "pa-risc.sinc"
@include "pa-riscInstructions.sinc"
================================================
FILE: pypcode/processors/PA-RISC/data/languages/pa-riscInstructions.sinc
================================================
############################
#
# PA-RISC 1.1 Instructions
#
############################
with : phase=1 {
############################
# Load and Store and Address Generation Instructions
############################
# Long-displacement loads/stores: OFF_BASE_14 exports the effective address.
:LDW OFF_BASE_14,R1dst is opfam=0x12 & OFF_BASE_14 & R1dst { addr:$(ADDRSIZE) = OFF_BASE_14; R1dst = zext(*[ram]:4 addr) ; }
:LDH OFF_BASE_14,R1dst is opfam=0x11 & OFF_BASE_14 & R1dst { addr:$(ADDRSIZE) = OFF_BASE_14; R1dst = zext(*[ram]:2 addr) ; }
:LDB OFF_BASE_14,R1dst is opfam=0x10 & OFF_BASE_14 & R1dst { addr:$(ADDRSIZE) = OFF_BASE_14; R1dst = zext(*[ram]:1 addr) ; }
:STW R1,OFF_BASE_14 is opfam=0x1A & OFF_BASE_14 & R1 { addr:$(ADDRSIZE) = OFF_BASE_14; *[ram]:4 addr = R1:4; }
:STH R1,OFF_BASE_14 is opfam=0x19 & R1 & OFF_BASE_14 { addr:$(ADDRSIZE) = OFF_BASE_14; *[ram]:2 addr = R1:2; }
:STB R1,OFF_BASE_14 is opfam=0x18 & OFF_BASE_14 & R1 { addr:$(ADDRSIZE) = OFF_BASE_14; *[ram]:1 addr = R1:1; }
# LDWM: a negative displacement modifies the address before the access
# (off picks up the displacement); non-negative modifies after (off stays 0).
# The base register RB is updated by the displacement in both cases.
:LDW^",M" OFF_BASE_14,R1dst is opfam=0x13 & OFF_BASE_14 & R1dst & RB & SPCBASE & lse14 {
	off:$(ADDRSIZE) = 0;
	if (lse14 s>= 0x0) goto <done>;
	off = sext(lse14);
	<done>
	addr:$(ADDRSIZE) = SPCBASE + off;
	RB = RB + lse14;
	R1dst = zext( *[ram]:4 addr );
}
# STWM: same before/after rule as LDWM above.
:STW^",M" R1,OFF_BASE_14 is opfam=0x1B & OFF_BASE_14 & R1 & RB & SPCBASE & lse14 {
	addr:$(ADDRSIZE) = SPCBASE;
	local imm = sext(lse14);
	if (imm s>= 0x0) goto <done>;
	addr = addr + imm;
	<done>
	*[ram]:4 addr = R1;
	RB = RB + imm;
}
# LDWX
:LDW^indexedWordAccessCmplt^loadCC RX^SRRB,RT is opfam=0x03 & subop=2 & zero=0 & indexedWordAccessCmplt & loadCC & RX & RT & SRRB { address:$(ADDRSIZE) = indexedWordAccessCmplt; RT = zext(*[ram]:4 address) ; }
#LDHX
:LDH^indexedHalfwordAccessCmplt^loadCC RX^SRRB,RT is opfam=0x03 & subop=1 & zero=0 & indexedHalfwordAccessCmplt & loadCC & RX & SRRB & RT { address:$(ADDRSIZE) = indexedHalfwordAccessCmplt; RT = zext(*[ram]:2 address) ; }
#LDBX
:LDB^indexedByteAccessCmplt^loadCC RX^SRRB,RT is opfam=0x03 & subop=0 & zero=0 & indexedByteAccessCmplt & loadCC & RX & SRRB & RT { address:$(ADDRSIZE) = indexedByteAccessCmplt; RT = zext(*[ram]:1 address) ; }
# same as LDWX, except that it checks privilege levels
:LDWAX^indexedWordAccessCmplt^loadCC RX(RB),RT is opfam=0x03 & subop=6 & zero=0 & s=0 & indexedWordAccessCmplt & loadCC & RX & SRRB & RB & RT { address:$(ADDRSIZE) = indexedWordAccessCmplt; RT = zext(*[ram]:4 address) ; }
# LDCWX: load-and-clear (semaphore primitive) -- zeroes the word after loading.
:LDCWX^indexedDoublewordAccessCmplt^loadClearCC RX^SRRB,RT is opfam=0x03 & subop=7 & zero=0 & indexedDoublewordAccessCmplt & loadClearCC & RX & SRRB & RT { address:$(ADDRSIZE) = indexedDoublewordAccessCmplt; RT = zext(*[ram]:4 address); *[ram]:4 address = 0:4; }
# LDWS
:LDW^shortDispCmplt^loadCC highlse5^SRRB,RT is opfam=0x03 & subop=2 & one=1 & shortDispCmplt & loadCC & highlse5 & SRRB & RT { addr:$(ADDRSIZE) = shortDispCmplt; RT = zext(*[ram]:4 addr) ; }
# LDHS
:LDH^shortDispCmplt^loadCC highlse5^SRRB,RT is opfam=0x03 & subop=1 & one=1 & shortDispCmplt & loadCC & highlse5 & SRRB & RT { addr:$(ADDRSIZE) = shortDispCmplt; RT = zext(*[ram]:2 addr) ; }
# LDBS
:LDB^shortDispCmplt^loadCC highlse5^SRRB,RT is opfam=0x03 & subop=0 & one=1 & shortDispCmplt & loadCC & highlse5 & SRRB & RT { addr:$(ADDRSIZE) = shortDispCmplt; RT = zext(*[ram]:1 addr) ; }
#STWS
:STW^storeShortDispCmplt^storeCC RR,lse5^SRRB is opfam=0x03 & subop=0xA & one=1 & storeShortDispCmplt & storeCC & lse5 & SRRB & RR { addr:$(ADDRSIZE) = storeShortDispCmplt; *[ram]:4 addr = RR:4; }
#STHS
:STH^storeShortDispCmplt^storeCC RR,lse5^SRRB is opfam=0x03 & subop=0x9 & one=1 & storeShortDispCmplt & storeCC & lse5 & SRRB & RR { addr:$(ADDRSIZE) = storeShortDispCmplt; *[ram]:2 addr = RR:2; }
#STBS
:STB^storeShortDispCmplt^storeCC RR,lse5^SRRB is opfam=0x03 & subop=0x8 & one=1 & storeShortDispCmplt & storeCC & lse5 & SRRB & RR { addr:$(ADDRSIZE) = storeShortDispCmplt; *[ram]:1 addr = RR:1; }
:LDWAS^shortDispCmplt^loadCC highlse5(RB),RT is opfam=0x03 & subop=6 & one=1 & shortDispCmplt & loadCC & highlse5 & RB & RT { addr:$(ADDRSIZE) = shortDispCmplt; RT = zext(*[ram]:4 addr) ; }
# NOTE(review): the load below uses *:4 (default space) where its siblings use
# *[ram]:4 -- presumably equivalent here; confirm intent.
:LDCWS^shortDispCmplt^loadClearCC highlse5^SRRB,RT is opfam=0x03 & subop=7 & one=1 & shortDispCmplt & loadClearCC & highlse5 & SRRB & RT { address:$(ADDRSIZE) = shortDispCmplt; RT = zext(*:4 address); *[ram]:4 address = 0:4; }
:STWAS^shortDispCmplt^storeCC RR,lse5(RB) is opfam=0x03 & subop=0xE & one=1 & shortDispCmplt & storeCC & lse5 & RB & RR { addr:$(ADDRSIZE) = shortDispCmplt; *[ram]:4 addr = RR:4; }
# This is the "begin" version for big endian. I am not doing the little endian version.
:STBYS^storeBytesShortCmplt^storeCC RR,lse5^SRRB is opfam=0x03 & subop=0xC & one=1 & storeBytesShortCmplt & storeCC & lse5 & SRRB & RR & u=0 { # get the address, which is most likely not word aligned addr:$(ADDRSIZE) = storeBytesShortCmplt; # figure out how many bytes need to be written local numBytes = 4 - (addr & 0x3); # this is the address where we stop (one byte past the last address to which we write) local finalAddr = addr + numBytes; # copy the contents of RR data:$(REGSIZE) = RR >> ((4 - numBytes) * 8); # use a for loop to write out the 1,2,3, or 4 bytes if (addr == finalAddr) goto ; *[ram]:1 addr = data:1; data = data >> 8; addr = addr + 1; goto ; } # This is the "end" version for big endian. I am not doing the little endian version. :STBYS^storeBytesShortCmplt^storeCC RR,lse5^SRRB is opfam=0x03 & subop=0xC & one=1 & storeBytesShortCmplt & storeCC & lse5 & SRRB & RR & u=1 { # get the address, which is most likely not word aligned addr:$(ADDRSIZE) = storeBytesShortCmplt; # figure out how many bytes need to be written local numBytes = (addr & 0x3); # now make a word aligned address addr = addr & 0x3; # this is the address where we stop (one byte past the last address to which we write) local finalAddr = addr + numBytes; # copy the contents of RR data:$(REGSIZE) = RR >> ((4 - numBytes) * 8); # use a for loop to write out the 1,2,3, or 4 bytes if (addr == finalAddr) goto ; *[ram]:1 addr = data:1; data = data >> 8; addr = addr + 1; goto ; } # Note: LDO and LDIL do not access memory, they just load an address into a register # LDI is a pseudo-op that is commonly used to load immediate values into registers. The values may or may not be addresses. 
:LDO lse14(RB),R1dst is opfam=0x0D & lse14 & RB & R1dst { R1dst = RB + lse14; } # LDO is often used as a copy operator, so when the offset is zero, we display it as a copy :COPY RB,R1dst is opfam=0x0D & im14=0 & RB & R1dst { R1dst = RB; } :LDI lse14,R1dst is opfam=0x0D & b=0 & lse14 & R1dst { R1dst = lse14; } :LDIL lse21,R2 is opfam=0x08 & R2 & lse21 { R2 = lse21; } # this instruction adds lse21 + R2 and puts the result into the hard coded r1 register # (not the register specified by bitfield reg1, the actual register r1) :ADDIL lse21, R2, r1 is opfam=0x0A & R2 & lse21 & r1 { r1 = R2 + lse21; } ################################################################################ # Branch Instructions ################################################################################ ################################################################################ # unconditional immediate calls ################################################################################ # non-nullifying unconditional immediate call :B^",L"^nullifyForBranch branchTarget3W,R2dst is opfam=0x3A & c=0x0 & R2dst & nullifyForBranch & n=0 & branchTarget3W & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 1; # unconditional imm call globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { R2dst = inst_start+8; branchExecuted = 1; } # nullifying unconditional immediate call # special case that doesn't need the deferral mechanism :B^",L"^nullifySymForBranch branchTarget3W,R2dst is opfam=0x3A & c=0x0 & R2dst & nullifySymForBranch & n=1 & branchTarget3W & $(COMMON) { R2dst = inst_start+8; call branchTarget3W; } ################################################################################ # unconditional immediate branches # since PA-RISC doesn't have a straight branch without link, it uses branch and link into the R0 register. 
# By having a second op for it, we can use a goto and prevent analysis tools from thinking this is a legitimate subroutine call
################################################################################
# non-nullifying unconditional immediate branch (no link)
:B^nullifyForBranch branchTarget3W is opfam=0x3A & c=0x0 & reg2=0 & nullifyForBranch & n=0 & branchTarget3W & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 0;   # unconditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchExecuted = 1; }
# nullifying unconditional immediate branch (no link)
# This is a special case, as we just do the branch and don't use
# our deferred branching mechanism
:B^nullifySymForBranch branchTarget3W is opfam=0x3A & c=0x0 & reg2=0 & nullifySymForBranch & n=1 & branchTarget3W & $(COMMON)
{ goto branchTarget3W; }
################################################################################
# conditional immediate branches - comparison with registers
################################################################################
# The n=1 forms also arm conditional nullification of the delay slot: the slot
# is nullified on a not-taken backward branch or a taken forward branch.
:CMPBT^RegCSCondSym^nullifyForBranch R1,R2,branchTarget2W is opfam=0x20 & R1 & R2 & nullifyForBranch & n=0 & branchTarget2W & RegCSCondSym & RegCSCond & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = RegCSCond; branchExecuted = 1; }
:CMPBT^RegCSCondSym^nullifyForBranch R1,R2,branchTarget2W is opfam=0x20 & R1 & R2 & nullifyForBranch & n=1 & branchTarget2W & displacement2W & RegCSCondSym & RegCSCond & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = RegCSCond; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); }
:CMPBF^RegCSCondSym^nullifyForBranch R1,R2,branchTarget2W is opfam=0x22 & R1 & R2 & nullifyForBranch & n=0 & branchTarget2W & RegCSCondSym & RegCSCond & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = ! RegCSCond; branchExecuted = 1; }
:CMPBF^RegCSCondSym^nullifyForBranch R1,R2,branchTarget2W is opfam=0x22 & R1 & R2 & nullifyForBranch & n=1 & branchTarget2W & displacement2W & RegCSCondSym & RegCSCond & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = ! RegCSCond; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); }
################################################################################
# conditional immediate branches -- comparison with immediates
################################################################################
:CMPIBT^RegCSCondSym^nullifyForBranch highlse5,R2,branchTarget2W is opfam=0x21 & highlse5 & R2 & nullifyForBranch & branchTarget2W & RegCSCondSym & RegCSCondI & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = RegCSCondI; branchExecuted = 1; }
:CMPIBT^RegCSCondSym^nullifyForBranch highlse5,R2,branchTarget2W is opfam=0x21 & highlse5 & R2 & nullifyForBranch & branchTarget2W & displacement2W & RegCSCondSym & RegCSCondI & n=1 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = RegCSCondI; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); }
:CMPIBF^RegCSCondSym^nullifyForBranch highlse5,R2,branchTarget2W is opfam=0x23 & highlse5 & R2 & nullifyForBranch & branchTarget2W & RegCSCondSym & RegCSCondI & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = !RegCSCondI; branchExecuted = 1; }
:CMPIBF^RegCSCondSym^nullifyForBranch highlse5,R2,branchTarget2W is opfam=0x23 & highlse5 & R2 & nullifyForBranch & branchTarget2W & displacement2W & RegCSCondSym & RegCSCondI & n=1 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchCond = !RegCSCondI; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); }
################################################################################
# unconditional indirect calls & branches
################################################################################
# IP relative unconditional indirect call
:BLR^nullifyForBranch RR,R2dst is opfam=0x3A & c=0x2 & R2dst & nullifyForBranch & RR & IPRelativeIndexedTarget & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 5;   # unconditional indirect call
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ R2dst = inst_start+8; branchExecuted = 1; }
# IP relative unconditional indirect call
:BLR^nullifySymForBranch RR,R2dst is opfam=0x3A & c=0x2 & R2dst & nullifySymForBranch & RR & IPRelativeIndexedTarget & n=1 & $(COMMON)
{ R2dst = inst_start+8; call [branchIndDest]; }
# vectored (offset register plus index register) unconditional indirect branch
:BV^nullifyForBranch RX(RB) is opfam=0x3A & c=0x6 & RB & nullifyForBranch & RX & IndexedTarget & bit0=0 & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 4;   # unconditional indirect branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchExecuted = 1; }
# vectored (offset register plus index register) unconditional indirect branch
:BV^nullifySymForBranch RX(RB) is opfam=0x3A & c=0x6 & RB & nullifySymForBranch & RX & IndexedTarget & bit0=0 & n=1 & $(COMMON)
{ goto [branchIndDest]; }
# this is the idiom for return from subroutine
# currently we pull it out so we don't print out the R0, but we could modify this to do a return...
# BV with index register r0 (reg1=0): the return-from-subroutine idiom.
:BV^nullifyForBranch (RB) is opfam=0x3A & c=0x6 & RB & nullifyForBranch & reg1=0 & ReturnTarget & bit0=0 & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 4;   # unconditional indirect branch
  globalset(inst_next, branchType);
  branchIsReturn=1; globalset(inst_next, branchIsReturn);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ branchExecuted = 1; }
# this is the idiom for return from subroutine
# currently we pull it out so we don't print out the R0, but we could modify this to do a return...
:BV^nullifyForBranch (RB) is opfam=0x3A & c=0x6 & RB & nullifyForBranch & reg1=0 & ReturnTarget & bit0=0 & n=1 & $(COMMON)
{ return [branchIndDest]; }
###########################
#### MOVB
# Move and branch: the condition is evaluated on the moved value
# (nullifyCondResult), so DepCond must be built after the move.
:MOVB^SEDCondSym^nullifyForBranch R1,R2dst,branchTarget2W is opfam=0x32 & R2dst & R1 & nullifyForBranch & branchTarget2W & DepCond & SEDCondSym & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ R2dst = R1; nullifyCondResult = R1;
  build DepCond; # force the condition evaluation after the move
  branchCond = DepCond; branchExecuted = 1; }
:MOVB^SEDCondSym^nullifyForBranch R1,R2dst,branchTarget2W is opfam=0x32 & R2dst & R1 & nullifyForBranch & branchTarget2W & displacement2W & DepCond & SEDCondSym & n=1 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ R2dst = R1; nullifyCondResult = R1;
  build DepCond; # force the condition evaluation after the move
  branchCond = DepCond; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); }
###########################
#### MOVIB
# Move-immediate and branch: same pattern as MOVB with a sign-extended im5.
:MOVIB^SEDCondSym^nullifyForBranch im5,R2dst,branchTarget2W is opfam=0x33 & R2dst & im5 & nullifyForBranch & branchTarget2W & DepCond & SEDCondSym & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ R2dst = sext(im5:1); nullifyCondResult = sext(im5:1);
  build DepCond; # force the condition evaluation after the move
  branchExecuted = 1; branchCond = DepCond; }
:MOVIB^SEDCondSym^nullifyForBranch im5,R2dst,branchTarget2W is opfam=0x33 & R2dst & im5 & nullifyForBranch & branchTarget2W & displacement2W & DepCond & SEDCondSym & n=1 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ R2dst = sext(im5:1); nullifyCondResult = sext(im5:1);
  build DepCond; # force the condition evaluation after the move
  branchCond = DepCond; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); }
###########################
# ADDBT: add and branch if the condition on the operands is true.
# Note the condition is built on R1/R2 before R2dst is written.
:ADDBT^RegAddCondSym^nullifyForBranch R1,R2dst,branchTarget2W is opfam=0x28 & R1 & R2dst & R2 & nullifyForBranch & branchTarget2W & RegAddCond & RegAddCondSym & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ build RegAddCond; # force the condition evaluation before the move
  branchCond = RegAddCond; branchExecuted = 1;
  R2dst = R1 + R2; }
:ADDBT^RegAddCondSym^nullifyForBranch R1,R2dst,branchTarget2W is opfam=0x28 & R1 & R2dst & R2 & nullifyForBranch & branchTarget2W & displacement2W & RegAddCond & RegAddCondSym & n=1 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ build RegAddCond; # force the condition evaluation before the move
  branchCond = RegAddCond; branchExecuted = 1;
  nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) );
  R2dst = R1 + R2; }
###########################
# ADDBF: add and branch if the condition is false.
:ADDBF^RegAddCondSym^nullifyForBranch R1,R2dst,branchTarget2W is opfam=0x2A & R1 & R2dst & R2 & nullifyForBranch & branchTarget2W & RegAddCond & RegAddCondSym & n=0 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ build RegAddCond; # force the condition evaluation before the move
  branchCond = !RegAddCond; branchExecuted = 1;
  R2dst = R1 + R2; }
:ADDBF^RegAddCondSym^nullifyForBranch R1,R2dst,branchTarget2W is opfam=0x2A & R1 & R2 & R2dst & nullifyForBranch & branchTarget2W & displacement2W & RegAddCondSym & RegAddCond & n=1 & $(COMMON)
[ branchEnable = 1; globalset(inst_next, branchEnable);
  branchType = 2;   # conditional imm branch
  globalset(inst_next, branchType);
  branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ]
{ build RegAddCond; # force the condition evaluation before the move
  branchCond = !RegAddCond; branchExecuted = 1;
  nullifyNextCond = ( !
branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); R2dst = R1 + R2; } ########################### :ADDIBT^RegAddCondSym^nullifyForBranch highlse5,R2dst,branchTarget2W is opfam=0x29 & highlse5 & R2 & R2dst & nullifyForBranch & branchTarget2W & RegAddCondI & RegAddCondSym & n=0 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { build RegAddCondI; # force the condition evaluation before the move branchCond = RegAddCondI; branchExecuted = 1; R2dst = R2 + sext(highlse5); } :ADDIBT^RegAddCondSym^nullifyForBranch highlse5,R2dst,branchTarget2W is opfam=0x29 & highlse5 & R2 & R2dst & nullifyForBranch & branchTarget2W & displacement2W & RegAddCondI & RegAddCondSym & n=1 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { build RegAddCondI; # force the condition evaluation before the move branchCond = RegAddCondI; branchExecuted = 1; nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); R2dst = R2 + sext(highlse5); } :ADDIBF^RegAddCondSym^nullifyForBranch highlse5,R2dst,branchTarget2W is opfam=0x2B & highlse5 & R2 & R2dst & nullifyForBranch & branchTarget2W & RegAddCondI & RegAddCondSym & n=0 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { build RegAddCondI; # force the condition evaluation before the move branchCond = ! 
RegAddCondI; branchExecuted = 1; R2dst = R2 + sext(highlse5); } :ADDIBF^RegAddCondSym^nullifyForBranch highlse5,R2dst,branchTarget2W is opfam=0x2B & highlse5 & R2 & R2dst & nullifyForBranch & branchTarget2W & displacement2W & RegAddCondI & RegAddCondSym & n=1 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { build RegAddCondI; # force the condition evaluation before the move branchCond = ! RegAddCondI; branchExecuted = 1; nullifyNextCond = ( ! branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); R2dst = R2 + sext(highlse5); } ####################################### :BB^SEDCondSym^nullifyForBranch R1,SAR,branchTarget2W is opfam=0x30 & branchTarget2W & nullifyForBranch & R1 & BVBCond & SEDCondSym & SAR & n=0 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { branchCond = BVBCond; branchExecuted = 1; } :BB^SEDCondSym^nullifyForBranch R1,SAR,branchTarget2W is opfam=0x30 & branchTarget2W & nullifyForBranch & displacement2W & R1 & BVBCond & SEDCondSym & SAR & n=1 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { branchCond = BVBCond; branchExecuted = 1; nullifyNextCond = ( ! 
branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); } ####################################### :BB^SEDCondSym^nullifyForBranch R1,bboffset,branchTarget2W is opfam=0x31 & branchTarget2W & nullifyForBranch & R1 & BBCond & SEDCondSym & n=0 & bboffset & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { branchCond = BBCond; branchExecuted = 1; } :BB^SEDCondSym^nullifyForBranch R1,bboffset,branchTarget2W is opfam=0x31 & branchTarget2W & displacement2W & nullifyForBranch & R1 & BBCond & SEDCondSym & n=1 & bboffset & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 2; # conditional imm branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { branchCond = BBCond; branchExecuted = 1; nullifyNextCond = ( ! 
branchCond && (displacement2W s< 0)) || ( branchCond && (displacement2W s>= 0) ); } ####################################### # These instructions change the privilege level or the space register define pcodeop changePrivLevel; define pcodeop changeSpace; define pcodeop getCurrentSpace; :BE^nullifyForBranch externalTarget is opfam=0x38 & nullifyForBranch & externalTarget & SR3bit & n=0 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 4; # unconditional indirect branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { iasq_front = SR3bit; # set the space ID to the new space ID # with the current deferral system, this will mean the sr is wrong during the delay slot branchExecuted = 1; } :BE^nullifyForBranch externalTarget is opfam=0x38 & nullifyForBranch & externalTarget & SR3bit & n=1 & $(COMMON) { iasq_front = SR3bit; # set the space ID to the new space ID goto [externalTarget]; } :BE^",L"^nullifyForBranch externalTarget,SR0,R31 is opfam=0x39 & nullifyForBranch & SR0 & R31 & SR3bit & externalTarget & n=0 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 5; # unconditional indirect call globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { r31 = inst_next+4; # store the link/return address sr0 = iasq_front; # store the link/return space ID iasq_front = SR3bit; # set the space ID to the new space ID # with the current deferral system, this will mean the sr is wrong during the delay slot branchExecuted = 1; } :BE^",L"^nullifyForBranch externalTarget,SR0,R31 is opfam=0x39 & nullifyForBranch & SR0 & R31 & SR3bit & externalTarget & n=1 & $(COMMON) { r31 = inst_next+4; # store the link/return address sr0 = iasq_front; # store the link/return space ID iasq_front = SR3bit; # set the space ID to the new space ID call [externalTarget]; } :B^",GATE"^nullifyForBranch 
branchTarget3W,R2dst is opfam=0x3A & c=0x1 & R2dst & nullifyForBranch & branchTarget3W & n=0 & $(COMMON) [ branchEnable = 1; globalset(inst_next, branchEnable); branchType = 0; # unconditional immediate branch globalset(inst_next, branchType); branchCouldBeNullified = nullifyEnable; globalset(inst_next, branchCouldBeNullified); ] { R2dst = changePrivLevel(); branchExecuted = 1; } :B^",GATE"^nullifyForBranch branchTarget3W,R2dst is opfam=0x3A & c=0x1 & R2dst & nullifyForBranch & branchTarget3W & n=1 & $(COMMON) { R2dst = changePrivLevel(); goto [branchTarget3W]; } ####################################### ####################################### # General Arithmetic/Logic Instructions ####################################### ####################################### define pcodeop trap; :ADD^AddCondSym R1,R2,RT is opfam=0x02 & op=0x18 & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { pswCB = carry(R1,R2); RT = R1 + R2; build AddCondNullify; } # this version intentionally does not set the carry bits :ADD^",L"^AddCondSym R1,R2,RT is opfam=0x02 & op=0x28 & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { RT = R1 + R2; build AddCondNullify; } :ADDO^AddCondSym R1,R2,RT is opfam=0x02 & op=0x38 & m=0 & R1 & R2 & RT & AddCondSym & AddCondNullify { if (scarry(R1,R2)) goto ; pswCB = carry(R1,R2); RT = R1 + R2; goto ; trap(); build AddCondNullify; } :ADD^",C"^AddCondSym R1,R2,RT is opfam=0x02 & op=0x1C & m=0 & R1 & R2 & RT & AddCondSym & AddCondNullify { local partialSum = R2 + zext(pswCB); pswCB = carry(partialSum, R1); RT = partialSum + R1; build AddCondNullify; } # TODO This may need some work :ADD^",CO"^AddCondSym R1,R2,RT is opfam=0x02 & op=0x3C & m=0 & R1 & R2 & RT & AddCondSym & AddCondNullify { local partialSum = R2 + zext(pswCB); partialCarry:1 = carry(R2, zext(pswCB)); partialOverflow:1 = scarry(R2, zext(pswCB)); takeTrap:1 = partialOverflow == 0x1:1; if (takeTrap) goto ; RT = partialSum + R1; finalCarry:1 = carry(partialSum, R1); finalOverflow:1 = scarry(partialSum, 
R1); takeTrap = finalOverflow == 0x1:1; if (takeTrap) goto ; goto ; trap(); pswCB = partialCarry ^ finalCarry; build AddCondNullify; } :SH1ADD^AddCondSym R1,R2,RT is opfam=0x02 & op=0x19 & m=0 & R1 & R2 & RT & AddCond & AddCondSym & AddCondNullify { local shiftedR1 = R1 << 1; pswCB = carry(shiftedR1,R2); RT = shiftedR1 + R2; build AddCondNullify; } :SH1ADDL^AddCondSym R1,R2,RT is opfam=0x02 & op=0x29 & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { local shiftedR1 = R1 << 1; RT = shiftedR1 + R2; build AddCondNullify; } :SH1ADDO^SEDCondSym R1,R2,RT is opfam=0x02 & op=0x39 & m=0 & R1 & R2 & RT & AddCondNullify & SEDCondSym { local shiftedR1 = R1 << 1; if (scarry(shiftedR1, R2)) goto ; pswCB = carry(shiftedR1,R2); RT = shiftedR1 + R2; goto ; trap(); build AddCondNullify; } :SH2ADD^SEDCondSym R1,R2,RT is opfam=0x02 & op=0x1A & m=0 & R1 & R2 & RT & AddCondNullify & SEDCondSym { local shiftedR1 = R1 << 2; pswCB = carry(shiftedR1,R2); RT = shiftedR1 + R2; build AddCondNullify; } :SH2ADDL^AddCondSym R1,R2,RT is opfam=0x02 & op=0x2A & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { local shiftedR1 = R1 << 2; RT = shiftedR1 + R2; build AddCondNullify; } :SH2ADDO^AddCondSym R1,R2,RT is opfam=0x02 & op=0x3A & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { local shiftedR1 = R1 << 2; if (scarry(shiftedR1, R2)) goto ; pswCB = carry(shiftedR1,R2); RT = shiftedR1 + R2; goto ; trap(); build AddCondNullify; } :SH3ADD^AddCondSym R1,R2,RT is opfam=0x02 & op=0x1B & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { local shiftedR1 = R1 << 3; pswCB = carry(shiftedR1,R2); RT = shiftedR1 + R2; build AddCondNullify; } :SH3ADDL^AddCondSym R1,R2,RT is opfam=0x02 & op=0x2B & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { local shiftedR1 = R1 << 3; RT = shiftedR1 + R2; build AddCondNullify; } :SH3ADDO^AddCondSym R1,R2,RT is opfam=0x02 & op=0x3B & m=0 & R1 & R2 & RT & AddCondNullify & AddCondSym { local shiftedR1 = R1 << 3; if (scarry(shiftedR1, R2)) goto ; pswCB = 
carry(shiftedR1,R2); RT = shiftedR1 + R2; goto <subdone>; <suberror> trap(); <subdone> build AddCondNullify; }  # tail of SH3ADDO: trap labels restored (markup stripping had deleted all <label> tokens)

# SUB: RT = R1 - R2; pswCB records the "no borrow" indicator; the completer
# condition (CSCondNullify) may nullify the following instruction.
:SUB^CSCondSym R1,R2,RT is opfam=0x02 & op=0x10 & m=0 & R1 & R2 & RT & CSCondSym & CSCondNullify
{
	pswCB = ! (R1 < R2);
	RT = R1 - R2;
	build CSCondNullify;
}

# SUB,O: as SUB but traps on signed overflow (result not written on the trap path)
:SUB",O"^CSCondSym R1,R2,RT is opfam=0x02 & op=0x30 & m=0 & R1 & R2 & RT & CSCondSym & CSCondNullify
{
	if (sborrow(R1,R2)) goto <suberror>;
	pswCB = ! (R1 < R2);
	RT = R1 - R2;
	build CSCondNullify;
	goto <subdone>;
	<suberror>
	trap();
	<subdone>
}

# SUB,B: subtract with borrow -- folds in pswCB from a previous subtract
:SUB",B"^CSCondSym R1,R2,RT is opfam=0x02 & op=0x14 & m=0 & R1 & R2 & RT & CSCondSym & CSCondNullify
{
	right:$(REGSIZE) = ~R2 + zext(pswCB);
	pswCB = ! (R1 < right);	#! carry(R1, right);
	RT = R1 + right;
	build CSCondNullify;
}

# SUB,BO: subtract with borrow, trap on signed overflow
:SUB",BO"^CSCondSym R1,R2,RT is opfam=0x02 & op=0x34 & m=0 & R1 & R2 & RT & CSCondSym & CSCondNullify
{
	right:$(REGSIZE) = ~R2 + zext(pswCB);
	if (sborrow(R1,right)) goto <suberror>;
	pswCB = ! (R1 < right);
	RT = R1 + right;
	build CSCondNullify;
	goto <subdone>;
	<suberror>
	trap();
	<subdone>
}

# subtract and trap on condition
# this instruction does not have a nullify form
:SUB",T"^CSCondSym R1,R2,RT is opfam=0x02 & op=0x13 & m=0 & R1 & R2 & RT & CSCondSym & CSCond
{
	pswCB = ! (R1 < R2);
	RT = R1 - R2;
	build CSCond;
	if (CSCond) goto <subtrap>;
	goto <subdone>;
	<subtrap>
	trap();
	<subdone>
}

# SUB,TO: subtract, trap on condition or on signed overflow.
# NOTE(review): the interior of this semantic section was destroyed by markup
# stripping ("pswCB = ! (R1;" -- everything from "<R2)" to the first label's ">"
# was eaten as an HTML tag). Reconstructed by analogy with SUB,T and SUB,O;
# re-stripping this reconstruction reproduces the damaged text exactly.
:SUB",TO"^CSCondSym R1,R2,RT is opfam=0x02 & op=0x33 & m=0 & R1 & R2 & RT & CSCondSym & CSCond
{
	if (sborrow(R1,R2)) goto <subtrap>;
	pswCB = ! (R1<R2);
	RT = R1 - R2;
	build CSCond;
	if (CSCond) goto <subtrap>;
	goto <subdone>;
	<subtrap>
	trap();
	<subdone>
}

# R1 is partial remainder, R2 is denominator, RT is updated partial remainder
:DS^CSCondSym R1,R2,RT is opfam=0x02 & op=0x11 & m=0 & R1 & R2 & RT & CSCondSym & CSCondNullify
{
	local origR2 = R2;
	left:$(REGSIZE) = (R1 << 1) | zext(pswCB);
	if (pswV) goto <dsinvert>;
	right:$(REGSIZE) = R2;
	goto <dsadd>;
	<dsinvert>
	right = ~R2 + 1;
	<dsadd>
	RT = left + right;
	pswCB = carry(left,right);
	# handle pswV -- I'm using R2 here, the book says R2, but non-restoring algorithms usually use R1, so
	# this could be the error.
shiftBit:1 = ((origR2 >> 31) & 0x1) == 1; pswV = pswCB ^ shiftBit; # handle nullification -- TODO This condition is special and the stock CSCondNullify won't work -- broken -- see page 5-103 build CSCondNullify; } :CMPCLR^CSCondSym R1,R2,RT is opfam=0x02 & op=0x22 & m=0 & R1 & R2 & RT & CSCondSym & CSCondNullify { build CSCondNullify; # must do this before setting register RT = 0; } # COPY is a pseudo-op using OR to move values between registers :COPY R1,RT is opfam=0x02 & op=0x09 & m=0 & R1 & reg2=0 & RT & c=0 & fv=0 { RT = R1; } # nop is a pseudo-op for OR R0,R0, which is one way to make a nop :NOP is opfam=0x02 & op=0x09 & m=0 & reg1=0 & reg2=0 & t=0 & c=0 & fv=0 { } # intentionally left blank :OR^LogicCondSym R1,R2,RT is opfam=0x02 & op=0x09 & m=0 & R1 & R2 & RT & LogicCondSym & LogicCondNullify { RT = R1 | R2; build LogicCondNullify; } :XOR^LogicCondSym R1,R2,RT is opfam=0x02 & op=0x0A & m=0 & R1 & R2 & RT & LogicCondSym & LogicCondNullify { RT = R1 ^ R2; build LogicCondNullify; } :AND^LogicCondSym R1,R2,RT is opfam=0x02 & op=0x08 & m=0 & R1 & R2 & RT & LogicCondSym & LogicCondNullify { RT = R1 & R2; build LogicCondNullify; } :ANDCM^LogicCondSym R1,R2,RT is opfam=0x02 & op=0x00 & m=0 & R1 & R2 & RT & LogicCondSym & LogicCondNullify { RT = R1 & ~R2; build LogicCondNullify; } :UXOR^UnitCondSym R1,R2,RT is opfam=0x02 & op=0x0E & m=0 & R1 & R2 & RT & UnitCondSym & UnitCondNullify { RT = R1 ^ R2; build UnitCondNullify; } :UADDCM^UnitCondSym R1,R2,RT is opfam=0x02 & op=0x26 & m=0 & R1 & R2 & RT & UnitCondSym & UnitCondNullify { RT = R1 + ~R2; build UnitCondNullify; } :UADDCMT^UnitCondSym R1,R2,RT is opfam=0x02 & op=0x27 & m=0 & R1 & R2 & RT & UnitCond & UnitCondSym { if (UnitCond) goto ; RT = R1 + ~R2; goto ; trap(); } :ADDI^AddCondSym lse11,R2,R1dst is opfam=0x2D & bit11=0 & lse11 & R2 & R1dst & AddCondSym & AddCondI11Nullify { tmp:$(REGSIZE) = R2; # Don't clobber the original value, when R1 is the same as R2 R1dst = R2 + lse11; pswCB = carry(tmp,lse11); build 
AddCondI11Nullify; }

# PA1.1 this is ADDIO, PA2.0 is ADDI,TSV (trap on signed overflow)
# NOTE(review): pswCB uses carry(tmp,sext(lse11)) here but plain ADDI uses
# carry(tmp,lse11) -- inconsistent; confirm against the architecture manual.
:ADDI^",TSV"^AddCondSym lse11,R2,R1dst is opfam=0x2D & bit11=1 & lse11 & R2 & R1dst & AddCondSym & AddCondI11Nullify
{
	tmp:$(REGSIZE) = R2;	# Don't clobber the original value, when R1 is the same as R2
	if (scarry(R2, lse11) == 1:1) goto <additrap>;
	R1dst = R2 + lse11;
	pswCB = carry(tmp,sext(lse11));
	goto <addidone>;
	<additrap>
	trap();
	<addidone>
	build AddCondI11Nullify;
}

# PA11 this is ADDIT, PA2.0 this is ADDI,TC
:ADDI^",TC"^AddCondSym lse11,R2,R1dst is opfam=0x2C & bit11=0 & lse11 & R2 & R1dst & AddCondSym & AddCondI11
{
	tmp:$(REGSIZE) = R2;	# Don't clobber the original value, when R1 is the same as R2
	R1dst = R2 + lse11;
	build AddCondI11;
	if (AddCondI11) goto <tctrap>;
	pswCB = carry(tmp,lse11);
	goto <tcdone>;
	<tctrap>
	trap();
	<tcdone>
}

# PA11 this is ADDITO, PA2.0 this is ADDI,TC,TSV
:ADDI^",TC,TSV"^AddCondSym lse11,R2,R1dst is opfam=0x2C & bit11=1 & lse11 & R2 & R1dst & AddCondSym & AddCondI11
{
	tmp:$(REGSIZE) = R2;	# Don't clobber the original value, when R1 is the same as R2
	R1dst = R2 + lse11;
	build AddCondI11;
	if (AddCondI11) goto <tctrap>;
	if (scarry(tmp, lse11) == 1:1) goto <tctrap>;
	pswCB = carry(tmp,lse11);
	goto <tcdone>;
	<tctrap>
	trap();
	<tcdone>
}

:SUBI^CSCondSym lse11,R2,R1dst is opfam=0x25 & bit11=0 & lse11 & R2 & R1dst & CSCondI11Nullify & CSCondSym
{
	tmp:$(REGSIZE) = R2;	# Don't clobber the original value, when R1 is the same as R2
	R1dst = lse11 - R2;
	pswCB = !(lse11 < tmp);
	build CSCondI11Nullify;
}

:SUBI^",TSV"^CSCondSym lse11,R2,R1dst is opfam=0x25 & bit11=1 & lse11 & R2 & R1dst & CSCondI11 & CSCondSym & CSCondI11Nullify
{
	tmp:$(REGSIZE) = R2;	# Don't clobber the original value, when R1 is the same as R2
	if (sborrow(lse11, R2) == 1:1) goto <subitrap>;
	R1dst = lse11 - R2;
	pswCB = !(lse11 < tmp);
	goto <subidone>;
	<subitrap>
	trap();
	<subidone>
	build CSCondI11Nullify;
}

# CMPICLR: compare immediate and clear. Explicit build added for consistency
# with the register form CMPCLR ("must do this before setting register"), so
# the nullify condition is evaluated before R1dst is cleared.
:CMPICLR^CSCondSym lse11,R2,R1dst is opfam=0x24 & bit11=0 & lse11 & R2 & R1dst & CSCondI11Nullify & CSCondSym
{
	build CSCondI11Nullify;	# must do this before setting register
	R1dst = 0;
}

:SHRPW^SEDCondSym R1,R2,SAR,RT is opfam=0x34 & subop1012=0 & bits59=0 & R1 & R2 & RT & ShiftCondNullify & SEDCondSym & SAR
{
left:8 = zext( (R1 & 0x7FFFFFFF) ) << 32; concat:8 = left | zext(R2); concat = concat >> SAR; nullifyCondResult=concat:$(REGSIZE); RT = concat:4; build ShiftCondNullify; } # previously this was shd, now is shrpw form 14 :SHRPW^SEDCondSym R1,R2,shiftC,RT is opfam=0x34 & R2 & R1 & subop1012=2 & shiftC & RT & SEDCondSym & ShiftCondNullify { left:8 = zext( (R1 & 0x7FFFFFFF) ) << 32; concat:8 = left | zext(R2); concat = concat >> shiftC; nullifyCondResult=concat:$(REGSIZE); RT = concat:4; build ShiftCondNullify; } # extract unsigned using SAR :EXTRW",U"^SEDCondSym R2,SAR,shiftCLen,R1dst is opfam=0x34 & bits59=0 & subop1012=4 & R2 & shiftCLen & R1dst & SEDCondSym & ExtrCondNullify & SAR { local value = R2 >> (31-SAR); mask:4 = 0xffffffff >> (32 - shiftCLen); nullifyCondResult = value & mask; R1dst = nullifyCondResult; build ExtrCondNullify; } # extract signed using SAR :EXTRW",S"^SEDCondSym R2,SAR,shiftCLen,R1dst is opfam=0x34 & bits59=0 & subop1012=5 & R2 & shiftCLen & R1dst & SEDCondSym & ExtrCondNullify & SAR { local value = R2 s>> (31-SAR); value = value << (32 - shiftCLen); value = value s>> (32 - shiftCLen); nullifyCondResult = value; R1dst = value; build ExtrCondNullify; } # extract unsigned using immediate :EXTRW^",U"^SEDCondSym R2,cp,shiftCLen,R1dst is opfam=0x34 & subop1012=6 & cp & R2 & shiftCLen & R1dst & SEDCondSym & ExtrCondNullify & shiftC { local value = R2 >> shiftC; mask:4 = 0xffffffff >> (32-shiftCLen); nullifyCondResult = value & mask; R1dst = nullifyCondResult; build ExtrCondNullify; } # extract signed using immediate :EXTRW^",S"^SEDCondSym R2,cp,shiftCLen,R1dst is opfam=0x34 & subop1012=7 & cp & R2 & shiftC & shiftCLen & R1dst & SEDCondSym & ExtrCondNullify { local value = R2 s>> shiftC; value = value << (32 - shiftCLen); value = value s>> (32 - shiftCLen); nullifyCondResult = value; R1dst = value; build ExtrCondNullify; } # non-zeroing SAR version (VDEP) :DEPW^SEDCondSym R1,SAR,shiftCLen,R2dst is opfam=0x35 & bits59=0 & subop1012=1 & R1 & R2 & 
R2dst & shiftCLen & DepCondNullify & SEDCondSym & SAR { mask:4 = 0xffffffff >> (32-shiftCLen); local value = R1 & mask; value = value << (31-SAR); mask = mask << (31-SAR); local result = R2 & ~mask; result = result | value; R2dst = result; nullifyCondResult = result; build DepCondNullify; } # non-zeroing constant version (DEP) :DEPW^SEDCondSym R1,shiftC,shiftCLen,R2dst is opfam=0x35 & subop1012=3 & shiftC & shiftCLen & R1 & R2 & R2dst & DepCondNullify & SEDCondSym & cp { mask:4 = 0xffffffff >> (32-shiftCLen); local value = R1 & mask; value = value << cp; mask = mask << cp; local result = R2 & ~mask; result = result | value; R2dst = result; nullifyCondResult = result; build DepCondNullify; } # non-zeroing immediate SAR version (VDEPI) :DEPWI^SEDCondSym highlse5,SAR,shiftCLen,R2dst is opfam=0x35 & bits59=0 & subop1012=5 & shiftCLen & highlse5 & R2 & R2dst & DepCondNullify & SEDCondSym & SAR { mask:4 = 0xffffffff >> (32 - shiftCLen); depbits:4 = sext(highlse5); depbits = sext(depbits); local value = depbits & mask; value = value << (31-SAR); mask = mask << (31-SAR); local result = R2 & ~mask; result = result | value; R2dst = result; nullifyCondResult = result; build DepCondNullify; } # non-zeroing immediate constant version (DEPI) :DEPWI^SEDCondSym highlse5,shiftC,shiftCLen,R2dst is opfam=0x35 & subop1012=7 & shiftC & highlse5 & R2 & R2dst & shiftCLen & DepCondNullify & SEDCondSym & cp { mask:4 = 0xffffffff >> (32-shiftCLen); depbits:4 = sext(highlse5); local value = depbits & mask; value = value << cp; mask = mask << cp; local result = R2 & ~mask; result = result | value; R2dst = result; nullifyCondResult = result; build DepCondNullify; } # DEPW,Z SAR version (ZVDEP) :DEPW",Z"^SEDCondSym R1,SAR,shiftCLen,R2dst is opfam=0x35 & bits59=0 & subop1012=0 & R1 & shiftCLen & R2 & R2dst & DepCondNullify & SEDCondSym & SAR { mask:4 = 0xffffffff >> (32-shiftCLen); local value = R1 & mask; value = value << (31-SAR); R2dst = value; nullifyCondResult = value; build DepCondNullify; 
} # DEPW,Z constant version (ZDEP) :DEPW^",Z"^SEDCondSym R1,shiftC,shiftCLen,R2dst is opfam=0x35 & subop1012=2 & shiftC & R2 & R2dst & shiftCLen & R1 & DepCondNullify & SEDCondSym & cp { mask:4 = 0xffffffff >> (32-shiftCLen); local value = R1 & mask; value = value << cp; R2dst = value; nullifyCondResult = value; build DepCondNullify; } # DEPWI,Z SAR version (ZVDEPI) :DEPWI",Z"^SEDCondSym highlse5,SAR,shiftCLen,R2dst is opfam=0x35 & SAR & bits59=0 & subop1012=4 & shiftCLen & highlse5 & R2dst & DepCondNullify & SEDCondSym { mask:4 = 0xffffffff >> (32-shiftCLen); depbits:4 = sext(highlse5); local value = depbits & mask; value = value << (31-SAR); R2dst = value; nullifyCondResult = value; build DepCondNullify; } # DEPWI,Z constant version (ZDEPI) :DEPWI",Z"^SEDCondSym highlse5,shiftC,shiftCLen,R2dst is opfam=0x35 & subop1012=6 & shiftC & highlse5 & R2dst & shiftCLen & DepCondNullify & SEDCondSym & cp { mask:4 = 0xffffffff >> (32-shiftCLen); depbits:4 = sext(highlse5); local value = depbits & mask; value = value << cp; nullifyCondResult = value; R2dst = value; build DepCondNullify; } # BCD instructions :DCOR^UnitCondSym R2,RT is opfam=0x02 & op=0x2E & R2 & reg1=0x0 & RT & UnitCond & UnitCondSym { } # TODO :IDCOR^UnitCondSym R2,RT is opfam=0x02 & op=0x2F & R2 & reg1=0x0 & RT & UnitCond & UnitCondSym { } # TODO ################################################# # System Instructions ################################################# define pcodeop break; :BREAK im5,im13 is opfam=0x0 & sysop=0x00 & im5 & im13 { break(); } :RFI is opfam=0x0 & sysop=0x60 & im5=0x0 { iaoq_back = cr18; iaoq_front = cr18; iasq_back = cr17; iasq_front = cr17; upperBits:8 = zext(iasq_front); lowerBits:8 = zext(iaoq_front); local returnAddr = (upperBits << 32) | lowerBits; goto [returnAddr]; } :RFI^",R" is opfam=0x0 & sysop=0x65 & im5=0x0 { r1 = shr0; r8 = shr1; r9 = shr2; r16 = shr3; r17 = shr4; r24 = shr5; r25 = shr6; # psw = ipsw; iaoq_back = cr18; iaoq_front = cr18; iasq_back = cr17; iasq_front 
= cr17; upperBits:8 = zext(iasq_front); lowerBits:8 = zext(iaoq_front); returnAddr:8 = (upperBits << 32) | lowerBits; goto [returnAddr]; }  # tail of RFI,R

# SSM: set system mask. RT receives the old mask bits, then each named PSW
# flag is OR-ed with its bit from the immediate.
# FIX: the widePswX temporaries all read zext(pswG) (copy/paste error);
# each must widen its own flag so RT reflects the real old mask.
# NOTE(review): the shift positions (31,30,29,27,26,25) skip bit 28 and look
# unusual for a PSW read-out -- confirm against the architecture manual.
:SSM highIm10,RT is opfam=0x0 & sysop=0x6B & c=0x0 & highIm10 & RT
{
	widePswG:4 = zext(pswG);
	widePswF:4 = zext(pswF);
	widePswR:4 = zext(pswR);
	widePswP:4 = zext(pswP);
	widePswD:4 = zext(pswD);
	widePswI:4 = zext(pswI);
	RT = (widePswG << 31) | (widePswF << 30) | (widePswR << 29) | (widePswP << 27) | (widePswD << 26) | (widePswI << 25);
	pswG = pswG || (highIm10 & 0x40);
	pswF = pswF || (highIm10 & 0x20);
	pswR = pswR || (highIm10 & 0x10);
	pswP = pswP || (highIm10 & 0x4);
	pswD = pswD || (highIm10 & 0x2);
	pswI = pswI || (highIm10 & 0x1);
}

# RSM: reset system mask. RT receives the old mask bits, then each named PSW
# flag is cleared where the immediate has the corresponding bit set.
# FIX: same widePswX copy/paste error as SSM, corrected here as well.
:RSM highIm10,RT is opfam=0x0 & sysop=0x73 & c=0x0 & highIm10 & RT
{
	widePswG:4 = zext(pswG);
	widePswF:4 = zext(pswF);
	widePswR:4 = zext(pswR);
	widePswP:4 = zext(pswP);
	widePswD:4 = zext(pswD);
	widePswI:4 = zext(pswI);
	RT = (widePswG << 31) | (widePswF << 30) | (widePswR << 29) | (widePswP << 27) | (widePswD << 26) | (widePswI << 25);
	pswG = pswG && ((highIm10 & 0x40:1) == 0);
	pswF = pswF && ((highIm10 & 0x20:1) == 0);
	pswR = pswR && ((highIm10 & 0x10:1) == 0);
	pswP = pswP && ((highIm10 & 0x4:1) == 0);
	pswD = pswD && ((highIm10 & 0x2:1) == 0);
	pswI = pswI && ((highIm10 & 0x1:1) == 0);
}

# MTSM: move register to system mask -- unpacks R1 into the individual PSW flags
:MTSM R1 is opfam=0x0 & sysop=0xC3 & c=0x0 & im5=0 & R1
{
	pswG = ((R1 & 0x00000040:4) != 0);
	pswF = (R1 & 0x00000020:4) != 0;
	pswR = (R1 & 0x00000010:4) != 0;
	pswQ = (R1 & 0x00000008:4) != 0;
	pswP = (R1 & 0x00000004:4) != 0;
	pswD = (R1 & 0x00000002:4) != 0;
	pswI = (R1 & 0x00000001:4) != 0;
}

# LDSID: load space identifier
:LDSID SRRB,RT is opfam=0x0 & sysop=0x85 & u=0 & RT & SRRB & SR & SRVAL
{
	RT = SRVAL;
}

# MTSP: move to space register
:MTSP R1,SR3bit is opfam=0x0 & sysop=0xC1 & im5=0 & R1 & SR3bit
{
	SR3bit = R1;
}

# MTCTL: move to control register
:MTCTL R1,crname2 is opfam=0x0 & sysop=0xC2 & im5=0 & R1 & crname2 & cr
{
	cr = R1;
}

# MTSAR: special case of MTCTL targeting cr11 -- only the low 5 bits are kept
:MTSAR R1 is opfam=0x0 & sysop=0xC2 & im5=0 & R1 & crname2 & cr=11
{
	sar = (R1 & 0x1F);
}

# MFSP: move from space register
:MFSP SR3bit,RT is opfam=0x0 & sysop=0x25 & reg1=0x0 & RT & SR3bit
{
	RT = SR3bit;
}
# MFCTL: move from control register
:MFCTL crname2,RT is opfam=0x0 & sysop=0x45 & RT & crname2 & reg1=0 & cr { RT = cr; }

# this instruction is PA-RISC 2.0 only, but here to avoid disassembler comparison issues
:MFIA RT is opfam=0x0 & sysop=0xA5 & RT & reg1=0 { RT = iaoq_front; }

define pcodeop sync;
# SYNC / SYNCDMA share one semantic op; only bit20 distinguishes them
:SYNC is opfam=0x0 & sysop=0x20 & im5=0 & c=0 & bit20=0 { sync(); }	# sync cache
:SYNCDMA is opfam=0x0 & sysop=0x20 & im5=0 & c=0 & bit20=1 { sync(); }	# sync DMA

define pcodeop probe;
# PROBE family: access-rights probes, modeled as an opaque pcodeop
:PROBER SRRB,R1,RT is opfam=0x1 & sysopshifted=0x46 & m=0 & SRRB & R1 & RT & SPCBASE { RT = probe(R1, SPCBASE); }	# probe read
:PROBERI SRRB,highIm5,RT is opfam=0x1 & sysopshifted=0xC6 & m=0 & SRRB & highIm5 & RT & SPCBASE { RT = probe(highIm5:1, SPCBASE); }	# probe read imm
:PROBEW SRRB,R1,RT is opfam=0x1 & sysopshifted=0x47 & m=0 & SRRB & R1 & RT & SPCBASE { RT = probe(R1, SPCBASE); }	# probe write
:PROBEWI SRRB,highIm5,RT is opfam=0x1 & sysopshifted=0xC7 & m=0 & SRRB & highIm5 & RT & SPCBASE { RT = probe(highIm5:1, SPCBASE); }	# probe write imm

define pcodeop physicalAddress;
:LPA^sysCmplt RX^SRRB,RT is opfam=0x1 & sysopshifted=0x4D & sysCmplt & RX & SRRB & RT & SPCBASE { RT = physicalAddress(SPCBASE); }	# virt to phy addr translation

define pcodeop coherenceIndex;
:LCI RX^SRRB,RT is opfam=0x1 & sysopshifted=0x4C & m=0 & RX & SRRB & RT & SPCBASE { RT = coherenceIndex(SPCBASE); }	# load coherence index

define pcodeop purgeTLB;
# PDTLB / PITLB: purge data / instruction TLB entry; the m=1 forms also
# perform the base-register modify (RB updated by the index RX)
:PDTLB^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x48 & RX & SRRB3bit & sysCmplt & m=0 & SPCBASE { purgeTLB(RX, SPCBASE); }
:PDTLB^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x48 & RX & SRRB3bit & sysCmplt & m=1 & SPCBASE & RB { RB = RB + RX; purgeTLB(RX, SPCBASE); }
:PITLB^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x8 & RX & SRRB3bit & sysCmplt & m=0 & SPCBASE { purgeTLB(RX, SPCBASE); }
:PITLB^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x8 & RX & SRRB3bit & sysCmplt & m=1 & SPCBASE & RB { RB = RB + RX; purgeTLB(RX, SPCBASE); }
:PDTLBE^sysCmplt RX^SRRB3bit is opfam=0x1 &
# NOTE(review): PA-RISC SLEIGH constructors, packed many-per-line by the file dump; line breaks
# here are dump artifacts, not source structure. This span covers TLB entry purge/insert
# (PDTLBE/PITLBE, IDTLBA/IITLBA, IDTLBP/IITLBP, IITLBT), data/instruction cache purge & flush
# (PDC, FDC/FIC, FDCE/FICE), DIAG, coprocessor/SFU operations (SPOP0-3, COPR, CLDW/CLDD,
# CSTW/CSTD), and the floating-point load/store/convert/arithmetic/compare constructors.
# Code below is byte-identical to the original; only comment text was added or corrected.
sysopshifted=0x49 & RX & SRRB3bit & sysCmplt & m=0 & SPCBASE { purgeTLB(RX, SPCBASE); } :PDTLBE^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x49 & RX & SRRB3bit & sysCmplt & m=1 & SPCBASE & RB { RB = RB + RX; purgeTLB(RX, SPCBASE); } :PITLBE^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x9 & RX & SRRB3bit & sysCmplt & m=0 & SPCBASE { purgeTLB(RX, SPCBASE); } :PITLBE^sysCmplt RX^SRRB3bit is opfam=0x1 & sysopshifted=0x9 & RX & SRRB3bit & sysCmplt & m=1 & SPCBASE & RB { RB = RB + RX; purgeTLB(RX, SPCBASE); } define pcodeop insertTLBEntry; :IDTLBA R1,SRRB is opfam=0x1 & sysopshifted=0x41 & m=0 & SRRB & R1 & SPCBASE & im5=0 { insertTLBEntry(SPCBASE, R1); } :IITLBA R1,(SR3bit,RB) is opfam=0x1 & sysopshiftedshort=0x01 & m=0 & SR3bit & RB & R1 & im5=0 { insertTLBEntry(SR3bit, RB, R1); } :IDTLBP R1,SRRB is opfam=0x1 & sysopshifted=0x40 & m=0 & SRRB & R1 & SPCBASE & im5=0 { insertTLBEntry(SPCBASE, R1); } :IITLBP R1,(SR3bit,RB) is opfam=0x1 & sysopshiftedshort=0x00 & m=0 & im5=0 & SR3bit & RB & R1 { insertTLBEntry(SR3bit, RB, R1); } :IITLBT R1, R2 is opfam=0x1 & sysopshiftedshort=0x20 & m=0 & im5=0 & fpsub=0 & R2 & R1 { insertTLBEntry(R1, R2); } define pcodeop purgeCache; :PDC^indexedWordAccessCmplt RX^SRRB is opfam=0x1 & sysopshifted=0x4E & indexedWordAccessCmplt & RX & SRRB & SPCBASE & im5=0 & m=0 { purgeCache(SPCBASE, RX); } :PDC^indexedWordAccessCmplt RX^SRRB is opfam=0x1 & sysopshifted=0x4E & indexedWordAccessCmplt & RX & SRRB & SPCBASE & RB & im5=0 & m=1 { purgeCache(SPCBASE, RX); RB = RB + RX; } :FDC^indexedWordAccessCmplt RX^SRRB is opfam=0x1 & sysopshifted=0x4A & indexedWordAccessCmplt & RX & SRRB & SPCBASE & m=0 { purgeCache(SPCBASE, RX); } :FDC^indexedWordAccessCmplt RX^SRRB is opfam=0x1 & sysopshifted=0x4A & indexedWordAccessCmplt & RX & SRRB & SPCBASE & RB & m=1 { purgeCache(SPCBASE, RX); RB = RB + RX; } :FIC^indexedWordAccessCmplt RX(SR3bit,RB) is opfam=0x1 & sysopshiftedshort=0x0A & indexedWordAccessCmplt & RX & SR3bit & RB & m=0 { purgeCache(SR3bit, 
RB, RX); } :FIC^indexedWordAccessCmplt RX(SR3bit,RB) is opfam=0x1 & sysopshiftedshort=0x0A & indexedWordAccessCmplt & RX & SR3bit & RB & m=1 { purgeCache(SR3bit, RB, RX); RB = RB + RX;} :FDCE^indexedWordAccessCmplt RX^SRRB is opfam=0x1 & sysopshifted=0x4B & indexedWordAccessCmplt & RX & SRRB & SPCBASE & m=0 { purgeCache(SPCBASE, RX); } :FDCE^indexedWordAccessCmplt RX^SRRB is opfam=0x1 & sysopshifted=0x4B & indexedWordAccessCmplt & RX & SRRB & SPCBASE & RB & m=1 { purgeCache(SPCBASE, RX); RB = RB + RX; } :FICE^indexedWordAccessCmplt RX(SR3bit,RB) is opfam=0x1 & sysopshiftedshort=0x0B & indexedWordAccessCmplt & RX & SR3bit & RB & m=0 { purgeCache(SR3bit, RB, RX); } :FICE^indexedWordAccessCmplt RX(SR3bit,RB) is opfam=0x1 & sysopshiftedshort=0x0B & indexedWordAccessCmplt & RX & SR3bit & RB & m=1 { purgeCache(SR3bit, RB, RX); RB = RB + RX; } define pcodeop diag; :DIAG im26 is opfam=0x05 & im26 { diag(im26:4); } ################################################# # Coprocessor and Special Function Instructions ################################################# :SPOP0,SFU^nullifyForSpecial sop is opfam=0x04 & specop=0 & SFU & nullifyForSpecial & im5 & im15 [ sop=(im15 << 5) | im5; ] unimpl :SPOP1,SFU^nullifyForSpecial sop is opfam=0x04 & specop=1 & SFU & nullifyForSpecial & im5 & sopim10 [ sop=(sopim10 << 5) | im5; ] unimpl :SPOP2,SFU^nullifyForSpecial sop R2 is opfam=0x04 & specop=2 & SFU & nullifyForSpecial & R2 & im5 & sopim5 [ sop=(sopim5 << 5) | im5; ] unimpl :SPOP3,SFU^nullifyForSpecial sop R1,R2 is opfam=0x04 & specop=3 & SFU & nullifyForSpecial & R1 & R2 & im5 & sopim5 [ sop=(sopim5 << 5) | im5; ] unimpl :COPR,SFU,sop^nullifyForSpecial is opfam=0x0C & SFU & nullifyForSpecial & im5 & sopim17 [ sop=(sopim17 << 5) | im5; ] unimpl :CLDW,SFU^indexedWordAccessCmplt^loadCC RX^SRRB,RT is opfam=0x09 & bit9=0 & zero=0 & RX & SRRB & RT & SFU & indexedWordAccessCmplt & loadCC unimpl :CLDD,SFU^indexedDoublewordAccessCmplt^loadCC RX^SRRB,RT is opfam=0x0B & bit9=0 & zero=0 & RX & 
SRRB & RT & SFU & indexedDoublewordAccessCmplt & loadCC unimpl :CSTW,SFU^indexedWordAccessCmplt^storeCC RT,RX^SRRB is opfam=0x09 & bit9=1 & zero=0 & RX & SRRB & RT & SFU & indexedWordAccessCmplt & storeCC unimpl :CSTD,SFU^indexedDoublewordAccessCmplt^storeCC RT,RX^SRRB is opfam=0x0B & bit9=1 & zero=0 & RX & SRRB & RT & SFU & indexedDoublewordAccessCmplt & storeCC unimpl :CLDW,SFU^shortDispCmplt^loadCC highlse5^SRRB,RT is opfam=0x09 & bit9=0 & one=1 & SRRB & RT & SFU & highlse5 & shortDispCmplt & loadCC unimpl :CLDD,SFU^shortDispCmplt^loadCC highlse5^SRRB,RT is opfam=0x0B & bit9=0 & one=1 & SRRB & RT & SFU & highlse5 & shortDispCmplt & loadCC unimpl :CSTW,SFU^shortDispCmplt^storeCC RT,highlse5^SRRB is opfam=0x09 & bit9=1 & one=1 & SRRB & RT & SFU & highlse5 & shortDispCmplt & storeCC unimpl :CSTD,SFU^shortDispCmplt^storeCC RT,highlse5^SRRB is opfam=0x0B & bit9=1 & one=1 & SRRB & RT & SFU & highlse5 & shortDispCmplt & storeCC unimpl ################################################# # Floating Point Instructions ################################################# # These ld/st instructions are the same as the coprocessor instructions, with an SFU of zero or one # Floating Point Loads, 32 bit, indirect/indexed :FLDW^indexedWordAccessCmplt^loadCC RX^SRRB,FPRT32 is opfam=0x09 & zero=0 & bit9=0 & bits78=0 & indexedWordAccessCmplt & loadCC & SRRB & RX & FPRT32 & SPCBASE & u=0 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); FPRT32 = zext(*:4 addr); } :FLDW^indexedWordAccessCmplt^loadCC RX^SRRB,FPRT32 is opfam=0x09 & zero=0 & bit9=0 & bits78=0 & indexedWordAccessCmplt & loadCC & SRRB & RX & FPRT32 & SPCBASE & u=1 & m=0 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 2); FPRT32 = zext(*:4 addr); } :FLDW^indexedWordAccessCmplt^loadCC RX^SRRB,FPRT32 is opfam=0x09 & zero=0 & bit9=0 & bits78=0 & indexedWordAccessCmplt & loadCC & SRRB & RX & RB & FPRT32 & SPCBASE & u=0 & m=1 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); FPRT32 = zext(*:4 addr); RB = RB + RX; } 
# NOTE(review): the FLDW/FLDD/FSTW/FSTD variants below differ only in the u (scaled-index) and
# m (base-register post-modify) completer bits; the u=1 forms shift the index before adding.
:FLDW^indexedWordAccessCmplt^loadCC RX^SRRB,FPRT32 is opfam=0x09 & zero=0 & bit9=0 & bits78=0 & indexedWordAccessCmplt & loadCC & SRRB & RB & RX & FPRT32 & SPCBASE & u=1 & m=1 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 2); FPRT32 = zext(*:4 addr); RB = RB + RX; } # Floating Point Loads, 64 bit, indirect/indexed :FLDD^indexedDoublewordAccessCmplt^loadCC RX^SRRB,FPRT64 is opfam=0x0B & zero=0 & bit9=0 & sfu=0 & indexedDoublewordAccessCmplt & loadCC & SRRB & RX & FPRT64 & SPCBASE & u=0 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); FPRT64 = zext(*:8 addr); } :FLDD^indexedDoublewordAccessCmplt^loadCC RX^SRRB,FPRT64 is opfam=0x0B & zero=0 & bit9=0 & sfu=0 & indexedDoublewordAccessCmplt & loadCC & SRRB & RX & FPRT64 & SPCBASE & u=1 & m=0 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 2); FPRT64 = zext(*:8 addr); } :FLDD^indexedDoublewordAccessCmplt^loadCC RX^SRRB,FPRT64 is opfam=0x0B & zero=0 & bit9=0 & sfu=0 & indexedDoublewordAccessCmplt & loadCC & SRRB & RX & RB & FPRT64 & SPCBASE & u=0 & m=1 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); FPRT64 = zext(*:8 addr); RB = RB + RX; } :FLDD^indexedDoublewordAccessCmplt^loadCC RX^SRRB,FPRT64 is opfam=0x0B & zero=0 & bit9=0 & sfu=0 & indexedDoublewordAccessCmplt & loadCC & SRRB & RB & RX & FPRT64 & SPCBASE & u=1 & m=1 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 3); FPRT64 = zext(*:8 addr); RB = RB + RX; } # Floating Point Stores -- 32 and 64 bits :FSTW^indexedWordAccessCmplt^storeCC FPRT32,RX^SRRB is opfam=0x09 & zero=0 & bit9=1 & bits78=0 & indexedWordAccessCmplt & storeCC & SRRB & RX & FPRT32 & SPCBASE & u=0 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); *addr = FPRT32:4; } :FSTW^indexedWordAccessCmplt^storeCC FPRT32,RX^SRRB is opfam=0x09 & zero=0 & bit9=1 & bits78=0 & indexedWordAccessCmplt & storeCC & SRRB & RX & FPRT32 & SPCBASE & u=1 & m=0 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 2); *addr = FPRT32:4; } :FSTW^indexedWordAccessCmplt^storeCC FPRT32,RX^SRRB is opfam=0x09 & zero=0 & bit9=1 & bits78=0 & indexedWordAccessCmplt & 
storeCC & SRRB & RX & RB & FPRT32 & SPCBASE & u=0 & m=1 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); *addr = FPRT32:4; RB = RB + RX; } :FSTW^indexedWordAccessCmplt^storeCC FPRT32,RX^SRRB is opfam=0x09 & zero=0 & bit9=1 & bits78=0 & indexedWordAccessCmplt & storeCC & SRRB & RX & RB & FPRT32 & SPCBASE & u=1 & m=1 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 2); *addr = FPRT32:4; RB = RB + RX; } :FSTD^indexedDoublewordAccessCmplt^storeCC FPRT64,RX^SRRB is opfam=0x0B & zero=0 & bit9=1 & bits78=0 & indexedDoublewordAccessCmplt & storeCC & SRRB & RX & FPRT64 & SPCBASE & u=0 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); *addr = FPRT64:8; } :FSTD^indexedDoublewordAccessCmplt^storeCC FPRT64,RX^SRRB is opfam=0x0B & zero=0 & bit9=1 & bits78=0 & indexedDoublewordAccessCmplt & storeCC & SRRB & RX & FPRT64 & SPCBASE & u=1 & m=0 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 3); *addr = FPRT64:8; } :FSTD^indexedDoublewordAccessCmplt^storeCC FPRT64,RX^SRRB is opfam=0x0B & zero=0 & bit9=1 & bits78=0 & indexedDoublewordAccessCmplt & storeCC & SRRB & RX & RB & FPRT64 & SPCBASE & u=0 & m=1 { addr:$(ADDRSIZE) = SPCBASE + sext(RX); *addr = FPRT64:8; RB = RB + RX; } :FSTD^indexedDoublewordAccessCmplt^storeCC FPRT64,RX^SRRB is opfam=0x0B & zero=0 & bit9=1 & bits78=0 & indexedDoublewordAccessCmplt & storeCC & SRRB & RX & RB & FPRT64 & SPCBASE & u=1 & m=1 { addr:$(ADDRSIZE) = SPCBASE + (sext(RX) << 2); *addr = FPRT64:8; RB = RB + RX; } # Floating Point Load Word with Short Displacement, no modification to RB :FLDW^shortDispCmplt^loadCC highlse5^SRRB,FPRT32 is opfam=0x09 & one=1 & bit9=0 & bits78=0 & shortDispCmplt & loadCC & SRRB & SPCBASE & FPRT32 & highlse5 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); FPRT32 = zext(*:4 addr); } # Floating Point Load Word with Short Displacement, post-modification to RB :FLDW^shortDispCmplt^loadCC highlse5^SRRB,FPRT32 is opfam=0x09 & one=1 & bit9=0 & bits78=0 & shortDispCmplt & loadCC & SRRB & SPCBASE & FPRT32 & RB & highlse5 & m=1 & u=0 { 
addr:$(ADDRSIZE) = SPCBASE; FPRT32 = zext(*:4 addr); RB = RB + sext(highlse5); } # Floating Point Load Word with Short Displacement, pre-modification to RB :FLDW^shortDispCmplt^loadCC highlse5^SRRB,FPRT32 is opfam=0x09 & one=1 & bit9=0 & bits78=0 & shortDispCmplt & loadCC & SRRB & RB & SPCBASE & FPRT32 & highlse5 & m=1 & u=1 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); FPRT32 = zext(*:4 addr); RB = RB + sext(highlse5); } :FLDD^shortDispCmplt^loadCC highlse5^SRRB,FPRT64 is opfam=0x0B & one=1 & bit6=0 & bits78=0 & bit9=0 & shortDispCmplt & loadCC & SRRB & SPCBASE & FPRT64 & highlse5 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); FPRT64 = zext(*:8 addr); } :FLDD^shortDispCmplt^loadCC highlse5^SRRB,FPRT64 is opfam=0x0B & one=1 & bit6=0 & bits78=0 & bit9=0 & shortDispCmplt & loadCC & SRRB & RB & SPCBASE & FPRT64 & highlse5 & m=1 & u=0 { addr:$(ADDRSIZE) = SPCBASE; FPRT64 = zext(*:8 addr); RB = RB + sext(highlse5); } :FLDD^shortDispCmplt^loadCC highlse5^SRRB,FPRT64 is opfam=0x0B & one=1 & bit6=0 & bits78=0 & bit9=0 & shortDispCmplt & loadCC & SRRB & RB & SPCBASE & FPRT64 & highlse5 & m=1 & u=1 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); FPRT64 = zext(*:8 addr); RB = RB + sext(highlse5); } :FSTW^shortDispCmplt^storeCC FPRT32,highlse5^SRRB is opfam=0x09 & one=1 & bits78=0 & bit9=1 & shortDispCmplt & storeCC & SRRB & SPCBASE & FPRT32 & highlse5 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); *addr = FPRT32:4; } :FSTW^shortDispCmplt^storeCC FPRT32,highlse5^SRRB is opfam=0x09 & one=1 & bits78=0 & bit9=1 & shortDispCmplt & storeCC & SRRB & SPCBASE & RB & FPRT32 & highlse5 & m=1 & u=0 { addr:$(ADDRSIZE) = SPCBASE; *addr = FPRT32:4; RB = RB + sext(highlse5); } :FSTW^shortDispCmplt^storeCC FPRT32,highlse5^SRRB is opfam=0x09 & one=1 & bits78=0 & bit9=1 & shortDispCmplt & storeCC & SRRB & SPCBASE & RB & FPRT32 & highlse5 & m=1 & u=1 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); *addr = FPRT32:4; RB = RB + sext(highlse5); } :FSTD^shortDispCmplt^storeCC 
FPRT64,highlse5^SRRB is opfam=0x0B & one=1 & bit6=0 & bits78=0 & bit9=1 & shortDispCmplt & storeCC & SRRB & SPCBASE & FPRT64 & highlse5 & m=0 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); *addr = FPRT64:8; } :FSTD^shortDispCmplt^storeCC FPRT64,highlse5^SRRB is opfam=0x0B & one=1 & bit6=0 & bits78=0 & bit9=1 & shortDispCmplt & storeCC & SRRB & RB & SPCBASE & FPRT64 & highlse5 & u=0 & m=1 { addr:$(ADDRSIZE) = SPCBASE; *addr = FPRT64:8; RB = RB + sext(highlse5); } :FSTD^shortDispCmplt^storeCC FPRT64,highlse5^SRRB is opfam=0x0B & one=1 & bit6=0 & bits78=0 & bit9=1 & shortDispCmplt & storeCC & SRRB & RB & SPCBASE & FPRT64 & highlse5 & u=1 & m=1 { addr:$(ADDRSIZE) = SPCBASE + sext(highlse5); *addr = FPRT64:8; RB = RB + sext(highlse5); } # Floating Point Format Conversion Instructions :FCNV^fpsf^fpdf FPR232,FPRT32 is opfam=0x0E & fpclass=1 & fpc1sub=0 & fpc1sub2=0 & FPR232 & FPRT32 & fpsf & fpdf { FPRT32 = float2float(FPR232); } :FCNV^fpsf^fpdf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=0 & fpc1sub2=0 & sfu=0 & bit5=0 & FPR264 & FPRT64 & fpsf & fpsfraw=0 & fpdf & freg2sgl & fptsgl { # if src format is sgl, this is sgl to dbl # if src format is dbl, this is dbl to sgl # sgl to sgl or dbl to dbl don't make sense # and we don't support quad right now FPRT64 = float2float(freg2sgl); } :FCNV^fpsf^fpdf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=0 & fpc1sub2=0 & sfu=0 & bit5=0 & FPR264 & FPRT64 & fpsf & fpsfraw=1 & fpdf & freg2sgl & fptsgl { # if src format is sgl, this is sgl to dbl # if src format is dbl, this is dbl to sgl # sgl to sgl or dbl to dbl don't make sense # and we don't support quad right now fptsgl = float2float(FPR264); } :FCNVXF^fpsf^fpdf FPR232,FPRT32 is opfam=0x0E & fpclass=1 & fpc1sub=1 & fpc1sub2=0 & FPR232 & FPRT32 & fpsf & fpdf { FPRT32 = int2float(FPR232); } # int2float -- support single/double size ints and single/double floats # so handle 4 different cases :FCNVXF^fixedsf^fpdf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=1 & 
fpc1sub2=0 & FPR264 & FPRT64 & sfu=0 & fixedsf & fpdf & fptsgl & freg2sgl & fpsfraw=0 & fpdfraw=0 { fptsgl = int2float(freg2sgl); } :FCNVXF^fixedsf^fpdf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=1 & fpc1sub2=0 & FPR264 & FPRT64 & sfu=0 & fixedsf & fpdf & fptsgl & freg2sgl & fpsfraw=0 & fpdfraw=1 { FPRT64 = int2float(freg2sgl); } :FCNVXF^fixedsf^fpdf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=1 & fpc1sub2=0 & FPR264 & FPRT64 & sfu=0 & fixedsf & fpdf & fptsgl & freg2sgl & fpsfraw=1 & fpdfraw=0 { fptsgl = int2float(FPR264); } :FCNVXF^fixedsf^fpdf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=1 & fpc1sub2=0 & FPR264 & FPRT64 & sfu=0 & fixedsf & fpdf & fptsgl & freg2sgl & fpsfraw=1 & fpdfraw=1 { FPRT64 = int2float(FPR264); } :FCNVFX^fpsf^fixeddf FPR232,FPRT32 is opfam=0x0E & fpclass=1 & fpc1sub=2 & fpc1sub2=0 & FPR232 & FPRT32 & fpsf & fixeddf { temp:4 = round(FPR232); FPRT32 = trunc(temp); } :FCNVFX^fpsf^fixeddf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=2 & fpc1sub2=0 & sfu=0 & FPR264 & FPRT64 & fpsf & fpsfraw=1 & fixeddf & fptsgl & freg2sgl { local temp:8 = FPR264; temp = round(temp); FPRT64 = trunc(temp); } :FCNVFX^fpsf^fixeddf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=2 & fpc1sub2=0 & sfu=0 & FPR264 & FPRT64 & fpsf & fpsfraw=0 & fixeddf & fptsgl & freg2sgl { local temp:8 = float2float(freg2sgl); # convert single precision to double temp = round(temp); FPRT64 = trunc(temp); } :FCNVFXT^fpsf^fixeddf FPR232,FPRT32 is opfam=0x0E & fpclass=1 & fpc1sub=3 & fpc1sub2=0 & FPR232 & FPRT32 & fpsf & fixeddf { FPRT32 = trunc(FPR232); } :FCNVFXT^fpsf^fixeddf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=3 & fpc1sub2=0 & sfu=0 & FPR264 & FPRT64 & fpsf & fpsfraw=1 & fixeddf & fpsfraw & fptsgl & freg2sgl { local value:4 = float2float(FPR264); # convert double precision to single fptsgl = trunc(value); } :FCNVFXT^fpsf^fixeddf FPR264,FPRT64 is opfam=0x0C & fpclass=1 & fpc1sub=3 & fpc1sub2=0 & sfu=0 & FPR264 & FPRT64 & fpsf & fpsfraw=0 & fixeddf & 
fpsfraw & fptsgl & freg2sgl { local value:4 = freg2sgl; # get single precision value from left half of 64 bit register fptsgl = trunc(value); } # Floating Point Functions :FCPY^fpfmt FPR232,FPRT32 is opfam=0x0E & fpclass=0 & fpsub=2 & freg1=0 & FPR232 & FPRT32 & fpfmt { FPRT32 = FPR232; } :FCPY^fpfmt FPR264,FPRT64 is opfam=0x0C & fpclass=0 & fpsub=2 & freg1=0 & FPR264 & FPRT64 & fpfmt & fpsfraw=1 & fptsgl & freg2sgl { fptsgl = freg2sgl; } :FCPY^fpfmt FPR264,FPRT64 is opfam=0x0C & fpclass=0 & fpsub=2 & freg1=0 & FPR264 & FPRT64 & fpfmt & fpsfraw=0 & fptsgl & freg2sgl { FPRT64 = FPR264; } :FABS^fpfmt FPR232,FPRT32 is opfam=0x0E & fpclass=0 & fpsub=3 & freg1=0 & bit5=0 & bit8=0 & FPR232 & FPRT32 & fpfmt { FPRT32 = abs(FPR232); } :FABS^fpfmt FPR264,FPRT64 is opfam=0x0C & fpclass=0 & fpsub=3 & freg1=0 & bits59=0 & bit10=0 & FPR264 & FPRT64 & fpfmt { FPRT64 = abs(FPR264); } :FSQRT^fpfmt FPR232,FPRT32 is opfam=0x0E & fpclass=0 & fpsub=4 & freg1=0 & FPR232 & FPRT32 & fpfmt { FPRT32 = sqrt(FPR232); } :FSQRT^fpfmt FPR264,FPRT64 is opfam=0x0C & fpclass=0 & fpsub=4 & freg1=0 & FPR264 & FPRT64 & fpfmt { FPRT64 = sqrt(FPR264); } :FRND^fpfmt FPR232,FPRT32 is opfam=0x0E & fpclass=0 & fpsub=5 & FPR232 & FPRT32 & fpfmt { FPRT32 = round(FPR232); } :FRND^fpfmt FPR264,FPRT64 is opfam=0x0C & fpclass=0 & fpsub=5 & FPR264 & FPRT64 & fpfmt { FPRT64 = round(FPR264); } :FADD^fpfmt FPR232,FPR132,FPRT32 is opfam=0x0E & fpclass=3 & fpsub=0 & FPR232 & FPR132 & FPRT32 & fpfmt { FPRT32 = FPR132 f+ FPR232; } :FADD^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=0 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=0 & freg1sgl & freg2sgl & fptsgl { fptsgl = freg1sgl f+ freg2sgl; } :FADD^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=0 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=1 & freg1sgl & freg2sgl & fptsgl { FPRT64 = FPR164 f+ FPR264; } :FSUB^fpfmt FPR232,FPR132,FPRT32 is opfam=0x0E & fpclass=3 & fpsub=1 & FPR232 & FPR132 & FPRT32 & fpfmt { FPRT32 = FPR232 f- FPR132; } 
# NOTE(review): in the 64-bit FP arithmetic forms below, fpsfraw selects single- vs
# double-precision behavior; fptsgl/freg1sgl/freg2sgl appear to be the single-precision
# views of the 64-bit registers (defined earlier in this spec) -- confirm against the
# register aliasing section before relying on this.
:FSUB^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=1 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=0 & fptsgl & freg1sgl & freg2sgl { fptsgl = freg2sgl f- freg1sgl; } :FSUB^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=1 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=1 & fptsgl & freg1sgl & freg2sgl { FPRT64 = FPR264 f- FPR164; } :FMPY^fpfmt FPR232,FPR132,FPRT32 is opfam=0x0E & fpclass=3 & fpsub=2 & bit8=0 & FPR232 & FPR132 & FPRT32 & fpfmt { FPRT32 = FPR132 f* FPR232; } :FMPY^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=2 & bit8=0 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=0 & fptsgl & freg1sgl & freg2sgl { fptsgl = freg1sgl f* freg2sgl; } :FMPY^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=2 & bit8=0 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=1 & fptsgl & freg1sgl & freg2sgl { FPRT64 = FPR164 f* FPR264; } :FDIV^fpfmt FPR232,FPR132,FPRT32 is opfam=0x0E & fpclass=3 & fpsub=3 & FPR232 & FPR132 & FPRT32 & fpfmt { FPRT32 = FPR232 f/ FPR132; } :FDIV^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=3 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=0 & fptsgl & freg1sgl & freg2sgl { fptsgl = freg2sgl f/ freg1sgl; } :FDIV^fpfmt FPR264,FPR164,FPRT64 is opfam=0x0C & fpclass=3 & fpsub=3 & FPR264 & FPRT64 & FPR164 & fpfmt & fpsfraw=1 & fptsgl & freg1sgl & freg2sgl { FPRT64 = FPR264 f/ FPR164; } # 64 bit version :FMPYADD^fusedfmt FPR264,FPR164,FPRT64,fpra,fpta is opfam=0x06 & FPR264 & FPR164 & FPRT64 & fusedfmt & fpra & fpta & bit5=0 { FPRT64 = FPR164 f* FPR264; fpta = fpta f+ fpra; } # 32 bit version -- this uses a special encoding of the 32 bit registers and can only use 16-31{LR}, not the lower registers :FMPYADD^fusedfmt FUSEDR2,FUSEDR1,FUSEDRT,FUSEDRA,FUSEDTA is opfam=0x06 & FUSEDR2 & FUSEDR1 & FUSEDRT & fusedfmt & FUSEDRA & FUSEDTA & bit5=1 { FUSEDRT = FUSEDR1 f* FUSEDR2; FUSEDTA = FUSEDTA f+ FUSEDRA; } :FMPYSUB^fusedfmt FPR264,FPR164,FPRT64,fpra,fpta is opfam=0x26 & FPR264 & FPR164 & FPRT64 & 
fusedfmt & fpra & fpta & bit5=0 { FPRT64 = FPR164 f* FPR264; fpta = fpta f- fpra; } :FMPYSUB^fusedfmt FUSEDR2,FUSEDR1,FUSEDRT,FUSEDRA,FUSEDTA is opfam=0x26 & FUSEDR2 & FUSEDR1 & FUSEDRT & fusedfmt & FUSEDRA & FUSEDTA & bit5=1 { FUSEDRT = FUSEDR1 f* FUSEDR2; FUSEDTA = FUSEDTA f- FUSEDRA; } # Fixed Point / Integer Multiply :XMPYU^fpfmt FPR232,FPR132,FPRT64 is opfam=0x0E & fpclass=3 & fpsub=2 & bit8=1 & FPR232 & FPR132 & FPRT64 & fpfmt & fptsgl { arg1:8 = zext(FPR232); arg2:8 = zext(FPR132); prod:8 = arg1 * arg2; FPRT64 = prod; } # Floating Point Compare # 32 bit register comparison :FCMP^fpfmt1bit^fpcmp FPR232,FPR132 is opfam=0x0E & fpclass=2 & fpsub=0 & FPR232 & FPR132 & fpfmt1bit & bit11=0 & fpcmp { local result:1 = 0:1; # shift the previous compareBit onto the compareQueue compareQueue = (compareQueue << 1); compareQueue = compareQueue | compareBit; result = fpcmp; compareBit = result; } :FCMP^fpfmt1bit^fpcmp64 FPR232,FPR132 is opfam=0x0E & fpclass=2 & fpsub=0 & FPR232 & FPR132 & fpfmt1bit & bit11=1 & fpcmp64 { local result:1 = 0:1; # shift the previous compareBit onto the compareQueue compareQueue = (compareQueue << 1); compareQueue = compareQueue | compareBit; result = fpcmp64; compareBit = result; } # 64 bit register comparison :FCMP^fpfmt^fpcmp FPR264,FPR164 is opfam=0x0C & fpclass=2 & fpsub=0 & FPR264 & FPR164 & fpfmt & fpsfraw=0 & fpcmp { local result:1 = 0:1; # shift the previous compareBit onto the compareQueue compareQueue = (compareQueue << 1); compareQueue = compareQueue | compareBit; result = fpcmp; compareBit = result; } :FCMP^fpfmt^fpcmp64 FPR264,FPR164 is opfam=0x0C & fpclass=2 & fpsub=0 & FPR264 & FPR164 & fpfmt & fpsfraw=1 & fpcmp64 { local result:1 = 0:1; # shift the previous compareBit onto the compareQueue compareQueue = (compareQueue << 1); compareQueue = compareQueue | compareBit; result = fpcmp64; compareBit = result; } :FTEST^fptest is opfam=0x0C & fpclass=2 & fpsub=1 & fptest & $(COMMON) [ nullifyEnable = 1; globalset(inst_next, 
nullifyEnable); ] { nullifyNextCond = compareBit; } # Misc Floating Point Functions :COPR.0.0 is opfam=0x0C & im26=0x0 { } ############################################################### # Performance Monitoring Unit Instructions ############################################################### :PMENB is opfam=0x0C & bit5=0 & sfu=0x2 & pmuop=0x3 { } :PMDIS is opfam=0x0C & bit5=0 & sfu=0x2 & pmuop=0x1 { } } # end with : phase=1 ================================================ FILE: pypcode/processors/PA-RISC/data/manuals/pa11_acd.idx ================================================ @pa11_acd.pdf[PA-RISC 1.1 Architecture and Instruction Set Reference Manual, HP Part Number: 09740-90039, February 1994, Third Edition] ADD , 171 ADDB , 163 ADDBF , 164 ADDBT , 163 ADDO , 173 ADDIB , 165 ADDIBF , 166 ADDIBT , 165 ADDIL , 145 ADDL , 172 ADDI , 203 ADDIT , 205 ADDITO , 206 ADDIO , 204 ADDC , 174 ADDCO , 175 AND , 195 ANDCM , 196 B , 150 BL , 150 BLE , 156 BLR , 153 BE , 155 BB , 168 BVB , 167 BV , 154 BREAK , 226 COMB , 159 CMPBF , 160 CMPBT , 159 CMPCLR , 192 CMPIB , 161 CMPIBF , 162 CMPIBT , 161 CMPICLR , 209 CLDDX , 276 CLDDS , 280 CLDWX , 275 CLDWS , 279 COPR , 274 CSTDX , 278 CSTDS , 282 CSTWX , 277 CSTWS , 281 COPY , 193 DCOR , 200 DEPW , 217 DEPWI , 219 DIAG , 263 DS , 191 XOR , 194 EXTRW , 214 XMPYU , 341 FABS , 332 FADD , 335 FCMP , 342 FCNVXF , 328 FCNVFX , 329 FCNVFXT , 330 FCNVFF , 327 FCPY , 331 FDIV , 338 COPR , 345 FLDDX , 320 FLDDS , 324 FLDWX , 319 FLDWS , 323 FMPY , 337 FMPYADD , 339 FMPYSUB , 340 FRND , 334 FSQRT , 333 FSTDX , 322 FSTDS , 326 FSTWX , 321 FSTWS , 325 FSUB , 336 FTEST , 344 FDC , 259 FDCE , 261 FIC , 260 FICE , 262 GATE , 151 DEBUGID , 356 SPOP1 , 271 OR , 193 IDTLBA , 253 IDTLBP , 255 IITLBA , 254 IITLBP , 256 IDCOR , 202 LDCWX , 128 LDCWS , 134 LDB , 118 LDBX , 126 LDBS , 132 LCI , 248 LDH , 117 LDHX , 125 LDHS , 131 LDI , 143 LDIL , 144 LDO , 143 LPA , 246 LDSID , 234 LDW , 116 LDWAX , 127 LDWAS , 133 LDWM , 122 LDWX , 124 LDWS , 130 MOVB 
, 157 MFCTL , 239 MFDBAM , 357 MFDBAO , 358 MFIBAM , 359 MFIBAO , 360 MFSP , 238 MOVIB , 158 MTCTL , 236 MTDBAM , 361 MTDBAO , 362 MTIBAM , 363 MTIBAO , 364 MTSAR , 237 MTSP , 235 MTSM , 233 NOP , 193 PMDIS , 350 PMENB , 349 PROBER , 242 PROBERI , 243 PROBEW , 244 PROBEWI , 245 PDC , 257 PDTLB , 249 PDTLBE , 251 PITLB , 250 PITLBE , 252 RSM , 232 RFI , 227 RFIR , 229 SSM , 231 SHRPW , 211 SH1ADD , 176 SH1ADDL , 177 SH1ADDO , 178 SH3ADD , 182 SH3ADDL , 183 SH3ADDO , 184 SH2ADD , 179 SH2ADDL , 180 SH2ADDO , 181 SPOP1 , 271 SPOP3 , 273 SPOP2 , 272 SPOP0 , 270 STB , 121 STBS , 138 STBYS , 140 STH , 120 STHS , 137 STW , 119 STWAS , 139 STWM , 123 STWS , 136 SUB , 185 SUBT , 189 SUBTO , 190 SUBO , 186 SUBI , 207 SUBIO , 208 SUBB , 187 SUBBO , 188 SYNC , 240 SYNCDMA , 241 UADDCM , 198 UADDCMT , 199 UXOR , 197 VDEP , 216 VDEPI , 218 VEXTRS , 213 VEXTRU , 212 VSHD , 210 ZDEP , 221 ZDEPI , 223 ZVDEP , 220 ZVDEPI , 222 ================================================ FILE: pypcode/processors/PA-RISC/data/patterns/pa-risc_patterns.xml ================================================ 0xe840c002 0xe840c000 0x........ 0x6bc23fd9 0x6bc23fd9 0x08030241 0x081e0243 0x........ 0x08030241 0x........ 0x........ 
# NOTE(review): below this point the dump switches to pypcode/processors/PIC/data/languages/PIC24.sinc:
# PIC24/dsPIC address-space, register, context, and token definitions, followed by the Ws*
# addressing-mode sub-constructors. Code is byte-identical to the original; only comment
# text was added or corrected.
0x08030241 ================================================ FILE: pypcode/processors/PA-RISC/data/patterns/patternconstraints.xml ================================================ pa-risc_patterns.xml ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24.cspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24.ldefs ================================================ PIC-24E PIC-24F PIC-24H dsPIC30F dsPIC33F dsPIC33E dsPIC33C ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24.opinion ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24.pspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24.sinc ================================================ #TODO: relative branches are not computed as addresses in [] # TODO: byte oriented inst_next/inst_start must be changed to word oriented when storing into reg/stack... # NOTE: No support for stack pointer limit register (SPLIM) # NOTE: Some registers are larger than PIC defines. # NOTE: No support for signal emulation. # NOTE: No support for mapping a 32Kbyte section of program memory space into upper 32 Kbytes of data address space. # NOTE: No repeat loop status bit. # NOTE: Split flags register, which could cause side effects. # NOTE: pwrsav pcode is not defined. # TODO: Support for fractional multiplier mode. # TODO: Check ACCA and ACCB saturation modes. # TODO: Check accumulator saturation and rounding modes. # TODO: Test Stack Frame Active (SFA) status bit. 
# Basic ================================================================================ #define endian=little; # little endian only define alignment=2; # 2 signifies how the PC moves per instruction define space ram type=ram_space size=2; # (2x8=)16-bit address space define space register type=register_space size=2; # (2x8=)16-bit address space define space rom type=ram_space size=3 wordsize=2 default ; # (3x8=)24-bit address space # (2x8=)16-bit word per address # 24-bit canonical instruction: # address : code # 0 : byte2 | byte1 # 1 : pad | byte3 # 2 : byte2 | byte1 # 3 : pad | byte3 # . # . # . # etc # Registers ============================================================================ define ram offset=0 size=2 [ W0 W1 W2 W3 W4 W5 W6 W7 W8 W9 W10 W11 W12 W13 W14 W15 ]; define ram offset=0 size=4 [ W1W0 W3W2 W5W4 W7W6 W9W8 W11W10 W13W12 W15W14 ]; # define ram offset=28 size=3 [ FP ]; # define ram offset=32 size=3 [ SP ]; # Note: This assumes little endian only define ram offset=0 size=1 [ W0byte _ W1byte _ W2byte _ W3byte _ W4byte _ W5byte _ W6byte _ W7byte _ W8byte _ W9byte _ W10byte _ W11byte _ W12byte _ W13byte _ W14byte _ W15byte _ ]; define register offset=0 size=2 [ SHADOW_W0 SHADOW_W1 SHADOW_W2 SHADOW_W3 ]; define ram offset=0x20 size=2 [ SPLIM ]; # Stack Pointer Limit # Note: ACCxU implemented here as a 16 bit, actual register is only 8 bits in PIC3x # Note: This assumes little endian only define ram offset=0x22 size=2 [ ACCAL ACCAH ACCAU ACCBL ACCBH ACCBU ]; # Note: Like above ACCx implemented here as a 48 bit, actual register is only 40 bits in PIC3x define ram offset=0x22 size=6 [ ACCA ACCB ]; define ram offset=0x2E size=3 [ PC ]; @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) define ram offset=0x32 size=2 [ DSRPAG ]; # 9bit Data Space Read Page Address define ram offset=0x34 size=2 [ DSWPAG ]; # 8bit Data Space Write Page Address define ram offset=0x36 size=2 [ RCOUNT ]; # Repeat counter # TODO: Re-implement with shadow stack # 
define ram offset=0x38 size=2 [ DCOUNT ]; # 13 bits long DO Loop counter define ram offset=0x54 size=1 [ TBLPAG ]; # 7bit Data Table Page Address @else define ram offset=0x32 size=1 [ TBLPAG ]; # 8bit Data Table Page Address define ram offset=0x34 size=1 [ PSVPAG ]; # Program Memory Visibility Page Address Pointer define ram offset=0x36 size=2 [ RCOUNT ]; # Repeat counter define ram offset=0x38 size=2 [ DCOUNT ]; # 13 bits long DO Loop counter define ram offset=0x3A size=3 [ DOSTART ]; define ram offset=0x3E size=3 [ DOEND ]; @endif define ram offset=0x44 size=2 [ CORCON ]; # Core Control Register define ram offset=0x46 size=2 [ MODCON ]; define ram offset=0x48 size=2 [ XMODSRT ]; define ram offset=0x4A size=2 [ XMODEND ]; define ram offset=0x4C size=2 [ YMODSRT ]; define ram offset=0x4E size=2 [ YMODEND ]; define ram offset=0x50 size=2 [ XBREV ]; define ram offset=0x52 size=2 [ DISICNT ]; # Disable Interrupts Counter # Control registers define register offset=1024 size=1 [ SRL SRH ]; # SR component register fields (pseudo) define register offset=1536 size=1 [ SRH_OA SRH_OB SRH_SA SRH_SB SRH_OAB SRH_SAB SRH_DA SRH_DC SRL_IPL2 SRL_IPL1 SRL_IPL0 SRL_RA SRL_N SRL_OV SRL_Z SRL_C DISI SHADOW_SRH_DC SHADOW_SRL_N SHADOW_SRL_OV SHADOW_SRL_Z SHADOW_SRL_C ]; # System register - Program Counter # Note: actual PC is 23-bits wide, with a fixed 0 for the zeroth bit define register offset=2048 size=3 [ # dsPIC33E uses array of registers to have 4 deep zero overhead do loops @if defined(dsPIC33E) || defined(dsPIC33C) DOSTART # 24 bits long bit23=bit0=0 DO Loop Start Address DOSTART1 DOSTART2 DOSTART3 DOEND # 24 bits long bit23=bit0=0 DO Loop End Address DOEND1 DOEND2 DOEND3 @endif DOSTART_SHADOW DOEND_SHADOW ]; # System register - CPU Core Control Register # ????? connect this to meta-model? define register offset=2560 size=2 [ WDTcount # ????? formal name not documented, true size unknown WDTprescalarA # ????? formal name not documented, true size unknown WDTprescalarB # ????? 
formal name not documented, true size unknown ]; # CORCON component register fields (pseudo) define register offset=3072 size=1 [ CORCON_VAR CORCON_IPL3 CORCON_PSV CORCON_SFA @if defined(dsPIC33E) || defined(dsPIC33C) CORCON_DL # DO loop nesting level status bits (3 bit long) @endif ]; # NOTE: Technically this section only applies to dsPIC30F, dsPIC33F, dsPIC33E, and dsPIC33C but we will allow all profiles to see it. # This supports the zero overhead do loop specific registers define register offset=4096 size=2 [ # dsPIC33E uses array of registers to have 4 deep zero overhead do loops @if defined(dsPIC33E) || defined(dsPIC33C) DCOUNT # 13 bits long DO Loop counter DCOUNT1 DCOUNT2 DCOUNT3 @else # dsPIC30 and dsPIC33F use shadow register to allow one level deep zero overhead do loop (doesn't hurt to have in other variants) DCOUNT_SHADOW @endif ]; # This supports the "skip next instruction" conditionals define register offset=4608 size=1 [ SkipNextFlag # pseudo run-time register to flag skipping over this instruction (making it a NOP) ]; # contextreg is our instruction context # here we are only using it for zero overhead do loops # NOTE: At first I used size=2 and it seemed to be having problems? 
# NOTE(review): the context fields below drive zero-overhead DO-loop, repeat-loop, and
# skip-next-instruction decoding state, per the inline field comments.
define register offset=5120 size=4 [ contextreg ]; define context contextreg blockEnd=(0,0) noflow # Flag to indicate end do zero overhead do loop phase=(2,3) # Flag to indicate that we are in the middle of a instruction and not to change context repeatInstr=(4,4) noflow # Flag to indicate end of repeat instruction loop skipInstr=(5,5) noflow # Flag to indicate that we are in the next instruction after a "skip next instruction" command ; # Tokens ============================================================================ define token instr(32) padding=(24,31) # padding for 4th byte that is not decoded in actual processor OP_31_0 =(0,31) OP_31_4 =(4,31) OP_23_0 =(0,23) OP_23_1 =(1,23) OP_23_4 =(4,23) OP_23_11=(11,23) OP_23_12=(12,23) OP_23_14=(14,23) OP_23_15=(15,23) OP_23_16=(16,23) OP_23_18=(18,23) OP_23_19=(19,23) OP_23_20=(20,23) OP_21_20=(20,21) OP_19_16=(16,19) OP_19_17=(17,19) OP_19_18=(18,19) OP_15_8 =(8,15) OP_15_12=(12,15) OP_15_14=(14,15) OP_14_0 =(0,14) OP_14_4 =(4,14) OP_14_6 =(6,14) OP_14_7 =(7,14) OP_14_11=(11,14) OP_14_12=(12,14) OP_13_4 =(4,13) OP_11_7 =(7,11) OP_11_8 =(8,11) OP_11_10=(10,11) OP_10_8 =(8,10) OP_10_7 =(7,10) OP_10_4 =(4,10) OP_9_4 =(4,9) OP_9_8 =(8,9) OP_7_0 =(0,7) OP_7_4 =(4,7) OP_7_5 =(5,7) OP_7_6 =(6,7) OP_6_0 =(0,6) OP_6_4 =(4,6) OP_6_5 =(5,6) OP_5_4 =(4,5) OP_3_0 =(0,3) OP_1_0 =(0,1) OP_19 =(19,19) OP_15 =(15,15) OP_14 =(14,14) OP_13 =(13,13) OP_12 =(12,12) OP_11 =(11,11) OP_7 =(7,7) OP_6 =(6,6) OP_5 =(5,5) OP_3 =(3,3) OP_0 =(0,0) TOK_n =(16,16) TOK_A =(15,15) TOK_B =(14,14) TOK_Bb =(10,10) TOK_CCCC =(16,19) TOK_D =(13,13) TOK_W =(6,6) TOK_Z =(15,15) TOK_Zb =(11,11) TOK_f12 =(1,12) TOK_f13 =(0,12) TOK_f15 =(1,15) TOK_f15b =(4,18) TOK_k3 =(0,2) TOK_k4 =(0,3) TOK_k5 =(0,4) TOK_k6 =(0,5) signed TOK_k8a =(0,4) TOK_k4b =(4,7) TOK_k8b =(7,9) TOK_k8c =(4,11) TOK_k10 =(4,13) TOK_k14 =(0,13) TOK_k15 =(0,14) TOK_k16 =(4,19) TOK_k16t =(0,15) TOK_r4 =(7,10) signed TOK_bit4word =(0,0) TOK_b3 =(13,15) TOK_b1 =(0,0) TOK_b4 =(12,15) TOK_n6 
=(4,9) signed TOK_n7 =(0,6) TOK_n15 =(1,15) TOK_n16 =(0,15) signed TOK_0 =(0,0) TOK_7 =(7,7) TOK_13 =(13,13) TOK_3_0_Wreg =(0,3) # 16-bit full reg TOK_3_0_Breg =(0,3) # 8-bit LSB of reg TOK_3_1_Dreg =(1,3) # 32-bit reg pair, e.g., W1:W0 TOK_3_1_Dregn =(1,3) # 32-bit reg pair name, e.g., W0 TOK_4_0_U =(0,4) TOK_6_4_U =(4,6) TOK_9_0_U =(0,9) TOK_10_7_Wreg =(7,10) # 16-bit full reg TOK_10_7_Breg =(7,10) # 8-bit LSB of reg TOK_10_7_Wregp =(7,10) # 16-bit full reg offset by one TOK_10_8_Dreg =(8,10) # 32-bit reg pair, e.g., W1:W0 TOK_10_8_Dregn =(8,10) # 32-bit reg pair name, e.g., W0 TOK_11_8_Wreg =(8,11) # 16-bit full reg TOK_13_11_U =(11,13) TOK_14_12_Dreg =(12,14) # 32-bit reg pair, e.g., W1:W0 TOK_14_12_Dregn=(12,14) # 32-bit reg pair name, e.g., W0 TOK_14_11_Wreg =(11,14) # 16-bit full reg TOK_14_11_Wregn=(11,14) # 16-bit full reg offset by one TOK_14_11_Breg =(11,14) # 8-bit LSB of reg TOK_18_15_Wreg =(15,18) # 16-bit full reg TOK_18_15_Breg =(15,18) # 8-bit LSB of reg TOK_18_15_S =(15,18) signed TOK_17_16_mm =(16,17) TOK_18_16_mmm =(16,18) TOK_13_12_xx =(12,13) TOK_13_12_kk =(12,13) TOK_11_10_yy =(10,11) TOK_11_10_PP =(10,11) TOK_9_6_iiii =(6,9) TOK_5_2_jjjj =(2,5) TOK_1_0_aa =(0,1) ; # Attach variables ===================================================== # attach normal registers attach variables [ TOK_18_15_Wreg TOK_3_0_Wreg TOK_10_7_Wreg TOK_11_8_Wreg TOK_14_11_Wreg ] [ W0 W1 W2 W3 W4 W5 W6 W7 W8 W9 W10 W11 W12 W13 W14 W15 ]; attach variables [ TOK_14_11_Wregn ] [ _ W0 W1 W2 W3 W4 W5 W6 W7 W8 W9 W10 W11 W12 W13 W14 ]; attach variables [ TOK_10_7_Wregp ] [ W0 W1 W2 W3 W4 W5 W6 W7 W8 W9 W10 W11 W12 W13 W14 W0 ]; # attach lower byte sub-registers attach variables [ TOK_18_15_Breg TOK_3_0_Breg TOK_10_7_Breg TOK_14_11_Breg ] [ W0byte W1byte W2byte W3byte W4byte W5byte W6byte W7byte W8byte W9byte W10byte W11byte W12byte W13byte W14byte W15byte ]; # attach double registers (ref by even reg only) attach variables [ TOK_3_1_Dreg TOK_10_8_Dreg TOK_14_12_Dreg ] [ W1W0 
W3W2 W5W4 W7W6 W9W8 W11W10 W13W12 W15W14 ]; attach variables [ TOK_3_1_Dregn TOK_10_8_Dregn TOK_14_12_Dregn ] [ W0 W2 W4 W6 W8 W10 W12 W14 ]; # Sub-constructors ==================================================================== @define WSconstraint "(OP_6=0 | OP_5=0)" @define WDconstraint "(OP_13=0 | OP_12=0)" # Use must be constrained by $(WSconstraint) Ws_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg { export TOK_3_0_Wreg; } Ws_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:2 TOK_3_0_Wreg; } Ws_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 tmp; } Ws_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 tmp; } Ws_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 TOK_3_0_Wreg; } Ws_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 TOK_3_0_Wreg; } # Use must be constrained by $(WSconstraint) Wsd_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg & TOK_3_1_Dreg & OP_0=0 { export TOK_3_1_Dreg; } Wsd_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:4 TOK_3_0_Wreg; } Wsd_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 4; export *[ram]:4 tmp; } Wsd_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 4; export *[ram]:4 tmp; } Wsd_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 4; export *[ram]:4 TOK_3_0_Wreg; } Wsd_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 4; export *[ram]:4 TOK_3_0_Wreg; } # Use must be constrained by $(WSconstraint) Wsnd_t: TOK_3_0_Wreg is TOK_13_11_U=0x0 & TOK_3_0_Wreg & TOK_3_1_Dreg & OP_0=0 { export TOK_3_1_Dreg; } Wsnd_t: 
"["TOK_3_0_Wreg"]" is TOK_13_11_U=0x1 & TOK_3_0_Wreg { export *[ram]:4 TOK_3_0_Wreg; } Wsnd_t: "["TOK_3_0_Wreg"--]" is TOK_13_11_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 4; export *[ram]:4 tmp; } Wsnd_t: "["TOK_3_0_Wreg"++]" is TOK_13_11_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 4; export *[ram]:4 tmp; } Wsnd_t: "[--"TOK_3_0_Wreg"]" is TOK_13_11_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 4; export *[ram]:4 TOK_3_0_Wreg; } Wsnd_t: "[++"TOK_3_0_Wreg"]" is TOK_13_11_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 4; export *[ram]:4 TOK_3_0_Wreg; } # Use must be constrained by $(WSconstraint) Wsb_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg { export TOK_3_0_Wreg; } Wsb_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:2 TOK_3_0_Wreg; } Wsb_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 tmp; } Wsb_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 tmp; } Wsb_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 TOK_3_0_Wreg; } Wsb_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 TOK_3_0_Wreg; } # Use must be constrained by $(WSconstraint) Wsbyte_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } Wsbyte_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:1 TOK_3_0_Wreg; } Wsbyte_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { local tmp = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 1; export *[ram]:1 tmp; } Wsbyte_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { local tmp = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 1; export *[ram]:1 tmp; } Wsbyte_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = 
TOK_3_0_Wreg - 1; export *[ram]:1 TOK_3_0_Wreg; } Wsbyte_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 1; export *[ram]:1 TOK_3_0_Wreg; } # Use must be constrained by $(WSconstraint) Wsbbyte_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } Wsbbyte_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:1 TOK_3_0_Wreg; } Wsbbyte_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { local tmp = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 1; export *[ram]:1 tmp; } Wsbbyte_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { local tmp = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 1; export *[ram]:1 tmp; } Wsbbyte_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 1; export *[ram]:1 TOK_3_0_Wreg; } Wsbbyte_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 1; export *[ram]:1 TOK_3_0_Wreg; } # Use must be constrained by $(WDconstraint) Wd_t: TOK_10_7_Wreg is TOK_13_11_U=0x0 & TOK_10_7_Wreg { export TOK_10_7_Wreg; } Wd_t: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { export *[ram]:2 TOK_10_7_Wreg; } Wd_t: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg - 2; export *[ram]:2 tmp; } Wd_t: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 tmp; } Wd_t: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg - 2; export *[ram]:2 TOK_10_7_Wreg; } Wd_t: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 TOK_10_7_Wreg; } # Use must be constrained by $(WDconstraint) Wdd_t: TOK_10_7_Wreg is TOK_13_11_U=0x0 & TOK_10_7_Wreg & TOK_10_8_Dreg & OP_7=0 { export TOK_10_8_Dreg; } Wdd_t: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { export *[ram]:4 
TOK_10_7_Wreg; } Wdd_t: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg - 4; export *[ram]:4 tmp; } Wdd_t: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 4; export *[ram]:4 tmp; } Wdd_t: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg - 4; export *[ram]:4 TOK_10_7_Wreg; } Wdd_t: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg + 4; export *[ram]:4 TOK_10_7_Wreg; } # Use must be constrained by $(WDconstraint) Wdbyte_t: TOK_10_7_Wreg is TOK_13_11_U=0x0 & TOK_10_7_Wreg & TOK_10_7_Breg { export TOK_10_7_Breg; } Wdbyte_t: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { export *[ram]:1 TOK_10_7_Wreg; } Wdbyte_t: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg - 1; export *[ram]:1 tmp; } Wdbyte_t: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 1; export *[ram]:1 tmp; } Wdbyte_t: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg - 1; export *[ram]:1 TOK_10_7_Wreg; } Wdbyte_t: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg + 1; export *[ram]:1 TOK_10_7_Wreg; } # A lot like WdWRD_t movWs: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg { export TOK_3_0_Wreg; } movWs: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:2 TOK_3_0_Wreg; } movWs: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 tmp; } movWs: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { local tmp = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 tmp; } movWs: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 
TOK_3_0_Wreg; } movWs: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 TOK_3_0_Wreg; } movWs: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x6 & TOK_18_15_Wreg & TOK_3_0_Wreg { local tmp = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } movWs: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x7 & TOK_18_15_Wreg & TOK_3_0_Wreg { local tmp = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } movWsbyte: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } movWsbyte: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:1 TOK_3_0_Wreg; } movWsbyte: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 1; export *[ram]:1 tmp; } movWsbyte: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 1; export *[ram]:1 tmp; } movWsbyte: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 1; export *[ram]:1 TOK_3_0_Wreg; } movWsbyte: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 1; export *[ram]:1 TOK_3_0_Wreg; } movWsbyte: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x6 & TOK_18_15_Wreg & TOK_3_0_Wreg { tmp:2 = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:1 tmp; } movWsbyte: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x7 & TOK_18_15_Wreg & TOK_3_0_Wreg { tmp:2 = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:1 tmp; } movWd: TOK_10_7_Wreg is TOK_13_11_U=0x0 & TOK_10_7_Wreg { export TOK_10_7_Wreg; } movWd: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { export *[ram]:2 TOK_10_7_Wreg; } movWd: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg - 2; export *[ram]:2 tmp; } movWd: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 
tmp; } movWd: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg - 2; export *[ram]:2 TOK_10_7_Wreg; } movWd: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 TOK_10_7_Wreg; } movWd: "["TOK_10_7_Wreg"+"TOK_18_15_Wreg"]" is TOK_13_11_U=0x6 & TOK_18_15_Wreg & TOK_10_7_Wreg { tmp:2 = (TOK_10_7_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } movWd: "["TOK_10_7_Wreg"+"TOK_18_15_Wreg"]" is TOK_13_11_U=0x7 & TOK_18_15_Wreg & TOK_10_7_Wreg { tmp:2 = (TOK_10_7_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } movWdbyte: TOK_10_7_Wreg is TOK_13_11_U=0x0 & TOK_10_7_Wreg & TOK_10_7_Breg { export TOK_10_7_Breg; } movWdbyte: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { export *[ram]:1 TOK_10_7_Wreg; } movWdbyte: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg - 1; export *[ram]:1 tmp; } movWdbyte: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 1; export *[ram]:1 tmp; } movWdbyte: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg - 1; export *[ram]:1 TOK_10_7_Wreg; } movWdbyte: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg + 1; export *[ram]:1 TOK_10_7_Wreg; } movWdbyte: "["TOK_10_7_Wreg"+"TOK_18_15_Wreg"]" is TOK_13_11_U=0x6 & TOK_18_15_Wreg & TOK_10_7_Wreg { tmp:2 = (TOK_10_7_Wreg + TOK_18_15_Wreg); export *[ram]:1 tmp; } movWdbyte: "["TOK_10_7_Wreg"+"TOK_18_15_Wreg"]" is TOK_13_11_U=0x7 & TOK_18_15_Wreg & TOK_10_7_Wreg { tmp:2 = (TOK_10_7_Wreg + TOK_18_15_Wreg); export *[ram]:1 tmp; } Wn_t: TOK_3_0_Wreg is TOK_3_0_Wreg { export TOK_3_0_Wreg; } Wnbyte_t: TOK_3_0_Wreg is TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } Wnd_t: TOK_10_7_Wreg is TOK_10_7_Wreg { export TOK_10_7_Wreg; } Wndd_t: TOK_10_8_Dregn is TOK_10_8_Dreg & TOK_10_8_Dregn { export TOK_10_8_Dreg; 
} Wnda_t: TOK_10_7_Wreg is TOK_10_7_Wreg { export TOK_10_7_Wreg; } Wnbf_t: TOK_11_8_Wreg is TOK_11_8_Wreg { export TOK_11_8_Wreg; } Wdpp_t: "["TOK_10_7_Wreg"++]" is TOK_10_7_Wreg { local tmp = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 tmp; } Wndabyte_t: TOK_10_7_Wreg is TOK_10_7_Wreg & TOK_10_7_Breg { export TOK_10_7_Breg; } Wndb_t: TOK_3_0_Wreg is TOK_3_0_Wreg { export TOK_3_0_Wreg; } Wndbyte_t: TOK_3_0_Wreg is TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } Wns_t: TOK_3_0_Wreg is TOK_3_0_Wreg { export TOK_3_0_Wreg; } Wnsbyte_t: TOK_3_0_Wreg is TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } Wb_t: TOK_18_15_Wreg is TOK_18_15_Wreg { export TOK_18_15_Wreg; } Wbbyte_t: TOK_18_15_Wreg is TOK_18_15_Wreg & TOK_18_15_Breg { export TOK_18_15_Breg; } Wbb_t: TOK_14_11_Wreg is TOK_14_11_Wreg { export TOK_14_11_Wreg; } Wbds_t: TOK_14_12_Dregn is TOK_14_12_Dreg & TOK_14_12_Dregn { export TOK_14_12_Dreg; } Wbbbyte_t: TOK_14_11_Wreg is TOK_14_11_Wreg & TOK_14_11_Breg { export TOK_14_11_Breg; } Wnb_t: TOK_3_0_Wreg is TOK_3_0_Wreg { export TOK_3_0_Wreg; } Wnbbyte_t: TOK_3_0_Wreg is TOK_3_0_Wreg & TOK_3_0_Breg { export TOK_3_0_Breg; } Wbd_t: TOK_14_11_Wreg is TOK_14_11_Wreg { export TOK_14_11_Wreg; } WREG_t: ",wreg" is TOK_B=0 & TOK_D=0 { export W0; } WREG_t: "" is TOK_B=0 & TOK_D=1 & TOK_f13 { export *[ram]:2 TOK_f13; } WREGbyte_t: ",wreg" is TOK_B=1 & TOK_D=0 { export W0byte; } WREGbyte_t: "" is TOK_B=1 & TOK_D=1 & TOK_f13 { export *[ram]:1 TOK_f13; } WREGb_t: "wreg" is TOK_B=0 & TOK_D=0 { export W0; } WREGbbyte_t: "wreg" is TOK_B=1 & TOK_D=0 { export W0byte; } WREG_W0_t: "wreg" is TOK_B=0 { export W0; } WREG_W0byte_t: "wreg" is TOK_B=1 { export W0byte; } f13b_t: TOK_f13 is TOK_B=0 & TOK_D=1 & TOK_f13 { export *[ram]:2 TOK_f13; } f13bbyte_t: TOK_f13 is TOK_B=1 & TOK_D=1 & TOK_f13 { export *[ram]:1 TOK_f13; } f12_t: val is TOK_f12 [ val = TOK_f12 << 1; ] { export *[ram]:2 val; } f13_t: TOK_f13 is TOK_B=0 & TOK_f13 { export *[ram]:2 TOK_f13; } 
f13byte_t: TOK_f13 is TOK_B=1 & TOK_f13 { export *[ram]:1 TOK_f13; } f15_t: addr is TOK_f15 [ addr = ( TOK_f15 << 1 ); ] { export *[ram]:2 addr; } f15b_t: addr is TOK_f15b [ addr = ( TOK_f15b << 1 ); ] { export *[ram]:2 addr; } k3_t: "#"TOK_k3 is TOK_k3 { export *[const]:1 TOK_k3; } k4_t: "#"TOK_k4 is TOK_k4 { export *[const]:1 TOK_k4; } k5: "#"TOK_k5 is TOK_k5 { export *[const]:2 TOK_k5; } k5_t: "#"TOK_k5 is TOK_B=0 & TOK_k5 { export *[const]:2 TOK_k5; } k5byte_t: "#"TOK_k5 is TOK_B=1 & TOK_k5 { export *[const]:1 TOK_k5; } k5_B10_t: "#"TOK_k5 is TOK_Bb=0 & TOK_k5 { export *[const]:2 TOK_k5; } k5byte_B10_t: "#"TOK_k5 is TOK_Bb=1 & TOK_k5 { export *[const]:1 TOK_k5; } k10_t: "#"TOK_k10 is TOK_B=0 & TOK_k10 { export *[const]:2 TOK_k10; } k10byte_t: "#"TOK_k10 is TOK_B=1 & TOK_13_12_xx=0 & TOK_k10 { export *[const]:1 TOK_k10; } k13_12_t: "#"TOK_13_12_kk is TOK_13_12_kk { export *[const]:1 TOK_13_12_kk; } k14_t: "#"TOK_k14 is TOK_k14 { export *[const]:2 TOK_k14; } k15_t: "#"TOK_k15 is TOK_k15 { export *[const]:2 TOK_k15; } k16_t: "#"TOK_k16 is TOK_k16 { export *[const]:2 TOK_k16; } bit4_t: "#"bit4 is TOK_b3 & TOK_b1 [ bit4 = (TOK_b1 << 3) | TOK_b3; ] { export *[const]:1 bit4; } bit4byte_t: ".w" is TOK_bit4word=1 { } #display only bit4byte_t: ".b" is TOK_bit4word=0 { } #display only Bbit4_t: "#"TOK_b4 is TOK_b4 { export *[const]:1 TOK_b4; } n15_t: TOK_n15 is TOK_n15 { export *:3 TOK_n15; } n16_t: dest is TOK_n16 [ dest = inst_next + ( TOK_n16 << 1 ); ] { export *:3 dest; } dest24_t: dest is TOK_n15 ; TOK_n7 [ dest = (( TOK_n7 << 16 ) $or ( TOK_n15 << 1 )); ] { export *:3 dest; } WordInstNext: winstNext is epsilon [ winstNext = inst_next + 0; ] { export *[const]:3 winstNext; } WordInstNext4: winstNext is epsilon [ winstNext = inst_next + 0; ] { export *[const]:4 winstNext; } WnDest_t: TOK_3_0_Wreg is TOK_3_0_Wreg { dest:3 = zext(TOK_3_0_Wreg & 0xFFFE); export dest; } WnRDest_t: TOK_3_0_Wreg is TOK_3_0_Wreg { dest:3 = 2 * zext(TOK_3_0_Wreg); export dest; } # *2 
WsSlit10_t: "["TOK_3_0_Wreg"+"val"]" is TOK_18_15_S & TOK_13_11_U & TOK_6_4_U & TOK_3_0_Wreg [ val = ((TOK_18_15_S << 6) $or (TOK_13_11_U << 3) $or TOK_6_4_U) << 1; ] { tmp:2 = (TOK_3_0_Wreg + val); export *[ram]:2 tmp; } WsSlit10byte_t: "["TOK_3_0_Wreg"+"val"]" is TOK_18_15_S & TOK_13_11_U & TOK_6_4_U & TOK_3_0_Wreg [ val = (TOK_18_15_S << 6) $or (TOK_13_11_U << 3) $or TOK_6_4_U; ] { tmp:2 = (TOK_3_0_Wreg + val); export *[ram]:1 tmp; } # *2 WdSlit10_t: "["TOK_10_7_Wreg"+"val"]" is TOK_18_15_S & TOK_13_11_U & TOK_10_7_Wreg & TOK_6_4_U [ val = ((TOK_18_15_S << 6) $or (TOK_13_11_U << 3) $or TOK_6_4_U) << 1; ] { tmp:2 = (TOK_10_7_Wreg + val); export *[ram]:2 tmp; } WdSlit10byte_t: "["TOK_10_7_Wreg"+"val"]" is TOK_18_15_S & TOK_13_11_U & TOK_10_7_Wreg & TOK_6_4_U [ val = (TOK_18_15_S << 6) $or (TOK_13_11_U << 3) $or TOK_6_4_U; ] { tmp:2 = (TOK_10_7_Wreg + val); export *[ram]:1 tmp; } @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) n6_t: dest is TOK_n6 [ dest = inst_next + ( 2 * TOK_n6 ); ] { export *:3 dest; } k8_t: "#"k8 is TOK_k8b & TOK_k8a [ k8 = (TOK_k8b << 5) | TOK_k8a; ] { export *[const]:2 k8; } k8byte_t: "#"k8 is TOK_k8b & TOK_k8a [ k8 = (TOK_k8b << 5) | TOK_k8a; ] { export *[const]:1 k8; } WnWn1_t: TOK_3_0_Wreg is TOK_3_0_Wreg & TOK_14_11_Wreg { dest:3 = ( ( zext( TOK_14_11_Wreg & 0x007F ) << 16 ) | zext( TOK_3_0_Wreg & 0xFFFE ) ); export *:3 dest; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) Wbsft_t: TOK_3_0_Wreg is TOK_3_0_Wreg { export TOK_3_0_Wreg; } ACCA_t: ACCA is TOK_A=0 & ACCA { export ACCA; } ACCB_t: ACCB is TOK_A=1 & ACCB { export ACCB; } r4_t: is TOK_r4 & OP_10_7=0 { export *[const]:1 TOK_r4; } r4_t: ",#"TOK_r4 is TOK_r4 { export *[const]:1 TOK_r4; } k6_t: "#"TOK_k6 is TOK_k6 { export *[const]:1 TOK_k6; } WsWRO_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg { export TOK_3_0_Wreg; } WsWRO_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:2 TOK_3_0_Wreg; } WsWRO_t: 
"["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 tmp; } WsWRO_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 tmp; } WsWRO_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 TOK_3_0_Wreg; } WsWRO_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 TOK_3_0_Wreg; } WsWRO_t: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x6 & TOK_18_15_Wreg & TOK_3_0_Wreg { tmp:2 = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } WsWRO_t: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x7 & TOK_18_15_Wreg & TOK_3_0_Wreg { tmp:2 = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } WdWRO_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg { export TOK_3_0_Wreg; } WdWRO_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:2 TOK_3_0_Wreg; } WdWRO_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 tmp; } WdWRO_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 tmp; } WdWRO_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 TOK_3_0_Wreg; } WdWRO_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 TOK_3_0_Wreg; } WdWRO_t: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x6 & TOK_18_15_Wreg & TOK_3_0_Wreg { tmp:2 = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } WdWRO_t: "["TOK_3_0_Wreg"+"TOK_18_15_Wreg"]" is TOK_6_4_U=0x7 & TOK_18_15_Wreg & TOK_3_0_Wreg { tmp:2 = (TOK_3_0_Wreg + TOK_18_15_Wreg); export *[ram]:2 tmp; } Wx_t: ",["W8"]," is TOK_9_6_iiii=0x0 & W8 { export *[ram]:2 W8; } Wx_t: ",["W8"]+=2," is TOK_9_6_iiii=0x1 & W8 
{ tmp:2 = W8; W8 = W8 + 2; export *[ram]:2 tmp; } Wx_t: ",["W8"]+=4," is TOK_9_6_iiii=0x2 & W8 { tmp:2 = W8; W8 = W8 + 4; export *[ram]:2 tmp; } Wx_t: ",["W8"]+=6," is TOK_9_6_iiii=0x3 & W8 { tmp:2 = W8; W8 = W8 + 6; export *[ram]:2 tmp; } Wx_t: "" is TOK_9_6_iiii=0x4 # No Prefetch for X Data Space { tmp:2 = 0; export tmp; } Wx_t: ",["W8"]-=6," is TOK_9_6_iiii=0x5 & W8 { tmp:2 = W8; W8 = W8 - 6; export *[ram]:2 tmp; } Wx_t: ",["W8"]-=4," is TOK_9_6_iiii=0x6 & W8 { tmp:2 = W8; W8 = W8 - 4; export *[ram]:2 tmp; } Wx_t: ",["W8"]-=2," is TOK_9_6_iiii=0x7 & W8 { tmp:2 = W8; W8 = W8 - 2; export *[ram]:2 tmp; } Wx_t: ",["W9"]," is TOK_9_6_iiii=0x8 & W9 { export *[ram]:2 W9; } Wx_t: ",["W9"]+=2," is TOK_9_6_iiii=0x9 & W9 { tmp:2 = W9; W9 = W9 + 2; export *[ram]:2 tmp; } Wx_t: ",["W9"]+=4," is TOK_9_6_iiii=0xA & W9 { tmp:2 = W9; W9 = W9 + 4; export *[ram]:2 tmp; } Wx_t: ",["W9"]+=6," is TOK_9_6_iiii=0xB & W9 { tmp:2 = W9; W9 = W9 + 6; export *[ram]:2 tmp; } Wx_t: ",["W9"+"W12"]," is TOK_9_6_iiii=0xC & W9 & W12 { tmp:2 = (W9 + W12); export *[ram]:2 tmp; } Wx_t: ",["W9"]-=6," is TOK_9_6_iiii=0xD & W9 { tmp:2 = W9; W9 = W9 - 6; export *[ram]:2 tmp; } Wx_t: ",["W9"]-=4," is TOK_9_6_iiii=0xE & W9 { tmp:2 = W9; W9 = W9 - 4; export *[ram]:2 tmp; } Wx_t: ",["W9"]-=2," is TOK_9_6_iiii=0xF & W9 { tmp:2 = W9; W9 = W9 - 2; export *[ram]:2 tmp; } Wxd_t: ","W4 is TOK_13_12_xx=0x0 & W4 { export W4; } Wxd_t: ","W5 is TOK_13_12_xx=0x1 & W5 { export W5; } Wxd_t: ","W6 is TOK_13_12_xx=0x2 & W6 { export W6; } Wxd_t: ","W7 is TOK_13_12_xx=0x3 & W7 { export W7; } Wy_t: ",["W10"]," is TOK_5_2_jjjj=0x0 & W10 { export *[ram]:2 W10; } Wy_t: ",["W10"]+=2," is TOK_5_2_jjjj=0x1 & W10 { tmp:2 = W10; W10 = W10 + 2; export *[ram]:2 tmp; } Wy_t: ",["W10"]+=4," is TOK_5_2_jjjj=0x2 & W10 { tmp:2 = W10; W10 = W10 + 4; export *[ram]:2 tmp; } Wy_t: ",["W10"]+=6," is TOK_5_2_jjjj=0x3 & W10 { tmp:2 = W10; W10 = W10 + 6; export *[ram]:2 tmp; } Wy_t: "" is TOK_5_2_jjjj=0x4 # No Prefetch for Y Data Space { tmp:2 = 
0; export tmp; } Wy_t: ",["W10"]-=6," is TOK_5_2_jjjj=0x5 & W10 { tmp:2 = W10; W10 = W10 - 6; export *[ram]:2 tmp; } Wy_t: ",["W10"]-=4," is TOK_5_2_jjjj=0x6 & W10 { tmp:2 = W10; W10 = W10 - 4; export *[ram]:2 tmp; } Wy_t: ",["W10"]-=2," is TOK_5_2_jjjj=0x7 & W10 { tmp:2 = W10; W10 = W10 - 2; export *[ram]:2 tmp; } Wy_t: ",["W11"]" is TOK_5_2_jjjj=0x8 & W11 { export *[ram]:2 W11; } Wy_t: ",["W11"]+=2," is TOK_5_2_jjjj=0x9 & W11 { tmp:2 = W11; W11 = W11 + 2; export *[ram]:2 tmp; } Wy_t: ",["W11"]+=4," is TOK_5_2_jjjj=0xA & W11 { tmp:2 = W11; W11 = W11 + 4; export *[ram]:2 tmp; } Wy_t: ",["W11"]+=6," is TOK_5_2_jjjj=0xB & W11 { tmp:2 = W11; W11 = W11 + 6; export *[ram]:2 tmp; } Wy_t: ",["W11"+"W12"]," is TOK_5_2_jjjj=0xC & W11 & W12 { tmp:2 = (W11 + W12); export *[ram]:2 tmp; } Wy_t: ",["W11"]-=6," is TOK_5_2_jjjj=0xD & W11 { tmp:2 = W11; W11 = W11 - 6; export *[ram]:2 tmp; } Wy_t: ",["W11"]-=4," is TOK_5_2_jjjj=0xE & W11 { tmp:2 = W11; W11 = W11 - 4; export *[ram]:2 tmp; } Wy_t: ",["W11"]-=2," is TOK_5_2_jjjj=0xF & W11 { tmp:2 = W11; W11 = W11 - 2; export *[ram]:2 tmp; } WmWm_t: W4"*W4" is TOK_17_16_mm=0x0 & W4 { tmp:6 = (sext(W4) * sext(W4)); export tmp; } WmWm_t: W5"*W5" is TOK_17_16_mm=0x1 & W5 { tmp:6 = (sext(W5) * sext(W5)); export tmp; } WmWm_t: W6"*W6" is TOK_17_16_mm=0x2 & W6 { tmp:6 = (sext(W6) * sext(W6)); export tmp; } WmWm_t: W7"*W7" is TOK_17_16_mm=0x3 & W7 { tmp:6 = (sext(W7) * sext(W7)); export tmp; } WmWn_t: W4"*"W5 is TOK_18_16_mmm=0x0 & W4 & W5 { tmp:6 = (sext(W4) * sext(W5)); export tmp; } WmWn_t: W4"*"W6 is TOK_18_16_mmm=0x1 & W4 & W6 { tmp:6 = (sext(W4) * sext(W6)); export tmp; } WmWn_t: W4"*"W7 is TOK_18_16_mmm=0x2 & W4 & W7 { tmp:6 = (sext(W4) * sext(W7)); export tmp; } WmWn_t: "invalid" is TOK_18_16_mmm=0x3 { tmp:6 = 0; export tmp; } WmWn_t: W5"*"W6 is TOK_18_16_mmm=0x4 & W5 & W6 { tmp:6 = (sext(W5) * sext(W6)); export tmp; } WmWn_t: W5"*"W7 is TOK_18_16_mmm=0x5 & W5 & W7 { tmp:6 = (sext(W5) * sext(W7)); export tmp; } WmWn_t: W6"*"W7 is 
TOK_18_16_mmm=0x6 & W6 & W7 { tmp:6 = (sext(W6) * sext(W7)); export tmp; } WmWn_t: "invalid" is TOK_18_16_mmm=0x7 { tmp:6 = 0; export tmp; } Wyd_t: ","W4 is TOK_11_10_yy=0x0 & W4 { export W4; } Wyd_t: ","W5 is TOK_11_10_yy=0x1 & W5 { export W5; } Wyd_t: ","W6 is TOK_11_10_yy=0x2 & W6 { export W6; } Wyd_t: ","W7 is TOK_11_10_yy=0x3 & W7 { export W7; } AWB_t: ","W13 is TOK_1_0_aa=0x0 & W13 { export W13; } AWB_t: ",["W13"]+=2" is TOK_1_0_aa=0x1 & W13 { tmp:2 = W13; W13 = W13 + 2; export *[ram]:2 tmp; } AWB_t: "" is TOK_1_0_aa=0x2 # No write back { tmp:2 = 0; export tmp; } AWB_t: ",invalid" is TOK_1_0_aa=0x3 # invalid { tmp:2 = 0; export tmp; } @endif # Use must be constrained by $(WSconstraint) WsMUL_t: TOK_3_0_Wreg is TOK_6_4_U=0x0 & TOK_3_0_Wreg { export TOK_3_0_Wreg; } WsMUL_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { export *[ram]:2 TOK_3_0_Wreg; } WsMUL_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 tmp; } WsMUL_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 tmp; } WsMUL_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg - 2; export *[ram]:2 TOK_3_0_Wreg; } WsMUL_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { TOK_3_0_Wreg = TOK_3_0_Wreg + 2; export *[ram]:2 TOK_3_0_Wreg; } # Use must be constrained by $(WDconstraint) WdMUL_t: TOK_10_7_Wreg is TOK_13_11_U=0x0 & TOK_10_7_Wreg { export TOK_10_7_Wreg; } WdMUL_t: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { export *[ram]:2 TOK_10_7_Wreg; } WdMUL_t: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg - 2; export *[ram]:2 tmp; } WdMUL_t: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 tmp; } WdMUL_t: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & 
TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg - 2; export *[ram]:2 TOK_10_7_Wreg; } WdMUL_t: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { TOK_10_7_Wreg = TOK_10_7_Wreg + 2; export *[ram]:2 TOK_10_7_Wreg; } # Use must be constrained by $(WSconstraint) WsROM_t: "["TOK_3_0_Wreg"]" is TOK_6_4_U=0x1 & TOK_3_0_Wreg { addr:3 = (zext(TBLPAG)<<16) | zext(TOK_3_0_Wreg); export addr; } WsROM_t: "["TOK_3_0_Wreg"--]" is TOK_6_4_U=0x2 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_3_0_Wreg = tmp - 2; export addr; } WsROM_t: "["TOK_3_0_Wreg"++]" is TOK_6_4_U=0x3 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_3_0_Wreg = tmp + 2; export addr; } WsROM_t: "[--"TOK_3_0_Wreg"]" is TOK_6_4_U=0x4 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg - 2; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_3_0_Wreg = tmp; export addr; } WsROM_t: "[++"TOK_3_0_Wreg"]" is TOK_6_4_U=0x5 & TOK_3_0_Wreg { tmp:2 = TOK_3_0_Wreg + 2; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_3_0_Wreg = tmp; export addr; } # Use must be constrained by $(WDconstraint) WdROM_t: "["TOK_10_7_Wreg"]" is TOK_13_11_U=0x1 & TOK_10_7_Wreg { addr:3 = (zext(TBLPAG)<<16) | zext(TOK_10_7_Wreg); export addr; } WdROM_t: "["TOK_10_7_Wreg"--]" is TOK_13_11_U=0x2 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_10_7_Wreg = tmp - 2; export addr; } WdROM_t: "["TOK_10_7_Wreg"++]" is TOK_13_11_U=0x3 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_10_7_Wreg = tmp + 2; export addr; } WdROM_t: "[--"TOK_10_7_Wreg"]" is TOK_13_11_U=0x4 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg - 2; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_10_7_Wreg = tmp; export addr; } WdROM_t: "[++"TOK_10_7_Wreg"]" is TOK_13_11_U=0x5 & TOK_10_7_Wreg { tmp:2 = TOK_10_7_Wreg + 2; addr:3 = (zext(TBLPAG)<<16) | zext(tmp); TOK_10_7_Wreg = tmp; export addr; } # Macros 
========================================================================== ## 16-bit working register math flag support # for decimal carry of a byte macro testSRH_DCbyte(a) { SRH_DC = ( 0x10 & a ) != 0; } # for decimal carry of a word macro testSRH_DCword(a) { SRH_DC = ( 0x100 & a ) != 0; } # for negative of a byte or word macro testSRL_N(a) { SRL_N = (a s< 0); } # for (signed) addition of bytes or words: a + b macro testSRL_OVadd(a,b) { SRL_OV = scarry(a,b); } # for (signed) addition of words with carry: a + b + c macro testSRL_OVaddc(a,b,c) { tmp:4 = sext(a) + sext(b) + zext(c); SRL_OV = (tmp s< -0x00008000) || (tmp s> 0x00007FFF); } # for (signed) addition of bytes with carry: a + b + c macro testSRL_OVaddcByte(a,b,c) { tmp:2 = sext(a) + sext(b) + zext(c); SRL_OV = (tmp s< -0x0080) || (tmp s> 0x007F); } # for (signed) subtraction of bytes or words: a - b macro testSRL_OVsub(a,b) { SRL_OV = sborrow(a,b); } # for (signed) subtraction of words with carry: a - b - c macro testSRL_OVsubc(a,b,c) { tmp:4 = sext(a) - sext(b) - zext(c); SRL_OV = (tmp s< -0x00008000) || (tmp s> 0x00007FFF); } # for (signed) subtraction of bytes with carry: a - b - c macro testSRL_OVsubcByte(a,b,c) { tmp:2 = sext(a) - sext(b) - zext(c); SRL_OV = (tmp s< -0x0080) || (tmp s> 0x007F); } # for zero of a byte or word macro testSRL_Z(a) { SRL_Z = (a == 0); } # for zero sticky of a byte or word macro testSRL_Zsticky(a) { SRL_Z = SRL_Z && (a == 0); } macro addflags(a,b) { SRL_C = carry(a,b); SRL_OV = scarry(a,b); } macro addflagsWithCarry(a,b,c) { local ab = a + b; SRL_C = carry(a,b) || carry(ab,c); SRL_OV = scarry(a,b) || scarry(ab,c); } macro subflags(a,b) { SRL_C = a >= b; # inverted for borrow SRL_OV = sborrow(a,b); } macro subflagsWithCarry(a,b,c) { local bc = b + c; SRL_C = (a >= b) && (a >= bc); # inverted for borrow SRL_OV = sborrow(a,b) || sborrow(a,bc); } ## 40-bit ACCA and ACCB register math flag support # for addition (signed) macro testSRH_OA() { SRH_OA = ( ACCA & 0xFFFF00000000 ) != 
0; SRH_OAB = SRH_OA || SRH_OB; } # for addition (signed) macro testSRH_OB() { SRH_OB = ( ACCB & 0xFFFF00000000 ) != 0; SRH_OAB = SRH_OA || SRH_OB; } # for addition (signed) # Note: sticky bits macro testSRH_SA() { SRH_SA = SRH_SA | ( ( ACCA & 0x000100000000 ) != 0 ); SRH_SAB = SRH_SAB | SRH_SA; } # for addition (signed) # Note: sticky bits macro testSRH_SB() { SRH_SB = SRH_SB | ( ( ACCB & 0x000100000000 ) != 0 ); SRH_SAB = SRH_SAB | SRH_SB; } # 1000 0000 @define SRH_OAbit "0x80" # 0100 0000 @define SRH_OBbit "0x40" # 0010 0000 @define SRH_SAbit "0x20" # 0001 0000 @define SRH_SBbit "0x10" # 0000 1000 @define SRH_OABbit "0x08" # 0000 0100 @define SRH_SABbit "0x04" # 0000 0010 @define SRH_DAbit "0x02" # 0000 0001 @define SRH_DCbit "0x01" # SR component register fields (pseudo) macro unpackSRH( unpackByte ) { SRH_OA = ( $(SRH_OAbit) & unpackByte ) != 0; SRH_OB = ( $(SRH_OBbit) & unpackByte ) != 0; SRH_SA = ( $(SRH_SAbit) & unpackByte ) != 0; SRH_SB = ( $(SRH_SBbit) & unpackByte ) != 0; SRH_OAB = ( $(SRH_OABbit) & unpackByte ) != 0; SRH_SAB = ( $(SRH_SABbit) & unpackByte ) != 0; SRH_DA = ( $(SRH_DAbit) & unpackByte ) != 0; SRH_DC = ( $(SRH_DCbit) & unpackByte ) != 0; } macro packSRH( packByte ) { packByte = (SRH_OA * $(SRH_OAbit)) | (SRH_OB * $(SRH_OBbit)) | (SRH_SA * $(SRH_SAbit)) | (SRH_SB * $(SRH_SBbit)) | (SRH_OAB * $(SRH_OABbit)) | (SRH_SAB * $(SRH_SABbit)) | (SRH_DA * $(SRH_DAbit)) | (SRH_DC * $(SRH_DCbit)) ; } # 1000 0000 @define SRL_IPL2bit "0x80" # 0100 0000 @define SRL_IPL1bit "0x40" # 0010 0000 @define SRL_IPL0bit "0x20" # 0001 0000 @define SRL_RAbit "0x10" # 0000 1000 @define SRL_Nbit "0x08" # 0000 0100 @define SRL_OVbit "0x04" # 0000 0010 @define SRL_Zbit "0x02" # 0000 0001 @define SRL_Cbit "0x01" macro unpackSRL( unpackByte ) { SRL_IPL2 = ( $(SRL_IPL2bit) & unpackByte ) != 0; SRL_IPL1 = ( $(SRL_IPL1bit) & unpackByte ) != 0; SRL_IPL0 = ( $(SRL_IPL0bit) & unpackByte ) != 0; SRL_RA = ( $(SRL_RAbit) & unpackByte ) != 0; SRL_N = ( $(SRL_Nbit) & unpackByte ) != 
0; SRL_OV = ( $(SRL_OVbit) & unpackByte ) != 0; SRL_Z = ( $(SRL_Zbit) & unpackByte ) != 0; SRL_C = ( $(SRL_Cbit) & unpackByte ) != 0; } macro packSRL( packByte ) { packByte = (SRL_IPL2 * $(SRL_IPL2bit)) | (SRL_IPL1 * $(SRL_IPL1bit)) | (SRL_IPL0 * $(SRL_IPL0bit)) | (SRL_RA * $(SRL_RAbit)) | (SRL_N * $(SRL_Nbit)) | (SRL_OV * $(SRL_OVbit)) | (SRL_Z * $(SRL_Zbit)) | (SRL_C * $(SRL_Cbit)) ; } # Constructors ==================================================================== @if defined(dsPIC30) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) # The "blockEnd" is the flag to indicate where the goto to cause the loop must be inserted. # Previously an :do instruction figured out the address it needed an goto to be inserted and do something like # do: .... [ blockEnd=1; globalset(addressForGoto,blockEnd); ] which will cause the context bit blockEnd to be # set on the end of loop address. # PHASE-0 Handle end-of-loop getDOSTART: is epsilon { export *:3 DOSTART; } :^instruction is phase=0 & blockEnd=1 & instruction & getDOSTART [ phase = 1; ] { @if defined(dsPIC30F) || defined(dsPIC33F) DCOUNT = DCOUNT - 1; if (DCOUNT != 0) goto getDOSTART; DCOUNT = DCOUNT_SHADOW; DOEND = DOEND_SHADOW; DOSTART = DOSTART_SHADOW; @endif @if defined(dsPIC33E) || defined(dsPIC33C) DL:2 = zext(CORCON_DL); *[register]:2 (&:2 DCOUNT + DL*2) = (*[register]:2 (&:2 DCOUNT + DL*2)) - 1; count:2 = *[register]:2 (&:2 DCOUNT + DL*2); if (count != 0) goto getDOSTART; # stack 4 levels deep but we don't enforce this CORCON_DL = CORCON_DL - 1; @endif } @endif :^instruction is phase=0 & instruction [ phase = 1; ] { build instruction; } # PHASE-1 Handle repeat / skip :^instruction is phase=1 & repeatInstr=1 & instruction [ phase = 2; ] { if (RCOUNT == 0) goto ; RCOUNT = RCOUNT - 1; build instruction; goto inst_start; } # TODO: Why is context/^instruction used instead of simply having skip instructions branch around next instruction? 
# skipNext global support:
# A bit-test skip instruction (btsc/btss) sets the skipInstr context bit on
# the following instruction via globalset and computes SkipNextFlag at run
# time; here the flagged instruction is conditionally skipped.
# FIX: the branch previously read "goto ;" with no target, which is not
# valid SLEIGH; restored as inst_next (skip == fall to the next insn).
:^instruction is phase=1 & skipInstr=1 & instruction [ phase = 2; ] {
	if ( SkipNextFlag == 1 ) goto inst_next;
	build instruction;
}

:^instruction is phase=1 & instruction [ phase = 2; ] { build instruction; }

with : phase = 2 {

# add{.b} f{,WREG} : f + W0 -> f or WREG; addflags sets C/OV, then N/Z/DC.
:add.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x4 & OP_15=0 & WREG_t & f13_t {
	local src = f13_t;
	addflags( src, W0 );
	local result = src + W0;
	WREG_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCword( result );
}

:add.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x4 & OP_15=0 & WREGbyte_t & f13byte_t {
	local src = f13byte_t;
	addflags( src, W0byte );
	local result = src + W0byte;
	WREGbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCbyte( result );
}

# add{.b} #lit10,Wn
:add.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x0 & OP_15=0 & k10_t & Wn_t {
	local src = k10_t;
	addflags( src, Wn_t );
	local result = src + Wn_t;
	Wn_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCword( result );
}

:add.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x0 & OP_15=0 & k10byte_t & Wnbyte_t {
	local src = k10byte_t;
	addflags( src, Wnbyte_t );
	local result = src + Wnbyte_t;
	Wnbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCbyte( result );
}

# add{.b} Wb,#lit5,Wd
:add.w Wb_t,k5_t,Wd_t is OP_23_20=0x4 & OP_19=0x0 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t {
	addflags( k5_t, Wb_t );
	local result = k5_t + Wb_t;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCword( result );
}

:add.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x4 & OP_19=0x0 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t {
	addflags( k5byte_t, Wbbyte_t );
	local result = k5byte_t + Wbbyte_t;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCbyte( result );
}

# add{.b} Wb,Ws,Wd
:add.w Wb_t,Ws_t,Wd_t is OP_23_20=0x4 & OP_19=0x0 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t {
	local src = Ws_t;
	addflags( Wb_t, src );
	local result = Wb_t + src;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCword( result );
}

:add.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x4 & OP_19=0x0 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t {
	local src = Wsbyte_t;
	addflags( Wbbyte_t, src );
	local result = Wbbyte_t + src;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCbyte( result );
}

@if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C)
# add Acc : ACCA + ACCB -> selected accumulator (DSP class)
:add ACCA_t is OP_23_20=0xC & OP_19_16=0xB & ACCA_t & OP_14_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 {
	ACCA = ACCA + ACCB;
	testSRH_OA();
	testSRH_SA();
}

:add ACCB_t is OP_23_20=0xC & OP_19_16=0xB & ACCB_t & OP_14_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 {
	ACCB = ACCA + ACCB;
	testSRH_OB();
	testSRH_SB();
}
@endif

@if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C)
# add Ws,#Slit4,Acc : sign-extended Ws shifted into 1.31 position, accumulated
:add WsWRO_t^r4_t,ACCA_t is OP_23_20=0xC & OP_19_16=0x9 & ACCA_t & r4_t & WsWRO_t {
	ACCA = (sext(WsWRO_t) << (16 - r4_t)) + ACCA;
	testSRH_OA();
	testSRH_SA();
}

:add WsWRO_t^r4_t,ACCB_t is OP_23_20=0xC & OP_19_16=0x9 & ACCB_t & r4_t & WsWRO_t {
	ACCB = (sext(WsWRO_t) << (16 - r4_t)) + ACCB;
	testSRH_OB();
	testSRH_SB();
}
@endif

# addc{.b} : add with carry-in; Z is sticky for multi-precision sequences.
:addc.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x4 & OP_15=1 & WREG_t & f13_t {
	local c:2 = zext(SRL_C);
	local src = f13_t;
	addflagsWithCarry( src, W0, c );
	local result = src + W0 + c;
	WREG_t = result;
	testSRL_N ( result );
	testSRL_Zsticky( result );
	testSRH_DCword ( result );
}

:addc.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x4 & OP_15=1 & WREGbyte_t & f13byte_t {
	local c = SRL_C;
	local src = f13byte_t;
	addflagsWithCarry( src, W0byte, c );
	local result = src + W0byte + c;
	WREGbyte_t = result;
	testSRL_N ( result );
	testSRL_Zsticky( result );
	testSRH_DCbyte ( result );
}

:addc.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x0 & OP_15=1 & k10_t & Wn_t {
	local c:2 = zext(SRL_C);
	addflagsWithCarry( k10_t, Wn_t, c );
	Wn_t = k10_t + Wn_t + c;
	testSRL_N ( Wn_t );
	testSRL_Zsticky( Wn_t );
	testSRH_DCword ( Wn_t );
}

:addc.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x0 & OP_15=1 & k10byte_t & Wnbyte_t {
	local c = SRL_C;
	addflagsWithCarry( k10byte_t, Wnbyte_t, c );
	Wnbyte_t = k10byte_t + Wnbyte_t + c;
	testSRL_N ( Wnbyte_t );
	testSRL_Zsticky( Wnbyte_t );
	testSRH_DCbyte ( Wnbyte_t );
}

:addc.w Wb_t,k5_t,Wd_t is OP_23_20=0x4 & OP_19=0x1 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t {
	local c:2 = zext(SRL_C);
	addflagsWithCarry( k5_t, Wb_t, c );
	local result = k5_t + Wb_t + c;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Zsticky( result );
	testSRH_DCword ( result );
}

:addc.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x4 & OP_19=0x1 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t {
	local c = SRL_C;
	addflagsWithCarry( k5byte_t, Wbbyte_t, c );
	local result = k5byte_t + Wbbyte_t + c;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Zsticky( result );
	testSRH_DCbyte ( result );
}

:addc.w Wb_t,Ws_t,Wd_t is OP_23_20=0x4 & OP_19=0x1 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t {
	local c:2 = zext(SRL_C);
	local src = Ws_t;
	addflagsWithCarry( Wb_t, src, c );
	local result = Wb_t + src + c;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Zsticky( result );
	testSRH_DCword ( result );
}

:addc.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x4 & OP_19=0x1 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t {
	local c = SRL_C;
	local src = Wsbyte_t;
	addflagsWithCarry( Wbbyte_t, src, c );
	local result = Wbbyte_t + src + c;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Zsticky( result );
	testSRH_DCbyte ( result );
}

# and{.b} : bitwise AND; only N and Z are affected.
:and.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x6 & OP_15=0 & WREG_t & f13_t {
	WREG_t = f13_t & W0;
	testSRL_N ( WREG_t );
	testSRL_Z ( WREG_t );
}

:and.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x6 & OP_15=0 & WREGbyte_t & f13byte_t {
	WREGbyte_t = f13byte_t & W0byte;
	testSRL_N ( WREGbyte_t );
testSRL_Z ( WREGbyte_t ); }

:and.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x2 & OP_15=0 & k10_t & Wn_t {
	Wn_t = k10_t & Wn_t;
	testSRL_N ( Wn_t );
	testSRL_Z ( Wn_t );
}

:and.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x2 & OP_15=0 & k10byte_t & Wnbyte_t {
	Wnbyte_t = k10byte_t & Wnbyte_t;
	testSRL_N ( Wnbyte_t );
	testSRL_Z ( Wnbyte_t );
}

# NOTE(review): unlike the add.w Wb,#lit5,Wd form, these two do not build
# Wd explicitly; un-built operands are built implicitly by SLEIGH.
:and.w Wb_t,k5_t,Wd_t is OP_23_20=0x6 & OP_19=0x0 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t {
	local result = k5_t & Wb_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

:and.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x6 & OP_19=0x0 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t {
	local result = k5byte_t & Wbbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

:and.w Wb_t,Ws_t,Wd_t is OP_23_20=0x6 & OP_19=0x0 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t {
	local result = Wb_t & Ws_t;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

:and.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x6 & OP_19=0x0 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t {
	local result = Wbbyte_t & Wsbyte_t;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

# asr{.b} : arithmetic shift right by one; bit shifted out goes to C.
:asr.w f13_t^WREG_t is OP_23_20=0xD & OP_19_16=0x5 & OP_15=1 & WREG_t & f13_t {
	local src = f13_t;
	SRL_C = ( src & 1 ) != 0;
	WREG_t = src s>> 1;
	testSRL_N ( WREG_t );
	testSRL_Z ( WREG_t );
}

:asr.b f13byte_t^WREGbyte_t is OP_23_20=0xD & OP_19_16=0x5 & OP_15=1 & WREGbyte_t & f13byte_t {
	local src = f13byte_t;
	SRL_C = ( src & 1 ) != 0;
	WREGbyte_t = src s>> 1;
	testSRL_N ( WREGbyte_t );
	testSRL_Z ( WREGbyte_t );
}

:asr.w Ws_t,Wd_t is OP_23_20=0xD & OP_19_16=0x1 & OP_15=0x1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t {
	local src = Ws_t;
	SRL_C = ( src & 1 ) != 0;
	local result = src s>> 1;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

:asr.b Wsbyte_t,Wdbyte_t is OP_23_20=0xD & OP_19_16=0x1 & OP_15=0x1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t {
	local src = Wsbyte_t;
	SRL_C = ( src & 1 ) != 0;
	local result = src s>> 1;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

# asr Wb,#lit4,Wnd / asr Wb,Wns,Wnd : multi-bit arithmetic shifts.
:asr.w Wbd_t,k4_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xE & OP_15=0x1 & OP_6_4=0x4 & Wbd_t & Wnd_t & k4_t {
	Wnd_t = Wbd_t s>> k4_t;
	testSRL_N ( Wnd_t );
	testSRL_Z ( Wnd_t );
}

:asr.w Wbd_t,Wns_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xE & OP_15=0x1 & OP_6_4=0x0 & Wbd_t & Wnd_t & Wns_t {
	Wnd_t = Wbd_t s>> ( Wns_t & 0x001F );
	testSRL_N ( Wnd_t );
	testSRL_Z ( Wnd_t );
}

# The pdf manual is very confusing for this instruction. The final conclusion is that the .B
# is really a pseudo instruction and that everything is actually encoded as a word with the
# 'f' bits being left shifted by 1
:bclr.w f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0x9 & bit4_t & f12_t {
	local mask:2 = ~(1 << bit4_t);
	f12_t = f12_t & mask;
}

# DSRPAG ?????
:bclr.w Wsb_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x1 & Bbit4_t & OP_11=0x0 & TOK_Bb=0 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = ~(1 << Bbit4_t);
	Wsb_t = Wsb_t & mask;
}

:bclr.b Wsbbyte_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x1 & Bbit4_t & OP_11=0x0 & TOK_Bb=1 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsbbyte_t {
	local mask:1 = ~(1 << Bbit4_t);
	Wsbbyte_t = Wsbbyte_t & mask;
}

@if defined(dsPIC33C)
# bfext #bit,#wid,src,Wnd : extract a wid5-bit field starting at TOK_k4.
# FIX: the field mask was computed as (0xff >> (16-(wid5 + 1))) -- an 8-bit
# base constant (yields 0 for wid5 < 8 and never more than 8 set bits) with
# an off-by-one width; a wid5-bit mask over a 16-bit word is
# 0xffff >> (16 - wid5), with wid5 = TOK_k4b - TOK_k4 + 1 in [1,16].
:bfext TOK_k4 "#"wid5, Wsb_t, Wnbf_t is OP_23_16=0x0A & OP_15_12=0x8 & Wnbf_t & TOK_k4b & TOK_k4; OP_23_16=0x0 & OP_15_8=0x0 & OP_7=0x0 & Wsb_t [wid5 = TOK_k4b - TOK_k4 + 1;] {
	local mask:2 = (0xffff >> (16 - wid5)) << TOK_k4;
	local result:2 = (Wsb_t & mask) >> TOK_k4;
	Wnbf_t = result;
}

:bfext TOK_k4 "#"wid5, n15_t, Wnbf_t is OP_23_16=0x0A & OP_15_12=0xA & Wnbf_t & TOK_k4b & TOK_k4; OP_23_16=0x0 & OP_0=0x0 & n15_t [wid5 = TOK_k4b - TOK_k4 + 1;] {
	local mask:2 = (0xffff >> (16 - wid5)) << TOK_k4;
	local word = *:2 n15_t;
	local result:2 = (word & mask) >> TOK_k4;
	Wnbf_t = result;
}

# bfins #bit,#wid,Wns,dst : insert the low wid5 bits of Wnbf into the
# selected field of the destination, leaving the other bits intact.
# FIX: the previous semantics extracted a field from Wnbf and overwrote the
# whole destination; bit-field insert must mask-merge into the destination.
:bfins TOK_k4 "#"wid5, Wnbf_t, Wsb_t is OP_23_16=0x0A & OP_15_12=0x0 & Wnbf_t & TOK_k4b & TOK_k4; OP_23_16=0x0 & OP_15_8=0x0 & OP_7=0x0 & Wsb_t [wid5 = TOK_k4b - TOK_k4 + 1;] {
	local mask:2 = (0xffff >> (16 - wid5)) << TOK_k4;
	Wsb_t = (Wsb_t & ~mask) | ((Wnbf_t << TOK_k4) & mask);
}

:bfins TOK_k4 "#"wid5, Wnbf_t, n15_t is OP_23_16=0x0A & OP_15_12=0x2 & Wnbf_t & TOK_k4b & TOK_k4; OP_23_16=0x0 & OP_0=0x0 & n15_t [wid5 = TOK_k4b - TOK_k4 + 1;] {
	local mask:2 = (0xffff >> (16 - wid5)) << TOK_k4;
	local word = *:2 n15_t;
	*:2 n15_t = (word & ~mask) | ((Wnbf_t << TOK_k4) & mask);
}
@endif

@if defined(dsPIC24F) || defined(dsPIC33E) || defined(dsPIC33C)
define pcodeop bootswap;
:bootswp is OP_23_0=0xFE2000 { bootswap(); }
@endif

:bra n16_t is OP_23_20=0x3 & OP_19_16=0x7 & n16_t { goto n16_t; }

@if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F)
:bra Wns_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x6 & OP_11_8=0x0 & OP_7_4=0x0 & Wns_t & WordInstNext {
	#Note: identical operation as below, unique targets
	dest:3 = WordInstNext + 2 * sext(Wns_t);
	goto [dest];
}
@endif

@if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C)
:bra Wns_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x0 & OP_11_8=0x6 & OP_7_4=0x0 & Wns_t {
	#Note: identical operation as above, unique targets
	# inst_next is byte oriented here, and word oriented inside of [ ];
	dest:3 = (inst_next/2) + 2 * sext(Wns_t);
	goto [dest];
}
@endif

# Condition-code subtables: cond1 tests the 16-bit ALU flags, cond2 the
# 40-bit DSP accumulator flags; each exports a 1-byte boolean.
cond1: "c" is TOK_CCCC=0x1 { tmpBool:1 = SRL_C; export tmpBool; }
cond1: "ge" is TOK_CCCC=0xD { tmpBool:1 = ((SRL_N && SRL_OV) || (!SRL_N && !SRL_OV)); export tmpBool; }
#Note: same as branch C, not supported in disassembly
# cond1: "geu" is TOK_CCCC=0x1 { tmpBool:1 = SRL_C ); export tmpBool; }
cond1: "gt" is TOK_CCCC=0xC { tmpBool:1 = ((!SRL_Z && SRL_N && SRL_OV) || (!SRL_Z && !SRL_N && !SRL_OV)); export tmpBool; }
cond1: "gtu" is TOK_CCCC=0xE { tmpBool:1 = SRL_C && !SRL_Z; export tmpBool; }
cond1: "le" is TOK_CCCC=0x4 { tmpBool:1 = (SRL_Z || (SRL_N != SRL_OV)); export tmpBool; }
cond1: "leu" is
TOK_CCCC=0x6 { tmpBool:1 = (!SRL_C || SRL_Z); export tmpBool; }
cond1: "lt" is TOK_CCCC=0x5 { tmpBool:1 = ((SRL_N && !SRL_OV) || (!SRL_N && SRL_OV)); export tmpBool; }
cond1: "n" is TOK_CCCC=0x3 { tmpBool:1 = (SRL_N); export tmpBool; }
cond1: "nc" is TOK_CCCC=0x9 { tmpBool:1 = (!SRL_C); export tmpBool; }
cond1: "nn" is TOK_CCCC=0xB { tmpBool:1 = (!SRL_N); export tmpBool; }
cond1: "nov" is TOK_CCCC=0x8 { tmpBool:1 = (!SRL_OV); export tmpBool; }
cond1: "nz" is TOK_CCCC=0xA { tmpBool:1 = (!SRL_Z); export tmpBool; }
cond2: "oa" is TOK_CCCC=0xC { tmpBool:1 = (SRH_OA); export tmpBool; }
cond2: "ob" is TOK_CCCC=0xD { tmpBool:1 = (SRH_OB); export tmpBool; }
cond1: "ov" is TOK_CCCC=0x0 { tmpBool:1 = (SRL_OV); export tmpBool; }
cond2: "sa" is TOK_CCCC=0xE { tmpBool:1 = (SRH_SA); export tmpBool; }
cond2: "sb" is TOK_CCCC=0xF { tmpBool:1 = (SRH_SB); export tmpBool; }
cond1: "z" is TOK_CCCC=0x2 { tmpBool:1 = (SRL_Z); export tmpBool; }

## ## ##
## conditional branch for the 16-bit status branches above
## ## ##
:bra cond1,n16_t is OP_23_20=0x3 & cond1 & n16_t { if ( cond1 ) goto n16_t; }

## ## ##
## conditional branch for the 40-bit status branches above
## ## ##
:bra cond2,n16_t is OP_23_20=0x0 & cond2 & n16_t { if ( cond2 ) goto n16_t; }

# bset : set a single bit in a file register or a W register.
:bset.w f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0x8 & bit4byte_t & bit4_t & f12_t {
	local mask:2 = 1 << bit4_t;
	f12_t = f12_t | mask;
}

:bset.w Wsb_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x0 & Bbit4_t & OP_11=0x0 & TOK_Bb=0 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = 1 << Bbit4_t;
	Wsb_t = Wsb_t | mask;
}

:bset.b Wsbbyte_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x0 & Bbit4_t & OP_11=0x0 & TOK_Bb=1 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsbbyte_t {
	local mask:1 = 1 << Bbit4_t;
	Wsbbyte_t = Wsbbyte_t | mask;
}

# bsw : write the C (or Z) flag into bit (Wb & 0xF) of Ws.
:bsw.c Ws_t,Wbd_t is OP_23_20=0xA & OP_19_16=0xD & TOK_Z=0 & Wbd_t & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Ws_t {
	# clear the bit and or flag in; write the bit
	local bit = Wbd_t & 0xF;
	Ws_t = ( Ws_t & ~(1 << bit) ) | (zext(SRL_C) << bit);
}

:bsw.z Ws_t,Wbd_t is OP_23_20=0xA & OP_19_16=0xD & TOK_Z=1 & Wbd_t & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Ws_t {
	# clear the bit and or flag in; write the bit
	local bit = Wbd_t & 0xF;
	Ws_t = ( Ws_t & ~(1 << bit) ) | (zext(SRL_Z) << bit);
}

# btg : toggle a single bit.
:btg^bit4byte_t f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0xA & bit4byte_t & bit4_t & f12_t {
	local mask:2 = 1 << bit4_t;
	f12_t = f12_t ^ mask;
}

# DSRPAG ?????
:btg.w Wsb_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x2 & Bbit4_t & OP_11=0x0 & TOK_Bb=0 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = 1 << Bbit4_t;
	Wsb_t = Wsb_t ^ mask;
}

:btg.b Wsbbyte_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x2 & Bbit4_t & OP_11=0x0 & TOK_Bb=1 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsbbyte_t {
	local mask:1 = 1 << Bbit4_t;
	Wsbbyte_t = Wsbbyte_t ^ mask;
}

# btsc/btss : bit test, skip next instruction if clear/set.  skipInstr is
# globalset on the following address; the phase-1 skip constructor tests
# SkipNextFlag at run time.
:btsc^bit4byte_t f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0xF & bit4byte_t & bit4_t & f12_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] {
	local mask:2 = 1 << bit4_t;
	SkipNextFlag = ( ( f12_t & mask ) == 0 );
}

:btsc.w Wsb_t,Bbit4_t is OP_23_16=0xa7 & OP_11_7=0x0 & Bbit4_t & $(WSconstraint) & Wsb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] {
	local mask:2 = 1 << Bbit4_t;
	SkipNextFlag = ( ( Wsb_t & mask ) == 0 );
}

:btss^bit4byte_t f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0xE & bit4byte_t & bit4_t & f12_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] {
	local mask:2 = 1 << bit4_t;
	SkipNextFlag = ( ( f12_t & mask ) != 0 );
}

:btss.w Wsb_t,Bbit4_t is OP_23_16=0xa6 & OP_11_7=0x0 & Bbit4_t & $(WSconstraint) & Wsb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] {
	local mask:2 = 1 << Bbit4_t;
	SkipNextFlag = ( ( Wsb_t & mask ) != 0 );
}

# btst : bit test into Z (complemented) or C (direct).
:btst.w f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0xB & bit4byte_t & bit4_t & f12_t {
	local mask:2 = 1 << bit4_t;
	SRL_Z = ( ( f12_t & mask ) == 0 );
}

# 1010 0011 1111 0000 0000 0001
:btst.c Wsb_t,Bbit4_t is OP_23_16=0xa3 & OP_10_7=0x0 & Bbit4_t & TOK_Zb=0 & OP_10_8=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = 1 << Bbit4_t;
	# set C to value of bit
	SRL_C = ( Wsb_t & mask ) != 0;
}

:btst.z Wsb_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x3 & Bbit4_t & TOK_Zb=1 & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = 1 << Bbit4_t;
	# set Z to value of bit complemented
	SRL_Z = ( Wsb_t & mask ) == 0;
}

:btst.c Ws_t,Wbd_t is OP_23_20=0xA & OP_19_16=0x5 & TOK_Z=0 & Wbd_t & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Ws_t {
	# set C to value of bit
	SRL_C = ( Ws_t & (1 << (Wbd_t & 0xF)) ) != 0;
}

:btst.z Ws_t,Wbd_t is OP_23_20=0xA & OP_19_16=0x5 & TOK_Z=1 & Wbd_t & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Ws_t {
	# set Z to value of bit complemented
	SRL_Z = ( Ws_t & (1 << (Wbd_t & 0xF)) ) == 0;
}

# btsts : bit test and set.
:btsts^bit4byte_t f12_t,bit4_t is OP_23_20=0xA & OP_19_16=0xC & bit4byte_t & bit4_t & f12_t {
	local mask:2 = 1 << bit4_t;
	# set Z to value of bit complemented
	SRL_Z = ( ( f12_t & mask ) == 0 );
	# set value of bit to 1
	f12_t = f12_t | mask;
}

# DSRPAG ?????
:btsts.c Wsb_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x4 & Bbit4_t & TOK_Zb=0 & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = 1 << Bbit4_t;
	local wsSrc = Wsb_t;
	# set C to value of bit
	SRL_C = ( wsSrc & mask ) != 0;
	# set value of bit to 1
	Wsb_t = wsSrc | mask;
}

:btsts.z Wsb_t,Bbit4_t is OP_23_20=0xA & OP_19_16=0x4 & Bbit4_t & TOK_Zb=1 & OP_10_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local mask:2 = 1 << Bbit4_t;
	local wsSrc = Wsb_t;
	# set Z to value of bit complemented
	SRL_Z = ( wsSrc & mask ) == 0;
	# set value of bit to 1
	Wsb_t = wsSrc | mask;
}

# call : push a 4-byte (word-oriented) return address on the W15 stack.
@if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F)
:call dest24_t is ( OP_23_20=0x0 & OP_19_16=0x2 & OP_0=0x0 & WordInstNext4; OP_23_20=0x0 & OP_19_16=0x0 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7=0x0 ) & dest24_t {
	*[ram]:4 W15 = WordInstNext4;
	W15 = W15 + 4;
	call dest24_t;
}
@endif

@if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C)
:call dest24_t is ( OP_23_20=0x0 & OP_19_16=0x2 & OP_0=0x0; OP_23_20=0x0 & OP_19_16=0x0 &
OP_15_12=0x0 & OP_11_8=0x0 & OP_7=0x0 ) & dest24_t {
	# NOTE(review): the saved return address ORs in CORCON_SFA (bit 0) --
	# presumably marking a stack-frame-active return; confirm against the
	# PIC24E/dsPIC33E programmer's reference.
	*[ram]:4 W15 = (inst_next / 2) | zext(CORCON_SFA);
	W15 = W15 + 4;
	call dest24_t;
}
@endif

@if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F)
:call WnDest_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & WnDest_t & WordInstNext4 {
	*[ram]:4 W15 = WordInstNext4;
	W15 = W15 + 4;
	call [WnDest_t];
}
@endif

@if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C)
:call WnDest_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & WnDest_t & WordInstNext4 {
	*[ram]:4 W15 = WordInstNext4 | zext(CORCON_SFA);
	W15 = W15 + 4;
	call [WnDest_t];
}
@endif

@if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C)
:call.l WnWn1_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15=0x1 & WnWn1_t & OP_10_8=0x0 & OP_7_4=0x0 & WordInstNext4 {
	*[ram]:4 W15 = WordInstNext4 | zext(CORCON_SFA);
	W15 = W15 + 4;
	call [WnWn1_t];
}
@endif

# clr{.b} : clear file register, WREG, or Wd.
:clr.w f13b_t is OP_23_20=0xE & OP_19_16=0xF & OP_15=0 & f13b_t { f13b_t = 0; }
:clr.w WREGb_t is OP_23_20=0xE & OP_19_16=0xF & OP_15=0 & WREGb_t { WREGb_t = 0; }
:clr.b f13bbyte_t is OP_23_20=0xE & OP_19_16=0xF & OP_15=0 & f13bbyte_t { f13bbyte_t = 0; }
:clr.b WREGbbyte_t is OP_23_20=0xE & OP_19_16=0xF & OP_15=0 & WREGbbyte_t { WREGbbyte_t = 0; }

:clr.w Wd_t is OP_23_20=0xE & OP_19_16=0xB & OP_15=0x0 & TOK_B=0 & $(WDconstraint) & Wd_t & OP_6_4=0x0 & OP_3_0=0x0 { Wd_t = 0; }
:clr.b Wdbyte_t is OP_23_20=0xE & OP_19_16=0xB & OP_15=0x0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & OP_6_4=0x0 & OP_3_0=0x0 { Wdbyte_t = 0; }

@if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C)
#
# Note: The following constructors have three optional parameters, Wx, Wy and AWB. To implement them
# without if-then-else constructs, the permutations were made. The TOK tokens correspond, in order,
# with the elements that are removed (commented out) from the constructor when the "no prefetch"
# or "no write back" cases occur. Corresponding sub-constructors were also removed from the display
# section because an unused destination would otherwise be displayed (i.e. "clr ACCAW4W4" -> "clr ACCA").
#

## ## ##
## ACCA series
## ## ##
:clr ACCA_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	# AWB_t = ACCBH;
}

:clr ACCA_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	AWB_t = ACCBH;
}

:clr ACCA_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	# AWB_t = ACCBH;
}

:clr ACCA_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	AWB_t = ACCBH;
}

:clr ACCA_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	# AWB_t = ACCBH;
}

:clr ACCA_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	AWB_t = ACCBH;
}

:clr ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	# AWB_t = ACCBH;
}

:clr ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCA_t & OP_14=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	# 0 -> ACCA
	ACCA = 0;
	SRH_OA = 0;
	SRH_SA = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCB rounded -> AWB
	AWB_t = ACCBH;
}

## ## ##
## ACCB series
## ## ##
:clr ACCB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	# AWB_t = ACCAH;
}

:clr ACCB_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	AWB_t = ACCAH;
}

:clr ACCB_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	# AWB_t = ACCAH;
}

:clr ACCB_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	AWB_t = ACCAH;
}

# FIX: this variant (TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 -- Wx prefetch
# active, no Wy prefetch, no accumulator write-back) was displayed as
# "clr ACCB^Wy^Wyd^AWB", a copy-paste duplicate of the previous display;
# the parallel ACCA variant with the same TOK constraints is ^Wx_t^Wxd_t,
# which matches the semantics below (only Wxd_t is updated).
:clr ACCB_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	# AWB_t = ACCAH;
}

:clr ACCB_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	AWB_t = ACCAH;
}

:clr ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	# AWB_t = ACCAH;
}

:clr ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x3 & ACCB_t & OP_14=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t {
	# Note: MAC-class instruction
	ACCB = 0;
	SRH_OB = 0;
	SRH_SB = 0;
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	AWB_t = ACCAH;
}
@endif

:clrwdt is OP_23_20=0xF &
OP_19_16=0xE & OP_15_12=0x6 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 {
	WDTcount = 0; # ????? formal name not documented
	WDTprescalarA = 0; # ????? formal name not documented
	WDTprescalarB = 0; # ????? formal name not documented
}

# com{.b} : one's complement; only N and Z are affected.
:com.w f13_t^WREG_t is OP_23_20=0xE & OP_19_16=0xE & OP_15=1 & WREG_t & f13_t {
	WREG_t = ~f13_t;
	testSRL_N ( WREG_t );
	testSRL_Z ( WREG_t );
}

:com.b f13byte_t^WREGbyte_t is OP_23_20=0xE & OP_19_16=0xE & OP_15=1 & WREGbyte_t & f13byte_t {
	WREGbyte_t = ~f13byte_t;
	testSRL_N ( WREGbyte_t );
	testSRL_Z ( WREGbyte_t );
}

:com.w Ws_t,Wd_t is OP_23_20=0xE & OP_19_16=0xA & OP_15=0x1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t {
	local result = ~Ws_t;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

:com.b Wsbyte_t,Wdbyte_t is OP_23_20=0xE & OP_19_16=0xA & OP_15=0x1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t {
	local result = ~Wsbyte_t;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
}

# cp{.b} : compare (subtract, flags only, no destination write).
:cp.w f13_t is OP_23_20=0xE & OP_19_16=0x3 & OP_15=0 & OP_13=0 & f13_t {
	local src = f13_t;
	subflags( src, W0 );
	local tmp:2 = src - W0;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCword( tmp );
}

:cp.b f13byte_t is OP_23_20=0xE & OP_19_16=0x3 & OP_15=0 & OP_13=0 & f13byte_t {
	local src = f13byte_t;
	subflags( src, W0byte );
	local tmp:1 = src - W0byte;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCbyte( tmp );
}

@if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F)
:cp.w Wbb_t,k5_B10_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x0 & OP_9_8=0x0 & OP_7_5=0x3 & Wbb_t & k5_B10_t {
	subflags( Wbb_t, k5_B10_t );
	tmp:2 = Wbb_t - k5_B10_t;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCword( tmp );
}

:cp.b Wbbbyte_t,k5byte_B10_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x0 & OP_9_8=0x0 & OP_7_5=0x3 & Wbbbyte_t & k5byte_B10_t {
	subflags( Wbbbyte_t, k5byte_B10_t );
	tmp:1 = Wbbbyte_t - k5byte_B10_t;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCbyte( tmp );
}
@endif

@if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C)
:cp.w Wbb_t,k8_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x0 & TOK_Bb=0 & OP_6_5=0x3 & Wbb_t & k8_t {
	subflags( Wbb_t, k8_t );
	tmp:2 = Wbb_t - k8_t;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCword( tmp );
}

:cp.b Wbbbyte_t,k8byte_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x0 & TOK_Bb=1 & OP_6_5=0x3 & Wbbbyte_t & k8byte_t {
	subflags( Wbbbyte_t, k8byte_t );
	tmp:1 = Wbbbyte_t - k8byte_t;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCbyte( tmp );
}
@endif

:cp.w Wbb_t,Wsb_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x0 & TOK_Bb=0 & OP_9_8=0x0 & OP_7=0x0 & Wbb_t & $(WSconstraint) & Wsb_t {
	local src = Wsb_t;
	subflags( Wbb_t, src );
	tmp:2 = Wbb_t - src;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCword( tmp );
}

:cp.b Wbbbyte_t,Wsbbyte_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x0 & TOK_Bb=1 & OP_9_8=0x0 & OP_7=0x0 & Wbbbyte_t & $(WSconstraint) & Wsbbyte_t {
	local src = Wsbbyte_t;
	subflags( Wbbbyte_t, src );
	tmp:1 = Wbbbyte_t - src;
	testSRL_N ( tmp );
	testSRL_Z ( tmp );
	testSRH_DCbyte( tmp );
}

# cp0{.b} : compare against zero, flags only.
:cp0.w f13_t is OP_23_20=0xE & OP_19_16=0x2 & OP_15=0 & OP_13=0 & f13_t {
	local src = f13_t;
	local zero:2 = 0;
	subflags( src, zero );
	testSRL_N ( src );
	testSRL_Z ( src );
	testSRH_DCword( src );
}

:cp0.b f13byte_t is OP_23_20=0xE & OP_19_16=0x2 & OP_15=0 & OP_13=0 & f13byte_t {
	local src = f13byte_t;
	local zero:1 = 0;
	subflags( src, zero );
	testSRL_N ( src );
	testSRL_Z ( src );
	testSRH_DCbyte( src );
}

:cp0.w Wsb_t is OP_23_20=0xE & OP_19_16=0x0 & OP_15_12=0x0 & OP_11=0x0 & TOK_Bb=0 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsb_t {
	local src = Wsb_t;
	local zero:2 = 0;
	subflags( src, zero );
	testSRL_N ( src );
	testSRL_Z ( src );
	testSRH_DCword( src );
}

:cp0.b Wsbbyte_t is OP_23_20=0xE & OP_19_16=0x0 & OP_15_12=0x0 & OP_11=0x0 & TOK_Bb=1 & OP_9_8=0x0 & OP_7=0x0 & $(WSconstraint) & Wsbbyte_t {
	local src = Wsbbyte_t;
	local zero:1 = 0;
	subflags( src, zero );
	testSRL_N ( src );
	testSRL_Z ( src );
	testSRH_DCbyte(
src ); } :cpb.w f13_t is OP_23_20=0xE & OP_19_16=0x3 & OP_15=1 & OP_13=0 & f13_t { local notCarry:2 = zext(!SRL_C); local src = f13_t; subflagsWithCarry( src, W0, notCarry ); tmp:2 = src - W0 - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCword ( tmp ); } :cpb.b f13byte_t is OP_23_20=0xE & OP_19_16=0x3 & OP_15=1 & OP_13=0 & f13byte_t { local notCarry = !SRL_C; local src = f13byte_t; subflagsWithCarry( src, W0byte, notCarry ); tmp:1 = src - W0byte - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCbyte ( tmp ); } @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :cpb.w Wb_t,k5_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x1 & OP_9_8=0x0 & OP_7_5=0x3 & Wb_t & k5_t { local notCarry:2 = zext(!SRL_C); subflagsWithCarry( Wb_t, k5_t, notCarry ); tmp:2 = Wb_t - k5_t - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCword ( tmp ); } :cpb.b Wbbyte_t,k5byte_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x1 & OP_9_8=0x0 & OP_7_5=0x3 & Wbbyte_t & k5byte_t { local notCarry = !SRL_C; subflagsWithCarry( Wbbyte_t, k5byte_t, notCarry ); tmp:1 = Wbbyte_t - k5byte_t - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCbyte ( tmp ); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpb.w Wbb_t,k8_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x1 & TOK_Bb=0 & OP_6_5=0x3 & Wbb_t & k8_t { local notCarry:2 = zext(!SRL_C); subflagsWithCarry( Wbb_t, k8_t, notCarry ); tmp:2 = Wbb_t - k8_t - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCword ( tmp ); } :cpb.b Wbbbyte_t,k8byte_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x1 & TOK_Bb=1 & OP_6_5=0x3 & Wbbbyte_t & k8byte_t { local notCarry = !SRL_C; subflagsWithCarry( Wbbbyte_t, k8byte_t, notCarry ); tmp:1 = Wbbbyte_t - k8byte_t - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCbyte ( tmp ); } @endif :cpb.w Wbb_t,Wsb_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x1 & TOK_Bb=0 & OP_9_8=0x0 & OP_7=0x0 & Wbb_t & $(WSconstraint) & 
Wsb_t { local notCarry:2 = zext(!SRL_C); local src = Wsb_t; subflagsWithCarry( Wbb_t, src, notCarry ); tmp:2 = Wbb_t - src - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCword ( tmp ); } :cpb.b Wbbbyte_t,Wsbbyte_t is OP_23_20=0xE & OP_19_16=0x1 & OP_15=0x1 & TOK_Bb=1 & OP_9_8=0x0 & OP_7=0x0 & Wbbbyte_t & $(WSconstraint) & Wsbbyte_t { local notCarry = !SRL_C; local src = Wsbbyte_t; subflagsWithCarry( Wbbbyte_t, src, notCarry ); tmp:1 = Wbbbyte_t - src - notCarry; testSRL_N ( tmp ); testSRL_Zsticky( tmp ); testSRH_DCbyte ( tmp ); } @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpbeq.w Wbb_t,Wnb_t,n6_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x1 & TOK_Bb=0 & Wbb_t & n6_t & Wnb_t { if (Wbb_t == Wnb_t) goto n6_t; # ????? what about the flags, examples show them setting? } :cpbeq.b Wbbbyte_t,Wnbbyte_t,n6_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x1 & TOK_Bb=1 & Wbbbyte_t & n6_t & Wnbbyte_t { if (Wbbbyte_t == Wnbbyte_t) goto n6_t; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpbgt.w Wbb_t,Wnb_t,n6_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x0 & TOK_Bb=0 & Wbb_t & n6_t & Wnb_t { if (Wbb_t s> Wnb_t) goto n6_t; # ????? what about the flags, examples show them setting? } :cpbgt.b Wbbbyte_t,Wnbbyte_t,n6_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x0 & TOK_Bb=1 & Wbbbyte_t & n6_t & Wnbbyte_t { if (Wbbbyte_t s> Wnbbyte_t) goto n6_t; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpblt.w Wbb_t,Wnb_t,n6_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x1 & TOK_Bb=0 & Wbb_t & n6_t & Wnb_t { if (Wbb_t s< Wnb_t) goto n6_t; # ????? what about the flags, examples show them setting? 
} :cpblt.b Wbbbyte_t,Wnbbyte_t,n6_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x1 & TOK_Bb=1 & Wbbbyte_t & n6_t & Wnbbyte_t { if (Wbbbyte_t s< Wnbbyte_t) goto n6_t; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpbne.w Wbb_t,Wnb_t,n6_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x0 & TOK_Bb=0 & Wbb_t & n6_t & Wnb_t { if (Wbb_t != Wnb_t) goto n6_t; # ????? what about the flags, examples show them setting? } :cpbne.b Wbbbyte_t,Wnbbyte_t,n6_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x0 & TOK_Bb=1 & Wbbbyte_t & n6_t & Wnbbyte_t { if (Wbbbyte_t != Wnbbyte_t) goto n6_t; } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :cpseq.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x1 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t == Wnb_t); # ????? what about the flags, examples show them setting? } :cpseq.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x1 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t == Wnbbyte_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpseq.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x1 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t == Wnb_t); # ????? what about the flags, examples show them setting? 
} :cpseq.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x1 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t == Wnbbyte_t); } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :cpsgt.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x0 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t s> Wnb_t); # ????? what about the flags, examples show them setting? } :cpsgt.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x0 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t s> Wnbbyte_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpsgt.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x0 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t s> Wnb_t); # ????? what about the flags, examples show them setting? } :cpsgt.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x0 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t s> Wnbbyte_t); } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :cpslt.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x1 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t s< Wnb_t); # ????? what about the flags, examples show them setting? 
} :cpslt.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x1 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t s< Wnbbyte_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpslt.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x1 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t s< Wnb_t); # ????? what about the flags, examples show them setting? } :cpslt.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x6 & OP_15=0x1 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t s< Wnbbyte_t); } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :cpsne.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x0 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t != Wnb_t); # ????? what about the flags, examples show them setting? } :cpsne.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x0 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x0 & Wnbbyte_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbbbyte_t != Wnbbyte_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :cpsne.w Wbb_t,Wnb_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x0 & TOK_Bb=0 & Wbb_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnb_t [ skipInstr = 1; globalset(inst_next,skipInstr); ] { SkipNextFlag = (Wbb_t != Wnb_t); # ????? what about the flags, examples show them setting? 
} # closes :cpsne.w begun on the previous line

# CPSNE.B: compare and skip the next instruction if not equal (byte form,
# E/33E/33C encoding with OP_7_4=0x1).
:cpsne.b Wbbbyte_t,Wnbbyte_t is OP_23_20=0xE & OP_19_16=0x7 & OP_15=0x0 & TOK_Bb=1 & Wbbbyte_t & OP_9_8=0x0 & OP_7_4=0x1 & Wnbbyte_t
[ skipInstr = 1; globalset(inst_next,skipInstr); ]
{
	SkipNextFlag = (Wbbbyte_t != Wnbbyte_t);
}
@endif

@if defined(dsPIC33E) || defined(dsPIC33C)
define pcodeop contextswap;

# CTXTSWP #lit3 / Wn: context switch modeled as an opaque pcodeop.
:ctxtswp k3_t is OP_23_4=0xFE200 & OP_3=0x0 & k3_t
{
	contextswap(k3_t);
}

:ctxtswp Wndb_t is OP_23_4=0xFEF00 & Wndb_t
{
	contextswap(Wndb_t);
}
@endif

# DAW.B: decimal (BCD) adjust of Wn after a packed-BCD addition.
# BUGFIX: the two local label targets were missing, leaving invalid bare
# "goto ;" statements; restored as <skipLow>/<done>.  If the low nibble
# exceeds 9 (or DC is set) add 6; then if the high nibble exceeds 0x90
# (or C is set) add 0x60 and set C.
:daw.b Wnsbyte_t is OP_23_20=0xF & OP_19_16=0xD & OP_15_12=0x4 & OP_11_8=0x0 & OP_7_4=0x0 & Wnsbyte_t
{
	if !( ( ( Wnsbyte_t & 0x0F ) > 0x09 ) || SRH_DC ) goto <skipLow>;
	Wnsbyte_t = Wnsbyte_t + 0x06;
	<skipLow>
	if !( ( ( Wnsbyte_t & 0xF0 ) > 0x90 ) || SRL_C ) goto <done>;
	Wnsbyte_t = Wnsbyte_t + 0x60;
	SRL_C = 1;
	<done>
}

# DEC.W f13,WREG: WREG = f - 1; updates N, Z, DC and the subflags results.
:dec.w f13_t,^WREG_t is OP_23_20=0xE & OP_19_16=0xD & OP_15=0 & WREG_t & f13_t
{
	local src = f13_t;
	local one:2 = 1;
	subflags( src, one );
	WREG_t = src - one;
	testSRL_N ( WREG_t );
	testSRL_Z ( WREG_t );
	testSRH_DCword( WREG_t );
}

# DEC.B — byte-wide variant.
:dec.b f13byte_t^WREGbyte_t is OP_23_20=0xE & OP_19_16=0xD & OP_15=0 & WREGbyte_t & f13byte_t
{
	local src = f13byte_t;
	local one:1 = 1;
	subflags( src, one );
	WREGbyte_t = src - one;
	testSRL_N ( WREGbyte_t );
	testSRL_Z ( WREGbyte_t );
	testSRH_DCbyte( WREGbyte_t );
}

# DEC.W Ws,Wd — register-to-register form; result computed before
# `build Wd_t` so destination address-mode side effects happen in order.
:dec.w Ws_t,Wd_t is OP_23_20=0xE & OP_19_16=0x9 & OP_15=0x0 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t
{
	local src = Ws_t;
	local one:2 = 1;
	subflags( src, one );
	local result = src - one;
	build Wd_t;
	Wd_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCword( result );
}

# DEC.B Ws,Wd — byte-wide variant.
:dec.b Wsbyte_t,Wdbyte_t is OP_23_20=0xE & OP_19_16=0x9 & OP_15=0x0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t
{
	local src = Wsbyte_t;
	local one:1 = 1;
	subflags( src, one );
	local result = src - one;
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N ( result );
	testSRL_Z ( result );
	testSRH_DCbyte( result );
}

# DEC2.W f13,WREG: WREG = f - 2 (body continues on the next line).
:dec2.w f13_t^WREG_t is OP_23_20=0xE & OP_19_16=0xD & OP_15=1 & WREG_t & f13_t
{
	local src = f13_t;
	local two:2 = 2;
subflags( src, two ); WREG_t = src - two; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); testSRH_DCword( WREG_t ); } :dec2.b f13byte_t^WREGbyte_t is OP_23_20=0xE & OP_19_16=0xD & OP_15=1 & WREGbyte_t & f13byte_t { local src = f13byte_t; local two:1 = 2; subflags( src, two ); WREGbyte_t = src - two; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); testSRH_DCbyte( WREGbyte_t ); } :dec2.w Ws_t,Wd_t is OP_23_20=0xE & OP_19_16=0x9 & OP_15=0x1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local two:2 = 2; subflags( src, two ); local result = src - two; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :dec2.b Wsbyte_t,Wdbyte_t is OP_23_20=0xE & OP_19_16=0x9 & OP_15=0x1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; local two:1 = 2; subflags( src, two ); local result = src - two; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } :disi k14_t is OP_23_20=0xF & OP_19_16=0xC & OP_15_14=0x0 & k14_t { DISICNT = k14_t; DISI = 1; } :repeat" 0x11 div.sw" TOK_10_7_Wreg,TOK_3_0_Wreg is OP_31_0=0x090011; OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x0 & TOK_10_7_Wreg & TOK_W=0 & OP_5_4=0x0 & TOK_3_0_Wreg { # Note: this implementation is not iterative, like the actual op. # Rather, it will decompile accurately and emulate correctly using the Sleigh divide support. 
local div:2 = sext(TOK_10_7_Wreg) s/ sext(TOK_3_0_Wreg); local rem:2 = sext(TOK_10_7_Wreg) s% sext(TOK_3_0_Wreg); W0 = zext(div:1); W1 = zext(rem:1); testSRL_N ( W1 ); # overflow as defined in note 2 SRL_OV = (TOK_10_7_Wreg == 0x8000) && (TOK_3_0_Wreg == 0xFFFF); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } define pcodeop pic30_div; define pcodeop pic30_rem; :div.sw TOK_10_7_Wreg,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x0 & TOK_10_7_Wreg & TOK_W=0 & OP_5_4=0x0 & TOK_3_0_Wreg { local div:2 = pic30_div(TOK_10_7_Wreg,TOK_3_0_Wreg); local rem:2 = pic30_rem(TOK_10_7_Wreg,TOK_3_0_Wreg); W0 = div; W1 = rem; testSRL_N ( W1 ); # overflow as defined in note 2 SRL_OV = (TOK_10_7_Wreg == 0x8000) && (TOK_3_0_Wreg == 0xFFFF); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } define pcodeop isDivideOverflow; :repeat" 0x11 div.sd" TOK_10_8_Dregn,TOK_3_0_Wreg is OP_31_0=0x090011; OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x0 & TOK_10_8_Dreg & TOK_10_8_Dregn & OP_7=0 & TOK_W=1 & OP_5_4=0x0 & TOK_3_0_Wreg { # overflow as defined in note 2 SRL_OV = isDivideOverflow(TOK_10_8_Dreg, TOK_3_0_Wreg); # Note: this implementation is not iterative, like the actual op. # Rather, it will decompile accurately and emulate correctly using the Sleigh divide support. 
local div:4 = TOK_10_8_Dreg s/ sext(TOK_3_0_Wreg); local rem:4 = TOK_10_8_Dreg s% sext(TOK_3_0_Wreg); W0 = div:2; W1 = rem:2; testSRL_N ( W1 ); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :div.sd TOK_10_8_Dregn,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x0 & TOK_10_8_Dreg & TOK_10_8_Dregn & OP_7=0 & TOK_W=1 & OP_5_4=0x0 & TOK_3_0_Wreg { # overflow as defined in note 2 SRL_OV = isDivideOverflow(TOK_10_8_Dreg, TOK_3_0_Wreg); local div:4 = pic30_div(TOK_10_8_Dreg,TOK_3_0_Wreg); local rem:4 = pic30_rem(TOK_10_8_Dreg,TOK_3_0_Wreg); W0 = div:2; W1 = rem:2; testSRL_N ( W1 ); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :repeat" 0x11 div.uw" TOK_10_7_Wreg,TOK_3_0_Wreg is OP_31_0=0x090011; OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x1 & TOK_10_7_Wreg & TOK_W=0 & OP_5_4=0x0 & TOK_3_0_Wreg { # Note: this implementation is not iterative, like the actual op. # Rather, it will decompile accurately and emulate correctly using the Sleigh divide support. 
local div:2 = zext(TOK_10_7_Wreg) / zext(TOK_3_0_Wreg); local rem:2 = zext(TOK_10_7_Wreg) % zext(TOK_3_0_Wreg); W0 = zext(div:1); W1 = zext(rem:1); testSRL_N ( W1 ); # overflow as defined in note 2 SRL_OV = 0; testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :div.uw TOK_10_7_Wreg,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x1 & TOK_10_7_Wreg & TOK_W=0 & OP_5_4=0x0 & TOK_3_0_Wreg { local div:2 = pic30_div(TOK_10_7_Wreg,TOK_3_0_Wreg); local rem:2 = pic30_rem(TOK_10_7_Wreg,TOK_3_0_Wreg); W0 = div; W1 = rem; testSRL_N ( W1 ); # overflow as defined in note 2 SRL_OV = 0; testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :repeat" 0x11 div.ud" TOK_10_8_Dregn,TOK_3_0_Wreg is OP_31_0=0x090011; OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x1 & TOK_10_8_Dreg & TOK_10_8_Dregn & OP_7=0 & TOK_W=1 & OP_5_4=0x0 & TOK_3_0_Wreg { # overflow as defined in note 2 SRL_OV = isDivideOverflow(TOK_10_8_Dreg, TOK_3_0_Wreg); # Note: this implementation is not iterative, like the actual op. # Rather, it will decompile accurately and emulate correctly using the Sleigh divide support. 
local div:4 = TOK_10_8_Dreg / sext(TOK_3_0_Wreg); local rem:4 = TOK_10_8_Dreg % sext(TOK_3_0_Wreg); W0 = div:2; W1 = rem:2; testSRL_N ( W1 ); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :div.ud TOK_10_8_Dregn,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x1 & TOK_10_8_Dreg & TOK_10_8_Dregn & OP_7=0 & TOK_W=1 & OP_5_4=0x0 & TOK_3_0_Wreg { # overflow as defined in note 2 SRL_OV = isDivideOverflow(TOK_10_8_Dreg, TOK_3_0_Wreg); local div:4 = pic30_div(TOK_10_8_Dreg,TOK_3_0_Wreg); local rem:4 = pic30_rem(TOK_10_8_Dreg,TOK_3_0_Wreg); W0 = div:2; W1 = rem:2; testSRL_N ( W1 ); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :repeat" 0x11 divf" TOK_14_11_Wreg,TOK_3_0_Wreg is OP_31_0=0x090011; OP_23_20=0xD & OP_19_16=0x9 & OP_15=0x0 & TOK_14_11_Wreg & OP_10_8=0x0 & OP_7_4=0x0 & TOK_3_0_Wreg { dividend:4 = (sext(TOK_14_11_Wreg) << 16); local tmp0:4 = dividend s/ sext(TOK_3_0_Wreg); W0 = tmp0:2; local tmp1 = dividend s% sext(TOK_3_0_Wreg); W1 = tmp1:2; testSRL_N ( W1 ); # overflow as defined in note 1 SRL_OV = (TOK_14_11_Wreg s>= TOK_3_0_Wreg); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :divf TOK_14_11_Wreg,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x9 & OP_15=0x0 & TOK_14_11_Wreg & OP_10_8=0x0 & OP_7_4=0x0 & TOK_3_0_Wreg { # Note: this implementation is not iterative, like the actual op. # Rather, it will decompile accurately and emulate correctly using the Sleigh divide support. 
local dividend:4 = (sext(TOK_14_11_Wreg) << 16); local tmp0:4 = pic30_div(dividend,TOK_3_0_Wreg); W0 = tmp0:2; local tmp1:4 = pic30_rem(dividend,TOK_3_0_Wreg); W1 = tmp1:2; testSRL_N ( W1 ); # overflow as defined in note 1 SRL_OV = (TOK_14_11_Wreg s>= TOK_3_0_Wreg); testSRL_Z ( W1 ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } @endif @if defined(dsPIC33C) define pcodeop pic30_fdiv; define pcodeop pic30_frem; :divf2 TOK_14_11_Wreg,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x9 & OP_15=0x0 & TOK_14_11_Wreg & OP_10_8=0x0 & OP_7_4=0x2 & TOK_3_0_Wreg & TOK_14_11_Wregn { # Note: this implementation is not iterative, like the actual op. # Rather, it will decompile accurately and emulate correctly using the Sleigh divide support. local dividend:4 = (sext(TOK_14_11_Wreg) << 16); local tmp0:4 = pic30_fdiv(dividend,TOK_3_0_Wreg,TOK_14_11_Wregn); local tmp1:4 = pic30_frem(dividend,TOK_3_0_Wreg,TOK_14_11_Wregn); TOK_14_11_Wregn = tmp0:2; TOK_14_11_Wreg = tmp1:2; testSRL_N ( TOK_14_11_Wreg ); # overflow as defined in note 1 SRL_OV = (TOK_14_11_Wreg s>= TOK_3_0_Wreg); testSRL_Z ( TOK_14_11_Wreg ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } define pcodeop pic30_div2; define pcodeop pic30_rem2; :div2.sw TOK_10_7_Wreg,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x0 & TOK_10_7_Wreg & TOK_10_7_Wregp & TOK_W=0 & OP_5_4=0x2 & TOK_3_0_Wreg { local div:2 = pic30_div(TOK_10_7_Wreg,TOK_3_0_Wreg); local rem:2 = pic30_rem(TOK_10_7_Wreg,TOK_3_0_Wreg); TOK_10_7_Wreg = div; TOK_10_7_Wregp = rem; testSRL_N ( TOK_10_7_Wregp ); # overflow as defined in note 2 SRL_OV = (TOK_10_7_Wreg == 0x8000) && (TOK_3_0_Wreg == 0xFFFF); testSRL_Z ( TOK_10_7_Wregp ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :div2.sd TOK_10_8_Dregn,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x0 & TOK_10_8_Dreg & TOK_10_8_Dregn & 
OP_7=0 & TOK_W=1 & OP_5_4=0x2 & TOK_3_0_Wreg { # overflow as defined in note 2 SRL_OV = isDivideOverflow(TOK_10_8_Dreg, TOK_3_0_Wreg); local div:2 = pic30_div2(TOK_10_8_Dreg,TOK_3_0_Wreg); local rem:2 = pic30_rem2(TOK_10_8_Dreg,TOK_3_0_Wreg); TOK_10_8_Dreg = (zext(rem) << 16) + zext(div); testSRL_N ( rem ); testSRL_Z ( rem ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :div2.uw TOK_10_7_Wreg,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x1 & TOK_10_7_Wreg & TOK_10_7_Wregp & TOK_W=0 & OP_5_4=0x2 & TOK_3_0_Wreg { local div:2 = pic30_div(TOK_10_7_Wreg,TOK_3_0_Wreg); local rem:2 = pic30_rem(TOK_10_7_Wreg,TOK_3_0_Wreg); TOK_10_7_Wreg = div; TOK_10_7_Wregp = rem; testSRL_N ( TOK_10_7_Wregp ); # overflow as defined in note 2 SRL_OV = 0; testSRL_Z ( TOK_10_7_Wregp ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } :div2.ud TOK_10_8_Dregn,TOK_3_0_Wreg is OP_23_20=0xD & OP_19_16=0x8 & OP_15=0x1 & TOK_10_8_Dreg & TOK_10_8_Dregn & OP_7=0 & TOK_W=1 & OP_5_4=0x2 & TOK_3_0_Wreg { # overflow as defined in note 2 SRL_OV = isDivideOverflow(TOK_10_8_Dreg, TOK_3_0_Wreg); local div:2 = pic30_div2(TOK_10_8_Dreg,TOK_3_0_Wreg); local rem:2 = pic30_rem2(TOK_10_8_Dreg,TOK_3_0_Wreg); TOK_10_8_Dreg = (zext(rem) << 16) + zext(div); testSRL_N ( rem ); testSRL_Z ( rem ); # Carry is modified, but modification is not defined, just assign to 0 for data flow analysis SRL_C = 0; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) :do k14_t,n16_t is OP_23_20=0x0 & OP_19_16=0x8 & OP_15_14=0x0 & k14_t ; OP_23_20=0x0 & OP_19_16=0x0 & n16_t & WordInstNext [ blockEnd=1; globalset(n16_t,blockEnd);] { DCOUNT_SHADOW = DCOUNT; DCOUNT = k14_t + 1; DOEND_SHADOW = DOEND; DOEND = &n16_t; DOSTART_SHADOW = DOSTART; DOSTART = WordInstNext; } @endif @if defined(dsPIC33E) || defined(dsPIC33C) :do k15_t,n16_t is OP_23_20=0x0 & OP_19_16=0x8 & OP_15=0x0 & k15_t ; OP_23_20=0x0 & OP_19_16=0x0 & 
n16_t & WordInstNext [ blockEnd=1; globalset(n16_t,blockEnd); ] { # stack 4 levels deep but we don't enforce this DL:2 = zext(CORCON_DL); *[register]:2 (&:2 DCOUNT + DL*2) = k15_t + 1; *[register]:3 (&:2 DOEND + DL*2) = &n16_t; *[register]:3 (&:2 DOSTART + DL*2) = WordInstNext; CORCON_DL = CORCON_DL + 1; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) :do Wns_t,n16_t is OP_23_20=0x0 & OP_19_16=0x8 & OP_15_12=0x8 & OP_11_8=0x0 & OP_7_4=0x0 & Wns_t ; OP_23_20=0x0 & OP_19_16=0x0 & n16_t & WordInstNext [ blockEnd=1; globalset(n16_t,blockEnd); ] { DCOUNT_SHADOW = DCOUNT; DCOUNT = Wns_t + 1; DOEND_SHADOW = DOEND; DOEND = &n16_t; DOSTART_SHADOW = DOSTART; DOSTART = WordInstNext; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :ed WmWm_t,ACCA_t,Wx_t,Wy_t,Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x1 & Wxd_t & OP_11_10=0x0 & Wx_t & Wy_t & OP_1_0=0x3 { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCA ACCA = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx] - [Wy]) -> Wxd # (Wx) +/- kx -> Wx # (Wy) +/- ky -> Wy Wxd_t = Wx_t - Wy_t; } :ed WmWm_t,ACCB_t,Wx_t,Wy_t,Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x1 & Wxd_t & OP_11_10=0x0 & Wx_t & Wy_t & OP_1_0=0x3 { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCB ACCB = WmWm_t; testSRH_OB(); testSRH_SB(); # ([Wx] - [Wy]) -> Wxd # (Wx) +/- kx -> Wx # (Wy) +/- ky -> Wy Wxd_t = Wx_t - Wy_t; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :edac WmWm_t,ACCA_t,Wx_t,Wy_t,Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x1 & Wxd_t & OP_11_10=0x0 & Wx_t & Wy_t & OP_1_0=0x2 { # Note: MAC-class instruction # ACCA + (Wm)*(Wm) -> ACCA ACCA = ACCA + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx] - [Wy]) -> Wxd # (Wx) +/- kx -> Wx # (Wy) +/- ky -> Wy Wxd_t = Wx_t - Wy_t; } :edac WmWm_t,ACCB_t,Wx_t,Wy_t,Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x1 & Wxd_t & OP_11_10=0x0 & Wx_t & Wy_t 
& OP_1_0=0x2
{
	# Note: MAC-class instruction
	# ACCB + (Wm)*(Wm) -> ACCB
	ACCB = ACCB + WmWm_t;
	testSRH_OB();
	testSRH_SB();
	# ([Wx] - [Wy]) -> Wxd
	# (Wx) +/- kx -> Wx
	# (Wy) +/- ky -> Wy
	Wxd_t = Wx_t - Wy_t;
}
@endif

# EXCH Wns,Wnd: swap the two working registers.
:exch Wns_t,Wnd_t is OP_23_20=0xF & OP_19_16=0xD & OP_15_12=0x0 & OP_11=0x0 & Wnd_t & OP_6_4=0x0 & Wns_t
{
	tmp:2 = Wnd_t;
	Wnd_t = Wns_t;
	Wns_t = tmp;
}

# OP_23_20=0x0 & OP_19_16=0x0 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 {

# FBCL: find first bit change from the sign bit, scanning left.
# BUGFIX: the loop's local label targets were missing, leaving invalid bare
# "goto ;" statements that also made the loop fall through instead of
# iterating; restored as <loop>/<done>.
:fbcl Ws_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xF & OP_15_12=0x0 & OP_11=0x0 & Wnd_t & $(WSconstraint) & Ws_t
{
	local src = Ws_t;
	sign:2 = src & 0x8000;
	temp:2 = src << 1;
	shift:2 = 0;
	<loop>
	if !( (shift < 15) && ((temp & 0x8000) == sign) ) goto <done>;
	temp = temp << 1;
	shift = shift + 1;
	goto <loop>;
	<done>
	SRL_C = (shift == 15);
	Wnd_t = -shift;
}

# TODO: locate encoding details for FEX instruction
# :fex

# FF1L: find the first set bit, scanning from the MSB.
# BUGFIX: loop labels restored (same stripped-label defect as fbcl).
:ff1l Ws_t,Wnd_t is OP_23_20=0xC & OP_19_16=0xF & OP_15_12=0x8 & OP_11=0x0 & Wnd_t & $(WSconstraint) & Ws_t
{
	temp:2 = Ws_t;
	shift:2 = 1;
	<loop>
	if !( (shift < 17) && ((temp & 0x8000) == 0) ) goto <done>;
	temp = temp << 1;
	shift = shift + 1;
	goto <loop>;
	<done>
	# If (Shift == Max_Shift)
	#   C = 1
	#   0 (Wnd)
	# Else
	#   C = 0
	#   Shift (Wnd)
	#
	SRL_C = (shift == 17);
	Wnd_t = shift * zext(!SRL_C);
}

# FF1R: find the first set bit, scanning from the LSB.
# BUGFIX: loop labels restored (same stripped-label defect as fbcl).
:ff1r Ws_t,Wnd_t is OP_23_20=0xC & OP_19_16=0xF & OP_15_12=0x0 & OP_11=0x0 & Wnd_t & $(WSconstraint) & Ws_t
{
	temp:2 = Ws_t;
	shift:2 = 1;
	<loop>
	if !( (shift < 17) && ((temp & 0x0001) == 0) ) goto <done>;
	temp = temp >> 1;
	shift = shift + 1;
	goto <loop>;
	<done>
	# If (Shift == Max_Shift)
	#   C = 1
	#   0 (Wnd)
	# Else
	#   C = 0
	#   Shift (Wnd)
	#
	SRL_C = (shift == 17);
	Wnd_t = shift * zext(!SRL_C);
}

@if defined(dsPIC33C)
define pcodeop force_data_range;

# FLIM: force Ws into the data range bounded by the Wbds register pair;
# modeled as an opaque pcodeop.
:flim Wbds_t, Ws_t is OP_23_16=0xE4 & OP_15=0x0 & OP_10_7=0x0 & Wbds_t & Ws_t
{
	force_data_range(Ws_t, Wbds_t);
}

:flim.v Wbds_t, Ws_t, Wnd_t is OP_23_16=0xE5 & OP_15=1 & Wnd_t & Wbds_t & Ws_t
{
	Wnd_t = force_data_range(Ws_t, Wbds_t);
}
@endif

# GOTO #lit23 (two-word form; constructor continues on the next line).
:goto dest24_t is ( OP_23_20=0x0 & OP_19_16=0x4 & OP_0=0x0 ; OP_23_20=0x0 & OP_19_16=0x0 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7=0x0 )
& dest24_t { goto dest24_t; } @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :goto WnDest_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x4 & OP_11_8=0x0 & OP_7_4=0x0 & WnDest_t { goto [WnDest_t]; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :goto WnDest_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x0 & OP_11_8=0x4 & OP_7_4=0x0 & WnDest_t { goto [WnDest_t]; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :goto.l WnWn1_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15=0x1 & WnWn1_t & OP_10_8=0x4 & OP_7_4=0x0 { goto [WnWn1_t]; } @endif :inc.w f13_t^WREG_t is OP_23_20=0xE & OP_19_16=0xC & OP_15=0 & WREG_t & f13_t { local src = f13_t; local one:2 = 1; addflags( src, one ); WREG_t = src + one; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); testSRH_DCword( WREG_t ); } :inc.b f13byte_t^WREGbyte_t is OP_23_20=0xE & OP_19_16=0xC & OP_15=0 & WREGbyte_t & f13byte_t { local src = f13byte_t; local one:1 = 1; addflags( src, one ); WREGbyte_t = src + one; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); testSRH_DCbyte( WREGbyte_t ); } :inc.w Ws_t,Wd_t is OP_23_20=0xE & OP_19_16=0x8 & OP_15=0x0 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local one:2 = 1; addflags( src, one ); local result = src + one; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :inc.b Wsbyte_t,Wdbyte_t is OP_23_20=0xE & OP_19_16=0x8 & OP_15=0x0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; local one:1 = 1; addflags( src, one ); local result = src + one; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } :inc2.w f13_t^WREG_t is OP_23_16=0xEC & OP_15=1 & WREG_t & f13_t { local src = f13_t; local two:2 = 2; addflags( src, two ); WREG_t = src + two; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); testSRH_DCword( WREG_t ); } :inc2.b f13byte_t^WREGbyte_t is 
OP_23_16=0xEC & OP_15=1 & WREGbyte_t & f13byte_t { local src = f13byte_t; local two:1 = 2; addflags( src, two ); WREGbyte_t = src + two; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); testSRH_DCbyte( WREGbyte_t ); } :inc2.w Ws_t,Wd_t is OP_23_16=0xE8 & OP_15=0x1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local two:2 = 2; addflags( src, two ); local result = src + two; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :inc2.b Wsbyte_t,Wdbyte_t is OP_23_16=0xE8 & OP_15=0x1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; local two:1 = 2; addflags( src, two ); local result = src + two; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } :ior.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x7 & OP_15=0 & WREG_t & f13_t { WREG_t = f13_t | W0; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); } :ior.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x7 & OP_15=0 & WREGbyte_t & f13byte_t { WREGbyte_t = f13byte_t | W0byte; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); } :ior.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x3 & OP_15=0 & k10_t & Wn_t { Wn_t = k10_t | Wn_t; testSRL_N ( Wn_t ); testSRL_Z ( Wn_t ); } :ior.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x3 & OP_15=0 & k10byte_t & Wnbyte_t { Wnbyte_t = k10byte_t | Wnbyte_t; testSRL_N ( Wnbyte_t ); testSRL_Z ( Wnbyte_t ); } :ior.w Wb_t,k5_t,Wd_t is OP_23_20=0x7 & OP_19=0x0 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t { local result = k5_t | Wb_t; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); } :ior.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x7 & OP_19=0x0 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t { local result = k5byte_t | Wbbyte_t; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); } :ior.w Wb_t,Ws_t,Wd_t is OP_23_20=0x7 & OP_19=0x0 & TOK_B=0 & Wb_t & 
$(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local result = Wb_t | Ws_t; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); } :ior.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x7 & OP_19=0x0 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local result = Wbbyte_t | Wsbyte_t; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); } @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :lac WsWRO_t^r4_t,ACCA_t is OP_23_20=0xC & OP_19_16=0xA & ACCA_t & r4_t & WsWRO_t { ACCA = (sext(WsWRO_t) << (16 - r4_t)); testSRH_OA(); testSRH_SA(); } :lac WsWRO_t^r4_t,ACCB_t is OP_23_20=0xC & OP_19_16=0xA & ACCB_t & r4_t & WsWRO_t { ACCB = (sext(WsWRO_t) << (16 - r4_t)); testSRH_OB(); testSRH_SB(); } @endif @if defined(dsPIC33C) :lac.d Wsd_t^r4_t,ACCA_t is OP_23_16=0xDB & OP_14_11=0x0 & ACCA_t & r4_t & Wsd_t { ACCA = (sext(Wsd_t) << (16 - r4_t)); testSRH_OA(); testSRH_SA(); } :lac.d Wsd_t^r4_t,ACCB_t is OP_23_16=0xDB & OP_14_11=0x0 & ACCB_t & r4_t & Wsd_t { ACCB = (sext(Wsd_t) << (16 - r4_t)); testSRH_OB(); testSRH_SB(); } @endif @if defined(dsPIC33C) PSV_t: Ws_t is Ws_t { local psv_addr = (Ws_t & 0xf7 ) + (DSRPAG & 0xff) << 0xf; export *[rom]:3 psv_addr; } EDS_t: Wdpp_t is Wdpp_t { local eds_addr = (Wdpp_t & 0xf7 ) + (DSWPAG & 0xff) << 0xf; export *[rom]:2 eds_addr; } define pcodeop loadslave; :ldslv PSV_t, EDS_t, k13_12_t is OP_23_16=0x03 & OP_15_14=0x0 & OP_11=0 & k13_12_t & EDS_t & PSV_t { EDS_t = loadslave(PSV_t, k13_12_t); } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :lnk k14_t is OP_23_20=0xF & OP_19_16=0xA & OP_15_14=0x0 & k14_t & OP_0=0x0 { *[ram]:2 W15 = W14; W15 = W15 + 2; W14 = W15; W15 = W15 + k14_t; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :lnk k14_t is OP_23_20=0xF & OP_19_16=0xA & OP_15_14=0x0 & k14_t & OP_0=0x0 { *[ram]:2 W15 = W14; W15 = W15 + 2; W14 = W15; CORCON_SFA = 1; W15 = W15 
+ k14_t; } @endif :lsr.w f13_t^WREG_t is OP_23_20=0xD & OP_19_16=0x5 & OP_15=0 & WREG_t & f13_t { local src = f13_t; SRL_C = ( src & 0x0001 ) != 0; WREG_t = src >> 1; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); } :lsr.b f13byte_t^WREGbyte_t is OP_23_20=0xD & OP_19_16=0x5 & OP_15=0 & WREGbyte_t & f13byte_t { local src = f13byte_t; SRL_C = ( src & 0x01 ) != 0; WREGbyte_t = src >> 1; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); } :lsr.w Ws_t,Wd_t is OP_23_20=0xD & OP_19_16=0x1 & OP_15=0x0 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; SRL_C = ( src & 0x0001 ) != 0; local result = src >> 1; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); } :lsr.b Wsbyte_t,Wdbyte_t is OP_23_20=0xD & OP_19_16=0x1 & OP_15=0x0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; SRL_C = ( src & 0x01 ) != 0; local result = src >> 1; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( Wdbyte_t ); testSRL_Z ( Wdbyte_t ); } :lsr.w Wbd_t,k4_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xE & OP_15=0x0 & OP_6_4=0x4 & Wbd_t & Wnd_t & k4_t { Wnd_t = Wbd_t >> k4_t; testSRL_N ( Wnd_t ); testSRL_Z ( Wnd_t ); } :lsr.w Wbd_t,Wns_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xE & OP_15=0x0 & OP_6_4=0x0 & Wbd_t & Wnd_t & Wns_t { Wnd_t = Wbd_t >> ( Wns_t & 0x001F ); testSRL_N ( Wnd_t ); testSRL_Z ( Wnd_t ); } @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) ## ## ## ACCA series ## ## :mac WmWn_t,ACCA_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & 
TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is 
OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :mac WmWn_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wn) -> ACCA ACCA = ACCA + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } ## ## ## ACCB series ## ## :mac WmWn_t,ACCB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wn) -> ACCB ACCB = ACCB + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :mac WmWn_t,ACCB_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wn) -> ACCB ACCB = ACCB + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :mac WmWn_t,ACCB_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wn) -> ACCB ACCB = ACCB + WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA 
rounded -> AWB
	AWB_t = ACCAH;
}

# NOTE(review): every ACCB ':mac' variant below calls testSRH_OA()/testSRH_SA()
# (the ACCA overflow/saturate checks) even though the result is written to
# ACCB, whereas the ':lac ...,ACCB_t' constructors use testSRH_OB()/testSRH_SB().
# This looks like a copy/paste slip shared by all ACCB MAC-class variants --
# confirm against the upstream Ghidra spec before changing.
:mac WmWn_t,ACCB_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t
{
	# Note: MAC-class instruction
	# ACCB + (Wm)*(Wn) -> ACCB
	ACCB = ACCB + WmWn_t;
	testSRH_OA();
	testSRH_SA();
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	# Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	AWB_t = ACCAH;
}
:mac WmWn_t,ACCB_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t
{
	# Note: MAC-class instruction
	# ACCB + (Wm)*(Wn) -> ACCB
	ACCB = ACCB + WmWn_t;
	testSRH_OA();
	testSRH_SA();
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	# AWB_t = ACCAH;
}
:mac WmWn_t,ACCB_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t
{
	# Note: MAC-class instruction
	# ACCB + (Wm)*(Wn) -> ACCB
	ACCB = ACCB + WmWn_t;
	testSRH_OA();
	testSRH_SA();
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	# Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	AWB_t = ACCAH;
}
:mac WmWn_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t
{
	# Note: MAC-class instruction
	# ACCB + (Wm)*(Wn) -> ACCB
	ACCB = ACCB + WmWn_t;
	testSRH_OA();
	testSRH_SA();
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t = Wy_t;
	# ACCA rounded -> AWB
	# AWB_t = ACCAH;
}
:mac WmWn_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t
{
	# Note: MAC-class instruction
	# ACCB + (Wm)*(Wn) -> ACCB
	ACCB = ACCB + WmWn_t;
	testSRH_OA();
	testSRH_SA();
	# ([Wx]) -> Wxd
	# (Wx) +/- kx -> Wx
	Wxd_t = Wx_t;
	# ([Wy]) -> Wyd
	# (Wy) +/- ky -> Wy
	Wyd_t =
Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) ## ## ## ACCA series ## ## :mac WmWm_t,ACCA_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wm) -> ACCA ACCA = ACCA + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mac WmWm_t,ACCA_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wm) -> ACCA ACCA = ACCA + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mac WmWm_t,ACCA_t^Wx_t^Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wm) -> ACCA ACCA = ACCA + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mac WmWm_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCA + (Wm)*(Wm) -> ACCA ACCA = ACCA + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } ## ## ## ACCB series ## ## :mac WmWm_t,ACCB_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wm) -> ACCB ACCB = ACCB + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) 
+/- ky -> Wy # Wyd_t = Wy_t; } :mac WmWm_t,ACCB_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wm) -> ACCB ACCB = ACCB + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mac WmWm_t,ACCB_t^Wx_t^Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wm) -> ACCB ACCB = ACCB + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mac WmWm_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # ACCB + (Wm)*(Wm) -> ACCB ACCB = ACCB + WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } @endif @if defined(dsPIC33C) :max ACCA_t is OP_23_16=0xCE & ACCA_t & OP_14_0=0x1000 { local val = ACCA_t - ACCB; ACCA_t = (ACCA)*zext(val s<= 0) + (ACCB)*zext(val s> 0); SRL_Z = zext(val s<= 0); SRL_N = 0; SRL_OV = 0; } :max ACCB_t is OP_23_16=0xCE & ACCB_t & OP_14_0=0x1000 { local val = ACCB_t - ACCA; ACCB_t = (ACCB)*zext(val s<= 0) + (ACCA)*zext(val s> 0); SRL_Z = zext(val s<= 0); SRL_N = 0; SRL_OV = 0; } :max.v ACCA_t, Ws_t is OP_23_16=0xCE & ACCA_t & OP_14_7=0x30 & Ws_t { local val = ACCA_t - ACCB; ACCA_t = (ACCA)*zext(val s<= 0) + (ACCB)*zext(val s> 0); Ws_t = (val:2)*zext(val s> 0); SRL_Z = zext(val s<= 0); SRL_N = 0; SRL_OV = 0; } :max.v ACCB_t, Ws_t is OP_23_16=0xCE & ACCB_t & OP_14_7=0x30 & Ws_t { local val = ACCB_t - ACCA; ACCB_t = (ACCB)*zext(val s<= 0) + (ACCA)*zext(val s> 0); Ws_t = (val:2)*zext(val s> 0); SRL_Z = zext(val s<= 0); SRL_N = 0; SRL_OV = 0; } 
# MIN Acc -- branchless select of the smaller accumulator: val is the signed
# difference, and the zext(cond) multiplies keep the current accumulator when
# val >= 0 and copy the other one when val < 0. SRL_Z is set when val >= 0,
# SRL_N when val < 0, and SRL_OV is cleared.
:min ACCA_t is OP_23_16=0xCE & ACCA_t & OP_14_0=0x3000
{
	local val = ACCA_t - ACCB;
	ACCA_t = (ACCA)*zext(val s>= 0) + (ACCB)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
:min ACCB_t is OP_23_16=0xCE & ACCB_t & OP_14_0=0x3000
{
	local val = ACCB_t - ACCA;
	ACCB_t = (ACCB)*zext(val s>= 0) + (ACCA)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
# MIN.V Acc,Ws -- as MIN, but Ws additionally receives the truncated
# difference (val:2) when val < 0, otherwise 0.
:min.v ACCA_t, Ws_t is OP_23_16=0xCE & ACCA_t & OP_14_7=0x70 & Ws_t
{
	local val = ACCA_t - ACCB;
	ACCA_t = (ACCA)*zext(val s>= 0) + (ACCB)*zext(val s< 0);
	Ws_t = (val:2)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
:min.v ACCB_t, Ws_t is OP_23_16=0xCE & ACCB_t & OP_14_7=0x70 & Ws_t
{
	local val = ACCB_t - ACCA;
	ACCB_t = (ACCB)*zext(val s>= 0) + (ACCA)*zext(val s< 0);
	Ws_t = (val:2)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
# MINZ Acc -- conditional MIN: a no-op unless SRL_Z is already set.
:minz ACCA_t is OP_23_16=0xCE & ACCA_t & OP_14_0=0x3400
{
	if (SRL_Z == 0) goto inst_next;
	local val = ACCA_t - ACCB;
	ACCA_t = (ACCA)*zext(val s>= 0) + (ACCB)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
:minz ACCB_t is OP_23_16=0xCE & ACCB_t & OP_14_0=0x3400
{
	if (SRL_Z == 0) goto inst_next;
	local val = ACCB_t - ACCA;
	ACCB_t = (ACCB)*zext(val s>= 0) + (ACCA)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
# MINZ.V Acc,Ws -- conditional MIN.V (only acts when SRL_Z is set).
:minz.v ACCA_t, Ws_t is OP_23_16=0xCE & ACCA_t & OP_14_7=0x78 & Ws_t
{
	if (SRL_Z == 0) goto inst_next;
	local val = ACCA_t - ACCB;
	ACCA_t = (ACCA)*zext(val s>= 0) + (ACCB)*zext(val s< 0);
	Ws_t = (val:2)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
:minz.v ACCB_t, Ws_t is OP_23_16=0xCE & ACCB_t & OP_14_7=0x78 & Ws_t
{
	if (SRL_Z == 0) goto inst_next;
	local val = ACCB_t - ACCA;
	ACCB_t = (ACCB)*zext(val s>= 0) + (ACCA)*zext(val s< 0);
	Ws_t = (val:2)*zext(val s< 0);
	SRL_Z = zext(val s>= 0);
	SRL_N = zext(val s< 0);
	SRL_OV = 0;
}
:mov.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0xF &
OP_15=1 & WREG_t & f13_t { WREG_t = f13_t; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); } :mov.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0xF & OP_15=1 & WREGbyte_t & f13byte_t { WREGbyte_t = f13byte_t; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); } :mov.w WREG_W0_t,f13_t is OP_23_20=0xB & OP_19_16=0x7 & OP_15=1 & WREG_W0_t & OP_13=1 & f13_t { f13_t = WREG_W0_t; } :mov.b WREG_W0byte_t,f13byte_t is OP_23_20=0xB & OP_19_16=0x7 & OP_15=1 & WREG_W0byte_t & OP_13=1 & f13byte_t { f13byte_t = WREG_W0byte_t; } :mov.w f15b_t,Wndb_t is OP_23_20=0x8 & OP_19=0x0 & f15b_t & Wndb_t { Wndb_t = f15b_t; } :mov.w Wns_t,f15b_t is OP_23_20=0x8 & OP_19=0x1 & f15b_t & Wns_t { f15b_t = Wns_t; } :mov.b TOK_k8c,Wndbyte_t is OP_23_20=0xB & OP_19_16=0x3 & OP_15_12=0xC & TOK_k8c & Wndbyte_t { Wndbyte_t = TOK_k8c; } :mov.w k16_t,Wndb_t is OP_23_20=0x2 & k16_t & Wndb_t { Wndb_t = k16_t; } :mov.w WsSlit10_t,Wnda_t is OP_23_20=0x9 & OP_19=0 & TOK_B=0 & WsSlit10_t & Wnda_t { Wnda_t = WsSlit10_t; } :mov.b WsSlit10byte_t,Wndabyte_t is OP_23_20=0x9 & OP_19=0 & TOK_B=1 & WsSlit10byte_t & Wndabyte_t { Wndabyte_t = WsSlit10byte_t; } :mov.w Wn_t,WdSlit10_t is OP_23_20=0x9 & OP_19=1 & TOK_B=0 & WdSlit10_t & Wn_t { WdSlit10_t = Wn_t; } :mov.b Wnbyte_t,WdSlit10byte_t is OP_23_20=0x9 & OP_19=1 & TOK_B=1 & WdSlit10byte_t & Wnbyte_t { WdSlit10byte_t = Wnbyte_t; } :mov.w movWs,movWd is OP_23_20=0x7 & OP_19=1 & TOK_B=0 & movWd & movWs { local result = movWs; build movWd; movWd = result; } :mov.b movWsbyte,movWdbyte is OP_23_20=0x7 & OP_19=1 & TOK_B=1 & movWdbyte & movWsbyte { local result = movWsbyte; build movWdbyte; movWdbyte = result; } :mov.d Wsd_t,Wndd_t is OP_23_20=0xB & OP_19_16=0xE & OP_15_12=0x0 & OP_11=0x0 & Wndd_t & OP_7=0x0 & $(WSconstraint) & Wsd_t { Wndd_t = Wsd_t; } :mov.d TOK_3_1_Dregn,Wdd_t is OP_23_20=0xb & OP_19_16=0xe & OP_15_14=0x2 & OP_6_4=0x0 & TOK_3_1_Dreg & TOK_3_1_Dregn & OP_0=0 & $(WDconstraint) & Wdd_t { local result = TOK_3_1_Dreg; build Wdd_t; Wdd_t = result; } @if 
defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) TOK_9_0_U_t: is TOK_9_0_U { export *[const]:2 TOK_9_0_U; } :movpag TOK_9_0_U_t,DSRPAG is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xC & TOK_11_10_PP=0 & TOK_9_0_U_t & DSRPAG { DSRPAG = TOK_9_0_U_t; } :movpag TOK_9_0_U_t,DSWPAG is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xC & TOK_11_10_PP=1 & TOK_9_0_U_t & DSWPAG { DSWPAG = TOK_9_0_U_t; } :movpag TOK_9_0_U_t,TBLPAG is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xC & TOK_11_10_PP=2 & TOK_9_0_U_t & TBLPAG { TBLPAG = TOK_9_0_U_t:1; } :movpag TOK_3_0_Wreg,DSRPAG is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xD & TOK_11_10_PP=0 & OP_9_4=0x0 & TOK_3_0_Wreg & DSRPAG { DSRPAG = TOK_3_0_Wreg; } :movpag TOK_3_0_Wreg,DSWPAG is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xD & TOK_11_10_PP=1 & OP_9_4=0x0 & TOK_3_0_Wreg & DSWPAG { DSWPAG = TOK_3_0_Wreg; } :movpag TOK_3_0_Wreg,TBLPAG is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xD & TOK_11_10_PP=2 & OP_9_4=0x0 & TOK_3_0_Wreg & TBLPAG { TBLPAG = TOK_3_0_Wreg:1; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) ## ## ## ACCA series ## ## :movsac ACCA_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :movsac ACCA_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :movsac ACCA_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> 
Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :movsac ACCA_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :movsac ACCA_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :movsac ACCA_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :movsac ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :movsac ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCA_t & OP_14=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } ## ## ## ACCB series ## ## :movsac ACCB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # 
Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :movsac ACCB_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :movsac ACCB_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :movsac ACCB_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :movsac ACCB_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :movsac ACCB_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :movsac ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :movsac 
ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19_16=0x7 & ACCB_t & OP_14=0x0 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :mpy WmWn_t,ACCA_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCA ACCA = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWn_t,ACCA_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCA ACCA = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mpy WmWn_t,ACCA_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & OP_1_0=0x3 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCA ACCA = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWn_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x0 & OP_1_0=0x3 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCA ACCA = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } ## ## ## ACCB series ## ## :mpy WmWn_t,ACCB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class 
instruction # (Wm)*(Wn) -> ACCB ACCB = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWn_t,ACCB_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCB ACCB = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mpy WmWn_t,ACCB_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & OP_1_0=0x3 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCB ACCB = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWn_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x0 & OP_1_0=0x3 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wn) -> ACCB ACCB = WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :mpy WmWm_t,ACCA_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x1 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCA ACCA = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWm_t,ACCA_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x1 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCA ACCA = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- 
ky -> Wy Wyd_t = Wy_t; } :mpy WmWm_t,ACCA_t^Wx_t^Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x1 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCA ACCA = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWm_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCA_t & OP_14=0x0 & OP_1_0=0x1 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCA ACCA = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } ## ## ## ACCB series ## ## :mpy WmWm_t,ACCB_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x1 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCB ACCB = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWm_t,ACCB_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x1 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCB ACCB = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mpy WmWm_t,ACCB_t^Wx_t^Wxd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x1 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # (Wm)*(Wm) -> ACCB ACCB = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy WmWm_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xF & OP_19_18=0x0 & WmWm_t & ACCB_t & OP_14=0x0 & OP_1_0=0x1 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # 
(Wm)*(Wm) -> ACCB ACCB = WmWm_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :mpy.n WmWn_t,ACCA_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCA ACCA = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy.n WmWn_t,ACCA_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCA ACCA = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mpy.n WmWn_t,ACCA_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & OP_1_0=0x3 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCA ACCA = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy.n WmWn_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & OP_1_0=0x3 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCA ACCA = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } ## ## ## ACCB series ## ## :mpy.n WmWn_t,ACCB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCB ACCB = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy.n 
WmWn_t,ACCB_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & OP_1_0=0x3 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCB ACCB = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } :mpy.n WmWn_t,ACCB_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & OP_1_0=0x3 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCB ACCB = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; } :mpy.n WmWn_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & OP_1_0=0x3 & Wxd_t & Wyd_t & Wx_t & Wy_t { # Note: MAC-class instruction # -(Wm)*(Wn) -> ACCB ACCB = -WmWn_t; testSRH_OA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :msc WmWn_t,ACCA_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :msc WmWn_t,ACCA_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :msc WmWn_t,ACCA_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & 
ACCA_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :msc WmWn_t,ACCA_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :msc WmWn_t,ACCA_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :msc WmWn_t,ACCA_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } :msc WmWn_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB # AWB_t = ACCBH; } :msc 
WmWn_t,ACCA_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCA_t & OP_14=0x1 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCA - (Wm)*(Wn) -> ACCA ACCA = ACCA - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCB rounded -> AWB AWB_t = ACCBH; } ## ## ## ACCB series ## ## :msc WmWn_t,ACCB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_9_6_iiii=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx # Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) 
+/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^Wx_t^Wxd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_5_2_jjjj=0x4 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^Wx_t^Wxd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_5_2_jjjj=0x4 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy # Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & TOK_1_0_aa=0x2 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB # AWB_t = ACCAH; } :msc WmWn_t,ACCB_t^Wx_t^Wxd_t^Wy_t^Wyd_t^AWB_t is OP_23_20=0xC & OP_19=0x0 & WmWn_t & ACCB_t & OP_14=0x1 & Wxd_t & Wyd_t & Wx_t & Wy_t & AWB_t { # Note: MAC-class instruction # ACCB - (Wm)*(Wn) -> ACCB ACCB = ACCB - WmWn_t; testSRH_OA(); testSRH_SA(); # ([Wx]) -> Wxd # (Wx) +/- kx -> Wx Wxd_t = Wx_t; # ([Wy]) -> Wyd # (Wy) +/- ky -> Wy Wyd_t = Wy_t; # ACCA rounded -> AWB AWB_t = ACCAH; } @endif :mul.w f13_t is OP_23_16=0xbc & OP_15=0 & OP_13=0x0 & OP_14=0 & f13_t { W3W2 = zext(f13_t) * zext(W0); } :mul.b f13byte_t is OP_23_16=0xbc & OP_15=0 & OP_13=0x0 & OP_14=1 & f13byte_t { W2 = zext(f13byte_t) * zext(W0byte); } :mul.ss Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=1 & Wbd_t & Wndd_t & 
OP_7=0 & $(WSconstraint) & WsMUL_t { Wndd_t = sext(Wbd_t) * sext(WsMUL_t); } @if defined(dsPIC33E) || defined(dsPIC33C) A7_t: ACCA is TOK_7=0 & ACCA { export ACCA; } A7_t: ACCB is TOK_7=1 & ACCB { export ACCB; } :mul.ss Wbd_t,WsMUL_t,A7_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=1 & Wbd_t & OP_10_8=0x7 & A7_t & $(WSconstraint) & WsMUL_t { A7_t = sext(Wbd_t) * sext(WsMUL_t); } @endif :mul.su Wbd_t,k5,Wndd_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=0 & Wbd_t & Wndd_t & OP_7=0 & OP_6_5=3 & k5 { Wndd_t = sext(Wbd_t) * zext(k5); } :mul.su Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=0 & Wbd_t & Wndd_t & OP_7=0 & $(WSconstraint) & WsMUL_t { Wndd_t = sext(Wbd_t) * zext(WsMUL_t); } @if defined(dsPIC33E) || defined(dsPIC33C) :mul.su Wbd_t,WsMUL_t,A7_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=0 & Wbd_t & OP_10_8=0x7 & A7_t & $(WSconstraint) & WsMUL_t { A7_t = sext(Wbd_t) * zext(WsMUL_t); } :mul.su Wbd_t,k5,A7_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=0 & Wbd_t & OP_10_8=0x7 & A7_t & OP_6_5=0x3 & k5 { A7_t = sext(Wbd_t) * zext(k5); } @endif :mul.us Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=1 & Wbd_t & Wndd_t & OP_7=0 & $(WSconstraint) & WsMUL_t { Wndd_t = zext(Wbd_t) * sext(WsMUL_t); } @if defined(dsPIC33E) || defined(dsPIC33C) :mul.us Wbd_t,WsMUL_t,A7_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=1 & Wbd_t & OP_10_8=0x7 & A7_t & $(WSconstraint) & WsMUL_t { A7_t = zext(Wbd_t) * sext(WsMUL_t); } @endif :mul.uu Wbd_t,k5,Wndd_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=0 & Wbd_t & Wndd_t & OP_7=0 & OP_6_5=0x3 & k5 { Wndd_t = zext(Wbd_t) * zext(k5); } :mul.uu Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=0 & Wbd_t & Wndd_t & OP_7=0 & $(WSconstraint) & WsMUL_t { Wndd_t = zext(Wbd_t) * zext(WsMUL_t); } @if defined(dsPIC33E) || defined(dsPIC33C) :mul.uu Wbd_t,WsMUL_t,A7_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=0 & Wbd_t & OP_10_8=0x7 & A7_t & $(WSconstraint) & WsMUL_t { A7_t = zext(Wbd_t) * zext(WsMUL_t); } :mul.uu Wbd_t,k5,A7_t is OP_23_20=0xb & 
OP_19_16=0x8 & OP_15=0 & Wbd_t & OP_10_8=0x7 & A7_t & OP_6_5=0x3 & k5 { A7_t = zext(Wbd_t) * zext(k5); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :mulw.ss Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=1 & Wbd_t & Wndd_t & OP_7=1 & $(WSconstraint) & WsMUL_t { Wndd_t = sext(Wbd_t) * sext(WsMUL_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :mulw.su Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=0 & Wbd_t & Wndd_t & OP_7=1 & $(WSconstraint) & WsMUL_t { Wndd_t = sext(Wbd_t) * zext(WsMUL_t); } :mulw.su Wbd_t,k5_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x9 & OP_15=0 & Wbd_t & Wndd_t & OP_7=1 & OP_6_5=3 & k5_t { Wndd_t = sext(Wbd_t) * zext(k5_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :mulw.us Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=1 & Wbd_t & Wndd_t & OP_7=1 & $(WSconstraint) & WsMUL_t { Wndd_t = zext(Wbd_t) * sext(WsMUL_t); } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :mulw.uu Wbd_t,WsMUL_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=0 & Wbd_t & Wndd_t & OP_7=1 & $(WSconstraint) & WsMUL_t { Wndd_t = zext(Wbd_t) * zext(WsMUL_t); } :mulw.uu Wbd_t,k5_t,Wndd_t is OP_23_20=0xb & OP_19_16=0x8 & OP_15=0 & Wbd_t & Wndd_t & OP_7=1 & OP_6_5=3 & k5_t { Wndd_t = zext(Wbd_t) * zext(k5_t); } @endif :neg.w f13_t^WREG_t is OP_23_16=0xee & OP_15=0 & OP_14=0 & WREG_t & f13_t { WREG_t = -f13_t; SRH_DC = 0; SRL_OV = 0; SRL_C = 0; testSRL_N(WREG_t); testSRL_Z(WREG_t); } :neg.b f13byte_t^WREGbyte_t is OP_23_16=0xee & OP_15=0 & OP_14=1 & WREGbyte_t & f13byte_t { WREGbyte_t = -f13byte_t; SRH_DC = 0; SRL_OV = 0; SRL_C = 0; testSRL_N(WREGbyte_t); testSRL_Z(WREGbyte_t); } :neg.w Ws_t,Wd_t is OP_23_16=0xea & OP_15=0 & OP_14=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local result = -Ws_t; build Wd_t; Wd_t = result; SRH_DC = 0; SRL_OV = 0; SRL_C = 0; testSRL_N(result); testSRL_Z(result); } :neg.b Wsbyte_t,Wdbyte_t is OP_23_16=0xea & 
OP_15=0 & OP_14=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local result = -Wsbyte_t; build Wdbyte_t; Wdbyte_t = result; SRH_DC = 0; SRL_OV = 0; SRL_C = 0; testSRL_N(result); testSRL_Z(result); } @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :neg ACCA_t is OP_23_20=0xC & OP_19_16=0xB & ACCA_t & OP_14_12=0x1 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { ACCA = -ACCA; testSRH_OA(); testSRH_SA(); } :neg ACCB_t is OP_23_20=0xC & OP_19_16=0xB & ACCB_t & OP_14_12=0x1 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { ACCB = -ACCB; testSRH_OB(); testSRH_SB(); } @endif :nop is OP_23_16=0x0 { # No definition on purpose } # :nopr is OP_23_16=0xff { # No definition on purpose } :pop f15_t is OP_23_20=0xF & OP_19_16=0x9 & f15_t & OP_0=0x0 { W15 = W15 - 2; f15_t = *[ram]:2 W15; } :pop movWd is OP_23_20=0x7 & OP_19=0x1 & movWd & OP_14=0x0 & OP_6_4=0x4 & OP_3_0=0xF { W15 = W15 - 2; local result = *[ram]:2 W15; build movWd; movWd = result; } :pop.d Wndd_t is OP_23_20=0xB & OP_19_16=0xE & OP_15_12=0x0 & OP_11=0x0 & Wndd_t & OP_7_4=0x4 & OP_3_0=0xF { W15 = W15 - 4; Wndd_t = *[ram]:4 W15; } :pop.s is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0x8 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { W0 = SHADOW_W0; W1 = SHADOW_W1; W2 = SHADOW_W2; W3 = SHADOW_W3; SRL_C = SHADOW_SRL_C; SRL_Z = SHADOW_SRL_Z; SRL_OV = SHADOW_SRL_OV; SRL_N = SHADOW_SRL_N; SRH_DC = SHADOW_SRH_DC; } :push f15_t is OP_23_20=0xF & OP_19_16=0x8 & f15_t & OP_0=0x0 { *[ram]:2 W15 = f15_t; W15 = W15 + 2; } :push movWs is OP_23_20=0x7 & OP_19=0x1 & movWs & OP_14_12=0x1 & OP_11_8=0xF & OP_7=0x1 { *[ram]:2 W15 = movWs; W15 = W15 + 2; } :push.d TOK_3_1_Dregn is OP_23_20=0xB & OP_19_16=0xE & OP_15_12=0x9 & OP_11_8=0xF & OP_7_4=0x8 & TOK_3_1_Dreg & TOK_3_1_Dregn & OP_0=0x0 { *[ram]:4 W15 = TOK_3_1_Dreg; W15 = W15 + 4; } :push.s is OP_23_20=0xF & OP_19_16=0xE & OP_15_12=0xA & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { SHADOW_W0 = W0; SHADOW_W1 = W1; SHADOW_W2 = W2; SHADOW_W3 = W3; SHADOW_SRL_C = 
SRL_C; SHADOW_SRL_Z = SRL_Z; SHADOW_SRL_OV = SRL_OV; SHADOW_SRL_N = SRL_N; SHADOW_SRH_DC = SRH_DC; } define pcodeop pwrsavOp; :pwrsav OP_0 is OP_23_1=0x7f2000 & OP_0 { pwrsavOp(); } @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :rcall n16_t is OP_23_20=0x0 & OP_19_16=0x7 & n16_t & WordInstNext4 { *[ram]:4 W15 = WordInstNext4; W15 = W15 + 4; call n16_t; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :rcall n16_t is OP_23_20=0x0 & OP_19_16=0x7 & n16_t & WordInstNext4 { *[ram]:4 W15 = WordInstNext4 | zext(CORCON_SFA); W15 = W15 + 4; CORCON_SFA = 0; call n16_t; } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :rcall WnRDest_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x2 & OP_11_8=0x0 & OP_7_4=0x0 & WnRDest_t & WordInstNext4 { *[ram]:4 W15 = WordInstNext4; W15 = W15 + 4; call [WnRDest_t]; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :rcall WnRDest_t is OP_23_20=0x0 & OP_19_16=0x1 & OP_15_12=0x0 & OP_11_8=0x2 & OP_7_4=0x0 & WnRDest_t & WordInstNext4 { *[ram]:4 W15 = WordInstNext4 | zext(CORCON_SFA); W15 = W15 + 4; CORCON_SFA = 0; call [WnRDest_t]; } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :repeat k14_t is OP_23_14=0x24 & k14_t [ repeatInstr=1; globalset(inst_next,repeatInstr); ] { RCOUNT = k14_t + 1; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :repeat k15_t is OP_23_16=0x09 & OP_15=0 & k15_t [ repeatInstr=1; globalset(inst_next,repeatInstr); ] { RCOUNT = k15_t + 1; } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :repeat TOK_3_0_Wreg is OP_23_4=0x9800 & TOK_3_0_Wreg [ repeatInstr=1; globalset(inst_next,repeatInstr); ] { RCOUNT = TOK_3_0_Wreg & 0x7FFF; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :repeat TOK_3_0_Wreg is OP_23_4=0x9800 & TOK_3_0_Wreg [ repeatInstr=1; globalset(inst_next,repeatInstr); ] { 
RCOUNT = TOK_3_0_Wreg; } @endif :reset is OP_23_0=0xfe0000 { SRH_OA = 0; SRH_OB = 0; SRH_OAB = 0; SRH_SA = 0; SRH_SB = 0; SRH_SAB = 0; SRH_DA = 0; SRH_DC = 0; SRL_IPL2 = 0; SRL_IPL1 = 0; SRL_IPL0 = 0; SRL_RA = 0; SRL_N = 0; SRL_OV = 0; SRL_Z = 0; SRL_C = 0; CORCON_SFA = 0; PC = 0; goto [PC]; } @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :retfie is OP_23_20=0x0 & OP_19_16=0x6 & OP_15_12=0x4 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { W15 = W15 - 4; local tmp = *[ram]:4 W15; tmpSRL:1 = tmp(3); unpackSRL( tmpSRL ); CORCON_IPL3 = (tmp & 0x00800000) != 0; return [tmp & 0x7FFFFF]; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :retfie is OP_23_20=0x0 & OP_19_16=0x6 & OP_15_12=0x4 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { W15 = W15 - 4; local tmp = *[ram]:4 W15; tmpSRL:1 = tmp(3); unpackSRL( tmpSRL ); CORCON_IPL3 = (tmp & 0x00800000) != 0; CORCON_SFA = (tmp & 0x1) != 0; return [tmp & 0x7FFFFE]; } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :retlw.w k10_t,Wn_t is OP_23_20=0x0 & OP_19_16=0x5 & OP_15=0 & k10_t & Wn_t { W15 = W15 - 4; local tmp = *[ram]:4 W15; tmpSRL:1 = tmp(3); unpackSRL( tmpSRL ); CORCON_IPL3 = (tmp & 0x00800000) != 0; Wn_t = k10_t; return [tmp & 0x7FFFFF]; } :retlw.b k10byte_t,Wnbyte_t is OP_23_20=0x0 & OP_19_16=0x5 & OP_15=0 & k10byte_t & Wnbyte_t { W15 = W15 - 4; local tmp = *[ram]:4 W15; tmpSRL:1 = tmp(3); unpackSRL( tmpSRL ); CORCON_IPL3 = (tmp & 0x00800000) != 0; Wnbyte_t = k10byte_t; return [tmp & 0x7FFFFF]; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :retlw.w k10_t,Wn_t is OP_23_20=0x0 & OP_19_16=0x5 & OP_15=0 & k10_t & Wn_t { W15 = W15 - 4; local tmp = *[ram]:4 W15; tmpSRL:1 = tmp(3); unpackSRL( tmpSRL ); CORCON_IPL3 = (tmp & 0x00800000) != 0; CORCON_SFA = (tmp & 0x1) != 0; Wn_t = k10_t; return [tmp & 0x7FFFFE]; } :retlw.b k10byte_t,Wnbyte_t is OP_23_20=0x0 & OP_19_16=0x5 & OP_15=0 & k10byte_t & Wnbyte_t { W15 = W15 - 4; 
local tmp = *[ram]:4 W15; tmpSRL:1 = tmp(3); unpackSRL( tmpSRL ); CORCON_IPL3 = (tmp & 0x00800000) != 0; CORCON_SFA = (tmp & 0x1) != 0; Wnbyte_t = k10byte_t; return [tmp & 0x7FFFFE]; } @endif @if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F) :return is OP_23_20=0x0 & OP_19_16=0x6 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { W15 = W15 - 4; local tmp = *[ram]:4 W15; return [tmp & 0x7FFFFF]; } @endif @if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C) :return is OP_23_20=0x0 & OP_19_16=0x6 & OP_15_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { W15 = W15 - 4; local tmp = *[ram]:4 W15; CORCON_SFA = ( tmp & 0x0001 ) != 0; tmp = tmp & 0x7FFFFE; return [tmp]; } @endif :rlc.w f13_t^WREG_t is OP_23_16=0xd6 & OP_15=1 & WREG_t & f13_t { local src = f13_t; WREG_t = ( src << 1 ) | zext(SRL_C); testSRL_N(WREG_t); testSRL_Z(WREG_t); SRL_C = ( src & 0x8000 ) != 0; } :rlc.b f13byte_t^WREGbyte_t is OP_23_16=0xd6 & OP_15=1 & WREGbyte_t & f13byte_t { local src = f13byte_t; WREGbyte_t = ( src << 1 ) | SRL_C; testSRL_N(WREGbyte_t); testSRL_Z(WREGbyte_t); SRL_C = ( src & 0x80 ) != 0; } :rlc.w Ws_t,Wd_t is OP_23_16=0xd2 & OP_15=1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local result = (src << 1) | zext(SRL_C); build Wd_t; Wd_t = result; testSRL_N(result); testSRL_Z(result); SRL_C = (src & 0x8000) != 0; } :rlc.b Wsbyte_t,Wdbyte_t is OP_23_16=0xd2 & OP_15=1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; local result = (src << 1) | SRL_C; build Wdbyte_t; Wdbyte_t = result; testSRL_N(result); testSRL_Z(result); SRL_C = (src & 0x80) != 0; } :rlnc.w f13_t^WREG_t is OP_23_16=0xd6 & OP_15=0 & WREG_t & f13_t { local src = f13_t; WREG_t = (src << 1) | ((src & 0x8000) >> 15); testSRL_N(WREG_t); testSRL_Z(WREG_t); } :rlnc.b f13byte_t^WREGbyte_t is OP_23_16=0xd6 & OP_15=0 & WREGbyte_t & f13byte_t { local src = f13byte_t; WREGbyte_t = (src << 1) | ((src & 0x80) >> 
7); testSRL_N(WREGbyte_t); testSRL_Z(WREGbyte_t); } :rlnc.w Ws_t,Wd_t is OP_23_16=0xd2 & OP_15=0 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local result = (src << 1) | ((src & 0x8000) >> 15); build Wd_t; Wd_t = result; testSRL_N(result); testSRL_Z(result); } :rlnc.b Wsbyte_t,Wdbyte_t is OP_23_16=0xd2 & OP_15=0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; local result = (src << 1) | ((src & 0x80) >> 7); build Wdbyte_t; Wdbyte_t = result; testSRL_N(result); testSRL_Z(result); } :rrc.w f13_t^WREG_t is OP_23_16=0xd7 & OP_15=1 & WREG_t & f13_t { local src = f13_t; WREG_t = ((zext(SRL_C)) * 0x8000) | (src >> 1); testSRL_N(WREG_t); testSRL_Z(WREG_t); SRL_C = (src & 1) != 0; } :rrc.b f13byte_t^WREGbyte_t is OP_23_16=0xd7 & OP_15=1 & WREGbyte_t & f13byte_t { local src = f13byte_t; WREGbyte_t = (SRL_C * 0x80) | (src >> 1) ; testSRL_N(WREGbyte_t); testSRL_Z(WREGbyte_t); SRL_C = (src & 1) != 0; } :rrc.w Ws_t,Wd_t is OP_23_16=0xd3 & OP_15=1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local result = ((zext(SRL_C)) * 0x8000) | (src >> 1); build Wd_t; Wd_t = result; testSRL_N(result); testSRL_Z(result); SRL_C = (src & 1) != 0; } :rrc.b Wsbyte_t,Wdbyte_t is OP_23_16=0xd3 & OP_15=1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; local result = (SRL_C * 0x80) | (src >> 1); build Wdbyte_t; Wdbyte_t = result; testSRL_N(result); testSRL_Z(result); SRL_C = (src & 1) != 0; } :rrnc.w f13_t^WREG_t is OP_23_16=0xd7 & OP_15=0 & TOK_B=0 & WREG_t & f13_t { local src = f13_t; WREG_t = (src >> 1) | ((src & 1) * 0x8000); testSRL_N(WREG_t); testSRL_Z(WREG_t); } :rrnc.b f13byte_t^WREGbyte_t is OP_23_16=0xd7 & OP_15=0 & TOK_B=1 & WREGbyte_t & f13byte_t { local src = f13byte_t; WREGbyte_t = (src >> 1) | ((src & 1) * 0x80); testSRL_N(WREGbyte_t); testSRL_Z(WREGbyte_t); } :rrnc.w Ws_t,Wd_t is OP_23_16=0xd3 & OP_15=0 & TOK_B=0 & 
$(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t
{
	# RRNC.W (register form): rotate right without carry.
	local src = Ws_t;
	local result = (src >> 1) | ((src & 1) * 0x8000);
	build Wd_t;
	Wd_t = result;
	# Fix: test the computed value, consistent with every sibling
	# rotate/shift constructor (avoids re-reading a just-written,
	# possibly indirect, destination operand).
	testSRL_N(result);
	testSRL_Z(result);
}

:rrnc.b Wsbyte_t,Wdbyte_t is OP_23_16=0xd3 & OP_15=0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t
{
	# RRNC.B: rotate right without carry, byte form.
	local src = Wsbyte_t;
	local result = (src >> 1) | ((src & 1) * 0x80);
	build Wdbyte_t;
	Wdbyte_t = result;
	testSRL_N(result);
	testSRL_Z(result);
}

@if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C)

# SAC: store the accumulator, arithmetically shifted by the signed
# 4-bit slit count, into the destination word.
:sac ACCA_t^r4_t,WdWRO_t is OP_23_20=0xC & OP_19_16=0xC & ACCA_t & r4_t & WdWRO_t
{
	local tmp:6 = ACCA s>> (16 + r4_t);
	WdWRO_t = tmp:2;
}

:sac ACCB_t^r4_t,WdWRO_t is OP_23_20=0xC & OP_19_16=0xC & ACCB_t & r4_t & WdWRO_t
{
	local tmp:6 = ACCB s>> (16 + r4_t);
	WdWRO_t = tmp:2;
}
@endif

@if defined(dsPIC33C)
# SAC.D: store shifted accumulator as a double word.
:sac.d ACCA_t^r4_t,Wsnd_t is OP_23_16=0xDC & OP_14=0x0 & OP_7_4=0x0 & ACCA_t & r4_t & Wsnd_t
{
	local tmp:6 = ACCA s>> (16 + r4_t);
	Wsnd_t = tmp:4;
}

:sac.d ACCB_t^r4_t,Wsnd_t is OP_23_16=0xDC & OP_14=0x0 & OP_7_4=0x0 & ACCB_t & r4_t & Wsnd_t
{
	local tmp:6 = ACCB s>> (16 + r4_t);
	Wsnd_t = tmp:4;
}
@endif

@if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C)
# SAC.R: store the rounded, shifted accumulator (round bit is added
# at the position just below the extracted word).
:sac.r ACCA_t^r4_t,WdWRO_t is OP_23_20=0xC & OP_19_16=0xD & ACCA_t & r4_t & WdWRO_t
{
	local tmp:6 = ( ACCA + (0x80000000 >> (16 - r4_t)) ) s>> (16 + r4_t);
	WdWRO_t = tmp:2;
}

:sac.r ACCB_t^r4_t,WdWRO_t is OP_23_20=0xC & OP_19_16=0xD & ACCB_t & r4_t & WdWRO_t
{
	local tmp:6 = ( ACCB + (0x80000000 >> (16 - r4_t)) ) s>> (16 + r4_t);
	WdWRO_t = tmp:2;
}
@endif

# SE: sign-extend the low byte of Ws into Wnd.
:se Ws_t,Wnd_t is OP_23_11=0x1f60 & Wnd_t & $(WSconstraint) & Ws_t
{
	Wnd_t = sext(Ws_t:1);
	testSRL_N(Wnd_t);
	testSRL_Z(Wnd_t);
	SRL_C = !SRL_N;
}

# SETM: set every bit of the destination.
:setm.w f13_t^WREG_t is OP_23_16=0xef & OP_15=1 & WREG_t & f13_t
{
	WREG_t = 0xFFFF;
}

:setm.b f13byte_t^WREGbyte_t is OP_23_16=0xef & OP_15=1 & WREGbyte_t & f13byte_t
{
	WREGbyte_t = 0xFF;
}

:setm.w Wd_t is OP_23_16=0xeb & OP_15=1 & OP_6_0=0x0 & TOK_B=0 &
$(WDconstraint) & Wd_t { Wd_t = 0xFFFF; } :setm.b Wdbyte_t is OP_23_16=0xeb & OP_15=1 & OP_6_0=0x0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t { Wdbyte_t = 0xFF; } @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :sftac ACCA_t,k6_t is OP_23_20=0xC & OP_19_16=0x8 & ACCA_t & OP_14_12=0x0 & OP_11_8=0x0 & OP_7_6=0x1 & k6_t { local tmp:8 = (sext(ACCA) << (16 - k6_t)) >> 16; ACCA = tmp:6; testSRH_OA(); testSRH_SA(); } :sftac ACCB_t,k6_t is OP_23_20=0xC & OP_19_16=0x8 & ACCB_t & OP_14_12=0x0 & OP_11_8=0x0 & OP_7_6=0x1 & k6_t { local tmp:8 = (sext(ACCB) << (16 - k6_t)) >> 16; ACCB = tmp:6; testSRH_OB(); testSRH_SB(); } @endif @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :sftac ACCA_t,Wbsft_t is OP_23_20=0xC & OP_19_16=0x8 & ACCA_t & OP_14_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & Wbsft_t { local tmp:8 = (sext(ACCA) << (16 - Wbsft_t)) >> 16; ACCA = tmp:6; testSRH_OA(); testSRH_SA(); } :sftac ACCB_t,Wbsft_t is OP_23_20=0xC & OP_19_16=0x8 & ACCB_t & OP_14_12=0x0 & OP_11_8=0x0 & OP_7_4=0x0 & Wbsft_t { local tmp:8 = (sext(ACCB) << (16 - Wbsft_t)) >> 16; ACCB = tmp:6; testSRH_OB(); testSRH_SB(); } @endif :sl.w f13_t^WREG_t is OP_23_20=0xD & OP_19_16=0x4 & OP_15=0x0 & WREG_t & f13_t { local src = f13_t; WREG_t = src << 1; testSRL_N(WREG_t); testSRL_Z(WREG_t); SRL_C = ((src & 0x8000) != 0); } :sl.b f13byte_t^WREGbyte_t is OP_23_20=0xD & OP_19_16=0x4 & OP_15=0x0 & WREGbyte_t & f13byte_t { local src = f13byte_t; WREGbyte_t = src << 1; testSRL_N(WREGbyte_t); testSRL_Z(WREGbyte_t); SRL_C = ((src & 0x80) != 0); } :sl.w Ws_t,Wd_t is OP_23_20=0xD & OP_19_16=0x0 & OP_15=0x0 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; local result = src << 1; build Wd_t; Wd_t = result; testSRL_N(result); testSRL_Z(result); SRL_C = ((src & 0x8000) != 0); } :sl.b Wsbyte_t,Wdbyte_t is OP_23_20=0xD & OP_19_16=0x0 & OP_15=0x0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = 
Wsbyte_t; local result = src << 1; build Wdbyte_t; Wdbyte_t = result; testSRL_N(result); testSRL_Z(result); SRL_C = ((src & 0x80) != 0); } :sl Wbd_t,k4_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xD & OP_15=0x0 & Wbd_t & Wnd_t & OP_6_4=0x4 & k4_t { Wnd_t = Wbd_t << k4_t; testSRL_N(Wnd_t); testSRL_Z(Wnd_t); } :sl Wbd_t,Wns_t,Wnd_t is OP_23_20=0xD & OP_19_16=0xD & OP_15=0x0 & Wbd_t & Wnd_t & OP_6_4=0x0 & Wns_t { Wnd_t = Wbd_t << (Wns_t & 0x1F); testSRL_N(Wnd_t); testSRL_Z(Wnd_t); } # SSTEP - ICD instruction compatible with Microchips ICD debugging hardware # TODO: locate encoding details for SSTEP instruction # define pcodeop sstep; # :sstep is OP_23_0=?? { # sstep(); # In-Circuit Debugger (ICD) Single Step # } :sub.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x5 & OP_15=0 & WREG_t & f13_t { local src = f13_t; subflags( src, W0 ); WREG_t = src - W0; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); testSRH_DCword( WREG_t ); } :sub.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x5 & OP_15=0 & WREGbyte_t & f13byte_t { local src = f13byte_t; subflags( src, W0byte ); WREGbyte_t = src - W0byte; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); testSRH_DCbyte( WREGbyte_t ); } :sub.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x1 & OP_15=0 & k10_t & Wn_t { subflags( Wn_t, k10_t ); Wn_t = Wn_t - k10_t; testSRL_N ( Wn_t ); testSRL_Z ( Wn_t ); testSRH_DCword( Wn_t ); } :sub.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x1 & OP_15=0 & k10byte_t & Wnbyte_t { subflags( Wnbyte_t, k10byte_t ); Wnbyte_t = Wnbyte_t - k10byte_t; testSRL_N ( Wnbyte_t ); testSRL_Z ( Wnbyte_t ); testSRH_DCbyte( Wnbyte_t ); } :sub.w Wb_t,k5_t,Wd_t is OP_23_20=0x5 & OP_19=0x0 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t { subflags( Wb_t, k5_t ); local result = Wb_t - k5_t; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :sub.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x5 & OP_19=0x0 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t { subflags( 
Wbbyte_t, k5byte_t ); local result = Wbbyte_t - k5byte_t; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } :sub.w Wb_t,Ws_t,Wd_t is OP_23_20=0x5 & OP_19=0x0 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; subflags( Wb_t, src ); local result = Wb_t - src; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :sub.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x5 & OP_19=0x0 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; subflags( Wbbyte_t, src ); local result = Wbbyte_t - src; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } @if defined(dsPIC30F) || defined(dsPIC33F) || defined(dsPIC33E) || defined(dsPIC33C) :sub ACCA_t is OP_23_20=0xC & OP_19_16=0xB & ACCA_t & OP_14_12=0x3 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { ACCA = ACCB - ACCA; testSRH_OA(); testSRH_SA(); } :sub ACCB_t is OP_23_20=0xC & OP_19_16=0xB & ACCB_t & OP_14_12=0x3 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0 { ACCB = ACCA - ACCB; testSRH_OB(); testSRH_SB(); } @endif :subb.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x5 & OP_15=1 & WREG_t & f13_t { local notCarry:2 = zext(!SRL_C); subflagsWithCarry( f13_t, W0, notCarry ); WREG_t = f13_t - W0 - notCarry; testSRL_N ( WREG_t ); testSRL_Zsticky( WREG_t ); testSRH_DCword ( WREG_t ); } :subb.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x5 & OP_15=1 & WREGbyte_t & f13byte_t { local notCarry = !SRL_C; subflagsWithCarry( f13byte_t, W0byte, notCarry ); WREGbyte_t = f13byte_t - W0byte - notCarry; testSRL_N ( WREGbyte_t ); testSRL_Zsticky( WREGbyte_t ); testSRH_DCbyte ( WREGbyte_t ); } :subb.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x1 & OP_15=1 & k10_t & Wn_t { local notCarry:2 = zext(!SRL_C); subflagsWithCarry( Wn_t, k10_t, notCarry ); Wn_t = Wn_t - k10_t - notCarry; testSRL_N ( Wn_t ); testSRL_Zsticky( Wn_t ); 
testSRH_DCword ( Wn_t ); } :subb.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x1 & OP_15=1 & k10byte_t & Wnbyte_t { local notCarry = !SRL_C; subflagsWithCarry( Wnbyte_t, k10byte_t, notCarry ); Wnbyte_t = Wnbyte_t - k10byte_t - notCarry; testSRL_N ( Wnbyte_t ); testSRL_Zsticky( Wnbyte_t ); testSRH_DCbyte ( Wnbyte_t ); } :subb.w Wb_t,k5_t,Wd_t is OP_23_20=0x5 & OP_19=0x1 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t { local notCarry:2 = zext(!SRL_C); subflagsWithCarry( Wb_t, k5_t, notCarry ); local result = Wb_t - k5_t - notCarry; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCword ( result ); } :subb.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x5 & OP_19=0x1 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t { local notCarry = !SRL_C; subflagsWithCarry( Wbbyte_t, k5byte_t, notCarry ); local result = Wbbyte_t - k5byte_t - notCarry; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCbyte ( result ); } :subb.w Wb_t,Ws_t,Wd_t is OP_23_20=0x5 & OP_19=0x1 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local notCarry:2 = zext(!SRL_C); local src = Ws_t; subflagsWithCarry( Wb_t, src, notCarry ); local result = Wb_t - src - notCarry; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCword ( result ); } :subb.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x5 & OP_19=0x1 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local notCarry = !SRL_C; local src = Wsbyte_t; subflagsWithCarry( Wbbyte_t, src, notCarry ); local result = Wbbyte_t - src - notCarry; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCbyte ( result ); } :subbr.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0xD & OP_15=1 & WREG_t & f13_t { local notCarry:2 = zext(!SRL_C); local src = f13_t; subflagsWithCarry( W0, src, notCarry ); WREG_t = W0 - src - notCarry; testSRL_N ( WREG_t ); 
testSRL_Zsticky( WREG_t ); testSRH_DCword ( WREG_t ); } :subbr.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0xD & OP_15=1 & WREGbyte_t & f13byte_t { local notCarry = !SRL_C; local src = f13byte_t; subflagsWithCarry( W0byte, src, notCarry ); WREGbyte_t = W0byte - src - notCarry; testSRL_N ( WREGbyte_t ); testSRL_Zsticky( WREGbyte_t ); testSRH_DCbyte ( WREGbyte_t ); } :subbr.w Wb_t,k5_t,Wd_t is OP_23_20=0x1 & OP_19=0x1 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t { local notCarry:2 = zext(!SRL_C); subflagsWithCarry( k5_t, Wb_t, notCarry ); local result = k5_t - Wb_t - notCarry; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCword ( result ); } :subbr.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x1 & OP_19=0x1 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t { local notCarry = !SRL_C; subflagsWithCarry( k5byte_t, Wbbyte_t, notCarry ); local result = k5byte_t - Wbbyte_t - notCarry; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCbyte ( result ); } :subbr.w Wb_t,Ws_t,Wd_t is OP_23_20=0x1 & OP_19=0x1 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local notCarry:2 = zext(!SRL_C); local src = Ws_t; subflagsWithCarry( src, Wb_t, notCarry ); local result = src - Wb_t - notCarry; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCword ( result ); } :subbr.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x1 & OP_19=0x1 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local notCarry = !SRL_C; local src = Wsbyte_t; subflagsWithCarry( src, Wbbyte_t, notCarry ); local result = src - Wbbyte_t - notCarry; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Zsticky( result ); testSRH_DCbyte ( result ); } :subr.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0xD & OP_15=0 & WREG_t & f13_t { local src = f13_t; subflags( W0, src ); WREG_t = W0 - src; testSRL_N ( WREG_t ); testSRL_Z ( WREG_t ); 
testSRH_DCword( WREG_t ); } :subr.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0xD & OP_15=0 & WREGbyte_t & f13byte_t { local src = f13byte_t; subflags( W0byte, src ); WREGbyte_t = W0byte - src; testSRL_N ( WREGbyte_t ); testSRL_Z ( WREGbyte_t ); testSRH_DCbyte( WREGbyte_t ); } :subr.w Wb_t,k5_t,Wd_t is OP_23_20=0x1 & OP_19=0x0 & OP_6_5=0x3 & Wb_t & $(WDconstraint) & Wd_t & k5_t { subflags( k5_t, Wb_t ); local result = k5_t - Wb_t; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :subr.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x1 & OP_19=0x0 & OP_6_5=0x3 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & k5byte_t { subflags( k5byte_t, Wbbyte_t ); local result = k5byte_t - Wbbyte_t; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } :subr.w Wb_t,Ws_t,Wd_t is OP_23_20=0x1 & OP_19=0x0 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local src = Ws_t; subflags( src, Wb_t ); local result = src - Wb_t; build Wd_t; Wd_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCword( result ); } :subr.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x1 & OP_19=0x0 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local src = Wsbyte_t; subflags( src, Wbbyte_t ); local result = src - Wbbyte_t; build Wdbyte_t; Wdbyte_t = result; testSRL_N ( result ); testSRL_Z ( result ); testSRH_DCbyte( result ); } :swap.w Wn_t is OP_23_20=0xF & OP_19_16=0xD & OP_15=1 & OP_13_4=0x0 & TOK_B=0 & Wn_t { Wn_t = ((Wn_t & 0xFF) << 8) | ((Wn_t & 0xFF00) >> 8); } :swap.b Wnbyte_t is OP_23_20=0xF & OP_19_16=0xD & OP_15=1 & OP_13_4=0x0 & TOK_B=1 & Wnbyte_t { Wnbyte_t = ((Wnbyte_t & 0xF) << 4) | ((Wnbyte_t & 0xF0) >> 4); } # constructor Encoding: 1011 1010 1Bqq qddd dppp ssss :tblrdh.w WsROM_t,Wd_t is OP_23_20=0xB & OP_19_16=0xA & OP_15=1 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & WsROM_t { local src = WsROM_t; local result = zext( *[rom]:1 
(src | 1) );
	build Wd_t;
	Wd_t = result;
}

:tblrdh.b WsROM_t,Wdbyte_t is OP_23_20=0xB & OP_19_16=0xA & OP_15=1 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & WsROM_t
{
	local src = WsROM_t;
	local result:1 = 0;
	# Fix: restore the local jump label stripped from this
	# constructor ("goto ;" is not valid SLEIGH). An odd address
	# selects the unimplemented padding byte, which reads as 0.
	if ( (src & 0x1) != 0) goto <tblrdhb_done>;
	result = *[rom]:1 (src | 1);
	<tblrdhb_done>
	build Wdbyte_t;
	Wdbyte_t = result;
}

# constructor Encoding: 1011 1010 0Bqq qddd dppp ssss
:tblrdl.w WsROM_t,Wd_t is OP_23_20=0xB & OP_19_16=0xA & OP_15=0 & TOK_B=0 & $(WDconstraint) & Wd_t & $(WSconstraint) & WsROM_t
{
	local src = WsROM_t;
	local result = *[rom]:2 (src & 0xfffffe);
	build Wd_t;
	Wd_t = result;
}

:tblrdl.b WsROM_t,Wdbyte_t is OP_23_20=0xB & OP_19_16=0xA & OP_15=0 & TOK_B=1 & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & WsROM_t
{
	local src = WsROM_t;
	local lbit = src & 1;
	local val = *[rom]:2 (src & 0xfffffe);
	# Fix: restore stripped label. An odd address selects the
	# upper byte of the program-memory word.
	if (lbit == 0) goto <tblrdlb_done>;
	val = val >> 8;
	<tblrdlb_done>
	local result = val:1;
	build Wdbyte_t;
	Wdbyte_t = result;
}

:tblwth.w Ws_t,WdROM_t is OP_23_20=0xB & OP_19_16=0xB & OP_15=1 & TOK_B=0 & $(WDconstraint) & WdROM_t & $(WSconstraint) & Ws_t
{
	local src = Ws_t;
	local addr = WdROM_t | 1; # add 1, should be word aligned
	# writing to upper byte PM<23:16>, can't write to padding byte
	*[rom]:1 addr = src;
}

:tblwth.b Wsbyte_t,WdROM_t is OP_23_20=0xB & OP_19_16=0xB & OP_15=1 & TOK_B=1 & $(WDconstraint) & WdROM_t & $(WSconstraint) & Wsbyte_t
{
	local src = Wsbyte_t;
	local addr = WdROM_t;
	local lbit = addr & 1;
	# Fix: restore stripped label. The write always targets the
	# upper byte (odd address); force odd when given an even one.
	if ( lbit != 0) goto <tblwthb_odd>;
	addr = addr | 1;
	<tblwthb_odd>
	*[rom]:1 addr = src;
}

:tblwtl.w Ws_t,WdROM_t is OP_23_20=0xB & OP_19_16=0xB & OP_15=0 & TOK_B=0 & $(WDconstraint) & WdROM_t & $(WSconstraint) & Ws_t
{
	local src = Ws_t;
	local addr = WdROM_t & 0xfffffe;
	*[rom]:2 addr = src;
}

:tblwtl.b Wsbyte_t,WdROM_t is OP_23_20=0xB & OP_19_16=0xB & OP_15=0 & TOK_B=1 & $(WDconstraint) & WdROM_t & $(WSconstraint) & Wsbyte_t
{
	local src = zext(Wsbyte_t);
	local addr = WdROM_t;
	local lobit = addr & 1;
	local val = *[rom]:2 addr;
	local mask = 0xff00;
	# Fix: restore stripped label. If dest is not word aligned,
	# the write targets PM<15:8>: shift the source into the second
	# byte of the word and protect the low byte instead.
	if (lobit == 0) goto <tblwtlb_low>;
	mask = mask >> 8;
	src = src << 8;
	<tblwtlb_low>
	*[rom]:2 addr = (val & mask) | src;
}

@if defined(dsPIC30F) || defined(dsPIC33F)
define pcodeop Vector;

:trap TOK_n,TOK_k16t is OP_23_20=0 & OP_19_17=5 & TOK_n & TOK_k16t & WordInstNext4
{
	*[ram]:4 W15 = WordInstNext4;
	W15 = W15 + 4;
	# NOTE(review): W15 is not advanced past the pushed k16 here —
	# confirm against the trap frame layout in the manual.
	*[ram]:2 W15 = TOK_k16t;
	ptr:3 = Vector(TOK_n:1); # uncertain about vector storage
	call [ptr];
}
@endif

@if defined(dsPIC33C)
define pcodeop verifyslave;

:vfslv PSV_t, EDS_t, k13_12_t is OP_23_16=0x03 & OP_15_14=0x2 & OP_11=0 & k13_12_t & EDS_t & PSV_t
{
	verifyslave(PSV_t, EDS_t, k13_12_t);
}
@endif

@if defined(PIC24F) || defined(PIC24H) || defined(dsPIC30F) || defined(dsPIC33F)
# ULNK: unwind the stack frame set up by LNK.
:ulnk is OP_23_20=0xF & OP_19_16=0xA & OP_15_12=0x08 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0
{
	W15 = W14;
	W15 = W15 - 2;
	W14 = *[ram]:2 W15;
}
@endif

@if defined(PIC24E) || defined(dsPIC33E) || defined(dsPIC33C)
# ULNK (E/C cores): additionally clears CORCON.SFA.
:ulnk is OP_23_20=0xF & OP_19_16=0xA & OP_15_12=0x08 & OP_11_8=0x0 & OP_7_4=0x0 & OP_3_0=0x0
{
	W15 = W14;
	W15 = W15 - 2;
	W14 = *[ram]:2 W15;
	CORCON_SFA = 0;
}
@endif

# URUN - ICD instruction compatible with Microchips ICD debugging hardware
define pcodeop urun;

:urun is OP_23_0=0xDAC000
{
	urun(); # In-Circuit Debugger (ICD) Run
}

:xor.w f13_t^WREG_t is OP_23_20=0xB & OP_19_16=0x6 & OP_15=1 & WREG_t & f13_t
{
	WREG_t = W0 ^ f13_t;
	testSRL_N(WREG_t);
	testSRL_Z(WREG_t);
}

:xor.b f13byte_t^WREGbyte_t is OP_23_20=0xB & OP_19_16=0x6 & OP_15=1 & WREGbyte_t & f13byte_t
{
	WREGbyte_t = W0byte ^ f13byte_t;
	testSRL_N(WREGbyte_t);
	testSRL_Z(WREGbyte_t);
}

:xor.w k10_t,Wn_t is OP_23_20=0xB & OP_19_16=0x2 & OP_15=1 & k10_t & Wn_t
{
	Wn_t = Wn_t ^ k10_t;
	testSRL_N(Wn_t);
	testSRL_Z(Wn_t);
}

:xor.b k10byte_t,Wnbyte_t is OP_23_20=0xB & OP_19_16=0x2 & OP_15=1 & k10byte_t & Wnbyte_t
{
	Wnbyte_t = Wnbyte_t ^ k10byte_t;
	testSRL_N(Wnbyte_t);
	testSRL_Z(Wnbyte_t);
}

:xor.w Wb_t,k5_t,Wd_t is OP_23_20=0x6 & OP_19=0x1 & Wb_t & $(WDconstraint) & Wd_t & OP_6_5=0x3 & k5_t
{
	Wd_t = Wb_t ^ k5_t;
testSRL_N(Wd_t); testSRL_Z(Wd_t); } :xor.b Wbbyte_t,k5byte_t,Wdbyte_t is OP_23_20=0x6 & OP_19=0x1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & OP_6_5=0x3 & k5byte_t { Wdbyte_t = Wbbyte_t ^ k5byte_t; testSRL_N(Wdbyte_t); testSRL_Z(Wdbyte_t); } :xor.w Wb_t,Ws_t,Wd_t is OP_23_20=0x6 & OP_19=0x1 & TOK_B=0 & Wb_t & $(WDconstraint) & Wd_t & $(WSconstraint) & Ws_t { local result = Wb_t ^ Ws_t; build Wd_t; Wd_t = result; testSRL_N(result); testSRL_Z(result); } :xor.b Wbbyte_t,Wsbyte_t,Wdbyte_t is OP_23_20=0x6 & OP_19=0x1 & TOK_B=1 & Wbbyte_t & $(WDconstraint) & Wdbyte_t & $(WSconstraint) & Wsbyte_t { local result = Wbbyte_t ^ Wsbyte_t; build Wdbyte_t; Wdbyte_t = result; testSRL_N(result); testSRL_Z(result); } :ze Ws_t,Wnd_t is OP_23_20=0xF & OP_19_16=0xB & OP_15_12=0x8 & OP_11=0x0 & Wnd_t & $(WSconstraint) & Ws_t { local result = zext(Ws_t:1); build Wnd_t; Wnd_t = result; SRL_N = 0; testSRL_Z(result); SRL_C = 1; } # UNVERIFIED - not found in manual but was produced by toolchain for PIC30F2010 # There appear to be a few variations of this encoding produced by toolchain # but do not decode with objdump define pcodeop break; :break is OP_23_0=0xDA4000 { break(); } } # end with : phase = 2 ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24E.slaspec ================================================ # This module defines Microchip PIC-24. # Based on "Microchip 16-bit MCU and DSC Programmer's Reference Manual (c)2005-2011 (i.e. PIC24_InstructionSet.pdf) define endian=little; # little endian only @define PIC24E "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24F.slaspec ================================================ # This module defines Microchip PIC-24. # Based on "Microchip 16-bit MCU and DSC Programmer's Reference Manual (c)2005-2011 (i.e. 
PIC24_InstructionSet.pdf) define endian=little; # little endian only @define PIC24F "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/PIC24H.slaspec ================================================ # This module defines Microchip PIC-24. # Based on "Microchip 16-bit MCU and DSC Programmer's Reference Manual (c)2005-2011 (i.e. PIC24_InstructionSet.pdf) define endian=little; # little endian only @define PIC24H "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/PIC30.dwarf ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/PIC33.dwarf ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/dsPIC30F.slaspec ================================================ # This module defines Microchip PIC-24. # Based on "Microchip 16-bit MCU and DSC Programmer's Reference Manual (c)2005-2011 (i.e. PIC24_InstructionSet.pdf) define endian=little; # little endian only @define dsPIC30F "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/dsPIC33C.slaspec ================================================ # This module defines Microchip PIC-24. define endian=little; # little endian only @define dsPIC33C "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/dsPIC33E.slaspec ================================================ # This module defines Microchip PIC-24. # Based on "Microchip 16-bit MCU and DSC Programmer's Reference Manual (c)2005-2011 (i.e. 
PIC24_InstructionSet.pdf) define endian=little; # little endian only @define dsPIC33E "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/dsPIC33F.slaspec ================================================ # This module defines Microchip PIC-24. # Based on "Microchip 16-bit MCU and DSC Programmer's Reference Manual (c)2005-2011 (i.e. PIC24_InstructionSet.pdf) define endian=little; # little endian only @define dsPIC33F "1" @include "PIC24.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/pic12.sinc ================================================ # # PIC-12 Main Section # includes constants, memory space and common register space definitions # # STATUS bit definitions @define STATUS_PA0_BIT 5 @define STATUS_Z_BIT 2 @define STATUS_DC_BIT 1 @define STATUS_C_BIT 0 # STATUS bit masks used for setting @define STATUS_PA_MASK 0x60 @define STATUS_Z_MASK 0x04 @define STATUS_DC_MASK 0x02 @define STATUS_C_MASK 0x01 # STATUS bit masks used for clearing @define STATUS_PA_CLEARMASK 0x9F @define STATUS_Z_CLEARMASK 0xFB @define STATUS_DC_CLEARMASK 0xFD @define STATUS_C_CLEARMASK 0xFE @define FSR_BSEL_MASK 0x60 # FSR<5:6> Bank Select bits : Direct Addressing define endian=little; define alignment=2; # Instruction Memory (ROM-based) define space CODE type=ram_space wordsize=2 size=2 default; # General Purpose Register Memory consists of 2-banks of 32-bytes each # Bank selection occurs using FSR bits <6:5> define space DATA type=ram_space size=1; # HWSTACK consists of a 2-word by 12-bit RAM and a corresponding to a hidden stack pointer (STKPTR). 
# Hardware return stack: 12-bit entries modeled with 16-bit words (wordsize=2).
define space HWSTACK type=ram_space wordsize=2 size=1; # WORDSIZE is actually 12-bits

define space register type=register_space size=2;

# Program Counter (9-bits) - PC Latch: PCL
define register offset=0x0000 size=2 [ PC ];

# Stack Pointer
define register offset=0x0002 size=1 [ STKPTR ];

# Working register
define register offset=0x0003 size=1 [ W ];

# PC Latch register (real register is memory based)
define register offset=0x0004 size=1 [ PCL ];

# File Selection register (real register is memory based)
define register offset=0x0005 size=1 [ FSR ];

# STATUS register (real register is memory based)
define register offset=0x0006 size=1 [ STATUS ];

# Status bit registers (these do not really exist and must get reflected into the STATUS byte register)
define register offset=0x0007 size=1 [ PA Z DC C ];

# Option Register
define register offset=0x00b size=1 [ OPTION ];

================================================
FILE: pypcode/processors/PIC/data/languages/pic12_instructions.sinc
================================================
#
# PIC-12 Instruction Section
# includes token definitions, macros, sub-constructors and instruction definitions
#

# Little-endian bit numbering
# 12-bit PIC-12 opcodes held in a 16-bit code word; fields name the opcode
# slices (op12/op6/op4/op3), destination bit (d), bit index (b3),
# file-register index (f5, f5h) and literal operands (k8, k9).
define token instr16(16)
	op12 = (0,11)
	op6  = (6,11)
	op4  = (8,11)
	op3  = (9,11)
	d    = (5,5)
	b3   = (5,7)
	f5   = (0,4)
	f5h  = (4,4)
	k8   = (0,7)
	k9   = (0,8)
;

#
# Unsupported Operations
#
define pcodeop clearWatchDogTimer;
define pcodeop sleep;

#
# MACROS
#

# Pack status bits into STATUS register
# NOTE(review): body is commented out — the split-out PA/Z/DC/C registers are
# never actually reflected back into STATUS here; confirm this is intentional.
macro packStatus() {
#	STATUS = (PA << $(STATUS_PA0_BIT))
#		| (Z << $(STATUS_Z_BIT))
#		| (DC << $(STATUS_DC_BIT))
#		| (C << $(STATUS_C_BIT));
}

# Unpack status bits from STATUS register
# NOTE(review): body is commented out — STATUS is never unpacked into PA/Z/DC/C here.
macro unpackStatus() {
#	PA = (STATUS & $(STATUS_PA_MASK)) >> $(STATUS_PA0_BIT);
#	Z = ((STATUS & $(STATUS_Z_MASK)) != 0);
#	DC = ((STATUS & $(STATUS_DC_MASK)) != 0);
#	C = ((STATUS & $(STATUS_C_MASK)) != 0);
}

# Z flag reflects a zero result
macro setResultFlags(result) {
	Z = (result == 0);
}

# Carry out of op1 + op2 + C (add-with-carry)
macro setAddCCarryFlag(op1,op2) {
	C = (carry(op1,C) || carry(op2,op1 + C));
}
# Digit (half) carry for add-with-carry: carry out of bit 3 of op1 + op2 + DC,
# detected by shifting the low nibbles into the high nibble first.
macro setAddCDigitCarryFlag(op1,op2) {
	# op1 and op2 are assumed to be 8-bit values
	local tmp1 = op1 << 4;
	local tmp2 = op2 << 4;
	DC = (carry(tmp1,DC) || carry(tmp2,tmp1 + DC));
}

# C and DC for add-with-carry
macro setAddCFlags(op1,op2) {
	setAddCCarryFlag(op1,op2);
	setAddCDigitCarryFlag(op1,op2);
}

# C and DC for plain add
macro setAddFlags(op1,op2) {
	C = carry(op1,op2);
	DC = carry(op1<<4,op2<<4);
}

# Borrow detection for subtract-with-borrow (C is an inverted borrow)
macro setSubtractCCarryFlag(op1,op2) {
	local notC = ~C;
	C = ((op1 < notC) || (op2 < (op1 - notC)));
}

macro setSubtractCDigitCarryFlag(op1,op2) {
	# op1 and op2 are assumed to be 8-bit values
	local notDC = ~DC;
	local tmp1 = op1 << 4;
	local tmp2 = op2 << 4;
	# NOTE(review): tmp1 is already shifted left by 4; the extra "<< 4" here is
	# asymmetric with setAddCDigitCarryFlag — confirm against hardware behavior.
	local tmp3 = (tmp1 - notDC) << 4;
	DC = ((tmp1 < notDC) || (tmp2 < tmp3));
}

macro setSubtractCFlags(op1,op2) {
	setSubtractCCarryFlag(op1,op2);
	setSubtractCDigitCarryFlag(op1,op2);
}

macro setSubtractFlags(op1,op2) {
	# op1 and op2 are assumed to be 8-bit values
	# NOTE: carry flag is SET if there is NO borrow
	C = (op1 >= op2);
	DC = ((op1<<4) < (op2<<4));
}

# Push a 2-byte value onto the hardware return stack
macro push(val) {
	# TODO: Uncertain about this !!
	*[HWSTACK]:2 STKPTR = val;
	STKPTR = STKPTR + 2;
}

# Pop a 2-byte value off the hardware return stack
macro pop(val) {
	# TODO: Uncertain about this !!
	STKPTR = STKPTR - 2;
	val = *[HWSTACK]:2 STKPTR;
}

#
# SUB-CONSTRUCTORS
#

# File register index (f5!=0): bank selection determined by FSR<5:6> bits
fREGLoc: f5 is f5 {
	addr:1 = (FSR & $(FSR_BSEL_MASK)) + f5;
	export *[DATA]:1 addr;
}

# File register index (f5=0): INDF use implies indirect data access using FSR value
fREGLoc: "INDF" is f5=0 {
	addr:1 = FSR; # only low order 7-bits are used for indirect address
	export *[DATA]:1 addr;
}

# File register index : low 16-bytes of each bank always mapped to Bank-0
fREGLoc: f5 is f5h=0x0 & f5 {
	export *[DATA]:1 f5;
}

# Special File Registers which have been mirrored into the register space
# to improve decompiler results
fREGLoc: "STATUS" is f5=0x03 { packStatus(); export STATUS; }
fREGLoc: "FSR" is f5=0x04 { export FSR; }
fREGLoc: "PCL" is f5=0x02 { export PCL; }

# File register index (bank selection determined by RP bits in STATUS reg)
srcREG: fREGLoc is fREGLoc { export fREGLoc; }
#srcREG: "STATUS" is f5=0x03 { packStatus(); export STATUS; }
#srcREG: "FSR" is f5=0x04 { export FSR; }

# Reading PCL: latch the current instruction address into PCL and the PA bits
srcREG: "PCL" is f5=0x02 {
	# PCL and PA1:PA0 are latched
	addr:2 = inst_start >> 1; # Compensate for CODE wordsize
	PCL = addr:1;
	addr = (addr >> 9) & 0x3;
	PA = addr:1;
	export PCL;
}

# Destination register (either srcREG or W)
destREG: "0" is d=0 { export W; }
destREG: "1" is d=1 & srcREG { export srcREG; }
#destREG: "1" is d=1 & f5=0x03 { export STATUS; }
#destREG: "1" is d=1 & f5=0x04 { export FSR; }
#destREG: "1" is d=1 & f5=0x02 {
#	# Storing to PCL causes a branch,
#	# PC<8> is always cleared for CALL and modifying instructions.
# # The MOVWF, ADDWF, BSF and BCF definition below has a specific case to handle this write to PCL # export PCL; #} #destREG: "1" is d=1 & f5=0x00 & fREGLoc { # # INDF use (indirect data access) # export *[DATA]:1 fREGLoc; #} # Destination operand representation (w: W register is destination; f: specified srcREG is destination) D: "w" is d=0 { } D: "f" is d=1 { } # Absolute addresses generated from k8 or k9 and STATUS.PA absAddr8: k8 is k8 { addr:2 = (zext(PA) << 9) + k8; export addr; } absAddr9: k9 is k9 { addr:2 = (zext(PA) << 9) + k9; export addr; } # Skip instruction address skipInst: inst_skip is op12 [ inst_skip = inst_next + 1; ] {export *[CODE]:2 inst_skip; } # Immediate Data (Literal operation) imm8: "#"k8 is k8 { export *[const]:1 k8; } # Bit identifier bit: "#"b3 is b3 { export *[const]:1 b3; } # PC register write - instruction must set PC with PCLATH/PCL and perform branch operation pcl: "PC" is f5=0x02 { export PCL; } # STATUS register status: "STATUS" is f5=0x03 { export STATUS; } # # BYTE-ORIENTED FILE REGISTER OPERATIONS # :ADDWF srcREG, D is op6=0x07 & srcREG & D & destREG { # ---- 0001 11df ffff # 0000 0001 1100 0000 -> ADDWF INDF, 0 # 0000 0001 1110 0000 -> ADDWF INDF, 1 # 0000 0001 1101 0010 -> ADDWF 0x12, 0 # 0000 0001 1111 0010 -> ADDWF 0x12, 1 tmp:1 = srcREG; setAddFlags(W, tmp); tmp = W + tmp; destREG = tmp; setResultFlags(tmp); } :ADDWF pcl, D is op6=0x07 & D & pcl { # ---- 0001 11df ffff # 0000 0001 1110 0010 -> ADDWF PCL, w, ACCESS addr:2 = (inst_start >> 1) & 0x3f; # shift compenstates for CODE wordsize tmpLo:1 = addr:1; PA = addr(1); setAddFlags(tmpLo, W); tmpLo = tmpLo + W; setResultFlags(tmpLo); addr = (zext(PA) << 9) + zext(tmpLo); PCL = tmpLo; goto [addr]; } :ANDLW imm8 is op4=0xe & imm8 { # ---- 1110 kkkk kkkk # 0000 1110 0001 0010 -> ANDLW #0x12 W = W & imm8; setResultFlags(W); } :ANDWF srcREG, D is op6=0x05 & srcREG & D & destREG { # ---- 0001 01df ffff # 0000 0001 0100 0000 -> ANDWF INDF, 0 # 0000 0001 0110 0000 -> ANDWF INDF, 1 
# 0000 0001 0101 0010 -> ANDWF 0x12, 0 # 0000 0001 0111 0010 -> ANDWF 0x12, 1 tmp:1 = W & srcREG; destREG = tmp; setResultFlags(tmp); } :BCF srcREG, bit is op4=0x4 & bit & srcREG { # ---- 0100 bbbf ffff # 0000 0100 1000 0000 -> BCF INDF, #0x4 # 0000 0100 1001 0010 -> BCF 0x12, #0x4 local bitmask = ~(1 << bit); srcREG = srcREG & bitmask; } :BCF status, bit is op4=0x4 & b3=0 & bit & status { # ---- 0100 bbbf ffff # 0000 0100 0000 0000 -> BCF STATUS, #C C = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit is op4=0x4 & b3=1 & bit & status { # ---- 0100 bbbf ffff # 0000 0100 0010 0000 -> BCF STATUS, #DC DC = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit is op4=0x4 & b3=2 & bit & status { # ---- 0100 bbbf ffff # 0000 0100 0100 0000 -> BCF STATUS, #Z Z = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit is op4=0x4 & b3=5 & bit & status { # ---- 0100 bbbf ffff # 0000 0100 1010 0000 -> BCF STATUS, #PA0 PA = PA & 0x1; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit is op4=0x4 & b3=6 & bit & status { # ---- 0100 bbbf ffff # 0000 0100 1100 0000 -> BCF STATUS, #PA1 PA = PA & 0x2; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BSF srcREG, bit is op4=0x5 & bit & srcREG { # ---- 0101 bbbf ffff # 0000 0101 1000 0000 -> BSF INDF, #0x4 # 0000 0101 1001 0010 -> BSF 0x12, #0x4 local bitmask = 1 << bit; srcREG = srcREG | bitmask; } :BSF status, bit is op4=0x5 & b3=0 & bit & status { # ---- 0101 bbbf ffff # 0000 0101 0000 0000 -> BSF STATUS, #C C = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit is op4=0x5 & b3=1 & bit & status { # ---- 0101 bbbf ffff # 0000 0101 0010 0000 -> BSF STATUS, #DC DC = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit is op4=0x5 & b3=2 & bit & status { # ---- 0101 bbbf ffff # 0000 0101 0100 0000 -> BSF STATUS, #Z Z = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF 
status, bit is op4=0x5 & b3=5 & bit & status { # ---- 0101 bbbf ffff # 0000 0101 1010 0000 -> BSF STATUS, #PA0 PA = PA | 0x1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit is op4=0x5 & b3=6 & bit & status { # ---- 0101 bbbf ffff # 0000 0101 1100 0000 -> BSF STATUS, #PA1 PA = PA | 0x2; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BTFSC srcREG, bit is op4=0x6 & bit & srcREG & skipInst { # ---- 0110 bbbf ffff # 0000 0110 1000 0000 -> BTFSC INDF, #0x4 # 0000 0110 1001 0010 -> BTFSC 0x12, #0x4 local bitmask = 1 << bit; local tmp = srcREG & bitmask; if (tmp == 0) goto skipInst; } :BTFSC status, bit is op4=0x6 & b3=0 & bit & status & skipInst { # ---- 0110 bbbf ffff # 0000 0110 0000 0000 -> BTFSC STATUS, #C if (C == 0) goto skipInst; } :BTFSC status, bit is op4=0x6 & b3=1 & bit & status & skipInst { # ---- 0110 bbbf ffff # 0000 0110 0010 0000 -> BTFSC STATUS, #DC if (DC == 0) goto skipInst; } :BTFSC status, bit is op4=0x6 & b3=2 & bit & status & skipInst { # ---- 0110 bbbf ffff # 0000 0110 0100 0000 -> BTFSC STATUS, #Z if (Z == 0) goto skipInst; } :BTFSS srcREG, bit is op4=0x7 & bit & srcREG & skipInst { # ---- 0111 bbbf ffff # 0000 0111 1000 0000 -> BTFSS INDF, #0x4 # 0000 0111 1001 0010 -> BTFSS 0x12, #0x4 local bitmask = 1 << bit; local tmp = srcREG & bitmask; if (tmp != 0) goto skipInst; } :BTFSS status, bit is op4=0x7 & b3=0 & bit & status & skipInst { # ---- 0111 bbbf ffff # 0000 0111 0000 0000 -> BTFSS STATUS, #C if (C != 0) goto skipInst; } :BTFSS status, bit is op4=0x7 & b3=1 & bit & status & skipInst { # ---- 0111 bbbf ffff # 0000 0111 0010 0000 -> BTFSS STATUS, #DC if (DC != 0) goto skipInst; } :BTFSS status, bit is op4=0x7 & b3=2 & bit & status & skipInst { # ---- 0111 bbbf ffff # 0000 0111 0100 0000 -> BTFSS STATUS, #Z if (Z != 0) goto skipInst; } :CALL absAddr8 is op4=0x9 & absAddr8 { # ---- 1001 kkkk kkkk # 0000 1001 0010 0011 -> CALL 0x23 # 0000 1001 0001 0000 -> CALL 0x10 push(&:2 inst_next); call [absAddr8]; } :CLRF 
srcREG is op6=0x01 & d=1 & srcREG { # ---- 0000 011f ffff # 0000 0000 0110 0000 -> CLRF INDF # 0000 0000 0111 0010 -> CLRF 0x12 srcREG = 0; Z = 1; } :CLRW is op6=0x01 & d=0 & f5=0 { # ---- 0000 0100 0000 # 0000 0001 0000 0000 -> CLRW W = 0; Z = 1; } :CLRWDT is op12=0x0004 { # ---- 0000 0000 0100 # Clear Watchdog Timer - Not Implemented clearWatchDogTimer(); } :COMF srcREG, D is op6=0x09 & srcREG & D & destREG { # ---- 0010 01df ffff # 0000 0010 0100 0000 -> COMF INDF, 0 # 0000 0010 0110 0000 -> COMF INDF, 1 # 0000 0010 0101 0010 -> COMF 0x12, 0 # 0000 0010 0111 0010 -> COMF 0x12, 1 tmp:1 = ~srcREG; destREG = tmp; setResultFlags(tmp); } :DECF srcREG, D is op6=0x03 & srcREG & D & destREG { # ---- 0000 11df ffff # 0000 0000 1100 0000 -> DECF INDF, 0 # 0000 0000 1110 0000 -> DECF INDF, 1 # 0000 0000 1101 0010 -> DECF 0x12, 0 # 0000 0000 1111 0010 -> DECF 0x12, 1 tmp:1 = srcREG - 1; destREG = tmp; setResultFlags(tmp); } :DECFSZ srcREG, D is op6=0x0b & srcREG & D & destREG & skipInst { # ---- 0010 11df ffff # 0000 0010 1100 0000 -> DECFSZ INDF, 0 # 0000 0010 1110 0000 -> DECFSZ INDF, 1 # 0000 0010 1101 0010 -> DECFSZ 0x12, 0 # 0000 0010 1111 0010 -> DECFSZ 0x12, 1 tmp:1 = srcREG - 1; destREG = tmp; if (tmp == 0) goto skipInst; } :GOTO absAddr9 is op3=0x5 & absAddr9 { # ---- 101k kkkk kkkk # 0000 1011 0010 0011 -> GOTO 0x123 # 0000 1010 0001 0000 -> GOTO 0x10 goto [absAddr9]; } :INCF srcREG, D is op6=0x0a & srcREG & D & destREG { # ---- 0010 10df ffff # 0000 0010 1000 0000 -> INCF INDF, 0 # 0000 0010 1010 0000 -> INCF INDF, 1 # 0000 0010 1001 0010 -> INCF 0x12, 0 # 0000 0010 1011 0010 -> INCF 0x12, 1 tmp:1 = srcREG + 1; destREG = tmp; setResultFlags(tmp); } :INCFSZ srcREG, D is op6=0x0f & srcREG & D & destREG & skipInst { # ---- 0011 11df ffff # 0000 0011 1100 0000 -> INCFSZ INDF, 0 # 0000 0011 1110 0000 -> INCFSZ INDF, 1 # 0000 0011 1101 0010 -> INCFSZ 0x12, 0 # 0000 0011 1111 0010 -> INCFSZ 0x12, 1 tmp:1 = srcREG + 1; destREG = tmp; if (tmp == 0) goto skipInst; } :IORLW 
imm8 is op4=0xd & imm8 { # ---- 1101 kkkk kkkk # 0000 1101 0001 0010 -> IORLW #0x12 W = W | imm8; setResultFlags(W); } :IORWF srcREG, D is op6=0x04 & srcREG & D & destREG { # ---- 0001 00df ffff # 0000 0001 0000 0000 -> IORWF INDF, 0 # 0000 0001 0010 0000 -> IORWF INDF, 1 # 0000 0001 0001 0010 -> IORWF 0x20, 0 # 0000 0001 0011 0010 -> IORWF 0x20, 1 tmp:1 = W | srcREG; destREG = tmp; setResultFlags(tmp); } :MOVLW imm8 is op4=0xc & imm8 { # ---- 1100 kkkk kkkk # 0000 1100 0001 0010 -> MOVLW #0x12 W = imm8; } :MOVF srcREG, D is op6=0x08 & srcREG & D & destREG { # ---- 0010 00df ffff # 0000 0010 0000 0000 -> MOVF INDF, 0 # 0000 0010 0010 0000 -> MOVF INDF, 1 # 0000 0010 0001 0010 -> MOVF 0x12, 0 # 0000 0010 0011 0010 -> MOVF 0x12, 1 tmp:1 = srcREG; destREG = tmp; setResultFlags(tmp); } :MOVWF srcREG is op6=0x00 & d=1 & srcREG { # ---- 0000 001f ffff # 0000 0000 0010 0000 -> MOVWF INDF # 0000 0000 0011 0010 -> MOVWF 0x12 srcREG = W; } :MOVWF pcl is op6=0x00 & pcl { # ---- 0000 001f ffff # 0000 0000 0010 0010 -> MOVWF PCL PCL = W; addr:2 = (zext(PA) << 9) + zext(PCL); goto [addr]; } :NOP is op12=0x00 { # ---- 0000 0000 0000 } :OPTION is op12=0x0002 { # ---- 0000 0000 0010 OPTION = W; } :RETLW imm8 is op4=0x8 & imm8 { # ---- 1000 kkkk kkkk # 0000 1000 0001 0010 -> RETLW #0x12 W = imm8; retAddr:2 = 0; pop(retAddr); return [retAddr]; } :RLF srcREG, D is op6=0x0d & srcREG & D & destREG { # ---- 0011 01df ffff # 0000 0011 0100 0000 -> RLF INDF, 0 # 0000 0011 0110 0000 -> RLF INDF, 1 # 0000 0011 0101 0010 -> RLF 0x12, 0 # 0000 0011 0111 0010 -> RLF 0x12, 1 local tmpC = C; tmp:1 = srcREG; C = (tmp s< 0); tmp = (tmp << 1) | tmpC; destREG = tmp; setResultFlags(tmp); } :RRF srcREG, D is op6=0x0c & srcREG & D & destREG { # ---- 0011 00df ffff # 0000 0011 0000 0000 -> RRF INDF, 0 # 0000 0011 0010 0000 -> RRF INDF, 1 # 0000 0011 0001 0010 -> RRF 0x12, 0 # 0000 0011 0011 0010 -> RRF 0x12, 1 local tmpC = C << 7; tmp:1 = srcREG; C = (tmp & 1) != 0; tmp = (tmp >> 1) | tmpC; destREG = 
tmp; setResultFlags(tmp); } :SLEEP is op12=0x0003 { # ---- 0000 0000 0011 # Sleep - Not Implemented sleep(); } :SUBWF srcREG, D is op6=0x02 & srcREG & D & destREG { # ---- 0000 10df ffff # 0000 0000 1000 0000 -> SUBWF INDF, 0 # 0000 0000 1010 0000 -> SUBWF INDF, 1 # 0000 0000 1001 0010 -> SUBWF 0x12, 0 # 0000 0000 1011 0010 -> SUBWF 0x12, 1 tmp:1 = srcREG; setSubtractFlags(tmp, W); tmp = tmp - W; destREG = tmp; setResultFlags(tmp); } :SWAPF srcREG, D is op6=0x0e & srcREG & D & destREG { # ---- 0011 10df ffff # 0000 0011 1000 0000 -> SUBWF INDF, 0 # 0000 0011 1010 0000 -> SUBWF INDF, 1 # 0000 0011 1001 0010 -> SUBWF 0x12, 0 # 0000 0011 1011 0010 -> SUBWF 0x12, 1 tmp:1 = srcREG; destREG = (tmp << 4) | (tmp >> 4); } :XORLW imm8 is op4=0xf & imm8 { # ---- 1111 kkkk kkkk # 0000 1111 0001 0010 -> XORLW #0x12 W = imm8 ^ W; setResultFlags(W); } :XORWF srcREG, D is op6=0x06 & srcREG & D & destREG { # ---- 0001 10df ffff # 0000 0001 1000 0000 -> XORWF INDF, 0 # 0000 0001 1010 0000 -> XORWF INDF, 1 # 0000 0001 1001 0010 -> XORWF 0x12, 0 # 0000 0001 1011 0010 -> XORWF 0x12, 1 tmp:1 = W ^ srcREG; destREG = tmp; setResultFlags(tmp); } ================================================ FILE: pypcode/processors/PIC/data/languages/pic12c5xx.cspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic12c5xx.ldefs ================================================ PIC-12C5xx ================================================ FILE: pypcode/processors/PIC/data/languages/pic12c5xx.pspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic12c5xx.slaspec ================================================ @define PROCESSOR "PIC_12C5XX" @include "pic12.sinc" # # NOTES - # 1. 
If a specific PIC-12 has a different register set, this file and the pic12c5xx.specl file may be copied/renamed and # slightly modified to specify a the correct Register File Map. # # Bank-0 File Registers define DATA offset=0x00 size=1 [ INDF TMR0 PCL.0 STATUS.0 FSR.0 OSCCAL GPIO ]; @include "pic12_instructions.sinc" # IO Tristate Register define register offset=0x0020 size=1 [ TRIS ]; # TRIS register trisREG: "6" is f5=0x6 { export TRIS; } :TRIS trisREG is op6=0x00 & d=0 & trisREG { # ---- 0000 0000 0fff # 0000 0000 0000 0110 -> TRIS 6 trisREG = W; } ================================================ FILE: pypcode/processors/PIC/data/languages/pic16.cspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic16.ldefs ================================================ PIC-16(C,CR)XXX PIC-16(L)FXXX ================================================ FILE: pypcode/processors/PIC/data/languages/pic16.pspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic16.sinc ================================================ # # PIC-16 Main Section # includes constants, memory space and common register space definitions # define endian=little; define alignment=2; # Instruction Memory (ROM-based) define space CODE type=ram_space wordsize=2 size=2 default; # General Purpose Register Memory consists of 4-banks of 255-bytes for PIC16, # or 32-banks of 255 bytes each for PIC_16F. # Bank selection occurs using STATUS register bits RP0 & RP1 define space DATA type=ram_space size=2; # HWSTACK consists of a 8-word by 13-bit RAM and a corresponding to a hidden stack pointer (STKPTR). 
# Hardware return stack: 13-bit entries modeled with 16-bit words (wordsize=2).
define space HWSTACK type=ram_space wordsize=2 size=1; # WORDSIZE is actually 13-bits

define space register type=register_space size=2;

# Program Counter (13-bits) - PC Latch: PCLATH / PCL
define register offset=0x0000 size=2 [ PC ];

# Stack Pointer
define register offset=0x0002 size=1 [ STKPTR ];

# Working register
# SkipNext is an artificial flag tested by the possibleSkip instruction wrapper
define register offset=0x0003 size=1 [ W SkipNext ];

# Status bit registers (these do not really exist and must get reflected into the STATUS byte register)
define register offset=0x0007 size=1 [ IRP RP ];

# Bit-field accessors into the STATUS register, usable as $(C), $(Z), etc.
@define C "STATUS[0,1]"
@define DC "STATUS[1,1]"
@define Z "STATUS[2,1]"
@define PD "STATUS[3,1]"
@define TO "STATUS[4,1]"
@define RP "STATUS[5,2]"
@define IRP "STATUS[7,1]"

# STATUS bit definitions
@define STATUS_IRP_BIT 7
@define STATUS_RP_BIT 5
@define STATUS_Z_BIT 2
@define STATUS_DC_BIT 1
@define STATUS_C_BIT 0

# STATUS bit masks used for setting
@define STATUS_IRP_MASK 0x80
@define STATUS_RP_MASK 0x60
@define STATUS_Z_MASK 0x04
@define STATUS_DC_MASK 0x02
@define STATUS_C_MASK 0x01

# STATUS bit masks used for clearing
@define STATUS_IRP_CLEARMASK 0x7F
@define STATUS_RP_CLEARMASK 0x9F
@define STATUS_Z_CLEARMASK 0xFB
@define STATUS_DC_CLEARMASK 0xFD
@define STATUS_C_CLEARMASK 0xFE

#
# WARNING! - Reflection of these DATA-based registers with the corresponding register
# is not fully implemented due to the complexity of doing so within this language specification.
# Reflection of certain registers (e.g., STATUS) within other memory banks is also not modeled.
#
# NOTES -
# 1. Chips with voltage comparator and reference functions may replace A/D registers (ADCON0 and ADCON1) with (VMCON and VRCON)
#    Instances of this include PIC16F627A/628A/648A (there could be others)
# 2. If a specific PIC-16 has a different register set, this file and the pic16.pspec file may be copied/renamed and
#    slightly modified to specify the correct Register File Map.
#
#
# Bank-0 File Registers
#
@if PROCESSOR == "PIC_16"
define DATA offset=0x0000 size=1 [ INDF _ PCL STATUS FSR _ _ _ _ _ PCLATH INTCON _ _ _ _ ];
@elif PROCESSOR == "PIC_16F"
define DATA offset=0x0000 size=1 [ INDF0 INDF1 PCL STATUS FSR0L FSR0H FSR1L FSR1H BSR WREG PCLATH INTCON _ _ _ _ ];
define DATA offset=0x0004 size=2 [ FSR0 FSR1 ];
@endif
# Additional Data Bank data registers are defined in the .PSPEC file.

================================================
FILE: pypcode/processors/PIC/data/languages/pic16.slaspec
================================================
@define PROCESSOR "PIC_16"
@include "pic16.sinc"
@include "pic16_instructions.sinc"

================================================
FILE: pypcode/processors/PIC/data/languages/pic16_instructions.sinc
================================================
#
# PIC-16 Instruction Section
# includes token definitions, macros, sub-constructors and instruction definitions
#
# PC register write - instruction must set PC with PCLATH/PCL and perform branch operation

# Little-endian bit numbering
# 14-bit PIC-16 opcodes held in a 16-bit code word.
define token instr16(16)
	op14 = (0,13)
	op12 = (2,13)
	op11 = (3,13)
	op9  = (5,13)
	op8  = (6,13)
	op7  = (7,13)
	op6  = (8,13)
	op5  = (9,13)
	op4  = (10,13)
	op3  = (11,13)
	d    = (7,7)
	b3   = (7,9)
	IntConBits = (7,9)
	StatusBit = (7,9)
	f7   = (0,6)
	lf7  = (0,3)
	uf7  = (4,6)
	fsr  = (2,2)
	fsrk = (6,6)
	k5   = (0,4)
	k6   = (0,5)
	k7   = (0,6)
	k8   = (0,7)
	sk9  = (0,8) signed
	k11  = (0,10)
	sk6  = (0,5) signed
	l5   = (0,4) # low order 5-bits of instr16
	mm   = (0,1)
;

define register offset=0x100 size=4 contextreg;

# possibleSkip marks the instruction that follows a conditional-skip instruction
define context contextreg
	doPseudo = (0,0)
	possibleSkip = (1,1) noflow
;

@if PROCESSOR == "PIC_16F"
attach names [IntConBits] [ IOCIF INTF TMR0IF IOCIE INTE TMR0IE PEIE GIE ];
attach variables [ fsr fsrk ] [ FSR0 FSR1 ];
@endif

#
# Unsupported Operations
#
define pcodeop clearWatchDogTimer;
define pcodeop sleep;
define pcodeop reset;

#
# MACROS
#

# Pack status bits into STATUS register
# NOTE(review): body is commented out — confirm this reflection is intentionally disabled.
macro packStatus() {
#	STATUS = (IRP << $(STATUS_IRP_BIT))
#		| (RP << $(STATUS_RP0_BIT))
#		| (Z << $(STATUS_Z_BIT))
#		| (DC << $(STATUS_DC_BIT))
#		| (C << $(STATUS_C_BIT));
}

# Unpack status bits from STATUS register
# NOTE(review): body is commented out — STATUS is never unpacked here.
macro unpackStatus() {
#	IRP = ((STATUS & $(STATUS_IRP_MASK)) != 0);
#	RP = (STATUS & $(STATUS_RP_MASK)) >> $(STATUS_RP0_BIT);
#	Z = ((STATUS & $(STATUS_Z_MASK)) != 0);
#	DC = ((STATUS & $(STATUS_DC_MASK)) != 0);
#	C = ((STATUS & $(STATUS_C_MASK)) != 0);
}

# Z flag reflects a zero result
macro setResultFlags(result) {
	$(Z) = (result == 0);
}

# Carry out of op1 + op2 + C (add-with-carry)
macro setAddCCarryFlag(op1,op2) {
	local tc = $(C);
	$(C) = (carry(op1,tc) || carry(op2,op1 + tc));
}

# Digit (half) carry for add-with-carry
macro setAddCDigitCarryFlag(op1,op2) {
	# op1 and op2 are assumed to be 8-bit values
	local tmp1 = op1 << 4;
	local tmp2 = op2 << 4;
	local tdc = $(DC);
	$(DC) = (carry(tmp1,tdc) || carry(tmp2,tmp1 + tdc));
}

macro setAddCFlags(op1,op2) {
	setAddCCarryFlag(op1,op2);
	setAddCDigitCarryFlag(op1,op2);
}

macro setAddFlags(op1,op2) {
	$(C)= carry(op1,op2);
	$(DC) = carry(op1<<4,op2<<4);
}

macro setSubtractCCarryFlag(op1,op2) {
	local tc = $(C);
	# NOTE(review): notC is declared but never used; the expression below
	# relies on ">=" binding tighter than "&" — confirm intended semantics
	# (compare the pic12 version of this macro).
	local notC = !tc;
	$(C) = op2 >= !tc & op1 >= (op2 - !tc);
}

macro setSubtractCDigitCarryFlag(op1,op2) {
	# op1 and op2 are assumed to be 8-bit values
	local notDC = ~$(DC);
	local tmp1 = op1 << 4;
	local tmp2 = op2 << 4;
	# NOTE(review): tmp1 is already shifted left by 4; the extra "<< 4" here is
	# asymmetric with setAddCDigitCarryFlag — confirm against hardware behavior.
	local tmp3 = (tmp1 - notDC) << 4;
	$(DC) = ((tmp1 < notDC) || (tmp2 < tmp3));
}

macro setSubtractCFlags(op1,op2) {
	setSubtractCCarryFlag(op1,op2);
	setSubtractCDigitCarryFlag(op1,op2);
}

macro setSubtractFlags(op1,op2) {
	# op1 and op2 are assumed to be 8-bit values
	# NOTE: carry flag is SET if there is NO borrow
	$(C) = (op1 >= op2);
	$(DC) = ((op1<<4) < (op2<<4));
}

# Push a 2-byte value onto the hardware return stack
macro push(val) {
	# TODO: Uncertain about this !!
	*[HWSTACK]:2 STKPTR = val;
	STKPTR = STKPTR + 2;
}

# Pop a 2-byte value off the hardware return stack
macro pop(val) {
	# TODO: Uncertain about this !!
	STKPTR = STKPTR - 2;
	val = *[HWSTACK]:2 STKPTR;
}

#
# SUB-CONSTRUCTORS
#

# File register index (f7!=0): bank selection determined by RP bits in STATUS reg
@if PROCESSOR == "PIC_16"
srcREG: f7 is f7 { addr:2 = (zext(RP) << 7) + f7; export *[DATA]:1 addr; }
@elif PROCESSOR == "PIC_16F"
srcREG: f7 is f7 { addr:2 = (zext(BSR) << 7) + f7; export *[DATA]:1 addr; }
@endif

# Top 16 bytes are shared RAM on PIC16 and PIC16F
srcREG: fv is uf7=0x7 & lf7 [fv = 0x70 + lf7; ] { addr:2 = fv; export *[DATA]:1 addr; }

# The registers listed here are explicitly defined as registers in sleigh.
# There are other registers but they are named in the .pspec file.
# The reason this is done is to have cross references created to certain registers, and to have
# only the registers that must be accessed directly in sleigh (e.g. PCL, FSR) defined in sleigh.
# Registers explicitly defined in sleigh will not have xref's created to them.
# Registers named only in the .pspec file will have xref's to them in most cases.
#
# Also, these registers ignore RP or BSR, which allows more registers to be in a different register bank.
#
# PIC16 :  INDF   _      PCL  STATUS  FSR    _      _      _      _    _  PCLATH  INTCON  _ _ _ _
# PIC16F:  INDF0  INDF1  PCL  STATUS  FSR0L  FSR0H  FSR1L  FSR1H  BSR  W  PCLATH  INTCON  _ _ _ _

# File register index (f7=0): INDF use implies indirect data access using FSR value and IRP bit in STATUS reg
@if PROCESSOR == "PIC_16"
# Indirect access: effective address is IRP (bank-pair select) concatenated with the 8-bit FSR.
srcREG: INDF is f7=0 & INDF { addr:2 = (zext(IRP) << 8) + zext(FSR); export *[DATA]:1 addr; }
# f7=1: maps to file offset 1 of the bank pair selected by RP
# (rpval is 1 when RP is 1 or 3, 2 when RP is 3 — built branch-free from comparisons).
srcREG: lf7 is f7=1 & lf7 { rpval:2 = zext(RP == 1) + zext(RP == 3); addr:2 = (zext(rpval) << 7) + 1; export *[DATA]:1 addr; }
@elif PROCESSOR == "PIC_16F"
# Enhanced midrange: INDF0/INDF1 dereference the full 16-bit FSR0/FSR1 directly.
srcREG: INDF0 is f7=0 & INDF0 { addr:2 = FSR0; export *[DATA]:1 addr; }
srcREG: INDF1 is f7=1 & INDF1 { addr:2 = FSR1; export *[DATA]:1 addr; }
@endif

# Special File Registers always mapped to Bank-0
srcREG: PCL is f7=0x02 & PCL {
	# PCL and PCLATH must be latched
	addr:2 = inst_start >> 1;	# Compensate for CODE wordsize
	PCL = addr:1;			# low byte of the word-address
	PCLATH = addr(1);		# high byte of the word-address
	export PCL;
}
srcREG: STATUS is f7=0x03 & STATUS { export STATUS; }
@if PROCESSOR == "PIC_16"
srcREG: FSR is f7=0x04 & FSR { export FSR; }
@elif PROCESSOR == "PIC_16F"
srcREG: FSR0L is f7=0x04 & FSR0L { export FSR0L; }
srcREG: FSR0H is f7=0x05 & FSR0H { export FSR0H; }
srcREG: FSR1L is f7=0x06 & FSR1L { export FSR1L; }
srcREG: FSR1H is f7=0x07 & FSR1H { export FSR1H; }
srcREG: BSR is f7=0x08 & BSR { export BSR; }
srcREG: W is f7=0x09 & W { export W; }
@endif
srcREG: PCLATH is f7=0x0a & PCLATH { export PCLATH; }
srcREG: INTCON is f7=0x0b & INTCON { export INTCON; }

# Destination register (either srcREG or W)
destREG: "0" is d=0 { export W; }
# Destination register: bank selection determined by RP bits in STATUS reg
destREG: "1" is d=1 & f7 & srcREG { export srcREG; }
# Destination register: Special File Registers always mapped to Bank-0
destREG: "1" is d=1 & f7=0x02 { export PCL; }	# PCL (special behavior reqd)

# Destination operand representation (w: W register is destination; f: specified srcREG is destination)
D: "w" is d=0 { }
D: "f" is d=1 { }

# Absolute address generated from k11 and
PCLATH<4:3> absAddr11: k11 is k11 { addr:2 = ((zext(PCLATH) & 0x18) << 8) | k11; export addr; } @if PROCESSOR == "PIC_16F" # Relative address relAddr9: addr is sk9 [ addr = inst_next + sk9; ] { export *[CODE]:2 addr; } @endif # Immediate Data (Literal operation) imm8: "#"k8 is k8 { export *[const]:1 k8; } @if PROCESSOR == "PIC_16F" # Immediate Data (Literal operation) imm7: "#"k7 is k7 { export *[const]:1 k7; } # Immediate Data (Literal operation) imm6: "#"k6 is k6 { export *[const]:1 k6; } # Immediate Data (Literal operation) imm5: "#"k5 is k5 { export *[const]:1 k5; } @endif # Bit identifier bit: "#"b3 is b3 { export *[const]:1 b3; } # TRIS register (TODO: not sure if this TRIS mapping is correct - see TRIS instruction) @if PROCESSOR == "PIC_16" trisREG: "5" is l5=5 { local trl:2 = 0x89; export *[DATA]:1 trl; } # TRISA trisREG: "6" is l5=6 { local trl:2 = 0x187; export *[DATA]:1 trl; } # TRISB trisREG: "7" is l5=7 { local trl:2 = 0x188; export *[DATA]:1 trl; } # TRISC @elif PROCESSOR == "PIC_16F" trisREG: "5" is l5=5 { local trl:2 = 0x10C; export *[DATA]:1 trl; } # TRISA trisREG: "6" is l5=6 { local trl:2 = 0x10D; export *[DATA]:1 trl; } # TRISB trisREG: "7" is l5=7 { local trl:2 = 0x10E; export *[DATA]:1 trl; } # TRISC @endif :^instruction is possibleSkip=1 & instruction [ possibleSkip=0; ] { if (SkipNext) goto inst_next; build instruction; } # # BYTE-ORIENTED FILE REGISTER OPERATIONS # @if PROCESSOR == "PIC_16F" :ADDFSR fsrk, sk6 is op7=0x62 & fsrk & sk6 { fsrk = fsrk + sk6; } @endif :ADDLW imm8 is op6=0x3e & imm8 { # --11 111x kkkk kkkk # 0011 1110 0001 0010 -> ADDLW #0x12 setAddFlags(W, imm8); W = W + imm8; setResultFlags(W); } :ADDWF srcREG, D is op6=0x07 & srcREG & D & destREG { # --00 0111 dfff ffff # 0000 0111 0000 0000 -> ADDWF INDF, 0 # 0000 0111 1000 0000 -> ADDWF INDF, 1 # 0000 0111 0010 0000 -> ADDWF 0x20, 0 # 0000 0111 1010 0000 -> ADDWF 0x20, 1 val:1 = srcREG; setAddFlags(W, val); val = W + val; setResultFlags(val); destREG = val; } :ADDWF PC, D is 
op6=0x07 & D & d=1 & f7=0x02 & PC { # --00 0111 dfff ffff # 0000 0111 1000 0010 -> ADDWF PCL, w, ACCESS addr:2 = inst_start >> 1; # Compenstate for CODE wordsize PCLATH = addr(1); tmp:1 = addr:1; setAddFlags(tmp, W); tmp = tmp + W; addr = ((zext(PCLATH) & 0x1F) << 8) | zext(tmp); PCL = tmp; setResultFlags(tmp); goto [addr]; } :ADDWFC srcREG, D is op6=0x3D & srcREG & D & destREG { val:1 = srcREG; local tmpC = $(C); setAddFlags(W, val); val = W + val; local tc = $(C); setAddFlags(val,tmpC); $(C) = $(C) | tc; val = val + tmpC; setResultFlags(val); destREG = val; } :ADDWFC PC, D is op6=0x3D & D & d=1 & f7=0x02 & PC { # --00 0111 dfff ffff # 0000 0111 1000 0010 -> ADDWF PCL, w, ACCESS addr:2 = inst_start >> 1; # Compenstate for CODE wordsize PCLATH = addr(1); val:1 = addr:1; local tmpC = $(C); setAddFlags(W, val); local tc = $(C); val = W + val; setAddFlags(val,tmpC); $(C) = $(C) | tc; val = val + tmpC; addr = ((zext(PCLATH) & 0x1F) << 8) | zext(val); PCL = val; setResultFlags(val); goto [addr]; } :ANDLW imm8 is op6=0x39 & imm8 { # --11 1001 kkkk kkkk # 0011 1001 0001 0010 -> ANDLW #0x12 W = W & imm8; setResultFlags(W); } :ANDWF srcREG, D is op6=0x05 & srcREG & D & destREG { # --00 0101 dfff ffff # 0000 0101 0000 0000 -> ANDWF INDF, 0 # 0000 0101 1000 0000 -> ANDWF INDF, 1 # 0000 0101 0010 0000 -> ANDWF 0x20, 0 # 0000 0101 1010 0000 -> ANDWF 0x20, 1 val:1 = srcREG; val = W & val; setResultFlags(val); destREG = val; } :ASRF srcREG, D is op6=0x37 & srcREG & D & destREG { # --11 0111 dfff ffff $(C) = srcREG & 0x1; val:1 = srcREG s>> 1; setResultFlags(val); destREG = val; } :BCF srcREG, bit is op4=0x4 & srcREG & bit { # --01 00bb bfff ffff # 0001 0010 0000 0000 -> BCF INDF, #0x4 # 0001 0010 0010 0000 -> BCF 0x20, #0x4 local bitmask = ~(1 << bit); srcREG = srcREG & bitmask; } :BCF srcREG, IntConBits is op4=0x4 & f7=0xb & bit & srcREG & IntConBits { local bitmask = ~(1 << bit); srcREG = srcREG & bitmask; } :BCF STATUS, "C" is op4=0x4 & b3=0 & f7=0x3 & STATUS { # --01 00bb 
bfff ffff # 0001 0000 0000 0011 -> BCF STATUS, #C $(C) = 0; } :BCF STATUS, "DC" is op4=0x4 & b3=1 & f7=0x3 & STATUS & bit { # --01 00bb bfff ffff # 0001 0000 1000 0011 -> BCF STATUS, #DC $(DC) = 0; } :BCF STATUS, "Z" is op4=0x4 & b3=2 & f7=0x3 & STATUS & bit { # --01 00bb bfff ffff # 0001 0001 0000 0011 -> BCF STATUS, #Z $(Z) = 0; } :BCF STATUS, "RP0" is op4=0x4 & b3=5 & f7=0x3 & STATUS & bit { # --01 00bb bfff ffff # 0001 0010 1000 0011 -> BCF STATUS, #RP0 RP = RP & 0x2; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF STATUS, "RP1" is op4=0x4 & b3=6 & f7=0x3 & STATUS & bit { # --01 00bb bfff ffff # 0001 0011 0000 0011 -> BCF STATUS, #RP1 RP = RP & 0x1; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF STATUS, "IRP" is op4=0x4 & b3=7 & f7=0x3 & STATUS & bit { # --01 00bb bfff ffff # 0001 0011 1000 0011 -> BCF STATUS, #IRP IRP = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BSF srcREG, bit is op4=0x5 & bit & srcREG { # --01 01bb bfff ffff # 0001 0110 0000 0000 -> BSF INDF, #0x4 # 0001 0110 0010 0000 -> BSF 0x20, #0x4 local bitmask = 1 << bit; srcREG = srcREG | bitmask; } :BSF srcREG, IntConBits is op4=0x5 & f7=0xb & bit & srcREG & IntConBits { local bitmask = 1 << bit; srcREG = srcREG | bitmask; } :BSF STATUS, "C" is op4=0x5 & b3=0 & f7=0x3 & STATUS { # --01 01bb bfff ffff # 0001 0100 0000 0011 -> BSF STATUS, #C $(C) = 1; } :BSF STATUS, "DC" is op4=0x5 & b3=1 & f7=0x3 & STATUS { # --01 01bb bfff ffff # 0001 0100 1000 0011 -> BSF STATUS, #DC $(DC) = 1; } :BSF STATUS, "Z" is op4=0x5 & b3=2 & f7=0x3 & STATUS & bit { # --01 01bb bfff ffff # 0001 0111 0000 0011 -> BSF STATUS, #Z $(Z) = 1; } :BSF STATUS, "RP0" is op4=0x5 & b3=5 & f7=0x3 & STATUS & bit { # --01 01bb bfff ffff # 0001 0110 1000 0011 -> BSF STATUS, #RP0 RP = RP | 0x1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF STATUS, "RP1" is op4=0x5 & b3=6 & f7=0x3 & STATUS & bit { # --01 01bb bfff ffff # 0001 0111 0000 0011 -> BSF STATUS, #RP1 RP = RP | 0x2; 
local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF STATUS, "IRP" is op4=0x5 & b3=7 & f7=0x3 & STATUS & bit { # --01 01bb bfff ffff # 0001 0111 1000 0011 -> BSF STATUS, #IRP IRP = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BTFSC srcREG, bit is op4=0x6 & bit & srcREG [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 10bb bfff ffff # 0001 1010 0000 0000 -> BTFSC INDF, #0x4 # 0001 1010 0010 0000 -> BTFSC 0x20, #0x4 local bitmask = 1 << bit; local tmp = srcREG & bitmask; SkipNext = (tmp == 0); } :BC absAddr11 is doPseudo=1 & op4=0x6 & b3=0 & bit & f7=0x3 ; op3=0x5 & absAddr11 { if ($(C) == 0) goto inst_next; goto [absAddr11]; } @if PROCESSOR == "PIC_16F" :BRA relAddr9 is op5=0x19 & relAddr9 { goto [relAddr9]; } :BRW is op14=0xb { # inst_next is byte, not word offset, need to load PC with word offset PC = (inst_next >> 1) + zext(W); goto [PC]; } @endif :BTFSC STATUS, bit is op4=0x6 & b3=0 & bit & f7=0x3 & STATUS [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 10bb bfff ffff # 0001 1000 0000 0011 -> BTFSC STATUS, #C SkipNext = ($(C) == 0); } :SKPC is doPseudo=1 & op4=0x6 & b3=0 & bit & f7=0x3 [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { SkipNext = ($(C) == 1); } :SKPNC is doPseudo=1 & op4=0x7 & b3=0 & bit & f7=0x3 [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { SkipNext = ($(C) != 1); } :SKPZ is doPseudo=1 & op4=0x6 & b3=2 & bit & f7=0x3 [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { SkipNext = ($(Z) == 1); } :SKPNZ is doPseudo=1 & op4=0x7 & b3=2 & bit & f7=0x3 [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { SkipNext = ($(Z) != 1); } :BTFSC STATUS, bit is op4=0x6 & b3=1 & bit & f7=0x3 & STATUS [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 10bb bfff ffff # 0001 1000 1000 0011 -> BTFSC STATUS, #DC SkipNext = ($(DC) == 0); } :BZ absAddr11 is doPseudo=1 & op4=0x6 & b3=2 & bit & f7=0x3 ; op3=0x5 & absAddr11 { if ($(Z) == 0) goto inst_next; goto 
[absAddr11]; } :BTFSC STATUS, bit is op4=0x6 & b3=2 & bit & f7=0x3 & STATUS [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 10bb bfff ffff # 0001 1001 0000 0011 -> BTFSC STATUS, #Z SkipNext = ($(Z) == 0); } :BTFSS srcREG, bit is op4=0x7 & bit & srcREG [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 11bb bfff ffff # 0001 1110 0000 0000 -> BTFSS INDF, #0x4 # 0001 1110 0010 0000 -> BTFSS 0x20, #0x4 local bitmask = 1 << bit; local tmp = srcREG & bitmask; SkipNext = (tmp != 0); } :BNC absAddr11 is doPseudo=1 & op4=0x7 & b3=0 & bit & f7=0x3 ; op3=0x5 & absAddr11 { if ($(C) != 0) goto inst_next; goto [absAddr11]; } :BTFSS STATUS, bit is op4=0x7 & b3=0 & bit & f7=0x3 & STATUS [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 11bb bfff ffff # 0001 1100 0000 0011 -> BTFSS STATUS, #C SkipNext = ($(C) != 0); } :BTFSS STATUS, bit is op4=0x7 & b3=1 & bit & f7=0x3 & STATUS [ possibleSkip = 1; globalset(inst_next,possibleSkip); ]{ # --01 11bb bfff ffff # 0001 1100 1000 0011 -> BTFSS STATUS, #DC SkipNext = ($(DC) != 0); } :BNZ absAddr11 is doPseudo=1 & op4=0x7 & b3=2 & bit & f7=0x3 ; op3=0x5 & absAddr11 { if ($(Z) != 0) goto inst_next; goto [absAddr11]; } :BTFSS STATUS, bit is op4=0x7 & b3=2 & bit & f7=0x3 & STATUS [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --01 11bb bfff ffff # 0001 1101 0000 0011 -> BTFSS STATUS, #Z SkipNext = ($(Z) != 0); } :CALL absAddr11 is op3=0x4 & absAddr11 { # --10 0kkk kkkk kkkk # 0010 0001 0010 0011 -> CALL 0x123 # 0010 0000 0001 0000 -> CALL 0x10 push(&:2 inst_next); call [absAddr11]; } :CALLW is op14=0x000a { # --00 0000 0000 1010 push(&:2 inst_next); call [W]; } :CLRF srcREG is op6=0x01 & d=1 & srcREG { # --00 0001 1fff ffff # 0000 0001 1000 0000 -> CLRF INDF # 0000 0001 1010 0000 -> CLRF 0x20 srcREG = 0; $(Z) = 1; } :CLRF STATUS is op6=0x01 & d=1 & f7=0x3 & STATUS { # --00 0001 1fff ffff # 0000 0001 1000 0011 -> CLRF STATUS STATUS = 0; IRP = 0; RP = 0; $(Z) = 0; $(DC) = 0; $(C) = 
0; } :CLRW is op12=0b000001000000 & mm { # --00 0001 0xxx xxxx # 0000 0001 0000 0000 -> CLRW W = 0; $(Z) = 1; } :CLRWDT is op14=0x0064 { # --00 0000 0110 0100 # Clear Watchdog Timer - Not Implemented clearWatchDogTimer(); } :COMF srcREG, D is op6=0x09 & srcREG & D & destREG { # --00 1001 dfff ffff # 0000 1001 0000 0000 -> COMF INDF, 0 # 0000 1001 1000 0000 -> COMF INDF, 1 # 0000 1001 0010 0000 -> COMF 0x20, 0 # 0000 1001 1010 0000 -> COMF 0x20, 1 tmp:1 = ~srcREG; destREG = tmp; setResultFlags(tmp); } :DECF srcREG, D is op6=0x03 & srcREG & D & destREG { # --00 0011 dfff ffff # 0000 0011 0000 0000 -> DECF INDF, 0 # 0000 0011 1000 0000 -> DECF INDF, 1 # 0000 0011 0010 0000 -> DECF 0x20, 0 # 0000 0011 1010 0000 -> DECF 0x20, 1 val:1 = srcREG - 1; destREG = val; setResultFlags(val); } :DECFSZ srcREG, D is op6=0x0b & srcREG & D & destREG [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --00 1011 dfff ffff # 0000 1011 0000 0000 -> DECFSZ INDF, 0 # 0000 1011 1000 0000 -> DECFSZ INDF, 1 # 0000 1011 0010 0000 -> DECFSZ 0x20, 0 # 0000 1011 1010 0000 -> DECFSZ 0x20, 1 val:1 = srcREG - 1; destREG = val; SkipNext = (val == 0); } :GOTO absAddr11 is op3=0x5 & absAddr11 { # --10 1kkk kkkk kkkk # 0010 1001 0010 0011 -> GOTO 0x123 # 0010 1000 0001 0000 -> GOTO 0x10 goto [absAddr11]; } :INCF srcREG, D is op6=0x0a & srcREG & D & destREG { # --00 1010 dfff ffff # 0000 1010 0000 0000 -> INCF INDF, 0 # 0000 1010 1000 0000 -> INCF INDF, 1 # 0000 1010 0010 0000 -> INCF 0x20, 0 # 0000 1010 1010 0000 -> INCF 0x20, 1 val:1 = srcREG + 1; destREG = val; setResultFlags(val); } :INCFSZ srcREG, D is op6=0x0f & srcREG & D & destREG [ possibleSkip = 1; globalset(inst_next,possibleSkip); ] { # --00 1111 dfff ffff # 0000 1111 0000 0000 -> INCFSZ INDF, 0 # 0000 1111 1000 0000 -> INCFSZ INDF, 1 # 0000 1111 0010 0000 -> INCFSZ 0x20, 0 # 0000 1111 1010 0000 -> INCFSZ 0x20, 1 val:1 = srcREG + 1; destREG = val; SkipNext = (val == 0); } :IORLW imm8 is op6=0x38 & imm8 { # --11 1000 kkkk kkkk # 0011 
1000 0001 0010 -> IORLW #0x12 W = W | imm8; setResultFlags(W); } :IORWF srcREG, D is op6=0x04 & srcREG & D & destREG { # --00 0100 dfff ffff # 0000 0100 0000 0000 -> IORWF INDF, 0 # 0000 0100 1000 0000 -> IORWF INDF, 1 # 0000 0100 0010 0000 -> IORWF 0x20, 0 # 0000 0100 1010 0000 -> IORWF 0x20, 1 val:1 = W | srcREG; destREG = val; setResultFlags(val); } @if PROCESSOR == "PIC_16F" srcFSR: "++"fsr is fsr & mm=0 { fsr = fsr + 1; addr:2 = fsr; export *[DATA]:1 addr; } srcFSR: "--"fsr is fsr & mm=1 { fsr = fsr - 1; addr:2 = fsr; export *[DATA]:1 addr; } srcFSR: fsr"++" is fsr & mm=2 { addr:2 = fsr; fsr = fsr + 1; export *[DATA]:1 addr; } srcFSR: fsr"--" is fsr & mm=3 { addr:2 = fsr; fsr = fsr - 1; export *[DATA]:1 addr; } srcFSRk: sk6"["fsrk"]" is fsrk & sk6 { addr:2 = fsrk + sk6; export *[DATA]:1 addr; } :LSLF srcREG, D is op6=0x35 & srcREG & D & destREG { # --11 0101 dfff ffff $(C) = (srcREG & 0x80) != 0; val:1 = srcREG << 1; setResultFlags(val); destREG = val; } :LSRF srcREG, D is op6=0x36 & srcREG & D & destREG { # --11 0110 dfff ffff $(C) = srcREG & 0x1; val:1 = srcREG >> 1; setResultFlags(val); destREG = val; } :MOVIW srcFSR is op11=2 & srcFSR { W = srcFSR; setResultFlags(W); } :MOVIW srcFSRk is op7=0x7e & srcFSRk { W = srcFSRk; setResultFlags(W); } :MOVWI srcFSR is op11=3 & srcFSR { srcFSR = W; } :MOVWI srcFSRk is op7=0x7f & srcFSRk { srcFSRk = W; } :MOVLB imm5 is op9=0x1 & imm5 { BSR = imm5; } # Alternate variant in certain pic16f variants :MOVLB imm6 is op8=0x5 & imm6 { BSR = imm6; } :MOVLP imm7 is op7=0x63 & imm7 { PCLATH = imm7 & 0x1F; } @endif :MOVLW imm8 is op6=0x30 & imm8 { # --11 00xx kkkk kkkk # 0011 0000 0001 0010 -> MOVLW #0x12 W = imm8; } :MOVF srcREG, D is op6=0x08 & srcREG & D & destREG { # --00 1000 dfff ffff # 0000 1000 0000 0000 -> MOVF INDF, 0 # 0000 1000 1000 0000 -> MOVF INDF, 1 # 0000 1000 0010 0000 -> MOVF 0x20, 0 # 0000 1000 1010 0000 -> MOVF 0x20, 1 val:1 = srcREG; destREG = val; setResultFlags(val); } :MOVWF srcREG is op6=0x00 & d=1 & 
srcREG { # --00 0000 1fff ffff # 0000 0000 1000 0000 -> MOVWF INDF # 0000 0000 1010 0000 -> MOVWF 0x20 srcREG = W; } :MOVWF PC is op6=0x00 & d=1 & f7=0x02 & PC { # --00 0000 1fff ffff # 0000 0000 1000 0010 -> MOVWF PCL PCL = W; addr:2 = ((zext(PCLATH) & 0x1F) << 8) | zext(PCL); goto [addr]; } :NOP is op6=0 & d=0 & l5=0 { # --00 0000 0xx0 0000 } :OPTION is op14=0x0062 { # --00 0000 0110 0010 OPTION = W; } :RESET is op14=0x0001 { reset(); goto 0x0; } :RETFIE is op14=0x0009 { # --00 0000 0000 1001 INTCON = 0x80 | INTCON; # set INTCON.GIE bit retAddr:4 = 0; pop(retAddr); return [retAddr]; } :RETLW imm8 is op4=0xd & imm8 { # --11 01xx kkkk kkkk # 0011 0100 0001 0010 -> RETLW #0x12 W = imm8; retAddr:4 = 0; pop(retAddr); return [retAddr]; } :RETURN is op14=0x0008 { # --00 0000 0000 1000 retAddr:4 = 0; pop(retAddr); return [retAddr]; } :RLF srcREG, D is op6=0x0d & srcREG & D & destREG { # --00 1101 dfff ffff # 0000 1101 0000 0000 -> RLF INDF, 0 # 0000 1101 1000 0000 -> RLF INDF, 1 # 0000 1101 0010 0000 -> RLF 0x20, 0 # 0000 1101 1010 0000 -> RLF 0x20, 1 local tmpC = $(C); val:1 = srcREG; $(C) = (val s< 0); val = (val << 1) | tmpC; destREG = val; setResultFlags(val); } :RRF srcREG, D is op6=0x0c & srcREG & D & destREG { # --00 1100 dfff ffff # 0000 1100 0000 0000 -> RRF INDF, 0 # 0000 1100 1000 0000 -> RRF INDF, 1 # 0000 1100 0010 0000 -> RRF 0x20, 0 # 0000 1100 1010 0000 -> RRF 0x20, 1 local tmpC = $(C) << 7; val:1 = srcREG; $(C) = (val & 1) != 0; val = (val >> 1) | tmpC; destREG = val; setResultFlags(val); } :SLEEP is op14=0x0063 { # --00 0000 0110 0011 # Sleep - Not Implemented sleep(); } :SUBLW imm8 is op6=0x3c & imm8 { # --11 110x kkkk kkkk # 0011 1100 0001 0010 -> SUBLW #0x12 setSubtractFlags(imm8, W); W = imm8 - W; setResultFlags(W); } :SUBWF srcREG, D is op6=0x02 & srcREG & D & destREG { # --00 0010 dfff ffff # 0000 0010 0000 0000 -> SUBWF INDF, 0 # 0000 0010 1000 0000 -> SUBWF INDF, 1 # 0000 0010 0010 0000 -> SUBWF 0x20, 0 # 0000 0010 1010 0000 -> SUBWF 0x20, 1 
val:1 = srcREG; setSubtractFlags(val, W); val = val - W; setResultFlags(val); destREG = val; } :SUBWFB srcREG, D is op6=0x3b & srcREG & D & destREG { val:1 = srcREG; bor:1 = !$(C); setSubtractFlags(val, W); val = val - W; tb:1 = $(C); setSubtractFlags(val,bor); # first subtraction could cause borrow $(C) = $(C) & tb; # borrow if C not set val = val - bor; setResultFlags(val); destREG = val; } :SWAPF srcREG, D is op6=0x0e & srcREG & D & destREG { # --00 1110 dfff ffff # 0000 1110 0000 0000 -> SUBWF INDF, 0 # 0000 1110 1000 0000 -> SUBWF INDF, 1 # 0000 1110 0010 0000 -> SUBWF 0x20, 0 # 0000 1110 1010 0000 -> SUBWF 0x20, 1 val:1 = srcREG; destREG = (val << 4) | (val >> 4); } :TRIS trisREG is op9=0x3 & trisREG { # --00 0000 0110 0fff # 0000 0000 0110 0101 -> TRIS 5 trisREG = W; } :XORLW imm8 is op6=0x3a & imm8 { # --11 1010 kkkk kkkk # 0011 1010 0001 0010 -> XORLW #0x12 W = imm8 ^ W; setResultFlags(W); } :XORWF srcREG, D is op6=0x06 & srcREG & D & destREG { # --00 0110 dfff ffff # 0000 0110 0000 0000 -> XORWF INDF, 0 # 0000 0110 1000 0000 -> XORWF INDF, 1 # 0000 0110 0010 0000 -> XORWF 0x20, 0 # 0000 0110 1010 0000 -> XORWF 0x20, 1 val:1 = W ^ srcREG; destREG = val; setResultFlags(val); } ================================================ FILE: pypcode/processors/PIC/data/languages/pic16c5x.cspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic16c5x.ldefs ================================================ PIC-16C5x ================================================ FILE: pypcode/processors/PIC/data/languages/pic16c5x.pspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic16c5x.slaspec ================================================ @define PROCESSOR "PIC_12C5XX" @include "pic12.sinc" # # NOTES - # 1. 
If a specific PIC-12 has a different register set, this file and the pic12c5xx.specl file may be copied/renamed and # slightly modified to specify the correct Register File Map.
BANK15_BASE 0x0F00 # ALUSTA bit definitions @define STATUS_OV_BIT 3 @define STATUS_Z_BIT 2 @define STATUS_DC_BIT 1 @define STATUS_C_BIT 0 # ALUSTA bit masks used for clearing @define STATUS_OV_CLEARMASK 0xF7 @define STATUS_Z_CLEARMASK 0xFB @define STATUS_DC_CLEARMASK 0xFD @define STATUS_C_CLEARMASK 0xFE define endian=little; define alignment=2; # Instruction Memory (ROM-based) define space CODE type=ram_space wordsize=2 size=2 default; # General Purpose Register Memory # 0x00 - 0x0f : Unbanked registers # 0x10 - 0x17 : Banked registers (9 banks controlled by lower nibble of BSR) # 0x18 - 0x19 : Unbanked registers # 0x1a - 0x1f : Unbanked GPRs # 0x20 - 0xff : Banked GPRs (4 banks controlled by upper nibble of BSR) define space DATA type=ram_space size=2; # The HWSTACK consists of a 16_word by 16_bit RAM and a corresponding 4_bit STKPTR register (which is not readable or writable). # There is no means of directly accessing the stack space other than via a CALL, RETURN, RETLW or RETFIE define space HWSTACK type=ram_space size=1; # implemented as independently addressable bytes (each location is 2-bytes wide) define space register type=register_space size=2; # Program Counter define register offset=0x0000 size=2 [ PC ]; # Stack Pointer (4-bits) define register offset=0x0004 size=1 [ STKPTR ]; # ALUSTA bit registers (these do not really exist and must get reflected into the STATUS byte register) define register offset=0x0005 size=1 [ FS32 FS10 OV Z DC C ]; # Table Latch (not visible) define register offset=0x0010 size=1 [ TBLATL TBLATH ]; define register offset=0x0010 size=2 [ TBLAT ]; # Mirrored registers for improved decompiler behavior define register offset=0x0020 size=1 [ WREG ]; ================================================ FILE: pypcode/processors/PIC/data/languages/pic17c7xx.slaspec ================================================ @define PROCESSOR "PIC_17C7xx" @include "pic17c7xx.sinc" # 0x00 - 0x0f (Unbanked - BSR ignored, WREG hidden and mirrored in 
register space) define DATA offset=0x000 size=1 [ INDF0 FSR0 PCL PCLATH ALUSTA T0STA CPUSTA INTSTA INDF1 FSR1 _ TMR0L TMR0H TBLPTRL TBLPTRH BSR ]; # Bank-0 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x010 size=1 [ PORTA DDRB PORTB RCSTA1 RCREG1 TXSTA1 TXREG1 SPBRG1 ]; # Bank-1 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x110 size=1 [ DDRC PORTC DDRD PORTD DDRE PORTE PIR1 PIE1 ]; # Bank-2 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x210 size=1 [ TMR1 TMR2 TMR3L TMR3H PR1 PR2 PR3LCA1L PR3HCA1H ]; # Bank-3 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x310 size=1 [ PW1DCL PW2DCL PW1DCH PW2DCH CA2L CA2H TCON1 TCON2 ]; # Bank-4 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x410 size=1 [ PIR2 PIE2 _ RCSTA2 RCREG2 TXSTA2 TXREG2 SPBRG2 ]; # Bank-5 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x510 size=1 [ DDRF PORTF DDRG PORTG ADCON0 ADCON1 ADRESL ADRESH ]; # Bank-6 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x610 size=1 [ SSPADD SSPCON1 SSPCON2 SSPSTAT SSPBUF ]; # Bank-7 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. address<11:8>) define DATA offset=0x710 size=1 [ PW3DCL PW3DCH CA3L CA3H CA4L CA4H TCON3 ]; # Bank-8 0x10 - 0x17 (lower nibble of BSR determines bank, i.e. 
address<11:8>) define DATA offset=0x810 size=1 [ DDRH PORTH DDRJ PORTJ ]; # 0x18 - 0x1f (Unbanked - BSR ignored) define DATA offset=0x018 size=1 [ PRODL PRODH ]; define DATA offset=0x00d size=2 [ TBLPTR ]; define DATA offset=0x002 size=2 [ PCLAT ]; define DATA offset=0x018 size=2 [ PROD ]; define DATA offset=0x516 size=2 [ ADRES ]; @include "pic17c7xx_instructions.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/pic17c7xx_instructions.sinc ================================================ # # PIC-17C7xx Instruction Section # includes token definitions, macros, sub-constructors and instruction definitions # # 16-bit instruction token uses big-endian bit numbering which agrees with # instruction bit numbering with PIC documentation. # 15-14-13-12-11-10-9-8-7-6-5-4-3-2-1-0 define token instr16(16) op16 = (0,15) op8 = (8,15) op7 = (9,15) op6 = (10,15) op5 = (11,15) op3 = (13,15) t = (9,9) d = (8,8) s = (8,8) i = (8,8) b3 = (8,10) p5_4 = (12,12) p5_3 = (11,11) p5 = (8,12) p5reg = (8,12) u4hi = (4,7) u4lo = (0,3) f8 = (0,7) f8hi = (5,7) f8_4 = (4,4) f8_3 = (3,3) f8reg = (0,4) k8 = (0,7) k8_h = (4,7) k8_l = (0,3) k13 = (0,12) ; attach variables [ f8reg p5reg ] [ INDF0 FSR0 PCL PCLATH ALUSTA T0STA CPUSTA INTSTA INDF1 FSR1 WREG TMR0L TMR0H TBLPTRL TBLPTRH BSR _ _ _ _ _ _ _ _ PRODL PRODH _ _ _ _ _ _ ]; attach variables [ t ] [ TBLATL TBLATH ]; # # Special PIC-17 Operations # # Return a decimal adjusted value for the value provided (see DAW instruction) define pcodeop decimalAdjust; # Perform a Master Clear Reset define pcodeop reset; define pcodeop clearWatchDogTimer; define pcodeop sleep; # # MACROS # macro setResultFlags(result) { Z = (result == 0); } macro setAddCOverflowFlag(op1,op2) { local tmpC = C & 1; OV = scarry(op1,tmpC) || scarry(op2,op1 + tmpC); } macro setAddCCarryFlag(op1,op2) { local tmpC = C & 1; C = carry(op1,tmpC) || carry(op2,op1 + tmpC); } macro setAddCDigitCarryFlag(op1,op2) { # op1 and op2 are assumed to 
be 8-bit values local tmp1 = op1 << 4; local tmp2 = op2 << 4; local tmpDC = DC & 1; DC = carry(tmp1,tmpDC) || carry(tmp2,tmp1 + tmpDC); } macro setAddCFlags(op1,op2) { setAddCCarryFlag(op1,op2); setAddCDigitCarryFlag(op1,op2); setAddCOverflowFlag(op1,op2); } macro setAddFlags(op1,op2) { C = carry(op1,op2); DC = carry(op1<<4,op2<<4); OV = scarry(op1,op2); } macro setSubtractCOverflowFlag(op1,op2) { local notC = ~(C & 1); OV = sborrow(op1,notC) || sborrow(op2,op1 - notC); } macro setSubtractCCarryFlag(op1,op2) { local notC = ~(C & 1); C = (op1 < notC) || (op2 < (op1 - notC)); } macro setSubtractCDigitCarryFlag(op1,op2) { # op1 and op2 are assumed to be 8-bit values local notDC = ~(DC & 1); local tmp1 = op1 << 4; local tmp2 = op2 << 4; local tmp3 = (tmp1 - notDC) << 4; DC = (tmp1 < notDC) || (tmp2 < tmp3); } macro setSubtractCFlags(op1,op2) { setSubtractCCarryFlag(op1,op2); setSubtractCDigitCarryFlag(op1,op2); setSubtractCOverflowFlag(op1,op2); } macro setSubtractFlags(op1,op2) { # op1 and op2 are assumed to be 8-bit values # NOTE: carry flag is SET if there is NO borrow C = (op1 >= op2); DC = ((op1<<4) < (op2<<4)); OV = sborrow(op1,op2); } macro push(val) { # TODO: Uncertain about this !! # CheckStackFull(); *[HWSTACK]:2 STKPTR = val; STKPTR = STKPTR + 2; } macro pop(rval) { # TODO: Uncertain about this !! 
# CheckStackUnderflow(); STKPTR = STKPTR - 2; rval = *[HWSTACK]:2 STKPTR; } # # SUB-CONSTRUCTORS # # PC register write - instruction must set PCLATH/PCL and perform branch operation fPC: "PC" is f8=0x02 { export PCL; } pPC: "PC" is p5=0x02 { export PCL; } # ALUSTA register fALUSTA: f8reg is f8=0x04 & f8reg { export f8reg; } #pALUSTA: p5reg is p5=0x04 & p5reg { export p5reg; } # # f Register subconstructors # # 0x00-0x0f Unbanked registers fREGLoc: f8reg is f8hi=0 & f8_4=0 & f8reg { export f8reg; } # 0x10-0x1f Banked registers fREGLoc: f8 is f8hi=0 & f8_4=1 & f8_3=0 & f8 { ptr:2 = (zext(BSR & 0x0f) << 8) + f8; export *[DATA]:1 ptr; } # 0x18-0x19 Unbanked registers (PRODL,PRODH) fREGLoc: f8reg is f8=0x18 & f8reg { export f8reg; } fREGLoc: f8reg is f8=0x19 & f8reg { export f8reg; } # Unbanked general purpose RAM fREGLoc: f8 is f8hi=0 & f8_4=1 & f8_3=1 & f8 { export *[DATA]:1 f8; } # Banked general purpose RAM fREGLoc: f8 is f8 { ptr:2 = (zext(BSR & 0xf0) << 4) + f8; export *[DATA]:1 ptr; } # Indirect File Register access - INDF0 fREGLoc: f8reg is f8=0x00 & f8reg { addr:1 = FSR0; val:1 = ((FS10 == 0x1) * 1) + ((FS10 == 0x0) * -1); FSR0 = addr + val; export *[DATA]:1 addr; } # Indirect File Register access - INDF1 fREGLoc: f8reg is f8=0x08 & f8reg { addr:1 = FSR1; val:1 = ((FS32 == 0x1) * 1) + ((FS32 == 0x0) * -1); FSR1 = addr + val; export *[DATA]:1 addr; } # # p Register subconstructors # # 0x00-0x0f Unbanked registers pREGLoc: p5reg is p5_4=0 & p5reg { export p5reg; } # 0x10-0x17 Banked registers pREGLoc: p5 is p5_4=1 & p5_3=0 & p5 { ptr:2 = (zext(BSR & 0x0f) << 8) + p5; export *[DATA]:1 ptr; } # 0x18-0x19 Unbanked registers (PRODL,PRODH) pREGLoc: p5reg is p5=0x18 & p5reg { export p5reg; } pREGLoc: p5reg is p5=0x19 & p5reg { export p5reg; } # Unbanked general purpose RAM pREGLoc: p5 is p5_4=1 & p5_3=1 & p5 { export *[DATA]:1 p5; } # Indirect File Register access - INDF0 pREGLoc: p5reg is p5=0x00 & p5reg { addr:1 = FSR0; val:1 = ((FS10 == 0x1) * 1) + ((FS10 == 0x0) * 
-1); FSR0 = addr + val; export *[DATA]:1 addr; } # Indirect File Register access - INDF1 pREGLoc: p5reg is p5=0x08 & p5reg { addr:1 = FSR1; val:1 = ((FS32 == 0x1) * 1) + ((FS32 == 0x0) * -1); FSR1 = addr + val; export *[DATA]:1 addr; } # Direct File register data srcFREG: fREGLoc is fREGLoc { export fREGLoc; } # PCL read - latch PC into PCL and PCLATH srcFREG: "PC" is f8=0x02 { PCLAT = inst_start; export PCL; } # Destination register (always fREGLoc) destFREG: fREGLoc is fREGLoc { export fREGLoc; } # Destination register (either fREGLoc or WREG) destREG: "0" is d=0 { export WREG; } destREG: "1" is d=1 & fREGLoc { export fREGLoc; } # Direct File register data srcPREG: pREGLoc is pREGLoc { export pREGLoc; } # PCL read - latch PC into PCL and PCLATH srcPREG: "PC" is p5=0x02 { PCLAT = inst_start; export PCL; } # Destination register (always pREGLoc) destPREG: pREGLoc is pREGLoc { export pREGLoc; } # Destination operand representation (w: W register is destination; f: specified fREG is destination) D: "w" is d=0 { } D: "f" is d=1 { } # s-flag used by those instructions which can optionally store result in both srcFREG and WREG S: "0" is s=0 { } S: "1" is s=1 { } # Table read/write i-flag I: "0" is i=0 { } I: "1" is i=1 { } # Table read/write t-flag identifies table latch register (high or low byte) T: t is t { export t; } # Relative instruction location with an 8K page shortAddr: nLoc is k13 [ nLoc = (inst_next & 0xe000) + k13; ] { tmp:2 = nLoc:2 >> 8; PCLATH = tmp:1; export *[CODE]:2 nLoc; } # Absolute instruction location within 64K space (PCLATH contain upper 8-bits) longAddr: k8 is k8 { addr:2 = (zext(PCLATH) << 8) + k8; export addr; } # Skip instruction address skipInst: inst_skip is op16 [ inst_skip = inst_next + 1; ] {export *[CODE]:2 inst_skip; } # Immediate Data (Literal operation) imm8: "#"k8 is k8 { export *[const]:1 k8; } imm8h: "#"k8_h is k8_h { export *[const]:1 k8_h; } imm8l: "#"k8_l is k8_l { export *[const]:1 k8_l; } # Bit identifier bit: "#"b3 is b3 { 
export *[const]:1 b3; } # # Instructions # :ADDLW imm8 is op8=0xb1 & imm8 { # 1011 0001 kkkk kkkk tmp1:1 = WREG; tmp2:1 = imm8; setAddFlags(tmp1, tmp2); local tmp = tmp1 + tmp2; WREG = tmp; setResultFlags(tmp); } :ADDWF srcFREG, D is op7=0x07 & D & srcFREG & destREG { # 0000 111d ffff ffff tmp1:1 = srcFREG; # read only once! tmp2:1 = WREG; setAddFlags(tmp1, tmp2); local tmp = tmp1 + tmp2; destREG = tmp; setResultFlags(tmp); } :ADDWF fPC, D is op7=0x07 & D & d=1 & fPC { # 0000 111d ffff ffff # 0000 1110 ffff ffff -> ADDWF PCL, w addr:2 = inst_start >> 1; # Compenstate for CODE wordsize addrHi:1 = addr(1); PCLATH = addrHi; addrLo:1 = addr:1; tmpW:1 = WREG; setAddFlags(addrLo, tmpW); addrLo = addrLo + tmpW; addr = (zext(addrHi) << 8) + zext(addrLo); setResultFlags(addrLo); goto [addr]; } :ADDWFC srcFREG, D is op7=0x08 & D & srcFREG & destREG { # 0001 000d ffff ffff local tmpC = C & 1; tmp1:1 = srcFREG; # read only once! tmp2:1 = WREG; setAddCFlags(tmp1, tmp2); local tmp = tmp1 + tmp2 + tmpC; destREG = tmp; setResultFlags(tmp); } :ANDLW imm8 is op8=0xb5 & imm8 { # 1011 0101 kkkk kkkk tmp:1 = WREG & imm8; WREG = tmp; setResultFlags(tmp); } :ANDWF srcFREG, D is op7=0x05 & D & srcFREG & destREG { # 0000 101d ffff ffff tmp:1 = srcFREG & WREG; destREG = tmp; setResultFlags(tmp); } :BCF srcFREG, bit is op5=0x11 & bit & srcFREG { # 1000 1bbb ffff ffff local bitmask = ~(1 << bit); srcFREG = srcFREG & bitmask; } :BCF fALUSTA, bit is op5=0x11 & b3=0 & fALUSTA & bit { # 1000 1000 0000 0100 -> BCF ALUSTA, #C C = 0; } :BCF fALUSTA, bit is op5=0x11 & b3=1 & fALUSTA & bit { # 1000 1001 0000 0100 -> BCF ALUSTA, #DC DC = 0; } :BCF fALUSTA, bit is op5=0x11 & b3=2 & fALUSTA & bit { # 1000 1010 0000 0100 -> BCF ALUSTA, #Z Z = 0; } :BCF fALUSTA, bit is op5=0x11 & b3=3 & fALUSTA & bit { # 1000 1011 0000 0100 -> BCF ALUSTA, #OV OV = 0; } :BCF fALUSTA, bit is op5=0x11 & b3=4 & fALUSTA & bit { # 1000 1100 0000 0100 -> BCF ALUSTA, #FS0 FS10 = FS10 & 0x2; } :BCF fALUSTA, bit is op5=0x11 & b3=5 & 
fALUSTA & bit { # 1000 1101 0000 0100 -> BCF ALUSTA, #FS1 FS10 = FS10 & 0x1; } :BCF fALUSTA, bit is op5=0x11 & b3=6 & fALUSTA & bit { # 1000 1110 0000 0100 -> BCF ALUSTA, #FS2 FS32 = FS32 & 0x2; } :BCF fALUSTA, bit is op5=0x11 & b3=7 & fALUSTA & bit { # 1000 1111 0000 0100 -> BCF ALUSTA, #FS3 FS32 = FS32 & 0x1; } :BSF srcFREG, bit is op5=0x10 & bit & srcFREG { # 1000 0bbb ffff ffff local bitmask = 1 << bit; srcFREG = srcFREG | bitmask; } :BSF fALUSTA, bit is op5=0x10 & b3=0 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #C C = 1; } :BSF fALUSTA, bit is op5=0x10 & b3=1 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #DC DC = 1; } :BSF fALUSTA, bit is op5=0x10 & b3=2 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #Z Z = 1; } :BSF fALUSTA, bit is op5=0x10 & b3=3 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #OV OV = 1; } :BSF fALUSTA, bit is op5=0x10 & b3=4 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #FS0 FS10 = FS10 | 0x1; } :BSF fALUSTA, bit is op5=0x10 & b3=5 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #FS1 FS10 = FS10 | 0x2; } :BSF fALUSTA, bit is op5=0x10 & b3=6 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #FS2 FS32 = FS32 | 0x1; } :BSF fALUSTA, bit is op5=0x10 & b3=7 & bit & fALUSTA { # 1000 0000 0000 0100 -> BSF ALUSTA, #FS3 FS32 = FS32 | 0x2; } :BTFSC srcFREG, bit is op5=0x13 & bit & srcFREG & skipInst { # 1001 1bbb ffff ffff local bitmask = 1 << bit; local tmp = srcFREG & bitmask; if (tmp == 0) goto skipInst; } :BTFSC fALUSTA, bit is op5=0x13 & b3=0 & bit & fALUSTA & skipInst { # 1001 1000 0000 0100 -> BTFSC STATUS, #C if (C == 0) goto skipInst; } :BTFSC fALUSTA, bit is op5=0x13 & b3=1 & bit & fALUSTA & skipInst { # 1001 1001 0000 0100 -> BTFSC STATUS, #DC if (DC == 0) goto skipInst; } :BTFSC fALUSTA, bit is op5=0x13 & b3=2 & bit & fALUSTA & skipInst { # 1001 1010 0000 0100 -> BTFSC STATUS, #Z if (Z == 0) goto skipInst; } :BTFSC fALUSTA, bit is op5=0x13 & b3=3 & bit & fALUSTA & skipInst { # 1001 
1011 0000 0100 -> BTFSC STATUS, #OV if (OV == 0) goto skipInst; } :BTFSS srcFREG, bit is op5=0x12 & bit & srcFREG & skipInst { # 1001 0bbb ffff ffff local bitmask = 1 << bit; local tmp = srcFREG & bitmask; if (tmp != 0) goto skipInst; } :BTFSS fALUSTA, bit is op5=0x12 & b3=0 & bit & fALUSTA & skipInst { # 1001 1000 0000 0100 -> BTFSS STATUS, #C if (C != 0) goto skipInst; } :BTFSS fALUSTA, bit is op5=0x12 & b3=1 & bit & fALUSTA & skipInst { # 1001 1001 0000 0100 -> BTFSS STATUS, #DC if (DC != 0) goto skipInst; } :BTFSS fALUSTA, bit is op5=0x12 & b3=2 & bit & fALUSTA & skipInst { # 1001 1010 0000 0100 -> BTFSS STATUS, #Z if (Z != 0) goto skipInst; } :BTFSS fALUSTA, bit is op5=0x12 & b3=3 & bit & fALUSTA & skipInst { # 1001 1011 0000 0100 -> BTFSS STATUS, #OV if (OV != 0) goto skipInst; } :BTG srcFREG, bit is op5=0x7 & bit & srcFREG & skipInst { # 0011 1bbb ffff ffff local bitmask = 1 << bit; tmp:1 = srcFREG; srcFREG = ~(tmp & bitmask) | (tmp & ~bitmask); } :CALL shortAddr is op3=0x7 & shortAddr { # 111k kkkk kkkk kkkk push(&:2 inst_next); call shortAddr; } # Special case for Call which appears to correspond to uninitialized :BADCALL shortAddr is op16=0xffff & shortAddr { addr:2 = shortAddr; return [addr]; } :CLRF destFREG, S is op7=0x14 & s=0 & S & destFREG { # 0010 1000 ffff ffff destFREG = 0; WREG = 0; } :CLRF destFREG, S is op7=0x14 & s=1 & S & destFREG { # 0010 1001 ffff ffff destFREG = 0; } :CLRF fALUSTA, S is op7=0x14 & s=0 & S & fALUSTA { # 0010 1000 0000 0100 C = 0; DC = 0; Z = 0; OV = 0; FS10 = 0; FS32 = 0; WREG = 0; } :CLRF fALUSTA, S is op7=0x14 & s=1 & S & fALUSTA { # 0010 1001 0000 0100 C = 0; DC = 0; Z = 0; OV = 0; FS10 = 0; FS32 = 0; } :CLRWDT is op16=0x0004 { # 0000 0000 0000 0100 clearWatchDogTimer(); } :COMF srcFREG, D is op7=0x09 & D & srcFREG & destREG { # 0001 001d ffff ffff tmp:1 = ~srcFREG; destREG = tmp; setResultFlags(tmp); } :CPFSEQ srcFREG is op8=0x31 & srcFREG & skipInst { # 0011 0001 ffff ffff if (srcFREG == WREG) goto skipInst; } :CPFSGT 
srcFREG is op8=0x32 & srcFREG & skipInst { # 0011 0010 ffff ffff if (srcFREG > WREG) goto skipInst; } :CPFSLT srcFREG is op8=0x30 & srcFREG & skipInst { # 0011 0000 ffff ffff if (srcFREG < WREG) goto skipInst; } :DAW destFREG, S is op7=0x17 & s=0 & S & destFREG { # 0010 1110 ffff ffff tmp:1 = decimalAdjust(WREG); destFREG = tmp; WREG = tmp; } :DAW destFREG, S is op7=0x17 & s=1 & S & destFREG { # 0010 1111 ffff ffff tmp:1 = decimalAdjust(WREG); destFREG = tmp; setResultFlags(tmp); } :DECF srcFREG, D is op7=0x03 & D & srcFREG & destREG { # 0000 011d ffff ffff tmp:1 = srcFREG; setSubtractFlags(tmp, 1); tmp = tmp - 1; destREG = tmp; setResultFlags(tmp); } :DECFSZ srcFREG, D is op7=0x0b & D & srcFREG & destREG & skipInst { # 0001 011d ffff ffff val:1 = srcFREG - 1; destREG = val; if (val == 0) goto skipInst; } :DCFSNZ srcFREG, D is op7=0x13 & D & srcFREG & destREG & skipInst { # 0010 011d ffff ffff val:1 = srcFREG - 1; destREG = val; if (val != 0) goto skipInst; } :GOTO shortAddr is op3=0x6 & shortAddr { # 110k kkkk kkkk kkkk goto shortAddr; } :INCF srcFREG, D is op7=0x0a & D & srcFREG & destREG { # 0001 010d ffff ffff tmp:1 = srcFREG; # read once only! 
setAddFlags(tmp, 1); tmp = tmp + 1; destREG = tmp; setResultFlags(tmp); } :INCFSZ srcFREG, D is op7=0x0f & D & srcFREG & destREG & skipInst { # 0001 111d ffff ffff val:1 = srcFREG + 1; destREG = val; if (val == 0) goto skipInst; } :INFSNZ srcFREG, D is op7=0x12 & D & srcFREG & destREG & skipInst { # 0010 010d ffff ffff val:1 = srcFREG + 1; destREG = val; if (val != 0) goto skipInst; } :IORLW imm8 is op8=0xb3 & imm8 { # 1011 0011 kkkk kkkk tmp:1 = WREG | imm8; WREG = tmp; setResultFlags(tmp); } :IORWF srcFREG, D is op7=0x04 & D & srcFREG & destREG { # 0000 100d ffff ffff tmp:1 = WREG | srcFREG; destREG = tmp; setResultFlags(tmp); } :LCALL longAddr is op8=0xb7 & longAddr { # 1011 0111 kkkk kkkk push(&:2 inst_next); call [longAddr]; } :MOVFP srcFREG, destPREG is op3=0x3 & srcFREG & destPREG { # 011p pppp ffff ffff destPREG = srcFREG; } :MOVFP srcFREG, pPC is op3=0x3 & srcFREG & pPC { # 0110 0010 ffff ffff addr:2 = (zext(PCLATH) << 8) + zext(srcFREG); goto [addr]; } :MOVLB imm8l is op8=0xb8 & u4hi=0 & imm8l { # 1011 1000 0000 kkkk BSR = (BSR & 0xf0) | imm8l; } :MOVLR imm8h is op7=0x5d & u4lo=0 & imm8h { # 1011 101x kkkk 0000 BSR = (BSR & 0x0f) | (imm8h << 4); } :MOVLW imm8 is op8=0xb0 & imm8 { # 1011 0000 kkkk kkkk WREG = imm8; } :MOVPF srcPREG, destFREG is op3=0x2 & srcPREG & destFREG { # 010p pppp ffff ffff tmp:1 = srcPREG; destFREG = tmp; setResultFlags(tmp); } :MOVPF srcPREG, fPC is op3=0x2 & srcPREG & fPC { tmp:1 = srcPREG; addr:2 = (zext(PCLATH) << 8) + zext(tmp); setResultFlags(tmp); goto [addr]; } :MOVWF destFREG is op8=0x01 & destFREG { # 0000 0001 ffff ffff destFREG = WREG; } :MOVWF fPC is op8=0x01 & fPC { addr:2 = (zext(PCLATH) << 8) + zext(WREG); goto [addr]; } :MULLW imm8 is op8=0xbc & imm8 { # 1011 1100 kkkk kkkk PROD = zext(WREG) * zext(imm8); } :MULLWF srcFREG is op8=0x34 & srcFREG { # 0011 0100 ffff ffff PROD = zext(WREG) * zext(srcFREG); } :NEGW destFREG, S is op7=0x16 & s=0 & S & destFREG { # 0010 110s ffff ffff tmp:1 = -WREG; destFREG = tmp; WREG = 
tmp; C = (tmp s< 0); OV = sborrow(0,tmp); setResultFlags(tmp); } :NEGW destFREG, S is op7=0x16 & s=1 & S & destFREG { # 0010 110s ffff ffff tmp:1 = -WREG; destFREG = tmp; C = (tmp s< 0); OV = sborrow(0,tmp); setResultFlags(tmp); } :NOP is op16=0x0 { } :RETFIE is op16=0x0005 { # 0000 0000 0000 0101 retAddr:2 = 0; pop(retAddr); return [retAddr]; } :RETLW imm8 is op8=0xb6 & imm8 { # 1011 0110 kkkk kkkk WREG = imm8; retAddr:2 = 0; pop(retAddr); return [retAddr]; } :RETURN is op16=0x0002 { # 0000 0000 0000 0010 retAddr:2 = 0; pop(retAddr); return [retAddr]; } :RLCF srcFREG, D is op7=0x0d & D & srcFREG & destREG { # 0001 101d ffff ffff local tmpC = C; val:1 = srcFREG; C = (val s< 0); val = (val << 1) | tmpC; destREG = val; } :RLNCF srcFREG, D is op7=0x11 & D & srcFREG & destREG { # 0010 001d ffff ffff tmp:1 = srcFREG << 1; destREG = tmp; } :RRCF srcFREG, D is op7=0x0c & D & srcFREG & destREG { # 0001 100d ffff ffff local tmpC = C << 7; tmp:1 = srcFREG; C = (tmp & 1) != 0; tmp = (tmp >> 1) | tmpC; destREG = tmp; } :RRNCF srcFREG, D is op7=0x10 & D & srcFREG & destREG { # 0010 000d ffff ffff tmp:1 = srcFREG >> 1; destREG = tmp; } :SETF destFREG, S is op7=0x15 & s=0 & S & destFREG { # 0010 1010 ffff ffff destFREG = 0xff; WREG = 0xff; } :SETF destFREG, S is op7=0x15 & s=1 & S & destFREG { # 0010 1011 ffff ffff destFREG = 0xff; } :SETF fALUSTA, S is op7=0x15 & s=0 & S & fALUSTA { # 0010 1010 0000 0100 C = 1; DC = 1; Z = 1; OV = 1; FS10 = 0x3; FS32 = 0x3; WREG = 0xff; } :SETF fALUSTA, S is op7=0x15 & s=1 & S & fALUSTA { # 0010 1011 0000 0100 C = 1; DC = 1; Z = 1; OV = 1; FS10 = 0x3; FS32 = 0x3; } :SLEEP is op16=0x0003 { # 0000 0000 0000 0011 sleep(); } :SUBLW imm8 is op8=0xb2 & imm8 { # 1011 0010 kkkk kkkk tmp:1 = imm8; tmpW:1 = WREG; setSubtractFlags(tmp, tmpW); tmp = tmp - tmpW; WREG = tmp; setResultFlags(tmp); } :SUBWF srcFREG, D is op7=0x02 & D & srcFREG & destREG { # 0000 010d ffff ffff tmp:1 = srcFREG; tmpW:1 = WREG; setSubtractFlags(tmp, tmpW); tmp = tmp - tmpW; destREG 
= tmp; setResultFlags(tmp); } :SUBWFB srcFREG, D is op7=0x01 & D & srcFREG & destREG { # 0000 001d ffff ffff local notC = ~(C & 1); tmp:1 = srcFREG; tmpW:1 = WREG; setSubtractCFlags(tmp, tmpW); tmp = tmp - tmpW - notC; destREG = tmp; setResultFlags(tmp); } :SWAPF srcFREG, D is op7=0x0e & D & srcFREG & destREG { # 0001 110d ffff ffff tmp:1 = srcFREG; destREG = (tmp << 4) | (tmp >> 4); } :TABLRD T, I, destFREG is op6=0x2a & T & I & i & destFREG { # 1010 10ti ffff ffff destFREG = T; ptr:2 = TBLPTR; TBLAT = *[CODE]:2 ptr; TBLPTR = ptr + i; } :TABLWT T, I, srcFREG is op6=0x2b & T & I & i & srcFREG { # 1010 11ti ffff ffff T = srcFREG; ptr:2 = TBLPTR; *[CODE]:2 ptr = TBLAT; TBLPTR = ptr + i; } :TLRD T, destFREG is op6=0x28 & T & destFREG { # 1010 00tx ffff ffff destFREG = T; } :TLWT T, srcFREG is op6=0x29 & T & srcFREG { # 1010 01tx ffff ffff T = srcFREG; } :TSTFSZ srcFREG is op8=0x33 & srcFREG & skipInst { # 0011 0011 ffff ffff if (srcFREG == 0) goto skipInst; } :XORLW imm8 is op8=0xb4 & imm8 { # 1011 0100 kkkk kkkk tmp:1 = WREG ^ imm8; WREG = tmp; setResultFlags(tmp); } :XORWF srcFREG, D is op7=0x06 & D & srcFREG & destREG { # 0000 110d ffff ffff tmp:1 = WREG ^ srcFREG; destREG = tmp; setResultFlags(tmp); } ================================================ FILE: pypcode/processors/PIC/data/languages/pic18.cspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic18.ldefs ================================================ PIC-18 ================================================ FILE: pypcode/processors/PIC/data/languages/pic18.pspec ================================================ ================================================ FILE: pypcode/processors/PIC/data/languages/pic18.sinc ================================================ # # PIC-18 Main Section # includes constants, memory space and common register space definitions # @define SFR_BASE 0x0F80 @define BANK15_BASE 0x0F00 # 
STATUS bit definitions @define STATUS_N_BIT 4 @define STATUS_OV_BIT 3 @define STATUS_Z_BIT 2 @define STATUS_DC_BIT 1 @define STATUS_C_BIT 0 # STATUS bit masks used for clearing @define STATUS_N_CLEARMASK 0xEF @define STATUS_OV_CLEARMASK 0xF7 @define STATUS_Z_CLEARMASK 0xFB @define STATUS_DC_CLEARMASK 0xFD @define STATUS_C_CLEARMASK 0xFE @define STATUS_N_Z_MASK 0x14 # STACK bit defintions @define STKPTR_STKFUL_BIT 7 @define STKPTR_STKUNF_BIT 6 # STACK bit masks @define STKPTR_SP_MASK 0x1F @define STKPTR_NOT_SP_MASK 0xE0 @define STKPTR_STKFUL_MASK 0x80 @define STKPTR_STKUNF_MASK 0x40 define endian=little; define alignment=2; # Instruction Memory (ROM-based) define space CODE type=ram_space size=3 default; # General Purpose Register Memory consists of 16-banks of 255-bytes each define space DATA type=ram_space size=2; # The HWSTACK consists of a 31_word by 21_bit RAM and a corresponding 8_bit STKPTR register. # The real STKPTR register format is: # bit 7: Stack Full Flag (STKFUL) - See Note below # bit 6: Stack Underflow Flag (STKUNF) - See Note below # bit 5: # bit 4_0: stack pointer location within the 31_word by 21_bit # Each stack entry generally contains a 21_bit Program Counter value. # The top_of_stack entry (last push) may be accessed via the SFR registers TOSU, TOSH, and TOSL: # bit 20_16: TOSU # bit 15_8: TOSH # bit 7_0: TOSL # When accessing these top_of_stack registers, the global interrupts should/must be disabled. # # NOTE: This PIC-18 pcode implementation does not implement the STKFUL and STKUNF bits. # The entire STKPTR register is treated as an address offset into the stack space for simplification. # STKPTR value must be multiplied. 
# define space HWSTACK type=ram_space size=1; # implemented as independently addressable bytes (each location is 4-bytes wide) define space register type=register_space size=2; # Program Counter define register offset=0x0000 size=3 [ PC ]; # Bad Register (needed only for attach usage) define register offset=0x0003 size=1 [ BAD ]; # Stack Pointer define register offset=0x0004 size=1 [ STKPTR ]; # Status bit registers (these do not really exist and must get reflected into the STATUS byte register) define register offset=0x0005 size=1 [ N OV Z DC C ]; # Shadow registers (not visible) define register offset=0x000a size=1 [ WS STATUSS BSRS ]; ================================================ FILE: pypcode/processors/PIC/data/languages/pic18.slaspec ================================================ @define PROCESSOR "PIC_18" @include "pic18.sinc" # # NOTES - # 1. If a specific PIC-18 has a different register set, this file and the pic18.pspec file may be copied/renamed and # slightly modified to specify a the correct Register File Map. 
The following register definitions must be preserved: # STATUS, STKPTR, PCLAT (PCL, PCLATH, PCLATU), TOS (TOSL, TOSH, TOSU), FSR0 (FSR0L, FSR0H), FSR1 (FSR1L, FSR1H), FSR2 (FSR2L, FSR2H), # PROD (PRODL, PRODH) # define DATA offset=0x0f60 size=1 [ sfrF60 sfrF61 sfrF62 sfrF63 sfrF64 sfrF65 sfrF66 sfrF67 sfrF68 sfrF69 sfrF6A RCSTA2 TXSTA2 TXREG2 RCREG2 SPBREG2 CCP5CON CCP5RL CCPR5H CCP4CON CCPR4L CCPR4H T4CON PR4 TMR4 sfrF79 sfrF7A sfrF7B sfrF7C sfrF7D sfrF7E sfrF7F PORTA PORTB PORTC PORTD PORTE PORTF PORTG PORTH PORTJ LATA LATB LATC LATD LATE LATF LATG LATH LATJ TRISA TRISB TRISC TRISD TRISE TRISF TRISG TRISH TRISJ sfrF9B MEMCON PIE1 PIR1 IPR1 PIE2 PIR2 IPR2 PIE3 PIR3 IPR3 EECON1 EECON2 EEDATA EEADR EEADRH RCSTA1 TXSTA1 TXREG1 RCREG1 SPBRG1 PSPCON T3CON TMR3L TMR3H CMCON CVRCON sfrFB6 CCP3CON CCP3RL CCP3RH CCP2CON CCPR2L CCPR2H CCP1CON CCPR1L CCPR1H ADCON2 ADCON1 ADCON0 ADRESL ADRESH SSPCON2 SSPCON1 SSPSTAT SSPADD SSPBUF T2CON PR2 TMR2 T1CON TMR1L TMR1H RCON WDTCON LVDCON OSCCON sfrFD4 T0CON TMR0L TMR0H STATUS FSR2L FSR2H PLUSW2 PREINC2 POSTDEC2 POSTINC2 INDF2 BSR FSR1L FSR1H PLUSW1 PREINC1 POSTDEC1 POSTINC1 INDF1 WREG FSR0L FSR0H PLUSW0 PREINC0 POSTDEC0 POSTINC0 INDF0 INTCON3 INTCON2 INTCON PRODL PRODH TABLAT TBLPTRL TBLPTRH TBLPTRU PCL PCLATH PCLATU .STKPTR TOSL TOSH TOSU ]; define DATA offset=0x0fbb size=2 [ CCPR2 ]; define DATA offset=0x0fbe size=2 [ CCPR1 ]; define DATA offset=0x0fc3 size=2 [ ADRES ]; define DATA offset=0x0fce size=2 [ TMR1 ]; define DATA offset=0x0fd6 size=2 [ TMR0 ]; define DATA offset=0x0fd9 size=2 [ FSR2 ]; define DATA offset=0x0fe1 size=2 [ FSR1 ]; define DATA offset=0x0fe9 size=2 [ FSR0 ]; define DATA offset=0x0ff3 size=2 [ PROD ]; define DATA offset=0x0ff6 size=3 [ TBLPTR ]; define DATA offset=0x0ff9 size=3 [ PCLAT ]; define DATA offset=0x0ffd size=3 [ TOS ]; @include "pic18_instructions.sinc" ================================================ FILE: pypcode/processors/PIC/data/languages/pic18_instructions.sinc 
================================================ # # PIC-18 Instruction Section # includes token definitions, macros, sub-constructors and instruction definitions # # The bytes are imported from the file in a 16-bit little-endian word format. This, combined # with the little-endian mode used by this language results in an unusual instruction format. # The 16-bit instruction token uses what appears to be big endian bit numbering, whereas the # 32-bit instruction token uses somewhat of a hybrid bit numbering (see below). # 16-bit instruction token uses big-endian bit numbering which agrees with # instruction bit numbering with PIC documentation. # 15-14-13-12-11-10-9-8-7-6-5-4-3-2-1-0 define token instr16(16) op4 = (12,15) op5 = (11,15) op6 = (10,15) op8 = (8,15) op12 = (4,15) op16 = (0,15) d = (9,9) a = (8,8) _xfsr = (6,7) xfsr = (6,7) f8_57 = (5,7) f8 = (0,7) freg = (0,7) b3 = (9,11) k8 = (0,7) k6 = (0,5) n11 = (0,10) signed n8 = (0,7) signed s_0 = (0,0) ; # 32-bit instruction token uses a hybrid bit numbering: # Natural bit numbering (used by documentation): # 31-30-29-28-27-26-25-24-23-22-21-20-19-18-17-16-15-14-13-12-11-10-09-08-07-06-05-04-03-02-01-00 # Hybrid bit nubering used by token: # 15-14-13-12-11-10-09-08-07-06-05-04-03-02-01-00-31-30-29-28-27-26-25-24-23-22-21-20-19-18-17-16 define token instr32(32) lop4 = (12,15) lop5 = (11,15) lop7 = (9,15) lop8 = (8,15) lop9 = (7,15) lop10 = (6,15) _fsr = (4,5) fsr = (4,5) kh = (0,3) s_8 = (8,8) n20_l = (0,7) # low order 8 bits of n20 zs = (0,6) fs_h = (8,11) fs_57 = (5,7) fs = (0,11) fsreg = (0,7) qual4 = (28,31) qual8 = (24,31) fd_h = (24,27) fd_57 = (21,23) fd = (16,27) fdreg = (16,23) kl = (16,23) n20_h = (16,27) # high order 12 bits of n20 zd = (16,22) ; attach variables [ freg fsreg fdreg ] [ BAD _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ sfrF60 sfrF61 sfrF62 sfrF63 
sfrF64 sfrF65 sfrF66 sfrF67 sfrF68 sfrF69 sfrF6A RCSTA2 TXSTA2 TXREG2 RCREG2 SPBREG2 CCP5CON CCP5RL CCPR5H CCP4CON CCPR4L CCPR4H T4CON PR4 TMR4 sfrF79 sfrF7A sfrF7B sfrF7C sfrF7D sfrF7E sfrF7F PORTA PORTB PORTC PORTD PORTE PORTF PORTG PORTH PORTJ LATA LATB LATC LATD LATE LATF LATG LATH LATJ TRISA TRISB TRISC TRISD TRISE TRISF TRISG TRISH TRISJ sfrF9B MEMCON PIE1 PIR1 IPR1 PIE2 PIR2 IPR2 PIE3 PIR3 IPR3 EECON1 EECON2 EEDATA EEADR EEADRH RCSTA1 TXSTA1 TXREG1 RCREG1 SPBRG1 PSPCON T3CON TMR3L TMR3H CMCON CVRCON sfrFB6 CCP3CON CCP3RL CCP3RH CCP2CON CCPR2L CCPR2H CCP1CON CCPR1L CCPR1H ADCON2 ADCON1 ADCON0 ADRESL ADRESH SSPCON2 SSPCON1 SSPSTAT SSPADD SSPBUF T2CON PR2 TMR2 T1CON TMR1L TMR1H RCON WDTCON LVDCON OSCCON sfrFD4 T0CON TMR0L TMR0H STATUS FSR2L FSR2H PLUSW2 PREINC2 POSTDEC2 POSTINC2 INDF2 BSR FSR1L FSR1H PLUSW1 PREINC1 POSTDEC1 POSTINC1 INDF1 WREG FSR0L FSR0H PLUSW0 PREINC0 POSTDEC0 POSTINC0 INDF0 INTCON3 INTCON2 INTCON PRODL PRODH TABLAT TBLPTRL TBLPTRH TBLPTRU PCL PCLATH PCLATU STKPTR TOSL TOSH TOSU ]; # attach variables [ _fsr _xfsr ] [ FSR0 FSR1 FSR2 _ ]; # # Special PIC-18 Operations # # If stack is full (SP==0x1F) STKFUL gets set, if STVREN is set the processor will reset #define pcodeop CheckStackFull; # If stack is empty (SP==0) STKUNF gets set, if STVREN is set the processor will reset #define pcodeop CheckStackUnderflow; # Return a decimal adjusted value for the value provided (see DAW instruction) define pcodeop decimalAdjust; # Perform a Master Clear Reset define pcodeop reset; define pcodeop clearWatchDogTimer; define pcodeop sleep; # # MACROS # macro setResultFlags(result) { N = (result s< 0); Z = (result == 0); } macro setAddCOverflowFlag(op1,op2) { local tmpC = C & 1; OV = scarry(op1,tmpC) || scarry(op2,op1 + tmpC); } macro setAddCCarryFlag(op1,op2) { local tmpC = C & 1; C = carry(op1,tmpC) || carry(op2,op1 + tmpC); } macro setAddCDigitCarryFlag(op1,op2) { # op1 and op2 are assumed to be 8-bit values local tmp1 = op1 << 4; local tmp2 = op2 << 4; 
local tmpDC = DC & 1; DC = carry(tmp1,tmpDC) || carry(tmp2,tmp1 + tmpDC); } macro setAddCFlags(op1,op2) { setAddCCarryFlag(op1,op2); setAddCDigitCarryFlag(op1,op2); setAddCOverflowFlag(op1,op2); } macro setAddFlags(op1,op2) { C = carry(op1,op2); DC = carry(op1<<4,op2<<4); OV = scarry(op1,op2); } macro setSubtractCOverflowFlag(op1,op2) { local notC = ~(C & 1); OV = sborrow(op1,notC) || sborrow(op2,op1 - notC); } macro setSubtractCCarryFlag(op1,op2) { local notC = ~(C & 1); C = (op1 < notC) || (op2 < (op1 - notC)); } macro setSubtractCDigitCarryFlag(op1,op2) { # op1 and op2 are assumed to be 8-bit values local notDC = ~(DC & 1); local tmp1 = op1 << 4; local tmp2 = op2 << 4; local tmp3 = (tmp1 - notDC) << 4; DC = (tmp1 < notDC) || (tmp2 < tmp3); } macro setSubtractCFlags(op1,op2) { setSubtractCCarryFlag(op1,op2); setSubtractCDigitCarryFlag(op1,op2); setSubtractCOverflowFlag(op1,op2); } macro setSubtractFlags(op1,op2) { # op1 and op2 are assumed to be 8-bit values # NOTE: carry flag is SET if there is NO borrow C = (op1 >= op2); DC = ((op1<<4) < (op2<<4)); OV = sborrow(op1,op2); } macro push(val) { # TODO: Uncertain about this !! # CheckStackFull(); *[HWSTACK]:4 STKPTR = val; STKPTR = STKPTR + 4; } macro pop(rval) { # TODO: Uncertain about this !! # CheckStackUnderflow(); STKPTR = STKPTR - 4; rval = *[HWSTACK]:4 STKPTR; } # # SUB-CONSTRUCTORS # # PC register write - instruction must set PCLATH/PCL and perform branch operation pcl: "PC" is a=0 & f8=0xf9 { export PCL; } # STATUS register status: freg is a=0 & f8=0xd8 & freg { export STATUS; } # File Register specified by an 8-bit file register offset within Bank specified by BSR fREGLoc: f8 is a=1 & f8 { # (Banked mode) addr:2 = (zext(BSR) << 8) + f8; export *[DATA]:1 addr; } # File Register specified by an 8-bit offset within "Access Bank" # The partitioning of the access bank may differ between specific PIC18 chips. 
# Some partition at 0x80 (i.e., PIC18Cxx2), while more advanced PIC18 chips partition at 0x60 (i.e., PIC18Fxx20). # This implementation partitions the access bank mode at 0x60. # # TODO: Need to add another language PIC18C that has the SFR start at 0xf80 # fREGLoc: f8 is a=0 & f8_57=0x0 & f8 { export *[DATA]:1 f8; } # 0x00-0x1f (Access mode) fREGLoc: f8 is a=0 & f8_57=0x1 & f8 { export *[DATA]:1 f8; } # 0x20-0x3f (Access mode) fREGLoc: f8 is a=0 & f8_57=0x2 & f8 { export *[DATA]:1 f8; } # 0x40-0x5f (Access mode) fREGLoc: freg is a=0 & freg { export freg; } # 0xf60-0xfff (Access mode) # TOSL - access mirrored into stack space using STKPTR fREGLoc: freg is a=0 & f8=0xfd & freg { addr:1 = STKPTR + 1; export *[HWSTACK]:1 addr; } # TOSH - access mirrored into stack space using STKPTR fREGLoc: freg is a=0 & f8=0xfe & freg { addr:1 = STKPTR + 2; export *[HWSTACK]:1 addr; } # TOSU - access mirrored into stack space using STKPTR fREGLoc: freg is a=0 & f8=0xff & freg { addr:1 = STKPTR + 3; export *[HWSTACK]:1 addr; } # Indirect File Register access - INDF0 fREGLoc: freg is a=0 & f8=0xef & freg { addr:2 = FSR0; export *[DATA]:1 addr; } # Indirect File Register access - INDF1 fREGLoc: freg is a=0 & f8=0xe7 & freg { addr:2 = FSR1; export *[DATA]:1 addr; } # Indirect File Register access - INDF2 fREGLoc: freg is a=0 & f8=0xdf & freg { addr:2 = FSR2; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC0 fREGLoc: freg is a=0 & f8=0xee & freg { addr:2 = FSR0; FSR0 = FSR0 + 1; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC1 fREGLoc: freg is a=0 & f8=0xe6 & freg { addr:2 = FSR1; FSR1 = FSR1 + 1; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC2 fREGLoc: freg is a=0 & f8=0xde & freg { addr:2 = FSR2; FSR2 = FSR2 + 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC0 fREGLoc: freg is a=0 & f8=0xed & freg { addr:2 = FSR0; FSR0 = FSR0 - 1; export *[DATA]:1 addr; } # Post-decrement File Register 
access - POSTDEC1 fREGLoc: freg is a=0 & f8=0xe5 & freg { addr:2 = FSR1; FSR1 = FSR1 - 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC2 fREGLoc: freg is a=0 & f8=0xdd & freg { addr:2 = FSR2; FSR2 = FSR2 - 1; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC0 fREGLoc: freg is a=0 & f8=0xec & freg { FSR0 = FSR0 + 1; addr:2 = FSR0; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC1 fREGLoc: freg is a=0 & f8=0xe4 & freg { FSR1 = FSR1 + 1; addr:2 = FSR1; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC2 fREGLoc: freg is a=0 & f8=0xdc & freg { FSR2 = FSR2 + 1; addr:2 = FSR2; export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW0 fREGLoc: freg is a=0 & f8=0xeb & freg { FSR0 = FSR0 + 1; addr:2 = FSR0 + sext(WREG); export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW1 fREGLoc: freg is a=0 & f8=0xe3 & freg { FSR1 = FSR1 + 1; addr:2 = FSR1 + sext(WREG); export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW2 fREGLoc: freg is a=0 & f8=0xdb & freg { FSR2 = FSR2 + 1; addr:2 = FSR2 + sext(WREG); export *[DATA]:1 addr; } # Direct File register data srcREG: fREGLoc is fREGLoc { export fREGLoc; } # PCL read - latch PC into PCL, PCLATH, and PCLATU srcREG: "PC" is a=0 & f8=0xf9 { PCLAT = inst_start; export PCL; } # Destination register (either srcREG or WREG) destREG: "0" is d=0 { export WREG; } destREG: "1" is d=1 & srcREG { export srcREG; } #destREG: "1" is d=1 & f8=0xf9 { # # Storing to PCL must write the PC using both the stored PCL (PC<7:0>), PCLATH (PC<15:8>) and PCLATU (PC<21:16>) # # The ADDWF and MOVWF definitions below have a specific case to handle this write to PCL # export PCL; #} # Destination operand representation (w: W register is destination; f: specified fREG is destination) D: "w" is d=0 { } D: "f" is d=1 { } # Source File Registers specified by a 12-bit absolute offsets within 32-bit 
instriction srcREG32: fs is fs { export *[DATA]:1 fs; } # 0x000-0xeff srcREG32: fs is fs_h=0xf & fs_57=0 & fs { export *[DATA]:1 fs; } # 0xf00-0xf1f srcREG32: fs is fs_h=0xf & fs_57=1 & fs { export *[DATA]:1 fs; } # 0xf20-0xf3f srcREG32: fs is fs_h=0xf & fs_57=2 & fs { export *[DATA]:1 fs; } # 0xf40-0xf5f srcREG32: fsreg is fs_h=0xf & fsreg { export fsreg; } # 0xf60-0xfff # PCL read - latch PC into PCL, PCLATH, and PCLATU srcREG32: "PC" is fs=0xff9 { PCLAT = inst_start; export PCL; } # TOSL - access mirrored into stack space using STKPTR srcREG32: fsreg is fs=0xffd & fsreg { addr:1 = STKPTR + 1; export *[HWSTACK]:1 addr; } # TOSH - access mirrored into stack space using STKPTR srcREG32: fsreg is fs=0xffe & fsreg { addr:1 = STKPTR + 2; export *[HWSTACK]:1 addr; } # TOSU - access mirrored into stack space using STKPTR srcREG32: fsreg is fs=0xfff & fsreg { addr:1 = STKPTR + 3; export *[HWSTACK]:1 addr; } # Indirect File Register access - INDF0 srcREG32: fsreg is fs=0xfef & fsreg { addr:2 = FSR0; export *[DATA]:1 addr; } # Indirect File Register access - INDF1 srcREG32: fsreg is fs=0xfe7 & fsreg { addr:2 = FSR1; export *[DATA]:1 addr; } # Indirect File Register access - INDF2 srcREG32: fsreg is fs=0xfdf & fsreg { addr:2 = FSR2; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC0 srcREG32: fsreg is fs=0xfee & fsreg { addr:2 = FSR0; FSR0 = FSR0 + 1; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC1 srcREG32: fsreg is fs=0xfe6 & fsreg { addr:2 = FSR1; FSR1 = FSR1 + 1; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC2 srcREG32: fsreg is fs=0xfde & fsreg { addr:2 = FSR2; FSR2 = FSR2 + 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC0 srcREG32: fsreg is fs=0xfed & fsreg { addr:2 = FSR0; FSR0 = FSR0 - 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC1 srcREG32: fsreg is fs=0xfe5 & fsreg { addr:2 = FSR1; FSR1 = FSR1 - 1; export *[DATA]:1 addr; } # 
Post-decrement File Register access - POSTDEC2 srcREG32: fsreg is fs=0xfdd & fsreg { addr:2 = FSR2; FSR2 = FSR2 - 1; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC0 srcREG32: fsreg is fs=0xfec & fsreg { FSR0 = FSR0 + 1; addr:2 = FSR0; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC1 srcREG32: fsreg is fs=0xfe4 & fsreg { FSR1 = FSR1 + 1; addr:2 = FSR1; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC2 srcREG32: fsreg is fs=0xfdc & fsreg { FSR2 = FSR2 + 1; addr:2 = FSR2; export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW0 srcREG32: fsreg is fs=0xfeb & fsreg { FSR0 = FSR0 + 1; addr:2 = FSR0 + sext(WREG); export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW1 srcREG32: fsreg is fs=0xfe3 & fsreg { FSR1 = FSR1 + 1; addr:2 = FSR1 + sext(WREG); export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW2 srcREG32: fsreg is fs=0xfdb & fsreg { FSR2 = FSR2 + 1; addr:2 = FSR2 + sext(WREG); export *[DATA]:1 addr; } # Destination File Registers specified by a 12-bit absolute offsets within 32-bit instriction destREG32: fd is fd { export *[DATA]:1 fd; } # 0x000-0xeff destREG32: fd is fd_h=0xf & fd_57=0 & fd { export *[DATA]:1 fd; } # 0xf00-0xf1f destREG32: fd is fd_h=0xf & fd_57=1 & fd { export *[DATA]:1 fd; } # 0xf20-0xf3f destREG32: fd is fd_h=0xf & fd_57=2 & fd { export *[DATA]:1 fd; } # 0xf40-0xf5f destREG32: fdreg is fd_h=0xf & fdreg { export fdreg; } # 0xf60-0xfff #destREG32: "PCL" is fd=0xff9 { # # Storing to PCL must write the PC using both the stored PCL (PC<7:0>), PCLATH (PC<15:8>) and PCLATU (PC<21:16>) # # The MOVFF and MOVSF definitions below have a specific case to handle this write to PCL # export PCL; #} # TOSL - access mirrored into stack space using STKPTR destREG32: fdreg is fd=0xffd & fdreg { addr:1 = STKPTR + 1; export *[HWSTACK]:1 addr; } # TOSH - access mirrored into stack space using STKPTR destREG32: 
fdreg is fd=0xffe & fdreg { addr:1 = STKPTR + 2; export *[HWSTACK]:1 addr; } # TOSU - access mirrored into stack space using STKPTR destREG32: fdreg is fd=0xfff & fdreg { addr:1 = STKPTR + 3; export *[HWSTACK]:1 addr; } # Indirect File Register access - INDF0 destREG32: fdreg is fd=0xfef & fdreg { addr:2 = FSR0; export *[DATA]:1 addr; } # Indirect File Register access - INDF1 destREG32: fdreg is fd=0xfe7 & fdreg { addr:2 = FSR1; export *[DATA]:1 addr; } # Indirect File Register access - INDF2 destREG32: fdreg is fd=0xfdf & fdreg { addr:2 = FSR2; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC0 destREG32: fdreg is fd=0xfee & fdreg { addr:2 = FSR0; FSR0 = FSR0 + 1; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC1 destREG32: fdreg is fd=0xfe6 & fdreg { addr:2 = FSR1; FSR1 = FSR1 + 1; export *[DATA]:1 addr; } # Post-increment File Register access - POSTINC2 destREG32: fdreg is fd=0xfde & fdreg { addr:2 = FSR2; FSR2 = FSR2 + 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC0 destREG32: fdreg is fd=0xfed & fdreg { addr:2 = FSR0; FSR0 = FSR0 - 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC1 destREG32: fdreg is fd=0xfe5 & fdreg { addr:2 = FSR1; FSR1 = FSR1 - 1; export *[DATA]:1 addr; } # Post-decrement File Register access - POSTDEC2 destREG32: fdreg is fd=0xfdd & fdreg { addr:2 = FSR2; FSR2 = FSR2 - 1; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC0 destREG32: fdreg is fd=0xfec & fdreg { FSR0 = FSR0 + 1; addr:2 = FSR0; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC1 destREG32: fdreg is fd=0xfe4 & fdreg { FSR1 = FSR1 + 1; addr:2 = FSR1; export *[DATA]:1 addr; } # Pre-increment File Register access - PREINC2 destREG32: fdreg is fd=0xfdc & fdreg { FSR2 = FSR2 + 1; addr:2 = FSR2; export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW0 destREG32: fdreg is fd=0xfeb & fdreg { FSR0 = FSR0 + 1; addr:2 
= FSR0 + sext(WREG); export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW1 destREG32: fdreg is fd=0xfe3 & fdreg { FSR1 = FSR1 + 1; addr:2 = FSR1 + sext(WREG); export *[DATA]:1 addr; } # Pre-increment w/WREG-Offset File Register access - PLUSW2 destREG32: fdreg is fd=0xfdb & fdreg { FSR2 = FSR2 + 1; addr:2 = FSR2 + sext(WREG); export *[DATA]:1 addr; } # Absolute 20-bit instruction location constructed from nl:8 and nh:12 absAddr21: nLoc is n20_h & n20_l [ nLoc = (n20_h << 9) + (n20_l << 1); ] { export *[CODE]:2 nLoc; } # Relative 8-bit and 11-bit instruction offsets relAddr8: nLoc is n8 [ nLoc = inst_next + (n8 << 1); ] { export *[CODE]:2 nLoc; } relAddr11: nLoc is n11 [ nLoc = inst_next + (n11 << 1); ] { export *[CODE]:2 nLoc; } # Skip instruction address (could jump into middle of 32-bit instruction which appears as NOP) skipInst: inst_skip is op16 [ inst_skip = inst_next + 2; ] {export *[CODE]:2 inst_skip; } # Immediate Data (Literal operation) imm6: "#"k6 is k6 { export *[const]:1 k6; } imm8: "#"k8 is k8 { export *[const]:1 k8; } imm12: "#"kVal is kl & kh [ kVal = (kh << 8) + kl; ] { export *[const]:2 kVal; } # Bit identifier bit: "#"b3 is b3 { export *[const]:1 b3; } # FSR Register (see LFSR) FSRn: "FSR0" is fsr=0 & _fsr { export FSR0; } FSRn: "FSR1" is fsr=1 & _fsr { export FSR1; } FSRn: "FSR2" is fsr=2 & _fsr { export FSR2; } # FSR Register (see Extended Instructions) xFSRn: "FSR0" is xfsr=0 & _xfsr { export FSR0; } xFSRn: "FSR1" is xfsr=1 & _xfsr { export FSR1; } xFSRn: "FSR2" is xfsr=2 & _xfsr { export FSR2; } # Source and Destination FSR2 Indexed Operand ZS: zs"[FSR2]" is zs { fsLoc:2 = FSR2 + zs; export *[DATA]:1 fsLoc; } ZD: zd"[FSR2]" is zd { fdLoc:2 = FSR2 + zd; export *[DATA]:1 fdLoc; } # Access Bank mode A: "ACCESS" is a=0 { } A: "BANKED" is a=1 { } # # BYTE-ORIENTED FILE REGISTER OPERATIONS # :ADDWF srcREG, D, A is op6=0x09 & srcREG & A & destREG & D { # 0010 01da ffff ffff # 0010 0100 0000 0000 -> ADDWF DAT_DATA_0000, 
w, ACCESS # 0010 0101 0000 0000 -> ADDWF REG0x0, w, BANKED # 0010 0100 1101 1000 -> ADDWF STATUS, w, ACCESS # 0010 0101 1101 1000 -> ADDWF REG0xD8, w, BANKED # 0010 0110 0000 0000 -> ADDWF DAT_DATA_0000, f, ACCESS # 0010 0111 0000 0000 -> ADDWF REG0x0, f, BANKED # 0010 0110 1101 1000 -> ADDWF STATUS, f, ACCESS # 0010 0111 1101 1000 -> ADDWF REG0xD8, f, BANKED # 0010 0100 1111 1001 -> ADDWF PC, w, ACCESS tmp:1 = srcREG; # read only once! setAddFlags(tmp, WREG); tmp = tmp + WREG; destREG = tmp; setResultFlags(tmp); } :ADDWF pcl, D, A is op6=0x09 & A & D & d=1 & pcl { # 0010 01da ffff ffff # 0010 0110 1111 1001 -> ADDWF PC, f, ACCESS addr:3 = inst_start; PCLAT = addr; addrHi:2 = addr(1); addrLo:1 = addr:1; tmpW:1 = WREG; setAddFlags(addrLo, tmpW); addrLo = addrLo + tmpW; addr = (zext(addrHi) << 8) + zext(addrLo); setResultFlags(addrLo); goto [addr]; } :ADDWFC srcREG, D, A is op6=0x08 & srcREG & destREG & D & A { # 0010 00da ffff ffff # 0010 0000 0000 0000 -> ADDWFC DAT_DATA_0000, w, ACCESS # 0010 0001 0000 0000 -> ADDWFC REG0x0, w, BANKED # 0010 0000 1101 1000 -> ADDWFC STATUS, w, ACCESS # 0010 0001 1101 1000 -> ADDWFC REG0xD8, w, BANKED # 0010 0010 0000 0000 -> ADDWFC DAT_DATA_0000, f, ACCESS # 0010 0011 0000 0000 -> ADDWFC REG0x0, f, BANKED # 0010 0010 1101 1000 -> ADDWFC STATUS, f, ACCESS # 0010 0011 1101 1000 -> ADDWFC REG0xD8, f, BANKED local tmpC = C & 1; tmp:1 = srcREG; setAddCFlags(tmp, WREG); tmp = tmp + WREG + tmpC; destREG = tmp; setResultFlags(tmp); } :ANDWF srcREG, D, A is op6=0x05 & srcREG & destREG & D & A { # 0001 01da ffff ffff # 0001 0100 0000 0000 -> ANDWF DAT_DATA_0000, w, ACCESS # 0001 0101 0000 0000 -> ANDWF REG0x0, w, BANKED # 0001 0100 1101 1000 -> ANDWF STATUS, w, ACCESS # 0001 0101 1101 1000 -> ANDWF REG0xD8, w, BANKED # 0001 0110 0000 0000 -> ANDWF DAT_DATA_0000, f, ACCESS # 0001 0111 0000 0000 -> ANDWF REG0x0, f, BANKED # 0001 0110 1101 1000 -> ANDWF STATUS, f, ACCESS # 0001 0111 1101 1000 -> ANDWF REG0xD8, f, BANKED tmp:1 = srcREG & WREG; 
destREG = tmp; setResultFlags(tmp); } :CLRF srcREG, A is op6=0x1a & d=1 & srcREG & A { # 0110 101a ffff ffff # 0110 1010 0000 0000 -> CLRF DAT_DATA_0000, 0 # 0110 1011 0000 0000 -> CLRF REG0x0, 1 # 0110 1010 1101 1000 -> CLRF STATUS, 0 # 0110 1011 1101 1000 -> CLRF REG0xD8, 1 srcREG = 0; Z = 1; } :COMF srcREG, D, A is op6=0x07 & srcREG & destREG & D & A { # 0001 11da ffff ffff # 0001 1100 0000 0000 -> COMF DAT_DATA_0000, w, ACCESS # 0001 1101 0000 0000 -> COMF REG0x0, w, BANKED # 0001 1100 1101 1000 -> COMF STATUS, w, ACCESS # 0001 1101 1101 1000 -> COMF REG0xD8, w, BANKED # 0001 1110 0000 0000 -> COMF DAT_DATA_0000, f, ACCESS # 0001 1111 0000 0000 -> COMF REG0x0, f, BANKED # 0001 1110 1101 1000 -> COMF STATUS, f, ACCESS # 0001 1111 1101 1000 -> COMF REG0xD8, f, BANKED tmp:1 = ~srcREG; destREG = tmp; setResultFlags(tmp); } :CPFSEQ srcREG, A is op6=0x18 & d=1 & srcREG & A & skipInst { # 0110 001a ffff ffff # 0110 0010 0000 0000 -> CPFSEQ DAT_DATA_0000, 0 # 0110 0011 0000 0000 -> CPFSEQ REG0x0, 1 # 0110 0010 1101 1000 -> CPFSEQ STATUS, 0 # 0110 0011 1101 1000 -> CPFSEQ REG0xD8, 1 if (srcREG == WREG) goto skipInst; } :CPFSGT srcREG, A is op6=0x19 & d=0 & srcREG & A & skipInst { # 0110 010a ffff ffff # 0110 0100 0000 0000 -> CPFSGT DAT_DATA_0000, 0 # 0110 0101 0000 0000 -> CPFSGT REG0x0, 1 # 0110 0100 1101 1000 -> CPFSGT STATUS, 0 # 0110 0101 1101 1000 -> CPFSGT REG0xD8, 1 if (srcREG > WREG) goto skipInst; } :CPFSLT srcREG, A is op6=0x18 & d=0 & srcREG & A & skipInst { # 0110 000a ffff ffff # 0110 0000 0000 0000 -> CPFSLT DAT_DATA_0000, 0 # 0110 0001 0000 0000 -> CPFSLT REG0x0, 1 # 0110 0000 1101 1000 -> CPFSLT STATUS, 0 # 0110 0001 1101 1000 -> CPFSLT REG0xD8, 1 if (srcREG < WREG) goto skipInst; } :DECF srcREG, D, A is op6=0x01 & srcREG & destREG & D & A { # 0000 01da ffff ffff # 0000 0100 0000 0000 -> DECF DAT_DATA_0000, w, ACCESS # 0000 0101 0000 0000 -> DECF REG0x0, w, BANKED # 0000 0100 1101 1000 -> DECF STATUS, w, ACCESS # 0000 0101 1101 1000 -> DECF REG0xD8, w, 
BANKED # 0000 0110 0000 0000 -> DECF DAT_DATA_0000, f, ACCESS # 0000 0111 0000 0000 -> DECF REG0x0, f, BANKED # 0000 0110 1101 1000 -> DECF STATUS, f, ACCESS # 0000 0111 1101 1000 -> DECF REG0xD8, f, BANKED tmp:1 = srcREG; setSubtractFlags(tmp, 1); tmp = tmp - 1; destREG = tmp; setResultFlags(tmp); } :DECFSZ srcREG, D, A is op6=0x0b & srcREG & destREG & D & A & skipInst { # 0010 11da ffff ffff # 0010 1100 0000 0000 -> DECFSZ DAT_DATA_0000, w, ACCESS # 0010 1101 0000 0000 -> DECFSZ REG0x0, w, BANKED # 0010 1100 1101 1000 -> DECFSZ STATUS, w, ACCESS # 0010 1101 1101 1000 -> DECFSZ REG0xD8, w, BANKED # 0010 1110 0000 0000 -> DECFSZ DAT_DATA_0000, f, ACCESS # 0010 1111 0000 0000 -> DECFSZ REG0x0, f, BANKED # 0010 1110 1101 1000 -> DECFSZ STATUS, f, ACCESS # 0010 1111 1101 1000 -> DECFSZ REG0xD8, f, BANKED tmp:1 = srcREG - 1; destREG = tmp; if (tmp == 0) goto skipInst; } :DCFSNZ srcREG, D, A is op6=0x13 & srcREG & destREG & D & A & skipInst { # 0100 11da ffff ffff # 0100 1100 0000 0000 -> DCFSNZ DAT_DATA_0000, w, ACCESS # 0100 1101 0000 0000 -> DCFSNZ REG0x0, w, BANKED # 0100 1100 1101 1000 -> DCFSNZ STATUS, w, ACCESS # 0100 1101 1101 1000 -> DCFSNZ REG0xD8, w, BANKED # 0100 1110 0000 0000 -> DCFSNZ DAT_DATA_0000, f, ACCESS # 0100 1111 0000 0000 -> DCFSNZ REG0x0, f, BANKED # 0100 1110 1101 1000 -> DCFSNZ STATUS, f, ACCESS # 0100 1111 1101 1000 -> DCFSNZ REG0xD8, f, BANKED tmp:1 = srcREG - 1; destREG = tmp; if (tmp != 0) goto skipInst; } :INCF srcREG, D, A is op6=0x0a & srcREG & destREG & D & A { # 0010 10da ffff ffff # 0010 1000 0000 0000 -> INCF DAT_DATA_0000, w, ACCESS # 0010 1001 0000 0000 -> INCF REG0x0, w, BANKED # 0010 1000 1101 1000 -> INCF STATUS, w, ACCESS # 0010 1001 1101 1000 -> INCF REG0xD8, w, BANKED # 0010 1010 0000 0000 -> INCF DAT_DATA_0000, f, ACCESS # 0010 1011 0000 0000 -> INCF REG0x0, f, BANKED # 0010 1010 1101 1000 -> INCF STATUS, f, ACCESS # 0010 1011 1101 1000 -> INCF REG0xD8, f, BANKED tmp:1 = srcREG; # read once only! 
setAddFlags(tmp, 1); tmp = tmp + 1; destREG = tmp; setResultFlags(tmp); } :INCFSZ srcREG, D, A is op6=0x0f & srcREG & destREG & D & A & skipInst { # 0011 11da ffff ffff # 0011 1100 0000 0000 -> INCFSZ DAT_DATA_0000, w, ACCESS # 0011 1101 0000 0000 -> INCFSZ REG0x0, w, BANKED # 0011 1100 1101 1000 -> INCFSZ STATUS, w, ACCESS # 0011 1101 1101 1000 -> INCFSZ REG0xD8, w, BANKED # 0011 1110 0000 0000 -> INCFSZ DAT_DATA_0000, f, ACCESS # 0011 1111 0000 0000 -> INCFSZ REG0x0, f, BANKED # 0011 1110 1101 1000 -> INCFSZ STATUS, f, ACCESS # 0011 1111 1101 1000 -> INCFSZ REG0xD8, f, BANKED tmp:1 = srcREG + 1; destREG = tmp; if (tmp == 0) goto skipInst; } :INFSNZ srcREG, D, A is op6=0x12 & srcREG & destREG & D & A & skipInst { # 0100 10da ffff ffff # 0100 1000 0000 0000 -> INFSNZ DAT_DATA_0000, w, ACCESS # 0100 1001 0000 0000 -> INFSNZ REG0x0, w, BANKED # 0100 1000 1101 1000 -> INFSNZ STATUS, w, ACCESS # 0100 1001 1101 1000 -> INFSNZ REG0xD8, w, BANKED # 0100 1010 0000 0000 -> INFSNZ DAT_DATA_0000, f, ACCESS # 0100 1011 0000 0000 -> INFSNZ REG0x0, f, BANKED # 0100 1010 1101 1000 -> INFSNZ STATUS, f, ACCESS # 0100 1011 1101 1000 -> INFSNZ REG0xD8, f, BANKED tmp:1 = srcREG + 1; destREG = tmp; if (tmp != 0) goto skipInst; } :IORWF srcREG, D, A is op6=0x04 & srcREG & destREG & D & A { # 0001 00da ffff ffff # 0001 0000 0000 0000 -> IORWF DAT_DATA_0000, w, ACCESS # 0001 0001 0000 0000 -> IORWF REG0x0, w, BANKED # 0001 0000 1101 1000 -> IORWF STATUS, w, ACCESS # 0001 0001 1101 1000 -> IORWF REG0xD8, w, BANKED # 0001 0010 0000 0000 -> IORWF DAT_DATA_0000, f, ACCESS # 0001 0011 0000 0000 -> IORWF REG0x0, f, BANKED # 0001 0010 1101 1000 -> IORWF STATUS, f, ACCESS # 0001 0011 1101 1000 -> IORWF REG0xD8, f, BANKED tmp:1 = srcREG | WREG; destREG = tmp; setResultFlags(tmp); } :MOVF srcREG, D, A is op6=0x14 & srcREG & destREG & D & A { # 0101 00da ffff ffff # 0101 0000 0000 0000 -> MOVF DAT_DATA_0000, w, ACCESS # 0101 0001 0000 0000 -> MOVF REG0x0, w, BANKED # 0101 0000 1101 1000 -> MOVF 
STATUS, w, ACCESS # 0101 0001 1101 1000 -> MOVF REG0xD8, w, BANKED # 0101 0010 0000 0000 -> MOVF DAT_DATA_0000, f, ACCESS # 0101 0011 0000 0000 -> MOVF REG0x0, f, BANKED # 0101 0010 1101 1000 -> MOVF STATUS, f, ACCESS # 0101 0011 1101 1000 -> MOVF REG0xD8, f, BANKED # 0101 0000 1110 1111 -> MOVF INDF0, w, ACCESS # 0101 0000 1110 0111 -> MOVF INDF1, w, ACCESS # 0101 0000 1101 1111 -> MOVF INDF2, w, ACCESS tmp:1 = srcREG; destREG = tmp; setResultFlags(tmp); } :MOVFF srcREG32, destREG32 is lop4=0x0c & srcREG32 & qual4=0x0f & destREG32 { # 1100 ssss ssss ssss 1111 dddd dddd dddd # 1100 0000 0000 0000 1111 1111 1101 1000 -> MOVFF DAT_DATA_0000, STATUS destREG32 = srcREG32; } :MOVFF srcREG32, destREG32 is lop4=0x0c & srcREG32 & qual4=0x0f & destREG32 & fd=0xff9 { # 1100 ssss ssss ssss 1111 dddd dddd dddd # 1100 0000 0000 0000 1111 1111 1111 1001 -> MOVFF DAT_DATA_0000, PCL addr:3 = (zext(PCLATU) << 16) + (zext(PCLATH) << 8) + zext(srcREG32); goto [addr]; } :MOVWF srcREG, A is op6=0x1b & d=0x1 & srcREG & A { # 0110 111a ffff ffff # 0110 1110 0000 0000 -> MOVWF DAT_DATA_0000, 0 # 0110 1111 0000 0000 -> MOVWF REG0x0, 1 # 0110 1110 1101 1000 -> MOVWF STATUS, 0 # 0110 1111 1101 1000 -> MOVWF REG0xD8, 1 srcREG = WREG; } :MOVWF pcl, A is op6=0x1b & A & pcl { # 0110 111a ffff ffff # 0110 1110 1111 1001 -> MOVWF PCL, 0 addr:3 = (zext(PCLATU) << 16) + (zext(PCLATH) << 8) + zext(WREG); goto [addr]; } :MULWF srcREG, A is op6=0x00 & d=0x1 & srcREG & A { # 0000 001a ffff ffff # 0000 0010 0000 0000 -> MULWF DAT_DATA_0000, 0 # 0000 0011 0000 0000 -> MULWF REG0x0, 1 # 0000 0010 1101 1000 -> MULWF STATUS, 0 # 0000 0011 1101 1000 -> MULWF REG0xD8, 1 tmp1:2 = zext(srcREG); tmp2:2 = zext(WREG); PROD = tmp1 * tmp2; } :NEGF srcREG, A is op6=0x1b & d=0x0 & srcREG & A { # 0110 110a ffff ffff # 0110 1100 0000 0000 -> NEGF DAT_DATA_0000, 0 # 0110 1101 0000 0000 -> NEGF REG0x0, 1 # 0110 1100 1101 1000 -> NEGF STATUS, 0 # 0110 1101 1101 1000 -> NEGF REG0xD8, 1 tmp:1 = -srcREG; srcREG = tmp; C = (tmp 
s> 0); OV = sborrow(0,tmp); setResultFlags(tmp); }

:RLCF srcREG, D, A is op6=0x0d & srcREG & destREG & D & A {
	# 0011 01da ffff ffff
	# 0011 0100 0000 0000 -> RLCF DAT_DATA_0000, w, ACCESS
	# 0011 0101 0000 0000 -> RLCF REG0x0, w, BANKED
	# 0011 0100 1101 1000 -> RLCF STATUS, w, ACCESS
	# 0011 0101 1101 1000 -> RLCF REG0xD8, w, BANKED
	# 0011 0110 0000 0000 -> RLCF DAT_DATA_0000, f, ACCESS
	# 0011 0111 0000 0000 -> RLCF REG0x0, f, BANKED
	# 0011 0110 1101 1000 -> RLCF STATUS, f, ACCESS
	# 0011 0111 1101 1000 -> RLCF REG0xD8, f, BANKED
	# Rotate left through carry: old C enters bit0, old bit7 becomes new C.
	local tmpC = C & 1;
	tmp:1 = srcREG;		# read only once (src may be an indirect/auto-modifying operand)
	C = (tmp s< 0);		# capture bit7 before shifting
	tmp = (tmp << 1) | tmpC;
	destREG = tmp;
	setResultFlags(tmp);
}

:RLNCF srcREG, D, A is op6=0x11 & srcREG & destREG & D & A {
	# 0100 01da ffff ffff
	# 0100 0100 0000 0000 -> RLNCF DAT_DATA_0000, w, ACCESS
	# 0100 0101 0000 0000 -> RLNCF REG0x0, w, BANKED
	# 0100 0100 1101 1000 -> RLNCF STATUS, w, ACCESS
	# 0100 0101 1101 1000 -> RLNCF REG0xD8, w, BANKED
	# 0100 0110 0000 0000 -> RLNCF DAT_DATA_0000, f, ACCESS
	# 0100 0111 0000 0000 -> RLNCF REG0x0, f, BANKED
	# 0100 0110 1101 1000 -> RLNCF STATUS, f, ACCESS
	# 0100 0111 1101 1000 -> RLNCF REG0xD8, f, BANKED
	# Rotate left (no carry): per the PIC18 datasheet bit7 wraps around into bit0.
	# A plain "srcREG << 1" would discard bit7 and model a shift, not a rotate.
	tmp:1 = srcREG;		# read only once
	tmp = (tmp << 1) | (tmp >> 7);
	destREG = tmp;
	setResultFlags(tmp);
}

:RRCF srcREG, D, A is op6=0x0c & srcREG & destREG & D & A {
	# 0011 00da ffff ffff
	# 0011 0000 0000 0000 -> RRCF DAT_DATA_0000, w, ACCESS
	# 0011 0001 0000 0000 -> RRCF REG0x0, w, BANKED
	# 0011 0000 1101 1000 -> RRCF STATUS, w, ACCESS
	# 0011 0001 1101 1000 -> RRCF REG0xD8, w, BANKED
	# 0011 0010 0000 0000 -> RRCF DAT_DATA_0000, f, ACCESS
	# 0011 0011 0000 0000 -> RRCF REG0x0, f, BANKED
	# 0011 0010 1101 1000 -> RRCF STATUS, f, ACCESS
	# 0011 0011 1101 1000 -> RRCF REG0xD8, f, BANKED
	# Rotate right through carry: old C enters bit7, old bit0 becomes new C.
	local tmpC = C << 7;
	tmp:1 = srcREG;		# read only once
	C = (tmp & 1);		# capture bit0 before shifting
	tmp = (tmp >> 1) | tmpC;
	destREG = tmp;
	setResultFlags(tmp);
}

:RRNCF srcREG, D, A is op6=0x10 & srcREG & destREG & D & A {
	# 0100 00da ffff ffff
	# 0100 0000 0000 0000 -> RRNCF DAT_DATA_0000, w, ACCESS
	# 0100 0001 0000 0000 -> RRNCF REG0x0, w, BANKED
	# 0100 0000 1101
1000 -> RRNCF STATUS, w, ACCESS
	# 0100 0001 1101 1000 -> RRNCF REG0xD8, w, BANKED
	# 0100 0010 0000 0000 -> RRNCF DAT_DATA_0000, f, ACCESS
	# 0100 0011 0000 0000 -> RRNCF REG0x0, f, BANKED
	# 0100 0010 1101 1000 -> RRNCF STATUS, f, ACCESS
	# 0100 0011 1101 1000 -> RRNCF REG0xD8, f, BANKED
	# Rotate right (no carry): per the PIC18 datasheet bit0 wraps around into bit7.
	# A plain "srcREG >> 1" would discard bit0 and model a shift, not a rotate.
	tmp:1 = srcREG;		# read only once
	tmp = (tmp >> 1) | (tmp << 7);
	destREG = tmp;
	setResultFlags(tmp);
}

:SETF srcREG, A is op6=0x1a & d=0x0 & srcREG & A {
	# 0110 100a ffff ffff
	# 0110 1000 0000 0000 -> SETF DAT_DATA_0000, 0
	# 0110 1001 0000 0000 -> SETF REG0x0, 1
	# 0110 1000 1101 1000 -> SETF STATUS, 0
	# 0110 1001 1101 1000 -> SETF REG0xD8, 1
	# Set every bit of f; no status flags affected.
	srcREG = 0xff;
}

:SUBFWB srcREG, D, A is op6=0x15 & srcREG & destREG & D & A {
	# 0101 01da ffff ffff
	# 0101 0100 0000 0000 -> SUBFWB DAT_DATA_0000, w, ACCESS
	# 0101 0101 0000 0000 -> SUBFWB REG0x0, w, BANKED
	# 0101 0100 1101 1000 -> SUBFWB STATUS, w, ACCESS
	# 0101 0101 1101 1000 -> SUBFWB REG0xD8, w, BANKED
	# 0101 0110 0000 0000 -> SUBFWB DAT_DATA_0000, f, ACCESS
	# 0101 0111 0000 0000 -> SUBFWB REG0x0, f, BANKED
	# 0101 0110 1101 1000 -> SUBFWB STATUS, f, ACCESS
	# 0101 0111 1101 1000 -> SUBFWB REG0xD8, f, BANKED
	# dest = W - f - !C (borrow is the complement of the carry flag).
	# The borrow must be 0 or 1: "~(C & 1)" bitwise-negates the whole byte (0xFF/0xFE),
	# which after wraparound ADDS 1 or 2 instead of subtracting the borrow.
	local notC = (C & 1) ^ 1;
	tmp:1 = srcREG;		# read only once
	setSubtractCFlags(WREG, tmp);
	tmp = WREG - tmp - notC;
	destREG = tmp;
	setResultFlags(tmp);
}

:SUBWF srcREG, D, A is op6=0x17 & srcREG & destREG & D & A {
	# 0101 11da ffff ffff
	# 0101 1100 0000 0000 -> SUBWF DAT_DATA_0000, w, ACCESS
	# 0101 1101 0000 0000 -> SUBWF REG0x0, w, BANKED
	# 0101 1100 1101 1000 -> SUBWF STATUS, w, ACCESS
	# 0101 1101 1101 1000 -> SUBWF REG0xD8, w, BANKED
	# 0101 1110 0000 0000 -> SUBWF DAT_DATA_0000, f, ACCESS
	# 0101 1111 0000 0000 -> SUBWF REG0x0, f, BANKED
	# 0101 1110 1101 1000 -> SUBWF STATUS, f, ACCESS
	# 0101 1111 1101 1000 -> SUBWF REG0xD8, f, BANKED
	# dest = f - W
	tmp:1 = srcREG;		# read only once
	setSubtractFlags(tmp, WREG);
	tmp = tmp - WREG;
	destREG = tmp;
	setResultFlags(tmp);
}

:SUBWFB srcREG, D, A is op6=0x16 & srcREG & destREG & D & A {
	# 0101 10da ffff ffff
	# 0101 1000 0000 0000 -> SUBWFB DAT_DATA_0000, w, ACCESS
	# 0101 1001 0000 0000 -> SUBWFB
REG0x0, w, BANKED
	# 0101 1000 1101 1000 -> SUBWFB STATUS, w, ACCESS
	# 0101 1001 1101 1000 -> SUBWFB REG0xD8, w, BANKED
	# 0101 1010 0000 0000 -> SUBWFB DAT_DATA_0000, f, ACCESS
	# 0101 1011 0000 0000 -> SUBWFB REG0x0, f, BANKED
	# 0101 1010 1101 1000 -> SUBWFB STATUS, f, ACCESS
	# 0101 1011 1101 1000 -> SUBWFB REG0xD8, f, BANKED
	# dest = f - W - !C (borrow is the complement of the carry flag).
	# The borrow must be 0 or 1: "~(C & 1)" bitwise-negates the whole byte (0xFF/0xFE),
	# which after wraparound ADDS 1 or 2 instead of subtracting the borrow.
	local notC = (C & 1) ^ 1;
	tmp:1 = srcREG;		# read only once
	setSubtractCFlags(tmp, WREG);
	tmp = tmp - WREG - notC;
	destREG = tmp;
	setResultFlags(tmp);
}

:SWAPF srcREG, D, A is op6=0x0e & srcREG & destREG & D & A {
	# 0011 10da ffff ffff
	# 0011 1000 0000 0000 -> SWAPF DAT_DATA_0000, w, ACCESS
	# 0011 1001 0000 0000 -> SWAPF REG0x0, w, BANKED
	# 0011 1000 1101 1000 -> SWAPF STATUS, w, ACCESS
	# 0011 1001 1101 1000 -> SWAPF REG0xD8, w, BANKED
	# 0011 1010 0000 0000 -> SWAPF DAT_DATA_0000, f, ACCESS
	# 0011 1011 0000 0000 -> SWAPF REG0x0, f, BANKED
	# 0011 1010 1101 1000 -> SWAPF STATUS, f, ACCESS
	# 0011 1011 1101 1000 -> SWAPF REG0xD8, f, BANKED
	# Exchange the upper and lower nibbles; no status flags affected.
	tmp:1 = srcREG;		# read only once
	destREG = (tmp << 4) | (tmp >> 4);
}

:TSTFSZ srcREG, A is op6=0x19 & d=0x1 & srcREG & A & skipInst {
	# 0110 011a ffff ffff
	# 0110 0110 0000 0000 -> TSTFSZ DAT_DATA_0000, 0
	# 0110 0111 0000 0000 -> TSTFSZ REG0x0, 1
	# 0110 0110 1101 1000 -> TSTFSZ STATUS, 0
	# 0110 0111 1101 1000 -> TSTFSZ REG0xD8, 1
	# Skip next instruction word if f == 0.
	if (srcREG == 0) goto skipInst;
}

:XORWF srcREG, D, A is op6=0x06 & srcREG & destREG & D & A {
	# 0001 10da ffff ffff
	# 0001 1000 0000 0000 -> XORWF DAT_DATA_0000, w, ACCESS
	# 0001 1001 0000 0000 -> XORWF REG0x0, w, BANKED
	# 0001 1000 1101 1000 -> XORWF STATUS, w, ACCESS
	# 0001 1001 1101 1000 -> XORWF REG0xD8, w, BANKED
	# 0001 1010 0000 0000 -> XORWF DAT_DATA_0000, f, ACCESS
	# 0001 1011 0000 0000 -> XORWF REG0x0, f, BANKED
	# 0001 1010 1101 1000 -> XORWF STATUS, f, ACCESS
	# 0001 1011 1101 1000 -> XORWF REG0xD8, f, BANKED
	tmp:1 = WREG ^ srcREG;
	destREG = tmp;
	setResultFlags(tmp);
}

#
# BIT-ORIENTED FILE REGISTER OPERATIONS
#

:BCF srcREG, bit, A is op4=0x09 & srcREG & bit & A {
	# 1001 bbba ffff ffff
	# 1001 0010 0000 0000 -> BCF
DAT_DATA_0000, #0x1, 0 # 1001 0101 0000 0000 -> BCF REG0x0, #0x2, 1 # 1001 0010 1101 1000 -> BCF STATUS, #0x1, 0 # 1001 0101 1101 1000 -> BCF REG0xD8, #0x2, 1 local bitmask = ~(1 << bit); srcREG = srcREG & bitmask; } :BCF status, bit, A is op4=0x09 & status & b3=0 & bit & A { # 1001 bbba ffff ffff # 1001 0000 1101 1000 -> BCF STATUS, #C, 0 C = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit, A is op4=0x09 & status & b3=1 & bit & A { # 1001 bbba ffff ffff # 1001 0010 1101 1000 -> BCF STATUS, #DC, 0 DC = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit, A is op4=0x09 & status & b3=2 & bit & A { # 1001 bbba ffff ffff # 1001 0100 1101 1000 -> BCF STATUS, #Z, 0 Z = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit, A is op4=0x09 & status & b3=3 & bit & A { # 1001 bbba ffff ffff # 1001 0110 1101 1000 -> BCF STATUS, #OV, 0 OV = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BCF status, bit, A is op4=0x09 & status & b3=4 & bit & A { # 1001 bbba ffff ffff # 1001 1000 1101 1000 -> BCF STATUS, #N, 0 N = 0; local bitmask = ~(1 << bit); STATUS = STATUS & bitmask; } :BSF srcREG, bit, A is op4=0x08 & srcREG & bit & A { # 1000 bbba ffff ffff # 1000 0010 0000 0000 -> BSF DAT_DATA_0000, #0x1, 0 # 1000 0101 0000 0000 -> BSF REG0x0, #0x2, 1 # 1000 0010 1101 1000 -> BSF STATUS, #0x1, 0 # 1000 0101 1101 1000 -> BSF REG0xD8, #0x2, 1 local bitmask = 1 << bit; srcREG = srcREG | bitmask; } :BSF status, bit, A is op4=0x08 & status & b3=0 & bit & A { # 1000 bbba ffff ffff # 1000 0000 1101 1000 -> BSF STATUS, #C, 0 C = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit, A is op4=0x08 & status & b3=1 & bit & A { # 1000 bbba ffff ffff # 1000 0010 1101 1000 -> BSF STATUS, #DC, 0 DC = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit, A is op4=0x08 & status & b3=2 & bit & A { # 1000 bbba ffff ffff # 1000 0100 1101 1000 -> BSF STATUS, #Z, 0 Z = 1; 
local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit, A is op4=0x08 & status & b3=3 & bit & A { # 1000 bbba ffff ffff # 1000 0110 1101 1000 -> BSF STATUS, #OV, 0 OV = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BSF status, bit, A is op4=0x08 & status & b3=4 & bit & A { # 1000 bbba ffff ffff # 1000 1000 1101 1000 -> BSF STATUS, #N, 0 N = 1; local bitmask = 1 << bit; STATUS = STATUS | bitmask; } :BTFSC srcREG, bit, A is op4=0x0b & srcREG & bit & A & skipInst { # 1011 bbba ffff ffff # 1011 0010 0000 0000 -> BTFSC DAT_DATA_0000, #0x1, 0 # 1011 0101 0000 0000 -> BTFSC REG0x0, #0x2, 1 # 1011 0010 1101 1000 -> BTFSC STATUS, #0x1, 0 # 1011 0101 1101 1000 -> BTFSC REG0xD8, #0x2, 1 local bitmask = 1 << bit; local tmp = srcREG & bitmask; if (tmp == 0) goto skipInst; } :BTFSC status, bit, A is op4=0x0b & b3=0 & bit & status & A & skipInst { # 1011 bbba ffff ffff # 1011 0000 1101 1000 -> BTFSC STATUS, #C, 0 if ((C & 1) == 0) goto skipInst; } :BTFSC status, bit, A is op4=0x0b & b3=1 & bit & status & A & skipInst { # 1011 bbba ffff ffff # 1011 0000 1101 1000 -> BTFSC STATUS, #DC, 0 if ((DC & 1) == 0) goto skipInst; } :BTFSC status, bit, A is op4=0x0b & b3=2 & bit & status & A & skipInst { # 1011 bbba ffff ffff # 1011 0000 1101 1000 -> BTFSC STATUS, #Z, 0 if ((Z & 1) == 0) goto skipInst; } :BTFSC status, bit, A is op4=0x0b & b3=3 & bit & status & A & skipInst { # 1011 bbba ffff ffff # 1011 0110 1101 1000 -> BTFSC STATUS, #OV, 0 if ((OV & 1) == 0) goto skipInst; } :BTFSC status, bit, A is op4=0x0b & b3=4 & bit & status & A & skipInst { # 1011 bbba ffff ffff # 1011 1000 1101 1000 -> BTFSC STATUS, #N, 0 if ((N & 1) == 0) goto skipInst; } :BTFSS srcREG, bit, A is op4=0x0a & srcREG & bit & A & skipInst { # 1010 bbba ffff ffff # 1010 0010 0000 0000 -> BTFSS DAT_DATA_0000, #0x1, 0 # 1010 0101 0000 0000 -> BTFSS REG0x0, #0x2, 1 # 1010 0010 1101 1000 -> BTFSS STATUS, #0x1, 0 # 1010 0101 1101 1000 -> BTFSS REG0xD8, #0x2, 1 local bitmask = 1 << bit; local 
tmp = srcREG & bitmask; if (tmp != 0) goto skipInst; }

:BTFSS status, bit, A is op4=0x0a & b3=0 & bit & status & A & skipInst {
	# 1010 bbba ffff ffff
	# 1010 0000 1101 1000 -> BTFSS STATUS, #C, 0
	if ((C & 1) != 0) goto skipInst;
}

:BTFSS status, bit, A is op4=0x0a & b3=1 & bit & status & A & skipInst {
	# 1010 bbba ffff ffff
	# 1010 0010 1101 1000 -> BTFSS STATUS, #DC, 0
	if ((DC & 1) != 0) goto skipInst;
}

:BTFSS status, bit, A is op4=0x0a & b3=2 & bit & status & A & skipInst {
	# 1010 bbba ffff ffff
	# 1010 0100 1101 1000 -> BTFSS STATUS, #Z, 0
	if ((Z & 1) != 0) goto skipInst;
}

:BTFSS status, bit, A is op4=0x0a & b3=3 & bit & status & A & skipInst {
	# 1010 bbba ffff ffff
	# 1010 0110 1101 1000 -> BTFSS STATUS, #OV, 0
	if ((OV & 1) != 0) goto skipInst;
}

:BTFSS status, bit, A is op4=0x0a & b3=4 & bit & status & A & skipInst {
	# 1010 bbba ffff ffff
	# 1010 1000 1101 1000 -> BTFSS STATUS, #N, 0
	if ((N & 1) != 0) goto skipInst;
}

:BTG srcREG, bit, A is op4=0x07 & srcREG & bit & A {
	# 0111 bbba ffff ffff
	# 0111 0010 0000 0000 -> BTG DAT_DATA_0000, #0x1, 0
	# 0111 0101 0000 0000 -> BTG REG0x0, #0x2, 1
	# 0111 0010 1101 1000 -> BTG STATUS, #0x1, 0
	# 0111 0101 1101 1000 -> BTG REG0xD8, #0x2, 1
	# Toggle only the selected bit of f.
	# The previous "~(tmp & bitmask) | (tmp & ~bitmask)" applied ~ to the masked
	# value, which set every bit OUTSIDE the mask; XOR is the correct toggle.
	local bitmask = 1 << bit;
	tmp:1 = srcREG;		# read only once
	srcREG = tmp ^ bitmask;
}

#
# CONTROL OPERATIONS
#

:BC relAddr8 is op8=0xe2 & relAddr8 {
	# 1110 0010 nnnn nnnn
	# 1110 0010 0001 0000 -> BC LAB_CODE_XXXX
	if ((C & 1) != 0) goto relAddr8;
}

:BN relAddr8 is op8=0xe6 & relAddr8 {
	# 1110 0110 nnnn nnnn
	# 1110 0110 0001 0000 -> BN LAB_CODE_XXXX
	if ((N & 1) != 0) goto relAddr8;
}

:BNC relAddr8 is op8=0xe3 & relAddr8 {
	# 1110 0011 nnnn nnnn
	# 1110 0011 0001 0000 -> BNC LAB_CODE_XXXX
	if ((C & 1) == 0) goto relAddr8;
}

:BNN relAddr8 is op8=0xe7 & relAddr8 {
	# 1110 0111 nnnn nnnn
	# 1110 0111 0001 0000 -> BNN LAB_CODE_XXXX
	if ((N & 1) == 0) goto relAddr8;
}

:BNOV relAddr8 is op8=0xe5 & relAddr8 {
	# 1110 0101 nnnn nnnn
	# 1110 0101 0001 0000 -> BNOV LAB_CODE_XXXX
	if ((OV & 1) == 0) goto
relAddr8; } :BNZ relAddr8 is op8=0xe1 & relAddr8 { # 1110 0001 nnnn nnnn # 1110 0001 0001 0000 -> BNZ LAB_CODE_XXXX if ((Z & 1) == 0) goto relAddr8; } :BOV relAddr8 is op8=0xe4 & relAddr8 { # 1110 0100 nnnn nnnn # 1110 0100 0001 0000 -> BOV LAB_CODE_XXXX if ((OV & 1) != 0) goto relAddr8; } :BRA relAddr11 is op5=0x1a & relAddr11 { # 1101 0nnn nnnn nnnn # 1101 0001 0001 0000 -> BRA LAB_CODE_XXXX (inst_next+0x220) goto relAddr11; } :BZ relAddr8 is op8=0xe0 & relAddr8 { # 1110 0000 nnnn nnnn # 1110 0000 0001 0000 -> BZ LAB_CODE_XXXX if ((Z & 1) != 0) goto relAddr8; } :CALL absAddr21, s_8 is lop8=0xec & absAddr21 & s_8 { # 1110 110s kkkk kkkk 1111 kkkk kkkk kkkk # 1110 1100 0100 0101 1111 0001 0010 0011 -> CALL SUB_CODE_02468a, 0 push(&:3 inst_next); call absAddr21; } :CALL absAddr21, s_8 is lop8=0xed & absAddr21 & s_8 { # 1110 110s kkkk kkkk 1111 kkkk kkkk kkkk # 1110 1101 0100 0101 1111 0001 0010 0011 -> CALL SUB_CODE_02468a, 1 WS = WREG; STATUSS = STATUS; BSRS = BSR; push(&:3 inst_next); call absAddr21; } :CLRWDT is op16=0x0004 { # 0000 0000 0000 0100 clearWatchDogTimer(); } :DAW is op16=0x0007 { # 0000 0000 0000 0111 tmp:1 = decimalAdjust(WREG); WREG = tmp; setResultFlags(tmp); } :GOTO absAddr21 is lop8=0xef & absAddr21 { # 1110 1111 kkkk kkkk 1111 kkkk kkkk kkkk # 1110 1111 0100 0101 1111 0001 0010 0011 -> GOTO LAB_CODE_02468a goto absAddr21; } :NOP is op16=0x0000 { } :NOP is op4=0x0f { } :POP is op16=0x0006 { # 0000 0000 0000 0110 tmp:4 = 0; pop(tmp); } :PUSH is op16=0x0005 { # 0000 0000 0000 0101 push(&:3 inst_next); } :RCALL relAddr11 is op5=0x1b & relAddr11 { # 1101 1nnn nnnn nnnn # 1101 1001 0000 0000 -> CALL SUB_CODE_XXXX push(&:3 inst_next); call relAddr11; } :RESET is op16=0x00ff { # 0000 0000 1111 1111 reset(); } :RETFIE s_0 is op16=0x0010 & s_0 { # TODO: Set GIE/GIEH and/or PEIE/GIEL # 0000 0000 0001 0000 retAddr:4 = 0; pop(retAddr); return [retAddr]; } :RETFIE s_0 is op16=0x0011 & s_0 { # TODO: Set GIE/GIEH and/or PEIE/GIEL # 0000 0000 0001 0001 WREG = 
WS; STATUS = STATUSS; BSR = BSRS; retAddr:4 = 0; pop(retAddr); return [retAddr]; } :RETURN s_0 is op16=0x0012 & s_0 { # 0000 0000 0001 0010 retAddr:4 = 0; pop(retAddr); return [retAddr]; } :RETURN s_0 is op16=0x0013 & s_0 { # 0000 0000 0001 0011 WREG = WS; STATUS = STATUSS; BSR = BSRS; retAddr:4 = 0; pop(retAddr); return [retAddr]; } :SLEEP is op16=0x0003 { # 0000 0000 0000 0011 sleep(); } # # LITERAL OPERATIONS # :ADDLW imm8 is op8=0x0f & imm8 { # 0000 1111 kkkk kkkk # 0000 1111 0001 0010 -> ADDLW #0x12 setAddFlags(imm8, WREG); WREG = WREG + imm8; setResultFlags(WREG); } :ANDLW imm8 is op8=0xb & imm8 { # 0000 1011 kkkk kkkk # 0000 1011 0001 0010 -> ANDLW #0x12 WREG = WREG & imm8; setResultFlags(WREG); } :IORLW imm8 is op8=0x9 & imm8 { # 0000 1001 kkkk kkkk # 0000 1001 0001 0010 -> IORLW #0x12 WREG = WREG | imm8; setResultFlags(WREG); } :LFSR FSRn, imm12 is lop10=0x3b8 & fsr<3 & FSRn & imm12 { # 1110 1110 00ff kkkk 1111 0000 kkkk kkkk # 1110 1110 0001 0001 1111 0000 0010 0011 -> LFSR FSR1, 0x123 FSRn = imm12; } :MOVLB imm8 is op8=0x01 & imm8 { # Manual is inconsistent imm4 vs. 
imm8 # 0000 0001 kkkk kkkk # 0000 0001 0001 0010 -> MOVLB #0x12 BSR = imm8; } :MOVLW imm8 is op8=0x0e & imm8 { # 0000 1110 kkkk kkkk # 0000 1110 0001 0010 -> MOVLW #0x12 WREG = imm8; } :MULLW imm8 is op8=0x0d & imm8 { # 0000 1101 kkkk kkkk # 0000 1101 0001 0010 -> MULLW #0x12 tmp1:2 = zext(imm8); tmp2:2 = zext(WREG); PROD = tmp1 * tmp2; } :RETLW imm8 is op8=0x0c & imm8 { # 0000 1100 kkkk kkkk # 0000 1100 0001 0010 -> RETLW #0x12 WREG = imm8; retAddr:4 = 0; pop(retAddr); return [retAddr]; } :SUBLW imm8 is op8=0x08 & imm8 { # 0000 1000 kkkk kkkk # 0000 1000 0001 0010 -> SUBLW #0x12 setSubtractFlags(imm8, WREG); WREG = imm8 - WREG; setResultFlags(WREG); } :XORLW imm8 is op8=0x0a & imm8 { # 0000 1010 kkkk kkkk # 0000 1010 0001 0010 -> XORLW #0x12 WREG = WREG ^ imm8; setResultFlags(WREG); } # # DATA MEMORY <-> PROGRAM MEMORY OPERATIONS # :TBLRD* is op16=0x0008 { # 0000 0000 0000 1000 ptr:3 = TBLPTR; TABLAT = *[CODE] ptr; } :TBLRD*+ is op16=0x0009 { # 0000 0000 0000 1001 ptr:3 = TBLPTR; TABLAT = *[CODE] ptr; ptr = ptr + 1; TBLPTR = ptr; } :TBLRD*- is op16=0x000a { # 0000 0000 0000 1010 ptr:3 = TBLPTR; TABLAT = *[CODE] ptr; ptr = ptr - 1; TBLPTR = ptr; } :TBLRD+* is op16=0x000b { # 0000 0000 0000 1011 ptr:3 = TBLPTR; ptr = ptr + 1; TBLPTR = ptr; TABLAT = *[CODE] ptr; } :TBLWT* is op16=0x000c { # 0000 0000 0000 1100 ptr:3 = TBLPTR; *[CODE] ptr = TABLAT; } :TBLWT*+ is op16=0x000d { # 0000 0000 0000 1101 ptr:3 = TBLPTR; *[CODE] ptr = TABLAT; TBLPTR = ptr + 1; } :TBLWT*- is op16=0x000e { # 0000 0000 0000 1110 ptr:3 = TBLPTR; *[CODE] ptr = TABLAT; TBLPTR = ptr - 1; } :TBLWT+* is op16=0x000f { # 0000 0000 0000 1111 ptr:3 = TBLPTR; ptr = ptr + 1; TBLPTR = ptr; *[CODE] ptr = TABLAT; } # # EXTENDED INSTRUCTION SET # :ADDFSR xFSRn, imm6 is op8=0xe8 & xfsr<3 & xFSRn & imm6 { # 1110 1000 ffkk kkkk # 1110 1000 1001 0010 -> ADDFSR FSR2, #0x12 xFSRn = xFSRn + zext(imm6); } :ADDULNK imm6 is op8=0xe8 & xfsr=3 & imm6 { # 1110 1000 11kk kkkk # 1110 1000 1101 0010 -> ADDULNK #0x12 retAddr:4 
= 0; pop(retAddr); FSR2 = FSR2 + zext(imm6); return [retAddr]; } :CALLW is op16=0x0014 { # 0000 0000 0001 0100 loc:3 = (zext(PCLATU) << 16) | (zext(PCLATH) << 8) | zext(WREG); push(&:3 inst_next); call [loc]; } :MOVSF ZS, destREG32 is lop9=0x1d6 & ZS & qual4=0xf & destREG32 { # 1110 1011 0zzz zzzz 1111 ffff ffff ffff # 1110 1011 0001 0010 1111 0001 0010 0011 -> MOVSF 0x12[FSR2], DAT_DATA_0123 destREG32 = ZS; } :MOVSF ZS, destREG32 is lop9=0x1d6 & ZS & qual4=0xf & destREG32 & fd=0xff9 { # 1110 1011 0zzz zzzz 1111 ffff ffff ffff # 1110 1011 0001 0010 1111 1111 1111 1001 -> MOVSF 0x12[FSR2], PCL addr:3 = (zext(PCLATU) << 16) + (zext(PCLATH) << 8) + zext(ZS); goto [addr]; } :MOVSS ZS, ZD is lop9=0x1d7 & ZS & ZD { # 1110 1011 1sss ssss 1111 xxxx xddd dddd # s: corresponds to zs # d: corresponds to zd # x: appear to be unused bits (don't care) # 1110 1011 1001 0010 1111 1111 1100 0101 -> MOVSS 0x12[FSR2], 0x45[FSR2] ZD = ZS; } :PUSHL imm8 is op8=0xfa & imm8 { # 1111 1010 kkkk kkkk # 1111 1010 0001 0010 -> PUSHL #0x12 local loc = FSR2; *[DATA]:1 loc = imm8; FSR2 = loc - 1; } :SUBFSR xFSRn, imm6 is op8=0xe9 & xfsr<3 & xFSRn & imm6 { # 1110 1001 ffkk kkkk # 1110 1001 0101 0010 -> SUBFSR FSR1, 0x12 xFSRn = xFSRn - zext(imm6); } :SUBULNK imm6 is op8=0xe9 & xfsr=3 & imm6 { # 1110 1001 11kk kkkk # 1110 1001 1101 0010 -> SUBULNK #0x12 retAddr:4 = 0; pop(retAddr); FSR2 = FSR2 - zext(imm6); return [retAddr]; } ================================================ FILE: pypcode/processors/PIC/data/manuals/PIC-12.idx ================================================ @PIC12_40139e.pdf [PIC12C5XX 8-Pin, 8-Bit CMOS Microcontrollers (DS40139E)] ADDWF , 49 ANDLW , 49 ANDWF , 49 BCF , 49 BSF , 50 BTFSC , 50 BTFSS , 50 CALL , 51 CLRF , 51 CLRW , 51 CLRWDT , 51 COMF , 52 DECF , 52 DECFSZ , 52 GOTO , 52 INCF , 53 INCFSZ , 53 IORLW , 53 IORWF , 53 MOVLW , 54 MOVF , 54 MOVWF , 54 NOP , 54 OPTION , 55 RETLW , 55 RLF , 55 RRF , 55 SLEEP , 56 SUBWF , 56 SWAPF , 57 TRIS , 57 XORLW , 57 XORWF , 57 
================================================ FILE: pypcode/processors/PIC/data/manuals/PIC-16.idx ================================================ @PIC16_33023a.pdf [PICmicro� Mid-Range MCU Family Reference Manual, December 1997 (DS33023A)] ADDLW , 530 ADDWF , 531 ANDLW , 532 ANDWF , 533 BCF , 534 BSF , 535 BTFSC , 536 BTFSS , 537 CALL , 538 CLRF , 539 CLRW , 540 CLRWDT , 541 COMF , 542 DECF , 543 DECFSZ , 544 GOTO , 545 INCF , 546 INCFSZ , 547 IORLW , 548 IORWF , 549 MOVLW , 550 MOVF , 551 MOVWF , 552 NOP , 553 OPTION , 554 RETFIE , 555 RETLW , 556 RETURN , 557 RLF , 558 RRF , 559 SLEEP , 560 SUBLW , 561 SUBWF , 562 SWAPF , 563 TRIS , 564 XORLW , 565 XORWF , 566 ================================================ FILE: pypcode/processors/PIC/data/manuals/PIC-16F.idx ================================================ @PIC16F_40001761E.pdf [Microchip PIC16LF1554/1559 (DS40001761E)] ADDFSR , 273 ADDLW , 273 ADDWF , 273 ADDWFC , 273 ANDLW , 273 ANDWF , 273 ASRF , 273 BCF , 274 BRA , 274 BRW , 274 BSF , 274 BTFSC , 274 BTFSS , 274 CALL , 275 CALLW , 275 CLRF , 275 CLRW , 275 CLRWDT , 275 COMF , 275 DECF , 275 DECFSZ , 276 GOTO , 276 INCF , 276 INCFSZ , 276 IORLW , 276 IORWF , 276 LSLF , 277 LSRF , 277 MOVF , 277 MOVIW , 278 MOVLB , 278 MOVLP , 278 MOVLW , 278 MOVWF , 278 MOVWI , 279 NOP , 279 OPTION , 279 RESET , 279 RETFIE , 280 RETLW , 280 RETURN , 280 RLF , 280 RRF , 281 SLEEP , 281 SUBLW , 281 SUBWF , 281 SUBWFB , 281 SWAPF , 282 TRIS , 282 XORLW , 282 XORWF , 282 ================================================ FILE: pypcode/processors/PIC/data/manuals/PIC-17.idx ================================================ @PIC17_30289b.pdf [High-Performance 8-bit CMOS EPROM Microcontrollers with 10-bit A/D, 2000 (DS30289B)] ADDLW , 202 ADDWF , 202 ADDWFC , 203 ANDLW , 203 ANDWF , 204 BCF , 204 BSF , 205 BTFSC , 205 BTFSS , 206 BTG , 206 CALL , 207 CLRF , 207 CLRWDT , 208 COMF , 208 CPFSEQ , 209 CPFSGT , 209 CPFSLT , 210 DAW , 210 DECF , 211 DECFSZ , 211 DCFSNZ , 212 GOTO , 
212 INCF , 213 INCFSZ , 213 INFSNZ , 214 IORLW , 214 IORWF , 215 LCALL , 215 MOVFP , 216 MOVLB , 216 MOVLR , 217 MOVLW , 217 MOVPF , 218 MOVWF , 218 MULLW , 219 MULWF , 219 NEGW , 220 NOP , 220 RETFIE , 221 RETLW , 221 RETURN , 222 RLCF , 222 RLNCF , 223 RRCF , 223 RRNCF , 224 SETF , 224 SLEEP , 225 SUBLW , 225 SUBWF , 226 SUBWFB , 226 SWAPF , 227 TABLRD , 227 TABLWT , 228 TLRD , 229 TLWT , 230 TSTFSZ , 230 XORLW , 231 XORWF , 231 ================================================ FILE: pypcode/processors/PIC/data/manuals/PIC-18.idx ================================================ @PIC18_14702.pdf [PIC18CXX2 High-Performance Microcontrollers with 10-Bit A/D, 7/99 (DS39026B)] ADDLW , 197 ADDWF , 197 ADDWFC , 198 ANDLW , 198 ANDWF , 199 BC , 199 BCF , 200 BN , 200 BNC , 201 BNN , 201 BNOV , 202 BNZ , 202 BRA , 203 BSF , 203 BTFSC , 204 BTFSS , 204 BTG , 205 BOV , 205 BZ , 206 CALL , 206 CLRF , 207 CLRWDT , 207 COMF , 208 CPFSEQ , 208 CPFSGT , 209 CPFSLT , 209 DAW , 210 DECF , 210 DECFSZ , 211 DCFSNZ , 211 GOTO , 212 INCF , 212 INCFSZ , 213 INFSNZ , 213 IORLW , 214 IORWF , 214 LFSR , 215 MOVF , 215 MOVFF , 216 MOVLB , 216 MOVLW , 217 MOVWF , 217 MULLW , 218 MULWF , 218 NEGF , 219 NOP , 219 POP , 220 PUSH , 220 RCALL , 221 RESET , 221 RETFIE , 222 RETLW , 222 RETURN , 223 RLCF , 223 RLNCF , 224 RRCF , 224 RRNCF , 225 SETF , 225 SLEEP , 226 SUBWFB , 226 SUBLW , 227 SUBWF , 228 SUBWFB , 229 SWAPF , 230 TBLRD , 231 TBLWT , 232 TSTFSZ , 233 XORLW , 233 XORWF , 234 ================================================ FILE: pypcode/processors/PIC/data/manuals/PIC24.idx ================================================ @70000157g.pdf[16-bit MCU and DSC Programmer's Reference Manual - DS70000157G] add, 102 addc, 110 and, 116 asr, 121 bclr, 127 bfext, 130 bfins, 132 bootswp, 135 bra, 136 bset, 160 bsw, 163 btg, 165 btsc, 168 btss, 172 btst, 175 btsts, 180 call, 183 call.l, 191 clr, 192 clrwdt, 196 com, 197 cp, 200 cp0, 204 cpb, 206 cpbeq, 211 cpbgt, 212 cpblt, 213 cpbne, 214 cpseq, 
215 cpsgt, 217 cpslt, 219 cpsne, 221 ctxswp, 223 daw.b, 225 dec, 226 dec.2, 229 disi, 232 div.s, 233 div.u, 235 divf, 236 divf2, 238 div2.s, 240 div2.u, 241 do, 242 ed, 250 edac, 252 exch, 254 fbcl, 255 ff1l, 257 ff1r, 259 flim, 261 flim.v, 262 goto, 263 goto.l, 266 inc, 267 inc2, 269 ior, 271 lac, 276 lac.d, 278 ldslv, 279 lnk, 280 lsr, 282 mac, 288 max, 292 max.v, 293 min, 294 min.v, 295 minz, 296 minz.v, 297 mov, 299 mov.b, 303 mov.d, 309 movpag, 311 movsac, 313 mpy, 315 mpy.n, 319 msc, 321 mul, 323 mul.ss, 325 mul.su, 328 mul.us, 333 mul.uu, 336 mulw.ss, 341 mulw.su, 343 mulw.us, 346 mulw.uu, 348 neg, 350 nop, 354 nopr, 355 norm, 356 pop, 357 pop.d, 359 pop.s, 360 push, 361 push.d, 364 push.s, 365 pwrsav, 366 rcall, 367 repeat, 375 reset, 379 retfie, 380 retlw, 382 return, 386 rlc, 388 rlnc, 391 rrc, 394 rrnc, 398 sac, 401 sac.d, 403 sac.r, 404 se, 406 setm, 408 sftac, 410 sl, 412 sub, 418 subb, 424 subbr, 430 swap, 439 tblrdh, 440 tblrdl, 442 tblwth, 444 tblwtl, 446 ulnk, 448 vfslv, 450 xor, 451 ze, 456 ================================================ FILE: pypcode/processors/PowerPC/data/languages/4xx.sinc ================================================ #dcread r0,0,r0 0x7c 00 03 cc :dcread S,RA_OR_ZERO,B is OP=31 & S & B & (XOP_1_10=486 | XOP_1_10=326) & BIT_0=0 & RA_OR_ZERO { # ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; S = dataCacheRead(RA_OR_ZERO,B); } # ======================================================================== # PowerISA II: Chapter 10. 
Legacy Move Assist Instruction [Category: Legacy Move Assist] # CMT: Determine Leftmost Zero Byte # FORM: X-form # binutils: 476.d: 1a4: 7c 83 28 9c dlmzb r3,r4,r5 # binutils: titan.d: 158: 7c 22 00 9c dlmzb r2,r1,r0 define pcodeop DetermineLeftmostZeroByte; :dlmzb S,A,B is OP=31 & S & A & B & XOP_1_10=78 & Rc=0 { # search from left for the first occurrence of null byte # low 32 bits of RS concatenated with low 32 bits of RB @ifdef BIT_64 tmpD:8 = zext( S:4 ); @else tmpD:8 = zext( S ); @endif tmpD = tmpD << 32; @ifdef BIT_64 tmpD = tmpD | zext( B:4 ); @else tmpD = tmpD | zext( B ); @endif tmpX:8 = 0; if ( tmpX == 8 ) goto ; tmpX = tmpX + 1; if ( ( ( tmpD << ( (tmpX-1) * 8 ) ) & 0xFF00000000000000 ) != 0 ) goto ; # place byte number in register A and low 7 bits of XER @ifdef BIT_64 A = tmpX; XER = ( XER & 0xFFFFFFFFFFFFFF80 ) | tmpX; @else A = tmpX:4; XER = ( XER & 0xFFFFFF80 ) | tmpX:4; @endif } # PowerISA II: Chapter 10. Legacy Move Assist Instruction [Category: Legacy Move Assist] # CMT: Determine Leftmost Zero Byte # FORM: X-form # binutils: 476.d: 1a8: 7c 83 28 9d dlmzb\. r3,r4,r5 # binutils: titan.d: 15c: 7c 22 00 9d dlmzb\. r2,r1,r0 define pcodeop DetermineLeftmostZeroByte1; :dlmzb. 
S,A,B is OP=31 & S & A & B & XOP_1_10=78 & Rc=1 { # search from left for the first occurrence of null byte # low 32 bits of RS concatenated with low 32 bits of RB @ifdef BIT_64 tmpD:8 = zext( S:4 ); @else tmpD:8 = zext( S ); @endif tmpD = tmpD << 32; @ifdef BIT_64 tmpD = tmpD | zext( B:4 ); @else tmpD = tmpD | zext( B ); @endif tmpX:8 = 0; tmpY:8 = 0; if ( tmpX == 8 ) goto ; tmpX = tmpX + 1; if ( ( ( tmpD << ( (tmpX - 1) * 8 ) ) & 0xFF00000000000000 ) != 0 ) goto ; # matched tmpY = 1; # place byte number in register A and low 7 bits of XER @ifdef BIT_64 A = tmpX; XER = ( XER & 0xFFFFFFFFFFFFFF80 ) | tmpX; @else A = tmpX:4; XER = ( XER & 0xFFFFFF80 ) | tmpX:4; @endif # Rc section # Set bit 35 of CR to SO cr0 = (cr0 & 0xe) | zext( xer_so & 1); # Set bits 32:34 of CR if ( tmpY != 1 ) goto ; if ( tmpX >= 5 ) goto ; cr0 = ( cr0 & 0x1 ) | 4; goto ; cr0 = ( cr0 & 0x1 ) | 8; goto ; cr0 = ( cr0 & 0x1 ) | 2; } #icread 0,r0 0x7c 00 07 cc :icread RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=998 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; instructionCacheRead(ea); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/FPRC.sinc ================================================ period: "" is Rc=0 { setSummaryFPSCR(); } period: "." 
is Rc=1 { setSummaryFPSCR(); cr1flags(); } # Floating Convert To Integer Doubleword Unsigned :fctidu^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=942 & period { # src is rounded to integer fT = trunc(round(fB)); # if src is Nan, result is 0 and VXSNAN is set to 1 fT = fT * zext(nan(fB) == 0); fp_vxsnan = fp_vxsnan | nan(fB); # if src > 2^64 - 1, result is 0xffff_ffff_ffff_ffff and VXCVI is set to 1 bigi:16 = 0xffffffffffffffff; bigf:8 = int2float(bigi); fT = fT - (0xffffffffffffffff + fT) * zext(fB f> bigf); fp_vxcvi = fp_vxcvi | (fB f> bigf); # if rounded value < 0, result is 0 and VXCVI is set to 1 fp_vxcvi = fp_vxcvi | (fT s< 0); fT = fT * zext(fT s> 0); build period; } # Floating Convert To Integer Doubleword Unsigned with round toward Zero :fctiduz^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=943 & period { # src is rounded to integer fT = trunc(fB); # if src is Nan, result is 0 and VXSNAN is set to 1 fT = fT * zext(nan(fB) == 0); fp_vxsnan = fp_vxsnan | nan(fB); # if src > 2^64 - 1, result is 0xffff_ffff_ffff_ffff and VXCVI is set to 1 bigi:16 = 0xffffffffffffffff; bigf:8 = int2float(bigi); fT = fT - (0xffffffffffffffff + fT) * zext(fB f> bigf); fp_vxcvi = fp_vxcvi | (fB f> bigf); # if rounded value < 0, result is 0 and VXCVI is set to 1 fp_vxcvi = fp_vxcvi | (fT s< 0); fT = fT * zext(fT s> 0); build period; } # Floating Convert To Integer Word Unsigned :fctiwu^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=142 & period { # src is rounded to integer fT = trunc(round(fB)); # if src is NaN then result is 0 and VXSNAN is set to 1 fT = fT * zext(nan(fB) == 0); fp_vxsnan = fp_vxsnan | nan(fB); # if src > 2^32 - 1, result is 0xffff_ffff and VXCVI is set to 1 bigi:16 = 0xffffffff; bigf:8 = int2float(bigi); fT = fT - (0xffffffff + fT) * zext(fB f> bigf); fp_vxcvi = fp_vxcvi | (fB f> bigf); # if rounded value < 0, result is 0 and VXCVI is set to 1 fp_vxcvi = fp_vxcvi | (fT s< 0); fT = fT * 
zext(fT s> 0); build period; } # Floating Convert To Integer Word Unsigned with round toward Zero :fctiwuz^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=143 & period { # src is rounded to integer fT = trunc(fB); # if src is NaN then result is 0 and VXNAN is set to 1 fT = fT * zext(nan(fB) == 0); fp_vxsnan = fp_vxsnan | nan(fB); # if src > 2^32 - 1, result is 0xffff_ffff and VXCVI is set to 1 bigi:16 = 0xffffffff; bigf:8 = int2float(bigi); fT = fT - (0xffffffff + fT) * zext(fB f> bigf); fp_vxcvi = fp_vxcvi | (fB f> bigf); # if rounded value < 0, result is 0 and VXCVI is set to 1 fp_vxcvi = fp_vxcvi | (fT s< 0); fT = fT * zext(fT s> 0); build period; } # Floating Convert From Integer Doubleword Unsigned X-form :fcfidu^period fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=974 & period { # convert source to unsigned int by extension local tmpI:8 = zext(fB); # src is converted to floating point fT = int2float(tmpI); # FPSCR is class and sign of result setFPRF(fT); build period; } # Floating Convert From Integer Doubleword Single X-form :fcfids^period fT,fB is $(NOTVLE) & OP=59 & fT & BITS_16_20=0 & fB & XOP_1_10=846 & period { # src is converted to single-precision floating point local tmpF:4 = int2float(fB); # convert the result to double-precision fT = float2float(tmpF); # FPSCR is class and sign of result setFPRF(fT); build period; } # fcfidus fT,fB # Floating Convert From Integer Doubleword Unsigned Single :fcfidus^period fT,fB is $(NOTVLE) & OP=59 & fT & BITS_16_20=0 & fB & XOP_1_10=974 & period { # convert source to unsigned int by extension local tmpI:8 = zext(fB); # src is converted to single-precision floating point local tmpF:4 = int2float(tmpI); # src is converted to double-precision fT = float2float(tmpF); # FPSCR is class and sign of result setFPRF(fT); build period; } # Floating Test for software Divide :ftdiv CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=128 & BIT_0=0 { zero:8 = 
int2float(0:1); # fe if fA or fB is Nan or infinity, or if fB is 0 # and other conditions on the exponents fe_flag:1 = nan(fA) | nan(fB) | (fB f== zero); # fg if fA or fB are infinite, or fB is NaN or denomrmalized or zero fg_flag:1 = nan(fB) | (fB f== zero); CRFD = (fg_flag << 2) | (fe_flag << 1); } # Floating Test for software Square Root :ftsqrt CRFD,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & BITS_16_20=0 & fB & XOP_1_10=160 & BIT_0=0 { zero:8 = int2float(0:1); # fe if fB is zero, NAN, infinity, or negative fe_flag:1 = nan(fB) | (fB f< zero); # fg if fB is zero, infinity, or denormalized fg_flag:1 = nan(fB) | (fB f== zero); CRFD = (fg_flag << 2) | (fe_flag << 1); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/PowerPC.opinion ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/SPEF_SCR.sinc ================================================ # Based on "PowerISA Version 2.06 Revision B" document dated July 23, 2010 # Category: SPE.Embedded Float Vector Instructions # version 1.0 define register offset=0x600 size=1 [ spef_sovh spef_ovh spef_fgh spef_fxh spef_finvh spef_fdbzh spef_funfh spef_fovfh spef_reserved1 spef_reserved2 spef_finxs spef_finvs spef_fdbzs spef_funfs spef_fovfs spef_reserved3 spef_sov spef_ov spef_fg spef_fx spef_finv spef_fdbz spef_funf spef_fovf spef_reserved4 spef_finxe spef_finve spef_fdbze spef_funfe spef_fovfe spef_frmc0 spef_frmc1 ]; macro setSPEFSCR_L(result) { spef_finv = nan(result); spef_finvs = spef_finvs | spef_finv; } macro setSPEFSCR_H(result) { spef_finvh = nan(result); spef_finvs = spef_finvs | spef_finvh; } macro setSummarySPEFSCR() { spef_sov = spef_sov | spef_ov; spef_sovh = spef_sovh | spef_ovh; spef_finxs = spef_finxs | spef_fx | spef_fxh; spef_finvs = spef_finvs | spef_finv | spef_finvh; spef_fdbzs = spef_fdbzs | spef_fdbz | spef_fdbzh; spef_funfs = spef_funfs | spef_funf | 
spef_funfh; spef_fovfs = spef_fovfs | spef_fovf | spef_fovfh; } macro setSPEFSCRAddFlags_L(op1, op2, result) { setSPEFSCR_L(result); spef_fx = spef_fx | nan(op1) | nan(op2); spef_finv = spef_fx; setSummarySPEFSCR(); } macro setSPEFSCRAddFlags_H(op1, op2, result) { setSPEFSCR_H(result); spef_fxh = spef_fxh | nan(op1) | nan(op2); spef_finvh = spef_fxh; setSummarySPEFSCR(); } macro setSPEFSCRDivFlags_L(op1, op2, result) { setSPEFSCR_L(result); spef_fdbz = spef_fdbz | (op2 f== 0); spef_fx = spef_fx | nan(op1) | nan(op2); spef_finv = spef_fx; setSummarySPEFSCR(); } macro setSPEFSCRDivFlags_H(op1, op2, result) { setSPEFSCR_H(result); spef_fdbzh = spef_fdbzh | (op2 f== 0); spef_fxh = spef_fxh | nan(op1) | nan(op2); spef_finvh = spef_fxh; setSummarySPEFSCR(); } macro setSPEFSCRMulFlags_L(op1, op2, result) { setSPEFSCR_L(result); spef_fx = spef_fx | nan(op1) | nan(op2); spef_finv = spef_fx; setSummarySPEFSCR(); } macro setSPEFSCRMulFlags_H(op1, op2, result) { setSPEFSCR_H(result); spef_fxh = spef_fxh | nan(op1) | nan(op2); spef_finvh = spef_fxh; setSummarySPEFSCR(); } macro setSPEFSCRSubFlags_L(op1, op2, result) { setSPEFSCR_L(result); spef_fx = spef_fx | nan(op1) | nan(op2); spef_finv = spef_fx; setSummarySPEFSCR(); } macro setSPEFSCRSubFlags_H(op1, op2, result) { setSPEFSCR_H(result); spef_fxh = spef_fxh | nan(op1) | nan(op2); spef_finvh = spef_fxh; setSummarySPEFSCR(); } macro packSPEFSCR(tmp) { packbits(tmp, spef_sovh, spef_ovh, spef_fgh, spef_fxh, spef_finvh, spef_fdbzh, spef_funfh, spef_fovfh, spef_reserved1, spef_reserved2, spef_finxs, spef_finvs, spef_fdbzs, spef_funfs, spef_fovfs, spef_reserved3, spef_sov, spef_ov, spef_fg, spef_fx, spef_finv, spef_fdbz, spef_funf, spef_fovf, spef_reserved4, spef_finxe, spef_finve, spef_fdbze, spef_funfe, spef_fovfe, spef_frmc0, spef_frmc1 ); } macro unpackSPEFSCR(tmp) { unpackbits(tmp, spef_sovh, spef_ovh, spef_fgh, spef_fxh, spef_finvh, spef_fdbzh, spef_funfh, spef_fovfh, spef_reserved1, spef_reserved2, spef_finxs, spef_finvs, 
spef_fdbzs, spef_funfs, spef_fovfs, spef_reserved3, spef_sov, spef_ov, spef_fg, spef_fx, spef_finv, spef_fdbz, spef_funf, spef_fovf, spef_reserved4, spef_finxe, spef_finve, spef_fdbze, spef_funfe, spef_fovfe, spef_frmc0, spef_frmc1 ); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/SPE_APU.sinc ================================================ # Based on "EREF: A Reference for Motorola Book E and e500 Core" document version 01/2004 Rev2 # Instructions that are specific to the (PowerPC) e500 core are implemented as auxiliary processing units (APUs) # Signal Processing Engine APU (SPE APU) @ifdef BIT_64 @define MEMMASK "0xFFFFFFFFFFFFFFFF" @else @define MEMMASK "0xFFFFFFFF" @endif # There are three versions of e500 core, namely e500v1, the e500v2, and the e500mc. # A 64-bit evolution of the e500mc core is called e5500 core. # All PowerQUICC 85xx devices are based on e500v1 or e500v2 cores. # The SPE, and embedded SPFP functionality is implemented in # the MPC8540, the MPC8560 and in their derivatives (that is, in # all PowerQUICC III devices). However, these instructions will # not be supported in devices subsequent to PowerQUICC III. # version 1.0 # SPEFSCR.OVH Integer Overflow High bit 33 # SPEFSCR.OV Integer Overflow bit 49 # SPEFSCR.SOVH Summary Integer Overflow High bit 32 # SPEFSCR.SOV Summary Integer Overflow bit 48 # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh # SPEFSCR.SOV = SPEFSCR.SOV | ovl # The SPE requires a GPR register file with thirty-two # 64-bit registers. For 32-bit implementations, instructions # that normally operate on a 32-bit register file # access and change only the least significant 32-bits of # the GPRs leaving the most significant 32-bits # unchanged. For 64-bit implementations, operation of # these instructions is unchanged, i.e. those instructions # continue to operate on the 64-bit registers as they # would if the SPE was not implemented. 
Most SPE # instructions view the 64-bit register as being composed # of a vector of two elements, each of which is 32 bits # wide (some instructions read or write 16-bit elements). # The most significant 32-bits are called the upper word, # high word or even word. The least significant 32-bits # are called the lower word, low word or odd word. # Unless otherwise specified, SPE instructions write all # 64-bits of the destination register. # Key to some symbols used in descriptions # RT.l => low part of RT 0-31 bits # RT.h => high part of RT 32-63 bits # RT.t => total RT 0-63 bits # temp.b31 => bit 31 of temp # temp.B1 => byte 1 of temp # temp.S0 => first 2 bytes # ABS() # EXTZ() => Result of extending x on the left with sign bits # SATURATE() => # ONESCOMP() => one's complement # CR.bsub(..:..)=> bit range # >u => unsigned greaterthan # EQUIV => Equivalence logical operators = (a ^ (ONESCOMP(B))) # *si Signed-integer multiplication # *ui Unsigned-integer multiplication # *gsf # Guarded signed fractional multiplication. # Result of multiplying 2 signed fractional # quantities having bit length 16 taking the # least significant 31 bits of the sign # extended product and concatenating a 0 # to the least significant bit forming a # guarded signed fractional result of 64 bits. # Since guarded signed fractional multiplication # produces a 64-bit result, fractional # input quantities of -1 and -1 can produce # +1 in the intermediate product. Two 16-bit # fractional quantities, a and b are multiplied, # as shown below: # ea0:31 = EXTS(a) # eb0:31 = EXTS(b) # prod0:63 = ea X eb # eprod0:63 = EXTS(prod32:63) # result0:63 = eprod1:63 || 0b0 define pcodeop GuardedSignedFractionalMultiplication; # *sf # Signed fractional multiplication. 
Result of # multiplying 2 signed fractional quantities # having bit length n taking the least significant # 2n-1 bits of the sign extended product # and concatenating a 0 to the least significant # bit forming a signed fractional result # of 2n bits. Two 16-bit signed fractional # quantities, a and b are multiplied, as # shown below: # ea0:31 = EXTS(a) # eb0:31 = EXTS(b) # prod0:63 = ea X eb # prod0:63 = EXTS(prod32:63) # result0:31 = eprod33:63 || 0b0 define pcodeop SignedFractionalMultiplication; # ================================================================== # ======================================================================= # Page D-10 # evabs RT,RA # ISA-cmt: Vector Absolute Value # evabs rD,rA 010 0000 1000 SPE_APU_Vector_Instructions :evabs D,A is OP=4 & D & A & XOP_0_10=0x208 & BITS_11_15=0 { # RT.l = ABS(RA.l); # RT.h = ABS(RA.h); temp:8 = zext(A); lo:8 = (( temp & (0x00000000FFFFFFFF) ) ); lo = zext( ((lo:4 + (lo:4 >> 32)) ^ (lo:4 >> 32)) ); hi:8 = (( temp & (0xFFFFFFFF00000000) ) >> 32); hi = zext( ((hi:4 + (hi:4 >> 32)) ^ (hi:4 >> 32)) ); D = (( zext(hi) << 32) | zext(lo) ); } # evaddiw RT,RB,UI # ISA-cmt: Vector Add Immediate Word # evaddiw rD,BU_UIMM,rB 010 0000 0010 SPE_APU_Vector_Instructions :evaddiw D,B,BU_UIMM is OP=4 & D & BU_UIMM & B & XOP_0_10=0x202 { # RT.l = RB.l + EXTZ(UI); # RT.h = RB.h + EXTZ(UI); tmp:8 = BU_UIMM; lo:8 = (( B & (0x00000000FFFFFFFF) ) ) + (tmp & 0xFFFF); hi:8 = (( B & (0xFFFFFFFF00000000) ) >> 32) + (tmp & 0xFFFF); D = (( zext(hi) << 32) | zext(lo) ); } # evaddsmiaaw RT,RA # ISA-cmt: Vector Add Signed, Modulo, Integer to Accumulator Word # evaddsmiaaw rD,rA 100 1100 1001 SPE_APU_Vector_Instructions :evaddsmiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C9 & BITS_11_15=0 { # RT.l = ACC.l + RA.l; # RT.h = ACC.h + RA.h; # ACC.t = RT.t; lo:8 = (( ACC & (0x00000000FFFFFFFF) ) ) + (( A & (0x00000000FFFFFFFF) ) ); hi:8 = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + (( A & (0xFFFFFFFF00000000) ) >> 32); D = (( zext(hi) << 32) | 
zext(lo) ); ACC = D; } # macro SATURATE(ov,carry, sat_ovn, sat_ov, val) { # sat = (ov * carray)*sat_ovn + (ov * !carray)*sat_ov + (! ov) * val; # } # evaddssiaaw RT,RA # ISA-cmt: Vector Add Signed, Saturate, Integer to Accumulator Word # evaddssiaaw rD,rA 100 1100 0001 SPE_APU_Vector_Instructions define pcodeop VectorAddSignedSaturateIntgerToAccumulatorWord1; define pcodeop VectorAddSignedSaturateIntgerToAccumulatorWord2; :evaddssiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C1 & BITS_11_15=0 { # TODO definition complicated SATURATE() # temp.t = EXTS(ACC.l) + EXTS(RA.l); # ovh = temp.b31 ^ temp.b32; # RT.l = SATURATE(ovh, temp.b31, 0x8000_0000, 0x7FFF_FFFF, temp.h); # temp.t = EXTS(ACC.h) + EXTS(RA.h); # ovl = temp.31 ^ temp.32; # RT.h = SATURATE(ovl, temp.b31, 0x8000_0000, 0x7FFF_FFFF, temp.h); # ACC.t = RT.t; # SPEFSCR.ovh = ovh; # SPEFSCR.ov = ov; # SPEFSCR.sovh = SPEFSCR.sovh | ovh; # SPEFSCR.sov = SPEFSCR.sov | ovh; # temp:8 = sext( extrBytes(ACC,8,4,0) ) + sext( extrBytes(A,8,4,0) ); # ovh = getBits(temp,31,31,8) ^ getBits(temp,32,32,8); # SATURATE(ovh, getBits(temp,31,31,8), 0x80000000,0x7FFFFFFF, temp); # lo = sat; D = VectorAddSignedSaturateIntgerToAccumulatorWord1(ACC, A); spr200 = VectorAddSignedSaturateIntgerToAccumulatorWord2(ACC, A); } # evaddumiaaw RT,RA # ISA-cmt: Vector Add Unsigned, Modulo, Integer to Accumulator Word # evaddumiaaw rD,rA 100 1100 1000 SPE_APU_Vector_Instructions :evaddumiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C8 & BITS_11_15=0 { # RT.l = ACC.l + RA.l; # RT.h = ACC.h + RA.h; # ACC.t = RT.t; lo:8 = (( ACC & (0x00000000FFFFFFFF) ) ) + (( A & (0x00000000FFFFFFFF) ) ); hi:8 = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + (( A & (0xFFFFFFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evaddusiaaw RT,RA # ISA-cmt: Vector Add Unsigned, Saturate, Integer to Accumulator Word # evaddusiaaw rD,rA 100 1100 0000 define pcodeop VectorAddUnsignedSaturateIntegerToAccumulatorWord1; define pcodeop 
VectorAddUnsignedSaturateIntegerToAccumulatorWord2; :evaddusiaaw D,A is OP=4 & D & A & XOP_0_10=0x4C0 & BITS_11_15=0 { # TODO definition complicated SATURATE() # temp.t = EXTZ(ACC.l) + EXTZ(RA.l); # ovh = temp.b31; # RT.l = SATURATE(ovh, temp.31, 0xFFFF_FFFF, 0xFFFF_FFFF, temp.h); # ovl = temp.b31 # RT.h = SATURATE(ovl, temp.31, 0xFFFF_FFFF, 0xFFFF_FFFF, temp.h); # ACC.t = RT.t; # SPEFSCR.ovh = ovh; # SPEFSCR.ov = SPESCR.sovh | ovh; # SPEFSCR.sovh = SPESCR.sov | ovl; D = VectorAddUnsignedSaturateIntegerToAccumulatorWord1(ACC, A); spr200 = VectorAddUnsignedSaturateIntegerToAccumulatorWord2(ACC, A); } # evaddw RT,RA,RB # ISA-cmt: Vector Add Word # evaddw rD,rA,rB 010 0000 0000 :evaddw D,A,B is OP=4 & D & A & B & XOP_0_10=0x200 { # RT.l = RA.l + RB.l; # RT.h = RA.h + RB.h; lo:8 = (( A & (0x00000000FFFFFFFF) ) ) + (( B & (0x00000000FFFFFFFF) ) ); hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) + (( B & (0xFFFFFFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evand RT,RA,RB # ISA-cmt: Vector AND # evand rD,rA,rB 010 0001 0001 :evand D,A,B is OP=4 & D & A & B& XOP_0_10=0x211 { # RT.l = RA.l & RB.l; # RT.h = RA.h & RB.h; lo:8 = (( A & (0x00000000FFFFFFFF) ) ) & (( B & (0x00000000FFFFFFFF) ) ); hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) & (( B & (0xFFFFFFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evandc RT,RA,RB # ISA-cmt: Vector AND with Complement # evandc rD,rA,rB 010 0001 0010 :evandc D,A,B is OP=4 & D & A & B & XOP_0_10=0x212 { # RT.l = RA.l & (ONESCOMP(RB.l)); # RT.h = RA.h & (ONESCOMP(RB.h)); lo:8 = (( A & (0x00000000FFFFFFFF) ) ) & (~ (( B & (0x00000000FFFFFFFF) ) )); hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) & (~ (( B & (0xFFFFFFFF00000000) ) >> 32)); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evcmpeq BF,RA,RB # ISA-cmt: Vector Compare Equal # evcmpeq crfD,rA,rB 010 0011 0100 :evcmpeq crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x234 & BITS_21_22=0 { # ah = RA.l # al = RA.h # bh = RB.l # bl = RB.h # if (ah 
== bh) { # ch = 1; # } else { # ch = 0; # } # if (al == bl) { # cl = 1; # } else { # cl = 0; # } # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl); lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF); hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32); b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); if (hi == b_hi) goto ; ch:1 = 0; ch = 1; if (lo == b_lo) goto ; cl:1 = 0; ch = 1; crfD = (ch | (cl < 1) | ((ch|cl) < 2) | ((ch&cl) < 3)); } # evcmpgts BF,RA,RB # ISA-cmt: Vector Compare Greater Than Signed # evcmpgts crfD,rA,rB 010 0011 0001 :evcmpgts crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x231 & BITS_21_22=0 { # ah = RA.l; # al = RA.h; # bh = RB.l; # bl = RB.h; # if (ah > bh) { # ch = 1; # } else { # ch = 0; # } # if (al > bl) { # cl = 1; # } else { # ch = 0; # } # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl); lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF); hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32); b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); if (hi s> b_hi) goto ; ch:1 = 0; ch = 1; if (lo s> b_lo) goto ; cl:1 = 0; cl = 1; crfD = (ch | (cl < 1) | ((ch|cl) < 2) | ((ch&cl) < 3)); } # evcmpgtu BF,RA,RB # ISA-cmt: Vector Compare Greater Than Unsigned # evcmpgtu crfD,rA,rB 010 0011 0000 :evcmpgtu crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x230 & BITS_21_22=0 { # ah = RA.l; # al = RA.h; # bh = RB.l; # bl = RB.h; # if (ah >u bh) { # ch = 1; # } else { # ch = 0; # } # if (al >u bl) { # cl = 1; # } else { # cl = 0; # } # CR.bsub(4xBF+32:4xBF+35) = ch || cl || (ch | cl) || (ch & cl); lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF); hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32); b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); if (hi > b_hi) goto ; ch:1 = 0; ch = 1; if (lo > b_lo) goto ; cl:1 = 0; cl = 1; crfD = (ch | (cl < 1) | ((ch|cl) < 2) | 
((ch&cl) < 3)); } # evcmplts BF,RA,RB # ISA-cmt: Vector Compare Less Than Signed # evcmplts crfD,rA,rB 010 0011 0011 :evcmplts crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x233 & BITS_21_22=0 { # ah = RA.l; # al = RA.h; # bh = RB.l; # bl = RB.h; # if (ah < bh) { # ch = 1; # } else { # ch = 0; # } # if (al < bl) { # cl = 1; # } else { # cl = 0; # } # CR.bsub(4xBF+32:4xBF+35) = ch || ch || (ch | cl) || (ch & cl); lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF); hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32); b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); if (hi s< b_hi) goto ; ch:1 = 0; ch = 1; if (lo s< b_lo) goto ; cl:1 = 0; cl = 1; crfD = (ch | (cl < 1) | ((ch|cl) < 2) | ((ch&cl) < 3)); } # evcmpltu BF,RA,RB # ISA-cmt: Vector Compare Less Than Unsigned # evcmpltu crfD,rA,rB 010 0011 0010 :evcmpltu crfD,A,B is OP=4 & crfD & A & B & XOP_0_10=0x232 & BITS_21_22=0 { # ah = RA.l; # al = RA.h; # bh = RB.l; # bl = RB.h; # if (ah > 32); b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); if (hi < b_hi) goto ; ch:1 = 0; ch = 1; if (lo < b_lo) goto ; cl:1 = 0; cl = 1; crfD = (ch | (cl < 1) | ((ch|cl) < 2) | ((ch&cl) < 3)); } # evcntlsw RT,RA # ISA-cmt: Vector Count Leading Signed Bits Word # evcntlsw rD,rA 010 0000 1110 define pcodeop VectorCountLeadingSignBitsWord; :evcntlsw D,A is OP=4 & D & A & XOP_0_10=0x20E & BITS_11_15=0 { # TODO definition complicated # n = 0; # s = RA.b(n); # do while (n < 32) { # if (RA.b(n) != s) { # leave; # } else { # n = n + 1; # } # RT.l = n; # n = 0; # s = RA.b(n+32); # do while (n < 32) { # if (RA.b(n+32) != s) { # leave; # } # n = n + 1; # } # RT.h = n; # } D = VectorCountLeadingSignBitsWord(A); } # evcntlzw RT,RA # ISA-cmt: Vector Count Leading Zeros Word # evcntlzw rD,rA 010 0000 1101 define pcodeop VectorCountLeadingZerosWord; :evcntlzw D,A is OP=4 & D & A & XOP_0_10=0x20D & BITS_11_15=0 { # TODO definition # n = 0; 
# do while (n < 32) { # if (RA.b(n) = 1) { # leave; # } else { # n = n + 1; # } # } # RT.l = n; # n = 0; # do while (n < 32) { # if (RA.b(n+32) == 1) { # leave; # } else { # n = n + 1; # } # } # RT.h = n; D = VectorCountLeadingZerosWord(A); } # evdivws RT,RA,RB # ISA-cmt: Vector Divide Word Signed # evdivws rD,rA,rB 100 1100 0110 define pcodeop VectorDivideWordSigned1; define pcodeop VectorDivideWordSigned2; :evdivws D,A,B is OP=4 & D & A & B & XOP_0_10=0x4C6 { # TODO definition complicated # SPEFSCR.OVH = ovh; # SPEFSCR.OV = ovl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl; D = VectorDivideWordSigned1(A,B); flags:8 = VectorDivideWordSigned2(A,B); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evdivwu RT,RA,RB # ISA-cmt: Vector Divide Word Unsigned # evdivwu rD,rA,rB 100 1100 0111 define pcodeop VectorDivideWordUnsigned1; define pcodeop VectorDivideWordUnsigned2; :evdivwu D,A,B is OP=4 & D & A & B & XOP_0_10=0x4C7 { # TODO definition complicated # SPEFSCR.OV = ovl # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh # SPEFSCR.SOV = SPEFSCR.SOV | ovl D = VectorDivideWordUnsigned1(A,B); flags:8 = VectorDivideWordUnsigned2(A,B); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # eveqv RT,RA,RB # ISA-cmt: Vector Equivalent # eveqv rD,rA,rB 010 0001 1001 :eveqv D,A,B is OP=4 & D & A & B & XOP_0_10=0x219 { # RT.l = EQUIV(RA.l, RB.l); # RT.h = EQUIV(RA.h, RB.h); lo:$(REGISTER_SIZE) = (A & 0x00000000FFFFFFFF); hi:$(REGISTER_SIZE) = ((A & 0xFFFFFFFF00000000) >> 32); b_lo:$(REGISTER_SIZE) = (B & 0x00000000FFFFFFFF); b_hi:$(REGISTER_SIZE) = ((B & 0xFFFFFFFF00000000) >> 32); lo = lo ^ b_lo; # TODO check hi = hi ^ b_hi; D = ((hi << 32) | lo); } # evextsb RT,RA # 
ISA-cmt: Vector Extend Sign Byte # evextsb rD,rA 010 0000 1010 :evextsb D,A is OP=4 & D & A & XOP_0_10=0x20A & BITS_11_15=0 { # RT.l = EXTS(RA.B3); # RT.h = EXTS(RA.B7); lo:$(REGISTER_SIZE) = (( A & (0x00000000FF000000) ) >> 24); hi:$(REGISTER_SIZE) = (( A & (0xFF00000000000000) ) >> 56); lo = sext(lo:1); hi = sext(hi:1); D = (( zext(hi) << 32) | zext(lo) ); } # evextsh RT,RA # ISA-cmt: Vector Extend Sign Halfword # evextsh rD,rA, 010 0000 1011 :evextsh D,A is OP=4 & D & A & XOP_0_10=0x20B & BITS_11_15=0 { # RT.l = EXTS(RA.S1); # RT.h = EXTS(RA.S3); lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16); hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48); lo = sext(lo:2); hi = sext(hi:2); D = (( zext(hi) << 32) | zext(lo) ); } # ======================================================================= # Page D-11 # evldd RT,D(RA) # ISA-cmt: Vector Load Double Word into Double Word # evldd rD,d(rA) :evldd RT,dUI16PlusRAOrZeroAddress is OP=4 & RT & dUI16PlusRAOrZeroAddress & XOP_0_10=769 { ea:$(REGISTER_SIZE) = dUI16PlusRAOrZeroAddress; RT = *:8 ($(EATRUNC)); } # evlddx RT,RA,RB # ISA-cmt: Vector Load Double Word into Double Word Indexed # evlddx :evlddx RT,RA_OR_ZERO,RB is OP=4 & RT & RA_OR_ZERO & RB & XOP_0_10=768 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; RT = *:8 ($(EATRUNC)); } # evldh RT,D(RA) # ISA-cmt: Vector Load Double into Four Halfwords # evldh rD,rA 011 0000 0101 :evldh RT,EVUIMM_8_RAt is OP=4 & RT & EVUIMM_8_RAt & XOP_0_10=0x305 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*8); # RT.S0 = MEM(EA, 2); # RT.S1 = MEM(EA+2, 2); # RT.S2 = MEM(EA+4, 2); # RT.S3 = MEM(EA+6, 2); EA:8 = EVUIMM_8_RAt; *:2 (RT) = *:2 ((EA) & $(MEMMASK)); *:2 (RT+2) = *:2 ((EA+2) & $(MEMMASK)); *:2 (RT+4) = *:2 ((EA+4) & $(MEMMASK)); *:2 (RT+6) = *:2 ((EA+6) & $(MEMMASK)); } # evldhx RT,RA,RB # ISA-cmt: Vector Load Double into Four Halfwords Indexed # evldhx rD,rA,rB 011 0000 0100 :evldhx D,A,B is OP=4 & A & D & B & XOP_0_10=0x304 { # if (RA == 0) { 
# --- SPE vector loads: evldw[x], evlhhesplat[x], evlhhossplat[x], evlhhousplat[x] ---
# Each constructor computes EA from either the combined immediate subtable
# (EVUIMM_*_RAt, which already folds in the RA==0 special case) or A + B for the
# indexed form, masks it with $(MEMMASK), and fills the 64-bit GPR halfword/word
# lanes per the inline ISA pseudo-code kept in the '#' comments.
# NOTE(review): this region is line-wrapped by extraction; code tokens are unchanged
# except for the one fix below.
# FIX: evlhhousplatx stored the zero-extended halfword to (D) twice; the second
# store must target (D+4) — see the immediate form evlhhousplat (RT then RT+4) and
# the pseudo-code "RT.l = EXTZ(MEM(EA, 2)); RT.h = EXTZ(MEM(EA, 2))".
# b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.S0 = MEM(EA, 2); # RT.S1 = MEM(EA+2, 2); # RT.S2 = MEM(EA+4, 2); # RT.S3 = MEM(EA+6, 2); EA:8 = A + B; *:2 (D) = *:2 ((EA) & $(MEMMASK)); *:2 (D+2) = *:2 ((EA+2) & $(MEMMASK)); *:2 (D+4) = *:2 ((EA+4) & $(MEMMASK)); *:2 (D+6) = *:2 ((EA+6) & $(MEMMASK)); } # evldw RT,D(RA) # ISA-cmt: Vector Load Double into Two Words # evldw rD,rA 011 0000 0011 :evldw RT,EVUIMM_8_RAt is OP=4 & RT & EVUIMM_8_RAt & XOP_0_10=0x303 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*8); # RT.l = MEM(EA, 4); # RT.h = MEM(EA+4, 4); EA:$(REGISTER_SIZE) = EVUIMM_8_RAt; *:4 (RT) = *:4 ((EA) & $(MEMMASK)); *:4 (RT+4) = *:4 ((EA+4) & $(MEMMASK)); } # evldwx RT,RA,RB # ISA-cmt: Vector Load Double into Two Words Indexed # evldwx rD,rA,rB 011 0000 0010 :evldwx D,A,B is OP=4 & A & B & D & XOP_0_10=0x302 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.l = MEM(EA, 4); # RT.h = MEM(EA+4, 4); EA:$(REGISTER_SIZE) = A + B; *:4 (D) = *:4 ((EA) & $(MEMMASK)); *:4 (D+4) = *:4 ((EA+4) & $(MEMMASK)); } # evlhhesplat RT,D(RA) # ISA-cmt: Vector Load Halfword into Halfwords Even and Splat # evlhhesplat rD,rA 011 0000 1001 :evlhhesplat RT,EVUIMM_2_RAt is OP=4 & RT & EVUIMM_2_RAt & XOP_0_10=0x309 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*2); # RT.S0 = MEM(EA,2); # RT.S1 = 0x0000; # RT.S2 = MEM(EA,2); # RT.S3 = 0x0000; EA:$(REGISTER_SIZE) = EVUIMM_2_RAt; *:2 (RT) = *:2 ((EA) & $(MEMMASK)); *:2 (RT+2) = 0x0000; *:2 (RT+4) = *:2 ((EA) & $(MEMMASK)); *:2 (RT+6) = 0x0000; } # evlhhesplatx RT,RA,RB # ISA-cmt: Vector Load Halfword into Halfwords Even and Splat Indexed # evlhhesplatx rD,rA,rB 011 0000 1000 :evlhhesplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x308 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.S0 = MEM(EA, 2); # RT.S1 = 0x0000; # RT.S2 = MEM(EA, 2); # RT.S3 = 0x0000; EA:$(REGISTER_SIZE) = A + B; *:2 (D) = *:2 ((EA) & $(MEMMASK)); *:2 (D+2) = 0x0000; *:2
(D+4) = *:2 ((EA) & $(MEMMASK)); *:2 (D+6) = 0x0000; } # evlhhossplat RT,D(RA) # ISA-cmt: Vector Load Halfword into Halfword Odd Signed and Splat # evlhhossplat rD,rA 011 0000 1111 :evlhhossplat RT,EVUIMM_2_RAt is OP=4 & RT & EVUIMM_2_RAt & XOP_0_10=0x30F { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*2); # RT.l = EXTS(MEM(EA, 2)); # RT.h = EXTS(MEM(EA, 2)); EA:$(REGISTER_SIZE) = EVUIMM_2_RAt; *:4 (RT) = sext( *:2 (((EA) & $(MEMMASK)))); *:4 (RT+4) = sext( *:2 (((EA) & $(MEMMASK)))); } # evlhhossplatx RT,RA,RB # ISA-cmt: Vector Load Halfword into Halfword Odd Signed and Splat Indexed # evlhhossplatx rD,rA,rB 011 0000 1110 :evlhhossplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x30E { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.l = EXTS(MEM(EA, 2)); # RT.h = EXTS(MEM(EA, 2)); EA:$(REGISTER_SIZE) = A + B; *:4 (D) = sext( *:2 (((EA) & $(MEMMASK)))); *:4 (D+4) = sext( *:2 (((EA) & $(MEMMASK)))); } # evlhhousplat RT,D(RA) # ISA-cmt: Vector Load Halfword into Halfword Odd Unsigned and Splat # evlhhousplat rD,rA 011 0000 1101 :evlhhousplat RT,EVUIMM_2_RAt is OP=4 & RT & EVUIMM_2_RAt & XOP_0_10=0x30D { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*2); # RT.l = EXTZ(MEM(EA, 2)); # RT.h = EXTZ(MEM(EA, 2)); EA:$(REGISTER_SIZE) = EVUIMM_2_RAt; *:4 (RT) = zext( *:2 (((EA) & $(MEMMASK)))); *:4 (RT+4) = zext( *:2 (((EA) & $(MEMMASK)))); } # evlhhousplatx RT,RA,RB # ISA-cmt: Vector Load Halfword into Halfword Odd Unsigned and Splat Indexed # evlhhousplatx rD,rA,rB 011 0000 1100 :evlhhousplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x30C { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.l = EXTZ(MEM(EA, 2)); # RT.h = EXTZ(MEM(EA, 2)); EA:$(REGISTER_SIZE) = A + B; *:4 (D) = zext( *:2 (((EA) & $(MEMMASK)))); *:4 (D+4) = zext( *:2 (((EA) & $(MEMMASK)))); } # evlwhe RT,D(RA) # ISA-cmt: Vector Load Word into Two Halfwords Even # evlwhe rD,rA 011 0001 0001 # evlwhe confict with mullhwu.
# --- SPE vector loads: evlwhe[x] (disabled), evlwhos[x], evlwhou[x], evlwhsplat[x] ---
# evlwhe/evlwhex are kept commented out because their encodings conflict with the
# mullhwu decode (see the "confict with mullhwu" notes retained below).
# evlwhos[x] sign-extend (sext) and evlwhou[x] zero-extend (zext) the two loaded
# halfwords into the word lanes; evlwhsplat[x] replicates each loaded halfword.
# NOTE(review): evlwhsplat is written against field RS while the sibling
# constructors use RT/D — presumably equivalent register fields; verify against
# the token definitions earlier in this file.
# NOTE(review): region is line-wrapped by extraction; code tokens unchanged.
# define pcodeop VectorLoadWordIntoTwoHalfWordsEven; # :evlwhe D,A is OP=4 & A & EVUIMM_4 & D & XOP_0_10=0x311 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # RT.S0 = MEM(EA, 2); # RT.S1 = 0x0000; # RT.S2 = MEM(EA+2, 2); # RT.S3 = 0x0000; # VectorLoadWordIntoTwoHalfWordsEven(D,A); # } # ================================================================= # Page D-12 # evlwhex RT,RA,RB # ISA-cmt: Vector Load Word into Two Halfwords Even Indexed # evlwhex rD,rA 011 0001 0000 # evlwhex confict with mullhwu # define pcodeop VectorLoadWordIntoTwoHalfWordsEvenIndexed; # :evlwhex D,A is OP=4 & B & A & D & XOP_0_10=0x310 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b+ RB; # RT.S0 = MEM(EA, 2); # RT.S1 = 0x0000; # RT.S2 = MEM(EA + 2, 2); # RT.S3 = 0x0000; # VectorLoadWordIntoTwoHalfWordsEvenIndexed(D,A); # } # evlwhos RT,D(RA) # ISA-cmt: Vector Load Word into Two Halfwords Odd Signed (with sign extension) # evlwhos rD,rA 011 0001 0111 :evlwhos RT,EVUIMM_4_RAt is OP=4 & EVUIMM_4_RAt & RT & XOP_0_10=0x317 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # RT.l = EXTS(MEM(EA, 2)); # RT.h = EXTS(MEM(EA+2, 2)); EA:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:4 (RT) = sext( *:2 (((EA) & $(MEMMASK)))); *:4 (RT+4) = sext( *:2 (((EA+2) & $(MEMMASK)))); } # evlwhosx RT,RA,RB # ISA-cmt: Vector Load Word into Two Halfwords Odd Signed Indexed (with sign extension) # evlwhosx rD,rA,rB 011 0001 0110 :evlwhosx D,A,B is OP=4 & A & B & D & XOP_0_10=0x316 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.l = EXTS(MEM(EA,2)); # RT.h = EXTS(MEM(EA+2, 2)); EA:$(REGISTER_SIZE) = A + B; *:4 (D) = sext( *:2 (((EA) & $(MEMMASK)))); *:4 (D+4) = sext( *:2 (((EA+2) & $(MEMMASK)))); } # evlwhou RT,D(RA) # ISA-cmt: Vector Load Word into Two Halfwords Odd Unsigned (zero-extended) # evlwhou rD,rA 011 0001 0101 :evlwhou RT,EVUIMM_4_RAt is OP=4 & EVUIMM_4_RAt & RT & XOP_0_10=0x315 { # if (RA == 0) { # b = 0; # } else { # b =
RA; # } # EA = b + EXTZ(UI*4); # RT.l = EXTZ(MEM(EA, 2)); # RT.h = EXTZ(MEM(EA+2, 2)); EA:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:4 (RT) = zext( *:2 (((EA) & $(MEMMASK)))); *:4 (RT+4) = zext( *:2 (((EA+2) & $(MEMMASK)))); } # evlwhoux RT,RA,RB # ISA-cmt: Vector Load Word into Two Halfwords Odd Unsigned Indexed (zero-extended) # evlwhoux rD,rA,rB 011 0001 0100 :evlwhoux D,A,B is OP=4 & A & B & D & XOP_0_10=0x314 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.l = EXTZ(MEM(EA,2)); # RT.h = EXTZ(MEM(EA+2,2)); EA:$(REGISTER_SIZE) = A + B; *:4 (D) = zext( *:2 (((EA) & $(MEMMASK)))); *:4 (D+4) = zext( *:2 (((EA+2) & $(MEMMASK)))); } # evlwhsplat RT,D(RA) # ISA-cmt: Vector Load Word into Two Halfwords and Splat # evlwhsplat rD,rA 011 0001 1101 :evlwhsplat RS,EVUIMM_4_RAt is OP=4 & XOP_0_10=0x31D & EVUIMM_4_RAt & RS { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # RT.S0 = MEM(EA,2); # RT.S1 = MEM(EA,2); # RT.S2 = MEM(EA+2,2); # RT.S3 = MEM(EA+2,2); EA:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:2 (RS) = *:2 ((EA) & $(MEMMASK)); *:2 (RS+2) = *:2 ((EA) & $(MEMMASK)); *:2 (RS+4) = *:2 ((EA+2) & $(MEMMASK)); *:2 (RS+6) = *:2 ((EA+2) & $(MEMMASK)); } # evlwhsplatx RT,RA,RB # ISA-cmt: Vector Load Word into Two Halfwords and Splat Indexed # evlwhsplatx rD,rA,rB 011 0001 1100 :evlwhsplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x31C { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.S0 = MEM(EA,2); # RT.S1 = MEM(EA,2); # RT.S2 = MEM(EA+2,2); # RT.S3 = MEM(EA+2,2); EA:$(REGISTER_SIZE) = A + B; *:2 (D) = *:2 ((EA) & $(MEMMASK)); *:2 (D+2) = *:2 ((EA) & $(MEMMASK)); *:2 (D+4) = *:2 ((EA+2) & $(MEMMASK)); *:2 (D+6) = *:2 ((EA+2) & $(MEMMASK)); } # evlwwsplat RT,D(RA) # ISA-cmt: Vector Load Word into Word and Splat # evlwwsplat rD,rA 011 0001 1001 # define pcodeop VectorLoadWordIntoWordAndSplat; # evlwwsplat conficts with maclhwu.
# --- evlwwsplat[x] (disabled), evmerge* (64-bit only), even-guarded multiply family ---
# evlwwsplat[x] stay commented out (encoding conflict with maclhwu, noted inline).
# evmergehi/hilo/lo/lohi shuffle 32-bit lanes via bit-range assignments and are
# only emitted when REGISTER_SIZE=="8" (@if/@endif guard).
# evmheg* accumulate a guarded even-halfword product into the 64-bit ACC register;
# the fractional forms delegate to the GuardedSignedFractionalMultiplication pcodeop.
# NOTE(review): the operand masks of 0x0000000000000000 in evmhesmf/evmhesmfa/
# evmhesmfaaw force a zero operand for the low product — this looks like a
# placeholder (cf. 0x000000000000FFFF used by evmhesmi); confirm against the SPE ISA.
# NOTE(review): region is line-wrapped by extraction; code tokens unchanged.
# :evlwwsplat RT,EVUIMM_4_RAt is OP=4 & A & D & EVUIMM_4_RAt & RT & XOP_0_10=0x319 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # RT.l = MEM(EA,4); # RT.h = MEM(EA,4); # VectorLoadWordIntoWordAndSplat(D,A); # } # evlwwsplatx RT,RA,RB # ISA-cmt: Vector Load Word into Word and Splat Indexed # evlwwsplatx rD,rA,rB 011 0001 1000 # define pcodeop VectorLoadWordIntoWordAndSplatIndexed; # evlwwsplatx conficts with maclhwu # :evlwwsplatx D,A,B is OP=4 & A & B & D & XOP_0_10=0x318 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # RT.l = MEM(EA,4); # RT.h = MEM(EA,4); # VectorLoadWordIntoWordAndSplatIndexed(D,A,B); # } # evmergehi RT,RA,RB # ISA-cmt: Vector Merge High # evmergehi rD,rA,rB 010 0010 1100 @if REGISTER_SIZE=="8" :evmergehi S,A,B is OP=4 & S & A & B & XOP_0_10=556 { S[32,32] = A[32,32]; S[ 0,32] = B[32,32]; } # evmergehilo RT,RA,RB # ISA-cmt: Vector Merge High/Low # evmergehilo rD,rA,rB 010 0010 1110 :evmergehilo S,A,B is OP=4 & S & A & B & XOP_0_10=558 { S[32,32] = A[32,32]; S[ 0,32] = B[ 0,32]; } # evmergelo RT,RA,RB # ISA-cmt: Vector Merge Low # evmergelo rD,rA,rB 010 0010 1101 :evmergelo S,A,B is OP=4 & S & A & B & XOP_0_10=557 { S[32,32] = A[0,32]; S[ 0,32] = B[0,32]; } # evmergelohi RT,RA,RB # ISA-cmt: Vector Merge Low/High # evmergelohi rD,rA,rB 010 0010 1111 :evmergelohi S,A,B is OP=4 & S & A & B & XOP_0_10=559 { S[32,32] = A[ 0,32]; S[ 0,32] = B[32,32]; } @endif # evmhegsmfaa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Fractional and Accumulate # evmhegsmfaa rD,rA,rB 101 0010 1011 :evmhegsmfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52B { # u64 temp; # temp = RA.S2 *gsf RB.S2; # RT = ACC + temp; # ACC = RT; D = ACC + GuardedSignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); ACC = D; } # evmhegsmfan RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Fractional and Accumulate Negative #
evmhegsmfan rD,rA,rB 101 1010 1011 :evmhegsmfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AB { # u64 temp; # temp = RA.S2 *gsf RB.S2; # RT = ACC - temp; # ACC = RT; D = ACC - GuardedSignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); ACC = D; } # evmhegsmiaa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Integer and Accumulate # evmhegsmiaa rD,rA,rB 101 0010 1001 :evmhegsmiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x529 { # u64 temp; # temp.l = RA.l2 *si RB.l2; # temp.h = EXTS(temp.l); # RT = ACC + temp; # ACC = RT; lo:8 = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); hi:8 = sext(lo:2); lo = (( zext(hi) << 32) | zext(lo) ); D = ACC + lo; ACC = D; } # evmhegsmian RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Signed, Modulo, Integer and Accumulate Negative # evmhegsmian rD,rA,rB 101 1010 1001 :evmhegsmian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5A9 { # u64 temp; # temp.l = RA.S2 *si RB.S2; # temp = EXTS(temp.l); # RT = ACC - temp; # ACC = RT; lo:8 = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); hi:8 = sext(lo:2); lo = (( zext(hi) << 32) | zext(lo) ); D = ACC - lo; ACC = D; } # evmhegumiaa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Unsigned, Modulo, Integer and Accumulate # evmhegumiaa rD,rA,rB 101 0010 1000 :evmhegumiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x528 { # u64 temp; # temp.l = RA.S2 *ui RB.S2; # temp = EXTZ(temp.l); # RT = ACC + temp; # ACC = RT; temp:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); temp = zext(temp:4); D = ACC + temp; ACC = D; } # evmhegumian RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Guarded, Unsigned, Modulo, Integer and Accumulate Negative # evmhegumian rD,rA,rB 101 1010 1000 :evmhegumian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5A8 { # u64 temp; # temp.l = RA.S2 *ui RB.S2; # temp = EXTZ(temp); # RT =
ACC - temp; # ACC = RT; temp:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); temp = zext(temp:4); D = ACC - temp; ACC = D; } # evmhesmf RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional # evmhesmf rD,rA,rB 100 0000 1011 :evmhesmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x40B { # RT = RA.S0 *sf RB.S0; # RT.S2 = RA.S2 *sf RB.S2; D = SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); D = (D & 0xFFFF) | ( (SignedFractionalMultiplication((( A & (0x0000FFFFFFFF0000) ) >> 16),(( B & (0x0000FFFFFFFF0000) ) >> 16)) ) << 16); } # evmhesmfa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional to Accumulator # evmhesmfa rD,rA,rB 100 0010 1011 :evmhesmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x42B { # RT.l = RA.S0 *sf RB.S0; # RT.h = RA.S2 *sf RB.S2; # ACC = RT; D = SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); D = (D & 0xFFFF) | ( (SignedFractionalMultiplication((( A & (0x0000FFFFFFFF0000) ) >> 16),(( B & (0x0000FFFFFFFF0000) ) >> 16)) ) << 16); ACC = D; } # evmhesmfaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Fractional and Accumulate into Words # evmhesmfaaw rD,rA,rB 101 0000 1011 :evmhesmfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50B { # u64 temp; # temp = RA.S0 *sf RB.S0; # RT.l = ACC.l + temp.l; # temp.l = RA.S2 *sf RB.S2; # RT = ACC.h + temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) + SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + SignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhesmfanw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed,
# --- evmhesm* modulo forms and evmhess* saturating forms ---
# The modulo-integer forms (evmhesmi/evmhesmia/evmhesmiaaw/evmhesmianw) compute the
# even-halfword products inline and pack lo/hi words back into D (and ACC for the
# accumulator variants). The saturating forms (evmhessf*, evmhessiaaw, evmhessianw)
# are too complex to model inline ("TODO definition complicated") and are lowered to
# paired user pcodeops: op1 yields the result, op2 yields a flag word whose
# OV/OVH/SOV/SOVH bits are merged into SPEFSCR (spr200) by the masked updates.
# NOTE(review): the 0x0000000000000000 operand masks in evmheumi/evmheumia (end of
# this region) zero the low product — presumably a placeholder for the low-halfword
# mask; confirm against the SPE ISA before relying on the low word.
# NOTE(review): region is line-wrapped by extraction; code tokens unchanged.
Modulo, Fractional and Accumulate Negative into Words # evmhesmfanw rD,rA,rB 101 1000 1011 :evmhesmfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58B { # u64 temp; # temp.l = RA.S0 *sf RB.S0; # RT.l = ACC.l - temp.l; # temp.l = RA.S2 *sf RB.S2; # RT.h = ACC.h - temp; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - SignedFractionalMultiplication( (( A & (0x0000000000000000) ) >> 16) , (( B & (0x0000000000000000) ) >> 16) ); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - SignedFractionalMultiplication( (( A & (0x0000FFFFFFFF0000) ) >> 16) , (( B & (0x0000FFFFFFFF0000) ) >> 16) ); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhesmi RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer # evmhesmi rD,rA,rB 100 0000 1001 :evmhesmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x409 { # RT.l = RA.S0 *si RB.S0; # RT.h = RA.S2 *si RB.S2; lo:$(REGISTER_SIZE) = (( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) ); hi:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); } # evmhesmia RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer to Accumulator # evmhesmia rD,rA,rB 100 0010 1001 :evmhesmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x429 { # RT.l = RA.S0 *si RB.S0; # RT.h = RA.S2 *si RB.s2; # ACC = RT; lo:$(REGISTER_SIZE) = (( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) ); hi:$(REGISTER_SIZE) = (( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhesmiaaw rD,rA,rB 101 0000 1001 # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer and Accumulate into Words :evmhesmiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x509 { # u64 temp; # temp.l = RA.S0 *si RB.S0; # RT.l = ACC.l + temp.l; # temp.l = RA.S2 *si RB.S2; # RT.h = ACC.h + temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) )
+ ((( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) )); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32)); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhesmianw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Modulo, Integer and Accumulate Negative into Words # evmhesmianw rD,rA,rB 101 1000 1001 :evmhesmianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x589 { # u64 temp; # temp.l = RA.S0 *si RB.S0; # RT.l = ACC.l - temp.l; # temp.l = RA.S2 *si RB.S2; # RT.S2 = ACC.S2 - temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x000000000000FFFF) ) ) * (( B & (0x000000000000FFFF) ) )); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0x0000FFFF00000000) ) >> 32) * (( B & (0x0000FFFF00000000) ) >> 32)); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhessf RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional # evmhessf rD,rA,rB 100 0000 0011 define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractional1; define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractional2; :evmhessf D,A,B is OP=4 & A & B & D & XOP_0_10=0x403 { # TODO definition complicated # SPEFSCR.OVH = movh; # SPEFSCR.OV = movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | movh; # SPEFSCR.SOV = SPEFSCR.SOV | movl; D = VectorMultiplyHalfWordsEvenSignedSaturateFractional1(A,B); flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractional2(A,B); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhessfa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional to Accumulator # evmhessfa rD,rA,rB 100 0010 0011 define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate1; define pcodeop
VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate2; :evmhessfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x423 { # SPEFSCR.OVH = movh; # SPEFSCR.OV = movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | movh; # SPEFSCR.SOV = SPEFSCR.SOV | movl; # TODO definition complicated D = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate1(A,B); ACC = D; flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAccumulate2(A,B); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhessfaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional and Accumulate into Words # evmhessfaaw rD,rA,rB 101 0000 0011 define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords1; define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords2; :evmhessfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x503 { # SPEFSCR.OVH = ovh | movh # SPEFSCR.OV = ovl| movl # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh # SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl # TODO definition complicated D = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhessfanw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Fractional and Accumulate Negative into Words # evmhessfanw rD,rA,rB 101 1000 0011 define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords1; define pcodeop
VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords2; :evmhessfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x583 { # SPEFSCR.OVH = ovh | movh; # SPEFSCR.OV = ovl| movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl; # TODO definition complicated D = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateFractionalAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhessiaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Integer and Accumulate into Words # evmhessiaaw rD,rA,rB 101 0000 0001 define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords1; define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords2; :evmhessiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x501 { # SPEFSCR.OVH = ovh; # SPEFSCR.OV = ovl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl; # TODO definition complicated D = VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhessianw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Signed, Saturate, Integer and Accumulate Negative into Words # evmhessianw rD,rA,rB 101 1000 0001 define pcodeop VectorMultiplyHalfWordsEvenSignedSaturateIntegerAndAccumulateNegativeIntoWords1; define
# --- evmheumi* / evmheus* (even unsigned) and odd-guarded evmhog* family ---
# Unsigned modulo forms compute products inline; the saturating evmheusiaaw /
# evmheusianw are lowered to paired pcodeops with SPEFSCR (spr200) flag merging,
# matching the pattern of the signed saturating forms above.
# NOTE(review): evmhogsmiaa/evmhogsmian only read A's top halfword and never
# reference B, although the pseudo-code says "RA.S3 *si RB.S3" — looks incomplete;
# confirm against the SPE ISA.
# NOTE(review): region is line-wrapped by extraction; code tokens unchanged.
XOP_0_10=0x508 { # u64 temp; # temp.l = RA.S0 *ui RB.S0; # RT.l = ACC.l + temp.l; # temp.l = RA.S2 *ui RB.S2; # RT.h = ACC.h + temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) + (( A & (0x0000000000000000) ) >> 16) * (( B & (0x0000000000000000) ) >> 16); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + (( A & (0x0000FFFFFFFF0000) ) >> 16) * (( B & (0x0000FFFFFFFF0000) ) >> 16); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmheumianw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Modulo, Integer and Accumulate Negative into Words # evmheumianw rD,rA,rB 101 1000 1000 :evmheumianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x588 { # u64 temp; # temp.l = RA.S0 *ui RB.S0; # RT.l = ACC.l - temp.l; # temp.l = RA.S2 *ui RB.S2; # RT.h = ACC.h - temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x0000000000000000) ) >> 32) - (( A & (0x0000000000000000) ) >> 16) * (( B & (0x0000000000000000) ) >> 16); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0x0000FFFFFFFF0000) ) >> 16) * (( B & (0x0000FFFFFFFF0000) ) >> 16); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmheusiaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Saturate, Integer and Accumulate into Words # evmheusiaaw rD,rA,rB 101 0000 0000 define pcodeop VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords1; define pcodeop VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords2; :evmheusiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x500 { # SPEFSCR.OVH = ovh; # SPEFSCR.OV = ovl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl; # TODO definition complicated D = VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsEvenUnsignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~
(0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmheusianw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Even, Unsigned, Saturate, Integer and Accumulate Negative into Words # evmheusianw rD,rA,rB 101 1000 0000 define pcodeop evmheusianwOP1; define pcodeop evmheusianwOP2; :evmheusianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x580 { # SPEFSCR.OVH = ovh; # SPEFSCR.OV = ovl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl; # TODO definition complicated D = evmheusianwOP1(A,B,ACC,spr200); flags:8 = evmheusianwOP2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhogsmfaa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Fractional and Accumulate # evmhogsmfaa rD,rA,rB 101 0010 1111 :evmhogsmfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52F { # u64 temp; # temp = RA.S3 *gsf RB.S3; # RT = ACC + temp; # ACC = RT; D = ACC + GuardedSignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); ACC = D; } # evmhogsmfan RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Fractional and Accumulate Negative # evmhogsmfan rD,rA,rB 101 1010 1111 :evmhogsmfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AF { # u64 temp; # temp = RA.S3 *gsf RB.S3; # RT = ACC - temp; # ACC = RT; D = ACC - GuardedSignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); ACC = D; } # evmhogsmiaa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Integer and Accumulate # evmhogsmiaa rD,rA,rB 101 0010 1101 :evmhogsmiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52D { # u64 temp; # temp.l = RA.S3 *si RB.S3; # temp
= EXTS(temp.l); # RT = ACC + temp; # ACC = RT; lo:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48); lo = sext(lo:2); D = ACC + lo; ACC = D; } # evmhogsmian RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Signed, Modulo, Integer and Accumulate Negative # evmhogsmian rD,rA,rB 101 1010 1101 :evmhogsmian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AD { # u64 temp; # temp.l = RA.S3 *si RB.S3; # temp = EXTS(temp); # RT = ACC - temp; # ACC = RT; lo:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48); lo = sext(lo:2); D = ACC - lo; ACC = D; } # evmhogumiaa RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Unsigned, Modulo, Integer and Accumulate # evmhogumiaa rD,rA,rB 101 0010 1100 :evmhogumiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x52C { # u64 temp; # tempo.l = RA.S3 *ui RB.S3; # temp = EXTZ(temp.l); # RT = ACC + temp; # ACC = RT; temp:8 = (( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16); temp = zext( (( temp & (0x0000000000000000) ) >> 32) ); D = ACC + temp; ACC = D; } # evmhogumian RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Guarded, Unsigned, Modulo, Integer and Accumulate Negative # evmhogumian rD,rA,rB 101 1010 1100 :evmhogumian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5AC { # u64 temp; # temp.l = RA.S3 *ui RB.S3; # temp = EXTZ(temp.l); # RT = ACC - temp; # ACC = RT; temp:8 = (( A & (0xFFFFFFFFFFFF0000) ) >> 16) * (( B & (0xFFFFFFFFFFFF0000) ) >> 16); temp = zext( (( temp & (0x0000000000000000) ) >> 32) ); D = ACC - temp; ACC = D; } # evmhosmf RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Fractional # evmhosmf rD,rA,rB 100 0000 1111 :evmhosmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x40F { # RT.l = RA.S1 *sf RB.S1; # RT.h = RA.S3 *sf RB.S3; lo:8 = SignedFractionalMultiplication( (( A & (0x00000000FFFF0000) ) >> 16) , (( B & (0x00000000FFFF0000) ) >> 16) ); hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFFFFFF0000) ) >> 16) , (( B & (0xFFFFFFFFFFFF0000) ) >> 16) ); D = ((
# --- evmhosm* odd modulo forms and evmhoss* odd saturating forms ---
# The odd-halfword modulo forms mirror the even family: inline products packed
# into lo/hi words of D; the saturating forms are lowered to paired pcodeops plus
# SPEFSCR (spr200) OV/OVH/SOV/SOVH merging, as elsewhere in this file.
# FIX: evmhosmiaaw and evmhosmianw ("...and Accumulate (Negative) into Words")
# omitted the ACC write-back, although their own inline pseudo-code says
# "# ACC = RT;" and the parallel even forms evmhesmiaaw/evmhesmianw both end with
# "ACC = D;". Added the missing "ACC = D;" to both constructors.
# NOTE(review): region is line-wrapped by extraction; all other tokens unchanged.
(0xFFFFFFFFFFFF0000) ) >> 16) ); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhosmi RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer # evmhosmi rD,rA,rB 100 0000 1101 :evmhosmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x40D { # RT.l = RA.S1 *si RB.S1; # RT.h = RA.S3 *si RB.S3; lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16); hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48); D = (( zext(hi) << 32) | zext(lo) ); } # evmhosmia RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer to Accumulator # evmhosmia rD,rA,rB 100 0010 1101 :evmhosmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x42D { # RT.l = RA.S1 *si RB.S1; # RT.h = RA.S3 *si RB.S3; # ACC = RT; lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16); hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhosmiaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer and Accumulate into Words # evmhosmiaaw rD,rA,rB 101 0000 1101 :evmhosmiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50D { # u64 temp; # temp.l = RA.S1 *si RB.S1; # RT.l = ACC.l + temp.l; # temp.l = RA.S3 *si RB.S3; # RT.h = ACC.h + temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) + ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16)); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48)); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhosmianw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Modulo, Integer and Accumulate Negative into Words # evmhosmianw rD,rA,rB 101 1000 1101 :evmhosmianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58D { # u64 temp; # temp.l = RA.S1 *si RB.S1; # RT.l = ACC.l - temp.l; #
temp.l = RA.S3 *si RB.SI; # RT.h = ACC.h - temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16)); hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48)); D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmhossf RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional # evmhossf rD,rA,rB 100 0000 0111 define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator1; define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2; :evmhossf D,A,B is OP=4 & A & B & D & XOP_0_10=0x407 { # SPEFSCR.OVH = movh; # SPEFSCR.OV = movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | movh; # SPEFSCR.SOV = SPEFSCR.SOV | movl; # TODO definition complicated D = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator1(A,B); flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2(A,B); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhossfa rD,rA,rB 100 0010 0111 # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional to Accumulator define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2a; define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2b; :evmhossfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x427 { # SPEFSCR.OVH = movh; # SPEFSCR.OV = movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | movh; # SPEFSCR.SOV = SPEFSCR.SOV | movl; # TODO definition complicated D = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2a(A,B); flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalToAccumulator2b(A,B); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~
(0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhossfaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional and Accumulate into Words # evmhossfaaw rD,rA,rB 101 0000 0111 define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords1; define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords2; :evmhossfaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x507 { # SPEFSCR.OVH = ovh | movh; # SPEFSCR.OV = ovl| movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl; # TODO definition complicated D = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhossfanw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Fractional and Accumulate Negative into Words # evmhossfanw rD,rA,rB 101 1000 0111 define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords1; define pcodeop VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords2; :evmhossfanw D,A,B is OP=4 & A & B & D & XOP_0_10=0x587 { # SPEFSCR.OVH = ovh | movh; # SPEFSCR.OV = ovl| movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh | movh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl| movl; # TODO definition complicated D = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsOddSignedSaturateFractionalAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) |
(flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhossiaaw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Integer and Accumulate into Words # evmhossiaaw rD,rA,rB 101 0000 0101 define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords1; define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords2; :evmhossiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x505 { # SPEFSCR.OVH = ovh; # SPEFSCR.OV = ovl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl; # TODO definition complicated D = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200); flags:8 = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmhossianw RT,RA,RB # ISA-cmt: Vector Multiply Halfwords, Odd, Signed, Saturate, Integer and Accumulate Negative into Words # evmhossianw rD,rA,rB 101 1000 0101 define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords1; define pcodeop VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords2; :evmhossianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x585 { # SPEFSCR.OVH = ovh; # SPEFSCR.OV = ovl; # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh; # SPEFSCR.SOV = SPEFSCR.SOV | ovl; # TODO definition complicated D = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords1(A,B,ACC,spr200); ACC = D; flags:8 = VectorMultiplyHalfWordsOddSignedSaturateIntegerAndAccumulateNegativeIntoWords2(A,B,ACC,spr200); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 =
(spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
	spr200 = spr200 | (flags & (0x100000000));
	spr200 = spr200 | (flags & (0x1000000000000));
}

# evmhoumi RT,RA,RB
# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer
# evmhoumi rD,rA,rB 100 0000 1100
:evmhoumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x40C
{
	# RT.l = RA.S1 *ui RB.S1;
	# RT.h = RA.S3 *ui RB.S3;
	lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16);
	# FIX: extract only halfword 3 (bits 63:48); the previous mask 0xFFFFFFFFFFFF0000 >> 16
	# let halfwords 1 and 2 leak into the product (compare evmhosmi).
	hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48);
	D = (( zext(hi) << 32) | zext(lo) );
}

# evmhoumia RT,RA,RB
# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer to Accumulator
# evmhoumia rD,rA,rB 100 0010 1100
:evmhoumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x42C
{
	# RT.l = RA.S1 *ui RB.S1;
	# RT.h = RA.S3 *ui RB.S3;
	# ACC = RT;
	lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16);
	# FIX: halfword 3 only (see evmhoumi).
	hi:$(REGISTER_SIZE) = (( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48);
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmhoumiaaw RT,RA,RB
# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer and Accumulate into Words
# evmhoumiaaw rD,rA,rB 101 0000 1100
:evmhoumiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x50C
{
	# u64 temp;
	# temp = RA.S1 *ui RB.S1;
	# RT.l = ACC.l + temp.l;
	# temp.l = RA.S3 *ui RB.S3;
	# RT.h = ACC.h + temp.l;
	# ACC = RT;
	# FIX: ACC.l mask was 0x0000000000000000 (always zero); select the low word as in evmhosmiaaw.
	lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) + ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16));
	# FIX: halfword 3 only (see evmhoumi).
	hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48));
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmhoumianw RT,RA,RB
# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Modulo, Integer and Accumulate Negative into Words
# evmhoumianw rD,rA,rB 101 1000 1100
:evmhoumianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x58C
{
	# u64 temp;
	# temp = RA.S1 *ui RB.S1;
	# RT.l = ACC.l - temp.l;
	# temp.l = RA.S3 *ui RB.S3;
	# RT.h = ACC.h - temp.l;
	# ACC = RT;
	# FIX: ACC.l mask was 0x0000000000000000 (always zero); select the low word as in evmhosmianw.
	lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x00000000FFFF0000) ) >> 16) * (( B & (0x00000000FFFF0000) ) >> 16));
	# FIX: halfword 3 only (see evmhoumi).
	hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0xFFFF000000000000) ) >> 48) * (( B & (0xFFFF000000000000) ) >> 48));
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmhousiaaw RT,RA,RB
# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Saturate, Integer and Accumulate into Words
# evmhousiaaw rD,rA,rB 101 0000 0100
define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords1;
define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords2;
:evmhousiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x504
{
	# SPEFSCR.OVH = ovh;
	# SPEFSCR.OV = ovl;
	# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
	# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
	# TODO definition complicated
	# NOTE(review): flags are computed after ACC has already been updated, unlike
	# evmhossfaaw which computes flags first -- confirm against the pcodeop's intent.
	D = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords1(A,B,ACC,spr200);
	ACC = D;
	flags:8 = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateIntoWords2(A,B,ACC,spr200);
	spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
	spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
	spr200 = spr200 | (flags & (0x100000000));
	spr200 = spr200 | (flags & (0x1000000000000));
}

# evmhousianw RT,RA,RB
# ISA-cmt: Vector Multiply Halfwords, Odd, Unsigned, Saturate, Integer and Accumulate Negative into Words
# evmhousianw rD,rA,rB 101 1000 0100
define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords1;
define pcodeop VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords2;
:evmhousianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x584
{
	# SPEFSCR.OVH = ovh;
	# SPEFSCR.OV = ovl;
	# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
	# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
	# TODO definition complicated
	D = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords1(A,B,ACC,spr200);
	ACC = D;
	flags:8 = VectorMultiplyHalfWordsOddUnsignedSaturateIntegerAndAccumulateNegativeIntoWords2(A,B,ACC,spr200);
	spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
	spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
	spr200 = spr200 | (flags & (0x100000000));
	spr200 = spr200 | (flags & (0x1000000000000));
}

# =================================================================
# Page D-14

# evmra RT,RA
# ISA-cmt: Initialize Accumulator
# evmra rD,rA 100 1100 0100
:evmra RT,RA is OP=4 & RT & RA & BITS_11_15=0 & XOP_0_10=1220
{
	ACC = zext(RA);
	RT = RA;
}

# evmwhsmf RT,RA,RB
# ISA-cmt: Vector Multiply Word High Signed, Modulo, Fractional
# evmwhsmf rD,rA,rB 100 0100 1111
:evmwhsmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x44F
{
	# u64 temp;
	# temp = RA.l *sf RB.l;
	# RT.l = temp.l;
	# temp = RA.h *sf RB.h;
	# RT.h = temp.l;
	# FIX: low-word operand mask was 0x0000000000000000 (lo always 0); select the low 32 bits.
	lo:8 = SignedFractionalMultiplication( (( A & (0x00000000FFFFFFFF) ) ) , (( B & (0x00000000FFFFFFFF) ) ) );
	hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) );
	D = (( zext(hi) << 32) | zext(lo) );
}

# evmwhsmfa RT,RA,RB
# ISA-cmt: Vector Multiply Word High Signed, Modulo, Fractional to Accumulator
# evmwhsmfa rD,rA,rB 100 0110 1111
:evmwhsmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x46F
{
	# u64 temp;
	# temp = RA.l *sf RB.l;
	# RT.l = temp.l;
	# temp = RA.h *sf RB.h;
	# RT.h = temp.l;
	# ACC = RT;
	# FIX: low-word operand mask was 0x0000000000000000 (lo always 0); select the low 32 bits.
	lo:8 = SignedFractionalMultiplication( (( A & (0x00000000FFFFFFFF) ) ) , (( B & (0x00000000FFFFFFFF) ) ) );
	hi:8 = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) );
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmwhsmi RT,RA,RB
# ISA-cmt: Vector Multiply Word High Signed, Modulo, Integer
# evmwhsmi rD,rA,rB 100 0100 1101
:evmwhsmi D,A,B is OP=4 & A & B & D &
XOP_0_10=0x44D { # u64 temp; # temp = RA.l *si RB.l; # RT.l = temp.l; # temp = RA.h *si RB.h; # RT.h = temp.l; lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) )) & 0xFFFFFFFF; hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32)) & 0xFFFFFFFF; D = (( zext(hi) << 32) | zext(lo) ); } # evmwhsmia RT,RA,RB # ISA-cmt: Vector Multiply Word High Signed, Modulo, Integer to Accumulator # evmwhsmia rD,rA,rB 100 0110 1101 :evmwhsmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x46D { # u64 temp; # temp = RA.l *si RB.l; # RT.l = temp.l; # temp = RA.h *si RB.h; # RT.h = temp.l; # ACC = RT; lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) )) & 0xFFFFFFFF; hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32)) & 0xFFFFFFFF; D = (( zext(hi) << 32) | zext(lo) ); ACC = D; } # evmwhssf RT,RA,RB # ISA-cmt: Vector Multiply Word High Signed, Saturate, Fractional # evmwhssf rD,rA,rB 100 0100 0111 define pcodeop VectorMultiplyWordHighSignedSaturateFractional1; define pcodeop VectorMultiplyWordHighSignedSaturateFractional2; :evmwhssf D,A,B is OP=4 & A & B & D & XOP_0_10=0x447 { # SPEFSCR.OVH = movh; # SPEFSCR.OV = movl; # SPEFSCR.SOVH = SPEFSCR.SOVH | movh; # SPEFSCR.SOV = SPEFSCR.SOV | movl; # TODO definition complicated D = VectorMultiplyWordHighSignedSaturateFractional1(A,B); flags:8 = VectorMultiplyWordHighSignedSaturateFractional2(A,B); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmwhssfa RT,RA,RB # ISA-cmt: Vector Multiply Word High Signed, Saturate, Fractional to Accumulator # evmwhssfa rD,rA,rB 100 0110 0111 define pcodeop VectorMultiplyWordHighSignedSaturateFractionalToAccumulator1; define pcodeop 
VectorMultiplyWordHighSignedSaturateFractionalToAccumulator2;
:evmwhssfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x467
{
	# SPEFSCR.OVH = movh;
	# SPEFSCR.OV = movl;
	# SPEFSCR.SOVH = SPEFSCR.SOVH | movh;
	# SPEFSCR.SOV = SPEFSCR.SOV | movl;
	# TODO definition complicated
	D = VectorMultiplyWordHighSignedSaturateFractionalToAccumulator1(A,B);
	ACC = D;
	flags:8 = VectorMultiplyWordHighSignedSaturateFractionalToAccumulator2(A,B);
	spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
	spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
	spr200 = spr200 | (flags & (0x100000000));
	spr200 = spr200 | (flags & (0x1000000000000));
}

# evmwhumi RT,RA,RB
# ISA-cmt: Vector Multiply Word High Unsigned, Modulo, Integer
# evmwhumi rD,rA,rB 100 0100 1100
:evmwhumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x44C
{
	# u64 temp;
	# temp = RA.l *ui RB.l;
	# RT.l = temp.l;
	# temp = RA.h *ui RB.h;
	# RT.h = temp.l;
	# FIX: low-word operand mask was 0x0000000000000000 (lo always 0); select the low 32 bits.
	lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) );
	lo = zext(lo:4);
	hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32);
	hi = zext(hi:4);
	D = (( zext(hi) << 32) | zext(lo) );
}

# evmwhumia RT,RA,RB
# ISA-cmt: Vector Multiply Word High Unsigned, Modulo, Integer to Accumulator
# evmwhumia rD,rA,rB 100 0110 1100
:evmwhumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x46C
{
	# u64 temp;
	# temp = RA.l *ui RB.l;
	# RT.l = temp.l;
	# temp = RA.h *ui RB.h;
	# RT.h = temp.l;
	# ACC = RT;
	# FIX: low-word operand mask was 0x0000000000000000 (lo always 0); select the low 32 bits.
	lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) );
	lo = zext(lo:4);
	hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32);
	hi = zext(hi:4);
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmwlsmi rD,rA,rB
# ISA-cmt: Vector Multiply Word Low Signed, Modulo, Integer and Accumulate into Words
# define VectorMultiplyWordLowUnsigned,ModuloInteger;
# YYY No definition in manual

# evmwhusiaaw rD,rA,rB 101 0100 0100
# TODO Not in PowerISA Version 2.06 manual?
define pcodeop evmwhusiaawOP;
:evmwhusiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x544
{
	evmwhusiaawOP(D,A,B);
}

# evmwhusianw rD,rA,rB 101 1100 0100
# TODO Not in PowerISA Version 2.06 manual?
define pcodeop evmwhusianwOP;
:evmwhusianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x5C4
{
	evmwhusianwOP(D,A,B,ACC);
}

# evmwlsmiaaw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Signed, Modulo, Integer and Accumulate into Words
# evmwlsmiaaw ??
# u64 temp;
# temp = RA.l *si RB.l;
# RT.l = ACC.l + temp.h;
# temp = RA.h *si RB.h;
# RT.h = ACC.h + temp.h;
# ACC = RT;

# evmwlsmianw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Signed, Modulo, Integer and Accumulate Negative in Words
# evmwlsmianw ??
# u64 temp;
# temp = RA.l *si RB.l;
# RT.l = ACC.l - temp.h;
# temp = RA.h *si RB.h;
# RT.h = ACC.h - temp.h;
# ACC = RT;

# evmwlssiaaw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Signed, Saturate, Integer and Accumulate into Words
# evmwlssiaaw rD,rA,rB 101 0100 0001
define pcodeop VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords1;
define pcodeop VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords2;
:evmwlssiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x541
{
	# SPEFSCR.OVH = ovh;
	# SPEFSCR.OV = ovl;
	# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
	# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
	# TODO definition complicated
	D = VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords1(A,B,ACC,spr200);
	ACC = D;
	flags:8 = VectorMultiplyWordLowSignedSaturateIntegerAndAccumulateInWords2(A,B,ACC,spr200);
	spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
	spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
	spr200 = spr200 | (flags & (0x100000000));
	spr200 = spr200 | (flags & (0x1000000000000));
}

# evmwlumi RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer
# evmwlumi rD,rA,rB 100 0100 1000
:evmwlumi D,A,B is OP=4 & A & B & D & XOP_0_10=0x448
{
	# u64 temp;
	# temp = RA.l *ui RB.l;
	# RT.l = temp.h;
	# temp = RA.h *ui RB.h;
	# RT.h = temp.h;
	# FIX: operand masks were 0x0000000000000000 (product always 0); use the low words.
	lo:8 = (( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) );
	lo = (( lo & (0xFFFFFFFF00000000) ) >> 32);
	hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32);
	# FIX: this result was assigned into lo, clobbering the low element and leaving hi
	# as the full 64-bit product.
	hi = (( hi & (0xFFFFFFFF00000000) ) >> 32);
	D = (( zext(hi) << 32) | zext(lo) );
}

# evmwlumia RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer to Accumulator
# evmwlumia rD,rA,rB 100 0110 1000
:evmwlumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x468
{
	# u64 temp;
	# temp = RA.l *ui RB.l;
	# RT.l = temp.h;
	# temp = RA.h *ui RB.h;
	# RT.h = temp.h;
	# ACC = RT;
	# FIX: operand masks were 0x0000000000000000 (product always 0); use the low words.
	lo:8 = (( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) );
	lo = (( lo & (0xFFFFFFFF00000000) ) >> 32);
	hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32);
	# FIX: was assigned into lo (see evmwlumi).
	hi = (( hi & (0xFFFFFFFF00000000) ) >> 32);
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmwlumiaaw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate into Words
# evmwlumiaaw rD,rA,rB 101 0100 1000
:evmwlumiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x548
{
	# u64 temp;
	# temp = RA.l *ui RB.l;
	# RT.l = ACC.l + temp.h;
	# temp = RA.h *ui RB.h;
	# RT.h = ACC.h + temp.h;
	# ACC = RT;
	# FIX: ACC.l and the low-word operand masks were 0x0000000000000000; select the low words.
	# NOTE(review): the pseudocode above adds temp.h but this adds the raw product, as upstream did -- confirm.
	lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) + ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) ));
	hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) + ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32));
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmwlumianw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Unsigned, Modulo, Integer and Accumulate Negative in Words
# evmwlumianw rD,rA,rB 101 1100 1000
:evmwlumianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x5C8
{
	# u64 temp;
	# temp = RA.l *ui RB.l;
	# RT.l = ACC.l - temp.h;
	# temp = RA.h *ui RB.h;
	# RT.h = ACC.h - temp.h;
	# ACC = RT;
	# FIX: ACC.l and the low-word operand masks were 0x0000000000000000; select the low words.
	lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - ((( A & (0x00000000FFFFFFFF) ) ) * (( B & (0x00000000FFFFFFFF) ) ));
	hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - ((( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32));
	D = (( zext(hi) << 32) | zext(lo) );
	ACC = D;
}

# evmwlusiaaw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate into Words
# evmwlusiaaw rD,rA,rB 101 0100 0000
define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords1;
define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords2;
:evmwlusiaaw D,A,B is OP=4 & A & B & D & XOP_0_10=0x540
{
	# SPEFSCR.OVH = ovh;
	# SPEFSCR.OV = ovl;
	# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
	# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
	# TODO definition complicated
	D = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords1(A,B,ACC,spr200);
	ACC = D;
	flags:8 = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateInWords2(A,B,ACC,spr200);
	spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
	spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
	spr200 = spr200 | (flags & (0x100000000));
	spr200 = spr200 | (flags & (0x1000000000000));
}

# evmwlusianw RT,RA,RB
# ISA-cmt: Vector Multiply Word Low Unsigned, Saturate, Integer and Accumulate Negative in Words
# evmwlusianw rD,rA,rB 101 1100 0000
define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords1;
define pcodeop VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords2;
:evmwlusianw D,A,B is OP=4 & A & B & D & XOP_0_10=0x5C0
{
	# SPEFSCR.OVH = ovh;
	# SPEFSCR.OV = ovl;
	# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
	# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
	# TODO definition complicated
	D = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords1(D,A,B,ACC,spr200);
	ACC = D;
	flags:8 = VectorMultiplyWordLowUnsignedSaturateIntegerAndAccumulateNegativeInWords2(D,A,B,ACC,spr200);
	spr200 = (spr200 & (~ (0x200000000)) ) |
(flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmwsmf RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional # evmwsmf rD,rA,rB 100 0101 1011 :evmwsmf D,A,B is OP=4 & A & B & D & XOP_0_10=0x45B { # RT = RA.h *sf RB.h; D = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ); } # evmwsmfa RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional to Accumulator # evmwsmfa rD,rA,rB 100 0111 1011 :evmwsmfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x47B { # RT = RA.h *sf RB.h; # ACC = RT; D = SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ); ACC = D; } # evmwsmfaa RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional and Accumulate # evmwsmfaa rD,rA,rB 101 0101 1011 101 0101 1011 :evmwsmfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x55B { # u64 temp; # temp = RA.h *sf RB.h; # RT = ACC + temp; # ACC = RT; D = ACC + ( SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ) ); ACC = D; } # evmwsmfan RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Fractional and Accumulate Negative # evmwsmfan rD,rA,rB 101 1101 1011 :evmwsmfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5DB { # u64 temp; # temp = RA.h *sf RB.h; # RT = ACC - temp; # ACC = RT; D = ACC - ( SignedFractionalMultiplication( (( A & (0xFFFFFFFF00000000) ) >> 32) , (( B & (0xFFFFFFFF00000000) ) >> 32) ) ); ACC = D; } # evmwsmi RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Integer # evmwsmi rD,rA,rB 100 0101 1001 # evmwsmi confict with machhwo. 
# :evmwsmi D,A,B is OP=4 & A & B & D & XOP_0_10=0x459 { # RT = RA.h *si RB.h; # } # evmwsmia RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Integer to Accumulator # evmwsmia rD,rA,rB 100 0111 1001 :evmwsmia D,A,B is OP=4 & A & B & D & XOP_0_10=0x479 { # RT = RA.h *si RB.h; # ACC = RT; D = (( A & (0xFFFFFFFF00000000) ) >> 32) * (( B & (0xFFFFFFFF00000000) ) >> 32); ACC = D; } # evmwsmiaa RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Integer and Accumulate # evmwsmiaa rD,rA,rB 101 0101 1001 # YYY duplicate??? # define pcodeop VectorMultiplyWordSignedModuloIntegerAndAccumulate2; # u64 temp; # temp = RA.h *si RB.h; # RT = ACC + temp; # ACC = RT; # :evmwsmiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x559 { # u64 temp; # temp = RA.h *si RB.h; # RT = ACC + temp; # ACC = RT; #} # evmwsmian RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Modulo, Integer and Accumulate Negative # evmwsmian rD,rA,rB 101 1101 1001 # evmwsmian confict with macchwso. # ppc_instructions.sinc :macchwso. 
D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=236 & Rc=1 # define pcodeop VectorMultiplyWordSignedModuloIntegerAndAccumulateNegative; # :evmwsmian D,A,B is OP=4 & A & B & D & XOP_0_10=0x5D9 { # u64 temp; # temp = RA.h *si RB.h; # RT = ACC - temp; # ACC = RT; # } # evmwssf RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional # evmwssf rD,rA,rB 100 0101 0011 define pcodeop VectorMultiplyWordSignedSaturateFractional1; define pcodeop VectorMultiplyWordSignedSaturateFractional2; :evmwssf D,A,B is OP=4 & A & B & D & XOP_0_10=0x453 { # SPEFSCR.OVH = 0; # SPEFSCR.OV = mov; # SPEFSCR.SOV = SPEFSCR.SOV | mov; # TODO definition D = VectorMultiplyWordSignedSaturateFractional1(D,A,B,ACC); ACC = D; flags:8 = VectorMultiplyWordSignedSaturateFractional2(D,A,B,ACC); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmwssfa RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional to Accumulator # evmwssfa rD,rA,rB 100 0111 0011 define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate1a; define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate1b; :evmwssfa D,A,B is OP=4 & A & B & D & XOP_0_10=0x473 { # SPEFSCR.OVH = 0; # SPEFSCR.OV = mov; # SPEFSCR.SOV = SPEFSCR.SOV | mov; # TODO definition D = VectorMultiplyWordSignedSaturateFractionalAndAccumulate1a(D,A,B,ACC); ACC = D; flags:8 = VectorMultiplyWordSignedSaturateFractionalAndAccumulate1b(D,A,B,ACC); spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmwssfaa RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional and Accumulate # evmwssfaa rD,rA,rB 101 0101 0011 define pcodeop 
VectorMultiplyWordSignedSaturateFractionalAndAccumulate2a; define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulate2b; :evmwssfaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x553 { # SPEFSCR.OVH = 0; # SPEFSCR.OV = ov | mov; # SPEFSCR.SOV = SPEFSCR.SOV | ov | mov; # TODO definition D = VectorMultiplyWordSignedSaturateFractionalAndAccumulate2a(A,B,ACC); flags:8 = VectorMultiplyWordSignedSaturateFractionalAndAccumulate2b(A,B,ACC); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # evmwssfan RT,RA,RB # ISA-cmt: Vector Multiply Word Signed, Saturate, Fractional and Accumulate Negative # evmwssfan rD,rA,rB 101 1101 0011 define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative1; define pcodeop VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative2; :evmwssfan D,A,B is OP=4 & A & B & D & XOP_0_10=0x5D3 { # SPEFSCR.OVH = 0; # SPEFSCR.OV = ov | mov; # SPEFSCR.SOV = SPEFSCR.SOV | ov | mov; # TODO definition D = VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative1(A,B,ACC,spr200); flags:8 = VectorMultiplyWordSignedSaturateFractionalAndAccumulateNegative2(A,B,ACC,spr200); ACC = D; spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000)); spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000)); spr200 = spr200 | (flags & (0x100000000)); spr200 = spr200 | (flags & (0x1000000000000)); } # ================================================================= # Page D-15 # evnand RT,RA,RB # ISA-cmt: Vector NAND # evnand rD,rA,rB 010 0001 1110 :evnand D,A,B is OP=4 & A & B & D & XOP_0_10=0x21E { # RT.l = ONESCOMP(RA.l & RB.l); # RT.h = ONESCOMP(RA.h & RB.h); lo:$(REGISTER_SIZE) = ~ ( (( A & (0x00000000FFFFFFFF) ) ) & (( B & (0x00000000FFFFFFFF) ) ) ); hi:$(REGISTER_SIZE) = ~ ( (( A & (0xFFFFFFFF00000000) ) >> 32) & (( B & 
(0xFFFFFFFF00000000) ) >> 32) ); D = (( zext(hi) << 32) | zext(lo) ); } # evneg RT,RA # ISA-cmt: Vector Negate # evneg rD,rA 010 0000 1001 :evneg D,A is OP=4 & A & D & XOP_0_10=0x209 & BITS_11_15=0 { # RT.l = NEG(RA.l); # RT.h = NEG(RA.h); lo:$(REGISTER_SIZE) = - (( A & (0x00000000FFFFFFFF) ) ); hi:$(REGISTER_SIZE) = - (( A & (0xFFFFFFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); } # evnor RT,RA,RB # ISA-cmt: Vector NOR # evnor rD,rA,rB 010 0001 1000 :evnor D,A,B is OP=4 & A & B & D & XOP_0_10=0x218 { # RT.l = ONESCOMP(RA.l | RB.l); # RT.h = ONESCOMP(RA.h | RB.h); lo:$(REGISTER_SIZE) = ~ ( (( A & (0x00000000FFFFFFFF) ) ) | (( B & (0x00000000FFFFFFFF) ) ) ); hi:$(REGISTER_SIZE) = ~ ( (( A & (0xFFFFFFFF00000000) ) >> 32) | (( B & (0xFFFFFFFF00000000) ) >> 32) ); D = (( zext(hi) << 32) | zext(lo) ); } # evnot => evnor # evor RT,RA,RB # ISA-cmt: Vector OR # evor rD,rA,rB 010 0001 0111 :evor D,A,B is OP=4 & A & B & D & XOP_0_10=0x217 { # RT.l = RA.l | RB.l; # RT.h = RA.h | RB.h; lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFFFFFF) ) ) | (( B & (0x00000000FFFFFFFF) ) ); hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) | (( B & (0xFFFFFFFF00000000) ) >> 32); D = (( zext(hi) << 32) | zext(lo) ); } # evorc RT,RA,RB # ISA-cmt: Vector OR with Complement # evorc rD,rA,rB 010 0001 1011 :evorc D,A,B is OP=4 & A & B & D & XOP_0_10=0x21B { # RT.l = RA.l | ONESCOMP(RB.l); # RT.h = RA.h | ONESCOMP(RB.h); lo:$(REGISTER_SIZE) = (( A & (0x00000000FFFFFFFF) ) ) | (~ (( B & (0x00000000FFFFFFFF) ) )); hi:$(REGISTER_SIZE) = (( A & (0xFFFFFFFF00000000) ) >> 32) | (~ (( B & (0xFFFFFFFF00000000) ) >> 32)); D = (( zext(hi) << 32) | zext(lo) ); } define pcodeop ROTL64; # evrlw RT,RA,RB # ISA-cmt: Vector Rotate Left Word # evrlw rD,rA,rB 010 0010 1000 :evrlw D,A,B is OP=4 & A & B & D & XOP_0_10=0x228 { # nh = RB.bsub(27:31); # nl = RB.bsub(59:63); # RT.l = ROTL(RA.l, nh); # RT.h = ROTL(RA.h, nl); nh:$(REGISTER_SIZE) = ((B & 0x00000000f8000000) >> 27); nl:$(REGISTER_SIZE) = 
((B & 0xf800000000000000) >> 59); lo:8 = ROTL64( (( A & (0x00000000FFFFFFFF) ) ) ,nh); hi:8 = ROTL64( (( A & (0xFFFFFFFF00000000) ) >> 32) ,nl); D = (( zext(hi) << 32) | zext(lo) ); } # evrlwi RT,RA,UI # ISA-cmt: Vector Rotate Left Word Immediate # evrlwi rD,rA,EVUIMM 010 0010 1010 :evrlwi D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x22A { # n = UI; # RT.l = ROTL(RA.l, n); # RT.h = ROTL(RA.h, n); n:8 = EVUIMM; lo:8 = ROTL64( (( A & (0x00000000FFFFFFFF) ) ) ,n); hi:8 = ROTL64( (( A & (0xFFFFFFFF00000000) ) >> 32) ,n); D = (( zext(hi) << 32) | zext(lo) ); } # evrndw RT,RA # ISA-cmt: Vector Round Word # evrndw rD,rA 010 0000 1100 :evrndw D,A is OP=4 & A & D & UIMM & XOP_0_10=0x20C { # RT.l = (RA.l + 0x00008000) & 0xFFFF0000; # RT.h = (RA.h + 0x00008000) & 0xFFFF0000; lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) + 0x00008000) & 0xFFFF0000; hi:$(REGISTER_SIZE) = ((( A & (0x00FFFFFFFF00000000) ) >> 32) + 0x00008000) & 0xFFFF0000; D = (( zext(hi) << 32) | zext(lo) ); } # SPECIAL ** YYY # evsel RT,RA,RB,BFA # ISA-cmt: Vector Select # evsel rD,rA,rB,crS 0100 1111 # define pcodeop VectorSelect; # :evsel D,A,B,crS is OP=4 & A & B & D & crS & XOP_3_10=0x4F { # TODO definition complicated # VectorSelect(D,A,B,crS); # } # evslw RT,RA,RB # ISA-cmt: Vector Shift Left Word # evslw rD,rA,rB 010 0010 0100 :evslw D,A,B is OP=4 & A & B & D & XOP_0_10=0x224 { # nh = RB.bsub(26:31); # nl = RB.bsub(58:63); # RT.l = SL(RA.l,nh); # RT.h = SL(RA.h,nl); nh:$(REGISTER_SIZE) = ((B & 0x00000000fc000000) >> 26); nl:$(REGISTER_SIZE) = ((B & 0xfc00000000000000) >> 58); lo:$(REGISTER_SIZE) = ((( A & (0x00000000FFFFFFFF) ) ) << nh); hi:$(REGISTER_SIZE) = ((( A & (0xFFFFFFFF00000000) ) >> 32) << nl); D = (( zext(hi) << 32) | zext(lo) ); } # c RT,RA,UI # ISA-cmt: Vector Shift Left Word Immediate # evslwi rD,rA,EVUIMM 010 0010 0110 :evslwi D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x226 { # n = UI; # RT.l = SL(RA.l, n); # RT.h = SL(RA.h, n); n:8 = EVUIMM; lo:8 = (( A & 
(0x00000000FFFFFFFF) ) ) << n; hi:8 = (( A & (0xFFFFFFFF00000000) ) >> 32) << n; D = (( zext(hi) << 32) | zext(lo) ); } # evsplatfi RT,SI # ISA-cmt: Vector Splat Fractional Immediate # evsplatfi rD,BU_SIMM 010 0010 1011 define pcodeop VectorSplatFractionalImmediate; :evsplatfi D,BU_SIMM is OP=4 & D & BU_SIMM & XOP_0_10=0x22B { # TODO definition # RT0:31 = SI || 270 # RT32:63 = SI || 270 # The value specified by SI is padded with trailing zeros # and placed in both elements of RT. The SI ends up in # bit positions RT0:4 and RT32:36. D = VectorSplatFractionalImmediate(); } # BU_SIMMt: is BU_SIMM [ val = BU_SIMM; ] { tmp:8 = sext(BU_SIMM); export tmp; } # evsplati RT,SI # ISA-cmt: Vector Splat Immediate # evsplati rD,BU_SIMM 010 0010 1001 define pcodeop VectorSplatImmediate; :evsplati D,BU_SIMM is OP=4 & D & BU_SIMM & XOP_0_10=0x229 { # RT.l = EXTS(SI); # RT.h = EXTS(SI); # lo:8 = BU_SIMMt; # sign or zext # hi:8 = BU_SIMM; # D = 64From2_32(hi,lo); D = VectorSplatImmediate(); } # evsrwis RT,RA,UI # ISA-cmt: Vector Shift Right Word Immediate Signed # evsrwis rD,rA,EVUIMM 010 0010 0011 define pcodeop VectorShiftRightWordImmediateSigned; :evsrwis D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x223 { # TODO definition # n = UI # RT0:31 = EXTS((RA)0:31-n) # RT32:63 = EXTS((RA)32:63-n) # Both high and low elements of RA are shifted right by # the 5-bit UI value. Bits in the most significant positions # vacated by the shift are filled with a copy of the sign bit. D = VectorShiftRightWordImmediateSigned(A); } # evsrwiu RT,RA,UI # ISA-cmt: Vector Shift Right Word Immediate Unsigned # evsrwiu rD,rA,EVUIMM 010 0010 0010 define pcodeop VectorShiftRightWordImmediateUnsigned; :evsrwiu D,A,EVUIMM is OP=4 & A & D & EVUIMM & XOP_0_10=0x222 { # TODO definition # n = UI # RT0:31 = EXTZ((RA)0:31-n) # RT32:63 = EXTZ((RA)32:63-n) # Both high and low elements of RA are shifted right by # the 5-bit UI value; zeros are shifted into the most significant # position. 
D = VectorShiftRightWordImmediateUnsigned(A);
}

@if REGISTER_SIZE=="8"
:evsrws S,A,B is OP=4 & S & A & B & XOP_0_10=545
{
    local low:4 = A[0,32];
    local high:4 = A[32,32];
    local low_shift:1 = B[0,5];
    local high_shift:1 = B[32,5];
    S[0,32] = low s>> zext(low_shift);
    S[32,32] = high s>> zext(high_shift);
}

:evsrwu S,A,B is OP=4 & S & A & B & XOP_0_10=544
{
    local low:4 = A[0,32];
    local high:4 = A[32,32];
    local low_shift:1 = B[0,5];
    local high_shift:1 = B[32,5];
    S[0,32] = low >> zext(low_shift);
    S[32,32] = high >> zext(high_shift);
}
@endif

# evstdd RS,D(RA)
# ISA-cmt: Vector Store Double of Double
# evstdd rD,rA,EVUIMM_8 011 0010 0001
:evstdd RS,dUI16PlusRAOrZeroAddress is OP=4 & RS & dUI16PlusRAOrZeroAddress & XOP_0_10=801
{
    ea:$(REGISTER_SIZE) = dUI16PlusRAOrZeroAddress;
    *:8 ($(EATRUNC)) = RS;
}

# evstddx RS,RA,RB
# ISA-cmt: Vector Store Double of Double Indexed
# evstddx rS,rA,rB 011 0010 0000
:evstddx RS,RA_OR_ZERO,RB is OP=4 & RS & RA_OR_ZERO & RB & XOP_0_10=800
{
    ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB;
    *:8 ($(EATRUNC)) = RS;
}

# evstdh RS,D(RA)
# ISA-cmt: Vector Store Double of Four Halfwords
# evstdh rS,rA,EVUIMM_8 011 0010 0101
:evstdh S,EVUIMM_8_RAt is OP=4 & S & EVUIMM_8_RAt & XOP_0_10=0x325
{
    # if (RA == 0) { b = 0; } else { b = RA; }
    # EA = b + EXTZ(UI*8);
    # MEM(EA,2) = RS.S0;
    # MEM(EA+2,2) = RS.S1;
    # MEM(EA+4,2) = RS.S2;
    # MEM(EA+6,2) = RS.S3;
    EA:$(REGISTER_SIZE) = EVUIMM_8_RAt;
    *:2 (EA) = *:2 ((S) & $(MEMMASK));
    *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK));
    *:2 (EA+4) = *:2 ((S+4) & $(MEMMASK));
    # FIX: last halfword was copy-pasted from S+4; per the pseudo-code above
    # (MEM(EA+6,2) = RS.S3) and the indexed form evstdhx it must come from S+6.
    *:2 (EA+6) = *:2 ((S+6) & $(MEMMASK));
}

# evstdhx RS,RA,RB
# ISA-cmt: Vector Store Double of Four Halfwords Indexed
# evstdhx rS,rA,rB 011 0010 0100
:evstdhx S,A,B is OP=4 & A & B & S & XOP_0_10=0x324
{
    # if (RA == 0) { b = 0; } else { b = RA; }
    # EA = b + RB;
    # MEM(EA,2) = RS.S0;
    # MEM(EA+2,2) = RS.S1;
    # MEM(EA+4,2) = RS.S2;
    # MEM(EA+6,2) = RS.S3;
    EA:$(REGISTER_SIZE) = A + B;
    *:2 (EA) = *:2 ((S) & $(MEMMASK));
    *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK));
    *:2
(EA+4) = *:2 ((S+4) & $(MEMMASK)); *:2 (EA+6) = *:2 ((S+6) & $(MEMMASK)); } # evstdw RS,D(RA) # ISA-cmt: Vector Store Double of Two Words # evstdw rS,rA,EVUIMM_8 011 0010 0011 :evstdw S,EVUIMM_8_RAt is OP=4 & S & EVUIMM_8_RAt & XOP_0_10=0x323 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*8); # MEM(EA,4) = RS.l; # MEM(EA+4,4) = RS.h; EA:$(REGISTER_SIZE) = EVUIMM_8_RAt; *:4 (EA) = *:4 ((S) & $(MEMMASK)); *:4 (EA+4) = *:4 ((S+4) & $(MEMMASK)); } # evstdwx RS,RA,RB # ISA-cmt: Vector Store Double of Two Words Indexed # evstdwx rS,rA,rB 011 0010 0010 :evstdwx S,A,B is OP=4 & A & B & S & XOP_0_10=0x322 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # MEM(EA,4) = RS.l; # MEM(EA+4,4) = RS.h; EA:$(REGISTER_SIZE) = A + B; *:4 (EA) = *:4 ((S) & $(MEMMASK)); *:4 (EA+4) = *:4 ((S+4) & $(MEMMASK)); } # evstwhe RS,D(RA) # ISA-cmt: Vector Store Word of Two Halfwords from Even # evstwhe rS,rA,EVUIMM_4 011 0011 0001 :evstwhe S,EVUIMM_4_RAt is OP=4 & S & EVUIMM_4_RAt & XOP_0_10=0x331 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # MEM(EA,2) = RS.S0; # MEM(EA+2,2) = RS.S2; EA:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:2 (EA) = *:2 ((S) & $(MEMMASK)); *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK)); } # evstwhex RS,RA,RB # ISA-cmt: Vector Store Word of Two Halfwords from Even Indexed # evstwhex rS,rA,rB 011 0011 0000 :evstwhex S,A,B is OP=4 & A & B & S & XOP_0_10=0x330 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # MEM(EA,2) = RS.S0; # MEM(EA+2,2) = RS.S2; EA:$(REGISTER_SIZE) = A + B; *:2 (EA) = *:2 ((S) & $(MEMMASK)); *:2 (EA+2) = *:2 ((S+2) & $(MEMMASK)); } # evstwho RS,D(RA) # ISA-cmt: Vector Store Word of Two Halfwords from Odd # evstwho rS,rA,EVUIMM_4 011 0011 0101 :evstwho S,EVUIMM_4_RAt is OP=4 & S & EVUIMM_4_RAt & XOP_0_10=0x335 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # MEM(EA,2) = RS.S1; # MEM(EA+2,2) = RS.S3; EA:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:2 (EA) = *:2 
((S+2) & $(MEMMASK)); *:2 (EA+2) = *:2 ((S+6) & $(MEMMASK)); } # evstwhox RS,RA,RB # ISA-cmt: Vector Store Word of Two Halfwords from Odd Indexed # evstwhox rS,rA,rB 011 0011 0100 :evstwhox S,RA_OR_ZERO,B is OP=4 & RA_OR_ZERO & B & S & XOP_0_10=0x334 { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # MEM(EA,2) = RS.S1; # MEM(EA+2,2) = RS.S3; EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *:2 (EA) = *:2 ((S+2) & $(MEMMASK)); *:2 (EA+2) = *:2 ((S+6) & $(MEMMASK)); } # evstwwe RS,D(RA) # ISA-cmt: Vector Store Word of Word from Even # evstwwe rS,rA,UIMM 011 0011 1001 :evstwwe S,EVUIMM_4_RAt is OP=4 & S & EVUIMM_4_RAt & UI & XOP_0_10=0x339 { ea:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:4 ($(EATRUNC)) = S:4; } # evstwwex RS,RA,RB # ISA-cmt: Vector Store Word of Word from Even Indexed # evstwwex rS,rA,rB 011 0011 1000 :evstwwex S,RA_OR_ZERO,RB is OP=4 & S & RA_OR_ZERO & RB & XOP_0_10=0x338 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB; *:4 ($(EATRUNC)) = S:4; } # evstwwo RS,D(RA) # ISA-cmt: Vector Store Word of Word from Odd # evstwwo rS,rA,EVUIMM_4 011 0011 1101 :evstwwo S,EVUIMM_4_RAt is OP=4 & S & EVUIMM_4_RAt & UI & XOP_0_10=0x33D { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + EXTZ(UI*4); # MEM(EA,4) = RS.h; EA:$(REGISTER_SIZE) = EVUIMM_4_RAt; *:4 (EA) = *:4 ((S+4) & $(MEMMASK)); } # evstwwox RS,RA,RB # ISA-cmt: Vector Store Word of Word from Odd Indexed # evstwwox rS,rA,rB 011 0011 1100 :evstwwox S,A,B is OP=4 & A & B & S & XOP_0_10=0x33C { # if (RA == 0) { # b = 0; # } else { # b = RA; # } # EA = b + RB; # MEM(EA,4) = RS.h; EA:$(REGISTER_SIZE) = A + B; *:4 (EA) = *:4 ((S+4) & $(MEMMASK)); } # evsubfsmiaaw RT,RA # ISA-cmt: Vector Subtract Signed, Modulo, Integer to Accumulator Word # evsubfsmiaaw rD,rA 100 0011 1011 :evsubfsmiaaw D,A is OP=4 & A & D & XOP_0_10=0x4CB & BITS_11_15=0 { # RT.l = ACC.l - RA.l; # RT.h = ACC.h - RA.h; # ACC = RT; lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - (( A & (0x00000000FFFFFFFF) ) ); hi:$(REGISTER_SIZE) = (( 
ACC & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0xFFFFFFFF00000000) ) >> 32);
    D = (( zext(hi) << 32) | zext(lo) );
    ACC = D;
}

# =================================================================
# Page D-16

# evsubfssiaaw RT,RA
# ISA-cmt: Vector Subtract Signed, Saturate, Integer to Accumulator Word
# evsubfssiaaw rD,rA 100 1100 0011
define pcodeop VectorSubtractSignedSaturateIntegerToAccumulatorWord1;
define pcodeop VectorSubtractSignedSaturateIntegerToAccumulatorWord2;
:evsubfssiaaw D,A is OP=4 & A & D & XOP_0_10=0x4C3 & BITS_11_15=0
{
    # SPEFSCR.OVH = ovh;
    # SPEFSCR.OV = ovl;
    # SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
    # SPEFSCR.SOV = SPEFSCR.SOV | ovl;
    # TODO definition complicated
    D = VectorSubtractSignedSaturateIntegerToAccumulatorWord1(A,ACC);
    flags:8 = VectorSubtractSignedSaturateIntegerToAccumulatorWord2(A,ACC,spr200);
    spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
    spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
    spr200 = spr200 | (flags & (0x100000000));
    spr200 = spr200 | (flags & (0x1000000000000));
}

# evsubfumiaaw RT,RA
# ISA-cmt: Vector Subtract Unsigned, Modulo, Integer to Accumulator Word
# evsubfumiaaw rD,rA 100 1100 1010
:evsubfumiaaw D,A is OP=4 & A & D & XOP_0_10=0x4CA & BITS_11_15=0
{
    # RT.l = ACC.l - RA.l;
    # RT.h = ACC.h - RA.h;
    # ACC = RT;
    # FIX: the low-word lane masked with 0x0000000000000000 and shifted >> 32,
    # which made lo always 0; select the low 32 bits unshifted, exactly as the
    # signed sibling evsubfsmiaaw does.
    lo:$(REGISTER_SIZE) = (( ACC & (0x00000000FFFFFFFF) ) ) - (( A & (0x00000000FFFFFFFF) ) );
    hi:$(REGISTER_SIZE) = (( ACC & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0xFFFFFFFF00000000) ) >> 32);
    D = (( zext(hi) << 32) | zext(lo) );
    ACC = D;
}

# evsubfusiaaw RT,RA
# ISA-cmt: Vector Subtract Unsigned, Saturate, Integer to Accumulator Word
# evsubfusiaaw rD,rA 100 1100 0010
define pcodeop VectorSubtractUnsignedSaturateIntegerToAccumulatorWord1;
define pcodeop VectorSubtractUnsignedSaturateIntegerToAccumulatorWord2;
# SPEFSCR.OVH = ovh;
# SPEFSCR.OV = ovl;
# SPEFSCR.SOVH = SPEFSCR.SOVH | ovh;
# SPEFSCR.SOV = SPEFSCR.SOV | ovl;
:evsubfusiaaw D,A is OP=4 & A & D &
XOP_0_10=0x4C2 & BITS_11_15=0
{
    # TODO definition complicated
    VectorSubtractUnsignedSaturateIntegerToAccumulatorWord1(D,A,ACC,spr200);
    flags:8 = VectorSubtractUnsignedSaturateIntegerToAccumulatorWord2(D,A,ACC,spr200);
    spr200 = (spr200 & (~ (0x200000000)) ) | (flags & (0x200000000));
    spr200 = (spr200 & (~ (0x2000000000000)) ) | (flags & (0x2000000000000));
    spr200 = spr200 | (flags & (0x100000000));
    spr200 = spr200 | (flags & (0x1000000000000));
}

# evsubfw RT,RA,RB
# ISA-cmt: Vector Subtract from Word
# evsubfw rD,rA,rB 010 0000 0100
:evsubfw D,A,B is OP=4 & A & B & D & XOP_0_10=0x204
{
    # RT.l = RB.l - RA.l;
    # RT.h = RB.h - RA.h;
    lo:$(REGISTER_SIZE) = (( B & (0x00000000FFFFFFFF) ) ) - (( A & (0x00000000FFFFFFFF) ) );
    hi:$(REGISTER_SIZE) = (( B & (0xFFFFFFFF00000000) ) >> 32) - (( A & (0xFFFFFFFF00000000) ) >> 32);
    D = (( zext(hi) << 32) | zext(lo) );
    # FIX: removed spurious "ACC = D;" - evsubfw is the modulo subtract, not an
    # accumulator-updating form; the ISA pseudo-code above assigns only RT.l/RT.h
    # (contrast with evsubfsmiaaw/evsubfumiaaw whose pseudo-code includes ACC = RT).
}

# evsubifw RT,UI,RB
# ISA-cmt: Vector Subtract Immediate from Word
# evsubifw rD,UIMM,rB 010 0000 0110
:evsubifw D,BITS_16_20,B is OP=4 & D & BITS_16_20 & B & XOP_0_10=0x206
{
    # RT.l = RB.l - EXTZ(UI);
    # RT.h = RB.h - EXTZ(UI);
    tmp:8 = BITS_16_20*1;
    # FIX: the low-word lane masked with 0x0000000000000000 and shifted >> 32,
    # which made lo = -tmp regardless of RB; select the low 32 bits of RB unshifted.
    lo:$(REGISTER_SIZE) = (( B & (0x00000000FFFFFFFF) ) ) - tmp;
    hi:$(REGISTER_SIZE) = (( B & (0xFFFFFFFF00000000) ) >> 32) - tmp;
    D = (( zext(hi) << 32) | zext(lo) );
}

# evsubiw => evsubifw
# evsubw => evsubfw

# evxor RT,RA,RB
# ISA-cmt: Vector XOR
# evxor rD,rA,rB 010 0001 0110
:evxor vrD_64_0,vrA_64_0,vrB_64_0 is OP=4 & vrD_64_0 & vrA_64_0 & vrB_64_0 & XOP_0_10=534
{
    vrD_64_0 = vrA_64_0 ^ vrB_64_0;
}

# TODO evmwlssianw RT,RA,RB
# TODO complicated

================================================
FILE: pypcode/processors/PowerPC/data/languages/SPE_EFSD.sinc
================================================

# Based on "PowerISA Version 2.06 Revision B" document dated July 23, 2010
# Category: SPE.Embedded Float Scalar Double
# version 1.0

# =================================================================
# Page 576
# efdabs rT,rA
# ISA-cmt: efdabs -
Floating-Point Double-Precision Absolute Value # ISA-info: efdabs - Form "EVX" Page 576 Category "SP.FD" # binutils: e500.d: 34: 10 a4 02 e4 efdabs r5,r4 :efdabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=740 { D = abs( A ); } # ================================================================= # Page 577 # efdadd rT,rA,rB # ISA-cmt: efdadd - Floating-Point Double-Precision Add # ISA-info: efdadd - Form "EVX" Page 577 Category "SP.FD" # binutils: e500.d: 40: 10 a4 1a e0 efdadd r5,r4,r3 :efdadd D,A,B is OP=4 & D & A & B & XOP_0_10=736 { D = A f+ B; setSPEFSCRAddFlags_L( A, B, D ); } # ================================================================= # Page 582 # efdcfs rT,rB # ISA-cmt: efdcfs - Floating-Point Double-Precision Convert from Single-Precision # ISA-info: efdcfs - Form "EVX" Page 582 Category "SP.FD" # binutils: e500.d: a4: 10 a0 22 ef efdcfs r5,r4 :efdcfs D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=751 { D = float2float( B:4 ); setSPEFSCR_L( D ); setSummarySPEFSCR(); } # ================================================================= # Page 580 # efdcfsf rT,rB # ISA-cmt: efdcfsf - Convert Floating-Point Double-Precision from Signed Fraction # ISA-info: efdcfsf - Form "EVX" Page 580 Category "SP.FD" # binutils: e500.d: 7c: 10 a0 22 f3 efdcfsf r5,r4 :efdcfsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=755 { # load fractional divisor as a float tmpA:4 = 0x80000000; tmpA = int2float( tmpA ); setSPEFSCR_L( tmpA ); # check if negative if ( ( B:4 & 0x80000000 ) != 0 ) goto ; # float the fractional portion of register B tmpB:4 = int2float( B:4 ); setSPEFSCR_L( tmpB ); tmpB = tmpB f/ tmpA; setSPEFSCRDivFlags_L( tmpB, tmpA, tmpB ); goto ; # float the fractional portion of register B, 2's complement negate tmpB = int2float( -( B:4 ) ); setSPEFSCR_L( tmpB ); tmpB = tmpB f/ tmpA; setSPEFSCRDivFlags_L( tmpB, tmpA, tmpB ); # negate the float tmpB = f-( tmpB ); setSPEFSCR_L( tmpB ); tmpC:8 = float2float( tmpB ); setSPEFSCR_L( tmpC ); setSummarySPEFSCR(); D = 
tmpC; } # ================================================================= # Page 579 # efdcfsi rT,rB # ISA-cmt: efdcfsi - Convert Floating-Point Double-Precision from Signed Integer # ISA-info: efdcfsi - Form "EVX" Page 579 Category "SP.FD" # binutils: e500.d: 6c: 10 a0 22 f1 efdcfsi r5,r4 :efdcfsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=753 { # check if negative if ( ( B:4 & 0x80000000 ) != 0 ) goto ; # float the integer portion of register B tmpB:8 = int2float( B:4 ); setSPEFSCR_L( tmpB ); goto ; # float the integer portion of register B, 2's complement negate tmpB = int2float( -( B:4 ) ); setSPEFSCR_L( tmpB ); # negate the float tmpB = f-( tmpB ); setSPEFSCR_L( tmpB ); setSummarySPEFSCR(); D = tmpB; } # ================================================================= # Page 580 # efdcfsid rT,rB # ISA-cmt: efdcfsid - Convert Floating-Point Double-Precision from Signed Integer Doubleword # ISA-info: efdcfsid - Form "EVX" Page 580 Category "SP.FD" # binutils: e500.d: 70: 10 a0 22 e3 efdcfsid r5,r4 :efdcfsid D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=739 { # check if negative if ( ( B & 0x8000000000000000 ) != 0 ) goto ; # float the integer portion of register B tmpB:8 = int2float( B ); setSPEFSCR_L( tmpB ); goto ; # float the integer portion of register B, 2's complement negate tmpB = int2float( -( B ) ); setSPEFSCR_L( tmpB ); # negate the float tmpB = f-( tmpB ); setSPEFSCR_L( tmpB ); setSummarySPEFSCR(); D = tmpB; } # ================================================================= # Page 580 # efdcfuf rT,rB # ISA-cmt: efdcfuf - Convert Floating-Point Double-Precision from Unsigned Fraction # ISA-info: efdcfuf - Form "EVX" Page 580 Category "SP.FD" # binutils: e500.d: 80: 10 a0 22 f2 efdcfuf r5,r4 :efdcfuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=754 { # load fractional divisor as a float tmpA:8 = 0x0000000100000000; tmpA = int2float( tmpA ); setSPEFSCR_L( tmpA ); # float the fractional portion of register B tmpB:8 = int2float( B:4 ); 
setSPEFSCR_L( tmpB ); tmpB = tmpB f/ tmpA; setSPEFSCRDivFlags_L( tmpB, tmpA, tmpB ); D = tmpB; } # ================================================================= # Page 579 #efdcfui rT,rB # ISA-cmt: efdcfui - Convert Floating-Point Double-Precision from Unsigned Integer # ISA-info: efdcfui - Form "EVX" Page 579 Category "SP.FD" # binutils: e500.d: 74: 10 a0 22 f0 efdcfui r5,r4 :efdcfui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=752 { tmp:8 = int2float( B:4 ); setSPEFSCR_L( tmp ); setSummarySPEFSCR(); D = tmp; } # ================================================================= # Page 580 #efdcfuid rT,rB # ISA-cmt: efdcfuid - Convert Floating-Point Double-Precision from Unsigned Integer Doubleword # ISA-info: efdcfuid - Form "EVX" Page 580 Category "SP.FD" # binutils: e500.d: 78: 10 a0 22 e2 efdcfuid r5,r4 :efdcfuid D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=738 { tmp:8 = int2float( B ); setSPEFSCR_L( tmp ); setSummarySPEFSCR(); D = tmp; } # ================================================================= # Page 578 # efdcmpeq CRFD,rA,rB # ISA-cmt: efdcmpeq - Floating-Point Double-Precision Compare Equal # ISA-info: efdcmpeq - Form "EVX" Page 578 Category "SP.FD" # binutils: e500.d: 58: 12 84 1a ee efdcmpeq cr5,r4,r3 :efdcmpeq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=750 { CRFD = A f== B; } # ================================================================= # Page 578 # efdcmpgt CRFD,rA,rB # ISA-cmt: efdcmpgt - Floating-Point Double-Precision Compare Greater Than # ISA-info: efdcmpgt - Form "EVX" Page 578 Category "SP.FD" # binutils: e500.d: 50: 12 84 1a ec efdcmpgt cr5,r4,r3 :efdcmpgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=748 { CRFD = A f> B; } # ================================================================= # Page 578 # efdcmplt CRFD,rA,rB # ISA-cmt: efdcmplt - Floating-Point Double-Precision Compare Less Than # ISA-info: efdcmplt - Form "EVX" Page 578 Category "SP.FD" # binutils: e500.d: 54: 12 84 1a ed efdcmplt 
cr5,r4,r3 :efdcmplt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=749 { CRFD = A f< B; } # ================================================================= # Page 578 # efdctsf rT,rB # ISA-cmt: efdctsf - Convert Floating-Point Double-Precision to Signed Fraction # ISA-info: efdctsf - Form "EVX" Page 582 Category "SP.FD" # binutils: e500.d: 9c: 10 a0 22 f7 efdctsf r5,r4 :efdctsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=759 { # multiply by 0x8000 0000 0000 0000 to scale the fraction up to integer range # load fractional multiplier as a float tmpM:8 = 0x8000000000000000; tmpM = int2float( tmpM ); setSPEFSCR_L( tmpM ); # load saturation limit as a float tmpL:8 = 0x8000000000000000 - 1; tmpL = int2float( tmpL ); setSPEFSCR_L( tmpL ); # scale the saturation limit to a fractional float tmpL = tmpL f/ tmpM; setSPEFSCRDivFlags_L( tmpL, tmpM, tmpL ); tmpB:8 = B; # check if less than or equal to positive saturation limit if ( tmpB f<= tmpL ) goto ; # set to positive saturation tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; goto ; # check if greater than or equal to negative saturation limit tmpL = f-( tmpL ); if ( tmpB f>= tmpL ) goto ; # set to negative saturation tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; # scale the fractional portion up to integer side of mantissa tmpB = tmpB f* tmpM; setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB ); # truncate back to signed fraction format tmpC:8 = trunc( tmpB ); setSPEFSCR_L( tmpB ); setSummarySPEFSCR(); D = tmpC; } # ================================================================= # Page 580 # efdctsi rT,rB # ISA-cmt: efdctsi - Convert Floating-Point Double-Precision to Signed Integer # ISA-info: efdctsi - Form "EVX" Page 580 Category "SP.FD" # binutils: e500.d: 84: 10 a0 22 f5 efdctsi r5,r4 :efdctsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=757 { # create zero float constant tmpA:8 = 0; tmpA = int2float( tmpA ); # check if negative if ( B f< tmpA ) goto ; tmpB:8 = round( B ); setSPEFSCR_L( tmpB ); # 
limit to positive saturation if ( tmpB <= 0x000000007FFFFFFF ) goto ; tmpB = 0x000000007FFFFFFF; spef_fx = 1; spef_finxs = 1; spef_fg = 1; goto ; # negate the float tmpB = round( f-( B ) ); setSPEFSCR_L( tmpB ); # limit to negative saturation if ( tmpB <= 0x0000000080000000 ) goto ; tmpB = 0x0000000080000000; spef_fx = 1; spef_finxs = 1; spef_fg = 1; # negate the signed int tmpB = -( tmpB ); setSummarySPEFSCR(); # assign to lower word of D D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); } # ================================================================= # Page 581 # efdctsidz rT,rB # ISA-cmt: efdctsidz - Convert Floating-Point Double-Precision to Signed Integer Doubleword with Round toward Zero # ISA-info: efdctsidz - Form "EVX" Page 581 Category "SP.FD" # binutils: e500.d: 88: 10 a0 22 eb efdctsidz r5,r4 # Note: This may not work correctly as the number approaches saturation; too little (16 digits) precision in mantissa :efdctsidz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=747 { # create zero float constant tmpA:8 = 0; tmpA = int2float( tmpA ); tmpB:8 = B; # check if negative if ( tmpB f< tmpA ) goto ; # load saturation limit as a float tmpL:8 = 0x8000000000000000 - 1; tmpL = int2float( tmpL ); setSPEFSCR_L( tmpL ); # limit to saturation if ( tmpB <= tmpL ) goto ; tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; tmpB = trunc( tmpB ); setSPEFSCR_L( tmpB ); goto ; # load saturation limit as a float tmpL = 0x8000000000000000; tmpL = int2float( tmpL ); setSPEFSCR_L( tmpL ); # negate float (make positive) tmpB = f-( tmpB ); # limit to saturation if ( tmpB <= tmpL ) goto ; tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; tmpB = trunc( tmpB ); setSPEFSCR_L( tmpB ); # negate the signed int tmpB = -( tmpB ); setSummarySPEFSCR(); D = tmpB; } # ================================================================= # Page 582 # efdctsiz rT,rB # ISA-cmt: efdctsiz - Convert Floating-Point Double-Precision to Signed Integer with Round toward Zero # ISA-info: 
efdctsiz - Form "EVX" Page 582 Category "SP.FD" # binutils: e500.d: 8c: 10 a0 22 fa efdctsiz r5,r4 :efdctsiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=762 { # create zero float constant tmpA:8 = 0; tmpA = int2float( tmpA ); # check if negative if ( B f< tmpA ) goto ; tmpB:8 = trunc( B ); setSPEFSCR_L( tmpB ); # limit to positive saturation if ( tmpB <= 0x000000007FFFFFFF ) goto ; tmpB = 0x000000007FFFFFFF; spef_fx = 1; spef_finxs = 1; spef_fg = 1; goto ; # negate the float tmpB = trunc( f-( B ) ); setSPEFSCR_L( tmpB ); # limit to negative saturation if ( tmpB <= 0x0000000080000000 ) goto ; tmpB = 0x0000000080000000; spef_fx = 1; spef_finxs = 1; spef_fg = 1; # negate the signed int tmpB = -( tmpB ); setSummarySPEFSCR(); # assign to lower word of D D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); } # ================================================================= # Page 582 # efdctuf rT,rB # ISA-cmt: efdctuf - Convert Floating-Point Double-Precision to Unsigned Fraction # ISA-info: efdctuf - Form "EVX" Page 582 Category "SP.FD" # binutils: e500.d: a0: 10 a0 22 f6 efdctuf r5,r4 :efdctuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=758 { # multiply by 0x0000 0001 0000 0000 to scale the fraction up to integer range # load fractional multiplier as a float tmpM:8 = 0x0000000100000000; tmpM = int2float( tmpM ); setSPEFSCR_L( tmpM ); # load saturation limit as a float tmpL:8 = 0x0000000100000000 - 1; tmpL = int2float( tmpL ); setSPEFSCR_L( tmpL ); # scale the saturation limit to a fractional float tmpL = tmpL f/ tmpM; setSPEFSCRDivFlags_L( tmpL, tmpM, tmpL ); # get B float up to 64 bit width tmpB:8 = B; setSPEFSCR_L( tmpB ); # check if less than or equal to positive saturation limit if ( tmpB f<= tmpL ) goto ; # set to saturation tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; # scale the fractional portion up to integer side of mantissa tmpB = tmpB f* tmpM; setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB ); # truncate back to integer tmpC:4 = trunc( tmpB ); 
setSPEFSCR_L( tmpC ); setSummarySPEFSCR(); # assign to lower word of D D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); } # ================================================================= # Page 580 # efdctui rT,rB # ISA-cmt: efdctui - Convert Floating-Point Double-Precision to Unsigned Integer # ISA-info: efdctui - Form "EVX" Page 580 Category "SP.FD" # binutils: e500.d: 90: 10 a0 22 f4 efdctui r5,r4 :efdctui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=756 { tmpB:8 = B; # load saturation limit as a float tmpL:8 = 0x00000000FFFFFFFF; tmpL = int2float( tmpL ); setSPEFSCR_L( tmpL ); # limit to saturation if ( tmpB f<= tmpL ) goto ; tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; # round back to integer tmpC:4 = trunc(round( tmpB )); setSPEFSCR_L( tmpB ); setSummarySPEFSCR(); # assign to lower word of D D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC ); } # ================================================================= # Page 581 # efdctuidz rT,rB # ISA-cmt: efdctuidz - Convert Floating-Point Double-Precision to Unsigned Integer Doubleword with Round toward Zero # ISA-info: efdctuidz - Form "EVX" Page 581 Category "SP.FD" # binutils: e500.d: 94: 10 a0 22 ea efdctuidz r5,r4 :efdctuidz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=746 { tmpB:8 = B; # load saturation limit as a float tmpL:8 = 0xFFFFFFFFFFFFFFFF; tmpL = int2float( tmpL ); setSPEFSCR_L( tmpL ); # limit to saturation if ( tmpB f<= tmpL ) goto ; tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; tmpB = trunc( tmpB ); setSummarySPEFSCR(); D = tmpB; } # ================================================================= # Page 582 # efdctuiz rT,rB # ISA-cmt: efdctuiz - Convert Floating-Point Double-Precision to Unsigned Integer with Round toward Zero # ISA-info: efdctuiz - Form "EVX" Page 582 Category "SP.FD" # binutils: e500.d: 98: 10 a0 22 f8 efdctuiz r5,r4 :efdctuiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=760 { tmpB:8 = B; # load saturation limit as a float tmpL:8 = 0x00000000FFFFFFFF; tmpL 
= int2float( tmpL ); setSPEFSCR_L( tmpL ); # limit to saturation if ( tmpB f<= tmpL ) goto ; tmpB = tmpL; spef_fx = 1; spef_finxs = 1; spef_fg = 1; tmpB = trunc( tmpB ); setSummarySPEFSCR(); # assign to lower word of D D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 ); } # ================================================================= # Page 577 # efddiv rT,rA,rB # ISA-cmt: efddiv - Floating-Point Double-Precision Divide # ISA-info: efddiv - Form "EVX" Page 577 Category "SP.FD" # binutils: e500.d: 4c: 10 a4 1a e9 efddiv r5,r4,r3 :efddiv D,A,B is OP=4 & D & A & B & XOP_0_10=745 { D = A f/ B; setSPEFSCRDivFlags_L( A, B, D ); } # ================================================================= # Page 577 # efdmul rT,rA,rB # ISA-cmt: efdmul - Floating-Point Double-Precision Multiply # ISA-info: efdmul - Form "EVX" Page 577 Category "SP.FD" # binutils: e500.d: 48: 10 a4 1a e8 efdmul r5,r4,r3 :efdmul D,A,B is OP=4 & D & A & B & XOP_0_10=744 { D = A f* B; setSPEFSCRMulFlags_L( A, B, D ); } # ================================================================= # Page 576 # efdnabs rT,rA # ISA-cmt: efdnabs - Floating-Point Double-Precision Negative Absolute Value # ISA-info: efdnabs - Form "EVX" Page 576 Category "SP.FD" # binutils: e500.d: 38: 10 a4 02 e5 efdnabs r5,r4 :efdnabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=741 { D = f- ( abs( A ) ); } # ================================================================= # Page 577 # efdneg rT,rA # ISA-cmt: efdneg - Floating-Point Double-Precision Negate # ISA-info: efdneg - Form "EVX" Page 576 Category "SP.FD" # binutils: e500.d: 3c: 10 a4 02 e6 efdneg r5,r4 :efdneg D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=742 { D = f-( A ); } # ================================================================= # Page 577 # efdsub rT,rA,rB # ISA-cmt: efdsub - Floating-Point Double-Precision Subtract # ISA-info: efdsub - Form "EVX" Page 577 Category "SP.FD" # binutils: e500.d: 44: 10 a4 1a e1 efdsub r5,r4,r3 :efdsub D,A,B is OP=4 & D & A & 
B & XOP_0_10=737 { D = A f- B; setSPEFSCRSubFlags_L( A, B, D ); } # ================================================================= # Page 579 # efdtsteq CRFD,rA,rB # ISA-cmt: efdtsteq - Floating-Point Double-Precision Test Equal # ISA-info: efdtsteq - Form "EVX" Page 579 Category "SP.FD" # binutils: e500.d: 68: 12 84 1a fe efdtsteq cr5,r4,r3 :efdtsteq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=766 { CRFD = A f== B; } # ================================================================= # Page 578 # efdtstgt CRFD,rA,rB # ISA-cmt: efdtstgt - Floating-Point Double-Precision Test Greater Than # ISA-info: efdtstgt - Form "EVX" Page 578 Category "SP.FD" # binutils: e500.d: 5c: 12 84 1a fc efdtstgt cr5,r4,r3 # binutils: e500.d: 60: 12 84 1a fc efdtstgt cr5,r4,r3 :efdtstgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=764 { CRFD = A f> B; } # ================================================================= # Page 579 # efdtstlt CRFD,rA,rB # ISA-cmt: efdtstlt - Floating-Point Double-Precision Test Less Than # ISA-info: efdtstlt - Form "EVX" Page 579 Category "SP.FD" # binutils: e500.d: 64: 12 84 1a fd efdtstlt cr5,r4,r3 :efdtstlt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=765 { CRFD = A f< B; } # ================================================================= # Page 583 # efscfd rT,rB # ISA-cmt: efscfd - Floating-Point Single-Precision Convert from Double-Precision # ISA-info: efscfd - Form "EVX" Page 583 Category "SP.FD" # binutils: e500.d: 30: 10 a0 22 cf efscfd r5,r4 :efscfd D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=719 { tmpB:4 = float2float( B ); setSPEFSCR_L( tmpB ); setSummarySPEFSCR(); # assign to lower word of D D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB ); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/SPE_EFV.sinc ================================================ # Based on "PowerISA Version 2.06 Revision B" document dated July 23, 2010 # Category: SPE.Embedded 
Float Vector Instructions # ================================================================= # Page 561 # evfsabs rT,rA # ISA-cmt: evfsabs - Vector Floating-Point Single-Precision Absolute Value # ISA-info: evfsabs - Form "EVX" Page 561 Category "SP.FV" # binutils: mytest.d: 1e0: 10 22 02 84 evfsabs r1,r2 :evfsabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=644 { # # low section # tmpA:4 = abs( A:4 ); # # high section # tmpB:4 = abs( A(4) ); # move results into upper and lower words tmpC:8 = zext( tmpB ); tmpC = ( tmpC << 32 ) | zext( tmpA ); D = tmpC; } # ================================================================= # Page 562 # evfsadd rT,rA,rB # ISA-cmt: evfsadd - Vector Floating-Point Single-Precision Add # ISA-info: evfsadd - Form "EVX" Page 562 Category "SP.FV" # binutils: mytest.d: 1d8: 10 22 1a 80 evfsadd r1,r2,r3 :evfsadd D,A,B is OP=4 & D & A & B & XOP_0_10=640 { # # low section # tmpA:4 = A:4 f+ B:4; setSPEFSCRAddFlags_L( A:4, B:4, tmpA ); # # high section # tmpB:4 = A(4) f+ B(4); # SLEIGH had a problem with using A(4) and B(4) directly here tmpD:4 = A(4); tmpE:4 = B(4); setSPEFSCRAddFlags_H( tmpD, tmpE, tmpB ); # move results into upper and lower words tmpC:8 = zext( tmpB ); tmpC = ( tmpC << 32 ) | zext( tmpA ); D = tmpC; } # ================================================================= # Page 566 # evfscfsf rT,rB # ISA-cmt: evfscfsf - Vector Convert Floating-Point Single-Precision from Signed Fraction # ISA-info: evfscfsf - Form "EVX" Page 566 Category "SP.FV" # binutils: mytest.d: 20c: 10 20 12 93 evfscfsf r1,r2 :evfscfsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=659 { # load fractional divisor as a float tmpA:4 = 0x80000000; tmpA = int2float( tmpA ); setSPEFSCR_L( tmpA ); # # low section # tmpE:4 = B:4; # check if negative if ( ( tmpE & 0x80000000 ) != 0 ) goto ; # float the fractional portion of register B tmpB:4 = int2float( tmpE ); setSPEFSCR_L( tmpB ); tmpC:4 = tmpB f/ tmpA; setSPEFSCRDivFlags_L( tmpB, tmpA, tmpC ); goto ; # float 
the fractional portion of register B, 2's complement negate tmpB = int2float( -( tmpE ) ); setSPEFSCR_L( tmpB ); tmpC = tmpB f/ tmpA; setSPEFSCRDivFlags_L( tmpB, tmpA, tmpC ); # negate the float tmpC = f-( tmpC ); setSPEFSCR_L( tmpC ); setSummarySPEFSCR(); # # high section # tmpE = B(4); # check if negative if ( ( tmpE & 0x80000000 ) != 0 ) goto ; # float the fractional portion of register B tmpB = int2float( tmpE ); setSPEFSCR_H( tmpB ); tmpD:4 = tmpB f/ tmpA; setSPEFSCRDivFlags_H( tmpB, tmpA, tmpD ); goto ; # float the fractional portion of register B, 2's complement negate tmpB = int2float( -( tmpE ) ); setSPEFSCR_H( tmpB ); tmpD = tmpB f/ tmpA; setSPEFSCRDivFlags_H( tmpB, tmpA, tmpD ); # negate the float tmpD = f-( tmpD ); setSPEFSCR_H( tmpD ); setSummarySPEFSCR(); # move results into upper and lower words tmpZ:8 = zext( tmpD ); tmpZ = ( tmpZ << 32 ) | zext( tmpC ); D = tmpZ; } # ================================================================= # Page 566 # evfscfsi rT,rB # ISA-cmt: evfscfsi - Vector Convert Floating-Point Single-Precision from Signed Integer # ISA-info: evfscfsi - Form "EVX" Page 566 Category "SP.FV" # binutils: mytest.d: 204: 10 20 12 91 evfscfsi r1,r2 :evfscfsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=657 { # # low section # tmpE:4 = B:4; # check if negative if ( ( tmpE & 0x80000000 ) != 0 ) goto ; # float the integer portion of register B tmpB:4 = int2float( tmpE ); setSPEFSCR_L( tmpB ); goto ; # float the integer portion of register B, 2's complement negate tmpB = int2float( -( tmpE ) ); setSPEFSCR_L( tmpB ); # negate the float tmpB = f-( tmpB ); setSPEFSCR_L( tmpB ); setSummarySPEFSCR(); # # high section # tmpE = B(4); # check if negative if ( ( tmpE & 0x80000000 ) != 0 ) goto ; # float the integer portion of register B tmpC:4 = int2float( tmpE ); setSPEFSCR_H( tmpC ); goto ; # float the integer portion of register B, 2's complement negate tmpC = int2float( -( tmpE ) ); setSPEFSCR_H( tmpC ); # negate the float tmpC = f-( tmpC ); 
# NOTE(review): this region was reflowed from a whitespace-mangled extraction.
# The <label> operands and label definitions of every SLEIGH `goto` statement
# below were stripped by the extraction (a bare "goto ;" remains); the branch
# targets must be restored from the upstream Ghidra SPE.sinc before this text
# can be compiled.  All non-comment tokens are preserved as extracted.
# --- tail of evfscfsf: record high-half status, then pack both words into D ---
setSPEFSCR_H( tmpC ); setSummarySPEFSCR();
# move results into upper and lower words
tmpZ:8 = zext( tmpC ); tmpZ = ( tmpZ << 32 ) | zext( tmpB ); D = tmpZ;
}
# =================================================================
# Page 566
# evfscfuf rT,rB
# ISA-cmt: evfscfuf - Vector Convert Floating-Point Single-Precision from Unsigned Fraction
# ISA-info: evfscfuf - Form "EVX" Page 566 Category "SP.FV"
# binutils: mytest.d: 208: 10 20 12 92 evfscfuf r1,r2
# Converts each unsigned 32-bit fraction in rB to an SP float by floating the
# word and dividing by 2^32 (the "fractional divisor").
:evfscfuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=658
{
	# load fractional divisor as a float
	tmpA:8 = 0x0000000100000000; tmpF:4 = int2float( tmpA ); setSPEFSCR_L( tmpF );
	# low section
	tmpE:4 = B:4;
	# float the fractional portion of register B
	tmpB:4 = int2float( tmpE ); setSPEFSCR_L( tmpB );
	tmpC:4 = tmpB f/ tmpF; setSPEFSCRDivFlags_L( tmpB, tmpF, tmpC );
	# high section
	tmpE = B(4);
	# float the fractional portion of register B
	tmpB = int2float( tmpE ); setSPEFSCR_H( tmpB );
	tmpD:4 = tmpB f/ tmpF; setSPEFSCRDivFlags_H( tmpB, tmpF, tmpD );
	# move results into upper and lower words
	tmpZ:8 = zext( tmpD ); tmpZ = ( tmpZ << 32 ) | zext( tmpC ); D = tmpZ;
}
# =================================================================
# Page 566
# evfscfui rT,rB
# ISA-cmt: evfscfui - Vector Convert Floating-Point Single-Precision from Unsigned Integer
# ISA-info: evfscfui - Form "EVX" Page 566 Category "SP.FV"
# binutils: mytest.d: 200: 10 20 12 90 evfscfui r1,r2
:evfscfui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=656
{
	# low section
	tmpE:4 = B:4; tmpC:4 = int2float( tmpE ); setSPEFSCR_L( tmpC );
	# high section
	tmpE = B(4); tmpD:4 = int2float( tmpE ); setSPEFSCR_H( tmpD ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpD ); tmpZ = ( tmpZ << 32 ) | zext( tmpC ); D = tmpZ;
}
# =================================================================
# Page 564
# evfscmpeq CRFD,rA,rB
# ISA-cmt: evfscmpeq - Vector Floating-Point Single-Precision Compare Equal
# ISA-info: evfscmpeq - Form "EVX" Page 564 Category "SP.FV"
# binutils: mytest.d: 1fc: 10 82 1a 8e evfscmpeq cr1,r2,r3
# CR field encoding below: bit3 = high-element result, bit2 = low-element
# result, bit1 = either element, bit0 = both elements.
:evfscmpeq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=654
{
	tmpA:4 = A:4; tmpB:4 = B:4; tmpC:4 = A(4); tmpD:4 = B(4);
	tmpL:1 = tmpA f== tmpB; tmpH:1 = tmpC f== tmpD;
	CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL);
}
# =================================================================
# Page 563
# evfscmpgt CRFD,rA,rB
# ISA-cmt: evfscmpgt - Vector Floating-Point Single-Precision Compare Greater Than
# ISA-info: evfscmpgt - Form "EVX" Page 563 Category "SP.FV"
# binutils: mytest.d: 1f4: 10 82 1a 8c evfscmpgt cr1,r2,r3
:evfscmpgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=652
{
	tmpA:4 = A:4; tmpB:4 = B:4; tmpC:4 = A(4); tmpD:4 = B(4);
	tmpL:1 = tmpA f> tmpB; tmpH:1 = tmpC f> tmpD;
	CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL);
}
# =================================================================
# Page 563
# evfscmplt CRFD,rA,rB
# ISA-cmt: evfscmplt - Vector Floating-Point Single-Precision Compare Less Than
# ISA-info: evfscmplt - Form "EVX" Page 563 Category "SP.FV"
# binutils: mytest.d: 1f8: 10 82 1a 8d evfscmplt cr1,r2,r3
:evfscmplt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=653
{
	tmpA:4 = A:4; tmpB:4 = B:4; tmpC:4 = A(4); tmpD:4 = B(4);
	tmpL:1 = tmpA f< tmpB; tmpH:1 = tmpC f< tmpD;
	CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL);
}
# =================================================================
# Page 568
# evfsctsf rT,rB
# ISA-cmt: evfsctsf - Vector Convert Floating-Point Single-Precision to Signed Fraction
# ISA-info: evfsctsf - Form "EVX" Page 568 Category "SP.FV"
# binutils: mytest.d: 21c: 10 20 12 97 evfsctsf r1,r2
:evfsctsf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=663
{
	# multiply by 0x8000 0000 to scale the fraction up to integer range
	# load fractional multiplier as a float
	tmpM:4 = 0x80000000; tmpM = int2float( tmpM ); setSPEFSCR_L( tmpM );
	# load saturation limit as a float
	tmpS:4 = 0x80000000 - 1; tmpS = int2float( tmpS ); setSPEFSCR_L( tmpS );
	# scale the saturation limit to a fractional float
	tmpS = tmpS f/ tmpM; setSPEFSCRDivFlags_L( tmpS, tmpM, tmpS );
	# form negative saturation limit
	tmpN:4 = f-( tmpS );
	# low section
	tmpB:4 = B:4;
	# check if less than or equal to positive saturation limit
	if ( tmpB f<= tmpS ) goto ;
	# set to positive saturation
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	goto ;
	# check if greater than or equal to negative saturation limit
	if ( tmpB f>= tmpN ) goto ;
	# set to negative saturation
	tmpB = tmpN; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# scale the fractional portion up to integer side of mantissa
	tmpB = tmpB f* tmpM; setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB );
	# truncate back to signed fraction format
	tmpL:4 = trunc( tmpB ); setSPEFSCR_L( tmpL ); setSummarySPEFSCR();
	# high section
	tmpB = B(4);
	# check if less than or equal to positive saturation limit
	if ( tmpB f<= tmpS ) goto ;
	# set to positive saturation
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	goto ;
	# check if greater than or equal to negative saturation limit
	if ( tmpB f>= tmpN ) goto ;
	# set to negative saturation
	tmpB = tmpN; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# scale the fractional portion up to integer side of mantissa
	tmpB = tmpB f* tmpM; setSPEFSCRMulFlags_H( tmpB, tmpM, tmpB );
	# truncate back to signed fraction format
	tmpH:4 = trunc( tmpB ); setSPEFSCR_H( tmpH ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 567
# evfsctsi rT,rB
# ISA-cmt: evfsctsi - Vector Convert Floating-Point Single-Precision to Signed Integer
# ISA-info: evfsctsi - Form "EVX" Page 567 Category "SP.FV"
# binutils: mytest.d: 214: 10 20 12 95 evfsctsi r1,r2
:evfsctsi D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=661
{
	# create zero float constant
	tmpA:4 = 0; tmpA = int2float( tmpA );
	# low section
	tmpB:4 = B:4;
	# check if negative
	if ( tmpB f< tmpA ) goto ;
	tmpB = round( tmpB ); setSPEFSCR_L( tmpB );
	# limit to positive saturation
	if ( tmpB <= 0x000000007FFFFFFF ) goto ;
	tmpB = 0x000000007FFFFFFF; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	tmpL:4 = tmpB;
	goto ;
	# negate the float
	tmpB = round( f-( tmpB ) ); setSPEFSCR_L( tmpB );
	# limit to negative saturation
	if ( tmpB <= 0x0000000080000000 ) goto ;
	tmpB = 0x0000000080000000; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# negate the signed int
	tmpL = -( tmpB ); setSummarySPEFSCR();
	# high section
	tmpB = B(4);
	# check if negative
	if ( tmpB f< tmpA ) goto ;
	tmpB = round( tmpB ); setSPEFSCR_H( tmpB );
	# limit to positive saturation
	if ( tmpB <= 0x000000007FFFFFFF ) goto ;
	tmpB = 0x000000007FFFFFFF; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	tmpH:4 = tmpB;
	goto ;
	# negate the float
	tmpB = round( f-( tmpB ) ); setSPEFSCR_H( tmpB );
	# limit to negative saturation
	if ( tmpB <= 0x0000000080000000 ) goto ;
	tmpB = 0x0000000080000000; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# negate the signed int
	tmpH = -( tmpB ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 567
# evfsctsiz rT,rB
# ISA-cmt: evfsctsiz - Vector Convert Floating-Point Single-Precision to Signed Integer with Round toward Zero
# ISA-info: evfsctsiz - Form "EVX" Page 567 Category "SP.FV"
# binutils: mytest.d: 224: 10 20 12 9a evfsctsiz r1,r2
:evfsctsiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=666
{
	# create zero float constant
	tmpA:8 = 0; tmpA = int2float( tmpA );
	# create positive saturation float constant
	tmpS:8 = 0x000000007FFFFFFF; tmpS = int2float( tmpS );
	# create negative saturation float constant
	tmpN:8 = 0x0000000080000000; tmpN = int2float( tmpN );
	# low section
	tmpB:8 = float2float( B:4 );
	# check if negative
	if ( tmpB f< tmpA ) goto ;
	# limit to positive saturation
	if ( tmpB f<= tmpS ) goto ;
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	tmpL:4 = trunc( tmpB ); setSPEFSCR_L( tmpL );
	goto ;
	# negate the float
	tmpB = f-( tmpB );
	# limit to negative saturation
	if ( tmpB f<= tmpN ) goto ;
	tmpB = tmpN; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# negate the signed int
	tmpL = -( trunc( tmpB ) ); setSPEFSCR_L( tmpL ); setSummarySPEFSCR();
	# high section
	tmpE:4 = B(4); tmpB = float2float( tmpE );
	# check if negative
	if ( tmpB f< tmpA ) goto ;
	# limit to positive saturation
	if ( tmpB f<= tmpS ) goto ;
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	tmpH:4 = trunc( tmpB ); setSPEFSCR_H( tmpH );
	goto ;
	# negate the float
	tmpB = f-( tmpB );
	# limit to negative saturation
	if ( tmpB f<= tmpN ) goto ;
	tmpB = tmpN; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# negate the signed int
	tmpH = -( trunc( tmpB ) ); setSPEFSCR_H( tmpH ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 568
# evfsctuf rT,rB
# ISA-cmt: evfsctuf - Vector Convert Floating-Point Single-Precision to Unsigned Fraction
# ISA-info: evfsctuf - Form "EVX" Page 568 Category "SP.FV"
# binutils: mytest.d: 218: 10 20 12 96 evfsctuf r1,r2
:evfsctuf D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=662
{
	# multiply by 0x0000 0001 0000 0000 to scale the fraction up to integer range
	# load fractional multiplier as a float
	tmpM:8 = 0x0000000100000000; tmpM = int2float( tmpM ); setSPEFSCR_L( tmpM );
	# load saturation limit as a float
	tmpS:8 = 0x0000000100000000 - 1; tmpS = int2float( tmpS ); setSPEFSCR_L( tmpS );
	# scale the saturation limit to a fractional float
	tmpS = tmpS f/ tmpM; setSPEFSCRDivFlags_L( tmpS, tmpM, tmpS );
	# low section -- get B float up to 64 bit width
	tmpE:4 = B:4; tmpB:8 = float2float( tmpE ); setSPEFSCR_L( tmpB );
	# check if less than or equal to positive saturation limit
	if ( tmpB f<= tmpS ) goto ;
	# set to saturation
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# scale the fractional portion up to integer side of mantissa
	tmpB = tmpB f* tmpM; setSPEFSCRMulFlags_L( tmpB, tmpM, tmpB );
	# truncate back to integer
	tmpL:4 = trunc( tmpB ); setSPEFSCR_L( tmpL ); setSummarySPEFSCR();
	# high section -- get B float up to 64 bit width
	tmpE = B(4); tmpB = float2float( tmpE ); setSPEFSCR_H( tmpB );
	# check if less than or equal to positive saturation limit
	if ( tmpB f<= tmpS ) goto ;
	# set to saturation
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# scale the fractional portion up to integer side of mantissa
	tmpB = tmpB f* tmpM; setSPEFSCRMulFlags_H( tmpB, tmpM, tmpB );
	# truncate back to integer
	tmpH:4 = trunc( tmpB ); setSPEFSCR_H( tmpH ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 567
# evfsctui rT,rB
# ISA-cmt: evfsctui - Vector Convert Floating-Point Single-Precision to Unsigned Integer
# ISA-info: evfsctui - Form "EVX" Page 567 Category "SP.FV"
# binutils: mytest.d: 210: 10 20 12 94 evfsctui r1,r2
:evfsctui D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=660
{
	# load saturation limit as a float
	tmpS:8 = 0x00000000FFFFFFFF; tmpS = int2float( tmpS ); setSPEFSCR_L( tmpS );
	# low section
	tmpE:4 = B:4; tmpB:8 = float2float( tmpE );
	# limit to saturation
	if ( tmpB f<= tmpS ) goto ;
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# round back to integer
	tmpL:4 = trunc(round( tmpB )); setSPEFSCR_L( tmpL ); setSummarySPEFSCR();
	# high section
	tmpE = B(4); tmpB = float2float( tmpE );
	# limit to saturation
	if ( tmpB f<= tmpS ) goto ;
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	# round back to integer
	tmpH:4 = trunc(round( tmpB )); setSPEFSCR_H( tmpH ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 567
# evfsctuiz rT,rB
# ISA-cmt: evfsctuiz - Vector Convert Floating-Point Single-Precision to Unsigned Integer with Round toward Zero
# ISA-info: evfsctuiz - Form "EVX" Page 567 Category "SP.FV"
# binutils: mytest.d: 220: 10 20 12 98 evfsctuiz r1,r2
:evfsctuiz D,B is OP=4 & D & BITS_16_20=0 & B & XOP_0_10=664
{
	# load saturation limit as a float
	tmpS:8 = 0x00000000FFFFFFFF; tmpS = int2float( tmpS ); setSPEFSCR_L( tmpS );
	# low section
	tmpE:4 = B:4; tmpB:8 = float2float( tmpE );
	# limit to saturation
	if ( tmpB f<= tmpS ) goto ;
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	tmpL:4 = trunc( tmpB ); setSummarySPEFSCR();
	# high section
	tmpE = B(4); tmpB = float2float( tmpE );
	# limit to saturation
	if ( tmpB f<= tmpS ) goto ;
	tmpB = tmpS; spef_fx = 1; spef_finxs = 1; spef_fg = 1;
	tmpH:4 = trunc( tmpB ); setSummarySPEFSCR();
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 562
# evfsdiv rT,rA,rB
# ISA-cmt: evfsdiv - Vector Floating-Point Single-Precision Divide
# ISA-info: evfsdiv - Form "EVX" Page 562 Category "SP.FV"
# binutils: mytest.d: 1f0: 10 22 1a 89 evfsdiv r1,r2,r3
:evfsdiv D,A,B is OP=4 & D & A & B & XOP_0_10=649
{
	tmpAL:4 = A:4; tmpAH:4 = A(4); tmpBL:4 = B:4; tmpBH:4 = B(4);
	tmpL:4 = tmpAL f/ tmpBL; setSPEFSCRDivFlags_L( tmpAL, tmpBL, tmpL );
	tmpH:4 = tmpAH f/ tmpBH; setSPEFSCRDivFlags_H( tmpAH, tmpBH, tmpH );
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH ); tmpZ = ( tmpZ << 32 ) | zext( tmpL ); D = tmpZ;
}
# =================================================================
# Page 562
# evfsmul rT,rA,rB
# ISA-cmt: evfsmul - Vector Floating-Point Single-Precision Multiply
# ISA-info: evfsmul - Form "EVX" Page
# 562 Category "SP.FV"   (continuation of the evfsmul ISA-info header above)
# binutils: mytest.d: 1ec: 10 22 1a 88 evfsmul r1,r2,r3
# evfsmul: element-wise SP multiply of the low and high words of rA and rB;
# updates the low/high SPEFSCR multiply status flags, packs both products into D.
:evfsmul D,A,B is OP=4 & D & A & B & XOP_0_10=648
{
	tmpAL:4 = A:4;
	tmpAH:4 = A(4);
	tmpBL:4 = B:4;
	tmpBH:4 = B(4);
	tmpL:4 = tmpAL f* tmpBL;
	setSPEFSCRMulFlags_L( tmpAL, tmpBL, tmpL );
	tmpH:4 = tmpAH f* tmpBH;
	setSPEFSCRMulFlags_H( tmpAH, tmpBH, tmpH );
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH );
	tmpZ = ( tmpZ << 32 ) | zext( tmpL );
	D = tmpZ;
}
# =================================================================
# Page 561
# evfsnabs rT,rA
# ISA-cmt: evfsnabs - Vector Floating-Point Single-Precision Negative Absolute Value
# ISA-info: evfsnabs - Form "EVX" Page 561 Category "SP.FV"
# binutils: mytest.d: 1e4: 10 22 02 85 evfsnabs r1,r2
:evfsnabs D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=645
{
	tmpAL:4 = A:4;
	tmpAH:4 = A(4);
	tmpL:4 = f- ( abs( tmpAL ) );
	tmpH:4 = f- ( abs( tmpAH ) );
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH );
	tmpZ = ( tmpZ << 32 ) | zext( tmpL );
	D = tmpZ;
}
# =================================================================
# Page 561
# evfsneg rT,rA
# ISA-cmt: evfsneg - Vector Floating-Point Single-Precision Negate
# ISA-info: evfsneg - Form "EVX" Page 561 Category "SP.FV"
# binutils: mytest.d: 1e8: 10 22 02 86 evfsneg r1,r2
:evfsneg D,A is OP=4 & D & A & BITS_11_15=0 & XOP_0_10=646
{
	tmpAL:4 = A:4;
	tmpAH:4 = A(4);
	tmpL:4 = f-( tmpAL );
	tmpH:4 = f-( tmpAH );
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH );
	tmpZ = ( tmpZ << 32 ) | zext( tmpL );
	D = tmpZ;
}
# =================================================================
# Page 562
# evfssub rT,rA,rB
# ISA-cmt: evfssub - Vector Floating-Point Single-Precision Subtract
# ISA-info: evfssub - Form "EVX" Page 562 Category "SP.FV"
# binutils: mytest.d: 1dc: 10 22 1a 81 evfssub r1,r2,r3
# evfssub: element-wise SP subtract (rA - rB) of the low and high words;
# updates the low/high SPEFSCR subtract status flags.
:evfssub D,A,B is OP=4 & D & A & B & XOP_0_10=641
{
	tmpAL:4 = A:4;
	tmpAH:4 = A(4);
	tmpBL:4 = B:4;
	tmpBH:4 = B(4);
	tmpL:4 = tmpAL f- tmpBL;
	setSPEFSCRSubFlags_L( tmpAL, tmpBL, tmpL );
	tmpH:4 = tmpAH f- tmpBH;
	setSPEFSCRSubFlags_H( tmpAH, tmpBH, tmpH );
	# BUG FIX: the computed differences were never written back to rD --
	# every sibling op (evfsadd/evfsmul/evfsdiv/evfsneg/evfsnabs) packs the
	# high/low results into D; do the same here.
	# move results into upper and lower words
	tmpZ:8 = zext( tmpH );
	tmpZ = ( tmpZ << 32 ) | zext( tmpL );
	D = tmpZ;
}
# =================================================================
# Page 565
# evfststeq CRFD,rA,rB
# ISA-cmt: evfststeq - Vector Floating-Point Single-Precision Test Equal
# ISA-info: evfststeq - Form "EVX" Page 565 Category "SP.FV"
# binutils: mytest.d: 230: 10 82 1a 9e evfststeq cr1,r2,r3
# CR field encoding: bit3 = high-elem, bit2 = low-elem, bit1 = either, bit0 = both.
:evfststeq CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=670
{
	tmpA:4 = A:4;
	tmpB:4 = B:4;
	tmpC:4 = A(4);
	tmpD:4 = B(4);
	tmpL:1 = tmpA f== tmpB;
	tmpH:1 = tmpC f== tmpD;
	CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL);
}
# =================================================================
# Page 564
# evfststgt CRFD,rA,rB
# ISA-cmt: evfststgt - Vector Floating-Point Single-Precision Test Greater Than
# ISA-info: evfststgt - Form "EVX" Page 564 Category "SP.FV"
# binutils: mytest.d: 228: 10 82 1a 9c evfststgt cr1,r2,r3
:evfststgt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=668
{
	tmpA:4 = A:4;
	tmpB:4 = B:4;
	tmpC:4 = A(4);
	tmpD:4 = B(4);
	tmpL:1 = tmpA f> tmpB;
	tmpH:1 = tmpC f> tmpD;
	CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL);
}
# =================================================================
# Page 565
# evfststlt CRFD,rA,rB
# ISA-cmt: evfststlt - Vector Floating-Point Single-Precision Test Less Than
# ISA-info: evfststlt - Form "EVX" Page 565 Category "SP.FV"
# binutils: mytest.d: 22c: 10 82 1a 9d evfststlt cr1,r2,r3
:evfststlt CRFD,A,B is OP=4 & CRFD & BITS_21_22=0 & A & B & XOP_0_10=669
{
	tmpA:4 = A:4;
	tmpB:4 = B:4;
	tmpC:4 = A(4);
	tmpD:4 = B(4);
	tmpL:1 = tmpA f< tmpB;
	tmpH:1 = tmpC f< tmpD;
	CRFD = (8 * tmpH ) + (4 * tmpL ) + (2 * (tmpH | tmpL) ) + (tmpH & tmpL);
}
# =================================================================
# Page 915
# evlddepx rT,rA,rB
# Note: context is not supported
# 64-bit load, externally mediated: rD = MEM[rA|0 + rB, 8].
:evlddepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=799 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
	D = *:8(ea);
}
#
# =================================================================
# Page 519
# evlwhe RT,D(RA)
# evlwhe rT,rA,UI
# Vector Load Word into Two Halfwords Even: fetch the halfwords at ea and
# ea+2 and deposit one into each 32-bit half of rD (remaining bits zeroed).
:evlwhe D,EVUIMM_4_RAt is OP=4 & D & EVUIMM_4_RAt & XOP_0_10=785
{
	addr:$(REGISTER_SIZE) = EVUIMM_4_RAt;
	# upper word receives the halfword at addr+2, lower word the one at addr
	upper:8 = zext( *:2(addr + 2) );
	lower:8 = zext( *:2(addr) );
	D = ( upper << 32 ) | lower;
}
# =================================================================
# Page 519
# evlwhex rT,rA,rB
# ISA-cmt: evlwhex - Vector Load Word into Two Halfwords Even Indexed
# ISA-info: evlwhex - Form "EVX" Page 519 Category "SP"
# binutils: mytest.d: 238: 10 22 1b 10 evlwhex r1,r2,r3
# Indexed form of evlwhe: effective address is (rA|0) + rB.
:evlwhex D,RA_OR_ZERO,B is OP=4 & D & RA_OR_ZERO & B & XOP_0_10=784
{
	addr:$(REGISTER_SIZE) = RA_OR_ZERO + B;
	upper:8 = zext( *:2(addr + 2) );
	lower:8 = zext( *:2(addr) );
	D = ( upper << 32 ) | lower;
}
# =================================================================
# Page 521
# evlwwsplat RT,D(RA)
# evlwwsplat rT,rA,UI
# ISA-cmt: evlwwsplat - Vector Load Word into Word and Splat
# ISA-info: evlwwsplat - Form "EVX" Page 521 Category "SP"
# binutils: NO-EXAMPLE - evlwwsplat
# collides with maclhwu
# Load one 32-bit word and replicate it into both halves of rD.
:evlwwsplat D,EVUIMM_4_RAt is OP=4 & D & EVUIMM_4_RAt & XOP_0_10=793
{
	addr:$(REGISTER_SIZE) = EVUIMM_4_RAt;
	upper:8 = zext( *:4(addr) );
	lower:8 = zext( *:4(addr) );
	D = ( upper << 32 ) | lower;
}
# =================================================================
# Page 521
# evlwwsplatx rT,rA,rB
# ISA-cmt: evlwwsplatx - Vector Load Word into Word and Splat Indexed
# ISA-info: evlwwsplatx - Form "EVX" Page 521 Category "SP"
# binutils: mytest.d: 23c: 10 22 1b 18 evlwwsplatx r1,r2,r3
# collides with maclhwu
# Indexed form of evlwwsplat: effective address is (rA|0) + rB.
:evlwwsplatx D,RA_OR_ZERO,B is OP=4 & D & RA_OR_ZERO & B & XOP_0_10=792
{
	addr:$(REGISTER_SIZE) = RA_OR_ZERO + B;
	upper:8 = zext( *:4(addr) );
	lower:8 = zext( *:4(addr) );
	D = ( upper << 32 ) | lower;
}
#
# =================================================================
# Page 541
# evmwlsmiaaw rT,rA,rB
# ISA-cmt: evmwlsmiaaw - Vector Multiply Word Low Signed
# ISA-info: evmwlsmiaaw - Form "EVX" Page 541 Category "SP"
# binutils: mytest.d: 248: 10 22 1d 49 evmwlsmiaaw r1,r2,r3
# NOTE(review): although documented as a *signed* multiply, the operands below
# are widened with zext, not sext -- verify against the SPE PEM / upstream
# Ghidra SPE.sinc before relying on the high-word semantics.
:evmwlsmiaaw D,A,B is OP=4 & D & A & B & XOP_0_10=1353
{
	tmpACCL:4 = ACC:4; tmpACCH:4 = ACC(4);
	tmpAL:8 = zext( A:4 ); tmp:4 = A(4); tmpAH:8 = zext( tmp );
	tmpBL:8 = zext( B:4 ); tmp = B(4); tmpBH:8 = zext( tmp );
	# high elements: product low word added to accumulator high word
	temp:8 = tmpAH * tmpBH;
	tmpD:4 = tmpACCH + temp:4;
	D = ( zext( tmpD ) ) << 32;
	# low elements: product low word added to accumulator low word
	temp = tmpAL * tmpBL;
	tmpDL:4 = tmpACCL + temp:4;
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpDL );
	ACC = D;
}
# =================================================================
# Page 541
# evmwlsmianw rT,rA,rB
# ISA-cmt: evmwlsmianw - Vector Multiply Word Low Signed
# ISA-info: evmwlsmianw - Form "EVX" Page 541 Category "SP"
# binutils: mytest.d: 254: 10 22 1d c9 evmwlsmianw r1,r2,r3
# Same as evmwlsmiaaw but the products are *subtracted* from the accumulator.
:evmwlsmianw D,A,B is OP=4 & D & A & B & XOP_0_10=1481
{
	tmpACCL:4 = ACC:4; tmpACCH:4 = ACC(4);
	tmpAL:8 = zext( A:4 ); tmp:4 = A(4); tmpAH:8 = zext( tmp );
	tmpBL:8 = zext( B:4 ); tmp = B(4); tmpBH:8 = zext( tmp );
	temp:8 = tmpAH * tmpBH;
	tmpD:4 = tmpACCH - temp:4;
	D = ( zext( tmpD ) ) << 32;
	temp = tmpAL * tmpBL;
	tmpDL:4 = tmpACCL - temp:4;
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpDL );
	ACC = D;
}
# =================================================================
# Page 541
# evmwlssianw rT,rA,rB
# ISA-cmt: evmwlssianw - Vector Multiply Word Low Signed
# ISA-info: evmwlssianw - Form "EVX" Page 541 Category "SP"
# binutils: mytest.d: 250: 10 22 1d c1 evmwlssianw r1,r2,r3
# Saturating variant: on overflow (detected via bit32 ^ bit31 of the 33-bit
# difference) the element saturates to 0x7FFFFFFF / 0x80000000.
# NOTE(review): the <label> operands of the `goto` statements below were
# stripped by the extraction (bare "goto ;"); restore the saturation branch
# targets from the upstream Ghidra SPE.sinc before compiling.
:evmwlssianw D,A,B is OP=4 & D & A & B & XOP_0_10=1473
{
	tmpACCL:4 = ACC:4; tmpACCH:4 = ACC(4);
	tmpAL:8 = zext( A:4 ); tmp:4 = A(4); tmpAH:8 = zext( tmp );
	tmpBL:8 = zext( B:4 ); tmp = B(4); tmpBH:8 = zext( tmp );
	temp:8 = tmpAH * tmpBH;
	temp = sext( tmpACCH ) - sext( temp:4 );
	tmpOVH:1 = temp[32,1] ^ temp[31,1];
	# check for saturation
	if ( tmpOVH == 0 ) goto ;
	if ( temp[32,1] == 1 ) goto ;
	D = ( D & 0x00000000FFFFFFFF ) | 0x7FFFFFFF00000000;
	goto ;
	D = ( D & 0x00000000FFFFFFFF ) | 0x8000000000000000;
	goto ;
	D = ( D & 0x00000000FFFFFFFF ) | ( zext( temp:4 ) << 32 );
	temp = tmpAL * tmpBL;
	temp = sext( tmpACCL ) - sext( temp:4 );
	tmpOVL:1 = temp[32,1] ^ temp[31,1];
	# check for saturation
	if ( tmpOVL == 0 ) goto ;
	if ( temp[32,1] == 1 ) goto ;
	D = ( D & 0xFFFFFFFF00000000 ) | 0x000000007FFFFFFF;
	goto ;
	D = ( D & 0xFFFFFFFF00000000 ) | 0x0000000080000000;
	goto ;
	D = ( D & 0xFFFFFFFF00000000 ) | zext( temp:4 );
	ACC = D;
	# record per-element overflow plus sticky overflow bits
	spef_ovh = tmpOVH; spef_ov = tmpOVL;
	spef_sovh = spef_sovh | tmpOVH; spef_sov = spef_sov | tmpOVL;
}
# =================================================================
# Page 544
# evmwsmi rT,rA,rB
# ISA-cmt: evmwsmi - Vector Multiply Word Signed
# ISA-info: evmwsmi - Form "EVX" Page 544 Category "SP"
# binutils: mytest.d: 244: 10 22 1c 59 evmwsmi r1,r2,r3
# collides with machhwo
# NOTE(review): "signed" multiply implemented with zext -- verify.
:evmwsmi D,A,B is OP=4 & D & A & B & XOP_0_10=1113
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	D = tmpAL * tmpBL;
}
# =================================================================
# Page 544
# evmwsmiaa rT,rA,rB
# ISA-cmt: evmwsmiaa - Vector Multiply Word Signed
# ISA-info: evmwsmiaa - Form "EVX" Page 544 Category "SP"
# binutils: mytest.d: 24c: 10 22 1d 59 evmwsmiaa r1,r2,r3
# collides with macchwo.
:evmwsmiaa D,A,B is OP=4 & D & A & B & XOP_0_10=1369
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	temp:8 = tmpAL * tmpBL;
	D = ACC + temp;
	ACC = D;
}
# =================================================================
# Page 544
# evmwsmian rT,rA,rB
# ISA-cmt: evmwsmian - Vector Multiply Word Signed
# ISA-info: evmwsmian - Form "EVX" Page 544 Category "SP"
# binutils: mytest.d: 25c: 10 22 1d d9 evmwsmian r1,r2,r3
# collides with macchwso.
# Multiply-word then subtract from accumulator: rD = ACC - (rA.lo * rB.lo).
:evmwsmian D,A,B is OP=4 & D & A & B & XOP_0_10=1497
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	temp:8 = tmpAL * tmpBL;
	D = ACC - temp;
	ACC = D;
}
# =================================================================
# Page 546
# evmwumi rT,rA,rB
# ISA-cmt: evmwumi - Vector Multiply Word Unsigned
# ISA-info: evmwumi - Form "EVX" Page 546 Category "SP"
# binutils: mytest.d: 240: 10 22 1c 58 evmwumi r1,r2,r3
# collides with machhwo
:evmwumi D,A,B is OP=4 & D & A & B & XOP_0_10=1112
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	temp:8 = tmpAL * tmpBL;
	D = temp;
}
# evmwumia RT,RA,RB
# ISA-cmt: Vector Multiply Word Unsigned, Modulo, Integer to Accumulator
# evmwumia rD,rA,rB 100 01A1 1000 A=1
:evmwumia D,A,B is OP=4 & A & B & D & XOP_0_10=0x478
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	temp:8 = tmpAL * tmpBL;
	D = temp;
	ACC = D;
}
# evmwumiaa RT,RA,RB
# ISA-cmt: Vector Multiply Word Unsigned, Modulo, Integer and Accumulate
# evmwumiaa rD,rA,rB 101 0101 1000
# evmwumiaa confict with macchwo
:evmwumiaa D,A,B is OP=4 & A & B & D & XOP_0_10=0x558
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	temp:8 = tmpAL * tmpBL;
	D = ACC + temp;
	ACC = D;
}
# =================================================================
# Page 547
# evmwumian rT,rA,rB
# ISA-cmt: evmwumian - Vector Multiply Word Unsigned
# ISA-info: evmwumian - Form "EVX" Page 547 Category "SP"
# binutils: mytest.d: 258: 10 22 1d d8 evmwumian r1,r2,r3
# collides with macchwso
:evmwumian D,A,B is OP=4 & D & A & B & XOP_0_10=1496
{
	tmpAL:8 = zext( A:4 );
	tmpBL:8 = zext( B:4 );
	temp:8 = tmpAL * tmpBL;
	D = ACC - temp;
	ACC = D;
}
# =================================================================
# Page 549
# evsel rT,rA,rB
# ISA-cmt: evsel - Vector Select
# ISA-info: evsel - Form "EVS" Page 549 Category "SP"
# binutils: mytest.d: 1d4: 10 22 1a 7c evsel r1,r2,r3,cr4
# Select each 32-bit half of rD from rA or rB according to two bits of the
# CR field BFA (bit 3 steers the high word, bit 2 the low word).
# NOTE(review): the <label> operands of the `goto` statements below were
# stripped by the extraction (bare "goto ;"); restore the select branch
# targets from the upstream Ghidra SPE.sinc before compiling.
:evsel D,A,B,BFA is OP=4 & D & A & B & XOP_3_10=79 & BFA
{
	tmpAL:8 = zext( A:4 ); tmp:4 = A(4); tmpAH:8 = zext( tmp );
	tmpBL:8 = zext( B:4 ); tmp = B(4); tmpBH:8 = zext( tmp );
	tmpBFA:1 = BFA;
	if ( tmpBFA[3,1] == 0 ) goto ;
	D = ( D & 0x00000000FFFFFFFF ) | ( tmpAH << 32 );
	goto ;
	D = ( D & 0x00000000FFFFFFFF ) | ( tmpBH << 32 );
	if ( tmpBFA[2,1] == 0 ) goto ;
	D = ( D & 0xFFFFFFFF00000000 ) | tmpAL;
	goto ;
	D = ( D & 0xFFFFFFFF00000000 ) | tmpBL;
}
# =================================================================
# Page 915
# evstddepx rT,rA,rB
# Note: context is not supported
# 64-bit store, externally mediated: MEM[rA|0 + rB, 8] = rD.
:evstddepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=927 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
	*:8(ea) = D;
}
================================================
FILE: pypcode/processors/PowerPC/data/languages/SPE_FloatMulAdd.sinc
================================================
# Additional SPE Instructions for Devices that Support VLE
# Freescale engineering bulletin EB689
# https://www.nxp.com/docs/en/engineering-bulletin/EB689.pdf
# additional SPE instructions that are implemented on devices with an e200z3 or e200z6 core that supports VLE
# - Vector Floating-Point Single-Precision Multiply-Add
# - Vector Floating-Point Single-Precision Multiply-Substract
# - Vector Floating-Point Single-Precision Negative Multiply-Add
# - Vector Floating-Point Single-Precision Negative Multiply-Substract
# - Floating-Point Single-Precision Multiply-Add
# - Floating-Point Single-Precision Multiply-Substract
# - Floating-Point Single-Precision Negative Multiply-Add
# - Floating-Point Single-Precision Negative Multiply-Substract
# evfsmadd rD,rA,rB
# Vector Floating-Point Single-Precision Multiply-Add
# Element-wise rD = (rA * rB) + rD for both 32-bit halves.
:evfsmadd D,A,B is OP=4 & D & A & B & XOP_0_10=0x282
{
	local tmpAL:4 = A:4; local tmpAH:4 = A(4);
	local tmpBL:4 = B:4; local tmpBH:4 = B(4);
	local tmpDL:4 = D:4; local tmpDH:4 = D(4);
	tmpDL = ((tmpAL f* tmpBL) f+ tmpDL);
	tmpDH = ((tmpAH f* tmpBH) f+ tmpDH);
	D = (zext(tmpDH) << 32) | zext(tmpDL);
}
# evfsmsub rD,rA,rB
# Vector Floating-Point Single-Precision Multiply-Substract
# Element-wise rD = (rA * rB) - rD for both 32-bit halves.
:evfsmsub D,A,B is OP=4 & D & A & B & XOP_0_10=0x283
{
	local tmpAL:4 = A:4; local tmpAH:4 = A(4);
	local tmpBL:4 = B:4; local tmpBH:4 = B(4);
	local tmpDL:4 = D:4; local tmpDH:4 = D(4);
	tmpDL = ((tmpAL f* tmpBL) f- tmpDL);
	tmpDH = ((tmpAH f* tmpBH) f- tmpDH);
	D = (zext(tmpDH) << 32) | zext(tmpDL);
}
# evfsnmadd
# Vector Floating-Point Single-Precision Negative Multiply-Add
:evfsnmadd D,A,B is OP=4 & D & A & B & XOP_0_10=0x28A
{
	local tmpAL:4 = A:4; local tmpAH:4 = A(4);
	local tmpBL:4 = B:4; local tmpBH:4 = B(4);
	local tmpDL:4 = D:4; local tmpDH:4 = D(4);
	tmpDL = f- ((tmpAL f* tmpBL) f+ tmpDL);
	tmpDH = f- ((tmpAH f* tmpBH) f+ tmpDH);
	D = (zext(tmpDH) << 32) | zext(tmpDL);
}
# evfsnmsub
# Vector Floating-Point Single-Precision Negative Multiply-Substract
:evfsnmsub D,A,B is OP=4 & D & A & B & XOP_0_10=0x28B
{
	local tmpAL:4 = A:4; local tmpAH:4 = A(4);
	local tmpBL:4 = B:4; local tmpBH:4 = B(4);
	local tmpDL:4 = D:4; local tmpDH:4 = D(4);
	tmpDL = f- ((tmpAL f* tmpBL) f- tmpDL);
	tmpDH = f- ((tmpAH f* tmpBH) f- tmpDH);
	D = (zext(tmpDH) << 32) | zext(tmpDL);
}
# efsmadd rD,rA,rB
# Floating-Point Single-Precision Multiply-Add
# Scalar: only the low 32 bits of rD are updated.
:efsmadd D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C2
{
	local lo:4 = (A:4 f* B:4) f+ D:4;
	D = (D & 0xFFFFFFFF00000000) | zext(lo);
}
# efsmsub rD,rA,rB
# Floating-Point Single-Precision Multiply-Substract
:efsmsub D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C3
{
	local lo:4 = (A:4 f* B:4) f- D:4;
	D = (D & 0xFFFFFFFF00000000) | zext(lo);
}
# efsnmadd rD,rA,rB
# Floating-Point Single-Precision Negative Multiply-Add
:efsnmadd D,A,B is OP=4 & D & A & B & XOP_0_10=0x2CA
{
	local lo:4 = f- ((A:4 f* B:4) f+ D:4);
	D = (D & 0xFFFFFFFF00000000) | zext(lo);
}
# efsnmsub rD,rA,rB
# Floating-Point Single-Precision Negative Multiply-Substract
:efsnmsub D,A,B is OP=4 & D & A & B & XOP_0_10=0x2CB
{
	local lo:4 = f- ((A:4 f* B:4) f- D:4);
	D = (D & 0xFFFFFFFF00000000) | zext(lo);
}
================================================
FILE: pypcode/processors/PowerPC/data/languages/Scalar_SPFP.sinc
================================================
# Based on "EREF: A Reference for Freescale Book E and e500 Core" document version 01/2004 Rev 2.0
# Instructions that are specific to the (PowerPC) e500 core are implemented as auxiliary processing units (APUs)
# Embedded Vector and Scalar Single-Precision Floating-Point APUs (SPFP APU)
# There are three versions of e500 core, namely e500v1, the e500v2, and the e500mc.
# A 64-bit evolution of the e500mc core is called e5500 core.
# All PowerQUICC 85xx devices are based on e500v1 or e500v2 cores.
# NOTE(review): in this extraction the <label> operands and label definitions
# of the SLEIGH `goto` statements below were stripped (bare "goto ;" remains);
# restore the branch targets from the upstream Ghidra Scalar_SPFP.sinc before
# compiling.  All non-comment tokens are preserved as extracted.
# =================================================================
# Page 408
# efsabs rT,rA 010 1100 0100
#define pcodeop FloatingPointAbsoluteValue;
:efsabs D,A is OP=4 & D & A & XOP_0_10=0x2C4 & BITS_11_15=0
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( abs( A:4 ) );
}
# efsadd rT,rA,rB 010 1100 0000
#define pcodeop FloatingPointAdd;
:efsadd D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C0
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f+ B:4 );
	setFPAddFlags( A:4, B:4, D:4 );
}
# =================================================================
# Page 410
# efscfsf rT,rB 010 1101 0011
#define pcodeop ConvertFloatingPointFromSignedFraction;
:efscfsf D,B is OP=4 & D & B & XOP_0_10=0x2D3 & BITS_16_20=0
{
	# load fractional divisor as a float
	tmpA:4 = 0x80000000; tmpA = int2float( tmpA ); setFPRF( tmpA );
	# check if negative
	if ( ( B:4 & 0x80000000 ) != 0 ) goto ;
	# float the fractional portion of register B
	tmpB:4 = int2float( B:4 ); setFPRF( tmpB );
	tmpB = tmpB f/ tmpA; setFPDivFlags( tmpB, tmpA, tmpB );
	goto ;
	# float the fractional portion of register B, 2's complement negate
	tmpB = int2float( -( B:4 ) ); setFPRF( tmpB );
	tmpB = tmpB f/ tmpA; setFPDivFlags( tmpB, tmpA, tmpB );
	# negate the float
	tmpB = f-( tmpB ); setFPRF( tmpB ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB );
}
# efscfsi rT,rB 010 1101 0001
#define pcodeop ConvertFloatingPointFromSignedInteger;
:efscfsi D,B is OP=4 & D & B & XOP_0_10=0x2D1 & BITS_16_20=0
{
	# check if negative
	if ( ( B:4 & 0x80000000 ) != 0 ) goto ;
	# float the integer portion of register B
	tmpB:4 = int2float( B:4 ); setFPRF( tmpB );
	goto ;
	# float the integer portion of register B, 2's complement negate
	tmpB = int2float( -( B:4 ) ); setFPRF( tmpB );
	# negate the float
	tmpB = f-( tmpB ); setFPRF( tmpB ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB );
}
# efscfuf rT,rB 010 1101 0010
define pcodeop ConvertFloatingPointFromUnsignedFraction;
:efscfuf D,B is OP=4 & D & B & XOP_0_10=0x2D2 & BITS_16_20=0
{
	# load fractional divisor as a float
	tmpA:8 = 0x0000000100000000; tmpA = int2float( tmpA ); setFPRF( tmpA );
	# float the fractional portion of register B
	tmpB:8 = int2float( B:4 ); setFPRF( tmpB );
	tmpB = tmpB f/ tmpA; setFPDivFlags( tmpB, tmpA, tmpB );
	tmpC:4 = float2float( tmpB ); setFPRF( tmpC ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC );
}
# rT,rB 010 1101 0000
#define pcodeop ConvertFloatingPointFromUnsignedInteger;
:efscfui D,B is OP=4 & D & B & XOP_0_10=0x2D0 & BITS_16_20=0
{
	tmp:4 = int2float( B:4 ); setFPRF( tmp );
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmp );
	setSummaryFPSCR();
}
# efscmpeq CRFD,rA,rB 010 1100 1110
:efscmpeq CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2CE & BITS_21_22=0
{
	CRFD[2,1] = A:4 f== B:4;
}
# =================================================================
# Page 415
# efscmpgt CRFD,rA,rB 010 1100 1100
:efscmpgt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2CC & BITS_21_22=0
{
	CRFD[2,1] = A:4 f> B:4;
}
# efscmplt CRFD,rA,rB 010 1100 1101
:efscmplt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2CD & BITS_21_22=0
{
	CRFD[2,1] = A:4 f< B:4;
}
# efsctsf rT,rB 010 1101 0111
#define pcodeop ConvertFloatingPointToSignedFraction;
:efsctsf D,B is OP=4 & D & B & XOP_0_10=0x2D7 & BITS_16_20=0
{
	# multiply by 0x0000 0000 8000 0000 to scale the fraction up to integer range
	# load fractional multiplier as a float
	tmpM:8 = 0x0000000080000000; tmpM = int2float( tmpM ); setFPRF( tmpM );
	# load saturation limit as a float
	tmpL:8 = 0x0000000080000000 - 1; tmpL = int2float( tmpL ); setFPRF( tmpL );
	# scale the saturation limit to a fractional float
	tmpL = tmpL f/ tmpM; setFPDivFlags( tmpL, tmpM, tmpL );
	# get B float up to 64 bit width
	tmpB:8 = float2float( B:4 ); setFPRF( tmpB );
	# check if less than or equal to positive saturation limit
	if ( tmpB f<= tmpL ) goto ;
	# set to positive saturation
	tmpB = tmpL;
	goto ;
	# check if greater than or equal to negative saturation limit
	tmpL = f-( tmpL );
	if ( tmpB f>= tmpL ) goto ;
	# set to negative saturation
	tmpB = tmpL;
	# scale the fractional portion up to integer side of mantissa
	tmpB = tmpB f* tmpM; setFPMulFlags( tmpB, tmpM, tmpB );
	# truncate back to signed fraction format
	tmpC:4 = trunc( tmpB ); setFPRF( tmpB ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC );
}
# efsctsi rT,rB 010 1101 0101
#define pcodeop ConvertFloatingPointToSignedInteger;
:efsctsi D,B is OP=4 & D & B & XOP_0_10=0x2D5 & BITS_16_20=0
{
	# create zero float constant
	tmpA:4 = 0; tmpA = int2float( tmpA );
	# check if negative
	if ( B:4 f< tmpA ) goto ;
	tmpB:8 = trunc(round( B:4 )); setFPRF( tmpB );
	# limit to positive saturation
	if ( tmpB <= 0x000000007FFFFFFF ) goto ;
	tmpB = 0x000000007FFFFFFF;
	goto ;
	# negate the float
	tmpB = trunc(round( f-( B:4 ) )); setFPRF( tmpB );
	# limit to negative saturation
	if ( tmpB <= 0x0000000080000000 ) goto ;
	tmpB = 0x0000000080000000;
	# negate the signed int
	tmpB = -( tmpB ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 );
}
# efsctsiz rT,rB 010 1101 1010
#define pcodeop ConvertFloatingPointToSignedIntegerWithRoundTowardZero;
:efsctsiz D,B is OP=4 & D & B & XOP_0_10=0x2DA & BITS_16_20=0
{
	# create zero float constant
	tmpA:4 = 0; tmpA = int2float( tmpA );
	# check if negative
	if ( B:4 f< tmpA ) goto ;
	tmpB:8 = trunc( B:4 ); setFPRF( tmpB );
	# limit to saturation
	if ( tmpB <= 0x000000007FFFFFFF ) goto ;
	tmpB = 0x000000007FFFFFFF;
	goto ;
	# negate the float
	tmpB = trunc( f-( B:4 ) ); setFPRF( tmpB );
	# limit to saturation
	if ( tmpB <= 0x0000000080000000 ) goto ;
	tmpB = 0x0000000080000000;
	# negate the signed int
	tmpB = -( tmpB ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 );
}
# =================================================================
# Page 420
# efsctuf rT,rB 010 1101 0110
#define pcodeop ConvertFloatingPointToUnsignedFraction;
:efsctuf D,B is OP=4 & D & B & XOP_0_10=0x2D6 & BITS_16_20=0
{
	# multiply by 0x0000 0001 0000 0000 to scale the fraction up to integer range
	# load fractional multiplier as a float
	tmpM:8 = 0x0000000100000000; tmpM = int2float( tmpM ); setFPRF( tmpM );
	# load saturation limit as a float
	tmpL:8 = 0x0000000100000000 - 1; tmpL = int2float( tmpL ); setFPRF( tmpL );
	# scale the saturation limit to a fractional float
	tmpL = tmpL f/ tmpM; setFPDivFlags( tmpL, tmpM, tmpL );
	# get B float up to 64 bit width
	tmpB:8 = float2float( B:4 ); setFPRF( tmpB );
	# check if less than or equal to positive saturation limit
	if ( tmpB f<= tmpL ) goto ;
	# set to saturation
	tmpB = tmpL;
	# scale the fractional portion up to integer side of mantissa
	tmpB = tmpB f* tmpM; setFPMulFlags( tmpB, tmpM, tmpB );
	# truncate back to integer
	tmpC:4 = trunc( tmpB ); setFPRF( tmpC ); setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpC );
}
# efsctui rT,rB 010 1101 0100
#define pcodeop ConvertFloatingPointToUnsignedInteger;
:efsctui D,B is OP=4 & D & B & XOP_0_10=0x2D4 & BITS_16_20=0
{
	tmpB:8 = trunc(round( B:4 )); setFPRF( tmpB );
	# limit to saturation
	if ( tmpB <= 0x000000007FFFFFFF ) goto ;
	tmpB = 0x000000007FFFFFFF;
	setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 );
}
# efsctuiz rT,rB 010 1101 1000
#define pcodeop ConvertFloatingPointToUnsignedIntegerWithRoundTowardZero;
:efsctuiz D,B is OP=4 & D & B & XOP_0_10=0x2D8 & BITS_16_20=0
{
	tmpB:8 = trunc( B:4 ); setFPRF( tmpB );
	# limit to saturation
	if ( tmpB <= 0x000000007FFFFFFF ) goto ;
	tmpB = 0x000000007FFFFFFF;
	setSummaryFPSCR();
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( tmpB:4 );
}
# efsdiv rT,rA,rB 010 1100 1001
#define pcodeop FloatingPointDivide;
:efsdiv D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C9
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f/ B:4 );
	setFPDivFlags( A:4, B:4, D:4 );
}
# efsmul rT,rA,rB 010 1100 1000
#define pcodeop FloatingPointMultiply;
:efsmul D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C8
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f* B:4 );
	setFPMulFlags( A:4, B:4, D:4 );
}
# =================================================================
# Page 425
# efsnabs rT,rA 010 1100 0101
#define pcodeop FloatingPointNegativeAbsoluteValue;
:efsnabs D,A is OP=4 & D & A & XOP_0_10=0x2C5 & BITS_11_15=0
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( f- ( abs( A:4 ) ) );
	setFPRF( D:4 ); setSummaryFPSCR();
}
# efsneg rT,rA 010 1100 0110
#define pcodeop FloatingPointNegate;
:efsneg D,A is OP=4 & D & A & XOP_0_10=0x2C6 & BITS_11_15=0
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( f-( A:4 ) );
	setFPRF( D:4 ); setSummaryFPSCR();
}
# efssub rT,rA,rB 010 1100 0001
#define pcodeop FloatingPointSubtract;
:efssub D,A,B is OP=4 & D & A & B & XOP_0_10=0x2C1
{
	# assign to lower word of D
	D = ( D & 0xFFFFFFFF00000000 ) | zext( A:4 f- B:4 );
	setFPSubFlags( A:4, B:4, D:4 ); setSummaryFPSCR();
}
# efststeq CRFD,rA,rB 010 1101 1110
#define pcodeop FloatingPointTestEqual;
:efststeq CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2DE & BITS_21_22=0
{
	CRFD[2,1] = A:4 f== B:4;
}
# efststgt CRFD,rA,rB 010 1101 1100
#define pcodeop FloatingPointTestGreaterThan;
:efststgt
CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2DC & BITS_21_22=0 { CRFD[2,1] = A:4 f> B:4; } # ================================================================= # Page 430 # efststlt CRFD,rA,rB 010 1101 1101 #define pcodeop FloatingPointTestLessThan; :efststlt CRFD,A,B is OP=4 & CRFD & A & B & XOP_0_10=0x2DD & BITS_21_22=0 { CRFD[2,1] = A:4 f< B:4; } ================================================ FILE: pypcode/processors/PowerPC/data/languages/altivec.sinc ================================================ # altivec pcodes are stubbed out with pseudocode calls define pcodeop dataStreamStop; define pcodeop dataStreamStopAll; define pcodeop dataStreamTouch; define pcodeop dataStreamTouchSoon; define pcodeop dataStreamTouchForStore; define pcodeop dataStreamTouchForStoreTransient; define pcodeop loadVectorElementByteIndexed; define pcodeop loadVectorElementHalfWordIndexed; define pcodeop loadVectorElementWordIndexed; define pcodeop loadVectorForShiftLeft; define pcodeop loadVectorForShiftRight; define pcodeop loadVectorIndexed; define pcodeop loadVectorIndexedLRU; define pcodeop moveFromVectorStatusAndControlRegister; define pcodeop moveToVectorStatusAndControlRegister; define pcodeop storeVectorElementByteIndexed; define pcodeop storeVectorElementHalfWordIndexed; define pcodeop storeVectorElementWordIndexed; define pcodeop storeVectorIndexed; define pcodeop storeVectorIndexedLRU; define pcodeop vectorAddCarryoutUnsignedWord; define pcodeop vectorAddFloatingPoint; define pcodeop vectorAddSignedByteSaturate; define pcodeop vectorAddSignedHalfWordSaturate; define pcodeop vectorAddSignedWordSaturate; define pcodeop vectorAddUnsignedByteSaturate; define pcodeop vectorAddUnsignedHalfWordModulo; define pcodeop vectorAddUnsignedHalfWordSaturate; define pcodeop vectorAddUnsignedWordSaturate; define pcodeop vectorLogicalAnd; define pcodeop vectorLogicalAndWithComplement; define pcodeop vectorAverageSignedByte; define pcodeop vectorAverageSignedHalfWord; define pcodeop 
vectorAverageSignedWord;
define pcodeop vectorAverageUnsignedByte;
define pcodeop vectorAverageUnsignedHalfWord;
define pcodeop vectorAverageUnsignedWord;
define pcodeop vectorConvertFromSignedFixedPointWord;
define pcodeop vectorConvertFromUnsignedFixedPointWord;
define pcodeop vectorCompareBoundsFloatingPoint;
define pcodeop vectorCompareEqualToFloatingPoint;
define pcodeop vectorCompareEqualToUnsignedByte;
define pcodeop vectorCompareEqualToUnsignedHalfWord;
define pcodeop vectorCompareEqualToUnsignedWord;
define pcodeop vectorCompareGreaterThanOrEqualToFloatingPoint;
define pcodeop vectorCompareGreaterThanFloatingPoint;
define pcodeop vectorCompareGreaterThanSignedByte;
define pcodeop vectorCompareGreaterThanConditionRegisterSignedHalfWord;
define pcodeop vectorCompareGreaterThanSignedWord;
define pcodeop vectorCompareGreaterThanUnsignedByte;
define pcodeop vectorCompareGreaterThanUnsignedHalfWord;
define pcodeop vectorCompareGreaterThanUnsignedWord;
define pcodeop vectorConvertToSignedFixedPointWordSaturate;
define pcodeop vectorConvertToUnsignedFixedPointWordSaturate;
define pcodeop vector2RaisedToTheExponentEstimateFloatingPoint;
define pcodeop vectorLog2EstimateFloatingPoint;
define pcodeop vectorMultiplyAddFloatingPoint;
define pcodeop vectorMaximumFloatingPoint;
define pcodeop vectorMaximumSignedByte;
define pcodeop vectorMaximumSignedHalfWord;
define pcodeop vectorMaximumSignedWord;
define pcodeop vectorMaximumUnsignedByte;
define pcodeop vectorMaximumUnsignedHalfWord;
define pcodeop vectorMaximumUnsignedWord;
define pcodeop vectorMultiplyHighAndAddSignedHalfWordSaturate;
define pcodeop vectorMultiplyHighRoundAndAddSignedHalfWordSaturate;
define pcodeop vectorMinimumFloatingPoint;
define pcodeop vectorMinimumSignedByte;
define pcodeop vectorMinimumSignedHalfWord;
define pcodeop vectorMinimumSignedWord;
define pcodeop vectorMinimumUnsignedByte;
define pcodeop vectorMinimumUnsignedHalfWord;
define pcodeop vectorMinimumUnsignedWord;
define pcodeop vectorMultiplyLowAndAddUnsignedHalfWordModulo;
define pcodeop vectorMergeHighByte;
define pcodeop vectorMergeHighHalfWord;
define pcodeop vectorMergeHighWord;
define pcodeop vectorMergeLowByte;
define pcodeop vectorMergeLowHalfWord;
define pcodeop vectorMergeLowWord;
define pcodeop vectorMultiplySumMixedSignByteModulo;
define pcodeop vectorMultiplySumSignedHalfWordModulo;
define pcodeop vectorMultiplySumSignedHalfWordSaturate;
define pcodeop vectorMultiplySumUnsignedByteModulo;
define pcodeop vectorMultiplySumUnsignedHalfWordModulo;
define pcodeop vectorMultiplySumUnsignedHalfWordSaturate;
define pcodeop vectorMultiplyEvenSignedByte;
define pcodeop vectorMultiplyEvenSignedHalfWord;
define pcodeop vectorMultiplyEvenUnsignedByte;
define pcodeop vectorMultiplyEvenUnsignedHalfWord;
define pcodeop vectorMultiplyOddSignedByte;
define pcodeop vectorMultiplyOddSignedHalfWord;
define pcodeop vectorMultiplyOddUnsignedByte;
define pcodeop vectorMultiplyOddUnsignedHalfWord;
define pcodeop vectorNegativeMultiplySubtractFloatingPoint;
define pcodeop vectorLogicalNOR;
define pcodeop vectorLogicalOR;
define pcodeop vectorPackPixel32;
define pcodeop vectorPackSignedHalfWordSignedSaturate;
define pcodeop vectorPackSignedHalfWordUnsignedSaturate;
define pcodeop vectorPackSignedWordSignedSaturate;
define pcodeop vectorPackSignedWordUnsignedSaturate;
define pcodeop vectorPackUnsignedHalfWordUnsignedModulo;
define pcodeop vectorPackUnsignedHalfWordUnsignedSaturate;
define pcodeop vectorPackUnsignedWordUnsignedModulo;
define pcodeop vectorPackUnsignedWordUnsignedSaturate;
define pcodeop vectorReciprocalEstimateFloatingPoint;
define pcodeop vectorRoundToFloatingPointIntegerTowardMinusInfinity;
define pcodeop vectorRoundToFloatingPointIntegerNearest;
define pcodeop vectorRoundToFloatingPointIntegerTowardPluInfinity;
define pcodeop vectorRoundToFloatingPointIntegerTowardZero;
define pcodeop vectorRotateLeftIntegerByte;
define pcodeop vectorRotateLeftIntegerHalfWord;
define pcodeop vectorRotateLeftIntegerWord;
define pcodeop vectorReciprocalSquareRootEstimateFloatingPoint;
define pcodeop vectorConditionalSelect;
define pcodeop vectorShiftLeft;
define pcodeop vectorShiftLeftIntegerByte;
define pcodeop vectorShiftLeftDoubleByOctetImmediate;
define pcodeop vectorShiftLeftIntegerHalfWord;
define pcodeop vectorShiftLeftByOctet;
define pcodeop vectorShiftLeftIntegerWord;
define pcodeop vectorSplatByte;
define pcodeop vectorSplatHalfWord;
define pcodeop vectorSplatImmediateSignedByte;
define pcodeop vectorSplatImmediateSignedHalfWord;
define pcodeop vectorSplatImmediateSignedWord;
define pcodeop vectorSplatWord;
define pcodeop vectorShiftRight;
define pcodeop vectorShiftRightAlgebraicByte;
define pcodeop vectorShiftRightAlgebraicHalfWord;
define pcodeop vectorShiftRightAlgebraicWord;
define pcodeop vectorShiftRightByte;
define pcodeop vectorShiftRightHalfWord;
define pcodeop vectorShiftRightByOctet;
define pcodeop vectorShiftRightWord;
define pcodeop vectorSubtractCarryoutUnsignedWord;
define pcodeop vectorSubtractFloatingPoint;
define pcodeop vectorSubtractSignedByteSaturate;
define pcodeop vectorSubtractSignedHalfWordSaturate;
define pcodeop vectorSubtractSignedWordSaturate;
define pcodeop vectorSubtractUnsignedByteModulo;
define pcodeop vectorSubtractUnsignedByteSaturate;
define pcodeop vectorSubtractUnsignedHalfWordSaturate;
define pcodeop vectorSubtractUnsignedWordModulo;
define pcodeop vectorSubtractUnsignedWordSaturate;
define pcodeop vectorSumAcrossSignedWordSaturate;
define pcodeop vectorSumAcrossPartialSignedWordSaturate;
define pcodeop vectorSumAcrossPartialSignedByteSaturate;
define pcodeop vectorSumAcrossPartialSignedHalfWordSaturate;
define pcodeop vectorSumAcrossPartialUnsignedByteSaturate;
define pcodeop vectorUnpackHighPixel16;
define pcodeop vectorUnpackHighSignedByte;
define pcodeop vectorUnpackHighSignedHalfWord;
define pcodeop vectorUnpackLowPixel16;
define pcodeop vectorUnpackLowSignedByte;
define pcodeop
vectorUnpackLowSignedHalfWord;

# dss
:dss STRM is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_23_24=0 & STRM & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=822 & Rc=0
{
    dataStreamStop(STRM:1);
}

# dssall
:dssall STRM is $(NOTVLE) & OP=31 & BIT_25=1 & BITS_23_24=0 & STRM & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=822 & Rc=0
{
    dataStreamStopAll(STRM:1);
}

:dst A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_23_24=0 & STRM & A & B & XOP_1_10=342 & Rc=0
{
    dataStreamTouch(A,B,STRM:1);
}

:dstt A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=1 & BITS_23_24=0 & STRM & A & B & XOP_1_10=342 & Rc=0
{
    dataStreamTouchSoon(A,B,STRM:1);
}

:dstst A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_23_24=0 & STRM & A & B & XOP_1_10=374 & Rc=0
{
    dataStreamTouchForStore(A,B,STRM:1);
}

:dststt A,B,STRM is $(NOTVLE) & OP=31 & BIT_25=1 & BITS_23_24=0 & STRM & A & B & XOP_1_10=374 & Rc=0
{
    dataStreamTouchForStoreTransient(A,B,STRM:1);
}

:lvebx vrD,RA_OR_ZERO,B is OP=31 & vrD & RA_OR_ZERO & B & XOP_1_10=7 & Rc=0
{
    tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B);
    tmpb:1 = *[ram]:1 tmp;
    eb:1 = tmp[0,4];
    # This looks backwards from what the manual says, but it's ok since byte 0 in the manual is MSB
    # where as for us byte 0 is LSB
@if ENDIAN == "big"
    eb = 0xF - eb;
@endif
    eb = eb * 8;
    vrD = (zext(tmpb) << eb);
    #vrD = loadVectorElementByteIndexed(A,B);
}

:lvehx vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=39 & Rc=0
{
    # TODO definition
    vrD = loadVectorElementHalfWordIndexed(A,B);
}

:lvewx vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=71 & Rc=0
{
    # TODO definition
    vrD = loadVectorElementWordIndexed(A,B);
}

:lvsl vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=6 & Rc=0
{
    # TODO definition
    vrD = loadVectorForShiftLeft(A,B);
}

:lvsr vrD,RA_OR_ZERO,B is OP=31 & vrD & RA_OR_ZERO & B & XOP_1_10=38 & Rc=0
{
    tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B);
    eb:1 = tmp[0,4];
    eb = eb * 8;
    srca:32=0x0001020304050607;
    srcb:32=0x08090a0b0c0d0e0f;
    srcc:32=0x1011121314151617;
    srcd:32=0x18191a1b1c1d1e1f;
    src:32 = (srca << 192) | (srcb << 128) | (srcc << 64) | srcd;
    src = src >> eb;
    vrD = src:16;
}

:lvx vrD,RA_OR_ZERO,B is OP=31 & vrD & RA_OR_ZERO & B & XOP_1_10=103 & Rc=0
{
    # vrD = loadVectorIndexed(A,B);
    build RA_OR_ZERO;
    tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffff0;
    vrD = *[ram]:16 tmp;
}

:lvxl vrD,A,B is OP=31 & vrD & A & B & XOP_1_10=359 & Rc=0
{
    # TODO definition
    vrD = loadVectorIndexedLRU(A,B);
}

:mfvscr vrD is OP=4 & vrD & vrAR=0 & vrBR=0 & XOP_1_10=770 & Rc=0
{
    # TODO definition
    vrD = moveFromVectorStatusAndControlRegister();
}

:mtvscr vrB is OP=4 & vrDR=0 & vrAR=0 & vrB & XOP_1_10=802 & Rc=0
{
    # TODO definition
    moveToVectorStatusAndControlRegister(vrB);
}

:stvebx vrS,RA_OR_ZERO,B is OP=31 & vrS & RA_OR_ZERO & B & XOP_1_10=135 & Rc=0
{
    # TODO definition
    EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
    *[ram]:1 EA = storeVectorElementByteIndexed(vrS,RA_OR_ZERO,B);
}

:stvehx vrS,RA_OR_ZERO,B is OP=31 & vrS & RA_OR_ZERO & B & XOP_1_10=167 & Rc=0
{
    # TODO definition
    EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
    *[ram]:2 EA = storeVectorElementHalfWordIndexed(vrS,RA_OR_ZERO,B);
}

:stvewx vrS,RA_OR_ZERO,B is OP=31 & vrS & RA_OR_ZERO & B & XOP_1_10=199 & Rc=0
{
    # TODO definition
    EA:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffffc;
    *[ram]:4 EA = storeVectorElementWordIndexed(vrS,RA_OR_ZERO,B);
}

:stvx vrS,RA_OR_ZERO,B is OP=31 & vrS & B & RA_OR_ZERO & XOP_1_10=231 & Rc=0
{
    tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffff0;
    *[ram]:16 tmp = vrS;
}

:stvxl vrS,RA_OR_ZERO,B is OP=31 & vrS & B & RA_OR_ZERO & XOP_1_10=487 & Rc=0
{
    # TODO definition
    tmp:$(REGISTER_SIZE) = (RA_OR_ZERO + B) & 0xfffffffffffffff0;
    *[ram]:16 tmp = vrS;
    # mark_as_not_likely_to_be_needed_again_anytime_soon(tmp);
}

# TODO definition
:vaddcuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=384 { vrD = vectorAddCarryoutUnsignedWord(vrA,vrB); }

# TODO definition
:vaddfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=10 { vrD = vectorAddFloatingPoint(vrA,vrB); }

# TODO definition
:vaddsbs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=768 {
vrD
= vectorAddSignedByteSaturate(vrA,vrB);
}

# TODO definition
:vaddshs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=832 { vrD = vectorAddSignedHalfWordSaturate(vrA,vrB); }

# TODO definition
:vaddsws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=896 { vrD = vectorAddSignedWordSaturate(vrA,vrB); }

vaddubm_part1: is vrA_8_0 & vrA_8_1 & vrA_8_2 & vrA_8_3 & vrA_8_4 & vrA_8_5 & vrA_8_6 & vrA_8_7 & vrB_8_0 & vrB_8_1 & vrB_8_2 & vrB_8_3 & vrB_8_4 & vrB_8_5 & vrB_8_6 & vrB_8_7 & vrD_8_0 & vrD_8_1 & vrD_8_2 & vrD_8_3 & vrD_8_4 & vrD_8_5 & vrD_8_6 & vrD_8_7
{
    vrD_8_0 = vrA_8_0 + vrB_8_0;
    vrD_8_1 = vrA_8_1 + vrB_8_1;
    vrD_8_2 = vrA_8_2 + vrB_8_2;
    vrD_8_3 = vrA_8_3 + vrB_8_3;
    vrD_8_4 = vrA_8_4 + vrB_8_4;
    vrD_8_5 = vrA_8_5 + vrB_8_5;
    vrD_8_6 = vrA_8_6 + vrB_8_6;
    vrD_8_7 = vrA_8_7 + vrB_8_7;
}

vaddubm_part2: is vrA_8_8 & vrA_8_9 & vrA_8_10 & vrA_8_11 & vrA_8_12 & vrA_8_13 & vrA_8_14 & vrA_8_15 & vrB_8_8 & vrB_8_9 & vrB_8_10 & vrB_8_11 & vrB_8_12 & vrB_8_13 & vrB_8_14 & vrB_8_15 & vrD_8_8 & vrD_8_9 & vrD_8_10 & vrD_8_11 & vrD_8_12 & vrD_8_13 & vrD_8_14 & vrD_8_15
{
    vrD_8_8 = vrA_8_8 + vrB_8_8;
    vrD_8_9 = vrA_8_9 + vrB_8_9;
    vrD_8_10 = vrA_8_10 + vrB_8_10;
    vrD_8_11 = vrA_8_11 + vrB_8_11;
    vrD_8_12 = vrA_8_12 + vrB_8_12;
    vrD_8_13 = vrA_8_13 + vrB_8_13;
    vrD_8_14 = vrA_8_14 + vrB_8_14;
    vrD_8_15 = vrA_8_15 + vrB_8_15;
}

# A bug in sleigh compiler forces us to keep the number of imported symbols less than 35
# (it slows to a halt pass there), that is why we have vaddubm_part1 & vaddubm_part2
:vaddubm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=0 & vaddubm_part1 & vaddubm_part2 { }

# TODO definition
:vaddubs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=512 { vrD = vectorAddUnsignedByteSaturate(vrA,vrB); }

:vadduhm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=64 & vrA_16_0 & vrA_16_1 & vrA_16_2 & vrA_16_3 & vrA_16_4 & vrA_16_5 & vrA_16_6 & vrA_16_7 & vrB_16_0 & vrB_16_1 & vrB_16_2 & vrB_16_3 & vrB_16_4 & vrB_16_5 & vrB_16_6 & vrB_16_7 & vrD_16_0 & vrD_16_1 & vrD_16_2 & vrD_16_3 & vrD_16_4 & vrD_16_5 & vrD_16_6 & vrD_16_7
{
    vrD_16_0 = vrA_16_0 + vrB_16_0;
    vrD_16_1 = vrA_16_1 + vrB_16_1;
    vrD_16_2 = vrA_16_2 + vrB_16_2;
    vrD_16_3 = vrA_16_3 + vrB_16_3;
    vrD_16_4 = vrA_16_4 + vrB_16_4;
    vrD_16_5 = vrA_16_5 + vrB_16_5;
    vrD_16_6 = vrA_16_6 + vrB_16_6;
    vrD_16_7 = vrA_16_7 + vrB_16_7;
}

# TODO definition
:vadduhs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=576 { vrD = vectorAddUnsignedHalfWordSaturate(vrA,vrB); }

:vadduwm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=128 & vrA_32_0 & vrA_32_1 & vrA_32_2 & vrA_32_3 & vrB_32_0 & vrB_32_1 & vrB_32_2 & vrB_32_3 & vrD_32_0 & vrD_32_1 & vrD_32_2 & vrD_32_3
{
    vrD_32_0 = vrA_32_0 + vrB_32_0;
    vrD_32_1 = vrA_32_1 + vrB_32_1;
    vrD_32_2 = vrA_32_2 + vrB_32_2;
    vrD_32_3 = vrA_32_3 + vrB_32_3;
}

# Collides with vadduws
# :vadduws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=640
# {
#     # TODO definition
#     vrD = vectorAddUnsignedWordSaturate(vrA,vrB);
# }

# TODO definition
:vand vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1028 { vrD = vectorLogicalAnd(vrA,vrB); }

# TODO definition
:vandc vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1092 { vrD = vectorLogicalAndWithComplement(vrA,vrB); }

# TODO definition
:vavgsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1282 { vrD = vectorAverageSignedByte(vrA,vrB); }

# TODO definition
:vavgsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1346 { vrD = vectorAverageSignedHalfWord(vrA,vrB); }

# TODO definition
:vavgsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1410 { vrD = vectorAverageSignedWord(vrA,vrB); }

# TODO definition
:vavgub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1026 { vrD = vectorAverageUnsignedByte(vrA,vrB); }

# TODO definition
:vavguh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1090 { vrD = vectorAverageUnsignedHalfWord(vrA,vrB); }

# TODO definition
:vavguw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1154 { vrD = vectorAverageUnsignedWord(vrA,vrB); }

:vcfsx vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB &
XOP_0_10=842
{
    # TODO definition
    vrD = vectorConvertFromSignedFixedPointWord(vrB,A_BITS:1);
}

# TODO definition
:vcfux vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=778 { vrD = vectorConvertFromUnsignedFixedPointWord(vrB,A_BITS:1); }

# TODO definition
:vcmpbfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=966 { vrD = vectorCompareBoundsFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpbfp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=966 { vrD = vectorCompareBoundsFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpeqfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=198 { vrD = vectorCompareEqualToFloatingPoint(vrA,vrB); }

# TODO definition
:vcmpeqfp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=198 { vrD = vectorCompareEqualToFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpequb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=6 { vrD = vectorCompareEqualToUnsignedByte(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpequb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=6 { vrD = vectorCompareEqualToUnsignedByte(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpequh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=70 { vrD = vectorCompareEqualToUnsignedHalfWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpequh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=70 { vrD = vectorCompareEqualToUnsignedHalfWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpequw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=134 { vrD = vectorCompareEqualToUnsignedWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpequw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=134 { vrD = vectorCompareEqualToUnsignedWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgefp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=454 { vrD = vectorCompareGreaterThanOrEqualToFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgefp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=454 { vrD = vectorCompareGreaterThanOrEqualToFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=710 { vrD = vectorCompareGreaterThanFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtfp. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=710 { vrD = vectorCompareGreaterThanFloatingPoint(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=774 { vrD = vectorCompareGreaterThanSignedByte(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtsb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=774 { vrD = vectorCompareGreaterThanSignedByte(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=838 { vrD = vectorCompareGreaterThanConditionRegisterSignedHalfWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtsh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=838 { vrD = vectorCompareGreaterThanConditionRegisterSignedHalfWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=902 { vrD = vectorCompareGreaterThanSignedWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtsw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=902 { vrD = vectorCompareGreaterThanSignedWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=518 { vrD = vectorCompareGreaterThanUnsignedByte(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtub. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=518 { vrD = vectorCompareGreaterThanUnsignedByte(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=582 { vrD = vectorCompareGreaterThanUnsignedHalfWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtuh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=582 { vrD = vectorCompareGreaterThanUnsignedHalfWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=0 & XOP_0_9=646 { vrD = vectorCompareGreaterThanUnsignedWord(vrA,vrB); }

# TODO definition
# TODO change CR6
:vcmpgtuw.
vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & BIT_10=1 & XOP_0_9=646 { vrD = vectorCompareGreaterThanUnsignedWord(vrA,vrB); }

# TODO definition
:vctsxs vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=970 { vrD = vectorConvertToSignedFixedPointWordSaturate(vrB,A_BITS:1); }

# TODO definition
:vctuxs vrD,vrB,A_BITS is OP=4 & vrD & A_BITS & vrB & XOP_0_10=906 { vrD = vectorConvertToUnsignedFixedPointWordSaturate(vrB,A_BITS:1); }

# TODO definition
:vexptefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=394 { vrD = vector2RaisedToTheExponentEstimateFloatingPoint(vrB); }

# TODO definition
:vlogefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=458 { vrD = vectorLog2EstimateFloatingPoint(vrB); }

# TODO definition
:vmaddfp vrD,vrA,vrC,vrB is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=46 { vrD = vectorMultiplyAddFloatingPoint(vrA,vrC,vrB); }

# TODO definition
:vmaxfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1034 { vrD = vectorMaximumFloatingPoint(vrA,vrB); }

# TODO definition
:vmaxsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=258 { vrD = vectorMaximumSignedByte(vrA,vrB); }

# TODO definition
:vmaxsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=322 { vrD = vectorMaximumSignedHalfWord(vrA,vrB); }

# TODO definition
:vmaxsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=386 { vrD = vectorMaximumSignedWord(vrA,vrB); }

# TODO definition
:vmaxub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=2 { vrD = vectorMaximumUnsignedByte(vrA,vrB); }

# TODO definition
:vmaxuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=66 { vrD = vectorMaximumUnsignedHalfWord(vrA,vrB); }

# TODO definition
:vmaxuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=130 { vrD = vectorMaximumUnsignedWord(vrA,vrB); }

# TODO definition
:vmhaddshs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=32 { vrD = vectorMultiplyHighAndAddSignedHalfWordSaturate(vrA,vrB,vrC); }

# TODO definition
:vmhraddshs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=33 { vrD = vectorMultiplyHighRoundAndAddSignedHalfWordSaturate(vrA,vrB,vrC); }

# TODO definition
:vminfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1098 { vrD = vectorMinimumFloatingPoint(vrA,vrB); }

# TODO definition
:vminsb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=770 { vrD = vectorMinimumSignedByte(vrA,vrB); }

# TODO definition
:vminsh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=834 { vrD = vectorMinimumSignedHalfWord(vrA,vrB); }

# TODO definition
:vminsw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=898 { vrD = vectorMinimumSignedWord(vrA,vrB); }

# TODO definition
:vminub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=514 { vrD = vectorMinimumUnsignedByte(vrA,vrB); }

# TODO definition
:vminuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=578 { vrD = vectorMinimumUnsignedHalfWord(vrA,vrB); }

# TODO definition
:vminuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=642 { vrD = vectorMinimumUnsignedWord(vrA,vrB); }

# TODO definition
:vmladduhm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=34 { vrD = vectorMultiplyLowAndAddUnsignedHalfWordModulo(vrA,vrB,vrC); }

# TODO definition
:vmrghb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=12 { vrD = vectorMergeHighByte(vrA,vrB); }

# TODO definition
:vmrghh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=76 { vrD = vectorMergeHighHalfWord(vrA,vrB); }

# TODO definition
:vmrghw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=140 { vrD = vectorMergeHighWord(vrA,vrB); }

# TODO definition
:vmrglb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=268 { vrD = vectorMergeLowByte(vrA,vrB); }

# TODO definition
:vmrglh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=332 { vrD = vectorMergeLowHalfWord(vrA,vrB); }

# TODO definition
:vmrglw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=396 { vrD = vectorMergeLowWord(vrA,vrB); }

# TODO definition
:vmsummbm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=37 { vrD = vectorMultiplySumMixedSignByteModulo(vrA,vrB,vrC); }

# TODO definition
:vmsumshm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=40 { vrD = vectorMultiplySumSignedHalfWordModulo(vrA,vrB,vrC); }

# TODO definition
:vmsumshs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=41 { vrD = vectorMultiplySumSignedHalfWordSaturate(vrA,vrB,vrC); }

# TODO definition
:vmsumubm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=36 { vrD = vectorMultiplySumUnsignedByteModulo(vrA,vrB,vrC); }

# TODO definition
:vmsumuhm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=38 { vrD = vectorMultiplySumUnsignedHalfWordModulo(vrA,vrB,vrC); }

# TODO definition
:vmsumuhs vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=39 { vrD = vectorMultiplySumUnsignedHalfWordSaturate(vrA,vrB,vrC); }

# TODO definition
:vmulesb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=776 { vrD = vectorMultiplyEvenSignedByte(vrA,vrB); }

# TODO definition
:vmulesh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=840 { vrD = vectorMultiplyEvenSignedHalfWord(vrA,vrB); }

# TODO definition
:vmuleub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=520 { vrD = vectorMultiplyEvenUnsignedByte(vrA,vrB); }

# TODO definition
:vmuleuh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=584 { vrD = vectorMultiplyEvenUnsignedHalfWord(vrA,vrB); }

# TODO definition
:vmulosb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=264 { vrD = vectorMultiplyOddSignedByte(vrA,vrB); }

# TODO definition
:vmulosh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=328 { vrD = vectorMultiplyOddSignedHalfWord(vrA,vrB); }

# TODO definition
:vmuloub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=8 { vrD = vectorMultiplyOddUnsignedByte(vrA,vrB); }

# TODO definition
:vmulouh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=72 { vrD = vectorMultiplyOddUnsignedHalfWord(vrA,vrB); }

# TODO definition
:vnmsubfp vrD,vrA,vrC,vrB is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=47 { vrD = vectorNegativeMultiplySubtractFloatingPoint(vrA,vrC,vrB); }

:vnor vrD,vrA,vrB is OP=4 & vrD
& vrA & vrB & XOP_0_10=1284
{
    vrD = ~(vrA | vrB);
}

:vor vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1156
{
    vrD = vrA | vrB;
}

:vperm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=43
{
    # tmp:32 = (zext(vrA) << 128) | zext(vrB);
    # tmp2:16 = 0;
    # tmp3:32 = 0;
    # cnt:1 = 15;
    # tmp2 = (vrC >> (cnt * 8)) & 0x1F;
    # tmp3 = tmp >> ((31 - tmp2) * 8);
    # vrD = vrD << 8;
    # vrD[0,8] = tmp3[0,8];
    # if (cnt == 0) goto ;
    # cnt = cnt - 1;
    # goto ;
    # vrD = vectorPermute(vrA,vrB,vrC);
}

# TODO definition
:vpkpx vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=782 { vrD = vectorPackPixel32(vrA,vrB); }

# TODO definition
:vpkshss vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=398 { vrD = vectorPackSignedHalfWordSignedSaturate(vrA,vrB); }

# TODO definition
:vpkshus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=270 { vrD = vectorPackSignedHalfWordUnsignedSaturate(vrA,vrB); }

# TODO definition
:vpkswss vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=462 { vrD = vectorPackSignedWordSignedSaturate(vrA,vrB); }

# TODO definition
:vpkswus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=334 { vrD = vectorPackSignedWordUnsignedSaturate(vrA,vrB); }

# TODO definition
:vpkuhum vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=14 { vrD = vectorPackUnsignedHalfWordUnsignedModulo(vrA,vrB); }

# TODO definition
:vpkuhus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=142 { vrD = vectorPackUnsignedHalfWordUnsignedSaturate(vrA,vrB); }

# TODO definition
:vpkuwum vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=78 { vrD = vectorPackUnsignedWordUnsignedModulo(vrA,vrB); }

# TODO definition
:vpkuwus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=206 { vrD = vectorPackUnsignedWordUnsignedSaturate(vrA,vrB); }

# TODO definition
:vrefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=266 { vrD = vectorReciprocalEstimateFloatingPoint(vrB); }

# TODO definition
:vrfim vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=714 { vrD = vectorRoundToFloatingPointIntegerTowardMinusInfinity(vrB); }

# TODO definition
:vrfin vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=522 { vrD = vectorRoundToFloatingPointIntegerNearest(vrB); }

# TODO definition
:vrfip vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=650 { vrD = vectorRoundToFloatingPointIntegerTowardPluInfinity(vrB); }

# TODO definition
:vrfiz vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=586 { vrD = vectorRoundToFloatingPointIntegerTowardZero(vrB); }

# TODO definition
:vrlb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=4 { vrD = vectorRotateLeftIntegerByte(vrA,vrB); }

# TODO definition
:vrlh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=68 { vrD = vectorRotateLeftIntegerHalfWord(vrA,vrB); }

# TODO definition
:vrlw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=132 { vrD = vectorRotateLeftIntegerWord(vrA,vrB); }

# TODO definition
:vrsqrtefp vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=330 { vrD = vectorReciprocalSquareRootEstimateFloatingPoint(vrB); }

# TODO definition
:vsel vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=42 { vrD = vectorConditionalSelect(vrA,vrB,vrC); }

# TODO definition
:vsl vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=452 { vrD = vectorShiftLeft(vrA,vrB); }

# TODO definition
:vslb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=260 { vrD = vectorShiftLeftIntegerByte(vrA,vrB); }

:vsldoi vrD,vrA,vrB,SHB is OP=4 & vrD & vrA & vrB & BIT_10=0 & SHB & XOP_0_5=44
{
    tmp:32 = (zext(vrA) << 128) | zext(vrB);
    tmp = tmp << (SHB:1 * 8);
    vrD = tmp[128,128];
}

# TODO definition
:vslh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=324 { vrD = vectorShiftLeftIntegerHalfWord(vrA,vrB); }

# TODO definition
:vslo vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1036 { vrD = vectorShiftLeftByOctet(vrA,vrB); }

# TODO definition
:vslw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=388 { vrD = vectorShiftLeftIntegerWord(vrA,vrB); }

:vspltb vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=524
{
    tmp:1 = (0xF - UIMB) * 8;
    tmpa:16 = (vrB >> tmp) & 0xFF;
    vrD = tmpa | (tmpa << 8) | (tmpa << 16) | (tmpa << 24) | (tmpa << 32) | (tmpa << 40) | (tmpa << 48) | (tmpa << 56);
    vrD = vrD | (tmpa << 64) | (tmpa << 72) | (tmpa << 80) | (tmpa << 88) | (tmpa << 96) | (tmpa << 104) | (tmpa << 112) | (tmpa << 120);
}

# TODO definition
:vsplth vrD,vrB,UIMH is OP=4 & vrD & BITS_19_20=0 & UIMH & vrB & XOP_0_10=588 { vrD = vectorSplatHalfWord(vrB,UIMH:1); }

# TODO definition
:vspltisb vrD,A_BITSS is OP=4 & vrD & A_BITSS & B_BITS=0 & XOP_0_10=780 { vrD = vectorSplatImmediateSignedByte(A_BITSS:1); }

# TODO definition
:vspltish vrD,A_BITSS is OP=4 & vrD & A_BITSS & B_BITS=0 & XOP_0_10=844 { vrD = vectorSplatImmediateSignedHalfWord(A_BITSS:1); }

:vspltisw vrD,A_BITSS is OP=4 & vrD & A_BITSS & B_BITS=0 & XOP_0_10=908
{
    tmpw:4 = sext(A_BITSS:1);
    tmp:16 = zext(tmpw);
    vrD = (tmp) | (tmp << 32) | (tmp << 64) | (tmp << 96);
}

# A better way to do this would be to make a subtable to interpret
# UIMW into the corresponding subword, then assign the subregisters of vrD
# to that value.
##
## AltiVec shift/subtract/sum/unpack constructors, the opaque altv207_*/
## altv300_* pcodeop declarations, and the Power ISA 2.07 vector additions.
## Fix in this revision: vminud previously discarded the result of
## altv207_24() (the only 2.07 constructor not assigning vrD); it now writes
## vrD like all of its siblings.
##
# vspltw: replicate word UIMW of vrB into all 4 word lanes of vrD.
:vspltw vrD,vrB,UIMW is OP=4 & vrD & vrB & BITS_18_20=0 & UIMW & XOP_0_10=652 {
	local b = (3 - UIMW) * 32;
	local tmp:16 = (vrB >> b) & 0xffffffff;
	vrD = (tmp) | (tmp << 32) | (tmp << 64) | (tmp << 96);
}
:vsr vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=708 { vrD = vectorShiftRight(vrA,vrB); } # TODO definition
:vsrab vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=772 { vrD = vectorShiftRightAlgebraicByte(vrA,vrB); } # TODO definition
:vsrah vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=836 { vrD = vectorShiftRightAlgebraicHalfWord(vrA,vrB); } # TODO definition
:vsraw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=900 { vrD = vectorShiftRightAlgebraicWord(vrA,vrB); } # TODO definition
:vsrb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=516 { vrD = vectorShiftRightByte(vrA,vrB); } # TODO definition
:vsrh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=580 { vrD = vectorShiftRightHalfWord(vrA,vrB); } # TODO definition
:vsro vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1100 { vrD = vectorShiftRightByOctet(vrA,vrB); } # TODO definition
:vsrw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=644 { vrD = vectorShiftRightWord(vrA,vrB); } # TODO definition
:vsubcuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1408 { vrD = vectorSubtractCarryoutUnsignedWord(vrA,vrB); } # TODO definition
:vsubfp vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=74 { vrD = vectorSubtractFloatingPoint(vrA,vrB); } # TODO definition
:vsubsbs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1792 { vrD = vectorSubtractSignedByteSaturate(vrA,vrB); } # TODO definition
:vsubshs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1856 { vrD = vectorSubtractSignedHalfWordSaturate(vrA,vrB); } # TODO definition
:vsubsws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1920 { vrD = vectorSubtractSignedWordSaturate(vrA,vrB); } # TODO definition
:vsububm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1024 { vrD = vectorSubtractUnsignedByteModulo(vrA,vrB); } # TODO definition
:vsububs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1536 { vrD = vectorSubtractUnsignedByteSaturate(vrA,vrB); } # TODO definition
# vsubuhm: modeled exactly, lane by lane, via the vrX_16_n halfword
# subregisters of the vector registers.
:vsubuhm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1088 & vrA_16_0 & vrA_16_1 & vrA_16_2 & vrA_16_3 & vrA_16_4 & vrA_16_5 & vrA_16_6 & vrA_16_7 & vrB_16_0 & vrB_16_1 & vrB_16_2 & vrB_16_3 & vrB_16_4 & vrB_16_5 & vrB_16_6 & vrB_16_7 & vrD_16_0 & vrD_16_1 & vrD_16_2 & vrD_16_3 & vrD_16_4 & vrD_16_5 & vrD_16_6 & vrD_16_7 {
	vrD_16_0 = vrA_16_0 - vrB_16_0;
	vrD_16_1 = vrA_16_1 - vrB_16_1;
	vrD_16_2 = vrA_16_2 - vrB_16_2;
	vrD_16_3 = vrA_16_3 - vrB_16_3;
	vrD_16_4 = vrA_16_4 - vrB_16_4;
	vrD_16_5 = vrA_16_5 - vrB_16_5;
	vrD_16_6 = vrA_16_6 - vrB_16_6;
	vrD_16_7 = vrA_16_7 - vrB_16_7;
}
:vsubuhs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1600 { vrD = vectorSubtractUnsignedHalfWordSaturate(vrA,vrB); } # TODO definition
:vsubuwm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1152 { vrD = vectorSubtractUnsignedWordModulo(vrA,vrB); } # TODO definition
:vsubuws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1664 { vrD = vectorSubtractUnsignedWordSaturate(vrA,vrB); } # TODO definition
:vsumsws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1928 { vrD = vectorSumAcrossSignedWordSaturate(vrA,vrB); } # TODO definition
:vsum2sws vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1672 { vrD = vectorSumAcrossPartialSignedWordSaturate(vrA,vrB); } # TODO definition
:vsum4sbs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1800 { vrD = vectorSumAcrossPartialSignedByteSaturate(vrA,vrB); } # TODO definition
:vsum4shs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1608 { vrD = vectorSumAcrossPartialSignedHalfWordSaturate(vrA,vrB); } # TODO definition
:vsum4ubs vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1544 { vrD = vectorSumAcrossPartialUnsignedByteSaturate(vrA,vrB); } # TODO definition
:vupkhpx vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=846 { vrD = vectorUnpackHighPixel16(vrB); } # TODO definition
:vupkhsb vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=526 { vrD = vectorUnpackHighSignedByte(vrB); } # TODO definition
:vupkhsh vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=590 { vrD = vectorUnpackHighSignedHalfWord(vrB); } # TODO definition
:vupklpx vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=974 { vrD = vectorUnpackLowPixel16(vrB); } # TODO definition
:vupklsb vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=654 { vrD = vectorUnpackLowSignedByte(vrB); } # TODO definition
:vupklsh vrD,vrB is OP=4 & vrD & A_BITS=0 & vrB & XOP_0_10=718 { vrD = vectorUnpackLowSignedHalfWord(vrB); } # TODO definition
:vxor vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1220 { vrD = vrA ^ vrB; }

# Opaque placeholder pcodeops for the Power ISA 2.07 (altv207_*) and 3.0
# (altv300_*) vector additions; numbered in the order the instructions below
# were added.  NOTE: altv300_37..altv300_40 are intentionally not declared.
define pcodeop altv207_1;  define pcodeop altv207_2;  define pcodeop altv207_3;  define pcodeop altv207_4;  define pcodeop altv207_5;
define pcodeop altv207_6;  define pcodeop altv207_7;  define pcodeop altv207_8;  define pcodeop altv207_9;  define pcodeop altv207_10;
define pcodeop altv207_11; define pcodeop altv207_12; define pcodeop altv207_13; define pcodeop altv207_14; define pcodeop altv207_15;
define pcodeop altv207_16; define pcodeop altv207_17; define pcodeop altv207_18; define pcodeop altv207_19; define pcodeop altv207_20;
define pcodeop altv207_21; define pcodeop altv207_22; define pcodeop altv207_23; define pcodeop altv207_24; define pcodeop altv207_25;
define pcodeop altv207_26; define pcodeop altv207_27; define pcodeop altv207_28; define pcodeop altv207_29; define pcodeop altv207_30;
define pcodeop altv207_31; define pcodeop altv207_32; define pcodeop altv207_33; define pcodeop altv207_34; define pcodeop altv207_35;
define pcodeop altv207_36; define pcodeop altv207_37; define pcodeop altv207_38; define pcodeop altv207_39; define pcodeop altv207_40;
define pcodeop altv207_41; define pcodeop altv207_42; define pcodeop altv207_43; define pcodeop altv207_44; define pcodeop altv207_45;
define pcodeop altv207_46; define pcodeop altv207_47; define pcodeop altv207_48; define pcodeop altv207_49; define pcodeop altv207_50;
define pcodeop altv207_51; define pcodeop altv207_52; define pcodeop altv207_53; define pcodeop altv207_54; define pcodeop altv207_55;
define pcodeop altv207_56; define pcodeop altv207_57; define pcodeop altv207_58; define pcodeop altv207_59; define pcodeop altv207_60;
define pcodeop altv207_61; define pcodeop altv207_62; define pcodeop altv207_63; define pcodeop altv207_64; define pcodeop altv207_65;
define pcodeop altv300_1;  define pcodeop altv300_2;  define pcodeop altv300_3;  define pcodeop altv300_4;  define pcodeop altv300_5;
define pcodeop altv300_6;  define pcodeop altv300_7;  define pcodeop altv300_8;  define pcodeop altv300_9;  define pcodeop altv300_10;
define pcodeop altv300_11; define pcodeop altv300_12; define pcodeop altv300_13; define pcodeop altv300_14; define pcodeop altv300_15;
define pcodeop altv300_16; define pcodeop altv300_17; define pcodeop altv300_18; define pcodeop altv300_19; define pcodeop altv300_20;
define pcodeop altv300_21; define pcodeop altv300_22; define pcodeop altv300_23; define pcodeop altv300_24; define pcodeop altv300_25;
define pcodeop altv300_26; define pcodeop altv300_27; define pcodeop altv300_28; define pcodeop altv300_29; define pcodeop altv300_30;
define pcodeop altv300_31; define pcodeop altv300_32; define pcodeop altv300_33; define pcodeop altv300_34; define pcodeop altv300_35;
define pcodeop altv300_36; define pcodeop altv300_41; define pcodeop altv300_42; define pcodeop altv300_43; define pcodeop altv300_44;
define pcodeop altv300_45; define pcodeop altv300_46; define pcodeop altv300_47; define pcodeop altv300_48; define pcodeop altv300_49;
define pcodeop altv300_50; define pcodeop altv300_51; define pcodeop altv300_52; define pcodeop altv300_53; define pcodeop altv300_54;
define pcodeop altv300_55; define pcodeop altv300_56; define pcodeop altv300_57; define pcodeop altv300_58; define pcodeop altv300_59;
define pcodeop altv300_60; define pcodeop altv300_61; define pcodeop altv300_62; define pcodeop altv300_63; define pcodeop altv300_64;
define pcodeop altv300_65; define pcodeop altv300_66; define pcodeop altv300_67; define pcodeop altv300_68; define pcodeop altv300_69;
define pcodeop altv300_70; define pcodeop altv300_71;

#################
# 2.07 additions
:bcdadd. vrD,vrA,vrB,PS is OP=4 & BIT_10=1 & XOP_0_8=1 & vrA & vrB & vrD & PS { vrD = altv207_64(vrA,vrB,PS:1); }
:bcdsub. vrD,vrA,vrB,PS is OP=4 & BIT_10=1 & XOP_0_8=65 & vrA & vrB & vrD & PS { vrD = altv207_65(vrA,vrB,PS:1); }
:vaddcuq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=320 { vrD = altv207_1(vrA,vrB); }
:vaddecuq vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=61 { vrD = altv207_2(vrA,vrB,vrC); }
:vaddeuqm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=60 { vrD = altv207_3(vrA,vrB,vrC); }
:vaddudm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=192 { vrD = altv207_4(vrA,vrB); }
:vadduqm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=256 { vrD = altv207_5(vrA,vrB); }
:vbpermq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1356 { vrD = altv207_6(vrA,vrB); }
:vcipher vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1288 { vrD = altv207_7(vrA,vrB); }
:vcipherlast vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1289 { vrD = altv207_8(vrA,vrB); }
:vclzb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1794 { vrD = altv207_9(vrB); }
:vclzd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1986 { vrD = altv207_10(vrB); }
:vclzh vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1858 { vrD = altv207_11(vrB); }
:vclzw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1922 { vrD = altv207_12(vrB); }
:vcmpequd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=199 { vrD = altv207_13(vrA,vrB); }
:vcmpequd. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=199 { vrD = altv207_14(vrA,vrB); }
:vcmpgtsd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=967 { vrD = altv207_15(vrA,vrB); }
:vcmpgtsd. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=967 { vrD = altv207_16(vrA,vrB); }
:vcmpgtud vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=711 { vrD = altv207_17(vrA,vrB); }
:vcmpgtud. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=711 { vrD = altv207_18(vrA,vrB); }
:veqv vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1668 { vrD = altv207_19(vrA,vrB); }
:vgbbd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1292 { vrD = altv207_20(vrB); }
:vmaxsd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=450 { vrD = altv207_21(vrA,vrB); }
:vmaxud vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=194 { vrD = altv207_22(vrA,vrB); }
:vminsd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=962 { vrD = altv207_23(vrA,vrB); }
# FIX: the result was previously discarded (bare "altv207_24(vrA,vrB);").
:vminud vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=706 { vrD = altv207_24(vrA,vrB); }
:vmrgew vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1932 { vrD = altv207_25(vrA,vrB); }
:vmrgow vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1676 { vrD = altv207_26(vrA,vrB); }
:vmulesw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=904 { vrD = altv207_27(vrA,vrB); }
:vmuleuw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=648 { vrD = altv207_28(vrA,vrB); }
:vmulosw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=392 { vrD = altv207_29(vrA,vrB); }
:vmulouw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=136 { vrD = altv207_30(vrA,vrB); }
:vmuluwm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=137 { vrD = altv207_31(vrA,vrB); }
:vnand vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1412 { vrD = altv207_32(vrA,vrB); }
:vncipher vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1352 { vrD = altv207_33(vrA,vrB); }
:vncipherlast vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1353 { vrD = altv207_34(vrA,vrB); }
:vorc vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1348 { vrD = altv207_35(vrA,vrB); }
:vpermxor vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=45 { vrD = altv207_36(vrA,vrB,vrC); }
:vpksdss vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1486 { vrD = altv207_37(vrA,vrB); }
:vpksdus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1358 { vrD = altv207_38(vrA,vrB); }
:vpkudum vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1102 { vrD = altv207_39(vrA,vrB); }
:vpkudus vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1230 { vrD = altv207_41(vrA,vrB); }
:vpmsumb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1032 { vrD = altv207_42(vrA,vrB); }
:vpmsumd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1224 { vrD = altv207_43(vrA,vrB); }
:vpmsumh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1096 { vrD = altv207_44(vrA,vrB); }
:vpmsumw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1160 { vrD = altv207_45(vrA,vrB); }
:vpopcntb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1795 { vrD = altv207_46(vrB); }
:vpopcntd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1987 { vrD = altv207_47(vrB); }
:vpopcnth vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1859 { vrD = altv207_48(vrB); }
:vpopcntw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1923 { vrD = altv207_49(vrB); }
:vrld vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=196 { vrD = altv207_50(vrA,vrB); }
:vsbox vrD,vrA is OP=4 & vrD & vrA & BITS_11_15=0 & XOP_0_10=1480 { vrD = altv207_51(vrA); }
:vshasigmad vrD,vrA,ST,SIX is OP=4 & vrD & vrA & ST & SIX & XOP_0_10=1730 { vrD = altv207_52(vrA,ST:1,SIX:1); }
:vshasigmaw vrD,vrA,ST,SIX is OP=4 & vrD & vrA & ST & SIX & XOP_0_10=1666 { vrD = altv207_53(vrA,ST:1,SIX:1); }
:vsld vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1476 { vrD = altv207_54(vrA,vrB); }
:vsrad vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=964 { vrD = altv207_55(vrA,vrB); }
:vsrd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1732 { vrD = altv207_56(vrA,vrB); }
:vsubcuq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1344 { vrD = altv207_57(vrA,vrB); }
:vsubecuq vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=63 { vrD = altv207_58(vrA,vrB,vrC); }
# Remaining ISA 2.07 vector constructors (opaque altv207_* pcodeops).
:vsubeuqm vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=62 { vrD = altv207_59(vrA,vrB,vrC); }
:vsubudm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1216 { vrD = altv207_60(vrA,vrB); }
:vsubuqm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1280 { vrD = altv207_61(vrA,vrB); }
:vupkhsw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1614 { vrD = altv207_62(vrB); }
:vupklsw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1742 { vrD = altv207_63(vrB); }

###################
# v3.0
# Power ISA 3.0 vector additions; semantics are opaque altv300_* pcodeops
# except for the vextract*/vextuwlx constructors, which are modeled inline.
:bcdcfn. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=7 & XOP_0_8=385 & BIT_10=1 & PS { vrD = altv300_1(vrB,PS:1); }
:bcdcfsq. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=2 & XOP_0_8=385 & BIT_10=1 & PS { vrD = altv300_2(vrB,PS:1); }
:bcdcfz. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=6 & XOP_0_8=385 & BIT_10=1 & PS { vrD = altv300_3(vrB,PS:1); }
:bcdcpsgn. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=833 { vrD = altv300_4(vrA,vrB); }
:bcdctn. vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=5 & XOP_0_8=385 & BIT_10=1 & BIT_9=0 { vrD = altv300_5(vrB); }
:bcdctsq. vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_8=385 & BIT_10=1 & BIT_9=0 { vrD = altv300_6(vrB); }
:bcdctz. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=4 & XOP_0_8=385 & BIT_10=1 & PS { vrD = altv300_7(vrB,PS:1); }
:bcds. vrD,vrA,vrB,PS is OP=4 & vrD & vrA & vrB & XOP_0_8=193 & BIT_10=1 & PS { vrD = altv300_8(vrA,vrB,PS:1); }
:bcdsetsgn. vrD,vrB,PS is OP=4 & vrD & vrB & BITS_16_20=31 & XOP_0_8=385 & BIT_10=1 & PS { vrD = altv300_9(vrB,PS:1); }
:bcdsr. vrD,vrA,vrB,PS is OP=4 & vrD & vrA & vrB & XOP_0_8=449 & BIT_10=1 & PS { vrD = altv300_10(vrA,vrB,PS:1); }
:bcdtrunc. vrD,vrA,vrB,PS is OP=4 & vrD & vrA & vrB & XOP_0_8=257 & BIT_10=1 & PS { vrD = altv300_12(vrA,vrB,PS:1); }
:bcdus. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_8=129 & BIT_10=1 { vrD = altv300_13(vrA,vrB); }
:bcdutrunc. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_8=321 & BIT_10=1 { vrD = altv300_14(vrA,vrB); }
:vabsdub vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1027 { vrD = altv300_15(vrA,vrB); }
:vabsduh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1091 { vrD = altv300_16(vrA,vrB); }
:vabsduw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1155 { vrD = altv300_17(vrA,vrB); }
:vbpermd vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1484 { vrD = altv300_18(vrA,vrB); }
:vclzlsbb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=0 & XOP_0_10=1538 { vrD = altv300_19(vrB); }
:vcmpneb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=7 { vrD = altv300_20(vrA,vrB); }
:vcmpneb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=7 { vrD = altv300_21(vrA,vrB); }
:vcmpneh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=71 { vrD = altv300_22(vrA,vrB); }
:vcmpneh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=71 { vrD = altv300_23(vrA,vrB); }
:vcmpnew vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=135 { vrD = altv300_24(vrA,vrB); }
:vcmpnew. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=135 { vrD = altv300_25(vrA,vrB); }
:vcmpnezb vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=263 { vrD = altv300_26(vrA,vrB); }
:vcmpnezb. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=263 { vrD = altv300_27(vrA,vrB); }
:vcmpnezh vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=327 { vrD = altv300_28(vrA,vrB); }
:vcmpnezh. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=327 { vrD = altv300_29(vrA,vrB); }
:vcmpnezw vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=0 & XOP_0_9=391 { vrD = altv300_30(vrA,vrB); }
:vcmpnezw. vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & Rc2=1 & XOP_0_9=391 { vrD = altv300_31(vrA,vrB); }
# XOP 1538 is a family of unary ops selected by BITS_16_20 (vclzlsbb above,
# vctz*, vexts*, vneg*, vprtyb* below).
:vctzb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=28 & XOP_0_10=1538 { vrD = altv300_32(vrB); }
:vctzh vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=29 & XOP_0_10=1538 { vrD = altv300_33(vrB); }
:vctzd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=31 & XOP_0_10=1538 { vrD = altv300_34(vrB); }
:vctzlsbb vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=1 & XOP_0_10=1538 { vrD = altv300_35(vrB); }
:vctzw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=30 & XOP_0_10=1538 { vrD = altv300_36(vrB); }
:vextractd vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=717 {
	# if UIMB > 8 the result is undefined
	vrD = (vrB >> (8 * (8 - UIMB))) & 0xffffffffffffffff;
}
# NOTE(review): the following three use a shift scale of 16 where vextractd
# uses 8 -- verify the element addressing against the Power ISA description.
:vextractub vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=525 {
	# if UIMB > 15 the result is undefined
	vrD = (vrB >> (16 * (15 - UIMB))) & 0xff;
}
:vextractuh vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=589 {
	# if UIMB > 14 the result is undefined
	vrD = (vrB >> (16 * (14 - UIMB))) & 0xffff;
}
:vextractuw vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=653 {
	# if UIMB > 12 the result is undefined
	vrD = (vrB >> (16 * (12 - UIMB))) & 0xffffffff;
}
:vextsb2d vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=24 & XOP_0_10=1538 { vrD = altv300_41(vrB); }
:vextsb2w vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=16 & XOP_0_10=1538 { vrD = altv300_42(vrB); }
:vextsh2d vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=25 & XOP_0_10=1538 { vrD = altv300_43(vrB); }
:vextsh2w vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=17 & XOP_0_10=1538 { vrD = altv300_44(vrB); }
:vextsw2d vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=26 & XOP_0_10=1538 { vrD = altv300_45(vrB); }
:vextublx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1549 { D = altv300_46(A,vrB); }
:vextubrx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1805 { D = altv300_47(A,vrB); }
:vextuhlx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1613 { D = altv300_48(A,vrB); }
:vextuhrx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1869 { D = altv300_49(A,vrB); }
# beware the backwards bit/byte ordering in the manual
:vextuwlx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1677 {
	local offs:2 = (12 - zext(A[0,4])) * 8;
	local out:16 = (vrB >> offs) & 0xffffffff;
	# No need for zext, as mask is already applied
	D = out:$(REGISTER_SIZE);
}
:vextuwrx D,A,vrB is OP=4 & D & A & vrB & XOP_0_10=1933 { D = altv300_51(A,vrB); }
:vinsertb vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=781 { vrD = altv300_52(vrB,UIMB:1); }
:vinsertd vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=973 { vrD = altv300_53(vrB,UIMB:1); }
:vinserth vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=845 { vrD = altv300_54(vrB,UIMB:1); }
:vinsertw vrD,vrB,UIMB is OP=4 & vrD & BITS_20_20=0 & UIMB & vrB & XOP_0_10=909 { vrD = altv300_55(vrB,UIMB:1); }
:vmul10cuq vrD,vrA is OP=4 & vrD & vrA & BITS_11_15=0 & XOP_0_10=1 { vrD = altv300_56(vrA); }
:vmul10ecuq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=65 { vrD = altv300_57(vrA,vrB); }
:vmul10euq vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=577 { vrD = altv300_58(vrA,vrB); }
:vmul10uq vrD,vrA is OP=4 & vrD & vrA & BITS_11_15=0 & XOP_0_10=513 { vrD = altv300_59(vrA); }
:vnegd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=7 & XOP_0_10=1538 { vrD = altv300_60(vrB); }
:vnegw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=6 & XOP_0_10=1538 { vrD = altv300_61(vrB); }
:vpermr vrD,vrA,vrB,vrC is OP=4 & vrD & vrA & vrB & vrC & XOP_0_5=59 { vrD = altv300_62(vrA,vrB,vrC); }
:vprtybd vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=9 & XOP_0_10=1538 { vrD = altv300_63(vrB); }
:vprtybq vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=10 & XOP_0_10=1538 { vrD = altv300_64(vrB); }
:vprtybw vrD,vrB is OP=4 & vrD & vrB & BITS_16_20=8 & XOP_0_10=1538 { vrD = altv300_65(vrB); }
:vrldmi vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=197 { vrD = altv300_66(vrA,vrB); }
:vrldnm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=453 { vrD = altv300_67(vrA,vrB); }
:vrlwmi vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=133 { vrD = altv300_68(vrA,vrB); }
:vrlwnm vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=389 { vrD = altv300_69(vrA,vrB); }
:vslv vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1860 { vrD = altv300_70(vrA,vrB); }
:vsrv vrD,vrA,vrB is OP=4 & vrD & vrA & vrB & XOP_0_10=1796 { vrD = altv300_71(vrA,vrB); }

================================================
FILE: pypcode/processors/PowerPC/data/languages/evx.sinc
================================================
@include "Scalar_SPFP.sinc"

@ifdef IS_ISA
@include "SPE_APU.sinc"
@endif

# lvx/stvx: 16-byte vector load/store, effective address = (RA|0) + RB.
:lvx vrD, RA_OR_ZERO, RB is OP=31 & vrD & RA_OR_ZERO & RB & XOP_1_10=103 & BIT_0=0 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB;
	vrD = *:16 ($(EATRUNC));
}
:stvx vrS, RA_OR_ZERO, RB is OP=31 & vrS & RA_OR_ZERO & RB & XOP_1_10=231 & BIT_0=0 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO + RB;
	*:16 ($(EATRUNC)) = vrS;
}

================================================
FILE: pypcode/processors/PowerPC/data/languages/g2.sinc
================================================
# TLB load instruction/data entry (supervisor ops, modeled opaquely).
define pcodeop tlbli;
define pcodeop tlbld;

:tlbld B is $(NOTVLE) & OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=978 & BIT_0=0 { tlbld(B); }
:tlbli B is $(NOTVLE) & OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=1010 & BIT_0=0 { tlbli(B); }

================================================
FILE: pypcode/processors/PowerPC/data/languages/lmwInstructions.sinc
================================================
# LDMRn subtable chain for lmw: each level either terminates (when the lsmul
# context value says n registers remain before rN) or builds the previous
# level and loads one more register, so LDMR31 expands to loadReg(rS..r31).
LDMR0: is lsmul=1 {}    LDMR0: is epsilon { loadReg(r0); }
LDMR1: is lsmul=2 {}    LDMR1: is LDMR0 { build LDMR0; loadReg(r1); }
LDMR2: is lsmul=3 {}    LDMR2: is LDMR1 { build LDMR1; loadReg(r2); }
LDMR3: is lsmul=4 {}    LDMR3: is LDMR2 { build LDMR2; loadReg(r3); }
LDMR4: is lsmul=5 {}    LDMR4: is LDMR3 { build LDMR3; loadReg(r4); }
LDMR5: is lsmul=6 {}    LDMR5: is LDMR4 { build LDMR4; loadReg(r5); }
LDMR6: is lsmul=7 {}    LDMR6: is LDMR5 { build LDMR5; loadReg(r6); }
LDMR7: is lsmul=8 {}    LDMR7: is LDMR6 { build LDMR6; loadReg(r7); }
LDMR8: is lsmul=9 {}    LDMR8: is LDMR7 { build LDMR7; loadReg(r8); }
LDMR9: is lsmul=10 {}   LDMR9: is LDMR8 { build LDMR8; loadReg(r9); }
LDMR10: is lsmul=11 {}  LDMR10: is LDMR9 { build LDMR9; loadReg(r10); }
LDMR11: is lsmul=12 {}  LDMR11: is LDMR10 { build LDMR10; loadReg(r11); }
LDMR12: is lsmul=13 {}  LDMR12: is LDMR11 { build LDMR11; loadReg(r12); }
LDMR13: is lsmul=14 {}  LDMR13: is LDMR12 { build LDMR12; loadReg(r13); }
LDMR14: is lsmul=15 {}  LDMR14: is LDMR13 { build LDMR13; loadReg(r14); }
LDMR15: is lsmul=16 {}  LDMR15: is LDMR14 { build LDMR14; loadReg(r15); }
LDMR16: is lsmul=17 {}  LDMR16: is LDMR15 { build LDMR15; loadReg(r16); }
LDMR17: is lsmul=18 {}  LDMR17: is LDMR16 { build LDMR16; loadReg(r17); }
LDMR18: is lsmul=19 {}  LDMR18: is LDMR17 { build LDMR17; loadReg(r18); }
LDMR19: is lsmul=20 {}  LDMR19: is LDMR18 { build LDMR18; loadReg(r19); }
LDMR20: is lsmul=21 {}  LDMR20: is LDMR19 { build LDMR19; loadReg(r20); }
LDMR21: is lsmul=22 {}  LDMR21: is LDMR20 { build LDMR20; loadReg(r21); }
LDMR22: is lsmul=23 {}  LDMR22: is LDMR21 { build LDMR21; loadReg(r22); }
LDMR23: is lsmul=24 {}  LDMR23: is LDMR22 { build LDMR22; loadReg(r23); }
LDMR24: is lsmul=25 {}  LDMR24: is LDMR23 { build LDMR23; loadReg(r24); }
LDMR25: is lsmul=26 {}  LDMR25: is LDMR24 { build LDMR24; loadReg(r25); }
LDMR26: is lsmul=27 {}  LDMR26: is LDMR25 { build LDMR25; loadReg(r26); }
LDMR27: is lsmul=28 {}  LDMR27: is LDMR26 { build LDMR26; loadReg(r27); }
LDMR28: is lsmul=29 {}  LDMR28: is LDMR27 { build LDMR27; loadReg(r28); }
LDMR29: is lsmul=30 {}  LDMR29: is LDMR28 { build LDMR28; loadReg(r29); }
LDMR30: is lsmul=31 {}  LDMR30: is LDMR29 { build LDMR29; loadReg(r30); }
LDMR31: is LDMR30 { build LDMR30; loadReg(r31); }

# lmw rS,d(rA): load r(S)..r31 from consecutive words starting at the
# effective address; lsmul is seeded from the rS field (BITS_21_25).
:lmw D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=46 & D & BITS_21_25 & dPlusRaOrZeroAddress & LDMR31 [ lsmul = BITS_21_25; ] {
	tea = dPlusRaOrZeroAddress;
	build LDMR31;
}

================================================ FILE:
pypcode/processors/PowerPC/data/languages/lswInstructions.sinc ================================================
#lswi r0,0,7 0x7c 00 3c aa
#lswi r0,r2,7 0x7c 02 3c aa

# DYN_Dk: exports GPR number ((rD field + k) mod 32) as a dynamic register
# varnode, so lswi can address the wrap-around destination registers.
DYN_D1: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 1)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_D2: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 2)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_D3: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 3)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_D4: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 4)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_D5: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 5)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_D6: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 6)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_D7: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 7)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }

# lswi: load string word immediate.  The NB byte count is split at decode
# time: BITS_13_15 selects how many full words are loaded (one variant per
# value; 0 with BH=0 presumably means 32 bytes, i.e. 8 full words) and BH
# carries the leftover byte count handled by loadRegisterPartial.
# NOTE(review): loadReg*/loadRegisterPartial are defined elsewhere -- confirm
# they advance ea as a side effect, since ea is not updated here.
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=0 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 & DYN_D7 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea);
	loadRegister(DYN_D4,ea); loadRegister(DYN_D5,ea); loadRegister(DYN_D6,ea); loadRegister(DYN_D7,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=0 & BH & XOP_1_10=597 & BIT_0=0 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	sa:1 = BH;
	loadRegisterPartial(D,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=1 & BH=0 & XOP_1_10=597 & BIT_0=0 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=1 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D1,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=2 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=2 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D2,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=3 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=3 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D3,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=4 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=4 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D4,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=5 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea); loadRegister(DYN_D4,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=5 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea); loadRegister(DYN_D4,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D5,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=6 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea); loadRegister(DYN_D4,ea); loadRegister(DYN_D5,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=6 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea); loadRegister(DYN_D4,ea); loadRegister(DYN_D5,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D6,ea,sa);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=7 & BH=0 & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea); loadRegister(DYN_D4,ea); loadRegister(DYN_D5,ea); loadRegister(DYN_D6,ea);
}
:lswi D,RA_OR_ZERO,NB is OP=31 & D & RA_OR_ZERO & NB & BITS_13_15=7 & BH & XOP_1_10=597 & BIT_0=0 & DYN_D1 & DYN_D2 & DYN_D3 & DYN_D4 & DYN_D5 & DYN_D6 & DYN_D7 {
	ea:$(REGISTER_SIZE) = RA_OR_ZERO;
	loadRegister(D,ea); loadRegister(DYN_D1,ea); loadRegister(DYN_D2,ea); loadRegister(DYN_D3,ea); loadRegister(DYN_D4,ea); loadRegister(DYN_D5,ea); loadRegister(DYN_D6,ea);
	sa:1 = BH;
	loadRegisterPartial(DYN_D7,ea,sa);
}

================================================
FILE: pypcode/processors/PowerPC/data/languages/mulhwInstructions.sinc
================================================
#macchw r0,r0,r0 0x10 00 01 58
:macchw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=172 & Rc=0 { D = macchw(D, A, B);
} #macchw. r0,r0,r0 0x10 00 01 59 :macchw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=172 & Rc=1 { D = macchw(D, A, B); cr0flags(D); } #macchwo r0,r0,r0 0x10 00 05 58 :macchwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=172 & Rc=0 { D = macchw(D, A, B); xer_mac_update(D, A, B); } #macchwo. r0,r0,r0 0x10 00 05 59 :macchwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=172 & Rc=1 { D = macchw(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #macchws r0,r0,r0 0x10 00 01 d8 :macchws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=236 & Rc=0 { D = macchws(D, A, B); } #macchws. r0,r0,r0 0x10 00 01 d9 :macchws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=236 & Rc=1 { D = macchws(D, A, B); cr0flags(D); } #macchwso r0,r0,r0 0x10 00 05 d8 :macchwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=236 & Rc=0 { D = macchws(D, A, B); xer_mac_update(D, A, B); } #macchwso. r0,r0,r0 0x10 00 05 d9 :macchwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=236 & Rc=1 { D = macchws(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #macchwsu r0,r0,r0 0x10 00 01 98 :macchwsu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=204 & Rc=0 { D = macchwsu(D, A, B); } #macchwsu. r0,r0,r0 0x10 00 01 99 :macchwsu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=204 & Rc=1 { D = macchwsu(D, A, B); cr0flags(D); } #macchwsuo r0,r0,r0 0x10 00 05 98 :macchwsuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=204 & Rc=0 { D = macchwsu(D, A, B); xer_mac_update(D, A, B); } #macchwsuo. r0,r0,r0 0x10 00 05 99 :macchwsuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=204 & Rc=1 { D = macchwsu(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #macchwu r0,r0,r0 0x10 00 01 18 :macchwu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=140 & Rc=0 { D = macchwu(D, A, B); } #macchwu. r0,r0,r0 0x10 00 01 19 :macchwu. 
D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=140 & Rc=1 { D = macchwu(D, A, B); cr0flags(D); } #macchwuo r0,r0,r0 0x10 00 05 18 :macchwuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=140 & Rc=0 { D = macchwu(D, A, B); xer_mac_update(D, A, B); } #macchwuo. r0,r0,r0 0x10 00 05 19 :macchwuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=140 & Rc=1 { D = macchwu(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #machhw r0,r0,r0 0x10 00 00 58 :machhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=44 & Rc=0 { D = machhw(D, A, B); } #machhw. r0,r0,r0 0x10 00 00 59 :machhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=44 & Rc=1 { D = machhw(D, A, B); cr0flags(D); } #machhwo r0,r0,r0 0x10 00 04 58 :machhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=44 & Rc=0 { D = machhw(D, A, B); xer_mac_update(D, A, B); } #machhwo. r0,r0,r0 0x10 00 04 59 :machhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=44 & Rc=1 { D = machhw(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #machhws r0,r0,r0 0x10 00 00 d8 :machhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=108 & Rc=0 { D = machhws(D, A, B); } #machhws. r0,r0,r0 0x10 00 00 d9 :machhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=108 & Rc=1 { D = machhws(D, A, B); cr0flags(D); } #machhwso r0,r0,r0 0x10 00 04 d8 :machhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=108 & Rc=0 { D = machhws(D, A, B); xer_mac_update(D, A, B); } #machhwso. r0,r0,r0 0x10 00 04 d9 :machhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=108 & Rc=1 { D = machhws(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #machhwsu r0,r0,r0 0x10 00 00 98 :machhwsu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=76 & Rc=0 { D = machhwsu(D, A, B); } #machhwsu. r0,r0,r0 0x10 00 00 99 :machhwsu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=76 & Rc=1 { D = machhwsu(D, A, B); cr0flags(D); } #machhwsuo r0,r0,r0 0x10 00 04 98 :machhwsuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=76 & Rc=0 { D = machhwsu(D, A, B); xer_mac_update(D, A, B); } #machhwsuo. r0,r0,r0 0x10 00 04 99 :machhwsuo. 
D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=76 & Rc=1 { D = machhwsu(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #machhwu r0,r0,r0 0x10 00 00 18 :machhwu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=12 & Rc=0 { D = machhwu(D, A, B); } #machhwu. r0,r0,r0 0x10 00 00 19 :machhwu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=12 & Rc=1 { D = machhwu(D, A, B); cr0flags(D); } #machhwuo r0,r0,r0 0x10 00 04 18 :machhwuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=12 & Rc=0 { D = machhwu(D, A, B); xer_mac_update(D, A, B); } #machhwuo. r0,r0,r0 0x10 00 04 19 :machhwuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=12 & Rc=1 { D = machhwu(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #maclhw r0,r0,r0 0x10 00 03 58 :maclhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=428 & Rc=0 { D = maclhw(D, A, B); } #maclhw. r0,r0,r0 0x10 00 03 59 :maclhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=428 & Rc=1 { D = maclhw(D, A, B); cr0flags(D); } #maclhwo r0,r0,r0 0x10 00 07 58 :maclhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=428 & Rc=0 { D = maclhw(D, A, B); xer_mac_update(D, A, B); } #maclhwo. r0,r0,r0 0x10 00 07 59 :maclhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=428 & Rc=1 { D = maclhw(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #maclhws r0,r0,r0 0x10 00 03 d8 :maclhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=492 & Rc=0 { D = maclhws(D, A, B); } #maclhws. r0,r0,r0 0x10 00 03 d9 :maclhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=492 & Rc=1 { D = maclhws(D, A, B); cr0flags(D); } #maclhwso r0,r0,r0 0x10 00 07 d8 :maclhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=492 & Rc=0 { D = maclhws(D, A, B); xer_mac_update(D, A, B); } #maclhwso. r0,r0,r0 0x10 00 07 d9 :maclhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=492 & Rc=1 { D = maclhws(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #maclhwsu r0,r0,r0 0x10 00 03 98 :maclhwsu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=460 & Rc=0 { D = maclhwsu(D, A, B); } #maclhwsu. r0,r0,r0 0x10 00 03 99 :maclhwsu. 
D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=460 & Rc=1 { D = maclhwsu(D, A, B); cr0flags(D); } #maclhwsuo r0,r0,r0 0x10 00 07 98 :maclhwsuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=460 & Rc=0 { D = maclhwsu(D, A, B); xer_mac_update(D, A, B); } #maclhwsuo. r0,r0,r0 0x10 00 07 99 :maclhwsuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=460 & Rc=1 { D = maclhwsu(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #maclhwu r0,r0,r0 0x10 00 03 18 :maclhwu D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=396 & Rc=0 { D = maclhwu(D, A, B); } #maclhwu. r0,r0,r0 0x10 00 03 19 :maclhwu. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=396 & Rc=1 { D = maclhwu(D, A, B); cr0flags(D); } #maclhwuo r0,r0,r0 0x10 00 07 18 :maclhwuo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=396 & Rc=0 { D = maclhwu(D, A, B); xer_mac_update(D, A, B); } #maclhwuo. r0,r0,r0 0x10 00 07 19 :maclhwuo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=396 & Rc=1 { D = maclhwu(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #mulchw r0,r0,r0 0x10 00 01 50 :mulchw D,A,B is OP=4 & D & A & B & XOP_1_10=168 & Rc=0 { D = mulchw(D, A, B); } #mulchw. r0,r0,r0 0x10 00 01 51 :mulchw. D,A,B is OP=4 & D & A & B & XOP_1_10=168 & Rc=1 { D = mulchw(D, A, B); cr0flags(D); } #mulchwu r0,r0,r0 0x10 00 01 10 :mulchwu D,A,B is OP=4 & D & A & B & XOP_1_10=136 & Rc=0 { D = mulchwu(D, A, B); } #mulchwu. r0,r0,r0 0x10 00 01 11 :mulchwu. D,A,B is OP=4 & D & A & B & XOP_1_10=136 & Rc=1 { D = mulchwu(D, A, B); cr0flags(D); } #mulhhw r0,r0,r0 0x10 00 00 50 :mulhhw D,A,B is OP=4 & D & A & B & XOP_1_10=40 & Rc=0 { D = mulhhw(D, A, B); } #mulhhw. r0,r0,r0 0x10 00 00 51 :mulhhw. D,A,B is OP=4 & D & A & B & XOP_1_10=40 & Rc=1 { D = mulhhw(D, A, B); cr0flags(D); } #mulhhwu r0,r0,r0 0x10 00 00 10 :mulhhwu D,A,B is OP=4 & D & A & B & XOP_1_10=8 & Rc=0 { D = mulhhwu(D, A, B); } #mulhhwu. r0,r0,r0 0x10 00 00 11 :mulhhwu. 
D,A,B is OP=4 & D & A & B & XOP_1_10=8 & Rc=1 { D = mulhhwu(D, A, B); cr0flags(D); } #mullhw r0,r0,r0 0x10 00 03 50 :mullhw D,A,B is OP=4 & D & A & B & XOP_1_10=424 & Rc=0 { D = mullhw(D, A, B); } #mullhw. r0,r0,r0 0x10 00 03 51 :mullhw. D,A,B is OP=4 & D & A & B & XOP_1_10=424 & Rc=1 { D = mullhw(D, A, B); cr0flags(D); } # mulhwu r0,r0,r0 0x10 00 03 10 :mullhwu D,A,B is OP=4 & D & A & B & XOP_1_10=392 & Rc=0 { D = mullhwu(D, A, B); } #mullhwu. r0,r0,r0 0x10 00 03 11 :mullhwu. D,A,B is OP=4 & D & A & B & XOP_1_10=392 & Rc=1 { D = mullhwu(D, A, B); cr0flags(D); } #nmacchw r0,r0,r0 0x10 00 01 5c :nmacchw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=174 & Rc=0 { D = nmacchw(D, A, B); } #nmacchw. r0,r0,r0 0x10 00 01 5d :nmacchw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=174 & Rc=1 { D = nmacchw(D, A, B); cr0flags(D); } #nmacchwo r0,r0,r0 0x10 00 05 5c :nmacchwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=174 & Rc=0 { D = nmacchw(D, A, B); xer_mac_update(D, A, B); } #nmacchwo. r0,r0,r0 0x10 00 05 5d :nmacchwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=174 & Rc=1 { D = nmacchw(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #nmacchws r0,r0,r0 0x10 00 01 dc :nmacchws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=238 & Rc=0 { D = nmacchws(D, A, B); } #nmacchws. r0,r0,r0 0x10 00 01 dd :nmacchws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=238 & Rc=1 { D = nmacchws(D, A, B); cr0flags(D); } #nmacchwso r0,r0,r0 0x10 00 05 dc :nmacchwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=238 & Rc=0 { D = nmacchws(D, A, B); xer_mac_update(D, A, B); } #nmacchwso. r0,r0,r0 0x10 00 05 dd :nmacchwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=238 & Rc=1 { D = nmacchws(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #nmachhw r0,r0,r0 0x10 00 00 5c :nmachhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=46 & Rc=0 { D = nmachhw(D, A, B); } #nmachhw. r0,r0,r0 0x10 00 00 5d :nmachhw. 
D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=46 & Rc=1 { D = nmachhw(D, A, B); cr0flags(D); } #nmachhwo r0,r0,r0 0x10 00 04 5c :nmachhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=46 & Rc=0 { D = nmachhw(D, A, B); xer_mac_update(D, A, B); } #nmachhwo. r0,r0,r0 0x10 00 04 5d :nmachhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=46 & Rc=1 { D = nmachhw(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #nmachhws r0,r0,r0 0x10 00 00 dc :nmachhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=110 & Rc=0 { D = nmachhws(D, A, B); } #nmachhws. r0,r0,r0 0x10 00 00 dd :nmachhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=110 & Rc=1 { D = nmachhws(D, A, B); cr0flags(D); } #nmachhwso r0,r0,r0 0x10 00 04 dc :nmachhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=110 & Rc=0 { D = nmachhws(D, A, B); xer_mac_update(D, A, B); } #nmachhwso. r0,r0,r0 0x10 00 04 dd :nmachhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=110 & Rc=1 { D = nmachhws(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #nmaclhw r0,r0,r0 0x10 00 03 5c :nmaclhw D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=430 & Rc=0 { D = nmaclhw(D, A, B); } #nmaclhw. r0,r0,r0 0x10 00 03 5d :nmaclhw. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=430 & Rc=1 { D = nmaclhw(D, A, B); cr0flags(D); } #nmaclhwo r0,r0,r0 0x10 00 07 5c :nmaclhwo D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=430 & Rc=0 { D = nmaclhw(D, A, B); xer_mac_update(D, A, B); } #nmaclhwo. r0,r0,r0 0x10 00 07 5d :nmaclhwo. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=430 & Rc=1 { D = nmaclhw(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } #nmaclhws r0,r0,r0 0x10 00 03 dc :nmaclhws D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=494 & Rc=0 { D = nmaclhws(D, A, B); } #nmaclhws. r0,r0,r0 0x10 00 03 dd :nmaclhws. D,A,B is OP=4 & D & A & B & OE=0 & XOP_1_9=494 & Rc=1 { D = nmaclhws(D, A, B); cr0flags(D); } #nmaclhwso r0,r0,r0 0x10 00 07 dc :nmaclhwso D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=494 & Rc=0 { D = nmaclhws(D, A, B); xer_mac_update(D, A, B); } #nmaclhwso. 
r0,r0,r0 0x10 00 07 dd :nmaclhwso. D,A,B is OP=4 & D & A & B & OE=1 & XOP_1_9=494 & Rc=1 { D = nmaclhws(D, A, B); xer_mac_update(D, A, B); cr0flags(D); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/old/oldPPC.lang ================================================ PowerPC:BE:32:DEPRECATED PowerPC ================================================ FILE: pypcode/processors/PowerPC/data/languages/old/oldPPC.trans ================================================ Sleigh-PowerPC 32-bit PowerPC:BE:32:default ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc.dwarf ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc.ldefs ================================================ PowerPC 32-bit big endian w/Altivec, G2 PowerPC 32-bit little endian w/Altivec, G2 PowerPC 64-bit big endian w/Altivec, G2 PowerPC 64-bit big endian w/Altivec and 32 bit addressing, G2 PowerPC 64-bit little endian w/Altivec and 32 bit addressing, G2 PowerPC 64-bit little endian w/Altivec, G2 PowerPC 4xx 32-bit big endian embedded core PowerPC 4xx 32-bit little endian embedded core Freescale MPC8280 32-bit big endian family (PowerQUICC-III) PowerQUICC-III 32-bit big endian family PowerQUICC-III 32-bit little endian family Power ISA e200 32-bit big-endian embedded core w/VLE PowerQUICC-III e500 32-bit big-endian family PowerQUICC-III e500 32-bit little-endian family PowerQUICC-III e500mc 32-bit big-endian family PowerQUICC-III e500mc 32-bit little-endian family Power ISA 3.0 Big Endian w/EVX and 32-bit Addressing Power ISA 3.0 Little Endian w/EVX and 32-bit Addressing Power ISA 3.0 Big Endian w/Altivec and 32-bit Addressing Power ISA 3.0 Little Endian w/Altivec and 32-bit Addressing Power ISA 3.0 Big Endian w/Altivec Power ISA 3.0 Little Endian w/Altivec Power ISA 3.0 Big Endian w/VLE, EVX and 32-bit Addressing 
Power ISA 3.0 Big Endian w/VLE, Altivec and 32-bit Addressing ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc.ldefs.orig ================================================ PowerPC 32-bit big endian w/Altivec, G2 PowerPC 32-bit little endian w/Altivec, G2 PowerPC 64-bit big endian w/Altivec, G2 PowerPC 64-bit big endian w/Altivec and 32 bit addressing, G2 PowerPC 64-bit little endian w/Altivec and 32 bit addressing, G2 PowerPC 64-bit little endian w/Altivec, G2 PowerPC 4xx 32-bit big endian embedded core PowerPC 4xx 32-bit little endian embedded core Freescale MPC8280 32-bit big endian family (PowerQUICC-III) PowerQUICC-III 32-bit big endian family PowerQUICC-III 32-bit little endian family PowerQUICC-III e500 32-bit big-endian family PowerQUICC-III e500 32-bit little-endian family PowerQUICC-III e500mc 32-bit big-endian family PowerQUICC-III e500mc 32-bit little-endian family Power ISA 3.0 Big Endian w/EVX and 32-bit Addressing Power ISA 3.0 Little Endian w/EVX and 32-bit Addressing Power ISA 3.0 Big Endian w/Altivec and 32-bit Addressing Power ISA 3.0 Little Endian w/Altivec and 32-bit Addressing Power ISA 3.0 Big Endian w/Altivec Power ISA 3.0 Little Endian w/Altivec Power ISA 3.0 Big Endian w/VLE, EVX and 32-bit Addressing Power ISA 3.0 Big Endian w/VLE, Altivec and 32-bit Addressing ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32.pspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_4xx_be.slaspec ================================================ # SLA specification file for IBM PowerPC 4xx series core @define ENDIAN "big" @define REGISTER_SIZE "4" @define EATRUNC "ea" @define CTR_OFFSET "32" 
@include "ppc_common.sinc" @include "4xx.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_4xx_le.slaspec ================================================ # SLA specification file for IBM PowerPC 4xx series core @define ENDIAN "little" @define REGISTER_SIZE "4" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "4xx.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_be.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_be.slaspec ================================================ # SLA specification file for PowerPC 32-bit big endian @define ENDIAN "big" @define REGISTER_SIZE "4" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "altivec.sinc" @include "g2.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_be_Mac.cspec ================================================ * (r1 + 0x14) = r2; ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e200.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e200.pspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e200.slaspec ================================================ # SLA specification file for IBM PowerPC e200 series core # Note: e200 series use VLE, with exception of e200z7 which supports both VLE and non-VLE @define E200 @define ENDIAN "big" @define REGISTER_SIZE "4" @define EATRUNC "ea" @define CTR_OFFSET "32" @define NoLegacyIntegerMultiplyAccumulate @include "ppc_common.sinc" @include "ppc_vle.sinc" @include 
"evx.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500_be.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500_be.slaspec ================================================ # SLA specification file for IBM PowerPC e500 series core # NOTE: This language variant includes some registers and instructions not supported # by the actual processor (e.g., floating pointer registers and associated instructions). # The actual processor only supports a subset of the registers and instructions implemented. @define E500 "1" @define ENDIAN "big" # Although a 32-bit architecture, 64-bit general purpose registers are supported. # Language has been modeled using a 64-bit implementation with a 32-bit truncated # memory space (see ldefs). @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @define NoLegacyIntegerMultiplyAccumulate "1" @include "ppc_common.sinc" @include "quicciii.sinc" @include "SPE_APU.sinc" @include "evx.sinc" @include "SPEF_SCR.sinc" @include "SPE_EFSD.sinc" @include "SPE_EFV.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500_le.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500_le.slaspec ================================================ # SLA specification file for IBM PowerPC e500 series core # NOTE: This language variant includes some registers and instructions not supported # by the actual processor (e.g., floating pointer registers and associated instructions). # The actual processor only supports a subset of the registers and instructions implemented. @define E500 "1" @define ENDIAN "little" # Although a 32-bit architecture, 64-bit general purpose registers are supported. 
# Language has been modeled using a 64-bit implementation with a 32-bit truncated # memory space (see ldefs). @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @define NoLegacyIntegerMultiplyAccumulate "1" @include "ppc_common.sinc" @include "quicciii.sinc" @include "SPE_APU.sinc" @include "evx.sinc" @include "SPEF_SCR.sinc" @include "SPE_EFSD.sinc" @include "SPE_EFV.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500mc_be.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500mc_be.slaspec ================================================ # SLA specification file for IBM PowerPC e500 series core # NOTE: This language variant includes some registers and instructions not supported # by the actual processor (e.g., floating pointer registers and associated instructions). # The actual processor only supports a subset of the registers and instructions implemented. 
@define ENDIAN "big" @define REGISTER_SIZE "4" @define EATRUNC "ea" # e500mc has 32 bit registers # @define CTR_OFFSET "32" @define NoLegacyIntegerMultiplyAccumulate "1" @include "ppc_common.sinc" @include "quicciii.sinc" @include "evx.sinc" @include "SPEF_SCR.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500mc_le.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_e500mc_le.slaspec ================================================ # SLA specification file for IBM PowerPC e500 series core #@define E500 @define ENDIAN "little" @define REGISTER_SIZE "4" @define EATRUNC "ea" # e500mc has 32 bit registers # @define CTR_OFFSET "32" @define NoLegacyIntegerMultiplyAccumulate "1" @include "ppc_common.sinc" @include "quicciii.sinc" @include "evx.sinc" @include "SPEF_SCR.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_le.slaspec ================================================ # SLA specification file for PowerPC 32-bit little endian @define ENDIAN "little" @define REGISTER_SIZE "4" @define EATRUNC "ea" @include "ppc_common.sinc" @include "altivec.sinc" @include "g2.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_mpc8270.pspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_quicciii_be.slaspec ================================================ # SLA specification file for IBM PowerPC 4xx series core @define ENDIAN "big" @define REGISTER_SIZE "4" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "quicciii.sinc" @include "evx.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_32_quicciii_le.slaspec 
================================================ # SLA specification file for IBM PowerPC 4xx series core @define ENDIAN "little" @define REGISTER_SIZE "4" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "quicciii.sinc" @include "evx.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64.pspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_32.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_be.cspec ================================================ # Inject pcode when returning from a function call to place the r2Save # value into 0x28(r1) which should be restored by the "ld r2,0x28(r1)" # which immediately follows calls which comply with the PPC64 ABI spec. local saveR2ptr = r1 + 0x28; *:8 saveR2ptr = r2Save; ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_be.slaspec ================================================ # SLA specification file for PowerPC 64-bit big endian @define ENDIAN "big" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea:4" @include "ppc_common.sinc" @include "altivec.sinc" @include "g2.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_be_Mac.cspec ================================================ ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_isa_altivec_be.slaspec ================================================ # SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) # ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. 
@define ENDIAN "big" @define IS_ISA "1" @define NoLegacyIntegerMultiplyAccumulate "1" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "ppc_isa.sinc" @include "ppc_a2.sinc" @include "quicciii.sinc" @include "FPRC.sinc" # A given processor can be compliant with the PowerISA spec by including EITHER # the embedded vector instructions (EVX) OR the AltiVec instructions # However, these instruction sets overlap in their bit patterns, so Sleigh cannot support # both at the same time. We have two language variants for PowerISA # that specify which of these two vector specs is supported. #@include "evx.sinc" #@include "SPEF_SCR.sinc" #@include "SPE_EFSD.sinc" #@include "SPE_EFV.sinc" ## OR @include "altivec.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_isa_altivec_le.slaspec ================================================ # SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) # ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. @define ENDIAN "little" @define IS_ISA "1" @define NoLegacyIntegerMultiplyAccumulate "1" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "ppc_isa.sinc" @include "ppc_a2.sinc" @include "quicciii.sinc" @include "FPRC.sinc" # A given processor can be compliant with the PowerISA spec by including EITHER # the embedded vector instructions (EVX) OR the AltiVec instructions # However, these instruction sets overlap in their bit patterns, so Sleigh cannot support # both at the same time. We have two language variants for PowerISA # that specify which of these two vector specs is supported. 
#@include "evx.sinc" #@include "SPEF_SCR.sinc" #@include "SPE_EFSD.sinc" #@include "SPE_EFV.sinc" # OR @include "altivec.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_isa_altivec_vle_be.slaspec ================================================ # SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) # ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. @define ENDIAN "big" @define IS_ISA "1" @define NoLegacyIntegerMultiplyAccumulate "1" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "ppc_isa.sinc" @include "ppc_a2.sinc" @include "quicciii.sinc" @include "FPRC.sinc" @include "ppc_vle.sinc" # A given processor can be compliant with the PowerISA spec by including EITHER # the embedded vector instructions (EVX) OR the AltiVec instructions # However, these instruction sets overlap in their bit patterns, so Sleigh cannot support # both at the same time. We have two language variants for PowerISA # that specify which of these two vector specs is supported. #@include "evx.sinc" #@include "SPEF_SCR.sinc" #@include "SPE_EFSD.sinc" #@include "SPE_EFV.sinc" ## OR @include "altivec.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_isa_be.slaspec ================================================ # SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) # ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. 
@define ENDIAN "big" @define IS_ISA "1" @define NoLegacyIntegerMultiplyAccumulate "1" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "ppc_isa.sinc" @include "quicciii.sinc" @include "FPRC.sinc" # A given processor can be compliant with the PowerISA spec by including EITHER # the embedded vector instructions (EVX) OR the AltiVec instructions # However, these instruction sets overlap in their bit patterns, so Sleigh cannot support # both at the same time. We have two language variants for PowerISA # that specify which of these two vector specs is supported. @include "evx.sinc" @include "SPEF_SCR.sinc" @include "SPE_EFSD.sinc" @include "SPE_EFV.sinc" # OR #@include "altivec.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_isa_le.slaspec ================================================ # SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) # ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. @define ENDIAN "little" @define IS_ISA "1" @define NoLegacyIntegerMultiplyAccumulate "1" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "ppc_isa.sinc" @include "quicciii.sinc" @include "FPRC.sinc" # A given processor can be compliant with the PowerISA spec by including EITHER # the embedded vector instructions (EVX) OR the AltiVec instructions # However, these instruction sets overlap in their bit patterns, so Sleigh cannot support # both at the same time. We have two language variants for PowerISA # that specify which of these two vector specs is supported. 
@include "evx.sinc" @include "SPEF_SCR.sinc" @include "SPE_EFSD.sinc" @include "SPE_EFV.sinc" # OR #@include "altivec.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_isa_vle_be.slaspec ================================================ # SLA specification file for Power ISA Version 2.06 Revision B (July 23, 2010) # ISA (Instruction Set Architecture) a trademarked name for PowerPC specifications from IBM. @define ENDIAN "big" @define IS_ISA "1" @define NoLegacyIntegerMultiplyAccumulate "1" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea" @define CTR_OFFSET "32" @include "ppc_common.sinc" @include "ppc_isa.sinc" @include "quicciii.sinc" @include "FPRC.sinc" @include "ppc_vle.sinc" # A given processor can be compliant with the PowerISA spec by including EITHER # the embedded vector instructions (EVX) OR the AltiVec instructions # However, these instruction sets overlap in their bit patterns, so Sleigh cannot support # both at the same time. We have two language variants for PowerISA # that specify which of these two vector specs is supported. @include "evx.sinc" @include "SPEF_SCR.sinc" @include "SPE_EFSD.sinc" @include "SPE_EFV.sinc" @include "SPE_FloatMulAdd.sinc" # OR #@include "altivec.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_le.cspec ================================================ # Inject pcode when returning from a function call to place the r2Save # value into 0x28(r1) which should be restored by the "ld r2,0x28(r1)" # which immediately follows calls which comply with the PPC64 ABI spec. 
local saveR2ptr = r1 + 0x28; *:8 saveR2ptr = r2Save; ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_64_le.slaspec ================================================ # SLA specification file for PowerPC 64-bit little endian @define ENDIAN "little" @define REGISTER_SIZE "8" @define BIT_64 "64" @define EATRUNC "ea:4" @include "ppc_common.sinc" @include "altivec.sinc" @include "g2.sinc" ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_a2.sinc ================================================ # binutils: a2.d 88: 00 00 02 00 attn # binutils: power4_32.d 28: 00 00 02 00 attn # binutils: power4.d +64: 00 00 02 00 attn # binutils: power6.d 54: 00 00 02 00 attn # "attn", X(0,256), X_MASK, POWER4|PPCA2, PPC476, {0} define pcodeop attnOp; :attn is $(NOTVLE) & OP=0 & XOP_1_10=256 & BITS_11_25=0 { attnOp(); } # binutils: a2.d 214: 7d 4b 01 a6 eratwe r10,r11,0 # binutils: a2.d 218: 7d 4b 19 a6 eratwe r10,r11,3 # {"eratwe", X(31,211), X_MASK, PPCA2, PPCNONE, {RS, RA, WS}}, # WS=> { 0x7, 11, NULL, NULL, 0 }, define pcodeop eratweOp; :eratwe S,A is $(NOTVLE) & OP=31 & XOP_1_10=211 & S & A & BITS_11_13 & BITS_14_15 & BIT_0=0 { eratweOp(S,A); } # binutils: a2.d 200: 7d 4b 66 66 erativax r10,r11,r12 # "erativax", X(31,819), X_MASK, PPCA2, PPCNONE, {RS, RA0, RB} define pcodeop erativaxOp; :erativax S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=819 & S & A & B { erativaxOp(S,A,B); } # binutils: a2.d 1f4: 7c 0a 58 66 eratilx 0,r10,r11 # binutils: a2.d 1f8: 7c 2a 58 66 eratilx 1,r10,r11 # binutils: a2.d 1fc: 7c ea 58 66 eratilx 7,r10,r11 # "eratilx", X(31,51), X_MASK, PPCA2, PPCNONE, {ERAT_T, RA, RB} define pcodeop eratilxOp; :eratilx BITS_21_23,A,B is $(NOTVLE) & OP=31 & XOP_1_10=51 & BITS_21_23 & A & B { eratilxOp(A,B); } # binutils: a2.d 210: 7d 4b 61 26 eratsx r10,r11,r12 # "eratsx", XRC(31,147,0), X_MASK, PPCA2, PPCNONE, {RT, RA0, RB} define pcodeop eratsxOp; :eratsx TH,A,B is $(NOTVLE) & 
OP=31 & XOP_1_10=147 & Rc=0 & TH & A & B { eratsxOp(TH,A,B); } # binutils: a2.d 20c: 7d 4b 61 27 eratsx\. r10,r11,r12 # "eratsx.", XRC(31,147,1), X_MASK, PPCA2, PPCNONE, {RT, RA0, RB} define pcodeop eratsxXOp; :eratsx. TH,A,B is $(NOTVLE) & OP=31 & XOP_1_10=147 & Rc=1 & TH & A & B { eratsxXOp(TH,A,B); } # "eratre", X(31,179), # binutils: a2.d 204: 7d 4b 01 66 eratre r10,r11,0 # binutils: a2.d 208: 7d 4b 19 66 eratre r10,r11,3 define pcodeop eratreOp; :eratre TH,A,BITS_11_13 is $(NOTVLE) & OP=31 & XOP_1_10=179 & TH & A & BITS_11_13 { eratreOp(TH,A); } # binutils: a2.d 3e0: 7d 4b 63 2c icswx r10,r11,r12 # "icswx", XRC(31,406,0), X_MASK, POWER7|PPCA2, PPCNONE, {RS, RA, RB} define pcodeop icswxOp; :icswx S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=406 & Rc=0 & S & A & B { icswxOp(S,A,B); } # binutils: a2.d 3dc: 7d 4b 63 2d icswx\. r10,r11,r12 # "icswx.", XRC(31,406,1), X_MASK, POWER7|PPCA2, PPCNONE, {RS, RA, RB} define pcodeop icswxDotOp; :icswx. S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=406 & Rc=1 & S & A & B { icswxDotOp(S,A,B); } # binutils: 476.d 49c: 7c 85 02 06 mfdcrx r4,r5 # binutils: a2.d 520: 7d 4b 02 06 mfdcrx r10,r11 # binutils: booke.d 28: 7c 85 02 06 mfdcrx r4,r5 # binutils: booke_xcoff.d 24: 7c 85 02 06 mfdcrx r4,r5 # "mfdcrx", X(31,259), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {S, A} define pcodeop mfdcrxOp; # :mfdcrx S,A is $(NOTVLE) & OP=31 & XOP_1_10=259 & S & A & BITS_11_15=0 { mfdcrxOp(S,A); } # binutils: a2.d 51c: 7d 4b 02 07 mfdcrx\. r10,r11 # "mfdcrx", X(31,259), X_MASK, BOOKE|PPCA2|PPC476, TITAN, {RS, RA} define pcodeop mfdcrxDotOp; :mfdcrx. S,A is $(NOTVLE) & OP=31 & XOP_1_10=259 & Rc=1 & S & A & BITS_11_15=0 { mfdcrxDotOp(S,A); } # binutils: a2.d 564: 7d 6a 03 07 mtdcrx\. r10,r11 define pcodeop mtdcrxDotOp; :mtdcrx. 
A,S is $(NOTVLE) & OP=31 & XOP_1_10=387 & A & S & Rc=1 { mtdcrxDotOp(A,S); } # binutils: a2.d 884: 7c 00 01 6c wchkall # binutils: a2.d 888: 7c 00 01 6c wchkall # binutils: a2.d 88c: 7d 80 01 6c wchkall cr3 # "wchkall", X(31,182), X_MASK, PPCA2, PPCNONE, {OBF} define pcodeop wchkallOp; :wchkall BITS_23_25 is $(NOTVLE) & OP=31 & XOP_1_10=182 & BITS_23_25 { wchkallOp(); } # binutils: a2.d 894: 7c 20 07 4c wclrall 1 # "wclrall", X(31,934), XRARB_MASK, PPCA2, PPCNONE, {L} define pcodeop wclrallOp; :wclrall L is $(NOTVLE) & OP=31 & XOP_1_10=934 & L { wclrallOp(); } # binutils: a2.d 890: 7c 2a 5f 4c wclr 1,r10,r11 # "wclr", X(31,934), X_MASK, PPCA2, PPCNONE, {L, RA0, RB} define pcodeop wclrOp; # :wclr L,A,B is $(NOTVLE) & OP=31 & XOP_1_10=934 & L & A & B { wclrOp(); } # binutils: a2.d: 514: 7d 4a 3a 87 mfdcr\. r10,234 :mfdcr. D, DCRN is $(NOTVLE) & OP=31 & D & DCRN & XOP_1_10=323 & BIT_0=1 { D = DCRN; } # binutils: a2.d: 55c: 7d 4a 3b 87 mtdcr\. 234,r10 :mtdcr. DCRN, D is $(NOTVLE) & OP=31 & D & DCRN & XOP_1_10=451 & BIT_0=1 { DCRN = D; } # binutils: a2.d: 188: 7d 4b 61 fe dcbtstep r10,r11,r12 # binutils: e500mc.d: a0: 7c 64 29 fe dcbtstep r3,r4,r5 define pcodeop DataCacheBlockTouchForStoreByExternalPID; :dcbtstep TH,A,B is OP=31 & TH & A & B & XOP_1_10=255 & BIT_0=0 { DataCacheBlockTouchForStoreByExternalPID(TH,A,B); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_common.sinc ================================================ # PowerPC assembly SLA spec (size agnostic) # version 1.0 define endian=$(ENDIAN); define alignment=2; # -size: How many bytes make up an address define space ram type=ram_space size=$(REGISTER_SIZE) default; # -size: How many bytes do we need for register addressing define space register type=register_space size=4; # General registers (some pcode that follows depends on these registers being at # offset 0 define register offset=0 size=$(REGISTER_SIZE) [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 
r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 r30 r31 ]; @ifdef E500 # Define 4-byte general purpose sub-registers (LSB) to be used by E500 compiler specification # which must restrict parameter/return passing to low 4-bytes of the 8-byte general purpose registers. @if ENDIAN == "big" define register offset=0 size=4 [ _ _r0 _ _r1 _ _r2 _ _r3 _ _r4 _ _r5 _ _r6 _ _r7 _ _r8 _ _r9 _ _r10 _ _r11 _ _r12 _ _r13 _ _r14 _ _r15 _ _r16 _ _r17 _ _r18 _ _r19 _ _r20 _ _r21 _ _r22 _ _r23 _ _r24 _ _r25 _ _r26 _ _r27 _ _r28 _ _r29 _ _r30 _ _r31 ]; @else define register offset=0 size=4 [ _r0 _ _r1 _ _r2 _ _r3 _ _r4 _ _r5 _ _r6 _ _r7 _ _r8 _ _r9 _ _r10 _ _r11 _ _r12 _ _r13 _ _r14 _ _r15 _ _r16 _ _r17 _ _r18 _ _r19 _ _r20 _ _r21 _ _r22 _ _r23 _ _r24 _ _r25 _ _r26 _ _r27 _ _r28 _ _r29 _ _r30 _ _r31 _ ]; @endif @endif # XER flags define register offset=0x400 size=1 [ xer_so xer_ov xer_ov32 xer_ca xer_ca32 xer_count ]; define register offset=0x500 size=1 [ fp_fx fp_fex fp_vx fp_ox fp_ux fp_zx fp_xx fp_vxsnan fp_vxisi fp_vxidi fp_vxzdz fp_vximz fp_vxvc fp_fr fp_fi fp_c fp_cc0 fp_cc1 fp_cc2 fp_cc3 fp_reserve1 fp_vxsoft fp_vxsqrt fp_vxcvi fp_ve fp_oe fp_ue fp_ze fp_xe fp_ni fp_rn0 fp_rn1 ]; define register offset = 0x700 size =$(REGISTER_SIZE) [MSR]; define register offset = 0x720 size=$(REGISTER_SIZE) [RESERVE_ADDRESS]; define register offset = 0x728 size=1 [RESERVE]; define register offset = 0x730 size=1 [RESERVE_LENGTH]; # Program Counter register: This register is not actually visible in the # API for powerpc but it is needed to create a consistent model for the debugger define register offset=0x780 size=$(REGISTER_SIZE) pc; @define SEG_REGISTER_BASE "0x800" # Segment Registers define register offset=$(SEG_REGISTER_BASE) size=4 [ sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 sr8 sr9 sr10 sr11 sr12 sr13 sr14 sr15 ]; # Condition register flags define register offset=0x900 size=1 [ cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7 ]; define register offset=0x900 size=8 [ crall ]; define register 
offset=0x980 size=$(REGISTER_SIZE) [ tea ]; # Fake storage used to help preserve r2 across function calls within the decompiler (see appropriate cspec) define register offset=0x988 size=$(REGISTER_SIZE) [ r2Save ]; # Special Purpose Registers are defined with generic names with the exception of XER, LR, CTR, SRR0, SRR1, TBL(r/w), TBU(r/w) # These names may be replaced within register_data section within a PPC variant's pspec file define register offset=0x1000 size=$(REGISTER_SIZE) [ spr000 XER spr002 spr003 spr004 spr005 spr006 spr007 LR CTR spr00a spr00b spr00c spr00d spr00e spr00f spr010 spr011 spr012 spr013 spr014 spr015 spr016 spr017 spr018 spr019 SRR0 SRR1 spr01c spr01d spr01e spr01f spr020 spr021 spr022 spr023 spr024 spr025 spr026 spr027 spr028 spr029 spr02a spr02b spr02c spr02d spr02e spr02f spr030 spr031 spr032 spr033 spr034 spr035 spr036 spr037 spr038 spr039 CSRR0 CSRR1 spr03c spr03d spr03e spr03f spr040 spr041 spr042 spr043 spr044 spr045 spr046 spr047 spr048 spr049 spr04a spr04b spr04c spr04d spr04e spr04f spr050 spr051 spr052 spr053 spr054 spr055 spr056 spr057 spr058 spr059 spr05a spr05b spr05c spr05d spr05e spr05f spr060 spr061 spr062 spr063 spr064 spr065 spr066 spr067 spr068 spr069 spr06a spr06b spr06c spr06d spr06e spr06f spr070 spr071 spr072 spr073 spr074 spr075 spr076 spr077 spr078 spr079 spr07a spr07b spr07c spr07d spr07e spr07f spr080 spr081 spr082 spr083 spr084 spr085 spr086 spr087 spr088 spr089 spr08a spr08b spr08c spr08d spr08e spr08f spr090 spr091 spr092 spr093 spr094 spr095 spr096 spr097 spr098 spr099 spr09a spr09b spr09c spr09d spr09e spr09f spr0a0 spr0a1 spr0a2 spr0a3 spr0a4 spr0a5 spr0a6 spr0a7 spr0a8 spr0a9 spr0aa spr0ab spr0ac spr0ad spr0ae spr0af spr0b0 spr0b1 spr0b2 spr0b3 spr0b4 spr0b5 spr0b6 spr0b7 spr0b8 spr0b9 spr0ba spr0bb spr0bc spr0bd spr0be spr0bf spr0c0 spr0c1 spr0c2 spr0c3 spr0c4 spr0c5 spr0c6 spr0c7 spr0c8 spr0c9 spr0ca spr0cb spr0cc spr0cd spr0ce spr0cf spr0d0 spr0d1 spr0d2 spr0d3 spr0d4 spr0d5 spr0d6 spr0d7 spr0d8 spr0d9 
spr0da spr0db spr0dc spr0dd spr0de spr0df spr0e0 spr0e1 spr0e2 spr0e3 spr0e4 spr0e5 spr0e6 spr0e7 spr0e8 spr0e9 spr0ea spr0eb spr0ec spr0ed spr0ee spr0ef spr0f0 spr0f1 spr0f2 spr0f3 spr0f4 spr0f5 spr0f6 spr0f7 spr0f8 spr0f9 spr0fa spr0fb spr0fc spr0fd spr0fe spr0ff spr100 spr101 spr102 spr103 spr104 spr105 spr106 spr107 spr108 spr109 spr10a spr10b TBLr TBUr spr10e spr10f spr110 spr111 spr112 spr113 spr114 spr115 spr116 spr117 spr118 spr119 spr11a spr11b TBLw TBUw spr11e spr11f spr120 spr121 spr122 spr123 spr124 spr125 spr126 spr127 spr128 spr129 spr12a spr12b spr12c spr12d spr12e spr12f spr130 spr131 spr132 spr133 spr134 spr135 spr136 spr137 spr138 spr139 spr13a spr13b spr13c spr13d spr13e spr13f spr140 spr141 spr142 spr143 spr144 spr145 spr146 spr147 spr148 spr149 spr14a spr14b spr14c spr14d spr14e spr14f spr150 spr151 spr152 spr153 spr154 spr155 spr156 spr157 spr158 spr159 spr15a spr15b spr15c spr15d spr15e spr15f spr160 spr161 spr162 spr163 spr164 spr165 spr166 spr167 spr168 spr169 spr16a spr16b spr16c spr16d spr16e spr16f spr170 spr171 spr172 spr173 spr174 spr175 spr176 spr177 spr178 spr179 spr17a spr17b spr17c spr17d spr17e spr17f spr180 spr181 spr182 spr183 spr184 spr185 spr186 spr187 spr188 spr189 spr18a spr18b spr18c spr18d spr18e spr18f spr190 spr191 spr192 spr193 spr194 spr195 spr196 spr197 spr198 spr199 spr19a spr19b spr19c spr19d spr19e spr19f spr1a0 spr1a1 spr1a2 spr1a3 spr1a4 spr1a5 spr1a6 spr1a7 spr1a8 spr1a9 spr1aa spr1ab spr1ac spr1ad spr1ae spr1af spr1b0 spr1b1 spr1b2 spr1b3 spr1b4 spr1b5 spr1b6 spr1b7 spr1b8 spr1b9 spr1ba spr1bb spr1bc spr1bd spr1be spr1bf spr1c0 spr1c1 spr1c2 spr1c3 spr1c4 spr1c5 spr1c6 spr1c7 spr1c8 spr1c9 spr1ca spr1cb spr1cc spr1cd spr1ce spr1cf spr1d0 spr1d1 spr1d2 spr1d3 spr1d4 spr1d5 spr1d6 spr1d7 spr1d8 spr1d9 spr1da spr1db spr1dc spr1dd spr1de spr1df spr1e0 spr1e1 spr1e2 spr1e3 spr1e4 spr1e5 spr1e6 spr1e7 spr1e8 spr1e9 spr1ea spr1eb spr1ec spr1ed spr1ee spr1ef spr1f0 spr1f1 spr1f2 spr1f3 spr1f4 spr1f5 spr1f6 spr1f7 
spr1f8 spr1f9 spr1fa spr1fb spr1fc spr1fd spr1fe spr1ff spr200 spr201 spr202 spr203 spr204 spr205 spr206 spr207 spr208 spr209 spr20a spr20b spr20c spr20d spr20e spr20f spr210 spr211 spr212 spr213 spr214 spr215 spr216 spr217 spr218 spr219 spr21a spr21b spr21c spr21d spr21e spr21f spr220 spr221 spr222 spr223 spr224 spr225 spr226 spr227 spr228 spr229 spr22a spr22b spr22c spr22d spr22e spr22f spr230 spr231 spr232 spr233 spr234 spr235 spr236 spr237 spr238 spr239 spr23a spr23b spr23c spr23d spr23e spr23f spr240 spr241 spr242 spr243 spr244 spr245 spr246 spr247 spr248 spr249 spr24a spr24b spr24c spr24d spr24e spr24f spr250 spr251 spr252 spr253 spr254 spr255 spr256 spr257 spr258 spr259 spr25a spr25b spr25c spr25d spr25e spr25f spr260 spr261 spr262 spr263 spr264 spr265 spr266 spr267 spr268 spr269 spr26a spr26b spr26c spr26d spr26e spr26f spr270 spr271 spr272 spr273 spr274 spr275 spr276 spr277 spr278 spr279 spr27a spr27b spr27c spr27d spr27e spr27f spr280 spr281 spr282 spr283 spr284 spr285 spr286 spr287 spr288 spr289 spr28a spr28b spr28c spr28d spr28e spr28f spr290 spr291 spr292 spr293 spr294 spr295 spr296 spr297 spr298 spr299 spr29a spr29b spr29c spr29d spr29e spr29f spr2a0 spr2a1 spr2a2 spr2a3 spr2a4 spr2a5 spr2a6 spr2a7 spr2a8 spr2a9 spr2aa spr2ab spr2ac spr2ad spr2ae spr2af spr2b0 spr2b1 spr2b2 spr2b3 spr2b4 spr2b5 spr2b6 spr2b7 spr2b8 spr2b9 spr2ba spr2bb spr2bc spr2bd spr2be spr2bf spr2c0 spr2c1 spr2c2 spr2c3 spr2c4 spr2c5 spr2c6 spr2c7 spr2c8 spr2c9 spr2ca spr2cb spr2cc spr2cd spr2ce spr2cf spr2d0 spr2d1 spr2d2 spr2d3 spr2d4 spr2d5 spr2d6 spr2d7 spr2d8 spr2d9 spr2da spr2db spr2dc spr2dd spr2de spr2df spr2e0 spr2e1 spr2e2 spr2e3 spr2e4 spr2e5 spr2e6 spr2e7 spr2e8 spr2e9 spr2ea spr2eb spr2ec spr2ed spr2ee spr2ef spr2f0 spr2f1 spr2f2 spr2f3 spr2f4 spr2f5 spr2f6 spr2f7 spr2f8 spr2f9 spr2fa spr2fb spr2fc spr2fd spr2fe spr2ff spr300 spr301 spr302 spr303 spr304 spr305 spr306 spr307 spr308 spr309 spr30a spr30b spr30c spr30d spr30e spr30f spr310 spr311 spr312 spr313 spr314 
spr315 spr316 spr317 spr318 spr319 spr31a spr31b spr31c spr31d spr31e spr31f spr320 spr321 spr322 spr323 spr324 spr325 spr326 spr327 spr328 spr329 spr32a spr32b spr32c spr32d spr32e TAR spr330 spr331 spr332 spr333 spr334 spr335 spr336 spr337 spr338 spr339 spr33a spr33b spr33c spr33d spr33e spr33f spr340 spr341 spr342 spr343 spr344 spr345 spr346 spr347 spr348 spr349 spr34a spr34b spr34c spr34d spr34e spr34f spr350 spr351 spr352 spr353 spr354 spr355 spr356 spr357 spr358 spr359 spr35a spr35b spr35c spr35d spr35e spr35f spr360 spr361 spr362 spr363 spr364 spr365 spr366 spr367 spr368 spr369 spr36a spr36b spr36c spr36d spr36e spr36f spr370 spr371 spr372 spr373 spr374 spr375 spr376 spr377 spr378 spr379 spr37a spr37b spr37c spr37d spr37e spr37f spr380 spr381 spr382 spr383 spr384 spr385 spr386 spr387 spr388 spr389 spr38a spr38b spr38c spr38d spr38e spr38f spr390 spr391 spr392 spr393 spr394 spr395 spr396 spr397 spr398 spr399 spr39a spr39b spr39c spr39d spr39e spr39f spr3a0 spr3a1 spr3a2 spr3a3 spr3a4 spr3a5 spr3a6 spr3a7 spr3a8 spr3a9 spr3aa spr3ab spr3ac spr3ad spr3ae spr3af spr3b0 spr3b1 spr3b2 spr3b3 spr3b4 spr3b5 spr3b6 spr3b7 spr3b8 spr3b9 spr3ba spr3bb spr3bc spr3bd spr3be spr3bf spr3c0 spr3c1 spr3c2 spr3c3 spr3c4 spr3c5 spr3c6 spr3c7 spr3c8 spr3c9 spr3ca spr3cb spr3cc spr3cd spr3ce spr3cf spr3d0 spr3d1 spr3d2 spr3d3 spr3d4 spr3d5 spr3d6 spr3d7 spr3d8 spr3d9 spr3da spr3db spr3dc spr3dd spr3de spr3df spr3e0 spr3e1 spr3e2 spr3e3 spr3e4 spr3e5 spr3e6 spr3e7 spr3e8 spr3e9 spr3ea spr3eb spr3ec spr3ed spr3ee spr3ef spr3f0 spr3f1 spr3f2 spr3f3 spr3f4 spr3f5 spr3f6 spr3f7 spr3f8 spr3f9 spr3fa spr3fb spr3fc spr3fd spr3fe spr3ff ]; # The floating point registers and the altivec vector registers OVERLAP to VSX registers # This was not done correctly before and has now been fixed. Book 1, Chapter 7.2 has a # very good diagram. 
# Support for Vector-Scalar Extension - i.e "VSX" define register offset=0x4000 size=16 [ vs0 vs1 vs2 vs3 vs4 vs5 vs6 vs7 vs8 vs9 vs10 vs11 vs12 vs13 vs14 vs15 vs16 vs17 vs18 vs19 vs20 vs21 vs22 vs23 vs24 vs25 vs26 vs27 vs28 vs29 vs30 vs31 vs32 vs33 vs34 vs35 vs36 vs37 vs38 vs39 vs40 vs41 vs42 vs43 vs44 vs45 vs46 vs47 vs48 vs49 vs50 vs51 vs52 vs53 vs54 vs55 vs56 vs57 vs58 vs59 vs60 vs61 vs62 vs63 ]; # Floating point registers # These overlay the first 32 vsx regs with the gaps as indicated so fr0 is in vs0, fr1 is in vs1, etc. # This also means we have to have 2 defs of this due to endian stuff. @if ENDIAN == "big" define register offset=0x4000 size=8 [ f0 _ f1 _ f2 _ f3 _ f4 _ f5 _ f6 _ f7 _ f8 _ f9 _ f10 _ f11 _ f12 _ f13 _ f14 _ f15 _ f16 _ f17 _ f18 _ f19 _ f20 _ f21 _ f22 _ f23 _ f24 _ f25 _ f26 _ f27 _ f28 _ f29 _ f30 _ f31 _ ]; @else define register offset=0x4000 size=8 [ _ f0 _ f1 _ f2 _ f3 _ f4 _ f5 _ f6 _ f7 _ f8 _ f9 _ f10 _ f11 _ f12 _ f13 _ f14 _ f15 _ f16 _ f17 _ f18 _ f19 _ f20 _ f21 _ f22 _ f23 _ f24 _ f25 _ f26 _ f27 _ f28 _ f29 _ f30 _ f31 ]; @endif # All the altivec regs need to start at offset 0x4200 # Sleigh does not allow registers of the same size to overlay. This presents some issues as the normal # Altivec registers overlay the top 32 VSX registers. What we have to do is use a sub-table to display # the Altivec name, but export the matching VSX register. The original vrD, etc. tokens are now sub-tables. 
# Altivec vector registers (accessed by vrD vrA vrB vrS vrC) # Altivec vector registers #define register offset=0x4200 size=16 [ # v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 # v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; @if ENDIAN == "big" # Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 8 byte subregisters define register offset=0x4200 size=8 [ # 64 bit access to vrN registers (psydo-registers) (accessed by vrD_64_N vrA_64_N vrB_64_N vrS_64_N vrC_64_N) vr0_64_0 vr0_64_1 vr1_64_0 vr1_64_1 vr2_64_0 vr2_64_1 vr3_64_0 vr3_64_1 vr4_64_0 vr4_64_1 vr5_64_0 vr5_64_1 vr6_64_0 vr6_64_1 vr7_64_0 vr7_64_1 vr8_64_0 vr8_64_1 vr9_64_0 vr9_64_1 vr10_64_0 vr10_64_1 vr11_64_0 vr11_64_1 vr12_64_0 vr12_64_1 vr13_64_0 vr13_64_1 vr14_64_0 vr14_64_1 vr15_64_0 vr15_64_1 vr16_64_0 vr16_64_1 vr17_64_0 vr17_64_1 vr18_64_0 vr18_64_1 vr19_64_0 vr19_64_1 vr20_64_0 vr20_64_1 vr21_64_0 vr21_64_1 vr22_64_0 vr22_64_1 vr23_64_0 vr23_64_1 vr24_64_0 vr24_64_1 vr25_64_0 vr25_64_1 vr26_64_0 vr26_64_1 vr27_64_0 vr27_64_1 vr28_64_0 vr28_64_1 vr29_64_0 vr29_64_1 vr30_64_0 vr30_64_1 vr31_64_0 vr31_64_1 ]; # Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 4 byte subregisters define register offset=0x4200 size=4 [ # 32 bit access to vrN registers (psydo-registers) (accessed by vrD_32_N vrA_32_N vrB_32_N vrS_32_N vrC_32_N) vr0_32_0 vr0_32_1 vr0_32_2 vr0_32_3 vr1_32_0 vr1_32_1 vr1_32_2 vr1_32_3 vr2_32_0 vr2_32_1 vr2_32_2 vr2_32_3 vr3_32_0 vr3_32_1 vr3_32_2 vr3_32_3 vr4_32_0 vr4_32_1 vr4_32_2 vr4_32_3 vr5_32_0 vr5_32_1 vr5_32_2 vr5_32_3 vr6_32_0 vr6_32_1 vr6_32_2 vr6_32_3 vr7_32_0 vr7_32_1 vr7_32_2 vr7_32_3 vr8_32_0 vr8_32_1 vr8_32_2 vr8_32_3 vr9_32_0 vr9_32_1 vr9_32_2 vr9_32_3 vr10_32_0 vr10_32_1 vr10_32_2 vr10_32_3 vr11_32_0 vr11_32_1 vr11_32_2 vr11_32_3 vr12_32_0 vr12_32_1 vr12_32_2 vr12_32_3 vr13_32_0 vr13_32_1 vr13_32_2 vr13_32_3 vr14_32_0 vr14_32_1 
vr14_32_2 vr14_32_3 vr15_32_0 vr15_32_1 vr15_32_2 vr15_32_3 vr16_32_0 vr16_32_1 vr16_32_2 vr16_32_3 vr17_32_0 vr17_32_1 vr17_32_2 vr17_32_3 vr18_32_0 vr18_32_1 vr18_32_2 vr18_32_3 vr19_32_0 vr19_32_1 vr19_32_2 vr19_32_3 vr20_32_0 vr20_32_1 vr20_32_2 vr20_32_3 vr21_32_0 vr21_32_1 vr21_32_2 vr21_32_3 vr22_32_0 vr22_32_1 vr22_32_2 vr22_32_3 vr23_32_0 vr23_32_1 vr23_32_2 vr23_32_3 vr24_32_0 vr24_32_1 vr24_32_2 vr24_32_3 vr25_32_0 vr25_32_1 vr25_32_2 vr25_32_3 vr26_32_0 vr26_32_1 vr26_32_2 vr26_32_3 vr27_32_0 vr27_32_1 vr27_32_2 vr27_32_3 vr28_32_0 vr28_32_1 vr28_32_2 vr28_32_3 vr29_32_0 vr29_32_1 vr29_32_2 vr29_32_3 vr30_32_0 vr30_32_1 vr30_32_2 vr30_32_3 vr31_32_0 vr31_32_1 vr31_32_2 vr31_32_3 ]; # Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 2 byte subregisters define register offset=0x4200 size=2 [ # 16 bit access to vrN registers (psydo-registers) (accessed by vrD_16_N vrA_16_N vrB_16_N vrS_16_N vrC_16_N) vr0_16_0 vr0_16_1 vr0_16_2 vr0_16_3 vr0_16_4 vr0_16_5 vr0_16_6 vr0_16_7 vr1_16_0 vr1_16_1 vr1_16_2 vr1_16_3 vr1_16_4 vr1_16_5 vr1_16_6 vr1_16_7 vr2_16_0 vr2_16_1 vr2_16_2 vr2_16_3 vr2_16_4 vr2_16_5 vr2_16_6 vr2_16_7 vr3_16_0 vr3_16_1 vr3_16_2 vr3_16_3 vr3_16_4 vr3_16_5 vr3_16_6 vr3_16_7 vr4_16_0 vr4_16_1 vr4_16_2 vr4_16_3 vr4_16_4 vr4_16_5 vr4_16_6 vr4_16_7 vr5_16_0 vr5_16_1 vr5_16_2 vr5_16_3 vr5_16_4 vr5_16_5 vr5_16_6 vr5_16_7 vr6_16_0 vr6_16_1 vr6_16_2 vr6_16_3 vr6_16_4 vr6_16_5 vr6_16_6 vr6_16_7 vr7_16_0 vr7_16_1 vr7_16_2 vr7_16_3 vr7_16_4 vr7_16_5 vr7_16_6 vr7_16_7 vr8_16_0 vr8_16_1 vr8_16_2 vr8_16_3 vr8_16_4 vr8_16_5 vr8_16_6 vr8_16_7 vr9_16_0 vr9_16_1 vr9_16_2 vr9_16_3 vr9_16_4 vr9_16_5 vr9_16_6 vr9_16_7 vr10_16_0 vr10_16_1 vr10_16_2 vr10_16_3 vr10_16_4 vr10_16_5 vr10_16_6 vr10_16_7 vr11_16_0 vr11_16_1 vr11_16_2 vr11_16_3 vr11_16_4 vr11_16_5 vr11_16_6 vr11_16_7 vr12_16_0 vr12_16_1 vr12_16_2 vr12_16_3 vr12_16_4 vr12_16_5 vr12_16_6 vr12_16_7 vr13_16_0 vr13_16_1 vr13_16_2 vr13_16_3 vr13_16_4 vr13_16_5 
vr13_16_6 vr13_16_7 vr14_16_0 vr14_16_1 vr14_16_2 vr14_16_3 vr14_16_4 vr14_16_5 vr14_16_6 vr14_16_7 vr15_16_0 vr15_16_1 vr15_16_2 vr15_16_3 vr15_16_4 vr15_16_5 vr15_16_6 vr15_16_7 vr16_16_0 vr16_16_1 vr16_16_2 vr16_16_3 vr16_16_4 vr16_16_5 vr16_16_6 vr16_16_7 vr17_16_0 vr17_16_1 vr17_16_2 vr17_16_3 vr17_16_4 vr17_16_5 vr17_16_6 vr17_16_7 vr18_16_0 vr18_16_1 vr18_16_2 vr18_16_3 vr18_16_4 vr18_16_5 vr18_16_6 vr18_16_7 vr19_16_0 vr19_16_1 vr19_16_2 vr19_16_3 vr19_16_4 vr19_16_5 vr19_16_6 vr19_16_7 vr20_16_0 vr20_16_1 vr20_16_2 vr20_16_3 vr20_16_4 vr20_16_5 vr20_16_6 vr20_16_7 vr21_16_0 vr21_16_1 vr21_16_2 vr21_16_3 vr21_16_4 vr21_16_5 vr21_16_6 vr21_16_7 vr22_16_0 vr22_16_1 vr22_16_2 vr22_16_3 vr22_16_4 vr22_16_5 vr22_16_6 vr22_16_7 vr23_16_0 vr23_16_1 vr23_16_2 vr23_16_3 vr23_16_4 vr23_16_5 vr23_16_6 vr23_16_7 vr24_16_0 vr24_16_1 vr24_16_2 vr24_16_3 vr24_16_4 vr24_16_5 vr24_16_6 vr24_16_7 vr25_16_0 vr25_16_1 vr25_16_2 vr25_16_3 vr25_16_4 vr25_16_5 vr25_16_6 vr25_16_7 vr26_16_0 vr26_16_1 vr26_16_2 vr26_16_3 vr26_16_4 vr26_16_5 vr26_16_6 vr26_16_7 vr27_16_0 vr27_16_1 vr27_16_2 vr27_16_3 vr27_16_4 vr27_16_5 vr27_16_6 vr27_16_7 vr28_16_0 vr28_16_1 vr28_16_2 vr28_16_3 vr28_16_4 vr28_16_5 vr28_16_6 vr28_16_7 vr29_16_0 vr29_16_1 vr29_16_2 vr29_16_3 vr29_16_4 vr29_16_5 vr29_16_6 vr29_16_7 vr30_16_0 vr30_16_1 vr30_16_2 vr30_16_3 vr30_16_4 vr30_16_5 vr30_16_6 vr30_16_7 vr31_16_0 vr31_16_1 vr31_16_2 vr31_16_3 vr31_16_4 vr31_16_5 vr31_16_6 vr31_16_7 ]; # Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 1 byte subregisters define register offset=0x4200 size=1 [ # 8 bit access to vrN registers (psydo-registers) (accessed by vrD_8_N vrA_8_N vrB_8_N vrS_8_N vrC_8_N) vr0_8_0 vr0_8_1 vr0_8_2 vr0_8_3 vr0_8_4 vr0_8_5 vr0_8_6 vr0_8_7 vr0_8_8 vr0_8_9 vr0_8_10 vr0_8_11 vr0_8_12 vr0_8_13 vr0_8_14 vr0_8_15 vr1_8_0 vr1_8_1 vr1_8_2 vr1_8_3 vr1_8_4 vr1_8_5 vr1_8_6 vr1_8_7 vr1_8_8 vr1_8_9 vr1_8_10 vr1_8_11 vr1_8_12 vr1_8_13 vr1_8_14 
vr1_8_15 vr2_8_0 vr2_8_1 vr2_8_2 vr2_8_3 vr2_8_4 vr2_8_5 vr2_8_6 vr2_8_7 vr2_8_8 vr2_8_9 vr2_8_10 vr2_8_11 vr2_8_12 vr2_8_13 vr2_8_14 vr2_8_15 vr3_8_0 vr3_8_1 vr3_8_2 vr3_8_3 vr3_8_4 vr3_8_5 vr3_8_6 vr3_8_7 vr3_8_8 vr3_8_9 vr3_8_10 vr3_8_11 vr3_8_12 vr3_8_13 vr3_8_14 vr3_8_15 vr4_8_0 vr4_8_1 vr4_8_2 vr4_8_3 vr4_8_4 vr4_8_5 vr4_8_6 vr4_8_7 vr4_8_8 vr4_8_9 vr4_8_10 vr4_8_11 vr4_8_12 vr4_8_13 vr4_8_14 vr4_8_15 vr5_8_0 vr5_8_1 vr5_8_2 vr5_8_3 vr5_8_4 vr5_8_5 vr5_8_6 vr5_8_7 vr5_8_8 vr5_8_9 vr5_8_10 vr5_8_11 vr5_8_12 vr5_8_13 vr5_8_14 vr5_8_15 vr6_8_0 vr6_8_1 vr6_8_2 vr6_8_3 vr6_8_4 vr6_8_5 vr6_8_6 vr6_8_7 vr6_8_8 vr6_8_9 vr6_8_10 vr6_8_11 vr6_8_12 vr6_8_13 vr6_8_14 vr6_8_15 vr7_8_0 vr7_8_1 vr7_8_2 vr7_8_3 vr7_8_4 vr7_8_5 vr7_8_6 vr7_8_7 vr7_8_8 vr7_8_9 vr7_8_10 vr7_8_11 vr7_8_12 vr7_8_13 vr7_8_14 vr7_8_15 vr8_8_0 vr8_8_1 vr8_8_2 vr8_8_3 vr8_8_4 vr8_8_5 vr8_8_6 vr8_8_7 vr8_8_8 vr8_8_9 vr8_8_10 vr8_8_11 vr8_8_12 vr8_8_13 vr8_8_14 vr8_8_15 vr9_8_0 vr9_8_1 vr9_8_2 vr9_8_3 vr9_8_4 vr9_8_5 vr9_8_6 vr9_8_7 vr9_8_8 vr9_8_9 vr9_8_10 vr9_8_11 vr9_8_12 vr9_8_13 vr9_8_14 vr9_8_15 vr10_8_0 vr10_8_1 vr10_8_2 vr10_8_3 vr10_8_4 vr10_8_5 vr10_8_6 vr10_8_7 vr10_8_8 vr10_8_9 vr10_8_10 vr10_8_11 vr10_8_12 vr10_8_13 vr10_8_14 vr10_8_15 vr11_8_0 vr11_8_1 vr11_8_2 vr11_8_3 vr11_8_4 vr11_8_5 vr11_8_6 vr11_8_7 vr11_8_8 vr11_8_9 vr11_8_10 vr11_8_11 vr11_8_12 vr11_8_13 vr11_8_14 vr11_8_15 vr12_8_0 vr12_8_1 vr12_8_2 vr12_8_3 vr12_8_4 vr12_8_5 vr12_8_6 vr12_8_7 vr12_8_8 vr12_8_9 vr12_8_10 vr12_8_11 vr12_8_12 vr12_8_13 vr12_8_14 vr12_8_15 vr13_8_0 vr13_8_1 vr13_8_2 vr13_8_3 vr13_8_4 vr13_8_5 vr13_8_6 vr13_8_7 vr13_8_8 vr13_8_9 vr13_8_10 vr13_8_11 vr13_8_12 vr13_8_13 vr13_8_14 vr13_8_15 vr14_8_0 vr14_8_1 vr14_8_2 vr14_8_3 vr14_8_4 vr14_8_5 vr14_8_6 vr14_8_7 vr14_8_8 vr14_8_9 vr14_8_10 vr14_8_11 vr14_8_12 vr14_8_13 vr14_8_14 vr14_8_15 vr15_8_0 vr15_8_1 vr15_8_2 vr15_8_3 vr15_8_4 vr15_8_5 vr15_8_6 vr15_8_7 vr15_8_8 vr15_8_9 vr15_8_10 vr15_8_11 vr15_8_12 vr15_8_13 vr15_8_14 vr15_8_15 vr16_8_0 vr16_8_1 
vr16_8_2 vr16_8_3 vr16_8_4 vr16_8_5 vr16_8_6 vr16_8_7 vr16_8_8 vr16_8_9 vr16_8_10 vr16_8_11 vr16_8_12 vr16_8_13 vr16_8_14 vr16_8_15 vr17_8_0 vr17_8_1 vr17_8_2 vr17_8_3 vr17_8_4 vr17_8_5 vr17_8_6 vr17_8_7 vr17_8_8 vr17_8_9 vr17_8_10 vr17_8_11 vr17_8_12 vr17_8_13 vr17_8_14 vr17_8_15 vr18_8_0 vr18_8_1 vr18_8_2 vr18_8_3 vr18_8_4 vr18_8_5 vr18_8_6 vr18_8_7 vr18_8_8 vr18_8_9 vr18_8_10 vr18_8_11 vr18_8_12 vr18_8_13 vr18_8_14 vr18_8_15 vr19_8_0 vr19_8_1 vr19_8_2 vr19_8_3 vr19_8_4 vr19_8_5 vr19_8_6 vr19_8_7 vr19_8_8 vr19_8_9 vr19_8_10 vr19_8_11 vr19_8_12 vr19_8_13 vr19_8_14 vr19_8_15 vr20_8_0 vr20_8_1 vr20_8_2 vr20_8_3 vr20_8_4 vr20_8_5 vr20_8_6 vr20_8_7 vr20_8_8 vr20_8_9 vr20_8_10 vr20_8_11 vr20_8_12 vr20_8_13 vr20_8_14 vr20_8_15 vr21_8_0 vr21_8_1 vr21_8_2 vr21_8_3 vr21_8_4 vr21_8_5 vr21_8_6 vr21_8_7 vr21_8_8 vr21_8_9 vr21_8_10 vr21_8_11 vr21_8_12 vr21_8_13 vr21_8_14 vr21_8_15 vr22_8_0 vr22_8_1 vr22_8_2 vr22_8_3 vr22_8_4 vr22_8_5 vr22_8_6 vr22_8_7 vr22_8_8 vr22_8_9 vr22_8_10 vr22_8_11 vr22_8_12 vr22_8_13 vr22_8_14 vr22_8_15 vr23_8_0 vr23_8_1 vr23_8_2 vr23_8_3 vr23_8_4 vr23_8_5 vr23_8_6 vr23_8_7 vr23_8_8 vr23_8_9 vr23_8_10 vr23_8_11 vr23_8_12 vr23_8_13 vr23_8_14 vr23_8_15 vr24_8_0 vr24_8_1 vr24_8_2 vr24_8_3 vr24_8_4 vr24_8_5 vr24_8_6 vr24_8_7 vr24_8_8 vr24_8_9 vr24_8_10 vr24_8_11 vr24_8_12 vr24_8_13 vr24_8_14 vr24_8_15 vr25_8_0 vr25_8_1 vr25_8_2 vr25_8_3 vr25_8_4 vr25_8_5 vr25_8_6 vr25_8_7 vr25_8_8 vr25_8_9 vr25_8_10 vr25_8_11 vr25_8_12 vr25_8_13 vr25_8_14 vr25_8_15 vr26_8_0 vr26_8_1 vr26_8_2 vr26_8_3 vr26_8_4 vr26_8_5 vr26_8_6 vr26_8_7 vr26_8_8 vr26_8_9 vr26_8_10 vr26_8_11 vr26_8_12 vr26_8_13 vr26_8_14 vr26_8_15 vr27_8_0 vr27_8_1 vr27_8_2 vr27_8_3 vr27_8_4 vr27_8_5 vr27_8_6 vr27_8_7 vr27_8_8 vr27_8_9 vr27_8_10 vr27_8_11 vr27_8_12 vr27_8_13 vr27_8_14 vr27_8_15 vr28_8_0 vr28_8_1 vr28_8_2 vr28_8_3 vr28_8_4 vr28_8_5 vr28_8_6 vr28_8_7 vr28_8_8 vr28_8_9 vr28_8_10 vr28_8_11 vr28_8_12 vr28_8_13 vr28_8_14 vr28_8_15 vr29_8_0 vr29_8_1 vr29_8_2 vr29_8_3 vr29_8_4 vr29_8_5 vr29_8_6 
vr29_8_7 vr29_8_8 vr29_8_9 vr29_8_10 vr29_8_11 vr29_8_12 vr29_8_13 vr29_8_14 vr29_8_15 vr30_8_0 vr30_8_1 vr30_8_2 vr30_8_3 vr30_8_4 vr30_8_5 vr30_8_6 vr30_8_7 vr30_8_8 vr30_8_9 vr30_8_10 vr30_8_11 vr30_8_12 vr30_8_13 vr30_8_14 vr30_8_15 vr31_8_0 vr31_8_1 vr31_8_2 vr31_8_3 vr31_8_4 vr31_8_5 vr31_8_6 vr31_8_7 vr31_8_8 vr31_8_9 vr31_8_10 vr31_8_11 vr31_8_12 vr31_8_13 vr31_8_14 vr31_8_15 ]; @else define register offset=0x4200 size=8 [ # 64 bit access to vrN registers (psydo-registers) (accessed by vrD_64_N vrA_64_N vrB_64_N vrS_64_N vrC_64_N) vr0_64_1 vr0_64_0 vr1_64_1 vr1_64_0 vr2_64_1 vr2_64_0 vr3_64_1 vr3_64_0 vr4_64_1 vr4_64_0 vr5_64_1 vr5_64_0 vr6_64_1 vr6_64_0 vr7_64_1 vr7_64_0 vr8_64_1 vr8_64_0 vr9_64_1 vr9_64_0 vr10_64_1 vr10_64_0 vr11_64_1 vr11_64_0 vr12_64_1 vr12_64_0 vr13_64_1 vr13_64_0 vr14_64_1 vr14_64_0 vr15_64_1 vr15_64_0 vr16_64_1 vr16_64_0 vr17_64_1 vr17_64_0 vr18_64_1 vr18_64_0 vr19_64_1 vr19_64_0 vr20_64_1 vr20_64_0 vr21_64_1 vr21_64_0 vr22_64_1 vr22_64_0 vr23_64_1 vr23_64_0 vr24_64_1 vr24_64_0 vr25_64_1 vr25_64_0 vr26_64_1 vr26_64_0 vr27_64_1 vr27_64_0 vr28_64_1 vr28_64_0 vr29_64_1 vr29_64_0 vr30_64_1 vr30_64_0 vr31_64_1 vr31_64_0 ]; define register offset=0x4200 size=4 [ # 32 bit access to vrN registers (psydo-registers) (accessed by vrD_32_N vrA_32_N vrB_32_N vrS_32_N vrC_32_N) vr0_32_3 vr0_32_2 vr0_32_1 vr0_32_0 vr1_32_3 vr1_32_2 vr1_32_1 vr1_32_0 vr2_32_3 vr2_32_2 vr2_32_1 vr2_32_0 vr3_32_3 vr3_32_2 vr3_32_1 vr3_32_0 vr4_32_3 vr4_32_2 vr4_32_1 vr4_32_0 vr5_32_3 vr5_32_2 vr5_32_1 vr5_32_0 vr6_32_3 vr6_32_2 vr6_32_1 vr6_32_0 vr7_32_3 vr7_32_2 vr7_32_1 vr7_32_0 vr8_32_3 vr8_32_2 vr8_32_1 vr8_32_0 vr9_32_3 vr9_32_2 vr9_32_1 vr9_32_0 vr10_32_3 vr10_32_2 vr10_32_1 vr10_32_0 vr11_32_3 vr11_32_2 vr11_32_1 vr11_32_0 vr12_32_3 vr12_32_2 vr12_32_1 vr12_32_0 vr13_32_3 vr13_32_2 vr13_32_1 vr13_32_0 vr14_32_3 vr14_32_2 vr14_32_1 vr14_32_0 vr15_32_3 vr15_32_2 vr15_32_1 vr15_32_0 vr16_32_3 vr16_32_2 vr16_32_1 vr16_32_0 vr17_32_3 vr17_32_2 vr17_32_1 vr17_32_0 
vr18_32_3 vr18_32_2 vr18_32_1 vr18_32_0 vr19_32_3 vr19_32_2 vr19_32_1 vr19_32_0 vr20_32_3 vr20_32_2 vr20_32_1 vr20_32_0 vr21_32_3 vr21_32_2 vr21_32_1 vr21_32_0 vr22_32_3 vr22_32_2 vr22_32_1 vr22_32_0 vr23_32_3 vr23_32_2 vr23_32_1 vr23_32_0 vr24_32_3 vr24_32_2 vr24_32_1 vr24_32_0 vr25_32_3 vr25_32_2 vr25_32_1 vr25_32_0 vr26_32_3 vr26_32_2 vr26_32_1 vr26_32_0 vr27_32_3 vr27_32_2 vr27_32_1 vr27_32_0 vr28_32_3 vr28_32_2 vr28_32_1 vr28_32_0 vr29_32_3 vr29_32_2 vr29_32_1 vr29_32_0 vr30_32_3 vr30_32_2 vr30_32_1 vr30_32_0 vr31_32_3 vr31_32_2 vr31_32_1 vr31_32_0 ]; # Create psydo sub-registers for the Altivec vector registers to allow better easier vector instructions by 2 byte subregisters define register offset=0x4200 size=2 [ # 16 bit access to vrN registers (psydo-registers) (accessed by vrD_16_N vrA_16_N vrB_16_N vrS_16_N vrC_16_N) vr0_16_7 vr0_16_6 vr0_16_5 vr0_16_4 vr0_16_3 vr0_16_2 vr0_16_1 vr0_16_0 vr1_16_7 vr1_16_6 vr1_16_5 vr1_16_4 vr1_16_3 vr1_16_2 vr1_16_1 vr1_16_0 vr2_16_7 vr2_16_6 vr2_16_5 vr2_16_4 vr2_16_3 vr2_16_2 vr2_16_1 vr2_16_0 vr3_16_7 vr3_16_6 vr3_16_5 vr3_16_4 vr3_16_3 vr3_16_2 vr3_16_1 vr3_16_0 vr4_16_7 vr4_16_6 vr4_16_5 vr4_16_4 vr4_16_3 vr4_16_2 vr4_16_1 vr4_16_0 vr5_16_7 vr5_16_6 vr5_16_5 vr5_16_4 vr5_16_3 vr5_16_2 vr5_16_1 vr5_16_0 vr6_16_7 vr6_16_6 vr6_16_5 vr6_16_4 vr6_16_3 vr6_16_2 vr6_16_1 vr6_16_0 vr7_16_7 vr7_16_6 vr7_16_5 vr7_16_4 vr7_16_3 vr7_16_2 vr7_16_1 vr7_16_0 vr8_16_7 vr8_16_6 vr8_16_5 vr8_16_4 vr8_16_3 vr8_16_2 vr8_16_1 vr8_16_0 vr9_16_7 vr9_16_6 vr9_16_5 vr9_16_4 vr9_16_3 vr9_16_2 vr9_16_1 vr9_16_0 vr10_16_7 vr10_16_6 vr10_16_5 vr10_16_4 vr10_16_3 vr10_16_2 vr10_16_1 vr10_16_0 vr11_16_7 vr11_16_6 vr11_16_5 vr11_16_4 vr11_16_3 vr11_16_2 vr11_16_1 vr11_16_0 vr12_16_7 vr12_16_6 vr12_16_5 vr12_16_4 vr12_16_3 vr12_16_2 vr12_16_1 vr12_16_0 vr13_16_7 vr13_16_6 vr13_16_5 vr13_16_4 vr13_16_3 vr13_16_2 vr13_16_1 vr13_16_0 vr14_16_7 vr14_16_6 vr14_16_5 vr14_16_4 vr14_16_3 vr14_16_2 vr14_16_1 vr14_16_0 vr15_16_7 vr15_16_6 vr15_16_5 vr15_16_4 
# Create pseudo sub-registers for the Altivec vector registers to allow easier handling of vector instructions via 1-byte subregisters
vr3_8_15 vr3_8_14 vr3_8_13 vr3_8_12 vr3_8_11 vr3_8_10 vr3_8_9 vr3_8_8 vr3_8_7 vr3_8_6 vr3_8_5 vr3_8_4 vr3_8_3 vr3_8_2 vr3_8_1 vr3_8_0 vr4_8_15 vr4_8_14 vr4_8_13 vr4_8_12 vr4_8_11 vr4_8_10 vr4_8_9 vr4_8_8 vr4_8_7 vr4_8_6 vr4_8_5 vr4_8_4 vr4_8_3 vr4_8_2 vr4_8_1 vr4_8_0 vr5_8_15 vr5_8_14 vr5_8_13 vr5_8_12 vr5_8_11 vr5_8_10 vr5_8_9 vr5_8_8 vr5_8_7 vr5_8_6 vr5_8_5 vr5_8_4 vr5_8_3 vr5_8_2 vr5_8_1 vr5_8_0 vr6_8_15 vr6_8_14 vr6_8_13 vr6_8_12 vr6_8_11 vr6_8_10 vr6_8_9 vr6_8_8 vr6_8_7 vr6_8_6 vr6_8_5 vr6_8_4 vr6_8_3 vr6_8_2 vr6_8_1 vr6_8_0 vr7_8_15 vr7_8_14 vr7_8_13 vr7_8_12 vr7_8_11 vr7_8_10 vr7_8_9 vr7_8_8 vr7_8_7 vr7_8_6 vr7_8_5 vr7_8_4 vr7_8_3 vr7_8_2 vr7_8_1 vr7_8_0 vr8_8_15 vr8_8_14 vr8_8_13 vr8_8_12 vr8_8_11 vr8_8_10 vr8_8_9 vr8_8_8 vr8_8_7 vr8_8_6 vr8_8_5 vr8_8_4 vr8_8_3 vr8_8_2 vr8_8_1 vr8_8_0 vr9_8_15 vr9_8_14 vr9_8_13 vr9_8_12 vr9_8_11 vr9_8_10 vr9_8_9 vr9_8_8 vr9_8_7 vr9_8_6 vr9_8_5 vr9_8_4 vr9_8_3 vr9_8_2 vr9_8_1 vr9_8_0 vr10_8_15 vr10_8_14 vr10_8_13 vr10_8_12 vr10_8_11 vr10_8_10 vr10_8_9 vr10_8_8 vr10_8_7 vr10_8_6 vr10_8_5 vr10_8_4 vr10_8_3 vr10_8_2 vr10_8_1 vr10_8_0 vr11_8_15 vr11_8_14 vr11_8_13 vr11_8_12 vr11_8_11 vr11_8_10 vr11_8_9 vr11_8_8 vr11_8_7 vr11_8_6 vr11_8_5 vr11_8_4 vr11_8_3 vr11_8_2 vr11_8_1 vr11_8_0 vr12_8_15 vr12_8_14 vr12_8_13 vr12_8_12 vr12_8_11 vr12_8_10 vr12_8_9 vr12_8_8 vr12_8_7 vr12_8_6 vr12_8_5 vr12_8_4 vr12_8_3 vr12_8_2 vr12_8_1 vr12_8_0 vr13_8_15 vr13_8_14 vr13_8_13 vr13_8_12 vr13_8_11 vr13_8_10 vr13_8_9 vr13_8_8 vr13_8_7 vr13_8_6 vr13_8_5 vr13_8_4 vr13_8_3 vr13_8_2 vr13_8_1 vr13_8_0 vr14_8_15 vr14_8_14 vr14_8_13 vr14_8_12 vr14_8_11 vr14_8_10 vr14_8_9 vr14_8_8 vr14_8_7 vr14_8_6 vr14_8_5 vr14_8_4 vr14_8_3 vr14_8_2 vr14_8_1 vr14_8_0 vr15_8_15 vr15_8_14 vr15_8_13 vr15_8_12 vr15_8_11 vr15_8_10 vr15_8_9 vr15_8_8 vr15_8_7 vr15_8_6 vr15_8_5 vr15_8_4 vr15_8_3 vr15_8_2 vr15_8_1 vr15_8_0 vr16_8_15 vr16_8_14 vr16_8_13 vr16_8_12 vr16_8_11 vr16_8_10 vr16_8_9 vr16_8_8 vr16_8_7 vr16_8_6 vr16_8_5 vr16_8_4 vr16_8_3 vr16_8_2 vr16_8_1 vr16_8_0 vr17_8_15 
vr17_8_14 vr17_8_13 vr17_8_12 vr17_8_11 vr17_8_10 vr17_8_9 vr17_8_8 vr17_8_7 vr17_8_6 vr17_8_5 vr17_8_4 vr17_8_3 vr17_8_2 vr17_8_1 vr17_8_0 vr18_8_15 vr18_8_14 vr18_8_13 vr18_8_12 vr18_8_11 vr18_8_10 vr18_8_9 vr18_8_8 vr18_8_7 vr18_8_6 vr18_8_5 vr18_8_4 vr18_8_3 vr18_8_2 vr18_8_1 vr18_8_0 vr19_8_15 vr19_8_14 vr19_8_13 vr19_8_12 vr19_8_11 vr19_8_10 vr19_8_9 vr19_8_8 vr19_8_7 vr19_8_6 vr19_8_5 vr19_8_4 vr19_8_3 vr19_8_2 vr19_8_1 vr19_8_0 vr20_8_15 vr20_8_14 vr20_8_13 vr20_8_12 vr20_8_11 vr20_8_10 vr20_8_9 vr20_8_8 vr20_8_7 vr20_8_6 vr20_8_5 vr20_8_4 vr20_8_3 vr20_8_2 vr20_8_1 vr20_8_0 vr21_8_15 vr21_8_14 vr21_8_13 vr21_8_12 vr21_8_11 vr21_8_10 vr21_8_9 vr21_8_8 vr21_8_7 vr21_8_6 vr21_8_5 vr21_8_4 vr21_8_3 vr21_8_2 vr21_8_1 vr21_8_0 vr22_8_15 vr22_8_14 vr22_8_13 vr22_8_12 vr22_8_11 vr22_8_10 vr22_8_9 vr22_8_8 vr22_8_7 vr22_8_6 vr22_8_5 vr22_8_4 vr22_8_3 vr22_8_2 vr22_8_1 vr22_8_0 vr23_8_15 vr23_8_14 vr23_8_13 vr23_8_12 vr23_8_11 vr23_8_10 vr23_8_9 vr23_8_8 vr23_8_7 vr23_8_6 vr23_8_5 vr23_8_4 vr23_8_3 vr23_8_2 vr23_8_1 vr23_8_0 vr24_8_15 vr24_8_14 vr24_8_13 vr24_8_12 vr24_8_11 vr24_8_10 vr24_8_9 vr24_8_8 vr24_8_7 vr24_8_6 vr24_8_5 vr24_8_4 vr24_8_3 vr24_8_2 vr24_8_1 vr24_8_0 vr25_8_15 vr25_8_14 vr25_8_13 vr25_8_12 vr25_8_11 vr25_8_10 vr25_8_9 vr25_8_8 vr25_8_7 vr25_8_6 vr25_8_5 vr25_8_4 vr25_8_3 vr25_8_2 vr25_8_1 vr25_8_0 vr26_8_15 vr26_8_14 vr26_8_13 vr26_8_12 vr26_8_11 vr26_8_10 vr26_8_9 vr26_8_8 vr26_8_7 vr26_8_6 vr26_8_5 vr26_8_4 vr26_8_3 vr26_8_2 vr26_8_1 vr26_8_0 vr27_8_15 vr27_8_14 vr27_8_13 vr27_8_12 vr27_8_11 vr27_8_10 vr27_8_9 vr27_8_8 vr27_8_7 vr27_8_6 vr27_8_5 vr27_8_4 vr27_8_3 vr27_8_2 vr27_8_1 vr27_8_0 vr28_8_15 vr28_8_14 vr28_8_13 vr28_8_12 vr28_8_11 vr28_8_10 vr28_8_9 vr28_8_8 vr28_8_7 vr28_8_6 vr28_8_5 vr28_8_4 vr28_8_3 vr28_8_2 vr28_8_1 vr28_8_0 vr29_8_15 vr29_8_14 vr29_8_13 vr29_8_12 vr29_8_11 vr29_8_10 vr29_8_9 vr29_8_8 vr29_8_7 vr29_8_6 vr29_8_5 vr29_8_4 vr29_8_3 vr29_8_2 vr29_8_1 vr29_8_0 vr30_8_15 vr30_8_14 vr30_8_13 vr30_8_12 vr30_8_11 vr30_8_10 
# SPEFSCR is a repurposed spr200
dcr05d dcr05e dcr05f dcr060 dcr061 dcr062 dcr063 dcr064 dcr065 dcr066 dcr067 dcr068 dcr069 dcr06a dcr06b dcr06c dcr06d dcr06e dcr06f dcr070 dcr071 dcr072 dcr073 dcr074 dcr075 dcr076 dcr077 dcr078 dcr079 dcr07a dcr07b dcr07c dcr07d dcr07e dcr07f dcr080 dcr081 dcr082 dcr083 dcr084 dcr085 dcr086 dcr087 dcr088 dcr089 dcr08a dcr08b dcr08c dcr08d dcr08e dcr08f dcr090 dcr091 dcr092 dcr093 dcr094 dcr095 dcr096 dcr097 dcr098 dcr099 dcr09a dcr09b dcr09c dcr09d dcr09e dcr09f dcr0a0 dcr0a1 dcr0a2 dcr0a3 dcr0a4 dcr0a5 dcr0a6 dcr0a7 dcr0a8 dcr0a9 dcr0aa dcr0ab dcr0ac dcr0ad dcr0ae dcr0af dcr0b0 dcr0b1 dcr0b2 dcr0b3 dcr0b4 dcr0b5 dcr0b6 dcr0b7 dcr0b8 dcr0b9 dcr0ba dcr0bb dcr0bc dcr0bd dcr0be dcr0bf dcr0c0 dcr0c1 dcr0c2 dcr0c3 dcr0c4 dcr0c5 dcr0c6 dcr0c7 dcr0c8 dcr0c9 dcr0ca dcr0cb dcr0cc dcr0cd dcr0ce dcr0cf dcr0d0 dcr0d1 dcr0d2 dcr0d3 dcr0d4 dcr0d5 dcr0d6 dcr0d7 dcr0d8 dcr0d9 dcr0da dcr0db dcr0dc dcr0dd dcr0de dcr0df dcr0e0 dcr0e1 dcr0e2 dcr0e3 dcr0e4 dcr0e5 dcr0e6 dcr0e7 dcr0e8 dcr0e9 dcr0ea dcr0eb dcr0ec dcr0ed dcr0ee dcr0ef dcr0f0 dcr0f1 dcr0f2 dcr0f3 dcr0f4 dcr0f5 dcr0f6 dcr0f7 dcr0f8 dcr0f9 dcr0fa dcr0fb dcr0fc dcr0fd dcr0fe dcr0ff dcr100 dcr101 dcr102 dcr103 dcr104 dcr105 dcr106 dcr107 dcr108 dcr109 dcr10a dcr10b dcr10c dcr10d dcr10e dcr10f dcr110 dcr111 dcr112 dcr113 dcr114 dcr115 dcr116 dcr117 dcr118 dcr119 dcr11a dcr11b dcr11c dcr11d dcr11e dcr11f dcr120 dcr121 dcr122 dcr123 dcr124 dcr125 dcr126 dcr127 dcr128 dcr129 dcr12a dcr12b dcr12c dcr12d dcr12e dcr12f dcr130 dcr131 dcr132 dcr133 dcr134 dcr135 dcr136 dcr137 dcr138 dcr139 dcr13a dcr13b dcr13c dcr13d dcr13e dcr13f dcr140 dcr141 dcr142 dcr143 dcr144 dcr145 dcr146 dcr147 dcr148 dcr149 dcr14a dcr14b dcr14c dcr14d dcr14e dcr14f dcr150 dcr151 dcr152 dcr153 dcr154 dcr155 dcr156 dcr157 dcr158 dcr159 dcr15a dcr15b dcr15c dcr15d dcr15e dcr15f dcr160 dcr161 dcr162 dcr163 dcr164 dcr165 dcr166 dcr167 dcr168 dcr169 dcr16a dcr16b dcr16c dcr16d dcr16e dcr16f dcr170 dcr171 dcr172 dcr173 dcr174 dcr175 dcr176 dcr177 dcr178 dcr179 
dcr17a dcr17b dcr17c dcr17d dcr17e dcr17f dcr180 dcr181 dcr182 dcr183 dcr184 dcr185 dcr186 dcr187 dcr188 dcr189 dcr18a dcr18b dcr18c dcr18d dcr18e dcr18f dcr190 dcr191 dcr192 dcr193 dcr194 dcr195 dcr196 dcr197 dcr198 dcr199 dcr19a dcr19b dcr19c dcr19d dcr19e dcr19f dcr1a0 dcr1a1 dcr1a2 dcr1a3 dcr1a4 dcr1a5 dcr1a6 dcr1a7 dcr1a8 dcr1a9 dcr1aa dcr1ab dcr1ac dcr1ad dcr1ae dcr1af dcr1b0 dcr1b1 dcr1b2 dcr1b3 dcr1b4 dcr1b5 dcr1b6 dcr1b7 dcr1b8 dcr1b9 dcr1ba dcr1bb dcr1bc dcr1bd dcr1be dcr1bf dcr1c0 dcr1c1 dcr1c2 dcr1c3 dcr1c4 dcr1c5 dcr1c6 dcr1c7 dcr1c8 dcr1c9 dcr1ca dcr1cb dcr1cc dcr1cd dcr1ce dcr1cf dcr1d0 dcr1d1 dcr1d2 dcr1d3 dcr1d4 dcr1d5 dcr1d6 dcr1d7 dcr1d8 dcr1d9 dcr1da dcr1db dcr1dc dcr1dd dcr1de dcr1df dcr1e0 dcr1e1 dcr1e2 dcr1e3 dcr1e4 dcr1e5 dcr1e6 dcr1e7 dcr1e8 dcr1e9 dcr1ea dcr1eb dcr1ec dcr1ed dcr1ee dcr1ef dcr1f0 dcr1f1 dcr1f2 dcr1f3 dcr1f4 dcr1f5 dcr1f6 dcr1f7 dcr1f8 dcr1f9 dcr1fa dcr1fb dcr1fc dcr1fd dcr1fe dcr1ff dcr200 dcr201 dcr202 dcr203 dcr204 dcr205 dcr206 dcr207 dcr208 dcr209 dcr20a dcr20b dcr20c dcr20d dcr20e dcr20f dcr210 dcr211 dcr212 dcr213 dcr214 dcr215 dcr216 dcr217 dcr218 dcr219 dcr21a dcr21b dcr21c dcr21d dcr21e dcr21f dcr220 dcr221 dcr222 dcr223 dcr224 dcr225 dcr226 dcr227 dcr228 dcr229 dcr22a dcr22b dcr22c dcr22d dcr22e dcr22f dcr230 dcr231 dcr232 dcr233 dcr234 dcr235 dcr236 dcr237 dcr238 dcr239 dcr23a dcr23b dcr23c dcr23d dcr23e dcr23f dcr240 dcr241 dcr242 dcr243 dcr244 dcr245 dcr246 dcr247 dcr248 dcr249 dcr24a dcr24b dcr24c dcr24d dcr24e dcr24f dcr250 dcr251 dcr252 dcr253 dcr254 dcr255 dcr256 dcr257 dcr258 dcr259 dcr25a dcr25b dcr25c dcr25d dcr25e dcr25f dcr260 dcr261 dcr262 dcr263 dcr264 dcr265 dcr266 dcr267 dcr268 dcr269 dcr26a dcr26b dcr26c dcr26d dcr26e dcr26f dcr270 dcr271 dcr272 dcr273 dcr274 dcr275 dcr276 dcr277 dcr278 dcr279 dcr27a dcr27b dcr27c dcr27d dcr27e dcr27f dcr280 dcr281 dcr282 dcr283 dcr284 dcr285 dcr286 dcr287 dcr288 dcr289 dcr28a dcr28b dcr28c dcr28d dcr28e dcr28f dcr290 dcr291 dcr292 dcr293 dcr294 dcr295 dcr296 
dcr297 dcr298 dcr299 dcr29a dcr29b dcr29c dcr29d dcr29e dcr29f dcr2a0 dcr2a1 dcr2a2 dcr2a3 dcr2a4 dcr2a5 dcr2a6 dcr2a7 dcr2a8 dcr2a9 dcr2aa dcr2ab dcr2ac dcr2ad dcr2ae dcr2af dcr2b0 dcr2b1 dcr2b2 dcr2b3 dcr2b4 dcr2b5 dcr2b6 dcr2b7 dcr2b8 dcr2b9 dcr2ba dcr2bb dcr2bc dcr2bd dcr2be dcr2bf dcr2c0 dcr2c1 dcr2c2 dcr2c3 dcr2c4 dcr2c5 dcr2c6 dcr2c7 dcr2c8 dcr2c9 dcr2ca dcr2cb dcr2cc dcr2cd dcr2ce dcr2cf dcr2d0 dcr2d1 dcr2d2 dcr2d3 dcr2d4 dcr2d5 dcr2d6 dcr2d7 dcr2d8 dcr2d9 dcr2da dcr2db dcr2dc dcr2dd dcr2de dcr2df dcr2e0 dcr2e1 dcr2e2 dcr2e3 dcr2e4 dcr2e5 dcr2e6 dcr2e7 dcr2e8 dcr2e9 dcr2ea dcr2eb dcr2ec dcr2ed dcr2ee dcr2ef dcr2f0 dcr2f1 dcr2f2 dcr2f3 dcr2f4 dcr2f5 dcr2f6 dcr2f7 dcr2f8 dcr2f9 dcr2fa dcr2fb dcr2fc dcr2fd dcr2fe dcr2ff dcr300 dcr301 dcr302 dcr303 dcr304 dcr305 dcr306 dcr307 dcr308 dcr309 dcr30a dcr30b dcr30c dcr30d dcr30e dcr30f dcr310 dcr311 dcr312 dcr313 dcr314 dcr315 dcr316 dcr317 dcr318 dcr319 dcr31a dcr31b dcr31c dcr31d dcr31e dcr31f dcr320 dcr321 dcr322 dcr323 dcr324 dcr325 dcr326 dcr327 dcr328 dcr329 dcr32a dcr32b dcr32c dcr32d dcr32e dcr32f dcr330 dcr331 dcr332 dcr333 dcr334 dcr335 dcr336 dcr337 dcr338 dcr339 dcr33a dcr33b dcr33c dcr33d dcr33e dcr33f dcr340 dcr341 dcr342 dcr343 dcr344 dcr345 dcr346 dcr347 dcr348 dcr349 dcr34a dcr34b dcr34c dcr34d dcr34e dcr34f dcr350 dcr351 dcr352 dcr353 dcr354 dcr355 dcr356 dcr357 dcr358 dcr359 dcr35a dcr35b dcr35c dcr35d dcr35e dcr35f dcr360 dcr361 dcr362 dcr363 dcr364 dcr365 dcr366 dcr367 dcr368 dcr369 dcr36a dcr36b dcr36c dcr36d dcr36e dcr36f dcr370 dcr371 dcr372 dcr373 dcr374 dcr375 dcr376 dcr377 dcr378 dcr379 dcr37a dcr37b dcr37c dcr37d dcr37e dcr37f dcr380 dcr381 dcr382 dcr383 dcr384 dcr385 dcr386 dcr387 dcr388 dcr389 dcr38a dcr38b dcr38c dcr38d dcr38e dcr38f dcr390 dcr391 dcr392 dcr393 dcr394 dcr395 dcr396 dcr397 dcr398 dcr399 dcr39a dcr39b dcr39c dcr39d dcr39e dcr39f dcr3a0 dcr3a1 dcr3a2 dcr3a3 dcr3a4 dcr3a5 dcr3a6 dcr3a7 dcr3a8 dcr3a9 dcr3aa dcr3ab dcr3ac dcr3ad dcr3ae dcr3af dcr3b0 dcr3b1 dcr3b2 dcr3b3 
dcr3b4 dcr3b5 dcr3b6 dcr3b7 dcr3b8 dcr3b9 dcr3ba dcr3bb dcr3bc dcr3bd dcr3be dcr3bf dcr3c0 dcr3c1 dcr3c2 dcr3c3 dcr3c4 dcr3c5 dcr3c6 dcr3c7 dcr3c8 dcr3c9 dcr3ca dcr3cb dcr3cc dcr3cd dcr3ce dcr3cf dcr3d0 dcr3d1 dcr3d2 dcr3d3 dcr3d4 dcr3d5 dcr3d6 dcr3d7 dcr3d8 dcr3d9 dcr3da dcr3db dcr3dc dcr3dd dcr3de dcr3df dcr3e0 dcr3e1 dcr3e2 dcr3e3 dcr3e4 dcr3e5 dcr3e6 dcr3e7 dcr3e8 dcr3e9 dcr3ea dcr3eb dcr3ec dcr3ed dcr3ee dcr3ef dcr3f0 dcr3f1 dcr3f2 dcr3f3 dcr3f4 dcr3f5 dcr3f6 dcr3f7 dcr3f8 dcr3f9 dcr3fa dcr3fb dcr3fc dcr3fd dcr3fe dcr3ff ]; # ACC and SPEFSCR are part of the "EREF: A Reference for Motorola Book E and e500 Core" spec # SPEFSCR is a reperposed spr200 define register offset=0x10000 size=8 [ACC]; # OP=17 & BITS_21_25=0 & BITS_16_20=0(ok) & BITS_5_11=LEV & BITS_2_4=0 & BIT_1=1 & BIT_0=0 define token instr(32) A=(16,20) AA=(1,1) A_BITS=(16,20) A_BITSS=(16,20) signed AX=(2,2) B=(11,15) B_BITS=(11,15) BD=(2,15) signed BF=(17,24) BFA=(0,2) BFA2=(18,20) BF2=(23,25) BH=(11,12) BH_BITS=(11,12) BH_RBE=(11,20) BH_RET=(11,11) BI_BITS=(16,20) BI_CC=(16,17) BI_CR=(18,20) BIT_A=(25,25) BIT_L=(21,21) BIT_R=(21,21) BIT_0=(0,0) BIT_10=(10,10) BIT_1=(1,1) BIT_11=(11,11) BIT_15=(15,15) BIT_16=(16,16) BIT_17=(17,17) BIT_18=(18,18) BIT_20=(20,20) BIT_22=(22,22) BIT_25=(25,25) BIT_9=(9,9) BIT_6=(6,6) BITS_0_1=(0,1) BITS_0_17=(0,17) BITS_0_2=(0,2) BITS_0_3=(0,3) BITS_1_10=(1,10) BITS_11_13=(11,13) BITS_11_15=(11,15) BITS_11_17=(11,17) BITS_11_20=(11,20) BITS_11_22=(11,22) BITS_11_24=(11,24) BITS_11_25=(11,25) BITS_12_15=(12,15) BITS_12_19=(12,19) BITS_12_25=(12,25) BITS_13_15=(13,15) BITS_14_15=(14,15) BITS_16_17=(12,15) BITS_16_18=(16,18) BITS_16_19=(16,19) BITS_16_20=(16,20) BITS_16_22=(16,22) BITS_16_25=(16,25) BITS_17_20=(17,20) BITS_17_24=(17,24) BITS_18_19=(18,19) BITS_18_20=(18,20) BITS_1_9=(1,9) BITS_19_20=(19,20) BITS_20_20=(20,20) BITS_21_22=(21,22) BITS_21_23=(21,23) BITS_21_24=(7,10) BITS_21_25=(21,25) BITS_21_28=(21,28) BITS_22_24=(22,24) BITS_22_25=(22,25) 
BITS_22_26=(22,26) BITS_2_25=(2,25) BITS_23_24=(23,24) BITS_23_25=(23,25) BITS_2_4=(2,4) BITS_24_25=(24,25) BITS_3_7=(3,7) BITS_4_5=(4,5) BITS_6_10=(6,10) BO_0=(25,25) BO_1=(24,24) BO=(21,25) BO_2=(23,23) BO_3=(22, 22) BO_BITS=(21,25) BX=(1,1) C=(6,10) COND_BRANCH_CTRL=(22,25) CR_A=(18,20) CR_A_CC=(16,17) CR_B=(13,15) CR_B_CC=(11,12) CRBD=(21,25) CRBR=(6,10) CR_D=(23,25) CR_D_CC=(21,22) crfD=(23,25) CRFD=(23,25) CRFS=(18,20) CRM0=(19,19) CRM1=(18,18) CRM=(12,19) CRM2=(17,17) CRM3=(16,16) CRM4=(15,15) CRM5=(14,14) CRM6=(13,13) CRM7=(12,12) CR_X=(8,10) CR_X_CC=(6,7) CT=(21,25) CT2=(21,24) CX=(3,3) D0=(6,15) signed D1=(16,20) D2=(0,0) D=(21,25) Dp=(21,25) DC6=(6,6) DCM=(10,15) DCMX=(16,22) DCRN=(11,20) DGM=(10,15) DM=(8,9) DM2=(2,2) DQ=(4,15) DQs=(4,15) signed DS=(2,15) DSs=(2,15) signed DX=(16,20) DUI=(21,25) DUIS=(11,20) EX=(0,0) fA=(16,20) fB=(11,15) fC=(6,10) fD=(21,25) FM0=(24,24) FM1=(23,23) FM=(17,24) FM2=(22,22) FM3=(21,21) FM4=(20,20) FM5=(19,19) FM6=(18,18) FM7=(17,17) FNC=(11,15) fS=(21,25) fT=(21,25) IMM=(11,15) EVUIMM=(11,15) BU_UIMM=(16,20) BU_SIMM=(16,20) EVUIMM_8=(11,15) EVUIMM_4=(11,15) EVUIMM_2=(11,15) L= (21,22) L2=(21,21) L16=(16,17) LEV=(5,11) LI=(2,25) signed LK=(0,0) MBH=(5,5) MBL=(6,10) ME=(1,5) MO=(21,25) MSR_L=(16,16) NB= (11,15) O=(9,9) OE=(10,10) OP=(26,31) PS=(9,9) Rc=(0,0) Rc2=(10,10) RMC=(9,10) RA=(16,20) RB=(11,15) RS=(21,25) RT=(21,25) R0=(0,0) R16=(16,16) S=(21,25) SBE=(11,11) SH16=(10,15) SHB=(6,9) SHH=(1,1) SHL=(11,15) SHW=(8,9) S8IMM=(0,7) signed S5IMM=(11,15) signed SIMM=(0,15) signed SIMM_DS=(2,15) signed SIMM_SIGN=(15,15) SIX=(11,14) SP=(19,20) SPRVAL=(11,20) SR=(16,19) ST=(15,15) STRM=(21,22) SX=(0,0) SX3=(3,3) T=(21,25) TOA=(21,25) TBR=(11,20) TH=(21,25) TMP_6_10=(21,25) TO=(21,25) TX=(0,0) TX3=(3,3) UI=(11,15) UI_11_s8=(16,20) UI_16_s8=(11,15) UI_16_s16=(0,15) UIMM8=(11,18) UIMM=(0,15) UIM=(16,17) UIMB=(16,19) UIMH=(16,18) UIMW=(16,17) UIMT=(16,21) vrAR=(16,20) # AltVect Vector register vrN selector (128 bit) vrAD=(16,20) 
vrA_64_0=(16,20) # AltVect Vector register vrN selector (64 bit) vrA_64_1=(16,20) vrA_32_0=(16,20) # AltVect Vector register vrN selector (32 bit) vrA_32_1=(16,20) vrA_32_2=(16,20) vrA_32_3=(16,20) vrA_16_0=(16,20) # AltVect Vector register vrN selector (16 bit) vrA_16_1=(16,20) vrA_16_2=(16,20) vrA_16_3=(16,20) vrA_16_4=(16,20) vrA_16_5=(16,20) vrA_16_6=(16,20) vrA_16_7=(16,20) vrA_8_0=(16,20) # AltVect Vector register vrN selector (8 bit) vrA_8_1=(16,20) vrA_8_2=(16,20) vrA_8_3=(16,20) vrA_8_4=(16,20) vrA_8_5=(16,20) vrA_8_6=(16,20) vrA_8_7=(16,20) vrA_8_8=(16,20) vrA_8_9=(16,20) vrA_8_10=(16,20) vrA_8_11=(16,20) vrA_8_12=(16,20) vrA_8_13=(16,20) vrA_8_14=(16,20) vrA_8_15=(16,20) vrBR=(11,15) # AltVect Vector register vrN selector (128 bit) vrBD=(11,15) vrB_64_0=(11,15) # AltVect Vector register vrN selector (64 bit) vrB_64_1=(11,15) vrB_32_0=(11,15) # AltVect Vector register vrN selector (32 bit) vrB_32_1=(11,15) vrB_32_2=(11,15) vrB_32_3=(11,15) vrB_16_0=(11,15) # AltVect Vector register vrN selector (16 bit) vrB_16_1=(11,15) vrB_16_2=(11,15) vrB_16_3=(11,15) vrB_16_4=(11,15) vrB_16_5=(11,15) vrB_16_6=(11,15) vrB_16_7=(11,15) vrB_8_0=(11,15) # AltVect Vector register vrN selector (8 bit) vrB_8_1=(11,15) vrB_8_2=(11,15) vrB_8_3=(11,15) vrB_8_4=(11,15) vrB_8_5=(11,15) vrB_8_6=(11,15) vrB_8_7=(11,15) vrB_8_8=(11,15) vrB_8_9=(11,15) vrB_8_10=(11,15) vrB_8_11=(11,15) vrB_8_12=(11,15) vrB_8_13=(11,15) vrB_8_14=(11,15) vrB_8_15=(11,15) vrCR=(6,10) # AltVect Vector register vrN selector (128 bit) vrCD=(6,10) vrC_64_0=(6,10) # AltVect Vector register vrN selector (64 bit) vrC_64_1=(6,10) vrC_32_0=(6,10) # AltVect Vector register vrN selector (32 bit) vrC_32_1=(6,10) vrC_32_2=(6,10) vrC_32_3=(6,10) vrC_16_0=(6,10) # AltVect Vector register vrN selector (16 bit) vrC_16_1=(6,10) vrC_16_2=(6,10) vrC_16_3=(6,10) vrC_16_4=(6,10) vrC_16_5=(6,10) vrC_16_6=(6,10) vrC_16_7=(6,10) vrC_8_0=(6,10) # AltVect Vector register vrN selector (8 bit) vrC_8_1=(6,10) vrC_8_2=(6,10) 
vrC_8_3=(6,10) vrC_8_4=(6,10) vrC_8_5=(6,10) vrC_8_6=(6,10) vrC_8_7=(6,10) vrC_8_8=(6,10) vrC_8_9=(6,10) vrC_8_10=(6,10) vrC_8_11=(6,10) vrC_8_12=(6,10) vrC_8_13=(6,10) vrC_8_14=(6,10) vrC_8_15=(6,10) vrDR=(21,25) # AltVect Vector register vrN selector (128 bit) vrDD=(21,25) vrD_64_0=(21,25) # AltVect Vector register vrN selector (64 bit) vrD_64_1=(21,25) vrD_32_0=(21,25) # AltVect Vector register vrN selector (32 bit) vrD_32_1=(21,25) vrD_32_2=(21,25) vrD_32_3=(21,25) vrD_16_0=(21,25) # AltVect Vector register vrN selector (16 bit) vrD_16_1=(21,25) vrD_16_2=(21,25) vrD_16_3=(21,25) vrD_16_4=(21,25) vrD_16_5=(21,25) vrD_16_6=(21,25) vrD_16_7=(21,25) vrD_8_0=(21,25) # AltVect Vector register vrN selector (8 bit) vrD_8_1=(21,25) vrD_8_2=(21,25) vrD_8_3=(21,25) vrD_8_4=(21,25) vrD_8_5=(21,25) vrD_8_6=(21,25) vrD_8_7=(21,25) vrD_8_8=(21,25) vrD_8_9=(21,25) vrD_8_10=(21,25) vrD_8_11=(21,25) vrD_8_12=(21,25) vrD_8_13=(21,25) vrD_8_14=(21,25) vrD_8_15=(21,25) vrSR=(21,25) # AltVect Vector register vrN selector (128 bit) vrSD=(21,25) vrS_64_0=(21,25) # AltVect Vector register vrN selector (64 bit) vrS_64_1=(21,25) vrS_32_0=(21,25) # AltVect Vector register vrN selector (32 bit) vrS_32_1=(21,25) vrS_32_2=(21,25) vrS_32_3=(21,25) vrS_16_0=(21,25) # AltVect Vector register vrN selector (16 bit) vrS_16_1=(21,25) vrS_16_2=(21,25) vrS_16_3=(21,25) vrS_16_4=(21,25) vrS_16_5=(21,25) vrS_16_6=(21,25) vrS_16_7=(21,25) vrS_8_0=(21,25) # AltVect Vector register vrN selector (8 bit) vrS_8_1=(21,25) vrS_8_2=(21,25) vrS_8_3=(21,25) vrS_8_4=(21,25) vrS_8_5=(21,25) vrS_8_6=(21,25) vrS_8_7=(21,25) vrS_8_8=(21,25) vrS_8_9=(21,25) vrS_8_10=(21,25) vrS_8_11=(21,25) vrS_8_12=(21,25) vrS_8_13=(21,25) vrS_8_14=(21,25) vrS_8_15=(21,25) WC=(21,22) XOP_0_10=(0,10) XOP_0_5=(0,5) XOP_0_8=(0,8) XOP_0_9=(0,9) XOP_1_10=(1,10) XOP_1_4=(1,4) XOP_1_5=(1,5) XOP_1_8=(1,8) XOP_1_9=(1,9) XOP_2_10=(2,10) XOP_2_4=(2,4) XOP_3_5=(3,5) XOP_3_10=(3,10) XOP_3_9=(3,9) XOP_7_10=(7,10) # support VSX args Avsa=(16,20) 
Avsb=(16,20) Bvsa=(11,15) Bvsb=(11,15) Cvsa=(6,10) Cvsb=(6,10) Svsa=(21,25) Svsb=(21,25) Svsbx=(21,25) Tvsa=(21,25) Tvsb=(21,25) Tvsbx=(21,25) BD15_VLE=(1,15) signed BD24_VLE=(1,24) signed BF_VLE=(21,22) BI_CC_VLE=(16,17) BI_CR_VLE=(18,19) BO_VLE=(20,21) IMM8=(0,7) IMM_0_10_VLE=(0,10) IMM_11_15_VLE=(11,14) IMM_16_20_VLE=(16,20) IMM_21_25_VLE=(21,25) SIMM_11_14_VLE=(11,14) signed SIMM_21_25_VLE=(21,25) signed SCL_VLE=(8,9) LEV_VLE=(11,15) XOP_8_VLE=(8,15) XOP_11_VLE=(11,15) XOP_12_VLE=(12,15) XOP_VLE=(22,25) ; define token instrvle(16) OP4_VLE=(12,15) OP5_VLE=(11,15) OP6_VLE=(10,15) OP15_VLE=(1,15) OP16_VLE=(0,15) OIM5_VLE=(4,8) OIM7_VLE=(4,10) SD4_VLE=(8,11) UI7_VLE=(4,10) UI5_VLE=(4,8) XORR_VLE=(8,9) XOR_VLE=(4,9) ARX_VLE=(0,3) ARY_VLE=(4,7) RY_VLE=(4,7) RZ_VLE=(4,7) RX_VLE=(0,3) BO16_VLE=(10,10) BIT9_VLE=(9,9) BIT8_VLE=(8,8) BI16_VLE=(8,9) BITS_8_9=(8,9) BD8_VLE=(0,7) signed LK8_VLE=(8,8) LK0_VLE=(0,0) ; EVUIMM_2_RAt: val^"("^RA^")" is RA & A & EVUIMM_2 [ val = EVUIMM_2*2; ] { tmp:$(REGISTER_SIZE) = RA+zext(val:4); export tmp; } EVUIMM_2_RAt: val^"("^RA^")" is RA & A=0 & EVUIMM_2 [ val = EVUIMM_2*2; ] { tmp:$(REGISTER_SIZE) = zext(val:4); export tmp; } EVUIMM_4_RAt: val^"("^RA^")" is RA & A & EVUIMM_4 [ val = EVUIMM_4*4; ] { tmp:$(REGISTER_SIZE) = RA+zext(val:4); export tmp; } EVUIMM_4_RAt: val^"("^RA^")" is RA & A=0 & EVUIMM_4 [ val = EVUIMM_4*4; ] { tmp:$(REGISTER_SIZE) = zext(val:4); export tmp; } EVUIMM_8_RAt: val^"("^RA^")" is RA & A & EVUIMM_8 [ val = EVUIMM_8*8; ] { tmp:$(REGISTER_SIZE) = RA+zext(val:4); export tmp; } EVUIMM_8_RAt: val^"("^RA^")" is RA & A=0 & EVUIMM_8 [ val = EVUIMM_8*8; ] { tmp:$(REGISTER_SIZE) = zext(val:4); export tmp; } attach variables [ T ] [ vs0 vs1 vs2 vs3 vs4 vs5 vs6 vs7 vs8 vs9 vs10 vs11 vs12 vs13 vs14 vs15 vs16 vs17 vs18 vs19 vs20 vs21 vs22 vs23 vs24 vs25 vs26 vs27 vs28 vs29 vs30 vs31 ]; attach variables [ RX_VLE RY_VLE RZ_VLE] [ r0 r1 r2 r3 r4 r5 r6 r7 r24 r25 r26 r27 r28 r29 r30 r31]; attach variables [ ARX_VLE ARY_VLE] [ r8 
r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23]; attach variables [ D A B C S TH RA RB RS RT regp] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 r30 r31 ]; attach variables [ BFA BI_CR CRFD CRFS CR_A CR_B CR_D CR_X ] [cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7] ; attach variables [ BI_CR_VLE BF_VLE ] [cr0 cr1 cr2 cr3 ] ; attach variables [ fD fB fA fC fS fT ] [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 f16 f17 f18 f19 f20 f21 f22 f23 f24 f25 f26 f27 f28 f29 f30 f31 ]; attach variables [ CRBD CRBR ] [ fp_fx fp_fex fp_vx fp_ox fp_ux fp_zx fp_xx fp_vxsnan fp_vxisi fp_vxidi fp_vxzdz fp_vximz fp_vxvc fp_fr fp_fi fp_c fp_cc0 fp_cc1 fp_cc2 fp_cc3 fp_reserve1 fp_vxsoft fp_vxsqrt fp_vxcvi fp_ve fp_oe fp_ue fp_ze fp_xe fp_ni fp_rn0 fp_rn1 ]; attach variables SR [ sr0 sr1 sr2 sr3 sr4 sr5 sr6 sr7 sr8 sr9 sr10 sr11 sr12 sr13 sr14 sr15 ]; ## ## Attach the spr register to the token SPRVAL made up of the bits sprL/sprH ## the low bits are shifted up, so the table is inverted and indexed by sprH,sprL ## This could have been done by computing sprVal = sprH * 32 + sprL but it would ## have resulted in multiple instructions instead of the original single prototype. ## Thus this massive inverted table. 
attach variables SPRVAL [ spr000 spr020 spr040 spr060 spr080 spr0a0 spr0c0 spr0e0 spr100 spr120 spr140 spr160 spr180 spr1a0 spr1c0 spr1e0 spr200 spr220 spr240 spr260 spr280 spr2a0 spr2c0 spr2e0 spr300 spr320 spr340 spr360 spr380 spr3a0 spr3c0 spr3e0 XER spr021 spr041 spr061 spr081 spr0a1 spr0c1 spr0e1 spr101 spr121 spr141 spr161 spr181 spr1a1 spr1c1 spr1e1 spr201 spr221 spr241 spr261 spr281 spr2a1 spr2c1 spr2e1 spr301 spr321 spr341 spr361 spr381 spr3a1 spr3c1 spr3e1 spr002 spr022 spr042 spr062 spr082 spr0a2 spr0c2 spr0e2 spr102 spr122 spr142 spr162 spr182 spr1a2 spr1c2 spr1e2 spr202 spr222 spr242 spr262 spr282 spr2a2 spr2c2 spr2e2 spr302 spr322 spr342 spr362 spr382 spr3a2 spr3c2 spr3e2 spr003 spr023 spr043 spr063 spr083 spr0a3 spr0c3 spr0e3 spr103 spr123 spr143 spr163 spr183 spr1a3 spr1c3 spr1e3 spr203 spr223 spr243 spr263 spr283 spr2a3 spr2c3 spr2e3 spr303 spr323 spr343 spr363 spr383 spr3a3 spr3c3 spr3e3 spr004 spr024 spr044 spr064 spr084 spr0a4 spr0c4 spr0e4 spr104 spr124 spr144 spr164 spr184 spr1a4 spr1c4 spr1e4 spr204 spr224 spr244 spr264 spr284 spr2a4 spr2c4 spr2e4 spr304 spr324 spr344 spr364 spr384 spr3a4 spr3c4 spr3e4 spr005 spr025 spr045 spr065 spr085 spr0a5 spr0c5 spr0e5 spr105 spr125 spr145 spr165 spr185 spr1a5 spr1c5 spr1e5 spr205 spr225 spr245 spr265 spr285 spr2a5 spr2c5 spr2e5 spr305 spr325 spr345 spr365 spr385 spr3a5 spr3c5 spr3e5 spr006 spr026 spr046 spr066 spr086 spr0a6 spr0c6 spr0e6 spr106 spr126 spr146 spr166 spr186 spr1a6 spr1c6 spr1e6 spr206 spr226 spr246 spr266 spr286 spr2a6 spr2c6 spr2e6 spr306 spr326 spr346 spr366 spr386 spr3a6 spr3c6 spr3e6 spr007 spr027 spr047 spr067 spr087 spr0a7 spr0c7 spr0e7 spr107 spr127 spr147 spr167 spr187 spr1a7 spr1c7 spr1e7 spr207 spr227 spr247 spr267 spr287 spr2a7 spr2c7 spr2e7 spr307 spr327 spr347 spr367 spr387 spr3a7 spr3c7 spr3e7 LR spr028 spr048 spr068 spr088 spr0a8 spr0c8 spr0e8 spr108 spr128 spr148 spr168 spr188 spr1a8 spr1c8 spr1e8 spr208 spr228 spr248 spr268 spr288 spr2a8 spr2c8 spr2e8 spr308 spr328 spr348 
spr368 spr388 spr3a8 spr3c8 spr3e8 CTR spr029 spr049 spr069 spr089 spr0a9 spr0c9 spr0e9 spr109 spr129 spr149 spr169 spr189 spr1a9 spr1c9 spr1e9 spr209 spr229 spr249 spr269 spr289 spr2a9 spr2c9 spr2e9 spr309 spr329 spr349 spr369 spr389 spr3a9 spr3c9 spr3e9 spr00a spr02a spr04a spr06a spr08a spr0aa spr0ca spr0ea spr10a spr12a spr14a spr16a spr18a spr1aa spr1ca spr1ea spr20a spr22a spr24a spr26a spr28a spr2aa spr2ca spr2ea spr30a spr32a spr34a spr36a spr38a spr3aa spr3ca spr3ea spr00b spr02b spr04b spr06b spr08b spr0ab spr0cb spr0eb spr10b spr12b spr14b spr16b spr18b spr1ab spr1cb spr1eb spr20b spr22b spr24b spr26b spr28b spr2ab spr2cb spr2eb spr30b spr32b spr34b spr36b spr38b spr3ab spr3cb spr3eb spr00c spr02c spr04c spr06c spr08c spr0ac spr0cc spr0ec TBLr spr12c spr14c spr16c spr18c spr1ac spr1cc spr1ec spr20c spr22c spr24c spr26c spr28c spr2ac spr2cc spr2ec spr30c spr32c spr34c spr36c spr38c spr3ac spr3cc spr3ec spr00d spr02d spr04d spr06d spr08d spr0ad spr0cd spr0ed TBUr spr12d spr14d spr16d spr18d spr1ad spr1cd spr1ed spr20d spr22d spr24d spr26d spr28d spr2ad spr2cd spr2ed spr30d spr32d spr34d spr36d spr38d spr3ad spr3cd spr3ed spr00e spr02e spr04e spr06e spr08e spr0ae spr0ce spr0ee spr10e spr12e spr14e spr16e spr18e spr1ae spr1ce spr1ee spr20e spr22e spr24e spr26e spr28e spr2ae spr2ce spr2ee spr30e spr32e spr34e spr36e spr38e spr3ae spr3ce spr3ee spr00f spr02f spr04f spr06f spr08f spr0af spr0cf spr0ef spr10f spr12f spr14f spr16f spr18f spr1af spr1cf spr1ef spr20f spr22f spr24f spr26f spr28f spr2af spr2cf spr2ef spr30f TAR spr34f spr36f spr38f spr3af spr3cf spr3ef spr010 spr030 spr050 spr070 spr090 spr0b0 spr0d0 spr0f0 spr110 spr130 spr150 spr170 spr190 spr1b0 spr1d0 spr1f0 spr210 spr230 spr250 spr270 spr290 spr2b0 spr2d0 spr2f0 spr310 spr330 spr350 spr370 spr390 spr3b0 spr3d0 spr3f0 spr011 spr031 spr051 spr071 spr091 spr0b1 spr0d1 spr0f1 spr111 spr131 spr151 spr171 spr191 spr1b1 spr1d1 spr1f1 spr211 spr231 spr251 spr271 spr291 spr2b1 spr2d1 spr2f1 spr311 spr331 
spr351 spr371 spr391 spr3b1 spr3d1 spr3f1 spr012 spr032 spr052 spr072 spr092 spr0b2 spr0d2 spr0f2 spr112 spr132 spr152 spr172 spr192 spr1b2 spr1d2 spr1f2 spr212 spr232 spr252 spr272 spr292 spr2b2 spr2d2 spr2f2 spr312 spr332 spr352 spr372 spr392 spr3b2 spr3d2 spr3f2 spr013 spr033 spr053 spr073 spr093 spr0b3 spr0d3 spr0f3 spr113 spr133 spr153 spr173 spr193 spr1b3 spr1d3 spr1f3 spr213 spr233 spr253 spr273 spr293 spr2b3 spr2d3 spr2f3 spr313 spr333 spr353 spr373 spr393 spr3b3 spr3d3 spr3f3 spr014 spr034 spr054 spr074 spr094 spr0b4 spr0d4 spr0f4 spr114 spr134 spr154 spr174 spr194 spr1b4 spr1d4 spr1f4 spr214 spr234 spr254 spr274 spr294 spr2b4 spr2d4 spr2f4 spr314 spr334 spr354 spr374 spr394 spr3b4 spr3d4 spr3f4 spr015 spr035 spr055 spr075 spr095 spr0b5 spr0d5 spr0f5 spr115 spr135 spr155 spr175 spr195 spr1b5 spr1d5 spr1f5 spr215 spr235 spr255 spr275 spr295 spr2b5 spr2d5 spr2f5 spr315 spr335 spr355 spr375 spr395 spr3b5 spr3d5 spr3f5 spr016 spr036 spr056 spr076 spr096 spr0b6 spr0d6 spr0f6 spr116 spr136 spr156 spr176 spr196 spr1b6 spr1d6 spr1f6 spr216 spr236 spr256 spr276 spr296 spr2b6 spr2d6 spr2f6 spr316 spr336 spr356 spr376 spr396 spr3b6 spr3d6 spr3f6 spr017 spr037 spr057 spr077 spr097 spr0b7 spr0d7 spr0f7 spr117 spr137 spr157 spr177 spr197 spr1b7 spr1d7 spr1f7 spr217 spr237 spr257 spr277 spr297 spr2b7 spr2d7 spr2f7 spr317 spr337 spr357 spr377 spr397 spr3b7 spr3d7 spr3f7 spr018 spr038 spr058 spr078 spr098 spr0b8 spr0d8 spr0f8 spr118 spr138 spr158 spr178 spr198 spr1b8 spr1d8 spr1f8 spr218 spr238 spr258 spr278 spr298 spr2b8 spr2d8 spr2f8 spr318 spr338 spr358 spr378 spr398 spr3b8 spr3d8 spr3f8 spr019 spr039 spr059 spr079 spr099 spr0b9 spr0d9 spr0f9 spr119 spr139 spr159 spr179 spr199 spr1b9 spr1d9 spr1f9 spr219 spr239 spr259 spr279 spr299 spr2b9 spr2d9 spr2f9 spr319 spr339 spr359 spr379 spr399 spr3b9 spr3d9 spr3f9 SRR0 CSRR0 spr05a spr07a spr09a spr0ba spr0da spr0fa spr11a spr13a spr15a spr17a spr19a spr1ba spr1da spr1fa spr21a spr23a spr25a spr27a spr29a spr2ba spr2da spr2fa 
spr31a spr33a spr35a spr37a spr39a spr3ba spr3da spr3fa SRR1 CSRR1 spr05b spr07b spr09b spr0bb spr0db spr0fb spr11b spr13b spr15b spr17b spr19b spr1bb spr1db spr1fb spr21b spr23b spr25b spr27b spr29b spr2bb spr2db spr2fb spr31b spr33b spr35b spr37b spr39b spr3bb spr3db spr3fb spr01c spr03c spr05c spr07c spr09c spr0bc spr0dc spr0fc TBLw spr13c spr15c spr17c spr19c spr1bc spr1dc spr1fc spr21c spr23c spr25c spr27c spr29c spr2bc spr2dc spr2fc spr31c spr33c spr35c spr37c spr39c spr3bc spr3dc spr3fc spr01d spr03d spr05d spr07d spr09d spr0bd spr0dd spr0fd TBUw spr13d spr15d spr17d spr19d spr1bd spr1dd spr1fd spr21d spr23d spr25d spr27d spr29d spr2bd spr2dd spr2fd spr31d spr33d spr35d spr37d spr39d spr3bd spr3dd spr3fd spr01e spr03e spr05e spr07e spr09e spr0be spr0de spr0fe spr11e spr13e spr15e spr17e spr19e spr1be spr1de spr1fe spr21e spr23e spr25e spr27e spr29e spr2be spr2de spr2fe spr31e spr33e spr35e spr37e spr39e spr3be spr3de spr3fe spr01f spr03f spr05f spr07f spr09f spr0bf spr0df spr0ff spr11f spr13f spr15f spr17f spr19f spr1bf spr1df spr1ff spr21f spr23f spr25f spr27f spr29f spr2bf spr2df spr2ff spr31f spr33f spr35f spr37f spr39f spr3bf spr3df spr3ff ]; ## ## Attach the dcr register to the token DCRN made up of the bits dcrnL/dcrnH ## the low bits are shifted up, so the table is inverted and indexed by dcrnH,dcrnL ## This could have been done by computing DCRN = dcrnH * 32 + dcrnL but it would ## have resulted in multiple instructions instead of the original single prototype. ## Thus this massive inverted table. 
attach variables DCRN [ dcr000 dcr020 dcr040 dcr060 dcr080 dcr0a0 dcr0c0 dcr0e0 dcr100 dcr120 dcr140 dcr160 dcr180 dcr1a0 dcr1c0 dcr1e0 dcr200 dcr220 dcr240 dcr260 dcr280 dcr2a0 dcr2c0 dcr2e0 dcr300 dcr320 dcr340 dcr360 dcr380 dcr3a0 dcr3c0 dcr3e0 dcr001 dcr021 dcr041 dcr061 dcr081 dcr0a1 dcr0c1 dcr0e1 dcr101 dcr121 dcr141 dcr161 dcr181 dcr1a1 dcr1c1 dcr1e1 dcr201 dcr221 dcr241 dcr261 dcr281 dcr2a1 dcr2c1 dcr2e1 dcr301 dcr321 dcr341 dcr361 dcr381 dcr3a1 dcr3c1 dcr3e1 dcr002 dcr022 dcr042 dcr062 dcr082 dcr0a2 dcr0c2 dcr0e2 dcr102 dcr122 dcr142 dcr162 dcr182 dcr1a2 dcr1c2 dcr1e2 dcr202 dcr222 dcr242 dcr262 dcr282 dcr2a2 dcr2c2 dcr2e2 dcr302 dcr322 dcr342 dcr362 dcr382 dcr3a2 dcr3c2 dcr3e2 dcr003 dcr023 dcr043 dcr063 dcr083 dcr0a3 dcr0c3 dcr0e3 dcr103 dcr123 dcr143 dcr163 dcr183 dcr1a3 dcr1c3 dcr1e3 dcr203 dcr223 dcr243 dcr263 dcr283 dcr2a3 dcr2c3 dcr2e3 dcr303 dcr323 dcr343 dcr363 dcr383 dcr3a3 dcr3c3 dcr3e3 dcr004 dcr024 dcr044 dcr064 dcr084 dcr0a4 dcr0c4 dcr0e4 dcr104 dcr124 dcr144 dcr164 dcr184 dcr1a4 dcr1c4 dcr1e4 dcr204 dcr224 dcr244 dcr264 dcr284 dcr2a4 dcr2c4 dcr2e4 dcr304 dcr324 dcr344 dcr364 dcr384 dcr3a4 dcr3c4 dcr3e4 dcr005 dcr025 dcr045 dcr065 dcr085 dcr0a5 dcr0c5 dcr0e5 dcr105 dcr125 dcr145 dcr165 dcr185 dcr1a5 dcr1c5 dcr1e5 dcr205 dcr225 dcr245 dcr265 dcr285 dcr2a5 dcr2c5 dcr2e5 dcr305 dcr325 dcr345 dcr365 dcr385 dcr3a5 dcr3c5 dcr3e5 dcr006 dcr026 dcr046 dcr066 dcr086 dcr0a6 dcr0c6 dcr0e6 dcr106 dcr126 dcr146 dcr166 dcr186 dcr1a6 dcr1c6 dcr1e6 dcr206 dcr226 dcr246 dcr266 dcr286 dcr2a6 dcr2c6 dcr2e6 dcr306 dcr326 dcr346 dcr366 dcr386 dcr3a6 dcr3c6 dcr3e6 dcr007 dcr027 dcr047 dcr067 dcr087 dcr0a7 dcr0c7 dcr0e7 dcr107 dcr127 dcr147 dcr167 dcr187 dcr1a7 dcr1c7 dcr1e7 dcr207 dcr227 dcr247 dcr267 dcr287 dcr2a7 dcr2c7 dcr2e7 dcr307 dcr327 dcr347 dcr367 dcr387 dcr3a7 dcr3c7 dcr3e7 dcr008 dcr028 dcr048 dcr068 dcr088 dcr0a8 dcr0c8 dcr0e8 dcr108 dcr128 dcr148 dcr168 dcr188 dcr1a8 dcr1c8 dcr1e8 dcr208 dcr228 dcr248 dcr268 dcr288 dcr2a8 dcr2c8 dcr2e8 dcr308 dcr328 
dcr348 dcr368 dcr388 dcr3a8 dcr3c8 dcr3e8 dcr009 dcr029 dcr049 dcr069 dcr089 dcr0a9 dcr0c9 dcr0e9 dcr109 dcr129 dcr149 dcr169 dcr189 dcr1a9 dcr1c9 dcr1e9 dcr209 dcr229 dcr249 dcr269 dcr289 dcr2a9 dcr2c9 dcr2e9 dcr309 dcr329 dcr349 dcr369 dcr389 dcr3a9 dcr3c9 dcr3e9 dcr00a dcr02a dcr04a dcr06a dcr08a dcr0aa dcr0ca dcr0ea dcr10a dcr12a dcr14a dcr16a dcr18a dcr1aa dcr1ca dcr1ea dcr20a dcr22a dcr24a dcr26a dcr28a dcr2aa dcr2ca dcr2ea dcr30a dcr32a dcr34a dcr36a dcr38a dcr3aa dcr3ca dcr3ea dcr00b dcr02b dcr04b dcr06b dcr08b dcr0ab dcr0cb dcr0eb dcr10b dcr12b dcr14b dcr16b dcr18b dcr1ab dcr1cb dcr1eb dcr20b dcr22b dcr24b dcr26b dcr28b dcr2ab dcr2cb dcr2eb dcr30b dcr32b dcr34b dcr36b dcr38b dcr3ab dcr3cb dcr3eb dcr00c dcr02c dcr04c dcr06c dcr08c dcr0ac dcr0cc dcr0ec dcr10c dcr12c dcr14c dcr16c dcr18c dcr1ac dcr1cc dcr1ec dcr20c dcr22c dcr24c dcr26c dcr28c dcr2ac dcr2cc dcr2ec dcr30c dcr32c dcr34c dcr36c dcr38c dcr3ac dcr3cc dcr3ec dcr00d dcr02d dcr04d dcr06d dcr08d dcr0ad dcr0cd dcr0ed dcr10d dcr12d dcr14d dcr16d dcr18d dcr1ad dcr1cd dcr1ed dcr20d dcr22d dcr24d dcr26d dcr28d dcr2ad dcr2cd dcr2ed dcr30d dcr32d dcr34d dcr36d dcr38d dcr3ad dcr3cd dcr3ed dcr00e dcr02e dcr04e dcr06e dcr08e dcr0ae dcr0ce dcr0ee dcr10e dcr12e dcr14e dcr16e dcr18e dcr1ae dcr1ce dcr1ee dcr20e dcr22e dcr24e dcr26e dcr28e dcr2ae dcr2ce dcr2ee dcr30e dcr32e dcr34e dcr36e dcr38e dcr3ae dcr3ce dcr3ee dcr00f dcr02f dcr04f dcr06f dcr08f dcr0af dcr0cf dcr0ef dcr10f dcr12f dcr14f dcr16f dcr18f dcr1af dcr1cf dcr1ef dcr20f dcr22f dcr24f dcr26f dcr28f dcr2af dcr2cf dcr2ef dcr30f dcr32f dcr34f dcr36f dcr38f dcr3af dcr3cf dcr3ef dcr010 dcr030 dcr050 dcr070 dcr090 dcr0b0 dcr0d0 dcr0f0 dcr110 dcr130 dcr150 dcr170 dcr190 dcr1b0 dcr1d0 dcr1f0 dcr210 dcr230 dcr250 dcr270 dcr290 dcr2b0 dcr2d0 dcr2f0 dcr310 dcr330 dcr350 dcr370 dcr390 dcr3b0 dcr3d0 dcr3f0 dcr011 dcr031 dcr051 dcr071 dcr091 dcr0b1 dcr0d1 dcr0f1 dcr111 dcr131 dcr151 dcr171 dcr191 dcr1b1 dcr1d1 dcr1f1 dcr211 dcr231 dcr251 dcr271 dcr291 dcr2b1 dcr2d1 
dcr2f1 dcr311 dcr331 dcr351 dcr371 dcr391 dcr3b1 dcr3d1 dcr3f1 dcr012 dcr032 dcr052 dcr072 dcr092 dcr0b2 dcr0d2 dcr0f2 dcr112 dcr132 dcr152 dcr172 dcr192 dcr1b2 dcr1d2 dcr1f2 dcr212 dcr232 dcr252 dcr272 dcr292 dcr2b2 dcr2d2 dcr2f2 dcr312 dcr332 dcr352 dcr372 dcr392 dcr3b2 dcr3d2 dcr3f2 dcr013 dcr033 dcr053 dcr073 dcr093 dcr0b3 dcr0d3 dcr0f3 dcr113 dcr133 dcr153 dcr173 dcr193 dcr1b3 dcr1d3 dcr1f3 dcr213 dcr233 dcr253 dcr273 dcr293 dcr2b3 dcr2d3 dcr2f3 dcr313 dcr333 dcr353 dcr373 dcr393 dcr3b3 dcr3d3 dcr3f3 dcr014 dcr034 dcr054 dcr074 dcr094 dcr0b4 dcr0d4 dcr0f4 dcr114 dcr134 dcr154 dcr174 dcr194 dcr1b4 dcr1d4 dcr1f4 dcr214 dcr234 dcr254 dcr274 dcr294 dcr2b4 dcr2d4 dcr2f4 dcr314 dcr334 dcr354 dcr374 dcr394 dcr3b4 dcr3d4 dcr3f4 dcr015 dcr035 dcr055 dcr075 dcr095 dcr0b5 dcr0d5 dcr0f5 dcr115 dcr135 dcr155 dcr175 dcr195 dcr1b5 dcr1d5 dcr1f5 dcr215 dcr235 dcr255 dcr275 dcr295 dcr2b5 dcr2d5 dcr2f5 dcr315 dcr335 dcr355 dcr375 dcr395 dcr3b5 dcr3d5 dcr3f5 dcr016 dcr036 dcr056 dcr076 dcr096 dcr0b6 dcr0d6 dcr0f6 dcr116 dcr136 dcr156 dcr176 dcr196 dcr1b6 dcr1d6 dcr1f6 dcr216 dcr236 dcr256 dcr276 dcr296 dcr2b6 dcr2d6 dcr2f6 dcr316 dcr336 dcr356 dcr376 dcr396 dcr3b6 dcr3d6 dcr3f6 dcr017 dcr037 dcr057 dcr077 dcr097 dcr0b7 dcr0d7 dcr0f7 dcr117 dcr137 dcr157 dcr177 dcr197 dcr1b7 dcr1d7 dcr1f7 dcr217 dcr237 dcr257 dcr277 dcr297 dcr2b7 dcr2d7 dcr2f7 dcr317 dcr337 dcr357 dcr377 dcr397 dcr3b7 dcr3d7 dcr3f7 dcr018 dcr038 dcr058 dcr078 dcr098 dcr0b8 dcr0d8 dcr0f8 dcr118 dcr138 dcr158 dcr178 dcr198 dcr1b8 dcr1d8 dcr1f8 dcr218 dcr238 dcr258 dcr278 dcr298 dcr2b8 dcr2d8 dcr2f8 dcr318 dcr338 dcr358 dcr378 dcr398 dcr3b8 dcr3d8 dcr3f8 dcr019 dcr039 dcr059 dcr079 dcr099 dcr0b9 dcr0d9 dcr0f9 dcr119 dcr139 dcr159 dcr179 dcr199 dcr1b9 dcr1d9 dcr1f9 dcr219 dcr239 dcr259 dcr279 dcr299 dcr2b9 dcr2d9 dcr2f9 dcr319 dcr339 dcr359 dcr379 dcr399 dcr3b9 dcr3d9 dcr3f9 dcr01a dcr03a dcr05a dcr07a dcr09a dcr0ba dcr0da dcr0fa dcr11a dcr13a dcr15a dcr17a dcr19a dcr1ba dcr1da dcr1fa dcr21a dcr23a dcr25a dcr27a 
dcr29a dcr2ba dcr2da dcr2fa dcr31a dcr33a dcr35a dcr37a dcr39a dcr3ba dcr3da dcr3fa dcr01b dcr03b dcr05b dcr07b dcr09b dcr0bb dcr0db dcr0fb dcr11b dcr13b dcr15b dcr17b dcr19b dcr1bb dcr1db dcr1fb dcr21b dcr23b dcr25b dcr27b dcr29b dcr2bb dcr2db dcr2fb dcr31b dcr33b dcr35b dcr37b dcr39b dcr3bb dcr3db dcr3fb dcr01c dcr03c dcr05c dcr07c dcr09c dcr0bc dcr0dc dcr0fc dcr11c dcr13c dcr15c dcr17c dcr19c dcr1bc dcr1dc dcr1fc dcr21c dcr23c dcr25c dcr27c dcr29c dcr2bc dcr2dc dcr2fc dcr31c dcr33c dcr35c dcr37c dcr39c dcr3bc dcr3dc dcr3fc dcr01d dcr03d dcr05d dcr07d dcr09d dcr0bd dcr0dd dcr0fd dcr11d dcr13d dcr15d dcr17d dcr19d dcr1bd dcr1dd dcr1fd dcr21d dcr23d dcr25d dcr27d dcr29d dcr2bd dcr2dd dcr2fd dcr31d dcr33d dcr35d dcr37d dcr39d dcr3bd dcr3dd dcr3fd dcr01e dcr03e dcr05e dcr07e dcr09e dcr0be dcr0de dcr0fe dcr11e dcr13e dcr15e dcr17e dcr19e dcr1be dcr1de dcr1fe dcr21e dcr23e dcr25e dcr27e dcr29e dcr2be dcr2de dcr2fe dcr31e dcr33e dcr35e dcr37e dcr39e dcr3be dcr3de dcr3fe dcr01f dcr03f dcr05f dcr07f dcr09f dcr0bf dcr0df dcr0ff dcr11f dcr13f dcr15f dcr17f dcr19f dcr1bf dcr1df dcr1ff dcr21f dcr23f dcr25f dcr27f dcr29f dcr2bf dcr2df dcr2ff dcr31f dcr33f dcr35f dcr37f dcr39f dcr3bf dcr3df dcr3ff ]; attach variables [vrDR vrAR vrBR vrSR vrCR] [ vs32 vs33 vs34 vs35 vs36 vs37 vs38 vs39 vs40 vs41 vs42 vs43 vs44 vs45 vs46 vs47 vs48 vs49 vs50 vs51 vs52 vs53 vs54 vs55 vs56 vs57 vs58 vs59 vs60 vs61 vs62 vs63 ]; ## These attaches are for the Altivec instructions attach names [ vrDD vrAD vrBD vrSD vrCD] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; vrD: vrDD is vrDD & vrDR { export vrDR; } vrA: vrAD is vrAD & vrAR { export vrAR; } vrB: vrBD is vrBD & vrBR { export vrBR; } vrC: vrCD is vrCD & vrCR { export vrCR; } vrS: vrSD is vrSD & vrSR { export vrSR; } # AltVect Vector vrD sub-piece selectors # AltVect Vector vrD sub-piece selectors for size 64 attach variables vrD_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 
vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; attach variables vrD_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; # AltVect Vector vrD sub-piece selectors for size 32 attach variables vrD_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; attach variables vrD_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; attach variables vrD_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; attach variables vrD_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 
vr29_32_3 vr30_32_3 vr31_32_3 ]; # AltVect Vector vrD sub-piece selectors for size 16 attach variables vrD_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; attach variables vrD_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; attach variables vrD_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; attach variables vrD_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; attach variables vrD_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; attach variables vrD_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 
vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; attach variables vrD_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; attach variables vrD_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; # AltVect Vector vrD sub-piece selectors for size 8 attach variables vrD_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; attach variables vrD_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; attach variables vrD_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; attach variables vrD_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 
vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; attach variables vrD_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; attach variables vrD_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; attach variables vrD_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; attach variables vrD_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; attach variables vrD_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; attach variables vrD_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; attach variables vrD_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 
vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; attach variables vrD_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; attach variables vrD_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; attach variables vrD_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; attach variables vrD_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; attach variables vrD_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; # AltVect Vector vrA 
sub-piece selectors # AltVect Vector vrA sub-piece selectors for size 64 attach variables vrA_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; attach variables vrA_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; # AltVect Vector vrA sub-piece selectors for size 32 attach variables vrA_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; attach variables vrA_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; attach variables vrA_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; attach variables vrA_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 
vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; # AltVect Vector vrA sub-piece selectors for size 16 attach variables vrA_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; attach variables vrA_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; attach variables vrA_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; attach variables vrA_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; attach variables vrA_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ]; attach variables vrA_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 
vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ]; attach variables vrA_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ]; attach variables vrA_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ]; # AltVect Vector vrA sub-piece selectors for size 8 attach variables vrA_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ]; attach variables vrA_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ]; attach variables vrA_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ]; attach variables vrA_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 
vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ]; attach variables vrA_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ]; attach variables vrA_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ]; attach variables vrA_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ]; attach variables vrA_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ]; attach variables vrA_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ]; attach variables vrA_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 
vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ]; attach variables vrA_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ]; attach variables vrA_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ]; attach variables vrA_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ]; attach variables vrA_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ]; attach variables vrA_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ]; attach variables vrA_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 
vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ]; # AltVect Vector vrB sub-piece selectors # AltVect Vector vrB sub-piece selectors for size 64 attach variables vrB_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ]; attach variables vrB_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ]; # AltVect Vector vrB sub-piece selectors for size 32 attach variables vrB_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ]; attach variables vrB_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ]; attach variables vrB_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ]; attach variables vrB_32_3 
[vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ]; # AltVect Vector vrB sub-piece selectors for size 16 attach variables vrB_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ]; attach variables vrB_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ]; attach variables vrB_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ]; attach variables vrB_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ]; attach variables vrB_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 
vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ];
# NOTE(review): the attach tables below are machine-generated. Each statement binds a
# decode field (vrB/vrS/vrC _ laneSize _ laneIndex) to the matching sub-piece varnodes
# of the 32 AltiVec vector registers vr0..vr31. Defined elsewhere in this file:
# vrN_<size>_<idx> varnodes; pattern fields vrB_*, vrS_*, vrC_*.
attach variables vrB_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ];
attach variables vrB_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ];
attach variables vrB_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ];
# AltVect Vector vrB sub-piece selectors for size 8
attach variables vrB_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ];
attach variables vrB_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ];
attach variables vrB_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ];
attach variables vrB_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ];
attach variables vrB_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ];
attach variables vrB_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ];
attach variables vrB_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ];
attach variables vrB_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ];
attach variables vrB_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ];
attach variables vrB_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ];
attach variables vrB_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ];
attach variables vrB_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ];
attach variables vrB_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ];
attach variables vrB_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ];
attach variables vrB_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ];
attach variables vrB_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ];
# AltVect Vector vrS sub-piece selectors
# AltVect Vector vrS sub-piece selectors for size 64
attach variables vrS_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ];
attach variables vrS_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ];
# AltVect Vector vrS sub-piece selectors for size 32
attach variables vrS_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ];
attach variables vrS_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ];
attach variables vrS_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ];
attach variables vrS_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ];
# AltVect Vector vrS sub-piece selectors for size 16
attach variables vrS_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ];
attach variables vrS_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ];
attach variables vrS_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ];
attach variables vrS_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ];
attach variables vrS_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ];
attach variables vrS_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ];
attach variables vrS_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ];
attach variables vrS_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ];
# AltVect Vector vrS sub-piece selectors for size 8
attach variables vrS_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ];
attach variables vrS_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ];
attach variables vrS_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ];
attach variables vrS_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ];
attach variables vrS_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ];
attach variables vrS_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ];
attach variables vrS_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ];
attach variables vrS_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ];
attach variables vrS_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ];
attach variables vrS_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ];
attach variables vrS_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ];
attach variables vrS_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ];
attach variables vrS_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ];
attach variables vrS_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ];
attach variables vrS_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ];
attach variables vrS_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ];
# AltVect Vector vrC sub-piece selectors
# AltVect Vector vrC sub-piece selectors for size 64
attach variables vrC_64_0 [vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ];
attach variables vrC_64_1 [vr0_64_1 vr1_64_1 vr2_64_1 vr3_64_1 vr4_64_1 vr5_64_1 vr6_64_1 vr7_64_1 vr8_64_1 vr9_64_1 vr10_64_1 vr11_64_1 vr12_64_1 vr13_64_1 vr14_64_1 vr15_64_1 vr16_64_1 vr17_64_1 vr18_64_1 vr19_64_1 vr20_64_1 vr21_64_1 vr22_64_1 vr23_64_1 vr24_64_1 vr25_64_1 vr26_64_1 vr27_64_1 vr28_64_1 vr29_64_1 vr30_64_1 vr31_64_1 ];
# AltVect Vector vrC sub-piece selectors for size 32
attach variables vrC_32_0 [vr0_32_0 vr1_32_0 vr2_32_0 vr3_32_0 vr4_32_0 vr5_32_0 vr6_32_0 vr7_32_0 vr8_32_0 vr9_32_0 vr10_32_0 vr11_32_0 vr12_32_0 vr13_32_0 vr14_32_0 vr15_32_0 vr16_32_0 vr17_32_0 vr18_32_0 vr19_32_0 vr20_32_0 vr21_32_0 vr22_32_0 vr23_32_0 vr24_32_0 vr25_32_0 vr26_32_0 vr27_32_0 vr28_32_0 vr29_32_0 vr30_32_0 vr31_32_0 ];
attach variables vrC_32_1 [vr0_32_1 vr1_32_1 vr2_32_1 vr3_32_1 vr4_32_1 vr5_32_1 vr6_32_1 vr7_32_1 vr8_32_1 vr9_32_1 vr10_32_1 vr11_32_1 vr12_32_1 vr13_32_1 vr14_32_1 vr15_32_1 vr16_32_1 vr17_32_1 vr18_32_1 vr19_32_1 vr20_32_1 vr21_32_1 vr22_32_1 vr23_32_1 vr24_32_1 vr25_32_1 vr26_32_1 vr27_32_1 vr28_32_1 vr29_32_1 vr30_32_1 vr31_32_1 ];
attach variables vrC_32_2 [vr0_32_2 vr1_32_2 vr2_32_2 vr3_32_2 vr4_32_2 vr5_32_2 vr6_32_2 vr7_32_2 vr8_32_2 vr9_32_2 vr10_32_2 vr11_32_2 vr12_32_2 vr13_32_2 vr14_32_2 vr15_32_2 vr16_32_2 vr17_32_2 vr18_32_2 vr19_32_2 vr20_32_2 vr21_32_2 vr22_32_2 vr23_32_2 vr24_32_2 vr25_32_2 vr26_32_2 vr27_32_2 vr28_32_2 vr29_32_2 vr30_32_2 vr31_32_2 ];
attach variables vrC_32_3 [vr0_32_3 vr1_32_3 vr2_32_3 vr3_32_3 vr4_32_3 vr5_32_3 vr6_32_3 vr7_32_3 vr8_32_3 vr9_32_3 vr10_32_3 vr11_32_3 vr12_32_3 vr13_32_3 vr14_32_3 vr15_32_3 vr16_32_3 vr17_32_3 vr18_32_3 vr19_32_3 vr20_32_3 vr21_32_3 vr22_32_3 vr23_32_3 vr24_32_3 vr25_32_3 vr26_32_3 vr27_32_3 vr28_32_3 vr29_32_3 vr30_32_3 vr31_32_3 ];
# AltVect Vector vrC sub-piece selectors for size 16
attach variables vrC_16_0 [vr0_16_0 vr1_16_0 vr2_16_0 vr3_16_0 vr4_16_0 vr5_16_0 vr6_16_0 vr7_16_0 vr8_16_0 vr9_16_0 vr10_16_0 vr11_16_0 vr12_16_0 vr13_16_0 vr14_16_0 vr15_16_0 vr16_16_0 vr17_16_0 vr18_16_0 vr19_16_0 vr20_16_0 vr21_16_0 vr22_16_0 vr23_16_0 vr24_16_0 vr25_16_0 vr26_16_0 vr27_16_0 vr28_16_0 vr29_16_0 vr30_16_0 vr31_16_0 ];
attach variables vrC_16_1 [vr0_16_1 vr1_16_1 vr2_16_1 vr3_16_1 vr4_16_1 vr5_16_1 vr6_16_1 vr7_16_1 vr8_16_1 vr9_16_1 vr10_16_1 vr11_16_1 vr12_16_1 vr13_16_1 vr14_16_1 vr15_16_1 vr16_16_1 vr17_16_1 vr18_16_1 vr19_16_1 vr20_16_1 vr21_16_1 vr22_16_1 vr23_16_1 vr24_16_1 vr25_16_1 vr26_16_1 vr27_16_1 vr28_16_1 vr29_16_1 vr30_16_1 vr31_16_1 ];
attach variables vrC_16_2 [vr0_16_2 vr1_16_2 vr2_16_2 vr3_16_2 vr4_16_2 vr5_16_2 vr6_16_2 vr7_16_2 vr8_16_2 vr9_16_2 vr10_16_2 vr11_16_2 vr12_16_2 vr13_16_2 vr14_16_2 vr15_16_2 vr16_16_2 vr17_16_2 vr18_16_2 vr19_16_2 vr20_16_2 vr21_16_2 vr22_16_2 vr23_16_2 vr24_16_2 vr25_16_2 vr26_16_2 vr27_16_2 vr28_16_2 vr29_16_2 vr30_16_2 vr31_16_2 ];
attach variables vrC_16_3 [vr0_16_3 vr1_16_3 vr2_16_3 vr3_16_3 vr4_16_3 vr5_16_3 vr6_16_3 vr7_16_3 vr8_16_3 vr9_16_3 vr10_16_3 vr11_16_3 vr12_16_3 vr13_16_3 vr14_16_3 vr15_16_3 vr16_16_3 vr17_16_3 vr18_16_3 vr19_16_3 vr20_16_3 vr21_16_3 vr22_16_3 vr23_16_3 vr24_16_3 vr25_16_3 vr26_16_3 vr27_16_3 vr28_16_3 vr29_16_3 vr30_16_3 vr31_16_3 ];
attach variables vrC_16_4 [vr0_16_4 vr1_16_4 vr2_16_4 vr3_16_4 vr4_16_4 vr5_16_4 vr6_16_4 vr7_16_4 vr8_16_4 vr9_16_4 vr10_16_4 vr11_16_4 vr12_16_4 vr13_16_4 vr14_16_4 vr15_16_4 vr16_16_4 vr17_16_4 vr18_16_4 vr19_16_4 vr20_16_4 vr21_16_4 vr22_16_4 vr23_16_4 vr24_16_4 vr25_16_4 vr26_16_4 vr27_16_4 vr28_16_4 vr29_16_4 vr30_16_4 vr31_16_4 ];
attach variables vrC_16_5 [vr0_16_5 vr1_16_5 vr2_16_5 vr3_16_5 vr4_16_5 vr5_16_5 vr6_16_5 vr7_16_5 vr8_16_5 vr9_16_5 vr10_16_5 vr11_16_5 vr12_16_5 vr13_16_5 vr14_16_5 vr15_16_5 vr16_16_5 vr17_16_5 vr18_16_5 vr19_16_5 vr20_16_5 vr21_16_5 vr22_16_5 vr23_16_5 vr24_16_5 vr25_16_5 vr26_16_5 vr27_16_5 vr28_16_5 vr29_16_5 vr30_16_5 vr31_16_5 ];
attach variables vrC_16_6 [vr0_16_6 vr1_16_6 vr2_16_6 vr3_16_6 vr4_16_6 vr5_16_6 vr6_16_6 vr7_16_6 vr8_16_6 vr9_16_6 vr10_16_6 vr11_16_6 vr12_16_6 vr13_16_6 vr14_16_6 vr15_16_6 vr16_16_6 vr17_16_6 vr18_16_6 vr19_16_6 vr20_16_6 vr21_16_6 vr22_16_6 vr23_16_6 vr24_16_6 vr25_16_6 vr26_16_6 vr27_16_6 vr28_16_6 vr29_16_6 vr30_16_6 vr31_16_6 ];
attach variables vrC_16_7 [vr0_16_7 vr1_16_7 vr2_16_7 vr3_16_7 vr4_16_7 vr5_16_7 vr6_16_7 vr7_16_7 vr8_16_7 vr9_16_7 vr10_16_7 vr11_16_7 vr12_16_7 vr13_16_7 vr14_16_7 vr15_16_7 vr16_16_7 vr17_16_7 vr18_16_7 vr19_16_7 vr20_16_7 vr21_16_7 vr22_16_7 vr23_16_7 vr24_16_7 vr25_16_7 vr26_16_7 vr27_16_7 vr28_16_7 vr29_16_7 vr30_16_7 vr31_16_7 ];
# AltVect Vector vrC sub-piece selectors for size 8
attach variables vrC_8_0 [vr0_8_0 vr1_8_0 vr2_8_0 vr3_8_0 vr4_8_0 vr5_8_0 vr6_8_0 vr7_8_0 vr8_8_0 vr9_8_0 vr10_8_0 vr11_8_0 vr12_8_0 vr13_8_0 vr14_8_0 vr15_8_0 vr16_8_0 vr17_8_0 vr18_8_0 vr19_8_0 vr20_8_0 vr21_8_0 vr22_8_0 vr23_8_0 vr24_8_0 vr25_8_0 vr26_8_0 vr27_8_0 vr28_8_0 vr29_8_0 vr30_8_0 vr31_8_0 ];
attach variables vrC_8_1 [vr0_8_1 vr1_8_1 vr2_8_1 vr3_8_1 vr4_8_1 vr5_8_1 vr6_8_1 vr7_8_1 vr8_8_1 vr9_8_1 vr10_8_1 vr11_8_1 vr12_8_1 vr13_8_1 vr14_8_1 vr15_8_1 vr16_8_1 vr17_8_1 vr18_8_1 vr19_8_1 vr20_8_1 vr21_8_1 vr22_8_1 vr23_8_1 vr24_8_1 vr25_8_1 vr26_8_1 vr27_8_1 vr28_8_1 vr29_8_1 vr30_8_1 vr31_8_1 ];
attach variables vrC_8_2 [vr0_8_2 vr1_8_2 vr2_8_2 vr3_8_2 vr4_8_2 vr5_8_2 vr6_8_2 vr7_8_2 vr8_8_2 vr9_8_2 vr10_8_2 vr11_8_2 vr12_8_2 vr13_8_2 vr14_8_2 vr15_8_2 vr16_8_2 vr17_8_2 vr18_8_2 vr19_8_2 vr20_8_2 vr21_8_2 vr22_8_2 vr23_8_2 vr24_8_2 vr25_8_2 vr26_8_2 vr27_8_2 vr28_8_2 vr29_8_2 vr30_8_2 vr31_8_2 ];
attach variables vrC_8_3 [vr0_8_3 vr1_8_3 vr2_8_3 vr3_8_3 vr4_8_3 vr5_8_3 vr6_8_3 vr7_8_3 vr8_8_3 vr9_8_3 vr10_8_3 vr11_8_3 vr12_8_3 vr13_8_3 vr14_8_3 vr15_8_3 vr16_8_3 vr17_8_3 vr18_8_3 vr19_8_3 vr20_8_3 vr21_8_3 vr22_8_3 vr23_8_3 vr24_8_3 vr25_8_3 vr26_8_3 vr27_8_3 vr28_8_3 vr29_8_3 vr30_8_3 vr31_8_3 ];
attach variables vrC_8_4 [vr0_8_4 vr1_8_4 vr2_8_4 vr3_8_4 vr4_8_4 vr5_8_4 vr6_8_4 vr7_8_4 vr8_8_4 vr9_8_4 vr10_8_4 vr11_8_4 vr12_8_4 vr13_8_4 vr14_8_4 vr15_8_4 vr16_8_4 vr17_8_4 vr18_8_4 vr19_8_4 vr20_8_4 vr21_8_4 vr22_8_4 vr23_8_4 vr24_8_4 vr25_8_4 vr26_8_4 vr27_8_4 vr28_8_4 vr29_8_4 vr30_8_4 vr31_8_4 ];
attach variables vrC_8_5 [vr0_8_5 vr1_8_5 vr2_8_5 vr3_8_5 vr4_8_5 vr5_8_5 vr6_8_5 vr7_8_5 vr8_8_5 vr9_8_5 vr10_8_5 vr11_8_5 vr12_8_5 vr13_8_5 vr14_8_5 vr15_8_5 vr16_8_5 vr17_8_5 vr18_8_5 vr19_8_5 vr20_8_5 vr21_8_5 vr22_8_5 vr23_8_5 vr24_8_5 vr25_8_5 vr26_8_5 vr27_8_5 vr28_8_5 vr29_8_5 vr30_8_5 vr31_8_5 ];
attach variables vrC_8_6 [vr0_8_6 vr1_8_6 vr2_8_6 vr3_8_6 vr4_8_6 vr5_8_6 vr6_8_6 vr7_8_6 vr8_8_6 vr9_8_6 vr10_8_6 vr11_8_6 vr12_8_6 vr13_8_6 vr14_8_6 vr15_8_6 vr16_8_6 vr17_8_6 vr18_8_6 vr19_8_6 vr20_8_6 vr21_8_6 vr22_8_6 vr23_8_6 vr24_8_6 vr25_8_6 vr26_8_6 vr27_8_6 vr28_8_6 vr29_8_6 vr30_8_6 vr31_8_6 ];
attach variables vrC_8_7 [vr0_8_7 vr1_8_7 vr2_8_7 vr3_8_7 vr4_8_7 vr5_8_7 vr6_8_7 vr7_8_7 vr8_8_7 vr9_8_7 vr10_8_7 vr11_8_7 vr12_8_7 vr13_8_7 vr14_8_7 vr15_8_7 vr16_8_7 vr17_8_7 vr18_8_7 vr19_8_7 vr20_8_7 vr21_8_7 vr22_8_7 vr23_8_7 vr24_8_7 vr25_8_7 vr26_8_7 vr27_8_7 vr28_8_7 vr29_8_7 vr30_8_7 vr31_8_7 ];
attach variables vrC_8_8 [vr0_8_8 vr1_8_8 vr2_8_8 vr3_8_8 vr4_8_8 vr5_8_8 vr6_8_8 vr7_8_8 vr8_8_8 vr9_8_8 vr10_8_8 vr11_8_8 vr12_8_8 vr13_8_8 vr14_8_8 vr15_8_8 vr16_8_8 vr17_8_8 vr18_8_8 vr19_8_8 vr20_8_8 vr21_8_8 vr22_8_8 vr23_8_8 vr24_8_8 vr25_8_8 vr26_8_8 vr27_8_8 vr28_8_8 vr29_8_8 vr30_8_8 vr31_8_8 ];
attach variables vrC_8_9 [vr0_8_9 vr1_8_9 vr2_8_9 vr3_8_9 vr4_8_9 vr5_8_9 vr6_8_9 vr7_8_9 vr8_8_9 vr9_8_9 vr10_8_9 vr11_8_9 vr12_8_9 vr13_8_9 vr14_8_9 vr15_8_9 vr16_8_9 vr17_8_9 vr18_8_9 vr19_8_9 vr20_8_9 vr21_8_9 vr22_8_9 vr23_8_9 vr24_8_9 vr25_8_9 vr26_8_9 vr27_8_9 vr28_8_9 vr29_8_9 vr30_8_9 vr31_8_9 ];
attach variables vrC_8_10 [vr0_8_10 vr1_8_10 vr2_8_10 vr3_8_10 vr4_8_10 vr5_8_10 vr6_8_10 vr7_8_10 vr8_8_10 vr9_8_10 vr10_8_10 vr11_8_10 vr12_8_10 vr13_8_10 vr14_8_10 vr15_8_10 vr16_8_10 vr17_8_10 vr18_8_10 vr19_8_10 vr20_8_10 vr21_8_10 vr22_8_10 vr23_8_10 vr24_8_10 vr25_8_10 vr26_8_10 vr27_8_10 vr28_8_10 vr29_8_10 vr30_8_10 vr31_8_10 ];
attach variables vrC_8_11 [vr0_8_11 vr1_8_11 vr2_8_11 vr3_8_11 vr4_8_11 vr5_8_11 vr6_8_11 vr7_8_11 vr8_8_11 vr9_8_11 vr10_8_11 vr11_8_11 vr12_8_11 vr13_8_11 vr14_8_11 vr15_8_11 vr16_8_11 vr17_8_11 vr18_8_11 vr19_8_11 vr20_8_11 vr21_8_11 vr22_8_11 vr23_8_11 vr24_8_11 vr25_8_11 vr26_8_11 vr27_8_11 vr28_8_11 vr29_8_11 vr30_8_11 vr31_8_11 ];
attach variables vrC_8_12 [vr0_8_12 vr1_8_12 vr2_8_12 vr3_8_12 vr4_8_12 vr5_8_12 vr6_8_12 vr7_8_12 vr8_8_12 vr9_8_12 vr10_8_12 vr11_8_12 vr12_8_12 vr13_8_12 vr14_8_12 vr15_8_12 vr16_8_12 vr17_8_12 vr18_8_12 vr19_8_12 vr20_8_12 vr21_8_12 vr22_8_12 vr23_8_12 vr24_8_12 vr25_8_12 vr26_8_12 vr27_8_12 vr28_8_12 vr29_8_12 vr30_8_12 vr31_8_12 ];
attach variables vrC_8_13 [vr0_8_13 vr1_8_13 vr2_8_13 vr3_8_13 vr4_8_13 vr5_8_13 vr6_8_13 vr7_8_13 vr8_8_13 vr9_8_13 vr10_8_13 vr11_8_13 vr12_8_13 vr13_8_13 vr14_8_13 vr15_8_13 vr16_8_13 vr17_8_13 vr18_8_13 vr19_8_13 vr20_8_13 vr21_8_13 vr22_8_13 vr23_8_13 vr24_8_13 vr25_8_13 vr26_8_13 vr27_8_13 vr28_8_13 vr29_8_13 vr30_8_13 vr31_8_13 ];
attach variables vrC_8_14 [vr0_8_14 vr1_8_14 vr2_8_14 vr3_8_14 vr4_8_14 vr5_8_14 vr6_8_14 vr7_8_14 vr8_8_14 vr9_8_14 vr10_8_14 vr11_8_14 vr12_8_14 vr13_8_14 vr14_8_14 vr15_8_14 vr16_8_14 vr17_8_14 vr18_8_14 vr19_8_14 vr20_8_14 vr21_8_14 vr22_8_14 vr23_8_14 vr24_8_14 vr25_8_14 vr26_8_14 vr27_8_14 vr28_8_14 vr29_8_14 vr30_8_14 vr31_8_14 ];
attach variables vrC_8_15 [vr0_8_15 vr1_8_15 vr2_8_15 vr3_8_15 vr4_8_15 vr5_8_15 vr6_8_15 vr7_8_15 vr8_8_15 vr9_8_15 vr10_8_15 vr11_8_15 vr12_8_15 vr13_8_15 vr14_8_15 vr15_8_15 vr16_8_15 vr17_8_15 vr18_8_15 vr19_8_15 vr20_8_15 vr21_8_15 vr22_8_15 vr23_8_15 vr24_8_15 vr25_8_15 vr26_8_15 vr27_8_15 vr28_8_15 vr29_8_15 vr30_8_15 vr31_8_15 ];
################################################################
# Pseudo Instructions
################################################################
# User-defined p-code ops (black boxes): cache management, cache/TLB access,
# raw float rounding/exception queries, and multiply-accumulate primitives.
define pcodeop clearHistory;
define pcodeop countTrailingZeros;
define pcodeop dataCacheBlockAllocate;
define pcodeop dataCacheBlockFlush;
define pcodeop dataCacheBlockInvalidate;
define pcodeop dataCacheBlockStore;
define pcodeop dataCacheBlockTouch;
define pcodeop dataCacheBlockTouchForStore;
define pcodeop dataCacheBlockClearToZero;
define pcodeop dataCacheCongruenceClassInvalidate;
define pcodeop dataCacheRead;
define pcodeop externalControlIn;
define pcodeop externalControlOut;
define pcodeop enforceInOrderExecutionIO;
define pcodeop instructionCacheBlockInvalidate;
define pcodeop instructionCacheBlockTouch;
define pcodeop instructionCacheCongruenceClassInvalidate;
define pcodeop instructionCacheRead;
define pcodeop instructionSynchronize;
define pcodeop floatAddOverflow;
define pcodeop floatDivOverflow;
define pcodeop floatAddRoundedUp;
define pcodeop floatDivRoundedUp;
define pcodeop floatAddInexact;
define pcodeop floatDivInexact;
define pcodeop floatAddUnderflow;
define pcodeop floatDivUnderflow;
define pcodeop floatInfinityAdd;
define pcodeop intToFloatRoundedUp;
define pcodeop intToFloatInexact;
define pcodeop invalidFloatToInt;
define pcodeop floatToIntRoundedUp;
define pcodeop floatToIntInexact;
define pcodeop floatInfinityDivide;
define pcodeop
floatMaddInexact;
define pcodeop floatMaddRoundedUp;
define pcodeop floatMaddOverflow;
define pcodeop floatMaddUnderflow;
define pcodeop floatInfinityMulZero;
define pcodeop floatMsubInexact;
define pcodeop floatMsubRoundedUp;
define pcodeop floatMsubOverflow;
define pcodeop floatMsubUnderflow;
define pcodeop floatInfinitySub;
define pcodeop floatSubRoundedUp;
define pcodeop floatSubInexact;
define pcodeop floatSubOverflow;
define pcodeop floatSubUnderflow;
define pcodeop floatMulRoundedUp;
define pcodeop floatMulOverflow;
define pcodeop floatMulUnderflow;
define pcodeop floatMulInexact;
define pcodeop sqrtInvalid;
define pcodeop floatSqrtRoundedUp;
define pcodeop floatSqrtInexact;
define pcodeop eventInterrupt;
define pcodeop illegal;
define pcodeop message;
define pcodeop movebuffer;
define pcodeop stopT;
define pcodeop waitT;
define pcodeop mematom;
define pcodeop random;
define pcodeop returnFromInterrupt;
define pcodeop returnFromCriticalInterrupt;
define pcodeop returnFromDebugInterrupt;
define pcodeop returnFromGuestInterrupt;
define pcodeop returnFromMachineCheckInterrupt;
define pcodeop syscall;
define pcodeop slbInvalidateAll;
define pcodeop slbInvalidateEntry;
define pcodeop slbMoveFromEntryESID;
define pcodeop slbMoveFromEntryVSID;
define pcodeop slbMoveToEntry;
define pcodeop storeDoubleWordConditionalIndexed;
define pcodeop storeWordConditionalIndexed;
define pcodeop trapWord;
define pcodeop trapDoubleWordImmediate;
define pcodeop trapDoubleWord;
define pcodeop sync;
define pcodeop loadString;
define pcodeop storeString;
define pcodeop xer_mac_update;
define pcodeop macchw;
define pcodeop macchws;
define pcodeop macchwsu;
define pcodeop macchwu;
define pcodeop machhw;
define pcodeop machhws;
define pcodeop machhwsu;
define pcodeop machhwu;
define pcodeop maclhw;
define pcodeop maclhws;
define pcodeop maclhwsu;
define pcodeop maclhwu;
define pcodeop mulchw;
define pcodeop mulchwu;
define pcodeop mulhhw;
define pcodeop mulhhwu;
define pcodeop mullhw;
define pcodeop mullhwu;
define pcodeop nmacchw;
define pcodeop nmacchws;
define pcodeop nmachhw;
define pcodeop nmachhws;
define pcodeop nmaclhw;
define pcodeop nmaclhws;
define pcodeop copytrans;
define pcodeop pastetrans;
define pcodeop transaction;
define pcodeop TLBRead;
define pcodeop TLBSearchIndexed;
define pcodeop TLBWrite;
define pcodeop WriteExternalEnable;
define pcodeop WriteExternalEnableImmediate;

# This is really used in the altivec version, but since it's a registered pcode op
# and due to the way things get @included, this needs to be here
define pcodeop vectorPermute;

################################################################
# Macros
################################################################

# Set XER[CA] for an arithmetic right shift: carry-out occurs when the value is
# negative and any of the 'sa' bits shifted out were set.
macro shiftCarry(value, sa) {
    local mask = value; # force mask to have same size as value (may vary)
    mask = (1 << sa) - 1;
    xer_ca = (value s< 0) && ((value & mask)!=0);
}

# Extract bit 'bitIndex' (0 = most significant of the 4-bit CR field) from crReg.
macro getCrBit(crReg, bitIndex, result) {
    tmp:1 = crReg >> (3-bitIndex);
    result = tmp & 1;
}

# Write 'bit' into position 'bitIndex' (0 = most significant) of the 4-bit CR field.
# NOTE(review): the body of this macro and the head of cr0flags were truncated in
# this copy; reconstructed from the surviving fragments and the symmetric getCrBit —
# confirm against upstream Ghidra ppc_common.sinc.
macro setCrBit(crReg, bitIndex, bit) {
    shift:1 = 3-bitIndex;
    mask:1 = ~(1 << shift);
    crReg = (crReg & mask) | (bit << shift);
}

# CR0 record bits for Rc=1 integer instructions: LT, GT, EQ, SO.
macro cr0flags(result) {
    setCrBit(cr0, 0, (result s< 0)); # 0b100
    setCrBit(cr0, 1, (result s> 0)); # 0b010
    setCrBit(cr0, 2, (result == 0)); # 0b001
    setCrBit(cr0, 3, (xer_so & 1));
}

# Signed-add overflow into XER[OV], sticky into XER[SO].
macro addOverflow(a,b) {
    xer_ov = scarry(a,b);
    xer_so = xer_so || xer_ov;
}

# Signed-subtract overflow into XER[OV], sticky into XER[SO].
macro subOverflow(a,b) {
    xer_ov = sborrow(a,b);
    xer_so = xer_so || xer_ov;
}

# Carry-out of op1 + op2 + XER[CA] into XER[CA].
macro addExtendedCarry(op1,op2){
    local carryIn:$(REGISTER_SIZE) = zext(xer_ca);
    tmp:$(REGISTER_SIZE) = op2 + carryIn;
    xer_ca = carry(op2, carryIn) || carry(op1, tmp);
}

# Signed overflow of op1 + op2 + XER[CA] into XER[OV], sticky into XER[SO].
macro addExtendedOverflow(op1, op2) {
    local carryIn:$(REGISTER_SIZE) = zext(xer_ca);
    tmp:$(REGISTER_SIZE) = op1 + op2;
    xer_ov = scarry(op1,op2) ^^ scarry(tmp, carryIn);
    xer_so = xer_so || xer_ov;
}

# Carry (not-borrow) of op1 - op2 - !XER[CA] into XER[CA].
macro subExtendedCarry(op1,op2){
    local carryIn = zext(!xer_ca);
    local CYa = op1 < op2;
    local result = op1 - op2;
    xer_ca = !(CYa || (result < carryIn) );
}

# Signed overflow of op1 - op2 - !XER[CA] into XER[OV], sticky into XER[SO].
macro subExtendedOverflow(op1, op2) {
    local carryIn = zext(!xer_ca);
    local result = op1 - op2;
    xer_ov = sborrow( op1, op2 ) ^^ sborrow( result, carryIn );
    xer_so = xer_so || xer_ov;
}

# check b=0 or (a=0x80000000 and b=-1)
macro divOverflow(a,b) {
    xer_ov = (b==0) || ((b==-1) && (a==0x80000000));
    xer_so = xer_so || xer_ov;
}

# Divide-by-zero overflow only (unsigned divide).
macro divZero(b) {
    xer_ov = (b==0);
    xer_so = xer_so || xer_ov;
}

# OV set when a 64-bit product does not fit in the low 32 bits (sign-extended).
macro mulOverflow64(result) {
    local tmp:4 = result(0);
    local sext_tmp:8 = sext(tmp);
    xer_ov = (sext_tmp != result);
    xer_so = xer_so || xer_ov;
}

# OV set when a 128-bit product does not fit in the low 64 bits (sign-extended).
macro mulOverflow128(result) {
    local tmp:8 = result(0);
    local sext_tmp:16 = sext(tmp);
    xer_ov = (sext_tmp != result);
    xer_so = xer_so || xer_ov;
}

# CR1 record bits for Rc=1 floating-point instructions: CR1 mirrors
# FPSCR[FX,FEX,VX,OX] in bits 0..3.
# BUG FIX: the OX bit was written as setCrBit(cr3, 2, fp_ox), which clobbers an
# unrelated field of cr3 and leaves CR1 bit 3 stale; per the pattern of the first
# three calls (and the PowerPC ISA definition of CR1) it belongs in cr1 bit 3.
macro cr1flags() {
    setCrBit(cr1, 0, fp_fx);
    setCrBit(cr1, 1, fp_fex);
    setCrBit(cr1, 2, fp_vx);
    setCrBit(cr1, 3, fp_ox);
}

# FPSCR[FPRF] condition bits from a float result: less, greater, equal, NaN.
macro setFPRF(result) {
    fp_cc0 = result f< 0;
    fp_cc1 = result f> 0;
    fp_cc2 = result f== 0;
    fp_cc3 = nan(result);
}

# Recompute the FPSCR summary bits VX, FX and FEX from the individual sticky flags.
macro setSummaryFPSCR() {
    fp_vx = fp_vxsnan | fp_vxisi | fp_vxidi | fp_vxzdz | fp_vximz | fp_vxvc | fp_vxsoft | fp_vxsqrt | fp_vxcvi;
    fp_fx = fp_fx | fp_ox | fp_ux | fp_zx | fp_xx;
    fp_fex = (fp_vx & fp_ve) ^ (fp_ox & fp_oe) ^ (fp_ux & fp_ue) ^ (fp_zx & fp_ze) ^ (fp_xx & fp_xe);
}

# FPSCR update after a float add (the commented-out lines model rounding/overflow
# via the pseudo ops above and are intentionally disabled).
macro setFPAddFlags(op1, op2, result) {
    setFPRF(result);
    # fp_fr = floatAddRoundedUp(op1, op2);
    # fp_fi = floatAddInexact(op1, op2);
    # fp_ox = fp_ox | floatAddOverflow(op1, op2);
    # fp_ux = fp_ux | floatAddUnderflow(op1, op2);
    fp_xx = fp_xx | fp_fi;
    fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
    # fp_vxisi = fp_vxisi | floatInfinityAdd(op1, op2);
    setSummaryFPSCR();
}

# FPSCR update after a float divide; also tracks divide-by-zero and 0/0.
macro setFPDivFlags(op1, op2, result) {
    setFPRF(result);
    # fp_fr = floatDivRoundedUp(op1, op2);
    # fp_fi = floatDivInexact(op1, op2);
    # fp_ox = fp_ox | floatDivOverflow(op1, op2);
    # fp_ux = fp_ux | floatDivUnderflow(op1, op2);
    fp_zx = fp_zx | (op2 f== 0);
    fp_xx = fp_xx | fp_fi;
    fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
    # fp_vxidi = fp_vxidi | floatInfinityDivide(op1, op2);
    fp_vxzdz = fp_vxzdz | ((op1 f== 0) && (op2 f== 0));
    setSummaryFPSCR();
}

# FPSCR update after a float multiply.
macro setFPMulFlags(op1, op2, result) {
    setFPRF(result);
    # fp_fr = floatMulRoundedUp(op1, op2);
    # fp_fi = floatMulInexact(op1, op2);
    # fp_ox = fp_ox | floatMulOverflow(op1, op2);
    # fp_ux = fp_ux | floatMulUnderflow(op1, op2);
    fp_xx = fp_xx | fp_fi;
    fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
    # fp_vximz = fp_vximz | floatInfinityMulZero(op1, op2);
    setSummaryFPSCR();
}

# FPSCR update after a float subtract.
macro setFPSubFlags(op1, op2, result) {
    setFPRF(result);
    # fp_fr = floatSubRoundedUp(op1, op2);
    # fp_fi = floatSubInexact(op1, op2);
    # fp_ox = fp_ox | floatSubOverflow(op1, op2);
    # fp_ux = fp_ux | floatSubUnderflow(op1, op2);
    # fp_xx = fp_xx | fp_fi;
    fp_vxsnan = fp_vxsnan | nan(op1) | nan(op2);
    # fp_vxisi = fp_vxisi | floatInfinitySub(op1, op2);
    setSummaryFPSCR();
}

# Load one 32-bit word at ea into reg (zero-extended on 64-bit) and advance ea.
macro loadRegister(reg, ea) {
@ifdef BIT_64
    reg = zext(*:4(ea));
@else
    reg = *:4(ea);
@endif
    ea = ea+4;
}

# Same as loadRegister but uses the implicit address register 'tea'.
macro loadReg(reg) {
@ifdef BIT_64
    reg = zext(*:4(tea));
@else
    reg = *:4(tea);
@endif
    tea = tea+4;
}

# Partial-word load: keep only the high (4-sa) bytes of the loaded word.
macro loadRegisterPartial(reg, ea, sa) {
    mask:$(REGISTER_SIZE) = 0xffffffff;
    sa = ((4-sa) & 3) * 8;
    mask = mask << sa;
@ifdef BIT_64
    reg = zext(*:4(ea));
@else
    reg = *:4(ea);
@endif
    reg = reg & mask;
    ea = ea + 4;
}

# Store the low 32 bits of reg at ea and advance ea.
macro storeRegister(reg, ea) {
@ifdef BIT_64
    tmp:8 = reg; # workaround
    *:4(ea) = tmp:4;
@else
    *:4(ea) = reg;
@endif
    ea = ea+4;
}

# Same as storeRegister but uses the implicit address register 'tea'.
macro storeReg(reg) {
@ifdef BIT_64
    tmp:8 = reg; # workaround
    *:4(tea) = tmp:4;
@else
    *:4(tea) = reg;
@endif
    tea = tea+4;
}

# Partial-word store; 'sa' is currently unused here (full word is written).
macro storeRegisterPartial(reg, ea, sa) {
@ifdef BIT_64
    tmp:8 = reg; # workaround
    *:4(ea) = tmp:4;
@else
    *:4(ea) = reg;
@endif
    ea = ea + 4;
}

# Pack 32 single-bit flags into D, a0 = bit 31 (MSB) ... a31 = bit 0 (LSB).
macro packbits( D,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,
                a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31) {
    D = zext(a31) & 1;
    D=D|(zext(a0)&1)<<31;
    D=D|(zext(a1)&1)<<30;
    D=D|(zext(a2)&1)<<29;
    D=D|(zext(a3)&1)<<28;
    D=D|(zext(a4)&1)<<27;
    D=D|(zext(a5)&1)<<26;
    D=D|(zext(a6)&1)<<25;
    D=D|(zext(a7)&1)<<24;
    D=D|(zext(a8)&1)<<23;
    D=D|(zext(a9)&1)<<22;
    D=D|(zext(a10)&1)<<21;
    D=D|(zext(a11)&1)<<20;
    D=D|(zext(a12)&1)<<19;
    D=D|(zext(a13)&1)<<18;
    D=D|(zext(a14)&1)<<17;
    D=D|(zext(a15)&1)<<16;
    D=D|(zext(a16)&1)<<15;
    D=D|(zext(a17)&1)<<14;
    D=D|(zext(a18)&1)<<13;
    D=D|(zext(a19)&1)<<12;
    D=D|(zext(a20)&1)<<11;
    D=D|(zext(a21)&1)<<10;
    D=D|(zext(a22)&1)<<9;
    D=D|(zext(a23)&1)<<8;
    D=D|(zext(a24)&1)<<7;
    D=D|(zext(a25)&1)<<6;
    D=D|(zext(a26)&1)<<5;
    D=D|(zext(a27)&1)<<4;
    D=D|(zext(a28)&1)<<3;
    D=D|(zext(a29)&1)<<2;
    D=D|(zext(a30)&1)<<1;
}

# Inverse of packbits: scatter the 32 bits of D into single-bit flags.
macro unpackbits(D,a0,a1,a2,a3,a4,a5,a6,a7,a8,a9,a10,a11,a12,a13,a14,a15,
                 a16,a17,a18,a19,a20,a21,a22,a23,a24,a25,a26,a27,a28,a29,a30,a31) {
    a0=(D&0x80000000)!=0;
    a1=(D&0x40000000)!=0;
    a2=(D&0x20000000)!=0;
    a3=(D&0x10000000)!=0;
    a4=(D&0x8000000)!=0;
    a5=(D&0x4000000)!=0;
    a6=(D&0x2000000)!=0;
    a7=(D&0x1000000)!=0;
    a8=(D&0x800000)!=0;
    a9=(D&0x400000)!=0;
    a10=(D&0x200000)!=0;
    a11=(D&0x100000)!=0;
    a12=(D&0x80000)!=0;
    a13=(D&0x40000)!=0;
    a14=(D&0x20000)!=0;
    a15=(D&0x10000)!=0;
    a16=(D&0x8000)!=0;
    a17=(D&0x4000)!=0;
    a18=(D&0x2000)!=0;
    a19=(D&0x1000)!=0;
    a20=(D&0x800)!=0;
    a21=(D&0x400)!=0;
    a22=(D&0x200)!=0;
    a23=(D&0x100)!=0;
    a24=(D&0x80)!=0;
    a25=(D&0x40)!=0;
    a26=(D&0x20)!=0;
    a27=(D&0x10)!=0;
    a28=(D&0x8)!=0;
    a29=(D&0x4)!=0;
    a30=(D&0x2)!=0;
    a31=(D&0x1)!=0;
}

# Assemble the architectural FPSCR value from the individual flag registers.
macro packFPSCR(tmp) {
    packbits(tmp, fp_fx, fp_fex, fp_vx, fp_ox, fp_ux, fp_zx, fp_xx, fp_vxsnan,
             fp_vxisi, fp_vxidi, fp_vxzdz, fp_vximz, fp_vxvc, fp_fr, fp_fi, fp_c,
             fp_cc0, fp_cc1, fp_cc2, fp_cc3, fp_reserve1, fp_vxsoft, fp_vxsqrt, fp_vxcvi,
             fp_ve, fp_oe, fp_ue, fp_ze, fp_xe, fp_ni, fp_rn0, fp_rn1);
}

# Scatter an architectural FPSCR value back into the individual flag registers.
macro unpackFPSCR(tmp) {
    unpackbits(tmp, fp_fx, fp_fex, fp_vx, fp_ox, fp_ux, fp_zx, fp_xx, fp_vxsnan,
               fp_vxisi, fp_vxidi, fp_vxzdz, fp_vximz, fp_vxvc, fp_fr, fp_fi, fp_c,
               fp_cc0, fp_cc1, fp_cc2, fp_cc3, fp_reserve1, fp_vxsoft, fp_vxsqrt, fp_vxcvi,
               fp_ve, fp_oe, fp_ue, fp_ze, fp_xe, fp_ni, fp_rn0, fp_rn1);
}

################################################################
# Sub-Constructors
################################################################

# Mnemonic suffix "a" when the AA (absolute address) bit is set.
REL_ABS: "a" is AA = 1 {}
REL_ABS: is AA = 0 {}

# Branch target for I-form: PC-relative (AA=0) or absolute (AA=1), LI scaled by 4.
addressLI: reloc is LI & AA=0 [ reloc = inst_start + LI*4;] { export *[ram]:4 reloc; }
addressLI: reloc is LI & AA=1 [ reloc = LI*4; ] { export *[ram]:4 reloc; }
# Conditional-branch target from the BD field: PC-relative or absolute.
addressBD: reloc is BD & AA=0 [ reloc = inst_start + BD*4; ] { export *[ram]:4 reloc; }
addressBD: reloc is BD & AA=1 [ reloc = BD*4; ] { export *[ram]:4 reloc; }

# 16-bit immediate assembled from the split D0/D1/D2 fields, shifted up 16.
OFF16SH: val is D0 & D1 & D2 [ val = ((D0 << 6) | (D1 << 1) | D2) << 16; ] { export *[const]:4 val;}

# X 00-------------------------------06 07-07 08-----------10 11-----------13 14------15 16----------------------------------------------------------------------------31
# X -----------------?-----------------|BO_1=1|-------?-------|-----BI_CR-----|--BI_CC---|---------------------------------------?----------------------------------------|
# CC: evaluate the CR bit selected by BI_CR/BI_CC; BO_1 chooses
# branch-on-true vs branch-on-false (the "false" forms negate the bit).
CC: "lt" is BI_CC=0 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC: "le" is BI_CC=1 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
CC: "eq" is BI_CC=2 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC: "ge" is BI_CC=0 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
CC: "gt" is BI_CC=1 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC: "ne" is BI_CC=2 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }
CC: "so" is BI_CC=3 & BO_1=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC: "ns" is BI_CC=3 & BO_1=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); tmp = !tmp; export tmp; }

# Trap-condition mnemonic suffixes for the TO field (display only).
TOm: "lt" is TO=16 { }
TOm: "le" is TO=20 { }
TOm: "eq" is TO=4 { }
TOm: "ge" is TO=12 { }
TOm: "gt" is TO=8 { }
TOm: "ne" is TO=24 { }
TOm: "llt" is TO=2 { }
TOm: "lle" is TO=6 { }
TOm: "lge" is TO=5 { }
TOm: "lgt" is TO=1 { }
TOm: "" is TO { }

# Decrement CTR and test it against zero (bdz / bdnz forms).
CTR_DEC: "z" is BO_3=1 {CTR = CTR-1; tmp:1 = (CTR == 0); export tmp; }
CTR_DEC: "nz" is BO_3=0 {CTR = CTR-1; tmp:1 = (CTR != 0); export tmp; }

# Branch-true / branch-false selector used with the bd..f/t forms.
CC_TF: "t" is BO_1=1 { tmp:1 = 1; export tmp; }
CC_TF: "f" is BO_1=0 { tmp:1 = 0; export tmp; }

# OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC
# & XOP_1_10=129 & BIT_0=0

# X 00---------------------------------------------------10 11-----------13 14------15 16----------------------------------------------------------------------------31
# X ---------------------------?---------------------------|----BI_CR=0----|--BI_CC---|---------------------------------------?----------------------------------------|
# CC_OP: CR bit operand for condition-register logical ops (BI field);
# cr0 forms print a bare mnemonic, other CR fields print "4*crN+cc".
CC_OP: "lt" is BI_CC=0 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
CC_OP: "eq" is BI_CC=2 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
CC_OP: "gt" is BI_CC=1 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
CC_OP: "so" is BI_CC=3 & BI_CR=0 & BI_CC { tmp:1 = 0; getCrBit(cr0, BI_CC, tmp); export tmp; }
CC_OP: "4*"^BI_CR^"+lt" is BI_CC=0 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC_OP: "4*"^BI_CR^"+eq" is BI_CC=2 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC_OP: "4*"^BI_CR^"+gt" is BI_CC=1 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }
CC_OP: "4*"^BI_CR^"+so" is BI_CC=3 & BI_CR & BI_CC { tmp:1 = 0; getCrBit(BI_CR, BI_CC, tmp); export tmp; }

# X 00----------------------------------------------------------------------------15 16-----------18 19------20 21---------------------------------------------------31
# X ---------------------------------------?----------------------------------------|----CR_B=0-----|-CR_B_CC--|---------------------------?---------------------------|
# CC_B_OP: same as CC_OP but for the BB field (bits 16-20).
CC_B_OP: "lt" is CR_B_CC=0 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
CC_B_OP: "eq" is CR_B_CC=2 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
CC_B_OP: "gt" is CR_B_CC=1 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
CC_B_OP: "so" is CR_B_CC=3 & CR_B=0 & CR_B_CC { tmp:1 = 0; getCrBit(cr0, CR_B_CC, tmp); export tmp; }
CC_B_OP: "4*"^CR_B^"+lt" is CR_B_CC=0 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
CC_B_OP: "4*"^CR_B^"+eq" is CR_B_CC=2 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
CC_B_OP: "4*"^CR_B^"+gt" is CR_B_CC=1 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }
CC_B_OP: "4*"^CR_B^"+so" is CR_B_CC=3 & CR_B & CR_B_CC { tmp:1 = 0; getCrBit(CR_B, CR_B_CC, tmp); export tmp; }

# X 00-----------------------------------------------------------------------------------------------------20 21-----------23 24------25 26--------------------------31
# X ----------------------------------------------------?----------------------------------------------------|----CR_X=0-----|-CR_X_CC--|--------------?---------------|
# CC_X_OP: CR bit operand taken from bits 21-25; prints the CR register.
CC_X_OP: cr0 is CR_X_CC=0 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
CC_X_OP: cr0 is CR_X_CC=1 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
CC_X_OP: cr0 is CR_X_CC=2 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
CC_X_OP: cr0 is CR_X_CC=3 & CR_X=0 & CR_X_CC & cr0 { tmp:1 = 0; getCrBit(cr0, CR_X_CC, tmp); export tmp; }
CC_X_OP: CR_X is CR_X_CC=0 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
CC_X_OP: CR_X is CR_X_CC=1 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
CC_X_OP: CR_X is CR_X_CC=2 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }
CC_X_OP: CR_X is CR_X_CC=3 & CR_X & CR_X_CC { tmp:1 = 0; getCrBit(CR_X, CR_X_CC, tmp); export tmp; }

# Display-only condition mnemonic for the CR_X_CC field.
CC_X_OPm: "lt" is CR_X_CC=0 & CR_X=0 & CR_X_CC { }
CC_X_OPm: "gt" is CR_X_CC=1 & CR_X=0 & CR_X_CC { }
CC_X_OPm: "eq" is CR_X_CC=2 & CR_X=0 & CR_X_CC { }
CC_X_OPm: "so" is CR_X_CC=3 & CR_X=0 & CR_X_CC { }
CC_X_OPm: "lt" is CR_X_CC=0 & CR_X & CR_X_CC { }
CC_X_OPm: "gt" is CR_X_CC=1 & CR_X & CR_X_CC { }
CC_X_OPm: "eq" is CR_X_CC=2 & CR_X & CR_X_CC { }
CC_X_OPm: "so" is CR_X_CC=3 & CR_X & CR_X_CC { }

# X 00--------------------------05 06-----------08 09------10
# 11-----------------------------------------------------------------------------------------------------31
# X --------------?---------------|----CR_D=0-----|-CR_D_CC--|----------------------------------------------------?----------------------------------------------------|
# CC_D_OP: CR bit operand taken from the destination field (bits 6-10).
CC_D_OP: "lt" is CR_D_CC=0 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
CC_D_OP: "eq" is CR_D_CC=2 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
CC_D_OP: "gt" is CR_D_CC=1 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
CC_D_OP: "so" is CR_D_CC=3 & CR_D=0 & CR_D_CC { tmp:1 = 0; getCrBit(cr0, CR_D_CC, tmp); export tmp; }
CC_D_OP: "4*"^CR_D^"+lt" is CR_D_CC=0 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
CC_D_OP: "4*"^CR_D^"+eq" is CR_D_CC=2 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
CC_D_OP: "4*"^CR_D^"+gt" is CR_D_CC=1 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }
CC_D_OP: "4*"^CR_D^"+so" is CR_D_CC=3 & CR_D & CR_D_CC { tmp:1 = 0; getCrBit(CR_D, CR_D_CC, tmp); export tmp; }

# Register operands that read as literal zero when the field is r0
# (PowerPC "rA|0" convention for effective-address computation).
RA_OR_ZERO: A is A { export A; }
RA_OR_ZERO: 0 is A=0 { export 0:$(REGISTER_SIZE); }
RB_OR_ZERO: B is B { export B; }
RB_OR_ZERO: 0 is B=0 { export 0:$(REGISTER_SIZE); }
RS_OR_ZERO: S is S { export S; }
RS_OR_ZERO: 0 is S=0 { export 0:$(REGISTER_SIZE); }

@ifdef BIT_64
# 6-bit MB/SH values are split across two fields in the 64-bit rotate forms.
MB: mbValue is MBH & MBL [ mbValue=(MBH<<5)|MBL; ] { export *[const]:4 mbValue; }
SH: shValue is SHH & SHL [ shValue=(SHH<<5)|SHL; ] { export *[const]:4 shValue; }
# NOTE(review): this 64-bit rotmask mixes 32-bit constants (0x7fffffff /
# 0xffffffff) into a 64-bit mask computation -- confirm against the
# rlwinm/rldicl mask definition before relying on it.
rotmask: mask is MBL & ME [ mask = ((((ME-MBL)>>8) $and 1)*0xffffffffffffffff) $xor (0x7fffffff>>ME) $xor (0xffffffff>>MBL); ] { export *[const]:8 mask; }
# Build the rldic-style wrap-around mask from the combined MB/SH fields;
# the disassembly action computes a display value, the semantic section
# recomputes the mask at runtime.
rotmask_SH: masksh, mbValue, shValue is MBL & MBH & SHL & SHH [ mbValue= (MBH<<5)|MBL; shValue= (SHH<<5)|SHL; masksh = ((((shValue-mbValue)>>8) $and 1)*0xffffffffffffffff) $xor ((0x7fffffffffffffff >> shValue) $xor (0xffffffffffffffff >> mbValue)); ] {
local start:4 = mbValue;
local stop:4 = 63-shValue;
mask_tmp:8 = (zext(start > stop) * 0xffffffffffffffff) ^ (0x7fffffffffffffff>>stop) ^ (0xffffffffffffffff>>start);
export *[const]:8 mask_tmp;
}
# Mask of all bits above position mbValue (used by the rldcl/"clear" forms).
rotmask_Z: mask, mbValue is MBL & MBH [mbValue= (MBH<<5)|MBL; mask = ~(0xffffffffffffffff >> (mbValue+1)); ] {
mask_tmp:8 = ~(0xffffffffffffffff >> (mbValue+1));
export *[const]:8 mask_tmp;
}
@else
# 32-bit rlwinm-style mask from MB..ME, with wrap-around handling.
rotmask: mask is MBL & ME [ mask = ((((ME-MBL)>>8) $and 1)*0xffffffff) $xor (0x7fffffff>>ME) $xor (0xffffffff>>MBL); ] { export *[const]:4 mask; }
@endif

DSIZE: "w" is L {} # L is a don't care bit in 32-bit languages although it should always be 0
@ifdef BIT_64
# L can only be 1 when in 64 bit language
DSIZE: "d" is L=1 {}
@endif

@ifdef BIT_64
# Signed comparison operands: L=0 compares the low 32 bits sign-extended.
REG_A: is L=0 & A {tmp:8 = sext(A:4); export tmp; }
REG_A: is L=1 & A {export A; }
REG_B: is L=0 & B {tmp:8 = sext(B:4); export tmp; }
REG_B: is L=1 & B {export B; }
@else
# L is a don't care bit in 32-bit languages although it should always be 0
REG_A: is A { export A; }
REG_B: is B { export B; }
@endif

@ifdef BIT_64
# Unsigned comparison operands: L=0 compares the low 32 bits zero-extended.
UREG_A: is L=0 & A {tmp:8 = zext(A:4); export tmp; }
UREG_A: is L=1 & A {export A; }
UREG_B: is L=0 & B {tmp:8 = zext(B:4); export tmp; }
UREG_B: is L=1 & B {export B; }
@else
# L is a don't care bit in 32-bit languages although it should always be 0
UREG_A: is A { export A; }
UREG_B: is B { export B; }
@endif

# D-form effective addresses: displacement plus base register (or zero).
dPlusRaOrZeroAddress: SIMM(RA_OR_ZERO) is SIMM & RA_OR_ZERO { tmp:$(REGISTER_SIZE) = RA_OR_ZERO+SIMM; export tmp; }
dPlusRaAddress: SIMM(A) is SIMM & A {tmp:$(REGISTER_SIZE) = A+SIMM; export tmp; }
dUI16PlusRAOrZeroAddress: val^"("^RA_OR_ZERO^")" is RA_OR_ZERO & UI_16_s8 [ val = UI_16_s8 << 3; ] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + val; export ea; }
@ifdef BIT_64
# DS-form addresses (64-bit ld/std family): 14-bit displacement scaled by 4.
dsPlusRaAddress: simm_ds(A) is SIMM_DS & A [simm_ds = SIMM_DS << 2;] {tmp:8 = simm_ds + A;export tmp;}
dsPlusRaOrZeroAddress: simm_ds(RA_OR_ZERO) is SIMM_DS & RA_OR_ZERO [simm_ds = SIMM_DS << 2;] {tmp:8 = simm_ds + RA_OR_ZERO;export tmp;}
@endif

# FPSCR_CRFS: read one 4-bit FPSCR field (for mcrfs), clearing the
# exception bits that the instruction is defined to reset.
FPSCR_CRFS: is CRFS=0 {tmp:1 = fp_fx<<3
| fp_fex<<2 | fp_vx<<1 | fp_ox; fp_fx=0; fp_ox=0; export tmp;}
# NOTE(review): the CRFS=1 case clears fp_ux twice; presumably the second
# clear was meant for fp_vxsnan -- confirm against the FPSCR definition.
FPSCR_CRFS: is CRFS=1 {tmp:1 = fp_ux<<3 | fp_zx<<2 | fp_xx<<1 | fp_vxsnan; fp_ux=0; fp_zx=0; fp_xx=0; fp_ux=0;export tmp;}
FPSCR_CRFS: is CRFS=2 {tmp:1 = fp_vxisi<<3 | fp_vxidi<<2 | fp_vxzdz<<1 | fp_vximz; fp_vxisi=0; fp_vxidi=0; fp_vxzdz=0; fp_vximz=0; export tmp;}
FPSCR_CRFS: is CRFS=3 {tmp:1 = fp_vxvc<<3 | fp_fr<<2 | fp_fi<<1 | fp_c; fp_vxvc=0; export tmp;}
FPSCR_CRFS: is CRFS=4 {tmp:1 = fp_cc0<<3 | fp_cc1<<2 | fp_cc2<<1 | fp_cc3; export tmp;}
FPSCR_CRFS: is CRFS=5 {tmp:1 = fp_vxsoft<<2 | fp_vxsqrt<<1 | fp_vxcvi; fp_vxsoft=0; fp_vxsqrt=0; fp_vxcvi=0; export tmp;}
FPSCR_CRFS: is CRFS=6 {tmp:1 = fp_ve<<3 | fp_oe <<2 | fp_ue<<1 | fp_ze; export tmp;}
FPSCR_CRFS: is CRFS=7 {tmp:1 = fp_xe<<3 | fp_ni<<2 | fp_rn0<<1 | fp_rn1; export tmp;}

# CRM_CR: select a single CR field (mtcrf with a one-hot CRM mask) and
# position it within a 32-bit CR image.
CRM_CR: cr7 is CRM=1 & cr7 {tmp:4 = zext(cr7);export tmp;}
CRM_CR: cr6 is CRM=2 & cr6 {tmp:4 = zext(cr6) << 4;export tmp;}
CRM_CR: cr5 is CRM=4 & cr5 {tmp:4 = zext(cr5) << 8;export tmp;}
CRM_CR: cr4 is CRM=8 & cr4 {tmp:4 = zext(cr4) << 12;export tmp;}
CRM_CR: cr3 is CRM=16 & cr3 {tmp:4 = zext(cr3) << 16;export tmp;}
CRM_CR: cr2 is CRM=32 & cr2 {tmp:4 = zext(cr2) << 20;export tmp;}
CRM_CR: cr1 is CRM=64 & cr1 {tmp:4 = zext(cr1) << 24;export tmp;}
CRM_CR: cr0 is CRM=128 & cr0 {tmp:4 = zext(cr0) << 28;export tmp;}

################################################################
# Instructions
################################################################

@include "ppc_instructions.sinc"
@include "ppc_embedded.sinc"

================================================
FILE: pypcode/processors/PowerPC/data/languages/ppc_embedded.sinc
================================================

# these are identified as part of the PowerPC Embedded Architecture

# Cache-management instructions are modeled as opaque pcodeops on the
# computed effective address (rA|0 + rB).

#dcba 0,r0 0x7c 00 05 ec
:dcba RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=758 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockAllocate(ea); }

#dcbf 0,r0 0x7c 00 00 ac
:dcbf RA_OR_ZERO,B is OP=31 &
BITS_21_25=0 & B & XOP_1_10=86 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockFlush(ea); }

#dcbi 0,r0 0x7c 00 03 ac
:dcbi RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=470 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockInvalidate(ea); }

#dcbst 0,r0 0x7c 00 00 6c
:dcbst RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=54 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockStore(ea); }

#dcbt 0,r0 0x7c 00 02 2c
:dcbt RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=278 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockTouch(ea); }

#dcbtst 0,r0 0x7c 00 01 ec
:dcbtst RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=246 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockTouchForStore(ea); }

#dcbz 0,r0 0x7c 00 07 ec
:dcbz RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=1014 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockClearToZero(ea); }

#dcbzl 0,r0 0x7c 20 07 ec
:dcbzl RA_OR_ZERO,B is OP=31 & BITS_21_25=1 & B & XOP_1_10=1014 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockClearToZero(ea); }

define pcodeop memoryBarrier;
#mbar 0 7c 00 06 ac
:mbar MO is OP=31 & MO & XOP_1_10=854 { memoryBarrier(MO:1); }

#icbi r0,r0 0x7c 00 07 ac
:icbi RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=982 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; instructionCacheBlockInvalidate(ea); }

#icbt 0,r0 0x7c 00 02 0c
:icbt BITS_21_24,RA_OR_ZERO,B is OP=31 & BIT_25=0 & BITS_21_24 & RA_OR_ZERO & B & XOP_1_10=22 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; instructionCacheBlockTouch(ea); }

#isync 0x4c 00 01 2c
:isync is $(NOTVLE) & OP=19 & BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=150 & BIT_0=0 { instructionSynchronize(); }

#mfdcr r0,DCRN 0x7c 00 02 86
:mfdcr D, DCRN is OP=31 & D & DCRN & XOP_1_10=323 & BIT_0=0 { D = DCRN; }

#mfmsr r0 0x7c 00 00 a6
:mfmsr D is OP=31 & D & BITS_11_20=0 & XOP_1_10=83 & BIT_0=0 { D = MSR; }

#mfspr r0 0x7c 00 02 a6
:mfspr D,SPRVAL is OP=31 & D & SPRVAL & XOP_1_10=339 & BIT_0=0 { D = SPRVAL; }

#mftb r0,TBLr 0x7c 0c 42 e6
:mftb D,TBLr is $(NOTVLE) & OP=31 & D & TBR=392 & TBLr & XOP_1_10=371 & BIT_0=0 { D = TBLr; }
#mftb r0,TBUr 0x7c 0d 42 e6
:mftb D,TBUr is $(NOTVLE) & OP=31 & D & TBR=424 & TBUr & XOP_1_10=371 & BIT_0=0 { D = TBUr; }

#mtdcr DCRN,r0 0x7c 00 03 86
:mtdcr DCRN, D is OP=31 & D & DCRN & XOP_1_10=451 & BIT_0=0 { DCRN = D; }

# mtmsr varies from processor to processor. This version is consistent with PowerISA v2.07B
#mtmsr r0,0 0x7c 00 01 24
:mtmsr S,0 is OP=31 & S & BITS_17_20=0 & MSR_L=0 & BITS_11_15=0 & XOP_1_10=146 & BIT_0=0 {
bit59:$(REGISTER_SIZE) = (S >> 4) & 1; #bit 59
bit58:$(REGISTER_SIZE) = (S >> 5) & 1; #bit 58
bit49:$(REGISTER_SIZE) = (S >> 14) & 1; #bit 49
bit48:$(REGISTER_SIZE) = (S >> 15) & 1; #bit 48
local mask:$(REGISTER_SIZE) = 0xffff6fcf; # preserves bits 32:47 49:50 52:57 60:62
local tmp:$(REGISTER_SIZE) = S & mask; # 1111 1111 1111 1111 0110 1111 1100 1111
tmp = tmp | ((bit48 | bit49) << 15); # MSR 48 <- (RS) 48 | (RS) 49
tmp = tmp | ((bit58 | bit49) << 5); # MSR 58 <- (RS) 58 | (RS) 49
tmp = tmp | ((bit59 | bit49) << 4); # MSR 59 <- (RS) 59 | (RS) 49
MSR = (MSR & ~mask) | tmp;
}

#mtmsr r0,1 0x7c 01 01 24
:mtmsr S,1 is OP=31 & S & BITS_17_20=0 & MSR_L=1 & BITS_11_15=0 & XOP_1_10=146 & BIT_0=0 {
mask:$(REGISTER_SIZE) = 0x8002; #preserves bits 48 and 62
MSR = (MSR & ~mask) | (S & mask);
}

#mtspr spr000,r0 0x7c 00 02 a6
:mtspr SPRVAL,S is OP=31 & SPRVAL & S & XOP_1_10=467 & BIT_0=0 { SPRVAL = S; }
# The two variants below track writes to LR (SPR 0x100) with the linkreg
# context variable so later branches can be recognized as returns.
:mtspr SPRVAL,S is OP=31 & BITS_11_20=0x100 & BITS_21_25=0 & SPRVAL & S & XOP_1_10=467 & BIT_0=0 [ linkreg=1; globalset(inst_next,linkreg); ] { SPRVAL = S; }
:mtspr SPRVAL,S is linkreg=1 & OP=31 & BITS_11_20=0x100 & BITS_21_25=0 & SPRVAL & S & XOP_1_10=467 & BIT_0=0 [ linkreg=0; globalset(inst_start,linkreg); ] { SPRVAL = S; }

:rfci is $(NOTVLE) & OP=19 &
BITS_21_25=0 & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=51 & BIT_0=0 { MSR = returnFromCriticalInterrupt(MSR, CSRR1); local ra = CSRR0; return[ra]; }

#rfi 0x4c 00 00 64
:rfi is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=50 & BIT_0=0 { MSR = returnFromInterrupt(MSR, SRR1); local ra = SRR0; return[ra]; }

#tlbre 0x7c 00 07 64
:tlbre is OP=31 & XOP_1_10=946 { TLBRead(); }

#tlbsx r0,r0,r0 0x7c 00 07 24
:tlbsx D,RA_OR_ZERO,B is OP=31 & D & B & XOP_1_10=914 & RA_OR_ZERO & Rc=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; D = TLBSearchIndexed(D,ea); }
#tlbsx. r0,r0,r0 0x7c 00 07 25
:tlbsx. D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & B & XOP_1_10=914 & RA_OR_ZERO & Rc=1 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; D = TLBSearchIndexed(D,ea); cr0flags(D); }

#tlbwe 0x7c 00 07 a4
:tlbwe D,A,B_BITS is OP=31 & D & A & B_BITS & XOP_1_10=978 { D = TLBWrite(D,A,B_BITS:1); }

#wrtee r0 0x7c 00 01 06
:wrtee S is OP=31 & S & XOP_1_10=131 { WriteExternalEnable(S); }

#wrteei 0 0x7c 00 01 46
:wrteei BIT_15 is OP=31 & BIT_15 & XOP_1_10=163 { WriteExternalEnableImmediate(BIT_15:1); }

================================================
FILE: pypcode/processors/PowerPC/data/languages/ppc_instructions.sinc
================================================

#===========================================================
# ADD
#===========================================================

#add r1,r2,r3 0x7c 22 1a 14
:add D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=266 & Rc=0 { D = A + B; }
#add. r1,r2,r3 0x7c 22 1a 15
:add. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=266 & Rc=1 { D = A + B; cr0flags(D); }
#addo r1,r2,r3 0x7c 22 1e 14
:addo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=266 & Rc=0 { addOverflow(A,B); D = A + B; }
#addo. r1,r2,r3 0x7c 22 1e 15
:addo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=266 & Rc=1 { addOverflow(A,B); D = A + B; cr0flags(D); }

#addc r1,r2,r3 0x7c 22 18 14
:addc D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=10 & Rc=0 { xer_ca = carry(A,B); D = A + B; }
#addc.
# r1,r2,r3 0x7c 22 18 15
:addc. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=10 & Rc=1 { xer_ca = carry(A,B); D = A + B; cr0flags(D); }
#addco r1,r2,r3 0x7c 22 1c 14
:addco D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=10 & Rc=0 { xer_ca = carry(A,B); addOverflow( A, B ); D = A + B; }
#addco. r1,r2,r3 0x7c 22 1c 15
:addco. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=10 & Rc=1 { xer_ca = carry(A,B); addOverflow( A, B ); D = A + B; cr0flags(D); }

# Extended (carry-in) adds: xer_ca is consumed as a third operand.
#adde r1,r2,r3 0x7c 22 19 14
:adde D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=138 & Rc=0 { zextCarry:$(REGISTER_SIZE) = zext(xer_ca); addExtendedCarry(A,B); D=A + B + zextCarry; }
#adde. r1,r2,r3 0x7c 22 19 15
:adde. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=138 & Rc=1 { zextCarry:$(REGISTER_SIZE) = zext(xer_ca); addExtendedCarry(A,B); D=A + B + zextCarry; cr0flags(D); }
#addeo r1,r2,r3 0x7c 22 1d 14
:addeo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=138 & Rc=0 { zextCarry:$(REGISTER_SIZE) = zext(xer_ca); addExtendedOverflow(A,B); addExtendedCarry(A,B); D=A + B + zextCarry; }
#addeo. r1,r2,r3 0x7c 22 1d 15
:addeo.
D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=138 & Rc=1 { zextCarry:$(REGISTER_SIZE) = zext(xer_ca); addExtendedOverflow(A,B); addExtendedCarry(A,B); D=A + B + zextCarry; cr0flags(D); }

# addi and friends; negative immediates get the subi/subic/... simplified
# mnemonics via the SIMM_SIGN field.
#addi r0,0x7fff 0x38 00 7f ff
#addi r0,1 0x38 01 00 01
:addi D,A,SIMM is $(NOTVLE) & OP=14 & D & A & SIMM_SIGN=0 & SIMM { D = A + SIMM; }
#li r0,1 0x38 00 00 01 # addi simplified mnemonic
:li D,SIMM is $(NOTVLE) & OP=14 & D & A=0 & SIMM_SIGN=1 & SIMM { D = SIMM; }
#li r0,-0x1 0x38 00 FF FF # addi simplified mnemonic
:li D,SIMM is $(NOTVLE) & OP=14 & D & A=0 & SIMM_SIGN=0 & SIMM { D = SIMM; }
#subi r0,r1,1 0x38 01 FF FF # addi simplified mnemonic
:subi D,A,tmp is $(NOTVLE) & OP=14 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] { D = A + SIMM; }

#addic r0,r0,2 0x30 00 00 02
:addic D,A,SIMM is $(NOTVLE) & OP=12 & D & A & SIMM_SIGN=0 & SIMM { xer_ca=carry(A,SIMM); D = A + SIMM; }
#subic r0,r0,2 0x30 00 FF FE # addi simplified mnemonic
:subic D,A,tmp is $(NOTVLE) & OP=12 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] { xer_ca=carry(A,SIMM); D = A + SIMM; }

#addic. r0,r0,5 0x34 00 00 05
:addic. D,A,SIMM is $(NOTVLE) & OP=13 & D & A & SIMM_SIGN=0 & SIMM { xer_ca = carry(A,SIMM); D = A + SIMM; cr0flags( D ); }
#subic. r0,r0,1 0x34 00 FF FF # addic. simplified mnemonic
:subic.
D,A,tmp is $(NOTVLE) & OP=13 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] { xer_ca=carry(A,SIMM); D = A + SIMM; cr0flags( D ); }

# addis family: immediate is shifted left 16 before the add.
#addis r0,r1,1 0x3c 01 00 01
:addis D,A,SIMM is $(NOTVLE) & OP=15 & D & A & SIMM_SIGN=0 & SIMM { D = A + (SIMM:$(REGISTER_SIZE) << 16); }
#lis r0,1 0x3c 00 00 01 # addis simplified mnemonic
:lis D,SIMM is $(NOTVLE) & OP=15 & D & A=0 & SIMM_SIGN=1 & SIMM { D = SIMM:$(REGISTER_SIZE) << 16; }
#lis r0,-1 0x3c 00 FF FF # addis simplified mnemonic
:lis D,SIMM is $(NOTVLE) & OP=15 & D & A=0 & SIMM_SIGN=0 & SIMM { D = SIMM:$(REGISTER_SIZE) << 16; }
#subis r0,r1,1 0x3c 01 FF FF # addis simplified mnemonic
:subis D,A,tmp is $(NOTVLE) & OP=15 & D & A & SIMM_SIGN=1 & SIMM [ tmp = -SIMM; ] { D = A + (SIMM:$(REGISTER_SIZE) << 16); }

# addme: add rA, carry-in, and -1 (all-ones BVal).
#addme r0,r0 0x7c 00 01 D4
:addme D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=234 & Rc=0 { local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); local BVal:$(REGISTER_SIZE) = ~(0); addExtendedCarry(A,BVal); D=A + BVal + zextCarry; }
#addme. r0,r0 0x7c 00 01 D5
:addme. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=234 & Rc=1 { local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); local BVal:$(REGISTER_SIZE) = ~(0); addExtendedCarry(A,BVal); D=A + BVal + zextCarry; cr0flags(D); }
#addmeo r0,r0 0x7C 00 05 D4
:addmeo D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=234 & Rc=0 { local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); local BVal:$(REGISTER_SIZE) = ~(0); addExtendedOverflow(A,BVal); addExtendedCarry(A,BVal); D=A + BVal + zextCarry; }
#addmeo. r0,r0 0x7C 00 05 D5
:addmeo.
D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=234 & Rc=1 { local zextCarry:$(REGISTER_SIZE) = zext(xer_ca); local BVal:$(REGISTER_SIZE) = ~(0); addExtendedOverflow(A,BVal); addExtendedCarry(A,BVal); D=A + BVal + zextCarry; cr0flags(D); }

# addze: add rA and the carry bit only.
#addze r0,r0 0x7C 00 01 94
:addze D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=202 & Rc=0 { zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); xer_ca = carry(A,zextedCarry); D = A + zextedCarry; }
#addze. r0,r0 0x7C 00 01 95
:addze. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=202 & Rc=1 { zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); xer_ca=carry(A,zextedCarry); D = A + zextedCarry; cr0flags( D ); }
#addzeo r0,r0 0x7C 00 05 94
:addzeo D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=202 & Rc=0 { zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); xer_ca=carry(A,zextedCarry); addOverflow(A,zextedCarry); D = A + zextedCarry; }
#addzeo. r0,r0 0x7C 00 05 95
:addzeo. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=202 & Rc=1 { zextedCarry:$(REGISTER_SIZE) = zext( xer_ca ); xer_ca=carry(A,zextedCarry); addOverflow(A,zextedCarry); D = A + zextedCarry; cr0flags( D ); }

#===========================================================
# AND
#===========================================================

#and r0,r0,r0 0x7C 00 00 38
:and A,S,B is OP=31 & S & A & B & XOP_1_10=28 & Rc=0 { A = S & B; }
#and. r0,r0,r0 0x7C 00 00 39
:and. A,S,B is OP=31 & S & A & B & XOP_1_10=28 & Rc=1 { A = S & B; cr0flags( A ); }
#andc r0,r0,r0 0x7C 00 00 78
:andc A,S,B is OP=31 & S & A & B & XOP_1_10=60 & Rc=0 { A = S & ~B; }
#andc. r0,r0,r0 0x7C 00 00 79
:andc. A,S,B is OP=31 & S & A & B & XOP_1_10=60 & Rc=1 { A = S & ~B; cr0flags( A ); }
#andi. r0,r0,0xffff 0x70 00 ff ff
:andi. A,S,UIMM is $(NOTVLE) & OP=28 & S & A & UIMM { A = S & UIMM:$(REGISTER_SIZE); cr0flags( A ); }
#andis. r0,r0,1 0x74 00 00 01
:andis.
A,S,UIMM is $(NOTVLE) & OP=29 & A & S & UIMM { A = S & (UIMM:$(REGISTER_SIZE) << 16); cr0flags( A ); }

#===========================================================
# Branch (op=18)
#===========================================================

#b 1008 0x48 00 00 08 (assuming a starting address of 1000)
#ba LAB_00000158 0x48 00 01 5a
:b^REL_ABS addressLI is $(NOTVLE) & OP=18 & REL_ABS & addressLI & LK=0 { goto addressLI; }
# Variant matched when the linkreg context flag is set (LR was just loaded).
:b^REL_ABS addressLI is linkreg=1 & OP=18 & REL_ABS & addressLI & LK=0 [ linkreg=0; globalset(inst_start,linkreg); ] {
# don't do this anymore, detect another way
# call addressLI;
# return [LR];
goto addressLI;
}

#bl 0x48 00 00 09
#bla 0x48 00 10 0f
:bl^REL_ABS addressLI is $(NOTVLE) & OP=18 & REL_ABS & addressLI & LK=1 [ linkreg=0; globalset(inst_start,linkreg); ] {
r2Save = r2; # Save r2 (needed for branch to ppc64 call stub)
LR = inst_next;
call addressLI;
}

# special case when branch is to fall-through instruction, just loading the link register
#bl 0x48 00 00 05
:bl addressLI is $(NOTVLE) & OP=18 & REL_ABS & AA=0 & addressLI & LK=1 & LI=1 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; goto addressLI; }

#===========================================================
# Branch Conditional (op=16)
#===========================================================

#b sameAddr 0x42 80 00 00
#ba LAB_0000 0x42 80 00 02
:b^REL_ABS addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=0 { goto addressBD; }
:b^REL_ABS addressBD is linkreg=1 & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=0 [ linkreg=0; globalset(inst_start,linkreg); ] {
# don't do this anymore, detect another way
# call addressBD;
# return [LR];
goto addressBD;
}

#bl LAB_0000 0x42 80 00 01
#bla LAB_0000 0x42 80 00 03
:bl^REL_ABS addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & LK=1 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; call addressBD; }

# special case when branch is to fall-through instruction, just
# loading the link register
#bl (Load LR)
:bl addressBD is $(NOTVLE) & OP=16 & addressBD & REL_ABS & BO_0=1 & BO_2=1 & BD=1 & LK=1 { LR = inst_next; goto addressBD; }

#blt LAB_0000 0x41 80 00 00
:b^CC^REL_ABS addressBD is $(NOTVLE) & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 & REL_ABS & LK=0 [ linkreg=0; globalset(inst_start,linkreg); ] # affects both flows, but not at this instruction
{ if (CC) goto addressBD; }
## do a special linkreg setting only if linkreg is set, since this happens all over the code
:b^CC^REL_ABS addressBD is linkreg=1 & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 & REL_ABS & LK=0 [ linkreg=0; globalset(inst_start,linkreg); ] { if (CC) goto addressBD; }

#bltl LAB_0000 0x41 80 00 01
:b^CC^"l"^REL_ABS addressBD is $(NOTVLE) & OP=16 & CC & addressBD & BO_0=0 & BO_2=1 & BI_CR= 0 & REL_ABS & LK=1 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!CC) goto inst_next; call addressBD; }

#bne cr2,LAB_xxxx 0x40 8a 00 00
:b^CC^REL_ABS BI_CR,addressBD is $(NOTVLE) & OP=16 & CC & BI_CR & addressBD & BO_0=0 & BO_2=1 & REL_ABS & LK=0 [ linkreg=0; globalset(inst_start,linkreg); ] { if (CC) goto addressBD; }
#bnel cr2,LAB_xxxx 0x40 8a 00 01
:b^CC^"l"^REL_ABS BI_CR,addressBD is $(NOTVLE) & OP=16 & CC & BI_CR & addressBD & BO_0=0 & BO_2=1 & REL_ABS & LK=1 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!CC) goto inst_next; call addressBD; }

#bdnz LAB_0000 0x42 00 00 00
:bd^CTR_DEC^REL_ABS addressBD is $(NOTVLE) & OP=16 & CTR_DEC & REL_ABS & addressBD & BO_0=1 & BO_2=0 & LK=0 { if (CTR_DEC) goto addressBD; }
#bdnzl FUN_0xxx 0x42 00 00 01
#bdzla FUN_0000 0x42 40 00 03
:bd^CTR_DEC^"l"^REL_ABS addressBD is $(NOTVLE) & OP=16 & CTR_DEC & REL_ABS & addressBD & BO_0=1 & BO_2=0 & LK=1 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!CTR_DEC) goto inst_next; call addressBD; }

#bdnzf lt,LAB_0000 0x40 00 00 00
#bdnzf 4*cr2+eq,LAB_0000 0x40 0a 00 00
:bd^CTR_DEC^CC_TF^REL_ABS CC_OP,addressBD is
$(NOTVLE) & OP=16 & CC_TF & REL_ABS & CTR_DEC & CC_OP & addressBD & BO_0=0 & BO_2=0 & LK=0 { if (CTR_DEC && (CC_OP == CC_TF)) goto addressBD; }
#bdzfl lt,FUN_0000 0x40 00 00 01
#bdnzfl 4*cr2+eq,FUN_0000 0x40 0a 00 01
:bd^CTR_DEC^CC_TF^"l"^REL_ABS CC_OP,addressBD is $(NOTVLE) & OP=16 & CC_TF & CTR_DEC & REL_ABS & CC_OP & addressBD & BO_0=0 & BO_2=0 & LK=1 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!(CTR_DEC && (CC_OP == CC_TF))) goto inst_next; call addressBD; }

#===========================================================
# Branch Conditional CTR(op=19, xop=528)
#===========================================================

#bctr 0x4E 80 04 20
:bctr is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=528 { goto [CTR]; }
:bctr is $(NOTVLE) & linkreg=1 & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ] {
# don't do this anymore, detect another way
# call [CTR];
# return [LR];
goto [CTR];
}
:bctr BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=528 { goto [CTR]; }

#bctrl 0x4e 80 04 21
:bctrl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; call [CTR]; }
:bctrl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; call [CTR]; }

# Conditional branches through CTR; the not-taken path falls through.
#bgectr 0x4c 80 04 20
:b^CC^"ctr" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528 { if (!CC) goto inst_next; goto [CTR]; }
:b^CC^"ctr" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528 { if (!CC) goto inst_next; goto [CTR]; }

#bgectrl 0x4c 80 04 21
:b^CC^"ctrl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ]
{ LR = inst_next; if (!CC) goto inst_next; call [CTR]; }
:b^CC^"ctrl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!CC) goto inst_next; call [CTR]; }

#bgectr cr3 0x4c 8c 04 20
:b^CC^"ctr" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=528 { if (!CC) goto inst_next; goto [CTR]; }
#bnectr cr2,#0x3 0x4c 8c 1c 20
:b^CC^"ctr" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=528 { if (!CC) goto inst_next; goto [CTR]; }

#bgectrl cr2,LAB_xxxx 0x4c 8c 04 21
:b^CC^"ctrl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!CC) goto inst_next; call [CTR]; }
#bnectr cr2,#0x3 0x4c 8c 1c 21
:b^CC^"ctrl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=528 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; if (!CC) goto inst_next; call [CTR]; }

#===========================================================
# Branch Conditional to Link Register (op=19, XOP=16)
#===========================================================

# blr with BH=0 is modeled as a true function return.
#bclr 0x4E 80 00 20
:blr is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=16 { return [LR]; }
:blr BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=16 { goto [LR]; }

#blrl 0x4e 80 00 21
:blrl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; call [tmp]; }
:blrl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; call [tmp]; }

#bgelr 0x4c 80 00 20
:b^CC^"lr"
is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; return [LR]; } :b^CC^"lr" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; goto [LR]; } #bgelrl 0x4c 80 00 21 :b^CC^"lrl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!CC) goto inst_next; call [tmp]; } :b^CC^"lrl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR=0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!CC) goto inst_next; call [tmp]; } #bgelr cr2 0x4c 88 00 20 :b^CC^"lr" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; return [LR]; } #bnelr cr2,#0x3 0x4c 8c 18 20 :b^CC^"lr" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; goto [LR]; } #bgelrl cr3 0x4c 8c 00 21 :b^CC^"lrl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!CC) goto inst_next; call [tmp]; } #bnelr cr2,#0x3 0x4c 8c 18 21 :b^CC^"lrl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!CC) goto inst_next; call [tmp]; } ###### #bdnzlr 0x4e 00 00 20 :bd^CTR_DEC^"lr" is $(NOTVLE) & OP=19 & BH=0 & 
CTR_DEC & BO_0=1 & BO_2=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CTR_DEC) goto inst_next; goto [LR]; } :bd^CTR_DEC^"lr" BH is $(NOTVLE) & OP=19 & BH & CTR_DEC & BO_0=1 & BO_2=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CTR_DEC) goto inst_next; goto [LR]; } #bdnzlrl 0x4e 00 00 21 :bd^CTR_DEC^"lrl" is $(NOTVLE) & OP=19 & CTR_DEC & BH=0 & BO_0=1 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!CTR_DEC) goto inst_next; call [tmp]; } :bd^CTR_DEC^"lrl" BH is $(NOTVLE) & OP=19 & CTR_DEC & BH & BO_0=1 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!CTR_DEC) goto inst_next; call [tmp]; } #bdnzflr lt 0x4c 00 00 20 #bdnzflr 4*cr2+eq 0x4c 0a 00 20 :bd^CTR_DEC^CC_TF^"lr" CC_OP is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BO_0=0 & BO_2=0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!(CTR_DEC && (CC_OP == CC_TF))) goto inst_next; goto [LR]; } #bdnzflr ge 0x4c 00 18 20 #bdnzflr 4*cr2+eq 0x4c 0a 18 20 :bd^CTR_DEC^CC_TF^"lr" CC_OP,BH is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BO_0=0 & BO_2=0 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!(CTR_DEC && (CC_OP == CC_TF))) goto inst_next; goto [LR]; } #bdzflrl lt 0x4c 00 00 21 #bdnzflrl 4*cr2+eq 0x4c 0a 00 21 :bd^CTR_DEC^CC_TF^"lrl" CC_OP is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BH=0 & BO_0=0 & BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!(CTR_DEC && (CC_OP == CC_TF))) goto inst_next; call [tmp]; } #bdzflrl lt 0x4c 00 18 21 #bdnzflrl 4*cr2+eq 0x4c 0a 18 21 :bd^CTR_DEC^CC_TF^"lrl" CC_OP,BH is $(NOTVLE) & OP=19 & CC_TF & CTR_DEC & CC_OP & BH & BO_0=0 & 
BO_2=0 & LK=1 & BITS_13_15=0 & XOP_1_10=16 [ linkreg=0; globalset(inst_start,linkreg); ] { tmp:$(REGISTER_SIZE) = LR; LR = inst_next; if (!(CTR_DEC && (CC_OP == CC_TF))) goto inst_next; call [tmp]; } #=========================================================== # CMP #=========================================================== #cmpw r0,r1 0x7c 00 08 00 #cmpd r0,r1 0x7c 20 08 00 (64 bit mode) :cmp^DSIZE A,B is OP=31 & CRFD=0 & BIT_22=0 & DSIZE & A & B & REG_A & REG_B & XOP_1_10=0 & BIT_0=0 { tmpA:$(REGISTER_SIZE) = REG_A; tmpB:$(REGISTER_SIZE) = REG_B; cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } #cmpw cr2,r0,r1 0x7d 00 08 00 #cmpd cr2,r0,r1 0x7d 20 08 00 (64 bit mode) :cmp^DSIZE CRFD,A,B is OP=31 & CRFD & BIT_22=0 & DSIZE & A & B & REG_A & REG_B & XOP_1_10=0 & BIT_0=0 { tmpA:$(REGISTER_SIZE) = REG_A; tmpB:$(REGISTER_SIZE) = REG_B; CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } ############################### #cmpwi r0,0x00 0x2c 00 00 00 #cmpdi r0,0x00 0x2c 20 00 00 (64 bit mode) :cmp^DSIZE^"i" A,SIMM is $(NOTVLE) & OP=11 & CRFD=0 & BIT_22=0 & DSIZE & A & REG_A & SIMM { tmpA:$(REGISTER_SIZE) = REG_A; tmpB:$(REGISTER_SIZE) = SIMM; cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } #cmpwi cr2,r0,0x00 0x2d 00 00 00 #cmpwi cr2,r0,0x00 0x2d 20 00 00 (64 bit mode) :cmp^DSIZE^"i" CRFD,A,SIMM is $(NOTVLE) & OP=11 & CRFD & BIT_22=0 & DSIZE & A & B & REG_A & SIMM { tmpA:$(REGISTER_SIZE) = REG_A; tmpB:$(REGISTER_SIZE) = SIMM; CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } ############################ #cmplw r0,r1 0x7c 00 08 40 #cmpld r0,r1 0x7c 20 08 40 (64 bit mode) :cmpl^DSIZE A,B is OP=31 & CRFD=0 & BIT_22=0 & DSIZE & A & B & UREG_A & UREG_B & XOP_1_10=32 & BIT_0=0 { tmpA:$(REGISTER_SIZE) = UREG_A; tmpB:$(REGISTER_SIZE) = UREG_B; cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == 
tmpB) << 1) | (xer_so & 1); } #cmplw cr2,r0,r1 0x7d 00 08 40 #cmplw cr2,r0,r1 0x7d 20 08 40 (64 bit mode) :cmpl^DSIZE CRFD,A,B is OP=31 & CRFD & BIT_22=0 & DSIZE & A & B & UREG_A & UREG_B & XOP_1_10=32 & BIT_0=0 { tmpA:$(REGISTER_SIZE) = UREG_A; tmpB:$(REGISTER_SIZE) = UREG_B; CRFD = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } ############################### #cmplwi r0,0x00 0x28 00 00 00 #cmpldi r0,0x00 0x28 20 00 00 (64 bit mode) :cmpl^DSIZE^"i" A,UIMM is $(NOTVLE) & OP=10 & CRFD=0 & BIT_22=0 & DSIZE & A & UREG_A & UIMM { tmpA:$(REGISTER_SIZE) = UREG_A; tmpB:$(REGISTER_SIZE) = UIMM; cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } #cmplwi cr2,r0,0x00 0x29 00 00 00 #cmplwi cr2,r0,0x00 0x29 20 00 00 (64 bit mode) :cmpl^DSIZE^"i" CRFD,A,UIMM is $(NOTVLE) & OP=10 & CRFD & BIT_22=0 & DSIZE & A & B & UREG_A & UIMM { tmpA:$(REGISTER_SIZE) = UREG_A; tmpB:$(REGISTER_SIZE) = UIMM; CRFD = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } #=========================================================== # CNTLZx #=========================================================== @ifdef BIT_64 #cntlzd r0,r0 0x7c 00 00 74 :cntlzd A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=58 & Rc=0 { A = lzcount(S); } #cntlzd. r0,r0 0x7c 00 00 75 :cntlzd. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=58 & Rc=1 { A = lzcount(S); cr0flags(A); } @endif #cntlzw r0,r0 0x7c 00 00 34 :cntlzw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=26 & Rc=0 { A = lzcount(S:4); } #cntlzw. r0,r0 0x7c 00 00 35 :cntlzw. 
A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=26 & Rc=1 { A = lzcount(S:4); cr0flags(A); } #=========================================================== # CRxxx #=========================================================== #crand lt,lt,lt 0x4c 00 02 02 #crand 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 72 02 :crand CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=257 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,CC_OP & CC_B_OP); } #crandc lt,lt,lt 0x4c 00 01 02 #crandc 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 02 :crandc CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=129 & BIT_0=0 { tmp1:1 = !CC_B_OP; setCrBit(CR_D,CR_D_CC,CC_OP & tmp1); } #creqv lt,lt,lt 0x4c 00 02 42 #creqv 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 72 42 :creqv CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=289 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,CC_B_OP == CC_OP); } #crnand lt,lt,lt 0x4c 00 01 c2 #crnand 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 c2 :crnand CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=225 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,!(CC_B_OP & CC_OP)); } #crnor lt,lt,lt 0x4c 00 00 42 #crnor 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 70 42 :crnor CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=33 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,!(CC_B_OP | CC_OP)); } #cror lt,lt,lt 0x4c 00 03 82 #cror 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 73 82 :cror CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=449 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,(CC_B_OP | CC_OP)); } #crorc lt,lt,lt 0x4c 00 03 42 #crorc 4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 73 42 :crorc CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=417 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,((!CC_B_OP) | CC_OP)); } #crxor lt,lt,lt 0x4c 00 01 82 #crxor 
4*cr1+lt,4*cr2+gt,4*cr3+eq 0x4c 89 71 82 :crxor CC_D_OP,CC_OP,CC_B_OP is $(NOTVLE) & OP=19 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=193 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,(CC_B_OP ^ CC_OP)); } @ifndef IS_ISA # replace with dci command in ISA #dccci 0,r0 0x7c 00 03 8c :dccci RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=454 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheCongruenceClassInvalidate(ea); } @endif #=========================================================== # DIVxx #=========================================================== @ifdef BIT_64 #divd r0,r0,r0 0x7c 00 03 d2 :divd D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=489 & Rc=0 { D = A s/ B; } #divd. r0,r0,r0 0x7c 00 03 d3 :divd. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=489 & Rc=1 { D = A s/ B; cr0flags(D); } #divdo r0,r0,r0 0x7c 00 07 d2 :divdo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=489 & Rc=0 { divOverflow(A,B); D = A s/ B; } #divdo. r0,r0,r0 0x7c 00 07 d3 :divdo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=489 & Rc=1 { divOverflow(A,B); D = A s/ B; cr0flags(D); } ###################### #divdu r0,r0,r0 0x7c 00 03 92 :divdu D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=457 & Rc=0 { D = A / B; } #divdu. r0,r0,r0 0x7c 00 03 93 :divdu. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=457 & Rc=1 { D = A / B; cr0flags(D); } #divduo r0,r0,r0 0x7c 00 07 92 :divduo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=457 & Rc=0 { divZero(B); D = A / B; } #divduo. r0,r0,r0 0x7c 00 07 93 :divduo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=457 & Rc=1 { divZero(B); D = A / B; cr0flags(D); } @endif #############################3 #divw r0,r0,r0 0x7c 00 03 d6 :divw D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=491 & Rc=0 { @ifdef BIT_64 D = sext(A:4 s/ B:4); @else D = A s/ B; @endif } #divw. r0,r0,r0 0x7c 00 03 d7 :divw. 
D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=491 & Rc=1 { @ifdef BIT_64 divOverflow(A:4,B:4); D = sext(A:4 s/ B:4); cr0flags(D:4); @else divOverflow(A,B); D = A s/ B; cr0flags(D); @endif } #divwo r0,r0,r0 0x7c 00 07 d6 :divwo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=491 & Rc=0 { @ifdef BIT_64 divOverflow(A:4,B:4); D = sext(A:4 s/ B:4); @else divOverflow(A,B); D = A s/ B; @endif } #divwo. r0,r0,r0 0x7c 00 07 d7 :divwo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=491 & Rc=1 { @ifdef BIT_64 divOverflow(A:4,B:4); D = sext(A:4 s/ B:4); cr0flags(D:4); @else divOverflow(A,B); D = A s/ B; cr0flags(D); @endif } ######################### #divwu r0,r0,r0 0x7c 00 03 96 :divwu D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=459 & Rc=0 { @ifdef BIT_64 D = zext(A:4) / zext(B:4); @else D = A / B; @endif } #divwu. r0,r0,r0 0x7c 00 03 97 :divwu. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=459 & Rc=1 { @ifdef BIT_64 D = zext(A:4) / zext(B:4); cr0flags(D:4); @else D = A / B; cr0flags(D); @endif } #divwuo r0,r0,r0 0x7c 00 07 96 :divwuo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=459 & Rc=0 { @ifdef BIT_64 divZero(B:4); D = zext(A:4) / zext(B:4); @else divZero(B); D = A / B; @endif } #divwuo. r0,r0,r0 0x7c 00 07 97 :divwuo. 
D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=459 & Rc=1 { @ifdef BIT_64 divZero(B:4); D = zext(A:4) / zext(B:4); cr0flags(D:4); @else divZero(B); D = A / B; cr0flags(D); @endif } #=========================================================== # ECxxx,EIxxx #=========================================================== #eciwx r0,r0,r0 0x7c 00 02 6c :eciwx D,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & D & B & RA_OR_ZERO & XOP_1_10=310 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; D = externalControlIn(ea); } #ecowx r0,r0,r0 0x7c 00 03 6c :ecowx S,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & S & B & RA_OR_ZERO & XOP_1_10=438 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; externalControlOut(ea, S); } #=========================================================== # EIEIO #=========================================================== # binutils-descr: "eieio", X(31,854), 0xffffffff, PPC, BOOKE|PPCA2|PPC476, {0} # binutils: mytest.d: 20: 7c 00 06 ac eieio :eieio is OP=31 & XOP_1_10=854 & BITS_11_25=0 & BIT_0=0 { enforceInOrderExecutionIO(); } #=========================================================== # EQVx #=========================================================== #eqv r0,r0,r0 0x7c 00 02 38 :eqv A,S,B is OP=31 & S & A & B & XOP_1_10=284 & Rc=0 { A = ~(S ^ B); } #eqv. r0,r0,r0 0x7c 00 02 39 :eqv. A,S,B is OP=31 & S & A & B & XOP_1_10=284 & Rc=1 { A = ~(S ^ B); cr0flags(A); } #=========================================================== # EXTSBx #=========================================================== #extsb r0,r0 0x7c 00 07 74 :extsb A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=954 & Rc=0 { A = sext(S:1); } #extsb. r0,r0 0x7c 00 07 75 :extsb. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=954 & Rc=1 { A = sext(S:1); cr0flags(A); } #=========================================================== # EXTSHx #=========================================================== #extsh r0,r0 0x7c 00 07 34 :extsh A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=922 & Rc=0 { A = sext(S:2); } #extsh. 
r0,r0 0x7c 00 07 35 :extsh. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=922 & Rc=1 { A = sext(S:2); cr0flags(A); } @ifdef BIT_64 #extsw r0,r0 0x7c 00 07 b4 :extsw A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=986 & Rc=0 { A = sext(S:4); } #extsw. r0,r0 0x7c 00 07 b5 :extsw. A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=986 & Rc=1 { A = sext(S:4); cr0flags(A); } @endif #=========================================================== # FABSx #=========================================================== #fabs fr,f1r 0xfc 00 02 10 :fabs fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=0 { fD = abs(fB); } #fabs. fr0,fr1 0xfc 00 02 11 :fabs. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=264 & Rc=1 { fD = abs(fB); cr1flags(); } #fadd fr0,fr0,fr0 0xfc 00 00 2a :fadd fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0 { local tmpfA = fA; local tmpfB = fB; fD = fA f+ fB; setFPAddFlags(tmpfA,tmpfB,fD); } #fadd. fr0,fr0,fr0 0xfc 00 00 2b :fadd. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1 { local tmpfA = fA; local tmpfB = fB; fD = fA f+ fB; setFPAddFlags(tmpfA,tmpfB,fD); cr1flags(); } #fadds fr0,fr0,fr0 0xec 00 00 2a :fadds fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=0 { local tmpfA = fA; local tmpfB = fB; tmp:4 = float2float(fA f+ fB); fD = float2float(tmp); setFPAddFlags(tmpfA,tmpfB,fD); } #fadds. fr0,fr0,fr0 0xec 00 00 2b :fadds. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=21 & Rc=1 { local tmpfA = fA; local tmpfB = fB; tmp:4 = float2float(fA f+ fB); fD = float2float(tmp); setFPAddFlags(tmpfA,tmpfB,fD); cr1flags(); } #=========================================================== # FCFIDx #=========================================================== #fcfid fr0,fr0 0xfc 00 06 9c :fcfid fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=846 & Rc=0 { fD = int2float(fB); } #fcfid. fr0,fr0 0xfc 00 06 9d :fcfid. 
fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=846 & Rc=1 { fD = int2float(fB); setFPRF(fD); # fp_fr = intToFloatRoundedUp(fB); # fp_fi = intToFloatInexact(fB); fp_xx = fp_xx | fp_fi; setSummaryFPSCR(); cr1flags(); } #=========================================================== # FCMPO #=========================================================== #fcmpo fr0,fr0,fr0 0xfc 00 00 40 :fcmpo CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=32 & BIT_0=0 { tmp:1 = nan(fA) | nan(fB); fp_cc0 = (fA f< fB); fp_cc1 = (fA f> fB); fp_cc2 = (fA f== fB); CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp; } #fcmpu fr0,fr0,fr0 0xfc 00 00 00 :fcmpu CRFD,fA,fB is $(NOTVLE) & OP=63 & CRFD & BITS_21_22=0 & fA & fB & XOP_1_10=0 & BIT_0=0 { tmp:1 = nan(fA) | nan(fB); fp_cc0 = (fA f< fB); fp_cc1 = (fA f> fB); fp_cc2 = (fA f== fB); CRFD = (fp_cc0 << 3) | (fp_cc1 << 2) | (fp_cc2 << 1) | tmp; } #fctid fr0,fr0 0xfc 00 06 5c :fctid fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=814 & Rc=0 { # fp_fr = floatToIntRoundedUp(fB); # fp_fi = floatToIntInexact(fB); fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); # fp_xx = fp_xx | fp_fi; fD = trunc(fB); } #fctid. fr0,fr0 0xfc 00 06 5d :fctid. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=814 & Rc=1 { # fp_fr = floatToIntRoundedUp(fB); # fp_fi = floatToIntInexact(fB); fp_xx = fp_xx | fp_fi; # fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); setSummaryFPSCR(); cr1flags(); fD = trunc(fB); } #fctidz fr0,fr0 0xfc 00 06 5e :fctidz fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=815 & Rc=0 { fp_fr = 0; # fp_fi = floatToIntInexact(fB); fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); fp_xx = fp_xx | fp_fi; fD = trunc(fB); } #fctidz. fr0,fr0 0xfc 00 06 5f :fctidz. 
fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=815 & Rc=1 { fp_fr = 0; # fp_fi = floatToIntInexact(fB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); setSummaryFPSCR(); cr1flags(); fD = trunc(fB); } #fctiw fr0,fr0 0xfc 00 00 1c :fctiw fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=14 & Rc=0 { # fp_fr = floatToIntRoundedUp(fB); # fp_fi = floatToIntInexact(fB); fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); fp_xx = fp_xx | fp_fi; local intres:4; intres = trunc(fB); fD = sext(intres); } #fctiw. fr0,fr0 0xfc 00 00 1d :fctiw. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=14 & Rc=1 { # fp_fr = floatToIntRoundedUp(fB); # fp_fi = floatToIntInexact(fB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); setSummaryFPSCR(); cr1flags(); local intres:4; intres = trunc(fB); fD = sext(intres); } #fctiwz fr0,fr0 0xfc 00 00 1e :fctiwz fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=15 & Rc=0 { fp_fr = 0; # fp_fi = floatToIntInexact(fB); fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); fp_xx = fp_xx | fp_fi; local intres:4; intres = trunc(fB); fD = sext(intres); } #fctiwz. fr0,fr0 0xfc 00 00 1f :fctiwz. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=15 & Rc=1 { fp_fr = 0; # fp_fi = floatToIntInexact(fB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(fB); # fp_vxcvi = fp_vxcvi | invalidFloatToInt(fB); setSummaryFPSCR(); cr1flags(); local intres:4; intres = trunc(fB); fD = sext(intres); } #fdiv fr0,fr0,fr0 0xfc 00 00 24 :fdiv fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0 { local tmpfA = fA; local tmpfB = fB; fD = fA f/ fB; setFPDivFlags(tmpfA,tmpfB,fD); } #fdiv. fr0,fr0,fr0 0xfc 00 00 25 :fdiv. 
fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1 { local tmpfA = fA; local tmpfB = fB; fD = fA f/ fB; setFPDivFlags(tmpfA,tmpfB,fD); cr1flags(); } #fdivs fr0,fr0,fr0 0xec 00 00 24 :fdivs fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=0 { local tmpfA = fA; local tmpfB = fB; tmp:4 = float2float(fA f/ fB); fD = float2float(tmp); setFPDivFlags(tmpfA,tmpfB,fD); } #fdivs. fr0,fr0,fr0 0xec 00 00 25 :fdivs. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=18 & Rc=1 { local tmpfA = fA; local tmpfB = fB; tmp:4 = float2float(fA f/ fB); fD = float2float(tmp); setFPDivFlags(tmpfA,tmpfB,fD); cr1flags(); } #fmadd fr0,fr0,fr0,fr0 0xfc 00 00 3a :fmadd fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=29 & Rc=0 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; fD = tmp f+ fB; setFPRF(fD); # fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); } #fmadd. fr0,fr0,fr0,fr0 0xfc 00 00 3b :fmadd. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=29 & Rc=1
{
	local tmpfA = fA;
	local tmpfB = fB;
	local tmpfC = fC;
	tmp:8 = fA f* fC;
	fD = tmp f+ fB;
	setFPRF(fD);
# Commented-out FPSCR bookkeeping below apparently relies on helpers that are
# not implemented; kept for reference (same pattern throughout this file).
#	fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB);
#	fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB);
#	fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB);
#	fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB);
#	fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB);
#	fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC);
	setSummaryFPSCR();
	cr1flags();
}

#fmadds fr0,fr0,fr0,fr0 0xec 00 00 3a
:fmadds fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=29 & Rc=0
{
	local tmpfA = fA;
	local tmpfB = fB;
	local tmpfC = fC;
	tmp:8 = fA f* fC;
	tmp2:4 = float2float(tmp f+ fB);
	fD = float2float(tmp2);
	setFPRF(fD);
#	fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB);
#	fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB);
#	fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB);
#	fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB);
#	fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB);
#	fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC);
	setSummaryFPSCR();
}

#fmadds. fr0,fr0,fr0,fr0 0xec 00 00 3b
:fmadds.
fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=29 & Rc=1 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; tmp2:4 = float2float(tmp f+ fB); fD = float2float(tmp2); setFPRF(fD); # fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); cr1flags(); } #fmr fr0,fr0 0xfc 00 00 90 :fmr fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=0 { fD = fB; } #fmr. fr0,fr0 0xfc 00 00 91 :fmr. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=72 & Rc=1 { fD = fB; cr1flags(); } #fmsub fr0,fr0,fr0,fr0 0xfc 00 00 38 :fmsub fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=28 & Rc=0 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; fD = tmp f- fB; setFPRF(fD); # fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); } #fmsub. fr0,fr0,fr0,fr0 0xfc 00 00 39 :fmsub. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=28 & Rc=1
{
	local tmpfA = fA;
	local tmpfB = fB;
	local tmpfC = fC;
	tmp:8 = fA f* fC;
	# fmsub. (OP=63) is the double-precision form: store the full 8-byte
	# result, exactly as fmsub (Rc=0) does. The Rc bit only adds the CR1
	# update; previously this constructor rounded through a 4-byte
	# float2float, which belongs only to the single-precision fmsubs/fmsubs.
	fD = tmp f- fB;
	setFPRF(fD);
#	fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB);
#	fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB);
#	fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB);
#	fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB);
#	fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB);
#	fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC);
	setSummaryFPSCR();
	cr1flags();
}

#fmsubs fr0,fr0,fr0,fr0 0xec 00 00 38
:fmsubs fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=28 & Rc=0
{
	local tmpfA = fA;
	local tmpfB = fB;
	local tmpfC = fC;
	tmp:8 = fA f* fC;
	tmp2:4 = float2float(tmp f- fB);
	fD = float2float(tmp2);
	setFPRF(fD);
#	fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB);
#	fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB);
#	fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB);
#	fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB);
#	fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB);
#	fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC);
	setSummaryFPSCR();
}

# OP=59 encodes with a 0xec leading byte (example bytes corrected from 0xfc)
#fmsubs. fr0,fr0,fr0,fr0 0xec 00 00 39
:fmsubs.
fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=28 & Rc=1 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; tmp2:4 = float2float(tmp f- fB); fD = float2float(tmp2); setFPRF(fD); # fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); cr1flags(); } #fmul fr0,fr0,fr0 0xfc 00 00 32 :fmul fD,fA,fC is $(NOTVLE) & OP=63 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=0 { local tmpfA = fA; local tmpfC = fC; fD = fA f* fC; setFPMulFlags(tmpfA,tmpfC,fD); } #fmul. fr0,fr0,fr0 0xfc 00 00 33 :fmul. fD,fA,fC is $(NOTVLE) & OP=63 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=1 { local tmpfA = fA; local tmpfC = fC; fD = fA f* fC; setFPMulFlags(tmpfA,tmpfC,fD); cr1flags(); } #fmuls fr0,fr0,fr0 0xec 00 00 32 :fmuls fD,fA,fC is $(NOTVLE) & OP=59 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=0 { local tmpfA = fA; local tmpfC = fC; tmp:4 = float2float(fA f* fC); fD = float2float(tmp); setFPMulFlags(tmpfA,tmpfC,fD); } #fmuls. fr0,fr0,fr0 0xec 00 00 33 :fmuls. fD,fA,fC is $(NOTVLE) & OP=59 & fD & fA & fC & BITS_11_15=0 & XOP_1_5=25 & Rc=1 { local tmpfA = fA; local tmpfC = fC; tmp:4 = float2float(fA f* fC); fD = float2float(tmp); setFPMulFlags(tmpfA,tmpfC,fD); cr1flags(); } #fnabs fr0,fr0 0xfc 00 01 10 :fnabs fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=136 & Rc=0 { fD = fB | 0x8000000000000000; } #fnabs. fr0,fr0 0xfc 00 01 11 :fnabs. 
fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=136 & Rc=1 { fD = fB | 0x8000000000000000; cr1flags(); } #fneg fr0,fr0 0xfc 00 00 50 :fneg fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=40 & Rc=0 { fD = f- fB; } #fneg. fr0,fr0 0xfc 00 00 51 :fneg. fD,fB is $(NOTVLE) & OP=63 & fD & fB & BITS_16_20=0 & XOP_1_10=40 & Rc=1 { fD = f- fB; cr1flags(); } #fnmadd fr0,fr0,fr0,fr0 0xfc 00 00 3e :fnmadd fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=31 & Rc=0 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; fD = f- (tmp f+ fB); setFPRF(fD); # fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); } #fnmadd. fr0,fr0,fr0,fr0 0xfc 00 00 3f :fnmadd. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=31 & Rc=1 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; fD = f- (tmp f+ fB); setFPRF(fD); # fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); cr1flags(); } #fnmadds fr0,fr0,fr0,fr0 0xec 00 00 3e :fnmadds fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=31 & Rc=0 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; tmp2:4 = float2float(tmp f+ fB); fD = f- float2float(tmp2); setFPRF(fD); # fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); } #fnmadds. fr0,fr0,fr0,fr0 0xec 00 00 3f :fnmadds. 
fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=31 & Rc=1 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; tmp2:4 = float2float(tmp f+ fB); fD = f- float2float(tmp2); setFPRF(fD); # fp_fr = floatMaddRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMaddInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMaddOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMaddUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinityAdd(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); cr1flags(); } #fnmsub fr0,fr0,fr0,fr0 0xfc 00 00 3c :fnmsub fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=30 & Rc=0 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; fD = f- (tmp f- fB); setFPRF(fD); # fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); } #fnmsub. fr0,fr0,fr0,fr0 0xfc 00 00 3d :fnmsub. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fC & fB & XOP_1_5=30 & Rc=1
{
	local tmpfA = fA;
	local tmpfB = fB;
	local tmpfC = fC;
	tmp:8 = fA f* fC;
	# fnmsub. (OP=63) is the double-precision form: negate the full 8-byte
	# result, exactly as fnmsub (Rc=0) does. The Rc bit only adds the CR1
	# update; previously this constructor rounded through a 4-byte
	# float2float, which belongs only to the single-precision fnmsubs/fnmsubs.
	fD = f- (tmp f- fB);
	setFPRF(fD);
#	fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB);
#	fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB);
#	fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB);
#	fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB);
#	fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB);
#	fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC);
	setSummaryFPSCR();
	cr1flags();
}

#fnmsubs fr0,fr0,fr0,fr0 0xec 00 00 3c
:fnmsubs fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=30 & Rc=0
{
	local tmpfA = fA;
	local tmpfB = fB;
	local tmpfC = fC;
	tmp:8 = fA f* fC;
	tmp2:4 = float2float(tmp f- fB);
	fD = f- float2float(tmp2);
	setFPRF(fD);
#	fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB);
#	fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB);
#	fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB);
#	fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB);
#	fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB);
#	fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC);
	setSummaryFPSCR();
}

# OP=59 encodes with a 0xec leading byte (example bytes corrected from 0xfc)
#fnmsubs. fr0,fr0,fr0,fr0 0xec 00 00 3d
:fnmsubs.
fD,fA,fC,fB is $(NOTVLE) & OP=59 & fD & fA & fC & fB & XOP_1_5=30 & Rc=1 { local tmpfA = fA; local tmpfB = fB; local tmpfC = fC; tmp:8 = fA f* fC; tmp2:4 = float2float(tmp f- fB); fD = f- float2float(tmp2); setFPRF(fD); # fp_fr = floatMsubRoundedUp(tmpfA, tmpfC, tmpfB); # fp_fi = floatMsubInexact(tmpfA,tmpfC,tmpfB); # fp_ox = fp_ox | floatMsubOverflow(tmpfA,tmpfC,tmpfB); # fp_ux = fp_ux | floatMsubUnderflow(tmpfA,tmpfC,tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfA) | nan(tmpfC) | nan(tmpfB); # fp_vxisi = fp_vxisi | floatInfinitySub(tmp, tmpfB); # fp_vximz = fp_vximz | floatInfinityMulZero(tmpfA,tmpfC); setSummaryFPSCR(); cr1flags(); } #fres fr0,fr0 0xec 00 00 30 :fres fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=0 { local tmpfB = fB; one:8 = 1; floatOne:8 = int2float(one); tmp:4 = float2float(floatOne f/ fB); fD = float2float(tmp); setFPRF(fD); # fp_fr = floatDivRoundedUp(floatOne, tmpfB); # fp_fi = floatDivInexact(floatOne, tmpfB); # fp_ox = fp_ox | floatDivOverflow(floatOne, tmpfB); # fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpfB); fp_zx = fp_zx | (fB f== 0); fp_vxsnan = fp_vxsnan | nan(tmpfB); setSummaryFPSCR(); } #fres. fr0,fr0 0xec 00 00 31 :fres. 
fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=1 { local tmpfB = fB; one:8 = 1; floatOne:8 = int2float(one); tmp:4 = float2float(floatOne f/ fB); fD = float2float(tmp); setFPRF(fD); # fp_fr = floatDivRoundedUp(floatOne, tmpfB); # fp_fi = floatDivInexact(floatOne, tmpfB); # fp_ox = fp_ox | floatDivOverflow(floatOne, tmpfB); # fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpfB); fp_zx = fp_zx | (fB f== 0); fp_vxsnan = fp_vxsnan | nan(tmpfB); setSummaryFPSCR(); cr1flags(); } #frsp fr0,fr0 0xfc 00 00 18 :frsp fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=12 & Rc=0 { local tmpfB = fB; #zero:8 = 0; #floatZero:8 = int2float(zero); tmp:4 = float2float(fB); fD = float2float(tmp); setFPRF(fD); # fp_fr = floatAddRoundedUp(floatZero, tmpfB); # fp_fi = floatAddInexact(floatZero, tmpfB); # fp_ox = fp_ox | floatAddOverflow(floatZero, tmpfB); # fp_ux = fp_ux | floatAddUnderflow(floatZero, tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfB); setSummaryFPSCR(); } #frsp. fr0,fr0 0xfc 00 00 19 :frsp. 
fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & XOP_1_10=12 & Rc=1 { local tmpfB = fB; #zero:8 = 0; #floatZero:8 = int2float(zero); tmp:4 = float2float(fB); fD = float2float(tmp); setFPRF(fD); # fp_fr = floatAddRoundedUp(floatZero, tmpfB); # fp_fi = floatAddInexact(floatZero, tmpfB); # fp_ox = fp_ox | floatAddOverflow(floatZero, tmpfB); # fp_ux = fp_ux | floatAddUnderflow(floatZero, tmpfB); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfB); setSummaryFPSCR(); cr1flags(); } #frsqrte fr0,fr0 0xfc 00 00 34 :frsqrte fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=0 { local tmpfB = fB; one:8 = 1; floatOne:8 = int2float(one); tmpSqrt:8 = sqrt(fB); fD = (floatOne f/ tmpSqrt); setFPRF(fD); # fp_fr = floatDivRoundedUp(floatOne, tmpSqrt); # fp_fi = floatDivInexact(floatOne, tmpSqrt); # fp_ox = fp_ox | floatDivOverflow(floatOne, tmpSqrt); # fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpSqrt); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfB); setSummaryFPSCR(); } #frsqrte. fr0,fr0 0xfc 00 00 35 :frsqrte. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20 & fB & BITS_6_10=0 & XOP_1_5=26 & Rc=1 { local tmpfB = fB; one:8 = 1; floatOne:8 = int2float(one); tmpSqrt:8 = sqrt(fB); fD = (floatOne f/ tmpSqrt); setFPRF(fD); # fp_fr = floatDivRoundedUp(floatOne, tmpSqrt); # fp_fi = floatDivInexact(floatOne, tmpSqrt); # fp_ox = fp_ox | floatDivOverflow(floatOne, tmpSqrt); # fp_ux = fp_ux | floatDivUnderflow(floatOne, tmpSqrt); fp_xx = fp_xx | fp_fi; fp_vxsnan = fp_vxsnan | nan(tmpfB); fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB); setSummaryFPSCR(); cr1flags(); } #fsel f0r,fr0,fr0,fr0 0xfc 00 00 2e :fsel fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fB & fC & XOP_1_5=23 & Rc=0 { local tmpfA = fA; local tmpfB = fB; zero:4=0; fD=fC; if (tmpfA f>= int2float(zero)) goto inst_next; fD=tmpfB; } #fsel. fr0,fr0,fr0,fr0 0xfc 00 00 2f :fsel. 
fD,fA,fC,fB is $(NOTVLE) & OP=63 & fD & fA & fB & fC & XOP_1_5=23 & Rc=1
{
	# fsel. (floating select, record form): fD = (fA >= 0.0) ? fC : fB,
	# then copy the FPSCR exception summary bits into CR1.
	local tmpfA = fA;
	local tmpfB = fB;
	zero:4=0;
	fD=fC;
	# FIX: the jump target was missing ("goto ;"). Restore the local label so
	# cr1flags() executes on BOTH paths; the Rc=0 form can jump to inst_next,
	# but the record form must still update CR1 when fA >= 0.
	if (tmpfA f>= int2float(zero)) goto <done>;
	fD=tmpfB;
	<done>
	cr1flags();
}

#fsqrt f0r,fr0 0xfc 00 00 2c
:fsqrt fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=0
{
	# Double-precision square root.
	local tmpfB = fB;
	fD = sqrt(fB);
	setFPRF(fD);
#	fp_fr = floatSqrtRoundedUp(tmpfB);
#	fp_fi = floatSqrtInexact(tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfB);
#	fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
	setSummaryFPSCR();
}

#fsqrt. fr0,fr0 0xfc 00 00 2d
:fsqrt. fD,fB is $(NOTVLE) & OP=63 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=1
{
	# Record form of fsqrt: also updates CR1.
	local tmpfB = fB;
	fD = sqrt(fB);
	setFPRF(fD);
#	fp_fr = floatSqrtRoundedUp(tmpfB);
#	fp_fi = floatSqrtInexact(tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfB);
#	fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
	setSummaryFPSCR();
	cr1flags();
}

#fsqrts fr0,fr0 0xec 00 00 2c
:fsqrts fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=0
{
	# Single-precision square root: round result to single, keep double storage.
	local tmpfB = fB;
	tmp:4 = float2float(sqrt(fB));
	fD = float2float(tmp);
	setFPRF(fD);
#	fp_fr = floatSqrtRoundedUp(tmpfB);
#	fp_fi = floatSqrtInexact(tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfB);
#	fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
	setSummaryFPSCR();
}

#fsqrts. fr0,fr0 0xec 00 00 2d
:fsqrts. fD,fB is $(NOTVLE) & OP=59 & fD & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=22 & Rc=1
{
	# Record form of fsqrts: also updates CR1.
	local tmpfB = fB;
	tmp:4 = float2float(sqrt(fB));
	fD = float2float(tmp);
	setFPRF(fD);
#	fp_fr = floatSqrtRoundedUp(tmpfB);
#	fp_fi = floatSqrtInexact(tmpfB);
	fp_xx = fp_xx | fp_fi;
	fp_vxsnan = fp_vxsnan | nan(tmpfB);
#	fp_vxsqrt = fp_vxsqrt | sqrtInvalid(tmpfB);
	setSummaryFPSCR();
	cr1flags();
}

#fsub fr0,fr0,fr0 0xfc 00 00 28
:fsub fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0
{
	# Double-precision subtract; setFPSubFlags updates the FPSCR status bits.
	local tmpfA = fA;
	local tmpfB = fB;
	fD = fA f- fB;
	setFPSubFlags(tmpfA,tmpfB,fD);
}
#fsub.
fr0,fr0,fr0 0xfc 00 00 29 :fsub. fD,fA,fB is $(NOTVLE) & OP=63 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1 { local tmpfA = fA; local tmpfB = fB; fD = fA f- fB; setFPSubFlags(tmpfA,tmpfB,fD); cr1flags(); } #fsubs fr0,fr0,fr0 0xec 00 00 28 :fsubs fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=0 { local tmpfA = fA; local tmpfB = fB; tmp:4 = float2float(fA f- fB); fD = float2float(tmp); setFPSubFlags(tmpfA,tmpfB,fD); } #fsubs. fr0,fr0,fr0 0xec 00 00 29 :fsubs. fD,fA,fB is $(NOTVLE) & OP=59 & fD & fA & fB & BITS_6_10=0 & XOP_1_5=20 & Rc=1 { local tmpfA = fA; local tmpfB = fB; tmp:4 = float2float(fA f- fB); fD = float2float(tmp); setFPSubFlags(tmpfA,tmpfB,fD); cr1flags(); } @ifndef IS_ISA # iccci is just a special form of ici #iccci 0,r0 0x7c 00 07 8c :iccci RA_OR_ZERO,B is OP=31 & BITS_21_25=0 & B & XOP_1_10=966 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; instructionCacheCongruenceClassInvalidate(ea); } @endif #lbz r0,3(0) 0x88 00 00 03 #lbz r0,3(r2) 0x88 02 00 03 :lbz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=34 & D & dPlusRaOrZeroAddress { D = zext(*:1(dPlusRaOrZeroAddress)); } #lbzu r0,3(r2) 0x8c 02 00 03 :lbzu D,dPlusRaAddress is $(NOTVLE) & OP=35 & D & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; D = zext(*:1(ea)); A = ea; } #lbzux r0,r2,r0 0x7c 02 00 ee :lbzux D,A,B is OP=31 & D & A & B & XOP_1_10=119 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; D = zext(*:1(ea)); A = ea; } #lbzx r0,r2,r0 0x7c 02 00 ae :lbzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=87 & BIT_0=0 { tmp:$(REGISTER_SIZE) = RA_OR_ZERO+B; D = zext(*:1(tmp)); } @ifdef BIT_64 #ld r0,8(r2) 0xe8 02 00 08 :ld D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=58 & D & dPlusRaOrZeroAddress & BITS_0_1=0 { D = *:8(dPlusRaOrZeroAddress); } ##ldarx r0,r0,r0 0x7c 00 00 a8 #:ldarx T,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & T & RA_OR_ZERO & B & XOP_1_10=84 & TX #{ # ea = RA_OR_ZERO+B; # RESERVE = 1; # RESERVE_ADDRSS = ea; # T = *:8(ea); #} #ldu r0,8(r2) 
0xe8 02 00 09 :ldu D,dsPlusRaAddress is $(NOTVLE) & OP=58 & D & dsPlusRaAddress & A & BITS_0_1=1 { ea:$(REGISTER_SIZE) = dsPlusRaAddress; D = *:8(ea); A = ea; } #ldux r0,r2,r0 0x7c 02 00 6a :ldux D,A,B is OP=31 & D & A & B & XOP_1_10=53 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; D = *:8(ea); A = ea; } @ifndef IS_ISA #ldarx r0,r2,r0 0x7c 02 00 2a :ldarx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=21 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; D = *:8(ea); } @endif @endif #lfd fr0,8(r2) 0xc8 02 00 08 :lfd fD,dPlusRaOrZeroAddress is $(NOTVLE) & OP=50 & fD & dPlusRaOrZeroAddress { fD = *:8(dPlusRaOrZeroAddress); } #lfdu fr0,8(r2) 0xcc 02 00 08 :lfdu fD,dPlusRaAddress is $(NOTVLE) & OP=51 & fD & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; fD = *:8(ea); A = ea; } #lfdux fr0,r2,r0 0x7c 02 04 ee :lfdux fD,A,B is $(NOTVLE) & OP=31 & fD & A & B & XOP_1_10=631 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; fD = *:8(ea); A = ea; } #lfdx fr0,r2,r0 0x7c 02 04 ae :lfdx fD,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fD & RA_OR_ZERO & B & XOP_1_10=599 & BIT_0=0 { fD = *:8(RA_OR_ZERO+B); } #lfs fr0,8(r2) 0xc0 02 00 08 :lfs fD,dPlusRaOrZeroAddress is $(NOTVLE) & OP=48 & fD & dPlusRaOrZeroAddress { fD = float2float(*:4(dPlusRaOrZeroAddress)); } #lfsu fr0,8(r2) 0xc0 02 00 08 :lfsu fD,dPlusRaAddress is $(NOTVLE) & OP=49 & fD & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; fD = float2float(*:4(ea)); A = ea; } #lfsux fr0,r2,r0 0x7c 02 04 6e :lfsux fD,A,B is $(NOTVLE) & OP=31 & fD & A & B & XOP_1_10=567 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; fD = float2float(*:4(ea)); A = ea; } #lfsx fr0,r2,r0 0x7c 02 04 2e :lfsx fD,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fD & RA_OR_ZERO & B & XOP_1_10=535 & BIT_0=0 { fD = float2float(*:4(RA_OR_ZERO+B)); } #lha r0,4(0) 0xa8 00 00 04 #lha r0,4(r2) 0xa8 02 00 04 :lha D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=42 & D & dPlusRaOrZeroAddress { D = sext(*:2(dPlusRaOrZeroAddress)); } #lhau r0,8(r2) 0xac 02 00 08 :lhau D,dPlusRaAddress is 
$(NOTVLE) & OP=43 & D & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; D = sext(*:2(ea)); A = ea; } #lhaux r0,r2,r0 0x7c 02 02 ee :lhaux D,A,B is OP=31 & D & A & B & XOP_1_10=375 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; D = sext(*:2(ea)); A = ea; } #lhax r0,r2,r0 0x7c 02 02 ae :lhax D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=343 & BIT_0=0 { D = sext(*:2(RA_OR_ZERO+B)); } #lhbrx r0,r2,r0 0x7c 02 06 2c :lhbrx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=790 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; tmp:$(REGISTER_SIZE) = zext(*:1(ea+1)) << 8; D = tmp | zext(*:1(ea)); } #lhz r0,4(0) 0xa0 00 00 04 #lhz r0,4(r2) 0xa0 02 00 04 :lhz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=40 & D & dPlusRaOrZeroAddress { D = zext(*:2(dPlusRaOrZeroAddress)); } #lhzu r0,4(r2) 0xa4 02 00 04 :lhzu D,dPlusRaAddress is $(NOTVLE) & OP=41 & D & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; D = zext(*:2(ea)); A = ea; } #lhzux r0,r2,r0 0x7c 02 02 6e :lhzux D,A,B is OP=31 & D & A & B & XOP_1_10=311 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; D = zext(*:2(ea)); A = ea; } #lhzx r0,r2,r0 0x7c 02 02 2e :lhzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=279 & BIT_0=0 { D = zext(*:2(RA_OR_ZERO+B)); } # big stuffs @include "lmwInstructions.sinc" @include "lswInstructions.sinc" #lswx r0,0,r0 0x7c 00 3c 2a #lswx r0,r2,40 0x7c 02 3c 2a define pcodeop lswxOp; :lswx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & NB & BITS_21_25 & B & XOP_1_10=533 & BIT_0=0 { D = lswxOp(D,RA_OR_ZERO,B); } @ifdef BIT_64 #lwa r0,8(r2) 0xe8 02 00 0a :lwa D,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=58 & D & dsPlusRaOrZeroAddress & BITS_0_1=2 { D = sext(*:4(dsPlusRaOrZeroAddress)); } @endif #lwarx r0,r0,r0 0x7c 00 00 28 :lwarx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=20 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; #RESERVE = 1; #RESERVE_ADDRSS:$(REGISTER_SIZE) = ea; @ifdef BIT_64 D = zext(*:4(ea)); @else D = *:4(ea); @endif } @ifdef BIT_64 #lwaux r0,r2,r0 0x7c 02 
02 ea :lwaux D,A,B is OP=31 & D & A & B & XOP_1_10=373 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; D = sext(*:4(ea)); A = ea; } #lwax r0,r2,r0 0x7c 02 02 aa :lwax D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=341 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; D = sext(*:4(ea)); } @endif #lwbrx r0,r2,r0 0x7c 02 04 2c :lwbrx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=534 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; tmp1:$(REGISTER_SIZE) = zext(*:1(ea+3)) << 24; tmp2:$(REGISTER_SIZE) = zext(*:1(ea+2)) << 16; tmp3:$(REGISTER_SIZE) = zext(*:1(ea+1)) << 8; D = tmp1 | tmp2 | tmp3 | zext(*:1(ea)); } #lwz r0,4(0) 0x80 00 00 04 #lwz r0,4(r2) 0x80 02 00 04 :lwz D,dPlusRaOrZeroAddress is $(NOTVLE) & OP=32 & D & dPlusRaOrZeroAddress { @ifdef BIT_64 D = zext(*:4(dPlusRaOrZeroAddress)); @else D = *:4(dPlusRaOrZeroAddress); @endif } #lwzu r0,4(r2) 0x84 02 00 04 :lwzu D,dPlusRaAddress is $(NOTVLE) & OP=33 & D & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; @ifdef BIT_64 D = zext(*:4(ea)); @else D = *:4(ea); @endif A = ea; } #lwzux r0,r2,r0 0x7c 02 00 6e :lwzux D,A,B is OP=31 & D & A & B & XOP_1_10=55 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; @ifdef BIT_64 D = zext(*:4(ea)); @else D = *:4(ea); @endif A = ea; } #lwzx r0,r2,r0 0x7c 02 00 2e :lwzx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=23 & BIT_0=0 { @ifdef BIT_64 D = zext(*:4(RA_OR_ZERO+B)); @else D = *:4(RA_OR_ZERO+B); @endif } @ifndef NoLegacyIntegerMultiplyAccumulate @include "mulhwInstructions.sinc" @endif #mcrf cr0,cr0 0x4c 00 00 00 :mcrf CRFD,CRFS is $(NOTVLE) & OP=19 & CRFD & BITS_21_22=0 & CRFS & BITS_0_17=0 { CRFD = CRFS; } #mcrfs cr0,cr0 0xfc 00 00 80 :mcrfs CRFD,CRFS is $(NOTVLE) & OP=63 & CRFD & FPSCR_CRFS & BITS_21_22=0 & CRFS & BITS_11_17=0 & XOP_1_10=64 & BIT_0=0 { CRFD = FPSCR_CRFS; } #mcrxr cr0 0x7c 00 04 00 :mcrxr CRFD is OP=31 & CRFD & BITS_11_22=0 & XOP_1_10=512 & BIT_0=0 { CRFD = (xer_so & 1) << 3 | (xer_ov & 1) << 2 | (xer_ca & 1) << 1; xer_so = 0; xer_ov = 0; 
xer_ca = 0; } #mfcr r0 0x7c 00 00 26 :mfcr D is OP=31 & D & BITS_11_20=0 & XOP_1_10=19 & BIT_0=0 { tmp:4 = zext(cr0 & 0xf) << 28 | zext(cr1 & 0xf) << 24 | zext(cr2 & 0xf) << 20 | zext(cr3 & 0xf) << 16 | zext(cr4 & 0xf) << 12 | zext(cr5 & 0xf) << 8 | zext(cr6 & 0xf) << 4 | zext(cr7 & 0xf); @ifdef BIT_64 D = zext(tmp); @else D = tmp; @endif } #mfocrf D,cr1 0x7c 31 00 26 :mfocrf D,CRM_CR is OP=31 & D & BIT_20=1 & CRM_CR & BIT_11 & XOP_1_10=19 & BIT_0=0 { @ifdef BIT_64 D = zext(CRM_CR); @else D = CRM_CR; @endif } #mffs fD 0xfc 00 04 8e :mffs fD is $(NOTVLE) & OP=63 & fD & BITS_11_20=0 & XOP_1_10=583 & Rc=0 { tmp:4 = 0; packFPSCR(tmp); fD = zext(tmp); } #mffs. fD 0xfc 00 04 8f :mffs. fD is $(NOTVLE) & OP=63 & fD & BITS_11_20=0 & XOP_1_10=583 & Rc=1 { tmp:4 = 0; packFPSCR(tmp); fD = zext(tmp); cr1flags(); } ### is this pcode correct on 64-bit bridge? #mfsr r0,r0 0x7c 00 04 a6 :mfsr D,B is $(NOTVLE) & OP=31 & D & SR & BIT_20=0 & B & BITS_11_15=0 & XOP_1_10=595 & BIT_0=0 { @ifdef BIT_64 D = zext(SR); @else D = SR; @endif } #mfsrin r0,r0 0x7c 00 05 26 :mfsrin D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=659 & BIT_0=0 { @ifdef BIT_64 tmp:4 = (B:4 >> 28); @else tmp:$(REGISTER_SIZE) = (B >> 28); @endif D = *[register]:4 ($(SEG_REGISTER_BASE)+tmp); } #mtcrf 10,r0 0x7c 01 01 20 :mtcrf CRM,S is OP=31 & S & BIT_20=0 & CRM & CRM0 & CRM1 & CRM2 & CRM3 & CRM4 & CRM5 & CRM6 & CRM7 & BIT_11=0 & XOP_1_10=144 & BIT_0=0 { tmp:$(REGISTER_SIZE) = (S >> 28) & 0xf; cr0 = (cr0 * (CRM0:1 == 0)) | (tmp:1 * (CRM0:1 == 1)); tmp = (S >> 24) & 0xf; cr1 = (cr1 * (CRM1:1 == 0)) | (tmp:1 * (CRM1:1 == 1)); tmp = (S >> 20) & 0xf; cr2 = (cr2 * (CRM2:1 == 0)) | (tmp:1 * (CRM2:1 == 1)); tmp = (S >> 16) & 0xf; cr3 = (cr3 * (CRM3:1 == 0)) | (tmp:1 * (CRM3:1 == 1)); tmp = (S >> 12) & 0xf; cr4 = (cr4 * (CRM4:1 == 0)) | (tmp:1 * (CRM4:1 == 1)); tmp = (S >> 8) & 0xf; cr5 = (cr5 * (CRM5:1 == 0)) | (tmp:1 * (CRM5:1 == 1)); tmp = (S >> 4) & 0xf; cr6 = (cr6 * (CRM6:1 == 0)) | (tmp:1 * (CRM6:1 == 1)); 
tmp = S & 0xf;
	cr7 = (cr7 * (CRM7:1 == 0)) | (tmp:1 * (CRM7:1 == 1));
}

#mtfsb0 fp_ux 0xfc 80 00 8c
:mtfsb0 CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=70 & Rc=0
{
	# Clear the selected FPSCR bit.
	CRBD = 0;
}

#mtfsb0. fp_ux 0xfc 80 00 8d
:mtfsb0. CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=70 & Rc=1
{
	# Record form: also copies the FPSCR summary bits into CR1.
	CRBD = 0;
	cr1flags();
}

#mtfsb1 fp_ux 0xfc 80 00 4c
:mtfsb1 CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=38 & Rc=0
{
	# Set the selected FPSCR bit.
	CRBD = 1;
}

#mtfsb1. fp_ux 0xfc 80 00 4d
:mtfsb1. CRBD is $(NOTVLE) & OP=63 & CRBD & BITS_11_20=0 & XOP_1_10=38 & Rc=1
{
	CRBD = 1;
	# FIX: the record form (Rc=1) must update CR1 from the FPSCR summary
	# bits, matching mtfsb0. above and every other Rc=1 FP constructor here.
	cr1flags();
}

#mtfsf 10,fr0 0xfc 00 05 8e
:mtfsf FM,fB is $(NOTVLE) & OP=63 & BIT_25=0 & FM & FM0 & FM1 & FM2 & FM3 & FM4 & FM5 & FM6 & FM7 & BIT_16=0 & fB & XOP_1_10=711 & Rc=0
{
	# Move to FPSCR fields: each FM bit selects one 4-bit FPSCR nibble to be
	# replaced with the corresponding nibble of fB's low word.
	tmp:4 = 0;
	packFPSCR(tmp);
	mask0:4 = zext((FM0:1 == 1)* 0xf) << 28;
	mask1:4 = zext((FM1:1 == 1)* 0xf) << 24;
	mask2:4 = zext((FM2:1 == 1)* 0xf) << 20;
	mask3:4 = zext((FM3:1 == 1)* 0xf) << 16;
	mask4:4 = zext((FM4:1 == 1)* 0xf) << 12;
	mask5:4 = zext((FM5:1 == 1)* 0xf) << 8;
	mask6:4 = zext((FM6:1 == 1)* 0xf) << 4;
	mask7:4 = zext((FM7:1 == 1)* 0xf);
	mask:4 = mask0 | mask1 | mask2 | mask3 | mask4 | mask5 | mask6 | mask7;
	tmp1:4 = fB:4;
	tmp2:4 = (tmp & ~mask) | (tmp1 & mask);
	unpackFPSCR(tmp2);
}

#mtfsf. 10,fr0 0xfc 00 05 8f
:mtfsf.
FM,fB is $(NOTVLE) & OP=63 & BIT_25=0 & FM & FM0 & FM1 & FM2 & FM3 & FM4 & FM5 & FM6 & FM7 & BIT_16=0 & fB & XOP_1_10=711 & Rc=1 { tmp:4 = 0; packFPSCR(tmp); mask0:4 = zext((FM0:1 == 1)* 0xf) << 28; mask1:4 = zext((FM1:1 == 1)* 0xf) << 24; mask2:4 = zext((FM2:1 == 1)* 0xf) << 20; mask3:4 = zext((FM3:1 == 1)* 0xf) << 16; mask4:4 = zext((FM4:1 == 1)* 0xf) << 12; mask5:4 = zext((FM5:1 == 1)* 0xf) << 8; mask6:4 = zext((FM6:1 == 1)* 0xf) << 4; mask7:4 = zext((FM7:1 == 1)* 0xf); mask:4 = mask0 | mask1 | mask2 | mask3 | mask4 | mask5 | mask6 | mask7; tmp1:4 = fB:4; tmp2:4 = (tmp & ~mask) | (tmp1 & mask); unpackFPSCR(tmp2); cr1flags(); } #mtfsfi 10,3 0xfc 00 01 0c :mtfsfi crfD,IMM is $(NOTVLE) & OP=63 & crfD & BITS_16_22=0 & IMM & BIT_11=0 & XOP_1_10=134 & Rc=0 { tmp:4 = 0; packFPSCR(tmp); shift:1 = 28-(crfD*4); mask:4 = 0xf << shift; tmp1:4 = IMM << shift; tmp2:4 = (tmp & ~mask) | tmp1; unpackFPSCR(tmp2); } #mtfsfi. 10,3 0xfc 00 01 0d :mtfsfi. crfD,IMM is $(NOTVLE) & OP=63 & crfD & BITS_16_22=0 & IMM & BIT_11=0 & XOP_1_10=134 & Rc=1 { tmp:4 = 0; packFPSCR(tmp); shift:1 = 28-(crfD*4); mask:4 = 0xf << shift; tmp1:4 = IMM << shift; tmp2:4 = (tmp & ~mask) | tmp1; unpackFPSCR(tmp2); cr1flags(); } # This instruction is not exclusive to 64 bit processors, per page 1405 of the PowerISA manual. # Prior to the Power ISA introduction, PowerPC architecture had 32-bit versions of the MSR in 32-bit implementations. # Since this instruction requires 64-bit processsors, it is currently restricted to 64 bit machines. # mtmsrd varies from processor to processor. 
This version is consistent with PowerISA v2.07B @ifdef BIT_64 #mtmsrd r0,0 0x7c 00 01 64 :mtmsrd S,0 is $(NOTVLE) & OP=31 & S & BITS_17_20=0 & MSR_L=0 & BITS_11_15=0 & XOP_1_10=178 & BIT_0=0 { local bit59:$(REGISTER_SIZE) = (S >> 4) & 1; #bit 59 local bit58:$(REGISTER_SIZE) = (S >> 5) & 1; #bit 58 local bit49:$(REGISTER_SIZE) = (S >> 14) & 1; #bit 49 local bit48:$(REGISTER_SIZE) = (S >> 15) & 1; #bit 48 local bits2931:$(REGISTER_SIZE) = zext(S[32,3]); #bits 29-31 local mbits2931:$(REGISTER_SIZE) = zext(MSR[32,3]); #bits 29-31 local cond = (mbits2931 != 0x2)|(bits2931 != 0); bits2931 = (zext(cond) * bits2931) + (zext(!cond) * mbits2931); local mask:$(REGISTER_SIZE) =0xeffffff8ffff6fce; tmp:8 = S & mask; #preserves (RS) 0:2 4:40 42:47 49:50 52:57 60:62 tmp = tmp | (bits2931) << 32; tmp = tmp | ((bit48 | bit49) << 15); # MSR 48 <- (RS) 48 | (RS) 49 tmp = tmp | ((bit58 | bit49) << 5); # MSR 58 <- (RS) 58 | (RS) 49 tmp = tmp | ((bit59 | bit49) << 4); # MSR 59 <- (RS) 59 | (RS) 49 MSR = (MSR & ~mask) | tmp; } #mtmsrd r0,1 0x7c 01 01 64 :mtmsrd S,1 is $(NOTVLE) & OP=31 & S & BITS_17_20=0 & MSR_L=1 & BITS_11_15=0 & XOP_1_10=178 & BIT_0=0 { mask:$(REGISTER_SIZE) = 0x8002; MSR = (MSR & ~mask) | (S & mask); } @endif CRM_val: crmval is CRM [crmval = CRM+0;] {export *[const]:1 crmval;} #mtocrf 10,r0 0x7c 21 01 20 :mtocrf CRM_val,S is OP=31 & S & BIT_20=1 & CRM_val & CRM0 & CRM1 & CRM2 & CRM3 & CRM4 & CRM5 & CRM6 & CRM7 & BIT_11=0 & XOP_1_10=144 & BIT_0=0 { tmp:$(REGISTER_SIZE) = (S >> 28) & 0xf; cr0 = (cr0 * (CRM_val != 128)) | (tmp:1 * (CRM_val == 128)); tmp = (S >> 24) & 0xf; cr1 = (cr1 * (CRM_val != 64)) | (tmp:1 * (CRM_val == 64)); tmp = (S >> 20) & 0xf; cr2 = (cr2 * (CRM_val != 32)) | (tmp:1 * (CRM_val == 32)); tmp = (S >> 16) & 0xf; cr3 = (cr3 * (CRM_val != 16)) | (tmp:1 * (CRM_val == 16)); tmp = (S >> 12) & 0xf; cr4 = (cr4 * (CRM_val != 8)) | (tmp:1 * (CRM_val == 8)); tmp = (S >> 8) & 0xf; cr5 = (cr5 * (CRM_val != 4)) | (tmp:1 * (CRM_val == 4)); tmp = (S >> 4) & 0xf; cr6 
= (cr6 * (CRM_val != 2)) | (tmp:1 * (CRM_val == 2)); tmp = S & 0xf; cr7 = (cr7 * (CRM_val != 1)) | (tmp:1 * (CRM_val == 1)); } ### is this pcode correct on 64-bit bridge? #mtsr sr0,r0 0x7c 00 01 a4 :mtsr SR,S is $(NOTVLE) & OP=31 & S & SR & BIT_20=0 & B & BITS_11_15=0 & XOP_1_10=210 & BIT_0=0 { @ifdef BIT_64 SR = S:4; @else SR = S; @endif } #mtsrd sr0,r0 0x7c 00 0 a4 :mtsrd SR,S is $(NOTVLE) & OP=31 & S & BIT_20=0 & SR & BITS_11_15=0 & XOP_1_10=82 & BIT_0=0 { SR = S:4; } #mtsrdin r0,r0 0x7c 00 00 e4 :mtsrdin S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=114 & BIT_0=0 { local tmp = (B >> 28) & 0xf; *[register]:4 ($(SEG_REGISTER_BASE)+tmp:4) = S:4; } ### is this pcode correct on 64-bit bridge? #mtsrin r0,r0 0x7c 00 01 e4 :mtsrin S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=242 & BIT_0=0 { @ifdef BIT_64 tmp:4 = (B:4 >> 28); @else tmp:$(REGISTER_SIZE) = (B >> 28); @endif *[register]:4 ($(SEG_REGISTER_BASE)+tmp) = S; } @ifdef BIT_64 #mulhd r0,r0 0x7c 00 00 92 :mulhd D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=73 & Rc=0 { tmp:16 = sext(A) * sext(B); D = tmp(8); } #mulhd. r0,r0 0x7c 00 00 93 :mulhd. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=73 & Rc=1 { tmp:16 = sext(A) * sext(B); D = tmp(8); cr0flags(D); } #mulhdu r0,r0 0x7c 00 00 12 :mulhdu D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=9 & Rc=0 { tmp:16 = zext(A) * zext(B); D = tmp(8); } #mulhdu. r0,r0 0x7c 00 00 13 :mulhdu. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=9 & Rc=1 { tmp:16 = zext(A) * zext(B); D = tmp(8); cr0flags(D); } @endif #mulhw r0,r0,r0 0x7c 00 00 96 :mulhw D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=75 & Rc=0 { @ifdef BIT_64 tmp:8 = sext(A:4) * sext(B:4); tmp2:4 = tmp(4); D = zext(tmp2); @else tmp:8 = sext(A) * sext(B); D = tmp(4); @endif } #mulhw. r0,r0,r0 0x7c 00 00 97 :mulhw. 
D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=75 & Rc=1 { @ifdef BIT_64 tmp:8 = sext(A:4) * sext(B:4); tmp2:4 = tmp(4); D = zext(tmp2); @else tmp:8 = sext(A) * sext(B); D = tmp(4); @endif cr0flags(D); } #mulhwu r0,r0,r0 0x7c 00 00 16 :mulhwu D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=11 & Rc=0 { @ifdef BIT_64 tmp:8 = zext(A:4) * zext(B:4); tmp2:4 = tmp(4); D=zext(tmp2); @else tmp:8 = zext(A) * zext(B); D = tmp(4); @endif } #mulhwu. r0,r0,r0 0x7c 00 00 17 :mulhwu. D,A,B is OP=31 & D & A & B & BIT_10=0 & XOP_1_9=11 & Rc=1 { @ifdef BIT_64 tmp:8 = zext(A:4) * zext(B:4); tmp2:4 = tmp(4); D=zext(tmp2); @else tmp:8 = zext(A) * zext(B); D = tmp(4); @endif cr0flags(D); } @ifdef BIT_64 #mulld r0, r0, r0 0x7C 00 01 D2 :mulld D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=233 & Rc=0 { tmp:16 = sext(A) * sext(B); D = tmp:8; } #mulld. r0, r0, r0 0x7C 00 01 D3 :mulld. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=233 & Rc=1 { tmp:16 = sext(A) * sext(B); D = tmp:8; cr0flags(D); } #mulldo r0, r0, r0 0x7C 00 05 D2 :mulldo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=233 & Rc=0 { tmp:16 = sext(A) * sext(B); D = tmp:8; mulOverflow128(tmp); } #mulldo. r0, r0, r0 0x7C 00 05 D3 :mulldo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=233 & Rc=1 { tmp:16 = sext(A) * sext(B); D = tmp:8; mulOverflow128(tmp); cr0flags(D); } @endif #mulli r0,r0,r0 0x1C 00 00 00 :mulli D,A,SIMM is $(NOTVLE) & OP=7 & D & A & SIMM { D = A * SIMM; } #mullw r0,r0,r0 0x7C 00 01 D6 :mullw D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=235 & Rc=0 { @ifdef BIT_64 D = sext(A:4) * sext(B:4); @else D = A*B; @endif } #mullw. r0,r0,r0 0x7C 00 01 D7 :mullw. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=235 & Rc=1 { @ifdef BIT_64 D = sext(A:4) * sext(B:4); @else D = A*B; @endif cr0flags(D); } #mullwo r0,r0,r0 0x7C 00 05 D6 :mullwo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=235 & Rc=0 { @ifdef BIT_64 D = sext(A:4) * sext(B:4); mulOverflow64(D); @else tmp:8 = sext(A) * sext(B); mulOverflow64(tmp); D = tmp:4; @endif } #mullwo. 
r0,r0,r0 0x7C 00 05 D7 :mullwo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=235 & Rc=1 { @ifdef BIT_64 D = sext(A:4) * sext(B:4); mulOverflow64(D); @else tmp:8 = sext(A) * sext(B); mulOverflow64(tmp); D = tmp:4; @endif cr0flags(D); } #nand r0,r0,r0 0x7C 00 03 B8 :nand A,S,B is OP=31 & S & A & B & XOP_1_10=476 & Rc=0 { A = ~(S & B); } #nand. r0,r0,r0 0x7C 00 03 B9 :nand. A,S,B is OP=31 & S & A & B & XOP_1_10=476 & Rc=1 { A = ~(S & B); cr0flags( A ); } #neg r0,r0 0x7C 00 00 D0 :neg D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=104 & Rc=0 { D = -A; } #neg. r0,r0 0x7C 00 00 D1 :neg. D,A is OP=31 & D & A & BITS_11_15=0 & OE=0 & XOP_1_9=104 & Rc=1 { D = -A; cr0flags( D ); } #nego r0,r0 0x7C 00 04 D0 :nego D,A is $(NOTVLE) & OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=104 & Rc=0 { subOverflow(A,1); D = -A; } #nego. r0,r0 0x7C 00 04 D1 :nego. D,A is OP=31 & D & A & BITS_11_15=0 & OE=1 & XOP_1_9=104 & Rc=1 { subOverflow(A,1); D = -A; cr0flags( D ); } #nor r0,r0,r0 0x7C 00 00 F8 :nor A,S,B is OP=31 & A & S & B & XOP_1_10=124 & Rc=0 { A = ~(S | B); } #nor. r0,r0,r0 0x7C 00 00 F9 :nor. A,S,B is OP=31 & A & S & B & XOP_1_10=124 & Rc=1 { A = ~(S | B); cr0flags(A); } #or r0,r0,r0 0x7C 00 03 78 :or A,S,B is OP=31 & A & S & B & XOP_1_10=444 & Rc=0 { A = (S | B); } #or. r0,r0,r0 0x7C 00 03 79 :or. A,S,B is OP=31 & A & S & B & XOP_1_10=444 & Rc=1 { A = (S | B); cr0flags(A); } #orc r0,r0,r0 0x7C 00 03 38 :orc A,S,B is OP=31 & A & S & B & XOP_1_10=412 & Rc=0 { A = S | ~B; } #orc. r0,r0,r0 0x7C 00 03 39 :orc. 
A,S,B is OP=31 & A & S & B & XOP_1_10=412 & Rc=1 { A = S | ~B; cr0flags(A); } #ori r0,r0,r0 0x60 00 00 00 :ori A,S,UIMM is $(NOTVLE) & OP=24 & A & S & UIMM { A = S | UIMM; } #oris r0,r0,r0 0x64 00 00 00 :oris A,S,UIMM is $(NOTVLE) & OP=25 & A & S & UIMM { A = S | (UIMM << 16); } #rfid 0x4c 00 00 24 :rfid is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=18 & BIT_0=0 { MSR = returnFromInterrupt(MSR, SRR1); local ra = SRR0; return[ra]; } @ifdef BIT_64 #rldcl r0,r0,r0,0 0x78 00 00 10 :rldcl A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=8 & Rc=0 { shift:$(REGISTER_SIZE) = B & 0x3f; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & (0xffffffffffffffff >> MB); } #rldcl. r0,r0,r0,0 0x78 00 00 11 :rldcl. A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=8 & Rc=1 { shift:$(REGISTER_SIZE) = B & 0x3f; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & (0xffffffffffffffff >> MB); cr0flags(A); } #rldcr r0,r0,r0,0 0x78 00 00 12 :rldcr A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=9 & Rc=0 & rotmask_Z { shift:$(REGISTER_SIZE) = B & 0x3f; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & rotmask_Z; } #rldcr. r0,r0,r0,0 0x78 00 00 13 :rldcr. A,S,B,MB is $(NOTVLE) & OP=30 & S & A & B & MB & XOP_1_4=9 & Rc=1 & rotmask_Z { shift:$(REGISTER_SIZE) = B & 0x3f; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & rotmask_Z; cr0flags(A); } #rldic r0,r0,r0,0 0x78 00 00 08 :rldic A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=2 & Rc=0 & rotmask_SH { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & rotmask_SH; } #rldic. r0,r0,r0,0 0x78 00 00 09 :rldic. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=2 & Rc=1 & rotmask_SH { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & rotmask_SH; cr0flags(A); } #rldicl r0,r0,r0,0 0x78 00 00 00 :rldicl A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=0 & Rc=0 { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & (0xffffffffffffffff >> MB); } #rldicl. 
r0,r0,r0,0 0x78 00 00 01 :rldicl. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=0 & Rc=1 { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & (0xffffffffffffffff >> MB); cr0flags(A); } #rldicr r0,r0,r0,0 0x78 00 00 04 :rldicr A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=1 & Rc=0 { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & (0xffffffffffffffff << (63-MB)); } #rldicr. r0,r0,r0,0 0x78 00 00 05 :rldicr. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=1 & Rc=1 { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = tmp & (0xffffffffffffffff << (63-MB)); cr0flags(A); } #rldimi r0,r0,r0,0 0x78 00 00 0c :rldimi A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=3 & Rc=0 & rotmask_SH { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = (tmp & rotmask_SH) | (A & ~rotmask_SH); } #rldimi. r0,r0,r0,0 0x78 00 00 0d :rldimi. A,S,SH,MB is $(NOTVLE) & OP=30 & S & A & B & SH & MB & XOP_2_4=3 & Rc=1 & rotmask_SH { shift:4 = SH; tmp:$(REGISTER_SIZE)=(S<>(64-shift)); A = (tmp & rotmask_SH) | (A & ~rotmask_SH); cr0flags(A); } @endif #rlwimi r0,r0,0,0,0 0x50 00 00 00 :rlwimi A,S,SHL,MBL,ME is $(NOTVLE) & OP=20 & S & A & SHL & MBL & ME & Rc=0 & rotmask { shift:1 = SHL; @ifdef BIT_64 tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff); tmp2:$(REGISTER_SIZE) = (tmp<>(64-shift)); A = (tmp2 & rotmask) | (A & ~(rotmask)); @else tmp = (S<>(32-shift)); A = (tmp & rotmask) | (A & ~rotmask); @endif } #rlwimi. r0,r0,0,0,0 0x50 00 00 01 :rlwimi. 
A,S,SHL,MBL,ME is $(NOTVLE) & OP=20 & S & A & SHL & MBL & ME & Rc=1 & rotmask { shift:1 = SHL; @ifdef BIT_64 tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff); tmp2:$(REGISTER_SIZE) = (tmp<>(64-shift)); A = (tmp2 & rotmask) | (A & ~(rotmask)); @else tmp = (S<>(32-shift)); A = (tmp & rotmask) | (A & ~rotmask); @endif cr0flags(A); } #rlwinm r0,r0,0,0,0 0x54 00 00 00 :rlwinm A,S,SHL,MBL,ME is $(NOTVLE) & OP=21 & S & A & SHL & MBL & ME & Rc=0 & rotmask { shift:1 = SHL; @ifdef BIT_64 tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff); tmp2:$(REGISTER_SIZE) = (tmp<>(64-shift)); A = tmp2 & rotmask; @else tmp = (S<>(32-shift)); A = (tmp & rotmask); @endif } #rlwinm. r0,r0,0,0,0 0x54 00 00 01 :rlwinm. A,S,SHL,MBL,ME is $(NOTVLE) & OP=21 & S & A & SHL & MBL & ME & Rc=1 & rotmask { shift:1 = SHL; @ifdef BIT_64 tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff); tmp2:$(REGISTER_SIZE) = (tmp<>(64-shift)); A = tmp2 & rotmask; @else tmp = (S<>(32-shift)); A = (tmp & rotmask); @endif cr0flags(A); } #rlwnm r0,r0,0,0,0 0x5C 00 00 00 :rlwnm A,S,B,MBL,ME is $(NOTVLE) & OP=23 & S & A & B & MBL & ME & Rc=0 & rotmask { shift:$(REGISTER_SIZE) = B & 0x1f; @ifdef BIT_64 tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff); tmp2:$(REGISTER_SIZE) = (tmp<>(64-shift)); A = tmp2 & rotmask; @else tmp = (S<>(32-shift)); A = (tmp & rotmask); @endif } #rlwnm. r0,r0,0,0,0 0x5C 00 00 01 :rlwnm. 
A,S,B,MBL,ME is $(NOTVLE) & OP=23 & S & A & B & MBL & ME & Rc=1 & rotmask { shift:$(REGISTER_SIZE) = B & 0x1f; @ifdef BIT_64 tmp:$(REGISTER_SIZE) = (S << 32) | (S & 0xffffffff); tmp2:$(REGISTER_SIZE) = (tmp<>(64-shift)); A = tmp2 & rotmask; @else tmp = (S<>(32-shift)); A = (tmp & rotmask); @endif cr0flags(A); } #sc 0x44 00 00 02 :sc LEV is $(NOTVLE) & OP=17 & BITS_12_25=0 & LEV & BITS_2_4=0 & BIT_1=1 & BIT_0=0 { syscall(); } #slbia 0x7C 00 03 E4 :slbia is $(NOTVLE) & OP=31 & BITS_11_25=0 & XOP_1_10=498 & BIT_0=0 { slbInvalidateAll(); } #slbie r0 0x7C 00 03 64 :slbie B is $(NOTVLE) & OP=31 & BITS_16_20=0 & B & XOP_1_10=434 & BIT_0=0 { slbInvalidateEntry(); } #slbmfee r0,r0 0x7C 00 07 26 :slbmfee D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=915 & BIT_0=0 { slbMoveFromEntryESID(); } #slbmfev r0,r0 0x7C 00 06 A6 :slbmfev D,B is $(NOTVLE) & OP=31 & D & BITS_16_20=0 & B & XOP_1_10=851 & BIT_0=0 { slbMoveFromEntryVSID(); } #slbmte r0,r0 0x7C 00 03 24 :slbmte S,B is $(NOTVLE) & OP=31 & S & BITS_16_20=0 & B & XOP_1_10=402 & BIT_0=0 { slbMoveToEntry(); } @ifdef BIT_64 #sld r0,r0,r0 0x7C 00 00 36 :sld A,S,B is OP=31 & S & A & B & XOP_1_10=27 & Rc=0 { A = S << (B & 0x7f); } #sld. 0x7C 00 00 37 :sld. A,S,B is OP=31 & S & A & B & XOP_1_10=27 & Rc=1 { A = S << (B & 0x7f); cr0flags(A); } @endif #slw r0,r0,r0 0x7C 00 00 30 :slw A,S,B is OP=31 & S & A & B & XOP_1_10=24 & Rc=0 { @ifdef BIT_64 shift:4 = B:4 & 0x3f; tmp:4 = S:4 << shift; A = zext(tmp); @else shift = B & 0x3f; A = S << shift; @endif } #slw. r0,r0,r0 0x7C 00 00 31 :slw. A,S,B is OP=31 & S & A & B & XOP_1_10=24 & Rc=1 { @ifdef BIT_64 shift:4 = B:4 & 0x3f; tmp:4 = S:4 << shift; A = zext(tmp); @else shift = B & 0x3f; A = S << shift; @endif cr0flags(A); } @ifdef BIT_64 #srad r0,r0,r0 0x7C 00 06 34 :srad A,S,B is OP=31 & A & S & B & XOP_1_10=794 & Rc=0 { tmp:$(REGISTER_SIZE) = B & 0x7f; shiftCarry(S,tmp); A = S s>> tmp; } #srad. r0,r0,r0 0x7C 00 06 35 :srad. 
A,S,B is OP=31 & A & S & B & XOP_1_10=794 & Rc=1
{
	tmp:$(REGISTER_SIZE) = B & 0x7f;
	shiftCarry(S,tmp);
	A = S s>> tmp;
	cr0flags(A);
}

#sradi r0,r0,r0 0x7C 00 06 74
:sradi A,S,SH is OP=31 & A & S & SH & XOP_2_10=413 & Rc=0
{
	shiftCarry(S,SH);
	A = S s>> SH;
}

#sradi. r0,r0,r0 0x7C 00 06 75
# BUG FIX: the Rc=1 form must update CR0 from the result (Power ISA: Rc=1
# sets CR0); cr0flags(A) was missing here although every other dotted
# shift variant in this file (srad., sraw., srawi., srd., srw.) has it.
:sradi. A,S,SH is OP=31 & A & S & SH & XOP_2_10=413 & Rc=1
{
	shiftCarry(S,SH);
	A = S s>> SH;
	cr0flags(A);
}
@endif

#sraw r0,r0,r0 0x7C 00 06 30
:sraw A,S,B is OP=31 & A & S & B & XOP_1_10=792 & Rc=0
{
@ifdef BIT_64
	shift:4 = B:4 & 0x3f;
	shiftCarry(S:4,shift);
	tmp2:4 = S:4 s>> shift;
	A = sext(tmp2);
@else
	shift = B & 0x3f;
	shiftCarry(S,shift);
	A = S s>> shift;
@endif
}

#sraw. r0,r0,r0 0x7C 00 06 31
:sraw. A,S,B is OP=31 & A & S & B & XOP_1_10=792 & Rc=1
{
@ifdef BIT_64
	shift:4 = B:4 & 0x3f;
	shiftCarry(S:4,shift);
	tmp2:4 = S:4 s>> shift;
	A = sext(tmp2);
@else
	shift = B & 0x3f;
	shiftCarry(S,shift);
	A = S s>> shift;
@endif
	cr0flags(A);
}

#srawi r0,r0,r0 0x7C 00 06 70
:srawi A,S,SHL is OP=31 & A & S & SHL & XOP_1_10=824 & Rc=0
{
@ifdef BIT_64
	shift:4 = SHL;
	shiftCarry(S:4,shift);
	tmp2:4 = S:4 s>> shift;
	A = sext(tmp2);
@else
	shiftCarry(S,SHL);
	A = S s>> SHL;
@endif
}

#srawi. r0,r0,r0 0x7C 00 06 71
:srawi. A,S,SHL is OP=31 & A & S & SHL & XOP_1_10=824 & Rc=1
{
@ifdef BIT_64
	shift:4 = SHL;
	shiftCarry(S:4,shift);
	tmp2:4 = S:4 s>> shift;
	A = sext(tmp2);
@else
	shiftCarry(S,SHL);
	A = S s>> SHL;
@endif
	cr0flags(A);
}

@ifdef BIT_64
#srd r0,r0,r0 0x7C 00 04 36
:srd A,S,B is OP=31 & S & A & B & XOP_1_10=539 & Rc=0
{
	A = S >> (B & 0x7f);
}

#srd. 0x7C 00 04 37
:srd. A,S,B is OP=31 & S & A & B & XOP_1_10=539 & Rc=1
{
	A = S >> (B & 0x7f);
	cr0flags(A);
}
@endif

#srw r0,r0,r0 0x7C 00 04 30
:srw A,S,B is OP=31 & S & A & B & XOP_1_10=536 & Rc=0
{
@ifdef BIT_64
	shift:4 = B:4 & 0x3f;
	tmp:4 = S:4 >> shift;
	A = zext(tmp);
@else
	shift = B & 0x3f;
	A = S >> shift;
@endif
}

#srw. r0,r0,r0 0x7C 00 04 31
:srw. 
A,S,B is OP=31 & S & A & B & XOP_1_10=536 & Rc=1 { @ifdef BIT_64 shift:4 = B:4 & 0x3f; tmp:4 = S:4 >> shift; A = zext(tmp); @else shift = B & 0x3f; A = S >> shift; @endif cr0flags(A); } #stb r0,3(0) 0x98 00 00 00 #stb r0,3(r2) 0x98 02 00 00 :stb S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=38 & S & dPlusRaOrZeroAddress { *:1(dPlusRaOrZeroAddress) = S:1; } #stbu r0,3(0) 0x9c 00 00 00 #stbu r0,3(r2) 0x9c 02 00 00 :stbu S,dPlusRaAddress is $(NOTVLE) & OP=39 & S & dPlusRaAddress & A { *:1(dPlusRaAddress) = S:1; A = dPlusRaAddress; } #stbux r0,r2,r0 0x7c 00 01 ee ### WARNING the B in this definition is different from manual - I think the manual is wrong :stbux S,A,B is OP=31 & S & A & B & XOP_1_10=247 & BIT_0=0 { tmp:$(REGISTER_SIZE) = A+B; # S may be same register as A *tmp = S:1; # So do store before updating A A = tmp; } #stbx r0,r2,r0 0x7c 00 01 ae ### WARNING the B in this definition is different from manual - I think the manual is wrong :stbx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=215 & BIT_0=0 { *(RA_OR_ZERO+B) = S:1; } @ifdef BIT_64 #std r0,8(0) 0xf8 00 00 08 #std r0,8(r2) 0xf8 02 00 08 :std S,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=62 & S & dsPlusRaOrZeroAddress & BITS_0_1=0 { *:8(dsPlusRaOrZeroAddress) = S; } #Special case when saving r2 to stack prior to function call (for inline call stub case) #std r2,0x28(r1) :std r2,dsPlusRaOrZeroAddress is $(NOTVLE) & OP=62 & S=2 & r2 & A=1 & SIMM_DS=0xa & dsPlusRaOrZeroAddress & BITS_0_1=0 { r2Save = r2; *:8(dsPlusRaOrZeroAddress) = r2; } #stdcx. r0,8(0) 0x7c 00 01 AD :stdcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=214 & BIT_0=1 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; if (RESERVE == 0) goto inst_next; *[ram]:8 EA = storeDoubleWordConditionalIndexed(S,RA_OR_ZERO,B); # set when a stwcx. or stdcx. 
successfully completes cr0flags(0:$(REGISTER_SIZE)); } #stdu r0,8(0) 0xf8 00 00 01 #stdu r0,8(r2) 0xf8 02 00 01 :stdu S,dsPlusRaAddress is $(NOTVLE) & OP=62 & S & A & dsPlusRaAddress & BITS_0_1=1 { *:8(dsPlusRaAddress) = S; A = dsPlusRaAddress; } #stdux r0,r2,r0 0x7c 00 01 6a :stdux S,A,B is OP=31 & S & A & B & XOP_1_10=181 & BIT_0=0 { local ea:$(REGISTER_SIZE) = A+B; *:8(ea) = S; A = ea; } #stdx r0,r2,r0 0x7c 00 01 2a :stdx S,RA_OR_ZERO,B is OP=31 & S & B & RA_OR_ZERO & XOP_1_10=149 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; *:8(ea) = S; } @endif #stfd fr0,8(0) 0xD8 00 00 08 #stfd fr0,8(r2) 0xD8 02 00 08 :stfd fS,dPlusRaOrZeroAddress is $(NOTVLE) & OP=54 & fS & dPlusRaOrZeroAddress { *:8(dPlusRaOrZeroAddress) = fS; } #stfdu fr0,8(0) 0xDC 00 00 08 #stfdu fr0,8(r2) 0xDC 02 00 08 :stfdu fS,dPlusRaAddress is $(NOTVLE) & OP=55 & fS & dPlusRaAddress & A { ea:$(REGISTER_SIZE) = dPlusRaAddress; *:8(ea) = fS; A = ea; } #stfdux fr0,r2,r0 0x7C 00 05 EE :stfdux fS,A,B is $(NOTVLE) & OP=31 & fS & A & B & XOP_1_10=759 & BIT_0=0 { ea:$(REGISTER_SIZE) = A+B; *:8(ea) = fS; A = ea; } #stfdx fr0,r0,r0 0x7C 00 05 AE :stfdx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=727 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; *:8(ea) = fS; } #stfiwx fr0,r0,r0 0x7C 00 07 AE :stfiwx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=983 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; *:4(ea) = fS:4; } #stfs fr0,8(0) 0xD0 00 00 08 #stfs fr0,8(r2) 0xD0 02 00 08 :stfs fS,dPlusRaOrZeroAddress is $(NOTVLE) & OP=52 & fS & dPlusRaOrZeroAddress { tmp:4 = float2float(fS); *:4(dPlusRaOrZeroAddress) = tmp; } #stfsu fr0,8(0) 0xD4 00 00 08 #stfsu fr0,8(r2) 0xD4 02 00 08 :stfsu fS,dPlusRaAddress is $(NOTVLE) & OP=53 & fS & dPlusRaAddress & A { tmp:4 = float2float(fS); *:4(dPlusRaAddress) = tmp; A = dPlusRaAddress; } #stfsux fr0,r0,r0 0x7C 00 05 6E :stfsux fS,A,B is $(NOTVLE) & OP=31 & fS & B & A & XOP_1_10=695 & BIT_0=0 { ea:$(REGISTER_SIZE) = A + B; tmp:4 = 
float2float(fS); *:4(ea) = tmp; A = ea; } #stfsx fr0,r0,r0 0x7C 00 05 2E :stfsx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=663 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; tmp:4 = float2float(fS); *:4(ea) = tmp; } #sth r0,r0 0xB0 00 00 00 :sth S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=44 & S & dPlusRaOrZeroAddress { *:2(dPlusRaOrZeroAddress) = S:2; } #sthbrx r0,r0,r0 0x7C 00 07 2C :sthbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=918 & BIT_0=0 { tmp:2 = zext(S:1) <<8; tmp2:2 = S:2 >>8; ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; *:2(ea) = tmp2 | tmp; } #sthu r0,r0 0xB4 00 00 00 :sthu S,dPlusRaAddress is $(NOTVLE) & OP=45 & S & A & dPlusRaAddress { *:2(dPlusRaAddress) = S:2; A = dPlusRaAddress; } #sthux r0,r0,r0 0x7C 00 03 6E :sthux S,A,B is OP=31 & S & A & B & XOP_1_10=439 & BIT_0=0 { ea:$(REGISTER_SIZE) = A + B; *:2(ea) = S:2; A = ea; } #sthx r0,r0,r0 0x7C 00 03 2E :sthx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=407 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; *:2(ea) = S:2; } #### #stm instruction @include "stmwInstructions.sinc" @include "stswiInstructions.sinc" #stswi r0,r0,0 0x7c 00 05 aa #:stswi S,A,NB is $(NOTVLE) & OP=31 & S & A & NB & XOP_1_10=725 & BIT_0=0 #{ # tmp:1 = NB; # storeString(S,A,tmp); #} #stswx r0,r0,0 0x7c 00 05 2a define pcodeop stswxOp; :stswx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=661 & BIT_0=0 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:1 EA = stswxOp(S,RA_OR_ZERO,B); } #stw r0,r0,0 0x90 00 00 00 :stw S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=36 & S & dPlusRaOrZeroAddress { @ifdef BIT_64 *:4(dPlusRaOrZeroAddress) = S:4; @else *:4(dPlusRaOrZeroAddress) = S; @endif } #stwbrx r0,r0,0 0x7c 00 05 2c :stwbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=662 & BIT_0=0 { @ifdef BIT_64 value:4 = S:4; @else value:$(REGISTER_SIZE) = S; @endif ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; tmp1:4 = value << 24; tmp2:4 = (value << 8) & 0xff0000; tmp3:4 = (value >> 8) & 
0x00ff00; tmp4:4 = value >> 24; *:4(ea) = tmp1 | tmp2 | tmp3 | tmp4; } #stwcx. r0,8(0) 0x7c 00 01 2D :stwcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=150 & BIT_0=1 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; if (RESERVE == 0) goto inst_next; *[ram]:4 EA = storeWordConditionalIndexed(S,RA_OR_ZERO,B); # set when a stwcx. or stdcx. successfully completes cr0flags(0:$(REGISTER_SIZE)); } #stwu r0,r0 0x94 00 00 00 :stwu S,dPlusRaAddress is $(NOTVLE) & OP=37 & S & A & dPlusRaAddress { @ifdef BIT_64 *:4(dPlusRaAddress) = S:4; @else *:4(dPlusRaAddress) = S; @endif A = dPlusRaAddress; } #stwux r0,r0,r0 0x7C 00 01 6E :stwux S,A,B is OP=31 & S & A & B & XOP_1_10=183 & BIT_0=0 { ea:$(REGISTER_SIZE) = A + B; @ifdef BIT_64 *:4(ea) = S:4; @else *:4(ea) = S; @endif A = ea; } #stwx r0,r0,r0 0x7C 00 01 2E :stwx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=151 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; @ifdef BIT_64 *:4(ea) = S:4; @else *:4(ea) = S; @endif } #subf r0,r0,r0 0x7c 00 00 50 :subf D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=40 & Rc=0 { D = B - A; } #subf. r0,r0,r0 0x7c 00 00 51 :subf. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=40 & Rc=1 { D = B - A; cr0flags(D); } #subfo r1,r2,r3 0x7c 00 04 50 :subfo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=40 & Rc=0 { subOverflow(B,A); D = B - A; } #subfo. r1,r2,r3 0x7c 00 04 51 :subfo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=40 & Rc=1 { subOverflow(B,A); D = B - A; cr0flags(D); } #subfc r0,r0,r0 0x7c 00 00 10 :subfc D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=8 & Rc=0 { xer_ca = (A <= B); D = B - A; } #subfc. r0,r0,r0 0x7c 00 00 11 :subfc. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=8 & Rc=1 { xer_ca = (A <= B); D = B - A; cr0flags(D); } #subfco r0,r0,r0 0x7c 00 04 10 :subfco D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=8 & Rc=0 { xer_ca = (A <= B); subOverflow(B,A); D = B - A; } #subfco. r0,r0,r0 0x7c 00 04 11 :subfco. 
D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=8 & Rc=1 { xer_ca = (A <= B); subOverflow( B, A ); D = B - A; cr0flags(D); } #subfe r0,r0,r0 0x7c 00 01 10 :subfe D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=136 & Rc=0 { tmp:$(REGISTER_SIZE) = A + zext(!xer_ca); subExtendedCarry(B,A); D = B - tmp; } #subfe. r0,r0,r0 0x7c 00 01 11 :subfe. D,A,B is OP=31 & D & A & B & OE=0 & XOP_1_9=136 & Rc=1 { tmp:$(REGISTER_SIZE) = A + zext(!xer_ca); subExtendedCarry(B,A); D = B - tmp; cr0flags(D); } #subfeo r0,r0,r0 0x7c 00 05 10 :subfeo D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=136 & Rc=0 { tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A; subExtendedOverflow(B,A); subExtendedCarry(B,A); D = B - tmp; } #subfeo. r0,r0,r0 0x7c 00 05 11 :subfeo. D,A,B is OP=31 & D & A & B & OE=1 & XOP_1_9=136 & Rc=1 { tmp:$(REGISTER_SIZE) = zext(!xer_ca)+A; subExtendedOverflow(B,A); subExtendedCarry(B,A); D = B - tmp; cr0flags(D); } #subfic r0,r0,2 0x20 00 00 02 :subfic D,A,SIMM is $(NOTVLE) & OP=8 & D & A & SIMM { xer_ca = !(SIMM b = (63 - (i*8+7)); tmp = (S >> (63 - (i*8+7))); b = tmp & 1; # GetBit s = s ^ b; i = i + 1; if (i < 8) goto ; A = s; } # PowerISA II: 3.3.12 Fixed-Point Logical Instructions # CMT: Compare Bytes # FORM: X-form # binutils: 476.d: dc: 7c 83 2b f8 cmpb r3,r4,r5 # binutils: 476.d: e0: 7c 83 2b f8 cmpb r3,r4,r5 # binutils: a2.d: 104: 7d 6a 63 f8 cmpb r10,r11,r12 # binutils: power6.d: 20: 7c 83 2b f8 cmpb r3,r4,r5 # binutils: power7.d: 90: 7c 83 2b f8 cmpb r3,r4,r5 # name cmpb code 7c0003f8 mask ff0700fc00000000 flags @POWER6 @476 @A2 operands 31 3b 38 0 0 0 0 0 :cmpb S,A,B is $(NOTVLE) & OP=31 & S & A & B & XOP_1_10=508 & BIT_0=0 { # PCODE-YES tmpS:8 = 0; tmpB:8 = 0; val:8 = 0; zero:8 = 0; ones:8 = 0xff; # Unrolled the loop tmpS = (S >> 56) & 0xFF; # get next S byte tmpB = (B >> 56) & 0xFF; # get next B byte val = (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = (S >> 48) & 0xFF; # get next S byte tmpB = (B >> 48) & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == 
tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = (S >> 40) & 0xFF; # get next S byte tmpB = (B >> 40) & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = (S >> 32) & 0xFF; # get next S byte tmpB = (B >> 32) & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = (S >> 24) & 0xFF; # get next S byte tmpB = (B >> 24) & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = (S >> 16) & 0xFF; # get next S byte tmpB = (B >> 16) & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = (S >> 8) & 0xFF; # get next S byte tmpB = (B >> 8) & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); tmpS = S & 0xFF; # get next S byte tmpB = B & 0xFF; # get next B byte val = val << 8 | (zext(tmpS == tmpB) * ones) + (zext(tmpS != tmpB) * zero); A = val; } # PowerISA II: 3.3.12 Fixed-Point Logical Instructions # CMT: Bit Permute Doubleword [Category: Embedded.Phased-in, Server] # FORM: X-form # binutils: a2.d: fc: 7d 6a 61 f8 bpermd r10,r11,r12 # binutils: power7.d: d8: 7e 27 d9 f8 bpermd r7,r17,r27 # name bpermd code 7c0001f8 mask ff0700fc00000000 flags @POWER7 @A2 operands 31 3b 38 0 0 0 0 0 define pcodeop BitPermuteDoubleword; :bpermd A,S,B is $(NOTVLE) & OP=31 & S & A & B & XOP_1_10=252 & BIT_0=0 { BitPermuteDoubleword(A,S,B); } # PowerISA II: 3.3.12 Fixed-Point Logical Instructions # CMT: Population Count Words [Category: Server] [Category: Embedded.Phased-In] # FORM: X-form # binutils: a2.d: 64c: 7d 6a 02 f4 popcntw r10,r11 # binutils: power7.d: dc: 7e 8a 02 f4 popcntw r10,r20 # name popcntw code 7c0002f4 mask ffff00fc00000000 flags @POWER7 @A2 operands 31 3b 0 0 0 0 0 0 :popcntw A,S is $(NOTVLE) & OP=31 & S & A & XOP_1_10=378 & Rc & BITS_11_15=0 { local tmp1:4 = S(0); tmp1 = popcount(tmp1); local tmp2:4 = S(4); tmp2 = 
popcount(tmp2);
	A = (zext(tmp2) << 32) + zext(tmp1);
}

# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
# CMT: Population Count Bytes
# FORM: X-form
# binutils: 476.d: 618: 7c 83 00 f4 popcntb r3,r4
# binutils: a2.d: 644: 7d 6a 00 f4 popcntb r10,r11
# name popcntb code 7c0000f4 mask ffff00fc00000000 flags @POWER5 operands 31 3b 0 0 0 0 0 0
# BUG FIX: the loop label had been lost, leaving a branch with an empty
# target ("goto ;"), which does not assemble. Restored <loop_top> so the
# per-byte popcount loop iterates over all 8 bytes of S.
:popcntb A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=122 & BIT_0=0
{
	local i:8 = 0;
	local tmp:8 = 0;
	local tmpb:1 = 0;
	local mask:8 = 0xff;
	<loop_top>
	tmp = (S >> (i*8));
	tmpb = tmp(0);
	tmpb = popcount(tmpb);
	A = (A & ~(mask)) + (zext(tmpb) << (i*8));
	mask = mask << 8;
	i = i + 1;
	if (i < 8) goto <loop_top>;
}

# PowerISA II: 3.3.12 Fixed-Point Logical Instructions
# CMT: Parity Word
# FORM: X-form
# binutils: 476.d: 61c: 7c 83 01 34 prtyw r3,r4
# binutils: a2.d: 654: 7d 6a 01 34 prtyw r10,r11
# binutils: power6.d: 10: 7c 83 01 34 prtyw r3,r4
# binutils: power7.d: 80: 7c 83 01 34 prtyw r3,r4
# name prtyw code 7c000134 mask ffff00fc00000000 flags @POWER6 @476 @A2 operands 31 3b 0 0 0 0 0 0
:prtyw A,S is $(NOTVLE) & OP=31 & S & A & BITS_11_15=0 & XOP_1_10=154 & BIT_0=0
{
	local temp:8 = S;
	A[0,32] = zext(((popcount(temp & 0x01010101:8)) & 1:8) == 1:8);
	A[32,32] = zext(((popcount(temp & 0x0101010100000000:8)) & 1:8) == 1:8);
}

# =======================================================================
# PowerISA II: 4.4.1 Fixed-Point Load and Store Caching Inhibited Instructions
# CMT: Load Word and Zero Caching Inhibited Indexed
# binutils: power6.d: 2c: 7d 4b 66 2a lwzcix r10,r11,r12
# binutils: power7.d: 94: 7d 4b 66 2a lwzcix r10,r11,r12
# name lwzcix code 7c00062a mask ff0700fc00000000 flags @POWER6 operands 3b 32 38 0 0 0 0 0
define pcodeop LoadWordAndZeroCachingInhibited;
:lwzcix TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & XOP_1_10=789 & BIT_0=0
{
	# PCODE-YES
	# NOTE(review): this loads a full register-width value and shifts it into
	# the high half; Power ISA describes lwzcix as a zero-extended 4-byte
	# load into RT — confirm against the pspec before relying on this p-code.
	tmp:8 = *(A + B);
	tmp = tmp << 32;
	TH = tmp;
}

# =======================================================================
# PowerISA II: 3.3.14 Binary Coded Decimal (BCD) Assist 
Instructions [Category: Embedded.Phased-in, Server]
# CMT: Convert Declets To Binary Coded Decimal
# FORM: X-form
# binutils: power6.d: f0: 7d 6a 02 34 cdtbcd r10,r11
# name cdtbcd code 7c000234 mask ffff00fc00000000 flags @POWER6 operands 31 3b 0 0 0 0 0 0
define pcodeop ConvertDecletsToBinaryCodedDecimal;
:cdtbcd A,S is $(NOTVLE) & OP=31 & S & A & XOP_1_10=282 & BITS_11_15=0 & BIT_0=0
{
	ConvertDecletsToBinaryCodedDecimal(S,A);
}

# PowerISA II: 3.3.14 Binary Coded Decimal (BCD) Assist Instructions [Category: Embedded.Phased-in, Server]
# CMT: Add and Generate Sixes
# FORM: XO-form
# binutils: power6.d: f4: 7d 4b 60 94 addg6s r10,r11,r12
# name addg6s code 7c000094 mask ff0700fc00000000 flags @POWER6 operands 3b 31 38 0 0 0 0 0
define pcodeop AddAndGenerateSixes;
:addg6s TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & XOP_1_9=74 & BIT_10=0 & BIT_0=0
{
	# PCODE-YES
	AddAndGenerateSixes(TH,A,B);
}

# ==========================================================================
# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: b8: 7d 4b 63 56 divwe r10,r11,r12
# name divwe code 7c000356 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
#
# BUG FIX (applies to all eight divwe* variants below): the previous p-code
# initialized a temporary to the constant 0 and never read operand A or B,
# so every variant computed 0 s/ 0 regardless of inputs.  Per Power ISA,
# divwe forms the dividend as (RA)32:63 concatenated with 32 zero bits,
# divides by (RB)32:63, and places the 32-bit quotient in RT (result is
# undefined on overflow / divide-by-zero, which this p-code does not trap).
:divwe TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & XOP_1_9=427 & Rc=0
{
	dividend:8 = sext(A:4) << 32;
	divisor:8 = sext(B:4);
	q:8 = dividend s/ divisor;
	TH = zext(q:4);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: bc: 7d 6c 6b 57 divwe. r11,r12,r13
# name divwe. code 7c000357 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
:divwe. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & XOP_1_9=427 & Rc=1
{
	dividend:8 = sext(A:4) << 32;
	divisor:8 = sext(B:4);
	q:8 = dividend s/ divisor;
	TH = zext(q:4);
	cr0flags(TH);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: c0: 7d 8d 77 56 divweo r12,r13,r14
# name divweo code 7c000756 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
:divweo TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & XOP_1_9=427 & Rc=0
{
	dividend:8 = sext(A:4) << 32;
	divisor:8 = sext(B:4);
	q:8 = dividend s/ divisor;
	divOverflow(A,B);
	TH = zext(q:4);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: c4: 7d ae 7f 57 divweo. r13,r14,r15
# name divweo. code 7c000757 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
# NOTE(review): this pcodeop is declared but never used; kept for
# compatibility with any external reference.
define pcodeop DivideWordExtended4;
:divweo. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & XOP_1_9=427 & Rc=1
{
	dividend:8 = sext(A:4) << 32;
	divisor:8 = sext(B:4);
	q:8 = dividend s/ divisor;
	divOverflow(A,B);
	TH = zext(q:4);
	cr0flags(TH);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: c8: 7d 4b 63 16 divweu r10,r11,r12
# name divweu code 7c000316 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
:divweu TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & Rc=0 & XOP_1_9=395
{
	dividend:8 = zext(A:4) << 32;
	divisor:8 = zext(B:4);
	q:8 = dividend / divisor;
	TH = zext(q:4);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: cc: 7d 6c 6b 17 divweu. r11,r12,r13
# name divweu. code 7c000317 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
:divweu. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=0 & Rc=1 & XOP_1_9=395
{
	dividend:8 = zext(A:4) << 32;
	divisor:8 = zext(B:4);
	q:8 = dividend / divisor;
	TH = zext(q:4);
	cr0flags(TH);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended Unsigned [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: d0: 7d 8d 77 16 divweuo r12,r13,r14
# name divweuo code 7c000716 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
:divweuo TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & Rc=0 & XOP_1_9=395
{
	dividend:8 = zext(A:4) << 32;
	divisor:8 = zext(B:4);
	q:8 = dividend / divisor;
	divOverflow(A,B);
	TH = zext(q:4);
}

# PowerISA II: 3.3.8 Fixed-Point Arithmetic Instructions
# CMT: Divide Word Extended Unsigned [Category: Server] [Category: Embedded.Phased-In]
# FORM: XO-form
# binutils: power7.d: d4: 7d ae 7f 17 divweuo. r13,r14,r15
# name divweuo. code 7c000717 mask ff0700fc00000000 flags @POWER7 @A2 operands 3b 31 38 0 0 0 0 0
:divweuo. TH,A,B is $(NOTVLE) & OP=31 & TH & A & B & OE=1 & Rc=1 & XOP_1_9=395
{
	dividend:8 = zext(A:4) << 32;
	divisor:8 = zext(B:4);
	q:8 = dividend / divisor;
	divOverflow(A,B);
	TH = zext(q:4);
	cr0flags(TH);
}

# =======================================================================
# PowerISA II: 3.3.12.1 64-bit Fixed-Point Logical Instructions [Category: 64-Bit]
# CMT: Population Count Doubleword [Category: Server.64-bit] [Category: Embedded.64-bit.Phased-In]
# FORM: X-form
# binutils: a2.d: 648: 7d 6a 03 f4 popcntd r10,r11
# binutils: power7.d: e0: 7e 8a 03 f4 popcntd r10,r20
# name popcntd code 7c0003f4 mask ffff00fc00000000 flags @POWER7 @A2 operands 31 3b 0 0 0 0 0 0
:popcntd A,S is $(NOTVLE) & OP=31 & S & A & XOP_1_10=506 & Rc & BITS_11_15=0
{
	A = popcount(S);
}

# =======================================================================
# PowerISA II: 3.3.4.1 64-Bit Load and Store with Byte Reversal Instructions [Category: 64-bit]
# CMT: Load Doubleword Byte-Reverse Indexed
# FORM: X-form
# Category: 64
# binutils: a2.d: 418: 7d 4b 64 28 ldbrx r10,r11,r12
# binutils: cell.d: 40: 7c 00 0c 28 ldbrx r0,0,r1
# binutils: cell.d: 44: 7c 01 14 28 ldbrx r0,r1,r2
# binutils: power7.d: e4: 7e 95 b4 28 ldbrx r20,r21,r22
# name ldbrx code 7c000428 mask ff0700fc00000000 flags @POWER7 @CELL @A2 operands 3b 32 38 0 0 0 0 0
define pcodeop LoadDoublewordByteReverseIndexed;
:ldbrx D,A,B is $(NOTVLE) & OP=31 & D & A & B & XOP_1_10=532 & Rc
{
	D = LoadDoublewordByteReverseIndexed(D,A,B);
}

# ======================================================================
# PowerISA II: 4.4.2 Load and Reserve and Store Conditional Instructions
# CMT: Store Byte Conditional Indexed
# FORM: X-form
# binutils: power7.d: 164: 7d 4b 65 6d stbcx. r10,r11,r12
# name stbcx. 
code 7c00056d mask ff0700fc00000000 flags @POWER7 operands 3b 32 38 0 0 0 0 0 define pcodeop StoreByteConditionalIndexed; :stbcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=694 & Rc=1 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:1 EA = StoreByteConditionalIndexed(S,RA_OR_ZERO,B); setCrBit(cr0, 2, 1); } # ====================================================================== # PowerISA II: 5.4.1 Move To/From System Register Instructions # CMT: Move From Device Control Register Indexed [Category: Embedded.Device Control] # FORM: X-form # binutils: 476.d: 49c: 7c 85 02 06 mfdcrx r4,r5 # binutils: a2.d: 520: 7d 4b 02 06 mfdcrx r10,r11 # binutils: booke.d: 28: 7c 85 02 06 mfdcrx r4,r5 # binutils: booke_xcoff.d: 24: 7c 85 02 06 mfdcrx r4,r5 # name mfdcrx code 7c000206 mask ff0700fc00000000 flags @476 @BOOKE @A2 operands 3b 31 0 0 0 0 0 0 define pcodeop MoveFromDeviceControlRegisterIndexed; :mfdcrx D,A is OP=31 & D & A & XOP_1_9=259 & Rc=0 { # MoveFromDeviceControlRegisterIndexed(D,A); } # PowerISA II: 5.4.1 Move To/From System Register Instructions # CMT: Move To Device Control Register Indexed [Category: Embedded.Device Control] # FORM: X-form # binutils: 476.d: 4cc: 7c e6 03 06 mtdcrx r6,r7 # binutils: a2.d: 568: 7d 6a 03 06 mtdcrx r10,r11 # binutils: booke.d: 30: 7c e6 03 06 mtdcrx r6,r7 # binutils: booke_xcoff.d: 2c: 7c e6 03 06 mtdcrx r6,r7 # binutils: 4cc: 7c e6 03 06 mtdcrx r6,r7 # name mtdcrx code 7c000306 mask ff0700fc00000000 flags @476 @BOOKE @A2 operands 31 3b 0 0 0 0 0 0 define pcodeop MoveToDeviceControlRegisterIndexed; :mtdcrx A,S is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=387 & BIT_0=0 { MoveToDeviceControlRegisterIndexed(S,A); } # # ======================================================================== # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Data Cache Block Flush by External PID # FORM: X-form # binutils: a2.d: 154: 7c 0a 58 fe dcbfep r10,r11 # binutils: e500mc.d: 9c: 7c 01 10 fe 
dcbfep r1,r2
# name dcbfep code 7c0000fe mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0
define pcodeop DataCacheBlockFlushByExternalPID;
# Cache-management hint: no architectural state is modeled, so the pcodeop
# call is left commented out and the instruction decodes as a no-op.
:dcbfep A,B is OP=31 & A & B & XOP_1_10=127 & BIT_0=0 & BITS_21_25=0
{
	# DataCacheBlockFlushByExternalPID(A,B);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Data Cache Block Store by External PID
# FORM: X-form
# binutils: a2.d: 168: 7c 0a 58 7e dcbstep r10,r11
# binutils: e500mc.d: 98: 7c 1f 00 7e dcbstep r31,r0
# name dcbstep code 7c00007e mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0
define pcodeop DataCacheBlockStoreByExternalPID;
# Cache-management hint: modeled as a no-op (pcodeop call commented out),
# consistent with dcbfep above.
:dcbstep A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=63 & BIT_0=0
{
	# DataCacheBlockStoreByExternalPID(A,B);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Data Cache Block set to Zero by External PID
# FORM: X-form
# binutils: a2.d: 198: 7c 0a 5f fe dcbzep r10,r11
# binutils: e500mc.d: a8: 7c 0b 67 fe dcbzep r11,r12
# name dcbzep code 7c0007fe mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0
define pcodeop DataCacheBlockSetToZeroByExternalPID;
# Unlike the two hints above, this one keeps the pcodeop call live —
# presumably because zeroing a cache block is an architecturally visible
# memory effect; NOTE(review): confirm this asymmetry is intentional.
:dcbzep A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=1023 & BIT_0=0
{
	DataCacheBlockSetToZeroByExternalPID(A,B);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Instruction Cache Block Invalidate by External PID
# FORM: X-form
# binutils: a2.d: 3b8: 7c 0a 5f be icbiep r10,r11
# binutils: e500mc.d: 10: 7c 09 57 be icbiep r9,r10
# name icbiep code 7c0007be mask ff07e0ff00000000 flags @E500MC @A2 operands 31 38 0 0 0 0 0 0
define pcodeop InstructionCacheBlockInvalidateByExternalPID;
:icbiep A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=991 & BIT_0=0
{
	InstructionCacheBlockInvalidateByExternalPID(A,B);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Load Floating-Point Double by External Process ID 
Indexed # FORM: X-form # binutils: a2.d: 438: 7e 8a 5c be lfdepx f20,r10,r11 # binutils: e500mc.d: 50: 7d ae 7c be lfdepx f13,r14,r15 # name lfdepx code 7c0004be mask ff0700fc00000000 flags @E500MC @A2 operands 22 31 38 0 0 0 0 0 :lfdepx fT,RA_OR_ZERO,B is OP=31 & fT & B & RA_OR_ZERO & XOP_1_10=607 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; fT = *:8(ea); } # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Store Byte by External Process ID Indexed # FORM: X-form # binutils: a2.d: 700: 7d 4b 61 be stbepx r10,r11,r12 # binutils: e500mc.d: 54: 7e 11 91 be stbepx r16,r17,r18 # name stbepx code 7c0001be mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 define pcodeop StoreByteByExternalProcessIDIndexed; :stbepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=223 & BIT_0=0 { # EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:1 EA = StoreByteByExternalProcessIDIndexed(S,RA_OR_ZERO,B); } # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Store Halfword by External Process ID Indexed # FORM: X-form # binutils: a2.d: 784: 7d 4b 63 3e sthepx r10,r11,r12 # binutils: e500mc.d: 58: 7e 74 ab 3e sthepx r19,r20,r21 # name sthepx code 7c00033e mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 define pcodeop StoreHalfwordByExternalProcessIDIndexed; :sthepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=415 & BIT_0=0 { # EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:2 EA = StoreHalfwordByExternalProcessIDIndexed(S,RA_OR_ZERO,B); } # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Store Word by External Process ID Indexed # FORM: X-form # binutils: a2.d: 7b0: 7d 4b 61 3e stwepx r10,r11,r12 # binutils: e500mc.d: 5c: 7e d7 c1 3e stwepx r22,r23,r24 # name stwepx code 7c00013e mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 define pcodeop StoreWordByExternalProcessIDIndexed; :stwepx 
S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=159 & BIT_0=0 { # EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:4 EA = StoreWordByExternalProcessIDIndexed(S,RA_OR_ZERO,B); } # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Store Doubleword Byte-Reverse Indexed # FORM: X-form # binutils: a2.d: 71c: 7d 4b 65 28 stdbrx r10,r11,r12 # binutils: cell.d: 48: 7c 00 0d 28 stdbrx r0,0,r1 # binutils: cell.d: 4c: 7c 01 15 28 stdbrx r0,r1,r2 # binutils: power7.d: e8: 7e 95 b5 28 stdbrx r20,r21,r22 # name stdbrx code 7c000528 mask ff0700fc00000000 flags @POWER7 @CELL @A2 operands 3b 32 38 0 0 0 0 0 define pcodeop StoreDoublewordByteReverseIndexed; :stdbrx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=660 & BIT_0=0 { # EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:8 EA = StoreDoublewordByteReverseIndexed(S,RA_OR_ZERO,B); } # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Store Doubleword Byte-Reverse Indexed # FORM: X-form # binutils: a2.d: 724: 7d 4b 61 3a stdepx r10,r11,r12 # binutils: e500mc.d: 60: 7f 3a d9 3a stdepx r25,r26,r27 # name stdepx code 7c00013a mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 define pcodeop StoreDoublewordByteReverseIndexed1; :stdepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=157 & BIT_0=0 { # EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:8 EA = StoreDoublewordByteReverseIndexed1(S,RA_OR_ZERO,B); } # PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID] # CMT: Load Byte by External Process ID Indexed # FORM: X-form # binutils: a2.d 3ec: 7d 4b 60 be lbepx r10,r11,r12 # binutils: e500mc.d 40: 7c 22 18 be lbepx r1,r2,r3 # name lbepx code 7c0000be mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0 # Note: no support for context modeling here :lbepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=95 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; D = 
zext(*:1(ea)); }

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Data Cache Block Touch by External PID
# FORM: X-form
# binutils: a2.d: 174: 7d 4b 62 7e dcbtep r10,r11,r12
# binutils: e500mc.d: a4: 7c c7 42 7e dcbtep r6,r7,r8
# NOTE: BITS_21_25 => TH (register)
# name dcbtep code 7c00027e mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0
# No PCODE necessary -- cache-touch hint modeled as an opaque pcodeop.
define pcodeop DataCacheBlockTouchByExternalPID2;
:dcbtep TH,RA_OR_ZERO,B is OP=31 & TH & RA_OR_ZERO & B & XOP_1_10=319 & BIT_0=0
{
	DataCacheBlockTouchByExternalPID2(TH,RA_OR_ZERO,B);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Load Doubleword by External Process ID Indexed
# FORM: X-form
# binutils: a2.d: 41c: 7d 4b 60 3a ldepx r10,r11,r12
# binutils: e500mc.d: 4c: 7d 4b 60 3a ldepx r10,r11,r12
# name ldepx code 7c00003a mask ff0700fc00000000 flags @E500MC @A2 operands 3b 31 38 0 0 0 0 0
# Note: no support for context modeling here
:ldepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=29 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	D = *:8(ea);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Load Word by External Process ID Indexed
# FORM: X-form
# binutils: a2.d: 4c8: 7d 4b 60 3e lwepx r10,r11,r12
# binutils: e500mc.d: 48: 7c e8 48 3e lwepx r7,r8,r9
# Note: no support for context modeling here
:lwepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=31 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	D = *:4(ea);
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Store Floating-Point Double by External Process ID Indexed
# FORM: X-form
# binutils: a2.d: 740: 7e 8a 5d be stfdepx f20,r10,r11
# binutils: e500mc.d: 64: 7f 9d f5 be stfdepx f28,r29,r30
# NOTE: BITS_21_25 => FRS (float register) => fS
# Note: no support for context modeling here
:stfdepx fS,RA_OR_ZERO,B is OP=31 & fS & B & RA_OR_ZERO & XOP_1_10=735 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	*:8(ea) = fS;
}

# PowerISA II: 5.4.3 External Process ID Instructions [Category: Embedded.External PID]
# CMT: Load Halfword by External Process ID Indexed
# FORM: X-form
# binutils: a2.d: 480: 7d 4b 62 3e lhepx r10,r11,r12
# binutils: e500mc.d: 44: 7c 85 32 3e lhepx r4,r5,r6
# Note: no support for context modeling here
:lhepx D,RA_OR_ZERO,B is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=287 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	D = zext(*:2(ea));
}

# ========================================================================
# PowerISA II: 3.3.15.2 Move To/From System Registers [Category: Embedded]
# CMT: Move From Device Control Register User-mode Indexed [Category: Embedded.Device Control]
# FORM: X-form
# binutils: 476.d: 498: 7c 64 02 46 mfdcrux r3,r4
define pcodeop MoveFromDeviceControlRegisterUserModeIndexed;
:mfdcrux RT,A is OP=31 & RT & A & BITS_11_15=0 & XOP_1_10=291 & BIT_0=0
{
# DCR number comes from register A; dcr000 is the base of the DCR register bank.
@ifdef BIT_64
	tmp:8 = dcr000 + (A * $(REGISTER_SIZE));
	RT = *[register]:8 (tmp:4);
@else
	tmp = dcr000 + (A * $(REGISTER_SIZE));
	RT = *[register]:4 (tmp);
@endif
}

# PowerISA II: 3.3.15.2 Move To/From System Registers [Category: Embedded]
# CMT: Move To Device Control Register User-mode Indexed [Category: Embedded.Device Control]
# FORM: X-form
# binutils: 476.d: 4c8: 7c 83 03 46 mtdcrux r3,r4
:mtdcrux S,A is OP=31 & S & A & BITS_11_15=0 & XOP_1_10=419 & BIT_0=0
{
@ifdef BIT_64
	tmp:8 = dcr000 + (A * $(REGISTER_SIZE));
	*[register]:8 (tmp:4) = S;
@else
	tmp = dcr000 + (A * $(REGISTER_SIZE));
	*[register]:4 (tmp) = S;
@endif
}

# ========================================================================
# PowerISA II: 4.6.5 Floating-Point Move Instructions
# CMT: Floating Copy Sign
# FORM: X-form
# binutils: 476.d: 1f0: fd 4b 60 10 fcpsgn f10,f11,f12
# binutils: a2.d: 268: fe 95 b0 10 fcpsgn f20,f21,f22
:fcpsgn fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=8 & Rc=0
{
	# magnitude of fB combined with the sign bit of fA
	fT = ( fB & 0x7FFFFFFFFFFFFFFF ) | ( fA & 0x8000000000000000 );
}

# PowerISA II: 4.6.5 Floating-Point Move Instructions
# CMT: Floating Copy Sign
# FORM: X-form
# binutils: 476.d: 1f4: fd 4b 60 11 fcpsgn\. f10,f11,f12
# binutils: a2.d: 264: fe 95 b0 11 fcpsgn\. f20,f21,f22
:fcpsgn. fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=8 & Rc=1
{
	fT = ( fB & 0x7FFFFFFFFFFFFFFF ) | ( fA & 0x8000000000000000 );
	cr1flags();
}

# ========================================================================
# PowerISA II: 4.6.2 Floating-Point Load Instructions
# CMT: Load Floating-Point as Integer Word Algebraic Indexed
# FORM: X-form
# binutils: 476.d: 350: 7d 43 26 ae lfiwax f10,r3,r4
# binutils: a2.d: 44c: 7e 8a 5e ae lfiwax f20,r10,r11
define pcodeop LoadFloatingPointAsIntegerWordAlgebraicIndexed;
:lfiwax fT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fT & RA_OR_ZERO & B & XOP_1_10=855 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	fT = sext( *:4(ea) );
}

# PowerISA II: 4.6.2 Floating-Point Load Instructions
# CMT: Load Floating-Point as Integer Word and Zero Indexed [Category: Floating-Point.Phased-in]
# FORM: X-form
# bintutils: a2.d: 450: 7e 8a 5e ee lfiwzx f20,r10,r11
# bintutils: power7.d: ec: 7d 40 56 ee lfiwzx f10,0,r10
# bintutils: power7.d: f0: 7d 49 56 ee lfiwzx f10,r9,r10
define pcodeop LoadFloatingPointAsIntegerWordAndZeroIndexed;
:lfiwzx fT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & fT & RA_OR_ZERO & B & XOP_1_10=887 & BIT_0=0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	fT = zext( *:4(ea) );
}

# =======================================================================
# PowerISA II: A.1 Embedded Cache Initialization [Category: Embedded.Cache Initialization]
# CMT: Instruction Cache Invalidate
# FORM: X-form
# binutils: 476.d: 31c: 7c 20 07 8c ici 1
# binutils: a2.d: 3d8: 7d 40 07 8c ici 10
# Note: Using CT, but limited to 4 bits, not 5 (PPC bit 6 is 0 and is a don't care anyhow as CT is unused)
# No PCODE for this function
define pcodeop InstructionCacheInvalidate;
:ici CT is OP=31 & CT & BITS_16_20=0 & BITS_11_15=0 & XOP_1_10=966 & BIT_0=0
{
	InstructionCacheInvalidate();
}

# PowerISA II: A.1 Embedded Cache Initialization [Category: Embedded.Cache Initialization]
# CMT: Data Cache Invalidate
# FORM: X-form
# Note: Using CT, but limited to 4 bits, not 5 (PPC bit 6 is 0 and is a don't care anyhow as CT is unused)
# No PCODE for this function
# binutils: 476.d: 180: 7c 20 03 8c dci 1
# binutils: a2.d: 1a8: 7d 40 03 8c dci 10
# NOTE(review): constraint list uses BITS_11_20=0 where ici uses BITS_16_20=0 -- confirm intended.
define pcodeop DataCacheInvalidate;
:dci CT is OP=31 & CT & BITS_11_20=0 & BITS_11_15=0 & XOP_1_10=454 & BIT_0=0
{
	DataCacheInvalidate();
}

# =======================================================================
# PowerISA II: 4.3.1 Instruction Cache Instructions
# CMT: Instruction Cache Block Touch [Category: Embedded]
# FORM: X-form
# binutils: 476.d: 308: 7c a8 48 2c icbt 5,r8,r9
# binutils: a2.d: 3bc: 7c 0a 58 2c icbt r10,r11
# binutils: a2.d: 3c0: 7c ea 58 2c icbt 7,r10,r11
# binutils: booke.d: 0: 7c a8 48 2c icbt 5,r8,r9
# binutils: booke_xcoff.d: 8: 7c a8 48 2c icbt 5,r8,r9
# Note: Using CT, but limited to 4 bits, not 5 (PPC bit 6 is 0 and is a don't care anyhow as CT is unused)
# No PCODE for this function
define pcodeop InstructionCacheBlockTouch;
:icbt CT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & CT & RA_OR_ZERO & B & XOP_1_10=22 & BIT_0=0
{
	InstructionCacheBlockTouch(RA_OR_ZERO,B);
}

# ======================================================================
# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions
# CMT: Floating Reciprocal Square Root Estimate [Single]
# FORM: A-form
# binutils: 476.d: 2d0: ed c0 78 34 frsqrtes f14,f15
# binutils: a2.d: 374: ee 80 a8 34 frsqrtes f20,f21
# binutils: a2.d: 37c: ee 80 a8 34 frsqrtes f20,f21
# binutils: a2.d: 384: ee 81 a8 34 frsqrtes f20,f21,1
# binutils: power7.d: 184: ed c0 78 34 frsqrtes f14,f15
# NOTE: binutils allows BITS_16_20=1 but manual says BITS_16_20=0. We take the manuals side. (pg 136)
:frsqrtes fT,fB is $(NOTVLE) & OP=59 & fT & fB & BITS_16_20=0 & BITS_6_10=0 & XOP_1_5=26 & Rc=0
{
	# divide 1 by square root of fB to create reciprocal
	tmp1:8 = 0x3FF0000000000000;
	fT = tmp1 f/ sqrt( fB );
	setFPDivFlags(tmp1,fB,fT);
}

# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions
# CMT: Floating Reciprocal Square Root Estimate [Single]
# FORM: A-form
# binutils: 476.d: 2d4: ed c0 78 35 frsqrtes. f14,f15
# binutils: a2.d: 378: ee 80 a8 35 frsqrtes. f20,f21
# binutils: a2.d: 380: ee 80 a8 35 frsqrtes. f20,f21
# binutils: a2.d: 388: ee 81 a8 35 frsqrtes. f20,f21,1
# binutils: power7.d: 188: ed c0 78 35 frsqrtes. f14,f15
# NOTE: binutils allows BITS_16_20=1 but manual says BITS_16_20=0. We take the manuals side. (pg 136)
define pcodeop FloatingReciprocalSquareRootEstimate1;
:frsqrtes. fT,fB is $(NOTVLE) & OP=59 & fT & fB & BITS_16_20=0 & BITS_6_10=0 & XOP_1_5=26 & Rc=1
{
	# divide 1 by square root of fB to create reciprocal
	tmp1:8 = 0x3FF0000000000000;
	fT = tmp1 f/ sqrt( fB );
	setFPDivFlags(tmp1,fB,fT);
	cr1flags();
}

# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions
# CMT: Floating Reciprocal Estimate [Single]
# FORM: A-form
# binutils: 476.d: 290: fd c0 78 30 fre f14,f15
# binutils: a2.d: 308: fe 80 a8 30 fre f20,f21
# binutils: a2.d: 310: fe 80 a8 30 fre f20,f21
# binutils: a2.d: 318: fe 81 a8 30 fre f20,f21,1
# binutils: power7.d: 16c: fd c0 78 30 fre f14,f15
# NOTE: binutils allows BITS_16_20!=0 but manual says BITS_16_20=0. We take the manuals side. (pg 135)
:fre fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=0
{
	# divide 1 by fB to create reciprocal
	tmp1:8 = 0x3FF0000000000000;
	fT = tmp1 f/ fB;
	setFPDivFlags(tmp1,fB,fT);
}

# PowerISA II: 4.6.6.1 Floating-Point Elementary Arithmetic Instructions
# CMT: Floating Reciprocal Estimate [Single]
# FORM: A-form
# binutils: 476.d: 294: fd c0 78 31 fre. f14,f15
# binutils: a2.d: 304: fe 80 a8 31 fre. f20,f21
# binutils: a2.d: 30c: fe 80 a8 31 fre. f20,f21
# binutils: a2.d: 314: fe 81 a8 31 fre. f20,f21,1
# binutils: power7.d: 170: fd c0 78 31 fre. f14,f15
# NOTE: binutils allows BITS_16_20!=0 but manual says BITS_16_20=0. We take the manuals side. (pg 135)
:fre. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & BITS_6_10=0 & XOP_1_5=24 & Rc=1
{
	# divide 1 by fB to create reciprocal
	tmp1:8 = 0x3FF0000000000000;
	fT = tmp1 f/ fB;
	setFPDivFlags(tmp1,fB,fT);
	cr1flags();
}

# ======================================================================
# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Minus
# FORM: X-form
# binutils: 476.d: 2a0: fd 40 5b d0 frim f10,f11
# binutils: a2.d: 338: fe 80 ab d0 frim f20,f21
:frim fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=488 & Rc=0
{
	fT = floor( fB );
	setFPRF(fT);
	setSummaryFPSCR();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Minus
# FORM: X-form
# binutils: 476.d: 2a4: fd 40 5b d1 frim. f10,f11
# binutils: a2.d: 334: fe 80 ab d1 frim. f20,f21
:frim. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=488 & Rc=1
{
	fT = floor( fB );
	setFPRF(fT);
	setSummaryFPSCR();
	cr1flags();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Nearest
# FORM: X-form
# binutils: 476.d: 2a8: fd 40 5b 10 frin f10,f11
# binutils: a2.d: 340: fe 80 ab 10 frin f20,f21
:frin fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=392 & Rc=0
{
	fT = round( fB );
	setFPRF(fT);
	setSummaryFPSCR();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Nearest
# FORM: X-form
# binutils: 476.d: 2ac: fd 40 5b 11 frin. f10,f11
# binutils: a2.d: 33c: fe 80 ab 11 frin. f20,f21
:frin. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=392 & Rc=1
{
	fT = round( fB );
	setFPRF(fT);
	setSummaryFPSCR();
	cr1flags();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Plus
# FORM: X-form
# binutils: 476.d: 2b0: fd 40 5b 90 frip f10,f11
# binutils: a2.d: 348: fe 80 ab 90 frip f20,f21
:frip fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=456 & Rc=0
{
	fT = ceil( fB );
	setFPRF(fT);
	setSummaryFPSCR();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Plus
# FORM: X-form
# binutils: 476.d: 2b4: fd 40 5b 91 frip. f10,f11
# binutils: a2.d: 344: fe 80 ab 91 frip. f20,f21
:frip. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=456 & Rc=1
{
	fT = ceil( fB );
	setFPRF(fT);
	setSummaryFPSCR();
	cr1flags();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Toward Zero
# FORM: X-form
# binutils: 476.d: 2b8: fd 40 5b 50 friz f10,f11
# binutils: a2.d: 350: fe 80 ab 50 friz f20,f21
:friz fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=424 & Rc=0
{
	fT = trunc( fB );
	setFPRF(fT);
	setSummaryFPSCR();
}

# PowerISA II: 4.6.7.3 Floating Round to Integer Instructions
# CMT: Floating Round to Integer Toward Zero
# FORM: X-form
# binutils: 476.d: 2bc: fd 40 5b 51 friz. f10,f11
# binutils: a2.d: 34c: fe 80 ab 51 friz. f20,f21
define pcodeop FloatingRoundToIntegerTowardZero1;
:friz. fT,fB is $(NOTVLE) & OP=63 & fT & BITS_16_20=0 & fB & XOP_1_10=424 & Rc=1
{
	fT = trunc( fB );
	setFPRF(fT);
	setSummaryFPSCR();
	cr1flags();
}

# =======================================================================
# PowerISA II: 4.4.4 Wait Instruction
# CMT: Wait
# FORM: X-form
# binutils: a2.d: 86c: 7c 00 00 7c wait
# binutils: a2.d: 870: 7c 00 00 7c wait
# binutils: e500mc.d: 1c: 7c 00 00 7c wait
# binutils: e500mc.d: 20: 7c 00 00 7c wait
# binutils: power7.d: 58: 7c 00 00 7c wait
# binutils: power7.d: 5c: 7c 00 00 7c wait
define pcodeop waitOp;
:wait BITS_21_22 is OP=31 & crfD=0 & BITS_21_22 & BITS_11_20=0 & XOP_1_10=62 & BIT_0=0
{
	waitOp();
}

# =======================================================================
# PowerISA II: 4.3.1 System Linkage Instructions
# CMT: Return From Guest Interrupt [Category:Embedded.Hypervisor]
# FORM: XL-form
# binutils: e500mc.d: 0: 4c 00 00 4e rfdi
define pcodeop ReturnFromGuestInterrupt;
:rfgi is $(NOTVLE) & OP=19 & BITS_11_25=0 & XOP_1_10=102 & BIT_0=0
{
	MSR = returnFromGuestInterrupt(MSR, spr17b); #GSRR1
	return[spr17a]; #GSRR0
}

# =======================================================================
# PowerISA II: 4.4.2.1 64-Bit Load and Reserve and Store Conditional Instructions [Category: 64-Bit]
# CMT: Load Doubleword And Reserve Indexed
# FORM: X-form
# binutils: a2.d: 410: 7d 4b 60 a8 ldarx r10,r11,r12
# binutils: a2.d: 414: 7d 4b 60 a9 ldarx r10,r11,r12,1
:ldarx TH,RA_OR_ZERO,B,BIT_0 is OP=31 & TH & RA_OR_ZERO & B & XOP_1_10=84 & BIT_0
{
	ea:$(REGISTER_SIZE) = RA_OR_ZERO+B;
	# establish a doubleword reservation for a later stdcx.
	RESERVE = 1;
	RESERVE_LENGTH = 8;
	RESERVE_ADDRESS = ea;
	TH = *:8 (ea);
}

# =======================================================================
# PowerISA II: 4.4.1 Instruction Synchronize Instruction
# CMT: Load Word And Reserve Indexed
# FORM: X-form
# binutils: 476.d: 394: 7c 64 28 28 lwarx r3,r4,r5
# binutils: 476.d: 398: 7c 64 28 28 lwarx r3,r4,r5
# binutils: 476.d: 39c: 7c 64 28 29 lwarx r3,r4,r5,1
# binutils: a2.d: 4b4: 7d 4b 60 28
lwarx r10,r11,r12 # binutils: a2.d: 4b8: 7d 4b 60 29 lwarx r10,r11,r12,1 :lwarx TH,RA_OR_ZERO,B,BIT_0 is OP=31 & TH & RA_OR_ZERO & B & XOP_1_10=20 & BIT_0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO+B; RESERVE = 1; RESERVE_LENGTH = 4; RESERVE_ADDRESS = ea; TH = zext( *:4 (ea) ); } # ======================================================================= # PowerISA II: 11.3 Processor Control Instructions # CMT: Message Clear # FORM: X-form # binutils: a2.d: 544: 7c 00 51 dc msgclr r10 # binutils: e500mc.d: 14: 7c 00 69 dc msgclr r13 define pcodeop MessageClear; :msgclr B is OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=238 & BIT_0=0 { MessageClear(B); } # PowerISA II: 11.3 Processor Control Instructions # CMT: Message Send # FORM: X-form # binutils: a2.d: 548: 7c 00 51 9c msgsnd r10 # binutils: e500mc.d: 18: 7c 00 71 9c msgsnd r14 define pcodeop MessageSend; :msgsnd B is OP=31 & BITS_21_25=0 & BITS_16_20=0 & B & XOP_1_10=206 & BIT_0=0 { MessageSend(); } # ======================================================================= # PowerISA III: TLB Management Instructions (expanded by ISA 3.0) # CMT: TLB Invalidate Entry (expands on form in ppc_instructions.sinc) # FORM: X-form :tlbie RB_OR_ZERO,RS_OR_ZERO,"2",BIT_17,BIT_16 is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & BITS_18_19=2 & BIT_17 & BIT_16 & XOP_1_10=306 & BIT_0=0 { # RIC=2 # RIC = 2 # PRS = BIT_17 # R = BIT_16 TLBInvalidateEntry(RB_OR_ZERO,RS_OR_ZERO,2:1,BIT_17:1,BIT_16:1); } :tlbie RB_OR_ZERO,RS_OR_ZERO,BIT_18,BIT_17,"1" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & (BITS_18_19=0 | BITS_18_19=1) & BIT_18 & BIT_17 & BIT_16=1 & XOP_1_10=306 & BIT_0=0 { # RIC=0|1 & R=1 # RIC = BITS_18_19 (0 or 1) # PRS = BIT_17 # R = 1 TLBInvalidateEntry(RB_OR_ZERO,RS_OR_ZERO,BIT_18:1,BIT_17:1,1:1); } :tlbie RB_OR_ZERO,RS_OR_ZERO,"3","0","0" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & BITS_18_19=3 & BIT_17=0 & BIT_16=0 & XOP_1_10=306 & BIT_0=0 { # RIC=3 & PRS=0 & R=0 # RIC = 3 # PRS = 0 
	# R = 0
	TLBInvalidateEntry(RB_OR_ZERO,RS_OR_ZERO,3:1,0:1,0:1);
}

# PowerISA III: TLB Management Instructions (expanded by ISA 3.0)
# CMT: TLB Invalidate Entry Local (expands on form in ppc_instructions.sinc)
# FORM: X-form
:tlbiel RB_OR_ZERO,RS_OR_ZERO,"0","0","0" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BITS_16_20=0 & XOP_1_10=274 & BIT_0=0
{
	# RIC=0 & PRS=0 & R=0
	# RIC = 0
	# PRS = 0
	# R = 0
	TLBInvalidateEntryLocal(RB_OR_ZERO,RS_OR_ZERO,0:1,0:1,0:1);
}

:tlbiel RB_OR_ZERO,RS_OR_ZERO,"2",BIT_17,BIT_16 is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & BITS_18_19=2 & BIT_17 & BIT_16 & XOP_1_10=274 & BIT_0=0
{
	# RIC=2
	# RIC = 2
	# PRS = BIT_17
	# R = BIT_16
	TLBInvalidateEntryLocal(RB_OR_ZERO,RS_OR_ZERO,2:1,BIT_17:1,BIT_16:1);
}

:tlbiel RB_OR_ZERO,RS_OR_ZERO,BIT_18,BIT_17,"1" is $(NOTVLE) & OP=31 & RS_OR_ZERO & RB_OR_ZERO & BIT_20=0 & (BITS_18_19=0 | BITS_18_19=1) & BIT_18 & BIT_17 & BIT_16=1 & XOP_1_10=274 & BIT_0=0
{
	# RIC=0|1 & R=1
	# RIC = BITS_18_19 (0 or 1)
	# PRS = BIT_17
	# R = 1
	TLBInvalidateEntryLocal(RB_OR_ZERO,RS_OR_ZERO,BIT_18:1,BIT_17:1,1:1);
}

# PowerISA II: 6.11.4.9 TLB Management Instructions
# CMT: TLB Search and Reserve Indexed Category: Embedded.TLB Write Conditional]
# FORM: X-form
# binutils: a2.d: 848: 7c 0a 5e a5 tlbsrx\. r10,r11
define pcodeop TLBSearchAndReserveIndexedCategory;
:tlbsrx. A,B is OP=31 & BITS_21_25=0 & A & B & XOP_1_10=850 & BIT_0=1
{
	TLBSearchAndReserveIndexedCategory(A,B,cr0);
}

# =======================================================================
# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions
# CMT: Move To FPSCR Fields
# FORM: X-form
# binutils: 476.d: 4e0: fc 0c 55 8e mtfsf 6,f10
# binutils: 476.d: 4e4: fc 0c 55 8e mtfsf 6,f10
# binutils: 476.d: 4e8: fc 0d 55 8e mtfsf 6,f10,0,1
# binutils: 476.d: 4ec: fe 0c 55 8e mtfsf 6,f10,1,0
# binutils: a2.d: 580: fc 0c a5 8e mtfsf 6,f20
# binutils: a2.d: 588: fc 0c a5 8e mtfsf 6,f20
# binutils: a2.d: 590: fe 0d a5 8e mtfsf 6,f20,1,1
# binutils: common.d: 210: fc 0c 55 8e mtfsf 6,f10
# binutils: power6.d: b4: fc 0c 55 8e mtfsf 6,f10
# binutils: power6.d: bc: fc 0c 55 8e mtfsf 6,f10
# binutils: power6.d: c4: fc 0d 55 8e mtfsf 6,f10,0,1
# binutils: power6.d: cc: fe 0c 55 8e mtfsf 6,f10,1,0
define pcodeop MoveToFPSCRFields;
:mtfsf BITS_17_24,fB,BIT_25,BIT_16 is $(NOTVLE) & OP=63 & BIT_25 & BITS_17_24 & BIT_16 & fB & XOP_1_10=711 & Rc=0
{
	# PCODE
	MoveToFPSCRFields(fB);
}

# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions
# CMT: Move To FPSCR Fields
# FORM: X-form
# binutils: 476.d: 4f0: fc 0c 5d 8f mtfsf. 6,f11
# binutils: 476.d: 4f4: fc 0c 5d 8f mtfsf. 6,f11
# binutils: 476.d: 4f8: fc 0d 5d 8f mtfsf. 6,f11,0,1
# binutils: 476.d: 4fc: fe 0c 5d 8f mtfsf. 6,f11,1,0
# binutils: a2.d: 57c: fc 0c a5 8f mtfsf. 6,f20
# binutils: a2.d: 584: fc 0c a5 8f mtfsf. 6,f20
# binutils: a2.d: 58c: fe 0d a5 8f mtfsf. 6,f20,1,1
define pcodeop MoveToFPSCRFields1;
:mtfsf. BITS_17_24,fB,BIT_25,BIT_16 is $(NOTVLE) & OP=63 & BIT_25 & BITS_17_24 & BIT_16 & fB & XOP_1_10=711 & Rc=1
{
	# PCODE
	MoveToFPSCRFields1(fB);
}

# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions
# CMT: Move To FPSCR Field Immediate
# FORM: X-form
# binutils: 476.d: 500: ff 00 01 0c mtfsfi 6,0
# binutils: 476.d: 504: ff 00 01 0c mtfsfi 6,0
# binutils: 476.d: 508: ff 00 01 0c mtfsfi 6,0
# binutils: 476.d: 50c: ff 01 01 0c mtfsfi 6,0,1
# binutils: a2.d: 598: ff 00 01 0c mtfsfi 6,0
# binutils: a2.d: 5a0: ff 00 d1 0c mtfsfi 6,13
# binutils: a2.d: 5a8: ff 01 d1 0c mtfsfi 6,13,1
# binutils: common.d: 218: ff 00 01 0c mtfsfi 6,0
# binutils: power6.d: d4: ff 00 01 0c mtfsfi 6,0
# binutils: power6.d: dc: ff 00 01 0c mtfsfi 6,0
# binutils: power6.d: e4: ff 01 01 0c mtfsfi 6,0,1
define pcodeop MoveToFPSCRFieldImmediate;
:mtfsfi BF2,BITS_12_15,BIT_16 is $(NOTVLE) & OP=63 & BF2 & BITS_21_22=0 & BITS_17_20=0 & BIT_16 & BITS_12_15 & BIT_11=0 & XOP_1_10=134 & Rc=0
{
	MoveToFPSCRFieldImmediate();
}

# PowerISA II: 4.6.10 Floating-Point Status and Control Register Instructions
# CMT: Move To FPSCR Field Immediate
# FORM: X-form
# binutils: 476.d: 510: ff 00 f1 0d mtfsfi. 6,15
# binutils: 476.d: 514: ff 00 f1 0d mtfsfi. 6,15
# binutils: 476.d: 518: ff 00 f1 0d mtfsfi. 6,15
# binutils: 476.d: 51c: ff 01 f1 0d mtfsfi. 6,15,1
# binutils: a2.d: 594: ff 00 01 0d mtfsfi. 6,0
# binutils: a2.d: 59c: ff 00 d1 0d mtfsfi. 6,13
# binutils: a2.d: 5a4: ff 01 d1 0d mtfsfi. 6,13,1
define pcodeop MoveToFPSCRFieldImmediate1;
:mtfsfi. BITS_23_25,BITS_12_15,BIT_16 is $(NOTVLE) & OP=63 & BITS_23_25 & BITS_21_22=0 & BITS_17_20=0 & BIT_16 & BITS_12_15 & BIT_11=0 & XOP_1_10=134 & Rc=1
{
	MoveToFPSCRFieldImmediate1();
}

# =======================================================================
# PowerISA II: 3.3.15 Move To/From System Register Instructions
# CMT: Move To Condition Register Fields
# FORM: XFX-form
# binutils: 476.d: 48c: 7c 60 00 26 mfcr r3
# NOTE(review): TO is not written here -- the transfer is opaque; confirm intended.
define pcodeop mfcrOp;
:mfcr TO is OP=31 & TO & BIT_20=0 & BITS_12_19=0 & XOP_1_10=190 & BIT_0=0
{
	mfcrOp();
}

# =======================================================================
# PowerISA II: 5.6.1 DFP Arithmetic Instructions
# CMT: DFP Add [Quad]
# FORM: X-form
# binutils: power6.d: 38: fe 96 c0 04 daddq f20,f22,f24
# binutils: power7.d: 9c: fe 96 c0 04 daddq f20,f22,f24
# NOTE(review): result of daddqOp is not assigned to fT -- confirm intended.
define pcodeop daddqOp;
:daddq fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=2 & Rc=0
{
	daddqOp(fA,fB);
}

# PowerISA II: 5.6.1 DFP Arithmetic Instructions
# CMT: DFP Add [Quad]
# FORM: X-form
define pcodeop daddqDotOp;
:daddq. fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=2 & Rc=1
{
	daddqDotOp(fA);
}

# =======================================================================
# =======================================================================
# binutils: 476.d: 30c: 7d ae 7b cc icbtls 13,r14,r15
# binutils: a2.d: 3c4: 7c 0a 5b cc icbtls r10,r11
# binutils: a2.d: 3c8: 7c ea 5b cc icbtls 7,r10,r11
# binutils: e500.d: 10: 7d ae 7b cc icbtls 13,r14,r15
# binutils: titan.d: 198: 7c 02 0b cc icbtls r2,r1
# binutils: titan.d: 19c: 7c 02 0b cc icbtls r2,r1
# binutils: titan.d: 1a0: 7c 22 0b cc icbtls 1,r2,r1
# :icbtls BITS_21_24,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & BIT_25=0 & BITS_21_24 & B & XOP_1_10=486 & BIT_0=0 & RA_OR_ZERO
# {
#	ea = RA_OR_ZERO + B;
#	# prefetchInstructionCacheBlockLockSetX(ea);
# }

# Source for information on instructions:
# PowerISA_V2.06B_PUBLIC.pdf (dated: July 23, 2010)
# and binutils-2.21.1
# Have test case for about 200 of these instructions

# Extended Mnemonic
# xvmovdp XT,XB => xvcpsgndp XT,XB,XB
# xvmovsp XT,XB => xvcpsgnsp XT,XB,XB
# xxmrghd T,A,B => xxpermdi T,A,B,0b00
# xxmrgld T,A,B => xxpermdi T,A,B,0b11
# xxspltd T,A,0 => xxpermdi T,A,A,0b00
# xxswapd T,A => xxpermdi T,A,A,0b10

@include "vsx.sinc"

# binutils-descr: "brinc", VX (4, 527), VX_MASK, PPCSPE, PPCNONE, {RS, RA, RB}
define pcodeop brincOp;
# ISA-cmt: brinc - Bit Reversed Increment
# ISA-info: brinc - Form "EVX" Page 510 Category "SP"
# binutils: mytest.d: 1d0: 10 22 1a 0f brinc r1,r2,r3
:brinc S,A,B is OP=4 & XOP_0_10=527 & S & A & B
{
	brincOp(S,A,B);
}

# binutils-descr: "hrfid", XL(19,274), 0xffffffff, POWER5|CELL, PPC476, {0}
define pcodeop hrfidOp;
# ISA-info: hrfid - Form "XL" Page 739 Category "S"
# binutils: mytest.d: 0: 4c 00 02 24 hrfid
:hrfid is $(NOTVLE) & OP=19 & XOP_1_10=274 & BITS_11_25=0 & BIT_0=0
{
	hrfidOp();
}

define pcodeop bcctrOp;
# ZZZ NO-PARSE XLLK - "bcctr", XLLK(19,528,0), XLBH_MASK, PPCCOM, PPCNONE, {BO, BI, BH}
:bcctr BO,BI_BITS,BH is $(NOTVLE) & OP=19 & BO & BI_BITS & BITS_13_15=0 & BH & XOP_1_10=528 & LK=0
{
	bcctrOp();
}

define pcodeop bcctrlOp;
# ZZZ NO-PARSE XLLK - "bcctrl", XLLK(19,528,1), XLBH_MASK, PPCCOM, PPCNONE, {BO, BI, BH}
:bcctrl BO,BI_BITS,BH is $(NOTVLE) & OP=19 & BO & BI_BITS & BITS_13_15=0 & BH & XOP_1_10=528 & LK=1
{
	bcctrlOp();
}

# binutils-descr: "lbarx", X(31,52), XEH_MASK, POWER7, PPCNONE, {RT, RA0, RB, EH}
define pcodeop lbarxOp;
# ISA-cmt: lbarx - Load Byte and Reserve Indexed
# ISA-info: lbarx - Form "X" Page 689 Category "B"
# binutils: power7.d: 14c: 7d 4b 60 68 lbarx r10,r11,r12
# binutils: power7.d: 150: 7d 4b 60 68 lbarx r10,r11,r12
# binutils: power7.d: 154: 7d 4b 60 69 lbarx r10,r11,r12,1
# NOTE(review): updates RA in place (A = A + B) rather than using a temp EA,
# and does not set RESERVE like lwarx/ldarx do -- confirm intended.
:lbarx RT,A,B,BIT_0 is OP=31 & XOP_1_10=52 & RT & A & B & BIT_0
{
	A = A + B;
	RT = *:1 A;
}

# binutils-descr: "lharx", X(31,116), XEH_MASK, POWER7, PPCNONE, {RT, RA0, RB, EH}
define pcodeop lharxOp;
# ISA-cmt: lharx - Load Halfword and Reserve Indexed
# ISA-info: lharx - Form "X" Page 690 Category "B"
# binutils: power7.d: 158: 7e 95 b0 e8 lharx r20,r21,r22
# binutils: power7.d: 15c: 7e 95 b0 e8 lharx r20,r21,r22
# binutils: power7.d: 160: 7e 95 b0 e9 lharx r20,r21,r22,1
:lharx RT,A,B,BIT_0 is OP=31 & XOP_1_10=116 & RT & A & B & BIT_0
{
	A = A + B;
	RT = *:2 A;
}

# binutils-descr: "ehpriv", X(31,270), 0xffffffff, E500MC|PPCA2, PPCNONE, {0}
define pcodeop ehprivOp;
# ISA-info: ehpriv - Form "XL" Page 889 Category "E.HV"
# binutils: NO-EXAMPLE - ehpriv
:ehpriv BITS_11_25 is OP=31 & BITS_11_25 & XOP_1_10=270 & BIT_0=0
{
	ehprivOp();
}

# binutils-descr: "cbcdtd", X(31,314), XRB_MASK, POWER6, PPCNONE, {RA, RS}
define pcodeop cbcdtdOp;
# ISA-info: cbcdtd - Form "X" Page 97 Category "BCDA"
# binutils: power6.d: ec: 7d 6a 02 74 cbcdtd r10,r11
:cbcdtd S,A is $(NOTVLE) & OP=31 & S & A & BITS_11_15=0 & XOP_1_10=314 & BIT_0=0
{
	cbcdtdOp(S,A);
}

# binutils-descr: "divdeu", XO(31,393,0,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB}
define pcodeop divdeuOp;
# binutils: mytest.d: 4: 7c 64 2b 12 divdeu r3,r4,r5
:divdeu RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=0 & Rc=0 & RT & A & B
{
	RT = A/B;
}

# binutils-descr: "divdeu.", XO(31,393,0,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB}
define pcodeop divdeuDotOp;
# binutils: mytest.d: 8: 7c 64 2b 13 divdeu. r3,r4,r5
:divdeu. RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=0 & Rc=1 & RT & A & B
{
	RT = A/B;
	cr0flags(RT);
}

# binutils-descr: "divde", XO(31,425,0,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB}
define pcodeop divdeOp;
# binutils: mytest.d: c: 7c 64 2b 52 divde r3,r4,r5
:divde RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=0 & Rc=0 & RT & A & B
{
	RT = A s/ B;
}

# binutils-descr: "divde.", XO(31,425,0,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB}
define pcodeop divdeDotOp;
# binutils: mytest.d: 10: 7c 64 2b 53 divde. r3,r4,r5
:divde. RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=0 & Rc=1 & RT & A & B
{
	RT = A s/ B;
	cr0flags(RT);
}

# binutils-descr: "dsn", X(31,483), XRT_MASK, E500MC, PPCNONE, {RA, RB}
define pcodeop dsnOp;
# ISA-info: dsn - Form "X" Page 710 Category "DS"
# binutils: e500mc.d: 3c: 7c 18 cb c6 dsn r24,r25
:dsn A,B is OP=31 & XOP_1_10=483 & A & B & BITS_21_25=0 & BIT_0=0
{
	dsnOp(A,B);
}

# binutils-descr: "lbdx", X(31,515), X_MASK, E500MC, PPCNONE, {RT, RA, RB}
define pcodeop lbdxOp;
# ISA-info: lbdx - Form "X" Page 708 Category "DS"
# binutils: e500mc.d: 68: 7c 01 14 06 lbdx r0,r1,r2
:lbdx RT,A,B is OP=31 & XOP_1_10=515 & RT & A & B & BIT_0=0
{
	RT = lbdxOp(RT,A,B);
}

# binutils-descr: "lhdx", X(31,547), X_MASK, E500MC, PPCNONE, {RT, RA, RB}
define pcodeop lhdxOp;
# ISA-info: lhdx - Form "X" Page 708 Category "DS"
# binutils: e500mc.d: 6c: 7d 8d 74 46 lhdx r12,r13,r14
:lhdx RT,A,B is OP=31 & XOP_1_10=547 & RT & A & B & BIT_0=0
{
	RT = lhdxOp(RT,A,B);
}

# binutils-descr: "lwdx", X(31,579), X_MASK, E500MC, PPCNONE, {RT, RA, RB}
define pcodeop lwdxOp;
# ISA-info: lwdx - Form "X" Page 708 Category "DS"
# binutils: e500mc.d: 70: 7c 64 2c 86 lwdx r3,r4,r5
:lwdx RT,A,B is OP=31 & XOP_1_10=579 & RT & A & B & BIT_0=0
{
	RT = lwdxOp(RT,A,B);
}

# binutils-descr: "lddx", X(31,611), X_MASK, E500MC, PPCNONE, {RT, RA, RB}
define pcodeop lddxOp;
# ISA-info: lddx - Form "X" Page 708 Category "DS"
# binutils: e500mc.d: 78: 7d f0 8c c6 lddx r15,r16,r17
:lddx RT,A,B is OP=31 & XOP_1_10=611 & RT & A & B & BIT_0=0
{
	RT = lddxOp(RT,A,B);
}

# ISA-info: lddx - Form "X" Page 50 Category "DS"
:ldx RT,RA_OR_ZERO,B is OP=31 & XOP_1_10=21 & RT & RA_OR_ZERO & B & BIT_0=0
{
	RT = *:8 (RA_OR_ZERO + B);
}

# binutils-descr: "stbdx", X(31,643), X_MASK, E500MC, PPCNONE, {RS, RA, RB}
define pcodeop stbdxOp;
# ISA-info: stbdx - Form "X" Page 709 Category "DS"
# binutils: e500mc.d: 7c: 7c c7 45 06 stbdx r6,r7,r8
:stbdx S,A,B is OP=31 & XOP_1_10=643 & S & A & B & BIT_0=0
{
	*[ram]:1 B = stbdxOp(S,A,B);
}

# binutils-descr: "sthdx", X(31,675), X_MASK, E500MC, PPCNONE, {RS, RA, RB}
define pcodeop sthdxOp;
# ISA-info: sthdx - Form "X" Page 709 Category "DS"
# binutils: e500mc.d: 80: 7e 53 a5 46 sthdx r18,r19,r20
:sthdx S,A,B is OP=31 & XOP_1_10=675 & S & A & B & BIT_0=0
{
	*[ram]:2 B = sthdxOp(S,A,B);
}

# binutils-descr: "stwdx", X(31,707), X_MASK, E500MC, PPCNONE, {RS, RA, RB}
define pcodeop stwdxOp;
# ISA-info: stwdx - Form "X" Page 709 Category "DS"
# binutils: e500mc.d: 84: 7d 2a 5d 86 stwdx r9,r10,r11
:stwdx S,A,B is OP=31 & XOP_1_10=707 & S & A & B & BIT_0=0
{
	*[ram]:4 B = stwdxOp(S,A,B);
}

# binutils-descr: "sthcx.", XRC(31,726,1), X_MASK, POWER7, PPCNONE, {RS, RA0, RB}
define pcodeop sthcxDotOp;
# ISA-info: sthcx. - Form "X" Page 692 Category "B"
# binutils: mytest.d: 14: 7c 64 2d ad sthcx. r3,r4,r5
:sthcx. S,RA_OR_ZERO,B is OP=31 & XOP_1_10=726 & Rc=1 & S & RA_OR_ZERO & B
{
	EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
	*[ram]:2 EA = sthcxDotOp(S,RA_OR_ZERO,B);
	# conditional store is modeled as always succeeding: CR0.EQ set
	setCrBit(cr0, 2, 1);
}

# binutils-descr: "stddx", X(31,739), X_MASK, E500MC, PPCNONE, {RS, RA, RB}
define pcodeop stddxOp;
# ISA-cmt: stddx - Store Doubleword with Decoration Indexed
# ISA-info: stddx - Form "X" Page 709 Category "DS"
# binutils: e500mc.d: 8c: 7e b6 bd c6 stddx r21,r22,r23
:stddx S,A,B is OP=31 & XOP_1_10=739 & S & A & B & BIT_0=0
{
	*[ram]:8 B = stddxOp(S,A,B);
}

# binutils-descr: "lfdpx", X(31,791), X_MASK, POWER6, POWER7, {FRT, RA, RB}
define pcodeop lfdpxOp;
# ISA-cmt: lfdpx - Load Floating-Point Double Pair Indexed
# ISA-info: lfdpx - Form "X" Page 131 Category "FP.out"
# binutils: power6.d: 30: 7d ae 7e 2e lfdpx f13,r14,r15
:lfdpx fT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=791 & fT & A & B & BIT_0=0
{
	fT = lfdpxOp(fT,A,B);
}

# binutils-descr: "lfddx", X(31,803), X_MASK, E500MC, PPCNONE, {FRT, RA, RB}
define pcodeop lfddxOp;
# ISA-cmt: lfddx - Load Floating Doubleword with Decoration Indexed
# ISA-info: lfddx - Form "X" Page 708 Category "DS"
# binutils: e500mc.d: 74: 7f 5b e6 46 lfddx f26,r27,r28
:lfddx fT,A,B is OP=31 & XOP_1_10=803 & fT & A & B & BIT_0=0
{
	fT = lfddxOp(fT,A,B);
}

# binutils-descr: "lhzcix", X(31,821), X_MASK, POWER6, PPCNONE, {RT, RA0, RB}
define pcodeop lhzcixOp;
# ISA-cmt: lhzcix - Load Halfword and Zero Caching Inhibited Indexed
# ISA-info: lhzcix - Form "X" Page 749 Category "S"
# binutils: mytest.d: 18: 7c 64 2e 6a lhzcix r3,r4,r5
# NOTE(review): updates RA in place (A = A + B) -- matches neighboring *cix forms; confirm intended.
:lhzcix RT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=821 & RT & A & B & BIT_0=0
{
	A = A + B;
	RT = *:2 A;
}

# binutils-descr: "lbzcix", X(31,853), X_MASK, POWER6, PPCNONE, {RT, RA0, RB}
define pcodeop lbzcixOp;
# ISA-cmt: lbzcix - Load Byte and Zero Caching Inhibited Indexed
# ISA-info: lbzcix - Form "X" Page 749 Category "S"
# binutils: mytest.d: 1c: 7c 64 2e aa lbzcix r3,r4,r5
:lbzcix RT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=853 & RT & A & B & BIT_0=0
{
	A = A + B;
	RT = *:1 A;
}

# binutils-descr: "ldcix", X(31,885), X_MASK, POWER6, PPCNONE, {RT, RA0, RB}
# ISA-cmt: ldcix - Load Doubleword Caching Inhibited Indexed
# ISA-info: ldcix - Form "X" Page 749 Category "S"
# binutils: mytest.d: 24: 7c 64 2e ea ldcix r3,r4,r5
:ldcix RT,A,B is $(NOTVLE) & OP=31 & XOP_1_10=885 & RT & A & B & BIT_0=0
{
	A = A + B;
	RT = *:8 A;
}

# binutils-descr: "divdeuo", XO(31,393,1,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB}
# binutils: mytest.d: 28: 7c 64 2f 12 divdeuo r3,r4,r5
:divdeuo RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=1 & Rc=0 & RT & A & B
{
	divOverflow(A,B);
	RT = A/B;
}

# binutils-descr: "divdeuo.", XO(31,393,1,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB}
define pcodeop divdeuoDotOp;
# binutils: mytest.d: 2c: 7c 64 2f 13 divdeuo. r3,r4,r5
:divdeuo. RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=393 & OE=1 & Rc=1 & RT & A & B
{
	divOverflow(A,B);
	RT = A/B;
	cr0flags(RT);
}

# binutils-descr: "stwcix", X(31,917), X_MASK, POWER6, PPCNONE, {RS, RA0, RB}
define pcodeop stwcixOp;
# ISA-cmt: stwcix - Store Word Caching Inhibited Indexed
# ISA-info: stwcix - Form "X" Page 750 Category "S"
# binutils: mytest.d: 30: 7c 64 2f 2a stwcix r3,r4,r5
:stwcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=917 & S & A & B & BIT_0=0
{
	A = A + B;
	*:4 A = S;
}

# binutils-descr: "stfdpx", X(31,919), X_MASK, POWER6, PPCNONE, {FRS, RA, RB}
define pcodeop stfdpxOp;
# ISA-cmt: stfdpx - Store Floating-Point Double Pair Indexed
# ISA-info: stfdpx - Form "X" Page 131 Category "FP.out"
# binutils: mytest.d: 34: 7c 64 2f 2e stfdpx f3,r4,r5
:stfdpx fS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XOP_1_10=919 & fS & RA_OR_ZERO & B & BIT_0=0
{
	EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
	*[ram]:8 EA = stfdpxOp(fS,RA_OR_ZERO,B);
}

# binutils-descr: "stfddx", X(31,931), X_MASK, E500MC, PPCNONE, {FRS, RA, RB}
define pcodeop stfddxOp;
# ISA-info: stfddx - Form "X" Page 709 Category "DS"
# binutils: e500mc.d: 88: 7f be ff 46 stfddx f29,r30,r31
:stfddx fS,A,B is OP=31 & XOP_1_10=931 & fS & A & B &
BIT_0=0 { *[ram]:8 B = stfddxOp(fS,A,B); } # binutils-descr: "divdeo", XO(31,425,1,0), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} define pcodeop divdeoOp; # binutils: mytest.d: 38: 7c 64 2f 52 divdeo r3,r4,r5 :divdeo RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=1 & Rc=0 & RT & A & B { divOverflow(A,B); RT = A s/ B; } # binutils-descr: "divdeo.", XO(31,425,1,1), XO_MASK, POWER7|PPCA2, PPCNONE, {RT, RA, RB} define pcodeop divdeoDotOp; # binutils: mytest.d: 3c: 7c 64 2f 53 divdeo. r3,r4,r5 :divdeo. RT,A,B is $(NOTVLE) & OP=31 & XOP_1_9=425 & OE=1 & Rc=1 & RT & A & B { divOverflow(A,B); RT = A s/ B; cr0flags(RT); } # binutils-descr: "sthcix", X(31,949), X_MASK, POWER6, PPCNONE, {RS, RA0, RB} define pcodeop sthcixOp; # ISA-info: sthcix - Form "X" Page 750 Category "S" # binutils: mytest.d: 40: 7c 64 2f 6a sthcix r3,r4,r5 :sthcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=949 & S & A & B & BIT_0=0 { A = A + B; *:2 A = S; } define pcodeop slbfeeDotOp; # ISA-info: slbfee - Form "X" Page 794 Category "?" :slbfee. 
RT,B is $(NOTVLE) & OP=31 & RT & BITS_16_20=0 & B & XOP_1_10=979 & BIT_0=1 { slbfeeDotOp(RT,B); }
# binutils-descr: "stbcix", X(31,981), X_MASK, POWER6, PPCNONE, {RS, RA0, RB}
define pcodeop stbcixOp;
# ISA-info: stbcix - Form "X" Page 750 Category "S"
# binutils: mytest.d: 44: 7c 64 2f aa stbcix r3,r4,r5
# FIX: store the source register S (RS), not the address register A.
# Previously this read "*:1 A = A;" which stored the (updated) address
# byte instead of RS; the sibling stwcix/sthcix/stdcix all store S.
# NOTE(review): this *cix family writes the EA back into A (RA) before the
# store, i.e. update-form behavior — consistent with the siblings here, but
# confirm against the ISA, which defines no update form for these.
:stbcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=981 & S & A & B & BIT_0=0 { A = A + B; *:1 A = S; }
# binutils-descr: "stdcix", X(31,1013), X_MASK, POWER6, PPCNONE, {RS, RA0, RB}
define pcodeop stdcixOp;
# ISA-info: stdcix - Form "X" Page 750 Category "S"
# binutils: mytest.d: 48: 7c 64 2f ea stdcix r3,r4,r5
:stdcix S,A,B is $(NOTVLE) & OP=31 & XOP_1_10=1013 & S & A & B & BIT_0=0 { A = A + B; *:8 A = S; }
# binutils-descr: "lq", OP(56), OP_MASK, POWER4, PPC476, {RTQ, DQ, RAQ}
# ISA-cmt: lq - Load Quadword
# ISA-info: lq - Form "DQ" Page 751 Category "LSQ"
# binutils: power4.d: +0: e0 83 00 00 lq r4,0\(r3\)
# binutils: power4.d: +4: e0 83 00 00 lq r4,0\(r3\)
:lq RT,A,DQ is $(NOTVLE) & OP=56 & RT & Dp & A & DQ & BITS_0_3=0 & regp [regpset = Dp+1;]
{
  ea:$(REGISTER_SIZE) = A + sext(DQ:2 << 4);
@if ENDIAN == "big"
  RT = *:$(REGISTER_SIZE) ea;
  regp = *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE));
@else
  RT = *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE));
  regp = *:$(REGISTER_SIZE) ea;
@endif
}
define pcodeop lvepxOp;
:lvepx RT,A,B is OP=31 & RT & A & B & XOP_1_10=295 & BIT_0=0 { RT = lvepxOp(RT,A,B); }
define pcodeop lvepxlOp;
:lvepxl RT,A,B is OP=31 & RT & A & B & XOP_1_10=263 & BIT_0=0 { RT = lvepxlOp(RT,A,B); }
# binutils-descr: "lfdp", OP(57), OP_MASK, POWER6, POWER7, {FRT, D, RA0}
define pcodeop lfdpOp;
# ISA-cmt: lfdp - Load Floating-Point Double Pair
# ISA-info: lfdp - Form "DS" Page 131 Category "FP.out"
# binutils: NO-EXAMPLE - lfdp
:lfdp fT,A,DS is $(NOTVLE) & OP=57 & fT & A & DS & BITS_0_1=0 { fT = lfdpOp(fT,A,DS:2); }
# binutils-descr: "dadd", XRC(59,2,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB}
define pcodeop daddOp;
# ISA-cmt: dadd - DFP Add
# binutils: power6.d:
34: ee 11 90 04 dadd f16,f17,f18 # binutils: power7.d: 98: ee 11 90 04 dadd f16,f17,f18 :dadd fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=2 & Rc=0 & fT & fA & fB { daddOp(fT,fA,fB); } # binutils-descr: "dadd.", XRC(59,2,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop daddDotOp; # ISA-cmt: dadd. - DFP Add Rc # binutils: mytest.d: 50: ec 43 20 05 dadd. f2,f3,f4 :dadd. fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=2 & Rc=1 & fT & fA & fB { daddDotOp(fT,fA,fB); } # binutils-descr: "dqua", ZRC(59,3,0), Z2_MASK, POWER6, PPCNONE, {FRT,FRA,FRB,RMC} define pcodeop dquaOp; # ISA-cmt: dqua - DFP Quantize # binutils: mytest.d: 54: ec 22 18 06 dqua f1,f2,f3,0 :dqua fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=3 & Rc=0 & fT & fA & fB & RMC { dquaOp(fT,fA,fB); } # binutils-descr: "dqua.", ZRC(59,3,1), Z2_MASK, POWER6, PPCNONE, {FRT,FRA,FRB,RMC} define pcodeop dquaDotOp; # ISA-cmt: dqua. - DFP Quantize Rc # binutils: mytest.d: 58: ec 22 18 07 dqua. f1,f2,f3,0 :dqua. fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=3 & Rc=1 & fT & fA & fB & RMC { dquaDotOp(fT,fA,fB); } # binutils-descr: "dmul", XRC(59,34,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dmulOp; # ISA-cmt: dmul - DFP Multiply # binutils: mytest.d: 5c: ec 43 20 44 dmul f2,f3,f4 :dmul fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=34 & Rc=0 & fT & fA & fB { dmulOp(fT,fA,fB); } # binutils-descr: "dmul.", XRC(59,34,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dmulDotOp; # ISA-cmt: dmul. - DFP Multiply Rc # binutils: mytest.d: 60: ec 43 20 45 dmul. f2,f3,f4 :dmul. 
fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=34 & Rc=1 & fT & fA & fB { dmulDotOp(fT,fA,fB); } # binutils-descr: "drrnd", ZRC(59,35,0), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} define pcodeop drrndOp; # ISA-cmt: drrnd - DFP Reround # binutils: mytest.d: 64: ec 43 20 46 drrnd f2,f3,f4,0 :drrnd fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=35 & Rc=0 & fT & fA & fB & RMC { drrndOp(fT,fA,fB); } # binutils-descr: "drrnd.", ZRC(59,35,1), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} define pcodeop drrndDotOp; # ISA-cmt: drrnd. - DFP Reround Rc # binutils: mytest.d: 68: ec 43 20 47 drrnd. f2,f3,f4,0 :drrnd. fT,fA,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=35 & Rc=1 & fT & fA & fB & RMC { drrndDotOp(fT,fA,fB); } # binutils-descr: "dscli", ZRC(59,66,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscliOp; # ISA-cmt: dscli - DFP Shift Significand Left Immediate # binutils: mytest.d: 6c: ec 43 10 84 dscli f2,f3,4 # Y {OP 0 5 {}} {fT 6 10 {}} {fA 11 15 {}} {SH16 16 21 {}} {XOP_1_9 22 30 {}} {Rc 31 31 {}} # X 00--------------------------05 06---------------------10 11---------------------15 16--------------------------21 22-----------------------------------------30 31-31 # X --------OP=111011(59)---------|-----------fT------------|-----------fA------------|-------------SH16-------------|-------------XOP_1_9=1000010(66)-------------|Rc=0-| :dscli fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=66 & Rc=0 & fT & fA & SH16 { dscliOp(fT,fA); } # binutils-descr: "dscli.", ZRC(59,66,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscliDotOp; # ISA-cmt: dscli. - DFP Shift Significand Left Immediate Rc # binutils: mytest.d: 70: ec 43 10 85 dscli. f2,f3,4 :dscli. 
fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=66 & Rc=1 & fT & fA & SH16 { dscliDotOp(fT,fA); } # binutils-descr: "dquai", ZRC(59,67,0), Z2_MASK, POWER6, PPCNONE, {TE, FRT,FRB,RMC} define pcodeop dquaiOp; # ISA-cmt: dquai - DFP Quantize Immediate # binutils: mytest.d: 74: ec 62 20 86 dquai 2,f3,f4,0 :dquai fT,BITS_16_20,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_16_20 & fB & RMC & XOP_1_8=67 & Rc=0 { dquaiOp(fT,fB); } # binutils-descr: "dquai.", ZRC(59,67,1), Z2_MASK, POWER6, PPCNONE, {TE, FRT,FRB,RMC} define pcodeop dquaiDotOp; # ISA-cmt: dquai. - DFP Quantize Immediate Rc # binutils: mytest.d: 78: ec 62 20 87 dquai. 2,f3,f4,0 :dquai. fT,BITS_16_20,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_16_20 & fB & RMC & XOP_1_8=67 & Rc=1 { dquaiDotOp(fT,fB); } # binutils-descr: "dscri", ZRC(59,98,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscriOp; # ISA-cmt: dscri - DFP Shift Significand Right Immediate # binutils: mytest.d: 7c: ec 43 10 c4 dscri f2,f3,4 :dscri fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=98 & Rc=0 & fT & fA & SH16 { dscriOp(fT,fA); } # binutils-descr: "dscri.", ZRC(59,98,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscriDotOp; # ISA-cmt: dscri. - DFP Shift Significand Right Immediate Rc # binutils: mytest.d: 80: ec 43 10 c5 dscri. f2,f3,4 :dscri. fT,fA,SH16 is $(NOTVLE) & OP=59 & XOP_1_9=98 & Rc=1 & fT & fA & SH16 { dscriDotOp(fT,fA); } # binutils-descr: "drintx", ZRC(59,99,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} define pcodeop drintxOp; # ISA-cmt: drintx - DFP Round To FP Integer With Inexact # binutils: mytest.d: 84: ec 61 20 c6 drintx 1,f3,f4,0 :drintx fT,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=0 { drintxOp(fT,fB); } # binutils-descr: "drintx.", ZRC(59,99,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} define pcodeop drintxDotOp; # ISA-cmt: drintx - DFP Round To FP Integer With Inexact # binutils: mytest.d: 84: ec 61 20 c6 drintx 1,f3,f4,0 :drintx. 
fT,fB,RMC is $(NOTVLE) & OP=59 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=1 { drintxDotOp(fT,fB); } # binutils-descr: "dcmpo", X(59,130), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} define pcodeop dcmpoOp; # ISA-cmt: dcmpo - DFP Compare Ordered # ISA-info: dcmpo - Form "X" Page 179 Category "DFP" # binutils: mytest.d: 8c: ed 03 21 04 dcmpo cr2,f3,f4 :dcmpo CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=130 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpoOp(CRFD,fA,fB); } # binutils-descr: "dtstex", X(59,162), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} define pcodeop dtstexOp; # ISA-cmt: dtstex - DFP Test Exponent # ISA-info: dtstex - Form "X" Page 181 Category "DFP" # binutils: mytest.d: 90: ed 03 21 44 dtstex cr2,f3,f4 :dtstex CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=162 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstexOp(CRFD,fA,fB); } # binutils-descr: "dtstdc", Z(59,194), Z_MASK, POWER6, PPCNONE, {BF, FRA, DCM} define pcodeop dtstdcOp; # ISA-cmt: dtstdc - DFP Test Data Class # ISA-info: dtstdc - Form "Z23" Page 180 Category "DFP" # binutils: mytest.d: 94: ed 03 11 84 dtstdc cr2,f3,4 :dtstdc CRFD,fA,DCM is $(NOTVLE) & OP=59 & XOP_1_9=194 & CRFD & fA & DCM & BITS_21_22=0 & BIT_0=0 { dtstdcOp(CRFD,fA); } # binutils-descr: "dtstdg", Z(59,226), Z_MASK, POWER6, PPCNONE, {BF, FRA, DGM} define pcodeop dtstdgOp; # ISA-cmt: dtstdg - DFP Test Data Group # ISA-info: dtstdg - Form "Z23" Page 180 Category "DFP" # binutils: mytest.d: 98: ed 03 11 c4 dtstdg cr2,f3,4 :dtstdg CRFD,fA,DGM is $(NOTVLE) & OP=59 & XOP_1_9=226 & CRFD & fA & DGM & BITS_21_22=0 & BIT_0=0 { dtstdgOp(CRFD,fA); } # binutils-descr: "drintn", ZRC(59,227,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} define pcodeop drintnOp; # ISA-cmt: drintn - DFP Round To FP Integer Without Inexact # binutils: mytest.d: 9c: ec 61 21 c6 drintn 1,f3,f4,0 :drintn fT,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=227 & Rc=0 & BIT_16 & fT & fB & RMC & BITS_17_20=0 { drintnOp(fT,fB); } # binutils-descr: "drintn.", 
ZRC(59,227,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} define pcodeop drintnDotOp; # ISA-cmt: drintn. - DFP Round To FP Integer Without Inexact Rc # binutils: mytest.d: a0: ec 61 21 c7 drintn. 1,f3,f4,0 :drintn. fT,fB,RMC is $(NOTVLE) & OP=59 & XOP_1_8=227 & Rc=1 & BIT_16 & fT & fB & RMC & BITS_17_20=0 { drintnDotOp(fT,fB); } # binutils-descr: "dctdp", XRC(59,258,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctdpOp; # ISA-cmt: dctdp - DFP Convert To DFP Long # binutils: mytest.d: a4: ec 40 1a 04 dctdp f2,f3 :dctdp fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=258 & Rc=0 & fT & fB & BITS_16_20=0 { dctdpOp(fT,fB); } # binutils-descr: "dctdp.", XRC(59,258,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctdpDotOp; # ISA-cmt: dctdp. - DFP Convert To DFP Long Rc # binutils: mytest.d: a8: ec 40 1a 05 dctdp. f2,f3 :dctdp. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=258 & Rc=1 & fT & fB & BITS_16_20=0 { dctdpDotOp(fT,fB); } # binutils-descr: "dctfix", XRC(59,290,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctfixOp; # ISA-cmt: dctfix - DFP Convert To Fixed # binutils: mytest.d: ac: ec 40 1a 44 dctfix f2,f3 :dctfix fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=290 & Rc=0 & fT & fB & BITS_16_20=0 { dctfixOp(fT,fB); } # binutils-descr: "dctfix.", XRC(59,290,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctfixDotOp; # ISA-cmt: dctfix. - DFP Convert To Fixed Rc # binutils: mytest.d: b0: ec 40 1a 45 dctfix. f2,f3 :dctfix. 
fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=290 & Rc=1 & fT & fB & BITS_16_20=0 { dctfixDotOp(fT,fB); } # binutils-descr: "ddedpd", XRC(59,322,0), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} define pcodeop ddedpdOp; # ISA-cmt: ddedpd - DFP Decode DPD To BCD # binutils: mytest.d: b4: ec 70 22 84 ddedpd 2,f3,f4 :ddedpd fT,SP,fB is $(NOTVLE) & OP=59 & fT & SP & BITS_16_18=0 & fB & XOP_1_10=322 & Rc=0 { ddedpdOp(fT,fB); } # & BITS_16_18=0 # binutils-descr: "ddedpd.", XRC(59,322,1), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} define pcodeop ddedpdDotOp; # ISA-cmt: ddedpd. - DFP Decode DPD To BCD Rc # binutils: mytest.d: b8: ec 70 22 85 ddedpd. 2,f3,f4 :ddedpd. fT,SP,fB is $(NOTVLE) & OP=59 & fT & SP & BITS_16_18=0 & fB & XOP_1_10=322 & Rc=1 { ddedpdDotOp(fT,fB); } # & BITS_16_18=0 # binutils-descr: "dxex", XRC(59,354,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dxexOp; # ISA-cmt: dxex - DFP Extract Biased Exponent # binutils: mytest.d: bc: ec 40 1a c4 dxex f2,f3 :dxex fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=354 & Rc=0 & fT & fB & BITS_16_20=0 { dxexOp(fT,fB); } # binutils-descr: "dxex.", XRC(59,354,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dxexDotOp; # ISA-cmt: dxex. - DFP Extract Biased Exponent Rc # binutils: mytest.d: c0: ec 40 1a c5 dxex. f2,f3 :dxex. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=354 & Rc=1 & fT & fB & BITS_16_20=0 { dxexDotOp(fT,fB); } # binutils-descr: "dsub", XRC(59,514,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dsubOp; # ISA-cmt: dsub - DFP Subtract # binutils: mytest.d: c4: ec 43 24 04 dsub f2,f3,f4 :dsub fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=514 & Rc=0 & fT & fA & fB { dsubOp(fT,fA,fB); } # binutils-descr: "dsub.", XRC(59,514,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dsubDotOp; # ISA-cmt: dsub. - DFP Subtract Rc # binutils: mytest.d: c8: ec 43 24 05 dsub. f2,f3,f4 :dsub. 
fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=514 & Rc=1 & fT & fA & fB { dsubDotOp(fT,fA,fB); } # binutils-descr: "ddiv", XRC(59,546,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop ddivOp; # ISA-cmt: ddiv - DFP Divide # binutils: mytest.d: cc: ec 43 24 44 ddiv f2,f3,f4 :ddiv fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=546 & Rc=0 & fT & fA & fB { ddivOp(fT,fA,fB); } # binutils-descr: "ddiv.", XRC(59,546,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop ddivDotOp; # ISA-cmt: ddiv. - DFP Divide Rc # binutils: mytest.d: d0: ec 43 24 45 ddiv. f2,f3,f4 :ddiv. fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=546 & Rc=1 & fT & fA & fB { ddivDotOp(fT,fA,fB); } # binutils-descr: "dcmpu", X(59,642), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} define pcodeop dcmpuOp; # ISA-cmt: dcmpu - DFP Compare Unordered # ISA-info: dcmpu - Form "X" Page 178 Category "DFP" # binutils: mytest.d: d4: ed 03 25 04 dcmpu cr2,f3,f4 :dcmpu CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=642 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpuOp(CRFD,fA,fB); } # binutils-descr: "dtstsf", X(59,674), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} define pcodeop dtstsfOp; # ISA-cmt: dtstsf - DFP Test Significance # ISA-info: dtstsf - Form "X" Page 182 Category "DFP" # binutils: mytest.d: d8: ed 03 25 44 dtstsf cr2,f3,f4 :dtstsf CRFD,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=674 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstsfOp(CRFD,fA,fB); } # binutils-descr: "drsp", XRC(59,770,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop drspOp; # ISA-cmt: drsp - DFP Round To DFP Short # binutils: mytest.d: dc: ec 40 1e 04 drsp f2,f3 :drsp fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=770 & Rc=0 & fT & fB & BITS_16_20=0 { drspOp(fT,fB); } # binutils-descr: "drsp.", XRC(59,770,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop drspDotOp; # ISA-cmt: drsp. - DFP Round To DFP Short Rc # binutils: mytest.d: e0: ec 40 1e 05 drsp. f2,f3 :drsp. 
fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=770 & Rc=1 & fT & fB & BITS_16_20=0 { drspDotOp(fT,fB); } # binutils-descr: "dcffix", XRC(59,802,0), X_MASK|FRA_MASK, POWER7, PPCNONE, {FRT, FRB} define pcodeop dcffixOp; # ISA-cmt: dcffix - DFP Convert From Fixed # binutils: power7.d: 144: ed 40 66 44 dcffix f10,f12 :dcffix fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=802 & Rc=0 & fT & fB & BITS_16_20=0 { dcffixOp(fT,fB); } # binutils-descr: "dcffix.", XRC(59,802,1), X_MASK|FRA_MASK, POWER7, PPCNONE, {FRT, FRB} define pcodeop dcffixDotOp; # ISA-cmt: dcffix. - DFP Convert From Fixed Rc # binutils: mytest.d: e4: ec 40 1e 45 dcffix. f2,f3 :dcffix. fT,fB is $(NOTVLE) & OP=59 & XOP_1_10=802 & Rc=1 & fT & fB & BITS_16_20=0 { dcffixDotOp(fT,fB); } # binutils-descr: "denbcd", XRC(59,834,0), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} define pcodeop denbcdOp; # ISA-cmt: denbcd - DFP Encode BCD To DPD # binutils: mytest.d: e8: ec 70 26 84 denbcd 1,f3,f4 :denbcd fT,fB is $(NOTVLE) & OP=59 & fT & BIT_20 & BITS_16_19=0 & fB & XOP_1_10=834 & Rc=0 { denbcdOp(fT,fB); } # & BITS_16_19=0 # binutils-descr: "denbcd.", XRC(59,834,1), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} define pcodeop denbcdDotOp; # ISA-cmt: denbcd. - DFP Encode BCD To DPD Rc # binutils: mytest.d: ec: ec 70 26 85 denbcd. 1,f3,f4 :denbcd. fT,fB is $(NOTVLE) & OP=59 & fT & BIT_20 & BITS_16_19=0 & fB & XOP_1_10=834 & Rc=1 { denbcdDotOp(fT,fB); } # & BITS_16_19=0 # binutils-descr: "diex", XRC(59,866,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop diexOp; # ISA-cmt: diex - DFP Insert Biased Exponent # binutils: mytest.d: f4: ec 43 26 c4 diex f2,f3,f4 :diex fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=866 & Rc=0 & fT & fA & fB { diexOp(fT,fA,fB); } # binutils-descr: "diex.", XRC(59,866,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop diexDotOp; # ISA-cmt: diex. - DFP Insert Biased Exponent Rc # binutils: mytest.d: f8: ec 43 26 c5 diex. f2,f3,f4 :diex. 
fT,fA,fB is $(NOTVLE) & OP=59 & XOP_1_10=866 & Rc=1 & fT & fA & fB { diexDotOp(fT,fA,fB); } # binutils-descr: "stfdp", OP(61), OP_MASK, POWER6, PPCNONE, {FRT, D, RA0} define pcodeop stfdpOp; # ISA-cmt: stfdp - Store Floating-Point Double Pair # ISA-info: stfdp - Form "DS" Page 131 Category "FP.out" # binutils: NO-EXAMPLE - stfdp :stfdp fS,RA_OR_ZERO,DS is $(NOTVLE) & OP=61 & fS & RA_OR_ZERO & DS & BITS_0_1=0 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + sext(DS:2 << 2); *[ram]:8 EA = stfdpOp(fS,RA_OR_ZERO,DS:2); } # binutils-descr: "stq", DSO(62,2), DS_MASK, POWER4, PPC476, {RSQ, DS, RA0} # ISA-cmt: stq - Store Quadword # ISA-info: stq - Form "DS" Page 751 Category "LSQ" # binutils: power4.d: +50: f8 c7 00 02 stq r6,0\(r7\) # binutils: power4.d: +54: f8 c7 00 12 stq r6,16\(r7\) # binutils: power4.d: +58: f8 c7 ff f2 stq r6,-16\(r7\) # binutils: power4.d: +5c: f8 c7 80 02 stq r6,-32768\(r7\) # binutils: power4.d: +60: f8 c7 7f f2 stq r6,32752\(r7\) :stq RS,RA_OR_ZERO,DS is $(NOTVLE) & OP=62 & RS & Dp & RA_OR_ZERO & DS & BITS_0_1=2 & regp [regpset = Dp+1;] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + sext(DS:2 << 2); @if ENDIAN == "big" *:$(REGISTER_SIZE) ea = RS; *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)) = regp; @else *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)) = RS; *:$(REGISTER_SIZE) ea = regp; @endif } define pcodeop stvepxOp; :stvepx S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=807 & BIT_0=0 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:16 EA = stvepxOp(S, RA_OR_ZERO, B); } define pcodeop stvepxlOp; :stvepxl S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=775 & BIT_0=0 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:16 EA = stvepxlOp(S, RA_OR_ZERO, B); } # binutils-descr: "dquaq", ZRC(63,3,0), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} define pcodeop dquaqOp; # ISA-cmt: dquaq - DFP Quantize Quad # binutils: mytest.d: 100: fc 43 24 06 dquaq f2,f3,f4,2 :dquaq fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=3 & Rc=0 { dquaqOp(); } # 
binutils-descr: "dquaq.", ZRC(63,3,1), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} define pcodeop dquaqDotOp; # ISA-cmt: dquaq. - DFP Quantize Quad Rc # binutils: mytest.d: 104: fc 43 24 07 dquaq. f2,f3,f4,2 :dquaq. fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=3 & Rc=1 { dquaqDotOp(); } # binutils-descr: "dmulq", XRC(63,34,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dmulqOp; # ISA-cmt: dmulq - DFP Multiply Quad # binutils: mytest.d: 108: fc 43 20 44 dmulq f2,f3,f4 :dmulq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=34 & Rc=0 & fT & fA & fB { dmulqOp(fT,fA,fB); } # binutils-descr: "dmulq.", XRC(63,34,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dmulqDotOp; # ISA-cmt: dmulq. - DFP Multiply Quad Rc # binutils: mytest.d: 10c: fc 43 20 45 dmulq. f2,f3,f4 :dmulq. fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=34 & Rc=1 & fT & fA & fB { dmulqDotOp(fT,fA,fB); } # binutils-descr: "drrndq", ZRC(63,35,0), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} define pcodeop drrndqOp; # ISA-cmt: drrndq - DFP Reround Quad # binutils: mytest.d: 110: fc 43 22 46 drrndq f2,f3,f4,1 :drrndq fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=35 & Rc=0 { drrndqOp(fT,fA,fB); } # binutils-descr: "drrndq.", ZRC(63,35,1), Z2_MASK, POWER6, PPCNONE, {FRT, FRA, FRB, RMC} define pcodeop drrndqDotOp; # ISA-cmt: drrndq - DFP Reround Quad # binutils: mytest.d: 110: fc 43 22 46 drrndq f2,f3,f4,1 :drrndq. 
fT,fA,fB,RMC is $(NOTVLE) & OP=63 & fT & fA & fB & RMC & XOP_1_8=35 & Rc=1 { drrndqDotOp(fT,fA,fB); } # binutils-descr: "dscliq", ZRC(63,66,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscliqOp; # ISA-cmt: dscliq - DFP Shift Significand Left Immediate Quad # binutils: mytest.d: 118: fc 43 10 84 dscliq f2,f3,4 :dscliq fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=66 & Rc=0 { dscliqOp(fT,fA); } # binutils-descr: "dscliq.", ZRC(63,66,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscliqDotOp; # ISA-cmt: dscliq. - DFP Shift Significand Left Immediate Quad Rc # binutils: mytest.d: 11c: fc 43 10 85 dscliq. f2,f3,4 :dscliq. fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=66 & Rc=1 { dscliqDotOp(fT,fA); } # binutils-descr: "dquaiq", ZRC(63,67,0), Z2_MASK, POWER6, PPCNONE, {TE, FRT, FRB, RMC} define pcodeop dquaiqOp; # ISA-cmt: dquaiq - DFP Quantize Immediate Quad # binutils: mytest.d: 120: fc 62 24 86 dquaiq 2,f3,f4,2 :dquaiq fT,A_BITS,fB,RMC is $(NOTVLE) & OP=63 & fT & A_BITS & fB & RMC & XOP_1_8=67 & Rc=0 { dquaiqOp(fT,fB); } # binutils-descr: "dquaiq.", ZRC(63,67,1), Z2_MASK, POWER6, PPCNONE, {TE, FRT, FRB, RMC} define pcodeop dquaiqDotOp; # ISA-cmt: dquaiq. - DFP Quantize Immediate Quad Rc # binutils: mytest.d: 124: fc 62 24 87 dquaiq. 2,f3,f4,2 :dquaiq. fT,A_BITS,fB,RMC is $(NOTVLE) & OP=63 & fT & A_BITS & fB & RMC & XOP_1_8=67 & Rc=1 { dquaiqDotOp(fT,fB); } # binutils-descr: "dscriq", ZRC(63,98,0), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscriqOp; # ISA-cmt: dscriq - DFP Shift Significand Right Immediate Quad # binutils: mytest.d: 128: fc 43 10 c4 dscriq f2,f3,4 :dscriq fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=98 & Rc=0 { dscriqOp(); } # binutils-descr: "dscriq.", ZRC(63,98,1), Z_MASK, POWER6, PPCNONE, {FRT, FRA, SH16} define pcodeop dscriqDotOp; # ISA-cmt: dscriq. - DFP Shift Significand Right Immediate Quad Rc # binutils: mytest.d: 12c: fc 43 10 c5 dscriq. f2,f3,4 :dscriq. 
fT,fA,SH16 is $(NOTVLE) & OP=63 & fT & fA & SH16 & XOP_1_9=98 & Rc=1 { dscriqDotOp(fT,fA); }
# FIX (above): dscriqDotOp was invoked with no operands, hiding the data
# flow from the decompiler; pass (fT,fA) to match dscriDotOp(fT,fA).
# binutils-descr: "drintxq", ZRC(63,99,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC}
define pcodeop drintxqOp;
# ISA-cmt: drintxq - DFP Round To FP Integer With Inexact Quad
# binutils: mytest.d: 130: fc 61 22 c6 drintxq 1,f3,f4,1
# FIX: pass (fT,fB) to the pcodeop — the non-quad drintx does
# drintxOp(fT,fB) — so the target/source registers appear in the p-code.
:drintxq fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=0 { drintxqOp(fT,fB); }
# binutils-descr: "drintxq.", ZRC(63,99,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC}
define pcodeop drintxqDotOp;
# ISA-cmt: drintxq. - DFP Round To FP Integer With Inexact Quad Rc
# binutils: mytest.d: 134: fc 61 22 c7 drintxq. 1,f3,f4,1
:drintxq. fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & BIT_16 & fB & RMC & XOP_1_8=99 & Rc=1 { drintxqDotOp(fT,fB); }
# binutils-descr: "dcmpoq", X(63,130), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB}
define pcodeop dcmpoqOp;
# ISA-cmt: dcmpoq - DFP Compare Ordered Quad
# ISA-info: dcmpoq - Form "X" Page 179 Category "DFP"
# binutils: mytest.d: 138: fd 03 21 04 dcmpoq cr2,f3,f4
:dcmpoq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=130 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpoqOp(CRFD,fA,fB); }
# binutils-descr: "dtstexq", X(63,162), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB}
define pcodeop dtstexqOp;
# ISA-cmt: dtstexq - DFP Test Exponent Quad
# ISA-info: dtstexq - Form "X" Page 181 Category "DFP"
# binutils: mytest.d: 144: fd 03 21 44 dtstexq cr2,f3,f4
:dtstexq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=162 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstexqOp(CRFD,fA,fB); }
# binutils-descr: "dtstdcq", Z(63,194), Z_MASK, POWER6, PPCNONE, {BF, FRA, DCM}
define pcodeop dtstdcqOp;
# ISA-cmt: dtstdcq - DFP Test Data Class Quad
# ISA-info: dtstdcq - Form "Z22" Page 180 Category "DFP"
# binutils: mytest.d: 26c: fc 82 0d 84 dtstdcq cr1,f2,3
:dtstdcq BF2,fA,DCM is $(NOTVLE) & OP=63 & BF2 & BITS_21_22=0 & fA & DCM & XOP_1_9=194 & BIT_0=0 { dtstdcqOp(fA); }
# binutils-descr: "dtstdgq", Z(63,226),
Z_MASK, POWER6, PPCNONE, {BF, FRA, DGM} define pcodeop dtstdgqOp; # ISA-cmt: dtstdgq - DFP Test Data Group Quad # ISA-info: dtstdgq - Form "Z22" Page 180 Category "DFP" # binutils: mytest.d: 148: fd 03 11 c4 dtstdgq cr2,f3,4 :dtstdgq BF2,fA,DGM is $(NOTVLE) & OP=63 & BF2 & BITS_21_22=0 & fA & DGM & XOP_1_9=226 & BIT_0=0 { dtstdgqOp(); } # binutils-descr: "drintnq", ZRC(63,227,0), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} define pcodeop drintnqOp; # ISA-cmt: drintnq - DFP Round To FP Integer Without Inexact Quad # binutils: mytest.d: 14c: fc 61 23 c6 drintnq 1,f3,f4,1 :drintnq fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & MSR_L & fB & RMC & XOP_1_8=227 & Rc=0 { drintnqOp(); } # binutils-descr: "drintnq.", ZRC(63,227,1), Z2_MASK, POWER6, PPCNONE, {R, FRT, FRB, RMC} define pcodeop drintnqDotOp; # ISA-cmt: drintnq. - DFP Round To FP Integer Without Inexact Quad Rc # binutils: mytest.d: 150: fc 61 23 c7 drintnq. 1,f3,f4,1 :drintnq. fT,fB,RMC is $(NOTVLE) & OP=63 & fT & BITS_17_20=0 & MSR_L & fB & RMC & XOP_1_8=227 & Rc=1 { drintnqDotOp(); } # binutils-descr: "dctqpq", XRC(63,258,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctqpqOp; # ISA-cmt: dctqpq - DFP Convert To DFP Extended # binutils: mytest.d: 154: fc 40 1a 04 dctqpq f2,f3 :dctqpq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=258 & Rc=0 & fT & fB & BITS_16_20=0 { dctqpqOp(fT,fB); } # binutils-descr: "dctqpq.", XRC(63,258,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctqpqDotOp; # ISA-cmt: dctqpq. - DFP Convert To DFP Extended Rc # binutils: mytest.d: 158: fc 40 1a 05 dctqpq. f2,f3 :dctqpq. 
fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=258 & Rc=1 & fT & fB & BITS_16_20=0 { dctqpqDotOp(fT,fB); } # binutils-descr: "dctfixq", XRC(63,290,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctfixqOp; # ISA-cmt: dctfixq - DFP Convert To Fixed Quad # binutils: mytest.d: 15c: fc 40 1a 44 dctfixq f2,f3 :dctfixq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=290 & Rc=0 & fT & fB & BITS_16_20=0 { dctfixqOp(fT,fB); } # binutils-descr: "dctfixq.", XRC(63,290,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dctfixqDotOp; # ISA-cmt: dctfixq. - DFP Convert To Fixed Quad Rc # binutils: mytest.d: 160: fc 40 1a 45 dctfixq. f2,f3 :dctfixq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=290 & Rc=1 & fT & fB & BITS_16_20=0 { dctfixqDotOp(fT,fB); } # binutils-descr: "ddedpdq", XRC(63,322,0), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} define pcodeop ddedpdqOp; # ISA-cmt: ddedpdq - DFP Decode DPD To BCD Quad # binutils: mytest.d: 164: fc 70 22 84 ddedpdq 2,f3,f4 :ddedpdq fT,SP,fB is $(NOTVLE) & OP=63 & XOP_1_10=322 & Rc=0 & fT & fB & SP & BITS_16_18=0 { ddedpdqOp(fT,fB); } # binutils-descr: "ddedpdq.", XRC(63,322,1), X_MASK, POWER6, PPCNONE, {SP, FRT, FRB} define pcodeop ddedpdqDotOp; # ISA-cmt: ddedpdq. - DFP Decode DPD To BCD Quad Rc # binutils: mytest.d: 168: fc 70 22 85 ddedpdq. 2,f3,f4 :ddedpdq. fT,SP,fB is $(NOTVLE) & OP=63 & XOP_1_10=322 & Rc=1 & fT & fB & SP & BITS_16_18=0 { ddedpdqDotOp(fT,fB); } # binutils-descr: "dxexq", XRC(63,354,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dxexqOp; # ISA-cmt: dxexq - DFP Extract Biased Exponent Quad # binutils: mytest.d: 16c: fc 40 1a c4 dxexq f2,f3 :dxexq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=354 & Rc=0 & fT & fB & BITS_16_20=0 { dxexqOp(fT,fB); } # binutils-descr: "dxexq.", XRC(63,354,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dxexqDotOp; # ISA-cmt: dxexq. - DFP Extract Biased Exponent Quad Rc # binutils: mytest.d: 170: fc 40 1a c5 dxexq. f2,f3 :dxexq. 
fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=354 & Rc=1 & fT & fB & BITS_16_20=0 { dxexqDotOp(fT,fB); } # binutils-descr: "dsubq", XRC(63,514,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dsubqOp; # ISA-cmt: dsubq - DFP Subtract Quad # binutils: mytest.d: 174: fc 43 24 04 dsubq f2,f3,f4 :dsubq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=514 & Rc=0 & fT & fA & fB { dsubqOp(fT,fA,fB); } # binutils-descr: "dsubq.", XRC(63,514,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop dsubqDotOp; # ISA-cmt: dsubq. - DFP Subtract Quad Rc # binutils: mytest.d: 178: fc 43 24 05 dsubq. f2,f3,f4 :dsubq. fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=514 & Rc=1 & fT & fA & fB { dsubqDotOp(fT,fA,fB); } # binutils-descr: "ddivq", XRC(63,546,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop ddivqOp; # ISA-cmt: ddivq - DFP Divide Quad # binutils: mytest.d: 17c: fc 43 24 44 ddivq f2,f3,f4 :ddivq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=546 & Rc=0 & fT & fA & fB { ddivqOp(fT,fA,fB); } # binutils-descr: "ddivq.", XRC(63,546,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop ddivqDotOp; # ISA-cmt: ddivq. - DFP Divide Quad Rc # binutils: mytest.d: 180: fc 43 24 45 ddivq. f2,f3,f4 :ddivq. 
fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=546 & Rc=1 & fT & fA & fB { ddivqDotOp(fT,fA,fB); } # binutils-descr: "dcmpuq", X(63,642), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} define pcodeop dcmpuqOp; # ISA-cmt: dcmpuq - DFP Compare Unordered Quad # ISA-info: dcmpuq - Form "X" Page 179 Category "DFP" # binutils: mytest.d: 184: fd 03 25 04 dcmpuq cr2,f3,f4 :dcmpuq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=642 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dcmpuqOp(CRFD,fA,fB); } # binutils-descr: "dtstsfq", X(63,674), X_MASK, POWER6, PPCNONE, {BF, FRA, FRB} define pcodeop dtstsfqOp; # ISA-cmt: dtstsfq - DFP Test Significance Quad # ISA-info: dtstsfq - Form "X" Page 182 Category "DFP" # binutils: mytest.d: 188: fd 03 25 44 dtstsfq cr2,f3,f4 :dtstsfq CRFD,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=674 & CRFD & fA & fB & BITS_21_22=0 & BIT_0=0 { dtstsfqOp(CRFD,fA,fB); } # binutils-descr: "drdpq", XRC(63,770,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop drdpqOp; # ISA-cmt: drdpq - DFP Round To DFP Long # binutils: mytest.d: 18c: fc 40 1e 04 drdpq f2,f3 :drdpq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=770 & Rc=0 & fT & fB & BITS_16_20=0 { drdpqOp(fT,fB); } # binutils-descr: "drdpq.", XRC(63,770,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop drdpqDotOp; # ISA-cmt: drdpq. - DFP Round To DFP Long Rc # binutils: mytest.d: 190: fc 40 1e 05 drdpq. f2,f3 :drdpq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=770 & Rc=1 & fT & fB & BITS_16_20=0 { drdpqDotOp(fT,fB); } # binutils-descr: "dcffixq", XRC(63,802,0), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dcffixqOp; # ISA-cmt: dcffixq - DFP Convert From Fixed Quad # binutils: mytest.d: 194: fc 40 1e 44 dcffixq f2,f3 :dcffixq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=802 & Rc=0 & fT & fB & BITS_16_20=0 { dcffixqOp(fT,fB); } # binutils-descr: "dcffixq.", XRC(63,802,1), X_MASK, POWER6, PPCNONE, {FRT, FRB} define pcodeop dcffixqDotOp; # ISA-cmt: dcffixq. - DFP Convert From Fixed Quad Rc # binutils: mytest.d: 198: fc 40 1e 45 dcffixq. 
f2,f3 :dcffixq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=802 & Rc=1 & fT & fB & BITS_16_20=0 { dcffixqDotOp(fT,fB); } # binutils-descr: "denbcdq", XRC(63,834,0), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} define pcodeop denbcdqOp; # ISA-cmt: denbcdq - DFP Encode BCD To DPD Quad # binutils: mytest.d: 19c: fc 70 26 84 denbcdq 1,f3,f4 :denbcdq fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=834 & Rc=0 & BIT_20 & fT & fB & SR=0 { denbcdqOp(fT,fB); } # binutils-descr: "denbcdq.", XRC(63,834,1), X_MASK, POWER6, PPCNONE, {S, FRT, FRB} define pcodeop denbcdqDotOp; # ISA-cmt: denbcdq. - DFP Encode BCD To DPD Quad Rc # binutils: mytest.d: 1a0: fc 70 26 85 denbcdq. 1,f3,f4 :denbcdq. fT,fB is $(NOTVLE) & OP=63 & XOP_1_10=834 & Rc=1 & BIT_20 & fT & fB & SR=0 { denbcdqDotOp(fT,fB); } # binutils-descr: "diexq", XRC(63,866,0), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop diexqOp; # ISA-cmt: diexq - DFP Insert Biased Exponent Quad # binutils: mytest.d: 1a4: fc 43 26 c4 diexq f2,f3,f4 :diexq fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=866 & Rc=0 & fT & fA & fB { diexqOp(fT,fA,fB); } # binutils-descr: "diexq.", XRC(63,866,1), X_MASK, POWER6, PPCNONE, {FRT, FRA, FRB} define pcodeop diexqDotOp; # ISA-cmt: diexq. - DFP Insert Biased Exponent Quad Rc # binutils: mytest.d: 1a8: fc 43 26 c5 diexq. f2,f3,f4 :diexq. fT,fA,fB is $(NOTVLE) & OP=63 & XOP_1_10=866 & Rc=1 & fT & fA & fB { diexqDotOp(fT,fA,fB); } # icbtls ct,ra,rb # 31 / CT RA RB 486 / # 0 6 7 11 16 21 31 # 31 25 24 20 15 10 0 #define pcodeop icbtlsOp; #:icbtls CT2,A,B is $(NOTVLE) & OP=31 & BIT_25=0 & CT2 & A & B & XOP_1_10=486 & BIT_0=0 { icbtlsOp(A,B); } define pcodeop InstructionCacheBlockLockSetX; :icbtls CT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & BIT_25=0 & CT & RA_OR_ZERO & B & XOP_1_10=486 & BIT_0=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; InstructionCacheBlockLockSetX(ea); } ###################################### # v2.07 non vsx additions. 
#=========================================================== # Branch Conditional TAR(op=19, xop=560) #=========================================================== :bctar is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=560 { goto [TAR]; }
# NOTE(review): bctar/bctarl branch indirectly through TAR; the LK=1 forms save
# inst_next into LR and emit a call. The [ linkreg=0; globalset(inst_start,linkreg); ]
# actions reset the linkreg context field at this instruction's address.
:bctar is linkreg=1 & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH=0 & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { # don't do this anymore, detect another way # call [CTR]; # return [LR]; goto [TAR]; } :bctar BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=0 & BITS_13_15=0 & BH & XOP_1_10=560 { goto [TAR]; } :bctarl is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH=0 & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; call [TAR]; } :bctarl BH is $(NOTVLE) & OP=19 & BO_0=1 & BO_2=1 & LK=1 & BITS_13_15=0 & BH & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { LR = inst_next; call [TAR]; } :b^CC^"ctar" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=560 { if (!CC) goto inst_next; goto [TAR]; } :b^CC^"ctar" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=0 & BITS_13_15=0 & XOP_1_10=560 { if (!CC) goto inst_next; goto [TAR]; } :b^CC^"ctarl" is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; LR = inst_next; call [TAR]; } :b^CC^"ctarl" BH is $(NOTVLE) & OP=19 & CC & BO_0=0 & BO_2=1 & BI_CR= 0 & BH & BH_BITS!=0 & LK=1 & BITS_13_15=0 & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; LR = inst_next; call [TAR]; } :b^CC^"ctar" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=0 & BITS_13_15=0 & XOP_1_10=560 { if (!CC) goto inst_next; goto [TAR]; } :b^CC^"ctar" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=0 & BITS_13_15=0 & XOP_1_10=560 { if (!CC) goto 
inst_next; goto [TAR]; } :b^CC^"ctarl" BI_CR is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH=0 & LK=1 & BITS_13_15=0 & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; LR = inst_next; call [TAR]; } :b^CC^"ctarl" BI_CR,BH is $(NOTVLE) & OP=19 & CC & BI_CR & BO_0=0 & BO_2=1 & BH & LK=1 & BITS_13_15=0 & XOP_1_10=560 [ linkreg=0; globalset(inst_start,linkreg); ] { if (!CC) goto inst_next; LR = inst_next; call [TAR]; } :clrbhrb is $(NOTVLE) & OP=31 & XOP_1_10=430 & BITS_11_25=0 & BIT_0=0 { clearHistory(); } :fmrgew fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=966 & Rc=0 { fT[0,32] = fA:4; fT[32,32] = fB:4; } :fmrgow fT,fA,fB is $(NOTVLE) & OP=63 & fT & fA & fB & XOP_1_10=838 & Rc=0 { fT[0,32] = fA(4); fT[32,32] = fB(4); } :lqarx D,RA_OR_ZERO,B,EX is OP=31 & D & RA_OR_ZERO & B & XOP_1_10=276 & EX & Dp & regp [regpset = Dp+1;] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; @if ENDIAN == "big" D = *:$(REGISTER_SIZE) ea; regp = *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)); @else D = *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)); regp = *:$(REGISTER_SIZE) ea; @endif } :mfbhrbe D,BH_RBE is $(NOTVLE) & OP=31 & XOP_1_10=302 & BIT_0=0 & D & BH_RBE { D = movebuffer(BH_RBE:2); } :msgclrp B is OP=31 & XOP_1_10=174 & BITS_16_25=0 & BIT_0=0 & B { message(B); } :msgsndp B is OP=31 & XOP_1_10=142 & BITS_16_25=0 & BIT_0=0 & B { message(B); } :rfebb SBE is $(NOTVLE) & OP=19 & XOP_1_10=146 & BITS_12_25=0 & BIT_0=0 & SBE { eventInterrupt(SBE:1); } :stqcx. S,RA_OR_ZERO,B is OP=31 & S & RA_OR_ZERO & B & XOP_1_10=182 & BIT_0=1 & Dp & regp [regpset = Dp+1;] { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; @if ENDIAN == "big" *:$(REGISTER_SIZE) ea = S; *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)) = regp; @else *:$(REGISTER_SIZE) (ea + $(REGISTER_SIZE)) = S; *:$(REGISTER_SIZE) ea = regp; @endif setCrBit(cr0, 2, 1); } :tabort. A is $(NOTVLE) & OP=31 & XOP_1_10=910 & BIT_0=1 & BITS_11_15=0 & BITS_21_25=0 & A { transaction(A); } :tabortdc. 
TOA,A,B is $(NOTVLE) & OP=31 & XOP_1_10=814 & BIT_0=1 & A & B & TOA { transaction(TOA:1,A,B); } :tabortdci. TOA,A,S5IMM is $(NOTVLE) & OP=31 & XOP_1_10=878 & BIT_0=1 & A & S5IMM & TOA { transaction(TOA:1,A,S5IMM:1); } :tabortwc. TOA,A,B is $(NOTVLE) & OP=31 & XOP_1_10=782 & BIT_0=1 & A & B & TOA { transaction(TOA:1,A,B); } :tabortwci. TOA,A,S5IMM is $(NOTVLE) & OP=31 & XOP_1_10=846 & BIT_0=1 & A & S5IMM & TOA { transaction(TOA:1,A,S5IMM:1); } :tbegin. BIT_R is $(NOTVLE) & OP=31 & XOP_1_10=654 & BIT_0=1 & BITS_11_20=0 & BITS_22_24=0 & BIT_R { transaction(BIT_R:1); } :tcheck BF2 is $(NOTVLE) & OP=31 & XOP_1_10=718 & BIT_0=0 & BITS_11_22=0 & BF2 { transaction(BF2:1); } :tend. BIT_A is $(NOTVLE) & OP=31 & XOP_1_10=686 & BIT_0=1 & BITS_11_24=0 & BIT_A { transaction(BIT_A:1); } :trechkpt. is $(NOTVLE) & OP=31 & XOP_1_10=1006 & BIT_0=1 & BITS_11_25=0 { transaction(); } :treclaim. A is $(NOTVLE) & OP=31 & XOP_1_10=942 & BIT_0=1 & BITS_11_15=0 & BITS_21_25=0 & A { transaction(A); } :tsr. BIT_L is $(NOTVLE) & OP=31 & XOP_1_10=750 & BIT_0=1 & BITS_11_20=0 & BITS_22_25=0 & BIT_L { transaction(BIT_L); }
% tmpb; D = zext(tmpd); } :msgsync is $(NOTVLE) & OP=31 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=886 { message(); } :paste RA_OR_ZERO,B,0 is $(NOTVLE) & OP=31 & BITS_22_25=0 & XOP_1_10=902 & RA_OR_ZERO & B & L2=0 & Rc=0 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; pastetrans(ea); } :paste. RA_OR_ZERO,B,1 is $(NOTVLE) & OP=31 & BITS_22_25=0 & XOP_1_10=902 & RA_OR_ZERO & B & L2=1 & Rc=1 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; pastetrans(ea); setCrBit(cr0, 2, 1); } :setb D,BFA is $(NOTVLE) & OP=31 & BITS_11_17=0 & BIT_0=0 & XOP_1_10=128 & D & BFA { tmpcr:8 = 1 << (8 * BFA:1); tmpr0:1 = (BFA & 0x8) != 0; tmpr1:1 = (BFA & 0x4) != 0; D = (-1 * zext(tmpr0)) + (1 * zext(tmpr0 == 0) * zext(tmpr1)); } :slbieg S,B is $(NOTVLE) & OP=31 & BITS_16_20=0 & BIT_0=0 & XOP_1_10=466 & S & B { slbInvalidateEntry(S,B); } :slbsync is $(NOTVLE) & OP=31 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=338 { sync(); } :stdat S,RA_OR_ZERO,FNC is $(NOTVLE) & OP=31 & S & RA_OR_ZERO & FNC & XOP_1_10=742 & BIT_0=0 & Dp & regp [regpset = Dp+1;] { ea:$(REGISTER_SIZE) = RA_OR_ZERO; tmp:$(REGISTER_SIZE) = *:8 ea; mematom(ea,tmp,S,regp,FNC:1); } :stop is $(NOTVLE) & OP=19 & BITS_11_25=0 & BIT_0=0 & XOP_1_10=370 { stopT(); } :stwat S,RA_OR_ZERO,FNC is $(NOTVLE) & OP=31 & S & RA_OR_ZERO & FNC & XOP_1_10=710 & BIT_0=0 & Dp & regp [regpset = Dp+1;] { ea:$(REGISTER_SIZE) = RA_OR_ZERO; tmp:$(REGISTER_SIZE) = zext(*:4 ea); mematom(ea,tmp,S,regp,FNC:1); } :wait WC is $(NOTVLE) & OP=31 & BITS_23_25=0 & BITS_11_20=0 & BIT_0=0 & XOP_1_10=30 & WC { waitT(WC:1); }
# NOTE(review): end of the base (non-VLE) instruction additions. The VLE CC16/CC32
# subtables below export a 1-byte condition predicate read via getCrBit(); note the
# "dnz"/"dz" CC32 forms decrement CTR as a side effect when the subtable matches.
================================================ FILE: pypcode/processors/PowerPC/data/languages/ppc_vle.sinc ================================================ CC16: "lt" is BI16_VLE=0 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } CC16: "le" is BI16_VLE=1 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } CC16: "eq" is BI16_VLE=2 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); 
export tmp; } CC16: "ge" is BI16_VLE=0 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } CC16: "gt" is BI16_VLE=1 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } CC16: "ne" is BI16_VLE=2 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } CC16: "so" is BI16_VLE=3 & BO16_VLE=1 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); export tmp; } CC16: "ns" is BI16_VLE=3 & BO16_VLE=0 & BI16_VLE { tmp:1 = 0; getCrBit(cr0, BI16_VLE, tmp); tmp = !tmp; export tmp; } CC32: "lt" is BI_CC_VLE=0 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } CC32: "le" is BI_CC_VLE=1 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } CC32: "eq" is BI_CC_VLE=2 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } CC32: "ge" is BI_CC_VLE=0 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } CC32: "gt" is BI_CC_VLE=1 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } CC32: "ne" is BI_CC_VLE=2 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } CC32: "so" is BI_CC_VLE=3 & BO_VLE=1 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); export tmp; } CC32: "ns" is BI_CC_VLE=3 & BO_VLE=0 & BI_CR_VLE & BI_CC_VLE { tmp:1 = 0; getCrBit(BI_CR_VLE, BI_CC_VLE, tmp); tmp = !tmp; export tmp; } CC32: "dnz" is BO_VLE=2 {CTR = CTR-1; tmp:1 = (CTR != 0); export tmp; } CC32: "dz" is BO_VLE=3 {CTR = CTR-1; tmp:1 = (CTR == 0); export tmp; } addrBD8: reloc is BD8_VLE [ reloc = inst_start + (BD8_VLE << 1);] { export *[ram]:4 reloc; } addrBD15: reloc is BD15_VLE [ reloc = inst_start + (BD15_VLE << 1);] { export *[ram]:4 reloc; } addrBD24: reloc is BD24_VLE [ reloc = 
inst_start + (BD24_VLE << 1);] { export *[ram]:4 reloc; } d8PlusRaAddress: S8IMM(A) is S8IMM & A {tmp:$(REGISTER_SIZE) = A+S8IMM; export tmp; } d8PlusRaOrZeroAddress: S8IMM(RA_OR_ZERO) is S8IMM & RA_OR_ZERO {tmp:$(REGISTER_SIZE) = RA_OR_ZERO+S8IMM; export tmp; } sd4PlusRxAddr: SD4_VLE(RX_VLE) is SD4_VLE & RX_VLE {tmp:$(REGISTER_SIZE) = RX_VLE+SD4_VLE; export tmp; } sd4HPlusRxAddr: SD4_OFF(RX_VLE) is SD4_VLE & RX_VLE [SD4_OFF = SD4_VLE << 1;] {tmp:$(REGISTER_SIZE) = RX_VLE+SD4_OFF; export tmp; } sd4WPlusRxAddr: SD4_OFF(RX_VLE) is SD4_VLE & RX_VLE [SD4_OFF = SD4_VLE << 2;] {tmp:$(REGISTER_SIZE) = RX_VLE+SD4_OFF; export tmp; } OIMM: val is UI5_VLE [ val = UI5_VLE+1; ] { export *[const]:$(REGISTER_SIZE) val; } @if REGISTER_SIZE == "4" SCALE: val is BIT_10=1 & SCL_VLE & IMM8 [ val = (0xFFFFFFFF) & ~((0xFF-IMM8) << (SCL_VLE*8)); ] { export *[const]:4 val;} SCALE: val is BIT_10=0 & SCL_VLE & IMM8 [ val = (IMM8 << (SCL_VLE*8)); ] { export *[const]:4 val;} @else SCALE: val is BIT_10=1 & SCL_VLE & IMM8 [ val = (0xFFFFFFFFFFFFFFFF) & ~((0xFF-IMM8) << (SCL_VLE*8)); ] { export *[const]:8 val;} SCALE: val is BIT_10=0 & SCL_VLE & IMM8 [ val = IMM8 << (SCL_VLE*8); ] { export *[const]:8 val;} @endif SIMM16: val is IMM_0_10_VLE & SIMM_21_25_VLE [ val = (SIMM_21_25_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:2 val; } SIMM20: val is IMM_0_10_VLE & IMM_16_20_VLE & SIMM_11_14_VLE [ val = (SIMM_11_14_VLE << 16 ) | (IMM_16_20_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:4 val; } IMM16: val is IMM_0_10_VLE & IMM_21_25_VLE [ val = (IMM_21_25_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:2 val; } IMM16B: val is IMM_0_10_VLE & IMM_16_20_VLE [ val = (IMM_16_20_VLE << 11) | IMM_0_10_VLE ;] { export *[const]:2 val; }
# NOTE(review): VLE branches. addrBD8/addrBD15/addrBD24 scale their encoded
# displacement by 2 (halfword-aligned targets); se_bctr/se_blr below clear bit 0
# of CTR/LR before transferring control.
:e_b addrBD24 is $(ISVLE) & OP=30 & BIT_25=0 & LK=0 & addrBD24 { goto addrBD24; } :e_bl addrBD24 is $(ISVLE) & OP=30 & BIT_25=0 & LK=1 & addrBD24 { LR = inst_next; call addrBD24; } :se_b addrBD8 is $(ISVLE) & OP6_VLE=58 & BIT9_VLE=0 & LK8_VLE=0 & addrBD8 { goto addrBD8; } 
:se_bl addrBD8 is $(ISVLE) & OP6_VLE=58 & BIT9_VLE=0 & LK8_VLE=1 & addrBD8 { LR = inst_next; call addrBD8; } # NOTE: For the conditional branches, the "official" mnemonics have just bc and bcl. # We use extended mnemonics so the display is understandable without having to cross- # reference multiple tables. :e_b^CC32 BI_CR_VLE, addrBD15 is $(ISVLE) & OP=30 & XOP_VLE=8 & LK=0 & addrBD15 & BIT_L=0 & BI_CR_VLE & CC32 { if (CC32 == 0) goto inst_next; goto addrBD15; } :e_b^CC32^"l" BI_CR_VLE, addrBD15 is $(ISVLE) & OP=30 & XOP_VLE=8 & LK=1 & addrBD15 & BIT_L=0 & BI_CR_VLE & CC32 { if (CC32 == 0) goto inst_next; LR= inst_next; call [addrBD15]; } :e_b^CC32 addrBD15 is $(ISVLE) & OP=30 & XOP_VLE=8 & LK=0 & addrBD15 & BIT_L=1 & CC32 { if (CC32 == 0) goto inst_next; goto addrBD15; } :e_b^CC32^"l" addrBD15 is $(ISVLE) & OP=30 & XOP_VLE=8 & LK=1 & addrBD15 & BIT_L=1 & CC32 { if (CC32 == 0) goto inst_next; LR= inst_next; call [addrBD15]; } :se_b^CC16 cr0, addrBD8 is $(ISVLE) & OP5_VLE=28 & addrBD8 & cr0 & CC16 { if (CC16 == 0) goto inst_next; goto addrBD8; } ####### :se_bctr is $(ISVLE) & OP15_VLE=3 & LK0_VLE=0 { tmp:$(REGISTER_SIZE) = CTR & ~1; goto [tmp]; } :se_bctrl is $(ISVLE) & OP15_VLE=3 & LK0_VLE=1 { LR = inst_next; tmp:$(REGISTER_SIZE) = CTR & ~1; call [tmp]; } :se_blr is $(ISVLE) & OP15_VLE=2 & LK0_VLE=0 { tmp:$(REGISTER_SIZE) = LR & ~1; return [tmp]; } :se_blrl is $(ISVLE) & OP15_VLE=2 & LK0_VLE=1 { tmp:$(REGISTER_SIZE) = LR & ~1; LR = inst_next; call [tmp]; } :se_sc is $(ISVLE) & OP16_VLE=2 { tmp:1 = 0; syscall(tmp); } :e_sc LEV_VLE is $(ISVLE) & OP=31 & XOP_1_10=36 & BIT_0=0 & BITS_16_20=0 & BITS_21_25=0 & LEV_VLE { tmp:1 = LEV_VLE; syscall(tmp); } :e_sc is $(ISVLE) & OP=31 & XOP_1_10=36 & BIT_0=0 & BITS_16_20=0 & BITS_21_25=0 & LEV_VLE=0 { tmp:1 = 0; syscall(tmp); } :se_illegal is $(ISVLE) & OP16_VLE=0 { illegal(); } :se_rfmci is $(ISVLE) & OP16_VLE=11 { MSR = returnFromMachineCheckInterrupt(MSR, spr23b); #MCSRR1 local ra = spr23a; #MCSRR0 return[ra]; } :se_rfci is 
$(ISVLE) & OP16_VLE=9 { MSR = returnFromCriticalInterrupt(MSR, CSRR1); local ra = CSRR0; return[ra]; } :se_rfi is $(ISVLE) & OP16_VLE=8 { MSR = returnFromInterrupt(MSR, SRR1); local ra = SRR0; return[ra]; } :se_rfdi is $(ISVLE) & OP16_VLE=10 { MSR = returnFromDebugInterrupt(MSR, spr23f); #DSRR1 local ra = spr23e; #DSRR0 return[ra]; } :se_rfgi is $(ISVLE) & OP16_VLE=12 { MSR = returnFromGuestInterrupt(MSR, spr17b); #GSRR1 local ra = spr17a; #GSRR0 return[ra]; } :e_crand CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=257 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,CC_OP & CC_B_OP); } :e_crandc CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=129 & BIT_0=0 { tmp1:1 = !CC_B_OP; setCrBit(CR_D,CR_D_CC,CC_OP & tmp1); } :e_creqv CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=289 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,CC_B_OP == CC_OP); } :e_crnand CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=225 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,!(CC_B_OP & CC_OP)); } :e_crnor CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=33 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,!(CC_B_OP | CC_OP)); } :e_cror CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=449 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,(CC_B_OP | CC_OP)); } :e_crorc CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=417 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,(CC_B_OP | (!CC_OP))); } :e_crxor CC_D_OP,CC_OP,CC_B_OP is $(ISVLE) & OP=31 & CC_D_OP & CC_OP & CC_B_OP & CR_D & CR_D_CC & XOP_1_10=193 & BIT_0=0 { setCrBit(CR_D,CR_D_CC,(CC_B_OP ^ CC_OP)); } :e_mcrf CRFD,CRFS is $(ISVLE) & OP=31 & CRFD & BITS_21_22=0 & CRFS & BITS_11_17=0 & XOP_1_10=16 & BIT_0=0 { CRFD = CRFS; } :e_lbz D,dPlusRaOrZeroAddress is $(ISVLE) & OP=12 & D & 
dPlusRaOrZeroAddress { D = zext(*:1(dPlusRaOrZeroAddress)); } :se_lbz RZ_VLE,sd4PlusRxAddr is $(ISVLE) & OP4_VLE=8 & RZ_VLE & sd4PlusRxAddr { RZ_VLE = zext(*:1(sd4PlusRxAddr)); } :e_lbzu D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=0 & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; D = zext(*:1(ea)); A = ea; }
# NOTE(review): the e_ldmv*/e_stmv* forms load/store multiple special registers
# through the running pointer tea (initialized from d8PlusRaOrZeroAddress) via the
# loadReg/storeReg macros; the CR image is packed/unpacked 4 bits per cr0..cr7.
# e_ldmvcsrrw 6 (0b0001_10) 0b00101 RA 0b0001_0000 D8 :e_ldmvcsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=5 { tea = d8PlusRaOrZeroAddress; loadReg(CSRR0); loadReg(CSRR1); } # e_ldmvdsrrw 6 (0b0001_10) 0b00110 RA 0b0001_0000 D8 :e_ldmvdsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=6 { tea = d8PlusRaOrZeroAddress; loadReg(spr23e); #DSRR0 loadReg(spr23f); #DSRR1 } # e_ldmvgprw 6 (0b0001_10) 0b00000 RA 0b0001_0000 D8 :e_ldmvgprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=0 { tea = d8PlusRaOrZeroAddress; loadReg(r0); loadReg(r3); loadReg(r4); loadReg(r5); loadReg(r6); loadReg(r7); loadReg(r8); loadReg(r9); loadReg(r10); loadReg(r11); loadReg(r12); } # e_ldmvsprw 6 (0b0001_10) 0b00001 RA 0b0001_0000 D8 :e_ldmvsprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=1 { tea = d8PlusRaOrZeroAddress; #TODO is there a better way to handle this, CR are 4 bit # so crall can't be used. And not much code accesses # CR in this way, also CRM_CR seems backwards? 
# loadReg(CR); local tmpCR:4 = *:4 tea; cr0 = zext(tmpCR[0,4]); cr1 = zext(tmpCR[4,4]); cr2 = zext(tmpCR[8,4]); cr3 = zext(tmpCR[12,4]); cr4 = zext(tmpCR[16,4]); cr5 = zext(tmpCR[20,4]); cr6 = zext(tmpCR[24,4]); cr7 = zext(tmpCR[28,4]); tea = tea + 4; loadReg(LR); loadReg(CTR); loadReg(XER); } # e_ldmvsrrw 6 (0b0001_10) 0b00100 RA 0b0001_0000 D8 :e_ldmvsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x10 & BITS_21_25=4 { tea = d8PlusRaOrZeroAddress; loadReg(SRR0); loadReg(SRR1); } :e_lha D,dPlusRaOrZeroAddress is $(ISVLE) & OP=14 & D & dPlusRaOrZeroAddress { D = sext(*:2(dPlusRaOrZeroAddress)); } :e_lhz D,dPlusRaOrZeroAddress is $(ISVLE) & OP=22 & D & dPlusRaOrZeroAddress { D = zext(*:2(dPlusRaOrZeroAddress)); } :se_lhz RZ_VLE,sd4HPlusRxAddr is $(ISVLE) & OP4_VLE=10 & RZ_VLE & sd4HPlusRxAddr { RZ_VLE = zext(*:2(sd4HPlusRxAddr)); } :e_lhau D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=3 & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; D = sext(*:2(ea)); A = ea; } :e_lhzu D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=1 & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; D = zext(*:2(ea)); A = ea; } :e_lwz D,dPlusRaOrZeroAddress is $(ISVLE) & OP=20 & D & dPlusRaOrZeroAddress { D = zext(*:4(dPlusRaOrZeroAddress)); } :se_lwz RZ_VLE,sd4WPlusRxAddr is $(ISVLE) & OP4_VLE=12 & RZ_VLE & sd4WPlusRxAddr { RZ_VLE = zext(*:4(sd4WPlusRxAddr)); } :e_lwzu D,d8PlusRaAddress is $(ISVLE) & OP=6 & D & A & XOP_8_VLE=2 & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; D = zext(*:4(ea)); A = ea; } :e_stb S,dPlusRaOrZeroAddress is $(ISVLE) & OP=13 & S & dPlusRaOrZeroAddress { *:1(dPlusRaOrZeroAddress) = S:1; } :se_stb RZ_VLE,sd4PlusRxAddr is $(ISVLE) & OP4_VLE=9 & RZ_VLE & sd4PlusRxAddr { *:1(sd4PlusRxAddr) = RZ_VLE:1; } :e_stbu S,d8PlusRaAddress is $(ISVLE) & OP=6 & XOP_8_VLE=4 & S & A & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; *:1(ea) = S:1; A = ea; } :e_sth S,dPlusRaOrZeroAddress is 
$(ISVLE) & OP=23 & S & dPlusRaOrZeroAddress { *:2(dPlusRaOrZeroAddress) = S:2; } :se_sth RZ_VLE,sd4HPlusRxAddr is $(ISVLE) & OP4_VLE=11 & RZ_VLE & sd4HPlusRxAddr { *:2(sd4HPlusRxAddr) = RZ_VLE:2; } :e_sthu S,d8PlusRaAddress is $(ISVLE) & OP=6 & XOP_8_VLE=5 & S & A & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; *:2(ea) = S:2; A = ea; } # e_stmvcsrrw 6 (0b0001_10) 0b00101 RA 0b0001_0001 D8 :e_stmvcsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=5 { tea = d8PlusRaOrZeroAddress; storeReg(CSRR0); storeReg(CSRR1); } # e_stmvdsrrw 6 (0b0001_10) 0b00110 RA 0b0001_0001 D8 :e_stmvdsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=6 { tea = d8PlusRaOrZeroAddress; storeReg(spr23e); #DSRR0 storeReg(spr23f); #DSRR1 } # e_stmvgprw 6 (0b0001_10) 0b00000 RA 0b0001_0001 D8 :e_stmvgprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=0 { tea = d8PlusRaOrZeroAddress; storeReg(r0); storeReg(r3); storeReg(r4); storeReg(r5); storeReg(r6); storeReg(r7); storeReg(r8); storeReg(r9); storeReg(r10); storeReg(r11); storeReg(r12); } # e_stmvsprw 6 (0b0001_10) 0b00001 RA 0b0001_0001 D8 :e_stmvsprw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & BITS_21_25=1 { tea = d8PlusRaOrZeroAddress; #TODO SEE TODO in e_ldmvsprw # storeReg(CR); local tmpCR:4 = 0; tmpCR = tmpCR | (zext(cr0 & 0xf) << 0); tmpCR = tmpCR | (zext(cr1 & 0xf) << 4); tmpCR = tmpCR | (zext(cr2 & 0xf) << 8); tmpCR = tmpCR | (zext(cr3 & 0xf) << 12); tmpCR = tmpCR | (zext(cr4 & 0xf) << 16); tmpCR = tmpCR | (zext(cr5 & 0xf) << 20); tmpCR = tmpCR | (zext(cr6 & 0xf) << 24); tmpCR = tmpCR | (zext(cr7 & 0xf) << 28); *:4 tea = tmpCR; tea = tea + 4; storeReg(LR); storeReg(CTR); storeReg(XER); } # e_stmvsrrw 6 (0b0001_10) 0b00100 RA 0b0001_0001 D8 :e_stmvsrrw d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & d8PlusRaOrZeroAddress & XOP_8_VLE=0x11 & 
BITS_21_25=4 { tea = d8PlusRaOrZeroAddress; storeReg(SRR0); storeReg(SRR1); } :e_stw S,dPlusRaOrZeroAddress is $(ISVLE) & OP=21 & S & dPlusRaOrZeroAddress { @ifdef BIT_64 *:4(dPlusRaOrZeroAddress) = S:4; @else *:4(dPlusRaOrZeroAddress) = S; @endif } :se_stw RZ_VLE,sd4WPlusRxAddr is $(ISVLE) & OP4_VLE=13 & RZ_VLE & sd4WPlusRxAddr { @ifdef BIT_64 *:4(sd4WPlusRxAddr) = RZ_VLE:4; @else *:4(sd4WPlusRxAddr) = RZ_VLE; @endif } :e_stwu S,d8PlusRaAddress is $(ISVLE) & OP=6 & XOP_8_VLE=6 & S & A & d8PlusRaAddress { ea:$(REGISTER_SIZE) = d8PlusRaAddress; @ifdef BIT_64 *:4(ea) = S:4; @else *:4(ea) = S; @endif A = ea; } :e_lmw D,d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & XOP_8_VLE=8 & D & BITS_21_25 & d8PlusRaOrZeroAddress & LDMR31 [ lsmul = BITS_21_25; ] { tea = d8PlusRaOrZeroAddress; build LDMR31; } :e_stmw S,d8PlusRaOrZeroAddress is $(ISVLE) & OP=6 & XOP_8_VLE=9 & S & BITS_21_25 & d8PlusRaOrZeroAddress & STMR31 [ lsmul = BITS_21_25; ] { tea = d8PlusRaOrZeroAddress; build STMR31; } :se_add RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=0 & RX_VLE & RY_VLE { RX_VLE = RX_VLE + RY_VLE; } :e_add16i D,A,SIMM is $(ISVLE) & OP=7 & A & D & SIMM { tmp:2 = SIMM; D = A + sext(tmp); } :e_add2i. A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=17 & A & SIMM16 { A = A + sext(SIMM16); cr0flags(A); } :e_add2is A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=18 & A & SIMM16 { tmp:$(REGISTER_SIZE) = sext(SIMM16); tmp = tmp << 16; A = A + tmp; } :e_addi D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=8 & BIT_11=0 & D & A & SCALE { D = A + SCALE; } :e_addi. D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=8 & BIT_11=1 & D & A & SCALE { D = A + SCALE; cr0flags(D); } :se_addi RX_VLE,OIMM is $(ISVLE) & OP6_VLE=8 & BIT9_VLE=0 & RX_VLE & OIMM { RX_VLE = RX_VLE + OIMM; } :e_addic D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=9 & BIT_11=0 & D & A & SCALE { xer_ca = carry(A,SCALE); D = A + SCALE; } :e_addic. 
D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=9 & BIT_11=1 & D & A & SCALE { xer_ca = carry(A,SCALE); D = A + SCALE; cr0flags(D); } :se_sub RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=2 & RX_VLE & RY_VLE { RX_VLE = RX_VLE - RY_VLE; } :se_subf RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=3 & RX_VLE & RY_VLE { RX_VLE = RY_VLE - RX_VLE; } :e_subfic D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=11 & BIT_11=0 & D & A & SCALE { xer_ca = (A <= SCALE); D = SCALE - A; } :e_subfic. D,A,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=11 & BIT_11=1 & D & A & SCALE { xer_ca = (A <= SCALE); D = SCALE - A; cr0flags(D); } :se_subi RX_VLE,OIMM is $(ISVLE) & OP6_VLE=9 & BIT9_VLE=0 & RX_VLE & OIMM { RX_VLE = RX_VLE - OIMM; } :se_subi. RX_VLE,OIMM is $(ISVLE) & OP6_VLE=9 & BIT9_VLE=1 & RX_VLE & OIMM { RX_VLE = RX_VLE - OIMM; cr0flags(RX_VLE); } :e_mulli D,A,SCALE is $(ISVLE) & OP=6 & XOP_11_VLE=20 & D & A & SCALE { tmp1:16 = sext(A); tmp2:16 = sext(SCALE); tmpP:16 = tmp1 * tmp2; D = tmpP:$(REGISTER_SIZE); } :e_mull2i. A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=20 & A & SIMM16 { tmp1:16 = sext(A); tmp2:16 = sext(SIMM16); tmpP:16 = tmp1 * tmp2; A = tmpP:$(REGISTER_SIZE); } :se_mullw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=1 & BITS_8_9=1 & RX_VLE & RY_VLE { RX_VLE = RX_VLE * RY_VLE; } :se_neg RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=3 & RX_VLE { RX_VLE = ~RX_VLE + 1; } :se_btsti RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=25 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { tmp:$(REGISTER_SIZE) = (RX_VLE >> (0x1F - OIM5_VLE)) & 0x1; cr0flags(tmp); }
# NOTE(review): the VLE compare forms below build a 4-bit LT/GT/EQ/SO image
# ((a<b)<<3 | (a>b)<<2 | (a==b)<<1 | (xer_so & 1)) into cr0 or BF_VLE/CRFD;
# unsigned variants use < / > while signed variants use s< / s>.
:e_cmp16i. 
A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=19 & A & SIMM16 { tmpA:4 = A:4; tmpB:4 = sext(SIMM16); cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmpi BF_VLE,A,SCALE is $(ISVLE) & OP=6 & XOP_11_VLE=21 & BITS_23_25=0 & A & BF_VLE & SCALE { tmpA:4 = A:4; tmpB:4 = SCALE:4; BF_VLE = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :se_cmp RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=0 & RX_VLE & RY_VLE { tmpA:4 = RX_VLE:4; tmpB:4 = RY_VLE:4; cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :se_cmpi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=10 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { tmpA:4 = RX_VLE:4; tmpB:4 = OIM5_VLE; cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmpl16i. A,IMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=21 & A & IMM16 { tmpA:4 = A:4; tmpB:4 = zext(IMM16); cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmpli BF_VLE,A,SCALE is $(ISVLE) & OP=6 & XOP_11_VLE=21 & BITS_23_25=1 & A & BF_VLE & SCALE { tmpA:4 = A:4; tmpB:4 = SCALE:4; BF_VLE = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :se_cmpl RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=1 & RX_VLE & RY_VLE { tmpA:4 = RX_VLE:4; tmpB:4 = RY_VLE:4; cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :se_cmpli RX_VLE,OIMM is $(ISVLE) & OP6_VLE=8 & BIT9_VLE=1 & RX_VLE & OIMM { tmpA:4 = RX_VLE:4; tmpB:4 = OIMM:4; cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmph CRFD,A,B is $(ISVLE) & OP=31 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=14 & A & B & CRFD { tmpA:2 = A:2; tmpB:2 = B:2; CRFD = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :se_cmph RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=2 & RX_VLE & RY_VLE { tmpA:2 = RX_VLE:2; 
tmpB:2 = RY_VLE:2; cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmph16i. A,SIMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=22 & A & SIMM16 { tmpA:2 = A:2; tmpB:2 = SIMM16; cr0 = ((tmpA s< tmpB) << 3) | ((tmpA s> tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmphl CRFD,A,B is $(ISVLE) & OP=31 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=46 & A & B & CRFD { tmpA:2 = A:2; tmpB:2 = B:2; tmpC:1 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); CRFD = tmpC; } :se_cmphl RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=3 & BITS_8_9=3 & RX_VLE & RY_VLE { tmpA:2 = RX_VLE:2; tmpB:2 = RY_VLE:2; cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_cmphl16i. A,IMM16 is $(ISVLE) & OP=28 & XOP_11_VLE=23 & A & IMM16 { tmpA:2 = A:2; tmpB:2 = IMM16; cr0 = ((tmpA < tmpB) << 3) | ((tmpA > tmpB) << 2) | ((tmpA == tmpB) << 1) | (xer_so & 1); } :e_and2i. D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=25 & D & IMM16B { D = D & zext(IMM16B); cr0flags(D); } :e_and2is. D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=29 & D & IMM16B { tmp:$(REGISTER_SIZE) = zext(IMM16B); tmp = tmp << 16; D = D & tmp; cr0flags(D); } :e_andi A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=12 & BIT_11=0 & S & A & SCALE { A = S & SCALE; } :e_andi. A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=12 & BIT_11=1 & S & A & SCALE { A = S & SCALE; cr0flags(A); } :se_andi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=11 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { tmp:1 = OIM5_VLE; RX_VLE = RX_VLE & zext(tmp); } :e_or2i D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=24 & D & IMM16B { D = D | zext(IMM16B); } :e_or2is D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=26 & D & IMM16B { tmp:$(REGISTER_SIZE) = zext(IMM16B); tmp = tmp << 16; D = D | tmp; } :e_nop is $(ISVLE) & OP=6 & XOP_12_VLE=13 & BITS_1_10=0 & BIT_0=0 & S=0 & A=0 { } :e_ori A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=13 & BIT_11=0 & S & A & SCALE { A = S | SCALE; } :e_ori. 
A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=13 & BIT_11=1 & S & A & SCALE { A = S | SCALE; cr0flags(A); } :e_xori A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=14 & BIT_11=0 & S & A & SCALE { A = S ^ SCALE; } :e_xori. A,S,SCALE is $(ISVLE) & OP=6 & XOP_12_VLE=14 & BIT_11=1 & S & A & SCALE { A = S ^ SCALE; cr0flags(A); } :se_and RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BIT9_VLE=1 & BIT8_VLE=0 & RX_VLE & RY_VLE { RX_VLE = RX_VLE & RY_VLE; } :se_and. RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BIT9_VLE=1 & BIT8_VLE=1 & RX_VLE & RY_VLE { RX_VLE = RX_VLE & RY_VLE; cr0flags(RX_VLE); } :se_andc RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BITS_8_9=1 & RX_VLE & RY_VLE { RX_VLE = RX_VLE & ~RY_VLE; } :se_nop is $(ISVLE) & OP6_VLE=17 & BITS_8_9=0 & RX_VLE=0 & RY_VLE=0 { } :se_or RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=17 & BITS_8_9=0 & RX_VLE & RY_VLE { RX_VLE = RX_VLE | RY_VLE; } :se_not RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=2 & RX_VLE { RX_VLE = ~RX_VLE; } :se_bclri RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=24 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { tmp:$(REGISTER_SIZE) = 0x80000000 >> OIM5_VLE; tmp = ~tmp; RX_VLE = RX_VLE & tmp; } :se_bgeni RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=24 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { RX_VLE = 0x80000000 >> OIM5_VLE; } :se_bmaski RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=11 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { RX_VLE = ~0; sa:4 = (8 * $(REGISTER_SIZE) - OIM5_VLE) * zext( OIM5_VLE != 0:1 ); RX_VLE = RX_VLE >> sa; } :se_bseti RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=25 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { tmp:$(REGISTER_SIZE) = 0x80000000 >> OIM5_VLE; RX_VLE = RX_VLE | tmp; } :se_extsb RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=13 & RX_VLE { RX_VLE = sext(RX_VLE:1); } :se_extsh RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=15 & RX_VLE { RX_VLE = sext(RX_VLE:2); } :se_extzb RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=12 & RX_VLE { RX_VLE = zext(RX_VLE:1); } :se_extzh RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=14 & RX_VLE { RX_VLE = zext(RX_VLE:2); } :e_li D,SIMM20 is $(ISVLE) & OP=28 & 
BIT_15=0 & D & SIMM20 { D = sext(SIMM20); } :se_li RX_VLE,OIM7_VLE is $(ISVLE) & OP5_VLE=9 & RX_VLE & OIM7_VLE { RX_VLE = OIM7_VLE; } :e_lis D,IMM16B is $(ISVLE) & OP=28 & XOP_11_VLE=28 & D & IMM16B { tmp:$(REGISTER_SIZE) = zext(IMM16B); D = tmp << 16; } :se_mfar RX_VLE,ARY_VLE is $(ISVLE) & OP6_VLE=0 & BITS_8_9=3 & RX_VLE & ARY_VLE { RX_VLE = ARY_VLE; } :se_mr RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=0 & BITS_8_9=1 & RX_VLE & RY_VLE { RX_VLE = RY_VLE; } :se_mtar ARX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=0 & BITS_8_9=2 & ARX_VLE & RY_VLE { ARX_VLE = RY_VLE; } :e_rlw A,S,B is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=280 & A & B & S { tmpB:1 = B[0,5]; tmpS:4 = S:4; tmpA:4 = (tmpS << tmpB) | (tmpS >> (32 - tmpB)); A = zext(tmpA); } :e_rlw. A,S,B is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=280 & A & B & S { tmpB:1 = B[0,5]; tmpS:4 = S:4; tmpA:4 = (tmpS << tmpB) | (tmpS >> (32 - tmpB)); A = zext(tmpA); cr0flags(A); } :e_rlwi A,S,SHL is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=312 & A & SHL & S { tmpS:4 = S:4; tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL)); A = zext(tmpA); } :e_rlwi. 
A,S,SHL is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=312 & A & SHL & S { tmpS:4 = S:4; tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL)); A = zext(tmpA); cr0flags(A); } # The manual uses MB instead of MBL here, but because the "MB" symbol is already taken, MBL it is :e_rlwimi A,S,SHL,MBL,ME is $(ISVLE) & OP=29 & BIT_0=0 & MBL & ME & A & SHL & S { tmpS:4 = S:4; tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL)); tmpM1:4 = (~0:4) << MBL; tmpM1 = tmpM1 >> ((31-ME) + MBL); tmpM1 = tmpM1 << (31-ME); tmpM2:4 = (~0:4) << ME; tmpM2 = tmpM2 >> ((31-MBL) + ME); tmpM2 = tmpM2 << (31-MBL); tmpM2 = ~tmpM2; local invert = (ME:1 < MBL:1); tmpM:4 = (zext(invert == 0)*tmpM1) + (zext(invert == 1)*tmpM2); A = zext(tmpA & tmpM) | (A & zext(~tmpM)); } :e_rlwinm A,S,SHL,MBL,ME is $(ISVLE) & OP=29 & BIT_0=1 & MBL & ME & A & SHL & S { tmpS:4 = S:4; tmpA:4 = (tmpS << SHL) | (tmpS >> (32 - SHL)); tmpM1:4 = (~0:4) << MBL; tmpM1 = tmpM1 >> ((31-ME) + MBL); tmpM1 = tmpM1 << (31-ME); tmpM2:4 = (~0:4) << ME; tmpM2 = tmpM2 >> ((31-MBL) + ME); tmpM2 = tmpM2 << (31-MBL); tmpM2 = ~tmpM2; local invert = (ME:1 < MBL:1); tmpM:4 = (zext(invert == 0)*tmpM1) + (zext(invert == 1)*tmpM2); A = zext(tmpA & tmpM); } :e_slwi A,S,SHL is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=56 & A & SHL & S { tmpS:4 = S:4; tmpS = tmpS << SHL; A = zext(tmpS); } :e_slwi. 
A,S,SHL is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=56 & A & SHL & S { tmpS:4 = S:4; tmpS = tmpS << SHL; A = zext(tmpS); cr0flags(A); } :se_slwi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=27 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { tmpX:4 = RX_VLE:4; tmpX = tmpX << OIM5_VLE; RX_VLE = zext(tmpX); } :se_slw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=16 & BITS_8_9=2 & RX_VLE & RY_VLE { tmpX:4 = RX_VLE:4; tmpS:1 = RY_VLE[0,6]; tmpX = tmpX << tmpS; RX_VLE = zext(tmpX); } :se_srawi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=26 & BIT9_VLE=1 & RX_VLE & OIM5_VLE { tmpX:4 = RX_VLE:4; tmpX = tmpX s>> OIM5_VLE; RX_VLE = sext(tmpX); xer_ca = (RX_VLE s< 0) & (OIM5_VLE:1 != 0); } :se_sraw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=16 & BITS_8_9=1 & RX_VLE & RY_VLE { tmpX:4 = RX_VLE:4; tmpS:1 = RY_VLE[0,5]; tmpX = tmpX s>> tmpS; RX_VLE = sext(tmpX); xer_ca = (RX_VLE s< 0) & (tmpS != 0); } :e_srwi A,S,SHL is $(ISVLE) & OP=31 & BIT_0=0 & XOP_1_10=568 & A & SHL & S { tmpS:4 = S:4; tmpS = tmpS >> SHL; A = zext(tmpS); } :e_srwi. A,S,SHL is $(ISVLE) & OP=31 & BIT_0=1 & XOP_1_10=568 & A & SHL & S { tmpS:4 = S:4; tmpS = tmpS >> SHL; A = zext(tmpS); cr0flags(A); } :se_srwi RX_VLE,OIM5_VLE is $(ISVLE) & OP6_VLE=26 & BIT9_VLE=0 & RX_VLE & OIM5_VLE { tmpX:4 = RX_VLE:4; tmpX = tmpX >> OIM5_VLE; RX_VLE = zext(tmpX); } :se_srw RX_VLE,RY_VLE is $(ISVLE) & OP6_VLE=16 & BITS_8_9=0 & RX_VLE & RY_VLE { tmpX:4 = RX_VLE:4; tmpS:1 = RY_VLE[0,5]; tmpX = tmpX >> tmpS; RX_VLE = zext(tmpX); } :se_mfctr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=10 & RX_VLE { RX_VLE = CTR; } :se_mtctr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=11 & RX_VLE { CTR = RX_VLE; } :se_mflr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=8 & RX_VLE { RX_VLE = LR; } :se_mtlr RX_VLE is $(ISVLE) & OP6_VLE=0 & XOR_VLE=9 & RX_VLE { LR = RX_VLE; } :se_isync is $(ISVLE) & OP16_VLE=1 { instructionSynchronize(); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/quicciii.sinc ================================================ # These 
instructions show up in the Freescale PowerQUICC III instruction manual # (not present elsewhere) define pcodeop dataCacheBlockClearLock; define pcodeop prefetchDataCacheBlockLockSet; define pcodeop prefetchDataCacheBlockLockSetX; define pcodeop debuggerNotifyHalt; define pcodeop instructionCacheBlockClearLock; define pcodeop queryInstructionCacheBlockLock; define pcodeop prefetchInstructionCacheBlockLockSetX; define pcodeop moveFromAPIDIndirect; define pcodeop moveFromPerformanceMonitorRegister; define pcodeop moveToPerformanceMonitorRegister; define pcodeop invalidateTLB; #dcblc 0,0,r0 #FIXME :dcblc CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=390 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; dataCacheBlockClearLock(ea); } #dcbtls 0,0,r0 #FIXME :dcbtls CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=166 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; prefetchDataCacheBlockLockSet(ea); } #dcbtstls 0,0,r0 #FIXME :dcbtstls CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=134 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; prefetchDataCacheBlockLockSetX(ea); } #dnh 0,0 #FIXME :dnh DUI,DUIS is $(NOTVLE) & OP=19 & DUI & DUIS & XOP_1_10=198 & BIT_0=0 { debuggerNotifyHalt(DUI:1,DUIS:2); } #icblc 0,0,r0 #FIXME :icblc CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=230 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; instructionCacheBlockClearLock(CT:1,ea); } :icblq CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=198 & BIT_0=1 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; cr0 = queryInstructionCacheBlockLock(CT:1,ea); } #icbtls 0,0,r0 #FIXME :icbtls CT,RA_OR_ZERO,B is OP=31 & CT & B & XOP_1_10=486 & BIT_0=0 & RA_OR_ZERO { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; prefetchInstructionCacheBlockLockSetX(ea); } :isel^CC_X_OPm D,RA_OR_ZERO,B,CC_X_OP is OP=31 & D & RA_OR_ZERO & B & CC_X_OP & CC_X_OPm & XOP_1_5=15 { local tmp:$(REGISTER_SIZE) = RA_OR_ZERO; D = B; if (!CC_X_OP) goto inst_next; D = tmp; # D = (zext(CC_X_OP) * 
RA_OR_ZERO) + (zext(!CC_X_OP) * B); } #mfapidi r0,r1 #FIXME :mfapidi D,A is $(NOTVLE) & OP=31 & D & A & XOP_1_10=275 { D = moveFromAPIDIndirect(A); } pmrn: pmr is BITS_16_20 & BITS_11_15 [ pmr = BITS_11_15 << 5 | BITS_16_20; ] { tmp:2 = pmr; export tmp; } #mfpmr r0,? #FIXME :mfpmr D,pmrn is OP=31 & D & pmrn & XOP_1_10=334 & BIT_0=0 { D = moveFromPerformanceMonitorRegister(pmrn); } #mtpmr r0,? #FIXME :mtpmr pmrn,S is OP=31 & S & pmrn & XOP_1_10=462 & BIT_0=0 { moveToPerformanceMonitorRegister(pmrn, S); } #rfdi #FIXME :rfdi is $(NOTVLE) & OP=19 & XOP_1_10=39 { MSR = returnFromDebugInterrupt(MSR, spr23f); #DSRR1 local ra = spr23e; #DSRR0 return[ra]; } #rfmci #FIXME :rfmci is $(NOTVLE) & OP=19 & XOP_0_10=76 { MSR = returnFromMachineCheckInterrupt(MSR, spr23b); #MCSRR1 local ra = spr23a; #MCSRR0 return[ra]; } # PowerISA II: 6.11.4.9 TLB Management Instructions # CMT: TLB Invalidate Local Indexed [Category: Embedded.Phased In]] # FORM: X-form define pcodeop TLBInvalidateLocalIndexed; # Outputs/affect TBD :tlbilx BITS_21_22,RA_OR_ZERO,RB_OR_ZERO is $(NOTVLE) & OP=31 & CRFD=0 & BITS_21_22 & RA_OR_ZERO & RB_OR_ZERO & XOP_1_10=18 & BIT_0=0 { TLBInvalidateLocalIndexed(BITS_21_22:1,RA_OR_ZERO,RB_OR_ZERO); } #tlbivax 0,r0 #FIXME :tlbivax RA_OR_ZERO,B is OP=31 & RA_OR_ZERO & B & XOP_1_10=786 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; invalidateTLB(ea); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/stmwInstructions.sinc ================================================ STMR0: is lsmul=1 {} STMR0: is epsilon { storeReg(r0); } STMR1: is lsmul=2 {} STMR1: is STMR0 { build STMR0; storeReg(r1); } STMR2: is lsmul=3 {} STMR2: is STMR1 { build STMR1; storeReg(r2); } STMR3: is lsmul=4 {} STMR3: is STMR2 { build STMR2; storeReg(r3); } STMR4: is lsmul=5 {} STMR4: is STMR3 { build STMR3; storeReg(r4); } STMR5: is lsmul=6 {} STMR5: is STMR4 { build STMR4; storeReg(r5); } STMR6: is lsmul=7 {} STMR6: is STMR5 { build STMR5; storeReg(r6); } STMR7: 
is lsmul=8 {} STMR7: is STMR6 { build STMR6; storeReg(r7); } STMR8: is lsmul=9 {} STMR8: is STMR7 { build STMR7; storeReg(r8); } STMR9: is lsmul=10 {} STMR9: is STMR8 { build STMR8; storeReg(r9); } STMR10: is lsmul=11 {} STMR10: is STMR9 { build STMR9; storeReg(r10); } STMR11: is lsmul=12 {} STMR11: is STMR10 { build STMR10; storeReg(r11); } STMR12: is lsmul=13 {} STMR12: is STMR11 { build STMR11; storeReg(r12); } STMR13: is lsmul=14 {} STMR13: is STMR12 { build STMR12; storeReg(r13); } STMR14: is lsmul=15 {} STMR14: is STMR13 { build STMR13; storeReg(r14); } STMR15: is lsmul=16 {} STMR15: is STMR14 { build STMR14; storeReg(r15); } STMR16: is lsmul=17 {} STMR16: is STMR15 { build STMR15; storeReg(r16); } STMR17: is lsmul=18 {} STMR17: is STMR16 { build STMR16; storeReg(r17); } STMR18: is lsmul=19 {} STMR18: is STMR17 { build STMR17; storeReg(r18); } STMR19: is lsmul=20 {} STMR19: is STMR18 { build STMR18; storeReg(r19); } STMR20: is lsmul=21 {} STMR20: is STMR19 { build STMR19; storeReg(r20); } STMR21: is lsmul=22 {} STMR21: is STMR20 { build STMR20; storeReg(r21); } STMR22: is lsmul=23 {} STMR22: is STMR21 { build STMR21; storeReg(r22); } STMR23: is lsmul=24 {} STMR23: is STMR22 { build STMR22; storeReg(r23); } STMR24: is lsmul=25 {} STMR24: is STMR23 { build STMR23; storeReg(r24); } STMR25: is lsmul=26 {} STMR25: is STMR24 { build STMR24; storeReg(r25); } STMR26: is lsmul=27 {} STMR26: is STMR25 { build STMR25; storeReg(r26); } STMR27: is lsmul=28 {} STMR27: is STMR26 { build STMR26; storeReg(r27); } STMR28: is lsmul=29 {} STMR28: is STMR27 { build STMR27; storeReg(r28); } STMR29: is lsmul=30 {} STMR29: is STMR28 { build STMR28; storeReg(r29); } STMR30: is lsmul=31 {} STMR30: is STMR29 { build STMR29; storeReg(r30); } STMR31: is STMR30 { build STMR30; storeReg(r31); } :stmw S,dPlusRaOrZeroAddress is $(NOTVLE) & OP=47 & S & BITS_21_25 & dPlusRaOrZeroAddress & STMR31 [ lsmul = BITS_21_25; ] { tea = dPlusRaOrZeroAddress; build STMR31; } 
================================================
FILE: pypcode/processors/PowerPC/data/languages/stswiInstructions.sinc
================================================
#stswi r5,r3,0x02 7c a4 14 aa
#stswi r5,r4,0x08 7c a4 44 aa
# DYN_Sk exports the general-purpose register k places after S, i.e. register
# number (S + k) mod 0x1f + 1 fields... more precisely (BITS_21_25 + k) & 0x1f,
# as a dynamic varnode in the "register" space (wrap-around from r31 to r0).
# This lets the stswi constructors below name up to 8 consecutive registers
# without enumerating every starting register.
DYN_S1: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 1)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_S2: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 2)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_S3: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 3)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_S4: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 4)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_S5: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 5)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_S6: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 6)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
DYN_S7: regaddr is BITS_21_25 [ regaddr = ((BITS_21_25 + 7)&0x1f) * $(REGISTER_SIZE); ] { export *[register]:$(REGISTER_SIZE) regaddr; }
# The NB byte-count field is split into BITS_13_15 (full-register count) and BH
# (leftover bytes); one constructor per combination.  storeRegister /
# storeRegisterPartial are macros defined elsewhere in this processor spec —
# presumably they write to memory at ea and advance it; TODO confirm.
# NB encoding 0 means 32 bytes: store S plus the next seven registers.
:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=0 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 & DYN_S7
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO;
  storeRegister(S,ea);
  storeRegister(DYN_S1,ea);
  storeRegister(DYN_S2,ea);
  storeRegister(DYN_S3,ea);
  storeRegister(DYN_S4,ea);
  storeRegister(DYN_S5,ea);
  storeRegister(DYN_S6,ea);
  storeRegister(DYN_S7,ea);
}
# Fewer than 4 bytes: partial store of S only.
:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=0 & BH & XOP_1_10=725 & BIT_0=0
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO;
  sa:1 = BH;
  storeRegisterPartial(S,ea,sa);
}
# Exactly one full register (NB = 4).
:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=1 & BH=0 & XOP_1_10=725 & BIT_0=0
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO;
  storeRegister(S,ea);
}
:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB &
BITS_13_15=1 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); sa:1 = BH; storeRegisterPartial(DYN_S1,ea,sa); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=2 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=2 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); sa:1 = BH; storeRegisterPartial(DYN_S2,ea,sa); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=3 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=3 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); sa:1 = BH; storeRegisterPartial(DYN_S3,ea,sa); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=4 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=4 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); sa:1 = BH; storeRegisterPartial(DYN_S4,ea,sa); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=5 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); storeRegister(DYN_S4,ea); } 
:stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=5 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); storeRegister(DYN_S4,ea); sa:1 = BH; storeRegisterPartial(DYN_S5,ea,sa); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=6 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); storeRegister(DYN_S4,ea); storeRegister(DYN_S5,ea); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=6 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); storeRegister(DYN_S4,ea); storeRegister(DYN_S5,ea); sa:1 = BH; storeRegisterPartial(DYN_S6,ea,sa); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=7 & BH=0 & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); storeRegister(DYN_S4,ea); storeRegister(DYN_S5,ea); storeRegister(DYN_S6,ea); } :stswi S,RA_OR_ZERO,NB is OP=31 & S & RA_OR_ZERO & NB & BITS_13_15=7 & BH & XOP_1_10=725 & BIT_0=0 & DYN_S1 & DYN_S2 & DYN_S3 & DYN_S4 & DYN_S5 & DYN_S6 & DYN_S7 { ea:$(REGISTER_SIZE) = RA_OR_ZERO; storeRegister(S,ea); storeRegister(DYN_S1,ea); storeRegister(DYN_S2,ea); storeRegister(DYN_S3,ea); storeRegister(DYN_S4,ea); storeRegister(DYN_S5,ea); storeRegister(DYN_S6,ea); sa:1 = BH; storeRegisterPartial(DYN_S7,ea,sa); } ================================================ FILE: pypcode/processors/PowerPC/data/languages/vsx.sinc 
================================================
# Source for information on instructions:
# PowerISA_V2.06B_PUBLIC.pdf (dated: July 23, 2010)
# and binutils-2.21.1
# version 1.0
# ==========================================================================================================
# VSX use of XA,XB,XC,XT
# ==========================================================================================================
# PowerPC VSX allows VSX register values to come from a combination of 2 different fields.
# XA is the value of A and AX concatenated. (A has 5 bits and AX 1, so allows for 6 bits or 64 registers).
# XB is the value of B and BX concatenated. (B has 5 bits and BX 1, so allows for 6 bits or 64 registers).
# XC is the value of C and CX concatenated. (C has 5 bits and CX 1, so allows for 6 bits or 64 registers).
# XT is the value of T and TX concatenated. (T has 5 bits and TX 1, so allows for 6 bits or 64 registers).
#
# NOTE: A,B,C,T are all 5 bits long and AX,BX,CX,TX are all 1 bit long.
#
# In order to print the registers defined in XA,XB,XC,XT we need to play some tricks.
# Normally you use an "attach variables [ field ... ] [ name1 ... ];" to attach names to fields, but
# here we would need to attach names to 2 fields at once, and that is not directly supported in sleigh.
#
# We attach the low registers (0 to 31) to fields that overlap the normal A,B,C,T, named Avsa, Bvsa, Cvsa, Tvsa.
# We attach the high registers (32 to 63) to fields that overlap the normal A,B,C,T, named Avsb, Bvsb, Cvsb, Tvsb.
#
# Then we make constructors dependent on the AX,BX,CX,TX values to switch between them as needed.
#define token instr(32)
#...
# support VSX args
# Avsa=(16,20)
# Avsb=(16,20)
# Bvsa=(11,15)
# Bvsb=(11,15)
# Cvsa=(6,10)
# Cvsb=(6,10)
# Tvsa=(21,25)
# Tvsb=(21,25)
#...
#;
# Attach low VSX registers
attach variables [ Avsa Bvsa Cvsa Svsa Tvsa ] [ vs0 vs1 vs2 vs3 vs4 vs5 vs6 vs7 vs8 vs9 vs10 vs11 vs12 vs13 vs14 vs15 vs16 vs17 vs18 vs19 vs20 vs21 vs22 vs23 vs24 vs25 vs26 vs27 vs28 vs29 vs30 vs31 ];
# Attach hi VSX registers
attach variables [ Avsb Bvsb Cvsb Svsb Tvsb ] [ vs32 vs33 vs34 vs35 vs36 vs37 vs38 vs39 vs40 vs41 vs42 vs43 vs44 vs45 vs46 vs47 vs48 vs49 vs50 vs51 vs52 vs53 vs54 vs55 vs56 vs57 vs58 vs59 vs60 vs61 vs62 vs63 ];
# The vrN_64_0 names suggest these attach the hi VSX register numbers to 64-bit
# sub-registers of the vector registers vr0-vr31 (VSX hi half aliases the VMX
# file) — defined elsewhere in this spec; TODO confirm.
attach variables [ Svsbx Tvsbx ] [ vr0_64_0 vr1_64_0 vr2_64_0 vr3_64_0 vr4_64_0 vr5_64_0 vr6_64_0 vr7_64_0 vr8_64_0 vr9_64_0 vr10_64_0 vr11_64_0 vr12_64_0 vr13_64_0 vr14_64_0 vr15_64_0 vr16_64_0 vr17_64_0 vr18_64_0 vr19_64_0 vr20_64_0 vr21_64_0 vr22_64_0 vr23_64_0 vr24_64_0 vr25_64_0 vr26_64_0 vr27_64_0 vr28_64_0 vr29_64_0 vr30_64_0 vr31_64_0 ];
# Each 6-bit VSX register operand is modeled as a two-alternative subtable:
# the extension bit (AX/BX/CX/SX/TX) selects the low (vs0-vs31) or hi
# (vs32-vs63) attached register, which the subtable exports.
XA: Avsa is Avsa & AX=0 { export Avsa; } # Low register version of XA (i.e. A and AX fields)
XA: Avsb is Avsb & AX=1 { export Avsb; } # Hi register version of XA (i.e. A and AX fields)
XB: Bvsa is Bvsa & BX=0 { export Bvsa; } # Low register version of XB (i.e. B and BX fields)
XB: Bvsb is Bvsb & BX=1 { export Bvsb; } # Hi register version of XB (i.e. B and BX fields)
XC: Cvsa is Cvsa & CX=0 { export Cvsa; } # Low register version of XC (i.e. C and CX fields)
XC: Cvsb is Cvsb & CX=1 { export Cvsb; } # Hi register version of XC (i.e. C and CX fields)
XS: Svsa is Svsa & SX=0 { export Svsa; }
XS: Svsb is Svsb & SX=1 { export Svsb; }
XS3: Svsa is Svsa & SX3=0 { export Svsa; }
XS3: Svsb is Svsb & SX3=1 { export Svsb; }
XT: Tvsa is Tvsa & TX=0 { export Tvsa; } # Low register version of XT (i.e. T and TX fields)
XT: Tvsb is Tvsb & TX=1 { export Tvsb; } # Hi register version of XT (i.e. T and TX fields)
XT3: Tvsa is Tvsa & TX3=0 { export Tvsa; } # Low register version of XT3 (i.e. T and TX3 fields)
XT3: Tvsb is Tvsb & TX3=1 { export Tvsb; } # Hi register version of XT3 (i.e. T and TX3 fields)
# FP-register flavors: low half maps onto the scalar FPRs, hi half onto the
# 64-bit vector sub-registers attached above.
XSF: fS is fS & SX=0 { export fS; }
XSF: Svsbx is Svsbx & SX=1 { export Svsbx; }
XTF: fT is fT &
TX=0 { export fT; }
XTF: Tvsbx is Tvsbx & TX=1 { export Tvsbx; }
# DBUILD reassembles the split DM immediate (DC6:DM2:DX) into one constant byte.
DBUILD: val is DX & DM2 & DC6 [ val = (DC6 << 6) | (DM2 << 5) | DX; ] { export *[const]:1 val; }
# ==========================================================================================================
# ==========================================================================================================
define pcodeop lxvdsxOp;
# ISA-info: lxvdsx - Form "XX1" Page 339 Category "VSX"
# binutils: vsx.d: 8: 7d 0a a2 99 lxvdsx vs40,r10,r20
# Load & splat doubleword — modeled only as an opaque pcodeop of the two
# address operands, not an actual memory access.
:lxvdsx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=332 & TX { XT = lxvdsxOp(A,B); }
# lxsdx XT,RA,RB
# ISA-info: lxsdx - Form "XX1" Page 338 Category "VSX"
# binutils: vsx.d: 0: 7d 0a a4 99 lxsdx vs40,r10,r20
# Loads one doubleword into the low 64 bits of XT; the upper bits are untouched.
:lxsdx XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=588 & TX
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
  XT[0,64] = *:8 ea;
}
# name lxvd2x code 7c000698 mask fe0700fc00000000 flags @VSX operands 69 31 38 0 0 0 0 0
# First doubleword in memory goes to the high half of XT, second to the low half.
:lxvd2x XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=844
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
  XT[64,64] = *:8 ea;
  XT[0,64] = *:8 (ea+8);
}
define pcodeop stxsdxOp;
# ISA-info: stxsdx - Form "XX1" Page 340 Category "VSX"
# binutils: vsx.d: 10: 7d 0a a5 99 stxsdx vs40,r10,r20
# NOTE(review): this stores stxsdxOp(RA_OR_ZERO,B) — a pcodeop of the address
# operands — instead of any part of the source VSR named in the display (XT is
# matched but unused).  Looks wrong per the ISA (stxsdx stores the high
# doubleword of the source VSR); kept as-is from upstream — confirm before use.
:stxsdx XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=716 & TX
{
  EA:$(REGISTER_SIZE) = RA_OR_ZERO + B;
  *[ram]:8 EA = stxsdxOp(RA_OR_ZERO,B);
}
# name stxvd2x code 7c000798 mask fe0700fc00000000 flags @VSX operands 69 31 38 0 0 0 0 0
# XS(8) is the high 8 bytes (varnode truncation at byte offset 8), XS:8 the low
# 8 bytes — mirror image of lxvd2x above.
:stxvd2x XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=972
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
  *:8 ea = XS(8);
  *:8 (ea+8) = XS:8;
}
# ISA-cmt: lxvw4x - Load VSR Vector Word*4 Indexed
# ISA-info: lxvw4x - Form "XX1" Page 339 Category "VSX"
# binutils: vsx.d: c: 7d 0a a6 19 lxvw4x vs40,r10,r20
# Four words loaded most-significant-first into XT.
:lxvw4x XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XT & RA_OR_ZERO & B & XOP_1_10=780
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
  XT[96,32] = *:4 ea;
XT[64,32] = *:4 (ea + 4);
  XT[32,32] = *:4 (ea + 8);
  XT[0,32] = *:4 (ea + 12);
}
# ISA-cmt: stxvw4x - Store VSR Vector Word*4 Indexed
# ISA-info: stxvw4x - Form "XX1" Page 341 Category "VSX"
# binutils: vsx.d: 18: 7d 0a a7 19 stxvw4x vs40,r10,r20
:stxvw4x XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=908
{
  ea:$(REGISTER_SIZE) = RA_OR_ZERO + B;
  *:16 ea = XS;
}
# ISA-cmt: xxsldwi - VSX Shift Left Double by Word Immediate
# ISA-info: xxsldwi - Form "XX3" Page 501 Category "VSX"
# binutils: vsx.d: 270: f1 12 e2 17 xxsldwi vs40,vs50,vs60,2
# Concatenate XA:XB into a 256-bit value, then take the 128-bit window starting
# SHW words from the top.
:xxsldwi XT,XA,XB,SHW is $(NOTVLE) & OP=60 & BIT_10 & SHW & BITS_3_7=2 & XA & XB & XT
{
  tmp:32 = (zext(XA) << 128) | zext(XB);
  tmp = tmp >> ((7 - (SHW+3)) * 32);
  XT = tmp:16;
}
define pcodeop xxselOp;
# ISA-cmt: xxsel - VSX Select
# ISA-info: xxsel - Form "XX4" Page 500 Category "VSX"
# binutils: vsx.d: 26c: f1 12 e7 bf xxsel vs40,vs50,vs60,vs62
# Bitwise select per PowerISA: each result bit is taken from XB where the
# corresponding XC mask bit is 1, otherwise from XA.
# FIX: the original body was `xxselOp(XA,XB,XC);` with the result discarded,
# so the destination register XT was never written; implement the actual
# semantics instead (xxselOp is retained above for compatibility).
:xxsel XT,XA,XB,XC is $(NOTVLE) & OP=60 & XT & XA & XB & XC & BITS_4_5=3
{
  XT = (XA & ~XC) | (XB & XC);
}
define pcodeop xxpermdiOp;
# :xxpermdi BITS_21_25,TX,A,AX,B,BX,DM is $(NOTVLE) & OP=60 & XOP_3_10=10 & BITS_21_25 & TX & A & AX & B & BX & DM { xxpermdiOp(A,B); }
# ISA-cmt: xxpermdi - VSX Permute Doubleword Immediate
# ISA-info: xxpermdi - Form "XX3" Page 500 Category "VSX"
# binutils: power7.d: 30: f0 64 29 50 xxpermdi vs3,vs4,vs5,1
# binutils: power7.d: 34: f1 6c 69 57 xxpermdi vs43,vs44,vs45,1
# binutils: power7.d: 38: f0 64 2a 50 xxpermdi vs3,vs4,vs5,2
# binutils: power7.d: 3c: f1 6c 6a 57 xxpermdi vs43,vs44,vs45,2
# binutils: vsx.d: 23c: f1 12 e1 57 xxpermdi vs40,vs50,vs60,1
# binutils: vsx.d: 240: f1 12 e2 57 xxpermdi vs40,vs50,vs60,2
# Opaque-stub style used throughout this file: the pcodeop is invoked with the
# operands (including XT) but the result is not modeled.
:xxpermdi XT,XA,XB,DM is $(NOTVLE) & OP=60 & OE & DM & BITS_3_7=10 & XA & XB & XT { xxpermdiOp(XA,XB,XT); }
define pcodeop xxmrghwOp;
# ISA-cmt: xxmrghw - VSX Merge High Word
# ISA-info: xxmrghw - Form "XX3" Page 499 Category "VSX"
# binutils: vsx.d: 230: f1 12 e0 97 xxmrghw vs40,vs50,vs60
:xxmrghw XT,XA,XB is $(NOTVLE) & OP=60 &
XOP_3_10=18 & XA & XB & XT { xxmrghwOp(XA,XB,XT); } define pcodeop xsadddpOp; # ISA-cmt: xsadddp - VSX Scalar Add Double-Precision # ISA-info: xsadddp - Form "XX3" Page 342 Category "VSX" # binutils: vsx.d: 20: f1 12 e1 07 xsadddp vs40,vs50,vs60 :xsadddp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=32 & XA & XB & XT { src1:8 = XA:8; src2:8 = XB:8; local src = src1 f+ src2; XT[0,64] = src; } define pcodeop xsmaddadpOp; # ISA-cmt: xsmaddadp - VSX Scalar Multiply-Add Type-A Double-Precision # ISA-info: xsmaddadp - Form "XX3" Page 365 Category "VSX" # binutils: vsx.d: 54: f1 12 e1 0f xsmaddadp vs40,vs50,vs60 :xsmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=33 & XA & XB & XT { xsmaddadpOp(XA,XB,XT); } define pcodeop xscmpudpOp; # ISA-cmt: xscmpudp - VSX Scalar Compare Unordered Double-Precision # ISA-info: xscmpudp - Form "XX3" Page 349 Category "VSX" # binutils: vsx.d: 28: f0 92 e1 1e xscmpudp cr1,vs50,vs60 :xscmpudp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=35 & CRFD & BITS_21_22=0 & BIT_0=0 & XA & XB { xscmpudpOp(CRFD,XA,XB); } define pcodeop xssubdpOp; # ISA-cmt: xssubdp - VSX Scalar Subtract Double-Precision # ISA-info: xssubdp - Form "XX3" Page 393 Category "VSX" # binutils: vsx.d: a8: f1 12 e1 47 xssubdp vs40,vs50,vs60 :xssubdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=40 & XA & XB & XT { xssubdpOp(XA,XB,XT); } define pcodeop xsmaddmdpOp; # ISA-cmt: xsmaddmdp - VSX Scalar Multiply-Add Type-M Double-Precision # ISA-info: xsmaddmdp - Form "XX3" Page 365 Category "VSX" # binutils: vsx.d: 58: f1 12 e1 4f xsmaddmdp vs40,vs50,vs60 :xsmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=41 & XA & XB & XT { xsmaddmdpOp(XA,XB,XT); } define pcodeop xscmpodpOp; # ISA-cmt: xscmpodp - VSX Scalar Compare Ordered Double-Precision # ISA-info: xscmpodp - Form "XX3" Page 347 Category "VSX" # binutils: vsx.d: 24: f0 92 e1 5e xscmpodp cr1,vs50,vs60 :xscmpodp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=43 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xscmpodpOp(CRFD,XA,XB); } define pcodeop 
xsmuldpOp; # ISA-cmt: xsmuldp - VSX Scalar Multiply Double-Precision # ISA-info: xsmuldp - Form "XX3" Page 375 Category "VSX" # binutils: vsx.d: 6c: f1 12 e1 87 xsmuldp vs40,vs50,vs60 :xsmuldp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=48 & XA & XB & XT { xsmuldpOp(XA,XB,XT); } define pcodeop xsmsubadpOp; # ISA-cmt: xsmsubadp - VSX Scalar Multiply-Subtract Type-A Double-Precision # ISA-info: xsmsubadp - Form "XX3" Page 372 Category "VSX" # binutils: vsx.d: 64: f1 12 e1 8f xsmsubadp vs40,vs50,vs60 :xsmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=49 & XA & XB & XT { xsmsubadpOp(XA,XB,XT); } define pcodeop xxmrglwOp; # ISA-cmt: xxmrglw - VSX Merge Low Word # ISA-info: xxmrglw - Form "XX3" Page 499 Category "VSX" # binutils: vsx.d: 234: f1 12 e1 97 xxmrglw vs40,vs50,vs60 :xxmrglw XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=50 & XA & XB & XT { xxmrglwOp(XA,XB,XT); } define pcodeop xsdivdpOp; # ISA-cmt: xsdivdp - VSX Scalar Divide Double-Precision # ISA-info: xsdivdp - Form "XX3" Page 363 Category "VSX" # binutils: vsx.d: 50: f1 12 e1 c7 xsdivdp vs40,vs50,vs60 :xsdivdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=56 & XA & XB & XT { xsdivdpOp(XA,XB,XT); } define pcodeop xsmsubmdpOp; # ISA-cmt: xsmsubmdp - VSX Scalar Multiply-Subtract Type-M Double-Precision # ISA-info: xsmsubmdp - Form "XX3" Page 372 Category "VSX" # binutils: vsx.d: 68: f1 12 e1 cf xsmsubmdp vs40,vs50,vs60 :xsmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=57 & XA & XB & XT { xsmsubmdpOp(XA,XB,XT); } define pcodeop xstdivdpOp; # ISA-cmt: xstdivdp - VSX Scalar Test for software Divide Double-Precision # ISA-info: xstdivdp - Form "XX3" Page 395 Category "VSX" # binutils: vsx.d: ac: f0 92 e1 ee xstdivdp cr1,vs50,vs60 :xstdivdp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=61 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xstdivdpOp(CRFD,XA,XB); } define pcodeop xvaddspOp; # ISA-cmt: xvaddsp - VSX Vector Add Single-Precision # ISA-info: xvaddsp - Form "XX3" Page 402 Category "VSX" # binutils: vsx.d: c0: f1 12 e2 07 
xvaddsp vs40,vs50,vs60 :xvaddsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=64 & XA & XB & XT { xvaddspOp(XA,XB,XT); } define pcodeop xvmaddaspOp; # ISA-cmt: xvmaddasp - VSX Vector Multiply-Add Type-A Single-Precision # ISA-info: xvmaddasp - Form "XX3" Page 437 Category "VSX" # binutils: vsx.d: 164: f1 12 e2 0f xvmaddasp vs40,vs50,vs60 :xvmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=65 & XA & XB & XT { xvmaddaspOp(XA,XB,XT); } define pcodeop xvcmpeqspOp; # ISA-cmt: xvcmpeqsp - VSX Vector Compare Equal To Single-Precision # ISA-info: xvcmpeqsp - Form "XX3" Page 405 Category "VSX" # binutils: vsx.d: cc: f1 12 e2 1f xvcmpeqsp vs40,vs50,vs60 :xvcmpeqsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=67 & BIT_10=0 & XA & XB & XT { xvcmpeqspOp(XA,XB,XT); } define pcodeop xvcmpeqspDotOp; # ISA-cmt: xvcmpeqsp. - VSX Vector Compare Equal To Single-Precision & Record # ISA-info: xvcmpeqsp. - Form "XX3" Page 405 Category "VSX" # binutils: mytest.d: 1b8: f0 43 26 18 xvcmpeqsp. vs2,vs3,vs4 :xvcmpeqsp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=67 & BIT_10=1 & XA & XB & XT { xvcmpeqspDotOp(XA,XB,XT); } define pcodeop xvsubspOp; # ISA-cmt: xvsubsp - VSX Vector Subtract Single-Precision # ISA-info: xvsubsp - Form "XX3" Page 491 Category "VSX" # binutils: vsx.d: 208: f1 12 e2 47 xvsubsp vs40,vs50,vs60 :xvsubsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=72 & XA & XB & XT { xvsubspOp(XA,XB,XT); } define pcodeop xscvdpuxwsOp; # ISA-cmt: xscvdpuxws - VSX Scalar truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Word format with Saturate # ISA-info: xscvdpuxws - Form "XX2" Page 359 Category "VSX" # binutils: vsx.d: 40: f1 00 e1 23 xscvdpuxws vs40,vs60 :xscvdpuxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=72 & BITS_16_20=0 & XB & XT { xscvdpuxwsOp(XB,XT); } define pcodeop xvmaddmspOp; # ISA-cmt: xvmaddmsp - VSX Vector Multiply-Add Type-M Single-Precision # ISA-info: xvmaddmsp - Form "XX3" Page 440 Category "VSX" # binutils: vsx.d: 168: f1 12 e2 4f xvmaddmsp vs40,vs50,vs60 
#
# Power ISA v2.06 VSX (Vector-Scalar Extension) instructions, primary opcode 60.
# Each instruction's semantics are modeled as an opaque black-box pcodeop named
# after the mnemonic; only the plain logical ops (xxland, xxlandc, xxlor,
# xxlxor, xxlnor) are given real p-code expressions. Convention throughout this
# section: XX3-form ops call op(XA,XB,XT), XX2-form ops call op(XB,XT), and the
# test ops (xvtdiv*/x*tsqrt*) call op(CRFD,...) since they write a CR field.
#

:xvmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=73 & XA & XB & XT { xvmaddmspOp(XA,XB,XT); }

define pcodeop xsrdpiOp;
# ISA-cmt: xsrdpi - VSX Scalar Round to Double-Precision Integer
# ISA-info: xsrdpi - Form "XX2" Page 386 Category "VSX"
# binutils: vsx.d: 88: f1 00 e1 27 xsrdpi vs40,vs60
:xsrdpi XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=73 & BITS_16_20=0 & XB & XT { xsrdpiOp(XB,XT); }

define pcodeop xsrsqrtedpOp;
# ISA-cmt: xsrsqrtedp - VSX Scalar Reciprocal Square Root Estimate Double-Precision
# ISA-info: xsrsqrtedp - Form "XX2" Page 391 Category "VSX"
# binutils: vsx.d: a0: f1 00 e1 2b xsrsqrtedp vs40,vs60
:xsrsqrtedp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=74 & BITS_16_20=0 & XB & XT { xsrsqrtedpOp(XB,XT); }

define pcodeop xssqrtdpOp;
# ISA-cmt: xssqrtdp - VSX Scalar Square Root Double-Precision
# ISA-info: xssqrtdp - Form "XX2" Page 392 Category "VSX"
# binutils: vsx.d: a4: f1 00 e1 2f xssqrtdp vs40,vs60
:xssqrtdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=75 & BITS_16_20=0 & XB & XT { xssqrtdpOp(XB,XT); }

define pcodeop xvcmpgtspOp;
# ISA-cmt: xvcmpgtsp - VSX Vector Compare Greater Than Single-Precision
# ISA-info: xvcmpgtsp - Form "XX3" Page 409 Category "VSX"
# binutils: vsx.d: ec: f1 12 e2 5f xvcmpgtsp vs40,vs50,vs60
:xvcmpgtsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=75 & BIT_10=0 & XA & XB & XT { xvcmpgtspOp(XA,XB,XT); }

define pcodeop xvcmpgtspDotOp;
# ISA-cmt: xvcmpgtsp. - VSX Vector Compare Greater Than Single-Precision & Record
# ISA-info: xvcmpgtsp. - Form "XX3" Page 409 Category "VSX"
# binutils: mytest.d: 1bc: f0 43 26 58 xvcmpgtsp. vs2,vs3,vs4
:xvcmpgtsp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=75 & BIT_10=1 & XA & XB & XT { xvcmpgtspDotOp(XA,XB,XT); }

define pcodeop xvmulspOp;
# ISA-cmt: xvmulsp - VSX Vector Multiply Single-Precision
# ISA-info: xvmulsp - Form "XX3" Page 459 Category "VSX"
# binutils: vsx.d: 190: f1 12 e2 87 xvmulsp vs40,vs50,vs60
:xvmulsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=80 & XA & XB & XT { xvmulspOp(XA,XB,XT); }

define pcodeop xvmsubaspOp;
# ISA-cmt: xvmsubasp - VSX Vector Multiply-Subtract Type-A Single-Precision
# ISA-info: xvmsubasp - Form "XX3" Page 451 Category "VSX"
# binutils: vsx.d: 184: f1 12 e2 8f xvmsubasp vs40,vs50,vs60
:xvmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=81 & XA & XB & XT { xvmsubaspOp(XA,XB,XT); }

define pcodeop xvcmpgespOp;
# ISA-cmt: xvcmpgesp - VSX Vector Compare Greater Than or Equal To Single-Precision
# ISA-info: xvcmpgesp - Form "XX3" Page 407 Category "VSX"
# binutils: vsx.d: dc: f1 12 e2 9f xvcmpgesp vs40,vs50,vs60
:xvcmpgesp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=83 & BIT_10=0 & XA & XB & XT { xvcmpgespOp(XA,XB,XT); }

define pcodeop xvcmpgespDotOp;
# ISA-cmt: xvcmpgesp. - VSX Vector Compare Greater Than or Equal To Single-Precision & Record
# ISA-info: xvcmpgesp. - Form "XX3" Page 407 Category "VSX"
# binutils: mytest.d: 1c0: f0 43 26 98 xvcmpgesp. vs2,vs3,vs4
:xvcmpgesp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=83 & BIT_10=1 & XA & XB & XT { xvcmpgespDotOp(XA,XB,XT); }

define pcodeop xvdivspOp;
# ISA-cmt: xvdivsp - VSX Vector Divide Single-Precision
# ISA-info: xvdivsp - Form "XX3" Page 435 Category "VSX"
# binutils: vsx.d: 158: f1 12 e2 c7 xvdivsp vs40,vs50,vs60
:xvdivsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=88 & XA & XB & XT { xvdivspOp(XA,XB,XT); }

define pcodeop xscvdpsxwsOp;
# ISA-cmt: xscvdpsxws - VSX Scalar truncate Double-Precision to integer and Convert to Signed Fixed-Point Word format with Saturate
# ISA-info: xscvdpsxws - Form "XX2" Page 355 Category "VSX"
# binutils: vsx.d: 38: f1 00 e1 63 xscvdpsxws vs40,vs60
:xscvdpsxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=88 & BITS_16_20=0 & XB & XT { xscvdpsxwsOp(XB,XT); }

define pcodeop xvmsubmspOp;
# ISA-cmt: xvmsubmsp - VSX Vector Multiply-Subtract Type-M Single-Precision
# ISA-info: xvmsubmsp - Form "XX3" Page 454 Category "VSX"
# binutils: vsx.d: 188: f1 12 e2 cf xvmsubmsp vs40,vs50,vs60
:xvmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=89 & XA & XB & XT { xvmsubmspOp(XA,XB,XT); }

define pcodeop xsrdpizOp;
# ISA-cmt: xsrdpiz - VSX Scalar Round to Double-Precision Integer toward Zero
# ISA-info: xsrdpiz - Form "XX2" Page 389 Category "VSX"
# binutils: vsx.d: 98: f1 00 e1 67 xsrdpiz vs40,vs60
:xsrdpiz XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=89 & BITS_16_20=0 & XB & XT { xsrdpizOp(XB,XT); }

define pcodeop xsredpOp;
# ISA-cmt: xsredp - VSX Scalar Reciprocal Estimate Double-Precision
# ISA-info: xsredp - Form "XX2" Page 390 Category "VSX"
# binutils: vsx.d: 9c: f1 00 e1 6b xsredp vs40,vs60
:xsredp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=90 & BITS_16_20=0 & XB & XT { xsredpOp(XB,XT); }

define pcodeop xvtdivspOp;
# ISA-cmt: xvtdivsp - VSX Vector Test for software Divide Single-Precision
# ISA-info: xvtdivsp - Form "XX3" Page 494 Category "VSX"
# binutils: vsx.d: 210: f0 92 e2 ee xvtdivsp cr1,vs50,vs60
:xvtdivsp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=93 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xvtdivspOp(CRFD,XA,XB); }

define pcodeop xvadddpOp;
# ISA-cmt: xvadddp - VSX Vector Add Double-Precision
# ISA-info: xvadddp - Form "XX3" Page 398 Category "VSX"
# binutils: vsx.d: bc: f1 12 e3 07 xvadddp vs40,vs50,vs60
:xvadddp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=96 & XA & XB & XT { xvadddpOp(XA,XB,XT); }

define pcodeop xvmaddadpOp;
# ISA-cmt: xvmaddadp - VSX Vector Multiply-Add Type-A Double-Precision
# ISA-info: xvmaddadp - Form "XX3" Page 437 Category "VSX"
# binutils: vsx.d: 15c: f1 12 e3 0f xvmaddadp vs40,vs50,vs60
:xvmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=97 & XA & XB & XT { xvmaddadpOp(XA,XB,XT); }

define pcodeop xvcmpeqdpOp;
# ISA-cmt: xvcmpeqdp - VSX Vector Compare Equal To Double-Precision
# ISA-info: xvcmpeqdp - Form "XX3" Page 404 Category "VSX"
# binutils: vsx.d: c4: f1 12 e3 1f xvcmpeqdp vs40,vs50,vs60
:xvcmpeqdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=99 & BIT_10=0 & XA & XB & XT { xvcmpeqdpOp(XA,XB,XT); }

define pcodeop xvcmpeqdpDotOp;
# ISA-cmt: xvcmpeqdp. - VSX Vector Compare Equal To Double-Precision & Record
# ISA-info: xvcmpeqdp. - Form "XX3" Page 404 Category "VSX"
# binutils: mytest.d: 1c4: f0 43 27 18 xvcmpeqdp. vs2,vs3,vs4
:xvcmpeqdp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=99 & BIT_10=1 & XA & XB & XT { xvcmpeqdpDotOp(XA,XB,XT); }

define pcodeop xvsubdpOp;
# ISA-cmt: xvsubdp - VSX Vector Subtract Double-Precision
# ISA-info: xvsubdp - Form "XX3" Page 489 Category "VSX"
# binutils: vsx.d: 204: f1 12 e3 47 xvsubdp vs40,vs50,vs60
:xvsubdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=104 & XA & XB & XT { xvsubdpOp(XA,XB,XT); }

define pcodeop xvmaddmdpOp;
# ISA-cmt: xvmaddmdp - VSX Vector Multiply-Add Type-M Double-Precision
# ISA-info: xvmaddmdp - Form "XX3" Page 440 Category "VSX"
# binutils: vsx.d: 160: f1 12 e3 4f xvmaddmdp vs40,vs50,vs60
:xvmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=105 & XA & XB & XT { xvmaddmdpOp(XA,XB,XT); }

define pcodeop xsrdpipOp;
# ISA-cmt: xsrdpip - VSX Scalar Round to Double-Precision Integer toward +Infinity
# ISA-info: xsrdpip - Form "XX2" Page 388 Category "VSX"
# binutils: vsx.d: 94: f1 00 e1 a7 xsrdpip vs40,vs60
:xsrdpip XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=105 & BITS_16_20=0 & XB & XT { xsrdpipOp(XB,XT); }

define pcodeop xstsqrtdpOp;
# ISA-cmt: xstsqrtdp - VSX Scalar Test for software Square Root Double-Precision
# ISA-info: xstsqrtdp - Form "XX2" Page 396 Category "VSX"
# binutils: vsx.d: b0: f0 80 e1 aa xstsqrtdp cr1,vs60
:xstsqrtdp CRFD,XB is $(NOTVLE) & OP=60 & XOP_2_10=106 & CRFD & BIT_0=0 & BITS_21_22=0 & BITS_16_20=0 & XB { xstsqrtdpOp(CRFD,XB); }

define pcodeop xsrdpicOp;
# ISA-cmt: xsrdpic - VSX Scalar Round to Double-Precision Integer using Current rounding mode
# ISA-info: xsrdpic - Form "XX2" Page 387 Category "VSX"
# binutils: vsx.d: 8c: f1 00 e1 af xsrdpic vs40,vs60
:xsrdpic XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=107 & BITS_16_20=0 & XB & XT { xsrdpicOp(XB,XT); }

define pcodeop xvcmpgtdpOp;
# ISA-cmt: xvcmpgtdp - VSX Vector Compare Greater Than Double-Precision
# ISA-info: xvcmpgtdp - Form "XX3" Page 408 Category "VSX"
# binutils: vsx.d: e4: f1 12 e3 5f xvcmpgtdp vs40,vs50,vs60
:xvcmpgtdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=107 & BIT_10=0 & XA & XB & XT { xvcmpgtdpOp(XA,XB,XT); }

define pcodeop xvcmpgtdpDotOp;
# ISA-cmt: xvcmpgtdp. - VSX Vector Compare Greater Than Double-Precision & Record
# ISA-info: xvcmpgtdp. - Form "XX3" Page 408 Category "VSX"
# binutils: mytest.d: 1c8: f0 43 27 58 xvcmpgtdp. vs2,vs3,vs4
:xvcmpgtdp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=107 & BIT_10=1 & XA & XB & XT { xvcmpgtdpDotOp(XA,XB,XT); }

define pcodeop xvmuldpOp;
# ISA-cmt: xvmuldp - VSX Vector Multiply Double-Precision
# ISA-info: xvmuldp - Form "XX3" Page 457 Category "VSX"
# binutils: vsx.d: 18c: f1 12 e3 87 xvmuldp vs40,vs50,vs60
:xvmuldp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=112 & XA & XB & XT { xvmuldpOp(XA,XB,XT); }

define pcodeop xvmsubadpOp;
# ISA-cmt: xvmsubadp - VSX Vector Multiply-Subtract Type-A Double-Precision
# ISA-info: xvmsubadp - Form "XX3" Page 451 Category "VSX"
# binutils: vsx.d: 17c: f1 12 e3 8f xvmsubadp vs40,vs50,vs60
:xvmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=113 & XA & XB & XT { xvmsubadpOp(XA,XB,XT); }

define pcodeop xvcmpgedpOp;
# ISA-cmt: xvcmpgedp - VSX Vector Compare Greater Than or Equal To Double-Precision
# ISA-info: xvcmpgedp - Form "XX3" Page 406 Category "VSX"
# binutils: vsx.d: d4: f1 12 e3 9f xvcmpgedp vs40,vs50,vs60
:xvcmpgedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=115 & BIT_10=0 & XA & XB & XT { xvcmpgedpOp(XA,XB,XT); }

define pcodeop xvcmpgedpDotOp;
# ISA-cmt: xvcmpgedp. - VSX Vector Compare Greater Than or Equal To Double-Precision & Record
# ISA-info: xvcmpgedp. - Form "XX3" Page 406 Category "VSX"
# binutils: mytest.d: 1cc: f0 43 27 98 xvcmpgedp. vs2,vs3,vs4
:xvcmpgedp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=115 & BIT_10=1 & XA & XB & XT { xvcmpgedpDotOp(XA,XB,XT); }

define pcodeop xvdivdpOp;
# ISA-cmt: xvdivdp - VSX Vector Divide Double-Precision
# ISA-info: xvdivdp - Form "XX3" Page 433 Category "VSX"
# binutils: vsx.d: 154: f1 12 e3 c7 xvdivdp vs40,vs50,vs60
:xvdivdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=120 & XA & XB & XT { xvdivdpOp(XA,XB,XT); }

define pcodeop xvmsubmdpOp;
# ISA-cmt: xvmsubmdp - VSX Vector Multiply-Subtract Type-M Double-Precision
# ISA-info: xvmsubmdp - Form "XX3" Page 454 Category "VSX"
# binutils: vsx.d: 180: f1 12 e3 cf xvmsubmdp vs40,vs50,vs60
:xvmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=121 & XA & XB & XT { xvmsubmdpOp(XA,XB,XT); }

define pcodeop xsrdpimOp;
# ISA-cmt: xsrdpim - VSX Scalar Round to Double-Precision Integer toward -Infinity
# ISA-info: xsrdpim - Form "XX2" Page 388 Category "VSX"
# binutils: vsx.d: 90: f1 00 e1 e7 xsrdpim vs40,vs60
:xsrdpim XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=121 & BITS_16_20=0 & XB & XT { xsrdpimOp(XB,XT); }

define pcodeop xvtdivdpOp;
# ISA-cmt: xvtdivdp - VSX Vector Test for software Divide Double-Precision
# ISA-info: xvtdivdp - Form "XX3" Page 493 Category "VSX"
# binutils: vsx.d: 20c: f0 92 e3 ee xvtdivdp cr1,vs50,vs60
:xvtdivdp CRFD,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=125 & CRFD & BIT_0=0 & BITS_21_22=0 & XA & XB { xvtdivdpOp(CRFD,XA,XB); }

# ISA-cmt: xxland - VSX Logical AND
# ISA-info: xxland - Form "XX3" Page 496 Category "VSX"
# binutils: vsx.d: 21c: f1 12 e4 17 xxland vs40,vs50,vs60
:xxland XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=130 & XA & XB & XT { XT = XA & XB; }

define pcodeop xvcvspuxwsOp;
# ISA-cmt: xvcvspuxws - VSX Vector truncate Single-Precision to integer and Convert to Unsigned Fixed-Point Word Saturate
# ISA-info: xvcvspuxws - Form "XX2" Page 427 Category "VSX"
# binutils: vsx.d: 130: f1 00 e2 23 xvcvspuxws vs40,vs60
:xvcvspuxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=136 & BI_BITS=0 & XB & XT { xvcvspuxwsOp(XB,XT); }

define pcodeop xvrspiOp;
# ISA-cmt: xvrspi - VSX Vector Round to Single-Precision Integer
# ISA-info: xvrspi - Form "XX2" Page 482 Category "VSX"
# binutils: vsx.d: 1e0: f1 00 e2 27 xvrspi vs40,vs60
:xvrspi XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=137 & BI_BITS=0 & XB & XT { xvrspiOp(XB,XT); }

# ISA-cmt: xxlandc - VSX Logical AND with Complement
# ISA-info: xxlandc - Form "XX3" Page 496 Category "VSX"
# binutils: vsx.d: 220: f1 12 e4 57 xxlandc vs40,vs50,vs60
:xxlandc XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=138 & XA & XB & XT { XT = XA & (~XB); }

define pcodeop xvrsqrtespOp;
# ISA-cmt: xvrsqrtesp - VSX Vector Reciprocal Square Root Estimate Single-Precision
# ISA-info: xvrsqrtesp - Form "XX2" Page 486 Category "VSX"
# binutils: vsx.d: 1f8: f1 00 e2 2b xvrsqrtesp vs40,vs60
:xvrsqrtesp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=138 & BI_BITS=0 & XB & XT { xvrsqrtespOp(XB,XT); }

define pcodeop xvsqrtspOp;
# ISA-cmt: xvsqrtsp - VSX Vector Square Root Single-Precision
# ISA-info: xvsqrtsp - Form "XX2" Page 488 Category "VSX"
# binutils: vsx.d: 200: f1 00 e2 2f xvsqrtsp vs40,vs60
:xvsqrtsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=139 & BI_BITS=0 & XB & XT { xvsqrtspOp(XB,XT); }

# ISA-cmt: xxlor - VSX Logical OR
# ISA-info: xxlor - Form "XX3" Page 497 Category "VSX"
# binutils: vsx.d: 228: f1 12 e4 97 xxlor vs40,vs50,vs60
:xxlor XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=146 & XA & XB & XT { XT = XA | XB; }

define pcodeop xvcvspsxwsOp;
# ISA-cmt: xvcvspsxws - VSX Vector truncate Single-Precision to integer and Convert to Signed Fixed-Point Word format with Saturate
# ISA-info: xvcvspsxws - Form "XX2" Page 423 Category "VSX"
# binutils: vsx.d: 128: f1 00 e2 63 xvcvspsxws vs40,vs60
:xvcvspsxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=152 & BI_BITS=0 & XB & XT { xvcvspsxwsOp(XB,XT); }

define pcodeop xvrspizOp;
# ISA-cmt: xvrspiz - VSX Vector Round to Single-Precision Integer toward Zero
# ISA-info: xvrspiz - Form "XX2" Page 484 Category "VSX"
# binutils: vsx.d: 1f0: f1 00 e2 67 xvrspiz vs40,vs60
:xvrspiz XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=153 & BI_BITS=0 & XB & XT { xvrspizOp(XB,XT); }

# ISA-cmt: xxlxor - VSX Logical XOR
# ISA-info: xxlxor - Form "XX3" Page 498 Category "VSX"
# binutils: vsx.d: 22c: f1 12 e4 d7 xxlxor vs40,vs50,vs60
:xxlxor XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=154 & XA & XB & XT { XT = XA ^ XB; }

define pcodeop xvrespOp;
# ISA-cmt: xvresp - VSX Vector Reciprocal Estimate Single-Precision
# ISA-info: xvresp - Form "XX2" Page 481 Category "VSX"
# binutils: vsx.d: 1dc: f1 00 e2 6b xvresp vs40,vs60
:xvresp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=154 & BI_BITS=0 & XB & XT { xvrespOp(XB,XT); }

define pcodeop xsmaxdpOp;
# ISA-cmt: xsmaxdp - VSX Scalar Maximum Double-Precision
# ISA-info: xsmaxdp - Form "XX3" Page 368 Category "VSX"
# binutils: vsx.d: 5c: f1 12 e5 07 xsmaxdp vs40,vs50,vs60
:xsmaxdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=160 & XA & XB & XT { xsmaxdpOp(XA,XB,XT); }

define pcodeop xsnmaddadpOp;
# ISA-cmt: xsnmaddadp - VSX Scalar Negative Multiply-Add Type-A Double-Precision
# ISA-info: xsnmaddadp - Form "XX3" Page 378 Category "VSX"
# binutils: vsx.d: 78: f1 12 e5 0f xsnmaddadp vs40,vs50,vs60
# FIX: previously called xsnmaddadpOp(XA,XB), dropping the XT target operand;
# pass XT like every other XX3-form op in this section so the output register
# is visible to p-code consumers.
:xsnmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=161 & XA & XB & XT { xsnmaddadpOp(XA,XB,XT); }

define pcodeop xxlnorOp;
# ISA-cmt: xxlnor - VSX Logical NOR
# ISA-info: xxlnor - Form "XX3" Page 497 Category "VSX"
# binutils: vsx.d: 224: f1 12 e5 17 xxlnor vs40,vs50,vs60
:xxlnor XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=162 & XA & XB & XT { XT = ~(XA | XB); }

define pcodeop xxspltwOp;
# ISA-cmt: xxspltw - VSX Splat Word
# ISA-info: xxspltw - Form "XX2" Page 501 Category "VSX"
# binutils: vsx.d: 274: f1 02 e2 93 xxspltw vs40,vs60,2
# NOTE(review): the UIM splat-index operand is displayed but not passed to the
# pcodeop, so the selected word is lost in the p-code model — confirm intent.
:xxspltw XT,XB,UIM is $(NOTVLE) & OP=60 & XOP_2_10=164 & BITS_18_20=0 & UIM & XB & XT { xxspltwOp(XB,XT); }

define pcodeop xsmindpOp;
# ISA-cmt: xsmindp - VSX Scalar Minimum Double-Precision
# ISA-info: xsmindp - Form "XX3" Page 370 Category "VSX"
# binutils: vsx.d: 60: f1 12 e5 47 xsmindp vs40,vs50,vs60
:xsmindp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=168 & XA & XB & XT { xsmindpOp(XA,XB,XT); }

define pcodeop xvcvuxwspOp;
# ISA-cmt: xvcvuxwsp - VSX Vector Convert and round Unsigned Fixed-Point Word to Single-Precision format
# ISA-info: xvcvuxwsp - Form "XX2" Page 432 Category "VSX"
# binutils: vsx.d: 150: f1 00 e2 a3 xvcvuxwsp vs40,vs60
:xvcvuxwsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=168 & BI_BITS=0 & XB & XT { xvcvuxwspOp(XB,XT); }

define pcodeop xsnmaddmdpOp;
# ISA-cmt: xsnmaddmdp - VSX Scalar Negative Multiply-Add Type-M Double-Precision
# ISA-info: xsnmaddmdp - Form "XX3" Page 378 Category "VSX"
# binutils: vsx.d: 7c: f1 12 e5 4f xsnmaddmdp vs40,vs50,vs60
:xsnmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=169 & XA & XB & XT { xsnmaddmdpOp(XA,XB,XT); }

define pcodeop xvrspipOp;
# ISA-cmt: xvrspip - VSX Vector Round to Single-Precision Integer toward +Infinity
# ISA-info: xvrspip - Form "XX2" Page 483 Category "VSX"
# binutils: vsx.d: 1ec: f1 00 e2 a7 xvrspip vs40,vs60
:xvrspip XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=169 & BI_BITS=0 & XB & XT { xvrspipOp(XB,XT); }

define pcodeop xvtsqrtspOp;
# ISA-cmt: xvtsqrtsp - VSX Vector Test for software Square Root Single-Precision
# ISA-info: xvtsqrtsp - Form "XX2" Page 495 Category "VSX"
# binutils: vsx.d: 218: f0 80 e2 aa xvtsqrtsp cr1,vs60
:xvtsqrtsp CRFD,XB is $(NOTVLE) & OP=60 & XOP_2_10=170 & CRFD & BITS_21_22=0 & BITS_16_20=0 & BIT_0=0 & XB { xvtsqrtspOp(CRFD,XB); }

define pcodeop xvrspicOp;
# ISA-cmt: xvrspic - VSX Vector Round to Single-Precision Integer using Current rounding mode
# ISA-info: xvrspic - Form "XX2" Page 482 Category "VSX"
# binutils: vsx.d: 1e4: f1 00 e2 af xvrspic vs40,vs60
:xvrspic XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=171 & BITS_16_20=0 & XB & XT { xvrspicOp(XB,XT); }

define pcodeop xscpsgndpOp;
# ISA-cmt: xscpsgndp - VSX Scalar Copy Sign Double-Precision
# ISA-info: xscpsgndp - Form "XX3" Page 351 Category "VSX"
# binutils: vsx.d: 2c: f1 12 e5 87 xscpsgndp vs40,vs50,vs60
:xscpsgndp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=176 & XA & XB & XT { xscpsgndpOp(XA,XB,XT); }

define pcodeop xsnmsubadpOp;
# ISA-cmt: xsnmsubadp - VSX Scalar Negative Multiply-Subtract Type-A Double-Precision
# ISA-info: xsnmsubadp - Form "XX3" Page 383 Category "VSX"
# binutils: vsx.d: 80: f1 12 e5 8f xsnmsubadp vs40,vs50,vs60
:xsnmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=177 & XA & XB & XT { xsnmsubadpOp(XA,XB,XT); }

define pcodeop xvcvsxwspOp;
# ISA-cmt: xvcvsxwsp - VSX Vector Convert and round Signed Fixed-Point Word to Single-Precision format
# ISA-info: xvcvsxwsp - Form "XX2" Page 430 Category "VSX"
# binutils: vsx.d: 140: f1 00 e2 e3 xvcvsxwsp vs40,vs60
:xvcvsxwsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=184 & BITS_16_20=0 & XB & XT { xvcvsxwspOp(XB,XT); }

define pcodeop xsnmsubmdpOp;
# ISA-cmt: xsnmsubmdp - VSX Scalar Negative Multiply-Subtract Type-M Double-Precision
# ISA-info: xsnmsubmdp - Form "XX3" Page 383 Category "VSX"
# binutils: vsx.d: 84: f1 12 e5 cf xsnmsubmdp vs40,vs50,vs60
:xsnmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=185 & XA & XB & XT { xsnmsubmdpOp(XA,XB,XT); }

define pcodeop xvrspimOp;
# ISA-cmt: xvrspim - VSX Vector Round to Single-Precision Integer toward -Infinity
# ISA-info: xvrspim - Form "XX2" Page 483 Category "VSX"
# binutils: vsx.d: 1e8: f1 00 e2 e7 xvrspim vs40,vs60
:xvrspim XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=185 & BITS_16_20=0 & XB & XT { xvrspimOp(XB,XT); }

define pcodeop xvmaxspOp;
# ISA-cmt: xvmaxsp - VSX Vector Maximum Single-Precision
# ISA-info: xvmaxsp - Form "XX3" Page 445 Category "VSX"
# binutils: vsx.d: 170: f1 12 e6 07 xvmaxsp vs40,vs50,vs60
:xvmaxsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=192 & XA & XB & XT { xvmaxspOp(XA,XB,XT); }

define pcodeop xvnmaddaspOp;
# ISA-cmt: xvnmaddasp - VSX Vector Negative Multiply-Add Type-A Single-Precision
# ISA-info: xvnmaddasp - Form "XX3" Page 463 Category "VSX"
# binutils: vsx.d: 1ac: f1 12 e6 0f xvnmaddasp vs40,vs50,vs60
:xvnmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=193 & XA & XB & XT { xvnmaddaspOp(XA,XB,XT); }

define pcodeop xvminspOp;
# ISA-cmt: xvminsp - VSX Vector Minimum Single-Precision
# ISA-info: xvminsp - Form "XX3" Page 449 Category "VSX"
# binutils: vsx.d: 178: f1 12 e6 47 xvminsp vs40,vs50,vs60
:xvminsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=200 & XA & XB & XT { xvminspOp(XA,XB,XT); }

define pcodeop xvcvdpuxwsOp;
# ISA-cmt: xvcvdpuxws - VSX Vector truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Word format with Saturate
# ISA-info: xvcvdpuxws - Form "XX2" Page 418 Category "VSX"
# binutils: vsx.d: 11c: f1 00 e3 23 xvcvdpuxws vs40,vs60
:xvcvdpuxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=200 & BITS_16_20=0 & XB & XT { xvcvdpuxwsOp(XB,XT); }

define pcodeop xvnmaddmspOp;
# ISA-cmt: xvnmaddmsp - VSX Vector Negative Multiply-Add Type-M Single-Precision
# ISA-info: xvnmaddmsp - Form "XX3" Page 468 Category "VSX"
# binutils: vsx.d: 1b0: f1 12 e6 4f xvnmaddmsp vs40,vs50,vs60
:xvnmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=201 & XA & XB & XT { xvnmaddmspOp(XA,XB,XT); }

define pcodeop xvrdpiOp;
# ISA-cmt: xvrdpi - VSX Vector Round to Double-Precision Integer
# ISA-info: xvrdpi - Form "XX2" Page 477 Category "VSX"
# binutils: vsx.d: 1c4: f1 00 e3 27 xvrdpi vs40,vs60
:xvrdpi XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=201 & BITS_16_20=0 & XB & XT { xvrdpiOp(XB,XT); }

define pcodeop xvrsqrtedpOp;
# ISA-cmt: xvrsqrtedp - VSX Vector Reciprocal Square Root Estimate Double-Precision
# ISA-info: xvrsqrtedp - Form "XX2" Page 485 Category "VSX"
# binutils: vsx.d: 1f4: f1 00 e3 2b xvrsqrtedp vs40,vs60
:xvrsqrtedp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=202 & BITS_16_20=0 & XB & XT { xvrsqrtedpOp(XB,XT); }

define pcodeop xvsqrtdpOp;
# ISA-cmt: xvsqrtdp - VSX Vector Square Root Double-Precision
# ISA-info: xvsqrtdp - Form "XX2" Page 487 Category "VSX"
# binutils: vsx.d: 1fc: f1 00 e3 2f xvsqrtdp vs40,vs60
:xvsqrtdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=203 & BITS_16_20=0 & XB & XT { xvsqrtdpOp(XB,XT); }

define pcodeop xvcpsgnspOp;
# ISA-cmt: xvcpsgnsp - VSX Vector Copy Sign Single-Precision
# ISA-info: xvcpsgnsp - Form "XX3" Page 410 Category "VSX"
# binutils: vsx.d: 100: f1 12 e6 87 xvcpsgnsp vs40,vs50,vs60
:xvcpsgnsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=208 & XA & XB & XT { xvcpsgnspOp(XA,XB,XT); }

define pcodeop xvnmsubaspOp;
# ISA-cmt: xvnmsubasp - VSX Vector Negative Multiply-Subtract Type-A Single-Precision
# ISA-info: xvnmsubasp - Form "XX3" Page 471 Category "VSX"
# binutils: vsx.d: 1bc: f1 12 e6 8f xvnmsubasp vs40,vs50,vs60
:xvnmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=209 & XA & XB & XT { xvnmsubaspOp(XA,XB,XT); }

define pcodeop xvcvdpsxwsOp;
# ISA-cmt: xvcvdpsxws - VSX Vector truncate Double-Precision to integer and Convert to Signed Fixed-Point Word Saturate
# ISA-info: xvcvdpsxws - Form "XX2" Page 414 Category "VSX"
# binutils: vsx.d: 114: f1 00 e3 63 xvcvdpsxws vs40,vs60
:xvcvdpsxws XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=216 & BITS_16_20=0 & XB & XT { xvcvdpsxwsOp(XB,XT); }

define pcodeop xvnmsubmspOp;
# ISA-cmt: xvnmsubmsp - VSX Vector Negative Multiply-Subtract Type-M Single-Precision
# ISA-info: xvnmsubmsp - Form "XX3" Page 474 Category "VSX"
# binutils: vsx.d: 1c0: f1 12 e6 cf xvnmsubmsp vs40,vs50,vs60
:xvnmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=217 & XA & XB & XT { xvnmsubmspOp(XA,XB,XT); }

define pcodeop xvrdpizOp;
# ISA-cmt: xvrdpiz - VSX Vector Round to Double-Precision Integer toward Zero
# ISA-info: xvrdpiz - Form "XX2" Page 479 Category "VSX"
# binutils: vsx.d: 1d4: f1 00 e3 67 xvrdpiz vs40,vs60
:xvrdpiz XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=217 & BITS_16_20=0 & XB & XT { xvrdpizOp(XB,XT); }

define pcodeop xvredpOp;
# ISA-cmt: xvredp - VSX Vector Reciprocal Estimate Double-Precision
# ISA-info: xvredp - Form "XX2" Page 480 Category "VSX"
# binutils: vsx.d: 1d8: f1 00 e3 6b xvredp vs40,vs60
:xvredp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=218 & BITS_16_20=0 & XB & XT { xvredpOp(XB,XT); }

define pcodeop xvmaxdpOp;
# ISA-cmt: xvmaxdp - VSX Vector Maximum Double-Precision
# ISA-info: xvmaxdp - Form "XX3" Page 443 Category "VSX"
# binutils: vsx.d: 16c: f1 12 e7 07 xvmaxdp vs40,vs50,vs60
:xvmaxdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=224 & XA & XB & XT { xvmaxdpOp(XA,XB,XT); }

define pcodeop xvnmaddadpOp;
# ISA-cmt: xvnmaddadp - VSX Vector Negative Multiply-Add Type-A Double-Precision
# ISA-info: xvnmaddadp - Form "XX3" Page 463 Category "VSX"
# binutils: vsx.d: 1a4: f1 12 e7 0f xvnmaddadp vs40,vs50,vs60
:xvnmaddadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=225 & XA & XB & XT { xvnmaddadpOp(XA,XB,XT); }

define pcodeop xvmindpOp;
# ISA-cmt: xvmindp - VSX Vector Minimum Double-Precision
# ISA-info: xvmindp - Form "XX3" Page 447 Category "VSX"
# binutils: vsx.d: 174: f1 12 e7 47 xvmindp vs40,vs50,vs60
:xvmindp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=232 & XA & XB & XT { xvmindpOp(XA,XB,XT); }

define pcodeop xvnmaddmdpOp;
# ISA-cmt: xvnmaddmdp - VSX Vector Negative Multiply-Add Type-M Double-Precision
# ISA-info: xvnmaddmdp - Form "XX3" Page 468 Category "VSX"
# binutils: vsx.d: 1a8: f1 12 e7 4f xvnmaddmdp vs40,vs50,vs60
:xvnmaddmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=233 & XA & XB & XT { xvnmaddmdpOp(XA,XB,XT); }

define pcodeop xvcvuxwdpOp;
# ISA-cmt: xvcvuxwdp - VSX Vector Convert Unsigned Fixed-Point Word to Double-Precision format
# ISA-info: xvcvuxwdp - Form "XX2" Page 432 Category "VSX"
# binutils: vsx.d: 14c: f1 00 e3 a3 xvcvuxwdp vs40,vs60
:xvcvuxwdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=232 & BITS_16_20=0 & XB & XT { xvcvuxwdpOp(XB,XT); }

define pcodeop xvrdpipOp;
# ISA-cmt: xvrdpip - VSX Vector Round to Double-Precision Integer toward +Infinity
# ISA-info: xvrdpip - Form "XX2" Page 479 Category "VSX"
# binutils: vsx.d: 1d0: f1 00 e3 a7 xvrdpip vs40,vs60
:xvrdpip XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=233 & BITS_16_20=0 & XB & XT { xvrdpipOp(XB,XT); }

define pcodeop xvtsqrtdpOp;
# ISA-cmt: xvtsqrtdp - VSX Vector Test for software Square Root Double-Precision
# ISA-info: xvtsqrtdp - Form "XX2" Page 495 Category "VSX"
# binutils: vsx.d: 214: f0 80 e3 aa xvtsqrtdp cr1,vs60
:xvtsqrtdp CRFD,XB is $(NOTVLE) & OP=60 & XOP_2_10=234 & CRFD & BITS_16_20=0 & BIT_0=0 & BITS_21_22=0 & XB { xvtsqrtdpOp(CRFD,XB); }

define pcodeop xvrdpicOp;
# ISA-cmt: xvrdpic - VSX Vector Round to Double-Precision Integer using Current rounding mode
# ISA-info: xvrdpic - Form "XX2" Page 478 Category "VSX"
# binutils: vsx.d: 1c8: f1 00 e3 af xvrdpic vs40,vs60
:xvrdpic XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=235 & BITS_16_20=0 & XB & XT { xvrdpicOp(XB,XT); }

define pcodeop xvcpsgndpOp;
# ISA-cmt: xvcpsgndp - VSX Vector Copy Sign Double-Precision
# ISA-info: xvcpsgndp - Form "XX3" Page 410 Category "VSX"
# binutils: power7.d: 50: f0 64 2f 80 xvcpsgndp vs3,vs4,vs5
# binutils: power7.d: 54: f1 6c 6f 87 xvcpsgndp vs43,vs44,vs45
# binutils: vsx.d: f4: f1 12 e7 87 xvcpsgndp vs40,vs50,vs60
:xvcpsgndp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=240 & XA & XB & XT { xvcpsgndpOp(XA,XB,XT); }

define pcodeop xvnmsubadpOp;
# ISA-cmt: xvnmsubadp - VSX Vector Negative Multiply-Subtract Type-A Double-Precision
# ISA-info: xvnmsubadp - Form "XX3" Page 471 Category "VSX"
# binutils: vsx.d: 1b4: f1 12 e7 8f xvnmsubadp vs40,vs50,vs60
:xvnmsubadp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=241 & XA & XB & XT { xvnmsubadpOp(XA,XB,XT); }

define pcodeop xvcvsxwdpOp;
# ISA-cmt: xvcvsxwdp - VSX Vector Convert Signed Fixed-Point Word to Double-Precision format
# ISA-info: xvcvsxwdp - Form "XX2" Page 430 Category "VSX"
# binutils: vsx.d: 13c: f1 00 e3 e3 xvcvsxwdp vs40,vs60
:xvcvsxwdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=248 & BI_BITS=0 & XB & XT { xvcvsxwdpOp(XB,XT); }

define pcodeop xvnmsubmdpOp;
# ISA-cmt: xvnmsubmdp - VSX Vector Negative Multiply-Subtract Type-M Double-Precision
# ISA-info: xvnmsubmdp - Form "XX3" Page 474 Category "VSX"
# binutils: vsx.d: 1b8: f1 12 e7 cf xvnmsubmdp vs40,vs50,vs60
:xvnmsubmdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=249 & XA & XB & XT { xvnmsubmdpOp(XA,XB,XT); }

define pcodeop xvrdpimOp;
# ISA-cmt: xvrdpim - VSX Vector Round to Double-Precision Integer toward -Infinity
# ISA-info: xvrdpim - Form "XX2" Page 478 Category "VSX"
# binutils: vsx.d: 1cc: f1 00 e3 e7 xvrdpim vs40,vs60
:xvrdpim XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=249 & BITS_16_20=0 & XB & XT { xvrdpimOp(XB,XT); }

define pcodeop xscvdpspOp;
# ISA-cmt: xscvdpsp - VSX Scalar Convert Double-Precision to Single-Precision
# ISA-info: xscvdpsp - Form "XX2" Page 352 Category "VSX"
# binutils: vsx.d: 30: f1 00 e4 27 xscvdpsp vs40,vs60
:xscvdpsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=265 & BITS_16_20=0 & XB & XT { xscvdpspOp(XB,XT); }

define pcodeop xscvdpuxdsOp;
# ISA-cmt: xscvdpuxds - VSX Scalar truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Doubleword format with Saturate
# ISA-info: xscvdpuxds - Form "XX2" Page 357 Category "VSX"
# binutils: vsx.d: 3c: f1 00 e5 23 xscvdpuxds vs40,vs60
:xscvdpuxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=328 & BITS_16_20=0 & XB & XT { xscvdpuxdsOp(XB,XT); }

define pcodeop xscvspdpOp;
# ISA-cmt: xscvspdp - VSX Scalar Convert Single-Precision to Double-Precision format
# binutils: vsx.d: 44: f1 00 e5 27 xscvspdp vs40,vs60
:xscvspdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=329 & BITS_16_20=0 & XB & XT { xscvspdpOp(XB,XT); }

define pcodeop xscvdpsxdsOp;
# ISA-cmt: xscvdpsxds - VSX Scalar truncate Double-Precision to integer and Convert to Signed Fixed-Point Doubleword format with Saturate
# ISA-info: xscvdpsxds - Form "XX2" Page 353 Category "VSX"
# binutils: vsx.d: 34: f1 00 e5 63 xscvdpsxds vs40,vs60
:xscvdpsxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=344 & BITS_16_20=0 & XB & XT { xscvdpsxdsOp(XB,XT); }

define pcodeop xsabsdpOp;
# ISA-cmt: xsabsdp - VSX Scalar Absolute Value Double-Precision
# ISA-info: xsabsdp - Form "XX2" Page 341 Category "VSX"
# binutils: vsx.d: 1c: f1 00 e5 67 xsabsdp vs40,vs60
:xsabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=345 & XT & BITS_16_20=0 & XB { xsabsdpOp(XB,XT); }

define pcodeop xscvuxddpOp;
# ISA-cmt: xscvuxddp - VSX Scalar Convert and round Unsigned Fixed-Point Doubleword to Double-Precision format
# binutils: vsx.d: 4c: f1 00 e5 a3 xscvuxddp vs40,vs60
:xscvuxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=360 & BITS_16_20=0 & XB & XT { xscvuxddpOp(XB,XT); }

define pcodeop xsnabsdpOp;
# ISA-cmt: xsnabsdp - VSX Scalar Negative Absolute Value Double-Precision
# ISA-info: xsnabsdp - Form "XX2" Page 377 Category "VSX"
# binutils: vsx.d: 70: f1 00 e5 a7 xsnabsdp vs40,vs60
:xsnabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=361 & BITS_16_20=0 & XB & XT { xsnabsdpOp(XB,XT); }

define pcodeop xscvsxddpOp;
# ISA-cmt: xscvsxddp - VSX Scalar Convert and round Signed Fixed-Point Doubleword to Double-Precision format
# ISA-info: xscvsxddp - Form "XX2" Page 361 Category "VSX"
# binutils: vsx.d: 48: f1 00 e5 e3 xscvsxddp vs40,vs60
:xscvsxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=376 & BITS_16_20=0 & XB & XT { xscvsxddpOp(XB,XT); }

define pcodeop xsnegdpOp;
# ISA-cmt: xsnegdp - VSX Scalar Negate Double-Precision
# ISA-info: xsnegdp - Form "XX2" Page 377 Category "VSX"
# binutils: vsx.d: 74: f1 00 e5 e7 xsnegdp vs40,vs60
:xsnegdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=377 & BITS_16_20=0 & XB & XT { xsnegdpOp(XB,XT); }

define pcodeop xvcvspuxdsOp;
# ISA-cmt: xvcvspuxds - VSX Vector truncate Single-Precision to integer and Convert to Unsigned Fixed-Point Doubleword format with Saturate
# ISA-info: xvcvspuxds - Form "XX2" Page 425 Category "VSX"
# binutils: vsx.d: 12c: f1 00 e6 23 xvcvspuxds vs40,vs60
:xvcvspuxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=392 & BITS_16_20=0 & XB & XT { xvcvspuxdsOp(XB,XT); }

define pcodeop xvcvdpspOp;
# ISA-cmt: xvcvdpsp - VSX Vector round and Convert Double-Precision to Single-Precision format
# ISA-info: xvcvdpsp - Form "XX2" Page 411 Category "VSX"
# binutils: vsx.d: 10c: f1 00 e6 27 xvcvdpsp vs40,vs60
:xvcvdpsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=393 & BITS_16_20=0 & XB & XT { xvcvdpspOp(XB,XT); }

define pcodeop xvcvspsxdsOp;
# ISA-cmt: xvcvspsxds - VSX Vector truncate Single-Precision to integer and Convert to Signed Fixed-Point Doubleword format with Saturate
# ISA-info: xvcvspsxds - Form "XX2" Page 421 Category "VSX"
# binutils: vsx.d: 124: f1 00 e6 63 xvcvspsxds vs40,vs60
:xvcvspsxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=408 & BITS_16_20=0 & XB & XT { xvcvspsxdsOp(XB,XT); }

define pcodeop xvabsspOp;
# ISA-cmt: xvabssp - VSX Vector Absolute Value Single-Precision
# ISA-info: xvabssp - Form "XX2" Page 397 Category "VSX"
# binutils: vsx.d: b8: f1 00 e6 67 xvabssp vs40,vs60
:xvabssp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=409 & BITS_16_20=0 & XB & XT { xvabsspOp(XB,XT); }

define pcodeop xvcvuxdspOp;
# ISA-cmt: xvcvuxdsp - VSX Vector Convert and round Unsigned Fixed-Point Doubleword to Single-Precision format
# ISA-info: xvcvuxdsp - Form "XX2" Page 431 Category "VSX"
# binutils: vsx.d: 148: f1 00 e6 a3 xvcvuxdsp vs40,vs60
:xvcvuxdsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=424 & BITS_16_20=0 & XB & XT { xvcvuxdspOp(XB,XT); }

define pcodeop xvnabsspOp;
# ISA-cmt: xvnabssp - VSX Vector Negative Absolute Value Single-Precision
# ISA-info: xvnabssp - Form "XX2" Page 461 Category "VSX"
# binutils: vsx.d: 198: f1 00 e6 a7 xvnabssp vs40,vs60
:xvnabssp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=425 & BITS_16_20=0 & XB & XT { xvnabsspOp(XB,XT); }

define pcodeop xvcvsxdspOp;
# ISA-cmt: xvcvsxdsp - VSX Vector Convert and round Signed Fixed-Point Doubleword to Single-Precision format
# ISA-info: xvcvsxdsp - Form "XX2" Page 429 Category "VSX"
# binutils: vsx.d: 138: f1 00 e6 e3 xvcvsxdsp vs40,vs60
:xvcvsxdsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=440 & BITS_16_20=0 & XB & XT { xvcvsxdspOp(XB,XT); }

define pcodeop xvnegspOp;
# ISA-cmt: xvnegsp - VSX Vector Negate Single-Precision
# ISA-info: xvnegsp - Form "XX2" Page 462 Category "VSX"
# binutils: vsx.d: 1a0: f1 00 e6 e7 xvnegsp vs40,vs60
:xvnegsp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=441 & BITS_16_20=0 & XB & XT { xvnegspOp(XB,XT); }

define pcodeop xvcvdpuxdsOp;
# ISA-cmt: xvcvdpuxds - VSX Vector truncate Double-Precision to integer and Convert to Unsigned Fixed-Point Doubleword format with Saturate
# ISA-info: xvcvdpuxds - Form "XX2" Page 416 Category "VSX"
# binutils: vsx.d: 118: f1 00 e7 23 xvcvdpuxds vs40,vs60
:xvcvdpuxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=456 & BITS_16_20=0 & XB & XT { xvcvdpuxdsOp(XB,XT); }

define pcodeop xvcvspdpOp;
# ISA-cmt: xvcvspdp - VSX Vector Convert Single-Precision to Double-Precision
# ISA-info: xvcvspdp - Form "XX2" Page 420 Category "VSX"
# binutils: vsx.d: 120: f1 00 e7 27 xvcvspdp vs40,vs60
:xvcvspdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=457 & BITS_16_20=0 & XB & XT { xvcvspdpOp(XB,XT); }

define pcodeop xvcvdpsxdsOp;
# ISA-cmt: xvcvdpsxds - VSX Vector truncate Double-Precision to integer and Convert to Signed Fixed-Point Doubleword Saturate
# ISA-info: xvcvdpsxds - Form "XX2" Page 412 Category "VSX"
# binutils: vsx.d: 110: f1 00 e7 63 xvcvdpsxds vs40,vs60
:xvcvdpsxds XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=472 & BITS_16_20=0 & XB & XT { xvcvdpsxdsOp(XB,XT); }

define pcodeop xvabsdpOp;
# ISA-cmt: xvabsdp - VSX Vector Absolute Value Double-Precision
# ISA-info: xvabsdp - Form "XX2" Page 397 Category "VSX"
# binutils: vsx.d: b4: f1 00 e7 67 xvabsdp vs40,vs60
:xvabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=473 & BITS_16_20=0 & XB & XT { xvabsdpOp(XB,XT); }

define pcodeop xvcvuxddpOp;
# ISA-cmt: xvcvuxddp - VSX Vector Convert and round Unsigned Fixed-Point Doubleword to Double-Precision format
# ISA-info: xvcvuxddp - Form "XX2" Page 431 Category "VSX"
# binutils: vsx.d: 144: f1 00 e7 a3 xvcvuxddp vs40,vs60
:xvcvuxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=488 & BITS_16_20=0 & XB & XT { xvcvuxddpOp(XB,XT); }

define pcodeop xvnabsdpOp;
# ISA-cmt: xvnabsdp - VSX Vector Negative Absolute Value Double-Precision
# ISA-info: xvnabsdp - Form "XX2" Page 461 Category "VSX"
# binutils: vsx.d: 194: f1 00 e7 a7 xvnabsdp vs40,vs60
:xvnabsdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=489 & BITS_16_20=0 & XB & XT { xvnabsdpOp(XB,XT); }

define pcodeop xvcvsxddpOp;
# ISA-cmt: xvcvsxddp - VSX Vector Convert and round Signed Fixed-Point Doubleword to Double-Precision format
# ISA-info: xvcvsxddp - Form "XX2" Page 429 Category "VSX"
# binutils: vsx.d: 134: f1 00 e7 e3 xvcvsxddp vs40,vs60
:xvcvsxddp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=504 & BITS_16_20=0 & XB & XT { xvcvsxddpOp(XB,XT); }

define pcodeop xvnegdpOp;
# ISA-cmt: xvnegdp - VSX Vector Negate Double-Precision
# ISA-info: xvnegdp - Form "XX2" Page 462 Category "VSX"
# binutils: vsx.d: 19c: f1 00 e7 e7 xvnegdp vs40,vs60
:xvnegdp XT,XB is $(NOTVLE) & OP=60 & XOP_2_10=505 & BITS_16_20=0 & XB & XT { xvnegdpOp(XB,XT); }

# Opaque placeholder pcodeops used by the v2.07 / v3.0 VSX sections that
# follow. Gaps in the numbering (vsx207_4/6/7, vsx300_6/24) match the
# original declarations.
define pcodeop vsx207_1;
define pcodeop vsx207_2;
define pcodeop vsx207_3;
define pcodeop vsx207_5;
define pcodeop vsx207_8;
define pcodeop vsx207_9;
define pcodeop vsx207_10;
define pcodeop vsx207_11;
define pcodeop vsx207_12;
define pcodeop vsx207_13;
define pcodeop vsx207_14;
define pcodeop vsx207_15;
define pcodeop vsx207_16;
define pcodeop vsx207_17;
define pcodeop vsx207_18;
define pcodeop vsx207_19;
define pcodeop vsx207_20;
define pcodeop vsx207_21;
define pcodeop vsx207_22;
define pcodeop vsx207_23;
define pcodeop vsx207_24;
define pcodeop vsx207_25;
define pcodeop vsx207_26;
define pcodeop vsx207_27;
define pcodeop vsx207_28;
define pcodeop vsx207_29;
define pcodeop vsx207_30;
define pcodeop vsx300_1;
define pcodeop vsx300_2;
define pcodeop vsx300_3;
define pcodeop vsx300_4;
define pcodeop vsx300_5;
define pcodeop vsx300_7;
define pcodeop vsx300_8;
define pcodeop vsx300_9;
define pcodeop vsx300_10;
define pcodeop vsx300_11;
define pcodeop vsx300_12;
define pcodeop vsx300_13;
define pcodeop vsx300_14;
define pcodeop vsx300_15;
define pcodeop vsx300_16;
define pcodeop vsx300_17;
define pcodeop vsx300_18;
define pcodeop vsx300_19;
define pcodeop vsx300_20;
define pcodeop vsx300_21;
define pcodeop vsx300_22;
define pcodeop vsx300_23;
define pcodeop vsx300_25;
define pcodeop vsx300_26;
define pcodeop vsx300_27;
define pcodeop vsx300_28;
define pcodeop vsx300_29;
define pcodeop vsx300_30;
define pcodeop vsx300_31;
define pcodeop vsx300_32;
define pcodeop vsx300_33;
define pcodeop vsx300_34;
define pcodeop vsx300_35;
define pcodeop vsx300_36;
define pcodeop vsx300_37;
define pcodeop vsx300_38;
define pcodeop vsx300_39;
define pcodeop vsx300_40;
define pcodeop vsx300_41;
define pcodeop vsx300_42;
define pcodeop vsx300_43;
define pcodeop vsx300_44;
define pcodeop vsx300_45;
define pcodeop vsx300_46;
define pcodeop vsx300_47;
define pcodeop vsx300_48;
define pcodeop vsx300_49;
define pcodeop vsx300_50;
define pcodeop vsx300_51;
define pcodeop vsx300_52;
define pcodeop vsx300_53;
define pcodeop vsx300_54;
define pcodeop vsx300_55;
define pcodeop vsx300_56;
define pcodeop vsx300_57;
define pcodeop vsx300_58;
define pcodeop vsx300_59;
define pcodeop vsx300_60;
define pcodeop vsx300_61;
define pcodeop vsx300_62;
define pcodeop vsx300_63;
define pcodeop vsx300_64;
define pcodeop vsx300_65;
define pcodeop vsx300_66;
define pcodeop vsx300_67;
define pcodeop vsx300_68;
define pcodeop vsx300_69;
define pcodeop vsx300_70;
define pcodeop vsx300_71;
define pcodeop vsx300_72;
define pcodeop vsx300_73;
define pcodeop vsx300_74;
define pcodeop vsx300_75;
define pcodeop vsx300_76;
define pcodeop vsx300_77;
define pcodeop vsx300_78;
define pcodeop vsx300_79;
define pcodeop vsx300_80;
define pcodeop vsx300_81;
define pcodeop vsx300_82;
define pcodeop vsx300_83;
define pcodeop vsx300_84;
define pcodeop vsx300_85;
define pcodeop vsx300_86;
define pcodeop vsx300_87;
define pcodeop vsx300_88;
define pcodeop vsx300_89;
define pcodeop vsx300_90;
define pcodeop vsx300_91;
define pcodeop vsx300_92;
define pcodeop vsx300_93;
define pcodeop vsx300_94;
define pcodeop vsx300_95;
define pcodeop vsx300_96;
define pcodeop vsx300_97;
define pcodeop vsx300_98;
define pcodeop vsx300_99;
define pcodeop vsx300_100;
define pcodeop vsx300_101;
define pcodeop vsx300_102;
define pcodeop vsx300_103;

#################
# v2.07
additions :lxsiwax XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=76 { XT = vsx207_1(A,B); } :lxsiwzx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=12 { XT = vsx207_2(A,B); } :lxsspx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=524 { XT = vsx207_3(A,B); } :mfvsrd A,XSF is $(NOTVLE) & OP=31 & XOP_1_10=51 & BITS_11_15=0 & XSF & A { A = XSF; } :mfvsrwz A,XSF is $(NOTVLE) & OP=31 & XOP_1_10=115 & BITS_11_15=0 & XSF & A { A[0,32] = XSF[0,32]; A[32,32] = 0; } :mtvsrd XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=179 & BITS_11_15=0 & XTF & A { XTF = A; } :mtvsrwa XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=211 & BITS_11_15=0 & XTF & A { XTF = sext(A:4); } :mtvsrwz XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=243 & BITS_11_15=0 & XTF & A { XTF = zext(A:4); } :stxsiwx XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=140 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:4 EA = vsx207_9(XS,RA_OR_ZERO,B); } :stxsspx XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=652 { EA:$(REGISTER_SIZE) = RA_OR_ZERO + B; *[ram]:4 EA = vsx207_10(XS,RA_OR_ZERO,B); } :xsaddsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=0 & XA & XB & XT { XT = vsx207_11(XA,XB); } :xscvdpspn XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=267 & XB & XT { src:4 = float2float(XB:8); XT[0,32] = src; } :xscvspdpn XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=331 & XB & XT { XT = vsx207_13(XB); } :xscvsxdsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=312 & XB & XT { XT = vsx207_14(XB); } :xscvuxdsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=296 & XB & XT { XT = vsx207_15(XB); } :xsdivsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=24 & XA & XB & XT { XT = vsx207_16(XA,XB); } :xsmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=1 & XA & XB & XT { XT = vsx207_17(XA,XB); } :xsmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=9 & XA & XB & XT { XT = vsx207_18(XA,XB); } :xsmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=17 & XA & XB & XT { XT = vsx207_19(XA,XB); } 
:xsmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=25 & XA & XB & XT { XT = vsx207_20(XA,XB); } :xsmulsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=16 & XA & XB & XT { XT = vsx207_21(XA,XB); } :xsnmaddasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=129 & XA & XB & XT { XT = vsx207_22(XA,XB); } :xsnmaddmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=137 & XA & XB & XT { XT = vsx207_23(XA,XB); } :xsnmsubasp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=145 & XA & XB & XT { XT = vsx207_24(XA,XB); } :xsnmsubmsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=153 & XA & XB & XT { XT = vsx207_25(XA,XB); } :xsresp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=26 & XB & XT { XT = vsx207_26(XB); } :xsrsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=281 & XB & XT { XT = vsx207_27(XB); } :xsrsqrtesp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=10 & XB & XT { XT = vsx207_28(XB); } :xssqrtsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=11 & XB & XT { XT = vsx207_29(XB); } :xssubsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=8 & XA & XB & XT { XT = vsx207_30(XA,XB); } :xxleqv XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=186 & XA & XB & XT { XT = ~(XA ^ XB); } :xxlnand XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=178 & XA & XB & XT { XT = ~(XA & XB); } :xxlorc XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=170 & XA & XB & XT { XT = XA | (~XB); } ####################### # v3.0 # The endian behavior of the storage has not been modelled :lxsd vrD,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=57 & vrD & RA_OR_ZERO & BITS_0_1=2 & DSs { ea:$(REGISTER_SIZE) = RA_OR_ZERO + (DSs << 2); vrD[0,64] = *:8 ea; } :lxsibzx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=781 { XT = vsx300_2(A,B); } :lxsihzx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=813 { XT = vsx300_3(A,B); } :lxssp vrD,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=57 & vrD & RA_OR_ZERO & BITS_0_1=3 & DSs { vrD = vsx300_4(DSs:2,RA_OR_ZERO); } # The endian behavior of the storage has not been modelled :lxv XT3,DQs(RA_OR_ZERO) is $(NOTVLE) & 
OP=61 & XT3 & RA_OR_ZERO & BITS_0_2=1 & DQs { ea:$(REGISTER_SIZE) = RA_OR_ZERO + (DQs << 4); XT3 = *:16 ea; } :lxvx XT,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XOP_1_5=12 & BIT_6=0 & XOP_7_10=4 & RA_OR_ZERO & B & XT { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; XT = *:16 ea; } :lxvb16x XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=876 { XT = vsx300_7(A,B); } :lxvh8x XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=812 { XT = vsx300_8(A,B); } :lxvl XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=269 { XT = vsx300_9(A,B); } :lxvll XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=301 { XT = vsx300_10(A,B); } :lxvwsx XT,A,B is $(NOTVLE) & OP=31 & XT & A & B & XOP_1_10=364 { XT = vsx300_11(A,B); } :mfvsrld A,XSF is $(NOTVLE) & OP=31 & XOP_1_10=307 & BITS_11_15=0 & XSF & A { A = vsx300_12(XSF); } :mtvsrdd XTF,A,B is $(NOTVLE) & OP=31 & XTF & A & B & XOP_1_10=435 { XTF = vsx300_13(A,B); } :mtvsrws XTF,A is $(NOTVLE) & OP=31 & XOP_1_10=403 & BITS_11_15=0 & XTF & A { XTF = vsx300_14(A); } :stxsd vrS,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & vrS & RA_OR_ZERO & BITS_0_1=2 & DSs { vsx300_15(vrS,DSs:2,RA_OR_ZERO); } :stxsibx XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=909 { vsx300_16(XS,A,B); } :stxsihx XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=941 { vsx300_17(XS,A,B); } :stxssp vrS,DSs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & vrS & RA_OR_ZERO & BITS_0_1=3 & DSs { vsx300_18(vrS,DSs:2,RA_OR_ZERO); } # The endian behavior of the storage has not been modelled :stxv XS3,DQs(RA_OR_ZERO) is $(NOTVLE) & OP=61 & XS3 & RA_OR_ZERO & BITS_0_2=5 & DQs { ea:$(REGISTER_SIZE) = RA_OR_ZERO + (DQs << 4); *:16 ea = XS3; } :stxvb16x XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=1004 { vsx300_20(XS,A,B); } :stxvh8x XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=940 { vsx300_21(XS,A,B); } :stxvl XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=397 { vsx300_22(XS,A,B); } :stxvll XS,A,B is $(NOTVLE) & OP=31 & XS & A & B & XOP_1_10=429 { vsx300_23(XS,A,B); } :stxvx 
XS,RA_OR_ZERO,B is $(NOTVLE) & OP=31 & XS & RA_OR_ZERO & B & XOP_1_10=396 { ea:$(REGISTER_SIZE) = RA_OR_ZERO + B; *:16 ea = XS; } :xsabsqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=0 & BIT_0=0 & XOP_1_10=804 & vrD & vrB { vrD = vsx300_25(vrB); } :xsaddqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=4 & R0=0 & vrD & vrA & vrB { vrD = vsx300_26(vrA,vrB); } :xsaddqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=4 & R0=1 & vrD & vrA & vrB { vrD = vsx300_27(vrA,vrB); } :xscmpeqdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=3 & XA & XB & XT { XT = vsx300_28(XA,XB); } :xscmpexpdp BF2,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=59 & BITS_21_22=0 & BIT_0=0 & XA & XB & BF2 { vsx300_29(BF2:1,XA,XB); } :xscmpexpqp BF2,vrA,vrB is $(NOTVLE) & OP=63 & BITS_21_22=0 & BIT_0=0 & XOP_1_10=164 & R0=0 & BF2 & vrA & vrB { vsx300_30(BF2:1,vrA,vrB); } :xscmpgedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=19 & XA & XB & XT { XT = vsx300_31(XA,XB); } :xscmpgtdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=11 & XA & XB & XT { XT = vsx300_32(XA,XB); } :xscmpnedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=27 & XA & XB & XT { XT = vsx300_33(XA,XB); } :xscmpoqp BF2,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=132 & BITS_21_22=0 & BIT_0=0 & vrA & vrB & BF2 { vsx300_34(BF2:1,vrA,vrB); } :xscmpuqp BF2,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=644 & BITS_21_22=0 & BIT_0=0 & vrA & vrB & BF2 { vsx300_35(BF2:1,vrA,vrB); } :xscpsgnqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & BIT_0=0 & XOP_1_10=100 & vrD & vrA & vrB { vrD = vsx300_36(vrA,vrB); } :xscvdphp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=17 & XOP_2_10=347 & XB & XT { XT = vsx300_37(XB); } :xscvdpqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=22 & BIT_0=0 & XOP_1_10=836 & vrD & vrB { vrD = vsx300_38(vrB); } :xscvhpdp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=16 & XOP_2_10=347 & XB & XT { XT = vsx300_39(XB); } :xscvqpdp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=20 & XOP_1_10=836 & R0=0 & vrD & vrB { vrD = vsx300_40(vrB); } :xscvqpdpo vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=20 & 
XOP_1_10=836 & R0=1 & vrD & vrB { vrD = vsx300_41(vrB); } :xscvqpsdz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=25 & XOP_1_10=836 & BIT_0=0 & vrD & vrB { vrD = vsx300_42(vrB); } :xscvqpswz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=9 & XOP_1_10=836 & BIT_0=0 & vrD & vrB { vrD = vsx300_43(vrB); } :xscvqpudz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=17 & XOP_1_10=836 & BIT_0=0 & vrD & vrB { vrD = vsx300_44(vrB); } :xscvqpuwz vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=1 & XOP_1_10=836 & BIT_0=0 & vrD & vrB { vrD = vsx300_45(vrB); } :xscvsdqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=10 & XOP_1_10=836 & BIT_0=0 & vrD & vrB { vrD = vsx300_46(vrB); } :xscvudqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=2 & XOP_1_10=836 & BIT_0=0 & vrD & vrB { vrD = vsx300_47(vrB); } :xsdivqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=548 & R0=0 & vrD & vrA & vrB { vrD = vsx300_47(vrA,vrB); } :xsdivqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=548 & R0=1 & vrD & vrA & vrB { vrD = vsx300_48(vrA,vrB); } :xsiexpdp XT,A,B is $(NOTVLE) & OP=60 & XT & A & B & XOP_1_10=918 { vsx300_49(A,B); } :xsiexpqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & BIT_0=0 & XOP_1_10=868 & vrD & vrA & vrB { vrD = vsx300_50(vrA,vrB); } :xsmaddqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=388 & R0=0 & vrD & vrA & vrB { vrD = vsx300_51(vrA,vrB); } :xsmaddqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=388 & R0=1 & vrD & vrA & vrB { vrD = vsx300_52(vrA,vrB); } :xsmaxcdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=128 & XA & XB & XT { XT = vsx300_53(XA,XB); } :xsmaxjdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=144 & XA & XB & XT { XT = vsx300_54(XA,XB); } :xsmincdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=136 & XA & XB & XT { XT = vsx300_55(XA,XB); } :xsminjdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=152 & XA & XB & XT { XT = vsx300_56(XA,XB); } :xsmsubqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=420 & R0=0 & vrD & vrA & vrB { vrD = vsx300_57(vrA,vrB); } :xsmsubqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=420 & R0=1 & vrD & vrA 
& vrB { vrD = vsx300_58(vrA,vrB); } :xsmulqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=36 & R0=0 & vrD & vrA & vrB { vrD = vsx300_59(vrA,vrB); } :xsmulqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=36 & R0=1 & vrD & vrA & vrB { vrD = vsx300_60(vrA,vrB); } :xsnabsqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=8 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { vrD = vsx300_61(vrB); } :xsnegqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=16 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { vrD = vsx300_62(vrB); } :xsnmaddqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=452 & R0=0 & vrD & vrA & vrB { vrD = vsx300_63(vrA,vrB); } :xsnmaddqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=452 & R0=1 & vrD & vrA & vrB { vrD = vsx300_64(vrA,vrB); } :xsnmsubqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=484 & R0=0 & vrD & vrA & vrB { vrD = vsx300_65(vrA,vrB); } :xsnmsubqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=484 & R0=1 & vrD & vrA & vrB { vrD = vsx300_66(vrA,vrB); } :xsrqpi R16,vrD,vrB,RMC is $(NOTVLE) & OP=63 & BITS_17_20=0 & XOP_1_8=5 & EX=0 & vrD & vrB & R16 & RMC { vrD = vsx300_67(vrB,RMC:1,R16:1); } :xsrqpix R16,vrD,vrB,RMC is $(NOTVLE) & OP=63 & BITS_17_20=0 & XOP_1_8=5 & EX=1 & vrD & vrB & R16 & RMC { vrD = vsx300_68(vrB,RMC:1,R16:1); } :xsrqpxp R16,vrD,vrB,RMC is $(NOTVLE) & OP=63 & BITS_17_20=0 & XOP_1_8=37 & BIT_0=0 & vrD & vrB & R16 & RMC { vrD = vsx300_69(vrB,RMC:1,R16:1); } :xssqrtqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=27 & XOP_1_10=804 & R0=0 & vrD & vrB { vrD = vsx300_70(vrB); } :xssqrtqpo vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=27 & XOP_1_10=804 & R0=1 & vrD & vrB { vrD = vsx300_71(vrB); } :xssubqp vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=516 & R0=0 & vrD & vrA & vrB { vrD = vsx300_72(vrA,vrB); } :xssubqpo vrD,vrA,vrB is $(NOTVLE) & OP=63 & XOP_1_10=516 & R0=1 & vrD & vrA & vrB { vrD = vsx300_73(vrA,vrB); } :xststdcdp BF2,XB,DCMX is $(NOTVLE) & OP=60 & BIT_0=0 & XOP_2_10=362 & XB & BF2 & DCMX { vsx300_74(XB,BF2:1,DCMX:1); } :xststdcqp BF2,vrB,DCMX is $(NOTVLE) & OP=63 
& XOP_1_10=708 & BIT_0=0 & vrB & BF2 & DCMX { vsx300_75(vrB,BF2:1,DCMX:1); } :xststdcsp BF2,XB,DCMX is $(NOTVLE) & OP=60 & BIT_0=0 & XOP_2_10=298 & XB & BF2 & DCMX { vsx300_76(XB,BF2:1,DCMX:1); } :xsxexpdp D,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & BIT_0=0 & XOP_2_10=347 & XB & D { D = vsx300_77(XB); } :xsxexpqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=2 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { vrD = vsx300_78(vrB); } :xsxsigdp D,XB is $(NOTVLE) & OP=60 & BITS_16_20=1 & BIT_0=0 & XOP_2_10=347 & XB & D { D = vsx300_79(XB); } :xsxsigqp vrD,vrB is $(NOTVLE) & OP=63 & BITS_16_20=18 & XOP_1_10=804 & BIT_0=0 & vrD & vrB { vrD = vsx300_80(vrB); } :xvcmpnedp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=123 & Rc2=0 & XA & XB & XT { XT = vsx300_81(XA,XB); } :xvcmpnedp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=123 & Rc2=1 & XA & XB & XT { XT = vsx300_82(XA,XB); } :xvcmpnesp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=91 & Rc2=0 & XA & XB & XT { XT = vsx300_83(XA,XB); } :xvcmpnesp. XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_9=91 & Rc2=1 & XA & XB & XT { XT = vsx300_84(XA,XB); } :xvcvhpsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=24 & XOP_2_10=475 & XB & XT { XT = vsx300_85(XB); } :xvcvsphp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=25 & XOP_2_10=475 & XB & XT { XT = vsx300_86(XB); } :xviexpdp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=248 & XA & XB & XT { XT = vsx300_87(XA,XB); } :xviexpsp XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=216 & XA & XB & XT { XT = vsx300_88(XA,XB); } :xvtstdcdp XT,XB,DBUILD is $(NOTVLE) & OP=60 & XOP_3_5=5 & XOP_7_10=15 & XA & XB & XT & DBUILD { XT = vsx300_89(XB,DBUILD); } :xvtstdcsp XT,XB,DBUILD is $(NOTVLE) & OP=60 & XOP_3_5=5 & XOP_7_10=13 & XA & XB & XT & DBUILD { XT = vsx300_90(XB,DBUILD); } :xvxexpdp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=0 & XOP_2_10=475 & XB & XT { XT = vsx300_91(XB); } :xvxexpsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=8 & XOP_2_10=475 & XB & XT { XT = vsx300_92(XB); } :xvxsigdp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=1 & XOP_2_10=475 & XB & XT { 
XT = vsx300_93(XB); } :xvxsigsp XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=9 & XOP_2_10=475 & XB & XT { XT = vsx300_94(XB); } :xxbrd XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=23 & XOP_2_10=475 & XB & XT { XT = vsx300_95(XB); } :xxbrh XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=7 & XOP_2_10=475 & XB & XT { XT = vsx300_96(XB); } :xxbrq XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=31 & XOP_2_10=475 & XB & XT { XT = vsx300_97(XB); } :xxbrw XT,XB is $(NOTVLE) & OP=60 & BITS_16_20=15 & XOP_2_10=475 & XB & XT { XT = vsx300_98(XB); } :xxextractuw XT,XB,UIMB is $(NOTVLE) & OP=60 & BIT_20=0 & XOP_2_10=165 & XB & XT & UIMB { XT = vsx300_99(XB,UIMB:1); } :xxinsertw XT,XB,UIMB is $(NOTVLE) & OP=60 & BIT_20=0 & XOP_2_10=181 & XB & XT & UIMB { XT = vsx300_100(XB,UIMB:1); } :xxperm XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=26 & XA & XB & XT { XT = vsx300_101(XA,XB); } :xxpermr XT,XA,XB is $(NOTVLE) & OP=60 & XOP_3_10=58 & XA & XB & XT { XT = vsx300_102(XA,XB); } :xxspltib XT,UIMM8 is $(NOTVLE) & OP=60 & BITS_19_20=0 & XOP_1_10=360 & XT & UIMM8 { tmpa:16 = zext(UIMM8:1); tmpa = tmpa | (tmpa << 8); tmpa = tmpa | (tmpa << 16); tmpa = tmpa | (tmpa << 32); tmpa = tmpa | (tmpa << 64); XT = tmpa; } ================================================ FILE: pypcode/processors/PowerPC/data/manuals/PowerISA.idx ================================================ @OPF_PowerISA_v3.1.pdf [OpenPower Power ISA, Version 3.1, May 1, 2020] add., 103 add, 103 addc., 104 addc, 104 addco., 104 addco, 104 adde., 104 adde, 104 addeo., 104 addeo, 104 addex, 106 addg6s, 144 addi, 102 addic, 103 addic., 103 addis, 102 addme., 105 addme, 105 addmeo., 105 addmeo, 105 addo., 103 addo, 103 addpcis, 102 addze., 105 addze, 105 addzeo., 105 addzeo, 105 and., 126 and, 126 andc., 127 andc, 127 andi., 125 andis., 125 b, 67 ba, 67 bl, 67 bla, 67 bc, 67 bca, 67 bcl, 67 bcla, 67 bcctr, 68 bcctrl, 68 bcdadd., 504 bcdcfn., 506 bcdcfsq., 511 bcdcfz., 507 bcdcpsgn., 515 bcdctn., 509 bcdctsq., 512 bcdctz., 510 bcds., 517 bcdsetsgn., 516 bcdsr., 
519 bcdsub., 504 bcdtrunc., 520 bcdus., 518 bcdutrunc., 521 bclr, 68 bclrl, 68 bctar, 69 bctarl, 69 bpermd, 131 brd, 145 brh, 145 brw, 145 cbcdtd, 143 cdtbcd, 143 cfuged, 132 clrbhrb, 1129 cmp, 119 cmpb, 128 cmpeqb, 121 cmpi, 119 cmpl, 119 cmpli, 119 cmprb, 120 cntlzd., 130 cntlzd, 130 cntlzdm, 131 cntlzw., 128 cntlzw, 128 cnttzd, 130 cnttzd., 130 cnttzdm, 131 cnttzw, 128 cnttzw., 128 copy, 1094 cp_abort, 1095 crand, 70 crandc, 71 creqv, 71 crnand, 70 crnor, 71 cror, 70 crorc, 71 crxor, 70 dadd., 232 dadd, 232 daddq., 232 daddq, 232 darn, 112 dcbf, 1090 dcbst, 1089 dcbt, 1087 dcbtst, 1088 dcbz, 1089 dcffix., 257 dcffix, 257 dcffixq., 257 dcffixq, 257 dcffixqq, 258 dcmpo, 238 dcmpoq, 238 dcmpu, 237 dcmpuq, 237 dctdp., 255 dctdp, 255 dctfix.,259 dctfix,259 dctfixq.,259 dctfixq,259 dctfixqq,259 dctqpq., 255 dctqpq, 255 ddedpd., 261 ddedpd, 261 ddedpdq., 261 ddedpdq, 261 ddiv., 235 ddiv, 235 ddivq., 235 ddivq, 235 denbcd., 261 denbcd, 261 denbcdq., 261 denbcdq, 261 diex., 262 diex, 262 diexq., 262 diexq, 262 divd., 115 divd, 115 divde., 116 divde, 116 divdeo., 116 divdeo, 116 divdeu., 116 divdeu, 116 divdeuo., 116 divdeuo, 116 divdo., 115 divdo, 115 divdu., 115 divdu, 115 divduo., 115 divduo, 115 divw., 108 divw, 108 divwe., 109 divwe, 109 divweo., 109 divweo, 109 divweu., 109 divweu, 109 divweuo., 109 divweuo, 109 divwo., 108 divwo, 108 divwu., 108 divwu, 108 divwuo., 108 divwuo, 108 dmul., 234 dmul, 234 dmulq., 234 dmulq, 234 dqua., 245 dqua, 245 dquai., 243 dquai, 243 dquaiq., 243 dquaiq, 243 dquaq., 245 dquaq, 245 drdpq., 256 drdpq, 256 drintn., 252 drintn, 252 drintnq., 252 drintnq, 252 drintx., 250 drintx, 250 drintxq., 250 drintxq, 250 drrnd., 247 drrnd, 247 drrndq., 247 drrndq, 247 drsp., 256 drsp, 256 dscli., 264 dscli, 264 dscliq., 264 dscliq, 264 dscri., 264 dscri, 264 dscriq., 264 dscriq, 264 dsub., 232 dsub, 232 dsubq., 232 dsubq, 232 dtstdc, 239 dtstdcq, 239 dtstdg, 239 dtstdgq, 239 dtstex, 240 dtstexq, 240 dtstsf, 241 dtstsfi, 242 dtstsfiq, 242 dtstsfq, 
241 dxex., 262 dxex, 262 dxexq., 262 dxexq, 262 eieio, 1114 eqv., 127 eqv, 127 extsb., 128 extsb, 128 extsh., 128 extsh, 128 extsw., 130 extsw, 130 extswsli., 142 extswsli, 142 fabs., 187 fabs, 187 fadd., 189 fadd, 189 fadds., 189 fadds, 189 fcfid., 200 fcfid, 200 fcfids., 201 fcfids, 201 fcfidu., 201 fcfidu, 201 fcfidus., 202 fcfidus, 202 fcmpo, 205 fcmpu, 205 fcpsgn., 187 fcpsgn, 187 fctid., 196 fctid, 196 fctidu., 197 fctidu, 197 fctiduz., 198 fctiduz, 198 fctidz., 197 fctidz, 197 fctiw., 198 fctiw, 198 fctiwu., 199 fctiwu, 199 fctiwuz., 200 fctiwuz, 200 fctiwz., 199 fctiwz, 199 fdiv., 190 fdiv, 190 fdivs., 190 fdivs, 190 fmadd., 194 fmadd, 194 fmadds., 194 fmadds, 194 fmr., 187 fmr, 187 fmrgew, 188 fmrgow, 188 fmsub., 194 fmsub, 194 fmsubs., 194 fmsubs, 194 fmul., 190 fmul, 190 fmuls., 190 fmuls, 190 fnabs., 187 fnabs, 187 fneg., 187 fneg, 187 fnmadd., 195 fnmadd, 195 fnmadds., 195 fnmadds, 195 fnmsub., 195 fnmsub, 195 fnmsubs., 195 fnmsubs, 195 fre., 191 fre, 191 fres., 191 fres, 191 frim., 204 frim, 204 frin.,204 frin, 204 frip., 204 frip, 204 friz., 204 friz, 204 frsp., 196 frsp, 196 frsqrte., 192 frsqrte, 192 frsqrtes., 192 frsqrtes, 192 fsel., 206 fsel, 206 fsqrt., 191 fsqrt, 191 fsqrts., 191 fsqrts, 191 fsub., 189 fsub, 189 fsubs., 189 fsubs, 189 ftdiv, 192 ftsqrt, 193 hrfid, 1178 icbi, 1078 icbt, 1078 isel, 124 isync, 1102 lbarx, 1103 lbz, 78 lbzcix, 1190 lbzu, 78 lbzux, 78 lbzx, 78 ld, 83 ldarx, 1108 ldat, 1099 ldbrx, 95 ldcix, 1190 ldu, 83 ldux, 83 ldx, 83 lfd, 178 lfdp, 185 lfdpx, 185 lfdu, 178 lfdux, 178 lfdx, 178 lfiwax, 179 lfiwzx, 179 lfs, 176 lfsu, 176 lfsux, 177 lfsx, 176 lha, 80 lharx, 1104 lhau, 80 lhaux, 80 lhax, 80 lhbrx, 93 lhz, 79 lhzcix, 1190 lhzu, 79 lhzux, 79 lhzx, 79 lmw, 96 lq, 91 lqarx, 1110 lswi, 98 lswx, 98 lvebx, 294 lvehx, 295 lvewx, 296 lvsl, 303 lvsr, 303 lvx, 297 lvxl, 297 lwa, 82 lwarx, 1104 lwat, 1099 lwaux, 82 lwax, 82 lwbrx, 94 lwz, 81 lwzcix, 1190 lwzu, 81 lwzux, 81 lwzx, 81 lxsd, 636 lxsdx, 637 lxsibzx, 638 lxsihzx, 638 
lxsiwax, 639 lxsiwzx, 640 lxssp, 641 lxsspx, 642 lxv, 643 lxvb16x, 644 lxvd2x, 645 lxvdsx, 659 lxvh8x, 660 lxvkq, 646 lxvl, 647 lxvll, 649 lxvp, 651 lxvpx, 652 lxvrbx, 653 lxvrdx, 654 lxvrhx, 655 lxvrwx, 656 lxvw4x, 661 lxvwsx, 662 lxvx, 657 maddhd, 114 maddhdu, 114 maddld, 114 mcrf, 72 mcrfs, 210 mcrxrx, 153 mfbhrbe, 1129 mfcr, 154 mffs., 208 mffs, 208 mffscdrn, 208 mffscdrni, 209 mffsce, 208 mffscrn, 209 mffscrni, 209 mffsl, 209 mfmsr, 1202 mfocrf, 154 mfspr, 152 mftb, 1120 mfvscr, 522 mfvsrd, 146 mfvsrld, 146 mfvsrwz, 147 modsd, 117 modsw, 111 modud, 117 moduw, 111 msgclr, 1355 msgclrp, 1357 msgclru, 1354 msgsnd, 1354 msgsndp, 1356 msgsndu, 1353 msgsync, 1357 mtcrf, 153 mtfsb0., 211 mtfsb0, 211 mtfsb1., 211 mtfsb1, 211 mtfsf., 210 mtfsf, 210 mtfsfi., 210 mtfsfi, 210 mtmsr, 1200 mtmsrd, 1201 mtocrf, 153 mtspr, 150 mtvscr, 522 mtvsrbm, 489 mtvsrbmi, 491 mtvsrd, 147 mtvsrdd, 149 mtvsrdm, 490 mtvsrhm, 489 mtvsrqm, 491 mtvsrwa, 148 mtvsrwm, 490 mtvsrws, 149 mtvsrwz, 148 mulhd., 113 mulhd, 113 mulhdu., 113 mulhdu, 113 mulhw., 107 mulhw, 107 mulhwu., 107 mulhwu, 107 mulld., 113 mulld, 113 mulldo., 113 mulldo, 113 mulli, 107 mullw., 107 mullw, 107 mullwo., 107 mullwo, 107 nand., 126 nand, 126 neg., 106 neg, 106 nego., 106 nego, 106 nor.,127 nor, 127 or., 127 or, 127 orc., 127 orc, 127 ori, 125 oris, 126 paddi, 102 paste., 1094 paste, 1094 pdepd, 132 pextd, 132 plbz, 78 pld, 83 plfd, 178 plfs, 176 plha, 80 plhz, 79 plq, 91 plwa, 82 plwz, 81 plxsd, 636 plxssp, 641 plxv, 643 plxvp, 651 pmxvbf16ger2, 853 pmxvbf16ger2nn, 853 pmxvbf16ger2np, 853 pmxvbf16ger2pn, 853 pmxvbf16ger2pp, 853 pmxvf16ger2 , 897 pmxvf16ger2nn, 897 pmxvf16ger2np, 897 pmxvf16ger2pn, 897 pmxvf16ger2pp, 897 pmxvf32ger, 901 pmxvf32gernn, 901 pmxvf32gernp, 901 pmxvf32gerpn, 901 pmxvf32gerpp, 901 pmxvf64ger, 905 pmxvf64gernn, 905 pmxvf64gernp, 905 pmxvf64gerpn, 905 pmxvf64gerpp, 905 pmxvi16ger2, 917 pmxvi16ger2pp, 917 pmxvi16ger2s, 919 pmxvi16ger2spp, 919 pmxvi4ger8, 909 pmxvi4ger8pp, 909 pmxvi8ger4, 912 
pmxvi8ger4pp, 912 pmxvi8ger4spp, 915 pnop, 156 popcntb, 129 popcntd, 130 popcntw, 129 prtyd, 130 prtyw, 129 pstb, 85 pstd, 88 pstfd, 183 pstfs, 181 psth, 86 pstq, 92 pstw, 87 pstxsd, 664 pstxssp, 668 pstxv, 670 pstxvp, 680 rfebb, 1126 rfid, 1178 rfscv, 1177 rldcl., 137 rldcl, 137 rldcr., 138 rldcr, 138 rldic., 137 rldic, 137 rldicl., 136 rldicl, 136 rldicr., 136 rldicr, 136 rldimi., 138 rldimi, 138 rlwimi., 134 rlwimi, 134 rlwinm., 133 rlwinm, 133 rlwnm., 134 rlwnm, 134 sc, 73 scv, 73 setb, 155 setbc, 155 setbcr, 155 setnbc, 155 setnbcr, 155 slbfee., 1255 slbia, 1251 slbiag, 1252 slbie, 1247 slbieg, 1249 slbmfee, 1255 slbmfev, 1254 slbmte, 1253 slbsync, 1256 sld., 141 sld, 141 slw., 139 slw, 139 srad., 141 srad, 141 sradi., 141 sradi, 141 sraw., 140 sraw, 140 srawi., 140 srawi, 140 srd., 141 srd, 141 srw., 139 srw, 139 stb, 85 stbcix, 1191 stbcx., 1105 stbu, 85 stbux, 85 stbx, 85 std, 88 stdat, 1101 stdbrx, 95 stdcix, 1191 stdcx., 1108 stdu, 88 stdux, 89 stdx, 88 stfd, 183 stfdp, 186 stfdpx, 186 stfdu, 183 stfdux, 184 stfdx, 183 stfiwx, 184 stfs, 181 stfsu, 181 stfsux, 182 stfsx, 181 sth, 86 sthbrx, 93 sthcix, 1191 sthcx., 1106 sthu, 86 sthux, 86 sthx, 86 stmw, 96 stop, 1181 stq, 92 stqcx., 894 stswi, 99 stswx, 99 stvebx, 298 stvehx, 299 stvewx, 300 stvx, 301 stvxl, 301 stw, 87 stwat, 1101 stwbrx, 94 stwcix, 1191 stwcx., 1107 stwu, 87 stwux, 87 stwx, 87 stxsd, 664 stxsdx, 665 stxsibx, 666 stxsihx, 666 stxsiwx, 667 stxssp, 668 stxsspx, 669 stxv, 670 stxvb16x, 671 stxvd2x, 672 stxvh8x, 673 stxvl, 674 stxvll, 676 stxvp, 680 stxvpx, 681 stxvrbx, 677 stxvrdx, 677 stxvrhx, 678 stxvrwx, 678 stxvw4x, 679 stxvx, 682 subf., 103 subf, 103 subfc., 104 subfc, 104 subfco., 104 subfco, 104 subfe., 104 subfe, 104 subfeo., 104 subfeo, 104 subfic, 103 subfme, 105 subfme., 105 subfmeo, 105 subfmeo., 105 subfo., 103 subfo, 103 subfze., 105 subfze, 105 subfzeo., 105 subfzeo, 105 sync, 1112 td, 124 tdi, 124 tlbie, 1257 tlbiel, 1262 tlbsync, 1266 tw, 123 twi, 123 urfid, 1179 vabsdub, 404 
vabsduh, 404 vabsduw, 405 vaddcuq, 356 vaddcuw, 349 vaddecuq, 356 vaddeuqm, 355 vaddfp, 448 vaddsbs, 349 vaddshs, 350 vaddsws, 350 vaddubm, 351 vaddubs, 353 vaddudm, 352 vadduhm, 351 vadduhs, 353 vadduqm, 355 vadduwm, 352 vadduws, 354 vand, 428 vandc, 428 vavgsb, 401 vavgsh, 402 vavgsw, 403 vavgub, 401 vavguh, 402 vavguw, 403 vbpermd, 487 vbpermq, 488 vcfsx, 452 vcfuged, 482 vcfux, 452 vcipher, 461 vcipherlast, 461 vclrlb, 502 vclrrb, 502 vclzb, 473 vclzd, 475 vclzdm, 475 vclzh, 473 vclzlsbb, 479 vclzw, 474 vcmpbfp., 455 vcmpbfp, 455 vcmpeqfp., 456 vcmpeqfp, 456 vcmpequb., 414 vcmpequb, 414 vcmpequd, 417 vcmpequd., 417 vcmpequh., 415 vcmpequh, 415 vcmpequq., 418 vcmpequq, 418 vcmpequw., 416 vcmpequw, 416 vcmpgefp., 456 vcmpgefp, 456 vcmpgtfp., 457 vcmpgtfp, 457 vcmpgtsb., 419 vcmpgtsb, 419 vcmpgtsd, 422 vcmpgtsd., 422 vcmpgtsh., 420 vcmpgtsh, 420 vcmpgtsq., 423 vcmpgtsq, 423 vcmpgtsw., 421 vcmpgtsw, 421 vcmpgtub., 419 vcmpgtub, 419 vcmpgtud, 422 vcmpgtud., 422 vcmpgtuh., 420 vcmpgtuh, 420 vcmpgtuq., 423 vcmpgtuq, 423 vcmpgtuw., 421 vcmpgtuw, 421 vcmpneb, 424 vcmpneb., 424 vcmpneh, 425 vcmpneh., 425 vcmpnew, 426 vcmpnew., 426 vcmpnezb, 424 vcmpnezb., 424 vcmpnezh, 425 vcmpnezh., 425 vcmpnezw, 426 vcmpnezw., 426 vcmpsq, 427 vcmpuq, 427 vcntmbb, 495 vcntmbd, 496 vcntmbh, 495 vcntmbw, 496 vctsxs, 451 vctuxs, 451 vctzb, 476 vctzd, 478 vctzdm, 478 vctzh, 476 vctzlsbb, 479 vctzw, 477 vdivesd, 387 vdivesq, 389 vdivesw, 385 vdiveud, 387 vdiveuq, 389 vdiveuw, 385 vdivsd, 386 vdivsq, 388 vdivsw, 384 vdivud, 386 vdivuq, 388 vdivuw, 384 veqv, 429 vexpandbm, 492 vexpanddm, 493 vexpandhm, 492 vexpandqm, 494 vexpandwm, 493 vexptefp, 458 vextddvlx, 338 vextddvrx, 338 vextdubvlx, 335 vextdubvrx, 335 vextduhvlx, 336 vextduhvrx, 336 vextduwvlx, 337 vextduwvrx, 337 vextractbm, 497 vextractd, 331 vextractdm, 498 vextracthm, 497 vextractqm, 499 vextractub, 330 vextractuh, 330 vextractuw, 331 vextractwm, 498 vextsb2d, 399 vextsb2w, 398 vextsd2q, 400 vextsh2d, 399 vextsh2w, 398 vextsw2d, 
400 vextublx, 332 vextubrx, 332 vextuhlx, 333 vextuhrx, 333 vextuwlx, 334 vextuwrx, 334 vgbbd, 471 vgnb, 472 vinsblx, 341 vinsbrx, 341 vinsbvlx, 346 vinsbvrx, 346 vinsd, 345 vinsdlx, 344 vinsdrx, 344 vinsertb, 339 vinsertd, 340 vinserth, 339 vinsertw, 340 vinshlx, 342 vinshrx, 342 vinshvlx, 347 vinshvrx, 347 vinsw, 345 vinswlx, 343 vinswrx, 343 vinswvlx, 348 vinswvrx, 348 vlogefp, 459 vmaddfp, 449 vmaxfp, 450 vmaxsb, 406 vmaxsd, 409 vmaxsh, 407 vmaxsw, 408 vmaxub, 406 vmaxud, 409 vmaxuh, 407 vmaxuw, 408 vmhaddshs, 377 vmhraddshs, 377 vminfp, 450 vminsb, 410 vminsd, 413 vminsh, 411 vminsw, 412 vminub, 410 vminud, 413 vminuh, 411 vminuw, 412 vmladduhm, 378 vmodsd, 391 vmodsq, 392 vmodsw, 390 vmodud, 391 vmoduq, 392 vmoduw, 390 vmrgew, 318 vmrghb, 315 vmrghh, 316 vmrghw, 317 vmrglb, 315 vmrglh, 316 vmrglw, 317 vmrgow, 318 vmsumcud, 383 vmsummbm, 379 vmsumshm, 379 vmsumshs, 380 vmsumubm, 378 vmsumudm, 382 vmsumuhm, 380 vmsumuhs, 381 vmul10cuq, 513 vmul10ecuq, 514 vmul10euq, 514 vmul10uq, 513 vmulesb, 365 vmulesd, 372 vmulesh, 367 vmulesw, 369 vmuleub, 366 vmuleud, 371 vmuleuh, 368 vmuleuw, 370 vmulhsd, 375 vmulhsw, 373 vmulhud, 375 vmulhuw, 374 vmulld, 376 vmulosb, 365 vmulosd, 372 vmulosh, 367 vmulosw, 369 vmuloub, 366 vmuloud, 371 vmulouh, 368 vmulouw, 370 vmuluwm, 373 vnand, 429 vncipher, 462 vncipherlast, 462 vnegd, 397 vnegw, 397 vnmsubfp, 449 vnor, 429 vor, 429 vorc, 429 vpdepd, 480 vperm, 322 vpermr, 322 vpermxor, 470 vpextd, 481 vpkpx, 304 vpksdss, 307 vpksdus, 307 vpkshss, 305 vpkshus, 305 vpkswss, 306 vpkswus, 306 vpkudum, 310 vpkudus, 310 vpkuhum, 308 vpkuhus, 308 vpkuwum, 309 vpkuwus, 309 vpmsumb, 466 vpmsumd, 469 vpmsumh, 467 vpmsumw, 468 vpopcntb, 483 vpopcntd, 484 vpopcnth, 483 vpopcntw, 484 vprtybd, 485 vprtybq, 486 vprtybw, 485 vrefp, 460 vrfim, 453 vrfin, 453 vrfip, 454 vrfiz, 454 vrlb, 430 vrld, 431 vrldmi, 437 vrldnm, 434 vrlh, 430 vrlq, 432 vrlqmi, 438 vrlqnm, 435 vrlw, 431 vrlwmi, 436 vrlwnm, 433 vrsqrtefp, 460 vsbox, 463 vsel, 323 vshasigmad, 464 
vshasigmaw, 465 vsl, 326 vslb, 439 vsld, 440 vsldbi, 324 vsldoi, 324 vslh, 439 vslo, 327 vslq, 441 vslv, 328 vslw, 440 vspltb, 319 vsplth, 319 vspltisb, 321 vspltish, 321 vspltisw, 321 vspltw, 320 vsr, 326 vsrab, 445 vsrad, 446 vsrah, 445 vsraq, 447 vsraw, 446 vsrb, 442 vsrd, 443 vsrdbi, 325 vsrh, 443 vsro, 327 vsrq, 444 vsrv, 328 vsrw, 443 vstribl., 500 vstribl, 500 vstribr., 500 vstribr, 500 vstrihl., 501 vstrihl, 501 vstrihr., 501 vstrihr, 501 vsubcuq, 364 vsubcuw, 357 vsubecuq, 364 vsubeuqm, 363 vsubfp, 448 vsubsbs, 357 vsubshs, 358 vsubsws, 358 vsububm, 359 vsububs, 361 vsubudm, 360 vsubuhm, 359 vsubuhs, 361 vsubuqm, 363 vsubuwm, 360 vsubuws, 362 vsum2sws, 394 vsum4sbs, 395 vsum4shs, 395 vsum4ubs, 396 vsumsws, 393 vupkhpx, 314 vupkhsb, 311 vupkhsh, 312 vupkhsw, 313 vupklpx, 314 vupklsb, 311 vupklsh, 312 vupklsw, 313 vxor, 429 wait, 1116 xor., 126 xor, 126 xori, 126 xoris, 126 xsabsdp, 684 xsabsqp, 684 xsadddp, 685 xsaddqp, 692 xsaddqpo, 692 xsaddsp, 690 xscmpeqdp, 696 xscmpeqqp, 697 xscmpexpdp, 694 xscmpexpqp, 695 xscmpgedp, 698 xscmpgeqp, 699 xscmpgtdp, 700 xscmpgtqp, 701 xscmpodp, 702 xscmpoqp, 704 xscmpudp, 705 xscmpuqp, 707 xscpsgndp, 708 xscpsgnqp, 708 xscvdphp, 709 xscvdpqp, 710 xscvdpsp, 711 xscvdpspn, 712 xscvdpsxds, 713 xscvdpsxws, 715 xscvdpuxds, 717 xscvdpuxws, 719 xscvhpdp, 721 xscvqpdp, 722 xscvqpdpo, 722 xscvqpsdz, 723 xscvqpsqz, 725 xscvqpswz, 727 xscvqpudz, 729 xscvqpuqz, 731 xscvqpuwz, 733 xscvsdqp, 740 xscvspdp, 735 xscvspdpn, 736 xscvsqqp, 737 xscvsxddp, 738 xscvsxdsp, 739 xscvudqp, 740 xscvuqqp, 741 xscvuxddp, 741 xscvuxdsp, 742 xsdivdp, 743 xsdivqp, 745 xsdivqpo, 745 xsdivsp, 747 xsiexpdp, 749 xsiexpqp, 750 xsmaddadp, 751 xsmaddasp, 754 xsmaddmdp, 751 xsmaddmsp, 754 xsmaddqp, 757 xsmaddqpo, 757 xsmaxcdp, 762 xsmaxcqp, 764 xsmaxdp, 760 xsmaxjdp, 765 xsmincdp, 769 xsmincqp, 771 xsmindp, 767 xsminjdp, 772 xsmsubadp, 774 xsmsubasp, 777 xsmsubmdp, 774 xsmsubmsp, 777 xsmsubqp, 780 xsmsubqpo, 780 xsmuldp, 783 xsmulqp, 785 xsmulqpo, 785 xsmulsp, 
878 xsnabsdp, 789 xsnabsqp, 789 xsnegdp, 790 xsnegqp, 790 xsnmaddadp, 791 xsnmaddasp, 796 xsnmaddmdp, 791 xsnmaddmsp, 796 xsnmaddqp, 799 xsnmaddqpo, 799 xsnmsubadp, 802 xsnmsubasp, 805 xsnmsubmdp, 802 xsnmsubmsp, 805 xsnmsubqp, 808 xsnmsubqpo, 808 xsrdpi, 811 xsrdpic, 812 xsrdpim, 813 xsrdpip, 814 xsrdpiz, 815 xsredp, 816 xsresp, 817 xsrqpi, 819 xsrqpix, 819 xsrqpxp, 821 xsrsp, 823 xsrsqrtedp, 824 xsrsqrtesp, 825 xssqrtdp, 827 xssqrtqp, 829 xssqrtqpo, 829 xssqrtsp, 831 xssubdp, 833 xssubqp, 835 xssubqpo, 835 xssubsp, 837 xstdivdp, 839 xstsqrtdp, 840 xststdcdp, 841 xststdcqp, 842 xststdcsp, 843 xsxexpdp, 844 xsxexpqp, 844 xsxsigdp, 845 xsxsigqp, 845 xvabsdp, 846 xvabssp, 846 xvaddd, 847 xvaddsp, 851 xvbf16ger2, 853 xvbf16ger2nn, 853 xvbf16ger2np, 853 xvbf16ger2pn, 853 xvbf16ger2pp, 853 xvcmpeqdp, 858 xvcmpeqdp., 858 xvcmpeqsp, 859 xvcmpeqsp., 859 xvcmpgedp, 860 xvcmpgedp., 860 xvcmpgesp, 861 xvcmpgesp., 861 xvcmpgtdp, 862 xvcmpgtdp., 862 xvcmpgtsp, 863 xvcmpgtsp., 863 xvcpsgndp, 864 xvcpsgnsp, 864 xvcvbf16sp, 865 xvcvdpsp, 866 xvcvdpsxds, 867 xvcvdpsxws, 869 xvcvdpuxds, 871 xvcvdpuxws, 873 xvcvhpsp, 875 xvcvspbf16, 876 xvcvspdp, 877 xvcvsphp, 878 xvcvspsxds, 879 xvcvspsxws, 881 xvcvspuxds, 883 xvcvspuxws, 885 xvcvsxddp, 887 xvcvsxdsp, 888 xvcvsxwdp, 889 xvcvsxwsp, 889 xvcvuxddp, 890 xvcvuxdsp, 891 xvcvuxwdp, 892 xvcvuxwsp, 892 xvdivdp, 893 xvdivsp, 895 xvf16ger2, 897 xvf16ger2nn, 897 xvf16ger2np, 897 xvf16ger2pn, 897 xvf16ger2pp, 897 xvf32ger, 901 xvf32gernn, 901 xvf32gernp, 901 xvf32gerpn, 901 xvf32gerpp, 901 xvf64ger, 905 xvf64gernn, 905 xvf64gernp, 905 xvf64gerpn, 905 xvf64gerpp, 905 xvi16ger2, 917 xvi16ger2pp, 917 xvi16ger2s, 919 xvi16ger2spp, 919 xvi4ger8, 909 xvi4ger8pp, 909 xvi8ger4, 912 xvi8ger4pp, 912 xvi8ger4spp, 915 xviexpdp, 922 xviexpsp, 922 xvmaddadp, 923 xvmaddasp, 926 xvmaddmdp, 923 xvmaddmsp, 926 xvmaxdp, 929 xvmaxsp, 931 xvmindp, 933 xvminsp, 935 xvmsubadp, 937 xvmsubasp, 940 xvmsubmdp, 937 xvmsubmsp, 940 xvmuldp, 943 xvmulsp, 945 xvnabsdp, 947 
xvnabssp, 947 xvnegdp, 948 xvnegsp, 948 xvnmaddadp, 949 xvnmaddasp, 953 xvnmaddmdp, 949 xvnmaddmsp, 953 xvnmsubadp, 956 xvnmsubasp, 959 xvnmsubmdp, 956 xvnmsubmsp, 959 xvrdpi, 962 xvrdpic, 963 xvrdpim, 964 xvrdpip, 965 xvrdpiz, 965 xvredp, 966 xvresp, 967 xvrspi, 968 xvrspic, 969 xvrspim, 970 xvrspip, 971 xvrspiz, 971 xvrsqrtedp, 972 xvrsqrtesp, 973 xvsqrtdp, 974 xvsqrtsp, 975 xvsubdp, 976 xvsubsp, 978 xvtdivdp, 980 xvtdivsp, 981 xvtlsbb, 985 xvtsqrtdp, 982 xvtsqrtsp, 982 xvtstdcdp, 983 xvtstdcsp, 984 xvxexpdp, 986 xvxexpsp, 986 xvxsigdp, 987 xvxsigsp, 987 xxblendvb, 988 xxblendvd, 989 xxblendvh, 988 xxblendvw, 989 xxbrd, 990 xxbrh, 991 xxbrq, 992 xxbrw, 993 xxeval, 993 xxextractuw, 995 xxgenpcvbm, 996 xxgenpcvdm, 1002 xxgenpcvhm, 998 xxgenpcvwm, 1000 xxinsertw, 995 xxland, 1004 xxlandc, 1004 xxleqv, 1005 xxlnand, 1005 xxlnor, 1006 xxlor, 1007 xxlorc, 1006 xxlxor, 1007 xxmfacc, 1009 xxmrghw, 1008 xxmrglw, 1008 xxmtacc, 1009 xxperm, 1011 xxpermdi, 1012 xxpermr, 1011 xxpermx, 1013 xxsel, 1014 xxsetaccz, 1015 xxsldwi, 1016 xxsplti32dx, 1018 xxspltib, 1017 xxspltidp, 1017 xxspltiw, 1018 xxspltw, 1019 @PowerISA_V3.0.pdf [Power ISA Version 3.0 Novomber 30, 2015] ldmx,72 xscmpnedp, 546 xvcmpnedp, 691 xvcmpnedp., 691 xvcmpnesp, 692 xvcmpnesp., 692 @PowerISA_V2.07B.pdf [Power ISA Version 2.07 B April 9, 2015] brinc, 625 dcba, 801 dcbfep, 1095 dcbi, 1149 dcblc, 1154 dcbstep, 1094 dcbtep, 1094 dcbtls, 1153 dcbtstep, 1097 dcbtstls, 1153 dcbzep, 1098 dci, 1270 dcread, 1273 dlmzb., 704 dlmzb, 704 dnh, 1259 doze, 898 dsn, 855 eciwx, 857 ecowx, 857 efdabs, 691 efdadd, 692 efdcfs, 697 efdcfsf, 695 efdcfsi, 694 efdcfsid, 695 efdcfuf, 695 efdcfui, 694 efdcfuid, 695 efdcmpeq, 693 efdcmpgt, 693 efdcmplt, 693 efdctsf, 697 efdctsi, 695 efdctsidz, 696 efdctsiz, 697 efdctuf, 697 efdctui, 695 efdctuidz, 696 efdctuiz, 697 efddiv, 692 efdmul, 692 efdnabs, 691 efdneg, 691 efdsub, 692 efdtsteq, 694 efdtstgt, 693 efdtstlt, 694 efsabs, 684 efsadd, 685 efscfd, 698 efscfsf, 689 efscfsi, 689 
efscfuf, 689 efscfui, 689 efscmpeq, 687 efscmpgt, 686 efscmplt, 686 efsctsf, 690 efsctsi, 689 efsctsiz, 690 efsctuf, 690 efsctui, 689 efsctuiz, 690 efsdiv, 685 efsmul, 685 efsnabs, 684 efsneg, 684 efssub, 685 efststeq, 688 efststgt, 687 efststlt, 688 ehpriv, 1074 evabs, 625 evaddiw, 625 evaddsmiaaw, 625 evaddssiaaw, 626 evaddumiaaw, 626 evaddusiaaw, 626 evaddw, 626 evand, 627 evandc, 627 evcmpeq, 627 evcmpgts, 627 evcmpgtu, 628 evcmplts, 628 evcmpltu, 628 evcntlsw, 629 evcntlzw, 629 evdivws, 629 evdivwu, 630 eveqv, 630 evextsb, 630 evextsh, 630 evfsabs, 676 evfsadd, 677 evfscfsf, 681 evfscfsi, 681 evfscfuf, 681 evfscfui, 681 evfscmpeq, 679 evfscmpgt, 678 evfscmplt, 678 evfsctsf, 683 evfsctsi, 682 evfsctsiz, 682 evfsctuf, 683 evfsctui, 682 evfsctuiz, 682 evfsdiv, 677 evfsmul, 677 evfsnabs, 676 evfsneg, 676 evfssub, 677 evfststeq, 680 evfststgt, 679 evfststlt, 680 evldd, 631 evlddepx, 1100 evlddx, 631 evldh, 631 evldhx, 631 evldw, 632 evldwx, 632 evlhhesplat, 632 evlhhesplatx, 632 evlhhossplat, 633 evlhhossplatx, 633 evlhhousplat, 633 evlhhousplatx, 633 evlwhe, 634 evlwhex, 634 evlwhos, 634 evlwhosx, 634 evlwhou, 635 evlwhoux, 635 evlwhsplat, 635 evlwhsplatx, 635 evlwwsplat, 636 evlwwsplatx, 636 evmergehi, 636 evmergehilo, 637 evmergelo, 636 evmergelohi, 637 evmhegsmfaa, 637 evmhegsmfan, 637 evmhegsmiaa, 638 evmhegsmian, 638 evmhegumiaa, 638 evmhegumian, 638 evmhesmf, 639 evmhesmfa, 639 evmhesmfaaw, 639 evmhesmfanw, 639 evmhesmi, 640 evmhesmia, 640 evmhesmiaaw, 640 evmhesmianw, 640 evmhessf, 641 evmhessfa, 641 evmhessfaaw, 642 evmhessfanw, 642 evmhessiaaw, 643 evmhessianw, 643 evmheumi, 644 evmheumia, 644 evmheumiaaw, 644 evmheumianw, 644 evmheusiaaw, 645 evmheusianw, 645 evmhogsmfaa, 646 evmhogsmfan, 646 evmhogsmiaa, 646 evmhogsmian, 646 evmhogumiaa, 647 evmhogumian, 647 evmhosmf, 647 evmhosmfa, 647 evmhosmfaaw, 648 evmhosmfanw, 648 evmhosmi, 648 evmhosmia, 648 evmhosmiaaw, 649 evmhosmianw, 649 evmhossf, 650 evmhossfa, 650 evmhossfaaw, 651 evmhossfanw, 651 
evmhossiaaw, 652 evmhossianw, 652 evmhoumi, 652 evmhoumia, 652 evmhoumiaaw, 653 evmhoumianw, 653 evmhousiaaw, 653 evmhousianw, 653 evmra, 654 evmwhsmf, 654 evmwhsmfa, 654 evmwhsmi, 654 evmwhsmia, 654 evmwhssf, 655 evmwhssfa, 655 evmwhumi, 655 evmwhumia, 655 evmwlsmiaaw, 656 evmwlsmianw, 656 evmwlssiaaw, 656 evmwlssianw, 656 evmwlumi, 657 evmwlumia, 657 evmwlumiaaw, 657 evmwlumianw, 657 evmwlusiaaw, 658 evmwlusianw, 658 evmwsmf, 658 evmwsmfa, 658 evmwsmfaa, 659 evmwsmfan, 659 evmwsmi, 659 evmwsmia, 659 evmwsmiaa, 659 evmwsmian, 659 evmwssf, 660 evmwssfa, 660 evmwssfaa, 661 evmwssfan, 661 evmwumi, 661 evmwumia, 661 evmwumiaa, 662 evmwumian, 662 evnand, 662 evneg, 662 evnor, 663 evor, 663 evorc, 663 evrlw, 663 evrlwi, 664 evrndw, 664 evsel, 664 evslw, 665 evslwi, 665 evsplatfi, 665 evsplati, 665 evsrwis, 665 evsrwiu, 665 evsrws, 666 evsrwu, 666 evstdd, 666 evstddepx, 1100 evstddx, 666 evstdh, 667 evstdhx, 667 evstdw, 667 evstdwx, 667 evstwhe, 668 evstwhex, 668 evstwho, 668 evstwhox, 668 evstwwe, 668 evstwwex, 668 evstwwo, 669 evstwwox, 669 evsubfsmiaaw, 669 evsubfssiaaw, 669 evsubfumiaaw, 670 evsubfusiaaw, 670 evsubfw, 670 evsubifw, 670 evxor, 670 icbiep, 1098 icblc, 1154 icbtls, 1154 ici, 1270 icread, 1274 lbdx, 853 lbepx, 1090 lddx, 853 ldepx, 1091 lfddx, 853 lfdepx, 1099 lhdx, 853 lhepx, 1090 lvepx, 1101 lvepxl, 1101 lwdx, 853 lwepx, 1091 macchwo., 706 macchwo, 706 macchw., 706 macchw, 706 macchwso., 706 macchwso, 706 macchws., 706 macchws, 706 macchwsuo., 707 macchwsuo, 707 macchwsu., 707 macchwsu, 707 macchwuo., 707 macchwuo, 707 macchwu., 707 macchwu, 707 machhwo., 708 machhwo, 708 machhw., 708 machhw, 708 machhwso., 708 machhwso, 708 machhws., 708 machhws, 708 machhwsuo., 709 machhwsuo, 709 machhwsu., 709 machhwsu, 709 machhwuo., 709 machhwuo, 709 machhwu., 709 machhwu, 709 maclhwo., 710 maclhwo, 710 maclhw., 710 maclhw, 710 maclhwso., 710 maclhwso, 710 maclhws., 710 maclhws, 710 maclhwsuo., 711 maclhwsuo, 711 maclhwsu., 711 maclhwsu, 711 maclhwuo., 711 
maclhwuo, 711 maclhwu., 711 maclhwu, 711 mbar, 821 mcrxr, 143 mfpmr, 1288 mfsr, 958 mfsrin, 958 mtdcr, 1085 mtdcrux, 143 mtdcrx, 1085 mtpmr, 1288 mtsr, 957 mtsrin, 957 mulchw., 711 mulchw, 711 mulchwu., 711 mulchwu, 711 mulhhw., 712 mulhhw, 712 mulhhwu., 712 mulhhwu, 712 mullhw., 712 mullhw, 712 mullhwu., 712 mullhwu, 712 nap, 898 nmacchwo., 713 nmacchwo, 713 nmacchw., 713 nmacchw, 713 nmacchwso., 713 nmacchwso, 713 nmacchws., 713 nmacchws, 713 nmachhwo., 714 nmachhwo, 714 nmachhw., 714 nmachhw, 714 nmachhwso., 714 nmachhwso, 714 nmachhws., 714 nmachhws, 714 nmaclhwo., 715 nmaclhwo, 715 nmaclhw., 715 nmaclhw, 715 nmaclhwso., 715 nmaclhwso, 715 nmaclhws., 715 nmaclhws, 715 rfci, 1072 rfdi, 1073 rfgi, 1074 rfi, 1072 rfmci, 1073 rvwinkle, 899 sleep, 899 stbdx, 854 stbepx, 1092 stddx, 854 stdepx, 1093 stfddx, 854 stfdepx, 1099 sthdx, 854 sthepx, 1092 stvepx, 1102 stvepxl, 1102 stwdx, 854 stwepx, 1093 subfic, 100 subfme., 101 subfme, 101 subfmeo., 101 subfmeo, 101 tabort., 839 tabortdc., 841 tabortdci., 841 tabortwc., 840 tabortwci., 840 tbegin., 837 tcheck, 842 tend., 838 tlbia, 963 tlbilx, 1165 tlbivax, 1163 tlbre, 1170 tlbsrx., 1138 tlbsx, 1136 tlbwe, 1141 trechkpt., 911 treclaim., 910 tsr., 841 wrtee, 1056 wrteei, 1057 e_b, 1307 e_bl, 1307 se_b, 1307 se_bl, 1307 e_bc, 1307 e_bcl, 1307 se_bc, 1307 se_bctr, 1308 se_bctrl, 1308 se_blr, 1308 se_blrl, 1308 se_sc, 1309 e_sc, 1309 se_illegal, 1310 se_rfmci, 1310 se_rfci, 1311 se_rfi, 1311 se_rfdi, 1312 se_rfgi, 1312 e_crand, 1313 e_crandc, 1313 e_creqv, 1313 e_crnand, 1313 e_crnor, 1314 e_cror, 1314 e_crorc, 1314 e_crxor, 1314 e_mcrf, 1314 e_lbz, 1317 se_lbz, 1317 e_lbzu, 1317 e_lha, 1317 e_lhz, 1317 se_lhz, 1317 e_lhau, 1318 e_lhzu, 1318 e_lwz, 1318 se_lwz, 1318 e_lwzu, 1319 e_stb, 1320 se_stb, 1320 e_stbu, 1321 e_sth, 1321 se_sth, 1321 e_sthu, 1321 e_stw, 1322 se_stw, 1322 e_stwu, 1322 e_lmw, 1323 e_stmw, 1323 se_add, 1325 e_add16i, 1325 e_add2i., 1325 e_add2is, 1325 e_addi, 1325 e_addi., 1325 se_addi, 1325 e_addic, 1326 
e_addic., 1326 se_sub, 1326 se_subf, 1326 e_subfic, 1326 e_subfic., 1326 se_subi, 1326 se_subi., 1326 e_mulli, 1327 e_mull2i., 1327 se_mullw, 1327 se_neg, 1327 se_btsti, 1328 e_cmp16i., 1328 e_cmpi, 1329 se_cmp, 1329 se_cmpi, 1329 e_cmpl16i., 1329 e_cmpli, 1330 se_cmpl, 1330 se_cmpli, 1330 e_cmph, 1330 se_cmph, 1331 e_cmph16i., 1331 e_cmphl, 1331 se_cmphl, 1331 e_cmphl16i., 1332 e_and2i., 1333 e_and2is., 1333 e_andi, 1333 e_andi., 1333 se_andi, 1333 e_or2i, 1334 e_or2is, 1334 e_ori, 1334 e_ori., 1334 e_xori, 1334 e_xori., 1334 se_and, 1334 se_and., 1334 se_andc, 1334 se_or, 1335 se_not, 1335 se_bclri, 1335 se_bgeni, 1335 se_bmaski, 1335 se_bseti, 1335 se_extsb, 1336 se_extsh, 1336 se_extzb, 1336 se_extzh, 1336 e_li, 1336 se_li, 1336 e_lis, 1336 se_mfar, 1337 se_mr, 1337 se_mtar, 1337 e_rlw, 1338 e_rlw., 1338 e_rlwi, 1338 e_rlwi., 1338 e_rlwimi, 1338 e_rlwinm, 1338 e_slwi, 1339 e_slwi., 1339 se_slwi, 1339 se_slw, 1339 se_srawi, 1339 se_sraw, 1340 e_srwi, 1340 e_srwi., 1340 se_srwi, 1340 se_srw, 1340 se_mfctr, 1341 se_mtctr, 1341 se_mflr, 1341 se_mtlr, 1341 se_isync, 1342 @PowerISA_V2.06_PUBLIC.pdf [Power ISA Version 2.06 January 30, 2009] lxsdux, 366 lxvd2ux, 366 lxvw4ux, 367 stxsdux, 368 stxvd2ux, 368 stxvw4ux, 369 ================================================ FILE: pypcode/processors/PowerPC/data/manuals/PowerPC.idx ================================================ @powerpc.pdf[PowerPC� Microprocessor Family: The Programming Environments Manual for 32 and 64-bit Microprocessors, Version 2.3, March 31, 2005] add , 353 add. , 353 addo , 353 addo. , 353 addc , 355 addc. , 355 addco , 355 addco. , 355 adde , 356 adde. , 356 addeo , 356 addeo. , 356 addi , 357 addic , 358 addic. , 358 addis , 360 addme , 361 addme. , 361 addmeo , 361 addmeo. , 361 addze , 362 addze. , 362 addzeo , 362 addzeo. , 362 and , 363 and. , 363 andc , 364 andc. , 364 andi. , 365 andis. 
, 366 b , 367 ba , 367 bl , 367 ble, 367 bla , 367 bc , 368 bca , 368 bcl , 368 bcla , 368 bcctr , 369 bcctrl , 369 bclr , 370 bclrl , 370 bdnzlr , 370 bltctr , 369 bltlr , 370 bnectr , 369 bnelr , 370 clrldi , 534 clrlsldi , 533 clrlslwi , 538 clrlwi , 538 clrrdi , 535 clrrwi , 538 cmp , 371 cmpd , 371 cmpdi , 372 cmpi , 372 cmpl , 373 cmpld , 373 cmpldi , 374 cmpli , 374 cmplw , 373 cmplwi , 374 cmpw , 371 cmpwi , 372 cntlzd , 375 cntlzd. , 375 cntlzw , 376 cntlzw. , 376 crand , 377 crandc , 378 crclr , 384 creqv , 379 crmove , 382 crnand , 380 crnor , 381 crnot , 381 cror , 382 crorc , 383 crset , 379 crxor , 384 dcba , 721 dcbf , 385 dcbi , 386 dcbst , 387 dcbt , 388 dcbtst , 390 dcbz , 391 divd , 393 divd. , 393 divdo , 393 divdo. , 393 divdu , 394 divdu. , 394 divduo , 394 divduo. , 394 divw , 395 divw. , 395 divwo , 395 divwo. , 395 divwu , 396 divwu. , 396 divwuo , 396 divwuo. , 396 eciwx , 397 ecowx , 398 eieio , 399 eqv , 401 eqv. , 401 extldi , 535 extlwi , 538 extrdi , 534 extrwi , 538 extsb , 402 extsb. , 402 extsh , 403 extsh. , 403 extsw , 404 extsw. , 404 fabs , 405 fabs. , 405 fadd , 406 fadd. , 406 fadds , 407 fadds. , 407 fcfid , 408 fcfid. , 408 fcmpo , 409 fcmpu , 410 fctid , 411 fctid. , 411 fctidz , 412 fctidz. , 412 fctiw , 413 fctiw. , 413 fctiwz , 414 fctiwz. , 414 fdiv , 415 fdiv. , 415 fdivs , 416 fdivs. , 416 fmadd , 417 fmadd. , 417 fmadds , 418 fmadds. , 418 fmr , 419 fmr. , 419 fmsub , 420 fmsub. , 420 fmsubs , 421 fmsubs. , 421 fmul , 422 fmul. , 422 fmuls , 423 fmuls. , 423 fnabs , 424 fnabs. , 424 fneg , 425 fneg. , 425 fnmadd , 426 fnmadd. , 426 fnmadds , 427 fnmadds., 427 fnmsub , 428 fnmsub. , 428 fnmsubs , 429 fnmsubs., 429 fres , 430 fres. , 430 frsp , 432 frsp. , 432 frsqrte , 433 frsqrte., 433 fsel , 435 fsel. , 435 fsqrt , 436 fsqrt. , 436 fsqrts , 437 fsqrts. , 437 fsub , 438 fsub. , 438 fsubs , 439 fsubs. 
, 439 icbi , 440 inslwi , 537 insrdi , 536 insrwi , 537 isync , 441 la , 357 lbz , 442 lbzu , 443 lbzux , 444 lbzx , 445 ld , 446 ldarx , 447 ldu , 448 ldux , 449 ldx , 450 lfd , 451 lfdu , 452 lfdux , 453 lfdx , 454 lfs , 455 lfsu , 456 lfsux , 457 lfsx , 458 lha , 459 lhau , 460 lhaux , 461 lhax , 462 lhbrx , 463 lhz , 464 lhzu , 465 lhzux , 466 lhzx , 467 li , 357 lis, 360 lmw , 468 lswi , 469 lswx , 471 lwa , 763 lwarx , 474 lwaux , 475 lwax , 476 lwbrx , 477 lwz , 478 lwzu , 479 lwzux , 480 lwzx , 481 mcrf , 482 mcrfs , 483 mcrxr , 484 mfcr , 485 mfctr , 489 mffs , 487 mffs. , 487 mflr , 489 mfmsr , 488 mfocrf , 486 mfspr , 489 mfsr , 492 mfsrin , 494 mftb , 496 mftbu , 497 mfxer , 489 mr , 525 mtcr , 498 mtcrf , 498 mtctr , 507 mtfsb0 , 499 mtfsb0. , 499 mtfsb1 , 500 mtfsb1. , 500 mtfsf , 501 mtfsf. , 501 mtfsfi , 502 mtfsfi. , 502 mtlr , 507 mtmsr , 503 mtmsrd , 505 mtocrf , 506 mtspr , 507 mtsr , 511 mtsrd , 512 mtsrdin , 513 mtsrin , 514 mtxer , 507 mulhd , 515 mulhd. , 515 mulhdu , 516 mulhdu. , 516 mulhw , 517 mulhw. , 517 mulhwu , 518 mulhwu. , 518 mulld , 519 mulld. , 519 mulldo , 519 mulldo. , 519 mulli , 520 mullw , 521 mullw. , 521 mullwo , 521 mullwo. , 521 nand , 522 nand. , 522 neg , 523 neg. , 523 nego , 523 nego. , 523 nop , 527 nor , 524 nor. , 524 not , 524 or , 525 or. , 525 orc , 526 orc. , 526 ori , 527 oris , 528 rfi , 529 rfid , 530 rldcl , 531 rldcl. , 531 rldcr , 532 rldcr. , 532 rldic , 533 rldic. , 533 rldicl , 534 rldicl. , 534 rldicr , 535 rldicr. , 535 rldimi , 536 rldimi. , 536 rlwimi , 537 rlwimi. , 537 rlwinm , 538 rlwinm. , 538 rlwnm , 540 rlwnm. , 540 rotld , 531 rotldi , 534 rotlw , 540 rotlwi , 538 rotrdi , 534 rotrwi , 538 sc , 541 slbia , 542 slbie , 543 slbmfee , 544 slbmfev , 545 slbmte , 546 sld , 547 sld. , 547 sldi , 535 slw , 548 slw. , 548 slwi , 538 srad , 549 srad. , 549 sradi , 550 sradi. , 550 sraw , 551 sraw. , 551 srawi , 552 srawi. , 552 srd , 553 srd. , 553 srdi , 534 srw , 554 srw. 
, 554 srwi , 538 stb , 555 stbu , 556 stbux , 557 stbx , 558 std , 559 stdcx. , 560 stdu , 562 stdux , 563 stdx , 564 stfd , 565 stfdu , 566 stfdux , 567 stfdx , 568 stfiwx , 569 stfs , 570 stfsu , 571 stfsux , 572 stfsx , 573 sth , 574 sthbrx , 575 sthu , 576 sthux , 577 sthx , 578 stmw , 579 stswi , 580 stswx , 581 stw , 582 stwbrx , 583 stwcx. , 584 stwu , 586 stwux , 587 stwx , 588 sub , 589 subc , 590 subf , 589 subf. , 589 subfo , 589 subfo. , 589 subfc , 590 subfc. , 590 subfco , 590 subfco. , 590 subfe , 591 subfe. , 591 subfeo , 591 subfeo. , 591 subfic , 592 subfme , 593 subfme. , 593 subfmeo , 593 subfmeo., 593 subfze , 594 subfze. , 594 subfzeo , 594 subfzeo., 594 subi , 357 subic , 358 subic. , 359 subis , 360 sync , 595 td , 597 tdge , 597 tdi , 598 tdlnl , 597 tdlti , 598 tdnei , 598 tlbia , 599 tlbie , 600 tlbiel , 601 tlbsync , 603 trap , 604 tw , 604 tweq , 604 twgti , 605 twi , 605 twlge , 604 twllei , 605 xor , 606 xor. , 606 xori , 607 xoris , 608 @altivecpem.pdf [AltiVec Technology Programming Environments Manual, Rev.0.1 11/1998 (ALTIVECPEM/D)] dss , 131 dssall , 131 dst , 132 dstt , 132 dstst , 134 dststt , 134 lvebx , 136 lvehx , 138 lvewx , 139 lvsl , 140 lvsr , 142 lvx , 144 lvxl , 145 mfvscr , 146 mtvscr , 147 stvebx , 148 stvehx , 149 stvewx , 150 stvx , 151 stvxl , 152 vaddcuw , 153 vaddfp , 154 vaddsbs , 155 vaddshs , 156 vaddsws , 157 vaddubm , 158 vaddubs , 159 vadduhm , 160 vadduhs , 161 vadduwm , 162 vadduws , 163 vand , 164 vandc , 165 vavgsb , 166 vavgsh , 167 vavgsw , 168 vavgub , 169 vavguh , 170 vavguw , 171 vcfsx , 172 vcfux , 173 vcmpbfp , 174 vcmpbfp. , 174 vcmpeqfp , 176 vcmpeqfp. , 176 vcmpequb , 177 vcmpequb. , 177 vcmpequh , 178 vcmpequh. , 178 vcmpequw , 179 vcmpequw. , 179 vcmpgefp , 180 vcmpgefp. , 180 vcmpgtfp , 181 vcmpgtfp. , 181 vcmpgtsb , 182 vcmpgtsb. , 182 vcmpgtsh , 183 vcmpgtsh. , 183 vcmpgtsw , 184 vcmpgtsw. , 184 vcmpgtub , 185 vcmpgtub. , 185 vcmpgtuh , 186 vcmpgtuh. , 186 vcmpgtuw , 187 vcmpgtuw. 
, 187 vctsxs , 188 vctuxs , 189 vexptefp , 190 vlogefp , 192 vmaddfp , 194 vmaxfp , 195 vmaxsb , 196 vmaxsh , 197 vmaxsw , 198 vmaxub , 199 vmaxuh , 200 vmaxuw , 201 vmhaddshs , 202 vmhraddshs , 203 vminfp , 204 vminsb , 205 vminsh , 206 vminsw , 207 vminub , 208 vminuh , 209 vminuw , 210 vmladduhm , 211 vmrghb , 212 vmrghh , 213 vmrghw , 214 vmrglb , 215 vmrglh , 216 vmrglw , 217 vmsummbm , 218 vmsumshm , 219 vmsumshs , 220 vmsumubm , 221 vmsumuhm , 222 vmsumuhs , 223 vmulesb , 224 vmulesh , 225 vmuleub , 226 vmuleuh , 227 vmulosb , 228 vmulosh , 229 vmuloub , 230 vmulouh , 231 vnmsubfp , 232 vnor , 233 vor , 234 vperm , 235 vpkpx , 236 vpkshss , 237 vpkshus , 238 vpkswss , 239 vpkswus , 240 vpkuhum , 241 vpkuhus , 242 vpkuwum , 243 vpkuwus , 244 vrefp , 245 vrfim , 247 vrfin , 248 vrfip , 249 vrfiz , 250 vrlb , 251 vrlh , 252 vrlw , 253 vrsqrtefp , 254 vsel , 256 vsl , 257 vslb , 258 vsldoi , 259 vslh , 260 vslo , 261 vslw , 262 vspltb , 263 vsplth , 264 vspltisb , 265 vspltish , 266 vspltisw , 267 vspltw , 268 vsr , 269 vsrab , 271 vsrah , 272 vsraw , 273 vsrb , 274 vsrh , 275 vsro , 276 vsrw , 277 vsubcuw , 278 vsubfp , 279 vsubsbs , 280 vsubshs , 281 vsubsws , 282 vsububm , 283 vsububs , 284 vsubuhm , 285 vsubuhs , 286 vsubuwm , 287 vsubuws , 288 vsumsws , 289 vsum2sws , 290 vsum4sbs , 291 vsum4shs , 292 vsum4ubs , 293 vupkhpx , 294 vupkhsb , 295 vupkhsh , 296 vupklpx , 297 vupklsb , 298 vupklsh , 299 vxor , 300 ================================================ FILE: pypcode/processors/PowerPC/data/patterns/PPC_BE_patterns.xml ================================================ 0x4e800020 010010.. 0x.. 0x.. ......00 10010100 00100001 11...... .....000 011111.. ...01000 00000010 10100110 0x7c2c0b78 0x38 0x21 ........ ........ 0x91810000 010010.. 0x.. 0x.. ......00 10010100 00100001 11...... .....000 011111.. ...01000 00000010 10100110 011111.. ...01000 00000010 10100110 10010100 00100001 11...... .....000 10010100 00100001 11...... .....000 0x........ 011111.. 
...01000 00000010 10100110 011111.. ...01000 00000010 10100110 0x........ 10010100 00100001 11...... .....000 10010100 00100001 11...... .....000 0x........ 0x........ 011111.. ...01000 00000010 10100110 011111.. ...01000 00000010 10100110 0x........ 0x........ 10010100 00100001 11...... .....000 0x7c2c0b78 0x38 0x21 ........ ........ 0x91810000 10010100 00100001 11...... .....000 011111.. ...01000 00000010 10100110 011111.. ...01000 00000010 10100110 10010100 00100001 11...... .....000 0x4e 0x80 0x00 0x21 0xf8410028 001111.. ...00010 0xff 0xff 0xe9 ........ ........ ........ 0x7d 0x.9 0x03 0xa6 0xe8 010..... ........ ........ 0x28220000 0x4c 1..00010 0x04 0x20 010010.. ........ ........ ......00 0xf8410028 0xe9 ........ ........ ........ 0x7d 0x.9 0x03 0xa6 0xe8 010..... ........ ........ 0x28220000 0x4c 1..00010 0x04 0x20 010010.. ........ ........ ......00 011111.. ...01000 0x02 0xa6 0x42 1....... 0x00 0x05 011111.. ...01000 0x02 0xa6 001111.. ........ 0x.. 0x.. 001110.. ........ 0x.. 0x.. 011111.. ...01000 0x03 0xa6 011111.. ...01001 0x03 0xa6 0x4e 10000... 0x04 0x20 ================================================ FILE: pypcode/processors/PowerPC/data/patterns/PPC_BE_prepatterns.xml ================================================ 0xf8410028 001111.. ...00010 0xff 0xff 0xe9 ........ ........ ........ 0x7d 0x.9 0x03 0xa6 0xe8 010..... ........ ........ 0x28220000 0x4c 1..00010 0x04 0x20 010010.. ........ ........ ......00 0xf8410028 0xe9 ........ ........ ........ 0x7d 0x.9 0x03 0xa6 0xe8 010..... ........ ........ 0x28220000 0x4c 1..00010 0x04 0x20 010010.. ........ ........ ......00 011111.. ...01000 0x02 0xa6 0x42 1....... 0x00 0x05 011111.. ...01000 0x02 0xa6 001111.. ........ 0x.. 0x.. 001110.. ........ 0x.. 0x.. 011111.. ...01000 0x03 0xa6 011111.. ...01001 0x03 0xa6 0x4e 10000... 
0x04 0x20 0x....823d # addis r12,r2,0x#### 0x..0041f8 # std r2,0x##(r1) 0x....6ce9 # ld r11,0x####(r12) 0xa603697d # mtspr CTR,r11 0x....4ce8 # ld r2,0x####(r12) 0x....6ce9 # ld r11,0x####(r12) 0x2004804e # bctr 0x..0041f8 # std r2,0x##(r1) 0x....62e9 # ld r11,0x####(r2) 0xa603697d # mtspr CTR,r11 0x....62e9 # ld r11,0x####(r2) 0x....42e8 # ld r2,0x####(r2) 0x2004804e # bctr 0x..0041f8 # std r2,0x##(r1) 0x....82e9 # ld r12,0x####(r2) 0xa603897d # mtspr CTR,r12 0x2004804e # bctr 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0xa603897d # mtspr CTR,r12 0x....4be8 # ld r2,0x####(r11) 0x2004804e # bctr 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0x....6b39 # addi r11,r11,0x#### 0xa603897d # mtspr CTR,r12 0x....4be8 # ld r2,0x####(r11) 0x....6be9 # ld r11,0x####(r11) 0x2004804e # bctr 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0xa603897d # mtspr CTR,r12 0x7862827d # xor r2,r12,r12 0x14126b7d # add r11,r11,r2 0x....4be8 # ld r2,0x####(r11) 0x2004804e # bctr 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0x....6b39 # addi r11,r11,0x#### 0xa603897d # mtspr CTR,r12 0x7862827d # xor r2,r12,r12 0x14126b7d # add r11,r11,r2 0x....4be8 # ld r2,0x####(r11) 0x....6be9 # ld r11,0x####(r11) 0x2004804e # bctr 0x..0041f8 # std r2,0x##(r1) 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0xa603897d # mtspr CTR,r12 0x....4be8 # ld r2,0x####(r11) 0x2004804e # bctr 0x..0041f8 # std r2,0x##(r1) 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0x....6b39 # addi r11,r11,0x#### 0xa603897d # mtspr CTR,r12 0x....4be8 # ld r2,0x####(r11) 0x....6be9 # ld r11,0x####(r11) 0x2004804e # bctr 0x..0041f8 # std r2,0x##(r1) 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld r12,0x####(r11) 0xa603897d # mtspr CTR,r12 0x7862827d # xor r2,r12,r12 0x14126b7d # add r11,r11,r2 0x....4be8 # ld r2,0x####(r11) 0x2004804e # bctr 0x..0041f8 # std r2,0x##(r1) 0x....623d # addis r11,r2,0x#### 0x....8be9 # ld 
r12,0x####(r11) 0x....6b39 # addi r11,r11,0x#### 0xa603897d # mtspr CTR,r12 0x7862827d # xor r2,r12,r12 0x14126b7d # add r11,r11,r2 0x....4be8 # ld r2,0x####(r11) 0x....6be9 # ld r11,0x####(r11) 0x2004804e # bctr 0x0000823d # addis r12,r2,0x#### 0x00008ce9 # ld r12,0x####(r12) 0xa603897d # mtspr CTR,r12 0x2004804e # bctr 0x000041f8 # std r2,0x####(r1) 0x0000823d # addis r12,r2,0x#### 0x00008ce9 # ld r12,0x####(r12) 0xa603897d # mtspr CTR,r12 0x2004804e # bctr ================================================ FILE: pypcode/processors/PowerPC/data/patterns/PPC_LE_patterns.xml ================================================ 0x2000804e ......00 0x.. 0x.. 010010.. .....000 11...... 00100001 10010100 10100110 00000010 ...01000 011111.. 0x780b2c7c ........ ........ 0x21 0x38 0x00008191 ......00 0x.. 0x.. 010010.. .....000 11...... 00100001 10010100 10100110 00000010 ...01000 011111.. 10100110 00000010 ...01000 011111.. .....000 11...... 00100001 10010100 .....000 11...... 00100001 10010100 0x........ 10100110 00000010 ...01000 011111.. 10100110 00000010 ...01000 011111.. 0x........ .....000 11...... 00100001 10010100 .....000 11...... 00100001 10010100 0x........ 0x........ 10100110 00000010 ...01000 011111.. 10100110 00000010 ...01000 011111.. 0x........ 0x........ .....000 11...... 00100001 10010100 0x780b2c7c ........ ........ 0x21 0x38 0x00008191 .....000 11...... 00100001 10010100 10100110 00000010 ...01000 011111.. 10100110 00000010 ...01000 011111.. .....000 11...... 00100001 10010100 0x21 0x00 0x80 0x4e 0x280041f8 0xff 0xff ...00010 001111.. ........ ........ ........ 0xe9 0xa6 0x03 0x.9 0x7d ........ ........ 010..... 0xe8 0x00002228 0x20 0x04 1..00010 0x4c ......00 ........ ........ 010010.. 0x280041f8 ........ ........ ........ 0xe9 0xa6 0x03 0x.9 0x7d ........ ........ 010..... 0xe8 0x00002228 0x20 0x04 1..00010 0x4c ......00 ........ ........ 010010.. 0xa6 0x02 ...01000 011111.. 0x05 0x00 1....... 0x42 0xa6 0x02 ...01000 011111.. 0x.. 0x.. ........ 001111.. 
0x.. 0x.. ........ 001110.. 0xa6 0x03 ...01000 011111.. 0xa6 0x03 ...01001 011111.. 0x20 0x04 10000... 0x4e ================================================ FILE: pypcode/processors/PowerPC/data/patterns/PPC_LE_prepatterns.xml ================================================ 0x280041f8 0xff 0xff ...00010 001111.. ........ ........ ........ 0xe9 0xa6 0x03 0x.9 0x7d ........ ........ 010..... 0xe8 0x00002228 0x20 0x04 1..00010 0x4c ......00 ........ ........ 010010.. 0x280041f8 ........ ........ ........ 0xe9 0xa6 0x03 0x.9 0x7d ........ ........ 010..... 0xe8 0x00002228 0x20 0x04 1..00010 0x4c ......00 ........ ........ 010010.. 0xa6 0x02 ...01000 011111.. 0x05 0x00 1....... 0x42 0xa6 0x02 ...01000 011111.. 0x.. 0x.. ........ 001111.. 0x.. 0x.. ........ 001110.. 0xa6 0x03 ...01000 011111.. 0xa6 0x03 ...01001 011111.. 0x20 0x04 10000... 0x4e 0x3d82.... # addis r12,r2,0x#### 0xf84100.. # std r2,0x##(r1) 0xe96c.... # ld r11,0x####(r12) 0x7d6903a6 # mtspr CTR,r11 0xe84c.... # ld r2,0x####(r12) 0xe96c.... # ld r11,0x####(r12) 0x4e800420 # bctr 0xf84100.. # std r2,0x##(r1) 0xe962.... # ld r11,0x####(r2) 0x7d6903a6 # mtspr CTR,r11 0xe962.... # ld r11,0x####(r2) 0xe842.... # ld r2,0x####(r2) 0x4e800420 # bctr 0xf84100.. # std r2,0x##(r1) 0xe982.... # ld r12,0x####(r2) 0x7d8903a6 # mtspr CTR,r12 0x4e800420 # bctr 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x7d8903a6 # mtspr CTR,r12 0xe84b.... # ld r2,0x####(r11) 0x4e800420 # bctr 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x396b.... # addi r11,r11,0x#### 0x7d8903a6 # mtspr CTR,r12 0xe84b.... # ld r2,0x####(r11) 0xe96b.... # ld r11,0x####(r11) 0x4e800420 # bctr 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x7d8903a6 # mtspr CTR,r12 0x7d826278 # xor r2,r12,r12 0x7d6b1214 # add r11,r11,r2 0xe84b.... # ld r2,0x####(r11) 0x4e800420 # bctr 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x396b.... 
# addi r11,r11,0x#### 0x7d8903a6 # mtspr CTR,r12 0x7d826278 # xor r2,r12,r12 0x7d6b1214 # add r11,r11,r2 0xe84b.... # ld r2,0x####(r11) 0xe96b.... # ld r11,0x####(r11) 0x4e800420 # bctr 0xf84100.. # std r2,0x##(r1) 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x7d8903a6 # mtspr CTR,r12 0xe84b.... # ld r2,0x####(r11) 0x4e800420 # bctr 0xf84100.. # std r2,0x##(r1) 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x396b.... # addi r11,r11,0x#### 0x7d8903a6 # mtspr CTR,r12 0xe84b.... # ld r2,0x####(r11) 0xe96b.... # ld r11,0x####(r11) 0x4e800420 # bctr 0xf84100.. # std r2,0x##(r1) 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x7d8903a6 # mtspr CTR,r12 0x7d826278 # xor r2,r12,r12 0x7d6b1214 # add r11,r11,r2 0xe84b.... # ld r2,0x####(r11) 0x4e800420 # bctr 0xf84100.. # std r2,0x##(r1) 0x3d62.... # addis r11,r2,0x#### 0xe98b.... # ld r12,0x####(r11) 0x396b.... # addi r11,r11,0x#### 0x7d8903a6 # mtspr CTR,r12 0x7d826278 # xor r2,r12,r12 0x7d6b1214 # add r11,r11,r2 0xe84b.... # ld r2,0x####(r11) 0xe96b.... 
# ld r11,0x####(r11) 0x4e800420 # bctr 0x3d820000 # addis r12,r2,0x#### 0xe98c0000 # ld r12,0x####(r12) 0x7d8903a6 # mtspr CTR,r12 0x4e800420 # bctr 0xf8410000 # std r2,0x####(r1) 0x3d820000 # addis r12,r2,0x#### 0xe98c0000 # ld r12,0x####(r12) 0x7d8903a6 # mtspr CTR,r12 0x4e800420 # bctr ================================================ FILE: pypcode/processors/PowerPC/data/patterns/patternconstraints.xml ================================================ PPC_BE_patterns.xml PPC_LE_patterns.xml ================================================ FILE: pypcode/processors/PowerPC/data/patterns/prepatternconstraints.xml ================================================ PPC_BE_prepatterns.xml PPC_LE_prepatterns.xml ================================================ FILE: pypcode/processors/RISCV/data/languages/RV32.pspec ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/RV64.pspec ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/andestar_v5.instr.sinc ================================================ # # AndeStar V5 extensions to base RISC-V architecture # # # ExecTable is loaded/overlayed on the memory segment # That is indexed by the E define space ExecTable type=ram_space size=2; @define CUSTOM0 "op0006=0b0001011" @define CUSTOM1 "op0006=0b0101011" @define CUSTOM2 "op0006=0b1011011" @define CUSTOM4 "op0006=0b1010111" simm18_lb: val is sop3131 & op1516 & op1719 & op2020 & op2130 & op1414 [ val = (sop3131<<17) | (op1516<<15) | (op1719<<12) | (op2020<<11) | (op2130<<1) | op1414; ] { export *[const]:$(XLEN) val; } simm18_lh: val is sop3131 & op1516 & op1719 & op2020 & op2130 [ val = (sop3131<<17) | (op1516<<15) | (op1719<<12) | (op2020<<11) | (op2130<<1); ] { export *[const]:$(XLEN) val; } #simm18_lw: val is sop3131 & op1516 & op1719 & op2020 & op2130 [ val = (sop3131<<18) | (op1516<<16) | 
(op1719<<13) | (op2020<<12) | (op2130<<2); ] { simm18_lw: val is sop3131 & op2121 & op1516 & op1719 & op2020 & op2230 [ val = (sop3131<<18) | (op2121 << 17) | (op1516<<15) | (op1719<<12) | (op2020<<11) | (op2230<<2); ] { export *[const]:$(XLEN) val; } simm18_ld: val is sop3131 & op1516 & op1719 & op2020 & op2122 & op2330 [ val = (sop3131<<19) | (op2122<<17) | (op1516<<15) | (op1719<<12) | (op2330<<3); ] { export *[const]:$(XLEN) val; } simm18_sb: val is sop3131 & op1516 & op1719 & op0707 & op2530 & op0811 & op1414 [ val = (sop3131<<17) | (op1516<<15) | (op1719<<12) | (op0707<<11) | (op2530<<5) | (op0811<<1) | op1414; ] { export *[const]:$(XLEN) val; } simm18_sh: val is sop3131 & op1516 & op1719 & op0707 & op2530 & op0811 [ val = (sop3131<<17) | (op1516<<15) | (op1719<<12) | (op0707<<11) | (op2530<<5) | (op0811<<1); ] { export *[const]:$(XLEN) val; } simm18_sw: val is sop3131 & op1516 & op1719 & op0707 & op2530 & op0808 & op0911 [ val = (sop3131<<18) | (op0808<<17) | (op1516<<15) | (op1719<<12) | (op0707<<11) | (op2530<<5) | (op0911<<2); ] { export *[const]:$(XLEN) val; } simm18_sd: val is sop3131 & op1516 & op1719 & op0707 & op2530 & op0809 & op1011 [ val = (sop3131<<19) | (op0809<<17) | (op1516<<15) | (op1719<<12) | (op0707<<11) | (op2530<<5) | (op1011<<3); ] { export *[const]:$(XLEN) val; } cimm: "#"^val is op2024 & op0707 [ val = op0707<<5 | op2024; ] { # Note on 32-bit op0707 must be 0 export *[const]:$(XLEN) val; } cimm7: "#"^val is op3030 & op2024 & op0707 [ val = op3030<<6 | op0707<<5 | op2024; ] { export *[const]:$(XLEN) val; } ra_imm10: dest is sop3131 & op2529 & op0811 [ dest = inst_start + (sop3131 << 10 | op2529<<5 | op0811<<1); ] { export *[ram]:$(XLEN) dest; } :addigp rd,simm18_lb is simm18_lb & rd & op1213=1 & $(CUSTOM0) { rd = gp + simm18_lb; } :bbc rs1,cimm,ra_imm10 is rs1 & cimm & ra_imm10 & op3030=0 & op1214=0b111 & op0707=0 & $(CUSTOM2) { tst:1 = (rs1 & (1 << cimm)) == 0; if (tst) goto ra_imm10; } :bbs rs1,cimm,ra_imm10 is rs1 & cimm & ra_imm10 
& op3030=1 & op1214=0b111 & op0707=0 & $(CUSTOM2) { tst:1 = (rs1 & (1 << cimm)) == 1; if (tst) goto ra_imm10; } :beqc rs1,cimm7,ra_imm10 is rs1 & cimm7 & ra_imm10 & op1214=0b101 & $(CUSTOM2) { tst:1 = rs1 == cimm7; if (tst) goto ra_imm10; } :bnec rs1,cimm7,ra_imm10 is rs1 & cimm7 & ra_imm10 & op1214=0b110 & $(CUSTOM2) { tst:1 = rs1 != cimm7; if (tst) goto ra_imm10; } msb: "#"^op2631 is op2631 { export *[const]:$(XLEN) op2631; } lsb: "#"^op2025 is op2025 { export *[const]:$(XLEN) op2025; } :bfos rd,rs1,msb,lsb is rd & rs1 & op2631=0 & msb & lsb & op1214=0b011 & $(CUSTOM2) { # msb==0 Rd[LSB] = sext(Rs1[0]) shift:$(XLEN) = ($(XLEN)*8-1); val:$(XLEN) = (rs1 & 1 << shift) s>> (shift); val = val << lsb; rd = val; } :bfos rd,rs1,msb,lsb is rd & rs1 & msb & lsb & (op2025 > op2631) & (op2631 != 0) & op1214=0b011 & $(CUSTOM2) { # msb < lsb Rd[LSB:MSB] = sext(Rs1[len-1:0]) len:$(XLEN) = lsb-msb+1; shift:$(XLEN) = ($(XLEN)*8 - len); val:$(XLEN) = (rs1 << shift) s>> shift; val = val << msb; rd = val; } :bfos rd,rs1,msb,lsb is rd & rs1 & msb & lsb & (op2025 <= op2631) & (op2631 != 0) & op1214=0b011 & $(CUSTOM2) { # msb >= lsb Rd[len-1:0] = sext(Rs1[MSB:LSB]) len:$(XLEN) = msb-lsb+1; shift:$(XLEN) = ($(XLEN)*8 - msb - 1); val:$(XLEN) = (rs1 << shift) s>> ($(XLEN)*8 - len); rd = val; } :bfoz rd,rs1,msb,lsb is rd & rs1 & op2631=0 & msb & lsb & op1214=0b010 & $(CUSTOM2) { # msb==0 Rd[LSB] = zext(Rs1[0]) val:$(XLEN) = rs1 & 1; val = val << lsb; rd = val; } :bfoz rd,rs1,msb,lsb is rd & rs1 & msb & lsb & (op2025 > op2631) & (op2631 != 0) & op1214=0b010 & $(CUSTOM2) { # msb < lsb Rd[LSB:MSB] = zext(Rs1[len-1:0]) len:$(XLEN) = lsb-msb+1; mask:$(XLEN) = ((-1) >> ($(XLEN)*8 -len)); val:$(XLEN) = rs1 & mask; val = val << msb; rd = val; } :bfoz rd,rs1,msb,lsb is rd & rs1 & msb & lsb & (op2025 <= op2631) & op1214=0b010 & $(CUSTOM2) { # msb >= lsb Rd[len-1:0] = zext(Rs1[MSB:LSB]) len:$(XLEN) = msb-lsb+1; mask:$(XLEN) = ((-1) >> ($(XLEN)*8 -len)) << lsb; val:$(XLEN) = rs1 & mask; val = val >> 
lsb; rd = val; } :lea.h rd,rs1,rs2 is op2531=0b0000101 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + rs2 * 2; rd = ea; } :lea.w rd,rs1,rs2 is op2531=0b0000110 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + rs2 * 4; rd = ea; } :lea.d rd,rs1,rs2 is op2531=0b0000111 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + rs2 * 8; rd = ea; } :lea.b.ze rd,rs1,rs2 is op2531=0b0001000 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + zext(rs2:4); rd = ea; } :lea.h.ze rd,rs1,rs2 is op2531=0b0001001 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + zext(rs2:4) * 2; rd = ea; } :lea.w.ze rd,rs1,rs2 is op2531=0b0001010 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + zext(rs2:4) * 4; rd = ea; } :lea.d.ze rd,rs1,rs2 is op2531=0b0001011 & rs2 & rs1 & op1214=0 & rd & $(CUSTOM2) { local ea:$(XLEN) = rs1 + zext(rs2:4) * 8; rd = ea; } :lbgp rd,"["^simm18_lb^"]" is simm18_lb & rd & op1213=0 & $(CUSTOM0) { local ea:$(XLEN) = gp + simm18_lb; rd = sext(*[ram]:1 ea); } :lbugp rd,"["^simm18_lb^"]" is simm18_lb & rd & op1213=2 & $(CUSTOM0) { local ea:$(XLEN) = gp + simm18_lb; rd = zext(*[ram]:1 ea); } :lhgp rd,"["^simm18_lh^"]" is simm18_lh & rd & op1214=1 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_lh; rd = sext(*[ram]:2 ea); } :lhugp rd,"["^simm18_lh^"]" is simm18_lh & rd & op1214=5 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_lh; rd = zext(*[ram]:2 ea); } :lwgp rd,"["^simm18_lw^"]" is simm18_lw & rd & op1214=2 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_lw; rd = sext(*[ram]:4 ea); } :lwugp rd,"["^simm18_lw^"]" is simm18_lw & rd & op1214=6 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_lw; rd = zext(*[ram]:4 ea); } :ldgp rd,"["^simm18_ld^"]" is simm18_ld & rd & op1214=3 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_ld; rd = *[ram]:8 ea; } :sbgp rs2,"["^simm18_sb^"]" is simm18_sb & rs2 & op1213=3 & $(CUSTOM0) { local ea:$(XLEN) = gp + simm18_sb; *[ram]:1 ea = rs2[0,8]; } :shgp 
rs2,"["^simm18_sh^"]" is simm18_sh & rs2 & op1214=0 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_sh; *[ram]:2 ea = rs2[0,16]; } :swgp rs2,"["^simm18_sw^"]" is simm18_sw & rs2 & op1214=4 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_sw; *[ram]:4 ea = rs2[0,32]; } :sdgp rs2,"["^simm18_sd^"]" is simm18_sd & rs2 & op1214=7 & $(CUSTOM1) { local ea:$(XLEN) = gp + simm18_sd; *[ram]:8 ea = rs2; } :ffb rd,rs1,rs2 is rd & rs1 & rs2 & op2531=0b0010000 & op1214=0 & $(CUSTOM2) { @if XLEN == "4" m1:1 = (rs1[0,8] == rs2[0,8]); m2:1 = (rs1[8,8] == rs2[0,8]); m3:1 = (rs1[16,8] == rs2[0,8]); m4:1 = (rs1[24,8] == rs2[0,8]); rd = -4; if (m1) goto inst_next; rd = -3; if (m2) goto inst_next; rd = -2; if (m3) goto inst_next; rd = -1; if (m4) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1); @else m1:1 = (rs1[0,8] == rs2[0,8]); m2:1 = (rs1[8,8] == rs2[0,8]); m3:1 = (rs1[16,8] == rs2[0,8]); m4:1 = (rs1[24,8] == rs2[0,8]); m5:1 = (rs1[32,8] == rs2[0,8]); m6:1 = (rs1[40,8] == rs2[0,8]); m7:1 = (rs1[48,8] == rs2[0,8]); m8:1 = (rs1[56,8] == rs2[0,8]); rd = -8; if (m1) goto inst_next; rd = -7; if (m2) goto inst_next; rd = -6; if (m3) goto inst_next; rd = -5; if (m4) goto inst_next; rd = -4; if (m5) goto inst_next; rd = -3; if (m6) goto inst_next; rd = -2; if (m7) goto inst_next; rd = -1; if (m8) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-8) + (zext(m2)*-7) + (zext(m3)*-6) + (zext(m4)*-5) + (zext(m5)*-4) + (zext(m6)*-3) + (zext(m7)*-2) + (zext(m8)*-1); @endif } :ffzmism rd,rs1,rs2 is rd & rs1 & rs2 & op2531=0b0010001 & op1214=0 & $(CUSTOM2) { @if XLEN == "4" m1:1 = (rs1[0,8]==0) | (rs1[0,8] == rs2[0,8]); m2:1 = (rs1[8,8]==0) | (rs1[8,8] == rs2[8,8]); m3:1 = (rs1[16,8]==0) | (rs1[16,8] == rs2[16,8]); m4:1 = (rs1[24,8]==0) | (rs1[24,8] == rs2[24,8]); rd = -4; if (m1) goto inst_next; rd = -3; if (m2) goto inst_next; rd = -2; if (m3) goto inst_next; rd = -1; if (m4) goto inst_next; rd = 0; # choosery method # rd 
= 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1); @else m1:1 = (rs1[0,8]==0) | (rs1[0,8] == rs2[0,8]); m2:1 = (rs1[8,8]==0) | (rs1[8,8] == rs2[8,8]); m3:1 = (rs1[16,8]==0) | (rs1[16,8] == rs2[16,8]); m4:1 = (rs1[24,8]==0) | (rs1[24,8] == rs2[24,8]); m5:1 = (rs1[32,8]==0) | (rs1[32,8] == rs2[32,8]); m6:1 = (rs1[40,8]==0) | (rs1[40,8] == rs2[40,8]); m7:1 = (rs1[48,8]==0) | (rs1[48,8] == rs2[48,8]); m8:1 = (rs1[56,8]==0) | (rs1[56,8] == rs2[56,8]); rd = -8; if (m1) goto inst_next; rd = -7; if (m2) goto inst_next; rd = -6; if (m3) goto inst_next; rd = -5; if (m4) goto inst_next; rd = -4; if (m5) goto inst_next; rd = -3; if (m6) goto inst_next; rd = -2; if (m7) goto inst_next; rd = -1; if (m8) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-8) + (zext(m2)*-7) + (zext(m3)*-6) + (zext(m4)*-5) + (zext(m5)*-4) + (zext(m6)*-3) + (zext(m7)*-2) + (zext(m8)*-1); @endif } :ffmism rd,rs1,rs2 is rd & rs1 & rs2 & op2531=0b0010010 & op1214=0 & $(CUSTOM2) { @if XLEN == "4" m1:1 = (rs1[0,8] != rs2[0,8]); m2:1 = (rs1[8,8] != rs2[8,8]); m3:1 = (rs1[16,8] != rs2[16,8]); m4:1 = (rs1[24,8] != rs2[24,8]); rd = -4; if (m1) goto inst_next; rd = -3; if (m2) goto inst_next; rd = -2; if (m3) goto inst_next; rd = -1; if (m4) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1); @else m1:1 = (rs1[0,8] != rs2[0,8]); m2:1 = (rs1[8,8] != rs2[8,8]); m3:1 = (rs1[16,8] != rs2[16,8]); m4:1 = (rs1[24,8] != rs2[24,8]); m5:1 = (rs1[32,8] != rs2[32,8]); m6:1 = (rs1[40,8] != rs2[40,8]); m7:1 = (rs1[48,8] != rs2[48,8]); m8:1 = (rs1[56,8] != rs2[56,8]); rd = -8; if (m1) goto inst_next; rd = -7; if (m2) goto inst_next; rd = -6; if (m3) goto inst_next; rd = -5; if (m4) goto inst_next; rd = -4; if (m5) goto inst_next; rd = -3; if (m6) goto inst_next; rd = -2; if (m7) goto inst_next; rd = -1; if (m8) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-8) + (zext(m2)*-7) + (zext(m3)*-6) + (zext(m4)*-5) + 
(zext(m5)*-4) + (zext(m6)*-3) + (zext(m7)*-2) + (zext(m8)*-1); @endif } :flmism rd,rs1,rs2 is rd & rs1 & rs2 & op2531=0b0010011 & op1214=0 & $(CUSTOM2) { @if XLEN == "4" m1:1 = (rs1[0,8] != rs2[0,8]); m2:1 = (rs1[8,8] != rs2[8,8]); m3:1 = (rs1[16,8] != rs2[16,8]); m4:1 = (rs1[24,8] != rs2[24,8]); rd = -1; if (m4) goto inst_next; rd = -2; if (m3) goto inst_next; rd = -3; if (m2) goto inst_next; rd = -4; if (m1) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-4) + (zext(m2)*-3) + (zext(m3)*-2) + (zext(m4)*-1); @else m1:1 = (rs1[0,8] != rs2[0,8]); m2:1 = (rs1[8,8] != rs2[8,8]); m3:1 = (rs1[16,8] != rs2[16,8]); m4:1 = (rs1[24,8] != rs2[24,8]); m5:1 = (rs1[32,8] != rs2[32,8]); m6:1 = (rs1[40,8] != rs2[40,8]); m7:1 = (rs1[48,8] != rs2[48,8]); m8:1 = (rs1[56,8] != rs2[56,8]); rd = -1; if (m8) goto inst_next; rd = -2; if (m7) goto inst_next; rd = -3; if (m6) goto inst_next; rd = -4; if (m5) goto inst_next; rd = -5; if (m4) goto inst_next; rd = -6; if (m3) goto inst_next; rd = -7; if (m2) goto inst_next; rd = -8; if (m1) goto inst_next; rd = 0; # choosery method # rd = 0 + (zext(m1)*-8) + (zext(m2)*-7) + (zext(m3)*-6) + (zext(m4)*-5) + (zext(m5)*-4) + (zext(m6)*-3) + (zext(m7)*-2) + (zext(m8)*-1); @endif } imm11_exec: val is cop1212 & cop1011 & cop0909 & cop0808 & cop0506 & cop0404 & cop0303 & cop0202 [ val = (cop0808<<11)|(cop1212<<10)|(cop0303<<9)|(cop0909<<8)|(cop0506<<6)|(cop0202<<5)|(cop1011<<3)|(cop0404<<2); ] { export *[ExecTable]:2 val; } # # Code Dense (CoDense) extension #100 imm[10|4:3|8] imm[11] 0 imm[7:6|2|9|5] 00 :exec.it imm11_exec is ecdv=0 & cop1315=4 & imm11_exec & cop0707=0 & cop0001=0 { ExecRetAddr = inst_next; goto imm11_exec; } :ex9.it imm11_exec is ecdv=0 & cop1315=4 & imm11_exec & cop0708=0 & cop0001=0 { ExecRetAddr = inst_next; goto imm11_exec; } # # alternate version of EXEC.IT when mmsc_cfb.ECDV=1 # imm11_nexec: val is cop1011 & cop0909 & cop0808 & cop0707 & cop0506 & cop0404 & cop0303 & cop0202 [ val = 
(cop0808<<11)|(cop0707<<10)|(cop0303<<9)|(cop0909<<8)|(cop0506<<6)|(cop0202<<5)|(cop1011<<3)|(cop0404<<2); ] { export *[ExecTable]:2 val; } # 100 1 imm[4:3|8] imm[11] imm[10] imm[7:6|2|9|5] 00 :nexec.it imm11_nexec is ecdv=1 & cop1315=4 & cop1212=1 & imm11_nexec & cop0001=0 { ExecRetAddr = inst_next; goto imm11_nexec; } # # INT4 vector load extension # define pcodeop vln8; :vln8.v vd,(rs1)^vm is vd & rs1 & op2631=0b000001 & vm & op2024=0b00010 & op1214=0b100 & $(CUSTOM2) { # TODO load 32 4bit values, possibly sext by vm into 32 8-bit vector registers val:$(VLEN) = *[ram]:$(VLEN) rs1; vd = vln8(val); build vm; } :vlnu8.v vd,(rs1)^vm is vd & rs1 & op2631=0b000001 & vm & op2024=0b00011 & op1214=0b100 & $(CUSTOM2) { # TODO load 32 4bit values, possibly zext by vm into 32 8-bit vector registers val:$(VLEN) = *[ram]:$(VLEN) rs1; vd = vln8(val); build vm; } # # bfloat16 conversion extension # define pcodeop fcvt.s.bf16; :fcvt.s.bf16 frd,frs2 is frd & frs2 & op2531=0 & op1519=0b00010 & op1214=0b100 & $(CUSTOM2) { frd = fcvt.s.bf16(frs2); } define pcodeop fcvt.bf16.s; :fcvt.bf16.s frd,frs2 is frd & frs2 & op2531=0 & op1519=0b00011 & op1214=0b100 & $(CUSTOM2) { frd = fcvt.bf16.s(frs2); } # # Vector BFloat16 conversion extension # define pcodeop vfwcvt.s.bf16; :vfwcvt.s.bf16 vd,vs2 is vd & vs2 & op2631=0b000000 & op1519=0b00000 & op1214=0b100 & $(CUSTOM2) { vd = vfwcvt.s.bf16(vs2); } define pcodeop vfncvt.bf16.s; :vfncvt.bf16.s vd,vs2 is vd & vs2 & op2631=0b000000 & op1519=0b00001 & op1214=0b100 & $(CUSTOM2) { vd = vfncvt.bf16.s(vs2); } define pcodeop vfpmadt.vf; :vfpmadt.vf vd,rs1,vs2^vm is vd & rs1 & vs2 & vm & op2631=0b000010 & op1214=0b100 & $(CUSTOM2) { vd = vfpmadt.vf(rs1,vs2); build vm; } define pcodeop vfpmadb.vf; :vfpmadb.vf vd,rs1,vs2^vm is vd & rs1 & vs2 & vm & op2631=0b000011 & op1214=0b100 & $(CUSTOM2) { vd = vfpmadb.vf(rs1,vs2); build vm; } define pcodeop vd4dots.vv; :vd4dots.vv vd,vs1,vs2^vm is vd & vs1 & vs2 & vm & op2631=0b000100 & op1214=0b100 & $(CUSTOM2) { 
vd = vd4dots.vv(vs1,vs2); build vm; } define pcodeop vd4dotu.vv; :vd4dotu.vv vd,vs1,vs2^vm is vd & vs1 & vs2 & vm & op2631=0b000111 & op1214=0b100 & $(CUSTOM2) { vd = vd4dotu.vv(vs1,vs2); build vm; } define pcodeop vd4dotsu.vv; :vd4dotsu.vv vd,vs1,vs2^vm is vd & vs1 & vs2 & vm & op2631=0b000101 & op1214=0b100 & $(CUSTOM2) { vd = vd4dotsu.vv(vs1,vs2); build vm; } define pcodeop vle4.v; :vle4.v vd,(rs1) is vd & rs1 & op2631=0b000001 & op2525=1 & op2024=0b00000 & op1214=0b100 & $(CUSTOM2) { val:$(VLEN) = *[ram]:$(VLEN) rs1; vd = vle4.v(val); } define pcodeop vfwcvt.f.n.v; :vfwcvt.f.n.v vd,vs2^vm is vd & vs2 & op2631=0b000000 & vm & op1519=0b00100 & op1214=0b100 & $(CUSTOM2) { vd = vfwcvt.f.n.v(vs2); build vm; } define pcodeop vfwcvt.f.nu.v; :vfwcvt.f.nu.v vd,vs2^vm is vd & vs2 & op2631=0b000000 & vm & op1519=0b00101 & op1214=0b100 & $(CUSTOM2) { vd = vfwcvt.f.nu.v(vs2); build vm; } define pcodeop vfwcvt.f.b.v; :vfwcvt.f.b.v vd,vs2^vm is vd & vs2 & op2631=0b000000 & vm & op1519=0b00110 & op1214=0b100 & $(CUSTOM2) { vd = vfwcvt.f.b.v(vs2); build vm; } define pcodeop vfwcvt.f.bu.v; :vfwcvt.f.bu.v vd,vs2^vm is vd & vs2 & op2631=0b000000 & vm & op1519=0b00111 & op1214=0b100 & $(CUSTOM2) { vd = vfwcvt.f.bu.v(vs2); build vm; } ================================================ FILE: pypcode/processors/RISCV/data/languages/andestar_v5.ldefs ================================================ AndeStar v5 RISC-V based 32 little default ================================================ FILE: pypcode/processors/RISCV/data/languages/andestar_v5.slaspec ================================================ define endian=little; @define XLEN 4 @define XLEN2 8 @define FLEN 8 @define CONTEXTLEN 8 @define ADDRSIZE "32" @define FPSIZE "64" @include "riscv.reg.sinc" define context CONTEXT isExecInstr=(32,32) phase=(33,33) ecdv=(34,34) ; @include "riscv.table.sinc" # artificial return register define register offset=0x6000 size=4 [ ExecRetAddr ]; Dest: is epsilon { export *[ram]:1 ExecRetAddr; } 
:^instruction is phase=0 & isExecInstr=1 & instruction [ phase=1; ] { build instruction; local dest:$(XLEN) = ExecRetAddr; goto [dest]; } :^instruction is phase=0 & isExecInstr=0 & instruction [ phase=1; ] { build instruction; } with : phase=1 { @include "riscv.instr.sinc" @include "andestar_v5.instr.sinc" } ================================================ FILE: pypcode/processors/RISCV/data/languages/old/riscv_deprecated.ldefs ================================================ RISC-V 64 little base RISC-V 64 little base compressed RISC-V 64 little general purpose RISC-V 64 little general purpose compressed RISC-V 32 little base RISC-V 32 little base compressed RISC-V 32 little base compressed RISC-V 32 little general purpose RISC-V 32 little general purpose compressed ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.csr.sinc ================================================ # RV32/RV64 Zicsr Standard Extension # csrrc d,E,s 00003073 0000707f SIMPLE (0, 0) :csrc csr,rs1 is rs1 & csr & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x3 & op0711=0 { local tmprs1:$(XLEN) = rs1; local oldcsr:$(XLEN) = csr:$(XLEN); local newcsr:$(XLEN) = oldcsr & ~tmprs1; csr = newcsr; } # csrrc d,E,s 00003073 0000707f SIMPLE (0, 0) :csrrc rdDst,csr,rs1 is rs1 & csr & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x3 & op0711 { local tmprs1:$(XLEN) = rs1; local oldcsr:$(XLEN) = csr:$(XLEN); local newcsr:$(XLEN) = oldcsr & ~tmprs1; csr = newcsr; rdDst = oldcsr; } # csrrci d,E,Z 00007073 0000707f SIMPLE (0, 0) :csrci csr,op1519 is op1519 & op0711=0 & csr & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x7 { local oldcsr:$(XLEN) = csr:$(XLEN); local tmp:$(XLEN) = op1519; csr = oldcsr & ~tmp; } # csrrci d,E,Z 00007073 0000707f SIMPLE (0, 0) :csrrci rdDst,csr,op1519 is op1519 & csr & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x7 { local oldcsr:$(XLEN) = csr:$(XLEN); local tmp:$(XLEN) = op1519; csr = oldcsr & ~tmp; rdDst = 
oldcsr; } # csrrs d,E,s 00002073 0000707f SIMPLE (0, 0) :csrr rdDst,csr is csr & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1519=0 & op0711 { rdDst = csr:$(XLEN); } # csrrs d,E,s 00002073 0000707f SIMPLE (0, 0) :csrs csr,rs1 is rs1 & csr & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1519 & op0711=0 { local oldcsr:$(XLEN) = csr:$(XLEN); csr = oldcsr | rs1; } # csrrs d,E,s 00002073 0000707f SIMPLE (0, 0) :csrrs rdDst,csr,rs1 is rs1 & csr & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1519 & op0711 { local oldcsr:$(XLEN) = csr:$(XLEN); csr = oldcsr | rs1; rdDst = oldcsr; } # csrrsi d,E,Z 00006073 0000707f SIMPLE (0, 0) :csrsi csr,op1519 is op1519 & csr & op0711=0 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x6 { local oldcsr:$(XLEN) = csr:$(XLEN); local tmp:$(XLEN) = op1519; csr = oldcsr | tmp; } # csrrsi d,E,Z 00006073 0000707f SIMPLE (0, 0) :csrrsi rdDst,csr,op1519 is op1519 & csr & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x6 & op0711 { local oldcsr:$(XLEN) = csr:$(XLEN); local tmp:$(XLEN) = op1519; csr = oldcsr | tmp; rdDst = oldcsr; } # csrw d,E,s 00001073 0000707f SIMPLE (0, 0) :csrw csr,rs1 is rs1 & csr & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & r0711=0 { csr = rs1; } # csrrw d,E,s 00001073 0000707f SIMPLE (0, 0) :csrrw rdDst,csr,rs1 is rs1 & csr & rdDst & r0711 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 { local tmprs1:$(XLEN) = rs1; local oldcsr:$(XLEN) = csr:$(XLEN); csr = tmprs1; rdDst = oldcsr; } # csrrwi d,E,Z 00005073 0000707f SIMPLE (0, 0) :csrwi csr,op1519 is op1519 & csr & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & r0711=0 { local val:$(XLEN) = op1519; csr = val; } # csrrwi d,E,Z 00005073 0000707f SIMPLE (0, 0) :csrrwi rdDst,csr,op1519 is op1519 & csr & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & r0711 { local oldcsr:$(XLEN) = csr:$(XLEN); local val:$(XLEN) = op1519; csr = val; rdDst = oldcsr; } # frcsr d 00302073 fffff07f SIMPLE (0, 0) :frcsr 
rdDst is rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x60 { rdDst = fcsr; } # frflags d 00102073 fffff07f SIMPLE (0, 0) :frflags rdDst is rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x20 { rdDst = zext(fflags[0,5]); } # frrm d 00202073 fffff07f SIMPLE (0, 0) :frrm rdDst is rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x40 { rdDst = frm; } # fscsr s 00301073 fff07fff SIMPLE (0, 0) :fscsr rs1 is rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op0711=0x0 & op2031=0x3 { zero = fcsr; fcsr = rs1; } # fscsr d,s 00301073 fff0707f SIMPLE (0, 0) :fscsr rdDst,rs1 is rs1 & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op2031=0x3 { rdDst = fcsr; fcsr = rs1; } # fsflags s 00101073 fff07fff SIMPLE (0, 0) :fsflags rs1 is rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op0711=0x0 & op2031=0x1 { zero = zext(fflags[0,5]); fflags[0,5] = rs1[0,5]; } # fsflags d,s 00101073 fff0707f SIMPLE (0, 0) :fsflags rdDst,rs1 is rs1 & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op2031=0x1 { rdDst = zext(fflags[0,5]); fflags[0,5] = rs1[0,5]; } # fsflagsi d,Z 00105073 fff0707f SIMPLE (0, 0) :fsflagsi rdDst,op1519 is op1519 & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & op2031=0x1 { rdDst = zext(fflags[0,5]); local tmp:1 = op1519:1; fflags[0,5] = tmp[0,5]; } # fsflagsi Z 00105073 fff07fff SIMPLE (0, 0) :fsflagsi op1519 is op1519 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & op0711=0x0 & op2031=0x1 { zero = zext(fflags[0,5]); local tmp:1 = op1519:1; fflags[0,5] = tmp[0,5]; } # fsrm s 00201073 fff07fff SIMPLE (0, 0) :fsrm rs1 is rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op0711=0x0 & op2031=0x2 { zero = zext(frm[0,3]); frm[0,3] = rs1[0,3]; } # fsrm d,s 00201073 fff0707f SIMPLE (0, 0) :fsrm rdDst,rs1 is rs1 & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op2031=0x2 { rdDst = zext(frm[0,3]); frm[0,3] = rs1[0,3]; } # fsrmi d,Z 00205073 
fff0707f SIMPLE (0, 0) :fsrmi rdDst,op1519 is op1519 & rdDst & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & op2031=0x2 { rdDst = zext(frm[0,3]); local tmp:1 = op1519:1; frm[0,3] = tmp[0,3]; } # fsrmi Z 00205073 fff07fff SIMPLE (0, 0) :fsrmi op1519 is op1519 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & op0711=0x0 & op2031=0x2 { zero = zext(frm[0,3]); local tmp:1 = op1519:1; frm[0,3] = tmp[0,3]; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.custom.sinc ================================================ # custom define pcodeop custom0; define pcodeop custom0.rs1; define pcodeop custom0.rs1.rs2; define pcodeop custom0.rd; define pcodeop custom0.rd.rs1; define pcodeop custom0.rd.rs1.rs2; define pcodeop custom1; define pcodeop custom1.rs1; define pcodeop custom1.rs1.rs2; define pcodeop custom1.rd; define pcodeop custom1.rd.rs1; define pcodeop custom1.rd.rs1.rs2; define pcodeop custom2; define pcodeop custom2.rs1; define pcodeop custom2.rs1.rs2; define pcodeop custom2.rd; define pcodeop custom2.rd.rs1; define pcodeop custom2.rd.rs1.rs2; define pcodeop custom3; define pcodeop custom3.rs1; define pcodeop custom3.rs1.rs2; define pcodeop custom3.rd; define pcodeop custom3.rd.rs1; define pcodeop custom3.rd.rs1.rs2; :custom0 is op0001=0x3 & op0204=0x2 & op0506=0x0 & (op1214=0x0 | op1214=0x1 | op1214=0x5) { custom0(); } :custom0.rs1 rs1 is op0001=0x3 & op0204=0x2 & op0506=0x0 & op1214=0x2 & rs1 { custom0.rs1(rs1); } :custom0.rs1.rs2 rs1,rs2 is op0001=0x3 & op0204=0x2 & op0506=0x0 & op1214=0x3 & rs1 & rs2 { custom0.rs1.rs2(rs1, rs2); } :custom0.rd rd is op0001=0x3 & op0204=0x2 & op0506=0x0 & op1214=0x4 & rd { rd = custom0.rd(); } :custom0.rd.rs1 rd,rs1 is op0001=0x3 & op0204=0x2 & op0506=0x0 & op1214=0x6 & rd & rs1 { rd = custom0.rd.rs1(rs1); } :custom0.rd.rs1.rs2 rd,rs1,rs2 is op0001=0x3 & op0204=0x2 & op0506=0x0 & op1214=0x7 & rd & rs1 & rs2 { rd = custom0.rd.rs1.rs2(rs1, rs2); } :custom1 is op0001=0x3 & 
op0204=0x2 & op0506=0x1 & (op1214=0x0 | op1214=0x1 | op1214=0x5) { custom1(); } :custom1.rs1 rs1 is op0001=0x3 & op0204=0x2 & op0506=0x1 & op1214=0x2 & rs1 { custom1.rs1(rs1); } :custom1.rs1.rs2 rs1,rs2 is op0001=0x3 & op0204=0x2 & op0506=0x1 & op1214=0x3 & rs1 & rs2 { custom1.rs1.rs2(rs1, rs2); } :custom1.rd rd is op0001=0x3 & op0204=0x2 & op0506=0x1 & op1214=0x4 & rd { rd = custom1.rd(); } :custom1.rd.rs1 rd,rs1 is op0001=0x3 & op0204=0x2 & op0506=0x1 & op1214=0x6 & rd & rs1 { rd = custom1.rd.rs1(rs1); } :custom1.rd.rs1.rs2 rd,rs1,rs2 is op0001=0x3 & op0204=0x2 & op0506=0x1 & op1214=0x7 & rd & rs1 & rs2 { rd = custom1.rd.rs1.rs2(rs1, rs2); } #TODO handle RV128 for custom-2/custom-3 :custom2 is op0001=0x3 & op0204=0x6 & op0506=0x2 & (op1214=0x0 | op1214=0x1 | op1214=0x5) { custom2(); } :custom2.rs1 rs1 is op0001=0x3 & op0204=0x6 & op0506=0x2 & op1214=0x2 & rs1 { custom2.rs1(rs1); } :custom2.rs1.rs2 rs1,rs2 is op0001=0x3 & op0204=0x6 & op0506=0x2 & op1214=0x3 & rs1 & rs2 { custom2.rs1.rs2(rs1, rs2); } :custom2.rd rd is op0001=0x3 & op0204=0x6 & op0506=0x2 & op1214=0x4 & rd { rd = custom2.rd(); } :custom2.rd.rs1 rd,rs1 is op0001=0x3 & op0204=0x6 & op0506=0x2 & op1214=0x6 & rd & rs1 { rd = custom2.rd.rs1(rs1); } :custom2.rd.rs1.rs2 rd,rs1,rs2 is op0001=0x3 & op0204=0x6 & op0506=0x2 & op1214=0x7 & rd & rs1 & rs2 { rd = custom2.rd.rs1.rs2(rs1, rs2); } :custom3 is op0001=0x3 & op0204=0x6 & op0506=0x3 & (op1214=0x0 | op1214=0x1 | op1214=0x5) { custom3(); } :custom3.rs1 rs1 is op0001=0x3 & op0204=0x6 & op0506=0x3 & op1214=0x2 & rs1 { custom3.rs1(rs1); } :custom3.rs1.rs2 rs1,rs2 is op0001=0x3 & op0204=0x6 & op0506=0x3 & op1214=0x3 & rs1 & rs2 { custom3.rs1.rs2(rs1, rs2); } :custom3.rd rd is op0001=0x3 & op0204=0x6 & op0506=0x3 & op1214=0x4 & rd { rd = custom3.rd(); } :custom3.rd.rs1 rd,rs1 is op0001=0x3 & op0204=0x6 & op0506=0x3 & op1214=0x6 & rd & rs1 { rd = custom3.rd.rs1(rs1); } :custom3.rd.rs1.rs2 rd,rs1,rs2 is op0001=0x3 & op0204=0x6 & op0506=0x3 & op1214=0x7 & rd & 
rs1 & rs2 { rd = custom3.rd.rs1.rs2(rs1, rs2); } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.ilp32d.slaspec ================================================ define endian=little; @define XLEN 4 @define XLEN2 8 @define FLEN 8 @define CONTEXTLEN 4 @define ADDRSIZE "32" @define FPSIZE "64" @include "riscv.reg.sinc" @include "riscv.table.sinc" @include "riscv.instr.sinc" # include placeholder decode for *some* custom instructions @include "riscv.custom.sinc" ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.instr.sinc ================================================ # This is just a top level to include the standards @if ADDRSIZE == "32" || ADDRSIZE == "64" || ADDRSIZE == "128" @include "riscv.rv32i.sinc" @include "riscv.rv32a.sinc" @include "riscv.rv32m.sinc" @include "riscv.rv32b.sinc" @include "riscv.rv32p.sinc" @include "riscv.rv32k.sinc" @if FPSIZE == "32" || FPSIZE == "64" || FPSIZE == "128" @include "riscv.rv32f.sinc" @endif @if FPSIZE == "64" || FPSIZE == "128" @include "riscv.rv32d.sinc" @endif @if FPSIZE == "128" @include "riscv.rv32q.sinc" @endif @endif @if ADDRSIZE == "64" || ADDRSIZE == "128" @include "riscv.rv64i.sinc" @include "riscv.rv64a.sinc" @include "riscv.rv64m.sinc" @include "riscv.rv64b.sinc" @include "riscv.rv64p.sinc" @if FPSIZE == "32" || FPSIZE == "64" || FPSIZE == "128" @include "riscv.rv64f.sinc" @endif @if FPSIZE == "64" || FPSIZE == "128" @include "riscv.rv64d.sinc" @endif @if FPSIZE == "128" @include "riscv.rv64q.sinc" @endif @endif @include "riscv.csr.sinc" @include "riscv.priv.sinc" @include "riscv.rvc.sinc" @include "riscv.rvv.sinc" @include "riscv.zi.sinc" # todos that may be possible, mostly just artifacts from my # script to generate the initial SELIGH #TODO ALIAS # add d,CU,CV 00009002 0000f003 ALIAS (0, 0) #:add crd,crs1,crs2 is crd & crs2 & crs1 & cop0001=0x2 & cop1315=0x4 & cop1212=0x1 #{ #} #TODO ALIAS # add d,CV,CU 
00009002 0000f003 ALIAS (0, 0) #:add crd,crs2,crs1 is crd & crs1 & crs2 & cop0001=0x2 & cop1315=0x4 & cop1212=0x1 #{ #} #TODO ALIAS # add d,CU,Co 00000001 0000e003 ALIAS (0, 0) #:add crd,crs1,cimmI is crd & cimmI & crs1 & cop0001=0x1 & cop1315=0x0 #{ #} #TODO ALIAS # add Ct,Cc,CK 00000000 0000e003 ALIAS (0, 0) #:add cr0204s,sp,caddi4spnimm is caddi4spnimm & cr0204s & sp & cop0001=0x0 & cop1315=0x0 #{ #} #TODO ALIAS # add Cc,Cc,CL 00006101 0000ef83 ALIAS (0, 0) #TODO sp,sp,caddi16spimm caddi16spimm & sp & cop0001=0x1 & cop1315=0x3 #:add sp,sp,caddi16spimm is caddi16spimm & sp & cop0001=0x1 & cop1315=0x3 #{ #} #TODO ALIAS # add d,s,j 00000013 0000707f ALIAS (0, 0) #:add rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 #{ #} #TODO ALIAS # addi Ct,Cc,CK 00000000 0000e003 ALIAS (0, 0) #:addi cr0204s,sp,caddi4spnimm is caddi4spnimm & cr0204s & sp & cop0001=0x0 & cop1315=0x0 #{ #} #TODO ALIAS # addi d,CU,Cj 00000001 0000e003 ALIAS (0, 0) #:addi crd,crs1,cimmI is crd & cimmI & crs1 & cop0001=0x1 & cop1315=0x0 #{ #} #TODO ALIAS # addi d,CU,z 00000001 0000f07f ALIAS (0, 0) #:addi crd,crs1,zero is crd & zero & crs1 & cop0001=0x1 & cop1315=0x0 & cop0206=0x0 & cop1212=0x0 #{ #} #TODO ALIAS # addi Cc,Cc,CL 00006101 0000ef83 ALIAS (0, 0) #TODO sp,sp,caddi16spimm caddi16spimm & sp & cop0001=0x1 & cop1315=0x3 #:addi sp,sp,caddi16spimm is caddi16spimm & sp & cop0001=0x1 & cop1315=0x3 #{ #} #@if defined(RISCV64I) #TODO ALIAS # addiw d,CU,Co 00002001 0000e003 ALIAS (64, 0) #TODO 32 64 #:addiw crd,crs1,cimmI is crd & cimmI & crs1 & cop0001=0x1 & cop1315=0x1 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # addw Cs,Cw,Ct 00009c21 0000fc63 ALIAS (64, 0) #:addw cr0709s,cd0709s,cr0204s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x1 & cop1012=0x7 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # addw Cs,Ct,Cw 00009c21 0000fc63 ALIAS (64, 0) #:addw cr0709s,cr0204s,cd0709s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 
& cop0506=0x1 & cop1012=0x7 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # addw d,CU,Co 00002001 0000e003 ALIAS (64, 0) #TODO crd,crs1,cimmI crd & cimmI & crs1 & cop0001=0x1 & cop1315=0x1 #:addw crd,crs1,cimmI is crd & cimmI & crs1 & cop0001=0x1 & cop1315=0x1 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # addw d,s,j 0000001b 0000707f ALIAS (64, 0) #:addw rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x0 #{ #} #@endif #TODO ALIAS # and Cs,Cw,Ct 00008c61 0000fc63 ALIAS (0, 0) #:and cr0709s,cd0709s,cr0204s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x3 & cop1012=0x3 #{ #} #TODO ALIAS # and Cs,Ct,Cw 00008c61 0000fc63 ALIAS (0, 0) #:and cr0709s,cr0204s,cd0709s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x3 & cop1012=0x3 #{ #} #TODO ALIAS # and Cs,Cw,Co 00008801 0000ec03 ALIAS (0, 0) #:and cr0709s,cd0709s,cimmI is cd0709s & cimmI & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x2 #{ #} #TODO ALIAS # and d,s,j 00007013 0000707f ALIAS (0, 0) #:and rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x7 #{ #} #TODO ALIAS # andi Cs,Cw,Co 00008801 0000ec03 ALIAS (0, 0) #:andi cr0709s,cd0709s,cimmI is cd0709s & cimmI & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x2 #{ #} #TODO ALIAS # beq Cs,Cz,Cp 0000c001 0000e003 CONDBRANCH|ALIAS (0, 0) #:beq cr0709s,zero,cbimm is cop0206=0 & cbimm & zero & cr0709s & cop0001=0x1 & cop1315=0x6 #{ #} #TODO ALIAS # beqz Cs,Cp 0000c001 0000e003 CONDBRANCH|ALIAS (0, 0) #:beqz cr0709s,cbimm is cbimm & cr0709s & cop0001=0x1 & cop1315=0x6 #{ #} #TODO ALIAS # beqz s,p 00000063 01f0707f CONDBRANCH|ALIAS (0, 0) #:beqz rs1,immSB is immSB & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x0 & op2024=0x0 #{ #} #TODO ALIAS # bgez s,p 00005063 01f0707f CONDBRANCH|ALIAS (0, 0) #:bgez rs1,immSB is immSB & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x5 & op2024=0x0 #{ #} #TODO ALIAS # bgt t,s,p 00004063 0000707f 
CONDBRANCH|ALIAS (0, 0) #:bgt rs2,rs1,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x4 #{ #} #TODO ALIAS # bgtu t,s,p 00006063 0000707f CONDBRANCH|ALIAS (0, 0) #:bgtu rs2,rs1,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x6 #{ #} #TODO ALIAS # bgtz t,p 00004063 000ff07f CONDBRANCH|ALIAS (0, 0) #:bgtz rs2,immSB is immSB & rs2 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x4 & op1519=0x0 #{ #} #TODO ALIAS # ble t,s,p 00005063 0000707f CONDBRANCH|ALIAS (0, 0) #:ble rs2,rs1,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x5 #{ #} #TODO ALIAS # bleu t,s,p 00007063 0000707f CONDBRANCH|ALIAS (0, 0) #:bleu rs2,rs1,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x7 #{ #} #TODO ALIAS # blez t,p 00005063 000ff07f CONDBRANCH|ALIAS (0, 0) #:blez rs2,immSB is immSB & rs2 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x5 & op1519=0x0 #{ #} #TODO ALIAS # bltz s,p 00004063 01f0707f CONDBRANCH|ALIAS (0, 0) #:bltz rs1,immSB is immSB & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x4 & op2024=0x0 #{ #} #TODO ALIAS # bne Cs,Cz,Cp 0000e001 0000e003 CONDBRANCH|ALIAS (0, 0) #:bne cr0709s,zero,cbimm is cop0206=0 & cbimm & zero & cr0709s & cop0001=0x1 & cop1315=0x7 #{ #} #TODO ALIAS # bnez Cs,Cp 0000e001 0000e003 CONDBRANCH|ALIAS (0, 0) #:bnez cr0709s,cbimm is cbimm & cr0709s & cop0001=0x1 & cop1315=0x7 #{ #} #TODO ALIAS # bnez s,p 00001063 01f0707f CONDBRANCH|ALIAS (0, 0) #:bnez rs1,immSB is immSB & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x1 & op2024=0x0 #{ #} #@if defined(RISCV128I) #TODO 128 #:c.lq #@endif #TODO ALIAS # c.nop 00000001 0000ffff ALIAS (0, 0) #:c.nop is cop0001=0x1 & cop1315=0x0 & cop0212=0x0 #{ #} #TODO ALIAS # c.nop Cj 00000001 0000ef83 ALIAS (0, 0) #:c.nop cimmI is cimmI & cop0001=0x1 & cop1315=0x0 & cop0711=0x0 #{ #} #TODO MACRO # call d,c 00030000 00000015 MACRO (0, 64) #TODO MACRO # call c 00008080 00000015 MACRO (0, 64) #TODO 
ALIAS # csrc E,s 00003073 00007fff ALIAS (0, 0) #:csrc csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x3 & op0711=0x0 #{ #} #TODO ALIAS # csrc E,Z 00007073 00007fff ALIAS (0, 0) #:csrc csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x7 & op0711=0x0 #{ #} #TODO ALIAS # csrci E,Z 00007073 00007fff ALIAS (0, 0) #:csrci csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x7 & op0711=0x0 #{ #} #TODO ALIAS # csrr d,E 00002073 000ff07f ALIAS (0, 0) #:csrr rd,csr is csr & rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1519=0x0 #{ #} #TODO ALIAS # csrrc d,E,Z 00007073 0000707f ALIAS (0, 0) #:csrrc rd,csr,rs1 is rs1 & csr & rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x7 #{ #} #TODO ALIAS # csrrs d,E,Z 00006073 0000707f ALIAS (0, 0) #:csrrs rd,csr,rs1 is rs1 & csr & rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x6 #{ #} #TODO ALIAS # csrrw d,E,Z 00005073 0000707f ALIAS (0, 0) #:csrrw rd,csr,rs1 is rs1 & csr & rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 #{ #} #TODO ALIAS # csrs E,s 00002073 00007fff ALIAS (0, 0) #:csrs csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op0711=0x0 #{ #} #TODO ALIAS # csrs E,Z 00006073 00007fff ALIAS (0, 0) #:csrs csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x6 & op0711=0x0 #{ #} #TODO ALIAS # csrsi E,Z 00006073 00007fff ALIAS (0, 0) #:csrsi csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x6 & op0711=0x0 #{ #} #TODO ALIAS # csrw E,s 00001073 00007fff ALIAS (0, 0) #:csrw csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op0711=0x0 #{ #} #TODO ALIAS # csrw E,Z 00005073 00007fff ALIAS (0, 0) #:csrw csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & op0711=0x0 #{ #} #TODO ALIAS # csrwi E,Z 00005073 00007fff ALIAS (0, 0) #:csrwi csr,rs1 is csr & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x5 & op0711=0x0 #{ #} #TODO ALIAS # 
ebreak 00009002 0000ffff ALIAS (0, 0) #:ebreak is cop0001=0x2 & cop1315=0x4 & cop0212=0x400 #{ #} #TODO ALIAS # fabs.q D,U 26002053 fe00707f ALIAS (0, 0) #:fabs.q fr0711,fr1519 is fr1519 & fr0711 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x13 & op2024=0x0 #{ #} #TODO ALIAS # fence 0ff0000f ffffffff ALIAS (0, 0) #:fence is op0001=0x3 & op0204=0x3 & op0506=0x0 & funct3=0x0 & fm=0x0 & op0711=0x0 & op1527=0x1fe0 #{ #} #TODO ALIAS # fence.tso 8330000f ffffffff ALIAS (0, 0) #:fence.tso is op0001=0x3 & op0204=0x3 & op0506=0x0 & funct3=0x0 & fm=0x8 & op0711=0x0 & op1527=0x660 #{ #} #TODO SEE fle.d # fge.d d,T,S a2000053 fe00707f SIMPLE (0, 0) # :fge.d rd,fr2024,fr1519 is fr2024 & fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x51 # { # } #TODO SEE fle.q # fge.q d,T,S a6000053 fe00707f SIMPLE (0, 0) # :fge.q rd,fr2024,fr1519 is fr2024 & fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x53 # { # } #TODO SEE fle.s # fge.s d,T,S a0000053 fe00707f SIMPLE (0, 0) # :fge.s rd,fr2024,fr1519 is fr2024 & fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x50 # { # } #TODO SEE flt.d # fgt.d d,T,S a2001053 fe00707f SIMPLE (0, 0) # :fgt.d rd,fr2024,fr1519 is fr2024 & fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x51 # { # } #TODO SEE flt.q # fgt.q d,T,S a6001053 fe00707f SIMPLE (0, 0) # :fgt.q rd,fr2024,fr1519 is fr2024 & fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x53 # { # } #TODO SEE flt.s # fgt.s d,T,S a0001053 fe00707f SIMPLE (0, 0) # :fgt.s rd,fr2024,fr1519 is fr2024 & fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x50 # { # } #TODO ALIAS # fld D,Cn(Cc) 00002002 0000e003 QWORD|DREF|ALIAS (0, 8) #TODO cfr0711,cldspimm(sp) cldspimm & cfr0711 & sp & cop0001=0x2 & cop1315=0x1 #:fld cfr0711,cldspimm(sp) is cldspimm & cfr0711 & sp & cop0001=0x2 & cop1315=0x1 #{ #} #TODO ALIAS # fld CD,Cl(Cs) 00002000 0000e003 
QWORD|DREF|ALIAS (0, 8) #TODO 32 64 #:fld cfr0204s,cldimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x1 & cldimm #{ #} #TODO MACRO # fld D,A,s 00000000 00000010 MACRO (0, 64) #TODO MACRO # flq D,A,s 00000000 00000011 MACRO (0, 64) #@if defined(RISCV32I) #TODO ALIAS # flw D,Cm(Cc) 00006002 0000e003 DWORD|DREF|ALIAD (32, 4) #TODO cfr0711,clwspimm(sp) cfr0711 & clwspimm & sp & cop0001=0x2 & cop1315=0x3 #:flw cfr0711,clwspimm(sp) is cfr0711 & clwspimm & sp & cop0001=0x2 & cop1315=0x3 #{ #} #@endif #@if defined(RISCV32I) #TODO ALIAS # flw CD,Ck(Cs) 00006000 0000e003 DWORD|DREF|ALIAD (32, 4) #:flw cfr0204s,clwimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x3 & clwimm #{ #} #@endif #TODO MACRO # flw D,A,s 00000000 0000000f MACRO (0, 64) #TODO ALIAS # fmv.q D,U 26000053 fe00707f ALIAS (0, 0) #:fmv.q fr0711,fr1519 is fr1519 & fr0711 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x13 & op2024=0x0 #{ #} #TODO SEE fmv.w.x # fmv.s.x D,s f0000053 fff0707f SIMPLE (0, 0) # :fmv.s.x fr0711,rs1 is fr0711 & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x78 & op2024=0x0 # { # } #TODO SEE fmv.x.w # fmv.x.s d,S e0000053 fff0707f SIMPLE (0, 0) # :fmv.x.s rd,fr1519 is fr1519 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x70 & op2024=0x0 # { # } #TODO ALIAS # fneg.q D,U 26001053 fe00707f ALIAS (0, 0) #:fneg.q fr0711,fr1519 is fr1519 & fr0711 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x13 & op2024=0x0 #{ #} #TODO SEE frcsr # frsr d 00302073 fffff07f SIMPLE (0, 0) # :frsr rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x60 # { # } #TODO ALIAS # fsd CT,CN(Cc) 0000a002 0000e003 QWORD|DREF|ALIAS (0, 8) #TODO cfr0206,csdspimm(sp) csdspimm & cfr0206 & sp & cop0001=0x2 & cop1315=0x5 #:fsd cfr0206,csdspimm(sp) is csdspimm & cfr0206 & sp & cop0001=0x2 & cop1315=0x5 #{ #} #TODO ALIAS # fsd CD,Cl(Cs) 0000a000 0000e003 QWORD|DREF|ALIAS (0, 8) #TODO 32 64 #:fsd 
cfr0204s,cldimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x5 & cldimm #{ #} #TODO MACRO # fsd T,A,s 00000000 00000013 MACRO (0, 64) #TODO MACRO # fsq T,A,s 00000000 00000014 MACRO (0, 64) #TODO SEE fscsr # fssr s 00301073 fff07fff SIMPLE (0, 0) # :fssr rs1 is rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op0711=0x0 & op2031=0x3 # { # } #TODO SEE fscsr # fssr d,s 00301073 fff0707f SIMPLE (0, 0) # :fssr rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op2031=0x3 # { # } #@if defined(RISCV32I) #TODO ALIAS # fsw CT,CM(Cc) 0000e002 0000e003 DWORD|DREF|ALIAD (32, 4) #TODO cfr0206,cswspimm(sp) cswspimm & cfr0206 & sp & cop0001=0x2 & cop1315=0x7 #:fsw cfr0206,cswspimm(sp) is cswspimm & cfr0206 & sp & cop0001=0x2 & cop1315=0x7 #{ #} #@endif #@if defined(RISCV32I) #TODO ALIAS # fsw CD,Ck(Cs) 0000e000 0000e003 DWORD|DREF|ALIAD (32, 4) #:fsw cfr0204s,clwimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x7 & clwimm #{ #} #@endif #TODO MACRO # fsw T,A,s 00000000 00000012 MACRO (0, 64) #TODO ALIAS # j Ca 0000a001 0000e003 BRANCH|ALIAS (0, 0) #:j cjimm is cjimm & cop0001=0x1 & cop1315=0x5 #{ #} #@if defined(RISCV32I) #TODO ALIAS # jal Ca 00002001 0000e003 JSR|ALIAS (32, 0) #:jal cjimm is cjimm & cop0001=0x1 & cop1315=0x1 #{ #} #@endif #TODO ALIAS # jal a 000000ef 00000fff JSR|ALIAS (0, 0) #:jal immUJ is immUJ & op0001=0x3 & op0204=0x3 & op0506=0x3 & op0711=0x1 #{ #} #TODO ALIAS # jalr d 00009002 0000f07f JSR|ALIAS (0, 0) #:jalr crd is crd & cop0001=0x2 & cop1315=0x4 & cop0206=0x0 & cop1212=0x1 #{ #} #TODO ALIAS # jalr s 000000e7 fff07fff JSR|ALIAS (0, 0) #:jalr rs1 is rs1 & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x1 & op2031=0x0 #{ #} #TODO ALIAS # jalr o(s) 000000e7 00007fff JSR|ALIAS (0, 0) #:jalr immI(rs1) is immI & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x1 #{ #} #TODO ALIAS # jalr s,j 000000e7 00007fff JSR|ALIAS (0, 0) #:jalr rs1,immI is immI & rs1 & op0001=0x3 & 
op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x1 #{ #} #TODO ALIAS # jalr d,s 00000067 fff0707f JSR|ALIAS (0, 0) #:jalr rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op2031=0x0 #{ #} #TODO SEE jalr # jalr d,o(s) 00000067 0000707f JSR (0, 0) # :jalr rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 # { # } #TODO ALIAS # jr d 00008002 0000f07f BRANCH|ALIAS (0, 0) #:jr crd is crd & cop0001=0x2 & cop1315=0x4 & cop0206=0x0 & cop1212=0x0 #{ #} #TODO ALIAS # jr s,j 00000067 00007fff BRANCH|ALIAS (0, 0) #:jr rs1,immI is immI & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x0 #{ #} #TODO MACRO # jump c,s 00000000 00000015 MACRO (0, 64) #TODO MACRO # la d,B 00000000 00000000 MACRO (0, 64) #TODO MACRO # la.tls.gd d,A 00000000 00000002 MACRO (0, 64) #TODO MACRO # la.tls.ie d,A 00000000 00000003 MACRO (0, 64) #TODO MACRO # lb d,A 00000000 00000004 MACRO (0, 64) #TODO MACRO # lbu d,A 00000000 00000005 MACRO (0, 64) #@if defined(RISCV64I) #TODO ALIAS # ld d,Cn(Cc) 00006002 0000e003 QWORD|DREF|ALIAS (64, 8) #TODO crd,cldspimm(sp) crd & cldspimm & sp & cop0001=0x2 & cop1315=0x3 #:ld crd,cldspimm(sp) is crd & cldspimm & sp & cop0001=0x2 & cop1315=0x3 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # ld Ct,Cl(Cs) 00006000 0000e003 QWORD|DREF|ALIAS (64, 8) #TODO 64 128 #:ld cr0204s,cldimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x3 & cldimm #{ #} #@endif #@if defined(RISCV64I) #TODO MACRO # ld d,A 00000000 0000000a MACRO (64, 64) #@endif #TODO MACRO # lh d,A 00000000 00000006 MACRO (0, 64) #TODO MACRO # lhu d,A 00000000 00000007 MACRO (0, 64) #TODO ALIAS # li d,Cv 00006001 0000e003 ALIAS (0, 0) #TODO crd,cbigimm crd & cbigimm & cop0001=0x1 & cop1315=0x3 #:li crd,cbigimm is crd & cbigimm & cop0001=0x1 & cop1315=0x3 #{ #} #TODO ALIAS # li d,Co 00004001 0000e003 ALIAS (0, 0) #:li crd,cimmI is crd & cimmI & cop0001=0x1 & cop1315=0x2 #{ #} #TODO MACRO # li d,I 00000000 00000017 MACRO (0, 64) 
#TODO MACRO # lla d,B 00000000 00000001 MACRO (0, 64) #TODO ALIAS # lui d,Cu 00006001 0000e003 ALIAS (0, 0) #:lui crd,cbigimm is crd & cbigimm & cop0001=0x1 & cop1315=0x3 #{ #} #TODO ALIAS # lw d,Cm(Cc) 00004002 0000e003 DWORD|DREF|ALIAD (0, 4) #:lw crd,clwspimm(sp) is crd & sp & cop0001=0x2 & cop1315=0x2 & clwspimm #{ #} #TODO ALIAS # lw Ct,Ck(Cs) 00004000 0000e003 DWORD|DREF|ALIAD (0, 4) #:lw cr0204s,clwimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x2 & clwimm #{ #} #TODO MACRO # lw d,A 00000000 00000008 MACRO (0, 64) #@if defined(RISCV64I) #TODO MACRO # lwu d,A 00000000 00000009 MACRO (64, 64) #@endif #TODO ALIAS # move d,CV 00008002 0000f003 ALIAS (0, 0) #TODO crd,crs2 crd & crs2 & cop0001=0x2 & cop1315=0x4 #:move crd,crs2 is crd & crs2 & cop0001=0x2 & cop1315=0x4 & cop1212=0x0 #{ #} #TODO ALIAS # move d,s 00000013 fff0707f ALIAS (0, 0) #:move rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 & op2031=0x0 #{ #} #TODO ALIAS # mv d,CV 00008002 0000f003 ALIAS (0, 0) #:mv crd,crs2 is crd & crs2 & cop0001=0x2 & cop1315=0x4 & cop1212=0x0 #{ #} #TODO ALIAS # nop 00000001 0000ffff ALIAS (0, 0) #:nop is cop0001=0x1 & cop1315=0x0 & cop0212=0x0 #{ #} #TODO ALIAS # or Cs,Cw,Ct 00008c41 0000fc63 ALIAS (0, 0) #:or cr0709s,cd0709s,cr0204s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x2 & cop1012=0x3 #{ #} #TODO ALIAS # or Cs,Ct,Cw 00008c41 0000fc63 ALIAS (0, 0) #:or cr0709s,cr0204s,cd0709s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x2 & cop1012=0x3 #{ #} #TODO ALIAS # or d,s,j 00006013 0000707f ALIAS (0, 0) #:or rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x6 #{ #} #TODO ALIAS # rdcycle d c0002073 fffff07f ALIAS (0, 0) #:rdcycle rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x18000 #{ #} #@if defined(RISCV32I) #TODO ALIAS # rdcycleh d c8002073 fffff07f ALIAS (32, 0) #:rdcycleh rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & 
funct3=0x2 & op1531=0x19000 #{ #} #@endif #TODO ALIAS # rdinstret d c0202073 fffff07f ALIAS (0, 0) #:rdinstret rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x18040 #{ #} #@if defined(RISCV32I) #TODO ALIAS # rdinstreth d c8202073 fffff07f ALIAS (32, 0) #:rdinstreth rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x19040 #{ #} #@endif #TODO ALIAS # rdtime d c0102073 fffff07f ALIAS (0, 0) #:rdtime rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x18020 #{ #} #@if defined(RISCV32I) #TODO ALIAS # rdtimeh d c8102073 fffff07f ALIAS (32, 0) #:rdtimeh rd is rd & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x2 & op1531=0x19020 #{ #} #@endif #TODO MACRO # sb t,A,s 00000000 0000000b MACRO (0, 64) #@if defined(RISCV64I) #TODO ALIAS # sd CV,CN(Cc) 0000e002 0000e003 QWORD|DREF|ALIAS (64, 8) #TODO crs2,csdspimm(sp) csdspimm & crs2 & sp & cop0001=0x2 & cop1315=0x7 #:sd crs2,csdspimm(sp) is csdspimm & crs2 & sp & cop0001=0x2 & cop1315=0x7 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # sd Ct,Cl(Cs) 0000e000 0000e003 QWORD|DREF|ALIAS (64, 8) #TODO 64 128 #:sd cr0204s,cldimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x7 & cldimm #{ #} #@endif #@if defined(RISCV64I) #TODO MACRO # sd t,A,s 00000000 0000000e MACRO (64, 64) #@endif #TODO ALIAS # seqz d,s 00103013 fff0707f ALIAS (0, 0) #:seqz rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x3 & op2031=0x1 #{ #} #@if defined(RISCV64I) #TODO ALIAS # sext.w d,CU 00002001 0000f07f ALIAS (64, 0) #TODO crd,crs1 crd & crs1 & cop0001=0x1 & cop1315=0x1 #:sext.w crd,crs1 is crd & crs1 & cop0001=0x1 & cop1315=0x1 & cop0206=0x0 & cop1212=0x0 #{ #} #@endif #@if defined(RISCV64I) #TODO ALIAS # sext.w d,s 0000001b fff0707f ALIAS (64, 0) #:sext.w rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x0 & op2031=0x0 #{ #} #@endif #TODO ALIAS # sfence.vma 12000073 ffffffff ALIAS (0, 0) #:sfence.vma is op0001=0x3 & op0204=0x4 & op0506=0x3 & 
funct3=0x0 & op0711=0x0 & op1531=0x2400 #{ #} #TODO ALIAS # sfence.vma s 12000073 fff07fff ALIAS (0, 0) #:sfence.vma rs1 is rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op2031=0x120 #{ #} #TODO ALIAS # sgt d,t,s 00002033 fe00707f ALIAS (0, 0) #:sgt rd,rs2,rs1 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x2 & funct7=0x0 #{ #} #TODO ALIAS # sgtu d,t,s 00003033 fe00707f ALIAS (0, 0) #:sgtu rd,rs2,rs1 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x3 & funct7=0x0 #{ #} #TODO ALIAS # sgtz d,t 00002033 fe0ff07f ALIAS (0, 0) #:sgtz rd,rs2 is rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x2 & funct7=0x0 & op1519=0x0 #{ #} #TODO MACRO # sh t,A,s 00000000 0000000c MACRO (0, 64) #TODO ALIAS # sll d,CU,C> 00000002 0000e003 ALIAS (0, 0) #:sll crd,crs1,c6imm is crd & c6imm & crs1 & cop0001=0x2 & cop1315=0x0 #{ #} #TODO ALIAS # sll d,s,> 00001013 fc00707f ALIAS (0, 0) #:sll rd,rs1,shamt6 is rs1 & shamt6 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x1 & op2631=0x0 #{ #} #TODO ALIAS # slli d,CU,C> 00000002 0000e003 ALIAS (0, 0) #:slli crd,crs1,c6imm is crd & c6imm & crs1 & cop0001=0x2 & cop1315=0x0 #{ #} #@if defined(RISCV64I) #TODO ALIAS # sllw d,s,< 0000101b fe00707f ALIAS (64, 0) #:sllw rd,rs1,shamt5 is rs1 & shamt5 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x1 & op2531=0x0 #{ #} #@endif #TODO ALIAS # slt d,s,j 00002013 0000707f ALIAS (0, 0) #:slt rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x2 #{ #} #TODO ALIAS # sltu d,s,j 00003013 0000707f ALIAS (0, 0) #:sltu rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x3 #{ #} #TODO ALIAS # sltz d,s 00002033 fff0707f ALIAS (0, 0) #:sltz rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x2 & funct7=0x0 & op2024=0x0 #{ #} #TODO ALIAS # snez d,t 00003033 fe0ff07f ALIAS (0, 0) #:snez rd,rs2 is rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x3 & 
funct7=0x0 & op1519=0x0 #{ #} #TODO ALIAS # sra Cs,Cw,C> 00008401 0000ec03 ALIAS (0, 0) #:sra cr0709s,cd0709s,c6imm is cd0709s & c6imm & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x1 #{ #} #TODO ALIAS # sra d,s,> 40005013 fc00707f ALIAS (0, 0) #:sra rd,rs1,shamt6 is rs1 & shamt6 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x5 & op2631=0x10 #{ #} #TODO ALIAS # srai Cs,Cw,C> 00008401 0000ec03 ALIAS (0, 0) #:srai cr0709s,cd0709s,c6imm is cd0709s & c6imm & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x1 #{ #} #@if defined(RISCV64I) #TODO ALIAS # sraw d,s,< 4000501b fe00707f ALIAS (64, 0) #:sraw rd,rs1,shamt5 is rs1 & shamt5 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x5 & op2531=0x20 #{ #} #@endif #TODO ALIAS # srl Cs,Cw,C> 00008001 0000ec03 ALIAS (0, 0) #:srl cr0709s,cd0709s,c6imm is cd0709s & c6imm & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x0 #{ #} #TODO ALIAS # srl d,s,> 00005013 fc00707f ALIAS (0, 0) #:srl rd,rs1,shamt6 is rs1 & shamt6 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x5 & op2631=0x0 #{ #} #TODO ALIAS # srli Cs,Cw,C> 00008001 0000ec03 ALIAS (0, 0) #:srli cr0709s,cd0709s,c6imm is cd0709s & c6imm & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x0 #{ #} #@if defined(RISCV64I) #TODO ALIAS # srlw d,s,< 0000501b fe00707f ALIAS (64, 0) #:srlw rd,rs1,shamt5 is rs1 & shamt5 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x5 & op2531=0x0 #{ #} #@endif #TODO ALIAS # sub Cs,Cw,Ct 00008c01 0000fc63 ALIAS (0, 0) #:sub cr0709s,cd0709s,cr0204s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x0 & cop1012=0x3 #{ #} #@if defined(RISCV64I) #TODO ALIAS # subw Cs,Cw,Ct 00009c01 0000fc63 ALIAS (64, 0) #:subw cr0709s,cd0709s,cr0204s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x0 & cop1012=0x7 #{ #} #@endif #TODO ALIAS # sw CV,CM(Cc) 0000c002 0000e003 DWORD|DREF|ALIAD (0, 4) #:sw crs2,cswspimm(sp) is crs2 & sp & cop0001=0x2 & cop1315=0x6 & cswspimm #{ #} #TODO ALIAS # sw Ct,Ck(Cs) 
0000c000 0000e003 DWORD|DREF|ALIAD (0, 4) #:sw cr0204s,clwimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x6 & clwimm #{ #} #TODO MACRO # sw t,A,s 00000000 0000000d MACRO (0, 64) #TODO MACRO # tail c 00030000 00000015 MACRO (0, 64) #TODO ALIAS # unimp 00000000 0000ffff ALIAS (0, 0) #:unimp is cop0001=0x0 & cop1315=0x0 & cop0212=0x0 #{ #} #TODO ALIAS # xor Cs,Cw,Ct 00008c21 0000fc63 ALIAS (0, 0) #:xor cr0709s,cd0709s,cr0204s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x1 & cop1012=0x3 #{ #} #TODO ALIAS # xor Cs,Ct,Cw 00008c21 0000fc63 ALIAS (0, 0) #:xor cr0709s,cr0204s,cd0709s is cd0709s & cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x1 & cop1012=0x3 #{ #} #TODO ALIAS # xor d,s,j 00004013 0000707f ALIAS (0, 0) #:xor rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x4 #{ #} ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.ldefs ================================================ RISC-V 64 little default RISC-V 32 little default ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.lp64d.slaspec ================================================ define endian=little; @define XLEN 8 @define XLEN2 16 @define FLEN 8 @define CONTEXTLEN 4 @define ADDRSIZE "64" @define FPSIZE "64" @include "riscv.reg.sinc" @include "riscv.table.sinc" @include "riscv.instr.sinc" @include "riscv.rv64k.sinc" # current encoding is in custom space @include "riscv.custom.sinc" ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.opinion ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.priv.sinc ================================================ # RISC-V Privileged Instructions define pcodeop wfi; define pcodeop sfence.vm; define pcodeop sfence.vma; # Trap-Return # dret 7b200073 
# ffffffff SIMPLE (0, 0)
:dret is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0xf640 { return [dpc]; }

# hret 20200073 ffffffff SIMPLE (0, 0)
# deprecated instruction in latest spec
#:hret is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x4040

# mret 30200073 ffffffff SIMPLE (0, 0)
:mret is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x6040 { return [mepc]; }

# sret 10200073 ffffffff SIMPLE (0, 0)
:sret is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x2040 { return [sepc]; }

# uret 00200073 ffffffff SIMPLE (0, 0)
:uret is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x40 { return [uepc]; }

# Interrupt-Management

# wfi 10500073 ffffffff SIMPLE (0, 0)
:wfi is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x20a0 { wfi(); }

# Supervisor Memory-Management

# sfence.vm 10400073 ffffffff SIMPLE (0, 0)
:sfence.vm is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x2080 { sfence.vm(); }

# sfence.vm s 10400073 fff07fff SIMPLE (0, 0)
:sfence.vm rs1 is rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op2031=0x104 { sfence.vm(rs1); }

# sfence.vma s,t 12000073 fe007fff SIMPLE (0, 0)
:sfence.vma rs1,rs2 is rs2 & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op2531=0x9 { sfence.vma(rs1, rs2); }

# Hypervisor Memory-Management
#TODO move to rv32h and rv64h?
# Hypervisor virtual-machine load/store instructions (RVH extension).
# All share major opcode 0x73 (SYSTEM) with funct3=0x4; funct7 selects width
# and direction, op2024 distinguishes signed/unsigned/hlvx variants.
:hlv.b rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x30 & rs1 & rd & op2024=0x0 { rd = sext(*[ram]:1 rs1); }
:hlv.bu rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x30 & rs1 & rd & op2024=0x1 { rd = zext(*[ram]:1 rs1); }
:hlv.h rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x32 & rs1 & rd & op2024=0x0 { rd = sext(*[ram]:2 rs1); }
# NOTE(fix): the hlv.hu display section previously read "rd,rs1," with a
# stray trailing comma, which leaked a spurious comma into the disassembly.
:hlv.hu rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x32 & rs1 & rd & op2024=0x1 { rd = zext(*[ram]:2 rs1); }
:hlvx.hu rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x32 & rs1 & rd & op2024=0x3 { rd = zext(*[ram]:2 rs1); }
:hlv.w rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x34 & rs1 & rd & op2024=0x0 { assignW(rd, *[ram]:4 rs1); }
:hlvx.wu rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x34 & rs1 & rd & op2024=0x3 { zassignW(rd, *[ram]:4 rs1); }
:hsv.b rs1,rs2 is op0006=0x73 & funct3=0x4 & funct7=0x31 & op0711=0x0 & rs1 & rs2 { *[ram]:1 rs1 = rs2:1; }
:hsv.h rs1,rs2 is op0006=0x73 & funct3=0x4 & funct7=0x33 & op0711=0x0 & rs1 & rs2 { *[ram]:2 rs1 = rs2:2; }
:hsv.w rs1,rs2 is op0006=0x73 & funct3=0x4 & funct7=0x35 & op0711=0x0 & rs1 & rs2 { *[ram]:4 rs1 = rs2:4; }

# 64-bit-only variants
@if ADDRSIZE == "64"
:hlv.wu rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x34 & rs1 & rd & op2024=0x1 { rd = zext(*[ram]:4 rs1); }
:hlv.d rd,rs1 is op0006=0x73 & funct3=0x4 & funct7=0x36 & rs1 & rd & op2024=0x0 { rd = *[ram]:8 rs1; }
:hsv.d rs1,rs2 is op0006=0x73 & funct3=0x4 & funct7=0x37 & op0711=0x0 & rs1 & rs2 { *[ram]:8 rs1 = rs2; }
@endif

================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.reg.sinc ================================================
define alignment=2;

define space ram type=ram_space size=$(XLEN) default;
define space register type=register_space size=4;
define space csreg type=ram_space size=2 wordsize=$(XLEN); # really 12bit space, for 4096 registers

define register offset=0x100 size=$(CONTEXTLEN) [ CONTEXT ];
define register offset=0x1000 size=$(XLEN) [ pc ];

# 08-31 reserved
# 05-07 frm
# 000 - RNE - round to
nearest, ties to even # 001 - RTZ - round towards zero # 010 - RDN - round down (towards -inf) # 011 - RUP - round up (towards +inf) # 100 - RMM - round to nearest, ties to max magnitude # 101 - invalid # 110 - invalid # 111 - DYN - in rm field, selects dynamic rounding mode # in rounding mode register, invalid # 04 NV - invalid operation # 03 DZ - divide by zero # 02 OF - overflow # 01 UF - underflow # 00 NX - inexact #define register offset=0x1008 size=4 [ fcsr ]; #TODO FIXME #TODO This is really broken #NOTE This is stolen from ppc_common, so it has something similar define register offset=0x1010 size=$(XLEN) [ RESERVE_ADDRESS ]; define register offset=0x1018 size=1 [ RESERVE ]; define register offset=0x101C size=1 [ RESERVE_LENGTH ]; #TODO FIXME #ATTN RV32E is the same instruction-set encoding, but only defines x0-x15 # x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 x10 x11 x12 x13 x14 x15 # z ra sp gp tp t0 t1 t2 s0 s1 a0 a1 a2 a3 a4 a5 # x16 x17 x18 x19 x20 x21 x22 x23 x24 x25 x26 x27 x28 x29 x30 x31 # a6 a7 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 t3 t4 t5 t6 # register numbers 0x1000-0x101f define register offset=0x2000 size=$(XLEN) [ zero ra sp gp tp t0 t1 t2 s0 s1 a0 a1 a2 a3 a4 a5 a6 a7 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 t3 t4 t5 t6 ]; # register numbers 0x1020-0x103f define register offset=0x3000 size=$(FLEN) [ ft0 ft1 ft2 ft3 ft4 ft5 ft6 ft7 fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5 fa6 fa7 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 ft8 ft9 ft10 ft11 ]; #TODO fix @define VLEN "32" define register offset=0x4000 size=$(VLEN) [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; # SEE 3.1.1 Machine ISA Register misa # (MXLEN-1, MXLEN-2) MXL - Machine XLEN {1: 32, 2: 64, 3: 128} # Bit Character Description # 0 A Atomic extension # 1 B Tentatively reserved for Bit-Manipulation extension # 2 C Compressed extension # 3 D Double-precision floating-point extension # 4 E RV32E base ISA # 5 F Single-precision floating-point extension 
# 6 G Additional standard extensions present # 7 H Hypervisor extension # 8 I RV32I/64I/128I base ISA # 9 J Tentatively reserved for Dynamically Translated Languages extension # 10 K Reserved # 11 L Tentatively reserved for Decimal Floating-Point extension # 12 M Integer Multiply/Divide extension # 13 N User-level interrupts supported # 14 O Reserved # 15 P Tentatively reserved for Packed-SIMD extension # 16 Q Quad-precision floating-point extension # 17 R Reserved # 18 S Supervisor mode implemented # 19 T Tentatively reserved for Transactional Memory extension # 20 U User mode implemented # 21 V Tentatively reserved for Vector extension # 22 W Reserved # 23 X Non-standard extensions present # 24 Y Reserved # 25 Z Reserved # Moved most CSR registers to .pspec file. Doing so will: # - Allow new registers to be named in the .pspec file # - Processor variants differing only in CSR registers can just use a variant.pspec # - Read/Write references to registers not defined in sleigh # - Registers defined here will not get references to them # - Allow rename and comment by end user # # Control registers reserved 0x0000-0x0fff @define CSR_REG_START "0x0000" ## CSR definitions is done as a big table with undefined holes so that ## the 32-bit and 64-bit tables can be defined with the same code. ## Otherwise the byte offset of the address of each register ## would need to be calculated and would be different for XLEN of 32 or 64 bit. 
define csreg offset=$(CSR_REG_START) size=$(XLEN) [ # 0x000 _ fflags frm fcsr _ _ _ _ # 0x008 _ _ _ _ _ _ _ _ # 0x010 _ _ _ _ _ _ _ _ # 0x018 _ _ _ _ _ _ _ _ # 0x020 _ _ _ _ _ _ _ _ # 0x028 _ _ _ _ _ _ _ _ # 0x030 _ _ _ _ _ _ _ _ # 0x038 _ _ _ _ _ _ _ _ # 0x040 _ uepc _ _ _ _ _ _ # 0x048 _ _ _ _ _ _ _ _ # 0x050 _ _ _ _ _ _ _ _ # 0x058 _ _ _ _ _ _ _ _ # 0x060 _ _ _ _ _ _ _ _ # 0x068 _ _ _ _ _ _ _ _ # 0x070 _ _ _ _ _ _ _ _ # 0x078 _ _ _ _ _ _ _ _ # 0x080 _ _ _ _ _ _ _ _ # 0x088 _ _ _ _ _ _ _ _ # 0x090 _ _ _ _ _ _ _ _ # 0x098 _ _ _ _ _ _ _ _ # 0x0a0 _ _ _ _ _ _ _ _ # 0x0a8 _ _ _ _ _ _ _ _ # 0x0b0 _ _ _ _ _ _ _ _ # 0x0b8 _ _ _ _ _ _ _ _ # 0x0c0 _ _ _ _ _ _ _ _ # 0x0c8 _ _ _ _ _ _ _ _ # 0x0d0 _ _ _ _ _ _ _ _ # 0x0d8 _ _ _ _ _ _ _ _ # 0x0e0 _ _ _ _ _ _ _ _ # 0x0e8 _ _ _ _ _ _ _ _ # 0x0f0 _ _ _ _ _ _ _ _ # 0x0f8 _ _ _ _ _ _ _ _ # 0x100 _ _ _ _ _ _ _ _ # 0x108 _ _ _ _ _ _ _ _ # 0x110 _ _ _ _ _ _ _ _ # 0x118 _ _ _ _ _ _ _ _ # 0x120 _ _ _ _ _ _ _ _ # 0x128 _ _ _ _ _ _ _ _ # 0x130 _ _ _ _ _ _ _ _ # 0x138 _ _ _ _ _ _ _ _ # 0x140 _ sepc _ _ _ _ _ _ # 0x148 _ _ _ _ _ _ _ _ # 0x150 _ _ _ _ _ _ _ _ # 0x158 _ _ _ _ _ _ _ _ # 0x160 _ _ _ _ _ _ _ _ # 0x168 _ _ _ _ _ _ _ _ # 0x170 _ _ _ _ _ _ _ _ # 0x178 _ _ _ _ _ _ _ _ # 0x180 _ _ _ _ _ _ _ _ # 0x188 _ _ _ _ _ _ _ _ # 0x190 _ _ _ _ _ _ _ _ # 0x198 _ _ _ _ _ _ _ _ # 0x1a0 _ _ _ _ _ _ _ _ # 0x1a8 _ _ _ _ _ _ _ _ # 0x1b0 _ _ _ _ _ _ _ _ # 0x1b8 _ _ _ _ _ _ _ _ # 0x1c0 _ _ _ _ _ _ _ _ # 0x1c8 _ _ _ _ _ _ _ _ # 0x1d0 _ _ _ _ _ _ _ _ # 0x1d8 _ _ _ _ _ _ _ _ # 0x1e0 _ _ _ _ _ _ _ _ # 0x1e8 _ _ _ _ _ _ _ _ # 0x1f0 _ _ _ _ _ _ _ _ # 0x1f8 _ _ _ _ _ _ _ _ # 0x200 _ _ _ _ _ _ _ _ # 0x208 _ _ _ _ _ _ _ _ # 0x210 _ _ _ _ _ _ _ _ # 0x218 _ _ _ _ _ _ _ _ # 0x220 _ _ _ _ _ _ _ _ # 0x228 _ _ _ _ _ _ _ _ # 0x230 _ _ _ _ _ _ _ _ # 0x238 _ _ _ _ _ _ _ _ # 0x240 _ _ _ _ _ _ _ _ # 0x248 _ _ _ _ _ _ _ _ # 0x250 _ _ _ _ _ _ _ _ # 0x258 _ _ _ _ _ _ _ _ # 0x260 _ _ _ _ _ _ _ _ # 0x268 _ _ _ _ _ _ _ _ # 0x270 _ _ _ _ _ _ _ _ # 0x278 _ _ _ _ _ _ _ _ # 0x280 _ _ 
_ _ _ _ _ _ # 0x288 _ _ _ _ _ _ _ _ # 0x290 _ _ _ _ _ _ _ _ # 0x298 _ _ _ _ _ _ _ _ # 0x2a0 _ _ _ _ _ _ _ _ # 0x2a8 _ _ _ _ _ _ _ _ # 0x2b0 _ _ _ _ _ _ _ _ # 0x2b8 _ _ _ _ _ _ _ _ # 0x2c0 _ _ _ _ _ _ _ _ # 0x2c8 _ _ _ _ _ _ _ _ # 0x2d0 _ _ _ _ _ _ _ _ # 0x2d8 _ _ _ _ _ _ _ _ # 0x2e0 _ _ _ _ _ _ _ _ # 0x2e8 _ _ _ _ _ _ _ _ # 0x2f0 _ _ _ _ _ _ _ _ # 0x2f8 _ _ _ _ _ _ _ _ # 0x310 _ _ _ _ _ _ _ _ # 0x308 _ _ _ _ _ _ _ _ # 0x310 _ _ _ _ _ _ _ _ # 0x318 _ _ _ _ _ _ _ _ # 0x320 _ _ _ _ _ _ _ _ # 0x328 _ _ _ _ _ _ _ _ # 0x330 _ _ _ _ _ _ _ _ # 0x338 _ _ _ _ _ _ _ _ # 0x318 _ mepc _ _ _ _ _ _ # 0x348 _ _ _ _ _ _ _ _ # 0x350 _ _ _ _ _ _ _ _ # 0x358 _ _ _ _ _ _ _ _ # 0x360 _ _ _ _ _ _ _ _ # 0x368 _ _ _ _ _ _ _ _ # 0x370 _ _ _ _ _ _ _ _ # 0x378 _ _ _ _ _ _ _ _ # 0x380 _ _ _ _ _ _ _ _ # 0x388 _ _ _ _ _ _ _ _ # 0x390 _ _ _ _ _ _ _ _ # 0x398 _ _ _ _ _ _ _ _ # 0x3a0 _ _ _ _ _ _ _ _ # 0x3a8 _ _ _ _ _ _ _ _ # 0x3b0 _ _ _ _ _ _ _ _ # 0x3b8 _ _ _ _ _ _ _ _ # 0x3c0 _ _ _ _ _ _ _ _ # 0x3c8 _ _ _ _ _ _ _ _ # 0x3d0 _ _ _ _ _ _ _ _ # 0x3d8 _ _ _ _ _ _ _ _ # 0x3e0 _ _ _ _ _ _ _ _ # 0x3e8 _ _ _ _ _ _ _ _ # 0x3f0 _ _ _ _ _ _ _ _ # 0x3f8 _ _ _ _ _ _ _ _ # 0x400 _ _ _ _ _ _ _ _ # 0x408 _ _ _ _ _ _ _ _ # 0x410 _ _ _ _ _ _ _ _ # 0x418 _ _ _ _ _ _ _ _ # 0x420 _ _ _ _ _ _ _ _ # 0x428 _ _ _ _ _ _ _ _ # 0x430 _ _ _ _ _ _ _ _ # 0x438 _ _ _ _ _ _ _ _ # 0x440 _ _ _ _ _ _ _ _ # 0x448 _ _ _ _ _ _ _ _ # 0x450 _ _ _ _ _ _ _ _ # 0x458 _ _ _ _ _ _ _ _ # 0x460 _ _ _ _ _ _ _ _ # 0x468 _ _ _ _ _ _ _ _ # 0x470 _ _ _ _ _ _ _ _ # 0x478 _ _ _ _ _ _ _ _ # 0x480 _ _ _ _ _ _ _ _ # 0x488 _ _ _ _ _ _ _ _ # 0x490 _ _ _ _ _ _ _ _ # 0x498 _ _ _ _ _ _ _ _ # 0x4a0 _ _ _ _ _ _ _ _ # 0x4a8 _ _ _ _ _ _ _ _ # 0x4b0 _ _ _ _ _ _ _ _ # 0x4b8 _ _ _ _ _ _ _ _ # 0x4c0 _ _ _ _ _ _ _ _ # 0x4c8 _ _ _ _ _ _ _ _ # 0x4d0 _ _ _ _ _ _ _ _ # 0x4d8 _ _ _ _ _ _ _ _ # 0x4e0 _ _ _ _ _ _ _ _ # 0x4e8 _ _ _ _ _ _ _ _ # 0x4f0 _ _ _ _ _ _ _ _ # 0x4f8 _ _ _ _ _ _ _ _ # 0x500 _ _ _ _ _ _ _ _ # 0x508 _ _ _ _ _ _ _ _ # 0x510 _ _ _ _ _ _ _ _ # 0x518 _ _ _ _ 
_ _ _ _ # 0x520 _ _ _ _ _ _ _ _ # 0x528 _ _ _ _ _ _ _ _ # 0x530 _ _ _ _ _ _ _ _ # 0x538 _ _ _ _ _ _ _ _ # 0x540 _ _ _ _ _ _ _ _ # 0x548 _ _ _ _ _ _ _ _ # 0x550 _ _ _ _ _ _ _ _ # 0x558 _ _ _ _ _ _ _ _ # 0x560 _ _ _ _ _ _ _ _ # 0x568 _ _ _ _ _ _ _ _ # 0x570 _ _ _ _ _ _ _ _ # 0x578 _ _ _ _ _ _ _ _ # 0x580 _ _ _ _ _ _ _ _ # 0x588 _ _ _ _ _ _ _ _ # 0x590 _ _ _ _ _ _ _ _ # 0x598 _ _ _ _ _ _ _ _ # 0x5a0 _ _ _ _ _ _ _ _ # 0x5a8 _ _ _ _ _ _ _ _ # 0x5b0 _ _ _ _ _ _ _ _ # 0x5b8 _ _ _ _ _ _ _ _ # 0x5c0 _ _ _ _ _ _ _ _ # 0x5c8 _ _ _ _ _ _ _ _ # 0x5d0 _ _ _ _ _ _ _ _ # 0x5d8 _ _ _ _ _ _ _ _ # 0x5e0 _ _ _ _ _ _ _ _ # 0x5e8 _ _ _ _ _ _ _ _ # 0x5f0 _ _ _ _ _ _ _ _ # 0x5f8 _ _ _ _ _ _ _ _ # 0x600 _ _ _ _ _ _ _ _ # 0x608 _ _ _ _ _ _ _ _ # 0x610 _ _ _ _ _ _ _ _ # 0x618 _ _ _ _ _ _ _ _ # 0x620 _ _ _ _ _ _ _ _ # 0x628 _ _ _ _ _ _ _ _ # 0x630 _ _ _ _ _ _ _ _ # 0x638 _ _ _ _ _ _ _ _ # 0x640 _ _ _ _ _ _ _ _ # 0x648 _ _ _ _ _ _ _ _ # 0x650 _ _ _ _ _ _ _ _ # 0x658 _ _ _ _ _ _ _ _ # 0x660 _ _ _ _ _ _ _ _ # 0x668 _ _ _ _ _ _ _ _ # 0x670 _ _ _ _ _ _ _ _ # 0x678 _ _ _ _ _ _ _ _ # 0x680 _ _ _ _ _ _ _ _ # 0x688 _ _ _ _ _ _ _ _ # 0x690 _ _ _ _ _ _ _ _ # 0x698 _ _ _ _ _ _ _ _ # 0x6a0 _ _ _ _ _ _ _ _ # 0x6a8 _ _ _ _ _ _ _ _ # 0x6b0 _ _ _ _ _ _ _ _ # 0x6b8 _ _ _ _ _ _ _ _ # 0x6c0 _ _ _ _ _ _ _ _ # 0x6c8 _ _ _ _ _ _ _ _ # 0x6d0 _ _ _ _ _ _ _ _ # 0x6d8 _ _ _ _ _ _ _ _ # 0x6e0 _ _ _ _ _ _ _ _ # 0x6e8 _ _ _ _ _ _ _ _ # 0x6f0 _ _ _ _ _ _ _ _ # 0x6f8 _ _ _ _ _ _ _ _ # 0x700 _ _ _ _ _ _ _ _ # 0x708 _ _ _ _ _ _ _ _ # 0x710 _ _ _ _ _ _ _ _ # 0x718 _ _ _ _ _ _ _ _ # 0x720 _ _ _ _ _ _ _ _ # 0x728 _ _ _ _ _ _ _ _ # 0x730 _ _ _ _ _ _ _ _ # 0x738 _ _ _ _ _ _ _ _ # 0x740 _ _ _ _ _ _ _ _ # 0x748 _ _ _ _ _ _ _ _ # 0x750 _ _ _ _ _ _ _ _ # 0x758 _ _ _ _ _ _ _ _ # 0x760 _ _ _ _ _ _ _ _ # 0x768 _ _ _ _ _ _ _ _ # 0x770 _ _ _ _ _ _ _ _ # 0x778 _ _ _ _ _ _ _ _ # 0x780 _ _ _ _ _ _ _ _ # 0x788 _ _ _ _ _ _ _ _ # 0x790 _ _ _ _ _ _ _ _ # 0x798 _ _ _ _ _ _ _ _ # 0x7a0 _ _ _ _ _ _ _ _ # 0x7a8 _ _ _ _ _ _ _ _ # 0x7b0 dcsr dpc 
dscratch0 dscratch1 _ _ _ _ # 0x7b8 _ _ _ _ _ _ _ _ # 0x7c0 _ _ _ _ _ _ _ _ # 0x7c8 _ _ _ _ _ _ _ _ # 0x7d0 _ _ _ _ _ _ _ _ # 0x7d8 _ _ _ _ _ _ _ _ # 0x7e0 _ _ _ _ _ _ _ _ # 0x7e8 _ _ _ _ _ _ _ _ # 0x7f0 _ _ _ _ _ _ _ _ # 0x7f8 _ _ _ _ _ _ _ _ # 0x800 _ _ _ _ _ _ _ _ # 0x808 _ _ _ _ _ _ _ _ # 0x810 _ _ _ _ _ _ _ _ # 0x818 _ _ _ _ _ _ _ _ # 0x820 _ _ _ _ _ _ _ _ # 0x828 _ _ _ _ _ _ _ _ # 0x830 _ _ _ _ _ _ _ _ # 0x838 _ _ _ _ _ _ _ _ # 0x840 _ _ _ _ _ _ _ _ # 0x848 _ _ _ _ _ _ _ _ # 0x850 _ _ _ _ _ _ _ _ # 0x858 _ _ _ _ _ _ _ _ # 0x860 _ _ _ _ _ _ _ _ # 0x868 _ _ _ _ _ _ _ _ # 0x870 _ _ _ _ _ _ _ _ # 0x878 _ _ _ _ _ _ _ _ # 0x880 _ _ _ _ _ _ _ _ # 0x888 _ _ _ _ _ _ _ _ # 0x890 _ _ _ _ _ _ _ _ # 0x898 _ _ _ _ _ _ _ _ # 0x8a0 _ _ _ _ _ _ _ _ # 0x8a8 _ _ _ _ _ _ _ _ # 0x8b0 _ _ _ _ _ _ _ _ # 0x8b8 _ _ _ _ _ _ _ _ # 0x8c0 _ _ _ _ _ _ _ _ # 0x8c8 _ _ _ _ _ _ _ _ # 0x8d0 _ _ _ _ _ _ _ _ # 0x8d8 _ _ _ _ _ _ _ _ # 0x8e0 _ _ _ _ _ _ _ _ # 0x8e8 _ _ _ _ _ _ _ _ # 0x8f0 _ _ _ _ _ _ _ _ # 0x8f8 _ _ _ _ _ _ _ _ # 0x900 _ _ _ _ _ _ _ _ # 0x908 _ _ _ _ _ _ _ _ # 0x910 _ _ _ _ _ _ _ _ # 0x918 _ _ _ _ _ _ _ _ # 0x920 _ _ _ _ _ _ _ _ # 0x928 _ _ _ _ _ _ _ _ # 0x930 _ _ _ _ _ _ _ _ # 0x938 _ _ _ _ _ _ _ _ # 0x940 _ _ _ _ _ _ _ _ # 0x948 _ _ _ _ _ _ _ _ # 0x950 _ _ _ _ _ _ _ _ # 0x958 _ _ _ _ _ _ _ _ # 0x960 _ _ _ _ _ _ _ _ # 0x968 _ _ _ _ _ _ _ _ # 0x970 _ _ _ _ _ _ _ _ # 0x978 _ _ _ _ _ _ _ _ # 0x980 _ _ _ _ _ _ _ _ # 0x988 _ _ _ _ _ _ _ _ # 0x990 _ _ _ _ _ _ _ _ # 0x998 _ _ _ _ _ _ _ _ # 0x9a0 _ _ _ _ _ _ _ _ # 0x9a8 _ _ _ _ _ _ _ _ # 0x9b0 _ _ _ _ _ _ _ _ # 0x9b8 _ _ _ _ _ _ _ _ # 0x9c0 _ _ _ _ _ _ _ _ # 0x9c8 _ _ _ _ _ _ _ _ # 0x9d0 _ _ _ _ _ _ _ _ # 0x9d8 _ _ _ _ _ _ _ _ # 0x9e0 _ _ _ _ _ _ _ _ # 0x9e8 _ _ _ _ _ _ _ _ # 0x9f0 _ _ _ _ _ _ _ _ # 0x9f8 _ _ _ _ _ _ _ _ # 0xa00 _ _ _ _ _ _ _ _ # 0xa08 _ _ _ _ _ _ _ _ # 0xa10 _ _ _ _ _ _ _ _ # 0xa18 _ _ _ _ _ _ _ _ # 0xa20 _ _ _ _ _ _ _ _ # 0xa28 _ _ _ _ _ _ _ _ # 0xa30 _ _ _ _ _ _ _ _ # 0xa38 _ _ _ _ _ _ _ _ # 0xa40 _ _ _ _ _ _ _ _ # 
0xa48 _ _ _ _ _ _ _ _ # 0xa50 _ _ _ _ _ _ _ _ # 0xa58 _ _ _ _ _ _ _ _ # 0xa60 _ _ _ _ _ _ _ _ # 0xa68 _ _ _ _ _ _ _ _ # 0xa70 _ _ _ _ _ _ _ _ # 0xa78 _ _ _ _ _ _ _ _ # 0xa80 _ _ _ _ _ _ _ _ # 0xa88 _ _ _ _ _ _ _ _ # 0xa90 _ _ _ _ _ _ _ _ # 0xa98 _ _ _ _ _ _ _ _ # 0xaa0 _ _ _ _ _ _ _ _ # 0xaa8 _ _ _ _ _ _ _ _ # 0xab0 _ _ _ _ _ _ _ _ # 0xab8 _ _ _ _ _ _ _ _ # 0xac0 _ _ _ _ _ _ _ _ # 0xac8 _ _ _ _ _ _ _ _ # 0xad0 _ _ _ _ _ _ _ _ # 0xad8 _ _ _ _ _ _ _ _ # 0xae0 _ _ _ _ _ _ _ _ # 0xae8 _ _ _ _ _ _ _ _ # 0xaf0 _ _ _ _ _ _ _ _ # 0xaf8 _ _ _ _ _ _ _ _ # 0xb00 _ _ _ _ _ _ _ _ # 0xb08 _ _ _ _ _ _ _ _ # 0xb10 _ _ _ _ _ _ _ _ # 0xb18 _ _ _ _ _ _ _ _ # 0xb20 _ _ _ _ _ _ _ _ # 0xb28 _ _ _ _ _ _ _ _ # 0xb30 _ _ _ _ _ _ _ _ # 0xb38 _ _ _ _ _ _ _ _ # 0xb40 _ _ _ _ _ _ _ _ # 0xb48 _ _ _ _ _ _ _ _ # 0xb50 _ _ _ _ _ _ _ _ # 0xb58 _ _ _ _ _ _ _ _ # 0xb60 _ _ _ _ _ _ _ _ # 0xb68 _ _ _ _ _ _ _ _ # 0xb70 _ _ _ _ _ _ _ _ # 0xb78 _ _ _ _ _ _ _ _ # 0xb80 _ _ _ _ _ _ _ _ # 0xb88 _ _ _ _ _ _ _ _ # 0xb90 _ _ _ _ _ _ _ _ # 0xb98 _ _ _ _ _ _ _ _ # 0xba0 _ _ _ _ _ _ _ _ # 0xba8 _ _ _ _ _ _ _ _ # 0xbb0 _ _ _ _ _ _ _ _ # 0xbb8 _ _ _ _ _ _ _ _ # 0xbc0 _ _ _ _ _ _ _ _ # 0xbc8 _ _ _ _ _ _ _ _ # 0xbd0 _ _ _ _ _ _ _ _ # 0xbd8 _ _ _ _ _ _ _ _ # 0xbe0 _ _ _ _ _ _ _ _ # 0xbe8 _ _ _ _ _ _ _ _ # 0xbf0 _ _ _ _ _ _ _ _ # 0xbf8 _ _ _ _ _ _ _ _ # 0xc00 _ _ _ _ _ _ _ _ # 0xc08 _ _ _ _ _ _ _ _ # 0xc10 _ _ _ _ _ _ _ _ # 0xc18 _ _ _ _ _ _ _ _ # 0xc20 _ _ _ _ _ _ _ _ # 0xc28 _ _ _ _ _ _ _ _ # 0xc30 _ _ _ _ _ _ _ _ # 0xc38 _ _ _ _ _ _ _ _ # 0xc40 _ _ _ _ _ _ _ _ # 0xc48 _ _ _ _ _ _ _ _ # 0xc50 _ _ _ _ _ _ _ _ # 0xc58 _ _ _ _ _ _ _ _ # 0xc60 _ _ _ _ _ _ _ _ # 0xc68 _ _ _ _ _ _ _ _ # 0xc70 _ _ _ _ _ _ _ _ # 0xc78 _ _ _ _ _ _ _ _ # 0xc80 _ _ _ _ _ _ _ _ # 0xc88 _ _ _ _ _ _ _ _ # 0xc90 _ _ _ _ _ _ _ _ # 0xc98 _ _ _ _ _ _ _ _ # 0xca0 _ _ _ _ _ _ _ _ # 0xca8 _ _ _ _ _ _ _ _ # 0xcb0 _ _ _ _ _ _ _ _ # 0xcb8 _ _ _ _ _ _ _ _ # 0xcc0 _ _ _ _ _ _ _ _ # 0xcc8 _ _ _ _ _ _ _ _ # 0xcd0 _ _ _ _ _ _ _ _ # 0xcd8 _ _ _ _ _ _ _ _ # 0xce0 _
_ _ _ _ _ _ _ # 0xce8 _ _ _ _ _ _ _ _ # 0xcf0 _ _ _ _ _ _ _ _ # 0xcf8 _ _ _ _ _ _ _ _ # 0xd00 _ _ _ _ _ _ _ _ # 0xd08 _ _ _ _ _ _ _ _ # 0xd10 _ _ _ _ _ _ _ _ # 0xd18 _ _ _ _ _ _ _ _ # 0xd20 _ _ _ _ _ _ _ _ # 0xd28 _ _ _ _ _ _ _ _ # 0xd30 _ _ _ _ _ _ _ _ # 0xd38 _ _ _ _ _ _ _ _ # 0xd40 _ _ _ _ _ _ _ _ # 0xd48 _ _ _ _ _ _ _ _ # 0xd50 _ _ _ _ _ _ _ _ # 0xd58 _ _ _ _ _ _ _ _ # 0xd60 _ _ _ _ _ _ _ _ # 0xd68 _ _ _ _ _ _ _ _ # 0xd70 _ _ _ _ _ _ _ _ # 0xd78 _ _ _ _ _ _ _ _ # 0xd80 _ _ _ _ _ _ _ _ # 0xd88 _ _ _ _ _ _ _ _ # 0xd90 _ _ _ _ _ _ _ _ # 0xd98 _ _ _ _ _ _ _ _ # 0xda0 _ _ _ _ _ _ _ _ # 0xda8 _ _ _ _ _ _ _ _ # 0xdb0 _ _ _ _ _ _ _ _ # 0xdb8 _ _ _ _ _ _ _ _ # 0xdc0 _ _ _ _ _ _ _ _ # 0xdc8 _ _ _ _ _ _ _ _ # 0xdd0 _ _ _ _ _ _ _ _ # 0xdd8 _ _ _ _ _ _ _ _ # 0xde0 _ _ _ _ _ _ _ _ # 0xde8 _ _ _ _ _ _ _ _ # 0xdf0 _ _ _ _ _ _ _ _ # 0xdf8 _ _ _ _ _ _ _ _ # 0xe00 _ _ _ _ _ _ _ _ # 0xe08 _ _ _ _ _ _ _ _ # 0xe10 _ _ _ _ _ _ _ _ # 0xe18 _ _ _ _ _ _ _ _ # 0xe20 _ _ _ _ _ _ _ _ # 0xe28 _ _ _ _ _ _ _ _ # 0xe30 _ _ _ _ _ _ _ _ # 0xe38 _ _ _ _ _ _ _ _ # 0xe40 _ _ _ _ _ _ _ _ # 0xe48 _ _ _ _ _ _ _ _ # 0xe50 _ _ _ _ _ _ _ _ # 0xe58 _ _ _ _ _ _ _ _ # 0xe60 _ _ _ _ _ _ _ _ # 0xe68 _ _ _ _ _ _ _ _ # 0xe70 _ _ _ _ _ _ _ _ # 0xe78 _ _ _ _ _ _ _ _ # 0xe80 _ _ _ _ _ _ _ _ # 0xe88 _ _ _ _ _ _ _ _ # 0xe90 _ _ _ _ _ _ _ _ # 0xe98 _ _ _ _ _ _ _ _ # 0xea0 _ _ _ _ _ _ _ _ # 0xea8 _ _ _ _ _ _ _ _ # 0xeb0 _ _ _ _ _ _ _ _ # 0xeb8 _ _ _ _ _ _ _ _ # 0xec0 _ _ _ _ _ _ _ _ # 0xec8 _ _ _ _ _ _ _ _ # 0xed0 _ _ _ _ _ _ _ _ # 0xed8 _ _ _ _ _ _ _ _ # 0xee0 _ _ _ _ _ _ _ _ # 0xee8 _ _ _ _ _ _ _ _ # 0xef0 _ _ _ _ _ _ _ _ # 0xef8 _ _ _ _ _ _ _ _ # 0xf00 _ _ _ _ _ _ _ _ # 0xf08 _ _ _ _ _ _ _ _ # 0xf10 _ _ _ _ _ _ _ _ # 0xf18 _ _ _ _ _ _ _ _ # 0xf20 _ _ _ _ _ _ _ _ # 0xf28 _ _ _ _ _ _ _ _ # 0xf30 _ _ _ _ _ _ _ _ # 0xf38 _ _ _ _ _ _ _ _ # 0xf40 _ _ _ _ _ _ _ _ # 0xf48 _ _ _ _ _ _ _ _ # 0xf50 _ _ _ _ _ _ _ _ # 0xf58 _ _ _ _ _ _ _ _ # 0xf60 _ _ _ _ _ _ _ _ # 0xf68 _ _ _ _ _ _ _ _ # 0xf70 _ _ _ _ _ _ _ _ # 0xf78 _ _ _ _ _ 
_ _ _ # 0xf80 _ _ _ _ _ _ _ _ # 0xf88 _ _ _ _ _ _ _ _ # 0xf90 _ _ _ _ _ _ _ _ # 0xf98 _ _ _ _ _ _ _ _ # 0xfa0 _ _ _ _ _ _ _ _ # 0xfa8 _ _ _ _ _ _ _ _ # 0xfb0 _ _ _ _ _ _ _ _ # 0xfb8 _ _ _ _ _ _ _ _ # 0xfc0 _ _ _ _ _ _ _ _ # 0xfc8 _ _ _ _ _ _ _ _ # 0xfd0 _ _ _ _ _ _ _ _ # 0xfd8 _ _ _ _ _ _ _ _ # 0xfe0 _ _ _ _ _ _ _ _ # 0xfe8 _ _ _ _ _ _ _ _ # 0xff0 _ _ _ _ _ _ _ _ # 0xff8 _ _ _ _ _ _ _ _ ]; define context CONTEXT reserved=(0,3) MXL=(4,5) # MXL - Machine XLEN {1: 32, 2: 64, 3: 128} ; define token instr (32) op0001=(0,1) op0006=(0,6) op0204=(2,4) op0506=(5,6) op0707=(7,7) op0711=(7,11) r0711=(7,11) fr0711=(7,11) v0711=(7,11) op0808=(8,8) op0809=(8,9) op0811=(8,11) op0911=(9,11) op1011=(10,11) op1213=(12,13) op1214=(12,14) funct3=(12,14) op1219=(12,19) op1231=(12,31) sop1231=(12,31) signed op1414=(14,14) op1516=(15,16) op1519=(15,19) sop1519=(15,19) signed subf5=(15,19) r1519=(15,19) fr1519=(15,19) v1519=(15,19) op1527=(15,27) op1531=(15,31) op1719=(17,19) op2020=(20,20) op2022=(20,22) succ=(20,23) op2023=(20,23) op2024=(20,24) r2024=(20,24) fr2024=(20,24) v2024=(20,24) op2025=(20,25) op2026=(20,26) csr_0=(20,27) csr_1=(20,27) csr_2=(20,27) csr_3=(20,27) csr_4=(20,27) csr_50=(20,26) csr_58=(20,25) csr_5C=(20,25) csr_60=(20,26) csr_68=(20,25) csr_6C=(20,25) csr_70=(20,26) csr_78=(20,24) csr_7A=(20,23) csr_7B=(20,23) csr_7C=(20,25) csr_8=(20,27) csr_90=(20,26) csr_98=(20,25) csr_9C=(20,25) csr_A0=(20,26) csr_A8=(20,25) csr_AC=(20,25) csr_B0=(20,26) csr_B8=(20,25) csr_BC=(20,25) csr_C0=(20,26) csr_C8=(20,25) csr_CC=(20,25) csr_D0=(20,26) csr_D8=(20,25) csr_DC=(20,25) csr_E0=(20,26) csr_E8=(20,25) csr_EC=(20,25) csr_F0=(20,26) csr_F8=(20,25) csr_FC=(20,25) op2030=(20,30) op2031=(20,31) sop2031=(20,31) signed op2121=(21,21) op2122=(21,22) op2130=(21,30) op2222=(22,22) op2230=(22,30) op2323=(23,23) op2324=(23,24) op2330=(23,30) op2424=(24,24) op2427=(24,27) pred=(24,27) op2525=(25,25) op2526=(25,26) op2527=(25,27) op2529=(25,29) op2530=(25,30) op2531=(25,31) sop2531=(25,31) 
signed funct7=(25,31) wd=(26,26) op2626=(26,26) op2627=(26,27) op2631=(26,31) op2731=(27,31) amoop=(27,31) funct5=(27,31) op2727=(27,27) r2731=(27,31) fr2731=(27,31) op2828=(28,28) op2829=(28,29) fm=(28,31) op2931=(29,31) op3030=(30,30) op3031=(30,31) op3131=(31,31) sop3131=(31,31) signed ; define token cinstr (16) cop0001=(0,1) cop0202=(2,2) cop0203=(2,3) cop0204=(2,4) cr0204s=(2,4) cfr0204s=(2,4) cop0205=(2,5) cop0206=(2,6) cr0206=(2,6) cfr0206=(2,6) cop0212=(2,12) cop0303=(3,3) cop0304=(3,4) cop0305=(3,5) cop0404=(4,4) cop0406=(4,6) cop0505=(5,5) cop0506=(5,6) cop0512=(5,12) cop0606=(6,6) cop0707=(7,7) cop0708=(7,8) cop0709=(7,9) cr0709s=(7,9) cd0709s=(7,9) cfr0709s=(7,9) cop0710=(7,10) cop0711=(7,11) cr0711=(7,11) cd0711NoSp=(7,11) cd0711=(7,11) cfr0711=(7,11) cop0712=(7,12) cop0808=(8,8) cop0909=(9,9) cop0910=(9,10) cop0912=(9,12) cop1010=(10,10) cop1011=(10,11) cop1012=(10,12) cop1111=(11,11) cop1112=(11,12) cop1212=(12,12) scop1212=(12,12) signed cop1315=(13,15) ; ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32a.sinc ================================================ # RV32A Standard Extension # amoadd.w d,t,0(s) 0000202f fe00707f DWORD|DREF (0, 4) :amoadd.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); tmp = tmp + tmprs2; *[ram]:4 tmprs1 = tmp; } # amoand.w d,t,0(s) 6000202f fe00707f DWORD|DREF (0, 4) :amoand.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0xc & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); tmp = tmp & tmprs2; *[ram]:4 tmprs1 = tmp; } # amomax.w d,t,0(s) a000202f fe00707f DWORD|DREF (0, 4) :amomax.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x14 & aqrl { local tmprs1 = rs1; 
local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); if (tmprs2 s<= tmp) goto inst_next; *[ram]:4 tmprs1 = tmprs2; } # amomaxu.w d,t,0(s) e000202f fe00707f DWORD|DREF (0, 4) :amomaxu.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x1c & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); if (tmprs2 <= tmp) goto inst_next; *[ram]:4 tmprs1 = tmprs2; } # amomin.w d,t,0(s) 8000202f fe00707f DWORD|DREF (0, 4) :amomin.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x10 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); if (tmprs2 s>= tmp) goto inst_next; *[ram]:4 tmprs1 = tmprs2; } # amominu.w d,t,0(s) c000202f fe00707f DWORD|DREF (0, 4) :amominu.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x18 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); if (tmprs2 >= tmp) goto inst_next; *[ram]:4 tmprs1 = tmprs2; } # amoor.w d,t,0(s) 4000202f fe00707f DWORD|DREF (0, 4) :amoor.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x8 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); tmp = tmp | tmprs2; *[ram]:4 tmprs1 = tmp; } # amoswap.w d,t,0(s) 0800202f fe00707f DWORD|DREF (0, 4) :amoswap.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x1 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; assignW(rd, tmp); *[ram]:4 tmprs1 = tmprs2; } # amoxor.w d,t,0(s) 2000202f fe00707f DWORD|DREF (0, 4) :amoxor.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x4 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2W; local tmp:4 = *[ram]:4 tmprs1; 
assignW(rd, tmp); tmp = tmp ^ tmprs2; *[ram]:4 tmprs1 = tmp; } # lr.w d,0(s) 1000202f fff0707f DWORD|DREF (0, 4) :lr.w^aqrl rd,(rs1) is rs1 & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x2 & op2024=0x0 & aqrl { RESERVE_ADDRESS = rs1; RESERVE = 1; RESERVE_LENGTH = 4; assignW(rd, *[ram]:4 rs1); } # sc.w d,t,0(s) 1800202f fe00707f DWORD|DREF (0, 4) :sc.w^aqrl rd,rs2W,(rs1) is rs1 & rs2W & rd & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x2 & op2731=0x3 & aqrl { local tmprs2 = rs2W; local tmprs1 = rs1; rd = 1; if ((RESERVE == 0)||(RESERVE_ADDRESS != tmprs1)||(RESERVE_LENGTH != 4)) goto inst_next; *[ram]:4 tmprs1 = tmprs2; rd = 0; RESERVE_ADDRESS = 0; RESERVE = 0; RESERVE_LENGTH = 0; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32b.sinc ================================================ # RV32 Bitmanip Extension :andn rd, rs1, rs2 is op0006=0x33 & op1214=0x7 & op2531=0x20 & rd & rs1 & rs2 unimpl :bdep rd, rs1, rs2 is op0006=0x33 & op1214=0x6 & op2531=0x24 & rd & rs1 & rs2 unimpl :bext rd, rs1, rs2 is op0006=0x33 & op1214=0x6 & op2531=0x4 & rd & rs1 & rs2 unimpl :bfp rd, rs1, rs2 is op0006=0x33 & op1214=0x7 & op2531=0x24 & rd & rs1 & rs2 unimpl :clmul rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x5 & rd & rs1 & rs2 unimpl :clmulh rd, rs1, rs2 is op0006=0x33 & op1214=0x3 & op2531=0x5 & rd & rs1 & rs2 unimpl :clmulr rd, rs1, rs2 is op0006=0x33 & op1214=0x2 & op2531=0x5 & rd & rs1 & rs2 unimpl :clz rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x0 & op2531=0x30 & rd & rs1 unimpl :cmix rd, rs2, rs1, rs3 is op0006=0x33 & op1214=0x1 & op2526=0x3 & rd & rs1 & rs2 & rs3 unimpl :cmov rd, rs2, rs1, rs3 is op0006=0x33 & op1214=0x5 & op2526=0x3 & rd & rs1 & rs2 & rs3 unimpl :crc32.b rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x10 & op2531=0x30 & rd & rs1 unimpl :crc32.h rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x11 & op2531=0x30 & rd & rs1 unimpl :crc32.w rd, rs1 is op0006=0x13 & op1214=0x1 
& op2024=0x12 & op2531=0x30 & rd & rs1 unimpl :crc32c.b rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x18 & op2531=0x30 & rd & rs1 unimpl :crc32c.h rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x19 & op2531=0x30 & rd & rs1 unimpl :crc32c.w rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x1a & op2531=0x30 & rd & rs1 unimpl :ctz rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x1 & op2531=0x30 & rd & rs1 unimpl :fsl rd, rs1, rs3, rs2 is op0006=0x33 & op1214=0x1 & op2526=0x2 & rd & rs1 & rs2 & rs3 unimpl :fsr rd, rs1, rs3, rs2 is op0006=0x33 & op1214=0x5 & op2526=0x2 & rd & rs1 & rs2 & rs3 unimpl #TODO fix op2025 #TODO this looks like a typo in 0.92 :fsri rd, rs1, rs3, op2025 is op0006=0x33 & op1214=0x5 & op2626=0x1 & op2025 & rd & rs1 & rs3 unimpl :gorc rd, rs1, rs2 is op0006=0x33 & op1214=0x5 & op2531=0x14 & rd & rs1 & rs2 unimpl #TODO fix op2026 :gorci rd, rs1, op2026 is op0006=0x13 & op1214=0x5 & op2731=0x5 & op2026 & rd & rs1 unimpl :grev rd, rs1, rs2 is op0006=0x33 & op1214=0x5 & op2531=0x34 & rd & rs1 & rs2 unimpl #TODO fix op2026 :grevi rd, rs1, op2026 is op0006=0x13 & op1214=0x5 & op2731=0xd & op2026 & rd & rs1 unimpl :max rd, rs1, rs2 is op0006=0x33 & op1214=0x6 & op2531=0x5 & rd & rs1 & rs2 unimpl :maxu rd, rs1, rs2 is op0006=0x33 & op1214=0x7 & op2531=0x5 & rd & rs1 & rs2 unimpl :min rd, rs1, rs2 is op0006=0x33 & op1214=0x4 & op2531=0x5 & rd & rs1 & rs2 unimpl :minu rd, rs1, rs2 is op0006=0x33 & op1214=0x5 & op2531=0x5 & rd & rs1 & rs2 unimpl :orn rd, rs1, rs2 is op0006=0x33 & op1214=0x6 & op2531=0x20 & rd & rs1 & rs2 unimpl :pack rd, rs1, rs2 is op0006=0x33 & op1214=0x4 & op2531=0x4 & rd & rs1 & rs2 unimpl :packh rd, rs1, rs2 is op0006=0x33 & op1214=0x7 & op2531=0x4 & rd & rs1 & rs2 unimpl :packu rd, rs1, rs2 is op0006=0x33 & op1214=0x4 & op2531=0x24 & rd & rs1 & rs2 unimpl :pcnt rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x2 & op2531=0x30 & rd & rs1 unimpl :rol rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x30 & rd & rs1 & rs2 unimpl :ror rd, rs1, rs2 
is op0006=0x33 & op1214=0x5 & op2531=0x30 & rd & rs1 & rs2 unimpl #TODO fix op2026 :rori rd, rs1, op2026 is op0006=0x13 & op1214=0x5 & op2731=0xc & op2026 & rd & rs1 unimpl :sbclr rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x24 & rd & rs1 & rs2 unimpl #TODO fix op2026 :sbclri rd, rs1, op2026 is op0006=0x13 & op1214=0x1 & op2731=0x9 & op2026 & rd & rs1 unimpl :sbext rd, rs1, rs2 is op0006=0x33 & op1214=0x5 & op2531=0x24 & rd & rs1 & rs2 unimpl #TODO fix op2026 :sbexti rd, rs1, op2026 is op0006=0x13 & op1214=0x5 & op2731=0x9 & op2026 & rd & rs1 unimpl :sbinv rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x34 & rd & rs1 & rs2 unimpl #TODO fix op2026 :sbinvi rd, rs1, op2026 is op0006=0x13 & op1214=0x1 & op2731=0xd & op2026 & rd & rs1 unimpl :sbset rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x14 & rd & rs1 & rs2 unimpl #TODO fix op2026 :sbseti rd, rs1, op2026 is op0006=0x13 & op1214=0x1 & op2731=0x5 & op2026 & rd & rs1 unimpl :sext.b rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x4 & op2531=0x30 & rd & rs1 unimpl :sext.h rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x5 & op2531=0x30 & rd & rs1 unimpl :sh1add rd, rs1, rs2 is op0006=0x33 & op1214=0x2 & op2531=0x10 & rd & rs1 & rs2 unimpl :sh2add rd, rs1, rs2 is op0006=0x33 & op1214=0x4 & op2531=0x10 & rd & rs1 & rs2 unimpl :sh3add rd, rs1, rs2 is op0006=0x33 & op1214=0x6 & op2531=0x10 & rd & rs1 & rs2 unimpl :shfl rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x4 & rd & rs1 & rs2 unimpl #TODO fix op2025 :shfli rd, rs1, op2025 is op0006=0x13 & op1214=0x1 & op2631=0x2 & op2025 & rd & rs1 unimpl :slo rd, rs1, rs2 is op0006=0x33 & op1214=0x1 & op2531=0x10 & rd & rs1 & rs2 unimpl #TODO fix op2026 :sloi rd, rs1, op2026 is op0006=0x13 & op1214=0x1 & op2731=0x4 & op2026 & rd & rs1 unimpl :sro rd, rs1, rs2 is op0006=0x33 & op1214=0x5 & op2531=0x10 & rd & rs1 & rs2 unimpl #TODO fix op2026 :sroi rd, rs1, op2026 is op0006=0x13 & op1214=0x5 & op2731=0x4 & op2026 & rd & rs1 unimpl :unshfl rd, rs1, rs2 is 
op0006=0x33 & op1214=0x5 & op2531=0x4 & rd & rs1 & rs2 unimpl #TODO fix op2025 :unshfli rd, rs1, op2025 is op0006=0x13 & op1214=0x5 & op2631=0x2 & op2025 & rd & rs1 unimpl :xnor rd, rs1, rs2 is op0006=0x33 & op1214=0x4 & op2531=0x20 & rd & rs1 & rs2 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32d.sinc ================================================ # RV32D Standard Extension # fadd.d D,S,T,m 02000053 fe00007f SIMPLE (0, 0) :fadd.d frd,frs1D,frs2D,FRM is frs1D & frd & frs2D & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x1 { frd = frs1D f+ frs2D; } # fclass.d d,S e2001053 fff0707f SIMPLE (0, 0) :fclass.d rd,frs1D is frs1D & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x71 & op2024=0x0 { #TODO # rd = 0; # rd[0, 1] = 0; #TODO - inf # rd[1, 1] = 0; #TODO - norm num # rd[2, 1] = 0; #TODO - subnorm num # rd[3, 1] = 0; #TODO - 0 # rd[4, 1] = 0; #TODO + 0 # rd[5, 1] = 0; #TODO + norm num # rd[6, 1] = 0; #TODO + subnorm num # rd[7, 1] = 0; #TODO + inf # rd[8, 1] = 0; #TODO snan # rd[9, 1] = 0; #TODO qnan } # fcvt.d.s D,S 42000053 fff0707f SIMPLE (0, 0) :fcvt.d.s frd,frs1S is frs1S & frd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x21 & op2024=0x0 { local tmp:8 = float2float(frs1S); frd = tmp; } # fcvt.d.w D,s d2000053 fff0707f SIMPLE (0, 0) :fcvt.d.w frd,rs1W is frd & rs1W & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x69 & op2024=0x0 { local tmp:8 = int2float(rs1W); frd = tmp; } # fcvt.d.wu D,s d2100053 fff0707f SIMPLE (0, 0) :fcvt.d.wu frd,rs1W is frd & rs1W & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x69 & op2024=0x1 { #ATTN unsigned can be an issue here local u32:$(XLEN2) = zext(rs1W); local tmp:8 = int2float(u32); frd = tmp; } # fcvt.s.d D,S,m 40100053 fff0007f SIMPLE (0, 0) :fcvt.s.d frd,frs1D,FRM is frs1D & frd & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x20 & op2024=0x1 { local tmp:4 = float2float(frs1D); frd = 
zext(tmp); } # fcvt.w.d d,S,m c2000053 fff0007f SIMPLE (0, 0) :fcvt.w.d rdW,frs1D,FRM is frs1D & FRM & rdW & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x61 & op2024=0x0 { rdW = trunc(frs1D); } # fcvt.wu.d d,S,m c2100053 fff0007f SIMPLE (0, 0) :fcvt.wu.d rdW,frs1D,FRM is frs1D & FRM & rdW & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x61 & op2024=0x1 { #TODO unsigned rdW = trunc(frs1D); } # fdiv.d D,S,T,m 1a000053 fe00007f SIMPLE (0, 0) :fdiv.d frd,frs1D,frs2D,FRM is frs1D & frd & frs2D & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0xd { frd = frs1D f/ frs2D; } # feq.d d,S,T a2002053 fe00707f SIMPLE (0, 0) :feq.d rd,frs1D,frs2D is frs2D & frs1D & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x51 { rd = zext(frs1D f== frs2D); } # fld D,o(s) 00003007 0000707f QWORD|DREF (0, 8) :fld frd,immI(rs1) is immI & frd & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x0 & funct3=0x3 { local ea:$(XLEN) = immI + rs1; frd = *[ram]:$(DFLEN) ea; } # fle.d d,S,T a2000053 fe00707f SIMPLE (0, 0) :fle.d rd,frs1D,frs2D is frs2D & frs1D & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x51 { rd = zext(frs1D f<= frs2D); } # flt.d d,S,T a2001053 fe00707f SIMPLE (0, 0) :flt.d rd,frs1D,frs2D is frs2D & frs1D & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x51 { rd = zext(frs1D f< frs2D); } #endif # fmadd.d D,S,T,R,m 02000043 0600007f SIMPLE (0, 0) :fmadd.d frd,frs1D,frs2D,frs3D,FRM is frs1D & frd & frs2D & FRM & frs3D & op0001=0x3 & op0204=0x0 & op0506=0x2 & op2526=0x1 { frd = (frs1D f* frs2D) f+ frs3D; } # fmax.d D,S,T 2a001053 fe00707f SIMPLE (0, 0) :fmax.d frd,frs1D,frs2D is frs1D & frd & frs2D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x15 { #TODO redo this local tmpfrs1 = frs1D; local tmpfrs2 = frs2D; frd = tmpfrs1; if (nan(tmpfrs1) && nan(tmpfrs2)) goto inst_next; if (nan(tmpfrs2)) goto inst_next; frd = tmpfrs2; if (nan(tmpfrs1)) goto inst_next; if (tmpfrs2 f> tmpfrs1) goto inst_next; frd = tmpfrs1; } # 
fmin.d D,S,T 2a000053 fe00707f SIMPLE (0, 0) :fmin.d frd,frs1D,frs2D is frs1D & frd & frs2D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x15 { #TODO redo this local tmpfrs1 = frs1D; local tmpfrs2 = frs2D; frd = tmpfrs1; if (nan(tmpfrs1) && nan(tmpfrs2)) goto inst_next; if (nan(tmpfrs2)) goto inst_next; frd = tmpfrs2; if (nan(tmpfrs1)) goto inst_next; if (tmpfrs2 f<= tmpfrs1) goto inst_next; frd = tmpfrs1; } # fmsub.d D,S,T,R,m 02000047 0600007f SIMPLE (0, 0) :fmsub.d frd,frs1D,frs2D,frs3D,FRM is frs1D & frd & frs2D & FRM & frs3D & op0001=0x3 & op0204=0x1 & op0506=0x2 & op2526=0x1 { frd = (frs1D f* frs2D) f- frs3D; } # fmul.d D,S,T,m 12000053 fe00007f SIMPLE (0, 0) :fmul.d frd,frs1D,frs2D,FRM is frs1D & frd & frs2D & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x9 { frd = frs1D f* frs2D; } # fnmadd.d D,S,T,R,m 0200004f 0600007f SIMPLE (0, 0) :fnmadd.d frd,frs1D,frs2D,frs3D,FRM is frs1D & frd & frs2D & FRM & frs3D & op0001=0x3 & op0204=0x3 & op0506=0x2 & op2526=0x1 { frd = (f- (frs1D f* frs2D)) f- frs3D; } # fnmsub.d D,S,T,R,m 0200004b 0600007f SIMPLE (0, 0) :fnmsub.d frd,frs1D,frs2D,frs3D,FRM is frs1D & frd & frs2D & FRM & frs3D & op0001=0x3 & op0204=0x2 & op0506=0x2 & op2526=0x1 { frd = (f- (frs1D f* frs2D)) f+ frs3D; } # fsd T,q(s) 00003027 0000707f QWORD|DREF (0, 8) :fsd frs2D,immS(rs1) is frs2D & immS & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x1 & funct3=0x3 { local ea:$(XLEN) = immS + rs1; *[ram]:$(DFLEN) ea = frs2D; } # fsgnj.d D,S,T 22000053 fe00707f SIMPLE (0, 0) :fsgnj.d frd,frs1D,frs2D is frs1D & frd & frs2D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x11 { local tmp:$(DFLEN) = frs1D; tmp[63,1] = frs2D[63,1]; frd = tmp; } # fmv.d D,U 22000053 fe00707f ALIAS (0, 0) :fmv.d frd,frs1D is frd & frs1D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x11 & op1519=op2024 { frd = frs1D; } # fsgnjn.d D,S,T 22001053 fe00707f SIMPLE (0, 0) :fsgnjn.d frd,frs1D,frs2D is frs1D & frd & frs2D & op0001=0x3 & op0204=0x4 
& op0506=0x2 & funct3=0x1 & funct7=0x11 { local tmp:$(DFLEN) = frs1D; tmp[63,1] = !frs2D[63,1]; frd = tmp; } # fneg.d D,U 22001053 fe00707f ALIAS (0, 0) :fneg.d frd,frs1D is frd & frs1D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x11 & op1519=op2024 { frd = f- frs1D; } # fsgnjx.d D,S,T 22002053 fe00707f SIMPLE (0, 0) :fsgnjx.d frd,frs1D,frs2D is frs1D & frd & frs2D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x11 { local tmp:$(DFLEN) = frs1D; tmp[63,1] = tmp[63,1] ^ frs2D[63,1]; frd = tmp; } # fabs.d D,U 22002053 fe00707f ALIAS (0, 0) :fabs.d frd,frs1D is frd & frs1D & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x11 & op1519=op2024 { frd = abs(frs1D); } # fsqrt.d D,S,m 5a000053 fff0007f SIMPLE (0, 0) :fsqrt.d frd,frs1D,FRM is frs1D & frd & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x2d & op2024=0x0 { frd = sqrt(frs1D); } # fsub.d D,S,T,m 0a000053 fe00007f SIMPLE (0, 0) :fsub.d frd,frs1D,frs2D,FRM is frs1D & frd & frs2D & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x5 { frd = frs1D f- frs2D; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32f.sinc ================================================ # RV32F Standard Extension # fadd.s D,S,T,m 00000053 fe00007f SIMPLE (0, 0) :fadd.s frd,frs1S,frs2S,FRM is frs1S & frd & frs2S & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x0 { local tmp:4 = frs1S f+ frs2S; fassignS(frd, tmp); } # fclass.s d,S e0001053 fff0707f SIMPLE (0, 0) :fclass.s rd,frs1S is frs1S & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x70 & op2024=0x0 { #TODO # rd = 0; # rd[0, 1] = 0; #TODO - inf # rd[1, 1] = 0; #TODO - norm num # rd[2, 1] = 0; #TODO - subnorm num # rd[3, 1] = 0; #TODO - 0 # rd[4, 1] = 0; #TODO + 0 # rd[5, 1] = 0; #TODO + norm num # rd[6, 1] = 0; #TODO + subnorm num # rd[7, 1] = 0; #TODO + inf # rd[8, 1] = 0; #TODO snan # rd[9, 1] = 0; #TODO qnan } # fcvt.s.w D,s,m d0000053 fff0007f SIMPLE 
(0, 0) :fcvt.s.w frd,rs1W,FRM is frd & FRM & rs1W & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x68 & op2024=0x0 { local tmp:4 = int2float(rs1W); fassignS(frd, tmp); } # fcvt.s.wu D,s,m d0100053 fff0007f SIMPLE (0, 0) :fcvt.s.wu frd,rs1W,FRM is frd & FRM & rs1W & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x68 & op2024=0x1 { #ATTN unsigned can be an issue here local u32:$(XLEN2) = zext(rs1W); local tmp:4 = int2float(u32); fassignS(frd, tmp); } # fcvt.w.s d,S,m c0000053 fff0007f SIMPLE (0, 0) :fcvt.w.s rdW,frs1S,FRM is frs1S & FRM & rdW & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x60 & op2024=0x0 { rdW = trunc(frs1S); } # fcvt.wu.s d,S,m c0100053 fff0007f SIMPLE (0, 0) :fcvt.wu.s rdW,frs1S,FRM is frs1S & FRM & rdW & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x60 & op2024=0x1 { #TODO unsigned rdW = trunc(frs1S); } # fdiv.s D,S,T,m 18000053 fe00007f SIMPLE (0, 0) :fdiv.s frd,frs1S,frs2S,FRM is frs1S & frd & frs2S & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0xc { local tmp:4 = frs1S f/ frs2S; fassignS(frd, tmp); } # feq.s d,S,T a0002053 fe00707f SIMPLE (0, 0) :feq.s rd,frs1S,frs2S is frs2S & frs1S & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x50 { rd = zext(frs1S f== frs2S); } # fle.s d,S,T a0000053 fe00707f SIMPLE (0, 0) :fle.s rd,frs1S,frs2S is frs2S & frs1S & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x50 { rd = zext(frs1S f<= frs2S); } # flt.s d,S,T a0001053 fe00707f SIMPLE (0, 0) :flt.s rd,frs1S,frs2S is frs2S & frs1S & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x50 { rd = zext(frs1S f< frs2S); } # flw D,o(s) 00002007 0000707f DWORD|DREF (0, 4) :flw frd,immI(rs1) is immI & frd & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x0 & funct3=0x2 { local ea:$(XLEN) = immI + rs1; fassignS(frd, *[ram]:4 ea); } # fmadd.s D,S,T,R,m 00000043 0600007f SIMPLE (0, 0) :fmadd.s frd,frs1S,frs2S,frs3S,FRM is frs1S & frd & frs2S & FRM & frs3S & op0001=0x3 & op0204=0x0 & op0506=0x2 & op2526=0x0 { 
local tmp:4 = (frs1S f* frs2S) f+ frs3S; fassignS(frd, tmp); } # fmax.s D,S,T 28001053 fe00707f SIMPLE (0, 0) :fmax.s frd,frs1S,frs2S is frs1S & frd & frs2S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x14 { #TODO redo this local tmpfrs1 = frs1S; local tmpfrs2 = frs2S; fassignS(frd, tmpfrs1); if (nan(tmpfrs1) && nan(tmpfrs2)) goto inst_next; if (nan(tmpfrs2)) goto inst_next; fassignS(frd, tmpfrs2); if (nan(tmpfrs1)) goto inst_next; if (tmpfrs2 f>= tmpfrs1) goto inst_next; fassignS(frd, tmpfrs1); } # fmin.s D,S,T 28000053 fe00707f SIMPLE (0, 0) :fmin.s frd,frs1S,frs2S is frs1S & frd & frs2S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x14 { #TODO redo this local tmpfrs1 = frs1S; local tmpfrs2 = frs2S; fassignS(frd, tmpfrs1); if (nan(tmpfrs1) && nan(tmpfrs2)) goto inst_next; if (nan(tmpfrs2)) goto inst_next; fassignS(frd, tmpfrs2); if (nan(tmpfrs1)) goto inst_next; if (tmpfrs2 f<= tmpfrs1) goto inst_next; fassignS(frd, tmpfrs1); } # fmsub.s D,S,T,R,m 00000047 0600007f SIMPLE (0, 0) :fmsub.s frd,frs1S,frs2S,frs3S,FRM is frs1S & frd & frs2S & FRM & frs3S & op0001=0x3 & op0204=0x1 & op0506=0x2 & op2526=0x0 { local tmp:4 = (frs1S f* frs2S) f- frs3S; fassignS(frd, tmp); } # fmul.s D,S,T,m 10000053 fe00007f SIMPLE (0, 0) :fmul.s frd,frs1S,frs2S,FRM is frs1S & frd & frs2S & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x8 { local tmp:4 = frs1S f* frs2S; fassignS(frd, tmp); } # fmv.w.x D,s f0000053 fff0707f SIMPLE (0, 0) :fmv.w.x frd,rs1W is frd & rs1W & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x78 & op2024=0x0 { fassignS(frd, rs1W); } # fmv.x.w d,S e0000053 fff0707f SIMPLE (0, 0) :fmv.x.w rdW,frs1S is frs1S & rdW & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x70 & op2024=0x0 { local tmpreg:4 = &frs1S; local tmp:4 = *[register]:4 tmpreg; rdW = tmp; } # fnmadd.s D,S,T,R,m 0000004f 0600007f SIMPLE (0, 0) :fnmadd.s frd,frs1S,frs2S,frs3S,FRM is frs1S & frd & frs2S & FRM & frs3S & op0001=0x3 & op0204=0x3 
& op0506=0x2 & op2526=0x0 { local tmp:4 = (f- (frs1S f* frs2S)) f- frs3S; fassignS(frd, tmp); } # fnmsub.s D,S,T,R,m 0000004b 0600007f SIMPLE (0, 0) :fnmsub.s frd,frs1S,frs2S,frs3S,FRM is frs1S & frd & frs2S & FRM & frs3S & op0001=0x3 & op0204=0x2 & op0506=0x2 & op2526=0x0 { local tmp:4 = (f- (frs1S f* frs2S)) f+ frs3S; fassignS(frd, tmp); } # fsgnj.s D,S,T 20000053 fe00707f SIMPLE (0, 0) :fsgnj.s frd,frs1S,frs2S is frs1S & frd & frs2S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x10 { local tmp:$(SFLEN) = frs1S; tmp[31,1] = frs2S[31,1]; fassignS(frd, tmp); } # fmv.s D,U 20000053 fe00707f ALIAS (0, 0) :fmv.s frd,frs1S is frd & frs1S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x10 & op1519=op2024 { fassignS(frd, frs1S); } # fsgnjn.s D,S,T 20001053 fe00707f SIMPLE (0, 0) :fsgnjn.s frd,frs1S,frs2S is frs1S & frd & frs2S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x10 { local tmp:$(SFLEN) = frs1S; tmp[31,1] = !frs2S[31,1]; fassignS(frd, tmp); } # fneg.s D,U 20001053 fe00707f ALIAS (0, 0) :fneg.s frd,frs1S is frs1S & frd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x10 & op1519=op2024 { local tmp:4 = f- frs1S; fassignS(frd, tmp); } # fsgnjx.s D,S,T 20002053 fe00707f SIMPLE (0, 0) :fsgnjx.s frd,frs1S,frs2S is frs1S & frd & frs2S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x10 { local tmp:$(SFLEN) = frs1S; tmp[31,1] = tmp[31,1] ^ frs2S[31,1]; fassignS(frd, tmp); } # fabs.s D,U 20002053 fe00707f ALIAS (0, 0) :fabs.s frd,frs1S is frd & frs1S & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x10 & op1519=op2024 { local tmp:4 = abs(frs1S); fassignS(frd, tmp); } # fsqrt.s D,S,m 58000053 fff0007f SIMPLE (0, 0) :fsqrt.s frd,frs1S,FRM is frs1S & frd & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x2c & op2024=0x0 { local tmp:4 = sqrt(frs1S); fassignS(frd, tmp); } # fsub.s D,S,T,m 08000053 fe00007f SIMPLE (0, 0) :fsub.s frd,frs1S,frs2S,FRM is frs1S & frd & frs2S & FRM & 
op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x4 { local tmp:4 = frs1S f- frs2S; fassignS(frd, tmp); } # fsw T,q(s) 00002027 0000707f DWORD|DREF (0, 4) :fsw frs2S,immS(rs1) is frs2S & immS & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x1 & funct3=0x2 { local ea:$(XLEN) = immS + rs1; *[ram]:$(SFLEN) ea = frs2S; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32i.sinc ================================================ # RV32I Base Instruction Set # add d,s,t 00000033 fe00707f SIMPLE (0, 0) :add rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x0 & funct7=0x0 { rd = rs1 + rs2; } # addi d,s,j 00000013 0000707f SIMPLE (0, 0) :addi rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 { rd = rs1 + immI; } # nop 00000013 ffffffff ALIAS (0, 0) :nop is op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 & op0711=0x0 & op1531=0x0 { local NOP:1 = 0; NOP = NOP; } # mv d,s 00000013 fff0707f ALIAS (0, 0) :mv rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 & op2031=0x0 { rd = rs1; } # li d,j 00000013 000ff07f ALIAS (0, 0) :li rd,immI is immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 & op1519=0x0 { #TODO alias of addi rd,zero,0x0 is an issue rd = immI; } # Resolve conflict between: mv rd,zero and li rd,0x0 # ATTN this implementation uses mv rd,zero :mv rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x0 & op1531=0x0 { rd = rs1; } # and d,s,t 00007033 fe00707f SIMPLE (0, 0) :and rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x7 & funct7=0x0 { rd = rs1 & rs2; } # andi d,s,j 00007013 0000707f SIMPLE (0, 0) :andi rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x7 { rd = rs1 & immI; } # auipc d,u 00000017 0000007f SIMPLE (0, 0) :auipc rd,immU is immU & rd & op0001=0x3 & op0204=0x5 & op0506=0x0 { rd = immU + inst_start; } # beq s,t,p 00000063 0000707f CONDBRANCH 
(0, 0) :beq rs1,rs2,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x0 { if (rs1 == rs2) goto immSB; } # bge s,t,p 00005063 0000707f CONDBRANCH (0, 0) :bge rs1,rs2,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x5 { if (rs1 s>= rs2) goto immSB; } # bgeu s,t,p 00007063 0000707f CONDBRANCH (0, 0) :bgeu rs1,rs2,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x7 { if (rs1 >= rs2) goto immSB; } # blt s,t,p 00004063 0000707f CONDBRANCH (0, 0) :blt rs1,rs2,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x4 { if (rs1 s< rs2) goto immSB; } # bltu s,t,p 00006063 0000707f CONDBRANCH (0, 0) :bltu rs1,rs2,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x6 { if (rs1 < rs2) goto immSB; } # bne s,t,p 00001063 0000707f CONDBRANCH (0, 0) :bne rs1,rs2,immSB is immSB & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x3 & funct3=0x1 { if (rs1 != rs2) goto immSB; } # ebreak 00100073 ffffffff SIMPLE (0, 0) :ebreak is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x20 { ebreak(); } # ecall 00000073 ffffffff SIMPLE (0, 0) :ecall is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op1531=0x0 { ecall(); } # fence P,Q 0000000f f00fffff SIMPLE (0, 0) :fence pred,succ is pred & succ & op0001=0x3 & op0204=0x3 & op0506=0x0 & funct3=0x0 & fm=0x0 & op0711=0x0 & op1519=0x0 { fence(); } # jal d,a 0000006f 0000007f JSR (0, 0) # call for rd = RA|T0 set to inst_next :jal rd,immUJ is immUJ & rd & (r0711=1 | r0711=5) & op0001=0x3 & op0204=0x3 & op0506=0x3 { rd = inst_next; call immUJ; } # goto for all other rd set to inst_next :jal rd,immUJ is immUJ & rd & r0711 & op0001=0x3 & op0204=0x3 & op0506=0x3 { rd = inst_next; goto immUJ; } # j a 0000006f 00000fff BRANCH|ALIAS (0, 0) :j immUJ is immUJ & op0001=0x3 & op0204=0x3 & op0506=0x3 & op0711=0x0 { goto immUJ; } # jalr d,s,j 00000067 0000707f JSR (0, 0) # call for rd = RA|T0 set 
to inst_next :jalr rd,rs1,immI is rs1 & immI & rd & (r0711=1 | r0711=5) & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 { local ea:$(XLEN) = (rs1 + immI) & ~1; rd = inst_next; call [ea]; } # goto for all other rd set to inst_next :jalr rd,rs1,immI is rs1 & immI & rd & r0711 & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 { local ea:$(XLEN) = (rs1 + immI) & ~1; rd = inst_next; goto [ea]; } # jr o(s) 00000067 00007fff BRANCH|ALIAS (0, 0) :jr immI(rs1) is immI & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x0 { local ea:$(XLEN) = (rs1 + immI) & ~1; goto [ea]; } # jr s 00000067 fff07fff BRANCH|ALIAS (0, 0) :jr rs1 is rs1 & op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op2031=0x0 { local ea:$(XLEN) = rs1 & ~1; goto [ea]; } # ret 00008067 ffffffff BRANCH|ALIAS (0, 0) :ret is op0001=0x3 & op0204=0x1 & op0506=0x3 & funct3=0x0 & op0711=0x0 & op2031=0x0 & op1519=1 { local ea:$(XLEN) = ra & ~1; return [ea]; } # lb d,o(s) 00000003 0000707f BYTE|DREF (0, 1) :lb rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x0 { local ea:$(XLEN) = rs1 + immI; rd = sext(*[ram]:1 ea); } # lbu d,o(s) 00004003 0000707f BYTE|DREF (0, 1) :lbu rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x4 { local ea:$(XLEN) = rs1 + immI; rd = zext(*[ram]:1 ea); } # lh d,o(s) 00001003 0000707f WORD|DREF (0, 2) :lh rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x1 { local ea:$(XLEN) = rs1 + immI; rd = sext(*[ram]:2 ea); } # lhu d,o(s) 00005003 0000707f WORD|DREF (0, 2) :lhu rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x5 { local ea:$(XLEN) = rs1 + immI; rd = zext(*[ram]:2 ea); } # lui d,u 00000037 0000007f SIMPLE (0, 0) :lui rd,immU is immU & rd & op0001=0x3 & op0204=0x5 & op0506=0x1 { rd = immU; } # lw d,o(s) 00002003 0000707f DWORD|DREF (0, 4) :lw rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x2 { 
local ea:$(XLEN) = rs1 + immI; assignW(rd, *[ram]:4 ea); } # or d,s,t 00006033 fe00707f SIMPLE (0, 0) :or rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x6 & funct7=0x0 { rd = rs1 | rs2; } # ori d,s,j 00006013 0000707f SIMPLE (0, 0) :ori rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x6 { rd = rs1 | immI; } # sb t,q(s) 00000023 0000707f BYTE|DREF (0, 1) :sb rs2,immS(rs1) is immS & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x1 & funct3=0x0 { local ea:$(XLEN) = rs1 + immS; *[ram]:1 ea = rs2:1; } # sh t,q(s) 00001023 0000707f WORD|DREF (0, 2) :sh rs2,immS(rs1) is immS & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x1 & funct3=0x1 { local ea:$(XLEN) = rs1 + immS; *[ram]:2 ea = rs2:2; } # sll d,s,t 00001033 fe00707f SIMPLE (0, 0) :sll rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x1 & funct7=0x0 { local shift:$(XLEN) = rs2 & ($(ADDRSIZE) - 1); rd = rs1 << shift; } # slli d,s,> 00001013 fc00707f SIMPLE (0, 0) :slli rd,rs1,shamt6 is rs1 & shamt6 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x1 & op2631=0x0 { rd = rs1 << shamt6; } # slt d,s,t 00002033 fe00707f SIMPLE (0, 0) :slt rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x2 & funct7=0x0 { rd = zext(rs1 s< rs2); } # slti d,s,j 00002013 0000707f SIMPLE (0, 0) :slti rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x2 { rd = zext(rs1 s< immI); } # sltiu d,s,j 00003013 0000707f SIMPLE (0, 0) :sltiu rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x3 { rd = zext(rs1 < immI); } # sltu d,s,t 00003033 fe00707f SIMPLE (0, 0) :sltu rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x3 & funct7=0x0 { rd = zext(rs1 < rs2); } # sra d,s,t 40005033 fe00707f SIMPLE (0, 0) :sra rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x5 & funct7=0x20 { local shift:$(XLEN) = rs2 & ($(ADDRSIZE) 
- 1); rd = rs1 s>> shift; } # srai d,s,> 40005013 fc00707f SIMPLE (0, 0) :srai rd,rs1,shamt6 is rs1 & shamt6 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x5 & op2631=0x10 { rd = rs1 s>> shamt6; } # srl d,s,t 00005033 fe00707f SIMPLE (0, 0) :srl rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x5 & funct7=0x0 { local shift:$(XLEN) = rs2 & ($(ADDRSIZE) - 1); rd = rs1 >> shift; } # srli d,s,> 00005013 fc00707f SIMPLE (0, 0) :srli rd,rs1,shamt6 is rs1 & shamt6 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x5 & op2631=0x0 { rd = rs1 >> shamt6; } # sub d,s,t 40000033 fe00707f SIMPLE (0, 0) :sub rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x0 & funct7=0x20 { rd = rs1 - rs2; } # neg d,t 40000033 fe0ff07f ALIAS (0, 0) :neg rd,rs2 is rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x0 & funct7=0x20 & op1519=0x0 { rd = -rs2; } # sw t,q(s) 00002023 0000707f DWORD|DREF (0, 4) :sw rs2,immS(rs1) is immS & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x1 & funct3=0x2 { local ea:$(XLEN) = rs1 + immS; *[ram]:4 ea = rs2:4; } # unimp c0001073 ffffffff SIMPLE (0, 0) :unimp is op0001=0x3 & op0204=0x4 & op0506=0x3 & funct3=0x1 & op0711=0x0 & op1531=0x18000 { local excaddr:$(XLEN) = inst_start; local target:$(XLEN) = unimp(excaddr); goto [target]; } # xor d,s,t 00004033 fe00707f SIMPLE (0, 0) :xor rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x4 & funct7=0x0 { rd = rs1 ^ rs2; } # xori d,s,j 00004013 0000707f SIMPLE (0, 0) :xori rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x4 { rd = rs1 ^ immI; } # not d,s fff04013 fff0707f ALIAS (0, 0) :not rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x0 & funct3=0x4 & op2031=0xfff { rd = ~rs1; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32k.sinc ================================================ # RV32 Crypto Extension # NOTE 0.6.2 # 
bs 00001 rs2 rs1 010 rd 0101011 saes32.encs :saes32.encs rd, rs1, rs2, bs is rd & rs1 & rs2 & bs & op0006=0x2b & op1214=0x2 & op2529=0x1 unimpl # bs 00000 rs2 rs1 010 rd 0101011 saes32.encsm :saes32.encsm rd, rs1, rs2, bs is rd & rs1 & rs2 & bs & op0006=0x2b & op1214=0x2 & op2529=0x0 unimpl # bs 00011 rs2 rs1 010 rd 0101011 saes32.decs :saes32.decs rd, rs1, rs2, bs is rd & rs1 & rs2 & bs & op0006=0x2b & op1214=0x2 & op2529=0x3 unimpl # bs 00010 rs2 rs1 010 rd 0101011 saes32.decsm :saes32.decsm rd, rs1, rs2, bs is rd & rs1 & rs2 & bs & op0006=0x2b & op1214=0x2 & op2529=0x2 unimpl # 0000111 00000 rs1 111 rd 0101011 ssha256.sig0 :ssha256.sig0 rd, rs1 is rd & rs1 & op0006=0x2b & op1214=0x7 & op2024=0x00 & op2531=0x07 unimpl # 0000111 00001 rs1 111 rd 0101011 ssha256.sig1 :ssha256.sig1 rd, rs1 is rd & rs1 & op0006=0x2b & op1214=0x7 & op2024=0x01 & op2531=0x07 unimpl # 0000111 00010 rs1 111 rd 0101011 ssha256.sum0 :ssha256.sum0 rd, rs1 is rd & rs1 & op0006=0x2b & op1214=0x7 & op2024=0x02 & op2531=0x07 unimpl # 0000111 00011 rs1 111 rd 0101011 ssha256.sum1 :ssha256.sum1 rd, rs1 is rd & rs1 & op0006=0x2b & op1214=0x7 & op2024=0x03 & op2531=0x07 unimpl # 0001100 rs2 rs1 111 rd 0101011 ssha512.sum0r :ssha512.sum0r rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x2b & op1214=0x7 & op2531=0x0c unimpl # 0001101 rs2 rs1 111 rd 0101011 ssha512.sum1r :ssha512.sum1r rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x2b & op1214=0x7 & op2531=0x0d unimpl # 0001000 rs2 rs1 111 rd 0101011 ssha512.sig0l :ssha512.sig0l rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x2b & op1214=0x7 & op2531=0x08 unimpl # 0001001 rs2 rs1 111 rd 0101011 ssha512.sig0h :ssha512.sig0h rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x2b & op1214=0x7 & op2531=0x09 unimpl # 0001010 rs2 rs1 111 rd 0101011 ssha512.sig1l :ssha512.sig1l rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x2b & op1214=0x7 & op2531=0x0a unimpl # 0001011 rs2 rs1 111 rd 0101011 ssha512.sig1h :ssha512.sig1h rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x2b & op1214=0x7 & 
op2531=0x0b unimpl # 0000111 01000 rs1 111 rd 0101011 ssm3.p0 :ssm3.p0 rd, rs1 is rd & rs1 & op0006=0x2b & op1214=0x7 & op2024=0x08 & op2531=0x07 unimpl # 0000111 01001 rs1 111 rd 0101011 ssm3.p1 :ssm3.p1 rd, rs1 is rd & rs1 & op0006=0x2b & op1214=0x7 & op2024=0x09 & op2531=0x07 unimpl # bs 00100 rs2 rs1 011 rd 0101011 ssm4.ed :ssm4.ed rd, rs1, rs2, bs is rd & rs1 & rs2 & bs & op0006=0x2b & op1214=0x3 & op2529=0x4 unimpl # bs 00101 rs2 rs1 011 rd 0101011 ssm4.ks :ssm4.ks rd, rs1, rs2, bs is rd & rs1 & rs2 & bs & op0006=0x2b & op1214=0x3 & op2529=0x5 unimpl # 0000111 shamtw 01010 111 rd 0101011 pollentropy :pollentropy rd, shamtw is rd & shamtw & op0006=0x2b & op1214=0x7 & op1519=0x0a & op2531=0x07 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32m.sinc ================================================ # RV32M Standard Extension # div d,s,t 02004033 fe00707f SIMPLE (0, 0) :div rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x4 & funct7=0x1 { rd = rs1 s/ rs2; } # divu d,s,t 02005033 fe00707f SIMPLE (0, 0) :divu rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x5 & funct7=0x1 { rd = rs1 / rs2; } # mul d,s,t 02000033 fe00707f SIMPLE (0, 0) :mul rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x0 & funct7=0x1 { rd = rs1 * rs2; } # mulh d,s,t 02001033 fe00707f SIMPLE (0, 0) :mulh rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x1 & funct7=0x1 { local trs1:$(XLEN2) = sext(rs1); local trs2:$(XLEN2) = sext(rs2); local tmp:$(XLEN2) = trs1 * trs2; rd = tmp($(XLEN)); } # mulhsu d,s,t 02002033 fe00707f SIMPLE (0, 0) :mulhsu rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x2 & funct7=0x1 { local trs1:$(XLEN2) = sext(rs1); local trs2:$(XLEN2) = zext(rs2); local tmp:$(XLEN2) = trs1 * trs2; rd = tmp($(XLEN)); } # mulhu d,s,t 02003033 fe00707f SIMPLE (0, 0) :mulhu rd,rs1,rs2 
is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x3 & funct7=0x1 { local trs1:$(XLEN2) = zext(rs1); local trs2:$(XLEN2) = zext(rs2); local tmp:$(XLEN2) = trs1 * trs2; rd = tmp($(XLEN)); } # rem d,s,t 02006033 fe00707f SIMPLE (0, 0) :rem rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x6 & funct7=0x1 { rd = rs1 s% rs2; } # remu d,s,t 02007033 fe00707f SIMPLE (0, 0) :remu rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x4 & op0506=0x1 & funct3=0x7 & funct7=0x1 { rd = rs1 % rs2; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32p.sinc ================================================ # RV32 P Extension # add16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] + rb.H[_x_]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :add16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x20 { local tmp1:$(XLEN) = rs1; local tmp2:$(XLEN) = rs2; rd[ 0,16] = tmp1[ 0,16] + tmp2[ 0,16]; rd[16,16] = tmp1[16,16] + tmp2[16,16]; @if ADDRSIZE == "64" rd[32,16] = tmp1[32,16] + tmp2[32,16]; rd[48,16] = tmp1[48,16] + tmp2[48,16]; @endif } # add64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = a64 + b64; + ; + ; r[tU].r[tL] = t64; :add64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x60 { @if ADDRSIZE == "32" local dst:$(XLEN) = &rd; local src1:$(XLEN) = &rs1; local src2:$(XLEN) = &rs2; *[register]:8 dst = *[register]:8 src1 + *[register]:8 src2; @else rd = rs1 + rs2; @endif } # add8 rt, ra, rb ; rt.B[_x_] = ra.B[_x_] + rb.B[_x_]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :add8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x24 { local tmp1:$(XLEN) = rs1; local tmp2:$(XLEN) = rs2; rd[ 0,8] = tmp1[ 0,8] + tmp2[ 0,8]; rd[ 8,8] = tmp1[ 8,8] + tmp2[ 8,8]; rd[16,8] = tmp1[16,8] + tmp2[16,8]; rd[24,8] = tmp1[24,8] + tmp2[24,8]; @if ADDRSIZE == "64" rd[32,8] = tmp1[32,8] + tmp2[32,8]; rd[40,8] = tmp1[40,8] + tmp2[40,8]; rd[48,8] = tmp1[48,8] + 
tmp2[48,8]; rd[56,8] = tmp1[56,8] + tmp2[56,8]; @endif } # ave rt, ra, rb :ave rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x70 { local tmp:$(XLEN) = rs1 + rs2; rd = (tmp / 2) + (tmp & 1); } # bitrev rt, ra, rb ; msb = rb[4:0]; // RV32 ; msb = rb[5:0]; // RV64 ; rev[0:msb] = ra[msb:0]; ; rt = ZE(rev[msb:0]); :bitrev rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x73 unimpl # bitrevi rt, ra, imm5u ; msb = imm5u; // RV32 ; msb = imm6u; // RV64 ; rev[0:msb] = ra[msb:0]; ; rt = ZE(rev[msb:0]); :bitrevi rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x74 unimpl # bpick rt, ra, rb, rc ; rt[_i_] = rc[_i_]? ra[_i_] : rb[_i_]; + ; (RV32: __i__=31..0, RV64: __i__=63..0) :bpick rd,rs1,rs2,rs3 is op0006=0x3f & rd & rs1 & rs2 & rs3 & funct3=0x3 & funct7=0x00 unimpl # bpick rt, ra, rb, rc ; rt[_i_] = rc[_i_]? ra[_i_] : rb[_i_]; + ; (RV32: __i__=31..0, RV64: __i__=63..0) :bpick rd,rs1,rs2,rs3 is op0006=0x3f & rd & rs1 & rs2 & rs3 & funct3=0x3 & funct7=0x04 unimpl # clo16 rt, ra :clo16 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x0a unimpl # clo32 rt, ra ; rt.W[_x_] = CLO(ra.W[_x_]) ; (RV32: __x__=0, RV64: __x__=1..0) :clo32 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x12 unimpl # clo8 rt, ra :clo8 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x02 unimpl # clrs16 rt, ra :clrs16 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x08 unimpl # clrs32 rt, ra ; rt.W[_x_] = CLRS(ra.W[_x_]) ; (RV32: __x__=0, RV64: __x__=1..0) :clrs32 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x10 unimpl # clrs8 rt, ra :clrs8 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x00 unimpl # clz16 rt, ra :clz16 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x09 unimpl # clz32 rt, ra ; rt.W[_x_] = CLZ(ra.W[_x_]) ; (RV32: __x__=0, RV64: __x__=1..0) :clz32 rd,rs1 is op0006=0x3f & rd & rs1 & 
funct3=0x0 & funct7=0x57 & subf5=0x11 unimpl # clz8 rt, ra :clz8 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x57 & subf5=0x01 unimpl # cmpeq16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] == rb.H[_x_])? 0xffff : 0; ; (RV32: __x__=1..0, RV64: __x__=3..0) :cmpeq16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x26 unimpl # cmpeq8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] == rb.B[_x_])? 0xff : 0; ; (RV32: __x__=3..0, RV64: __x__=7..0) :cmpeq8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x27 unimpl # cras16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] + rb.H[_x-1_]; + ; rt.H[_x-1_] = ra.H[_x-1_] – rb.H[_x_]; ; (RV32: __x__=1, RV64: __x__=1,3) :cras16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x22 unimpl # crsa16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] - rb.H[_x-1_]; + ; rt.H[_x-1_] = ra.H[_x-1_] + rb.H[_x_]; ; (RV32: __x__=1, RV64: __x__=1,3) :crsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x23 unimpl # insb rt, ra, imm3u ; byte_idx = imm2u; // RV32 ; byte_idx = imm3u; // RV64 ; rt.B[byte_idx] = ra.B[0]; :insb rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x00 unimpl # kabs16 rt, ra ; rt.H[_x_] = SAT.Q15(ABS(ra.H[_x_])); ; (RV32: __x__=1..0, RV64: __x__=3..0) :kabs16 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x11 unimpl # kabs32 rt, ra ; rt.W[_x_] = SAT.Q31(ABS(ra.W[_x_])); ; (RV64: __x__=1..0) :kabs32 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x12 unimpl # kabs8 rt, ra ; rt.B[_x_] = SAT.Q7(ABS(ra.B[_x_])); ; (RV32: __x__=3..0, RV64: __x__=7..0) :kabs8 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x10 unimpl # kabsw rt, ra ; rt = SAT.Q31(ABS(ra)); // RV32 ; rt = SE(SAT.Q31(ABS(ra.W[_0_]))); // RV64 :kabsw rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x14 unimpl # kadd16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] + rb.H[_x_]); ; (RV32: __x__=1..0, RV64: __x__=3..0) :kadd16 rd,rs1,rs2 
is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x08 unimpl # kadd64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = SAT.Q63(a64 + b64); + ; + ; r[tU].r[tL] = t64; :kadd64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x48 unimpl # kadd8 rt, ra, rb ; rt.B[_x_] = SAT.Q7(ra.B[_x_] + rb.B[_x_]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :kadd8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x0c unimpl :kaddh rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x02 unimpl :kaddw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x00 unimpl # kcras16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] + rb.H[_x-1_]); + ; rt.H[_x-1_] = SAT.Q15(ra.H[_x-1_] – rb.H[_x_]); ; (RV32: __x__=1, RV64: __x__=1,3) :kcras16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x0a unimpl # kcrsa16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] - rb.H[_x-1_]); + ; rt.H[_x-1_] = SAT.Q15(ra.H[_x-1_] + rb.H[_x_]); ; (RV32: __x__=1, RV64: __x__=1,3) :kcrsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x0b unimpl :kdmabb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x69 unimpl :kdmabb16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x6b unimpl :kdmabt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x71 unimpl :kdmabt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x74 unimpl :kdmatt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x79 unimpl :kdmatt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x7c unimpl :kdmbb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x05 unimpl :kdmbb16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x6c unimpl :kdmbt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x0d unimpl :kdmbt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x75 unimpl :kdmtt rd,rs1,rs2 is op0006=0x3f 
& rd & rs1 & rs2 & funct3=0x1 & funct7=0x15 unimpl :kdmtt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x7d unimpl # khm16 rt, ra, rb ; rt.H[_x_] = SAT.Q15((ra.H[_x_] s* rb.H[_x_]) >> 15); ; (RV32: __x__=1..0, RV64: __x__=3..0) :khm16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x43 unimpl # khm8 rt, ra, rb ; rt.B[_x_] = SAT.Q7((ra.B[_x_] s* rb.B[_x_]) >> 7); ; (RV32: __x__=3..0, RV64: __x__=7..0) :khm8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x47 unimpl :khmbb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x06 unimpl :khmbb16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x6d unimpl :khmbt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x0e unimpl :khmbt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x76 unimpl :khmtt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x16 unimpl :khmtt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x7e unimpl # khmx16 rt, ra, rb ; rt.H[_x_] = SAT.Q15((ra.H[_x_] s* rb.H[_y_]) >> 15); ; (RV32: (_x,y_)=(1,0), (0,1), + ; RV64: (_x,y_)=(3,2),(2,3),(1,0), (0,1)) :khmx16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x4b unimpl # khmx8 rt, ra, rb ; rt.B[_x_] = SAT.Q7((ra.B[_x_] s* rb.B[_y_]) >> 7); ; (RV32: (_x,y_)=(3,2),(2,3),(1,0), (0,1), + ; RV64: (_x,y_)=(7,6),(6,7),(5,4), (4,5), (3,2), (2,3), (1,0), (0,1)) :khmx8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x4f unimpl # kmabb rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[0]*rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmabb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x2d unimpl # kmabt rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[0]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmabt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x35 unimpl # kmada rt, ra, rb ; rt.W[_x_] = 
SAT.Q31(rt.W[_x_] + ra.W[_x_].H[1]*rb.W[_x_].H[1] + ra.W[_x_].H[0]*rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmada rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x24 unimpl # kmadrs rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[0]*rb.W[_x_].H[0] - ra.W[_x_].H[1]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmadrs rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x36 unimpl # kmads rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[1]*rb.W[_x_].H[1] - ra.W[_x_].H[0]*rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmads rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x2e unimpl # kmar64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = SAT.Q63(c64 + ra*rb); + ; + ; r[tU].r[tL] = t64; :kmar64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x4a unimpl # kmatt rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[1]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmatt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x3d unimpl # kmaxda rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[1]*rb.W[_x_].H[0] + ra.W[_x_].H[0]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmaxda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x25 unimpl # kmaxds rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + ra.W[_x_].H[1]*rb.W[_x_].H[0] - ra.W[_x_].H[0]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmaxds rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x3e unimpl # kmda rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_].H[1]*rb.W[_x_].H[1] + ra.W[_x_].H[0]*rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x1c unimpl # kmmac rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + (ra.W[_x_]*rb.W[_x_])[63:32]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmmac rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x30 unimpl :kmmac.u rd,rs1,rs2 
is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x38 unimpl # kmmawb rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + (ra.W[_x_]*rb.W[_x_].H[0])[47:16]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmmawb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x23 unimpl :kmmawb.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x2b unimpl # kmmawb2 rt, ra, rb ; a[_x_]=ra.W[_x_]; b[_x_]=rb.W[_x_]; ; if ((a[_x_]==0x80000000) & (b[_x_].L==0x8000)) \{ ; t[_x_]=0x7fffffff; OV=1;} else \{ ; t[_x_]= ((a[_x_]*b[_x_].L)<<1)[47:16]; ; } ; rt.W[_x_] = SAT.Q31(rt.W[x] + t[_x_]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmmawb2 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x67 unimpl :kmmawb2.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x6e unimpl # kmmawt rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] + (ra.W[_x_]*rb.W[_x_].H[1])[47:16]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmmawt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x33 unimpl :kmmawt.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x3b unimpl # kmmsb rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] - (ra.W[_x_]*rb.W[_x_])[63:32]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmmsb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x21 unimpl :kmmsb.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x29 unimpl # kmmwb2 rt, ra, rb ; a[_x_]=ra.W[_x_]; b[_x_]=rb.W[_x_]; ; if ((a[_x_]==0x80000000) & (b[_x_].L==0x8000)) \{ ; t[_x_]=0x7fffffff; OV=1;} else \{ ; t[_x_]= ((a[_x_]*b[_x_].L)<<1)[47:16]; ; } ; rt.W[_x_] = t[_x_]; ; (RV32: __x__=0, RV64: __x__=1..0) :kmmwb2 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x47 unimpl :kmmwb2.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x4f unimpl # kmmwt2 rt, ra, rb ; a[_x_]=ra.W[_x_]; b[_x_]=rb.W[_x_]; ; if ((a[_x_]==0x80000000) & (b[_x_].H==0x8000)) \{ ; t[_x_]=0x7fffffff; OV=1;} else \{ ; t[_x_]= 
((a[_x_]*b[_x_].H)<<1)[47:16]; ; } ; rt.W[_x_] = t[_x_]; ; (RV32: __x__=0, RV64: __x__=1..0) :kmmwt2 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x57 unimpl # kmmwt2 rt, ra, rb ; a[_x_]=ra.W[_x_]; b[_x_]=rb.W[_x_]; ; if ((a[_x_]==0x80000000) & (b[_x_].H==0x8000)) \{ ; t[_x_]=0x7fffffff; OV=1;} else \{ ; t[_x_]= ((a[_x_]*b[_x_].H)<<1)[47:16]; ; } ; rt.W[_x_] = t[_x_]; ; (RV32: __x__=0, RV64: __x__=1..0) :kmmwt2 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x77 unimpl :kmmwt2.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x5f unimpl :kmmwt2.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x7f unimpl # kmsda rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] - ra.W[_x_].H[1]*rb.W[_x_].H[1] - ra.W[_x_].H[0]*rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmsda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x26 unimpl # kmsr64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = SAT.Q63(c64 – ra*rb); + ; + ; r[tU].r[tL] = t64; :kmsr64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x4b unimpl # kmsxda rt, ra, rb ; rt.W[_x_] = SAT.Q31(rt.W[_x_] - ra.W[_x_].H[1]*rb.W[_x_].H[0] - ra.W[_x_].H[0]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmsxda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x27 unimpl # kmxda rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_].H[1]*rb.W[_x_].H[0] + ra.W[_x_].H[0]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :kmxda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x1d unimpl :ksll rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x13 unimpl # ksll16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] << im4u); ; (RV32: __x__=1..0, RV64: __x__=3..0) :ksll16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x32 unimpl # ksll8 rt, ra, rb ; rt.B[_x_] = SAT.Q7(ra.B[_x_] << rb[2:0]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :ksll8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & 
funct3=0x0 & funct7=0x36 unimpl :kslli rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x1b unimpl # kslra16 rt, ra, rb ; if (rb[4:0] < 0) + ; rt.H[_x_] = ra.H[_x_] s>> -rb[4:0]; ; if (rb[4:0] > 0) + ; rt.H[_x_] = SAT.Q15(ra.H[_x_] << rb[4:0]); ; (RV32: __x__=1..0, RV64: __x__=3..0) :kslra16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x2b unimpl :kslra16.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x33 unimpl # kslra8 rt, ra, rb ; if (rb[3:0] < 0) + ; rt.B[_x_] = ra.B[_x_] s>> -rb[3:0]; ; if (rb[3:0] > 0) + ; rt.B[_x_] = SAT.Q7(ra.B[_x_] << rb[3:0]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :kslra8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x2f unimpl :kslra8.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x37 unimpl :kslraw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x37 unimpl :kslraw.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x3f unimpl # ksub16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] - rb.H[_x_]); ; (RV32: __x__=1..0, RV64: __x__=3..0) :ksub16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x09 unimpl # ksub64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = SAT.Q63(a64 - b64); + ; + ; r[tU].r[tL] = t64; :ksub64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x49 unimpl # ksub8 rt, ra, rb ; rt.B[_x_] = SAT.Q7(ra.B[_x_] - rb.B[_x_]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :ksub8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x0d unimpl :ksubh rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x03 unimpl :ksubw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x01 unimpl # kwmmul rt, ra, rb ; rt.W[_x_] = SAT.Q31((ra.W[_x_]*rb.W[_x_] << 1)[63:32]); ; (RV32: __x__=0, RV64: __x__=1..0) :kwmmul rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x31 unimpl :kwmmul.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 
& rs2 & funct3=0x1 & funct7=0x39 unimpl # maddr32 rt, ra, tb ; 11 :maddr32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x62 unimpl # maxw rt, ra, rb ; if (ra.W[0] >= rb.W[0]) \{ ; rt = SE(ra.W[0]); ; else \{ ; rt = SE(rb.W[0]); ; } :maxw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x79 unimpl # minw rt, ra, rb ; if (ra.W[0] >= rb.W[0]) \{ ; rt = SE(rb.W[0]); ; else \{ ; rt = SE(ra.W[0]); ; } :minw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x78 unimpl # msubr32 rt, ra, tb ; === ; <<< ; === RV64 Only Instructions ; The following tables list instructions that are only present in RV64. ; There are 30 SIMD 32-bit addition or subtraction instructions. ; .(RV64 Only) SIMD 32-bit Add/Subtract Instructions ; [cols="^.^1,<.^2,<.^2,<.^4",options="header",] :msubr32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x63 unimpl :mtlbi rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x7c unimpl :mtlei rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x7d unimpl # mulr64 rt, ra, rb ; RV32: ; mres[63:0] = ra u* rb; ; r[tU] = mres.W[1]; ; r[tL] = mres.W[0]; ; RV64: ; rt = ra.W[0] u* rb.W[0]; :mulr64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x78 unimpl # mulsr64 rt, ra, rb ; RV32: ; mres[63:0] = ra s* rb; ; r[tU] = mres.W[1]; ; r[tL] = mres.W[0]; ; RV64: ; rt = ra.W[0] s* rb.W[0]; :mulsr64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x70 unimpl :oneop rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x56 unimpl :oneop2 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x57 unimpl # pbsad rt, ra, rb ; absdiff[_x_] = ABS(ra.B[_x_] – rb.B[_x_]); ; rt = SUM(absdiff[_x_]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :pbsad rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x7e unimpl # pbsada rt, ra, rb ; absdiff[_x_] = ABS(ra.B[_x_] – rb.B[_x_]); ; rt = rt + SUM(absdiff[_x_]); ; (RV32: 
__x__=3..0, RV64: __x__=7..0) :pbsada rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x7f unimpl # pkbb16 rt, ra, rb ; rt.W[_x_] = CONCAT(ra.W[_x_].H[0], rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :pkbb16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x07 unimpl # pkbt16 rt, ra, rb ; rt.W[_x_] = CONCAT(ra.W[_x_].H[0], rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :pkbt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x0f unimpl # pktb16 rt, ra, rb ; rt.W[_x_] = CONCAT(ra.W[_x_].H[1], rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :pktb16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x1f unimpl # pktt16 rt, ra, rb ; rt.W[_x_] = CONCAT(ra.W[_x_].H[1], rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :pktt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x17 unimpl # radd16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] + rb.H[_x_]) s>> 1; ; (RV32: __x__=1..0, RV64: __x__=3..0) :radd16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x00 unimpl # radd64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = (a64 + b64) s>>1; + ; + ; r[tU].r[tL] = t64; :radd64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x40 unimpl # radd8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] + rb.B[_x_]) s>> 1; ; (RV32: __x__=3..0, RV64: __x__=7..0) :radd8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x04 unimpl # raddw rt, ra, rb ; res = (ra.W[0] + rb.W[0]) s>> 1; ; rt = SE(res); :raddw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x10 unimpl # rcras16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] + rb.H[_x-1_]) s>> 1; + ; rt.H[_x-1_] = (ra.H[_x-1_] – rb.H[_x_]) s>> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :rcras16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x02 unimpl # rcrsa16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] - rb.H[_x-1_]) s>> 1; + ; rt.H[_x-1_] = (ra.H[_x-1_] + rb.H[_x_]) s>> 1; ; (RV32: 
__x__=1, RV64: __x__=1,3) :rcrsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x03 unimpl # rsub16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] - rb.H[_x_]) s>> 1; ; (RV32: __x__=1..0, RV64: __x__=3..0) :rsub16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x01 unimpl # rsub64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = (a64 - b64) s>>1; + ; + ; r[tU].r[tL] = t64; :rsub64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x41 unimpl # rsub8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] - rb.B[_x_]) s>> 1; ; (RV32: __x__=3..0, RV64: __x__=7..0) :rsub8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x05 unimpl # rsubw rt, ra, rb ; res = (ra.W[0] - rb.W[0]) s>> 1; ; rt = SE(res); :rsubw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x11 unimpl :sclip16 rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=0 & funct3=0x0 & funct7=0x42 unimpl :uclip16 rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=1 & funct3=0x0 & funct7=0x42 unimpl # sclip32 rt, ra, imm5u ; n = imm5u; + ; rt = SAT.Qn(ra.W[_x_]); ; (RV32: __x__=0, RV64: __x__=1..0) :sclip32 rd,rs1,imm5u is op0006=0x3f & rd & rs1 & imm5u & funct3=0x0 & funct7=0x72 unimpl :sclip8 rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=0 & funct3=0x0 & funct7=0x46 unimpl :uclip8 rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=2 & funct3=0x0 & funct7=0x46 unimpl # scmple16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] {le} rb.H[_x_])? 0xffff : 0; ; (RV32: __x__=1..0, RV64: __x__=3..0) :scmple16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x0e unimpl # scmple8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] {le} rb.B[_x_])? 0xff : 0; ; (RV32: __x__=3..0, RV64: __x__=7..0) :scmple8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x0f unimpl # scmplt16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] < rb.H[_x_])? 
0xffff : 0; ; (RV32: __x__=1..0, RV64: __x__=3..0) :scmplt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x06 unimpl # scmplt8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] < rb.B[_x_])? 0xff : 0; ; (RV32: __x__=3..0, RV64: __x__=7..0) :scmplt8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x07 unimpl # sll16 rt, ra, rb ; rt.H[_x_] = ra.H__x__ << rb[3:0]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :sll16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x2a unimpl # sll8 rt, ra, rb ; rt.B[_x_] = ra.B[_x_] << rb[2:0]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :sll8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x2e unimpl :slli16 rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=0 & funct3=0x0 & funct7=0x3a unimpl :kslli16 rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=1 & funct3=0x0 & funct7=0x3a unimpl :slli8 rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=0 & funct3=0x0 & funct7=0x3e unimpl :kslli8 rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=1 & funct3=0x0 & funct7=0x3e unimpl # smal rt, ra, rb ; RV32: ; a64 = r[aU].r[aL]; + ; t64 = a64 + rb.W[_0_].H[1]*rb.W[_0_].H[0]; + ; r[tU].r[tL] = t64; ; RV64: ; a64 = ra; + ; rt = a64 + rb.W[_1_].H[1]*rb.W[_1_].H[0] + rb.W[_0_].H[1]*rb.W[_0_].H[0]; :smal rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x2f unimpl # smalbb rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.L*rb.L; + ; + ; r[tU].r[tL] = t64; :smalbb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x44 unimpl # smalbt rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.L*rb.H; + ; + ; r[tU].r[tL] = t64; :smalbt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x4c unimpl # smalda rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.H*rb.H + ra.L*rb.L; + ; + ; r[tU].r[tL] = t64; :smalda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x46 unimpl # smaldrs rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; 
t64 = c64 + ra.L*rb.L - ra.H*rb.H; + ; + ; r[tU].r[tL] = t64; :smaldrs rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x4d unimpl # smalds rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.H*rb.H - ra.L*rb.L; + ; + ; r[tU].r[tL] = t64; :smalds rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x45 unimpl # smaltt rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.H*rb.H; + ; + ; r[tU].r[tL] = t64; :smaltt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x54 unimpl # smalxda rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.H*rb.L + ra.L*rb.H; + ; + ; r[tU].r[tL] = t64; :smalxda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x4e unimpl # smalxds rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra.H*rb.L - ra.L*rb.H; + ; + ; r[tU].r[tL] = t64; :smalxds rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x55 unimpl # smaqa rt, ra, rb ; rt.W[x] = rt.W[x] + ra.W[x].B[3]*rb.W[x].B[3] + ra.W[x].B[2]*rb.W[x].B[2] ; + ra.W[x].B[1]*rb.W[x].B[1] + ra.W[x].B[0]*rb.W[x].B[0]); ; (RV32: x=0, RV64: x=1..0) ; Elements of ra and rb are signed numbers. :smaqa rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x64 unimpl # smaqa.su rt, ra, rb ; rt.W[x] = rt.W[x] + ra.W[x].B[3]*rb.W[x].B[3] + ra.W[x].B[2]*rb.W[x].B[2] + ; ra.W[x].B[1]*rb.W[x].B[1] + ra.W[x].B[0]*rb.W[x].B[0]); ; (RV32: x=0, RV64: x=1..0) ; Elements of ra are signed numbers. ; Elements of rb are unsigned numbers. :smaqa.su rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x65 unimpl # smar64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra*rb; // signed + ; + ; r[tU].r[tL] = t64; :smar64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x42 unimpl # smax16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] > rb.H[_x_])? 
ra.H[_x_] : rb.H[_x_]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :smax16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x41 unimpl # smax8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] > rb.B[_x_])? ra.B[_x_] : rb.B[_x_]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :smax8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x45 unimpl # smbb16 rt, ra, rb ; rt.W[_x_] = ra.W[_x_].H[0]*rb.W[_x_].H[0]; ; (RV32: __x__=0, RV64: __x__=1..0) :smbb16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x04 unimpl # smbt16 rt, ra, rb ; rt.W[_x_] = ra.W[_x_].H[0]*rb.W[_x_].H[1]; ; (RV32: __x__=0, RV64: __x__=1..0) :smbt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x0c unimpl # smdrs rt, ra, rb ; rt.W[_x_] = (ra.W[_x_].H[0]*rb.W[_x_].H[0]) - (ra.W[_x_].H[1]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :smdrs rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x34 unimpl # smds rt, ra, rb ; rt.W[_x_] = (ra.W[_x_].H[1]*rb.W[_x_].H[1]) - (ra.W[_x_].H[0]*rb.W[_x_].H[0]); ; (RV32: __x__=0, RV64: __x__=1..0) :smds rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x2c unimpl # smin16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] < rb.H[_x_])? ra.H[_x_] : rb.H[_x_]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :smin16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x40 unimpl # smin8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] < rb.B[_x_])? 
ra.B[_x_] : rb.B[_x_]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :smin8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x44 unimpl # smmul rt, ra, rb ; rt.W[_x_] = (ra.W[_x_]*rb.W[_x_])[63:32]; ; (RV32: __x__=0, RV64: __x__=1..0) :smmul rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x20 unimpl :smmul.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x28 unimpl # smmwb rt, ra, rb ; rt.W[_x_] = (ra.W[_x_]*rb.W[_x_].H[0])[47:16]; ; (RV32: __x__=0, RV64: __x__=1..0) :smmwb rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x22 unimpl :smmwb.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x2a unimpl # smmwt rt, ra, rb ; rt.W[_x_] = (ra.W[_x_]*rb.W[_x_].H[1])[47:16]; ; (RV32: __x__=0, RV64: __x__=1..0) :smmwt rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x32 unimpl :smmwt.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x3a unimpl # smslda rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 - ra.H*rb.H - ra.L*rb.L; + ; + ; r[tU].r[tL] = t64; :smslda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x56 unimpl # smslxda rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 - ra.H*rb.L - ra.L*rb.H; + ; + ; r[tU].r[tL] = t64; :smslxda rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x5e unimpl # smsr64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 - ra*rb; // signed + ; + ; r[tU].r[tL] = t64; :smsr64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x43 unimpl # smtt16 rt, ra, rb ; rt.W[_x_] = ra.W[_x_].H[1]*rb.W[_x_].H[1]; ; (RV32: __x__=0, RV64: __x__=1..0) :smtt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x14 unimpl # smul16 rt, ra, rb ; RV32: ; r[tL] = ra.H[_0_] s* rb.H[_0_]; ; r[tH] = ra.H[_1_] s* rb.H[_1_]; ; RV64: ; rt.W[_0_] = ra.H[_0_] s* rb.H[_0_]; ; rt.W[_1_] = ra.H[_1_] s* rb.H[_1_]; :smul16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x50 unimpl 
:smul16h rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x60 unimpl # smul8 rt, ra, rb ; RV32: ; r[tL].H[_0_] = ra.B[_0_] s* rb.B[_0_]; ; r[tL].H[_1_] = ra.B[_1_] s* rb.B[_1_]; ; r[tH].H[_0_] = ra.B[_2_] s* rb.B[_2_]; ; r[tH].H[_1_] = ra.B[_3_] s* rb.B[_3_]; ; RV64: ; rt.H[_0_] = ra.B[_0_] s* rb.B[_0_]; ; rt.H[_1_] = ra.B[_1_] s* rb.B[_1_]; ; rt.H[_2_] = ra.B[_2_] s* rb.B[_2_]; ; rt.H[_3_] = ra.B[_3_] s* rb.B[_3_]; :smul8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x54 unimpl # smulx16 rt, ra, rb ; RV32: ; r[tL] = ra.H[_0_] s* rb.H[_1_]; ; r[tH] = ra.H[_1_] s* rb.H[_0_]; ; RV64: ; rt.W[_0_] = ra.H[_0_] s* rb.H[_1_]; ; rt.W[_1_] = ra.H[_1_] s* rb.H[_0_]; :smulx16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x51 unimpl :smulx16h rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x61 unimpl # smulx8 rt, ra, rb ; RV32: ; r[tL].H[_0_] = ra.B[_0_] s* rb.B[_1_]; ; r[tL].H[_1_] = ra.B[_1_] s* rb.B[_0_]; ; r[tH].H[_0_] = ra.B[_2_] s* rb.B[_3_]; ; r[tH].H[_1_] = ra.B[_3_] s* rb.B[_2_]; ; RV64: ; rt.H[_0_] = ra.B[_0_] s* rb.B[_1_]; ; rt.H[_1_] = ra.B[_1_] s* rb.B[_0_]; ; rt.H[_2_] = ra.B[_2_] s* rb.B[_3_]; ; rt.H[_3_] = ra.B[_3_] s* rb.B[_2_]; :smulx8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x55 unimpl # smxds rt, ra, rb ; rt.W[_x_] = (ra.W[_x_].H[1]*rb.W[_x_].H[0]) - (ra.W[_x_].H[0]*rb.W[_x_].H[1]); ; (RV32: __x__=0, RV64: __x__=1..0) :smxds rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x3c unimpl :sra.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x12 unimpl # sra16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] s>> rb[3:0]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :sra16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x28 unimpl :sra16.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x30 unimpl # sra8 rt, ra, rb ; rt.B[_x_] = ra.B[_x_] s>> rb[2:0]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :sra8 rd,rs1,rs2 is 
op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x2c unimpl :sra8.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x34 unimpl :srai.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x6a unimpl :srai16 rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=0 & funct3=0x0 & funct7=0x38 unimpl :srai16.u rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=1 & funct3=0x0 & funct7=0x38 unimpl :srai8 rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=0 & funct3=0x0 & funct7=0x3c unimpl :srai8.u rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=1 & funct3=0x0 & funct7=0x3c unimpl :sraiw.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x1a unimpl # srl16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] u>> rb[3:0]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :srl16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x29 unimpl :srl16.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x31 unimpl # srl8 rt, ra, rb ; rt.B[_x_] = ra.B[_x_] u>> rb[2:0]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :srl8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x2d unimpl :srl8.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x35 unimpl :srli16 rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=0 & funct3=0x0 & funct7=0x39 unimpl :srli16.u rd,rs1,imm4u is op0006=0x3f & rd & rs1 & imm4u & op2424=1 & funct3=0x0 & funct7=0x39 unimpl :srli8 rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=0 & funct3=0x0 & funct7=0x3d unimpl :srli8.u rd,rs1,imm3u is op0006=0x3f & rd & rs1 & imm3u & op2324=1 & funct3=0x0 & funct7=0x3d unimpl # sub16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] - rb.H[_x_]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :sub16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x21 unimpl # sub64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = a64 - b64; + ; + ; r[tU].r[tL] = t64; :sub64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & 
funct3=0x1 & funct7=0x61 unimpl # sub8 rt, ra, rb ; rt.B[_x_] = ra.B[_x_] - rb.B[_x_]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :sub8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x25 unimpl # sunpkd810 rt, ra ; rt.H[_x_] = SE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,1), (0,0) ; RV64: (_x,y_) = (3,5),(2,4),(1,1), (0,0) :sunpkd810 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x08 unimpl # sunpkd820 rt, ra ; rt.H[_x_] = SE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,2), (0,0) ; RV64: (_x,y_) = (3,6),(2,4),(1,2), (0,0) :sunpkd820 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x09 unimpl # sunpkd830 rt, ra ; rt.H[_x_] = SE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,3), (0,0) ; RV64: (_x,y_) = (3,7),(2,4),(1,3), (0,0) :sunpkd830 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x0a unimpl # sunpkd831 rt, ra ; rt.H[_x_] = SE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,3), (0,1) ; RV64: (_x,y_) = (3,7),(2,5),(1,3), (0,1) :sunpkd831 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x0b unimpl # sunpkd832 rt, ra ; rt.H[_x_] = SE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,3), (0,2) ; RV64: (_x,y_) = (3,7),(2,6),(1,3), (0,2) :sunpkd832 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x13 unimpl # swap16 rt, ra ; === ; <<< ; ==== 8-bit Misc Instructions ; There are 11 instructions here. ; .SIMD 8-bit Miscellaneous Instructions ; [cols="^.^1,<.^2,<.^2,<.^4",options="header",] :swap16 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x19 unimpl # swap8 rt, ra ; === ; <<< ; ==== 8-bit Unpacking Instructions ; There are 8 instructions here. 
; .8-bit Unpacking Instructions ; [cols="^.^1,<.^2,<.^2,<.^4",options="header",] :swap8 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x18 unimpl # uclip32 rt, ra, imm5u ; m = imm5u; + ; rt = SAT.Um(ra.W[_x_]); ; (RV32: __x__=0, RV64: __x__=1..0) :uclip32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x7a unimpl # ucmple16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] u{le} rb.H[_x_])? 0xffff : 0; ; (RV32: __x__=1..0, RV64: __x__=3..0) :ucmple16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x1e unimpl # ucmple8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] u{le} rb.B[_x_])? 0xff : 0; ; (RV32: __x__=3..0, RV64: __x__=7..0) :ucmple8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x1f unimpl # ucmplt16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] u< rb.H[_x_])? 0xffff : 0; ; (RV32: __x__=1..0, RV64: __x__=3..0) :ucmplt16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x16 unimpl # ucmplt8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] u< rb.B[_x_])? 
0xff : 0; ; (RV32: __x__=3..0, RV64: __x__=7..0) :ucmplt8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x17 unimpl # ukadd16 rt, ra, rb ; rt.H[_x_] = SAT.U16(ra.H[_x_] + rb.H[_x_]); ; (RV32: __x__=1..0, RV64: __x__=3..0) :ukadd16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x18 unimpl # ukadd64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = SAT.U64(a64 + b64); + ; + ; r[tU].r[tL] = t64; :ukadd64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x58 unimpl # ukadd8 rt, ra, rb ; rt.B[_x_] = SAT.U8(ra.B[_x_] + rb.B[_x_]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :ukadd8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x1c unimpl :ukaddh rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x0a unimpl :ukaddw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x08 unimpl # ukcras16 rt, ra, rb ; rt.H[_x_] = SAT.U16(ra.H[_x_] + rb.H[_x-1_]); + ; rt.H[_x-1_] = SAT.U16(ra.H[_x-1_] - rb.H[_x_]); ; (RV32: __x__=1, RV64: __x__=1,3) :ukcras16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x1a unimpl # ukcrsa16 rt, ra, rb ; rt.H[_x_] = SAT.U16(ra.H[_x_] - rb.H[_x-1_]); + ; rt.H[_x-1_] = SAT.U16(ra.H[_x-1_] + rb.H[_x_]); ; (RV32: __x__=1, RV64: __x__=1,3) :ukcrsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x1b unimpl # ukmar64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = SAT.U64(c64 + ra*rb); + ; + ; r[tU].r[tL] = t64; :ukmar64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x5a unimpl # ukmsr64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = SAT.U64(c64 - ra*rb); + ; + ; r[tU].r[tL] = t64; :ukmsr64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x5b unimpl # uksub16 rt, ra, rb ; rt.H[_x_] = SAT.U16(ra.H[_x_] - rb.H[_x_]); ; (RV32: __x__=1..0, RV64: __x__=3..0) :uksub16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x19 unimpl # uksub64 rt, ra, rb ; a64 = r[aU].r[aL];
b64 = r[bU].r[bL]; + ; + ; t64 = SAT.U64(a64 - b64); + ; + ; r[tU].r[tL] = t64; :uksub64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x59 unimpl # uksub8 rt, ra, rb ; rt.B[_x_] = SAT.U8(ra.B[_x_] - rb.B[_x_]); ; (RV32: __x__=3..0, RV64: __x__=7..0) :uksub8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x1d unimpl :uksubh rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x0b unimpl :uksubw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x09 unimpl # umaqa rt, ra, rb ; rt.W[x] = rt.W[x] + ra.W[x].B[3]*rb.W[x].B[3] + ra.W[x].B[2]*rb.W[x].B[2] ; + ra.W[x].B[1]*rb.W[x].B[1] + ra.W[x].B[0]*rb.W[x].B[0]); ; (RV32: x=0, RV64: x=1..0) ; Elements of ra and rb are unsigned numbers. :umaqa rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x66 unimpl # umar64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 + ra*rb; // unsigned + ; + ; r[tU].r[tL] = t64; :umar64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x52 unimpl # umax16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] u> rb.H[_x_])? ra.H[_x_] : rb.H[_x_]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :umax16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x49 unimpl # umax8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] u> rb.B[_x_])? ra.B[_x_] : rb.B[_x_]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :umax8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x4d unimpl # umin16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] u< rb.H[_x_])? ra.H[_x_] : rb.H[_x_]; ; (RV32: __x__=1..0, RV64: __x__=3..0) :umin16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x48 unimpl # umin8 rt, ra, rb ; rt.B[_x_] = (ra.B[_x_] u< rb.B[_x_])? 
ra.B[_x_] : rb.B[_x_]; ; (RV32: __x__=3..0, RV64: __x__=7..0) :umin8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x4c unimpl # umsr64 rt, ra, rb ; c64 = r[tU].r[tL]; + ; + ; t64 = c64 - ra*rb; // unsigned + ; + ; r[tU].r[tL] = t64; :umsr64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x53 unimpl # umul16 rt, ra, rb ; RV32: ; r[tL] = ra.H[_0_] u* rb.H[_0_]; ; r[tH] = ra.H[_1_] u* rb.H[_1_]; ; RV64: ; rt.W[_0_] = ra.H[_0_] u* rb.H[_0_]; ; rt.W[_1_] = ra.H[_1_] u* rb.H[_1_]; :umul16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x58 unimpl :umul16h rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x68 unimpl # umul8 rt, ra, rb ; RV32: ; r[tL].H[_0_] = ra.B[_0_] u* rb.B[_0_]; ; r[tL].H[_1_] = ra.B[_1_] u* rb.B[_1_]; ; r[tH].H[_0_] = ra.B[_2_] u* rb.B[_2_]; ; r[tH].H[_1_] = ra.B[_3_] u* rb.B[_3_]; ; RV64: ; rt.H[_0_] = ra.B[_0_] u* rb.B[_0_]; ; rt.H[_1_] = ra.B[_1_] u* rb.B[_1_]; ; rt.H[_2_] = ra.B[_2_] u* rb.B[_2_]; ; rt.H[_3_] = ra.B[_3_] u* rb.B[_3_]; :umul8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x5c unimpl # umulx16 rt, ra, rb ; RV32: ; r[tL] = ra.H[_0_] u* rb.H[_1_]; ; r[tH] = ra.H[_1_] u* rb.H[_0_]; ; RV64: ; rt.W[_0_] = ra.H[_0_] u* rb.H[_1_]; ; rt.W[_1_] = ra.H[_1_] u* rb.H[_0_]; :umulx16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x59 unimpl :umulx16h rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x69 unimpl # umulx8 rt, ra, rb ; RV32: ; r[tL].H[_0_] = ra.B[_0_] u* rb.B[_1_]; ; r[tL].H[_1_] = ra.B[_1_] u* rb.B[_0_]; ; r[tH].H[_0_] = ra.B[_2_] u* rb.B[_3_]; ; r[tH].H[_1_] = ra.B[_3_] u* rb.B[_2_]; ; RV64: ; rt.H[_0_] = ra.B[_0_] u* rb.B[_1_]; ; rt.H[_1_] = ra.B[_1_] u* rb.B[_0_]; ; rt.H[_2_] = ra.B[_2_] u* rb.B[_3_]; ; rt.H[_3_] = ra.B[_3_] u* rb.B[_2_]; :umulx8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x5d unimpl # uradd16 rt, ra, rb ; rt.H[_x_] = (CONCAT(1'b0,ra.H[_x_]) + CONCAT(1'b0,rb.H[_x_])) 
>> 1; ; (RV32: __x__=1..0, RV64: __x__=3..0) :uradd16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x10 unimpl # uradd64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = (CONCAT(1'b0,a64) + CONCAT(1'b0,b64)) >>1; + ; + ; r[tU].r[tL] = t64; :uradd64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x50 unimpl # uradd8 rt, ra, rb ; rt.B[_x_] = (CONCAT(1'b0,ra.B[_x_]) + CONCAT(1'b0,rb.B[_x_])) >> 1; ; (RV32: __x__=3..0, RV64: __x__=7..0) :uradd8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x14 unimpl # uraddw rt, ra, rb ; res = (CONCAT(1'b0,ra.W[0]) + CONCAT(1'b0,rb.W[0])) >> 1; ; rt = SE(res); :uraddw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x18 unimpl # urcras16 rt, ra, rb ; rt.H[_x_] = (CONCAT(1'b0,ra.H[_x_]) + CONCAT(1'b0,rb.H[_x-1_])) >> 1; + ; rt.H[_x-1_] = (CONCAT(1'b0,ra.H[_x-1_]) - CONCAT(1'b0,rb.H[_x_])) >> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :urcras16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x12 unimpl # urcrsa16 rt, ra, rb ; rt.H[_x_] = (CONCAT(1'b0,ra.H[_x_]) - CONCAT(1'b0,rb.H[_x-1_])) >> 1; + ; rt.H[_x-1_] = (CONCAT(1'b0,ra.H[_x-1_]) + CONCAT(1'b0,rb.H[_x_])) >> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :urcrsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x13 unimpl # ursub16 rt, ra, rb ; rt.H[_x_] = (CONCAT(1'b0,ra.H[_x_]) - CONCAT(1'b0,rb.H[_x_])) >> 1; ; (RV32: __x__=1..0, RV64: __x__=3..0) :ursub16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x11 unimpl # ursub64 rt, ra, rb ; a64 = r[aU].r[aL]; b64 = r[bU].r[bL]; + ; + ; t64 = (CONCAT(1'b0,a64) - CONCAT(1'b0,b64)) >>1; + ; + ; r[tU].r[tL] = t64; :ursub64 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x51 unimpl # ursub8 rt, ra, rb ; rt.B[_x_] = (CONCAT(1'b0,ra.B[_x_]) - CONCAT(1'b0,rb.B[_x_])) >> 1; ; (RV32: __x__=3..0, RV64: __x__=7..0) :ursub8 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x15
unimpl # ursubw rt, ra, rb ; res = (CONCAT(1'b0,ra.W[0]) - CONCAT(1'b0,rb.W[0])) >> 1; ; rt = SE(res); :ursubw rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x1 & funct7=0x19 unimpl # wext rt, ra, rb ; a64 = r[aU].r[aL]; // RV32 ; a64 = ra; // RV64 ; lsb = rb[4:0]; ; exword = a64[(31+lsb):lsb]; ; rt = SE(exword); :wext rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x67 unimpl # wexti rt, ra, imm5u ; a64 = r[aU].r[aL]; // RV32 ; a64 = ra; // RV64 ; lsb = imm5u; ; exword = a64[(31+lsb):lsb]; ; rt = SE(exword); :wexti rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x0 & funct7=0x6f unimpl # zunpkd810 rt, ra ; rt.H[_x_] = ZE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,1), (0,0) ; RV64: (_x,y_) = (3,5),(2,4),(1,1), (0,0) :zunpkd810 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x0c unimpl # zunpkd820 rt, ra ; rt.H[_x_] = ZE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,2), (0,0) ; RV64: (_x,y_) = (3,6),(2,4),(1,2), (0,0) :zunpkd820 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x0d unimpl # zunpkd830 rt, ra ; rt.H[_x_] = ZE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,3), (0,0) ; RV64: (_x,y_) = (3,7),(2,4),(1,3), (0,0) :zunpkd830 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x0e unimpl # zunpkd831 rt, ra ; rt.H[_x_] = ZE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,3), (0,1) ; RV64: (_x,y_) = (3,7),(2,5),(1,3), (0,1) :zunpkd831 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x0f unimpl # zunpkd832 rt, ra ; rt.H[_x_] = ZE16(ra.B[_y_]); ; RV32: (_x,y_) = (1,3), (0,2) ; RV64: (_x,y_) = (3,7),(2,6),(1,3), (0,2) :zunpkd832 rd,rs1 is op0006=0x3f & rd & rs1 & funct3=0x0 & funct7=0x56 & subf5=0x17 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv32q.sinc ================================================ # RV32Q Standard Extension # fadd.q D,S,T,m 06000053 fe00007f SIMPLE (0, 0) :fadd.q frd,frs1,frs2,FRM is frs1 & frd & frs2 & FRM & 
op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x3 { frd = frs1 f+ frs2; } # fclass.q d,S e6001053 fff0707f SIMPLE (0, 0) :fclass.q rd,frs1 is frs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x73 & op2024=0x0 { #TODO # rd = 0; # rd[0, 1] = 0; #TODO - inf # rd[1, 1] = 0; #TODO - norm num # rd[2, 1] = 0; #TODO - subnorm num # rd[3, 1] = 0; #TODO - 0 # rd[4, 1] = 0; #TODO + 0 # rd[5, 1] = 0; #TODO + norm num # rd[6, 1] = 0; #TODO + subnorm num # rd[7, 1] = 0; #TODO + inf # rd[8, 1] = 0; #TODO snan # rd[9, 1] = 0; #TODO qnan } # fcvt.d.q D,S,m 42300053 fff0007f SIMPLE (0, 0) :fcvt.d.q frd,frs1,FRM is frs1 & frd & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x21 & op2024=0x3 { #TODO double to quad } # fcvt.q.d D,S 46100053 fff0707f SIMPLE (0, 0) :fcvt.q.d frd,frs1 is frs1 & frd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x23 & op2024=0x1 { frd = float2float(frs1); } # fcvt.q.s D,S 46000053 fff0707f SIMPLE (0, 0) :fcvt.q.s frd,frs1 is frs1 & frd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x23 & op2024=0x0 { frd = float2float(frs1); } # fcvt.q.w D,s d6000053 fff0707f SIMPLE (0, 0) :fcvt.q.w frd,rs1 is frd & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x6b & op2024=0x0 { frd = int2float(rs1); } # fcvt.q.wu D,s d6100053 fff0707f SIMPLE (0, 0) :fcvt.q.wu frd,rs1 is frd & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x6b & op2024=0x1 { frd = int2float(rs1); } # fcvt.s.q D,S,m 40300053 fff0007f SIMPLE (0, 0) :fcvt.s.q frd,frs1,FRM is frs1 & frd & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x20 & op2024=0x3 { frd = float2float(frs1); } # fcvt.w.q d,S,m c6000053 fff0007f SIMPLE (0, 0) :fcvt.w.q rd,frs1,FRM is frs1 & FRM & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x63 & op2024=0x0 { rd = trunc(frs1); } # fcvt.wu.q d,S,m c6100053 fff0007f SIMPLE (0, 0) :fcvt.wu.q rd,frs1,FRM is frs1 & FRM & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x63 & 
op2024=0x1 { rd = trunc(frs1); } # fdiv.q D,S,T,m 1e000053 fe00007f SIMPLE (0, 0) :fdiv.q frd,frs1,frs2,FRM is frs1 & frd & frs2 & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0xf { local tfrs1:$(QFLEN) = frs1; local tfrs2:$(QFLEN) = frs2; local result:$(QFLEN) = tfrs1 f/ tfrs2; frd = result; } # feq.q d,S,T a6002053 fe00707f SIMPLE (0, 0) :feq.q rd,frs1,frs2 is frs2 & frs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x53 { rd = zext(frs1 f== frs2); } # fle.q d,S,T a6000053 fe00707f SIMPLE (0, 0) :fle.q rd,frs1,frs2 is frs2 & frs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x53 { rd = zext(frs1 f<= frs2); } # flq D,o(s) 00004007 0000707f OWORD|DREF (0, 16) :flq frd,immI(rs1) is immI & frd & rs1 & op0001=0x3 & op0204=0x1 & op0506=0x0 & funct3=0x4 { local ea:$(XLEN) = immI + rs1; frd = *[ram]:16 ea; } # flt.q d,S,T a6001053 fe00707f SIMPLE (0, 0) :flt.q rd,frs1,frs2 is frs2 & frs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x53 { rd = zext(frs1 f< frs2); } # fmadd.q D,S,T,R,m 06000043 0600007f SIMPLE (0, 0) :fmadd.q frd,frs1,frs2,frs3,FRM is frs1 & frd & frs2 & FRM & frs3 & op0001=0x3 & op0204=0x0 & op0506=0x2 & op2526=0x3 { frd = (frs1 f* frs2) f+ frs3; } # fmax.q D,S,T 2e001053 fe00707f SIMPLE (0, 0) :fmax.q frd,frs1,frs2 is frs1 & frd & frs2 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x17 { local tmpfrs1 = frs1; local tmpfrs2 = frs2; frd = tmpfrs1; if (nan(tmpfrs1) && nan(tmpfrs2)) goto inst_next; if (nan(tmpfrs2)) goto inst_next; frd = tmpfrs2; if (nan(tmpfrs1)) goto inst_next; if (tmpfrs2 f> tmpfrs1) goto inst_next; frd = tmpfrs1; } # fmin.q D,S,T 2e000053 fe00707f SIMPLE (0, 0) :fmin.q frd,frs1,frs2 is frs1 & frd & frs2 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x17 { local tmpfrs1 = frs1; local tmpfrs2 = frs2; frd = tmpfrs1; if (nan(tmpfrs1) && nan(tmpfrs2)) goto inst_next; if (nan(tmpfrs2)) goto inst_next; frd = tmpfrs2; if (nan(tmpfrs1)) goto 
inst_next; if (tmpfrs2 f<= tmpfrs1) goto inst_next; frd = tmpfrs1; } # fmsub.q D,S,T,R,m 06000047 0600007f SIMPLE (0, 0) :fmsub.q frd,frs1,frs2,frs3,FRM is frs1 & frd & frs2 & FRM & frs3 & op0001=0x3 & op0204=0x1 & op0506=0x2 & op2526=0x3 { frd = (frs1 f* frs2) f- frs3; } # fmul.q D,S,T,m 16000053 fe00007f SIMPLE (0, 0) :fmul.q frd,frs1,frs2,FRM is frs1 & frd & frs2 & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0xb { frd = frs1 f* frs2; } # fmv.q.x D,s f6000053 fff0707f SIMPLE (64, 0) :fmv.q.x frd,rs1 is frd & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x7b & op2024=0x0 { frd = int2float(rs1); } # fmv.x.q d,S e6000053 fff0707f SIMPLE (64, 0) :fmv.x.q rd,frs1 is frs1 & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x73 & op2024=0x0 { local tmpreg:4 = &frs1; local tmp:4 = *[register]:4 tmpreg; assignW(rd, tmp); } # fnmadd.q D,S,T,R,m 0600004f 0600007f SIMPLE (0, 0) :fnmadd.q frd,frs1,frs2,frs3,FRM is frs1 & frd & frs2 & FRM & frs3 & op0001=0x3 & op0204=0x3 & op0506=0x2 & op2526=0x3 { frd = (f- (frs1 f* frs2)) f- frs3; } # fnmsub.q D,S,T,R,m 0600004b 0600007f SIMPLE (0, 0) :fnmsub.q frd,frs1,frs2,frs3,FRM is frs1 & frd & frs2 & FRM & frs3 & op0001=0x3 & op0204=0x2 & op0506=0x2 & op2526=0x3 { frd = (f- (frs1 f* frs2)) f+ frs3; } # fsgnj.q D,S,T 26000053 fe00707f SIMPLE (0, 0) :fsgnj.q frd,frs1,frs2 is frs1 & frd & frs2 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x13 { local tmp = frs1; tmp[127,1] = frs2[127,1]; frd = tmp; } # fsgnjn.q D,S,T 26001053 fe00707f SIMPLE (0, 0) :fsgnjn.q frd,frs1,frs2 is frs1 & frd & frs2 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x1 & funct7=0x13 { local tmp = frs1; tmp[127,1] = !frs2[127,1]; frd = tmp; } # fsgnjx.q D,S,T 26002053 fe00707f SIMPLE (0, 0) :fsgnjx.q frd,frs1,frs2 is frs1 & frd & frs2 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x2 & funct7=0x13 { local tmp = frs1; tmp[127,1] = tmp[127,1] ^ frs2[127,1]; frd = tmp; } :fsq frs2,immS(rs1) is frs2 & immS & 
rs1 & op0001=0x3 & op0204=0x1 & op0506=0x1 & funct3=0x4 { local ea:$(XLEN) = immS + rs1; *[ram]:$(QFLEN) ea = frs2; } # fsqrt.q D,S,m 5e000053 fff0007f SIMPLE (0, 0) :fsqrt.q frd,frs1,FRM is frs1 & frd & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x2f & op2024=0x0 { frd = sqrt(frs1); } # fsub.q D,S,T,m 0e000053 fe00007f SIMPLE (0, 0) :fsub.q frd,frs1,frs2,FRM is frs1 & frd & frs2 & FRM & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x7 { frd = frs1 f- frs2; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64a.sinc ================================================ # RV64A Standard Extension (in addition to RV32A) # amoadd.d d,t,0(s) 0000302f fe00707f QWORD|DREF (64, 8) :amoadd.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x0 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; tmp = tmp + tmprs2; *[ram]:8 tmprs1 = tmp; } # amoand.d d,t,0(s) 6000302f fe00707f QWORD|DREF (64, 8) :amoand.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0xc & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; tmp = tmp & tmprs2; *[ram]:8 tmprs1 = tmp; } # amomax.d d,t,0(s) a000302f fe00707f QWORD|DREF (64, 8) :amomax.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x14 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; if (tmprs2 s<= tmp) goto inst_next; *[ram]:8 tmprs1 = tmprs2; } # amomaxu.d d,t,0(s) e000302f fe00707f QWORD|DREF (64, 8) :amomaxu.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x1c & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; if (tmprs2 <= tmp) goto inst_next; *[ram]:8 tmprs1 = tmprs2; } # amomin.d d,t,0(s) 8000302f fe00707f 
QWORD|DREF (64, 8) :amomin.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x10 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; if (tmprs2 s>= tmp) goto inst_next; *[ram]:8 tmprs1 = tmprs2; } # amominu.d d,t,0(s) c000302f fe00707f QWORD|DREF (64, 8) :amominu.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x18 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; if (tmprs2 >= tmp) goto inst_next; *[ram]:8 tmprs1 = tmprs2; } # amoor.d d,t,0(s) 4000302f fe00707f QWORD|DREF (64, 8) :amoor.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x8 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; tmp = tmp | tmprs2; *[ram]:8 tmprs1 = tmp; } # amoswap.d d,t,0(s) 0800302f fe00707f QWORD|DREF (64, 8) :amoswap.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x1 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; *[ram]:8 tmprs1 = tmprs2; } # amoxor.d d,t,0(s) 2000302f fe00707f QWORD|DREF (64, 8) :amoxor.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x4 & aqrl { local tmprs1 = rs1; local tmprs2 = rs2L; local tmp:8 = *[ram]:8 tmprs1; rdL = tmp; tmp = tmp ^ tmprs2; *[ram]:8 tmprs1 = tmp; } # lr.d d,0(s) 1000302f fff0707f QWORD|DREF (64, 8) :lr.d^aqrl rdL,(rs1) is rs1 & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x2 & op2024=0x0 & aqrl { RESERVE_ADDRESS = rs1; RESERVE = 1; RESERVE_LENGTH = 8; rdL = *[ram]:8 rs1; } # sc.d d,t,0(s) 1800302f fe00707f QWORD|DREF (64, 8) :sc.d^aqrl rdL,rs2L,(rs1) is rs1 & rs2L & rdL & op0001=0x3 & op0204=0x3 & op0506=0x1 & funct3=0x3 & op2731=0x3 & aqrl { local tmprs2 = rs2L; local tmprs1 = rs1; rdL 
= 1; if ((RESERVE == 0)||(RESERVE_ADDRESS != tmprs1)||(RESERVE_LENGTH != 8)) goto inst_next; *[ram]:8 tmprs1 = tmprs2; rdL = 0; RESERVE_ADDRESS = 0; RESERVE = 0; RESERVE_LENGTH = 0; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64b.sinc ================================================ # RV64 Bitmanip Extension #TODO fix op2031 :addiwu rd, rs1, op2031 is op0006=0x1b & op1214=0x4 & rd & op2031 & rs1 unimpl :addu.w rd, rs1, rs2 is op0006=0x3b & op1214=0x0 & op2531=0x4 & rd & rs1 & rs2 unimpl :addwu rd, rs1, rs2 is op0006=0x3b & op1214=0x0 & op2531=0x5 & rd & rs1 & rs2 unimpl :bdepw rd, rs1, rs2 is op0006=0x3b & op1214=0x6 & op2531=0x24 & rd & rs1 & rs2 unimpl :bextw rd, rs1, rs2 is op0006=0x3b & op1214=0x6 & op2531=0x4 & rd & rs1 & rs2 unimpl :bfpw rd, rs1, rs2 is op0006=0x3b & op1214=0x7 & op2531=0x24 & rd & rs1 & rs2 unimpl :bmatflip rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x3 & op2531=0x30 & rd & rs1 unimpl :bmator rd, rs1, rs2 is op0006=0x33 & op1214=0x3 & op2531=0x4 & rd & rs1 & rs2 unimpl :bmatxor rd, rs1, rs2 is op0006=0x33 & op1214=0x3 & op2531=0x24 & rd & rs1 & rs2 unimpl :clmulhw rd, rs1, rs2 is op0006=0x3b & op1214=0x3 & op2531=0x5 & rd & rs1 & rs2 unimpl :clmulrw rd, rs1, rs2 is op0006=0x3b & op1214=0x2 & op2531=0x5 & rd & rs1 & rs2 unimpl :clmulw rd, rs1, rs2 is op0006=0x3b & op1214=0x1 & op2531=0x5 & rd & rs1 & rs2 unimpl :clzw rd, rs1 is op0006=0x1b & op1214=0x1 & op2024=0x0 & op2531=0x30 & rd & rs1 unimpl :crc32.d rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x13 & op2531=0x30 & rd & rs1 unimpl :crc32c.d rd, rs1 is op0006=0x13 & op1214=0x1 & op2024=0x1b & op2531=0x30 & rd & rs1 unimpl :ctzw rd, rs1 is op0006=0x1b & op1214=0x1 & op2024=0x1 & op2531=0x30 & rd & rs1 unimpl :fslw rd, rs1, rs3, rs2 is op0006=0x3b & op1214=0x1 & op2526=0x2 & rd & rs1 & rs2 & rs3 unimpl #TODO fix op2024 :fsriw rd, rs1, rs3, op2024 is op0006=0x1b & op1214=0x5 & op2526=0x2 & op2024 & rd & rs1 & rs3 unimpl :fsrw rd, 
rs1, rs3, rs2 is op0006=0x3b & op1214=0x5 & op2526=0x2 & rd & rs1 & rs2 & rs3 unimpl #TODO fix op2024 :gorciw rd, rs1, op2024 is op0006=0x1b & op1214=0x5 & op2531=0x14 & op2024 & rd & rs1 unimpl :gorcw rd, rs1, rs2 is op0006=0x3b & op1214=0x5 & op2531=0x14 & rd & rs1 & rs2 unimpl #TODO fix op2024 :greviw rd, rs1, op2024 is op0006=0x1b & op1214=0x5 & op2531=0x34 & op2024 & rd & rs1 unimpl :grevw rd, rs1, rs2 is op0006=0x3b & op1214=0x5 & op2531=0x34 & rd & rs1 & rs2 unimpl :packuw rd, rs1, rs2 is op0006=0x3b & op1214=0x4 & op2531=0x24 & rd & rs1 & rs2 unimpl :packw rd, rs1, rs2 is op0006=0x3b & op1214=0x4 & op2531=0x4 & rd & rs1 & rs2 unimpl :pcntw rd, rs1 is op0006=0x1b & op1214=0x1 & op2024=0x2 & op2531=0x30 & rd & rs1 unimpl :rolw rd, rs1, rs2 is op0006=0x3b & op1214=0x1 & op2531=0x30 & rd & rs1 & rs2 unimpl #TODO fix op2024 :roriw rd, rs1, op2024 is op0006=0x1b & op1214=0x5 & op2531=0x30 & op2024 & rd & rs1 unimpl :rorw rd, rs1, rs2 is op0006=0x3b & op1214=0x5 & op2531=0x30 & rd & rs1 & rs2 unimpl #TODO fix op2024 :sbclriw rd, rs1, op2024 is op0006=0x1b & op1214=0x1 & op2531=0x24 & op2024 & rd & rs1 unimpl :sbclrw rd, rs1, rs2 is op0006=0x3b & op1214=0x1 & op2531=0x24 & rd & rs1 & rs2 unimpl :sbextw rd, rs1, rs2 is op0006=0x3b & op1214=0x5 & op2531=0x24 & rd & rs1 & rs2 unimpl #TODO fix op2024 :sbinviw rd, rs1, op2024 is op0006=0x1b & op1214=0x1 & op2531=0x34 & op2024 & rd & rs1 unimpl :sbinvw rd, rs1, rs2 is op0006=0x3b & op1214=0x1 & op2531=0x34 & rd & rs1 & rs2 unimpl #TODO fix op2024 :sbsetiw rd, rs1, op2024 is op0006=0x1b & op1214=0x1 & op2531=0x14 & op2024 & rd & rs1 unimpl :sbsetw rd, rs1, rs2 is op0006=0x3b & op1214=0x1 & op2531=0x14 & rd & rs1 & rs2 unimpl :sh1addu.w rd, rs1, rs2 is op0006=0x3b & op1214=0x2 & op2531=0x10 & rd & rs1 & rs2 unimpl :sh2addu.w rd, rs1, rs2 is op0006=0x3b & op1214=0x4 & op2531=0x10 & rd & rs1 & rs2 unimpl :sh3addu.w rd, rs1, rs2 is op0006=0x3b & op1214=0x6 & op2531=0x10 & rd & rs1 & rs2 unimpl :shflw rd, rs1, rs2 is 
op0006=0x3b & op1214=0x1 & op2531=0x4 & rd & rs1 & rs2 unimpl #TODO fix op2026 :slliu.w rd, rs1, op2026 is op0006=0x1b & op1214=0x1 & op2731=0x1 & op2026 & rd & rs1 unimpl #TODO fix op2024 :sloiw rd, rs1, op2024 is op0006=0x1b & op1214=0x1 & op2531=0x10 & op2024 & rd & rs1 unimpl :slow rd, rs1, rs2 is op0006=0x3b & op1214=0x1 & op2531=0x10 & rd & rs1 & rs2 unimpl #TODO fix op2024 :sroiw rd, rs1, op2024 is op0006=0x1b & op1214=0x5 & op2531=0x10 & op2024 & rd & rs1 unimpl :srow rd, rs1, rs2 is op0006=0x3b & op1214=0x5 & op2531=0x10 & rd & rs1 & rs2 unimpl :subu.w rd, rs1, rs2 is op0006=0x3b & op1214=0x0 & op2531=0x24 & rd & rs1 & rs2 unimpl :subwu rd, rs1, rs2 is op0006=0x3b & op1214=0x0 & op2531=0x25 & rd & rs1 & rs2 unimpl :unshflw rd, rs1, rs2 is op0006=0x3b & op1214=0x5 & op2531=0x4 & rd & rs1 & rs2 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64d.sinc ================================================ # RV64D Standard Extension (in addition to RV32D) # fcvt.d.l D,s,m d2200053 fff0007f SIMPLE (64, 0) :fcvt.d.l frd,rs1L,FRM is frd & FRM & rs1L & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x69 & op2024=0x2 { local tmp:8 = int2float(rs1L); frd = tmp; } # fcvt.d.lu D,s,m d2300053 fff0007f SIMPLE (64, 0) :fcvt.d.lu frd,rs1L,FRM is frd & FRM & rs1L & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x69 & op2024=0x3 { #ATTN unsigned can be an issue here local u64:$(XLEN2) = zext(rs1L); local tmp:8 = int2float(u64); frd = tmp; } # fcvt.l.d d,S,m c2200053 fff0007f SIMPLE (64, 0) :fcvt.l.d rdL,frs1D,FRM is frs1D & FRM & rdL & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x61 & op2024=0x2 { rdL = trunc(frs1D); } # fcvt.lu.d d,S,m c2300053 fff0007f SIMPLE (64, 0) :fcvt.lu.d rdL,frs1D,FRM is frs1D & FRM & rdL & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x61 & op2024=0x3 { #TODO unsigned rdL = trunc(frs1D); } # fmv.d.x D,s f2000053 fff0707f SIMPLE (64, 0) :fmv.d.x frd,rs1L is frd & rs1L & op0001=0x3 & 
op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x79 & op2024=0x0 { frd = rs1L; } :fmv.x.d rdL,frs1D is frs1D & rdL & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct3=0x0 & funct7=0x71 & op2024=0x0 { local tmpreg:4 = &frs1D; local tmp:8 = *[register]:8 tmpreg; rdL = tmp; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64f.sinc ================================================ # RV64F Standard Extension (in addition to RV32F) # fcvt.l.s d,S,m c0200053 fff0007f SIMPLE (64, 0) :fcvt.l.s rdL,frs1S,FRM is frs1S & FRM & rdL & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x60 & op2024=0x2 { rdL = trunc(frs1S); } # fcvt.lu.s d,S,m c0300053 fff0007f SIMPLE (64, 0) :fcvt.lu.s rdL,frs1S,FRM is frs1S & FRM & rdL & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x60 & op2024=0x3 { #TODO unsigned rdL = trunc(frs1S); } # fcvt.s.l D,s,m d0200053 fff0007f SIMPLE (64, 0) :fcvt.s.l frd,rs1L,FRM is frd & FRM & rs1L & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x68 & op2024=0x2 { local tmp:4 = int2float(rs1L); fassignS(frd, tmp); } # fcvt.s.lu D,s,m d0300053 fff0007f SIMPLE (64, 0) :fcvt.s.lu frd,rs1L,FRM is frd & FRM & rs1L & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x68 & op2024=0x3 { #ATTN unsigned can be an issue here local u64:$(XLEN2) = zext(rs1L); local tmp:4 = int2float(u64); fassignS(frd, tmp); } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64i.sinc ================================================ # RV64I Base Instruction Set (in addition to RV32I) # addiw d,s,j 0000001b 0000707f SIMPLE (64, 0) :addiw rd,rs1,immI is rs1 & immI & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x0 { local result = rs1 + immI; rd = sext(result:4); } :sext.w rd,rs1 is rs1 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x0 & op2031=0 { local result = rs1; rd = sext(result:4); } # addw d,s,t 0000003b fe00707f SIMPLE (64, 0) :addw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & 
op0204=0x6 & op0506=0x1 & funct3=0x0 & funct7=0x0 { local tmpr1:4 = rs1:4; local tmpr2:4 = rs2:4; local result:4 = tmpr1 + tmpr2; rd = sext(result); } # ld d,o(s) 00003003 0000707f QWORD|DREF (64, 8) :ld rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x3 { local ea:$(XLEN) = rs1 + immI; rd = *[ram]:8 ea; } # lwu d,o(s) 00006003 0000707f DWORD|DREF (64, 4) :lwu rd,immI(rs1) is immI & rs1 & rd & op0001=0x3 & op0204=0x0 & op0506=0x0 & funct3=0x6 { local ea:$(XLEN) = rs1 + immI; rd = zext(*[ram]:4 ea); } # sd t,q(s) 00003023 0000707f QWORD|DREF (64, 8) :sd rs2,immS(rs1) is immS & rs2 & rs1 & op0001=0x3 & op0204=0x0 & op0506=0x1 & funct3=0x3 { local ea:$(XLEN) = rs1 + immS; *[ram]:8 ea = rs2; } # slliw d,s,< 0000101b fe00707f SIMPLE (64, 0) :slliw rd,rs1,shamt5 is rs1 & shamt5 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x1 & op2531=0x0 { local tmp:4 = rs1:4; tmp = tmp << shamt5; rd = sext(tmp); } # sllw d,s,t 0000103b fe00707f SIMPLE (64, 0) :sllw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x1 & funct7=0x0 { local shift:$(XLEN) = rs2 & 0x1f; local tmp:4 = rs1:4; tmp = tmp << shift; rd = sext(tmp); } # sraiw d,s,< 4000501b fe00707f SIMPLE (64, 0) :sraiw rd,rs1,shamt5 is rs1 & shamt5 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x5 & op2531=0x20 { local tmp:4 = rs1:4; tmp = tmp s>> shamt5; rd = sext(tmp); } # sraw d,s,t 4000503b fe00707f SIMPLE (64, 0) :sraw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x5 & funct7=0x20 { local shift:$(XLEN) = rs2 & 0x1f; local tmp:4 = rs1:4; tmp = tmp s>> shift; rd = sext(tmp); } # srliw d,s,< 0000501b fe00707f SIMPLE (64, 0) :srliw rd,rs1,shamt5 is rs1 & shamt5 & rd & op0001=0x3 & op0204=0x6 & op0506=0x0 & funct3=0x5 & op2531=0x0 { local tmp:4 = rs1:4; tmp = tmp >> shamt5; rd = sext(tmp); } # srlw d,s,t 0000503b fe00707f SIMPLE (64, 0) :srlw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x5 
& funct7=0x0 { local shift:$(XLEN) = rs2 & 0x1f; local tmp:4 = rs1:4; tmp = tmp >> shift; rd = sext(tmp); } # subw d,s,t 4000003b fe00707f SIMPLE (64, 0) :subw rd,rs1W,rs2W is rs1W & rs2W & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x0 & funct7=0x20 { local result = rs1W - rs2W; rd = sext(result); } # negw d,t 4000003b fe0ff07f ALIAS (64, 0) :negw rd,rs2W is rs2W & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x0 & funct7=0x20 & op1519=0x0 { local tmp = -rs2W; rd = sext(tmp); } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64k.sinc ================================================ # RV64 Crypto Extension # NOTE 0.6.2 # 0001010 rs2 rs1 010 rd 0101011 saes64.decs :saes64.decs rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x5b & op1214=0x2 & op2531=0x0a unimpl # 0001001 rs2 rs1 010 rd 0101011 saes64.decsm :saes64.decsm rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x5b & op1214=0x2 & op2531=0x09 unimpl # 0001000 rs2 rs1 010 rd 0101011 saes64.encs :saes64.encs rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x5b & op1214=0x2 & op2531=0x08 unimpl # 0000111 rs2 rs1 010 rd 0101011 saes64.encsm :saes64.encsm rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x5b & op1214=0x2 & op2531=0x07 unimpl # 0000110 00001 rs1 010 rd 0101011 saes64.imix :saes64.imix rd, rs1 is rd & rs1 & op0006=0x5b & op1214=0x2 & op2024=0x01 & op2531=0x06 unimpl # 0000100 0 rcon rs1 010 rd 0101011 saes64.ks1 :saes64.ks1 rd, rs1, rcon is rd & rs1 & rcon & op0006=0x5b & op1214=0x2 & op2424=0x0 & op2531=0x04 unimpl # 0000101 rs2 rs1 010 rd 0101011 saes64.ks2 :saes64.ks2 rd, rs1, rs2 is rd & rs1 & rs2 & op0006=0x5b & op1214=0x2 & op2531=0x05 unimpl # 0000111 00100 rs1 111 rd 0101011 ssha512.sig0 :ssha512.sig0 rd, rs1 is rd & rs1 & op0006=0x5b & op1214=0x7 & op2024=0x04 & op2531=0x07 unimpl # 0000111 00101 rs1 111 rd 0101011 ssha512.sig1 :ssha512.sig1 rd, rs1 is rd & rs1 & op0006=0x5b & op1214=0x7 & op2024=0x05 & op2531=0x07 unimpl # 0000111 00110 rs1 111 rd 
0101011 ssha512.sum0 :ssha512.sum0 rd, rs1 is rd & rs1 & op0006=0x5b & op1214=0x7 & op2024=0x06 & op2531=0x07 unimpl # 0000111 00111 rs1 111 rd 0101011 ssha512.sum1 :ssha512.sum1 rd, rs1 is rd & rs1 & op0006=0x5b & op1214=0x7 & op2024=0x07 & op2531=0x07 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64m.sinc ================================================ # RV64M Standard Exention (in addition to RV32M) # divuw d,s,t 0200503b fe00707f SIMPLE (64, 0) :divuw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x5 & funct7=0x1 { local tmpr1:4 = rs1:4; local tmpr2:4 = rs2:4; rd = sext(tmpr1 / tmpr2); } # divw d,s,t 0200403b fe00707f SIMPLE (64, 0) :divw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x4 & funct7=0x1 { local tmpr1:4 = rs1:4; local tmpr2:4 = rs2:4; rd = sext(tmpr1 s/ tmpr2); } # mulw d,s,t 0200003b fe00707f SIMPLE (64, 0) :mulw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x0 & funct7=0x1 { local tmp:4 = rs1:4 * rs2:4; rd = sext(tmp); } # remuw d,s,t 0200703b fe00707f SIMPLE (64, 0) :remuw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x7 & funct7=0x1 { local tmpr1:4 = rs1:4; local tmpr2:4 = rs2:4; rd = sext(tmpr1 % tmpr2); } # remw d,s,t 0200603b fe00707f SIMPLE (64, 0) :remw rd,rs1,rs2 is rs1 & rs2 & rd & op0001=0x3 & op0204=0x6 & op0506=0x1 & funct3=0x6 & funct7=0x1 { local tmpr1:4 = rs1:4; local tmpr2:4 = rs2:4; rd = sext(tmpr1 s% tmpr2); } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64p.sinc ================================================ # RV64 P Extension # add32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] + rb.W[_x_]; ; (RV64: __x__=1..0) :add32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x20 unimpl # cras32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] + rb.W[_x-1_]; + ; rt.W[_x-1_] = ra.W[_x-1_] – 
rb.W[_x_]; ; (RV64: __x__=1) :cras32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x22 unimpl # crsa32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] - rb.W[_x-1_]; + ; rt.W[_x-1_] = ra.W[_x-1_] + rb.W[_x_]; ; (RV64: __x__=1) :crsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x23 unimpl # kadd32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] + rb.W[_x_]); ; (RV64: __x__=1..0) :kadd32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x08 unimpl # kcras32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] + rb.W[_x-1_]); + ; rt.W[_x-1_] = SAT.Q31(ra.W[_x-1_] – rb.W[_x_]); ; (RV64: __x__=1) :kcras32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x0a unimpl # kcrsa32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] - rb.W[_x-1_]); + ; rt.W[_x-1_] = SAT.Q31(ra.W[_x-1_] + rb.W[_x_]); ; (RV64: __x__=1) :kcrsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x0b unimpl :kmabb32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x2d unimpl :kmabt32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x35 unimpl :kmadrs32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x36 unimpl :kmads32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x2e unimpl :kmatt32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x3d unimpl :kmaxda32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x25 unimpl :kmaxds32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x3e unimpl :kmda32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x1c unimpl :kmsda32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x26 unimpl :kmsxda32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x27 unimpl :kmxda32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x1d unimpl # ksll32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] << rb[4:0]); ; (RV64: __x__=1..0) :ksll32 rd,rs1,rs2 is 
op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x32 unimpl # kslli32 rt, ra, im5u ; rt.W[_x_] = SAT.Q31(ra.W[_x_] << im5u); ; (RV64: __x__=1..0) :kslli32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x42 unimpl # kslra32 rt, ra, rb ; if (rb[5:0] < 0) + ; rt.W[_x_] = ra.W[_x_] s>> -rb[5:0]; ; if (rb[5:0] > 0) + ; rt.W[_x_] = SAT.Q31(ra.W[_x_] << rb[5:0]); ; (RV64: __x__=1..0) :kslra32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x2b unimpl :kslra32.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x33 unimpl # kstas16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] + rb.H[_x_]); + ; rt.H[_x-1_] = SAT.Q15(ra.H[_x-1_] – rb.H[_x-1_]); ; (RV32: __x__=1, RV64: __x__=1,3) :kstas16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x62 unimpl # kstas32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] + rb.W[_x_]); + ; rt.W[_x-1_] = SAT.Q31(ra.W[_x-1_] – rb.W[_x-1_]); ; (RV64: __x__=1) :kstas32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x60 unimpl # kstsa16 rt, ra, rb ; rt.H[_x_] = SAT.Q15(ra.H[_x_] - rb.H[_x_]); + ; rt.H[_x-1_] = SAT.Q15(ra.H[_x-1_] + rb.H[_x-1_]); ; (RV32: __x__=1, RV64: __x__=1,3) :kstsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x63 unimpl # kstsa32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] - rb.W[_x_]); + ; rt.W[_x-1_] = SAT.Q31(ra.W[_x-1_] + rb.W[_x-1_]); ; (RV64: __x__=1) :kstsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x61 unimpl # ksub32 rt, ra, rb ; rt.W[_x_] = SAT.Q31(ra.W[_x_] - rb.W[_x_]); ; (RV64: __x__=1..0) :ksub32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x09 unimpl # pkbb32 rt, ra, rb ; rt = CONCAT(ra.W[_0_], rb.W[_0_]); :pkbb32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x07 unimpl # pkbt32 rt, ra, rb ; rt = CONCAT(ra.W[_0_], rb.W[_1_]); :pkbt32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x0f unimpl # pktb32 rt, ra, rb ; rt = 
CONCAT(ra.W[_1_], rb.W[_0_]); :pktb32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x1f unimpl # pktt32 rt, ra, rb ; rt = CONCAT(ra.W[_1_], rb.W[_1_]); :pktt32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x17 unimpl # radd32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] + rb.W[_x_]) s>> 1; ; (RV64: __x__=1..0) :radd32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x00 unimpl # rcras32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] + rb.W[_x-1_]) s>> 1; + ; rt.W[_x-1_] = (ra.W[_x-1_] – rb.W[_x_]) s>> 1; ; (RV64: __x__=1) :rcras32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x02 unimpl # rcrsa32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] - rb.W[_x-1_]) s>> 1; + ; rt.W[_x-1_] = (ra.W[_x-1_] + rb.W[_x_]) s>> 1; ; (RV64: __x__=1) :rcrsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x03 unimpl # rstas16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] + rb.H[_x_]) s>> 1; + ; rt.H[_x-1_] = (ra.H[_x-1_] – rb.H[_x-1_]) s>> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :rstas16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x5a unimpl # rstas32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] + rb.W[_x_]) s>> 1; + ; rt.W[_x-1_] = (ra.W[_x-1_] – rb.W[_x-1_]) s>> 1; ; (RV64: __x__=1) :rstas32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x58 unimpl # rstsa16 rt, ra, rb ; rt.H[_x_] = (ra.H[_x_] - rb.H[_x_]) s>> 1; + ; rt.H[_x-1_] = (ra.H[_x-1_] + rb.H[_x-1_]) s>> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :rstsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x5b unimpl # rstsa32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] - rb.W[_x_]) s>> 1; + ; rt.W[_x-1_] = (ra.W[_x-1_] + rb.W[_x-1_]) s>> 1; ; (RV64: __x__=1) :rstsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x59 unimpl # rsub32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] - rb.W[_x_]) s>> 1; ; (RV64: __x__=1..0) :rsub32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x01 unimpl # sll32 rt, ra, rb ; 
rt.W[_x_] = ra.W[_x_] << rb[4:0]; ; (RV64: __x__=1..0) :sll32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x2a unimpl # slli32 rt, ra, im5u ; rt.W[_x_] = ra.W[_x_] << im5u; ; (RV64: __x__=1..0) :slli32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x3a unimpl # smax32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] > rb.W[_x_])? ra.W[_x_] : rb.W[_x_]; ; (RV64: __x__=1..0) :smax32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x49 unimpl :smbt32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x0c unimpl :smdrs32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x34 unimpl :smds32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x2c unimpl # smin32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] < rb.W[_x_])? ra.W[_x_] : rb.W[_x_]; ; (RV64: __x__=1..0) :smin32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x48 unimpl :smtt32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x14 unimpl :smxds32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x3c unimpl # sra32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] s>> rb[4:0]; ; (RV64: __x__=1..0) :sra32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x28 unimpl :sra32.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x30 unimpl # srai32 rt, ra, im5u ; rt.W[_x_] = ra.W[_x_] s>> im5u; ; (RV64: __x__=1..0) :srai32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x38 unimpl :srai32.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x40 unimpl # srl32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] u>> rb[4:0]; ; (RV64: __x__=1..0) :srl32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x29 unimpl :srl32.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x31 unimpl # srli32 rt, ra, im5u ; rt.W[_x_] = ra.W[_x_] u>> im5u; ; (RV64: __x__=1..0) :srli32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & 
funct7=0x39 unimpl :srli32.u rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x41 unimpl # stas16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] + rb.H[_x_]; + ; rt.H[_x-1_] = ra.H[_x-1_] – rb.H[_x-1_]; ; (RV32: __x__=1, RV64: __x__=1,3) :stas16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x7a unimpl # stas32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] + rb.W[_x_]; + ; rt.W[_x-1_] = ra.W[_x-1_] – rb.W[_x-1_]; ; (RV64: __x__=1) :stas32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x78 unimpl # stsa16 rt, ra, rb ; rt.H[_x_] = ra.H[_x_] - rb.H[_x_]; + ; rt.H[_x-1_] = ra.H[_x-1_] + rb.H[_x-1_]; ; (RV32: __x__=1, RV64: __x__=1,3) :stsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x7b unimpl # stsa32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] - rb.W[_x_]; + ; rt.W[_x-1_] = ra.W[_x-1_] + rb.W[_x-1_]; ; (RV64: __x__=1) :stsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x79 unimpl # sub32 rt, ra, rb ; rt.W[_x_] = ra.W[_x_] - rb.W[_x_]; ; (RV64: __x__=1..0) :sub32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x21 unimpl # ukadd32 rt, ra, rb ; rt.W[_x_] = SAT.U32(ra.W[_x_] + rb.W[_x_]; ; (RV64: __x__=1..0) :ukadd32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x18 unimpl # ukcras32 rt, ra, rb ; rt.W[_x_] = SAT.U32(ra.W[_x_] + rb.W[_x-1_]); + ; rt.W[_x-1_] = SAT.U32(ra.W[_x-1_] – rb.W[_x_]); ; (RV64: __x__=1) :ukcras32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x1a unimpl # ukcrsa32 rt, ra, rb ; rt.W[_x_] = SAT.U32(ra.W[_x_] - rb.W[_x-1_]); + ; rt.W[_x-1_] = SAT.U32(ra.W[_x-1_] + rb.W[_x_]); ; (RV64: __x__=1) :ukcrsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x1b unimpl # ukstas16 rt, ra, rb ; rt.H[_x_] = SAT.U16(ra.H[_x_] + rb.H[_x_]); + ; rt.H[_x-1_] = SAT.U16(ra.H[_x-1_] – rb.H[_x-1_]); ; (RV32: __x__=1, RV64: __x__=1,3) :ukstas16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x72 unimpl # 
ukstas32 rt, ra, rb ; rt.W[_x_] = SAT.U32(ra.W[_x_] + rb.W[_x_]); + ; rt.W[_x-1_] = SAT.U32(ra.W[_x-1_] – rb.W[_x-1_]); ; (RV64: __x__=1) :ukstas32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x70 unimpl # ukstsa16 rt, ra, rb ; rt.H[_x_] = SAT.U16(ra.H[_x_] - rb.H[_x_]); + ; rt.H[_x-1_] = SAT.U16(ra.H[_x-1_] + rb.H[_x-1_]); ; (RV32: __x__=1, RV64: __x__=1,3) :ukstsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x73 unimpl # ukstsa32 rt, ra, rb ; rt.W[_x_] = SAT.U32(ra.W[_x_] - rb.W[_x_]); + ; rt.W[_x-1_] = SAT.U32(ra.W[_x-1_] + rb.W[_x-1_]); ; (RV64: __x__=1) :ukstsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x71 unimpl # uksub32 rt, ra, rb ; rt.W[_x_] = SAT.U32(ra.W[_x_] - rb.W[_x_]); ; (RV64: __x__=1..0) :uksub32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x19 unimpl # umax32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] u> rb.W[_x_])? ra.W[_x_] : rb.W[_x_]; ; (RV64: __x__=1..0) :umax32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x51 unimpl # umin32 rt, ra, rb ; rt.W[_x_] = (ra.W[_x_] u< rb.W[_x_])? 
ra.W[_x_] : rb.W[_x_]; ; (RV64: __x__=1..0) :umin32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x50 unimpl # uradd32 rt, ra, rb ; rt.W[_x_] = (CONCAT(1'b0,ra.W[_x_]) + CONCAT(1'b0,rb.W[_x_])) >> 1; ; (RV64: __x__=1..0) :uradd32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x10 unimpl # urcras32 rt, ra, rb ; rt.W[_x_] = (CONCAT(1'b0,ra.W[_x_]) + CONCAT(1'b0,rb.W[_x-1_])) >> 1; + ; rt.W[_x-1_] = (CONCAT(1'b0,ra.W[_x-1_]) – CONCAT(1'b0,rb.W[_x_])) >> 1; ; (RV64: __x__=1) :urcras32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x12 unimpl # urcrsa32 rt, ra, rb ; rt.W[_x_] = (CONCAT(1'b0,ra.W[_x_]) - CONCAT(1'b0,rb.W[_x-1_])) >> 1; + ; rt.W[_x-1_] = (CONCAT(1'b0,ra.W[_x-1_]) + CONCAT(1'b0,rb.W[_x_])) >> 1; ; (RV64: __x__=1) :urcrsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x13 unimpl # urstas16 rt, ra, rb ; rt.H[_x_] = (CONCAT(1'b0,ra.H[_x_]) + CONCAT(1'b0,rb.H[_x_])) >> 1; + ; rt.H[_x-1_] = (CONCAT(1'b0,ra.H[_x-1_]) – CONCAT(1'b0,rb.H[_x-1_])) >> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :urstas16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x6a unimpl # urstas32 rt, ra, rb ; rt.W[_x_] = (CONCAT(1'b0,ra.W[_x_]) + CONCAT(1'b0,rb.W[_x_])) >> 1; + ; rt.W[_x-1_] = (CONCAT(1'b0,ra.W[_x-1_]) – CONCAT(1'b0,rb.W[_x-1_])) >> 1; ; (RV64: __x__=1) :urstas32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x68 unimpl # urstsa16 rt, ra, rb ; rt.H[_x_] = (CONCAT(1'b0,ra.H[_x_]) - CONCAT(1'b0,rb.H[_x_])) >> 1; + ; rt.H[_x-1_] = (CONCAT(1'b0,ra.H[_x-1_]) + CONCAT(1'b0,rb.H[_x-1_])) >> 1; ; (RV32: __x__=1, RV64: __x__=1,3) :urstsa16 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x6b unimpl # urstsa32 rt, ra, rb ; rt.W[_x_] = (CONCAT(1'b0,ra.W[_x_]) - CONCAT(1'b0,rb.W[_x_])) >> 1; + ; rt.W[_x-1_] = (CONCAT(1'b0,ra.W[_x-1_]) + CONCAT(1'b0,rb.W[_x-1_])) >> 1; ; (RV64: __x__=1) :urstsa32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & 
funct7=0x69 unimpl # ursub32 rt, ra, rb ; rt.W[_x_] = (CONCAT(1'b0,ra.W[_x_]) - CONCAT(1'b0,rb.W[_x_])) >> 1; ; (RV64: __x__=1..0) :ursub32 rd,rs1,rs2 is op0006=0x3f & rd & rs1 & rs2 & funct3=0x2 & funct7=0x11 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rv64q.sinc ================================================ # RV64Q Standard Extension (in addition to RV32Q) # fcvt.l.q d,S,m c6200053 fff0007f SIMPLE (64, 0) :fcvt.l.q rd,frs1,FRM is frs1 & FRM & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x63 & op2024=0x2 { rd = trunc(frs1); } # fcvt.lu.q d,S,m c6300053 fff0007f SIMPLE (64, 0) :fcvt.lu.q rd,frs1,FRM is frs1 & FRM & rd & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x63 & op2024=0x3 { rd = trunc(frs1); } # fcvt.q.l D,s,m d6200053 fff0007f SIMPLE (64, 0) :fcvt.q.l frd,rs1,FRM is frd & FRM & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x6b & op2024=0x2 { frd = int2float(rs1); } # fcvt.q.lu D,s,m d6300053 fff0007f SIMPLE (64, 0) :fcvt.q.lu frd,rs1,FRM is frd & FRM & rs1 & op0001=0x3 & op0204=0x4 & op0506=0x2 & funct7=0x6b & op2024=0x3 { frd = int2float(rs1); } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.rvc.sinc ================================================ # RVC Standard Extension for Compressed Instructions # c.add d,CV 00009002 0000f003 SIMPLE (0, 0) :c.add crd,crs2 is crd & crs2 & cop0001=0x2 & cop1315=0x4 & cop1212=0x1 { crd = crd + crs2; } # c.addi d,Co 00000001 0000e003 SIMPLE (0, 0) # There may be other nop forms here if (cop0711=0) or (cop1212=0 & cop0206=0) :c.addi crd,cimmI is crd & cimmI & cop0001=0x1 & cop1315=0x0 { crd = crd + cimmI; } :c.nop is cop0001=0x1 & cop1315=0x0 & cop0711=0 & cop1212=0 & cop0206=0 { local NOP:1 = 0; NOP = NOP; } # c.addi16sp Cc,CL 00006101 0000ef83 SIMPLE (0, 0) :c.addi16sp sp,caddi16spimm is cop0711=0x2 & caddi16spimm & sp & cop0001=0x1 & cop1315=0x3 { sp = sp + caddi16spimm; } # 
c.addi4spn Ct,Cc,CK 00000000 0000e003 SIMPLE (0, 0) :c.addi4spn cr0204s,sp,caddi4spnimm is caddi4spnimm & cr0204s & sp & cop0001=0x0 & cop1315=0x0 { cr0204s = sp + caddi4spnimm; } @if (ADDRSIZE == "64") || (ADDRSIZE == "128") # c.addiw d,Co 00002001 0000e003 SIMPLE (64, 0) :c.addiw crd,cimmI is crd & cimmI & cop0001=0x1 & cop1315=0x1 { local tmp:$(XLEN) = crd + cimmI; crd = sext(tmp:$(WXLEN)); } @endif @if (ADDRSIZE == "64") || (ADDRSIZE == "128") # c.addw Cs,Ct 00009c21 0000fc63 SIMPLE (64, 0) :c.addw cr0709s,cr0204s is cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x1 & cop1012=0x7 { local tmp:4 = cr0709s:4 + cr0204s:4; cr0709s = sext(tmp); } @endif # c.and Cs,Ct 00008c61 0000fc63 SIMPLE (0, 0) :c.and cr0709s,cr0204s is cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x3 & cop1012=0x3 { cr0709s = cr0709s & cr0204s; } # c.andi Cs,Co 00008801 0000ec03 SIMPLE (0, 0) :c.andi cr0709s,cimmI is cimmI & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x2 { cr0709s = cr0709s & cimmI; } # c.beqz Cs,Cp 0000c001 0000e003 CONDBRANCH (0, 0) :c.beqz cr0709s,cbimm is cbimm & cr0709s & cop0001=0x1 & cop1315=0x6 { if (cr0709s == 0) goto cbimm; } # c.bnez Cs,Cp 0000e001 0000e003 CONDBRANCH (0, 0) :c.bnez cr0709s,cbimm is cbimm & cr0709s & cop0001=0x1 & cop1315=0x7 { if (cr0709s != 0) goto cbimm; } # c.ebreak 00009002 0000ffff SIMPLE (0, 0) :c.ebreak is cop0001=0x2 & cop1315=0x4 & cop0212=0x400 { ebreak(); } @if ADDRSIZE == "32" || ADDRSIZE == "64" @if FPSIZE == "64" # c.fld CD,Cl(Cs) 00002000 0000e003 QWORD|DREF (0, 8) :c.fld cfr0204s,cldimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x1 & cldimm { local ea:$(XLEN) = cldimm:$(XLEN) + cr0709s; cfr0204s = *[ram]:$(DFLEN) ea; } # c.fldsp D,Cn(Cc) 00002002 0000e003 QWORD|DREF (0, 8) :c.fldsp cfrd,cldspimm(sp) is cfrd & sp & cop0001=0x2 & cop1315=0x1 & cldspimm { local ea:$(XLEN) = cldspimm:$(XLEN) + sp; cfrd = *[ram]:$(DFLEN) ea; } @endif @endif @if ADDRSIZE == "32" # c.flw CD,Ck(Cs) 00006000 0000e003 
DWORD|DREF (32, 4) :c.flw cfr0204s,clwimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x3 & clwimm { local ea:$(XLEN) = clwimm:$(XLEN) + cr0709s; cfr0204s = *[ram]:$(SFLEN) ea; } # c.flwsp D,Cm(Cc) 00006002 0000e003 DWORD|DREF (32, 4) :c.flwsp cfrd,clwspimm(sp) is cfrd & sp & cop0001=0x2 & cop1315=0x3 & clwspimm { local ea:$(XLEN) = clwspimm:$(XLEN) + sp; cfrd = *[ram]:$(SFLEN) ea; } @endif @if ADDRSIZE == "32" || ADDRSIZE == "64" @if FPSIZE == "64" # c.fsd CD,Cl(Cs) 0000a000 0000e003 QWORD|DREF (0, 8) :c.fsd cfr0204s,cldimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x5 & cldimm { local ea:$(XLEN) = cldimm + cr0709s; *[ram]:8 ea = cfr0204s; } # c.fsdsp CT,CN(Cc) 0000a002 0000e003 QWORD|DREF (0, 8) :c.fsdsp cfr0206,csdspimm(sp) is cfr0206 & sp & cop0001=0x2 & cop1315=0x5 & csdspimm { local ea:$(XLEN) = csdspimm + sp; *[ram]:8 ea = cfr0206; } @endif @endif @if ADDRSIZE == "32" @if FPSIZE == "32" || FPSIZE == "64" # c.fsw CD,Ck(Cs) 0000e000 0000e003 DWORD|DREF (32, 4) :c.fsw cfr0204s,clwimm(cr0709s) is cfr0204s & cr0709s & cop0001=0x0 & cop1315=0x7 & clwimm { local ea:$(XLEN) = clwimm + cr0709s; *[ram]:4 ea = cfr0204s; } # c.fswsp CT,CM(Cc) 0000e002 0000e003 DWORD|DREF (32, 4) :c.fswsp cfr0206,cswspimm(sp) is cfr0206 & sp & cop0001=0x2 & cop1315=0x7 & cswspimm { local ea:$(XLEN) = cswspimm + sp; *[ram]:4 ea = cfr0206:4; } @endif @endif # c.j Ca 0000a001 0000e003 BRANCH (0, 0) :c.j cjimm is cjimm & cop0001=0x1 & cop1315=0x5 { goto cjimm; } @if ADDRSIZE == "32" # c.jal Ca 00002001 0000e003 JSR (32, 0) :c.jal cjimm is cjimm & cop0001=0x1 & cop1315=0x1 { ra = inst_next; call cjimm; } @endif # c.jalr d 00009002 0000f07f JSR (0, 0) :c.jalr crd is crd & cop0001=0x2 & cop1315=0x4 & cop0206=0x0 & cop1212=0x1 { ra = inst_next; call [crd]; } # c.jr d 00008002 0000f07f BRANCH (0, 0) :c.jr crd is crd & cop0001=0x2 & cop1315=0x4 & cop0206=0x0 & cop1212=0x0 { goto [crd]; } # ret 00008082 0000ffff BRANCH|ALIAS (0, 0) :ret is cop0001=0x2 & cop1315=0x4 & 
cop0206=0x0 & cop1212=0x0 & cop0711=1
{
  return [ra];
}

@if (ADDRSIZE == "64") || (ADDRSIZE == "128")
# c.ld Ct,Cl(Cs) 00006000 0000e003 QWORD|DREF (64, 8)
:c.ld cr0204s,cldimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x3 & cldimm
{
  local ea:$(XLEN) = cldimm:$(XLEN) + cr0709s;
  assignD(cr0204s, *[ram]:$(DXLEN) ea);
}
@endif

@if ADDRSIZE == "128"
:c.lq cr0204s,clqimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x1 & clqimm
{
  local ea:$(XLEN) = clqimm:$(XLEN) + cr0709s;
  cr0204s = *[ram]:$(QXLEN) ea;
}
@endif

@if (ADDRSIZE == "64") || (ADDRSIZE == "128")
# c.ldsp d,Cn(Cc) 00006002 0000e003 QWORD|DREF (64, 8)
:c.ldsp crd,cldspimm(sp) is crd & sp & cop0001=0x2 & cop1315=0x3 & cldspimm
{
  local ea:$(XLEN) = cldspimm + sp;
  assignD(crd, *[ram]:$(DXLEN) ea);
}
@endif

@if ADDRSIZE == "128"
:c.lqsp crd,clqspimm(sp) is crd & sp & cop0001=0x2 & cop1315=0x1 & clqspimm
{
  local ea:$(XLEN) = clqspimm + sp;
  crd = *[ram]:$(QXLEN) ea;
}
@endif

# c.li d,Co 00004001 0000e003 SIMPLE (0, 0)
:c.li crd,cimmI is crd & cimmI & cop0001=0x1 & cop1315=0x2
{
  crd = cimmI;
}

# c.lui d,Cu 00006001 0000e003 SIMPLE (0, 0)
:c.lui cd0711NoSp,cbigimm is cd0711NoSp & cbigimm & cop0001=0x1 & cop1315=0x3
{
  cd0711NoSp = cbigimm;
}

# c.lw Ct,Ck(Cs) 00004000 0000e003 DWORD|DREF (0, 4)
:c.lw cr0204s,clwimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x2 & clwimm
{
  local ea:$(XLEN) = clwimm + cr0709s;
  assignW(cr0204s, *[ram]:4 ea);
}

# c.lwsp d,Cm(Cc) 00004002 0000e003 SIMPLE (0, 0)
:c.lwsp crd,clwspimm(sp) is crd & sp & cop0001=0x2 & cop1315=0x2 & clwspimm
{
  local ea:$(XLEN) = clwspimm + sp;
  assignW(crd, *[ram]:4 ea);
}

# c.mv d,CV 00008002 0000f003 SIMPLE (0, 0)
:c.mv crd,crs2 is crd & crs2 & cop0001=0x2 & cop1315=0x4 & cop1212=0x0
{
  crd = crs2;
}

# c.or Cs,Ct 00008c41 0000fc63 SIMPLE (0, 0)
:c.or cr0709s,cr0204s is cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x2 & cop1012=0x3
{
  cr0709s = cr0709s | cr0204s;
}

@if (ADDRSIZE == "64") || (ADDRSIZE == "128")
# c.sd
# Ct,Cl(Cs) 0000e000 0000e003 QWORD|DREF (64, 8)
:c.sd cr0204s,cldimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x7 & cldimm
{
  local ea:$(XLEN) = cldimm:$(XLEN) + cr0709s;
  *[ram]:$(DXLEN) ea = cr0204s:$(DXLEN);
}

# c.sdsp CV,CN(Cc) 0000e002 0000e003 QWORD|DREF (64, 8)
:c.sdsp crs2,csdspimm(sp) is crs2 & sp & cop0001=0x2 & cop1315=0x7 & csdspimm
{
  local ea:$(XLEN) = csdspimm:$(XLEN) + sp;
  *[ram]:$(DXLEN) ea = crs2:$(DXLEN);
}
@endif

# c.slli d,C> 00000002 0000e003 SIMPLE (0, 0)
:c.slli crd,c6imm is crd & c6imm & cop0001=0x2 & cop1315=0x0
{
  crd = crd << c6imm;
}
#TODO hint?

# c.slli64 d 00000002 0000f07f SIMPLE (0, 0)
:c.slli64 crd is crd & cop0001=0x2 & cop1315=0x0 & cop0206=0x0 & cop1212=0x0
{
  crd = crd << 0;
}

# c.srai Cs,C> 00008401 0000ec03 SIMPLE (0, 0)
:c.srai cr0709s,c6imm is c6imm & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x1
{
  cr0709s = cr0709s s>> c6imm;
}
#TODO hint?

# c.srai64 Cs 00008401 0000fc7f SIMPLE (0, 0)
:c.srai64 cr0709s is cr0709s & cop0001=0x1 & cop1315=0x4 & cop0206=0x0 & cop1012=0x1
{
  cr0709s = cr0709s s>> 0;
}

# c.srli Cs,C> 00008001 0000ec03 SIMPLE (0, 0)
:c.srli cr0709s,c6imm is c6imm & cr0709s & cop0001=0x1 & cop1315=0x4 & cop1011=0x0
{
  cr0709s = cr0709s >> c6imm;
}
#TODO hint?
# c.srli64 Cs 00008001 0000fc7f SIMPLE (0, 0)
:c.srli64 cr0709s is cr0709s & cop0001=0x1 & cop1315=0x4 & cop0206=0x0 & cop1012=0x0
{
  cr0709s = cr0709s >> 0;
}

# c.sub Cs,Ct 00008c01 0000fc63 SIMPLE (0, 0)
:c.sub cr0709s,cr0204s is cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x0 & cop1012=0x3
{
  cr0709s = cr0709s - cr0204s;
}

@if (ADDRSIZE == "64") || (ADDRSIZE == "128")
# c.subw Cs,Ct 00009c01 0000fc63 SIMPLE (64, 0)
:c.subw cr0709s,cr0204s is cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x0 & cop1012=0x7
{
  local tmp:4 = cr0709s:4 - cr0204s:4;
  cr0709s = sext(tmp);
}
@endif

# c.sw Ct,Ck(Cs) 0000c000 0000e003 DWORD|DREF (0, 4)
:c.sw cr0204s,clwimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x6 & clwimm
{
  local ea:$(XLEN) = clwimm + cr0709s;
  *[ram]:4 ea = cr0204s:4;
}

@if ADDRSIZE == "128"
:c.sq cr0204s,clqimm(cr0709s) is cr0709s & cr0204s & cop0001=0x0 & cop1315=0x5 & clqimm
{
  local ea:$(XLEN) = clqimm + cr0709s;
  *[ram]:16 ea = cr0204s;
}

:c.sqsp crs2,csqspimm(sp) is crs2 & sp & cop0001=0x2 & cop1315=0x5 & csqspimm
{
  local ea:$(XLEN) = csqspimm + sp;
  *[ram]:16 ea = crs2;
}
@endif

# c.swsp CV,CM(Cc) 0000c002 0000e003 DWORD|DREF (0, 4)
:c.swsp crs2,cswspimm(sp) is crs2 & sp & cop0001=0x2 & cop1315=0x6 & cswspimm
{
  local ea:$(XLEN) = cswspimm + sp;
  *[ram]:4 ea = crs2:4;
}

# c.unimp 00000000 0000ffff SIMPLE (0, 0)
# would be better not to decode as it is used as padding
#
# :c.unimp is cop0001=0x0 & cop1315=0x0 & cop0212=0x0
#{
# trap();
#}

# c.xor Cs,Ct 00008c21 0000fc63 SIMPLE (0, 0)
:c.xor cr0709s,cr0204s is cr0204s & cr0709s & cop0001=0x1 & cop1315=0x4 & cop0506=0x1 & cop1012=0x3
{
  cr0709s = cr0709s ^ cr0204s;
}

================================================
FILE: pypcode/processors/RISCV/data/languages/riscv.rvv.sinc
================================================

# Vector
# sew: "e8" is vsew=0 {}
# sew: "e16" is vsew=1 {}
# sew: "e32" is vsew=2 {}
# sew: "e64" is vsew=3 {}
# sew: "e128" is vsew=4 {}
# sew: "e256" is vsew=5 {}
#
sew: "e512" is vsew=6 {} # sew: "e1024" is vsew=7 {} # vmul: "m1" is vlmul=0 {} # vmul: "m2" is vlmul=1 {} # vmul: "m4" is vlmul=2 {} # vmul: "m8" is vlmul=3 {} # vmul: "mf8" is vlmul=5 {} # vmul: "mf4" is vlmul=6 {} # vmul: "mf2" is vlmul=7 {} # vmask: "mu" is vma=0 {} # vmask: "ma" is vma=1 {} # vtail: "tu" is vta=0 {} # vtail: "ta" is vta=1 {} #TODO possible tables # mop=0 unit-stride VLE # mop=2 strided VLSE # mop=3 indexed VLXEI # mop=0 unit-stride VSE # mop=1 indexed-unordered VSUXEI # mop=2 strided VSSE # mop=3 indexed-ordered VSXEI # lumop=0 unit-stride # lumop=8 unit-stride,whole registers # lumop=16 unit-stride fault-only-first # sumop=0 unit-stride # sumop=8 unit-stride,whole registers # sumop=16 unit-stride fault-only-first # nfields imm is nf [ imm = nf + 1; ] { export *[const]:1 imm; } # vaadd.vv 31..26=0x09 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vaadd.vv vd, vs2, vs1, vm # roundoff_signed(vs2[i] + vs1[i], 1) :vaadd.vv vd, vs2, vs1^ vm is op2631=0x9 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vaadd.vx 31..26=0x09 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vaadd.vx vd, vs2, rs1, vm # roundoff_signed(vs2[i] + x[rs1], 1) :vaadd.vx vd, vs2, rs1^ vm is op2631=0x9 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vaaddu.vv 31..26=0x08 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vaaddu.vv vd, vs2, vs1, vm # roundoff_unsigned(vs2[i] + vs1[i], 1) :vaaddu.vv vd, vs2, vs1^ vm is op2631=0x8 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vaaddu.vx 31..26=0x08 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vaaddu.vx vd, vs2, rs1, vm # roundoff_unsigned(vs2[i] + x[rs1], 1) :vaaddu.vx vd, vs2, rs1^ vm is op2631=0x8 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vadc.vim 31..26=0x10 25=0 vs2 simm5 14..12=0x3 vd 6..0=0x57 # vadc.vim vd, vs2, simm5, v0 # Vector-immediate :vadc.vim vd, vs2, simm5, v0 is op2631=0x10 & op2525=0x0 & vs2 & simm5 & op1214=0x3 & v0 & vd & op0006=0x57 unimpl # vadc.vvm 31..26=0x10 25=0 vs2 vs1 14..12=0x0 vd 6..0=0x57 # 
vadc.vvm vd, vs2, vs1, v0 # Vector-vector :vadc.vvm vd, vs2, vs1, v0 is op2631=0x10 & op2525=0x0 & vs2 & vs1 & op1214=0x0 & v0 & vd & op0006=0x57 unimpl # vadc.vxm 31..26=0x10 25=0 vs2 rs1 14..12=0x4 vd 6..0=0x57 # vadc.vxm vd, vs2, rs1, v0 # Vector-scalar :vadc.vxm vd, vs2, rs1, v0 is op2631=0x10 & op2525=0x0 & vs2 & rs1 & op1214=0x4 & v0 & vd & op0006=0x57 unimpl # vadd.vi 31..26=0x00 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vadd.vi vd, vs2, simm5, vm # vector-immediate :vadd.vi vd, vs2, simm5^ vm is op2631=0x0 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vadd.vv 31..26=0x00 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vadd.vv vd, vs2, vs1, vm # Vector-vector :vadd.vv vd, vs2, vs1^ vm is op2631=0x0 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vadd.vx 31..26=0x00 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vadd.vx vd, vs2, rs1, vm # vector-scalar :vadd.vx vd, vs2, rs1^ vm is op2631=0x0 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vamoaddei16.v 31..27=0x00 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoaddei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoaddei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamoaddei16.v 31..27=0x00 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoaddei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoaddei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & vd & op0006=0x2f unimpl # vamoaddei32.v 31..27=0x00 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoaddei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoaddei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamoaddei32.v 31..27=0x00 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoaddei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoaddei32.v zero, (rs1), vs2, vs3^ vm is 
op2731=0x0 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & vd & op0006=0x2f unimpl # vamoaddei64.v 31..27=0x00 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoaddei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoaddei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamoaddei64.v 31..27=0x00 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoaddei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoaddei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & vd & op0006=0x2f unimpl # vamoaddei8.v 31..27=0x00 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoaddei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoaddei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamoaddei8.v 31..27=0x00 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoaddei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoaddei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x0 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & vd & op0006=0x2f unimpl # vamoandei16.v 31..27=0x0c wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoandei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoandei16.v vd, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamoandei16.v 31..27=0x0c wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoandei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoandei16.v zero, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & vd & op0006=0x2f unimpl # vamoandei32.v 31..27=0x0c wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoandei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoandei32.v vd, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x1 & vm & vs2 & rs1 & 
op1214=0x6 & vs3 & vd & op0006=0x2f unimpl

# vamoandei32.v 31..27=0x0c wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f
# vamoandei32.v zero, (rs1), vs2, vs3, vm
# Do not write original value to register, wd=0
# NOTE(review): "& vd" added to the wd=0 constructors on this line for
# consistency with the wd=1 forms and with the vamoadd* wd=0 forms earlier
# in this file, which do constrain vd; the wd=1 variants already combine
# "vs3 & vd", so adding vd should not change matching — confirm against
# upstream Ghidra riscv.rvv.sinc.
:vamoandei32.v zero, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & vd & op0006=0x2f unimpl

# vamoandei64.v 31..27=0x0c wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f
# vamoandei64.v vd, (rs1), vs2, vs3, vm
# Write original value to register, wd=1
:vamoandei64.v vd, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl

# vamoandei64.v 31..27=0x0c wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f
# vamoandei64.v zero, (rs1), vs2, vs3, vm
# Do not write original value to register, wd=0
:vamoandei64.v zero, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & vd & op0006=0x2f unimpl

# vamoandei8.v 31..27=0x0c wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f
# vamoandei8.v vd, (rs1), vs2, vs3, vm
# Write original value to register, wd=1
:vamoandei8.v vd, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl

# vamoandei8.v 31..27=0x0c wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f
# vamoandei8.v zero, (rs1), vs2, vs3, vm
# Do not write original value to register, wd=0
:vamoandei8.v zero, (rs1), vs2, vs3^ vm is op2731=0xc & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & vd & op0006=0x2f unimpl

# vamomaxei16.v 31..27=0x14 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f
# vamomaxei16.v vd, (rs1), vs2, vs3, vm
# Write original value to register, wd=1
:vamomaxei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl

# vamomaxei16.v 31..27=0x14 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f
# vamomaxei16.v zero, (rs1), vs2, vs3, vm
# Do not write original value to register, wd=0
:vamomaxei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & vd & op0006=0x2f unimpl
#
vamomaxei32.v 31..27=0x14 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamomaxei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamomaxei32.v 31..27=0x14 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamomaxei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamomaxei64.v 31..27=0x14 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamomaxei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamomaxei64.v 31..27=0x14 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamomaxei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamomaxei8.v 31..27=0x14 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamomaxei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamomaxei8.v 31..27=0x14 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamomaxei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x14 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & vd & op0006=0x2f unimpl # vamomaxuei16.v 31..27=0x1c wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamomaxuei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxuei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamomaxuei16.v 31..27=0x1c wd vm vs2 rs1 14..12=0x5 vd 
6..0=0x2f # vamomaxuei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxuei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & op0006=0x2f unimpl # vamomaxuei32.v 31..27=0x1c wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamomaxuei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxuei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamomaxuei32.v 31..27=0x1c wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamomaxuei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxuei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamomaxuei64.v 31..27=0x1c wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamomaxuei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxuei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamomaxuei64.v 31..27=0x1c wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamomaxuei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxuei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamomaxuei8.v 31..27=0x1c wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamomaxuei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamomaxuei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamomaxuei8.v 31..27=0x1c wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamomaxuei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamomaxuei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x1c & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & op0006=0x2f unimpl # vamominei16.v 31..27=0x10 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamominei16.v vd, 
(rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamominei16.v 31..27=0x10 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamominei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & op0006=0x2f unimpl # vamominei32.v 31..27=0x10 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamominei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamominei32.v 31..27=0x10 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamominei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamominei64.v 31..27=0x10 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamominei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamominei64.v 31..27=0x10 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamominei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamominei8.v 31..27=0x10 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamominei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamominei8.v 31..27=0x10 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamominei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to 
register, wd=0 :vamominei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x10 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & op0006=0x2f unimpl # vamominuei16.v 31..27=0x18 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamominuei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominuei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamominuei16.v 31..27=0x18 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamominuei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominuei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & op0006=0x2f unimpl # vamominuei32.v 31..27=0x18 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamominuei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominuei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamominuei32.v 31..27=0x18 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamominuei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominuei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamominuei64.v 31..27=0x18 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamominuei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominuei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamominuei64.v 31..27=0x18 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamominuei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominuei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamominuei8.v 31..27=0x18 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamominuei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamominuei8.v vd, 
(rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamominuei8.v 31..27=0x18 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamominuei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamominuei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x18 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & op0006=0x2f unimpl # vamoorei16.v 31..27=0x08 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoorei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoorei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamoorei16.v 31..27=0x08 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoorei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoorei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & op0006=0x2f unimpl # vamoorei32.v 31..27=0x08 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoorei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoorei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamoorei32.v 31..27=0x08 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoorei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoorei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamoorei64.v 31..27=0x08 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoorei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoorei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamoorei64.v 31..27=0x08 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoorei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoorei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x0 & vm & vs2 & rs1 & 
op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamoorei8.v 31..27=0x08 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoorei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoorei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamoorei8.v 31..27=0x08 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoorei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoorei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x8 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & op0006=0x2f unimpl # vamoswapei16.v 31..27=0x01 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoswapei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoswapei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamoswapei16.v 31..27=0x01 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoswapei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoswapei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & op0006=0x2f unimpl # vamoswapei32.v 31..27=0x01 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoswapei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoswapei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamoswapei32.v 31..27=0x01 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoswapei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoswapei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamoswapei64.v 31..27=0x01 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoswapei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoswapei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # 
vamoswapei64.v 31..27=0x01 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoswapei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoswapei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamoswapei8.v 31..27=0x01 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoswapei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoswapei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamoswapei8.v 31..27=0x01 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoswapei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoswapei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x1 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & op0006=0x2f unimpl # vamoxorei16.v 31..27=0x04 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoxorei16.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoxorei16.v vd, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & vd & op0006=0x2f unimpl # vamoxorei16.v 31..27=0x04 wd vm vs2 rs1 14..12=0x5 vd 6..0=0x2f # vamoxorei16.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoxorei16.v zero, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x0 & vm & vs2 & rs1 & op1214=0x5 & zero & vs3 & op0006=0x2f unimpl # vamoxorei32.v 31..27=0x04 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoxorei32.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoxorei32.v vd, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & vd & op0006=0x2f unimpl # vamoxorei32.v 31..27=0x04 wd vm vs2 rs1 14..12=0x6 vd 6..0=0x2f # vamoxorei32.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoxorei32.v zero, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x0 & vm & vs2 & rs1 & op1214=0x6 & zero & vs3 & op0006=0x2f unimpl # vamoxorei64.v 31..27=0x04 wd vm vs2 rs1 
14..12=0x7 vd 6..0=0x2f # vamoxorei64.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoxorei64.v vd, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & vd & op0006=0x2f unimpl # vamoxorei64.v 31..27=0x04 wd vm vs2 rs1 14..12=0x7 vd 6..0=0x2f # vamoxorei64.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoxorei64.v zero, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x0 & vm & vs2 & rs1 & op1214=0x7 & zero & vs3 & op0006=0x2f unimpl # vamoxorei8.v 31..27=0x04 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoxorei8.v vd, (rs1), vs2, vs3, vm # Write original value to register, wd=1 :vamoxorei8.v vd, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & vd & op0006=0x2f unimpl # vamoxorei8.v 31..27=0x04 wd vm vs2 rs1 14..12=0x0 vd 6..0=0x2f # vamoxorei8.v zero, (rs1), vs2, vs3, vm # Do not write original value to register, wd=0 :vamoxorei8.v zero, (rs1), vs2, vs3^ vm is op2731=0x4 & wd=0x0 & vm & vs2 & rs1 & op1214=0x0 & zero & vs3 & vd & op0006=0x2f unimpl # vand.vi 31..26=0x09 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vand.vi vd, vs2, simm5, vm # vector-immediate :vand.vi vd, vs2, simm5^ vm is op2631=0x9 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vand.vv 31..26=0x09 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vand.vv vd, vs2, vs1, vm # Vector-vector :vand.vv vd, vs2, vs1^ vm is op2631=0x9 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vand.vx 31..26=0x09 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vand.vx vd, vs2, rs1, vm # vector-scalar :vand.vx vd, vs2, rs1^ vm is op2631=0x9 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vasub.vv 31..26=0x0b vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vasub.vv vd, vs2, vs1, vm # roundoff_signed(vs2[i] - vs1[i], 1) :vasub.vv vd, vs2, vs1^ vm is op2631=0xb & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vasub.vx 31..26=0x0b vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vasub.vx vd, vs2, rs1, vm # roundoff_signed(vs2[i] - 
x[rs1], 1) :vasub.vx vd, vs2, rs1^ vm is op2631=0xb & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vasubu.vv 31..26=0x0a vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vasubu.vv vd, vs2, vs1, vm # roundoff_unsigned(vs2[i] - vs1[i], 1) :vasubu.vv vd, vs2, vs1^ vm is op2631=0xa & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vasubu.vx 31..26=0x0a vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vasubu.vx vd, vs2, rs1, vm # roundoff_unsigned(vs2[i] - x[rs1], 1) :vasubu.vx vd, vs2, rs1^ vm is op2631=0xa & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vcompress.vm 31..26=0x17 25=1 vs2 vs1 14..12=0x2 vd 6..0=0x57 # vcompress.vm vd, vs2, vs1 # Compress into vd elements of vs2 where vs1 is enabled :vcompress.vm vd, vs2, vs1 is op2631=0x17 & op2525=0x1 & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vdiv.vv 31..26=0x21 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vdiv.vv vd, vs2, vs1, vm # Vector-vector :vdiv.vv vd, vs2, vs1^ vm is op2631=0x21 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vdiv.vx 31..26=0x21 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vdiv.vx vd, vs2, rs1, vm # vector-scalar :vdiv.vx vd, vs2, rs1^ vm is op2631=0x21 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vdivu.vv 31..26=0x20 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vdivu.vv vd, vs2, vs1, vm # Vector-vector :vdivu.vv vd, vs2, vs1^ vm is op2631=0x20 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vdivu.vx 31..26=0x20 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vdivu.vx vd, vs2, rs1, vm # vector-scalar :vdivu.vx vd, vs2, rs1^ vm is op2631=0x20 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vdot.vv 31..26=0x39 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vdot.vv vd, vs2, vs1, vm # Vector-vector :vdot.vv vd, vs2, vs1^ vm is op2631=0x39 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vdotu.vv 31..26=0x38 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vdotu.vv vd, vs2, vs1, vm # Vector-vector :vdotu.vv vd, vs2, vs1^ vm is op2631=0x38 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 
unimpl # vfadd.vf 31..26=0x00 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfadd.vf vd, vs2, rs1, vm # vector-scalar :vfadd.vf vd, vs2, rs1^ vm is op2631=0x0 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfadd.vv 31..26=0x00 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfadd.vv vd, vs2, vs1, vm # Vector-vector :vfadd.vv vd, vs2, vs1^ vm is op2631=0x0 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfclass.v 31..26=0x13 vm vs2 19..15=0x10 14..12=0x1 vd 6..0=0x57 # vfclass.v vd, vs2, vm # Vector-vector :vfclass.v vd, vs2^ vm is op2631=0x13 & vm & vs2 & op1519=0x10 & op1214=0x1 & vd & op0006=0x57 unimpl # vfcvt.f.x.v 31..26=0x12 vm vs2 19..15=0x03 14..12=0x1 vd 6..0=0x57 # vfcvt.f.x.v vd, vs2, vm # Convert signed integer to float. :vfcvt.f.x.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x3 & op1214=0x1 & vd & op0006=0x57 unimpl # vfcvt.f.xu.v 31..26=0x12 vm vs2 19..15=0x02 14..12=0x1 vd 6..0=0x57 # vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. :vfcvt.f.xu.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x2 & op1214=0x1 & vd & op0006=0x57 unimpl # vfcvt.rtz.x.f.v 31..26=0x12 vm vs2 19..15=0x07 14..12=0x1 vd 6..0=0x57 # vfcvt.rtz.x.f.v vd, vs2, vm # Convert float to signed integer, truncating. :vfcvt.rtz.x.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x7 & op1214=0x1 & vd & op0006=0x57 unimpl # vfcvt.rtz.xu.f.v 31..26=0x12 vm vs2 19..15=0x06 14..12=0x1 vd 6..0=0x57 # vfcvt.rtz.xu.f.v vd, vs2, vm # Convert float to unsigned integer, truncating. :vfcvt.rtz.xu.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x6 & op1214=0x1 & vd & op0006=0x57 unimpl # vfcvt.x.f.v 31..26=0x12 vm vs2 19..15=0x01 14..12=0x1 vd 6..0=0x57 # vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. :vfcvt.x.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfcvt.xu.f.v 31..26=0x12 vm vs2 19..15=0x00 14..12=0x1 vd 6..0=0x57 # vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. 
# NOTE(review): auto-generated RISC-V V-extension stubs (float divide / dot / FMA / move / narrowing-convert group).
# Bit-field constraints mirror the riscv-opcodes encodings in the adjacent comments; semantics are intentionally `unimpl`.
:vfcvt.xu.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x0 & op1214=0x1 & vd & op0006=0x57 unimpl # vfdiv.vf 31..26=0x20 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfdiv.vf vd, vs2, rs1, vm # vector-scalar :vfdiv.vf vd, vs2, rs1^ vm is op2631=0x20 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfdiv.vv 31..26=0x20 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfdiv.vv vd, vs2, vs1, vm # Vector-vector :vfdiv.vv vd, vs2, vs1^ vm is op2631=0x20 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfdot.vv 31..26=0x39 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfdot.vv vd, vs2, vs1, vm # Vector-vector :vfdot.vv vd, vs2, vs1^ vm is op2631=0x39 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfirst.m 31..26=0x10 vm vs2 19..15=0x11 14..12=0x2 rd 6..0=0x57 # vfirst.m rd, vs2, vm :vfirst.m rd, vs2^ vm is op2631=0x10 & vm & vs2 & op1519=0x11 & op1214=0x2 & rd & op0006=0x57 unimpl # vfmacc.vf 31..26=0x2c vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmacc.vf vd, rs1, vs2, vm # vd[i] = +(f[rs1] * vs2[i]) + vd[i] :vfmacc.vf vd, rs1, vs2^ vm is op2631=0x2c & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmacc.vv 31..26=0x2c vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmacc.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vfmacc.vv vd, vs1, vs2^ vm is op2631=0x2c & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmadd.vf 31..26=0x28 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmadd.vf vd, rs1, vs2, vm # vd[i] = +(f[rs1] * vd[i]) + vs2[i] :vfmadd.vf vd, rs1, vs2^ vm is op2631=0x28 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmadd.vv 31..26=0x28 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmadd.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vd[i]) + vs2[i] :vfmadd.vv vd, vs1, vs2^ vm is op2631=0x28 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmax.vf 31..26=0x06 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmax.vf vd, vs2, rs1, vm # vector-scalar :vfmax.vf vd, vs2, rs1^ vm is op2631=0x6 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmax.vv 
31..26=0x06 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmax.vv vd, vs2, vs1, vm # Vector-vector :vfmax.vv vd, vs2, vs1^ vm is op2631=0x6 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmerge.vfm 31..26=0x17 25=0 vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmerge.vfm vd, vs2, rs1, v0 # vd[i] = v0.mask[i] ? f[rs1] : vs2[i] :vfmerge.vfm vd, vs2, rs1, v0 is op2631=0x17 & op2525=0x0 & vs2 & rs1 & op1214=0x5 & v0 & vd & op0006=0x57 unimpl # vfmin.vf 31..26=0x04 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmin.vf vd, vs2, rs1, vm # vector-scalar :vfmin.vf vd, vs2, rs1^ vm is op2631=0x4 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmin.vv 31..26=0x04 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmin.vv vd, vs2, vs1, vm # Vector-vector :vfmin.vv vd, vs2, vs1^ vm is op2631=0x4 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmsac.vf 31..26=0x2e vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmsac.vf vd, rs1, vs2, vm # vd[i] = +(f[rs1] * vs2[i]) - vd[i] :vfmsac.vf vd, rs1, vs2^ vm is op2631=0x2e & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmsac.vv 31..26=0x2e vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmsac.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) - vd[i] :vfmsac.vv vd, vs1, vs2^ vm is op2631=0x2e & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmsub.vf 31..26=0x2a vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmsub.vf vd, rs1, vs2, vm # vd[i] = +(f[rs1] * vd[i]) - vs2[i] :vfmsub.vf vd, rs1, vs2^ vm is op2631=0x2a & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmsub.vv 31..26=0x2a vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmsub.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vd[i]) - vs2[i] :vfmsub.vv vd, vs1, vs2^ vm is op2631=0x2a & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmul.vf 31..26=0x24 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfmul.vf vd, vs2, rs1, vm # vector-scalar :vfmul.vf vd, vs2, rs1^ vm is op2631=0x24 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmul.vv 31..26=0x24 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfmul.vv 
vd, vs2, vs1, vm # Vector-vector :vfmul.vv vd, vs2, vs1^ vm is op2631=0x24 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfmv.f.s 31..26=0x10 25=1 vs2 19..15=0 14..12=0x1 rd 6..0=0x57 # vfmv.f.s rd, vs2 # f[rd] = vs2[0] (rs1=0) :vfmv.f.s rd, vs2 is op2631=0x10 & op2525=0x1 & vs2 & op1519=0x0 & op1214=0x1 & rd & op0006=0x57 unimpl # vfmv.s.f 31..26=0x10 25=1 24..20=0 rs1 14..12=0x5 vd 6..0=0x57 # vfmv.s.f vd, rs1 # vd[0] = f[rs1] (vs2=0) :vfmv.s.f vd, rs1 is op2631=0x10 & op2525=0x1 & op2024=0x0 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfmv.v.f 31..26=0x17 25=1 24..20=0 rs1 14..12=0x5 vd 6..0=0x57 # vfmv.v.f vd, rs1 # vd[i] = f[rs1] :vfmv.v.f vd, rs1 is op2631=0x17 & op2525=0x1 & op2024=0x0 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfncvt.f.f.w 31..26=0x12 vm vs2 19..15=0x14 14..12=0x1 vd 6..0=0x57 # vfncvt.f.f.w vd, vs2, vm # Convert double-width float to single-width float. :vfncvt.f.f.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x14 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.f.x.w 31..26=0x12 vm vs2 19..15=0x13 14..12=0x1 vd 6..0=0x57 # vfncvt.f.x.w vd, vs2, vm # Convert double-width signed integer to float. :vfncvt.f.x.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x13 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.f.xu.w 31..26=0x12 vm vs2 19..15=0x12 14..12=0x1 vd 6..0=0x57 # vfncvt.f.xu.w vd, vs2, vm # Convert double-width unsigned integer to float. :vfncvt.f.xu.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x12 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.rod.f.f.w 31..26=0x12 vm vs2 19..15=0x15 14..12=0x1 vd 6..0=0x57 # vfncvt.rod.f.f.w vd, vs2, vm # Convert double-width float to single-width float, rounding towards odd. :vfncvt.rod.f.f.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x15 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.rtz.x.f.w 31..26=0x12 vm vs2 19..15=0x17 14..12=0x1 vd 6..0=0x57 # vfncvt.rtz.x.f.w vd, vs2, vm # Convert double-width float to signed integer, truncating. 
:vfncvt.rtz.x.f.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x17 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.rtz.xu.f.w 31..26=0x12 vm vs2 19..15=0x16 14..12=0x1 vd 6..0=0x57 # vfncvt.rtz.xu.f.w vd, vs2, vm # Convert double-width float to unsigned integer, truncating. :vfncvt.rtz.xu.f.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x16 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.x.f.w 31..26=0x12 vm vs2 19..15=0x11 14..12=0x1 vd 6..0=0x57 # vfncvt.x.f.w vd, vs2, vm # Convert double-width float to signed integer. :vfncvt.x.f.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x11 & op1214=0x1 & vd & op0006=0x57 unimpl # vfncvt.xu.f.w 31..26=0x12 vm vs2 19..15=0x10 14..12=0x1 vd 6..0=0x57 # vfncvt.xu.f.w vd, vs2, vm # Convert double-width float to unsigned integer. :vfncvt.xu.f.w vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x10 & op1214=0x1 & vd & op0006=0x57 unimpl # vfnmacc.vf 31..26=0x2d vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfnmacc.vf vd, rs1, vs2, vm # vd[i] = -(f[rs1] * vs2[i]) - vd[i] :vfnmacc.vf vd, rs1, vs2^ vm is op2631=0x2d & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfnmacc.vv 31..26=0x2d vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfnmacc.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vs2[i]) - vd[i] :vfnmacc.vv vd, vs1, vs2^ vm is op2631=0x2d & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfnmadd.vf 31..26=0x29 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfnmadd.vf vd, rs1, vs2, vm # vd[i] = -(f[rs1] * vd[i]) - vs2[i] :vfnmadd.vf vd, rs1, vs2^ vm is op2631=0x29 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfnmadd.vv 31..26=0x29 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfnmadd.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vd[i]) - vs2[i] :vfnmadd.vv vd, vs1, vs2^ vm is op2631=0x29 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfnmsac.vf 31..26=0x2f vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfnmsac.vf vd, rs1, vs2, vm # vd[i] = -(f[rs1] * vs2[i]) + vd[i] :vfnmsac.vf vd, rs1, vs2^ vm is op2631=0x2f & vm & vs2 & rs1 & 
op1214=0x5 & vd & op0006=0x57 unimpl # vfnmsac.vv 31..26=0x2f vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfnmsac.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vs2[i]) + vd[i] :vfnmsac.vv vd, vs1, vs2^ vm is op2631=0x2f & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfnmsub.vf 31..26=0x2b vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfnmsub.vf vd, rs1, vs2, vm # vd[i] = -(f[rs1] * vd[i]) + vs2[i] :vfnmsub.vf vd, rs1, vs2^ vm is op2631=0x2b & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfnmsub.vv 31..26=0x2b vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfnmsub.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vd[i]) + vs2[i] :vfnmsub.vv vd, vs1, vs2^ vm is op2631=0x2b & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfrdiv.vf 31..26=0x21 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfrdiv.vf vd, vs2, rs1, vm # scalar-vector, vd[i] = f[rs1]/vs2[i] :vfrdiv.vf vd, vs2, rs1^ vm is op2631=0x21 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfredmax.vs 31..26=0x07 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfredmax.vs vd, vs2, vs1, vm # Maximum value :vfredmax.vs vd, vs2, vs1^ vm is op2631=0x7 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfredmin.vs 31..26=0x05 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfredmin.vs vd, vs2, vs1, vm # Minimum value :vfredmin.vs vd, vs2, vs1^ vm is op2631=0x5 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfredosum.vs 31..26=0x03 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfredosum.vs vd, vs2, vs1, vm # Ordered sum :vfredosum.vs vd, vs2, vs1^ vm is op2631=0x3 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfredsum.vs 31..26=0x01 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfredsum.vs vd, vs2, vs1, vm # Unordered sum :vfredsum.vs vd, vs2, vs1^ vm is op2631=0x1 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfrsub.vf 31..26=0x27 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfrsub.vf vd, vs2, rs1, vm # Scalar-vector vd[i] = f[rs1] - vs2[i] :vfrsub.vf vd, vs2, rs1^ vm is op2631=0x27 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 
unimpl # vfsgnj.vf 31..26=0x08 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfsgnj.vf vd, vs2, rs1, vm # vector-scalar :vfsgnj.vf vd, vs2, rs1^ vm is op2631=0x8 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfsgnj.vv 31..26=0x08 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfsgnj.vv vd, vs2, vs1, vm # Vector-vector :vfsgnj.vv vd, vs2, vs1^ vm is op2631=0x8 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfsgnjn.vf 31..26=0x09 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfsgnjn.vf vd, vs2, rs1, vm # vector-scalar :vfsgnjn.vf vd, vs2, rs1^ vm is op2631=0x9 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfsgnjn.vv 31..26=0x09 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfsgnjn.vv vd, vs2, vs1, vm # Vector-vector :vfsgnjn.vv vd, vs2, vs1^ vm is op2631=0x9 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfsgnjx.vf 31..26=0x0a vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfsgnjx.vf vd, vs2, rs1, vm # vector-scalar :vfsgnjx.vf vd, vs2, rs1^ vm is op2631=0xa & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfsgnjx.vv 31..26=0x0a vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfsgnjx.vv vd, vs2, vs1, vm # Vector-vector :vfsgnjx.vv vd, vs2, vs1^ vm is op2631=0xa & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfslide1down.vf 31..26=0x0f vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] :vfslide1down.vf vd, vs2, rs1^ vm is op2631=0xf & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfslide1up.vf 31..26=0x0e vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] :vfslide1up.vf vd, vs2, rs1^ vm is op2631=0xe & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfsqrt.v 31..26=0x13 vm vs2 19..15=0x00 14..12=0x1 vd 6..0=0x57 # vfsqrt.v vd, vs2, vm # Vector-vector square root :vfsqrt.v vd, vs2^ vm is op2631=0x13 & vm & vs2 & op1519=0x0 & op1214=0x1 & vd & op0006=0x57 unimpl # vfsub.vf 31..26=0x02 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfsub.vf vd, vs2, 
rs1, vm # Vector-scalar vd[i] = vs2[i] - f[rs1] :vfsub.vf vd, vs2, rs1^ vm is op2631=0x2 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfsub.vv 31..26=0x02 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfsub.vv vd, vs2, vs1, vm # Vector-vector :vfsub.vv vd, vs2, vs1^ vm is op2631=0x2 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwadd.vf 31..26=0x30 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwadd.vf vd, vs2, rs1, vm # vector-scalar :vfwadd.vf vd, vs2, rs1^ vm is op2631=0x30 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwadd.vv 31..26=0x30 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwadd.vv vd, vs2, vs1, vm # vector-vector :vfwadd.vv vd, vs2, vs1^ vm is op2631=0x30 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwadd.wf 31..26=0x34 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwadd.wf vd, vs2, rs1, vm # vector-scalar :vfwadd.wf vd, vs2, rs1^ vm is op2631=0x34 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwadd.wv 31..26=0x34 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwadd.wv vd, vs2, vs1, vm # vector-vector :vfwadd.wv vd, vs2, vs1^ vm is op2631=0x34 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.f.f.v 31..26=0x12 vm vs2 19..15=0x0C 14..12=0x1 vd 6..0=0x57 # vfwcvt.f.f.v vd, vs2, vm # Convert single-width float to double-width float. :vfwcvt.f.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0xc & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.f.x.v 31..26=0x12 vm vs2 19..15=0x0B 14..12=0x1 vd 6..0=0x57 # vfwcvt.f.x.v vd, vs2, vm # Convert signed integer to double-width float. :vfwcvt.f.x.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0xb & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.f.xu.v 31..26=0x12 vm vs2 19..15=0x0A 14..12=0x1 vd 6..0=0x57 # vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float. 
# NOTE(review): auto-generated stubs for the V-extension widening float/integer conversions (vfwcvt.*),
# funct6 0x12 with the conversion selected by bits 19..15; all deliberately `unimpl`.
:vfwcvt.f.xu.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0xa & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.rtz.x.f.v 31..26=0x12 vm vs2 19..15=0x0F 14..12=0x1 vd 6..0=0x57 # vfwcvt.rtz.x.f.v vd, vs2, vm # Convert float to double-width signed integer, truncating. :vfwcvt.rtz.x.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0xf & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.rtz.xu.f.v 31..26=0x12 vm vs2 19..15=0x0E 14..12=0x1 vd 6..0=0x57 # vfwcvt.rtz.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer, truncating. :vfwcvt.rtz.xu.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0xe & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.x.f.v 31..26=0x12 vm vs2 19..15=0x09 14..12=0x1 vd 6..0=0x57 # vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. :vfwcvt.x.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x9 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwcvt.xu.f.v 31..26=0x12 vm vs2 19..15=0x08 14..12=0x1 vd 6..0=0x57 # vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer. 
:vfwcvt.xu.f.v vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x8 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwmacc.vf 31..26=0x3c vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwmacc.vf vd, rs1, vs2, vm # vd[i] = +(f[rs1] * vs2[i]) + vd[i] :vfwmacc.vf vd, rs1, vs2^ vm is op2631=0x3c & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwmacc.vv 31..26=0x3c vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwmacc.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vfwmacc.vv vd, vs1, vs2^ vm is op2631=0x3c & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwmsac.vf 31..26=0x3e vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwmsac.vf vd, rs1, vs2, vm # vd[i] = +(f[rs1] * vs2[i]) - vd[i] :vfwmsac.vf vd, rs1, vs2^ vm is op2631=0x3e & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwmsac.vv 31..26=0x3e vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwmsac.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) - vd[i] :vfwmsac.vv vd, vs1, vs2^ vm is op2631=0x3e & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwmul.vf 31..26=0x38 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwmul.vf vd, vs2, rs1, vm # vector-scalar :vfwmul.vf vd, vs2, rs1^ vm is op2631=0x38 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwmul.vv 31..26=0x38 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwmul.vv vd, vs2, vs1, vm # vector-vector :vfwmul.vv vd, vs2, vs1^ vm is op2631=0x38 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwnmacc.vf 31..26=0x3d vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwnmacc.vf vd, rs1, vs2, vm # vd[i] = -(f[rs1] * vs2[i]) - vd[i] :vfwnmacc.vf vd, rs1, vs2^ vm is op2631=0x3d & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwnmacc.vv 31..26=0x3d vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwnmacc.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vs2[i]) - vd[i] :vfwnmacc.vv vd, vs1, vs2^ vm is op2631=0x3d & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwnmsac.vf 31..26=0x3f vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwnmsac.vf vd, rs1, vs2, vm # vd[i] = -(f[rs1] * vs2[i]) 
+ vd[i] :vfwnmsac.vf vd, rs1, vs2^ vm is op2631=0x3f & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwnmsac.vv 31..26=0x3f vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwnmsac.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vs2[i]) + vd[i] :vfwnmsac.vv vd, vs1, vs2^ vm is op2631=0x3f & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwredosum.vs 31..26=0x33 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwredosum.vs vd, vs2, vs1, vm # Ordered sum :vfwredosum.vs vd, vs2, vs1^ vm is op2631=0x33 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwredsum.vs 31..26=0x31 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwredsum.vs vd, vs2, vs1, vm # Unordered sum :vfwredsum.vs vd, vs2, vs1^ vm is op2631=0x31 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwsub.vf 31..26=0x32 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwsub.vf vd, vs2, rs1, vm # vector-scalar :vfwsub.vf vd, vs2, rs1^ vm is op2631=0x32 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwsub.vv 31..26=0x32 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwsub.vv vd, vs2, vs1, vm # vector-vector :vfwsub.vv vd, vs2, vs1^ vm is op2631=0x32 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vfwsub.wf 31..26=0x36 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vfwsub.wf vd, vs2, rs1, vm # vector-scalar :vfwsub.wf vd, vs2, rs1^ vm is op2631=0x36 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vfwsub.wv 31..26=0x36 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vfwsub.wv vd, vs2, vs1, vm # vector-vector :vfwsub.wv vd, vs2, vs1^ vm is op2631=0x36 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vid.v 31..26=0x14 vm 24..20=0 19..15=0x11 14..12=0x2 vd 6..0=0x57 # vid.v vd, vm # Write element ID to destination. 
:vid.v vd^ vm is op2631=0x14 & vm & op2024=0x0 & op1519=0x11 & op1214=0x2 & vd & op0006=0x57 unimpl # viota.m 31..26=0x14 vm vs2 19..15=0x10 14..12=0x2 vd 6..0=0x57 # viota.m vd, vs2, vm :viota.m vd, vs2^ vm is op2631=0x14 & vm & vs2 & op1519=0x10 & op1214=0x2 & vd & op0006=0x57 unimpl # vl1re16.v 31..29=0 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x5 vd 6..0=0x07 # vl1re16.v vd, (rs1) :vl1re16.v vd, (rs1) is op2931=0x0 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vl1re32.v 31..29=0 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x6 vd 6..0=0x07 # vl1re32.v vd, (rs1) :vl1re32.v vd, (rs1) is op2931=0x0 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vl1re64.v 31..29=0 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x7 vd 6..0=0x07 # vl1re64.v vd, (rs1) :vl1re64.v vd, (rs1) is op2931=0x0 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vl1re8.v 31..29=0 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vd 6..0=0x07 # vl1re8.v vd, (rs1) :vl1re8.v vd, (rs1) is op2931=0x0 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vl2re16.v 31..29=1 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x5 vd 6..0=0x07 # vl2re16.v vd, (rs1) :vl2re16.v vd, (rs1) is op2931=0x1 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vl2re32.v 31..29=1 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x6 vd 6..0=0x07 # vl2re32.v vd, (rs1) :vl2re32.v vd, (rs1) is op2931=0x1 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vl2re64.v 31..29=1 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x7 vd 6..0=0x07 # vl2re64.v vd, (rs1) :vl2re64.v vd, (rs1) is op2931=0x1 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vl2re8.v 31..29=1 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vd 6..0=0x07 # 
vl2re8.v vd, (rs1) :vl2re8.v vd, (rs1) is op2931=0x1 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vl4re16.v 31..29=3 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x5 vd 6..0=0x07 # vl4re16.v vd, (rs1) :vl4re16.v vd, (rs1) is op2931=0x3 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vl4re32.v 31..29=3 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x6 vd 6..0=0x07 # vl4re32.v vd, (rs1) :vl4re32.v vd, (rs1) is op2931=0x3 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vl4re64.v 31..29=3 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x7 vd 6..0=0x07 # vl4re64.v vd, (rs1) :vl4re64.v vd, (rs1) is op2931=0x3 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vl4re8.v 31..29=3 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vd 6..0=0x07 # vl4re8.v vd, (rs1) :vl4re8.v vd, (rs1) is op2931=0x3 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vl8re16.v 31..29=7 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x5 vd 6..0=0x07 # vl8re16.v vd, (rs1) :vl8re16.v vd, (rs1) is op2931=0x7 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vl8re32.v 31..29=7 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x6 vd 6..0=0x07 # vl8re32.v vd, (rs1) :vl8re32.v vd, (rs1) is op2931=0x7 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vl8re64.v 31..29=7 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x7 vd 6..0=0x07 # vl8re64.v vd, (rs1) :vl8re64.v vd, (rs1) is op2931=0x7 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vl8re8.v 31..29=7 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vd 6..0=0x07 # vl8re8.v vd, (rs1) :vl8re8.v vd, (rs1) is op2931=0x7 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vd & op0006=0x7 
unimpl # vle1024.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x7 vd 6..0=0x07 # vle1024.v vd, (rs1), vm # 1024-bit unit-stride load :vle1024.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vle1024ff.v nf 28=1 27..26=0 vm 24..20=0x10 rs1 14..12=0x7 vd 6..0=0x07 # vle1024ff.v vd, (rs1), vm # 1024-bit unit-stride fault-only-first load :vle1024ff.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vle128.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x0 vd 6..0=0x07 # vle128.v vd, (rs1), vm # 128-bit unit-stride load :vle128.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vle128ff.v nf 28=1 27..26=0 vm 24..20=0x10 rs1 14..12=0x0 vd 6..0=0x07 # vle128ff.v vd, (rs1), vm # 128-bit unit-stride fault-only-first load :vle128ff.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vle16.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x5 vd 6..0=0x07 # vle16.v vd, (rs1), vm # 16-bit unit-stride load :vle16.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vle16ff.v nf 28=0 27..26=0 vm 24..20=0x10 rs1 14..12=0x5 vd 6..0=0x07 # vle16ff.v vd, (rs1), vm # 16-bit unit-stride fault-only-first load :vle16ff.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vle256.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x5 vd 6..0=0x07 # vle256.v vd, (rs1), vm # 256-bit unit-stride load :vle256.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vle256ff.v nf 28=1 27..26=0 vm 24..20=0x10 rs1 14..12=0x5 vd 6..0=0x07 # vle256ff.v vd, (rs1), vm # 256-bit unit-stride fault-only-first load :vle256ff.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x5 & vd 
& op0006=0x7 unimpl # vle32.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x6 vd 6..0=0x07 # vle32.v vd, (rs1), vm # 32-bit unit-stride load :vle32.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vle32ff.v nf 28=0 27..26=0 vm 24..20=0x10 rs1 14..12=0x6 vd 6..0=0x07 # vle32ff.v vd, (rs1), vm # 32-bit unit-stride fault-only-first load :vle32ff.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vle512.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x6 vd 6..0=0x07 # vle512.v vd, (rs1), vm # 512-bit unit-stride load :vle512.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vle512ff.v nf 28=1 27..26=0 vm 24..20=0x10 rs1 14..12=0x6 vd 6..0=0x07 # vle512ff.v vd, (rs1), vm # 512-bit unit-stride fault-only-first load :vle512ff.v vd, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vle64.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x7 vd 6..0=0x07 # vle64.v vd, (rs1), vm # 64-bit unit-stride load :vle64.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vle64ff.v nf 28=0 27..26=0 vm 24..20=0x10 rs1 14..12=0x7 vd 6..0=0x07 # vle64ff.v vd, (rs1), vm # 64-bit unit-stride fault-only-first load :vle64ff.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vle8.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x0 vd 6..0=0x07 # vle8.v vd, (rs1), vm # 8-bit unit-stride load :vle8.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vle8ff.v nf 28=0 27..26=0 vm 24..20=0x10 rs1 14..12=0x0 vd 6..0=0x07 # vle8ff.v vd, (rs1), vm # 8-bit unit-stride fault-only-first load :vle8ff.v vd, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x10 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl 
# NOTE(review): auto-generated stubs for the V-extension strided loads (vlse*.v), opcode 0x07 with
# mop=2 (bits 27..26) and the element width selected by bits 28 and 14..12; all deliberately `unimpl`.
# vlse1024.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x7 vd 6..0=0x07 # vlse1024.v vd, (rs1), rs2, vm # 1024-bit strided load :vlse1024.v vd, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vlse128.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x0 vd 6..0=0x07 # vlse128.v vd, (rs1), rs2, vm # 128-bit strided load :vlse128.v vd, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vlse16.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x5 vd 6..0=0x07 # vlse16.v vd, (rs1), rs2, vm # 16-bit strided load :vlse16.v vd, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vlse256.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x5 vd 6..0=0x07 # vlse256.v vd, (rs1), rs2, vm # 256-bit strided load :vlse256.v vd, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vlse32.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x6 vd 6..0=0x07 # vlse32.v vd, (rs1), rs2, vm # 32-bit strided load :vlse32.v vd, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vlse512.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x6 vd 6..0=0x07 # vlse512.v vd, (rs1), rs2, vm # 512-bit strided load :vlse512.v vd, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vlse64.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x7 vd 6..0=0x07 # vlse64.v vd, (rs1), rs2, vm # 64-bit strided load :vlse64.v vd, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vlse8.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x0 vd 6..0=0x07 # vlse8.v vd, (rs1), rs2, vm # 8-bit strided load :vlse8.v vd, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vlxei1024.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x7 vd 6..0=0x07 # vlxei1024.v vd, (rs1), vs2, vm # 1024-bit indexed load of SEW data 
# NOTE(review): auto-generated stubs for the V-extension indexed loads (vlxei*.v, mop=3) and the
# integer multiply-add / add-with-carry / mask-logical group (vmacc/vmadc/vmadd/vmand*); all `unimpl`.
:vlxei1024.v vd, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vlxei128.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x0 vd 6..0=0x07 # vlxei128.v vd, (rs1), vs2, vm # 128-bit indexed load of SEW data :vlxei128.v vd, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vlxei16.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x5 vd 6..0=0x07 # vlxei16.v vd, (rs1), vs2, vm # 16-bit indexed load of SEW data :vlxei16.v vd, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vlxei256.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x5 vd 6..0=0x07 # vlxei256.v vd, (rs1), vs2, vm # 256-bit indexed load of SEW data :vlxei256.v vd, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x7 unimpl # vlxei32.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x6 vd 6..0=0x07 # vlxei32.v vd, (rs1), vs2, vm # 32-bit indexed load of SEW data :vlxei32.v vd, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vlxei512.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x6 vd 6..0=0x07 # vlxei512.v vd, (rs1), vs2, vm # 512-bit indexed load of SEW data :vlxei512.v vd, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x7 unimpl # vlxei64.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x7 vd 6..0=0x07 # vlxei64.v vd, (rs1), vs2, vm # 64-bit indexed load of SEW data :vlxei64.v vd, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x7 & vd & op0006=0x7 unimpl # vlxei8.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x0 vd 6..0=0x07 # vlxei8.v vd, (rs1), vs2, vm # 8-bit indexed load of SEW data :vlxei8.v vd, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x0 & vd & op0006=0x7 unimpl # vmacc.vv 31..26=0x2d vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmacc.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vmacc.vv vd, vs1, vs2^ vm 
is op2631=0x2d & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmacc.vx 31..26=0x2d vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vmacc.vx vd, rs1, vs2, vm # vd[i] = +(x[rs1] * vs2[i]) + vd[i] :vmacc.vx vd, rs1, vs2^ vm is op2631=0x2d & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmadc.vim 31..26=0x11 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmadc.vim vd, vs2, simm5, v0 # Vector-immediate :vmadc.vim vd, vs2, simm5, v0 is op2631=0x11 & vm & vs2 & simm5 & op1214=0x3 & v0 & vd & op0006=0x57 unimpl # vmadc.vvm 31..26=0x11 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmadc.vvm vd, vs2, vs1, v0 # Vector-vector :vmadc.vvm vd, vs2, vs1, v0 is op2631=0x11 & vm & vs2 & vs1 & op1214=0x0 & v0 & vd & op0006=0x57 unimpl # vmadc.vxm 31..26=0x11 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmadc.vxm vd, vs2, rs1, v0 # Vector-scalar :vmadc.vxm vd, vs2, rs1, v0 is op2631=0x11 & vm & vs2 & rs1 & op1214=0x4 & v0 & vd & op0006=0x57 unimpl # vmadd.vv 31..26=0x29 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmadd.vv vd, vs1, vs2, vm # vd[i] = (vs1[i] * vd[i]) + vs2[i] :vmadd.vv vd, vs1, vs2^ vm is op2631=0x29 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmadd.vx 31..26=0x29 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vmadd.vx vd, rs1, vs2, vm # vd[i] = (x[rs1] * vd[i]) + vs2[i] :vmadd.vx vd, rs1, vs2^ vm is op2631=0x29 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmand.mm 31..26=0x19 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmand.mm vd, vs2, vs1 # vd[i] = vs2.mask[i] && vs1.mask[i] :vmand.mm vd, vs2, vs1 is op2631=0x19 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmandnot.mm 31..26=0x18 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmandnot.mm vd, vs2, vs1 # vd[i] = vs2.mask[i] && !vs1.mask[i] :vmandnot.mm vd, vs2, vs1 is op2631=0x18 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmax.vv 31..26=0x07 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmax.vv vd, vs2, vs1, vm # Vector-vector :vmax.vv vd, vs2, vs1^ vm is op2631=0x7 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl 
# vmax.vx 31..26=0x07 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmax.vx vd, vs2, rs1, vm # vector-scalar :vmax.vx vd, vs2, rs1^ vm is op2631=0x7 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmaxu.vv 31..26=0x06 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmaxu.vv vd, vs2, vs1, vm # Vector-vector :vmaxu.vv vd, vs2, vs1^ vm is op2631=0x6 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmaxu.vx 31..26=0x06 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmaxu.vx vd, vs2, rs1, vm # vector-scalar :vmaxu.vx vd, vs2, rs1^ vm is op2631=0x6 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmerge.vim 31..26=0x17 25=0 vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmerge.vim vd, vs2, simm5, v0 # vd[i] = v0.mask[i] ? imm : vs2[i] :vmerge.vim vd, vs2, simm5, v0 is op2631=0x17 & op2525=0x0 & vs2 & simm5 & op1214=0x3 & v0 & vd & op0006=0x57 unimpl # vmerge.vvm 31..26=0x17 25=0 vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmerge.vvm vd, vs2, vs1, v0 # vd[i] = v0.mask[i] ? vs1[i] : vs2[i] :vmerge.vvm vd, vs2, vs1, v0 is op2631=0x17 & op2525=0x0 & vs2 & vs1 & op1214=0x0 & v0 & vd & op0006=0x57 unimpl # vmerge.vxm 31..26=0x17 25=0 vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmerge.vxm vd, vs2, rs1, v0 # vd[i] = v0.mask[i] ? 
x[rs1] : vs2[i] :vmerge.vxm vd, vs2, rs1, v0 is op2631=0x17 & op2525=0x0 & vs2 & rs1 & op1214=0x4 & v0 & vd & op0006=0x57 unimpl # vmfeq.vf 31..26=0x18 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vmfeq.vf vd, vs2, rs1, vm # vector-scalar :vmfeq.vf vd, vs2, rs1^ vm is op2631=0x18 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vmfeq.vv 31..26=0x18 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vmfeq.vv vd, vs2, vs1, vm # Vector-vector :vmfeq.vv vd, vs2, vs1^ vm is op2631=0x18 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vmfge.vf 31..26=0x1f vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vmfge.vf vd, vs2, rs1, vm # vector-scalar :vmfge.vf vd, vs2, rs1^ vm is op2631=0x1f & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vmfgt.vf 31..26=0x1d vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vmfgt.vf vd, vs2, rs1, vm # vector-scalar :vmfgt.vf vd, vs2, rs1^ vm is op2631=0x1d & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vmfle.vf 31..26=0x19 vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vmfle.vf vd, vs2, rs1, vm # vector-scalar :vmfle.vf vd, vs2, rs1^ vm is op2631=0x19 & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vmfle.vv 31..26=0x19 vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vmfle.vv vd, vs2, vs1, vm # Vector-vector :vmfle.vv vd, vs2, vs1^ vm is op2631=0x19 & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vmflt.vf 31..26=0x1b vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vmflt.vf vd, vs2, rs1, vm # vector-scalar :vmflt.vf vd, vs2, rs1^ vm is op2631=0x1b & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vmflt.vv 31..26=0x1b vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vmflt.vv vd, vs2, vs1, vm # Vector-vector :vmflt.vv vd, vs2, vs1^ vm is op2631=0x1b & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vmfne.vf 31..26=0x1c vm vs2 rs1 14..12=0x5 vd 6..0=0x57 # vmfne.vf vd, vs2, rs1, vm # vector-scalar :vmfne.vf vd, vs2, rs1^ vm is op2631=0x1c & vm & vs2 & rs1 & op1214=0x5 & vd & op0006=0x57 unimpl # vmfne.vv 31..26=0x1c vm vs2 vs1 14..12=0x1 vd 6..0=0x57 # vmfne.vv 
vd, vs2, vs1, vm # Vector-vector :vmfne.vv vd, vs2, vs1^ vm is op2631=0x1c & vm & vs2 & vs1 & op1214=0x1 & vd & op0006=0x57 unimpl # vmin.vv 31..26=0x05 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmin.vv vd, vs2, vs1, vm # Vector-vector :vmin.vv vd, vs2, vs1^ vm is op2631=0x5 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmin.vx 31..26=0x05 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmin.vx vd, vs2, rs1, vm # vector-scalar :vmin.vx vd, vs2, rs1^ vm is op2631=0x5 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vminu.vv 31..26=0x04 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vminu.vv vd, vs2, vs1, vm # Vector-vector :vminu.vv vd, vs2, vs1^ vm is op2631=0x4 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vminu.vx 31..26=0x04 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vminu.vx vd, vs2, rs1, vm # vector-scalar :vminu.vx vd, vs2, rs1^ vm is op2631=0x4 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmnand.mm 31..26=0x1d vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmnand.mm vd, vs2, vs1 # vd[i] = !(vs2.mask[i] && vs1.mask[i]) :vmnand.mm vd, vs2, vs1 is op2631=0x1d & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmnor.mm 31..26=0x1e vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmnor.mm vd, vs2, vs1 # vd[i] = !(vs2.mask[i] || vs1.mask[i]) :vmnor.mm vd, vs2, vs1 is op2631=0x1e & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmor.mm 31..26=0x1a vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmor.mm vd, vs2, vs1 # vd[i] = vs2.mask[i] || vs1.mask[i] :vmor.mm vd, vs2, vs1 is op2631=0x1a & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmornot.mm 31..26=0x1c vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmornot.mm vd, vs2, vs1 # vd[i] = vs2.mask[i] || !vs1.mask[i] :vmornot.mm vd, vs2, vs1 is op2631=0x1c & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmsbc.vvm 31..26=0x13 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmsbc.vvm vd, vs2, vs1, v0 # Vector-vector :vmsbc.vvm vd, vs2, vs1, v0 is op2631=0x13 & vm & vs2 & vs1 & op1214=0x0 & v0 & vd & op0006=0x57 unimpl # 
vmsbc.vxm 31..26=0x13 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsbc.vxm vd, vs2, rs1, v0 # Vector-scalar :vmsbc.vxm vd, vs2, rs1, v0 is op2631=0x13 & vm & vs2 & rs1 & op1214=0x4 & v0 & vd & op0006=0x57 unimpl # vmsbf.m 31..26=0x14 vm vs2 19..15=0x01 14..12=0x2 vd 6..0=0x57 # vmsbf.m vd, vs2, vm :vmsbf.m vd, vs2^ vm is op2631=0x14 & vm & vs2 & op1519=0x1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmseq.vi 31..26=0x18 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmseq.vi vd, vs2, simm5, vm # vector-immediate :vmseq.vi vd, vs2, simm5^ vm is op2631=0x18 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmseq.vv 31..26=0x18 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmseq.vv vd, vs2, vs1, vm # Vector-vector :vmseq.vv vd, vs2, vs1^ vm is op2631=0x18 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmseq.vx 31..26=0x18 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmseq.vx vd, vs2, rs1, vm # vector-scalar :vmseq.vx vd, vs2, rs1^ vm is op2631=0x18 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsgt.vi 31..26=0x1f vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmsgt.vi vd, vs2, simm5, vm # Vector-immediate :vmsgt.vi vd, vs2, simm5^ vm is op2631=0x1f & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmsgt.vx 31..26=0x1f vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsgt.vx vd, vs2, rs1, vm # Vector-scalar :vmsgt.vx vd, vs2, rs1^ vm is op2631=0x1f & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsgtu.vi 31..26=0x1e vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmsgtu.vi vd, vs2, simm5, vm # Vector-immediate :vmsgtu.vi vd, vs2, simm5^ vm is op2631=0x1e & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmsgtu.vx 31..26=0x1e vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsgtu.vx vd, vs2, rs1, vm # Vector-scalar :vmsgtu.vx vd, vs2, rs1^ vm is op2631=0x1e & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsif.m 31..26=0x14 vm vs2 19..15=0x03 14..12=0x2 vd 6..0=0x57 # vmsif.m vd, vs2, vm :vmsif.m vd, vs2^ vm is op2631=0x14 & vm & vs2 & op1519=0x3 & op1214=0x2 & vd & 
op0006=0x57 unimpl # vmsle.vi 31..26=0x1d vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmsle.vi vd, vs2, simm5, vm # vector-immediate :vmsle.vi vd, vs2, simm5^ vm is op2631=0x1d & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmsle.vv 31..26=0x1d vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmsle.vv vd, vs2, vs1, vm # Vector-vector :vmsle.vv vd, vs2, vs1^ vm is op2631=0x1d & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmsle.vx 31..26=0x1d vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsle.vx vd, vs2, rs1, vm # vector-scalar :vmsle.vx vd, vs2, rs1^ vm is op2631=0x1d & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsleu.vi 31..26=0x1c vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmsleu.vi vd, vs2, simm5, vm # Vector-immediate :vmsleu.vi vd, vs2, simm5^ vm is op2631=0x1c & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmsleu.vv 31..26=0x1c vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmsleu.vv vd, vs2, vs1, vm # Vector-vector :vmsleu.vv vd, vs2, vs1^ vm is op2631=0x1c & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmsleu.vx 31..26=0x1c vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsleu.vx vd, vs2, rs1, vm # vector-scalar :vmsleu.vx vd, vs2, rs1^ vm is op2631=0x1c & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmslt.vv 31..26=0x1b vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmslt.vv vd, vs2, vs1, vm # Vector-vector :vmslt.vv vd, vs2, vs1^ vm is op2631=0x1b & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmslt.vx 31..26=0x1b vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmslt.vx vd, vs2, rs1, vm # vector-scalar :vmslt.vx vd, vs2, rs1^ vm is op2631=0x1b & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsltu.vv 31..26=0x1a vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmsltu.vv vd, vs2, vs1, vm # Vector-vector :vmsltu.vv vd, vs2, vs1^ vm is op2631=0x1a & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmsltu.vx 31..26=0x1a vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsltu.vx vd, vs2, rs1, vm # Vector-scalar :vmsltu.vx vd, vs2, rs1^ vm is op2631=0x1a 
& vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsne.vi 31..26=0x19 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vmsne.vi vd, vs2, simm5, vm # vector-immediate :vmsne.vi vd, vs2, simm5^ vm is op2631=0x19 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmsne.vv 31..26=0x19 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vmsne.vv vd, vs2, vs1, vm # Vector-vector :vmsne.vv vd, vs2, vs1^ vm is op2631=0x19 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmsne.vx 31..26=0x19 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vmsne.vx vd, vs2, rs1, vm # vector-scalar :vmsne.vx vd, vs2, rs1^ vm is op2631=0x19 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vmsof.m 31..26=0x14 vm vs2 19..15=0x02 14..12=0x2 vd 6..0=0x57 # vmsof.m vd, vs2, vm :vmsof.m vd, vs2^ vm is op2631=0x14 & vm & vs2 & op1519=0x2 & op1214=0x2 & vd & op0006=0x57 unimpl # vmul.vv 31..26=0x25 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmul.vv vd, vs2, vs1, vm # Vector-vector :vmul.vv vd, vs2, vs1^ vm is op2631=0x25 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmul.vx 31..26=0x25 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vmul.vx vd, vs2, rs1, vm # vector-scalar :vmul.vx vd, vs2, rs1^ vm is op2631=0x25 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmulh.vv 31..26=0x27 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmulh.vv vd, vs2, vs1, vm # Vector-vector :vmulh.vv vd, vs2, vs1^ vm is op2631=0x27 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmulh.vx 31..26=0x27 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vmulh.vx vd, vs2, rs1, vm # vector-scalar :vmulh.vx vd, vs2, rs1^ vm is op2631=0x27 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmulhsu.vv 31..26=0x26 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmulhsu.vv vd, vs2, vs1, vm # Vector-vector :vmulhsu.vv vd, vs2, vs1^ vm is op2631=0x26 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmulhsu.vx 31..26=0x26 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vmulhsu.vx vd, vs2, rs1, vm # vector-scalar :vmulhsu.vx vd, vs2, rs1^ vm is 
op2631=0x26 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmulhu.vv 31..26=0x24 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmulhu.vv vd, vs2, vs1, vm # Vector-vector :vmulhu.vv vd, vs2, vs1^ vm is op2631=0x24 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmulhu.vx 31..26=0x24 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vmulhu.vx vd, vs2, rs1, vm # vector-scalar :vmulhu.vx vd, vs2, rs1^ vm is op2631=0x24 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmv.s.x 31..26=0x10 25=1 24..20=0 rs1 14..12=0x6 vd 6..0=0x57 # vmv.s.x vd, rs1 # vd[0] = x[rs1] (vs2=0) :vmv.s.x vd, rs1 is op2631=0x10 & op2525=0x1 & op2024=0x0 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vmv.v.i 31..26=0x17 25=1 24..20=0 simm5 14..12=0x3 vd 6..0=0x57 # vmv.v.i vd, simm5 # vd[i] = imm :vmv.v.i vd, simm5 is op2631=0x17 & op2525=0x1 & op2024=0x0 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vmv.v.v 31..26=0x17 25=1 24..20=0 vs1 14..12=0x0 vd 6..0=0x57 # vmv.v.v vd, vs1 # vd[i] = vs1[i] :vmv.v.v vd, vs1 is op2631=0x17 & op2525=0x1 & op2024=0x0 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vmv.v.x 31..26=0x17 25=1 24..20=0 rs1 14..12=0x4 vd 6..0=0x57 # vmv.v.x vd, rs1 # vd[i] = rs1 :vmv.v.x vd, rs1 is op2631=0x17 & op2525=0x1 & op2024=0x0 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vmv.x.s 31..26=0x10 25=1 vs2 19..15=0 14..12=0x2 vd 6..0=0x57 # vmv.x.s rd, vs2 # x[rd] = vs2[0] (rs1=0) :vmv.x.s rd, vs2 is op2631=0x10 & op2525=0x1 & vs2 & op1519=0x0 & op1214=0x2 & rd & vd & op0006=0x57 unimpl # vmv1r.v 31..26=0x27 25=1 vs2 19..15=0 14..12=0x3 vd 6..0=0x57 # vmv1r.v vd, vs2 :vmv1r.v vd, vs2 is op2631=0x27 & op2525=0x1 & vs2 & op1519=0x0 & op1214=0x3 & vd & op0006=0x57 unimpl # vmv2r.v 31..26=0x27 25=1 vs2 19..15=1 14..12=0x3 vd 6..0=0x57 # vmv2r.v vd, vs2 :vmv2r.v vd, vs2 is op2631=0x27 & op2525=0x1 & vs2 & op1519=0x1 & op1214=0x3 & vd & op0006=0x57 unimpl # vmv4r.v 31..26=0x27 25=1 vs2 19..15=3 14..12=0x3 vd 6..0=0x57 # vmv4r.v vd, vs2 :vmv4r.v vd, vs2 is 
op2631=0x27 & op2525=0x1 & vs2 & op1519=0x3 & op1214=0x3 & vd & op0006=0x57 unimpl # vmv8r.v 31..26=0x27 25=1 vs2 19..15=7 14..12=0x3 vd 6..0=0x57 # vmv8r.v vd, vs2 :vmv8r.v vd, vs2 is op2631=0x27 & op2525=0x1 & vs2 & op1519=0x7 & op1214=0x3 & vd & op0006=0x57 unimpl # vmxnor.mm 31..26=0x1f vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmxnor.mm vd, vs2, vs1 # vd[i] = !(vs2.mask[i] ^^ vs1.mask[i]) :vmxnor.mm vd, vs2, vs1 is op2631=0x1f & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vmxor.mm 31..26=0x1b vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vmxor.mm vd, vs2, vs1 # vd[i] = vs2.mask[i] ^^ vs1.mask[i] :vmxor.mm vd, vs2, vs1 is op2631=0x1b & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl #TODO this is broken # vnclip.wi 31..26=0x2f vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vnclip.wi vd, vs2, simm5, vm # vd[i] = clip(roundoff_signed(vs2[i], uimm5)) :vnclip.wi vd, vs2, simm5^ vm is op2631=0x2f & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vnclip.wv 31..26=0x2f vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vnclip.wv vd, vs2, vs1, vm # vd[i] = clip(roundoff_signed(vs2[i], vs1[i])) :vnclip.wv vd, vs2, vs1^ vm is op2631=0x2f & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vnclip.wx 31..26=0x2f vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vnclip.wx vd, vs2, rs1, vm # vd[i] = clip(roundoff_signed(vs2[i], x[rs1])) :vnclip.wx vd, vs2, rs1^ vm is op2631=0x2f & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vnclipu.wi 31..26=0x2e vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vnclipu.wi vd, vs2, simm5, vm # vd[i] = clip(roundoff_unsigned(vs2[i], uimm5)) :vnclipu.wi vd, vs2, simm5^ vm is op2631=0x2e & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vnclipu.wv 31..26=0x2e vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vnclipu.wv vd, vs2, vs1, vm # vd[i] = clip(roundoff_unsigned(vs2[i], vs1[i])) :vnclipu.wv vd, vs2, vs1^ vm is op2631=0x2e & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vnclipu.wx 31..26=0x2e vm vs2 rs1 14..12=0x4 vd 
6..0=0x57 # vnclipu.wx vd, vs2, rs1, vm # vd[i] = clip(roundoff_unsigned(vs2[i], x[rs1])) :vnclipu.wx vd, vs2, rs1^ vm is op2631=0x2e & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vnmsac.vv 31..26=0x2f vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vnmsac.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vs2[i]) + vd[i] :vnmsac.vv vd, vs1, vs2^ vm is op2631=0x2f & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vnmsac.vx 31..26=0x2f vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vnmsac.vx vd, rs1, vs2, vm # vd[i] = -(x[rs1] * vs2[i]) + vd[i] :vnmsac.vx vd, rs1, vs2^ vm is op2631=0x2f & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vnmsub.vv 31..26=0x2b vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vnmsub.vv vd, vs1, vs2, vm # vd[i] = -(vs1[i] * vd[i]) + vs2[i] :vnmsub.vv vd, vs1, vs2^ vm is op2631=0x2b & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vnmsub.vx 31..26=0x2b vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vnmsub.vx vd, rs1, vs2, vm # vd[i] = -(x[rs1] * vd[i]) + vs2[i] :vnmsub.vx vd, rs1, vs2^ vm is op2631=0x2b & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl #TODO this is broken # vnsra.wi 31..26=0x2d vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vnsra.wi vd, vs2, simm5, vm # vector-immediate :vnsra.wi vd, vs2, simm5^ vm is op2631=0x2d & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vnsra.wv 31..26=0x2d vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vnsra.wv vd, vs2, vs1, vm # vector-vector :vnsra.wv vd, vs2, vs1^ vm is op2631=0x2d & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vnsra.wx 31..26=0x2d vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vnsra.wx vd, vs2, rs1, vm # vector-scalar :vnsra.wx vd, vs2, rs1^ vm is op2631=0x2d & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vnsrl.wi 31..26=0x2c vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vnsrl.wi vd, vs2, simm5, vm # vector-immediate :vnsrl.wi vd, vs2, simm5^ vm is op2631=0x2c & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vnsrl.wv 31..26=0x2c vm vs2 vs1 14..12=0x0 vd 
6..0=0x57 # vnsrl.wv vd, vs2, vs1, vm # vector-vector :vnsrl.wv vd, vs2, vs1^ vm is op2631=0x2c & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vnsrl.wx 31..26=0x2c vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vnsrl.wx vd, vs2, rs1, vm # vector-scalar :vnsrl.wx vd, vs2, rs1^ vm is op2631=0x2c & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vor.vi 31..26=0x0a vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vor.vi vd, vs2, simm5, vm # vector-immediate :vor.vi vd, vs2, simm5^ vm is op2631=0xa & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vor.vv 31..26=0x0a vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vor.vv vd, vs2, vs1, vm # Vector-vector :vor.vv vd, vs2, vs1^ vm is op2631=0xa & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vor.vx 31..26=0x0a vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vor.vx vd, vs2, rs1, vm # vector-scalar :vor.vx vd, vs2, rs1^ vm is op2631=0xa & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vpopc.m 31..26=0x10 vm vs2 19..15=0x10 14..12=0x2 rd 6..0=0x57 # vpopc.m rd, vs2, vm # x[rd] = sum_i ( vs2.mask[i] && v0.mask[i] ) :vpopc.m rd, vs2^ vm is op2631=0x10 & vm & vs2 & op1519=0x10 & op1214=0x2 & rd & op0006=0x57 unimpl # vqmacc.vv 31..26=0x3d vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vqmacc.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vqmacc.vv vd, vs1, vs2^ vm is op2631=0x3d & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vqmacc.vx 31..26=0x3d vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vqmacc.vx vd, rs1, vs2, vm # vd[i] = +(x[rs1] * vs2[i]) + vd[i] :vqmacc.vx vd, rs1, vs2^ vm is op2631=0x3d & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vqmaccsu.vv 31..26=0x3f vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vqmaccsu.vv vd, vs1, vs2, vm # vd[i] = +(signed(vs1[i]) * unsigned(vs2[i])) + vd[i] :vqmaccsu.vv vd, vs1, vs2^ vm is op2631=0x3f & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vqmaccsu.vx 31..26=0x3f vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vqmaccsu.vx vd, rs1, vs2, vm # vd[i] = +(signed(x[rs1]) * 
unsigned(vs2[i])) + vd[i] :vqmaccsu.vx vd, rs1, vs2^ vm is op2631=0x3f & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vqmaccu.vv 31..26=0x3c vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vqmaccu.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vqmaccu.vv vd, vs1, vs2^ vm is op2631=0x3c & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vqmaccu.vx 31..26=0x3c vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vqmaccu.vx vd, rs1, vs2, vm # vd[i] = +(x[rs1] * vs2[i]) + vd[i] :vqmaccu.vx vd, rs1, vs2^ vm is op2631=0x3c & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vqmaccus.vx 31..26=0x3e vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vqmaccus.vx vd, rs1, vs2, vm # vd[i] = +(unsigned(x[rs1]) * signed(vs2[i])) + vd[i] :vqmaccus.vx vd, rs1, vs2^ vm is op2631=0x3e & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vredand.vs 31..26=0x01 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredand.vs vd, vs2, vs1, vm # vd[0] = and( vs1[0] , vs2[*] ) :vredand.vs vd, vs2, vs1^ vm is op2631=0x1 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredmax.vs 31..26=0x07 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredmax.vs vd, vs2, vs1, vm # vd[0] = max( vs1[0] , vs2[*] ) :vredmax.vs vd, vs2, vs1^ vm is op2631=0x7 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredmaxu.vs 31..26=0x06 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredmaxu.vs vd, vs2, vs1, vm # vd[0] = maxu( vs1[0] , vs2[*] ) :vredmaxu.vs vd, vs2, vs1^ vm is op2631=0x6 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredmin.vs 31..26=0x05 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredmin.vs vd, vs2, vs1, vm # vd[0] = min( vs1[0] , vs2[*] ) :vredmin.vs vd, vs2, vs1^ vm is op2631=0x5 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredminu.vs 31..26=0x04 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredminu.vs vd, vs2, vs1, vm # vd[0] = minu( vs1[0] , vs2[*] ) :vredminu.vs vd, vs2, vs1^ vm is op2631=0x4 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredor.vs 31..26=0x02 vm vs2 vs1 14..12=0x2 vd 
6..0=0x57 # vredor.vs vd, vs2, vs1, vm # vd[0] = or( vs1[0] , vs2[*] ) :vredor.vs vd, vs2, vs1^ vm is op2631=0x2 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredsum.vs 31..26=0x00 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredsum.vs vd, vs2, vs1, vm # vd[0] = sum( vs1[0] , vs2[*] ) :vredsum.vs vd, vs2, vs1^ vm is op2631=0x0 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vredxor.vs 31..26=0x03 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vredxor.vs vd, vs2, vs1, vm # vd[0] = xor( vs1[0] , vs2[*] ) :vredxor.vs vd, vs2, vs1^ vm is op2631=0x3 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vrem.vv 31..26=0x23 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vrem.vv vd, vs2, vs1, vm # Vector-vector :vrem.vv vd, vs2, vs1^ vm is op2631=0x23 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vrem.vx 31..26=0x23 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vrem.vx vd, vs2, rs1, vm # vector-scalar :vrem.vx vd, vs2, rs1^ vm is op2631=0x23 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vremu.vv 31..26=0x22 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vremu.vv vd, vs2, vs1, vm # Vector-vector :vremu.vv vd, vs2, vs1^ vm is op2631=0x22 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vremu.vx 31..26=0x22 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vremu.vx vd, vs2, rs1, vm # vector-scalar :vremu.vx vd, vs2, rs1^ vm is op2631=0x22 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl #TODO this is broken # vrgather.vi 31..26=0x0c vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vrgather.vi vd, vs2, simm5, vm # vd[i] = (uimm >= VLMAX) ? 0 : vs2[uimm] :vrgather.vi vd, vs2, simm5^ vm is op2631=0xc & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vrgather.vv 31..26=0x0c vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vrgather.vv vd, vs2, vs1, vm # vd[i] = (vs1[i] >= VLMAX) ? 
0 : vs2[vs1[i]]; :vrgather.vv vd, vs2, vs1^ vm is op2631=0xc & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vrgather.vx 31..26=0x0c vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] :vrgather.vx vd, vs2, rs1^ vm is op2631=0xc & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vrgatherei16.vv 31..26=0x0e vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vrgatherei16.vv vd, vs2, vs1, vm # vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; :vrgatherei16.vv vd, vs2, vs1^ vm is op2631=0xe & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vrsub.vi 31..26=0x03 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vrsub.vi vd, vs2, simm5, vm # vd[i] = imm - vs2[i] :vrsub.vi vd, vs2, simm5^ vm is op2631=0x3 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vrsub.vx 31..26=0x03 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vrsub.vx vd, vs2, rs1, vm # vd[i] = rs1 - vs2[i] :vrsub.vx vd, vs2, rs1^ vm is op2631=0x3 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vs1r.v 31..29=0 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vs3 6..0=0x27 # vs1r.v vs3, (rs1) :vs1r.v vs3, (rs1) is op2931=0x0 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vs2r.v 31..29=1 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vs3 6..0=0x27 # vs2r.v vs3, (rs1) :vs2r.v vs3, (rs1) is op2931=0x1 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vs4r.v 31..29=3 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vs3 6..0=0x27 # vs4r.v vs3, (rs1) :vs4r.v vs3, (rs1) is op2931=0x3 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vs8r.v 31..29=7 28=0 27..26=0 25=1 24..20=0x08 rs1 14..12=0x0 vs3 6..0=0x27 # vs8r.v vs3, (rs1) :vs8r.v vs3, (rs1) is op2931=0x7 & op2828=0x0 & op2627=0x0 & op2525=0x1 & op2024=0x8 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vsadd.vi 31..26=0x21 vm vs2 simm5 14..12=0x3 vd 
6..0=0x57 # vsadd.vi vd, vs2, simm5, vm # vector-immediate :vsadd.vi vd, vs2, simm5^ vm is op2631=0x21 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vsadd.vv 31..26=0x21 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsadd.vv vd, vs2, vs1, vm # Vector-vector :vsadd.vv vd, vs2, vs1^ vm is op2631=0x21 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsadd.vx 31..26=0x21 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsadd.vx vd, vs2, rs1, vm # vector-scalar :vsadd.vx vd, vs2, rs1^ vm is op2631=0x21 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vsaddu.vi 31..26=0x20 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vsaddu.vi vd, vs2, simm5, vm # vector-immediate :vsaddu.vi vd, vs2, simm5^ vm is op2631=0x20 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vsaddu.vv 31..26=0x20 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsaddu.vv vd, vs2, vs1, vm # Vector-vector :vsaddu.vv vd, vs2, vs1^ vm is op2631=0x20 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsaddu.vx 31..26=0x20 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsaddu.vx vd, vs2, rs1, vm # vector-scalar :vsaddu.vx vd, vs2, rs1^ vm is op2631=0x20 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vsbc.vvm 31..26=0x12 25=0 vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsbc.vvm vd, vs2, vs1, v0 # Vector-vector :vsbc.vvm vd, vs2, vs1, v0 is op2631=0x12 & op2525=0x0 & vs2 & vs1 & op1214=0x0 & v0 & vd & op0006=0x57 unimpl # vsbc.vxm 31..26=0x12 25=0 vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsbc.vxm vd, vs2, rs1, v0 # Vector-scalar :vsbc.vxm vd, vs2, rs1, v0 is op2631=0x12 & op2525=0x0 & vs2 & rs1 & op1214=0x4 & v0 & vd & op0006=0x57 unimpl # vse1024.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x7 vs3 6..0=0x27 # vse1024.v vs3, (rs1), vm # 1024-bit unit-stride store :vse1024.v vs3, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vse128.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x0 vs3 6..0=0x27 # vse128.v vs3, (rs1), vm # 128-bit unit-stride store :vse128.v vs3, 
(rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vse16.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x5 vs3 6..0=0x27 # vse16.v vs3, (rs1), vm # 16-bit unit-stride store :vse16.v vs3, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vse256.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x5 vs3 6..0=0x27 # vse256.v vs3, (rs1), vm # 256-bit unit-stride store :vse256.v vs3, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vse32.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x6 vs3 6..0=0x27 # vse32.v vs3, (rs1), vm # 32-bit unit-stride store :vse32.v vs3, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vse512.v nf 28=1 27..26=0 vm 24..20=0 rs1 14..12=0x6 vs3 6..0=0x27 # vse512.v vs3, (rs1), vm # 512-bit unit-stride store :vse512.v vs3, (rs1)^ vm is nf & op2828=0x1 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vse64.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x7 vs3 6..0=0x27 # vse64.v vs3, (rs1), vm # 64-bit unit-stride store :vse64.v vs3, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vse8.v nf 28=0 27..26=0 vm 24..20=0 rs1 14..12=0x0 vs3 6..0=0x27 # vse8.v vs3, (rs1), vm # 8-bit unit-stride store :vse8.v vs3, (rs1)^ vm is nf & op2828=0x0 & op2627=0x0 & vm & op2024=0x0 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vsetvl 31=1 30..25=0x0 rs2 rs1 14..12=0x7 rd 6..0=0x57 # vsetvl rd, rs1, rs2 # rd = new vl, rs1 = AVL, rs2 = new vtype value :vsetvl rd, rs1, rs2 is op3131=0x1 & op2530=0x0 & rs2 & rs1 & op1214=0x7 & rd & op0006=0x57 unimpl #TODO huh # vsetvli 31=0 vtypei rs1 14..12=0x7 rd 6..0=0x57 # vsetvli rd, rs1, vtypei # rd = new vl, rs1 = AVL, vtypei = new vtype setting :vsetvli rd, rs1, vtypei is op3131=0x0 & vtypei & rs1 & op1214=0x7 & rd & 
op0006=0x57 unimpl # vsext.vf2 31..26=0x12 vm vs2 19..15=7 14..12=0x2 vd 6..0=0x57 # vsext.vf2 vd, vs2, vm # Sign-extend SEW/2 source to SEW destination :vsext.vf2 vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x7 & op1214=0x2 & vd & op0006=0x57 unimpl # vsext.vf4 31..26=0x12 vm vs2 19..15=5 14..12=0x2 vd 6..0=0x57 # vsext.vf4 vd, vs2, vm # Sign-extend SEW/4 source to SEW destination :vsext.vf4 vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x5 & op1214=0x2 & vd & op0006=0x57 unimpl # vsext.vf8 31..26=0x12 vm vs2 19..15=3 14..12=0x2 vd 6..0=0x57 # vsext.vf8 vd, vs2, vm # Sign-extend SEW/8 source to SEW destination :vsext.vf8 vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x3 & op1214=0x2 & vd & op0006=0x57 unimpl # vslide1down.vx 31..26=0x0f vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] :vslide1down.vx vd, vs2, rs1^ vm is op2631=0xf & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vslide1up.vx 31..26=0x0e vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] :vslide1up.vx vd, vs2, rs1^ vm is op2631=0xe & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl #TODO this is broken # vslidedown.vi 31..26=0x0f vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vslidedown.vi vd, vs2, simm5[4:0], vm # vd[i] = vs2[i+uimm] :vslidedown.vi vd, vs2, simm5[4:0]^ vm is op2631=0xf & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vslidedown.vx 31..26=0x0f vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] :vslidedown.vx vd, vs2, rs1^ vm is op2631=0xf & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vslideup.vi 31..26=0x0e vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vslideup.vi vd, vs2, simm5[4:0], vm # vd[i+uimm] = vs2[i] :vslideup.vi vd, vs2, simm5[4:0]^ vm is op2631=0xe & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vslideup.vx 31..26=0x0e vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vslideup.vx vd, 
vs2, rs1, vm # vd[i+rs1] = vs2[i] :vslideup.vx vd, vs2, rs1^ vm is op2631=0xe & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vsll.vi 31..26=0x25 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vsll.vi vd, vs2, simm5, vm # vector-immediate :vsll.vi vd, vs2, simm5^ vm is op2631=0x25 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vsll.vv 31..26=0x25 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsll.vv vd, vs2, vs1, vm # Vector-vector :vsll.vv vd, vs2, vs1^ vm is op2631=0x25 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsll.vx 31..26=0x25 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsll.vx vd, vs2, rs1, vm # vector-scalar :vsll.vx vd, vs2, rs1^ vm is op2631=0x25 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vsmul.vv 31..26=0x27 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsmul.vv vd, vs2, vs1, vm # vd[i] = clip(roundoff_signed(vs2[i]*vs1[i], SEW-1)) :vsmul.vv vd, vs2, vs1^ vm is op2631=0x27 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsmul.vx 31..26=0x27 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsmul.vx vd, vs2, rs1, vm # vd[i] = clip(roundoff_signed(vs2[i]*x[rs1], SEW-1)) :vsmul.vx vd, vs2, rs1^ vm is op2631=0x27 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vsra.vi 31..26=0x29 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vsra.vi vd, vs2, simm5, vm # vector-immediate :vsra.vi vd, vs2, simm5^ vm is op2631=0x29 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vsra.vv 31..26=0x29 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsra.vv vd, vs2, vs1, vm # Vector-vector :vsra.vv vd, vs2, vs1^ vm is op2631=0x29 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsra.vx 31..26=0x29 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsra.vx vd, vs2, rs1, vm # vector-scalar :vsra.vx vd, vs2, rs1^ vm is op2631=0x29 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vsrl.vi 31..26=0x28 vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vsrl.vi vd, vs2, simm5, vm # vector-immediate :vsrl.vi vd, 
vs2, simm5^ vm is op2631=0x28 & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vsrl.vv 31..26=0x28 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsrl.vv vd, vs2, vs1, vm # Vector-vector :vsrl.vv vd, vs2, vs1^ vm is op2631=0x28 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsrl.vx 31..26=0x28 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsrl.vx vd, vs2, rs1, vm # vector-scalar :vsrl.vx vd, vs2, rs1^ vm is op2631=0x28 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vsse1024.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x7 vs3 6..0=0x27 # vsse1024.v vs3, (rs1), rs2, vm # 1024-bit strided store :vsse1024.v vs3, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vsse128.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x0 vs3 6..0=0x27 # vsse128.v vs3, (rs1), rs2, vm # 128-bit strided store :vsse128.v vs3, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vsse16.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x5 vs3 6..0=0x27 # vsse16.v vs3, (rs1), rs2, vm # 16-bit strided store :vsse16.v vs3, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vsse256.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x5 vs3 6..0=0x27 # vsse256.v vs3, (rs1), rs2, vm # 256-bit strided store :vsse256.v vs3, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vsse32.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x6 vs3 6..0=0x27 # vsse32.v vs3, (rs1), rs2, vm # 32-bit strided store :vsse32.v vs3, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vsse512.v nf 28=1 27..26=2 vm rs2 rs1 14..12=0x6 vs3 6..0=0x27 # vsse512.v vs3, (rs1), rs2, vm # 512-bit strided store :vsse512.v vs3, (rs1), rs2^ vm is nf & op2828=0x1 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vsse64.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x7 vs3 6..0=0x27 # vsse64.v 
vs3, (rs1), rs2, vm # 64-bit strided store :vsse64.v vs3, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vsse8.v nf 28=0 27..26=2 vm rs2 rs1 14..12=0x0 vs3 6..0=0x27 # vsse8.v vs3, (rs1), rs2, vm # 8-bit strided store :vsse8.v vs3, (rs1), rs2^ vm is nf & op2828=0x0 & op2627=0x2 & vm & rs2 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl #TODO this is broken # vssra.vi 31..26=0x2b vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vssra.vi vd, vs2, simm5, vm # vd[i] = roundoff_signed(vs2[i], uimm) :vssra.vi vd, vs2, simm5^ vm is op2631=0x2b & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vssra.vv 31..26=0x2b vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vssra.vv vd, vs2, vs1, vm # vd[i] = roundoff_signed(vs2[i],vs1[i]) :vssra.vv vd, vs2, vs1^ vm is op2631=0x2b & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vssra.vx 31..26=0x2b vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vssra.vx vd, vs2, rs1, vm # vd[i] = roundoff_signed(vs2[i], x[rs1]) :vssra.vx vd, vs2, rs1^ vm is op2631=0x2b & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl #TODO this is broken # vssrl.vi 31..26=0x2a vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vssrl.vi vd, vs2, simm5, vm # vd[i] = roundoff_unsigned(vs2[i], uimm) :vssrl.vi vd, vs2, simm5^ vm is op2631=0x2a & vm & vs2 & simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vssrl.vv 31..26=0x2a vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vssrl.vv vd, vs2, vs1, vm # vd[i] = roundoff_unsigned(vs2[i], vs1[i]) :vssrl.vv vd, vs2, vs1^ vm is op2631=0x2a & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vssrl.vx 31..26=0x2a vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vssrl.vx vd, vs2, rs1, vm # vd[i] = roundoff_unsigned(vs2[i], x[rs1]) :vssrl.vx vd, vs2, rs1^ vm is op2631=0x2a & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vssub.vv 31..26=0x23 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vssub.vv vd, vs2, vs1, vm # Vector-vector :vssub.vv vd, vs2, vs1^ vm is op2631=0x23 & vm & vs2 & vs1 & op1214=0x0 & vd & 
op0006=0x57 unimpl # vssub.vx 31..26=0x23 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vssub.vx vd, vs2, rs1, vm # vector-scalar :vssub.vx vd, vs2, rs1^ vm is op2631=0x23 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vssubu.vv 31..26=0x22 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vssubu.vv vd, vs2, vs1, vm # Vector-vector :vssubu.vv vd, vs2, vs1^ vm is op2631=0x22 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vssubu.vx 31..26=0x22 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vssubu.vx vd, vs2, rs1, vm # vector-scalar :vssubu.vx vd, vs2, rs1^ vm is op2631=0x22 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vsub.vv 31..26=0x02 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vsub.vv vd, vs2, vs1, vm # Vector-vector :vsub.vv vd, vs2, vs1^ vm is op2631=0x2 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vsub.vx 31..26=0x02 vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vsub.vx vd, vs2, rs1, vm # vector-scalar :vsub.vx vd, vs2, rs1^ vm is op2631=0x2 & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vsuxei1024.v nf 28=1 27..26=1 vm vs2 rs1 14..12=0x7 vs3 6..0=0x27 # vsuxei1024.v vs3, (rs1), vs2, vm # unordered 1024-bit indexed store of SEW data :vsuxei1024.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vsuxei128.v nf 28=1 27..26=1 vm vs2 rs1 14..12=0x0 vs3 6..0=0x27 # vsuxei128.v vs3, (rs1), vs2, vm # unordered 128-bit indexed store of SEW data :vsuxei128.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vsuxei16.v nf 28=0 27..26=1 vm vs2 rs1 14..12=0x5 vs3 6..0=0x27 # vsuxei16.v vs3, (rs1), vs2, vm # unordered 16-bit indexed store of SEW data :vsuxei16.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vsuxei256.v nf 28=1 27..26=1 vm vs2 rs1 14..12=0x5 vs3 6..0=0x27 # vsuxei256.v vs3, (rs1), vs2, vm # unordered 256-bit indexed store of SEW data :vsuxei256.v vs3, (rs1), 
vs2^ vm is nf & op2828=0x1 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vsuxei32.v nf 28=0 27..26=1 vm vs2 rs1 14..12=0x6 vs3 6..0=0x27 # vsuxei32.v vs3, (rs1), vs2, vm # unordered 32-bit indexed store of SEW data :vsuxei32.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vsuxei512.v nf 28=1 27..26=1 vm vs2 rs1 14..12=0x6 vs3 6..0=0x27 # vsuxei512.v vs3, (rs1), vs2, vm # unordered 512-bit indexed store of SEW data :vsuxei512.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vsuxei64.v nf 28=0 27..26=1 vm vs2 rs1 14..12=0x7 vs3 6..0=0x27 # vsuxei64.v vs3, (rs1), vs2, vm # unordered 64-bit indexed store of SEW data :vsuxei64.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vsuxei8.v nf 28=0 27..26=1 vm vs2 rs1 14..12=0x0 vs3 6..0=0x27 # vsuxei8.v vs3, (rs1), vs2, vm # unordered 8-bit indexed store of SEW data :vsuxei8.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x1 & vm & vs2 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vsxei1024.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x7 vs3 6..0=0x27 # vsxei1024.v vs3, (rs1), vs2, vm # ordered 1024-bit indexed store of SEW data :vsxei1024.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vsxei128.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x0 vs3 6..0=0x27 # vsxei128.v vs3, (rs1), vs2, vm # ordered 128-bit indexed store of SEW data :vsxei128.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vsxei16.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x5 vs3 6..0=0x27 # vsxei16.v vs3, (rs1), vs2, vm # ordered 16-bit indexed store of SEW data :vsxei16.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vsxei256.v nf 28=1 27..26=3 vm vs2 rs1 
14..12=0x5 vs3 6..0=0x27 # vsxei256.v vs3, (rs1), vs2, vm # ordered 256-bit indexed store of SEW data :vsxei256.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x5 & vs3 & op0006=0x27 unimpl # vsxei32.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x6 vs3 6..0=0x27 # vsxei32.v vs3, (rs1), vs2, vm # ordered 32-bit indexed store of SEW data :vsxei32.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vsxei512.v nf 28=1 27..26=3 vm vs2 rs1 14..12=0x6 vs3 6..0=0x27 # vsxei512.v vs3, (rs1), vs2, vm # ordered 512-bit indexed store of SEW data :vsxei512.v vs3, (rs1), vs2^ vm is nf & op2828=0x1 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x6 & vs3 & op0006=0x27 unimpl # vsxei64.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x7 vs3 6..0=0x27 # vsxei64.v vs3, (rs1), vs2, vm # ordered 64-bit indexed store of SEW data :vsxei64.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x7 & vs3 & op0006=0x27 unimpl # vsxei8.v nf 28=0 27..26=3 vm vs2 rs1 14..12=0x0 vs3 6..0=0x27 # vsxei8.v vs3, (rs1), vs2, vm # ordered 8-bit indexed store of SEW data :vsxei8.v vs3, (rs1), vs2^ vm is nf & op2828=0x0 & op2627=0x3 & vm & vs2 & rs1 & op1214=0x0 & vs3 & op0006=0x27 unimpl # vwadd.vv 31..26=0x31 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwadd.vv vd, vs2, vs1, vm # vector-vector :vwadd.vv vd, vs2, vs1^ vm is op2631=0x31 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwadd.vx 31..26=0x31 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwadd.vx vd, vs2, rs1, vm # vector-scalar :vwadd.vx vd, vs2, rs1^ vm is op2631=0x31 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwadd.wv 31..26=0x35 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwadd.wv vd, vs2, vs1, vm # vector-vector :vwadd.wv vd, vs2, vs1^ vm is op2631=0x35 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwadd.wx 31..26=0x35 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwadd.wx vd, vs2, rs1, vm # vector-scalar :vwadd.wx vd, vs2, rs1^ vm is 
op2631=0x35 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwaddu.vv 31..26=0x30 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwaddu.vv vd, vs2, vs1, vm # vector-vector :vwaddu.vv vd, vs2, vs1^ vm is op2631=0x30 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwaddu.vx 31..26=0x30 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwaddu.vx vd, vs2, rs1, vm # vector-scalar :vwaddu.vx vd, vs2, rs1^ vm is op2631=0x30 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwaddu.wv 31..26=0x34 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwaddu.wv vd, vs2, vs1, vm # vector-vector :vwaddu.wv vd, vs2, vs1^ vm is op2631=0x34 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwaddu.wx 31..26=0x34 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwaddu.wx vd, vs2, rs1, vm # vector-scalar :vwaddu.wx vd, vs2, rs1^ vm is op2631=0x34 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmacc.vv 31..26=0x3d vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwmacc.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vwmacc.vv vd, vs1, vs2^ vm is op2631=0x3d & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwmacc.vx 31..26=0x3d vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmacc.vx vd, rs1, vs2, vm # vd[i] = +(x[rs1] * vs2[i]) + vd[i] :vwmacc.vx vd, rs1, vs2^ vm is op2631=0x3d & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmaccsu.vv 31..26=0x3f vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwmaccsu.vv vd, vs1, vs2, vm # vd[i] = +(signed(vs1[i]) * unsigned(vs2[i])) + vd[i] :vwmaccsu.vv vd, vs1, vs2^ vm is op2631=0x3f & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwmaccsu.vx 31..26=0x3f vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmaccsu.vx vd, rs1, vs2, vm # vd[i] = +(signed(x[rs1]) * unsigned(vs2[i])) + vd[i] :vwmaccsu.vx vd, rs1, vs2^ vm is op2631=0x3f & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmaccu.vv 31..26=0x3c vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwmaccu.vv vd, vs1, vs2, vm # vd[i] = +(vs1[i] * vs2[i]) + vd[i] :vwmaccu.vv vd, vs1, vs2^ vm is op2631=0x3c & vm 
& vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwmaccu.vx 31..26=0x3c vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmaccu.vx vd, rs1, vs2, vm # vd[i] = +(x[rs1] * vs2[i]) + vd[i] :vwmaccu.vx vd, rs1, vs2^ vm is op2631=0x3c & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmaccus.vx 31..26=0x3e vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmaccus.vx vd, rs1, vs2, vm # vd[i] = +(unsigned(x[rs1]) * signed(vs2[i])) + vd[i] :vwmaccus.vx vd, rs1, vs2^ vm is op2631=0x3e & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmul.vv 31..26=0x3b vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwmul.vv vd, vs2, vs1, vm # vector-vector :vwmul.vv vd, vs2, vs1^ vm is op2631=0x3b & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwmul.vx 31..26=0x3b vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmul.vx vd, vs2, rs1, vm # vector-scalar :vwmul.vx vd, vs2, rs1^ vm is op2631=0x3b & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmulsu.vv 31..26=0x3a vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwmulsu.vv vd, vs2, vs1, vm # vector-vector :vwmulsu.vv vd, vs2, vs1^ vm is op2631=0x3a & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwmulsu.vx 31..26=0x3a vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmulsu.vx vd, vs2, rs1, vm # vector-scalar :vwmulsu.vx vd, vs2, rs1^ vm is op2631=0x3a & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwmulu.vv 31..26=0x38 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwmulu.vv vd, vs2, vs1, vm # vector-vector :vwmulu.vv vd, vs2, vs1^ vm is op2631=0x38 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwmulu.vx 31..26=0x38 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwmulu.vx vd, vs2, rs1, vm # vector-scalar :vwmulu.vx vd, vs2, rs1^ vm is op2631=0x38 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwredsum.vs 31..26=0x31 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vwredsum.vs vd, vs2, vs1, vm # 2*SEW = 2*SEW + sum(sign-extend(SEW)) :vwredsum.vs vd, vs2, vs1^ vm is op2631=0x31 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vwredsumu.vs
31..26=0x30 vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vwredsumu.vs vd, vs2, vs1, vm # 2*SEW = 2*SEW + sum(zero-extend(SEW)) :vwredsumu.vs vd, vs2, vs1^ vm is op2631=0x30 & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vwsub.vv 31..26=0x33 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwsub.vv vd, vs2, vs1, vm # vector-vector :vwsub.vv vd, vs2, vs1^ vm is op2631=0x33 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwsub.vx 31..26=0x33 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwsub.vx vd, vs2, rs1, vm # vector-scalar :vwsub.vx vd, vs2, rs1^ vm is op2631=0x33 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwsub.wv 31..26=0x37 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwsub.wv vd, vs2, vs1, vm # vector-vector :vwsub.wv vd, vs2, vs1^ vm is op2631=0x37 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwsub.wx 31..26=0x37 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwsub.wx vd, vs2, rs1, vm # vector-scalar :vwsub.wx vd, vs2, rs1^ vm is op2631=0x37 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwsubu.vv 31..26=0x32 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwsubu.vv vd, vs2, vs1, vm # vector-vector :vwsubu.vv vd, vs2, vs1^ vm is op2631=0x32 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwsubu.vx 31..26=0x32 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwsubu.vx vd, vs2, rs1, vm # vector-scalar :vwsubu.vx vd, vs2, rs1^ vm is op2631=0x32 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vwsubu.wv 31..26=0x36 vm vs2 vs1 14..12=0x2 vd 6..0=0x57 # vwsubu.wv vd, vs2, vs1, vm # vector-vector :vwsubu.wv vd, vs2, vs1^ vm is op2631=0x36 & vm & vs2 & vs1 & op1214=0x2 & vd & op0006=0x57 unimpl # vwsubu.wx 31..26=0x36 vm vs2 rs1 14..12=0x6 vd 6..0=0x57 # vwsubu.wx vd, vs2, rs1, vm # vector-scalar :vwsubu.wx vd, vs2, rs1^ vm is op2631=0x36 & vm & vs2 & rs1 & op1214=0x6 & vd & op0006=0x57 unimpl # vxor.vi 31..26=0x0b vm vs2 simm5 14..12=0x3 vd 6..0=0x57 # vxor.vi vd, vs2, simm5, vm # vector-immediate :vxor.vi vd, vs2, simm5^ vm is op2631=0xb & vm & vs2 & 
simm5 & op1214=0x3 & vd & op0006=0x57 unimpl # vxor.vv 31..26=0x0b vm vs2 vs1 14..12=0x0 vd 6..0=0x57 # vxor.vv vd, vs2, vs1, vm # Vector-vector :vxor.vv vd, vs2, vs1^ vm is op2631=0xb & vm & vs2 & vs1 & op1214=0x0 & vd & op0006=0x57 unimpl # vxor.vx 31..26=0x0b vm vs2 rs1 14..12=0x4 vd 6..0=0x57 # vxor.vx vd, vs2, rs1, vm # vector-scalar :vxor.vx vd, vs2, rs1^ vm is op2631=0xb & vm & vs2 & rs1 & op1214=0x4 & vd & op0006=0x57 unimpl # vzext.vf2 31..26=0x12 vm vs2 19..15=6 14..12=0x2 vd 6..0=0x57 # vzext.vf2 vd, vs2, vm # Zero-extend SEW/2 source to SEW destination :vzext.vf2 vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x6 & op1214=0x2 & vd & op0006=0x57 unimpl # vzext.vf4 31..26=0x12 vm vs2 19..15=4 14..12=0x2 vd 6..0=0x57 # vzext.vf4 vd, vs2, vm # Zero-extend SEW/4 source to SEW destination :vzext.vf4 vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x4 & op1214=0x2 & vd & op0006=0x57 unimpl # vzext.vf8 31..26=0x12 vm vs2 19..15=2 14..12=0x2 vd 6..0=0x57 # vzext.vf8 vd, vs2, vm # Zero-extend SEW/8 source to SEW destination :vzext.vf8 vd, vs2^ vm is op2631=0x12 & vm & vs2 & op1519=0x2 & op1214=0x2 & vd & op0006=0x57 unimpl ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.table.sinc ================================================ attach variables [ r0711 r1519 r2024 r2731 ] [ zero ra sp gp tp t0 t1 t2 s0 s1 a0 a1 a2 a3 a4 a5 a6 a7 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 t3 t4 t5 t6 ]; attach variables [ cd0711NoSp ] [ zero ra _ gp tp t0 t1 t2 s0 s1 a0 a1 a2 a3 a4 a5 a6 a7 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 t3 t4 t5 t6 ]; attach variables [ cr0206 cr0711 cd0711 ] [ zero ra sp gp tp t0 t1 t2 s0 s1 a0 a1 a2 a3 a4 a5 a6 a7 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 t3 t4 t5 t6 ]; attach variables [ cr0204s cr0709s cd0709s ] [ s0 s1 a0 a1 a2 a3 a4 a5 ]; attach variables [ fr0711 fr1519 fr2024 fr2731 ] [ ft0 ft1 ft2 ft3 ft4 ft5 ft6 ft7 fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5 fa6 fa7 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 ft8 ft9 ft10 ft11 ]; 
attach variables [ cfr0206 cfr0711 ] [ ft0 ft1 ft2 ft3 ft4 ft5 ft6 ft7 fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5 fa6 fa7 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 ft8 ft9 ft10 ft11 ]; attach variables [ cfr0204s cfr0709s ] [ fs0 fs1 fa0 fa1 fa2 fa3 fa4 fa5 ]; attach variables [ v0711 v1519 v2024 ] [ v0 v1 v2 v3 v4 v5 v6 v7 v8 v9 v10 v11 v12 v13 v14 v15 v16 v17 v18 v19 v20 v21 v22 v23 v24 v25 v26 v27 v28 v29 v30 v31 ]; #attach variables [ csr_0 ] # [ ustatus fflags frm fcsr uie utvec csr006 csr007 # vstart vxsat vxrm csr00b csr00c csr00d csr00e vcsr # csr010 csr011 csr012 csr013 csr014 csr015 csr016 csr017 # csr018 csr019 csr01a csr01b csr01c csr01d csr01e csr01f # csr020 csr021 csr022 csr023 csr024 csr025 csr026 csr027 # csr028 csr029 csr02a csr02b csr02c csr02d csr02e csr02f # csr030 csr031 csr032 csr033 csr034 csr035 csr036 csr037 # csr038 csr039 csr03a csr03b csr03c csr03d csr03e csr03f # uscratch uepc ucause utval uip csr045 csr046 csr047 # csr048 csr049 csr04a csr04b csr04c csr04d csr04e csr04f # csr050 csr051 csr052 csr053 csr054 csr055 csr056 csr057 # csr058 csr059 csr05a csr05b csr05c csr05d csr05e csr05f # csr060 csr061 csr062 csr063 csr064 csr065 csr066 csr067 # csr068 csr069 csr06a csr06b csr06c csr06d csr06e csr06f # csr070 csr071 csr072 csr073 csr074 csr075 csr076 csr077 # csr078 csr079 csr07a csr07b csr07c csr07d csr07e csr07f # csr080 csr081 csr082 csr083 csr084 csr085 csr086 csr087 # csr088 csr089 csr08a csr08b csr08c csr08d csr08e csr08f # csr090 csr091 csr092 csr093 csr094 csr095 csr096 csr097 # csr098 csr099 csr09a csr09b csr09c csr09d csr09e csr09f # csr0a0 csr0a1 csr0a2 csr0a3 csr0a4 csr0a5 csr0a6 csr0a7 # csr0a8 csr0a9 csr0aa csr0ab csr0ac csr0ad csr0ae csr0af # csr0b0 csr0b1 csr0b2 csr0b3 csr0b4 csr0b5 csr0b6 csr0b7 # csr0b8 csr0b9 csr0ba csr0bb csr0bc csr0bd csr0be csr0bf # csr0c0 csr0c1 csr0c2 csr0c3 csr0c4 csr0c5 csr0c6 csr0c7 # csr0c8 csr0c9 csr0ca csr0cb csr0cc csr0cd csr0ce csr0cf # csr0d0 csr0d1 csr0d2 csr0d3 csr0d4 csr0d5 csr0d6 csr0d7 # csr0d8 
csr0d9 csr0da csr0db csr0dc csr0dd csr0de csr0df # csr0e0 csr0e1 csr0e2 csr0e3 csr0e4 csr0e5 csr0e6 csr0e7 # csr0e8 csr0e9 csr0ea csr0eb csr0ec csr0ed csr0ee csr0ef # csr0f0 csr0f1 csr0f2 csr0f3 csr0f4 csr0f5 csr0f6 csr0f7 # csr0f8 csr0f9 csr0fa csr0fb csr0fc csr0fd csr0fe csr0ff ]; #attach variables [ csr_1 ] # [ sstatus csr101 sedeleg sideleg sie stvec scounteren csr107 # csr108 csr109 csr10a csr10b csr10c csr10d csr10e csr10f # csr110 csr111 csr112 csr113 csr114 csr115 csr116 csr117 # csr118 csr119 csr11a csr11b csr11c csr11d csr11e csr11f # csr120 csr121 csr122 csr123 csr124 csr125 csr126 csr127 # csr128 csr129 csr12a csr12b csr12c csr12d csr12e csr12f # csr130 csr131 csr132 csr133 csr134 csr135 csr136 csr137 # csr138 csr139 csr13a csr13b csr13c csr13d csr13e csr13f # sscratch sepc scause stval sip csr145 csr146 csr147 # csr148 csr149 csr14a csr14b csr14c csr14d csr14e csr14f # csr150 csr151 csr152 csr153 csr154 csr155 csr156 csr157 # csr158 csr159 csr15a csr15b csr15c csr15d csr15e csr15f # csr160 csr161 csr162 csr163 csr164 csr165 csr166 csr167 # csr168 csr169 csr16a csr16b csr16c csr16d csr16e csr16f # csr170 csr171 csr172 csr173 csr174 csr175 csr176 csr177 # csr178 csr179 csr17a csr17b csr17c csr17d csr17e csr17f # satp csr181 csr182 csr183 csr184 csr185 csr186 csr187 # csr188 csr189 csr18a csr18b csr18c csr18d csr18e csr18f # csr190 csr191 csr192 csr193 csr194 csr195 csr196 csr197 # csr198 csr199 csr19a csr19b csr19c csr19d csr19e csr19f # csr1a0 csr1a1 csr1a2 csr1a3 csr1a4 csr1a5 csr1a6 csr1a7 # csr1a8 csr1a9 csr1aa csr1ab csr1ac csr1ad csr1ae csr1af # csr1b0 csr1b1 csr1b2 csr1b3 csr1b4 csr1b5 csr1b6 csr1b7 # csr1b8 csr1b9 csr1ba csr1bb csr1bc csr1bd csr1be csr1bf # csr1c0 csr1c1 csr1c2 csr1c3 csr1c4 csr1c5 csr1c6 csr1c7 # csr1c8 csr1c9 csr1ca csr1cb csr1cc csr1cd csr1ce csr1cf # csr1d0 csr1d1 csr1d2 csr1d3 csr1d4 csr1d5 csr1d6 csr1d7 # csr1d8 csr1d9 csr1da csr1db csr1dc csr1dd csr1de csr1df # csr1e0 csr1e1 csr1e2 csr1e3 csr1e4 csr1e5 csr1e6 csr1e7 # 
csr1e8 csr1e9 csr1ea csr1eb csr1ec csr1ed csr1ee csr1ef # csr1f0 csr1f1 csr1f2 csr1f3 csr1f4 csr1f5 csr1f6 csr1f7 # csr1f8 csr1f9 csr1fa csr1fb csr1fc csr1fd csr1fe csr1ff ]; #attach variables [ csr_2 ] # [ vsstatus csr201 csr202 csr203 vsie vstvec csr206 csr207 # csr208 csr209 csr20a csr20b csr20c csr20d csr20e csr20f # csr210 csr211 csr212 csr213 csr214 csr215 csr216 csr217 # csr218 csr219 csr21a csr21b csr21c csr21d csr21e csr21f # csr220 csr221 csr222 csr223 csr224 csr225 csr226 csr227 # csr228 csr229 csr22a csr22b csr22c csr22d csr22e csr22f # csr230 csr231 csr232 csr233 csr234 csr235 csr236 csr237 # csr238 csr239 csr23a csr23b csr23c csr23d csr23e csr23f # vsscratch vsepc vscause vstval vsip csr245 csr246 csr247 # csr248 csr249 csr24a csr24b csr24c csr24d csr24e csr24f # csr250 csr251 csr252 csr253 csr254 csr255 csr256 csr257 # csr258 csr259 csr25a csr25b csr25c csr25d csr25e csr25f # csr260 csr261 csr262 csr263 csr264 csr265 csr266 csr267 # csr268 csr269 csr26a csr26b csr26c csr26d csr26e csr26f # csr270 csr271 csr272 csr273 csr274 csr275 csr276 csr277 # csr278 csr279 csr27a csr27b csr27c csr27d csr27e csr27f # vsatp csr281 csr282 csr283 csr284 csr285 csr286 csr287 # csr288 csr289 csr28a csr28b csr28c csr28d csr28e csr28f # csr290 csr291 csr292 csr293 csr294 csr295 csr296 csr297 # csr298 csr299 csr29a csr29b csr29c csr29d csr29e csr29f # csr2a0 csr2a1 csr2a2 csr2a3 csr2a4 csr2a5 csr2a6 csr2a7 # csr2a8 csr2a9 csr2aa csr2ab csr2ac csr2ad csr2ae csr2af # csr2b0 csr2b1 csr2b2 csr2b3 csr2b4 csr2b5 csr2b6 csr2b7 # csr2b8 csr2b9 csr2ba csr2bb csr2bc csr2bd csr2be csr2bf # csr2c0 csr2c1 csr2c2 csr2c3 csr2c4 csr2c5 csr2c6 csr2c7 # csr2c8 csr2c9 csr2ca csr2cb csr2cc csr2cd csr2ce csr2cf # csr2d0 csr2d1 csr2d2 csr2d3 csr2d4 csr2d5 csr2d6 csr2d7 # csr2d8 csr2d9 csr2da csr2db csr2dc csr2dd csr2de csr2df # csr2e0 csr2e1 csr2e2 csr2e3 csr2e4 csr2e5 csr2e6 csr2e7 # csr2e8 csr2e9 csr2ea csr2eb csr2ec csr2ed csr2ee csr2ef # csr2f0 csr2f1 csr2f2 csr2f3 csr2f4 csr2f5 csr2f6 
csr2f7 # csr2f8 csr2f9 csr2fa csr2fb csr2fc csr2fd csr2fe csr2ff ]; #attach variables [ csr_3 ] # [ mstatus misa medeleg mideleg mie mtvec mcounteren csr307 # csr308 csr309 csr30a csr30b csr30c csr30d csr30e csr30f # mstatush csr311 csr312 csr313 csr314 csr315 csr316 csr317 # csr318 csr319 csr31a csr31b csr31c csr31d csr31e csr31f # mcountinhibit csr321 csr322 mhpmevent3 mhpmevent4 mhpmevent5 mhpmevent6 mhpmevent7 # mhpmevent8 mhpmevent9 mhpmevent10 mhpmevent11 mhpmevent12 mhpmevent13 mhpmevent14 mhpmevent15 # mhpmevent16 mhpmevent17 mhpmevent18 mhpmevent19 mhpmevent20 mhpmevent21 mhpmevent22 mhpmevent23 # mhpmevent24 mhpmevent25 mhpmevent26 mhpmevent27 mhpmevent28 mhpmevent29 mhpmevent30 mhpmevent31 # mscratch mepc mcause mtval mip csr345 csr346 csr347 # csr348 csr349 mtinst mtval2 csr34c csr34d csr34e csr34f # csr350 csr351 csr352 csr353 csr354 csr355 csr356 csr357 # csr358 csr359 csr35a csr35b csr35c csr35d csr35e csr35f # csr360 csr361 csr362 csr363 csr364 csr365 csr366 csr367 # csr368 csr369 csr36a csr36b csr36c csr36d csr36e csr36f # csr370 csr371 csr372 csr373 csr374 csr375 csr376 csr377 # csr378 csr379 csr37a csr37b csr37c csr37d csr37e csr37f # mbase mbound mibase mibound mdbase mdbound csr386 csr387 # csr388 csr389 csr38a csr38b csr38c csr38d csr38e csr38f # csr390 csr391 csr392 csr393 csr394 csr395 csr396 csr397 # csr398 csr399 csr39a csr39b csr39c csr39d csr39e csr39f # pmpcfg0 pmpcfg1 pmpcfg2 pmpcfg3 pmpcfg4 pmpcfg5 pmpcfg6 pmpcfg7 # pmpcfg8 pmpcfg9 pmpcfg10 pmpcfg11 pmpcfg12 pmpcfg13 pmpcfg14 pmpcfg15 # pmpaddr0 pmpaddr1 pmpaddr2 pmpaddr3 pmpaddr4 pmpaddr5 pmpaddr6 pmpaddr7 # pmpaddr8 pmpaddr9 pmpaddr10 pmpaddr11 pmpaddr12 pmpaddr13 pmpaddr14 pmpaddr15 # pmpaddr16 pmpaddr17 pmpaddr18 pmpaddr19 pmpaddr20 pmpaddr21 pmpaddr22 pmpaddr23 # pmpaddr24 pmpaddr25 pmpaddr26 pmpaddr27 pmpaddr28 pmpaddr29 pmpaddr30 pmpaddr31 # pmpaddr32 pmpaddr33 pmpaddr34 pmpaddr35 pmpaddr36 pmpaddr37 pmpaddr38 pmpaddr39 # pmpaddr40 pmpaddr41 pmpaddr42 pmpaddr43 pmpaddr44 
pmpaddr45 pmpaddr46 pmpaddr47 # pmpaddr48 pmpaddr49 pmpaddr50 pmpaddr51 pmpaddr52 pmpaddr53 pmpaddr54 pmpaddr55 # pmpaddr56 pmpaddr57 pmpaddr58 pmpaddr59 pmpaddr60 pmpaddr61 pmpaddr62 pmpaddr63 # csr3f0 csr3f1 csr3f2 csr3f3 csr3f4 csr3f5 csr3f6 csr3f7 # csr3f8 csr3f9 csr3fa csr3fb csr3fc csr3fd csr3fe csr3ff ]; #attach variables [ csr_4 ] # [ csr400 csr401 csr402 csr403 csr404 csr405 csr406 csr407 # csr408 csr409 csr40a csr40b csr40c csr40d csr40e csr40f # csr410 csr411 csr412 csr413 csr414 csr415 csr416 csr417 # csr418 csr419 csr41a csr41b csr41c csr41d csr41e csr41f # csr420 csr421 csr422 csr423 csr424 csr425 csr426 csr427 # csr428 csr429 csr42a csr42b csr42c csr42d csr42e csr42f # csr430 csr431 csr432 csr433 csr434 csr435 csr436 csr437 # csr438 csr439 csr43a csr43b csr43c csr43d csr43e csr43f # csr440 csr441 csr442 csr443 csr444 csr445 csr446 csr447 # csr448 csr449 csr44a csr44b csr44c csr44d csr44e csr44f # csr450 csr451 csr452 csr453 csr454 csr455 csr456 csr457 # csr458 csr459 csr45a csr45b csr45c csr45d csr45e csr45f # csr460 csr461 csr462 csr463 csr464 csr465 csr466 csr467 # csr468 csr469 csr46a csr46b csr46c csr46d csr46e csr46f # csr470 csr471 csr472 csr473 csr474 csr475 csr476 csr477 # csr478 csr479 csr47a csr47b csr47c csr47d csr47e csr47f # csr480 csr481 csr482 csr483 csr484 csr485 csr486 csr487 # csr488 csr489 csr48a csr48b csr48c csr48d csr48e csr48f # csr490 csr491 csr492 csr493 csr494 csr495 csr496 csr497 # csr498 csr499 csr49a csr49b csr49c csr49d csr49e csr49f # csr4a0 csr4a1 csr4a2 csr4a3 csr4a4 csr4a5 csr4a6 csr4a7 # csr4a8 csr4a9 csr4aa csr4ab csr4ac csr4ad csr4ae csr4af # csr4b0 csr4b1 csr4b2 csr4b3 csr4b4 csr4b5 csr4b6 csr4b7 # csr4b8 csr4b9 csr4ba csr4bb csr4bc csr4bd csr4be csr4bf # csr4c0 csr4c1 csr4c2 csr4c3 csr4c4 csr4c5 csr4c6 csr4c7 # csr4c8 csr4c9 csr4ca csr4cb csr4cc csr4cd csr4ce csr4cf # csr4d0 csr4d1 csr4d2 csr4d3 csr4d4 csr4d5 csr4d6 csr4d7 # csr4d8 csr4d9 csr4da csr4db csr4dc csr4dd csr4de csr4df # csr4e0 csr4e1 csr4e2 csr4e3 
csr4e4 csr4e5 csr4e6 csr4e7 # csr4e8 csr4e9 csr4ea csr4eb csr4ec csr4ed csr4ee csr4ef # csr4f0 csr4f1 csr4f2 csr4f3 csr4f4 csr4f5 csr4f6 csr4f7 # csr4f8 csr4f9 csr4fa csr4fb csr4fc csr4fd csr4fe csr4ff ]; #attach variables [ csr_50 ] # [ csr500 csr501 csr502 csr503 csr504 csr505 csr506 csr507 # csr508 csr509 csr50a csr50b csr50c csr50d csr50e csr50f # csr510 csr511 csr512 csr513 csr514 csr515 csr516 csr517 # csr518 csr519 csr51a csr51b csr51c csr51d csr51e csr51f # csr520 csr521 csr522 csr523 csr524 csr525 csr526 csr527 # csr528 csr529 csr52a csr52b csr52c csr52d csr52e csr52f # csr530 csr531 csr532 csr533 csr534 csr535 csr536 csr537 # csr538 csr539 csr53a csr53b csr53c csr53d csr53e csr53f # csr540 csr541 csr542 csr543 csr544 csr545 csr546 csr547 # csr548 csr549 csr54a csr54b csr54c csr54d csr54e csr54f # csr550 csr551 csr552 csr553 csr554 csr555 csr556 csr557 # csr558 csr559 csr55a csr55b csr55c csr55d csr55e csr55f # csr560 csr561 csr562 csr563 csr564 csr565 csr566 csr567 # csr568 csr569 csr56a csr56b csr56c csr56d csr56e csr56f # csr570 csr571 csr572 csr573 csr574 csr575 csr576 csr577 # csr578 csr579 csr57a csr57b csr57c csr57d csr57e csr57f ]; #attach variables [ csr_58 ] # [ csr580 csr581 csr582 csr583 csr584 csr585 csr586 csr587 # csr588 csr589 csr58a csr58b csr58c csr58d csr58e csr58f # csr590 csr591 csr592 csr593 csr594 csr595 csr596 csr597 # csr598 csr599 csr59a csr59b csr59c csr59d csr59e csr59f # csr5a0 csr5a1 csr5a2 csr5a3 csr5a4 csr5a5 csr5a6 csr5a7 # scontext csr5a9 csr5aa csr5ab csr5ac csr5ad csr5ae csr5af # csr5b0 csr5b1 csr5b2 csr5b3 csr5b4 csr5b5 csr5b6 csr5b7 # csr5b8 csr5b9 csr5ba csr5bb csr5bc csr5bd csr5be csr5bf ]; #attach variables [ csr_5C ] # [ csr5c0 csr5c1 csr5c2 csr5c3 csr5c4 csr5c5 csr5c6 csr5c7 # csr5c8 csr5c9 csr5ca csr5cb csr5cc csr5cd csr5ce csr5cf # csr5d0 csr5d1 csr5d2 csr5d3 csr5d4 csr5d5 csr5d6 csr5d7 # csr5d8 csr5d9 csr5da csr5db csr5dc csr5dd csr5de csr5df # csr5e0 csr5e1 csr5e2 csr5e3 csr5e4 csr5e5 csr5e6 csr5e7 # csr5e8 
csr5e9 csr5ea csr5eb csr5ec csr5ed csr5ee csr5ef # csr5f0 csr5f1 csr5f2 csr5f3 csr5f4 csr5f5 csr5f6 csr5f7 # csr5f8 csr5f9 csr5fa csr5fb csr5fc csr5fd csr5fe csr5ff ]; #attach variables [ csr_60 ] # [ hstatus csr601 hedeleg hideleg hie htimedelta hcounteren hgeie # csr608 csr609 csr60a csr60b csr60c csr60d csr60e csr60f # csr610 csr611 csr612 csr613 csr614 htimedeltah csr616 csr617 # csr618 csr619 csr61a csr61b csr61c csr61d csr61e csr61f # csr620 csr621 csr622 csr623 csr624 csr625 csr626 csr627 # csr628 csr629 csr62a csr62b csr62c csr62d csr62e csr62f # csr630 csr631 csr632 csr633 csr634 csr635 csr636 csr637 # csr638 csr639 csr63a csr63b csr63c csr63d csr63e csr63f # csr640 csr641 csr642 htval hip hvip csr646 csr647 # csr648 csr649 htinst csr64b csr64c csr64d csr64e csr64f # csr650 csr651 csr652 csr653 csr654 csr655 csr656 csr657 # csr658 csr659 csr65a csr65b csr65c csr65d csr65e csr65f # csr660 csr661 csr662 csr663 csr664 csr665 csr666 csr667 # csr668 csr669 csr66a csr66b csr66c csr66d csr66e csr66f # csr670 csr671 csr672 csr673 csr674 csr675 csr676 csr677 # csr678 csr679 csr67a csr67b csr67c csr67d csr67e csr67f ]; #attach variables [ csr_68 ] # [ hgatp csr681 csr682 csr683 csr684 csr685 csr686 csr687 # csr688 csr689 csr68a csr68b csr68c csr68d csr68e csr68f # csr690 csr691 csr692 csr693 csr694 csr695 csr696 csr697 # csr698 csr699 csr69a csr69b csr69c csr69d csr69e csr69f # csr6a0 csr6a1 csr6a2 csr6a3 csr6a4 csr6a5 csr6a6 csr6a7 # hcontext csr6a9 csr6aa csr6ab csr6ac csr6ad csr6ae csr6af # csr6b0 csr6b1 csr6b2 csr6b3 csr6b4 csr6b5 csr6b6 csr6b7 # csr6b8 csr6b9 csr6ba csr6bb csr6bc csr6bd csr6be csr6bf ]; #attach variables [ csr_6C ] # [ csr6c0 csr6c1 csr6c2 csr6c3 csr6c4 csr6c5 csr6c6 csr6c7 # csr6c8 csr6c9 csr6ca csr6cb csr6cc csr6cd csr6ce csr6cf # csr6d0 csr6d1 csr6d2 csr6d3 csr6d4 csr6d5 csr6d6 csr6d7 # csr6d8 csr6d9 csr6da csr6db csr6dc csr6dd csr6de csr6df # csr6e0 csr6e1 csr6e2 csr6e3 csr6e4 csr6e5 csr6e6 csr6e7 # csr6e8 csr6e9 csr6ea csr6eb csr6ec csr6ed 
csr6ee csr6ef # csr6f0 csr6f1 csr6f2 csr6f3 csr6f4 csr6f5 csr6f6 csr6f7 # csr6f8 csr6f9 csr6fa csr6fb csr6fc csr6fd csr6fe csr6ff ]; #attach variables [ csr_70 ] # [ csr700 csr701 csr702 csr703 csr704 csr705 csr706 csr707 # csr708 csr709 csr70a csr70b csr70c csr70d csr70e csr70f # csr710 csr711 csr712 csr713 csr714 csr715 csr716 csr717 # csr718 csr719 csr71a csr71b csr71c csr71d csr71e csr71f # csr720 csr721 csr722 csr723 csr724 csr725 csr726 csr727 # csr728 csr729 csr72a csr72b csr72c csr72d csr72e csr72f # csr730 csr731 csr732 csr733 csr734 csr735 csr736 csr737 # csr738 csr739 csr73a csr73b csr73c csr73d csr73e csr73f # csr740 csr741 csr742 csr743 csr744 csr745 csr746 csr747 # csr748 csr749 csr74a csr74b csr74c csr74d csr74e csr74f # csr750 csr751 csr752 csr753 csr754 csr755 csr756 csr757 # csr758 csr759 csr75a csr75b csr75c csr75d csr75e csr75f # csr760 csr761 csr762 csr763 csr764 csr765 csr766 csr767 # csr768 csr769 csr76a csr76b csr76c csr76d csr76e csr76f # csr770 csr771 csr772 csr773 csr774 csr775 csr776 csr777 # csr778 csr779 csr77a csr77b csr77c csr77d csr77e csr77f ]; #attach variables [ csr_78 ] # [ csr780 csr781 csr782 csr783 csr784 csr785 csr786 csr787 # csr788 csr789 csr78a csr78b csr78c csr78d csr78e csr78f # csr790 csr791 csr792 csr793 csr794 csr795 csr796 csr797 # csr798 csr799 csr79a csr79b csr79c csr79d csr79e csr79f ]; #attach variables [ csr_7A ] # [ tselect tdata1 tdata2 tdata3 csr7a4 csr7a5 csr7a6 csr7a7 # mcontext csr7a9 csr7aa csr7ab csr7ac csr7ad csr7ae csr7af ]; #attach variables [ csr_7B ] # [ dcsr dpc dscratch0 dscratch1 csr7b4 csr7b5 csr7b6 csr7b7 # csr7b8 csr7b9 csr7ba csr7bb csr7bc csr7bd csr7be csr7bf ]; #attach variables [ csr_7C ] # [ csr7c0 csr7c1 csr7c2 csr7c3 csr7c4 csr7c5 csr7c6 csr7c7 # csr7c8 csr7c9 csr7ca csr7cb csr7cc csr7cd csr7ce csr7cf # csr7d0 csr7d1 csr7d2 csr7d3 csr7d4 csr7d5 csr7d6 csr7d7 # csr7d8 csr7d9 csr7da csr7db csr7dc csr7dd csr7de csr7df # csr7e0 csr7e1 csr7e2 csr7e3 csr7e4 csr7e5 csr7e6 csr7e7 # csr7e8 
csr7e9 csr7ea csr7eb csr7ec csr7ed csr7ee csr7ef # csr7f0 csr7f1 csr7f2 csr7f3 csr7f4 csr7f5 csr7f6 csr7f7 # csr7f8 csr7f9 csr7fa csr7fb csr7fc csr7fd csr7fe csr7ff ]; #attach variables [ csr_8 ] # [ csr800 csr801 csr802 csr803 csr804 csr805 csr806 csr807 # csr808 csr809 csr80a csr80b csr80c csr80d csr80e csr80f # csr810 csr811 csr812 csr813 csr814 csr815 csr816 csr817 # csr818 csr819 csr81a csr81b csr81c csr81d csr81e csr81f # csr820 csr821 csr822 csr823 csr824 csr825 csr826 csr827 # csr828 csr829 csr82a csr82b csr82c csr82d csr82e csr82f # csr830 csr831 csr832 csr833 csr834 csr835 csr836 csr837 # csr838 csr839 csr83a csr83b csr83c csr83d csr83e csr83f # csr840 csr841 csr842 csr843 csr844 csr845 csr846 csr847 # csr848 csr849 csr84a csr84b csr84c csr84d csr84e csr84f # csr850 csr851 csr852 csr853 csr854 csr855 csr856 csr857 # csr858 csr859 csr85a csr85b csr85c csr85d csr85e csr85f # csr860 csr861 csr862 csr863 csr864 csr865 csr866 csr867 # csr868 csr869 csr86a csr86b csr86c csr86d csr86e csr86f # csr870 csr871 csr872 csr873 csr874 csr875 csr876 csr877 # csr878 csr879 csr87a csr87b csr87c csr87d csr87e csr87f # csr880 csr881 csr882 csr883 csr884 csr885 csr886 csr887 # csr888 csr889 csr88a csr88b csr88c csr88d csr88e csr88f # csr890 csr891 csr892 csr893 csr894 csr895 csr896 csr897 # csr898 csr899 csr89a csr89b csr89c csr89d csr89e csr89f # csr8a0 csr8a1 csr8a2 csr8a3 csr8a4 csr8a5 csr8a6 csr8a7 # csr8a8 csr8a9 csr8aa csr8ab csr8ac csr8ad csr8ae csr8af # csr8b0 csr8b1 csr8b2 csr8b3 csr8b4 csr8b5 csr8b6 csr8b7 # csr8b8 csr8b9 csr8ba csr8bb csr8bc csr8bd csr8be csr8bf # csr8c0 csr8c1 csr8c2 csr8c3 csr8c4 csr8c5 csr8c6 csr8c7 # csr8c8 csr8c9 csr8ca csr8cb csr8cc csr8cd csr8ce csr8cf # csr8d0 csr8d1 csr8d2 csr8d3 csr8d4 csr8d5 csr8d6 csr8d7 # csr8d8 csr8d9 csr8da csr8db csr8dc csr8dd csr8de csr8df # csr8e0 csr8e1 csr8e2 csr8e3 csr8e4 csr8e5 csr8e6 csr8e7 # csr8e8 csr8e9 csr8ea csr8eb csr8ec csr8ed csr8ee csr8ef # csr8f0 csr8f1 csr8f2 csr8f3 csr8f4 csr8f5 csr8f6 csr8f7 # 
csr8f8 csr8f9 csr8fa csr8fb csr8fc csr8fd csr8fe csr8ff ]; #attach variables [ csr_90 ] # [ csr900 csr901 csr902 csr903 csr904 csr905 csr906 csr907 # csr908 csr909 csr90a csr90b csr90c csr90d csr90e csr90f # csr910 csr911 csr912 csr913 csr914 csr915 csr916 csr917 # csr918 csr919 csr91a csr91b csr91c csr91d csr91e csr91f # csr920 csr921 csr922 csr923 csr924 csr925 csr926 csr927 # csr928 csr929 csr92a csr92b csr92c csr92d csr92e csr92f # csr930 csr931 csr932 csr933 csr934 csr935 csr936 csr937 # csr938 csr939 csr93a csr93b csr93c csr93d csr93e csr93f # csr940 csr941 csr942 csr943 csr944 csr945 csr946 csr947 # csr948 csr949 csr94a csr94b csr94c csr94d csr94e csr94f # csr950 csr951 csr952 csr953 csr954 csr955 csr956 csr957 # csr958 csr959 csr95a csr95b csr95c csr95d csr95e csr95f # csr960 csr961 csr962 csr963 csr964 csr965 csr966 csr967 # csr968 csr969 csr96a csr96b csr96c csr96d csr96e csr96f # csr970 csr971 csr972 csr973 csr974 csr975 csr976 csr977 # csr978 csr979 csr97a csr97b csr97c csr97d csr97e csr97f ]; #attach variables [ csr_98 ] # [ csr980 csr981 csr982 csr983 csr984 csr985 csr986 csr987 # csr988 csr989 csr98a csr98b csr98c csr98d csr98e csr98f # csr990 csr991 csr992 csr993 csr994 csr995 csr996 csr997 # csr998 csr999 csr99a csr99b csr99c csr99d csr99e csr99f # csr9a0 csr9a1 csr9a2 csr9a3 csr9a4 csr9a5 csr9a6 csr9a7 # csr9a8 csr9a9 csr9aa csr9ab csr9ac csr9ad csr9ae csr9af # csr9b0 csr9b1 csr9b2 csr9b3 csr9b4 csr9b5 csr9b6 csr9b7 # csr9b8 csr9b9 csr9ba csr9bb csr9bc csr9bd csr9be csr9bf ]; #attach variables [ csr_9C ] # [ csr9c0 csr9c1 csr9c2 csr9c3 csr9c4 csr9c5 csr9c6 csr9c7 # csr9c8 csr9c9 csr9ca csr9cb csr9cc csr9cd csr9ce csr9cf # csr9d0 csr9d1 csr9d2 csr9d3 csr9d4 csr9d5 csr9d6 csr9d7 # csr9d8 csr9d9 csr9da csr9db csr9dc csr9dd csr9de csr9df # csr9e0 csr9e1 csr9e2 csr9e3 csr9e4 csr9e5 csr9e6 csr9e7 # csr9e8 csr9e9 csr9ea csr9eb csr9ec csr9ed csr9ee csr9ef # csr9f0 csr9f1 csr9f2 csr9f3 csr9f4 csr9f5 csr9f6 csr9f7 # csr9f8 csr9f9 csr9fa csr9fb csr9fc csr9fd 
csr9fe csr9ff ]; #attach variables [ csr_A0 ] # [ csra00 csra01 csra02 csra03 csra04 csra05 csra06 csra07 # csra08 csra09 csra0a csra0b csra0c csra0d csra0e csra0f # csra10 csra11 csra12 csra13 csra14 csra15 csra16 csra17 # csra18 csra19 csra1a csra1b csra1c csra1d csra1e csra1f # csra20 csra21 csra22 csra23 csra24 csra25 csra26 csra27 # csra28 csra29 csra2a csra2b csra2c csra2d csra2e csra2f # csra30 csra31 csra32 csra33 csra34 csra35 csra36 csra37 # csra38 csra39 csra3a csra3b csra3c csra3d csra3e csra3f # csra40 csra41 csra42 csra43 csra44 csra45 csra46 csra47 # csra48 csra49 csra4a csra4b csra4c csra4d csra4e csra4f # csra50 csra51 csra52 csra53 csra54 csra55 csra56 csra57 # csra58 csra59 csra5a csra5b csra5c csra5d csra5e csra5f # csra60 csra61 csra62 csra63 csra64 csra65 csra66 csra67 # csra68 csra69 csra6a csra6b csra6c csra6d csra6e csra6f # csra70 csra71 csra72 csra73 csra74 csra75 csra76 csra77 # csra78 csra79 csra7a csra7b csra7c csra7d csra7e csra7f ]; #attach variables [ csr_A8 ] # [ csra80 csra81 csra82 csra83 csra84 csra85 csra86 csra87 # csra88 csra89 csra8a csra8b csra8c csra8d csra8e csra8f # csra90 csra91 csra92 csra93 csra94 csra95 csra96 csra97 # csra98 csra99 csra9a csra9b csra9c csra9d csra9e csra9f # csraa0 csraa1 csraa2 csraa3 csraa4 csraa5 csraa6 csraa7 # csraa8 csraa9 csraaa csraab csraac csraad csraae csraaf # csrab0 csrab1 csrab2 csrab3 csrab4 csrab5 csrab6 csrab7 # csrab8 csrab9 csraba csrabb csrabc csrabd csrabe csrabf ]; #attach variables [ csr_AC ] # [ csrac0 csrac1 csrac2 csrac3 csrac4 csrac5 csrac6 csrac7 # csrac8 csrac9 csraca csracb csracc csracd csrace csracf # csrad0 csrad1 csrad2 csrad3 csrad4 csrad5 csrad6 csrad7 # csrad8 csrad9 csrada csradb csradc csradd csrade csradf # csrae0 csrae1 csrae2 csrae3 csrae4 csrae5 csrae6 csrae7 # csrae8 csrae9 csraea csraeb csraec csraed csraee csraef # csraf0 csraf1 csraf2 csraf3 csraf4 csraf5 csraf6 csraf7 # csraf8 csraf9 csrafa csrafb csrafc csrafd csrafe csraff ]; #attach variables [ 
csr_B0 ] # [ mcycle csrb01 minstret mhpmcounter3 mhpmcounter4 mhpmcounter5 mhpmcounter6 mhpmcounter7 # mhpmcounter8 mhpmcounter9 mhpmcounter10 mhpmcounter11 mhpmcounter12 mhpmcounter13 mhpmcounter14 mhpmcounter15 # mhpmcounter16 mhpmcounter17 mhpmcounter18 mhpmcounter19 mhpmcounter20 mhpmcounter21 mhpmcounter22 mhpmcounter23 # mhpmcounter24 mhpmcounter25 mhpmcounter26 mhpmcounter27 mhpmcounter28 mhpmcounter29 mhpmcounter30 mhpmcounter31 # csrb20 csrb21 csrb22 csrb23 csrb24 csrb25 csrb26 csrb27 # csrb28 csrb29 csrb2a csrb2b csrb2c csrb2d csrb2e csrb2f # csrb30 csrb31 csrb32 csrb33 csrb34 csrb35 csrb36 csrb37 # csrb38 csrb39 csrb3a csrb3b csrb3c csrb3d csrb3e csrb3f # csrb40 csrb41 csrb42 csrb43 csrb44 csrb45 csrb46 csrb47 # csrb48 csrb49 csrb4a csrb4b csrb4c csrb4d csrb4e csrb4f # csrb50 csrb51 csrb52 csrb53 csrb54 csrb55 csrb56 csrb57 # csrb58 csrb59 csrb5a csrb5b csrb5c csrb5d csrb5e csrb5f # csrb60 csrb61 csrb62 csrb63 csrb64 csrb65 csrb66 csrb67 # csrb68 csrb69 csrb6a csrb6b csrb6c csrb6d csrb6e csrb6f # csrb70 csrb71 csrb72 csrb73 csrb74 csrb75 csrb76 csrb77 # csrb78 csrb79 csrb7a csrb7b csrb7c csrb7d csrb7e csrb7f ]; #attach variables [ csr_B8 ] # [ mcycleh csrb81 minstreth mhpmcounter3h mhpmcounter4h mhpmcounter5h mhpmcounter6h mhpmcounter7h # mhpmcounter8h mhpmcounter9h mhpmcounter10h mhpmcounter11h mhpmcounter12h mhpmcounter13h mhpmcounter14h mhpmcounter15h # mhpmcounter16h mhpmcounter17h mhpmcounter18h mhpmcounter19h mhpmcounter20h mhpmcounter21h mhpmcounter22h mhpmcounter23h # mhpmcounter24h mhpmcounter25h mhpmcounter26h mhpmcounter27h mhpmcounter28h mhpmcounter29h mhpmcounter30h mhpmcounter31h # csrba0 csrba1 csrba2 csrba3 csrba4 csrba5 csrba6 csrba7 # csrba8 csrba9 csrbaa csrbab csrbac csrbad csrbae csrbaf # csrbb0 csrbb1 csrbb2 csrbb3 csrbb4 csrbb5 csrbb6 csrbb7 # csrbb8 csrbb9 csrbba csrbbb csrbbc csrbbd csrbbe csrbbf ]; #attach variables [ csr_BC ] # [ csrbc0 csrbc1 csrbc2 csrbc3 csrbc4 csrbc5 csrbc6 csrbc7 # csrbc8 csrbc9 csrbca csrbcb csrbcc csrbcd 
csrbce csrbcf # csrbd0 csrbd1 csrbd2 csrbd3 csrbd4 csrbd5 csrbd6 csrbd7 # csrbd8 csrbd9 csrbda csrbdb csrbdc csrbdd csrbde csrbdf # csrbe0 csrbe1 csrbe2 csrbe3 csrbe4 csrbe5 csrbe6 csrbe7 # csrbe8 csrbe9 csrbea csrbeb csrbec csrbed csrbee csrbef # csrbf0 csrbf1 csrbf2 csrbf3 csrbf4 csrbf5 csrbf6 csrbf7 # csrbf8 csrbf9 csrbfa csrbfb csrbfc csrbfd csrbfe csrbff ]; #attach variables [ csr_C0 ] # [ cycle time instret hpmcounter3 hpmcounter4 hpmcounter5 hpmcounter6 hpmcounter7 # hpmcounter8 hpmcounter9 hpmcounter10 hpmcounter11 hpmcounter12 hpmcounter13 hpmcounter14 hpmcounter15 # hpmcounter16 hpmcounter17 hpmcounter18 hpmcounter19 hpmcounter20 hpmcounter21 hpmcounter22 hpmcounter23 # hpmcounter24 hpmcounter25 hpmcounter26 hpmcounter27 hpmcounter28 hpmcounter29 hpmcounter30 hpmcounter31 # vl vtype vlenb csrc23 csrc24 csrc25 csrc26 csrc27 # csrc28 csrc29 csrc2a csrc2b csrc2c csrc2d csrc2e csrc2f # csrc30 csrc31 csrc32 csrc33 csrc34 csrc35 csrc36 csrc37 # csrc38 csrc39 csrc3a csrc3b csrc3c csrc3d csrc3e csrc3f # csrc40 csrc41 csrc42 csrc43 csrc44 csrc45 csrc46 csrc47 # csrc48 csrc49 csrc4a csrc4b csrc4c csrc4d csrc4e csrc4f # csrc50 csrc51 csrc52 csrc53 csrc54 csrc55 csrc56 csrc57 # csrc58 csrc59 csrc5a csrc5b csrc5c csrc5d csrc5e csrc5f # csrc60 csrc61 csrc62 csrc63 csrc64 csrc65 csrc66 csrc67 # csrc68 csrc69 csrc6a csrc6b csrc6c csrc6d csrc6e csrc6f # csrc70 csrc71 csrc72 csrc73 csrc74 csrc75 csrc76 csrc77 # csrc78 csrc79 csrc7a csrc7b csrc7c csrc7d csrc7e csrc7f ]; #attach variables [ csr_C8 ] # [ cycleh timeh instreth hpmcounter3h hpmcounter4h hpmcounter5h hpmcounter6h hpmcounter7h # hpmcounter8h hpmcounter9h hpmcounter10h hpmcounter11h hpmcounter12h hpmcounter13h hpmcounter14h hpmcounter15h # hpmcounter16h hpmcounter17h hpmcounter18h hpmcounter19h hpmcounter20h hpmcounter21h hpmcounter22h hpmcounter23h # hpmcounter24h hpmcounter25h hpmcounter26h hpmcounter27h hpmcounter28h hpmcounter29h hpmcounter30h hpmcounter31h # csrca0 csrca1 csrca2 csrca3 csrca4 csrca5 csrca6 
csrca7 # csrca8 csrca9 csrcaa csrcab csrcac csrcad csrcae csrcaf # csrcb0 csrcb1 csrcb2 csrcb3 csrcb4 csrcb5 csrcb6 csrcb7 # csrcb8 csrcb9 csrcba csrcbb csrcbc csrcbd csrcbe csrcbf ]; #attach variables [ csr_CC ] # [ csrcc0 csrcc1 csrcc2 csrcc3 csrcc4 csrcc5 csrcc6 csrcc7 # csrcc8 csrcc9 csrcca csrccb csrccc csrccd csrcce csrccf # csrcd0 csrcd1 csrcd2 csrcd3 csrcd4 csrcd5 csrcd6 csrcd7 # csrcd8 csrcd9 csrcda csrcdb csrcdc csrcdd csrcde csrcdf # csrce0 csrce1 csrce2 csrce3 csrce4 csrce5 csrce6 csrce7 # csrce8 csrce9 csrcea csrceb csrcec csrced csrcee csrcef # csrcf0 csrcf1 csrcf2 csrcf3 csrcf4 csrcf5 csrcf6 csrcf7 # csrcf8 csrcf9 csrcfa csrcfb csrcfc csrcfd csrcfe csrcff ]; #attach variables [ csr_D0 ] # [ csrd00 csrd01 csrd02 csrd03 csrd04 csrd05 csrd06 csrd07 # csrd08 csrd09 csrd0a csrd0b csrd0c csrd0d csrd0e csrd0f # csrd10 csrd11 csrd12 csrd13 csrd14 csrd15 csrd16 csrd17 # csrd18 csrd19 csrd1a csrd1b csrd1c csrd1d csrd1e csrd1f # csrd20 csrd21 csrd22 csrd23 csrd24 csrd25 csrd26 csrd27 # csrd28 csrd29 csrd2a csrd2b csrd2c csrd2d csrd2e csrd2f # csrd30 csrd31 csrd32 csrd33 csrd34 csrd35 csrd36 csrd37 # csrd38 csrd39 csrd3a csrd3b csrd3c csrd3d csrd3e csrd3f # csrd40 csrd41 csrd42 csrd43 csrd44 csrd45 csrd46 csrd47 # csrd48 csrd49 csrd4a csrd4b csrd4c csrd4d csrd4e csrd4f # csrd50 csrd51 csrd52 csrd53 csrd54 csrd55 csrd56 csrd57 # csrd58 csrd59 csrd5a csrd5b csrd5c csrd5d csrd5e csrd5f # csrd60 csrd61 csrd62 csrd63 csrd64 csrd65 csrd66 csrd67 # csrd68 csrd69 csrd6a csrd6b csrd6c csrd6d csrd6e csrd6f # csrd70 csrd71 csrd72 csrd73 csrd74 csrd75 csrd76 csrd77 # csrd78 csrd79 csrd7a csrd7b csrd7c csrd7d csrd7e csrd7f ]; #attach variables [ csr_D8 ] # [ csrd80 csrd81 csrd82 csrd83 csrd84 csrd85 csrd86 csrd87 # csrd88 csrd89 csrd8a csrd8b csrd8c csrd8d csrd8e csrd8f # csrd90 csrd91 csrd92 csrd93 csrd94 csrd95 csrd96 csrd97 # csrd98 csrd99 csrd9a csrd9b csrd9c csrd9d csrd9e csrd9f # csrda0 csrda1 csrda2 csrda3 csrda4 csrda5 csrda6 csrda7 # csrda8 csrda9 csrdaa csrdab 
csrdac csrdad csrdae csrdaf # csrdb0 csrdb1 csrdb2 csrdb3 csrdb4 csrdb5 csrdb6 csrdb7 # csrdb8 csrdb9 csrdba csrdbb csrdbc csrdbd csrdbe csrdbf ]; #attach variables [ csr_DC ] # [ csrdc0 csrdc1 csrdc2 csrdc3 csrdc4 csrdc5 csrdc6 csrdc7 # csrdc8 csrdc9 csrdca csrdcb csrdcc csrdcd csrdce csrdcf # csrdd0 csrdd1 csrdd2 csrdd3 csrdd4 csrdd5 csrdd6 csrdd7 # csrdd8 csrdd9 csrdda csrddb csrddc csrddd csrdde csrddf # csrde0 csrde1 csrde2 csrde3 csrde4 csrde5 csrde6 csrde7 # csrde8 csrde9 csrdea csrdeb csrdec csrded csrdee csrdef # csrdf0 csrdf1 csrdf2 csrdf3 csrdf4 csrdf5 csrdf6 csrdf7 # csrdf8 csrdf9 csrdfa csrdfb csrdfc csrdfd csrdfe csrdff ]; #attach variables [ csr_E0 ] # [ csre00 csre01 csre02 csre03 csre04 csre05 csre06 csre07 # csre08 csre09 csre0a csre0b csre0c csre0d csre0e csre0f # csre10 csre11 hgeip csre13 csre14 csre15 csre16 csre17 # csre18 csre19 csre1a csre1b csre1c csre1d csre1e csre1f # csre20 csre21 csre22 csre23 csre24 csre25 csre26 csre27 # csre28 csre29 csre2a csre2b csre2c csre2d csre2e csre2f # csre30 csre31 csre32 csre33 csre34 csre35 csre36 csre37 # csre38 csre39 csre3a csre3b csre3c csre3d csre3e csre3f # csre40 csre41 csre42 csre43 csre44 csre45 csre46 csre47 # csre48 csre49 csre4a csre4b csre4c csre4d csre4e csre4f # csre50 csre51 csre52 csre53 csre54 csre55 csre56 csre57 # csre58 csre59 csre5a csre5b csre5c csre5d csre5e csre5f # csre60 csre61 csre62 csre63 csre64 csre65 csre66 csre67 # csre68 csre69 csre6a csre6b csre6c csre6d csre6e csre6f # csre70 csre71 csre72 csre73 csre74 csre75 csre76 csre77 # csre78 csre79 csre7a csre7b csre7c csre7d csre7e csre7f ]; #attach variables [ csr_E8 ] # [ csre80 csre81 csre82 csre83 csre84 csre85 csre86 csre87 # csre88 csre89 csre8a csre8b csre8c csre8d csre8e csre8f # csre90 csre91 csre92 csre93 csre94 csre95 csre96 csre97 # csre98 csre99 csre9a csre9b csre9c csre9d csre9e csre9f # csrea0 csrea1 csrea2 csrea3 csrea4 csrea5 csrea6 csrea7 # csrea8 csrea9 csreaa csreab csreac csread csreae csreaf # csreb0 
csreb1 csreb2 csreb3 csreb4 csreb5 csreb6 csreb7 # csreb8 csreb9 csreba csrebb csrebc csrebd csrebe csrebf ]; #attach variables [ csr_EC ] # [ csrec0 csrec1 csrec2 csrec3 csrec4 csrec5 csrec6 csrec7 # csrec8 csrec9 csreca csrecb csrecc csrecd csrece csrecf # csred0 csred1 csred2 csred3 csred4 csred5 csred6 csred7 # csred8 csred9 csreda csredb csredc csredd csrede csredf # csree0 csree1 csree2 csree3 csree4 csree5 csree6 csree7 # csree8 csree9 csreea csreeb csreec csreed csreee csreef # csref0 csref1 csref2 csref3 csref4 csref5 csref6 csref7 # csref8 csref9 csrefa csrefb csrefc csrefd csrefe csreff ]; #attach variables [ csr_F0 ] # [ csrf00 csrf01 csrf02 csrf03 csrf04 csrf05 csrf06 csrf07 # csrf08 csrf09 csrf0a csrf0b csrf0c csrf0d csrf0e csrf0f # csrf10 mvendorid marchid mimpid mhartid csrf15 csrf16 csrf17 # csrf18 csrf19 csrf1a csrf1b csrf1c csrf1d csrf1e csrf1f # csrf20 csrf21 csrf22 csrf23 csrf24 csrf25 csrf26 csrf27 # csrf28 csrf29 csrf2a csrf2b csrf2c csrf2d csrf2e csrf2f # csrf30 csrf31 csrf32 csrf33 csrf34 csrf35 csrf36 csrf37 # csrf38 csrf39 csrf3a csrf3b csrf3c csrf3d csrf3e csrf3f # csrf40 csrf41 csrf42 csrf43 csrf44 csrf45 csrf46 csrf47 # csrf48 csrf49 csrf4a csrf4b csrf4c csrf4d csrf4e csrf4f # csrf50 csrf51 csrf52 csrf53 csrf54 csrf55 csrf56 csrf57 # csrf58 csrf59 csrf5a csrf5b csrf5c csrf5d csrf5e csrf5f # csrf60 csrf61 csrf62 csrf63 csrf64 csrf65 csrf66 csrf67 # csrf68 csrf69 csrf6a csrf6b csrf6c csrf6d csrf6e csrf6f # csrf70 csrf71 csrf72 csrf73 csrf74 csrf75 csrf76 csrf77 # csrf78 csrf79 csrf7a csrf7b csrf7c csrf7d csrf7e csrf7f ]; #attach variables [ csr_F8 ] # [ csrf80 csrf81 csrf82 csrf83 csrf84 csrf85 csrf86 csrf87 # csrf88 csrf89 csrf8a csrf8b csrf8c csrf8d csrf8e csrf8f # csrf90 csrf91 csrf92 csrf93 csrf94 csrf95 csrf96 csrf97 # csrf98 csrf99 csrf9a csrf9b csrf9c csrf9d csrf9e csrf9f # csrfa0 csrfa1 csrfa2 csrfa3 csrfa4 csrfa5 csrfa6 csrfa7 # csrfa8 csrfa9 csrfaa csrfab csrfac csrfad csrfae csrfaf # csrfb0 csrfb1 csrfb2 csrfb3 csrfb4 csrfb5 
csrfb6 csrfb7 # csrfb8 csrfb9 csrfba csrfbb csrfbc csrfbd csrfbe csrfbf ]; #attach variables [ csr_FC ] # [ csrfc0 csrfc1 csrfc2 csrfc3 csrfc4 csrfc5 csrfc6 csrfc7 # csrfc8 csrfc9 csrfca csrfcb csrfcc csrfcd csrfce csrfcf # csrfd0 csrfd1 csrfd2 csrfd3 csrfd4 csrfd5 csrfd6 csrfd7 # csrfd8 csrfd9 csrfda csrfdb csrfdc csrfdd csrfde csrfdf # csrfe0 csrfe1 csrfe2 csrfe3 csrfe4 csrfe5 csrfe6 csrfe7 # csrfe8 csrfe9 csrfea csrfeb csrfec csrfed csrfee csrfef # csrff0 csrff1 csrff2 csrff3 csrff4 csrff5 csrff6 csrff7 # csrff8 csrff9 csrffa csrffb csrffc csrffd csrffe csrfff ]; #TODO these names are madeup. do real ones exist? #TODO go through and use these instead of numbers @define HFLEN 2 @define SFLEN 4 @define DFLEN 8 @define QFLEN 16 @define HXLEN 2 @define WXLEN 4 @define DXLEN 8 @define QXLEN 16 define pcodeop unimp; define pcodeop trap; define pcodeop ebreak; define pcodeop ecall; define pcodeop fence; define pcodeop fence.i; # possible tokens: r0711 r1519 r2024 r2731 cr0206 cr0711 cd0711 rs1: r1519 is r1519 { export r1519; } rs1: zero is zero & op1519=0 { export 0:$(XLEN); } rs2: r2024 is r2024 { export r2024; } rs2: zero is zero & op2024=0 { export 0:$(XLEN); } rs3: r2731 is r2731 { export r2731; } rs3: zero is zero & op2731=0 { export 0:$(XLEN); } rd: r0711 is r0711 { export r0711; } rd: zero is r0711 & zero & op0711=0 { local tempZero:$(XLEN) = 0; export tempZero; } rdDst: r0711 is r0711 { export r0711; } rs1W: r1519 is r1519 { local tmp:$(WXLEN) = r1519:$(WXLEN); export tmp; } rs1W: zero is r1519 & zero & op1519=0 { export 0:$(WXLEN); } rs2W: r2024 is r2024 { local tmp:$(WXLEN) = r2024:$(WXLEN); export tmp; } rs2W: zero is r2024 & zero & op2024=0 { export 0:$(WXLEN); } #TODO dest may be bad, might need an assign macro rdW: r0711 is r0711 { local tmp:$(WXLEN) = r0711:$(WXLEN); export tmp; } rdW: zero is r0711 & zero & op0711=0 { export 0:$(WXLEN); } #TODO does this need to be in an if/endif @if ADDRSIZE == "64" rs1L: r1519 is r1519 { local tmp:8 = r1519:8; export 
tmp; } rs1L: zero is r1519 & zero & op1519=0 { export 0:8; } rs2L: r2024 is r2024 { local tmp:8 = r2024:8; export tmp; } rs2L: zero is r2024 & zero & op2024=0 { export 0:8; } #TODO dest may be bad, might need an assign macro rdL: r0711 is r0711 { export r0711; } rdL: zero is r0711 & zero & op0711=0 { export 0:8; } @endif #TODO eh not sure if this is usable # would only make sense to use this if the float operation # tables for frd,frs1,frs2 could be different sizes or # if the cast could use this export, but they have to export # the same size and you cant 'local tmp:fmt' # # 32-bit single-precision $(SFLEN) # fmt: ".s" is op2526=0 { export $(SFLEN):1; } # # 64-bit double-precision $(DFLEN) # fmt: ".d" is op2526=1 { export $(DFLEN):1; } # # 16-bit half-precision $(HFLEN) # fmt: ".h" is op2526=2 { export $(HFLEN):1; } # # 128-bit quad-precision $(QFLEN) # fmt: ".q" is op2526=3 { export $(QFLEN):1; } frd: fr0711 is fr0711 { export fr0711; } frs1: fr1519 is fr1519 { export fr1519; } frs2: fr2024 is fr2024 { export fr2024; } frs3: fr2731 is fr2731 { export fr2731; } #TODO dest may be bad, might need an assign macro #frdS: fr0711 is fr0711 { local tmp = fr0711:$(SFLEN); export tmp; } frs1S: fr1519 is fr1519 { local tmp = fr1519:$(SFLEN); export tmp; } frs2S: fr2024 is fr2024 { local tmp = fr2024:$(SFLEN); export tmp; } frs3S: fr2731 is fr2731 { local tmp = fr2731:$(SFLEN); export tmp; } @if ((FPSIZE == "64") || (FPSIZE == "128")) #TODO dest may be bad, might need an assign macro #frdD: fr0711 is fr0711 { local tmp = fr0711:$(DFLEN); export tmp; } frs1D: fr1519 is fr1519 { local tmp = fr1519:$(DFLEN); export tmp; } frs2D: fr2024 is fr2024 { local tmp = fr2024:$(DFLEN); export tmp; } frs3D: fr2731 is fr2731 { local tmp = fr2731:$(DFLEN); export tmp; } @endif macro fassignS(dest, src) { @if FPSIZE == "32" dest = src; @else dest = zext(src); @endif } macro assignW(dest, src) { @if ADDRSIZE == "32" dest = src; @else dest = sext(src); @endif } macro zassignW(dest, src) { @if 
ADDRSIZE == "32" dest = src; @else dest = zext(src); @endif } macro zassignD(dest, src) { @if ADDRSIZE == "128" dest = zext(src); @else dest = src; @endif } macro assignD(dest, src) { @if ADDRSIZE == "128" dest = sext(src); @else dest = src; @endif } immI: sop2031 is sop2031 { local tmp:$(XLEN) = sop2031; export tmp; } immS: imm is op0711 & sop2531 [ imm = (sop2531 << 5) | op0711; ] { local tmp:$(XLEN) = imm; export tmp; } # used for goto immSB: reloc is op0707 & op0811 & op2530 & sop3131 [ reloc = inst_start + ((sop3131 << 12) | (op2530 << 5) | (op0811 << 1) | (op0707 << 11)); ] { export *[ram]:$(XLEN) reloc; } #immSB: reloc is op0707 & op0811 & op2530 & sop3131 [ reloc = inst_start + ((sop3131 << 12) | (op2530 << 5) | (op0811 << 1) | (op0707 << 11)); ] { export reloc; } immU: op1231 is op1231 & sop1231 { local tmp:$(XLEN) = sop1231 << 12; export tmp; } # used for goto immUJ: reloc is op1219 & op2020 & op2130 & sop3131 [ reloc = inst_start + ((sop3131 << 20) | (op2130 << 1) | (op2020 << 11) | (op1219 << 12)); ] { export *[ram]:$(XLEN) reloc; } @if ADDRSIZE == "32" shamt6: op2024 is op2024 & op2525=0 { local tmp:$(XLEN) = op2024; export tmp; } @else shamt5: op2024 is op2024 { local tmp:$(XLEN) = op2024; export tmp; } shamt6: imm is op2024 & op2525 [ imm = (op2525 << 5) | op2024; ] { local tmp:$(XLEN) = imm; export tmp; } @endif FRM: "rne" is op1214=0 { local tmp:1 = 0; export tmp; } FRM: "rtz" is op1214=1 { local tmp:1 = 1; export tmp; } FRM: "rdn" is op1214=2 { local tmp:1 = 2; export tmp; } FRM: "rup" is op1214=3 { local tmp:1 = 3; export tmp; } FRM: "rmm" is op1214=4 { local tmp:1 = 4; export tmp; } # 5 Invalid. Reserved for future use # 6 Invalid. 
Reserved for future use FRM: "dyn" is op1214=7 { local tmp:1 = 7; export tmp; } # used to specify additional memory ordering constraints aqrl: "" is op2526=0 { export 0:$(XLEN); } aqrl: ".rl" is op2526=1 { export 1:$(XLEN); } aqrl: ".aq" is op2526=2 { export 2:$(XLEN); } aqrl: ".aqrl" is op2526=3 { export 3:$(XLEN); } crs1: cr0711 is cr0711 { export cr0711; } crs1: zero is cr0711 & zero & cop0711=0 { export 0:$(XLEN); } crdNoSp: cd0711NoSp is cd0711NoSp { export cd0711NoSp; } crdNoSp: zero is zero & cop0711=0 { export 0:$(XLEN); } crd: cd0711 is cd0711 { export cd0711; } crd: zero is zero & cop0711=0 { export 0:$(XLEN); } crs2: cr0206 is cr0206 { export cr0206; } crs2: zero is cr0206 & zero & cop0206=0 { export 0:$(XLEN); } cfrs1: cfr0711 is cfr0711 { export cfr0711; } cfrd: cfr0711 is cfr0711 { export cfr0711; } cfrs2: cfr0206 is cfr0206 { export cfr0206; } #ATTN Not doing tables for the RVC registers since there is no # zero register to worry about cimmI: imm is scop1212 & cop0206 [ imm = (scop1212 << 5) | (cop0206); ] { local tmp:$(XLEN) = imm; export tmp; } # used for goto cbimm: reloc is scop1212 & cop1011 & cop0506 & cop0304 & cop0202 [ reloc = inst_start + ((scop1212 << 8) | (cop0506 << 6) | (cop0202 << 5) | (cop1011 << 3) | (cop0304 << 1)); ] { export *[ram]:$(XLEN) reloc; } #cbimm: reloc is scop1212 & cop1011 & cop0506 & cop0304 & cop0202 [ reloc = inst_start + ((scop1212 << 8) | (cop0506 << 6) | (cop0202 << 5) | (cop1011 << 3) | (cop0304 << 1)); ] { export reloc; } # used for goto cjimm: reloc is scop1212 & cop1111 & cop0910 & cop0808 & cop0707 & cop0606 & cop0305 & cop0202 [ reloc = inst_start + ((scop1212 << 11) | (cop1111 << 4) | (cop0910 << 8) | (cop0808 << 10) | (cop0707 << 6) | (cop0606 << 7) | (cop0305 << 1) | (cop0202 << 5)); ] { export *[ram]:$(XLEN) reloc; } nzuimm5: is cop0606=1 | cop0505=1 | cop0404=1 | cop0303 = 1 | cop0202=1 {} nzuimm6: is cop1212=1 | cop0606=1 | cop0505=1 | cop0404=1 | cop0303 = 1 | cop0202=1 {} @if ADDRSIZE == "32" c6imm: 
uimm is cop1212=0 & cop0206 & nzuimm5 [ uimm = (cop0206 + 0); ] { local tmp:$(XLEN) = uimm; export tmp; } @elif ADDRSIZE == "64" c6imm: uimm is cop1212 & cop0206 & nzuimm6 [ uimm = (cop1212 << 5) | (cop0206); ] { local tmp:$(XLEN) = uimm; export tmp; } @elif ADDRSIZE == "128" c6imm: uimm is cop1212 & cop0206 [ uimm = (cop1212 << 5) | (cop0206); ] { local tmp:$(XLEN) = uimm + (64 * (uimm == 0)); export tmp; } @endif cbigimm: uimm is cop1212 & scop1212 & cop0206 & nzuimm6 [ uimm = (cop1212 << 5) | (cop0206); ] { local tmp:$(XLEN) = (scop1212 << 17) | (cop0206 << 12); export tmp; } nzcaddi4: is cop1212=1 | cop1111=1 | cop1010=1 | cop0909=1 | cop0808=1 | cop0707=1 | cop0606=1 | cop0505=1 {} caddi4spnimm: uimm is nzcaddi4 & cop1112 & cop0710 & cop0606 & cop0505 [ uimm = (cop0710 << 6) | (cop1112 << 4) | (cop0505 << 3) | (cop0606 << 2); ] { local tmp:$(XLEN) = uimm; export tmp; } caddi16spimm: imm is scop1212 & cop0606 & cop0505 & cop0304 & cop0202 & nzuimm6 [ imm = (scop1212 << 9) | (cop0304 << 7) | (cop0505 << 6) | (cop0202 << 5) | (cop0606 << 4); ] { local tmp:$(XLEN) = imm; export tmp; } clwimm: uimm is cop1012 & cop0606 & cop0505 [ uimm = (cop1012 << 3) | (cop0606 << 2) | (cop0505 << 6); ] { local tmp:$(XLEN) = uimm; export tmp; } clwspimm: uimm is cop1212 & cop0406 & cop0203 [ uimm = (cop1212 << 5) | (cop0406 << 2) | (cop0203 << 6); ] { local tmp:$(XLEN) = uimm; export tmp; } cswspimm: uimm is cop0708 & cop0912 [ uimm = (cop0708 << 6) | (cop0912 << 2); ] { local tmp:$(XLEN) = uimm; export tmp; } cldimm: uimm is cop1012 & cop0506 [ uimm = (cop1012 << 3) | (cop0506 << 6); ] { local tmp:$(XLEN) = uimm; export tmp; } cldspimm: uimm is cop1212 & cop0506 & cop0204 [ uimm = (cop1212 << 5) | (cop0506 << 3) | (cop0204 << 6); ] { local tmp:$(XLEN) = uimm; export tmp; } csdspimm: uimm is cop0709 & cop1012 [ uimm = (cop0709 << 6) | (cop1012 << 3); ] { local tmp:$(XLEN) = uimm; export tmp; } @if ADDRSIZE == "128" clqimm: uimm is cop1112 & cop1010 & cop0506 [ uimm = (cop1112 << 
4) | (cop1010 << 8) | (cop0506 << 6); ] { local tmp:$(XLEN) = uimm; export tmp; } clqspimm: uimm is cop1212 & cop0606 & cop0205 [ uimm = (cop1212 << 5) | (cop0606 << 4) | (cop0205 << 6); ] { local tmp:$(XLEN) = uimm; export tmp; } csqspimm: uimm is cop0710 & cop1112 [ uimm = (cop0710 << 6) | (cop1112 << 4); ] { local tmp:$(XLEN) = uimm; export tmp; } @endif # SEE riscv-privileged.pdf Section 'CSR Listing' for description # This implementation aligns with the table breakdown # csr[11:10] - read/write (00, 01, 10) or read-only (11) # csr[9:8] - lowest privilege that can access the CSR ## 0x000-0x0ff #with csr: op3031=0 & op2829=0 { # : csr_0 is csr_0 { export csr_0; } # user, standard read/write #} # ## 0x100-0x1ff #with csr: op3031=0 & op2829=1 { # : csr_1 is csr_1 { export csr_1; } # supervisor, standard read/write #} # ## 0x200-0x2ff #with csr: op3031=0 & op2829=2 { # : csr_2 is csr_2 { export csr_2; } # hypervisor, standard read/write #} # ## 0x300-0x3ff #with csr: op3031=0 & op2829=3 { # : csr_3 is csr_3 { export csr_3; } # machine, standard read/write #} # ## 0x400-0x4ff #with csr: op3031=1 & op2829=0 { # : csr_4 is csr_4 { export csr_4; } # user, standard read/write #} # ## 0x500-0x5ff #with csr: op3031=1 & op2829=1 { # : csr_50 is csr_50 & op2727=0 { export csr_50; } # supervisor, standard read/write # : csr_58 is csr_58 & op2627=2 { export csr_58; } # supervisor, standard read/write # : csr_5C is csr_5C & op2627=3 { export csr_5C; } # supervisor, custom read/write #} # ## 0x600-0x6ff #with csr: op3031=1 & op2829=2 { # : csr_60 is csr_60 & op2727=0 { export csr_60; } # hypervisor, standard read/write # : csr_68 is csr_68 & op2627=2 { export csr_68; } # hypervisor, standard read/write # : csr_6C is csr_6C & op2627=3 { export csr_6C; } # hypervisor, custom read/write #} # ## 0x700-0x7ff #with csr: op3031=1 & op2829=3 { # : csr_70 is csr_70 & op2727=0 { export csr_70; } # machine, standard read/write # : csr_78 is csr_78 & op2527=4 { export csr_78; } # machine, 
standard read/write # : csr_7A is csr_7A & op2427=0xa { export csr_7A; } # machine, standard read/write debug # : csr_7B is csr_7B & op2427=0xb { export csr_7B; } # machine, debug-mode-only # : csr_7C is csr_7C & op2627=3 { export csr_7C; } # machine, custom read/write #} # ## 0x800-0x8ff #with csr: op3031=2 & op2829=0 { # : csr_8 is csr_8 { export csr_8; } # user, custom read/write #} # ## 0x900-0x9ff #with csr: op3031=2 & op2829=1 { # : csr_90 is csr_90 & op2727=0 { export csr_90; } # supervisor, standard read/write # : csr_98 is csr_98 & op2627=2 { export csr_98; } # supervisor, standard read/write # : csr_9C is csr_9C & op2627=3 { export csr_9C; } # supervisor, custom read/write #} # ## 0xa00-0xaff #with csr: op3031=2 & op2829=2 { # : csr_A0 is csr_A0 & op2727=0 { export csr_A0; } # hypervisor, standard read/write # : csr_A8 is csr_A8 & op2627=2 { export csr_A8; } # hypervisor, standard read/write # : csr_AC is csr_AC & op2627=3 { export csr_AC; } # hypervisor, custom read/write #} # ## 0xb00-0xbff #with csr: op3031=2 & op2829=3 { # : csr_B0 is csr_B0 & op2727=0 { export csr_B0; } # machine, standard read/write # : csr_B8 is csr_B8 & op2627=2 { export csr_B8; } # machine, standard read/write # : csr_BC is csr_BC & op2627=3 { export csr_BC; } # machine, custom read/write #} # ## 0xc00-0xcff #with csr: op3031=3 & op2829=0 { # : csr_C0 is csr_C0 & op2727=0 { export csr_C0; } # user, standard read-only # : csr_C8 is csr_C8 & op2627=2 { export csr_C8; } # user, standard read-only # : csr_CC is csr_CC & op2627=3 { export csr_CC; } # user, custom read-only #} # ## 0xd00-0xdff #with csr: op3031=3 & op2829=1 { # : csr_D0 is csr_D0 & op2727=0 { export csr_D0; } # supervisor, standard read-only # : csr_D8 is csr_D8 & op2627=2 { export csr_D8; } # supervisor, standard read-only # : csr_DC is csr_DC & op2627=3 { export csr_DC; } # supervisor, custom read-only #} # ## 0xe00-0xeff #with csr: op3031=3 & op2829=2 { # : csr_E0 is csr_E0 & op2727=0 { export csr_E0; } # 
hypervisor, standard read-only # : csr_E8 is csr_E8 & op2627=2 { export csr_E8; } # hypervisor, standard read-only # : csr_EC is csr_EC & op2627=3 { export csr_EC; } # hypervisor, custom read-only #} # ## 0xf00-0xfff #with csr: op3031=3 & op2829=3 { # : csr_F0 is csr_F0 & op2727=0 { export csr_F0; } # machine, standard read-only # : csr_F8 is csr_F8 & op2627=2 { export csr_F8; } # machine, standard read-only # : csr_FC is csr_FC & op2627=3 { export csr_FC; } # machine, custom read-only #} # csr: csr_reg is op2031 [ csr_reg = $(CSR_REG_START) + op2031; ] { export *[csreg]:$(XLEN) csr_reg; } vs1: v1519 is v1519 { export v1519; } vs2: v2024 is v2024 { export v2024; } vs3: v0711 is v0711 { export v0711; } vd: v0711 is v0711 { export v0711; } vm: ,v0^".t" is op2525=0 & v0 & vd { vd = vd & v0; } vm: "" is op2525=1 { } simm5: sop1519 is sop1519 { local tmp:$(XLEN) = sop1519; export tmp; } # zimm: op1519 is op1519 { local tmp:$(XLEN) = op1519; export tmp; } nf: op2931 is op2931 { local tmp:$(XLEN) = op2931; export tmp; } vtypei: op2030 is op2030 { local tmp:$(XLEN) = op2030; export tmp; } bs: op3031 is op3031 { local tmp:$(XLEN) = op3031; export tmp; } rcon: op2023 is op2023 { local tmp:$(XLEN) = op2023; export tmp; } # imm=0 for baseline operation, nonzero values are reserved shamtw: 0 is op2024=0 { local tmp:$(XLEN) = 0; export tmp; } imm3u: op2022 is op2022 { local tmp:$(XLEN) = op2022; export tmp; } imm4u: op2023 is op2023 { local tmp:$(XLEN) = op2023; export tmp; } imm5u: op2024 is op2024 { local tmp:$(XLEN) = op2024; export tmp; } ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv.zi.sinc ================================================ # RV32/RV64 Zifencei Standard Extension # fence.i 0000100f ffffffff SIMPLE (0, 0) :fence.i is op0001=0x3 & op0204=0x3 & op0506=0x0 & funct3=0x1 & fm=0x0 & op0711=0x0 & op1527=0x0 { fence.i(); } ================================================ FILE: 
pypcode/processors/RISCV/data/languages/riscv32-fp.cspec ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv32.cspec ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv32.dwarf ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv64-fp.cspec ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv64.cspec ================================================ ================================================ FILE: pypcode/processors/RISCV/data/languages/riscv64.dwarf ================================================ ================================================ FILE: pypcode/processors/RISCV/data/patterns/patternconstraints.xml ================================================ riscv_gc_patterns.xml ================================================ FILE: pypcode/processors/RISCV/data/patterns/riscv_gc_patterns.xml ================================================ 10000010 10000000 10000010 10000000 00000000 00000000 0.....01 01110001 0.....01 00010001 00010011 00000001 ....0001 1....... 01101111 ....0000 ........ ........ ......01 101..... ......01 101..... 00000000 00000000 0.....01 01110001 0.....01 00010001 00010011 00000001 ....0001 1....... 10000010 10000000 10000010 10000000 00000000 00000000 ......01 101..... 00000000 00000000 01101111 ....0000 ........ ........ .0010111 ........ ........ ........ 
================================================ FILE: pypcode/processors/RISCV/scripts/binutil.py ================================================
#!/usr/bin/env python3
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""Script to generate base RISC-V SLEIGH for Ghidra module

Just an artifact to keep around for the development at this point as a lot
of cleanup and reorganization was done after generating the initial SLEIGH.
Data was copied from various files in binutils, a lot of it unused.
""" MATCH_SLLI_RV32=0x1013 MASK_SLLI_RV32=0xfe00707f MATCH_SRLI_RV32=0x5013 MASK_SRLI_RV32=0xfe00707f MATCH_SRAI_RV32=0x40005013 MASK_SRAI_RV32=0xfe00707f MATCH_FRFLAGS=0x102073 MASK_FRFLAGS=0xfffff07f MATCH_FSFLAGS=0x101073 MASK_FSFLAGS=0xfff0707f MATCH_FSFLAGSI=0x105073 MASK_FSFLAGSI=0xfff0707f MATCH_FRRM=0x202073 MASK_FRRM=0xfffff07f MATCH_FSRM=0x201073 MASK_FSRM=0xfff0707f MATCH_FSRMI=0x205073 MASK_FSRMI=0xfff0707f MATCH_FSCSR=0x301073 MASK_FSCSR=0xfff0707f MATCH_FRCSR=0x302073 MASK_FRCSR=0xfffff07f MATCH_RDCYCLE=0xc0002073 MASK_RDCYCLE=0xfffff07f MATCH_RDTIME=0xc0102073 MASK_RDTIME=0xfffff07f MATCH_RDINSTRET=0xc0202073 MASK_RDINSTRET=0xfffff07f MATCH_RDCYCLEH=0xc8002073 MASK_RDCYCLEH=0xfffff07f MATCH_RDTIMEH=0xc8102073 MASK_RDTIMEH=0xfffff07f MATCH_RDINSTRETH=0xc8202073 MASK_RDINSTRETH=0xfffff07f MATCH_SCALL=0x73 MASK_SCALL=0xffffffff MATCH_SBREAK=0x100073 MASK_SBREAK=0xffffffff MATCH_BEQ=0x63 MASK_BEQ=0x707f MATCH_BNE=0x1063 MASK_BNE=0x707f MATCH_BLT=0x4063 MASK_BLT=0x707f MATCH_BGE=0x5063 MASK_BGE=0x707f MATCH_BLTU=0x6063 MASK_BLTU=0x707f MATCH_BGEU=0x7063 MASK_BGEU=0x707f MATCH_JALR=0x67 MASK_JALR=0x707f MATCH_JAL=0x6f MASK_JAL=0x7f MATCH_LUI=0x37 MASK_LUI=0x7f MATCH_AUIPC=0x17 MASK_AUIPC=0x7f MATCH_ADDI=0x13 MASK_ADDI=0x707f MATCH_SLLI=0x1013 MASK_SLLI=0xfc00707f MATCH_SLTI=0x2013 MASK_SLTI=0x707f MATCH_SLTIU=0x3013 MASK_SLTIU=0x707f MATCH_XORI=0x4013 MASK_XORI=0x707f MATCH_SRLI=0x5013 MASK_SRLI=0xfc00707f MATCH_SRAI=0x40005013 MASK_SRAI=0xfc00707f MATCH_ORI=0x6013 MASK_ORI=0x707f MATCH_ANDI=0x7013 MASK_ANDI=0x707f MATCH_ADD=0x33 MASK_ADD=0xfe00707f MATCH_SUB=0x40000033 MASK_SUB=0xfe00707f MATCH_SLL=0x1033 MASK_SLL=0xfe00707f MATCH_SLT=0x2033 MASK_SLT=0xfe00707f MATCH_SLTU=0x3033 MASK_SLTU=0xfe00707f MATCH_XOR=0x4033 MASK_XOR=0xfe00707f MATCH_SRL=0x5033 MASK_SRL=0xfe00707f MATCH_SRA=0x40005033 MASK_SRA=0xfe00707f MATCH_OR=0x6033 MASK_OR=0xfe00707f MATCH_AND=0x7033 MASK_AND=0xfe00707f MATCH_ADDIW=0x1b MASK_ADDIW=0x707f MATCH_SLLIW=0x101b 
MASK_SLLIW=0xfe00707f MATCH_SRLIW=0x501b MASK_SRLIW=0xfe00707f MATCH_SRAIW=0x4000501b MASK_SRAIW=0xfe00707f MATCH_ADDW=0x3b MASK_ADDW=0xfe00707f MATCH_SUBW=0x4000003b MASK_SUBW=0xfe00707f MATCH_SLLW=0x103b MASK_SLLW=0xfe00707f MATCH_SRLW=0x503b MASK_SRLW=0xfe00707f MATCH_SRAW=0x4000503b MASK_SRAW=0xfe00707f MATCH_LB=0x3 MASK_LB=0x707f MATCH_LH=0x1003 MASK_LH=0x707f MATCH_LW=0x2003 MASK_LW=0x707f MATCH_LD=0x3003 MASK_LD=0x707f MATCH_LBU=0x4003 MASK_LBU=0x707f MATCH_LHU=0x5003 MASK_LHU=0x707f MATCH_LWU=0x6003 MASK_LWU=0x707f MATCH_SB=0x23 MASK_SB=0x707f MATCH_SH=0x1023 MASK_SH=0x707f MATCH_SW=0x2023 MASK_SW=0x707f MATCH_SD=0x3023 MASK_SD=0x707f MATCH_FENCE=0xf MASK_FENCE=0x707f MATCH_FENCE_I=0x100f MASK_FENCE_I=0x707f MATCH_FENCE_TSO=0x8330000f MASK_FENCE_TSO=0xfff0707f MATCH_MUL=0x2000033 MASK_MUL=0xfe00707f MATCH_MULH=0x2001033 MASK_MULH=0xfe00707f MATCH_MULHSU=0x2002033 MASK_MULHSU=0xfe00707f MATCH_MULHU=0x2003033 MASK_MULHU=0xfe00707f MATCH_DIV=0x2004033 MASK_DIV=0xfe00707f MATCH_DIVU=0x2005033 MASK_DIVU=0xfe00707f MATCH_REM=0x2006033 MASK_REM=0xfe00707f MATCH_REMU=0x2007033 MASK_REMU=0xfe00707f MATCH_MULW=0x200003b MASK_MULW=0xfe00707f MATCH_DIVW=0x200403b MASK_DIVW=0xfe00707f MATCH_DIVUW=0x200503b MASK_DIVUW=0xfe00707f MATCH_REMW=0x200603b MASK_REMW=0xfe00707f MATCH_REMUW=0x200703b MASK_REMUW=0xfe00707f MATCH_AMOADD_W=0x202f MASK_AMOADD_W=0xf800707f MATCH_AMOXOR_W=0x2000202f MASK_AMOXOR_W=0xf800707f MATCH_AMOOR_W=0x4000202f MASK_AMOOR_W=0xf800707f MATCH_AMOAND_W=0x6000202f MASK_AMOAND_W=0xf800707f MATCH_AMOMIN_W=0x8000202f MASK_AMOMIN_W=0xf800707f MATCH_AMOMAX_W=0xa000202f MASK_AMOMAX_W=0xf800707f MATCH_AMOMINU_W=0xc000202f MASK_AMOMINU_W=0xf800707f MATCH_AMOMAXU_W=0xe000202f MASK_AMOMAXU_W=0xf800707f MATCH_AMOSWAP_W=0x800202f MASK_AMOSWAP_W=0xf800707f MATCH_LR_W=0x1000202f MASK_LR_W=0xf9f0707f MATCH_SC_W=0x1800202f MASK_SC_W=0xf800707f MATCH_AMOADD_D=0x302f MASK_AMOADD_D=0xf800707f MATCH_AMOXOR_D=0x2000302f MASK_AMOXOR_D=0xf800707f MATCH_AMOOR_D=0x4000302f 
MASK_AMOOR_D=0xf800707f MATCH_AMOAND_D=0x6000302f MASK_AMOAND_D=0xf800707f MATCH_AMOMIN_D=0x8000302f MASK_AMOMIN_D=0xf800707f MATCH_AMOMAX_D=0xa000302f MASK_AMOMAX_D=0xf800707f MATCH_AMOMINU_D=0xc000302f MASK_AMOMINU_D=0xf800707f MATCH_AMOMAXU_D=0xe000302f MASK_AMOMAXU_D=0xf800707f MATCH_AMOSWAP_D=0x800302f MASK_AMOSWAP_D=0xf800707f MATCH_LR_D=0x1000302f MASK_LR_D=0xf9f0707f MATCH_SC_D=0x1800302f MASK_SC_D=0xf800707f MATCH_ECALL=0x73 MASK_ECALL=0xffffffff MATCH_EBREAK=0x100073 MASK_EBREAK=0xffffffff MATCH_URET=0x200073 MASK_URET=0xffffffff MATCH_SRET=0x10200073 MASK_SRET=0xffffffff MATCH_HRET=0x20200073 MASK_HRET=0xffffffff MATCH_MRET=0x30200073 MASK_MRET=0xffffffff MATCH_DRET=0x7b200073 MASK_DRET=0xffffffff MATCH_SFENCE_VM=0x10400073 MASK_SFENCE_VM=0xfff07fff MATCH_SFENCE_VMA=0x12000073 MASK_SFENCE_VMA=0xfe007fff MATCH_WFI=0x10500073 MASK_WFI=0xffffffff MATCH_CSRRW=0x1073 MASK_CSRRW=0x707f MATCH_CSRRS=0x2073 MASK_CSRRS=0x707f MATCH_CSRRC=0x3073 MASK_CSRRC=0x707f MATCH_CSRRWI=0x5073 MASK_CSRRWI=0x707f MATCH_CSRRSI=0x6073 MASK_CSRRSI=0x707f MATCH_CSRRCI=0x7073 MASK_CSRRCI=0x707f MATCH_FADD_S=0x53 MASK_FADD_S=0xfe00007f MATCH_FSUB_S=0x8000053 MASK_FSUB_S=0xfe00007f MATCH_FMUL_S=0x10000053 MASK_FMUL_S=0xfe00007f MATCH_FDIV_S=0x18000053 MASK_FDIV_S=0xfe00007f MATCH_FSGNJ_S=0x20000053 MASK_FSGNJ_S=0xfe00707f MATCH_FSGNJN_S=0x20001053 MASK_FSGNJN_S=0xfe00707f MATCH_FSGNJX_S=0x20002053 MASK_FSGNJX_S=0xfe00707f MATCH_FMIN_S=0x28000053 MASK_FMIN_S=0xfe00707f MATCH_FMAX_S=0x28001053 MASK_FMAX_S=0xfe00707f MATCH_FSQRT_S=0x58000053 MASK_FSQRT_S=0xfff0007f MATCH_FADD_D=0x2000053 MASK_FADD_D=0xfe00007f MATCH_FSUB_D=0xa000053 MASK_FSUB_D=0xfe00007f MATCH_FMUL_D=0x12000053 MASK_FMUL_D=0xfe00007f MATCH_FDIV_D=0x1a000053 MASK_FDIV_D=0xfe00007f MATCH_FSGNJ_D=0x22000053 MASK_FSGNJ_D=0xfe00707f MATCH_FSGNJN_D=0x22001053 MASK_FSGNJN_D=0xfe00707f MATCH_FSGNJX_D=0x22002053 MASK_FSGNJX_D=0xfe00707f MATCH_FMIN_D=0x2a000053 MASK_FMIN_D=0xfe00707f MATCH_FMAX_D=0x2a001053 
MASK_FMAX_D=0xfe00707f MATCH_FCVT_S_D=0x40100053 MASK_FCVT_S_D=0xfff0007f MATCH_FCVT_D_S=0x42000053 MASK_FCVT_D_S=0xfff0007f MATCH_FSQRT_D=0x5a000053 MASK_FSQRT_D=0xfff0007f MATCH_FADD_Q=0x6000053 MASK_FADD_Q=0xfe00007f MATCH_FSUB_Q=0xe000053 MASK_FSUB_Q=0xfe00007f MATCH_FMUL_Q=0x16000053 MASK_FMUL_Q=0xfe00007f MATCH_FDIV_Q=0x1e000053 MASK_FDIV_Q=0xfe00007f MATCH_FSGNJ_Q=0x26000053 MASK_FSGNJ_Q=0xfe00707f MATCH_FSGNJN_Q=0x26001053 MASK_FSGNJN_Q=0xfe00707f MATCH_FSGNJX_Q=0x26002053 MASK_FSGNJX_Q=0xfe00707f MATCH_FMIN_Q=0x2e000053 MASK_FMIN_Q=0xfe00707f MATCH_FMAX_Q=0x2e001053 MASK_FMAX_Q=0xfe00707f MATCH_FCVT_S_Q=0x40300053 MASK_FCVT_S_Q=0xfff0007f MATCH_FCVT_Q_S=0x46000053 MASK_FCVT_Q_S=0xfff0007f MATCH_FCVT_D_Q=0x42300053 MASK_FCVT_D_Q=0xfff0007f MATCH_FCVT_Q_D=0x46100053 MASK_FCVT_Q_D=0xfff0007f MATCH_FSQRT_Q=0x5e000053 MASK_FSQRT_Q=0xfff0007f MATCH_FLE_S=0xa0000053 MASK_FLE_S=0xfe00707f MATCH_FLT_S=0xa0001053 MASK_FLT_S=0xfe00707f MATCH_FEQ_S=0xa0002053 MASK_FEQ_S=0xfe00707f MATCH_FLE_D=0xa2000053 MASK_FLE_D=0xfe00707f MATCH_FLT_D=0xa2001053 MASK_FLT_D=0xfe00707f MATCH_FEQ_D=0xa2002053 MASK_FEQ_D=0xfe00707f MATCH_FLE_Q=0xa6000053 MASK_FLE_Q=0xfe00707f MATCH_FLT_Q=0xa6001053 MASK_FLT_Q=0xfe00707f MATCH_FEQ_Q=0xa6002053 MASK_FEQ_Q=0xfe00707f MATCH_FCVT_W_S=0xc0000053 MASK_FCVT_W_S=0xfff0007f MATCH_FCVT_WU_S=0xc0100053 MASK_FCVT_WU_S=0xfff0007f MATCH_FCVT_L_S=0xc0200053 MASK_FCVT_L_S=0xfff0007f MATCH_FCVT_LU_S=0xc0300053 MASK_FCVT_LU_S=0xfff0007f MATCH_FMV_X_S=0xe0000053 MASK_FMV_X_S=0xfff0707f MATCH_FCLASS_S=0xe0001053 MASK_FCLASS_S=0xfff0707f MATCH_FCVT_W_D=0xc2000053 MASK_FCVT_W_D=0xfff0007f MATCH_FCVT_WU_D=0xc2100053 MASK_FCVT_WU_D=0xfff0007f MATCH_FCVT_L_D=0xc2200053 MASK_FCVT_L_D=0xfff0007f MATCH_FCVT_LU_D=0xc2300053 MASK_FCVT_LU_D=0xfff0007f MATCH_FMV_X_D=0xe2000053 MASK_FMV_X_D=0xfff0707f MATCH_FCLASS_D=0xe2001053 MASK_FCLASS_D=0xfff0707f MATCH_FCVT_W_Q=0xc6000053 MASK_FCVT_W_Q=0xfff0007f MATCH_FCVT_WU_Q=0xc6100053 MASK_FCVT_WU_Q=0xfff0007f 
MATCH_FCVT_L_Q=0xc6200053 MASK_FCVT_L_Q=0xfff0007f MATCH_FCVT_LU_Q=0xc6300053 MASK_FCVT_LU_Q=0xfff0007f MATCH_FMV_X_Q=0xe6000053 MASK_FMV_X_Q=0xfff0707f MATCH_FCLASS_Q=0xe6001053 MASK_FCLASS_Q=0xfff0707f MATCH_FCVT_S_W=0xd0000053 MASK_FCVT_S_W=0xfff0007f MATCH_FCVT_S_WU=0xd0100053 MASK_FCVT_S_WU=0xfff0007f MATCH_FCVT_S_L=0xd0200053 MASK_FCVT_S_L=0xfff0007f MATCH_FCVT_S_LU=0xd0300053 MASK_FCVT_S_LU=0xfff0007f MATCH_FMV_S_X=0xf0000053 MASK_FMV_S_X=0xfff0707f MATCH_FCVT_D_W=0xd2000053 MASK_FCVT_D_W=0xfff0007f MATCH_FCVT_D_WU=0xd2100053 MASK_FCVT_D_WU=0xfff0007f MATCH_FCVT_D_L=0xd2200053 MASK_FCVT_D_L=0xfff0007f MATCH_FCVT_D_LU=0xd2300053 MASK_FCVT_D_LU=0xfff0007f MATCH_FMV_D_X=0xf2000053 MASK_FMV_D_X=0xfff0707f MATCH_FCVT_Q_W=0xd6000053 MASK_FCVT_Q_W=0xfff0007f MATCH_FCVT_Q_WU=0xd6100053 MASK_FCVT_Q_WU=0xfff0007f MATCH_FCVT_Q_L=0xd6200053 MASK_FCVT_Q_L=0xfff0007f MATCH_FCVT_Q_LU=0xd6300053 MASK_FCVT_Q_LU=0xfff0007f MATCH_FMV_Q_X=0xf6000053 MASK_FMV_Q_X=0xfff0707f MATCH_FLW=0x2007 MASK_FLW=0x707f MATCH_FLD=0x3007 MASK_FLD=0x707f MATCH_FLQ=0x4007 MASK_FLQ=0x707f MATCH_FSW=0x2027 MASK_FSW=0x707f MATCH_FSD=0x3027 MASK_FSD=0x707f MATCH_FSQ=0x4027 MASK_FSQ=0x707f MATCH_FMADD_S=0x43 MASK_FMADD_S=0x600007f MATCH_FMSUB_S=0x47 MASK_FMSUB_S=0x600007f MATCH_FNMSUB_S=0x4b MASK_FNMSUB_S=0x600007f MATCH_FNMADD_S=0x4f MASK_FNMADD_S=0x600007f MATCH_FMADD_D=0x2000043 MASK_FMADD_D=0x600007f MATCH_FMSUB_D=0x2000047 MASK_FMSUB_D=0x600007f MATCH_FNMSUB_D=0x200004b MASK_FNMSUB_D=0x600007f MATCH_FNMADD_D=0x200004f MASK_FNMADD_D=0x600007f MATCH_FMADD_Q=0x6000043 MASK_FMADD_Q=0x600007f MATCH_FMSUB_Q=0x6000047 MASK_FMSUB_Q=0x600007f MATCH_FNMSUB_Q=0x600004b MASK_FNMSUB_Q=0x600007f MATCH_FNMADD_Q=0x600004f MASK_FNMADD_Q=0x600007f MATCH_C_ADDI4SPN=0x0 MASK_C_ADDI4SPN=0xe003 MATCH_C_FLD=0x2000 MASK_C_FLD=0xe003 MATCH_C_LW=0x4000 MASK_C_LW=0xe003 MATCH_C_FLW=0x6000 MASK_C_FLW=0xe003 MATCH_C_FSD=0xa000 MASK_C_FSD=0xe003 MATCH_C_SW=0xc000 MASK_C_SW=0xe003 MATCH_C_FSW=0xe000 MASK_C_FSW=0xe003 
MATCH_C_ADDI=0x1 MASK_C_ADDI=0xe003 MATCH_C_JAL=0x2001 MASK_C_JAL=0xe003 MATCH_C_LI=0x4001 MASK_C_LI=0xe003 MATCH_C_LUI=0x6001 MASK_C_LUI=0xe003 MATCH_C_SRLI=0x8001 MASK_C_SRLI=0xec03 MATCH_C_SRLI64=0x8001 MASK_C_SRLI64=0xfc7f MATCH_C_SRAI=0x8401 MASK_C_SRAI=0xec03 MATCH_C_SRAI64=0x8401 MASK_C_SRAI64=0xfc7f MATCH_C_ANDI=0x8801 MASK_C_ANDI=0xec03 MATCH_C_SUB=0x8c01 MASK_C_SUB=0xfc63 MATCH_C_XOR=0x8c21 MASK_C_XOR=0xfc63 MATCH_C_OR=0x8c41 MASK_C_OR=0xfc63 MATCH_C_AND=0x8c61 MASK_C_AND=0xfc63 MATCH_C_SUBW=0x9c01 MASK_C_SUBW=0xfc63 MATCH_C_ADDW=0x9c21 MASK_C_ADDW=0xfc63 MATCH_C_J=0xa001 MASK_C_J=0xe003 MATCH_C_BEQZ=0xc001 MASK_C_BEQZ=0xe003 MATCH_C_BNEZ=0xe001 MASK_C_BNEZ=0xe003 MATCH_C_SLLI=0x2 MASK_C_SLLI=0xe003 MATCH_C_SLLI64=0x2 MASK_C_SLLI64=0xf07f MATCH_C_FLDSP=0x2002 MASK_C_FLDSP=0xe003 MATCH_C_LWSP=0x4002 MASK_C_LWSP=0xe003 MATCH_C_FLWSP=0x6002 MASK_C_FLWSP=0xe003 MATCH_C_MV=0x8002 MASK_C_MV=0xf003 MATCH_C_ADD=0x9002 MASK_C_ADD=0xf003 MATCH_C_FSDSP=0xa002 MASK_C_FSDSP=0xe003 MATCH_C_SWSP=0xc002 MASK_C_SWSP=0xe003 MATCH_C_FSWSP=0xe002 MASK_C_FSWSP=0xe003 MATCH_C_NOP=0x1 MASK_C_NOP=0xffff MATCH_C_ADDI16SP=0x6101 MASK_C_ADDI16SP=0xef83 MATCH_C_JR=0x8002 MASK_C_JR=0xf07f MATCH_C_JALR=0x9002 MASK_C_JALR=0xf07f MATCH_C_EBREAK=0x9002 MASK_C_EBREAK=0xffff MATCH_C_LD=0x6000 MASK_C_LD=0xe003 MATCH_C_SD=0xe000 MASK_C_SD=0xe003 MATCH_C_ADDIW=0x2001 MASK_C_ADDIW=0xe003 MATCH_C_LDSP=0x6002 MASK_C_LDSP=0xe003 MATCH_C_SDSP=0xe002 MASK_C_SDSP=0xe003 MATCH_CUSTOM0=0xb MASK_CUSTOM0=0x707f MATCH_CUSTOM0_RS1=0x200b MASK_CUSTOM0_RS1=0x707f MATCH_CUSTOM0_RS1_RS2=0x300b MASK_CUSTOM0_RS1_RS2=0x707f MATCH_CUSTOM0_RD=0x400b MASK_CUSTOM0_RD=0x707f MATCH_CUSTOM0_RD_RS1=0x600b MASK_CUSTOM0_RD_RS1=0x707f MATCH_CUSTOM0_RD_RS1_RS2=0x700b MASK_CUSTOM0_RD_RS1_RS2=0x707f MATCH_CUSTOM1=0x2b MASK_CUSTOM1=0x707f MATCH_CUSTOM1_RS1=0x202b MASK_CUSTOM1_RS1=0x707f MATCH_CUSTOM1_RS1_RS2=0x302b MASK_CUSTOM1_RS1_RS2=0x707f MATCH_CUSTOM1_RD=0x402b MASK_CUSTOM1_RD=0x707f MATCH_CUSTOM1_RD_RS1=0x602b 
MASK_CUSTOM1_RD_RS1=0x707f MATCH_CUSTOM1_RD_RS1_RS2=0x702b MASK_CUSTOM1_RD_RS1_RS2=0x707f MATCH_CUSTOM2=0x5b MASK_CUSTOM2=0x707f MATCH_CUSTOM2_RS1=0x205b MASK_CUSTOM2_RS1=0x707f MATCH_CUSTOM2_RS1_RS2=0x305b MASK_CUSTOM2_RS1_RS2=0x707f MATCH_CUSTOM2_RD=0x405b MASK_CUSTOM2_RD=0x707f MATCH_CUSTOM2_RD_RS1=0x605b MASK_CUSTOM2_RD_RS1=0x707f MATCH_CUSTOM2_RD_RS1_RS2=0x705b MASK_CUSTOM2_RD_RS1_RS2=0x707f MATCH_CUSTOM3=0x7b MASK_CUSTOM3=0x707f MATCH_CUSTOM3_RS1=0x207b MASK_CUSTOM3_RS1=0x707f MATCH_CUSTOM3_RS1_RS2=0x307b MASK_CUSTOM3_RS1_RS2=0x707f MATCH_CUSTOM3_RD=0x407b MASK_CUSTOM3_RD=0x707f MATCH_CUSTOM3_RD_RS1=0x607b MASK_CUSTOM3_RD_RS1=0x707f MATCH_CUSTOM3_RD_RS1_RS2=0x707b MASK_CUSTOM3_RD_RS1_RS2=0x707f CSR_USTATUS=0x0 CSR_UIE=0x4 CSR_UTVEC=0x5 CSR_USCRATCH=0x40 CSR_UEPC=0x41 CSR_UCAUSE=0x42 CSR_UTVAL=0x43 CSR_UIP=0x44 CSR_FFLAGS=0x1 CSR_FRM=0x2 CSR_FCSR=0x3 CSR_CYCLE=0xc00 CSR_TIME=0xc01 CSR_INSTRET=0xc02 CSR_HPMCOUNTER3=0xc03 CSR_HPMCOUNTER4=0xc04 CSR_HPMCOUNTER5=0xc05 CSR_HPMCOUNTER6=0xc06 CSR_HPMCOUNTER7=0xc07 CSR_HPMCOUNTER8=0xc08 CSR_HPMCOUNTER9=0xc09 CSR_HPMCOUNTER10=0xc0a CSR_HPMCOUNTER11=0xc0b CSR_HPMCOUNTER12=0xc0c CSR_HPMCOUNTER13=0xc0d CSR_HPMCOUNTER14=0xc0e CSR_HPMCOUNTER15=0xc0f CSR_HPMCOUNTER16=0xc10 CSR_HPMCOUNTER17=0xc11 CSR_HPMCOUNTER18=0xc12 CSR_HPMCOUNTER19=0xc13 CSR_HPMCOUNTER20=0xc14 CSR_HPMCOUNTER21=0xc15 CSR_HPMCOUNTER22=0xc16 CSR_HPMCOUNTER23=0xc17 CSR_HPMCOUNTER24=0xc18 CSR_HPMCOUNTER25=0xc19 CSR_HPMCOUNTER26=0xc1a CSR_HPMCOUNTER27=0xc1b CSR_HPMCOUNTER28=0xc1c CSR_HPMCOUNTER29=0xc1d CSR_HPMCOUNTER30=0xc1e CSR_HPMCOUNTER31=0xc1f CSR_CYCLEH=0xc80 CSR_TIMEH=0xc81 CSR_INSTRETH=0xc82 CSR_HPMCOUNTER3H=0xc83 CSR_HPMCOUNTER4H=0xc84 CSR_HPMCOUNTER5H=0xc85 CSR_HPMCOUNTER6H=0xc86 CSR_HPMCOUNTER7H=0xc87 CSR_HPMCOUNTER8H=0xc88 CSR_HPMCOUNTER9H=0xc89 CSR_HPMCOUNTER10H=0xc8a CSR_HPMCOUNTER11H=0xc8b CSR_HPMCOUNTER12H=0xc8c CSR_HPMCOUNTER13H=0xc8d CSR_HPMCOUNTER14H=0xc8e CSR_HPMCOUNTER15H=0xc8f CSR_HPMCOUNTER16H=0xc90 CSR_HPMCOUNTER17H=0xc91 
CSR_HPMCOUNTER18H=0xc92 CSR_HPMCOUNTER19H=0xc93 CSR_HPMCOUNTER20H=0xc94 CSR_HPMCOUNTER21H=0xc95 CSR_HPMCOUNTER22H=0xc96 CSR_HPMCOUNTER23H=0xc97 CSR_HPMCOUNTER24H=0xc98 CSR_HPMCOUNTER25H=0xc99 CSR_HPMCOUNTER26H=0xc9a CSR_HPMCOUNTER27H=0xc9b CSR_HPMCOUNTER28H=0xc9c CSR_HPMCOUNTER29H=0xc9d CSR_HPMCOUNTER30H=0xc9e CSR_HPMCOUNTER31H=0xc9f CSR_SSTATUS=0x100 CSR_SEDELEG=0x102 CSR_SIDELEG=0x103 CSR_SIE=0x104 CSR_STVEC=0x105 CSR_SCOUNTEREN=0x106 CSR_SSCRATCH=0x140 CSR_SEPC=0x141 CSR_SCAUSE=0x142 CSR_STVAL=0x143 CSR_SIP=0x144 CSR_SATP=0x180 CSR_MVENDORID=0xf11 CSR_MARCHID=0xf12 CSR_MIMPID=0xf13 CSR_MHARTID=0xf14 CSR_MSTATUS=0x300 CSR_MISA=0x301 CSR_MEDELEG=0x302 CSR_MIDELEG=0x303 CSR_MIE=0x304 CSR_MTVEC=0x305 CSR_MCOUNTEREN=0x306 CSR_MSCRATCH=0x340 CSR_MEPC=0x341 CSR_MCAUSE=0x342 CSR_MTVAL=0x343 CSR_MIP=0x344 CSR_PMPCFG0=0x3a0 CSR_PMPCFG1=0x3a1 CSR_PMPCFG2=0x3a2 CSR_PMPCFG3=0x3a3 CSR_PMPADDR0=0x3b0 CSR_PMPADDR1=0x3b1 CSR_PMPADDR2=0x3b2 CSR_PMPADDR3=0x3b3 CSR_PMPADDR4=0x3b4 CSR_PMPADDR5=0x3b5 CSR_PMPADDR6=0x3b6 CSR_PMPADDR7=0x3b7 CSR_PMPADDR8=0x3b8 CSR_PMPADDR9=0x3b9 CSR_PMPADDR10=0x3ba CSR_PMPADDR11=0x3bb CSR_PMPADDR12=0x3bc CSR_PMPADDR13=0x3bd CSR_PMPADDR14=0x3be CSR_PMPADDR15=0x3bf CSR_MCYCLE=0xb00 CSR_MINSTRET=0xb02 CSR_MHPMCOUNTER3=0xb03 CSR_MHPMCOUNTER4=0xb04 CSR_MHPMCOUNTER5=0xb05 CSR_MHPMCOUNTER6=0xb06 CSR_MHPMCOUNTER7=0xb07 CSR_MHPMCOUNTER8=0xb08 CSR_MHPMCOUNTER9=0xb09 CSR_MHPMCOUNTER10=0xb0a CSR_MHPMCOUNTER11=0xb0b CSR_MHPMCOUNTER12=0xb0c CSR_MHPMCOUNTER13=0xb0d CSR_MHPMCOUNTER14=0xb0e CSR_MHPMCOUNTER15=0xb0f CSR_MHPMCOUNTER16=0xb10 CSR_MHPMCOUNTER17=0xb11 CSR_MHPMCOUNTER18=0xb12 CSR_MHPMCOUNTER19=0xb13 CSR_MHPMCOUNTER20=0xb14 CSR_MHPMCOUNTER21=0xb15 CSR_MHPMCOUNTER22=0xb16 CSR_MHPMCOUNTER23=0xb17 CSR_MHPMCOUNTER24=0xb18 CSR_MHPMCOUNTER25=0xb19 CSR_MHPMCOUNTER26=0xb1a CSR_MHPMCOUNTER27=0xb1b CSR_MHPMCOUNTER28=0xb1c CSR_MHPMCOUNTER29=0xb1d CSR_MHPMCOUNTER30=0xb1e CSR_MHPMCOUNTER31=0xb1f CSR_MCYCLEH=0xb80 CSR_MINSTRETH=0xb82 CSR_MHPMCOUNTER3H=0xb83 
# CSR numbers copied from binutils; values follow the RISC-V privileged spec
# revision binutils tracked at the time (some registers below were later
# renumbered or dropped -- kept verbatim since this is a generator artifact).

# Upper 32 bits of the machine hardware performance counters (RV32 only).
CSR_MHPMCOUNTER4H=0xb84
CSR_MHPMCOUNTER5H=0xb85
CSR_MHPMCOUNTER6H=0xb86
CSR_MHPMCOUNTER7H=0xb87
CSR_MHPMCOUNTER8H=0xb88
CSR_MHPMCOUNTER9H=0xb89
CSR_MHPMCOUNTER10H=0xb8a
CSR_MHPMCOUNTER11H=0xb8b
CSR_MHPMCOUNTER12H=0xb8c
CSR_MHPMCOUNTER13H=0xb8d
CSR_MHPMCOUNTER14H=0xb8e
CSR_MHPMCOUNTER15H=0xb8f
CSR_MHPMCOUNTER16H=0xb90
CSR_MHPMCOUNTER17H=0xb91
CSR_MHPMCOUNTER18H=0xb92
CSR_MHPMCOUNTER19H=0xb93
CSR_MHPMCOUNTER20H=0xb94
CSR_MHPMCOUNTER21H=0xb95
CSR_MHPMCOUNTER22H=0xb96
CSR_MHPMCOUNTER23H=0xb97
CSR_MHPMCOUNTER24H=0xb98
CSR_MHPMCOUNTER25H=0xb99
CSR_MHPMCOUNTER26H=0xb9a
CSR_MHPMCOUNTER27H=0xb9b
CSR_MHPMCOUNTER28H=0xb9c
CSR_MHPMCOUNTER29H=0xb9d
CSR_MHPMCOUNTER30H=0xb9e
CSR_MHPMCOUNTER31H=0xb9f
# Machine hardware performance event selectors (one per mhpmcounterN).
CSR_MHPMEVENT3=0x323
CSR_MHPMEVENT4=0x324
CSR_MHPMEVENT5=0x325
CSR_MHPMEVENT6=0x326
CSR_MHPMEVENT7=0x327
CSR_MHPMEVENT8=0x328
CSR_MHPMEVENT9=0x329
CSR_MHPMEVENT10=0x32a
CSR_MHPMEVENT11=0x32b
CSR_MHPMEVENT12=0x32c
CSR_MHPMEVENT13=0x32d
CSR_MHPMEVENT14=0x32e
CSR_MHPMEVENT15=0x32f
CSR_MHPMEVENT16=0x330
CSR_MHPMEVENT17=0x331
CSR_MHPMEVENT18=0x332
CSR_MHPMEVENT19=0x333
CSR_MHPMEVENT20=0x334
CSR_MHPMEVENT21=0x335
CSR_MHPMEVENT22=0x336
CSR_MHPMEVENT23=0x337
CSR_MHPMEVENT24=0x338
CSR_MHPMEVENT25=0x339
CSR_MHPMEVENT26=0x33a
CSR_MHPMEVENT27=0x33b
CSR_MHPMEVENT28=0x33c
CSR_MHPMEVENT29=0x33d
CSR_MHPMEVENT30=0x33e
CSR_MHPMEVENT31=0x33f
# Trigger (hardware breakpoint/watchpoint) registers.
CSR_TSELECT=0x7a0
CSR_TDATA1=0x7a1
CSR_TDATA2=0x7a2
CSR_TDATA3=0x7a3
# Debug-mode registers.
CSR_DCSR=0x7b0
CSR_DPC=0x7b1
CSR_DSCRATCH=0x7b2
# Hypervisor registers (legacy numbering -- presumably priv spec v1.9.x;
# TODO confirm against the binutils revision these were copied from).
CSR_HSTATUS=0x200
CSR_HEDELEG=0x202
CSR_HIDELEG=0x203
CSR_HIE=0x204
CSR_HTVEC=0x205
CSR_HSCRATCH=0x240
CSR_HEPC=0x241
CSR_HCAUSE=0x242
CSR_HBADADDR=0x243
CSR_HIP=0x244
# Machine base-and-bound protection registers (legacy priv spec).
CSR_MBASE=0x380
CSR_MBOUND=0x381
CSR_MIBASE=0x382
CSR_MIBOUND=0x383
CSR_MDBASE=0x384
CSR_MDBOUND=0x385
# Legacy per-privilege counter-enable registers.
CSR_MUCOUNTEREN=0x320
CSR_MSCOUNTEREN=0x321
CSR_MHCOUNTEREN=0x322
# Exception cause codes (mcause/scause values).
CAUSE_MISALIGNED_FETCH=0x0
CAUSE_FAULT_FETCH=0x1
CAUSE_ILLEGAL_INSTRUCTION=0x2
CAUSE_BREAKPOINT=0x3
CAUSE_MISALIGNED_LOAD=0x4
CAUSE_FAULT_LOAD=0x5
CAUSE_MISALIGNED_STORE=0x6
CAUSE_FAULT_STORE=0x7
CAUSE_USER_ECALL=0x8
# ---------------------------------------------------------------------------
# Immediate-field packing/unpacking helpers, transcribed from the C macros in
# binutils' include/opcode/riscv.h.  All helpers take and return plain ints;
# EXTRACT_* recover a (sign-extended) immediate from an encoded instruction
# word, ENCODE_* place an immediate value into its instruction bit slots.
# ---------------------------------------------------------------------------

def RV_X(x, s, n):
    """Extract the n-bit-wide field of x starting at bit position s."""
    return (((x) >> (s)) & ((1 << (n)) - 1))

def RV_IMM_SIGN(x):
    """Return -1 if bit 31 of instruction word x is set, else 0."""
    return (-(((x) >> 31) & 1))

# Width of a standard 12-bit immediate; binutils defines RISCV_IMM_BITS as 12.
# Fix: this name was referenced by (EXTRACT|ENCODE)_RVC_LUI_IMM below but was
# never defined anywhere in this script, so both helpers raised NameError
# when called.
RISCV_IMM_BITS = 12

def EXTRACT_ITYPE_IMM(x):
    return (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))

def EXTRACT_STYPE_IMM(x):
    return (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))

def EXTRACT_SBTYPE_IMM(x):
    return ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))

def EXTRACT_UTYPE_IMM(x):
    return ((RV_X(x, 12, 20) << 12) | (RV_IMM_SIGN(x) << 32))

def EXTRACT_UJTYPE_IMM(x):
    return ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))

def EXTRACT_RVC_IMM(x):
    return (RV_X(x, 2, 5) | (-RV_X(x, 12, 1) << 5))

def EXTRACT_RVC_LUI_IMM(x):
    return (EXTRACT_RVC_IMM(x) << RISCV_IMM_BITS)

def EXTRACT_RVC_SIMM3(x):
    return (RV_X(x, 10, 2) | (-RV_X(x, 12, 1) << 2))

def EXTRACT_RVC_UIMM8(x):
    return (RV_X(x, 5, 8))

def EXTRACT_RVC_ADDI4SPN_IMM(x):
    return ((RV_X(x, 6, 1) << 2) | (RV_X(x, 5, 1) << 3) | (RV_X(x, 11, 2) << 4) | (RV_X(x, 7, 4) << 6))

def EXTRACT_RVC_ADDI16SP_IMM(x):
    return ((RV_X(x, 6, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 5, 1) << 6) | (RV_X(x, 3, 2) << 7) | (-RV_X(x, 12, 1) << 9))

def EXTRACT_RVC_LW_IMM(x):
    return ((RV_X(x, 6, 1) << 2) | (RV_X(x, 10, 3) << 3) | (RV_X(x, 5, 1) << 6))

def EXTRACT_RVC_LD_IMM(x):
    return ((RV_X(x, 10, 3) << 3) | (RV_X(x, 5, 2) << 6))

def EXTRACT_RVC_LWSP_IMM(x):
    return ((RV_X(x, 4, 3) << 2) | (RV_X(x, 12, 1) << 5) | (RV_X(x, 2, 2) << 6))

def EXTRACT_RVC_LDSP_IMM(x):
    return ((RV_X(x, 5, 2) << 3) | (RV_X(x, 12, 1) << 5) | (RV_X(x, 2, 3) << 6))

def EXTRACT_RVC_SWSP_IMM(x):
    return ((RV_X(x, 9, 4) << 2) | (RV_X(x, 7, 2) << 6))

def EXTRACT_RVC_SDSP_IMM(x):
    return ((RV_X(x, 10, 3) << 3) | (RV_X(x, 7, 3) << 6))

def EXTRACT_RVC_B_IMM(x):
    return ((RV_X(x, 3, 2) << 1) | (RV_X(x, 10, 2) << 3) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 5, 2) << 6) | (-RV_X(x, 12, 1) << 8))

def EXTRACT_RVC_J_IMM(x):
    return ((RV_X(x, 3, 3) << 1) | (RV_X(x, 11, 1) << 4) | (RV_X(x, 2, 1) << 5) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 9, 2) << 8) | (RV_X(x, 8, 1) << 10) | (-RV_X(x, 12, 1) << 11))

def ENCODE_ITYPE_IMM(x):
    return (RV_X(x, 0, 12) << 20)

def ENCODE_STYPE_IMM(x):
    return ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))

def ENCODE_SBTYPE_IMM(x):
    return ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))

def ENCODE_UTYPE_IMM(x):
    return (RV_X(x, 12, 20) << 12)

def ENCODE_UJTYPE_IMM(x):
    return ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))

def ENCODE_RVC_IMM(x):
    return ((RV_X(x, 0, 5) << 2) | (RV_X(x, 5, 1) << 12))

def ENCODE_RVC_LUI_IMM(x):
    return ENCODE_RVC_IMM((x) >> RISCV_IMM_BITS)

def ENCODE_RVC_SIMM3(x):
    return (RV_X(x, 0, 3) << 10)

def ENCODE_RVC_UIMM8(x):
    return (RV_X(x, 0, 8) << 5)

def ENCODE_RVC_ADDI4SPN_IMM(x):
    return ((RV_X(x, 2, 1) << 6) | (RV_X(x, 3, 1) << 5) | (RV_X(x, 4, 2) << 11) | (RV_X(x, 6, 4) << 7))

def ENCODE_RVC_ADDI16SP_IMM(x):
    return ((RV_X(x, 4, 1) << 6) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 1) << 5) | (RV_X(x, 7, 2) << 3) | (RV_X(x, 9, 1) << 12))

def ENCODE_RVC_LW_IMM(x):
    return ((RV_X(x, 2, 1) << 6) | (RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 1) << 5))

def ENCODE_RVC_LD_IMM(x):
    return ((RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 2) << 5))

def ENCODE_RVC_LWSP_IMM(x):
    return ((RV_X(x, 2, 3) << 4) | (RV_X(x, 5, 1) << 12) | (RV_X(x, 6, 2) << 2))

def ENCODE_RVC_LDSP_IMM(x):
    return ((RV_X(x, 3, 2) << 5) | (RV_X(x, 5, 1) << 12) | (RV_X(x, 6, 3) << 2))

def ENCODE_RVC_SWSP_IMM(x):
    return ((RV_X(x, 2, 4) << 9) | (RV_X(x, 6, 2) << 7))

def ENCODE_RVC_SDSP_IMM(x):
    return ((RV_X(x, 3, 3) << 10) | (RV_X(x, 6, 3) << 7))

def ENCODE_RVC_B_IMM(x):
    return ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12))

def ENCODE_RVC_J_IMM(x):
    return ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12))
def match_opcode(op, insn):
    """Return True when insn matches op's encoding under op's mask."""
    delta = insn ^ op.match
    # Deliberately noisy: this script is a kept-around generator artifact.
    print("match_opcode: %08x %08x %08x %08x %08x"
          % (op.match, op.mask, insn, delta, delta & op.mask))
    return (delta & op.mask) == 0

def match_never(op, insn):
    """Predicate for assembler macros: never matches a real encoding."""
    return False

def match_rs1_eq_rs2(op, insn):
    """Match only when the rs1 and rs2 fields name the same register."""
    src1 = (insn & MASK_RS1) >> OP_SH_RS1
    src2 = (insn & MASK_RS2) >> OP_SH_RS2
    return match_opcode(op, insn) and src1 == src2

def match_rd_nonzero(op, insn):
    """Match only when the rd field is not x0."""
    return match_opcode(op, insn) and (insn & MASK_RD) != 0

def match_c_add(op, insn):
    """c.add: both rd and the compressed rs2 field must be non-zero."""
    return match_rd_nonzero(op, insn) and (insn & MASK_CRS2) != 0

def match_c_add_with_hint(op, insn):
    """c.add variant that also accepts rd == x0 (hint encodings)."""
    return match_opcode(op, insn) and (insn & MASK_CRS2) != 0

def match_c_nop(op, insn):
    """c.nop: a c.addi whose rd field is x0."""
    return match_opcode(op, insn) and ((insn & MASK_RD) >> OP_SH_RD) == 0

def match_c_add16sp(op, insn):
    """c.addi16sp: the rd field must be x2 (the stack pointer)."""
    return match_opcode(op, insn) and ((insn & MASK_RD) >> OP_SH_RD) == 2

# The remaining predicates are disabled stubs: in binutils they apply extra
# operand restrictions, but this generator never needs them to match.
def match_c_lui(op, insn):
    return False

def match_c_addi4spn(op, insn):
    return False

def match_c_addi16sp(op, insn):
    return False

def match_slli_as_c_slli(op, insn):
    return False

def match_srxi_as_c_srxi(op, insn):
    return False

def match_c_lui_with_hint(op, insn):
    return False

def match_c_slli(op, insn):
    return False

def match_c_slli64(op, insn):
    return False

class OpCode:
    """One row of the binutils riscv_opcodes table.

    Wraps a (name, xlen, isa, operands, match, mask, match_func, pinfo)
    tuple and derives the data access size in bytes from pinfo.
    """

    def __init__(self, op):
        # Positional layout mirrors the tuples in the `opcodes` table below.
        self.name = op[0]
        self.xlen = op[1]
        self.isa = op[2]
        self.operands = op[3]
        self.match = op[4]
        self.mask = op[5]
        self.func = op[6]
        self.pinfo = op[7]
        # Decode the INSN_DATA_SIZE field of pinfo into 1/2/4/8/16 bytes;
        # 0 means "no memory access size encoded".
        self.size = 0
        encoded = self.pinfo & INSN_DATA_SIZE
        if encoded:
            self.size = 1 << ((encoded >> INSN_DATA_SIZE_SHIFT) - 1)

    def __str__(self):
        return "%s %s %08x %08x %08x (%d, %d) " % (
            self.name, self.operands, self.match, self.mask,
            self.pinfo, self.xlen, self.size)

    def __eq__(self, other):
        # Two table rows are equal when every identifying field agrees
        # (isa, func and size are intentionally not compared).
        if other is None:
            return False
        for attr in ("name", "mask", "match", "operands", "pinfo", "xlen"):
            if getattr(self, attr) != getattr(other, attr):
                return False
        return True
ISAI, "d,s,j", MATCH_JALR, MASK_JALR, match_opcode, INSN_JSR ), ("j", 0, ISAC, "Ca", MATCH_C_J, MASK_C_J, match_opcode, INSN_ALIAS|INSN_BRANCH ), ("j", 0, ISAI, "a", MATCH_JAL, MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS|INSN_BRANCH ), ("jal", 0, ISAI, "d,a", MATCH_JAL, MASK_JAL, match_opcode, INSN_JSR ), ("jal", 32, ISAC, "Ca", MATCH_C_JAL, MASK_C_JAL, match_opcode, INSN_ALIAS|INSN_JSR ), ("jal", 0, ISAI, "a", MATCH_JAL | (X_RA << OP_SH_RD), MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS|INSN_JSR ), ("call", 0, ISAI, "d,c", (X_T1 << OP_SH_RS1), M_CALL, match_never, INSN_MACRO ), ("call", 0, ISAI, "c", (X_RA << OP_SH_RS1) | (X_RA << OP_SH_RD), M_CALL, match_never, INSN_MACRO ), ("tail", 0, ISAI, "c", (X_T1 << OP_SH_RS1), M_CALL, match_never, INSN_MACRO ), ("jump", 0, ISAI, "c,s", 0, M_CALL, match_never, INSN_MACRO ), ("nop", 0, ISAC, "", MATCH_C_ADDI, 0xffff, match_opcode, INSN_ALIAS ), ("nop", 0, ISAI, "", MATCH_ADDI, MASK_ADDI | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS ), ("lui", 0, ISAC, "d,Cu", MATCH_C_LUI, MASK_C_LUI, match_c_lui, INSN_ALIAS ), ("lui", 0, ISAI, "d,u", MATCH_LUI, MASK_LUI, match_opcode, 0 ), ("li", 0, ISAC, "d,Cv", MATCH_C_LUI, MASK_C_LUI, match_c_lui, INSN_ALIAS ), ("li", 0, ISAC, "d,Co", MATCH_C_LI, MASK_C_LI, match_rd_nonzero, INSN_ALIAS ), ("li", 0, ISAI, "d,j", MATCH_ADDI, MASK_ADDI | MASK_RS1, match_opcode, INSN_ALIAS ), #/* addi */ ("li", 0, ISAI, "d,I", 0, M_LI, match_never, INSN_MACRO ), ("mv", 0, ISAC, "d,CV", MATCH_C_MV, MASK_C_MV, match_c_add, INSN_ALIAS ), ("mv", 0, ISAI, "d,s", MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS ), ("move", 0, ISAC, "d,CV", MATCH_C_MV, MASK_C_MV, match_c_add, INSN_ALIAS ), ("move", 0, ISAI, "d,s", MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS ), ("andi", 0, ISAC, "Cs,Cw,Co", MATCH_C_ANDI, MASK_C_ANDI, match_opcode, INSN_ALIAS ), ("andi", 0, ISAI, "d,s,j", MATCH_ANDI, MASK_ANDI, match_opcode, 0 ), ("and", 0, ISAC, "Cs,Cw,Ct", MATCH_C_AND, MASK_C_AND, match_opcode, 
INSN_ALIAS ), ("and", 0, ISAC, "Cs,Ct,Cw", MATCH_C_AND, MASK_C_AND, match_opcode, INSN_ALIAS ), ("and", 0, ISAC, "Cs,Cw,Co", MATCH_C_ANDI, MASK_C_ANDI, match_opcode, INSN_ALIAS ), ("and", 0, ISAI, "d,s,t", MATCH_AND, MASK_AND, match_opcode, 0 ), ("and", 0, ISAI, "d,s,j", MATCH_ANDI, MASK_ANDI, match_opcode, INSN_ALIAS ), ("beqz", 0, ISAC, "Cs,Cp", MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("beqz", 0, ISAI, "s,p", MATCH_BEQ, MASK_BEQ | MASK_RS2, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("beq", 0, ISAC, "Cs,Cz,Cp", MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("beq", 0, ISAI, "s,t,p", MATCH_BEQ, MASK_BEQ, match_opcode, INSN_CONDBRANCH ), ("blez", 0, ISAI, "t,p", MATCH_BGE, MASK_BGE | MASK_RS1, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bgez", 0, ISAI, "s,p", MATCH_BGE, MASK_BGE | MASK_RS2, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bge", 0, ISAI, "s,t,p", MATCH_BGE, MASK_BGE, match_opcode, INSN_CONDBRANCH ), ("bgeu", 0, ISAI, "s,t,p", MATCH_BGEU, MASK_BGEU, match_opcode, INSN_CONDBRANCH ), ("ble", 0, ISAI, "t,s,p", MATCH_BGE, MASK_BGE, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bleu", 0, ISAI, "t,s,p", MATCH_BGEU, MASK_BGEU, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bltz", 0, ISAI, "s,p", MATCH_BLT, MASK_BLT | MASK_RS2, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bgtz", 0, ISAI, "t,p", MATCH_BLT, MASK_BLT | MASK_RS1, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("blt", 0, ISAI, "s,t,p", MATCH_BLT, MASK_BLT, match_opcode, INSN_CONDBRANCH ), ("bltu", 0, ISAI, "s,t,p", MATCH_BLTU, MASK_BLTU, match_opcode, INSN_CONDBRANCH ), ("bgt", 0, ISAI, "t,s,p", MATCH_BLT, MASK_BLT, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bgtu", 0, ISAI, "t,s,p", MATCH_BLTU, MASK_BLTU, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bnez", 0, ISAC, "Cs,Cp", MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bnez", 0, ISAI, "s,p", MATCH_BNE, MASK_BNE | MASK_RS2, match_opcode, INSN_ALIAS|INSN_CONDBRANCH 
), ("bne", 0, ISAC, "Cs,Cz,Cp", MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, INSN_ALIAS|INSN_CONDBRANCH ), ("bne", 0, ISAI, "s,t,p", MATCH_BNE, MASK_BNE, match_opcode, INSN_CONDBRANCH ), ("addi", 0, ISAC, "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_c_addi4spn, INSN_ALIAS ), ("addi", 0, ISAC, "d,CU,Cj", MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, INSN_ALIAS ), ("addi", 0, ISAC, "d,CU,z", MATCH_C_NOP, MASK_C_ADDI | MASK_RVC_IMM, match_c_nop, INSN_ALIAS ), ("addi", 0, ISAC, "Cc,Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_c_addi16sp, INSN_ALIAS ), ("addi", 0, ISAI, "d,s,j", MATCH_ADDI, MASK_ADDI, match_opcode, 0 ), ("add", 0, ISAC, "d,CU,CV", MATCH_C_ADD, MASK_C_ADD, match_c_add, INSN_ALIAS ), ("add", 0, ISAC, "d,CV,CU", MATCH_C_ADD, MASK_C_ADD, match_c_add, INSN_ALIAS ), ("add", 0, ISAC, "d,CU,Co", MATCH_C_ADDI, MASK_C_ADDI, match_rd_nonzero, INSN_ALIAS ), ("add", 0, ISAC, "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_c_addi4spn, INSN_ALIAS ), ("add", 0, ISAC, "Cc,Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_c_addi16sp, INSN_ALIAS ), ("add", 0, ISAI, "d,s,t", MATCH_ADD, MASK_ADD, match_opcode, 0 ), #/* This is used for TLS, where the fourth arg is %tprel_add, to get a reloc #applied to an add instruction, for relaxation to use. 
*/ #("add", 0, ISAI, "d,s,t,1",MATCH_ADD, MASK_ADD, match_opcode, 0 ), ("add", 0, ISAI, "d,s,j", MATCH_ADDI, MASK_ADDI, match_opcode, INSN_ALIAS ), ("la", 0, ISAI, "d,B", 0, M_LA, match_never, INSN_MACRO ), ("lla", 0, ISAI, "d,B", 0, M_LLA, match_never, INSN_MACRO ), ("la.tls.gd", 0, ISAI, "d,A", 0, M_LA_TLS_GD, match_never, INSN_MACRO ), ("la.tls.ie", 0, ISAI, "d,A", 0, M_LA_TLS_IE, match_never, INSN_MACRO ), ("neg", 0, ISAI, "d,t", MATCH_SUB, MASK_SUB | MASK_RS1, match_opcode, INSN_ALIAS ), #/* sub 0 */ ("slli", 0, ISAC, "d,CU,C>", MATCH_C_SLLI, MASK_C_SLLI, match_slli_as_c_slli, INSN_ALIAS ), ("slli", 0, ISAI, "d,s,>", MATCH_SLLI, MASK_SLLI, match_opcode, 0 ), ("sll", 0, ISAC, "d,CU,C>", MATCH_C_SLLI, MASK_C_SLLI, match_slli_as_c_slli, INSN_ALIAS ), ("sll", 0, ISAI, "d,s,t", MATCH_SLL, MASK_SLL, match_opcode, 0 ), ("sll", 0, ISAI, "d,s,>", MATCH_SLLI, MASK_SLLI, match_opcode, INSN_ALIAS ), ("srli", 0, ISAC, "Cs,Cw,C>", MATCH_C_SRLI, MASK_C_SRLI, match_srxi_as_c_srxi, INSN_ALIAS ), ("srli", 0, ISAI, "d,s,>", MATCH_SRLI, MASK_SRLI, match_opcode, 0 ), ("srl", 0, ISAC, "Cs,Cw,C>", MATCH_C_SRLI, MASK_C_SRLI, match_srxi_as_c_srxi, INSN_ALIAS ), ("srl", 0, ISAI, "d,s,t", MATCH_SRL, MASK_SRL, match_opcode, 0 ), ("srl", 0, ISAI, "d,s,>", MATCH_SRLI, MASK_SRLI, match_opcode, INSN_ALIAS ), ("srai", 0, ISAC, "Cs,Cw,C>", MATCH_C_SRAI, MASK_C_SRAI, match_srxi_as_c_srxi, INSN_ALIAS ), ("srai", 0, ISAI, "d,s,>", MATCH_SRAI, MASK_SRAI, match_opcode, 0 ), ("sra", 0, ISAC, "Cs,Cw,C>", MATCH_C_SRAI, MASK_C_SRAI, match_srxi_as_c_srxi, INSN_ALIAS ), ("sra", 0, ISAI, "d,s,t", MATCH_SRA, MASK_SRA, match_opcode, 0 ), ("sra", 0, ISAI, "d,s,>", MATCH_SRAI, MASK_SRAI, match_opcode, INSN_ALIAS ), ("sub", 0, ISAC, "Cs,Cw,Ct", MATCH_C_SUB, MASK_C_SUB, match_opcode, INSN_ALIAS ), ("sub", 0, ISAI, "d,s,t", MATCH_SUB, MASK_SUB, match_opcode, 0 ), ("lb", 0, ISAI, "d,o(s)", MATCH_LB, MASK_LB, match_opcode, INSN_DREF|INSN_1_BYTE ), ("lb", 0, ISAI, "d,A", 0, M_LB, match_never, INSN_MACRO ), ("lbu", 
0, ISAI, "d,o(s)", MATCH_LBU, MASK_LBU, match_opcode, INSN_DREF|INSN_1_BYTE ), ("lbu", 0, ISAI, "d,A", 0, M_LBU, match_never, INSN_MACRO ), ("lh", 0, ISAI, "d,o(s)", MATCH_LH, MASK_LH, match_opcode, INSN_DREF|INSN_2_BYTE ), ("lh", 0, ISAI, "d,A", 0, M_LH, match_never, INSN_MACRO ), ("lhu", 0, ISAI, "d,o(s)", MATCH_LHU, MASK_LHU, match_opcode, INSN_DREF|INSN_2_BYTE ), ("lhu", 0, ISAI, "d,A", 0, M_LHU, match_never, INSN_MACRO ), ("lw", 0, ISAC, "d,Cm(Cc)", MATCH_C_LWSP, MASK_C_LWSP, match_rd_nonzero, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("lw", 0, ISAC, "Ct,Ck(Cs)", MATCH_C_LW, MASK_C_LW, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("lw", 0, ISAI, "d,o(s)", MATCH_LW, MASK_LW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("lw", 0, ISAI, "d,A", 0, M_LW, match_never, INSN_MACRO ), ("not", 0, ISAI, "d,s", MATCH_XORI | MASK_IMM, MASK_XORI | MASK_IMM, match_opcode, INSN_ALIAS ), ("ori", 0, ISAI, "d,s,j", MATCH_ORI, MASK_ORI, match_opcode, 0 ), ("or", 0, ISAC, "Cs,Cw,Ct", MATCH_C_OR, MASK_C_OR, match_opcode, INSN_ALIAS ), ("or", 0, ISAC, "Cs,Ct,Cw", MATCH_C_OR, MASK_C_OR, match_opcode, INSN_ALIAS ), ("or", 0, ISAI, "d,s,t", MATCH_OR, MASK_OR, match_opcode, 0 ), ("or", 0, ISAI, "d,s,j", MATCH_ORI, MASK_ORI, match_opcode, INSN_ALIAS ), ("auipc", 0, ISAI, "d,u", MATCH_AUIPC, MASK_AUIPC, match_opcode, 0 ), ("seqz", 0, ISAI, "d,s", MATCH_SLTIU | ENCODE_ITYPE_IMM (1), MASK_SLTIU | MASK_IMM, match_opcode, INSN_ALIAS ), ("snez", 0, ISAI, "d,t", MATCH_SLTU, MASK_SLTU | MASK_RS1, match_opcode, INSN_ALIAS ), ("sltz", 0, ISAI, "d,s", MATCH_SLT, MASK_SLT | MASK_RS2, match_opcode, INSN_ALIAS ), ("sgtz", 0, ISAI, "d,t", MATCH_SLT, MASK_SLT | MASK_RS1, match_opcode, INSN_ALIAS ), ("slti", 0, ISAI, "d,s,j", MATCH_SLTI, MASK_SLTI, match_opcode, 0 ), ("slt", 0, ISAI, "d,s,t", MATCH_SLT, MASK_SLT, match_opcode, 0 ), ("slt", 0, ISAI, "d,s,j", MATCH_SLTI, MASK_SLTI, match_opcode, INSN_ALIAS ), ("sltiu", 0, ISAI, "d,s,j", MATCH_SLTIU, MASK_SLTIU, match_opcode, 0 ), ("sltu", 0, ISAI, "d,s,t", 
MATCH_SLTU, MASK_SLTU, match_opcode, 0 ), ("sltu", 0, ISAI, "d,s,j", MATCH_SLTIU, MASK_SLTIU, match_opcode, INSN_ALIAS ), ("sgt", 0, ISAI, "d,t,s", MATCH_SLT, MASK_SLT, match_opcode, INSN_ALIAS ), ("sgtu", 0, ISAI, "d,t,s", MATCH_SLTU, MASK_SLTU, match_opcode, INSN_ALIAS ), ("sb", 0, ISAI, "t,q(s)", MATCH_SB, MASK_SB, match_opcode, INSN_DREF|INSN_1_BYTE ), ("sb", 0, ISAI, "t,A,s", 0, M_SB, match_never, INSN_MACRO ), ("sh", 0, ISAI, "t,q(s)", MATCH_SH, MASK_SH, match_opcode, INSN_DREF|INSN_2_BYTE ), ("sh", 0, ISAI, "t,A,s", 0, M_SH, match_never, INSN_MACRO ), ("sw", 0, ISAC, "CV,CM(Cc)", MATCH_C_SWSP, MASK_C_SWSP, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("sw", 0, ISAC, "Ct,Ck(Cs)", MATCH_C_SW, MASK_C_SW, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("sw", 0, ISAI, "t,q(s)", MATCH_SW, MASK_SW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("sw", 0, ISAI, "t,A,s", 0, M_SW, match_never, INSN_MACRO ), ("fence", 0, ISAI, "", MATCH_FENCE | MASK_PRED | MASK_SUCC, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS ), ("fence", 0, ISAI, "P,Q", MATCH_FENCE, MASK_FENCE | MASK_RD | MASK_RS1 | (MASK_IMM & ~MASK_PRED & ~MASK_SUCC), match_opcode, 0 ), ("fence.i", 0, ISAI, "", MATCH_FENCE_I, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, 0 ), ("fence.tso", 0, ISAI, "", MATCH_FENCE_TSO, MASK_FENCE_TSO | MASK_RD | MASK_RS1, match_opcode, INSN_ALIAS ), ("rdcycle", 0, ISAI, "d", MATCH_RDCYCLE, MASK_RDCYCLE, match_opcode, INSN_ALIAS ), ("rdinstret", 0, ISAI, "d", MATCH_RDINSTRET, MASK_RDINSTRET, match_opcode, INSN_ALIAS ), ("rdtime", 0, ISAI, "d", MATCH_RDTIME, MASK_RDTIME, match_opcode, INSN_ALIAS ), ("rdcycleh", 32, ISAI, "d", MATCH_RDCYCLEH, MASK_RDCYCLEH, match_opcode, INSN_ALIAS ), ("rdinstreth", 32, ISAI, "d", MATCH_RDINSTRETH, MASK_RDINSTRETH, match_opcode, INSN_ALIAS ), ("rdtimeh", 32, ISAI, "d", MATCH_RDTIMEH, MASK_RDTIMEH, match_opcode, INSN_ALIAS ), ("ecall", 0, ISAI, "", MATCH_SCALL, MASK_SCALL, match_opcode, 0 ), #("scall", 0, ISAI, "", 
MATCH_SCALL, MASK_SCALL, match_opcode, 0 ), ("xori", 0, ISAI, "d,s,j", MATCH_XORI, MASK_XORI, match_opcode, 0 ), ("xor", 0, ISAC, "Cs,Cw,Ct", MATCH_C_XOR, MASK_C_XOR, match_opcode, INSN_ALIAS ), ("xor", 0, ISAC, "Cs,Ct,Cw", MATCH_C_XOR, MASK_C_XOR, match_opcode, INSN_ALIAS ), ("xor", 0, ISAI, "d,s,t", MATCH_XOR, MASK_XOR, match_opcode, 0 ), ("xor", 0, ISAI, "d,s,j", MATCH_XORI, MASK_XORI, match_opcode, INSN_ALIAS ), ("lwu", 64, ISAI, "d,o(s)", MATCH_LWU, MASK_LWU, match_opcode, INSN_DREF|INSN_4_BYTE ), ("lwu", 64, ISAI, "d,A", 0, M_LWU, match_never, INSN_MACRO ), ("ld", 64, ISAC, "d,Cn(Cc)", MATCH_C_LDSP, MASK_C_LDSP, match_rd_nonzero, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("ld", 64, ISAC, "Ct,Cl(Cs)", MATCH_C_LD, MASK_C_LD, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("ld", 64, ISAI, "d,o(s)", MATCH_LD, MASK_LD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("ld", 64, ISAI, "d,A", 0, M_LD, match_never, INSN_MACRO ), ("sd", 64, ISAC, "CV,CN(Cc)", MATCH_C_SDSP, MASK_C_SDSP, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("sd", 64, ISAC, "Ct,Cl(Cs)", MATCH_C_SD, MASK_C_SD, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("sd", 64, ISAI, "t,q(s)", MATCH_SD, MASK_SD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("sd", 64, ISAI, "t,A,s", 0, M_SD, match_never, INSN_MACRO ), ("sext.w", 64, ISAC, "d,CU", MATCH_C_ADDIW, MASK_C_ADDIW | MASK_RVC_IMM, match_rd_nonzero, INSN_ALIAS ), ("sext.w", 64, ISAI, "d,s", MATCH_ADDIW, MASK_ADDIW | MASK_IMM, match_opcode, INSN_ALIAS ), ("addiw", 64, ISAC, "d,CU,Co", MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, INSN_ALIAS ), ("addiw", 64, ISAI, "d,s,j", MATCH_ADDIW, MASK_ADDIW, match_opcode, 0 ), ("addw", 64, ISAC, "Cs,Cw,Ct", MATCH_C_ADDW, MASK_C_ADDW, match_opcode, INSN_ALIAS ), ("addw", 64, ISAC, "Cs,Ct,Cw", MATCH_C_ADDW, MASK_C_ADDW, match_opcode, INSN_ALIAS ), ("addw", 64, ISAC, "d,CU,Co", MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, INSN_ALIAS ), ("addw", 64, ISAI, "d,s,t", MATCH_ADDW, MASK_ADDW, match_opcode, 0 ), ("addw", 64, ISAI, 
"d,s,j", MATCH_ADDIW, MASK_ADDIW, match_opcode, INSN_ALIAS ), ("negw", 64, ISAI, "d,t", MATCH_SUBW, MASK_SUBW | MASK_RS1, match_opcode, INSN_ALIAS ), #/* sub 0 */ ("slliw", 64, ISAI, "d,s,<", MATCH_SLLIW, MASK_SLLIW, match_opcode, 0 ), ("sllw", 64, ISAI, "d,s,t", MATCH_SLLW, MASK_SLLW, match_opcode, 0 ), ("sllw", 64, ISAI, "d,s,<", MATCH_SLLIW, MASK_SLLIW, match_opcode, INSN_ALIAS ), ("srliw", 64, ISAI, "d,s,<", MATCH_SRLIW, MASK_SRLIW, match_opcode, 0 ), ("srlw", 64, ISAI, "d,s,t", MATCH_SRLW, MASK_SRLW, match_opcode, 0 ), ("srlw", 64, ISAI, "d,s,<", MATCH_SRLIW, MASK_SRLIW, match_opcode, INSN_ALIAS ), ("sraiw", 64, ISAI, "d,s,<", MATCH_SRAIW, MASK_SRAIW, match_opcode, 0 ), ("sraw", 64, ISAI, "d,s,t", MATCH_SRAW, MASK_SRAW, match_opcode, 0 ), ("sraw", 64, ISAI, "d,s,<", MATCH_SRAIW, MASK_SRAIW, match_opcode, INSN_ALIAS ), ("subw", 64, ISAC, "Cs,Cw,Ct", MATCH_C_SUBW, MASK_C_SUBW, match_opcode, INSN_ALIAS ), ("subw", 64, ISAI, "d,s,t", MATCH_SUBW, MASK_SUBW, match_opcode, 0 ), #/* Atomic memory operation instruction subset */ ("lr.w", 0, ISAA, "d,0(s)", MATCH_LR_W, MASK_LR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("sc.w", 0, ISAA, "d,t,0(s)", MATCH_SC_W, MASK_SC_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoadd.w", 0, ISAA, "d,t,0(s)", MATCH_AMOADD_W, MASK_AMOADD_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoswap.w", 0, ISAA, "d,t,0(s)", MATCH_AMOSWAP_W, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoand.w", 0, ISAA, "d,t,0(s)", MATCH_AMOAND_W, MASK_AMOAND_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoor.w", 0, ISAA, "d,t,0(s)", MATCH_AMOOR_W, MASK_AMOOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoxor.w", 0, ISAA, "d,t,0(s)", MATCH_AMOXOR_W, MASK_AMOXOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomax.w", 0, ISAA, "d,t,0(s)", MATCH_AMOMAX_W, MASK_AMOMAX_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomaxu.w", 0, ISAA, "d,t,0(s)", MATCH_AMOMAXU_W, 
MASK_AMOMAXU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomin.w", 0, ISAA, "d,t,0(s)", MATCH_AMOMIN_W, MASK_AMOMIN_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amominu.w", 0, ISAA, "d,t,0(s)", MATCH_AMOMINU_W, MASK_AMOMINU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("lr.w.aq", 0, ISAA, "d,0(s)", MATCH_LR_W | MASK_AQ, MASK_LR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("sc.w.aq", 0, ISAA, "d,t,0(s)", MATCH_SC_W | MASK_AQ, MASK_SC_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoadd.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOADD_W | MASK_AQ, MASK_AMOADD_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoswap.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOSWAP_W | MASK_AQ, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoand.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOAND_W | MASK_AQ, MASK_AMOAND_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoor.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOOR_W | MASK_AQ, MASK_AMOOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoxor.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOXOR_W | MASK_AQ, MASK_AMOXOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomax.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOMAX_W | MASK_AQ, MASK_AMOMAX_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomaxu.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOMAXU_W | MASK_AQ, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomin.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOMIN_W | MASK_AQ, MASK_AMOMIN_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amominu.w.aq", 0, ISAA, "d,t,0(s)", MATCH_AMOMINU_W | MASK_AQ, MASK_AMOMINU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("lr.w.rl", 0, ISAA, "d,0(s)", MATCH_LR_W | MASK_RL, MASK_LR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("sc.w.rl", 0, ISAA, "d,t,0(s)", MATCH_SC_W | MASK_RL, MASK_SC_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoadd.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOADD_W | MASK_RL, 
MASK_AMOADD_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoswap.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOSWAP_W | MASK_RL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoand.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOAND_W | MASK_RL, MASK_AMOAND_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoor.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOOR_W | MASK_RL, MASK_AMOOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoxor.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOXOR_W | MASK_RL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomax.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOMAX_W | MASK_RL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomaxu.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOMAXU_W | MASK_RL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomin.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOMIN_W | MASK_RL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amominu.w.rl", 0, ISAA, "d,t,0(s)", MATCH_AMOMINU_W | MASK_RL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("lr.w.aqrl", 0, ISAA, "d,0(s)", MATCH_LR_W | MASK_AQRL, MASK_LR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("sc.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_SC_W | MASK_AQRL, MASK_SC_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoadd.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOADD_W | MASK_AQRL, MASK_AMOADD_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoswap.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOSWAP_W | MASK_AQRL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoand.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOAND_W | MASK_AQRL, MASK_AMOAND_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoor.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOOR_W | MASK_AQRL, MASK_AMOOR_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amoxor.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOXOR_W | MASK_AQRL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, 
INSN_DREF|INSN_4_BYTE ), ("amomax.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOMAX_W | MASK_AQRL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomaxu.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOMAXU_W | MASK_AQRL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amomin.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOMIN_W | MASK_AQRL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("amominu.w.aqrl", 0, ISAA, "d,t,0(s)", MATCH_AMOMINU_W | MASK_AQRL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, INSN_DREF|INSN_4_BYTE ), ("lr.d", 64, ISAA , "d,0(s)", MATCH_LR_D, MASK_LR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("sc.d", 64, ISAA , "d,t,0(s)", MATCH_SC_D, MASK_SC_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoadd.d", 64, ISAA , "d,t,0(s)", MATCH_AMOADD_D, MASK_AMOADD_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoswap.d", 64, ISAA , "d,t,0(s)", MATCH_AMOSWAP_D, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoand.d", 64, ISAA , "d,t,0(s)", MATCH_AMOAND_D, MASK_AMOAND_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoor.d", 64, ISAA , "d,t,0(s)", MATCH_AMOOR_D, MASK_AMOOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoxor.d", 64, ISAA , "d,t,0(s)", MATCH_AMOXOR_D, MASK_AMOXOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomax.d", 64, ISAA , "d,t,0(s)", MATCH_AMOMAX_D, MASK_AMOMAX_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomaxu.d", 64, ISAA , "d,t,0(s)", MATCH_AMOMAXU_D, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomin.d", 64, ISAA , "d,t,0(s)", MATCH_AMOMIN_D, MASK_AMOMIN_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amominu.d", 64, ISAA , "d,t,0(s)", MATCH_AMOMINU_D, MASK_AMOMINU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("lr.d.aq", 64, ISAA , "d,0(s)", MATCH_LR_D | MASK_AQ, MASK_LR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("sc.d.aq", 64, ISAA , "d,t,0(s)", 
MATCH_SC_D | MASK_AQ, MASK_SC_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoadd.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOADD_D | MASK_AQ, MASK_AMOADD_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoswap.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOSWAP_D | MASK_AQ, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoand.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOAND_D | MASK_AQ, MASK_AMOAND_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoor.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOOR_D | MASK_AQ, MASK_AMOOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoxor.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOXOR_D | MASK_AQ, MASK_AMOXOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomax.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOMAX_D | MASK_AQ, MASK_AMOMAX_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomaxu.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOMAXU_D | MASK_AQ, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomin.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOMIN_D | MASK_AQ, MASK_AMOMIN_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amominu.d.aq", 64, ISAA , "d,t,0(s)", MATCH_AMOMINU_D | MASK_AQ, MASK_AMOMINU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("lr.d.rl", 64, ISAA , "d,0(s)", MATCH_LR_D | MASK_RL, MASK_LR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("sc.d.rl", 64, ISAA , "d,t,0(s)", MATCH_SC_D | MASK_RL, MASK_SC_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoadd.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOADD_D | MASK_RL, MASK_AMOADD_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoswap.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOSWAP_D | MASK_RL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoand.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOAND_D | MASK_RL, MASK_AMOAND_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoor.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOOR_D | MASK_RL, MASK_AMOOR_D | MASK_AQRL, 
match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoxor.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOXOR_D | MASK_RL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomax.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOMAX_D | MASK_RL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomaxu.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOMAXU_D | MASK_RL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomin.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOMIN_D | MASK_RL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amominu.d.rl", 64, ISAA , "d,t,0(s)", MATCH_AMOMINU_D | MASK_RL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("lr.d.aqrl", 64, ISAA , "d,0(s)", MATCH_LR_D | MASK_AQRL, MASK_LR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("sc.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_SC_D | MASK_AQRL, MASK_SC_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoadd.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOADD_D | MASK_AQRL, MASK_AMOADD_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoswap.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOSWAP_D | MASK_AQRL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoand.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOAND_D | MASK_AQRL, MASK_AMOAND_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoor.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOOR_D | MASK_AQRL, MASK_AMOOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amoxor.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOXOR_D | MASK_AQRL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomax.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOMAX_D | MASK_AQRL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomaxu.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOMAXU_D | MASK_AQRL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), ("amomin.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOMIN_D | MASK_AQRL, MASK_AMOMIN_D | MASK_AQRL, 
match_opcode, INSN_DREF|INSN_8_BYTE ), ("amominu.d.aqrl", 64, ISAA , "d,t,0(s)", MATCH_AMOMINU_D | MASK_AQRL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, INSN_DREF|INSN_8_BYTE ), #/* Multiply/Divide instruction subset */ ("mul", 0, ISAM, "d,s,t", MATCH_MUL, MASK_MUL, match_opcode, 0 ), ("mulh", 0, ISAM, "d,s,t", MATCH_MULH, MASK_MULH, match_opcode, 0 ), ("mulhu", 0, ISAM, "d,s,t", MATCH_MULHU, MASK_MULHU, match_opcode, 0 ), ("mulhsu", 0, ISAM, "d,s,t", MATCH_MULHSU, MASK_MULHSU, match_opcode, 0 ), ("div", 0, ISAM, "d,s,t", MATCH_DIV, MASK_DIV, match_opcode, 0 ), ("divu", 0, ISAM, "d,s,t", MATCH_DIVU, MASK_DIVU, match_opcode, 0 ), ("rem", 0, ISAM, "d,s,t", MATCH_REM, MASK_REM, match_opcode, 0 ), ("remu", 0, ISAM, "d,s,t", MATCH_REMU, MASK_REMU, match_opcode, 0 ), ("mulw", 64, ISAM, "d,s,t", MATCH_MULW, MASK_MULW, match_opcode, 0 ), ("divw", 64, ISAM, "d,s,t", MATCH_DIVW, MASK_DIVW, match_opcode, 0 ), ("divuw", 64, ISAM, "d,s,t", MATCH_DIVUW, MASK_DIVUW, match_opcode, 0 ), ("remw", 64, ISAM, "d,s,t", MATCH_REMW, MASK_REMW, match_opcode, 0 ), ("remuw", 64, ISAM, "d,s,t", MATCH_REMUW, MASK_REMUW, match_opcode, 0 ), #/* Single-precision floating-point instruction subset */ ("frsr", 0, ISAF, "d", MATCH_FRCSR, MASK_FRCSR, match_opcode, 0 ), ("fssr", 0, ISAF, "s", MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, 0 ), ("fssr", 0, ISAF, "d,s", MATCH_FSCSR, MASK_FSCSR, match_opcode, 0 ), ("frcsr", 0, ISAF, "d", MATCH_FRCSR, MASK_FRCSR, match_opcode, 0 ), ("fscsr", 0, ISAF, "s", MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, 0 ), ("fscsr", 0, ISAF, "d,s", MATCH_FSCSR, MASK_FSCSR, match_opcode, 0 ), ("frrm", 0, ISAF, "d", MATCH_FRRM, MASK_FRRM, match_opcode, 0 ), ("fsrm", 0, ISAF, "s", MATCH_FSRM, MASK_FSRM | MASK_RD, match_opcode, 0 ), ("fsrm", 0, ISAF, "d,s", MATCH_FSRM, MASK_FSRM, match_opcode, 0 ), ("fsrmi", 0, ISAF, "d,Z", MATCH_FSRMI, MASK_FSRMI, match_opcode, 0 ), ("fsrmi", 0, ISAF, "Z", MATCH_FSRMI, MASK_FSRMI | MASK_RD, match_opcode, 0 ), ("frflags", 0, ISAF, "d", 
MATCH_FRFLAGS, MASK_FRFLAGS, match_opcode, 0 ), ("fsflags", 0, ISAF, "s", MATCH_FSFLAGS, MASK_FSFLAGS | MASK_RD, match_opcode, 0 ), ("fsflags", 0, ISAF, "d,s", MATCH_FSFLAGS, MASK_FSFLAGS, match_opcode, 0 ), ("fsflagsi", 0, ISAF, "d,Z", MATCH_FSFLAGSI, MASK_FSFLAGSI, match_opcode, 0 ), ("fsflagsi", 0, ISAF, "Z", MATCH_FSFLAGSI, MASK_FSFLAGSI | MASK_RD, match_opcode, 0 ), ("flw", 32, ISAFC, "D,Cm(Cc)", MATCH_C_FLWSP, MASK_C_FLWSP, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("flw", 32, ISAFC, "CD,Ck(Cs)", MATCH_C_FLW, MASK_C_FLW, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("flw", 0, ISAF, "D,o(s)", MATCH_FLW, MASK_FLW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("flw", 0, ISAF, "D,A,s", 0, M_FLW, match_never, INSN_MACRO ), ("fsw", 32, ISAFC, "CT,CM(Cc)", MATCH_C_FSWSP, MASK_C_FSWSP, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("fsw", 32, ISAFC, "CD,Ck(Cs)", MATCH_C_FSW, MASK_C_FSW, match_opcode, INSN_ALIAS|INSN_DREF|INSN_4_BYTE ), ("fsw", 0, ISAF, "T,q(s)", MATCH_FSW, MASK_FSW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("fsw", 0, ISAF, "T,A,s", 0, M_FSW, match_never, INSN_MACRO ), ("fmv.x.w", 0, ISAF, "d,S", MATCH_FMV_X_S, MASK_FMV_X_S, match_opcode, 0 ), ("fmv.w.x", 0, ISAF, "D,s", MATCH_FMV_S_X, MASK_FMV_S_X, match_opcode, 0 ), ("fmv.x.s", 0, ISAF, "d,S", MATCH_FMV_X_S, MASK_FMV_X_S, match_opcode, 0 ), ("fmv.s.x", 0, ISAF, "D,s", MATCH_FMV_S_X, MASK_FMV_S_X, match_opcode, 0 ), ("fmv.s", 0, ISAF, "D,U", MATCH_FSGNJ_S, MASK_FSGNJ_S, match_rs1_eq_rs2, INSN_ALIAS ), ("fneg.s", 0, ISAF, "D,U", MATCH_FSGNJN_S, MASK_FSGNJN_S, match_rs1_eq_rs2, INSN_ALIAS ), ("fabs.s", 0, ISAF, "D,U", MATCH_FSGNJX_S, MASK_FSGNJX_S, match_rs1_eq_rs2, INSN_ALIAS ), ("fsgnj.s", 0, ISAF, "D,S,T", MATCH_FSGNJ_S, MASK_FSGNJ_S, match_opcode, 0 ), ("fsgnjn.s", 0, ISAF, "D,S,T", MATCH_FSGNJN_S, MASK_FSGNJN_S, match_opcode, 0 ), ("fsgnjx.s", 0, ISAF, "D,S,T", MATCH_FSGNJX_S, MASK_FSGNJX_S, match_opcode, 0 ), ("fadd.s", 0, ISAF, "D,S,T", MATCH_FADD_S | MASK_RM, MASK_FADD_S | MASK_RM, 
match_opcode, 0 ), ("fadd.s", 0, ISAF, "D,S,T,m", MATCH_FADD_S, MASK_FADD_S, match_opcode, 0 ), ("fsub.s", 0, ISAF, "D,S,T", MATCH_FSUB_S | MASK_RM, MASK_FSUB_S | MASK_RM, match_opcode, 0 ), ("fsub.s", 0, ISAF, "D,S,T,m", MATCH_FSUB_S, MASK_FSUB_S, match_opcode, 0 ), ("fmul.s", 0, ISAF, "D,S,T", MATCH_FMUL_S | MASK_RM, MASK_FMUL_S | MASK_RM, match_opcode, 0 ), ("fmul.s", 0, ISAF, "D,S,T,m", MATCH_FMUL_S, MASK_FMUL_S, match_opcode, 0 ), ("fdiv.s", 0, ISAF, "D,S,T", MATCH_FDIV_S | MASK_RM, MASK_FDIV_S | MASK_RM, match_opcode, 0 ), ("fdiv.s", 0, ISAF, "D,S,T,m", MATCH_FDIV_S, MASK_FDIV_S, match_opcode, 0 ), ("fsqrt.s", 0, ISAF, "D,S", MATCH_FSQRT_S | MASK_RM, MASK_FSQRT_S | MASK_RM, match_opcode, 0 ), ("fsqrt.s", 0, ISAF, "D,S,m", MATCH_FSQRT_S, MASK_FSQRT_S, match_opcode, 0 ), ("fmin.s", 0, ISAF, "D,S,T", MATCH_FMIN_S, MASK_FMIN_S, match_opcode, 0 ), ("fmax.s", 0, ISAF, "D,S,T", MATCH_FMAX_S, MASK_FMAX_S, match_opcode, 0 ), ("fmadd.s", 0, ISAF, "D,S,T,R", MATCH_FMADD_S | MASK_RM, MASK_FMADD_S | MASK_RM, match_opcode, 0 ), ("fmadd.s", 0, ISAF, "D,S,T,R,m", MATCH_FMADD_S, MASK_FMADD_S, match_opcode, 0 ), ("fnmadd.s", 0, ISAF, "D,S,T,R", MATCH_FNMADD_S | MASK_RM, MASK_FNMADD_S | MASK_RM, match_opcode, 0 ), ("fnmadd.s", 0, ISAF, "D,S,T,R,m", MATCH_FNMADD_S, MASK_FNMADD_S, match_opcode, 0 ), ("fmsub.s", 0, ISAF, "D,S,T,R", MATCH_FMSUB_S | MASK_RM, MASK_FMSUB_S | MASK_RM, match_opcode, 0 ), ("fmsub.s", 0, ISAF, "D,S,T,R,m", MATCH_FMSUB_S, MASK_FMSUB_S, match_opcode, 0 ), ("fnmsub.s", 0, ISAF, "D,S,T,R", MATCH_FNMSUB_S | MASK_RM, MASK_FNMSUB_S | MASK_RM, match_opcode, 0 ), ("fnmsub.s", 0, ISAF, "D,S,T,R,m", MATCH_FNMSUB_S, MASK_FNMSUB_S, match_opcode, 0 ), ("fcvt.w.s", 0, ISAF, "d,S", MATCH_FCVT_W_S | MASK_RM, MASK_FCVT_W_S | MASK_RM, match_opcode, 0 ), ("fcvt.w.s", 0, ISAF, "d,S,m", MATCH_FCVT_W_S, MASK_FCVT_W_S, match_opcode, 0 ), ("fcvt.wu.s", 0, ISAF, "d,S", MATCH_FCVT_WU_S | MASK_RM, MASK_FCVT_WU_S | MASK_RM, match_opcode, 0 ), ("fcvt.wu.s", 0, ISAF, "d,S,m", 
MATCH_FCVT_WU_S, MASK_FCVT_WU_S, match_opcode, 0 ), ("fcvt.s.w", 0, ISAF, "D,s", MATCH_FCVT_S_W | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, 0 ), ("fcvt.s.w", 0, ISAF, "D,s,m", MATCH_FCVT_S_W, MASK_FCVT_S_W, match_opcode, 0 ), ("fcvt.s.wu", 0, ISAF, "D,s", MATCH_FCVT_S_WU | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, 0 ), ("fcvt.s.wu", 0, ISAF, "D,s,m", MATCH_FCVT_S_WU, MASK_FCVT_S_WU, match_opcode, 0 ), ("fclass.s", 0, ISAF, "d,S", MATCH_FCLASS_S, MASK_FCLASS_S, match_opcode, 0 ), ("feq.s", 0, ISAF, "d,S,T", MATCH_FEQ_S, MASK_FEQ_S, match_opcode, 0 ), ("flt.s", 0, ISAF, "d,S,T", MATCH_FLT_S, MASK_FLT_S, match_opcode, 0 ), ("fle.s", 0, ISAF, "d,S,T", MATCH_FLE_S, MASK_FLE_S, match_opcode, 0 ), ("fgt.s", 0, ISAF, "d,T,S", MATCH_FLT_S, MASK_FLT_S, match_opcode, 0 ), ("fge.s", 0, ISAF, "d,T,S", MATCH_FLE_S, MASK_FLE_S, match_opcode, 0 ), ("fcvt.l.s", 64, ISAF, "d,S", MATCH_FCVT_L_S | MASK_RM, MASK_FCVT_L_S | MASK_RM, match_opcode, 0 ), ("fcvt.l.s", 64, ISAF, "d,S,m", MATCH_FCVT_L_S, MASK_FCVT_L_S, match_opcode, 0 ), ("fcvt.lu.s", 64, ISAF, "d,S", MATCH_FCVT_LU_S | MASK_RM, MASK_FCVT_LU_S | MASK_RM, match_opcode, 0 ), ("fcvt.lu.s", 64, ISAF, "d,S,m", MATCH_FCVT_LU_S, MASK_FCVT_LU_S, match_opcode, 0 ), ("fcvt.s.l", 64, ISAF, "D,s", MATCH_FCVT_S_L | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, 0 ), ("fcvt.s.l", 64, ISAF, "D,s,m", MATCH_FCVT_S_L, MASK_FCVT_S_L, match_opcode, 0 ), ("fcvt.s.lu", 64, ISAF, "D,s", MATCH_FCVT_S_LU | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, 0 ), ("fcvt.s.lu", 64, ISAF, "D,s,m", MATCH_FCVT_S_LU, MASK_FCVT_S_LU, match_opcode, 0 ), #/* Double-precision floating-point instruction subset */ ("fld", 0, ISADC, "D,Cn(Cc)", MATCH_C_FLDSP, MASK_C_FLDSP, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("fld", 0, ISADC, "CD,Cl(Cs)", MATCH_C_FLD, MASK_C_FLD, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("fld", 0, ISAD, "D,o(s)", MATCH_FLD, MASK_FLD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("fld", 0, ISAD, "D,A,s", 0, M_FLD, 
match_never, INSN_MACRO ), ("fsd", 0, ISADC, "CT,CN(Cc)", MATCH_C_FSDSP, MASK_C_FSDSP, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("fsd", 0, ISADC, "CD,Cl(Cs)", MATCH_C_FSD, MASK_C_FSD, match_opcode, INSN_ALIAS|INSN_DREF|INSN_8_BYTE ), ("fsd", 0, ISAD, "T,q(s)", MATCH_FSD, MASK_FSD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("fsd", 0, ISAD, "T,A,s", 0, M_FSD, match_never, INSN_MACRO ), ("fmv.d", 0, ISAD, "D,U", MATCH_FSGNJ_D, MASK_FSGNJ_D, match_rs1_eq_rs2, INSN_ALIAS ), ("fneg.d", 0, ISAD, "D,U", MATCH_FSGNJN_D, MASK_FSGNJN_D, match_rs1_eq_rs2, INSN_ALIAS ), ("fabs.d", 0, ISAD, "D,U", MATCH_FSGNJX_D, MASK_FSGNJX_D, match_rs1_eq_rs2, INSN_ALIAS ), ("fsgnj.d", 0, ISAD, "D,S,T", MATCH_FSGNJ_D, MASK_FSGNJ_D, match_opcode, 0 ), ("fsgnjn.d", 0, ISAD, "D,S,T", MATCH_FSGNJN_D, MASK_FSGNJN_D, match_opcode, 0 ), ("fsgnjx.d", 0, ISAD, "D,S,T", MATCH_FSGNJX_D, MASK_FSGNJX_D, match_opcode, 0 ), ("fadd.d", 0, ISAD, "D,S,T", MATCH_FADD_D | MASK_RM, MASK_FADD_D | MASK_RM, match_opcode, 0 ), ("fadd.d", 0, ISAD, "D,S,T,m", MATCH_FADD_D, MASK_FADD_D, match_opcode, 0 ), ("fsub.d", 0, ISAD, "D,S,T", MATCH_FSUB_D | MASK_RM, MASK_FSUB_D | MASK_RM, match_opcode, 0 ), ("fsub.d", 0, ISAD, "D,S,T,m", MATCH_FSUB_D, MASK_FSUB_D, match_opcode, 0 ), ("fmul.d", 0, ISAD, "D,S,T", MATCH_FMUL_D | MASK_RM, MASK_FMUL_D | MASK_RM, match_opcode, 0 ), ("fmul.d", 0, ISAD, "D,S,T,m", MATCH_FMUL_D, MASK_FMUL_D, match_opcode, 0 ), ("fdiv.d", 0, ISAD, "D,S,T", MATCH_FDIV_D | MASK_RM, MASK_FDIV_D | MASK_RM, match_opcode, 0 ), ("fdiv.d", 0, ISAD, "D,S,T,m", MATCH_FDIV_D, MASK_FDIV_D, match_opcode, 0 ), ("fsqrt.d", 0, ISAD, "D,S", MATCH_FSQRT_D | MASK_RM, MASK_FSQRT_D | MASK_RM, match_opcode, 0 ), ("fsqrt.d", 0, ISAD, "D,S,m", MATCH_FSQRT_D, MASK_FSQRT_D, match_opcode, 0 ), ("fmin.d", 0, ISAD, "D,S,T", MATCH_FMIN_D, MASK_FMIN_D, match_opcode, 0 ), ("fmax.d", 0, ISAD, "D,S,T", MATCH_FMAX_D, MASK_FMAX_D, match_opcode, 0 ), ("fmadd.d", 0, ISAD, "D,S,T,R", MATCH_FMADD_D | MASK_RM, MASK_FMADD_D | MASK_RM, 
match_opcode, 0 ), ("fmadd.d", 0, ISAD, "D,S,T,R,m", MATCH_FMADD_D, MASK_FMADD_D, match_opcode, 0 ), ("fnmadd.d", 0, ISAD, "D,S,T,R", MATCH_FNMADD_D | MASK_RM, MASK_FNMADD_D | MASK_RM, match_opcode, 0 ), ("fnmadd.d", 0, ISAD, "D,S,T,R,m", MATCH_FNMADD_D, MASK_FNMADD_D, match_opcode, 0 ), ("fmsub.d", 0, ISAD, "D,S,T,R", MATCH_FMSUB_D | MASK_RM, MASK_FMSUB_D | MASK_RM, match_opcode, 0 ), ("fmsub.d", 0, ISAD, "D,S,T,R,m", MATCH_FMSUB_D, MASK_FMSUB_D, match_opcode, 0 ), ("fnmsub.d", 0, ISAD, "D,S,T,R", MATCH_FNMSUB_D | MASK_RM, MASK_FNMSUB_D | MASK_RM, match_opcode, 0 ), ("fnmsub.d", 0, ISAD, "D,S,T,R,m", MATCH_FNMSUB_D, MASK_FNMSUB_D, match_opcode, 0 ), ("fcvt.w.d", 0, ISAD, "d,S", MATCH_FCVT_W_D | MASK_RM, MASK_FCVT_W_D | MASK_RM, match_opcode, 0 ), ("fcvt.w.d", 0, ISAD, "d,S,m", MATCH_FCVT_W_D, MASK_FCVT_W_D, match_opcode, 0 ), ("fcvt.wu.d", 0, ISAD, "d,S", MATCH_FCVT_WU_D | MASK_RM, MASK_FCVT_WU_D | MASK_RM, match_opcode, 0 ), ("fcvt.wu.d", 0, ISAD, "d,S,m", MATCH_FCVT_WU_D, MASK_FCVT_WU_D, match_opcode, 0 ), ("fcvt.d.w", 0, ISAD, "D,s", MATCH_FCVT_D_W, MASK_FCVT_D_W | MASK_RM, match_opcode, 0 ), ("fcvt.d.wu", 0, ISAD, "D,s", MATCH_FCVT_D_WU, MASK_FCVT_D_WU | MASK_RM, match_opcode, 0 ), ("fcvt.d.s", 0, ISAD, "D,S", MATCH_FCVT_D_S, MASK_FCVT_D_S | MASK_RM, match_opcode, 0 ), ("fcvt.s.d", 0, ISAD, "D,S", MATCH_FCVT_S_D | MASK_RM, MASK_FCVT_S_D | MASK_RM, match_opcode, 0 ), ("fcvt.s.d", 0, ISAD, "D,S,m", MATCH_FCVT_S_D, MASK_FCVT_S_D, match_opcode, 0 ), ("fclass.d", 0, ISAD, "d,S", MATCH_FCLASS_D, MASK_FCLASS_D, match_opcode, 0 ), ("feq.d", 0, ISAD, "d,S,T", MATCH_FEQ_D, MASK_FEQ_D, match_opcode, 0 ), ("flt.d", 0, ISAD, "d,S,T", MATCH_FLT_D, MASK_FLT_D, match_opcode, 0 ), ("fle.d", 0, ISAD, "d,S,T", MATCH_FLE_D, MASK_FLE_D, match_opcode, 0 ), ("fgt.d", 0, ISAD, "d,T,S", MATCH_FLT_D, MASK_FLT_D, match_opcode, 0 ), ("fge.d", 0, ISAD, "d,T,S", MATCH_FLE_D, MASK_FLE_D, match_opcode, 0 ), ("fmv.x.d", 64, ISAD, "d,S", MATCH_FMV_X_D, MASK_FMV_X_D, match_opcode, 0 ), 
("fmv.d.x", 64, ISAD, "D,s", MATCH_FMV_D_X, MASK_FMV_D_X, match_opcode, 0 ), ("fcvt.l.d", 64, ISAD, "d,S", MATCH_FCVT_L_D | MASK_RM, MASK_FCVT_L_D | MASK_RM, match_opcode, 0 ), ("fcvt.l.d", 64, ISAD, "d,S,m", MATCH_FCVT_L_D, MASK_FCVT_L_D, match_opcode, 0 ), ("fcvt.lu.d", 64, ISAD, "d,S", MATCH_FCVT_LU_D | MASK_RM, MASK_FCVT_LU_D | MASK_RM, match_opcode, 0 ), ("fcvt.lu.d", 64, ISAD, "d,S,m", MATCH_FCVT_LU_D, MASK_FCVT_LU_D, match_opcode, 0 ), ("fcvt.d.l", 64, ISAD, "D,s", MATCH_FCVT_D_L | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, 0 ), ("fcvt.d.l", 64, ISAD, "D,s,m", MATCH_FCVT_D_L, MASK_FCVT_D_L, match_opcode, 0 ), ("fcvt.d.lu", 64, ISAD, "D,s", MATCH_FCVT_D_LU | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, 0 ), ("fcvt.d.lu", 64, ISAD, "D,s,m", MATCH_FCVT_D_LU, MASK_FCVT_D_LU, match_opcode, 0 ), #/* Quad-precision floating-point instruction subset */ ("flq", 0, ISAQ, "D,o(s)", MATCH_FLQ, MASK_FLQ, match_opcode, INSN_DREF|INSN_16_BYTE ), ("flq", 0, ISAQ, "D,A,s", 0, M_FLQ, match_never, INSN_MACRO ), ("fsq", 0, ISAQ, "T,q(s)", MATCH_FSQ, MASK_FSQ, match_opcode, INSN_DREF|INSN_16_BYTE ), ("fsq", 0, ISAQ, "T,A,s", 0, M_FSQ, match_never, INSN_MACRO ), ("fmv.q", 0, ISAQ, "D,U", MATCH_FSGNJ_Q, MASK_FSGNJ_Q, match_rs1_eq_rs2, INSN_ALIAS ), ("fneg.q", 0, ISAQ, "D,U", MATCH_FSGNJN_Q, MASK_FSGNJN_Q, match_rs1_eq_rs2, INSN_ALIAS ), ("fabs.q", 0, ISAQ, "D,U", MATCH_FSGNJX_Q, MASK_FSGNJX_Q, match_rs1_eq_rs2, INSN_ALIAS ), ("fsgnj.q", 0, ISAQ, "D,S,T", MATCH_FSGNJ_Q, MASK_FSGNJ_Q, match_opcode, 0 ), ("fsgnjn.q", 0, ISAQ, "D,S,T", MATCH_FSGNJN_Q, MASK_FSGNJN_Q, match_opcode, 0 ), ("fsgnjx.q", 0, ISAQ, "D,S,T", MATCH_FSGNJX_Q, MASK_FSGNJX_Q, match_opcode, 0 ), ("fadd.q", 0, ISAQ, "D,S,T", MATCH_FADD_Q | MASK_RM, MASK_FADD_Q | MASK_RM, match_opcode, 0 ), ("fadd.q", 0, ISAQ, "D,S,T,m", MATCH_FADD_Q, MASK_FADD_Q, match_opcode, 0 ), ("fsub.q", 0, ISAQ, "D,S,T", MATCH_FSUB_Q | MASK_RM, MASK_FSUB_Q | MASK_RM, match_opcode, 0 ), ("fsub.q", 0, ISAQ, "D,S,T,m", MATCH_FSUB_Q, 
MASK_FSUB_Q, match_opcode, 0 ), ("fmul.q", 0, ISAQ, "D,S,T", MATCH_FMUL_Q | MASK_RM, MASK_FMUL_Q | MASK_RM, match_opcode, 0 ), ("fmul.q", 0, ISAQ, "D,S,T,m", MATCH_FMUL_Q, MASK_FMUL_Q, match_opcode, 0 ), ("fdiv.q", 0, ISAQ, "D,S,T", MATCH_FDIV_Q | MASK_RM, MASK_FDIV_Q | MASK_RM, match_opcode, 0 ), ("fdiv.q", 0, ISAQ, "D,S,T,m", MATCH_FDIV_Q, MASK_FDIV_Q, match_opcode, 0 ), ("fsqrt.q", 0, ISAQ, "D,S", MATCH_FSQRT_Q | MASK_RM, MASK_FSQRT_Q | MASK_RM, match_opcode, 0 ), ("fsqrt.q", 0, ISAQ, "D,S,m", MATCH_FSQRT_Q, MASK_FSQRT_Q, match_opcode, 0 ), ("fmin.q", 0, ISAQ, "D,S,T", MATCH_FMIN_Q, MASK_FMIN_Q, match_opcode, 0 ), ("fmax.q", 0, ISAQ, "D,S,T", MATCH_FMAX_Q, MASK_FMAX_Q, match_opcode, 0 ), ("fmadd.q", 0, ISAQ, "D,S,T,R", MATCH_FMADD_Q | MASK_RM, MASK_FMADD_Q | MASK_RM, match_opcode, 0 ), ("fmadd.q", 0, ISAQ, "D,S,T,R,m", MATCH_FMADD_Q, MASK_FMADD_Q, match_opcode, 0 ), ("fnmadd.q", 0, ISAQ, "D,S,T,R", MATCH_FNMADD_Q | MASK_RM, MASK_FNMADD_Q | MASK_RM, match_opcode, 0 ), ("fnmadd.q", 0, ISAQ, "D,S,T,R,m", MATCH_FNMADD_Q, MASK_FNMADD_Q, match_opcode, 0 ), ("fmsub.q", 0, ISAQ, "D,S,T,R", MATCH_FMSUB_Q | MASK_RM, MASK_FMSUB_Q | MASK_RM, match_opcode, 0 ), ("fmsub.q", 0, ISAQ, "D,S,T,R,m", MATCH_FMSUB_Q, MASK_FMSUB_Q, match_opcode, 0 ), ("fnmsub.q", 0, ISAQ, "D,S,T,R", MATCH_FNMSUB_Q | MASK_RM, MASK_FNMSUB_Q | MASK_RM, match_opcode, 0 ), ("fnmsub.q", 0, ISAQ, "D,S,T,R,m", MATCH_FNMSUB_Q, MASK_FNMSUB_Q, match_opcode, 0 ), ("fcvt.w.q", 0, ISAQ, "d,S", MATCH_FCVT_W_Q | MASK_RM, MASK_FCVT_W_Q | MASK_RM, match_opcode, 0 ), ("fcvt.w.q", 0, ISAQ, "d,S,m", MATCH_FCVT_W_Q, MASK_FCVT_W_Q, match_opcode, 0 ), ("fcvt.wu.q", 0, ISAQ, "d,S", MATCH_FCVT_WU_Q | MASK_RM, MASK_FCVT_WU_Q | MASK_RM, match_opcode, 0 ), ("fcvt.wu.q", 0, ISAQ, "d,S,m", MATCH_FCVT_WU_Q, MASK_FCVT_WU_Q, match_opcode, 0 ), ("fcvt.q.w", 0, ISAQ, "D,s", MATCH_FCVT_Q_W, MASK_FCVT_Q_W | MASK_RM, match_opcode, 0 ), ("fcvt.q.wu", 0, ISAQ, "D,s", MATCH_FCVT_Q_WU, MASK_FCVT_Q_WU | MASK_RM, match_opcode, 0 ), ("fcvt.q.s", 
0, ISAQ, "D,S", MATCH_FCVT_Q_S, MASK_FCVT_Q_S | MASK_RM, match_opcode, 0 ), ("fcvt.q.d", 0, ISAQ, "D,S", MATCH_FCVT_Q_D, MASK_FCVT_Q_D | MASK_RM, match_opcode, 0 ), ("fcvt.s.q", 0, ISAQ, "D,S", MATCH_FCVT_S_Q | MASK_RM, MASK_FCVT_S_Q | MASK_RM, match_opcode, 0 ), ("fcvt.s.q", 0, ISAQ, "D,S,m", MATCH_FCVT_S_Q, MASK_FCVT_S_Q, match_opcode, 0 ), ("fcvt.d.q", 0, ISAQ, "D,S", MATCH_FCVT_D_Q | MASK_RM, MASK_FCVT_D_Q | MASK_RM, match_opcode, 0 ), ("fcvt.d.q", 0, ISAQ, "D,S,m", MATCH_FCVT_D_Q, MASK_FCVT_D_Q, match_opcode, 0 ), ("fclass.q", 0, ISAQ, "d,S", MATCH_FCLASS_Q, MASK_FCLASS_Q, match_opcode, 0 ), ("feq.q", 0, ISAQ, "d,S,T", MATCH_FEQ_Q, MASK_FEQ_Q, match_opcode, 0 ), ("flt.q", 0, ISAQ, "d,S,T", MATCH_FLT_Q, MASK_FLT_Q, match_opcode, 0 ), ("fle.q", 0, ISAQ, "d,S,T", MATCH_FLE_Q, MASK_FLE_Q, match_opcode, 0 ), ("fgt.q", 0, ISAQ, "d,T,S", MATCH_FLT_Q, MASK_FLT_Q, match_opcode, 0 ), ("fge.q", 0, ISAQ, "d,T,S", MATCH_FLE_Q, MASK_FLE_Q, match_opcode, 0 ), ("fmv.x.q", 64, ISAQ, "d,S", MATCH_FMV_X_Q, MASK_FMV_X_Q, match_opcode, 0 ), ("fmv.q.x", 64, ISAQ, "D,s", MATCH_FMV_Q_X, MASK_FMV_Q_X, match_opcode, 0 ), ("fcvt.l.q", 64, ISAQ, "d,S", MATCH_FCVT_L_Q | MASK_RM, MASK_FCVT_L_Q | MASK_RM, match_opcode, 0 ), ("fcvt.l.q", 64, ISAQ, "d,S,m", MATCH_FCVT_L_Q, MASK_FCVT_L_Q, match_opcode, 0 ), ("fcvt.lu.q", 64, ISAQ, "d,S", MATCH_FCVT_LU_Q | MASK_RM, MASK_FCVT_LU_Q | MASK_RM, match_opcode, 0 ), ("fcvt.lu.q", 64, ISAQ, "d,S,m", MATCH_FCVT_LU_Q, MASK_FCVT_LU_Q, match_opcode, 0 ), ("fcvt.q.l", 64, ISAQ, "D,s", MATCH_FCVT_Q_L | MASK_RM, MASK_FCVT_Q_L | MASK_RM, match_opcode, 0 ), ("fcvt.q.l", 64, ISAQ, "D,s,m", MATCH_FCVT_Q_L, MASK_FCVT_Q_L, match_opcode, 0 ), ("fcvt.q.lu", 64, ISAQ, "D,s", MATCH_FCVT_Q_LU | MASK_RM, MASK_FCVT_Q_L | MASK_RM, match_opcode, 0 ), ("fcvt.q.lu", 64, ISAQ, "D,s,m", MATCH_FCVT_Q_LU, MASK_FCVT_Q_LU, match_opcode, 0 ), #/* Compressed instructions. 
*/ ("c.unimp", 0, ISAC, "", 0, 0xffff, match_opcode, 0 ), ("c.ebreak", 0, ISAC, "", MATCH_C_EBREAK, MASK_C_EBREAK, match_opcode, 0 ), ("c.jr", 0, ISAC, "d", MATCH_C_JR, MASK_C_JR, match_rd_nonzero, INSN_BRANCH ), ("c.jalr", 0, ISAC, "d", MATCH_C_JALR, MASK_C_JALR, match_rd_nonzero, INSN_JSR ), ("c.j", 0, ISAC, "Ca", MATCH_C_J, MASK_C_J, match_opcode, INSN_BRANCH ), ("c.jal", 32, ISAC, "Ca", MATCH_C_JAL, MASK_C_JAL, match_opcode, INSN_JSR ), ("c.beqz", 0, ISAC, "Cs,Cp", MATCH_C_BEQZ, MASK_C_BEQZ, match_opcode, INSN_CONDBRANCH ), ("c.bnez", 0, ISAC, "Cs,Cp", MATCH_C_BNEZ, MASK_C_BNEZ, match_opcode, INSN_CONDBRANCH ), ("c.lwsp", 0, ISAC, "d,Cm(Cc)", MATCH_C_LWSP, MASK_C_LWSP, match_rd_nonzero, 0 ), ("c.lw", 0, ISAC, "Ct,Ck(Cs)", MATCH_C_LW, MASK_C_LW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("c.swsp", 0, ISAC, "CV,CM(Cc)", MATCH_C_SWSP, MASK_C_SWSP, match_opcode, INSN_DREF|INSN_4_BYTE ), ("c.sw", 0, ISAC, "Ct,Ck(Cs)", MATCH_C_SW, MASK_C_SW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("c.nop", 0, ISAC, "", MATCH_C_ADDI, 0xffff, match_opcode, INSN_ALIAS ), ("c.nop", 0, ISAC, "Cj", MATCH_C_ADDI, MASK_C_ADDI | MASK_RD, match_opcode, INSN_ALIAS ), ("c.mv", 0, ISAC, "d,CV", MATCH_C_MV, MASK_C_MV, match_c_add_with_hint, 0 ), ("c.lui", 0, ISAC, "d,Cu", MATCH_C_LUI, MASK_C_LUI, match_c_lui_with_hint, 0 ), ("c.li", 0, ISAC, "d,Co", MATCH_C_LI, MASK_C_LI, match_opcode, 0 ), ("c.addi4spn", 0, ISAC, "Ct,Cc,CK", MATCH_C_ADDI4SPN, MASK_C_ADDI4SPN, match_c_addi4spn, 0 ), ("c.addi16sp", 0, ISAC, "Cc,CL", MATCH_C_ADDI16SP, MASK_C_ADDI16SP, match_c_addi16sp, 0 ), ("c.addi", 0, ISAC, "d,Co", MATCH_C_ADDI, MASK_C_ADDI, match_opcode, 0 ), ("c.add", 0, ISAC, "d,CV", MATCH_C_ADD, MASK_C_ADD, match_c_add_with_hint, 0 ), ("c.sub", 0, ISAC, "Cs,Ct", MATCH_C_SUB, MASK_C_SUB, match_opcode, 0 ), ("c.and", 0, ISAC, "Cs,Ct", MATCH_C_AND, MASK_C_AND, match_opcode, 0 ), ("c.or", 0, ISAC, "Cs,Ct", MATCH_C_OR, MASK_C_OR, match_opcode, 0 ), ("c.xor", 0, ISAC, "Cs,Ct", MATCH_C_XOR, MASK_C_XOR, match_opcode, 0 
), ("c.slli", 0, ISAC, "d,C>", MATCH_C_SLLI, MASK_C_SLLI, match_c_slli, 0 ), ("c.srli", 0, ISAC, "Cs,C>", MATCH_C_SRLI, MASK_C_SRLI, match_c_slli, 0 ), ("c.srai", 0, ISAC, "Cs,C>", MATCH_C_SRAI, MASK_C_SRAI, match_c_slli, 0 ), ("c.slli64", 0, ISAC, "d", MATCH_C_SLLI64, MASK_C_SLLI64, match_c_slli64, 0 ), ("c.srli64", 0, ISAC, "Cs", MATCH_C_SRLI64, MASK_C_SRLI64, match_c_slli64, 0 ), ("c.srai64", 0, ISAC, "Cs", MATCH_C_SRAI64, MASK_C_SRAI64, match_c_slli64, 0 ), ("c.andi", 0, ISAC, "Cs,Co", MATCH_C_ANDI, MASK_C_ANDI, match_opcode, 0 ), ("c.addiw", 64, ISAC, "d,Co", MATCH_C_ADDIW, MASK_C_ADDIW, match_rd_nonzero, 0 ), ("c.addw", 64, ISAC, "Cs,Ct", MATCH_C_ADDW, MASK_C_ADDW, match_opcode, 0 ), ("c.subw", 64, ISAC, "Cs,Ct", MATCH_C_SUBW, MASK_C_SUBW, match_opcode, 0 ), ("c.ldsp", 64, ISAC, "d,Cn(Cc)", MATCH_C_LDSP, MASK_C_LDSP, match_rd_nonzero, INSN_DREF|INSN_8_BYTE ), ("c.ld", 64, ISAC, "Ct,Cl(Cs)", MATCH_C_LD, MASK_C_LD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.sdsp", 64, ISAC, "CV,CN(Cc)", MATCH_C_SDSP, MASK_C_SDSP, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.sd", 64, ISAC, "Ct,Cl(Cs)", MATCH_C_SD, MASK_C_SD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.fldsp", 0, ISADC, "D,Cn(Cc)", MATCH_C_FLDSP, MASK_C_FLDSP, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.fld", 0, ISADC, "CD,Cl(Cs)", MATCH_C_FLD, MASK_C_FLD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.fsdsp", 0, ISADC, "CT,CN(Cc)", MATCH_C_FSDSP, MASK_C_FSDSP, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.fsd", 0, ISADC, "CD,Cl(Cs)", MATCH_C_FSD, MASK_C_FSD, match_opcode, INSN_DREF|INSN_8_BYTE ), ("c.flwsp", 32, ISAFC, "D,Cm(Cc)", MATCH_C_FLWSP, MASK_C_FLWSP, match_opcode, INSN_DREF|INSN_4_BYTE ), ("c.flw", 32, ISAFC, "CD,Ck(Cs)", MATCH_C_FLW, MASK_C_FLW, match_opcode, INSN_DREF|INSN_4_BYTE ), ("c.fswsp", 32, ISAFC, "CT,CM(Cc)", MATCH_C_FSWSP, MASK_C_FSWSP, match_opcode, INSN_DREF|INSN_4_BYTE ), ("c.fsw", 32, ISAFC, "CD,Ck(Cs)", MATCH_C_FSW, MASK_C_FSW, match_opcode, INSN_DREF|INSN_4_BYTE ), #/* Supervisor instructions */ 
("csrr", 0, ISAI, "d,E", MATCH_CSRRS, MASK_CSRRS | MASK_RS1, match_opcode, INSN_ALIAS ), ("csrwi", 0, ISAI, "E,Z", MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, INSN_ALIAS ), ("csrsi", 0, ISAI, "E,Z", MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, INSN_ALIAS ), ("csrci", 0, ISAI, "E,Z", MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, INSN_ALIAS ), ("csrw", 0, ISAI, "E,s", MATCH_CSRRW, MASK_CSRRW | MASK_RD, match_opcode, INSN_ALIAS ), ("csrw", 0, ISAI, "E,Z", MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, INSN_ALIAS ), ("csrs", 0, ISAI, "E,s", MATCH_CSRRS, MASK_CSRRS | MASK_RD, match_opcode, INSN_ALIAS ), ("csrs", 0, ISAI, "E,Z", MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, INSN_ALIAS ), ("csrc", 0, ISAI, "E,s", MATCH_CSRRC, MASK_CSRRC | MASK_RD, match_opcode, INSN_ALIAS ), ("csrc", 0, ISAI, "E,Z", MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, INSN_ALIAS ), ("csrrwi", 0, ISAI, "d,E,Z", MATCH_CSRRWI, MASK_CSRRWI, match_opcode, 0 ), ("csrrsi", 0, ISAI, "d,E,Z", MATCH_CSRRSI, MASK_CSRRSI, match_opcode, 0 ), ("csrrci", 0, ISAI, "d,E,Z", MATCH_CSRRCI, MASK_CSRRCI, match_opcode, 0 ), ("csrrw", 0, ISAI, "d,E,s", MATCH_CSRRW, MASK_CSRRW, match_opcode, 0 ), ("csrrw", 0, ISAI, "d,E,Z", MATCH_CSRRWI, MASK_CSRRWI, match_opcode, INSN_ALIAS ), ("csrrs", 0, ISAI, "d,E,s", MATCH_CSRRS, MASK_CSRRS, match_opcode, 0 ), ("csrrs", 0, ISAI, "d,E,Z", MATCH_CSRRSI, MASK_CSRRSI, match_opcode, INSN_ALIAS ), ("csrrc", 0, ISAI, "d,E,s", MATCH_CSRRC, MASK_CSRRC, match_opcode, 0 ), ("csrrc", 0, ISAI, "d,E,Z", MATCH_CSRRCI, MASK_CSRRCI, match_opcode, INSN_ALIAS ), ("uret", 0, ISAI, "", MATCH_URET, MASK_URET, match_opcode, 0 ), ("sret", 0, ISAI, "", MATCH_SRET, MASK_SRET, match_opcode, 0 ), ("hret", 0, ISAI, "", MATCH_HRET, MASK_HRET, match_opcode, 0 ), ("mret", 0, ISAI, "", MATCH_MRET, MASK_MRET, match_opcode, 0 ), ("dret", 0, ISAI, "", MATCH_DRET, MASK_DRET, match_opcode, 0 ), ("sfence.vm", 0, ISAI, "", MATCH_SFENCE_VM, MASK_SFENCE_VM | MASK_RS1, match_opcode, 0 ), 
("sfence.vm", 0, ISAI, "s", MATCH_SFENCE_VM, MASK_SFENCE_VM, match_opcode, 0 ), ("sfence.vma", 0, ISAI, "", MATCH_SFENCE_VMA, MASK_SFENCE_VMA | MASK_RS1 | MASK_RS2, match_opcode, INSN_ALIAS ), ("sfence.vma", 0, ISAI, "s", MATCH_SFENCE_VMA, MASK_SFENCE_VMA | MASK_RS2, match_opcode, INSN_ALIAS ), ("sfence.vma", 0, ISAI, "s,t", MATCH_SFENCE_VMA, MASK_SFENCE_VMA, match_opcode, 0 ), ("wfi", 0, ISAI, "", MATCH_WFI, MASK_WFI, match_opcode, 0 )] # expanded # 16 xxxxxxxxxxxxxxaa != 11 # 32 xxxxxxxxxxxbbb11 != 111 # 48 xxxxxxxxxx011111 # 64 xxxxxxxxx0111111 # 80 xnnnxxxxx1111111 != 111 # 192 x111xxxxx1111111 # print("immI: op2031 is op2031 { local tmp:4 = op2031; export tmp; }") # print("immS: imm is op0711 & op2531 [ imm = (op2531 << 5) | op0711; ] { local tmp:4 = zext(imm); export tmp; }") # print("immSB: imm is op0707 & op0811 & op2530 & op3131 [ imm = (op3131 << 12) | (op2530 << 5) | (op0811 << 1) | (op0707 << 11); ] { local tmp:4 = zext(imm); export tmp; }") # print("immU: imm is op1231 [ imm = (op1231 << 12); ] { local tmp:4 = zext(imm); export tmp; }") # print("immUJ: imm is op1219 & op2020 & op2130 & op3131 [ imm = (op3131 << 20) | (op2130 << 1) | (op2020 << 11) | (op1219 << 12); ] { local tmp:4 = zext(imm); export tmp; }") # print("shamt5: op2024 is op2024 { local tmp:4 = op2024; export tmp; }") # print("shamt6: imm is op2024 & op2525 [ imm = (op2525 << 5) | op2024; ] { local tmp:4 = imm; export tmp; }") # print("fmt: \".s\" is op2526=0 {}") # print("fmt: \".d\" is op2526=1 {}") # print("fmt: \".h\" is op2526=2 {}") # print("fmt: \".q\" is op2526=3 {}") def parse_operand(op): dis = op.operands is16 = op.match & 3 != 3 x = 0 out = "" cons=[] while x < len(dis): if dis[x] == 'C': x += 1 if dis[x] == 'a': out += "cjimm" cons.append("cjimm") elif dis[x] == 'c': out += "sp" cons.append("cop0711=2") cons.append("sp") elif dis[x] == 'i': out += "csimm3" cons.append("csimm3") elif dis[x] == 'o' or dis[x] == 'j': out += "cimmI" cons.append("cimmI") elif dis[x] == 'k': out += 
"clwimm" cons.append("clwimm") elif dis[x] == 'l': out += "cldimm" cons.append("cldimm") elif dis[x] == 'm': out += "clwspimm" cons.append("clwspimm") elif dis[x] == 'n': out += "cldspimm" cons.append("cldspimm") elif dis[x] == 'p': out += "cbimm" cons.append("cbimm") elif dis[x] == 's': out += "cr0709s" cons.append("cr0709s") elif dis[x] == 't': out += "cr0204s" cons.append("cr0204s") elif dis[x] == 'w': out += "cd0709s" cons.append("cd0709s") elif dis[x] == 'x': out += "cr0204s" cons.append("cr0204s") elif dis[x] == 'u' or dis[x] == 'v': out += "cbigimm" cons.append("cbigimm") elif dis[x] == 'z': out += "zero" cons.append("cop0206=0") cons.append("zero") elif dis[x] == 'U': out += "cr0711" cons.append("cr0711") elif dis[x] == 'V': out += "cr0206" cons.append("cr0206") elif dis[x] == 'K': out += "caddi4spnimm" cons.append("caddi4spnimm") elif dis[x] == 'L': out += "caddi16spimm" cons.append("caddi16spimm") elif dis[x] == 'M': out += "cswspimm" cons.append("cswspimm") elif dis[x] == 'N': out += "csdspimm" cons.append("csdspimm") elif dis[x] == '>': out += "c6imm" cons.append("c6imm") elif dis[x] == '<': out += "c5imm" cons.append("c5imm") elif dis[x] == 'T': out += "cfr0206" cons.append("cfr0206") elif dis[x] == 'D': out += "cfr0204s" cons.append("cfr0204s") else: print("BAD C case %c" % (dis[x])) print(op) exit(1) elif dis[x] in [',', '(', ')', '[', ']']: out += dis[x] elif dis[x] == '0': if x+1 == len(dis): out += "0" elif dis[x] == 'b' or dis[x] == 's': if is16: out += "cr1519" cons.append("cr1519") else: out += "r1519" cons.append("r1519") elif dis[x] == 't': if is16: out += "cr2024" cons.append("cr2024") else: out += "r2024" cons.append("r2024") elif dis[x] == 'u': out += "immU" cons.append("immU") elif dis[x] == 'm': out += "frm" cons.append("frm") elif dis[x] == 'P': out += "pred" cons.append("pred") elif dis[x] == 'Q': out += "succ" cons.append("succ") elif dis[x] == 'o': out += "immI" cons.append("immI") elif dis[x] == 'j': out += "immI" 
cons.append("immI") elif dis[x] == 'q': out += "immS" cons.append("immS") elif dis[x] == 'a': out += "immUJ" cons.append("immUJ") elif dis[x] == 'p': out += "immSB" cons.append("immSB") elif dis[x] == 'd': if is16: out += "cd0711" cons.append("cd0711") else: out += "r0711" cons.append("r0711") elif dis[x] == 'z': out += "zero" cons.append("zero") elif dis[x] == '>': out += "shamt6" cons.append("shamt6") elif dis[x] == '<': out += "shamt5" cons.append("shamt5") elif dis[x] == 'S' or dis[x] == 'U': if is16: out += "cfr1519" cons.append("cfr1519") else: out += "fr1519" cons.append("fr1519") elif dis[x] == 'T': if is16: out += "cfr2024" cons.append("cfr2024") else: out += "fr2024" cons.append("fr2024") elif dis[x] == 'D': if is16: out += "cfr0711" cons.append("cfr0711") else: out += "fr0711" cons.append("fr0711") elif dis[x] == 'R': if is16: out += "cfr2731" cons.append("cfr2731") else: out += "fr2731" cons.append("fr2731") elif dis[x] == 'E': out += "csr" cons.append("csr") elif dis[x] == 'Z': if is16: out += "cr1519" cons.append("cr1519") else: out += "r1519" cons.append("r1519") else: print("BAD top case %c" % (dis[x])) print(op) exit(1) x += 1 # print("DISPLAY: %s" % out) return (out, cons) def parse_r(op): funct3 = (op.match >> 12) & ((1<<3) - 1) funct7 = (op.match >> 25) & ((1<<7) - 1) op.bitpattern.append("funct3=0x%x" % funct3) op.bitpattern.append("funct7=0x%x" % funct7) return def parse_i(op): funct3 = (op.match >> 12) & ((1<<3) - 1) op.bitpattern.append("funct3=0x%x" % funct3) return def parse_s(op): funct3 = (op.match >> 12) & ((1<<3) - 1) op.bitpattern.append("funct3=0x%x" % funct3) return def parse_u(op): return def parse_b(op): funct3 = (op.match >> 12) & ((1<<3) - 1) op.bitpattern.append("funct3=0x%x" % funct3) return def parse_j(op): return def parse_misc_mem(op): fm = (op.match >> 28) & ((1<<4) - 1) funct3 = (op.match >> 12) & ((1<<3) - 1) op.bitpattern.append("funct3=0x%x" % funct3) op.bitpattern.append("fm=0x%x" % fm) return def parse_CR(op, rv): 
return def parse_CI(op, rv): x = (op.match >> 13) & 7 if op.bitpattern.count("clwspimm") == 1: op.bitpattern.remove("clwspimm") if x == 2 or (x == 3 and rv == 32): op.display = op.display.replace("clwspimm", "clwspimm54276") op.bitpattern.append("clwspimm54276") elif x == 1 and rv == 128: op.display = op.display.replace("clwspimm", "clwspimm5496") op.bitpattern.append("clwspimm5496") elif (x == 1 and (rv == 32 or rv == 64)) or (x == 3 and (rv == 64 or rv == 128)): op.display = op.display.replace("clwspimm", "clwspimm54386") op.bitpattern.append("clwspimm54386") if op.bitpattern.count("cldspimm") == 1: op.bitpattern.remove("cldspimm") if x == 2 or (x == 3 and rv == 32): op.display = op.display.replace("cldspimm", "cldspimm54276") op.bitpattern.append("cldspimm54276") elif x == 1 and rv == 128: op.display = op.display.replace("cldspimm", "cldspimm5496") op.bitpattern.append("cldspimm5496") elif (x == 1 and (rv == 32 or rv == 64)) or (x == 3 and (rv == 64 or rv == 128)): op.display = op.display.replace("cldspimm", "cldspimm54386") op.bitpattern.append("cldspimm54386") return def parse_CSS(op, rv): x = (op.match >> 13) & 7 if op.bitpattern.count("cswspimm") == 1: op.bitpattern.remove("cswspimm") if (x == 5 and (rv == 32 or rv == 64)) or (x == 7 and (rv == 128 or rv == 64)): op.display = op.display.replace("cswspimm", "cswspimm5386") op.bitpattern.append("cswspimm5386") elif (x == 5 and rv == 128): op.display = op.display.replace("cswspimm", "cswspimm5496") op.bitpattern.append("cswspimm5496") elif (x == 7 and rv == 32) or (x == 6): op.display = op.display.replace("cswspimm", "cswspimm5276") op.bitpattern.append("cswspimm5276") if op.bitpattern.count("csdspimm") == 1: op.bitpattern.remove("csdspimm") if (x == 5 and (rv == 32 or rv == 64)) or (x == 7 and (rv == 128 or rv == 64)): op.display = op.display.replace("csdspimm", "csdspimm5386") op.bitpattern.append("csdspimm5386") elif (x == 5 and rv == 128): op.display = op.display.replace("csdspimm", "csdspimm5496") 
op.bitpattern.append("csdspimm5496") elif (x == 7 and rv == 32) or (x == 6): op.display = op.display.replace("csdspimm", "csdspimm5276") op.bitpattern.append("csdspimm5276") return def parse_CIW(op, rv): return def parse_CL(op, rv): x = (op.match >> 13) & 7 if op.bitpattern.count("cldspimm") == 1: op.bitpattern.remove("cldspimm") if x == 2 or (x == 3 and rv == 32): op.display = op.display.replace("cldspimm", "cldspimm54276") op.bitpattern.append("cldspimm54276") elif x == 1 and rv == 128: op.display = op.display.replace("cldspimm", "cldspimm5496") op.bitpattern.append("cldspimm5496") elif (x == 1 and (rv == 32 or rv == 64)) or (x == 3 and (rv == 64 or rv == 128)): op.display = op.display.replace("cldspimm", "cldspimm54386") op.bitpattern.append("cldspimm54386") if op.bitpattern.count("clwspimm") == 1: op.bitpattern.remove("clwspimm") if x == 2 or (x == 3 and rv == 32): op.display = op.display.replace("clwspimm", "clwspimm54276") op.bitpattern.append("clwspimm54276") elif x == 1 and rv == 128: op.display = op.display.replace("clwspimm", "clwspimm5496") op.bitpattern.append("clwspimm5496") elif (x == 1 and (rv == 32 or rv == 64)) or (x == 3 and (rv == 64 or rv == 128)): op.display = op.display.replace("clwspimm", "clwspimm54386") op.bitpattern.append("clwspimm54386") if op.bitpattern.count("cldimm") == 1: op.bitpattern.remove("cldimm") if None == rv: op.display = op.display.replace("cldimm", "cldimm5326") op.bitpattern.append("cldimm5326") elif 32 == rv and (x == 3 or x == 7): op.display = op.display.replace("cldimm", "cldimm5326") op.bitpattern.append("cldimm5326") elif (32 == rv or 64 == rv) and (x == 1 or x == 5): op.display = op.display.replace("cldimm", "cldimm5376") op.bitpattern.append("cldimm5376") elif (128 == rv or 64 == rv) and (x == 3 or x == 7): op.display = op.display.replace("cldimm", "cldimm5376") op.bitpattern.append("cldimm5376") elif 128 == rv and (x == 1 or x == 7): op.display = op.display.replace("cldimm", "cldimm54876") 
op.bitpattern.append("cldimm54876") else: print("BAD CL rv %r" % (rv)) exit(1) if op.bitpattern.count("clwimm") == 1: op.bitpattern.remove("clwimm") if None == rv: op.display = op.display.replace("clwimm", "clwimm5326") op.bitpattern.append("clwimm5326") elif 32 == rv and (x == 3 or x == 7): op.display = op.display.replace("clwimm", "clwimm5326") op.bitpattern.append("clwimm5326") elif (32 == rv or 64 == rv) and (x == 1 or x == 5): op.display = op.display.replace("clwimm", "clwimm5376") op.bitpattern.append("clwimm5376") elif (128 == rv or 64 == rv) and (x == 3 or x == 7): op.display = op.display.replace("clwimm", "clwimm5376") op.bitpattern.append("clwimm5376") elif 128 == rv and (x == 1 or x == 7): op.display = op.display.replace("clwimm", "clwimm54876") op.bitpattern.append("clwimm54876") else: print("BAD CL rv %r" % (rv)) exit(1) return def parse_CS(op, rv): x = (op.match >> 13) & 7 if op.bitpattern.count("clwimm") == 1: op.bitpattern.remove("clwimm") if None == rv: op.display = op.display.replace("clwimm", "clwimm5326") op.bitpattern.append("clwimm5326") elif 32 == rv and (x == 3 or x == 7): op.display = op.display.replace("clwimm", "clwimm5326") op.bitpattern.append("clwimm5326") elif (32 == rv or 64 == rv) and (x == 1 or x == 5): op.display = op.display.replace("clwimm", "clwimm5376") op.bitpattern.append("clwimm5376") elif (128 == rv or 64 == rv) and (x == 3 or x == 7): op.display = op.display.replace("clwimm", "clwimm5376") op.bitpattern.append("clwimm5376") elif 128 == rv and (x == 1 or x == 7): op.display = op.display.replace("clwimm", "clwimm54876") op.bitpattern.append("clwimm54876") else: print("BAD CL rv %r" % (rv)) exit(1) if op.bitpattern.count("cldimm") == 1: op.bitpattern.remove("cldimm") if None == rv: op.display = op.display.replace("cldimm", "cldimm5326") op.bitpattern.append("cldimm5326") elif 32 == rv and (x == 3 or x == 7): op.display = op.display.replace("cldimm", "cldimm5326") op.bitpattern.append("cldimm5326") elif (32 == rv or 64 == rv) 
and (x == 1 or x == 5): op.display = op.display.replace("cldimm", "cldimm5376") op.bitpattern.append("cldimm5376") elif (128 == rv or 64 == rv) and (x == 3 or x == 7): op.display = op.display.replace("cldimm", "cldimm5376") op.bitpattern.append("cldimm5376") elif 128 == rv and (x == 1 or x == 7): op.display = op.display.replace("cldimm", "cldimm54876") op.bitpattern.append("cldimm54876") else: print("BAD CL rv %r" % (rv)) exit(1) return def parse_CA(op, rv): return def parse_CB(op, rv): return def parse_CJ(op, rv): return def opcode_map_c(op): ''' CR |funct4 | cr0711 | cr0206 | op | CI |funct3 | cop1212 | cr0711 | cop0206 | op | CSS |funct3 | cop0712 | cr0206 | op | CIW |funct3 | cop0512 | cr0204s | op | CL |funct3 | cop1012 | cs0709s | cop0506 | cr0204s | op | CS |funct3 | cop1012 | cr0709s | cop0506 | cr0204s | op | CA |funct6 | cr0709s | funct2 | cr0204s | op | CB |funct3 | off1012 | cr0709s | off0206 | op | CJ |funct3 | target | op | ''' x = (op.match >> 13) & 7 y = op.match & 3 op.bitpattern.append("cop0001=0x%x" % y) op.bitpattern.append("cop1315=0x%x" % x) # print("CMAP: RV32, RV64, RV128") if x == 0 and y == 0: # ADDI4SPN parse_CIW(op, None) elif x == 0 and y == 1: # ADDI parse_CI(op, None) elif x == 0 and y == 2: # SLLI parse_CI(op, None) elif x == 1 and y == 0: # FLD FLD LQ # print("CMAP: FLD FLD LQ") if op.name.find("fld") >= 0: print("#TODO 32 64") parse_CL(op, 32) parse_CL(op, 64) elif op.name.find("lq") >= 0: parse_CL(op, 128) else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 1 and y == 1: # JAL ADDIW ADDIW # print("CMAP: JAL ADDIW ADDIW") if op.name.find("jal") >= 0: parse_CJ(op, 32) elif op.name.find("addiw") >= 0: print("#TODO 32 64") parse_CI(op, 64) parse_CI(op, 128) elif op.name == "sext.w": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "addw": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 1 and 
y == 2: # LW LI LWSP # print("CMAP: FLDSP FLDSP LQSP") if op.name.find("fldsp") >= 0: print("#TODO 32 64") parse_CI(op, 32) parse_CI(op, 64) elif op.name.find("lqsp") >= 0: parse_CI(op, 128) elif op.name == "fld": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 2 and y == 0: # FW parse_CL(op, None) elif x == 2 and y == 1: # LI # print("CMAP: LI") if op.name.find("li") >= 0: parse_CI(op, None) else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 2 and y == 2: # LWSP parse_CI(op, None) elif x == 3 and y == 0: # FLW LD LD # print("CMAP: FLW LD LD") if op.name.find("flw") >= 0: parse_CL(op, 32) elif op.name.find("ld") >= 0: print("#TODO 64 128") parse_CL(op, 64) parse_CL(op, 128) else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 3 and y == 1: # LUI ADDI16SP # print("CMAP: LUI ADDI16SP") if op.name.find("lui") >= 0: parse_CI(op, None) elif op.name.find("addi16sp") >= 0: parse_CJ(op, None) elif op.name == "li": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "addi": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "add": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 3 and y == 2: # FLWSP LDSP LDSP # print("CMAP: FLWSP LDSP LDSP") if op.name.find("flwsp") >= 0: parse_CI(op, 32) elif op.name.find("ldsp") >= 0: print("#TODO 64 128") parse_CI(op, 64) parse_CI(op, 128) elif op.name == "ld": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "flw": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 4 and y == 0: # RESERVED # print("CMAP: RESERVED C 3 0") print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif x == 4 and y == 1: # MISC-ALU # print("CMAP: 
MISC-ALU") if (op.match >> 10) & 3 == 3: parse_CA(op, None) else: parse_CB(op, None) pass elif x == 4 and y == 2: # JR JALR MV ADD # print("CMAP: JR JALR MV ADD") if op.name.find("jr") >= 0: parse_CI(op, None) elif op.name.find("jalr") >= 0: parse_CI(op, None) elif op.name.find("mv") >= 0: parse_CR(op, None) elif op.name.find("add") >= 0: parse_CR(op, None) elif op.name == "ebreak": parse_CJ(op, None) elif op.name == "c.ebreak": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "ret": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "move": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 5 and y == 0: # FSD FSD SQ # print("CMAP: FSD FSD SQ") if op.name.find("fsd") >= 0: print("#TODO 32 64") parse_CS(op, 32) parse_CS(op, 64) elif op.name.find("sq") >= 0: parse_CS(op, 128) else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 5 and y == 1: # J # print("CMAP: J") if op.name.find("j") >= 0: parse_CJ(op, None) else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 5 and y == 2: # FSDSP FSDSP SQSP # print("CMAP: FSDSP FSDSP SQSP") if op.name.find("fsdsp") >= 0: print("#TODO 32 64") parse_CSS(op, 32) parse_CSS(op, 64) elif op.name.find("sqsp") >= 0: parse_CSS(op, 128) elif op.name == "fsd": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 6 and y == 0: # SW # print("CMAP: SW") if op.name.find("sw") >= 0: parse_CS(op, None) else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass elif x == 6 and y == 1: # BEQZ parse_CB(op, None) elif x == 6 and y == 2: # SWSP parse_CSS(op, None) elif x == 7 and y == 0: # FSW SD SD # print("CMAP: FSW SD SD") if op.name.find("fsw") >= 0: parse_CS(op, 32) elif op.name.find("sd") >= 0: print("#TODO 64 128") parse_CS(op, 64) parse_CS(op, 128) else: print("CMAP -- 
%s" % (op.name)) print(op) exit(1) pass elif x == 7 and y == 1: # BNEZ parse_CB(op, None) elif x == 7 and y == 2: # # print("CMAP: FSWSP SDSP SDSP") if op.name.find("fswsp") >= 0: parse_CSS(op, 32) elif op.name.find("sdsp") >= 0: print("#TODO 64 128") parse_CSS(op, 64) parse_CSS(op, 128) elif op.name == "sd": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass elif op.name == "fsw": print("#TODO %s %s" % (op.display, ' & '.join(op.bitpattern))) pass else: print("CMAP -- %s" % (op.name)) print(op) exit(1) pass else: print("CBAD X %x Y %x" % (x, y)) print(op) exit(1) def opcode_map(op): x = (op.match >> 2) & 7 y = (op.match >> 5) & 3 op.bitpattern.append("op0001=0x3") op.bitpattern.append("op0204=0x%x" % x) op.bitpattern.append("op0506=0x%x" % y) if x == 0: if y == 0: # LOAD # print("MAP: LOAD") parse_i(op) pass elif y == 1: # STORE # print("MAP: STORE") parse_s(op) pass elif y == 2: # MADD # print("MAP: MADD") parse_r(op) pass elif y == 3: # BRANCH # print("MAP: BRANCH") parse_b(op) pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) elif x == 1: if y == 0: # LOAD-FP # print("MAP: LOAD-FP") parse_i(op) pass elif y == 1: # STORE-FP # print("MAP: STORE-FP") parse_s(op) pass elif y == 2: # MSUB # print("MAP: MSUB") parse_r(op) pass elif y == 3: # JALR # print("MAP: JALR") parse_i(op) pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) elif x == 2: if y == 0: # CUSTOM0 print("MAP: CUSTOM0") pass elif y == 1: # CUSTOM1 print("MAP: CUSTOM1") pass elif y == 2: # NMSUB # print("MAP: NMSUB") parse_r(op) pass elif y == 3: # RESERVED print("MAP: RESERVED 2 3") pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) elif x == 3: if y == 0: # MISC-MEM # print("MAP: MISC-MEM") parse_misc_mem(op) pass elif y == 1: # AMO # print("MAP: AMO") parse_r(op) pass elif y == 2: # NMADD # print("MAP: NMADD") parse_r(op) pass elif y == 3: # JAL # print("MAP: JAL") parse_j(op) pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) elif x == 4: if y == 
0: # OP-IMM # print("MAP: OP-IMM") parse_i(op) elif y == 1: # OP # print("MAP: OP") parse_r(op) pass elif y == 2: # OP-FP # print("MAP: OP-FP") parse_r(op) pass elif y == 3: # SYSTEM # print("MAP: SYSTEM") parse_i(op) pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) elif x == 5: if y == 0: # AUIPC # print("MAP: AUIPC") parse_u(op) pass elif y == 1: # LUI # print("MAP: LUI") parse_u(op) pass elif y == 2: # RESERVED print("MAP: RESERVED 5 2") pass elif y == 3: # RESERVED print("MAP: RESERVED 5 3") pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) elif x == 6: if y == 0: # OP-IMM-32 # print("MAP: OP-IMM-32") parse_i(op) pass elif y == 1: # OP-32 # print("MAP: OP-32") parse_r(op) pass elif y == 2: # CUSTOM2 print("MAP: CUSTOM2") pass elif y == 3: # CUSTOM3 print("MAP: CUSTOM3") pass else: print("BAD Y %x X %x" % (y,x)) print(op) exit(1) else: print("BAD X %x" % (x)) print(op) exit(1) def find_gaps(op): """Account for all the bits in the pattern dummy style just added as they got added""" gap = { 'op0001': (0,1), 'op0204': (2,4), 'op0506': (5,6), 'op0707': (7,7), 'op0711': (7,11), 'r0711': (7,11), 'fr0711': (7,11), 'op0811': (8,11), 'op1214': (12,14), 'funct3': (12,14), 'op1219': (12,19), 'op1231': (12,31), 'sop1231': (12,31), 'r1519': (15,19), 'fr1519': (15,19), 'fd1519': (15,19), 'op1527': (15,27), 'op1531': (15,31), 'op2020': (20,20), 'succ': (20,23), 'op2024': (20,24), 'r2024': (20,24), 'fr2024': (20,24), 'op2031': (20,31), 'sop2031': (20,31), 'op2130': (21,30), 'pred': (24,27), 'op2525': (25,25), 'op2526': (25,26), 'op2530': (25,30), 'op2531': (25,31), 'sop2531': (25,31), 'op2631': (26,31), 'funct7': (25,31), 'op2731': (27,31), 'r2731': (27,31), 'fr2731': (27,31), 'fm': (28,31), 'sop3131': (31,31), 'cop0001': (0,1), 'cop0202': (2,2), 'cop0203': (2,3), 'cop0204': (2,4), 'cr0204s': (2,4), 'cfr0204s': (2,4), 'cop0205': (2,5), 'cop0206': (2,6), 'cr0206': (2,6), 'cfr0206': (2,6), 'cop0212': (2,12), 'cop0304': (3,4), 'cop0305': (3,5), 'cop0406': 
# Content: body of find_gaps(op) — maps each field name used in op.bitpattern
# to its bit range (tuples) or to a composite of other fields (strings), ORs
# all covered bits into `pattern`, then scans the reversed bit string for runs
# of uncovered bits and appends opNNNN=0x.. / copNNNN=0x.. constraints taken
# from op.match & op.mask. 16-bit scan width is used for compressed encodings
# (op.match & 3 != 3), 32-bit otherwise. Tail: head of parse().
# NOTE(review): the dict literal repeats keys 'frm' (x6) and 'fmt' (x4); in
# Python only the last duplicate survives ("op1214=7" / "op2526=3") — confirm
# this is intended, the earlier values are silently discarded.
# NOTE(review): `if t0.find('0') > 0` misses a '0' at index 0 — looks like it
# should be `>= 0` (same concern for `start > 0` in the outer scan); verify
# against the generated .sinc output before changing.
(4,6), 'cop0505': (5,5), 'cop0506': (5,6), 'cop0512': (5,12), 'cop0606': (6,6), 'cop0707': (7,7), 'cop0708': (7,8), 'cop0709': (7,9), 'cr0709s': (7,9), 'cd0709s': (7,9), 'cfr0709s': (7,9), 'cop0710': (7,10), 'cop0711': (7,11), 'cr0711': (7,11), 'cd0711': (7,11), 'cfr0711': (7,11), 'cop0712': (7,12), 'cop0808': (8,8), 'cop0910': (9,10), 'cop0912': (9,12), 'cop1010': (10,10), 'cop1011': (10,11), 'cop1012': (10,12), 'cop1111': (11,11), 'cop1112': (11,12), 'cop1212': (12,12), 'scop1212': (12,12), 'cop1315': (13,15), 'immI': "sop2031", 'immS': "op0711 & sop2531", 'immSB': "op0707 & op0811 & op2530 & sop3131", 'immU': "sop1231", 'immUJ': "op1219 & op2020 & op2130 & sop3131", 'shamt5': "op2024", 'shamt6': "op2024 & op2525", 'frm': "op1214=0", 'frm': "op1214=1", 'frm': "op1214=2", 'frm': "op1214=3", 'frm': "op1214=4", 'frm': "op1214=7", 'fmt': "op2526=0", 'fmt': "op2526=1", 'fmt': "op2526=2", 'fmt': "op2526=3", 'csr1': "op2031", 'csr2': "op2031", 'csr4': "op2031", 'csr8': "op2031", 'csr': "op2031", 'cimmI': "cop1212 & cop0204 & cop0506", 'cbimm': "scop1212 & cop1011 & cop0506 & cop0304 & cop0202", 'cjimm': "scop1212 & cop1111 & cop0910 & cop0808 & cop0707 & cop0606 & cop0305 & cop0202", 'c6imm': "cop1212 & cop0206", 'cbigimm': "scop1212 & cop0206", 'caddi4spnimm': "cop1112 & cop0710 & cop0606 & cop0505", 'caddi16spimm': "scop1212 & cop0606 & cop0505 & cop0304 & cop0202", 'cldimm': "cop1012 & cop0506", 'cldimm5376': "cop1012 & cop0506", 'cldimm54876': "cop1112 & cop1010 & cop0506", 'cldimm5326': "cop1012 & cop0606 & cop0505", 'clwimm': "cop1012 & cop0506", 'clwimm5376': "cop1012 & cop0506", 'clwimm54876': "cop1112 & cop1010 & cop0506", 'clwimm5326': "cop1012 & cop0606 & cop0505", 'cldspimm': "cop1212 & cop0506 & cop0204", 'cldspimm54386': "cop1212 & cop0506 & cop0204", 'cldspimm5496': "cop1212 & cop0606 & cop0205", 'cldspimm54276': "cop1212 & cop0406 & cop0203", 'clwspimm': "cop1212 & cop0506 & cop0204", 'clwspimm54386': "cop1212 & cop0506 & cop0204", 'clwspimm5496':
"cop1212 & cop0606 & cop0205", 'clwspimm54276': "cop1212 & cop0406 & cop0203", 'csdspimm': "cop0709 & cop1012", 'csdspimm5386': "cop0709 & cop1012", 'csdspimm5496': "cop0710 & cop1112", 'csdspimm5276': "cop0708 & cop0912", 'cswspimm': "cop0709 & cop1012", 'cswspimm5386': "cop0709 & cop1012", 'cswspimm5496': "cop0710 & cop1112", 'cswspimm5276': "cop0708 & cop0912", 'sp': None, 'zero': None } pattern = 0x0 for x in op.bitpattern: x = x.split('=')[0] if x not in gap.keys(): print("GAP: %r" % x) exit(1) x = gap[x] if isinstance(x, str): y = x.split('&') for _ in y: w = gap[_.strip().split('=')[0]] pattern |= (((1 << (w[1] - w[0] + 1)) - 1) << w[0]) if isinstance(x, tuple): pattern |= (((1 << (x[1] - x[0] + 1)) - 1) << x[0]) # print("# %s 10987654321098765432109876543210" % (op.name)) # print("# %s %s" % (op.name, '{:032b}'.format(pattern))) z = '{:032b}'.format(pattern)[::-1] z0 = '{:032b}'.format(op.match)[::-1] z1 = '{:032b}'.format(op.mask)[::-1] # print("# %s %s" % (op.name, '{:032b}'.format(op.match))) # print("# %s %s" % (op.name, '{:032b}'.format(op.mask))) x = 0 y = 32 if op.match & 3 == 3 else 16 while x < y: start = z.find('0', x) if start > 0 and start < y: # print("# START %d %d %d" % (x, start, y)) end = z.find('1', start) if end < 0: end = y # print("# chunk %d - %d (%d) " % (start, end - 1, x)) t0 = z1[start:end] # print("# THING %s %s" % (t0, z1)) if t0.find('0') > 0: print("damnit %s" % (z1[start:end])) exit(1) t0 = int(z0[start:end][::-1], 2) t1 = int(z1[start:end][::-1], 2) # print("# %d %d" % (t0, t1)) if 16 == y: bp = "cop%02d%02d=0x%x" % (start, end - 1, t0 & t1) else: bp = "op%02d%02d=0x%x" % (start, end - 1, t0 & t1) # print("# BP = %s " % bp) op.bitpattern.append(bp) x = end + 1 else: # print("#END 1") break def parse(): sorted_opcodes = sorted(opcodes, key=lambda x: x[0]) for op in sorted_opcodes: size = 0 op = OpCode(op) if op.xlen: print("@if defined(RISCV%d)" % (op.xlen)) if INSN_MACRO == op.pinfo: print("#TODO MACRO") print("# %s" % op) if
op.xlen: print("@endif") print("") continue if INSN_ALIAS & op.pinfo != 0: print("#TODO ALIAS") print("# %s" % op) op.display, op.bitpattern = parse_operand(op) op.bitpattern = list(set(op.bitpattern)) if op.match & 3 == 3: opcode_map(op) else: opcode_map_c(op) find_gaps(op) if INSN_ALIAS & op.pinfo != 0: print("#:%s %s is %s\n#{\n#}" % (op.name, op.display, ' & '.join(op.bitpattern))) else: print(":%s %s is %s\n{\n}" % (op.name, op.display, ' & '.join(op.bitpattern))) if op.xlen: print("@endif") print("") return def make_unique(): sorted_opcodes = sorted(opcodes, key=lambda x: x[0]) for opX in sorted_opcodes: print("-------------------------") opX = OpCode(opX) print("X: %s" % (opX)) for opY in sorted_opcodes: opY = OpCode(opY) if opY == opX: continue if opX.mask != opY.mask: continue if opX.match != opY.match: continue if opX.xlen != opY.xlen: continue print("Y: %s" % (opY)) if __name__ == "__main__": parse() # make_unique() ================================================ FILE: pypcode/processors/Sparc/data/languages/Sparc.dwarf ================================================ ================================================ FILE: pypcode/processors/Sparc/data/languages/Sparc.opinion ================================================ ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcV9.ldefs ================================================ Sparc V9 32-bit Sparc V9 64-bit ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcV9.pspec ================================================ ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcV9.sinc ================================================ # SLA specification file for SPARC/64 define endian=big; define alignment=4; define space ram type=ram_space size=$(SIZE) default; define space register type=register_space size=4; define register offset=0 size=$(SIZE) [ g0 g1 g2 g3 g4 g5 
# Content (SparcV9.sinc): remainder of the integer register bank, state/ASR
# registers (PC, nPC, TICK, Y, CCR, ...), split condition-code flag bytes
# (x_* for 64-bit xcc, i_* for 32-bit icc), ASI/FPRS/fsr, the four fcc
# varnodes modeling fsr condition fields, privileged/hypervisor register
# banks, the 8-window x 24-register backing store (w010..w737) used by the
# save/restore macros, the overlapping fs/fd/fq floating-point register
# views at offset 0x2000, pcodeop declarations, the 32-bit instruction token
# with all its fields, and the attach of rd/rs1/rs2 to the g/o/l/i names.
g6 g7 o0 o1 o2 o3 o4 o5 sp o7 l0 l1 l2 l3 l4 l5 l6 l7 i0 i1 i2 i3 i4 i5 fp i7 ]; # these are save locations for implementing register windows # define register offset=0x500 size=$(SIZE) [ s_l0 s_l1 s_l2 s_l3 s_l4 s_l5 s_l6 s_l7 s_i0 s_i1 s_i2 s_i3 s_i4 s_i5 s_fp s_i7 ]; define register offset=0x1000 size=$(SIZE) [ PC nPC _ TICK Y CCR _ PCR PIC GSR SOFTINT_SET SOFTINT_CLR SOFTINT TICK_CMPR STICK STICK_CMPR ]; define register offset=0x1100 size=$(SIZE) [ asr7 asr8 asr9 asr10 asr11 asr12 asr13 asr14 asr15 asr16 asr17 asr18 asr19 asr20 asr21 asr22 asr23 asr24 asr25 asr26 asr27 asr28 asr29 asr30 asr31 ]; define register offset=0x3000 size=1 [ x_nf x_zf x_vf x_cf i_nf i_zf i_vf i_cf ]; define register offset=0x4000 size=1 [ ASI ]; define register offset=0x4008 size=1 [ FPRS ]; define register offset=0x5000 size=$(SIZE) [ fsr ]; #fcc0 is bits 10 and 11 of fsr #fcc1 is bits 32 and 33 of fsr (64 bit only) #fcc2 is bits 34 and 35 of fsr (64 bit only) #fcc3 is bits 36 and 37 of fsr (64 bit only) #model these as separate 1-byte varnodes define register offset=0x5008 size=1 [ fcc0 fcc1 fcc2 fcc3 ]; define register offset=0x5010 size=1 [ didrestore ]; define register offset=0x5020 size=1 [ DECOMPILE_MODE ]; # Fake register define register offset=0x6000 size=$(SIZE) [ TPC1 TPC2 TPC3 TPC4 TNPC1 TNPC2 TNPC3 TNPC4 TSTATE1 TSTATE2 TSTATE3 TSTATE4 TT1 TT2 TT3 TT4 TCK TBA PSTATE TL PIL CWP CANSAVE CANRESTORE CLEANWIN OTHERWIN WSTATE FQ VER GL ]; define register offset=0x7000 size=$(SIZE) [ HPSTATE1 HPSTATE2 HPSTATE3 HPSTATE4 HTSTATE1 HTSTATE2 HTSTATE3 HTSTATE4 RESV2_1 RESV2_2 RESV2_3 RESV2_4 HINTP1 HINTP2 HINTP3 HINTP4 RESV4_1 RESV4_2 RESV4_3 RESV4_4 HTBA1 HTBA2 HTBA3 HTBA4 HVER1 HVER2 HVER3 HVER4 # TODO: actually RESV 6 - 29 registers... RESV30_1 RESV30_2 RESV30_3 RESV30_4 HSTICK_CMPR1 HSTICK_CMPR2 HSTICK_CMPR3 HSTICK_CMPR4 ]; # A window is 24 registers (96 or 192 bytes), most processors have 7 or 8.
(g0->g7,o0->o7,l0->o7,i0->i7) # When the window is overflowed the data must be purged to some backup memory, via user # supplied function attached to a signal handler. # When the window is underflowed the data must be read from some backup memory, via user # supplied function attached to a signal handler. # There are 2 basic strategies we figured for this. # One, create a bank of register space and read and write to it in a way that simulates # how the sparc would really work, but the symbolic names become indexes. # Two, save and restore logic does all the work. # window index is ((CWP+1)%NWINDOWS) # CWP is an index from 0 to N of the windows. # Size of CWP is implementation dependent (must be > 5 bits). # inputs i0 i1 i2 i3 i4 i5 fp i7 # locals l0 l1 l2 l3 l4 l5 l6 l7 # output o0 o1 o2 o3 o4 o5 sp o7 # fp w016 w036 w126 w236 w316 w336 w416 w436 w516 w536 # sp w036 w126 w236 w316 w336 w416 w436 w616 w536 # i7 w017 w037 w127 w217 w237 w327 w417 # o7 w037 w127 w217 w237 w327 w417 define register offset=0x8000 size=$(SIZE) [ w010 w011 w012 w013 w014 w015 w016 w017 w020 w021 w022 w023 w024 w025 w026 w027 w030 w031 w032 w033 w034 w035 w036 w037 w110 w111 w112 w113 w114 w115 w116 w117 w120 w121 w122 w123 w124 w125 w126 w127 w130 w131 w132 w133 w134 w135 w136 w137 w210 w211 w212 w213 w214 w215 w216 w217 w220 w221 w222 w223 w224 w225 w226 w227 w230 w231 w232 w233 w234 w235 w236 w237 w310 w311 w312 w313 w314 w315 w316 w317 w320 w321 w322 w323 w324 w325 w326 w327 w330 w331 w332 w333 w334 w335 w336 w337 w410 w411 w412 w413 w414 w415 w416 w417 w420 w421 w422 w423 w424 w425 w426 w427 w430 w431 w432 w433 w434 w435 w436 w437 w510 w511 w512 w513 w514 w515 w516 w517 w520 w521 w522 w523 w524 w525 w526 w527 w530 w531 w532 w533 w534 w535 w536 w537 w610 w611 w612 w613 w614 w615 w616 w617 w620 w621 w622 w623 w624 w625 w626 w627 w630 w631 w632 w633 w634 w635 w636 w637 w710 w711 w712 w713 w714 w715 w716 w717 w720 w721 w722 w723 w724 w725 w726 w727 w730 w731 w732 w733 w734 w735 w736 w737
]; # Floating-point registers define register offset=0x2000 size=4 [ fs0 fs1 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 fs12 fs13 fs14 fs15 fs16 fs17 fs18 fs19 fs20 fs21 fs22 fs23 fs24 fs25 fs26 fs27 fs28 fs29 fs30 fs31 ]; define register offset=0x2000 size=8 [ fd0 fd2 fd4 fd6 fd8 fd10 fd12 fd14 fd16 fd18 fd20 fd22 fd24 fd26 fd28 fd30 fd32 fd34 fd36 fd38 fd40 fd42 fd44 fd46 fd48 fd50 fd52 fd54 fd56 fd58 fd60 fd62 ]; define register offset=0x2000 size=16 [ fq0 fq4 fq8 fq12 fq16 fq20 fq24 fq28 fq32 fq36 fq40 fq44 fq48 fq52 fq56 fq60 ]; define pcodeop segment; define pcodeop sw_trap; define pcodeop reset; define token instr(32) op = (30,31) disp30 = ( 0,29) signed udisp22 = ( 0,21) disp22 = ( 0,21) signed disp19 = ( 0,18) signed d16lo = ( 0,13) d16hi = (20,21) signed op2 = (22,24) a = (29,29) fpc = (27,29) cond = (25,28) cond4 = (14,17) rcond2 = (25,27) cc0 = (20,20) cc1 = (21,21) fccn = (20,21) fccn2 = (25,26) cc0_3 = (25,25) cc1_3 = (26,26) cc0_4 = (11,11) cc1_4 = (12,12) fccn_4 = (11,12) cc2_4 = (18,18) p = (19,19) rd = (25,29) rd_d = (25,29) rd_asr = (25,29) rd_zero = (25,29) fsrd = (25,29) fdrd = (25,29) fqrd = (25,29) prd = (25,29) op3 = (19,24) rs1 = (14,18) rs1_zero = (14,18) rs_asr = (14,18) prs1 = (14,18) fsrs1 = (14,18) fdrs1 = (14,18) fqrs1 = (14,18) i = (13,13) x = (12,12) rcond3 = (10,12) rs2 = ( 0, 4) rs2_zero = ( 0, 4) fsrs2 = ( 0, 4) fdrs2 = ( 0, 4) fqrs2 = ( 0, 4) shcnt32 = ( 0, 4) shcnt64 = ( 0, 5) simm13 = ( 0,12) signed simm11 = ( 0,10) signed simm10 = ( 0, 9) signed imm_asi = ( 5,12) cmask = ( 4, 6) mmask = ( 0, 3) opf = ( 5,13) opf5 = ( 5, 9) opf6 = ( 5,10) opf_cc = (11,13) opf_low = ( 5,10) opf_low_5_9 = ( 5,9) fcn = (25,29) swtrap = ( 0, 6) bit28 = (28,28) const22 = ( 0,21) bit13 = (13,13) bit18 = (18,18) ; attach variables [ rd rs1 rs2 ] [ g0 g1 g2 g3 g4 g5 g6 g7 o0 o1 o2 o3 o4 o5 sp o7 l0 l1 l2 l3 l4 l5 l6 l7 i0 i1 i2 i3 i4 i5 fp i7 ]; @if SIZE=="4" # The ldd, ldda, std, and stda insns access a pair of regs define register offset=0 size=8 [
# Content: 32-bit-only register-pair bank for ldd/std, fcc attach, the
# NUMREGWINS/REGWINSZ/LOCALOFF/OUTOFF defines, and the save()/restore()
# macros that spill/refill the current window to the w010.. backing store
# (skipped when DECOMPILE_MODE is set), plus the RS1/RS2 subconstructors
# that read g0 as constant 0.
# NOTE(review): in save(), the slot-2 local store assigns l3 twice
# ("...+2+$(LOCALOFF))*$(SIZE)) = l3;" then "+3...= l3;") — l2 is never
# saved; almost certainly a copy-paste bug, should be "= l2;".
# NOTE(review): in restore(), l7 is reloaded from slot "+6+$(LOCALOFF)"
# (same slot as l6); should be "+7+$(LOCALOFF)". Both fixes need a SLEIGH
# recompile to verify, so they are flagged here rather than edited in place.
g0_1 g2_3 g4_5 g6_7 o0_1 o2_3 o4_5 sp_7 l0_1 l2_3 l4_5 l6_7 i0_1 i2_3 i4_5 fp_7 ]; attach variables [ rd_d ] [ g0_1 _ g2_3 _ g4_5 _ g6_7 _ o0_1 _ o2_3 _ o4_5 _ sp_7 _ l0_1 _ l2_3 _ l4_5 _ l6_7 _ i0_1 _ i2_3 _ i4_5 _ fp_7 _ ]; @endif attach variables [ fccn fccn2 fccn_4 ] [ fcc0 fcc1 fcc2 fcc3 ]; #attach names [ rd rs1 rs2 ] [ "%g0" "%g1" "%g2" "%g3" "%g4" "%g5" "%g6" "%g7" # "%o0" "%o1" "%o2" "%o3" "%o4" "%o5" "%sp" "%o7" # "%l0" "%l1" "%l2" "%l3" "%l4" "%l5" "%l6" "%l7" # "%i0" "%i1" "%i2" "%i3" "%i4" "%i5" "%fp" "%i7" ]; # Window register table accessors =================================== @define NUMREGWINS 8 @define REGWINSZ 16 @define LOCALOFF 8 @define OUTOFF 16 # copy oN to iN # CWP++ macro save() { if (DECOMPILE_MODE) goto ; # Save inputs *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+0)*$(SIZE)) = i0; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+1)*$(SIZE)) = i1; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+2)*$(SIZE)) = i2; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+3)*$(SIZE)) = i3; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+4)*$(SIZE)) = i4; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+5)*$(SIZE)) = i5; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+6)*$(SIZE)) = fp; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+7)*$(SIZE)) = i7; # Save local *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+0+$(LOCALOFF))*$(SIZE)) = l0; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+1+$(LOCALOFF))*$(SIZE)) = l1; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+2+$(LOCALOFF))*$(SIZE)) = l3; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+3+$(LOCALOFF))*$(SIZE)) = l3; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+4+$(LOCALOFF))*$(SIZE)) = l4; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+5+$(LOCALOFF))*$(SIZE)) = l5; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+6+$(LOCALOFF))*$(SIZE)) = l6; *[register]:$(SIZE) (&w010 + (CWP:4*$(REGWINSZ)+7+$(LOCALOFF))*$(SIZE)) = l7; # what was outputs become inputs i0 = o0; i1 = o1; i2 = o2; i3 = o3; i4 = o4;
i5 = o5; fp = sp; i7 = o7; # zero out locals l0 = 0; l1 = 0; l2 = 0; l3 = 0; l4 = 0; l5 = 0; l6 = 0; l7 = 0; CWP = CWP + 1; } # copy iN ot oN # CWP-- macro restore() { CWP = CWP - 1; # inputs once again become outputs o0 = i0; # API return value o1 = i1; o2 = i2; o3 = i3; o4 = i4; o5 = i5; sp = fp; o7 = i7; # address of CALLer address if (DECOMPILE_MODE) goto ; # restore original inputs i0 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+0)*$(SIZE))); i1 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+1)*$(SIZE))); i2 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+2)*$(SIZE))); i3 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+3)*$(SIZE))); i4 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+4)*$(SIZE))); i5 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+5)*$(SIZE))); fp = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+6)*$(SIZE))); i7 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+7)*$(SIZE))); # address of CALLer address # restore original locals l0 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+0+$(LOCALOFF))*$(SIZE))); l1 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+1+$(LOCALOFF))*$(SIZE))); l2 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+2+$(LOCALOFF))*$(SIZE))); l3 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+3+$(LOCALOFF))*$(SIZE))); l4 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+4+$(LOCALOFF))*$(SIZE))); l5 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+5+$(LOCALOFF))*$(SIZE))); l6 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+6+$(LOCALOFF))*$(SIZE))); l7 = *[register]:$(SIZE) ((&w010) + ((CWP:4*$(REGWINSZ)+6+$(LOCALOFF))*$(SIZE))); } #Register g0 in Sparc is always 0 #There are special cases for source operands RS1 and RS2 which just return the constant 0 when the #specified register is g0 RS1: rs1 is rs1 & rs1_zero=0 { export 0:$(SIZE); } RS1: rs1 is rs1 { export rs1; } RS2: rs2 is rs2 & rs2_zero=0 { export 0:$(SIZE); } RS2: rs2 is rs2 { export rs2; } #For the
# Content: RD destination subconstructor (g0 special case kept in comments),
# regorimm/regorimm10/regorimm11 (register-or-immediate based on the i bit),
# reg_or_shcnt (shift counts masked to 5 or 6 bits by the x bit), the ea /
# retea effective-address forms (retea's last form matches the conventional
# "ret" = jmpl %i7+8), ea_alt with ASI segment() pcodeop, and the condition
# -code helper macros: addflags / taddflags / addflags32 / addCarryFlags /
# subCarryFlags / logicflags / subflags, each maintaining both the 32-bit
# (i_*) and 64-bit (x_*) flag bytes.
destination operand RD, we export a temporary varnode with value 0. #This is because writes to g0 are allowed, but they have no visible effect (see the Sparc manual). #This way the value of g0 won't appear to change when using the pcode emulator. # RD: rd is rd & rd_zero=0 { local tmp:$(SIZE) = 0; export tmp; } # didrestore is picked up by call instruction only # this will cause any instruction that assigns to the o7 return address register # in the delay slot of a call instruction to turn the call into a call/return # RD: rd is rd & rd_d=15 { didrestore = 1; export rd; } RD: rd is rd { export rd; } regorimm: RS2 is i=0 & RS2 { export RS2; } regorimm: simm13 is i=1 & simm13 { export *[const]:$(SIZE) simm13; } regorimm10: RS2 is i=0 & RS2 { export RS2; } regorimm10: simm10 is i=1 & simm10 { export *[const]:$(SIZE) simm10; } regorimm11: RS2 is i=0 & RS2 { export RS2; } regorimm11: simm11 is i=1 & simm11 { export *[const]:$(SIZE) simm11; } reg_or_shcnt: RS2 is i=0 & RS2 & rs2=0 { export 0:1; } reg_or_shcnt: RS2 is i=0 & x=0 & RS2 { tmp:1=RS2:1; tmp = tmp & 0x1f; export tmp; } reg_or_shcnt: RS2 is i=0 & x=1 & RS2 { tmp:1=RS2:1; tmp = tmp & 0x3f; export tmp; } reg_or_shcnt: shcnt32 is i=1 & x=0 & shcnt32 { export *[const]:1 shcnt32; } reg_or_shcnt: shcnt64 is i=1 & x=1 & shcnt64 { export *[const]:1 shcnt64; } ea: [regorimm] is rs1=0 & regorimm { export regorimm; } # special case g0=zero ea: [RS1+regorimm] is RS1 & regorimm { local tmp = RS1+regorimm; export tmp; } ea: [RS1] is RS1 & i=1 & simm13=0x0 { export RS1; } #special case when adding zero retea: regorimm is rs1=0 & i=0 & regorimm { local tmp = regorimm; export tmp; } retea: regorimm is rs1=0 & i=1 & regorimm { export *:$(SIZE) regorimm; } retea: RS1+regorimm is RS1 & regorimm { local tmp = RS1+regorimm; export tmp; } retea: RS1 is RS1 & i=1 & simm13=0x0 { local tmp = RS1; export tmp; } #special case when adding zero retea: is rs1 & rs1_zero=31 & i=1 & simm13=0x8 { local tmp = rs1 + 0x8; export tmp; } # typical
return from CALL instruction (suppress display) ea_alt: [RS1+RS2] imm_asi is i=0 & RS1 & RS2 & imm_asi { local tmp1:1 = imm_asi; local tmp = RS1+RS2+segment(tmp1); export tmp; } ea_alt: [RS1+simm13] %ASI is i=1 & RS1 & simm13 & ASI { local tmp = RS1+simm13+segment(ASI); export tmp; } ea_alt: [RS1] %ASI is i=1 & RS1 & simm13=0x0 & ASI { local tmp = RS1+segment(ASI); export tmp; } #special case when adding zero macro addflags(op1,op2) { x_cf = carry(op1,op2); x_vf = scarry(op1,op2); local tmp1 = op1:4; local tmp2 = op2:4; i_cf = carry(tmp1,tmp2); i_vf = scarry(tmp1,tmp2); } macro taddflags(op1,op2) { addflags(op1,op2); i_vf = i_vf || ((op1 & 0x3) != 0) || ((op2 & 0x3) != 0); } macro addflags32(op1,op2) { i_cf = carry(op1,op2); i_vf = scarry(op1,op2); } macro addCarryFlags ( op1, op2 ) { local op1_low_32:4 = op1:4; local op2_low_32:4 = op2:4; local CFcopy_4:4 = zext(i_cf); local CFcopy:$(SIZE) = zext(i_cf); local result:$(SIZE) = op1 + op2; local result_low_32:4 = op1_low_32 + op2_low_32; i_cf = carry( op1_low_32, op2_low_32) || carry( result_low_32, CFcopy_4 ); x_cf = carry(op1,op2) || carry(result,CFcopy); i_vf = scarry( op1_low_32, op2_low_32) ^^ scarry( result_low_32, CFcopy_4 ); x_vf = scarry(op1, op2) ^^ scarry(result,CFcopy); } macro subCarryFlags ( op1, op2 ) { local op1_low_32:4 = op1:4; local op2_low_32:4 = op2:4; local CFcopy_4:4 = zext(i_cf); local CFcopy:$(SIZE) = zext(i_cf); local result:$(SIZE) = op1 - op2; local result_low_32:4 = op1_low_32 - op2_low_32; i_cf = (op1_low_32 < op2_low_32) || (result_low_32 < CFcopy_4); x_cf = (op1 < op2) || (result < CFcopy); i_vf = sborrow( op1_low_32, op2_low_32) ^^ sborrow( result_low_32, CFcopy_4); x_vf = sborrow(op1, op2) ^^ sborrow(result,CFcopy); } macro logicflags() { x_cf = 0; x_vf = 0; i_cf = 0; i_vf = 0; } macro subflags(op1,op2) { x_cf = op1 < op2; x_vf = sborrow(op1,op2); local tmp1 = op1:4; local tmp2 = op2:4; i_cf = tmp1 < tmp2; i_vf = sborrow(tmp1,tmp2); } macro tsubflags(op1,op2){ subflags(op1,op2); i_vf
# Content: tail of tsubflags, zeroflags (zero/negative for both icc and xcc),
# packflags/unpackflags (CCR byte <-> individual flag varnodes), then the
# arithmetic/logical constructors (add/addcc/addc/addccc, and/or/xor families
# with the ~operand "n" variants), and the load constructors: sign/zero
# extending byte/half/word loads, ldx/ldd (ldd uses the rd_d pair register
# on 32-bit builds), and the alternate-space *a load forms via ea_alt.
= i_vf || ((op1 & 0x3) != 0) || ((op2 & 0x3) != 0); } macro zeroflags(op1) { x_zf = (op1 == 0); x_nf = (op1 s< 0); local tmp1 = op1:4; i_zf = (tmp1 == 0); i_nf = (tmp1 s< 0); } macro packflags(ccr) { ccr = zext((x_nf << 7) | (x_zf << 6) | (x_vf << 5) | (x_cf << 4) | (i_nf << 3) | (i_zf << 2) | (i_vf << 1) | (i_cf << 0)); } macro unpackflags(ccr) { x_nf = (ccr & 0x80)!=0; x_zf = (ccr & 0x40)!=0; x_vf = (ccr & 0x20)!=0; x_cf = (ccr & 0x10)!=0; i_nf = (ccr & 0x8)!=0; i_zf = (ccr & 0x4)!=0; i_vf = (ccr & 0x2)!=0; i_cf = (ccr & 0x1)!=0; } # --------------- :add RS1,regorimm,RD is op=2 & RD & op3=0x0 & RS1 & regorimm {RD = RS1 + regorimm;} :addcc RS1,regorimm,RD is op=2 & RD & op3=0x10 & RS1 & regorimm { addflags(RS1,regorimm); local res:$(SIZE) = RS1 + regorimm; zeroflags(res); RD = res; } :addc RS1,regorimm,RD is op=2 & RD & op3=0x8 & RS1 & regorimm {RD = RS1 + regorimm + zext(i_cf);} :addccc RS1,regorimm,RD is op=2 & RD & op3=0x18 & RS1 & regorimm { local original_i_cf:$(SIZE) = zext(i_cf); addCarryFlags(RS1,regorimm); local res:$(SIZE) = RS1 + regorimm + original_i_cf; zeroflags(res); RD = res; } #----------------------- :and RS1,regorimm,RD is op=2 & RD & op3=0x1 & RS1 & regorimm {RD = RS1 & regorimm;} :andcc RS1,regorimm,RD is op=2 & RD & op3=0x11 & RS1 & regorimm { logicflags(); local res:$(SIZE) = RS1 & regorimm; zeroflags(res); RD = res; } :andn RS1,regorimm,RD is op=2 & RD & op3=0x5 & RS1 & regorimm {RD = RS1 & ~regorimm;} :andncc RS1,regorimm,RD is op=2 & RD & op3=0x15 & RS1 & regorimm { logicflags(); local res:$(SIZE) = RS1 & ~regorimm; zeroflags(res); RD = res; } :or RS1,regorimm,RD is op=2 & RD & op3=0x2 & RS1 & regorimm {RD = RS1 | regorimm;} :orcc RS1,regorimm,RD is op=2 & RD & op3=0x12 & RS1 & regorimm { logicflags(); local res:$(SIZE) = RS1 | regorimm; zeroflags(res); RD = res; } :orn RS1,regorimm,RD is op=2 & RD & op3=0x6 & RS1 & regorimm {RD = RS1 | ~regorimm;} :orncc RS1,regorimm,RD is op=2 & RD & op3=0x16 & RS1 & regorimm { logicflags(); local
res:$(SIZE) = RS1 | ~regorimm; zeroflags(res); RD = res; } :xor RS1,regorimm,RD is op=2 & RD & op3=0x3 & RS1 & regorimm {RD = RS1 ^ regorimm;} :xorcc RS1,regorimm,RD is op=2 & RD & op3=0x13 & RS1 & regorimm { logicflags(); local res:$(SIZE) = RS1 ^ regorimm; zeroflags(res); RD = res; } :xnor RS1,regorimm,RD is op=2 & RD & op3=0x7 & RS1 & regorimm {RD = RS1 ^ ~regorimm;} :xnorcc RS1,regorimm,RD is op=2 & RD & op3=0x17 & RS1 & regorimm { logicflags(); local res:$(SIZE) = RS1 ^ ~regorimm; zeroflags(res); RD = res; } # --------------- :ldsb ea,RD is op=3 & RD & op3=0x09 & ea { RD = sext(*:1 ea); } :ldsh ea,RD is op=3 & RD & op3=0x0A & ea { RD = sext(*:2 ea); } :ldsw ea,RD is op=3 & RD & op3=0x08 & ea { RD = sext(*:4 ea); } :ldub ea,RD is op=3 & RD & op3=0x01 & ea { RD = zext(*:1 ea); } :lduh ea,RD is op=3 & RD & op3=0x02 & ea { RD = zext(*:2 ea); } :lduw ea,RD is op=3 & RD & op3=0x00 & ea { RD = zext(*:4 ea); } :ldx ea,RD is op=3 & RD & op3=0x0b & ea { RD = *:$(SIZE) ea; } @if SIZE=="8" :ldd ea,RD is op=3 & RD & op3=0x03 & ea { RD = *:$(SIZE) ea; } @else :ldd ea,RD is op=3 & RD & rd_d & op3=0x03 & ea { rd_d = *:8 ea; } @endif :ldsba ea_alt,RD is op=3 & RD & op3=0x19 & ea_alt { RD = sext(*:1 ea_alt); } :ldsha ea_alt,RD is op=3 & RD & op3=0x1a & ea_alt { RD = sext(*:2 ea_alt); } :ldswa ea_alt,RD is op=3 & RD & op3=0x18 & ea_alt { RD = sext(*:4 ea_alt); } :lduba ea_alt,RD is op=3 & RD & op3=0x11 & ea_alt { RD = zext(*:1 ea_alt); } :lduha ea_alt,RD is op=3 & RD & op3=0x12 & ea_alt { RD = zext(*:2 ea_alt); } :lduwa ea_alt,RD is op=3 & RD & op3=0x10 & ea_alt { RD = zext(*:4 ea_alt); } :ldxa ea_alt,RD is op=3 & RD & op3=0x1b & ea_alt { RD = *:$(SIZE) ea_alt; } :ldda ea_alt,RD is op=3 & RD & op3=0x13 & ea_alt { RD = *:$(SIZE) ea_alt; } #----------------- :stb RD,ea is op=3 & RD & op3=0x05 & ea { *ea = RD:1; } :sth RD,ea is op=3 & RD & op3=0x06 & ea { *ea = RD:2; } :stw RD,ea is op=3 & RD & op3=0x04 & ea { *ea = RD:4; } @if SIZE=="8" :stx RD,ea is op=3 & RD & op3=0x0e & ea {
# Content: stx/std (pair register rd_d on 32-bit builds), clrx/clrd, the
# alternate-space store forms, sub/subcc/subc/subccc, nop (sethi 0 to g0),
# cmp (subcc into g0), mov (or with g0), the RCOND register-condition
# subconstructor table for movr (tests RS1 against zero), movr^RCOND itself,
# the older commented-out movr* spellings, and the start of the m_icc
# condition table (cond4 field) used by conditional moves.
*ea = RD; } :std RD,ea is op=3 & RD & op3=0x07 & ea { *ea = RD; } @else # size = 4, but this extended store instruction needs to write 8 bytes :stx RD,ea is op=3 & RD & rd_d & op3=0x0e & ea { *ea = rd_d; } :std RD,ea is op=3 & RD & rd_d & op3=0x07 & ea { *ea = rd_d; } @endif :clrx ea is op=3 & rd=0 & op3=0x0e & ea { *ea = 0:8; } :clrd ea is op=3 & rd=0 & op3=0x07 & ea { *ea = 0:8; } :stba RD,ea_alt is op=3 & RD & op3=0x15 & ea_alt { *ea_alt = RD:1; } :stha RD,ea_alt is op=3 & RD & op3=0x16 & ea_alt { *ea_alt = RD:2; } :stwa RD,ea_alt is op=3 & RD & op3=0x14 & ea_alt { *ea_alt = RD:4; } :stxa RD,ea_alt is op=3 & RD & op3=0x1e & ea_alt { *ea_alt = RD; } :stda RD,ea_alt is op=3 & RD & op3=0x17 & ea_alt { *ea_alt = RD; } # --------------- :sub RS1,regorimm,RD is op=2 & RD & op3=0x4 & RS1 & regorimm { RD = RS1 - regorimm; } :subcc RS1,regorimm,RD is op=2 & RD & op3=0x14 & RS1 & regorimm { subflags(RS1,regorimm); local res:$(SIZE) = RS1 - regorimm; zeroflags(res); RD = res; } :subc RS1,regorimm,RD is op=2 & RD & op3=0xc & RS1 & regorimm { RD = RS1 - regorimm - zext(i_cf); } :subccc RS1,regorimm,RD is op=2 & RD & op3=0x1c & RS1 & regorimm { local original_cf:$(SIZE) = zext(i_cf); subCarryFlags(RS1,regorimm); local res:$(SIZE) = RS1 - regorimm - original_cf; zeroflags(res); RD = res; } # --------------- :nop is op=0x0 & rd=0x0 & op2=0x4 & disp22=0x0 { } # ---------------COMPARES :cmp RS1,regorimm is op=0x2 & rd=0x0 & op3=0x14 & RS1 & regorimm { subflags(RS1,regorimm); local res:$(SIZE) = RS1 - regorimm; zeroflags(res); } # ---------------MOVES :mov regorimm,RD is op=2 & RD & op3=0x2 & rs1=0 & regorimm {RD = regorimm;} # This will not work until the rs1 field in a token can be used without being # part of the display portion below RCOND: "z" is rcond3=1 & RS1 { tmp:1 = (RS1 == 0); export tmp; } RCOND: "lez" is rcond3=2 & RS1 { tmp:1 = (RS1 s<= 0); export tmp; } RCOND: "lz" is rcond3=3 & RS1 { tmp:1 = (RS1 s< 0); export tmp; } RCOND: "nz" is rcond3=5 & RS1 { tmp:1 = (RS1 !=
0); export tmp; } RCOND: "gz" is rcond3=6 & RS1 { tmp:1 = (RS1 s> 0); export tmp; } RCOND: "gez" is rcond3=7 & RS1 { tmp:1 = (RS1 s>= 0); export tmp; } :movr^RCOND RS1,regorimm10,RD is op=0x2 & RD & op3=0x2f & RCOND & regorimm10 & RS1 { if !RCOND goto ; RD = regorimm10; } #:movrz RS1,regorimm10,rd is op=0x2 & rd & op3=0x2f & RS1 & rcond3=1 & regorimm10 { if (RS1 == 0) goto inst_next; rd = regorimm10; } #:movrlez RS1,regorimm10,rd is op=0x2 & rd & op3=0x2f & RS1 & rcond3=2 & regorimm10 { if (RS1 s<= 0) goto inst_next; rd = regorimm10; } #:movrlz RS1,regorimm10,rd is op=0x2 & rd & op3=0x2f & RS1 & rcond3=3 & regorimm10 { if (RS1 s< 0) goto inst_next; rd = regorimm10; } #:movrnz RS1,regorimm10,rd is op=0x2 & rd & op3=0x2f & RS1 & rcond3=5 & regorimm10 { if (RS1 != 0) goto inst_next; rd = regorimm10; } #:movrgz RS1,regorimm10,rd is op=0x2 & rd & op3=0x2f & RS1 & rcond3=6 & regorimm10 { if (RS1 s> 0) goto inst_next; rd = regorimm10; } #:movrgez RS1,regorimm10,rd is op=0x2 & rd & op3=0x2f & RS1 & rcond3=7 & regorimm10 { if (RS1 s>= 0) goto inst_next; rd = regorimm10; } m_icc: "a" is cond4=0x8 { tmp:1=1; export tmp; } m_icc: "n" is cond4=0x0 { tmp:1=0; export tmp; } m_icc: "ne" is cond4=0x9 { tmp:1=!i_zf; export tmp; } m_icc: "e" is cond4=0x1 { tmp:1=i_zf; export tmp; } m_icc: "g" is cond4=0xa { tmp:1=!(i_zf || (i_nf ^^ i_vf)); export tmp; } m_icc: "le" is cond4=0x2 { tmp:1=(i_zf || (i_nf ^^ i_vf)); export tmp; } m_icc: "ge" is cond4=0xb { tmp:1=!(i_nf ^^ i_vf); export tmp; } m_icc: "l" is cond4=0x3 { tmp:1=(i_nf ^^ i_vf); export tmp; } m_icc: "gu" is cond4=0xc { tmp:1=!(i_cf || i_zf); export tmp; } m_icc: "leu" is cond4=0x4 { tmp:1=(i_cf || i_zf); export tmp; } m_icc: "cc" is cond4=0xd { tmp:1=!(i_cf); export tmp; } m_icc: "cs" is cond4=0x5 { export i_cf; } m_icc: "pos" is cond4=0xe { tmp:1=!(i_nf); export tmp; } m_icc: "neg" is cond4=0x6 { export i_nf; } m_icc: "vc" is cond4=0xf { tmp:1=!(i_vf); export tmp; } m_icc: "vs" is cond4=0x7 { export i_vf; } m_xcc: "a" is
# Content: the m_xcc table (same 16 conditions as m_icc but on the x_* 64-bit
# flags), m_cc/MICC selectors on the cc*_4 fields, the MOVcc conditional-move
# constructor, the icc/xcc condition tables on the branch `cond` field, the
# cc selector on cc0/cc1, the d16off 16-bit branch displacement (split
# d16hi:d16lo, scaled by 4 from inst_start), the ,pt/,pn predict hints, and
# the non-annulled BPr register-conditional branches (brz/brlez/brlz/brnz/
# brgz/brgez) each with a delay slot.
cond4=0x8 { tmp:1=1; export tmp; } m_xcc: "n" is cond4=0x0 { tmp:1=0; export tmp; } m_xcc: "ne" is cond4=0x9 { tmp:1=!x_zf; export tmp; } m_xcc: "e" is cond4=0x1 { tmp:1=x_zf; export tmp; } m_xcc: "g" is cond4=0xa { tmp:1=!(x_zf || (x_nf ^^ x_vf)); export tmp; } m_xcc: "le" is cond4=0x2 { tmp:1=(x_zf || (x_nf ^^ x_vf)); export tmp; } m_xcc: "ge" is cond4=0xb { tmp:1=!(x_nf ^^ x_vf); export tmp; } m_xcc: "l" is cond4=0x3 { tmp:1=(x_nf ^^ x_vf); export tmp; } m_xcc: "gu" is cond4=0xc { tmp:1=!(x_cf || x_zf); export tmp; } m_xcc: "leu" is cond4=0x4 { tmp:1=(x_cf || x_zf); export tmp; } m_xcc: "cc" is cond4=0xd { tmp:1=!(x_cf); export tmp; } m_xcc: "cs" is cond4=0x5 { export x_cf; } m_xcc: "pos" is cond4=0xe { tmp:1=!(x_nf); export tmp; } m_xcc: "neg" is cond4=0x6 { export x_nf; } m_xcc: "vc" is cond4=0xf { tmp:1=!(x_vf); export tmp; } m_xcc: "vs" is cond4=0x7 { export x_vf; } m_cc:m_icc is cc2_4=1 & cc1_4=0 & cc0_4=0 & m_icc { export m_icc; } m_cc:m_xcc is cc2_4=1 & cc1_4=1 & cc0_4=0 & m_xcc { export m_xcc; } MICC: "%icc" is cc2_4=1 &cc1_4=0 { } MICC: "%xcc" is cc2_4=1 &cc1_4=1 { } #conditional integer moves with floating-point conditions defined in constructor :mov^fmfcc :mov^m_cc MICC,regorimm11,RD is op=0x2 & RD & op3=0x2c & bit18=1 & m_cc & MICC & regorimm11 { if (!m_cc) goto ; RD = regorimm11; } # ---------------BRANCHES icc: "a" is cond=0x8 { tmp:1=1; export tmp; } icc: "ne" is cond=0x9 { tmp:1=!i_zf; export tmp; } icc: "e" is cond=0x1 { tmp:1=i_zf; export tmp; } icc: "g" is cond=0xa { tmp:1=!(i_zf || (i_nf ^^ i_vf)); export tmp; } icc: "le" is cond=0x2 { tmp:1=(i_zf || (i_nf ^^ i_vf)); export tmp; } icc: "ge" is cond=0xb { tmp:1=!(i_nf ^^ i_vf); export tmp; } icc: "l" is cond=0x3 { tmp:1=(i_nf ^^ i_vf); export tmp; } icc: "gu" is cond=0xc { tmp:1=!(i_cf || i_zf); export tmp; } icc: "leu" is cond=0x4 { tmp:1=(i_cf || i_zf); export tmp; } icc: "cc" is cond=0xd { tmp:1=!(i_cf); export tmp; } icc: "cs" is cond=0x5 { export i_cf; } icc: "pos" is cond=0xe {
tmp:1=!(i_nf); export tmp; } icc: "neg" is cond=0x6 { export i_nf; } icc: "vc" is cond=0xf { tmp:1=!(i_vf); export tmp; } icc: "vs" is cond=0x7 { export i_vf; } xcc: "a" is cond=0x8 { tmp:1=1; export tmp; } xcc: "ne" is cond=0x9 { tmp:1=!x_zf; export tmp; } xcc: "e" is cond=0x1 { tmp:1=x_zf; export tmp; } xcc: "g" is cond=0xa { tmp:1=!(x_zf || (x_nf ^^ x_vf)); export tmp; } xcc: "le" is cond=0x2 { tmp:1=(x_zf || (x_nf ^^ x_vf)); export tmp; } xcc: "ge" is cond=0xb { tmp:1=!(x_nf ^^ x_vf); export tmp; } xcc: "l" is cond=0x3 { tmp:1=(x_nf ^^ x_vf); export tmp; } xcc: "gu" is cond=0xc { tmp:1=!(x_cf || x_zf); export tmp; } xcc: "leu" is cond=0x4 { tmp:1=(x_cf || x_zf); export tmp; } xcc: "cc" is cond=0xd { tmp:1=!(x_cf); export tmp; } xcc: "cs" is cond=0x5 { export x_cf; } xcc: "pos" is cond=0xe { tmp:1=!(x_nf); export tmp; } xcc: "neg" is cond=0x6 { export x_nf; } xcc: "vc" is cond=0xf { tmp:1=!(x_vf); export tmp; } xcc: "vs" is cond=0x7 { export x_vf; } cc: icc is cc1=0 & cc0=0 & icc { export icc; } cc: xcc is cc1=1 & cc0=0 & xcc { export xcc; } d16off: reloc is d16hi & d16lo [reloc = inst_start+4*((d16hi<<14) | d16lo);] { export *:$(SIZE) reloc; } predict: ",pt" is p=1 { } predict: ",pn" is p=0 { } :brz^predict RS1,d16off is op=0 & a=0 & bit28=0 & rcond2=0x1 & op2=0x3 & RS1 & d16off & predict { delayslot(1); if (RS1 == 0) goto d16off;} :brlez^predict RS1,d16off is op=0 & a=0 & bit28=0 & rcond2=0x2 & op2=0x3 & RS1 & d16off & predict { delayslot(1); if (RS1 s<= 0) goto d16off;} :brlz^predict RS1,d16off is op=0 & a=0 & bit28=0 & rcond2=0x3 & op2=0x3 & RS1 & d16off & predict { delayslot(1); if (RS1 s< 0) goto d16off;} :brnz^predict RS1,d16off is op=0 & a=0 & bit28=0 & rcond2=0x5 & op2=0x3 & RS1 & d16off & predict { delayslot(1); if (RS1 != 0) goto d16off;} :brgz^predict RS1,d16off is op=0 & a=0 & bit28=0 & rcond2=0x6 & op2=0x3 & RS1 & d16off & predict { delayslot(1); if (RS1 s> 0) goto d16off;} :brgez^predict RS1,d16off is op=0 & a=0 & bit28=0 & rcond2=0x7 & op2=0x3 &
# Content: the annulled (",a") BPr variants — note they invert the condition
# and skip the delay slot when untaken, per the SPARC annul semantics — the
# BCC %icc/%xcc display selector, reloff/reloff64/skip displacement
# subconstructors, the Bicc family (ba/bn and b^icc, annulled and not), the
# BPcc family with prediction hints, and the CALL constructor: saves
# inst_start to o7, clears didrestore, delay-slots, calls, and returns via
# [o7] if the callee's delay-slot restore set didrestore (call-as-return).
RS1 & d16off & predict { delayslot(1); if (RS1 s>= 0) goto d16off;} :brz^",a"^predict RS1,d16off is op=0 & a=1 & bit28=0 & rcond2=0x1 & op2=0x3 & RS1 & d16off & predict { if (RS1 != 0) goto inst_next; delayslot(1); goto d16off;} :brlez^",a"^predict RS1,d16off is op=0 & a=1 & bit28=0 & rcond2=0x2 & op2=0x3 & RS1 & d16off & predict { if (RS1 s> 0) goto inst_next; delayslot(1); goto d16off;} :brlz^",a"^predict RS1,d16off is op=0 & a=1 & bit28=0 & rcond2=0x3 & op2=0x3 & RS1 & d16off & predict { if (RS1 s>= 0) goto inst_next; delayslot(1); goto d16off;} :brnz^",a"^predict RS1,d16off is op=0 & a=1 & bit28=0 & rcond2=0x5 & op2=0x3 & RS1 & d16off & predict { if (RS1 == 0) goto inst_next; delayslot(1); goto d16off;} :brgz^",a"^predict RS1,d16off is op=0 & a=1 & bit28=0 & rcond2=0x6 & op2=0x3 & RS1 & d16off & predict { if (RS1 s<= 0) goto inst_next; delayslot(1); goto d16off;} :brgez^",a"^predict RS1,d16off is op=0 & a=1 & bit28=0 & rcond2=0x7 & op2=0x3 & RS1 & d16off & predict { if (RS1 s< 0) goto inst_next; delayslot(1); goto d16off;} BCC: "%icc" is cc0=0 & cc1=0 { } BCC: "%xcc" is cc0=0 & cc1=1 { } reloff: reloc is disp22 [reloc=inst_start+(4*disp22);] { export *:$(SIZE) reloc; } reloff64: reloc is disp19 [reloc=inst_start+(4*disp19);] { export *:$(SIZE) reloc; } skip: reloc is epsilon [reloc=inst_start+8;] { export *:$(SIZE) reloc; } :ba reloff is op=0x0 & op2=0x2 & a=0x0 & cond=0x8 & reloff { delayslot(1); goto reloff; } :"ba,a" reloff is op=0x0 & op2=0x2 & a=0x1 & cond=0x8 & reloff { goto reloff; } :bn reloff is op=0x0 & op2=0x2 & a=0x0 & cond=0x0 & reloff { } :"bn,a" reloff,skip is op=0x0 & op2=0x2 & a=0x1 & cond=0x0 & reloff & skip { goto skip; } :b^icc reloff is op=0x0 & op2=0x2 & a=0x0 & icc & reloff { delayslot(1); if (icc) goto reloff; } :b^icc^",a" reloff is op=0x0 & op2=0x2 & a=0x1 & icc & reloff { if (!icc) goto inst_next; delayslot(1); goto reloff; } :bpa^predict reloff64 is op=0x0 & op2=0x1 & a=0x0 & cond=0x8 & reloff64 & predict { delayslot(1); goto
reloff64; } :"bpa,a"^predict reloff64 is op=0x0 & op2=0x1 & a=0x1 & cond=0x8 & reloff64 & predict { goto reloff64; } :bpn^predict reloff64 is op=0x0 & op2=0x1 & a=0x0 & cond=0x0 & reloff64 & predict { } :"bpn,a"^predict reloff64,skip is op=0x0 & op2=0x1 & a=0x1 & cond=0x0 & reloff64 & predict & skip { goto skip; } :bp^cc^predict BCC,reloff64 is op=0x0 & op2=0x1 & a=0x0 & cond & cc & reloff64 & predict & BCC { delayslot(1); if (cc) goto reloff64; } :bp^cc^",a"^predict BCC,reloff64 is op=0x0 & op2=0x1 & a=0x1 & cond & cc & reloff64 & predict & BCC { if (!cc) goto inst_next; delayslot(1); goto reloff64; } #:br^cc^predict reloff64 is op=0x0 & a=0x0 & op2=0x3 & cc & reloff64 & predict { delayslot(1); if (cc) goto reloff64; } #:br^cc^",a"^predict reloff64 is op=0x0 & a=0x1 & op2=0x3 & cc & reloff64 & predict { if (!cc) goto inst_next; delayslot(1); goto reloff64; } #---------------CALL callreloff: reloc is disp30 [reloc=inst_start+4*disp30;] { export *:$(SIZE) reloc; } :call callreloff is op=0x1 & callreloff { o7=inst_start; didrestore=0; delayslot(1); call callreloff; if (didrestore==0) goto inst_next; return [o7]; } # changing to jump for PIC call if destination is right below this one.
:call callreloff is op=0x1 & disp30=2 & callreloff { o7=inst_start; delayslot(1); goto callreloff; } #----------------RET #----------------MULTIPLY AND DIVIDE 64 bit :mulx RS1,regorimm,RD is op=2 & RD & op3=0x09 & RS1 & regorimm {RD = RS1 * regorimm;} :sdivx RS1,regorimm,RD is op=2 & RD & op3=0x2d & RS1 & regorimm {RD = RS1 s/ regorimm;} :udivx RS1,regorimm,RD is op=2 & RD & op3=0x0d & RS1 & regorimm {RD = RS1 / regorimm;} #----------------MULTIPLY 32 bit :umul RS1,regorimm,RD is op=2 & RD & op3=0x0a & RS1 & regorimm { local res:8 = zext(RS1:4) * zext(regorimm:4); Y = zext(res[32,32]); @if SIZE=="4" RD = res:4; # 32 bit only gets lower 4 bytes @else RD = res; # 64 bit gets full product @endif } :smul RS1,regorimm,RD is op=2 & RD & op3=0x0b & RS1 & regorimm { local res:8 = sext(RS1:4) * sext(regorimm:4); Y = zext(res[32,32]); @if SIZE=="4" RD = res:4; # 32 bit only gets lower 4 bytes @else RD = res; # 64 bit gets full product @endif } :umulcc RS1,regorimm,RD is op=2 & RD & op3=0x1a & RS1 & regorimm { local res:8 = zext(RS1:4) * zext(regorimm:4); Y = zext(res[32,32]); zeroflags(res:4); @if SIZE=="4" RD = res:4; # 32 bit only gets lower 4 bytes @else RD = res; # 64 bit gets full product @endif logicflags(); } :smulcc RS1,regorimm,RD is op=2 & RD & op3=0x1b & RS1 & regorimm { local res:8 = sext(RS1:4) * sext(regorimm:4); Y = zext(res[32,32]); zeroflags(res:4); @if SIZE=="4" RD = res:4; # 32 bit only gets lower 4 bytes @else RD = res; # 64 bit gets full product @endif logicflags(); } #----------------MULTIPLY Step :mulscc RS1,regorimm,RD is op=2 & RD & op3=0x24 & RS1 & regorimm { local ccr:4 = zext(i_nf ^^ i_vf); ccr = ccr << 31; local shifted:4 = RS1:4 >> 1; shifted = shifted | ccr; local addend:4 = 0:4; if ((Y & 0x1) == 0) goto ; addend = regorimm:4; local sum:4 = addend + shifted; addflags32(addend,shifted); #upper 32 bits of RD are undefined according to the manual local tbit:4 = (RS1:4 & 0x1:4) << 31; local res:$(SIZE) = zext(sum); zeroflags(res); RD = res; #Y is 
64 bits in Sparc 9 but the high 32 are fixed to 0 Y = zext((Y:4 >> 1:4) | tbit); } #----------------DIVIDE (64-bit / 32-bit) # NB- Beware, the plus + operator has higher precedence than shift << # (These are Java rules. C rules have shift and + at the same level, so left to right) :udiv RS1,regorimm,RD is op=2 & RD & op3=0x0e & RS1 & regorimm { numerator:8 = (zext(Y) << 32) + zext(RS1:4); denom:8 = zext(regorimm:4); local res:8 = numerator / denom; RD = zext(res:4); } :sdiv RS1,regorimm,RD is op=2 & RD & op3=0x0f & RS1 & regorimm { numerator:8 = (sext(Y) << 32) + zext(RS1:4); denom:8 = sext(regorimm:4); local res:8 = numerator s/ denom; RD = sext(res:4); } :udivcc RS1,regorimm,RD is op=2 & RD & op3=0x1e & RS1 & regorimm { numerator:8 = (zext(Y) << 32) + zext(RS1:4); denom:8 = zext(regorimm:4); local res:8 = numerator / denom; zeroflags(res:4); RD = zext(res:4); i_vf = res > 0xffffffff; i_cf = 0; x_vf = 0; x_cf = 0; } :sdivcc RS1,regorimm,RD is op=2 & RD & op3=0x1f & RS1 & regorimm { numerator:8 = (sext(Y) << 32) + (zext(RS1) & 0xffffffff); denom:8 = sext(regorimm:4); local res:8 = numerator s/ denom; zeroflags(res:4); RD = sext(res:4); i_vf = (res s>= 0x80000000) || (res s<= -0x7ffffffff); i_cf = 0; x_vf = 0; x_cf = 0; } #---------------SHIFT :sll RS1,reg_or_shcnt,RD is op=0x2 & RD & op3=0x25 & x=0 & RS1 & reg_or_shcnt { RD=RS1<>reg_or_shcnt; } :sllx RS1,reg_or_shcnt,RD is op=0x2 & RD & op3=0x25 & x=1 & RS1 & reg_or_shcnt { RD=RS1<>reg_or_shcnt; } :sra RS1,reg_or_shcnt,RD is op=0x2 & RD & op3=0x27 & x=0 & RS1 & reg_or_shcnt { tmp:4=RS1:4; RD=sext(tmp s>> reg_or_shcnt); } :srax RS1,reg_or_shcnt,RD is op=0x2 & RD & op3=0x27 & x=1 & RS1 & reg_or_shcnt { RD=RS1 s>> reg_or_shcnt; } # ASR read registers (some ASR #s not permitted for rd: 1, 7..15, other #s handled by rd: 3, 5, 6) attach variables [ rs_asr ] [ Y _ CCR _ TICK _ _ _ _ _ _ _ _ _ _ _ PCR PIC asr18 GSR SOFTINT_SET SOFTINT_CLR SOFTINT TICK_CMPR STICK STICK_CMPR asr26 asr27 asr28 asr29 asr30 asr31 ]; # ASR read 
registers rsASR: "%"^ASI is rs_asr=3 & ASI { tmp:$(SIZE) = zext(ASI); export tmp; } rsASR: "%"^PC is rs_asr=5 & PC { tmp:$(SIZE) = inst_start; export tmp; } rsASR: "%"^FPRS is rs_asr=6 & FPRS { tmp:$(SIZE) = zext(FPRS); export tmp; } rsASR: "%"^rs_asr is rs_asr { export rs_asr; } #---------------RD ASR special register (STBAR instruction must be defined after this instruction) :rd rsASR,RD is op=0x2 & RD & op3=0x28 & rsASR & i=0 { RD = rsASR; } :rd rsASR,RD is op=0x2 & RD & op3=0x28 & rs_asr=2 & rsASR & i=0 { packflags(RD); } # packed CCR register displayed # ASR write registers (some ASR #s not permitted for wr: 1, 4, 5, 7..15, other #s handled by wr: 2, 3, 6) attach variables [ rd_asr ] [ Y _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ PCR PIC asr18 GSR SOFTINT_SET SOFTINT_CLR SOFTINT TICK_CMPR STICK STICK_CMPR asr26 asr27 asr28 asr29 asr30 asr31 ]; # ASR write registers wrY: "%"^Y is rd_asr=0 & Y {export Y;} wrCCR: "%"^CCR is rd_asr=2 & CCR { export CCR; } # packed CCR register displayed wrASI: "%"^ASI is rd_asr=3 & ASI { export ASI; } wrFPRS: "%"^FPRS is rd_asr=6 & FPRS { export FPRS; } wrASR: "%"^rd_asr is rd_asr { export rd_asr; } #---------------WR ASR special register (SIR instruction must be defined after this instruction) # NOTE: the following ASR register numbers are not allowed: 1, 4, 5, 7..14 :wr RS1,regorimm,wrCCR is op=0x2 & RS1 & regorimm & op3=0x30 & rd_asr=2 & wrCCR { local tmp = RS1 ^ regorimm; unpackflags(tmp); } :wr RS1,regorimm,wrASI is op=0x2 & RS1 & regorimm & op3=0x30 & rd_asr=3 & wrASI { local tmp = RS1 ^ regorimm; wrASI = tmp:1; } :wr RS1,regorimm,wrFPRS is op=0x2 & RS1 & regorimm & op3=0x30 & rd_asr=6 & wrFPRS { local tmp = RS1 ^ regorimm; FPRS = tmp:1; } :wr RS1,regorimm,wrY is op=0x2 & RS1 & regorimm & op3=0x30 & rd_asr=0 & wrY { Y = zext(RS1:4 ^ regorimm:4); } :wr RS1,regorimm,wrASR is op=0x2 & RS1 & regorimm & op3=0x30 & wrASR { wrASR = RS1 ^ regorimm; } #---------------MISC sethidisp: "%hi("^hi^")" is udisp22 [hi=udisp22<<10;] { export 
*[const]:$(SIZE) hi; } :sethi sethidisp,RD is RD & op=0x0 & op2=0x4 & sethidisp { RD=sethidisp; } :popc regorimm, RD is op=0x2 & RD & op3=0x2e & rs1=0 & regorimm { RD = popcount(regorimm); } :save RS1,regorimm,RD is op=0x2 & RD & op3=0x3c & RS1 & regorimm { local tmp = RS1 + regorimm; save(); RD = tmp; } :restore RS1,regorimm,RD is op=0x2 & RD & op3=0x3d & RS1 & regorimm { local tmp = RS1 + regorimm; restore(); didrestore=1; RD = tmp; } :restore is op=0x2 & rd=0 & op3=0x3d { restore(); didrestore=1; } # FIXME 'jmpl' can have 'return' in the delayslot to return from a user trap handler # @see PR #6285 :return retea is op=0x2 & op3=0x39 & retea { build retea; restore(); delayslot(1); didrestore=1; return [retea]; } :jmpl retea,RD is op=0x2 & RD & op3=0x38 & retea { build retea; RD = inst_start; delayslot(1); goto [retea]; } # special case where link register is loaded with return address; functions as indirect call :jmpl retea,RD is op=0x2 & RD & prd=15 & op3=0x38 & retea { build retea; RD = inst_start; delayslot(1); call [retea]; } :jmpl retea is op=0x2 & rd=0 & op3=0x38 & retea { build retea; delayslot(1); goto [retea]; } # special case: when returning a structure, some software inserts unimpl instruction after every caller # jumps to linkRegister(o7)+12, instead of normal linkregister(o7)+8 :jmpl retea is op=0x2 & rd=0 & rs1=31 & op3=0x38 & i=1 & simm13=12 & retea { build retea; delayslot(1); return [retea]; } :jmpl retea is op=0x2 & rd=0 & rs1=15 & op3=0x38 & i=1 & simm13=12 & retea { build retea; delayslot(1); return [retea]; } # really jmpl instruction using linkRegister(o7)+8 :ret is op=0x2 & rd=0 & rs1=31 & op3=0x38 & i=1 & simm13=8 & retea { build retea; delayslot(1); return [retea]; } :retl is op=0x2 & rd=0 & rs1=15 & op3=0x38 & i=1 & simm13=8 & retea { build retea; delayslot(1); return [retea]; } casa_ea: [RS1]imm_asi is i=0 & RS1 & imm_asi { local tmp1:1 = imm_asi; local tmp = RS1+segment(tmp1); export tmp; } casa_ea: [RS1]%ASI is i=1 & RS1 & ASI { local 
tmp = RS1+segment(ASI); export tmp; } :casa casa_ea,RS2,RD is op=0x3 & RD & op3=0x3c & casa_ea & RS2 { local tmp:4=RD:4; local tmp2:$(SIZE) = RS2; local tmp_ea:$(SIZE) = casa_ea; RD=zext(*:4 tmp_ea); if ((tmp2 & 0xFFFFFFFF)!=RD) goto ; *:4 tmp_ea=tmp; } :casxa casa_ea,RS2,RD is op=0x3 & RD & op3=0x3e & casa_ea & RS2 { local tmp=RD; local tmp2:$(SIZE) = RS2; local tmp_ea:$(SIZE) = casa_ea; RD=*:$(SIZE) tmp_ea; if (tmp2!=RD) goto ; *:$(SIZE) tmp_ea=tmp; } :impdef1 is op=0x2 & op3=0x36 unimpl :impdef2 is op=0x2 & op3=0x37 unimpl :ldstub ea,RD is op=0x3 & RD & op3=0xd & ea { local tmp_ea:$(SIZE) = ea; RD = zext(*:1 tmp_ea); *:1 tmp_ea = 0xFF; } :ldstuba ea_alt,RD is op=0x3 & RD & op3=0x1d & ea_alt { local tmp_ea:$(SIZE) = ea_alt; RD = zext(*:1 tmp_ea); *:1 tmp_ea = 0xFF; } :swap ea,RD is op=0x3 & RD & op3=0xF & ea { local tmp_ea:$(SIZE) = ea; tmp:4=RD:4; RD = zext(*:4 tmp_ea); *:4 tmp_ea = tmp; } :swapa ea_alt,RD is op=0x3 & RD & op3=0x1F & ea_alt { local tmp_ea:$(SIZE) = ea_alt; tmp:4=RD:4; RD = zext(*:4 tmp_ea); *:4 tmp_ea = tmp; } :taddcc RS1,regorimm,RD is op=2 & RD & op3=0x20 & RS1 & regorimm { taddflags(RS1,regorimm); local res:$(SIZE) = RS1 + regorimm; zeroflags(res); RD = res; } :taddcctv RS1,regorimm,RD is op=2 & RD & op3=0x22 & RS1 & regorimm { taddflags(RS1,regorimm); local res:$(SIZE) = RS1 + regorimm; zeroflags(res); RD = res; } :tsubcc RS1,regorimm,RD is op=2 & RD & op3=0x21 & RS1 & regorimm { tsubflags(RS1,regorimm); local res:$(SIZE) = RS1 - regorimm; zeroflags(res); RD = res; } :tsubcctv RS1,regorimm,RD is op=2 & RD & op3=0x23 & RS1 & regorimm { tsubflags(RS1,regorimm); local res:$(SIZE) = RS1 - regorimm; zeroflags(res); RD = res; } tcc: icc is cc1_4=0 & cc0_4=0 & icc { export icc; } tcc: xcc is cc1_4=1 & cc0_4=0 & xcc { export xcc; } TICC: "%icc" is cc1_4=0 &cc0_4=0 { } TICC: "%xcc" is cc1_4=1 &cc0_4=0 { } trap: RS1+RS2 is i=0 & RS1 & RS2 { local tmp = ((RS1 + RS2) & 0x7F); export tmp; } trap: RS1+swtrap is i=1 & RS1 & swtrap { local tmp = ((RS1 + 
swtrap) & 0x7F); export tmp; } :t^tcc TICC, trap is op=0x2 & op3=0x3a & tcc & TICC & trap { if (!tcc) goto inst_next; local dest:$(SIZE) = sw_trap(trap); # trap should fall thru by default, can be over-ridden to a branch/call-return call [dest]; } membar_mask: is cmask & mmask { tmp:1 = (cmask << 4) | mmask; export tmp; } :membar membar_mask is op=0x2 & rd=0 & op3=0x28 & rs1=0xF & i=1 & membar_mask {} :stbar is op=0x2 & rd=0 & op3=0x28 & rs1=0xF & i=0 {} :sir simm13 is op=0x2 & rd=0xF & op3=0x30 & rs1=0x0 & i=1 & simm13 { reset(); } attach variables [ prs1 prd ] [ TPC1 TNPC1 TSTATE1 TT1 TCK TBA PSTATE TL PIL CWP CANSAVE CANRESTORE CLEANWIN OTHERWIN WSTATE FQ GL _ _ _ _ _ _ _ _ _ _ _ _ _ _ VER ]; tnpc: "%tnpc" is fcn { local reloc = zext(TL == 1)*&TNPC1 + zext(TL == 2)*&TNPC2 + zext(TL == 3)*&TNPC3 + zext(TL ==4)*&TNPC4; export reloc; } tpc: "%tpc" is fcn { local reloc = zext(TL == 1)*&TPC1 + zext(TL == 2)*&TPC2 + zext(TL == 3)*&TPC3 + zext(TL ==4)*&TPC4; export reloc; } tt: "%tt" is fcn { local tmp = zext(TL == 1)* &TT1 + zext(TL == 2)*&TT2 + zext(TL == 3)*&TT3 + zext(TL ==4)*&TT4; export tmp; } tstate: "%tstate" is fcn { local tmp = zext(TL == 1)* &TSTATE1 + zext(TL == 2)* &TSTATE2 + zext(TL == 3)* &TSTATE3 + zext(TL==4)* &TSTATE4; export tmp; } # prs1 is same bits as rs1 # prd is same bits as rd :rdpr prs1,RD is op=0x2 & RD & op3=0x2A & prs1 {RD = prs1; } :rdpr tpc,RD is op=0x2 & prs1 = 0 & RD & op3=0x2A & tpc { RD = *[register]:$(SIZE) tpc; } :rdpr tnpc,RD is op=0x2 & prs1 = 1 & RD & op3=0x2A & tnpc {RD = *[register]:$(SIZE) tnpc; } :rdpr tt,RD is op=0x2 & prs1 = 2 & RD & op3=0x2A & tt { RD = *[register]:$(SIZE) tt; } :rdpr tstate,RD is op=0x2 & prs1 = 3 & RD & op3=0x2A & tstate {RD = *[register]:$(SIZE) tstate;} :wrpr RS1,regorimm,prd is op=0x2 & prd & op3=0x32 & RS1 & regorimm {prd = RS1^regorimm; } :wrpr RS1,regorimm,tpc is op=0x2 & prd = 0 & op3=0x32 & RS1 & regorimm & tpc { *[register]:$(SIZE) tpc = RS1^regorimm; } :wrpr RS1,regorimm,tnpc is op=0x2 & prd = 
1 & op3=0x32 & RS1 & regorimm & tnpc { *[register]:$(SIZE) tnpc = RS1^regorimm; } :wrpr RS1,regorimm,tstate is op=0x2 & prd = 2 & op3=0x32 & RS1 & regorimm & tstate { *[register]:$(SIZE) tstate = RS1^regorimm; } :wrpr RS1,regorimm,tt is op=0x2 & prd = 3 & op3=0x32 & RS1 & regorimm & tt { *[register]:$(SIZE) tt = RS1^regorimm; } hpstate: "%hpstate" is fcn { local reloc = zext(TL == 1)*&HPSTATE1 + zext(TL == 2)*&HPSTATE2 + zext(TL == 3)*&HPSTATE3 + zext(TL ==4)*&HPSTATE4; export reloc; } htstate: "%htstate" is fcn { local reloc = zext(TL == 1)*&HTSTATE1 + zext(TL == 2)*&HTSTATE2 + zext(TL == 3)*&HTSTATE3 + zext(TL ==4)*&HTSTATE4; export reloc; } hintp: "%hintp" is fcn { local reloc = zext(TL == 1)*&HINTP1 + zext(TL == 2)*&HINTP2 + zext(TL == 3)*&HINTP3 + zext(TL ==4)*&HINTP4; export reloc; } htba: "%htba" is fcn { local reloc = zext(TL == 1)*&HTBA1 + zext(TL == 2)*&HTBA2 + zext(TL == 3)*&HTBA3 + zext(TL ==4)*&HTBA4; export reloc; } hver: "%hver" is fcn { local reloc = zext(TL == 1)*&HVER1 + zext(TL == 2)*&HVER2 + zext(TL == 3)*&HVER3 + zext(TL ==4)*&HVER4; export reloc; } hsys_tick_cmpr: "%hstick_cmpr" is fcn { local reloc = zext(TL == 1)*&HSTICK_CMPR1 + zext(TL == 2)*&HSTICK_CMPR2 + zext(TL == 3)*&HSTICK_CMPR3 + zext(TL ==4)*&HSTICK_CMPR4; export reloc; } resv30: "%resv30" is fcn { local reloc = zext(TL == 1)*&RESV30_1 + zext(TL == 2)*&RESV30_2 + zext(TL == 3)*&RESV30_3 + zext(TL ==4)*&RESV30_4; export reloc; } :rdhpr hpstate,RD is op=0x2 & prs1 = 0 & RD & op3=0x29 & hpstate { RD = *[register]:$(SIZE) hpstate; } :rdhpr htstate,RD is op=0x2 & prs1 = 1 & RD & op3=0x29 & htstate { RD = *[register]:$(SIZE) htstate; } :rdhpr hintp,RD is op=0x2 & prs1 = 3 & RD & op3=0x29 & hintp { RD = *[register]:$(SIZE) hintp; } :rdhpr htba,RD is op=0x2 & prs1 = 5 & RD & op3=0x29 & htba { RD = *[register]:$(SIZE) htba; } :rdhpr hver,RD is op=0x2 & prs1 = 6 & RD & op3=0x29 & hver { RD = *[register]:$(SIZE) hver; } :rdhpr hsys_tick_cmpr,RD is op=0x2 & prs1 = 31 & RD & op3=0x29 & 
hsys_tick_cmpr { RD = *[register]:$(SIZE) hsys_tick_cmpr; } :rdhpr resv30,RD is op=0x2 & prs1 = 30 & RD & op3=0x29 & resv30 { RD = *[register]:$(SIZE) resv30; } :wrhpr RS1,regorimm,hpstate is op=0x2 & prd = 0 & op3=0x33 & RS1 & regorimm & hpstate { *[register]:$(SIZE) hpstate = RS1^regorimm; } :wrhpr RS1,regorimm,htstate is op=0x2 & prd = 1 & op3=0x33 & RS1 & regorimm & htstate { *[register]:$(SIZE) htstate = RS1^regorimm; } :wrhpr RS1,regorimm,hintp is op=0x2 & prd = 3 & op3=0x33 & RS1 & regorimm & hintp { *[register]:$(SIZE) hintp = RS1^regorimm; } :wrhpr RS1,regorimm,htba is op=0x2 & prd = 5 & op3=0x33 & RS1 & regorimm & htba { *[register]:$(SIZE) htba = RS1^regorimm; } :wrhpr RS1,regorimm,hsys_tick_cmpr is op=0x2 & prd = 31 & op3=0x33 & RS1 & regorimm & hsys_tick_cmpr { *[register]:$(SIZE) hsys_tick_cmpr = RS1^regorimm; } :wrhpr RS1,regorimm,resv30 is op=0x2 & prd = 30 & op3=0x33 & RS1 & regorimm & resv30 { *[register]:$(SIZE) resv30 = RS1^regorimm; } :done is op = 2 & fcn = 0 & op3 = 0x3e & tnpc {TL=TL-1;return [tnpc]; } :retry is op = 2 & fcn = 1 & op3 = 0x3e & tpc {TL=TL-1;return [tpc]; } :flush ea is op = 2 & op3 = 0x3b & ea {} :flushw is op = 2 & op3 = 0x2b & i = 0 {} define pcodeop IllegalInstructionTrap; :illtrap const22 is op = 0 & op2 = 0 & const22 { local dest:$(SIZE) = IllegalInstructionTrap(const22:4); # trap should not fall thru by default, can be over-ridden to a call goto [dest]; } :prefetch ea,fcn is op=3 & fcn & op3 = 0x2d & ea {} :prefetcha ea_alt,fcn is op=3 & fcn & op3 = 0x3d & ea_alt {} :restored is op = 2 & fcn=1 & op3 = 0x31 {} :saved is op = 2 & fcn = 0 & op3 = 0x31 {} attach variables [fsrd fsrs1 fsrs2 ] [ fs0 fs1 fs2 fs3 fs4 fs5 fs6 fs7 fs8 fs9 fs10 fs11 fs12 fs13 fs14 fs15 fs16 fs17 fs18 fs19 fs20 fs21 fs22 fs23 fs24 fs25 fs26 fs27 fs28 fs29 fs30 fs31 ]; attach variables [fdrd fdrs1 fdrs2 ] [ fd0 fd32 fd2 fd34 fd4 fd36 fd6 fd38 fd8 fd40 fd10 fd42 fd12 fd44 fd14 fd46 fd16 fd48 fd18 fd50 fd20 fd52 fd22 fd54 fd24 fd56 fd26 fd58 fd28 fd60 
fd30 fd62 ]; attach variables [fqrd fqrs1 fqrs2 ] [ fq0 _ fq32 _ fq4 _ fq36 _ fq8 _ fq40 _ fq12 _ fq44 _ fq16 _ fq48 _ fq20 _ fq52 _ fq24 _ fq56 _ fq28 _ fq60 _]; define pcodeop ld; define pcodeop ldd; define pcodeop ldq; define pcodeop ldx; define pcodeop lda; define pcodeop ldda; define pcodeop ldqa; define pcodeop ld_fsr; define pcodeop ldx_fsr; define pcodeop st; define pcodeop std; define pcodeop stq; define pcodeop stx; define pcodeop st_fsr; define pcodeop stx_fsr; define pcodeop sta; define pcodeop stda; define pcodeop stqa; :fabss fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0x9 & fsrs2 { fsrd = abs(fsrs2); } :fabsd fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0xa & fdrs2 { fdrd = abs(fdrs2); } :fabsq fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0xb & fqrs2 { fqrd = abs(fqrs2); } :fadds fsrs1,fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & fsrs1 & opf=0x41 & fsrs2 { fsrd = fsrs1 f+ fsrs2; } :faddd fdrs1,fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & fdrs1 & opf=0x42 & fdrs2 { fdrd = fdrs1 f+ fdrs2; } :faddq fqrs1,fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & fqrs1 & opf=0x43 & fqrs2 { fqrd = fqrs1 f+ fqrs2; } :fdivs fsrs1,fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & fsrs1 & opf=0x4d & fsrs2 { fsrd = fsrs1 f/ fsrs2; } :fdivd fdrs1,fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & fdrs1 & opf=0x4e & fdrs2 { fdrd = fdrs1 f/ fdrs2; } :fdivq fqrs1,fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & fqrs1 & opf=0x4f & fqrs2 { fqrd = fqrs1 f/ fqrs2; } :fdmulq fdrs1,fdrs2,fqrd is op=0x2 & fqrd & op3=0x34 & fdrs1 & opf=0x6e & fdrs2 { tmp1:16 = float2float(fdrs1); tmp2:16 = float2float(fdrs2); fqrd = tmp1 f* tmp2; } :fsmuld fsrs1,fsrs2,fdrd is op=0x2 & fdrd & op3=0x34 & fsrs1 & opf=0x69 & fsrs2 { tmp1:8 = float2float(fsrs1); tmp2:8 = float2float(fsrs2); fdrd = tmp1 f* tmp2; } :fitos fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0xc4 & fsrs2 { fsrd = int2float(fsrs2); } :fitod fsrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0xc8 & fsrs2 { fdrd = int2float(fsrs2); } :fitoq fsrs2,fqrd is op=0x2 & fqrd & op3=0x34 & 
opf=0xcc & fsrs2 { fqrd = int2float(fsrs2); } :fmovs fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0x1 & fsrs2 { fsrd = fsrs2; } :fmovd fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0x2 & fdrs2 { fdrd = fdrs2; } :fmovq fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0x3 & fqrs2 { fqrd = fqrs2; } :fmuls fsrs1,fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & fsrs1 & opf=0x49 & fsrs2 { fsrd = fsrs1 f* fsrs2; } :fmuld fdrs1,fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & fdrs1 & opf=0x4a & fdrs2 { fdrd = fdrs1 f* fdrs2; } :fmulq fqrs1,fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & fqrs1 & opf=0x4b & fqrs2 { fqrd = fqrs1 f* fqrs2; } :fnegs fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0x5 & fsrs2 { fsrd = f- fsrs2; } :fnegd fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0x6 & fdrs2 { fdrd = f- fdrs2; } :fnegq fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0x7 & fqrs2 { fqrd = f- fqrs2; } :fsubs fsrs1,fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & fsrs1 & opf=0x45 & fsrs2 { fsrd = fsrs1 f- fsrs2; } :fsubd fdrs1,fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & fdrs1 & opf=0x46 & fdrs2 { fdrd = fdrs1 f- fdrs2; } :fsubq fqrs1,fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & fqrs1 & opf=0x47 & fqrs2 { fqrd = fqrs1 f- fqrs2; } :fxtos fdrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0x84 & fdrs2 { fsrd = int2float(fdrs2); } :fxtod fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0x88 & fdrs2 { fdrd = int2float(fdrs2); } :fxtoq fdrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0x8c & fdrs2 { fqrd = int2float(fdrs2); } :fstoi fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0xd1 & fsrs2 { fsrd = trunc(fsrs2); } :fdtoi fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0xd2 & fdrs2 { fdrd = trunc(fdrs2); } :fqtoi fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0xd3 & fqrs2 { fqrd = trunc(fqrs2); } :fstox fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0x81 & fsrs2 { fsrd = trunc(fsrs2); } :fdtox fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0x82 & fdrs2 { fdrd = trunc(fdrs2); } :fqtox fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0x83 & fqrs2 { fqrd = 
trunc(fqrs2); } :fstod fsrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0xc9 & fsrs2 { fdrd = float2float(fsrs2); } :fstoq fsrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0xcd & fsrs2 { fqrd = float2float(fsrs2); } :fdtos fdrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0xc6 & fdrs2 { fsrd = float2float(fdrs2); } :fdtoq fdrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0xce & fdrs2 { fqrd = float2float(fdrs2); } :fqtos fdrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0xc7 & fdrs2 { fsrd = float2float(fdrs2); } :fqtod fqrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0xcb & fqrs2 { fdrd = float2float(fqrs2); } :fsqrts fsrs2,fsrd is op=0x2 & fsrd & op3=0x34 & opf=0x29 & fsrs2 { fsrd = sqrt(fsrs2); } :fsqrtd fdrs2,fdrd is op=0x2 & fdrd & op3=0x34 & opf=0x2a & fdrs2 { fdrd = sqrt(fdrs2); } :fsqrtq fqrs2,fqrd is op=0x2 & fqrd & op3=0x34 & opf=0x2b & fqrs2 { fqrd = sqrt(fqrs2); } :ld ea,fsrd is op=3 & fsrd & op3=0x20 & ea { fsrd = *:4 ea; } :ldd ea,fdrd is op=3 & fdrd & op3=0x23 & ea { fdrd = *:8 ea; } :ldq ea,fqrd is op=3 & fqrd & op3=0x22 & ea { fqrd = *:16 ea; } :ld ea,"%fsr" is op=3 & op3=0x21 & rd=0 & ea { fsr = *:2 ea; } :ldx ea,"%fsr" is op=3 & op3=0x21 & rd=1 & ea { fsr = *:2 ea; } :lda ea_alt,fsrd is op=3 & fsrd & op3=0x30 & ea_alt { fsrd = *:4 ea_alt; } :ldda ea_alt,fdrd is op=3 & fdrd & op3=0x33 & ea_alt { fdrd = *:8 ea_alt; } :ldqa ea_alt,fqrd is op=3 & fqrd & op3=0x32 & ea_alt { fqrd = *:16 ea_alt; } :st fsrd,ea is op=3 & fsrd & op3=0x24 & ea { *ea = fsrd:4; } :std fdrd,ea is op=3 & fdrd & op3=0x27 & ea { *ea = fdrd:8; } :stq fqrd,ea is op=3 & fqrd & op3=0x26 & ea { *ea = fqrd:16; } :st "%fsr",ea is op=3 & op3=0x25 & rd=0 & ea { *ea = fsr; } :stx "%fsr",ea is op=3 & op3=0x25 & rd=1 & ea { *ea = fsr; } :sta fsrd,ea_alt is op=3 & fsrd & op3=0x34 & ea_alt { *ea_alt = fsrd:4; } :stda fdrd,ea_alt is op=3 & fdrd & op3=0x37 & ea_alt { *ea_alt = fdrd:8; } :stqa fqrd,ea_alt is op=3 & fqrd & op3=0x36 & ea_alt { *ea_alt = fqrd:16; } fcc0_or_fccn: is op2=6 { export fcc0; } fcc0_or_fccn: is 
op2=5 & fccn { export fccn; } fcc: "u" is cond=0x7 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 3); export tmp; } fcc: "g" is cond=0x6 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 2); export tmp; } fcc: "ug" is cond=0x5 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 2 || fcc0_or_fccn == 3); export tmp; } fcc: "l" is cond=0x4 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 1); export tmp; } fcc: "ul" is cond=0x3 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 1 || fcc0_or_fccn ==3); export tmp; } fcc: "lg" is cond=0x2 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 1 || fcc0_or_fccn ==2); export tmp; } fcc: "ne" is cond=0x1 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 1 || fcc0_or_fccn == 2 || fcc0_or_fccn ==3); export tmp; } fcc: "e" is cond=0x9 & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0); export tmp; } fcc: "ue" is cond=0xa & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0 || fcc0_or_fccn == 3); export tmp; } fcc: "ge" is cond=0xb & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0 || fcc0_or_fccn == 2); export tmp; } fcc: "uge" is cond=0xc & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0 || fcc0_or_fccn == 2 || fcc0_or_fccn == 3); export tmp; } fcc: "le" is cond=0xd & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0 || fcc0_or_fccn == 1); export tmp; } fcc: "ule" is cond=0xe & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0 || fcc0_or_fccn == 1 || fcc0_or_fccn ==3); export tmp; } fcc: "o" is cond=0xf & fcc0_or_fccn { tmp:1=(fcc0_or_fccn == 0 || fcc0_or_fccn == 1 || fcc0_or_fccn ==2); export tmp; } :fba reloff is op=0x0 & op2=0x6 & a=0x0 & cond=0x8 & reloff { delayslot(1); goto reloff; } :"fba,a" reloff is op=0x0 & op2=0x6 & a=0x1 & cond=0x8 & reloff { goto reloff; } :fbn reloff is op=0x0 & op2=0x6 & a=0x0 & cond=0x0 & reloff { } :"fbn,a" reloff,skip is op=0x0 & op2=0x6 & a=0x1 & cond=0x0 & reloff & skip { goto skip; } :fb^fcc reloff is op=0x0 & op2=0x6 & a=0x0 & fcc & reloff { delayslot(1); if (fcc) goto reloff; } :fb^fcc^",a" reloff is op=0x0 & op2=0x6 & a=0x1 & fcc & reloff { if (!fcc) goto inst_next; delayslot(1); goto reloff; } :fb^fcc^predict 
"%"fccn,reloff64 is op=0x0 & op2=0x5 & a=0x0 & fcc & reloff64 & predict & fccn { delayslot(1); if (fcc) goto reloff64; } :fb^fcc^",a"^predict "%"^fccn,reloff64 is op=0x0 & op2=0x5 & a=0x1 & fcc & reloff64 & predict & fccn { if (!fcc) goto inst_next; delayslot(1); goto reloff64; } macro fcmp(f1, f2, fccn) { # fcc value | relation # 0 | f1 = f2 # 1 | f1 < f2 # 2 | f1 > f2 # 3 | f1 or f2 NaN fccn = (1*(f1 f< f2)) + (2*(f1 f> f2)) + (3*(nan(f1) || nan(f2))); } :fcmps %fccn2,fsrs1,fsrs2 is op=0x2 & fpc=0 & fccn2 & op3=0x35 & opf=0x51 & fsrs1 & fsrs2 { fcmp(fsrs1, fsrs2, fccn2); } :fcmpd %fccn2,fdrs1,fdrs2 is op=0x2 & fpc=0 & fccn2 & op3=0x35 & opf=0x52 & fdrs1 & fdrs2 { fcmp(fdrs1, fdrs2, fccn2); } :fcmpq %fccn2,fqrs1,fqrs2 is op=0x2 & fpc=0 & fccn2 & op3=0x35 & opf=0x53 & fqrs1 & fqrs2 { fcmp(fqrs1, fqrs2, fccn2); } :fcmpes %fccn2,fsrs1,fsrs2 is op=0x2 & fpc=0 & fccn2 & op3=0x35 & opf=0x55 & fsrs1 & fsrs2 { fcmp(fsrs1, fsrs2, fccn2); } :fcmped %fccn2,fdrs1,fdrs2 is op=0x2 & fpc=0 & fccn2 & op3=0x35 & opf=0x56 & fdrs1 & fdrs2 { fcmp(fdrs1, fdrs2, fccn2); } :fcmpeq %fccn2,fqrs1,fqrs2 is op=0x2 & fpc=0 & fccn2 & op3=0x35 & opf=0x57 & fqrs1 & fqrs2 { fcmp(fqrs1, fqrs2, fccn2); } Z: is opf_cc=4 { export i_zf; } Z: is opf_cc=6 { export x_zf; } C: is opf_cc=4 { export i_cf; } C: is opf_cc=6 { export x_cf; } N: is opf_cc=4 { export i_nf; } N: is opf_cc=6 { export x_nf; } V: is opf_cc=4 { export i_vf; } V: is opf_cc=6 { export x_vf; } # floating-point move with integer condition codes fmicc: "a" is cond4=0x8 { tmp:1=1; export tmp; } fmicc: "n" is cond4=0x0 { tmp:1=0; export tmp; } fmicc: "ne" is cond4=0x9 & Z { tmp:1=!Z; export tmp; } fmicc: "e" is cond4=0x1 & Z { export Z; } fmicc: "g" is cond4=0xa & Z & N & V { tmp:1=!(Z|(N^V)); export tmp; } fmicc: "le" is cond4=0x2 & Z & N & V { tmp:1= (Z|(N^V)); export tmp; } fmicc: "ge" is cond4=0xb & N & V { tmp:1=!(N^V); export tmp; } fmicc: "l" is cond4=0x3 & N & V { tmp:1= (N^V); export tmp; } fmicc: "gu" is cond4=0xc & C & Z { 
tmp:1=!(C|Z); export tmp; } fmicc: "leu" is cond4=0x4 & C & Z { tmp:1= (C|Z); export tmp; } fmicc: "cc" is cond4=0xd & C { tmp:1=!C; export tmp; } fmicc: "cs" is cond4=0x5 & C { tmp:1=C; export tmp; } fmicc: "pos" is cond4=0xe & N { tmp:1=!N; export tmp; } fmicc: "neg" is cond4=0x6 & N { tmp:1=N; export tmp; } fmicc: "vc" is cond4=0xf & V { tmp:1=!V; export tmp; } fmicc: "vs" is cond4=0x7 & V { tmp:1=V; export tmp; } # floating-point move with floating-point condition codes fmfcc: "a" is cond4=0x8 & fccn_4 { tmp:1=1:1; export tmp; } fmfcc: "n" is cond4=0x0 & fccn_4 { tmp:1=0:1; export tmp; } fmfcc: "u" is cond4=0x7 & fccn_4 { tmp:1=(fccn_4 == 3); export tmp; } fmfcc: "g" is cond4=0x6 & fccn_4 { tmp:1=(fccn_4 == 2); export tmp; } fmfcc: "ug" is cond4=0x5 & fccn_4 { tmp:1=(fccn_4 == 2 || fccn_4 == 3); export tmp; } fmfcc: "l" is cond4=0x4 & fccn_4 { tmp:1=(fccn_4 == 1); export tmp; } fmfcc: "ul" is cond4=0x3 & fccn_4 { tmp:1=(fccn_4 == 1 || fccn_4 ==3); export tmp; } fmfcc: "lg" is cond4=0x2 & fccn_4 { tmp:1=(fccn_4 == 1 || fccn_4 ==2); export tmp; } fmfcc: "ne" is cond4=0x1 & fccn_4 { tmp:1=(fccn_4 == 1 || fccn_4 == 2 || fccn_4 ==3); export tmp; } fmfcc: "e" is cond4=0x9 & fccn_4 { tmp:1=(fccn_4 == 0); export tmp; } fmfcc: "ue" is cond4=0xa & fccn_4 { tmp:1=(fccn_4 == 0 || fccn_4 == 3); export tmp; } fmfcc: "ge" is cond4=0xb & fccn_4 { tmp:1=(fccn_4 == 0 || fccn_4 == 2); export tmp; } fmfcc: "uge" is cond4=0xc & fccn_4 { tmp:1=(fccn_4 == 0 || fccn_4 == 2 || fccn_4 == 3); export tmp; } fmfcc: "le" is cond4=0xd & fccn_4 { tmp:1=(fccn_4 == 0 || fccn_4 == 1); export tmp; } fmfcc: "ule" is cond4=0xe & fccn_4 { tmp:1=(fccn_4 == 0 || fccn_4 == 1 || fccn_4 ==3); export tmp; } fmfcc: "o" is cond4=0xf & fccn_4 { tmp:1=(fccn_4 == 0 || fccn_4 == 1 || fccn_4 ==2); export tmp; } fmfcc_or_fmicc: fmfcc is bit13=0 & fmfcc { export fmfcc; } fmfcc_or_fmicc: fmicc is bit13=1 & fmicc { export fmicc; } fcc_icc_xcc: "%"^fccn_4 is bit13=0 & fccn_4 { } fcc_icc_xcc: "%icc" is bit13=1 & 
opf_cc=4 { } fcc_icc_xcc: "%xcc" is bit13=1 & opf_cc=6 { } :fmovs^fmfcc_or_fmicc fcc_icc_xcc,fsrs2,fsrd is op=2 & op3=0x35 & bit18=0 & fmfcc_or_fmicc & fcc_icc_xcc & opf_low=1 & fsrs2 & fsrd { if !(fmfcc_or_fmicc) goto ; fsrd = fsrs2; } :fmovd^fmfcc_or_fmicc fcc_icc_xcc,fdrs2,fdrd is op=2 & op3=0x35 & bit18=0 & fmfcc_or_fmicc & fcc_icc_xcc & opf_low=2 & fdrs2 & fdrd { if !(fmfcc_or_fmicc) goto ; fdrd = fdrs2; } :fmovq^fmfcc_or_fmicc fcc_icc_xcc,fqrs2,fqrd is op=2 & op3=0x35 & bit18=0 & fmfcc_or_fmicc & fcc_icc_xcc & opf_low=3 & fqrs2 & fqrd { if !(fmfcc_or_fmicc) goto ; fqrd = fqrs2; } #conditional integer moves with integer conditions defined in constructor :mov^m_cc :mov^fmfcc " %"^fccn_4,regorimm11,RD is op=2 & RD & op3=0x2c & bit18=0 & fmfcc & fccn_4 & regorimm11 { if !(fmfcc) goto ; RD = regorimm11; } fmovrcc: "z" is rcond3=0x1 & RS1 { tmp:1 = (RS1 == 0); export tmp; } fmovrcc: "lez" is rcond3=0x2 & RS1 { tmp:1 = (RS1 s<= 0); export tmp; } fmovrcc: "lz" is rcond3=0x3 & RS1 { tmp:1 = (RS1 s< 0); export tmp; } fmovrcc: "nz" is rcond3=0x5 & RS1 { tmp:1 = (RS1 != 0); export tmp; } fmovrcc: "gz" is rcond3=0x6 & RS1 { tmp:1 = (RS1 s> 0); export tmp; } fmovrcc: "gez" is rcond3=0x7 & RS1 { tmp:1 = (RS1 s>= 0); export tmp; } :fmovrs^fmovrcc RS1,fsrs2,fsrd is op=2 & fsrd & op3=0x35 & bit13=0 & RS1 & fmovrcc & opf_low_5_9=0x5 & fsrs2 { if !(fmovrcc) goto ; fsrd = fsrs2; } :fmovrd^fmovrcc RS1,fdrs2,fdrd is op=2 & fdrd & op3=0x35 & bit13=0 & RS1 & fmovrcc & opf_low_5_9=0x6 & fdrs2 { if !(fmovrcc) goto ; fdrd = fdrs2; } :fmovrq^fmovrcc RS1,fqrs2,fqrd is op=2 & fqrd & op3=0x35 & bit13=0 & RS1 & fmovrcc & opf_low_5_9=0x7 & fqrs2 { if !(fmovrcc) goto ; fqrd = fqrs2; } # Include support for the VIS1 vector instructions @include "SparcVIS.sinc" ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcV9_32.cspec ================================================ ================================================ FILE: 
pypcode/processors/Sparc/data/languages/SparcV9_32.slaspec ================================================ @define SIZE "4" @include "SparcV9.sinc" ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcV9_64.cspec ================================================ ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcV9_64.slaspec ================================================ @define SIZE "8" @include "SparcV9.sinc" ================================================ FILE: pypcode/processors/Sparc/data/languages/SparcVIS.sinc ================================================ # The Sparc VIS1 vector instruction set # The opcodes below that have their pcodeop uncommented have been checked to make sure # that the register width is correct. If the call to the pcodeop is still commented out # that means that the register width may be incorrect. For example, a call to a 64 bit # floating point register may really use a 32 bit register. # VIS2 or VIS2+ instructions are not included in this file. 
define pcodeop alignaddr; :alignaddr RS1,RS2,rd is opf = 0x18 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = alignaddr(RS1,RS2); } define pcodeop alignaddrl; :alignaddrl RS1,RS2,rd is opf = 0x1a & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = alignaddrl(RS1,RS2); } define pcodeop array16; :array16 RS1,RS2,rd is opf = 0x12 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = array16(RS1,RS2); } define pcodeop array32; :array32 RS1,RS2,rd is opf = 0x14 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = array32(RS1,RS2); } define pcodeop array8; :array8 RS1,RS2,rd is opf = 0x10 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = array8(RS1,RS2); } define pcodeop edge16cc; :edge16cc RS1,RS2,rd is opf = 0x4 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = edge16cc(RS1,RS2); } define pcodeop edge16lcc; :edge16lcc RS1,RS2,rd is opf = 0x6 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = edge16lcc(RS1,RS2); } define pcodeop edge32cc; :edge32cc RS1,RS2,rd is opf = 0x8 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = edge32cc(RS1,RS2); } define pcodeop edge32lcc; :edge32lcc RS1,RS2,rd is opf = 0xa & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = edge32lcc(RS1,RS2); } define pcodeop edge8cc; :edge8cc RS1,RS2,rd is opf = 0x0 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = edge8cc(RS1,RS2); } define pcodeop edge8lcc; :edge8lcc RS1,RS2,rd is opf = 0x2 & op3 = 0x36 & op = 0x2 & RS1 & RS2 & rd { rd = edge8lcc(RS1,RS2); } define pcodeop faligndata; :faligndata fdrs1,fdrs2,fdrd is opf = 0x48 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = faligndata(fdrs1,fdrs2); } define pcodeop fandd; :fandd fdrs1,fdrs2,fdrd is opf = 0x70 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fandd(fdrs1,fdrs2); } define pcodeop fandnot1d; :fandnot1d fdrs1,fdrs2,fdrd is opf = 0x68 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fandnot1d(fdrs1,fdrs2); } define pcodeop fandnot1s; :fandnot1s fdrs1,fdrs2,fdrd is opf = 0x69 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = 
fandnot1s(fdrs1,fdrs2); } define pcodeop fandnot2d; :fandnot2d fdrs1,fdrs2,fdrd is opf = 0x64 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fandnot2d(fdrs1,fdrs2); } define pcodeop fandnot2s; :fandnot2s fdrs1,fdrs2,fdrd is opf = 0x65 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fandnot2s(fdrs1,fdrs2); } define pcodeop fands; :fands fdrs1,fdrs2,fdrd is opf = 0x71 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fands(fdrs1,fdrs2); } define pcodeop fexpand; :fexpand fsrs2,fdrd is opf = 0x4d & op3 = 0x36 & op = 0x2 & fsrs2 & fdrd { fdrd = fexpand(fsrs2); } define pcodeop fmul8sux16; :fmul8sux16 fdrs1,fdrs2,fdrd is opf = 0x36 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmul8sux16(fdrs1,fdrs2); } define pcodeop fmul8ulx16; :fmul8ulx16 fdrs1,fdrs2,fdrd is opf = 0x37 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmul8ulx16(fdrs1,fdrs2); } define pcodeop fmul8x16; :fmul8x16 fdrs1,fdrs2,fdrd is opf = 0x31 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmul8x16(fdrs1,fdrs2); } define pcodeop fmul8x16al; :fmul8x16al fdrs1,fdrs2,fdrd is opf = 0x35 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmul8x16al(fdrs1,fdrs2); } define pcodeop fmul8x16au; :fmul8x16au fdrs1,fdrs2,fdrd is opf = 0x33 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmul8x16au(fdrs1,fdrs2); } define pcodeop fmuld8sux16; :fmuld8sux16 fdrs1,fdrs2,fdrd is opf = 0x38 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmuld8sux16(fdrs1,fdrs2); } define pcodeop fmuld8ulx16; :fmuld8ulx16 fdrs1,fdrs2,fdrd is opf = 0x39 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fmuld8ulx16(fdrs1,fdrs2); } define pcodeop fnandd; :fnandd fdrs1,fdrs2,fdrd is opf = 0x6e & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fnandd(fdrs1,fdrs2); } define pcodeop fnands; :fnands fdrs1,fdrs2,fdrd is opf = 0x6f & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fnands(fdrs1,fdrs2); } define pcodeop fnord; :fnord fdrs1,fdrs2,fdrd is opf = 
0x62 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fnord(fdrs1,fdrs2); } define pcodeop fnors; :fnors fdrs1,fdrs2,fdrd is opf = 0x63 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fnors(fdrs1,fdrs2); } define pcodeop fnot1d; :fnot1d fdrs1,fdrd is opf = 0x6a & op3 = 0x36 & op = 0x2 & fdrs1 & fdrd { fdrd = fnot1d(fdrs1); } define pcodeop fnot1s; :fnot1s fdrs1,fdrd is opf = 0x6b & op3 = 0x36 & op = 0x2 & fdrs1 & fdrd { fdrd = fnot1s(fdrs1); } define pcodeop fnot2d; :fnot2d fdrs2,fdrd is opf = 0x66 & op3 = 0x36 & op = 0x2 & fdrs2 & fdrd { fdrd = fnot2d(fdrs2); } define pcodeop fnot2s; :fnot2s fdrs2,fdrd is opf = 0x67 & op3 = 0x36 & op = 0x2 & fdrs2 & fdrd { fdrd = fnot2s(fdrs2); } define pcodeop foned; :foned fdrd is opf = 0x7e & op3 = 0x36 & op = 0x2 & fdrd { fdrd = foned(); } define pcodeop fones; :fones fsrd is opf = 0x7f & op3 = 0x36 & op = 0x2 & fsrd { fsrd = fones(); } define pcodeop ford; :ford fdrs1,fdrs2,fdrd is opf = 0x7c & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = ford(fdrs1,fdrs2); } define pcodeop fornot1d; :fornot1d fdrs1,fdrs2,fdrd is opf = 0x7a & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fornot1d(fdrs1,fdrs2); } define pcodeop fornot1s; :fornot1s fdrs1,fdrs2,fdrd is opf = 0x7b & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fornot1s(fdrs1,fdrs2); } define pcodeop fornot2d; :fornot2d fdrs1,fdrs2,fdrd is opf = 0x76 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fornot2d(fdrs1,fdrs2); } define pcodeop fornot2s; :fornot2s fdrs1,fdrs2,fdrd is opf = 0x77 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fornot2s(fdrs1,fdrs2); } define pcodeop fors; :fors fdrs1,fdrs2,fdrd is opf = 0x7d & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fors(fdrs1,fdrs2); } define pcodeop fpack16; :fpack16 fdrs2,fsrd is opf = 0x3b & op3=0x36 & op = 0x2 & fdrs2 & fsrd { fsrd = fpack16(fdrs2); } define pcodeop fpack32; :fpack32 fdrs1,fdrs2,fdrd is opf = 0x3a & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { 
fdrd = fpack32(fdrs1,fdrs2); } define pcodeop fpackfix; :fpackfix fdrs2,fsrd is opf = 0x3d & op3 = 0x36 & op = 0x2 & fdrs2 & fsrd { fsrd = fpackfix(fdrs2); } define pcodeop fpadd16; :fpadd16 fdrs1,fdrs2,fdrd is opf = 0x50 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpadd16(fdrs1,fdrs2); } define pcodeop fpadd16s; :fpadd16s fdrs1,fdrs2,fdrd is opf = 0x51 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpadd16s(fdrs1,fdrs2); } define pcodeop fpadd32; :fpadd32 fdrs1,fdrs2,fdrd is opf = 0x52 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpadd32(fdrs1,fdrs2); } define pcodeop fpadd32s; :fpadd32s fdrs1,fdrs2,fdrd is opf = 0x53 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpadd32s(fdrs1,fdrs2); } define pcodeop fpcmpeq16; :fpcmpeq16 fdrs1,fdrs2,rd is opf = 0x2a & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmpeq16(fdrs1,fdrs2); } define pcodeop fpcmpeq32; :fpcmpeq32 fdrs1,fdrs2,rd is opf = 0x2e & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmpeq32(fdrs1,fdrs2); } define pcodeop fpcmpgt16; :fpcmpgt16 fdrs1,fdrs2,rd is opf = 0x28 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmpgt16(fdrs1,fdrs2); } define pcodeop fpcmpgt32; :fpcmpgt32 fdrs1,fdrs2,rd is opf = 0x2c & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmpgt32(fdrs1,fdrs2); } define pcodeop fpcmple16; :fpcmple16 fdrs1,fdrs2,rd is opf = 0x20 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmple16(fdrs1,fdrs2); } define pcodeop fpcmple32; :fpcmple32 fdrs1,fdrs2,rd is opf = 0x24 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmple32(fdrs1,fdrs2); } define pcodeop fpcmpne16; :fpcmpne16 fdrs1,fdrs2,rd is opf = 0x22 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmpne16(fdrs1,fdrs2); } define pcodeop fpcmpne32; :fpcmpne32 fdrs1,fdrs2,rd is opf = 0x26 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & rd { rd = fpcmpne32(fdrs1,fdrs2); } define pcodeop fpmerge; :fpmerge fdrs1,fdrs2,fdrd is opf = 0x4b & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & 
fdrd { fdrd = fpmerge(fdrs1,fdrs2); } define pcodeop fpsub16; :fpsub16 fdrs1,fdrs2,fdrd is opf = 0x54 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpsub16(fdrs1,fdrs2); } define pcodeop fpsub16s; :fpsub16s fdrs1,fdrs2,fdrd is opf = 0x55 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpsub16s(fdrs1,fdrs2); } define pcodeop fpsub32; :fpsub32 fdrs1,fdrs2,fdrd is opf = 0x56 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpsub32(fdrs1,fdrs2); } define pcodeop fpsub32s; :fpsub32s fdrs1,fdrs2,fdrd is opf = 0x57 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fpsub32s(fdrs1,fdrs2); } define pcodeop fsrc1d; :fsrc1d fdrs1,fdrd is opf = 0x74 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrd { fdrd = fsrc1d(fdrs1); } define pcodeop fsrc1s; :fsrc1s fdrs1,fdrd is opf = 0x75 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrd { fdrd = fsrc1s(fdrs1); } define pcodeop fsrc2d; :fsrc2d fdrs2,fdrd is opf = 0x78 & op3 = 0x36 & op = 0x2 & fdrs2 & fdrd { fdrd = fsrc2d(fdrs2); } define pcodeop fsrc2s; :fsrc2s fdrs2,fdrd is opf = 0x79 & op3 = 0x36 & op = 0x2 & fdrs2 & fdrd { fdrd = fsrc2s(fdrs2); } define pcodeop fxnord; :fxnord fdrs1,fdrs2,fdrd is opf = 0x72 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fxnord(fdrs1,fdrs2); } define pcodeop fxnors; :fxnors fdrs1,fdrs2,fdrd is opf = 0x73 & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fxnors(fdrs1,fdrs2); } define pcodeop fxord; :fxord fdrs1,fdrs2,fdrd is opf = 0x6c & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fxord(fdrs1,fdrs2); } define pcodeop fxors; :fxors fdrs1,fdrs2,fdrd is opf = 0x6d & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = fxors(fdrs1,fdrs2); } define pcodeop fzerod; :fzerod fdrd is opf = 0x60 & op3 = 0x36 & op = 0x2 & fdrd { fdrd = fzerod(); } define pcodeop fzeros; :fzeros fsrd is opf = 0x61 & op3 = 0x36 & op = 0x2 & fsrd { fsrd = fzeros(); } define pcodeop pdist; :pdist fdrs1,fdrs2,fdrd is opf = 0x3e & op3 = 0x36 & op = 0x2 & fdrs1 & fdrs2 & fdrd { fdrd = 
pdist(fdrs1,fdrs2); } ================================================ FILE: pypcode/processors/Sparc/data/manuals/Sparc.idx ================================================ @SPARCV9.pdf[The SPARC Architecture Manual, Version 9 (SAV09R1459912)] ADD,160 ADDcc,160 ADDC,160 ADDCcc,160 AND,208 ANDcc,208 ANDN,208 ANDNcc,208 BPcc,172 Bicc,169 BA,169 BN,169 BNE,169 BE,169 BG,169 BLE,169 BGE,169 BL,169 BGU,169 BLEU,169 BCC,169 BCS,169 BPOS,169 BNEG,169 BVC,169 BVS,169 BPA,172 BPN,172 BPNE,172 BPE,172 BPG,172 BPLE,172 BPGE,172 BPL,172 BPGU,172 BPEU,172 BPCC,172 BPCS,172 BPPOS,172 BPNEG,172 BPVC,172 BPVS,172 BPr,161 BRZ,161 BRLEZ,161 BRLZ,161 BRNZ,161 BRGZ,161 BRGEZ,161 CALL,175 CASA,176 CASXA,176 DONE,181 FABSs,188 FABSd,188 FABSq,188 FADDs,182 FADDd,182 FADDq,182 FBA,163 FBN,163 FBU,163 FBG,163 FBUG,163 FBL,163 FBUL,163 FBLG,163 FBNE,163 FBE,163 FBUE,163 FBGE,163 FBUGE,163 FBLE,163 FBULE,163 FBO,163 FBfcc,163 FBPA,166 FBPN,166 FBPU,166 FBPG,166 FBPUG,166 FBPL,166 FBPUL,166 FBPLG,166 FBPNE,166 FBPE,166 FBPUE,166 FBPGE,166 FBPUGE,166 FBPLE,166 FBPULE,166 FBPO,166 FBPfcc,166 FCMPs,183 FCMPd,183 FCMPq,183 FCMPEs,183 FCMPEd,183 FCMPEq,183 FDIVs,189 FDIVd,189 FDIVq,189 FdMULq,189 FiTOs,187 FiTOd,187 FiTOq,187 FLUSH,191 FLUSHW,193 FMOVA,213 FMOVN,213 FMOVNE,213 FMOVE,213 FMOVG,213 FMOVLE,213 FMOVGE,213 FMOVL,213 FMOVGU,213 FMOVLEU,213 FMOVCC,213 FMOVCS,213 FMOVPOS,213 FMOVNEG,213 FMOVVC,213 FMOVVS,213 FMOVFA,213 FMOVFN,213 FMOVFU,213 FMOVFG,213 FMOVFUG,213 FMOVFL,213 FMOVFUL,213 FMOVFLG,213 FMOVFNE,213 FMOVFE,213 FMOVFUE,213 FMOVFGE,213 FMOVFUGE,213 FMOVFLE,213 FMOVFULE,213 FMOVFO,213 FMOVr,217 FMOVRZ,217 FMOVRLEZ,217 FMOVRLZ,217 FMOVRNZ,217 FMOVRGZ,217 FMOVRGEZ,217 FMOVs,188 FMOVd,188 FMOVq,188 FMOVscc,213 FMOVdcc,213 FMOVqcc,213 FMOVsr,217 FMOVdr,217 FMOVqr,217 FMULs,189 FMULd,189 FMULq,189 FNEGs,188 FNEGd,188 FNEGq,188 FsMULd,189 FSQRTs,190 FSQRTd,190 FSQRTq,190 FsTOi,185 FdTOi,185 FqTOi,185 FsTOd,186 FsTOq,186 FdTOs,186 FdTOq,186 FqTOs,186 FqTOd,186 FsTOx,185 FdTOx,185 
FqTOx,185 FSUBs,182 FSUBd,182 FSUBq,182 FxTOs,187 FxTOd,187 FxTOq,187 ILLTRAP,194 IMPDEP1,195 IMPDEP2,195 JMPL,196 LDD,201 LDDA,203 LDDF,197 LDDFA,199 LDDQFA,199 LDDFAPASI,199 LDF,197 LDFA,199 LDFAPASI,199 LDFSR,197 LDQF,197 LDQFAPASI,199 LDSB,201 LDSBAPASI,203 LDSH,201 LDSHAPASI,203 LDSTUB,206 LDSTUBA,207 LDSW,201 LDSWA,203 LDUB,201 LDUBA,203 LDUH,201 LDUHA,203 LDUW,201 LDUWA,203 LDX,201 LDXA,203 LDXFSR,197 MEMBAR,210 MOVA,219 MOVN,219 MOVNE,219 MOVE,219 MOVG,219 MOVLE,219 MOVGE,219 MOVL,219 MOVGU,219 MOVLEU,219 MOVCC,219 MOVCS,219 MOVPOS,219 MOVNEG,219 MOVVC,219 MOVVS,219 MOVFA,219 MOVFN,219 MOVFU,219 MOVFG,219 MOVFUG,219 MOVFFL,219 MOVFUL,219 MOVFLG,219 MOVFNE,219 MOVFE,219 MOVFUE,219 MOVFGE,219 MOVFUGE,219 MOVFLE,219 MOVFULE,219 MOVFO,219 MOVr,223 MOVRZ,223 MOVRLEZ,223 MOVRLZ,223 MOVRNZ,223 MOVRGZ,223 MOVRGEZ,223 MULScc,228 MULX,225 NOP,230 OR,208 ORcc,208 ORN,208 ORNcc,208 POPC,231 PREFETCH,232 PREFETCHA,232 RDASI,241 RDASRPASR,241 RDCCR,241 RDFPRS,241 RDPC,241 RDPR,238 RDTICKPNPT,241 RDY,241 RDCCR,241 RDASI,241 RDTICK,241 RDPC,241 RDFPRS,241 RDASR,241 RESTORE,245 RESTORED,247 RETRY,181 RETURN,243 SAVE,245 SAVED,247 SDIV,178 SDIVcc,178 SDIVX,225 SETHI,248 SIR,251 SLL,249 SLLX,249 SMUL,226 SMULcc,226 SRA,249 SRAX,249 SRL,249 SRLX,249 STB,257 STBA,259 STBAR,252 STD,257 STDA,259 STDF,253 STDFA,255 STF,253 STFA,255 STFSR,253 STH,257 STHA,259 STQF,253 STQFA,255 STW,257 STWA,259 STX,257 STXA,259 STXFSR,253 SUB,261 SUBcc,261 SUBC,261 SUBCcc,261 SWAP,262 SWAPA,264 TADDcc,266 TADDccTV,266 TA,270 TN,270 TNE,270 TE,270 TG,270 TLE,270 TGE,270 TL,270 TGU,270 TLEU,270 TCC,270 TCS,270 TPOS,270 TNEG,270 TVC,270 TVS,270 TSUBcc,268 TSUBccTV,268 UDIV,178 UDIVcc,178 UDIVX,225 UMUL,226 UMULcc,226 WRASI,275 WRASRPASR,275 WRCCR,275 WRFPRS,275 WRPR,273 WRY,275 WRCCR,275 WRASI,275 WRASR,275 WRFPRS,275 WRY,275 XNOR,208 XNORcc,208 XOR,208 XORcc,208 ================================================ FILE: pypcode/processors/Sparc/data/patterns/SPARC_patterns.xml 
================================================ 0x81f00000 0x81c7e008 0x........ 0x81c7e008 0x........ 0000000. 0x000000 0x81c3e008 0x........ 0x81c3e008 0x........ 0000000. 0x000000 0x81cfe008 0x........ 0x81cfe008 0x........ 0000000. 0x000000 0x10 101..... 0x.... 0x........ 0x10 101..... 0x.... 0x........ 0000000. 0x000000 0x30 101..... 0x.... 0x30 101..... 0x.... 0000000. 0x000000 01...... 0x...... 10.....1 11101... 0x.... 01...... 0x...... 10.....1 11101... 0x.... 0000000. 0x000000 01...... 0x...... 0x9E 00010... 0x.... 01...... 0x...... 0x9E 00010... 0x.... 0000000. 0x000000 0000000. 0x000000 0000000. 0x000000 10011101 11100011 10111... ........ 0x81 0xc3 0xe0 0x08 0xae 0x03 0xc0 0x17 0x81 0xc3 0xe0 0x08 0x82 0x03 0xc0 0x01 0x81 0xc3 0xe0 0x08 0x90 0x02 0x00 0x0f ================================================ FILE: pypcode/processors/Sparc/data/patterns/patternconstraints.xml ================================================ SPARC_patterns.xml ================================================ FILE: pypcode/processors/SuperH/data/languages/sh-1.slaspec ================================================ @define SH_VERSION "1" @include "superh.sinc" ================================================ FILE: pypcode/processors/SuperH/data/languages/sh-2.slaspec ================================================ @define SH_VERSION "2" @include "superh.sinc" ================================================ FILE: pypcode/processors/SuperH/data/languages/sh-2a.slaspec ================================================ @define SH_VERSION "2A" @define FPU "1" @include "superh.sinc" ================================================ FILE: pypcode/processors/SuperH/data/languages/superh.cspec ================================================ ================================================ FILE: pypcode/processors/SuperH/data/languages/superh.ldefs ================================================ SuperH SH-2A processor 32-bit big-endian SuperH SH-2 processor 32-bit big-endian SuperH 
SH-1 processor 32-bit big-endian ================================================ FILE: pypcode/processors/SuperH/data/languages/superh.pspec ================================================ ================================================ FILE: pypcode/processors/SuperH/data/languages/superh.sinc ================================================ # All assembly defintions taken from: http://www.shared-ptr.com/sh_insns.html define endian=big; define alignment=1; define space ram type=ram_space size=4 wordsize=1 default; define space register type=register_space size=4; define register offset=0 size=4 [r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15]; define register offset=0x100 size=4 [sr gbr vbr mach macl pr pc]; @if SH_VERSION == "2A" define register offset=0x180 size=4 [tbr]; @endif # SR Flags @define T_FLAG "sr[0,1]" @define S_FLAG "sr[1,1]" @define I0_FLAG "sr[4,1]" @define I1_FLAG "sr[5,1]" @define I2_FLAG "sr[6,1]" @define I3_FLAG "sr[7,1]" @define Q_FLAG "sr[8,1]" @define M_FLAG "sr[9,1]" @define CS_FLAG "sr[13,1]" @define BO_FLAG "sr[14,1]" @if defined(FPU) # Floating-Point Registers define register offset=0x200 size=4 [ fr0 fr1 fr2 fr3 fr4 fr5 fr6 fr7 fr8 fr9 fr10 fr11 fr12 fr13 fr14 fr15 ]; define register offset=0x200 size=8 [ dr0 dr2 dr4 dr6 dr8 dr10 dr12 dr14 ]; # Floating-Point System Registers define register offset=0x300 size=4 [fpscr fpul]; # FPSCR Flags (initial value = H'0004 0001) @define FP_RM "fpscr[0,2]" @define FP_FLAG "fpscr[2,5]" @define FP_ENABLE "fpscr[7,5]" @define FP_CAUSE "fpscr[12,6]" @define FP_DN "fpscr[18,1]" @define FP_PR "fpscr[19,1]" @define FP_SZ "fpscr[20,1]" @define FP_QIS "fpscr[22,1]" @endif @if SH_VERSION == "2A" # The register banks space is defined below, there are 512 banks, each is 80 bytes long define register offset=0x10000 size=40960 [ resbank_base ]; @endif define token instr16(16) disp_00_03 = (0, 3) sdisp_00_03 = (0, 3) signed disp_00_07 = (0, 7) sdisp_00_07 = (0, 7) signed disp_00_11 = (0, 11) sdisp_00_11 
= (0, 11) signed imm3_00_02 = (0, 2) imm_00_07 = (0, 7) simm_00_07 = (0, 7) signed opcode_00_03 = (0, 3) opcode_00_07 = (0, 7) opcode_00_15 = (0, 15) opcode_03_03 = (3, 3) opcode_04_07 = (4, 7) opcode_08_11 = (8, 11) opcode_08_15 = (8, 15) opcode_12_15 = (12, 15) rm_04_07 = (4, 7) rm_08_11 = (8, 11) rn_04_07 = (4, 7) rn_08_11 = (8, 11) rm_imm_08_11 = (8, 11) rn_imm_08_11 = (8, 11) ; @if SH_VERSION == "2A" define token instr32(32) l_disp_00_11 = (0, 11) l_opcode_12_15 = (12, 15) l_opcode_16_19 = (16, 19) l_opcode_23_23 = (23, 23) l_opcode_24_31 = (24, 31) l_rm_20_23 = (20, 23) l_rn_24_27 = (24, 27) l_opcode_28_31 = (28, 31) l_imm20_00_15 = (0, 15) l_simm20_20_23 = (20, 23) signed l_imm3_20_22 = (20, 22) ; attach variables [ l_rn_24_27 l_rm_20_23 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ]; @endif attach variables [ rm_04_07 rm_08_11 rn_04_07 rn_08_11 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ]; attach names [ rm_imm_08_11 rn_imm_08_11 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 pr ]; @if defined(FPU) define token finstr16(16) fop_00_07 = (0, 7) fop_00_03 = (0, 3) fop_04_07 = (4, 7) fop_12_15 = (12, 15) fop_08_08 = (8, 8) fop_04_04 = (4, 4) fop_00_15 = (0, 15) ffrn_08_11 = (8, 11) ffrm_08_11 = (8, 11) ffrn_04_07 = (4, 7) ffrm_04_07 = (4, 7) f_rm_04_07 = (4, 7) f_rn_08_11 = (8, 11) fdrn_09_11 = (9, 11) fdrm_09_11 = (9, 11) fdrn_05_07 = (5, 7) fdrm_05_07 = (5, 7) ; define token finstr32(32) lfdisp_00_11 = (0, 11) lfop_28_31 = (28, 31) lfop_12_19 = (12, 19) lffrm_24_27 = (24, 27) lffrn_24_27 = (24, 27) lffrm_20_23 = (20, 23) lf_rm_20_23 = (20, 23) lf_rn_24_27 = (24, 27) lffrn_20_23 = (20, 23) ; attach variables [ ffrn_08_11 ffrm_08_11 ffrn_04_07 ffrm_04_07 lffrm_24_27 lffrn_24_27 lffrm_20_23 lffrn_20_23 ] [fr0 fr1 fr2 fr3 fr4 fr5 fr6 fr7 fr8 fr9 fr10 fr11 fr12 fr13 fr14 fr15]; attach variables [ f_rm_04_07 f_rn_08_11 lf_rm_20_23 lf_rn_24_27 ] [r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15]; attach variables [ fdrn_09_11 
fdrm_09_11 fdrn_05_07 fdrm_05_07 ] [dr0 dr2 dr4 dr6 dr8 dr10 dr12 dr14]; @endif # helpers for branch target00_07: target is sdisp_00_07 [ target = (sdisp_00_07 << 1) + inst_start + 4; ] { export *:4 target; } target00_11: target is sdisp_00_11 [ target = (sdisp_00_11 << 1) + inst_start + 4; ] { export *:4 target; } # # Data Transfer Instructions # :mov rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0011 { rn_08_11 = rm_04_07; } imm8: "#"^simm_00_07 is simm_00_07 { export *[const]:4 simm_00_07; } :mov imm8,rn_08_11 is opcode_12_15=0b1110 & rn_08_11 & imm8 { rn_08_11 = imm8; } disppc4: @(disp,pc) is disp_00_07 & pc [ disp = (disp_00_07 << 2) + ((inst_start + 4) & 0xfffffffc); ] { local tmp:4 = disp; export tmp; } disppc2: @(disp,pc) is disp_00_07 & pc [ disp = (disp_00_07 << 1) + (inst_start + 4); ] { local tmp:4 = disp; export tmp; } :mova disppc4,r0 is r0 & opcode_08_15=0b11000111 & disppc4 { r0 = disppc4; } :mov.w disppc2,rn_08_11 is opcode_12_15=0b1001 & rn_08_11 & disppc2 { rn_08_11 = sext(*:2 disppc2); } :mov.l disppc4,rn_08_11 is opcode_12_15=0b1101 & rn_08_11 & disppc4 { rn_08_11 = *:4 disppc4; } :mov.b @rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0000 { rn_08_11 = sext(*:1 rm_04_07); } :mov.w @rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0001 { rn_08_11 = sext(*:2 rm_04_07); } :mov.l @rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0010 { rn_08_11 = *:4 rm_04_07; } :mov.b rm_04_07,@rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0000 { *:1 rn_08_11 = rm_04_07:1; } :mov.w rm_04_07,@rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0001 { *:2 rn_08_11 = rm_04_07:2; } :mov.l rm_04_07,@rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0010 { *:4 rn_08_11 = rm_04_07; } # the following two instructions share the same opcodes but differ if rm == rn :mov.b 
@rm_04_07+,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0100 & opcode_04_07=opcode_08_11 { rn_08_11 = sext(*:1 rm_04_07); } :mov.b @rm_04_07+,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0100 { rn_08_11 = sext(*:1 rm_04_07); rm_04_07 = rm_04_07 + 1; } # the following two instructions share the same opcodes but differ if rm == rn :mov.w @rm_04_07+,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0101 & opcode_04_07=opcode_08_11 { rn_08_11 = sext(*:2 rm_04_07); } :mov.w @rm_04_07+,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0101 { rn_08_11 = sext(*:2 rm_04_07); rm_04_07 = rm_04_07 + 2; } # the following two instructions share the same opcodes but differ if rm == rn :mov.l @rm_04_07+,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0110 & opcode_04_07=opcode_08_11 { rn_08_11 = *:4 rm_04_07; } :mov.l @rm_04_07+,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0110 { rn_08_11 = *:4 rm_04_07; rm_04_07 = rm_04_07 + 4; } :mov.b rm_04_07,@-rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0100 { rn_08_11 = rn_08_11 -1; *:1 rn_08_11 = rm_04_07:1; } :mov.w rm_04_07,@-rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0101 { rn_08_11 = rn_08_11 -2; *:2 rn_08_11 = rm_04_07:2; } :mov.l rm_04_07,@-rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0110 { rn_08_11 = rn_08_11 -4; *:4 rn_08_11 = rm_04_07; } :mov.b @(disp_00_03,rm_04_07),r0 is r0 & opcode_08_15=0b10000100 & rm_04_07 & disp_00_03 { r0 = sext(*:1 (disp_00_03 + rm_04_07)); } :mov.w @(disp,rm_04_07),r0 is r0 & opcode_08_15=0b10000101 & rm_04_07 & disp_00_03 [ disp = disp_00_03 << 1; ] { r0 = sext(*:2 (disp + rm_04_07)); } :mov.l @(disp,rm_04_07),rn_08_11 is opcode_12_15=0b0101 & rn_08_11 & rm_04_07 & disp_00_03 [ disp = disp_00_03 << 2; ] { rn_08_11 = *:4 (disp + rm_04_07); } :mov.b r0,@(disp_00_03,rn_04_07) is r0 & 
opcode_08_15=0b10000000 & rn_04_07 & disp_00_03 { *:1 (rn_04_07 + disp_00_03) = r0:1; } :mov.w r0,@(disp,rn_04_07) is r0 & opcode_08_15=0b10000001 & rn_04_07 & disp_00_03 [ disp = disp_00_03 << 1; ] { *:2 (rn_04_07 + disp) = r0:2; } :mov.l rm_04_07,@(disp,rn_08_11) is opcode_12_15=0b0001 & rn_08_11 & rm_04_07 & disp_00_03 [ disp = disp_00_03 << 2; ] { *:4 (rn_08_11 + disp) = rm_04_07; } :mov.b @(r0,rm_04_07),rn_08_11 is r0 & opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b1100 { rn_08_11 = sext(*:1 (rm_04_07 + r0)); } :mov.w @(r0,rm_04_07),rn_08_11 is r0 & opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b1101 { rn_08_11 = sext(*:2 (rm_04_07 + r0)); } :mov.l @(r0,rm_04_07),rn_08_11 is r0 & opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b1110 { rn_08_11 = *:4 (rm_04_07 + r0); } :mov.b rm_04_07,@(r0,rn_08_11) is r0 & opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b0100 { *:1 (rn_08_11 + r0) = rm_04_07:1; } :mov.w rm_04_07,@(r0,rn_08_11) is r0 & opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b0101 { *:2 (rn_08_11 + r0) = rm_04_07:2; } :mov.l rm_04_07,@(r0,rn_08_11) is r0 & opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b0110 { *:4 (rn_08_11 + r0) = rm_04_07:4; } :mov.b @(disp_00_07,gbr),r0 is gbr & r0 & opcode_08_15=0b11000100 & disp_00_07 { r0 = sext(*:1 (gbr + disp_00_07)); } :mov.w @(disp,gbr),r0 is gbr & r0 & opcode_08_15=0b11000101 & disp_00_07 [disp = (disp_00_07 << 1); ] { r0 = sext(*:2 (gbr + disp)); } :mov.l @(disp,gbr),r0 is gbr & r0 & opcode_08_15=0b11000110 & disp_00_07 [disp = (disp_00_07 << 2); ] { r0 = *:4 (gbr + disp); } :mov.b r0,@(disp_00_07,gbr) is r0 & gbr & opcode_08_15=0b11000000 & disp_00_07 { *:1 (gbr + disp_00_07) = r0:1; } :mov.w r0,@(disp,gbr) is r0 & gbr & opcode_08_15=0b11000001 & disp_00_07 [disp = (disp_00_07 << 1); ] { *:2 (gbr + disp) = r0:2; } :mov.l r0,@(disp,gbr) is r0 & gbr & opcode_08_15=0b11000010 & disp_00_07 [disp = (disp_00_07 << 2); ] { *:4 (gbr + disp) = r0:4; } 
:movt rn_08_11 is opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00101001 { rn_08_11 = zext($(T_FLAG)); } @if SH_VERSION == "2A" # MOV.B R0, @Rn+ 0100nnnn10001011 R0 → (Rn), Rn + 1 → Rn :mov.b r0, @rn_08_11+ is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10001011 { *:1 (rn_08_11) = r0:1; rn_08_11 = rn_08_11 + 1; } # MOV.W R0, @Rn+ 0100nnnn10011011 R0 → (Rn), Rn + 2 → Rn :mov.w r0, @rn_08_11+ is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10011011 { *:2 (rn_08_11) = r0:2; rn_08_11 = rn_08_11 + 2; } # MOV.L R0, @Rn+ 0100nnnn10101011 R0 → (Rn), Rn + 4 → Rn :mov.l r0, @rn_08_11+ is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10101011 { *:4 (rn_08_11) = r0; rn_08_11 = rn_08_11 + 4; } # MOV.B @-Rm, R0 0100mmmm11001011 Rm - 1 → Rm, (Rm) → sign extension → R0 :mov.b @-rm_08_11, r0 is r0 & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b11001011 { rm_08_11 = rm_08_11 - 1; r0 = sext(*:1 (rm_08_11)); } # MOV.W @-Rm, R0 0100mmmm11011011 Rm - 2 → Rm, (Rm) → sign extension → R0 :mov.w @-rm_08_11, r0 is r0 & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b11011011 { rm_08_11 = rm_08_11 - 2; r0 = sext(*:2 (rm_08_11)); } # MOV.L @-Rm, R0 0100mmmm11101011 Rm - 4 → Rm, (Rm) → R0 :mov.l @-rm_08_11, r0 is r0 & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b11101011 { rm_08_11 = rm_08_11 - 4; r0 = *:4 (rm_08_11); } # MOV.B Rm, @(disp12, Rn) 0011nnnnmmmm0001 0000dddddddddddd Rm -> (disp+Rn) :mov.b l_rm_20_23, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b0000 & l_disp_00_11 { *:1 (l_rn_24_27 + l_disp_00_11) = l_rm_20_23:1; } # MOV.W Rm, @(disp12, Rn) 0011nnnnmmmm0001 0001dddddddddddd Rm → (disp×2+Rn) :mov.w l_rm_20_23, @(disp, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b0001 & l_disp_00_11 [ disp = 2*l_disp_00_11; ] { *:2 (l_rn_24_27 + disp) = l_rm_20_23:2; } # MOV.L Rm, @(disp12, Rn) 0011nnnnmmmm0001 0010dddddddddddd Rm → 
(disp×4+Rn) :mov.l l_rm_20_23, @(disp, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b0010 & l_disp_00_11 [ disp = 4*l_disp_00_11; ] { *:4 (l_rn_24_27 + disp) = l_rm_20_23; } # MOV.B @(disp12, Rm), Rn 0011nnnnmmmm0001 0100dddddddddddd (disp+Rm) → sign extension → Rn :mov.b @(l_disp_00_11, l_rm_20_23), l_rn_24_27 is l_opcode_28_31=0b011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b0100 & l_disp_00_11 { l_rn_24_27 = sext(*:1 (l_rm_20_23 + l_disp_00_11)); } # MOV.W @(disp12, Rm), Rn 0011nnnnmmmm0001 0101dddddddddddd (disp×2+Rm) → sign extension → Rn :mov.w @(disp, l_rm_20_23), l_rn_24_27 is l_opcode_28_31=0b011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b0101 & l_disp_00_11 [ disp = 2*l_disp_00_11; ] { l_rn_24_27 = sext(*:2 (l_rm_20_23 + disp)); } # MOV.L @(disp12, Rm), Rn 0011nnnnmmmm0001 0110dddddddddddd (disp×4+Rm) → Rn :mov.l @(disp, l_rm_20_23), l_rn_24_27 is l_opcode_28_31=0b011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b0110 & l_disp_00_11 [ disp = (4*l_disp_00_11); ] { l_rn_24_27 = *:4 (l_rm_20_23 + disp); } # MOVU.B @(disp12,Rm), Rn 0011nnnnmmmm0001 1000dddddddddddd (disp+Rm) → zero extension → Rn :movu.b @(l_disp_00_11, l_rm_20_23), l_rn_24_27 is l_opcode_28_31=0b0011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b1000 & l_disp_00_11 { l_rn_24_27 = zext(*:1 (l_disp_00_11 + l_rm_20_23)); } # MOVU.W @(disp12,Rm), Rn 0011nnnnmmmm0001 1001dddddddddddd (disp×2+Rm) → zero extension → Rn :movu.w @(disp, l_rm_20_23), l_rn_24_27 is l_opcode_28_31=0b0011 & l_rn_24_27 & l_rm_20_23 & l_opcode_16_19=0b0001 & l_opcode_12_15=0b1001 & l_disp_00_11 [ disp = l_disp_00_11 * 2; ] { l_rn_24_27 = zext(*:2 (disp + l_rm_20_23)); } simm20: "#"value is l_simm20_20_23 & l_imm20_00_15 [ value = ((l_simm20_20_23 << 16) | l_imm20_00_15); ] { export *[const]:4 value; } simm20s: "#"value is l_simm20_20_23 & l_imm20_00_15 [ value = 
# ---------------------------------------------------------------------------
# SH2A 32-bit MOVI20/MOVI20S immediate loads, and the MOVML.L register-list
# push/pop subtables. loadRegister/storeRegister implement post-increment
# load and pre-decrement store of one 4-byte register at the stack pointer.
# The MovMLReg1_k chain pushes r0..r(k) (pr when k=15) to @-r15 by chaining
# `build` through the *_store subtables; MovMLReg2_k pops them back via the
# *load subtables. The `{ r0 = r0; }` alternatives are no-op fillers so each
# table level has a match for every rm value.
# NOTE(review): this text appears to have lost its original line breaks
# during extraction — `#` comments below may run into code that originally
# sat on the following line. Reflow against the upstream Ghidra SuperH spec
# before making any code change here.
# ---------------------------------------------------------------------------
((l_simm20_20_23 << 16) | l_imm20_00_15) << 8; ] { export *[const]:4 value; } # MOVI20 #imm20, Rn 0000nnnniiii0000 iiiiiiiiiiiiiiii imm → sign extension → Rn :movi20 simm20, l_rn_24_27 is l_opcode_28_31=0b0000 & l_rn_24_27 & l_opcode_16_19=0b0000 & simm20 { l_rn_24_27 = simm20; } # MOVI20S #imm20, Rn 0000nnnniiii0001 iiiiiiiiiiiiiiii imm<<8 → sign extension → Rn :movi20s simm20s, l_rn_24_27 is l_opcode_28_31=0b0000 & l_rn_24_27 & l_opcode_16_19=0b0001 & simm20s { l_rn_24_27 = simm20s; } # # movm* instructions are only in SH2A. They don't collide, but could be ifdef'ed for 2A only # macro loadRegister(reg, ea) { reg = *:4(ea); ea = ea+4; } macro storeRegister(reg, ea) { ea = ea-4; *:4(ea) = reg; } MovMLReg1_0: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=0 { storeRegister(r0,r15); } MovMLReg1_0store: is rm_imm_08_11 { storeRegister(r0,r15); } MovMLReg1_1: MovMLReg1_0 is MovMLReg1_0 { r0 = r0; } MovMLReg1_1: rm_imm_08_11 is MovMLReg1_0store & rm_imm_08_11 & rm_08_11=1 { storeRegister(r1,r15); build MovMLReg1_0store; } MovMLReg1_1store: is MovMLReg1_0store & rm_imm_08_11 { storeRegister(r1,r15); build MovMLReg1_0store; } MovMLReg1_2: MovMLReg1_1 is MovMLReg1_1 { r0 = r0; } MovMLReg1_2: rm_imm_08_11 is MovMLReg1_1store & rm_imm_08_11 & rm_08_11=2 { storeRegister(r2,r15); build MovMLReg1_1store; } MovMLReg1_2store: is MovMLReg1_1store & rm_imm_08_11 { storeRegister(r2,r15); build MovMLReg1_1store; } MovMLReg1_3: MovMLReg1_2 is MovMLReg1_2 { r0 = r0; } MovMLReg1_3: rm_imm_08_11 is MovMLReg1_2store & rm_imm_08_11 & rm_08_11=3 { storeRegister(r3,r15); build MovMLReg1_2store; } MovMLReg1_3store: is MovMLReg1_2store & rm_imm_08_11 { storeRegister(r3,r15); build MovMLReg1_2store; } MovMLReg1_4: MovMLReg1_3 is MovMLReg1_3 { r0 = r0; } MovMLReg1_4: rm_imm_08_11 is MovMLReg1_3store & rm_imm_08_11 & rm_08_11=4 { storeRegister(r4,r15); build MovMLReg1_3store; } MovMLReg1_4store: is MovMLReg1_3store & rm_imm_08_11 { storeRegister(r4,r15); build MovMLReg1_3store; } MovMLReg1_5:
MovMLReg1_4 is MovMLReg1_4 { r0 = r0; } MovMLReg1_5: rm_imm_08_11 is MovMLReg1_4store & rm_imm_08_11 & rm_08_11=5 { storeRegister(r5,r15); build MovMLReg1_4store; } MovMLReg1_5store: is MovMLReg1_4store & rm_imm_08_11 { storeRegister(r5,r15); build MovMLReg1_4store; } MovMLReg1_6: MovMLReg1_5 is MovMLReg1_5 { r0 = r0; } MovMLReg1_6: rm_imm_08_11 is MovMLReg1_5store & rm_imm_08_11 & rm_08_11=6 { storeRegister(r6,r15); build MovMLReg1_5store; } MovMLReg1_6store: is MovMLReg1_5store & rm_imm_08_11 { storeRegister(r6,r15); build MovMLReg1_5store; } MovMLReg1_7: MovMLReg1_6 is MovMLReg1_6 { r0 = r0; } MovMLReg1_7: rm_imm_08_11 is MovMLReg1_6store & rm_imm_08_11 & rm_08_11=7 { storeRegister(r7,r15); build MovMLReg1_6store; } MovMLReg1_7store: is MovMLReg1_6store & rm_imm_08_11 { storeRegister(r7,r15); build MovMLReg1_6store; } MovMLReg1_8: MovMLReg1_7 is MovMLReg1_7 { r0 = r0; } MovMLReg1_8: rm_imm_08_11 is MovMLReg1_7store & rm_imm_08_11 & rm_08_11=8 { storeRegister(r8,r15); build MovMLReg1_7store; } MovMLReg1_8store: is MovMLReg1_7store & rm_imm_08_11 { storeRegister(r8,r15); build MovMLReg1_7store; } MovMLReg1_9: MovMLReg1_8 is MovMLReg1_8 { r0 = r0; } MovMLReg1_9: rm_imm_08_11 is MovMLReg1_8store & rm_imm_08_11 & rm_08_11=9 { storeRegister(r9,r15); build MovMLReg1_8store; } MovMLReg1_9store: is MovMLReg1_8store & rm_imm_08_11 { storeRegister(r9,r15); build MovMLReg1_8store; } MovMLReg1_10: MovMLReg1_9 is MovMLReg1_9 { r0 = r0; } MovMLReg1_10: rm_imm_08_11 is MovMLReg1_9store & rm_imm_08_11 & rm_08_11=10 { storeRegister(r10,r15); build MovMLReg1_9store; } MovMLReg1_10store: is MovMLReg1_9store & rm_imm_08_11 { storeRegister(r10,r15); build MovMLReg1_9store; } MovMLReg1_11: MovMLReg1_10 is MovMLReg1_10 { r0 = r0; } MovMLReg1_11: rm_imm_08_11 is MovMLReg1_10store & rm_imm_08_11 & rm_08_11=11 { storeRegister(r11,r15); build MovMLReg1_10store; } MovMLReg1_11store: is MovMLReg1_10store & rm_imm_08_11 { storeRegister(r11,r15); build MovMLReg1_10store; } MovMLReg1_12:
MovMLReg1_11 is MovMLReg1_11 { r0 = r0; } MovMLReg1_12: rm_imm_08_11 is MovMLReg1_11store & rm_imm_08_11 & rm_08_11=12 { storeRegister(r12,r15); build MovMLReg1_11store; } MovMLReg1_12store: is MovMLReg1_11store & rm_imm_08_11 { storeRegister(r12,r15); build MovMLReg1_11store; } MovMLReg1_13: MovMLReg1_12 is MovMLReg1_12 { r0 = r0; } MovMLReg1_13: rm_imm_08_11 is MovMLReg1_12store & rm_imm_08_11 & rm_08_11=13 { storeRegister(r13,r15); build MovMLReg1_12store; } MovMLReg1_13store: is MovMLReg1_12store & rm_imm_08_11 { storeRegister(r13,r15); build MovMLReg1_12store; } MovMLReg1_14: MovMLReg1_13 is MovMLReg1_13 { r0 = r0; } MovMLReg1_14: rm_imm_08_11 is MovMLReg1_13store & rm_imm_08_11 & rm_08_11=14 { storeRegister(r14,r15); build MovMLReg1_13store; } MovMLReg1_14store: is MovMLReg1_13store & rm_imm_08_11 { storeRegister(r14,r15); build MovMLReg1_13store; } MovMLReg1_15: MovMLReg1_14 is MovMLReg1_14 { r0 = r0; } MovMLReg1_15: rm_imm_08_11 is MovMLReg1_14store & rm_imm_08_11 & rm_08_11=15 { storeRegister(pr,r15); build MovMLReg1_14store; } MovMLReg1: MovMLReg1_15 is MovMLReg1_15 { build MovMLReg1_15; } # MOVML.L Rm, @-R15 0100mmmm11110001 :movml.l MovMLReg1, @-r15 is r15 & opcode_12_15=0b0100 & rm_imm_08_11 & opcode_00_07=0b11110001 & MovMLReg1 { build MovMLReg1; } MovMLReg2_0: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=0 { loadRegister(r0,r15); } MovMLReg2_0load: is rm_imm_08_11 { loadRegister(r0,r15); } MovMLReg2_1: MovMLReg2_0 is MovMLReg2_0 { r0 = r0; } MovMLReg2_1: rm_imm_08_11 is MovMLReg2_0load & rm_imm_08_11 & rm_08_11=1 { build MovMLReg2_0load; loadRegister(r1,r15); } MovMLReg2_1load: is MovMLReg2_0load & rm_imm_08_11 { build MovMLReg2_0load; loadRegister(r1,r15); } MovMLReg2_2: MovMLReg2_1 is MovMLReg2_1 { r0 = r0; } MovMLReg2_2: rm_imm_08_11 is MovMLReg2_1load & rm_imm_08_11 & rm_08_11=2 { build MovMLReg2_1load; loadRegister(r2,r15); } MovMLReg2_2load: is MovMLReg2_1load & rm_imm_08_11 { build MovMLReg2_1load; loadRegister(r2,r15); } MovMLReg2_3: MovMLReg2_2
is MovMLReg2_2 { r0 = r0; } MovMLReg2_3: rm_imm_08_11 is MovMLReg2_2load & rm_imm_08_11 & rm_08_11=3 { build MovMLReg2_2load; loadRegister(r3,r15); } MovMLReg2_3load: is MovMLReg2_2load & rm_imm_08_11 { build MovMLReg2_2load; loadRegister(r3,r15); } MovMLReg2_4: MovMLReg2_3 is MovMLReg2_3 { r0 = r0; } MovMLReg2_4: rm_imm_08_11 is MovMLReg2_3load & rm_imm_08_11 & rm_08_11=4 { build MovMLReg2_3load; loadRegister(r4,r15); } MovMLReg2_4load: is MovMLReg2_3load & rm_imm_08_11 { build MovMLReg2_3load; loadRegister(r4,r15); } MovMLReg2_5: MovMLReg2_4 is MovMLReg2_4 { r0 = r0; } MovMLReg2_5: rm_imm_08_11 is MovMLReg2_4load & rm_imm_08_11 & rm_08_11=5 { build MovMLReg2_4load; loadRegister(r5,r15); } MovMLReg2_5load: is MovMLReg2_4load & rm_imm_08_11 { build MovMLReg2_4load; loadRegister(r5,r15); } MovMLReg2_6: MovMLReg2_5 is MovMLReg2_5 { r0 = r0; } MovMLReg2_6: rm_imm_08_11 is MovMLReg2_5load & rm_imm_08_11 & rm_08_11=6 { build MovMLReg2_5load; loadRegister(r6,r15); } MovMLReg2_6load: is MovMLReg2_5load & rm_imm_08_11 { build MovMLReg2_5load; loadRegister(r6,r15); } MovMLReg2_7: MovMLReg2_6 is MovMLReg2_6 { r0 = r0; } MovMLReg2_7: rm_imm_08_11 is MovMLReg2_6load & rm_imm_08_11 & rm_08_11=7 { build MovMLReg2_6load; loadRegister(r7,r15); } MovMLReg2_7load: is MovMLReg2_6load & rm_imm_08_11 { build MovMLReg2_6load; loadRegister(r7,r15); } MovMLReg2_8: MovMLReg2_7 is MovMLReg2_7 { r0 = r0; } MovMLReg2_8: rm_imm_08_11 is MovMLReg2_7load & rm_imm_08_11 & rm_08_11=8 { build MovMLReg2_7load; loadRegister(r8,r15); } MovMLReg2_8load: is MovMLReg2_7load & rm_imm_08_11 { build MovMLReg2_7load; loadRegister(r8,r15); } MovMLReg2_9: MovMLReg2_8 is MovMLReg2_8 { r0 = r0; } MovMLReg2_9: rm_imm_08_11 is MovMLReg2_8load & rm_imm_08_11 & rm_08_11=9 { build MovMLReg2_8load; loadRegister(r9,r15); } MovMLReg2_9load: is MovMLReg2_8load & rm_imm_08_11 { build MovMLReg2_8load; loadRegister(r9,r15); } MovMLReg2_10: MovMLReg2_9 is MovMLReg2_9 { r0 = r0; } MovMLReg2_10: rm_imm_08_11 is MovMLReg2_9load
& rm_imm_08_11 & rm_08_11=10 { build MovMLReg2_9load; loadRegister(r10,r15); } MovMLReg2_10load: is MovMLReg2_9load & rm_imm_08_11 { build MovMLReg2_9load; loadRegister(r10,r15); } MovMLReg2_11: MovMLReg2_10 is MovMLReg2_10 { r0 = r0; } MovMLReg2_11: rm_imm_08_11 is MovMLReg2_10load & rm_imm_08_11 & rm_08_11=11 { build MovMLReg2_10load; loadRegister(r11,r15); } MovMLReg2_11load: is MovMLReg2_10load & rm_imm_08_11 { build MovMLReg2_10load; loadRegister(r11,r15); } MovMLReg2_12: MovMLReg2_11 is MovMLReg2_11 { r0 = r0; } MovMLReg2_12: rm_imm_08_11 is MovMLReg2_11load & rm_imm_08_11 & rm_08_11=12 { build MovMLReg2_11load; loadRegister(r12,r15); } MovMLReg2_12load: is MovMLReg2_11load & rm_imm_08_11 { build MovMLReg2_11load; loadRegister(r12,r15); } MovMLReg2_13: MovMLReg2_12 is MovMLReg2_12 { r0 = r0; } MovMLReg2_13: rm_imm_08_11 is MovMLReg2_12load & rm_imm_08_11 & rm_08_11=13 { build MovMLReg2_12load; loadRegister(r13,r15); } MovMLReg2_13load: is MovMLReg2_12load & rm_imm_08_11 { build MovMLReg2_12load; loadRegister(r13,r15); } MovMLReg2_14: MovMLReg2_13 is MovMLReg2_13 { r0 = r0; } MovMLReg2_14: rm_imm_08_11 is MovMLReg2_13load & rm_imm_08_11 & rm_08_11=14 { build MovMLReg2_13load; loadRegister(r14,r15); } MovMLReg2_14load: is MovMLReg2_13load & rm_imm_08_11 { build MovMLReg2_13load; loadRegister(r14,r15); } MovMLReg2_15: MovMLReg2_14 is MovMLReg2_14 { r0 = r0; } MovMLReg2_15: rm_imm_08_11 is MovMLReg2_14load & rm_imm_08_11 & rm_08_11=15 { build MovMLReg2_14load; loadRegister(pr,r15); } MovMLReg2: MovMLReg2_15 is MovMLReg2_15 { build MovMLReg2_15; } # MOVML.L @R15+, Rn 0100nnnn11110101 :movml.l @r15+, MovMLReg2 is r15 & opcode_12_15=0b0100 & rn_imm_08_11 & opcode_00_07=0b11110101 & MovMLReg2 { build MovMLReg2; } MovMUReg1_0: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=0 { storeRegister(r0,r15); } MovMUReg1_1: MovMUReg1_0 is MovMUReg1_0 { storeRegister(r1,r15); build MovMUReg1_0; } MovMUReg1_1: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=1 { storeRegister(r1,r15); }
# ---------------------------------------------------------------------------
# MOVMU.L register-list subtables: MovMUReg1_k pushes r(k)..r14 plus pr to
# @-r15 (higher registers first, via `build` chaining); MovMUReg2_k pops
# r(k)..r14 plus pr from @r15+. Unlike MovML, the MU variants always run up
# to pr, starting from the encoded register. Also: SH2A MOVRT/NOTT T-bit
# ops, SWAP/XTRCT byte-word shuffles, and the arithmetic group (ADD/ADDC/
# ADDV, the CMP family, CLIPS/CLIPU saturation, DIV0S/DIV0U/DIV1 division
# steps, SH2A DIVS/DIVU, DMULS/DMULU 64-bit multiplies, DT, EXTS).
# NOTE(review): the `goto ;` statements in clipu.b/clipu.w below have lost
# their `<label>` jump targets, apparently stripped during extraction —
# restore the labels from the upstream Ghidra SuperH spec before use.
# NOTE(review): original line breaks were also lost; `#` comments may run
# into code that originally sat on the next line.
# ---------------------------------------------------------------------------
MovMUReg1_2: MovMUReg1_1 is MovMUReg1_1 { storeRegister(r2,r15); build MovMUReg1_1; } MovMUReg1_2: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=2 { storeRegister(r2,r15); } MovMUReg1_3: MovMUReg1_2 is MovMUReg1_2 { storeRegister(r3,r15); build MovMUReg1_2; } MovMUReg1_3: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=3 { storeRegister(r3,r15); } MovMUReg1_4: MovMUReg1_3 is MovMUReg1_3 { storeRegister(r4,r15); build MovMUReg1_3; } MovMUReg1_4: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=4 { storeRegister(r4,r15); } MovMUReg1_5: MovMUReg1_4 is MovMUReg1_4 { storeRegister(r5,r15); build MovMUReg1_4; } MovMUReg1_5: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=5 { storeRegister(r5,r15); } MovMUReg1_6: MovMUReg1_5 is MovMUReg1_5 { storeRegister(r6,r15); build MovMUReg1_5; } MovMUReg1_6: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=6 { storeRegister(r6,r15); } MovMUReg1_7: MovMUReg1_6 is MovMUReg1_6 { storeRegister(r7,r15); build MovMUReg1_6; } MovMUReg1_7: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=7 { storeRegister(r7,r15); } MovMUReg1_8: MovMUReg1_7 is MovMUReg1_7 { storeRegister(r8,r15); build MovMUReg1_7; } MovMUReg1_8: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=8 { storeRegister(r8,r15); } MovMUReg1_9: MovMUReg1_8 is MovMUReg1_8 { storeRegister(r9,r15); build MovMUReg1_8; } MovMUReg1_9: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=9 { storeRegister(r9,r15); } MovMUReg1_10: MovMUReg1_9 is MovMUReg1_9 { storeRegister(r10,r15); build MovMUReg1_9; } MovMUReg1_10: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=10 { storeRegister(r10,r15); } MovMUReg1_11: MovMUReg1_10 is MovMUReg1_10 { storeRegister(r11,r15); build MovMUReg1_10; } MovMUReg1_11: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=11 { storeRegister(r11,r15); } MovMUReg1_12: MovMUReg1_11 is MovMUReg1_11 { storeRegister(r12,r15); build MovMUReg1_11; } MovMUReg1_12: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=12 { storeRegister(r12,r15); } MovMUReg1_13: MovMUReg1_12 is MovMUReg1_12 { storeRegister(r13,r15); build MovMUReg1_12; } MovMUReg1_13: rm_imm_08_11 is
rm_imm_08_11 & rm_08_11=13 { storeRegister(r13,r15); } MovMUReg1_14: MovMUReg1_13 is MovMUReg1_13 { storeRegister(r14,r15); build MovMUReg1_13; } MovMUReg1_14: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=14 { storeRegister(r14,r15); } MovMUReg1_15: MovMUReg1_14 is MovMUReg1_14 { storeRegister(pr,r15); build MovMUReg1_14; } MovMUReg1_15: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=15 { storeRegister(pr,r15); } MovMUReg1: MovMUReg1_15 is MovMUReg1_15 { build MovMUReg1_15; } # MOVMU.L Rm, @-R15 0100mmmm11110000 :movmu.l MovMUReg1, @-r15 is r15 & opcode_12_15=0b0100 & rm_imm_08_11 & opcode_00_07=0b11110000 & MovMUReg1 { build MovMUReg1; } MovMUReg2_0: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=0 { loadRegister(r0,r15); } MovMUReg2_1: MovMUReg2_0 is MovMUReg2_0 { build MovMUReg2_0; loadRegister(r1,r15); } MovMUReg2_1: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=1 { loadRegister(r1,r15); } MovMUReg2_2: MovMUReg2_1 is MovMUReg2_1 { build MovMUReg2_1; loadRegister(r2,r15); } MovMUReg2_2: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=2 { loadRegister(r2,r15); } MovMUReg2_3: MovMUReg2_2 is MovMUReg2_2 { build MovMUReg2_2; loadRegister(r3,r15); } MovMUReg2_3: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=3 { loadRegister(r3,r15); } MovMUReg2_4: MovMUReg2_3 is MovMUReg2_3 { build MovMUReg2_3; loadRegister(r4,r15); } MovMUReg2_4: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=4 { loadRegister(r4,r15); } MovMUReg2_5: MovMUReg2_4 is MovMUReg2_4 { build MovMUReg2_4; loadRegister(r5,r15); } MovMUReg2_5: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=5 { loadRegister(r5,r15); } MovMUReg2_6: MovMUReg2_5 is MovMUReg2_5 { build MovMUReg2_5; loadRegister(r6,r15); } MovMUReg2_6: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=6 { loadRegister(r6,r15); } MovMUReg2_7: MovMUReg2_6 is MovMUReg2_6 { build MovMUReg2_6; loadRegister(r7,r15); } MovMUReg2_7: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=7 { loadRegister(r7,r15); } MovMUReg2_8: MovMUReg2_7 is MovMUReg2_7 { build MovMUReg2_7; loadRegister(r8,r15); } MovMUReg2_8: rm_imm_08_11 is
rm_imm_08_11 & rm_08_11=8 { loadRegister(r8,r15); } MovMUReg2_9: MovMUReg2_8 is MovMUReg2_8 { build MovMUReg2_8; loadRegister(r9,r15); } MovMUReg2_9: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=9 { loadRegister(r9,r15); } MovMUReg2_10: MovMUReg2_9 is MovMUReg2_9 { build MovMUReg2_9; loadRegister(r10,r15); } MovMUReg2_10: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=10 { loadRegister(r10,r15); } MovMUReg2_11: MovMUReg2_10 is MovMUReg2_10 { build MovMUReg2_10; loadRegister(r11,r15); } MovMUReg2_11: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=11 { loadRegister(r11,r15); } MovMUReg2_12: MovMUReg2_11 is MovMUReg2_11 { build MovMUReg2_11; loadRegister(r12,r15); } MovMUReg2_12: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=12 { loadRegister(r12,r15); } MovMUReg2_13: MovMUReg2_12 is MovMUReg2_12 { build MovMUReg2_12; loadRegister(r13,r15); } MovMUReg2_13: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=13 { loadRegister(r13,r15); } MovMUReg2_14: MovMUReg2_13 is MovMUReg2_13 { build MovMUReg2_13; loadRegister(r14,r15); } MovMUReg2_14: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=14 { loadRegister(r14,r15); } MovMUReg2_15: MovMUReg2_14 is MovMUReg2_14 { build MovMUReg2_14; loadRegister(pr,r15); } MovMUReg2_15: rm_imm_08_11 is rm_imm_08_11 & rm_08_11=15 { loadRegister(pr,r15); } MovMUReg2: MovMUReg2_15 is MovMUReg2_15 { build MovMUReg2_15; } # MOVMU.L @R15+, Rn 0100nnnn11110100 :movmu.l @r15+, MovMUReg2 is r15 & opcode_12_15=0b0100 & rn_imm_08_11 & opcode_00_07=0b11110100 & MovMUReg2 { build MovMUReg2; } # MOVRT Rn 0000nnnn00111001 ~ T → Rn :movrt rn_08_11 is opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00111001 { rn_08_11 = zext($(T_FLAG) == 0); } # NOTT 0000000001101000 ~ T → T :nott is opcode_00_15=0b0000000001101000 { $(T_FLAG) = ~$(T_FLAG); } @endif :swap.b rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1000 { local temp0; local temp1; temp0 = rm_04_07 & 0xFFFF0000; temp1 = (rm_04_07 & 0x000000FF) << 8; rn_08_11 = (rm_04_07 & 0x0000FF00) >> 8; rn_08_11 = rn_08_11 |
temp1 | temp0; } :swap.w rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1001 { local temp; temp = (rm_04_07 >> 16) & 0x0000FFFF; rn_08_11 = rm_04_07 << 16; rn_08_11 = rn_08_11 | temp; } :xtrct rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1101 { local high; local low; high = (rm_04_07 << 16) & 0xFFFF0000; low = (rn_08_11 >> 16) & 0x0000FFFF; rn_08_11 = high | low; } # # Arithmetic Operation Instructions # :add rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b1100 { rn_08_11 = rn_08_11 + rm_04_07; } :add simm_00_07,rn_08_11 is opcode_12_15=0b0111 & rn_08_11 & simm_00_07 { rn_08_11 = rn_08_11 + simm_00_07; } :addc rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b1110 { local temp0; local temp1; temp1 = rn_08_11 + rm_04_07; temp0 = rn_08_11; rn_08_11 = temp1 + zext($(T_FLAG)); $(T_FLAG) = (temp0 > temp1) | (temp1 > rn_08_11); } :addv rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b1111 { local dest:1; local src:1; local ans:1; dest = (rn_08_11 s< 0); src = (rm_04_07 s< 0); src = src + dest; rn_08_11 = rn_08_11 + rm_04_07; ans = (rn_08_11 s< 0); ans = ans + dest; $(T_FLAG) = (src == 0 || src == 2) && (ans == 1); } :cmp"/eq" simm_00_07,r0 is r0 & opcode_08_15=0b10001000 & simm_00_07 { $(T_FLAG) = (r0 == simm_00_07); } :cmp"/eq" rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b0000 { $(T_FLAG) = (rn_08_11 == rm_04_07); } :cmp"/hs" rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b0010 { $(T_FLAG) = (rn_08_11 >= rm_04_07); } :cmp"/ge" rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b0011 { $(T_FLAG) = (rn_08_11 s>= rm_04_07); } :cmp"/hi" rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b0110 { $(T_FLAG) = (rn_08_11 > rm_04_07); } :cmp"/gt" rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 &
rm_04_07 & opcode_00_03=0b0111 { $(T_FLAG) = (rn_08_11 s> rm_04_07); } :cmp"/pl" rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00010101 { $(T_FLAG) = (rn_08_11 s> 0); } :cmp"/pz" rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00010001 { $(T_FLAG) = (rn_08_11 s>= 0); } :cmp"/str" rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1100 { local tmp0:1 = (rm_04_07[0,8] == rn_08_11[0,8]); local tmp1:1 = (rm_04_07[8,8] == rn_08_11[8,8]); local tmp2:1 = (rm_04_07[16,8] == rn_08_11[16,8]); local tmp3:1 = (rm_04_07[24,8] == rn_08_11[24,8]); $(T_FLAG) = tmp0 || tmp1 || tmp2 || tmp3; } @if SH_VERSION == "2A" # The pseudo code for clips in the super-h manual looks incorrect, # this solution was contributed by @mumbel :clips.b rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10010001 { local uppercheck = (rn_08_11 s> 0x7f); local lowercheck = (rn_08_11 s< -0x80); if (!(uppercheck || lowercheck)) goto inst_next; rn_08_11 = (0x0000007f * zext(uppercheck)) + (0xffffff80 * zext(lowercheck)); $(CS_FLAG)=1; } :clips.w rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10010101 { local uppercheck = (rn_08_11 s> 0x7fff); local lowercheck = (rn_08_11 s< -0x8000); if (!(uppercheck || lowercheck)) goto inst_next; rn_08_11 = (0x00007fff * zext(uppercheck)) + (0xffff8000 * zext(lowercheck)); $(CS_FLAG)=1; } :clipu.b rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10000001 { if (rn_08_11 <= 0x000000ff) goto ; rn_08_11 = 0x000000ff; $(CS_FLAG) = 1; } :clipu.w rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10000101 { if (rn_08_11 <= 0x0000ffff) goto ; rn_08_11 = 0x0000ffff; $(CS_FLAG) = 1; } @endif :div0s rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b0111 { $(Q_FLAG) = rn_08_11 s< 0; $(M_FLAG) = rm_04_07 s< 0; $(T_FLAG) = !($(M_FLAG) == $(Q_FLAG)); } :div0u is opcode_00_15=0b0000000000011001 { $(M_FLAG) = 0; $(Q_FLAG) = 0; $(T_FLAG) = 0; } :div1
rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b0100 { local tmp0:4; local tmp1:1; local tmp2:4; local old_q:1; local old_q_eq_m:1; local m_eq_q:1; old_q = $(Q_FLAG); $(Q_FLAG) = (0x80000000 & rn_08_11) != 0; tmp2 = rm_04_07; rn_08_11 = rn_08_11 << 1; rn_08_11 = rn_08_11 | zext($(T_FLAG)); old_q_eq_m = old_q == $(M_FLAG); m_eq_q = $(M_FLAG) == $(Q_FLAG); tmp0 = rn_08_11; # rn_08_11 = old_q_eq_m ? rn_08_11 - tmp2 : rn_08_11 + tmp2; rn_08_11 = (zext(old_q_eq_m) * (rn_08_11 - tmp2)) + (zext(!old_q_eq_m) * (rn_08_11 + tmp2)); # tmp1 = old_q_eq_m ? rn_08_11 > tmp0 : rn_08_11 < tmp0; tmp1 = (old_q_eq_m * (rn_08_11 > tmp0)) + (!old_q_eq_m * (rn_08_11 < tmp0)); # $(Q_FLAG) = m_eq_q ? tmp1 : tmp1 == 0; $(Q_FLAG) = (m_eq_q * tmp1) + (!m_eq_q & (tmp1 == 0)); $(T_FLAG) = $(Q_FLAG) == $(M_FLAG); } @if SH_VERSION == "2A" :divs r0, rn_08_11 is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10010100 { rn_08_11 = rn_08_11 s/ r0; } :divu r0, rn_08_11 is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10000100 { rn_08_11 = rn_08_11 / r0; } @endif @if (SH_VERSION == "2") || (SH_VERSION == "2A") :dmuls.l rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b1101 { local a:8 = sext(rn_08_11); local b:8 = sext(rm_04_07); local result:8 = a * b; mach = result(4); macl = result:4; } :dmulu.l rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b0101 { local a:8 = zext(rn_08_11); local b:8 = zext(rm_04_07); local result:8 = a * b; mach = result(4); macl = result:4; } :dt rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00010000 { rn_08_11 = rn_08_11 - 1; $(T_FLAG) = (rn_08_11 == 0); } @endif :exts.b rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1110 { local temp:1 = rm_04_07:1; rn_08_11 = sext(temp); } :exts.w rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1111 { local temp:2 = rm_04_07:2; rn_08_11 = sext(temp); }
# ---------------------------------------------------------------------------
# EXTU zero-extensions; the MAC.L/MAC.W multiply-accumulate routines
# (transcribed from the manual's pseudo code — the FIXME comments are the
# original authors'); MUL/MULR/MULS/MULU, NEG/NEGC, SUB/SUBC/SUBV; the
# logic group (AND/OR/XOR/NOT/TAS/TST with @(r0,gbr) byte forms); rotates
# and shifts (T_FLAG receives the shifted-out bit); branches, which model
# the SH delay slot via delayslot(1) and capture pr/pc before it executes;
# SH2A banked-register LDBANK/STBANK; LDC/STC/LDS/STS system-register
# moves (with the SH-1 10-bit MACH sign-extension special cases); RTE,
# TRAPA; and the beginning of the single-precision FPU group.
# NOTE(review): many `goto ;` statements below (mac.l, mac.w, shad, shld)
# have lost their `<label>` jump targets, apparently stripped during
# extraction; the intended control flow cannot be recovered from this text
# alone — restore the labels from the upstream Ghidra SuperH spec before
# making any change here.
# NOTE(review): original line breaks were also lost; `#` comments may run
# into code that originally sat on the next line.
# ---------------------------------------------------------------------------
:extu.b rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1100 { local temp:1 = rm_04_07:1; rn_08_11 = zext(temp); } :extu.w rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1101 { local temp:2 = rm_04_07:2; rn_08_11 = zext(temp); } @if (SH_VERSION == "2") || (SH_VERSION == "2A") :mac.l @rm_04_07+,@rn_08_11+ is opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b1111 { # FIXME: review this instruction local RnL; local RnH; local RmL; local RmH; local Res0; local Res1:4; local Res2:4; local temp0; local temp1:4; local temp2:4; local temp3; local tempm:4; local tempn:4; local fnLmL:4; tempn = *:4 rn_08_11; rn_08_11 = rn_08_11 + 4; tempm = *:4 rm_04_07; rm_04_07 = rm_04_07 + 4; fnLmL = -1 * zext((tempn ^ tempm) s<0); if( tempn s>= 0) goto ; tempn = 0 - tempn; if( tempm s>= 0) goto ; tempm = 0 - tempm; temp1 = tempn; temp2 = tempm; RnL = temp1 & 0x0000FFFF; RnH = (temp1 >> 16) & 0x0000FFFF; RmL = temp2 & 0x0000FFFF; RmH = (temp2 >> 16) & 0x0000FFFF; temp0 = RmL * RnL; temp1 = RmH * RnL; temp2 = RmL * RnH; temp3 = RmH * RnH; Res2 = 0; Res1 = temp1 + temp2; if(Res2 >= temp1) goto ; Res2 = Res2 + 0x00010000; temp1 = (Res1 << 16) & 0xFFFF0000; Res0 = temp0 + temp1; Res2 = Res2 + zext(Res0 < temp0); Res2 = Res2 + ((Res1 >> 16) & 0x0000FFFF) + temp3; if(fnLmL s>= 0) goto ; Res2 = ~Res2; if(Res0 == 0) goto ; Res0 = (~Res0) + 1; goto ; Res2 = Res2 + 1; if($(S_FLAG) != 1) goto ; Res0 = macl + Res0; Res2 = Res2 + zext(macl > Res0); Res2 = Res2 + (mach & 0x0000FFFF); if((Res2 s>= 0) || Res2 >= 0xFFFF8000) goto ; Res2 = 0xFFFF8000; Res0 = 0x00000000; if((Res2 s<= 0) || Res2 <= 0x00007FFF) goto ; Res2 = 0x00007FFF; Res0 = 0xFFFFFFFF; mach = (Res2 & 0x0000FFFF) | (mach & 0xFFFF0000); macl = Res0; goto ; Res0 = macl + Res0; Res2 = Res2 + zext(macl > Res0); Res2 = Res2 + mach; mach = Res2; macl = Res0; } @endif :mac.w @rm_04_07+,@rn_08_11+ is opcode_12_15=0b0100 & rn_08_11 & rm_04_07 & opcode_00_03=0b1111 { # FIXME:
review this instruction local tempm:4; local tempn:4; local dest; local src:4; local ans; local templ:4; tempn = *:2 rn_08_11; rn_08_11 = rn_08_11 + 2; tempm = *:2 rm_04_07; rm_04_07 = rm_04_07 + 2; templ = macl; tempm = sext(tempn:2) * sext(tempm:2); dest = (macl s< 0); src = zext(1*(tempm s>= 0)); tempn = sext(-1*(tempm s>= 0)); src = src + zext(dest); macl = macl + tempm; ans = (macl s< 0); ans = ans + dest; # if (S == 1) if($(S_FLAG) != 1) goto ; if(ans != 1) goto ; @if SH_VERSION == "1" if(src != 0 && src != 2) goto ; mach = mach | 0x00000001; @endif if(src == 0) goto ; if(src == 2) goto ; goto ; macl = 0x7FFFFFFF; goto ; macl = 0x80000000; goto ; # if (S != 1) mach = mach + tempn; macl = macl + zext(1*(templ s> macl)); @if SH_VERSION == "1" if((mach & 0x00000200) == 0) goto ; mach = mach | 0xFFFFFC00; goto ; mach = mach & 0x000003FF; @endif } @if (SH_VERSION == "2") || (SH_VERSION == "2A") :mul.l rm_04_07,rn_08_11 is opcode_12_15=0b0000 & rn_08_11 & rm_04_07 & opcode_00_03=0b0111 { macl = rn_08_11 * rm_04_07; } @endif @if SH_VERSION == "2A" :mulr r0, rn_08_11 is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b10000000 { rn_08_11 = r0 * rn_08_11; } @endif :muls.w rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1111 { macl = sext(rn_08_11:2) * sext(rm_04_07:2); } :mulu.w rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1110 { macl = zext(rn_08_11:2) * zext(rm_04_07:2); } :neg rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1011 { rn_08_11 = -rm_04_07; } :negc rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b1010 { local temp:4 = -rm_04_07; rn_08_11 = temp - zext($(T_FLAG)); $(T_FLAG) = (0 != temp) || (temp < rn_08_11); } :sub rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b1000 { rn_08_11 = rn_08_11 - rm_04_07; } :subc rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 &
opcode_00_03=0b1010 { local temp0; local temp1; temp1 = rn_08_11 - rm_04_07; temp0 = rn_08_11; rn_08_11 = temp1 - zext($(T_FLAG)); $(T_FLAG) = (temp0 < temp1 || temp1 < rn_08_11); } :subv rm_04_07,rn_08_11 is opcode_12_15=0b0011 & rn_08_11 & rm_04_07 & opcode_00_03=0b1011 { local dest; local src; local ans; dest = (rn_08_11 s< 0); src = (rm_04_07 s< 0); src = src + dest; rn_08_11 = rn_08_11 - rm_04_07; ans = (rn_08_11 s< 0); ans = ans + dest; $(T_FLAG) = (src == 1) && (ans == 1); } # # Logic Operation Instructions # :and rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1001 { rn_08_11 = rn_08_11 & rm_04_07; } :and imm_00_07,r0 is r0 & opcode_08_15=0b11001001 & imm_00_07 { r0 = (r0 & zext(imm_00_07:1)); } :and.b imm_00_07,@(r0,gbr) is r0 & gbr & opcode_08_15=0b11001101 & imm_00_07 { local temp:1 = *:1 (gbr + r0); temp = temp & imm_00_07:1; *:1 (gbr + r0) = temp; } :not rm_04_07,rn_08_11 is opcode_12_15=0b0110 & rn_08_11 & rm_04_07 & opcode_00_03=0b0111 { rn_08_11 = ~rm_04_07; } :or rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1011 { rn_08_11 = rn_08_11 | rm_04_07; } :or imm_00_07,r0 is r0 & opcode_08_15=0b11001011 & imm_00_07 { r0 = r0 | imm_00_07:4; } :or.b imm_00_07,@(r0,gbr) is r0 & gbr & opcode_08_15=0b11001111 & imm_00_07 { local temp:1 = *:1 (gbr + r0); temp = temp | imm_00_07:1; *:1 (gbr + r0) = temp; } :tas.b @rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00011011 { local temp = *:1 rn_08_11; $(T_FLAG) = (temp == 0); temp = temp | 0x80; *:1 rn_08_11 = temp; } :tst rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1000 { $(T_FLAG) = ((rm_04_07 & rn_08_11) == 0); } :tst imm_00_07,r0 is r0 & opcode_08_15=0b11001000 & imm_00_07 { local temp = r0 & (imm_00_07 & 0x000000FF); $(T_FLAG) = (temp == 0); } :tst.b imm_00_07,@(r0,gbr) is r0 & gbr & opcode_08_15=0b11001100 & imm_00_07 { local temp = *:1 (gbr + r0); temp = temp & (imm_00_07 & 0x000000FF); $(T_FLAG)
= (temp == 0); } :xor rm_04_07,rn_08_11 is opcode_12_15=0b0010 & rn_08_11 & rm_04_07 & opcode_00_03=0b1010 { rn_08_11 = rn_08_11 ^ rm_04_07; } :xor imm_00_07,r0 is r0 & opcode_08_15=0b11001010 & imm_00_07 { r0 = r0 ^ (imm_00_07 & 0x000000FF); } :xor.b imm_00_07,@(r0,gbr) is r0 & gbr & opcode_08_15=0b11001110 & imm_00_07 { local temp = *:1 (gbr + r0); temp = temp & (imm_00_07 & 0x000000FF); *:1 (gbr + r0) = temp; } # #Shift Instructions # :rotcl rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00100100 { local temp:1; temp = ((rn_08_11 & 0x80000000) != 0); rn_08_11 = (rn_08_11 << 1) | zext($(T_FLAG)); $(T_FLAG) = temp; } :rotcr rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00100101 { local temp:1; temp = !((rn_08_11 & 1) == 0); rn_08_11= (rn_08_11 >> 1) | (0x80000000 * zext($(T_FLAG))); $(T_FLAG) = temp; } :rotl rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00000100 { $(T_FLAG) = ((rn_08_11 & 0x80000000) != 0); rn_08_11 = (rn_08_11 << 1) | zext($(T_FLAG)); } :rotr rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00000101 { $(T_FLAG) = ((rn_08_11 & 0x1) != 0); rn_08_11 = (rn_08_11 >> 1) | (rn_08_11 << 31); } @if SH_VERSION == "2A" :shad rm_04_07, rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & rm_04_07 & opcode_00_03=0b1100 { if (rm_04_07 s> 0) goto ; if (rm_04_07 s<= -32) goto ; # shift right rn_08_11 = rn_08_11 s>> -rm_04_07; goto ; rn_08_11 = rn_08_11 << (rm_04_07 & 0x0000001F); goto ; rn_08_11 = -1 * zext(rn_08_11 s< 0); } @endif :shal rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00100000 { # clear or set T $(T_FLAG) = ((rn_08_11 & 0x80000000) != 0); rn_08_11 = rn_08_11 << 1; } :shar rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00100001 { $(T_FLAG) = ((rn_08_11 & 1) == 1); rn_08_11 = rn_08_11 s>> 1; } @if SH_VERSION == "2A" :shld rm_04_07, rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & rm_04_07 & opcode_00_03=0b1101 { if (rm_04_07 s> 0) goto ; if (rm_04_07 s<= -32) goto ; # shift right
rn_08_11 = rn_08_11 >> -rm_04_07; goto ; rn_08_11 = rn_08_11 << (rm_04_07 & 0x0000001F); goto ; rn_08_11 = 0; } @endif :shll rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00000000 { # clear or set T $(T_FLAG) = ((rn_08_11 & 0x80000000) != 0); rn_08_11 = rn_08_11 << 1; } :shll2 rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00001000 { rn_08_11 = rn_08_11 << 2; } :shll8 rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00011000 { rn_08_11 = rn_08_11 << 8; } :shll16 rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00101000 { rn_08_11 = rn_08_11 << 16; } :shlr rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00000001 { # clear or set T $(T_FLAG) = (rn_08_11 & 1) == 1; rn_08_11 = rn_08_11 >> 1; } :shlr2 rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00001001 { rn_08_11 = rn_08_11 >> 2; } :shlr8 rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00011001 { rn_08_11 = rn_08_11 >> 8; } :shlr16 rn_08_11 is opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00101001 { rn_08_11 = rn_08_11 >> 16; } # # Branch Instructions # :bf target00_07 is opcode_08_15=0b10001011 & target00_07 { if ($(T_FLAG) == 0) goto target00_07; } @if (SH_VERSION == "2") || (SH_VERSION == "2A") :bf"/s" target00_07 is opcode_08_15=0b10001111 & target00_07 { local cond = $(T_FLAG); delayslot(1); if (cond==0) goto target00_07; } @endif :bt target00_07 is opcode_08_15=0b10001001 & target00_07 { if ($(T_FLAG) == 1) goto target00_07; } @if (SH_VERSION == "2") || (SH_VERSION == "2A") :bt"/s" target00_07 is opcode_08_15=0b10001101 & target00_07 { local cond = $(T_FLAG); delayslot(1); if (cond==1) goto target00_07; } @endif :bra target00_11 is opcode_12_15=0b1010 & target00_11 { delayslot(1); goto target00_11; } @if (SH_VERSION == "2") || (SH_VERSION == "2A") :braf rm_08_11 is opcode_12_15=0b0000 & rm_08_11 & opcode_00_07=0b00100011 { local dest:4 = inst_start + 4 + rm_08_11; delayslot(1); goto [dest]; } @endif :bsr target00_11 is
opcode_12_15=0b1011 & target00_11 { local _pr:4 = inst_start + 4; delayslot(1); pr = _pr; call target00_11; } @if (SH_VERSION == "2") || (SH_VERSION == "2A") :bsrf rm_08_11 is opcode_12_15=0b0000 & rm_08_11 & opcode_00_07=0b00000011 { local _pr = inst_start + 4; local dest = rm_08_11 + inst_start + 4; delayslot(1); pr = _pr; call [dest]; } @endif :jmp @rm_08_11 is opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00101011 { local _pc:4 = rm_08_11; delayslot(1); pc = _pc; goto [pc]; } :jsr @rm_08_11 is opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00001011 { local _pr:4 = inst_start + 4; local _pc:4 = rm_08_11; delayslot(1); pr = _pr; pc = _pc; call [_pc]; } :rts is opcode_00_15=0b0000000000001011 { local _pc = pr; delayslot(1); pc = _pc; return [pc]; } @if SH_VERSION == "2A" # JSR/N @Rm 0100mmmm01001011 PC - 2 → PR, Rm → PC :jsr"/n" @rm_08_11 is opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b01001011 { pr = inst_next; call [rm_08_11]; } # JSR/N @@(disp8, TBR) 10000011dddddddd PC - 2 → PR, (disp×4+TBR) → PC :jsr"/n" @@(disp, tbr) is tbr & opcode_08_15=0b10000011 & disp_00_07 [ disp = disp_00_07*4; ] { pr = inst_next; call [tbr + disp*4]; } # RTS/N 0000000001101011 PR → PC :rts"/n" is opcode_00_15=0b0000000001101011 { return [pr]; } :rtv"/n" rm_08_11 is opcode_12_15=0b0000 & rm_08_11 & opcode_00_07=0b01111011 { r0 = rm_08_11; return [pr]; } @endif # # System Control Instructions # :clrmac is opcode_00_15=0b0000000000101000 { mach = 0; macl = 0; } :clrt is opcode_00_15=0b0000000000001000 { $(T_FLAG) = 0; } @if SH_VERSION == "2A" :ldbank @rm_08_11, r0 is r0 & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b11100101 { local cnt = *:4 (rm_08_11); local bn = (cnt & 0x0000FF80) >> 7; local en = (cnt & 0x0000007C) >> 2; local off = (bn * 80) + en * 4; local rb = &resbank_base + off; r0 = *[register]:4 (rb); } :stbank r0, @rn_08_11 is r0 & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b11100001 { local cnt = *:4 (rn_08_11); local bn = (cnt & 0x0000FF80) >> 7; local en =
(cnt & 0x0000007C) >> 2; local off = (bn * 80) + en * 4; local rb = &resbank_base + off; *[register]:4 (rb) = r0; } :resbank is opcode_00_15=0b0000000001011011 { # This can be left as NOP, as it's used for saving/restoring context on interrupts r0 = r0; } @endif :ldc rm_08_11,sr is sr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00001110 { sr = rm_08_11 & 0x0FFF0FFF; } :ldc.l @rm_08_11+,sr is sr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00000111 { sr = *rm_08_11 & 0x0FFF0FFF; rm_08_11 = rm_08_11 + 4; } :ldc rm_08_11,gbr is gbr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00011110 { gbr = rm_08_11; } @if SH_VERSION == "2A" :ldc rm_08_11, tbr is tbr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b01001010 { tbr = rm_08_11; } @endif :ldc.l @rm_08_11+,gbr is gbr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00010111 { gbr = *rm_08_11; rm_08_11 = rm_08_11 + 4; } :ldc rm_08_11,vbr is vbr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00101110 { vbr = rm_08_11; } :ldc.l @rm_08_11+,vbr is vbr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00100111 { vbr = *rm_08_11; rm_08_11 = rm_08_11 + 4; } :lds rm_08_11,mach is mach & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00001010 { mach = rm_08_11; @if SH_VERSION == "1" # sign extend 10 bit signed value from rm mach = (mach << (32-10)) s>> (32-10); @endif } :lds.l @rm_08_11+,mach is mach & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00000110 { mach = *rm_08_11; @if SH_VERSION == "1" # sign extend 10 bit signed value from rm mach = (mach << (32-10)) s>> (32-10); @endif rm_08_11 = rm_08_11 + 4; } :lds rm_08_11,macl is macl & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00011010 { macl = rm_08_11; } :lds.l @rm_08_11+,macl is macl & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00010110 { macl = *rm_08_11; rm_08_11 = rm_08_11 + 4; } :lds rm_08_11,pr is pr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b00101010 { pr = rm_08_11; } :lds.l @rm_08_11+,pr is pr & opcode_12_15=0b0100 & rm_08_11
& opcode_00_07=0b00100110 { pr = *rm_08_11; rm_08_11 = rm_08_11 + 4; } :nop is opcode_00_15=0b0000000000001001 { # FIXME: intentional nop r0 = r0; # do this to suppress warning } :rte is opcode_00_15=0b0000000000101011 { _pc:4 = *r15; r15 = r15 + 4; _sr:4 = *r15 & 0x000063F3; r15 = r15 + 4; delayslot(1); pc = _pc; sr = _sr; return [pc]; } :sett is opcode_00_15=0b0000000000011000 { $(T_FLAG) = 1; } define pcodeop Sleep_Standby; :sleep is opcode_00_15=0b0000000000011011 { Sleep_Standby(); } :stc sr,rn_08_11 is sr & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00000010 { rn_08_11 = sr; } :stc.l sr,@-rn_08_11 is sr & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00000011 { rn_08_11 = rn_08_11 -4; *rn_08_11 = sr; } :stc gbr,rn_08_11 is gbr & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00010010 { rn_08_11 = gbr; } @if SH_VERSION == "2A" :stc tbr,rn_08_11 is tbr & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b01001010 { rn_08_11 = tbr; } @endif :stc.l gbr,@-rn_08_11 is gbr & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00010011 { rn_08_11 = rn_08_11 -4; *rn_08_11 = gbr; } :stc vbr,rn_08_11 is vbr & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00100010 { rn_08_11 = vbr; } :stc.l vbr,@-rn_08_11 is vbr & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00100011 { rn_08_11 = rn_08_11 -4; *rn_08_11 = vbr; } :sts mach,rn_08_11 is mach & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00001010 { rn_08_11 = mach; @if SH_VERSION == "1" # sign extend 10 bit signed value from rm rn_08_11 = (rn_08_11 << (32-10)) s>> (32-10); @endif } :sts.l mach,@-rn_08_11 is mach & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00000010 { rn_08_11 = rn_08_11 - 4; local temp = mach; @if SH_VERSION == "1" # sign extend 10 bit signed value from rm temp = (temp << (32-10)) s>> (32-10); @endif *rn_08_11 = temp; } :sts macl,rn_08_11 is macl & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00011010 { rn_08_11 = macl; } :sts.l macl,@-rn_08_11 is macl & opcode_12_15=0b0100 & rn_08_11 &
opcode_00_07=0b00010010 { rn_08_11 = rn_08_11 -4; *rn_08_11 = macl; } :sts pr,rn_08_11 is pr & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b00101010 { rn_08_11 = pr; } :sts.l pr,@-rn_08_11 is pr & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b00100010 { rn_08_11 = rn_08_11 -4; *rn_08_11 = pr; } :trapa imm_00_07 is opcode_08_15=0b11000011 & imm_00_07 { r15 = r15 - 4; *r15 = sr; r15 = r15 - 4; dest:4 = inst_next; *r15 = dest; dest = *(vbr + (imm_00_07 * 4)); call [dest]; } @if defined(FPU) # # Floating-Point Instructions # # FABS FRn 1111nnnn01011101 |FRn| → FRn :fabs ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b01011101 { ffrn_08_11 = abs(ffrn_08_11); } # TODO: FABS DRn 1111nnn001011101 |DRn| → DRn # FADD FRm, FRn 1111nnnnmmmm0000 FRn + FRm → FRn :fadd ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b0000 { ffrn_08_11 = ffrn_08_11 f+ ffrm_04_07; } # TODO: FADD DRm, DRn 1111nnn0mmm00000 DRn + DRm → DRn # FCMP/EQ FRm, FRn 1111nnnnmmmm0100 (FRn=FRm)? 1:0 → T :fcmp"/eq" ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b0100 { $(T_FLAG) = (ffrn_08_11 f== ffrm_04_07); } # TODO: FCMP/EQ DRm, DRn 1111nnn0mmm00100 (DRn=DRm)? 1:0 → T # FCMP/GT FRm, FRn 1111nnnnmmmm0101 (FRn>FRm)? 1:0 → T :fcmp"/gt" ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b0101 { $(T_FLAG) = (ffrn_08_11 f> ffrm_04_07); } # TODO: FCMP/GT DRm, DRn 1111nnn0mmm00101 (DRn>DRm)?
1:0 → T # FCNVDS DRm, FPUL 1111mmm010111101 (float) DRm → FPUL :fcnvds fdrm_09_11, fpul is fpul & fop_12_15=0b1111 & fdrm_09_11 & fop_08_08=0b0 & fop_00_07=0b10111101 { fpul = float2float(fdrm_09_11); } # FCNVSD FPUL, DRn 1111nnn010101101 (double) FPUL → DRn :fcnvsd fpul, fdrn_09_11 is fpul & fop_12_15=0b1111 & fdrn_09_11 & fop_08_08=0b0 & fop_00_07=0b10101101 { fdrn_09_11 = float2float(fpul); } # FDIV FRm, FRn 1111nnnnmmmm0011 FRn/FRm → FRn :fdiv ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b0011 { ffrn_08_11 = ffrn_08_11 f/ ffrm_04_07; } # TODO: FDIV DRm, DRn 1111nnn0mmm00011 DRn/DRm → DRn # FLDI0 FRn 1111nnnn10001101 0 × 00000000 → FRn :fldi0 ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b10001101 { ffrn_08_11 = 0x00000000; } # FLDI1 FRn 1111nnnn10011101 0 × 3F800000 → FRn :fldi1 ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b10011101 { ffrn_08_11 = 0x3F800000; } # FLDS FRm, FPUL 1111mmmm00011101 FRm → FPUL :flds ffrm_08_11, fpul is fpul & fop_12_15=0b1111 & ffrm_08_11 & fop_00_07=0b00011101 { fpul = ffrm_08_11; } # FLOAT FPUL,FRn 1111nnnn00101101 (float) FPUL → FRn :float fpul, ffrn_08_11 is fpul & fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b00101101 { ffrn_08_11 = int2float(fpul); } # TODO: FLOAT FPUL,DRn 1111nnn000101101 (double) FPUL → DRn # FMAC FR0, FRm, FRn 1111nnnnmmmm1110 FR0 × FRm + FRn → FRn :fmac fr0, ffrm_04_07, ffrn_08_11 is fr0 & fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b1110 { ffrn_08_11 = ffrn_08_11 f+ (ffrm_04_07 f* fr0); } # FMOV FRm, FRn 1111nnnnmmmm1100 FRm → FRn :fmov ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b1100 { ffrn_08_11 = ffrm_04_07; } # TODO: FMOV DRm, DRn 1111nnn0mmm01100 DRm → DRn # FMOV.S @(R0, Rm), FRn 1111nnnnmmmm0110 (R0+Rm) → FRn :fmov.s @(r0, f_rm_04_07), ffrn_08_11 is r0 & fop_12_15=0b1111 & ffrn_08_11 & f_rm_04_07 & fop_00_03=0b0110 { ffrn_08_11 = *:4 (r0 + f_rm_04_07); } # TODO: FMOV.D @(R0, Rm), DRn
1111nnn0mmmm0110 (R0+Rm) → DRn # FMOV.S @Rm+, FRn 1111nnnnmmmm1001 (Rm) → FRn, Rm+ = 4 :fmov.s @f_rm_04_07+, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & f_rm_04_07 & fop_00_03=0b1001 { ffrn_08_11 = *:4 (f_rm_04_07); f_rm_04_07 = f_rm_04_07 + 4; } # TODO: FMOV.D @Rm+, DRn 1111nnn0mmmm1001 (Rm) → DRn, Rm+ = 8 # FMOV.S @Rm, FRn 1111nnnnmmmm1000 (Rm) → FRn :fmov.s @f_rm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & f_rm_04_07 & fop_00_03=0b1000 { ffrn_08_11 = *:4 (f_rm_04_07); } # TODO: FMOV.D @Rm, DRn 1111nnn0mmmm1000 (Rm) → DRn # FMOV.S @(disp12,Rm),FRn 0011nnnnmmmm0001 0111dddddddddddd (disp×4+Rm) → FRn :fmov.s @(disp12, lf_rm_20_23), lffrn_24_27 is lfop_28_31=0b0011 & lffrn_24_27 & lf_rm_20_23 & lfop_12_19=0b00010111 & lfdisp_00_11 [ disp12 = lfdisp_00_11 * 4; ] { lffrn_24_27 = *:4 (disp12 + lf_rm_20_23); } # TODO: FMOV.D @(disp12,Rm),DRn 0011nnn0mmmm0001 0111dddddddddddd (disp×8+Rm) → DRn # FMOV.S FRm, @( R0,Rn ) 1111nnnnmmmm0111 FRm → (R0+Rn) :fmov.s ffrm_04_07, @( r0, f_rn_08_11 ) is r0 & fop_12_15=0b1111 & f_rn_08_11 & ffrm_04_07 & fop_00_03=0b0111 { *:4 (f_rn_08_11 + r0) = ffrm_04_07; } # TODO: FMOV.D DRm, @( R0,Rn ) 1111nnnnmmm00111 DRm → (R0+Rn) # FMOV.S FRm, @-Rn 1111nnnnmmmm1011 Rn- = 4, FRm → (Rn) :fmov.s ffrm_04_07, @-f_rn_08_11 is fop_12_15=0b1111 & f_rn_08_11 & ffrm_04_07 & fop_00_03=0b1011 { f_rn_08_11 = f_rn_08_11 - 4; *:4 (f_rn_08_11) = ffrm_04_07; } # TODO: FMOV.D DRm, @-Rn 1111nnnnmmm01011 Rn- = 8, DRm → (Rn) # FMOV.S FRm, @Rn 1111nnnnmmmm1010 FRm → (Rn) :fmov.s ffrm_04_07, @f_rn_08_11 is fop_12_15=0b1111 & f_rn_08_11 & ffrm_04_07 & fop_00_03=0b1010 { *:4 (f_rn_08_11) = ffrm_04_07; } # TODO: FMOV.D DRm, @Rn 1111nnnnmmm01010 DRm → (Rn) # FMOV.S FRm, @(disp12,Rn) 0011nnnnmmmm0001 0011dddddddddddd FRm → (disp×4+Rn) :fmov.s lffrm_20_23, @(disp12, lf_rn_24_27) is lfop_28_31=0b0011 & lf_rn_24_27 & lffrm_20_23 & lfop_12_19=0b00010011 & lfdisp_00_11 [ disp12 = lfdisp_00_11 * 4; ] { *:4 (disp12 + lf_rn_24_27) = lffrm_20_23; } # TODO: FMOV.D DRm, 
@(disp12,Rn) 0011nnnnmmm000010 011dddddddddddd DRm → (disp×8+Rn) # FMUL FRm, FRn 1111nnnnmmmm0010 FRn × FRm → FRn :fmul ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b0010 { ffrn_08_11 = ffrn_08_11 f* ffrm_04_07; } # TODO: FMUL DRm, DRn 1111nnn0mmm00010 DRn × DRm → DRn # FNEG FRn 1111nnnn01001101 -FRn → FRn :fneg ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b01001101 { ffrn_08_11 = f- ffrn_08_11; } # TODO: FNEG DRn 1111nnn001001101 -DRn → DRn # FSCHG 1111001111111101 FPSCR.SZ = ~ FPSCR.SZ :fschg is fop_00_15=0b1111001111111101 { $(FP_SZ) = ~ $(FP_SZ); } # FSQRT FRn 1111nnnn01101101 √FRn → FRn :fsqrt ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b01101101 { ffrn_08_11 = sqrt(ffrn_08_11); } # TODO: FSQRT DRn 1111nnn001101101 √DRn → DRn # FSTS FPUL, FRn 1111nnnn00001101 FPUL → FRn :fsts fpul, ffrn_08_11 is fpul & fop_12_15=0b1111 & ffrn_08_11 & fop_00_07=0b00001101 { ffrn_08_11 = fpul; } # FSUB FRm, FRn 1111nnnnmmmm0001 FRn - FRm → FRn :fsub ffrm_04_07, ffrn_08_11 is fop_12_15=0b1111 & ffrn_08_11 & ffrm_04_07 & fop_00_03=0b0001 { ffrn_08_11 = ffrn_08_11 f- ffrm_04_07; } # TODO: FSUB DRm, DRn 1111nnn0mmm00001 DRn - DRm → DRn # FTRC FRm, FPUL 1111mmmm00111101 (long) FRm → FPUL :ftrc ffrm_08_11, fpul is fpul & fop_12_15=0b1111 & ffrm_08_11 & fop_00_07=0b00111101 { fpul = trunc(ffrm_08_11); } # FTRC DRm, FPUL 1111mmm000111101 (long) DRm → FPUL @endif @if defined(FPU) # # FPU-related CPU Instructions # # LDS Rm,FPSCR 0100mmmm01101010 Rm → FPSCR :lds rm_08_11, fpscr is fpscr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b01101010 { fpscr = rm_08_11; } # LDS Rm,FPUL 0100mmmm01011010 Rm → FPUL :lds rm_08_11, fpul is fpul & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b01011010 { fpul = rm_08_11; } # LDS.L @Rm+, FPSCR 0100mmmm01100110 (Rm) → FPSCR, Rm+ = 4 :lds.l @rm_08_11+, fpscr is fpscr & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b01100110 { fpscr = *:4 (rm_08_11); rm_08_11 = rm_08_11 + 4; } # LDS.L @Rm+, 
FPUL 0100mmmm01010110 (Rm) → FPUL, Rm+ = 4 :lds.l @rm_08_11+, fpul is fpul & opcode_12_15=0b0100 & rm_08_11 & opcode_00_07=0b01010110 { fpul = *:4 (rm_08_11); rm_08_11 = rm_08_11 + 4; } # STS FPSCR, Rn 0000nnnn01101010 FPSCR → Rn :sts fpscr, rn_08_11 is fpscr & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b01101010 { rn_08_11 = fpscr; } # STS FPUL,Rn 0000nnnn01011010 FPUL → Rn :sts fpul, rn_08_11 is fpul & opcode_12_15=0b0000 & rn_08_11 & opcode_00_07=0b01011010 { rn_08_11 = fpul; } # STS.L FPSCR,@-Rn 0100nnnn01100010 Rn- = 4, fpscr → (Rn) :sts.l fpscr, @-rn_08_11 is fpscr & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b01100010 { rn_08_11 = rn_08_11 - 4; *:4 (rn_08_11) = fpscr; } # STS.L FPUL,@-Rn 0100nnnn01010010 Rn- = 4, FPUL → (Rn) :sts.l fpul, @-rn_08_11 is fpul & opcode_12_15=0b0100 & rn_08_11 & opcode_00_07=0b01010010 { rn_08_11 = rn_08_11 - 4; *:4 (rn_08_11) = fpul; } # @if defined(FPU) @endif @if SH_VERSION == "2A" # # Bit Manipulation Instructions # # The BAND.B, BOR.B, and BXOR.B instructions perform logical operations between a bit in # memory and the T bit, and store the result in the T bit. The BCLR.B and BSET.B instructions # manipulate a bit in memory. The BST.B and BLD.B instructions execute a transfer between a bit # in memory and the T bit. The BANDNOT.B and BORNOT.B instructions perform logical # operations between the value resulting from inverting a bit in memory and the T bit, and store the # result in the T bit. The BLDNOT.B instruction inverts a bit in memory and stores the result in the # T bit. Bits other than the specified bit are not affected. 
#
# BAND.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 0100dddddddddddd (imm of (disp+Rn)) & T → T
# FIX(review): original action was ((b & (1 << imm3)) & T != 0).  In SLEIGH, != has
# higher precedence than &, so the (possibly multi-bit) masked value was ANDed with
# the 1-bit result of (T != 0); for any imm3 > 0 that is always 0.  Test the selected
# bit first (as BLD.B and BOR.B do), then AND with T.  Also added the comma missing
# from the display section, for consistency with the other bit instructions.
:band.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0100 & l_disp_00_11
{
	# read the byte, test bit imm3, AND the 0/1 test result into T
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	$(T_FLAG) = $(T_FLAG) & ((b & (1 << l_imm3_20_22)) != 0);
}

# BANDNOT.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 1100dddddddddddd ~ (imm of (disp+Rn)) & T → T
# FIX(review): original action was ((~ (b & (1 << imm3)) & T) != 0).  The complement
# of the masked byte has bits set at every position except (possibly) imm3, so for
# imm3 > 0 the expression reduced to T regardless of the memory bit.  T must be
# ANDed with the inverse of the selected bit.
:bandnot.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b1100 & l_disp_00_11
{
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	$(T_FLAG) = $(T_FLAG) & ((b & (1 << l_imm3_20_22)) == 0);
}

# BCLR.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 0000dddddddddddd 0 → (imm of (disp+Rn))
:bclr.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0000 & l_disp_00_11
{
	local addr = l_disp_00_11 + l_rn_24_27;
	local b = *:1 (addr);
	*:1 (addr) = b & (~(1 << l_imm3_20_22));
}

# BCLR #imm3, Rn 10000110nnnn0iii 0 → imm of Rn
:bclr "#"imm3_00_02, rn_04_07 is opcode_08_15=0b10000110 & rn_04_07 & opcode_03_03=0b0 & imm3_00_02
{
	rn_04_07 = rn_04_07 & (~(1 << imm3_00_02));
}

# BLD.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 0011dddddddddddd (imm of (disp+Rn)) → T
:bld.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0011 & l_disp_00_11
{
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	$(T_FLAG) = ((b & (1 << l_imm3_20_22)) != 0);
}

# BLD #imm3, Rn 10000111nnnn1iii imm of Rn → T
:bld "#"imm3_00_02, rn_04_07 is opcode_08_15=0b10000111 & rn_04_07 & opcode_03_03=0b1 & imm3_00_02
{
	local b = rn_04_07;
	$(T_FLAG) = ((b & (1 << imm3_00_02)) != 0);
}

# BLDNOT.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 1011dddddddddddd ~ (imm of (disp+Rn)) → T
:bldnot.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b1011 & l_disp_00_11
{
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	$(T_FLAG) = ((b & (1 << l_imm3_20_22)) == 0);
}

# BOR.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 0101dddddddddddd (imm of (disp+ Rn)) | T → T
:bor.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0101 & l_disp_00_11
{
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	local abit = b & (1 << l_imm3_20_22);
	$(T_FLAG) = $(T_FLAG) | (abit != 0);
}

# BORNOT.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 1101dddddddddddd ~ (imm of (disp+ Rn)) | T → T
:bornot.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b1101 & l_disp_00_11
{
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	local abit = b & (1 << l_imm3_20_22);
	$(T_FLAG) = $(T_FLAG) | (abit == 0);
}

# BSET.B #imm3, @(disp12,Rn) 0011nnnn0iii1001 0001dddddddddddd 1 → (imm of (disp+Rn))
:bset.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0001 & l_disp_00_11
{
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	local newb = b | (1 << l_imm3_20_22);
	*:1 (l_disp_00_11 + l_rn_24_27) = newb;
}

# BSET #imm3, Rn 10000110nnnn1iii 1 → imm of Rn
:bset "#"imm3_00_02, rn_04_07 is opcode_08_15=0b10000110 & rn_04_07 & opcode_03_03=0b1 & imm3_00_02
{
	rn_04_07 = rn_04_07 | (1 << imm3_00_02);
}

# BST.B #imm3 ,@(disp12,Rn) 0011nnnn0iii1001 0010dddddddddddd T → (imm of (disp+Rn))
:bst.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0010 & l_disp_00_11
{
	# branchless select: set the bit when T is set, clear it when T is clear
	local b = *:1 (l_disp_00_11 + l_rn_24_27);
	local ibit = 1 << l_imm3_20_22;
	b = (b | ibit) * ($(T_FLAG) != 0) + (b & (~ibit)) * ($(T_FLAG) == 0);
	*:1 (l_disp_00_11 + l_rn_24_27) = b;
}

# BST #imm3, Rn 10000111nnnn0iii T → imm of Rn
:bst "#"imm3_00_02, rn_04_07 is opcode_08_15=0b10000111 & rn_04_07 & opcode_03_03=0b0 & imm3_00_02
{
	# zext needed: the comparison result is 1 byte, Rn is 4 bytes
	local ibit = 1 << imm3_00_02;
	rn_04_07 = (rn_04_07 | ibit) * zext($(T_FLAG) != 0) + (rn_04_07 & (~ibit)) * zext($(T_FLAG) == 0);
}

# BXOR.B #imm3, @(disp12, Rn) 0011nnnn0iii1001 0110dddddddddddd (imm of (disp+ Rn)) ^ T → T
:bxor.b "#"l_imm3_20_22, @(l_disp_00_11, l_rn_24_27) is l_opcode_28_31=0b0011 & l_rn_24_27 & l_opcode_23_23=0b0 & l_imm3_20_22 & l_opcode_16_19=0b1001 & l_opcode_12_15=0b0110 & l_disp_00_11
{
	# extract bit to test
	local b = *:1 (l_rn_24_27 + l_disp_00_11);
	local abit = (b >> l_imm3_20_22) & 1;
	$(T_FLAG) = $(T_FLAG) ^ abit;
}

@endif


================================================
FILE: pypcode/processors/SuperH/data/languages/superh2a.cspec
================================================


================================================
FILE: pypcode/processors/SuperH4/data/languages/SuperH4.ldefs
================================================
SuperH-4(a) (SH4) big endian
SuperH-4(a) (SH4) little endian


================================================
FILE: pypcode/processors/SuperH4/data/languages/SuperH4.opinion
================================================


================================================
FILE: pypcode/processors/SuperH4/data/languages/SuperH4.pspec
================================================


================================================
FILE: pypcode/processors/SuperH4/data/languages/SuperH4.sinc
================================================
# This module defines SuperH version 4, but should work against versions 1,2, and 3.
# There is a SuperH version 4A (which has 4 byte instruction length) which has instructions incompatible
# with this.
# Based on "Renesas SH-4 Software Manual: Rev 6.00 2006.09 (i.e. rej09b0318_sh_4sm.pdf)
# Here's a nice webpage with all the insns clearly shown
# http://shared-ptr.com/sh_insns.html

# Address Space:
# SuperH 4A has a 29-bit and a 32-bit address space mode,
# and can do 32-bit addressing in 29-bit mode with virtual addressing.
# The SH4 has a 29-bit physical address space, but can do 32-bit with virtual addressing.
#
# WARNING:
# WARNING:
# WARNING: Currently set up for 29-bit only for computed jumps/calls. This needs to be configurable.
# WARNING:

# NOTE: SuperH 4 and floating point disassembly and decompiling precision.
# Many of the floating point instructions can do either single or double precision calculations depending
# on the flag FPSCR_PR at runtime. This means at disassembly stage we don't have all the information required
# to determine the arguments to some floating point instructions.

# NOTE: SuperH 4 and floating point disassembly and decompiling (fmov lengths).
# Some of the instructions can read and write floating point values in 4 or 8 byte values depending on the
# flag FPSCR_SZ. This means at disassembly stage we don't have all the information required to determine the
# arguments to some floating point instructions.
# Note that the FPSCR_PR flag is never examined during any of the fmov insns.

# NOTE: SuperH and banking
# When the RB flag is not set, R1->R7 come from bank0, and when RB is set, R1->R7 come from bank1. The RB mode is
# set in privileged mode. We don't currently simulate this behavior.

# NOTE: SuperH and floating point banking
# When the flag FPSCR_FR is not set, fr0->fr15 come from bank0, and when FPSCR_FR is set, fr0->fr15 come from bank1.
# We don't currently simulate this behavior.

# NOTE: SuperH and memory-mapped registers
# There are 7 32-bit control registers (SR, GBR, SSR, SPC, SGR, DBR, VBR); these are mapped to 2 different memory
# areas, 0x1C000000 and 0xFC000000. We don't currently simulate this behavior.
# NOTE: SuperH/Renesas return value convention.
# Renesas and gcc return most values from a function in r0 but floats are returned in fr0 and doubles in dr0.
# Ghidra calling spec has no way of specifying this behavior so such return values are not handled correctly.

# NOTE: SuperH/Renesas calling convention
# Renesas and gcc pass most values to a function via r4-r7 but floats are passed by fr4-fr7 and doubles in
# dr4, dr6, dr8, and dr10.

# NOTE: floating point errors
# In implementing the floating point pcode we ignored many of the possible error conditions floating point
# could cause. This allows us to produce much better looking decompiled code.

# NOTE: SuperH 4 Memory model
# SuperH 4 has a Memory Management Unit (i.e. MMU), which means that depending on mode the memory can
# look very different. We don't model this behavior. We also don't simulate the MMU address translation,
# so all addresses are raw.

# Basic ================================================================================

define endian=$(ENDIAN); # Defined in file that includes this file
define alignment=2;

define space ram type=ram_space size=4 default;
define space register type=register_space size=4;

# Registers ============================================================================
# TODO deal with the 2 banks of registers
# TODO move to the correct memory address

define register offset=0 size=4 [
	r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15
	R0_BANK R1_BANK R2_BANK R3_BANK R4_BANK R5_BANK R6_BANK R7_BANK
];

# This is where "BANK0" is with RB=0 (normal), causes duplicate pair error in SLEIGH
#define register offset=0 size=4 [
#	R0_BANK0 R1_BANK0 R2_BANK0 R3_BANK0 R4_BANK0 R5_BANK0 R6_BANK0 R7_BANK0
#];

# This is where "BANK1" is with RB=1 (privileged), causes duplicate pair error in SLEIGH
#define register offset=0 size=4 [
#	R0_BANK1 R1_BANK1 R2_BANK1 R3_BANK1 R4_BANK1 R5_BANK1 R6_BANK1 R7_BANK1
#];

@if ENDIAN == "big"
define register offset=512 size=4 [ fr0 fr1 fr2 fr3 fr4 fr5
fr6 fr7 fr8 fr9 fr10 fr11 fr12 fr13 fr14 fr15 xf0 xf1 xf2 xf3 xf4 xf5 xf6 xf7 xf8 xf9 xf10 xf11 xf12 xf13 xf14 xf15 ]; @endif @if ENDIAN == "little" define register offset=512 size=4 [ fr1 fr0 fr3 fr2 fr5 fr4 fr7 fr6 fr9 fr8 fr11 fr10 fr13 fr12 fr15 fr14 xf0 xf1 xf2 xf3 xf4 xf5 xf6 xf7 xf8 xf9 xf10 xf11 xf12 xf13 xf14 xf15 ]; @endif define register offset=512 size=8 [ dr0 dr2 dr4 dr6 dr8 dr10 dr12 dr14 xd0 xd2 xd4 xd6 xd8 xd10 xd12 xd14 ]; define register offset=512 size=16 [ fv0 fv4 fv8 fv12 ]; # Control registers define register offset=1024 size=4 [ GBR SR SSR SPC VBR SGR DBR ]; # SR component register fields (pseudo) define register offset=1536 size=1 [ MD RB BL FD M Q IMASK S T ]; # System registers define register offset=2048 size=4 [ MACH MACL PR PC FPSCR FPUL ]; # FPSCR component register fields (pseudo) define register offset=2560 size=1 [ FPSCR_RM FPSCR_FLAG FPSCR_ENABLE FPSCR_CAUSE FPSCR_DN FPSCR_PR FPSCR_SZ FPSCR_FR ]; @define T_FLAG "T" @define S_FLAG "S" @define IMASK "IMASK" @define Q_FLAG "Q" @define M_FLAG "M" @define FD_FLAG "FD" @define BL_FLAG "BL" @define RB_FLAG "RB" @define MD_FLAG "MD" @define FPSCR_RM "FPSCR_RM" @define FPSCR_FLAG "FPSCR_FLAG" @define FPSCR_ENABLE "FPSCR_ENABLE" @define FPSCR_CAUSE "FPSCR_CAUSE" @define FPSCR_DN "FPSCR_DN" @define FPSCR_PR "FPSCR_PR" @define FPSCR_SZ "FPSCR_SZ" @define FPSCR_FR "FPSCR_FR" # # SR pack and unpack support # # 0000 0000 0000 0000 0000 0000 0000 0001 @define T_MASK "0x00000001" # 0000 0000 0000 0000 0000 0000 0000 0010 @define S_MASK "0x00000002" # 0000 0000 0000 0000 0000 0000 1111 0000 @define IMASK_MASK "0x000000F0" # 0000 0000 0000 0000 0000 0001 0000 0000 @define Q_MASK "0x00000100" # 0000 0000 0000 0000 0000 0010 0000 0000 @define M_MASK "0x00000200" # 0000 0000 0000 0000 1000 0000 0000 0000 @define FD_MASK "0x00008000" # 0001 0000 0000 0000 0000 0000 0000 0000 @define BL_MASK "0x10000000" # 0010 0000 0000 0000 0000 0000 0000 0000 @define RB_MASK "0x20000000" # 0100 0000 0000 0000 0000 0000 
0000 0000 @define MD_MASK "0x40000000" @define T_SHIFT " 0" @define S_SHIFT " 1" @define IMASK_SHIFT " 4" @define Q_SHIFT " 8" @define M_SHIFT " 9" @define FD_SHIFT "15" @define BL_SHIFT "28" @define RB_SHIFT "29" @define MD_SHIFT "30" macro genSRregister() { SR = (zext(T) << $(T_SHIFT)) | (zext(S) << $(S_SHIFT)) | (zext(IMASK) << $(IMASK_SHIFT)) | (zext(Q) << $(Q_SHIFT)) | (zext(M) << $(M_SHIFT)) | (zext(FD) << $(FD_SHIFT)) | (zext(BL) << $(BL_SHIFT)) | (zext(RB) << $(RB_SHIFT)) | (zext(MD) << $(MD_SHIFT)); } macro splitSRregister() { splitTemp:4 = (SR & $(T_MASK)) >> $(T_SHIFT); T = splitTemp:1; splitTemp = (SR & $(S_MASK)) >> $(S_SHIFT); S = splitTemp:1; splitTemp = (SR & $(IMASK_MASK)) >> $(IMASK_SHIFT); IMASK = splitTemp:1; splitTemp = (SR & $(Q_MASK)) >> $(Q_SHIFT); Q = splitTemp:1; splitTemp = (SR & $(M_MASK)) >> $(M_SHIFT); M = splitTemp:1; splitTemp = (SR & $(FD_MASK)) >> $(FD_SHIFT); FD = splitTemp:1; splitTemp = (SR & $(BL_MASK)) >> $(BL_SHIFT); BL = splitTemp:1; splitTemp = (SR & $(RB_MASK)) >> $(RB_SHIFT); RB = splitTemp:1; splitTemp = (SR & $(MD_MASK)) >> $(MD_SHIFT); MD = splitTemp:1; } # # FPSCR pack and unpack support # @define FPSCR_RM_SHIFT " 0" @define FPSCR_FLAG_SHIFT " 2" @define FPSCR_ENABLE_SHIFT " 7" @define FPSCR_CAUSE_SHIFT "12" @define FPSCR_DN_SHIFT "18" @define FPSCR_PR_SHIFT "19" @define FPSCR_SZ_SHIFT "20" @define FPSCR_FR_SHIFT "21" # 0000 0000 0000 0000 0000 0000 0000 0011 @define FPSCR_RM_MASK "0x00000003" # 0000 0000 0000 0000 0000 0000 0111 1100 @define FPSCR_FLAG_MASK "0x0000007C" # 0000 0000 0000 0000 0000 1111 1000 0000 @define FPSCR_ENABLE_MASK "0x00000F80" # 0000 0000 0000 0011 1111 0000 0000 0000 @define FPSCR_CAUSE_MASK "0x0003F000" # 0000 0000 0000 0100 0000 0000 0000 0000 @define FPSCR_DN_MASK "0x00040000" # 0000 0000 0000 1000 0000 0000 0000 0000 @define FPSCR_PR_MASK "0x00080000" # 0000 0000 0001 0000 0000 0000 0000 0000 @define FPSCR_SZ_MASK "0x00100000" # 0000 0000 0010 0000 0000 0000 0000 0000 @define FPSCR_FR_MASK 
"0x00200000" # Bits 22-31 are not used macro genFPSCRregister() { FPSCR = (zext(FPSCR_RM) << $(FPSCR_RM_SHIFT)) | (zext(FPSCR_FLAG) << $(FPSCR_FLAG_SHIFT)) | (zext(FPSCR_ENABLE) << $(FPSCR_ENABLE_SHIFT)) | (zext(FPSCR_CAUSE) << $(FPSCR_CAUSE_SHIFT)) | (zext(FPSCR_DN) << $(FPSCR_DN_SHIFT)) | (zext(FPSCR_PR) << $(FPSCR_PR_SHIFT)) | (zext(FPSCR_SZ) << $(FPSCR_SZ_SHIFT)) | (zext(FPSCR_FR) << $(FPSCR_FR_SHIFT)); } macro splitFPSCRregister() { splitTemp:4 = (FPSCR & $(FPSCR_RM_MASK)) >> $(FPSCR_RM_SHIFT); FPSCR_RM = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_FLAG_MASK)) >> $(FPSCR_FLAG_SHIFT); FPSCR_FLAG = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_ENABLE_MASK)) >> $(FPSCR_ENABLE_SHIFT); FPSCR_ENABLE = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_CAUSE_MASK)) >> $(FPSCR_CAUSE_SHIFT); FPSCR_CAUSE = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_DN_MASK)) >> $(FPSCR_DN_SHIFT); FPSCR_DN = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_PR_MASK)) >> $(FPSCR_PR_SHIFT); FPSCR_PR = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_SZ_MASK)) >> $(FPSCR_SZ_SHIFT); FPSCR_SZ = splitTemp:1; splitTemp = (FPSCR & $(FPSCR_FR_MASK)) >> $(FPSCR_FR_SHIFT); FPSCR_FR = splitTemp:1; } # Fields ================================================================================= define token instr(16) OP_0 = (12,15) OP_1 = ( 0, 3) OP_2 = ( 8,15) OP_3 = ( 0,15) OP_4 = ( 0, 7) OP_5 = ( 0, 8) OP_6 = ( 8, 8) OP_7 = ( 0, 4) OP_8 = ( 0, 9) OP_9 = ( 7, 7) OP_10 = ( 4, 4) BANK = ( 4, 6) # bank register id M_0 = ( 4, 7) # register id M_1 = ( 5, 7) # register id M_2 = ( 8, 9) # register id FRM_0 = ( 4, 7) # float register id DRM_1 = ( 5, 7) # double register id XDM_1 = ( 5, 7) # double register id XDRM = ( 4, 7) # double register id FVM_2 = ( 8, 9) # fv register id N_0 = ( 8,11) # register id N_1 = ( 9,11) # register id N_2 = (10,11) # register id FRN_0 = ( 8,11) # float register id FRN_1 = ( 9,11) # float register id FRN_2 = ( 9,11) # float register id DRN_0 = ( 9,11) # double register id DRN_1 = ( 9,11) # double register id 
XDN_1 = ( 9,11) # double register id XDRN = ( 8,11) # float register id FVN_2 = (10,11) # fv register id I_0 = ( 0, 7) signed # immediate I_1 = ( 0,11) signed # immediate I_2 = ( 0, 3) signed # immediate U_0 = ( 0, 7) # immediate U_1 = ( 0,11) # immediate U_2 = ( 0, 3) # immediate ; # Context variables ==================================================== # Attach variables ===================================================== # attach normal registers attach variables [ N_0 M_0 ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 ]; # attach float registers attach variables [ FRN_0 FRM_0] [ fr0 fr1 fr2 fr3 fr4 fr5 fr6 fr7 fr8 fr9 fr10 fr11 fr12 fr13 fr14 fr15 ]; attach variables [ FRN_1 ] [ fr0 fr2 fr4 fr6 fr8 fr10 fr12 fr14 ]; attach variables [ FRN_2 ] [ fr1 fr3 fr5 fr7 fr9 fr11 fr13 fr15 ]; # attach double registers attach variables [ DRN_1 DRM_1 ] [ dr0 dr2 dr4 dr6 dr8 dr10 dr12 dr14 ]; # attach double registers attach variables [ DRN_0 ] [ dr0 dr2 dr4 dr6 dr8 dr10 dr12 dr14 ]; # attach extended double registers attach variables [ XDN_1 XDM_1 ] [ xd0 xd2 xd4 xd6 xd8 xd10 xd12 xd14 ]; attach variables [ XDRN XDRM ] [ dr0 xd0 dr2 xd2 dr4 xd4 dr6 xd6 dr8 xd8 dr10 xd10 dr12 xd12 dr14 xd14 ]; # attach vf registers attach variables [ FVN_2 FVM_2 ] [ fv0 fv4 fv8 fv12 ]; attach variables [ BANK ] [ R0_BANK R1_BANK R2_BANK R3_BANK R4_BANK R5_BANK R6_BANK R7_BANK ]; # Tables ================================================================ # Addressing Modes # # Register direct # Rn EA is Rn. # # Register indirect # @Rn Rn contains EA # # Register indirect with postincrement # @Rn+ Rn contains EA # After EA calculation: # increment Rn by 1 for byte, 2 for word, 4 for long word, 8 quadword operand # # Register indirect with predecrement # @-Rn Rn contains EA, # Before EA calculation: # increment Rn by 1 for byte, 2 for word, 4 for long word, 8 quadword operand # # Register indirect with displacement # @(disp:4, Rn) EA is Rn + 4-bit displacement disp added. 
disp is zero extended, # then multiplied by 1 for byte, 2 for word, 4 for long word operand size # # Indexed register indirect # @(R0, Rn) EA is Rn + R0 # # GBR indirect with displacement # @(disp:8,GBR) EA is GBR contents with 8-bit displacement added. # 8-bit displacement disp added. disp is zero extended, # then multiplied by 1 for byte, 2 for word, 4 for long word operand size # # Indexed GBR indirect # @(R0, GBR) EA is GBR + R0 # # PC-relative with displacement # @(disp:8, PC) EA is PC+4 + 8bit displacement. disp is zero extended, # then multiplied by 1 for byte, 2 for word, 4 for long word operand size # # PC-relative # disp:8 EA is PC+4 + 8-bit displacement. # disp is sign-extended and multiplied by 2. # # PC-relative # disp:12 EA is PC+4 + 12-bit displacement. # disp is sign-extended and multiplied by 2. # # Rn EA is PC+4 + Rn. # M_0t: M_0 is M_0 { export M_0; } N_0t: N_0 is N_0 { export N_0; } N_0tjmp: @^N_0 is N_0 { export N_0; } I_0t: "#"^I_0 is I_0 { tmp:4 = I_0; export tmp; } U_0t: "#"^U_0 is U_0 { tmp:4 = U_0; export tmp; } U_0t1: "#"^U_0^",@("^r0^","^GBR^")" is U_0 & r0 & GBR { tmp:1 = U_0; export tmp; } I_0t_r0: "#"I_0^","^r0 is I_0 & r0 { tmp:4 = I_0; export tmp; } U_0t_r0: "#"U_0^","^r0 is U_0 & r0 { tmp:4 = U_0; export tmp; } I_0tbranch: dest is I_0 [ dest = inst_start + I_0*2 + 4; ] { export *:4 dest; } I_1tbranch: dest is I_1 [ dest = inst_start + I_1*2 + 4; ] { export *:4 dest; } sr_N_0t: SR^","^N_0 is N_0 & SR { export N_0; } gbr_N_0t: GBR^","^N_0 is N_0 & GBR { export N_0; } vbr_N_0t: VBR^","^N_0 is N_0 & VBR { export N_0; } ssr_N_0t: SSR^","^N_0 is N_0 & SSR { export N_0; } spc_N_0t: SPC^","^N_0 is N_0 & SPC { export N_0; } sgr_N_0t: SGR^","^N_0 is N_0 & SGR { export N_0; } dbr_N_0t: DBR^","^N_0 is N_0 & DBR { export N_0; } sr_t: SR is OP_0 & SR { x:4 = 0; export x; } # dummy export, just looking for the display gbr_t: GBR is OP_0 & GBR { x:4 = 0; export x; } # dummy export, just looking for the display vbr_t: VBR is OP_0 & VBR { x:4 = 0; 
export x; } # dummy export, just looking for the display ssr_t: SSR is OP_0 & SSR { x:4 = 0; export x; } # dummy export, just looking for the display spc_t: SPC is OP_0 & SPC { x:4 = 0; export x; } # dummy export, just looking for the display sgr_t: SGR is OP_0 & SGR { x:4 = 0; export x; } # dummy export, just looking for the display dbr_t: DBR is OP_0 & DBR { x:4 = 0; export x; } # dummy export, just looking for the display N_0t_sr: N_0^","^SR is N_0 & SR { export N_0; } N_0t_gbr: N_0^","^GBR is N_0 & GBR { export N_0; } N_0t_vbr: N_0^","^VBR is N_0 & VBR { export N_0; } N_0t_ssr: N_0^","^SSR is N_0 & SSR { export N_0; } N_0t_spc: N_0^","^SPC is N_0 & SPC { export N_0; } # N_0t_sgr: N_0^","^SGR is N_0 & SGR { export N_0; } N_0t_dbr: N_0^","^DBR is N_0 & DBR { export N_0; } N_0t_bank: N_0 is N_0 { export N_0; } N_0t_sr1: @^N_0^"+,"^SR is N_0 & SR { export N_0; } N_0t_gbr1: @^N_0^"+,"^GBR is N_0 & GBR { export N_0; } N_0t_vbr1: @^N_0^"+,"^VBR is N_0 & VBR { export N_0; } N_0t_ssr1: @^N_0^"+,"^SSR is N_0 & SSR { export N_0; } N_0t_spc1: @^N_0^"+,"^SPC is N_0 & SPC { export N_0; } # N_0t_sgr1: @^N_0^"+,"^SGR is N_0 & SGR { export N_0;} N_0t_dbr1: @^N_0^"+,"^DBR is N_0 & DBR { export N_0; } N_0t_bank1: @^N_0^"+" is N_0 { export N_0; } FR0_t: fr0 is OP_0 & fr0 { export fr0; } XMTRX_t: "xmtrx" is OP_0 { x:4 = 0; export x; } # dummy export, just looking for the display mach_t: MACH is OP_0 & MACH { export MACH; } macl_t: MACL is OP_0 & MACL { export MACL; } fpul_t: FPUL is OP_0 & FPUL { export FPUL; } fpscr_t: "FPSCR" is OP_0 { x:4 = 0; export x; } # dummy export, just looking for the display mach_N_0t: MACH^","^N_0 is N_0 & MACH { export N_0; } macl_N_0t: MACL^","^N_0 is N_0 & MACL { export N_0; } pr_N_0t: PR^","^N_0 is N_0 & PR { export N_0; } fpul_N_0t: FPUL^","N_0 is N_0 & FPUL { export N_0; } fpscr_N_0t: "FPSCR"^","N_0 is N_0 { export N_0; } N_0t_mach: N_0^","^MACH is N_0 & MACH { export N_0; } N_0t_macl: N_0^","^MACL is N_0 & MACL { export N_0; } N_0t_pr: N_0^","^PR 
is N_0 & PR { export N_0; } N_0t_fpul: N_0^","^FPUL is N_0 & FPUL { export N_0; } N_0t_fpscr: N_0^",fpscr" is N_0 { export N_0; } N_0t_mach1: @^N_0^"+,"^MACH is N_0 & MACH { export N_0; } N_0t_macl1: @^N_0^"+,"^MACL is N_0 & MACL { export N_0; } N_0t_pr1: @^N_0^"+,"^PR is N_0 & PR { export N_0; } N_0t_fpul1: @^N_0^"+,"^FPUL is N_0 & FPUL { export N_0; } N_0t_fpscr1: @^N_0^"+,fpscr" is N_0 { export N_0; } M_0t_at1: @^M_0 is M_0 { export M_0; } N_0t_at1: @^N_0 is N_0 { export N_0; } M_0t_at: @^M_0^+ is M_0 { export M_0; } N_0t_at: @^N_0^+ is N_0 { export N_0; } FRM_0t: FRM_0 is FRM_0 { export FRM_0; } FRN_0t: FRN_0 is FRN_0 { export FRN_0; } DRM_1t: DRM_1 is DRM_1 { export DRM_1; } DRN_1t: DRN_1 is DRN_1 { export DRN_1; } FVM_2t: FVM_2 is FVM_2 { export FVM_2; } FVN_2t: FVN_2 is FVN_2 { export FVN_2; } N_0t_at_with_r0: "@("^r0^","^N_0^")" is N_0 & r0 { export N_0; } M_0t_at_with_r0: "@("^r0^","^M_0^")" is M_0 & r0 { export M_0; } N_0t_at_neg: "@-"^N_0 is N_0 { export N_0; } U_2t_M0_dispr01: "@("^disp^","^M_0^")" is U_2 & M_0 [ disp = U_2 * 1; ] { tmp4:4 = disp; export tmp4; } U_2t_M0_dispr02: "@("^disp^","^M_0^")" is U_2 & M_0 [ disp = U_2 * 2; ] { tmp4:4 = disp; export tmp4; } U_2t_M0_dispr04: "@("^disp^","^M_0^")" is U_2 & M_0 [ disp = U_2 * 4; ] { tmp4:4 = disp; export tmp4; } U_2t_N0_dispr04: "@("^disp^","^N_0^")" is U_2 & N_0 [ disp = U_2 * 4; ] { tmp4:4 = disp; export tmp4; } # Bug in SLEIGH, needed "* 1" U_0t_gbr_at_1: "@("^disp,GBR^")" is U_0 & GBR [ disp = U_0 * 1; ] { tmp4:4 = disp + GBR; export tmp4; } U_0t_gbr_at_2: "@("^disp,GBR^")" is U_0 & GBR [ disp = U_0 * 2; ] { tmp4:4 = disp + GBR; export tmp4; } U_0t_gbr_at_4: "@("^disp,GBR^")" is U_0 & GBR [ disp = U_0 * 4; ] { tmp4:4 = disp + GBR; export tmp4; } # Note: The 4 byte (MOVA) case needs the masking of the PC bottom 2 bits, page 345, paragraph 1: # "a value with the lower 2 bits adjusted to B00 is used in address calculation." 
# Note: Only the 4 byte case needs the masking of the PC bottom 2 bits, page 336, paragraph 3: # "A value with the lower 2 bits adjusted to B00 is used in address calculation." # (The 2 byte case always has the PC LSBit at 0 because all instructions are 2 byte aligned.) U_0t_2pc: dest is U_0 [ dest = inst_start + U_0*2 + 4; ] { export *:2 dest; } U_0t_4pc: dest is U_0 [ dest = ( inst_start & 0xfffffffc ) + U_0*4 + 4; ] { export *:4 dest; } BANKt: BANK is BANK { export BANK; } N_0txx: r0",@"^N_0 is r0 & N_0 { tmp:4 = N_0; export tmp; } # Constructors ======================================================================= # Binary Addition # pattern 0011nnnnmmmm1100 # text add , # arch arch_sh_up :add M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0xc { N_0t = N_0t + M_0t; } # Binary Addition # pattern 0111nnnniiiiiiii # text add #, # arch arch_sh_up :add I_0t,N_0t is OP_0=0x7 & N_0t & I_0t { N_0t = I_0t + N_0t; # NOTE I_0t already signed extended } # Binary Addition with Carry # pattern 0011nnnnmmmm1110 # text addc , # arch arch_sh_up :addc M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0xe { local Tcopy:4 = zext($(T_FLAG)); $(T_FLAG) = carry( N_0t, M_0t ); local result:4 = N_0t + M_0t; $(T_FLAG) = $(T_FLAG) || carry( result, Tcopy ); N_0t = result + Tcopy; } # Binary Addition with Overflow Check # pattern 0011nnnnmmmm1111 # text addv , # arch arch_sh_up :addv M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0xf { $(T_FLAG) = scarry(N_0t,M_0t); N_0t = N_0t + M_0t; } # Logical AND # pattern 0010nnnnmmmm1001 # text and , # arch arch_sh_up :and M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0x9 { N_0t = N_0t & M_0t; } # Logical AND # pattern 11001001iiiiiiii # text and #,R0 # arch arch_sh_up :and U_0t_r0 is OP_2=0xc9 & U_0t_r0 { r0 = r0 & U_0t_r0; } # Logical AND # pattern 11001101iiiiiiii # text and.b #,@(R0,GBR) # arch arch_sh_up :and.b U_0t1 is OP_2=0xcd & U_0t1 { *:1 (GBR + r0) = (*:1 (GBR + r0)) & U_0t1; } # Conditional Branch # pattern 10001011iiiiiiii # text bf # arch arch_sh_up 
:bf I_0tbranch is OP_2=0x8b & I_0tbranch
{
	if ($(T_FLAG) == 0) goto I_0tbranch;
}

# Conditional Branch with Delay
# pattern 10001111iiiiiiii
# text bf/s label
# arch arch_sh2_up
:bf^"/s" I_0tbranch is OP_2=0x8f & I_0tbranch
{
	# Latch T before the delay slot runs: the slot instruction may change it.
	local cond = $(T_FLAG);
	delayslot(1);
	if (cond == 0) goto I_0tbranch;
}

# Unconditional Branch
# pattern 1010iiiiiiiiiiii
# text bra label
# arch arch_sh_up
:bra I_1tbranch is OP_0=0xa & I_1tbranch
{
	delayslot(1);
	goto I_1tbranch;
}

# Unconditional Branch
# pattern 0000nnnn00100011
# text braf Rn
# arch arch_sh2_up
:braf N_0 is OP_0=0x0 & N_0 & OP_4=0x23
{
	# Target is computed from Rn before the delay slot executes.
	local dest = N_0 + inst_next;
	delayslot(1);
	goto [dest];
}

# Branch to Subroutine Procedure
# pattern 1011iiiiiiiiiiii
# text bsr label
# arch arch_sh_up
:bsr I_1tbranch is OP_0=0xb & I_1tbranch
{
	# BUG FIX: PR was never written here, although jsr and bsrf in this
	# file both set it.  BSR stores the return address in PR, which
	# rts / sts PR read later; without this the dataflow is wrong.
	PR = inst_next;
	delayslot(1);
	call I_1tbranch;
}

# Branch to Subroutine Procedure
# pattern 0000nnnn00000011
# text bsrf Rn
# arch arch_sh2_up
:bsrf N_0 is OP_0=0x0 & N_0 & OP_4=0x3
{
	PR = inst_next;
	local dest = N_0 + inst_next;
	delayslot(1);
	call [dest];
}

# Conditional Branch
# pattern 10001001iiiiiiii
# text bt label
# arch arch_sh_up
:bt I_0tbranch is OP_2=0x89 & I_0tbranch
{
	if ($(T_FLAG) == 1) goto I_0tbranch;
}

# Conditional Branch with Delay
# pattern 10001101iiiiiiii
# text bt/s label
# arch arch_sh2_up
:bt^"/s" I_0tbranch is OP_2=0x8d & I_0tbranch
{
	# Latch T before the delay slot runs: the slot instruction may change it.
	local cond = $(T_FLAG);
	delayslot(1);
	if (cond == 1) goto I_0tbranch;
}

# MAC Register Clear
# pattern 0000000000101000
# text clrmac
# arch arch_sh_up
:clrmac is OP_3=0x28
{
	MACH = 0;
	MACL = 0;
}

# S Bit Clear
# pattern 0000000001001000
# text clrs
# arch arch_sh_up
:clrs is OP_3=0x48
{
	$(S_FLAG) = 0;
}

# T Bit Clear
# pattern 0000000000001000
# text clrt
# arch arch_sh_up
:clrt is OP_3=0x8
{
	$(T_FLAG) = 0;
}

# Compare
# pattern 0011nnnnmmmm0000
# text cmp/eq Rm,Rn
# arch arch_sh_up
:cmp^"/eq" M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0x0
{
	$(T_FLAG) = ( N_0t == M_0t );
}

# Compare
# pattern 0011nnnnmmmm0011
# text cmp/ge Rm,Rn
# arch arch_sh_up
:cmp^"/ge" M_0t,N_0t is OP_0=0x3 & N_0t & M_0t
& OP_1=0x3 { $(T_FLAG) = ( N_0t s>= M_0t ); } # Compare # pattern 0011nnnnmmmm0111 # text cmp/gt , # arch arch_sh_up :cmp^"/gt" M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0x7 { $(T_FLAG) = ( N_0t s> M_0t); } # Compare # pattern 0011nnnnmmmm0110 # text cmp/hi , # arch arch_sh_up :cmp^"/hi" M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0x6 { $(T_FLAG) = ( N_0t > M_0t ); } # Compare # pattern 0011nnnnmmmm0010 # text cmp/hs , # arch arch_sh_up :cmp^"/hs" M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0x2 { $(T_FLAG) = ( N_0t >= M_0t ); } # Compare # pattern 0100nnnn00010101 # text cmp/pl # arch arch_sh_up :cmp^"/pl" N_0t is OP_0=0x4 & N_0t & OP_4=0x15 { $(T_FLAG) = ( N_0t s> 0 ); } # Compare # pattern 0100nnnn00010001 # text cmp/pz # arch arch_sh_up :cmp^"/pz" N_0t is OP_0=0x4 & N_0t & OP_4=0x11 { $(T_FLAG) = ( N_0t s>= 0 ); } # Compare # pattern 0010nnnnmmmm1100 # text cmp/str , # arch arch_sh_up :cmp^"/str" M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0xc { temp:4 = M_0t ^ N_0t; HH:4 = (temp & 0xFF000000) >> 24; HL:4 = (temp & 0x00FF0000) >> 16; LH:4 = (temp & 0x0000FF00) >> 8; LL:4 = temp & 0x000000FF; $(T_FLAG) = (HH == 0) || (HL == 0 ) || (LH == 0) || (LL == 0); } # Compare # pattern 10001111iiiiiiii # pattern 10001000iiiiiiii # text cmp/eq #,R0 # arch arch_sh_up :cmp^"/eq" I_0t_r0 is OP_2=0x88 & I_0t_r0 { $(T_FLAG) = ( r0 == I_0t_r0 ); } # Initialization for Signed Division # pattern 0010nnnnmmmm0111 # text div0s , # arch arch_sh_up :div0s M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0x7 { $(Q_FLAG) = ( (N_0t & 0x80000000) != 0 ); $(M_FLAG) = ( (M_0t & 0x80000000) != 0 ); $(T_FLAG) = ( $(M_FLAG) != $(Q_FLAG) ); } # Initialization for Unsigned Division # pattern 0000000000011001 # text div0u # arch arch_sh_up :div0u is OP_3=0x19 { $(M_FLAG) = 0; $(Q_FLAG) = 0; $(T_FLAG) = 0; } # Division # pattern 0011nnnnmmmm0100 # text div1 , # arch arch_sh_up :div1 M_0t,N_0t is OP_0=0x3 & OP_1=0x4 & M_0t & N_0t { #@ifdef DIV1_ORIGINAL_IS_BROKEN # tmp2:4 = M_0t; # N_0t = N_0t << 1; # N_0t 
= N_0t | zext($(T_FLAG)); # ??? # tmp0:4 = N_0t; # N_0t = N_0t - zext((($(Q_FLAG)+$(M_FLAG))!=1))*tmp2 + zext(($(Q_FLAG)+$(M_FLAG))==1)*tmp2; # tmp1:1 = ( (($(Q_FLAG)+$(M_FLAG))!=1)*(N_0t > tmp0) + (($(Q_FLAG)+$(M_FLAG))==1)*(N_0t < tmp0)); # $(Q_FLAG) = ((0x80000000 & N_0t)!=0); # Q1:1 = tmp1*($(Q_FLAG)!=0) + (tmp1==0)*($(Q_FLAG)==0); # Q2:1 = tmp1*($(Q_FLAG)==0) + (tmp1==0)*($(Q_FLAG)!=0); # $(Q_FLAG) = (($(M_FLAG)==1)*Q1 + ($(M_FLAG)!=1)*Q2); # $(T_FLAG) = ($(Q_FLAG)==$(M_FLAG)); #@endif # DIV1_ORIGINAL_IS_BROKEN # DIV1_WITH_FORWARD_BRANCHES_WORKS_OK local Rm:4 = M_0t; local Rn:4 = N_0t; local old_Q:1 = $(Q_FLAG); $(Q_FLAG) = ((0x80000000 & Rn)!=0); local tmp2:4 = Rm; Rn = Rn << 1; Rn = Rn | zext($(T_FLAG)); local tmp0:4 = Rn; local tmp1:1 = 0; if (old_Q == 0) && ($(M_FLAG) == 0) goto ; if (old_Q == 0) && ($(M_FLAG) == 1) goto ; if (old_Q == 1) && ($(M_FLAG) == 0) goto ; # if (old_Q == 1) && ($(M_FLAG) == 1) ... Rn = Rn - tmp2; tmp1 = Rn > tmp0; if ($(Q_FLAG) == 0) goto ; # ($(Q_FLAG) == 1) $(Q_FLAG) = tmp1; goto ; $(Q_FLAG) = tmp1 == 0; goto ; # (old_Q == 1) && ($(M_FLAG) == 0) Rn = Rn + tmp2; tmp1 = Rn < tmp0; if ($(Q_FLAG) == 0) goto ; # ($(Q_FLAG) == 1) $(Q_FLAG) = tmp1 == 0; goto ; $(Q_FLAG) = tmp1; goto ; # (old_Q == 0) && ($(M_FLAG) == 1) Rn = Rn + tmp2; tmp1 = Rn < tmp0; if ($(Q_FLAG) == 0) goto ; # ($(Q_FLAG) == 1) $(Q_FLAG) = tmp1; goto ; $(Q_FLAG) = tmp1 == 0; goto ; # (old_Q == 0) && ($(M_FLAG) == 0) Rn = Rn - tmp2; tmp1 = Rn > tmp0; if ($(Q_FLAG) == 0) goto ; # ($(Q_FLAG) == 1) $(Q_FLAG) = tmp1 == 0; goto ; $(Q_FLAG) = tmp1; $(T_FLAG) = $(Q_FLAG) == $(M_FLAG); N_0t = Rn; # DIV1_WITH_GOTOs_WORKS_OK # TODO: the following is currently broken, it should be fixed to eliminate gotos in the code above #@ifdef DIV1_STRAIGHT_CODE # BROKEN # Rm:4 = M_0t; # Rn:4 = N_0t; # old_Q:1 = $(Q_FLAG); # $(Q_FLAG) = ((0x80000000 & Rn)!=0); # tmp2:4 = Rm; # Rn = Rn << 1; # Rn = Rn | zext($(T_FLAG)); # tmp0:4 = Rn; # # oldQM_10_01_bool:1 = ( ( (old_Q == 1) && ($(M_FLAG) 
== 0) ) || ( (old_Q == 1) && ($(M_FLAG) == 0) ) ); # oldQM_10_01:4 = zext(oldQM_10_01_bool) * 0xffffffff; # # oldQM_11_00_bool:1 = ( (old_Q == 1) && ($(M_FLAG) == 1) ) || ( (old_Q == 0) && ($(M_FLAG) == 0) ); # oldQM_11_00:4 = zext(oldQM_11_00_bool) * 0xffffffff; # Rn = (oldQM_10_01 & (Rn + tmp2)) | (oldQM_11_00 & (Rn - tmp2)); # tmp1:1 = ( (oldQM_11_00 != 0) && (Rn > tmp0) ) | ( (oldQM_10_01 != 0) && (Rn < tmp0) ); # QM_10_01:1 = ( ( ($(Q_FLAG) == 1) && ($(M_FLAG) == 0) ) || ( ($(Q_FLAG) == 1) && ($(M_FLAG) == 0) ) ) * 0xff; # QM_11_00:1 = ( ( ($(Q_FLAG) == 1) && ($(M_FLAG) == 1) ) || ( ($(Q_FLAG) == 0) && ($(M_FLAG) == 0) ) ) * 0xff; # $(Q_FLAG) = (QM_10_01 & tmp1) || (QM_11_00 & (tmp1 == 0)); # $(T_FLAG) = $(Q_FLAG) == $(M_FLAG); # N_0t = Rn; #@endif # DIV1_STRAIGHT_CODE } # Signed Double-Length Multiplication # pattern 0011nnnnmmmm1101 # text dmuls.l , # arch arch_sh2_up :dmuls.l M_0t,N_0t is OP_0=0x3 & OP_1=0xd & M_0t & N_0t { local temp:8 = sext(M_0t) * sext(N_0t); MACL = temp[0,32]; MACH = temp[32,32]; } # Unsigned Double-Length Multiplication # pattern 0011nnnnmmmm0101 # text dmulu.l , # arch arch_sh2_up :dmulu.l M_0t,N_0t is OP_0=0x3 & OP_1=0x5 & M_0t & N_0t { local temp:8 = zext(M_0t) * zext(N_0t); MACL = temp[0,32]; MACH = temp[32,32]; } # Decrement and Test # pattern 0100nnnn00010000 # text dt # arch arch_sh2_up :dt N_0t is OP_0=0x4 & N_0t & OP_4=0x10 { N_0t = N_0t - 1; $(T_FLAG) = ( N_0t == 0 ); } # Sign Extension # pattern 0110nnnnmmmm1110 # text exts.b , # arch arch_sh_up :exts.b M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0xe { N_0t = sext(M_0t:1); } # Sign Extension # pattern 0110nnnnmmmm1111 # text exts.w , # arch arch_sh_up :exts.w M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0xf { N_0t = sext(M_0t:2); } # Zero Extension # pattern 0110nnnnmmmm1100 # text extu.b , # arch arch_sh_up :extu.b M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0xc { N_0t = zext(M_0t:1); } # Zero Extension # pattern 0110nnnnmmmm1101 # text extu.w , # arch arch_sh_up :extu.w 
M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0xd { N_0t = zext(M_0t:2); } # Floating-Point Absolute Value # pattern 1111nnnn01011101 # text fabs # arch arch_sh2e_up :fabs FRN_0 is OP_0=0xf & FRN_0 & DRN_1 & OP_4=0x5d { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0 = abs(FRN_0); goto ; DRN_1 = abs(DRN_1); } # Floating-Point Addition # pattern 1111nnnnmmmm0000 # text fadd , # arch arch_sh2a_or_sh4_up :fadd FRM_0,FRN_0 is OP_0=0xf & FRN_0 & DRN_1 & FRM_0 & DRM_1 & OP_1=0x0 { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0 = FRN_0 f+ FRM_0; goto ; DRN_1 = DRN_1 f+ DRM_1; } # Floating-Point Comparison # pattern 1111nnnnmmmm0100 # text fcmp/eq , # arch arch_sh2e_up :fcmp^"/eq" FRM_0t,FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & FRM_0t & DRM_1t & OP_6 & OP_10 & OP_1=0x4 { if (!( $(FPSCR_PR) == 0 ) ) && (OP_6:1 == 0) && (OP_10:1 == 0) goto ; $(T_FLAG) = ( FRN_0t f== FRM_0t ); goto ; $(T_FLAG) = ( DRN_1t f== DRM_1t ); } # Floating-Point Comparison # pattern 1111nnnnmmmm0101 # text fcmp/gt , # arch arch_sh2e_up :fcmp^"/gt" FRM_0t,FRN_0t is OP_0=0xf & FRM_0t & DRM_1t & FRN_0t & DRN_1t & OP_1=0x5 { if (!( $(FPSCR_PR) == 0 )) goto ; $(T_FLAG) = ( FRN_0t f> FRM_0t ); goto ; $(T_FLAG) = ( DRN_1t f> DRM_1t ); } # Double-Precision to Single-Precision Conversion # pattern 1111nnn010111101 # text fcnvds ,FPUL # even though reserved to DblPrec, should decode, may not know at the time. # if this instruction shows up, it is most likely in DblPrec mode but not set # arch arch_sh2a_or_sh4_up :fcnvds DRN_1t,FPUL is OP_0=0xf & DRN_1t & OP_5=0x0bd & FPUL { # note: this instruction is undefined if not running in DblPrec mode. FPUL = float2float( DRN_1t ); } # Single-Precision to Double-Precision Conversion # pattern 1111nnn010101101 # text fcnvsd FPUL, # even though reserved to DblPrec, should decode, may not know at the time. 
# if this instruction shows up, it is most likely in DblPrec mode but not set # arch arch_sh2a_or_sh4_up :fcnvsd FPUL,DRN_1t is OP_0=0xf & DRN_1t & OP_5=0xad & FPUL { # note: this instruction is undefined if not running in DblPrec mode. DRN_1t = float2float( FPUL ); } # Floating-Point Division # pattern 1111nnnnmmmm0011 # text fdiv , # arch arch_sh2e_up :fdiv FRM_0t,FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & FRM_0t & DRM_1t & OP_1=0x3 { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0t = ( FRN_0t f/ FRM_0t ); goto ; DRN_1t = ( DRN_1t f/ DRM_1t ); } # Floating-Point Inner Product # pattern 1111nnmm11101101 # text fipr , # even though reserved to DblPrec, should decode, may not know at the time. # if this instruction shows up, it is most likely in DblPrec mode but not set # arch arch_sh4_up :fipr FVM_2t,FVN_2t is OP_0=0xf & FVN_2t & FVM_2t & OP_4=0xed { # note: this instruction is undefined if not running in SinglePrec mode. if (!( $(FPSCR_PR) == 0 )) goto ; # FVn dot FVm temp1:4 = ( *[register]:4 (&:4 FVN_2t + 0) f* *[register]:4 (&:4 FVM_2t + 0) ); temp1 = temp1 f+ ( *[register]:4 (&:4 FVN_2t + 4) f* *[register]:4 (&:4 FVM_2t + 4) ); temp1 = temp1 f+ ( *[register]:4 (&:4 FVN_2t + 8) f* *[register]:4 (&:4 FVM_2t + 8) ); temp1 = temp1 f+ ( *[register]:4 (&:4 FVN_2t + 12) f* *[register]:4 (&:4 FVM_2t + 12) ); # summation goes to FR[n + 3] *[register]:4 (&:4 FVN_2t + 12) = temp1; } # 0.0 Load # pattern 1111nnnn10001101 # text fldi0 # arch arch_sh2e_up # # Note- The manual says this applies only to single float destination regs # :fldi0 FRN_0t is OP_0=0xf & DRN_1t & FRN_0t & OP_4=0x8d { tmp1:4 = 0; FRN_0t = int2float(tmp1); } # 1.0 Load # pattern 1111nnnn10011101 # text fldi1 # arch arch_sh2e_up # # Note- the manual says FPSCR_PR applies only to single float regs, not to doubles. # Manual also says FPSCR_PR should be 0, but gcc generates code where it's set to 1. 
# :fldi1 FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & OP_4=0x9d { tmp1:4 = 1; FRN_0t = int2float(tmp1); } # Transfer to System Register # pattern 1111nnnn00011101 # text flds ,FPUL # arch # arch_sh2e_up # Note: Field usage indicates FR[n], not FR[m]. :flds FRN_0t,FPUL is OP_0=0xf & FRN_0t & OP_4=0x1d & FPUL { FPUL = FRN_0t; } # Integer to Floating-Point Conversion # pattern 1111nnnn00101101 # text float FPUL, # arch # arch_sh2e_up :float FPUL,FRN_0t is OP_0=0xf & OP_6 & FRN_0t & DRN_1t & OP_4=0x2d & FPUL { # If bit 6 is set, this is an odd FP register number, so need to do single float operation. # This is not in the manual but this seems to be the correct behavior. if (!( $(FPSCR_PR) == 0 ) ) && (OP_6:1 == 0x0) goto ; FRN_0t = int2float( FPUL ); goto ; DRN_1t = int2float( FPUL ); } # Floating-Point Multiply and Accumulate # pattern 1111nnnnmmmm1110 # text fmac FR0,, # arch arch_sh2e_up :fmac FR0_t,FRM_0t,FRN_0t is OP_0=0xf & FRN_0t & FRM_0t & OP_1=0xe & FR0_t { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0t = ( FR0_t f* FRM_0t ) f+ FRN_0t; } # Floating-Point Transfer #1-4 # pattern 1111nnnnmmmm1100 # text fmov , # arch arch_sh2e_up # # Note- The manual says all the fmov insns should only look at the FPSCR_SZ flag # to determine if this is a single or double float move. Ie, don't reference FPSCR_PR. 
# :fmov FRM_0,FRN_0 is OP_0=0xf & FRN_0 & XDRN & FRM_0 & XDRM & OP_1=0xc { if (!( $(FPSCR_SZ) == 0 )) goto ; FRN_0 = FRM_0; goto ; XDRN = XDRM; } # Floating-Point Transfer #5-6 # pattern 1111nnn0mmmm1000 # text fmov @, # arch arch_sh2e_up :fmov.s M_0t_at1,FRN_0 is OP_0=0xf & XDRN & FRN_0 & M_0t_at1 & OP_1=0x8 { if (!( $(FPSCR_SZ) == 0 )) goto ; FRN_0 = *:4 M_0t_at1; goto ; XDRN = *:8 M_0t_at1; } # Floating-Point Transfer #7-8 # pattern 1111nnnnmmmm1010 # text fmov ,@ # arch arch_sh2e_up # # Note- Manual says to ignore FPSCR_PR but FPSCR_SZ must = 0 # :fmov.s FRM_0,N_0t_at1 is OP_0=0xf & N_0t_at1 & FRM_0 & XDRM & OP_1=0xa { if (!( $(FPSCR_SZ) == 0 )) goto ; *:4 N_0t_at1 = FRM_0; goto ; *:8 N_0t_at1 = XDRM; } # Floating-Point Transfer #9-10 # pattern 1111nnnnmmmm1001 # text fmov @+, # arch arch_sh2e_up :fmov.s M_0t_at,FRN_0 is OP_0=0xf & XDRN & FRN_0 & M_0t_at & OP_1=0x9 { if (!( $(FPSCR_SZ) == 0 )) goto ; FRN_0 = *:4 M_0t_at; M_0t_at = M_0t_at + 4; goto ; XDRN = *:8 M_0t_at; M_0t_at = M_0t_at + 8; } # Floating-Point Transfer #11-12 # pattern 1111nnnnmmm01011 # text fmov ,@- # arch arch_sh2e_up :fmov.s FRM_0,N_0t_at_neg is OP_0=0xf & N_0t_at_neg & XDRM & FRM_0 & OP_1=0xb { if (!( $(FPSCR_SZ) == 0 )) goto ; N_0t_at_neg = N_0t_at_neg - 4; *:4 N_0t_at_neg = FRM_0; goto ; N_0t_at_neg = N_0t_at_neg - 8; *:8 N_0t_at_neg = XDRM; } # Floating-Point Transfer #13-14 # pattern 1111nnnnmmmm0110 # text fmov @(R0,), # arch arch_sh2e_up :fmov.s M_0t_at_with_r0,FRN_0 is OP_0=0xf & XDRN & FRN_0 & M_0t_at_with_r0 & OP_1=0x6 { if (!( $(FPSCR_SZ) == 0 )) goto ; FRN_0 = *:4 (r0 + M_0t_at_with_r0); goto ; XDRN = *:8 (r0 + M_0t_at_with_r0); } # Floating-Point Transfer #15-16 # pattern 1111nnnnmmmm0111 # text fmov @(R0,), # arch arch_sh2e_up :fmov FRM_0,N_0t_at_with_r0 is OP_0=0xf & N_0t_at_with_r0 & XDRM & FRM_0 & OP_1=0x7 { if (!( $(FPSCR_SZ) == 0 )) goto ; *:4 (r0 + N_0t_at_with_r0) = FRM_0; goto ; *:8 (r0 + N_0t_at_with_r0) = XDRM; } # Floating-Point Multiplication # pattern 
1111nnnnmmmm0010 # text fmul , # arch arch_sh2e_up :fmul FRM_0t,FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & FRM_0t & DRM_1t & OP_1=0x2 { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0t = FRN_0t f* FRM_0t; goto ; DRN_1t = DRN_1t f* DRM_1t; } # Floating-Point Sign Inversion # pattern 1111nnnn01001101 # text fneg # arch arch_sh2e_up :fneg FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & OP_6 & OP_4=0x4d { if (!( $(FPSCR_PR) == 0 )) && (OP_6:1 == 0) goto ; FRN_0t = f- FRN_0t; goto ; DRN_1t = f- DRN_1t; } # FR Bit Inversion # pattern 1111101111111101 # text frchg # arch # arch_sh4_up :frchg is OP_3=0xfbfd { if (!( $(FPSCR_PR) == 0 )) goto ; $(FPSCR_FR) = !$(FPSCR_FR); FPSCR = FPSCR ^ 0x00200000; } # SZ Bit Inversion # pattern 1111001111111101 # text fschg # arch arch_sh2a_or_sh4a_up (not in sh4) :fschg is OP_3=0xf3fd { if (!( $(FPSCR_PR) == 0 )) goto ; FPSCR = FPSCR ^ 0x00100000; } # PR Bit Inversion # pattern 1111011111111101 # text fpchg # arch arch_sh2a_or_sh4_up :fpchg is OP_3=0xf7fd { FPSCR = FPSCR ^ 0x00080000; } # Floating-Point Square Root # pattern 1111nnnn01101101 # text fsqrt # arch # arch_sh2a_or_sh3e_up :fsqrt FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & OP_4=0x6d { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0t = sqrt( FRN_0t ); goto ; DRN_1t = sqrt( DRN_1t ); } # Floating Point Square Reciprocal Approximate # pattern 1111nnnn01111101 # text fsrra # arch # arch_sh2a_or_sh3e_up :fsrra FRN_0 is OP_0=0xF & FRN_0 & OP_4=0x7D { if (( $(FPSCR_PR) != 0 )) goto inst_next; FRN_0 = int2float(1 :4) f/ sqrt( FRN_0 ); } define pcodeop sin; define pcodeop cos; # Floating Point Sine And Cosine Approximate # pattern 1111nnn011111101 # text fsca FPUL, # arch # arch_sh2a_or_sh3e_up :fsca FPUL,DRN_0 is OP_0=0xF & FPUL & DRN_0 & FRN_1 & FRN_2 & OP_4=0xFD { if (( $(FPSCR_PR) != 0 )) goto inst_next; angle:4 = int2float(0 :4); # float fraction:4 = 0x0000FFFF; # long fraction = fraction & FPUL; # extract sub-rotation angle = int2float(fraction); # convert to float two_pi:4 = 0x40c90fdb; # 6.2831855.. 
as 32-bit float angle = (two_pi f* angle) f/ int2float(65536 :4); # convert to radian local _sin:4 = sin(angle); # call fake sin & cos local _cos:4 = cos(angle); FRN_1 = float2float(_sin); # _sin goes to FR[n] FRN_2 = float2float(_cos); # _cos goes to FR[n+1] } # Transfer from System Register # pattern 1111nnnn00001101 # text fsts FPUL, # arch arch_sh2e_up :fsts FPUL,FRN_0t is OP_0=0xf & FRN_0t & OP_4=0xd & FPUL { FRN_0t = FPUL; } # Floating-Point Subtraction # pattern 1111nnnnmmmm0001 # text fsub , # arch arch_sh2e_up :fsub FRM_0t,FRN_0t is OP_0=0xf & FRN_0t & DRN_1t & FRM_0t & DRM_1t & OP_1=0x1 { if (!( $(FPSCR_PR) == 0 )) goto ; FRN_0t = FRN_0t f- FRM_0t; goto ; DRN_1t = DRN_1t f- DRM_1t; } # Conversion to Integer # pattern 1111nnnn00111101 # text ftrc ,FPUL # # arch arch_sh2e_up # Note: Field usage indicates FR[n], not FR[m]. :ftrc FRN_0t,FPUL is OP_0=0xf & FRN_0t & DRN_1t & OP_4=0x3d & FPUL { if (!( $(FPSCR_PR) == 0 )) goto ; FPUL = trunc( FRN_0t ); goto ; FPUL = trunc( DRN_1t ); } # Vector Transformation # pattern 1111nn0111111101 # text ftrv XMTRX_M4, # arch # arch_sh4_up :ftrv XMTRX_t,FVN_2t is OP_0=0xf & FVN_2t & OP_8=0x1fd & XMTRX_t { if (!( $(FPSCR_PR) == 0 )) goto ; temp1:4 = ( xf0 f* *[register]:4 (&:4 FVN_2t + 0) ); temp1 = temp1 f+ ( xf4 f* *[register]:4 (&:4 FVN_2t + 4) ); temp1 = temp1 f+ ( xf8 f* *[register]:4 (&:4 FVN_2t + 8) ); temp1 = temp1 f+ ( xf12 f* *[register]:4 (&:4 FVN_2t + 12) ); temp2:4 = ( xf1 f* *[register]:4 (&:4 FVN_2t + 0) ); temp2 = temp2 f+ ( xf5 f* *[register]:4 (&:4 FVN_2t + 4) ); temp2 = temp2 f+ ( xf9 f* *[register]:4 (&:4 FVN_2t + 8) ); temp2 = temp2 f+ ( xf13 f* *[register]:4 (&:4 FVN_2t + 12) ); temp3:4 = ( xf2 f* *[register]:4 (&:4 FVN_2t + 0) ); temp3 = temp3 f+ ( xf6 f* *[register]:4 (&:4 FVN_2t + 4) ); temp3 = temp3 f+ ( xf10 f* *[register]:4 (&:4 FVN_2t + 8) ); temp3 = temp3 f+ ( xf14 f* *[register]:4 (&:4 FVN_2t + 12) ); temp4:4 = ( xf3 f* *[register]:4 (&:4 FVN_2t + 0) ); temp4 = temp4 f+ ( xf7 f* *[register]:4 
(&:4 FVN_2t + 4) ); temp4 = temp4 f+ ( xf11 f* *[register]:4 (&:4 FVN_2t + 8) ); temp4 = temp4 f+ ( xf15 f* *[register]:4 (&:4 FVN_2t + 12) ); # summation goes to FR[n + 0] through FR[n + 3] *[register]:4 (&:4 FVN_2t + 0) = temp1; *[register]:4 (&:4 FVN_2t + 4) = temp2; *[register]:4 (&:4 FVN_2t + 8) = temp3; *[register]:4 (&:4 FVN_2t + 12) = temp4; } # Invalidate Instruction Cache block # pattern 0000nnnn11100011 # text icib @ # arch arch_sh_up define pcodeop InvalidateCacheBlock; :icbi N_0t_at1 is OP_0=0x0 & N_0t_at1 & OP_4=0xe3 { tmp:4 = N_0t_at1; InvalidateCacheBlock(tmp); } # Unconditional Branch # pattern 0100nnnn00101011 # text jmp @ # arch arch_sh_up :jmp N_0tjmp is OP_0=0x4 & N_0tjmp & OP_4=0x2b { PC = N_0tjmp; tmp:4 = PC; delayslot(1); goto [tmp]; } # Branch to Subroutine Procedure # pattern 0100nnnn00001011 # text jsr @ # arch arch_sh_up :jsr N_0tjmp is OP_0=0x4 & N_0tjmp & OP_4=0xb { PR = inst_next; PC = N_0tjmp; tmp:4 = PC; delayslot(1); call [tmp]; } # Load to Control Register # pattern 0100nnnn00001110 # text ldc ,SR # arch arch_sh_up :ldc N_0t_sr is OP_0=0x4 & N_0t_sr & OP_4=0xe { splitSRregister(); SR = (N_0t_sr & 0x700083f3); } # Load to Control Register # pattern 0100nnnn00011110 # text ldc ,GBR # arch arch_sh_up :ldc N_0t_gbr is OP_0=0x4 & N_0t_gbr & OP_4=0x1e { GBR = N_0t_gbr; } # Load to Control Register # pattern 0100nnnn00101110 # text ldc ,VBR # arch arch_sh_up :ldc N_0t_vbr is OP_0=0x4 & N_0t_vbr & OP_4=0x2e { VBR = N_0t_vbr; } # pattern 0100nnnn00111110 # text ldc ,SSR # arch arch_sh3_nommu_up :ldc N_0t_ssr is OP_0=0x4 & N_0t_ssr & OP_4=0x3e { SSR = N_0t_ssr; } # pattern 0100nnnn01001110 # text ldc ,SPC # arch arch_sh3_nommu_up :ldc N_0t_spc is OP_0=0x4 & N_0t_spc & OP_4=0x4e { SPC = N_0t_spc; } # pattern 0100nnnn11111010 # text ldc ,DBR # arch arch_sh4_nommu_nofpu_up :ldc N_0t_dbr is OP_0=0x4 & N_0t_dbr & OP_4=0xfa { DBR = N_0t_dbr; } # pattern 0100nnnn1xxx1110 # text ldc ,Rn_BANK # arch arch_sh3_nommu_up :ldc N_0t_bank,BANKt is OP_0=0x4 
& N_0t_bank & OP_9=0x1 & BANKt & OP_1=0xe { BANKt = N_0t_bank; } # Load to Control Register # pattern 0100nnnn00000111 # text ldc.l @+,SR # arch arch_sh_up :ldc.l N_0t_sr1 is OP_0=0x4 & N_0t_sr1 & OP_4=0x7 { SR = (*:4 ( N_0t_sr1 )) & 0x700083F3; splitSRregister(); N_0t_sr1 = N_0t_sr1 + 4; } # Load to Control Register # pattern 0100nnnn00010111 # text ldc.l @+,GBR # arch arch_sh_up :ldc.l N_0t_gbr1 is OP_0=0x4 & N_0t_gbr1 & OP_4=0x17 { GBR = *:4 ( N_0t_gbr1 ); N_0t_gbr1 = N_0t_gbr1 + 4; } # Load to Control Register # pattern 0100nnnn00100111 # text ldc.l @+,VBR # arch arch_sh_up :ldc.l N_0t_vbr1 is OP_0=0x4 & N_0t_vbr1 & OP_4=0x27 { VBR = *:4 ( N_0t_vbr1 ); N_0t_vbr1 = N_0t_vbr1 + 4; } # pattern 0100nnnn00110111 # text ldc.l @+,SSR # arch arch_sh3_nommu_up :ldc.l N_0t_ssr1 is OP_0=0x4 & N_0t_ssr1 & OP_4=0x37 { SSR = *:4 ( N_0t_ssr1 ); N_0t_ssr1 = N_0t_ssr1 + 4; } # pattern 0100nnnn01000111 # text ldc.l @+,SPC # arch arch_sh3_nommu_up :ldc.l N_0t_spc1 is OP_0=0x4 & N_0t_spc1 & OP_4=0x47 { SPC = *:4 ( N_0t_spc1 ); N_0t_spc1 = N_0t_spc1 + 4; } # pattern 0100nnnn11110110 # text ldc.l @+,DBR # arch arch_sh4_nommu_nofpu_up :ldc.l N_0t_dbr1 is OP_0=0x4 & N_0t_dbr1 & OP_4=0xf6 { DBR = *:4 ( N_0t_dbr1 ); N_0t_dbr1 = N_0t_dbr1 + 4; } # pattern 0100nnnn1xxx0111 # text ldc.l @+,Rn_BANK # arch arch_sh3_nommu_up :ldc.l N_0t_bank1,BANKt is OP_0=0x4 & N_0t_bank1 & OP_9=0x1 & BANKt & OP_1=0x7 { BANKt = *:4 ( N_0t_bank1 ); N_0t_bank1 = N_0t_bank1 + 4; } # Load to FPU System Register # pattern 0100nnnn01011010 # text lds ,FPUL # arch arch_sh2e_up :lds N_0t_fpul is OP_0=0x4 & N_0t_fpul & OP_4=0x5a { FPUL = N_0t_fpul; } # Load to FPU System Register # pattern 0100nnnn01010110 # text lds.l @+,FPUL # arch arch_sh2e_up :lds.l N_0t_fpul1 is OP_0=0x4 & N_0t_fpul1 & OP_4=0x56 { FPUL = *:4 ( N_0t_fpul1 ); N_0t_fpul1 = N_0t_fpul1 + 4; } # Load to FPU System Register # pattern 0100nnnn01101010 # text lds ,FPSCR # arch arch_sh2e_up # Note: FPSCR context cannot be supported from this instruction; 
contents of N_0 not known at disassembly time. :lds N_0t_fpscr is OP_0=0x4 & N_0t_fpscr & OP_4=0x6a { FPSCR = N_0t_fpscr & 0x003FFFFF; splitFPSCRregister(); } # Load to FPU System Register # pattern 0100nnnn01100110 # text lds.l @+,FPSCR # arch arch_sh2e_up # Note: FPSCR context cannot be supported from this instruction; contents of N_0 not known at disassembly time. :lds.l N_0t_fpscr1 is OP_0=0x4 & N_0t_fpscr1 & OP_4=0x66 { FPSCR = (*:4 ( N_0t_fpscr1 )) & 0x003FFFFF; splitFPSCRregister(); N_0t_fpscr1 = N_0t_fpscr1 + 4; } # Load to FPU System Register # pattern 0100nnnn00001010 # text lds ,MACH # arch arch_sh_up :lds N_0t_mach is OP_0=0x4 & N_0t_mach & OP_4=0xa { MACH = N_0t_mach; } # Load to FPU System Register # pattern 0100nnnn00011010 # text lds ,MACL # arch arch_sh_up :lds N_0t_macl is OP_0=0x4 & N_0t_macl & OP_4=0x1a { MACL = N_0t_macl; } # Load to FPU System Register # pattern 0100nnnn00101010 # text lds ,PR # arch arch_sh_up :lds N_0t_pr is OP_0=0x4 & N_0t_pr & OP_4=0x2a { PR = N_0t_pr; } # Load to FPU System Register # pattern 0100nnnn00000110 # text lds.l @+,MACH # arch arch_sh_up :lds.l N_0t_mach1 is OP_0=0x4 & N_0t_mach1 & OP_4=0x6 { MACH = *:4 ( N_0t_mach1 ); N_0t_mach1 = N_0t_mach1 + 4; } # Load to FPU System Register # pattern 0100nnnn00010110 # text lds.l @+,MACL # arch arch_sh_up :lds.l N_0t_macl1 is OP_0=0x4 & N_0t_macl1 & OP_4=0x16 { MACL = *:4 ( N_0t_macl1 ); N_0t_macl1 = N_0t_macl1 + 4; } # Load to FPU System Register # pattern 0100nnnn00100110 # text lds.l @+,PR # arch arch_sh_up :lds.l N_0t_pr1 is OP_0=0x4 & N_0t_pr1 & OP_4=0x26 { PR = *:4 ( N_0t_pr1 ); N_0t_pr1 = N_0t_pr1 + 4; } define pcodeop LoadTranslationLookasideBuffer; # Load to TLB # pattern 0000000000111000 # text ldtlb # arch arch_sh3_up :ldtlb is OP_3=0x38 { LoadTranslationLookasideBuffer(); } # Double-Precision Multiply-and-Accumulate Operation # pattern 0000nnnnmmmm1111 # text mac.l @+,@+ # arch arch_sh2_up define pcodeop mac_lOp; :mac.l M_0t_at,N_0t_at is OP_0=0x0 & OP_1=0xf & 
M_0t_at & N_0t_at
{
	local tmpM:8 = sext(*:4 M_0t_at);
	local tmpN:8 = sext(*:4 N_0t_at);
	local mac:8 = zext(MACL) + (zext(MACH) << 32);
	local product:8 = tmpM * tmpN;
	# S=1: saturating accumulation is modeled by the mac_lOp pcodeop.
	if ($(S_FLAG) == 0) goto <macl_add>;
	mac = mac_lOp(mac,product);
	goto <macl_done>;
	<macl_add>
	mac = mac + product;
	<macl_done>
	MACL = mac[0,32];
	MACH = mac[32,32];
	M_0t_at = M_0t_at + 4;
	N_0t_at = N_0t_at + 4;
}

# Single-Precision Multiply-and-Accumulate Operation
# pattern 0100nnnnmmmm1111
# text mac.w @Rm+,@Rn+
# arch arch_sh_up
define pcodeop mac_wOp;
:mac.w M_0t_at,N_0t_at is OP_0=0x4 & OP_1=0xf & M_0t_at & N_0t_at
{
	local tmpM:4 = sext(*:2 M_0t_at);
	local tmpN:4 = sext(*:2 N_0t_at);
	local mac:8 = zext(MACL) + (zext(MACH) << 32);
	# BUG FIX: was "tmpN * tmpN", which squared the @Rn+ operand and left
	# tmpM unused.  MAC.W multiplies the two fetched 16-bit values
	# (cf. mac.l above, which uses tmpM * tmpN).
	local product:4 = tmpM * tmpN;
	# S=1: saturating accumulation is modeled by the mac_wOp pcodeop.
	if ($(S_FLAG) == 0) goto <macw_add>;
	mac = mac_wOp(mac,product);
	goto <macw_done>;
	<macw_add>
	mac = mac + sext(product);
	<macw_done>
	MACL = mac[0,32];
	MACH = mac[32,32];
	M_0t_at = M_0t_at + 2;
	N_0t_at = N_0t_at + 2;
}

# Data Transfer
# pattern 0110nnnnmmmm0011
# text mov Rm,Rn
# arch arch_sh_up
:mov M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0x3
{
	N_0t = M_0t;
}

# Data Transfer
# pattern 0010nnnnmmmm0000
# text mov.b Rm,@Rn
# arch arch_sh_up
:mov.b M_0t,N_0t_at1 is OP_0=0x2 & N_0t_at1 & M_0t & OP_1=0x0
{
	*:1 ( N_0t_at1 ) = M_0t:1;
}

# Data Transfer
# pattern 0010nnnnmmmm0001
# text mov.w Rm,@Rn
# arch arch_sh_up
:mov.w M_0t,N_0t_at1 is OP_0=0x2 & N_0t_at1 & M_0t & OP_1=0x1
{
	*:2 ( N_0t_at1 ) = M_0t:2;
}

# Data Transfer
# pattern 0010nnnnmmmm0010
# text mov.l Rm,@Rn
# arch arch_sh_up
:mov.l M_0t,N_0t_at1 is OP_0=0x2 & N_0t_at1 & M_0t & OP_1=0x2
{
	*:4 ( N_0t_at1 ) = M_0t;
}

# Data Transfer
# pattern 0110nnnnmmmm0000
# text mov.b @Rm,Rn
# arch arch_sh_up
:mov.b M_0t_at1,N_0t is OP_0=0x6 & N_0t & M_0t_at1 & OP_1=0x0
{
	N_0t = sext( *:1 ( M_0t_at1 ) );
}

# Data Transfer
# pattern 0110nnnnmmmm0001
# text mov.w @Rm,Rn
# arch arch_sh_up
:mov.w M_0t_at1,N_0t is OP_0=0x6 & N_0t & M_0t_at1 & OP_1=0x1
{
	N_0t = sext( *:2 ( M_0t_at1 ) );
}

# Data Transfer
# pattern 0110nnnnmmmm0010
# text mov.l @Rm,Rn
# arch arch_sh_up
:mov.l M_0t_at1,N_0t is OP_0=0x6 & N_0t & M_0t_at1 &
OP_1=0x2 { N_0t = *:4 ( M_0t_at1 ); } # Data Transfer # pattern 0010nnnnmmmm0100 # text mov.b ,@- # arch arch_sh_up :mov.b M_0t,N_0t_at_neg is OP_0=0x2 & N_0t_at_neg & M_0t & OP_1=0x4 { N_0t_at_neg = N_0t_at_neg - 1; *:1 ( N_0t_at_neg ) = M_0t:1; } # Data Transfer # pattern 0010nnnnmmmm0101 # text mov.w ,@- # arch arch_sh_up :mov.w M_0t,N_0t_at_neg is OP_0=0x2 & N_0t_at_neg & M_0t & OP_1=0x5 { N_0t_at_neg = N_0t_at_neg - 2; *:2 ( N_0t_at_neg ) = M_0t:2; } # Data Transfer # pattern 0010nnnnmmmm0110 # text mov.l ,@- # arch arch_sh_up :mov.l M_0t,N_0t_at_neg is OP_0=0x2 & N_0t_at_neg & M_0t & OP_1=0x6 { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = M_0t; } # Data Transfer # pattern 0110nnnnmmmm0100 # text mov.b @+, # arch arch_sh_up :mov.b M_0t_at,N_0t is OP_0=0x6 & N_0t & M_0t_at & OP_1=0x4 { N_0t = sext( *:1 ( M_0t_at ) ); M_0t_at = M_0t_at + 1; } # Data Transfer # pattern 0110nnnnmmmm0101 # text mov.w @+, # arch arch_sh_up :mov.w M_0t_at,N_0t is OP_0=0x6 & N_0t & M_0t_at & OP_1=0x5 { N_0t = sext( *:2 ( M_0t_at ) ); M_0t_at = M_0t_at + 2; } # Data Transfer # pattern 0110nnnnmmmm0110 # text mov.l @+, # arch arch_sh_up :mov.l M_0t_at,N_0t is OP_0=0x6 & N_0t & M_0t_at & OP_1=0x6 { N_0t = *:4 ( M_0t_at ); M_0t_at = M_0t_at + 4; } # Data Transfer # pattern 0000nnnnmmmm0100 # text mov.b ,@(R0,) # arch arch_sh_up :mov.b M_0t,N_0t_at_with_r0 is OP_0=0x0 & N_0t_at_with_r0 & M_0t & OP_1=0x4 { *:1 ( r0 + N_0t_at_with_r0 ) = M_0t:1; } # Data Transfer # pattern 0000nnnnmmmm0101 # text mov.w ,@(R0,) # arch arch_sh_up :mov.w M_0t,N_0t_at_with_r0 is OP_0=0x0 & N_0t_at_with_r0 & M_0t & OP_1=0x5 { *:2 ( r0 + N_0t_at_with_r0 ) = M_0t:2; } # Data Transfer # pattern 0000nnnnmmmm0110 # text mov.l ,@(R0,) # arch arch_sh_up :mov.l M_0t,N_0t_at_with_r0 is OP_0=0x0 & N_0t_at_with_r0 & M_0t & OP_1=0x6 { *:4 ( r0 + N_0t_at_with_r0 ) = M_0t; } # Data Transfer # pattern 0000nnnnmmmm1100 # text mov.b @(R0,), # arch arch_sh_up :mov.b M_0t_at_with_r0,N_0t is OP_0=0x0 & N_0t & M_0t_at_with_r0 
& OP_1=0xc { N_0t = sext( *:1 ( r0 + M_0t_at_with_r0 ) ); } # Data Transfer # pattern 0000nnnnmmmm1101 # text mov.w @(R0,), # arch arch_sh_up :mov.w M_0t_at_with_r0,N_0t is OP_0=0x0 & N_0t & M_0t_at_with_r0 & OP_1=0xd { N_0t = sext( *:2 ( r0 + M_0t_at_with_r0 ) ); } # Data Transfer # pattern 0000nnnnmmmm1110 # text mov.l @(R0,), # arch arch_sh_up :mov.l M_0t_at_with_r0,N_0t is OP_0=0x0 & N_0t & M_0t_at_with_r0 & OP_1=0xe { N_0t = *:4 ( r0 + M_0t_at_with_r0 ); } # Data Transfer # pattern 1110nnnniiiiiiii # text mov #, # arch arch_sh_up :mov I_0t,N_0t is OP_0=0xe & N_0t & I_0t { N_0t = I_0t; # NOTE I_0t already signed extended } # Data Transfer # pattern 1001nnnndddddddd # text mov.w @(,PC), # arch arch_sh_up :mov.w U_0t_2pc,N_0t is OP_0=0x9 & N_0t & U_0t_2pc { N_0t = sext( U_0t_2pc ); # NOTE U_0t_2pc units is bytes } # Data Transfer # pattern 1101nnnndddddddd # text mov.l @(,PC), # arch arch_sh_up :mov.l U_0t_4pc,N_0t is OP_0=0xd & N_0t & U_0t_4pc { N_0t = U_0t_4pc; # NOTE U_0t_4pc units is bytes } # Data Transfer # pattern 11000100dddddddd # text mov.b @(,GBR),R0 # arch arch_sh_up :mov.b U_0t_gbr_at_1,r0 is OP_2=0xc4 & U_0t_gbr_at_1 & r0 { r0 = sext( *:1 ( U_0t_gbr_at_1 ) ); } # Data Transfer # pattern 11000101dddddddd # text mov.w @(,GBR),R0 # arch arch_sh_up :mov.w U_0t_gbr_at_2,r0 is OP_2=0xc5 & U_0t_gbr_at_2 & r0 { r0 = sext( *:2 ( U_0t_gbr_at_2 ) ); # NOTE U_0t_gbr_at_2 units is bytes } # Data Transfer # pattern 11000110dddddddd # text mov.l @(,GBR),R0 # arch arch_sh_up :mov.l U_0t_gbr_at_4,r0 is OP_2=0xc6 & U_0t_gbr_at_4 & r0 { r0 = *:4 ( U_0t_gbr_at_4 ); # NOTE U_0t_gbr_at_4 units is bytes } # Data Transfer # pattern 11000000dddddddd # text mov.b R0,@(,GBR) # arch arch_sh_up :mov.b r0,U_0t_gbr_at_1 is OP_2=0xc0 & U_0t_gbr_at_1 & r0 { *:1 ( U_0t_gbr_at_1 ) = r0:1; } # Data Transfer # pattern 11000001dddddddd # text mov.w R0,@(,GBR) # arch arch_sh_up :mov.w r0,U_0t_gbr_at_2 is OP_2=0xc1 & U_0t_gbr_at_2 & r0 { *:2 ( U_0t_gbr_at_2 ) = r0:2; # NOTE U_0t_gbr_at_2 
units is bytes } # Data Transfer # pattern 11000010dddddddd # text mov.l R0,@(,GBR) # arch arch_sh_up :mov.l r0, U_0t_gbr_at_4 is OP_2=0xc2 & U_0t_gbr_at_4 & r0 { *:4 ( U_0t_gbr_at_4 ) = r0; # NOTE U_0t_4_at_gbr_r0_1 units is bytes } # Data Transfer # pattern 10000000mmmmdddd # text mov.b R0,@(,) # arch arch_sh_up :mov.b r0,U_2t_M0_dispr01 is OP_2=0x80 & M_0t & U_2t_M0_dispr01 & r0 { *:1 ( U_2t_M0_dispr01 + M_0t ) = r0:1; } # Data Transfer # pattern 10000001mmmmdddd # text mov.w R0,@(,) # arch arch_sh_up :mov.w r0,U_2t_M0_dispr02 is OP_2=0x81 & M_0t & U_2t_M0_dispr02 & r0 { *:2 ( U_2t_M0_dispr02 + M_0t ) = r0:2; # NOTE U_2t_M0_dispr02 units is bytes } # Data Transfer # pattern 0001nnnnmmmmdddd # text mov.l ,@(,) # arch arch_sh_up :mov.l M_0t,U_2t_N0_dispr04 is OP_0=0x1 & M_0t & N_0t & U_2t_N0_dispr04 { *:4 ( U_2t_N0_dispr04 + N_0t ) = M_0t; # NOTE U_2t_N0_dispr04 units is bytes } # Data Transfer # pattern 10000100mmmmdddd # text mov.b @(,),R0 # arch arch_sh_up :mov.b U_2t_M0_dispr01,r0 is OP_2=0x84 & M_0t & U_2t_M0_dispr01 & r0 { r0 = sext( *:1 ( U_2t_M0_dispr01 + M_0t ) ); } # Data Transfer # pattern 10000101mmmmdddd # text mov.w @(,),R0 # arch arch_sh_up :mov.w U_2t_M0_dispr02,r0 is OP_2=0x85 & U_2t_M0_dispr02 & M_0t & r0 { r0 = sext( *:2 ( U_2t_M0_dispr02 + M_0t ) ); # NOTE U_2t_M0_dispr02 units is bytes } # Data Transfer # pattern 0101nnnnmmmmdddd # text mov.l @(,), # arch arch_sh_up :mov.l U_2t_M0_dispr04,N_0t is OP_0=0x5 & N_0t & M_0t & U_2t_M0_dispr04 { N_0t = *:4 ( U_2t_M0_dispr04 + M_0t ); # NOTE U_2t_M0_dispr04 units is bytes } # Effective Address Transfer # pattern 11000111iiiiiiii # text mova @(,PC),R0 # arch arch_sh_up :mova U_0t_4pc,r0 is OP_2=0xc7 & U_0t_4pc & r0 { r0 = &U_0t_4pc; # NOTE U_0t_4pc units is bytes } # MOVe with Cache block Allocation # pattern 0000nnnn11000011 # text movca.l R0,@ # arch arch_sh4_nommu_nofpu_up :movca.l N_0txx is OP_0=0x0 & N_0txx & OP_4=0xc3 { *:4 ( N_0txx ) = r0; # NOTE ignore cache issues } # T Bit Transfer # pattern 
0000nnnn00101001 # text movt # arch :movt N_0t is OP_0=0x0 & N_0t & OP_4=0x29 { N_0t = zext($(T_FLAG)); } # Move Unaligned Long # pattern 0100mmmm10101001 # text movua.l @,R0 # arch :movua.l N_0t_at,r0 is OP_0=0x4 & N_0t_at & r0 & OP_4=0xA9 { r0 = (*:4 ( N_0t_at )); } # Move Unaligned Long Pointer # pattern 0100mmmm11101001 # text movua.l @+,R0 # arch :movua.l N_0t_at,r0 is OP_0=0x4 & N_0t_at & r0 & OP_4=0xE9 { r0 = (*:4 ( N_0t_at )); N_0t_at = N_0t_at + 4; } :movua.l N_0t_at,r0 is OP_0=0x4 & N_0t_at & N_0=0 & r0 & OP_4=0xE9 { r0 = (*:4 ( N_0t_at )); } # Double-Precision Multiplication # pattern 0000nnnnmmmm0111 # text mul.l , # arch :mul.l M_0t,N_0t is OP_0=0x0 & N_0t & M_0t & OP_1=0x7 { MACL = N_0t * M_0t; } # Signed Multiplication # pattern 0010nnnnmmmm1111 # text muls.w , # arch :muls.w M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0xf { MACL = sext(N_0t:2) * sext(M_0t:2); } # Unsigned Multiplication # pattern 0010nnnnmmmm1110 # text mulu.w , # arch :mulu.w M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0xe { MACL = zext(N_0t:2) * zext(M_0t:2); } # Sign Inversion # pattern 0110nnnnmmmm1011 # text neg , # arch :neg M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0xb { N_0t = -M_0t; } # Sign Inversion with Borrow # pattern 0110nnnnmmmm1010 # text negc , # arch :negc M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0xa { local Tcopy:4 = zext($(T_FLAG)); $(T_FLAG) = 0 != M_0t; local result:4 = - M_0t; $(T_FLAG) = $(T_FLAG) || (result < Tcopy); N_0t = result - Tcopy; } # No Operation # pattern 0000000000001001 # text nop # arch :nop is OP_3=0x9 { } # Empty on purpose # Bit Inversion # pattern 0110nnnnmmmm0111 # text not , # arch :not M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0x7 { N_0t = ~M_0t; } define pcodeop CacheBlockInvalidate; # Operand Cache Block Invalidate # pattern 0000nnnn10010011 # text ocbi @ # arch :ocbi N_0t_at1 is OP_0=0x0 & N_0t_at1 & OP_4=0x93 { CacheBlockInvalidate(N_0t_at1); } define pcodeop CacheBlockPurge; # Cache Block Purge # pattern 0000nnnn10100011 # text 
ocbp @ # arch :ocbp N_0t_at1 is OP_0=0x0 & N_0t_at1 & OP_4=0xa3 { CacheBlockPurge(N_0t_at1); } define pcodeop CacheBlockWriteBack; # TODO ocbwb # Cache Block Write-Back # pattern 0000nnnn10110011 # text ocbwb @ # arch :ocbwb N_0t_at1 is OP_0=0x0 & N_0t_at1 & OP_4=0xb3 { CacheBlockWriteBack(N_0t_at1); } # Logical OR # pattern 0010nnnnmmmm1011 # text or , # arch :or M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0xb { N_0t = N_0t | M_0t; } # Logical OR # pattern 11001011iiiiiiii # text or #,R0 # arch :or U_0t_r0 is OP_2=0xcb & U_0t_r0 { r0 = r0 | U_0t_r0; } # Logical OR # pattern 11001111iiiiiiii # text or.b #,@(R0,GBR) # arch :or.b U_0t1 is OP_2=0xcf & U_0t1 { *:1 (GBR + r0) = ( *:1 (GBR + r0) ) | U_0t1; } # Prefetch to Data Cache # pattern 0000nnnn10000011 # text pref @ # arch :pref N_0tjmp is OP_0=0x0 & N_0tjmp & OP_4=0x83 { } # Empty on purpose # One-Bit Left Rotation through T Bit # pattern 0100nnnn00100100 # text rotcl # arch :rotcl N_0t is OP_0=0x4 & N_0t & OP_4=0x24 { temp:1 = ( (N_0t & 0x80000000) != 0 ); N_0t = (N_0t << 1) | zext($(T_FLAG)); $(T_FLAG) = temp; } # One-Bit Right Rotation through T Bit # pattern 0100nnnn00100101 # text rotcr # arch :rotcr N_0t is OP_0=0x4 & N_0t & OP_4=0x25 { temp:1 = ( (N_0t & 0x00000001) != 0 ); N_0t = ( N_0t >> 1 ) | ( zext($(T_FLAG)) << 31 ); $(T_FLAG) = temp; } # One-Bit Left Rotation # pattern 0100nnnn00000100 # text rotl # arch :rotl N_0t is OP_0=0x4 & N_0t & OP_4=0x4 { $(T_FLAG) = ( (N_0t & 0x80000000) != 0 ); N_0t = (N_0t << 1) | zext($(T_FLAG)); } # One-Bit Right Rotation # pattern 0100nnnn00000101 # text rotr # arch :rotr N_0t is OP_0=0x4 & N_0t & OP_4=0x5 { $(T_FLAG) = ( (N_0t & 0x00000001) != 0 ); N_0t = ( N_0t >> 1 ) | ( zext($(T_FLAG)) << 31 ); } # Return from Exception Handling # pattern 0000000000101011 # text rte # arch :rte is OP_3=0x2b { SR = SSR; splitSRregister(); PC = SPC; delayslot(1); return [PC]; } # Return from Subroutine Procedure # pattern 0000000000001011 # text rts # arch :rts is OP_3=0xb { PC = PR; 
delayslot(1); return [PC]; }

# S Bit Setting
# pattern 0000000001011000
# text sets
# arch
:sets is OP_3=0x58 {
	$(S_FLAG) = 1;
}

# T Bit Setting
# pattern 0000000000011000
# text sett
# arch
:sett is OP_3=0x18 {
	$(T_FLAG) = 1;
}

# Note: this constructor follows the description on page 393 precisely, though simpler,
# it should produce identical results to the pseudo-code above
:shad M_0t,N_0t is OP_0=0x4 & N_0t & M_0t & OP_1=0xc {
	# FIX: restore the local labels lost from the broken "goto ;" statements.
	# Rm >= 0: arithmetic shift left by Rm[4:0]; Rm < 0: arithmetic shift right.
	if ( M_0t s< 0 ) goto <shad_right>;
	N_0t = N_0t << ( M_0t & 0x1f );
	goto <shad_done>;
	<shad_right>
	N_0t = N_0t s>> ( ( ~M_0t & 0x1f ) + 1 );
	<shad_done>
}

# One-Bit Left Arithmetic Shift
# pattern 0100nnnn00100000
# text shal
# arch
:shal N_0t is OP_0=0x4 & N_0t & OP_4=0x20 {
	$(T_FLAG) = ( ( N_0t & 0x80000000 ) != 0 );
	N_0t = N_0t << 1;
}

# One-Bit Right Arithmetic Shift
# pattern 0100nnnn00100001
# text shar
# arch
:shar N_0t is OP_0=0x4 & N_0t & OP_4=0x21 {
	$(T_FLAG) = ( ( N_0t & 0x00000001 ) != 0 );
	N_0t = N_0t s>> 1;
}

# Note: this constructor follows the description on page 397 precisely, though simpler,
# it should produce identical results to the pseudo-code above
:shld M_0t,N_0t is OP_0=0x4 & N_0t & M_0t & OP_1=0xd {
	# FIX: restore the local labels lost from the broken "goto ;" statements.
	# Same as shad, but the right shift is logical rather than arithmetic.
	if ( M_0t s< 0 ) goto <shld_right>;
	N_0t = N_0t << ( M_0t & 0x1f );
	goto <shld_done>;
	<shld_right>
	N_0t = N_0t >> ( ( ~M_0t & 0x1f ) + 1 );
	<shld_done>
}

# n-Bit Left Logical Shift
# One-Bit Left Logical Shift
# pattern 0100nnnn00000000
# text shll
# arch
:shll N_0t is OP_0=0x4 & N_0t & OP_4=0x0 {
	$(T_FLAG) = ( ( N_0t & 0x80000000 ) != 0 );
	N_0t = N_0t << 1;
}

# n-Bit Left Logical Shift
# pattern 0100nnnn00001000
# text shll2
# arch
:shll2 N_0t is OP_0=0x4 & N_0t & OP_4=0x8 {
	N_0t = ( N_0t << 2 );
}

# n-Bit Left Logical Shift
# pattern 0100nnnn00011000
# text shll8
# arch
:shll8 N_0t is OP_0=0x4 & N_0t & OP_4=0x18 {
	N_0t = ( N_0t << 8 );
}

# pattern 0100nnnn00101000
# text shll16
# arch
:shll16 N_0t is OP_0=0x4 & N_0t & OP_4=0x28 {
	N_0t = ( N_0t << 16 );
}

# One-Bit Right Logical Shift
# pattern 0100nnnn00000001
# text shlr
# arch
:shlr N_0t is OP_0=0x4 & N_0t & OP_4=0x1 {
	$(T_FLAG) = ( ( N_0t & 0x00000001 ) !=
0 ); N_0t = N_0t >> 1; } # n-Bit Left Logical Shift # pattern 0100nnnn00001001 # text shlr2 # arch :shlr2 N_0t is OP_0=0x4 & N_0t & OP_4=0x9 { N_0t = N_0t >> 2; } # n-Bit Left Logical Shift # pattern 0100nnnn00011001 # text shlr8 # arch :shlr8 N_0t is OP_0=0x4 & N_0t & OP_4=0x19 { N_0t = N_0t >> 8; } # n-Bit Left Logical Shift # pattern 0100nnnn00101001 # text shlr16 # arch :shlr16 N_0t is OP_0=0x4 & N_0t & OP_4=0x29 { N_0t = N_0t >> 16; } # Transition to Power-Down Mode # pattern 0000000000011011 # text sleep # arch :sleep is OP_3=0x1b { } # empty on purpose # Store from Control Register # pattern 0000nnnn00000010 # text stc SR, # arch arch_sh_up :stc sr_N_0t is OP_0=0x0 & sr_N_0t & OP_4=0x2 { genSRregister(); sr_N_0t = SR; } # Store from Control Register # pattern 0000nnnn00010010 # text stc GBR, # arch :stc gbr_N_0t is OP_0=0x0 & gbr_N_0t & OP_4=0x12 { gbr_N_0t = GBR; } # Store from Control Register # pattern 0000nnnn00100010 # text stc VBR, # arch arch_sh_up :stc vbr_N_0t is OP_0=0x0 & vbr_N_0t & OP_4=0x22 { vbr_N_0t = VBR; } # pattern 0000nnnn00110010 # text stc SSR, # arch arch_sh3_nommu_up :stc ssr_N_0t is OP_0=0x0 & ssr_N_0t & OP_4=0x32 { ssr_N_0t = SSR; } # pattern 0000nnnn01000010 # text stc SPC, # arch arch_sh3_nommu_up # :stc spc_N_0t is OP_0=0x0 & spc_N_0t & OP_4=0x42 { spc_N_0t = SPC; } # pattern 0000nnnn00111010 # text stc SGR, # arch arch_sh4_nommu_nofpu_up :stc sgr_N_0t is OP_0=0x0 & sgr_N_0t & OP_4=0x3a { sgr_N_0t = SGR; } # pattern 0000nnnn11111010 # text stc DBR, # arch arch_sh4_nommu_nofpu_up :stc dbr_N_0t is OP_0=0x0 & dbr_N_0t & OP_4=0xfa { dbr_N_0t = DBR; } # pattern 0000nnnn1xxx0010 # text stc Rn_BANK, # arch arch_sh3_nommu_up :stc BANKt,N_0t_bank is OP_0=0x0 & N_0t_bank & OP_9=0x1 & BANKt & OP_1=0x2 { N_0t_bank = BANKt; } # Store from Control Register # pattern 0100nnnn00000011 # text stc.l SR,@- # arch arch_sh_up :stc.l sr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x3 & sr_t { N_0t_at_neg = N_0t_at_neg - 4; genSRregister(); *:4 ( 
N_0t_at_neg ) = SR; } # Store from Control Register # pattern 0100nnnn00010011 # text stc.l GBR,@- # arch arch_sh_up :stc.l gbr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x13 & gbr_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = GBR; } # Store from Control Register # pattern 0100nnnn00100011 # text stc.l VBR,@- # arch arch_sh_up :stc.l vbr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x23 & vbr_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = VBR; } # pattern 0100nnnn00110011 # text stc.l SSR,@- # arch arch_sh3_nommu_up :stc.l ssr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x33 & ssr_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = SSR; } # pattern 0100nnnn01000011 # text stc.l SPC,@- # arch arch_sh3_nommu_up :stc.l spc_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x43 & spc_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = SPC; } # pattern 0100nnnn00110010 # text stc.l SGR,@- # arch arch_sh4_nommu_nofpu_up :stc.l sgr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x32 & sgr_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = SGR; } # pattern 0100nnnn11110010 # text stc.l DBR,@- # arch arch_sh4_nommu_nofpu_up :stc.l dbr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0xf2 & dbr_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = DBR; } # pattern 0100nnnn1xxx0011 # text stc.l Rn_BANK,@- # arch arch_sh3_nommu_up :stc.l BANKt,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_9=0x1 & BANKt & OP_1=0x3 { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = BANKt; } # Store from System Register # pattern 0000nnnn00001010 # text sts MACH, # arch arch_sh_up :sts mach_N_0t is OP_0=0x0 & mach_N_0t & OP_4=0xa { mach_N_0t = MACH; } # Store from System Register # pattern 0000nnnn00011010 # text sts MACL, # arch arch_sh_up :sts macl_N_0t is OP_0=0x0 & macl_N_0t & OP_4=0x1a { macl_N_0t = MACL; } # Store from System Register # pattern 0000nnnn00101010 # text sts PR, # arch arch_sh_up :sts pr_N_0t is OP_0=0x0 & pr_N_0t & OP_4=0x2a { 
pr_N_0t = PR; } # Store from Control Register # pattern 0100nnnn00000010 # text sts.l MACH,@- # arch arch_sh_up :sts.l mach_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x2 & mach_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = MACH; } # Store from Control Register # pattern 0100nnnn00010010 # text sts.l MACL,@- # arch arch_sh_up :sts.l macl_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x12 & macl_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = MACL; } # Store from Control Register # pattern 0100nnnn00100010 # text sts.l PR,@- # arch arch_sh_up :sts.l PR,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x22 & PR { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = PR; } # Store from System Register # pattern 0000nnnn01011010 # text sts FPUL, # arch arch_sh2e_up :sts fpul_N_0t is OP_0=0x0 & fpul_N_0t & OP_4=0x5a { fpul_N_0t = FPUL; } # Store from System Register # pattern 0000nnnn01101010 # text sts FPSCR, # arch arch_sh2e_up :sts fpscr_N_0t is OP_0=0x0 & fpscr_N_0t & OP_4=0x6a { genFPSCRregister(); fpscr_N_0t = FPSCR; } # Store from Control Register # pattern 0100nnnn01010010 # text sts.l FPUL,@- # arch arch_sh2e_up :sts.l fpul_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x52 & fpul_t { N_0t_at_neg = N_0t_at_neg - 4; *:4 ( N_0t_at_neg ) = FPUL; } # Store from Control Register # pattern 0100nnnn01100010 # text sts.l FPSCR,@- # arch arch_sh2e_up :sts.l fpscr_t,N_0t_at_neg is OP_0=0x4 & N_0t_at_neg & OP_4=0x62 & fpscr_t { N_0t_at_neg = N_0t_at_neg - 4; genFPSCRregister(); *:4 ( N_0t_at_neg ) = FPSCR; } # Binary Subtraction # pattern 0011nnnnmmmm1000 # text sub , # arch arch_sh_up :sub M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0x8 { N_0t = N_0t - M_0t; } # Binary Subtraction with Borrow # pattern 0011nnnnmmmm1010 # text subc , # arch arch_sh_up :subc M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0xa { local Tcopy:4 = zext($(T_FLAG)); $(T_FLAG) = N_0t < M_0t; local result:4 = N_0t - M_0t; $(T_FLAG) = $(T_FLAG) || (result < Tcopy); N_0t = result - 
Tcopy; } # Binary Subtraction with Underflow Check # pattern 0011nnnnmmmm1011 # text subv , # arch arch_sh_up :subv M_0t,N_0t is OP_0=0x3 & N_0t & M_0t & OP_1=0xb { $(T_FLAG) = sborrow(N_0t, M_0t); N_0t = N_0t - M_0t; } # Upper-/Lower-Half Swap # pattern 0110nnnnmmmm1000 # text swap.b , # arch arch_sh_up :swap.b M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0x8 { N_0t = ( M_0t & 0xFFFF0000 ) | ( ( M_0t & 0x000000FF ) << 8 ) | ( ( M_0t & 0x0000FF00 ) >> 8 ); } # Upper-/Lower-Half Swap # pattern 0110nnnnmmmm1001 # text swap.w , # arch arch_sh_up :swap.w M_0t,N_0t is OP_0=0x6 & N_0t & M_0t & OP_1=0x9 { N_0t = ( M_0t << 16 ) | ( M_0t >> 16 ); } # Synchronize Data Operation # pattern 0000000010101011 # text synco # arch arch_sh4a_up define pcodeop SynchronizeDataOperation; :synco is OP_3=0x00ab { SynchronizeDataOperation(); } # Memory Test and Bit Setting # pattern 0100nnnn00011011 # text tas.b @ # arch arch_sh_up :tas.b N_0t_at1 is OP_0=0x4 & N_0t_at1 & OP_4=0x1b { temp:1 = *:1 ( N_0t_at1 ); $(T_FLAG) = ( temp == 0 ); temp = temp | 0x80; *:1 ( N_0t_at1 ) = temp; } define pcodeop TrapAlways; # Trap Exception Handling # pattern 11000011iiiiiiii # text trapa # # arch arch_sh_up :trapa U_0t is OP_2=0xc3 & U_0t { TrapAlways(U_0t); } # AND Operation T Bit Setting # pattern 0010nnnnmmmm1000 # text tst , # arch arch_sh_up :tst M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0x8 { $(T_FLAG) = ( (N_0t & M_0t) == 0 ); } # AND Operation T Bit Setting # pattern 11001000iiiiiiii # text tst #,R0 # arch arch_sh_up :tst U_0t_r0 is OP_2=0xc8 & U_0t_r0 { $(T_FLAG) = ( ( r0 & U_0t_r0 ) == 0 ); } # AND Operation T Bit Setting # pattern 11001100iiiiiiii # text tst.b #,@(R0,GBR) # arch arch_sh_up :tst.b U_0t1 is OP_2=0xcc & U_0t1 { $(T_FLAG) = ( ( (*:1 ( GBR + r0 )) & U_0t1 ) == 0 ); } # pattern 0010nnnnmmmm1010 # text xor , # arch arch_sh_up :xor M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0xa { N_0t = N_0t ^ M_0t; } # Exclusive Logical OR # pattern 11001010iiiiiiii # text xor #,R0 # arch arch_sh_up 
:xor U_0t_r0 is OP_2=0xca & U_0t_r0 { r0 = r0 ^ U_0t_r0; } # Exclusive Logical OR # pattern 11001110iiiiiiii # text xor.b #,@(R0,GBR) # arch arch_sh_up :xor.b U_0t1 is OP_2=0xce & U_0t1 { *:1 (GBR + r0) = ( *:1 (GBR + r0) ) ^ U_0t1; } # Middle Extraction from Linked Registers # pattern 0010nnnnmmmm1101 # text xtrct , # arch arch_sh_up :xtrct M_0t,N_0t is OP_0=0x2 & N_0t & M_0t & OP_1=0xd { N_0t = (M_0t << 16) | (N_0t >> 16); } ================================================ FILE: pypcode/processors/SuperH4/data/languages/SuperH4_be.cspec ================================================ ================================================ FILE: pypcode/processors/SuperH4/data/languages/SuperH4_be.slaspec ================================================ # This module defines SuperH version 4a, but should work against versions 1,2, and 3. # DSP Extensions are not yet added # Based on "Renesas SH-4 Software Manual: Rev 6.00 2006.09 (i.e. rej09b0318_sh_4sm.pdf) @define ENDIAN "big" @include "SuperH4.sinc" ================================================ FILE: pypcode/processors/SuperH4/data/languages/SuperH4_le.cspec ================================================ ================================================ FILE: pypcode/processors/SuperH4/data/languages/SuperH4_le.slaspec ================================================ # This module defines SuperH version 4, but should work against versions 1,2, and 3. # There is a SuperH version 4A (which has 4 byte instruction length) which has instructions incompatable # with this. # Based on "Renesas SH-4 Software Manual: Rev 6.00 2006.09 (i.e. 
rej09b0318_sh_4sm.pdf) @define ENDIAN "little" @include "SuperH4.sinc" ================================================ FILE: pypcode/processors/SuperH4/data/languages/old/SuperH4-BE-16.lang ================================================ SuperH4:BE:16:default SuperH4 default 16 ================================================ FILE: pypcode/processors/SuperH4/data/languages/old/SuperH4-BE-16.trans ================================================ SuperH4:BE:16:default SuperH4:BE:32:default ================================================ FILE: pypcode/processors/SuperH4/data/languages/old/SuperH4-LE-16.lang ================================================ SuperH4:LE:16:default SuperH4 default 16 ================================================ FILE: pypcode/processors/SuperH4/data/languages/old/SuperH4-LE-16.trans ================================================ SuperH4:LE:16:default SuperH4:LE:32:default ================================================ FILE: pypcode/processors/SuperH4/data/manuals/superh4.idx ================================================ @ rej09b0318_sh_4sm.pdf[SH-4 Software Manual Rev 6.00 2006.09] add, 229 addc, 231 addv, 232 and, 234 and.b, 234 bf, 236 bf/s, 238 bra, 240 braf, 242 bsr, 244 bsrf, 246 bt, 248 bt/s, 250 clrmac, 252 clrs, 253 clrt, 254 cmp/eq, 255 cmp/ge, 255 cmp/gt, 255 cmp/hi, 255 cmp/hs, 255 cmp/pl, 255 cmp/pz, 255 cmp/str, 255 cmp/eq, 255 cmp/str, 255 div0s, 259 div0u, 260 div1, 261 dmuls.l, 266 dmulu.l, 268 dt, 270 exts.b, 271 exts.w, 271 extu.b, 273 extu.w, 273 jmp, 331 jsr, 332 ldc, 334 ldc.l, 334 lds, 339 lds.l, 339 ldtlb, 343 mac.l, 345 mac, 349 mac.w, 349 mov, 352 mov.b, 352 mov.w, 352 mov.l, 352 mova, 367 movca.l, 368 movt, 369 mul.l, 370 muls.w, 371 muls, 371 mulu.w, 372 mulu, 372 neg, 373 negc, 374 nop, 375 not, 376 ocbi, 377 ocbp, 378 ocbwb, 379 or, 380 or.b, 380 pref, 382 rotcl, 383 rotcr, 384 rotl, 385 rotr, 386 rte, 387 rts, 389 sets, 391 sett, 392 shad, 393 shal, 395 shar, 396 shld, 397 shll, 399 shll2, 400 
shll8, 400 shll16, 400 shlr, 402 shlr2, 403 shlr8, 403 shlr16, 403 sleep, 405 stc, 406 stc.l, 406 sts, 411 sts.l, 411 sub, 416 subc, 417 subv, 418 swap.b, 420 swap.w, 420 tas.b, 422 trapa, 424 tst, 426 tst.b, 426 xor, 428 xor.b, 428 xtrct, 430 fabs, 275 fadd, 276 fcmp/eq, 279 fcmp/gt, 279 fcnvds, 283 fcnvsd, 286 fdiv, 288 fipr, 292 fldi0, 294 fldi1, 295 flds, 296 float, 297 fmac, 299 fmov, 305 fmov.s, 305 fmul, 312 fneg, 315 frchg, 316 fschg, 317 fsqrt, 318 fsts, 321 fsub, 322 ftrc, 325 ftrv, 328 ================================================ FILE: pypcode/processors/SuperH4/data/patterns/SuperH4_patterns.xml ================================================ 0x0b 0x00 0x09 0x00 0x0b 0x00 0xf6 0x6. 0x0b 0x00 0x09 0x00 0x00 0x00 0x0b 0x00 0x.. 0x7f 10011101 11100011 10111... ........ 0x22 0x4f 0x22 0x4f 1....... 0x7f 1....... 0x7f ================================================ FILE: pypcode/processors/SuperH4/data/patterns/patternconstraints.xml ================================================ SuperH4_patterns.xml ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI430Common.sinc ================================================ # # TI MSP430 # # Texas Instruments microcontroller 16-bit CPU # # Memory Architecture # define endian=$(ENDIAN); define alignment=2; define space RAM type=ram_space size=$(REG_SIZE) default; define space register type=register_space size=2; # # General Registers # define register offset=0x0000 size=$(REG_SIZE) [ PC # R0 # Program Counter SP # R1 # Stack Pointer SR # R2 # Status Register R3 # R3 # Constant Generator #Available for general use: R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 #None: None ]; @if REG_SIZE == "4" define register offset=0x0000 size=2 [ PC_16 _ SP_16 _ SR_16 _ R3_16 _ R4_16 _ R5_16 _ R6_16 _ R7_16 _ R8_16 _ R9_16 _ R10_16 _ R11_16 _ R12_16 _ R13_16 _ R14_16 _ R15_16 _ ]; define register offset=0x0000 size=1 [ PC_lo PC_hi _ _ # R0 # Program Counter SP_lo SP_hi _ _ # R1 # 
Stack Pointer SR_lo SR_hi _ _ # R2 # Status Register R3_lo R3_hi _ _ # R3 # Constant Generator #Available for general use: R4_lo R4_hi _ _ R5_lo R5_hi _ _ R6_lo R6_hi _ _ R7_lo R7_hi _ _ R8_lo R8_hi _ _ R9_lo R9_hi _ _ R10_lo R10_hi _ _ R11_lo R11_hi _ _ R12_lo R12_hi _ _ R13_lo R13_hi _ _ R14_lo R14_hi _ _ R15_lo R15_hi _ _ #None: None_lo None_hi _ _ ]; @else define register offset=0x0000 size=1 [ PC_lo PC_hi # R0 # Program Counter SP_lo SP_hi # R1 # Stack Pointer SR_lo SR_hi # R2 # Status Register R3_lo R3_hi # R3 # Constant Generator #Available for general use: R4_lo R4_hi R5_lo R5_hi R6_lo R6_hi R7_lo R7_hi R8_lo R8_hi R9_lo R9_hi R10_lo R10_hi R11_lo R11_hi R12_lo R12_hi R13_lo R13_hi R14_lo R14_hi R15_lo R15_hi #None: None_lo None_hi ]; @endif define register offset=0x1000 size=4 contextreg; define context contextreg # NOTE: Only instructions that don't use immediates (except ones from constant generator) can # use the repeat feature. # NOTE: The POPM/PUSM have a starting register & # of register to pop/push. We need to track # that info in context for the subtables that do the work. ctx_isHi=(0,0) noflow # Used in pspec to flag msp430 instruction > 64k ctx_al=(1,1) noflow # extension word al field ctx_ctregdest=(2,5) noflow # extension word dest register/immediate field ctx_ctregdests=(2,5) signed noflow # signed version of above ctx_repreg=(2,5) noflow # register repeat count comes from. 
ctx_regsrc=(6,9) noflow # extension word src register/immediate field ctx_regsrcs=(6,9) signed noflow # signed version of above ctx_zc=(10,10) noflow # extension word zero carry field ctx_num=(11,11) noflow # is repetition field a # or register ctx_haveext=(12,14) noflow # used to track type of extension word used ctx_popreg_set=(15,18) noflow # used to set register for POPM/PUSHM instructions ctx_popreg=(15,18) noflow # display register, linked for POPM/PUSHM instructions ctx_count=(19,22) noflow # tracks count of registers for POPM/PUSHM ctx_mreg=(23,26) noflow # register being accessed in POPM/PUSHM ; define register offset=0x2000 size=1 [ CNT ]; # # Tokens # define token instr16(16) op16_0_8 = (0, 7) op16_4_4 = (4, 7) op16_0_4 = (0, 3) op16_7_9 = (7, 15) op16_8_4 = (8, 11) op16_8_8 = (8, 15) op16_12_4 = (12, 15) opext_11_5 = (11, 15) op16_7_1 = (7, 7) op16_13_3 = (13, 15) src = (0, 3) dest = (0, 3) as = (4, 5) bow = (6, 6) insid = (4, 7) insidbig = (4, 9) reg16_0_4 = (0, 3) dest_0_4 = (0, 3) imm_0_4 = (0, 4) reg_Direct16_0_4 = (0, 3) reg_Direct16_0_4W = (0, 3) reg_Indexed16_0_4 = (0, 3) reg_InDirect16_0_4 = (0, 3) dest_Direct16_0_4 = (0, 3) dest_Indexed16_0_4 = (0, 3) dest_Direct_lo = (0, 3) dest_Direct_hi = (0, 3) condition = (10, 12) off16 = (0, 9) signed off16_8_2 = (8, 9) off16_4_4 = (4, 7) off16_0_4 = (0, 3) zc = (8, 8) ad = (7, 7) al = (6, 6) imm_4_4 = (4, 7) imm_8_4 = (8, 11) src_8_4 = (8, 11) src16_8_4 = (8, 11) src_Direct16_8_4 = (8, 11) reg_Direct16_8_4W = (8, 11) src_InDirect16_8_4 = (8, 11) src_Indexed16_8_4 = (8, 11) src_Direct_lo = (8, 11) src_Direct_hi = (8, 11) src_ext = (7, 10) rrn = (10, 11) imm_0_16 = (0, 15) imms_0_16 = (0, 15) signed indexExtWord16_0_16 = (0, 15) indexExtWord16_0_16s = (0, 15) signed indexExt2Word16_0_16 = (0, 15) indexExt2Word16_0_16s = (0, 15) signed ; # # Attach(s) # attach variables [ src_8_4 dest_0_4 reg_Direct16_0_4 src_Direct16_8_4 dest_Direct16_0_4 ctx_popreg ctx_repreg ] [ PC SP SR R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 
R13 R14 R15 ]; attach variables [ dest_Direct_lo src_Direct_lo] [ PC_lo SP_lo SR_lo _ R4_lo R5_lo R6_lo R7_lo R8_lo R9_lo R10_lo R11_lo R12_lo R13_lo R14_lo R15_lo ]; attach variables [ dest_Direct_hi src_Direct_hi] [ PC_hi SP_hi SR_hi _ R4_hi R5_hi R6_hi R7_hi R8_hi R9_hi R10_hi R11_hi R12_hi R13_hi R14_hi R15_hi ]; attach variables [ reg_Indexed16_0_4 src_Indexed16_8_4 dest_Indexed16_0_4 ] [ None SP _ _ R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 ]; attach variables [ reg_InDirect16_0_4 src_InDirect16_8_4 ] [ PC SP _ _ R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 ]; @if REG_SIZE == "4" attach variables [reg_Direct16_0_4W reg_Direct16_8_4W] [PC_16 SP_16 SR_16 _ R4_16 R5_16 R6_16 R7_16 R8_16 R9_16 R10_16 R11_16 R12_16 R13_16 R14_16 R15_16]; @else attach variables [reg_Direct16_0_4W reg_Direct16_8_4W] [PC SP SR _ R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15]; @endif SRC16_8_4: src_Direct16_8_4 is src_Direct16_8_4 & reg_Direct16_8_4W {export reg_Direct16_8_4W;} DST16_0_4: dest_Direct16_0_4 is dest_Direct16_0_4 & reg_Direct16_0_4W {export reg_Direct16_0_4W;} SRC8_8_4: src_Direct16_8_4 is src_Direct16_8_4 & src_Direct_lo {export src_Direct_lo;} DST8_0_4: reg_Direct16_0_4 is reg_Direct16_0_4 & dest_Direct_lo {export dest_Direct_lo;} #################################### # Status Register (SR) Map #################################### # b15-b9: Reserved # b8: V (overflow bit) # b7: SCG1 (System Clock generator 1) # b6: SCG0 (System Clock generator 0) # b5: OSCOFF (Oscillator Off) # b4: CPUOFF (CPU off) # b3: GIE (General Interrupt Enable) # b2: N (Negative Bit) (Word = bit 15, Byte = bit 7)(sign bit) # b1: Z (Zero Bit) # b0: C (Carry Bit) #################################### @define CARRY "SR[0,1]" @define ZERO "SR[1,1]" @define SIGN "SR[2,1]" @define OVERFLOW "SR[8,1]" @define GIE "SR[3,1]" # # Sub Constructors # #----------------------------------------------- # B/W: Byte or Word operation # 0: Word Operation # 1: Byte Operation #----------------------------------------------- 
# AMASK: address mask applied to computed effective addresses.
# REG_SIZE == "4" presumably selects the 20-bit CPUX variant (0xFFFFF mask) -- TODO confirm against the ldefs.
@if REG_SIZE == "4"
AMASK: val is ctx_isHi=1 [ val = 0xFFFF; ] { export *[const]:4 val; }
AMASK: val is ctx_isHi=0 [ val = 0xFFFFF; ] { export *[const]:4 val; }
@else
AMASK: val is bow=0 [ val = 0xFFFE; ] { export *[const]:2 val; } # Memory accesses for unaligned (odd) word addresses round down for alignment.
AMASK: val is bow=1 [ val = 0xFFFF; ] { export *[const]:2 val; }
@endif
#-----------------------------------------------
#
# REGISTER (REG)
#
# The REG modes are used for the 1 operand form instructions
#
#-----------------------------------------------
REG_W_AS: DST16_0_4 is DST16_0_4 & as=0x0 & bow=0x0 {export DST16_0_4;} # Word/Register Direct (Rn):
REG_W_AS: DST16_0_4 is DST16_0_4 & reg16_0_4=0 & as=0x0 & bow=0x0 {DST16_0_4 = inst_next & 0xFFFE; export DST16_0_4;} # PC register accesses point to next instruction
REG_W_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (reg_Indexed16_0_4 + indexExtWord16_0_16s) & AMASK; export *:2 tmp;}
REG_W_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x0 & AMASK & reg_Indexed16_0_4=1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (reg_Indexed16_0_4 + indexExtWord16_0_16s - 0x2) & AMASK; export *:2 tmp;} # PUSH, CALL X(SP) - addressing includes SP decrement
REG_W_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 & AMASK {tmp:$(REG_SIZE) = reg_InDirect16_0_4 & AMASK; export *:2 tmp;} # Word/Register Indirect (@Rn):
REG_W_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 & AMASK {tmp:$(REG_SIZE) = reg_InDirect16_0_4 & AMASK; export *:2 tmp;} # Word/Register Indirect Autoincrement (@Rn+):
REG_W_AS: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x0 & AMASK; indexExtWord16_0_16 [labelCalc = inst_start + 2 + indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:2 tmp; } # Symbolic
REG_W_AS: "#"^indexExtWord16_0_16 is reg16_0_4=0x0 & as=0x3 & bow=0x0 ; indexExtWord16_0_16 {export *[const]:2 indexExtWord16_0_16; } # Immediate
REG_W_AS: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x0 & AMASK; indexExtWord16_0_16 {tmp:$(REG_SIZE) = indexExtWord16_0_16 & AMASK; export *:2 tmp; } # Absolute
# R2/R3 encodings are the MSP430 constant generators.
REG_W_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x0 { export 4:2;} # Constant
REG_W_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x0 { export 8:2;} # Constant
REG_W_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x0 { export 0:2;} # Constant
REG_W_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x0 { export 1:2;} # Constant
REG_W_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x0 { export 2:2;} # Constant
REG_W_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x0 { export 0xffff:2;} # Constant

# Same addressing modes, used when the operand is written as well as read.
REG_W_AS_DEST: DST16_0_4 is DST16_0_4 & as=0x0 & bow=0x0 {export DST16_0_4;} # Word/Register Direct (Rn):
REG_W_AS_DEST: DST16_0_4 is DST16_0_4 & reg16_0_4=0 & as=0x0 & bow=0x0 {DST16_0_4 = inst_next & 0xFFFE; export DST16_0_4;} # PC register accesses point to next instruction
REG_W_AS_DEST: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (reg_Indexed16_0_4 + indexExtWord16_0_16s) & AMASK; export *:2 tmp;}
REG_W_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 & AMASK {tmp:$(REG_SIZE) = reg_InDirect16_0_4 & AMASK; export *:2 tmp;} # Word/Register Indirect (@Rn):
REG_W_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 & AMASK {tmp:$(REG_SIZE) = reg_InDirect16_0_4 & AMASK; export *:2 tmp;} # Word/Register Indirect Autoincrement (@Rn+):
REG_W_AS_DEST: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16 [labelCalc = inst_start + 2 + indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:2 tmp; } # Symbolic
REG_W_AS_DEST: "#"^indexExtWord16_0_16 is reg16_0_4=0x0 & as=0x3 & bow=0x0 & AMASK ; indexExtWord16_0_16 {export *:2 inst_next; } # Immediate - Undocumented behaviour
REG_W_AS_DEST: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16 {tmp:$(REG_SIZE) = indexExtWord16_0_16 & AMASK; export *:2 tmp; } # Absolute
#-----------------------------------------------
REG_B_AS: DST8_0_4 is DST8_0_4 & as=0x0 & bow=0x1 { export DST8_0_4;} # Word/Register Direct (Rn):
REG_B_AS: DST8_0_4 is DST8_0_4 & reg16_0_4=0 & as=0x0 & bow=0x1 {tmp:$(REG_SIZE) = inst_next; DST8_0_4 = tmp:1 & 0xFF; export DST8_0_4;} # PC register accesses point to next instruction - must return register for resulting stores
REG_B_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (reg_Indexed16_0_4 + indexExtWord16_0_16s) & AMASK; export *:1 tmp;}
REG_B_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 & AMASK & reg_Indexed16_0_4=1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (reg_Indexed16_0_4 + indexExtWord16_0_16s - 0x2) & AMASK; export *:1 tmp;} # PUSH.B X(SP) - includes SP decrement
REG_B_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
REG_B_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
REG_B_AS: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16 [labelCalc = inst_start + 2 + indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = labelCalc & AMASK;export *:1 tmp; } # Symbolic
REG_B_AS: "#"^indexExtWord16_0_16 is reg16_0_4=0x0 & as=0x3 & bow=0x1 ; indexExtWord16_0_16 { export *[const]:1 indexExtWord16_0_16; } # Immediate
REG_B_AS: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
REG_B_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x1 { export 4:1;} # Constant
REG_B_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x1 { export 8:1;} # Constant
REG_B_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x1 { export 0:1;} # Constant
REG_B_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x1 { export 1:1;} # Constant
REG_B_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x1 { export 2:1;} # Constant
REG_B_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x1 { export 0xff:1;} # Constant

REG_B_AS_DEST: DST8_0_4 is DST8_0_4 & as=0x0 & bow=0x1 { export DST8_0_4;} # Word/Register Direct (Rn):
REG_B_AS_DEST: DST8_0_4 is DST8_0_4 & reg16_0_4=0 & as=0x0 & bow=0x1 {tmp:$(REG_SIZE) = inst_next; DST8_0_4 = tmp:1 & 0xFF; export DST8_0_4;} # PC register accesses point to next instruction
REG_B_AS_DEST: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (reg_Indexed16_0_4 + indexExtWord16_0_16s) & AMASK; export *:1 tmp;}
REG_B_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
REG_B_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
REG_B_AS_DEST: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16 [labelCalc = inst_start + 2 + indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = labelCalc & AMASK;export *:1 tmp; } # Symbolic
REG_B_AS_DEST: "#"^indexExtWord16_0_16 is reg16_0_4=0x0 & as=0x3 & bow=0x1 & AMASK ; indexExtWord16_0_16 {export *:1 inst_next; } # Undocumented behaviour
REG_B_AS_DEST: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
#-----------------------------------------------
#
# SOURCE (SRC)
#
#-----------------------------------------------
SRC_W_AS: SRC16_8_4 is SRC16_8_4 & as=0x0 & bow=0x0 {export SRC16_8_4;} # Word/Register Direct (Rn):
SRC_W_AS: SRC16_8_4 is SRC16_8_4 & src16_8_4=0 & as=0x0 & bow=0x0 {tmp:2 = inst_next; export tmp;} # PC register accesses point to next instruction (PC-relative addresses already covered by Immediate/Symbolic modes)
SRC_W_AS: indexExtWord16_0_16s^"("^src_Indexed16_8_4^")" is src_Indexed16_8_4 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (src_Indexed16_8_4 + indexExtWord16_0_16s) & AMASK; export *:2 tmp;}
SRC_W_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x0 & AMASK {tmp:$(REG_SIZE) = src_InDirect16_8_4 & AMASK; export *:2 tmp;} # Word/Register Indirect (@Rn):
SRC_W_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x0 & AMASK {tmp:$(REG_SIZE) = src_InDirect16_8_4 & AMASK; export *:2 tmp;} # Word/Register Indirect Autoincrement (@Rn+):
SRC_W_AS: labelCalc is src16_8_4=0x0 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16 [labelCalc = inst_start + 2 + indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:2 tmp; } # Symbolic
SRC_W_AS: "#"^indexExtWord16_0_16 is src16_8_4=0x0 & as=0x3 & bow=0x0 ; indexExtWord16_0_16 {export *[const]:2 indexExtWord16_0_16; } # Immediate
SRC_W_AS: "&"^indexExtWord16_0_16 is src16_8_4=0x2 & as=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16 {tmp:$(REG_SIZE) = indexExtWord16_0_16 & AMASK; export *:2 tmp; } # Absolute
SRC_W_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x0 { export 4:2; } # Constant
SRC_W_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x0 { export 8:2; } # Constant
SRC_W_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x0 { export 0:2; } # Constant
SRC_W_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x0 { export 1:2; } # Constant
SRC_W_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x0 { export 2:2; } # Constant
SRC_W_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x0 { export 0xffff:2; } # Constant
#-----------------------------------------------
SRC_B_AS: SRC8_8_4 is SRC8_8_4 & as=0x0 & bow=0x1 { export SRC8_8_4;} # Word/Register Direct (Rn):
SRC_B_AS: SRC8_8_4 is SRC8_8_4 & src16_8_4=0 & as=0x0 & bow=0x1 {tmp:$(REG_SIZE) = inst_next; tmp2:1 = tmp:1; export tmp2;} # PC register accesses point to next instruction.
# Byte-sized source operand modes (continued): indexed, indirect, symbolic,
# immediate, absolute, and the R2/R3 constant-generator encodings.
SRC_B_AS: indexExtWord16_0_16s^"("^src_Indexed16_8_4^")" is src_Indexed16_8_4 & as=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (src_Indexed16_8_4 + indexExtWord16_0_16s) & AMASK; export *:1 tmp;}
SRC_B_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x1 {export *:1 src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
SRC_B_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x1 {export *:1 src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
SRC_B_AS: labelCalc is src16_8_4=0x0 & as=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16 [labelCalc = inst_start + 2 + indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = labelCalc & AMASK;export *:1 tmp; } # Symbolic
SRC_B_AS: "#"^indexExtWord16_0_16 is src16_8_4=0x0 & as=0x3 & bow=0x1 ; indexExtWord16_0_16 {export *[const]:1 indexExtWord16_0_16;} # Immediate
SRC_B_AS: "&"^indexExtWord16_0_16 is src16_8_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
SRC_B_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x1 { export 4:1; } # Constant
SRC_B_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x1 { export 8:1; } # Constant
SRC_B_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x1 { export 0:1; } # Constant
SRC_B_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x1 { export 1:1; } # Constant
SRC_B_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x1 { export 2:1; } # Constant
SRC_B_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x1 { export 0xff:1; } # Constant
#-----------------------------------------------
#
# DESTINATION (DEST)
#
#-----------------------------------------------
DEST_W_AD: DST16_0_4 is DST16_0_4 & ad=0x0 & bow=0x0 {export DST16_0_4;} # Word/Register Direct (Rn):
DEST_W_AD: DST16_0_4 is DST16_0_4 & dest_0_4=0 & ad=0x0 & bow=0x0 {DST16_0_4 = inst_next; export DST16_0_4;} # PC register accesses point to next instruction.
# Register relative destinations for R1, R4-R15
DEST_W_AD: indexExtWord16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + indexExtWord16_0_16s) & AMASK; export *:2 tmp;}
#---Depends on SRC ---#
# The variants below exist because a two-extension-word instruction places the
# destination index in the SECOND extension word (indexExt2Word...) whenever
# the source mode also consumed an extension word.
# Source is register-relative and involves 'embedded' immediate
DEST_W_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 & AMASK & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s {tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + indexExt2Word16_0_16s) & AMASK; export *:2 tmp;}
# Source is an 'embedded' immediate implemented by @PC+
DEST_W_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 & AMASK & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s {tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + indexExt2Word16_0_16s) & AMASK; export *:2 tmp;}
# Source is involves a register increment (@reg+) that applies to the destination (of same register, but not PC, SR, R3)
DEST_W_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 & AMASK & as=0x3 & src16_8_4=dest_0_4 & (src16_8_4 = 1 | src16_8_4 >= 4) ; indexExt2Word16_0_16s {tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + 2 + indexExt2Word16_0_16s) & AMASK; export *:2 tmp;}
#---End of Depend ----#
# PC-relative destinations
DEST_W_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x0 & AMASK ; indexExtWord16_0_16s [labelCalc = inst_start + 2 + indexExtWord16_0_16s; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:2 tmp; } # Symbolic
#---Depends on SRC ---#
DEST_W_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x0 & AMASK & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 4 + indexExt2Word16_0_16s; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:2 tmp; } # Symbolic
DEST_W_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x0 & AMASK & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 4 + indexExt2Word16_0_16s; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:2 tmp; } # Symbolic
#---End of Depend ----#
# SR-relative (absolute value) destinations
DEST_W_AD: "&"^indexExtWord16_0_16 is dest=0x2 & ad=0x1 & bow=0x0 & AMASK; indexExtWord16_0_16 {tmp:$(REG_SIZE) = indexExtWord16_0_16 & AMASK; export *:2 tmp;} # Absolute
#---Depends on SRC ---#
DEST_W_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x0 & AMASK & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {tmp:$(REG_SIZE) = indexExt2Word16_0_16 & AMASK; export *:2 tmp;} # Absolute
DEST_W_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x0 & AMASK & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {tmp:$(REG_SIZE) = indexExt2Word16_0_16 & AMASK; export *:2 tmp; } # Absolute
#---End of Depend ----#
#-----------------------------------------------
DEST_B_AD: DST8_0_4 is DST8_0_4 & dest_Direct_lo & ad=0x0 & bow=0x1 { export DST8_0_4; } # Word/Register Direct (Rn):
DEST_B_AD: DST8_0_4 is DST8_0_4 & dest_Direct_lo & dest_0_4=0 & ad=0x0 & bow=0x1 {tmp:$(REG_SIZE) = inst_next; DST8_0_4 = tmp:1 & 0xFF; export DST8_0_4;} # PC register accesses point to next instruction
DEST_B_AD: indexExtWord16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16s { tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + indexExtWord16_0_16s) & AMASK; export *:1 tmp;}
#---Depends on SRC ---#
DEST_B_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & AMASK & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s { tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + indexExt2Word16_0_16s) & AMASK; export *:1 tmp;}
DEST_B_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & AMASK & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s { tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + indexExt2Word16_0_16s) & AMASK; export *:1 tmp;}
# Source includes a register increment (@reg+) that applies to the destination (use of same register in source and dest, but not PC, SR, R3)
DEST_B_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & AMASK & as=0x3 & src16_8_4=dest_0_4 & (src16_8_4 = 1 | src16_8_4 >= 4) ; indexExt2Word16_0_16s {tmp:$(REG_SIZE) = (dest_Indexed16_0_4 + 2 + indexExt2Word16_0_16s) & AMASK; export *:1 tmp;}
#---End of Depend ----#
DEST_B_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & AMASK ; indexExtWord16_0_16s [labelCalc = inst_start + 2 + indexExtWord16_0_16s; ] {tmp:$(REG_SIZE) = labelCalc & AMASK; export *:1 tmp; } # Symbolic
#---Depends on SRC ---#
DEST_B_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & AMASK & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 4 + indexExt2Word16_0_16s; ] {tmp:$(REG_SIZE) = labelCalc & AMASK;export *:1 tmp; } # Symbolic
DEST_B_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & AMASK & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 4 + indexExt2Word16_0_16s; ] {tmp:$(REG_SIZE) = labelCalc & AMASK;export *:1 tmp; } # Symbolic
#---End of Depend ----#
DEST_B_AD: "&"^indexExtWord16_0_16 is dest=0x2 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
#---Depends on SRC ---#
DEST_B_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {export *:1 indexExt2Word16_0_16; } # Absolute
DEST_B_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {export *:1 indexExt2Word16_0_16; } # Absolute
#---End of Depend ----#

# For handling constant operands in CALL and BR instructions.
DirectAddr: "#"^label is indexExtWord16_0_16 [label = indexExtWord16_0_16 & 0xFFFE;] {export *:$(REG_SIZE) label; } # Align value to show and jump to actual target

# tbl_bzero / tbl_wzero: when a byte (or truncated word) result lands in a
# register, clear the untouched upper bits while preserving the low part.
# Following is only valid for double operand instructions, whose dest uses ad
tbl_bzero: is ad=0 & reg_Direct16_0_4 & dest_Direct_lo {ztmp:1 = dest_Direct_lo; reg_Direct16_0_4 = 0; dest_Direct_lo = ztmp; }
tbl_bzero: is epsilon {}
# Following is valid for single operand instructions whose dest uses as
tbl_bzero_singleop: is as=0 & reg_Direct16_0_4 & dest_Direct_lo {ztmp:1 = dest_Direct_lo; reg_Direct16_0_4 = 0; dest_Direct_lo = ztmp; }
tbl_bzero_singleop: is epsilon {}
@if REG_SIZE == "4"
tbl_wzero: is ad=0 & reg_Direct16_0_4 & reg_Direct16_0_4W {ztmp:2 = reg_Direct16_0_4W; reg_Direct16_0_4 = 0; reg_Direct16_0_4W = ztmp; }
@endif
tbl_wzero: is epsilon {}

#
# Post Processing
# does correct increment of source register
# Also catches when PC is being stored to and does the correct branching
#
postRegIncrement: is as=0x3 & dest_0_4 & bow=0x0 & reg_InDirect16_0_4 { reg_InDirect16_0_4 = reg_InDirect16_0_4 + 2; }
postRegIncrement: is as=0x3 & dest_0_4=1 & bow=0x0 & reg_InDirect16_0_4 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0) { } # PUSH.W SP, SP modification covered by PUSH
postRegIncrement: is as=0x3 & dest_0_4 & bow=0x1 & reg_InDirect16_0_4 { reg_InDirect16_0_4 = reg_InDirect16_0_4 + 1; }
postRegIncrement: is as=0x3 & dest_0_4=1 & bow=0x1 & reg_InDirect16_0_4 { reg_InDirect16_0_4 = reg_InDirect16_0_4 + 2; }
postRegIncrement: is as=0x3 & dest_0_4=1 & bow=0x1 & reg_InDirect16_0_4 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0) { } # PUSH.B @SP+, SP modification covered by PUSH
postRegIncrement: is as=0x3 & dest_0_4=0 & bow=0x0 & reg_InDirect16_0_4 { } # PC is incremented by 2, but that is just to skip over the value
postRegIncrement: is as=0x3 & dest_0_4=0 & bow=0x1 & reg_InDirect16_0_4 { } # PC is incremented by 2, but that is just to skip over the value
postRegIncrement: is as=0x3 & dest_0_4=2 & bow=0x1 { }
postRegIncrement: is as=0x3 & dest_0_4=3 & bow=0x1 { }
postRegIncrement: is as=0x3 & dest_0_4=2 & bow=0x0 { }
postRegIncrement: is as=0x3 & dest_0_4=3 & bow=0x0 { }
# NOTE: the byte case masks with 0xFE (not 0xFFFE) because a byte write to PC
# goes through PC_lo and is zero-extended, so PC is already <= 0xFF here.
postRegIncrement: is as=0x0 & dest_0_4=0 & bow=0x0 & (op16_12_4!=0x1 | op16_8_4!=0x2 | op16_7_1!=0x0) { PC = PC & 0xFFFE; goto [PC]; } # If PC is modified, alter flow (except for PUSH instructions)
postRegIncrement: is as=0x0 & dest_0_4=0 & bow=0x1 & (op16_12_4!=0x1 | op16_8_4!=0x2 | op16_7_1!=0x0) { PC = PC & 0xFE; goto [PC]; } # If PC is modified, alter flow (except for PUSH instructions)
postRegIncrement: is as & bow { }

# R2 and R3 are constant generators - post-increment not supported
postIncrement: is as=0x3 & ctx_haveext=0 & src16_8_4=2 & bow=0x0 & ctx_al=0 { }
postIncrement: is as=0x3 & ctx_haveext=0 & src16_8_4=2 & bow=0x1 & ctx_al=0 { }
postIncrement: is as=0x3 & ctx_haveext=0 & src16_8_4=3 & bow=0x0 & ctx_al=0 { }
postIncrement: is as=0x3 & ctx_haveext=0 & src16_8_4=3 & bow=0x1 & ctx_al=0 { }
postIncrement: is as=0x3 & src16_8_4=2 & bow=0x0 { }
postIncrement: is as=0x3 & src16_8_4=2 & bow=0x1 { }
postIncrement: is as=0x3 & src16_8_4=3 & bow=0x0 { }
postIncrement: is as=0x3 & src16_8_4=3 & bow=0x1 { }
postIncrement: is as=0x3 & src16_8_4=0 & bow=0x1 & src_InDirect16_8_4 { } # PC is incremented by 2, but that is just to skip over the value
postIncrement: is as=0x3 & src16_8_4=0 & bow=0x0 & src_InDirect16_8_4 { } # PC is incremented by 2, but that is just to skip over the value
postIncrement: is as=0x3 & src16_8_4 & bow=0x0 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 2; }
postIncrement: is as=0x3 & src16_8_4 & bow=0x1 & ctx_al=0 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 4; }
postIncrement: is as=0x3 & src16_8_4=1 & bow=0x1 & ctx_al=0 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 4; }
postIncrement: is as=0x3 & src16_8_4 & bow=0x1 & ctx_al=1 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 1; }
postIncrement: is as=0x3 & src16_8_4=1 & bow=0x1 & ctx_al=1 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 2; }
postIncrement: is as=0x3 & ctx_haveext=0 & src16_8_4 & bow=0x1 & ctx_al=0 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 1; }
postIncrement: is as=0x3 & ctx_haveext=0 & src16_8_4=1 & bow=0x1 & ctx_al=0 & src_InDirect16_8_4 { src_InDirect16_8_4 = src_InDirect16_8_4 + 2; }
postIncrement: is as & src16_8_4 & bow { }

#
# Zero Extends if the store is byte oriented, and a register is being stored to
zeroExtend: is dest_Direct_lo & dest_Direct16_0_4 { dest_Direct16_0_4 = zext(dest_Direct_lo); }

#
# Post processing when destination is the PC - for byte operations
#
postIncrementStore: is postIncrement & ad=0x0 & src_InDirect16_8_4 & as=0x3 & src16_8_4=1 & dest_Direct16_0_4=0x0 & bow=0x1 & ctx_al=1 & zeroExtend { build zeroExtend; build postIncrement; return [PC]; }
postIncrementStore: is postIncrement & ad=0x0 & dest_Direct16_0_4=0x0 & bow=0x1 & ctx_al=1 & zeroExtend { build zeroExtend; build postIncrement; PC = PC & 0xFFFFFE; goto [PC];} # Writes to PC are rounded to alignment
postIncrementStore: is postIncrement & ad=0x0 & bow=0x1 & ctx_al=1 & zeroExtend { build zeroExtend; build postIncrement; }
postIncrementStore: is postIncrement & ctx_haveext=0 & ad=0x0 & src_InDirect16_8_4 & as=0x3 & src16_8_4=1 & dest_Direct16_0_4=0x0 & bow=0x1 & zeroExtend { build zeroExtend; build postIncrement; return [PC]; }
postIncrementStore: is postIncrement & ctx_haveext=0 & ad=0x0 & dest_Direct16_0_4=0x0 & bow=0x1 & zeroExtend # MOV.B any,PC
{ build zeroExtend; build postIncrement; PC = PC & 0xFE; goto [PC];} # Writes to PC are rounded to alignment
postIncrementStore: is postIncrement & ctx_haveext=0 & ad=0x0 & bow=0x1 & zeroExtend { build zeroExtend; build postIncrement; }
postIncrementStore: is postIncrement & ad=0x0 & src_InDirect16_8_4 & as=0x3 & src16_8_4=1 & dest_Direct16_0_4=0x0 { build postIncrement; return [PC]; }
postIncrementStore: is postIncrement & ad=0x0 & dest_Direct16_0_4=0x0 { build postIncrement; PC = PC & 0xFFFE; goto [PC];} # Writes to PC are rounded to alignment
postIncrementStore: is postIncrement & ad & bow { build postIncrement; }
#-----------------------------------------------
#
# JUMP CONDITION (JCND)
#
#-----------------------------------------------
# Each row exports a 1-byte boolean computed from the SR condition flags.
JCND: "NE" is condition=0x0 {cndTst:1 = !$(ZERO); export cndTst;} # Not Equal/Zero (cleared)
JCND: "EQ" is condition=0x1 {cndTst:1 = $(ZERO); export cndTst;} # Equal/Zero (set)
JCND: "NC" is condition=0x2 {cndTst:1 = !$(CARRY); export cndTst;} # No Carry/Lower (cleared)
JCND: "C" is condition=0x3 {cndTst:1 = $(CARRY); export cndTst;} # Carry/Higher or same (set)
JCND: "N" is condition=0x4 {cndTst:1 = $(SIGN); export cndTst;} # Negative (set)
JCND: "GE" is condition=0x5 {cndTst:1 = ($(SIGN) == $(OVERFLOW)); export cndTst;} # Greater or equal (>=)
JCND: "L" is condition=0x6 {cndTst:1 = ($(SIGN) != $(OVERFLOW)); export cndTst;} # Less (<)
JCND: "MP" is condition=0x7 {cndTst:1 = 0x1; export cndTst;} # Unconditional
#-----------------------------------------------
#
# 10 BIT OFFSET
#
#-----------------------------------------------
# Jump target = address of next word + (signed 10-bit offset * 2).
OFFSET_10BIT: offset10 is off16 [offset10 = inst_start + 2 + off16 * 2; ] { export *:2 offset10;}
###################################################################################
#
# Single-operand arithmetic
#
# ------------------------------------------------------------------------------
# | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
# ------------------------------------------------------------------------------
# | 0 | 0 | 0 | 1 | 0 | 0 | opcode | B/W | As | register |
# ------------------------------------------------------------------------------
################################################################################### ################################################################################### # # RRC: Rotate right through carry # ------------------------------------------------------------------------------ # | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | # ------------------------------------------------------------------------------ # | 0 | 0 | 0 | 1 | 0 | 0 | 000 | B/W | As | register | :RRC^".W" REG_W_AS_DEST is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x0 & bow=0x0 & tbl_wzero & postRegIncrement) ... & REG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = 0; # V Flag is reset # Operation... tmp:1 = $(CARRY); $(CARRY) = REG_W_AS_DEST[0,1]; REG_W_AS_DEST = ((zext(tmp) << 0xF) | (REG_W_AS_DEST >> 0x1)); build tbl_wzero; # Result Flags... $(SIGN) = (REG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (REG_W_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :RRC^".B" REG_B_AS_DEST is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x0 & bow=0x1 & tbl_bzero_singleop & postRegIncrement) ... & REG_B_AS_DEST { # Operation Flags... $(OVERFLOW) = 0; # V Flag is reset # Operation... tmp:1 = $(CARRY); $(CARRY) = (REG_B_AS_DEST & 0x1); REG_B_AS_DEST = ((tmp << 0x7) | (REG_B_AS_DEST >> 0x1)); build tbl_bzero_singleop; # Result Flags... $(SIGN) = (REG_B_AS_DEST s< 0x0); # S Flag $(ZERO) = (REG_B_AS_DEST == 0x0); # Z Flag build postRegIncrement; } ################################################################################### # # SWPB: Swap bytes # ------------------------------------------------------------------------------ # | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | # ------------------------------------------------------------------------------ # | 0 | 0 | 0 | 1 | 0 | 0 | 001 | 0 | As | register | :SWPB REG_W_AS_DEST is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x1 & bow=0x0 & tbl_wzero & postRegIncrement) ... 
& REG_W_AS_DEST { lowByte:1 = REG_W_AS_DEST[0,8]; highByte:1 = REG_W_AS_DEST[8,8]; REG_W_AS_DEST = (((zext(lowByte)) << 0x8) | zext(highByte)); build tbl_wzero; #Status bits are not affected build postRegIncrement; } ################################################################################### # # RRA: Rotate right arithmetic # ------------------------------------------------------------------------------ # | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | # ------------------------------------------------------------------------------ # | 0 | 0 | 0 | 1 | 0 | 0 | 010 | B/W | As | register | :RRA^".W" REG_W_AS_DEST is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x0 & bow=0x0 & tbl_wzero & postRegIncrement) ... & REG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... $(CARRY) = REG_W_AS_DEST[0,1]; MSB:2 = REG_W_AS_DEST >> 0xF; REG_W_AS_DEST = ((MSB << 0xF) | (REG_W_AS_DEST >> 0x1)); build tbl_wzero; # Result Flags... $(SIGN) = (REG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (REG_W_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :RRA^".B" REG_B_AS_DEST is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x0 & bow=0x1 & tbl_bzero_singleop & postRegIncrement) ... & REG_B_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... $(CARRY) = (REG_B_AS_DEST & 0x1); MSB:1 = REG_B_AS_DEST >> 0x7; REG_B_AS_DEST = ((MSB << 0x7) | (REG_B_AS_DEST >> 0x1)); build tbl_bzero_singleop; # Result Flags... 
# Tail of the preceding byte constructor: result flags, then deferred
# post-increment of the source register.
$(SIGN) = (REG_B_AS_DEST s< 0x0); # S Flag
$(ZERO) = (REG_B_AS_DEST == 0x0); # Z Flag
build postRegIncrement;
}

###################################################################################
#
# SXT: Sign extend byte to word
# encoding: | 0 0 0 1 | 0 0 | 011 | 0 | As | register |
#
# Sign-extends the low byte of the operand into the full word.
# Flags: V cleared, S/Z from result, C set iff result is non-zero.
:SXT REG_W_AS_DEST is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x1 & bow=0x0 & tbl_wzero & postRegIncrement) ... & REG_W_AS_DEST
{
    # Operation Flags...
    $(OVERFLOW) = 0x0; # V Flag
    # Operation...
    byteVal:1 = REG_W_AS_DEST[0,8];
    REG_W_AS_DEST = sext(byteVal);
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (REG_W_AS_DEST s< 0x0); # S Flag
    $(ZERO) = (REG_W_AS_DEST == 0x0); # Z Flag
    $(CARRY) = (REG_W_AS_DEST != 0x0); # C Flag
    build postRegIncrement;
}

###################################################################################
#
# PUSH: Push value onto stack
# encoding: | 0 0 0 1 | 0 0 | 100 | B/W | As | register |

# Word push: the store address is masked with AMASK for a possibly unaligned SP.
:PUSH^".W" REG_W_AS is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0 & bow=0x0 & postRegIncrement & AMASK) ... & REG_W_AS
{
    *:2 ((SP - 0x2) & AMASK) = REG_W_AS; # Mask for possible unaligned SP
    SP = SP - 0x2; # Actual behaviour, in conflict with documentation
    #Status bits are not affected
    build postRegIncrement;
}

# Byte push. NOTE(review): unlike PUSH.W, the store address is not masked
# with AMASK here — presumably byte accesses need no alignment fix; confirm.
:PUSH^".B" REG_B_AS is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ...
& REG_B_AS
{
    *:1 (SP - 0x2) = REG_B_AS;
    SP = SP - 0x2; # Actual behaviour, in conflict with documentation
    #Status bits are not affected
    build postRegIncrement;
}

###################################################################################
#
# CALL: Subroutine call; push PC and move source to PC
# encoding: | 0 0 0 1 | 0 0 | 101 | 0 | As | As | register |

# Register/indirect call: target is read first, so CALL SP uses the old SP.
:CALL REG_W_AS is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x1 & bow=0x0 & postRegIncrement & AMASK) ... & REG_W_AS
{
    PC = zext(REG_W_AS) & 0xFFFFFFFE:$(REG_SIZE); # PC assignment before SP modification (relevant for CALL SP). Behaviour differs from documentation
    SP = SP - 0x2;
    *:2 (SP & AMASK) = inst_next;
    build postRegIncrement;
    call [PC];
    #Status bits are not affected
}

# Immediate/direct-address call variant (As=11, register 0 => immediate word).
:CALL DirectAddr is ctx_haveext=0 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x1 & reg16_0_4=0x0 & as=0x3 & bow=0x0 & postRegIncrement & AMASK); DirectAddr
{
    PC = &DirectAddr; # PC assignment before SP modification (relevant for CALL SP). Behaviour differs from documentation
    SP = SP - 0x2;
    *:2 (SP & AMASK) = inst_next;
    build postRegIncrement;
    call DirectAddr;
    #Status bits are not affected
}

###################################################################################
#
# RETI: Return from interrupt; pop SR then pop PC
# encoding: | 0 0 0 1 | 0 0 | 110 | 0 | 00 | 0000 |
#
# On the extended (20-bit) variant the top 4 PC bits are recovered from the
# upper nibble of the saved SR word, per the MSP430X interrupt stack frame.
:RETI is ctx_haveext=0 & op16_12_4=0x1 & op16_8_4=0x3 & op16_7_1=0x0 & as=0x0 & bow=0x0 & op16_0_4=0x0 & op16_4_4=0x0 & AMASK
{
@if REG_SIZE == "2"
    SR = *:2 (SP & AMASK);
    SP = SP + 0x2;
    PC = (*:2 (SP & AMASK)) & AMASK;
@else
    tmp:$(REG_SIZE) = zext(*:2 SP);
    SR = zext(tmp[0,12]);
    SP = SP + 0x2;
    PC = zext(*:2 SP) | ((tmp & 0xF000) << 4);
@endif
    SP = SP + 0x2;
    return [PC];
    #Status bits are restored from system stack
}

###################################################################################
#
# Conditional jump; PC = PC + 2*offset
# encoding: | 0 0 1 | condition | 10-bit signed offset |
#
###################################################################################

###################################################################################
#
# J^JumpCondition 10-bit_signed_offset
# encoding: | 0 0 1 | condition | 10-bit signed offset |
# (constructor continues on the following line)
:J^JCND
OFFSET_10BIT is ctx_haveext=0 & op16_13_3=0x1 & JCND & OFFSET_10BIT
{
    if (JCND) goto OFFSET_10BIT;
    #Status bits are not affected
}

# Unconditional jump: condition field 111 decodes as JMP rather than J^cond.
:JMP OFFSET_10BIT is ctx_haveext=0 & op16_13_3=0x1 & condition=0x7 & OFFSET_10BIT
{
    goto OFFSET_10BIT;
    #Status bits are not affected
}

###################################################################################
#
# Two-operand arithmetic
# encoding: | opcode | source | Ad | B/W | As | destination |
#
###################################################################################

###################################################################################
#
# MOV: Move source to destination
# encoding: | 0 1 0 0 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Branch (emulated as MOV src,PC); target is rounded down to word alignment.
:BR SRC_W_AS is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x0 & ad=0x0 & dest_Direct16_0_4=0x0 & postIncrement) ... & SRC_W_AS ...
{
    PC = zext(SRC_W_AS) & 0xFFFFFFFE:$(REG_SIZE);
    build postIncrement; # needed before branch
    goto [PC];
    #Status bits are not affected
}

# Branch to an immediate value
:BR DirectAddr is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x0 & ad=0x0 & dest_Direct16_0_4=0x0 & src_Direct16_8_4=0x0 & as=0x3); DirectAddr
{
    PC = &DirectAddr;
    goto DirectAddr;
    #Status bits are not affected
}

# No operation (emulated as MOV R3,R3)
:NOP is ctx_haveext=0 & op16_12_4=0x4 & bow=0x0 & ad=0x0 & as=0x0 & dest_Direct16_0_4=0x3 & src_Direct16_8_4=0x3 & postIncrement
{
    #Status bits are not affected
    build postIncrement;
}

# Pop word from stack (emulated as MOV @SP+,dst)
:POP^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x0 & (ad=0x1 | dest_Direct16_0_4) & as=0x3 & src_Direct16_8_4=0x1 & tbl_wzero & AMASK) ... & DEST_W_AD ...
{
    DEST_W_AD = *:2 (SP & AMASK);
    build tbl_wzero;
    SP = SP + 0x2;
    #Status bits are not affected
}

# Pop byte from stack. NOTE(review): byte load from SP is not masked with
# AMASK here, unlike the word form — confirm intentional.
:POP^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x1 & (ad=0x1 | dest_Direct16_0_4) & as=0x3 & src_Direct16_8_4=0x1 & tbl_bzero) ... & DEST_B_AD ...
{
    DEST_B_AD = *:1 SP;
    build tbl_bzero;
    SP = SP + 0x2;
    #Status bits are not affected
}

# POP.W SP - increment occurs after read but before write, and is therefore lost
:POP^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x0 & ad=0x0 & dest_Direct16_0_4 & as=0x3 & src_Direct16_8_4=0x1 & dest_Direct16_0_4=0x1 & tbl_wzero & AMASK) ... & DEST_W_AD ...
{
    DEST_W_AD = *:2 (SP & AMASK); # Unaligned word memory accesses round down
    build tbl_wzero;
    #Status bits are not affected
}

# POP.B SP - increment occurs after read but before write, and is therefore lost
:POP^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x1 & ad=0x0 & dest_Direct16_0_4 & as=0x3 & src_Direct16_8_4=0x1 & dest_Direct16_0_4=0x1 & tbl_bzero) ... & DEST_B_AD ...
{
    DEST_B_AD = *:1 SP;
    build tbl_bzero;
    #Status bits are not affected
}

# POP.B PC - writes to PC are rounded down, but base instruction does not use postIncrementStore to handle this case
:POP^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x1 & ad=0x0 & dest_Direct16_0_4 & as=0x3 & src_Direct16_8_4=0x1 & dest_Direct16_0_4=0x0 & tbl_bzero) ... & DEST_B_AD ...
{
    DEST_B_AD = *:1 SP;
    build tbl_bzero;
    SP = SP + 0x2;
    PC = PC & 0xFFFFFFFE:$(REG_SIZE);
    goto [PC];
    #Status bits are not affected
}

# Return from subroutine (emulated as MOV @SP+,PC)
:RET is ctx_haveext=0 & op16_12_4=0x4 & bow=0x0 & ad=0x0 & as=0x3 & dest_Direct16_0_4=0x0 & src_Direct16_8_4=0x1 & AMASK
{
    PC = zext(*:2 (SP & AMASK)) & AMASK; # Stack pointer can be misaligned, and subsequent write to PC rounds to alignment
    SP = SP + 0x2;
    return [PC];
    #Status bits are not affected
}

#------------------
# SRC Word
#------------------
:MOV^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    DEST_W_AD = SRC_W_AS;
    build tbl_wzero;
    #Status bits are not affected
    build postIncrementStore;
}

#------------------
# SRC Byte
#------------------
# (body continues on the following line)
:MOV^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x4 & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    DEST_B_AD = SRC_B_AS;
    build tbl_bzero;
    #Status bits are not affected
    build postIncrementStore;
}

###################################################################################
#
# ADD: Add source to destination
# encoding: | 0 1 0 1 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Increment word (emulated as ADD #1,dst).
# C/V are computed from the pre-increment value before the add is applied.
:INC^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x5 & as=0x1 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    $(CARRY) = carry(DEST_W_AD,1); # C Flag
    $(OVERFLOW) = scarry(DEST_W_AD,1); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD + 0x1;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Increment byte
:INC^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x5 & as=0x1 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    $(CARRY) = carry(DEST_B_AD,1); # C Flag
    $(OVERFLOW) = scarry(DEST_B_AD,1); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD + 0x1;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Double increment word (emulated as ADD #2,dst)
:INCD^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x5 & as=0x2 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    $(CARRY) = carry(DEST_W_AD,2); # C Flag
    $(OVERFLOW) = scarry(DEST_W_AD,2); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD + 0x2;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Double increment byte
:INCD^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x5 & as=0x2 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    $(CARRY) = carry(DEST_B_AD,2); # C Flag
    $(OVERFLOW) = scarry(DEST_B_AD,2); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD + 0x2;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Rotate left arithmetic (left shift once) word — emulated as ADD dst,dst,
# matched when source and destination register fields are equal.
:RLA^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x5 & as=0x0 & ad=0x0 & src_Direct16_8_4=dest_Direct16_0_4 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    $(CARRY) = carry(DEST_W_AD, DEST_W_AD); # C Flag
    $(OVERFLOW) = scarry(DEST_W_AD, DEST_W_AD); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD + DEST_W_AD;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Rotate left arithmetic (left shift once) byte
:RLA^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x5 & as=0x0 & ad=0x0 & src_Direct16_8_4=dest_Direct16_0_4 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    $(CARRY) = carry(DEST_B_AD, DEST_B_AD); # C Flag
    $(OVERFLOW) = scarry(DEST_B_AD, DEST_B_AD); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD + DEST_B_AD;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Word
#------------------
# C/V are captured in temporaries before the add, because DEST may alias SRC.
:ADD^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x5 & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    # Operation Flags...
    tmp_carry:1 = carry(SRC_W_AS, DEST_W_AD); # C Flag
    tmp_overflow:1 = scarry(SRC_W_AS, DEST_W_AD); # V Flag
    # Operation...
    DEST_W_AD = SRC_W_AS + DEST_W_AD;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
:ADD^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x5 & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    # Operation Flags...
    tmp_carry:1 = carry(SRC_B_AS, DEST_B_AD); # C Flag
    tmp_overflow:1 = scarry(SRC_B_AS, DEST_B_AD); # V Flag
    # Operation...
    DEST_B_AD = SRC_B_AS + DEST_B_AD;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    build postIncrementStore;
}

###################################################################################
#
# ADDC: Add source and carry to destination
# encoding: | 0 1 1 0 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Add carry to word (emulated as ADDC #0,dst)
:ADC^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x6 & as=0x0 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    tmp_carry:1 = carry(DEST_W_AD,zext($(CARRY))); #C Flag
    tmp_overflow:1 = scarry(DEST_W_AD, zext($(CARRY))); #V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD + zext($(CARRY));
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    build postIncrementStore;
}

# Add carry to byte (body continues on the following line)
:ADC^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x6 & as=0x0 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ...
& DEST_B_AD ...
{
    # Operation Flags... ($(CARRY) is 1 byte, so no zext is needed in the byte form)
    tmp_carry:1 = carry(DEST_B_AD,$(CARRY)); #C Flag
    tmp_overflow:1 = scarry(DEST_B_AD,$(CARRY)); #V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD + $(CARRY);
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    build postIncrementStore;
}

# Rotate word left through carry (emulated as ADDC dst,dst).
# Both C and V combine the flags of the two conceptual adds (dst+C, then +dst).
:RLC^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x6 & as=0x0 & ad=0x0 & src_Direct16_8_4=dest_Direct16_0_4 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    tmp_carry:1 = (carry(DEST_W_AD,zext($(CARRY))) || carry(DEST_W_AD,DEST_W_AD + zext($(CARRY)))); #C Flag
    tmp_overflow:1 = (scarry(DEST_W_AD,zext($(CARRY))) || scarry(DEST_W_AD,DEST_W_AD + zext($(CARRY)))); #V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD + DEST_W_AD + zext($(CARRY));
    build tbl_wzero;
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Rotate byte left through carry.
# NOTE(review): unlike RLC.W, $(OVERFLOW) is written directly BEFORE the add
# instead of via a temporary stored afterwards — confirm this ordering is
# intentional (it matters only if tbl_bzero/the destination touches SR).
:RLC^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x6 & as=0x0 & ad=0x0 & src_Direct16_8_4=dest_Direct16_0_4 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    tmp_carry:1 = (carry(DEST_B_AD, $(CARRY)) || carry(DEST_B_AD,DEST_B_AD + $(CARRY))); #C Flag
    $(OVERFLOW) = (scarry(DEST_B_AD, $(CARRY)) || scarry(DEST_B_AD,DEST_B_AD + $(CARRY))); #V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD + DEST_B_AD + $(CARRY);
    build tbl_bzero;
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# dest := dest + src + C, setting C and V.
# C ORs the carries of the two partial adds (they cannot both occur);
# V XORs the signed overflows of the two partial adds.
macro addWithCarry(src, dest){
    local incoming_carry = zext($(CARRY));
    local sum_without_carry = src + dest;
    local tmp_carry:1 = carry(src, dest);
    tmp_carry = tmp_carry || carry(sum_without_carry, incoming_carry);
    local tmp_overflow:1 = scarry(src, dest);
    tmp_overflow = tmp_overflow ^^ scarry(sum_without_carry, incoming_carry);
    dest = sum_without_carry + incoming_carry;
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
}

#------------------
# 16 bit SRC Word
#------------------
:ADDC^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x6 & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    addWithCarry(SRC_W_AS,DEST_W_AD);
    build tbl_wzero;
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
:ADDC^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x6 & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    addWithCarry(SRC_B_AS, DEST_B_AD);
    build tbl_bzero;
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

###################################################################################
#
# SUBC: Subtract source from destination (with carry)
# encoding: | 0 1 1 1 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Subtract borrow from word (emulated as SUBC #0,dst); borrow = NOT carry.
:SBC^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x7 & as=0x0 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    brw:2 = 1 - zext( $(CARRY) );
    $(CARRY) = (brw <= DEST_W_AD); # Carry flag is NOT set if there is a borrow
    $(OVERFLOW) = sborrow(DEST_W_AD, brw);
    # Operation...
    DEST_W_AD = DEST_W_AD - brw;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Subtract borrow from byte
:SBC^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x7 & as=0x0 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    brw:1 = 1 - $(CARRY);
    $(CARRY) = (brw <= DEST_B_AD); # Carry flag is NOT set if there is a borrow
    $(OVERFLOW) = sborrow(DEST_B_AD, brw);
    # Operation...
    DEST_B_AD = DEST_B_AD - brw;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# dest := dest - src - (NOT C), MSP430 borrow convention (C set = no borrow).
macro subtractWithCarry(dest, src){
    local incoming_carry = zext($(CARRY));
    local not_incoming_carry = zext(!($(CARRY)));
    local diff_without_carry = dest - src;
    local tmp_carry:1 = dest > src;
    tmp_carry = tmp_carry || (diff_without_carry < incoming_carry);
    local tmp_overflow:1 = sborrow(dest, src);
    tmp_overflow = tmp_overflow ^^ sborrow(diff_without_carry, not_incoming_carry);
    dest = diff_without_carry - not_incoming_carry;
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
}

#------------------
# 16 bit SRC Word
#------------------
:SUBC^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x7 & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    subtractWithCarry(DEST_W_AD,SRC_W_AS);
    build tbl_wzero;
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
# (body continues on the following line)
:SUBC^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x7 & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    subtractWithCarry(DEST_B_AD,SRC_B_AS);
    build tbl_bzero;
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

###################################################################################
#
# SUB: Subtract source from destination
# encoding: | 1 0 0 0 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Decrement word (emulated as SUB #1,dst).
# C clear only when dst was 0; V set only when dst was 0x8000 (min-signed wrap).
:DEC^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x8 & as=0x1 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    $(CARRY) = (0x0 != DEST_W_AD); # C Flag
    $(OVERFLOW) = (0x8000 == DEST_W_AD); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD - 0x1;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Decrement byte
:DEC^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x8 & as=0x1 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    $(CARRY) = (0x0 != DEST_B_AD); # C Flag
    $(OVERFLOW) = (0x80 == DEST_B_AD); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD - 0x1;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Double decrement word (emulated as SUB #2,dst)
:DECD^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x8 & as=0x2 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    $(CARRY) = ((0x0 != DEST_W_AD) && (0x1 != DEST_W_AD)); # C Flag
    $(OVERFLOW) = ((0x8000 == DEST_W_AD) || (0x8001 == DEST_W_AD)); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD - 0x2;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Double decrement byte
:DECD^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x8 & as=0x2 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    $(CARRY) = ((0x0 != DEST_B_AD) && (0x1 != DEST_B_AD)); # C Flag
    $(OVERFLOW) = ((0x80 == DEST_B_AD) || (0x81 == DEST_B_AD)); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD - 0x2;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Word
#------------------
:SUB^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x8 & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    # Operation Flags...
    tmp_carry:1 = (SRC_W_AS <= DEST_W_AD); # Carry is NOT set if there is a borrow
    tmp_overflow:1 = sborrow(DEST_W_AD, SRC_W_AS); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD - SRC_W_AS;
    build tbl_wzero;
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
:SUB^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x8 & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    # Operation Flags...
    tmp_carry:1 = (SRC_B_AS <= DEST_B_AD); # Carry is NOT set if there is a borrow
    tmp_overflow:1 = sborrow(DEST_B_AD, SRC_B_AS); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD - SRC_B_AS;
    build tbl_bzero;
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

###################################################################################
#
# CMP: Compare (pretend to subtract) source from destination
# encoding: | 1 0 0 1 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Test word (emulated as CMP #0,dst): flags only, destination unchanged.
:TST^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x9 & as=0x0 & src_Direct16_8_4=0x3 & bow=0x0 & postIncrement) ... & DEST_W_AD ...
{
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    # Operation Flags...
    $(CARRY) = 1; # Carry is NOT set if there is a borrow
    $(OVERFLOW) = 0; # V Flag
    build postIncrement;
}

# Test byte
:TST^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x9 & as=0x0 & src_Direct16_8_4=0x3 & bow=0x1 & postIncrement) ... & DEST_B_AD ...
{
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    # Operation Flags...
    $(CARRY) = 1;
    $(OVERFLOW) = 0;
    build postIncrement;
}

#------------------
# 16 bit SRC Word
#------------------
# (body continues on the following line)
:CMP^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0x9 & bow=0x0 & postIncrement) ... & SRC_W_AS ... & DEST_W_AD ...
{
    # Operation Flags...
    tmp_carry:1 = (SRC_W_AS <= DEST_W_AD); # Carry is NOT set if there is a borrow
    tmp_overflow:1 = sborrow(DEST_W_AD, SRC_W_AS); # V Flag
    # Operation... (difference is computed into a temporary; DEST is not written)
    result:2 = (DEST_W_AD - SRC_W_AS);
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    $(SIGN) = (result s< 0x0); # S Flag
    $(ZERO) = (result == 0x0); # Z Flag
    build postIncrement;
}

#------------------
# 16 bit SRC Byte
#------------------
:CMP^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0x9 & bow=0x1 & postIncrement) ... & SRC_B_AS ... & DEST_B_AD ...
{
    # Operation Flags...
    tmp_carry:1 = (SRC_B_AS <= DEST_B_AD); # Carry is NOT set if there is a borrow
    tmp_overflow:1 = sborrow(DEST_B_AD, SRC_B_AS); # V Flag
    # Operation...
    result:1 = (DEST_B_AD - SRC_B_AS);
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(OVERFLOW) = tmp_overflow;
    $(SIGN) = (result s< 0x0); # S Flag
    $(ZERO) = (result == 0x0); # Z Flag
    build postIncrement;
}

###################################################################################
#
# DADD: Decimal add source to destination (with carry)
# encoding: | 1 0 1 0 | source | Ad | B/W | As | destination |

#----------------------------------------------------------------------------------------------------------------
# These decimal add instructions appear to lack supporting BCD p-code operations to easily handle the operation and flags.
# The BCD add is therefore modelled nibble-by-nibble below; note that the
# constructors do not write $(OVERFLOW) at all (V is undefined for DADD).
#----------------------------------------------------------------------------------------------------------------

#-----------------------
# Emulated instructions
#-----------------------

# Decimal add carry to word (emulated as DADD #0,dst)
:DADC^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xA & as=0x0 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation... (ripple the incoming carry through each BCD digit)
    dst_nibble0:2 = DEST_W_AD & 0xf;
    dst_nibble1:2 = (DEST_W_AD >> 4) & 0xf;
    dst_nibble2:2 = (DEST_W_AD >> 8) & 0xf;
    dst_nibble3:2 = (DEST_W_AD >> 12) & 0xf;
    res_nibble0:2 = dst_nibble0 + zext($(CARRY));
    carry_nibble0:2 = zext(res_nibble0 > 9);
    res_nibble0 = (res_nibble0 - carry_nibble0 * 10) & 0xf;
    res_nibble1:2 = dst_nibble1 + carry_nibble0;
    carry_nibble1:2 = zext(res_nibble1 > 9);
    res_nibble1 = (res_nibble1 - carry_nibble1 * 10) & 0xf;
    res_nibble2:2 = dst_nibble2 + carry_nibble1;
    carry_nibble2:2 = zext(res_nibble2 > 9);
    res_nibble2 = (res_nibble2 - carry_nibble2 * 10) & 0xf;
    res_nibble3:2 = dst_nibble3 + carry_nibble2;
    tmp_carry:1 = res_nibble3 > 9;
    carry_nibble3:2 = zext(res_nibble3 > 9);
    res_nibble3 = (res_nibble3 - carry_nibble3 * 10) & 0xf;
    tmp_res:2 = (res_nibble3 << 12) + (res_nibble2 << 8) + (res_nibble1 << 4) + res_nibble0;
    DEST_W_AD = tmp_res;
    build tbl_wzero;
    # Operation Flags...
    $(CARRY) = tmp_carry;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

# Decimal add carry to byte
:DADC^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xA & as=0x0 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation...
    dst_nibble0:1 = DEST_B_AD & 0xf;
    dst_nibble1:1 = (DEST_B_AD >> 4) & 0xf;
    res_nibble0:1 = dst_nibble0 + zext($(CARRY));
    carry_nibble0:1 = zext(res_nibble0 > 9);
    res_nibble0 = (res_nibble0 - carry_nibble0 * 10) & 0xf;
    res_nibble1:1 = dst_nibble1 + carry_nibble0;
    tmp_carry:1 = res_nibble1 > 9;
    carry_nibble1:1 = zext(res_nibble1 > 9);
    res_nibble1 = (res_nibble1 - carry_nibble1 * 10) & 0xf;
    tmp_res:1 = (res_nibble1 << 4) + res_nibble0;
    DEST_B_AD = tmp_res;
    build tbl_bzero;
    # Operation Flags...
    $(CARRY) = tmp_carry;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Word
#------------------
:DADD^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xA & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    # Operation... (digit-wise BCD add with carry ripple, low digit also adds C)
    src_nibble0:2 = SRC_W_AS & 0xf;
    src_nibble1:2 = (SRC_W_AS >> 4) & 0xf;
    src_nibble2:2 = (SRC_W_AS >> 8) & 0xf;
    src_nibble3:2 = (SRC_W_AS >> 12) & 0xf;
    dst_nibble0:2 = DEST_W_AD & 0xf;
    dst_nibble1:2 = (DEST_W_AD >> 4) & 0xf;
    dst_nibble2:2 = (DEST_W_AD >> 8) & 0xf;
    dst_nibble3:2 = (DEST_W_AD >> 12) & 0xf;
    res_nibble0:2 = src_nibble0 + dst_nibble0 + zext($(CARRY));
    carry_nibble0:2 = zext(res_nibble0 > 9);
    res_nibble0 = (res_nibble0 - carry_nibble0 * 10) & 0xf;
    res_nibble1:2 = src_nibble1 + dst_nibble1 + carry_nibble0;
    carry_nibble1:2 = zext(res_nibble1 > 9);
    res_nibble1 = (res_nibble1 - carry_nibble1 * 10) & 0xf;
    res_nibble2:2 = src_nibble2 + dst_nibble2 + carry_nibble1;
    carry_nibble2:2 = zext(res_nibble2 > 9);
    res_nibble2 = (res_nibble2 - carry_nibble2 * 10) & 0xf;
    res_nibble3:2 = src_nibble3 + dst_nibble3 + carry_nibble2;
    tmp_carry:1 = res_nibble3 > 9;
    carry_nibble3:2 = zext(res_nibble3 > 9);
    res_nibble3 = (res_nibble3 - carry_nibble3 * 10) & 0xf;
    tmp_res:2 = (res_nibble3 << 12) + (res_nibble2 << 8) + (res_nibble1 << 4) + res_nibble0;
    DEST_W_AD = tmp_res;
    build tbl_wzero;
    # Result Flags...
    $(CARRY) = tmp_carry;
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
:DADD^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xA & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    # Operation...
    src_nibble0:1 = SRC_B_AS & 0xf;
    src_nibble1:1 = (SRC_B_AS >> 4) & 0xf;
    dst_nibble0:1 = DEST_B_AD & 0xf;
    dst_nibble1:1 = (DEST_B_AD >> 4) & 0xf;
    res_nibble0:1 = src_nibble0 + dst_nibble0 + zext($(CARRY));
    carry_nibble0:1 = zext(res_nibble0 > 9);
    res_nibble0 = (res_nibble0 - carry_nibble0 * 10) & 0xf;
    res_nibble1:1 = src_nibble1 + dst_nibble1 + carry_nibble0;
    tmp_carry:1 = res_nibble1 > 9;
    carry_nibble1:1 = zext(res_nibble1 > 9);
    res_nibble1 = (res_nibble1 - carry_nibble1 * 10) & 0xf;
    tmp_res:1 = (res_nibble1 << 4) + res_nibble0;
    DEST_B_AD = tmp_res;
    build tbl_bzero;
    # Result Flags... (S/Z are taken from tmp_res, i.e. before any zero-register
    # masking done by tbl_bzero — unlike the word form which re-reads DEST)
    $(CARRY) = tmp_carry;
    $(SIGN) = (tmp_res s< 0x0); # S Flag
    $(ZERO) = (tmp_res == 0x0); # Z Flag
    build postIncrementStore;
}

###################################################################################
#
# BIT: Test bits of source AND destination
# encoding: | 1 0 1 1 | source | Ad | B/W | As | destination |

#------------------
# 16 bit SRC Word
#------------------
# Flags-only AND: destination is not written; C = NOT Z.
:BIT^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xB & bow=0x0 & postIncrement) ... & SRC_W_AS ... & DEST_W_AD ...
{
    # Operation...
    result:2 = DEST_W_AD & SRC_W_AS;
    # Operation Flags...
    $(OVERFLOW) = 0x0; # V Flag (reset)
    # Result Flags...
    $(CARRY) = (result != 0x0); # C Flag
    $(SIGN) = (result s< 0x0); # S Flag
    $(ZERO) = (result == 0x0); # Z Flag
    build postIncrement;
}

#------------------
# 16 bit SRC Byte
#------------------
# (result flags continue on the following line)
:BIT^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xB & bow=0x1 & postIncrement) ... & SRC_B_AS ... & DEST_B_AD ...
{
    # Operation...
    result:1 = DEST_B_AD & SRC_B_AS;
    # Operation Flags...
    $(OVERFLOW) = 0x0; # V Flag (reset)
    # Result Flags...
    $(CARRY) = (result != 0x0); # C Flag
    $(SIGN) = (result s< 0x0); # S Flag
    $(ZERO) = (result == 0x0); # Z Flag
    build postIncrement;
}

###################################################################################
#
# BIC: Bit clear (dest &= ~src)
# encoding: | 1 1 0 0 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------
# The CLRx/DINT forms are BIC #const,SR with the constant-generator registers.

# Clear carry bit
:CLRC is ctx_haveext=0 & op16_12_4=0xC & as=0x1 & src_Direct16_8_4=0x3 & ad=0x0 & dest_Direct16_0_4=0x2 & bow=0x0 & postIncrementStore
{
    $(CARRY) = 0;
    build postIncrementStore;
}

# Clear sign bit
:CLRN is ctx_haveext=0 & op16_12_4=0xC & as=0x2 & src_Direct16_8_4=0x2 & ad=0x0 & dest_Direct16_0_4=0x2 & bow=0x0 & postIncrementStore
{
    $(SIGN) = 0;
    build postIncrementStore;
}

# Clear zero bit
:CLRZ is ctx_haveext=0 & op16_12_4=0xC & as=0x2 & src_Direct16_8_4=0x3 & ad=0x0 & dest_Direct16_0_4=0x2 & bow=0x0 & postIncrementStore
{
    $(ZERO) = 0;
    build postIncrementStore;
}

# Disable interrupts
:DINT is ctx_haveext=0 & op16_12_4=0xC & as=0x3 & src_Direct16_8_4=0x2 & ad=0x0 & dest_Direct16_0_4=0x2 & postIncrementStore
{
    $(GIE) = 0;
    build postIncrementStore;
}

#------------------
# 16 bit SRC Word
#------------------
:BIC^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xC & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    DEST_W_AD = (~SRC_W_AS) & DEST_W_AD;
    build tbl_wzero;
    #Status bits are not affected
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
:BIC^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xC & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    DEST_B_AD = (~SRC_B_AS) & DEST_B_AD;
    build tbl_bzero;
    #Status bits are not affected
    build postIncrementStore;
}

###################################################################################
#
# BIS: Bit set (logical OR)
# encoding: | 1 1 0 1 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------
# The SETx/EINT forms are BIS #const,SR with the constant-generator registers.

# Enable interrupts
:EINT is ctx_haveext=0 & op16_12_4=0xD & as=0x3 & src_Direct16_8_4=0x2 & ad=0x0 & dest_Direct16_0_4=0x2 & postIncrementStore
{
    $(GIE) = 1;
    build postIncrementStore;
}

# Set carry bit
:SETC is ctx_haveext=0 & (op16_12_4=0xD & as=0x1 & src_Direct16_8_4=0x3 & ad=0x0 & dest_Direct16_0_4=0x2 & bow=0x0 & postIncrementStore)
{
    $(CARRY) = 1;
    build postIncrementStore;
}

# Set sign bit
:SETN is ctx_haveext=0 & (op16_12_4=0xD & as=0x2 & src_Direct16_8_4=0x2 & ad=0x0 & dest_Direct16_0_4=0x2 & bow=0x0 & postIncrementStore)
{
    $(SIGN) = 1;
    build postIncrementStore;
}

# Set zero bit
:SETZ is ctx_haveext=0 & (op16_12_4=0xD & as=0x2 & src_Direct16_8_4=0x3 & ad=0x0 & dest_Direct16_0_4=0x2 & bow=0x0 & postIncrementStore)
{
    $(ZERO) = 1;
    build postIncrementStore;
}

#------------------
# 16 bit SRC Word
#------------------
:BIS^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xD & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    DEST_W_AD = SRC_W_AS | DEST_W_AD;
    build tbl_wzero;
    #Status bits are not affected
    build postIncrementStore;
}

#------------------
# 16 bit SRC Byte
#------------------
:BIS^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xD & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ...
{
    DEST_B_AD = SRC_B_AS | DEST_B_AD;
    build tbl_bzero;
    #Status bits are not affected
    build postIncrementStore;
}

###################################################################################
#
# XOR: Exclusive or source with destination
# encoding: | 1 1 1 0 | source | Ad | B/W | As | destination |

#-----------------------
# Emulated instructions
#-----------------------

# Invert word (emulated as XOR #-1,dst).
# V is set when the pre-inversion value was negative; C = NOT Z afterwards.
:INV^".W" DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xE & as=0x3 & src_Direct16_8_4=0x3 & bow=0x0 & tbl_wzero & postIncrementStore) ... & DEST_W_AD ...
{
    # Operation Flags...
    $(OVERFLOW) = (DEST_W_AD s< 0x0); # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD ^ 0xFFFF;
    build tbl_wzero;
    # Result Flags...
    $(SIGN) = (DEST_W_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_W_AD == 0x0); # Z Flag
    $(CARRY) = (DEST_W_AD != 0x0); # C Flag
    build postIncrementStore;
}

# Invert byte
:INV^".B" DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xE & as=0x3 & src_Direct16_8_4=0x3 & bow=0x1 & tbl_bzero & postIncrementStore) ... & DEST_B_AD ...
{
    # Operation Flags...
    $(OVERFLOW) = (DEST_B_AD s< 0x0); # V Flag
    # Operation...
    DEST_B_AD = DEST_B_AD ^ 0xFF;
    build tbl_bzero;
    # Result Flags...
    $(SIGN) = (DEST_B_AD s< 0x0); # S Flag
    $(ZERO) = (DEST_B_AD == 0x0); # Z Flag
    $(CARRY) = (DEST_B_AD != 0x0); # C Flag
    build postIncrementStore;
}

#------------------
# 16 bit SRC Word
#------------------
# V is set when both operands are negative (body continues past this chunk)
:XOR^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xE & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ...
{
    # Operation Flags...
    tmp_overflow:1 = ((DEST_W_AD s< 0x0) && (SRC_W_AS s< 0x0)) ; # V Flag
    # Operation...
    DEST_W_AD = DEST_W_AD ^ SRC_W_AS;
    build tbl_wzero;
    # Result Flags...
$(OVERFLOW) = tmp_overflow; $(SIGN) = (DEST_W_AD s< 0x0); # S Flag $(ZERO) = (DEST_W_AD == 0x0); # Z Flag $(CARRY) = (DEST_W_AD != 0x0); # C Flag build postIncrementStore; } #------------------ # 16 bit SRC Byte #------------------ :XOR^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xE & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ... { # Operation Flags... tmp_overflow:1 = ((DEST_B_AD s< 0x0) && (SRC_B_AS s< 0x0)) ; # V Flag # Operation... DEST_B_AD = DEST_B_AD ^ SRC_B_AS; build tbl_bzero; # Result Flags... $(OVERFLOW) = tmp_overflow; $(SIGN) = (DEST_B_AD s< 0x0); # S Flag $(ZERO) = (DEST_B_AD == 0x0); # Z Flag $(CARRY) = (DEST_B_AD != 0x0); # C Flag build postIncrementStore; } ################################################################################### # # AND: Logical AND source with destination (dest &= src) # ------------------------------------------------------------------------------ # | 15 | 14 | 13 | 12 | 11 | 10 | 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | # ------------------------------------------------------------------------------ # | 1 1 1 1 | source | Ad | B/W | As | destination | #------------------ # 16 bit SRC Word #------------------ :AND^".W" SRC_W_AS, DEST_W_AD is ctx_haveext=0 & (op16_12_4=0xF & bow=0x0 & tbl_wzero & postIncrementStore) ... & SRC_W_AS ... & DEST_W_AD ... { # Operation... result:2 = DEST_W_AD & SRC_W_AS; DEST_W_AD = result; build tbl_wzero; # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag $(CARRY) = (result != 0x0); # C Flag build postIncrementStore; } #------------------ # 16 bit SRC Byte #------------------ :AND^".B" SRC_B_AS, DEST_B_AD is ctx_haveext=0 & (op16_12_4=0xF & bow=0x1 & tbl_bzero & postIncrementStore) ... & SRC_B_AS ... & DEST_B_AD ... { # Operation... result:1 = DEST_B_AD & SRC_B_AS; DEST_B_AD = result; build tbl_bzero; # Operation Flags... 
$(OVERFLOW) = 0x0; # V Flag # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag $(CARRY) = (result != 0x0); # C Flag build postIncrementStore; } ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI430X.sinc ================================================ ################################################################################### # # 20bit Address Instructions # # The original way of using sub-tables to breakout the addressing modes does not # work for these instructions #constructor recognizing the presence of an extension word :^instruction is opext_11_5=0x3 & ctx_haveext=0 & dest_0_4 & src_ext & al & zc & ad; instruction [ ctx_haveext=1; ctx_ctregdest=dest_0_4; ctx_regsrc=src_ext; ctx_al=al; ctx_num=ad; ctx_zc=zc;] { build instruction;} #:^instruction is ctx_haveext=1 & instruction & as=1 & src_8_4=3 [ctx_haveext=2;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & as=1 & src_8_4=3 [ctx_haveext=7;] {build instruction;} #replacment substituting second of haveext=2. 
NOTE: as=1 precludes first :^instruction is ctx_haveext=1 & instruction & as=1 [ctx_haveext=7;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & as=3 & src_8_4=0 [ctx_haveext=7;] {build instruction;} #:^instruction is ctx_haveext=1 & instruction [ctx_haveext=2;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & as=0 & ad=0 [ctx_haveext=3;] {build instruction;} #replacement substituting first of haveext=2 :^instruction is ctx_haveext=1 & instruction [ctx_haveext=7;] {build instruction;} #replacement substituting second of haveext=2 :^instruction is ctx_haveext=1 & instruction & op16_7_9=0x23 & as=0 [ctx_haveext=3;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & op16_7_9=0x21 & as=0 [ctx_haveext=3;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & op16_12_4=0x1 & as=1 & reg16_0_4=3 [ctx_haveext=3;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & op16_12_4=0x1 & as=1 [ctx_haveext=7;] {build instruction;} :^instruction is ctx_haveext=1 & instruction & op16_12_4=0x1 & as=3 & reg16_0_4=0 [ctx_haveext=7;] {build instruction;} # removed haveext=2 #:^instruction is ctx_haveext=2 & instruction & as=0 & ad=0 [ctx_haveext=3;] {build instruction;} #:^instruction is ctx_haveext=2 & instruction [ctx_haveext=7;] {build instruction;} :^instruction is ctx_haveext=3 & instruction & ctx_num=0 & ctx_ctregdest=0 [ctx_haveext=4;] { CNT = 0;build instruction; } :"RPT #"^val^" { "^instruction is ctx_haveext=3 & instruction & ctx_num=0 & ctx_ctregdest [ctx_haveext=4; val = ctx_ctregdest+1;] { CNT = ctx_ctregdest;build instruction;} :"RPT "^ctx_repreg^" { "^instruction is ctx_haveext=3 & instruction & ctx_num=1 & ctx_repreg [ctx_haveext=4;] { CNT = zext(ctx_repreg[0,4]); build instruction;} # 20bit address mode sub tables Abs20: val is ctx_ctregdest & imm_0_16 [ val=(ctx_ctregdest << 16) | imm_0_16;] {export *[const]:3 val;} Abs20s: val is ctx_ctregdests & imm_0_16 [ val=(ctx_ctregdests << 16) | 
imm_0_16;] {export *[const]:3 val;}
Abs20add: val is ctx_ctregdest & imm_0_16 [ val=(ctx_ctregdest << 16) | imm_0_16;] {export *:4 val;}
IMM4: val is imm_4_4 [val = imm_4_4+1;] {export *[const]:1 val;}
NUM2: val is rrn [ val = rrn+1;] {export *[const]:1 val;}
# XREG_B_AS: byte-sized source operand of extended single-operand forms, keyed
# on the As field (bow=1 selects byte size). Constant-generator encodings
# (R2/R3 with special As) export literal constants.
XREG_B_AS: DST8_0_4 is DST8_0_4 & as=0x0 & bow=0x1 { export DST8_0_4;} # Word/Register Direct (Rn):
XREG_B_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:1 tmp;}
XREG_B_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_B_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_B_AS: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:1 labelCalc; } # Symbolic
XREG_B_AS: "#"^indexExtWord16_0_16 is reg16_0_4=0x0 & as=0x3 & bow=0x1 ; indexExtWord16_0_16 {export *[const]:1 indexExtWord16_0_16; } # Immediate
XREG_B_AS: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
XREG_B_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x1 { export 4:1;} # Constant
XREG_B_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x1 { export 8:1;} # Constant
XREG_B_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x1 { export 0:1;} # Constant
XREG_B_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x1 { export 1:1;} # Constant
XREG_B_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x1 { export 2:1;} # Constant
XREG_B_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x1 { export 0xff:1;} # Constant
# XRREG_B_AS: byte source for repeated (RPT) register-only forms; no indexed,
# symbolic, immediate or absolute variants.
XRREG_B_AS: DST8_0_4 is DST8_0_4 & as=0x0 & bow=0x1 { export DST8_0_4;} # Word/Register Direct (Rn):
XRREG_B_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_B_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XRREG_B_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x1 { export 4:1;} # Constant
XRREG_B_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x1 { export 8:1;} # Constant
XRREG_B_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x1 { export 0:1;} # Constant
XRREG_B_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x1 { export 1:1;} # Constant
XRREG_B_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x1 { export 2:1;} # Constant
XRREG_B_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x1 { export 0xff:1;} # Constant
# XREG_W_AS / XRREG_W_AS: word-sized (bow=0) counterparts of the tables above.
XREG_W_AS: DST16_0_4 is DST16_0_4 & as=0x0 & bow=0x0 {export DST16_0_4;} # Word/Register Direct (Rn):
XREG_W_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:2 tmp;}
XREG_W_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_W_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_W_AS: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:2 labelCalc; } # Symbolic
XREG_W_AS: "#"^indexExtWord16_0_16 is reg16_0_4=0x0 & as=0x3 & bow=0x0 ; indexExtWord16_0_16 {export *[const]:2 indexExtWord16_0_16; } # Immediate
XREG_W_AS: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x0 ; indexExtWord16_0_16 {export *:2 indexExtWord16_0_16; } # Absolute
XREG_W_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x0 { export 4:2;} # Constant
XREG_W_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x0 { export 8:2;} # Constant
XREG_W_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x0 { export 0:2;} # Constant
XREG_W_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x0 { export 1:2;} # Constant
XREG_W_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x0 { export 2:2;} # Constant
XREG_W_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x0 { export 0xffff:2;} # Constant
XRREG_W_AS: DST16_0_4 is DST16_0_4 & as=0x0 & bow=0x0 {export DST16_0_4;} # Word/Register Direct (Rn):
XRREG_W_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_W_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XRREG_W_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x0 { export 4:2;} # Constant
XRREG_W_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x0 { export 8:2;} # Constant
XRREG_W_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x0 { export 0:2;} # Constant
XRREG_W_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x0 { export 1:2;} # Constant
XRREG_W_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x0 { export 2:2;} # Constant
XRREG_W_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x0 { export 0xffff:2;} # Constant
# XREG_A_AS: address-size (.A, 20-bit) source; immediates/offsets extend to 20
# bits using the high nibble saved in context from the extension word.
XREG_A_AS: dest_0_4 is dest_0_4 & as=0 & bow=0x1 {export dest_0_4;}
XREG_A_AS: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:$(REG_SIZE) tmp;}
XREG_A_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_A_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_A_AS: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [labelCalc = inst_start + 4 + ((ctx_ctregdests << 16) | indexExtWord16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
XREG_A_AS: "#"^val is reg16_0_4=0x0 & as=0x3 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_ctregdests << 16) |
indexExtWord16_0_16; ] {export *[const]:$(REG_SIZE) val; } # Immediate
XREG_A_AS: "&"^val is reg16_0_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_ctregdest << 16) | indexExtWord16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute
XREG_A_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x1 { export 4:$(REG_SIZE);} # Constant
XREG_A_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x1 { export 8:$(REG_SIZE);} # Constant
XREG_A_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x1 { export 0:$(REG_SIZE);} # Constant
XREG_A_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x1 { export 1:$(REG_SIZE);} # Constant
XREG_A_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x1 { export 2:$(REG_SIZE);} # Constant
XREG_A_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x1 { export 0xffffffff:$(REG_SIZE);} # Constant
# XRREG_A_AS: address-size source for repeated register-mode forms.
XRREG_A_AS: dest_0_4 is dest_0_4 & as=0 & bow=0x1 {export dest_0_4;}
XRREG_A_AS: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_A_AS: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XRREG_A_AS: "#4" is reg16_0_4=0x2 & as=0x2 & bow=0x1 { export 4:$(REG_SIZE);} # Constant
XRREG_A_AS: "#8" is reg16_0_4=0x2 & as=0x3 & bow=0x1 { export 8:$(REG_SIZE);} # Constant
XRREG_A_AS: "#0" is reg16_0_4=0x3 & as=0x0 & bow=0x1 { export 0:$(REG_SIZE);} # Constant
XRREG_A_AS: "#1" is reg16_0_4=0x3 & as=0x1 & bow=0x1 { export 1:$(REG_SIZE);} # Constant
XRREG_A_AS: "#2" is reg16_0_4=0x3 & as=0x2 & bow=0x1 { export 2:$(REG_SIZE);} # Constant
XRREG_A_AS: "#-1" is reg16_0_4=0x3 & as=0x3 & bow=0x1 { export 0xffffffff:$(REG_SIZE);} # Constant
# *_DEST variants: the operand when it is also written back (no immediate/constant
# generator variants since those are not writable destinations).
XREG_B_AS_DEST: DST8_0_4 is DST8_0_4 & as=0x0 & bow=0x1 { export DST8_0_4;} # Word/Register Direct (Rn):
XREG_B_AS_DEST: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:1 tmp;}
XREG_B_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_B_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_B_AS_DEST: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:1 labelCalc; } # Symbolic
XREG_B_AS_DEST: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
# NOTE(review): register-direct RPT destination also zeroes the register's upper
# bits via reg_Direct16_0_4=0 before re-inserting the low byte.
XRREG_B_AS_DEST: DST8_0_4 is DST8_0_4 & as=0x0 & reg_Direct16_0_4 & bow=0x1 { ztmp:1 = DST8_0_4; reg_Direct16_0_4=0; DST8_0_4 = ztmp; export DST8_0_4;} # Word/Register Direct (Rn):
XRREG_B_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_B_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:1 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_W_AS_DEST: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:2 tmp;}
XREG_W_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_W_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_W_AS_DEST: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:2 labelCalc; } # Symbolic
XREG_W_AS_DEST: "&"^indexExtWord16_0_16 is reg16_0_4=0x2 & as=0x1 & bow=0x0 ; indexExtWord16_0_16 {export *:2 indexExtWord16_0_16; } # Absolute
# XRREG_W_AS_DEST: word destination for repeated register-mode forms; the
# register-direct case zeroes the upper register bits before the write-back.
XRREG_W_AS_DEST: DST16_0_4 is DST16_0_4 & as=0x0 & reg_Direct16_0_4 & bow=0x0 {ztmp:2 = DST16_0_4; reg_Direct16_0_4=0; DST16_0_4 = ztmp;export DST16_0_4;} # Word/Register Direct (Rn):
XRREG_W_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_W_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 {export *:2 reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
# XREG_A_AS_DEST / XREG_A_AS_DEST2: address-size (.A) destinations; DEST2 is the
# bow=0 encoding of the same modes.
XREG_A_AS_DEST: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:$(REG_SIZE) tmp;}
XREG_A_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_A_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_A_AS_DEST: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [labelCalc = inst_start + 4 + ((ctx_ctregdests << 16) | indexExtWord16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
XREG_A_AS_DEST: "&"^val is reg16_0_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_ctregdest << 16) | indexExtWord16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute
XRREG_A_AS_DEST: dest_0_4 is dest_0_4 & as=0 & bow=0x1 {export dest_0_4;}
XRREG_A_AS_DEST: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_A_AS_DEST: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x1 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_A_AS_DEST2: indexExtWord16_0_16s^"("^reg_Indexed16_0_4^")" is reg_Indexed16_0_4 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = reg_Indexed16_0_4 + indexExtWord16_0_16s; export *:$(REG_SIZE) tmp;}
XREG_A_AS_DEST2: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XREG_A_AS_DEST2: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
XREG_A_AS_DEST2: labelCalc is reg16_0_4=0x0 & as=0x1 & bow=0x0 ; indexExtWord16_0_16 [labelCalc = inst_start + 4 + ((ctx_ctregdests << 16) | indexExtWord16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
XREG_A_AS_DEST2: "&"^val is reg16_0_4=0x2 & as=0x1 & bow=0x0 ; indexExtWord16_0_16 [val=(ctx_ctregdest << 16) | indexExtWord16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute
XRREG_A_AS_DEST2: dest_0_4 is dest_0_4 & as=0 & bow=0x0 {export dest_0_4;}
XRREG_A_AS_DEST2: "@"^reg_InDirect16_0_4 is reg_InDirect16_0_4 & as=0x2 & bow=0x0 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect (@Rn):
XRREG_A_AS_DEST2: "@"^reg_InDirect16_0_4^"+" is reg_InDirect16_0_4 & as=0x3 & bow=0x0 {export *:$(REG_SIZE) reg_InDirect16_0_4;} # Word/Register Indirect Autoincrement (@Rn+):
# XSRC_*: source operand tables for extended two-operand forms (src field at
# bits 8..11 instead of 0..3).
XSRC_B_AS: SRC8_8_4 is SRC8_8_4 & as=0x0 & bow=0x1 { export SRC8_8_4;} # Word/Register Direct (Rn):
XSRC_B_AS: indexExtWord16_0_16s^"("^src_Indexed16_8_4^")" is src_Indexed16_8_4 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = src_Indexed16_8_4 + indexExtWord16_0_16s; export *:1 tmp;}
XSRC_B_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x1 {export *:1 src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
XSRC_B_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x1 {export *:1 src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
XSRC_B_AS: labelCalc is src16_8_4=0x0 & as=0x1 & bow=0x1 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:1 labelCalc; } # Symbolic
XSRC_B_AS: "#"^indexExtWord16_0_16 is src16_8_4=0x0 &
as=0x3 & bow=0x1 ; indexExtWord16_0_16 {export *[const]:1 indexExtWord16_0_16;} # Immediate
XSRC_B_AS: "&"^indexExtWord16_0_16 is src16_8_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute
XSRC_B_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x1 { export 4:1; } # Constant
XSRC_B_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x1 { export 8:1; } # Constant
XSRC_B_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x1 { export 0:1; } # Constant
XSRC_B_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x1 { export 1:1; } # Constant
XSRC_B_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x1 { export 2:1; } # Constant
XSRC_B_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x1 { export 0xff:1; } # Constant
# XRSRC_B_AS: byte source for repeated (register-mode) two-operand forms.
XRSRC_B_AS: SRC8_8_4 is SRC8_8_4 & as=0x0 & bow=0x1 { export SRC8_8_4;} # Word/Register Direct (Rn):
XRSRC_B_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x1 {export *:1 src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
XRSRC_B_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x1 {export *:1 src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
XRSRC_B_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x1 { export 4:1; } # Constant
XRSRC_B_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x1 { export 8:1; } # Constant
XRSRC_B_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x1 { export 0:1; } # Constant
XRSRC_B_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x1 { export 1:1; } # Constant
XRSRC_B_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x1 { export 2:1; } # Constant
XRSRC_B_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x1 { export 0xff:1; } # Constant
# XSRC_W_AS / XRSRC_W_AS: word-sized (bow=0) source counterparts.
XSRC_W_AS: SRC16_8_4 is SRC16_8_4 & as=0x0 & bow=0x0 {export SRC16_8_4;} # Word/Register Direct (Rn):
XSRC_W_AS: indexExtWord16_0_16s^"("^src_Indexed16_8_4^")" is src_Indexed16_8_4 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = src_Indexed16_8_4 + indexExtWord16_0_16s; export *:2 tmp;}
XSRC_W_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x0 {export *:2 src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
XSRC_W_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x0 {export *:2 src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
XSRC_W_AS: labelCalc is src16_8_4=0x0 & as=0x1 & bow=0x0 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:2 labelCalc; } # Symbolic
XSRC_W_AS: "#"^indexExtWord16_0_16 is src16_8_4=0x0 & as=0x3 & bow=0x0 ; indexExtWord16_0_16 {export *[const]:2 indexExtWord16_0_16; } # Immediate
XSRC_W_AS: "&"^indexExtWord16_0_16 is src16_8_4=0x2 & as=0x1 & bow=0x0 ; indexExtWord16_0_16 {export *:2 indexExtWord16_0_16; } # Absolute
XSRC_W_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x0 { export 4:2; } # Constant
XSRC_W_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x0 { export 8:2; } # Constant
XSRC_W_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x0 { export 0:2; } # Constant
XSRC_W_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x0 { export 1:2; } # Constant
XSRC_W_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x0 { export 2:2; } # Constant
XSRC_W_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x0 { export 0xffff:2; } # Constant
XRSRC_W_AS: SRC16_8_4 is SRC16_8_4 & as=0x0 & bow=0x0 {export SRC16_8_4;} # Word/Register Direct (Rn):
XRSRC_W_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x0 {export *:2 src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
XRSRC_W_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x0 {export *:2 src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
XRSRC_W_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x0 { export 4:2; } # Constant
XRSRC_W_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x0 { export 8:2; } # Constant
XRSRC_W_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x0 { export 0:2; } # Constant
XRSRC_W_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x0 { export 1:2; } # Constant
XRSRC_W_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x0 { export 2:2; } # Constant
XRSRC_W_AS: "#-1" is src16_8_4=0x3 & as=0x3 &
bow=0x0 { export 0xffff:2; } # Constant
# XSRC_A_AS: address-size (.A) source; indexed/immediate/absolute offsets are
# widened to 20 bits with the high nibble saved from the extension word.
XSRC_A_AS: src_8_4 is src_8_4 & as=0x0 & bow=0x1 { export src_8_4;} # Word/Register Direct (Rn):
XSRC_A_AS: val^"("^src_Indexed16_8_4^")" is src_Indexed16_8_4 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_regsrcs << 16) | indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = src_Indexed16_8_4 + val; export *:$(REG_SIZE) tmp;}
XSRC_A_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x1 {export *:$(REG_SIZE) src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
XSRC_A_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x1 {export *:$(REG_SIZE) src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
XSRC_A_AS: labelCalc is src16_8_4=0x0 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [labelCalc = inst_start + 4 + ((ctx_regsrcs << 16) | indexExtWord16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
XSRC_A_AS: "#"^val is src16_8_4=0x0 & as=0x3 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_regsrcs << 16) | indexExtWord16_0_16; ] {export *[const]:$(REG_SIZE) val; } # Immediate
XSRC_A_AS: "&"^val is src16_8_4=0x2 & as=0x1 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_regsrc << 16) | indexExtWord16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute
XSRC_A_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x1 { export 4:$(REG_SIZE); } # Constant
XSRC_A_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x1 { export 8:$(REG_SIZE); } # Constant
XSRC_A_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x1 { export 0:$(REG_SIZE); } # Constant
XSRC_A_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x1 { export 1:$(REG_SIZE); } # Constant
XSRC_A_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x1 { export 2:$(REG_SIZE); } # Constant
XSRC_A_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x1 { export 0xffffffff:$(REG_SIZE); } # Constant
XRSRC_A_AS: src_8_4 is src_8_4 & as=0x0 & bow=0x1 { export src_8_4;} # Word/Register Direct (Rn):
XRSRC_A_AS: "@"^src_InDirect16_8_4 is src_InDirect16_8_4 & as=0x2 & bow=0x1 {export *:$(REG_SIZE) src_InDirect16_8_4;} # Word/Register Indirect (@Rn):
XRSRC_A_AS: "@"^src_InDirect16_8_4^"+" is src_InDirect16_8_4 & as=0x3 & bow=0x1 {export *:$(REG_SIZE) src_InDirect16_8_4;} # Word/Register Indirect Autoincrement (@Rn+):
XRSRC_A_AS: "#4" is src16_8_4=0x2 & as=0x2 & bow=0x1 { export 4:$(REG_SIZE); } # Constant
XRSRC_A_AS: "#8" is src16_8_4=0x2 & as=0x3 & bow=0x1 { export 8:$(REG_SIZE); } # Constant
XRSRC_A_AS: "#0" is src16_8_4=0x3 & as=0x0 & bow=0x1 { export 0:$(REG_SIZE); } # Constant
XRSRC_A_AS: "#1" is src16_8_4=0x3 & as=0x1 & bow=0x1 { export 1:$(REG_SIZE); } # Constant
XRSRC_A_AS: "#2" is src16_8_4=0x3 & as=0x2 & bow=0x1 { export 2:$(REG_SIZE); } # Constant
XRSRC_A_AS: "#-1" is src16_8_4=0x3 & as=0x3 & bow=0x1 { export 0xffffffff:$(REG_SIZE); } # Constant
# XDEST_B_AD: byte destination of extended two-operand forms (Ad field).
# The indexExt2Word variants match when the SOURCE also consumed an operand
# word, so the destination word is the SECOND extra word (and PC-relative
# offsets are taken from inst_start + 6 instead of + 4).
XDEST_B_AD: DST8_0_4 is DST8_0_4 & ad=0x0 & bow=0x1 { export DST8_0_4; } # Word/Register Direct (Rn):
XDEST_B_AD: indexExtWord16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16s { tmp:$(REG_SIZE) = dest_Indexed16_0_4 + indexExtWord16_0_16s; export *:1 tmp;}
XDEST_B_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s { tmp:$(REG_SIZE) = dest_Indexed16_0_4 + indexExt2Word16_0_16s; export *:1 tmp;}
XDEST_B_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s { tmp:$(REG_SIZE) = dest_Indexed16_0_4 + indexExt2Word16_0_16s; export *:1 tmp;}
XDEST_B_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] { export *:1 labelCalc; } # Symbolic
XDEST_B_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 6 +
indexExt2Word16_0_16s; ] {export *:1 labelCalc; } # Symbolic XDEST_B_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 6 + indexExt2Word16_0_16s; ] {export *:1 labelCalc; } # Symbolic XDEST_B_AD: "&"^indexExtWord16_0_16 is dest=0x2 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16 {export *:1 indexExtWord16_0_16; } # Absolute XDEST_B_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {export *:1 indexExt2Word16_0_16; } # Absolute XDEST_B_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {export *:1 indexExt2Word16_0_16; } # Absolute XDEST_W_AD: DST16_0_4 is DST16_0_4 & ad=0x0 & bow=0x0 {export DST16_0_4;} # Word/Register Direct (Rn): XDEST_W_AD: indexExtWord16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 ; indexExtWord16_0_16s {tmp:$(REG_SIZE) = dest_Indexed16_0_4 + indexExtWord16_0_16s; export *:2 tmp;} XDEST_W_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s {tmp:$(REG_SIZE) = dest_Indexed16_0_4 + indexExt2Word16_0_16s; export *:2 tmp;} XDEST_W_AD: indexExt2Word16_0_16s^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x0 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s {tmp:$(REG_SIZE) = dest_Indexed16_0_4 + indexExt2Word16_0_16s; export *:2 tmp;} XDEST_W_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x0 ; indexExtWord16_0_16s [labelCalc = inst_start + 4 + indexExtWord16_0_16s; ] {export *:2 labelCalc; } # Symbolic XDEST_W_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x0 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & 
src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 6 + indexExt2Word16_0_16s; ] {export *:2 labelCalc; } # Symbolic XDEST_W_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x0 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16s [labelCalc = inst_start + 6 + indexExt2Word16_0_16s; ] {export *:2 labelCalc; } # Symbolic XDEST_W_AD: "&"^indexExtWord16_0_16 is dest=0x2 & ad=0x1 & bow=0x0 ; indexExtWord16_0_16 {export *:2 indexExtWord16_0_16; } # Absolute XDEST_W_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x0 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {export *:2 indexExt2Word16_0_16; } # Absolute XDEST_W_AD: "&"^indexExt2Word16_0_16 is dest=0x2 & ad=0x1 & bow=0x0 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 {export *:2 indexExt2Word16_0_16; } # Absolute XDEST_A_AD: dest_0_4 is dest_0_4 & ad=0x0 & bow=0x1 { export dest_0_4; } # Word/Register Direct (Rn): XDEST_A_AD: val^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_ctregdests << 16) | indexExtWord16_0_16; ] {tmp:$(REG_SIZE) = dest_Indexed16_0_4 + val; export *:$(REG_SIZE) tmp;} XDEST_A_AD: val^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 [val=(ctx_ctregdests << 16) | indexExt2Word16_0_16; ] {tmp:$(REG_SIZE) = dest_Indexed16_0_4 + val; export *:$(REG_SIZE) tmp;} XDEST_A_AD: val^"("^dest_Indexed16_0_4^")" is dest_Indexed16_0_4 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 [val=(ctx_ctregdests << 16) | indexExt2Word16_0_16; ] {tmp:$(REG_SIZE) = dest_Indexed16_0_4 + val; export *:$(REG_SIZE) tmp;} XDEST_A_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16 [labelCalc = inst_start + 4 + 
((ctx_ctregdests << 16) | indexExtWord16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
XDEST_A_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 [labelCalc = inst_start + 4 + ((ctx_ctregdests << 16) | indexExt2Word16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
XDEST_A_AD: labelCalc is dest=0x0 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 [labelCalc = inst_start + 4 + ((ctx_ctregdests << 16) | indexExt2Word16_0_16); ] {export *:$(REG_SIZE) labelCalc; } # Symbolic
# NOTE(review): the absolute alternatives below use ctx_ctregdest while the
# indexed/symbolic alternatives above use ctx_ctregdests — confirm both
# context fields exist and that the difference is intentional.
XDEST_A_AD: "&"^val is dest=0x2 & ad=0x1 & bow=0x1 ; indexExtWord16_0_16 [val=(ctx_ctregdest << 16) | indexExtWord16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute
XDEST_A_AD: "&"^val is dest=0x2 & ad=0x1 & bow=0x1 & as=0x1 & ((src16_8_4>=0x0 & src16_8_4<=0x2) | (src16_8_4>=0x4 & src16_8_4<=0xF)) ; indexExtWord16_0_16 ; indexExt2Word16_0_16 [val=(ctx_ctregdest << 16) | indexExt2Word16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute
XDEST_A_AD: "&"^val is dest=0x2 & ad=0x1 & bow=0x1 & as=0x3 & src16_8_4=0x0 ; indexExtWord16_0_16 ; indexExt2Word16_0_16 [val=(ctx_ctregdest << 16) | indexExt2Word16_0_16; ] {export *:$(REG_SIZE) val; } # Absolute

#Use repeat_carry with a build directive at the beginning of a RPT loop
#use existing value of carry bit
repeat_carry: is ctx_zc = 0 {}
#in this case the repeated instruction uses 0 as the value of the carry bit
#after repeated instruction executes, value of carry bit is defined by the result of
#last operation, so building this constructor at the beginning of RPT loop can accurately
#model the semantics
repeat_carry: is ctx_zc = 1 {$(CARRY) = 0;}

# Set C/V/S/Z after a 20-bit add: carry is computed on the zero-extended
# low 20 bits of the operands; overflow/sign/zero come from the signed result.
macro setaddflags(ans, in1, in2) { tmp1:$(REG_SIZE) = zext(in1[0,20]); tmp2:$(REG_SIZE) = zext(in2[0,20]); tmp1 = tmp1 + tmp2; $(CARRY) = tmp1 > 0xFFFFF; $(OVERFLOW) = ((in1 s>= 0) & (in2 s>= 0) & (ans s< 0)) | ((in1 s< 0) & (in2 s< 0) & (ans
s>= 0)); $(SIGN) = (ans s< 0); $(ZERO) = (ans == 0); }
# Set C/V/S/Z after a 20-bit subtract (in2 - in1, per the call sites):
# borrow-style carry from the zero-extended 20-bit operands.
macro setsubflags(ans, in1, in2) { tmp1:$(REG_SIZE) = zext(in1[0,20]); tmp2:$(REG_SIZE) = zext(in2[0,20]); $(CARRY) = tmp1 > tmp2; $(OVERFLOW) = ((in1 s< 0) & (in2 s>= 0) & (ans s< 0)) | ((in1 s>= 0) & (in2 s< 0) & (ans s>= 0)); $(SIGN) = (ans s< 0); $(ZERO) = (ans == 0); }
#################
#
# Subtables for the pushm/popm variants.
# In memory, the 20 bit regs take up 4 bytes with all the uppers being 0.
# However, to get some of the math to work, the 20bit regs are sign extended when reading from mem.
# Each PUSHARn table pushes register n (if ctx_mreg reaches n and the
# running count ctx_count is non-zero) and chains to PUSHARn-1.
PUSHAR0: is ctx_count=0 {}
PUSHAR0: is ctx_mreg=0x0 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = inst_start & 0xFFFFF; }
PUSHAR1: is ctx_count=0 {}
PUSHAR1: is PUSHAR0 {build PUSHAR0;}
# NOTE(review): SP is masked with 0xFFFF here, while every other 20-bit
# register in this table is masked with 0xFFFFF — verify this is intended.
PUSHAR1: is ctx_mreg=0x1 & PUSHAR0 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = SP & 0xFFFF; build PUSHAR0; }
PUSHAR2: is ctx_count=0 {}
PUSHAR2: is PUSHAR1 {build PUSHAR1;}
PUSHAR2: is ctx_mreg=0x2 & PUSHAR1 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = SR & 0xFFFF; build PUSHAR1; }
PUSHAR3: is ctx_count=0 {}
PUSHAR3: is PUSHAR2 {build PUSHAR2;}
PUSHAR3: is ctx_mreg=0x3 & PUSHAR2 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R3 & 0xFFFFF; build PUSHAR2; }
PUSHAR4: is ctx_count=0 {}
PUSHAR4: is PUSHAR3 {build PUSHAR3;}
PUSHAR4: is ctx_mreg=0x4 & PUSHAR3 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R4 & 0xFFFFF; build PUSHAR3; }
PUSHAR5: is ctx_count=0 {}
PUSHAR5: is PUSHAR4 {build PUSHAR4;}
PUSHAR5: is ctx_mreg=0x5 & PUSHAR4 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R5 & 0xFFFFF; build PUSHAR4; }
PUSHAR6: is ctx_count=0 {}
PUSHAR6: is PUSHAR5 {build PUSHAR5;}
PUSHAR6: is ctx_mreg=0x6 & PUSHAR5 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R6 & 0xFFFFF; build PUSHAR5; }
PUSHAR7: is ctx_count=0 {}
PUSHAR7: is PUSHAR6 {build
PUSHAR6;}
PUSHAR7: is ctx_mreg=0x7 & PUSHAR6 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R7 & 0xFFFFF; build PUSHAR6; }
PUSHAR8: is ctx_count=0 {}
PUSHAR8: is PUSHAR7 {build PUSHAR7;}
PUSHAR8: is ctx_mreg=0x8 & PUSHAR7 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R8 & 0xFFFFF; build PUSHAR7; }
PUSHAR9: is ctx_count=0 {}
PUSHAR9: is PUSHAR8 {build PUSHAR8;}
PUSHAR9: is ctx_mreg=0x9 & PUSHAR8 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R9 & 0xFFFFF; build PUSHAR8; }
PUSHAR10: is ctx_count=0 {}
PUSHAR10: is PUSHAR9 {build PUSHAR9;}
PUSHAR10: is ctx_mreg=0xA & PUSHAR9 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R10 & 0xFFFFF; build PUSHAR9; }
PUSHAR11: is ctx_count=0 {}
PUSHAR11: is PUSHAR10 {build PUSHAR10;}
PUSHAR11: is ctx_mreg=0xB & PUSHAR10 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R11 & 0xFFFFF; build PUSHAR10; }
PUSHAR12: is ctx_count=0 {}
PUSHAR12: is PUSHAR11 {build PUSHAR11;}
PUSHAR12: is ctx_mreg=0xC & PUSHAR11 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R12 & 0xFFFFF; build PUSHAR11; }
PUSHAR13: is ctx_count=0 {}
PUSHAR13: is PUSHAR12 {build PUSHAR12;}
PUSHAR13: is ctx_mreg=0xD & PUSHAR12 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R13 & 0xFFFFF; build PUSHAR12; }
PUSHAR14: is ctx_count=0 {}
PUSHAR14: is PUSHAR13 {build PUSHAR13;}
PUSHAR14: is ctx_mreg=0xE & PUSHAR13 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R14 & 0xFFFFF; build PUSHAR13; }
# PUSHAR15 is the PUSHM.A entry point; it has no ctx_count=0 base case
# because PUSHM always pushes at least one register.
PUSHAR15: is PUSHAR14 {build PUSHAR14;}
PUSHAR15: is ctx_mreg=0xF & PUSHAR14 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 4; *[RAM]:4 SP = R15 & 0xFFFFF; build PUSHAR14; }
# Word-size pushm variants: 2-byte stack slots, low 16 bits only (Rn:2).
PUSHWR0: is ctx_count=0 {}
PUSHWR0: is ctx_mreg=0x0 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = inst_start & 0xFFFF; }
PUSHWR1: is ctx_count=0 {}
PUSHWR1: is
PUSHWR0 {build PUSHWR0;}
PUSHWR1: is ctx_mreg=0x1 & PUSHWR0 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = SP:2; build PUSHWR0; }
PUSHWR2: is ctx_count=0 {}
PUSHWR2: is PUSHWR1 {build PUSHWR1;}
PUSHWR2: is ctx_mreg=0x2 & PUSHWR1 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = SR:2; build PUSHWR1; }
PUSHWR3: is ctx_count=0 {}
PUSHWR3: is PUSHWR2 {build PUSHWR2;}
PUSHWR3: is ctx_mreg=0x3 & PUSHWR2 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R3:2; build PUSHWR2; }
PUSHWR4: is ctx_count=0 {}
PUSHWR4: is PUSHWR3 {build PUSHWR3;}
PUSHWR4: is ctx_mreg=0x4 & PUSHWR3 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R4:2; build PUSHWR3; }
PUSHWR5: is ctx_count=0 {}
PUSHWR5: is PUSHWR4 {build PUSHWR4;}
PUSHWR5: is ctx_mreg=0x5 & PUSHWR4 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R5:2; build PUSHWR4; }
PUSHWR6: is ctx_count=0 {}
PUSHWR6: is PUSHWR5 {build PUSHWR5;}
PUSHWR6: is ctx_mreg=0x6 & PUSHWR5 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R6:2; build PUSHWR5; }
PUSHWR7: is ctx_count=0 {}
PUSHWR7: is PUSHWR6 {build PUSHWR6;}
PUSHWR7: is ctx_mreg=0x7 & PUSHWR6 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R7:2; build PUSHWR6; }
PUSHWR8: is ctx_count=0 {}
PUSHWR8: is PUSHWR7 {build PUSHWR7;}
PUSHWR8: is ctx_mreg=0x8 & PUSHWR7 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R8:2; build PUSHWR7; }
PUSHWR9: is ctx_count=0 {}
PUSHWR9: is PUSHWR8 {build PUSHWR8;}
PUSHWR9: is ctx_mreg=0x9 & PUSHWR8 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R9:2; build PUSHWR8; }
PUSHWR10: is ctx_count=0 {}
PUSHWR10: is PUSHWR9 {build PUSHWR9;}
PUSHWR10: is ctx_mreg=0xA & PUSHWR9 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R10:2; build PUSHWR9; }
PUSHWR11: is ctx_count=0 {}
PUSHWR11: is PUSHWR10 {build
PUSHWR10;}
PUSHWR11: is ctx_mreg=0xB & PUSHWR10 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R11:2; build PUSHWR10; }
PUSHWR12: is ctx_count=0 {}
PUSHWR12: is PUSHWR11 {build PUSHWR11;}
PUSHWR12: is ctx_mreg=0xC & PUSHWR11 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R12:2; build PUSHWR11; }
PUSHWR13: is ctx_count=0 {}
PUSHWR13: is PUSHWR12 {build PUSHWR12;}
PUSHWR13: is ctx_mreg=0xD & PUSHWR12 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R13:2; build PUSHWR12; }
PUSHWR14: is ctx_count=0 {}
PUSHWR14: is PUSHWR13 {build PUSHWR13;}
PUSHWR14: is ctx_mreg=0xE & PUSHWR13 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R14:2; build PUSHWR13; }
# PUSHWR15 is the PUSHM.W entry point (no ctx_count=0 base case).
PUSHWR15: is PUSHWR14 {build PUSHWR14;}
PUSHWR15: is ctx_mreg=0xF & PUSHWR14 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg-1;] { SP = SP - 2; *[RAM]:2 SP = R15:2; build PUSHWR14; }
# POPM.A chain: pops run in the opposite order (lowest register first on
# the stack), so ctx_mreg counts UP and the chain is POPAR0 -> POPAR15.
# 20-bit values are sign extended from their low 20 bits after the 4-byte load.
POPAR15: is ctx_count=0 {}
POPAR15: is ctx_mreg=0xF { R15 = *[RAM]:4 SP; R15 = sext(R15[0,20]); SP = SP + 4; }
POPAR14: is ctx_count=0 {}
POPAR14: is POPAR15 {build POPAR15;}
POPAR14: is ctx_mreg=0xE & POPAR15 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R14 = *[RAM]:4 SP; R14 = sext(R14[0,20]); SP = SP + 4; build POPAR15; }
POPAR13: is ctx_count=0 {}
POPAR13: is POPAR14 {build POPAR14;}
POPAR13: is ctx_mreg=0xD & POPAR14 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R13 = *[RAM]:4 SP; R13 = sext(R13[0,20]); SP = SP + 4; build POPAR14; }
POPAR12: is ctx_count=0 {}
POPAR12: is POPAR13 {build POPAR13;}
POPAR12: is ctx_mreg=0xC & POPAR13 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R12 = *[RAM]:4 SP; R12 = sext(R12[0,20]); SP = SP + 4; build POPAR13; }
POPAR11: is ctx_count=0 {}
POPAR11: is POPAR12 {build POPAR12;}
POPAR11: is ctx_mreg=0xB & POPAR12 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R11 = *[RAM]:4 SP; R11 = sext(R11[0,20]); SP = SP + 4; build POPAR12; }
POPAR10: is ctx_count=0 {}
POPAR10: is POPAR11 {build POPAR11;}
POPAR10:
is ctx_mreg=0xA & POPAR11 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R10 = *[RAM]:4 SP; R10 = sext(R10[0,20]); SP = SP + 4; build POPAR11; }
POPAR9: is ctx_count=0 {}
POPAR9: is POPAR10 {build POPAR10;}
POPAR9: is ctx_mreg=0x9 & POPAR10 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R9 = *[RAM]:4 SP; R9 = sext(R9[0,20]); SP = SP + 4; build POPAR10; }
POPAR8: is ctx_count=0 {}
POPAR8: is POPAR9 {build POPAR9;}
POPAR8: is ctx_mreg=0x8 & POPAR9 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R8 = *[RAM]:4 SP; R8 = sext(R8[0,20]); SP = SP + 4; build POPAR9; }
POPAR7: is ctx_count=0 {}
POPAR7: is POPAR8 {build POPAR8;}
POPAR7: is ctx_mreg=0x7 & POPAR8 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R7 = *[RAM]:4 SP; R7 = sext(R7[0,20]); SP = SP + 4; build POPAR8; }
POPAR6: is ctx_count=0 {}
POPAR6: is POPAR7 {build POPAR7;}
POPAR6: is ctx_mreg=0x6 & POPAR7 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R6 = *[RAM]:4 SP; R6 = sext(R6[0,20]); SP = SP + 4; build POPAR7; }
POPAR5: is ctx_count=0 {}
POPAR5: is POPAR6 {build POPAR6;}
POPAR5: is ctx_mreg=0x5 & POPAR6 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R5 = *[RAM]:4 SP; R5 = sext(R5[0,20]); SP = SP + 4; build POPAR6; }
POPAR4: is ctx_count=0 {}
POPAR4: is POPAR5 {build POPAR5;}
POPAR4: is ctx_mreg=0x4 & POPAR5 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R4 = *[RAM]:4 SP; R4 = sext(R4[0,20]); SP = SP + 4; build POPAR5; }
POPAR3: is ctx_count=0 {}
POPAR3: is POPAR4 {build POPAR4;}
POPAR3: is ctx_mreg=0x3 & POPAR4 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R3 = *[RAM]:4 SP; R3 = sext(R3[0,20]); SP = SP + 4; build POPAR4; }
POPAR2: is ctx_count=0 {}
POPAR2: is POPAR3 {build POPAR3;}
# Popping into SR: no sign extension (SR is a 16-bit status register).
POPAR2: is ctx_mreg=0x2 & POPAR3 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { SR = *[RAM]:4 SP; SP = SP + 4; build POPAR3; }
POPAR1: is ctx_count=0 {}
POPAR1: is POPAR2 {build POPAR2;}
POPAR1: is ctx_mreg=0x1 & POPAR2 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { SP = *[RAM]:4 SP; SP = sext(SP[0,20]); SP = SP + 4; build
POPAR2; }
# POPAR0 (pop into PC) is the POPM.A entry point; popping PC becomes an
# indirect branch.
POPAR0: is POPAR1 {build POPAR1;}
POPAR0: is ctx_mreg=0x0 & POPAR1 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { PC = *[RAM]:4 SP; PC = sext(PC[0,20]); SP = SP + 4; build POPAR1; goto [PC]; }
# POPM.W chain: 2-byte slots, zero-extended into the 20-bit registers.
POPWR15: is ctx_count=0 {}
POPWR15: is ctx_mreg=0xF { R15 = zext(*[RAM]:2 SP); SP = SP + 2; }
POPWR14: is ctx_count=0 {}
POPWR14: is POPWR15 {build POPWR15;}
POPWR14: is ctx_mreg=0xE & POPWR15 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R14 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR15; }
POPWR13: is ctx_count=0 {}
POPWR13: is POPWR14 {build POPWR14;}
POPWR13: is ctx_mreg=0xD & POPWR14 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R13 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR14; }
POPWR12: is ctx_count=0 {}
POPWR12: is POPWR13 {build POPWR13;}
POPWR12: is ctx_mreg=0xC & POPWR13 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R12 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR13; }
POPWR11: is ctx_count=0 {}
POPWR11: is POPWR12 {build POPWR12;}
POPWR11: is ctx_mreg=0xB & POPWR12 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R11 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR12; }
POPWR10: is ctx_count=0 {}
POPWR10: is POPWR11 {build POPWR11;}
POPWR10: is ctx_mreg=0xA & POPWR11 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R10 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR11; }
POPWR9: is ctx_count=0 {}
POPWR9: is POPWR10 {build POPWR10;}
POPWR9: is ctx_mreg=0x9 & POPWR10 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R9 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR10; }
POPWR8: is ctx_count=0 {}
POPWR8: is POPWR9 {build POPWR9;}
POPWR8: is ctx_mreg=0x8 & POPWR9 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R8 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR9; }
POPWR7: is ctx_count=0 {}
POPWR7: is POPWR8 {build POPWR8;}
POPWR7: is ctx_mreg=0x7 & POPWR8 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R7 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR8; }
POPWR6: is ctx_count=0 {}
POPWR6: is POPWR7 {build POPWR7;}
POPWR6: is ctx_mreg=0x6 & POPWR7
[ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R6 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR7; }
POPWR5: is ctx_count=0 {}
POPWR5: is POPWR6 {build POPWR6;}
POPWR5: is ctx_mreg=0x5 & POPWR6 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R5 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR6; }
POPWR4: is ctx_count=0 {}
POPWR4: is POPWR5 {build POPWR5;}
POPWR4: is ctx_mreg=0x4 & POPWR5 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R4 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR5; }
POPWR3: is ctx_count=0 {}
POPWR3: is POPWR4 {build POPWR4;}
POPWR3: is ctx_mreg=0x3 & POPWR4 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { R3 = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR4; }
POPWR2: is ctx_count=0 {}
POPWR2: is POPWR3 {build POPWR3;}
POPWR2: is ctx_mreg=0x2 & POPWR3 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { SR = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR3; }
POPWR1: is ctx_count=0 {}
POPWR1: is POPWR2 {build POPWR2;}
POPWR1: is ctx_mreg=0x1 & POPWR2 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { SP = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR2; }
# POPWR0 (pop into PC) is the POPM.W entry point.
POPWR0: is POPWR1 {build POPWR1;}
POPWR0: is ctx_mreg=0x0 & POPWR1 [ctx_count=ctx_count-1; ctx_mreg=ctx_mreg+1;] { PC = zext(*[RAM]:2 SP); SP = SP + 2; build POPWR1; goto [PC]; }
# ADDA Rsrc,Rdst: 20-bit register add. Special alternatives below handle
# PC (register 0) as destination (becomes a branch) and PC as source
# (value is inst_start + 2).
:ADDA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xE & src_8_4 & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = src_8_4 + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,src_8_4,tmpd); }
:ADDA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xE & src_8_4 & dest_0_4 & dest_0_4=0x0 { tmpd:$(REG_SIZE) = inst_start + 2; tmp:$(REG_SIZE) = src_8_4 + tmpd; PC = sext(tmp[0,20]); setaddflags(dest_0_4,src_8_4,tmpd); goto [PC]; }
:ADDA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xE & src_8_4 & src_8_4=0x0 & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = inst_start + 2; tmp:$(REG_SIZE) = tmps + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,tmps,tmpd); }
:ADDA
"#"^Abs20s, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xA & imm_8_4 & dest_0_4 ; Abs20s [ctx_ctregdest=imm_8_4;] { tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = sext(Abs20s); tmp:$(REG_SIZE) = tmpd + tmps; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,tmps,tmpd); } :ADDA "#"^Abs20s, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xA & imm_8_4 & dest_0_4 & dest_0_4=0x0; Abs20s [ctx_ctregdest=imm_8_4;] { tmpd:$(REG_SIZE) = inst_start + 2; tmps:$(REG_SIZE) = sext(Abs20s); tmp:$(REG_SIZE) = tmpd + tmps; PC = sext(tmp[0,20]); setaddflags(PC,tmps,tmpd); goto [PC]; } :CMPA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xD & src_8_4 & dest_0_4 { tmp:$(REG_SIZE) = dest_0_4 - src_8_4; tmpd:$(REG_SIZE) = sext(tmp[0,20]); setsubflags(tmpd,src_8_4,dest_0_4); } :CMPA "#"^Abs20s, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x9 & imm_8_4 & dest_0_4 ; Abs20s [ctx_ctregdest=imm_8_4;] { tmps:$(REG_SIZE) = sext(Abs20s); tmp:$(REG_SIZE) = dest_0_4 - tmps; tmpd:$(REG_SIZE) = sext(tmp[0,20]); setsubflags(tmpd,tmps,dest_0_4); } :MOVA "@"^src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x0 & src_8_4 & dest_0_4 { dest_0_4 = *[RAM]:$(REG_SIZE) src_8_4; dest_0_4 = sext(dest_0_4[0,20]); } :MOVA "@"^src_8_4^"+", dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x1 & src_8_4 & dest_0_4 { dest_0_4 = *[RAM]:$(REG_SIZE) src_8_4; dest_0_4 = sext(dest_0_4[0,20]); src_8_4 = src_8_4 + 4; } :MOVA "&"^Abs20, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x2 & imm_8_4 & dest_0_4 ; Abs20 [ctx_ctregdest=imm_8_4;] { tmp:$(REG_SIZE) = zext(Abs20); dest_0_4 = *[RAM]:$(REG_SIZE) tmp; dest_0_4 = sext(dest_0_4[0,20]); } :MOVA imms_0_16^"("^src_8_4^")", dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x3 & src_8_4 & dest_0_4 ; imms_0_16 { tmp:$(REG_SIZE) = src_8_4 + sext(imms_0_16:2); dest_0_4 = *[RAM]:$(REG_SIZE) tmp; dest_0_4 = sext(dest_0_4[0,20]); } :MOVA src_8_4, "&"^Abs20 is ctx_haveext=0 & op16_12_4=0 & insid=0x6 & imm_0_4 & src_8_4 ; Abs20 [ctx_ctregdest=imm_0_4;] { 
tmp:$(REG_SIZE) = zext(Abs20); *[RAM]:$(REG_SIZE) tmp = src_8_4 & 0xFFFFF; } :MOVA src_8_4, imms_0_16^"("^dest_0_4^")" is ctx_haveext=0 & op16_12_4=0 & insid=0x7 & src_8_4 & dest_0_4 ; imms_0_16 { tmp:$(REG_SIZE) = dest_0_4 + sext(imms_0_16:2); *[RAM]:$(REG_SIZE) tmp = src_8_4 & 0xFFFFF; } :MOVA "#"^Abs20s, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x8 & imm_8_4 & dest_0_4 ; Abs20s [ctx_ctregdest=imm_8_4;] { dest_0_4 = sext(Abs20s); } :MOVA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xC & src_8_4 & dest_0_4 { dest_0_4 = src_8_4; } :SUBA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xF & src_8_4 & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = dest_0_4 - src_8_4; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,src_8_4,tmpd); } :SUBA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xF & src_8_4 & dest_0_4 & dest_0_4=0x0 { tmpd:$(REG_SIZE) = inst_start + 2; tmp:$(REG_SIZE) = tmpd - src_8_4; PC = sext(tmp[0,20]); setsubflags(dest_0_4,src_8_4,tmpd); goto [PC]; } :SUBA src_8_4, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xF & src_8_4 & src_8_4=0x0 & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = inst_start + 2; tmp:$(REG_SIZE) = dest_0_4 - tmps; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,tmps,tmpd); } :SUBA "#"^Abs20s, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xB & imm_8_4 & dest_0_4 ; Abs20s [ctx_ctregdest=imm_8_4;] { tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = sext(Abs20s); tmp:$(REG_SIZE) = dest_0_4 - tmps; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,tmps,tmpd); } :SUBA "#"^Abs20s, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xB & imm_8_4 & dest_0_4 & dest_0_4=0x0; Abs20s [ctx_ctregdest=imm_8_4;] { tmpd:$(REG_SIZE) = inst_start + 2; tmps:$(REG_SIZE) = sext(Abs20s); tmp:$(REG_SIZE) = tmpd - tmps; PC = sext(tmp[0,20]); setsubflags(PC,tmps,tmpd); goto [PC]; } ################## # # Special cases of renamed MOVA where PC is involved # This first case doesn't make any sense, but 
# I saw it in the cunits. The instruction word is all 0.
# The toolchain comes back with a 'beq', but that doesn't make sense either as there is no beq instruction
# anywhere in the manual. One toolchain allows BRA @PC, another one doesn't. I did find reference to gvv
# assembler extension regarding @rN being treated as 0(rN) and vice versa. That would effectively turn
# this into a branch to following instruction. What I think may be happening is a compiler bug where in
# some cases an immediate gets output even though the constant generator is used.
# All-zero instruction word: decoded as a no-op BRA (see note above).
:BRA "@"^src_8_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x0 & src_8_4 & src_8_4=0 & dest_0_4=0x0 { }
# BRA forms: MOVA with PC (register 0) as destination, renamed.
:BRA "@"^src_8_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x0 & src_8_4 & dest_0_4=0x0 { PC = *[RAM]:$(REG_SIZE) src_8_4; PC = sext(PC[0,20]); goto [PC]; }
:BRA "@"^src_8_4^"+" is ctx_haveext=0 & op16_12_4=0 & insid=0x1 & src_8_4 & dest_0_4=0x0 { PC = *[RAM]:$(REG_SIZE) src_8_4; PC = sext(PC[0,20]); src_8_4 = src_8_4 + 4; goto [PC]; }
:BRA "&"^Abs20 is ctx_haveext=0 & op16_12_4=0 & insid=0x2 & imm_8_4 & dest_0_4=0x0; Abs20 [ctx_ctregdest=imm_8_4;] { tmp:$(REG_SIZE) = zext(Abs20); PC = *[RAM]:$(REG_SIZE) tmp; PC = sext(PC[0,20]); goto [PC]; }
:BRA imms_0_16^"("^src_8_4^")" is ctx_haveext=0 & op16_12_4=0 & insid=0x3 & src_8_4 & dest_0_4=0x0; imms_0_16 { tmp:$(REG_SIZE) = src_8_4 + sext(imms_0_16:2); PC = *[RAM]:$(REG_SIZE) tmp; PC = sext(PC[0,20]); goto [PC]; }
:BRA "#"^Abs20add is ctx_haveext=0 & op16_12_4=0 & insid=0x8 & imm_8_4 & dest_0_4=0x0; Abs20add [ctx_ctregdest=imm_8_4;] { # PC = Abs20add;
goto Abs20add; }
:BRA src_8_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xC & src_8_4 & dest_0_4=0x0 { PC = src_8_4; goto [PC]; }
# RETA: MOVA @SP+,PC renamed; modeled as a return.
:RETA "@"^src_8_4^"+" is ctx_haveext=0 & op16_12_4=0 & insid=0x1 & src_8_4 & src_8_4=0x1 & dest_0_4=0x0 { PC = *[RAM]:$(REG_SIZE) src_8_4; PC = sext(PC[0,20]); SP = SP + 4; return [PC]; }
# ################
#
# Special cases of SUBA/ADDA/CMPA/MOVA
# DECDA/INCDA: SUBA/ADDA #2 with the immediate word 0x0002.
:DECDA dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xB
& imm_8_4=0x0 & dest_0_4 ; imm_0_16=0x0002 { tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = 2; dest_0_4 = dest_0_4 - 2; dest_0_4 = sext(dest_0_4[0,20]); setsubflags(dest_0_4,tmps,tmpd); }
:INCDA dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0xA & imm_8_4=0x0 & dest_0_4 ; imm_0_16=0x0002 { tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = 2; dest_0_4 = dest_0_4 + 2; dest_0_4 = sext(dest_0_4[0,20]); setaddflags(dest_0_4,tmps,tmpd); }
# TSTA: CMPA #0,Rdst — flags only, carry always set.
:TSTA dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x9 & imm_8_4=0x0 & dest_0_4 ; imm_0_16=0x0000 { $(CARRY) = 1; $(OVERFLOW) = 0; $(SIGN) = (dest_0_4 s< 0); $(ZERO) = (dest_0_4 == 0); }
# CLRA: MOVA #0,Rdst.
:CLRA dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insid=0x8 & imm_8_4=0 & dest_0_4 ; imm_0_16=0 { dest_0_4 = 0; }
# ################
#
# Other 20 bit address instructions
# CALLA: push 4-byte return address (inst_next), load 20-bit target,
# indirect call.
:CALLA dest_0_4 is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0x4 & dest_0_4 { SP = SP - 0x4; *:4 SP = inst_next; PC = dest_0_4; call [PC]; }
:CALLA imms_0_16^"("^dest_0_4^")" is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0x5 & dest_0_4 ; imms_0_16 { SP = SP - 0x4; *:4 SP = inst_next; tmp:$(REG_SIZE) = dest_0_4 + sext(imms_0_16:2); PC = *[RAM]:$(REG_SIZE) tmp; PC = sext(PC[0,20]); call [PC]; }
:CALLA "@"^dest_0_4 is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0x6 & dest_0_4 { SP = SP - 0x4; *:4 SP = inst_next; PC = *[RAM]:$(REG_SIZE) dest_0_4; PC = sext(PC[0,20]); call [PC]; }
:CALLA "@"^dest_0_4^"+" is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0x7 & dest_0_4 { SP = SP - 0x4; *:4 SP = inst_next; PC = *[RAM]:$(REG_SIZE) dest_0_4; PC = sext(PC[0,20]); dest_0_4 = dest_0_4 + 4; call [PC]; }
:CALLA "&"^Abs20 is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0x8 & imm_0_4; Abs20 [ctx_ctregdest=imm_0_4;] { SP = SP - 0x4; *:4 SP = inst_next; tmp:$(REG_SIZE) = zext(Abs20); PC = *[RAM]:$(REG_SIZE) tmp; PC = sext(PC[0,20]); call [PC]; }
:CALLA imms_0_16^"(PC)" is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0x9 ; imms_0_16 { SP = SP - 0x4; *:4 SP = inst_next; tmp:$(REG_SIZE) = inst_start + sext(imms_0_16:2); PC =
*[RAM]:$(REG_SIZE) tmp; PC = sext(PC[0,20]); call [PC]; }
:CALLA "#"^Abs20add is ctx_haveext=0 & op16_8_8=0x13 & op16_4_4=0xB & imm_0_4; Abs20add [ctx_ctregdest = imm_0_4;] { SP = SP - 0x4; *:4 SP = inst_next; PC = &Abs20add; call Abs20add; }
# PUSHM/POPM: seed the context count/register fields, then build the
# recursive chains defined above.
:PUSHM.A IMM4,dest_0_4 is ctx_haveext=0 & op16_8_8=0x14 & IMM4 & dest_0_4 & imm_4_4 & PUSHAR15 [ctx_count=imm_4_4+1; ctx_mreg=dest_0_4;] { build IMM4; build PUSHAR15; }
:PUSHM.W IMM4,dest_0_4 is ctx_haveext=0 & op16_8_8=0x15 & IMM4 & dest_0_4 & imm_4_4 & PUSHWR15 [ctx_count=imm_4_4+1; ctx_mreg=dest_0_4;] { build IMM4; build PUSHWR15; }
:POPM.A IMM4,ctx_popreg is ctx_haveext=0 & op16_8_8=0x16 & IMM4 & ctx_popreg & imm_0_4 & imm_4_4 & POPAR0 [ctx_popreg_set=imm_0_4+imm_4_4; ctx_count=imm_4_4+1; ctx_mreg=imm_0_4;] { build IMM4; build POPAR0; }
:POPM.W IMM4,ctx_popreg is ctx_haveext=0 & op16_8_8=0x17 & IMM4 & ctx_popreg & imm_0_4 & imm_4_4 & POPWR0 [ctx_popreg_set=imm_0_4+imm_4_4; ctx_count=imm_4_4+1; ctx_mreg=imm_0_4;] { build IMM4; build POPWR0; }
# Rotate instructions, register-only, n = NUM2 positions (rrn = n-1 is
# used where the shift amount n-1 or the bit index of the last bit
# rotated out is needed — TODO confirm rrn's definition upstream).
:RRCM.A NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x04 & NUM2 & dest_0_4 & rrn { tmph:$(REG_SIZE) = (dest_0_4 >> rrn) & 0x1; tmpc:$(REG_SIZE) = zext($(CARRY)); tmpc = tmpc << (20-NUM2); dest_0_4 = (dest_0_4 >> NUM2) | (dest_0_4 << (20-rrn)); dest_0_4 = ((dest_0_4 & (~tmpc)) | tmpc) & 0xFFFFF; $(CARRY) = (tmph != 0); $(OVERFLOW) = 0; $(SIGN) = (dest_0_4[19,1] != 0); $(ZERO) = (dest_0_4 == 0); }
:RRAM.A NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x14 & NUM2 & dest_0_4 & rrn { tmph:$(REG_SIZE) = (dest_0_4 >> rrn) & 0x1; dest_0_4 = (dest_0_4 s>> NUM2); $(CARRY) = (tmph != 0); $(OVERFLOW) = 0; $(SIGN) = (dest_0_4[19,1] != 0); $(ZERO) = (dest_0_4 == 0); }
:RLAM.A NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x24 & NUM2 & dest_0_4 { tmph:$(REG_SIZE) = (dest_0_4 >> (20 - NUM2)) & 0x1; dest_0_4 = (dest_0_4 << NUM2); dest_0_4 = sext(dest_0_4[0,20]); $(CARRY) = (tmph != 0); $(SIGN) = (dest_0_4[19,1] != 0); $(ZERO) = (dest_0_4 == 0); }
:RRUM.A NUM2,
dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x34 & NUM2 & dest_0_4 & rrn { tmph:$(REG_SIZE) = (dest_0_4 >> rrn) & 0x1; dest_0_4 = (dest_0_4 >> NUM2) & 0xFFFFF; $(CARRY) = (tmph != 0); $(OVERFLOW) = 0; $(SIGN) = 0; $(ZERO) = (dest_0_4 == 0); }
# Word-size variants operate on the low 16 bits and zero-extend back.
:RRCM.W NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x05 & NUM2 & dest_0_4 & rrn { tmpr:2 = dest_0_4:2; tmph:2 = (tmpr >> rrn) & 0x1; tmpc:2 = zext($(CARRY)); tmpc = tmpc << (16-NUM2); tmpr = (tmpr >> NUM2) | (tmpr << (16-rrn)); dest_0_4 = zext((tmpr & (~tmpc)) | tmpc); $(CARRY) = (tmph != 0); $(OVERFLOW) = 0; $(SIGN) = (dest_0_4[15,1] != 0); $(ZERO) = (dest_0_4 == 0); }
:RRAM.W NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x15 & NUM2 & dest_0_4 & rrn { tmpr:2 = dest_0_4:2; tmph:$(REG_SIZE) = (dest_0_4 >> rrn) & 0x1; dest_0_4 = zext(tmpr s>> NUM2); $(CARRY) = (tmph != 0); $(OVERFLOW) = 0; $(SIGN) = (dest_0_4[19,1] != 0); $(ZERO) = (dest_0_4 == 0); }
:RLAM.W NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x25 & NUM2 & dest_0_4 { tmph:$(REG_SIZE) = (dest_0_4 >> (16 - NUM2)) & 0x1; dest_0_4 = (dest_0_4 << NUM2); dest_0_4 = zext(dest_0_4:2); $(CARRY) = (tmph != 0); $(SIGN) = (dest_0_4[19,1] != 0); $(ZERO) = (dest_0_4 == 0); }
:RRUM.W NUM2, dest_0_4 is ctx_haveext=0 & op16_12_4=0 & insidbig=0x35 & NUM2 & dest_0_4 & rrn { tmpr:2 = dest_0_4:2; tmph:2 = (tmpr >> rrn) & 0x1; dest_0_4 = zext(tmpr >> NUM2); $(CARRY) = (tmph != 0); $(OVERFLOW) = 0; $(SIGN) = 0; $(ZERO) = (dest_0_4 == 0); }
# Clear a 20-bit register while preserving its low byte / low word:
# used when an extended byte/word op must zero the untouched upper bits.
macro bzero(full, byte) { ztmp:1 = byte; full = 0; byte = ztmp; }
macro wzero(full, word) { ztmp:2 = word; full = 0; word = ztmp; }
##############################
#
# Extension word instructions
#
# The base msp430 handles all the addressing modes in subtables.
# The reg/reg mode for the 'X' instruction has repetition so we
# break that mode out separately. Because of that, we need to use
# separate subtables for the address modes since the base ones
# will hit on the reg/reg mode.
#
# There are also two groups for the extended instructions:
# double and single operand. The double operand ones come
# first. A lot of the singles are covered under the address
# extensions as they don't have the extension word.
#
# The manual talks about RRUX extended instructions. However,
# I've determined they don't really exist. First off, the base
# RRU is not mentioned in the manual and is not in the toolchain.
# The toolchain does take rrux instructions, but what I've figured
# out is that for the W and A versions, it substitutes RRUM with
# the 'n' argument being 1. For the B version, it uses rra.b
# followed by a bic.b instruction.
#############################
#
# Double Operand
#
#############################
# Repeat enabled
# NOTE(review): the bare "goto ;" statements in the repeat loops below
# appear to have lost an angle-bracketed local label (e.g. "goto <label>;")
# in extraction — verify against the upstream Ghidra MSP430X.sinc.
:ADCX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x6 & src16_8_4=0x3 & as=0x0 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 & repeat_carry {
build repeat_carry;
# Operation Flags...
tmp_carry:1 = carry(DST8_0_4,$(CARRY)); #C Flag
$(OVERFLOW) = scarry(DST8_0_4,$(CARRY)); #V Flag
# Operation...
DST8_0_4 = DST8_0_4 + $(CARRY);
bzero(reg_Direct16_0_4,DST8_0_4);
# Result Flags...
$(CARRY) = tmp_carry;
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag
$(ZERO) = (DST8_0_4 == 0x0); # Z Flag
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADCX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x6 & src16_8_4=0x3 & as=0x0 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 & repeat_carry {
build repeat_carry;
# Operation Flags...
tmp_carry:1 = carry(DST16_0_4,zext($(CARRY))); #C Flag
$(OVERFLOW) = scarry(DST16_0_4,zext($(CARRY))); #V Flag
# Operation...
DST16_0_4 = DST16_0_4 + zext($(CARRY));
wzero(reg_Direct16_0_4,DST16_0_4);
# Result Flags...
$(CARRY) = tmp_carry;
$(SIGN) = (DST16_0_4 s< 0x0); # S Flag
$(ZERO) = (DST16_0_4 == 0x0); # Z Flag
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADCX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x6 & src16_8_4=0x3 & as=0x0 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 & repeat_carry {
build repeat_carry;
tmpd:$(REG_SIZE) = dest_0_4;
tmpc:$(REG_SIZE) = zext($(CARRY));
tmp:$(REG_SIZE) = tmpc + dest_0_4;
dest_0_4 = sext(tmp[0,20]);
setaddflags(dest_0_4,tmpc,tmpd);
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADDX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0x5 & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 {
# Operation Flags...
$(CARRY) = carry(XRSRC_B_AS, DST8_0_4); # C Flag
$(OVERFLOW) = scarry(XRSRC_B_AS, DST8_0_4); # V Flag
# Operation...
DST8_0_4 = XRSRC_B_AS + DST8_0_4;
bzero(reg_Direct16_0_4,DST8_0_4);
# Result Flags...
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag
$(ZERO) = (DST8_0_4 == 0x0); # Z Flag
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADDX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0x5 & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 {
# Operation Flags...
$(CARRY) = carry(XRSRC_W_AS, DST16_0_4); # C Flag
$(OVERFLOW) = scarry(XRSRC_W_AS, DST16_0_4); # V Flag
# Operation...
DST16_0_4 = XRSRC_W_AS + DST16_0_4;
wzero(reg_Direct16_0_4,DST16_0_4);
# Result Flags...
$(SIGN) = (DST16_0_4 s< 0x0); # S Flag
$(ZERO) = (DST16_0_4 == 0x0); # Z Flag
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADDX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0x5 & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 {
tmpd:$(REG_SIZE) = dest_0_4;
tmp:$(REG_SIZE) = XRSRC_A_AS + dest_0_4;
dest_0_4 = sext(tmp[0,20]);
setaddflags(dest_0_4,XRSRC_A_AS,tmpd);
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADDCX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0x6 & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 & repeat_carry {
build repeat_carry;
# Operation Flags...
tmp_carry:1 = (carry(XRSRC_B_AS, $(CARRY)) || carry(DST8_0_4,XRSRC_B_AS + $(CARRY))); #C Flag
$(OVERFLOW) = (scarry(XRSRC_B_AS, $(CARRY)) || scarry(DST8_0_4,XRSRC_B_AS + $(CARRY))); #V Flag
# Operation...
DST8_0_4 = XRSRC_B_AS + DST8_0_4 + $(CARRY);
bzero(reg_Direct16_0_4,DST8_0_4);
# Result Flags...
$(CARRY) = tmp_carry;
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag
$(ZERO) = (DST8_0_4 == 0x0); # Z Flag
build postIncrementStore;
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ; }
:ADDCX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0x6 & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 & repeat_carry {
build repeat_carry;
# Operation Flags...
tmp_carry:1 = (carry(XRSRC_W_AS,zext($(CARRY))) || carry(DST16_0_4,XRSRC_W_AS + zext($(CARRY)))); #C Flag
$(OVERFLOW) = (scarry(XRSRC_W_AS,zext($(CARRY))) || scarry(DST16_0_4,XRSRC_W_AS + zext($(CARRY)))); #V Flag
# Operation...
DST16_0_4 = XRSRC_W_AS + DST16_0_4 + zext($(CARRY));
wzero(reg_Direct16_0_4,DST16_0_4);
# Result Flags...
$(CARRY) = tmp_carry; $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :ADDCX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0x6 & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 & repeat_carry { build repeat_carry; tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = XRSRC_A_AS + zext($(CARRY)); tmp:$(REG_SIZE) = tmps + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,tmps,tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :ANDX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0xF & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... DST8_0_4 = DST8_0_4 & XRSRC_B_AS; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag $(CARRY) = (DST8_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :ANDX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0xF & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... DST16_0_4 = DST16_0_4 & XRSRC_W_AS; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag $(CARRY) = (DST16_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :ANDX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0xF & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... dest_0_4 = dest_0_4 & XRSRC_A_AS; dest_0_4 = sext(dest_0_4[0,20]); # Result Flags... 
$(SIGN) = (dest_0_4 s< 0x0); # S Flag
$(ZERO) = (dest_0_4 == 0x0); # Z Flag
$(CARRY) = (dest_0_4 != 0x0); # C Flag
build postIncrementStore;
# RPT support: CNT holds the remaining repeat count; fall through when done.
# NOTE(review): "goto ;" has no target anywhere in this chunk -- the repeat
# label was most likely an angle-bracketed <label> lost in extraction;
# confirm against the upstream slaspec before building.
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ;
}

# BICX: dst &= ~src (bit clear). Status bits are not affected.
:BICX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0xC & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4
{
	DST8_0_4 = (~XRSRC_B_AS) & DST8_0_4;
	bzero(reg_Direct16_0_4,DST8_0_4);
	#Status bits are not affected
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

:BICX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0xC & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4
{
	DST16_0_4 = (~XRSRC_W_AS) & DST16_0_4;
	wzero(reg_Direct16_0_4,DST16_0_4);
	#Status bits are not affected
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

:BICX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0xC & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4
{
	dest_0_4 = (~XRSRC_A_AS) & dest_0_4;
	# NOTE(review): zext() here differs from the sext() every other .A form in
	# this file uses to re-canonicalize the 20-bit result (bit 19 is not
	# propagated into the upper register bits); confirm whether that is
	# intentional before changing it.
	dest_0_4 = zext(dest_0_4[0,20]);
	#Status bits are not affected
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

# BISX: dst |= src (bit set). Status bits are not affected.
:BISX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0xD & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4
{
	DST8_0_4 = XRSRC_B_AS | DST8_0_4;
	bzero(reg_Direct16_0_4,DST8_0_4);
	#Status bits are not affected
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

:BISX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0xD & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4
{
	DST16_0_4 = XRSRC_W_AS | DST16_0_4;
	wzero(reg_Direct16_0_4,DST16_0_4);
	#Status bits are not affected
	# FIX: the explicit write-back build was missing from the word form only;
	# without it SLEIGH auto-builds postIncrementStore at the START of the
	# semantics, before the OR result exists. Every sibling constructor
	# (BISX.B, BISX.A, BICX.*, ADDX.*, ...) builds it here, after the result.
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

:BISX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0xD & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4
{
	dest_0_4 = XRSRC_A_AS | dest_0_4;
	#Status bits are not affected
	dest_0_4 =
sext(dest_0_4[0,20]); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :BITX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0xB & bow=1 & ctx_al=1 & postIncrement & XRSRC_B_AS & DST8_0_4 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... result:1 = DST8_0_4 & XRSRC_B_AS; # Result Flags... $(CARRY) = (result != 0x0); # C Flag $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :BITX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0xB & bow=0 & ctx_al=1 & postIncrement & XRSRC_W_AS & DST16_0_4 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... result:2 = DST16_0_4 & XRSRC_W_AS; # Result Flags... $(CARRY) = (result != 0x0); # C Flag $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :BITX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0xB & bow=1 & ctx_al=0 & postIncrement & XRSRC_A_AS & dest_0_4 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... result:$(REG_SIZE) = dest_0_4 & XRSRC_A_AS; # Result Flags... 
result = sext(result[0,20]); $(CARRY) = (result != 0x0); # C Flag $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :CLRX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x4 & src16_8_4=0x3 & as=0x0 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 { DST8_0_4 = 0; bzero(reg_Direct16_0_4,DST8_0_4); #Status bits are not affected build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :CLRX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x4 & src16_8_4=0x3 & as=0x0 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 { DST16_0_4 = 0; wzero(reg_Direct16_0_4,DST16_0_4); #Status bits are not affected build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :CLRX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x4 & src16_8_4=0x3 & as=0x0 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 { dest_0_4 = 0; build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :CMPX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0x9 & bow=1 & ctx_al=1 & postIncrement & XRSRC_B_AS & DST8_0_4 { # Operation Flags... $(CARRY) = (XRSRC_B_AS <= DST8_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST8_0_4, XRSRC_B_AS); # V Flag # Operation... result:1 = (DST8_0_4 - XRSRC_B_AS); # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :CMPX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0x9 & bow=0 & ctx_al=1 & postIncrement & XRSRC_W_AS & DST16_0_4 { # Operation Flags... $(CARRY) = (XRSRC_W_AS <= DST16_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST16_0_4, XRSRC_W_AS); # V Flag # Operation... result:2 = (DST16_0_4 - XRSRC_W_AS); # Result Flags... 
$(SIGN) = (result s< 0x0); # S Flag
$(ZERO) = (result == 0x0); # Z Flag
build postIncrement;
# RPT support: CNT is the remaining repeat count; fall through when done.
# NOTE(review): "goto ;" lacks a target throughout this chunk -- the repeat
# label was most likely an angle-bracketed <label> lost in extraction;
# confirm against the upstream slaspec.
if (CNT == 0) goto inst_next;
CNT = CNT - 1;
goto ;
}

# CMPX.A: 20-bit compare. The difference is computed only to set flags via
# setsubflags; the destination is not written.
:CMPX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0x9 & bow=1 & ctx_al=0 & postIncrement & XRSRC_A_AS & dest_0_4
{
	tmp:$(REG_SIZE) = dest_0_4 - XRSRC_A_AS;
	tmpd:$(REG_SIZE) = sext(tmp[0,20]); # canonical sign-extended 20-bit result
	setsubflags(tmpd,XRSRC_A_AS,dest_0_4);
	build postIncrement;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

# BCD (decimal) addition is modeled as an uninterpreted p-code op.
define pcodeop bcd_add;

# DADCX: decimally add the carry bit to the destination.
:DADCX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0xA & src16_8_4=0x3 & as=0x0 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 & repeat_carry
{
	build repeat_carry;
	local temp:4 = zext(DST8_0_4);
	DST8_0_4 = bcd_add(temp, $(CARRY));
	# NOTE(review): carry is derived from the PRE-add value and does not
	# consult the incoming carry ("temp >= 0x99" here, vs the strict "> 0x99"
	# applied to the summed value in DADDX) -- an approximation; verify
	# against the MSP430X DADD carry definition.
	$(CARRY) = temp >= 0x99;
	bzero(reg_Direct16_0_4,DST8_0_4);
	# Result Flags...
	$(SIGN) = (DST8_0_4 s< 0x0); # S Flag
	$(ZERO) = (DST8_0_4 == 0x0); # Z Flag
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

:DADCX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0xA & src16_8_4=0x3 & as=0x0 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 & repeat_carry
{
	build repeat_carry;
	local temp:4 = zext(DST16_0_4);
	DST16_0_4 = bcd_add(temp, $(CARRY));
	# NOTE(review): same pre-add carry approximation as DADCX.B (see above).
	$(CARRY) = temp >= 0x9999;
	wzero(reg_Direct16_0_4,DST16_0_4);
	# Result Flags...
	$(SIGN) = (DST16_0_4 s< 0x0); # S Flag
	$(ZERO) = (DST16_0_4 == 0x0); # Z Flag
	build postIncrementStore;
	if (CNT == 0) goto inst_next;
	CNT = CNT - 1;
	goto ;
}

:DADCX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0xA & src16_8_4=0x3 & as=0x0 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 & repeat_carry
{
	build repeat_carry;
	local temp:4 = zext(dest_0_4);
	dest_0_4 = bcd_add(temp, $(CARRY));
	# NOTE(review): same pre-add carry approximation; 0x99999 is the maximum
	# 5-digit (20-bit) packed-BCD value.
	$(CARRY) = temp >= 0x99999;
	dest_0_4 = sext(dest_0_4[0,20]);
	# Result Flags...
$(SIGN) = (dest_0_4 s< 0x0); # S Flag $(ZERO) = (dest_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DADDX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0xA & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; local temp_src:4 = zext(XRSRC_B_AS); local temp_dest:4 = zext(DST8_0_4); local temp_carry:4 = zext($(CARRY)); DST8_0_4 = bcd_add(temp_src,temp_dest, temp_carry); bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag $(CARRY) = (temp_src + temp_dest + temp_carry) > 0x99; build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DADDX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0xA & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; local temp_src:4 = zext(XRSRC_W_AS); local temp_dest:4 = zext(DST16_0_4); local temp_carry:4 = zext($(CARRY)); DST16_0_4 = bcd_add(temp_src, temp_dest, temp_carry); wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag $(CARRY) = (temp_src + temp_dest + temp_carry) > 0x9999; build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DADDX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0xA & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 & repeat_carry { build repeat_carry; local temp_src:4 = zext(XRSRC_A_AS); local temp_dest:4 = zext(dest_0_4); local temp_carry:4 = zext($(CARRY)); dest_0_4 = bcd_add(temp_src, temp_dest, temp_carry); dest_0_4 = sext(dest_0_4[0,20]); # Result Flags... 
$(SIGN) = (dest_0_4 s< 0x0); # S Flag $(ZERO) = (dest_0_4 == 0x0); # Z Flag $(CARRY) = (temp_src + temp_dest + temp_carry) > 0x99999; build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DECX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x8 & src16_8_4=0x3 & as=0x1 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = (1 <= DST8_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST8_0_4, 1:1); # V Flag # Operation... DST8_0_4 = DST8_0_4 - 1; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DECX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x8 & src16_8_4=0x3 & as=0x1 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = (1 <= DST16_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST16_0_4, 1:2); # V Flag # Operation... DST16_0_4 = DST16_0_4 - 1; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DECX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x8 & src16_8_4=0x3 & as=0x1 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = dest_0_4 - 1; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,1:$(REG_SIZE),tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DECDX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x8 & src16_8_4=0x3 & as=0x2 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = (2 <= DST8_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST8_0_4, 2:1); # V Flag # Operation... DST8_0_4 = DST8_0_4 - 2; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... 
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DECDX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x8 & src16_8_4=0x3 & as=0x2 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = (2 <= DST16_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST16_0_4, 2:2); # V Flag # Operation... DST16_0_4 = DST16_0_4 - 2; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :DECDX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x8 & src16_8_4=0x3 & as=0x2 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = dest_0_4 - 2; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,2:$(REG_SIZE),tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INCX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x5 & src16_8_4=0x3 & as=0x1 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = carry(DST8_0_4,1); # C Flag $(OVERFLOW) = scarry(DST8_0_4,1); # V Flag # Operation... DST8_0_4 = 1 + DST8_0_4; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INCX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x5 & src16_8_4=0x3 & as=0x1 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = carry(DST16_0_4,1); # C Flag $(OVERFLOW) = scarry(DST16_0_4,1); # V Flag # Operation... DST16_0_4 = 1:2 + DST16_0_4; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... 
$(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INCX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x5 & src16_8_4=0x3 & as=0x1 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = 1 + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,1:$(REG_SIZE),tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INCDX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x5 & src16_8_4=0x3 & as=0x2 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = carry(DST8_0_4,2); # C Flag $(OVERFLOW) = scarry(DST8_0_4,2); # V Flag # Operation... DST8_0_4 = 2 + DST8_0_4; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INCDX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x5 & src16_8_4=0x3 & as=0x2 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = carry(DST16_0_4,2); # C Flag $(OVERFLOW) = scarry(DST16_0_4,2); # V Flag # Operation... DST16_0_4 = 2:2 + DST16_0_4; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... 
$(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INCDX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x5 & src16_8_4=0x3 & as=0x2 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = 2 + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,2:$(REG_SIZE),tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INVX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0xE & src16_8_4=0x3 & as=0x3 & bow=1 & ctx_al=1 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(OVERFLOW) = DST8_0_4 s< 0x0; # V Flag # Operation... DST8_0_4 = DST8_0_4 ^ -1; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag $(CARRY) = (DST8_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INVX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0xE & src16_8_4=0x3 & as=0x3 & bow=0 & ctx_al=1 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(OVERFLOW) = DST16_0_4 s< 0x0 ; # V Flag # Operation... DST16_0_4 = DST16_0_4 ^ -1; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag $(CARRY) = (DST16_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :INVX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0xE & src16_8_4=0x3 & as=0x3 & bow=1 & ctx_al=0 & postIncrementStore & dest_0_4 { # Operation Flags... $(OVERFLOW) = dest_0_4 s< 0x0; # V Flag # Operation... dest_0_4 = dest_0_4 ^ -1; dest_0_4 = sext(dest_0_4[0,20]); # Result Flags... 
$(SIGN) = (dest_0_4 s< 0x0); # S Flag $(ZERO) = (dest_0_4 == 0x0); # Z Flag $(CARRY) = (dest_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :MOVX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0x4 & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 { DST8_0_4 = XRSRC_B_AS; bzero(reg_Direct16_0_4,DST8_0_4); #Status bits are not affected build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :MOVX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0x4 & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 { DST16_0_4 = XRSRC_W_AS; wzero(reg_Direct16_0_4,DST16_0_4); #Status bits are not affected build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :MOVX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0x4 & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 { dest_0_4 = XRSRC_A_AS; build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :POPX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x4 & src16_8_4=0x1 & as=0x3 & bow=1 & ctx_al=1 & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 { DST8_0_4 = *:1 SP; bzero(reg_Direct16_0_4,DST8_0_4); SP = SP + 0x2; #Status bits are not affected if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :POPX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x4 & src16_8_4=0x1 & as=0x3 & bow=0 & ctx_al=1 & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 { DST16_0_4 = *:2 SP; wzero(reg_Direct16_0_4,DST16_0_4); SP = SP + 0x2; #Status bits are not affected if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :POPX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x4 & src16_8_4=0x1 & as=0x3 & bow=1 & ctx_al=0 & XRSRC_A_AS & dest_0_4 { dest_0_4 = *:4 SP; SP = SP + 0x4; dest_0_4 = sext(dest_0_4[0,20]); if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RLAX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x5 & bow=1 & ctx_al=1 & src_Direct16_8_4=dest_Direct16_0_4 & 
DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = carry(DST8_0_4, DST8_0_4); # C Flag $(OVERFLOW) = scarry(DST8_0_4, DST8_0_4); # V Flag # Operation... DST8_0_4 = DST8_0_4 + DST8_0_4; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RLAX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x5 & bow=0 & ctx_al=1 & src_Direct16_8_4=dest_Direct16_0_4 & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = carry(DST16_0_4, DST16_0_4); # C Flag $(OVERFLOW) = scarry(DST16_0_4, DST16_0_4); # V Flag # Operation... DST16_0_4 = DST16_0_4 + DST16_0_4; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RLAX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x5 & bow=1 & src_Direct16_8_4=dest_Direct16_0_4 & ctx_al=0 & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = dest_0_4 + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,tmpd,tmpd); if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RLCX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x6 & bow=1 & ctx_al=1 & src_Direct16_8_4=dest_Direct16_0_4 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; # Operation Flags... tmp_carry:1 = (carry(DST8_0_4, $(CARRY)) || carry(DST8_0_4,DST8_0_4 + $(CARRY))); #C Flag $(OVERFLOW) = (scarry(DST8_0_4, $(CARRY)) || scarry(DST8_0_4,DST8_0_4 + $(CARRY))); #V Flag # Operation... DST8_0_4 = DST8_0_4 + DST8_0_4 + $(CARRY); bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... 
$(CARRY) = tmp_carry; $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RLCX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x6 & bow=0 & ctx_al=1 & src_Direct16_8_4=dest_Direct16_0_4 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; # Operation Flags... tmp_carry:1 = (carry(DST16_0_4,zext($(CARRY))) || carry(DST16_0_4,DST16_0_4 + zext($(CARRY)))); #C Flag $(OVERFLOW) = (scarry(DST16_0_4,zext($(CARRY))) || scarry(DST16_0_4,DST16_0_4 + zext($(CARRY)))); #V Flag # Operation... DST16_0_4 = DST16_0_4 + DST16_0_4 + zext($(CARRY)); wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(CARRY) = tmp_carry; $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RLCX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x6 & bow=1 & ctx_al=0 & src_Direct16_8_4=dest_Direct16_0_4 & postIncrementStore & XRSRC_A_AS & dest_0_4 & repeat_carry { build repeat_carry; tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = dest_0_4 + zext($(CARRY)); tmp:$(REG_SIZE) = tmps + dest_0_4; dest_0_4 = sext(tmp[0,20]); setaddflags(dest_0_4,tmps,tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SBCX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x7 & bow=1 & ctx_al=1 & src16_8_4=0x3 & as=0x0 & postIncrementStore & DST8_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; # Operation Flags... brw:1 = 1 - $(CARRY); $(CARRY) = (brw <= DST8_0_4); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST8_0_4, brw); # Operation... DST8_0_4 = DST8_0_4 - brw; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... 
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SBCX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x7 & bow=0 & ctx_al=1 & src16_8_4=0x3 & as=0x0 & postIncrementStore & DST16_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; # Operation Flags... brw:2 = 1 - zext( $(CARRY) ); $(CARRY) = (brw <= DST16_0_4); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST16_0_4, brw); # Operation... DST16_0_4 = DST16_0_4 - brw; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SBCX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x7 & bow=1 & ctx_al=0 & src16_8_4=0x3 & as=0x0 & postIncrementStore & dest_0_4 & repeat_carry { build repeat_carry; brw:$(REG_SIZE) = 1 - zext( $(CARRY) ); tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = dest_0_4 - brw; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,brw,tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SUBCX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0x7 & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; # Operation Flags... brw:1 = 1 - $(CARRY); $(CARRY) = ((brw + XRSRC_B_AS) <= DST8_0_4); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST8_0_4, XRSRC_B_AS + brw); # Operation... DST8_0_4 = DST8_0_4 - XRSRC_B_AS - brw; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... 
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SUBCX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0x7 & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 & repeat_carry { build repeat_carry; # Operation Flags... brw:2 = 1 - zext( $(CARRY) ); $(CARRY) = ((brw + XRSRC_W_AS) <= DST16_0_4); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST16_0_4, XRSRC_W_AS + brw); # Operation... DST16_0_4 = DST16_0_4 - XRSRC_W_AS - brw; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SUBCX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0x7 & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 & repeat_carry { build repeat_carry; brw:$(REG_SIZE) = 1 - zext( $(CARRY) ); tmpd:$(REG_SIZE) = dest_0_4; tmps:$(REG_SIZE) = XRSRC_A_AS + brw; tmp:$(REG_SIZE) = dest_0_4 - tmps; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,tmps,tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SUBX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0x8 & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(CARRY) = (XRSRC_B_AS <= DST8_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST8_0_4, XRSRC_B_AS); # V Flag # Operation... DST8_0_4 = DST8_0_4 - XRSRC_B_AS; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SUBX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0x8 & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... 
$(CARRY) = (XRSRC_W_AS <= DST16_0_4); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(DST16_0_4, XRSRC_W_AS); # V Flag # Operation... DST16_0_4 = DST16_0_4 - XRSRC_W_AS; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SUBX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0x8 & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 { tmpd:$(REG_SIZE) = dest_0_4; tmp:$(REG_SIZE) = dest_0_4 - XRSRC_A_AS; dest_0_4 = sext(tmp[0,20]); setsubflags(dest_0_4,XRSRC_A_AS,tmpd); build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :TSTX.B DST8_0_4 is ctx_haveext=4 & op16_12_4=0x9 & bow=1 & ctx_al=1 & src16_8_4=0x3 & as=0x0 & DST8_0_4 { # Operation Flags... $(CARRY) = 1; # Carry is NOT set if there is a borrow $(OVERFLOW) = 0; # V Flag # Result Flags... $(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :TSTX.W DST16_0_4 is ctx_haveext=4 & op16_12_4=0x9 & bow=0 & ctx_al=1 & src16_8_4=0x3 & as=0x0 & DST16_0_4 { # Operation Flags... $(CARRY) = 1; # Carry is NOT set if there is a borrow $(OVERFLOW) = 0; # V Flag # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :TSTX.A dest_0_4 is ctx_haveext=4 & op16_12_4=0x9 & bow=1 & ctx_al=0 & src16_8_4=0x3 & as=0x0 & dest_0_4 { setsubflags(dest_0_4,0:$(REG_SIZE),dest_0_4); if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :XORX.B XRSRC_B_AS, DST8_0_4 is ctx_haveext=4 & op16_12_4=0xE & bow=1 & ctx_al=1 & postIncrementStore & XRSRC_B_AS & DST8_0_4 & reg_Direct16_0_4 { # Operation Flags... $(OVERFLOW) = ((DST8_0_4 s< 0x0) && (XRSRC_B_AS s< 0x0)) ; # V Flag # Operation... DST8_0_4 = DST8_0_4 ^ XRSRC_B_AS; bzero(reg_Direct16_0_4,DST8_0_4); # Result Flags... 
$(SIGN) = (DST8_0_4 s< 0x0); # S Flag $(ZERO) = (DST8_0_4 == 0x0); # Z Flag $(CARRY) = (DST8_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :XORX.W XRSRC_W_AS, DST16_0_4 is ctx_haveext=4 & op16_12_4=0xE & bow=0 & ctx_al=1 & postIncrementStore & XRSRC_W_AS & DST16_0_4 & reg_Direct16_0_4 { # Operation Flags... $(OVERFLOW) = ((DST16_0_4 s< 0x0) && (XRSRC_W_AS s< 0x0)) ; # V Flag # Operation... DST16_0_4 = DST16_0_4 ^ XRSRC_W_AS; wzero(reg_Direct16_0_4,DST16_0_4); # Result Flags... $(SIGN) = (DST16_0_4 s< 0x0); # S Flag $(ZERO) = (DST16_0_4 == 0x0); # Z Flag $(CARRY) = (DST16_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :XORX.A XRSRC_A_AS, dest_0_4 is ctx_haveext=4 & op16_12_4=0xE & bow=1 & ctx_al=0 & postIncrementStore & XRSRC_A_AS & dest_0_4 { # Operation Flags... $(OVERFLOW) = ((dest_0_4 s< 0x0) && (XRSRC_A_AS s< 0x0)) ; # V Flag # Operation... dest_0_4 = dest_0_4 ^ XRSRC_A_AS; dest_0_4 = sext(dest_0_4[0,20]); # Result Flags... $(SIGN) = (dest_0_4 s< 0x0); # S Flag $(ZERO) = (dest_0_4 == 0x0); # Z Flag $(CARRY) = (dest_0_4 != 0x0); # C Flag build postIncrementStore; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } ############################# # No Repeat :ADCX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x6 & src16_8_4=0x3 & as=0x0 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { # Operation Flags... tmp_carry:1 = carry(XDEST_B_AD,$(CARRY)); #C Flag $(OVERFLOW) = scarry(XDEST_B_AD,$(CARRY)); #V Flag # Operation... XDEST_B_AD = XDEST_B_AD + $(CARRY); build tbl_bzero; # Result Flags... $(CARRY) = tmp_carry; $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :ADCX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x6 & src16_8_4=0x3 & as=0x0 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ... { # Operation Flags... 
tmp_carry:1 = carry(XDEST_W_AD,zext($(CARRY))); #C Flag $(OVERFLOW) = scarry(XDEST_W_AD,zext($(CARRY))); #V Flag # Operation... XDEST_W_AD = XDEST_W_AD + zext($(CARRY)); build tbl_wzero; # Result Flags... $(CARRY) = tmp_carry; $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :ADCX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x6 & src16_8_4=0x3 & as=0x0 & bow=1 & postIncrementStore) ... & XDEST_A_AD ... { tmpd:$(REG_SIZE) = XDEST_A_AD; tmpc:$(REG_SIZE) = zext($(CARRY)); tmp:$(REG_SIZE) = tmpc + XDEST_A_AD; XDEST_A_AD = sext(tmp[0,20]); setaddflags(XDEST_A_AD,tmpc,tmpd); build postIncrementStore; } :ADDX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x5 & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = carry(XSRC_B_AS, XDEST_B_AD); # C Flag $(OVERFLOW) = scarry(XSRC_B_AS, XDEST_B_AD); # V Flag # Operation... XDEST_B_AD = XSRC_B_AS + XDEST_B_AD; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :ADDX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x5 & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = carry(XSRC_W_AS, XDEST_W_AD); # C Flag $(OVERFLOW) = scarry(XSRC_W_AS, XDEST_W_AD); # V Flag # Operation... XDEST_W_AD = XSRC_W_AS + XDEST_W_AD; build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :ADDX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x5 & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... 
{ tmpd:$(REG_SIZE) = XDEST_A_AD; tmp:$(REG_SIZE) = XSRC_A_AS + XDEST_A_AD; XDEST_A_AD = sext(tmp[0,20]); setaddflags(XDEST_A_AD,XSRC_A_AS,tmpd); build postIncrementStore; } :ADDCX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x6 & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... tmp_carry:1 = (carry(XSRC_B_AS, $(CARRY)) || carry(XDEST_B_AD,XSRC_B_AS + $(CARRY))); #C Flag $(OVERFLOW) = (scarry(XSRC_B_AS, $(CARRY)) || scarry(XDEST_B_AD,XSRC_B_AS + $(CARRY))); #V Flag # Operation... XDEST_B_AD = XSRC_B_AS + XDEST_B_AD + $(CARRY); build tbl_bzero; # Result Flags... $(CARRY) = tmp_carry; $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :ADDCX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x6 & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... tmp_carry:1 = (carry(XSRC_W_AS,zext($(CARRY))) || carry(XDEST_W_AD,XSRC_W_AS + zext($(CARRY)))); #C Flag $(OVERFLOW) = (scarry(XSRC_W_AS,zext($(CARRY))) || scarry(XDEST_W_AD,XSRC_W_AS + zext($(CARRY)))); #V Flag # Operation... XDEST_W_AD = XSRC_W_AS + XDEST_W_AD + zext($(CARRY)); build tbl_wzero; # Result Flags... $(CARRY) = tmp_carry; $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :ADDCX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x6 & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { tmpd:$(REG_SIZE) = XDEST_A_AD; tmps:$(REG_SIZE) = XSRC_A_AS + zext($(CARRY)); tmp:$(REG_SIZE) = tmps + XDEST_A_AD; XDEST_A_AD = sext(tmp[0,20]); setaddflags(XDEST_A_AD,tmps,tmpd); build postIncrementStore; } :ANDX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xF & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... 
result:1 = XDEST_B_AD & XSRC_B_AS; XDEST_B_AD = result; build tbl_bzero; # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag $(CARRY) = (result != 0x0); # C Flag build postIncrementStore; } :ANDX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xF & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... result:2 = XDEST_W_AD & XSRC_W_AS; XDEST_W_AD = result; build tbl_wzero; # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag $(CARRY) = (result != 0x0); # C Flag build postIncrementStore; } :ANDX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xF & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... XDEST_A_AD = XDEST_A_AD & XSRC_A_AS; XDEST_A_AD = sext(XDEST_A_AD[0,20]); # Result Flags... $(SIGN) = (XDEST_A_AD s< 0x0); # S Flag $(ZERO) = (XDEST_A_AD == 0x0); # Z Flag $(CARRY) = (XDEST_A_AD != 0x0); # C Flag build postIncrementStore; } :BICX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xC & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { XDEST_B_AD = (~XSRC_B_AS) & XDEST_B_AD; build tbl_bzero; #Status bits are not affected build postIncrementStore; } :BICX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xC & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { XDEST_W_AD = (~XSRC_W_AS) & XDEST_W_AD; build tbl_wzero; #Status bits are not affected build postIncrementStore; } :BICX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xC & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... 
{ XDEST_A_AD = (~XSRC_A_AS) & XDEST_A_AD; #Status bits are not affected XDEST_A_AD = sext(XDEST_A_AD[0,20]); build postIncrementStore; } :BISX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xD & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { XDEST_B_AD = XSRC_B_AS | XDEST_B_AD; build tbl_bzero; #Status bits are not affected build postIncrementStore; } :BISX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xD & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { XDEST_W_AD = XSRC_W_AS | XDEST_W_AD; build tbl_wzero; #Status bits are not affected build postIncrementStore; } :BISX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xD & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { XDEST_A_AD = XSRC_A_AS | XDEST_A_AD; #Status bits are not affected XDEST_A_AD = sext(XDEST_A_AD[0,20]); build postIncrementStore; } :BITX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xB & bow=1 & postIncrement) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... result:1 = XDEST_B_AD & XSRC_B_AS; # Result Flags... $(CARRY) = (result != 0x0); # C Flag $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; } :BITX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xB & bow=0 & postIncrement) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... result:2 = XDEST_W_AD & XSRC_W_AS; # Result Flags... $(CARRY) = (result != 0x0); # C Flag $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; } :BITX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xB & bow=1 & postIncrement) ... & XSRC_A_AS ... & XDEST_A_AD ... { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... 
result:$(REG_SIZE) = XDEST_A_AD & XSRC_A_AS; # Result Flags... result = sext(result[0,20]); $(CARRY) = (result != 0x0); # C Flag $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; } :CLRX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x4 & src16_8_4=0x3 & as=0x0 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { XDEST_B_AD = 0; build tbl_bzero; #Status bits are not affected build postIncrementStore; } :CLRX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x4 & src16_8_4=0x3 & as=0x0 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ... { XDEST_W_AD = 0; build tbl_wzero; #Status bits are not affected build postIncrementStore; } :CLRX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x4 & src16_8_4=0x3 & as=0x0 & bow=1 & postIncrementStore) ... & XDEST_A_AD ... { XDEST_A_AD = 0; build postIncrementStore; } :CMPX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x9 & bow=1 & postIncrement) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = (XSRC_B_AS <= XDEST_B_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_B_AD, XSRC_B_AS); # V Flag # Operation... result:1 = (XDEST_B_AD - XSRC_B_AS); # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; } :CMPX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x9 & bow=0 & postIncrement) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = (XSRC_W_AS <= XDEST_W_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_W_AD, XSRC_W_AS); # V Flag # Operation... result:2 = (XDEST_W_AD - XSRC_W_AS); # Result Flags... $(SIGN) = (result s< 0x0); # S Flag $(ZERO) = (result == 0x0); # Z Flag build postIncrement; } :CMPX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x9 & bow=1 & postIncrement) ... & XSRC_A_AS ... & XDEST_A_AD ... 
{ tmp:$(REG_SIZE) = XDEST_A_AD - XSRC_A_AS; tmpd:$(REG_SIZE) = sext(tmp[0,20]); setsubflags(tmpd,XSRC_A_AS,XDEST_A_AD); build postIncrement; } :DADCX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xA & src16_8_4=0x3 & as=0x0 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = 0; # This should be overflow # Operation... XDEST_B_AD = bcd_add(XDEST_B_AD); build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :DADCX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xA & src16_8_4=0x3 & as=0x0 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = 0; # Don't currently have BCD overflow op # Operation... XDEST_W_AD = bcd_add(XDEST_W_AD); build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :DADCX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xA & src16_8_4=0x3 & as=0x0 & bow=1 & postIncrementStore) ... & XDEST_A_AD ... { # Operation Flags... $(CARRY) = 0; # Don't currently have BCD overflow op # Operation... XDEST_A_AD = bcd_add(XDEST_A_AD); XDEST_A_AD = sext(XDEST_A_AD[0,20]); # Result Flags... $(SIGN) = (XDEST_A_AD s< 0x0); # S Flag $(ZERO) = (XDEST_A_AD == 0x0); # Z Flag build postIncrementStore; } :DADDX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xA & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = 0; # This should be overflow # Operation... XDEST_B_AD = bcd_add(XSRC_B_AS,XDEST_B_AD); build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :DADDX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xA & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... 
$(CARRY) = 0; # Don't currently have BCD overflow op # Operation... XDEST_W_AD = bcd_add(XSRC_W_AS ,XDEST_W_AD); build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :DADDX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xA & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { # Operation Flags... $(CARRY) = 0; # Don't currently have BCD overflow op # Operation... XDEST_A_AD = bcd_add(XSRC_A_AS ,XDEST_A_AD); XDEST_A_AD = sext(XDEST_A_AD[0,20]); # Result Flags... $(SIGN) = (XDEST_A_AD s< 0x0); # S Flag $(ZERO) = (XDEST_A_AD == 0x0); # Z Flag build postIncrementStore; } :DECX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x8 & src16_8_4=0x3 & as=0x1 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = (1 <= XDEST_B_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_B_AD, 1:1); # V Flag # Operation... XDEST_B_AD = XDEST_B_AD - 1; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :DECX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x8 & src16_8_4=0x3 & as=0x1 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = (1 <= XDEST_W_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_W_AD, 1:2); # V Flag # Operation... XDEST_W_AD = XDEST_W_AD - 1:2; build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :DECX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x8 & src16_8_4=0x3 & as=0x1 & bow=1 & postIncrementStore) ... & XDEST_A_AD ... 
{ tmpd:$(REG_SIZE) = XDEST_A_AD; tmp:$(REG_SIZE) = XDEST_A_AD - 1:$(REG_SIZE); XDEST_A_AD = sext(tmp[0,20]); setsubflags(XDEST_A_AD,1:$(REG_SIZE),tmpd); build postIncrementStore; } :DECDX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x8 & src16_8_4=0x3 & as=0x2 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = (1 <= XDEST_B_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_B_AD, 2:1); # V Flag # Operation... XDEST_B_AD = XDEST_B_AD - 2; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :DECDX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x8 & src16_8_4=0x3 & as=0x2 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = (1 <= XDEST_W_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_W_AD, 2:2); # V Flag # Operation... XDEST_W_AD = XDEST_W_AD - 2:2; build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :DECDX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x8 & src16_8_4=0x3 & as=0x2 & bow=1 & postIncrementStore) ... & XDEST_A_AD ... { tmpd:$(REG_SIZE) = XDEST_A_AD; tmp:$(REG_SIZE) = XDEST_A_AD - 2:$(REG_SIZE); XDEST_A_AD = sext(tmp[0,20]); setsubflags(XDEST_A_AD,2:$(REG_SIZE),tmpd); build postIncrementStore; } :INCX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x5 & src16_8_4=0x3 & as=0x1 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = carry(XDEST_B_AD,1); # C Flag $(OVERFLOW) = scarry(XDEST_B_AD,1); # V Flag # Operation... XDEST_B_AD = 1 + XDEST_B_AD; build tbl_bzero; # Result Flags... 
$(SIGN) = (XDEST_B_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_B_AD == 0x0); # Z Flag
	build postIncrementStore;
}

# INCX.W dst -- increment word destination by 1; C/V from carry/scarry, then N/Z from result.
:INCX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x5 & src16_8_4=0x3 & as=0x1 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ...
{
	# Operation Flags...
	$(CARRY) = carry(XDEST_W_AD,1); # C Flag
	$(OVERFLOW) = scarry(XDEST_W_AD,1); # V Flag
	# Operation...
	XDEST_W_AD = 1 + XDEST_W_AD;
	build tbl_wzero;
	# Result Flags...
	$(SIGN) = (XDEST_W_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_W_AD == 0x0); # Z Flag
	build postIncrementStore;
}

# INCX.A dst -- increment 20-bit destination by 1; result re-sign-extended from 20 bits,
# flags derived by the setaddflags macro from (result, addend, original destination).
:INCX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x5 & src16_8_4=0x3 & as=0x1 & bow=1 & postIncrementStore) ... & XDEST_A_AD ...
{
	tmpd:$(REG_SIZE) = XDEST_A_AD;
	tmp:$(REG_SIZE) = 1 + XDEST_A_AD;
	XDEST_A_AD = sext(tmp[0,20]);
	setaddflags(XDEST_A_AD,1:$(REG_SIZE),tmpd);
	build postIncrementStore;
}

# INCDX.B dst -- increment byte destination by 2.
:INCDX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x5 & src16_8_4=0x3 & as=0x2 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ...
{
	# Operation Flags...
	$(CARRY) = carry(XDEST_B_AD,2); # C Flag
	$(OVERFLOW) = scarry(XDEST_B_AD,2); # V Flag
	# Operation...
	XDEST_B_AD = 2 + XDEST_B_AD;
	build tbl_bzero;
	# Result Flags...
	$(SIGN) = (XDEST_B_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_B_AD == 0x0); # Z Flag
	build postIncrementStore;
}

# INCDX.W dst -- increment word destination by 2.
:INCDX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x5 & src16_8_4=0x3 & as=0x2 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ...
{
	# Operation Flags...
	$(CARRY) = carry(XDEST_W_AD,2); # C Flag
	$(OVERFLOW) = scarry(XDEST_W_AD,2); # V Flag
	# Operation...
	XDEST_W_AD = 2 + XDEST_W_AD;
	build tbl_wzero;
	# Result Flags...
	$(SIGN) = (XDEST_W_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_W_AD == 0x0); # Z Flag
	build postIncrementStore;
}

# INCDX.A dst -- increment 20-bit destination by 2.
:INCDX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x5 & src16_8_4=0x3 & as=0x2 & bow=1 & postIncrementStore) ... & XDEST_A_AD ...
{
	tmpd:$(REG_SIZE) = XDEST_A_AD;
	tmp:$(REG_SIZE) = 2 + XDEST_A_AD;
	XDEST_A_AD = sext(tmp[0,20]);
	# BUGFIX: the addend is 2, not 1 -- the flag macro must see the value actually added
	# (compare DECDX.A, which passes 2:$(REG_SIZE) to setsubflags). Previously this was
	# 1:$(REG_SIZE), producing wrong C/V for INCDX.A.
	setaddflags(XDEST_A_AD,2:$(REG_SIZE),tmpd);
	build postIncrementStore;
}

# INVX.B dst -- one's complement of byte destination (XOR with -1).
:INVX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xE & src16_8_4=0x3 & as=0x3 & bow=1 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ...
{
	# Operation Flags...
	$(OVERFLOW) = XDEST_B_AD s< 0x0; # V Flag
	# Operation...
	XDEST_B_AD = XDEST_B_AD ^ -1;
	build tbl_bzero;
	# Result Flags...
	$(SIGN) = (XDEST_B_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_B_AD == 0x0); # Z Flag
	$(CARRY) = (XDEST_B_AD != 0x0); # C Flag
	build postIncrementStore;
}

# INVX.W dst -- one's complement of word destination.
:INVX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xE & src16_8_4=0x3 & as=0x3 & bow=0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ...
{
	# Operation Flags...
	$(OVERFLOW) = XDEST_W_AD s< 0x0; # V Flag
	# Operation...
	XDEST_W_AD = XDEST_W_AD ^ -1;
	build tbl_wzero;
	# Result Flags...
	$(SIGN) = (XDEST_W_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_W_AD == 0x0); # Z Flag
	$(CARRY) = (XDEST_W_AD != 0x0); # C Flag
	build postIncrementStore;
}

# INVX.A dst -- one's complement of 20-bit destination, re-sign-extended from 20 bits.
:INVX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xE & src16_8_4=0x3 & as=0x3 & bow=1 & postIncrementStore) ... & XDEST_A_AD ...
{
	# Operation Flags...
	$(OVERFLOW) = XDEST_A_AD s< 0x0; # V Flag
	# Operation...
	XDEST_A_AD = XDEST_A_AD ^ -1;
	XDEST_A_AD = sext(XDEST_A_AD[0,20]);
	# Result Flags...
	$(SIGN) = (XDEST_A_AD s< 0x0); # S Flag
	$(ZERO) = (XDEST_A_AD == 0x0); # Z Flag
	$(CARRY) = (XDEST_A_AD != 0x0); # C Flag
	build postIncrementStore;
}

# MOVX.B src, dst -- byte move; status bits untouched.
:MOVX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x4 & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ...
{
	XDEST_B_AD = XSRC_B_AS;
	build tbl_bzero;
	#Status bits are not affected
	build postIncrementStore;
}

# MOVX.W src, dst -- word move; status bits untouched (body continues below).
:MOVX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x4 & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ...
{ XDEST_W_AD = XSRC_W_AS; build tbl_wzero; #Status bits are not affected build postIncrementStore; } :MOVX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x4 & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { XDEST_A_AD = XSRC_A_AS; build postIncrementStore; } :POPX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x4 & src16_8_4=0x1 & as=0x3 & bow=1 & tbl_bzero) ... & XDEST_B_AD ... { XDEST_B_AD = *:1 SP; build tbl_bzero; SP = SP + 0x2; #Status bits are not affected } :POPX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x4 & src16_8_4=0x1 & as=0x3 & bow=0 & tbl_wzero) ... & XDEST_W_AD ... { XDEST_W_AD = *:2 SP; build tbl_wzero; SP = SP + 0x2; #Status bits are not affected } :POPX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x4 & src16_8_4=0x1 & as=0x3 & bow=1) ... & XDEST_A_AD ... { XDEST_A_AD = *:4 SP; SP = SP + 0x4; XDEST_A_AD = sext(XDEST_A_AD[0,20]); } :SBCX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x7 & bow=1 & src16_8_4=0x3 & as=0x0 & tbl_bzero & postIncrementStore) ... & XDEST_B_AD ... { # Operation Flags... brw:1 = 1 - $(CARRY); $(CARRY) = (brw <= XDEST_B_AD); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_B_AD, brw); # Operation... XDEST_B_AD = XDEST_B_AD - brw; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :SBCX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x7 & bow=0 & src16_8_4=0x3 & as=0x0 & tbl_wzero & postIncrementStore) ... & XDEST_W_AD ... { # Operation Flags... brw:2 = 1 - zext( $(CARRY) ); $(CARRY) = (brw <= XDEST_W_AD); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_W_AD, brw); # Operation... XDEST_W_AD = XDEST_W_AD - brw; build tbl_wzero; # Result Flags... 
$(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :SBCX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x7 & bow=1 & src16_8_4=0x3 & as=0x0 & postIncrementStore) ... & XDEST_A_AD ... { tmpd:$(REG_SIZE) = XDEST_A_AD; brw:$(REG_SIZE) = 1 - zext( $(CARRY) ); tmp:$(REG_SIZE) = XDEST_A_AD - brw; XDEST_A_AD = sext(tmp[0,20]); setsubflags(XDEST_A_AD,brw,tmpd); build postIncrementStore; } :SUBCX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x7 & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... brw:1 = 1 - $(CARRY); $(CARRY) = ((brw + XSRC_B_AS) <= XDEST_B_AD); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_B_AD, XSRC_B_AS + brw); # Operation... XDEST_B_AD = XDEST_B_AD - XSRC_B_AS - brw; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :SUBCX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x7 & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... brw:2 = 1 - zext( $(CARRY) ); $(CARRY) = ((brw + XSRC_W_AS) <= XDEST_W_AD); # Carry flag is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_W_AD, XSRC_W_AS + brw); # Operation... XDEST_W_AD = XDEST_W_AD - XSRC_W_AS - brw; build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :SUBCX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x7 & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... 
{ tmpd:$(REG_SIZE) = XDEST_A_AD; brw:$(REG_SIZE) = 1 - zext( $(CARRY) ); tmps:$(REG_SIZE) = XSRC_A_AS + brw; tmp:$(REG_SIZE) = XDEST_A_AD - tmps; XDEST_A_AD = sext(tmp[0,20]); setsubflags(XDEST_A_AD,tmps,tmpd); build postIncrementStore; } :SUBX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x8 & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = (XSRC_B_AS <= XDEST_B_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_B_AD, XSRC_B_AS); # V Flag # Operation... XDEST_B_AD = XDEST_B_AD - XSRC_B_AS; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrementStore; } :SUBX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x8 & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = (XSRC_W_AS <= XDEST_W_AD); # Carry is NOT set if there is a borrow $(OVERFLOW) = sborrow(XDEST_W_AD, XSRC_W_AS); # V Flag # Operation... XDEST_W_AD = XDEST_W_AD - XSRC_W_AS; build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrementStore; } :SUBX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x8 & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { tmpd:$(REG_SIZE) = XDEST_A_AD; tmp:$(REG_SIZE) = XDEST_A_AD - XSRC_A_AS; XDEST_A_AD = sext(tmp[0,20]); setsubflags(XDEST_A_AD,XSRC_A_AS,tmpd); build postIncrementStore; } :TSTX.B XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x9 & bow=1 & src16_8_4=0x3 & as=0x0 & postIncrement) ... & XDEST_B_AD ... { # Operation Flags... $(CARRY) = 1; # Carry is NOT set if there is a borrow $(OVERFLOW) = 0; # V Flag # Result Flags... 
$(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag build postIncrement; } :TSTX.W XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x9 & bow=0 & src16_8_4=0x3 & as=0x0 & postIncrement) ... & XDEST_W_AD ... { # Operation Flags... $(CARRY) = 1; # Carry is NOT set if there is a borrow $(OVERFLOW) = 0; # V Flag # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag build postIncrement; } :TSTX.A XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x9 & bow=1 & src16_8_4=0x3 & as=0x0 & postIncrement) ... & XDEST_A_AD ... { setsubflags(XDEST_A_AD,0:$(REG_SIZE),XDEST_A_AD); build postIncrement; } :XORX.B XSRC_B_AS, XDEST_B_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xE & bow=1 & tbl_bzero & postIncrementStore) ... & XSRC_B_AS ... & XDEST_B_AD ... { # Operation Flags... $(OVERFLOW) = ((XDEST_B_AD s< 0x0) && (XSRC_B_AS s< 0x0)) ; # V Flag # Operation... XDEST_B_AD = XDEST_B_AD ^ XSRC_B_AS; build tbl_bzero; # Result Flags... $(SIGN) = (XDEST_B_AD s< 0x0); # S Flag $(ZERO) = (XDEST_B_AD == 0x0); # Z Flag $(CARRY) = (XDEST_B_AD != 0x0); # C Flag build postIncrementStore; } :XORX.W XSRC_W_AS, XDEST_W_AD is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0xE & bow=0 & tbl_wzero & postIncrementStore) ... & XSRC_W_AS ... & XDEST_W_AD ... { # Operation Flags... $(OVERFLOW) = ((XDEST_W_AD s< 0x0) && (XSRC_W_AS s< 0x0)) ; # V Flag # Operation... XDEST_W_AD = XDEST_W_AD ^ XSRC_W_AS; build tbl_wzero; # Result Flags... $(SIGN) = (XDEST_W_AD s< 0x0); # S Flag $(ZERO) = (XDEST_W_AD == 0x0); # Z Flag $(CARRY) = (XDEST_W_AD != 0x0); # C Flag build postIncrementStore; } :XORX.A XSRC_A_AS, XDEST_A_AD is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0xE & bow=1 & postIncrementStore) ... & XSRC_A_AS ... & XDEST_A_AD ... { # Operation Flags... $(OVERFLOW) = ((XDEST_A_AD s< 0x0) && (XSRC_A_AS s< 0x0)) ; # V Flag # Operation... XDEST_A_AD = XDEST_A_AD ^ XSRC_A_AS; XDEST_A_AD = sext(XDEST_A_AD[0,20]); # Result Flags... 
$(SIGN) = (XDEST_A_AD s< 0x0); # S Flag $(ZERO) = (XDEST_A_AD == 0x0); # Z Flag $(CARRY) = (XDEST_A_AD != 0x0); # C Flag build postIncrementStore; } ############################# # # Single Operand # ############################# # Repeat enabled # Note: The manual says PUSHX doesn't use extension word. The manual is *WRONG* :PUSHX.B XRREG_B_AS is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x2 & bow=0x1 & postRegIncrement & XRREG_B_AS { SP = SP - 0x2; *:1 SP = XRREG_B_AS; #Status bits are not affected build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :PUSHX.W XRREG_W_AS is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x2 & bow=0x0 & postRegIncrement & XRREG_W_AS { SP = SP - 0x2; *:2 SP = XRREG_W_AS; #Status bits are not affected build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :PUSHX.A XRREG_A_AS is ctx_haveext=4 & ctx_al=0 & op16_12_4=0x1 & op16_8_4=0x2 & bow=0x1 & postRegIncrement & XRREG_A_AS { SP = SP - 0x4; *:$(REG_SIZE) SP = XRREG_A_AS; #Status bits are not affected build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RRAX.B XRREG_B_AS_DEST is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x1 & bow=0x1 & postRegIncrement & XRREG_B_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... $(CARRY) = (XRREG_B_AS_DEST & 0x1); XRREG_B_AS_DEST = XRREG_B_AS_DEST s>> 1; # Result Flags... $(SIGN) = (XRREG_B_AS_DEST s< 0x0); # S Flag $(ZERO) = (XRREG_B_AS_DEST == 0x0); # Z Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RRAX.W XRREG_W_AS_DEST is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x1 & bow=0x0 & postRegIncrement & XRREG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... $(CARRY) = XRREG_W_AS_DEST[0,1]; XRREG_W_AS_DEST = XRREG_W_AS_DEST s>> 1; # Result Flags... 
$(SIGN) = (XRREG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (XRREG_W_AS_DEST == 0x0); # Z Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RRAX.A XRREG_A_AS_DEST is ctx_haveext=4 & ctx_al=0 & op16_12_4=0x1 & op16_8_4=0x1 & bow=0x1 & postRegIncrement & XRREG_A_AS_DEST { $(CARRY) = XRREG_A_AS_DEST[0,1]; XRREG_A_AS_DEST = (XRREG_A_AS_DEST s>> 1); $(OVERFLOW) = 0; $(SIGN) = (XRREG_A_AS_DEST[19,1] != 0); $(ZERO) = (XRREG_A_AS_DEST == 0); build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RRCX.B XRREG_B_AS_DEST is ctx_haveext=4 & ctx_al=1 & ctx_zc & op16_12_4=0x1 & op16_8_4=0x0 & bow=0x1 & postRegIncrement & XRREG_B_AS_DEST & repeat_carry { # Operation Flags... build repeat_carry; $(OVERFLOW) = ((XRREG_B_AS_DEST != 0x0) && ($(CARRY) == 0x1)); # V Flag # Operation... tmp:1 = $(CARRY); $(CARRY) = (XRREG_B_AS_DEST & 0x1); XRREG_B_AS_DEST = ((tmp << 0x7) | (XRREG_B_AS_DEST >> 0x1)); # Result Flags... $(SIGN) = (XRREG_B_AS_DEST s< 0x0); # S Flag $(ZERO) = (XRREG_B_AS_DEST == 0x0); # Z Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RRCX.W XRREG_W_AS_DEST is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x0 & bow=0x0 & postRegIncrement & XRREG_W_AS_DEST & repeat_carry { build repeat_carry; # Operation Flags... $(OVERFLOW) = ((XRREG_W_AS_DEST != 0x0) && ($(CARRY) == 0x1)); # V Flag # Operation... tmp:1 = $(CARRY); $(CARRY) = XRREG_W_AS_DEST[0,1]; XRREG_W_AS_DEST = ((zext(tmp) << 0xF) | (XRREG_W_AS_DEST >> 0x1)); # Result Flags... $(SIGN) = (XRREG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (XRREG_W_AS_DEST == 0x0); # Z Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :RRCX.A XRREG_A_AS_DEST is ctx_haveext=4 & ctx_al=0 & op16_12_4=0x1 & op16_8_4=0x0 & bow=0x1 & postRegIncrement & XRREG_A_AS_DEST & repeat_carry { build repeat_carry; # Operation Flags... $(OVERFLOW) = ((XRREG_A_AS_DEST != 0x0) && ($(CARRY) == 0x1)); # V Flag # Operation... 
tmp:1 = $(CARRY); $(CARRY) = XRREG_A_AS_DEST[0,1]; XRREG_A_AS_DEST = ((zext(tmp) << 0x13) | ((XRREG_A_AS_DEST >> 0x1) & 0xEFFFF)); XRREG_A_AS_DEST = sext(XRREG_A_AS_DEST[0,20]); # Result Flags... $(SIGN) = (XRREG_A_AS_DEST s< 0x0); # S Flag $(ZERO) = (XRREG_A_AS_DEST == 0x0); # Z Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SWPBX.W XRREG_W_AS_DEST is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x1 & as=0x0 & bow=0x0 & postRegIncrement & XRREG_W_AS_DEST { lowByte:1 = XRREG_W_AS_DEST[0,8]; highByte:1 = XRREG_W_AS_DEST[8,8]; XRREG_W_AS_DEST = (((zext(lowByte)) << 0x8) | zext(highByte)); #Status bits are not affected build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SWPBX.A XRREG_A_AS_DEST2 is ctx_haveext=4 & ctx_al=0 & op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x1 & as=0x0 & bow=0x0 & postRegIncrement & XRREG_A_AS_DEST2 { lowByte:1 = XRREG_A_AS_DEST2[0,8]; highByte:1 = XRREG_A_AS_DEST2[8,8]; XRREG_A_AS_DEST2[8,8] = lowByte; XRREG_A_AS_DEST2[0,8] = highByte; XRREG_A_AS_DEST2 = zext(XRREG_A_AS_DEST2[0,20]); build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SXTX.W XRREG_W_AS_DEST is ctx_haveext=4 & ctx_al=1 & op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x1 & as=0x0 & bow=0x0 & postRegIncrement & XRREG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... XRREG_W_AS_DEST = sext(XRREG_W_AS_DEST:1); # Result Flags... $(SIGN) = (XRREG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (XRREG_W_AS_DEST == 0x0); # Z Flag $(CARRY) = (XRREG_W_AS_DEST != 0x0); # C Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } :SXTX.A XRREG_A_AS_DEST2 is ctx_haveext=4 & ctx_al=0 & op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x1 & as=0x0 & bow=0x0 & postRegIncrement & XRREG_A_AS_DEST2 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... XRREG_A_AS_DEST2 = sext(XRREG_A_AS_DEST2:1); # Result Flags... 
$(SIGN) = (XRREG_A_AS_DEST2 s< 0x0); # S Flag $(ZERO) = (XRREG_A_AS_DEST2 == 0x0); # Z Flag $(CARRY) = (XRREG_A_AS_DEST2 != 0x0); # C Flag build postRegIncrement; if (CNT == 0) goto inst_next; CNT = CNT - 1; goto ; } ############################# # No Repeat :PUSHX.B XREG_B_AS is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ... & XREG_B_AS { SP = SP - 0x2; *:1 SP = XREG_B_AS; #Status bits are not affected build postRegIncrement; } :PUSHX.W XREG_W_AS is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0 & bow=0x0 & postRegIncrement) ... & XREG_W_AS { SP = SP - 0x2; *:2 SP = XREG_W_AS; #Status bits are not affected build postRegIncrement; } :PUSHX.A XREG_A_AS is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x1 & op16_8_4=0x2 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ... & XREG_A_AS { SP = SP - 0x4; *:$(REG_SIZE) SP = XREG_A_AS; build postRegIncrement; } :RRAX.B XREG_B_AS_DEST is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ... & XREG_B_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... $(CARRY) = (XREG_B_AS_DEST & 0x1); XREG_B_AS_DEST = XREG_B_AS_DEST s>> 1; # Result Flags... $(SIGN) = (XREG_B_AS_DEST s< 0x0); # S Flag $(ZERO) = (XREG_B_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :RRAX.W XREG_W_AS_DEST is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x0 & bow=0x0 & postRegIncrement) ... & XREG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag (reset) # Operation... $(CARRY) = XREG_W_AS_DEST[0,1]; XREG_W_AS_DEST = XREG_W_AS_DEST s>> 1; # Result Flags... $(SIGN) = (XREG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (XREG_W_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :RRAX.A XREG_A_AS_DEST is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ... 
# FIX(review): same RRCX.A mask defect as the repeated (XRREG) form — the
# shifted source must be masked with 0x7FFFF (keep bits 0-18) before OR-ing
# the old carry in at bit 19. The previous mask 0xEFFFF preserved bit 19
# (could corrupt the inserted carry with sign-extension residue) and cleared
# bit 16, unconditionally losing a data bit of the 20-bit rotate result.
& XREG_A_AS_DEST { $(CARRY) = XREG_A_AS_DEST[0,1]; XREG_A_AS_DEST = (XREG_A_AS_DEST s>> 1); $(OVERFLOW) = 0; $(SIGN) = (XREG_A_AS_DEST[19,1] != 0); $(ZERO) = (XREG_A_AS_DEST == 0); build postRegIncrement; } :RRCX.B XREG_B_AS_DEST is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ... & XREG_B_AS_DEST { # Operation Flags... $(OVERFLOW) = ((XREG_B_AS_DEST != 0x0) && ($(CARRY) == 0x1)); # V Flag # Operation... tmp:1 = $(CARRY); $(CARRY) = (XREG_B_AS_DEST & 0x1); XREG_B_AS_DEST = ((tmp << 0x7) | (XREG_B_AS_DEST >> 0x1)); # Result Flags... $(SIGN) = (XREG_B_AS_DEST s< 0x0); # S Flag $(ZERO) = (XREG_B_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :RRCX.W XREG_W_AS_DEST is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x0 & bow=0x0 & postRegIncrement) ... & XREG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = ((XREG_W_AS_DEST != 0x0) && ($(CARRY) == 0x1)); # V Flag # Operation... tmp:1 = $(CARRY); $(CARRY) = XREG_W_AS_DEST[0,1]; XREG_W_AS_DEST = ((zext(tmp) << 0xF) | (XREG_W_AS_DEST >> 0x1)); # Result Flags... $(SIGN) = (XREG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (XREG_W_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :RRCX.A XREG_A_AS_DEST is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x0 & bow=0x1 & postRegIncrement) ... & XREG_A_AS_DEST { # Operation Flags... $(OVERFLOW) = ((XREG_A_AS_DEST != 0x0) && ($(CARRY) == 0x1)); # V Flag # Operation... tmp:1 = $(CARRY); $(CARRY) = XREG_A_AS_DEST[0,1]; XREG_A_AS_DEST = ((zext(tmp) << 0x13) | ((XREG_A_AS_DEST >> 0x1) & 0x7FFFF)); XREG_A_AS_DEST = sext(XREG_A_AS_DEST[0,20]); # Result Flags... $(SIGN) = (XREG_A_AS_DEST s< 0x0); # S Flag $(ZERO) = (XREG_A_AS_DEST == 0x0); # Z Flag build postRegIncrement; } :SWPBX.W XREG_W_AS_DEST is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x1 & bow=0x0 & postRegIncrement) ... 
& XREG_W_AS_DEST { lowByte:1 = XREG_W_AS_DEST[0,8]; highByte:1 = XREG_W_AS_DEST[8,8]; XREG_W_AS_DEST = (((zext(lowByte)) << 0x8) | zext(highByte)); #Status bits are not affected build postRegIncrement; } # Yes, for SXTX and SWPB, the normal width selectors are different. Hence, for the A versions, we have a different dest reg subtable. :SWPBX.A XREG_A_AS_DEST2 is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x1 & op16_8_4=0x0 & op16_7_1=0x1 & bow=0x0 & postRegIncrement) ... & XREG_A_AS_DEST2 { lowByte:1 = XREG_A_AS_DEST2[0,8]; highByte:1 = XREG_A_AS_DEST2[8,8]; XREG_A_AS_DEST2[8,8] = lowByte; XREG_A_AS_DEST2[0,8] = highByte; XREG_A_AS_DEST2 = zext(XREG_A_AS_DEST2[0,20]); build postRegIncrement; } :SXTX.W XREG_W_AS_DEST is ctx_haveext=7 & ctx_al=1 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x1 & bow=0x0 & postRegIncrement) ... & XREG_W_AS_DEST { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... XREG_W_AS_DEST = sext(XREG_W_AS_DEST:1); # Result Flags... $(SIGN) = (XREG_W_AS_DEST s< 0x0); # S Flag $(ZERO) = (XREG_W_AS_DEST == 0x0); # Z Flag $(CARRY) = (XREG_W_AS_DEST != 0x0); # C Flag build postRegIncrement; } # Yes, for SXTX and SWPB, the normal width selectors are different. Hence, for the A versions, we have a different dest reg subtable. :SXTX.A XREG_A_AS_DEST2 is ctx_haveext=7 & ctx_al=0 & (op16_12_4=0x1 & op16_8_4=0x1 & op16_7_1=0x1 & bow=0x0 & postRegIncrement) ... & XREG_A_AS_DEST2 { # Operation Flags... $(OVERFLOW) = 0x0; # V Flag # Operation... XREG_A_AS_DEST2 = sext(XREG_A_AS_DEST2:1); # Result Flags... 
$(SIGN) = (XREG_A_AS_DEST2 s< 0x0); # S Flag $(ZERO) = (XREG_A_AS_DEST2 == 0x0); # Z Flag $(CARRY) = (XREG_A_AS_DEST2 != 0x0); # C Flag build postRegIncrement; } ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430.cspec ================================================ ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430.dwarf ================================================ ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430.ldefs ================================================ TI MSP430 16-Bit MicroController TI MSP430X 20-Bit MicroController ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430.pspec ================================================ ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430.slaspec ================================================ @define ENDIAN "little" @define REG_SIZE "2" @include "TI430Common.sinc" ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430X.cspec ================================================ ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430X.dwarf ================================================ ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/TI_MSP430X.slaspec ================================================ @define ENDIAN "little" @define REG_SIZE "4" @include "TI430Common.sinc" @include "TI430X.sinc" ================================================ FILE: pypcode/processors/TI_MSP430/data/languages/ti_msp430.opinion ================================================ ================================================ FILE: pypcode/processors/TI_MSP430/data/manuals/MSP430.idx 
================================================ @MSP430.pdf[TI MSP430x2xx Family User's Guide, 2008, SLAU144E] ADC, 177 ADD, 178 ADDC, 179 AND, 180 BIC, 181 BIS, 182 BIT, 183 BR, 184 BRANCH, 184 CALL, 185 CLR, 186 CLRC, 187 CLRN, 188 CLRZ, 189 CMP, 190 DADC, 191 DADD, 192 DEC, 193 DECD, 194 DINT, 195 EINT, 196 INC, 197 INCD, 198 INV, 199 JC, 200 JHS, 200 JEQ, 201 JZ, 201 JGE, 202 JL, 203 JMP, 204 JN, 205 JNC, 206 JLO, 206 JNZ, 207 JNE, 207 MOV, 208 NOP, 209 POP, 210 PUSH, 211 RET, 212 RETI, 213 RLA, 214 RLC, 215 RRA, 216 RRC, 217 SBC, 218 SETC, 219 SETN, 220 SETZ, 221 SUB, 222 SUBC, 223 SWPB, 224 SXT, 225 TST, 226 XOR, 227 ADCX, 229 ADDX, 230 ADDCX, 231 ANDX, 232 BICX, 233 BISX, 234 BITX, 235 CLRX, 236 CMPX, 237 DADCX, 238 DADDX, 239 DECX, 240 DECDX, 241 INCX, 242 INCDX, 243 INVX, 244 MOVX, 245 POPM, 247 PUSHM, 248 POPX, 249 PUSHX, 250 RLAM, 251 RLAX, 252 RLCX, 253 RRAM, 254 RRAX, 255 RRCM, 257 RRCX, 258 RRUM, 260 RRUX, 261 SBCX, 262 SUBX, 263 SUBCX, 264 SWPBX, 265 SXTX, 267 TSTX, 269 XORX, 270 ADDA, 272 BRA, 273 CALLA, 275 CLRA, 277 CMPA, 278 DECDA, 279 INCDA, 280 MOVA, 281 RETA, 283 TSTA, 284 SUBA, 285 ================================================ FILE: pypcode/processors/Toy/data/languages/old/ToyV00BE64.lang ================================================ Toy:BE:64:default Toy default 64 ================================================ FILE: pypcode/processors/Toy/data/languages/old/ToyV0BE64.trans ================================================ Toy:BE:64:default Toy:BE:64:default ================================================ FILE: pypcode/processors/Toy/data/languages/old/ToyV0LE64.lang ================================================ Toy:LE:64:default Toy default 64 ================================================ FILE: pypcode/processors/Toy/data/languages/old/ToyV0LE64.trans ================================================ Toy:LE:64:default Toy:LE:64:default ================================================ FILE: 
pypcode/processors/Toy/data/languages/old/v01stuff/toy.cspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/old/v01stuff/toy.ldefs_v01 ================================================ Toy (test) processor 64-bit big-endian Toy (test) processor 64-bit little-endian ================================================ FILE: pypcode/processors/Toy/data/languages/old/v01stuff/toy.sinc ================================================ # Main slaspec must define endianness and alignment @ifndef WORDSIZE @define WORDSIZE "1" @endif define space ROM type=ram_space size=$(SIZE) wordsize=$(WORDSIZE) default; define space register type=register_space size=2; define register offset=0x1000 size=$(SIZE) [ a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 sp lr pc ]; # STATUS REGISTER MAP: (LOW) # C - CARRY # Z - ZERO # N - NEGATIVE # V - OVERFLOW define register offset=0x1100 size=1 [ C Z N V ]; ================================================ FILE: pypcode/processors/Toy/data/languages/old/v01stuff/toy64.cspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/old/v01stuff/toyInstructions.sinc ================================================ #### immediates # 0iii dddd iiii iiii # imm rd, i load 11-bit unsigned int into rd # 10nn dddd nnnn nnnn # simm rd, n load 10-bit signed int into rd # #### arithmetic # 1100 0000 ssss tttt # add rs, rt rs = rs + rt # 1100 0001 ssss tttt # sub rs, rt rs = rs - rt # 1100 0010 ssss tttt # rsub rs, rt rs = rt - rs # 1100 0011 ssss tttt # mul rs, rt rs = rs * rt # 1100 0100 ssss tttt # div rs, rt rs = rs / rt # 1100 0101 ssss tttt # mod rs, rt rs = rs % rt # 1100 0110 ssss tttt # cmp rs, rt (set status only based on signed compare (rs - rt)) # 1100 0111 ssss tttt # ucmp rs, rt (set status only based on unsigned compare (rs - rt)) # # 1100 1000 ssss nnnn # add rs, n rs = rs + 
n # 1100 1001 ssss nnnn # sub rs, n rs = rs - n # 1100 1010 ssss nnnn # rsub rs, n rs = n - rs # 1100 1011 ssss nnnn # mul rs, n rs = rs * n # 1100 1100 ssss nnnn # div rs, n rs = rs / n # 1100 1101 ssss iiii # mod rs, i rs = rs % i # 1100 1110 ssss nnnn # cmp rs, n (set status only based on signed compare (rs - n)) # 1100 1111 ssss iiii # ucmp rs, i (set status only based on unsigned compare (rs - i)) # #### logic # 1101 0000 ssss tttt # and rs, rt rs = rs & rt # 1101 0001 ssss tttt # or rs, rt rs = rs | rt # 1101 0010 ssss tttt # xor rs, rt rs = rs ^ rt # 1101 0011 ssss tttt # lsr rs, rt rs = rs >> rt # 1101 0100 ssss tttt # asr rs, rt rs = rs s>> rt # 1101 0101 ssss tttt # lsl rs, rt rs = rs << rt # 1101 1000 ssss tttt # saa rs, rt rs = (rs << 11) | rt # # 1101 1011 ssss iiii # lsr rs, i rs = rs >> i # 1101 1100 ssss iiii # asr rs, i rs = rs s>> i # 1101 1101 ssss iiii # lsl rs, i rs = rs << i # 1101 1110 ssss 0000 # inv rs rs = ~rs # 1101 1110 ssss 0001 # neg rs rs = -rs # #### memory # 1101 0110 ssss tttt # load rs, [rt] rs = [rt] # 1101 0111 ssss tttt # store [rs], rt [rs] = rt # 1101 1111 ssss tttt # mov rs, rt rs = rt # #### flow # 1110 nnnn nnnn 0ccc # brcc n if ccc goto pc + n # 1110 nnnn nnnn 1ccc # brdscc n if ccc goto pc + n with delay slot # 1111 0000 ssss 0ccc # brcc rs if ccc goto rs & ~1 # 1111 0001 ssss 0ccc # brdscc rs if ccc goto rs & ~1 with delay slot # 1111 0010 ssss 0000 # push rs push rs # 1111 0011 ssss 0000 # pop rs pop rs # 1111 0100 0000 0000 # ret return # 1111 0101 nnnn nnnn # callds n call n with delay slot # 1111 0110 ssss 0000 # call rs call rs # 1111 1nnn nnnn nnnn # call n call n # #### RESERVED # 1101 1001 xxxx xxxx # RESERVED BANK # 1101 1010 xxxx xxxx # RESERVED BANK # 1111 0111 xxxx xxxx # RESERVED BANK define token instr(16) op1515 = (15, 15) op1415 = (14, 15) op1215 = (12, 15) op1111 = (11, 11) op0811 = (8, 11) op0007 = (0, 7) op0003 = (0, 3) op0303 = (3, 3) rd = (8, 11) rs = (4, 7) rt = (0, 3) imm1214 = (12, 14) imm0007 = 
(0, 7) imm0003 = (0, 3) simm1213 = (12, 13) signed simm0010 = (0, 10) signed simm0411 = (4, 11) signed simm0007 = (0, 7) signed simm0003 = (0, 3) signed cc0911 = (9, 11) cc0002 = (0, 2) ; attach variables [ rd rs rt ] [ a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 sp lr pc ]; # addressing mode subs Simm4: "#"^simm0003 is simm0003 { export *[const]:$(SIZE) simm0003; } Simm10: "#"^computed is simm1213 & imm0007 [ computed = (simm1213 << 8) | imm0007; ] { export *[const]:$(SIZE) computed; } Imm4: "#"^imm0003 is imm0003 { export *[const]:$(SIZE) imm0003; } Imm11: "#"^computed is imm1214 & imm0007 [ computed = (imm1214 << 8) | imm0007; ] { export *[const]:$(SIZE) computed; } Rel8: addr is simm0007 [ addr = inst_start + simm0007; ] { export *:$(SIZE) addr; } Rel82: addr is simm0411 [ addr = inst_start + simm0411; ] { export *:$(SIZE) addr; } Rel11: addr is simm0010 [ addr = inst_start + simm0010; ] { export *:$(SIZE) addr; } RS: [rs] is rs { export *[ROM]:$(SIZE) rs; } RT: [rt] is rt { export *[ROM]:$(SIZE) rt; } CC: "eq" is cc0002=0x0 { export Z; } CC: "ne" is cc0002=0x1 { tmp = !Z; export tmp; } CC: "lt" is cc0002=0x2 { tmp = N != V; export tmp; } CC: "le" is cc0002=0x3 { tmp = Z || (N != V); export tmp; } CC: "lo" is cc0002=0x4 { export C; } CC: "mi" is cc0002=0x5 { export N; } CC: "vs" is cc0002=0x6 { export V; } CC: "" is cc0002=0x7 { export 1:1; } COND: CC is CC { if (!CC) goto inst_next; } COND: CC is CC & cc0002=0x7 { } # unconditional macro resultflags(result) { N = result s< 0; Z = result == 0; } macro addflags(a, b) { C = carry(a, b); V = scarry(a, b); } macro subflags(a, b) { C = a s< b; V = sborrow(a, b); } macro logicflags() { C = 0; V = 0; } # operations :imm rd, Imm11 is $(INSTR_PHASE) op1515=0x0 & rd & Imm11 { logicflags(); rd = Imm11; resultflags(rd); } :simm rd, Simm10 is $(INSTR_PHASE) op1415=0x2 & rd & Simm10 { logicflags(); rd = Simm10; resultflags(rd); } :add rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x0 & rs & rt { addflags(rs, rt); rs = rs + rt; 
resultflags(rs); } :sub rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x1 & rs & rt { subflags(rs, rt); rs = rs - rt; resultflags(rs); } :rsub rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x2 & rs & rt { subflags(rt, rs); rs = rt - rs; resultflags(rs); } :mul rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x3 & rs & rt { rs = rs * rt; resultflags(rs); } # fix C & V :div rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x4 & rs & rt { rs = rs / rt; resultflags(rs); } # fix C & V :mod rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x5 & rs & rt { rs = rs % rt; resultflags(rs); } # fix C & V :cmp rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x6 & rs & rt { subflags(rs, rt); tmp:$(SIZE) = rs - rt; resultflags(tmp); } :ucmp rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x7 & rs & rt { logicflags(); N = rs < rt; Z = rs == rt; } :add rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0x8 & rs & Simm4 { addflags(rs, Simm4); rs = rs + Simm4; resultflags(rs); } :sub rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0x9 & rs & Simm4 { subflags(rs, Simm4); rs = rs - Simm4; resultflags(rs); } :rsub rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xa & rs & Simm4 { subflags(Simm4, rs); rs = Simm4 - rs; resultflags(rs); } :mul rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xb & rs & Simm4 { rs = rs * Simm4; resultflags(rs); } # fix C & V :div rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xc & rs & Simm4 { rs = rs / Simm4; resultflags(rs); } # fix C & V :mod rs, Imm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xd & rs & Imm4 { rs = rs % Imm4; resultflags(rs); } # fix C & V :cmp rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xe & rs & Simm4 { subflags(rs, Simm4); tmp:$(SIZE) = rs - Simm4; resultflags(tmp); } :ucmp rs, Imm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xf & rs & Imm4 { logicflags(); N = rs < Imm4; Z = rs == Imm4; } :and rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x0 & rs & rt { logicflags(); rs = rs & rt; resultflags(rs); } :or rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x1 & rs & 
rt { logicflags(); rs = rs | rt; resultflags(rs); } :xor rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x2 & rs & rt { logicflags(); rs = rs ^ rt; resultflags(rs); } :lsr rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x3 & rs & rt { logicflags(); rs = rs >> rt; resultflags(rs); } :asr rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x4 & rs & rt { logicflags(); rs = rs s>> rt; resultflags(rs); } :lsl rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x5 & rs & rt { logicflags(); rs = rs << rt; resultflags(rs); } # saa == shift and accumulate; very useful for building up a 32 bit or 64 bit value in pieces, like: # imm r12, #32b # imm r11, #7d7 # saa r12, r11 # imm r11, #2be # saa r12, r11 # # now r12 contains '0xcafebabe' (decompiler reconstitutes the pieces seamlessly) :saa rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x8 & rs & rt { logicflags(); rs = (rs << 11) | rt; resultflags(rs); } :lsr rs, Imm4 is $(INSTR_PHASE) op1215=0xd & op0811=0xb & rs & Imm4 { logicflags(); rs = rs >> Imm4; resultflags(rs); } :asr rs, Imm4 is $(INSTR_PHASE) op1215=0xd & op0811=0xc & rs & Imm4 { logicflags(); rs = rs s>> Imm4; resultflags(rs); } :lsl rs, Imm4 is $(INSTR_PHASE) op1215=0xd & op0811=0xd & rs & Imm4 { logicflags(); rs = rs << Imm4; resultflags(rs); } # fix C & V :inv rs is $(INSTR_PHASE) op1215=0xd & op0811=0xe & rs & op0003=0x0 { logicflags(); rs = ~rs; resultflags(rs); } :neg rs is $(INSTR_PHASE) op1215=0xd & op0811=0xe & rs & op0003=0x1 { logicflags(); rs = -rs; resultflags(rs); } :load rs, RT is $(INSTR_PHASE) op1215=0xd & op0811=0x6 & rs & RT { rs = RT; logicflags(); resultflags(rs); } :store RS, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x7 & RS & rt { RS = rt; logicflags(); resultflags(rt); } :mov rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0xf & rs & rt { rs = rt; logicflags(); resultflags(rs); } :br^COND Rel82 is $(INSTR_PHASE) op1215=0xe & op0303=0x0 & COND & Rel82 { build COND; goto Rel82; } :brds^COND Rel82 is $(INSTR_PHASE) op1215=0xe & op0303=0x1 & COND & Rel82 { build 
COND; delayslot(1); goto Rel82; } :br^COND rs is $(INSTR_PHASE) op1215=0xf & op0811=0x0 & COND & rs & op0303=0x0 { build COND; goto [rs]; } :brds^COND rs is $(INSTR_PHASE) op1215=0xf & op0811=0x1 & COND & rs & op0303=0x0 { build COND; delayslot(1); goto [rs]; } @ifdef POS_STACK :push rs is $(INSTR_PHASE) op1215=0xf & op0811=0x2 & rs & op0003=0x0 { *[ROM]:$(SIZE) sp = rs; sp = sp + $(SIZE); logicflags(); resultflags(rs); } :pop rs is $(INSTR_PHASE) op1215=0xf & op0811=0x3 & rs & op0003=0x0 { sp = sp - $(SIZE); rs = *[ROM]:$(SIZE) sp; logicflags(); resultflags(rs); } @else :push rs is $(INSTR_PHASE) op1215=0xf & op0811=0x2 & rs & op0003=0x0 { *[ROM]:$(SIZE) sp = rs; sp = sp - $(SIZE); logicflags(); resultflags(rs); } :pop rs is $(INSTR_PHASE) op1215=0xf & op0811=0x3 & rs & op0003=0x0 { sp = sp + $(SIZE); rs = *[ROM]:$(SIZE) sp; logicflags(); resultflags(rs); } @endif :ret is $(INSTR_PHASE) op1215=0xf & op0811=0x4 & op0007=0x0 { return [lr]; } :callds Rel8 is $(INSTR_PHASE) op1215=0xf & op0811=0x5 & Rel8 { delayslot(1); lr = inst_next; call Rel8; } :call rs is $(INSTR_PHASE) op1215=0xf & op0811=0x6 & rs & op0003=0x0 { lr = inst_next; call [rs]; } :call Rel11 is $(INSTR_PHASE) op1215=0xf & op1111=0x1 & Rel11 { lr = inst_next; call Rel11; } ================================================ FILE: pypcode/processors/Toy/data/languages/old/v01stuff/toyPosStack.cspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/toy.cspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/toy.ldefs ================================================ Toy (test) processor 32-bit big-endian Toy (test) processor 32-bit big-endian Toy (test) processor 32-bit little-endian Toy (test) processor 32-bit big-endian (wordsize=2) Toy (test) processor 32-bit little-endian (wordsize=2) Toy (test) processor 64-bit big-endian 
Toy (test) processor 64-bit big-endian Harvard Toy (test) processor 64-bit big-endian Harvard Toy (test) processor 64-bit little-endian Toy (test-builder) processor 32-bit big-endian Toy (test-builder) processor 32-bit little-endian Toy (test-builder) processor 32-bit big-endian word-aligned Toy (test-builder) processor 32-bit little-endian word-aligned ================================================ FILE: pypcode/processors/Toy/data/languages/toy.pspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/toy.sinc ================================================ # Main slaspec must define ENDIAN and ALIGN @ifndef WORDSIZE @define WORDSIZE "1" @endif @ifndef ALIGN @define ALIGN "1" @endif @ifndef ALREADY_ENDIAN_ALIGN define endian=$(ENDIAN); define alignment=$(ALIGN); @endif define space ram type=ram_space size=$(SIZE) wordsize=$(WORDSIZE) default; define space register type=register_space size=2; define register offset=0x1000 size=$(SIZE) [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; define ram offset=0x0 size=$(SIZE) [ mmr0 ]; # STATUS REGISTER MAP: (LOW) # C - CARRY # Z - ZERO # N - NEGATIVE # V - OVERFLOW define register offset=0x1100 size=1 [ C Z N V ]; @if SIZE == "4" @define HALFSIZE "2" @endif @if SIZE == "8" @define HALFSIZE "4" @endif @if ENDIAN == "little" define register offset=0x1000 size=$(HALFSIZE) [ r0l r0h r1l r1h r2l r2h r3l r3h r4l r4h r5l r5h r6l r6h r7l r7h r8l r8h r9l r9h r10l r10h r11l r11h r12l r12h spl sph lrl lrh pcl pch ]; @else # ENDIAN == "big" define register offset=0x1000 size=$(HALFSIZE) [ r0h r0l r1h r1l r2h r2l r3h r3l r4h r4l r5h r5l r6h r6l r7h r7l r8h r8l r9h r9l r10h r10l r11h r11l r12h r12l sph spl lrh lrl pch pcl ]; @endif # ENDIAN ================================================ FILE: pypcode/processors/Toy/data/languages/toy64-long8.cspec ================================================ 
================================================ FILE: pypcode/processors/Toy/data/languages/toy64.cspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/toy64_be.slaspec ================================================ @define ENDIAN "big" @define SIZE "8" @define INSTR_PHASE "" # not used by basic toy language @define DATA_SPACE "ram" @include "toy.sinc" @include "toyInstructions.sinc" ================================================ FILE: pypcode/processors/Toy/data/languages/toy64_be_harvard.slaspec ================================================ @define ENDIAN "big" @define SIZE "8" @define INSTR_PHASE "" # not used by basic toy language @define DATA_SPACE "data" @include "toy.sinc" define space data type=ram_space size=$(SIZE) wordsize=$(WORDSIZE); @include "toyInstructions.sinc" ================================================ FILE: pypcode/processors/Toy/data/languages/toy64_be_harvard_rev.slaspec ================================================ @define ENDIAN "big" @define SIZE "8" @define WORDSIZE "1" @define ALIGN "1" @define INSTR_PHASE "" # not used by basic toy language @define DATA_SPACE "data" @define ALREADY_ENDIAN_ALIGN define endian=$(ENDIAN); define alignment=$(ALIGN); define space data type=ram_space size=$(SIZE) wordsize=$(WORDSIZE); @include "toy.sinc" @include "toyInstructions.sinc" ================================================ FILE: pypcode/processors/Toy/data/languages/toy64_le.slaspec ================================================ @define ENDIAN "little" @define SIZE "8" @define INSTR_PHASE "" # not used by basic toy language @define DATA_SPACE "ram" @include "toy.sinc" @include "toyInstructions.sinc" ================================================ FILE: pypcode/processors/Toy/data/languages/toyInstructions.sinc ================================================ #### load immediate 0000 000x xxxx xxxx # 00ii dddd iiii iiii # imm rd, i load 
10-bit unsigned int into rd # 01nn dddd nnnn nnnn # simm rd, n load 10-bit signed int into rd # #### misc 100x xxxx xxxx xxxx # 1000 0000 0000 0ccc # skcc if ccc goto inst_next2 (conditional skip instruction) # #### arithmetic 1100 xxxx # 1100 0000 ssss tttt # add rs, rt rs = rs + rt # 1100 0001 ssss tttt # sub rs, rt rs = rs - rt # 1100 0010 ssss tttt # rsub rs, rt rs = rt - rs # 1100 0011 ssss tttt # mul rs, rt rs = rs * rt # 1100 0100 ssss tttt # div rs, rt rs = rs / rt # 1100 0101 ssss tttt # mod rs, rt rs = rs % rt # 1100 0110 ssss tttt # cmp rs, rt (set status only based on signed compare (rs - rt)) # 1100 0111 ssss tttt # ucmp rs, rt (set status only based on unsigned compare (rs - rt)) # # 1100 1000 ssss nnnn # add rs, n rs = rs + n # 1100 1001 ssss nnnn # sub rs, n rs = rs - n # 1100 1010 ssss nnnn # rsub rs, n rs = n - rs # 1100 1011 ssss nnnn # mul rs, n rs = rs * n # 1100 1100 ssss nnnn # div rs, n rs = rs / n # 1100 1101 ssss iiii # mod rs, i rs = rs % i # 1100 1110 ssss nnnn # cmp rs, n (set status only based on signed compare (rs - n)) # 1100 1111 ssss iiii # ucmp rs, i (set status only based on unsigned compare (rs - i)) # #### logic # 1101 0000 ssss tttt # and rs, rt rs = rs & rt # 1101 0001 ssss tttt # or rs, rt rs = rs | rt # 1101 0010 ssss tttt # xor rs, rt rs = rs ^ rt # 1101 0011 ssss tttt # lsr rs, rt rs = rs >> rt # 1101 0100 ssss tttt # asr rs, rt rs = rs s>> rt # 1101 0101 ssss tttt # lsl rs, rt rs = rs << rt # 1101 1000 ssss tttt # saa rs, rt rs = (rs << 11) | rt # # 1101 1011 ssss iiii # lsr rs, i rs = rs >> i # 1101 1100 ssss iiii # asr rs, i rs = rs s>> i # 1101 1101 ssss iiii # lsl rs, i rs = rs << i # 1101 1110 ssss 0000 # inv rs rs = ~rs # 1101 1110 ssss 0001 # neg rs rs = -rs # #### memory # 1101 0110 ssss tttt # load rs, [rt] rs = [rt] # 1101 0111 ssss tttt # store [rs], rt [rs] = rt # 1101 1111 ssss tttt # mov rs, rt rs = rt #### flow # 1110 nnnn nnnn 0ccc # brcc n if ccc goto pc + n # 1110 nnnn nnnn 1ccc # brdscc n if ccc goto 
pc + n with delay slot # 1111 0000 ssss 0ccc # brcc rs if ccc goto rs & ~1 # 1111 0001 ssss 0ccc # brdscc rs if ccc goto rs & ~1 with delay slot # 1111 0010 ssss 0000 # push rs push rs # 1111 0011 ssss 0000 # pop rs pop rs # 1111 0100 0000 0000 # ret return # 1111 0101 nnnn nnnn # callds n call n with delay slot # 1111 0110 ssss 0000 # call rs call rs # 1111 0110 ssss 1ccc # call rs if ccc call rs # 1111 1nnn nnnn nnnn # call n call n # #### user-defined 1010 xxxx # 1010 0010 ssss 0000 # user_one rs user_one rs # 1010 0010 ssss 0000 # user_two rs user_two rs # 1010 0011 0000 0000 # user_three user_three # 1010 0100 ssss tttt # user_four rs rt user_four rs rt # 1010 0101 nnnn nnnn # user_five n user_five n # 1010 0110 ssss 0000 # user_six rs user_six rs # 1010 1000 0000 0000 # unimpl # #### RESERVED / UNUSED # 1011 xxxx xxxx xxxx # UNUSED # 1101 1001 xxxx xxxx # RESERVED BANK (consumed by toy_builder.sinc) # 1101 1010 xxxx xxxx # RESERVED BANK (consumed by toy_builder.sinc) # 1111 0111 xxxx xxxx # RESERVED BANK (consumed by toy_builder.sinc) define token instr(16) op1515 = (15, 15) op1415 = (14, 15) op1215 = (12, 15) op1111 = (11, 11) op0811 = (8, 11) op0407 = (4, 7) op0007 = (0, 7) op0003 = (0, 3) op0303 = (3, 3) rd = (8, 11) rs = (4, 7) rt = (0, 3) imm1213 = (12, 13) imm0007 = (0, 7) imm0003 = (0, 3) simm1213 = (12, 13) signed simm0010 = (0, 10) signed simm0411 = (4, 11) signed simm0007 = (0, 7) signed simm0003 = (0, 3) signed cc0911 = (9, 11) cc0002 = (0, 2) ; attach variables [ rd rs rt ] [ r0 r1 r2 r3 r4 r5 r6 r7 r8 r9 r10 r11 r12 sp lr pc ]; # addressing mode subs Simm4: "#"^simm0003 is simm0003 { export *[const]:$(SIZE) simm0003; } Simm10: "#"^computed is simm1213 & imm0007 [ computed = (simm1213 << 8) | imm0007; ] { export *[const]:$(SIZE) computed; } Imm4: "#"^imm0003 is imm0003 { export *[const]:$(SIZE) imm0003; } Imm10: "#"^computed is imm1213 & imm0007 [ computed = (imm1213 << 8) | imm0007; ] { export *[const]:$(SIZE) computed; } Rel8: addr is simm0007 [ 
addr = inst_start + simm0007; ] { export *:$(SIZE) addr; } Rel82: addr is simm0411 [ addr = inst_start + simm0411; ] { export *:$(SIZE) addr; } Rel11: addr is simm0010 [ addr = inst_start + simm0010; ] { export *:$(SIZE) addr; } RS: [rs] is rs { export *[$(DATA_SPACE)]:$(SIZE) rs; } RT: [rt] is rt { export *[$(DATA_SPACE)]:$(SIZE) rt; } CC: "eq" is cc0002=0x0 { export Z; } CC: "ne" is cc0002=0x1 { tmp = !Z; export tmp; } CC: "lt" is cc0002=0x2 { tmp = N != V; export tmp; } CC: "le" is cc0002=0x3 { tmp = Z || (N != V); export tmp; } CC: "lo" is cc0002=0x4 { export C; } CC: "mi" is cc0002=0x5 { export N; } CC: "vs" is cc0002=0x6 { export V; } CC: "" is cc0002=0x7 { export 1:1; } COND: CC is CC { if (!CC) goto inst_next; } COND: CC is CC & cc0002=0x7 { } # unconditional macro resultflags(result) { N = result s< 0; Z = result == 0; } macro addflags(a, b) { C = carry(a, b); V = scarry(a, b); } macro subflags(a, b) { C = a s< b; V = sborrow(a, b); } macro logicflags() { C = 0; V = 0; } define pcodeop pcodeop_one; define pcodeop pcodeop_two; define pcodeop pcodeop_three; # operations :imm rd, Imm10 is $(INSTR_PHASE) op1515=0x0 & rd & Imm10 { logicflags(); rd = Imm10; resultflags(rd); } :simm rd, Simm10 is $(INSTR_PHASE) op1415=0x2 & rd & Simm10 { logicflags(); rd = Simm10; resultflags(rd); } :add rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x0 & rs & rt { addflags(rs, rt); rs = rs + rt; resultflags(rs); } :sub rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x1 & rs & rt { subflags(rs, rt); rs = rs - rt; resultflags(rs); } :rsub rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x2 & rs & rt { subflags(rt, rs); rs = rt - rs; resultflags(rs); } :mul rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x3 & rs & rt { rs = rs * rt; resultflags(rs); } # fix C & V :div rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x4 & rs & rt { rs = rs / rt; resultflags(rs); } # fix C & V :mod rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x5 & rs & rt { rs = rs % rt; resultflags(rs); } # fix C & V :cmp rs, 
rt is $(INSTR_PHASE) op1215=0xc & op0811=0x6 & rs & rt { subflags(rs, rt); tmp:$(SIZE) = rs - rt; resultflags(tmp); } :ucmp rs, rt is $(INSTR_PHASE) op1215=0xc & op0811=0x7 & rs & rt { logicflags(); N = rs < rt; Z = rs == rt; } :add rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0x8 & rs & Simm4 { addflags(rs, Simm4); rs = rs + Simm4; resultflags(rs); } :sub rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0x9 & rs & Simm4 { subflags(rs, Simm4); rs = rs - Simm4; resultflags(rs); } :rsub rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xa & rs & Simm4 { subflags(Simm4, rs); rs = Simm4 - rs; resultflags(rs); } :mul rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xb & rs & Simm4 { rs = rs * Simm4; resultflags(rs); } # fix C & V :div rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xc & rs & Simm4 { rs = rs / Simm4; resultflags(rs); } # fix C & V :mod rs, Imm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xd & rs & Imm4 { rs = rs % Imm4; resultflags(rs); } # fix C & V :cmp rs, Simm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xe & rs & Simm4 { subflags(rs, Simm4); tmp:$(SIZE) = rs - Simm4; resultflags(tmp); } :ucmp rs, Imm4 is $(INSTR_PHASE) op1215=0xc & op0811=0xf & rs & Imm4 { logicflags(); N = rs < Imm4; Z = rs == Imm4; } :and rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x0 & rs & rt { logicflags(); rs = rs & rt; resultflags(rs); } :or rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x1 & rs & rt { logicflags(); rs = rs | rt; resultflags(rs); } :xor rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x2 & rs & rt { logicflags(); rs = rs ^ rt; resultflags(rs); } :lsr rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x3 & rs & rt { logicflags(); rs = rs >> rt; resultflags(rs); } :asr rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x4 & rs & rt { logicflags(); rs = rs s>> rt; resultflags(rs); } :lsl rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x5 & rs & rt { logicflags(); rs = rs << rt; resultflags(rs); } # saa == shift and accumulate; very useful for building up a 32 bit or 64 bit value in 
pieces, like: # imm r12, #32b # imm r11, #7d7 # saa r12, r11 # imm r11, #2be # saa r12, r11 # # now r12 contains '0xcafebabe' (decompiler reconstitutes the pieces seamlessly) :saa rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x8 & rs & rt { logicflags(); rs = (rs << 11) | rt; resultflags(rs); } :lsr rs, Imm4 is $(INSTR_PHASE) op1215=0xd & op0811=0xb & rs & Imm4 { logicflags(); rs = rs >> Imm4; resultflags(rs); } :asr rs, Imm4 is $(INSTR_PHASE) op1215=0xd & op0811=0xc & rs & Imm4 { logicflags(); rs = rs s>> Imm4; resultflags(rs); } :lsl rs, Imm4 is $(INSTR_PHASE) op1215=0xd & op0811=0xd & rs & Imm4 { logicflags(); rs = rs << Imm4; resultflags(rs); } # fix C & V :inv rs is $(INSTR_PHASE) op1215=0xd & op0811=0xe & rs & op0003=0x0 { logicflags(); rs = ~rs; resultflags(rs); } :neg rs is $(INSTR_PHASE) op1215=0xd & op0811=0xe & rs & op0003=0x1 { logicflags(); rs = -rs; resultflags(rs); } :load rs, RT is $(INSTR_PHASE) op1215=0xd & op0811=0x6 & rs & RT { rs = RT; logicflags(); resultflags(rs); } :store RS, rt is $(INSTR_PHASE) op1215=0xd & op0811=0x7 & RS & rt { RS = rt; logicflags(); resultflags(rt); } :mov rs, rt is $(INSTR_PHASE) op1215=0xd & op0811=0xf & rs & rt { rs = rt; logicflags(); resultflags(rs); } :sk^CC is $(INSTR_PHASE) op1215=0x8 & op0811=0x0 & op0407=0x0 & op0303=0x0 & CC & Rel82 { if (CC) goto inst_next2; } :br^COND Rel82 is $(INSTR_PHASE) op1215=0xe & op0303=0x0 & COND & Rel82 { build COND; goto Rel82; } :brds^COND Rel82 is $(INSTR_PHASE) op1215=0xe & op0303=0x1 & COND & Rel82 { build COND; delayslot(1); goto Rel82; } :br^COND rs is $(INSTR_PHASE) op1215=0xf & op0811=0x0 & COND & rs & op0303=0x0 { build COND; goto [rs]; } :brds^COND rs is $(INSTR_PHASE) op1215=0xf & op0811=0x1 & COND & rs & op0303=0x0 { build COND; temp:$(SIZE) = rs; delayslot(1); goto [temp]; } @ifdef POS_STACK :push rs is $(INSTR_PHASE) op1215=0xf & op0811=0x2 & rs & op0003=0x0 { *[ram]:$(SIZE) sp = rs; sp = sp + $(SIZE); logicflags(); resultflags(rs); } :pop rs is $(INSTR_PHASE) 
op1215=0xf & op0811=0x3 & rs & op0003=0x0 { sp = sp - $(SIZE); rs = *[ram]:$(SIZE) sp; logicflags(); resultflags(rs); } @else :push rs is $(INSTR_PHASE) op1215=0xf & op0811=0x2 & rs & op0003=0x0 { *[ram]:$(SIZE) sp = rs; sp = sp - $(SIZE); logicflags(); resultflags(rs); } :pop rs is $(INSTR_PHASE) op1215=0xf & op0811=0x3 & rs & op0003=0x0 { sp = sp + $(SIZE); rs = *[ram]:$(SIZE) sp; logicflags(); resultflags(rs); } @endif :ret is $(INSTR_PHASE) op1215=0xf & op0811=0x4 & op0007=0x0 { return [lr]; } :callds Rel8 is $(INSTR_PHASE) op1215=0xf & op0811=0x5 & Rel8 { delayslot(1); lr = inst_next; call Rel8; } :call rs is $(INSTR_PHASE) op1215=0xf & op0811=0x6 & rs & op0003=0x0 { lr = inst_next; call [rs]; } :call Rel11 is $(INSTR_PHASE) op1215=0xf & op1111=0x1 & Rel11 { lr = inst_next; call Rel11; } # 1111 0110 ssss 1ccc # call rs if ccc call rs :call^COND rs is $(INSTR_PHASE) op1215=0xf & op0811=0x6 & rs & op0303=0x1 & COND { build COND; lr = inst_next; call [rs]; } :user_one rs is $(INSTR_PHASE) op1215=0xa & op0811=0x01 & rs & op0003=0x0 { pcodeop_one(rs);} :user_two rs is $(INSTR_PHASE) op1215=0xa & op0811=0x02 & rs & op0003=0x0 { pcodeop_two(rs); pcodeop_three();} :user_three is $(INSTR_PHASE) op1215=0xa & op0811=0x03 & op0007=0x0 { pcodeop_three();} :user_four rs rt is $(INSTR_PHASE) op1215=0xa & op0811=0x04 & rs & rt { pcodeop_one(rs); call [rt]; pcodeop_three();} :user_five Rel8 is $(INSTR_PHASE) op1215=0xa & op0811=0x05 & Rel8 { lr = inst_next; call Rel8; pcodeop_three();} :user_six rs is $(INSTR_PHASE) op1215=0xa & op0811=0x06 & rs & op0003=0x0 { r1 = pcodeop_one(rs); call [r1];} :unimpl is $(INSTR_PHASE) op1215=0xa & op0811=0x08 & op0007=0 unimpl ================================================ FILE: pypcode/processors/Toy/data/languages/toyPosStack.cspec ================================================ ================================================ FILE: pypcode/processors/Toy/data/languages/toy_be.slaspec ================================================ 
# Big-endian build of the basic Toy example language.  The .slaspec only sets
# preprocessor switches; all register/token/instruction definitions come from
# the included toy.sinc / toyInstructions.sinc.
@define ENDIAN "big"
@define SIZE "4"
@define INSTR_PHASE ""   # not used by basic toy language
@define DATA_SPACE "ram"

@include "toy.sinc"
@include "toyInstructions.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_be_posStack.slaspec
================================================
# Big-endian Toy variant with a positive-growing stack (POS_STACK selects the
# alternate push/pop forms behind @ifdef POS_STACK in toyInstructions.sinc).
@define ENDIAN "big"
@define SIZE "4"
@define INSTR_PHASE ""   # not used by basic toy language
@define POS_STACK "true" # enables switch in instructions for push/pop to work in positive direction
@define DATA_SPACE "ram"

@include "toy.sinc"
@include "toyInstructions.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_builder.sinc
================================================
# "Builder" variant of the Toy language: layers a context register, a
# two-phase parse (^instruction with phase=0/1), and globalset-driven context
# flow on top of the basic toy instruction set, to exercise SLEIGH's more
# complex disassembly features.
@include "toy.sinc"

# Define context bits
define register offset=0x2000 size=8 contextreg;

define context contextreg
  # stored context
  fctx = (0,3)          # flowing context
  nfctx = (4,7) noflow  # single address context
  # transient context (never stored, aids disassembly)
  phase = (8,9)         # parse phase - used for complex scenarios
  counter = (10,13)     # parse count-down
;

define token instr8(8)
  op8 = (0, 7)
;
define token extra16(16)
  xsimm8 = (0, 7) signed
;
define token extra8(8)
  nnnn = (0, 3)
;

# ^instruction - manage parse phases (only required for complex languages)
# Phase 0 wraps the real instruction and flips the transient phase bit to 1,
# so all real constructors (constrained by $(INSTR_PHASE) below) match only
# during phase 1.
:^instruction is phase=0 & instruction
  [ phase=1; ]
{
  build instruction;
}

@define INSTR_PHASE "phase=1 &" # parse instructions during phase 1

@include "toyInstructions.sinc"

# additional forms added to toy language, taken from all three reserved banks:
#   1101 1001 xxxx xxxx                                   # RESERVED BANK
#   1101 1010 xxxx xxxx                                   # RESERVED BANK
#   1111 0111 xxxx xxxx                                   # RESERVED BANK
#   fctx i       1101 1001 0000 iiii                      # set flow context (fctx) on next instr
#   nfctx i      1101 1001 0001 iiii                      # set noflow context (nfctx) on next instr
#   nfctx rel,i  1101 1001 0010 iiii; 0000 0000 iiii iiii # set noflow context on rel instr
#   cop# s       1101 1010 ssss 0000                      # coprocessor # determined by nfctx val (1-3)
#   nop #1       1111 0111                                # nop
#                1101 1001 0011 nnnn; ...                 # nop where nnnn indicates number of additional bytes consumed

# operations

# Set the flowing context field on the following instruction (no p-code body).
:fctx Imm4 is phase=1 & op1215=0xd & op0811=0x9 & rs=0x0 & imm0003 & Imm4
  [ fctx=imm0003; globalset(inst_next,fctx); ]
{
}

# Compute a target address relative to inst_start and pin the noflow context
# value at that single address via globalset.
nfctxSetAddr: addr is Imm4 & imm0003; xsimm8
  [ addr = inst_start + xsimm8; nfctx=imm0003; globalset(addr, nfctx); ]
{
  export *:$(SIZE) addr;
}

# nfctx rel,i form - context applied at a relative address
:nfctx nfctxSetAddr,Imm4 is (phase=1 & op1215=0xd & op0811=0x9 & rs=0x2 & Imm4) ... & nfctxSetAddr
{
}

# nfctx i form - context applied to the next instruction only (noflow)
:nfctx Imm4 is phase=1 & op1215=0xd & op0811=0x9 & rs=0x1 & imm0003 & Imm4
  [ nfctx=imm0003; globalset(inst_next,nfctx); ]
{
}

define pcodeop cop1;
define pcodeop cop2;
define pcodeop cop3;

# One shared encoding decodes to cop1/cop2/cop3 depending on the noflow
# context value previously set by an nfctx instruction.
:cop1 rs is phase=1 & op1215=0xd & op0811=0xa & op0003=0 & nfctx=1 & rs { cop1(rs); }
:cop2 rs is phase=1 & op1215=0xd & op0811=0xa & op0003=0 & nfctx=2 & rs { cop2(rs); }
:cop3 rs is phase=1 & op1215=0xd & op0811=0xa & op0003=0 & nfctx=3 & rs { cop3(rs); }

# Display helper: total nop byte count (imm0003 + 2) as a constant.
NopCnt: "#"^cnt is imm0003 [ cnt = imm0003 + 2; ] { export *[const]:1 cnt; }

# Recursive subtable consuming `counter` extra bytes (one nnnn token each).
NopByte: is counter=0 { }
NopByte: is epsilon; nnnn; NopByte [ counter=counter-1; ] { }

One: "#"^cnt is epsilon [ cnt = 1; ] { export *[const]:1 cnt; }

# single-byte nop (1111 0111)
:nop One is phase=1 & op8=0xf7 & One { }

# variable-length nop: imm0003 seeds the count-down consumed by NopByte
:nop NopCnt is phase=1 & op1215=0xd & op0811=0x9 & rs=0x3 & imm0003 & NopCnt; NopByte ...
  [ counter=imm0003; ]
{
}

================================================
FILE: pypcode/processors/Toy/data/languages/toy_builder_be.slaspec
================================================
# Big-endian builder-variant Toy language.
@define ENDIAN "big"
@define SIZE "4"
@define DATA_SPACE "ram"

@include "toy_builder.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_builder_be_align2.slaspec
================================================
# Big-endian builder variant with 2-byte instruction alignment.
@define ENDIAN "big"
@define ALIGN "2"
@define SIZE "4"
@define DATA_SPACE "ram"

@include "toy_builder.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_builder_le.slaspec
================================================
# Little-endian builder-variant Toy language.
@define ENDIAN "little"
@define SIZE "4"
@define DATA_SPACE "ram"

@include "toy_builder.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_builder_le_align2.slaspec
================================================
# Little-endian builder variant with 2-byte instruction alignment.
@define ENDIAN "little"
@define ALIGN "2"
@define SIZE "4"
@define DATA_SPACE "ram"

@include "toy_builder.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_harvard.pspec
================================================

================================================
FILE: pypcode/processors/Toy/data/languages/toy_le.slaspec
================================================
# Little-endian build of the basic Toy language.
@define ENDIAN "little"
@define SIZE "4"
@define INSTR_PHASE ""   # not used by basic toy language
@define DATA_SPACE "ram"

@include "toy.sinc"
@include "toyInstructions.sinc"

================================================
FILE: pypcode/processors/Toy/data/languages/toy_wsz_be.slaspec
================================================
# Big-endian Toy variant with 2-byte addressable word size (WORDSIZE "2")
# and 2-byte alignment.
@define ENDIAN "big"
@define ALIGN "2"
@define SIZE "4"
@define WORDSIZE "2"
@define INSTR_PHASE ""   # not used by basic toy language
@define DATA_SPACE "ram"

@include "toy.sinc"
@include "toyInstructions.sinc"
================================================ FILE: pypcode/processors/Toy/data/languages/toy_wsz_le.slaspec ================================================ @define ENDIAN "little" @define ALIGN "2" @define SIZE "4" @define WORDSIZE "2" @define INSTR_PHASE "" # not used by basic toy language @define DATA_SPACE "ram" @include "toy.sinc" @include "toyInstructions.sinc" ================================================ FILE: pypcode/processors/V850/data/languages/Helpers/Conditions.sinc ================================================ ##################################################### ##### Conditions ##### ##################################################### c0003: "v" is op0003=0x0 { tmp:1 = ($(OV)) == 1; export tmp; } c0003: "nv" is op0003=0x8 { tmp:1 = ($(OV)) == 0; export tmp; } c0003: "c" is op0003=0x1 { tmp:1 = ($(CY)) == 1; export tmp; } c0003: "nc" is op0003=0x9 { tmp:1 = ($(CY)) == 0; export tmp; } c0003: "e" is op0003=0x2 { tmp:1 = ($(Z)) == 1; export tmp; } c0003: "ne" is op0003=0xA { tmp:1 = ($(Z)) == 0; export tmp; } c0003: "nh" is op0003=0x3 { tmp:1 = ($(CY) || $(Z)) == 1; export tmp; } c0003: "h" is op0003=0xB { tmp:1 = ($(CY) || $(Z)) == 0; export tmp; } c0003: "n" is op0003=0x4 { tmp:1 = ($(S)) == 1; export tmp; } c0003: "p" is op0003=0xC { tmp:1 = ($(S)) == 0; export tmp; } c0003: "t" is op0003=0x5 { tmp:1 = 1; export tmp; } c0003: "sa" is op0003=0xD { tmp:1 = ($(SAT)) == 1; export tmp; } c0003: "lt" is op0003=0x6 { tmp:1 = ($(S) ^^ $(OV)) == 1; export tmp; } c0003: "ge" is op0003=0xE { tmp:1 = ($(S) ^^ $(OV)) == 0; export tmp; } c0003: "le" is op0003=0x7 { tmp:1 = ($(S) ^^ $(OV) || $(Z)) == 1; export tmp; } c0003: "gt" is op0003=0xF { tmp:1 = ($(S) ^^ $(OV) || $(Z)) == 0; export tmp; } c1720: "v" is op1720=0x0 { tmp:1 = ($(OV)) == 1; export tmp; } c1720: "nv" is op1720=0x8 { tmp:1 = ($(OV)) == 0; export tmp; } c1720: "c" is op1720=0x1 { tmp:1 = ($(CY)) == 1; export tmp; } c1720: "nc" is op1720=0x9 { tmp:1 = ($(CY)) == 0; export tmp; } c1720: 
"e" is op1720=0x2 { tmp:1 = ($(Z)) == 1; export tmp; } c1720: "ne" is op1720=0xA { tmp:1 = ($(Z)) == 0; export tmp; } c1720: "nh" is op1720=0x3 { tmp:1 = ($(CY) || $(Z)) == 1; export tmp; } c1720: "h" is op1720=0xB { tmp:1 = ($(CY) || $(Z)) == 0; export tmp; } c1720: "n" is op1720=0x4 { tmp:1 = ($(S)) == 1; export tmp; } c1720: "p" is op1720=0xC { tmp:1 = ($(S)) == 0; export tmp; } c1720: "t" is op1720=0x5 { tmp:1 = 1; export tmp; } c1720: "sa" is op1720=0xD { tmp:1 = ($(SAT)) == 1; export tmp; } c1720: "lt" is op1720=0x6 { tmp:1 = ($(S) ^^ $(OV)) == 1; export tmp; } c1720: "ge" is op1720=0xE { tmp:1 = ($(S) ^^ $(OV)) == 0; export tmp; } c1720: "le" is op1720=0x7 { tmp:1 = ($(S) ^^ $(OV) || $(Z)) == 1; export tmp; } c1720: "gt" is op1720=0xF { tmp:1 = ($(S) ^^ $(OV) || $(Z)) == 0; export tmp; } ================================================ FILE: pypcode/processors/V850/data/languages/Helpers/Extras.sinc ================================================ ##################################################### ##### Extras ##### ##################################################### # read r0 always return zero R0004: _R0004 is _R0004 & _R0004=0 { local x:4=0; export x; } R0004: _R0004 is _R0004 { export _R0004; } R1115: _R1115 is _R1115 & _R1115=0 { local x:4=0; export x; } R1115: _R1115 is _R1115 { export _R1115; } R2731: _R2731 is _R2731 & _R2731=0 { local x:4=0; export x; } R2731: _R2731 is _R2731 { export _R2731; } adr9: res is op0406 & s1115 [ res = ((s1115 << 4) | (op0406 << 1)) + inst_start; ] { export *:4 res; } adr22: res is s0005; op1631 & op1616=0 [ res = ((s0005 << 16) | op1631) + inst_start; ] { export *:4 res; } adr32: res is op1631 & op1616=0; op3247 [ res = ((op3247 << 16) | op1631) + inst_start; ] { export *:4 res; } adr32i: res is op1631 & op1616=0; op3247 [ res = ((op3247 << 16) | op1631); ] { export *[const]:4 res; } reg4: op0_1720 is op2323=0 & op0_1720 { export op0_1720; } reg4: op1_1720 is op2323=1 & op1_1720 { export op1_1720; } ##### Prep/Disp 
Loop ##### PrepList20: r20 is prep27=1 & r20 { push(r20); } PrepList20: is prep27=0 { } PrepList21: r21 is prep26=1 & prep27=0 & r21 { push(r21); } PrepList21: PrepList20,r21 is prep26=1 & PrepList20 & r21 { push(r21); } PrepList21: PrepList20 is prep26=0 & PrepList20 { } PrepList22: r22 is prep25=1 & prep2627=0 & r22 { push(r22); } PrepList22: PrepList21,r22 is prep25=1 & PrepList21 & r22 { push(r22); } PrepList22: PrepList21 is prep25=0 & PrepList21 { } PrepList23: r23 is prep24=1 & prep2527=0 & r23 { push(r23); } PrepList23: PrepList22,r23 is prep24=1 & PrepList22 & r23 { push(r23); } PrepList23: PrepList22 is prep24=0 & PrepList22 { } PrepList24: r24 is prep31=1 & prep2427=0 & r24 { push(r24); } PrepList24: PrepList23,r24 is prep31=1 & PrepList23 & r24 { push(r24); } PrepList24: PrepList23 is prep31=0 & PrepList23 { } PrepList25: r25 is prep30=1 & prep2427=0 & prep31=0 & r25 { push(r25); } PrepList25: PrepList24,r25 is prep30=1 & PrepList24 & r25 { push(r25); } PrepList25: PrepList24 is prep30=0 & PrepList24 { } PrepList26: r26 is prep29=1 & prep2427=0 & prep3031=0 & r26 { push(r26); } PrepList26: PrepList25,r26 is prep29=1 & PrepList25 & r26 { push(r26); } PrepList26: PrepList25 is prep29=0 & PrepList25 { } PrepList27: r27 is prep28=1 & prep2427=0 & prep2931=0 & r27 { push(r27); } PrepList27: PrepList26,r27 is prep28=1 & PrepList26 & r27 { push(r27); } PrepList27: PrepList26 is prep28=0 & PrepList26 { } PrepList28: r28 is prep23=1 & prep2431=0 & r28 { push(r28); } PrepList28: PrepList27,r28 is prep23=1 & PrepList27 & r28 { push(r28); } PrepList28: PrepList27 is prep23=0 & PrepList27 { } PrepList29: r29 is prep22=1 & prep2431=0 & prep23=0 & r29 { push(r29); } PrepList29: PrepList28,r29 is prep22=1 & PrepList28 & r29 { push(r29); } PrepList29: PrepList28 is prep22=0 & PrepList28 { } PrepList30: ep is prep00=1 & prep2431=0 & prep2223=0 & ep { push(ep); } PrepList30: PrepList29,ep is prep00=1 & PrepList29 & ep { push(ep); } PrepList30: PrepList29 is prep00=0 & 
PrepList29 { } PrepList: { lp } is prep21=1 & prep2431=0 & prep2223=0 & prep00=0 & lp { push(lp); } PrepList: { PrepList30,lp } is prep21=1 & PrepList30 & lp { push(lp); } PrepList: { PrepList30 } is prep21=0 & PrepList30 { } DispList31: lp is prep21=1 & lp { pop(lp); } DispList31: is prep21=0 { } DispList30: ep,DispList31 is DispList31 & prep00=1 & ep { pop(ep); } DispList30: DispList31 is DispList31 & prep00=0 { } DispList29: r29,DispList30 is DispList30 & prep22=1 & r29 { pop(r29); } DispList29: DispList30 is DispList30 & prep22=0 { } DispList28: r28,DispList29 is DispList29 & prep23=1 & r28 { pop(r28); } DispList28: DispList29 is DispList29 & prep23=0 { } DispList27: r27,DispList28 is DispList28 & prep28=1 & r27 { pop(r27); } DispList27: DispList28 is DispList28 & prep28=0 { } DispList26: r26,DispList27 is DispList27 & prep29=1 & r26 { pop(r26); } DispList26: DispList27 is DispList27 & prep29=0 { } DispList25: r25,DispList26 is DispList26 & prep30=1 & r25 { pop(r25); } DispList25: DispList26 is DispList26 & prep30=0 { } DispList24: r24,DispList25 is DispList25 & prep31=1 & r24 { pop(r24); } DispList24: DispList25 is DispList25 & prep31=0 { } DispList23: r23,DispList24 is DispList24 & prep24=1 & r23 { pop(r23); } DispList23: DispList24 is DispList24 & prep24=0 { } DispList22: r22,DispList23 is DispList23 & prep25=1 & r22 { pop(r22); } DispList22: DispList23 is DispList23 & prep25=0 { } DispList21: r21,DispList22 is DispList22 & prep26=1 & r21 { pop(r21); } DispList21: DispList22 is DispList22 & prep26=0 { } DispList: { r20,DispList21 } is DispList21 & prep27=1 & r20 { pop(r20); } DispList: { DispList21 } is DispList21 & prep27=0 { } ================================================ FILE: pypcode/processors/V850/data/languages/Helpers/Macros.sinc ================================================ ##################################################### ##### Macros ##### ##################################################### ##### CARRY-Flag ##### macro set_CY_pos(var1, 
var2) { $(CY) = carry(var1, var2); } macro set_CY_pos2(var1, var2, var3) { local var12 = var1 + var2; $(CY) = carry(var1, var2) || carry(var12, var3); } macro set_CY_neg(var1, var2) { $(CY) = var1 < var2; } macro set_CY_neg2(var1, var2, var3) { local var23 = var2 + var3; $(CY) = (var1 < var23); } ##### Overflow-Flag ##### macro set_OV_pos(var1, var2) { $(OV) = scarry(var1, var2); } macro set_OV_pos2(var1, var2, var3) { local var12 = var1 + var2; $(OV) = scarry(var1, var2) || scarry(var12, var3); } macro set_OV_neg(var1, var2) { local A:4 = var1; local B:4 = var2; local R = A - B; local A1 = A[31,1]; local B1 = B[31,1]; local R1 = R[31,1]; $(OV) = (A1 != B1) && (B1 == R1); #OV = 1 if: #pos - neg = neg #neg - pos = pos } macro set_OV_neg2(var1, var2, var3) { local A:4 = var1; local B:4 = var2; local C:4 = var3; local R = A - B - C; local A1 = A[31,1]; local B1 = B[31,1]; local R1 = R[31,1]; $(OV) = (A1 != B1) && (B1 == R1); } ##### S/Z-Flags ##### macro set_S(flag) { $(S) = flag s< 0; } macro set_Z(var) { $(Z) = var == 0; } ##### General-Flag-Macros ##### macro set_general_flags_pos(var1, var2) { local res = var1 + var2; set_CY_pos(var1, var2); set_OV_pos(var1, var2); set_S(res); set_Z(res); } macro set_general_flags_neg(var1, var2) { local res = var1 - var2; set_CY_neg(var1, var2); set_OV_neg(var1, var2); set_S(res); set_Z(res); } macro set_OV0_S_Z(var) { $(OV) = 0; set_S(var); set_Z(var); } ##### General-Macros ##### # if condition is != 0 macro either_or(res, cond, true, false) { res = (true * zext(cond != 0)) + (false * zext(cond == 0)); } # if condition is == 1 macro either_or1(res, cond, true, false) { res = (true * zext(cond == 1)) + (false * zext(cond != 1)); } macro shift_right_logic(res, var, shift_) { local shift = shift_ & 0x1f; local mask = (zext(shift != 0) * var) & (1 << (shift - 1)); res = var >> shift; set_OV0_S_Z(res); $(CY) = ((mask != 0) && (shift != 0)); } macro shift_right_arith(res, var, shift_) { local shift = shift_ & 0x1f; local mask = 
(zext(shift != 0) * var) & (1 << (shift - 1)); res = var s>> shift; set_OV0_S_Z(res); $(CY) = ((mask != 0) && (shift != 0)); } macro shift_left_logic(res, var, shift_) { local shift = shift_ & 0x1f; local mask = (zext(shift != 0) * var) & (1 << (32 - shift)); res = var << shift; set_OV0_S_Z(res); $(CY) = ((mask != 0) && (shift != 0)); } ##### Prep/Disp Macros ##### macro push(reg) { sp = sp - 4; *:4 sp = reg; } macro pop(reg) { reg = *:4 sp; sp = sp + 4; } ##### Search Macros ##### macro SearchRight(res, var, char) { local var_:4 = var; res = 0; if ((var_ & 0x1) == char) goto ; var_ = var_ >> 1; res = res + 1; if (res < 32) goto ; res = 0; } macro SearchLeft(res, var, char) { local var_:4 = var; res = 0; if ((var_ >> 31) == char) goto ; var_ = var_ << 1; res = res + 1; if (res < 32) goto ; res = 0; } # macro saturate(var) # { # if (var s> 0x7FFFFFFF) # goto ; # if (var s< -0x80000000) # goto ; # goto ; # # var = 0x7FFFFFFF; # goto ; # # var = -0x80000000; # goto ; # # } ##### Float-Macros ##### macro compare_float(res, fcond, reg1, reg2) { local un = ((fcond & 1) == 1) & (nan(reg2) || nan(reg1)); local eq = ((fcond & 2) == 2) & (!(nan(reg2) || nan(reg1))) & (reg2 f== reg1); local le = ((fcond & 4) == 4) & (!(nan(reg2) || nan(reg1))) & (reg2 f< reg1); #local ex = (fcond & 8) & ((nan(reg2) || nan(reg1))); res = zext(un|eq|le); } ================================================ FILE: pypcode/processors/V850/data/languages/Helpers/Register.sinc ================================================ ##################################################### ##### Register ##### ##################################################### ##### General-purpose registers (r0 to r31) ##### define register offset=0x0 size=0x4 # offset = 0 because it's the start [ r0 r1 r2 sp gp tp r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 ep lp ]; ##### Control/Special registers ##### define register offset=0x80 size=0x4 # offset = 0x80(128) = 
PreOffset+PreRegister*Size = 0+32*4 = 128 [ EIPC EIPSW FEPC FEPSW ECR PSW FPSR FPEPC FPST FPCC FPCFG SCCFG SCBP EIIC FEIC DBIC CTPC CTPSW DBPC DBPSW CTBP DIR DBG22 DBG23 DBG24 DBG25 DBG26 DBG27 EIWR FEWR DBWR BSEL ]; define register offset=0x0 size=0x8 [ r0r1 r2sp r4r5 r6r7 r8r9 r10r11 r12r13 r14r15 r16r17 r18r19 r20r21 r22r23 r24r25 r26r27 r28r29 eplp ]; define register offset=0x100 size=0x4 [ PC ]; # offset = 0x100(256) = PreOffset+PreRegister*Size = 128+32*4 = 256 ================================================ FILE: pypcode/processors/V850/data/languages/Helpers/Tokens.sinc ================================================ ##################################################### ##### Tokens ##### ##################################################### define token instr(16) op0000 = (0,0) op0003 = (0,3) op0004 = (0,4) _R0004 = (0,4) SR0004 = (0,4) R0004x2 = (0,4) s0004 = (0,4) signed s0005 = (0,5) signed op0005 = (0,5) op0006 = (0,6) op0010 = (0,10) op0015 = (0,15) op0106 = (1,6) op0406 = (4,6) op0410 = (4,10) op0505 = (5,5) op0510 = (5,10) op0515 = (5,15) op0610 = (6,10) op0615 = (6,15) op0710 = (7,10) op1113 = (11,13) op1114 = (11,14) op1115 = (11,15) _R1115 = (11,15) SR1115 = (11,15) R1115x2 = (11,15) s1115 = (11,15) signed op1415 = (14,15) op1515 = (15,15) ; define token instr2(16) op1616 = (0,0) op1617 = (0,1) op1619 = (0,3) op1620 = (0,4) R1620 = (0,4) R1620x2 = (0,4) op1626 = (0,10) op1631 = (0,15) s1631 = (0,15) signed fcbit1719 = (1,3) op1720 = (1,4) op0_1720 = (1,4) op1_1720 = (1,4) s1731 = (1,15) signed op1821 = (2,5) s1821 = (2,5) signed op2020 = (4,4) op2026 = (4,10) op2122 = (5,6) op2126 = (5,10) op2226 = (6,10) op2323 = (7,7) op2426 = (8,10) op2729 = (11,13) fcond2730 = (11,14) op2731 = (11,15) _R2731 = (11,15) R2731x2 = (11,15) op3031 = (14,15) op3131 = (15,15) ; define token instr3(16) op3247 = (0,15) s3247 = (0,15) signed ; define token instr4(16) op4863 = (0,15) ; # used in PREPARE/DISPOSE instructions define token prep(32) prep00 = (0,0) 
prep0105 = (1,5) prep0615 = (6,15) prep1620 = (16,20) prep21 = (21,21) prep22 = (22,22) prep2223 = (22,31) prep23 = (23,23) prep24 = (24,24) prep2431 = (24,31) prep25 = (25,25) prep26 = (26,26) prep27 = (27,27) prep28 = (28,28) prep29 = (29,29) prep2931 = (29,31) prep3031 = (30,31) prep2427 = (24,27) prep2527 = (25,27) prep2627 = (26,27) prep30 = (30,30) prep31 = (31,31) ; ================================================ FILE: pypcode/processors/V850/data/languages/Helpers/Variables.sinc ================================================ ##################################################### ##### Variables ##### ##################################################### attach variables [ _R0004 _R1115 _R2731 R1620 prep1620] [ r0 r1 r2 sp gp tp r6 r7 r8 r9 r10 r11 r12 r13 r14 r15 r16 r17 r18 r19 r20 r21 r22 r23 r24 r25 r26 r27 r28 r29 ep lp ]; attach variables [ R0004x2 R1115x2 R1620x2 R2731x2 ] [ r0r1 _ r2sp _ r4r5 _ r6r7 _ r8r9 _ r10r11 _ r12r13 _ r14r15 _ r16r17 _ r18r19 _ r20r21 _ r22r23 _ r24r25 _ r26r27 _ r28r29 _ eplp _ ]; attach variables [ SR0004 SR1115 ] [ EIPC EIPSW FEPC FEPSW ECR PSW FPSR FPEPC FPST FPCC FPCFG SCCFG SCBP EIIC FEIC DBIC CTPC CTPSW DBPC DBPSW CTBP DIR DBG22 DBG23 DBG24 DBG25 DBG26 DBG27 EIWR FEWR DBWR BSEL ]; attach variables [op0_1720] [r0 r2 gp r6 r8 r10 r12 r14 r16 r18 r20 r22 r24 r26 r28 ep]; attach variables [op1_1720] [r1 sp tp r7 r9 r11 r13 r15 r17 r19 r21 r23 r25 r27 r29 lp]; attach names [fcond2730] ["f" "un" "eq" "ueq" "olt" "ult" "ole" "ule" "sd" "ngle" "seq" "ngl" "lt" "nge" "le" "ngt"]; @define NP "PSW[7,1]" @define EP "PSW[6,1]" @define ID "PSW[5,1]" @define SAT "PSW[4,1]" @define CY "PSW[3,1]" @define OV "PSW[2,1]" @define S "PSW[1,1]" @define Z "PSW[0,1]" @define EICC "ECR[0,16]" @define FECC "ECR[16,16]" ================================================ FILE: pypcode/processors/V850/data/languages/Instructions/Arithmetic.sinc ================================================ # (3) Multiply instructions # (4) Multiply-accumulate 
instructions # (5) Arithmetic instructions # (7) Saturated operation instructions # (11) Divide instructions # (12) High-speed divide instructions ##################################################### ##### Multiply ##### ##################################################### # MUL reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01000100000 :mul R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x220 & R2731 { local res:8 = sext(R1115) * sext(R0004); R1115 = res:4; R2731 = res[32,32]; } # MUL imm9, reg2, reg3 - rrrrr111111iiiii|wwwww01001IIII00 :mul imm9, R1115, R2731 is op0510=0x3F & op0004 & R1115; op2226=0x9 & op1617=0x0 & s1821 & R2731 [ imm9 = (s1821 << 5) | op0004; ] { local res:8 = sext(R1115) * imm9; R1115 = res:4; R2731 = res[32,32]; } # MULH reg1, reg2 - rrrrr000111RRRRR :mulh R0004, R1115 is op0510=0x07 & R0004 & R1115 & op1115!=0 { R1115 = sext(R1115:2) * sext(R0004:2); } # MULH imm5, reg2 - rrrrr010111iiiii :mulh s0004, R1115 is op0510=0x17 & s0004 & R1115 { R1115 = sext(R1115:2) * s0004; } # MULHI imm16, reg1, reg2 - rrrrr110111RRRRR|iiiiiiiiiiiiiiii :mulhi s1631, R0004, R1115 is op0510=0x37 & R1115 & R0004; s1631 { R1115 = R0004 * s1631; } # MULU reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01000100010 :mulu R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x222 & R2731 { local res:8 = zext(R1115) * zext(R0004); R1115 = res:4; R2731 = res[32,32]; } # MULU imm9, reg2, reg3 - rrrrr111111iiiii|wwwww01001IIII10 :mulu imm9, R1115, R2731 is op0510=0x3F & op0004 & R1115; op2226=0x9 & op1617=0x2 & op1821 & R2731 [ imm9 = (op1821 << 5) | op0004; ] { local res:8 = zext(R1115) * imm9; R1115 = res:4; R2731 = res[32,32]; } ##################################################### ##### MultiplyAccumulate ##### ##################################################### # MAC reg1, reg2, reg3, reg4 - rrrrr111111RRRRR wwww0011110mmmm0 :mac R0004, R1115, R2731x2, R1620x2 is op0510=0x3F & R0004 & R1115; op2126=0x1E & op1616=0 & R1620x2 & R2731x2 { R1620x2 = sext(R1115) 
* sext(R0004) + R2731x2; } # MACU reg1, reg2, reg3, reg4 - rrrrr111111RRRRR|wwww0011111mmmm0 :macu R0004, R1115, R2731x2, R1620x2 is op0510=0x3F & R0004 & R1115; op2126=0x1F & op1616=0 & R1620x2 & R2731x2 { R1620x2 = zext(R1115) * zext(R0004) + R2731x2; } ##################################################### ##### Arithmetic ##### ##################################################### # ADD reg1, reg2 - rrrrr001110RRRRR :add R0004, R1115 is op0510=0x0E & R0004 & R1115 { set_general_flags_pos(R0004, R1115); R1115 = R1115 + R0004; } # ADD imm5, reg2 - rrrrr010010iiiii :add s0004, R1115 is op0510=0x12 & s0004 & R1115 { set_general_flags_pos(s0004, R1115); R1115 = R1115 + s0004; } # ADDI imm16, reg1, reg2 - rrrrr110000RRRRR|iiiiiiiiiiiiiiii :addi s1631, R0004, R1115 is op0510=0x30 & R1115 & R0004; s1631 { set_general_flags_pos(R0004, s1631); R1115 = R0004 + s1631; } # CMP reg1, reg2 - rrrrr001111RRRRR :cmp R0004, R1115 is op0510=0x0F & R0004 & R1115 { set_general_flags_neg(R1115, R0004); } # CMP imm5, reg2 - rrrrr010011iiiii :cmp s0004, R1115 is op0510=0x13 & s0004 & R1115 { set_general_flags_neg(R1115, s0004); } # MOV reg1, reg2 - rrrrr000000RRRRR :mov R0004, R1115 is op0510=0x00 & R0004 & R1115 { R1115 = R0004; } # MOV imm5, reg2 - rrrrr010000iiiii :mov s0004, R1115 is op0510=0x10 & s0004 & R1115 & op1115!=0 { R1115 = s0004; } # MOV imm32, reg1 - 00000110001RRRRR|iiiiiiiiiiiiiiii|IIIIIIIIIIIIIIII :mov imm32, R0004 is op0515=0x031 & R0004; op1631; op3247 [ imm32 = (op3247 << 16) | op1631; ] { R0004 = imm32; } # MOVEA imm16, reg1, reg2 - rrrrr110001RRRRR|iiiiiiiiiiiiiiii :movea s1631, R0004, R1115 is op0510=0x31 & op1115!=0 & R0004 & R1115; s1631 { R1115 = R0004 + s1631; } # MOVHI imm16, reg1, reg2 - rrrrr110010RRRRR|iiiiiiiiiiiiiiii :movhi s1631, R0004, R1115 is op0510=0x32 & op1115!=0 & R0004 & R1115; s1631 { R1115 = R0004 + (s1631 << 16); } # SUB reg1, reg2 - rrrrr001101RRRRR :sub R0004, R1115 is op0510=0x0D & R0004 & R1115 { set_general_flags_neg(R1115, R0004); 
R1115 = R1115 - R0004; } # SUBR reg1, reg2 - rrrrr001100RRRRR :subr R0004, R1115 is op0510=0x0C & R0004 & R1115 { set_general_flags_neg(R0004, R1115); R1115 = R0004 - R1115; } ##################################################### ##### Saturated ##### ##################################################### define pcodeop __saturate; # SATADD reg1, reg2 - rrrrr000110RRRRR :satadd R0004, R1115 is op0510=0x06 & R0004 & R1115 & op1115!=0 { set_general_flags_pos(R1115, R0004); $(SAT) = $(SAT) || $(OV); R1115 = R1115 + R0004; __saturate(R1115); } #SATADD imm5, reg2 - rrrrr010001iiiii :satadd s0004, R1115 is op0510=0x11 & s0004 & R1115 & op1115!=0 { set_general_flags_pos(R1115, s0004); $(SAT) = $(SAT) || $(OV); R1115 = R1115 + s0004; __saturate(R1115); } # SATADD reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01110111010 :satadd R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x3BA & R2731 { set_general_flags_pos(R1115, R0004); $(SAT) = $(SAT) || $(OV); R2731 = R1115 + R0004; __saturate(R2731); } # SATSUB reg1, reg2 - rrrrr000101RRRRR :satsub R0004, R1115 is op0510=0x05 & R0004 & R1115 & op1115!=0 { set_general_flags_neg(R1115, R0004); $(SAT) = $(SAT) || $(OV); R1115 = R1115 - R0004; __saturate(R1115); } # SATSUB reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01110011010 :satsub R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x39A & R2731 { set_general_flags_neg(R1115, R0004); $(SAT) = $(SAT) || $(OV); R2731 = R1115 - R0004; __saturate(R2731); } # SATSUBI imm16, reg1, reg2 :satsubi s1631, R0004, R1115 is op0510=0x33 & op1115!=0 & R0004 & R1115; s1631 { set_general_flags_neg(R0004, s1631); $(SAT) = $(SAT) || $(OV); R1115 = R0004 - s1631; __saturate(R1115); } # SATSUBR reg1, reg2 :satsubr R0004, R1115 is op0510=0x04 & R0004 & R1115 & op1115!=0 { set_general_flags_neg(R0004, R1115); $(SAT) = $(SAT) || $(OV); R1115 = R0004 - R1115; __saturate(R1115); } ##################################################### ##### Divide ##### 
#####################################################

# DIV reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01011000000
# Signed divide: quotient -> reg2, remainder -> reg3.
:div R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x2C0 & R2731
{
	local quot:4 = R1115 s/ R0004;
	local mod:4 = R1115 s% R0004;
	# OV on INT_MIN / -1 overflow or on division by zero
	$(OV) = ((R1115 == 0x80000000 && R0004 == 0xFFFFFFFF) || R0004 == 0x0);
	# S and Z reflect the quotient written to reg2
	# (was computed on the pre-division value of reg2)
	set_Z(quot);
	set_S(quot);
	R1115 = quot;
	R2731 = mod;
}

# DIVH reg1, reg2 - rrrrr000010RRRRR
# Signed divide of reg2 by the sign-extended lower halfword of reg1.
# (Was an unsigned full-word divide, inconsistent with the three-operand
# DIVH below.)
:divh R0004, R1115 is op0510=0x02 & R0004 & R1115
{
	$(OV) = ((R1115 == 0x80000000 && R0004 == 0xFFFFFFFF) || R0004 == 0x0);
	R1115 = R1115 s/ sext(R0004:2);
	set_Z(R1115);
	set_S(R1115);
}

# DIVH reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01010000000
:divh R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x280 & R2731
{
	local quot:4 = R1115 s/ sext(R0004:2);
	local mod:4 = R1115 s% sext(R0004:2);
	$(OV) = ((R1115 == 0x80000000 && R0004 == 0xFFFFFFFF) || R0004 == 0x0);
	set_Z(quot);
	set_S(quot);
	R1115 = quot;
	R2731 = mod;
}

# DIVHU reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01010000010
# Unsigned divide: the halfword divisor is zero-extended (was sext) and
# OV tests the actual halfword divisor for zero.
:divhu R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x282 & R2731
{
	local quot:4 = R1115 / zext(R0004:2);
	local mod:4 = R1115 % zext(R0004:2);
	$(OV) = (R0004:2 == 0);
	set_Z(quot);
	set_S(quot);
	R1115 = quot;
	R2731 = mod;
}

# DIVU reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01011000010
:divu R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x2C2 & R2731
{
	local quot:4 = R1115 / R0004;
	local mod:4 = R1115 % R0004;
	$(OV) = (R0004 == 0);
	set_Z(quot);
	set_S(quot);
	R1115 = quot;
	R2731 = mod;
}

#####################################################
#####              HighSpeedDivide              #####
#####################################################

# DIVQ reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01011111100
:divq R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x2FC & R2731
{
	local quot:4 = R1115 s/ R0004;
	local mod:4 = R1115 s% R0004;
	$(OV) = ((R1115 == 0x80000000 && R0004 == 0xFFFFFFFF) || R0004 == 0x0);
	set_Z(quot);
	set_S(quot);
	R2731 = mod;
	R1115 = quot;
}

# DIVQU reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww01011111110
:divqu R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x2FE & R2731
{
	local quot:4 = R1115 / R0004;
	local mod:4 = R1115 % R0004;
	$(OV) = (R0004 == 0);
	set_Z(quot);
	set_S(quot);
	R2731 = mod;
	R1115 = quot;
}

================================================
FILE: pypcode/processors/V850/data/languages/Instructions/Float.sinc
================================================
#####################################################
#####                   Float                   #####
#####################################################

# ABSF.D reg2, reg3 - rrrr011111100000|wwww010001011000
:absf.d R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00000; R2731x2 & op2126=0b100010 & op1620=0b11000
{
	R2731x2 = abs(R1115x2);
}

# ABSF.S reg2, reg3 - rrrrr11111100000|wwwww10001001000
:absf.s R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00000; R2731 & op2126=0b100010 & op1620=0b01000
{
	R2731 = abs(R1115);
}

# ADDF.D reg1, reg2, reg3 - rrrr0111111RRRR0|wwww010001110000
:addf.d R0004x2, R1115x2, R2731x2 is R1115x2 & op0510=0x3F & R0004x2 ; R2731x2 & op2126=0b100011 & op1620=0b10000
{
	R2731x2 = R1115x2 f+ R0004x2;
}

# ADDF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10001100000
:addf.s R0004, R1115, R2731 is R1115 & op0510=0x3F & R0004 ; R2731 & op2126=0b100011 & op1620=0b00000
{
	R2731 = R1115 f+ R0004;
}

# CEILF.DL reg2, reg3 - rrrr011111100010|wwww010001010100
:ceilf.dl R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00010; R2731x2 & op2126=0b100010 & op1620=0b10100
{
	local var:8 = ceil(float2float(R1115x2));
	R2731x2 = trunc(var);
}

# CEILF.DUL reg2, reg3 - rrrr011111110010|wwww010001010100
:ceilf.dul R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b10010; R2731x2 & op2126=0b100010 & op1620=0b10100
{
	local var:8 = ceil(float2float(R1115x2));
	R2731x2 = trunc(var);
}

# CEILF.DUW reg2, reg3 - rrrrr11111110010|wwwww10001010000
:ceilf.duw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b10010; R2731 & op2126=0b100010 & op1620=0b10000
{
	R2731 = trunc(ceil(R1115x2));
}

# CEILF.DW reg2, reg3 - rrrrr11111100010|wwwww10001010000
:ceilf.dw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b00010; R2731 & op2126=0b100010 & op1620=0b10000
{
	R2731 = trunc(ceil(R1115x2));
}

# CEILF.SL reg2, reg3 - rrrrr11111100010|wwww010001000100
:ceilf.sl R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b00010; R2731x2 & op2126=0b100010 & op1620=0b00100
{
	local var:8 = ceil(float2float(R1115));
	R2731x2 = trunc(var);
}

# CEILF.SUL reg2, reg3 - rrrrr11111110010|wwwww10001000100
:ceilf.sul R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b10010; R2731x2 & op2126=0b100010 & op1620=0b00100
{
	local var:8 = ceil(float2float(R1115));
	R2731x2 = trunc(var);
}

# CEILF.SUW reg2, reg3 - rrrrr11111110010|wwwww10001000000
# Mnemonic fixed: was displayed as "ceilf.sul".
:ceilf.suw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b10010; R2731 & op2126=0b100010 & op1620=0b00000
{
	R2731 = trunc(ceil(R1115));
}

# CEILF.SW reg2, reg3 - rrrrr11111100010|wwwww10001000000
:ceilf.sw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00010; R2731 & op2126=0b100010 & op1620=0b00000
{
	R2731 = trunc(ceil(R1115));
}

# CMOVF.D fcbit, reg1, reg2, reg3 - rrrr0111111RRRR0|wwww01000001fff0
:cmovf.d fcbit1719, R1115x2, R0004x2, R2731x2 is R1115x2 & op0510=0x3F & R0004x2; R2731x2 & op2126=0b100000 & op2020=1 & fcbit1719 & op1616=0
{
	# CC0 = Bit 24
	local bit = (FPSR >> (fcbit1719 + 24:1)) & 0x1;
	either_or1(R2731x2, bit, R0004x2, R1115x2);
}

# CMOVF.S fcbit, reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww1000000fff0
:cmovf.s fcbit1719, R1115, R0004, R2731 is R1115 & op0510=0x3F & R0004; R2731 & op2126=0b100000 & op2020=0 & fcbit1719 & op1616=0
{
	local bit = (FPSR >> (fcbit1719 + 24:1)) & 0x1;
	either_or1(R2731, bit, R0004, R1115);
}

# CMPF.D fcond, reg2, reg1, fcbit - rrrr0111111RRRRR|0FFFF1000011fff0
:cmpf.d fcond2730, R1115x2, R0004x2, fcbit1719 is R1115x2 & op0510=0x3F & R0004x2; op3131=0 & fcond2730 & op2126=0b100001 & op2020=1 & fcbit1719 & op1616=0
{
	#0 = Unordered
	#1 = Equal to
	#2 = Less than
	#3 = Exception
	#bits = ex le eq un
	local bit:4 = 0;
	compare_float(bit, fcond2730:1, R0004x2, R1115x2);
	local pos:4 = bit << (fcbit1719 + 24);	#find position of the calculated bit
	local mask:4 = 1 << (fcbit1719 + 24);	#create mask to clean old bit in FPSR register
	FPSR = (FPSR & ~mask) | pos;	#set the new bit at the right position
}

# CMPF.S fcond, reg2, reg1, fcbit - rrrrr111111RRRRR|0FFFF1000010fff0
:cmpf.s fcond2730, R1115, R0004, fcbit1719 is R1115 & op0510=0x3F & R0004; op3131=0 & fcond2730 & op2126=0b100001 & op2020=0 & fcbit1719 & op1616=0
{
	local bit:4 = 0;
	compare_float(bit, fcond2730:1, R0004, R1115);
	local pos:4 = bit << (fcbit1719 + 24);	#find position of the calculated bit
	local mask:4 = 1 << (fcbit1719 + 24);	#create mask to clean old bit in FPSR register
	FPSR = (FPSR & ~mask) | pos;	#set the new bit at the right position
}

# CVTF.DL reg2, reg3 - rrrr011111100100|wwww010001010100
# double -> signed 64-bit fixed point.  Was int2float (the reverse
# conversion); use trunc like the other float->integer converts here.
:cvtf.dl R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00100; R2731x2 & op2126=0b100010 & op1620=0b10100
{
	R2731x2 = trunc(R1115x2);
}

# CVTF.DS reg2, reg3 - rrrr011111100011|wwww010001010010
:cvtf.ds R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b00011; R2731 & op2126=0b100010 & op1620=0b10010
{
	R2731 = float2float(R1115x2);
}

# CVTF.DUL reg2, reg3 - rrrr011111110100|wwww010001010100
:cvtf.dul R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b10100; R2731x2 & op2126=0b100010 & op1620=0b10100
{
	R2731x2 = trunc(R1115x2);
}

# CVTF.DUW reg2, reg3 - rrrrr11111110100|wwwww10001010000
:cvtf.duw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b10100; R2731 & op2126=0b100010 & op1620=0b10000
{
	R2731 = trunc(R1115x2);
}

# CVTF.DW reg2, reg3 - rrrrr11111100100|wwwww10001010000
# Mnemonic fixed: was displayed as "cvtf.sw".
:cvtf.dw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b00100; R2731 & op2126=0b100010 & op1620=0b10000
{
	R2731 = trunc(R1115x2);
}

# CVTF.LD reg2, reg3 - rrrr011111100001|wwww010001010010
# Mnemonic fixed: was displayed as "cvtf.ls".
:cvtf.ld R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00001; R2731x2 & op2126=0b100010 & op1620=0b10010
{
	R2731x2 =
int2float(R1115x2); }

# CVTF.LS reg2, reg3 - rrrr011111100001|wwwww10001000010
:cvtf.ls R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b00001; R2731 & op2126=0b100010 & op1620=0b00010
{
	R2731 = int2float(R1115x2);
}

# CVTF.SD reg2, reg3 - rrrrr11111100010|wwww010001010010
:cvtf.sd R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b00010; R2731x2 & op2126=0b100010 & op1620=0b10010
{
	R2731x2 = float2float(R1115);
}

# CVTF.SL reg2, reg3 - rrrrr11111100100|wwwww10001000100
:cvtf.sl R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b00100; R2731x2 & op2126=0b100010 & op1620=0b00100
{
	R2731x2 = trunc(R1115);
}

# CVTF.SUL reg2, reg3 - rrrrr11111110100|wwwww10001000100
:cvtf.sul R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b10100; R2731x2 & op2126=0b100010 & op1620=0b00100
{
	R2731x2 = trunc(R1115);
}

# CVTF.SUW reg2, reg3 - rrrrr11111110100|wwwww10001000000
:cvtf.suw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b10100; R2731 & op2126=0b100010 & op1620=0b00000
{
	R2731 = trunc(R1115);
}

# CVTF.SW reg2, reg3 - rrrrr11111100100|wwwww10001000000
:cvtf.sw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00100; R2731 & op2126=0b100010 & op1620=0b00000
{
	R2731 = trunc(R1115);
}

# CVTF.ULD reg2, reg3 - rrrr011111110001|wwww010001010010
# Mnemonic fixed: was displayed as "cvtf.uls".
:cvtf.uld R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b10001; R2731x2 & op2126=0b100010 & op1620=0b10010
{
	R2731x2 = int2float(R1115x2);
}

# CVTF.ULS reg2, reg3 - rrrr011111110001|wwwww10001000010
:cvtf.uls R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b10001; R2731 & op2126=0b100010 & op1620=0b00010
{
	R2731 = int2float(R1115x2);
}

# CVTF.UWD reg2, reg3 - rrrrr11111110000|wwwww10001010010
# NOTE(review): int2float is a signed conversion; an unsigned source with
# bit 31 set converts incorrectly — confirm whether this approximation
# is acceptable.
:cvtf.uwd R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b10000; R2731x2 & op2126=0b100010 & op1620=0b10010
{
	R2731x2 = int2float(R1115);
}

# CVTF.UWS reg2, reg3 - rrrrr11111110000|wwwww10001000010
:cvtf.uws R1115, R2731 is R1115 & op0510=0x3F & op0004=0b10000; R2731 & op2126=0b100010 & op1620=0b00010
{
	R2731 = int2float(R1115);
}

# CVTF.WD reg2, reg3 - rrrrr11111100000|wwwww10001010010
:cvtf.wd R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b00000; R2731x2 & op2126=0b100010 & op1620=0b10010
{
	R2731x2 = int2float(R1115);
}

# CVTF.WS reg2, reg3 - rrrrr11111100000|wwwww10001000010
:cvtf.ws R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00000; R2731 & op2126=0b100010 & op1620=0b00010
{
	R2731 = int2float(R1115);
}

# DIVF.D reg1, reg2, reg3 - rrrr0111111RRRR0|wwww010001111110
# Mnemonic fixed: was displayed as "divf.s".
:divf.d R0004x2, R1115x2, R2731x2 is R0004x2 & op0510=0x3F & R1115x2; R2731x2 & op2126=0b100011 & op1620=0b11110
{
	R2731x2 = R1115x2 f/ R0004x2;
}

# DIVF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10001101110
:divf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100011 & op1620=0b01110
{
	R2731 = R1115 f/ R0004;
}

# FLOORF.DL reg2, reg3 - rrrr011111100011|wwww010001010100
:floorf.dl R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00011; R2731x2 & op2126=0b100010 & op1620=0b10100
{
	local var:8 = floor(float2float(R1115x2));
	R2731x2 = trunc(var);
}

# FLOORF.DUL reg2, reg3 - rrrr011111110011|wwww010001010100
:floorf.dul R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b10011; R2731x2 & op2126=0b100010 & op1620=0b10100
{
	local var:8 = floor(float2float(R1115x2));
	R2731x2 = trunc(var);
}

# FLOORF.DUW reg2, reg3 - rrrrr11111110011|wwwww10001010000
:floorf.duw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b10011; R2731 & op2126=0b100010 & op1620=0b10000
{
	R2731 = trunc(floor(R1115x2));
}

# FLOORF.DW reg2, reg3 - rrrrr11111100011|wwwww10001010000
:floorf.dw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b00011; R2731 & op2126=0b100010 & op1620=0b10000
{
	R2731 = trunc(floor(R1115x2));
}

# FLOORF.SL reg2, reg3 - rrrrr11111100011|wwww010001000100
:floorf.sl R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b00011; R2731x2 & op2126=0b100010 & op1620=0b00100
{
	local var:8 = floor(float2float(R1115));
	R2731x2 = trunc(var);
}

# FLOORF.SUL reg2, reg3 - rrrrr11111110011|wwwww10001000100
:floorf.sul R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b10011; R2731x2 & op2126=0b100010 & op1620=0b00100
{
	local var:8 = floor(float2float(R1115));
	R2731x2 = trunc(var);
}

# FLOORF.SUW reg2, reg3 - rrrrr11111110011|wwwww10001000000
:floorf.suw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b10011; R2731 & op2126=0b100010 & op1620=0b00000
{
	R2731 = trunc(floor(R1115));
}

# FLOORF.SW reg2, reg3 - rrrrr11111100011|wwwww10001000000
# Mnemonic fixed: was displayed as "floorf.suw".
:floorf.sw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00011; R2731 & op2126=0b100010 & op1620=0b00000
{
	R2731 = trunc(floor(R1115));
}

# FMAF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10011100000
:fmaf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100111 & op1620=0b00000
{
	R2731 = (R1115 f* R0004) f+ R2731;
}

# FMSF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10011100010
:fmsf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100111 & op1620=0b00010
{
	R2731 = (R1115 f* R0004) f- R2731;
}

# FNMAF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10011100100
:fnmaf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100111 & op1620=0b00100
{
	R2731 = -1 f* ((R1115 f* R0004) f+ R2731);
}

# FNMSF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10011100110
# Mnemonic fixed: was displayed as "fnmfs.s".
:fnmsf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100111 & op1620=0b00110
{
	R2731 = -1 f* ((R1115 f* R0004) f- R2731);
}

# MADDF.S reg1, reg2, reg3, reg4 - rrrrr111111RRRRR|wwwww101W00WWWW0
:maddf.s R0004, R1115, R2731, reg4 is R0004 & op0510=0x3F & R1115; R2731 & op2426=0b101 & op2122=0b00 & op1616=0 & reg4
{
	reg4 = (R1115 f* R0004) f+ R2731;
}

# MAXF.D reg1, reg2, reg3 - rrrr0111111RRRR0|wwww010001111000
:maxf.d R0004x2, R1115x2, R2731x2 is R0004x2 & op0510=0x3F & R1115x2; R2731x2 & op2126=0b100011 & op1620=0b11000
{
	local bigger:1 = R1115x2 f> R0004x2;
	either_or(R2731x2, bigger, R1115x2, R0004x2);
}

# MAXF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10001101000
:maxf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 &
op2126=0b100011 & op1620=0b01000
{
	local bigger:1 = R1115 f> R0004;
	either_or(R2731, bigger, R1115, R0004);
}

# MINF.D reg1, reg2, reg3 - rrrr0111111RRRR0|wwww010001111010
:minf.d R0004x2, R1115x2, R2731x2 is R0004x2 & op0510=0x3F & R1115x2; R2731x2 & op2126=0b100011 & op1620=0b11010
{
	# local renamed from "bigger": the flag is true when reg2 is SMALLER
	local smaller:1 = R1115x2 f< R0004x2;
	either_or(R2731x2, smaller, R1115x2, R0004x2);
}

# MINF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10001101010
:minf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100011 & op1620=0b01010
{
	local smaller:1 = R1115 f< R0004;
	either_or(R2731, smaller, R1115, R0004);
}

# MSUBF.S reg1, reg2, reg3, reg4 - rrrrr111111RRRRR|wwwww101W01WWWW0
:msubf.s R0004, R1115, R2731, reg4 is R0004 & op0510=0x3F & R1115; R2731 & op2426=0b101 & op2122=0b01 & op1616=0 & reg4
{
	reg4 = (R1115 f* R0004) f- R2731;
}

# MULF.D reg1, reg2, reg3 - rrrr0111111RRRR0|wwww010001110100
:mulf.d R0004x2, R1115x2, R2731x2 is R0004x2 & op0510=0x3F & R1115x2; R2731x2 & op2126=0b100011 & op1620=0b10100
{
	R2731x2 = R1115x2 f* R0004x2;
}

# MULF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10001100100
:mulf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100011 & op1620=0b00100
{
	R2731 = R1115 f* R0004;
}

# NEGF.D reg2, reg3 - rrrr011111100001|wwww010001011000
:negf.d R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00001; R2731x2 & op2126=0b100010 & op1620=0b11000
{
	R2731x2 = f- R1115x2;
}

# NEGF.S reg2, reg3 - rrrrr11111100001|wwwww10001001000
:negf.s R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00001; R2731 & op2126=0b100010 & op1620=0b01000
{
	R2731 = f- R1115;
}

# NMADDF.S reg1, reg2, reg3, reg4 - rrrrr111111RRRRR|wwwww101W10WWWW0
:nmaddf.s R0004, R1115, R2731, reg4 is R0004 & op0510=0x3F & R1115; R2731 & op2426=0b101 & op2122=0b10 & op1616=0 & reg4
{
	reg4 = f-((R1115 f* R0004) f+ R2731);
}

# NMSUBF.S reg1, reg2, reg3, reg4 - rrrrr111111RRRRR|wwwww101W11WWWW0
:nmsubf.s R0004, R1115, R2731, reg4 is R0004 & op0510=0x3F & R1115; R2731 & op2426=0b101 & op2122=0b11 & op1616=0 & reg4
{
	reg4 = f-((R1115 f* R0004) f- R2731);
}

# RECIPF.D reg2, reg3 - rrrr011111100001|wwww010001011110
:recipf.d R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00001; R2731x2 & op2126=0b100010 & op1620=0b11110
{
	R2731x2 = 1 f/ R1115x2;
}

# RECIPF.S reg2, reg3 - rrrrr11111100001|wwwww10001001110
:recipf.s R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00001; R2731 & op2126=0b100010 & op1620=0b01110
{
	R2731 = 1 f/ R1115;
}

# RSQRTF.D reg2, reg3 - rrrr011111100010|wwww010001011110
:rsqrtf.d R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00010; R2731x2 & op2126=0b100010 & op1620=0b11110
{
	R2731x2 = 1 f/ sqrt(R1115x2);
}

# RSQRTF.S reg2, reg3 - rrrrr11111100010|wwwww10001001110
:rsqrtf.s R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00010; R2731 & op2126=0b100010 & op1620=0b01110
{
	R2731 = 1 f/ sqrt(R1115);
}

# SQRTF.D reg2, reg3 - rrrr011111100000|wwww010001011110
:sqrtf.d R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b00000; R2731x2 & op2126=0b100010 & op1620=0b11110
{
	R2731x2 = sqrt(R1115x2);
}

# SQRTF.S reg2, reg3 - rrrrr11111100000|wwwww10001001110
:sqrtf.s R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00000; R2731 & op2126=0b100010 & op1620=0b01110
{
	R2731 = sqrt(R1115);
}

# SUBF.D reg1, reg2, reg3 - rrrr0111111RRRR0|wwww010001110010
:subf.d R0004x2, R1115x2, R2731x2 is R0004x2 & op0510=0x3F & R1115x2; R2731x2 & op2126=0b100011 & op1620=0b10010
{
	R2731x2 = R1115x2 f- R0004x2;
}

# SUBF.S reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww10001100010
:subf.s R0004, R1115, R2731 is R0004 & op0510=0x3F & R1115; R2731 & op2126=0b100011 & op1620=0b00010
{
	R2731 = R1115 f- R0004;
}

# TRFSR fcbit - 0000011111100000|000001000000fff0
:trfsr fcbit1719 is op1115=0 & op0510=0x3F & op0004=0; op2731=0 & op2126=0b100000 & op2020=0 & fcbit1719 & op1616=0
{
	# copy the selected FPSR condition bit (bits 24..27) into Z
	local var:4 = FPSR & (1 << (fcbit1719 + 24));
	$(Z) = (var != 0);
}

# TRNCF.DL reg2, reg3 - rrrr011111100001|wwww010001010100
:trncf.dl R1115x2, R2731x2 is R1115x2 & op0510=0x3F
& op0004=0b00001; R2731x2 & op2126=0b100010 & op1620=0b10100 { R2731x2 = trunc(R1115x2); } # TRNCF.DUL reg2, reg3 - rrrr011111110001|wwww010001010100 :trncf.dul R1115x2, R2731x2 is R1115x2 & op0510=0x3F & op0004=0b10001; R2731x2 & op2126=0b100010 & op1620=0b10100 { R2731x2 = trunc(R1115x2); } # TRNCF.DUW reg2, reg3 - rrrrr11111110001|wwwww10001010000 :trncf.duw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b10001; R2731 & op2126=0b100010 & op1620=0b10000 { R2731 = trunc(R1115x2); } # TRNCF.DW reg2, reg3 - rrrrr11111100001|wwwww10001010000 :trncf.dw R1115x2, R2731 is R1115x2 & op0510=0x3F & op0004=0b00001; R2731 & op2126=0b100010 & op1620=0b10000 { R2731 = trunc(R1115x2); } # TRNCF.SL reg2, reg3 - rrrrr11111100001|wwww010001000100 :trncf.sl R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b00001; R2731x2 & op2126=0b100010 & op1620=0b00100 { R2731x2 = trunc(R1115); } # TRNCF.SUL reg2, reg3 - rrrrr11111110001|wwww010001000100 :trncf.sul R1115, R2731x2 is R1115 & op0510=0x3F & op0004=0b10001; R2731x2 & op2126=0b100010 & op1620=0b00100 { R2731x2 = trunc(R1115); } # TRNCF.SUW reg2, reg3 - rrrrr11111110001|wwwww10001000000 :trncf.suw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b10001; R2731 & op2126=0b100010 & op1620=0b00000 { R2731 = trunc(R1115); } # TRNCF.SW reg2, reg3 - rrrrr11111100001|wwwww10001000000 :trncf.sw R1115, R2731 is R1115 & op0510=0x3F & op0004=0b00001; R2731 & op2126=0b100010 & op1620=0b00000 { R2731 = trunc(R1115); } ================================================ FILE: pypcode/processors/V850/data/languages/Instructions/Load_Store.sinc ================================================ # (1) Load instructions # (2) Store instructions # (9) Data manipulation instructions ##################################################### ##### Load ##### ##################################################### # LD.B disp16[reg1], reg2 - rrrrr111000RRRRR|dddddddddddddddd :ld.b s1631[R0004], R1115 is op0510=0x38 & R0004 & R1115; s1631 { local adr:4 = R0004 + s1631; 
R1115 = sext(*:1 adr); } # LD.B disp23[reg1], reg3 - rrrrr111100RRRRR|wwwwwddddddd0101|DDDDDDDDDDDDDDDD :ld.b disp23[R0004], R2731 is op0515=0x3C & R0004; R2731 & op2026 & op1619=0x5; s3247 [ disp23 = (s3247 << 7) | op2026; ] { local adr:4 = R0004 + disp23; R2731 = sext(*:1 adr); } # LD.BU disp16[reg1], reg2 - rrrrr11110bRRRRR|ddddddddddddddd1 :ld.bu disp16[R0004], R1115 is op0610=0x1E & R0004 & R1115 & op0505; op1616=0x1 & s1731 [ disp16 = (s1731 << 1) | op0505; ] { local adr:4 = R0004 + disp16; R1115 = zext(*:1 adr); } # LD.BU disp23[reg1], reg3 - 00000111101RRRRR|wwwwwddddddd0101|DDDDDDDDDDDDDDDD :ld.bu disp23[R0004], R2731 is op0515=0x3D & R0004; R2731 & op2026 & op1619=0x5; s3247 [ disp23 = (s3247 << 7) | op2026; ] { local adr:4 = R0004 + disp23; R2731 = zext(*:1 adr); } # LD.H disp16[reg1], reg2 - rrrrr111001RRRRR|ddddddddddddddd0 :ld.h s1631[R0004], R1115 is op0510=0x39 & R0004 & R1115; s1631 & op1616=0x0 { local adr:4 = R0004 + s1631; R1115 = sext(*:2 adr); } # LD.H disp23[reg1], reg3 - 00000111100RRRRR|wwwwwdddddd00111|DDDDDDDDDDDDDDDD :ld.h disp23[R0004], R2731 is op0515=0x3C & R0004; R2731 & op2126 & op1620=0x7; s3247 [ disp23 = (s3247 << 7) | (op2126 << 1); ] { local adr:4 = R0004 + disp23; R2731 = sext(*:2 adr); } # LD.HU disp16[reg1], reg2 - rrrrr111111RRRRR|ddddddddddddddd1 :ld.hu disp16[R0004], R1115 is op0510=0x3F & R0004 & R1115; op1616=0x1 & s1731 [ disp16 = s1731 << 1; ] { local adr:4 = R0004 + disp16; R1115 = zext(*:2 adr); } # LD.HU disp23[reg1], reg3 - 00000111101RRRRR|wwwwwdddddd00111|DDDDDDDDDDDDDDDD :ld.hu disp23[R0004], R2731 is op0515=0x3D & R0004; R2731 & op2026 & op1619=0x7; s3247 [ disp23 = (s3247 << 7) | op2026; ] { local adr:4 = R0004 + disp23; R2731 = zext(*:2 adr); } # LD.W disp16[reg1], reg2 - rrrrr111001RRRRR|ddddddddddddddd1 :ld.w disp16[R0004], R1115 is op0510=0x39 & R0004 & R1115; s1731 & op1616=0x1 [ disp16 = s1731 * 2; ] { local adr:4 = R0004 + disp16; R1115 = *:4 adr; } # LD.W disp23[reg1], reg3 - 
00000111100RRRRR|wwwwwdddddd01001|DDDDDDDDDDDDDDDD :ld.w disp23[R0004], R2731 is op0515=0x03C & R0004; R2731 & op2126 & op1620=0x9; s3247 [ disp23 = (s3247 << 7) | (op2126 << 1); ] { local adr:4 = R0004 + disp23; R2731 = *:4 adr; } # SLD.B disp7[ep], reg2 - rrrrr0110ddddddd :sld.b op0006[ep], R1115 is op0710=0x06 & op0006 & R1115 & ep { local adr:4 = ep + op0006; R1115 = sext(*:1 adr); } # SLD.BU disp4[ep], reg2 - rrrrr0000110dddd :sld.bu op0003[ep], R1115 is op0410=0x06 & R1115 & op0003 & ep { local adr:4 = ep + op0003; R1115 = zext(*:1 adr); } # SLD.H disp8[ep], reg2 - rrrrr1000ddddddd :sld.h disp8[ep], R1115 is op0710=0x08 & op0006 & R1115 & ep [ disp8 = op0006 * 2; ] { local adr:4 = ep + disp8; R1115 = sext(*:2 adr); } # SLD.HU disp5[ep], reg2 - rrrrr0000111dddd :sld.hu disp5[ep], R1115 is op0410=0x07 & R1115 & op0003 & ep [ disp5 = op0003 * 2; ] { local adr:4 = ep + disp5; R1115 = zext(*:2 adr); } # SLD.W disp8[ep], reg2 - rrrrr1010dddddd0 :sld.w disp8[ep], R1115 is op0710=0x0A & op0000=0x0 & op0106 & R1115 & ep [ disp8 = op0106 * 4; ] { local adr:4 = ep + disp8; R1115 = *:4 adr; } ##################################################### ##### Store ##### ##################################################### # SST.B reg2, disp7[ep] - rrrrr0111ddddddd :sst.b R1115, op0006[ep] is op0710=0x07 & op0006 & R1115 & ep { local adr:4 = ep + op0006; local tmp:4 = R1115; *:1 adr = tmp:1; } # SST.H reg2, disp8[ep] - rrrrr1001ddddddd :sst.h R1115, disp8[ep] is op0710=0x09 & op0006 & R1115 & ep [ disp8 = op0006 * 2; ] { local adr:4 = ep + disp8; local tmp:4 = R1115; *:2 adr = tmp:2; } # SST.W reg2, disp8[ep] - rrrrr1010dddddd1 :sst.w R1115, disp8[ep] is op0710=0x0A & op0000=0x1 & op0106 & R1115 & ep [ disp8 = op0106 * 4; ] { local adr:4 = ep + disp8; local tmp:4 = R1115; *:4 adr = tmp; } # ST.B reg2, disp16[reg1] - rrrrr111010RRRRR|dddddddddddddddd :st.b R1115, s1631[R0004] is op0510=0x3A & R0004 & R1115; s1631 { local adr:4 = R0004 + s1631; local tmp:4 = R1115; *:1 adr = 
tmp:1; }

# ST.B reg3, disp23[reg1] - 00000111100RRRRR|dddddddddddddddd
:st.b R2731, disp23[R0004] is op0515=0x3C & R0004; R2731 & op2026 & op1619=0xD; s3247 [ disp23 = (s3247 << 7) | op2026; ]
{
	local adr:4 = R0004 + disp23;
	local tmp:4 = R2731;
	*:1 adr = tmp:1;
}

# ST.H reg2, disp16[reg1] - rrrrr111011RRRRR|ddddddddddddddd0
:st.h R1115, s1631[R0004] is op0510=0x3B & R0004 & R1115; s1631 & op1616=0x0
{
	local adr:4 = R0004 + s1631;
	local tmp:4 = R1115;
	*:2 adr = tmp:2;
}

# ST.H reg3, disp23[reg1] - 00000111101RRRRR|wwwwwdddddd01101|DDDDDDDDDDDDDDDD
:st.h R2731, disp23[R0004] is op0515=0x3D & R0004; R2731 & op2126 & op1620=0xD; s3247 [ disp23 = (s3247 << 7) | (op2126 << 1); ]
{
	local adr:4 = R0004 + disp23;
	local tmp:4 = R2731;
	*:2 adr = tmp:2;
}

# ST.W reg2, disp16[reg1] - rrrrr111011RRRRR|ddddddddddddddd1
:st.w R1115, disp16[R0004] is op0510=0x3B & R0004 & R1115; s1731 & op1616=0x1 [ disp16 = s1731 * 2; ]
{
	local adr:4 = R0004 + disp16;
	local tmp:4 = R1115;
	*:4 adr = tmp;
}

# ST.W reg3, disp23[reg1] - 00000111100RRRRR|wwwwwdddddd01111|DDDDDDDDDDDDDDDD
# Store size fixed: ST.W writes a full 32-bit word (was a 2-byte store),
# matching the disp16 form above.
:st.w R2731, disp23[R0004] is op0515=0x3C & R0004; R2731 & op2126 & op1620=0xF; s3247 [ disp23 = (s3247 << 7) | (op2126 << 1); ]
{
	local adr:4 = R0004 + disp23;
	local tmp:4 = R2731;
	*:4 adr = tmp;
}

#####################################################
#####             DataManipulation              #####
#####################################################

# BSH reg2, reg3 - rrrrr11111100000|wwwww01101000010
:bsh R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x342 & R2731
{
	local x1 = R1115[0,8];
	local x2 = R1115[8,8];
	local x3 = R1115[16,8];
	local x4 = R1115[24,8];
	# zero-extend BEFORE shifting: the x's are one byte wide, so shifting
	# them by 8/16/24 in their own width always produced 0
	R2731 = (zext(x3) << 24) | (zext(x4) << 16) | (zext(x1) << 8) | zext(x2);
	set_S(R2731);
	$(OV) = 0;
	$(Z) = (x1 == 0) && (x2 == 0);
	$(CY) = (x1 == 0) || (x2 == 0);
}

# BSW reg2, reg3 - rrrrr11111100000|wwwww01101000000
:bsw R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x340 & R2731
{
	local x1 = R1115[0,8];
	local x2 = R1115[8,8];
	local x3 = R1115[16,8];
	local x4 = R1115[24,8];
	R2731 =
(zext(x1) << 24) | (zext(x2) << 16) | (zext(x3) << 8) | zext(x4);
	# bytes are zero-extended BEFORE shifting: they are one byte wide, so
	# shifting them by 8/16/24 in their own width always produced 0
	set_OV0_S_Z(R2731);
	$(CY) = (x1 == 0) || (x2 == 0) || (x3 == 0) || (x4 == 0);
}

# CMOV cccc, reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww011001cccc0
:cmov^c1720 R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op2126=0x19 & op1616=0 & c1720 & R2731
{
	# branch-free select: reg3 = cond ? reg1 : reg2
	local result:4 = zext(c1720);
	R2731 = R0004 * zext(result != 0) + R1115 * zext(result == 0);
}

# CMOV cccc, imm5, reg2, reg3 - rrrrr111111iiiii|wwwww011000cccc0
:cmov^c1720 s0004, R1115, R2731 is op0510=0x3F & s0004 & R1115; op2126=0x18 & op1616=0 & c1720 & R2731
{
	local result:4 = zext(c1720);
	R2731 = s0004 * zext(result != 0) + R1115 * zext(result == 0);
}

# HSH reg2, reg3 - rrrrr11111100000|wwwww01101000110
:hsh R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x346 & R2731
{
	R2731 = R1115;
	set_S(R2731);
	$(OV) = 0;
	$(Z) = (R2731:2 == 0);
	$(CY) = $(Z);
}

# HSW reg2, reg3 - rrrrr11111100000|wwwww01101000100
# Halfword swap; the halfwords are zero-extended BEFORE shifting (a
# 2-byte value shifted left by 16 in its own width is always 0).
:hsw R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x344 & R2731
{
	local x1 = R1115:2;
	local x2 = R1115[16,16];
	R2731 = (zext(x1) << 16) | zext(x2);
	set_OV0_S_Z(R2731);
	$(CY) = (x1 == 0) || (x2 == 0);
}

# SAR reg1, reg2 - rrrrr111111RRRRR|0000000010100000
:sar R0004, R1115 is op0510=0x3F & R0004 & R1115; op1631=0xA0
{
	shift_right_arith(R1115, R1115, R0004);
}

# SAR imm5, reg2 - rrrrr010101iiiii
:sar op0004, R1115 is op0510=0x15 & op0004 & R1115
{
	shift_right_arith(R1115, R1115, op0004:5);
}

# SAR reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww00010100010
:sar R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0xA2 & R2731
{
	shift_right_arith(R2731, R1115, R0004);
}

# SASF cccc, reg2 - rrrrr1111110cccc|0000001000000000
:sasf^c0003 R1115 is op0410=0x7E & c0003 & R1115; op1631=0x0200
{
	R1115 = (R1115 << 1) | zext(c0003);
}

# SETF cond, reg2 - rrrrr1111110cccc|0000000000000000
:setf^c0003 R1115 is op0410=0x7E & c0003 & R1115; op1631=0x0
{
	R1115 = zext(c0003);
}

# SHL reg1, reg2 - rrrrr111111RRRRR|0000000011000000
:shl R0004, R1115 is op0510=0x3F & R0004 & R1115; op1631=0xC0
{
	shift_left_logic(R1115, R1115, R0004);
}

# SHL imm5, reg2 - rrrrr010110iiiii
:shl op0004, R1115 is op0510=0x16 & op0004 & R1115
{
	shift_left_logic(R1115, R1115, op0004:5);
}

# SHL reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww00011000010
:shl R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0xC2 & R2731
{
	shift_left_logic(R2731, R1115, R0004);
}

# SHR reg1, reg2 - rrrrr111111RRRRR|0000000010000000
:shr R0004, R1115 is op0510=0x3F & R0004 & R1115; op1631=0x80
{
	shift_right_logic(R1115, R1115, R0004);
}

# SHR imm5, reg2 - rrrrr010100iiiii
:shr op0004, R1115 is op0510=0x14 & op0004 & R1115
{
	shift_right_logic(R1115, R1115, op0004:5);
}

# SHR reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww00010000010
:shr R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0x82 & R2731
{
	shift_right_logic(R2731, R1115, R0004);
}

# SXB reg1 - 00000000101RRRRR
:sxb R0004 is op0515=0x05 & R0004
{
	R0004 = sext(R0004:1);
}

# SXH reg1 - 00000000111RRRRR
:sxh R0004 is op0515=0x07 & R0004
{
	R0004 = sext(R0004:2);
}

# ZXB reg1 - 00000000100RRRRR
:zxb R0004 is op0515=0x004 & R0004
{
	R0004 = zext(R0004:1);
}

# ZXH reg1 - 00000000110RRRRR
:zxh R0004 is op0515=0x006 & R0004
{
	R0004 = zext(R0004:2);
}

================================================
FILE: pypcode/processors/V850/data/languages/Instructions/Logic.sinc
================================================
# (6) Conditional arithmetic instructions
# (8) Logical instructions
# (14) Bit manipulation instructions

#####################################################
#####                Conditional                #####
#####################################################

# ADF cccc, reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww011101cccc0
:adf^c1720 R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op2126=0x1D & op1616=0 & c1720 & R2731
{
	local cond = zext(c1720);
	set_OV_pos2(R1115, R0004, cond);
	set_CY_pos2(R1115, R0004, cond);
	R2731 = R1115 + R0004 + cond;
	set_S(R2731);
	set_Z(R2731);
}

# SBF cccc, reg1, reg2, reg3 - rrrrr111111RRRRR|wwwww011100cccc0
:sbf^c1720 R0004, R1115, R2731 is op0510=0x3F & R0004 & R1115; op2126=0x1C & op1616=0 & c1720 & R2731 { local cond = zext(c1720); set_OV_neg2(R1115, R0004, cond); set_CY_neg2(R1115, R0004, cond); R2731 = R1115 - R0004 - cond; set_S(R2731); set_Z(R2731); } ##################################################### ##### Logic ##### ##################################################### # AND reg1, reg2 - rrrrr001010RRRRR :and R0004, R1115 is op0510=0x0A & R0004 & R1115 { R1115 = R1115 & R0004; set_OV0_S_Z(R1115); } # ANDI imm16, reg1, reg2 - rrrrr110110RRRRR|iiiiiiiiiiiiiiii :andi op1631, R0004, R1115 is op0510=0x36 & R1115 & R0004; op1631 { R1115 = R0004 & op1631; set_OV0_S_Z(R1115); } # NOT reg1, reg2 - rrrrr000001RRRRR :not R0004, R1115 is op0510=0x01 & R0004 & R1115 { R1115 = ~R0004; set_OV0_S_Z(R1115); } # OR reg1, reg2 - rrrrr001000RRRRR :or R0004, R1115 is op0510=0x08 & R0004 & R1115 { R1115 = R1115 | R0004; set_OV0_S_Z(R1115); } # ORI imm16, reg1, reg2 - rrrrr110100RRRRR|iiiiiiiiiiiiiiii :ori op1631, R0004, R1115 is op0510=0x34 & R1115 & R0004; op1631 { R1115 = R0004 | op1631; set_OV0_S_Z(R1115); } # TST reg1, reg2 - rrrrr001011RRRRR :tst R0004, R1115 is op0510=0x0B & R0004 & R1115 { set_OV0_S_Z(R1115 & R0004); } # XOR reg1, reg2 - rrrrr001001RRRRR :xor R0004, R1115 is op0510=0x09 & R0004 & R1115 { R1115 = R1115 ^ R0004; set_OV0_S_Z(R1115); } # XORI imm16, reg1, reg2 - rrrrr110101RRRRR|iiiiiiiiiiiiiiii :xori op1631, R0004, R1115 is op0510=0x35 & R1115 & R0004; op1631 { R1115 = R0004 ^ op1631; set_OV0_S_Z(R1115); } ##################################################### ##### BitManipulation ##### ##################################################### # CLR1 bit#3, disp16[reg1] - 10bbb111110RRRRR|dddddddddddddddd :clr1 op1113, s1631[R0004] is op0510=0x3E & op1415=2 & op1113 & R0004; s1631 { local adr:4 = R0004 + s1631; local tkn = *:1 adr; *:1 adr = tkn & ~(1 << op1113); set_Z(tkn & (1 << op1113)); } # CLR1 reg2, [reg1] - rrrrr111111RRRRR|0000000011100100 :clr1 R1115, 
[R0004] is op0510=0x3F & R0004 & R1115; op1631=0xE4
{
	local tkn = *:1 R0004;
	*:1 R0004 = tkn & ~(1 << R1115);
	set_Z(tkn & (1 << R1115));
}

# NOT1 bit#3, disp16[reg1] - 01bbb111110RRRRR|dddddddddddddddd
:not1 op1113, s1631[R0004] is op0510=0x3E & op1415=1 & op1113 & R0004; s1631
{
	local adr:4 = R0004 + s1631;
	local tkn = *:1 adr;
	*:1 adr = tkn ^ (1 << op1113);
	set_Z(tkn & (1 << op1113));
}

# NOT1 reg2, [reg1] - rrrrr111111RRRRR|0000000011100010
:not1 R1115, [R0004] is op0510=0x3F & R0004 & R1115; op1631=0xE2
{
	local tkn = *:1 R0004;
	*:1 R0004 = tkn ^ (1 << R1115);
	set_Z(tkn & (1 << R1115));
}

# SET1 bit#3, disp16[reg1] - 00bbb111110RRRRR|dddddddddddddddd
:set1 op1113, s1631[R0004] is op0510=0x3E & op1415=0 & op1113 & R0004; s1631
{
	local adr:4 = R0004 + s1631;
	local tkn = *:1 adr;
	*:1 adr = tkn | (1 << op1113);
	set_Z(tkn & (1 << op1113));
}

# SET1 reg2, [reg1] - rrrrr111111RRRRR|0000000011100000
:set1 R1115, [R0004] is op0510=0x3F & R0004 & R1115; op1631=0xE0
{
	local tkn = *:1 R0004;
	*:1 R0004 = tkn | (1 << R1115);
	set_Z(tkn & (1 << R1115));
}

# TST1 bit#3, disp16[reg1] - 11bbb111110RRRRR|dddddddddddddddd
:tst1 op1113, s1631[R0004] is op0510=0x3E & op1415=3 & op1113 & R0004; s1631
{
	local adr:4 = R0004 + s1631;
	local tkn = *:1 adr;
	set_Z(tkn & (1 << op1113));
}

# TST1 reg2, [reg1] - rrrrr111111RRRRR|0000000011100110
# Operand display fixed to match the other bit instructions: the bit
# number comes from reg2 (R1115) and the address from reg1 (R0004) — the
# semantics below already read *R0004 and shift by R1115.
:tst1 R1115, [R0004] is op0510=0x3F & R0004 & R1115; op1631=0xE6
{
	local tkn = *:1 R0004;
	set_Z(tkn & (1 << R1115));
}

================================================
FILE: pypcode/processors/V850/data/languages/Instructions/Special.sinc
================================================
# (10) Bit search instructions
# (13) Branch instructions
# (15) Special instructions

#####################################################
#####                 BitSearch                 #####
#####################################################

# SCH0L reg2, reg3 - rrrrr11111100000|wwwww01101100100
:sch0l R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x364 & R2731
{
	SearchLeft(R2731, R1115, 0);
	$(CY)
= (R1115 != 0xFFFFFFFF); # zero bit found $(Z) = (R1115 == 0xFFFFFFFF); # zero bit not found } # SCH0R reg2, reg3 - rrrrr11111100000|wwwww01101100000 :sch0r R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x360 & R2731 { SearchRight(R2731, R1115, 0); $(CY) = (R1115 != 0xFFFFFFFF); # zero bit found $(Z) = (R1115 == 0xFFFFFFFF); # zero bit not found } # SCH1L reg2, reg3 - rrrrr11111100000|wwwww01101100110 :sch1l R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x366 & R2731 { SearchLeft(R2731, R1115, 1); $(CY) = (R1115 != 0x0); # one bit found $(Z) = (R1115 == 0x0); # one bit not found } # SCH1R reg2, reg3 :sch1r R1115, R2731 is op0010=0x7E0 & R1115; op1626=0x362 & R2731 { SearchRight(R2731, R1115, 1); $(CY) = (R1115 != 0x0); # one bit found $(Z) = (R1115 == 0x0); # one bit not found } ##################################################### ##### Branch ##### ##################################################### #Bcond adr9 - ddddd1011dddcccc :b^c0003 adr9 is op0710=0xB & c0003 & adr9 { if (c0003) goto adr9; } :br adr9 is op0710=0xB & op0003=0x5 & adr9 { goto adr9; } # JARL disp22, reg2 - rrrrr11110dddddd|ddddddddddddddd0 :jarl adr22, R1115 is (op0610=0x1E & R1115) ... & adr22 { R1115 = inst_next; call adr22; } # JARL disp32, reg1 - 00000010111RRRRR|ddddddddddddddd0|DDDDDDDDDDDDDDDD :jarl adr32, R0004 is op0515=0x017 & R0004; adr32 { R0004 = inst_next; call adr32; } # JMP [reg1] - 00000000011RRRRR :jmp [R0004] is op0515=0x03 & R0004 & op0004=0x1F { return [R0004]; } :jmp [R0004] is op0515=0x03 & R0004 & op0004!=0x1F { call [R0004]; } # JMP disp32[reg1] - 00000110111RRRRR|ddddddddddddddd0|DDDDDDDDDDDDDDDD :jmp adr32i[R0004] is op0515=0x037 & R0004; adr32i { local adr = adr32i + R0004; goto [adr]; } # JR disp22 - 00000111110ddddd|ddddddddddddddd0 :jr adr22 is op0615=0x1E ... 
& adr22 { goto adr22; } # JR disp32 - 0000001011100000|ddddddddddddddd0|DDDDDDDDDDDDDDDD :jr adr32 is op0015=0x2E0; adr32 { goto adr32; } ##################################################### ##### Special ##### ##################################################### # CALLT imm6 - 0000001000iiiiii :callt op0005 is op0615=0x8 & op0005 { CTPC = inst_next; CTPSW = PSW; local adr:4 = CTBP + (op0005 << 1); PC = CTBP + zext(*:2 adr); call [PC]; } # CAXI [reg1], reg2, reg3 - rrrrr111111RRRRR|wwwww00011101110 :caxi [R0004], R1115, R2731 is op0510=0x3F & R0004 & R1115; op1626=0xEE & R2731 { local tkn = *:4 (R0004 & ~(0x3)); local result = R1115 - tkn; *:4 R0004 = tkn * zext(result != 0) + R2731 * zext(result == 0); R2731 = tkn; set_general_flags_neg(R1115, tkn); } # CTRET - 0000011111100000|0000000101000100 :ctret is op0515=0x3F; op1631=0x144 { PC = CTPC; PSW = CTPSW; return [PC]; } # DI - 0000011111100000|0000000101100000 define pcodeop __disable_irq; :di is op0015=0x7E0; op1631=0x160 { $(ID) = 1; __disable_irq(); } # DISPOSE imm5, list12 - 0000011001iiiiiL|LLLLLLLLLLL00000 :dispose prep0105, DispList is prep0615=0x19 & prep1620=0x0 & prep0105 & DispList { sp = sp + (prep0105 << 2); build DispList; } # DISPOSE imm5, list12, [reg1] - 0000011001iiiiiL|LLLLLLLLLLLRRRRR :dispose prep0105, DispList, [prep1620] is prep0615=0x19 & prep1620 & prep0105 & DispList { sp = sp + (prep0105 << 2); build DispList; PC = prep1620; return [PC]; } # EI - 1000011111100000|0000000101100000 define pcodeop __enable_irq; :ei is op0015=0x87E0; op1631=0x160 { $(ID) = 0; __enable_irq(); } # EIRET - 0000011111100000|0000000101001000 :eiret is op0515=0x3F; op1631=0x148 { PC = EIPC; PSW = EIPSW; return [PC]; } # FERET - 0000011111100000|0000000101001010 :feret is op0515=0x3F; op1631=0x14A { PC = FEPC; PSW = FEPSW; return [PC]; } # FETRAP vector4 - 0vvvv00001000000 :fetrap op1114 is op0010=0x40 & op1515=0 & op1114 & op1115!=0 { FEPC = inst_next; FEPSW = PSW; $(FECC) = op1114 + 0x30; # exception code 
# 0x30..0x3F   (continuation of the FETRAP exception-code comment, severed by extraction)
FEIC = op1114 + 0x30; # exception code 0x30..0x3F
$(EP) = 1; $(ID) = 1; $(NP) = 1; PC = 0x30; goto [PC]; }

# HALT - 0000011111100000|0000000100100000
define pcodeop __halt;
:halt is op0015=0x7E0; op1631=0x120 { __halt(); }

# LDSR reg2, regID - rrrrr111111RRRRR|0000000000100000
:ldsr R0004, SR1115 is op0510=0x3F & SR1115 & R0004; op1631=0x20 { SR1115 = R0004; }

# NOP - 0000000000000000
:nop is op0015=0x0 { PC = inst_next; }

# PREPARE list12, imm5 - 0000011110iiiiiL|LLLLLLLLLLL00001
:prepare PrepList, prep0105 is prep0615=0x1E & prep0105 & prep1620=0x1 & PrepList { build PrepList; sp = sp - (prep0105 << 2); }

# PREPARE list12, imm5, sp - 0000011110iiiiiL|LLLLLLLLLLL00011
:prepare PrepList, prep0105, sp is prep0615=0x1E & prep0105 & prep1620=0x3 & PrepList & sp { build PrepList; sp = sp - (prep0105 << 2); ep = sp; }

# PREPARE list12, imm5, imm16 (low) - 0000011110iiiiiL|LLLLLLLLLLL01011|iiiiiiiiiiiiiiii
:prepare PrepList, prep0105, s3247 is prep0615=0x1E & prep0105 & prep1620=0xB & PrepList; s3247 { build PrepList; sp = sp - (prep0105 << 2); ep = s3247; }

# PREPARE list12, imm5, imm16 (high) - 0000011110iiiiiL|LLLLLLLLLLL10011|iiiiiiiiiiiiiiii
:prepare PrepList, prep0105, s3247 is prep0615=0x1E & prep0105 & prep1620=0x13 & PrepList; s3247 { build PrepList; sp = sp - (prep0105 << 2); ep = s3247 << 16; }

# PREPARE list12, imm5, imm32 - 0000011110iiiiiL|LLLLLLLLLLL11011|iiiiiiiiiiiiiiii|iiiiiiiiiiiiiiii
:prepare PrepList, prep0105, imm32 is prep0615=0x1E & prep0105 & prep1620=0x1B & PrepList; op3247; op4863 [ imm32 = (op4863 << 16) | op3247; ] { build PrepList; sp = sp - (prep0105 << 2); ep = imm32; }

# RETI - 0000011111100000|0000000101000000
# Return from trap or interrupt. Priority follows the surviving statement order:
#   EP == 1          -> restore from EIPC/EIPSW (exception return)
#   else NP == 1     -> restore from FEPC/FEPSW (FE-level return)
#   else             -> restore from EIPC/EIPSW (interrupt return)
# FIX: the <label> tokens had been stripped by extraction, leaving four invalid
# dangling "goto ;" statements; labels restored to recover the control flow.
:reti is op0515=0x3F; op1631=0x140 {
	if($(EP)!=1) goto <reti_chk_np>;
	PC = EIPC; PSW = EIPSW; goto <reti_done>;
	<reti_chk_np>
	if($(NP)!=1) goto <reti_irq>;
	PC = FEPC; PSW = FEPSW; goto <reti_done>;
	<reti_irq>
	PC = EIPC; PSW = EIPSW;
	<reti_done>
	return[PC];
}

# RIE - 0000000001000000
:rie is op0015=0x40 { FEPC = PC; FEPSW = PSW; $(NP) = 1; $(EP) = 1; $(ID) = 1; PC = 0x30; goto [PC]; }

# RIE imm5, imm4
- iiiii1111111IIII|0000000000000000 :rie op1115, op0003 is op0410=0x7F & op1115 & op0003; op1631=0x0 { FEPC = PC; FEPSW = PSW; $(NP) = 1; $(EP) = 1; $(ID) = 1; PC = 0x30; goto [PC]; } # STSR regID, reg2 - rrrrr111111RRRRR|0000000001000000 :stsr SR0004, R1115 is op0510=0x3F & R1115 & SR0004; op1631=0x40 { R1115 = SR0004; } # SWITCH reg1 - 00000000010RRRRR :switch R0004 is op0515=0x2 & R0004 { local adr:4 = inst_next + (R0004 << 1); PC = inst_next + (sext(*:2 adr) << 1); goto [PC]; } # SYNCE - 0000000000011101 define pcodeop __synchronize; :synce is op0015=0x1D { __synchronize(); } # SYNCM - 0000000000011110 :syncm is op0015=0x1E { __synchronize(); } # SYNCP - 0000000000011111 :syncp is op0015=0x1F { __synchronize(); } # SYSCALL vector8 - 11010111111vvvvv|00VVV00101100000 :syscall vector8 is op0515=0x6BF & op0004; op3031=0 & op2729 & op1626=0x160 [ vector8 = (op2729 << 5) | op0004; ] { EIPC = inst_next; EIPSW = PSW; EIIC = vector8 + 0x8000; # exception code 0x8000..0x80FF $(EICC) = vector8 + 0x8000; # exception code 0x8000..0x80FF $(EP) = 1; $(ID) = 1; local adr:4; either_or(adr, (vector8 <= SCCFG), SCBP + (vector8 << 2), SCBP); PC = SCBP + (*:4 adr); call [PC]; } # TRAP imm5 - 00000111111vvvvv|0000000100000000 :trap op0004 is op0515=0x3F & op0004; op1631=0x100 { local vector5:4 = op0004; EIPC = inst_next; EIPSW = PSW; EIIC = vector5 + 0x40; # exception code 0x40..0x5F $(EICC) = vector5:2 + 0x40; # exception code 0x40..0x5F $(EP) = 1; $(ID) = 1; either_or(PC, (vector5 <= 15), 0x40, 0x50); call [PC]; } ================================================ FILE: pypcode/processors/V850/data/languages/V850.cspec ================================================ ================================================ FILE: pypcode/processors/V850/data/languages/V850.ldefs ================================================ Renesas V850 family ================================================ FILE: pypcode/processors/V850/data/languages/V850.opinion 
================================================ ================================================ FILE: pypcode/processors/V850/data/languages/V850.pspec ================================================ ================================================ FILE: pypcode/processors/V850/data/languages/V850.slaspec ================================================ ##################################################### ##### ##### ##### V850E2M SLEIGH specification ##### ##### ##### ##################################################### define endian = little; define alignment = 2; # Size & default are required define space ram type=ram_space size=4 default; define space register type=register_space size=4; ##################################################### ##### Helpers ##### ##################################################### @include "./Helpers/Register.sinc" @include "./Helpers/Tokens.sinc" @include "./Helpers/Variables.sinc" @include "./Helpers/Conditions.sinc" @include "./Helpers/Macros.sinc" @include "./Helpers/Extras.sinc" ##################################################### ##### Instructions ##### ##################################################### @include "./Instructions/Arithmetic.sinc" @include "./Instructions/Float.sinc" @include "./Instructions/Load_Store.sinc" @include "./Instructions/Logic.sinc" @include "./Instructions/Special.sinc" ================================================ FILE: pypcode/processors/V850/data/manuals/v850.idx ================================================ @r01us0001ej0100_v850e2m.pdf [V850E2M Users Manual: Architecture RENESAS MCU V850E2M Microprocessor Core] ADD, 71 ADDI, 72 ADF, 73 AND, 74 ANDI, 75 Bcond, 76 BSH, 78 BSW, 79 CALLT, 80 CAXI, 81 CLR1, 82 CMOV, 84 CMP, 86 CTRET, 87 DI, 88 DISPOSE, 89 DIV, 91 DIVH, 92 DIVHU, 94 DIVQ, 95 DIVQU, 96 DIVU, 97 EI, 98 EIRET, 99 FERET, 100 FETRAP, 101 HALT, 102 HSH, 103 HSW, 104 JARL, 105 JMP, 107 JR, 108 LD.B, 109 LD.BU, 110 LD.H, 111 LD.HU, 112 LD.W, 113 LDSR, 114 MAC, 115 MACU, 116 
MOV, 117 MOVEA, 118 MOVHI, 119 MUL, 120 MULH, 121 MULHI, 122 MULU, 123 NOP, 124 NOT, 125 NOT1, 126 OR, 128 ORI, 129 PREPARE, 130 RETI, 132 RIE, 134 SAR, 135 SASF, 137 SATADD, 138 SATSUB, 140 SATSUBI, 141 SATSUBR, 142 SBF, 143 SCH0L, 144 SCH0R, 145 SCH1L, 146 SCH1R, 147 SET1, 148 SETF, 150 SHL, 152 SHR, 154 SLD.B, 156 SLD.BU, 157 SLD.H, 158 SLD.HU, 159 SLD.W, 160 SST.B, 161 SST.H, 162 SST.W, 163 ST.B, 164 ST.H, 165 ST.W, 166 STSR, 167 SUB, 168 SUBR, 169 SWITCH, 170 SXB, 171 SXH, 172 SYNCE, 173 SYNCM, 174 SYNCP, 175 SYSCALL, 176 TRAP, 178 TST, 179 TST1, 180 XOR, 181 XORI, 182 ZXB, 183 ZXH, 184 ABSF.D, 326 ABSF.S, 327 ADDF.D, 328 ADDF.S, 329 CEILF.DL, 330 CEILF.DUL, 331 CEILF.DUW, 332 CEILF.DW, 333 CEILF.SL, 334 CEILF.SUL, 335 CEILF.SUW, 336 CEILF.SW, 337 CMOVF.D, 338 CMOVF.S, 339 CMPF.D, 340 CMPF.S, 343 CVTF.DL, 346 CVTF.DS, 347 CVTF.DUL, 348 CVTF.DUW, 349 CVTF.DW, 350 CVTF.LD, 351 CVTF.LS, 352 CVTF.SD, 353 CVTF.SL, 354 CVTF.SUL, 355 CVTF.SUW, 356 CVTF.SW, 357 CVTF.ULD, 358 CVTF.ULS, 359 CVTF.UWD, 360 CVTF.UWS, 361 CVTF.WD, 362 CVTF.WS, 363 DIVF.D, 364 DIVF.S, 365 FLOORF.DL, 366 FLOORF.DUL, 367 FLOORF.DUW, 368 FLOORF.DW, 369 FLOORF.SL, 370 FLOORF.SUL, 371 FLOORF.SUW, 372 FLOORF.SW, 373 MADDF.S, 374 MAXF.D, 376 MAXF.S, 377 MINF.D, 378 MINF.S, 379 MSUBF.S, 380 MULF.D, 382 MULF.S, 383 NEGF.D, 384 NEGF.S, 385 NMADDF.S, 386 NMSUBF.S, 388 RECIPF.D, 390 RECIPF.S, 391 RSQRTF.D, 392 RSQRTF.S, 393 SQRTF.D, 394 SQRTF.S, 395 SUBF.D, 396 SUBF.S, 397 TRFSR, 398 TRNCF.DL, 399 TRNCF.DUL, 400 TRNCF.DUW, 401 TRNCF.DW, 402 TRNCF.SL, 403 TRNCF.SUL, 404 TRNCF.SUW, 405 TRNCF.SW, 406 ================================================ FILE: pypcode/processors/V850/data/patterns/V850_patterns.xml ================================================ 01111111 00000000 10...... 00000111 ...00001 ........ 10...... 00000111 ...00011 ........ 00000011 00011110 ........ ........ 10...... 00000111 ...01011 ........ ........ ........ 10...... 00000111 ...10011 ........ ........ ........ 10...... 
00000111 ...11011 ........ ........ ........ ........ ........ 01...... 00000110 ........ ........ 11100000 00000111 01001010 00000001 11100000 00000111 01001000 00000001 11100000 00000111 01000100 00000001 10...... 00000111 ...00001 ........ 10...... 00000111 ...00011 ........ 00000011 00011110 ........ ........ 10...... 00000111 ...11011 ........ ........ ........ ........ ........ 10...... 00000111 ...01011 ........ ........ ........ 10...... 00000111 ...10011 ........ ........ ........ ================================================ FILE: pypcode/processors/V850/data/patterns/patternconstraints.xml ================================================ V850_patterns.xml ================================================ FILE: pypcode/processors/Xtensa/data/languages/cust.sinc ================================================ # Per the manual: # CUST0 and CUST1 opcode encodings shown in Table 7–193 are permanently reserved # for designer-defined opcodes. In the future, customers who use these spaces # exclusively for their own designer-defined opcodes will be able to add new # Tensilica-defined options without changing their opcodes or binary executables. define pcodeop cust0; :cust0 "{op2="^op2^", r="^ar^", s="^as^", t="^at^"}" is op0=0x0 & op1=0x6 & op2 & ar & as & at { cust0(); } define pcodeop cust1; :cust1 "{op2="^op2^", r="^ar^", s="^as^", t="^at^"}" is op0=0x0 & op1=0x7 & op2 & ar & as & at { cust1(); } ================================================ FILE: pypcode/processors/Xtensa/data/languages/flix.sinc ================================================ # FLIX (Flexible Length Instruction eXtension) is a Xtensa processor extension # that allows for variable-length, multi-op instructions with support from 4 # 16 bytes. Customizable, if found they should be flagged. 
define pcodeop flix; :FLIX u_4_23 is op0=0xe & u_4_23 { flix(); } ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa.cspec ================================================ ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa.dwarf ================================================ ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa.ldefs ================================================ Tensilica Xtensa 32-bit little-endian Tensilica Xtensa 32-bit big-endian ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa.opinion ================================================ ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa.pspec ================================================ ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensaArch.sinc ================================================ define endian=$(ENDIAN); define alignment=1; define space ram type=ram_space size=4 default; define space register type=register_space size=4; # Address registers (AR). 
define register offset=0x0000 size=4 [ a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 ]; # Temporary Address registers (facilitates simplified CALL register swapping used by decompiler) define register offset=0x0080 size=4 [ t0 t1 t2 t3 t4 t5 t6 t7 t8 t9 t10 t11 ]; # Floating Point registers define register offset=0x0100 size=4 [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 ]; # Boolean registers (BR) define register offset=0x0200 size=1 [ b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 ]; define register offset=0x0400 size=4 [ user0 user1 user2 user3 user4 user5 user6 user7 user8 user9 user10 user11 user12 user13 user14 user15 user16 user17 user18 user19 user20 user21 user22 user23 user24 user25 user26 user27 user28 user29 user30 user31 user32 user33 user34 user35 user36 user37 user38 user39 user40 user41 user42 user43 user44 user45 user46 user47 user48 user49 user50 user51 user52 user53 user54 user55 user56 user57 user58 user59 user60 user61 user62 user63 user64 user65 user66 user67 user68 user69 user70 user71 user72 user73 user74 user75 user76 user77 user78 user79 user80 user81 user82 user83 user84 user85 user86 user87 user88 user89 user90 user91 user92 user93 user94 user95 user96 user97 user98 user99 user100 user101 user102 user103 user104 user105 user106 user107 user108 user109 user110 user111 user112 user113 user114 user115 user116 user117 user118 user119 user120 user121 user122 user123 user124 user125 user126 user127 user128 user129 user130 user131 user132 user133 user134 user135 user136 user137 user138 user139 user140 user141 user142 user143 user144 user145 user146 user147 user148 user149 user150 user151 user152 user153 user154 user155 user156 user157 user158 user159 user160 user161 user162 user163 user164 user165 user166 user167 user168 user169 user170 user171 user172 user173 user174 user175 user176 user177 user178 user179 user180 user181 user182 user183 user184 user185 user186 user187 user188 user189 user190 user191 user192 user193 
user194 user195 user196 user197 user198 user199 user200 user201 user202 user203 user204 user205 user206 user207 user208 user209 user210 user211 user212 user213 user214 user215 user216 user217 user218 user219 user220 user221 user222 user223 user224 user225 user226 user227 user228 user229 user230 THREADPTR FCR FSR user234 user235 user236 user237 user238 user239 user240 user241 user242 user243 user244 user245 user246 user247 user248 user249 user250 user251 user252 user253 user254 user255 ]; # Program counter. define register offset=0x1000 size=4 [ pc ]; define register offset=0x2000 size=4 [ LBEG LEND LCOUNT SAR BR LITBASE sr6 sr7 sr8 sr9 sr10 sr11 SCOMPARE1 sr13 sr14 sr15 @if ENDIAN == "big" ACCHI ACCLO @else ACCLO ACCHI @endif sr18 sr19 sr20 sr21 sr22 sr23 sr24 sr25 sr26 sr27 sr28 sr29 sr30 sr31 M0 M1 M2 M3 sr36 sr37 sr38 sr39 sr40 sr41 sr42 sr43 sr44 sr45 sr46 sr47 sr48 sr49 sr50 sr51 sr52 sr53 sr54 sr55 sr56 sr57 sr58 sr59 sr60 sr61 sr62 sr63 sr64 sr65 sr66 sr67 sr68 sr69 sr70 sr71 WindowBase WindowStart sr74 sr75 sr76 sr77 sr78 sr79 sr80 sr81 sr82 PTEVADDR sr84 sr85 sr86 sr87 sr88 MMID RASID ITLBCFG DTLBCFG sr93 sr94 sr95 IBREAKENABLE MEMCTL CACHEATTR ATOMCTL sr100 sr101 sr102 sr103 DDR sr105 MEPC MEPS MESAVE MESR MECR MEVADDR sr112 sr113 sr114 sr115 sr116 sr117 sr118 sr119 sr120 sr121 sr122 sr123 sr124 sr125 sr126 sr127 IBREAKA0 IBREAKA1 sr130 sr131 sr132 sr133 sr134 sr135 sr136 sr137 sr138 sr139 sr140 sr141 sr142 sr143 DBREAKA0 DBREAKA1 sr146 sr147 sr148 sr149 sr150 sr151 sr152 sr153 sr154 sr155 sr156 sr157 sr158 sr159 DBREAKC0 DBREAKC1 sr162 sr163 sr164 sr165 sr166 sr167 sr168 sr169 sr170 sr171 sr172 sr173 sr174 sr175 sr176 EPC1 EPC2 EPC3 EPC4 EPC5 EPC6 EPC7 sr184 sr185 sr186 sr187 sr188 sr189 sr190 sr191 DEPC sr193 EPS2 EPS3 EPS4 EPS5 EPS6 EPS7 sr200 sr201 sr202 sr203 sr204 sr205 sr206 sr207 sr208 EXCSAVE1 EXCSAVE2 EXCSAVE3 EXCSAVE4 EXCSAVE5 EXCSAVE6 EXCSAVE7 sr216 sr217 sr218 sr219 sr220 sr221 sr222 sr223 #TODO: REVIEW NEEDED! 
- INTSET / INTERRUPT placement/address (also review related attach) CPENABLE INTERRUPT INTSET INTCLEAR INTENABLE sr229 PS VECBASE EXCCAUSE DEBUGCAUSE CCOUNT PRID ICOUNT ICOUNTLEVEL EXCVADDR sr239 CCOMPARE0 CCOMPARE1 CCOMPARE2 sr243 MISC0 MISC1 MISC2 MISC3 sr248 sr249 sr250 sr251 sr252 sr253 sr254 sr255 ]; define register offset=0x2040 size=8 [ ACC ]; @define EPC_BASE "0x22c0" #address of EPCn = $(EPC_BASE) + (n * 4) @define EPS_BASE "0x2300" #address of EPSn = $(EPS_BASE) + (n * 4) @define PS_INTLEVEL "PS[0,4]" @define PS_EXCM "PS[4,1]" @define PS_UM "PS[5,1]" @define PS_RING "PS[6,2]" @define PS_OWB "PS[8,4]" @define PS_CALLINC "PS[12,2]" @define PS_WOE "PS[14,1]" define register offset=0xf000 size=4 contextreg; define context contextreg loopMode=(0,0) loopEnd=(1,1) noflow #transient bits phase=(31,31) ; @if ENDIAN == "big" # little-endian -> big-endian 24-bit conversion chart #|00|01|02|03|04|05|06|07|08|09|10|11|12|13|14|15|16|17|18|19|20|21|22|23| #|23|22|21|20|19|18|17|16|15|14|13|12|11|10|09|08|07|06|05|04|03|02|01|00| # Regular 24-bit instruction. define token insn(24) # Named opcode/register fields. op2 = (0,3) op1 = (4,7) ar = (8,11) fr = (8,11) br = (8,11) as = (12,15) fs = (12,15) bs = (12,15) sr = (8,15) at = (16,19) ft = (16,19) bt = (16,19) op0 = (20,23) # Signed and unsigned immediates. Named [us]N_L.M, where u and s denote signedness, L and M the # least and most significant bit of the immediate in the instruction word, and N the length # (i.e. M-L+1). 
u3_21_23 = (1,3) u4_20_23 = (0,3) s8_16_23 = (0,7) signed u8_16_23 = (0,7) u12_12_23 = (0,11) s12_12_23 = (0,11) signed u16_8_23 = (0,15) s8_6_23 = (0,17) signed u1_20 = (0,0) u2_18_19 = (4,5) u3_17_19 = (5,7) u2_16_17 = (6,7) u1_16 = (4,4) u1_15_15 = (11,11) u2_14_15 = (10,11) u3_13_15 = (9,11) u4_12_15 = (8,11) m0m1_14_14 = (10,10) u2_12_13 = (8,9) mw_12_13 = (8,9) u1_12 = (8,8) u4_8_11 = (12,15) u8_4_11 = (12,19) s4_8_11 = (12,15) signed u1_7_7 = (19,19) u2_6_7 = (16,17) u3_5_7 = (17,19) u4_4_7 = (16,19) s4_4_7 = (16,19) m2m3_6_6 = (18,18) u_4_23 = (0,19) t2_4_5 = (16,17) u2_4_5 = (18,19) u1_4 = (16,16) ; # little-endian -> big-endian 16-bit conversion chart #|00|01|02|03|04|05|06|07|08|09|10|11|12|13|14|15| #|15|14|13|12|11|10|09|08|07|06|05|04|03|02|01|00| # Narrow 16-bit instructions; fields are always prefixed with n_. define token narrowinsn(16) n_ar = (0,3) n_as = (4,7) n_at = (8,11) n_op0 = (12,15) n_u4_12_15 = (0,3) n_s4_12_15 = (0,3) signed n_u4_8_11 = (4,7) n_u1_7 = (11,11) n_u2_6_7 = (10,11) n_u4_4_7 = (8,11) n_s3_4_6 = (8,10) n_u2_4_5 = (8,9) ; @else # Regular 24-bit instruction. define token insn(24) # Named opcode/register fields. op2 = (20,23) ar = (12,15) fr = (12,15) br = (12,15) as = (8,11) fs = (8,11) bs = (8,11) sr = (8,15) at = (4,7) ft = (4,7) bt = (4,7) op1 = (16,19) op0 = (0,3) # Signed and unsigned immediates. Named [us]N_L_M, where u and s denote signedness, L and M the # least and most significant bit of the immediate in the instruction word, and N the length # (i.e. M-L+1). 
u3_21_23 = (21,23) u4_20_23 = (20,23) s8_16_23 = (16,23) signed u8_16_23 = (16,23) u12_12_23 = (12,23) s12_12_23 = (12,23) signed u16_8_23 = (8,23) s8_6_23 = (6,23) signed u1_20 = (20,20) u2_18_19 = (18,19) u3_17_19 = (17,19) u2_16_17 = (16,17) u1_16 = (16,16) u1_15_15 = (15,15) u2_14_15 = (14,15) u3_13_15 = (13,15) u4_12_15 = (12,15) m0m1_14_14 = (14,14) u2_12_13 = (12,13) mw_12_13 = (12,13) u1_12 = (12,12) u4_8_11 = (8,11) u8_4_11 = (4,11) s4_8_11 = (8,11) signed u1_7_7 = (7,7) u2_6_7 = (6,7) u3_5_7 = (5,7) u4_4_7 = (4,7) s4_4_7 = (4,7) m2m3_6_6 = (6,6) u_4_23 = (4,23) t2_4_5 = (4,5) u2_4_5 = (4,5) u1_4 = (4,4) ; # Narrow 16-bit instructions; fields are always prefixed with n_. define token narrowinsn(16) n_ar = (12,15) n_as = (8,11) n_at = (4,7) n_op0 = (0, 3) n_u4_12_15 = (12,15) n_s4_12_15 = (12,15) signed n_u4_8_11 = (8,11) n_u1_7 = (7,7) n_u2_6_7 = (6,7) n_u4_4_7 = (4,7) n_s3_4_6 = (4,6) n_u2_4_5 = (4,5) ; @endif ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensaInstructions.sinc ================================================ # ABS - Absolute Value (RRR), pg. 246. :abs ar, at is op2 = 0b0110 & op1 = 0 & ar & as = 0b0001 & at & op0 = 0 { ar = at; if (ar s> 0) goto inst_next; ar = -ar; } # ABS.S - Absolute Single Value (RRR), pg. 247. :abs.s fr, fs is op2 = 0b1111 & op1 = 0b1010 & fr & fs & at = 0b0001 & op0 = 0b0000 { fr = abs(fs); } # ADD - Add (RRR), pg. 248. :add ar, as, at is op2 = 0b1000 & op1 = 0 & ar & as & at & op0 = 0 { ar = as + at; } # ADD.N - Narrow Add (RRRN), pg. 249. :add.n n_ar, n_as, n_at is n_ar & n_as & n_at & n_op0 = 0b1010 { n_ar = n_as + n_at; } # ADD.S - Add Single (RRR), pg. 250. :add.s fr, fs, ft is op2 = 0 & op1 = 0b1010 & fr & fs & ft & op0 = 0 { fr = fs f+ ft; } # ADDI - Add Immediate (RRI8), pg. 251. :addi at, as, s8_16_23 is s8_16_23 & ar = 0b1100 & as & at & op0 = 0b0010 { at = as + s8_16_23; } # ADDI.N - Narrow Add Immediate (RRRN), pg. 252. 
:addi.n n_ar, n_as, n_s4_4_7_nozero is n_ar & n_as & n_s4_4_7_nozero & n_op0 = 0b1011 { n_ar = n_as + n_s4_4_7_nozero; } # ADDMI - Add Immediate with Shift by 8, pg. 253. :addmi at, as, s16_16_23_sb8 is s16_16_23_sb8 & ar = 0b1101 & as & at & op0 = 0b0010 { at = as + s16_16_23_sb8; } # ADDX2 - Add with Shift by 1, pg. 254. :addx2 ar, as, at is op2 = 0b1001 & op1 = 0 & ar & as & at & op0 = 0 { ar = (as << 1) + at; } # ADDX4 - Add with Shift by 2, pg. 255. :addx4 ar, as, at is op2 = 0b1010 & op1 = 0 & ar & as & at & op0 = 0 { ar = (as << 2) + at; } # ADDX8 - Add with Shift by 4, pg. 256. :addx8 ar, as, at is op2 = 0b1011 & op1 = 0 & ar & as & at & op0 = 0 { ar = (as << 3) + at; } # ALL4 - All 4 Booleans True, pg. 257. :all4 bt, bs is op2 = 0 & op1 = 0 & ar = 0b1001 & bs & bt & op0 = 0 { local b = *[register]:1 &:4 bs+1; local c = *[register]:1 &:4 bs+2; local d = *[register]:1 &:4 bs+3; bt = bs && b && c && d; } # ALL8 - All 8 Booleans True, pg. 258. :all8 bt, bs is op2 = 0 & op1 = 0 & ar = 0b1011 & bs & bt & op0 = 0 { local b = *[register]:1 &:4 bs+1; local c = *[register]:1 &:4 bs+2; local d = *[register]:1 &:4 bs+3; local e = *[register]:1 &:4 bs+4; local f = *[register]:1 &:4 bs+5; local g = *[register]:1 &:4 bs+6; local h = *[register]:1 &:4 bs+7; bt = bs && b && c && d && e && f && g && h; } # AND - Bitwise Logical And, pg. 259. :and ar, as, at is op2 = 0b0001 & op1 = 0 & ar & as & at & op0 = 0 { ar = as & at; } # ANDB - Boolean And, pg. 260. :andb br, bs, bt is op2 = 0 & op1 = 0b0010 & br & bs & bt & op0 = 0 { br = bs && bt; } # ANDBC - Boolean And with Complement, pg. 261. :andbc br, bs, bt is op2 = 0b0001 & op1 = 0b0010 & br & bs & bt & op0 = 0 { br = bs && !bt; } # ANY4 - Any 4 Booleans True, pg. 262. :any4 bt, bs is op2 = 0 & op1 = 0 & ar = 0b1000 & bs & bt & op0 = 0 { local b = *[register]:1 &:4 bs+1; local c = *[register]:1 &:4 bs+2; local d = *[register]:1 &:4 bs+3; bt = bs || b || c || d; } # ANY8 - Any 8 Booleans True, pg. 263. 
:any8 bt, bs is op2 = 0 & op1 = 0 & ar = 0b1010 & bs & bt & op0 = 0 { local b = *[register]:1 &:4 bs+1; local c = *[register]:1 &:4 bs+2; local d = *[register]:1 &:4 bs+3; local e = *[register]:1 &:4 bs+4; local f = *[register]:1 &:4 bs+5; local g = *[register]:1 &:4 bs+6; local h = *[register]:1 &:4 bs+7; bt = bs || b || c || d || e || f || g || h; } # BALL - Branch if All Bits Set, pg. 264. :ball srel_16_23, as, at is srel_16_23 & ar = 0b0100 & as & at & op0 = 0b0111 { local test:4 = ~as & at; if (test == 0) goto srel_16_23; } # BANY - Branch if Any Bit Set, pg. 265. :bany srel_16_23, as, at, is srel_16_23 & ar = 0b1000 & as & at & op0 = 0b0111 { local test:4 = as & at; if (test != 0) goto srel_16_23; } macro extract_bit(bit, result) { @if ENDIAN == "big" result = 0x80000000 >> bit; @else result = 0x1 << bit; @endif } # BBC - Branch if Bit Clear, pg. 266. :bbc as, at, srel_16_23 is srel_16_23 & ar = 0b0101 & as & at & op0 = 0b0111 { local bval:4 = 0; extract_bit(at[0,5], bval); bval = as & bval; if (bval == 0) goto srel_16_23; } # BBCI - Branch if Bit Clear immediate, pg. 267 :bbci as, u5_4_7_12, srel_16_23 is srel_16_23 & u3_13_15 = 0b011 & as & u5_4_7_12 & op0 = 0b0111 { local bval; extract_bit(u5_4_7_12, bval); bval = as & bval; if (bval == 0) goto srel_16_23; } # BBS - Branch if Bit Set, pg. 269. :bbs as, at, srel_16_23 is srel_16_23 & ar = 0b1101 & as & at & op0 = 0b0111 { local bval; extract_bit(at[0,5], bval); bval = as & bval; if (bval != 0) goto srel_16_23; } # BBSI - Branch if Bit Set immediate, pg. 270. :bbsi as, u5_4_7_12, srel_16_23 is srel_16_23 & u3_13_15 = 0b111 & as & u5_4_7_12 & op0 = 0b0111 { local bval; extract_bit(u5_4_7_12, bval); bval = as & bval; if (bval != 0) goto srel_16_23; } # BEQ - Branch if Equal, pg. 272. :beq as, at, srel_16_23 is srel_16_23 & ar = 0b0001 & as & at & op0 = 0b0111 { if (as == at) goto srel_16_23; } # BEQI - Branch if Equal Immediate, pg. 273. 
:beqi as, r_b4const, srel_16_23 is srel_16_23 & r_b4const & as & u2_6_7 = 0 & u2_4_5 = 0b10 & op0 = 0b0110 { if (as == r_b4const) goto srel_16_23; } # BEQZ - Branch if Equal Zero, pg. 274. :beqz as, srel_12_23 is srel_12_23 & as & u2_6_7 = 0 & u2_4_5 = 0b01 & op0 = 0b0110 { if (as == 0) goto srel_12_23; } # BEQZ.N - Narrow Branch if Equal Zero, pg. 275. :beqz.n n_as, urel_12_15_4_5 is urel_12_15_4_5 & n_as & n_u2_6_7 = 0b10 & n_op0 = 0b1100 { if (n_as == 0) goto urel_12_15_4_5; } # BF - Branch if False, pg. 276. :bf bs, srel_16_23 is srel_16_23 & ar = 0 & bs & at = 0b0111 & op0 = 0b0110 { if (!bs) goto srel_16_23; } # BGE - Branch if Greater Than or Equal, pg. 277. :bge as, at, srel_16_23 is srel_16_23 & ar = 0b1010 & as & at & op0 = 0b0111 { if (as s>= at) goto srel_16_23; } # BGEI - Branch if Greater Than or Equal Immediate, pg. 278. :bgei as, r_b4const, srel_16_23 is srel_16_23 & r_b4const & as & u2_6_7 = 0b11 & u2_4_5 = 0b10 & op0 = 0b0110 { if (as s>= r_b4const) goto srel_16_23; } # BGEU - Branch if Greater Than or Equal Unsigned, pg. 279. :bgeu as, at, srel_16_23 is srel_16_23 & ar = 0b1011 & as & at & op0 = 0b0111 { if (as >= at) goto srel_16_23; } # BGEUI - Branch if Greater Than or Equal Unsigned Immediate, pg. 280. :bgeui as, r_b4constu, srel_16_23 is srel_16_23 & r_b4constu & as & u2_6_7 = 0b11 & u2_4_5 = 0b11 & op0 = 0b0110 { if (as >= r_b4constu) goto srel_16_23; } # BGEZ - Branch if Greater Than or Equal Zero, pg. 281. :bgez as, srel_12_23 is srel_12_23 & as & u2_6_7 = 0b11 & u2_4_5 = 0b01 & op0 = 0b0110 { if (as s>= 0) goto srel_12_23; } # BLT - Branch if Less Than, pg. 282. :blt as, at, srel_16_23 is srel_16_23 & ar = 0b0010 & as & at & op0 = 0b0111 { if (as s< at) goto srel_16_23; } # BLTI - Branch if Less Than Immediate, pg. 283. :blti as, r_b4const, srel_16_23 is srel_16_23 & r_b4const & as & u2_6_7 = 0b10 & u2_4_5 = 0b10 & op0 = 0b0110 { if (as s< r_b4const) goto srel_16_23; } # BLTU - Branch if Less Than Unsigned, pg. 284. 
:bltu as, at, srel_16_23 is srel_16_23 & ar = 0b0011 & as & at & op0 = 0b0111 { if (as < at) goto srel_16_23; } # BLTUI - Branch if Less Than Unsigned Immediate, pg. 285. :bltui as, r_b4constu, srel_16_23 is srel_16_23 & r_b4constu & as & u2_6_7 = 0b10 & u2_4_5 = 0b11 & op0 = 0b0110 { if (as < r_b4constu) goto srel_16_23; } # BLTZ - Branch if Less Than Zero, pg. 286. :bltz as, srel_12_23 is srel_12_23 & as & u2_6_7 = 0b10 & u2_4_5 = 0b01 & op0 = 0b0110 { if (as s< 0) goto srel_12_23; } # BNALL - Branch if Not-All Bits Set, pg. 287. :bnall srel_16_23, as, at is srel_16_23 & ar = 0b1100 & as & at & op0 = 0b0111 { if ((~as & at) != 0) goto srel_16_23; } # BNE - Branch if Not Equal, pg. 288. :bne as, at, srel_16_23 is srel_16_23 & ar = 0b1001 & as & at & op0 = 0b0111 { if (as != at) goto srel_16_23; } # BNEI - Branch if Not EquaL Immediate, pg. 289. :bnei as, r_b4const, srel_16_23 is srel_16_23 & r_b4const & as & u2_6_7 = 0b01 & u2_4_5 = 0b10 & op0 = 0b0110 { if (as != r_b4const) goto srel_16_23; } # BNEZ - Branch if Not Equal Zero, pg. 290. :bnez as, srel_12_23 is srel_12_23 & as & u2_6_7 = 0b01 & u2_4_5 = 0b01 & op0 = 0b0110 { if (as != 0) goto srel_12_23; } # BNEZ.N - Narrow Branch if Not Equal Zero, pg. 291. :bnez.n n_as, urel_12_15_4_5 is urel_12_15_4_5 & n_as & n_u2_6_7 = 0b11 & n_op0 = 0b1100 { if (n_as != 0) goto urel_12_15_4_5; } # BNONE - Branch if No Bit Set, pg. 292. :bnone srel_16_23, as, at, is srel_16_23 & ar = 0 & as & at & op0 = 0b0111 { if ((as & at) == 0) goto srel_16_23; } # BREAK - Breakpoint, pg. 293. :break u4_8_11, u4_4_7 is op2 = 0 & op1 = 0 & ar = 0b0100 & u4_8_11 & u4_4_7 & op0 = 0 { break_inst:4 = inst_start; breakpoint(0x001000:4, break_inst, u4_8_11:1, u4_4_7:1); } # BREAK.N - Narrow Breakpoint, pg. 295. :break.n n_u4_8_11 is n_ar = 0b1111 & n_u4_8_11 & n_at = 0b0010 & n_op0 = 0b1101 { break_inst:4 = inst_start; breakpoint(0x010000:4, break_inst, n_u4_8_11:1, 0:1); } # BT - Branch if True, pg. 296. 
# BT - Branch if True: taken when boolean register bs is set.
:bt bs, srel_16_23 is srel_16_23 & ar = 0b0001 & bs & at = 0b0111 & op0 = 0b0110 { if (bs) goto srel_16_23; }

# CALL0 - Non-windowed Call, pg. 297.
# Return address goes to a0; no register-window rotation.
:call0 srel_6_23_sb2 is srel_6_23_sb2 & u2_4_5 = 0 & op0 = 0b0101 { $(PS_CALLINC) = 0; a0 = inst_next; call srel_6_23_sb2; }

# CALL4 - Call PC-relative, Rotate Window by 4, pg. 298.
# swapN()/restoreN() model the register-window rotation around the call so
# the call looks conventional to analysis; RetN carries the encoded return.
:call4 srel_6_23_sb2 is srel_6_23_sb2 & Ret4 & u2_4_5 = 0b01 & op0 = 0b0101 { $(PS_CALLINC) = 1; a4 = Ret4; swap4(); call srel_6_23_sb2; restore4(); }

# CALL8 - Call PC-relative, Rotate Window by 8, pg. 300.
:call8 srel_6_23_sb2 is srel_6_23_sb2 & Ret8 & u2_4_5 = 0b10 & op0 = 0b0101 { $(PS_CALLINC) = 2; a8 = Ret8; swap8(); call srel_6_23_sb2; restore8(); }

# CALL12 - Call PC-relative, Rotate Window by 12, pg. 302.
:call12 srel_6_23_sb2 is srel_6_23_sb2 & Ret12 & u2_4_5 = 0b11 & op0 = 0b0101 { $(PS_CALLINC) = 3; a12 = Ret12; swap12(); call srel_6_23_sb2; restore12(); }

# CALLX0 - Non-windowed Call Register, pg. 304.
# Target copied to a temp first: a0 (the link write) may be the target register.
:callx0 as is op2 = 0 & op1 = 0 & ar = 0 & as & u2_6_7 = 0b11 & u2_4_5 = 0 & op0 = 0 { $(PS_CALLINC) = 0; local dst = as; a0 = inst_next; call [dst]; }

# CALLX4 - Call Register, Rotate Window by 4, pg. 305.
:callx4 as is op2 = 0 & op1 = 0 & ar = 0 & as & Ret4 & u2_6_7 = 0b11 & u2_4_5 = 0b01 & op0 = 0 { $(PS_CALLINC) = 1; dest:4 = as; a4 = Ret4; swap4(); call [dest]; restore4(); }

# CALLX8 - Call Register, Rotate Window by 8, pg. 307.
:callx8 as is op2 = 0 & op1 = 0 & ar = 0 & as & Ret8 & u2_6_7 = 0b11 & u2_4_5 = 0b10 & op0 = 0 { $(PS_CALLINC) = 2; dest:4 = as; a8 = Ret8; swap8(); call [dest]; restore8(); }

# CALLX12 - Call Register, Rotate Window by 12, pg. 308.
:callx12 as is op2 = 0 & op1 = 0 & ar = 0 & as & Ret12 & u2_6_7 = 0b11 & u2_4_5 = 0b11 & op0 = 0 { $(PS_CALLINC) = 3; dest:4 = as; a12 = Ret12; swap12(); call [dest]; restore12(); }

# CEIL.S - Ceiling Single to Fixed, pg. 311.
# Fixed-point result of fs * 2^imm, rounded toward +infinity.
:ceil.s ar, fs, u4_4_7 is op2 = 0b1011 & op1 = 0b1010 & ar & fs & u4_4_7 & op0 = 0 { local scale:4 = 1 << u4_4_7; ar = ceil(fs f* int2float(scale)); }

# CLAMPS - Signed Clamp, pg. 312.
:clamps ar, as, u5_4_7_plus7 is op2 = 0b0011 & op1 = 0b0011 & ar & as & u5_4_7_plus7 & op0 = 0
{
    # ar min(max(as, -2^{u5_4_7_plus7}), 2^{u5_4_7_plus7}-1)
    # Branch-free select: zext of a 1-bit condition is 0/1, so each sum
    # picks exactly one of its two operands.
    local x:4 = as;
    local clamp:4 = 1 << u5_4_7_plus7;
    local mt:1 = (x s> (-clamp));
    local max:4 = (zext(mt) * x) + (zext(!mt) * (-clamp));
    mt = (x s< (clamp-1));
    ar = (zext(mt) * max) + (zext(!mt) * (clamp-1));
}

# The cache-maintenance instructions below are modeled as opaque pseudo-ops
# applied to the effective address (base register + scaled immediate).

# DHI - Data Cache Hit Invalidate, pg. 313.
:dhi as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0110 & op0 = 0b0010 { dhi(as + u10_16_23_sb2); }

# DHU - Data Cache Hit Unlock, pg. 315.
:dhu as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0b0010 & ar = 0b0111 & as & at = 0b1000 & op0 = 0b0010 { dhu(as + u8_20_23_sb4); }

# DHWB - Data Cache Hit Writeback, pg. 317.
:dhwb as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0100 & op0 = 0b0010 { dhwb(as + u10_16_23_sb2); }

# DHWBI - Data Cache Hit Writeback Invalidate, pg. 319.
:dhwbi as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0101 & op0 = 0b0010 { dhwbi(as + u10_16_23_sb2); }

# DII - Data Cache Index Invalidate, pg. 321.
:dii as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0111 & op0 = 0b0010 { dii(as + u10_16_23_sb2); }

# DIU - Data Cache Index Unlock, pg. 323.
:diu as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0b0011 & ar = 0b0111 & as & at = 0b1000 & op0 = 0b0010 { diu(as + u8_20_23_sb4); }

# DIWB - Data Cache Index Write Back, pg. 325.
:diwb as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0b0100 & ar = 0b0111 & as & at = 0b1000 & op0 = 0b0010 { diwb(as + u8_20_23_sb4); }

# DIWBI - Data Cache Index Write Back Invalidate, pg. 327.
:diwbi as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0b0101 & ar = 0b0111 & as & at = 0b1000 & op0 = 0b0010 { diwbi(as + u8_20_23_sb4); }

# DPFL - Data Cache Prefetch and Lock, pg. 329.
:dpfl as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0 & ar = 0b0111 & as & at = 0b1000 & op0 = 0b0010 { dpfl(as + u8_20_23_sb4); }

# DPFR - Data Cache Prefetch for Read, pg. 331.
:dpfr as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0 & op0 = 0b0010 { dpfr(as + u10_16_23_sb2); }

# DPFRO - Data Cache Prefetch for Read Once, pg. 333.
:dpfro as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0010 & op0 = 0b0010 { dpfro(as + u10_16_23_sb2); }

# DPFW - Data Cache Prefetch for Write, pg. 335.
:dpfw as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0001 & op0 = 0b0010 { dpfw(as + u10_16_23_sb2); }

# DPFWO - Data Cache Prefetch for Write Once, pg. 337.
:dpfwo as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b0011 & op0 = 0b0010 { dpfwo(as + u10_16_23_sb2); }

# DSYNC - Load/Store Synchronize, pg. 339.
:dsync is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b0011 & op0 = 0 { dsync(); }

# ENTRY - Subroutine Entry, pg. 340.
# Rotates the register window by the caller's PS.CALLINC and allocates the
# new frame: as (post-rotation) = caller SP - frame size.
:entry as, u15_12_23_sb3 is u15_12_23_sb3 & as & u2_6_7 = 0b00 & u2_4_5 = 0b11 & op0 = 0b0110 { local callSP = a1; callinc:1 = $(PS_CALLINC); rotateRegWindow(callinc); as = callSP - zext(u15_12_23_sb3); }

# ESYNC - Execute Synchronize, pg. 342.
:esync is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b0010 & op0 = 0 { esync(); }

# EXCW - Exception Wait, pg. 343.
:excw is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b1000 & op0 = 0 { excw(); }

# EXTUI - Extract Unsigned Immediate, pg. 344.
# ar = (at >> shift) masked down to the low (u5_20_23_plus1) bits.
:extui ar, at, u5_8_11_16, u5_20_23_plus1 is u5_20_23_plus1 & u3_17_19 = 0b010 & u5_8_11_16 & ar & at & op0 = 0 { local shifted:4 = at >> u5_8_11_16; local mask:4 = (1:4 << (u5_20_23_plus1))-1; ar = shifted & mask; }

# EXTW - External Wait, pg. 345.
:extw is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b1101 & op0 = 0 { extw(); }

# FLOAT.S - Convert Fixed to Single, pg. 346.
:float.s fr, as, u4_4_7 is op2 = 0b1100 & op1 = 0b1010 & fr & as & u4_4_7 & op0 = 0 { local scale:4 = 1 << u4_4_7; fr = int2float(as) f/ int2float(scale); } # FLOOR.S - Floor Single to Fixed, pg. 347. :floor.s ar, fs, u4_4_7 is op2 = 0b1010 & op1 = 0b1010 & ar & fs & u4_4_7 & op0 = 0 { local scale:4 = 1 << u4_4_7; ar = floor(fs f* int2float(scale)); } # IDTLB - Invalidate Data TLB Entry, pg. 348. :idtlb as is op2 = 0b0101 & op1 = 0 & ar = 0b1100 & as & at = 0 & op0 = 0 { idtlb(); } # IHI - Instruction Cache Hit Invalidate, pg. 349. :ihi as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b1110 & op0 = 0b0010 { ihi(as + u10_16_23_sb2); } # IHU - Instruction Cache Hit Unlock, pg. 351. :ihu as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0b0010 & ar = 0b0111 & as & at = 0b1101 & op0 = 0b0010 { ihu(as + u8_20_23_sb4); } # III - Instruction Cache Index Invalidate, pg. 353. :iii as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b1111 & op0 = 0b0010 { iii(as + u10_16_23_sb2); } # IITLB - Invalidate Instruction TLB Entry, pg. 355. :iitlb as is op2 = 0b0101 & op1 = 0 & ar = 0b0100 & as & at = 0 & op0 = 0 { iitlb(as); } # IIU - Instruction Cache Index Unlock, pg. 356. :iiu as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0b0011 & ar = 0b0111 & as & at = 0b1101 & op0 = 0b0010 { iiu(as + u8_20_23_sb4); } # ILL - Illegal Instruction, pg. 358. :ill is op2 = 0 & op1 = 0 & ar = 0 & as = 0 & at = 0 & op0 = 0 { ill(); goto inst_start; } # ILL.N - Narrow Illegal Instruction, pg. 359. :ill.n is n_ar = 0b1111 & n_as = 0 & n_at = 0b0110 & n_op0 = 0b1101 { ill(); goto inst_start; } # IPF - Instruction Cache Prefetch, pg. 360. :ipf as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0111 & as & at = 0b1100 & op0 = 0b0010 { ipf(as + u10_16_23_sb2); } # IPFL - Instruction Cache Prefetch and Lock, pg. 362. :ipfl as, u8_20_23_sb4 is u8_20_23_sb4 & op1 = 0 & ar = 0b0111 & as & at = 0b1101 & op0 = 0b0010 { ipfl(as + u8_20_23_sb4); } # ISYNC - Instruction Fetch Synchronize, pg. 364. 
:isync is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0 & op0 = 0 { isync(); }

# J - Unconditional Jump, pg. 366.
:j srel_6_23 is srel_6_23 & u2_4_5 = 0 & op0 = 0b0110 { goto srel_6_23; }

# J.L is a macro.

# RET (JX A0) - Non-Windowed Return, pg. 478.
:ret is op2 = 0 & op1 = 0 & ar = 0 & as = 0 & u2_6_7 = 0b10 & u2_4_5 = 0b10 & op0 = 0 { return [a0]; }

# The manual suggests that RET is equivalent to JX A0, yet RET has bit 5
# unset while JX doesn't, so both encodings are accepted here.
:ret is op2 = 0 & op1 = 0 & ar = 0 & as = 0 & u2_6_7 = 0b10 & u2_4_5 = 0b00 & op0 = 0 { return [a0]; }

# JX - Unconditional Jump Register, pg. 368.
:jx as is op2 = 0 & op1 = 0 & ar = 0 & as & u2_6_7 = 0b10 & u2_4_5 = 0b10 & op0 = 0 { goto [as]; }

# L8UI - Load 8-bit Unsigned, pg. 369.
:l8ui at, as, u8_16_23 is u8_16_23 & ar = 0 & as & at & op0 = 0b0010 { local addr:4 = as + zext(u8_16_23:1); at = zext(*:1 addr); }

# L16SI - Load 16-bit Signed, pg. 370.
:l16si at, as, u9_16_23_sb1 is u9_16_23_sb1 & ar = 0b1001 & as & at & op0 = 0b0010 { local addr:4 = as + u9_16_23_sb1; at = sext(*:2 addr); }

# L16UI - Load 16-bit Unsigned, pg. 372.
# NOTE: ar = 0b001 is the same value (1) as the 4-bit spellings elsewhere.
:l16ui at, as, u9_16_23_sb1 is u9_16_23_sb1 & ar = 0b001 & as & at & op0 = 0b0010 { local addr:4 = as + u9_16_23_sb1; at = zext(*:2 addr); }

# L32AI - Load 32-bit Acquire, pg. 374.
:l32ai at, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b1011 & as & at & op0 = 0b0010 { local addr:4 = as + u10_16_23_sb2; at = *:4 addr; acquire(addr); }

# L32E - Load 32-bit for Window Exceptions, pg. 376.
:l32e at, as, s5_12_15_oex is op2 = 0 & op1 = 0b1001 & s5_12_15_oex & as & at & op0 = 0 { ptr:4 = as + sext(s5_12_15_oex); at = *:4 ptr; }

# L32I - Load 32-bit, pg. 378.
:l32i at, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0010 & as & at & op0 = 0b0010 { local addr:4 = as + u10_16_23_sb2; at = *:4 addr; }

# L32I.N - Narrow Load 32-bit, pg. 380.
:l32i.n n_at, n_as, n_u6_12_15_sb2 is n_u6_12_15_sb2 & n_as & n_at & n_op0 = 0b1000 { local addr:4 = n_as + n_u6_12_15_sb2; n_at = *:4 addr; }

# L32R - Load 32-bit PC-relative, pg. 382.
# NOTE(review): presumably srel_8_23_oex_sb2 exports the value loaded from the
# PC-relative literal address (L32R is a memory load) - confirm against the
# subconstructor's definition earlier in the file.
:l32r at, srel_8_23_oex_sb2 is srel_8_23_oex_sb2 & at & op0 = 0b0001 { at = srel_8_23_oex_sb2; }

# LDCT - Load Data Cache Tag, pg. 384.
:ldct at, as is op2 = 0b1111 & op1 = 0b0001 & ar = 0b1000 & as & at & op0 = 0 { at = ldct(as); }

# LICT - Load Instruction Cache Tag, pg. 388.
:lict at, as is op2 = 0b1111 & op1 = 0b0001 & ar = 0 & as & at & op0 = 0 { at = lict(as); }

# LICW - Load Instruction Cache Word, pg. 390.
:licw at, as is op2 = 0b1111 & op1 = 0b0010 & ar = 0 & as & at & op0 = 0 { at = licw(as); }

# LSI - Load Single Immediate, pg. 398.
:lsi ft, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0 & as & ft & op0 = 0b0011 { local addr:4 = as + u10_16_23_sb2; ft = *:4 addr; }

# LSIU - Load Single Immediate with Update, pg. 400.
:lsiu ft, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b1000 & as & ft & op0 = 0b0011 { local addr:4 = as + u10_16_23_sb2; ft = *:4 addr; as = addr; }

# LSX - Load Single Indexed, pg. 402.
:lsx fr, as, at is op2 = 0 & op1 = 0b1000 & fr & as & at & op0 = 0 { local addr:4 = as+at; fr = *:4 addr; }

# LSXU - Load Single Indexed with Update, pg. 404.
:lsxu fr, as, at is op2 = 0b0001 & op1 = 0b1000 & fr & as & at & op0 = 0 { local addr:4 = as+at; fr = *:4 addr; as = addr; }

# MADD.S - Multiply and Add Single, pg. 406.
:madd.s fr, fs, ft is op2 = 0b0100 & op1 = 0b1010 & fr & fs & ft & op0 = 0 { fr = fr f+ (fs f* ft); }

# MAX - Maximum Value, pg. 407.  Branch-free select via 0/1 zext products.
:max ar, as, at is op2 = 0b0101 & op1 = 0b0011 & ar & as & at & op0 = 0 { test:1 = as s< at; ar = (zext(test) * at) + (zext(!test) * as); }

# MAXU - Maximum Value Unsigned, pg. 408.
:maxu ar, as, at is op2 = 0b0111 & op1 = 0b0011 & ar & as & at & op0 = 0 { test:1 = as < at; ar = (zext(test) * at) + (zext(!test) * as); }

# MEMW - Memory Wait, pg. 409.
:memw is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b1100 & op0 = 0 { memw(); }

# MIN - Minimum Value, pg. 410.  Branch-free select via 0/1 zext products.
:min ar, as, at is op2 = 0b0100 & op1 = 0b0011 & ar & as & at & op0 = 0 { test:1 = as s< at; ar = (zext(test) * as) + (zext(!test) * at); }

# MINU - Minimum Value Unsigned, pg. 411.
:minu ar, as, at is op2 = 0b0110 & op1 = 0b0011 & ar & as & at & op0 = 0 { test:1 = as < at; ar = (zext(test) * as) + (zext(!test) * at); }

# MOV.N - Narrow Move, pg. 413.
:mov.n n_at, n_as is n_ar = 0 & n_as & n_at & n_op0 = 0b1101 { n_at = n_as; }

# MOV.S - Move Single, pg. 414.
:mov.s fr, fs is op2 = 0b1111 & op1 = 0b1010 & fr & fs & at = 0 & op0 = 0 { fr = fs; }

# The conditional moves below skip the assignment when the condition for
# moving does NOT hold.  FIX: the branch targets had been lost in this copy
# of the file ("goto ;"); restored as inst_next, which skips the single
# remaining assignment and is equivalent to a label at the end of the body.

# MOVEQZ - Move if Equal to Zero, pg. 415.
:moveqz ar, as, at is op2 = 0b1000 & op1 = 0b0011 & ar & as & at & op0 = 0 { if (at != 0) goto inst_next; ar = as; }

# MOVEQZ.S - Move Single if Equal to Zero, pg. 416.
:moveqz.s fr, fs, at is op2 = 0b1000 & op1 = 0b1011 & fr & fs & at & op0 = 0 { if (at != 0) goto inst_next; fr = fs; }

# MOVF - Move if False, pg. 417.
:movf ar, as, bt is op2 = 0b1100 & op1 = 0b0011 & ar & as & bt & op0 = 0 { if (bt) goto inst_next; ar = as; }

# MOVF.S - Move Single if False, pg. 418.
:movf.s fr, fs, bt is op2 = 0b1100 & op1 = 0b1011 & fr & fs & bt & op0 = 0 { if (bt) goto inst_next; fr = fs; }

# MOVGEZ - Move if Greater Than or Equal to Zero, pg. 419.
:movgez ar, as, at is op2 = 0b1011 & op1 = 0b0011 & ar & as & at & op0 = 0 { if (at s< 0) goto inst_next; ar = as; }

# MOVGEZ.S - Move Single if Greater Than or Equal to Zero, pg. 420.
:movgez.s fr, fs, at is op2 = 0b1011 & op1 = 0b1011 & fr & fs & at & op0 = 0 { if (at s< 0) goto inst_next; fr = fs; }

# MOVI - Move Immediate, pg. 421.
:movi at, s16_16_23_8_11 is s16_16_23_8_11 & ar = 0b1010 & at & op0 = 0b0010 { local val:4 = sext(s16_16_23_8_11); at = val; }

# MOVI.N - Narrow Move Immediate, pg. 422.
:movi.n n_as, n_s8_12_15_4_6_asymm is n_s8_12_15_4_6_asymm & n_as & n_u1_7 = 0 & n_op0 = 0b1100 { local val:4 = sext(n_s8_12_15_4_6_asymm); n_as = val; }

# MOVLTZ - Move if Less Than Zero, pg. 423.
:movltz ar, as, at is op2 = 0b1010 & op1 = 0b0011 & ar & as & at & op0 = 0 { if (at s>= 0) goto inst_next; ar = as; }

# MOVLTZ.S - Move Single if Less Than Zero, pg. 424.
:movltz.s fr, fs, at is op2 = 0b1010 & op1 = 0b1011 & fr & fs & at & op0 = 0 { if (at s>= 0) goto inst_next; fr = fs; }

# MOVNEZ - Move if Not Equal to Zero, pg. 425.
:movnez ar, as, at is op2 = 0b1001 & op1 = 0b0011 & ar & as & at & op0 = 0 { if (at == 0) goto inst_next; ar = as; }

# MOVNEZ.S - Move Single if Not Equal to Zero, pg. 426.
:movnez.s fr, fs, at is op2 = 0b1001 & op1 = 0b1011 & fr & fs & at & op0 = 0 { if (at == 0) goto inst_next; fr = fs; }

# MOVSP - Move to Stack Pointer, pg. 427.
# Branch-free model: keeps at when WindowStart == 0, else copies as.
:movsp at, as is op2 = 0 & op1 = 0 & ar = 0b0001 & as & at & op0 = 0 { at = (zext(WindowStart == 0) * at) + (zext(WindowStart != 0) * as); }

# MOVT - Move if True, pg. 428.
:movt ar, as, bt is op2 = 0b1101 & op1 = 0b0011 & ar & as & bt & op0 = 0 { if (!bt) goto inst_next; ar = as; }

# MOVT.S - Move Single if True, pg. 429.
:movt.s fr, fs, bt is op2 = 0b1101 & op1 = 0b1011 & fr & fs & bt & op0 = 0 { if (!bt) goto inst_next; fr = fs; }

# MSUB.S - Multiply and Subtract Single, pg. 430.
:msub.s fr, fs, ft is op2 = 0b0101 & op1 = 0b1010 & fr & fs & ft & op0 = 0 { fr = fr f- (fs f* ft); }

# MUL.S - Multiply Single, pg. 435.
:mul.s fr, fs, ft is op2 = 0b0010 & op1 = 0b1010 & fr & fs & ft & op0 = 0 { fr = fs f* ft; }

# MUL16S - Multiply 16-bit Signed, pg. 436.
:mul16s ar, as, at is op2 = 0b1101 & op1 = 0b0001 & ar & as & at & op0 = 0 { ar = sext(as:2) * sext(at:2); }

# MUL16U - Multiply 16-bit Unsigned, pg. 437.
:mul16u ar, as, at is op2 = 0b1100 & op1 = 0b0001 & ar & as & at & op0 = 0 { ar = zext(as:2) * zext(at:2); }

# MULL - Multiply Low, pg. 450.
:mull ar, as, at is op2 = 0b1000 & op1 = 0b0010 & ar & as & at & op0 = 0 { ar = as * at; } # MULSH - Multiply Signed High, pg. 455. :mulsh ar, as, at is op2 = 0b1011 & op1 = 0b0010 & ar & as & at & op0 = 0 { local s64:8 = sext(as); local t64:8 = sext(at); local p:8 = (s64 * t64); ar = p(4); } # MULUH - Multiply Unsigned High, pg. 456. :muluh ar, as, at is op2 = 0b1010 & op1 = 0b0010 & ar & as & at & op0 = 0 { local s64:8 = zext(as); local t64:8 = zext(at); local p:8 = (s64 * t64); ar = p(4); } # NEG - Negate, pg. 457. :neg ar, at is op2 = 0b0110 & op1 = 0 & ar & as = 0 & at & op0 = 0 { ar = -at; } # NEG.S - Negate Single, pg. 458. :neg.s fr, fs is op2 = 0b1111 & op1 = 0b1010 & fr & fs & at = 0b0110 & op0 = 0 { fr = 0 f- fs; } # NOP - No Operation, pg. 459. :nop is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b1111 & op0 = 0 { } # NOP.N - Narrow No Operation, pg. 460. :nop.n is n_ar = 0b1111 & n_as = 0 & n_at = 0b0011 & n_op0 = 0b1101 { } # NSA - Normalization Shift Amount, pg. 461. :nsa at, as is op2 = 0b0100 & op1 = 0 & ar = 0b1110 & as & at & op0 = 0 { at = lzcount(~as); } # NSAU - Normalization Shift Amount Unsigned, pg. 462. (Count leading zeros) :nsau at, as is op2 = 0b0100 & op1 = 0 & ar = 0b1111 & as & at & op0 = 0 { at = lzcount(as); } # OEQ.S - Compare Single Equal, pg. 463. :oeq.s br, fs, ft is op2 = 0b0010 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = !nan(fs) && !nan(ft) && fs f== ft; } # OLE.S - Compare Single Ordered and Less Than or Equal, pg. 464 :ole.s br, fs, ft is op2 = 0b0110 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = !nan(fs) && !nan(ft) && fs f<= ft; } # OLT.S - Compare Single Ordered and Less Than, pg. 465. :olt.s br, fs, ft is op2 = 0b0100 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = !nan(fs) && !nan(ft) && fs f< ft; } # MOV - Move, pg. 412. Special case of OR as, at, at. :mov ar, as is op2 = 0b0010 & op1 = 0 & ar & as & as = at & op0 = 0 { ar = as; } # OR - Bitwise Logical Or, pg. 466. 
:or ar, as, at is op2 = 0b0010 & op1 = 0 & ar & as & at & op0 = 0 { ar = as | at; } # ORB - Boolean Or, pg. 467. :orb br, bs, bt is op2 = 0b0010 & op1 = 0b0010 & br & bs & bt & op0 = 0 { br = bs || bt; } # ORBC - Boolean Or with Complement, pg. 468. :orbc br, bs, bt is op2 = 0b0011 & op1 = 0b0010 & br & bs & bt & op0 = 0 { br = bs || !bt; } # PDTLB - Probe Data TLB, pg. 469. :pdtlb at, as is op2 = 0b0101 & op1 = 0 & ar = 0b1101 & as & at & op0 = 0 { at = pdtlb(as); } # PITLB - Probe Instruction TLB, pg. 470. :pitlb at, as is op2 = 0b0101 & op1 = 0 & ar = 0b0101 & as & at & op0 = 0 { at = pitlb(as); } # QUOS - Quotient Signed, pg. 471. :quos ar, as, at is op2 = 0b1101 & op1 = 0b0010 & ar & as & at & op0 = 0 { ar = as s/ at; } # QUOU - Quotient Unsigned, pg. 472. :quou ar, as, at is op2 = 0b1100 & op1 = 0b0010 & ar & as & at & op0 = 0 { ar = as / at; } # RDTLB0 - Read Data TLB Virtual Entry, pg. 473. :rdtlb0 at, as is op2 = 0b0101 & op1 = 0 & ar = 0b1011 & as & at & op0 = 0 { at = rdtlb0(as); } # RDTLB1 - Read Data TLB Entry Translation, pg. 474. :rdtlb1 at, as is op2 = 0b0101 & op1 = 0 & ar = 0b1111 & as & at & op0 = 0 { at = rdtlb1(as); } # REMS - Remainder Signed, pg. 475. :rems ar, as, at, is op2 = 0b1111 & op1 = 0b0010 & ar & as & at & op0 = 0 { ar = as s% at; } # REMU - Remainder Unsigned, pg. 476. :remu ar, as, at, is op2 = 0b1110 & op1 = 0b0010 & ar & as & at & op0 = 0 { ar = as % at; } # RER - Read External Register, pg. 477. :rer as, at is op2 = 0b0100 & op1 = 0 & ar = 0b0110 & as & at & op0 = 0 { as = rer(at); } # RET.N - Narrow Non-Windowed Return, pg. 479. :ret.n is n_ar = 0b1111 & n_as = 0 & n_at = 0 & n_op0 = 0b1101 { return [a0]; } # RETW - Windowed Return, pg. 480. :retw is op2 = 0 & op1 = 0 & ar = 0 & as = 0 & u2_6_7 = 0b10 & u2_4_5 = 0b01 & op0 = 0 { local addr:4 = (a0 & 0x3fffffff) | (inst_start & 0xc0000000); restoreRegWindow(); return [addr]; } # RETW.N - Narrow Windowed Return, pg. 482. 
:retw.n is n_ar = 0b1111 & n_as = 0 & n_at = 0b0001 & n_op0 = 0b1101 { local addr:4 = (a0 & 0x3fffffff) | (inst_start & 0xc0000000); restoreRegWindow(); return [addr]; } # RFDD - Return from Debug and Dispatch, pg. 484. :rfdd is op2 = 0b1111 & op1 = 0b0001 & ar = 0b1110 & (as = 0b0000 | as = 0b0001) & at = 0b0001 & op0 = 0 { local tmp:4 = rfdd(); return [tmp]; } # RFDE _ Return From Double Exception, pg. 485. :rfde is op2 = 0 & op1 = 0 & ar = 0b0011 & as =0b0010 & at = 0 & op0 = 0 { local tmp:4 = rfde(); return [tmp]; } # RFDO - Return from Debug Operation, pg. 486. :rfdo is op2 = 0b1111 & op1 = 0b0001 & ar = 0b1110 & as = 0 & at = 0 & op0 = 0 { local tmp:4 = rfdo(); return [tmp]; } # RFE - Return From Exception, pg. 487. :rfe is op2 = 0 & op1 = 0 & ar = 0b0011 & as = 0 & at = 0 & op0 = 0 { local tmp:4 = rfe(); return [tmp]; } rfi_epc: ptr is u4_8_11 [ ptr = $(EPC_BASE) + (4 * u4_8_11); ] { export *[register]:4 ptr; } rfi_eps: ptr is u4_8_11 [ ptr = $(EPS_BASE) + (4 * u4_8_11); ] { export *[register]:4 ptr; } # RFI - Return from High-Priority Interrupt, pg. 488. :rfi u4_8_11 is op2 = 0 & op1 = 0 & ar = 0b0011 & u4_8_11 & at = 0b0001 & op0 = 0 & rfi_epc & rfi_eps { PS = rfi_eps; return [rfi_epc]; } # RFME - Return from Memory Error, pg. 489. :rfme is op2 = 0 & op1 = 0 & ar = 0b0011 & as = 0 & at = 0b0010 & op0 = 0 { PS = MEPS; MESR[0,1] = 0; return [MEPC]; } # RFR - Move FR to AR, pg. 490. :rfr ar, fs is op2 = 0b1111 & op1 = 0b1010 & ar & fs & at = 0b0100 & op0 = 0 { ar = fs; } # RFUE - Return from User-Mode Exception, pg. 491. :rfue is op2 = 0 & op1 = 0 & ar = 0b0011 & as = 0b0001 & at = 0 & op0 = 0 { local tmp:4 = rfue(); return [tmp]; } # RFWO - Return from Window Overflow, pg. 492. :rfwo is op2 = 0 & op1 = 0 & ar = 0b0011 & as = 0b0100 & at = 0 & op0 = 0 { $(PS_EXCM) = 0; rfwo(); return [EPC1]; } # RFWU - Return from Window Underflow, pg. 493. 
:rfwu is op2 = 0 & op1 = 0 & ar = 0b0011 & as = 0b0101 & at = 0 & op0 = 0 { $(PS_EXCM) = 0; rfwu(); return [EPC1]; }

# RITLB0 - Read Instruction TLB Virtual Entry, pg. 494.
:ritlb0 at, as is op2 = 0b0101 & op1 = 0 & ar = 0b0011 & as & at & op0 = 0 { at = ritlb0(as); }

# RITLB1 - Read Instruction TLB Entry Translation, pg. 495.
:ritlb1 at, as is op2 = 0b0101 & op1 = 0 & ar = 0b0111 & as & at & op0 = 0 { at = ritlb1(as); }

# ROTW - Rotate Window, pg. 496.
:rotw s4_4_7 is op2 = 0b0100 & op1 = 0 & ar = 0b1000 & as = 0 & s4_4_7 & op0 = 0 { WindowBase = WindowBase + s4_4_7; }

# ROUND.S - Round Single to Fixed, pg. 497.
# FIX: branch labels were lost in this copy ("goto ;"); reconstructed the
# NaN/overflow handling.  Also round the already-computed float product
# instead of the original "fs f* scale", which mixed float and int operands.
:round.s ar, fs, u4_4_7 is op2 = 0b1000 & op1 = 0b1010 & ar & fs & u4_4_7 & op0 = 0
{
    local scale:4 = 1 << u4_4_7;
    local result = fs f* int2float(scale);
    isNan:1 = nan(result);
    if (isNan) goto <overflow>;
    ar = round(result);
    goto <done>;
    <overflow>
    ar = 0x80000000;
    if (fs f< 0) goto <done>;
    ar = 0x7fffffff;
    <done>
}

# RSIL - Read and Set Interrupt Level, pg. 498.
:rsil at, u4_8_11 is op2 = 0 & op1 = 0 & ar = 0b0110 & u4_8_11 & at & op0 = 0 { at = rsil(u4_8_11:1); }

# RSR - Read Special Register, pg. 500.
# FIX: pattern constrained op0 twice and never constrained op2; the first
# term must be op2 = 0 (RSR encoding; cf. WSR with op2 = 1, XSR with op2 = 6).
:rsr at, sr is op2 = 0 & op1 = 0b0011 & sr & at & op0 = 0 { at = rsr(sr:1); }

# RSYNC - Register Read Synchronize, pg. 502.
:rsync is op2 = 0 & op1 = 0 & ar = 0b0010 & as = 0 & at = 0b0001 & op0 = 0 { rsync(); }

# RUR - Read User Register, pg. 503.
:rur ar, u8_4_11 is op2 = 0b1110 & op1 = 0b0011 & ar & u8_4_11 & op0 = 0 { ar = rur(u8_4_11:1); }

# S8I - Store 8-bit, pg. 504.
:s8i at, as, u8_16_23 is u8_16_23 & ar = 0b0100 & as & at & op0 = 0b0010 { local addr:4 = as + zext(u8_16_23:1); *:1 addr = at:1; }

# S16I - Store 16-bit, pg. 505.
:s16i at, as, u9_16_23_sb1 is u9_16_23_sb1 & ar = 0b0101 & as & at & op0 = 0b0010 { local addr:4 = as + u9_16_23_sb1; *:2 addr = at:2; }

# S32C1I - Store 32-bit Compare Conditional, pg. 506.
# Stores at only when memory still equals SCOMPARE1; at always receives the
# old memory value.  (Branch label reconstructed; it was lost in this copy.)
:s32c1i at, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b1110 & as & at & op0 = 0b0010
{
    local addr:4 = as + u10_16_23_sb2;
    old:4 = *:4 addr;
    if (old != SCOMPARE1) goto <done>;
    *:4 addr = at;
    <done>
    at = old;
}

# S32E - Store 32-bit for Window Exceptions, pg. 508.
:s32e at, as, s5_12_15_oex is op2 = 0b0100 & op1 = 0b1001 & s5_12_15_oex & as & at & op0 = 0 { ptr:4 = as + sext(s5_12_15_oex); *:4 ptr = at; }

# S32I - Store 32-bit, pg. 510.
:s32i at, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0110 & as & at & op0 = 0b0010 { local addr:4 = as + u10_16_23_sb2; *:4 addr = at; }

# S32I.N - Narrow Store 32-bit, pg. 512.
:s32i.n n_at, n_as, n_u6_12_15_sb2 is n_u6_12_15_sb2 & n_as & n_at & n_op0 = 0b1001 { local addr:4 = n_as + n_u6_12_15_sb2; *:4 addr = n_at; }

# S32RI - Store 32-bit Release, pg. 514.
:s32ri at, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b1111 & as & at & op0 = 0b0010 { local addr:4 = as + u10_16_23_sb2; release(addr); *:4 addr = at; }

# SDCT - Store Data Cache Tag, pg. 516.
:sdct at, as is op2 = 0b1111 & op1 = 0b0001 & ar = 0b1001 & as & at & op0 = 0 { sdct(as, at); }

# SEXT - Sign Extend, pg. 518.  Shift left then arithmetic shift right to
# sign-extend from bit (imm) upward; the 7- and 15-bit cases use native sext.
:sext ar, as, u5_4_7_plus7 is op2 = 0b0010 & op1 = 0b0011 & ar & as & u5_4_7_plus7 & op0 = 0 { local shift:4 = 31 - u5_4_7_plus7; local tmp:4 = as << shift; ar = tmp s>> shift; }
:sext ar, as, 7 is op2 = 0b0010 & op1 = 0b0011 & ar & as & u4_4_7 = 0 & op0 = 0 { ar = sext(as:1); }
:sext ar, as, 15 is op2 = 0b0010 & op1 = 0b0011 & ar & as & u4_4_7 = 8 & op0 = 0 { ar = sext(as:2); }

# SICT - Store Instruction Cache Tag, pg. 519.
:sict at, as is op2 = 0b1111 & op1 = 0b0001 & ar = 0b0001 & as & at & op0 = 0 { sict(as, at); }

# SICW - Store Instruction Cache word, pg. 521.
:sicw at, as is op2 = 0b1111 & op1 = 0b0001 & ar = 0b0011 & as & at & op0 = 0 { sicw(as, at); }

# SIMCALL - Simulator Call, pg. 523.
:simcall is op2 = 0 & op1 = 0 & ar = 0b0101 & as = 0b0001 & at = 0 & op0 = 0 { simcall(); }

# SLL - Shift Left Logical, pg. 524.
:sll ar, as is op2 = 0b1010 & op1 = 0b0001 & ar & as & at = 0 & op0 = 0 { local sa:4 = 32 - SAR; ar = as << sa; } # SLLI - Shift Left Logical Immediate, pg. 525. :slli ar, as, u5_4_7_20 is u3_21_23 = 0 & u5_4_7_20 & op1 = 0b0001 & ar & as & op0 = 0 { ar = as << u5_4_7_20; } # SRA - Shift Right Arithmetic, pg. 526. :sra ar, at is op2 = 0b1011 & op1 = 0b0001 & ar & as = 0 & at & op0 = 0 { ar = at s>> SAR; } # SRAI - Shift Right Arithmetic Immediate, pg. 527. :srai ar, at, u5_8_11_20 is u3_21_23 = 0b001 & u5_8_11_20 & op1 = 0b0001 & ar & at & op0 = 0 { ar = at s>> u5_8_11_20; } # SRC - Shift Right Combined, pg. 528. :src ar, as, at is op2 = 0b1000 & op1 = 0b0001 & ar & as & at & op0 = 0 { local s64:8 = zext(as); local t64:8 = zext(at); local combined:8 = (s64 << 32) | t64; local shifted:8 = combined >> SAR; ar = shifted:4; } # SRL - Shift Right Logical, pg. 529. :srl ar, at is op2 = 0b1001 & op1 = 0b0001 & ar & as = 0 & at & op0 = 0 { ar = at >> SAR; } # SRLI - Shift Right Logical Immediate, pg. 530. :srli ar, at, u4_8_11 is op2 = 0b0100 & op1 = 0b0001 & ar & u4_8_11 & at & op0 = 0 { ar = at >> u4_8_11; } # SSA8B - Set Shift Amount for BE Byte Shift, pg. 531. :ssa8b as is op2 = 0b0100 & op1 = 0 & ar = 0b0011 & as & at = 0 & op0 = 0 { local lsa:4 = (as&3)*8; SAR = 32 - lsa; } # SSA8L - Set Shift Amount for LE Byte Shift, pg. 532. :ssa8l as is op2 = 0b0100 & op1 = 0 & ar = 0b0010 & as & at = 0 & op0 = 0 { local rsa:4 = (as & 3)*8; SAR = rsa; } # SSAI - Set Shift Amount Immediate, pg. 533. :ssai u5_8_11_4 is op2 = 0b0100 & op1 = 0 & ar = 0b0100 & u5_8_11_4 & u3_5_7 = 0 & op0 = 0 { SAR = u5_8_11_4; } # SSI - Store Single Immediate, pg. 534. :ssi ft, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b0100 & as & ft & op0 = 0b0011 { local addr:4 = as + u10_16_23_sb2; *:4 addr = ft; } # SSIU - Store Single Immediate with Update, pg. 536. 
:ssiu ft, as, u10_16_23_sb2 is u10_16_23_sb2 & ar = 0b1100 & as & ft & op0 = 0b0011 { local addr:4 = as + u10_16_23_sb2; *:4 addr = ft; as = addr; } # SSL - Set Shift Amount for Left Shift, pg. 538. :ssl as is op2 = 0b0100 & op1 = 0 & ar = 0b0001 & as & at = 0 & op0 = 0 { SAR = 32 - (as & 0x1f); } # SSR - Set Shift Amount for Right Shift, pg. 539. :ssr as is op2 = 0b0100 & op1 = 0 & ar = 0 & as & at = 0 & op0 = 0 { SAR = (as & 0x1f); } # SSX - Store Single Indexed, pg. 540. :ssx fr, as, at is op2 = 0b0100 & op1 = 0b1000 & fr & as & at & op0 = 0 { local addr:4 = as+at; *:4 addr = fr; } # SSXU - Store Single Indexed with Update, pg. 541. :ssxu fr, as, at is op2 = 0b0101 & op1 = 0b1000 & fr & as & at & op0 = 0 { local addr:4 = as+at; *:4 addr = fr; as = addr; } # SUB - Subtract, pg. 542. :sub ar, as, at is op2 = 0b1100 & op1 = 0 & ar & as & at & op0 = 0 { ar = as - at; } # SUB.S - Subtract Single, pg. 543. :sub.s fr, fs, ft is op2 = 0b0001 & op1 = 0b1010 & fr & fs & ft & op0 = 0 { fr = fs f- ft; } # SUBX2 - Subtract with Shift by 1, pg. 544. :subx2 ar, as, at is op2 = 0b1101 & op1 = 0 & ar & as & at & op0 = 0 { ar = (as << 1) - at; } # SUBX4 - Subtract with Shift by 2, pg. 545. :subx4 ar, as, at is op2 = 0b1110 & op1 = 0 & ar & as & at & op0 = 0 { ar = (as << 2) - at; } # SUBX8 - Subtract with Shift by 3, pg. 546. :subx8 ar, as, at is op2 = 0b1111 & op1 = 0 & ar & as & at & op0 = 0 { ar = (as << 3) - at; } # SYSCALL - System Call, pg. 547. :syscall is op2 = 0 & op1 = 0 & ar = 0b0101 & as = 0 & at = 0 & op0 = 0 { syscall(); } # TRUNC.S - Truncate Single to Fixed, pg. 548 :trunc.s ar, fs, u4_4_7 is op2 = 0b1001 & op1 = 0b1010 & ar & fs & u4_4_7 & op0 = 0 { local scale:4 = 1 << u4_4_7; local result = fs f* int2float(scale); isNan:1 = nan(result); if (isNan) goto ; ar = trunc(fs f* scale); goto ; ar = 0x80000000; if (fs f< 0) goto ; ar = 0x7fffffff; } # UEQ.S - Compare Single Unordered or Equal, pg. 549. 
:ueq.s br, fs, ft is op2 = 0b0011 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = nan(fs) || nan(ft) || fs f== ft; } # UFLOAT.S - Convert Unsigned Fixed to Single, pg. 550. :ufloat.s fr, as, u4_4_7 is op2 = 0b1101 & op1 = 0b1010 & fr & as & u4_4_7 & op0 = 0 { local tmp:8 = zext(as); local scale:4 = 1 << u4_4_7; fr = int2float(tmp) f/ int2float(scale); } # ULE.S - Compare Single Unordered or Less Than or Equal, pg. 551. :ule.s br, fs, ft is op2 = 0b0111 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = nan(fs) || nan(ft) || fs f<= ft; } # ULT.S - Compare Single Unordered or Less Than, pg. 552. :ult.s br, fs, ft is op2 = 0b0101 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = nan(fs) || nan(ft) || fs f< ft; } # UN.S - Compare Single Unordered, pg. 554. :un.s br, fs, ft is op2 = 0b0001 & op1 = 0b1011 & br & fs & ft & op0 = 0 { br = nan(fs) || nan(ft); } # UTRUNC.S - Truncate Single to Fixed Unsigned, pg. 555. :utrunc.s ar, fs, u4_4_7 is op2 = 0b1110 & op1 = 0b1010 & ar & fs & u4_4_7 & op0 = 0 { local scale:4 = int2float(1:2 << u4_4_7:2); local tmp:8 = trunc(fs f* scale); local posof = nan(fs) || (tmp >> 16) != 0; local negof = tmp s< 0; local noof = !posof && !negof; ar = zext(posof)*0xffffffff + zext(negof)*0x80000000 + zext(noof)*tmp:4; } # WAITI - Wait Interrupt, pg. 556. :waiti u4_8_11 is op2 = 0 & op1 = 0 & ar = 0b0111 & u4_8_11 & at = 0 & op0 = 0 { waiti(u4_8_11:4); } # WDTLB - Write Data TLB Entry, pg. 557. :wdtlb at, as is op2 = 0b0101 & op1 = 0 & ar = 0b1110 & as & at & op0 = 0 { wdtlb(as, at); } # WER - Write External Register, pg. 558. :wer as, at is op2 = 0b0100 & op1 = 0 & ar = 0b0111 & as & at & op0 = 0 { wer(as, at); } # WFR - Move AR to FR, pg. 559. :wfr fr, as is op2 = 0b1111 & op1 = 0b1010 & fr & as & at = 0b0101 & op0 = 0 { fr = as; } # WITLB - Write Instruction TLB Entry, pg. 560. :witlb at, as is op2 = 0b0101 & op1 = 0 & ar = 0b0110 & as & at & op0 = 0 { witlb(as, at); } # WSR - Write Special Register, pg. 561. 
:wsr at, sr is op2 = 0b0001 & op1 = 0b0011 & sr & at & op0 = 0 { wsr(sr:1, at); } # WUR - Write User Register, pg. 563. :wur at, sr is op2 = 0b1111 & op1 = 0b0011 & sr & at & op0 = 0 { wur(sr:1, at); } # XOR - Bitwise Exclusive Or, pg. 564. :xor ar, as, at is op2 = 0b0011 & op1 = 0 & ar & as & at & op0 = 0 { ar = as ^ at; } # XORB - Boolean Exclusive Or, pg. 565. :xorb br, bs, bt is op2 = 0b0100 & op1 = 0b0010 & br & bs & bt & op0 = 0 { br = bs ^^ bt; } # XSR - Exchange Special Register, pg. 566. :xsr at, sr is op2 = 0b0110 & op1 = 0b0001 & sr & at & op0 = 0 { at = xsr(sr:1, at); } ## MAC16 option ## # LDDEC - Load with Autodecrement, pg. 386. :lddec "MAC16_REGS[" mw_12_13 "]", as is op2 = 0b1001 & op1 = 0 & u2_14_15 = 0 & mw_12_13 & as & at = 0 & op0 = 0b0100 { local ptr:4 = as - 4; mw_12_13 = *:4 ptr; as = ptr; } # LDINC - Load with Autoincrement, pg. 387. :ldinc "MAC16_REGS[" mw_12_13 "]", as is op2 = 0b1000 & op1 = 0 & u2_14_15 = 0 & mw_12_13 & as & at = 0 & op0 = 0b0100 { local ptr:4 = as + 4; mw_12_13 = *:4 ptr; as = ptr; } # MUL.AA.* - Signed Multiply, pg. 431. :mul.aa.ll as, at is op2 = 0x7 & op1 = 0x4 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); } :mul.aa.hl as, at is op2 = 0x7 & op1 = 0x5 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); } :mul.aa.lh as, at is op2 = 0x7 & op1 = 0x6 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); } :mul.aa.hh as, at is op2 = 0x7 & op1 = 0x7 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); } # MUL.AD.* - Signed Multiply, pg. 432. 
:mul.ad.ll as, m2m3_6_6 is op2 = 0x3 & op1 = 0x4 & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as:2; tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.ad.hl as, m2m3_6_6 is op2 = 0x3 & op1 = 0x5 & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as(2); tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.ad.lh as, m2m3_6_6 is op2 = 0x3 & op1 = 0x6 & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as:2; tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.ad.hh as, m2m3_6_6 is op2 = 0x3 & op1 = 0x7 & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as(2); tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }

# MUL.DA.* - Signed Multiply, pg. 433.  (Section header said MUL.AD - fixed.)
# NOTE(review): in the .hl/.lh variants here the half selection is applied to
# at/m0m1 in the opposite order from the .aa/.ad/.dd families (hl takes
# m0m1 low, at high) - confirm against the ISA manual before changing.
:mul.da.ll m0m1_14_14, at is op2 = 0x6 & at & op1 = 0x4 & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.da.hl m0m1_14_14, at is op2 = 0x6 & op1 = 0x5 & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & at & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.da.lh m0m1_14_14, at is op2 = 0x6 & op1 = 0x6 & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & at & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.da.hh m0m1_14_14, at is op2 = 0x6 & op1 = 0x7 & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & at & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }

# MUL.DD.* - Signed Multiply, pg. 434.  (Section header said MUL.AD - fixed.)
:mul.dd.ll m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0x4 & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.dd.hl m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0x5 & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.dd.lh m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0x6 & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }
:mul.dd.hh m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0x7 & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = sext(M1:2) * sext(M2:2); }

# MULA.AA.* - Signed Multiply, pg. 431.
# MULA.* - Signed Multiply/Accumulate: same operand-half selection as the
# MUL.* families above, but the product is added into ACC.
:mula.aa.ll as, at is op2 = 0x7 & op1 = 0x8 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.aa.hl as, at is op2 = 0x7 & op1 = 0x9 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.aa.lh as, at is op2 = 0x7 & op1 = 0xa & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.aa.hh as, at is op2 = 0x7 & op1 = 0xb & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.ad.ll as, m2m3_6_6 is op2 = 0x3 & op1 = 0x8 & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as:2; tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.ad.hl as, m2m3_6_6 is op2 = 0x3 & op1 = 0x9 & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as(2); tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.ad.lh as, m2m3_6_6 is op2 = 0x3 & op1 = 0xa & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as:2; tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.ad.hh as, m2m3_6_6 is op2 = 0x3 & op1 = 0xb & ar = 0 & as & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = as(2); tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
# NOTE(review): as in MUL.DA.*, the .hl/.lh half selection below is applied
# in the opposite order from the .aa/.ad/.dd families - confirm against the ISA.
:mula.da.ll m0m1_14_14, at is op2 = 0x6 & at & op1 = 0x8 & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.da.hl m0m1_14_14, at is op2 = 0x6 & op1 = 0x9 & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & at & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.da.lh m0m1_14_14, at is op2 = 0x6 & op1 = 0xa & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & at & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.da.hh m0m1_14_14, at is op2 = 0x6 & op1 = 0xb & as = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & at & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.dd.ll m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0x8 & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.dd.hl m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0x9 & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.dd.lh m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0xa & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }
:mula.dd.hh m0m1_14_14, m2m3_6_6 is op2 = 0x2 & op1 = 0xb & ar = 0 & u1_15_15 = 0 & u2_12_13 = 0 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); }

# Signed Mult/Accum, Ld/Autodec MULA.DA.*.LDDEC, pg. 441.
# MULA.DA.*.LDDEC: signed multiply/accumulate with parallel load and address
# auto-decrement. Computes vaddr = as - 4, accumulates the signed product of
# the selected 16-bit halves into ACC, then writes vaddr back to as and loads
# the selected MAC16 register (mw_12_13) from *vaddr.
# NOTE(review): .HL/.LH half selection here mirrors the .DA swap noted above —
# verify against the ISA manual.
:mula.da.ll.lddec mw_12_13, as, m0m1_14_14, at is op2 = 0x5 & at & op1 = 0x8 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.da.hl.lddec mw_12_13, as, m0m1_14_14, at is op2 = 0x5 & op1 = 0x9 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & at & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.da.lh.lddec mw_12_13, as, m0m1_14_14, at is op2 = 0x5 & op1 = 0xa & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & at & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.da.hh.lddec mw_12_13, as, m0m1_14_14, at is op2 = 0x5 & op1 = 0xb & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & at & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } # Signed Mult/Accum, Ld/Autoinc MULA.DA.*.LDINC, pg. 443.
# MULA.DA.*.LDINC: same as MULA.DA.*.LDDEC above but with address
# auto-increment (vaddr = as + 4). Accumulates the signed half-product into
# ACC, then advances as and loads mw_12_13 from *vaddr.
:mula.da.ll.ldinc mw_12_13, as, m0m1_14_14, at is op2 = 0x4 & at & op1 = 0x8 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.da.hl.ldinc mw_12_13, as, m0m1_14_14, at is op2 = 0x4 & op1 = 0x9 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & at & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.da.lh.ldinc mw_12_13, as, m0m1_14_14, at is op2 = 0x4 & op1 = 0xa & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & at & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.da.hh.ldinc mw_12_13, as, m0m1_14_14, at is op2 = 0x4 & op1 = 0xb & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & at & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } # Signed Mult/Accum, Ld/Autodec MULA.DD.*.LDDEC, pg. 446.
# MULA.DD.*.LDDEC: both multiplier operands come from MAC16 registers (m0/m1
# and m2/m3); parallel load with auto-decrement (vaddr = as - 4), then
# mw_12_13 is loaded from *vaddr.
# NOTE(review): .HL here selects the LOW half of m0m1 and the HIGH half of
# m2m3 (and .LH the opposite) — the reverse of the plain MULA.DD.*
# constructors above. Verify against the ISA manual.
:mula.dd.ll.lddec mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x1 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op1 = 0x8 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.dd.hl.lddec mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x1 & op1 = 0x9 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.dd.lh.lddec mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x1 & op1 = 0xa & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } :mula.dd.hh.lddec mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x1 & op1 = 0xb & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as - 4; tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; } # Signed Mult/Accum, Ld/Autoinc MULA.DD.*.LDINC, pg. 448.
# MULA.DD.*.LDINC (pg. 448): signed multiply/accumulate with parallel load
# and address auto-increment (vaddr = as + 4). Both multiplier operands come
# from MAC16 registers (m0/m1 and m2/m3); op2 = 0 distinguishes these from
# the MULA.DA.*.LDINC forms (op2 = 4) above.
# FIX: these four constructors decode the MULA.DD.*.LDINC encodings (op2 = 0,
# second operand m2m3_6_6 — see the preceding comment and the manual index)
# but were mis-named "mula.da.*.ldinc", producing wrong mnemonics in
# disassembly; renamed to "mula.dd.*.ldinc". Semantics are unchanged.
# NOTE(review): as in the other LDDEC/LDINC forms, the .HL/.LH half selection
# looks swapped relative to the plain MULA.DD.* constructors — verify against
# the ISA manual (deliberately not changed here).
:mula.dd.ll.ldinc mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x0 & op1 = 0x8 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; }
:mula.dd.hl.ldinc mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x0 & op1 = 0x9 & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14:2; tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; }
:mula.dd.lh.ldinc mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x0 & op1 = 0xa & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6:2; M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; }
:mula.dd.hh.ldinc mw_12_13, as, m0m1_14_14, m2m3_6_6 is op2 = 0x0 & op1 = 0xb & as & u1_15_15 = 0 & mw_12_13 & m0m1_14_14 & u1_7_7 = 0 & t2_4_5 = 0 & m2m3_6_6 & op0 = 0x4 { local vaddr:4 = as + 4; tm1:2 = m0m1_14_14(2); tm2:2 = m2m3_6_6(2); M1 = zext(tm1); M2 = zext(tm2); ACC = ACC + (sext(M1:2) * sext(M2:2)); as = vaddr; mw_12_13 = *:4 vaddr; }
# UMUL.AA.* - Unsigned Multiply, pg. 553.
# UMUL.AA.* (pg. 553): unsigned 16x16 multiply of the selected register
# halves; unlike MULA.*, ACC is overwritten with the zero-extended product
# (no accumulation). Then the Loop option: LoopOffset8 is the address just
# past the loop body (inst_start + u8 + 4); LOOP/LOOPGTZ set up
# LCOUNT/LBEG/LEND and seed the loopMode/loopEnd context bits, with
# globalset() marking the loop-end address so the :^instruction wrappers in
# xtensaMain.sinc can handle the zero-overhead loop-back.
:umul.aa.ll as, at is op2 = 0x7 & op1 = 0x0 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as:2; tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = zext(M1:2) * zext(M2:2); } :umul.aa.hl as, at is op2 = 0x7 & op1 = 0x1 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as(2); tm2:2 = at:2; M1 = zext(tm1); M2 = zext(tm2); ACC = zext(M1:2) * zext(M2:2); } :umul.aa.lh as, at is op2 = 0x7 & op1 = 0x2 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as:2; tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = zext(M1:2) * zext(M2:2); } :umul.aa.hh as, at is op2 = 0x7 & op1 = 0x3 & ar = 0 & as & at & op0 = 0x4 { tm1:2 = as(2); tm2:2 = at(2); M1 = zext(tm1); M2 = zext(tm2); ACC = zext(M1:2) * zext(M2:2); } ## Loop Option ## LoopOffset8: loc is u8_16_23 [ loc = inst_start + u8_16_23 + 4; ] { export *:4 loc; } # LOOP - Loop, pg. 392. :loop as, LoopOffset8 is LoopOffset8 & ar = 8 & as & at = 0b0111 & op0 = 6 [ loopMode=1; loopEnd = 1; globalset(LoopOffset8, loopEnd); ] { LCOUNT = as - 1; LBEG = inst_next; LEND = &LoopOffset8; } # LOOPGTZ - Loop if Greater Than Zero, pg. 394. :loopgtz as, LoopOffset8 is LoopOffset8 & ar = 0b1010 & as & at = 0b0111 & op0 = 0b0110 [ loopMode=1; loopEnd = 1; globalset(LoopOffset8, loopEnd); ] { LCOUNT = as - 1; LBEG = inst_next; LEND = &LoopOffset8; if (as s<= 0) goto LoopOffset8; } # LOOPNEZ - Loop if Not Equal Zero, pg. 396.
# LOOPNEZ (pg. 396): like LOOP but skips the loop body entirely when as == 0.
# Below: start of xtensaMain.sinc — the special-register (sr) name table for
# RSR/WSR/XSR, the AR/FR/BR register attaches, the MAC16 register operand
# attaches, and the pcodeops used to model instructions as opaque operations.
:loopnez as, LoopOffset8 is LoopOffset8 & ar = 0b1001 & as & at = 0b0111 & op0 = 0b0110 [ loopMode=1; loopEnd = 1; globalset(LoopOffset8, loopEnd); ] { LCOUNT = as - 1; LBEG = inst_next; LEND = &LoopOffset8; if (as == 0) goto LoopOffset8; } ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensaMain.sinc ================================================ attach variables [ sr ] [ # 0x...0 0x...4 0x...8 0x...c LBEG LEND LCOUNT SAR # 0x0_ BR LITBASE _ _ # 0x1_ _ _ _ _ # 0x2_ SCOMPARE1 _ _ _ # 0x3_ ACCLO ACCHI _ _ # 0x4_ _ _ _ _ # 0x5_ _ _ _ _ # 0x6_ _ _ _ _ # 0x7_ M0 M1 M2 M3 # 0x8_ _ _ _ _ # 0x9_ _ _ _ _ # 0xa_ _ _ _ _ # 0xb_ _ _ _ _ # 0xc_ _ _ _ _ # 0xd_ _ _ _ _ # 0xe_ _ _ _ _ # 0xf_ # 0x...0 0x...4 0x...8 0x...c _ _ _ _ # 0x10_ _ _ _ _ # 0x11_ WindowBase WindowStart _ _ # 0x12_ _ _ _ _ # 0x13_ _ _ _ PTEVADDR # 0x14_ _ _ _ _ # 0x15_ _ MMID RASID ITLBCFG # 0x16_ DTLBCFG _ _ _ # 0x17_ IBREAKENABLE MEMCTL CACHEATTR ATOMCTL # 0x18_ _ _ _ _ # 0x19_ DDR _ MEPC MEPS # 0x1a_ MESAVE MESR MECR MEVADDR # 0x1b_ _ _ _ _ # 0x1c_ _ _ _ _ # 0x1d_ _ _ _ _ # 0x1e_ _ _ _ _ # 0x1f_ # 0x...0 0x...4 0x...8 0x...c IBREAKA0 IBREAKA1 _ _ # 0x20_ _ _ _ _ # 0x21_ _ _ _ _ # 0x22_ _ _ _ _ # 0x23_ DBREAKA0 DBREAKA1 _ _ # 0x24_ _ _ _ _ # 0x25_ _ _ _ _ # 0x26_ _ _ _ _ # 0x27_ DBREAKC0 DBREAKC1 _ _ # 0x28_ _ _ _ _ # 0x29_ _ _ _ _ # 0x2a_ _ _ _ _ # 0x2b_ _ EPC1 EPC2 EPC3 # 0x2c_ EPC4 EPC5 EPC6 EPC7 # 0x2d_ _ _ _ _ # 0x2e_ _ _ _ _ # 0x2f_ # 0x...0 0x...4 0x...8 0x...c DEPC _ EPS2 EPS3 # 0x30_ EPS4 EPS5 EPS6 EPS7 # 0x31_ _ _ _ _ # 0x32_ _ _ _ _ # 0x33_ _ EXCSAVE1 EXCSAVE2 EXCSAVE3 # 0x34_ EXCSAVE4 EXCSAVE5 EXCSAVE6 EXCSAVE7 # 0x35_ _ _ _ _ # 0x36_ _ _ _ _ # 0x37_ CPENABLE INTERRUPT INTSET INTCLEAR # 0x38_ INTENABLE _ PS VECBASE # 0x39_ EXCCAUSE DEBUGCAUSE CCOUNT PRID # 0x3a_ ICOUNT ICOUNTLEVEL EXCVADDR _ # 0x3b_ CCOMPARE0 CCOMPARE1 CCOMPARE2 _ # 0x3c_ MISC0 MISC1 MISC2 MISC3 # 0x3d_ _ _ _ _ # 0x3e_ _ _ _ _ # 0x3f_ # 0x...0 0x...4 0x...8 0x...c ]; attach
variables [ ar as at n_ar n_as n_at ] [ a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 ]; attach variables [ fr fs ft ] [ f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15 ]; attach variables [ br bs bt ] [ b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15 ]; # Various 32-bit pointers relative to PC. Any operands that are split across non-consecutive # bits are named foo_LL.LM_ML.MM, where LL is the least significant bits of the least # singificant operand half, LM the most significant bits of the least significant operand half, etc. attach variables [ mw_12_13 ] [ M0 M1 M2 M3 ]; attach variables [ m2m3_6_6 ] [ M2 M3 ]; attach variables [ m0m1_14_14 ] [ M0 M1 ]; #implemented pcodeops define pcodeop breakpoint; define pcodeop dhi; define pcodeop dhu; define pcodeop dhwb; define pcodeop dhwbi; define pcodeop dii; define pcodeop diu; define pcodeop diwb; define pcodeop diwbi; define pcodeop dpfl; define pcodeop dpfr; define pcodeop dpfro; define pcodeop dpfw; define pcodeop dpfwo; define pcodeop dsync; define pcodeop esync; define pcodeop excw; define pcodeop extw; define pcodeop idtlb; define pcodeop ihi; define pcodeop ihu; define pcodeop iii; define pcodeop iitlb; define pcodeop iiu; define pcodeop ill; define pcodeop ipf; define pcodeop ipfl; define pcodeop isync; define pcodeop acquire; define pcodeop ldct; define pcodeop lict; define pcodeop licw; define pcodeop memw; define pcodeop nsa; define pcodeop nsau; define pcodeop pdtlb; define pcodeop pitlb; define pcodeop rdtlb0; define pcodeop rdtlb1; define pcodeop rer; define pcodeop restore4; define pcodeop restore8; define pcodeop restore12; define pcodeop rfdd; define pcodeop rfde; define pcodeop rfdo; define pcodeop rfe; define pcodeop rfi; define pcodeop rfme; define pcodeop rfue; define pcodeop rfwo; define pcodeop rfwu; define pcodeop ritlb0; define pcodeop ritlb1; define pcodeop rsil; define pcodeop rsr; define pcodeop rsync; define pcodeop rur; define pcodeop s32c1i; define pcodeop release;
# Remaining pcodeops, then the PC-relative branch/call target subtables
# (srel_*/urel_*) and the split or scaled immediate subtables used by the
# instruction constructors in xtensaInstructions.sinc.
define pcodeop restoreRegWindow; define pcodeop rotateRegWindow; define pcodeop sdct; define pcodeop sict; define pcodeop sicw; define pcodeop simcall; define pcodeop syscall; define pcodeop swap4; define pcodeop swap8; define pcodeop swap12; define pcodeop waiti; define pcodeop wdtlb; define pcodeop wer; define pcodeop witlb; define pcodeop wsr; define pcodeop wur; define pcodeop xsr; # Various 32-bit pointers relative to PC. Any operands that are split across non-consecutive # bits are named foo_LL_LM_ML_MM, where LL is the least significant bits of the least # singificant operand half, LM the most significant bits of the least significant operand half, etc. srel_16_23: rel is s8_16_23 [ rel = inst_start + s8_16_23 + 4; ] { export *:4 rel; } srel_12_23: rel is s12_12_23 [ rel = inst_start + s12_12_23 + 4; ] { export *:4 rel; } srel_6_23: rel is s8_6_23 [ rel = inst_start + s8_6_23 + 4; ] { export *:4 rel; } urel_12_15_4_5: rel is n_u2_4_5 & n_u4_12_15 [ rel = inst_start + ((n_u2_4_5 << 4) | n_u4_12_15) + 4; ] { export *:4 rel; } srel_6_23_sb2: rel is s8_6_23 [ rel = (inst_start & ~3) + ( s8_6_23 << 2 ) + 4; ] { export *:4 rel; } srel_8_23_oex_sb2: rel is u16_8_23 [ rel = ((inst_start + 3) & ~3) + ((u16_8_23 | 0xffff0000) << 2); ] { export *:4 rel; } # Immediates split across the instruction. u5_8_11_20: tmp is u1_20 & u4_8_11 [ tmp = (u1_20 << 4) | u4_8_11; ] { export *[const]:4 tmp; } u5_4_7_20: tmp is u1_20 & u4_4_7 [ tmp = 32 - ((u1_20 << 4) | u4_4_7); ] { export *[const]:4 tmp; } u5_8_11_16: tmp is u1_16 & u4_8_11 [ tmp = (u1_16 << 4) | u4_8_11; ] { export *[const]:4 tmp; } u5_4_7_12: tmp is u1_12 & u4_4_7 [ tmp = (u1_12 << 4) | u4_4_7; ] { export *[const]:4 tmp; } u5_8_11_4: tmp is u1_4 & u4_8_11 [ tmp = (u1_4 << 4) | u4_8_11; ] { export *[const]:4 tmp; } # Signed 12-bit (extended to 16) immediate, used by MOVI.
s16_16_23_8_11: tmp is s4_8_11 & u8_16_23 [ tmp = (s4_8_11 << 8) | u8_16_23; ] { export *[const]:2 tmp; } # An “asymmetric” immediate from -32..95, used by MOVI.N. n_s8_12_15_4_6_asymm: tmp is n_s3_4_6 & n_s4_12_15 [ tmp = ((((n_s3_4_6 & 7) << 4) | (n_s4_12_15 & 15)) | ((((n_s3_4_6 >> 2) & 1) & ((n_s3_4_6 >> 1) & 1)) << 7)); ] { export *[const]:1 tmp; } # Immediates shifted or with offset. s16_16_23_sb8: tmp is s8_16_23 [ tmp = s8_16_23 << 8; ] { export *[const]:4 tmp; } u15_12_23_sb3: tmp is u12_12_23 [ tmp = u12_12_23 << 3; ] { export *[const]:4 tmp; } u10_16_23_sb2: tmp is u8_16_23 [ tmp = u8_16_23 << 2; ] { export *[const]:4 tmp; } u9_16_23_sb1: tmp is u8_16_23 [ tmp = u8_16_23 << 1; ] { export *[const]:4 tmp; } u5_20_23_plus1: tmp is u4_20_23 [ tmp = u4_20_23 + 1; ] { export *[const]:4 tmp; } u8_20_23_sb4: tmp is u4_20_23 [ tmp = u4_20_23 << 4; ] { export *[const]:4 tmp; } u5_4_7_plus7: tmp is u4_4_7 [ tmp = u4_4_7 + 7; ] { export *[const]:4 tmp; } n_u6_12_15_sb2: tmp is n_u4_12_15 [ tmp = n_u4_12_15 << 2; ] { export *[const]:4 tmp; } # One-extended. FIXME: Verify this. Only used by [LS]32E (window extension), which aren’t yet # implemented. s5_12_15_oex: tmp is u4_12_15 [ tmp = (u4_12_15 << 2) - 64; ] { export *[const]:2 tmp; } # Some 4-bit immediates with mappings that can’t be (easily) expressed in a single disassembly action. # n_u4_4_7 with 0 being -1, used by ADDI.N. n_s4_4_7_nozero: tmp is n_u4_4_7 = 0 [ tmp = -1; ] { export *[const]:4 tmp; } n_s4_4_7_nozero: tmp is n_u4_4_7 [ tmp = n_u4_4_7+0; ] { export *[const]:4 tmp; } # B4CONST(ar) (Branch Immediate) encodings, pg. 41 f.
# B4CONST(r) lookup (pg. 41 f.): maps the 4-bit r field of the signed
# compare-immediate branches (BEQI/BNEI/BGEI/BLTI) onto the constant table
# {-1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256}.
r_b4const: tmp is ar = 0 [ tmp = 0xffffffff; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 1 [ tmp = 0x1; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 2 [ tmp = 0x2; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 3 [ tmp = 0x3; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 4 [ tmp = 0x4; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 5 [ tmp = 0x5; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 6 [ tmp = 0x6; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 7 [ tmp = 0x7; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 8 [ tmp = 0x8; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 9 [ tmp = 0xa; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 10 [ tmp = 0xc; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 11 [ tmp = 0x10; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 12 [ tmp = 0x20; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 13 [ tmp = 0x40; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 14 [ tmp = 0x80; ] { export *[const]:4 tmp; } r_b4const: tmp is ar = 15 [ tmp = 0x100; ] { export *[const]:4 tmp; } # B4CONSTU(ar) (Branch Unsigned Immediate) encodings, pg. 42.
# B4CONSTU(r) lookup (pg. 42): maps the 4-bit r field of the unsigned
# compare-immediate branches (BGEUI/BLTUI) onto the constant table
# {32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256}.
# FIX: the r = 1 entry encoded 0x1000 (4096); per the ISA B4CONSTU table the
# value for encoding 1 is 65536 (0x10000).
r_b4constu: tmp is ar = 0 [ tmp = 0x8000; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 1 [ tmp = 0x10000; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 2 [ tmp = 0x2; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 3 [ tmp = 0x3; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 4 [ tmp = 0x4; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 5 [ tmp = 0x5; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 6 [ tmp = 0x6; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 7 [ tmp = 0x7; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 8 [ tmp = 0x8; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 9 [ tmp = 0xa; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 10 [ tmp = 0xc; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 11 [ tmp = 0x10; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 12 [ tmp = 0x20; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 13 [ tmp = 0x40; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 14 [ tmp = 0x80; ] { export *[const]:4 tmp; }
r_b4constu: tmp is ar = 15 [ tmp = 0x100; ] { export *[const]:4 tmp; }
# CALLn return addresses: PC of the CALL plus 3, with the top two bits
# replaced by the window-increment code (1, 2 or 3) consumed by RETW.
Ret4: loc is epsilon [loc = ((inst_start + 3) & 0x0fffffff) | 0x40000000; ] { ret:4 = loc; export ret; }
Ret8: loc is epsilon [loc = ((inst_start + 3) & 0x0fffffff) | 0x80000000; ] { ret:4 = loc; export ret; }
Ret12: loc is epsilon [loc = ((inst_start + 3) & 0x0fffffff) | 0xc0000000; ] { ret:4 = loc; export ret; }
# Two-phase instruction wrappers: at phase 0 the loopMode/loopEnd context
# bits seeded by LOOP/LOOPGTZ/LOOPNEZ select a wrapper, which then builds the
# real instruction at phase 1. While loopMode=1 the built instruction is
# followed by the loop back-edge (decrement LCOUNT and branch to LBEG unless
# the count is exhausted or PS.EXCM is set); at the address globalset as the
# loop end (loopEnd=1) loopMode is cleared instead.
:^instruction is phase=0 & loopMode=1 & instruction [ phase=1; ] { build instruction; if (LCOUNT == 0 || $(PS_EXCM)) goto inst_next; LCOUNT = LCOUNT - 1; goto [LBEG]; }
:^instruction is phase=0 & loopMode=1 & loopEnd=1 & instruction [ loopMode=0; phase=1; ] { build instruction; }
:^instruction is phase=0 & loopMode=0 & instruction [ phase=1; ] { build instruction; }
================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa_be.slaspec ================================================
@define ENDIAN "big"
@include
"xtensaArch.sinc" @include "xtensaMain.sinc" with : phase=1 { @include "xtensaInstructions.sinc" #@include "xtensa_depbits.sinc" #uncomment this to use depbits instruction, collides with floating point @include "cust.sinc" @include "flix.sinc" } ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa_depbits.sinc ================================================ # Xtensa Deposit Bits instruction # This is broken out because it collides with the floating point instructions. It is not included by default # DEPBITS - Add (RRR), pg. 394. shiftimm: simm is u4_20_23 & u1_16 [ simm = u1_16 << 4 + u4_20_23; ] { export *[const]:4 simm; } :depbits as, at, shiftimm, u4_12_15 is u3_17_19=0x5 & u4_12_15 & as & at & op0 = 0 & shiftimm { mask:4 = (1 << u4_12_15) - 1; bits:4 = (as & mask) << shiftimm; mask = mask << shiftimm; at = (~mask & at) | bits; } ================================================ FILE: pypcode/processors/Xtensa/data/languages/xtensa_le.slaspec ================================================ @define ENDIAN "little" @include "xtensaArch.sinc" @include "xtensaMain.sinc" with : phase=1 { @include "xtensaInstructions.sinc" #@include "xtensa_depbits.sinc" #uncomment this to use depbits instruction, collides with floating point @include "cust.sinc" @include "flix.sinc" } ================================================ FILE: pypcode/processors/Xtensa/data/manuals/xtensa.idx ================================================ @isa_summary.pdf[Xtensa® Instruction Set Architecture Summary, For all Xtensa LX Processors April 2022] ABS, 324 ABS.D, 324 ABS.S, 325 ADD, 326 ADD.N, 326 ADD.D, 327 ADD.S, 328 ADDEXP.D, 329 ADDEXP.S, 329 ADDEXPM.D, 330 ADDEXPM.S, 331 ADDI, 332 ADDI.N, 333 ADDMI, 334 ADDX2, 335 ADDX4, 336 ADDX8, 336 ALL4, 337 ALL8, 338 AND, 339 ANDB, 339 ANDBC, 340 ANY4, 341 ANY8, 341 BALL, 342 BANY, 343 BBC, 344 BBCI, 345 BBCI.L, 346 BBS, 347 BBSI, 348 BBSI.L, 349 BEQ, 349 BEQI, 350 BEQZ, 351 BEQZ.N, 352 BF, 353 BGE, 
354 BGEI, 355 BGEU, 356 BGEUI, 357 BGEZ, 358 BLT, 359 BLTI, 360 BLTU, 361 BLTUI, 361 BLTZ, 362 BNALL, 363 BNE, 364 BNEI, 365 BNEZ, 366 BNEZ.N, 367 BNONE, 368 BREAK, 369 BREAK.N, 370 BT, 371 CALL0, 372 CALL4, 373 CALL8, 375 CALL12, 376 CALLX0, 377 CALLX4, 378 CALLX8, 379 CALLX12, 380 CEIL.D, 382 CEIL.S, 383 CLAMPS, 383 CLREX, 384 CONST.D, 385 CONST.S, 386 CONST16, 387 CVTD.S, 388 CVTS.D, 389 DCI, 389 DCWB, 391 DCWBI, 392 DEPBITS, 394 DHI, 395 DHI.B, 396 DHU, 397 DHWB, 398 DHWB.B, 400 DHWBI, 400 DHWBI.B, 402 DII, 403 DIU, 404 DIV0.D, 406 DIV0.S, 406 DIVN.D, 407 DIVN.S, 408 DIWB, 409 DIWBI, 410 DIWBUI.P, 412 DPFL, 413 DPFM.B, 415 DPFM.BF, 416 DPFR, 416 DPFR.B, 418 DPFR.BF, 419 DPFRO, 419 DPFW, 421 DPFW.B, 422 DPFW.BF, 423 DPFWO, 424 DSYNC, 425 ENTRY, 426 ESYNC, 427 EXCW, 428 EXTUI, 429 EXTW, 430 FLOAT.D, 431 FLOAT.S, 431 FLOOR.D, 432 FLOOR.S, 433 FSYNC, 434 GETEX, 434 IDTLB, 435 IHI, 436 IHU, 438 III, 439 IITLB, 441 IIU, 442 ILL, 443 ILL.N, 444 IPF, 444 IPFL, 446 ISYNC, 447 J, 449 J.L, 449 JX, 450 L8UI, 450 L16SI, 451 L16UI, 453 L32AI, 454 L32E, 455 L32EX, 457 L32I, 458 L32I.N, 459 L32R, 461 LDCT, 463 LDCW, 464 LDDEC, 465 LDDR32.P, 467 LDI, 467 LDINC, 468 LDIP, 469 LDX, 471 LDXP, 472 LOOP, 473 LOOPGTZ, 475 LOOPNEZ, 476 LSI, 478 LSIP, 480 LSIU, 481 LSX, 482 LSXP, 483 LSXU, 484 MADD.D, 486 MADD.S, 486 MADDN.D, 487 MADDN.S, 488 MAX, 489 MAXU, 489 MEMW, 490 MIN, 491 MINU, 491 MKDADJ.D, 492 MKDADJ.S, 493 MKSADJ.D, 494 MKSADJ.S, 494 MOV, 495 MOV.D, 496 MOV.N, 497 MOV.S, 498 MOVEQZ, 499 MOVEQZ.D, 499 MOVEQZ.S, 500 MOVF, 501 MOVF.D, 502 MOVF.S, 503 MOVGEZ, 504 MOVGEZ.D, 504 MOVGEZ.S, 505 MOVI, 506 MOVI.N, 507 MOVLTZ, 508 MOVLTZ.D, 509 MOVLTZ.S, 510 MOVNEZ, 510 MOVNEZ.D, 511 MOVNEZ.S, 512 MOVSP, 513 MOVT, 514 MOVT.D, 515 MOVT.S, 516 MSUB.D, 517 MSUB.S, 517 MUL.AA.*, 518 MUL.AD.*, 519 MUL.DA.*, 520 MUL.DD.*, 521 MUL.D, 522 MUL.S, 522 MUL16S, 523 MUL16U, 524 MULA.AA.*, 524 MULA.AD.*, 525 MULA.DA.*, 526 MULA.DA.*.LDDEC, 527 MULA.DA.*.LDINC, 528 MULA.DD.*, 530 MULA.DD.*.LDDEC,
531 MULA.DD.*.LDINC 532 MULL, 534 MULS.AA.*, 535 MULS.AD.*, 535 MULS.DA.*, 536 MULS.DD.*, 537 MULSH, 538 MULUH, 539 NEG, 540 NEG.D, 540 NEG.S, 541 NEXP01.D, 541 NEXP01.S, 542 NOP, 543 NOP.N, 544 NSA, 545 NSAU, 546 OEQ.D, 547 OEQ.S, 547 OLE.D, 548 OLE.S, 549 OLT.D, 550 OLT.S, 551 OR, 551 ORB, 552 ORBC, 553 TLB, 553 PITLB, 554 PPTLB, 555 QUOS, 556 QUOU, 557 RDTLB0, 558 RDTLB1, 559 RECIP0.D, 560 RECIP0.S, 560 REMS, 561 REMU, 562 RER, 563 RET, 564 RET.N, 564 RETW, 565 RETW.N, 567 RFDD, 568 RFDE, 569 RFDO, 570 RFE, 570 RFI, 571 RFME, 572 RFR, 573 RFRD, 573 RFUE, 574 RFWO, 575 RFWU, 576 RITLB0, 576 RITLB1, 577 ROTW, 578 ROUND.D, 579 ROUND.S, 580 RPTLB0, 580 RPTLB1, 581 RSIL, 582 RSQRT0.D, 583 RSQRT0.S, 584 RSR.*, 585 RSYNC, 586 RUR.*, 586 S8I, 587 S16I, 588 S32C1I, 589 S32E, 591 S32EX, 592 S32I, 594 S32I.N, 595 S32NB, 596 S32RI, 597 SALT, 599 SALTU, 600 SDDR32.P, 600 SDI, 601 SDIP, 602 SDX, 603 SDXP, 604 SEXT, 605 SICT, 606 SICW, 607 SIMCALL, 609 SLL, 609 SLLI, 610 SQRT0.D, 611 SQRT0.S, 612 SRA, 613 SRAI, 613 SRC, 614 SRL, 615 SRLI, 616 SSA8B, 616 SSA8L, 617 SSAI, 618 SSI, 619 SSIP, 620 SSIU, 621 SSL, 622 SSR, 623 SSX, 624 SSXP, 625 SSXU, 626 SUB, 627 SUB.D, 627 SUB.S, 628 SUBX2, 629 SUBX4, 629 SUBX8, 630 SYSCALL, 631 TRUNC.D, 632 TRUNC.S, 632 UEQ.D, 633 UEQ.S, 634 UFLOAT.D, 635 UFLOAT.S, 636 ULE.D, 636 ULE.S, 637 ULT.D, 638 ULT.S, 639 UMUL.AA.*, 639 UN.D, 640 UN.S, 641 UTRUNC.D, 642 UTRUNC.S, 642 WAITI, 643 WDTLB, 644 WER, 645 WFR, 646 WFRD, 647 WITLB, 648 WPTLB, 649 WSR.*, 650 WUR.*, 651 XOR, 652 XORB, 652 XSR.*, 653 ================================================ FILE: pypcode/processors/Xtensa/data/patterns/patternconstraints.xml ================================================ xtensa_patterns.xml ================================================ FILE: pypcode/processors/Xtensa/data/patterns/xtensa_patterns.xml ================================================ 0x0d 0xf0 0x0d 0xf0 0x00 0x0d 0xf0 0x00 0x00 0x0d 0xf0 0x00 0x00 0x00 0x80 0x00 0x00 0x80 0x00 0x00 0x00 0x80 
0x00 0x00 0x00 0x00 0x80 0x00 0x00 0x00 0x00 0x00 00010010 11000001 1...0000 ....0010 10100... ........ ....0000 00010001 11000000 0x1d 0xf0 0x1d 0xf0 0x00 0x1d 0xf0 0x00 0x00 0x1d 0xf0 0x00 0x00 0x00 0x36 ...00001 0x00 00010010 11000001 0...0000 00001101 11110000 00010010 11000001 0...0000 00001101 11110000 00000000 00010010 11000001 0...0000 00001101 11110000 00000000 00000000 00010010 11000001 0...0000 00001101 11110000 00000000 00000000 00000000 ....1010 00010001 00001101 11110000 ....1010 00010001 00001101 11110000 00000000 ....1010 00010001 00001101 11110000 00000000 00000000 ....1010 00010001 00001101 11110000 00000000 00000000 00000000 00010010 11000001 1...0000 ....0010 10100... ........ ....0000 00010001 11000000 ..000110 ........ 1....... ..000110 ........ 1....... 00000000 ..000110 ........ 1....... 00000000 00000000 ..000110 ........ 1....... 00000000 00000000 00000000 ....0010 10100... ........ ....0000 00010001 11000000 00010010 11000001 1...0000 ....0010 10100... ........ ....0000 00010001 11000000 00010010 11000001 1...0000 0x12 0xc1 0xf0 0x09 0x01 ..000101 ........ ........ 
# Tail of the Xtensa function-start pattern data, the (empty) Z180/Z182 spec
# stubs, the Z80 language definitions file, and the head of the shared
# Z80/Z180 SLEIGH spec: address spaces, register file (main, alternate and
# undocumented registers), context bits, flag-bit field macros, instruction
# tokens, and the flag-computation macros.
0x08 0x01 0x12 0xc1 0x10 0x0d 0xf0 ================================================ FILE: pypcode/processors/Z80/data/languages/z180.pspec ================================================ ================================================ FILE: pypcode/processors/Z80/data/languages/z180.slaspec ================================================ @define Z180 "" @include "z80.slaspec" ================================================ FILE: pypcode/processors/Z80/data/languages/z182.pspec ================================================ ================================================ FILE: pypcode/processors/Z80/data/languages/z80.cspec ================================================ ================================================ FILE: pypcode/processors/Z80/data/languages/z80.ldefs ================================================ Zilog Z80 Zilog Z8401x (IPC) microcontroller Zilog Z180 Zilog Z182 ================================================ FILE: pypcode/processors/Z80/data/languages/z80.pspec ================================================ ================================================ FILE: pypcode/processors/Z80/data/languages/z80.slaspec ================================================ # sleigh specification file for Zilog Z80 # TODO: Improve Flag bit implementation so that bit operations on F register work properly define endian=little; define alignment=1; @if defined(Z180) define space ram type=ram_space size=2 default; @define PTRSIZE "2" @else define space ram type=ram_space size=2 default; @define PTRSIZE "2" @endif define space io type=ram_space size=2; define space register type=register_space size=1; define register offset=0x00 size=1 [ F A C B E D L H I R ]; define register offset=0x00 size=2 [ AF BC DE HL ]; define register offset=0x20 size=1 [ F_ A_ C_ B_ E_ D_ L_ H_ ]; # Alternate registers define register offset=0x20 size=2 [ AF_ BC_ DE_ HL_ ]; # Alternate registers define register offset=0x40 size=2 [ _ PC SP IX IY ]; define register
offset=0x46 size=1 [ IXL IXH IYL IYH ]; # Undocumented registers define register offset=0x50 size=1 [ rCBAR rCBR rBBR ]; # Fake Registers used for pcode control define register offset=0x60 size=1 [ DECOMPILE_MODE ]; # Define context bits define register offset=0xf0 size=4 contextreg; define context contextreg assume8bitIOSpace =(0,0) # only applies to Z180 ; # Flag bits (?? manual is very confusing - could be typos!) @define C_flag "F[0,1]" # C: Carry @define N_flag "F[1,1]" # N: Add/Subtract - used by DAA to distinguish between ADD and SUB instructions (0=ADD,1=SUB) @define PV_flag "F[2,1]" # PV: Parity/Overflow @define H_flag "F[4,1]" # H: Half Carry @define Z_flag "F[6,1]" # Z: Zero @define S_flag "F[7,1]" # S: Sign define token opbyte (8) op0_8 = (0,7) op6_2 = (6,7) dRegPair4_2 = (4,5) pRegPair4_2 = (4,5) sRegPair4_2 = (4,5) qRegPair4_2 = (4,5) rRegPair4_2 = (4,5) reg3_3 = (3,5) bits3_3 = (3,5) bits0_4 = (0,3) reg0_3 = (0,2) bits0_3 = (0,2) ; define token data8 (8) imm8 = (0,7) sign8 = (7,7) simm8 = (0,7) signed ; define token data16 (16) timm4 = (12,15) imm16 = ( 0,15) sign16 = (15,15) simm16 = ( 0,15) signed ; attach variables [ reg0_3 reg3_3 ] [ B C D E H L _ A ]; attach variables [ sRegPair4_2 dRegPair4_2 ] [ BC DE HL SP ]; attach variables [ qRegPair4_2 ] [ BC DE HL AF ]; attach variables [ pRegPair4_2 ] [ BC DE IX SP ]; attach variables [ rRegPair4_2 ] [ BC DE IY SP ]; ################################################################ # Pseudo Instructions ################################################################ define pcodeop segment; # Define special pcodeop that calculates the RAM address # given the segment selector and offset as input define pcodeop BCDadjust; define pcodeop BCDadjustCarry; define pcodeop hasEvenParity; define pcodeop disableMaskableInterrupts; define pcodeop enableMaskableInterrupts; define pcodeop setInterruptMode; define pcodeop parity; define pcodeop sleep; define pcodeop halt;
# Flag-update helper macros. "AFmask = -1 >> 4" masks off the top nibble of
# the operand at whatever size it has, so the half-carry (H) test
# "(... & (AFmask + 1)) != 0" detects a carry out of the low part at any
# operand width. The *WithCarry macros fold the previous C flag into both the
# result and the carry/overflow flags.
################################################################ # Macros ################################################################ macro setResultFlags(result) { $(Z_flag) = (result == 0); $(S_flag) = (result s< 0); } macro additionFlags(operand1, operand2) { local AFmask = -1 >> 4; $(H_flag) = (((operand1 & AFmask) + (operand2 & AFmask)) & (AFmask + 1)) != 0; $(PV_flag) = scarry(operand1, operand2); $(N_flag) = 0; $(C_flag) = carry(operand1, operand2); } macro additionFlagsNoC(operand1, operand2) { local AFmask = -1 >> 4; $(H_flag) = (((operand1 & AFmask) + (operand2 & AFmask)) & (AFmask + 1)) != 0; $(PV_flag) = scarry(operand1, operand2); $(N_flag) = 0; # $(C_flag) is not affected } macro additionFlagsNoPV(operand1, operand2) { local AFmask = -1 >> 4; $(H_flag) = (((operand1 & AFmask) + (operand2 & AFmask)) & (AFmask + 1)) != 0; # $(PV_flag) is not affected $(N_flag) = 0; $(C_flag) = carry(operand1, operand2); } macro additionWithCarry(operand1, operand2, result) { local Ccopy = zext($(C_flag)); local AFmask = -1 >> 4; $(H_flag) = (((operand1 & AFmask) + (operand2 & AFmask) + Ccopy) & (AFmask + 1)) != 0; $(PV_flag) = scarry(operand1, operand2); $(N_flag) = 0; $(C_flag) = carry(operand1, operand2); local tempResult = operand1 + operand2; $(C_flag) = $(C_flag) || carry(tempResult, Ccopy); $(PV_flag) = $(PV_flag) ^^ scarry(tempResult, Ccopy); result = tempResult + Ccopy; } macro subtractionFlags(operand1, operand2) { local AFmask = -1 >> 4; $(H_flag) = (((operand1 & AFmask) - (operand2 & AFmask)) & (AFmask + 1)) != 0; $(PV_flag) = sborrow(operand1, operand2); $(N_flag) = 1; $(C_flag) = operand1 < operand2; } macro subtractionFlagsNoC(operand1, operand2) { local AFmask = -1 >> 4; $(H_flag) = (((operand1 & AFmask) - (operand2 & AFmask)) & (AFmask + 1)) != 0; $(PV_flag) = sborrow(operand1, operand2); $(N_flag) = 1; # $(C_flag) is not affected } macro subtractionWithCarry(operand1, operand2, result) { local Ccopy = zext($(C_flag)); local AFmask = -1 >> 4;
$(H_flag) = (((operand1 & AFmask) - (operand2 & AFmask) - Ccopy) & (AFmask + 1)) != 0; $(PV_flag) = sborrow(operand1, operand2); $(N_flag) = 1; $(C_flag) = operand1 < operand2; local tempResult = operand1 - operand2; $(C_flag) = $(C_flag) || (tempResult < Ccopy); $(PV_flag) = $(PV_flag) ^^ sborrow(tempResult, Ccopy); result = tempResult - Ccopy; } macro setSubtractFlags(op1,op2) { $(C_flag) = (op1 < op2); } # places the parity bit of the given byte in out_parity_bit # the upper 7 bits of out_parity_bit are cleared macro setParity(in_byte) { local tmp = in_byte ^ (in_byte >> 1); tmp = tmp ^ (tmp >> 2); tmp = (tmp ^ (tmp >> 4)) & 1; $(PV_flag) = (tmp == 0); # $(PV_flag) = hasEvenParity(in_byte); } macro ioWrite(addr,val) { *[io]:1 addr = val; } macro ioRead(addr,dest) { dest = *[io]:1 addr; } @if defined(Z180_SEGMENTED) macro push16(val16) { SP = SP - 2; ptr:$(PTRSIZE) = segment(rBBR,SP); *:2 ptr = val16; } macro pop16(ret16) { ptr:$(PTRSIZE) = segment(rBBR,SP); ret16 = *:2 ptr; SP = SP + 2; } macro push8(val8) { SP = SP - 1; ptr:$(PTRSIZE) = segment(rBBR,SP); *:1 ptr = val8; } macro pop8(ret8) { ptr:$(PTRSIZE) = segment(rBBR,SP); ret8 = *:1 ptr; SP = SP + 1; } macro swap(val16) { ptr:$(PTRSIZE) = segment(rBBR,SP); tmp:2 = *:2 ptr; *:2 ptr = val16; val16 = tmp; } macro MemRead(dest,off) { ptr:$(PTRSIZE) = segment(rBBR,off); dest = *:1 ptr; } macro MemStore(off,val) { ptr:$(PTRSIZE) = segment(rBBR,off); *:1 ptr = val; } macro JumpToLoc(off) { ptr:$(PTRSIZE) = segment(rBBR,off); goto [ptr]; } @else macro push16(val16) { SP = SP - 2; *:2 SP = val16; } macro pop16(ret16) { ret16 = *:2 SP; SP = SP + 2; } macro push8(val8) { SP = SP - 1; ptr:$(PTRSIZE) = SP; *:1 ptr = val8; } macro pop8(ret8) { ptr:$(PTRSIZE) = SP; ret8 = *:1 ptr; SP = SP + 1; } macro swap(val16) { ptr:$(PTRSIZE) = SP; tmp:2 = *:2 ptr; *:2 ptr = val16; val16 = tmp; } macro MemRead(dest,off) { ptr:$(PTRSIZE) = off; dest = *:1 ptr; } macro MemStore(off,val) { ptr:$(PTRSIZE) = off; *:1 ptr = val; } macro
JumpToLoc(off) { ptr:$(PTRSIZE) = off; goto [ptr]; } @endif ################################################################ @if defined(Z180) Flag: "Flag" is reg0_3 { } @endif @if defined(Z180_SEGMENTED) hlMem8: (HL) is HL { ptr:$(PTRSIZE) = segment(rBBR,HL); export *:1 ptr; } ixMem8: (IX+simm8) is IX & simm8 { off:2 = IX + simm8; ptr:$(PTRSIZE) = segment(rBBR,off); export *:1 ptr; } ixMem8: (IX-val) is IX & simm8 & sign8=1 [ val = -simm8; ] { off:2 = IX + simm8; ptr:$(PTRSIZE) = segment(rBBR,off); export *:1 ptr; } iyMem8: (IY+simm8) is IY & simm8 { off:$(PTRSIZE) = simm8; ptr:$(PTRSIZE) = segment(rBBR,IY); ptr = ptr + off; export *:1 ptr; } iyMem8: (IY-val) is IY & simm8 & sign8=1 [ val = -simm8; ] { off:$(PTRSIZE) = simm8; ptr:$(PTRSIZE) = segment(rBBR,IY); ptr = ptr + off; export *:1 ptr; } @else # if Z180_SEGMENTED @if defined(Z180) hlMem8: (HL) is HL { ptr:$(PTRSIZE) = HL; export *:1 ptr; } @endif ixMem8: (IX+simm8) is IX & simm8 { ptr:$(PTRSIZE) = IX + simm8; export *:1 ptr; } ixMem8: (IX-val) is IX & simm8 & sign8=1 [ val = -simm8; ] { ptr:$(PTRSIZE) = IX + simm8; export *:1 ptr; } iyMem8: (IY+simm8) is IY & simm8 { ptr:$(PTRSIZE) = IY + simm8; export *:1 ptr; } iyMem8: (IY-val) is IY & simm8 & sign8=1 [ val = -simm8; ] { ptr:$(PTRSIZE) = IY + simm8; export *:1 ptr; } @endif # end !Z180_SEGMENTED @if defined(Z180) Addr16: imm16 is imm16 { export *:1 imm16; } Mem8: (imm16) is imm16 { export *:1 imm16; } Mem16: (imm16) is imm16 { export *:2 imm16; } @else Addr16: imm16 is imm16 { export *:1 imm16; } Mem8: (imm16) is imm16 { export *:1 imm16; } Mem16: (imm16) is imm16 { export *:2 imm16; } @endif RelAddr8: loc is simm8 [ loc = inst_next + simm8; ] { export *:1 loc; } RstAddr: loc is bits3_3 [ loc = bits3_3 << 3; ] { export *:1 loc; } @if defined(Z180) IOAddr8: (imm8) is imm8 { export *[const]:2 imm8; } IOAddr8a: (imm8) is assume8bitIOSpace=0 & imm8 { ptr:2 = (zext(A) << 8) + imm8; export ptr; } IOAddr8a: (imm8) is assume8bitIOSpace=1 & imm8 { export 
*[const]:2 imm8; } IOAddrC: (C) is assume8bitIOSpace=0 & C { ptr:2 = (zext(B) << 8) + zext(C); export ptr; } IOAddrC: (C) is assume8bitIOSpace=1 & C { ptr:2 = zext(C); export ptr; } @else IOAddr8a: (imm8) is imm8 { export *[const]:2 imm8; } IOAddrC: (C) is C { ptr:2 = zext(C); export ptr; } @endif cc: "NZ" is bits3_3=0x0 { c:1 = ($(Z_flag) == 0); export c; } cc: "Z" is bits3_3=0x1 { c:1 = $(Z_flag); export c; } cc: "NC" is bits3_3=0x2 { c:1 = ($(C_flag) == 0); export c; } cc: "C" is bits3_3=0x3 { c:1 = $(C_flag); export c; } cc: "PO" is bits3_3=0x4 { c:1 = ($(PV_flag) == 0); export c; } cc: "PE" is bits3_3=0x5 { c:1 = $(PV_flag); export c; } cc: "P" is bits3_3=0x6 { c:1 = ($(S_flag) == 0); export c; } cc: "M" is bits3_3=0x7 { c:1 = $(S_flag); export c; } cc2: "NZ" is bits3_3=0x4 { c:1 = ($(Z_flag) == 0); export c; } cc2: "Z" is bits3_3=0x5 { c:1 = $(Z_flag); export c; } cc2: "NC" is bits3_3=0x6 { c:1 = ($(C_flag) == 0); export c; } cc2: "C" is bits3_3=0x7 { c:1 = $(C_flag); export c; } ################################################################ :LD reg3_3,reg0_3 is op6_2=0x1 & reg3_3 & reg0_3 { reg3_3 = reg0_3; } :LD reg3_3,imm8 is op6_2=0x0 & reg3_3 & bits0_3=0x6; imm8 { reg3_3 = imm8; } :LD reg3_3,(HL) is op6_2=0x1 & reg3_3 & bits0_3=0x6 & HL { MemRead(reg3_3,HL); } :LD reg3_3,ixMem8 is op0_8=0xdd; op6_2=0x1 & reg3_3 & bits3_3!=0x6 & bits0_3=0x6; ixMem8 { reg3_3 = ixMem8; } :LD reg3_3,iyMem8 is op0_8=0xfd; op6_2=0x1 & reg3_3 & bits3_3!=0x6 & bits0_3=0x6; iyMem8 { reg3_3 = iyMem8; } :LD (HL),reg0_3 is op6_2=0x1 & bits3_3=0x6 & reg0_3 & HL { MemStore(HL,reg0_3); } :LD ixMem8,reg0_3 is op0_8=0xdd; op6_2=0x1 & bits3_3=0x6 & reg0_3 & bits0_3!=0x6; ixMem8 { ixMem8 = reg0_3; } :LD iyMem8,reg0_3 is op0_8=0xfd; op6_2=0x1 & bits3_3=0x6 & reg0_3 & bits0_3!=0x6; iyMem8 { iyMem8 = reg0_3; } :LD (HL),imm8 is op0_8=0x36 & HL; imm8 { tmp:1 = imm8; MemStore(HL,tmp); } :LD ixMem8,imm8 is op0_8=0xdd; op6_2=0x0 & bits3_3=0x6 & bits0_3=0x6; ixMem8; imm8 { ixMem8 = imm8; } :LD 
iyMem8,imm8 is op0_8=0xfd; op6_2=0x0 & bits3_3=0x6 & bits0_3=0x6; iyMem8; imm8 { iyMem8 = imm8; } :LD A,(BC) is op0_8=0x0a & A & BC { MemRead(A,BC); } :LD A,(DE) is op0_8=0x1a & A & DE { MemRead(A,DE); } :LD A,Mem8 is op0_8=0x3a & A; Mem8 { A = Mem8; } :LD (BC),A is op0_8=0x2 & BC & A { MemStore(BC,A); } :LD (DE),A is op0_8=0x12 & DE & A { MemStore(DE,A); } :LD Mem8,A is op0_8=0x32 & A; Mem8 { Mem8 = A; } :LD A,I is op0_8=0xed & A & I; op0_8=0x57 { local val = I; A = val; setResultFlags(val); $(H_flag) = 0; # $(PV_flag) = IFF2; $(N_flag) = 0; } :LD A,R is op0_8=0xed & A & R; op0_8=0x5f { local val = R; A = val; setResultFlags(val); $(H_flag) = 0; # $(PV_flag) = IFF2; $(N_flag) = 0; } :LD I,A is op0_8=0xed & A & I; op0_8=0x47 { I = A; } :LD R,A is op0_8=0xed & A & R; op0_8=0x4f { R = A; } :LD dRegPair4_2,imm16 is op6_2=0x0 & dRegPair4_2 & bits0_4=0x1; imm16 { dRegPair4_2 = imm16; } :LD IX,imm16 is op0_8=0xdd & IX; op0_8=0x21; imm16 { IX = imm16; } :LD IY,imm16 is op0_8=0xfd & IY; op0_8=0x21; imm16 { IY = imm16; } :LD HL,Mem16 is op0_8=0x2a & HL; Mem16 { HL = Mem16; } :LD dRegPair4_2,Mem16 is op0_8=0xed; op6_2=0x1 & dRegPair4_2 & bits0_4=0xb; Mem16 { dRegPair4_2 = Mem16; } :LD IX,Mem16 is op0_8=0xdd & IX; op0_8=0x2a; Mem16 { IX = Mem16; } :LD IY,Mem16 is op0_8=0xfd & IY; op0_8=0x2a; Mem16 { IY = Mem16; } :LD Mem16,HL is op0_8=0x22 & HL; Mem16 { Mem16 = HL; } :LD Mem16,dRegPair4_2 is op0_8=0xed; op6_2=0x1 & dRegPair4_2 & bits0_4=0x3; Mem16 { Mem16 = dRegPair4_2; } :LD Mem16,IX is op0_8=0xdd & IX; op0_8=0x22; Mem16 { Mem16 = IX; } :LD Mem16,IY is op0_8=0xfd & IY; op0_8=0x22; Mem16 { Mem16 = IY; } :LD SP,HL is op0_8=0xf9 & SP & HL { SP = HL; } :LD SP,IX is op0_8=0xdd & SP & IX; op0_8=0xf9 { SP = IX; } :LD SP,IY is op0_8=0xfd & SP & IY; op0_8=0xf9 { SP = IY; } :PUSH qRegPair4_2 is op6_2=0x3 & qRegPair4_2 & bits0_4=0x5 { push16(qRegPair4_2); } :PUSH IX is op0_8=0xdd & IX; op0_8=0xe5 { push16(IX); } :PUSH IY is op0_8=0xfd & IY; op0_8=0xe5 { push16(IY); } :POP qRegPair4_2 
is op6_2=0x3 & qRegPair4_2 & bits0_4=0x1 { pop16(qRegPair4_2); } :POP IX is op0_8=0xdd & IX; op0_8=0xe1 { pop16(IX); } # ?? Manual appears to have incorrect encoding :POP IY is op0_8=0xfd & IY; op0_8=0xe1 { pop16(IY); } :EX DE,HL is op0_8=0xeb & DE & HL { tmp:2 = DE; DE = HL; HL = tmp; } :EX AF, AF_ is op0_8=0x08 & AF & AF_ { tmp:2 = AF; AF = AF_; AF_ = tmp; } :EXX is op0_8=0xd9 { tmp:2 = BC; BC = BC_; BC_ = tmp; tmp = DE; DE = DE_; DE_ = tmp; tmp = HL; HL = HL_; HL_ = tmp; } :EX (SP),HL is op0_8=0xe3 & SP & HL { swap(HL); } :EX (SP),IX is op0_8=0xdd & SP & IX; op0_8=0xe3 { swap(IX); } :EX (SP),IY is op0_8=0xfd & SP & IY; op0_8=0xe3 { swap(IY); } :LDI is op0_8=0xed; op0_8=0xa0 { val:1 = 0; local inloc = HL; local outloc = DE; MemRead(val,inloc); MemStore(outloc,val); DE = outloc + 1; HL = inloc + 1; local test = BC - 1; BC = test; $(H_flag) = 0; $(PV_flag) = (test != 0); $(N_flag) = 0; } :LDIR is op0_8=0xed; op0_8=0xb0 { val:1 = 0; local inloc = HL; local outloc = DE; MemRead(val,inloc); MemStore(outloc,val); DE = outloc + 1; HL = inloc + 1; local test = BC - 1; BC = test; if (test != 0) goto inst_start; $(H_flag) = 0; $(PV_flag) = 0; $(N_flag) = 0; } :LDD is op0_8=0xed; op0_8=0xa8 { val:1 = 0; local inloc = HL; local outloc = DE; MemRead(val,inloc); MemStore(outloc,val); DE = outloc - 1; HL = inloc - 1; local test = BC - 1; BC = test; $(H_flag) = 0; $(PV_flag) = (test != 0); $(N_flag) = 0; } :LDDR is op0_8=0xed; op0_8=0xb8 { val:1 = 0; local inloc = HL; local outloc = DE; MemRead(val,inloc); MemStore(outloc,val); DE = outloc - 1; HL = inloc - 1; local test = BC - 1; BC = test; if (test != 0) goto inst_start; $(H_flag) = 0; $(PV_flag) = 0; $(N_flag) = 0; } :CPI is op0_8=0xed; op0_8=0xa1 { val:1 = 0; local loc = HL; MemRead(val,loc); local a_temp = A; cmp:1 = a_temp - val; setResultFlags(cmp); HL = loc + 1; local test = BC - 1; BC = test; carries:1 = (~a_temp & val) | (val & cmp) | (cmp & ~a_temp); $(H_flag) = (carries & 0b00001000) != 0; $(PV_flag) = (test != 0); 
$(N_flag) = 1; } :CPIR is op0_8=0xed; op0_8=0xb1 { val:1 = 0; local loc = HL; MemRead(val,loc); local a_temp = A; cmp:1 = a_temp - val; setResultFlags(cmp); HL = loc + 1; local test = BC - 1; BC = test; if (cmp != 0 || test != 0) goto inst_start; carries:1 = (~a_temp & val) | (val & cmp) | (cmp & ~a_temp); $(H_flag) = (carries & 0b00001000) != 0; $(PV_flag) = (test != 0); $(N_flag) = 1; } :CPD is op0_8=0xed; op0_8=0xa9 { val:1 = 0; local loc = HL; MemRead(val,loc); local a_temp = A; cmp:1 = a_temp - val; setResultFlags(cmp); HL = loc - 1; local test = BC - 1; BC = test; carries:1 = (~a_temp & val) | (val & cmp) | (cmp & ~a_temp); $(H_flag) = (carries & 0b00001000) != 0; $(PV_flag) = (test != 0); $(N_flag) = 1; } :CPDR is op0_8=0xed; op0_8=0xb9 { val:1 = 0; local loc = HL; MemRead(val,loc); local a_temp = A; cmp:1 = a_temp - val; setResultFlags(cmp); HL = loc - 1; local test = BC - 1; BC = test; if (cmp != 0 || test != 0) goto inst_start; carries:1 = (~a_temp & val) | (val & cmp) | (cmp & ~a_temp); $(H_flag) = (carries & 0b00001000) != 0; $(PV_flag) = (test != 0); $(N_flag) = 1; } :ADD A, reg0_3 is op6_2=0x2 & bits3_3=0x0 & reg0_3 & A { local a_temp = A; local reg_temp = reg0_3; additionFlags(a_temp, reg_temp); a_temp= a_temp + reg0_3; setResultFlags(a_temp); A = a_temp; } :ADD A, imm8 is op0_8=0xc6; imm8 & A { local a_temp = A; additionFlags(a_temp, imm8); a_temp = a_temp + imm8; setResultFlags(a_temp); A = a_temp; } :ADD A, (HL) is op0_8=0x86 & HL & A { val:1 = 0; MemRead(val,HL); local a_temp = A; additionFlags(a_temp, val); a_temp = a_temp + val; setResultFlags(a_temp); A = a_temp; } :ADD A, ixMem8 is op0_8=0xdd; op0_8=0x86; ixMem8 & A { val:1 = ixMem8; local a_temp = A; additionFlags(a_temp, val); a_temp = a_temp + val; setResultFlags(a_temp); A = a_temp; } :ADD A, iyMem8 is op0_8=0xfd; op0_8=0x86; iyMem8 & A { val:1 = iyMem8; local a_temp = A; additionFlags(a_temp, val); a_temp = a_temp + val; setResultFlags(a_temp); A = a_temp; } :ADC A, reg0_3 is op6_2=0x2 & 
bits3_3=0x1 & reg0_3 & A { local a_temp = A; local r_temp = reg0_3; additionWithCarry(a_temp, r_temp, a_temp); setResultFlags(a_temp); A = a_temp; } :ADC A, imm8 is op0_8=0xce; imm8 & A { val:1 = imm8; local a_temp = A; additionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; } :ADC A, (HL) is op0_8=0x8e & HL & A { val:1 = 0; MemRead(val,HL); local a_temp = A; additionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; } :ADC A, ixMem8 is op0_8=0xdd; op0_8=0x8e; ixMem8 & A{ val:1 = ixMem8; MemRead(val,HL); local a_temp = A; additionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; } :ADC A, iyMem8 is op0_8=0xfd; op0_8=0x8e; iyMem8 & A { val:1 = iyMem8; MemRead(val,HL); local a_temp = A; additionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; } :SUB reg0_3 is op6_2=0x2 & bits3_3=0x2 & reg0_3 { local a_temp = A; local r_temp = reg0_3; subtractionFlags(a_temp, r_temp); a_temp = a_temp - r_temp; setResultFlags(a_temp); A = a_temp; } :SUB imm8 is op0_8=0xd6; imm8 { local a_temp = A; subtractionFlags(a_temp, imm8); a_temp = a_temp - imm8; setResultFlags(a_temp); A = a_temp; } :SUB (HL) is op0_8=0x96 & HL { val:1 = 0; MemRead(val,HL); local a_temp = A; subtractionFlags(a_temp, val); a_temp = a_temp - val; setResultFlags(a_temp); A = a_temp; } :SUB ixMem8 is op0_8=0xdd; op0_8=0x96; ixMem8 { val:1 = ixMem8; local a_temp = A; subtractionFlags(a_temp, val); a_temp = a_temp - val; setResultFlags(a_temp); A = a_temp; } :SUB iyMem8 is op0_8=0xfd; op0_8=0x96; iyMem8 { val:1 = iyMem8; local a_temp = A; subtractionFlags(a_temp, val); a_temp = a_temp - val; setResultFlags(a_temp); A = a_temp; } :SBC A, reg0_3 is op6_2=0x2 & bits3_3=0x3 & reg0_3 & A { local a_temp = A; local r_temp = reg0_3; subtractionWithCarry(a_temp, r_temp, a_temp); setResultFlags(a_temp); A = a_temp; } :SBC A, imm8 is op0_8=0xde; imm8 & A { local a_temp = A; subtractionWithCarry(a_temp, imm8, a_temp); setResultFlags(a_temp); A = a_temp; } 
:SBC A, (HL) is op0_8=0x9e & HL & A { val:1 = 0; MemRead(val,HL); local a_temp = A; subtractionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; }
:SBC A, ixMem8 is op0_8=0xdd; op0_8=0x9e; ixMem8 & A { val:1 = ixMem8; local a_temp = A; subtractionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; }
:SBC A, iyMem8 is op0_8=0xfd; op0_8=0x9e; iyMem8 & A { val:1 = iyMem8; local a_temp = A; subtractionWithCarry(a_temp, val, a_temp); setResultFlags(a_temp); A = a_temp; }

# 8-bit logical operations: AND sets H, OR/XOR clear it; C and N always cleared.
:AND reg0_3 is op6_2=0x2 & bits3_3=0x4 & reg0_3 { local a_temp = A; $(H_flag) = 1; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp & reg0_3; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:AND imm8 is op0_8=0xe6; imm8 { local a_temp = A; $(H_flag) = 1; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp & imm8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:AND (HL) is op0_8=0xa6 & HL { val:1 = 0; MemRead(val,HL); local a_temp = A; $(H_flag) = 1; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp & val; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:AND ixMem8 is op0_8=0xdd; op0_8=0xa6; ixMem8 { local a_temp = A; $(H_flag) = 1; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp & ixMem8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:AND iyMem8 is op0_8=0xfd; op0_8=0xa6; iyMem8 { local a_temp = A; $(H_flag) = 1; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp & iyMem8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:OR reg0_3 is op6_2=0x2 & bits3_3=0x6 & reg0_3 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | reg0_3; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:OR imm8 is op0_8=0xf6; imm8 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | imm8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:OR (HL) is op0_8=0xb6 & HL { val:1 = 0; MemRead(val,HL); local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | val; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:OR ixMem8 is op0_8=0xdd; op0_8=0xb6; ixMem8 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | ixMem8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:OR iyMem8 is op0_8=0xfd; op0_8=0xb6; iyMem8 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | iyMem8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:XOR reg0_3 is op6_2=0x2 & bits3_3=0x5 & reg0_3 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp ^ reg0_3; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:XOR imm8 is op0_8=0xee; imm8 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp ^ imm8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:XOR (HL) is op0_8=0xae & HL { val:1 = 0; MemRead(val,HL); local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp ^ val; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:XOR ixMem8 is op0_8=0xdd; op0_8=0xae; ixMem8 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp ^ ixMem8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }
:XOR iyMem8 is op0_8=0xfd; op0_8=0xae; iyMem8 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp ^ iyMem8; setResultFlags(a_temp); setParity(a_temp); A = a_temp; }

# Compare: subtraction flags only, A is not written.
:CP reg0_3 is op6_2=0x2 & bits3_3=0x7 & reg0_3 { local a_temp = A; local r_temp = reg0_3; cmp:1 = a_temp - r_temp; subtractionFlags(a_temp, r_temp); setResultFlags(cmp); }
:CP imm8 is op0_8=0xfe; imm8 { local a_temp = A; cmp:1 = a_temp - imm8; subtractionFlags(a_temp, imm8); setResultFlags(cmp); }
:CP (HL) is op0_8=0xbe & HL { val:1 = 0; MemRead(val,HL); local a_temp = A; cmp:1 = a_temp - val; subtractionFlags(a_temp, val); setResultFlags(cmp); }
:CP ixMem8 is op0_8=0xdd; op0_8=0xbe; ixMem8 { val:1 = ixMem8; local a_temp = A; cmp:1 = a_temp - val; subtractionFlags(a_temp, val); setResultFlags(cmp); }
:CP iyMem8 is op0_8=0xfd; op0_8=0xbe; iyMem8 { val:1 = iyMem8; local a_temp = A; cmp:1 = a_temp - val; subtractionFlags(a_temp, val); setResultFlags(cmp); }

# 8-bit INC/DEC (C flag is preserved, except INC reg3_3 below which uses
# additionFlags — NOTE(review): that writes C, unlike the memory forms; confirm.)
:INC reg3_3 is op6_2=0x0 & reg3_3 & bits0_3=0x4 { local r_temp = reg3_3; additionFlags(r_temp, 1); r_temp = r_temp + 1; reg3_3 = r_temp; setResultFlags(r_temp); }
:INC (HL) is op0_8=0x34 & HL { val:1 = 0; MemRead(val,HL); val_temp:1 = val; val = val + 1; MemStore(HL,val); setResultFlags(val); additionFlagsNoC(val_temp, 1); }
:INC ixMem8 is op0_8=0xdd; op0_8=0x34; ixMem8 { val:1 = ixMem8; val_temp:1 = val; val = val + 1; ixMem8 = val; setResultFlags(val); additionFlagsNoC(val_temp, 1); }
:INC iyMem8 is op0_8=0xfd; op0_8=0x34; iyMem8 { val:1 = iyMem8; val_temp:1 = val; val = val + 1; iyMem8 = val; setResultFlags(val); additionFlagsNoC(val_temp, 1); }
:DEC reg3_3 is op6_2=0x0 & reg3_3 & bits0_3=0x5 { local r_temp = reg3_3; subtractionFlagsNoC(r_temp, 1); r_temp = r_temp - 1; reg3_3 = r_temp; setResultFlags(r_temp); }
:DEC (HL) is op0_8=0x35 & HL { val:1 = 0; MemRead(val,HL); val_temp:1 = val; val = val - 1; MemStore(HL,val); subtractionFlagsNoC(val_temp, 1); setResultFlags(val); }
:DEC ixMem8 is op0_8=0xdd; op0_8=0x35; ixMem8 { val:1 = ixMem8; val_temp:1 = val; val = val - 1; ixMem8 = val; subtractionFlagsNoC(val_temp, 1); setResultFlags(val); }
:DEC iyMem8 is op0_8=0xfd; op0_8=0x35; iyMem8 { val:1 = iyMem8; val_temp:1 = val; val = val - 1; iyMem8 = val; subtractionFlagsNoC(val_temp, 1); setResultFlags(val); }

# NOTE(review): the angle-bracket local labels of this DAA body (e.g.
# "goto <done>;") appear to have been stripped by text extraction — every
# "goto ;" below originally named a label. The token stream is preserved
# as-is; restore the labels from the upstream file before recompiling.
:DAA is op0_8=0x27 {
 local a_temp = A;
 if (DECOMPILE_MODE) goto ;
 HN:1 = a_temp >> 4; # high nibble
 LN:1 = a_temp & 0xF; # low nibbble
 #
 # If (C and H are both 0, and both nibbles are in range[0,9] no
 # adjustment is needed.
 #
 if (($(C_flag) == 0) & ($(H_flag) == 0) & (HN <= 0x9) & (LN <= 0x9)) goto ;
 if ($(N_flag) == 1) goto ;
 #, in effect
 if ($(C_flag) == 0 & $(H_flag) == 0 & HN <= 0x8 & LN >= 0xA & LN <= 0xF) goto ;
 if ($(C_flag) == 0 & $(H_flag) == 1 & HN <= 0x9 & LN <= 0x3) goto ;
 if ($(C_flag) == 0 & $(H_flag) == 0 & HN >= 0xA & HN <= 0xF & LN <= 0x9) goto ;
 if ($(C_flag) == 0 & $(H_flag) == 0 & HN >= 0x9 & HN <= 0xF & LN >= 0xA & LN <= 0xF) goto ;
 if ($(C_flag) == 0 & $(H_flag) == 1 & HN >= 0xA & HN <= 0xF & LN <= 0x3) goto ;
 if ($(C_flag) == 1 & $(H_flag) == 0 & HN <= 0x2 & LN <= 0x9) goto ;
 if ($(C_flag) == 1 & $(H_flag) == 0 & HN <= 0x2 & LN >= 0xA & LN <= 0xF) goto ;
 if ($(C_flag) == 1 & $(H_flag) == 1 & HN <= 0x3 & LN <= 0x3) goto ;
 goto ;
 # Cases for addition
 #
 # Isn't used
 a_temp = a_temp + 0x06;
 goto ;
 a_temp = a_temp + 0x06;
 goto ;
 a_temp = a_temp + 0x60;
 $(C_flag) = 1;
 goto ;
 a_temp = a_temp + 0x66;
 $(C_flag) = 1;
 goto ;
 a_temp = a_temp + 0x66;
 $(C_flag) = 1;
 goto ;
 a_temp = a_temp + 0x60;
 goto ;
 a_temp = a_temp + 0x66;
 goto ;
 a_temp = a_temp + 0x66;
 goto ;
 # Cases for subtraction
 #if ($(C_flag) == 0 & $(H_flag) == 0 & HN >= 0x0 & HN <= 0x9 & LN >= 0x0 & LN <= 0x9) goto ;
 if ($(C_flag) == 0 & $(H_flag) == 1 & HN <= 0x8 & LN >= 0x6 & LN <= 0xF) goto ;
 if ($(C_flag) == 1 & $(H_flag) == 0 & HN >= 0x7 & HN <= 0xF & LN <= 0x9) goto ;
 if ($(C_flag) == 1 & $(H_flag) == 1 & HN >= 0x6 & HN <= 0xF & LN >= 0x6 & LN <= 0xF) goto ;
 goto ;
 #
 # Isn't used
 a_temp = a_temp + 0xFA;
 goto ;
 a_temp = a_temp + 0xA0;
 goto ;
 a_temp = a_temp + 0x9A;
 setResultFlags(a_temp);
 setParity(a_temp);
 A = a_temp;
 goto ;
 a_temp = BCDadjust(a_temp, $(C_flag), $(H_flag));
 $(C_flag) = BCDadjustCarry(a_temp, $(C_flag), $(H_flag));
 setResultFlags(a_temp);
 $(PV_flag) = hasEvenParity(a_temp);
 A = a_temp;
}

:CPL is op0_8=0x2f { A = ~A; $(H_flag) = 1; $(N_flag) = 1; }
:NEG is op0_8=0xed; op0_8=0x44 { local a_temp = A; subtractionFlags(0, a_temp); a_temp = -a_temp; A = a_temp; setResultFlags(a_temp); }
:CCF is op0_8=0x3f { $(C_flag) = !$(C_flag); $(N_flag) = 0; }
:SCF is op0_8=0x37 { $(C_flag) = 1; $(H_flag) = 0; $(N_flag) = 0; }
:NOP is op0_8=0x0 { }
:HALT is op0_8=0x76 { halt(); }
:DI is op0_8=0xf3 {
 # IFF1 = 0;
 # IFF2 = 0;
 disableMaskableInterrupts();
}
:EI is op0_8=0xfb {
 # IFF1 = 1;
 # IFF2 = 1;
 enableMaskableInterrupts();
}
:IM 0 is op0_8=0xed; op0_8=0x46 { setInterruptMode(0:1); }
:IM 1 is op0_8=0xed; op0_8=0x56 { setInterruptMode(1:1); }
:IM 2 is op0_8=0xed; op0_8=0x5e { setInterruptMode(2:1); }

# 16-bit arithmetic
:ADD HL,sRegPair4_2 is op6_2=0x0 & sRegPair4_2 & bits0_4=0x9 & HL { local HL_temp = HL; local Reg_temp = sRegPair4_2; additionFlagsNoPV(HL_temp, Reg_temp); HL = HL_temp + Reg_temp; }
:ADC HL,sRegPair4_2 is op0_8=0xed & HL; op6_2=0x1 & sRegPair4_2 & bits0_4=0xa { local HL_temp = HL; local Reg_temp = sRegPair4_2; additionFlagsNoPV(HL_temp, Reg_temp); HL_temp = HL_temp + Reg_temp + zext($(C_flag)); setResultFlags(HL_temp); HL = HL_temp; }
:SBC HL,sRegPair4_2 is op0_8=0xed & HL; op6_2=0x1 & sRegPair4_2 & bits0_4=0x2 { local HL_temp = HL; local Reg_temp = sRegPair4_2; subtractionWithCarry(HL_temp, sRegPair4_2, HL_temp); setResultFlags(HL_temp); HL = HL_temp; }
:ADD IX,pRegPair4_2 is op0_8=0xdd & IX; op6_2=0x0 & pRegPair4_2 & bits0_4=0x9 { local IX_temp = IX; local Reg_temp = pRegPair4_2; additionFlagsNoPV(IX_temp, Reg_temp); IX = IX_temp + pRegPair4_2; }
:ADD IY,pRegPair4_2 is op0_8=0xfd & IY; op6_2=0x0 & pRegPair4_2 & bits0_4=0x9 { local IY_temp = IY; local Reg_temp = pRegPair4_2; additionFlagsNoPV(IY_temp, Reg_temp); IY = IY_temp + Reg_temp; }
:INC sRegPair4_2 is op6_2=0x0 & sRegPair4_2 & bits0_4=0x3 { sRegPair4_2 = sRegPair4_2 + 1; }
:INC IX is op0_8=0xdd & IX; op0_8=0x23 { IX = IX + 1; }
:INC IY is op0_8=0xfd & IY; op0_8=0x23 { IY = IY + 1; }
:DEC sRegPair4_2 is op6_2=0x0 & sRegPair4_2 & bits0_4=0xb { sRegPair4_2 = sRegPair4_2 - 1; }
:DEC IX is op0_8=0xdd & IX; op0_8=0x2b { IX = IX - 1; }
:DEC IY is op0_8=0xfd & IY; op0_8=0x2b { IY = IY - 1; }

# Accumulator rotates (flags: only C, H, N affected)
:RLCA is op0_8=0x07 { local a_temp = A; $(C_flag) = (a_temp >> 7); A = (a_temp << 1) | $(C_flag); $(H_flag) = 0; $(N_flag) = 0; }
:RLA is op0_8=0x17 { local a_temp = A; nextC:1 = (a_temp >> 7); A = (a_temp << 1) | $(C_flag); $(C_flag) = nextC; $(H_flag) = 0; $(N_flag) = 0; }
:RRCA is op0_8=0x0f { local a_temp = A; $(C_flag) = (a_temp & 1); A = (a_temp >> 1) | ($(C_flag) << 7); $(H_flag) = 0; $(N_flag) = 0; }
:RRA is op0_8=0x1f { local a_temp = A; nextC:1 = (a_temp & 1); A = (a_temp >> 1) | ($(C_flag) << 7); $(C_flag) = nextC; $(H_flag) = 0; $(N_flag) = 0; }

# CB-prefixed rotates/shifts
:RLC reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x0 & reg0_3 { local val = reg0_3; $(C_flag) = (val >> 7); val = (val << 1) | $(C_flag); reg0_3 = val; setResultFlags(val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RLC (HL) is op0_8=0x0cb & HL; op0_8=0x06 { val:1 = 0; MemRead(val,HL); $(C_flag) = (val >> 7); val = (val << 1) | $(C_flag); setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
# NOTE(review): this RLC variant omits setParity(val) while all sibling
# RLC forms call it — looks unintentional; confirm against upstream.
:RLC ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x06 { val:1 = ixMem8; $(C_flag) = (val >> 7); val = (val << 1) | $(C_flag); setResultFlags(val); ixMem8 = val; $(H_flag) = 0; $(N_flag) = 0; }
:RLC iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x06 { val:1 = iyMem8; $(C_flag) = (val >> 7); val = (val << 1) | $(C_flag); setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RL reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x2 & reg0_3 { local r_temp = reg0_3; nextC:1 = (r_temp >> 7); r_temp = (r_temp << 1) | $(C_flag); reg0_3 = r_temp; $(C_flag) = nextC; setResultFlags(r_temp); $(H_flag) = 0; setParity(r_temp); $(N_flag) = 0; }
:RL (HL) is op0_8=0x0cb & HL; op0_8=0x16 { val:1 = 0; MemRead(val,HL); nextC:1 = (val >> 7); val = (val << 1) | $(C_flag); $(C_flag) = nextC; setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RL ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x16 { val:1 = ixMem8; nextC:1 = (val >> 7); val = (val << 1) | $(C_flag); $(C_flag) = nextC; setResultFlags(val); ixMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RL iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x16 { val:1 = iyMem8; nextC:1 = (val >> 7); val = (val << 1) | $(C_flag); $(C_flag) = nextC; setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RRC reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x1 & reg0_3 { local r_temp = reg0_3; $(C_flag) = (r_temp & 1); r_temp = (r_temp >> 1) | ($(C_flag) << 7); reg0_3 = r_temp; setResultFlags(r_temp); $(H_flag) = 0; setParity(r_temp); $(N_flag) = 0; }
:RRC (HL) is op0_8=0x0cb & HL; op0_8=0x0e { val:1 = 0; MemRead(val,HL); $(C_flag) = (val & 1); val = (val >> 1) | ($(C_flag) << 7); setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RRC ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x0e { val:1 = ixMem8; $(C_flag) = (val & 1); val = (val >> 1) | ($(C_flag) << 7); setResultFlags(val); ixMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RRC iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x0e { val:1 = iyMem8; $(C_flag) = (val & 1); val = (val >> 1) | ($(C_flag) << 7); setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RR reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x3 & reg0_3 { local r_temp = reg0_3; nextC:1 = (r_temp & 1); r_temp = (r_temp >> 1) | ($(C_flag) << 7); reg0_3 = r_temp; $(C_flag) = nextC; setResultFlags(r_temp); $(H_flag) = 0; setParity(r_temp); $(N_flag) = 0; }
:RR (HL) is op0_8=0x0cb & HL; op0_8=0x1e { val:1 = 0; MemRead(val,HL); nextC:1 = (val & 1); val = (val >> 1) | ($(C_flag) << 7); $(C_flag) = nextC; setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RR ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x1e { val:1 = ixMem8; nextC:1 = (val & 1); val = (val >> 1) | ($(C_flag) << 7); $(C_flag) = nextC; setResultFlags(val); ixMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:RR iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x1e { val:1 = iyMem8; nextC:1 = (val & 1); val = (val >> 1) | ($(C_flag) << 7); $(C_flag) = nextC; setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SLA reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x4 & reg0_3 { local r_temp = reg0_3; $(C_flag) = (r_temp >> 7); r_temp = r_temp << 1; reg0_3 = r_temp; setResultFlags(r_temp); $(H_flag) = 0; setParity(r_temp); $(N_flag) = 0; }
:SLA (HL) is op0_8=0x0cb & HL; op0_8=0x26 { val:1 = 0; MemRead(val,HL); $(C_flag) = (val >> 7); val = val << 1; setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SLA ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x26 { val:1 = ixMem8; $(C_flag) = (val >> 7); val = val << 1; setResultFlags(val); ixMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SLA iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x26 { val:1 = iyMem8; $(C_flag) = (val >> 7); val = val << 1; setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRA reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x5 & reg0_3 { local _val = reg0_3; $(C_flag) = (_val & 1); _val = _val s>> 1; reg0_3 = _val; setResultFlags(_val); $(H_flag) = 0; setParity(_val); $(N_flag) = 0; }
:SRA (HL) is op0_8=0x0cb & HL; op0_8=0x2e { val:1 = 0; MemRead(val,HL); $(C_flag) = (val & 1); val = val s>> 1; setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRA ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x2e { val:1 = ixMem8; $(C_flag) = (val & 1); val = val s>> 1; setResultFlags(val); ixMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRA iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x2e { val:1 = iyMem8; $(C_flag) = (val & 1); val = val s>> 1; setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRL reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x7 & reg0_3 { local val = reg0_3; $(C_flag) = (val & 1); val = val >> 1; reg0_3 = val; setResultFlags(val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRL (HL) is op0_8=0x0cb & HL; op0_8=0x3e { val:1 = 0; MemRead(val,HL); $(C_flag) = (val & 1); val = val >> 1; setResultFlags(val); MemStore(HL,val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRL ixMem8 is op0_8=0x0dd; op0_8=0xcb; ixMem8; op0_8=0x3e { val:1 = ixMem8; $(C_flag) = (val & 1); val = val >> 1; setResultFlags(val); ixMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:SRL iyMem8 is op0_8=0x0fd; op0_8=0xcb; iyMem8; op0_8=0x3e { val:1 = iyMem8; $(C_flag) = (val & 1); val = val >> 1; setResultFlags(val); iyMem8 = val; $(H_flag) = 0; setParity(val); $(N_flag) = 0; }

# BCD nibble rotates between A and (HL)
:RLD is op0_8=0xed; op0_8=0x6f {
 val:1 = 0;
 local a_temp = A;
 MemRead(val,HL);
 nibA:1 = a_temp & 0x0f;
 nibM:1 = val >> 4;
 val = (val << 4) | nibA;
 a_temp = (a_temp & 0xf0) | nibM;
 A = a_temp;
 MemStore(HL,val);
@if defined(Z180)
 setResultFlags(val);
@else
 setResultFlags(a_temp);
@endif
 $(H_flag) = 0;
 setParity(a_temp);
 $(N_flag) = 0;
}
:RRD is op0_8=0xed; op0_8=0x67 {
 val:1 = 0;
 MemRead(val,HL);
 local a_temp = A;
 nibA:1 = a_temp & 0x0f;
 nibM:1 = val & 0x0f;
 val = (val >> 4) | (nibA << 4);
 a_temp = (a_temp & 0xf0) | nibM;
 A = a_temp;
 MemStore(HL,val);
@if defined(Z180)
 setResultFlags(val);
@else
 setResultFlags(a_temp);
@endif
 $(H_flag) = 0;
 setParity(a_temp);
 $(N_flag) = 0;
}

# Bit test/set/reset
:BIT bits3_3,reg0_3 is op0_8=0xcb; op6_2=0x1 & bits3_3 & reg0_3 { mask:1 = (1 << bits3_3); $(Z_flag) = ((reg0_3 & mask) == 0); $(H_flag) = 1; $(N_flag) = 0; }
:BIT bits3_3,(HL) is op0_8=0xcb & HL; op6_2=0x1 & bits3_3 & bits0_3=0x6 { mask:1 = (1 << bits3_3); val:1 = 0; MemRead(val,HL); $(Z_flag) = ((val & mask) == 0); $(H_flag) = 1; $(N_flag) = 0; }
:BIT bits3_3,ixMem8 is op0_8=0xdd; op0_8=0xcb; ixMem8; op6_2=0x1 & bits3_3 & bits0_3=0x6 { mask:1 = (1 << bits3_3); val:1 = ixMem8; $(Z_flag) = ((val & mask) == 0); $(H_flag) = 1; $(N_flag) = 0; }
:BIT bits3_3,iyMem8 is op0_8=0xfd; op0_8=0xcb; iyMem8; op6_2=0x1 & bits3_3 & bits0_3=0x6 { mask:1 = (1 << bits3_3); val:1 = iyMem8; $(Z_flag) = ((val & mask) == 0); $(H_flag) = 1; $(N_flag) = 0; }
:SET bits3_3,reg0_3 is op0_8=0xcb; op6_2=0x3 & bits3_3 & reg0_3 { mask:1 = (1 << bits3_3); reg0_3 = reg0_3 | mask; }
:SET bits3_3,(HL) is op0_8=0xcb & HL; op6_2=0x3 & bits3_3 & bits0_3=0x6 { mask:1 = (1 << bits3_3); val:1 = 0; MemRead(val,HL); val = val | mask; MemStore(HL,val); }
:SET bits3_3,ixMem8 is op0_8=0xdd; op0_8=0xcb; ixMem8; op6_2=0x3 & bits3_3 & bits0_3=0x6 { mask:1 = (1 << bits3_3); val:1 = ixMem8; ixMem8 = val | mask; }
:SET bits3_3,iyMem8 is op0_8=0xfd; op0_8=0xcb; iyMem8; op6_2=0x3 & bits3_3 & bits0_3=0x6 { mask:1 = (1 << bits3_3); val:1 = iyMem8; iyMem8 = val | mask; }
:RES bits3_3,reg0_3 is op0_8=0xcb; op6_2=0x2 & bits3_3 & reg0_3 { mask:1 = ~(1 << bits3_3); reg0_3 = reg0_3 & mask; }
:RES bits3_3,(HL) is op0_8=0xcb & HL; op6_2=0x2 & bits3_3 & bits0_3=0x6 { mask:1 = ~(1 << bits3_3); val:1 = 0; MemRead(val,HL); val = val & mask; MemStore(HL,val); }
:RES bits3_3,ixMem8 is op0_8=0xdd; op0_8=0xcb; ixMem8; op6_2=0x2 & bits3_3 & bits0_3=0x6 { mask:1 = ~(1 << bits3_3); val:1 = ixMem8; ixMem8 = val & mask; }
:RES bits3_3,iyMem8 is op0_8=0xfd; op0_8=0xcb; iyMem8; op6_2=0x2 & bits3_3 & bits0_3=0x6 { mask:1 = ~(1 << bits3_3); val:1 = iyMem8; iyMem8 = val & mask; }

# Jumps, calls, returns
:JP Addr16 is op0_8=0xc3; Addr16 { goto Addr16; }
:JP cc,Addr16 is op6_2=0x3 & cc & bits0_3=0x2; Addr16 { if (cc) goto Addr16; }
:JR RelAddr8 is op0_8=0x18; RelAddr8 { goto RelAddr8; }
:JR cc2,RelAddr8 is op6_2=0x0 & cc2 & bits0_3=0x0; RelAddr8 { if (cc2) goto RelAddr8; }
:JP (HL) is op0_8=0xe9 & HL { off:2 = (zext(H) << 8) | zext(L); JumpToLoc(off); }
:JP (IX) is op0_8=0xdd & IX; op0_8=0xe9 { JumpToLoc(IX); }
:JP (IY) is op0_8=0xfd & IY; op0_8=0xe9 { JumpToLoc(IY); }
:DJNZ RelAddr8 is op0_8=0x10; RelAddr8 { B = B - 1; if (B != 0) goto RelAddr8; }
:CALL Addr16 is op0_8=0xcd; Addr16 { push16(&:2 inst_next); call Addr16; }
:CALL cc,Addr16 is op6_2=0x3 & cc & bits0_3=0x4; Addr16 { if (!cc) goto inst_next; push16(&:2 inst_next); call Addr16; }
:RET is op0_8=0xc9 { pop16(PC); ptr:$(PTRSIZE) = PC; return [ptr]; }
:RET cc is op6_2=0x3 & cc & bits0_3=0x0 { if (!cc) goto inst_next; pop16(PC); ptr:$(PTRSIZE) = PC; return [ptr]; }
:RETI is op0_8=0xed; op0_8=0x4d { pop16(PC); ptr:$(PTRSIZE) = PC; return [ptr]; }
:RETN is op0_8=0xed; op0_8=0x45 {
 # IFF1 = IFF2;
 pop16(PC);
 ptr:$(PTRSIZE) = PC;
 return [ptr];
}
:RST RstAddr is op6_2=0x3 & RstAddr & bits0_3=0x7 { push16(&:2 inst_next); call RstAddr; }

# Port I/O; the repeating block-I/O forms loop via inst_start while B != 0.
:IN A,IOAddr8a is op0_8=0xdb & A; IOAddr8a { ioRead(IOAddr8a, A); }
:IN reg3_3,IOAddrC is op0_8=0xed & IOAddrC; op6_2=0x1 & reg3_3 & bits0_3=0x0 { val:1 = 0; ioRead(IOAddrC, val); reg3_3 = val; setResultFlags(val); $(H_flag) = 0; setParity(val); $(N_flag) = 0; }
:INI is op0_8=0xed & IOAddrC; op0_8=0xa2 { val:1 = 0; ioRead(IOAddrC, val); MemStore(HL,val); B = B - 1; HL = HL + 1; $(Z_flag) = (B == 0); $(N_flag) = (B s< 0); }
:INIR is op0_8=0xed & IOAddrC; op0_8=0xb2 { val:1 = 0; ioRead(IOAddrC, val); MemStore(HL,val); B = B - 1; HL = HL + 1; if (B != 0) goto inst_start; $(Z_flag) = 1; $(N_flag) = 1; }
:IND is op0_8=0xed & IOAddrC; op0_8=0xaa { val:1 = 0; ioRead(IOAddrC, val); MemStore(HL,val); B = B - 1; HL = HL - 1; $(Z_flag) = (B == 0); $(N_flag) = (B s< 0); }
:INDR is op0_8=0xed & IOAddrC; op0_8=0xba { val:1 = 0; ioRead(IOAddrC, val); MemStore(HL,val); B = B - 1; HL = HL - 1; if (B != 0) goto inst_start; $(Z_flag) = 1; $(N_flag) = 1; }
:OUT IOAddr8a,A is op0_8=0xd3 & A; IOAddr8a { ioWrite(IOAddr8a, A); }
:OUT IOAddrC,reg3_3 is op0_8=0xed & IOAddrC; op6_2=0x1 & reg3_3 & bits0_3=0x1 { ioWrite(IOAddrC, reg3_3); }
:OUTI is op0_8=0xed & IOAddrC; op0_8=0xa3 { local test = B - 1; B = test; val:1 = 0; MemRead(val,HL); ioWrite(IOAddrC, val); HL = HL + 1; $(Z_flag) = (test == 0); $(N_flag) = (test s< 0); }
:OTIR is op0_8=0xed & IOAddrC; op0_8=0xb3 { B = B - 1; val:1 = 0; MemRead(val,HL); ioWrite(IOAddrC, val); HL = HL + 1; if (B != 0) goto inst_start; $(Z_flag) = 1; $(N_flag) = 1; }
:OUTD is
op0_8=0xed & IOAddrC; op0_8=0xab { local test = B - 1; B = test; val:1 = 0; MemRead(val,HL); ioWrite(IOAddrC, val); HL = HL - 1; $(Z_flag) = (test == 0); $(N_flag) = (test s< 0); } :OTDR is op0_8=0xed & IOAddrC; op0_8=0xbb { B = B - 1; val:1 = 0; MemRead(val,HL); ioWrite(IOAddrC, val); HL = HL - 1; if (B != 0) goto inst_start; $(Z_flag) = 1; $(N_flag) = 1; } @if defined(Z180) :MLT qRegPair4_2 is op0_8=0xed; op6_2=0x1 & qRegPair4_2 & bits0_4=0xc { local pair = qRegPair4_2; hi:2 = pair >> 8; lo:2 = pair & 0xff; qRegPair4_2 = hi * lo; } :TST reg3_3 is op0_8=0xed; op6_2=0x0 & reg3_3 & bits0_3=0x4 { local result = reg3_3 & A; setResultFlags(result); $(H_flag)=1; setParity(result); $(N_flag)=0; $(C_flag)=0; } :TST hlMem8 is op0_8=0xed; op0_8=0x34 & hlMem8 { val:1 = 0; MemRead(val,HL); local result = val & A; setResultFlags(result); $(H_flag)=1; setParity(result); $(N_flag)=0; $(C_flag)=0; } :TST imm8 is op0_8=0xed; op0_8=0x64; imm8 { val:1 = imm8 & A; setResultFlags(val); $(H_flag)=1; setParity(val); $(N_flag)=0; $(C_flag)=0; } :IN0 Flag,IOAddr8 is op0_8=0xed; op6_2=0x0 & bits3_3=0x6 & Flag & bits0_3=0x0; IOAddr8 { val:1 = 0; ioRead(IOAddr8,val); # read input location setResultFlags(val); $(H_flag)=0; setParity(val); $(N_flag)=0; } :IN0 reg3_3,IOAddr8 is op0_8=0xed; op6_2=0x0 & reg3_3 & bits0_3=0x0; IOAddr8 { local r_temp = reg3_3; ioRead(IOAddr8,r_temp); # read input location reg3_3 = r_temp; setResultFlags(r_temp); $(H_flag)=0; setParity(r_temp); $(N_flag)=0; } :OUT0 IOAddr8,reg3_3 is op0_8=0xed; op6_2=0x0 & reg3_3 & bits0_3=0x1; IOAddr8 { ioWrite(IOAddr8, reg3_3); } :OTDM is op0_8=0xed; op0_8=0x8b & hlMem8 & IOAddrC { val:1 = hlMem8; ioWrite(IOAddrC, val); HL = HL - 1; C = C - 1; local test = B; setSubtractFlags(test,1); # ?? sets $(C_flag) based upon B-1 ?? 
test = test - 1; B = test;
setResultFlags(test);
# P_flag = parity(r);
$(PV_flag) = (val s< 0);
}

# Z180 OTDMR: repeated OTDM — output (HL) to port (C), then HL-1, C-1, B-1, until B == 0.
:OTDMR is op0_8=0xed; op0_8=0x9b & hlMem8 & IOAddrC {
    local test = B - 1; B = test;
    val:1 = hlMem8;
    ioWrite(IOAddrC, val);
    HL = HL - 1;
    C = C - 1;
    if (test != 0) goto inst_start;
    $(S_flag)=0;
    $(Z_flag)=1;
    $(H_flag) = 0;
    # $(PV_flag)=1;
    $(PV_flag) = (val s< 0); # based upon last output byte
    $(C_flag)=0;
}

# Z180 TSTIO: non-destructive AND of A with input port (C); only flags change.
:TSTIO IOAddr8 is op0_8=0xed; op0_8=0x74; IOAddr8 {
    val:1 = 0;
    ioRead(IOAddr8,val);
    local result = A & val;
    setResultFlags(result);
    $(H_flag) = 1;
    # P_flag = parity(v);
    $(N_flag) = 0;
    $(C_flag)=0;
}

# Z180 OTIM: output (HL) to port (C), then HL+1, C+1, B-1.
:OTIM is op0_8=0xed; op0_8=0x83 & hlMem8 & IOAddrC {
    val:1 = hlMem8;
    ioWrite(IOAddrC, val);
    HL = HL + 1;
    C = C + 1;
    local test = B;
    setSubtractFlags(test,1); # ?? sets $(C_flag) based upon B-1 ??
    test = test - 1; B = test;
    setResultFlags(test);
    $(H_flag) = 1;
    # P_flag = parity(r);
    $(PV_flag) = (val s< 0);
    $(N_flag) = 0;
    $(C_flag)=0;
}

# Z180 OTIMR: repeated OTIM, until B == 0.
:OTIMR is op0_8=0xed; op0_8=0x93 & hlMem8 & IOAddrC {
    val:1 = hlMem8;
    local test = B - 1; B = test;
    ioWrite(IOAddrC, val);
    # FIX: OTIMR repeats OTIM, which post-INCREMENTS HL (see :OTIM above and the
    # Z8018x manual); the previous "HL = HL - 1" was a copy-paste from :OTDMR.
    HL = HL + 1;
    C = C + 1;
    if (test != 0) goto inst_start;
    $(S_flag)=0;
    $(Z_flag)=1;
    $(H_flag) = 0;
    # $(PV_flag)=1;
    $(PV_flag) = (val s< 0); # based upon last output byte
    $(C_flag)=0;
}

:SLP is op0_8=0xed; op0_8=0x76 {
    sleep();
}

@endif

# Undocumented instructions
# information taken from https://clrhome.org/table

@ifndef Z180
# Bad support on Z180

:IM 0 is op0_8=0xed; op0_8=0x4e {
    setInterruptMode(0:1);
}

:IM 1 is op0_8=0xed; op0_8=0x6e {
    setInterruptMode(1:1);
}

# CB range

# Undocumented SLL: shift left, shifting a 1 into bit 0.
:SLL reg0_3 is op0_8=0x0cb; op6_2=0x0 & bits3_3=0x6 & reg0_3 {
    local r_temp = reg0_3;
    $(C_flag) = (r_temp >> 7);
    r_temp = (r_temp << 1) | 0x01;
    reg0_3 = r_temp;
    setResultFlags(r_temp);
    $(H_flag) = 0;
    setParity(r_temp);
    $(N_flag) = 0;
}

:SLL (HL) is op0_8=0x0cb & HL; op0_8=0x36 {
    val:1 = 0;
    MemRead(val,HL);
    $(C_flag) = (val >> 7);
    val = val << 1 | 0x01;
    setResultFlags(val);
    MemStore(HL,val);
    $(H_flag) = 0;
    setParity(val);
    $(N_flag) = 0;
}

## DD range
ixh_iyh: IXH is op0_8=0xdd & IXH { export IXH; } ixh_iyh: IYH is op0_8=0xfd & IYH { export IYH; } ixl_iyl: IXL is op0_8=0xdd & IXL { export IXL; } ixl_iyl: IYL is op0_8=0xfd & IYL { export IYL; } :INC ixh_iyh is ixh_iyh; op0_8=0x24 { local val = ixh_iyh; additionFlags(val, 1); val = val + 1; ixh_iyh = val; setResultFlags(val); } :DEC ixh_iyh is ixh_iyh; op0_8=0x25 { local val = ixh_iyh; subtractionFlagsNoC(val, 1); val = val - 1; ixh_iyh = val ; setResultFlags(val); } :INC ixl_iyl is ixl_iyl; op0_8=0x2c { local val = ixl_iyl; additionFlags(val, 1); val = val + 1; ixl_iyl = val; setResultFlags(val); } :DEC ixl_iyl is ixl_iyl; op0_8=0x2d { local val = ixl_iyl; subtractionFlagsNoC(val, 1); val = val - 1; ixl_iyl = val; setResultFlags(val); } :LD ixh_iyh,imm8 is ixh_iyh; op0_8=0x26; imm8 { ixh_iyh = imm8; } :LD ixl_iyl,imm8 is ixl_iyl; op0_8=0x2e; imm8 { ixl_iyl = imm8; } :LD B,ixh_iyh is ixh_iyh & B; op0_8=0x44 { B = ixh_iyh; } :LD B,ixl_iyl is ixl_iyl & B; op0_8=0x45 { B = ixl_iyl; } :LD C,ixh_iyh is ixh_iyh & C; op0_8=0x4c { C = ixh_iyh; } :LD C,ixl_iyl is ixl_iyl & C; op0_8=0x4d { C = ixl_iyl; } :LD D,ixh_iyh is ixh_iyh & D; op0_8=0x54 { D = ixh_iyh; } :LD D,ixl_iyl is ixl_iyl & D; op0_8=0x55 { D = ixl_iyl; } :LD E,ixh_iyh is ixh_iyh & E; op0_8=0x5c { E = ixh_iyh; } :LD E,ixl_iyl is ixl_iyl & E; op0_8=0x5d { E = ixl_iyl; } :LD ixh_iyh,reg0_3 is ixh_iyh; op6_2=0x1 & bits3_3=0x4 & reg0_3{ ixh_iyh = reg0_3; } :LD ixl_iyl,reg0_3 is ixl_iyl; op6_2=0x1 & bits3_3=0x5 & reg0_3 { ixl_iyl = reg0_3; } :LD ixh_iyh,ixl_iyl is ixh_iyh & ixl_iyl; op0_8=0x65 { ixh_iyh = ixl_iyl; } :LD ixh_iyh,A is ixh_iyh; op0_8=0x67 & A { ixh_iyh = A; } :LD ixl_iyl,ixh_iyh is ixl_iyl & ixh_iyh; op0_8=0x6c { ixl_iyl = ixh_iyh; } :LD ixl_iyl,A is ixl_iyl; op0_8=0x6f & A { ixl_iyl = A; } :LD A,ixh_iyh is ixh_iyh; op0_8=0x7c & A { A = ixh_iyh; } :LD A,ixl_iyl is ixl_iyl; op0_8=0x7d & A { A = ixl_iyl; } :ADD A, ixh_iyh is ixh_iyh; op0_8=0x84 & A { local a_temp = A; local val = ixh_iyh; 
additionFlags(a_temp, val);
a_temp = a_temp + val;
setResultFlags(a_temp);
A = a_temp;
}

:ADD A, ixl_iyl is ixl_iyl; op0_8=0x85 & A {
    local a_temp = A;
    local val = ixl_iyl;
    additionFlags(a_temp, val);
    a_temp = a_temp + val;
    setResultFlags(a_temp);
    A = a_temp;
}

:ADC A, ixh_iyh is ixh_iyh; op0_8=0x8c & A {
    local a_temp = A;
    local val = ixh_iyh;
    additionWithCarry(a_temp, val, a_temp);
    setResultFlags(a_temp);
    A = a_temp;
}

:ADC A, ixl_iyl is ixl_iyl; op0_8=0x8d & A {
    local a_temp = A;
    local val = ixl_iyl;
    additionWithCarry(a_temp, val, a_temp);
    setResultFlags(a_temp);
    A = a_temp;
}

:SUB ixh_iyh is ixh_iyh; op0_8=0x94 {
    local a_temp = A;
    local val = ixh_iyh;
    subtractionFlags(a_temp, val);
    a_temp = a_temp - val;
    setResultFlags(a_temp);
    A = a_temp;
}

:SUB ixl_iyl is ixl_iyl; op0_8=0x95 {
    local a_temp = A;
    local val = ixl_iyl;
    subtractionFlags(a_temp, val);
    a_temp = a_temp - val;
    setResultFlags(a_temp);
    A = a_temp;
}

:SBC A, ixh_iyh is ixh_iyh; op0_8=0x9c & A {
    local a_temp = A;
    subtractionWithCarry(a_temp, ixh_iyh, a_temp);
    # FIX: S/Z flags must reflect the result (a_temp), not the stale A;
    # was setResultFlags(A). Matches the :SBC A, ixl_iyl variant below.
    setResultFlags(a_temp);
    A = a_temp;
}

:SBC A, ixl_iyl is ixl_iyl; op0_8=0x9d & A {
    local a_temp = A;
    subtractionWithCarry(a_temp, ixl_iyl, a_temp);
    setResultFlags(a_temp);
    A = a_temp;
}

:AND ixh_iyh is ixh_iyh; op0_8=0xa4 {
    local a_temp = A;
    $(H_flag) = 1;
    $(C_flag) = 0;
    $(N_flag) = 0;
    a_temp = a_temp & ixh_iyh;
    setResultFlags(a_temp);
    setParity(a_temp);
    A = a_temp;
}

:AND ixl_iyl is ixl_iyl; op0_8=0xa5 {
    local a_temp = A;
    $(H_flag) = 1;
    $(C_flag) = 0;
    $(N_flag) = 0;
    a_temp = a_temp & ixl_iyl;
    setResultFlags(a_temp);
    setParity(a_temp);
    A = a_temp;
}

:XOR ixh_iyh is ixh_iyh; op0_8=0xac {
    local a_temp = A;
    $(H_flag) = 0;
    $(C_flag) = 0;
    $(N_flag) = 0;
    a_temp = a_temp ^ ixh_iyh;
    setResultFlags(a_temp);
    setParity(a_temp);
    A = a_temp;
}

:XOR ixl_iyl is ixl_iyl; op0_8=0xad {
    local a_temp = A;
    $(H_flag) = 0;
    $(C_flag) = 0;
    $(N_flag) = 0;
    a_temp = a_temp ^ ixl_iyl;
    setResultFlags(a_temp);
    setParity(a_temp);
    A = a_temp;
}

:OR ixh_iyh is ixh_iyh; op0_8=0xb4 { local
a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | ixh_iyh; setResultFlags(a_temp); setParity(a_temp); A = a_temp; } :OR ixl_iyl is ixl_iyl; op0_8=0xb5 { local a_temp = A; $(H_flag) = 0; $(C_flag) = 0; $(N_flag) = 0; a_temp = a_temp | ixl_iyl; setResultFlags(a_temp); setParity(a_temp); A = a_temp; } :CP ixh_iyh is ixh_iyh; op0_8=0xbc { local a_temp = A; local r_temp = ixh_iyh; cmp:1 = a_temp - r_temp; subtractionFlags(a_temp, r_temp); setResultFlags(cmp); } :CP ixl_iyl is ixl_iyl; op0_8=0xbd { local a_temp = A; local r_temp = ixl_iyl; cmp:1 = a_temp - r_temp; subtractionFlags(a_temp, r_temp); setResultFlags(cmp); } @endif ================================================ FILE: pypcode/processors/Z80/data/languages/z8401x.pspec ================================================ ================================================ FILE: pypcode/processors/Z80/data/manuals/Z180.idx ================================================ @um0050.pdf [Z8018x Family MPU User Manual (UM005003-0703)] ADD, 227 ADC, 227 AND, 228 CP, 228 CPL, 228 DEC, 229 INC, 229 MLT, 229 NEG, 229 OR, 230 SUB, 230 SBC, 231 TST, 231 XOR, 231 RL, 232 RLC, 232 RLD, 233 RRA, 233 RR, 234 RRCA, 233 RRC, 233 RRD, 234 SLA, 234 SRA, 234 SRL, 234 SET, 235 RES, 235 BIT, 236 LDA, 238 LD, 238 CPD, 241 CPDR, 241 CPI, 242 CPIR, 242 LDD, 242 LDDR, 242 LDI, 242 LDIR, 242 PUSH, 243 POP, 243 EX, 244 CALL, 245 DJNZ, 245 JP, 245 JR, 245 RET, 246 RETI, 246 RETN, 246 RST, 246 IN, 247 IN0, 247 IND, 247 INDR, 247 INI, 247 INIR, 248 OUT, 248 OUT0, 248 OTDM, 248 OTDMR, 248 OTDR, 249 OUTI, 249 OTIR, 249 TSTIO, 249 OTIM, 249 OTIMR, 250 OUTD, 250 DAA, 251 CCF, 251 SCF, 251 DI, 251 EI, 251 HALT, 251 IM0, 251 IM1, 251 IM2, 251 NOP, 251 SLP, 251 ================================================ FILE: pypcode/processors/Z80/data/manuals/Z80.idx ================================================ @UM0080.pdf [Z80 FamilyCPU User Manual, Aug 2016 (UM008011-0816)] LD, 85 PUSH, 129 POP, 133 EX, 138 EXX, 140 LDI, 144 LDIR, 
146 LDD, 148 LDDR, 150 CPI, 152 CPIR, 153 CPD, 155 CPDR, 156 ADD, 159 ADC, 165 SUB, 167 SBC, 169 AND, 171 OR, 173 XOR, 175 CP, 177 INC, 179 DEC, 184 DAA, 187 CPL, 189 NEG, 190 CCF, 192 SCF, 193 NOP, 194 HALT, 195 DI, 196 EI, 197 IM, 198 RLCA, 219 RLA, 221 RRCA, 223 RRA, 225 RLC, 227 RL, 235 RRC, 238 RR, 241 SLA, 244 SRA, 247 SRL, 250 RLD, 252 RRD, 254 BIT, 257 SET, 265 RES, 273 JP, 276 JR, 279 DJNZ, 292 CALL, 295 RET, 299 RETI, 302 RETN, 304 RST, 306 IN, 309 INI, 312 INIR, 314 IND, 316 INDR, 318 OUT, 320 OUTI, 323 OTIR, 325 OUTD, 327 OTDR, 329 ================================================ FILE: pypcode/processors/Z80/temp/z8401x.pspec ================================================ ================================================ FILE: pypcode/processors/eBPF/data/languages/eBPF.cspec ================================================ ================================================ FILE: pypcode/processors/eBPF/data/languages/eBPF.dwarf ================================================ ================================================ FILE: pypcode/processors/eBPF/data/languages/eBPF.ldefs ================================================ eBPF processor 64-bit big-endian eBPF processor 64-bit little-endian ================================================ FILE: pypcode/processors/eBPF/data/languages/eBPF.opinion ================================================ ================================================ FILE: pypcode/processors/eBPF/data/languages/eBPF.pspec ================================================ ================================================ FILE: pypcode/processors/eBPF/data/languages/eBPF.sinc ================================================ ############################################################################### # eBPF Processor Specification for Ghidra ############################################################################### define endian=$(ENDIAN); #eBPF is a RISC register machine with a total of 11 64-bit registers, a program 
counter and a 512 byte fixed-size stack. #9 registers are general purpose read-write, one is a read-only stack pointer and the program counter is implicit, #i.e. we can only jump to a certain offset from it. The eBPF registers are always 64-bit wide. define space ram type=ram_space size=8 default; define space register type=register_space size=4; define space syscall type=ram_space size=4; define register offset=0 size=8 [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 PC ]; # Instruction encoding: Insop:8, dst_reg:4, src_reg:4, off:16, imm:32 - from lsb to msb @if ENDIAN == "little" define token instr(64) llvm_imm_callx_zero=(36, 63) imm=(32, 63) signed llvm_reg_callx=(32, 35) # special encoding for callx instruction emitted by LLVM off=(16, 31) signed src=(12, 15) dst=(8, 11) op_alu_jmp_opcode=(4, 7) op_alu_jmp_source=(3, 3) op_ld_st_mode=(5, 7) op_ld_st_size=(3, 4) op_insn_class=(0, 2) ; #We'll need this token to operate with LDDW instruction, which has 64 bit imm value define token immtoken(64) imm2=(32, 63) ; @else # ENDIAN == "big" define token instr(64) imm=(0, 31) signed llvm_reg_callx=(0, 3) # special encoding for callx instruction emitted by LLVM llvm_imm_callx_zero=(4, 31) off=(32, 47) signed src=(48, 51) dst=(52, 55) op_insn_class=(56, 58) op_ld_st_size=(59, 60) op_ld_st_mode=(61, 63) op_alu_jmp_source=(59, 59) op_alu_jmp_opcode=(60, 63) ; define token immtoken(64) imm2=(0, 31) ; @endif # ENDIAN = "big" #To operate with registers attach variables [ src dst llvm_reg_callx ] [ R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 _ _ _ _ _ ]; #Arithmetic instructions #BPF_ALU64 ############################################################################### SRC8: src is src & op_alu_jmp_source=1 { export src; } SRC8: imm is imm & op_alu_jmp_source=0 { export *[const]:8 imm; } SRC4: src is src & op_alu_jmp_source=1 { local tmp:4 = src:4; export tmp; } SRC4: imm is imm & op_alu_jmp_source=0 { export *[const]:4 imm; } DST4: dst is dst { local tmp:4 = dst:4; export tmp; } :MOV dst, SRC8 is 
SRC8 & dst & off=0 & op_alu_jmp_opcode=0xb & op_insn_class=0x7 { dst = SRC8; } :MOVSB dst, src is src & dst & off=8 & op_alu_jmp_opcode=0xb & op_alu_jmp_source=1 & op_insn_class=0x7 { dst = sext(src:1); } :MOVSH dst, src is src & dst & off=16 & op_alu_jmp_opcode=0xb & op_alu_jmp_source=1 & op_insn_class=0x7 { dst = sext(src:2); } :MOVSW dst, src is src & dst & off=32 & op_alu_jmp_opcode=0xb & op_alu_jmp_source=1 & op_insn_class=0x7 { dst = sext(src:4); } :ADD dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x0 & op_insn_class=0x7 { dst = dst + SRC8; } :SUB dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x1 & op_insn_class=0x7 { dst = dst - SRC8; } :MUL dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x2 & op_insn_class=0x7 { dst = dst * SRC8; } :DIV dst, SRC8 is SRC8 & dst & off=0 & op_alu_jmp_opcode=0x3 & op_insn_class=0x7 { dst = dst / SRC8; } :SDIV dst, SRC8 is SRC8 & dst & off=1 & op_alu_jmp_opcode=0x3 & op_insn_class=0x7 { dst = dst s/ SRC8; } :OR dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x4 & op_insn_class=0x7 { dst = dst | SRC8; } :AND dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x5 & op_insn_class=0x7 { dst = dst & SRC8; } :LSH dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x6 & op_insn_class=0x7 { dst = dst << SRC8; } :RSH dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0x7 & op_insn_class=0x7 { dst = dst >> SRC8; } :NEG dst is dst & op_alu_jmp_opcode=0x8 & op_alu_jmp_source=0 & op_insn_class=0x7 { dst = -dst; } :MOD dst, SRC8 is SRC8 & dst & off=0 & op_alu_jmp_opcode=0x9 & op_insn_class=0x7 { dst = dst % SRC8; } :SMOD dst, SRC8 is SRC8 & dst & off=1 & op_alu_jmp_opcode=0x9 & op_insn_class=0x7 { dst = dst s% SRC8; } :XOR dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0xa & op_insn_class=0x7 { dst = dst ^ SRC8; } :ARSH dst, SRC8 is SRC8 & dst & op_alu_jmp_opcode=0xc & op_insn_class=0x7 { dst = dst s>> SRC8; } #BPF_ALU ############################################################################### :MOV dst, SRC4 is SRC4 & dst & off=0 & op_alu_jmp_opcode=0xb & 
op_insn_class=0x4 { dst = zext(SRC4); } :MOVSB dst, src is src & dst & off=8 & op_alu_jmp_opcode=0xb & op_alu_jmp_source=1 & op_insn_class=0x4 { local tmp:4 = sext(src:1); dst = zext(tmp); } :MOVSH dst, src is src & dst & off=16 & op_alu_jmp_opcode=0xb & op_alu_jmp_source=1 & op_insn_class=0x4 { local tmp:4 = sext(src:2); dst = zext(tmp); } :ADD dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x0 & op_insn_class=0x4 { dst = zext(dst:4 + SRC4); } :SUB dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x1 & op_insn_class=0x4 { dst = zext(dst:4 - SRC4); } :MUL dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x2 & op_insn_class=0x4 { dst = zext(dst:4 * SRC4); } :DIV dst, SRC4 is SRC4 & dst & off=0 & op_alu_jmp_opcode=0x3 & op_insn_class=0x4 { dst = zext(dst:4 / SRC4); } :SDIV dst, SRC4 is SRC4 & dst & off=1 & op_alu_jmp_opcode=0x3 & op_insn_class=0x4 { dst = zext(dst:4 s/ SRC4); } :OR dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x4 & op_insn_class=0x4 { dst = zext(dst:4 | SRC4); } :AND dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x5 & op_insn_class=0x4 { dst = zext(dst:4 & SRC4); } :LSH dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x6 & op_insn_class=0x4 { dst = zext(dst:4 << SRC4); } :RSH dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0x7 & op_insn_class=0x4 { dst = zext(dst:4 >> SRC4); } :NEG dst is dst & op_alu_jmp_opcode=0x8 & op_alu_jmp_source=0 & op_insn_class=0x4 { dst = zext(-dst:4); } :MOD dst, SRC4 is SRC4 & dst & off=0 & op_alu_jmp_opcode=0x9 & op_insn_class=0x4 { dst = zext(dst:4 % SRC4); } :SMOD dst, SRC4 is SRC4 & dst & off=1 & op_alu_jmp_opcode=0x9 & op_insn_class=0x4 { dst = zext(dst:4 s% SRC4); } :XOR dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0xa & op_insn_class=0x4 { dst = zext(dst:4 ^ SRC4); } :ARSH dst, SRC4 is SRC4 & dst & op_alu_jmp_opcode=0xc & op_insn_class=0x4 { dst = zext(dst:4 s>> SRC4); } #Bytewasp instructions ############################################################################### @if ENDIAN == "little" # BPF_ALU | BPF_K | BPF_END :LE16 dst is 
imm=0x10 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x4 { dst = zext(dst:2); } :LE32 dst is imm=0x20 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x4 { dst = zext(dst:4); } :LE64 dst is imm=0x40 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x4 {} # BPF_ALU | BPF_X | BPF_END :BE16 dst is imm=0x10 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=1 & op_insn_class=0x4 { dst = ((dst & 0xff00) >> 8) | ((dst & 0x00ff) << 8); } :BE32 dst is imm=0x20 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=1 & op_insn_class=0x4 { dst = ((dst & 0xff000000) >> 24) | (((dst) & 0x00ff0000) >> 8) | (((dst) & 0x0000ff00) << 8) | ((dst & 0x000000ff) << 24); } :BE64 dst is imm=0x40 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=1 & op_insn_class=0x4 { dst = ((dst << 56) & 0xff00000000000000) | ((dst << 40) & 0x00ff000000000000) | ((dst << 24) & 0x0000ff0000000000) | ((dst << 8) & 0x000000ff00000000) | ((dst >> 8) & 0x00000000ff000000) | ((dst >> 24) & 0x0000000000ff0000) | ((dst >> 40) & 0x000000000000ff00) | ((dst >> 56) & 0x00000000000000ff); } @else # ENDIAN == "big" # BPF_ALU | BPF_K | BPF_END :LE16 dst is imm=0x10 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x4 { dst = ((dst & 0xff00) >> 8) | ((dst & 0x00ff) << 8); } :LE32 dst is imm=0x20 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x4 { dst = ((dst & 0xff000000) >> 24) | (((dst) & 0x00ff0000) >> 8) | (((dst) & 0x0000ff00) << 8) | ((dst & 0x000000ff) << 24); } :LE64 dst is imm=0x40 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x4 { dst = ((dst << 56) & 0xff00000000000000) | ((dst << 40) & 0x00ff000000000000) | ((dst << 24) & 0x0000ff0000000000) | ((dst << 8) & 0x000000ff00000000) | ((dst >> 8) & 0x00000000ff000000) | ((dst >> 24) & 0x0000000000ff0000) | ((dst >> 40) & 0x000000000000ff00) | ((dst >> 56) & 0x00000000000000ff); } # BPF_ALU | BPF_X | BPF_END :BE16 dst is imm=0x10 & dst & 
op_alu_jmp_opcode=0xd & op_alu_jmp_source=1 & op_insn_class=0x4 { dst = zext(dst:2); } :BE32 dst is imm=0x20 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=1 & op_insn_class=0x4 { dst = zext(dst:4); } :BE64 dst is imm=0x40 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=1 & op_insn_class=0x4 {} @endif # ENDIAN = "big" # BPF_ALU64 | BPF_K | BPF_END :BSWAP16 dst is imm=0x10 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x7 { dst = ((dst & 0xff00) >> 8) | ((dst & 0x00ff) << 8); } :BSWAP32 dst is imm=0x20 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x7 { dst = ((dst & 0xff000000) >> 24) | (((dst) & 0x00ff0000) >> 8) | (((dst) & 0x0000ff00) << 8) | ((dst & 0x000000ff) << 24); } :BSWAP64 dst is imm=0x40 & dst & op_alu_jmp_opcode=0xd & op_alu_jmp_source=0 & op_insn_class=0x7 { dst = ((dst << 56) & 0xff00000000000000) | ((dst << 40) & 0x00ff000000000000) | ((dst << 24) & 0x0000ff0000000000) | ((dst << 8) & 0x000000ff00000000) | ((dst >> 8) & 0x00000000ff000000) | ((dst >> 24) & 0x0000000000ff0000) | ((dst >> 40) & 0x000000000000ff00) | ((dst >> 56) & 0x00000000000000ff); } #Memory instructions - Load and Store ############################################################################### #LDDW is the only 16-byte eBPF instruction which consists of two consecutive 8-byte blocks ('struct bpf_insn') #and interpreted as single instruction which loads 64-bit imm value into dst. Encoding of LDDW: #LSR MSR # opcode src dst offset Low 8-byte imm zero-block High 8-byte imm #bits 8 4 4 16 32 32 32 # So, imm64 consists of concatination of high 8-byte imm and low 8-byte imm. :LDDW dst, concat is imm & dst & op_ld_st_mode=0x0 & op_ld_st_size=0x3 & op_insn_class=0x0; imm2 [ concat= (imm2 << 32) | ((imm) & 0xFFFFFFFF); ] { dst = concat; } #BPF_LD_MAP_FD(DST, MAP_FD) -> second LDDW = pseudo LDDW insn used to refer to process-local map_fd #For each instruction which needs relocation, it inject corresponding file descriptor to imm field. 
#As a part of protocol, src_reg is set to BPF_PSEUDO_MAP_FD (which defined as 1) to notify kernel this is a map loading instruction. :LDDW dst, imm is imm & src=1 & dst & op_ld_st_mode=0x0 & op_ld_st_size=0x3 & op_insn_class=0x0; imm2 { dst = *:8 imm:8; } :LDABSW dst, imm is imm & dst & op_ld_st_mode=0x1 & op_ld_st_size=0x0 & op_insn_class=0x0 { dst = zext(*:4 imm:8); } :LDABSH dst, imm is imm & dst & op_ld_st_mode=0x1 & op_ld_st_size=0x1 & op_insn_class=0x0 { dst = zext(*:2 imm:8); } :LDABSB dst, imm is imm & dst & op_ld_st_mode=0x1 & op_ld_st_size=0x2 & op_insn_class=0x0 { dst = zext(*:1 imm:8); } :LDABSDW dst, imm is imm & dst & op_ld_st_mode=0x1 & op_ld_st_size=0x3 & op_insn_class=0x0 { dst = *:8 imm:8; } :LDINDW src, dst, imm is imm & src & dst & op_ld_st_mode=0x2 & op_ld_st_size=0x0 & op_insn_class=0x0 { dst = zext(*:4 (src + imm)); } :LDINDH src, dst, imm is imm & src & dst & op_ld_st_mode=0x2 & op_ld_st_size=0x1 & op_insn_class=0x0 { dst = zext(*:2 (src + imm)); } :LDINDB src, dst, imm is imm & src & dst & op_ld_st_mode=0x2 & op_ld_st_size=0x2 & op_insn_class=0x0 { dst = zext(*:1 (src + imm)); } :LDINDDW src, dst, imm is imm & src & dst & op_ld_st_mode=0x2 & op_ld_st_size=0x3 & op_insn_class=0x0 { dst = *:8 (src + imm); } :LDXW dst, [src + off] is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x0 & op_insn_class=0x1 { dst = zext(*:4 (src + off)); } :LDXH dst, [src + off] is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x1 & op_insn_class=0x1 { dst = zext(*:2 (src + off)); } :LDXB dst, [src + off] is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x2 & op_insn_class=0x1 { dst = zext(*:1 (src + off)); } :LDXDW dst, [src + off] is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x3 & op_insn_class=0x1 { dst = *:8 (src + off); } :STW [dst + off], imm is imm & off & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x0 & op_insn_class=0x2 { *:4 (dst + off)=imm:4; } :STH [dst + off], imm is imm & off & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x1 & 
op_insn_class=0x2 { *:2 (dst + off)=imm:2; } :STB [dst + off], imm is imm & off & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x2 & op_insn_class=0x2 { *:1 (dst + off)=imm:1; } :STDW [dst + off], imm is imm & off & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x3 & op_insn_class=0x2 { *:8 (dst + off)=imm:8; } :STXW [dst + off], src is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x0 & op_insn_class=0x3 { *:4 (dst + off)=src:4; } :STXH [dst + off], src is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x1 & op_insn_class=0x3 { *:2 (dst + off)=src:2; } :STXB [dst + off], src is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x2 & op_insn_class=0x3 { *:1 (dst + off)=src:1; } :STXDW [dst + off], src is off & src & dst & op_ld_st_mode=0x3 & op_ld_st_size=0x3 & op_insn_class=0x3 { *:8 (dst + off)=src:8; } :LDSXW dst, [src + off] is off & src & dst & op_ld_st_mode=0x4 & op_ld_st_size=0x0 & op_insn_class=0x1 { dst = sext(*:4 (src + off)); } :LDSXH dst, [src + off] is off & src & dst & op_ld_st_mode=0x4 & op_ld_st_size=0x1 & op_insn_class=0x1 { dst = sext(*:2 (src + off)); } :LDSXB dst, [src + off] is off & src & dst & op_ld_st_mode=0x4 & op_ld_st_size=0x2 & op_insn_class=0x1 { dst = sext(*:1 (src + off)); } # BPF_ATOMIC # BPF_ADD: # BPF_STX | BPF_ATOMIC | BPF_W :STXXADDW [dst + off], src is imm=0x0 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { *:4 (dst + off) = *:4 (dst + off) + src:4; } # BPF_STX | BPF_ATOMIC | BPF_DW :STXXADDDW [dst + off], src is imm=0x0 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 { *:8 (dst + off) = *:8 (dst + off) + src; } # BPF_OR: :STXXADDW [dst + off], src is imm=0x40 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { *:4 (dst + off) = *:4 (dst + off) | src:4; } :STXXADDDW [dst + off], src is imm=0x40 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 { *:8 (dst + off) = *:8 (dst + off) | src; } # BPF_AND: :STXXADDW 
[dst + off], src is imm=0x50 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { *:4 (dst + off) = *:4 (dst + off) & src:4; } :STXXADDDW [dst + off], src is imm=0x50 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 { *:8 (dst + off) = *:8 (dst + off) & src; } # BPF_XOR: :STXXADDW [dst + off], src is imm=0xa0 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { *:4 (dst + off) = *:4 (dst + off) ^ src:4; } :STXXADDDW [dst + off], src is imm=0xa0 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 { *:8 (dst + off) = *:8 (dst + off) ^ src; } # BPF_ADD | BPF_FETCH -> src = atomic_fetch_add(dst + off, src): :STXXADDW [dst + off], src is imm=0x1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { local tmp:4 = *:4 (dst + off); *:4 (dst + off) = *:4 (dst + off) + src:4; src = zext(tmp); } :STXXADDDW [dst + off], src is imm=0x1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 { local tmp:8 = *:8 (dst + off); *:8 (dst + off) = *:8 (dst + off) + src; src = tmp; } # BPF_OR | BPF_FETCH -> src = atomic_fetch_or(dst + off, src): :STXXADDW [dst + off], src is imm=0x41 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { local tmp:4 = *:4 (dst + off); *:4 (dst + off) = *:4 (dst + off) | src:4; src = zext(tmp); } :STXXADDDW [dst + off], src is imm=0x41 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 { local tmp:8 = *:8 (dst + off); *:8 (dst + off) = *:8 (dst + off) | src; src = tmp; } # BPF_AND | BPF_FETCH -> src = atomic_fetch_and(dst + off, src): :STXXADDW [dst + off], src is imm=0x51 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 { local tmp:4 = *:4 (dst + off); *:4 (dst + off) = *:4 (dst + off) & src:4; src = zext(tmp); } :STXXADDDW [dst + off], src is imm=0x51 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & 
op_insn_class=0x3 {
    local tmp:8 = *:8 (dst + off);
    *:8 (dst + off) = *:8 (dst + off) & src;
    src = tmp;
}

# BPF_XOR | BPF_FETCH -> src = atomic_fetch_xor(dst + off, src):
:STXXADDW [dst + off], src is imm=0xa1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 {
    local tmp:4 = *:4 (dst + off);
    *:4 (dst + off) = *:4 (dst + off) ^ src:4;
    src = zext(tmp);
}
:STXXADDDW [dst + off], src is imm=0xa1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 {
    local tmp:8 = *:8 (dst + off);
    *:8 (dst + off) = *:8 (dst + off) ^ src;
    src = tmp;
}

# BPF_XCHG -> src_reg = atomic_xchg(dst + off, src):
:STXXADDW [dst + off], src is imm=0xe1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 {
    local tmp:4 = *:4 (dst + off);
    *:4 (dst + off) = src:4;
    src = zext(tmp);
}
:STXXADDDW [dst + off], src is imm=0xe1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 {
    local tmp:8 = *:8 (dst + off);
    *:8 (dst + off) = src;
    src = tmp;
}

# BPF_CMPXCHG -> R0 = atomic_cmpxchg(dst + off, R0, src):
# FIX: the goto targets were lost (dangling "goto ;" with no label), leaving
# invalid p-code. Restored explicit labels implementing BPF cmpxchg semantics:
# store src only when the old memory value equals R0, and R0 always receives
# the old memory value (zero-extended for the 32-bit form).
:STXXADDW [dst + off], src is imm=0xf1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x0 & op_insn_class=0x3 {
    local tmp:4 = *:4 (dst + off);
    if (R0:4 != tmp) goto <cmpxchg_w_done>;
    *:4 (dst + off) = src:4;
    <cmpxchg_w_done>
    R0 = zext(tmp);
}
:STXXADDDW [dst + off], src is imm=0xf1 & off & src & dst & op_ld_st_mode=0x6 & op_ld_st_size=0x3 & op_insn_class=0x3 {
    local tmp:8 = *:8 (dst + off);
    if (R0 != tmp) goto <cmpxchg_dw_done>;
    *:8 (dst + off) = src;
    <cmpxchg_dw_done>
    R0 = tmp;
}

#Jump instructions (BPF_JMP, BPF_JMP32)
###############################################################################

joff: reloc is off [ reloc = inst_next + off * 8; ] { export *:8 reloc; }
jimm: reloc is imm [ reloc = inst_next + imm * 8; ] { export *:8 reloc; }

cond: "EQ" is op_alu_jmp_opcode=0x1 & op_insn_class=0x5 & dst & SRC8 { local cmp = dst == SRC8; export cmp; }
cond: "EQ" is op_alu_jmp_opcode=0x1 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 == SRC4; export cmp; }
cond:
"GT" is op_alu_jmp_opcode=0x2 & op_insn_class=0x5 & dst & SRC8 { local cmp = dst > SRC8; export cmp; } cond: "GT" is op_alu_jmp_opcode=0x2 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 > SRC4; export cmp; } cond: "GE" is op_alu_jmp_opcode=0x3 & op_insn_class=0x5 & dst & SRC8 { local cmp = dst >= SRC8; export cmp; } cond: "GE" is op_alu_jmp_opcode=0x3 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 >= SRC4; export cmp; } cond: "LT" is op_alu_jmp_opcode=0xa & op_insn_class=0x5 & dst & SRC8 { local cmp = dst < SRC8; export cmp; } cond: "LT" is op_alu_jmp_opcode=0xa & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 < SRC4; export cmp; } cond: "LE" is op_alu_jmp_opcode=0xb & op_insn_class=0x5 & dst & SRC8 { local cmp = dst <= SRC8; export cmp; } cond: "LE" is op_alu_jmp_opcode=0xb & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 <= SRC4; export cmp; } cond: "NE" is op_alu_jmp_opcode=0x5 & op_insn_class=0x5 & dst & SRC8 { local cmp = dst != SRC8; export cmp; } cond: "NE" is op_alu_jmp_opcode=0x5 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 != SRC4; export cmp; } cond: "SET" is op_alu_jmp_opcode=0x4 & op_insn_class=0x5 & dst & SRC8 { local cmp = (dst & SRC8) != 0; export cmp; } cond: "SET" is op_alu_jmp_opcode=0x4 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = (DST4 & SRC4) != 0; export cmp; } cond: "SGT" is op_alu_jmp_opcode=0x6 & op_insn_class=0x5 & dst & SRC8 { local cmp = dst s> SRC8; export cmp; } cond: "SGT" is op_alu_jmp_opcode=0x6 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 s> SRC4; export cmp; } cond: "SGE" is op_alu_jmp_opcode=0x7 & op_insn_class=0x5 & dst & SRC8 { local cmp = dst s>= SRC8; export cmp; } cond: "SGE" is op_alu_jmp_opcode=0x7 & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 s>= SRC4; export cmp; } cond: "SLT" is op_alu_jmp_opcode=0xc & op_insn_class=0x5 & dst & SRC8 { local cmp = dst s< SRC8; export cmp; } cond: "SLT" is op_alu_jmp_opcode=0xc & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 s< SRC4; 
export cmp; }
cond: "SLE" is op_alu_jmp_opcode=0xd & op_insn_class=0x5 & dst & SRC8 { local cmp = dst s<= SRC8; export cmp; }
cond: "SLE" is op_alu_jmp_opcode=0xd & op_insn_class=0x6 & DST4 & SRC4 { local cmp = DST4 s<= SRC4; export cmp; }
# JA: unconditional jump. The BPF_JMP (class 0x5) form branches via the off
# field (joff); the BPF_JMP32 (class 0x6) form branches via the imm field (jimm).
:JA joff is joff & op_alu_jmp_opcode=0x0 & op_alu_jmp_source=0 & op_insn_class=0x5 { goto joff; }
:JA jimm is jimm & op_alu_jmp_opcode=0x0 & op_alu_jmp_source=0 & op_insn_class=0x6 { goto jimm; }
# Conditional jump: mnemonic is "J" + the cond row's display name (JEQ, JSGT, ...).
:J^cond dst, SRC8, joff is joff & SRC8 & dst & cond { if (cond) goto joff; }
# Helper call: the helper number (imm) is modeled as an address in the
# "syscall" space so each helper gets a distinct call target.
SysCall: imm is imm { export *[syscall]:1 imm; }
:CALL SysCall is imm & src=0 & op_alu_jmp_opcode=0x8 & op_alu_jmp_source=0 & op_insn_class=0x5 & SysCall { call SysCall; }
# BPF-to-BPF call (src=1): PC-relative target, scaled by the 8-byte insn size.
disp32: reloc is imm [ reloc = inst_next + imm * 8; ] { export *:4 reloc; }
:CALL disp32 is imm & src=1 & op_alu_jmp_opcode=0x8 & op_alu_jmp_source=0 & op_insn_class=0x5 & disp32 { call disp32; }
# GCC encoding and LLVM 19.1+ encoding
:CALLX dst is op_alu_jmp_opcode=0x8 & op_alu_jmp_source=1 & op_insn_class=0x5 & src=0 & imm=0 & dst { call [dst]; }
# LLVM encoding used until LLVM 19.1
# Introduced in https://github.com/llvm/llvm-project/commit/9a67245d881f4cf89fd8f897ae2cd0bccec49496
# Modified in https://github.com/llvm/llvm-project/commit/c43ad6c0fddac0bbed5e881801dd2bc2f9eeba2d
:CALLX llvm_reg_callx is op_alu_jmp_opcode=0x8 & op_alu_jmp_source=1 & op_insn_class=0x5 & dst=0 & src=0 & llvm_imm_callx_zero=0 & llvm_reg_callx { call [llvm_reg_callx]; }
# Both CALLX encodings are matched when both dst and imm are zero
:CALLX R0 is op_alu_jmp_opcode=0x8 & op_alu_jmp_source=1 & op_insn_class=0x5 & dst=0 & src=0 & imm=0 & R0 { call [R0]; }
# EXIT: return; the return address is read through the frame pointer R10.
:EXIT is op_alu_jmp_opcode=0x9 & op_alu_jmp_source=0 & op_insn_class=0x5 { return [*:8 R10]; }
================================================
FILE: pypcode/processors/eBPF/data/languages/eBPF_be.slaspec
================================================
@define ENDIAN "big"
@include "eBPF.sinc"
================================================
FILE:
pypcode/processors/eBPF/data/languages/eBPF_le.slaspec ================================================ @define ENDIAN "little" @include "eBPF.sinc" ================================================ FILE: pypcode/processors/tricore/data/languages/tc172x.pspec ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tc176x.pspec ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tc29x.pspec ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.cspec ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.dwarf ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.ldefs ================================================ Siemens Tricore Embedded Processor Siemens Tricore Embedded Processor TC29x Siemens Tricore Embedded Processor TC1724/TC1728 Siemens Tricore Embedded Processor TC1762/TC1766 ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.opinion ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.pcp.sinc ================================================ # R0 - Accumulator # R1 - # R2 - Return Address # R3 - # R4 - SRC # R5 - DST # R6 - CPPN/SRPN/TOS/CNT1 # R7 - DPTR/Flags define register offset=0xf0043F00 size=4 [ R0 R1 R2 R3 R4 R5 R6 R7 ]; @define CPPN "R6[24,8]" @define SRPN "R6[16,8]" @define TOS "R6[14,2]" @define CNT1 "R6[0,12]" @define DPTR "R7[8,8]" @define CEN "R7[6,1]" @define IEN "R7[5,1]" @define CNZ "R7[4,1]" @define V "R7[3,1]" 
@define C "R7[2,1]" @define N "R7[1,1]" @define Z "R7[0,1]" define token pcpinstr (16) pcp_op0000=(0,0) pcp_op0001=(0,1) pcp_op0002=(0,2) ccAB=(0,3) ccA=(0,2) pcp_op0004=(0,4) pcp_op0005=(0,5) pcp_op0009=(0,9) pcp_op0010=(0,10) pcp_op0101=(1,1) pcp_op0202=(2,2) pcp_op0203=(2,3) pcp_op0204=(2,4) pcp_op0303=(3,3) R0305=(3,5) pcp_op0404=(4,4) pcp_op0405=(4,5) pcp_op0406=(4,6) pcp_op0505=(5,5) pcp_op0506=(5,6) R0608=(6,8) ccB=(6,9) pcp_op0707=(7,7) pcp_op0708=(7,8) pcp_op0808=(8,8) pcp_op0909=(9,9) pcp_op0910=(9,10) pcp_op0912=(9,12) pcp_op1010=(10,10) pcp_op1012=(10,12) pcp_op1111=(11,11) pcp_op1212=(12,12) addrmode=(13,15) ; define token pcpinstr2 (16) pcp_op1631=(0,15) ; attach variables [ R0305 R0608 ] [ R0 R1 R2 R3 R4 R5 R6 R7 ]; CONDCA: "cc_UC" is PCPMode=1 & ccA=0x0 { local tmp:1 = 1; export tmp; } CONDCA: "cc_Z" is PCPMode=1 & ccA=0x1 { local tmp:1 = ($(Z)==1); export tmp; } CONDCA: "cc_NZ" is PCPMode=1 & ccA=0x2 { local tmp:1 = ($(Z)==0); export tmp; } CONDCA: "cc_V" is PCPMode=1 & ccA=0x3 { local tmp:1 = ($(V)==1); export tmp; } CONDCA: "cc_ULT" is PCPMode=1 & ccA=0x4 { local tmp:1 = ($(C)==1); export tmp; } CONDCA: "cc_UGT" is PCPMode=1 & ccA=0x5 { local tmp:1 = (($(C)|$(Z))==0); export tmp; } CONDCA: "cc_SLT" is PCPMode=1 & ccA=0x6 { local tmp:1 = (($(N)^$(V))==1); export tmp; } CONDCA: "cc_SGT" is PCPMode=1 & ccA=0x7 { local tmp:1 = ((($(N)^$(V))|$(Z))==0); export tmp; } CONDCB: "cc_UC" is PCPMode=1 & ccB=0x0 { local tmp:1 = 1; export tmp; } CONDCB: "cc_Z" is PCPMode=1 & ccB=0x1 { local tmp:1 = ($(Z)==1); export tmp; } CONDCB: "cc_NZ" is PCPMode=1 & ccB=0x2 { local tmp:1 = ($(Z)==0); export tmp; } CONDCB: "cc_V" is PCPMode=1 & ccB=0x3 { local tmp:1 = ($(V)==1); export tmp; } CONDCB: "cc_ULT" is PCPMode=1 & ccB=0x4 { local tmp:1 = ($(C)==1); export tmp; } CONDCB: "cc_UGT" is PCPMode=1 & ccB=0x5 { local tmp:1 = (($(C)|$(Z))==0); export tmp; } CONDCB: "cc_SLT" is PCPMode=1 & ccB=0x6 { local tmp:1 = (($(N)^$(V))==1); export tmp; } CONDCB: "cc_SGT" is PCPMode=1 
& ccB=0x7 { local tmp:1 = ((($(N)^$(V))|$(Z))==0); export tmp; } CONDCB: "cc_N" is PCPMode=1 & ccB=0x8 { local tmp:1 = ($(N)==1); export tmp; } CONDCB: "cc_NN" is PCPMode=1 & ccB=0x9 { local tmp:1 = ($(N)==0); export tmp; } CONDCB: "cc_NV" is PCPMode=1 & ccB=0xA { local tmp:1 = ($(V)==0); export tmp; } CONDCB: "cc_UGE" is PCPMode=1 & ccB=0xB { local tmp:1 = ($(C)==0); export tmp; } CONDCB: "cc_SGE" is PCPMode=1 & ccB=0xC { local tmp:1 = (($(N)^$(V))==0); export tmp; } CONDCB: "cc_SLE" is PCPMode=1 & ccB=0xD { local tmp:1 = ((($(N)^$(V))|$(Z))==1); export tmp; } CONDCB: "cc_CNZ" is PCPMode=1 & ccB=0xE { local tmp:1 = ($(CNZ)==1); export tmp; } CONDCB: "cc_CNN" is PCPMode=1 & ccB=0xF { local tmp:1 = ($(CNZ)==0); export tmp; } #TODO CONDCAB: "cc_UC" is PCPMode=1 & ccAB=0x0 { local tmp:1 = 1; export tmp; } CONDCAB: "cc_Z" is PCPMode=1 & ccAB=0x1 { local tmp:1 = ($(Z)==1); export tmp; } CONDCAB: "cc_NZ" is PCPMode=1 & ccAB=0x2 { local tmp:1 = ($(Z)==0); export tmp; } CONDCAB: "cc_V" is PCPMode=1 & ccAB=0x3 { local tmp:1 = ($(V)==1); export tmp; } CONDCAB: "cc_ULT" is PCPMode=1 & ccAB=0x4 { local tmp:1 = ($(C)==1); export tmp; } CONDCAB: "cc_UGT" is PCPMode=1 & ccAB=0x5 { local tmp:1 = (($(C)|$(Z))==0); export tmp; } CONDCAB: "cc_SLT" is PCPMode=1 & ccAB=0x6 { local tmp:1 = (($(N)^$(V))==1); export tmp; } CONDCAB: "cc_SGT" is PCPMode=1 & ccAB=0x7 { local tmp:1 = ((($(N)^$(V))|$(Z))==0); export tmp; } CONDCAB: "cc_N" is PCPMode=1 & ccAB=0x8 { local tmp:1 = ($(N)==1); export tmp; } CONDCAB: "cc_NN" is PCPMode=1 & ccAB=0x9 { local tmp:1 = ($(N)==0); export tmp; } CONDCAB: "cc_NV" is PCPMode=1 & ccAB=0xA { local tmp:1 = ($(V)==0); export tmp; } CONDCAB: "cc_UGE" is PCPMode=1 & ccAB=0xB { local tmp:1 = ($(C)==0); export tmp; } CONDCAB: "cc_SGE" is PCPMode=1 & ccAB=0xC { local tmp:1 = (($(N)^$(V))==0); export tmp; } CONDCAB: "cc_SLE" is PCPMode=1 & ccAB=0xD { local tmp:1 = ((($(N)^$(V))|$(Z))==1); export tmp; } CONDCAB: "cc_CNZ" is PCPMode=1 & ccAB=0xE { local tmp:1 = 
($(CNZ)==1); export tmp; } CONDCAB: "cc_CNN" is PCPMode=1 & ccAB=0xF { local tmp:1 = ($(CNZ)==0); export tmp; } imm5: "#"^pcp_op0004 is pcp_op0004 { local tmp:4 = pcp_op0004; export tmp; } imm6: "#"^pcp_op0005 is pcp_op0005 { local tmp:4 = pcp_op0005; export tmp; } #imm10: "#"^pcp_op0009 is pcp_op0009 { local tmp:4 = pcp_op0009; export tmp; } imm16: "#"^pcp_op1631 is pcp_op1631 { local tmp:4 = pcp_op1631; export tmp; } offset6: "[#"^pcp_op0005^"]" is pcp_op0005 { local tmp:4 = (zext($(DPTR)) << 6) + pcp_op0005; export *[ram]:4 tmp; } offset6W: R0608, "[#"^pcp_op0005^"]" is pcp_op0005 & R0608 { local tmp:4 = (zext($(DPTR)) << 6) + pcp_op0005; *[ram]:4 tmp = R0608; } offset6RW: R0608, "[#"^pcp_op0005^"]" is pcp_op0005 & R0608 { local tmp:4 = R0608; local ea:4 = (zext($(DPTR)) << 6) + pcp_op0005; R0608 = *[ram]:4 ea; *[ram]:4 ea = tmp; } SRC: R4 is PCPMode=1 & pcp_op0708=0 & R4 { local tmp:4 = 0; export tmp; } SRC: R4+ is PCPMode=1 & pcp_op0708=1 & R4 { local tmp:4 = 1; export tmp; } SRC: R4- is PCPMode=1 & pcp_op0708=2 & R4 { local tmp:4 = -1; export tmp; } DST: R5 is PCPMode=1 & pcp_op0910=0 & R5 { local tmp:4 = 0; export tmp; } DST: R5+ is PCPMode=1 & pcp_op0910=1 & R5 { local tmp:4 = 1; export tmp; } DST: R5- is PCPMode=1 & pcp_op0910=2 & R5 { local tmp:4 = -1; export tmp; } SIZE0: "8" is PCPMode=1 & pcp_op0001=0 { local tmp:4 = zext(*[ram]:1 R4); export tmp;} SIZE0: "16" is PCPMode=1 & pcp_op0001=1 { local tmp:4 = zext(*[ram]:2 R4); export tmp;} SIZE0: "32" is PCPMode=1 & pcp_op0001=2 { local tmp:4 = *[ram]:4 R4; export tmp;} SIZE1: [R0305], "8" is PCPMode=1 & pcp_op0001=0 & R0305 { local tmp:4 = zext(*[ram]:1 R0305); export tmp;} SIZE1: [R0305], "16" is PCPMode=1 & pcp_op0001=1 & R0305 { local tmp:4 = zext(*[ram]:2 R0305); export tmp;} SIZE1: [R0305], "32" is PCPMode=1 & pcp_op0001=2 & R0305 { local tmp:4 = *[ram]:4 R0305; export tmp;} SIZE1W: R0608, [R0305], "8" is PCPMode=1 & pcp_op0001=0 & R0305 & R0608 { *[ram]:1 R0305 = R0608[0,8]; } SIZE1W: R0608, [R0305], 
"16" is PCPMode=1 & pcp_op0001=1 & R0305 & R0608 { *[ram]:2 R0305 = R0608[0,16]; } SIZE1W: R0608, [R0305], "32" is PCPMode=1 & pcp_op0001=2 & R0305 & R0608 { *[ram]:4 R0305 = R0608; } SIZE1RW: R0608, [R0305], "8" is PCPMode=1 & pcp_op0001=0 & R0305 & R0608 { local tmp:1 = R0608[0,8]; R0608 = zext(*[ram]:1 R0305); *[ram]:1 R0305 = tmp; } SIZE1RW: R0608, [R0305], "16" is PCPMode=1 & pcp_op0001=1 & R0305 & R0608 { local tmp:2 = R0608[0,16]; R0608 = zext(*[ram]:2 R0305); *[ram]:2 R0305 = tmp; } SIZE1RW: R0608, [R0305], "32" is PCPMode=1 & pcp_op0001=2 & R0305 & R0608 { local tmp:4 = R0608; R0608 = *[ram]:4 R0305; *[ram]:4 R0305 = tmp; } SIZE5: "8" is PCPMode=1 & pcp_op0505=0 & pcp_op0909=0 & R0608 { local tmp:4 = zext(*[ram]:1 R0608); export tmp;} SIZE5: "16" is PCPMode=1 & pcp_op0505=1 & pcp_op0909=0 & R0608 { local tmp:4 = zext(*[ram]:2 R0608); export tmp;} SIZE5: "32" is PCPMode=1 & pcp_op0505=0 & pcp_op0909=1 & R0608 { local tmp:4 = *[ram]:4 R0608; export tmp;} SIZE5W: [R0608], imm5, "8" is PCPMode=1 & pcp_op0505=0 & pcp_op0909=0 & imm5 & R0608 { *[ram]:1 (R0608 + imm5) = R0[0,8]; } SIZE5W: [R0608], imm5, "16" is PCPMode=1 & pcp_op0505=1 & pcp_op0909=0 & imm5 & R0608 { *[ram]:2 (R0608 + imm5) = R0[0,16]; } SIZE5W: [R0608], imm5, "32" is PCPMode=1 & pcp_op0505=0 & pcp_op0909=1 & imm5 & R0608 { *[ram]:4 (R0608 + imm5) = R0; } # Counter Control # 00 = perform xfer by CNT0 ; goto next # 01 = perform xfer by CNT0 ; dec CNT1 ; goto next # 10 = perform xfer by CNT0 ; dec CNT1 ; repeat dec ; goto next CNC: pcp_op0506 is PCPMode=1 & pcp_op0506 { local tmp:4 = pcp_op0506; export tmp; } # Counter Reload Value (COPY) # 001..111 = perform 1..7 xfer CNT03: pcp_op0204 is PCPMode=1 & pcp_op0204 { local tmp:4 = pcp_op0204; export tmp; } # Counter Reload Value Block Size (BCOPY) # 00 = block size 8 words # 10 = block size 2 words # 11 = block size 4 words CNT02: pcp_op0203 is PCPMode=1 & pcp_op0203 { local tmp:4 = pcp_op0203; export tmp; } EC: pcp_op0707 is PCPMode=1 & pcp_op0707 { 
local tmp:1 = pcp_op0707; export tmp; } EP: pcp_op0808 is PCPMode=1 & pcp_op0808 { local tmp:1 = pcp_op0808; export tmp; } INT: pcp_op0909 is PCPMode=1 & pcp_op0909 { local tmp:1 = pcp_op0909; export tmp; } ST: pcp_op1010 is PCPMode=1 & pcp_op1010 { local tmp:1 = pcp_op1010; export tmp; } SETCLR: "SET" is PCPMode=1 & pcp_op0505=1 { local tmp:1 = 1; export tmp; } SETCLR: "CLR" is PCPMode=1 & pcp_op0505=0 { local tmp:1 = 0; export tmp; } SDB: pcp_op0000 is PCPMode=1 & pcp_op0000 { local tmp:1 = pcp_op0000; export tmp; } EDA: pcp_op0101 is PCPMode=1 & pcp_op0101 { local tmp:1 = pcp_op0101; export tmp; } RTA: pcp_op0202 is PCPMode=1 & pcp_op0202 { local tmp:1 = pcp_op0202; export tmp; } DAC: pcp_op0303 is PCPMode=1 & pcp_op0303 { local tmp:1 = pcp_op0303; export tmp; } # Addressing Modes: # 0 - control # 1 - FPI # 2 - PRAM # 3 - Arithmetic # 4 - Immediate # 5 - FPI Immediate # 6 - Complex Maths # 7 - Jump # 3: 16-bit 6000|0b110000000000000 9e00|0b1001111000000000 # ADD Rb, Ra, cc_A :add R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x0 & R0608 & R0305 & CONDCA { #TODO flags N,Z,V,C if (CONDCA == 0) goto inst_next; R0608 = R0608 + R0305; } # 1: 16-bit 2000|0b10000000000000 de04|0b1101111000000100 # ADD.F Rb, [Ra], Size :add.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 & pcp_op0912=0x0 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { #TODO flags N,Z,V,C build SIZE1; R0608 = R0608 + SIZE1; } # 4: 16-bit 8000|0b1000000000000000 7e00|0b111111000000000 # ADD.I Ra, #imm6 :add.i R0608, imm6 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x0 & imm6 { #TODO flags N,Z,V,C R0608 = R0608 + imm6; } # 2: 16-bit 4000|0b100000000000000 be00|0b1011111000000000 # ADD.PI Ra, [#offset6] :add.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x0 & R0608 & offset6 { #TODO flags N,Z,V,C build offset6; R0608 = R0608 + offset6; } # 3: 16-bit 6a00|0b110101000000000 9400|0b1001010000000000 # AND Rb, Ra, cc_A :and R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x5 & 
R0608 & R0305 & CONDCA { #TODO flags N,Z if (CONDCA == 0) goto inst_next; R0608 = R0608 & R0305; } # 1: 16-bit 2a00|0b10101000000000 d404|0b1101010000000100 # AND.F Rb, [Ra], Size :and.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 & pcp_op0912=0x5 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { #TODO flags N,Z build SIZE1; R0608 = R0608 & SIZE1; } # 2: 16-bit 4a00|0b100101000000000 b400|0b1011010000000000 # AND.PI Ra, [#offset6] :and.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x5 & R0608 & offset6 { #TODO flags N,Z build offset6; R0608 = R0608 & offset6; } # 0: 16-bit 1800|0b1100000000000 e013|0b1110000000010011 # :bcopy DST, SRC, CNC, CNT02 is PCPMode=1 & addrmode=0 & pcp_op1212=0x1 & pcp_op1111=0x1 & DST & SRC & CNC & CNT02 & pcp_op0404=0x0 & pcp_op0001=0x0 { } # 4: 16-bit 9c00|0b1001110000000000 6200|0b110001000000000 # CHKB Ra, #imm5, S/C :chkb R0608, imm5, SETCLR is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0xe & SETCLR & imm5 { $(C) = (R0608 & (1 << imm5)) != 0; } # 4: 16-bit 9600|0b1001011000000000 6820|0b110100000100000 # CLR Ra, #imm5 :clr R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0xb & pcp_op0505=0x0 & imm5 { R0608 = R0608 & ~(1 << imm5); } # 5: 16-bit b000|0b1011000000000000 4c00|0b100110000000000 # CLR.F [Ra], #imm5, Size :clr.f [R0608], imm5, SIZE5 is PCPMode=1 & addrmode=0x5 & pcp_op1012=0x4 & R0608 & imm5 & SIZE5 { build SIZE5; *[ram]:4 R0608 = SIZE5 & ~(1 << imm5); } #TODO the manual does not specify # N negative # Z zero # V overflow # C carry macro Flags(r0, r1) { local val:4 = r0 - r1; $(N) = val s< 0; $(Z) = r0 == r1; $(V) = r0[31,1] | r1[31,1]; $(C) = r0 < r1; } # 3: 16-bit 6400|0b110010000000000 9a00|0b1001101000000000 # COMP Rb, Ra, cc_A :comp R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x2 & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; Flags(R0608, R0305); } # 1: 16-bit 2400|0b10010000000000 da04|0b1101101000000100 # COMP.F Rb, [Ra], Size :comp.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 
& pcp_op0912=0x2 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { build SIZE1; Flags(R0608, SIZE1); } # 4: 16-bit 8400|0b1000010000000000 7a00|0b111101000000000 # COMP.I Ra, #imm6 :comp.i R0608, imm6 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x2 & imm6 { Flags(R0608, imm6); } # 2: 16-bit 4400|0b100010000000000 ba00|0b1011101000000000 # COMP.PI Ra, [#offset6] :comp.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x2 & R0608 & offset6 { build offset6; Flags(R0608, offset6); } # 0: 16-bit 0800|0b100000000000 f000|0b1111000000000000 # :copy DST, SRC, CNC, CNT03, SIZE0 is PCPMode=1 & addrmode=0 & pcp_op1212=0x0 & pcp_op1111=0x1 & DST & SRC & CNC & CNT03 & SIZE0 { } #TODO DEBUG SLEIGH instead define pcodeop pcpdebug; # 7: 16-bit fc00|0b1111110000000000 0030|0b110000 # DEBUG EDA, DAC, RTA, SDB, cc_B :debug DAC, RTA, EDA, SDB, CONDCB is PCPMode=1 & addrmode=7 & pcp_op1012=0x7 & DAC & RTA & EDA & SDB & CONDCB & pcp_op0405=0x0 { if (CONDCB == 0) goto inst_next; pcpdebug(); } # 6: 16-bit c000|0b1100000000000000 3e07|0b11111000000111 # DINIT , Rb, Ra :dinit "<"^R0^">", R0608, R0305 is PCPMode=1 & addrmode=0x6 & pcp_op0912=0x0 & R0 & R0608 & R0305 & pcp_op0002=0x0 { R0 = 0; $(V) = R0305 == 0; $(Z) = (R0608 == 0) && (R0305 != 0); } # 6: 16-bit c200|0b1100001000000000 3c07|0b11110000000111 # DSTEP , Rb, Ra :dstep "<"^R0^">", R0608, R0305 is PCPMode=1 & addrmode=0x6 & pcp_op0912=0x1 & R0 & R0608 & R0305 & pcp_op0002=0x0 { #TODO flags Z not sure R0 = (R0 << 8) + (R0608 >> 24); R0608 = (R0608 << 8) + (R0 / R0305); R0 = R0 % R0305; $(Z) = R0 == 0; } # 0: 16-bit 1000|0b1000000000000 e870|0b1110100001110000 # :exit ST, EC, INT, EP, CONDCAB is PCPMode=1 & addrmode=0 & pcp_op1212=0x1 & pcp_op1111=0x0 & ST & EC & INT & EP & CONDCAB & pcp_op0406=0x0 { } # 3: 16-bit 7a00|0b111101000000000 8400|0b1000010000000000 # INB Rb, Ra, cc_A :inb R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0xd & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; R0608 = (R0608 & ~(1 << 
R0305[0,5])) | zext($(C) << R0305[0,5]); } # 4: 16-bit 9a00|0b1001101000000000 6420|0b110010000100000 # INB.I Ra, #imm5 :inb.i R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0xd & pcp_op0505=0x0 & imm5 { R0608 = (R0608 & ~(1 << imm5)) | zext($(C) << imm5); } imm6pc: reloc is PCPMode=1 & pcp_op0005 [ reloc = inst_start + pcp_op0005; ] { export *[ram]:4 reloc; } imm10pc: reloc is PCPMode=1 & pcp_op0009 [ reloc = inst_start + pcp_op0009; ] { export *[ram]:4 reloc; } imm16abs: pcp_op1631 is PCPMode=1 & pcp_op1631 { export *[ram]:4 pcp_op1631; } # 7: 16-bit e400|0b1110010000000000 1800|0b1100000000000 # JC offset6, cc_B :jc imm6pc, CONDCB is PCPMode=1 & addrmode=7 & pcp_op1012=0x1 & imm6pc & CONDCB { if (CONDCB == 0) goto inst_next; goto imm6pc; } # 7: 32-bit e800|0b1110100000000000 143f|0b1010000111111 # JC.A #address16, cc_B :jc.a imm16abs, CONDCB is PCPMode=1 & addrmode=7 & pcp_op1012=0x2 & CONDCB & pcp_op0005=0 ; imm16abs { if (CONDCB == 0) goto inst_next; goto imm16abs; } # 7: 16-bit f000|0b1111000000000000 0c07|0b110000000111 # JC.I Ra, cc_B :jc.i [R0305], CONDCB is PCPMode=1 & addrmode=7 & pcp_op1012=0x4 & R0305 & CONDCB & pcp_op0002=0x0 { if (CONDCB == 0) goto inst_next; local tmp:4 = inst_start + zext(R0305[0,16]); goto [tmp]; } # 7: 16-bit f400|0b1111010000000000 0807|0b100000000111 # JC.IA Ra, cc_B :jc.ia [R0305], CONDCB is PCPMode=1 & addrmode=7 & pcp_op1012=0x5 & R0305 & CONDCB & pcp_op0002=0x0 { if (CONDCB == 0) goto inst_next; local tmp:4 = zext(R0305[0,16]); goto [tmp]; } # 7: 16-bit e000|0b1110000000000000 1c00|0b1110000000000 # JL offset10 :jl imm10pc is PCPMode=1 & addrmode=7 & pcp_op1012=0x0 & imm10pc { goto imm10pc; } # 1: 16-bit 3200|0b11001000000000 cc04|0b1100110000000100 # LD.F Rb, [Ra], Size :ld.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 & pcp_op0912=0x9 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { R0608 = SIZE1; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 4: 16-bit 9800|0b1001100000000000 6600|0b110011000000000 # LD.I Ra, #imm6 :ld.i 
R0608, imm6 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0xc & imm6 { R0608 = imm6; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 5: 16-bit b400|0b1011010000000000 4800|0b100100000000000 # LD.IF [Ra], #offset5, Size :ld.if [R0608], imm5, SIZE5 is PCPMode=1 & addrmode=0x5 & pcp_op1012=0x5 & R0608 & imm5 & SIZE5 { R0608 = SIZE5 + imm5; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 3: 16-bit 7200|0b111001000000000 8c00|0b1000110000000000 # LD.P Rb, [Ra], cc_A :ld.p R0608, [R0305], CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x9 & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; local tmp:4 = zext($(DPTR) << 6) + zext(R0305[0,6]); R0608 = *[ram]:4 tmp; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 2: 16-bit 5200|0b101001000000000 ac00|0b1010110000000000 # LD.PI Ra, [#offset6] :ld.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x9 & R0608 & offset6 { R0608 = offset6; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 4: 32-bit 9200|0b1001001000000000 6c3f|0b110110000111111 # LDL.IL Ra, #imm16 :ldl.il R0608, imm16 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x9 & pcp_op0005=0x0 ; imm16 { #TODO are flags correct R0608[0,16] = imm16[0,16]; $(N) = R0608[0,16] s< 0; $(Z) = R0608[0,16] == 0; } # 4: 32-bit 9000|0b1001000000000000 6e3f|0b110111000111111 # :ldl.iu R0608, imm16 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x8 & pcp_op0005=0x0 ; imm16 { #TODO are flags correct R0608[16,16] = imm16[0,16]; $(N) = R0608[16,16] s< 0; $(Z) = R0608[16,16] == 0; } # 2: 16-bit 4800|0b100100000000000 b600|0b1011011000000000 # MCLR.PI Ra, [#offset6] :mclr.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0005 & pcp_op0912=0x4 & R0608 & offset6 { R0608 = R0608 & offset6; local tmp:4 = zext($(DPTR) << 6) + pcp_op0005; *[ram]:4 tmp = R0608; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 6: 16-bit c400|0b1100010000000000 3a07|0b11101000000111 # MINIT , Rb, Ra :minit "<"^R0^">", R0608, R0305 is PCPMode=1 & addrmode=0x6 & pcp_op0912=0x2 & R0 & R0608 & R0305 & pcp_op0002=0x0 { R0 = 0; $(Z) = 
(R0608 == 0) || (R0305 == 0); } # 3: 16-bit 7800|0b111100000000000 8600|0b1000011000000000 # MOV Rb, Ra, cc_A :mov R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0xc & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; R0608 = R0305; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 2: 16-bit 4c00|0b100110000000000 b200|0b1011001000000000 # MSET.PI Ra, [#offset6] :mset.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0005 & pcp_op0912=0x6 & R0608 & offset6 { R0608 = R0608 | offset6; local tmp:4 = zext($(DPTR) << 6) + pcp_op0005; *[ram]:4 tmp = R0608; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 6: 16-bit c600|0b1100011000000000 3807|0b11100000000111 # :mstep.l R0608, R0305 is PCPMode=1 & addrmode=0x6 & pcp_op0912=0x3 & R0608 & R0305 & pcp_op0002=0x0 { } # 6: 16-bit c800|0b1100100000000000 3607|0b11011000000111 # :mstep.u R0608, R0305 is PCPMode=1 & addrmode=0x6 & pcp_op0912=0x4 & R0608 & R0305 & pcp_op0002=0x0 { } # 3: 16-bit 6600|0b110011000000000 9800|0b1001100000000000 # NEG Rb, Ra, cc_A :neg R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x3 & R0608 & R0305 & CONDCA { #TODO flags if (CONDCA == 0) goto inst_next; R0608 = -R0305; $(N) = R0608 s< 0; $(Z) = R0608 == 0; $(V) = R0608[31,1]; $(C) = R0305[31,1]; } # 0: 16-bit 0000|0b0 ffff|0b1111111111111111 # NOP :nop is PCPMode=1 & addrmode=0 & pcp_op1212=0x0 & pcp_op1111=0x0 & pcp_op0010=0x0 { local NOP:1 = 0; NOP = NOP; } # 3: 16-bit 6800|0b110100000000000 9600|0b1001011000000000 # NOT Rb, Ra, cc_A :not R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x4 & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; R0608 = ~R0305; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 3: 16-bit 6e00|0b110111000000000 9000|0b1001000000000000 # OR Rb, Ra, cc_A :or R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x7 & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; R0608 = R0608 | R0305; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 1: 16-bit 2e00|0b10111000000000 
# d004|0b1101000000000100   (tail of the preceding "# 1: 16-bit 2e00|..." opcode/mask note)
# OR.F Rb, [Ra], Size -- OR Rb with the value fetched via the SIZE1 subtable; sets N/Z.
:or.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 & pcp_op0912=0x7 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { R0608 = R0608 | SIZE1; $(N) = R0608 s< 0; $(Z) = R0608 == 0; }
# 2: 16-bit 4e00|0b100111000000000 b000|0b1011000000000000
# OR.PI Ra, [#offset6] -- OR Ra with a DPTR-relative PRAM word; sets N/Z.
:or.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x7 & R0608 & offset6 { R0608 = R0608 | offset6; $(N) = R0608 s< 0; $(Z) = R0608 == 0; }
# 3: 16-bit 7c00|0b111110000000000 8200|0b1000001000000000
# PRI Rb, Ra, cc_A -- conditional on cc_A; body scans Ra two bits at a time
# (semantics marked unverified by the author's TODO below).
# NOTE(review): both "goto ;" statements below have lost their targets --
# angle-bracketed SLEIGH <label> tokens appear stripped by the text extraction,
# so the scan loop cannot compile as stored. Restore the label uses and their
# <label> definition points from the upstream tricore.pcp.sinc.
:pri R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0xe & R0608 & R0305 & CONDCA {
  #TODO pcodeop or this? also double check
  if (CONDCA == 0) goto inst_next;
  local index:4 = 0;
  local tmp:4 = R0305;
  if (tmp == 0) goto ;
  tmp = tmp >> 2;
  index = index + 1;
  if (tmp != 0) goto ;
  R0608 = zext(0x20 * (index == 0)) + (index * zext(index != 0));
  $(N) = R0608 s< 0;
  $(Z) = R0608 == 0;
}
# 4: 16-bit 8e00|0b1000111000000000 7020|0b111000000100000
# RL Ra, #imm5 -- rotate left by imm5; C gets bit (32-imm5) of the original
# value, i.e. the last bit rotated out of the top. Sets N/Z.
:rl R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x7 & pcp_op0505=0x0 & imm5 {
  #TODO double check
  local tmp:4 = R0608;
  R0608 = R0608 << imm5;
  $(C) = (tmp & (1 << (32 - imm5))) != 0;
  tmp = tmp >> (32 - imm5);
  R0608 = tmp | R0608;
  $(N) = R0608 s< 0;
  $(Z) = R0608 == 0;
}
# 4: 16-bit 8c00|0b1000110000000000 7220|0b111001000100000
# RR Ra, #imm5 -- rotate right by imm5; sets N/Z (no carry written here).
:rr R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x6 & pcp_op0505=0x0 & imm5 {
  #TODO double check
  local tmp:4 = R0608;
  R0608 = R0608 >> imm5;
  tmp = tmp << (32 - imm5);
  R0608 = tmp | R0608;
  $(N) = R0608 s< 0;
  $(Z) = R0608 == 0;
}
# 4: 16-bit 9400|0b1001010000000000 6a20|0b110101000100000
# SET Ra, #imm5 -- set bit imm5 of Ra; no flags written.
:set R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0xa & pcp_op0505=0x0 & imm5 { R0608 = R0608 | (1 << imm5); }
# 5: 16-bit ac00|0b1010110000000000 5000|0b101000000000000
# SET.F [Ra], #imm5, Size -- read via SIZE5, set bit imm5, store back.
# (The store statement is completed on the next line of this collapsed dump.)
:set.f [R0608], imm5, SIZE5 is PCPMode=1 & addrmode=0x5 & pcp_op1012=0x3 & R0608 & imm5 & SIZE5 { build SIZE5; *[ram]:4
R0608 = SIZE5 | (1 << imm5); } # 4: 16-bit 8a00|0b1000101000000000 7420|0b111010000100000 # SHL Ra, #imm5 :shl R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x5 & pcp_op0505=0x0 & imm5 { $(C) = (R0608 & (1 << (32 - imm5))) != 0; R0608 = R0608 << imm5; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 4: 16-bit 8800|0b1000100000000000 7620|0b111011000100000 # SHR Ra, #imm5 :shr R0608, imm5 is PCPMode=1 & addrmode=4 & R0608 & pcp_op0912=0x4 & pcp_op0505=0x0 & imm5 { R0608 = R0608 >> imm5; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 1: 16-bit 3400|0b11010000000000 ca04|0b1100101000000100 # ST.F Rb, [Ra], Size :st.f SIZE1W is PCPMode=1 & addrmode=0x1 & pcp_op0912=0xa & pcp_op0202=0x0 & SIZE1W { build SIZE1W; } # 5: 16-bit b800|0b1011100000000000 4400|0b100010000000000 # ST.IF [Ra], #offset5, Size :st.if SIZE5W is PCPMode=1 & addrmode=0x5 & pcp_op1012=0x6 & SIZE5W { build SIZE5W; } # 3: 16-bit 7400|0b111010000000000 8a00|0b1000101000000000 # ST.P Rb, [Ra], cc_A :st.p R0608, [R0305], CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0xa & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; local tmp:4 = zext($(DPTR) << 6) + zext(R0305[0,6]); *[ram]:4 tmp = R0608; } # 2: 16-bit 5400|0b101010000000000 aa00|0b1010101000000000 # ST.PI Rb, [#offset6] :st.pi offset6W is PCPMode=1 & addrmode=2 & pcp_op0912=0xa & offset6W { build offset6W; } # 3: 16-bit 6200|0b110001000000000 9c00|0b1001110000000000 # SUB Rb, Ra, cc_A :sub R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x1 & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; R0608 = R0608 - R0305; Flags(R0608, R0305); } # 1: 16-bit 2200|0b10001000000000 dc04|0b1101110000000100 # SUB.F Rb, [Ra], Size :sub.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 & pcp_op0912=0x1 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { build SIZE1; local tmp:4 = SIZE1; Flags(R0608, tmp); R0608 = R0608 - tmp; } # 4: 16-bit 8200|0b1000001000000000 7c00|0b111110000000000 # SUB.I Ra, #imm6 :sub.i R0608, imm6 is PCPMode=1 & addrmode=4 
& R0608 & pcp_op0912=0x1 & imm6 { Flags(R0608, imm6); R0608 = R0608 - imm6; } # 2: 16-bit 4200|0b100001000000000 bc00|0b1011110000000000 # SUB.PI Ra, [#offset6] :sub.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x1 & R0608 & offset6 { Flags(R0608, offset6); R0608 = R0608 - offset6; } # 1: 16-bit 3600|0b11011000000000 c804|0b1100100000000100 # XCH.F Rb, [Ra], Size :xch.f SIZE1RW is PCPMode=1 & addrmode=0x1 & pcp_op0912=0xb & pcp_op0202=0x0 & R0608 & SIZE1RW { build SIZE1RW; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 2: 16-bit 5600|0b101011000000000 a800|0b1010100000000000 # XCH.PI Ra, [#offset6] :xch.pi offset6RW is PCPMode=1 & addrmode=2 & pcp_op0912=0xb & R0608 & offset6RW { build offset6RW; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 3: 16-bit 7000|0b111000000000000 8e00|0b1000111000000000 # XOR Rb, Ra, cc_A :xor R0608, R0305, CONDCA is PCPMode=1 & addrmode=0x3 & pcp_op0912=0x8 & R0608 & R0305 & CONDCA { if (CONDCA == 0) goto inst_next; R0608 = R0608 ^ R0305; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 1: 16-bit 3000|0b11000000000000 ce04|0b1100111000000100 # XOR.F Rb, [Ra], Size :xor.f R0608, SIZE1 is PCPMode=1 & addrmode=0x1 & pcp_op0912=0x8 & pcp_op0202=0x0 & R0608 & R0305 & SIZE1 { R0608 = R0608 ^ SIZE1; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } # 2: 16-bit 5000|0b101000000000000 ae00|0b1010111000000000 # XOR.PI Ra, [#offset6] :xor.pi R0608, offset6 is PCPMode=1 & addrmode=2 & pcp_op0912=0x8 & R0608 & offset6 { R0608 = R0608 ^ offset6; $(N) = R0608 s< 0; $(Z) = R0608 == 0; } ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.pspec ================================================ ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.sinc ================================================ define alignment=2; define space ram type=ram_space size=4 default; define space register type=register_space size=4; #TODO This is probably in the spec define register 
offset=0x00 size=4 contextreg; define context contextreg PCPMode=(0,0) ; # Data General Purpose Registers define register offset=0xFF00 size=8 [ e0 e2 e4 e6 e8 e10 e12 e14 ]; define register offset=0xFF00 size=4 [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; # Address General Purpose Registers define register offset=0xFF80 size=8 [ p0 p2 p4 p6 p8 p10 p12 p14 ]; define register offset=0xFF80 size=4 [ a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 ]; # Program Counter define register offset=0xFE08 size=4 [ PC ]; # Program Status Word Register define register offset=0xFE04 size=4 [ PSW ]; #TODO bitrange vs define vs context # define bitrange PSW_USB=PSW[24,8] # PSW_C=PSW[31,1] # PSW_V=PSW[30,1] # PSW_SV=PSW[29,1] # PSW_AV=PSW[28,1] # PSW_SAV=PSW[27,1] # PSW_S=PSW[14,1] # PSW_PRS=PSW[12,2] # PSW_IO=PSW[10,2] # PSW_IS=PSW[9,1] # PSW_GW=PSW[8,1] # PSW_CDE=PSW[7,1] # PSW_CDC=PSW[0,7]; @define PSW_USB "PSW[24,8]" @define PSW_C "PSW[31,1]" @define PSW_FS "PSW[31,1]" @define PSW_V "PSW[30,1]" @define PSW_FI "PSW[30,1]" @define PSW_SV "PSW[29,1]" @define PSW_FV "PSW[29,1]" @define PSW_AV "PSW[28,1]" @define PSW_FZ "PSW[28,1]" @define PSW_SAV "PSW[27,1]" @define PSW_FU "PSW[27,1]" @define PSW_FX "PSW[26,1]" @define PSW_RM "PSW[24,2]" @define PSW_S "PSW[14,1]" @define PSW_PRS "PSW[12,2]" @define PSW_IO "PSW[10,2]" @define PSW_IS "PSW[9,1]" @define PSW_GW "PSW[8,1]" @define PSW_CDE "PSW[7,1]" @define PSW_CDC "PSW[0,7]" # Previous Context Information and Pointer Register define register offset=0xFE00 size=4 [ PCXI ]; # define context PCXI # PCXO=(0,15) # PCXS=(16,19) # UL=(20,20) # PIE=(21,21) # PCPN=(22,29) # ; # Interrupt Stack Pointer Register define register offset=0xFE28 size=4 [ ISP ]; # System Control Register define register offset=0xFE14 size=4 [ SYSCON ]; # define context SYSCON # FCDSF=(0,0) # PROTEN=(1,1) # TPROTEN=(2,2) # IS=(3,3) # TS=(4,4) # U1_IED=(16,16) # U1_IOS=(17,17) # ; # CPU Identification Register define register offset=0xFE18 size=4 [ 
CPU_ID ]; # define context CPU_ID # MOD_REV=(0,7) # MOD_32B=(8,15) # MOD=(16,31) # ; # Core Identification Register define register offset=0xFE1C size=4 [ CORE_ID ]; # Compatibility Mode Register define register offset=0x9400 size=4 [ COMPAT ]; # SIST Mode Access Control Register define register offset=0x900C size=4 [ SMACON ]; # Free CSA List Head Pointer Register define register offset=0xFE38 size=4 [ FCX ]; # define context FCX # FCXO=(0,15) # FCXS=(16,19) # ; # Free CSA List Limit Pointer Register define register offset=0xFE3C size=4 [ LCX ]; # define context LCX # LCXO=(0,15) # LCXS=(16,19) # ; # ICU Interrupt Control Register define register offset=0xFE2C size=4 [ ICR ]; define register offset=0xFE2E size=1 [ PIPN ]; # define context ICR # CCPN=(0,7) # IE=(15,15) # PIPN=(16,23) # ; @define ICR_PIPN "ICR[16,8]" @define ICR_IE "ICR[15,1]" @define ICR_CCPN "ICR[0,8]" # Base Interrupt Vector Table Pointer define register offset=0xFE20 size=4 [ BIV ]; # define context BIV # VSS=(0,0) # ; # Base Trap Vector Table Pointer define register offset=0xFE24 size=4 [ BTV ]; # Program Synchronous Error Trap Register define register offset=0x9200 size=4 [ PSTR ]; # Data Synchronous Error Trap Register define register offset=0x9010 size=4 [ DSTR ]; # Data Asynchronous Error Trap Register define register offset=0x9018 size=4 [ DATR ]; # Data Error Address Register define register offset=0x901C size=4 [ DEADD ]; # Program Integrity Error Trap Register define register offset=0x9214 size=4 [ PIETR ]; # Program Integrity Error Address Register define register offset=0x9210 size=4 [ PIEAR ]; # Data Integrity Error Trap Register define register offset=0x9024 size=4 [ DIETR ]; # Data Integrity Error Address Register define register offset=0x9020 size=4 [ DIEAR ]; # Programmable Memory Access Register define register offset=0x8100 size=4 [ PMA0 PMA1 PMA2 ]; # Program Memory Configuration Registers define register offset=0x9204 size=4 [ PCON1 PCON2 PCON0 ]; # Data Memory Configuration 
Registers define register offset=0x9040 size=4 [ DCON0 ]; define register offset=0x9008 size=4 [ DCON1 ]; define register offset=0x9000 size=4 [ DCON2 ]; # Data Protection Range Register Lower & Upper Bound define register offset=0xC000 size=4 [ DPR0_L DPR0_U DPR1_L DPR1_U DPR2_L DPR2_U DPR3_L DPR3_U DPR4_L DPR4_U DPR5_L DPR5_U DPR6_L DPR6_U DPR7_L DPR7_U DPR8_L DPR8_U DPR9_L DPR9_U DPR10_L DPR10_U DPR11_L DPR11_U DPR12_L DPR12_U DPR13_L DPR13_U DPR14_L DPR14_U DPR15_L DPR15_U ]; # Code Protection Range Register Lower & Upper Bound define register offset=0xD000 size=4 [ CPR0_L CPR0_U CPR1_L CPR1_U CPR2_L CPR2_U CPR3_L CPR3_U CPR4_L CPR4_U CPR5_L CPR5_U CPR6_L CPR6_U CPR7_L CPR7_U CPR8_L CPR8_U CPR9_L CPR9_U CPR10_L CPR10_U CPR11_L CPR11_U CPR12_L CPR12_U CPR13_L CPR13_U CPR14_L CPR14_U CPR15_L CPR15_U ]; # Data Protection Read Enable Set Configuration Register define register offset=0xE010 size=4 [ DPRE_0 DPRE_1 DPRE_2 DPRE_3 ]; # Data Protection Write Enable Set Configuration Register define register offset=0xE020 size=4 [ DPWE_0 DPWE_1 DPWE_2 DPWE_3 ]; # Code Protection Execute Enable Set Configuration Register define register offset=0xE000 size=4 [ CPXE_0 CPXE_1 CPXE_2 CPXE_3 ]; # Temporal Protect System (TPS) Timer Register define register offset=0xE404 size=4 [ TPS_TIMER0 TPS_TIMER1 TPS_TIMER2 ]; # TPS Control Register define register offset=0xE400 size=4 [ TPS_CON ]; # define context TPS_CON # TEXP0=(0,0) # TEXP1=(1,1) # TEXP2=(2,2) # TTRAP=(16,16) # ; # FPU Trap Control Register define register offset=0xA000 size=4 [ FPU_TRAP_CON ]; # define context FPU_TRAP_CON # TST=(0,0) # TCL=(1,1) # RM=(8,9) # FXE=(18,18) # FUE=(19,19) # FZE=(20,20) # FVE=(21,21) # FIE=(22,22) # FX=(26,26) # FU=(27,27) # FZ=(28,28) # FV=(29,29) # FI=(30,30) # ; # FPU Trapping Instruction Program Counter define register offset=0xA004 size=4 [ FPU_TRAP_PC ]; # FPU Trapping Instruction Opcode Register define register offset=0xA008 size=4 [ FPU_TRAP_OPC ]; # define context FPU_TRAP_OPC # 
OPC=(0,7) # FMT=(8,8) # DREG=(16,19) # ; # FPU Trapping Instruction Operand SRC1 Register define register offset=0xA010 size=4 [ FPU_TRAP_SRC1 FPU_TRAP_SRC2 FPU_TRAP_SRC3 ]; # Core Debug Controller (CDC) Registers # Debug Status Register define register offset=0xFD00 size=4 [ DBGSR ]; # define context DBGSR # DE=(0,0) # HALT=(1,2) # SIH=(3,3) # SUSP=(4,4) # PREVSUSP=(6,6) # PEVT=(7,7) # EVTSRC=(8,12) # ; @define DBGSR_EVTSRC "DBGSR[8,5]" @define DBGSR_PEVT "DBGSR[7,1]" @define DBGSR_PREVSUSP "DBGSR[6,1]" @define DBGSR_SUSP "DBGSR[4,1]" @define DBGSR_SIH "DBGSR[3,1]" @define DBGSR_HALT "DBGSR[1,2]" @define DBGSR_DE "DBGSR[0,1]" # External Event Register define register offset=0xFD08 size=4 [ EXEVT ]; # define context EXEVT # EVTA=(0,2) # BBM=(3,3) # BOD=(4,4) # SUSP=(5,5) # CNT=(6,7) # ; # Core Register Access Event Register define register offset=0xFD0C size=4 [ CREVT ]; # define context CREVT # CREVT_EVTA=(0,2) # CREVT_BBM=(3,3) # CREVT_BOD=(4,4) # CREVT_SUSP=(5,5) # CREVT_CNT=(6,7) # ; # Software Debug Event Register define register offset=0xFD10 size=4 [ SWEVT ]; # define context SWEVT # SWEVT_EVTA=(0,2) # SWEVT_BBM=(3,3) # SWEVT_BOD=(4,4) # SWEVT_SUSP=(5,5) # SWEVT_CNT=(6,7) # ; # Trigger Accumulator Register define register offset=0xFD30 size=4 [ TRIG_ACC ]; # define context TRIG_ACC # T0=(0,0) # T1=(1,1) # T2=(2,2) # T3=(3,3) # T4=(4,4) # T5=(5,5) # T6=(6,6) # T7=(7,7) # ; # Debug Monitor Start Address Register define register offset=0xFD40 size=4 [ DMS ]; # Debug Context Save Area Pointer Register define register offset=0xFD44 size=4 [ DCX ]; # Debug Trap Control Register define register offset=0xFD48 size=4 [ DBGTCR ]; # Application Space Identifier Register define register offset=0x8004 size=4 [ TASK_ASI ]; # Software Breakpoint Service Request Control #define register offset=0xFFB0 size=4 [ SBSRC3 SBSRC2 SBSRC1 SBSRC0 ]; # Trigger Event Configuration / Address Register define register offset=0xF000 size=4 [ TR0EVT TR0ADR TR1EVT TR1ADR TR2EVT TR2ADR TR3EVT 
TR3ADR TR4EVT TR4ADR TR5EVT TR5ADR TR6EVT TR6ADR TR7EVT TR7ADR ]; # define context TRxEVT # EVTA=(0,2) # BBM=(3,3) # BOD=(4,4) # SUSP=(5,5) # CNT=(6,7) # TYP=(12,12) # RNG=(13,13) # ASI_EN=(15,15) # ASI=(16,20) # AST=(27,27) # ALD=(28,28) # ; # Counter Control Register define register offset=0xFC00 size=4 [ CCTRL ]; # define context CCTRL # CM=(0,0) # CE=(1,1) # M1=(2,4) # M2=(5,7) # M3=(8,10) # ; # CPU Clock Cycle Count Register define register offset=0xFC04 size=4 [ CCNT ]; # Instruction Count Register define register offset=0xFC08 size=4 [ ICNT ]; # Mult-Count Register define register offset=0xFC0C size=4 [ M1CNT M2CNT M3CNT ]; define token instr (16) op0003=(0, 3) op0005=(0, 5) op0006=(0, 6) op0007=(0, 7) op0404=(4, 4) op0405=(4, 5) op0407=(4, 7) op0606=(6, 6) op0607=(6, 7) op0707=(7, 7) op0810=(8, 10) Rd0811=(8, 11) Ra0811=(8, 11) Re0811=(8, 11) Ree0811=(8, 11) Reo0811=(8, 11) ReN0811=(8, 11) op0811=(8, 11) Rp0811=(8, 11) Rpe0811=(8, 11) Rpo0811=(8, 11) op0815=(8, 15) sop0815=(8, 15) signed op1111=(11, 11) Rd1215=(12, 15) op1215=(12, 15) sop1215=(12, 15) signed Ra1215=(12, 15) Rpe1215=(12, 15) Rpo1215=(12, 15) op1515=(15, 15) ; define token instr2 (16) op1617=(0, 1) op1620=(0, 4) sop1620=(0, 4) signed op1621=(0, 5) op1622=(0, 6) op1623=(0, 7) op1627=(0, 11) sop1627=(0, 11) signed sop1630=(0, 14) signed op1631=(0, 15) op1819=(2, 3) op1823=(2, 7) op1827=(2, 11) op2020=(4, 4) op2023=(4, 7) op2027=(4, 11) op2122=(5, 6) op2123=(5, 7) op2127=(5, 11) op2131=(5, 15) op2225=(6, 9) op2227=(6, 11) sop2227=(6, 11) signed op2327=(7, 11) Rd2427=(8, 11) Re2427=(8, 11) Ree2427=(8, 11) Reo2427=(8, 11) op2627=(10, 11) Rd2831=(12, 15) Ra2831=(12, 15) Re2831=(12, 15) Ree2831=(12, 15) Reo2831=(12, 15) op2831=(12, 15) sop2831=(12, 15) signed op3131=(15, 15) ; attach variables [ Rd0811 Rd1215 Rd2427 Rd2831 ] [ d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15 ]; attach variables [ Re0811 Re2427 Re2831 ] [ e0 _ e2 _ e4 _ e6 _ e8 _ e10 _ e12 _ e14 _ ]; attach variables [ ReN0811 ] 
[ e2 _ e4 _ e6 _ e8 _ e10 _ e12 _ e14 _ e0 _ ]; attach variables [ Ree0811 Ree2427 Ree2831 ] [ d0 _ d2 _ d4 _ d6 _ d8 _ d10 _ d12 _ d14 _]; attach variables [ Reo0811 Reo2427 Reo2831 ] [ d1 _ d3 _ d5 _ d7 _ d9 _ d11 _ d13 _ d15 _]; attach variables [ Ra0811 Ra1215 Ra2831 ] [ a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15 ]; attach variables [ Rp0811 ] [ p0 _ p2 _ p4 _ p6 _ p8 _ p10 _ p12 _ p14 _ ]; attach variables [ Rpe0811 Rpe1215 ] [ a0 _ a2 _ a4 _ a6 _ a8 _ a10 _ a12 _ a14 _]; attach variables [ Rpo0811 Rpo1215 ] [ a1 _ a3 _ a5 _ a7 _ a9 _ a11 _ a13 _ a15 _]; # 0 MMU # * 0 VAF # * 1 VAP # 1 Internal Protection Traps # * 1 PRIV # * 2 MPR # * 3 MPW # * 4 MPX # * 5 MPP # * 6 MPN # * 7 GRWP # 2 Instruction Errors # * 1 IOPC # * 2 UOPC # * 3 OPD # * 4 ALN # * 5 MEM # 3 Context Management # * 1 FCD # * 2 CDO # * 3 CDU # * 4 FCU # * 5 CSU # * 6 CTYP # * 7 NEST # 4 System Bus and Peripheral Errors # * 1 PSE # * 2 DSE # * 3 DAE # * 4 CAE # * 5 PIE # * 6 DIE # * 7 TAE # 5 Assertion Traps # * 1 OVF # * 2 SOVF # 6 System Call # * 0-255 SYS # 7 Non-Maskable Interrupt # * 0 NMI define pcodeop trap; define pcodeop cache_address_ivld; define pcodeop cache_address_wb; define pcodeop cache_address_wi; define pcodeop cache_index_ivld; define pcodeop cache_index_wb; define pcodeop cache_index_wi; define pcodeop round16; define pcodeop crc32; # float define pcodeop denorm_to_zero; define pcodeop ieee754_round; define pcodeop ieee754_32bit_format; define pcodeop coprocessor; define pcodeop debug; define pcodeop dsync; define pcodeop isync; define pcodeop tlbdemap; define pcodeop tlbflusha; define pcodeop tlbflushb; define pcodeop tlbmap; define pcodeop tlbprobea; define pcodeop tlbprobei; define pcodeop wait; @if defined(TRICORE_VERBOSE) @else define pcodeop bmerge; define pcodeop bsplit; define pcodeop load_lower_context; define pcodeop load_upper_context; define pcodeop store_lower_context; define pcodeop store_upper_context; @endif #TODO define pcodeop reverse16; # macro 
reverse16(src, dst) { # dst[0,1]=src[15,1]; # dst[1,1]=src[14,1]; # dst[2,1]=src[13,1]; # dst[3,1]=src[12,1]; # dst[4,1]=src[11,1]; # dst[5,1]=src[10,1]; # dst[6,1]=src[9,1]; # dst[7,1]=src[8,1]; # dst[8,1]=src[7,1]; # dst[9,1]=src[6,1]; # dst[10,1]=src[5,1]; # dst[11,1]=src[4,1]; # dst[12,1]=src[3,1]; # dst[13,1]=src[2,1]; # dst[14,1]=src[1,1]; # dst[15,1]=src[0,1]; # } macro overflowflagsd(r) { $(PSW_V) = r[63,1]; $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = r[63,1] ^ r[62,1]; $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } macro overflowflagsww(r1, r0) { $(PSW_V) = r1[31,1] | r0[31,1]; $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = (r1[31,1] ^ r1[30,1]) | (r0[31,1] ^ r0[30,1]); $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } macro advoverflowflagsww(r1, r0) { $(PSW_V) = 0; $(PSW_AV) = (r1[31,1] ^ r1[30,1]) | (r0[31,1] ^ r0[30,1]); $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } macro advoverflowflags(r) { $(PSW_V) = 0; $(PSW_AV) = r[31,1] ^ r[30,1]; $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } macro overflowflags(r) { $(PSW_V) = r[31,1]; $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = r[31,1] ^ r[30,1]; $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } macro overflowflagsh(r1, r0) { $(PSW_V) = r1[15,1] | r0[15,1]; $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = (r1[15,1] ^ r1[14,1]) | (r0[15,1] ^ r0[14,1]); $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } macro overflowflagsb(r3, r2, r1, r0) { $(PSW_V) = r3[7,1] | r2[7,1] | r1[7,1] | r0[7,1]; $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = (r3[7,1] ^ r3[6,1]) | (r2[7,1] ^ r2[6,1]) | (r1[7,1] ^ r1[6,1]) | (r0[7,1] ^ r0[6,1]); $(PSW_SAV) = $(PSW_AV) | $(PSW_SAV); } #define pcodeop ssov; macro ssov(res,x,y) { local max_pos = (1 << (y - 1)) - 1; local max_neg = -(1 << (y - 1)); local sc1 = x s> max_pos; local sc2 = x s< max_neg; res = (max_pos * zext(sc1 != 0)) + (max_neg * zext(sc1 == 0 && sc2 != 0)) + (x * zext(sc1 == 0 && sc2 == 0)); } #define pcodeop suov; macro suov(res,x,y) { local max_pos = (1 << y) - 1; local sc1 = x s> max_pos; local sc2 = x s< 0; res = (max_pos * 
zext(sc1 != 0)) + (x * zext(sc1 == 0 && sc2 == 0)); } macro int_abs(res, val) { local acond = val s< 0; res = (val * zext(acond == 0)) + (-val * zext(acond != 0)); } macro int_abs1(res, val) { local acond = val s< 0; res = (val * (acond == 0)) + (-val * (acond != 0)); } macro ternary(res, cond, avar, bvar) { res = (avar * zext(cond != 0)) + (bvar * zext(cond == 0)); } @if defined(TRICORE_VERBOSE) macro load_lower_context(EA) { # dummy = *[ram]:4 EA ; EA = EA + 4; # dummy = *[ram]:4 EA ; EA = EA + 4; a2 = *[ram]:4 EA ; EA = EA + 4; a3 = *[ram]:4 EA ; EA = EA + 4; d0 = *[ram]:4 EA ; EA = EA + 4; d1 = *[ram]:4 EA ; EA = EA + 4; d2 = *[ram]:4 EA ; EA = EA + 4; d3 = *[ram]:4 EA ; EA = EA + 4; a4 = *[ram]:4 EA ; EA = EA + 4; a5 = *[ram]:4 EA ; EA = EA + 4; a6 = *[ram]:4 EA ; EA = EA + 4; a7 = *[ram]:4 EA ; EA = EA + 4; d4 = *[ram]:4 EA ; EA = EA + 4; d5 = *[ram]:4 EA ; EA = EA + 4; d6 = *[ram]:4 EA ; EA = EA + 4; d7 = *[ram]:4 EA ; EA = EA + 4; } @endif @if defined(TRICORE_VERBOSE) macro store_lower_context(EA) { *[ram]:4 EA = PCXI; EA = EA + 4; *[ram]:4 EA = a11; EA = EA + 4; *[ram]:4 EA = a2; EA = EA + 4; *[ram]:4 EA = a3; EA = EA + 4; *[ram]:4 EA = d0; EA = EA + 4; *[ram]:4 EA = d1; EA = EA + 4; *[ram]:4 EA = d2; EA = EA + 4; *[ram]:4 EA = d3; EA = EA + 4; *[ram]:4 EA = a4; EA = EA + 4; *[ram]:4 EA = a5; EA = EA + 4; *[ram]:4 EA = a6; EA = EA + 4; *[ram]:4 EA = a7; EA = EA + 4; *[ram]:4 EA = d4; EA = EA + 4; *[ram]:4 EA = d5; EA = EA + 4; *[ram]:4 EA = d6; EA = EA + 4; *[ram]:4 EA = d7; EA = EA + 4; } @endif macro _restore_upper_context(EA) { a10 = *[ram]:4 EA; EA = EA + 4; a11 = *[ram]:4 EA; EA = EA + 4; d8 = *[ram]:4 EA; EA = EA + 4; d9 = *[ram]:4 EA; EA = EA + 4; d10 = *[ram]:4 EA; EA = EA + 4; d11 = *[ram]:4 EA; EA = EA + 4; a12 = *[ram]:4 EA; EA = EA + 4; a13 = *[ram]:4 EA; EA = EA + 4; a14 = *[ram]:4 EA; EA = EA + 4; a15 = *[ram]:4 EA; EA = EA + 4; d12 = *[ram]:4 EA; EA = EA + 4; d13 = *[ram]:4 EA; EA = EA + 4; d14 = *[ram]:4 EA; EA = EA + 4; d15 = *[ram]:4 EA; 
EA = EA + 4; } @if defined(TRICORE_VERBOSE) macro restore_upper_context(EA) { PCXI = *[ram]:4 EA; EA = EA + 4; PSW = *[ram]:4 EA; EA = EA + 4; _restore_upper_context(EA); } @endif define pcodeop saveCallerState; define pcodeop restoreCallerState; @if defined(TRICORE_VERBOSE) macro restore_debug_context(EA) { PCXI = *[ram]:4 EA; EA = EA + 4; PSW = *[ram]:4 EA; EA = EA + 4; a10 = *[ram]:4 EA; EA = EA + 4; a11 = *[ram]:4 EA; EA = EA + 4; } @else define pcodeop restore_debug_context; @endif @if defined(TRICORE_VERBOSE) macro load_upper_context(EA) { # dummy = *[ram]:4 EA; EA = EA + 4; # dummy = *[ram]:4 EA; EA = EA + 4; _restore_upper_context(EA); } @endif @if defined(TRICORE_VERBOSE) macro store_upper_context(EA) { *[ram]:4 EA = PCXI; EA = EA + 4; *[ram]:4 EA = PSW; EA = EA + 4; *[ram]:4 EA = a10; EA = EA + 4; *[ram]:4 EA = a11; EA = EA + 4; *[ram]:4 EA = d8; EA = EA + 4; *[ram]:4 EA = d9; EA = EA + 4; *[ram]:4 EA = d10; EA = EA + 4; *[ram]:4 EA = d11; EA = EA + 4; *[ram]:4 EA = a12; EA = EA + 4; *[ram]:4 EA = a13; EA = EA + 4; *[ram]:4 EA = a14; EA = EA + 4; *[ram]:4 EA = a15; EA = EA + 4; *[ram]:4 EA = d12; EA = EA + 4; *[ram]:4 EA = d13; EA = EA + 4; *[ram]:4 EA = d14; EA = EA + 4; *[ram]:4 EA = d15; EA = EA + 4; } @endif macro BitReverseAddressingMode(rege, rego, EA) { local index:2 = rego[0,16]; local incr:2 = rego[16,16]; EA = rege + zext(index); local rindex:2 = reverse16(index); local rincr:2 = reverse16(incr); local new_index:2 = reverse16(rindex + rincr); rego[0,16] = new_index; } macro CircularAddressingMode(rege, rego, EA0, off10) { local index:2 = rego[0,16]; local length:2 = rego[16,16]; EA0 = rege + zext(index); local new_index:2 = index + off10[0,10]; ternary(new_index, new_index s< 0, new_index + length, new_index % length); rego[0,16] = new_index; } macro CircularAddressingMode2(rege, rego, EA0, EA1, off10, circsize) { local index:2 = rego[0,16]; local length:2 = rego[16,16]; EA0 = rege + zext(index); EA1 = rege + zext((index + circsize) % length); 
local new_index:2 = index + off10[0,10]; ternary(new_index, new_index s< 0, new_index + length, new_index % length); rego[0,16] = new_index; } macro CircularAddressingMode4(rege, rego, EA0, EA1, EA2, EA3, off10, circsize) { local index:2 = rego[0,16]; local length:2 = rego[16,16]; EA0 = rege + zext(index); EA1 = rege + zext((index + circsize) % length); EA2 = rege + zext((index + circsize + circsize) % length); EA3 = rege + zext((index + circsize + circsize + circsize) % length); local new_index:2 = index + off10[0,10]; ternary(new_index, new_index s< 0, new_index + length, new_index % length); rego[0,16] = new_index; } #TODO Should probably just delete this and any "+i" until referencing a # T2 manual instead of DSP/compiler guide macro IndexAddressingMode(rege, rego, EA) { local index:2 = rego[0,16]; local modifier:2 = rego[16,16]; EA = rege + zext(index); rego[0,16] = index + modifier; } off10: reloc is PCPMode=0 & op1621 & sop2831 [ reloc = op1621 | (sop2831 << 6); ] { local tmp:4 = reloc; export tmp; } off16: reloc is PCPMode=0 & op1621 & sop2227 & op2831 [ reloc = op1621 | (op2831 << 6) | (sop2227 << 10); ] { local tmp:4 = reloc; export tmp; } off18: reloc is PCPMode=0 & op1215 ; op1621 & op2225 & op2831 [ reloc = (op1215 << 28) | (op2225 << 10) | (op2831 << 6) | op1621; ] { local tmp:4 = reloc; export tmp; } off24pc: reloc is PCPMode=0 & sop0815 ; op1631 [ reloc = inst_start + ((op1631 | (sop0815 << 16)) * 2); ] { export *[ram]:4 reloc; } off24abs: reloc is PCPMode=0 & op0811 & op1215 ; op1631 [ reloc = (op1631 << 1) | (op0811 << 17) | (op1215 << 28); ] { export *[ram]:4 reloc; } off0811pc4o: reloc is PCPMode=0 & op0811 [ reloc = inst_start + (0xffffffe0 | (op0811 << 1)); ] { export *[ram]:4 reloc; } off0811pc4z: reloc is PCPMode=0 & op0811 [ reloc = inst_start + (op0811 * 2); ] { export *[ram]:4 reloc; } off0815pc8s: reloc is PCPMode=0 & sop0815 [ reloc = inst_start + (sop0815 * 2); ] { export *[ram]:4 reloc; } off1630pc15s: reloc is PCPMode=0 & sop1630 [ 
reloc = inst_start + (sop1630 * 2); ] { export *[ram]:4 reloc; } @if defined(TRICORE_V2) off0811pc4z16: reloc is PCPMode=0 & op0811 [ reloc = inst_start + ((op0811 + 16) * 2); ] { export *[ram]:4 reloc; } @endif const0607Z: "#"^op0607 is PCPMode=0 & op0607 { local tmp:4 = op0607; export tmp; } const0810Z: "#"^op0810 is PCPMode=0 & op0810 { local tmp:4 = op0810; export tmp; } const0811Z6zz: "#"^reloc is PCPMode=0 & op0811 [ reloc = op0811 << 2; ] { local tmp:4 = reloc; export tmp; } const0811Z: "#"^op0811 is PCPMode=0 & op0811 { local tmp:4 = op0811; export tmp; } const0811Z5z: "#"^reloc is PCPMode=0 & op0811 [ reloc = op0811 << 1; ] { local tmp:4 = reloc; export tmp; } const0815Z: "#"^op0815 is PCPMode=0 & op0815 { local tmp:4 = op0815; export tmp; } const0815Z10zz: "#"^reloc is PCPMode=0 & op0815 [ reloc = op0815 << 2; ] { local tmp:4 = reloc; export tmp; } const1111Z: "#"^op1111 is PCPMode=0 & op1111 { local tmp:4 = op1111; export tmp; } const1215S: "#"^sop1215 is PCPMode=0 & sop1215 { local tmp:4 = sop1215; export tmp; } const1215Z: "#"^op1215 is PCPMode=0 & op1215 { local tmp:4 = op1215; export tmp; } const1215Z6zz: "#"^reloc is PCPMode=0 & op1215 [ reloc = op1215 << 2; ] { local tmp:4 = reloc; export tmp; } const1215Z5z: "#"^reloc is PCPMode=0 & op1215 [ reloc = op1215 << 1; ] { local tmp:4 = reloc; export tmp; } const1220S: "#"^reloc is PCPMode=0 & op1215 ; sop1620 [ reloc = (sop1620 << 4) | op1215; ] { local tmp:4 = reloc; export tmp; } const1220Z: "#"^reloc is PCPMode=0 & op1215 ; op1620 [ reloc = (op1620 << 4) | op1215; ] { local tmp:4 = reloc; export tmp; } const1227S: "#"^reloc is PCPMode=0 & op1215 ; sop1627 [ reloc = (sop1627 << 4) | op1215; ] { local tmp:4 = reloc; export tmp; } const1227Z: "#"^reloc is PCPMode=0 & op1215 ; op1627 [ reloc = (op1627 << 4) | op1215; ] { local tmp:4 = reloc; export tmp; } const1617Z: "#"^op1617 is PCPMode=0 & op1617 { local tmp:4 = op1617; export tmp; } const1620Z: "#"^op1620 is PCPMode=0 & op1620 { local tmp:4 = op1620; 
export tmp; } const2327Z: "#"^op2327 is PCPMode=0 & op2327 { local tmp:4 = op2327; export tmp; } Nbit: "#"^reloc is PCPMode=0 & op0707 & op1215 [ reloc = (op0707 << 4) | op1215; ] { local tmp:4 = reloc; export tmp; } #TODO circular is seems too compilcated to do this way #BO: [Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rpe1215 & Rpo1215 & op0003=9 & op0405=2 ; off10 & op2627=1 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); export EA; } BO: [Rpe1215/Rpo1215^"+r"^] is PCPMode=0 & Rpe1215 & Rpo1215 & op0003=9 & op0405=2 ; op1621=0x0 & op2627=0 & op2831=0x0 { local EA:4; BitReverseAddressingMode(Rpe1215, Rpo1215, EA); export EA; } BO: [Rpe1215/Rpo1215^"+i"^] is PCPMode=0 & Rpe1215 & Rpo1215 & op0003=9 & op0405=2 ; op1621=0x0 & op2627=2 & op2831=0x0 { local EA:4; IndexAddressingMode(Rpe1215, Rpo1215, EA); export EA; } BO: [Ra1215]off10 is PCPMode=0 & Ra1215 & op0003=9 & op0405=0 ; off10 & op2627=2 { local EA = Ra1215 + off10; export EA; } BO: [Ra1215+]off10 is PCPMode=0 & Ra1215 & op0003=9 & op0405=0 ; off10 & op2627=0 { local EA = Ra1215; Ra1215 = Ra1215 + off10; export EA; } BO: [+Ra1215]off10 is PCPMode=0 & Ra1215 & op0003=9 & op0405=0 ; off10 & op2627=1 { Ra1215 = Ra1215 + off10; local EA = Ra1215; export EA; } BOL: [Ra1215]off16 is PCPMode=0 & Ra1215 ; off16 { local EA = Ra1215 + off16; export EA; } SSR: [Ra1215] is PCPMode=0 & Ra1215 & op0003=4 & op0405=3 { local EA = Ra1215; export EA; } SSR: [Ra1215+] is PCPMode=0 & Ra1215 & op0003=4 & op0405=2 & op0606=1 { local EA = Ra1215; Ra1215 = Ra1215 + 4; export EA; } SSR: [Ra1215+] is PCPMode=0 & Ra1215 & op0003=4 & op0405=2 & op0606=0 & op0707=1 { local EA = Ra1215; Ra1215 = Ra1215 + 2; export EA; } SSR: [Ra1215+] is PCPMode=0 & Ra1215 & op0003=4 & op0405=2 & op0606=0 & op0707=0 { local EA = Ra1215; Ra1215 = Ra1215 + 1; export EA; } SLR: [Ra1215] is PCPMode=0 & Ra1215 & op0003=4 & op0405=1 { local EA = Ra1215; export EA; } SLR: [Ra1215+] is PCPMode=0 & Ra1215 & op0003=4 & op0405=0 & op0606=1 { local 
EA = Ra1215; Ra1215 = Ra1215 + 4; export EA; } SLR: [Ra1215+] is PCPMode=0 & Ra1215 & op0003=4 & op0405=0 & op0606=0 & op0707=1 { local EA = Ra1215; Ra1215 = Ra1215 + 2; export EA; } SLR: [Ra1215+] is PCPMode=0 & Ra1215 & op0003=4 & op0405=0 & op0606=0 & op0707=0 { local EA = Ra1215; Ra1215 = Ra1215 + 1; export EA; } SRO: [Ra1215]const0811Z is PCPMode=0 & Ra1215 & const0811Z & op0003=0xc & op0404=0 & op0607=0 { local EA = Ra1215 + const0811Z; export EA; } SRO: [Ra1215]const0811Z5z is PCPMode=0 & Ra1215 & const0811Z5z & op0003=0xc & op0404=0 & op0607=2 { local EA = Ra1215 + const0811Z5z; export EA; } SRO: [Ra1215]const0811Z6zz is PCPMode=0 & Ra1215 & const0811Z6zz & op0003=0xc & op0404=0 & op0606=1 { local EA = Ra1215 + const0811Z6zz; export EA; } SLRO: [a15]const1215Z is PCPMode=0 & a15 & const1215Z & op0003=8 & op0405=0 & op0607=0 { local EA = a15 + const1215Z; export EA; } SLRO: [a15]const1215Z5z is PCPMode=0 & a15 & const1215Z5z & op0003=8 & op0405=0 & op0607=2 { local EA = a15 + const1215Z5z; export EA; } SLRO: [a15]const1215Z6zz is PCPMode=0 & a15 & const1215Z6zz & op0003=8 & op0405=0 & op0606=1 { local EA = a15 + const1215Z6zz; export EA; } SSRO: [a15]const1215Z is PCPMode=0 & a15 & const1215Z & op0003=8 & op0405=2 & op0607=0 { local EA = a15 + const1215Z; export EA; } SSRO: [a15]const1215Z5z is PCPMode=0 & a15 & const1215Z5z & op0003=8 & op0405=2 & op0607=2 { local EA = a15 + const1215Z5z; export EA; } SSRO: [a15]const1215Z6zz is PCPMode=0 & a15 & const1215Z6zz & op0003=8 & op0405=2 & op0606=1 { local EA = a15 + const1215Z6zz; export EA; } SC: [a10]const0815Z10zz is PCPMode=0 & a10 & const0815Z10zz & op0003=8 & op0404=1 & op0606=1 { local EA = a10 + const0815Z10zz; export EA; } # ABS D[c], D[b] (RR) :abs Rd2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Rd2831 & op1627=0x1c0 { int_abs(Rd2831, Rd1215); overflowflags(Rd2831); } # ABS.B D[c], D[b] (RR) :abs.b Rd2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Rd2831 & op1627=0x5c0 { local 
result3:1; int_abs1(result3, Rd1215[24,8]); local result2:1; int_abs1(result2, Rd1215[16,8]); local result1:1; int_abs1(result1, Rd1215[8,8]); local result0:1; int_abs1(result0, Rd1215[0,8]); overflowflagsb(result3, result2, result1, result0); Rd2831[24,8] = result3; Rd2831[16,8] = result2; Rd2831[8,8] = result1; Rd2831[0,8] = result0; } # ABS.H D[c], D[b] (RR) :abs.h Rd2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Rd2831 & op1627=0x7c0 { local result1:2; int_abs(result1, Rd1215[16,16]); local result0:2; int_abs(result0, Rd1215[0,16]); overflowflagsh(result1, result0); Rd2831[16,16] = result1; Rd2831[0,16] = result0; } # ABSDIF D[c], D[a], D[b] (RR) :absdif Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0xe0 { int_abs(Rd2831, Rd0811 - Rd1215); overflowflags(Rd2831); } # ABSDIF D[c], D[a], const9 (RC) :absdif Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0xe ) & const1220S { int_abs(Rd2831, Rd0811 - const1220S); overflowflags(Rd2831); } # ABSDIF.B D[c], D[a], D[b] (RR) :absdif.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x4e0 { local result3:1; int_abs1(result3, (Rd0811[24,8] - Rd1215[24,8])); local result2:1; int_abs1(result2, (Rd0811[16,8] - Rd1215[16,8])); local result1:1; int_abs1(result1, (Rd0811[8,8] - Rd1215[8,8])); local result0:1; int_abs1(result0, (Rd0811[0,8] - Rd1215[0,8])); overflowflagsb(result3, result2, result1, result0); Rd2831[24,8] = result3; Rd2831[16,8] = result2; Rd2831[8,8] = result1; Rd2831[0,8] = result0; } # ABSDIF.H D[c], D[a], D[b] (RR) :absdif.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x6e0 { local result1:2; int_abs(result1, (Rd0811[16,16] - Rd1215[16,16])); local result0:2; int_abs(result0, (Rd0811[0,16] - Rd1215[0,16])); overflowflagsh(result1, result0); Rd2831[16,16] = result1; Rd2831[0,16] = result0; } # ABSDIFS D[c], D[a], D[b] (RR) :absdifs Rd2831,Rd0811,Rd1215 is 
PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0xf0 { local result:4; int_abs(result, (Rd0811 - Rd1215)); overflowflags(result); ssov(Rd2831, result, 32); } # ABSDIFS D[c], D[a], const9 (RC) :absdifs Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0xf ) & const1220S { local result:4; int_abs(result, (Rd0811 - const1220S)); overflowflags(result); ssov(Rd2831, result, 32); } # ABSDIFS.H D[c], D[a], D[b] (RR) :absdifs.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x6f0 { local result1:2; int_abs(result1, (Rd0811[16,16] - Rd1215[16,16])); local result0:2; int_abs(result0, (Rd0811[0,16] - Rd1215[0,16])); overflowflagsh(result1, result0); ssov(Rd2831[16,16], result1, 16); ssov(Rd2831[0,16], result0, 16); } # ABSS D[c], D[b] (RR) :abss Rd2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Rd2831 & op1627=0x1d0 { local result:4; int_abs(result, Rd1215); overflowflags(result); ssov(Rd2831, result, 32); } # ABSS.H D[c], D[b] (RR) :abss.h Rd2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Rd2831 & op1627=0x7d0 { local result1:2; int_abs(result1, Rd1215[16,16]); local result0:2; int_abs(result0, Rd1215[0,16]); overflowflagsh(result1, result0); ssov(Rd2831[16,16], result1, 16); ssov(Rd2831[0,16], result0, 16); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADD D[a], D[15], D[b] (SRR) :add Rd0811,d15,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x12 { Rd0811 = d15 + Rd1215; overflowflags(Rd0811); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADD D[a], D[15], const4 (SRC) :add Rd0811,d15,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0x92 { Rd0811 = d15 + const1215S; overflowflags(Rd0811); } @endif # ADD D[15], D[a], D[b] (SRR) :add d15,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x1a { d15 = Rd0811 + Rd1215; overflowflags(d15); } # ADD D[a], D[b] (SRR) :add 
Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x42 { Rd0811 = Rd0811 + Rd1215; overflowflags(Rd0811); } # ADD D[15], D[a], const4 (SRC) :add d15,Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0x9a { d15 = Rd0811 + const1215S; overflowflags(d15); } # ADD D[a], const4 (SRC) :add Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & op0007=0xc2 { Rd0811 = Rd0811 + const1215S; overflowflags(Rd0811); } # ADD D[c], D[a], D[b] (RR) :add Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x0 { Rd2831 = Rd0811 + Rd1215; overflowflags(Rd2831); } # ADD D[c], D[a], const9 (RC) :add Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x0 ) & const1220S { Rd2831 = Rd0811 + const1220S; overflowflags(Rd2831); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADD.A A[a], A[b] (SRR) :add.a Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x30 { Ra0811 = Ra0811 + Ra1215; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADD.A A[a], const4 (SRC) :add.a Ra0811,const1215S is PCPMode=0 & Ra0811 & const1215S & op0007=0xb0 { Ra0811 = Ra0811 + const1215S; } @endif # ADD.A A[c], A[a], A[b] (RR) :add.a Ra2831,Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x1 ; Ra2831 & op1627=0x10 { Ra2831 = Ra0811 + Ra1215; } # ADD.B D[c], D[a], D[b] (RR) :add.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x400 { local result3:4 = zext(Rd0811[24,8] + Rd1215[24,8]); local result2:4 = zext(Rd0811[16,8] + Rd1215[16,8]); local result1:4 = zext(Rd0811[8,8] + Rd1215[8,8]); local result0:4 = zext(Rd0811[0,8] + Rd1215[0,8]); overflowflagsb(result3, result2, result1, result0); Rd2831[24,8] = result3[0,8]; Rd2831[16,8] = result2[0,8]; Rd2831[8,8] = result1[0,8]; Rd2831[0,8] = result0[0,8]; } @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADD.F D[c], D[d], D[a] (RRR) :add.f Rd2831,Rd2427,Rd0811 is 
PCPMode=0 & Rd0811 & op0007=0x6b & op1215=0x0 ; Rd2427 & Rd2831 & op1623=0x21 { #TODO float #TODO flags Rd2831 = Rd2427 f+ Rd0811; } @endif # ADD.H D[c], D[a], D[b] (RR) :add.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x600 { local result1:4 = zext(Rd0811[16,16] + Rd1215[16,16]); local result0:4 = zext(Rd0811[0,16] + Rd1215[0,16]); overflowflagsh(result1, result0); Rd2831[16,16] = result1[0,16]; Rd2831[0,16] = result0[0,16]; } # ADDC D[c], D[a], D[b] (RR) :addc Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x50 { local tmp:5 = zext(Rd0811) + zext(Rd1215) + zext($(PSW_C)); Rd2831 = tmp[0,32]; $(PSW_C) = tmp[32,1]; overflowflags(Rd2831); } # ADDC D[c], D[a], const9 (RC) :addc Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x5 ) & const1220S { local tmp:5 = zext(Rd0811) + zext(const1220S) + zext($(PSW_C)); Rd2831 = tmp[0,32]; $(PSW_C) = tmp[32,1]; overflowflags(Rd2831); } # ADDI D[c], D[a], const16 (RLC) :addi Rd2831,Rd0811,const1227S is PCPMode=0 & ( Rd0811 & op0007=0x1b ; Rd2831 ) & const1227S { Rd2831 = Rd0811 + const1227S; overflowflags(Rd2831); } # ADDIH D[c], D[a], const16 (RLC) :addih Rd2831,Rd0811,const1227Z is PCPMode=0 & ( Rd0811 & op0007=0x9b ; Rd2831 ) & const1227Z { Rd2831 = Rd0811 + (const1227Z << 16); overflowflags(Rd2831); } # ADDIH.A A[c], A[a], const16 (RLC) :addih.a Ra2831,Ra0811,const1227Z is PCPMode=0 & ( Ra0811 & op0007=0x11 ; Ra2831 ) & const1227Z { Ra2831 = Ra0811 + (const1227Z << 16); } # ADDS D[a], D[b], (SRR) :adds Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x22 { local result:4 = Rd0811 + Rd1215; overflowflags(result); ssov(Rd0811, result, 32); } # ADDS D[c], D[a], D[b] (RR) :adds Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x20 { local result:4 = Rd0811 + Rd1215; overflowflags(result); ssov(Rd2831, result, 32); } # ADDS D[c], D[a], const9 (RC) :adds Rd2831,Rd0811,const1220S is 
PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x2 ) & const1220S { local result:4 = Rd0811 + const1220S; overflowflags(result); ssov(Rd2831, result, 32); } # ADDS.H D[c], D[a], D[b] (RR) :adds.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x620 { local result1:4 = sext(Rd0811[16,16] + Rd1215[16,16]); local result0:4 = sext(Rd0811[0,16] + Rd1215[0,16]); overflowflagsh(result1, result0); ssov(Rd2831[16,16], result1[0,16], 16); ssov(Rd2831[0,16], result0[0,16], 16); } # ADDS.HU D[c], D[a], D[b] (RR) :adds.hu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x630 { local result1:4 = sext(Rd0811[16,16] + Rd1215[16,16]); local result0:4 = sext(Rd0811[0,16] + Rd1215[0,16]); overflowflagsh(result1, result0); suov(Rd2831[16,16], result1[0,16], 16); suov(Rd2831[0,16], result0[0,16], 16); } # ADDS.U D[c], D[a], D[b] (RR) :adds.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x30 { local result:4 = Rd0811 + Rd1215; overflowflags(result); suov(Rd2831, result, 32); } # ADDS.U D[c], D[a], const9 (RC) :adds.u Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x3 ) & const1220S { local result:4 = Rd0811 + const1220S; overflowflags(result); suov(Rd2831, result, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADDSC.A A[a], A[b], D[15], n (SRRS) :addsc.a Ra0811,Ra1215,d15,const0607Z is PCPMode=0 & Ra0811 & Ra1215 & const0607Z & d15 & op0005=0x10 { Ra0811 = Ra1215 + (d15 << const0607Z); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADDSC.A A[c], A[b], D[a], n (RR) :addsc.a Ra2831,Ra1215,Rd0811,const1617Z is PCPMode=0 & Ra1215 & Rd0811 & op0007=0x1 ; Ra2831 & const1617Z & op1827=0x180 { Ra2831 = Ra1215 + (Rd0811 << const1617Z); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ADDSC.AT A[c], A[b], D[a] (RR) :addsc.at 
Ra2831,Ra1215,Rd0811 is PCPMode=0 & Ra1215 & Rd0811 & op0007=0x1 ; Ra2831 & op1627=0x620 { Ra2831 = (Ra1215 + (Rd0811 >> 3)) & 0xFFFFFFFC; } @endif # ADDX D[c], D[a], D[b] (RR) :addx Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x40 { local result:4 = Rd0811 + Rd1215; $(PSW_C) = carry(Rd0811, Rd1215); overflowflags(result); Rd2831 = result; } # ADDX D[c], D[a], const9 (RC) :addx Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x4 ) & const1220S { local result:4 = Rd0811 + const1220S; $(PSW_C) = carry(Rd0811, const1220S); overflowflags(result); Rd2831 = result; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # AND D[a], D[b] (SRR) :and Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x26 { Rd0811 = Rd0811 & Rd1215; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # AND D[15], const8 (SC) :and d15,const0815Z is PCPMode=0 & const0815Z & d15 & op0007=0x16 { d15 = d15 & const0815Z; } @endif # AND D[c], D[a], D[b] (RR) :and Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x80 { Rd2831 = Rd0811 & Rd1215; } # AND D[c], D[a], const9 (RC) :and Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x8 ) & const1220Z { Rd2831 = Rd0811 & const1220Z; } # AND.AND.T D[c], D[a], pos1, D[b], pos2 (BIT) :and.and.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x47 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831[0,1] = Rd2831[0,1] & (tmpa[0,1] & tmpb[0,1]); } # AND.ANDN.T D[c], D[a,] pos1, D[b], pos2 (BIT) :and.andn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x47 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831[0,1] = 
Rd2831[0,1] & (tmpa[0,1] & ~tmpb[0,1]); } # AND.EQ D[c], D[a], D[b] (RR) :and.eq Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x200 { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 == Rd1215); } # AND.EQ D[c], D[a], const9 (RC) :and.eq Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x20 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 == const1220S); } # AND.GE D[c], D[a], D[b] (RR) :and.ge Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x240 { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 s>= Rd1215); } # AND.GE D[c], D[a], const9 (RC) :and.ge Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x24 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 s>= const1220S); } # AND.GE.U D[c], D[a], D[b] (RR) :and.ge.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x250 { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 >= Rd1215); } # AND.GE.U D[c], D[a], const9 (RC) :and.ge.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x25 ) & const1220Z { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 >= const1220Z); } # AND.LT D[c], D[a], D[b] (RR) :and.lt Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x220 { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 s< Rd1215); } # AND.LT D[c], D[a], const9 (RC) :and.lt Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x22 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 s< const1220S); } # AND.LT.U D[c], D[a], D[b] (RR) :and.lt.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x230 { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 < Rd1215); } # AND.LT.U D[c], D[a], const9 (RC) :and.lt.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x23 ) & const1220Z { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 < const1220Z); } # AND.NE D[c], D[a], D[b] (RR) :and.ne 
Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x210 { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 != Rd1215); } # AND.NE D[c], D[a], const9 (RC) :and.ne Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x21 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] & (Rd0811 != const1220S); } # AND.NOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :and.nor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x47 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831[0,1] = Rd2831[0,1] & !(tmpa[0,1] | tmpb[0,1]); } # AND.OR.T D[c], D[a], pos1, D[b], pos2 (BIT) :and.or.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x47 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831[0,1] = Rd2831[0,1] & (tmpa[0,1] | tmpb[0,1]); } # AND.T D[c], D[a], pos1, D[b], pos2 (BIT) :and.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x87 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831 = zext(tmpa[0,1] & tmpb[0,1]); } # ANDN D[c], D[a], D[b] (RR) :andn Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0xe0 { Rd2831 = Rd0811 & ~Rd1215; } # ANDN D[c], D[a], const9 (RC) :andn Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0xe ) & const1220Z { Rd2831 = Rd0811 & ~const1220Z; } # ANDN.T D[c], D[a], pos1, D[b], pos2 (BIT) :andn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x87 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831 = zext(tmpa[0,1] & !tmpb[0,1]); } @if defined(TRICORE_RIDER_B) || 
defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # BISR const8 (SC) :bisr const0815Z is PCPMode=0 & const0815Z & op0007=0xe0 { #TODO isr # tmp_FCX = FCX; # if (FCX == 0) trap(FCU); # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA,16 * word) = {PCXI, a11, a2, a3, d0, d1, d2, d3, a4, a5, a6, a7, d4, d5, d6, d7}; # PCXI.PCPN = ICR.CCPN; # PCXI.PIE = ICR.IE; # PCXI.UL = 0; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # ICR.IE = 1; # ICR.CCPN = const8; # if (tmp_FCX == LCX) trap(FCD); } @endif # BISR const9 (RC) :bisr const1220Z is PCPMode=0 & ( op0007=0xad & op0811=0x0 ; op2131=0x0 ) & const1220Z { #TODO isr # if (FCX == 0) trap(FCU); # tmp_FCX = FCX; # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA,16 * word) = {PCXI, a11, a2, a3, d0, d1, d2, d3, a4, a5, a6, a7, d4, d5, d6, d7}; # PCXI.PCPN = ICR.CCPN; # PCXI.PIE = ICR.IE; # PCXI.UL = 0; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # ICR.IE = 1; # ICR.CCPN = const9[7:0]; # if (tmp_FCX == LCX) trap(FCD); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # BMERGE D[c], D[a], D[b] (RR) :bmerge Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x10 { @if defined(TRICORE_VERBOSE) Rd2831[31,1] = Rd0811[15,1]; Rd2831[30,1] = Rd1215[15,1]; Rd2831[29,1] = Rd0811[14,1]; Rd2831[28,1] = Rd1215[14,1]; Rd2831[27,1] = Rd0811[13,1]; Rd2831[26,1] = Rd1215[13,1]; Rd2831[25,1] = Rd0811[12,1]; Rd2831[24,1] = Rd1215[12,1]; Rd2831[23,1] = Rd0811[11,1]; Rd2831[22,1] = Rd1215[11,1]; Rd2831[21,1] = Rd0811[10,1]; Rd2831[20,1] = Rd1215[10,1]; Rd2831[19,1] = Rd0811[9,1]; Rd2831[18,1] = Rd1215[9,1]; Rd2831[17,1] = Rd0811[8,1]; Rd2831[16,1] = Rd1215[8,1]; Rd2831[15,1] = Rd0811[7,1]; Rd2831[14,1] = Rd1215[7,1]; Rd2831[13,1] = Rd0811[6,1]; Rd2831[12,1] = Rd1215[6,1]; Rd2831[11,1] = Rd0811[5,1]; Rd2831[10,1] = Rd1215[5,1]; Rd2831[9,1] = Rd0811[4,1]; Rd2831[8,1] = Rd1215[4,1]; Rd2831[7,1] = Rd0811[3,1]; Rd2831[6,1] = 
Rd1215[3,1]; Rd2831[5,1] = Rd0811[2,1]; Rd2831[4,1] = Rd1215[2,1]; Rd2831[3,1] = Rd0811[1,1]; Rd2831[2,1] = Rd1215[1,1]; Rd2831[1,1] = Rd0811[0,1]; Rd2831[0,1] = Rd1215[0,1]; @else bmerge(Rd2831,Rd0811,Rd1215); @endif } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # BSPLIT E[c], D[a] (RR) :bsplit Re2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Re2831 & op1627=0x90 { @if defined(TRICORE_VERBOSE) Re2831[48,16] = 0; Re2831[47,1] = Rd0811[31,1]; Re2831[46,1] = Rd0811[29,1]; Re2831[45,1] = Rd0811[27,1]; Re2831[44,1] = Rd0811[25,1]; Re2831[43,1] = Rd0811[23,1]; Re2831[42,1] = Rd0811[21,1]; Re2831[41,1] = Rd0811[19,1]; Re2831[40,1] = Rd0811[17,1]; Re2831[39,1] = Rd0811[15,1]; Re2831[38,1] = Rd0811[13,1]; Re2831[37,1] = Rd0811[11,1]; Re2831[36,1] = Rd0811[9,1]; Re2831[35,1] = Rd0811[7,1]; Re2831[34,1] = Rd0811[5,1]; Re2831[33,1] = Rd0811[3,1]; Re2831[32,1] = Rd0811[1,1]; Re2831[16,16] = 0; Re2831[15,1] = Rd0811[30,1]; Re2831[14,1] = Rd0811[28,1]; Re2831[13,1] = Rd0811[26,1]; Re2831[12,1] = Rd0811[24,1]; Re2831[11,1] = Rd0811[22,1]; Re2831[10,1] = Rd0811[20,1]; Re2831[9,1] = Rd0811[18,1]; Re2831[8,1] = Rd0811[16,1]; Re2831[7,1] = Rd0811[14,1]; Re2831[6,1] = Rd0811[12,1]; Re2831[5,1] = Rd0811[10,1]; Re2831[4,1] = Rd0811[8,1]; Re2831[3,1] = Rd0811[6,1]; Re2831[2,1] = Rd0811[4,1]; Re2831[1,1] = Rd0811[2,1]; Re2831[0,1] = Rd0811[0,1]; @else bsplit(Re2831,Rd0811); @endif } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # CACHEA.I A[b], off10 (BO) (Post-increment Addressing Mode) # CACHEA.I P[b] (BO) (Bit Reverse Addressing Mode) # CACHEA.I A[b], off10 (BO) (Pre-increment Addressing Mode) # CACHEA.I A[b], off10 (BO) (Base + Short Offset Addressing Mode) # CACHEA.I P[b] (BO) (Index Addressing Mode) :cachea.i BO is PCPMode=0 & ( op0607=0x2 & op0811=0x0 ; op2225=0xe ) & BO { build BO; cache_address_ivld(BO); } # CACHEA.I P[b], off10 (BO) (Circular Addressing Mode) #:cachea.i BO is 
PCPMode=0 & ( op0007=0xa9 & op0811=0x0 ; op2227=0x1e ) & BO :cachea.i [Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rpe1215 & Rpo1215 & op0007=0xa9 & op0811=0x0 ; off10 & op2227=0x1e { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); cache_address_ivld(EA); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # CACHEA.W A[b], off10 (BO) (Post-increment Addressing Mode) # CACHEA.W P[b] (BO) (Bit-reverse Addressing Mode) # CACHEA.W A[b], off10 (BO) (Pre-increment Addressing Mode) # CACHEA.W A[b], off10 (BO) (Base + Short Offset Addressing Mode) # CACHEA.W P[b] (BO) (Index Addressing Mode) :cachea.w BO is PCPMode=0 & ( op0607=0x2 & op0811=0x0 ; op2225=0xc ) & BO { build BO; cache_address_wb(BO); } # CACHEA.W P[b], off10 (BO)(Circular Addressing Mode) #:cachea.w BO is PCPMode=0 & ( op0007=0xa9 & op0811=0x0 ; op2227=0x1c ) & BO :cachea.w [Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rpe1215 & Rpo1215 & op0007=0xa9 & op0811=0x0 ; off10 & op2227=0x1c { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); cache_address_wb(EA); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # CACHEA.WI A[b], off10 (BO)(Post-increment Addressing Mode) # CACHEA.WI P[b] (BO)(Bit-reverse Addressing Mode) # CACHEA.WI A[b], off10 (BO)(Pre-increment Addressing Mode) # CACHEA.WI A[b], off10 (BO)(Base + Short Offset Addressing Mode) # CACHEA.WI P[b] (BO)(Index Addressing Mode) :cachea.wi BO is PCPMode=0 & ( op0607=0x2 & op0811=0x0 ; op2225=0xd ) & BO { build BO; cache_address_wi(BO); } # CACHEA.WI P[b], off10 (BO) (Circular Addressing Mode) #:cachea.wi BO is PCPMode=0 & ( op0007=0xa9 & op0811=0x0 ; op2227=0x1d ) & BO :cachea.wi [Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rpe1215 & Rpo1215 & op0007=0xa9 & op0811=0x0 ; off10 & op2227=0x1d { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); cache_address_wi(EA); } @endif @if defined(TRICORE_V2) # CACHEI.I A[b], off10 (BO)(Post-increment 
Addressing Mode) # CACHEI.I A[b], off10 (BO)(Pre-increment Addressing Mode) # CACHEI.I A[b], off10 (BO)(Base + Short Offset Addressing Mode) :cachei.i BO is PCPMode=0 & ( op0407=0x8 & op0811=0x0 ; op2225=0xa ) & BO { build BO; cache_index_ivld(BO); } @endif @if defined(TRICORE_V2) # CACHEI.W A[b], off10 (BO)(Post-increment Addressing Mode) # CACHEI.W A[b], off10 (BO)(Pre-increment Addressing Mode) # CACHEI.W A[b], off10 (BO)(Base + Short Offset Addressing Mode) :cachei.w BO is PCPMode=0 & ( op0407=0x8 & op0811=0x0 ; op2225=0xb ) & BO { build BO; cache_index_wb(BO); } @endif @if defined(TRICORE_V2) # CACHEI.WI A[b], off10 (BO)(Post-increment Addressing Mode) # CACHEI.WI A[b], off10 (BO)(Pre-increment Addressing Mode) # CACHEI.WI A[b], off10 (BO)(Base + Short Offset Addressing Mode) :cachei.wi BO is PCPMode=0 & ( op0407=0x8 & op0811=0x0 ; op2225=0xf ) & BO { build BO; cache_index_wi(BO); } @endif # CADD D[a], D[15], const4 (SRC) :cadd Rd0811,d15,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0x8a { local result:4; ternary(result, d15 != 0, Rd0811 + const1215S, Rd0811); overflowflags(result); Rd0811 = result; } # CADD D[c], D[d], D[a], D[b] (RRR) :cadd Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x2b ; Rd2427 & Rd2831 & op1623=0x0 { local result:4; ternary(result, Rd2427 != 0, Rd0811 + Rd1215, Rd0811); overflowflags(result); Rd2831 = result; } # CADD D[c], D[d], D[a], const9 (RCR) :cadd Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0xab ; Rd2427 & Rd2831 & op2123=0x0 ) & const1220S { local result:4; ternary(result, Rd2427 != 0, Rd0811 + const1220S, Rd0811); overflowflags(result); Rd2831 = result; } # CADDN D[a], D[15], const4 (SRC) :caddn Rd0811,d15,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0xca { local result:4; ternary(result, d15 == 0, Rd0811 + const1215S, Rd0811); overflowflags(result); Rd0811 = result; } # CADDN D[c], D[d], D[a], D[b] (RRR) :caddn Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 
& Rd0811 & Rd1215 & op0007=0x2b ; Rd2427 & Rd2831 & op1623=0x10 { local result:4; ternary(result, Rd2427 == 0, Rd0811 + Rd1215, Rd0811); overflowflags(result); Rd2831 = result; } # CADDN D[c], D[d], D[a], const9 (RCR) :caddn Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0xab ; Rd2427 & Rd2831 & op2123=0x1 ) & const1220S { local result:4; ternary(result, Rd2427 == 0, Rd0811 + const1220S, Rd0811); overflowflags(result); Rd2831 = result; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # CALL disp8 (SB) :call off0815pc8s is PCPMode=0 & off0815pc8s & op0007=0x5c { #TODO call # if (FCX == 0) trap(FCU); # if (PSW.CDE) then if(cdc_increment()) then trap(CDO); # PSW.CDE = 1; # ret_addr = PC + 2 ; # tmp_FCX = FCX; # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA,16 * word) = {PCXI,PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15}; # PCXI.PCPN = ICR.CCPN; # PCXI.PIE = ICR.IE; # PCXI.UL = 1; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # PC = PC + sign_ext(2 * disp8); # A[11] = ret_addr[31:0]; # if (tmp_FCX == LCX) trap(FCD); saveCallerState(FCX, LCX, PCXI); a11 = inst_next; call off0815pc8s; } @endif # CALL disp24 (B) :call off24pc is PCPMode=0 & ( op0007=0x6d ) ... & off24pc { #TODO call # if (FCX == 0) trap(FCU); # if (PSW.CDE) then if (cdc_increment()) then trap(CDO); # PSW.CDE = 1; # ret_addr = PC + 4; # tmp_FCX = FCX; # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA,16 * word) = {PCXI,PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15}; # PCXI.PCPN = ICR.CCPN; # PCXI.PIE = ICR.IE; # PCXI.UL = 1; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # PC = PC + sign_ext(2 * disp24); # A[11] = ret_addr[31:0]; # if (tmp_FCX == LCX) trap(FCD); saveCallerState(FCX, LCX, PCXI); a11 = inst_next; call off24pc; } # CALLA disp24 (B) :calla off24abs is PCPMode=0 & ( op0007=0xed ) ... 
& off24abs { #TODO call # if (FCX == 0) trap(FCU); # if (PSW.CDE) then if (cdc_increment()) then trap(CDO); # PSW.CDE = 1; # ret_addr = PC + 4; # tmp_FCX = FCX; # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA,16 * word) = {PCXI,PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15}; # PCXI.PCPN = ICR.CCPN; # PCXI.PIE = ICR.IE; # PCXI.UL = 1; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # PC = {disp24[23:20], 7'b0, disp24[19:0], 1'b0}; # A[11] = ret_addr[31:0]; # if (tmp_FCX == LCX) trap(FCD); saveCallerState(FCX, LCX, PCXI); a11 = inst_next; call off24abs; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # CALLI A[a] (RR) :calli Ra0811 is PCPMode=0 & Ra0811 & op0007=0x2d & op1215=0x0 ; op1631=0x0 { #TODO call # if (FCX == 0) trap(FCU); # if (PSW.CDE) then if(cdc_increment()) then trap(CDO); # PSW.CDE = 1; # ret_addr = PC + 4; # tmp_FCX = FCX; # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA,16 * word) = {PCXI,PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15}; # PCXI.PCPN = ICR.CCPN; # PCXI.PIE = ICR.IE; # PCXI.UL = 1; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # PC = {A[a][31:1], 1'b0}; # A[11] = ret_addr[31:0]; # if (tmp_FCX == LCX) trap(FCD); saveCallerState(FCX, LCX, PCXI); a11 = inst_next; local tmp:4 = Ra0811 & 0xFFFFFFFE; call [tmp]; } @endif # CLO D[c], D[a] (RR) :clo Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xf & op1215=0x0 ; Rd2831 & op1627=0x1c0 { Rd2831 = lzcount(~Rd0811); } # CLO.H D[c], D[a] (RR) :clo.h Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xf & op1215=0x0 ; Rd2831 & op1627=0x7d0 { local tmp1:2 = Rd0811[16,16]; local tmp0:2 = Rd0811[0,16]; Rd2831[16,16] = lzcount(~tmp1); Rd2831[0,16] = lzcount(~tmp0); } # CLS D[c], D[a] (RR) :cls Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xf & op1215=0x0 ; Rd2831 & op1627=0x1d0 { local tmp:4 = (Rd0811 ^ (Rd0811<<1))|0x1; Rd2831 = lzcount(tmp); } # CLS.H D[c], D[a] (RR) :cls.h Rd2831,Rd0811 
is PCPMode=0 & Rd0811 & op0007=0xf & op1215=0x0 ; Rd2831 & op1627=0x7e0 { local tmp1:2 = (Rd0811[16,16] ^ (Rd0811[16,16]<<1))|0x1; local tmp0:2 = (Rd0811[0,16] ^ (Rd0811[0,16]<<1))|0x1; Rd2831[16,16] = lzcount(tmp1); Rd2831[0,16] = lzcount(tmp0); } # CLZ D[c], D[a] (RR) :clz Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xf & op1215=0x0 ; Rd2831 & op1627=0x1b0 { Rd2831 = lzcount(Rd0811); } # CLZ.H D[c], D[a] (RR) :clz.h Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xf & op1215=0x0 ; Rd2831 & op1627=0x7c0 { local result:4 = (lzcount(Rd0811[16,16]) << 16) | lzcount(Rd0811[0,16]); Rd2831 = result; } # CMOV D[a], D[15], D[b] (SRR) :cmov Rd0811,d15,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x2a { ternary(Rd0811, d15 != 0, Rd1215, Rd0811); } # CMOV D[a], D[15], const4 (SRC) :cmov Rd0811,d15,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0xaa { ternary(Rd0811, d15 != 0, const1215S, Rd0811); } # CMOVN D[a], D[15], D[b] (SRR) :cmovn Rd0811,d15,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x6a { ternary(Rd0811, d15 == 0, Rd1215, Rd0811); } # CMOVN D[a], D[15], const4 (SRC) :cmovn Rd0811,d15,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0xea { ternary(Rd0811, d15 == 0, const1215S, Rd0811); } @if defined(TRICORE_V2) # CMPSWAP.W A[b], off10, E[a] (BO)(Base + Short Offset Addressing Mode) # CMPSWAP.W P[b], E[a] (BO)(Bit-reverse Addressing Mode) # CMPSWAP.W A[b], off10, E[a] (BO)(Post-increment Addressing Mode) # CMPSWAP.W A[b], off10, E[a] (BO)(Pre-increment Addressing Mode) # CMPSWAP.W P[b], E[a] (BO)(Index Addressing Mode) :cmpswap.w BO,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & Reo0811 & op0607=0x1 ; op2225=0x3 ) & BO { build BO; local tmp:4 = *[ram]:4 BO; Ree0811 = tmp; ternary(tmp, tmp == Reo0811, Ree0811, tmp); *[ram]:4 BO = tmp; } @endif @if defined(TRICORE_V2) # CMPSWAP.W P[b], off10, E[a] (BO)(Circular Addressing Mode) #:cmpswap.w BO,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & Reo0811 & op0007=0x69 ; op2227=0x13 ) & BO 
:cmpswap.w [Rpe1215/Rpo1215^"+c"^]off10,Ree0811/Reo0811 is PCPMode=0 & Ree0811 & Reo0811 & Rpe1215 & Rpo1215 & op0007=0x69 ; off10 & op2227=0x13 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); local tmp:4 = *[ram]:4 EA; Ree0811 = tmp; ternary(tmp, tmp == Reo0811, Ree0811, tmp); *[ram]:4 EA = tmp; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # CMP.F D[c], D[a], D[b] (RR) :cmp.f Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x1 { #TODO float #TODO flags local tmpDa = Rd0811; local tmpDb = Rd1215; Rd2831 = 0; Rd2831[0,1] = tmpDa f< tmpDb; Rd2831[1,1] = tmpDa f== tmpDb; Rd2831[2,1] = tmpDa f> tmpDb; Rd2831[3,1] = nan(tmpDa) || nan(tmpDb); Rd2831[4,1] = tmpDa[23,8] == 0 && tmpDa[0,23] != 0; Rd2831[5,1] = tmpDb[23,8] == 0 && tmpDb[0,23] != 0; } @endif :cop op2027[op1617],Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; op1617 & op2027 & Rd2831 & op1819=0x0 { # Rd2831 = op2027[op1617](Rd0811,Rd1215); local op2:4 = op2027; local proc:4 = op1617; Rd2831 = coprocessor(op2, proc, Rd0811, Rd1215); } :cop op2023[op1617],Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x6b ; op1617 & op2023 & Rd2427 & Rd2831 & op1819=0x0 { # Rd2831 = op2023[op1617](Rd2427,Rd0811,Rd1215); local op2:4 = op2023; local proc:4 = op1617; Rd2831 = coprocessor(op2, proc, Rd2427, Rd0811, Rd1215); } @if defined(TRICORE_V2) # CRC32 D[c], D[b], D[a] (RR) :crc32 Rd2831,Rd1215,Rd0811 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x30 { #TODO crc of Rd0811 and the inverse of Rd1215 into Rd2831 # crc is crc32, initial value of Rd1215 should be zero Rd2831 = crc32(Rd1215, Rd0811); } @endif # CSUB D[c], D[d], D[a], D[b] (RRR) :csub Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x2b ; Rd2427 & Rd2831 & op1623=0x20 { local result:4; ternary(result, Rd2427 != 0, Rd0811 - Rd1215, Rd0811); overflowflags(result); Rd2831 = result; } # CSUBN D[c], D[d], D[a], D[b] (RRR) 
:csubn Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x2b ; Rd2427 & Rd2831 & op1623=0x30
{
	# CSUBN: conditional subtract on condition NOT set:
	#   result = (D[d] == 0) ? D[a] - D[b] : D[a]
	# (fixed: original selected between D[a] and D[b] and never subtracted;
	#  compare the correct CSUB constructor above)
	local result:4;
	ternary(result, Rd2427 == 0, Rd0811 - Rd1215, Rd0811);
	overflowflags(result);
	Rd2831 = result;
}
# DEBUG (SR)
:debug is PCPMode=0 & op0007=0x0 & op0815=0xa0
{
	# No-op unless the debug-enable bit is set
	if ($(DBGSR_DE) == 0) goto inst_next;
	debug();
}
# DEBUG (SYS)
:debug is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x100
{
	if ($(DBGSR_DE) == 0) goto inst_next;
	debug();
}
# DEXTR D[c], D[a], D[b], pos (RRPW)
:dextr Rd2831,Rd0811,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x77 ; Rd2831 & const2327Z & op1622=0x0
{
	# Extract a 32-bit window from the concatenation {D[a],D[b]} shifted left by pos
	local shift = const2327Z;
	Rd2831 = (Rd0811 << shift) | (Rd1215 >> (32 - shift));
}
# DEXTR D[c], D[a], D[b], D[d] (RRRR)
:dextr Rd2831,Rd0811,Rd1215,Rd2427 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x17 ; Rd2427 & Rd2831 & op1623=0x80
{
	# Same as RRPW form but the shift amount comes from D[d][4:0]
	local shift = Rd2427[0,5];
	Rd2831 = (Rd0811 << shift) | (Rd1215 >> (32 - shift));
}
# DISABLE (SYS)
:disable is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x340
{
	$(ICR_IE) = 0;
}
@if defined(TRICORE_V2)
# DISABLE D[a] (SYS)
:disable Rd0811 is PCPMode=0 & Rd0811 & op0007=0xd & op1215=0x0 ; op1631=0x3c0
{
	# Save the previous interrupt-enable state in D[a], then disable interrupts
	Rd0811 = zext($(ICR_IE));
	$(ICR_IE) = 0;
}
@endif
# DIV E[c], D[a], D[b] (RR)
:div Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x201
{
	#TODO divide
	#TODO flags
	# Signed divide: quotient in the even register, remainder in the odd register
	local divres = Rd0811 s/ Rd1215;
	local divmod = Rd0811 s% Rd1215;
	Ree2831 = divres;
	Reo2831 = divmod;
}
# DIV.U E[c], D[a], D[b] (RR)
:div.u Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x211
{
	#TODO divide
	#TODO flags
	# Unsigned divide: quotient in the even register, remainder in the odd register
	local divres = Rd0811 / Rd1215;
	local divmod = Rd0811 % Rd1215;
	Ree2831 = divres;
	Reo2831 = divmod;
}
@if defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# DIV.F D[c], D[a], D[b] (RR)
:div.f Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x51
{
	#TODO float
	#TODO divide
	#TODO flags
	Rd2831 = Rd0811 f/ Rd1215;
} @endif # DSYNC (SYS) :dsync is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x480 { dsync(); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVADJ E[c], E[d], D[b] (RRR) :dvadj Ree2831/Reo2831,Ree2427/Reo2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & op1623=0xd0 { #TODO divide sequence local q_sign = Reo2427[31,1] ^ Rd1215[31,1]; local x_sign = Reo2427[31,1]; local eq_pos = x_sign & (Reo2427 == Rd1215); local eq_neg = x_sign & (Reo2427 == -Rd1215); local quotient:4; ternary(quotient, ((q_sign & ~eq_neg) | eq_pos), Ree2427 + 1, Ree2427); local remainder:4; ternary(remainder, (eq_pos | eq_neg), 0, Reo2427); local absReo2427:4; int_abs(absReo2427, Reo2427); local absRd1215:4; int_abs(absRd1215, Rd1215); local _gt = absReo2427 > absRd1215; local _eq = !x_sign && (absReo2427 == absRd1215); ternary(Reo2831, (_eq | _gt) != 0, 0, remainder); ternary(Ree2831, (_eq | _gt) != 0, 0, quotient); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVINIT E[c], D[a], D[b] (RR) :dvinit Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x1a0 { #TODO divide sequence local dividend:4 = Rd0811; local divisor:4 = Rd1215; Ree2831 = dividend; Reo2831 = 0xFFFFFFFF * zext(dividend[31,1]); $(PSW_V) = ((divisor == 0) || ((divisor == 0xFFFFFFFF) && (dividend == 0x80000000))); $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVINIT.B E[c], D[a], D[b] (RR) :dvinit.b Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x5a0 { #TODO divide sequence local dividend:4 = Rd0811; local divisor:4 = Rd1215; local quotient_sign = !(dividend[31,1] == divisor[31,1]); Ree2831 = (dividend << 24) | (0xFFFFFF * zext(quotient_sign)); Reo2831 = dividend s>> 8; $(PSW_V) = ((divisor == 0) || 
((divisor == 0xFFFFFFFF) && (dividend == 0xFFFFFF80))); $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVINIT.BU E[c], D[a], D[b] (RR) :dvinit.bu Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x4a0 { #TODO divide sequence local dividend:4 = Rd0811; # D[a] local divisor:4 = Rd1215; # D[b] Ree2831 = dividend << 24; Reo2831 = dividend >> 8; $(PSW_V) = (divisor == 0); $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVINIT.H E[c], D[a], D[b] (RR) :dvinit.h Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x3a0 { #TODO divide sequence local dividend:4 = Rd0811; # D[a] local divisor:4 = Rd1215; # D[b] local quotient_sign = !(dividend[31,1] == divisor[31,1]); Ree2831 = (dividend << 16) | (zext(quotient_sign) * 0xFFFF); Reo2831 = dividend s>> 16; $(PSW_V) = ((divisor == 0) || ((divisor == 0xFFFFFFFF) && (dividend == 0xFFFF8000))); $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVINIT.HU E[c], D[a], D[b] (RR) :dvinit.hu Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0x2a0 { #TODO divide sequence local dividend:4 = Rd0811; # D[a] local divisor:4 = Rd1215; # D[b] Ree2831 = dividend << 16; Reo2831 = dividend >> 16; $(PSW_V) = (divisor == 0); $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVINIT.U E[c], D[a], D[b] (RR) :dvinit.u Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Ree2831 & Reo2831 & op1627=0xa0 { #TODO divide sequence local dividend:4 = Rd0811; # D[a] local divisor:4 = Rd1215; # D[b] Ree2831 = dividend; 
Reo2831 = 0; $(PSW_V) = (divisor == 0); $(PSW_SV) = $(PSW_V) | $(PSW_SV); $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVSTEP E[c], E[d], D[b] (RRR) :dvstep Ree2831/Reo2831,Ree2427/Reo2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & op1623=0xf0 { #TODO divide sequence local dividend_sign = Reo2427[31,1] == 1; local divisor_sign = Rd1215[31,1] == 1; local quotient_sign = dividend_sign != divisor_sign; local addend:4; ternary(addend, quotient_sign != 0, Rd1215, 0 - Rd1215); local dividend_quotient:4 = Ree2427; local remainder:4 = Reo2427; local temp:4 = 0; local index:1 = 0; remainder = (remainder << 1) | zext(dividend_quotient[31,1]); dividend_quotient = dividend_quotient << 1; temp = remainder + addend; ternary(remainder, (temp s< 0) == dividend_sign, temp, remainder); ternary(temp, (temp s< 0) == dividend_sign, zext(!quotient_sign), zext(quotient_sign)); dividend_quotient = dividend_quotient | temp; index = index + 1; if (index < 8) goto ; Reo2831 = remainder; Ree2831 = dividend_quotient; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # DVSTEP.U E[c], E[d], D[b] (RRR) :dvstep.u Ree2831/Reo2831,Ree2427/Reo2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & op1623=0xe0 { #TODO divide sequence local divisor = Rd1215; local dividend_quotient = Ree2427; local remainder = Reo2427; local temp:4 = 0; local index:1 = 0; remainder = (remainder << 1) | zext(dividend_quotient[31,1]); dividend_quotient = dividend_quotient << 1; temp = remainder - divisor; ternary(remainder, temp s< 0, remainder, temp); dividend_quotient = dividend_quotient | zext(!(temp s< 0)); index = index + 1; if (index < 8) goto ; Reo2427 = remainder; Ree2427 = dividend_quotient; } @endif # ENABLE (SYS) :enable is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x300 { $(ICR_IE) = 1; } # EQ D[15], D[a], 
D[b] (SRR) :eq d15,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x3a { d15 = zext(Rd0811 == Rd1215); } # EQ D[15], D[a], const4 (SRC) :eq d15,Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0xba { d15 = zext(Rd0811 == const1215S); } # EQ D[c], D[a], D[b] (RR) :eq Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x100 { Rd2831 = zext(Rd0811 == Rd1215); } # EQ D[c], D[a], const9 (RC) :eq Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x10 ) & const1220S { Rd2831 = zext(Rd0811 == const1220S); } # EQ.A D[c], A[a], A[b] (RR) :eq.a Rd2831,Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x1 ; Rd2831 & op1627=0x400 { Rd2831 = zext(Ra0811 == Ra1215); } # EQ.B D[c], D[a], D[b] (RR) :eq.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x500 { local result3:4 = 0xff * zext(Rd0811[24,8] == Rd1215[24,8]); local result2:4 = 0xff * zext(Rd0811[16,8] == Rd1215[16,8]); local result1:4 = 0xff * zext(Rd0811[8,8] == Rd1215[8,8]); local result0:4 = 0xff * zext(Rd0811[0,8] == Rd1215[0,8]); Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0); } # EQ.H D[c], D[a], D[b] (RR) :eq.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x700 { local result1:4 = 0xffff * zext(Rd0811[16,16] == Rd1215[16,16]); local result0:4 = 0xffff * zext(Rd0811[0,16] == Rd1215[0,16]); Rd2831 = (result1 << 16) | (result0); } # EQ.W D[c], D[a], D[b] (RR) :eq.w Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x900 { Rd2831 = 0xffffffff * zext(Rd0811 == Rd1215); } # EQANY.B D[c], D[a], D[b] (RR) :eqany.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x560 { local result3 = Rd0811[24,8] == Rd1215[24,8]; local result2 = Rd0811[16,8] == Rd1215[16,8]; local result1 = Rd0811[8,8] == Rd1215[8,8]; local result0 = Rd0811[0,8] == Rd1215[0,8]; Rd2831 = 
zext(result3 | result2 | result1 | result0); } # EQANY.B D[c], D[a], const9 (RC) :eqany.b Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x56 ) & const1220S { local result3 = Rd0811[24,8] == const1220S[24,8]; local result2 = Rd0811[16,8] == const1220S[16,8]; local result1 = Rd0811[8,8] == const1220S[8,8]; local result0 = Rd0811[0,8] == const1220S[0,8]; Rd2831 = zext(result3 | result2 | result1 | result0); } # EQANY.H D[c], D[a], D[b] (RR) :eqany.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x760 { local result1 = Rd0811[16,16] == Rd1215[16,16]; local result0 = Rd0811[0,16] == Rd1215[0,16]; Rd2831 = zext(result1 | result0); } # EQANY.H D[c], D[a], const9 (RC) :eqany.h Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x76 ) & const1220S { local result1 = Rd0811[16,16] == const1220S[16,16]; local result0 = Rd0811[0,16] == const1220S[0,16]; Rd2831 = zext(result1 | result0); } # EQZ.A D[c], A[a] (RR) :eqz.a Rd2831,Ra0811 is PCPMode=0 & Ra0811 & op0007=0x1 & op1215=0x0 ; Rd2831 & op1627=0x480 { Rd2831 = zext(Ra0811 == 0); } # EXTR D[c], D[a], E[d] (RRRR) :extr Rd2831,Rd0811,Ree2427/Reo2427 is PCPMode=0 & Rd0811 & op0007=0x17 & op1215=0x0 ; Rd2831 & Ree2427 & Reo2427 & op1623=0x40 { local shift:4 = zext(Ree2427[0,5]); local tmp:4 = (Rd0811 << (32 - shift - zext(Reo2427[0,5]))) s>> (32 - zext(Reo2427[0,5])); Rd2831 = tmp; } # EXTR D[c], D[a], pos, width (RRPW) :extr Rd2831,Rd0811,const2327Z,const1620Z is PCPMode=0 & Rd0811 & op0007=0x37 & op1215=0x0 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local shift:4 = const2327Z; local tmp:4 = (Rd0811 << (32 - shift - const1620Z)) s>> (32 - const1620Z); Rd2831 = tmp; } # EXTR D[c], D[a], D[d], width (RRRW) :extr Rd2831,Rd0811,Rd2427,const1620Z is PCPMode=0 & Rd0811 & op0007=0x57 & op1215=0x0 ; Rd2427 & Rd2831 & const1620Z & op2123=0x2 { local shift:4 = zext(Rd2427[0,5]); local tmp:4 = (Rd0811 << (32 - shift - const1620Z)) 
s>> (32 - const1620Z); Rd2831 = tmp; } # EXTR.U D[c], D[a], E[d] (RRRR) :extr.u Rd2831,Rd0811,Ree2427/Reo2427 is PCPMode=0 & Rd0811 & op0007=0x17 & op1215=0x0 ; Rd2831 & Ree2427 & Reo2427 & op1623=0x60 { local tmp:4 = Rd0811 >> Ree2427[0,5]; local mask:4 = (1 << Reo2427[0,5]) - 1; Rd2831 = tmp & mask; } # EXTR.U D[c], D[a], pos, width (RRPW) :extr.u Rd2831,Rd0811,const2327Z,const1620Z is PCPMode=0 & Rd0811 & op0007=0x37 & op1215=0x0 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local tmp:4 = Rd0811 >> const2327Z; local mask:4 = (1 << const1620Z) - 1; Rd2831 = tmp & mask; } # EXTR.U D[c], D[a], D[d], width (RRRW) :extr.u Rd2831,Rd0811,Rd2427,const1620Z is PCPMode=0 & Rd0811 & op0007=0x57 & op1215=0x0 ; Rd2427 & Rd2831 & const1620Z & op2123=0x3 { local tmp:4 = Rd0811 >> Rd2427[0,5]; local mask:4 = (1 << const1620Z) - 1; Rd2831 = tmp & mask; } @if defined(TRICORE_V2) # FCALL disp24 (B) :fcall off24pc is PCPMode=0 & ( op0007=0x61 ) ... & off24pc { #TODO call a10 = a10 - 4; *[ram]:4 a10 = a11; a11 = inst_next; call off24pc; } @endif @if defined(TRICORE_V2) # CALLA disp24 (B) :fcalla off24abs is PCPMode=0 & ( op0007=0xe1 ) ... 
& off24abs { #TODO call a10 = a10 - 4; *[ram]:4 a10 = a11; a11 = inst_next; call off24abs; } @endif @if defined(TRICORE_V2) # FCALLI A[a] (RR) :fcalli Ra0811 is PCPMode=0 & Ra0811 & op0007=0x2d & op1215=0x0 ; op1631=0x10 { #TODO call a10 = a10 - 4; *[ram]:4 a10 = a11; a11 = inst_next; local tmp:4 = Ra0811 & 0xFFFFFFFE; call [tmp]; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FRET (SR) :fret is PCPMode=0 & op0007=0x0 & op0811=0x0 & op1215=0x7 { local tmp:4 = a11 & 0xFFFFFFFE; a11 = *[ram]:4 a10; a10 = a10 + 4; return [tmp]; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FRET (SYS) :fret is PCPMode=0 & op0007=0x0d & op0815=0x0 ; op1621=0x0 & op2227=0x3 & op2831=0x0 { local tmp:4 = a11 & 0xFFFFFFFE; a11 = *[ram]:4 a10; a10 = a10 + 4; return [tmp]; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FTOI D[c], D[a] (RR) :ftoi Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x101 { #TODO float #TODO flags Rd2831 = trunc(Rd0811); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FTOIZ D[c], D[a] (RR) :ftoiz Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x131 { #TODO float #TODO flags #TODO round Rd2831 = floor(trunc(Rd0811)); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FTOQ31 D[c], D[a], D[b] (RR) :ftoq31 Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x111 { #TODO float #TODO flags } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FTOQ31Z D[c], D[a], D[b] (RR) :ftoq31z Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x181 { #TODO float #TODO flags #TODO round } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FTOU D[c], D[a] (RR) :ftou Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x121 { #TODO float #TODO flags #TODO unsigned } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # FTOUZ 
D[c], D[a] (RR) :ftouz Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x171 { #TODO float #TODO flags #TODO unsigned #TODO round } @endif # GE D[c], D[a], D[b] (RR) :ge Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x140 { Rd2831 = zext(Rd0811 s>= Rd1215); } # GE D[c], D[a], const9 (RC) :ge Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x14 ) & const1220S { Rd2831 = zext(Rd0811 s>= const1220S); } # GE.A D[c], A[a], A[b] (RR) :ge.a Rd2831,Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x1 ; Rd2831 & op1627=0x430 { Rd2831 = zext(Ra0811 >= Ra1215); } # GE.U D[c], D[a], D[b] (RR) :ge.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x150 { Rd2831 = zext(Rd0811 >= Rd1215); } # GE.U D[c], D[a], const9 (RC) :ge.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x15 ) & const1220Z { Rd2831 = zext(Rd0811 >= const1220Z); } # IMASK E[c], D[b], pos, width (RRPW) :imask Ree2831/Reo2831,Rd1215,const2327Z,const1620Z is PCPMode=0 & Rd1215 & op0007=0x37 & op0811=0x0 ; Ree2831 & Reo2831 & const1620Z & const2327Z & op2122=0x1 { local tmp:4 = 1; local tmp2:4 = Rd1215; tmp = (tmp << const1620Z) - 1; Reo2831 = tmp << const2327Z; Ree2831 = tmp2 << const2327Z; } # IMASK E[c], D[b], D[d], width (RRRW) :imask Ree2831/Reo2831,Rd1215,Rd2427,const1620Z is PCPMode=0 & Rd1215 & op0007=0x57 & op0811=0x0 ; Rd2427 & Ree2831 & Reo2831 & const1620Z & op2123=0x1 { local tmp:4 = 1; local tmp2:4 = Rd1215; local tmp3:4 = Rd2427; tmp = (tmp << const1620Z) - 1; Reo2831 = tmp << tmp3[0,5]; Ree2831 = tmp2 << tmp3[0,5]; } # IMASK E[c], const4, pos, width (RCPW) :imask Ree2831/Reo2831,const1215Z,const2327Z,const1620Z is PCPMode=0 & const1215Z & op0007=0xb7 & op0811=0x0 ; Ree2831 & Reo2831 & const1620Z & const2327Z & op2122=0x1 { local tmp:4 = 1; tmp = (tmp << const1620Z) - 1; Reo2831 = tmp << const2327Z; Ree2831 = const1215Z << 
const2327Z; } # IMASK E[c], const4, D[d], width (RCRW) :imask Ree2831/Reo2831,const1215Z,Rd2427,const1620Z is PCPMode=0 & const1215Z & op0007=0xd7 & op0811=0x0 ; Rd2427 & Ree2831 & Reo2831 & const1620Z & op2123=0x1 { local tmp:4 = 1; local tmp2:4 = Rd2427; tmp = (tmp << const1620Z) - 1; Reo2831 = tmp << tmp2[0,5]; Ree2831 = const1215Z << tmp2[0,5]; } # INS.T D[c], D[a], pos1, D[b], pos2 (BIT) :ins.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x67 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmp:4 = Rd0811 & ~(1 << const1620Z); Rd2831 = tmp | (((Rd1215 >> const2327Z) & 1) << const1620Z); } # INSERT D[c], D[a], D[b], E[d] (RRRR) :insert Rd2831,Rd0811,Rd1215,Ree2427/Reo2427 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x17 ; Rd2831 & Ree2427 & Reo2427 & op1623=0x0 { local tmp:4 = 1; tmp = (tmp << Reo2427) - 1; tmp = tmp << Ree2427[0,5]; Rd2831 = (Rd0811 & ~tmp) | ((Rd1215 << Ree2427[0,5]) & tmp); } # INSERT D[c], D[a], D[b], pos, width (RRPW) :insert Rd2831,Rd0811,Rd1215,const2327Z,const1620Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x37 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmp:4 = 1; tmp = (tmp << const1620Z) - 1; tmp = tmp << const2327Z; Rd2831 = (Rd0811 & ~tmp) | ((Rd1215 << const2327Z) & tmp); } # INSERT D[c], D[a], D[b], D[d], width (RRRW) :insert Rd2831,Rd0811,Rd1215,Rd2427,const1620Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x57 ; Rd2427 & Rd2831 & const1620Z & op2123=0x0 { local tmp:4 = 1; tmp = (tmp << const1620Z) - 1; tmp = tmp << Rd2427[0,5]; Rd2831 = (Rd0811 & ~tmp) | ((Rd1215 << Rd2427[0,5]) & tmp); } # INSERT D[c], D[a], const4, E[d] (RCRR) :insert Rd2831,Rd0811,const1215Z,Ree2427/Reo2427 is PCPMode=0 & Rd0811 & const1215Z & op0007=0x97 ; Rd2831 & Ree2427 & Reo2427 & op1623=0x0 { local tmp:4 = 1; tmp = (tmp << Reo2427) - 1; tmp = tmp << Ree2427[0,5]; Rd2831 = (Rd0811 & ~tmp) | ((const1215Z << Ree2427[0,5]) & tmp); } # INSERT D[c], D[a], const4, pos, width (RCPW) :insert 
Rd2831,Rd0811,const1215Z,const2327Z,const1620Z is PCPMode=0 & Rd0811 & const1215Z & op0007=0xb7 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmp:4 = 1; tmp = (tmp << const1620Z) - 1; tmp = tmp << const2327Z; Rd2831 = (Rd0811 & ~tmp) | ((const1215Z << const2327Z) & tmp); } # INSERT D[c], D[a], const4, D[d], width (RCRW) :insert Rd2831,Rd0811,const1215Z,Rd2427,const1620Z is PCPMode=0 & Rd0811 & const1215Z & op0007=0xd7 ; Rd2427 & Rd2831 & const1620Z & op2123=0x0 { local tmp:4 = 1; tmp = (tmp << const1620Z) - 1; tmp = tmp << Rd2427[0,5]; Rd2831 = (Rd0811 & ~tmp) | ((const1215Z << Rd2427[0,5]) & tmp); } # INSN.T D[c], D[a], pos1, D[b], pos2 (BIT) :insn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x67 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmp:4 = Rd0811 & ~(1 << const1620Z); Rd2831 = tmp | (((~Rd1215 >> const2327Z) & 1) << const1620Z); } # ISYNC (SYS) :isync is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x4c0 { isync(); } @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ITOF D[c], D[a] (RR) :itof Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x141 { #TODO float #TODO flags Rd2831 = int2float(Rd0811); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # IXMAX E[c], E[d], D[b] (RRR) :ixmax Re2831,Re2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Re2427 & Re2831 & op1623=0xa0 { local tmp:8 = Re2427; local tmp2:4 = Rd1215; Re2831[0,16] = tmp[0,16] + 2; Re2831[48,16] = 0; local cond1 = (tmp2[0,16] s>= tmp2[16,16]) && (tmp2[0,16] s> tmp[32,16]); if (cond1) goto <_first>; local cond2 = (tmp2[16,16] s> tmp2[0,16]) && (tmp2[16,16] s> tmp[32,16]); if (cond2) goto <_second>; Re2831[32,16] = tmp[32,16]; Re2831[16,16] = tmp[16,16]; goto inst_next; <_first> Re2831[32,16] = tmp2[0,16]; Re2831[16,16] = tmp[0,16]; goto inst_next; <_second> Re2831[32,16] = tmp2[16,16]; Re2831[16,16] = tmp[0,16] + 1; } @endif @if defined(TRICORE_RIDER_D) || 
defined(TRICORE_V2) # IXMAX.U E[c], E[d], D[b] (RRR) :ixmax.u Re2831,Re2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Re2427 & Re2831 & op1623=0xb0 { local tmp:8 = Re2427; local tmp2:4 = Rd1215; Re2831[0,16] = tmp[0,16] + 2; Re2831[48,16] = 0; local cond1 = (tmp2[0,16] >= tmp2[16,16]) && (tmp2[0,16] > tmp[32,16]); if (cond1) goto <_first>; local cond2 = (tmp2[16,16] > tmp2[0,16]) && (tmp2[16,16] > tmp[32,16]); if (cond2) goto <_second>; Re2831[32,16] = tmp[32,16]; Re2831[16,16] = tmp[16,16]; goto inst_next; <_first> Re2831[32,16] = tmp2[0,16]; Re2831[16,16] = tmp[0,16]; goto inst_next; <_second> Re2831[32,16] = tmp2[16,16]; Re2831[16,16] = tmp[0,16] + 1; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # IXMIN E[c], E[d], D[b] (RRR) :ixmin Re2831,Re2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Re2427 & Re2831 & op1623=0x80 { local tmp:8 = Re2427; local tmp2:4 = Rd1215; Re2831[0,16] = tmp[0,16] + 2; Re2831[48,16] = 0; local cond1 = (tmp2[0,16] s<= tmp2[16,16]) && (tmp2[0,16] s< tmp[32,16]); if (cond1) goto <_first>; local cond2 = (tmp2[16,16] s< tmp2[0,16]) && (tmp2[16,16] s< tmp[32,16]); if (cond2) goto <_second>; Re2831[32,16] = tmp[32,16]; Re2831[16,16] = tmp[16,16]; goto inst_next; <_first> Re2831[32,16] = tmp2[0,16]; Re2831[16,16] = tmp[0,16]; goto inst_next; <_second> Re2831[32,16] = tmp2[16,16]; Re2831[16,16] = tmp[0,16] + 1; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # IXMIN.U E[c], E[d], D[b] (RRR) :ixmin.u Re2831,Re2427,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x6b & op0811=0x0 ; Re2427 & Re2831 & op1623=0x90 { local tmp:8 = Re2427; local tmp2:4 = Rd1215; Re2831[0,16] = tmp[0,16] + 2; Re2831[48,16] = 0; local cond1 = (tmp2[0,16] <= tmp2[16,16]) && (tmp2[0,16] < tmp[32,16]); if (cond1) goto <_first>; local cond2 = (tmp2[16,16] < tmp2[0,16]) && (tmp2[16,16] < tmp[32,16]); if (cond2) goto <_second>; Re2831[32,16] = tmp[32,16]; Re2831[16,16] = tmp[16,16]; goto inst_next; <_first> Re2831[32,16] = 
tmp2[0,16]; Re2831[16,16] = tmp[0,16]; goto inst_next; <_second> Re2831[32,16] = tmp2[16,16]; Re2831[16,16] = tmp[0,16] + 1; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # J disp8 (SB) :j off0815pc8s is PCPMode=0 & off0815pc8s & op0007=0x3c { goto off0815pc8s; } @endif # J disp24 (B) :j off24pc is PCPMode=0 & ( op0007=0x1d ) ... & off24pc { goto off24pc; } # JA disp24 (B) :ja off24abs is PCPMode=0 & ( op0007=0x9d ) ... & off24abs { goto off24abs; } @if defined(TRICORE_V2) # JEQ D[15], D[b], disp4 (SBR) :jeq d15,Rd1215,off0811pc4z16 is PCPMode=0 & Rd1215 & d15 & off0811pc4z16 & op0007=0xbe { if (d15 == Rd1215) goto off0811pc4z16; } @endif @if defined(TRICORE_V2) # JEQ D[15], const4, disp4 (SBC) :jeq d15,const1215S,off0811pc4z16 is PCPMode=0 & const1215S & d15 & off0811pc4z16 & op0007=0x9e { if (d15 == const1215S) goto off0811pc4z16; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JEQ D[15], D[b], disp4 (SBR) :jeq d15,Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & d15 & off0811pc4z & op0007=0x3e { if (d15 == Rd1215) goto off0811pc4z; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JEQ D[15], const4, disp4 (SBC) :jeq d15,const1215S,off0811pc4z is PCPMode=0 & const1215S & d15 & off0811pc4z & op0007=0x1e { if (d15 == const1215S) goto off0811pc4z; } @endif # JEQ D[a], D[b], disp15 (BRR) :jeq Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x5f ; off1630pc15s & op3131=0x0 { if (Rd0811 == Rd1215) goto off1630pc15s; } # JEQ D[a], const4, disp15 (BRC) :jeq Rd0811,const1215S,off1630pc15s is PCPMode=0 & Rd0811 & const1215S & op0007=0xdf ; off1630pc15s & op3131=0x0 { if (Rd0811 == const1215S) goto off1630pc15s; } # JEQ.A A[a], A[b], disp15 (BRR) :jeq.a Ra0811,Ra1215,off1630pc15s is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x7d ; off1630pc15s & op3131=0x0 { if (Ra0811 == Ra1215) goto off1630pc15s; } # JGE D[a], D[b], disp15 (BRR) :jge 
Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x7f ; off1630pc15s & op3131=0x0
{
	if (Rd0811 s>= Rd1215) goto off1630pc15s;
}

# JGE D[a], const4, disp15 (BRC) -- signed compare against sign-extended immediate
:jge Rd0811,const1215S,off1630pc15s is PCPMode=0 & Rd0811 & const1215S & op0007=0xff ; off1630pc15s & op3131=0x0
{
	if (Rd0811 s>= const1215S) goto off1630pc15s;
}

# JGE.U D[a], D[b], disp15 (BRR) -- unsigned compare
:jge.u Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x7f ; off1630pc15s & op3131=0x1
{
	if (Rd0811 >= Rd1215) goto off1630pc15s;
}

# JGE.U D[a], const4, disp15 (BRC) -- unsigned compare, const1215Z operand
:jge.u Rd0811,const1215Z,off1630pc15s is PCPMode=0 & Rd0811 & const1215Z & op0007=0xff ; off1630pc15s & op3131=0x1
{
	if (Rd0811 >= const1215Z) goto off1630pc15s;
}

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# JGEZ D[b], disp4 (SBR)
:jgez Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & off0811pc4z & op0007=0xce
{
	if (Rd1215 s>= 0) goto off0811pc4z;
}
@endif

@if defined(TRICORE_RIDER_D) || defined(TRICORE_RIDER_B) || defined(TRICORE_V2)
# JGTZ D[b], disp4 (SBR)
:jgtz Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & off0811pc4z & op0007=0x4e
{
	if (Rd1215 s> 0) goto off0811pc4z;
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# JI A[a] (SR)
# Indirect jump through A[a]; bit 0 of the target is cleared first
# (presumably to enforce halfword alignment -- TODO confirm against manual).
:ji Ra0811 is PCPMode=0 & Ra0811 & op0007=0xdc & op1215=0x0
{
	local tmp:4 = Ra0811;
	tmp[0,1] = 0;
	goto [tmp];
}
# Special case: "ji a11" is modeled as a return, since a11 is written with
# inst_next by the call constructors in this file. NOTE(review): this is a
# flow-recovery heuristic, not an architectural distinction.
:ji a11 is PCPMode=0 & op0811=11 & op0007=0xdc & op1215=0x0 & a11
{
	return [a11];
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# JI A[a] (RR)
:ji Ra0811 is PCPMode=0 & Ra0811 & op0007=0x2d & op1215=0x0 ; op1631=0x30
{
	local tmp:4 = Ra0811;
	tmp[0,1] = 0;
	goto [tmp];
}
# Same "ji a11" -> return modeling for the RR encoding.
:ji a11 is PCPMode=0 & op0811=11 & op0007=0x2d & op1215=0x0 & a11; op1631=0x30
{
	return [a11];
}
@endif

# JL disp24 (B)
# Jump-and-link: saves the return address in A[11] (no context save).
:jl off24pc is PCPMode=0 & ( op0007=0x5d ) ... & off24pc
{
	#TODO is this just a call w/o context switching?
a11 = inst_next; call off24pc; } # JLA disp24 (B) :jla off24abs is PCPMode=0 & ( op0007=0xdd ) ... & off24abs { #TODO is this just a call w/o context switching? a11 = inst_next; call off24abs; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JLEZ D[b], disp4 (SBR) :jlez Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & off0811pc4z & op0007=0x8e { if (Rd1215 s<= 0) goto off0811pc4z; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JLI A[a] (RR) :jli Ra0811 is PCPMode=0 & Ra0811 & op0007=0x2d & op1215=0x0 ; op1631=0x20 { a11 = inst_start + 4; local tmp:4 = Ra0811 & 0xFFFFFFFE; call [tmp]; } @endif # JLT D[a], D[b], disp15 (BRR) :jlt Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3f ; off1630pc15s & op3131=0x0 { if (Rd0811 s< Rd1215) goto off1630pc15s; } # JLT D[a], const4, disp15 (BRC) :jlt Rd0811,const1215S,off1630pc15s is PCPMode=0 & Rd0811 & const1215S & op0007=0xbf ; off1630pc15s & op3131=0x0 { if (Rd0811 s< const1215S) goto off1630pc15s; } # JLT.U D[a], D[b], disp15 (BRR) :jlt.u Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3f ; off1630pc15s & op3131=0x1 { if (Rd0811 < Rd1215) goto off1630pc15s; } # JLT.U D[a], const4, disp15 (BRC) :jlt.u Rd0811,const1215Z,off1630pc15s is PCPMode=0 & Rd0811 & const1215Z & op0007=0xbf ; off1630pc15s & op3131=0x1 { if (Rd0811 < const1215Z) goto off1630pc15s; } @if defined(TRICORE_RIDER_D) || defined(TRICORE_RIDER_B) || defined(TRICORE_V2) # JLTZ D[b], disp4 (SBR) :jltz Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & off0811pc4z & op0007=0xe { if (Rd1215 s< 0) goto off0811pc4z; } @endif @if defined(TRICORE_V2) # JNE D[15], D[b], disp4 (SBR) :jne d15,Rd1215,off0811pc4z16 is PCPMode=0 & Rd1215 & d15 & off0811pc4z16 & op0007=0xfe { if (d15 != Rd1215) goto off0811pc4z16; } @endif @if defined(TRICORE_V2) # JNE D[15], const4, disp4 (SBC) :jne d15,const1215S,off0811pc4z16 is PCPMode=0 & const1215S & d15 & off0811pc4z16 & 
op0007=0xde { if (d15 != const1215S) goto off0811pc4z16; } @endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# JNE D[15], D[b], disp4 (SBR)
:jne d15,Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & d15 & off0811pc4z & op0007=0x7e
{
	if (d15 != Rd1215) goto off0811pc4z;
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# JNE D[15], const4, disp4 (SBC)
:jne d15,const1215S,off0811pc4z is PCPMode=0 & const1215S & d15 & off0811pc4z & op0007=0x5e
{
	if (d15 != const1215S) goto off0811pc4z;
}
@endif

# JNE D[a], D[b], disp15 (BRR)
:jne Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x5f ; off1630pc15s & op3131=0x1
{
	if (Rd0811 != Rd1215) goto off1630pc15s;
}

# JNE D[a], const4, disp15 (BRC)
:jne Rd0811,const1215S,off1630pc15s is PCPMode=0 & Rd0811 & const1215S & op0007=0xdf ; off1630pc15s & op3131=0x1
{
	if (Rd0811 != const1215S) goto off1630pc15s;
}

# JNE.A A[a], A[b], disp15 (BRR)
:jne.a Ra0811,Ra1215,off1630pc15s is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x7d ; off1630pc15s & op3131=0x1
{
	if (Ra0811 != Ra1215) goto off1630pc15s;
}

# JNED D[a], D[b], disp15 (BRR)
# Jump if Not Equal and Decrement: the comparison uses the pre-decrement
# value of D[a], so it is captured in tmp before the update.
:jned Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x1f ; off1630pc15s & op3131=0x1
{
	local tmp = Rd0811;
	Rd0811 = Rd0811 - 1;
	if (tmp != Rd1215) goto off1630pc15s;
}

# JNED D[a], const4, disp15 (BRC)
:jned Rd0811,const1215S,off1630pc15s is PCPMode=0 & Rd0811 & const1215S & op0007=0x9f ; off1630pc15s & op3131=0x1
{
	local tmp = Rd0811;
	Rd0811 = Rd0811 - 1;
	if (tmp != const1215S) goto off1630pc15s;
}

# JNEI D[a], D[b], disp15 (BRR)
# Jump if Not Equal and Increment: D[a] is incremented; the comparison
# uses the pre-increment value.
:jnei Rd0811,Rd1215,off1630pc15s is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x1f ; off1630pc15s & op3131=0x0
{
	local tmp = Rd0811;
	Rd0811 = Rd0811 + 1;
	if (tmp != Rd1215) goto off1630pc15s;
}

# JNEI D[a], const4, disp15 (BRC)
# BUGFIX: JNEI increments D[a]; the previous implementation decremented it,
# which is JNED semantics. Now consistent with the BRR variant above.
:jnei Rd0811,const1215S,off1630pc15s is PCPMode=0 & Rd0811 & const1215S & op0007=0x9f ; off1630pc15s & op3131=0x0
{
	local tmp = Rd0811;
	Rd0811 = Rd0811 + 1;
	if
(tmp != const1215S) goto off1630pc15s; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JNZ D[15], disp8 (SB) :jnz d15,off0815pc8s is PCPMode=0 & d15 & off0815pc8s & op0007=0xee { if (d15 != 0) goto off0815pc8s; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JNZ D[b], disp4 (SBR) :jnz Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & off0811pc4z & op0007=0xf6 { if (Rd1215 != 0) goto off0811pc4z; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_RIDER_B) || defined(TRICORE_V2) # JNZ.A A[b], disp4 (SBR) :jnz.a Ra1215,off0811pc4z is PCPMode=0 & Ra1215 & off0811pc4z & op0007=0x7c { if (Ra1215 != 0) goto off0811pc4z; } @endif # JNZ.A A[a], disp15 (BRR) :jnz.a Ra0811,off1630pc15s is PCPMode=0 & Ra0811 & op0007=0xbd & op1215=0x0 ; off1630pc15s & op3131=0x1 { if (Ra0811 != 0) goto off1630pc15s; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JNZ.T D[15], n, disp4 (SBRN) :jnz.t d15,const1215Z,off0811pc4z is PCPMode=0 & const1215Z & d15 & off0811pc4z & op0007=0xae { local tmp = d15 & (1 << const1215Z); if (tmp != 0) goto off0811pc4z; } @endif # JNZ.T D[a], n, disp15 (BRN) :jnz.t Rd0811,Nbit,off1630pc15s is PCPMode=0 & Nbit & Rd0811 & op0006=0x6f ; off1630pc15s & op3131=0x1 { local tmp = Rd0811 & (1 << Nbit); if (tmp != 0) goto off1630pc15s; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JZ D[15], disp8 (SB) :jz d15,off0815pc8s is PCPMode=0 & d15 & off0815pc8s & op0007=0x6e { if (d15 == 0) goto off0815pc8s; } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_RIDER_B) || defined(TRICORE_V2) # JZ D[b], disp4 (SBR) :jz Rd1215,off0811pc4z is PCPMode=0 & Rd1215 & off0811pc4z & op0007=0x76 { if (Rd1215 == 0) goto off0811pc4z; } @endif # JZ.A A[b], disp4 (SBR) :jz.a Ra1215,off0811pc4z is PCPMode=0 & Ra1215 & off0811pc4z & op0007=0xbc { if (Ra1215 == 0) goto off0811pc4z; } # JZ.A A[a], disp15 (BRR) :jz.a Ra0811,off1630pc15s is 
PCPMode=0 & Ra0811 & op0007=0xbd & op1215=0x0 ; off1630pc15s & op3131=0x0 { if (Ra0811 == 0) goto off1630pc15s; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # JZ.T D[15], n, disp4 (SBRN) :jz.t d15,const1215Z,off0811pc4z is PCPMode=0 & const1215Z & d15 & off0811pc4z & op0007=0x2e { local tmp = d15 & (1 << const1215Z); if (tmp == 0) goto off0811pc4z; } @endif # JZ.T D[a], n, disp15 (BRN) :jz.t Rd0811,Nbit,off1630pc15s is PCPMode=0 & Nbit & Rd0811 & op0006=0x6f ; off1630pc15s & op3131=0x0 { local tmp = Rd0811 & (1 << Nbit); if (tmp == 0) goto off1630pc15s; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.A A[15], A[10], const8 (SC) :ld.a a15,SC is PCPMode=0 & a15 & op0007=0xd8 & SC { build SC; a15 = *[ram]:4 SC; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.A A[c], A[15], off4 (SLRO) :ld.a Ra0811,SLRO is PCPMode=0 & Ra0811 & op0007=0xc8 & SLRO { build SLRO; Ra0811 = *[ram]:4 SLRO; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.A A[15], A[b], off4 (SRO) :ld.a a15,SRO is PCPMode=0 & a15 & op0007=0xcc & SRO { build SRO; a15 = *[ram]:4 SRO; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.A A[c], A[b] (SLR)(Post-increment Addressing Mode) # LD.A A[c], A[b] (SLR) :ld.a Ra0811,SLR is PCPMode=0 & Ra0811 & op0607=0x3 & SLR { build SLR; Ra0811 = *[ram]:4 SLR; } @endif # LD.A A[a], A[b], off16 (BOL)(Base + Long Offset Addressing Mode) :ld.a Ra0811,BOL is PCPMode=0 & ( Ra0811 & op0007=0x99 ) ... 
& BOL { build BOL; Ra0811 = *[ram]:4 BOL; } # LD.A A[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.A A[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.A A[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.A A[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.A A[a], P[b] (BO)(Index Addressing Mode) :ld.a Ra0811,BO is PCPMode=0 & ( Ra0811 & op0607=0x0 ; op2225=0x6 ) & BO { build BO; Ra0811 = *[ram]:4 BO; } # LD.A A[a], P[b], off10 (BO)(Circular Addressing Mode) #:ld.a Ra0811,BO is PCPMode=0 & ( Ra0811 & op0007=0x29 ; op2227=0x16 ) & BO :ld.a Ra0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Ra0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x16 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); Ra0811 = *[ram]:4 EA; } # LD.A A[a], off18 (ABS)(Absolute Addressing Mode) :ld.a Ra0811,off18 is PCPMode=0 & ( Ra0811 & op0007=0x85 ; op2627=0x2 ) & off18 { Ra0811 = *[ram]:4 off18; } # LD.B D[a], off18 (ABS)(Absolute Addressing Mode) :ld.b Rd0811,off18 is PCPMode=0 & ( Rd0811 & op0007=0x5 ; op2627=0x0 ) & off18 { Rd0811 = sext(*[ram]:1 off18); } # LD.B D[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.B D[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.B D[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.B D[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.B D[a], P[b] (BO)(Index Addressing Mode) :ld.b Rd0811,BO is PCPMode=0 & ( Rd0811 & op0607=0x0 ; op2225=0x0 ) & BO { build BO; Rd0811 = sext(*[ram]:1 BO); } # LD.B D[a], P[b], off10 (BO)(Circular Addressing Mode) #:ld.b Rd0811,BO is PCPMode=0 & ( Rd0811 & op0007=0x29 ; op2227=0x10 ) & BO :ld.b Rd0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x10 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); Rd0811 = sext(*[ram]:1 EA); } # LD.B D[a], A[b], off16 (BOL)(Base + Long Offset Addressing Mode) :ld.b Rd0811,BOL is PCPMode=0 & ( Rd0811 & op0007=0x79 ) ... 
& BOL { build BOL; Rd0811 = sext(*[ram]:1 BOL); } # LD.BU D[a], A[b], off16 (BOL)(Base + Long Offset Addressing Mode) :ld.bu Rd0811,BOL is PCPMode=0 & ( Rd0811 & op0007=0x39 ) ... & BOL { build BOL; Rd0811 = zext(*[ram]:1 BOL); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.BU D[c], A[b] (SLR) # LD.BU D[c], A[b] (SLR)(Post-increment Addressing Mode) :ld.bu Rd0811,SLR is PCPMode=0 & Rd0811 & op0607=0x0 & SLR { build SLR; Rd0811 = zext(*[ram]:1 SLR); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.BU D[15], A[b], off4 (SRO) :ld.bu d15,SRO is PCPMode=0 & d15 & op0007=0xc & SRO { build SRO; d15 = zext(*[ram]:1 SRO); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.BU D[c], A[15], off4 (SLRO) :ld.bu Rd0811,SLRO is PCPMode=0 & Rd0811 & op0007=0x8 & SLRO { build SLRO; Rd0811 = zext(*[ram]:1 SLRO); } @endif # LD.BU D[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.BU D[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.BU D[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.BU D[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.BU D[a], P[b] (BO)(Index Addressing Mode) :ld.bu Rd0811,BO is PCPMode=0 & ( Rd0811 & op0607=0x0 ; op2225=0x1 ) & BO { build BO; Rd0811 = zext(*[ram]:1 BO); } # LD.BU D[a], off18 (ABS)(Absolute Addressing Mode) :ld.bu Rd0811,off18 is PCPMode=0 & ( Rd0811 & op0007=0x5 ; op2627=0x1 ) & off18 { Rd0811 = zext(*[ram]:1 off18); } # LD.BU D[a], P[b], off10 (BO)(Circular Addressing Mode) #:ld.bu Rd0811,BO is PCPMode=0 & ( Rd0811 & op0007=0x29 ; op2227=0x11 ) & BO :ld.bu Rd0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x11 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); Rd0811 = zext(*[ram]:1 EA); } # LD.D E[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.D E[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.D E[a], 
A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.D E[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.D E[a], P[b] (BO)(Index Addressing Mode) :ld.d Re0811,BO is PCPMode=0 & ( Re0811 & op0607=0x0 ; op2225=0x5 ) & BO { build BO; Re0811 = *[ram]:8 BO; } # LD.D E[a], off18 (ABS)(Absolute Addressing Mode) :ld.d Re0811,off18 is PCPMode=0 & ( Re0811 & op0007=0x85 ; op2627=0x1 ) & off18 { Re0811 = *[ram]:8 off18; } # LD.D E[a], P[b], off10 (BO)(Circular Addressing Mode) #:ld.d Re0811,BO is PCPMode=0 & ( Re0811 & op0007=0x29 ; op2227=0x15 ) & BO :ld.d Re0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Re0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x15 { local EA0:4; local EA2:4; local EA4:4; local EA6:4; CircularAddressingMode4(Rpe1215, Rpo1215, EA0, EA2, EA4, EA6, off10, 2); Re0811[48,16] = *[ram]:2 EA6; Re0811[32,16] = *[ram]:2 EA4; Re0811[16,16] = *[ram]:2 EA2; Re0811[0,16] = *[ram]:2 EA0; } # LD.DA P[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.DA P[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.DA P[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.DA P[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.DA P[a], P[b] (BO)(Index Addressing Mode) :ld.da Rp0811,BO is PCPMode=0 & ( Rp0811 & op0607=0x0 ; op2225=0x7 ) & BO { build BO; Rp0811 = *[ram]:8 BO; } # LD.DA P[a], P[b], off10 (BO)(Circular Addressing Mode) #:ld.da Rpe0811/Rpo0811,BO is PCPMode=0 & ( Rpe0811 & Rpo0811 & op0007=0x29 ; op2227=0x17 ) & BO :ld.da Rpe0811/Rpo0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rpe0811 & Rpo0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x17 { local EA0:4; local EA4:4; CircularAddressingMode2(Rpe1215, Rpo1215, EA0, EA4, off10, 4); Rpo0811 = *[ram]:4 EA4; Rpe0811 = *[ram]:4 EA0; } # LD.DA P[a], off18 (ABS)(Absolute Addressing Mode) :ld.da Rp0811,off18 is PCPMode=0 & ( Rp0811 & op0007=0x85 ; op2627=0x3 ) & off18 { Rp0811 = *[ram]:8 off18; } @if defined(TRICORE_V2) :ld.dd Re0811/ReN0811,BO is PCPMode=0 & ( 
Re0811 & ReN0811 & op0007=0x9 ; op2227=0x9 ) & BO { build BO; Re0811 = *[ram]:8 BO; ReN0811 = *[ram]:8 BO+8; } @endif @if defined(TRICORE_V2) :ld.dd Re0811/ReN0811,BO is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x29 ; op1621=0x0 & op2227=0x09 & op2831=0x0 ) & BO { build BO; Re0811 = *[ram]:8 BO; ReN0811 = *[ram]:8 BO+8; } @endif @if defined(TRICORE_V2) :ld.dd Re0811/ReN0811,BO is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x9 ; op2227=0x19 ) & BO { build BO; Re0811 = *[ram]:8 BO; ReN0811 = *[ram]:8 BO+8; } @endif @if defined(TRICORE_V2) #:ld.dd Re0811/ReN0811,BO is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x29 ; op2227=0x19 ) & BO :ld.dd Re0811/ReN0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Re0811 & ReN0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x19 { local EA0:4; local EA8:4; CircularAddressingMode2(Rpe1215, Rpo1215, EA0, EA8, off10, 8); Re0811 = *[ram]:8 EA0; ReN0811 = *[ram]:8 EA8; } @endif @if defined(TRICORE_V2) :ld.dd Re0811/ReN0811,BO is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x9 ; op2227=0x29 ) & BO { build BO; Re0811 = *[ram]:8 BO; ReN0811 = *[ram]:8 BO+8; } @endif @if defined(TRICORE_V2) :ld.dd Re0811/ReN0811,BO is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x29 ; op1621=0x0 & op2227=0x29 & op2831=0x0 ) & BO { build BO; Re0811 = *[ram]:8 BO; ReN0811 = *[ram]:8 BO+8; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.H D[c], A[b] (SLR)(Post-increment Addressing Mode) # LD.H D[c], A[b] (SLR) :ld.h Rd0811,SLR is PCPMode=0 & Rd0811 & op0607=0x2 & SLR { build SLR; Rd0811 = sext(*[ram]:2 SLR); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.H D[15], A[b], off4 (SRO) :ld.h d15,SRO is PCPMode=0 & d15 & op0007=0x8c & SRO { build SRO; d15 = sext(*[ram]:2 SRO); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.H D[c], A[15], off4 (SLRO) :ld.h Rd0811,SLRO is PCPMode=0 & Rd0811 & op0007=0x88 & SLRO { build SLRO; Rd0811 = 
sext(*[ram]:2 SLRO); } @endif # LD.H D[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.H D[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.H D[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.H D[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.H D[a], P[b] (BO)(Index Addressing Mode) :ld.h Rd0811,BO is PCPMode=0 & ( Rd0811 & op0607=0x0 ; op2225=0x2 ) & BO { build BO; Rd0811 = sext(*[ram]:2 BO); } # LD.H D[a], A[b], off16 (BOL)(Base + Long Offset Addressing Mode) :ld.h Rd0811,BOL is PCPMode=0 & ( Rd0811 & op0007=0xc9 ) ... & BOL { build BOL; Rd0811 = sext(*[ram]:2 BOL); } #:ld.h Rd0811,BO is PCPMode=0 & ( Rd0811 & op0007=0x29 ; op2227=0x12 ) & BO :ld.h Rd0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x12 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); Rd0811 = sext(*[ram]:2 EA); } # LD.H D[a], off18 (ABS)(Absolute Addressing Mode) :ld.h Rd0811,off18 is PCPMode=0 & ( Rd0811 & op0007=0x5 ; op2627=0x2 ) & off18 { Rd0811 = sext(*[ram]:2 off18); } # LD.HU D[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.HU D[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.HU D[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.HU D[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.HU D[a], P[b] (BO)(Index Addressing Mode) :ld.hu Rd0811,BO is PCPMode=0 & ( Rd0811 & op0607=0x0 ; op2225=0x3 ) & BO { build BO; Rd0811 = zext(*[ram]:2 BO); } #:ld.hu Rd0811,BO is PCPMode=0 & ( Rd0811 & op0007=0x29 ; op2227=0x13 ) & BO :ld.hu Rd0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x13 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); Rd0811 = zext(*[ram]:2 EA); } # LD.HU D[a], off18 (ABS)(Absolute Addressing Mode) :ld.hu Rd0811,off18 is PCPMode=0 & ( Rd0811 & op0007=0x5 ; op2627=0x3 ) & off18 { Rd0811 = zext(*[ram]:2 off18); } # LD.HU D[a], A[b], off16 (BOL)(Base + Long Offset 
Addressing Mode) :ld.hu Rd0811,BOL is PCPMode=0 & ( Rd0811 & op0007=0xb9 ) ... & BOL { build BOL; Rd0811 = zext(*[ram]:2 BOL); } # LD.Q D[a], off18 (ABS)(Absolute Addressing Mode) :ld.q Rd0811,off18 is PCPMode=0 & ( Rd0811 & op0007=0x45 ; op2627=0x0 ) & off18 { Rd0811 = zext(*[ram]:2 off18) << 16; } # LD.Q D[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.Q D[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.Q D[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.Q D[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.Q D[a], P[b] (BO)(Index Addressing Mode) :ld.q Rd0811,BO is PCPMode=0 & ( Rd0811 & op0607=0x0 ; op2225=0x8 ) & BO { build BO; Rd0811 = zext(*[ram]:2 BO) << 16; } # LD.Q D[a], P[b], off10 (BO)(Circular Addressing Mode) #:ld.q Rd0811,BO is PCPMode=0 & ( Rd0811 & op0007=0x29 ; op2227=0x18 ) & BO :ld.q Rd0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x18 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); Rd0811 = zext(*[ram]:2 EA) << 16; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.W D[15], A[10], const8 (SC) :ld.w d15,SC is PCPMode=0 & d15 & op0007=0x58 & SC { build SC; d15 = *[ram]:4 SC; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.W D[c], A[b] (SLR) # LD.W D[c], A[b] (SLR)(Post-increment Addressing Mode) :ld.w Rd0811,SLR is PCPMode=0 & Rd0811 & op0607=0x1 & SLR { build SLR; Rd0811 = *[ram]:4 SLR; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.W D[15], A[b], off4 (SRO) :ld.w d15,SRO is PCPMode=0 & d15 & op0007=0x4c & SRO { build SRO; d15 = *[ram]:4 SRO; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # LD.W D[c], A[15], off4 (SLRO) :ld.w Rd0811,SLRO is PCPMode=0 & Rd0811 & op0007=0x48 & SLRO { build SLRO; Rd0811 = *[ram]:4 SLRO; } @endif # LD.W D[a], A[b], off16 (BOL)(Base + 
Long Offset Addressing Mode) :ld.w Rd0811,BOL is PCPMode=0 & ( Rd0811 & op0007=0x19 ) ... & BOL { build BOL; Rd0811 = *[ram]:4 BOL; } # LD.W D[a], off18 (ABS)(Absolute Addressing Mode) :ld.w Rd0811,off18 is PCPMode=0 & ( Rd0811 & op0007=0x85 ; op2627=0x0 ) & off18 { Rd0811 = *[ram]:4 off18; } # LD.W D[a], A[b], off10 (BO)(Post-increment Addressing Mode) # LD.W D[a], P[b] (BO)(Bit-reverse Addressing Mode) # LD.W D[a], A[b], off10 (BO)(Pre-increment Addressing Mode) # LD.W D[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode) # LD.W D[a], P[b] (BO)(Index Addressing Mode) :ld.w Rd0811,BO is PCPMode=0 & ( Rd0811 & op0607=0x0 ; op2225=0x4 ) & BO { build BO; Rd0811 = *[ram]:4 BO; } #:ld.w Rd0811,BO is PCPMode=0 & ( Rd0811 & op0007=0x29 ; op2227=0x14 ) & BO :ld.w Rd0811,[Rpe1215/Rpo1215^"+c"^]off10 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x29 ; off10 & op2227=0x14 { local EA0:4; local EA2:4; CircularAddressingMode2(Rpe1215, Rpo1215, EA0, EA2, off10, 2); Rd0811[16,16] = *[ram]:2 EA2; Rd0811[0,16] = *[ram]:2 EA0; } # LDLCX off18 (ABS)(Absolute Addressing Mode) :ldlcx off18 is PCPMode=0 & ( op0007=0x15 & op0811=0x0 ; op2627=0x2 ) & off18 { #TODO context load_lower_context(off18); } # LDLCX A[b], off10 (BO) (Base + Short Index Addressing Mode) :ldlcx BO is PCPMode=0 & ( op0007=0x49 & op0811=0x0 ; off10 & op2227=0x24 ) & BO { #TODO context build BO; load_lower_context(BO); } # LDMST A[b], off10, E[a] (BO)(Post-increment Addressing Mode) # LDMST P[b], E[a] (BO)(Bit-reverse Addressing Mode) # LDMST A[b], off10, E[a] (BO)(Pre-increment Addressing Mode) # LDMST A[b], off10, E[a] (BO)(Base + Short Offset Addressing Mode) # LDMST P[b], E[a] (BO)(Index Addressing Mode) :ldmst BO,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & Reo0811 & op0607=0x1 ; op2225=0x1 ) & BO { build BO; local tmp:4 = *[ram]:4 BO; *[ram]:4 BO = (tmp & ~Reo0811) | (Ree0811 & Reo0811); } # LDMST off18, E[a] (ABS)(Absolute Addressing Mode) :ldmst off18,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & 
Reo0811 & op0007=0xe5 ; op2627=0x1 ) & off18
{
    # Load-modify-store: merge the masked bits of E[a] into the word at off18.
    # Reo0811 is the mask half, Ree0811 the data half of the extended register.
    local orig:4 = *[ram]:4 off18;
    *[ram]:4 off18 = (orig & ~Reo0811) | (Ree0811 & Reo0811);
}

# LDMST P[b], off10, E[a] (BO)(Circular Addressing Mode)
#:ldmst BO,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & Reo0811 & op0007=0x69 ; op2227=0x11 ) & BO
:ldmst [Rpe1215/Rpo1215^"+c"^]off10,Ree0811/Reo0811 is PCPMode=0 & Ree0811 & Reo0811 & Rpe1215 & Rpo1215 & op0007=0x69 ; off10 & op2227=0x11
{
    # Resolve the circular-buffer effective address, then do the same
    # masked read-modify-write as the absolute form above.
    local EA:4;
    CircularAddressingMode(Rpe1215, Rpo1215, EA, off10);
    *[ram]:4 EA = (*[ram]:4 EA & ~Reo0811) | (Ree0811 & Reo0811);
}

# LDUCX A[b], off10 (BO)(Base + Short Index Addressing Mode)
:lducx BO is PCPMode=0 & ( op0007=0x49 & op0811=0x0 ; off10 & op2227=0x25 ) & BO
{
    #TODO context
    build BO;
    load_upper_context(BO);
}

# LDUCX off18 (ABS)(Absolute Addressing Mode)
:lducx off18 is PCPMode=0 & ( op0007=0x15 & op0811=0x0 ; op2627=0x3 ) & off18
{
    #TODO context
    load_upper_context(off18);
}

# LEA A[a], off18 (ABS)(Absolute Addressing Mode)
:lea Ra0811,off18 is PCPMode=0 & ( Ra0811 & op0007=0xc5 ; op2627=0x0 ) & off18
{
    Ra0811 = off18;
}

# LEA A[a], A[b], off16 (BOL)(Base + Long Offset Addressing Mode)
:lea Ra0811,BOL is PCPMode=0 & ( Ra0811 & op0007=0xd9 ) ...
& BOL
{
    build BOL;
    Ra0811 = BOL;
}

# LEA A[a], A[b], off10 (BO)(Base + Short Offset Addressing Mode)
:lea Ra0811,BO is PCPMode=0 & ( Ra0811 & op0007=0x49 ; op2227=0x28 ) & BO
{
    build BO;
    Ra0811 = BO;
}

# LOOP A[b], disp4 (SBR)
# Decrement A[b]; branch while the pre-decrement value was non-zero.
:loop Ra1215,off0811pc4o is PCPMode=0 & Ra1215 & off0811pc4o & op0007=0xfc
{
    local tmp:4 = Ra1215;
    Ra1215 = Ra1215 - 1;
    if (tmp != 0) goto off0811pc4o;
}

# LOOP A[b], disp15 (BRR)
:loop Ra1215,off1630pc15s is PCPMode=0 & Ra1215 & op0007=0xfd & op0811=0x0 ; off1630pc15s & op3131=0x0
{
    local tmp:4 = Ra1215;
    Ra1215 = Ra1215 - 1;
    if (tmp != 0) goto off1630pc15s;
}

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# LOOPU disp15 (BRR)
# Unconditional loop form: no counter, always taken.
:loopu off1630pc15s is PCPMode=0 & op0007=0xfd & op0815=0x0 ; off1630pc15s & op3131=0x1
{
    goto off1630pc15s;
}
@endif

# LT D[15], D[a], D[b] (SRR)
:lt d15,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x7a
{
    d15 = zext(Rd0811 s< Rd1215);
}

# LT D[15], D[a], const4 (SRC)
:lt d15,Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & d15 & op0007=0xfa
{
    d15 = zext(Rd0811 s< const1215S);
}

# LT D[c], D[a], D[b] (RR)
:lt Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x120
{
    Rd2831 = zext(Rd0811 s< Rd1215);
}

# LT D[c], D[a], const9 (RC)
:lt Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x12 ) & const1220S
{
    Rd2831 = zext(Rd0811 s< const1220S);
}

# LT.A D[c], A[a], A[b] (RR)
# NOTE(review): this uses the signed compare s< on address registers; the
# TriCore manual treats address comparison as unsigned -- verify.
:lt.a Rd2831,Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x1 ; Rd2831 & op1627=0x420
{
    Rd2831 = zext(Ra0811 s< Ra1215);
}

# LT.B D[c], D[a], D[b] (RR)
# Packed signed byte compare: each result byte is 0xff (true) or 0x00.
:lt.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x520
{
    local result3:4 = 0xff * zext(Rd0811[24,8] s< Rd1215[24,8]);
    local result2:4 = 0xff * zext(Rd0811[16,8] s< Rd1215[16,8]);
    local result1:4 = 0xff * zext(Rd0811[8,8] s< Rd1215[8,8]);
    local result0:4 = 0xff * zext(Rd0811[0,8] s< Rd1215[0,8]);
    Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0);
}

# LT.BU D[c], D[a], D[b] (RR)
# Packed unsigned byte compare.
:lt.bu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x530
{
    local result3:4 = 0xff * zext(Rd0811[24,8] < Rd1215[24,8]);
    local result2:4 = 0xff * zext(Rd0811[16,8] < Rd1215[16,8]);
    local result1:4 = 0xff * zext(Rd0811[8,8] < Rd1215[8,8]);
    local result0:4 = 0xff * zext(Rd0811[0,8] < Rd1215[0,8]);
    Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0);
}

# LT.H D[c], D[a], D[b] (RR)
# Packed signed halfword compare: each result halfword is 0xffff or 0x0000.
:lt.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x720
{
    # CONSISTENCY FIX: declare the temporaries with `local` as every sibling
    # constructor (lt.b/lt.bu/lt.hu) does; semantics are unchanged since a
    # sized first assignment already declares a local implicitly.
    local result1:4 = 0xffff * zext(Rd0811[16,16] s< Rd1215[16,16]);
    local result0:4 = 0xffff * zext(Rd0811[0,16] s< Rd1215[0,16]);
    Rd2831 = (result1 << 16) | (result0);
}

# LT.HU D[c], D[a], D[b] (RR)
# Packed unsigned halfword compare.
:lt.hu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x730
{
    local result1:4 = 0xffff * zext(Rd0811[16,16] < Rd1215[16,16]);
    local result0:4 = 0xffff * zext(Rd0811[0,16] < Rd1215[0,16]);
    Rd2831 = (result1 << 16) | (result0);
}

# LT.U D[c], D[a], D[b] (RR)
:lt.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x130
{
    Rd2831 = zext(Rd0811 < Rd1215);
}

# LT.U D[c], D[a], const9 (RC)
:lt.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x13 ) & const1220Z
{
    Rd2831 = zext(Rd0811 < const1220Z);
}

# LT.W D[c], D[a], D[b] (RR)
# Word compare producing an all-ones/all-zeros mask.
:lt.w Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x920
{
    Rd2831 = 0xFFFFFFFF * zext(Rd0811 s< Rd1215);
}

# LT.WU D[c], D[a], D[b] (RR)
:lt.wu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x930
{
    Rd2831 = 0xFFFFFFFF * zext(Rd0811 < Rd1215);
}

# MADD D[c], D[d], D[a], D[b] (RRR2)
# 32-bit multiply-accumulate.
:madd Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Rd2427 & Rd2831 & op1623=0xa
{
    Rd2831 = Rd2427 + (Rd0811 * Rd1215);
    overflowflags(Rd2831);
}

# MADD D[c], D[d], D[a], const9 (RCR)
:madd Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Rd2427 & Rd2831 & op2123=0x1 ) & const1220S
{
    Rd2831 = Rd2427 + (Rd0811 * const1220S);
    overflowflags(Rd2831);
}

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD E[c], E[d], D[a], const9 (RCR)
# 64-bit accumulate of the sign-extended 32-bit product.
:madd Re2831,Re2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Re2427 & Re2831 & op2123=0x3 ) & const1220S
{
    Re2831 = Re2427 + sext(Rd0811 * const1220S);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD E[c], E[d], D[a], D[b] (RRR2)
:madd Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Re2427 & Re2831 & op1623=0x6a
{
    Re2831 = Re2427 + sext(Rd0811 * Rd1215);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD.F D[c], D[d], D[a], D[b] (RRR)
:madd.f Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x6b ; Rd2427 & Rd2831 & op1623=0x61
{
    #TODO float
    #TODO flags
    Rd2831 = Rd2427 f+ (Rd0811 f* Rd1215);
}
@endif

# Packed 16x16 signed multiply helpers. When both operand halfwords are
# 0x8000 and n == 1, the doubled product would overflow, so the result
# saturates to 0x7FFFFFFF; otherwise it is the shifted product, sign-extended.
macro packed_multiply_ul(mres1, mres0, rega, regb, n)
{
    local sc1 = (rega[16,16] == 0x8000) && (regb[16,16] == 0x8000) && (n == 1);
    local sc0 = (rega[0,16] == 0x8000) && (regb[0,16] == 0x8000) && (n == 1);
    ternary(mres1, sc1, 0x7FFFFFFF, sext((rega[16,16] * regb[16,16]) << n));
    ternary(mres0, sc0, 0x7FFFFFFF, sext((rega[0,16] * regb[0,16]) << n));
}

macro packed_multiply_lu(mres1, mres0, rega, regb, n)
{
    local sc1 = (rega[16,16] == 0x8000) && (regb[0,16] == 0x8000) && (n == 1);
    local sc0 = (rega[0,16] == 0x8000) && (regb[16,16] == 0x8000) && (n == 1);
    ternary(mres1, sc1, 0x7FFFFFFF, sext((rega[16,16] * regb[0,16]) << n));
    ternary(mres0, sc0, 0x7FFFFFFF, sext((rega[0,16] * regb[16,16]) << n));
}

macro packed_multiply_ll(mres1, mres0, rega, regb, n)
{
    local sc1 = (rega[16,16] == 0x8000) && (regb[0,16] == 0x8000) && (n == 1);
    local sc0 = (rega[0,16] == 0x8000) && (regb[0,16] == 0x8000) && (n == 1);
ternary(mres1, sc1, 0x7FFFFFFF, sext((rega[16,16] * regb[0,16]) << n)); ternary(mres0, sc0, 0x7FFFFFFF, sext((rega[0,16] * regb[0,16]) << n)); } macro packed_multiply_uu(mres1, mres0, rega, regb, n) { local sc1 = (rega[0,16] == 0x8000) && (regb[16,16] == 0x8000) && (n == 1); local sc0 = (rega[16,16] == 0x8000) && (regb[16,16] == 0x8000) && (n == 1); ternary(mres1, sc1, 0x7FFFFFFF, sext((rega[0,16] * regb[16,16]) << n)); ternary(mres0, sc0, 0x7FFFFFFF, sext((rega[16,16] * regb[16,16]) << n)); } macro multiply_l_l(mres0, rega, regb, n) { local sc0 = (rega[0,16] == 0x8000) && (regb[0,16] == 0x8000) && (n == 1); ternary(mres0, sc0, 0x7FFFFFFF, sext((rega[0,16] * regb[0,16]) << n)); } macro multiply_u_u(mres0, rega, regb, n) { local sc0 = (rega[16,16] == 0x8000) && (regb[16,16] == 0x8000) && (n == 1); ternary(mres0, sc0, 0x7FFFFFFF, sext((rega[16,16] * regb[16,16]) << n)); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.H E[c], E[d], D[a], D[b] UL, n (RRR1) :madd.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x18 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.H E[c], E[d], D[a], D[b] LU, n (RRR1) :madd.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x19 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if 
defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.H E[c], E[d], D[a], D[b] LL, n (RRR1) :madd.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.H E[c], E[d], D[a], D[b] UU, n (RRR1) :madd.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :madd.q Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x4 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); Rd2831 = Rd2427 + mres; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q D[c], D[d], D[a], D[b] U, n (RRR1) :madd.q Rd2831,Rd2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x0 { local tmp:8 = sext(Rd0811 * sext(Rd1215[16,16])); tmp = (tmp << const1617Z) s>> 16; Rd2831 = Rd2427 + tmp[0,32]; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q D[c], 
D[d], D[a], D[b] L, n (RRR1) :madd.q Rd2831,Rd2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x1 { local tmp:8 = sext(Rd0811 * sext(Rd1215[0,16])); tmp = (tmp << const1617Z) s>> 16; Rd2831 = Rd2427 + tmp[0,32]; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q D[c], D[d], D[a], D[b], n (RRR1) :madd.q Rd2831,Rd2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2 { local tmp:8 = sext(Rd0811 * Rd1215); tmp = (tmp << const1617Z) s>> 32; Rd2831 = Rd2427 + tmp[0,32]; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :madd.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x5 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); Rd2831 = Rd2427 + mres; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q E[c], E[d], D[a], D[b] U, n (RRR1) :madd.q Re2831,Re2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x18 { local tmp:8 = sext(Rd0811 * sext(Rd1215[16,16])); Re2831 = Re2427 + (tmp << const1617Z); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q E[c], E[d], D[a], D[b] L, n (RRR1) :madd.q Re2831,Re2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x19 { local tmp:8 = sext(Rd0811 * sext(Rd1215[0,16])); Re2831 = Re2427 + (tmp << const1617Z); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADD.Q E[c], E[d], D[a], D[b], n (RRR1) 
:madd.q Re2831,Re2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x1b
{
    # 64-bit accumulate of the full 32x32 product, scaled left by n.
    local tmp:8 = sext(Rd0811 * Rd1215);
    Re2831 = Re2427 + (tmp << const1617Z);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD.Q E[c], E[d], D[a] U, D[b] U, n (RRR1)
:madd.q Re2831,Re2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x1c
{
    local mres:4;
    multiply_u_u(mres, Rd0811, Rd1215, const1617Z);
    # BUG FIX: was `sext(mres << 16)`, which shifted within 32 bits and
    # discarded the upper 16 bits of the product before extension. Sign-extend
    # first, then shift, matching the L,L variant (op1823=0x1d) below and the
    # manual's 64-bit E[d] + (result << 16).
    Re2831 = Re2427 + (sext(mres) << 16);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD.Q E[c], E[d], D[a] L, D[b] L, n (RRR1)
:madd.q Re2831,Re2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x1d
{
    local mres:4;
    multiply_l_l(mres, Rd0811, Rd1215, const1617Z);
    Re2831 = Re2427 + (sext(mres) << 16);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD.U E[c], E[d], D[a], const9 (RCR)
# Unsigned 64-bit multiply-accumulate.
:madd.u Re2831,Re2427,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Re2427 & Re2831 & op2123=0x2 ) & const1220Z
{
    Re2831 = Re2427 + zext(Rd0811 * const1220Z);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADD.U E[c], E[d], D[a], D[b] (RRR2)
:madd.u Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Re2427 & Re2831 & op1623=0x68
{
    Re2831 = Re2427 + zext(Rd0811 * Rd1215);
    overflowflagsd(Re2831);
}
@endif

@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MADDM.H E[c], E[d], D[a], D[b] UL, n (RRR1)
:maddm.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x1c
{
    local mres1:4;
    local mres0:4;
packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 + mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDM.H E[c], E[d], D[a], D[b] LU, n (RRR1) :maddm.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x1d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 + mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDM.H E[c], E[d], D[a], D[b] LL, n (RRR1) :maddm.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x1e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 + mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDM.H E[c], E[d], D[a], D[b] UU, n (RRR1) :maddm.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x1f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 + mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDMS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :maddms.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x3c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 + mres0) << 16); ssov(Re2831, result, 64); overflowflagsd(result); } @endif @if defined(TRICORE_RIDER_B) || 
defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDMS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :maddms.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x3d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 + mres0) << 16); ssov(Re2831, result, 64); overflowflagsd(result); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDMS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :maddms.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x3e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 + mres0) << 16); ssov(Re2831, result, 64); overflowflagsd(result); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDMS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :maddms.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Re2427 & Re2831 & const1617Z & op1823=0x3f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 + mres0) << 16); ssov(Re2831, result, 64); overflowflagsd(result); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.H D[c], E[d], D[a], D[b] UL, n (RRR1) :maddr.h Rd2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2831 & Ree2427 & Reo2427 & const1617Z & op1823=0x1e { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 + mres1 + 0x8000; local res0:4 = Ree2427 + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || 
defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.H D[c], D[d], D[a], D[b] UL, n (RRR1) :maddr.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0xc { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.H D[c], D[d], D[a], D[b] LU, n (RRR1) :maddr.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0xd { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.H D[c], D[d], D[a], D[b] LL, n (RRR1) :maddr.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0xe { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.H D[c], D[d], D[a], D[b] UU, n (RRR1) :maddr.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0xf { local mres1:4; local mres0:4; 
packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :maddr.q Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x6 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 + mres + 0x8000; Rd2831 = zext(res[16,16]) << 16; overflowflags(res); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDR.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :maddr.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x7 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 + mres + 0x8000; Rd2831 = zext(res[16,16]) << 16; overflowflags(res); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.H D[c], E[d], D[a], D[b] UL, n (RRR1) :maddrs.h Rd2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2831 & Ree2427 & Reo2427 & const1617Z & op1823=0x3e { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 + mres1 + 0x8000; local res0:4 = Ree2427 + mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.H D[c], D[d], D[a], D[b] UL, n (RRR1) :maddrs.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & 
const1617Z & op1823=0x2c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = mres1 + 0x8000; local res0:4 = mres0 + 0x8000; res1[16,16] = res1[16,16] + Rd2427[16,16]; res0[16,16] = res0[16,16] + Rd2427[0,16]; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.H D[c], D[d], D[a], D[b] LU, n (RRR1) :maddrs.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = mres1 + 0x8000; local res0:4 = mres0 + 0x8000; res1[16,16] = res1[16,16] + Rd2427[16,16]; res0[16,16] = res0[16,16] + Rd2427[0,16]; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.H D[c], D[d], D[a], D[b] LL, n (RRR1) :maddrs.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = mres1 + 0x8000; local res0:4 = mres0 + 0x8000; res1[16,16] = res1[16,16] + Rd2427[16,16]; res0[16,16] = res0[16,16] + Rd2427[0,16]; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.H D[c], D[d], D[a], D[b] UU, n (RRR1) :maddrs.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2f { local mres1:4; local 
mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = mres1 + 0x8000; local res0:4 = mres0 + 0x8000; res1[16,16] = res1[16,16] + Rd2427[16,16]; res0[16,16] = res0[16,16] + Rd2427[0,16]; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :maddrs.q Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x26 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 + mres + 0x8000; overflowflags(res); ssov(res, res, 32); Rd2831 = zext(res[16,16]) << 16; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDRS.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :maddrs.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x27 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 + mres + 0x8000; overflowflags(res); ssov(res, res, 32); Rd2831 = zext(res[16,16]) << 16; } @endif # MADDS D[c], D[d], D[a], D[b] (RRR2) :madds Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Rd2427 & Rd2831 & op1623=0x8a { local result:4 = Rd2427 + (Rd0811 * Rd1215); overflowflags(result); ssov(Rd2831, result, 32); } # MADDS D[c], D[d], D[a], const9 (RCR) :madds Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Rd2427 & Rd2831 & op2123=0x5 ) & const1220S { local result:4 = Rd2427 + (Rd0811 * const1220S); overflowflags(result); ssov(Rd2831, result, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS E[c], E[d], D[a], const9 (RCR) :madds Re2831,Re2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Re2427 & Re2831 
& op2123=0x7 ) & const1220S { local result:8 = Re2427 + sext(Rd0811 * const1220S); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS E[c], E[d], D[a], D[b] (RRR2) :madds Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Re2427 & Re2831 & op1623=0xea { local result:8 = Re2427 + sext(Rd0811 * Rd1215); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :madds.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x38 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :madds.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x39 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :madds.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, 
mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :madds.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x83 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 + mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :madds.q Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x24 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local result:4 = Rd2427 + mres; overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q D[c], D[d], D[a], D[b] U, n (RRR1) :madds.q Rd2831,Rd2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x20 { local tmp:8 = sext(Rd0811 * sext(Rd1215[16,16])); tmp = (tmp << const1617Z) s>> 16; local result:4 = Rd2427 + tmp[0,32]; overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q D[c], D[d], D[a], D[b] L, n (RRR1) :madds.q Rd2831,Rd2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x21 { local tmp:8 = sext(Rd0811 * sext(Rd1215[0,16])); tmp = (tmp << const1617Z) 
s>> 16; local result:4 = Rd2427 + tmp[0,32]; overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q D[c], D[d], D[a], D[b], n (RRR1) :madds.q Rd2831,Rd2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x22 { local tmp:8 = sext(Rd0811 * Rd1215); tmp = (tmp << const1617Z) s>> 32; local result:4 = Rd2427 + tmp[0,32]; overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :madds.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Rd2427 & Rd2831 & const1617Z & op1823=0x25 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); local result:4 = Rd2427 + mres; overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.QE[c], E[d], D[a], D[b] U, n (RRR1) :madds.q Re2831,Re2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x38 { local tmp:8 = sext(Rd0811 * sext(Rd1215[16,16])); local result:8 = Re2427 + (tmp << const1617Z); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q E[c], E[d], D[a], D[b] L, n (RRR1) :madds.q Re2831,Re2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x39 { local tmp:8 = sext(Rd0811 * sext(Rd1215[0,16])); local result:8 = Re2427 + (tmp << const1617Z); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q E[c], E[d], D[a], D[b], n (RRR1) :madds.q Re2831,Re2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 
& Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x3b { local tmp:8 = sext(Rd0811 * Rd1215); tmp = tmp << const1617Z; local result:8 = Re2427 + tmp; overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q E[c], E[d], D[a] U, D[b] U, n (RRR1) :madds.q Re2831,Re2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x3c { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext(mres << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.Q E[c], E[d], D[a] L, D[b] L, n (RRR1) :madds.q Re2831,Re2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x43 ; Re2427 & Re2831 & const1617Z & op1823=0x3d { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext(mres << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif # MADDS.U D[c], D[d], D[a], const9 (RCR) :madds.u Rd2831,Rd2427,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Rd2427 & Rd2831 & op2123=0x4 ) & const1220Z { local result:4 = Rd2427 + (Rd0811 * const1220Z); overflowflags(result); suov(Rd2831, result, 32); } # MADDS.U D[c], D[d], D[a], D[b] (RRR2) :madds.u Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Rd2427 & Rd2831 & op1623=0x88 { local result:4 = Rd2427 + (Rd0811 * Rd1215); overflowflags(result); suov(Rd2831, result, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.U E[c], E[d], D[a], const9 (RCR) :madds.u Re2831,Re2427,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x13 ; Re2427 & Re2831 & op2123=0x6 ) & const1220Z { local result:8 = Re2427 + zext(Rd0811 * const1220Z); overflowflagsd(result); suov(Re2831, result, 64); } @endif @if
defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDS.U E[c], E[d], D[a], D[b] (RRR2) :madds.u Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x3 ; Re2427 & Re2831 & op1623=0xe8 { local result:8 = Re2427 + zext(Rd0811 * Rd1215); overflowflagsd(result); suov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSU.H E[c], E[d], D[a], D[b] UL, n (RRR1) :maddsu.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x18 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSU.H E[c], E[d], D[a], D[b] LU, n (RRR1) :maddsu.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x19 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSU.H E[c], E[d], D[a], D[b] LL, n (RRR1) :maddsu.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0);
} @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSU.H E[c], E[d], D[a], D[b] UU, n (RRR1) :maddsu.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUM.H E[c], E[d], D[a], D[b] UL, n (RRR1) :maddsum.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x1c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUM.H E[c], E[d], D[a], D[b] LU, n (RRR1) :maddsum.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x1d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUM.H E[c], E[d], D[a], D[b] LL, n (RRR1) :maddsum.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x1e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUM.H 
E[c], E[d], D[a], D[b] UU, n (RRR1) :maddsum.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x1f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUMS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :maddsums.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x3c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUMS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :maddsums.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x3d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUMS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :maddsums.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x3e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUMS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :maddsums.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is 
PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Re2427 & Re2831 & const1617Z & op1823=0x3f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 + sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUR.H D[c], D[d], D[a], D[b] UL, n (RRR1) :maddsur.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xc { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUR.H D[c], D[d], D[a], D[b] LU, n (RRR1) :maddsur.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xd { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUR.H D[c], D[d], D[a], D[b] LL, n (RRR1) :maddsur.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xe { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); 
overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUR.H D[c], D[d], D[a], D[b] UU, n (RRR1) :maddsur.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xf { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSURS.H D[c], D[d], D[a], D[b] UL, n (RRR1) :maddsurs.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSURS.H D[c], D[d], D[a], D[b] LU, n (RRR1) :maddsurs.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSURS.H D[c], D[d], D[a], D[b] LL, n (RRR1) 
:maddsurs.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSURS.H D[c], D[d], D[a], D[b] UU, n (RRR1) :maddsurs.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) + mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :maddsus.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x38 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :maddsus.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & 
op1823=0x39 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); ssov(Reo2831, Reo2427 + mres1, 32); ssov(Ree2831, Ree2427 - mres0, 32); overflowflagsww(Reo2427 + mres1, Ree2427 - mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :maddsus.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MADDSUS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :maddsus.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 + mres1; local result0:4 = Ree2427 - mres0; ssov(Reo2831, result1, 32); ssov(Ree2831, result0, 32); overflowflagsww(result1, result0); } @endif # MAX D[c], D[a], D[b] (RR) :max Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x1a0 { ternary(Rd2831, Rd0811 s> Rd1215, Rd0811, Rd1215); } # MAX D[c], D[a], const9 (RC) :max Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x1a ) & const1220S { ternary(Rd2831, Rd0811 s> const1220S, Rd0811, const1220S); } # MAX.B D[c], D[a], D[b] (RR) :max.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x5a0 { local result3:4; local result2:4; local result1:4; local result0:4; ternary(result3, (Rd0811[24,8] s> 
Rd1215[24,8]), zext(Rd0811[24,8]), zext(Rd1215[24,8])); ternary(result2, (Rd0811[16,8] s> Rd1215[16,8]), zext(Rd0811[16,8]), zext(Rd1215[16,8])); ternary(result1, (Rd0811[8,8] s> Rd1215[8,8]), zext(Rd0811[8,8]), zext(Rd1215[8,8])); ternary(result0, (Rd0811[0,8] s> Rd1215[0,8]), zext(Rd0811[0,8]), zext(Rd1215[0,8])); Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0); } # MAX.BU D[c], D[a], D[b] (RR) :max.bu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x5b0 { local result3:4; local result2:4; local result1:4; local result0:4; ternary(result3, (Rd0811[24,8] > Rd1215[24,8]), zext(Rd0811[24,8]), zext(Rd1215[24,8])); ternary(result2, (Rd0811[16,8] > Rd1215[16,8]), zext(Rd0811[16,8]), zext(Rd1215[16,8])); ternary(result1, (Rd0811[8,8] > Rd1215[8,8]), zext(Rd0811[8,8]), zext(Rd1215[8,8])); ternary(result0, (Rd0811[0,8] > Rd1215[0,8]), zext(Rd0811[0,8]), zext(Rd1215[0,8])); Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0); } # MAX.H D[c], D[a], D[b] (RR) :max.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x7a0 { local result1:4; local result0:4; ternary(result1, (Rd0811[16,16] s> Rd1215[16,16]), zext(Rd0811[16,16]), zext(Rd1215[16,16])); ternary(result0, (Rd0811[0,16] s> Rd1215[0,16]), zext(Rd0811[0,16]), zext(Rd1215[0,16])); Rd2831 = (result1 << 16) | (result0); } # MAX.HU D[c], D[a], D[b] (RR) :max.hu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x7b0 { local result1:4; local result0:4; ternary(result1, (Rd0811[16,16] > Rd1215[16,16]), zext(Rd0811[16,16]), zext(Rd1215[16,16])); ternary(result0, (Rd0811[0,16] > Rd1215[0,16]), zext(Rd0811[0,16]), zext(Rd1215[0,16])); Rd2831 = (result1 << 16) | (result0); } # MAX.U D[c], D[a], D[b] (RR) :max.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x1b0 { ternary(Rd2831, Rd0811 > Rd1215, Rd0811, Rd1215); } # MAX.U D[c], D[a], const9 (RC) 
:max.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x1b ) & const1220Z { ternary(Rd2831, Rd0811 > const1220Z, Rd0811, const1220Z); } # MFCR D[c], const16 (RLC) :mfcr Rd2831,const1227Z is PCPMode=0 & ( op0007=0x4d & op0811=0x0 ; Rd2831 ) & const1227Z { Rd2831 = *[register]:4 const1227Z; } @if defined(TRICORE_V2) :mffr Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x1d1 { Rd2831 = *[register]:4 Rd0811; } @endif # MIN D[c], D[a], D[b] (RR) :min Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x180 { ternary(Rd2831, Rd0811 s< Rd1215, Rd0811, Rd1215); } # MIN D[c], D[a], const9 (RC) :min Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x18 ) & const1220S { ternary(Rd2831, Rd0811 s< const1220S, Rd0811, const1220S); } # MIN.B D[c], D[a], D[b] (RR) :min.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x580 { local result3:4; local result2:4; local result1:4; local result0:4; ternary(result3, (Rd0811[24,8] s< Rd1215[24,8]), zext(Rd0811[24,8]), zext(Rd1215[24,8])); ternary(result2, (Rd0811[16,8] s< Rd1215[16,8]), zext(Rd0811[16,8]), zext(Rd1215[16,8])); ternary(result1, (Rd0811[8,8] s< Rd1215[8,8]), zext(Rd0811[8,8]), zext(Rd1215[8,8])); ternary(result0, (Rd0811[0,8] s< Rd1215[0,8]), zext(Rd0811[0,8]), zext(Rd1215[0,8])); Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0); } # MIN.BU D[c], D[a], D[b] (RR) :min.bu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x590 { local result3:4; local result2:4; local result1:4; local result0:4; ternary(result3, (Rd0811[24,8] < Rd1215[24,8]), zext(Rd0811[24,8]), zext(Rd1215[24,8])); ternary(result2, (Rd0811[16,8] < Rd1215[16,8]), zext(Rd0811[16,8]), zext(Rd1215[16,8])); ternary(result1, (Rd0811[8,8] < Rd1215[8,8]), zext(Rd0811[8,8]), zext(Rd1215[8,8])); ternary(result0, (Rd0811[0,8] < Rd1215[0,8]), 
zext(Rd0811[0,8]), zext(Rd1215[0,8])); Rd2831 = (result3 << 24) | (result2 << 16) | (result1 << 8) | (result0); } # MIN.H D[c], D[a], D[b] (RR) :min.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x780 { local result1:4; local result0:4; ternary(result1, (Rd0811[16,16] s< Rd1215[16,16]), zext(Rd0811[16,16]), zext(Rd1215[16,16])); ternary(result0, (Rd0811[0,16] s< Rd1215[0,16]), zext(Rd0811[0,16]), zext(Rd1215[0,16])); Rd2831 = (result1 << 16) | (result0); } # MIN.HU D[c], D[a], D[b] (RR) :min.hu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x790 { local result1:4; local result0:4; ternary(result1, (Rd0811[16,16] < Rd1215[16,16]), zext(Rd0811[16,16]), zext(Rd1215[16,16])); ternary(result0, (Rd0811[0,16] < Rd1215[0,16]), zext(Rd0811[0,16]), zext(Rd1215[0,16])); Rd2831 = (result1 << 16) | (result0); } # MIN.U D[c], D[a], D[b] (RR) :min.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x190 { ternary(Rd2831, Rd0811 < Rd1215, Rd0811, Rd1215); } # MIN.U D[c], D[a], const9 (RC) :min.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x19 ) & const1220Z { ternary(Rd2831, Rd0811 < const1220Z, Rd0811, const1220Z); } # MOV D[a], D[b] (SRR) :mov Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x2 { Rd0811 = Rd1215; } # MOV D[a], const4 (SRC) :mov Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & op0007=0x82 { Rd0811 = const1215S; } @if defined(TRICORE_V2) # MOV E[a], const4 (SRC) :mov Re0811,const1215S is PCPMode=0 & Re0811 & const1215S & op0007=0xd2 { Re0811 = sext(const1215S); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MOV D[15], const8 (SC) :mov d15,const0815Z is PCPMode=0 & const0815Z & d15 & op0007=0xda { d15 = const0815Z; } @endif # MOV D[c], const16 (RLC) :mov Rd2831,const1227S is PCPMode=0 & ( op0007=0x3b & op0811=0x0 ; Rd2831 ) & const1227S { Rd2831 = const1227S; } # 
MOV D[c], D[b] (RR) :mov Rd2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Rd2831 & op1627=0x1f0 { Rd2831 = Rd1215; } @if defined(TRICORE_V2) # MOV E[c], const16 (RLC) :mov Re2831,const1227S is PCPMode=0 & ( op0007=0xfb & op0811=0x0 ; Re2831 ) & const1227S { Re2831 = sext(const1227S); } @endif @if defined(TRICORE_V2) # MOV E[c], D[b] (RR) :mov Re2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0xb & op0811=0 ; Re2831 & op1627=0x800 { Re2831 = sext(Rd1215); } @endif @if defined(TRICORE_V2) # MOV E[c], D[a], D[b] (RR) :mov Ree2831/Reo2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Ree2831 & Reo2831 & op1627=0x810 { local tmp1 = Rd0811; local tmp0 = Rd1215; Reo2831 = tmp1; Ree2831 = tmp0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MOV.A A[a], const4 (SRC) :mov.a Ra0811,const1215Z is PCPMode=0 & Ra0811 & const1215Z & op0007=0xa0 { Ra0811 = const1215Z; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MOV.A A[a], D[b] (SRR) :mov.a Ra0811,Rd1215 is PCPMode=0 & Ra0811 & Rd1215 & op0007=0x60 { Ra0811 = Rd1215; } @endif # MOV.A A[c], D[b] (RR) :mov.a Ra2831,Rd1215 is PCPMode=0 & Rd1215 & op0007=0x1 & op0811=0x0 ; Ra2831 & op1627=0x630 { Ra2831 = Rd1215; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MOV.AA A[a], A[b] (SRR) :mov.aa Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x40 { Ra0811 = Ra1215; } @endif # MOV.AA A[c], A[b] (RR) :mov.aa Ra2831,Ra1215 is PCPMode=0 & Ra1215 & op0007=0x1 & op0811=0x0 ; Ra2831 & op1627=0x0 { Ra2831 = Ra1215; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MOV.D D[a], A[b] (SRR) :mov.d Rd0811,Ra1215 is PCPMode=0 & Ra1215 & Rd0811 & op0007=0x80 { Rd0811 = Ra1215; } @endif # MOV.D D[c], A[b] (RR) :mov.d Rd2831,Ra1215 is PCPMode=0 & Ra1215 & op0007=0x1 & op0811=0x0 ; Rd2831 & op1627=0x4c0 { Rd2831 = Ra1215; } # MOV.U D[c], const16 (RLC) :mov.u 
Rd2831,const1227Z is PCPMode=0 & ( op0007=0xbb & op0811=0x0 ; Rd2831 ) & const1227Z { Rd2831 = const1227Z; } # MOVH D[c], const16 (RLC) :movh Rd2831,const1227Z is PCPMode=0 & ( op0007=0x7b & op0811=0x0 ; Rd2831 ) & const1227Z { Rd2831 = const1227Z << 16; } # MOVH.A A[c], const16 (RLC) :movh.a Ra2831,const1227Z is PCPMode=0 & ( op0007=0x91 & op0811=0x0 ; Ra2831 ) & const1227Z { Ra2831 = const1227Z << 16; } #MSUB D[c], D[d], D[a], D[b] (RRR2) :msub Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Rd2427 & Rd2831 & op1623=0xa { Rd2831 = Rd2427 - (Rd0811 * Rd1215); overflowflags(Rd2831); } # MSUB D[c], D[d], D[a], const9 (RCR) :msub Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Rd2427 & Rd2831 & op2123=0x1 ) & const1220S { Rd2831 = Rd2427 - (Rd0811 * const1220S); overflowflags(Rd2831); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB E[c], E[d], D[a], const9 (RCR) :msub Re2831,Re2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Re2427 & Re2831 & op2123=0x3 ) & const1220S { Re2831 = Re2427 - sext(Rd0811 * const1220S); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB E[c], E[d], D[a], D[b] (RRR2) :msub Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Re2427 & Re2831 & op1623=0x6a { Re2831 = Re2427 - sext(Rd0811 * Rd1215); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.F D[c], D[d], D[a], D[b] (RRR) :msub.f Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x6b ; Rd2427 & Rd2831 & op1623=0x71 { #TODO float #TODO flags Rd2831 = Rd2427 f- (Rd0811 f* Rd1215); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msub.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & 
Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x18 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 - mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msub.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x19 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 - mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.H E[c], E[d], D[a], D[b] LL, n (RRR1) :msub.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 - mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.H E[c], E[d], D[a], D[b] UU, n (RRR1) :msub.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result1:4 = Reo2427 - mres1; local result0:4 = Ree2427 - mres0; Reo2831 = result1; Ree2831 = result0; overflowflagsww(result1, result0); } @endif @if defined(TRICORE_RIDER_B) || 
defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :msub.q Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x4 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); Rd2831 = Rd2427 - mres; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q D[c], D[d], D[a], D[b] U, n (RRR1) :msub.q Rd2831,Rd2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x0 { Rd2831 = Rd2427 - (((Rd0811 * sext(Rd1215[16,16])) << const1617Z) s>> 16); overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q D[c], D[d], D[a], D[b] L, n (RRR1) :msub.q Rd2831,Rd2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x1 { Rd2831 = Rd2427 - (((Rd0811 * sext(Rd1215[0,16])) << const1617Z) s>> 16); overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q D[c], D[d], D[a], D[b], n (RRR1) :msub.q Rd2831,Rd2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2 { Rd2831 = Rd2427 - (((Rd0811 * Rd1215) << const1617Z) s>> 32); overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :msub.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x5 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); Rd2831 = Rd2427 - mres; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q E[c], E[d], D[a], D[b] U, n (RRR1) :msub.q 
Re2831,Re2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x18 { Re2831 = Re2427 - sext((Rd0811 * sext(Rd1215[16,16])) << const1617Z); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q E[c], E[d], D[a], D[b] L, n (RRR1) :msub.q Re2831,Re2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x19 { Re2831 = Re2427 - sext((Rd0811 * sext(Rd1215[0,16])) << const1617Z); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q E[c], E[d], D[a], D[b], n (RRR1) :msub.q Re2831,Re2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x1b { Re2831 = Re2427 - sext((Rd0811 * Rd1215) << const1617Z); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q E[c], E[d], D[a] U, D[b] U, n (RRR1) :msub.q Re2831,Re2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x1c { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 - sext(mres << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.Q E[c], E[d], D[a] L, D[b] L, n (RRR1) :msub.q Re2831,Re2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x1d { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 - sext(mres << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.U E[c], E[d], D[a], const9 (RCR) :msub.u Re2831,Re2427,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Re2427 & Re2831 & op2123=0x2 ) &
const1220Z { Re2831 = Re2427 - zext(Rd0811 * const1220Z); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUB.U E[c], E[d], D[a], D[b] (RRR2) :msub.u Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Re2427 & Re2831 & op1623=0x68 { Re2831 = Re2427 - zext(Rd0811 * Rd1215); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBAD.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msubad.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x18 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; Reo2831 = res1; Ree2831 = res0; overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBAD.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msubad.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x19 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; Reo2831 = res1; Ree2831 = res0; overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBAD.H E[c], E[d], D[a], D[b] LL, n (RRR1) :msubad.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; Reo2831 = res1; Ree2831 = res0; overflowflagsww(res1, 
res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBAD.H E[c], E[d], D[a], D[b] UU, n (RRR1) :msubad.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x1b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; Reo2831 = res1; Ree2831 = res0; overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADM.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msubadm.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x1c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADM.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msubadm.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x1d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADM.H E[c], E[d], D[a], D[b] LL, n (RRR1) :msubadm.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x1e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADM.H E[c], E[d], 
D[a], D[b] UU, n (RRR1) :msubadm.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x1f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADMS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msubadms.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x3c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADMS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msubadms.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x3d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADMS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :msubadms.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x3e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADMS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :msubadms.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 
& Rd1215 & op0007=0xe3 ; Re2427 & Re2831 & const1617Z & op1823=0x3f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local result:8 = Re2427 - sext((mres1 - mres0) << 16); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADR.H D[c], D[d], D[a], D[b] UL, n (RRR1) :msubadr.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xc { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :msubadr.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xd { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADR.H D[c], D[d], D[a], D[b] LL, n (RRR1) :msubadr.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xe { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || 
defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADR.H D[c], D[d], D[a], D[b] UU, n (RRR1) :msubadr.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xf { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); overflowflagsww(res1, res0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADRS.H D[c], D[d], D[a], D[b] UL, n (RRR1) :msubadrs.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADRS.H D[c], D[d], D[a], D[b] LU, n (RRR1) :msubadrs.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADRS.H D[c], D[d], D[a], D[b] LL, n (RRR1) :msubadrs.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 
& Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADRS.H D[c], D[d], D[a], D[b] UU, n (RRR1) :msubadrs.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) + mres0 + 0x8000; overflowflagsww(res1, res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msubads.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x38 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msubads.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x39 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, 
const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :msubads.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBADS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :msubads.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 + mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBM.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msubm.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x1c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res:8 = Re2427 - (sext(mres1 + mres0) << 16); overflowflagsd(res); Re2831 = res; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBM.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msubm.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & 
op1823=0x1d
{
	local mres1:4; local mres0:4;
	packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	Re2831 = res;
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBM.H E[c], E[d], D[a], D[b] LL, n (RRR1)
:msubm.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x1e
{
	local mres1:4; local mres0:4;
	packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	Re2831 = res;
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBM.H E[c], E[d], D[a], D[b] UU, n (RRR1)
:msubm.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x1f
{
	local mres1:4; local mres0:4;
	packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	Re2831 = res;
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBMS.H E[c], E[d], D[a], D[b] UL, n (RRR1)
:msubms.h Re2831,Re2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x3c
{
	local mres1:4; local mres0:4;
	packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	# FIX: saturate into the destination E[c] (Re2831); the original wrote ssov(Re2427,...)
	# which clobbered the E[d] accumulator operand and left the destination unwritten.
	ssov(Re2831, res, 64);
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBMS.H E[c], E[d], D[a], D[b] LU, n (RRR1)
:msubms.h Re2831,Re2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x3d
{
	local mres1:4; local mres0:4;
	packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	ssov(Re2831, res, 64);	# FIX: destination is Re2831, not source accumulator Re2427
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBMS.H E[c], E[d], D[a], D[b] LL, n (RRR1)
:msubms.h Re2831,Re2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x3e
{
	local mres1:4; local mres0:4;
	packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	ssov(Re2831, res, 64);	# FIX: destination is Re2831, not source accumulator Re2427
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBMS.H E[c], E[d], D[a], D[b] UU, n (RRR1)
:msubms.h Re2831,Re2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Re2427 & Re2831 & const1617Z & op1823=0x3f
{
	local mres1:4; local mres0:4;
	packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - (sext(mres1 + mres0) << 16);
	overflowflagsd(res);
	ssov(Re2831, res, 64);	# FIX: destination is Re2831, not source accumulator Re2427
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBR.H D[c], E[d], D[a], D[b] UL, n (RRR1)
:msubr.h Rd2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2831 & Ree2427 & Reo2427 & const1617Z & op1823=0x1e
{
	local mres1:4; local mres0:4;
	packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res1:4 = Reo2427 - mres1 + 0x8000;
	local res0:4 = Ree2427 - mres0 + 0x8000;
	overflowflagsww(res1,res0);
	Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]);
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBR.H D[c], D[d], D[a], D[b] UL, n (RRR1)
:msubr.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xc
{
	local mres1:4; local mres0:4;
	packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z);
	local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000;
	local res0:4 = 
sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBR.H D[c], D[d], D[a], D[b] LU, n (RRR1) :msubr.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xd { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBR.H D[c], D[d], D[a], D[b] LL, n (RRR1) :msubr.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xe { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBR.H D[c], D[d], D[a], D[b] UU, n (RRR1) :msubr.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0xf { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBR.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :msubr.q 
Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x6 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 - mres + 0x8000; overflowflags(res); Rd2831 = zext(res[16,16]) << 16; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBR.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :msubr.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x7 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 - mres + 0x8000; overflowflags(res); Rd2831 = zext(res[16,16]) << 16; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBRS.H D[c], E[d], D[a], D[b] UL, n (RRR1) :msubrs.h Rd2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2831 & Ree2427 & Reo2427 & const1617Z & op1823=0x3e { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1 + 0x8000; local res0:4 = Ree2427 - mres0 + 0x8000; overflowflagsww(res1,res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBRS.H D[c], D[d], D[a], D[b] UL, n (RRR1) :msubrs.h Rd2831,Rd2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || 
defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBRS.H D[c], D[d], D[a], D[b] LU, n (RRR1) :msubrs.h Rd2831,Rd2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBRS.H D[c], D[d], D[a], D[b] LL, n (RRR1) :msubrs.h Rd2831,Rd2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBRS.H D[c], D[d], D[a], D[b] UU, n (RRR1) :msubrs.h Rd2831,Rd2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Rd2427 & Rd2831 & const1617Z & op1823=0x2f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = sext(Rd2427[16,16] << 16) - mres1 + 0x8000; local res0:4 = sext(Rd2427[0,16] << 16) - mres0 + 0x8000; overflowflagsww(res1,res0); ssov(res1, res1, 32); ssov(res0, res0, 32); Rd2831 = (zext(res1[16,16]) << 16) | zext(res0[16,16]); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBRS.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :msubrs.q 
Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x26
{
	local mres:4;
	multiply_u_u(mres, Rd0811, Rd1215, const1617Z);
	local res:4 = Rd2427 - mres + 0x8000;
	overflowflags(res);
	ssov(res, res, 32);
	Rd2831 = zext(res[16,16]) << 16;
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBRS.Q D[c], D[d], D[a] L, D[b] L, n (RRR1)
:msubrs.q Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x27
{
	local mres:4;
	multiply_l_l(mres, Rd0811, Rd1215, const1617Z);
	local res:4 = Rd2427 - mres + 0x8000;
	overflowflags(res);
	ssov(res, res, 32);
	Rd2831 = zext(res[16,16]) << 16;
}
@endif
# MSUBS D[c], D[d], D[a], D[b] (RRR2)
# FIX: multiply-subtract is D[d] - (D[a] * D[b]); the original computed (D[a] - D[b]),
# inconsistent with every other MSUB* constructor in this file.
:msubs Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Rd2427 & Rd2831 & op1623=0x8a
{
	Rd2831 = Rd2427 - (Rd0811 * Rd1215);
	overflowflags(Rd2831);
	ssov(Rd2831, Rd2831, 32);
}
# MSUBS D[c], D[d], D[a], const9 (RCR)
:msubs Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Rd2427 & Rd2831 & op2123=0x5 ) & const1220S
{
	local result:4 = Rd2427 - (Rd0811 * const1220S);	# FIX: product, not difference
	overflowflags(result);
	ssov(Rd2831, result, 32);
}
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBS E[c], E[d], D[a], const9 (RCR)
# NOTE(review): the product is formed in 32 bits and then sign-extended, matching the
# 64-bit MUL/MSUB E-register forms elsewhere in this file — confirm against HW, which
# specifies a full 32x32->64 product.
:msubs Re2831,Re2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Re2427 & Re2831 & op2123=0x7 ) & const1220S
{
	local result:8 = Re2427 - sext(Rd0811 * const1220S);	# FIX: product, not difference
	overflowflagsd(result);
	ssov(Re2831, result, 64);
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBS E[c], E[d], D[a], D[b] (RRR2)
:msubs Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Re2427 & Re2831 & op1623=0xea
{
	local result:8 = Re2427 - sext(Rd0811 * Rd1215);	# FIX: product, not difference
	overflowflagsd(result);	# FIX: 64-bit flag macro (was 32-bit overflowflags on an 8-byte value)
	ssov(Re2831, result, 64);
}
@endif
@if 
defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.H E[c], E[d], D[a], D[b] UL, n (RRR1) :msubs.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x38 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 - mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.H E[c], E[d], D[a], D[b] LU, n (RRR1) :msubs.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x39 { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 - mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.H E[c], E[d], D[a], D[b] LL, n (RRR1) :msubs.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 - mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.H E[c], E[d], D[a], D[b] UU, n (RRR1) :msubs.h Ree2831/Reo2831,Ree2427/Reo2427,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa3 ; Ree2427 & Reo2427 & Ree2831 & Reo2831 & const1617Z & op1823=0x3b { local mres1:4; local 
mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); local res1:4 = Reo2427 - mres1; local res0:4 = Ree2427 - mres0; overflowflagsww(res1, res0); ssov(Reo2831, res1, 32); ssov(Ree2831, res0, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q D[c], D[d], D[a] U, D[b] U, n (RRR1) :msubs.q Rd2831,Rd2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x24 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 - mres; overflowflags(res); ssov(Rd2831, res, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q D[c], D[d], D[a], D[b] U, n (RRR1) :msubs.q Rd2831,Rd2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x20 { local result:4 = Rd2427 - (((Rd0811 * sext(Rd1215[16,16])) << const1617Z) s>> 16); overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q D[c], D[d], D[a], D[b] L, n (RRR1) :msubs.q Rd2831,Rd2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x21 { local result:4 = Rd2427 - (((Rd0811 * sext(Rd1215[0,16])) << const1617Z) s>> 16); overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q D[c], D[d], D[a], D[b], n (RRR1) :msubs.q Rd2831,Rd2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x22 { local result:4 = Rd2427 - (((Rd0811 * Rd1215) << const1617Z) s>> 32); overflowflags(result); ssov(Rd2831, result, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q D[c], D[d], D[a] L, D[b] L, n (RRR1) :msubs.q 
Rd2831,Rd2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Rd2427 & Rd2831 & const1617Z & op1823=0x25 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); local res:4 = Rd2427 - mres; overflowflags(res); ssov(Rd2831, res, 32); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q E[c], E[d], D[a], D[b] U, n (RRR1) :msubs.q Re2831,Re2427,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x38 { local result:8 = Re2427 - sext((Rd0811 * sext(Rd1215[16,16])) << const1617Z); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q E[c], E[d], D[a], D[b] L, n (RRR1) :msubs.q Re2831,Re2427,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x39 { local result:8 = Re2427 - sext((Rd0811 * sext(Rd1215[0,16])) << const1617Z); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q E[c], E[d], D[a], D[b], n (RRR1) :msubs.q Re2831,Re2427,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x3b { local result:8 = Re2427 - sext((Rd0811 * Rd1215) << const1617Z); overflowflagsd(result); ssov(Re2831, result, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MSUBS.Q E[c], E[d], D[a] U, D[b] U, n (RRR1) :msubs.q Re2831,Re2427,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x3c { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); local res:8 = Re2427 - sext(mres << 16); overflowflagsd(res); ssov(Re2831, res, 64); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # 
MSUBS.Q E[c], E[d], D[a] L, D[b] L, n (RRR1)
:msubs.q Re2831,Re2427,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x63 ; Re2427 & Re2831 & const1617Z & op1823=0x3d
{
	local mres:4;
	multiply_l_l(mres, Rd0811, Rd1215, const1617Z);
	local res:8 = Re2427 - sext(mres << 16);
	overflowflagsd(res);
	ssov(Re2831, res, 64);
}
@endif
# MSUBS.U D[c], D[d], D[a], const9 (RCR)
# FIX: unsigned multiply-subtract is D[d] - (D[a] * const9); the original computed
# (D[a] - const9), inconsistent with the MSUB.U constructors above.
:msubs.u Rd2831,Rd2427,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Rd2427 & Rd2831 & op2123=0x4 ) & const1220Z
{
	local result:4 = Rd2427 - (Rd0811 * const1220Z);
	overflowflags(result);
	suov(Rd2831, result, 32);
}
# MSUBS.U D[c], D[d], D[a], D[b] (RRR2)
:msubs.u Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Rd2427 & Rd2831 & op1623=0x88
{
	local result:4 = Rd2427 - (Rd0811 * Rd1215);	# FIX: product, not difference
	overflowflags(result);
	suov(Rd2831, result, 32);
}
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBS.U E[c], E[d], D[a], const9 (RCR)
:msubs.u Re2831,Re2427,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x33 ; Re2427 & Re2831 & op2123=0x6 ) & const1220Z
{
	local result:8 = Re2427 - zext(Rd0811 * const1220Z);	# FIX: product, not difference
	overflowflagsd(result);
	# FIX: MSUBS.U saturates unsigned; the 32-bit forms use suov but these E-register
	# forms used signed ssov.
	suov(Re2831, result, 64);
}
@endif
@if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2)
# MSUBS.U E[c], E[d], D[a], D[b] (RRR2)
:msubs.u Re2831,Re2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x23 ; Re2427 & Re2831 & op1623=0xe8
{
	local result:8 = Re2427 - zext(Rd0811 * Rd1215);	# FIX: product, not difference
	overflowflagsd(result);
	suov(Re2831, result, 64);	# FIX: unsigned saturation (was ssov)
}
@endif
# MTCR const16, D[a] (RLC) -- write core special-function register selected by const16
:mtcr const1227Z,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xcd ; op2831=0x0 ) & const1227Z
{
	*[register]:4 const1227Z = Rd0811;
}
@if defined(TRICORE_V2)
:mtfr Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; op1631=0x1c1
{
	*[register]:4 Rd1215 = Rd0811;
}
@endif
# MUL D[a], D[b] (SRR)
:mul Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xe2 { Rd0811 = Rd0811 * Rd1215; 
overflowflags(Rd0811); } # MUL D[c], D[a], const9 (RC) :mul Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x53 ; Rd2831 & op2127=0x1 ) & const1220S { Rd2831 = Rd0811 * const1220S; overflowflags(Rd2831); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL D[c], D[a], D[b] (RR2) :mul Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x73 ; Rd2831 & op1627=0xa { Rd2831 = Rd0811 * Rd1215; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL E[c], D[a], const9 (RC) :mul Re2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x53 ; Re2831 & op2127=0x3 ) & const1220S { Re2831 = sext(Rd0811 * const1220S); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL E[c], D[a], D[b] (RR2) :mul Re2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x73 ; Re2831 & op1627=0x6a { Re2831 = sext(Rd0811 * Rd1215); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.F D[c], D[a], D[b] (RR) :mul.f Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x41 { #TODO float #TODO flags Rd2831 = Rd0811 f* Rd1215; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.H E[c], D[a], D[b] UL, n (RR1) :mul.h Ree2831/Reo2831,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Ree2831 & Reo2831 & const1617Z & op1827=0x18 { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); Reo2831 = mres1; Ree2831 = mres0; advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.H E[c], D[a], D[b] LU, n (RR1) :mul.h Ree2831/Reo2831,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Ree2831 & Reo2831 & const1617Z & op1827=0x19 { local mres1:4; local 
mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); Reo2831 = mres1; Ree2831 = mres0; advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.H E[c], D[a], D[b] LL, n (RR1) :mul.h Ree2831/Reo2831,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Ree2831 & Reo2831 & const1617Z & op1827=0x1a { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); Reo2831 = mres1; Ree2831 = mres0; advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.H E[c], D[a], D[b] UU, n (RR1) :mul.h Ree2831/Reo2831,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Ree2831 & Reo2831 & const1617Z & op1827=0x1b { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); Reo2831 = mres1; Ree2831 = mres0; advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q D[c], D[a], D[b] U, n (RR1) :mul.q Rd2831,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x0 { Rd2831 = ((Rd0811 * sext(Rd1215[16,16])) << const1617Z) s>> 16; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q D[c], D[a], D[b] L, n (RR1) :mul.q Rd2831,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x1 { Rd2831 = ((Rd0811 * sext(Rd1215[0,16])) << const1617Z) s>> 16; overflowflags(Rd2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q D[c], D[a], D[b], n (RR1) :mul.q Rd2831,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x2 { Rd2831 = ((Rd0811 * Rd1215) << const1617Z) s>> 32; overflowflags(Rd2831); } 
@endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q D[c], D[a] U, D[b] U, n (RR1) :mul.q Rd2831,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x4 { local mres:4; multiply_u_u(mres, Rd0811, Rd1215, const1617Z); overflowflags(mres); Rd2831 = mres; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q D[c], D[a] L, D[b] L, n (RR1) :mul.q Rd2831,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x5 { local mres:4; multiply_l_l(mres, Rd0811, Rd1215, const1617Z); overflowflags(mres); Rd2831 = mres; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q E[c], D[a], D[b] U, n (RR1) :mul.q Re2831,Rd0811,Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Re2831 & const1617Z & op1827=0x18 { Re2831 = sext(Rd0811 * sext(Rd1215[16,16])) << const1617Z; overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q E[c], D[a], D[b] L, n (RR1) :mul.q Re2831,Rd0811,Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Re2831 & const1617Z & op1827=0x19 { Re2831 = sext(Rd0811 * sext(Rd1215[0,16])) << const1617Z; overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.Q E[c], D[a], D[b], n (RR1) :mul.q Re2831,Rd0811,Rd1215,const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Re2831 & const1617Z & op1827=0x1b { Re2831 = sext(Rd0811 * Rd1215) << const1617Z; overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.U E[c], D[a], const9 (RC) :mul.u Re2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x53 ; Re2831 & op2127=0x2 ) & const1220Z { Re2831 = zext(Rd0811 * const1220Z); overflowflagsd(Re2831); } @endif @if 
defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MUL.U E[c], D[a], D[b] (RR2) :mul.u Re2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x73 ; Re2831 & op1627=0x68 { Re2831 = zext(Rd0811 * Rd1215); overflowflagsd(Re2831); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULM.H E[c], D[a], D[b] UL, n (RR1) :mulm.h Re2831,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Re2831 & const1617Z & op1827=0x1c { local mres1:4; local mres0:4; packed_multiply_ul(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = sext(mres1 + mres0) << 16; $(PSW_V) = 0; $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULM.H E[c], D[a], D[b] LU, n (RR1) :mulm.h Re2831,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Re2831 & const1617Z & op1827=0x1d { local mres1:4; local mres0:4; packed_multiply_lu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = sext(mres1 + mres0) << 16; $(PSW_V) = 0; $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULM.H E[c], D[a], D[b] LL, n (RR1) :mulm.h Re2831,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Re2831 & const1617Z & op1827=0x1e { local mres1:4; local mres0:4; packed_multiply_ll(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = sext(mres1 + mres0) << 16; $(PSW_V) = 0; $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULM.H E[c], D[a], D[b] UU, n (RR1) :mulm.h Re2831,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Re2831 & const1617Z & op1827=0x1f { local mres1:4; local mres0:4; packed_multiply_uu(mres1, mres0, Rd0811, Rd1215, const1617Z); Re2831 = sext(mres1 + mres0) << 16; $(PSW_V) = 0; $(PSW_AV) = 0; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # 
MULR.H D[c], D[a], D[b] UL, n (RR1) :mulr.h Rd2831,Rd0811,Rd1215^"ul",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Rd2831 & const1617Z & op1827=0xc { local sc1 = (Rd0811[16,16] == 0x8000) && (Rd1215[16,16] == 0x8000) && (const1617Z == 1); local sc0 = (Rd0811[0,16] == 0x8000) && (Rd1215[0,16] == 0x8000) && (const1617Z == 1); local mres1:4; local mres0:4; ternary(mres1, sc1, 0x7FFFFFFF, sext(((Rd0811[16,16] * Rd1215[16,16]) << const1617Z) + 0x8000)); ternary(mres0, sc0, 0x7FFFFFFF, sext(((Rd0811[0,16] * Rd1215[0,16]) << const1617Z) + 0x8000)); Rd2831 = (zext(mres1[16,16]) << 16) | zext(mres0[16,16]); advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULR.H D[c], D[a], D[b] LU, n (RR1) :mulr.h Rd2831,Rd0811,Rd1215^"lu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Rd2831 & const1617Z & op1827=0xd { local sc1 = (Rd0811[16,16] == 0x8000) && (Rd1215[0,16] == 0x8000) && (const1617Z == 1); local sc0 = (Rd0811[0,16] == 0x8000) && (Rd1215[16,16] == 0x8000) && (const1617Z == 1); local mres1:4; local mres0:4; ternary(mres1, sc1, 0x7FFFFFFF, sext(((Rd0811[16,16] * Rd1215[0,16]) << const1617Z) + 0x8000)); ternary(mres0, sc0, 0x7FFFFFFF, sext(((Rd0811[0,16] * Rd1215[16,16]) << const1617Z) + 0x8000)); Rd2831 = (zext(mres1[16,16]) << 16) | zext(mres0[16,16]); advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # # MULR.H D[c], D[a], D[b] LL, n (RR1) :mulr.h Rd2831,Rd0811,Rd1215^"ll",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Rd2831 & const1617Z & op1827=0xe { local sc1 = (Rd0811[16,16] == 0x8000) && (Rd1215[0,16] == 0x8000) && (const1617Z == 1); local sc0 = (Rd0811[0,16] == 0x8000) && (Rd1215[0,16] == 0x8000) && (const1617Z == 1); local mres1:4; local mres0:4; ternary(mres1, sc1, 0x7FFFFFFF, sext(((Rd0811[16,16] * Rd1215[0,16]) << const1617Z) + 0x8000)); ternary(mres0, sc0, 0x7FFFFFFF, 
sext(((Rd0811[0,16] * Rd1215[0,16]) << const1617Z) + 0x8000)); Rd2831 = (zext(mres1[16,16]) << 16) | zext(mres0[16,16]); advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULR.H D[c], D[a], D[b] UU, n (RR1) :mulr.h Rd2831,Rd0811,Rd1215^"uu",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb3 ; Rd2831 & const1617Z & op1827=0xf { local sc1 = (Rd0811[0,16] == 0x8000) && (Rd1215[16,16] == 0x8000) && (const1617Z == 1); local sc0 = (Rd0811[16,16] == 0x8000) && (Rd1215[16,16] == 0x8000) && (const1617Z == 1); local mres1:4; local mres0:4; ternary(mres1, sc1, 0x7FFFFFFF, sext(((Rd0811[0,16] * Rd1215[16,16]) << const1617Z) + 0x8000)); ternary(mres0, sc0, 0x7FFFFFFF, sext(((Rd0811[16,16] * Rd1215[16,16]) << const1617Z) + 0x8000)); Rd2831 = (zext(mres1[16,16]) << 16) | zext(mres0[16,16]); advoverflowflagsww(mres1, mres0); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULR.Q D[c], D[a] U, D[b] U, n (RR1) :mulr.q Rd2831,Rd0811^"u",Rd1215^"u",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x6 { local sc = (Rd0811[16,16] == 0x8000) && (Rd1215[16,16] == 0x8000) && (const1617Z == 1); local res:4; ternary(res, sc, 0x7FFFFFFF, ((sext(Rd0811[16,16] * Rd1215[16,16]) << const1617Z) + 0x8000)); Rd2831 = zext(res[16,16]) << 16; advoverflowflags(res); } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULR.Q D[c], D[a] L, D[b] L, n (RR1) :mulr.q Rd2831,Rd0811^"l",Rd1215^"l",const1617Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x93 ; Rd2831 & const1617Z & op1827=0x7 { local sc = (Rd0811[0,16] == 0x8000) && (Rd1215[0,16] == 0x8000) && (const1617Z == 1); local res:4; ternary(res, sc, 0x7FFFFFFF, ((sext(Rd0811[0,16] * Rd1215[0,16]) << const1617Z) + 0x8000)); Rd2831 = zext(res[16,16]) << 16; advoverflowflags(res); } @endif # MULS D[c], D[a], const9 (RC) :muls Rd2831,Rd0811,const1220S is PCPMode=0 
& ( Rd0811 & op0007=0x53 ; Rd2831 & op2127=0x5 ) & const1220S { local result:4 = Rd0811 * const1220S; overflowflags(result); ssov(Rd2831, result, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULS D[c], D[a], D[b] (RR2) :muls Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x73 ; Rd2831 & op1627=0x8a { local result:4 = Rd0811 * Rd1215; overflowflags(result); ssov(Rd2831, result, 32); } @endif # MULS.U D[c], D[a], const9 (RC) :muls.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x53 ; Rd2831 & op2127=0x4 ) & const1220Z { local result:4 = Rd0811 * const1220Z; overflowflags(result); suov(Rd2831, result, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # MULS.U D[c], D[a], D[b] (RR2) :muls.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x73 ; Rd2831 & op1627=0x88 { local result:4 = Rd0811 * Rd1215; overflowflags(result); suov(Rd2831, result, 32); } @endif # NAND D[c], D[a], D[b] (RR) :nand Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x90 { Rd2831 = ~(Rd0811 & Rd1215); } # NAND D[c], D[a], const9 (RC) :nand Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x9 ) & const1220Z { Rd2831 = ~(Rd0811 & const1220Z); } # NAND.T D[c], D[a], pos1, D[b], pos2 (BIT) :nand.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x7 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmp1:4 = (Rd0811 >> const1620Z) & 1; local tmp2:4 = (Rd1215 >> const2327Z) & 1; Rd2831 = zext(!(tmp1[0,1] & tmp2[0,1])); } # NE D[c], D[a], D[b] (RR) :ne Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x110 { Rd2831 = zext(Rd0811 != Rd1215); } # NE D[c], D[a], const9 (RC) :ne Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x11 ) & const1220S { Rd2831 = zext(Rd0811 != const1220S); } # NE.A D[c], A[a], A[b] (RR) 
:ne.a Rd2831,Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x1 ; Rd2831 & op1627=0x410 { Rd2831 = zext(Ra0811 != Ra1215); } # NEZ.A D[c], A[a] (RR) :nez.a Rd2831,Ra0811 is PCPMode=0 & Ra0811 & op0007=0x1 & op1215=0x0 ; Rd2831 & op1627=0x490 { Rd2831 = zext(Ra0811 != 0); } # NOP (SR) :nop is PCPMode=0 & op0007=0x0 & op0815=0x0 { local NOP:1 = 0:1; NOP = NOP; } # NOP (SYS) :nop is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x0 { local NOP:1 = 0:1; NOP = NOP; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # NOT D[a] (SR) :not Rd0811 is PCPMode=0 & Rd0811 & op0007=0x46 & op1215=0x0 { Rd0811 = ~Rd0811; } @endif # NOR D[c], D[a], D[b] (RR) :nor Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0xb0 { Rd2831 = ~(Rd0811 | Rd1215); } # NOR D[c], D[a], const9 (RC) :nor Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0xb ) & const1220Z { Rd2831 = ~(Rd0811 | const1220Z); } # NOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :nor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x87 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local tmp = Rd0811 >> const1620Z; local tmp2 = Rd1215 >> const2327Z; Rd2831 = ~(tmp | tmp2) & 1; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # OR D[a], D[b] (SRR) :or Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa6 { Rd0811 = Rd0811 | Rd1215; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # OR D[15], const8 (SC) :or d15,const0815Z is PCPMode=0 & const0815Z & d15 & op0007=0x96 { d15 = d15 | const0815Z; } @endif # OR D[c], D[a], D[b] (RR) :or Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0xa0 { Rd2831 = Rd0811 | Rd1215; } # OR D[c], D[a], const9 (RC) :or Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0xa ) & const1220Z { Rd2831 = Rd0811 | const1220Z; } # 
OR.AND.T D[c], D[a], pos1, D[b], pos2 (BIT) :or.and.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc7 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmp = (Rd0811 >> const1620Z) & 1; local tmp2 = (Rd1215 >> const2327Z) & 1; local tmp3 = zext(Rd2831[0,1]); tmp3 = tmp3 | (tmp & tmp2); Rd2831[0,1] = tmp3[0,1]; } # OR.ANDN.T D[c], D[a], pos1, D[b], pos2 (BIT) :or.andn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc7 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local tmp = ((Rd0811 >> const1620Z) & 1) == 1; local tmp2 = ((Rd1215 >> const2327Z) & 1) == 0; local tmp3 = (Rd2831 & 1) == 1; tmp3 = tmp3 | (tmp & tmp2); Rd2831[0,1] = tmp3; } # OR.EQ D[c], D[a], D[b] (RR) :or.eq Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x270 { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 == Rd1215); } # OR.EQ D[c], D[a], const9 (RC) :or.eq Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x27 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 == const1220S); } # OR.GE D[c], D[a], D[b] (RR) :or.ge Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x2b0 { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 s>= Rd1215); } # OR.GE D[c], D[a], const9 (RC) :or.ge Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x2b ) & const1220S { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 s>= const1220S); } # OR.GE.U D[c], D[a], D[b] (RR) :or.ge.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x2c0 { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 >= Rd1215); } # OR.GE.U D[c], D[a], const9 (RC) :or.ge.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x2c ) & const1220Z { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 >= const1220Z); } # OR.LT D[c], D[a], D[b] (RR) :or.lt Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x290 { 
Rd2831[0,1] = Rd2831[0,1] | (Rd0811 s< Rd1215); } # OR.LT D[c], D[a], const9 (RC) :or.lt Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x29 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 s< const1220S); } # OR.LT.U D[c], D[a], D[b] (RR) :or.lt.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x2a0 { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 < Rd1215); } # OR.LT.U D[c], D[a], const9 (RC) :or.lt.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x2a ) & const1220Z { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 < const1220Z); } # OR.NE D[c], D[a], D[b] (RR) :or.ne Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x280 { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 != Rd1215); } # OR.NE D[c], D[a], const9 (RC) :or.ne Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x28 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] | (Rd0811 != const1220S); } # OR.NOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :or.nor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc7 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local tmp = (Rd0811 >> const1620Z) & 1; local tmp2 = (Rd1215 >> const2327Z) & 1; local tmp3 = zext(Rd2831[0,1]); tmp3 = tmp3 | ~(tmp | tmp2); Rd2831[0,1] = tmp3[0,1]; } # OR.OR.T D[c], D[a], pos1, D[b], pos2 (BIT) :or.or.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc7 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmp = (Rd0811 >> const1620Z) & 1; local tmp2 = (Rd1215 >> const2327Z) & 1; local tmp3 = zext(Rd2831[0,1]); tmp3 = tmp3 | (tmp | tmp2); Rd2831[0,1] = tmp3[0,1]; } # OR.T D[c], D[a], pos1, D[b], pos2 (BIT) :or.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x87 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmp = (Rd0811 >> const1620Z) & 1; local tmp2 = (Rd1215 >> const2327Z) & 1; Rd2831 = 
tmp | tmp2; } # ORN D[c], D[a], D[b] (RR) :orn Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0xf0 { Rd2831 = Rd0811 | ~Rd1215; } # ORN D[c], D[a], const9 (RC) :orn Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0xf ) & const1220Z { Rd2831 = Rd0811 | ~const1220Z; } # ORN.T D[c], D[a], pos1, D[b], pos2 (BIT) :orn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x7 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmp = ((Rd0811 >> const1620Z) & 1) == 1; local tmp2 = ((Rd1215 >> const2327Z) & 1) == 0; Rd2831 = zext(tmp | tmp2); } # PACK D[c], E[d], D[a] (RRR) :pack Rd2831,Ree2427/Reo2427,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x6b & op1215=0x0 ; Rd2831 & Ree2427 & Reo2427 & op1623=0x0 { #TODO o=exp e=mantissa D[a][31]=sign # int_exp = E[d][63:32]; # int_mant = E[d][31:0]; # flag_rnd = int_mant[7] AND (int_mant[8] OR int_mant[6:0] OR PSW.C); # if ((int_mant[31] == 0) AND (int_exp == +255)) then { # // Infinity or NaN # fp_exp = +255; # fp_frac = int_mant[30:8]; # } else if ((int_mant[31] == 1) AND (int_exp >= +127)) then { # // Overflow ? Infinity. # fp_exp = +255; # fp_frac = 0; # } else if ((int_mant[31] == 1) AND (int_exp <= -128)) then { # // Underflow ? 
Zero # fp_exp = 0; # fp_frac = 0; # } else if (int_mant == 0) then { # // Zero # fp_exp = 0; # fp_frac = 0; # } else { # if (int_mant[31] == 0) then { # // Denormal # temp_exp = 0; # } else { # // Normal # temp_exp = int_exp + 128; # } # fp_exp_frac[30:0] = {tmp_exp[7:0], int_mant[30:8]} + flag_rnd; # fp_exp = fp_exp_frac[30:23]; # fp_frac = fp_exp_frac[22:0]; # } # D[c][31] = D[a][31]; # D[c][30:23] = fp_exp; # D[c][22:0] = fp_frac; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # PARITY D[c], D[a] (RR) :parity Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x20 { local result3:1 = Rd0811[24,1] ^ Rd0811[25,1] ^ Rd0811[26,1] ^ Rd0811[27,1] ^ Rd0811[28,1] ^ Rd0811[29,1] ^Rd0811[30,1] ^ Rd0811[31,1]; local result2:1 = Rd0811[16,1] ^ Rd0811[17,1] ^ Rd0811[18,1] ^ Rd0811[19,1] ^ Rd0811[20,1] ^ Rd0811[21,1] ^Rd0811[22,1] ^ Rd0811[23,1]; local result1:1 = Rd0811[8,1] ^ Rd0811[9,1] ^ Rd0811[10,1] ^ Rd0811[11,1] ^ Rd0811[12,1] ^ Rd0811[13,1] ^Rd0811[14,1] ^ Rd0811[15,1]; local result0:1 = Rd0811[0,1] ^ Rd0811[1,1] ^ Rd0811[2,1] ^ Rd0811[3,1] ^ Rd0811[4,1] ^ Rd0811[5,1] ^ Rd0811[6,1] ^ Rd0811[7,1]; Rd2831 = (zext(result3) << 24) | (zext(result2) << 16) | (zext(result1) << 8) | zext(result0); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # Q31TOF D[c], D[a], D[b] (RR) :q31tof Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x4b ; Rd2831 & op1627=0x151 { #TODO float #TODO flags } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # QSEED.F D[c], D[a] (RR) :qseed.f Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x191 { #TODO float #TODO flags Rd2831 = 1 f/ sqrt(Rd0811); } @endif @if defined(TRICORE_V2) # RESTORE D[a] (SYS) :restore Rd0811 is PCPMode=0 & Rd0811 & op0007=0xd & op1215=0x0 ; op1631=0x380 { $(ICR_IE) = Rd0811[0,1]; } @endif # RET (SR) :ret is PCPMode=0 & op0007=0x0 & op0815=0x90 { #TODO ret # if (PSW.CDE) then if (cdc_decrement()) 
then trap(CDU); # if (PCXI[19:0] == 0) then trap(CSU); # if (PCXI.UL == 0) then trap(CTYP); # PC = {A[11] [31:1], 1’b0}; # EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; # {new_PCXI, new_PSW, a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15} = M(EA, 16 * word); # M(EA, word) = FCX; # FCX[19:0] = PCXI[19:0]; # PCXI = new_PCXI; # PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; local tmp:4 = a11 & 0xFFFFFFFE; restoreCallerState(FCX, LCX, PCXI); return [tmp]; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # RET (SYS) :ret is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x180 { #TODO ret # if (PSW.CDE) then if (cdc_decrement()) then trap(CDU); # if (PCXI[19:0] == 0) then trap(CSU); # if (PCXI.UL == 0) then trap(CTYP); # PC = {A[11] [31:1], 1’b0}; # EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; # {new_PCXI, new_PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15} = M(EA, 16 * word); # M(EA, word) = FCX; # FCX[19:0] = PCXI[19:0]; # PCXI = new_PCXI; # PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; local tmp:4 = a11 & 0xFFFFFFFE; restoreCallerState(FCX, LCX, PCXI); return [tmp]; } @endif # RFE (SR) :rfe is PCPMode=0 & op0007=0x0 & op0815=0x80 { #TODO ret # if (PCXI[19:0] == 0) then trap(CSU); # if (PCXI.UL == 0) then trap(CTYP); # if (!cdc_zero() AND PSW.CDE) then trap(NEST); # PC = {A[11] [31:1], 1’b0}; # ICR.IE = PCXI.PIE; # ICR.CCPN = PCXI.PCPN; # EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; # {new_PCXI,PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15}=M(EA,16*word); # M(EA, word) = FCX; # FCX[19:0] = PCXI[19:0]; # PCXI = new_PCXI; local tmp:4 = a11 & 0xFFFFFFFE; restoreCallerState(FCX, LCX, PCXI); return [tmp]; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # RFE (SYS) :rfe is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x1c0 { #TODO ret # if (PCXI[19:0] == 0) then trap(CSU); # if (PCXI.UL == 0) then trap(CTYP); # if (!cdc_zero() AND PSW.CDE) then trap(NEST); # PC = {A[11] [31:1], 1’b0}; 
# ICR.IE = PCXI.PIE; # ICR.CCPN = PCXI.PCPN; # EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; # {new_PCXI,PSW,a10,a11,d8,d9,d10,d11,a12,a13,a14,a15,d12,d13,d14,d15}=M(EA,16*word); # M(EA, word) = FCX; # FCX[19:0] = PCXI[19:0]; # PCXI = new_PCXI; local tmp:4 = a11 & 0xFFFFFFFE; restoreCallerState(FCX, LCX, PCXI); return [tmp]; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # RFM (SYS) :rfm is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x140 { #TODO ret # if (PSW.IO != 2’b10) then trap (PRIV); # if (DBGSR.DE) then{ # PC = {A[11] [31:1], 1’b0}; # ICR.IE = PCXI.IE; # ICR.CCPN = PCXI.PCPN; # EA = DCX; # {PCXI, PSW, A[10], A[11]} = M(EA, 4 * word); # DBGTCR.DTA = 0; # }else{ # NOP # } local tmp:4 = a11 & 0xFFFFFFFE; restoreCallerState(FCX, LCX, PCXI); return [tmp]; } @endif # RSLCX (SYS) :rslcx is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x240 { #TODO context # if(PCXI[19:0] == 0) then trap(CSU); # if(PCXI.UL == 1) then trap(CTYP); # EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; # {new_PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4], A[5], A[6], A[7], D[4], D[5], D[6], D[7]} = M(EA, 16*word); # M(EA, word) = FCX; # FCX[19:0] = PCXI[19:0]; # PCXI = new_PCXI; } # RSTV (SYS) :rstv is PCPMode=0 & op0007=0x2f & op0815=0x0 ; op1631=0x0 { $(PSW_V) = 0; $(PSW_SV) = 0; $(PSW_AV) = 0; $(PSW_SAV) = 0; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # RSUB D[a] (SR) :rsub Rd0811 is PCPMode=0 & Rd0811 & op0007=0x32 & op1215=0x5 { Rd0811 = 0 - Rd0811; overflowflags(Rd0811); } @endif # RSUB D[c], D[a], const9 (RC) :rsub Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x8 ) & const1220S { Rd2831 = const1220S - Rd0811; overflowflags(Rd2831); } # RSUBS D[c], D[a], const9 (RC) :rsubs Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0xa ) & const1220S { local result:4 = const1220S - Rd0811; overflowflags(result); ssov(Rd2831, result, 32); } # RSUBS.U 
D[c], D[a], const9 (RC) :rsubs.u Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0xb ) & const1220S { local result:4 = const1220S - Rd0811; overflowflags(result); suov(Rd2831, result, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SAT.B D[a] (SR) :sat.b Rd0811 is PCPMode=0 & Rd0811 & op0007=0x32 & op1215=0x0 { local sat_neg:4; ternary(sat_neg, Rd0811 s< -0x80, -0x80, Rd0811); ternary(Rd0811, sat_neg s> 0x7f, 0x7f, sat_neg); } @endif # SAT.B D[c], D[a] (RR) :sat.b Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xb & op1215=0x0 ; Rd2831 & op1627=0x5e0 { local sat_neg:4; ternary(sat_neg, Rd0811 s< -0x80, -0x80, Rd0811); ternary(Rd2831, sat_neg s> 0x7f, 0x7f, sat_neg); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SAT.BU D[a] (SR) :sat.bu Rd0811 is PCPMode=0 & Rd0811 & op0007=0x32 & op1215=0x1 { ternary(Rd0811, Rd0811 > 0xff, 0xff, Rd0811); } @endif # SAT.BU D[c], D[a] (RR) :sat.bu Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xb & op1215=0x0 ; Rd2831 & op1627=0x5f0 { ternary(Rd2831, Rd0811 > 0xff, 0xff, Rd0811); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SAT.H D[a] (SR) :sat.h Rd0811 is PCPMode=0 & Rd0811 & op0007=0x32 & op1215=0x2 { local sat_neg:4; ternary(sat_neg, Rd0811 s< -0x8000, -0x8000, Rd0811); ternary(Rd0811, sat_neg s> 0x7fff, 0x7fff, sat_neg); } @endif # SAT.H D[c], D[a] (RR) :sat.h Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xb & op1215=0x0 ; Rd2831 & op1627=0x7e0 { local sat_neg:4; ternary(sat_neg, Rd0811 s< -0x8000, -0x8000, Rd0811); ternary(Rd2831, sat_neg s> 0x7fff, 0x7fff, sat_neg); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SAT.HU D[a] (SR) :sat.hu Rd0811 is PCPMode=0 & Rd0811 & op0007=0x32 & op1215=0x3 { ternary(Rd0811, Rd0811 > 0xffff, 0xffff, Rd0811); } @endif # SAT.HU D[c], D[a] (RR) :sat.hu Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xb & op1215=0x0 ; 
Rd2831 & op1627=0x7f0 { ternary(Rd2831, Rd0811 > 0xffff, 0xffff, Rd0811); } # SEL D[c], D[d], D[a], D[b] (RRR) :sel Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x2b ; Rd2427 & Rd2831 & op1623=0x40 { ternary(Rd2831, Rd2427 != 0, Rd0811, Rd1215); } # SEL D[c], D[d], D[a], const9 (RCR) :sel Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0xab ; Rd2427 & Rd2831 & op2123=0x4 ) & const1220S { ternary(Rd2831, Rd2427 != 0, Rd0811, const1220S); } # SELN D[c], D[d], D[a], D[b] (RRR) :seln Rd2831,Rd2427,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x2b ; Rd2427 & Rd2831 & op1623=0x50 { ternary(Rd2831, Rd2427 == 0, Rd0811, Rd1215); } # SELN D[c], D[d], D[a], const9 (RCR) :seln Rd2831,Rd2427,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0xab ; Rd2427 & Rd2831 & op2123=0x5 ) & const1220S { ternary(Rd2831, Rd2427 == 0, Rd0811, const1220S); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SH D[a], const4 (SRC) :sh Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & op0007=0x6 & op1515=1 { local tmp = -const1215S; Rd0811 = Rd0811 >> tmp; } # SH D[a], const4 (SRC) :sh Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & op0007=0x6 & op1515=0 { Rd0811 = Rd0811 << const1215S; } @endif # SH D[c], D[a], D[b] (RR) :sh Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x0 { local shift_count:4 = sext(Rd1215[0,6]); shift_count = (shift_count << (32 - 6)) s>> (32 - 6); ternary(Rd2831, shift_count s>= 0, Rd0811 << shift_count, Rd0811 >> -shift_count); } # SHD[c], D[a], const9 (RC) :sh Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x0 & op2020=1) & const1220S { local tmp = -const1220S; Rd2831 = Rd0811 >> tmp[0,6]; } # SHD[c], D[a], const9 (RC) :sh Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x0 & op2020=0 ) & const1220S { Rd2831 = Rd0811 << const1220S[0,6]; } # SH.AND.T D[c], D[a], pos1, D[b], 
pos2 (BIT) :sh.and.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x27 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local pos1 = (Rd0811 >> const1620Z) & 1; local pos2 = (Rd1215 >> const2327Z) & 1; local tmp = pos1 & pos2; Rd2831 = (Rd2831 << 1) | zext(tmp[0,1]); } # SH.ANDN.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.andn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x27 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local pos1 = (Rd0811 >> const1620Z) & 1; local pos2 = (Rd1215 >> const2327Z) & 1; local tmp = pos1 & ~pos2; Rd2831 = (Rd2831 << 1) | zext(tmp[0,1]); } # SH.EQ D[c], D[a], D[b] (RR) :sh.eq Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x370 { Rd2831 = (Rd2831 << 1) | zext(Rd0811 == Rd1215); } # SH.EQ D[c], D[a], const9 (RC) :sh.eq Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x37 ) & const1220S { Rd2831 = (Rd2831 << 1) | zext(Rd0811 == const1220S); } # SH.GE D[c], D[a], D[b] (RR) :sh.ge Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x3b0 { Rd2831 = (Rd2831 << 1) | zext(Rd0811 s>= Rd1215); } # SH.GE D[c], D[a], const9 (RC) :sh.ge Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x3b ) & const1220S { Rd2831 = (Rd2831 << 1) | zext(Rd0811 s>= const1220S); } # SH.GE.U D[c], D[a], D[b] (RR) :sh.ge.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x3c0 { Rd2831 = (Rd2831 << 1) | zext(Rd0811 >= Rd1215); } # SH.GE.U D[c], D[a], const9 (RC) :sh.ge.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x3c ) & const1220Z { Rd2831 = (Rd2831 << 1) | zext(Rd0811 >= const1220Z); } # SH.H D[c], D[a], D[b] (RR) :sh.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x400 { local res1:2; local res0:2; local shift_count:4 = sext(Rd1215[0,5]); 
# --- SH.H packed-halfword logical shifts (RR/RC) and the remaining SH.<cond> / SH.<bitop>.T
# --- shift-accumulate constructors (LT, LT.U, NAND.T, NE, NOR.T, OR.T, ORN.T, XNOR.T, XOR.T):
# --- each shifts D[c] left by one and ORs in a single compare/bit-test result bit.
# NOTE(review): in both SH.H forms, `Rd2831 = zext(res1 << 16) | zext(res0);` shifts the
# 2-byte `res1` left by 16, which always yields 0 in SLEIGH (shift result has the size of the
# left operand); the intended shape is `(zext(res1) << 16) | zext(res0)` as used by SHA.H (RC)
# later in this file. Also `(shift_count << (16 - 5)) s>> (16 - 5)` operates on a 4-byte local
# -- sign-extending a 5-bit field in 32 bits presumably needs `(32 - 5)`; TODO confirm against
# the upstream Ghidra tricore.sinc.
shift_count = (shift_count << (16 - 5)) s>> (16 - 5); ternary(res1, (shift_count s>= 0), Rd0811[16,16] << shift_count, Rd0811[16,16] >> -shift_count); ternary(res0, (shift_count s>= 0), Rd0811[0,16] << shift_count, Rd0811[0,16] >> -shift_count); Rd2831 = zext(res1 << 16) | zext(res0); } # SH.H D[c], D[a], const9 (RC) :sh.h Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x40 ) & const1220S { local res1:2; local res0:2; local shift_count = sext(const1220S[0,5]); shift_count = (shift_count << (16 - 5)) s>> (16 - 5); ternary(res1, (shift_count s>= 0), Rd0811[16,16] << shift_count, Rd0811[16,16] >> -shift_count); ternary(res0, (shift_count s>= 0), Rd0811[0,16] << shift_count, Rd0811[0,16] >> -shift_count); Rd2831 = zext(res1 << 16) | zext(res0); } # SH.LT D[c], D[a], D[b] (RR) :sh.lt Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x390 { Rd2831 = (Rd2831 << 1) | zext(Rd0811 s< Rd1215); } # SH.LT D[c], D[a], const9 (RC) :sh.lt Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x39 ) & const1220S { Rd2831 = (Rd2831 << 1) | zext(Rd0811 s< const1220S); } # SH.LT.U D[c], D[a], D[b] (RR) :sh.lt.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x3a0 { Rd2831 = (Rd2831 << 1) | zext(Rd0811 < Rd1215); } # SH.LT.U D[c], D[a], const9 (RC) :sh.lt.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x3a ) & const1220Z { Rd2831 = (Rd2831 << 1) | zext(Rd0811 < const1220Z); } # SH.NAND.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.nand.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa7 ; Rd2831 & const1620Z & const2327Z & op2122=0x0 { local tmp1 = ((Rd0811 >> const1620Z) & 1) == 1; local tmp0 = ((Rd1215 >> const2327Z) & 1) == 1; local res = !(tmp1 && tmp0); Rd2831 = (Rd2831 << 1) | zext(res); } # SH.NE D[c], D[a], D[b] (RR) :sh.ne Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & 
op0007=0xb ; Rd2831 & op1627=0x380 { Rd2831 = (Rd2831 << 1) | zext(Rd0811 != Rd1215); } # SH.NE D[c], D[a], const9 (RC) :sh.ne Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x38 ) & const1220S { Rd2831 = (Rd2831 << 1) | zext(Rd0811 != const1220S); } # SH.NOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.nor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x27 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local tmp1 = ((Rd0811 >> const1620Z) & 1) == 1; local tmp0 = ((Rd1215 >> const2327Z) & 1) == 1; local res = !(tmp1 || tmp0); Rd2831 = (Rd2831 << 1) | zext(res); } # SH.OR.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.or.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x27 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmp1 = ((Rd0811 >> const1620Z) & 1) == 1; local tmp0 = ((Rd1215 >> const2327Z) & 1) == 1; local res = tmp1 || tmp0; Rd2831 = (Rd2831 << 1) | zext(res); } # SH.ORN.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.orn.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa7 ; Rd2831 & const1620Z & const2327Z & op2122=0x1 { local tmp1 = ((Rd0811 >> const1620Z) & 1) == 1; local tmp0 = ((Rd1215 >> const2327Z) & 1) == 1; local res = tmp1 || !tmp0; Rd2831 = (Rd2831 << 1) | zext(res); } # SH.XNOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.xnor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa7 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local tmp1 = ((Rd0811 >> const1620Z) & 1) == 1; local tmp0 = ((Rd1215 >> const2327Z) & 1) == 1; local res = !(tmp1 ^ tmp0); Rd2831 = (Rd2831 << 1) | zext(res); } # SH.XOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :sh.xor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa7 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local tmp1 = ((Rd0811 >> const1620Z) & 1) == 1; local tmp0 = ((Rd1215 >> const2327Z) & 1) == 1; 
# --- SH.XOR.T tail, SHA arithmetic shifts (SRC/RR/RC, setting overflow flags via
# --- `overflowflags`), SHA.H packed-halfword arithmetic shifts, SHAS saturating arithmetic
# --- shifts (via `ssov`), and the first ST.A short-form address-register stores.
# NOTE(review): the bare `goto ;` statements inside the SHA.H bodies have lost their
# `<label>` targets (angle-bracketed local labels appear stripped by extraction); as written
# this will not compile -- restore labels from the upstream Ghidra tricore.sinc.
# NOTE(review): SHA.H (RR) ends with `Rd2831 = zext(res1[0,16] << 16) | zext(res0[0,16]);`
# which shifts a 2-byte value by 16 (always 0); the SHA.H (RC) twin immediately after uses
# the correct `(zext(res1[0,16]) << 16) | zext(res0[0,16])` shape.
local res = tmp1 ^ tmp0; Rd2831 = (Rd2831 << 1) | zext(res); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SHA D[a], const4 (SRC) :sha Rd0811,const1215S is PCPMode=0 & Rd0811 & const1215S & op0007=0x86 { local shift_count:4 = sext(const1215S[0,4]); shift_count = (shift_count << (32 - 4)) s>> (32 - 4); local res:4 = Rd0811; local shift_dir:1 = shift_count s< 0; res = (Rd0811 << shift_count) * zext(shift_dir == 0) | (Rd0811 s>> (-shift_count)) * zext(shift_dir == 1); overflowflags(res); Rd0811 = res; } @endif # SHA D[c], D[a], D[b] (RR) :sha Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x10 { local shift_count:4 = sext(Rd1215[0,6]); shift_count = (shift_count << (32 - 6)) s>> (32 - 6); local res:4 = Rd0811; local shift_dir:1 = shift_count s< 0; res = (Rd0811 << shift_count) * zext(shift_dir == 0) | (Rd0811 s>> (-shift_count)) * zext(shift_dir == 1); overflowflags(res); Rd2831 = res; } # SHA D[c], D[a], const9 (RC) :sha Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x1 ) & const1220S { local shift_count:4 = sext(const1220S[0,6]); shift_count = (shift_count << (32 - 6)) s>> (32 - 6); local res:4 = Rd0811; local shift_dir:1 = shift_count s< 0; res = (Rd0811 << shift_count) * zext(shift_dir == 0) | (Rd0811 s>> (-shift_count)) * zext(shift_dir == 1); overflowflags(res); Rd2831 = res; } # SHA.H D[c], D[a], D[b] (RR) :sha.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x410 { local shift_count:4 = sext(Rd1215[0,5]); local res1:4 = sext(Rd0811[16,16]); local res0:4 = sext(Rd0811[0,16]); if (shift_count s> 0) goto ; if (shift_count == 0) goto ; shift_count = 0 - shift_count; local msk:4; ternary(msk, Rd0811[31,1] != 0, (((1 << shift_count) - 1) << (16 - shift_count)), 0); res1 = msk | sext(Rd0811[16,16] s>> shift_count); res0 = msk | sext(Rd0811[0,16] s>> shift_count); goto ; res1 = sext(Rd0811[16,16] << shift_count); res0 = 
sext(Rd0811[0,16] << shift_count); Rd2831 = zext(res1[0,16] << 16) | zext(res0[0,16]); } # SHA.H D[c], D[a], const9 (RC) :sha.h Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x41 ) & const1220S { local shift_count:4 = sext(const1220S[0,5]); local res1:4 = sext(Rd0811[16,16]); local res0:4 = sext(Rd0811[0,16]); if (shift_count s> 0) goto ; if (shift_count == 0) goto ; shift_count = 0 - shift_count; local msk:4; ternary(msk, Rd0811[31,1] != 0, (((1 << shift_count) - 1) << (16 - shift_count)), 0); res1 = msk | sext(Rd0811[16,16] s>> shift_count); res0 = msk | sext(Rd0811[0,16] s>> shift_count); goto ; res1 = sext(Rd0811[16,16] << shift_count); res0 = sext(Rd0811[0,16] << shift_count); Rd2831 = (zext(res1[0,16]) << 16) | zext(res0[0,16]); } # SHAS D[c], D[a], D[b] (RR) :shas Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0x20 { local shift_count:4 = sext(Rd1215[0,6]); shift_count = (shift_count << (32 - 6)) s>> (32 - 6); local res:4 = Rd0811; local shift_dir:1 = shift_count s< 0; res = (Rd0811 << shift_count) * zext(shift_dir == 0) | (Rd0811 s>> (-shift_count)) * zext(shift_dir == 1); overflowflags(res); ssov(Rd2831, res, 32); } # SHAS D[c], D[a], const9 (RC) :shas Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0x2 ) & const1220S { local shift_count:4 = sext(const1220S[0,6]); shift_count = (shift_count << (32 - 6)) s>> (32 - 6); local res:4 = Rd0811; local shift_dir:1 = shift_count s< 0; res = (Rd0811 << shift_count) * zext(shift_dir == 0) | (Rd0811 s>> (-shift_count)) * zext(shift_dir == 1); overflowflags(res); ssov(Rd2831, res, 32); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.A A[10], const8, A[15] (SC) :st.a SC,a15 is PCPMode=0 & a15 & op0007=0xf8 & SC { build SC; *[ram]:4 SC = a15; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.A A[b], off4, A[15] (SRO) :st.a SRO,a15 is 
# --- ST.A address-register stores (SRO/SSRO/SSR short forms; BO post/pre-increment,
# --- bit-reverse, base+offset, circular, ABS and BOL long forms), ST.B byte stores, and the
# --- start of ST.D doubleword stores. Circular modes expand via CircularAddressingMode* and
# --- store in halfword/word pieces.
PCPMode=0 & a15 & op0007=0xec & SRO { build SRO; *[ram]:4 SRO = a15; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.A A[15], off4, A[a] (SSRO) :st.a SSRO,Ra0811 is PCPMode=0 & Ra0811 & op0007=0xe8 & SSRO { build SSRO; *[ram]:4 SSRO = Ra0811; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.A A[b], A[a] (SSR)(Post-increment Addressing Mode) # ST.A A[b], A[a] (SSR) :st.a SSR,Ra0811 is PCPMode=0 & Ra0811 & op0607=0x3 & SSR { build SSR; *[ram]:4 SSR = Ra0811; } @endif # ST.A A[b], off10, A[a] (BO)(Post-increment Addressing Mode) # ST.A P[b], A[a] (BO)(Bit-reverse Addressing Mode) # ST.A A[b], off10, A[a] (BO)(Pre-increment Addressing Mode) # ST.A A[b], off10, A[a] (BO)(Base + Short Offset Addressing Mode) # ST.A P[b], A[a] (BO)(Index Addressing Mode) :st.a BO,Ra0811 is PCPMode=0 & ( Ra0811 & op0607=0x2 ; op2225=0x6 ) & BO { build BO; *[ram]:4 BO = Ra0811; } # ST.A P[b], off10, A[a] (BO)(Circular Addressing Mode) #:st.a BO,Ra0811 is PCPMode=0 & ( Ra0811 & op0007=0xa9 ; op2227=0x16 ) & BO :st.a [Rpe1215/Rpo1215^"+c"^]off10,Ra0811 is PCPMode=0 & Ra0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x16 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); *[ram]:4 EA = Ra0811; } # ST.A off18, A[a] (ABS)(Absolute Addressing Mode) :st.a off18,Ra0811 is PCPMode=0 & ( Ra0811 & op0007=0xa5 ; op2627=0x2 ) & off18 { *[ram]:4 off18 = Ra0811; } # ST.A A[b], off16, A[a] (BOL)(Base + Long Offset Addressing Mode) :st.a BOL,Ra0811 is PCPMode=0 & ( Ra0811 & op0007=0xb5 ) ... 
& BOL { build BOL; *[ram]:4 BOL = Ra0811; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.B A[b], D[a] (SSR) # ST.B A[b], D[a] (SSR)(Post-increment Addressing Mode) :st.b SSR,Rd0811 is PCPMode=0 & Rd0811 & op0607=0x0 & SSR { build SSR; *[ram]:1 SSR = Rd0811[0,8]; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.B A[15], off4, D[a] (SSRO) :st.b SSRO,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x28 & SSRO { build SSRO; *[ram]:1 SSRO = Rd0811[0,8]; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.B A[b], off4, D[15] (SRO) :st.b SRO,d15 is PCPMode=0 & d15 & op0007=0x2c & SRO { build SRO; *[ram]:1 SRO = d15[0,8]; } @endif # ST.B off18, D[a] (ABS)(Absolute Addressing Mode) :st.b off18,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0x25 ; op2627=0x0 ) & off18 { *[ram]:1 off18 = Rd0811[0,8]; } # ST.B A[b], off16, D[a] (BOL)(Base + Long Offset Addressing Mode) :st.b BOL,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xe9 ) ... 
& BOL { build BOL; *[ram]:1 BOL = Rd0811[0,8]; } # ST.B A[b], off10, D[a] (BO)(Post-increment Addressing Mode) # ST.B P[b], D[a] (BO)(Bit-reverse Addressing Mode) # ST.B A[b], off10, D[a] (BO)(Pre-increment Addressing Mode) # ST.B A[b], off10, D[a] (BO)(Base + Short Offset Addressing Mode) # ST.B P[b], D[a] (BO)(Index Addressing Mode) :st.b BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0607=0x2 ; op2225=0x0 ) & BO { build BO; *[ram]:1 BO = Rd0811[0,8]; } # ST.B P[b], off10, D[a] (BO)(Circular Addressing Mode) #:st.b BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xa9 ; op2227=0x10 ) & BO :st.b [Rpe1215/Rpo1215^"+c"^]off10,Rd0811 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x10 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); *[ram]:1 EA = Rd0811[0,8]; } # ST.D A[b], off10, E[a] (BO)(Post-increment Addressing Mode) # ST.D P[b], E[a] (BO)(Bit-reverse Addressing Mode) # ST.D A[b], off10, E[a] (BO)(Pre-increment Addressing Mode) # ST.D A[b], off10, E[a] (BO)(Base + Short Offset Addressing Mode) # ST.D P[b], E[a] (BO)(Index Addressing Mode) :st.d BO,Re0811 is PCPMode=0 & ( Re0811 & op0607=0x2 ; op2225=0x5 ) & BO { build BO; *[ram]:8 BO = Re0811; } # ST.D off18, E[a] (ABS)(Absolute Addressing Mode) :st.d off18,Re0811 is PCPMode=0 & ( Re0811 & op0007=0xa5 ; op2627=0x1 ) & off18 { *[ram]:8 off18 = Re0811; } # ST.D P[b], off10, E[a] (BO)(Circular Addressing Mode) #:st.d BO,Re0811 is PCPMode=0 & ( Re0811 & op0007=0xa9 ; op2227=0x15 ) & BO :st.d [Rpe1215/Rpo1215^"+c"^]off10,Re0811 is PCPMode=0 & Re0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x15 { local EA0:4; local EA2:4; local EA4:4; local EA6:4; CircularAddressingMode4(Rpe1215, Rpo1215, EA0, EA2, EA4, EA6, off10, 2); *[ram]:2 EA6 = Re0811[48,16]; *[ram]:2 EA4 = Re0811[32,16]; *[ram]:2 EA2 = Re0811[16,16]; *[ram]:2 EA0 = Re0811[0,16]; } # ST.DA A[b], off10, P[a] (BO)(Post-increment Addressing Mode) # ST.DA P[b], P[a] (BO)(Bit-reverse Addressing Mode) # ST.DA A[b], off10, P[a] 
(BO)(Pre-increment Addressing Mode) # ST.DA A[b], off10, P[a] (BO)(Base + Short Offset Addressing Mode) # ST.DA P[b], P[a] (BO)(Index Addressing Mode) :st.da BO,Rp0811 is PCPMode=0 & ( Rp0811 & op0607=0x2 ; op2225=0x7 ) & BO { build BO; *[ram]:8 BO = Rp0811; } # ST.DA P[b], off10, P[a] (BO)(Circular Addressing Mode) #:st.da BO,Rp0811 is PCPMode=0 & ( Rp0811 & op0007=0xa9 ; op2227=0x17 ) & BO :st.da [Rpe1215/Rpo1215^"+c"^]off10,Rp0811 is PCPMode=0 & Rp0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x17 { local EA0:4; local EA4:4; CircularAddressingMode2(Rpe1215, Rpo1215, EA0, EA4, off10, 4); *[ram]:4 EA0 = Rp0811[0,32]; *[ram]:4 EA4 = Rp0811[32,32]; } # ST.DA off18, P[a] (ABS)(Absolute Addressing Mode) :st.da off18,Rp0811 is PCPMode=0 & ( Rp0811 & op0007=0xa5 ; op2627=0x3 ) & off18 { *[ram]:8 off18 = Rp0811; } @if defined(TRICORE_V2) :st.dd BO,Re0811/ReN0811 is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x89 ; op2227=0x9 ) & BO { build BO; *[ram]:8 BO = Re0811; *[ram]:8 BO+8 = ReN0811; } @endif @if defined(TRICORE_V2) :st.dd BO,Re0811/ReN0811 is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0xa9 ; op1621=0x0 & op2227=0x09 & op2831=0x0 ) & BO { build BO; *[ram]:8 BO = Re0811; *[ram]:8 BO+8 = ReN0811; } @endif @if defined(TRICORE_V2) :st.dd BO,Re0811/ReN0811 is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x89 ; op2227=0x19 ) & BO { build BO; *[ram]:8 BO = Re0811; *[ram]:8 BO+8 = ReN0811; } @endif @if defined(TRICORE_V2) #:st.dd BO,Re0811/ReN0811 is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0xa9 ; op2227=0x19 ) & BO :st.dd [Rpe1215/Rpo1215^"+c"^]off10,Re0811/ReN0811 is PCPMode=0 & Re0811 & ReN0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x19 { local EA0:4; local EA8:4; CircularAddressingMode2(Rpe1215, Rpo1215, EA0, EA8, off10, 8); *[ram]:8 EA0 = Re0811; *[ram]:8 EA8 = ReN0811; } @endif @if defined(TRICORE_V2) :st.dd BO,Re0811/ReN0811 is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0x89 ; op2227=0x29 ) & BO { build BO; *[ram]:8 BO = Re0811; *[ram]:8 BO+8 = ReN0811; } 
# --- Above: ST.DA register-pair stores and TRICORE_V2-only ST.DD double-register-pair
# --- stores (several encoding variants, one circular). Below: the last ST.DD variant, ST.H
# --- halfword stores (short and long forms), ST.Q upper-halfword stores, and ST.T single-bit
# --- store. ---
@endif @if defined(TRICORE_V2) :st.dd BO,Re0811/ReN0811 is PCPMode=0 & ( Re0811 & ReN0811 & op0007=0xa9 ; op1621=0x0 & op2227=0x29 & op2831=0x0 ) & BO { build BO; *[ram]:8 BO = Re0811; *[ram]:8 BO+8 = ReN0811; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.H A[b], D[a] (SSR)(Post-increment Addressing Mode) # ST.H A[b], D[a] (SSR) :st.h SSR,Rd0811 is PCPMode=0 & Rd0811 & op0607=0x2 & SSR { build SSR; *[ram]:2 SSR = Rd0811[0,16]; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.H A[15], off4, D[a] (SSRO) :st.h SSRO,Rd0811 is PCPMode=0 & Rd0811 & op0007=0xa8 & SSRO { build SSRO; *[ram]:2 SSRO = Rd0811[0,16]; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.H A[b], off4, D[15] (SRO) :st.h SRO,d15 is PCPMode=0 & d15 & op0007=0xac & SRO { build SRO; *[ram]:2 SRO = d15[0,16]; } @endif # ST.H A[b], off10, D[a] (BO)(Post-increment Addressing Mode) # ST.H P[b], D[a] (BO)(Bit-reverse Addressing Mode) # ST.H A[b], off10, D[a] (BO)(Pre-increment Addressing Mode) # ST.H A[b], off10, D[a] (BO)(Base + Short Offset Addressing Mode) # ST.H P[b], D[a] (BO)(Index Addressing Mode) :st.h BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0607=0x2 ; op2225=0x2 ) & BO { build BO; *[ram]:2 BO = Rd0811[0,16]; } # ST.H A[b], off16, D[a] (BOL)(Base + Long Offset Addressing Mode) :st.h BOL,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xf9 ) ... 
& BOL { build BOL; *[ram]:2 BOL = Rd0811[0,16]; } # ST.H P[b], off10, D[a] (BO)(Circular Addressing Mode) #:st.h BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xa9 ; op2227=0x12 ) & BO :st.h [Rpe1215/Rpo1215^"+c"^]off10,Rd0811 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x12 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); *[ram]:2 EA = Rd0811[0,16]; } # ST.H off18, D[a] (ABS)(Absolute Addressing Mode) :st.h off18,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0x25 ; op2627=0x2 ) & off18 { *[ram]:2 off18 = Rd0811[0,16]; } # ST.Q off18, D[a] (ABS)(Absolute Addressing Mode) :st.q off18,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0x65 ; op2627=0x0 ) & off18 { *[ram]:2 off18 = Rd0811[16,16]; } # ST.Q A[b], off10, D[a] (BO)(Post-increment Addressing Mode) # ST.Q P[b], D[a] (BO)(Bit-reverse Addressing Mode) # ST.Q A[b], off10, D[a] (BO)(Pre-increment Addressing Mode) # ST.Q A[b], off10, D[a] (BO)(Base + Short Offset Addressing Mode) # ST.Q P[b], D[a] (BO)(Index Addressing Mode) :st.q BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0607=0x2 ; op2225=0x8 ) & BO { build BO; *[ram]:2 BO = Rd0811[16,16]; } #:st.q BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xa9 ; op2227=0x18 ) & BO :st.q [Rpe1215/Rpo1215^"+c"^]off10,Rd0811 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x18 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); *[ram]:2 EA = Rd0811[16,16]; } # ST.T off18, bpos3, b (ABSB) :st.t off18,const0810Z,const1111Z is PCPMode=0 & ( const0810Z & const1111Z & op0007=0xd5 ; op2627=0x0 ) & off18 { local tmp:1 = *[ram]:1 off18; ternary(tmp, const1111Z, tmp | (1 << const0810Z), tmp & ~(1 << const0810Z)); *[ram]:1 off18 = tmp; } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.W A[10], const8, D[15] (SC) :st.w SC,d15 is PCPMode=0 & d15 & op0007=0x78 & SC { build SC; *[ram]:4 SC = d15; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.W 
A[b], D[a] (SSR) # ST.W A[b], D[a] (SSR)(Post-increment Addressing Mode) :st.w SSR,Rd0811 is PCPMode=0 & Rd0811 & op0607=0x1 & SSR { build SSR; *[ram]:4 SSR = Rd0811; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.W A[15], off4, D[a] (SSRO) :st.w SSRO,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x68 & SSRO { build SSRO; *[ram]:4 SSRO = Rd0811; } @endif @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # ST.W A[b], off4, D[15] (SRO) :st.w SRO,d15 is PCPMode=0 & d15 & op0007=0x6c & SRO { build SRO; *[ram]:4 SRO = d15; } @endif # ST.W A[b], off16, D[a] (BOL)(Base + Long Offset Addressing Mode) :st.w BOL,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0x59 ) ... & BOL { build BOL; *[ram]:4 BOL = Rd0811; } # ST.W off18, D[a] (ABS)(Absolute Addressing Mode) :st.w off18,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xa5 ; op2627=0x0 ) & off18 { *[ram]:4 off18 = Rd0811; } # ST.W A[b], off10, D[a] (BO)(Post-increment Addressing Mode) # ST.W P[b], D[a] (BO)(Bit-reverse Addressing Mode) # ST.W A[b], off10, D[a] (BO)(Pre-increment Addressing Mode) # ST.W A[b], off10, D[a] (BO)(Base + Short Offset Addressing Mode) # ST.W P[b], D[a] (BO)(Index Addressing Mode) :st.w BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0607=0x2 ; op2225=0x4 ) & BO { build BO; *[ram]:4 BO = Rd0811; } # ST.W P[b], off10, D[a] (BO)(Circular Addressing Mode) #:st.w BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xa9 ; op2227=0x14 ) & BO :st.w [Rpe1215/Rpo1215^"+c"^]off10,Rd0811 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0xa9 ; off10 & op2227=0x14 { local EA0:4; local EA2:4; CircularAddressingMode2(Rpe1215, Rpo1215, EA0, EA2, off10, 2); *[ram]:2 EA2 = Rd0811[16,16]; *[ram]:2 EA0 = Rd0811[0,16]; } # STLCX off18 (ABS)(Absolute Addressing Mode) :stlcx off18 is PCPMode=0 & ( op0007=0x15 & op0811=0x0 ; op2627=0x0 ) & off18 { #TODO context store_lower_context(off18); } # STLCX A[b], off10 (BO)(Base + Short Index Addressing Mode) :stlcx BO is PCPMode=0 & ( 
# --- Above: ST.W word stores (SC/SSR/SSRO/SRO short forms; BOL, ABS, BO and circular long
# --- forms) and the first STLCX context store. Below: STLCX/STUCX context stores and the
# --- SUB / SUB.A / SUB.B / SUB.F / SUB.H / SUBC / SUBS family. ---
# NOTE(review): in SUB.B below, `zext(result3[0,8] << 24)` (and the 16/8-bit siblings) shifts
# a 1-byte value by >= 8 bits, which always yields 0 in SLEIGH; the intended shape is
# `(zext(result3[0,8]) << 24)` -- compare SUB.H / SUBS.H, which correctly use
# `(zext(result1[0,16]) << 16)`.
op0007=0x49 & op0811=0x0 ; op2227=0x26 ) & BO { #TODO context build BO; store_lower_context(BO); } # STUCX off18 (ABS)(Absolute Addressing Mode) :stucx off18 is PCPMode=0 & ( op0007=0x15 & op0811=0x0 ; op2627=0x1 ) & off18 { #TODO context store_upper_context(off18); } # STUCX A[b], off10 (BO)(Base + Short Index Addressing Mode) :stucx BO is PCPMode=0 & ( op0007=0x49 & op0811=0x0 ; op2227=0x27 ) & BO { #TODO context build BO; store_upper_context(BO); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SUB D[a], D[15], D[b] (SRR) :sub Rd0811,d15,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x52 { Rd0811 = d15 - Rd1215; overflowflags(Rd0811); } @endif # SUB D[15], D[a], D[b] (SRR) :sub d15,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & d15 & op0007=0x5a { d15 = Rd0811 - Rd1215; overflowflags(d15); } # SUB D[a], D[b] (SRR) :sub Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xa2 { Rd0811 = Rd0811 - Rd1215; overflowflags(Rd0811); } # SUB D[c], D[a], D[b] (RR) :sub Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x80 { Rd2831 = Rd0811 - Rd1215; overflowflags(Rd2831); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SUB.A A[10], const8 (SC) :sub.a a10,const0815Z is PCPMode=0 & a10 & const0815Z & op0007=0x20 { a10 = a10 - const0815Z; } @endif # SUB.A A[c], A[a], A[b] (RR) :sub.a Ra2831,Ra0811,Ra1215 is PCPMode=0 & Ra0811 & Ra1215 & op0007=0x1 ; Ra2831 & op1627=0x20 { Ra2831 = Ra0811 - Ra1215; } # SUB.B D[c], D[a], D[b] (RR) :sub.b Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x480 { local result3:4 = zext(Rd0811[24,8] - Rd1215[24,8]); local result2:4 = zext(Rd0811[16,8] - Rd1215[16,8]); local result1:4 = zext(Rd0811[8,8] - Rd1215[8,8]); local result0:4 = zext(Rd0811[0,8] - Rd1215[0,8]); overflowflagsb(result3, result2, result1, result0); Rd2831 = zext(result3[0,8] << 24) | zext(result2[0,8] << 16) | zext(result1[0,8] << 
8) | zext(result0[0,8]); } @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # SUB.F D[c], D[d], D[a] (RRR) :sub.f Rd2831,Rd2427,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x6b & op1215=0x0 ; Rd2427 & Rd2831 & op1623=0x31 { #TODO float #TODO flags Rd2831 = Rd2427 f- Rd0811; } @endif # SUB.H D[c], D[a], D[b] (RR) :sub.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x680 { local result1:4 = zext(Rd0811[16,16] - Rd1215[16,16]); local result0:4 = zext(Rd0811[0,16] - Rd1215[0,16]); overflowflagsh(result1, result0); Rd2831 = (zext(result1[0,16]) << 16) | zext(result0[0,16]); } # SUBC D[c], D[a], D[b] (RR) :subc Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0xd0 { local tmp:5 = zext(Rd0811) - zext(Rd1215) + zext($(PSW_C)) - 1; Rd2831 = tmp[0,32]; $(PSW_C) = tmp[32,1]; overflowflags(Rd2831); } # SUBS D[a], D[b] (SRR) :subs Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x62 { local result:4 = Rd0811 - Rd1215; overflowflags(result); ssov(Rd0811, result, 32); } # SUBS D[c], D[a], D[b] (RR) :subs Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0xa0 { local result:4 = Rd0811 - Rd1215; overflowflags(result); ssov(Rd2831, result, 32); } # SUBS.H D[c], D[a], D[b] (RR) :subs.h Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x6a0 { local result1:4 = zext(Rd0811[16,16] - Rd1215[16,16]); local result0:4 = zext(Rd0811[0,16] - Rd1215[0,16]); overflowflagsh(result1, result0); ssov(Rd2831[16,16], result1[0,16], 16); ssov(Rd2831[0,16], result0[0,16], 16); } # SUBS.HU D[c], D[a], D[b] (RR) :subs.hu Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x6b0 { local result1:4 = zext(Rd0811[16,16] - Rd1215[16,16]); local result0:4 = zext(Rd0811[0,16] - Rd1215[0,16]); overflowflagsh(result1, result0); suov(Rd2831[16,16], result1[0,16], 16); suov(Rd2831[0,16], result0[0,16], 16); } # SUBS.U D[c], D[a], D[b] (RR) 
# --- SUBS.U/SUBX subtract variants, SVLCX (context save, semantics stubbed in comments),
# --- SWAP.W / SWAPMSK.W atomic memory swaps (BO/ABS/circular forms), SYSCALL trap, the
# --- TRICORE_RIDER_D/V2 TLB maintenance ops, TRAPSV/TRAPV conditional traps, UNPACK/UPDFL/
# --- UTOF float helpers (several bodies are #TODO stubs), the V2-only WAIT, and XNOR.
:subs.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0xb0 { local result:4 = Rd0811 - Rd1215; overflowflags(result); suov(Rd2831, result, 32); } # SUBX D[c], D[a], D[b] (RR) :subx Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0xc0 { Rd2831 = Rd0811 - Rd1215; local tmp:5 = zext(Rd0811) + ~zext(Rd1215) + 1; $(PSW_C) = tmp[32,1]; overflowflags(Rd2831); } # SVLCX (SYS) :svlcx is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x200 { #TODO context # if (FCX == 0) trap(FCU); # tmp_FCX = FCX; # EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; # new_FCX = M(EA, word); # M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4], A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; # PCXI.PCPN = ICR.CCPN # PCXI.PIE = ICR.IE; # PCXI.UL = 0; # PCXI[19:0] = FCX[19:0]; # FCX[19:0] = new_FCX[19:0]; # if (tmp_FCX == LCX) trap(FCD); } # SWAP.W A[b], off10, D[a] (BO)(Base + Short Offset Addressing Mode) # SWAP.W P[b], D[a] (BO)(Bit-reverse Addressing Mode) # SWAP.W A[b], off10, D[a] (BO)(Pre-increment Addressing Mode) # SWAP.W A[b], off10, D[a] (BO)(Base + Short Offset Addressing Mode) # SWAP.W P[b], D[a] (BO)(Index Addressing Mode) :swap.w BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0607=0x1 ; op2225=0x0 ) & BO { build BO; local tmp:4 = *[ram]:4 BO; *[ram]:4 BO = Rd0811; Rd0811 = tmp; } # SWAP.W off18, D[a] (ABS)(Absolute Addressing Mode) :swap.w off18,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0xe5 ; op2627=0x0 ) & off18 { local tmp:4 = *[ram]:4 off18; *[ram]:4 off18 = Rd0811; Rd0811 = tmp; } # SWAP.W P[b], off10, D[a] (BO)(Circular Addressing Mode) #:swap.w BO,Rd0811 is PCPMode=0 & ( Rd0811 & op0007=0x69 ; op2227=0x10 ) & BO :swap.w [Rpe1215/Rpo1215^"+c"^]off10,Rd0811 is PCPMode=0 & Rd0811 & Rpe1215 & Rpo1215 & op0007=0x69 ; off10 & op2227=0x10 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); local tmp:4 = *[ram]:4 EA; *[ram]:4 EA = Rd0811; Rd0811 = tmp; } @if defined(TRICORE_V2) # SWAPMSK.W A[b], 
off10, E[a] (BO)(Post-increment Addressing Mode) # SWAPMSK.W P[b], E[a] (BO)(Bit-reverse Addressing Mode) # SWAPMSK.W A[b], off10, E[a] (BO)(Pre-increment Addressing Mode) # SWAPMSK.W A[b], off10, E[a] (BO)(Base + Short Offset Addressing Mode) # SWAPMSK.W P[b], E[a] (BO)(Index Addressing Mode) :swapmsk.w BO,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & Reo0811 & op0607=0x1 ; op2225=0x2 ) & BO { build BO; local tmp:4 = *[ram]:4 BO; *[ram]:4 BO = (tmp & ~Reo0811) | (Ree0811 & Reo0811); } # SWAPMSK.W P[b], off10, E[a] (BO)(Circular Addressing Mode) #:swapmsk.w BO,Ree0811/Reo0811 is PCPMode=0 & ( Ree0811 & Reo0811 & op0007=0x69 ; op2227=0x12 ) & BO :swapmsk.w [Rpe1215/Rpo1215^"+c"^]off10,Ree0811/Reo0811 is PCPMode=0 & Ree0811 & Reo0811 & Rpe1215 & Rpo1215 & op0007=0x69 ; off10 & op2227=0x12 { local EA:4; CircularAddressingMode(Rpe1215, Rpo1215, EA, off10); local tmp:4 = *[ram]:4 EA; *[ram]:4 EA = (tmp & ~Reo0811) | (Ree0811 & Reo0811); } @endif # SYSCALL const9 (RC) :syscall const1220Z is PCPMode=0 & ( op0007=0xad & op0811=0x0 ; op2131=0x4 ) & const1220Z { #TODO TIN SYS trap(const1220Z[0,8]); } @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :tlbdemap Rd0811 is PCPMode=0 & Rd0811 & op0007=0x75 & op1215=0x0 ; op1631=0x0 { tlbdemap(Rd0811); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :tlbflush.a is PCPMode=0 & op0007=0x75 & op0815=0x0 ; op1631=0x40 { tlbflusha(); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :tlbflush.b is PCPMode=0 & op0007=0x75 & op0815=0x0 ; op1631=0x50 { tlbflushb(); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :tlbmap Re0811 is PCPMode=0 & Re0811 & op0007=0x75 & op1215=0x0 ; op1631=0x400 { tlbmap(Re0811); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :tlbprobe.a Rd0811 is PCPMode=0 & Rd0811 & op0007=0x75 & op1215=0x0 ; op1631=0x80 { tlbprobea(Rd0811); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) :tlbprobe.i Rd0811 is PCPMode=0 & Rd0811 & op0007=0x75 & op1215=0x0 ; 
op1631=0x90 { tlbprobei(Rd0811); } @endif # TRAPSV (SYS) :trapsv is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x540 { #TODO TIN SOVF if ($(PSW_SV) == 0) goto inst_next; trap(); } # TRAPV (SYS) :trapv is PCPMode=0 & op0007=0xd & op0815=0x0 ; op1631=0x500 { #TODO TIN OVF if ($(PSW_V) == 0) goto inst_next; trap(); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # UNPACK E[c], D[a] (RR) :unpack Re2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Re2831 & op1627=0x80 { #TODO #TODO float En+1 = exp and En = mantissa } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # UPDFL D[a] (RR) :updfl Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; op1631=0xc1 { $(PSW_FS) = ($(PSW_FS) & ~Rd0811[15,1]) | (Rd0811[7,1] & Rd0811[15,1]); $(PSW_FI) = ($(PSW_FI) & ~Rd0811[14,1]) | (Rd0811[6,1] & Rd0811[14,1]); $(PSW_FV) = ($(PSW_FV) & ~Rd0811[13,1]) | (Rd0811[5,1] & Rd0811[13,1]); $(PSW_FZ) = ($(PSW_FZ) & ~Rd0811[12,1]) | (Rd0811[4,1] & Rd0811[12,1]); $(PSW_FU) = ($(PSW_FU) & ~Rd0811[11,1]) | (Rd0811[3,1] & Rd0811[11,1]); $(PSW_FX) = ($(PSW_FX) & ~Rd0811[10,1]) | (Rd0811[2,1] & Rd0811[10,1]); $(PSW_RM) = ($(PSW_RM) & ~Rd0811[8,2]) | (Rd0811[0,2] & Rd0811[8,2]); } @endif @if defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # UTOF D[c], D[a] (RR) :utof Rd2831,Rd0811 is PCPMode=0 & Rd0811 & op0007=0x4b & op1215=0x0 ; Rd2831 & op1627=0x161 { #TODO float #TODO flags #TODO unsigned # Rd2831 = int2float(Rd0811); } @endif @if defined(TRICORE_V2) # WAIT (SYS) :wait is PCPMode=0 & op0007=0xd & op0815=0 ; op1621=0 & op2227=0x16 & op2831=0 { wait(); } @endif # XNOR D[c], D[a], D[b] (RR) :xnor Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0xd0 { Rd2831 = ~(Rd0811 ^ Rd1215); } # XNOR D[c], D[a], const9 (RC) :xnor Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0xd ) & const1220Z { Rd2831 = ~(Rd0811 ^ const1220Z); } # XNOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :xnor.t 
Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x7 ; Rd2831 & const1620Z & const2327Z & op2122=0x2 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831 = zext(!(tmpa[0,1] ^ tmpb[0,1])); } @if defined(TRICORE_RIDER_B) || defined(TRICORE_RIDER_D) || defined(TRICORE_V2) # XOR D[a], D[b] (SRR) :xor Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xc6 { Rd0811 = Rd0811 ^ Rd1215; } @endif # XOR D[c], D[a], D[b] (RR) :xor Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xf ; Rd2831 & op1627=0xc0 { Rd2831 = Rd0811 ^ Rd1215; } # XOR D[c], D[a], const9 (RC) :xor Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8f ; Rd2831 & op2127=0xc ) & const1220Z { Rd2831 = Rd0811 ^ const1220Z; } # XOR.EQ D[c], D[a], D[b] (RR) :xor.eq Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x2f0 { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 == Rd1215); } # XOR.EQ D[c], D[a], const9 (RC) :xor.eq Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x2f ) & const1220S { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 == const1220S); } # XOR.GE D[c], D[a], D[b] (RR) :xor.ge Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x330 { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 s>= Rd1215); } # XOR.GE D[c], D[a], const9 (RC) :xor.ge Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x33 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 s>= const1220S); } # XOR.GE.U D[c], D[a], D[b] (RR) :xor.ge.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x340 { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 >= Rd1215); } # XOR.GE.U D[c], D[a], const9 (RC) :xor.ge.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x34 ) & const1220Z { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 >= const1220Z); } # XOR.LT D[c], D[a], D[b] (RR) :xor.lt Rd2831,Rd0811,Rd1215 is 
# --- Above: XNOR.T tail, XOR (SRR/RR/RC), and the XOR.<cond> accumulating-compare family
# --- (each XORs a compare result into bit 0 of D[c]). Below: the XOR.LT tail, XOR.LT.U /
# --- XOR.NE, XOR.T, and the TRICORE_V2-only XPOSE.B / XPOSE.H byte/halfword interleave
# --- constructors. ---
# NOTE(review): in XPOSE.B / XPOSE.H, terms like `zext(Rd0811[24,8] << 56)` and
# `zext(Rd0811[16,16] << 48)` shift a 1- or 2-byte value by at least its own bit width,
# which always yields 0 in SLEIGH; the intended shape is `(zext(Rd0811[24,8]) << 56)` etc.
PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x310 { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 s< Rd1215); } # XOR.LT D[c], D[a], const9 (RC) :xor.lt Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x31 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 s< const1220S); } # XOR.LT.U D[c], D[a], D[b] (RR) :xor.lt.u Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x320 { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 < Rd1215); } # XOR.LT.U D[c], D[a], const9 (RC) :xor.lt.u Rd2831,Rd0811,const1220Z is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x32 ) & const1220Z { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 < const1220Z); } # XOR.NE D[c], D[a], D[b] (RR) :xor.ne Rd2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Rd2831 & op1627=0x300 { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 != Rd1215); } # XOR.NE D[c], D[a], const9 (RC) :xor.ne Rd2831,Rd0811,const1220S is PCPMode=0 & ( Rd0811 & op0007=0x8b ; Rd2831 & op2127=0x30 ) & const1220S { Rd2831[0,1] = Rd2831[0,1] ^ (Rd0811 != const1220S); } # XOR.T D[c], D[a], pos1, D[b], pos2 (BIT) :xor.t Rd2831,Rd0811,const1620Z,Rd1215,const2327Z is PCPMode=0 & Rd0811 & Rd1215 & op0007=0x7 ; Rd2831 & const1620Z & const2327Z & op2122=0x3 { local tmpa = (Rd0811 >> const1620Z) & 1; local tmpb = (Rd1215 >> const2327Z) & 1; Rd2831 = zext(tmpa[0,1] ^ tmpb[0,1]); } @if defined(TRICORE_V2) :xpose.b Re2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Re2831 & op1627=0x830 { Re2831 = zext(Rd0811[24,8] << 56)|zext(Rd1215[24,8] << 48)|zext(Rd0811[8,8] << 40)|zext(Rd1215[8,8] << 32)|zext(Rd0811[16,8] << 24)|zext(Rd1215[16,8] << 16)|zext(Rd0811[0,8] << 8)|zext(Rd1215[0,8]); } @endif @if defined(TRICORE_V2) :xpose.h Re2831,Rd0811,Rd1215 is PCPMode=0 & Rd0811 & Rd1215 & op0007=0xb ; Re2831 & op1627=0x820 { Re2831 = zext(Rd0811[16,16] << 48)|zext(Rd1215[16,16] << 32)|zext(Rd0811[0,16] << 16)|zext(Rd1215[0,16]); } @endif # @if defined(TRICORE_V2) # :yield # { # } 
# @endif @include "tricore.pcp.sinc" ================================================ FILE: pypcode/processors/tricore/data/languages/tricore.slaspec ================================================ @define Tricore "" @define TRICORE_GENERIC "" #@define TRICORE_RIDER_D "" #@define TRICORE_RIDER_B "" @define TRICORE_V2 "" #@define TRICORE_VERBOSE "" define endian=little; @include "tricore.sinc" ================================================ FILE: pypcode/processors/tricore/data/manuals/tricore.idx ================================================ @tc_v131_instructionset_v138.pdf[TriCore Architecture Volume 2: Instruction Set, V1.3 & V1.3.1] ABS, 66 ABS.B, 67 ABSDIF, 69 ABSDIF.B, 71 ABSDIF.H, 71 ABSDIFS, 73 ABSDIFS.H, 75 ABS.H, 67 ABSS, 76 ABSS.H, 77 ADD, 78 ADD.A, 81 ADD.B, 83 ADDC, 85 ADD.F, 511 ADD.H, 83 ADDI, 87 ADDIH, 88 ADDIH.A, 89 ADDS, 90 ADDSC.A, 96 ADDSC.AT, 96 ADDS.H, 92 ADDS.HU, 92 ADDS.U, 94 ADDX, 98 AND, 100 AND.ANDN.T, 102 AND.AND.T, 102 AND.EQ, 104 AND.GE, 105 AND.GE.U, 105 AND.LT, 107 AND.LT.U, 107 ANDN, 111 AND.NE, 109 AND.NOR.T, 102 ANDN.T, 112 AND.OR.T, 102 AND.T, 110 BISR, 113 BMERGE, 115 BSPLIT, 116 CACHEA.I, 117 CACHEA.W, 120 CACHEA.WI, 123 CACHEI.W, 126 CACHEI.WI, 128 CADD, 130 CADDN, 132 CALL, 134 CALLA, 137 CALLI, 139 CLO, 141 CLO.H, 142 CLS, 143 CLS.H, 144 CLZ, 145 CLZ.H, 146 CMOV, 147 CMOVN, 148 CMP.F, 513 CSUB, 149 CSUBN, 150 DEBUG, 151 DEXTR, 152 DISABLE, 153 DIV.F, 515 DSYNC, 154 DVADJ, 155 DVINIT, 157 DVINIT.B, 157 DVINIT.BU, 157 DVINIT.H, 157 DVINIT.HU, 157 DVINIT.U, 157 DVSTEP, 162 DVSTEP.U, 162 ENABLE, 165 EQ, 166 EQ.A, 168 EQANY.B, 171 EQANY.H, 171 EQ.B, 169 EQ.H, 169 EQ.W, 169 EQZ.A, 173 EXTR, 174 EXTR.U, 174 FTOI, 517 FTOIZ, 518 FTOQ31, 519 FTOQ31Z, 521 FTOU, 523 FTOUZ, 524 GE, 176 GE.A, 178 GE.U, 176 IMASK, 179 INSERT, 182 INSN.T, 181 INS.T, 181 ISYNC, 185 ITOF, 525 IXMAX, 186 IXMAX.U, 186 IXMIN, 188 IXMIN.U, 188 J, 190 JA, 191 JEQ, 192 JEQ.A, 194 JGE, 195 JGE.U, 195 JGEZ, 197 JGTZ, 198 JI, 199 JL, 200 JLA, 201 JLEZ, 202 JLI, 
203 JLT, 204 JLT.U, 204 JLTZ, 206 JNE, 207 JNE.A, 209 JNED, 210 JNEI, 212 JNZ, 214 JNZ.A, 215 JNZ.T, 216 JZ, 217 JZ.A, 218 JZ.T, 219 LD.A, 220 LD.B, 224 LD.BU, 224 LD.D, 229 LD.DA, 232 LD.H, 235 LD.HU, 238 LDLCX, 246 LDMST, 247 LD.Q, 240 LDUCX, 250 LD.W, 242 LEA, 251 LOOP, 253 LOOPU, 255 LT, 256 LT.A, 259 LT.B, 260 LT.BU, 260 LT.H, 262 LT.HU, 262 LT.U, 256 LT.W, 264 LT.WU, 264 MADD, 265 MADD.F, 526 MADD.H, 268 MADDM.H, 282 MADDMS.H, 282 MADD.Q, 272 MADDR.H, 286 MADDR.Q, 291 MADDRS.H, 286 MADDRS.Q, 291 MADDS, 265 MADDS.H, 268 MADDS.Q, 272 MADDS.U, 279 MADDSU.H, 293 MADDSUM.H, 297 MADDSUMS.H, 297 MADDSUR.H, 301 MADDSURS.H, 301 MADDSUS.H, 293 MADD.U, 279 MAX, 306 MAX.B, 308 MAX.BU, 308 MAX.H, 310 MAX.HU, 310 MAX.U, 306 MFCR, 311 MIN, 312 MIN.B, 314 MIN.BU, 314 MIN.H, 316 MIN.HU, 316 MIN.U, 312 MOV, 317 MOV.A, 319 MOV.AA, 321 MOV.D, 322 MOVH, 324 MOVH.A, 325 MOV.U, 323 MSUB, 326 MSUBAD.H, 343 MSUBADM.H, 347 MSUBADMS.H, 347 MSUBADR.H, 351 MSUBADRS.H, 351 MSUBADS.H, 343 MSUB.F, 528 MSUB.H, 329 MSUBM.H, 356 MSUBMS.H, 356 MSUB.Q, 333 MSUBR.H, 360 MSUBR.Q, 365 MSUBRS.H, 360 MSUBRS.Q, 365 MSUBS, 326 MSUBS.H, 329 MSUBS.Q, 333 MSUBS.U, 340 MSUB.U, 340 MTCR, 367 MUL, 368 MUL.F, 530 MUL.H, 371 MULM.H, 379 MUL.Q, 374 MULR.H, 382 MULR.Q, 385 MULS, 368 MULS.U, 377 MUL.U, 377 NAND, 387 NAND.T, 388 NE, 389 NE.A, 390 NEZ.A, 391 NOP, 392 NOR, 393 NOR.T, 394 NOT, 395 OR, 396 OR.ANDN.T, 398 OR.AND.T, 398 OR.EQ, 400 OR.GE, 401 OR.GE.U, 401 OR.LT, 403 OR.LT.U, 403 ORN, 407 OR.NE, 405 OR.NOR.T, 398 ORN.T, 408 OR.OR.T, 398 OR.T, 406 PACK, 409 PARITY, 412 Q31TOF, 532 QSEED.F, 533 RET, 413 RFE, 415 RFM, 417 RSLCX, 419 RSTV, 420 RSUB, 421 RSUBS, 423 RSUBS.U, 423 SAT.B, 425 SAT.BU, 427 SAT.H, 428 SAT.HU, 430 SEL, 431 SELN, 432 SH, 433 SHA, 446 SHA.H, 449 SH.ANDN.T, 443 SH.AND.T, 443 SHAS, 451 SH.EQ, 435 SH.GE, 436 SH.GE.U, 436 SH.H, 438 SH.LT, 440 SH.LT.U, 440 SH.NAND.T, 443 SH.NE, 442 SH.NOR.T, 443 SH.ORN.T, 443 SH.OR.T, 443 SH.XNOR.T, 443 SH.XOR.T, 443 ST.A, 453 ST.B, 457 ST.D, 460 ST.DA, 463 
ST.H, 466 STLCX, 476 ST.Q, 469 ST.T, 471 STUCX, 477 ST.W, 472 SUB, 478 SUB.A, 480 SUB.B, 481 SUBC, 483 SUB.F, 535 SUB.H, 481 SUBS, 484 SUBS.H, 486 SUBS.HU, 486 SUBS.U, 484 SUBX, 488 SVLCX, 489 SWAP.W, 491 SYSCALL, 494 TLBDEMAP, 541 TLBFLUSH.A, 542 TLBFLUSH.B, 542 TLBMAP, 544 TLBPROBE.A, 546 TLBPROBE.I, 548 TRAPSV, 495 TRAPV, 496 UNPACK, 497 UPDFL, 537 UTOF, 539 XNOR, 499 XNOR.T, 500 XOR, 501 XOR.EQ, 503 XOR.GE, 504 XOR.GE.U, 504 XOR.LT, 506 XOR.LT.U, 506 XOR.NE, 508 XOR.T, 509 ================================================ FILE: pypcode/processors/tricore/data/manuals/tricore2.idx ================================================ @Infineon-TC2xx_Architecture_vol2-UM-v01_00-EN.pdf [TriCore TC1.6P & TC1.6E Instruction Set, User Manual (Volume 2), V1.0 2013-07] ABS, 49 ABS.B, 50 ABS.H, 50 ABSDIF, 52 ABSDIF.B, 53 ABSDIF.H, 53 ABSDIFS, 55 ABSDIFS.H, 56 ABSS, 57 ABSS.H, 58 ADD, 59 ADD.A, 62 ADD.B, 63 ADD.F, 447 ADD.H, 63 ADDC, 65 ADDI, 66 ADDIH, 67 ADDIH.A, 68 ADDS, 69 ADDS.H, 71 ADDS.HU, 71 ADDS.U, 73 ADDSC.A, 74 ADDSC.AT, 74 ADDX, 76 AND, 77 AND.AND.T, 79 AND.ANDN.T, 79 AND.EQ, 81 AND.GE, 82 AND.GE.U, 82 AND.LT, 84 AND.LT.U, 84 AND.NE, 86 AND.NOR.T, 79 AND.OR.T, 79 AND.T, 87 ANDN, 88 ANDN.T, 89 BISR, 90 BMERGE, 92 BSPLIT, 93 CACHEA.I, 94 CACHEA.W, 96 CACHEA.WI, 98 CACHEI.I, 102 CACHEI.W, 100 CACHEI.WI, 104 CADD, 106 CADDN, 108 CALL, 110 CALLA, 112 CALLI, 113 CLO, 115 CLO.H, 116 CLS, 117 CLS.H, 118 CLZ, 119 CLZ.H, 120 CMOV, 121 CMOVN, 122 CMP.F, 448 CMPSWAP.W, 123 # COP, CRC32, 125 CSUB, 126 CSUBN, 127 DEBUG, 128 DEXTR, 129 DISABLE, 130 DIV, 134 DIV.F, 449 DIV.U, 134 DSYNC, 131 DVADJ, 132 DVINIT, 136 DVINIT.B, 136 DVINIT.BU, 136 DVINIT.H, 136 DVINIT.HU, 136 DVINIT.U, 136 DVSTEP, 139 DVSTEP.U, 139 ENABLE, 141 EQ, 142 EQ.A, 144 EQ.B, 145 EQ.H, 145 EQ.W, 145 EQANY.B, 147 EQANY.H, 147 EQZ.A, 149 EXTR, 150 EXTR.U, 150 FCALL, 152 FCALLA, 153 FCALLI, 154 FRET, 155 FTOI, 450 FTOIZ, 451 FTOQ31, 452 FTOQ31Z, 453 FTOU, 454 FTOUZ, 455 GE, 156 GE.A, 158 GE.U, 156 IMASK, 159 INS.T, 
161 INSERT, 162 INSN.T, 161 ISYNC, 164 ITOF, 456 IXMAX, 165 IXMAX.U, 165 IXMIN, 167 IXMIN.U, 167 J, 169 JA, 170 JEQ, 171 JEQ.A, 173 JGE, 174 JGE.U, 174 JGEZ, 176 JGTZ, 177 JI, 178 JL, 179 JLA, 180 JLEZ, 181 JLI, 182 JLT, 183 JLT.U, 183 JLTZ, 185 JNE, 186 JNE.A, 188 JNED, 189 JNEI, 190 JNZ, 191 JNZ.A, 192 JNZ.T, 193 JZ, 194 JZ.A, 195 JZ.T, 196 LD.A, 197 LD.B, 200 LD.BU, 200 LD.D, 204 LD.DA, 206 # LD.DD, LD.H, 208 LD.HU, 208 LD.Q, 212 LD.W, 214 LDLCX, 217 LDMST, 218 LDUCX, 220 LEA, 221 LOOP, 222 LOOPU, 223 LT, 224 LT.A, 226 LT.B, 227 LT.BU, 227 LT.H, 228 LT.HU, 228 LT.U, 224 LT.W, 229 LT.WU, 229 MADD, 230 MADD.F, 457 MADD.H, 233 MADD.Q, 237 MADD.U, 243 MADDM.H, 245 MADDMS.H, 245 MADDR.H, 248 MADDR.Q, 252 MADDRS.H, 248 MADDRS.Q, 252 MADDS, 230 MADDS.H, 233 MADDS.Q, 237 MADDS.U, 243 MADDSU.H, 254 MADDSUM.H, 258 MADDSUMS.H, 258 MADDSUR.H, 261 MADDSURS.H, 261 MADDSUS.H, 254 MAX, 265 MAX.B, 267 MAX.BU, 267 MAX.H, 268 MAX.HU, 268 MAX.U, 265 MFCR, 269 # MFFR, MIN, 270 MIN.B, 272 MIN.BU, 272 MIN.H, 273 MIN.HU, 273 MIN.U, 270 MOV, 274 MOV.A, 277 MOV.AA, 278 MOV.D, 279 MOV.U, 280 MOVH, 281 MOVH.A, 282 MSUB, 283 MSUB.F, 459 MSUB.H, 286 MSUB.Q, 290 MSUB.U, 296 MSUBAD.H, 298 MSUBADM.H, 302 MSUBADMS.H, 302 MSUBADR.H, 305 MSUBADRS.H, 305 MSUBADS.H, 298 MSUBM.H, 309 MSUBMS.H, 309 MSUBR.H, 312 MSUBR.Q, 316 MSUBRS.H, 312 MSUBRS.Q, 316 MSUBS, 283 MSUBS.H, 286 MSUBS.Q, 290 MSUBS.U, 296 MTCR, 318 # MTFR, MUL, 319 MUL.F, 461 MUL.H, 322 MUL.Q, 324 MUL.U, 327 MULM.H, 329 MULMS.H, 331 MULR.H, 332 MULR.Q, 334 MULS, 319 MULS.U, 327 NAND, 335 NAND.T, 336 NE, 337 NE.A, 338 NEZ.A, 339 NOP, 340 NOR, 341 NOR.T, 342 NOT, 343 OR, 344 OR.AND.T, 346 OR.ANDN.T, 346 OR.EQ, 348 OR.GE, 349 OR.GE.U, 349 OR.LT, 351 OR.LT.U, 351 OR.NE, 353 OR.NOR.T, 346 OR.OR.T, 346 OR.T, 354 ORN, 355 ORN.T, 356 PACK, 357 PARITY, 359 Q31TOF, 462 QSEED.F, 463 RESTORE, 360 RET, 361 RFE, 363 RFM, 365 RSLCX, 366 RSTV, 367 RSUB, 368 RSUBS, 369 RSUBS.U, 369 SAT.B, 370 SAT.BU, 371 SAT.H, 372 SAT.HU, 373 SEL, 374 SELN, 375 SH, 376 
SH.AND.T, 385 SH.ANDN.T, 385 SH.EQ, 378 SH.GE, 379 SH.GE.U, 379 SH.H, 381 SH.LT, 382 SH.LT.U, 382 SH.NAND.T, 385 SH.NE, 384 SH.NOR.T, 385 SH.OR.T, 385 SH.ORN.T, 385 SH.XNOR.T, 385 SH.XOR.T, 385 SHA, 387 SHA.H, 389 SHAS, 391 ST.A, 393 ST.B, 396 ST.D, 399 ST.DA, 401 # ST.DD, ST.H, 403 ST.Q, 406 ST.T, 408 ST.W, 409 STLCX, 412 STUCX, 413 SUB, 414 SUB.A, 416 SUB.B, 417 SUB.F, 464 SUB.H, 417 SUBC, 419 SUBS, 420 SUBS.H, 422 SUBS.HU, 422 SUBS.U, 420 SUBX, 424 SVLCX, 425 SWAP.W, 426 SWAPMSK.W, 428 SYSCALL, 430 # TLBDEMAP, # TLBFLUSH.A, # TLBFLUSH.B, # TLBMAP, # TLBPROBE.A, # TLBPROBE.I, TRAPSV, 431 TRAPV, 432 UNPACK, 433 UPDFL, 465 UTOF, 466 WAIT, 435 XNOR, 436 XNOR.T, 437 XOR, 438 XOR.EQ, 439 XOR.GE, 440 XOR.GE.U, 440 XOR.LT, 442 XOR.LT.U, 442 XOR.NE, 444 XOR.T, 445 # XPOSE.B, # XPOSE.H, # YIELD, ================================================ FILE: pypcode/processors/tricore/data/patterns/patternconstraints.xml ================================================ tricore_patterns.xml ================================================ FILE: pypcode/processors/tricore/data/patterns/tricore_patterns.xml ================================================ 00000000 10010000 > 10011101 ........ ........ ........ > 00011101 ........ ........ ........ > 11011100 00001011 > 00101101 00001011 00000000 00110000 > 00111100 ........ > 00000010 ........ > 00000101 ....1111 ........ ....01.. > 00001100 ........ > 00111011 ....0000 ........ 1111.... > 00111011 ....0000 ........ 0100.... > 00111011 ....0000 ........ 1000.... > 10000010 ....1111 > 10000010 ....0100 > 10000010 ....1000 > 01111101 ....0000 ........ 1111.... > 10010001 ....0000 ........ 1111.... > 01111101 ....0000 ........ 0100.... > 01111101 ....0000 ........ 1000.... > 11011010 ........ > 00011101 ........ ........ ........ > 10000101 ....1111 ........ ....00.. 
> ================================================ FILE: pypcode/processors/x86/data/extensions/rust/unix32/cc.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/unix32/probe_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/unix32/try_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/unix64/cc.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/unix64/probe_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/unix64/try_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/windows32/probe_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/windows32/try_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/windows64/probe_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/extensions/rust/windows64/try_fixup.xml ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/adx.sinc ================================================ :ADCX Reg32, rm32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xF6; rm32 & Reg32 ... & check_Reg32_dest ... 
{ tmp:5 = zext(Reg32) + zext(rm32) + zext(CF); tmpCF:1 = tmp(4); # just the carry byte CF = tmpCF != 0; Reg32 = tmp:4; build check_Reg32_dest; } @ifdef IA64 :ADCX Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xF6; Reg64 ... & rm64 { tmp:9 = zext(Reg64) + zext(rm64) + zext(CF); tmpCF:1 = tmp(8); # just the carry byte CF = tmpCF != 0; Reg64 = tmp:8; } @endif :ADOX Reg32, rm32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x38; byte=0xF6; rm32 & Reg32 ... & check_Reg32_dest ... { tmp:5 = zext(Reg32) + zext(rm32) + zext(OF); tmpOF:1 = tmp(4); # just the carry byte OF = tmpOF != 0; Reg32 = tmp:4; build check_Reg32_dest; } @ifdef IA64 :ADOX Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x38; byte=0xF6; Reg64 ... & rm64 { tmp:9 = zext(Reg64) + zext(rm64) + zext(OF); tmpOF:1 = tmp(8); # just the carry byte OF = tmpOF != 0; Reg64 = tmp:8; } @endif ================================================ FILE: pypcode/processors/x86/data/languages/avx.sinc ================================================ # INFO This file automatically generated by andre on Tue Apr 30 15:35:00 2024 # INFO Direct edits to this file may be lost in future updates # INFO Command line arguments: ['--sinc', '--skip-sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx_manual.sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/ia.sinc'] # ADDPD 3-33 PAGE 603 LINE 33405 :VADDPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local m:16 = XmmReg2_m128; XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f+ m[0,64]; XmmReg1[64,64] = vexVVVV_XmmReg[64,64] f+ m[64,64]; ZmmReg1 = zext(XmmReg1); } # ADDPD 3-33 PAGE 603 LINE 33408 :VADDPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local m:32 = YmmReg2_m256; YmmReg1[0,64] = vexVVVV_YmmReg[0,64] f+ m[0,64]; YmmReg1[64,64] = vexVVVV_YmmReg[64,64] f+ m[64,64]; YmmReg1[128,64] = vexVVVV_YmmReg[128,64] f+ m[128,64]; YmmReg1[192,64] = vexVVVV_YmmReg[192,64] f+ m[192,64]; ZmmReg1 = zext(YmmReg1); } # ADDPS 3-36 PAGE 606 LINE 33558 :VADDPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = XmmReg2_m128; XmmReg1[0,32] = vexVVVV_XmmReg[0,32] f+ tmp[0,32]; XmmReg1[32,32] = vexVVVV_XmmReg[32,32] f+ tmp[32,32]; XmmReg1[64,32] = vexVVVV_XmmReg[64,32] f+ tmp[64,32]; XmmReg1[96,32] = vexVVVV_XmmReg[96,32] f+ tmp[96,32]; ZmmReg1 = zext(XmmReg1); } # ADDPS 3-36 PAGE 606 LINE 33560 :VADDPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = YmmReg2_m256; YmmReg1[0,32] = vexVVVV_YmmReg[0,32] f+ tmp[0,32]; YmmReg1[32,32] = vexVVVV_YmmReg[32,32] f+ tmp[32,32]; YmmReg1[64,32] = vexVVVV_YmmReg[64,32] f+ tmp[64,32]; YmmReg1[96,32] = vexVVVV_YmmReg[96,32] f+ tmp[96,32]; YmmReg1[128,32] = vexVVVV_YmmReg[128,32] f+ tmp[128,32]; YmmReg1[160,32] = vexVVVV_YmmReg[160,32] f+ tmp[160,32]; YmmReg1[192,32] = vexVVVV_YmmReg[192,32] f+ tmp[192,32]; YmmReg1[224,32] = vexVVVV_YmmReg[224,32] f+ tmp[224,32]; ZmmReg1 = zext(YmmReg1); } # ADDSD 3-39 PAGE 609 LINE 33718 :VADDSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:8 = vexVVVV_XmmReg[0,64] f+ XmmReg2_m64[0,64]; ZmmReg1 = zext(tmp); } # ADDSS 3-41 PAGE 611 LINE 33812 :VADDSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:4 = vexVVVV_XmmReg[0,32] f+ XmmReg2_m32[0,32]; ZmmReg1 = zext(tmp); } # ADDSUBPD 3-43 PAGE 613 LINE 33906 define pcodeop vaddsubpd_avx ; :VADDSUBPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD0; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vaddsubpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ADDSUBPD 3-43 PAGE 613 LINE 33909 :VADDSUBPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD0; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vaddsubpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # ADDSUBPS 3-45 PAGE 615 LINE 34013 define pcodeop vaddsubps_avx ; :VADDSUBPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD0; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vaddsubps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ADDSUBPS 3-45 PAGE 615 LINE 34016 :VADDSUBPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD0; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vaddsubps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # ANDPD 3-64 PAGE 634 LINE 34821 define pcodeop vandpd_avx ; :VANDPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vandpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ANDPD 3-64 PAGE 634 LINE 34824 :VANDPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vandpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # ANDPS 3-67 PAGE 637 LINE 34947 define pcodeop vandps_avx ; :VANDPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & vexVVVV_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vandps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ANDPS 3-67 PAGE 637 LINE 34950 :VANDPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & vexVVVV_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vandps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # ANDNPD 3-70 PAGE 640 LINE 35081 define pcodeop vandnpd_avx ; :VANDNPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vandnpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ANDNPD 3-70 PAGE 640 LINE 35084 :VANDNPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vandnpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # ANDNPS 3-73 PAGE 643 LINE 35207 define pcodeop vandnps_avx ; :VANDNPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & vexVVVV_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vandnps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ANDNPS 3-73 PAGE 643 LINE 35210 :VANDNPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & vexVVVV_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vandnps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # BLENDPD 3-78 PAGE 648 LINE 35433 define pcodeop vblendpd_avx ; :VBLENDPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8_3_0 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8_3_0 { local tmp:16 = vblendpd_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8_3_0:1 ); ZmmReg1 = zext(tmp); } # BLENDPD 3-78 PAGE 648 LINE 35436 :VBLENDPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8_3_0 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8_3_0 { local tmp:32 = vblendpd_avx( vexVVVV_YmmReg, YmmReg2_m256, imm8_3_0:1 ); ZmmReg1 = zext(tmp); } # BLENDPS 3-81 PAGE 651 LINE 35580 define pcodeop vblendps_avx ; :VBLENDPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0C; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128; imm8 { local tmp:16 = vblendps_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # BLENDPS 3-81 PAGE 651 LINE 35583 :VBLENDPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vblendps_avx( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # BLENDVPD 3-83 PAGE 653 LINE 35684 define pcodeop vblendvpd_avx ; :VBLENDVPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, Xmm_imm8_7_4 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x4B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; Xmm_imm8_7_4 { local tmp:16 = vblendvpd_avx( vexVVVV_XmmReg, XmmReg2_m128, Xmm_imm8_7_4 ); ZmmReg1 = zext(tmp); } # BLENDVPD 3-83 PAGE 653 LINE 35688 :VBLENDVPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, Ymm_imm8_7_4 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x4B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; Ymm_imm8_7_4 { local tmp:32 = vblendvpd_avx( vexVVVV_YmmReg, YmmReg2_m256, Ymm_imm8_7_4 ); ZmmReg1 = zext(tmp); } # BLENDVPS 3-85 PAGE 655 LINE 35789 define pcodeop vblendvps_avx ; :VBLENDVPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, Xmm_imm8_7_4 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x4A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; Xmm_imm8_7_4 { local tmp:16 = vblendvps_avx( vexVVVV_XmmReg, XmmReg2_m128, Xmm_imm8_7_4 ); ZmmReg1 = zext(tmp); } # BLENDVPS 3-85 PAGE 655 LINE 35793 :VBLENDVPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, Ymm_imm8_7_4 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x4A; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256; Ymm_imm8_7_4 { local tmp:32 = vblendvps_avx( vexVVVV_YmmReg, YmmReg2_m256, Ymm_imm8_7_4 ); ZmmReg1 = zext(tmp); } # CMPPD 3-155 PAGE 725 LINE 39240 VCMPPD_mon: "VCMPEQPD" is imm8=0x0 { } VCMPPD_op: "" is imm8=0x0 { export 0x0:1; } VCMPPD_mon: "VCMPLTPD" is imm8=0x1 { } VCMPPD_op: "" is imm8=0x1 { export 0x1:1; } VCMPPD_mon: "VCMPLEPD" is imm8=0x2 { } VCMPPD_op: "" is imm8=0x2 { export 0x2:1; } VCMPPD_mon: "VCMPUNORDPD" is imm8=0x3 { } VCMPPD_op: "" is imm8=0x3 { export 0x3:1; } VCMPPD_mon: "VCMPNEQPD" is imm8=0x4 { } VCMPPD_op: "" is imm8=0x4 { export 0x4:1; } VCMPPD_mon: "VCMPNLTPD" is imm8=0x5 { } VCMPPD_op: "" is imm8=0x5 { export 0x5:1; } VCMPPD_mon: "VCMPNLEPD" is imm8=0x6 { } VCMPPD_op: "" is imm8=0x6 { export 0x6:1; } VCMPPD_mon: "VCMPORDPD" is imm8=0x7 { } VCMPPD_op: "" is imm8=0x7 { export 0x7:1; } VCMPPD_mon: "VCMPEQ_UQPD" is imm8=0x8 { } VCMPPD_op: "" is imm8=0x8 { export 0x8:1; } VCMPPD_mon: "VCMPNGEPD" is imm8=0x9 { } VCMPPD_op: "" is imm8=0x9 { export 0x9:1; } VCMPPD_mon: "VCMPNGTPD" is imm8=0xa { } VCMPPD_op: "" is imm8=0xa { export 0xa:1; } VCMPPD_mon: "VCMPFALSEPD" is imm8=0xb { } VCMPPD_op: "" is imm8=0xb { export 0xb:1; } VCMPPD_mon: "VCMPNEQ_OQPD" is imm8=0xc { } VCMPPD_op: "" is imm8=0xc { export 0xc:1; } VCMPPD_mon: "VCMPGEPD" is imm8=0xd { } VCMPPD_op: "" is imm8=0xd { export 0xd:1; } VCMPPD_mon: "VCMPGTPD" is imm8=0xe { } VCMPPD_op: "" is imm8=0xe { export 0xe:1; } VCMPPD_mon: "VCMPTRUEPD" is imm8=0xf { } VCMPPD_op: "" is imm8=0xf { export 0xf:1; } VCMPPD_mon: "VCMPEQ_OSPD" is imm8=0x10 { } VCMPPD_op: "" is imm8=0x10 { export 0x10:1; } VCMPPD_mon: "VCMPLT_OQPD" is imm8=0x11 { } VCMPPD_op: "" is imm8=0x11 { export 0x11:1; } VCMPPD_mon: "VCMPLE_OQPD" is imm8=0x12 { } VCMPPD_op: "" is imm8=0x12 { export 0x12:1; } VCMPPD_mon: "VCMPUNORD_SPD" is imm8=0x13 { } VCMPPD_op: "" is imm8=0x13 { export 0x13:1; } VCMPPD_mon: "VCMPNEQ_USPD" is imm8=0x14 { } VCMPPD_op: "" is imm8=0x14 { export 0x14:1; } VCMPPD_mon: "VCMPNLT_UQPD" is 
imm8=0x15 { } VCMPPD_op: "" is imm8=0x15 { export 0x15:1; } VCMPPD_mon: "VCMPNLE_UQPD" is imm8=0x16 { } VCMPPD_op: "" is imm8=0x16 { export 0x16:1; } VCMPPD_mon: "VCMPORD_SPD" is imm8=0x17 { } VCMPPD_op: "" is imm8=0x17 { export 0x17:1; } VCMPPD_mon: "VCMPEQ_USPD" is imm8=0x18 { } VCMPPD_op: "" is imm8=0x18 { export 0x18:1; } VCMPPD_mon: "VCMPNGE_UQPD" is imm8=0x19 { } VCMPPD_op: "" is imm8=0x19 { export 0x19:1; } VCMPPD_mon: "VCMPNGT_UQPD" is imm8=0x1a { } VCMPPD_op: "" is imm8=0x1a { export 0x1a:1; } VCMPPD_mon: "VCMPFALSE_OSPD" is imm8=0x1b { } VCMPPD_op: "" is imm8=0x1b { export 0x1b:1; } VCMPPD_mon: "VCMPNEQ_OSPD" is imm8=0x1c { } VCMPPD_op: "" is imm8=0x1c { export 0x1c:1; } VCMPPD_mon: "VCMPGE_OQPD" is imm8=0x1d { } VCMPPD_op: "" is imm8=0x1d { export 0x1d:1; } VCMPPD_mon: "VCMPGT_OQPD" is imm8=0x1e { } VCMPPD_op: "" is imm8=0x1e { export 0x1e:1; } VCMPPD_mon: "VCMPTRUE_USPD" is imm8=0x1f { } VCMPPD_op: "" is imm8=0x1f { export 0x1f:1; } VCMPPD_mon: "VCMPPD" is imm8 { } VCMPPD_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } define pcodeop vcmppd_avx ; :^VCMPPD_mon XmmReg1, vexVVVV_XmmReg, XmmReg2_m128^VCMPPD_op is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC2; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; VCMPPD_mon & VCMPPD_op { local tmp:16 = vcmppd_avx( vexVVVV_XmmReg, XmmReg2_m128, VCMPPD_op ); ZmmReg1 = zext(tmp); } # CMPPD 3-155 PAGE 725 LINE 39243 :^VCMPPD_mon YmmReg1, vexVVVV_YmmReg, YmmReg2_m256^VCMPPD_op is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xC2; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256; VCMPPD_mon & VCMPPD_op { local tmp:32 = vcmppd_avx( vexVVVV_YmmReg, YmmReg2_m256, VCMPPD_op ); ZmmReg1 = zext(tmp); } # CMPPS 3-162 PAGE 732 LINE 39607 VCMPPS_mon: "VCMPEQPS" is imm8=0x0 { } VCMPPS_op: "" is imm8=0x0 { export 0x0:1; } VCMPPS_mon: "VCMPLTPS" is imm8=0x1 { } VCMPPS_op: "" is imm8=0x1 { export 0x1:1; } VCMPPS_mon: "VCMPLEPS" is imm8=0x2 { } VCMPPS_op: "" is imm8=0x2 { export 0x2:1; } VCMPPS_mon: "VCMPUNORDPS" is imm8=0x3 { } VCMPPS_op: "" is imm8=0x3 { export 0x3:1; } VCMPPS_mon: "VCMPNEQPS" is imm8=0x4 { } VCMPPS_op: "" is imm8=0x4 { export 0x4:1; } VCMPPS_mon: "VCMPNLTPS" is imm8=0x5 { } VCMPPS_op: "" is imm8=0x5 { export 0x5:1; } VCMPPS_mon: "VCMPNLEPS" is imm8=0x6 { } VCMPPS_op: "" is imm8=0x6 { export 0x6:1; } VCMPPS_mon: "VCMPORDPS" is imm8=0x7 { } VCMPPS_op: "" is imm8=0x7 { export 0x7:1; } VCMPPS_mon: "VCMPEQ_UQPS" is imm8=0x8 { } VCMPPS_op: "" is imm8=0x8 { export 0x8:1; } VCMPPS_mon: "VCMPNGEPS" is imm8=0x9 { } VCMPPS_op: "" is imm8=0x9 { export 0x9:1; } VCMPPS_mon: "VCMPNGTPS" is imm8=0xa { } VCMPPS_op: "" is imm8=0xa { export 0xa:1; } VCMPPS_mon: "VCMPFALSEPS" is imm8=0xb { } VCMPPS_op: "" is imm8=0xb { export 0xb:1; } VCMPPS_mon: "VCMPNEQ_OQPS" is imm8=0xc { } VCMPPS_op: "" is imm8=0xc { export 0xc:1; } VCMPPS_mon: "VCMPGEPS" is imm8=0xd { } VCMPPS_op: "" is imm8=0xd { export 0xd:1; } VCMPPS_mon: "VCMPGTPS" is imm8=0xe { } VCMPPS_op: "" is imm8=0xe { export 0xe:1; } VCMPPS_mon: "VCMPTRUEPS" is imm8=0xf { } VCMPPS_op: "" is imm8=0xf { export 0xf:1; } VCMPPS_mon: "VCMPEQ_OSPS" is imm8=0x10 { } VCMPPS_op: "" is imm8=0x10 { export 0x10:1; } VCMPPS_mon: "VCMPLT_OQPS" is imm8=0x11 { } VCMPPS_op: "" is imm8=0x11 { export 0x11:1; } VCMPPS_mon: "VCMPLE_OQPS" is imm8=0x12 { } VCMPPS_op: "" is imm8=0x12 { export 0x12:1; } VCMPPS_mon: "VCMPUNORD_SPS" is imm8=0x13 { } VCMPPS_op: "" is imm8=0x13 { export 0x13:1; } VCMPPS_mon: "VCMPNEQ_USPS" is imm8=0x14 { } VCMPPS_op: "" is imm8=0x14 { export 0x14:1; } VCMPPS_mon: "VCMPNLT_UQPS" is 
imm8=0x15 { } VCMPPS_op: "" is imm8=0x15 { export 0x15:1; } VCMPPS_mon: "VCMPNLE_UQPS" is imm8=0x16 { } VCMPPS_op: "" is imm8=0x16 { export 0x16:1; } VCMPPS_mon: "VCMPORD_SPS" is imm8=0x17 { } VCMPPS_op: "" is imm8=0x17 { export 0x17:1; } VCMPPS_mon: "VCMPEQ_USPS" is imm8=0x18 { } VCMPPS_op: "" is imm8=0x18 { export 0x18:1; } VCMPPS_mon: "VCMPNGE_UQPS" is imm8=0x19 { } VCMPPS_op: "" is imm8=0x19 { export 0x19:1; } VCMPPS_mon: "VCMPNGT_UQPS" is imm8=0x1a { } VCMPPS_op: "" is imm8=0x1a { export 0x1a:1; } VCMPPS_mon: "VCMPFALSE_OSPS" is imm8=0x1b { } VCMPPS_op: "" is imm8=0x1b { export 0x1b:1; } VCMPPS_mon: "VCMPNEQ_OSPS" is imm8=0x1c { } VCMPPS_op: "" is imm8=0x1c { export 0x1c:1; } VCMPPS_mon: "VCMPGE_OQPS" is imm8=0x1d { } VCMPPS_op: "" is imm8=0x1d { export 0x1d:1; } VCMPPS_mon: "VCMPGT_OQPS" is imm8=0x1e { } VCMPPS_op: "" is imm8=0x1e { export 0x1e:1; } VCMPPS_mon: "VCMPTRUE_USPS" is imm8=0x1f { } VCMPPS_op: "" is imm8=0x1f { export 0x1f:1; } VCMPPS_mon: "VCMPPS" is imm8 { } VCMPPS_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } define pcodeop vcmpps_avx ; :^VCMPPS_mon XmmReg1, vexVVVV_XmmReg, XmmReg2_m128^VCMPPS_op is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC2; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; VCMPPS_mon & VCMPPS_op { local tmp:16 = vcmpps_avx( vexVVVV_XmmReg, XmmReg2_m128, VCMPPS_op ); ZmmReg1 = zext(tmp); } # CMPPS 3-162 PAGE 732 LINE 39610 :^VCMPPS_mon YmmReg1, vexVVVV_YmmReg, YmmReg2_m256^VCMPPS_op is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xC2; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256; VCMPPS_mon & VCMPPS_op { local tmp:32 = vcmpps_avx( vexVVVV_YmmReg, YmmReg2_m256, VCMPPS_op ); ZmmReg1 = zext(tmp); } # CMPSD 3-173 PAGE 743 LINE 40154 VCMPSD_mon: "VCMPEQSD" is imm8=0x0 { } VCMPSD_op: "" is imm8=0x0 { export 0x0:1; } VCMPSD_mon: "VCMPLTSD" is imm8=0x1 { } VCMPSD_op: "" is imm8=0x1 { export 0x1:1; } VCMPSD_mon: "VCMPLESD" is imm8=0x2 { } VCMPSD_op: "" is imm8=0x2 { export 0x2:1; } VCMPSD_mon: "VCMPUNORDSD" is imm8=0x3 { } VCMPSD_op: "" is imm8=0x3 { export 0x3:1; } VCMPSD_mon: "VCMPNEQSD" is imm8=0x4 { } VCMPSD_op: "" is imm8=0x4 { export 0x4:1; } VCMPSD_mon: "VCMPNLTSD" is imm8=0x5 { } VCMPSD_op: "" is imm8=0x5 { export 0x5:1; } VCMPSD_mon: "VCMPNLESD" is imm8=0x6 { } VCMPSD_op: "" is imm8=0x6 { export 0x6:1; } VCMPSD_mon: "VCMPORDSD" is imm8=0x7 { } VCMPSD_op: "" is imm8=0x7 { export 0x7:1; } VCMPSD_mon: "VCMPEQ_UQSD" is imm8=0x8 { } VCMPSD_op: "" is imm8=0x8 { export 0x8:1; } VCMPSD_mon: "VCMPNGESD" is imm8=0x9 { } VCMPSD_op: "" is imm8=0x9 { export 0x9:1; } VCMPSD_mon: "VCMPNGTSD" is imm8=0xa { } VCMPSD_op: "" is imm8=0xa { export 0xa:1; } VCMPSD_mon: "VCMPFALSESD" is imm8=0xb { } VCMPSD_op: "" is imm8=0xb { export 0xb:1; } VCMPSD_mon: "VCMPNEQ_OQSD" is imm8=0xc { } VCMPSD_op: "" is imm8=0xc { export 0xc:1; } VCMPSD_mon: "VCMPGESD" is imm8=0xd { } VCMPSD_op: "" is imm8=0xd { export 0xd:1; } VCMPSD_mon: "VCMPGTSD" is imm8=0xe { } VCMPSD_op: "" is imm8=0xe { export 0xe:1; } VCMPSD_mon: "VCMPTRUESD" is imm8=0xf { } VCMPSD_op: "" is imm8=0xf { export 0xf:1; } VCMPSD_mon: "VCMPEQ_OSSD" is imm8=0x10 { } VCMPSD_op: "" is imm8=0x10 { export 0x10:1; } VCMPSD_mon: "VCMPLT_OQSD" is imm8=0x11 { } VCMPSD_op: "" is imm8=0x11 { export 0x11:1; } VCMPSD_mon: "VCMPLE_OQSD" is imm8=0x12 { } VCMPSD_op: "" is imm8=0x12 { export 0x12:1; } VCMPSD_mon: "VCMPUNORD_SSD" is imm8=0x13 { } VCMPSD_op: "" is imm8=0x13 { export 0x13:1; } VCMPSD_mon: "VCMPNEQ_USSD" is imm8=0x14 { } VCMPSD_op: "" is imm8=0x14 { export 0x14:1; } VCMPSD_mon: "VCMPNLT_UQSD" is 
imm8=0x15 { } VCMPSD_op: "" is imm8=0x15 { export 0x15:1; } VCMPSD_mon: "VCMPNLE_UQSD" is imm8=0x16 { } VCMPSD_op: "" is imm8=0x16 { export 0x16:1; } VCMPSD_mon: "VCMPORD_SSD" is imm8=0x17 { } VCMPSD_op: "" is imm8=0x17 { export 0x17:1; } VCMPSD_mon: "VCMPEQ_USSD" is imm8=0x18 { } VCMPSD_op: "" is imm8=0x18 { export 0x18:1; } VCMPSD_mon: "VCMPNGE_UQSD" is imm8=0x19 { } VCMPSD_op: "" is imm8=0x19 { export 0x19:1; } VCMPSD_mon: "VCMPNGT_UQSD" is imm8=0x1a { } VCMPSD_op: "" is imm8=0x1a { export 0x1a:1; } VCMPSD_mon: "VCMPFALSE_OSSD" is imm8=0x1b { } VCMPSD_op: "" is imm8=0x1b { export 0x1b:1; } VCMPSD_mon: "VCMPNEQ_OSSD" is imm8=0x1c { } VCMPSD_op: "" is imm8=0x1c { export 0x1c:1; } VCMPSD_mon: "VCMPGE_OQSD" is imm8=0x1d { } VCMPSD_op: "" is imm8=0x1d { export 0x1d:1; } VCMPSD_mon: "VCMPGT_OQSD" is imm8=0x1e { } VCMPSD_op: "" is imm8=0x1e { export 0x1e:1; } VCMPSD_mon: "VCMPTRUE_USSD" is imm8=0x1f { } VCMPSD_op: "" is imm8=0x1f { export 0x1f:1; } VCMPSD_mon: "VCMPSD" is imm8 { } VCMPSD_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } define pcodeop vcmpsd_avx ; :^VCMPSD_mon XmmReg1, vexVVVV_XmmReg, XmmReg2_m64^VCMPSD_op is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC2; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m64; VCMPSD_mon & VCMPSD_op { local tmp:16 = vcmpsd_avx( vexVVVV_XmmReg, XmmReg2_m64, VCMPSD_op ); ZmmReg1 = zext(tmp); } # CMPSS 3-177 PAGE 747 LINE 40390 VCMPSS_mon: "VCMPEQSS" is imm8=0x0 { } VCMPSS_op: "" is imm8=0x0 { export 0x0:1; } VCMPSS_mon: "VCMPLTSS" is imm8=0x1 { } VCMPSS_op: "" is imm8=0x1 { export 0x1:1; } VCMPSS_mon: "VCMPLESS" is imm8=0x2 { } VCMPSS_op: "" is imm8=0x2 { export 0x2:1; } VCMPSS_mon: "VCMPUNORDSS" is imm8=0x3 { } VCMPSS_op: "" is imm8=0x3 { export 0x3:1; } VCMPSS_mon: "VCMPNEQSS" is imm8=0x4 { } VCMPSS_op: "" is imm8=0x4 { export 0x4:1; } VCMPSS_mon: "VCMPNLTSS" is imm8=0x5 { } VCMPSS_op: "" is imm8=0x5 { export 0x5:1; } VCMPSS_mon: "VCMPNLESS" is imm8=0x6 { } VCMPSS_op: "" is imm8=0x6 { export 0x6:1; } VCMPSS_mon: "VCMPORDSS" is imm8=0x7 { } VCMPSS_op: "" is imm8=0x7 { export 0x7:1; } VCMPSS_mon: "VCMPEQ_UQSS" is imm8=0x8 { } VCMPSS_op: "" is imm8=0x8 { export 0x8:1; } VCMPSS_mon: "VCMPNGESS" is imm8=0x9 { } VCMPSS_op: "" is imm8=0x9 { export 0x9:1; } VCMPSS_mon: "VCMPNGTSS" is imm8=0xa { } VCMPSS_op: "" is imm8=0xa { export 0xa:1; } VCMPSS_mon: "VCMPFALSESS" is imm8=0xb { } VCMPSS_op: "" is imm8=0xb { export 0xb:1; } VCMPSS_mon: "VCMPNEQ_OQSS" is imm8=0xc { } VCMPSS_op: "" is imm8=0xc { export 0xc:1; } VCMPSS_mon: "VCMPGESS" is imm8=0xd { } VCMPSS_op: "" is imm8=0xd { export 0xd:1; } VCMPSS_mon: "VCMPGTSS" is imm8=0xe { } VCMPSS_op: "" is imm8=0xe { export 0xe:1; } VCMPSS_mon: "VCMPTRUESS" is imm8=0xf { } VCMPSS_op: "" is imm8=0xf { export 0xf:1; } VCMPSS_mon: "VCMPEQ_OSSS" is imm8=0x10 { } VCMPSS_op: "" is imm8=0x10 { export 0x10:1; } VCMPSS_mon: "VCMPLT_OQSS" is imm8=0x11 { } VCMPSS_op: "" is imm8=0x11 { export 0x11:1; } VCMPSS_mon: "VCMPLE_OQSS" is imm8=0x12 { } VCMPSS_op: "" is imm8=0x12 { export 0x12:1; } VCMPSS_mon: "VCMPUNORD_SSS" is imm8=0x13 { } VCMPSS_op: "" is imm8=0x13 { export 0x13:1; } VCMPSS_mon: "VCMPNEQ_USSS" is imm8=0x14 { } VCMPSS_op: "" is imm8=0x14 { export 0x14:1; } VCMPSS_mon: "VCMPNLT_UQSS" is 
imm8=0x15 { } VCMPSS_op: "" is imm8=0x15 { export 0x15:1; } VCMPSS_mon: "VCMPNLE_UQSS" is imm8=0x16 { } VCMPSS_op: "" is imm8=0x16 { export 0x16:1; } VCMPSS_mon: "VCMPORD_SSS" is imm8=0x17 { } VCMPSS_op: "" is imm8=0x17 { export 0x17:1; } VCMPSS_mon: "VCMPEQ_USSS" is imm8=0x18 { } VCMPSS_op: "" is imm8=0x18 { export 0x18:1; } VCMPSS_mon: "VCMPNGE_UQSS" is imm8=0x19 { } VCMPSS_op: "" is imm8=0x19 { export 0x19:1; } VCMPSS_mon: "VCMPNGT_UQSS" is imm8=0x1a { } VCMPSS_op: "" is imm8=0x1a { export 0x1a:1; } VCMPSS_mon: "VCMPFALSE_OSSS" is imm8=0x1b { } VCMPSS_op: "" is imm8=0x1b { export 0x1b:1; } VCMPSS_mon: "VCMPNEQ_OSSS" is imm8=0x1c { } VCMPSS_op: "" is imm8=0x1c { export 0x1c:1; } VCMPSS_mon: "VCMPGE_OQSS" is imm8=0x1d { } VCMPSS_op: "" is imm8=0x1d { export 0x1d:1; } VCMPSS_mon: "VCMPGT_OQSS" is imm8=0x1e { } VCMPSS_op: "" is imm8=0x1e { export 0x1e:1; } VCMPSS_mon: "VCMPTRUE_USSS" is imm8=0x1f { } VCMPSS_op: "" is imm8=0x1f { export 0x1f:1; } VCMPSS_mon: "VCMPSS" is imm8 { } VCMPSS_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } define pcodeop vcmpss_avx ; :^VCMPSS_mon XmmReg1, vexVVVV_XmmReg, XmmReg2_m32^VCMPSS_op is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC2; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32; VCMPSS_mon & VCMPSS_op { local tmp:16 = vcmpss_avx( vexVVVV_XmmReg, XmmReg2_m32, VCMPSS_op ); ZmmReg1 = zext(tmp); } # COMISD 3-186 PAGE 756 LINE 40860 :VCOMISD XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x2F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { fucompe(XmmReg1[0,64], XmmReg2_m64[0,64]); } # COMISS 3-188 PAGE 758 LINE 40938 :VCOMISS XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x2F; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 { fucompe(XmmReg1[0,32], XmmReg2_m32[0,32]); } # CVTDQ2PD 3-228 PAGE 798 LINE 43074 define pcodeop vcvtdq2pd_avx ; :VCVTDQ2PD XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vcvtdq2pd_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # CVTDQ2PD 3-228 PAGE 798 LINE 43077 :VCVTDQ2PD YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vcvtdq2pd_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # CVTDQ2PS 3-232 PAGE 802 LINE 43242 define pcodeop vcvtdq2ps_avx ; :VCVTDQ2PS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vcvtdq2ps_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # CVTDQ2PS 3-232 PAGE 802 LINE 43245 :VCVTDQ2PS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vcvtdq2ps_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # CVTPD2DQ 3-235 PAGE 805 LINE 43408 define pcodeop vcvtpd2dq_avx ; :VCVTPD2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vcvtpd2dq_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # CVTPD2DQ 3-235 PAGE 805 LINE 43411 :VCVTPD2DQ XmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vcvtpd2dq_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # CVTPD2PS 3-240 PAGE 810 LINE 43643 define pcodeop vcvtpd2ps_avx ; :VCVTPD2PS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x5A; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vcvtpd2ps_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # CVTPD2PS 3-240 PAGE 810 LINE 43646 :VCVTPD2PS XmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x5A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vcvtpd2ps_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # CVTPS2DQ 3-246 PAGE 816 LINE 43927 define pcodeop vcvtps2dq_avx ; :VCVTPS2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vcvtps2dq_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # CVTPS2DQ 3-246 PAGE 816 LINE 43930 :VCVTPS2DQ YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vcvtps2dq_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # CVTPS2PD 3-249 PAGE 819 LINE 44098 define pcodeop vcvtps2pd_avx ; :VCVTPS2PD XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vcvtps2pd_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # CVTPS2PD 3-249 PAGE 819 LINE 44101 :VCVTPS2PD YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x5A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vcvtps2pd_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # CVTSD2SI 3-253 PAGE 823 LINE 44315 :VCVTSD2SI Reg32, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m64 { Reg32 = trunc(round(XmmReg2_m64[0,64])); # TODO Reg64 = zext(Reg32) } # CVTSD2SI 3-253 PAGE 823 LINE 44317 @ifdef IA64 :VCVTSD2SI Reg64, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x2D; Reg64 ... 
& XmmReg2_m64
{
	# CVTSD2SI r64: convert scalar double-precision FP to a signed 64-bit
	# integer. round() applies the current MXCSR rounding mode and yields an
	# FP value; trunc() then converts that rounded FP value to an integer.
	# The trunc() was previously missing here, leaving the raw IEEE-754 bit
	# pattern in Reg64 -- now consistent with the Reg32 form of VCVTSD2SI
	# and with both VCVTSS2SI forms in this file.
	Reg64 = trunc(round(XmmReg2_m64[0,64]));
}
@endif

# CVTSD2SS 3-255 PAGE 825 LINE 44414
:VCVTSD2SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{
	# Narrow the low double to single; bits [32,96] come from the VEX.vvvv source.
	local tmp:4 = float2float(XmmReg2_m64[0,64]);
	XmmReg1[0,32] = tmp;
	XmmReg1[32,96] = vexVVVV_XmmReg[32,96];
	ZmmReg1 = zext(XmmReg1);
}

# CVTSI2SD 3-257 PAGE 827 LINE 44516
:VCVTSI2SD XmmReg1, vexVVVV_XmmReg, rm32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
{
	# Signed 32-bit integer -> double in the low qword; high qword from VEX.vvvv.
	local tmp:8 = int2float(rm32);
	XmmReg1[0,64] = tmp;
	XmmReg1[64,64] = vexVVVV_XmmReg[64,64];
	ZmmReg1 = zext(XmmReg1);
}

# CVTSI2SD 3-257 PAGE 827 LINE 44519
@ifdef IA64
:VCVTSI2SD XmmReg1, vexVVVV_XmmReg, rm64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
{
	# Signed 64-bit integer -> double in the low qword; high qword from VEX.vvvv.
	local tmp:8 = int2float(rm64);
	XmmReg1[0,64] = tmp;
	XmmReg1[64,64] = vexVVVV_XmmReg[64,64];
	ZmmReg1 = zext(XmmReg1);
}
@endif

# CVTSI2SS 3-259 PAGE 829 LINE 44632
:VCVTSI2SS XmmReg1, vexVVVV_XmmReg, rm32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32
{
	# Signed 32-bit integer -> single in the low dword; bits [32,96] from VEX.vvvv.
	local tmp:4 = int2float( rm32 );
	XmmReg1[0,32] = tmp;
	XmmReg1[32,96] = vexVVVV_XmmReg[32,96];
	ZmmReg1 = zext(XmmReg1);
}

# CVTSI2SS 3-259 PAGE 829 LINE 44634
@ifdef IA64
:VCVTSI2SS XmmReg1, vexVVVV_XmmReg, rm64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64
{
	# Signed 64-bit integer -> single in the low dword; bits [32,96] from VEX.vvvv.
	local tmp:4 = int2float( rm64 );
	XmmReg1[0,32] = tmp;
	XmmReg1[32,96] = vexVVVV_XmmReg[32,96];
	ZmmReg1 = zext(XmmReg1);
}
@endif

# CVTSS2SD 3-261 PAGE 831 LINE 44744
:VCVTSS2SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m32 { local tmp:8 = float2float( XmmReg2_m32[0,32] ); XmmReg1[0,64] = tmp; XmmReg1[64,64] = vexVVVV_XmmReg[64,64]; ZmmReg1 = zext(XmmReg1); } # CVTSS2SI 3-263 PAGE 833 LINE 44835 :VCVTSS2SI Reg32, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m32 { Reg32 = trunc(round(XmmReg2_m32[0,32])); } # CVTSS2SI 3-263 PAGE 833 LINE 44837 @ifdef IA64 :VCVTSS2SI Reg64, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2D; Reg64 ... & XmmReg2_m32 { Reg64 = trunc(round(XmmReg2_m32[0,32])); } @endif # CVTTPD2DQ 3-265 PAGE 835 LINE 44930 :VCVTTPD2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = XmmReg2_m128; XmmReg1[0,32] = trunc(tmp[0,64]); XmmReg1[32,32] = trunc(tmp[64,64]); XmmReg1[64,32] = 0; XmmReg1[96,32] = 0; ZmmReg1 = zext(XmmReg1); } # CVTTPD2DQ 3-265 PAGE 835 LINE 44933 :VCVTTPD2DQ XmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE6; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = YmmReg2_m256; XmmReg1[0,32] = trunc(tmp[0,64]); XmmReg1[32,32] = trunc(tmp[64,64]); XmmReg1[64,32] = trunc(tmp[128,64]); XmmReg1[96,32] = trunc(tmp[192,64]); ZmmReg1 = zext(XmmReg1); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45163 :VCVTTPS2DQ XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = XmmReg2_m128; XmmReg1[0,32] = trunc(tmp[0,32]); XmmReg1[32,32] = trunc(tmp[32,32]); XmmReg1[64,32] = trunc(tmp[64,32]); XmmReg1[96,32] = trunc(tmp[96,32]); ZmmReg1 = zext(XmmReg1); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45166 :VCVTTPS2DQ YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x5B; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = YmmReg2_m256; YmmReg1[0,32] = trunc(tmp[0,32]); YmmReg1[32,32] = trunc(tmp[32,32]); YmmReg1[64,32] = trunc(tmp[64,32]); YmmReg1[96,32] = trunc(tmp[96,32]); YmmReg1[128,32] = trunc(tmp[128,32]); YmmReg1[160,32] = trunc(tmp[160,32]); YmmReg1[192,32] = trunc(tmp[192,32]); YmmReg1[224,32] = trunc(tmp[224,32]); ZmmReg1 = zext(YmmReg1); } # CVTTSD2SI 3-274 PAGE 844 LINE 45379 :VCVTTSD2SI Reg32, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m64 { Reg32 = trunc(XmmReg2_m64[0,64]); } # CVTTSD2SI 3-274 PAGE 844 LINE 45382 @ifdef IA64 :VCVTTSD2SI Reg64, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m64 { Reg64 = trunc(XmmReg2_m64[0,64]); } @endif # CVTTSS2SI 3-276 PAGE 846 LINE 45473 :VCVTTSS2SI Reg32, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m32 { Reg32 = trunc(XmmReg2_m32[0,32]); } # CVTTSS2SI 3-276 PAGE 846 LINE 45476 @ifdef IA64 :VCVTTSS2SI Reg64, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m32 { Reg64 = trunc(XmmReg2_m32[0,32]); } @endif # DIVPD 3-288 PAGE 858 LINE 46023 define pcodeop vdivpd_avx ; :VDIVPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vdivpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # DIVPD 3-288 PAGE 858 LINE 46026 :VDIVPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vdivpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # DIVPS 3-291 PAGE 861 LINE 46164 define pcodeop vdivps_avx ; :VDIVPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vdivps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # DIVPS 3-291 PAGE 861 LINE 46167 :VDIVPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vdivps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # DIVSD 3-294 PAGE 864 LINE 46312 :VDIVSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f/ XmmReg2_m64[0,64]; XmmReg1[64,64] = vexVVVV_XmmReg[64,64]; ZmmReg1 = zext(XmmReg1); } # DIVSS 3-296 PAGE 866 LINE 46410 :VDIVSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { XmmReg1[0,32] = vexVVVV_XmmReg[0,32] f/ XmmReg2_m32[0,32]; XmmReg1[32,96] = vexVVVV_XmmReg[32,96]; ZmmReg1 = zext(XmmReg1); } # DPPD 3-298 PAGE 868 LINE 46509 define pcodeop vdppd_avx ; :VDPPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x41; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128; imm8 { local tmp:16 = vdppd_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # DPPS 3-300 PAGE 870 LINE 46612 define pcodeop vdpps_avx ; :VDPPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 { local tmp:16 = vdpps_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # DPPS 3-300 PAGE 870 LINE 46616 :VDPPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vdpps_avx( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # EXTRACTPS 3-307 PAGE 877 LINE 46978 define pcodeop vextractps_avx ; :VEXTRACTPS rm32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x17; XmmReg1 ... & rm32; imm8 { rm32 = vextractps_avx( XmmReg1, imm8:1 ); } # HADDPD 3-427 PAGE 997 LINE 52447 :VHADDPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local m:16 = XmmReg2_m128; XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f+ vexVVVV_XmmReg[64,64]; XmmReg1[64,64] = m[0,64] f+ m[64,64]; ZmmReg1 = zext(XmmReg1); } # HADDPD 3-427 PAGE 997 LINE 52450 :VHADDPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x7C; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local m:32 = YmmReg2_m256; YmmReg1[0,64] = vexVVVV_YmmReg[0,64] f+ vexVVVV_YmmReg[64,64]; YmmReg1[64,64] = m[0,64] f+ m[64,64]; YmmReg1[128,64] = vexVVVV_YmmReg[128,64] f+ vexVVVV_YmmReg[192,64]; YmmReg1[192,64] = m[128,64] f+ m[192,64]; ZmmReg1 = zext(YmmReg1); } # HADDPS 3-430 PAGE 1000 LINE 52586 define pcodeop vhaddps_avx ; :VHADDPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vhaddps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # HADDPS 3-430 PAGE 1000 LINE 52589 :VHADDPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x7C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vhaddps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # HSUBPD 3-434 PAGE 1004 LINE 52795 define pcodeop vhsubpd_avx ; :VHSUBPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vhsubpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # HSUBPD 3-434 PAGE 1004 LINE 52798 :VHSUBPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vhsubpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # HSUBPS 3-437 PAGE 1007 LINE 52933 define pcodeop vhsubps_avx ; :VHSUBPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x7D; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vhsubps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # HSUBPS 3-437 PAGE 1007 LINE 52936 :VHSUBPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vhsubps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # INSERTPS 3-454 PAGE 1024 LINE 53780 define pcodeop vinsertps_avx ; :VINSERTPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x21; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32; imm8 { local tmp:16 = vinsertps_avx( vexVVVV_XmmReg, XmmReg2_m32, imm8:1 ); ZmmReg1 = zext(tmp); } # LDDQU 3-518 PAGE 1088 LINE 57123 define pcodeop vlddqu_avx ; :VLDDQU XmmReg1, m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0xF0; (XmmReg1 & ZmmReg1) ... & m128 { local tmp:16 = vlddqu_avx( m128 ); ZmmReg1 = zext(tmp); } # LDDQU 3-518 PAGE 1088 LINE 57126 :VLDDQU YmmReg1, m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0xF0; (YmmReg1 & ZmmReg1) ... & m256 { local tmp:32 = vlddqu_avx( m256 ); ZmmReg1 = zext(tmp); } # LDMXCSR 3-520 PAGE 1090 LINE 57208 define pcodeop vldmxcsr_avx ; :VLDMXCSR m32 is $(VEX_NONE) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0xAE; reg_opcode=2 ... 
& m32 { vldmxcsr_avx( m32 ); # TODO missing destination or side effects } # MASKMOVDQU 4-8 PAGE 1128 LINE 59041 define pcodeop vmaskmovdqu_avx ; :VMASKMOVDQU XmmReg1, XmmReg2 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xF7; XmmReg1 & (mod=0x3 & XmmReg2) { vmaskmovdqu_avx( XmmReg1, XmmReg2 ); # TODO missing destination or side effects } # MAXPD 4-12 PAGE 1132 LINE 59201 define pcodeop vmaxpd_avx ; :VMAXPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vmaxpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # MAXPD 4-12 PAGE 1132 LINE 59203 :VMAXPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vmaxpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # MAXPS 4-15 PAGE 1135 LINE 59350 define pcodeop vmaxps_avx ; :VMAXPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vmaxps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # MAXPS 4-15 PAGE 1135 LINE 59353 :VMAXPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vmaxps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # MAXSD 4-18 PAGE 1138 LINE 59503 define pcodeop vmaxsd_avx ; :VMAXSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m64 { local tmp:16 = vmaxsd_avx( vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # MAXSS 4-20 PAGE 1140 LINE 59606 define pcodeop vmaxss_avx ; :VMAXSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vmaxss_avx( vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # MINPD 4-23 PAGE 1143 LINE 59765 define pcodeop vminpd_avx ; :VMINPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vminpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # MINPD 4-23 PAGE 1143 LINE 59768 :VMINPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vminpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # MINPS 4-26 PAGE 1146 LINE 59909 define pcodeop vminps_avx ; :VMINPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vminps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # MINPS 4-26 PAGE 1146 LINE 59912 :VMINPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vminps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # MINSD 4-29 PAGE 1149 LINE 60061 define pcodeop vminsd_avx ; :VMINSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m64 { local tmp:16 = vminsd_avx( vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # MINSS 4-31 PAGE 1151 LINE 60164 define pcodeop vminss_avx ; :VMINSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vminss_avx( vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # MOVD/MOVQ 4-55 PAGE 1175 LINE 61358 :VMOVD XmmReg1, rm32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x6E; (XmmReg1 & ZmmReg1) ... & rm32 { ZmmReg1 = zext( rm32 ); } # MOVD/MOVQ 4-55 PAGE 1175 LINE 61360 @ifdef IA64 :VMOVQ XmmReg1, rm64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x6E; (XmmReg1 & ZmmReg1) ... & rm64 { ZmmReg1 = zext( rm64 ); } @endif # MOVD/MOVQ 4-55 PAGE 1175 LINE 61362 :VMOVD rm32, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7E; XmmReg1 ... & rm32 { rm32 = XmmReg1 [0,32]; } # MOVD/MOVQ 4-55 PAGE 1175 LINE 61364 @ifdef IA64 :VMOVQ rm64, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x7E; XmmReg1 ... & rm64 { rm64 = XmmReg1 [0,64]; } @endif # MOVDDUP 4-59 PAGE 1179 LINE 61521 :VMOVDDUP XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x12; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:8 = XmmReg2_m64[0,64]; XmmReg1[0,64] = tmp; XmmReg1[64,64] = tmp; ZmmReg1 = zext(XmmReg1); } # MOVDDUP 4-59 PAGE 1179 LINE 61523 :VMOVDDUP YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x12; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = YmmReg2_m256; local tmp1:8 = tmp[0,64]; local tmp2:8 = tmp[128,64]; YmmReg1[0,64] = tmp1; YmmReg1[64,64] = tmp1; YmmReg1[128,64] = tmp2; YmmReg1[192,64] = tmp2; ZmmReg1 = zext(YmmReg1); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61930 :VMOVDQU XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = XmmReg2_m128; ZmmReg1 = zext(tmp); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61932 :VMOVDQU XmmReg2_m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; (XmmReg1 & XmmReg2_m128_extend) ... & XmmReg2_m128 { XmmReg2_m128 = XmmReg1; build XmmReg2_m128_extend; } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61934 :VMOVDQU YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = YmmReg2_m256; ZmmReg1 = zext(tmp); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61936 :VMOVDQU m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; YmmReg1 ... & m256 { m256 = YmmReg1; } :VMOVDQU YmmReg2, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; YmmReg1 & (mod=3 & ZmmReg2 & YmmReg2) { ZmmReg2 = zext( YmmReg1 ); } # MOVHLPS 4-76 PAGE 1196 LINE 62410 define pcodeop vmovhlps_avx ; :VMOVHLPS XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2) { local tmp:16 = vmovhlps_avx( vexVVVV_XmmReg, XmmReg2 ); ZmmReg1 = zext(tmp); } # MOVHPD 4-78 PAGE 1198 LINE 62483 define pcodeop vmovhpd_avx ; :VMOVHPD XmmReg1, vexVVVV_XmmReg, m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... 
& m64 { local tmp:16 = vmovhpd_avx( vexVVVV_XmmReg, m64 ); ZmmReg1 = zext(tmp); } # MOVHPD 4-78 PAGE 1198 LINE 62489 :VMOVHPD m64, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x17; XmmReg1 ... & m64 { m64 = vmovhpd_avx( XmmReg1 ); } # MOVHPS 4-80 PAGE 1200 LINE 62570 define pcodeop vmovhps_avx ; :VMOVHPS XmmReg1, vexVVVV_XmmReg, m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... & m64 { local tmp:16 = vmovhps_avx( vexVVVV_XmmReg, m64 ); ZmmReg1 = zext(tmp); } # MOVHPS 4-80 PAGE 1200 LINE 62576 :VMOVHPS m64, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x17; XmmReg1 ... & m64 { m64 = vmovhps_avx( XmmReg1 ); } # MOVLHPS 4-82 PAGE 1202 LINE 62658 define pcodeop vmovlhps_avx ; :VMOVLHPS XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2) { local tmp:16 = vmovlhps_avx( vexVVVV_XmmReg, XmmReg2 ); ZmmReg1 = zext(tmp); } # MOVLPD 4-84 PAGE 1204 LINE 62731 define pcodeop vmovlpd_avx ; :VMOVLPD XmmReg1, vexVVVV_XmmReg, m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... & m64 { local tmp:16 = vmovlpd_avx( vexVVVV_XmmReg, m64 ); ZmmReg1 = zext(tmp); } # MOVLPD 4-84 PAGE 1204 LINE 62737 :VMOVLPD m64, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x13; XmmReg1 ... & m64 { m64 = vmovlpd_avx( XmmReg1 ); } # MOVLPS 4-86 PAGE 1206 LINE 62816 define pcodeop vmovlps_avx ; :VMOVLPS XmmReg1, vexVVVV_XmmReg, m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... 
& m64 { local tmp:16 = vmovlps_avx( vexVVVV_XmmReg, m64 ); ZmmReg1 = zext(tmp); } # MOVLPS 4-86 PAGE 1206 LINE 62822 :VMOVLPS m64, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x13; XmmReg1 ... & m64 { m64 = vmovlps_avx( XmmReg1 ); } # MOVMSKPD 4-88 PAGE 1208 LINE 62906 define pcodeop vmovmskpd_avx ; :VMOVMSKPD Reg32, XmmReg2 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x50; Reg32 & (mod=0x3 & XmmReg2) { Reg32 = vmovmskpd_avx( XmmReg2 ); # TODO Reg64 = zext(Reg32) } # MOVMSKPD 4-88 PAGE 1208 LINE 62910 :VMOVMSKPD Reg32, YmmReg2 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x50; Reg32 & (mod=0x3 & YmmReg2) { Reg32 = vmovmskpd_avx( YmmReg2 ); # TODO Reg64 = zext(Reg32) } # MOVMSKPS 4-90 PAGE 1210 LINE 62986 define pcodeop vmovmskps_avx ; :VMOVMSKPS Reg32, XmmReg2 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x50; Reg32 & (mod=0x3 & XmmReg2) { Reg32 = vmovmskps_avx( XmmReg2 ); # TODO Reg64 = zext(Reg32) } # MOVMSKPS 4-90 PAGE 1210 LINE 62990 :VMOVMSKPS Reg32, YmmReg2 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x50; Reg32 & (mod=0x3 & YmmReg2) { Reg32 = vmovmskps_avx( YmmReg2 ); # TODO Reg64 = zext(Reg32) } # MOVNTDQA 4-92 PAGE 1212 LINE 63084 define pcodeop vmovntdqa_avx ; :VMOVNTDQA XmmReg1, m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x2A; (XmmReg1 & ZmmReg1) ... & m128 { local tmp:16 = vmovntdqa_avx( m128 ); ZmmReg1 = zext(tmp); } # MOVNTDQ 4-94 PAGE 1214 LINE 63187 define pcodeop vmovntdq_avx ; :VMOVNTDQ m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE7; XmmReg1 ... & m128 { m128 = vmovntdq_avx( XmmReg1 ); } # MOVNTDQ 4-94 PAGE 1214 LINE 63189 :VMOVNTDQ m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xE7; YmmReg1 ... 
& m256 { m256 = vmovntdq_avx( YmmReg1 ); } # MOVNTPD 4-98 PAGE 1218 LINE 63357 define pcodeop vmovntpd_avx ; :VMOVNTPD m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x2B; XmmReg1 ... & m128 { m128 = vmovntpd_avx( XmmReg1 ); } # MOVNTPD 4-98 PAGE 1218 LINE 63359 :VMOVNTPD m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x2B; YmmReg1 ... & m256 { m256 = vmovntpd_avx( YmmReg1 ); } # MOVNTPS 4-100 PAGE 1220 LINE 63441 define pcodeop vmovntps_avx ; :VMOVNTPS m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x2B; XmmReg1 ... & m128 { m128 = vmovntps_avx( XmmReg1 ); } # MOVNTPS 4-100 PAGE 1220 LINE 63443 :VMOVNTPS m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x2B; YmmReg1 ... & m256 { m256 = vmovntps_avx( YmmReg1 ); } # MOVQ 4-103 PAGE 1223 LINE 63579 :VMOVQ XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x7E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { ZmmReg1 = zext(XmmReg2_m64[0,64]); } # MOVQ 4-103 PAGE 1223 LINE 63585 :VMOVQ m64, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD6; XmmReg1 ... & m64 { m64 = XmmReg1[0,64]; } :VMOVQ XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD6; XmmReg1 & (mod=3 & XmmReg2 & ZmmReg2 ) { ZmmReg2 = zext( XmmReg1[0,64] ); } # MOVSHDUP 4-114 PAGE 1234 LINE 64126 define pcodeop vmovshdup_avx ; :VMOVSHDUP XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x16; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vmovshdup_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # MOVSHDUP 4-114 PAGE 1234 LINE 64128 :VMOVSHDUP YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x16; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vmovshdup_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # MOVSLDUP 4-117 PAGE 1237 LINE 64280 define pcodeop vmovsldup_avx ; :VMOVSLDUP XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x12; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vmovsldup_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # MOVSLDUP 4-117 PAGE 1237 LINE 64282 :VMOVSLDUP YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x12; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vmovsldup_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # MOVSS 4-120 PAGE 1240 LINE 64433 :VMOVSS XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2) { XmmReg1[0,32] = XmmReg2[0,32]; XmmReg1[32,96] = vexVVVV_XmmReg[32,96]; ZmmReg1 = zext(XmmReg1); } # MOVSS 4-120 PAGE 1240 LINE 64435 :VMOVSS XmmReg1, m32 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (XmmReg1 & ZmmReg1) ... & m32 { ZmmReg1 = zext( m32 ); } # MOVSS 4-120 PAGE 1240 LINE 64439 :VMOVSS XmmReg2, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2)) { XmmReg2[0,32] = XmmReg1[0,32]; XmmReg2[32,96] = vexVVVV_XmmReg[32,96]; ZmmReg2 = zext(XmmReg2); } # MOVSS 4-120 PAGE 1240 LINE 64441 :VMOVSS m32, XmmReg1 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 ... & m32 { m32 = XmmReg1[0,32]; } # MOVUPD 4-126 PAGE 1246 LINE 64687 :VMOVUPD XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = XmmReg2_m128; ZmmReg1 = zext(tmp); } # MOVUPD 4-126 PAGE 1246 LINE 64689 :VMOVUPD XmmReg2_m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x11; (XmmReg1 & XmmReg2_m128_extend) ... & XmmReg2_m128 { XmmReg2_m128 = XmmReg1; build XmmReg2_m128_extend; } # MOVUPD 4-126 PAGE 1246 LINE 64691 :VMOVUPD YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = YmmReg2_m256; ZmmReg1 = zext(tmp); } # MOVUPD 4-126 PAGE 1246 LINE 64693 :VMOVUPD m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x11; YmmReg1 ... & m256 { m256 = YmmReg1; } :VMOVUPD YmmReg2, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x11; YmmReg1 & ( mod=3 & YmmReg2 & ZmmReg2 ) { local tmp:32 = YmmReg1; ZmmReg2 = zext(tmp); } # MPSADBW 4-136 PAGE 1256 LINE 65135 define pcodeop vmpsadbw_avx ; :VMPSADBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x42; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 { local tmp:16 = vmpsadbw_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # MULPD 4-146 PAGE 1266 LINE 65682 :VMULPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local m:16 = XmmReg2_m128; XmmReg1[0,64] = vexVVVV_XmmReg[0,64] f* m[0,64]; XmmReg1[64,64] = vexVVVV_XmmReg[64,64] f* m[64,64]; ZmmReg1 = zext(XmmReg1); } # MULPD 4-146 PAGE 1266 LINE 65684 :VMULPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local m:32 = YmmReg2_m256; YmmReg1[0,64] = vexVVVV_YmmReg[0,64] f* m[0,64]; YmmReg1[64,64] = vexVVVV_YmmReg[64,64] f* m[64,64]; YmmReg1[128,64] = vexVVVV_YmmReg[128,64] f* m[128,64]; YmmReg1[192,64] = vexVVVV_YmmReg[192,64] f* m[192,64]; ZmmReg1 = zext(YmmReg1); } # MULPS 4-149 PAGE 1269 LINE 65813 :VMULPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local m:16 = XmmReg2_m128; XmmReg1[0,32] = vexVVVV_XmmReg[0,32] f* m[0,32]; XmmReg1[32,32] = vexVVVV_XmmReg[32,32] f* m[32,32]; XmmReg1[64,32] = vexVVVV_XmmReg[64,32] f* m[64,32]; XmmReg1[96,32] = vexVVVV_XmmReg[96,32] f* m[96,32]; ZmmReg1 = zext(XmmReg1); } # MULPS 4-149 PAGE 1269 LINE 65815 :VMULPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local m:32 = YmmReg2_m256; YmmReg1[0,32] = vexVVVV_YmmReg[0,32] f* m[0,32]; YmmReg1[32,32] = vexVVVV_YmmReg[32,32] f* m[32,32]; YmmReg1[64,32] = vexVVVV_YmmReg[64,32] f* m[64,32]; YmmReg1[96,32] = vexVVVV_YmmReg[96,32] f* m[96,32]; YmmReg1[128,32] = vexVVVV_YmmReg[128,32] f* m[128,32]; YmmReg1[160,32] = vexVVVV_YmmReg[160,32] f* m[160,32]; YmmReg1[192,32] = vexVVVV_YmmReg[192,32] f* m[192,32]; ZmmReg1 = zext(YmmReg1); } # MULSD 4-152 PAGE 1272 LINE 65956 :VMULSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:8 = vexVVVV_XmmReg[0,64] f* XmmReg2_m64[0,64]; ZmmReg1 = zext(tmp); } # MULSS 4-154 PAGE 1274 LINE 66052 :VMULSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 { local tmp:4 = vexVVVV_XmmReg[0,32] f* XmmReg2_m32[0,32]; ZmmReg1 = zext(tmp); } # ORPD 4-168 PAGE 1288 LINE 66720 define pcodeop vorpd_avx ; :VORPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vorpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ORPD 4-168 PAGE 1288 LINE 66722 :VORPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vorpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # ORPS 4-171 PAGE 1291 LINE 66846 define pcodeop vorps_avx ; :VORPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & vexVVVV_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vorps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # ORPS 4-171 PAGE 1291 LINE 66848 :VORPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & vexVVVV_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vorps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67302 define pcodeop vpabsb_avx ; :VPABSB XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x1C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpabsb_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67305 define pcodeop vpabsw_avx ; :VPABSW XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x1D; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpabsw_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67308 define pcodeop vpabsd_avx ; :VPABSD XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x1E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpabsd_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67629 define pcodeop vpacksswb_avx ; :VPACKSSWB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x63; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpacksswb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67633 define pcodeop vpackssdw_avx ; :VPACKSSDW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x6B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpackssdw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PACKUSDW 4-194 PAGE 1314 LINE 68086 define pcodeop vpackusdw_avx ; :VPACKUSDW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_XmmReg; byte=0x2B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpackusdw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PACKUSWB 4-199 PAGE 1319 LINE 68366 define pcodeop vpackuswb_avx ; :VPACKUSWB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x67; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpackuswb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68658 define pcodeop vpaddb_avx ; :VPADDB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68660 define pcodeop vpaddw_avx ; :VPADDW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68662 define pcodeop vpaddd_avx ; :VPADDD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68664 define pcodeop vpaddq_avx ; :VPADDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD4; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69040 define pcodeop vpaddsb_avx ; :VPADDSB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEC; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpaddsb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69042 define pcodeop vpaddsw_avx ; :VPADDSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xED; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69257 define pcodeop vpaddusb_avx ; :VPADDUSB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddusb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69260 define pcodeop vpaddusw_avx ; :VPADDUSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpaddusw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PALIGNR 4-219 PAGE 1339 LINE 69485 define pcodeop vpalignr_avx ; :VPALIGNR XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 { local tmp:16 = vpalignr_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # PAND 4-223 PAGE 1343 LINE 69678 define pcodeop vpand_avx ; :VPAND XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpand_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PANDN 4-226 PAGE 1346 LINE 69854 define pcodeop vpandn_avx ; :VPANDN XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpandn_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70085 define pcodeop vpavgb_avx ; :VPAVGB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE0; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpavgb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70088 define pcodeop vpavgw_avx ; :VPAVGW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE3; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpavgw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PBLENDVB 4-234 PAGE 1354 LINE 70296 define pcodeop vpblendvb_avx ; :VPBLENDVB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, Xmm_imm8_7_4 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x4C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; Xmm_imm8_7_4 { local tmp:16 = vpblendvb_avx( vexVVVV_XmmReg, XmmReg2_m128, Xmm_imm8_7_4 ); ZmmReg1 = zext(tmp); } # PBLENDW 4-238 PAGE 1358 LINE 70522 define pcodeop vpblendw_avx ; :VPBLENDW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0E; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128; imm8 { local tmp:16 = vpblendw_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70821 define pcodeop vpcmpeqb_avx ; :VPCMPEQB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x74; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpeqb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70824 define pcodeop vpcmpeqw_avx ; :VPCMPEQW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x75; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpeqw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70827 define pcodeop vpcmpeqd_avx ; :VPCMPEQD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpeqd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPESTRI 4-253 PAGE 1373 LINE 71311 define pcodeop vpcmpestri_avx ; :VPCMPESTRI XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A); byte=0x61; XmmReg1 ... & XmmReg2_m128; imm8 { vpcmpestri_avx( XmmReg1, XmmReg2_m128, imm8:1 ); # TODO missing destination or side effects } # PCMPESTRM 4-255 PAGE 1375 LINE 71395 define pcodeop vpcmpestrm_avx ; :VPCMPESTRM XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A); byte=0x60; XmmReg1 ... 
& XmmReg2_m128; imm8 { vpcmpestrm_avx( XmmReg1, XmmReg2_m128, imm8:1 ); # TODO missing destination or side effects } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71499 define pcodeop vpcmpgtb_avx ; :VPCMPGTB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpgtb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71502 define pcodeop vpcmpgtw_avx ; :VPCMPGTW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpgtw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71505 define pcodeop vpcmpgtd_avx ; :VPCMPGTD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpgtd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPGTQ 4-263 PAGE 1383 LINE 71833 define pcodeop vpcmpgtq_avx ; :VPCMPGTQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x37; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpcmpgtq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PCMPISTRI 4-266 PAGE 1386 LINE 71966 define pcodeop vpcmpistri_avx ; :VPCMPISTRI XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x63; XmmReg1 ... 
& XmmReg2_m128; imm8
{
	vpcmpistri_avx( XmmReg1, XmmReg2_m128, imm8:1 ); # TODO missing destination or side effects
}

# PCMPISTRM 4-268 PAGE 1388 LINE 72052
define pcodeop vpcmpistrm_avx ;
:VPCMPISTRM XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x62; XmmReg1 ... & XmmReg2_m128; imm8
{
	vpcmpistrm_avx( XmmReg1, XmmReg2_m128, imm8:1 ); # TODO missing destination or side effects
}

# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72322
define pcodeop vpextrb_avx ;
# VPEXTRB selects byte imm8[3:0] of the source (Intel SDM), so the index mask
# must be 4 bits wide; a 3-bit mask would make bytes 8-15 unreachable.
:VPEXTRB Rmr32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x14; mod=3 & XmmReg1 & Rmr32 & check_Rmr32_dest; imm8
{
	local tmp8:1 = imm8;
	local tmp = XmmReg1 >> (tmp8[0,4]*8);
	Rmr32 = zext(tmp[0,8]);
	build check_Rmr32_dest;
}
:VPEXTRB m8, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x14; (XmmReg1 ... & m8); imm8
{
	local tmp8:1 = imm8;
	local tmp = XmmReg1 >> (tmp8[0,4]*8);
	m8 = tmp[0,8];
}

# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72326
define pcodeop vpextrd_avx ;
# VPEXTRD selects dword imm8[1:0] of the source (Intel SDM); without the 2-bit
# mask, imm8 >= 4 would shift the 128-bit source entirely to zero instead of
# selecting a dword.
:VPEXTRD Rmr32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x16; mod=3 & XmmReg1 & Rmr32 & check_Rmr32_dest; imm8
{
	local tmp = XmmReg1 >> ((imm8 & 0x3)*32);
	Rmr32 = tmp(0);
	build check_Rmr32_dest;
}
:VPEXTRD m32, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x16; XmmReg1 ... & m32; imm8
{
	local tmp = XmmReg1 >> ((imm8 & 0x3)*32);
	m32 = tmp(0);
}

# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72330
define pcodeop vpextrq_avx ;
@ifdef IA64
:VPEXTRQ rm64, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x16; XmmReg1 ...
& rm64; imm8 { rm64 = vpextrq_avx( XmmReg1, imm8:1 ); } @endif # PEXTRW 4-277 PAGE 1397 LINE 72478 define pcodeop vpextrw_avx ; :VPEXTRW Reg32, XmmReg2, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0xC5; Reg32 & (mod=0x3 & XmmReg2); imm8 { Reg32 = vpextrw_avx( XmmReg2, imm8:1 ); # TODO Reg64 = zext(Reg32) } # PEXTRW 4-277 PAGE 1397 LINE 72483 :VPEXTRW Reg32_m16, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x15; XmmReg1 ... & Reg32_m16; imm8 { Reg32_m16 = vpextrw_avx( XmmReg1, imm8:1 ); # TODO Reg64 = zext(Reg32) } # PHADDW/PHADDD 4-280 PAGE 1400 LINE 72627 define pcodeop vphaddw_avx ; :VPHADDW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x01; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vphaddw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PHADDW/PHADDD 4-280 PAGE 1400 LINE 72630 define pcodeop vphaddd_avx ; :VPHADDD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x02; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vphaddd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PHADDSW 4-284 PAGE 1404 LINE 72821 define pcodeop vphaddsw_avx ; :VPHADDSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x03; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vphaddsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PHMINPOSUW 4-286 PAGE 1406 LINE 72939 define pcodeop vphminposuw_avx ; :VPHMINPOSUW XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x41; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128
{
	local tmp:16 = vphminposuw_avx( XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# PHSUBW/PHSUBD 4-288 PAGE 1408 LINE 73032
define pcodeop vphsubw_avx ;
# Horizontal subtract: the result depends only on the two source operands, so
# the stale destination value (XmmReg1) is not passed to the pcodeop. This
# follows the two-source operand convention used by every other AVX pcodeop in
# this file (e.g. vphaddw_avx / vphaddd_avx / vphaddsw_avx) and avoids a false
# data dependency on the old destination register.
:VPHSUBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x05; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
	local tmp:16 = vphsubw_avx( vexVVVV_XmmReg, XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# PHSUBW/PHSUBD 4-288 PAGE 1408 LINE 73035
define pcodeop vphsubd_avx ;
:VPHSUBD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x06; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
	local tmp:16 = vphsubd_avx( vexVVVV_XmmReg, XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# PHSUBSW 4-291 PAGE 1411 LINE 73197
define pcodeop vphsubsw_avx ;
:VPHSUBSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x07; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
	local tmp:16 = vphsubsw_avx( vexVVVV_XmmReg, XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73321
define pcodeop vpinsrb_avx ;
:VPINSRB XmmReg1, vexVVVV_XmmReg, Reg32_m8, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x20; (XmmReg1 & ZmmReg1) ... & Reg32_m8; imm8
{
	local tmp:16 = vpinsrb_avx( vexVVVV_XmmReg, Reg32_m8, imm8:1 );
	ZmmReg1 = zext(tmp);
}

# PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73324
define pcodeop vpinsrd_avx ;
:VPINSRD XmmReg1, vexVVVV_XmmReg, rm32, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ...
& rm32; imm8 { local tmp:16 = vpinsrd_avx( vexVVVV_XmmReg, rm32, imm8:1 ); ZmmReg1 = zext(tmp); } # PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73327 define pcodeop vpinsrq_avx ; @ifdef IA64 :VPINSRQ XmmReg1, vexVVVV_XmmReg, rm64, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ... & rm64; imm8 { local tmp:16 = vpinsrq_avx( vexVVVV_XmmReg, rm64, imm8:1 ); ZmmReg1 = zext(tmp); } @endif # PINSRW 4-296 PAGE 1416 LINE 73446 define pcodeop vpinsrw_avx ; :VPINSRW XmmReg1, vexVVVV_XmmReg, Reg32_m16, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xC4; (XmmReg1 & ZmmReg1) ... & Reg32_m16; imm8 { local tmp:16 = vpinsrw_avx( vexVVVV_XmmReg, Reg32_m16, imm8:1 ); ZmmReg1 = zext(tmp); } # PMADDUBSW 4-298 PAGE 1418 LINE 73552 define pcodeop vpmaddubsw_avx ; :VPMADDUBSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x04; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpmaddubsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMADDWD 4-301 PAGE 1421 LINE 73700 define pcodeop vpmaddwd_avx ; :VPMADDWD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF5; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpmaddwd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73882 define pcodeop vpmaxsb_avx ; :VPMAXSB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x3C; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpmaxsb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73885 define pcodeop vpmaxsw_avx ; :VPMAXSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpmaxsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73888 define pcodeop vpmaxsd_avx ; :VPMAXSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpmaxsd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74283 define pcodeop vpmaxub_avx ; :VPMAXUB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0xDE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpmaxub_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74286 define pcodeop vpmaxuw_avx ; :VPMAXUW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_XmmReg; byte=0x3E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpmaxuw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74534 define pcodeop vpmaxud_avx ; :VPMAXUD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpmaxud_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74736 define pcodeop vpminsb_avx ; :VPMINSB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_XmmReg; byte=0x38; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpminsb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74739 define pcodeop vpminsw_avx ; :VPMINSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0xEA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpminsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 74989 define pcodeop vpminsd_avx ; :VPMINSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpminsd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75195 define pcodeop vpminub_avx ; :VPMINUB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_XmmReg; byte=0xDA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpminub_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75198 define pcodeop vpminuw_avx ; :VPMINUW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_XmmReg; byte=0x3A; (XmmReg1 & ZmmReg1) ... 
# (tail of the VPMINUW constructor that begins on an earlier line)
& XmmReg2_m128
{ local tmp:16 = vpminuw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75445
define pcodeop vpminud_avx ;
:VPMINUD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpminud_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMOVSX 4-340 PAGE 1460 LINE 75770
define pcodeop vpmovsxbw_avx ;
:VPMOVSXBW XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x20; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vpmovsxbw_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# PMOVSX 4-340 PAGE 1460 LINE 75772
define pcodeop vpmovsxbd_avx ;
:VPMOVSXBD XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x21; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vpmovsxbd_avx( XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# PMOVSX 4-340 PAGE 1460 LINE 75774
define pcodeop vpmovsxbq_avx ;
:VPMOVSXBQ XmmReg1, XmmReg2_m16 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x22; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ local tmp:16 = vpmovsxbq_avx( XmmReg2_m16 ); ZmmReg1 = zext(tmp); }

# PMOVSX 4-340 PAGE 1460 LINE 75776
define pcodeop vpmovsxwd_avx ;
:VPMOVSXWD XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x23; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vpmovsxwd_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# PMOVSX 4-340 PAGE 1460 LINE 75778
define pcodeop vpmovsxwq_avx ;
:VPMOVSXWQ XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x24; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vpmovsxwq_avx( XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# PMOVSX 4-340 PAGE 1460 LINE 75780
define pcodeop vpmovsxdq_avx ;
:VPMOVSXDQ XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x25; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vpmovsxdq_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# PMOVZX 4-350 PAGE 1470 LINE 76285
define pcodeop vpmovzxbw_avx ;
:VPMOVZXBW XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x30; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vpmovzxbw_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# PMOVZX 4-350 PAGE 1470 LINE 76288
define pcodeop vpmovzxbd_avx ;
:VPMOVZXBD XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x31; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vpmovzxbd_avx( XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# PMOVZX 4-350 PAGE 1470 LINE 76291
define pcodeop vpmovzxbq_avx ;
:VPMOVZXBQ XmmReg1, XmmReg2_m16 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x32; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ local tmp:16 = vpmovzxbq_avx( XmmReg2_m16 ); ZmmReg1 = zext(tmp); }

# PMOVZX 4-350 PAGE 1470 LINE 76294
define pcodeop vpmovzxwd_avx ;
:VPMOVZXWD XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x33; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vpmovzxwd_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# PMOVZX 4-350 PAGE 1470 LINE 76297
define pcodeop vpmovzxwq_avx ;
:VPMOVZXWQ XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x34; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vpmovzxwq_avx( XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# PMOVZX 4-350 PAGE 1470 LINE 76301
define pcodeop vpmovzxdq_avx ;
:VPMOVZXDQ XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x35; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vpmovzxdq_avx( XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# PMULDQ 4-359 PAGE 1479 LINE 76788
define pcodeop vpmuldq_avx ;
:VPMULDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x28; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpmuldq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMULHRSW 4-362 PAGE 1482 LINE 76928
define pcodeop vpmulhrsw_avx ;
:VPMULHRSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpmulhrsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMULHUW 4-366 PAGE 1486 LINE 77141
define pcodeop vpmulhuw_avx ;
:VPMULHUW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE4; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpmulhuw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMULHW 4-370 PAGE 1490 LINE 77370
define pcodeop vpmulhw_avx ;
:VPMULHW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE5; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpmulhw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77576
define pcodeop vpmulld_avx ;
:VPMULLD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1) ...
# (tail of the VPMULLD constructor whose header ends the previous line)
& XmmReg2_m128
{ local tmp:16 = vpmulld_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMULLW 4-378 PAGE 1498 LINE 77775
define pcodeop vpmullw_avx ;
:VPMULLW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD5; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpmullw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PMULUDQ 4-382 PAGE 1502 LINE 77969
define pcodeop vpmuludq_avx ;
:VPMULUDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF4; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpmuludq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# POR 4-399 PAGE 1519 LINE 78850
define pcodeop vpor_avx ;
:VPOR XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpor_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSADBW 4-408 PAGE 1528 LINE 79240
define pcodeop vpsadbw_avx ;
:VPSADBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsadbw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSHUFB 4-412 PAGE 1532 LINE 79460
define pcodeop vpshufb_avx ;
:VPSHUFB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x00; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpshufb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSHUFD 4-416 PAGE 1536 LINE 79651
define pcodeop vpshufd_avx ;
:VPSHUFD XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x70; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vpshufd_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# PSHUFHW 4-420 PAGE 1540 LINE 79857
define pcodeop vpshufhw_avx ;
:VPSHUFHW XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x70; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vpshufhw_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# PSHUFLW 4-423 PAGE 1543 LINE 80032
define pcodeop vpshuflw_avx ;
:VPSHUFLW XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x70; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vpshuflw_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# PSIGNB/PSIGNW/PSIGND 4-427 PAGE 1547 LINE 80269
define pcodeop vpsignb_avx ;
:VPSIGNB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x08; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsignb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSIGNB/PSIGNW/PSIGND 4-427 PAGE 1547 LINE 80272
define pcodeop vpsignw_avx ;
:VPSIGNW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x09; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsignw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSIGNB/PSIGNW/PSIGND 4-427 PAGE 1547 LINE 80275
define pcodeop vpsignd_avx ;
:VPSIGND XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsignd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSLLDQ 4-431 PAGE 1551 LINE 80485
define pcodeop vpslldq_avx ;
:VPSLLDQ vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x73; reg_opcode=7 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpslldq_avx( XmmReg2, imm8:1 ); }

# PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80620
define pcodeop vpsllw_avx ;
:VPSLLW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF1; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsllw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80623
:VPSLLW vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x71; reg_opcode=6 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsllw_avx( XmmReg2, imm8:1 ); }

# PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80626
define pcodeop vpslld_avx ;
:VPSLLD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF2; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpslld_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80629
:VPSLLD vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x72; reg_opcode=6 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpslld_avx( XmmReg2, imm8:1 ); }

# PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80632
define pcodeop vpsllq_avx ;
:VPSLLQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF3; (XmmReg1 & ZmmReg1) ...
# (tail of the VPSLLQ constructor whose header ends the previous line)
& XmmReg2_m128
{ local tmp:16 = vpsllq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80635
:VPSLLQ vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x73; reg_opcode=6 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsllq_avx( XmmReg2, imm8:1 ); }

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81305
define pcodeop vpsraw_avx ;
:VPSRAW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE1; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsraw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81308
:VPSRAW vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x71; reg_opcode=4 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsraw_avx( XmmReg2, imm8:1 ); }

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81311
define pcodeop vpsrad_avx ;
:VPSRAD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsrad_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81314
:VPSRAD vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x72; reg_opcode=4 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsrad_avx( XmmReg2, imm8:1 ); }

# PSRLDQ 4-455 PAGE 1575 LINE 81873
define pcodeop vpsrldq_avx ;
:VPSRLDQ vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x73; reg_opcode=3 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsrldq_avx( XmmReg2, imm8:1 ); }

# PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82012
define pcodeop vpsrlw_avx ;
:VPSRLW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD1; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsrlw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82015
:VPSRLW vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x71; reg_opcode=2 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsrlw_avx( XmmReg2, imm8:1 ); }

# PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82018
define pcodeop vpsrld_avx ;
:VPSRLD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD2; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsrld_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82021
:VPSRLD vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x72; reg_opcode=2 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsrld_avx( XmmReg2, imm8:1 ); }

# PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82024
define pcodeop vpsrlq_avx ;
:VPSRLQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD3; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsrlq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82027
:VPSRLQ vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDD) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x73; reg_opcode=2 & (mod=0x3 & XmmReg2); imm8
{ vexVVVV_XmmReg = vpsrlq_avx( XmmReg2, imm8:1 ); }

# PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82689
define pcodeop vpsubb_avx ;
:VPSUBB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82691
define pcodeop vpsubw_avx ;
:VPSUBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xF9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82694
define pcodeop vpsubd_avx ;
:VPSUBD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFA; (XmmReg1 & ZmmReg1) ...
# (tail of the VPSUBD constructor whose header ends the previous line)
& XmmReg2_m128
{ local tmp:16 = vpsubd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBQ 4-476 PAGE 1596 LINE 83101
define pcodeop vpsubq_avx ;
:VPSUBQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xFB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83258
define pcodeop vpsubsb_avx ;
:VPSUBSB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubsb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83261
define pcodeop vpsubsw_avx ;
:VPSUBSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xE9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubsw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83498
define pcodeop vpsubusb_avx ;
:VPSUBUSB XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubusb_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83501
define pcodeop vpsubusw_avx ;
:VPSUBUSW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xD9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpsubusw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PTEST 4-487 PAGE 1607 LINE 83728
define pcodeop vptest_avx ;
:VPTEST XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x17; XmmReg1 ... & XmmReg2_m128
{ local val1 = XmmReg2_m128; local val2 = XmmReg1; ZF = (val1 & val2) == 0; CF = (val1 & ~val2) == 0; AF = 0; OF = 0; PF = 0; SF = 0; }

# PTEST 4-487 PAGE 1607 LINE 83730
:VPTEST YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x17; YmmReg1 ... & YmmReg2_m256
{ local val1 = YmmReg2_m256; local val2 = YmmReg1; ZF = (val1 & val2) == 0; CF = (val1 & ~val2) == 0; AF = 0; OF = 0; PF = 0; SF = 0; }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83929
define pcodeop vpunpckhbw_avx ;
:VPUNPCKHBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x68; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpckhbw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83932
define pcodeop vpunpckhwd_avx ;
:VPUNPCKHWD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x69; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpckhwd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83935
define pcodeop vpunpckhdq_avx ;
:VPUNPCKHDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x6A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpckhdq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83938
define pcodeop vpunpckhqdq_avx ;
:VPUNPCKHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x6D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpckhqdq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84529
define pcodeop vpunpcklbw_avx ;
:VPUNPCKLBW XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x60; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpcklbw_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84532
define pcodeop vpunpcklwd_avx ;
:VPUNPCKLWD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x61; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpcklwd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84535
define pcodeop vpunpckldq_avx ;
:VPUNPCKLDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x62; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpckldq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84538
define pcodeop vpunpcklqdq_avx ;
:VPUNPCKLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x6C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vpunpcklqdq_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# PXOR 4-518 PAGE 1638 LINE 85495
:VPXOR XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1) ...
# (tail of the VPXOR constructor whose header ends the previous line; XOR is modeled directly, no pcodeop)
& XmmReg2_m128
{ local tmp:16 = vexVVVV_XmmReg ^ XmmReg2_m128; ZmmReg1 = zext(tmp); }

# RCPPS 4-526 PAGE 1646 LINE 85950
define pcodeop vrcpps_avx ;
:VRCPPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x53; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vrcpps_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# RCPPS 4-526 PAGE 1646 LINE 85953
:VRCPPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x53; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vrcpps_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# RCPSS 4-528 PAGE 1648 LINE 86052
define pcodeop vrcpss_avx ;
:VRCPSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x53; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vrcpss_avx( vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# ROUNDPD 4-564 PAGE 1684 LINE 87791
define pcodeop vroundpd_avx ;
:VROUNDPD XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x09; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vroundpd_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# ROUNDPD 4-564 PAGE 1684 LINE 87795
:VROUNDPD YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x09; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{ local tmp:32 = vroundpd_avx( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); }

# ROUNDPS 4-567 PAGE 1687 LINE 87934
define pcodeop vroundps_avx ;
:VROUNDPS XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x08; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vroundps_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# ROUNDPS 4-567 PAGE 1687 LINE 87938
:VROUNDPS YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x08; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{ local tmp:32 = vroundps_avx( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); }

# ROUNDSD 4-570 PAGE 1690 LINE 88058
define pcodeop vroundsd_avx ;
:VROUNDSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64, imm8 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64; imm8
{ local tmp:16 = vroundsd_avx( vexVVVV_XmmReg, XmmReg2_m64, imm8:1 ); ZmmReg1 = zext(tmp); }

# ROUNDSS 4-572 PAGE 1692 LINE 88145
define pcodeop vroundss_avx ;
:VROUNDSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32, imm8 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32; imm8
{ local tmp:16 = vroundss_avx( vexVVVV_XmmReg, XmmReg2_m32, imm8:1 ); ZmmReg1 = zext(tmp); }

# RSQRTPS 4-576 PAGE 1696 LINE 88301
define pcodeop vrsqrtps_avx ;
:VRSQRTPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x52; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vrsqrtps_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# RSQRTPS 4-576 PAGE 1696 LINE 88304
:VRSQRTPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x52; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vrsqrtps_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# RSQRTSS 4-578 PAGE 1698 LINE 88399
define pcodeop vrsqrtss_avx ;
:VRSQRTSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x52; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vrsqrtss_avx( vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# SHUFPD 4-617 PAGE 1737 LINE 90223
define pcodeop vshufpd_avx ;
:VSHUFPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vshufpd_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# SHUFPD 4-617 PAGE 1737 LINE 90227
:VSHUFPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{ local tmp:32 = vshufpd_avx( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); }

# SHUFPS 4-622 PAGE 1742 LINE 90483
define pcodeop vshufps_avx ;
:VSHUFPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{ local tmp:16 = vshufps_avx( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); }

# SHUFPS 4-622 PAGE 1742 LINE 90486
:VSHUFPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{ local tmp:32 = vshufps_avx( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); }

# SQRTPD 4-632 PAGE 1752 LINE 91001
define pcodeop vsqrtpd_avx ;
:VSQRTPD XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vsqrtpd_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# SQRTPD 4-632 PAGE 1752 LINE 91004
:VSQRTPD YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x51; (YmmReg1 & ZmmReg1) ...
# (tail of the 256-bit VSQRTPD constructor whose header ends the previous line)
& YmmReg2_m256
{ local tmp:32 = vsqrtpd_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# SQRTPS 4-635 PAGE 1755 LINE 91133
define pcodeop vsqrtps_avx ;
:VSQRTPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vsqrtps_avx( XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# SQRTPS 4-635 PAGE 1755 LINE 91136
:VSQRTPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x51; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vsqrtps_avx( YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# SQRTSD 4-638 PAGE 1758 LINE 91272
define pcodeop vsqrtsd_avx ;
:VSQRTSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:16 = vsqrtsd_avx( vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); }

# SQRTSS 4-640 PAGE 1760 LINE 91367
define pcodeop vsqrtss_avx ;
:VSQRTSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:16 = vsqrtss_avx( vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); }

# STMXCSR 4-647 PAGE 1767 LINE 91697
define pcodeop vstmxcsr_avx ;
:VSTMXCSR m32 is $(VEX_NONE) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0xAE; reg_opcode=3 ... & m32
{ m32 = vstmxcsr_avx( ); }

# SUBPD 4-656 PAGE 1776 LINE 92116
define pcodeop vsubpd_avx ;
:VSUBPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vsubpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# SUBPD 4-656 PAGE 1776 LINE 92118
:VSUBPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vsubpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# SUBPS 4-659 PAGE 1779 LINE 92265
define pcodeop vsubps_avx ;
:VSUBPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vsubps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# SUBPS 4-659 PAGE 1779 LINE 92267
:VSUBPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vsubps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# SUBSD 4-662 PAGE 1782 LINE 92419
# scalar double subtract is modeled directly with p-code f- rather than a pcodeop
:VSUBSD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{ local tmp:8 = vexVVVV_XmmReg[0,64] f- XmmReg2_m64[0,64]; ZmmReg1 = zext(tmp); }

# SUBSS 4-664 PAGE 1784 LINE 92512
:VSUBSS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local tmp:4 = vexVVVV_XmmReg[0,32] f- XmmReg2_m32[0,32]; ZmmReg1 = zext(tmp); }

# UCOMISD 4-683 PAGE 1803 LINE 93421
:VUCOMISD XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x2E; XmmReg1 ... & XmmReg2_m64
{ val1:8 = XmmReg1[0,64]; val2:8 = XmmReg2_m64[0,64]; fucompe(val1, val2); }

# UCOMISS 4-685 PAGE 1805 LINE 93504
:VUCOMISS XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x2E; XmmReg1 ... & XmmReg2_m32
{ val1:4 = XmmReg1[0,32]; val2:4 = XmmReg2_m32[0,32]; fucompe(val1, val2); }

# UNPCKHPD 4-688 PAGE 1808 LINE 93623
define pcodeop vunpckhpd_avx ;
:VUNPCKHPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vunpckhpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# UNPCKHPD 4-688 PAGE 1808 LINE 93626
:VUNPCKHPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vunpckhpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# UNPCKHPS 4-692 PAGE 1812 LINE 93807
define pcodeop vunpckhps_avx ;
:VUNPCKHPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vunpckhps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# UNPCKHPS 4-692 PAGE 1812 LINE 93810
:VUNPCKHPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vunpckhps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# UNPCKLPD 4-696 PAGE 1816 LINE 94039
define pcodeop vunpcklpd_avx ;
:VUNPCKLPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vunpcklpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# UNPCKLPD 4-696 PAGE 1816 LINE 94042
:VUNPCKLPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vunpcklpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# UNPCKLPS 4-700 PAGE 1820 LINE 94225
define pcodeop vunpcklps_avx ;
:VUNPCKLPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{ local tmp:16 = vunpcklps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); }

# UNPCKLPS 4-700 PAGE 1820 LINE 94228
:VUNPCKLPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{ local tmp:32 = vunpcklps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); }

# VBROADCAST 5-12 PAGE 1836 LINE 94909
# replicate the 32-bit source into all four dword lanes of the XMM destination
:VBROADCASTSS XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{ local val:4 = XmmReg2_m32[0,32]; XmmReg1[0,32] = val; XmmReg1[32,32] = val; XmmReg1[64,32] = val; XmmReg1[96,32] = val; ZmmReg1 = zext(XmmReg1); }

# VBROADCAST 5-12 PAGE 1836 LINE 94911
:VBROADCASTSS YmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (YmmReg1 & ZmmReg1) ...
& XmmReg2_m32 { local val:4 = XmmReg2_m32[0,32]; YmmReg1[0,32] = val; YmmReg1[32,32] = val; YmmReg1[64,32] = val; YmmReg1[96,32] = val; YmmReg1[128,32] = val; YmmReg1[160,32] = val; YmmReg1[192,32] = val; YmmReg1[224,32] = val; ZmmReg1 = zext(YmmReg1); } # VBROADCAST 5-12 PAGE 1836 LINE 94913 :VBROADCASTSD YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x19; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local val:8 = XmmReg2_m64[0,64]; YmmReg1[0,64] = val; YmmReg1[64,64] = val; YmmReg1[128,64] = val; YmmReg1[192,64] = val; ZmmReg1 = zext(YmmReg1); } # VBROADCAST 5-12 PAGE 1836 LINE 94915 :VBROADCASTF128 YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local val:16 = XmmReg2_m128; YmmReg1[0,128] = val; YmmReg1[128,128] = val; ZmmReg1 = zext(YmmReg1); } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99102 :VEXTRACTF128 XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x19; (YmmReg1 & XmmReg2_m128_extend) ... & XmmReg2_m128; imm8 { local ext:1 = (imm8:1 & 1) == 1; local val:16 = YmmReg1[0,128]; if (ext == 0) goto ; val = YmmReg1[128,128]; XmmReg2_m128 = val; build XmmReg2_m128_extend; } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109703 :VINSERTF128 YmmReg1, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128; imm8
{
	local ext:1 = (imm8:1 & 1) == 1;
	local src1_0 = vexVVVV_YmmReg[0, 128];
	local src1_1 = vexVVVV_YmmReg[128, 128];
	src2:16 = XmmReg2_m128;
	YmmReg1[0,128] = src2;
	YmmReg1[128,128] = src1_1;
	# BUGFIX: the jump target was missing ("goto ;" is invalid SLEIGH).
	# imm8 bit 0 selects which 128-bit lane receives the xmm source;
	# the other lane is carried over from the first source operand.
	if (ext == 0) goto <done>;
	YmmReg1[0,128] = src1_0;
	YmmReg1[128,128] = src2;
	<done>
	ZmmReg1 = zext(YmmReg1);
}

# VMASKMOV 5-318 PAGE 2142 LINE 110151
define pcodeop vmaskmovps_avx ;
:VMASKMOVPS XmmReg1, vexVVVV_XmmReg, m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1) ... & m128
{
	local tmp:16 = vmaskmovps_avx( vexVVVV_XmmReg, m128 );
	ZmmReg1 = zext(tmp);
}

# VMASKMOV 5-318 PAGE 2142 LINE 110154
:VMASKMOVPS YmmReg1, vexVVVV_YmmReg, m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1) ... & m256
{
	local tmp:32 = vmaskmovps_avx( vexVVVV_YmmReg, m256 );
	ZmmReg1 = zext(tmp);
}

# VMASKMOV 5-318 PAGE 2142 LINE 110157
define pcodeop vmaskmovpd_avx ;
:VMASKMOVPD XmmReg1, vexVVVV_XmmReg, m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1) ... & m128
{
	local tmp:16 = vmaskmovpd_avx( vexVVVV_XmmReg, m128 );
	ZmmReg1 = zext(tmp);
}

# VMASKMOV 5-318 PAGE 2142 LINE 110160
:VMASKMOVPD YmmReg1, vexVVVV_YmmReg, m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x2D; (YmmReg1 & ZmmReg1) ... & m256
{
	local tmp:32 = vmaskmovpd_avx( vexVVVV_YmmReg, m256 );
	ZmmReg1 = zext(tmp);
}

# VMASKMOV 5-318 PAGE 2142 LINE 110163
:VMASKMOVPS m128, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2E; XmmReg1 ... & m128
{
	m128 = vmaskmovps_avx( vexVVVV_XmmReg, XmmReg1 );
}

# VMASKMOV 5-318 PAGE 2142 LINE 110166
:VMASKMOVPS m256, vexVVVV_YmmReg, YmmReg1 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x2E; YmmReg1 ...
& m256 { m256 = vmaskmovps_avx( vexVVVV_YmmReg, YmmReg1 ); } # VMASKMOV 5-318 PAGE 2142 LINE 110168 :VMASKMOVPD m128, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x2F; XmmReg1 ... & m128 { m128 = vmaskmovpd_avx( vexVVVV_XmmReg, XmmReg1 ); } # VMASKMOV 5-318 PAGE 2142 LINE 110171 :VMASKMOVPD m256, vexVVVV_YmmReg, YmmReg1 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x2F; YmmReg1 ... & m256 { m256 = vmaskmovpd_avx( vexVVVV_YmmReg, YmmReg1 ); } # VPERM2F128 5-358 PAGE 2182 LINE 112216 define pcodeop vperm2f128_avx ; :VPERM2F128 YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x06; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vperm2f128_avx( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # VPERMILPD 5-371 PAGE 2195 LINE 112860 define pcodeop vpermilpd_avx ; :VPERMILPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x0D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpermilpd_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VPERMILPD 5-371 PAGE 2195 LINE 112863 :VPERMILPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x0D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpermilpd_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPERMILPD 5-371 PAGE 2195 LINE 112875 :VPERMILPD XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x05; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128; imm8 { local tmp:16 = vpermilpd_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # VPERMILPD 5-371 PAGE 2195 LINE 112877 :VPERMILPD YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x05; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpermilpd_avx( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # VPERMILPS 5-376 PAGE 2200 LINE 113158 define pcodeop vpermilps_avx ; :VPERMILPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x0C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpermilps_avx( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VPERMILPS 5-376 PAGE 2200 LINE 113161 :VPERMILPS XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x04; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 { local tmp:16 = vpermilps_avx( XmmReg2_m128, imm8:1 ); ZmmReg1 = zext(tmp); } # VPERMILPS 5-376 PAGE 2200 LINE 113164 :VPERMILPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x0C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpermilps_avx( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPERMILPS 5-376 PAGE 2200 LINE 113167 :VPERMILPS YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x04; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpermilps_avx( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122257 define pcodeop vtestps_avx ; :VTESTPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0E; XmmReg1 ... 
& XmmReg2_m128
{
	local val1 = XmmReg2_m128;
	local val2 = XmmReg1;
	local ztest = val1 & val2;
	ZF = (ztest[31,1] | ztest[63,1] | ztest[95,1] | ztest[127,1]) == 0;
	local ctest = val1 & ~val2;
	CF = (ctest[31,1] | ctest[63,1] | ctest[95,1] | ctest[127,1]) == 0;
	AF = 0; OF = 0; PF = 0; SF = 0;
}

# VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122260
# VTESTPS tests the sign bit of each packed single, i.e. bits 31,63,...,255.
:VTESTPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0E; YmmReg1 ... & YmmReg2_m256
{
	local val1 = YmmReg2_m256;
	local val2 = YmmReg1;
	local ztest = val1 & val2;
	# BUGFIX: the sign bits of dwords 4 and 6 are bits 159 and 223;
	# the previous indices 160 and 224 tested the LOW bit of the next dword.
	ZF = (ztest[31,1] | ztest[63,1] | ztest[95,1] | ztest[127,1] | ztest[159,1] | ztest[191,1] | ztest[223,1] | ztest[255,1]) == 0;
	local ctest = val1 & ~val2;
	CF = (ctest[31,1] | ctest[63,1] | ctest[95,1] | ctest[127,1] | ctest[159,1] | ctest[191,1] | ctest[223,1] | ctest[255,1]) == 0;
	AF = 0; OF = 0; PF = 0; SF = 0;
}

# VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122263
define pcodeop vtestpd_avx ;
:VTESTPD XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0F; XmmReg1 ... & XmmReg2_m128
{
	local val1 = XmmReg2_m128;
	local val2 = XmmReg1;
	local ztest = val1 & val2;
	ZF = (ztest[63,1] | ztest[127,1]) == 0;
	local ctest = val1 & ~val2;
	CF = (ctest[63,1] | ctest[127,1]) == 0;
	AF = 0; OF = 0; PF = 0; SF = 0;
}

# VTESTPD/VTESTPS 5-560 PAGE 2384 LINE 122266
:VTESTPD YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x0F; YmmReg1 ... & YmmReg2_m256
{
	local val1 = YmmReg2_m256;
	local val2 = YmmReg1;
	local ztest = val1 & val2;
	ZF = (ztest[63,1] | ztest[127,1] | ztest[191,1] | ztest[255,1]) == 0;
	local ctest = val1 & ~val2;
	CF = (ctest[63,1] | ctest[127,1] | ctest[191,1] | ctest[255,1]) == 0;
	AF = 0; OF = 0; PF = 0; SF = 0;
}

# XORPD 5-596 PAGE 2420 LINE 123828
:VXORPD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128 { tmp:16 = XmmReg2_m128; XmmReg1[0,64] = ( vexVVVV_XmmReg[0,64] ^ tmp[0,64] ); XmmReg1[64,64] = ( vexVVVV_XmmReg[64,64] ^ tmp[64,64] ); ZmmReg1 = zext(XmmReg1); } # XORPD 5-596 PAGE 2420 LINE 123831 :VXORPD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { tmp:32 = YmmReg2_m256; YmmReg1[0,64] = ( vexVVVV_YmmReg[0,64] ^ tmp[0,64] ); YmmReg1[64,64] = ( vexVVVV_YmmReg[64,64] ^ tmp[64,64] ); YmmReg1[128,64] = ( vexVVVV_YmmReg[128,64] ^ tmp[128,64] ); YmmReg1[192,64] = ( vexVVVV_YmmReg[192,64] ^ tmp[192,64] ); ZmmReg1 = zext(YmmReg1); } # XORPS 5-599 PAGE 2423 LINE 123953 :VXORPS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { tmp:16 = XmmReg2_m128; XmmReg1[0,32] = ( vexVVVV_XmmReg[0,32] ^ tmp[0,32] ); XmmReg1[32,32] = ( vexVVVV_XmmReg[32,32] ^ tmp[32,32] ); XmmReg1[64,32] = ( vexVVVV_XmmReg[64,32] ^ tmp[64,32] ); XmmReg1[96,32] = ( vexVVVV_XmmReg[96,32] ^ tmp[96,32] ); ZmmReg1 = zext(XmmReg1); } # XORPS 5-599 PAGE 2423 LINE 123956 :VXORPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { tmp:32 = YmmReg2_m256; YmmReg1[0,32] = ( vexVVVV_YmmReg[0,32] ^ tmp[0,32] ); YmmReg1[32,32] = ( vexVVVV_YmmReg[32,32] ^ tmp[32,32] ); YmmReg1[64,32] = ( vexVVVV_YmmReg[64,32] ^ tmp[64,32] ); YmmReg1[96,32] = ( vexVVVV_YmmReg[96,32] ^ tmp[96,32] ); YmmReg1[128,32] = ( vexVVVV_YmmReg[128,32] ^ tmp[128,32] ); YmmReg1[160,32] = ( vexVVVV_YmmReg[160,32] ^ tmp[160,32] ); YmmReg1[192,32] = ( vexVVVV_YmmReg[192,32] ^ tmp[192,32] ); YmmReg1[224,32] = ( vexVVVV_YmmReg[224,32] ^ tmp[224,32] ); ZmmReg1 = zext(YmmReg1); } # INFO This file automatically generated by andre on Tue Apr 30 16:08:43 2024 # INFO Direct edits to this file may be lost in future updates # INFO Command line arguments: ['--cpuid-match', 'F16C', '--sinc', '--skip-sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx_manual.sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/ia.sinc'] # VCVTPH2PS 5-34 PAGE 1858 LINE 95957 define pcodeop vcvtph2ps_f16c ; :VCVTPH2PS XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x13; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vcvtph2ps_f16c( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VCVTPH2PS 5-34 PAGE 1858 LINE 95960 :VCVTPH2PS YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x13; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vcvtph2ps_f16c( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VCVTPS2PH 5-37 PAGE 1861 LINE 96110 define pcodeop vcvtps2ph_f16c ; :VCVTPS2PH XmmReg2_m64, XmmReg1, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x1D; XmmReg1 ... & XmmReg2_m64; imm8 { XmmReg2_m64 = vcvtps2ph_f16c( XmmReg1, imm8:1 ); } # VCVTPS2PH 5-37 PAGE 1861 LINE 96113 :VCVTPS2PH XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x1D; (YmmReg1 & XmmReg2_m128_extend) ... 
& XmmReg2_m128; imm8 { XmmReg2_m128 = vcvtps2ph_f16c( YmmReg1, imm8:1 ); build XmmReg2_m128_extend; } ================================================ FILE: pypcode/processors/x86/data/languages/avx2.sinc ================================================ # INFO This file automatically generated by andre on Tue Apr 30 16:10:11 2024 # INFO Direct edits to this file may be lost in future updates # INFO Command line arguments: ['--cpuid-match', 'AVX2', '--sinc', '--skip-sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx2_manual.sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/ia.sinc'] # MOVNTDQA 4-92 PAGE 1212 LINE 63086 define pcodeop vmovntdqa_avx2 ; :VMOVNTDQA YmmReg1, m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x2A; (YmmReg1 & ZmmReg1) ... & m256 { local tmp:32 = vmovntdqa_avx2( m256 ); ZmmReg1 = zext(tmp); } # MPSADBW 4-136 PAGE 1256 LINE 65140 define pcodeop vmpsadbw_avx2 ; :VMPSADBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x42; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vmpsadbw_avx2( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67311 define pcodeop vpabsb_avx2 ; :VPABSB YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x1C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpabsb_avx2( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67314 define pcodeop vpabsw_avx2 ; :VPABSW YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x1D; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpabsw_avx2( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67317 define pcodeop vpabsd_avx2 ; :VPABSD YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x1E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpabsd_avx2( YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67637 define pcodeop vpacksswb_avx2 ; :VPACKSSWB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x63; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpacksswb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67641 define pcodeop vpackssdw_avx2 ; :VPACKSSDW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x6B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpackssdw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PACKUSDW 4-194 PAGE 1314 LINE 68090 define pcodeop vpackusdw_avx2 ; :VPACKUSDW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_YmmReg; byte=0x2B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpackusdw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PACKUSWB 4-199 PAGE 1319 LINE 68370 define pcodeop vpackuswb_avx2 ; :VPACKUSWB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x67; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpackuswb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68666 define pcodeop vpaddb_avx2 ; :VPADDB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68668 define pcodeop vpaddw_avx2 ; :VPADDW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFD; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68670 define pcodeop vpaddd_avx2 ; :VPADDD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68672 define pcodeop vpaddq_avx2 ; :VPADDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD4; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69045 define pcodeop vpaddsb_avx2 ; :VPADDSB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEC; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpaddsb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69048 define pcodeop vpaddsw_avx2 ; :VPADDSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xED; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69263 define pcodeop vpaddusb_avx2 ; :VPADDUSB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddusb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69266 define pcodeop vpaddusw_avx2 ; :VPADDUSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDD; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpaddusw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PALIGNR 4-219 PAGE 1339 LINE 69489 define pcodeop vpalignr_avx2 ; :VPALIGNR YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpalignr_avx2( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # PAND 4-223 PAGE 1343 LINE 69680 define pcodeop vpand_avx2 ; :VPAND YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpand_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PANDN 4-226 PAGE 1346 LINE 69856 define pcodeop vpandn_avx2 ; :VPANDN YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpandn_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70091 define pcodeop vpavgb_avx2 ; :VPAVGB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE0; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpavgb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70094 define pcodeop vpavgw_avx2 ; :VPAVGW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE3; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpavgw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PBLENDVB 4-234 PAGE 1354 LINE 70300 define pcodeop vpblendvb_avx2 ; :VPBLENDVB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, Ymm_imm8_7_4 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x4C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; Ymm_imm8_7_4 { local tmp:32 = vpblendvb_avx2( vexVVVV_YmmReg, YmmReg2_m256, Ymm_imm8_7_4 ); ZmmReg1 = zext(tmp); } # PBLENDW 4-238 PAGE 1358 LINE 70525 define pcodeop vpblendw_avx2 ; :VPBLENDW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0E; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256; imm8 { local tmp:32 = vpblendw_avx2( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70830 define pcodeop vpcmpeqb_avx2 ; :VPCMPEQB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x74; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpcmpeqb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70833 define pcodeop vpcmpeqw_avx2 ; :VPCMPEQW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x75; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpcmpeqw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70837 define pcodeop vpcmpeqd_avx2 ; :VPCMPEQD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpcmpeqd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71508 define pcodeop vpcmpgtb_avx2 ; :VPCMPGTB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpcmpgtb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71511 define pcodeop vpcmpgtw_avx2 ; :VPCMPGTW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpcmpgtw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71514 define pcodeop vpcmpgtd_avx2 ; :VPCMPGTD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpcmpgtd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PCMPGTQ 4-263 PAGE 1383 LINE 71835 define pcodeop vpcmpgtq_avx2 ; :VPCMPGTQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x37; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpcmpgtq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PHADDW/PHADDD 4-280 PAGE 1400 LINE 72633 define pcodeop vphaddw_avx2 ; :VPHADDW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x01; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vphaddw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PHADDW/PHADDD 4-280 PAGE 1400 LINE 72636 define pcodeop vphaddd_avx2 ; :VPHADDD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x02; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vphaddd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PHADDSW 4-284 PAGE 1404 LINE 72824 define pcodeop vphaddsw_avx2 ; :VPHADDSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x03; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256
{
	local tmp:32 = vphaddsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# PHSUBW/PHSUBD 4-288 PAGE 1408 LINE 73038
define pcodeop vphsubw_avx2 ;
# BUGFIX (this and the two ops below): PHSUB* reads only its two sources;
# passing the old destination (YmmReg1) as an extra pcodeop input created a
# spurious data dependency on the previous register value, inconsistent with
# every other 2-source AVX2 op in this file.
:VPHSUBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x05; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
	local tmp:32 = vphsubw_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# PHSUBW/PHSUBD 4-288 PAGE 1408 LINE 73041
define pcodeop vphsubd_avx2 ;
:VPHSUBD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x06; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
	local tmp:32 = vphsubd_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# PHSUBSW 4-291 PAGE 1411 LINE 73200
define pcodeop vphsubsw_avx2 ;
:VPHSUBSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x07; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
	local tmp:32 = vphsubsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# PMADDUBSW 4-298 PAGE 1418 LINE 73555
define pcodeop vpmaddubsw_avx2 ;
:VPMADDUBSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x04; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
	local tmp:32 = vpmaddubsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# PMADDWD 4-301 PAGE 1421 LINE 73704
define pcodeop vpmaddwd_avx2 ;
:VPMADDWD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF5; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256 { local tmp:32 = vpmaddwd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73891 define pcodeop vpmaxsb_avx2 ; :VPMAXSB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x3C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmaxsb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73894 define pcodeop vpmaxsw_avx2 ; :VPMAXSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmaxsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73897 define pcodeop vpmaxsd_avx2 ; :VPMAXSD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmaxsd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74289 define pcodeop vpmaxub_avx2 ; :VPMAXUB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0xDE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmaxub_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74292 define pcodeop vpmaxuw_avx2 ; :VPMAXUW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_YmmReg; byte=0x3E; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpmaxuw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74537 define pcodeop vpmaxud_avx2 ; :VPMAXUD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmaxud_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74742 define pcodeop vpminsb_avx2 ; :VPMINSB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpminsb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74745 define pcodeop vpminsw_avx2 ; :VPMINSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0xEA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpminsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 74992 define pcodeop vpminsd_avx2 ; :VPMINSD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpminsd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75201 define pcodeop vpminub_avx2 ; :VPMINUB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & vexVVVV_YmmReg; byte=0xDA; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpminub_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMINUB/PMINUW 4-329 PAGE 1449 LINE 75204 define pcodeop vpminuw_avx2 ; :VPMINUW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & vexVVVV_YmmReg; byte=0x3A; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpminuw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75448 define pcodeop vpminud_avx2 ; :VPMINUD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpminud_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMOVSX 4-340 PAGE 1460 LINE 75782 define pcodeop vpmovsxbw_avx2 ; :VPMOVSXBW YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x20; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpmovsxbw_avx2( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMOVSX 4-340 PAGE 1460 LINE 75784 define pcodeop vpmovsxbd_avx2 ; :VPMOVSXBD YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x21; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:32 = vpmovsxbd_avx2( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # PMOVSX 4-340 PAGE 1460 LINE 75786 define pcodeop vpmovsxbq_avx2 ; :VPMOVSXBQ YmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x22; (YmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:32 = vpmovsxbq_avx2( XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # PMOVSX 4-340 PAGE 1460 LINE 75788 define pcodeop vpmovsxwd_avx2 ; :VPMOVSXWD YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x23; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:32 = vpmovsxwd_avx2( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMOVSX 4-340 PAGE 1460 LINE 75791 define pcodeop vpmovsxwq_avx2 ; :VPMOVSXWQ YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x24; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:32 = vpmovsxwq_avx2( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # PMOVSX 4-340 PAGE 1460 LINE 75793 define pcodeop vpmovsxdq_avx2 ; :VPMOVSXDQ YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x25; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpmovsxdq_avx2( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMOVZX 4-350 PAGE 1470 LINE 76304 define pcodeop vpmovzxbw_avx2 ; :VPMOVZXBW YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x30; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpmovzxbw_avx2( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMOVZX 4-350 PAGE 1470 LINE 76306 define pcodeop vpmovzxbd_avx2 ; :VPMOVZXBD YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x31; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:32 = vpmovzxbd_avx2( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # PMOVZX 4-350 PAGE 1470 LINE 76309 define pcodeop vpmovzxbq_avx2 ; :VPMOVZXBQ YmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x32; (YmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:32 = vpmovzxbq_avx2( XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # PMOVZX 4-350 PAGE 1470 LINE 76312 define pcodeop vpmovzxwd_avx2 ; :VPMOVZXWD YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x33; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:32 = vpmovzxwd_avx2( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMOVZX 4-350 PAGE 1470 LINE 76314 define pcodeop vpmovzxwq_avx2 ; :VPMOVZXWQ YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x34; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:32 = vpmovzxwq_avx2( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # PMOVZX 4-350 PAGE 1470 LINE 76317 define pcodeop vpmovzxdq_avx2 ; :VPMOVZXDQ YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0x35; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpmovzxdq_avx2( XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PMULDQ 4-359 PAGE 1479 LINE 76791 define pcodeop vpmuldq_avx2 ; :VPMULDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x28; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmuldq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMULHRSW 4-362 PAGE 1482 LINE 76931 define pcodeop vpmulhrsw_avx2 ; :VPMULHRSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0B; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmulhrsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMULHUW 4-366 PAGE 1486 LINE 77144 define pcodeop vpmulhuw_avx2 ; :VPMULHUW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE4; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmulhuw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMULHW 4-370 PAGE 1490 LINE 77373 define pcodeop vpmulhw_avx2 ; :VPMULHW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE5; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpmulhw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77579 define pcodeop vpmulld_avx2 ; :VPMULLD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmulld_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMULLW 4-378 PAGE 1498 LINE 77778 define pcodeop vpmullw_avx2 ; :VPMULLW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD5; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmullw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PMULUDQ 4-382 PAGE 1502 LINE 77973 define pcodeop vpmuludq_avx2 ; :VPMULUDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF4; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpmuludq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # POR 4-399 PAGE 1519 LINE 78852 define pcodeop vpor_avx2 ; :VPOR YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpor_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSADBW 4-408 PAGE 1528 LINE 79245 define pcodeop vpsadbw_avx2 ; :VPSADBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF6; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpsadbw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSHUFB 4-412 PAGE 1532 LINE 79463 define pcodeop vpshufb_avx2 ; :VPSHUFB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x00; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpshufb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSHUFD 4-416 PAGE 1536 LINE 79653 define pcodeop vpshufd_avx2 ; :VPSHUFD YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x70; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpshufd_avx2( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # PSHUFHW 4-420 PAGE 1540 LINE 79860 define pcodeop vpshufhw_avx2 ; :VPSHUFHW YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG); byte=0x70; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpshufhw_avx2( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # PSHUFLW 4-423 PAGE 1543 LINE 80035 define pcodeop vpshuflw_avx2 ; :VPSHUFLW YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x70; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpshuflw_avx2( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # PSIGNB/PSIGNW/PSIGND 4-427 PAGE 1547 LINE 80278 define pcodeop vpsignb_avx2 ; :VPSIGNB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x08; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsignb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSIGNB/PSIGNW/PSIGND 4-427 PAGE 1547 LINE 80281 define pcodeop vpsignw_avx2 ; :VPSIGNW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x09; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpsignw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSIGNB/PSIGNW/PSIGND 4-427 PAGE 1547 LINE 80284 define pcodeop vpsignd_avx2 ; :VPSIGND YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x0A; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsignd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSLLDQ 4-431 PAGE 1551 LINE 80488 define pcodeop vpslldq_avx2 ; :VPSLLDQ vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x73; reg_opcode=7 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpslldq_avx2( YmmReg2, imm8:1 ); } # PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80638 define pcodeop vpsllw_avx2 ; :VPSLLW YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF1; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpsllw_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSLLW/PSLLD/PSLLQ 4-433 PAGE 1553 LINE 80641 :VPSLLW vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x71; reg_opcode=6 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsllw_avx2( YmmReg2, imm8:1 ); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80656 define pcodeop vpslld_avx2 ; :VPSLLD YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF2; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:32 = vpslld_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80659 :VPSLLD vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x72; reg_opcode=6 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpslld_avx2( YmmReg2, imm8:1 ); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80662 define pcodeop vpsllq_avx2 ; :VPSLLQ YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF3; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpsllq_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80664 :VPSLLQ vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x73; reg_opcode=6 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsllq_avx2( YmmReg2, imm8:1 ); } # PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81317 define pcodeop vpsraw_avx2 ; :VPSRAW YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE1; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpsraw_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81320 :VPSRAW vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x71; reg_opcode=4 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsraw_avx2( YmmReg2, imm8:1 ); } # PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81323 define pcodeop vpsrad_avx2 ; :VPSRAD YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:32 = vpsrad_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81326 :VPSRAD vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x72; reg_opcode=4 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsrad_avx2( YmmReg2, imm8:1 ); } # PSRLDQ 4-455 PAGE 1575 LINE 81876 define pcodeop vpsrldq_avx2 ; :VPSRLDQ vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x73; reg_opcode=3 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsrldq_avx2( YmmReg2, imm8:1 ); } # PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82030 define pcodeop vpsrlw_avx2 ; :VPSRLW YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD1; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpsrlw_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSRLW/PSRLD/PSRLQ 4-457 PAGE 1577 LINE 82033 :VPSRLW vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x71; reg_opcode=2 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsrlw_avx2( YmmReg2, imm8:1 ); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82048 define pcodeop vpsrld_avx2 ; :VPSRLD YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD2; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:32 = vpsrld_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82051 :VPSRLD vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x72; reg_opcode=2 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsrld_avx2( YmmReg2, imm8:1 ); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82054 define pcodeop vpsrlq_avx2 ; :VPSRLQ YmmReg1, vexVVVV_YmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD3; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:32 = vpsrlq_avx2( vexVVVV_YmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82056 :VPSRLQ vexVVVV_YmmReg, YmmReg2, imm8 is $(VEX_NDD) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x73; reg_opcode=2 & (mod=0x3 & YmmReg2); imm8 { vexVVVV_YmmReg = vpsrlq_avx2( YmmReg2, imm8:1 ); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82696 define pcodeop vpsubb_avx2 ; :VPSUBB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsubb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82698 define pcodeop vpsubw_avx2 ; :VPSUBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xF9; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsubw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82700 define pcodeop vpsubd_avx2 ; :VPSUBD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFA; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpsubd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBQ 4-476 PAGE 1596 LINE 83104 define pcodeop vpsubq_avx2 ; :VPSUBQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xFB; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsubq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83264 define pcodeop vpsubsb_avx2 ; :VPSUBSB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsubsb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83267 define pcodeop vpsubsw_avx2 ; :VPSUBSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xE9; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsubsw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83504 define pcodeop vpsubusb_avx2 ; :VPSUBUSB YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsubusb_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83507 define pcodeop vpsubusw_avx2 ; :VPSUBUSW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xD9; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpsubusw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83940 define pcodeop vpunpckhbw_avx2 ; :VPUNPCKHBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x68; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpckhbw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83942 define pcodeop vpunpckhwd_avx2 ; :VPUNPCKHWD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x69; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpckhwd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83944 define pcodeop vpunpckhdq_avx2 ; :VPUNPCKHDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x6A; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpckhdq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83946 define pcodeop vpunpckhqdq_avx2 ; :VPUNPCKHQDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x6D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpckhqdq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84541 define pcodeop vpunpcklbw_avx2 ; :VPUNPCKLBW YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x60; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:32 = vpunpcklbw_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84544 define pcodeop vpunpcklwd_avx2 ; :VPUNPCKLWD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x61; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpcklwd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84547 define pcodeop vpunpckldq_avx2 ; :VPUNPCKLDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x62; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpckldq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84550 define pcodeop vpunpcklqdq_avx2 ; :VPUNPCKLQDQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x6C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpunpcklqdq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # PXOR 4-518 PAGE 1638 LINE 85497 :VPXOR YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vexVVVV_YmmReg ^ YmmReg2_m256; ZmmReg1 = zext(tmp); } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99432 :VEXTRACTI128 XmmReg2_m128, YmmReg1, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x39; (YmmReg1 & XmmReg2_m128_extend) ... 
& XmmReg2_m128; imm8
{
    # VEXTRACTI128: copy the 128-bit lane of YmmReg1 selected by imm8 bit 0
    # into XmmReg2_m128 (bit 0 == 0 -> low lane, bit 0 == 1 -> high lane).
    local ext:1 = (imm8:1 & 1) == 1;
    local val:16 = YmmReg1[0,128];
    # FIX: the branch target was lost in extraction ("goto ;" is invalid
    # SLEIGH); restore the <done> label so bit0==0 keeps the low lane.
    if (ext == 0) goto <done>;
    val = YmmReg1[128,128];
    <done>
    XmmReg2_m128 = val;
    build XmmReg2_m128_extend;
}

# VPBLENDD 5-321 PAGE 2145 LINE 110309
define pcodeop vpblendd_avx2 ;
:VPBLENDD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x02; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
{
    # Blend semantics modeled as an opaque pcodeop; result zero-extended into ZMM.
    local tmp:16 = vpblendd_avx2( vexVVVV_XmmReg, XmmReg2_m128, imm8:1 );
    ZmmReg1 = zext(tmp);
}

# VPBLENDD 5-321 PAGE 2145 LINE 110312
:VPBLENDD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x02; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{
    local tmp:32 = vpblendd_avx2( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 );
    ZmmReg1 = zext(tmp);
}

# VPBROADCAST 5-331 PAGE 2155 LINE 110776
:VPBROADCASTB XmmReg1, XmmReg2_m8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (XmmReg1 & ZmmReg1) ... & XmmReg2_m8
{
    # Replicate the source byte into all 16 byte lanes of XmmReg1.
    local val:1 = XmmReg2_m8[0,8];
    XmmReg1[0,8] = val;
    XmmReg1[8,8] = val;
    XmmReg1[16,8] = val;
    XmmReg1[24,8] = val;
    XmmReg1[32,8] = val;
    XmmReg1[40,8] = val;
    XmmReg1[48,8] = val;
    XmmReg1[56,8] = val;
    XmmReg1[64,8] = val;
    XmmReg1[72,8] = val;
    XmmReg1[80,8] = val;
    XmmReg1[88,8] = val;
    XmmReg1[96,8] = val;
    XmmReg1[104,8] = val;
    XmmReg1[112,8] = val;
    XmmReg1[120,8] = val;
    ZmmReg1 = zext(XmmReg1);
}

# VPBROADCAST 5-331 PAGE 2155 LINE 110778
:VPBROADCASTB YmmReg1, XmmReg2_m8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (YmmReg1 & ZmmReg1) ...
& XmmReg2_m8 { local val:1 = XmmReg2_m8[0,8]; YmmReg1[0,8] = val; YmmReg1[8,8] = val; YmmReg1[16,8] = val; YmmReg1[24,8] = val; YmmReg1[32,8] = val; YmmReg1[40,8] = val; YmmReg1[48,8] = val; YmmReg1[56,8] = val; YmmReg1[64,8] = val; YmmReg1[72,8] = val; YmmReg1[80,8] = val; YmmReg1[88,8] = val; YmmReg1[96,8] = val; YmmReg1[104,8] = val; YmmReg1[112,8] = val; YmmReg1[120,8] = val; YmmReg1[128,8] = val; YmmReg1[136,8] = val; YmmReg1[144,8] = val; YmmReg1[152,8] = val; YmmReg1[160,8] = val; YmmReg1[168,8] = val; YmmReg1[176,8] = val; YmmReg1[184,8] = val; YmmReg1[192,8] = val; YmmReg1[200,8] = val; YmmReg1[208,8] = val; YmmReg1[216,8] = val; YmmReg1[224,8] = val; YmmReg1[232,8] = val; YmmReg1[240,8] = val; YmmReg1[248,8] = val; ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110787 :VPBROADCASTW XmmReg1, XmmReg2_m16 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16 { local val:2 = XmmReg2_m16[0,16]; XmmReg1[0,16] = val; XmmReg1[16,16] = val; XmmReg1[32,16] = val; XmmReg1[48,16] = val; XmmReg1[64,16] = val; XmmReg1[80,16] = val; XmmReg1[96,16] = val; XmmReg1[112,16] = val; ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110789 :VPBROADCASTW YmmReg1, XmmReg2_m16 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m16 { local val:2 = XmmReg2_m16[0,16]; YmmReg1[0,16] = val; YmmReg1[16,16] = val; YmmReg1[32,16] = val; YmmReg1[48,16] = val; YmmReg1[64,16] = val; YmmReg1[80,16] = val; YmmReg1[96,16] = val; YmmReg1[112,16] = val; YmmReg1[128,16] = val; YmmReg1[144,16] = val; YmmReg1[160,16] = val; YmmReg1[176,16] = val; YmmReg1[192,16] = val; YmmReg1[208,16] = val; YmmReg1[224,16] = val; YmmReg1[240,16] = val; ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110800 :VPBROADCASTD XmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local val:4 = XmmReg2_m32[0,32]; XmmReg1[0,32] = val; XmmReg1[32,32] = val; XmmReg1[64,32] = val; XmmReg1[96,32] = val; ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110802 :VPBROADCASTD YmmReg1, XmmReg2_m32 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (YmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local val:4 = XmmReg2_m32[0,32]; YmmReg1[0,32] = val; YmmReg1[32,32] = val; YmmReg1[64,32] = val; YmmReg1[96,32] = val; YmmReg1[128,32] = val; YmmReg1[160,32] = val; YmmReg1[192,32] = val; YmmReg1[224,32] = val; ZmmReg1 = zext(YmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110813 :VPBROADCASTQ XmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local val:8 = XmmReg2_m64[0,64]; XmmReg1[0,64] = val; XmmReg1[64,64] = val; ZmmReg1 = zext(XmmReg1); } # VPBROADCAST 5-331 PAGE 2155 LINE 110815 :VPBROADCASTQ YmmReg1, XmmReg2_m64 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m64
{
    # VPBROADCASTQ (256-bit form): replicate the 64-bit source into all four
    # quadword lanes of YmmReg1; upper ZMM bits cleared by the zext below.
    local val:8 = XmmReg2_m64[0,64];
    YmmReg1[0,64] = val;
    YmmReg1[64,64] = val;
    YmmReg1[128,64] = val;
    YmmReg1[192,64] = val;
    ZmmReg1 = zext(YmmReg1);
}

# VPBROADCAST 5-332 PAGE 2156 LINE 110843
:VBROADCASTI128 YmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x5A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
    # Replicate the 128-bit source into both lanes of YmmReg1.
    local val:16 = XmmReg2_m128;
    YmmReg1[0,128] = val;
    YmmReg1[128,128] = val;
    ZmmReg1 = zext(YmmReg1);
}

# VPERM2I128 5-360 PAGE 2184 LINE 112312
define pcodeop vperm2i128_avx2 ;
:VPERM2I128 YmmReg1, vexVVVV_YmmReg, YmmReg2_m256, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{
    # Permutation semantics modeled as an opaque pcodeop; result zero-extended
    # into the full ZMM destination (VEX-encoded ops clear the upper bits).
    local tmp:32 = vperm2i128_avx2( vexVVVV_YmmReg, YmmReg2_m256, imm8:1 );
    ZmmReg1 = zext(tmp);
}

# VPERMD/VPERMW 5-362 PAGE 2186 LINE 112405
define pcodeop vpermd_avx2 ;
:VPERMD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
    local tmp:32 = vpermd_avx2( vexVVVV_YmmReg, YmmReg2_m256 );
    ZmmReg1 = zext(tmp);
}

# VPERMPD 5-381 PAGE 2205 LINE 113452
define pcodeop vpermpd_avx2 ;
:VPERMPD YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x01; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
{
    local tmp:32 = vpermpd_avx2( YmmReg2_m256, imm8:1 );
    ZmmReg1 = zext(tmp);
}

# VPERMPS 5-384 PAGE 2208 LINE 113633
define pcodeop vpermps_avx2 ;
:VPERMPS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256 { local tmp:32 = vpermps_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPERMQ 5-387 PAGE 2211 LINE 113768 define pcodeop vpermq_avx2 ; :VPERMQ YmmReg1, YmmReg2_m256, imm8 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x00; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 { local tmp:32 = vpermq_avx2( YmmReg2_m256, imm8:1 ); ZmmReg1 = zext(tmp); } # VPMASKMOV 5-397 PAGE 2221 LINE 114262 define pcodeop vpmaskmovd_avx2 ; :VPMASKMOVD XmmReg1, vexVVVV_XmmReg, m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x8C; (XmmReg1 & ZmmReg1) ... & m128 { local tmp:16 = vpmaskmovd_avx2( vexVVVV_XmmReg, m128 ); ZmmReg1 = zext(tmp); } # VPMASKMOV 5-397 PAGE 2221 LINE 114264 :VPMASKMOVD YmmReg1, vexVVVV_YmmReg, m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x8C; (YmmReg1 & ZmmReg1) ... & m256 { local tmp:32 = vpmaskmovd_avx2( vexVVVV_YmmReg, m256 ); ZmmReg1 = zext(tmp); } # VPMASKMOV 5-397 PAGE 2221 LINE 114266 define pcodeop vpmaskmovq_avx2 ; :VPMASKMOVQ XmmReg1, vexVVVV_XmmReg, m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x8C; (XmmReg1 & ZmmReg1) ... & m128 { local tmp:16 = vpmaskmovq_avx2( vexVVVV_XmmReg, m128 ); ZmmReg1 = zext(tmp); } # VPMASKMOV 5-397 PAGE 2221 LINE 114268 :VPMASKMOVQ YmmReg1, vexVVVV_YmmReg, m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x8C; (YmmReg1 & ZmmReg1) ... & m256 { local tmp:32 = vpmaskmovq_avx2( vexVVVV_YmmReg, m256 ); ZmmReg1 = zext(tmp); } # VPMASKMOV 5-397 PAGE 2221 LINE 114270 :VPMASKMOVD m128, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x8E; XmmReg1 ... 
& m128
{
    # VPMASKMOVD store form (128-bit): memory destination on the left.
    # Mask/merge semantics are opaque (pcodeop) — presumably a per-dword
    # conditional store under the vexVVVV_XmmReg mask; TODO confirm if full
    # VSIB/mask modeling is ever added.
    m128 = vpmaskmovd_avx2( vexVVVV_XmmReg, XmmReg1 );
}

# VPMASKMOV 5-397 PAGE 2221 LINE 114272
:VPMASKMOVD m256, vexVVVV_YmmReg, YmmReg1 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x8E; YmmReg1 ... & m256
{
    m256 = vpmaskmovd_avx2( vexVVVV_YmmReg, YmmReg1 );
}

# VPMASKMOV 5-397 PAGE 2221 LINE 114274
:VPMASKMOVQ m128, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x8E; XmmReg1 ... & m128
{
    m128 = vpmaskmovq_avx2( vexVVVV_XmmReg, XmmReg1 );
}

# VPMASKMOV 5-397 PAGE 2221 LINE 114276
:VPMASKMOVQ m256, vexVVVV_YmmReg, YmmReg1 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x8E; YmmReg1 ... & m256
{
    m256 = vpmaskmovq_avx2( vexVVVV_YmmReg, YmmReg1 );
}

# VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116620
define pcodeop vpsllvd_avx2 ;
:VPSLLVD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
    # Variable shift modeled as an opaque pcodeop; result zero-extended into ZMM.
    local tmp:16 = vpsllvd_avx2( vexVVVV_XmmReg, XmmReg2_m128 );
    ZmmReg1 = zext(tmp);
}

# VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116623
define pcodeop vpsllvq_avx2 ;
:VPSLLVQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
    local tmp:16 = vpsllvq_avx2( vexVVVV_XmmReg, XmmReg2_m128 );
    ZmmReg1 = zext(tmp);
}

# VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116626
:VPSLLVD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256 { local tmp:32 = vpsllvd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116629 :VPSLLVQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsllvq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116874 define pcodeop vpsravd_avx2 ; :VPSRAVD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpsravd_avx2( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116877 :VPSRAVD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsravd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117139 define pcodeop vpsrlvd_avx2 ; :VPSRLVD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vpsrlvd_avx2( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117142 define pcodeop vpsrlvq_avx2 ; :VPSRLVQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vpsrlvq_avx2( vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117145 :VPSRLVD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsrlvd_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117148 :VPSRLVQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:32 = vpsrlvq_avx2( vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } ================================================ FILE: pypcode/processors/x86/data/languages/avx2_manual.sinc ================================================ # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109785 define pcodeop vinserti128 ; :VINSERTI128 YmmReg1, vexVVVV_YmmReg, XmmReg2_m128, imm8 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x38; YmmReg1 ... & XmmReg2_m128; imm8 & imm8_0 { local tmp:16 = XmmReg2_m128; local cond0:16 = zext((imm8_0:1 & 0x1) == 0); local cond1:16 = zext((imm8_0:1 & 0x1) == 1); # ignoring all but the least significant bit YmmReg1[0,128] = (cond0 * tmp) + (cond1 * vexVVVV_YmmReg[0,128]); YmmReg1[128,128] = (cond0 * vexVVVV_YmmReg[128,128]) + (cond1 * tmp); } # VGATHERDPD/VGATHERQPD 5-251 PAGE 2075 LINE 106903 define pcodeop vgatherdpd ; :VGATHERDPD XmmReg1, q_vm32x, vexVVVV_XmmReg is $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x92; (XmmReg1 & ZmmReg1) ... 
& q_vm32x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vgatherdpd(XmmReg1, q_vm32x, vexVVVV_XmmReg); local tmp:16 = vgatherdpd(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } # VGATHERDPD/VGATHERQPD 5-251 PAGE 2075 LINE 106908 @ifdef IA64 define pcodeop vgatherqpd ; :VGATHERQPD XmmReg1, q_vm64x, vexVVVV_XmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x93; (XmmReg1 & ZmmReg1) ... & q_vm64x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vgatherqpd(XmmReg1, q_vm64x, vexVVVV_XmmReg); local tmp:16 = vgatherqpd(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } @endif # VGATHERDPD/VGATHERQPD 5-251 PAGE 2075 LINE 106913 :VGATHERDPD YmmReg1, q_vm32x, vexVVVV_YmmReg is $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x92; (YmmReg1 & ZmmReg1) ... & q_vm32x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # YmmReg1 = vgatherdpd(YmmReg1, q_vm32x, vexVVVV_YmmReg); YmmReg1 = vgatherdpd(YmmReg1, vexVVVV_YmmReg); ZmmReg1 = zext(YmmReg1); vexVVVV_YmmReg = 0; } # VGATHERDPD/VGATHERQPD 5-251 PAGE 2075 LINE 106918 @ifdef IA64 :VGATHERQPD YmmReg1, q_vm64y, vexVVVV_YmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x93; (YmmReg1 & ZmmReg1) ... 
& q_vm64y { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # YmmReg1 = vgatherqpd(YmmReg1, q_vm64y, vexVVVV_YmmReg); YmmReg1 = vgatherqpd(YmmReg1, vexVVVV_YmmReg); ZmmReg1 = zext(YmmReg1); vexVVVV_YmmReg = 0; } @endif # VGATHERDPS/VGATHERQPS 5-256 PAGE 2080 LINE 107130 define pcodeop vgatherdps ; :VGATHERDPS XmmReg1, d_vm32x, vexVVVV_XmmReg is $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x92; (XmmReg1 & ZmmReg1) ... & d_vm32x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vgatherdps(XmmReg1, d_vm32x, vexVVVV_XmmReg); local tmp:16 = vgatherdps(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } # VGATHERDPS/VGATHERQPS 5-256 PAGE 2080 LINE 107135 @ifdef IA64 define pcodeop vgatherqps ; :VGATHERQPS XmmReg1, d_vm64x, vexVVVV_XmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x93; (XmmReg1 & ZmmReg1) ... & d_vm64x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vgatherqps(XmmReg1, d_vm64x, vexVVVV_XmmReg); local tmp:16 = vgatherqps(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } @endif # VGATHERDPS/VGATHERQPS 5-256 PAGE 2080 LINE 107140 :VGATHERDPS YmmReg1, d_vm32y, vexVVVV_YmmReg is $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x92; (YmmReg1 & ZmmReg1) ... 
& d_vm32y { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # YmmReg1 = vgatherdps(YmmReg1, d_vm32y, vexVVVV_YmmReg); YmmReg1 = vgatherdps(YmmReg1, vexVVVV_YmmReg); ZmmReg1 = zext(YmmReg1); vexVVVV_YmmReg = 0; } # VGATHERDPS/VGATHERQPS 5-256 PAGE 2080 LINE 107145 @ifdef IA64 :VGATHERQPS XmmReg1, d_vm64y, vexVVVV_XmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x93; (XmmReg1 & ZmmReg1) ... & d_vm64y { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vgatherqps(XmmReg1, d_vm64y, vexVVVV_XmmReg); XmmReg1 = vgatherqps(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(XmmReg1); vexVVVV_XmmReg = 0; } @endif # PCMPEQQ 4-250 PAGE 1370 LINE 71171 :VPCMPEQQ YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_YmmReg; byte=0x29; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local mask:8 = 0xffffffffffffffff; YmmReg1[0,64] = zext(vexVVVV_YmmReg[0,64] == YmmReg2_m256[0,64]) * mask; YmmReg1[64,64] = zext(vexVVVV_YmmReg[64,64] == YmmReg2_m256[64,64]) * mask; YmmReg1[128,64] = zext(vexVVVV_YmmReg[128,64] == YmmReg2_m256[128,64]) * mask; YmmReg1[192,64] = zext(vexVVVV_YmmReg[192,64] == YmmReg2_m256[192,64]) * mask; ZmmReg1 = zext(YmmReg1); } # VPGATHERDD/VPGATHERQD 5-273 PAGE 2097 LINE 107884 define pcodeop vpgatherdd ; :VPGATHERDD XmmReg1, d_vm32x, vexVVVV_XmmReg is $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x90; (XmmReg1 & ZmmReg1) ... 
& d_vm32x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vpgatherdd(XmmReg1, d_vm32x, vexVVVV_XmmReg); local tmp:16 = vpgatherdd(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } # VPGATHERDD/VPGATHERQD 5-273 PAGE 2097 LINE 107888 @ifdef IA64 define pcodeop vpgatherqd ; :VPGATHERQD XmmReg1, d_vm64x, vexVVVV_XmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x91; (XmmReg1 & ZmmReg1) ... & d_vm64x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vpgatherqd(XmmReg1, d_vm64x, vexVVVV_XmmReg); local tmp:16 = vpgatherqd(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } @endif # VPGATHERDD/VPGATHERQD 5-273 PAGE 2097 LINE 107892 :VPGATHERDD YmmReg1, d_vm32y, vexVVVV_YmmReg is $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x90; (YmmReg1 & ZmmReg1) ... & d_vm32y { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # YmmReg1 = vpgatherdd(YmmReg1, d_vm32y, vexVVVV_YmmReg); YmmReg1 = vpgatherdd(YmmReg1, vexVVVV_YmmReg); ZmmReg1 = zext(YmmReg1); vexVVVV_YmmReg = 0; } # VPGATHERDD/VPGATHERQD 5-273 PAGE 2097 LINE 107896 @ifdef IA64 :VPGATHERQD XmmReg1, d_vm64y, vexVVVV_XmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x91; (XmmReg1 & ZmmReg1) ... 
& d_vm64y { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vpgatherqd(XmmReg1, d_vm64y, vexVVVV_XmmReg); local tmp:16 = vpgatherqd(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } @endif # VPGATHERDQ/VPGATHERQQ 5-280 PAGE 2104 LINE 108234 define pcodeop vpgatherdq ; :VPGATHERDQ XmmReg1, q_vm32x, vexVVVV_XmmReg is $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x90; (XmmReg1 & ZmmReg1) ... & q_vm32x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vpgatherdq(XmmReg1, q_vm32x, vexVVVV_XmmReg); local tmp:16 = vpgatherdq(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } # VPGATHERDQ/VPGATHERQQ 5-280 PAGE 2104 LINE 108238 @ifdef IA64 define pcodeop vpgatherqq ; :VPGATHERQQ XmmReg1, q_vm64x, vexVVVV_XmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x91; (XmmReg1 & ZmmReg1) ... & q_vm64x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # XmmReg1 = vpgatherqq(XmmReg1, q_vm64x, vexVVVV_XmmReg); local tmp:16 = vpgatherqq(XmmReg1, vexVVVV_XmmReg); ZmmReg1 = zext(tmp); vexVVVV_XmmReg = 0; } @endif # VPGATHERDQ/VPGATHERQQ 5-280 PAGE 2104 LINE 108242 :VPGATHERDQ YmmReg1, q_vm32x, vexVVVV_YmmReg is $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x90; (YmmReg1 & ZmmReg1) ... 
& q_vm32x { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # YmmReg1 = vpgatherdq(YmmReg1, q_vm32x, vexVVVV_YmmReg); YmmReg1 = vpgatherdq(YmmReg1, vexVVVV_YmmReg); ZmmReg1 = zext(YmmReg1); vexVVVV_YmmReg = 0; } # VPGATHERDQ/VPGATHERQQ 5-280 PAGE 2104 LINE 108246 @ifdef IA64 :VPGATHERQQ YmmReg1, q_vm64y, vexVVVV_YmmReg is $(LONGMODE_ON) & $(VEX_DDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x91; (YmmReg1 & ZmmReg1) ... & q_vm64y { # TODO full semantics necessary for VSIB memory data access, leave out of data flow for now # YmmReg1 = vpgatherqq(YmmReg1, q_vm64y, vexVVVV_YmmReg); YmmReg1 = vpgatherqq(YmmReg1, vexVVVV_YmmReg); ZmmReg1 = zext(YmmReg1); vexVVVV_YmmReg = 0; } @endif # PMOVMSKB 4-338 PAGE 1458 LINE 75655 :VPMOVMSKB Reg32, YmmReg2 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD7; Reg32 & (mod=0x3 & YmmReg2) & check_Reg32_dest { local byte_mask:4 = 0:4; byte_mask[0,1] = YmmReg2[7,1]; byte_mask[1,1] = YmmReg2[15,1]; byte_mask[2,1] = YmmReg2[23,1]; byte_mask[3,1] = YmmReg2[31,1]; byte_mask[4,1] = YmmReg2[39,1]; byte_mask[5,1] = YmmReg2[47,1]; byte_mask[6,1] = YmmReg2[55,1]; byte_mask[7,1] = YmmReg2[63,1]; byte_mask[8,1] = YmmReg2[71,1]; byte_mask[9,1] = YmmReg2[79,1]; byte_mask[10,1] = YmmReg2[87,1]; byte_mask[11,1] = YmmReg2[95,1]; byte_mask[12,1] = YmmReg2[103,1]; byte_mask[13,1] = YmmReg2[111,1]; byte_mask[14,1] = YmmReg2[119,1]; byte_mask[15,1] = YmmReg2[127,1]; byte_mask[16,1] = YmmReg2[135,1]; byte_mask[17,1] = YmmReg2[143,1]; byte_mask[18,1] = YmmReg2[151,1]; byte_mask[19,1] = YmmReg2[159,1]; byte_mask[20,1] = YmmReg2[167,1]; byte_mask[21,1] = YmmReg2[175,1]; byte_mask[22,1] = YmmReg2[183,1]; byte_mask[23,1] = YmmReg2[191,1]; byte_mask[24,1] = YmmReg2[199,1]; byte_mask[25,1] = YmmReg2[207,1]; byte_mask[26,1] = YmmReg2[215,1]; byte_mask[27,1] = YmmReg2[223,1]; byte_mask[28,1] = YmmReg2[231,1]; byte_mask[29,1] = YmmReg2[239,1]; 
byte_mask[30,1] = YmmReg2[247,1]; byte_mask[31,1] = YmmReg2[255,1]; Reg32 = zext(byte_mask); build check_Reg32_dest; } ================================================ FILE: pypcode/processors/x86/data/languages/avx512.sinc ================================================ # INFO This file automatically generated by andre on Wed May 8 15:10:16 2024 # INFO Direct edits to this file may be lost in future updates # INFO Command line arguments: ['--cpuid-match', 'AVX512', '--sinc', '--skip-sinc', '../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx512_manual.sinc'] # ADDPD 3-33 PAGE 603 LINE 33411 define pcodeop vaddpd_avx512vl ; :VADDPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vaddpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # ADDPD 3-33 PAGE 603 LINE 33414 :VADDPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vaddpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # ADDPD 3-33 PAGE 603 LINE 33417 define pcodeop vaddpd_avx512f ; :VADDPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x58; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vaddpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # ADDPS 3-36 PAGE 606 LINE 33562 define pcodeop vaddps_avx512vl ; :VADDPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vaddps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # ADDPS 3-36 PAGE 606 LINE 33565 :VADDPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vaddps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # ADDPS 3-36 PAGE 606 LINE 33568 define pcodeop vaddps_avx512f ; :VADDPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x58; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vaddps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # ADDSD 3-39 PAGE 609 LINE 33721 define pcodeop vaddsd_avx512f ; :VADDSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
# Tail of the VADDSD (TupleType T1S) constructor whose header is on the previous line.
& XmmReg2_m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vaddsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# ADDSS 3-41 PAGE 611 LINE 33815
define pcodeop vaddss_avx512f ;
# FIX: previously used XmmOpMask64 in the display, bit pattern and build.
# VADDSS is a scalar single-precision (32-bit element) operation, so the EVEX
# write-mask merges at 32-bit granularity (Intel SDM, ADDSS), matching every
# other *SS/*PS constructor in this file.
:VADDSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vaddss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# ANDPD 3-64 PAGE 634 LINE 34827
define pcodeop vandpd_avx512vl ;
:VANDPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vandpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# ANDPD 3-64 PAGE 634 LINE 34830
:VANDPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vandpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# ANDPD 3-64 PAGE 634 LINE 34833
define pcodeop vandpd_avx512dq ;
:VANDPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x54; (ZmmReg1 & ZmmOpMask64) ...
# Tail of the 512-bit VANDPD constructor whose header is on the previous line.
& ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vandpd_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# ANDPS 3-67 PAGE 637 LINE 34953
define pcodeop vandps_avx512vl ;
:VANDPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vandps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# ANDPS 3-67 PAGE 637 LINE 34956
:VANDPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  # FIX: the result was previously assigned to an unused `local tmp:32`;
  # YmmResult and YmmMask were never set, so `build YmmOpMask32` merged and
  # `zext(YmmResult)` read a stale YmmResult value.  Restored the standard
  # Result/Mask/build pattern used by every sibling constructor (e.g. the
  # 256-bit VANDPD above).
  YmmResult = vandps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# ANDPS 3-67 PAGE 637 LINE 34959
define pcodeop vandps_avx512dq ;
:VANDPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x54; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vandps_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# ANDNPD 3-70 PAGE 640 LINE 35087
define pcodeop vandnpd_avx512vl ;
:VANDNPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
# Tail of the 128-bit VANDNPD constructor whose header is on the previous line.
& XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vandnpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# ANDNPD 3-70 PAGE 640 LINE 35090
:VANDNPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vandnpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# ANDNPD 3-70 PAGE 640 LINE 35093
define pcodeop vandnpd_avx512dq ;
# FIX: the bit pattern previously read `(ZmmReg1 & ZmmOpMask & ZmmOpMask64)`.
# Every sibling 512-bit constructor in this file constrains only
# `(ZmmReg1 & ZmmOpMask64)`, and the stray `ZmmOpMask` term is never
# referenced in the display or semantics; dropped for consistency.
:VANDNPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x55; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vandnpd_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# ANDNPS 3-73 PAGE 643 LINE 35213
define pcodeop vandnps_avx512vl ;
:VANDNPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vandnps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# ANDNPS 3-73 PAGE 643 LINE 35216
:VANDNPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x55; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vandnps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # ANDNPS 3-73 PAGE 643 LINE 35219 define pcodeop vandnps_avx512dq ; :VANDNPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x55; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vandnps_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # CMPPD 3-155 PAGE 725 LINE 39246 define pcodeop vcmppd_avx512vl ; :^VCMPPD_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m64bcst; VCMPPD_mon & VCMPPD_op [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vcmppd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, VCMPPD_op ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # CMPPD 3-155 PAGE 725 LINE 39250 :^VCMPPD_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m64bcst; VCMPPD_mon & VCMPPD_op [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vcmppd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, VCMPPD_op ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # CMPPD 3-155 PAGE 725 LINE 39254 define pcodeop vcmppd_avx512f ; :^VCMPPD_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst^VCMPPD_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0xC2; KReg_reg ... 
# Tail of the 512-bit VCMPPD constructor whose header is on the previous line.
& ZmmReg2_m512_m64bcst; VCMPPD_mon & VCMPPD_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  local tmp = vcmppd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, VCMPPD_op );
  KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# CMPPS 3-162 PAGE 732 LINE 39613
define pcodeop vcmpps_avx512vl ;
:^VCMPPS_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m32bcst; VCMPPS_mon & VCMPPS_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  local tmp = vcmpps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, VCMPPS_op );
  # FIX: was AVXOpMask[0,8].  The 128-bit VCMPPS compares four 32-bit
  # elements, so only mask bits k[3:0] participate -- compare the VCMPPD
  # series (2/4/8 bits for 128/256/512) and the VCMPPS 256/512 forms below
  # (8/16 bits).
  KReg_reg = zext(AVXOpMask[0,4]) & tmp;
}

# CMPPS 3-162 PAGE 732 LINE 39617
:^VCMPPS_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m32bcst; VCMPPS_mon & VCMPPS_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  local tmp = vcmpps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, VCMPPS_op );
  KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# CMPPS 3-162 PAGE 732 LINE 39621
define pcodeop vcmpps_avx512f ;
:^VCMPPS_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst^VCMPPS_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0xC2; KReg_reg ... & ZmmReg2_m512_m32bcst; VCMPPS_mon & VCMPPS_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  local tmp = vcmpps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, VCMPPS_op );
  KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# CMPSD 3-173 PAGE 743 LINE 40157
define pcodeop vcmpsd_avx512f ;
:^VCMPSD_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m64^VCMPSD_op is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ...
& XmmReg2_m64; VCMPSD_mon & VCMPSD_op [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp = vcmpsd_avx512f( evexV5_XmmReg, XmmReg2_m64, VCMPSD_op ); KReg_reg = zext(AVXOpMask[0,1]) & tmp; } # CMPSS 3-177 PAGE 747 LINE 40393 define pcodeop vcmpss_avx512f ; :^VCMPSS_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m32^VCMPSS_op is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m32; VCMPSS_mon & VCMPSS_op [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp = vcmpss_avx512f( evexV5_XmmReg, XmmReg2_m32, VCMPSS_op ); KReg_reg = zext(AVXOpMask[0,1]) & tmp; } # COMISD 3-186 PAGE 756 LINE 40863 define pcodeop vcomisd_avx512f ; :VCOMISD XmmReg1, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x2F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcomisd_avx512f( XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # COMISS 3-188 PAGE 758 LINE 40941 define pcodeop vcomiss_avx512f ; :VCOMISS XmmReg1, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x2F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcomiss_avx512f( XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # CVTDQ2PD 3-228 PAGE 798 LINE 43080 define pcodeop vcvtdq2pd_avx512vl ; :VCVTDQ2PD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0xE6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
# Tail of the 128-bit VCVTDQ2PD constructor whose header is on the previous line.
# NOTE(review): this context block sets `evexBType=1` while the 256-bit form
# below sets `evexTType = 1` for the same (TupleType HV); one of the two looks
# like a generator typo -- confirm against the disp8*N context variables
# declared elsewhere in this processor spec.
# NOTE(review): the CVTDQ2PD forms merge with the 32-bit mask builders even
# though the destination elements are 64-bit doubles (VCVTPS2PD later in this
# file uses the 64-bit builders) -- verify intended write-mask granularity.
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexBType=1; ] # (TupleType HV)
{
  XmmResult = vcvtdq2pd_avx512vl( XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}
# CVTDQ2PD 3-228 PAGE 798 LINE 43083
# 256-bit form: converts four packed dwords from the 128-bit source to four
# doubles; note it uses `evexTType = 1` where its siblings use `evexBType=1`.
:VCVTDQ2PD YmmReg1^YmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0xE6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV)
{
  YmmResult = vcvtdq2pd_avx512vl( XmmReg2_m128_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}
# CVTDQ2PD 3-228 PAGE 798 LINE 43086
define pcodeop vcvtdq2pd_avx512f ;
:VCVTDQ2PD ZmmReg1^ZmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0xE6; (ZmmReg1 & ZmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexBType=1; ] # (TupleType HV)
{
  ZmmResult = vcvtdq2pd_avx512f( YmmReg2_m256_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}
# CVTDQ2PS 3-232 PAGE 802 LINE 43248
define pcodeop vcvtdq2ps_avx512vl ;
:VCVTDQ2PS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vcvtdq2ps_avx512vl( XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}
# CVTDQ2PS 3-232 PAGE 802 LINE 43251
:VCVTDQ2PS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtdq2ps_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # CVTDQ2PS 3-232 PAGE 802 LINE 43254 define pcodeop vcvtdq2ps_avx512f ; :VCVTDQ2PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtdq2ps_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # CVTPD2DQ 3-235 PAGE 805 LINE 43414 define pcodeop vcvtpd2dq_avx512vl ; :VCVTPD2DQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2dq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # CVTPD2DQ 3-235 PAGE 805 LINE 43417 :VCVTPD2DQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtpd2dq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # CVTPD2DQ 3-235 PAGE 805 LINE 43420 define pcodeop vcvtpd2dq_avx512f ; :VCVTPD2DQ YmmReg1^YmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtpd2dq_avx512f( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # CVTPD2PS 3-240 PAGE 810 LINE 43649 define pcodeop vcvtpd2ps_avx512vl ; :VCVTPD2PS XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2ps_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # CVTPD2PS 3-240 PAGE 810 LINE 43653 :VCVTPD2PS XmmReg1^XmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2ps_avx512vl( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # CVTPD2PS 3-240 PAGE 810 LINE 43657 define pcodeop vcvtpd2ps_avx512f ; :VCVTPD2PS YmmReg1^YmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x5A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtpd2ps_avx512f( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # CVTPS2DQ 3-246 PAGE 816 LINE 43933 define pcodeop vcvtps2dq_avx512vl ; :VCVTPS2DQ XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtps2dq_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # CVTPS2DQ 3-246 PAGE 816 LINE 43936 :VCVTPS2DQ YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtps2dq_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # CVTPS2DQ 3-246 PAGE 816 LINE 43939 define pcodeop vcvtps2dq_avx512f ; :VCVTPS2DQ ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtps2dq_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # CVTPS2PD 3-249 PAGE 819 LINE 44104 define pcodeop vcvtps2pd_avx512vl ; :VCVTPS2PD XmmReg1^XmmOpMask64, XmmReg2_m64_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { XmmResult = vcvtps2pd_avx512vl( XmmReg2_m64_m32bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # CVTPS2PD 3-249 PAGE 819 LINE 44107 :VCVTPS2PD YmmReg1^YmmOpMask64, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x5A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { YmmResult = vcvtps2pd_avx512vl( XmmReg2_m128_m32bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # CVTPS2PD 3-249 PAGE 819 LINE 44110 define pcodeop vcvtps2pd_avx512f ; :VCVTPS2PD ZmmReg1^ZmmOpMask64, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x5A; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { ZmmResult = vcvtps2pd_avx512f( YmmReg2_m256_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # CVTSD2SI 3-253 PAGE 823 LINE 44320 define pcodeop vcvtsd2si_avx512f ; :VCVTSD2SI Reg32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvtsd2si_avx512f( XmmReg2_m64 ); # TODO Reg64 = zext(Reg32) } # CVTSD2SI 3-253 PAGE 823 LINE 44322 @ifdef IA64 :VCVTSD2SI Reg64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x2D; Reg64 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvtsd2si_avx512f( XmmReg2_m64 ); } @endif # CVTSD2SS 3-255 PAGE 825 LINE 44417 define pcodeop vcvtsd2ss_avx512f ; :VCVTSD2SS XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vcvtsd2ss_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask; XmmResult[0,32] = (zext(XmmOpMask[0,1]) * XmmResult[0,32]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]); ZmmReg1 = zext(XmmResult); } # CVTSI2SD 3-257 PAGE 827 LINE 44522 define pcodeop vcvtsi2sd_avx512f ; :VCVTSI2SD XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtsi2sd_avx512f( evexV5_XmmReg, rm32 ); ZmmReg1 = zext(tmp); } # CVTSI2SD 3-257 PAGE 827 LINE 44525 @ifdef IA64 :VCVTSI2SD XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtsi2sd_avx512f( evexV5_XmmReg, rm64 ); ZmmReg1 = zext(tmp); } @endif # CVTSI2SS 3-259 PAGE 829 LINE 44636 define pcodeop vcvtsi2ss_avx512f ; :VCVTSI2SS XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtsi2ss_avx512f( evexV5_XmmReg, rm32 ); ZmmReg1 = zext(tmp); } # CVTSI2SS 3-259 PAGE 829 LINE 44638 @ifdef IA64 :VCVTSI2SS XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... 
& rm64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtsi2ss_avx512f( evexV5_XmmReg, rm64 ); ZmmReg1 = zext(tmp); } @endif # CVTSS2SD 3-261 PAGE 831 LINE 44747 define pcodeop vcvtss2sd_avx512f ; :VCVTSS2SD XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vcvtss2sd_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask; XmmResult[0,64] = (zext(XmmOpMask[0,1]) * XmmResult[0,64]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]); ZmmReg1 = zext(XmmResult); } # CVTSS2SI 3-263 PAGE 833 LINE 44839 define pcodeop vcvtss2si_avx512f ; :VCVTSS2SI Reg32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvtss2si_avx512f( XmmReg2_m32 ); # TODO Reg64 = zext(Reg32) } # CVTSS2SI 3-263 PAGE 833 LINE 44841 @ifdef IA64 :VCVTSS2SI Reg64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2D; Reg64 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvtss2si_avx512f( XmmReg2_m32 ); } @endif # CVTTPD2DQ 3-265 PAGE 835 LINE 44936 define pcodeop vcvttpd2dq_avx512vl ; :VCVTTPD2DQ XmmReg1^XmmOpMask32, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttpd2dq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # CVTTPD2DQ 3-265 PAGE 835 LINE 44940 :VCVTTPD2DQ XmmReg1^XmmOpMask32, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttpd2dq_avx512vl( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # CVTTPD2DQ 3-265 PAGE 835 LINE 44944 define pcodeop vcvttpd2dq_avx512f ; :VCVTTPD2DQ YmmReg1^YmmOpMask32, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvttpd2dq_avx512f( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45169 define pcodeop vcvttps2dq_avx512vl ; :VCVTTPS2DQ XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttps2dq_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45173 :VCVTTPS2DQ YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvttps2dq_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # CVTTPS2DQ 3-270 PAGE 840 LINE 45177 define pcodeop vcvttps2dq_avx512f ; :VCVTTPS2DQ ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x5B; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvttps2dq_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # CVTTSD2SI 3-274 PAGE 844 LINE 45385 define pcodeop vcvttsd2si_avx512f ; :VCVTTSD2SI Reg32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvttsd2si_avx512f( XmmReg2_m64 ); # TODO Reg64 = zext(Reg32) } # CVTTSD2SI 3-274 PAGE 844 LINE 45388 @ifdef IA64 :VCVTTSD2SI Reg64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvttsd2si_avx512f( XmmReg2_m64 ); } @endif # CVTTSS2SI 3-276 PAGE 846 LINE 45479 define pcodeop vcvttss2si_avx512f ; :VCVTTSS2SI Reg32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvttss2si_avx512f( XmmReg2_m32 ); # TODO Reg64 = zext(Reg32) } # CVTTSS2SI 3-276 PAGE 846 LINE 45482 @ifdef IA64 :VCVTTSS2SI Reg64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x2C; Reg64 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvttss2si_avx512f( XmmReg2_m32 ); } @endif # DIVPD 3-288 PAGE 858 LINE 46029 define pcodeop vdivpd_avx512vl ; :VDIVPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vdivpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # DIVPD 3-288 PAGE 858 LINE 46033 :VDIVPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vdivpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # DIVPD 3-288 PAGE 858 LINE 46037 define pcodeop vdivpd_avx512f ; :VDIVPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x5E; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vdivpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # DIVPS 3-291 PAGE 861 LINE 46170 define pcodeop vdivps_avx512vl ; :VDIVPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vdivps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # DIVPS 3-291 PAGE 861 LINE 46174 :VDIVPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vdivps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # DIVPS 3-291 PAGE 861 LINE 46178 define pcodeop vdivps_avx512f ; :VDIVPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x5E; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vdivps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # DIVSD 3-294 PAGE 864 LINE 46315 define pcodeop vdivsd_avx512f ; :VDIVSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vdivsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # DIVSS 3-296 PAGE 866 LINE 46413 define pcodeop vdivss_avx512f ; :VDIVSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vdivss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # EXTRACTPS 3-307 PAGE 877 LINE 46983 define pcodeop vextractps_avx512f ; :VEXTRACTPS rm32, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x17; XmmReg1 ... 
& rm32; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { rm32 = vextractps_avx512f( XmmReg1, imm8:1 ); } # INSERTPS 3-454 PAGE 1024 LINE 53785 define pcodeop vinsertps_avx512f ; :VINSERTPS XmmReg1, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x21; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vinsertps_avx512f( evexV5_XmmReg, XmmReg2_m32, imm8:1 ); ZmmReg1 = zext(tmp); } # MAXPD 4-12 PAGE 1132 LINE 59206 define pcodeop vmaxpd_avx512vl ; :VMAXPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vmaxpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MAXPD 4-12 PAGE 1132 LINE 59210 :VMAXPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vmaxpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MAXPD 4-12 PAGE 1132 LINE 59214 define pcodeop vmaxpd_avx512f ; :VMAXPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x5F; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vmaxpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # MAXPS 4-15 PAGE 1135 LINE 59356 define pcodeop vmaxps_avx512vl ; :VMAXPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vmaxps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MAXPS 4-15 PAGE 1135 LINE 59359 :VMAXPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vmaxps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MAXPS 4-15 PAGE 1135 LINE 59362 define pcodeop vmaxps_avx512f ; :VMAXPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x5F; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vmaxps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MAXSD 4-18 PAGE 1138 LINE 59506 define pcodeop vmaxsd_avx512f ; :VMAXSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vmaxsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MAXSS 4-20 PAGE 1140 LINE 59609 define pcodeop vmaxss_avx512f ; :VMAXSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vmaxss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MINPD 4-23 PAGE 1143 LINE 59771 define pcodeop vminpd_avx512vl ; :VMINPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vminpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MINPD 4-23 PAGE 1143 LINE 59774 :VMINPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vminpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MINPD 4-23 PAGE 1143 LINE 59777 define pcodeop vminpd_avx512f ; :VMINPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x5D; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vminpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # MINPS 4-26 PAGE 1146 LINE 59915 define pcodeop vminps_avx512vl ; :VMINPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vminps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MINPS 4-26 PAGE 1146 LINE 59918 :VMINPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vminps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MINPS 4-26 PAGE 1146 LINE 59921 define pcodeop vminps_avx512f ; :VMINPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x5D; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vminps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MINSD 4-29 PAGE 1149 LINE 60063 define pcodeop vminsd_avx512f ; :VMINSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vminsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MINSS 4-31 PAGE 1151 LINE 60166 define pcodeop vminss_avx512f ; :VMINSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vminss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MOVAPD 4-45 PAGE 1165 LINE 60852 :VMOVAPD XmmReg1^XmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x28; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = XmmReg2_m128 ; XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MOVAPD 4-45 PAGE 1165 LINE 60855 :VMOVAPD YmmReg1^YmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x28; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = YmmReg2_m256; YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MOVAPD 4-45 PAGE 1165 LINE 60858 define pcodeop vmovapd_avx512f ; :VMOVAPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x28; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM)
{
  ZmmResult = ZmmReg2_m512;
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

:VMOVAPD XmmReg2^XmmOpMask64, XmmReg1  is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & XmmOpMask64; byte=0x29; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  XmmResult = XmmReg1;
  XmmMask = XmmReg2;
  build XmmOpMask64;
  ZmmReg2 = zext(XmmResult);
}

:VMOVAPD m128^XmmOpMask64, XmmReg1  is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & XmmOpMask64; byte=0x29; XmmReg1 ...
  & m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  XmmResult = XmmReg1;
  XmmMask = m128;
  build XmmOpMask64;
  m128 = XmmResult;
}

# MOVAPD 4-45 PAGE 1165 LINE 60864
:VMOVAPD YmmReg2^YmmOpMask64, YmmReg1  is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & YmmOpMask64; byte=0x29; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  YmmResult = YmmReg1;
  YmmMask = YmmReg2;
  build YmmOpMask64;
  ZmmReg2 = zext(YmmResult);
}

# FIX: display section previously read "m256 YmmOpMask64" (space-joined).
# In a SLEIGH display section whitespace prints literally, so this one form
# disassembled as "VMOVAPD m256 {k}, ..." while every sibling store form
# (m128^XmmOpMask64, XmmReg2^XmmOpMask64, etc.) uses the '^' concatenation
# operator.  Joined with '^' for consistent output; bit pattern and p-code
# semantics are unchanged.
:VMOVAPD m256^YmmOpMask64, YmmReg1  is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & YmmOpMask64; byte=0x29; YmmReg1 ...
  & m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  YmmResult = YmmReg1;
  YmmMask = m256;
  build YmmOpMask64;
  m256 = YmmResult;
}

# MOVAPD 4-45 PAGE 1165 LINE 60867
:VMOVAPD ZmmReg2^ZmmOpMask64, ZmmReg1  is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & ZmmOpMask64; byte=0x29; ZmmReg1 & mod=3 & ZmmReg2
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  ZmmResult = ZmmReg1;
  ZmmMask = ZmmReg2;
  build ZmmOpMask64;
  ZmmReg2 = ZmmResult;
}

# FIX: same display-section defect as the m256 form above ("m512 ZmmOpMask64"
# space-joined); joined with '^' to match the sibling store forms.
:VMOVAPD m512^ZmmOpMask64, ZmmReg1  is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & ZmmOpMask64; byte=0x29; ZmmReg1 ...
& m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { ZmmResult = ZmmReg1 ; ZmmMask = m512; build ZmmOpMask64; m512 = ZmmResult; } # MOVAPS 4-49 PAGE 1169 LINE 61047 :VMOVAPS XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x28; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = XmmReg2_m128; XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MOVAPS 4-49 PAGE 1169 LINE 61050 :VMOVAPS YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x28; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = YmmReg2_m256; YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MOVAPS 4-49 PAGE 1169 LINE 61053 define pcodeop vmovaps_avx512f ; :VMOVAPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x28; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = ZmmReg2_m512 ; ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MOVAPS 4-49 PAGE 1169 LINE 61056 :VMOVAPS XmmReg2^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & XmmOpMask32; byte=0x29; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmResult = XmmReg1; XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VMOVAPS m128^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & XmmOpMask32; byte=0x29; (XmmReg1) ... 
& m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  XmmResult = XmmReg1;
  XmmMask = m128;
  build XmmOpMask32;
  m128 = XmmResult;
}

# MOVAPS 4-49 PAGE 1169 LINE 61059
:VMOVAPS YmmReg2^YmmOpMask32, YmmReg1  is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & YmmOpMask32; byte=0x29; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  YmmResult = YmmReg1;
  YmmMask = YmmReg2;
  build YmmOpMask32;
  ZmmReg2 = zext(YmmResult);
}

# FIX: display section previously read "m256 YmmOpMask32" (space-joined).
# Display-section whitespace prints literally, so this form disassembled
# inconsistently with the sibling store forms (m128^XmmOpMask32,
# YmmReg2^YmmOpMask32).  Joined with '^'; bit pattern and p-code semantics
# are unchanged.
:VMOVAPS m256^YmmOpMask32, YmmReg1  is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x29; (YmmReg1 & YmmOpMask32) ...
  & m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  YmmResult = YmmReg1;
  YmmMask = m256;
  build YmmOpMask32;
  m256 = YmmResult;
}

# MOVAPS 4-49 PAGE 1169 LINE 61062
:VMOVAPS ZmmReg2^ZmmOpMask32, ZmmReg1  is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x29; ZmmReg1 & mod=3 & ZmmOpMask32 & ZmmReg2
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  ZmmResult = ZmmReg1;
  ZmmMask = ZmmReg2;
  build ZmmOpMask32;
  ZmmReg2 = ZmmResult;
}

# FIX: same display-section defect as the m256 form above ("m512 ZmmOpMask32"
# space-joined); joined with '^' to match the sibling store forms.
:VMOVAPS m512^ZmmOpMask32, ZmmReg1  is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x29; (ZmmReg1 & ZmmOpMask32) ...
  & m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
  ZmmResult = ZmmReg1;
  ZmmMask = m512;
  build ZmmOpMask32;
  m512 = ZmmResult;
}

# MOVD/MOVQ 4-55 PAGE 1175 LINE 61366
:VMOVD XmmReg1, rm32  is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x6E; XmmReg1 ...
  & rm32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RM)
{
  XmmReg1 = zext(rm32);
}

# MOVD/MOVQ 4-55 PAGE 1175 LINE 61368
@ifdef IA64
:VMOVQ XmmReg1, rm64  is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x6E; XmmReg1 ...
& rm64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RM) { XmmReg1 = zext(rm64); } @endif # MOVD/MOVQ 4-55 PAGE 1175 LINE 61370 :VMOVD rm32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7E; XmmReg1 ... & rm32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR) { rm32 = XmmReg1[0,32]; } # MOVD/MOVQ 4-55 PAGE 1175 LINE 61372 @ifdef IA64 :VMOVQ rm64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x7E; XmmReg1 ... & rm64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR) { rm64 = XmmReg1[0,64]; } @endif # MOVDDUP 4-59 PAGE 1179 LINE 61526 define pcodeop vmovddup_avx512vl ; :VMOVDDUP XmmReg1^XmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x12; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 13; ] # (TupleType DUP-RM) { XmmResult = vmovddup_avx512vl( XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MOVDDUP 4-59 PAGE 1179 LINE 61529 :VMOVDDUP YmmReg1^YmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x12; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 13; ] # (TupleType DUP-RM) { YmmResult = vmovddup_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MOVDDUP 4-59 PAGE 1179 LINE 61532 define pcodeop vmovddup_avx512f ; :VMOVDDUP ZmmReg1^ZmmOpMask64, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x12; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 13; ] # (TupleType DUP-RM) { ZmmResult = vmovddup_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61675 define pcodeop vmovdqa32_avx512vl ; :VMOVDQA32 XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = vmovdqa32_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61678 :VMOVDQA32 YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = vmovdqa32_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61681 define pcodeop vmovdqa32_avx512f ; :VMOVDQA32 ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = vmovdqa32_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61684 :VMOVDQA32 XmmReg2_m128^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (XmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmReg2_m128 = vmovdqa32_avx512vl( XmmReg1 ); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61687 :VMOVDQA32 YmmReg2_m256^YmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (YmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { YmmReg2_m256 = vmovdqa32_avx512vl( YmmReg1 ); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61690 :VMOVDQA32 ZmmReg2_m512^ZmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { ZmmReg2_m512 = vmovdqa32_avx512f( ZmmReg1 ); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61693 define pcodeop vmovdqa64_avx512vl ; :VMOVDQA64 XmmReg1^XmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = vmovdqa64_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61696 :VMOVDQA64 YmmReg1^YmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = vmovdqa64_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61699 define pcodeop vmovdqa64_avx512f ; :VMOVDQA64 ZmmReg1^ZmmOpMask64, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = vmovdqa64_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61702 :VMOVDQA64 XmmReg2_m128^XmmOpMask64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (XmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmReg2_m128 = vmovdqa64_avx512vl( XmmReg1 ); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61705 :VMOVDQA64 YmmReg2_m256^YmmOpMask64, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (YmmReg1 & YmmOpMask64) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { YmmReg2_m256 = vmovdqa64_avx512vl( YmmReg1 ); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61708 :VMOVDQA64 ZmmReg2_m512^ZmmOpMask64, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { ZmmReg2_m512 = vmovdqa64_avx512f( ZmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61938 define pcodeop vmovdqu8_avx512vl ; :VMOVDQU8 XmmReg1^XmmOpMask8, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = vmovdqu8_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61941 :VMOVDQU8 YmmReg1^YmmOpMask8, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = vmovdqu8_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61944 define pcodeop vmovdqu8_avx512bw ; :VMOVDQU8 ZmmReg1^ZmmOpMask8, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (ZmmReg1 & ZmmOpMask8) ... 
# EVEX VMOVDQU8/16/32/64 (Intel SDM MOVDQU): masked unaligned integer moves; 128/256-bit
# register destinations are zero-extended into the full ZMM via zext, stores are pcodeop-modeled.
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = vmovdqu8_avx512bw( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61947 :VMOVDQU8 XmmReg2_m128^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (XmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmReg2_m128 = vmovdqu8_avx512vl( XmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61950 :VMOVDQU8 YmmReg2_m256^YmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (YmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { YmmReg2_m256 = vmovdqu8_avx512vl( YmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61953 :VMOVDQU8 ZmmReg2_m512^ZmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { ZmmReg2_m512 = vmovdqu8_avx512bw( ZmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61956 define pcodeop vmovdqu16_avx512vl ; :VMOVDQU16 XmmReg1^XmmOpMask16, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = vmovdqu16_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61959 :VMOVDQU16 YmmReg1^YmmOpMask16, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (YmmReg1 & ZmmReg1 & YmmOpMask16) ...
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = vmovdqu16_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61962 define pcodeop vmovdqu16_avx512bw ; :VMOVDQU16 ZmmReg1^ZmmOpMask16, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = vmovdqu16_avx512bw( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61965 :VMOVDQU16 XmmReg2_m128^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (XmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmReg2_m128 = vmovdqu16_avx512vl( XmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61968 :VMOVDQU16 YmmReg2_m256^YmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (YmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { YmmReg2_m256 = vmovdqu16_avx512vl( YmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61971 :VMOVDQU16 ZmmReg2_m512^ZmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { ZmmReg2_m512 = vmovdqu16_avx512bw( ZmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-67 PAGE 1187 LINE 61974 define pcodeop vmovdqu32_avx512vl ; :VMOVDQU32 XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = vmovdqu32_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 61987 :VMOVDQU32 YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = vmovdqu32_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 61990 define pcodeop vmovdqu32_avx512f ; :VMOVDQU32 ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x6F; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = vmovdqu32_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 61993 :VMOVDQU32 XmmReg2_m128^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (XmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmReg2_m128 = vmovdqu32_avx512vl( XmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 61996 :VMOVDQU32 YmmReg2_m256^YmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (YmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { YmmReg2_m256 = vmovdqu32_avx512vl( YmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 61999 :VMOVDQU32 ZmmReg2_m512^ZmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x7F; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { ZmmReg2_m512 = vmovdqu32_avx512f( ZmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 62002 define pcodeop vmovdqu64_avx512vl ; :VMOVDQU64 XmmReg1^XmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = vmovdqu64_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 62005 :VMOVDQU64 YmmReg1^YmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = vmovdqu64_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 62008 define pcodeop vmovdqu64_avx512f ; :VMOVDQU64 ZmmReg1^ZmmOpMask64, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0x6F; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = vmovdqu64_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 62011 :VMOVDQU64 XmmReg2_m128^XmmOpMask64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (XmmReg1 & XmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmReg2_m128 = vmovdqu64_avx512vl( XmmReg1 ); } # MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 62014 :VMOVDQU64 YmmReg2_m256^YmmOpMask64, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (YmmReg1 & YmmOpMask64) ...
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	YmmReg2_m256 = vmovdqu64_avx512vl( YmmReg1 );
}

# MOVDQU,VMOVDQU8/16/32/64 4-68 PAGE 1188 LINE 62017
:VMOVDQU64 ZmmReg2_m512^ZmmOpMask64, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x7F; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	ZmmReg2_m512 = vmovdqu64_avx512f( ZmmReg1 );
}

# MOVHLPS 4-76 PAGE 1196 LINE 62412
# SDM: DEST[63:0] := SRC2[127:64]; DEST[127:64] := SRC1[127:64]
:VMOVHLPS XmmReg1, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
{
	local src1 = evexV5_XmmReg[64,64];
	local src2 = XmmReg2[64,64];
	XmmReg1[0,64] = src2;
	XmmReg1[64,64] = src1;  # bugfix: was src2 (src1 was computed but unused); high qword comes from first source
	ZmmReg1 = zext(XmmReg1);
}

# MOVHPD 4-78 PAGE 1198 LINE 62485
define pcodeop vmovhpd_avx512f ;
# SDM: DEST[63:0] := SRC1[63:0]; DEST[127:64] := m64
:VMOVHPD XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ... & m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	local src1 = evexV5_XmmReg[0,64];
	local src2 = m64[0,64];
	XmmReg1[0,64] = src1;   # bugfix: was src2 (src1 was computed but unused); low qword comes from first source
	XmmReg1[64,64] = src2;
	ZmmReg1 = zext(XmmReg1);
}

# MOVHPD 4-78 PAGE 1198 LINE 62491
:VMOVHPD m64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x17; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR)
{
	m64 = vmovhpd_avx512f( XmmReg1 );
}

# MOVHPS 4-80 PAGE 1200 LINE 62572
define pcodeop vmovhps_avx512f ;
:VMOVHPS XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) ...
# EVEX VMOVHPS (cont.), VMOVLHPS/VMOVLPD/VMOVLPS low/high-quadword moves, and the
# non-temporal stores VMOVNTDQA/VMOVNTDQ/VMOVNTPD/VMOVNTPS, all modeled with opaque pcodeops.
& m64 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2) { XmmResult = vmovhps_avx512f( evexV5_XmmReg, m64 ); XmmMask = XmmReg1; ZmmReg1 = zext(XmmResult); } # MOVHPS 4-80 PAGE 1200 LINE 62578 :VMOVHPS m64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x17; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2-MR) { m64 = vmovhps_avx512f( XmmReg1 ); } # MOVLHPS 4-82 PAGE 1202 LINE 62660 # WARNING: duplicate opcode EVEX.NDS.128.0F.W0 16 /r last seen on 4-80 PAGE 1200 LINE 62572 for "VMOVLHPS xmm1, xmm2, xmm3" define pcodeop vmovlhps_avx512f ; :VMOVLHPS XmmReg1, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x16; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2) { local tmp:16 = vmovlhps_avx512f( evexV5_XmmReg, XmmReg2 ); ZmmReg1 = zext(tmp); } # MOVLPD 4-84 PAGE 1204 LINE 62733 define pcodeop vmovlpd_avx512f ; :VMOVLPD XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ... & m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vmovlpd_avx512f( evexV5_XmmReg, m64 ); ZmmReg1 = zext(tmp); } # MOVLPD 4-84 PAGE 1204 LINE 62739 :VMOVLPD m64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x13; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR) { m64 = vmovlpd_avx512f( XmmReg1 ); } # MOVLPS 4-86 PAGE 1206 LINE 62818 # WARNING: duplicate opcode EVEX.NDS.128.0F.W0 12 /r last seen on 4-76 PAGE 1196 LINE 62412 for "VMOVLPS xmm2, xmm1, m64" define pcodeop vmovlps_avx512f ; :VMOVLPS XmmReg1, evexV5_XmmReg, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1) ...
& m64 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2) { local tmp:16 = vmovlps_avx512f( evexV5_XmmReg, m64 ); ZmmReg1 = zext(tmp); } # MOVLPS 4-86 PAGE 1206 LINE 62824 :VMOVLPS m64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x13; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2-MR) { m64 = vmovlps_avx512f( XmmReg1 ); } # MOVNTDQA 4-92 PAGE 1212 LINE 63088 define pcodeop vmovntdqa_avx512vl ; :VMOVNTDQA XmmReg1, m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x2A; (XmmReg1 & ZmmReg1) ... & m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp:16 = vmovntdqa_avx512vl( m128 ); ZmmReg1 = zext(tmp); } # MOVNTDQA 4-92 PAGE 1212 LINE 63090 :VMOVNTDQA YmmReg1, m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x2A; (YmmReg1 & ZmmReg1) ... & m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp:32 = vmovntdqa_avx512vl( m256 ); ZmmReg1 = zext(tmp); } # MOVNTDQA 4-92 PAGE 1212 LINE 63092 define pcodeop vmovntdqa_avx512f ; :VMOVNTDQA ZmmReg1, m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x2A; ZmmReg1 ... & m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmReg1 = vmovntdqa_avx512f( m512 ); } # MOVNTDQ 4-94 PAGE 1214 LINE 63191 define pcodeop vmovntdq_avx512vl ; :VMOVNTDQ m128, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0xE7; XmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m128 = vmovntdq_avx512vl( XmmReg1 ); } # MOVNTDQ 4-94 PAGE 1214 LINE 63193 :VMOVNTDQ m256, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0xE7; YmmReg1 ...
& m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m256 = vmovntdq_avx512vl( YmmReg1 ); } # MOVNTDQ 4-94 PAGE 1214 LINE 63195 define pcodeop vmovntdq_avx512f ; :VMOVNTDQ m512, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0xE7; ZmmReg1 ... & m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m512 = vmovntdq_avx512f( ZmmReg1 ); } # MOVNTPD 4-98 PAGE 1218 LINE 63361 define pcodeop vmovntpd_avx512vl ; :VMOVNTPD m128, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x2B; XmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m128 = vmovntpd_avx512vl( XmmReg1 ); } # MOVNTPD 4-98 PAGE 1218 LINE 63363 :VMOVNTPD m256, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x2B; YmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m256 = vmovntpd_avx512vl( YmmReg1 ); } # MOVNTPD 4-98 PAGE 1218 LINE 63365 define pcodeop vmovntpd_avx512f ; :VMOVNTPD m512, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x2B; ZmmReg1 ... & m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m512 = vmovntpd_avx512f( ZmmReg1 ); } # MOVNTPS 4-100 PAGE 1220 LINE 63445 define pcodeop vmovntps_avx512vl ; :VMOVNTPS m128, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x2B; XmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m128 = vmovntps_avx512vl( XmmReg1 ); } # MOVNTPS 4-100 PAGE 1220 LINE 63447 :VMOVNTPS m256, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x2B; YmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m256 = vmovntps_avx512vl( YmmReg1 ); } # MOVNTPS 4-100 PAGE 1220 LINE 63449 define pcodeop vmovntps_avx512f ; :VMOVNTPS m512, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x2B; ZmmReg1 ...
# VMOVNTPS zmm store (cont.), VMOVQ load/store forms (upper destination bits cleared via zext),
# and the first EVEX VMOVSD merge-masked forms; mask bit 0 selects new value vs. old destination.
& m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { m512 = vmovntps_avx512f( ZmmReg1 ); } # MOVQ 4-103 PAGE 1223 LINE 63581 :VMOVQ XmmReg1, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x7E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RM) { ZmmReg1 = zext(XmmReg2_m64[0,64]); } # MOVQ 4-103 PAGE 1223 LINE 63587 :VMOVQ XmmReg2, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0xD6; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR) { ZmmReg2 = zext( XmmReg1[0,64] ); } :VMOVQ m64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0xD6; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR) { m64 = XmmReg1[0,64]; } # MOVSD 4-111 PAGE 1231 LINE 63978 define pcodeop vmovsd_avx512f ; :VMOVSD XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) & (mod=0x3 & XmmReg2) { XmmResult = XmmReg2; XmmMask = XmmReg1; build XmmOpMask; XmmResult[0,64] = (zext(XmmOpMask[0,1]) * XmmResult[0,64]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]); XmmResult[64,64] = evexV5_XmmReg[64,64]; ZmmReg1 = zext(XmmResult); } # MOVSD 4-111 PAGE 1231 LINE 63981 :VMOVSD XmmReg1^XmmOpMask, m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) ...
& m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RM)
{
	local tmp:8 = m64;
	XmmMask = XmmReg1;
	build XmmOpMask;
	tmp = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]);
	ZmmReg1 = zext(tmp);
}

# MOVSD 4-111 PAGE 1231 LINE 63983
# register-to-register store form: rm (XmmReg2) is the destination, reg (XmmReg1) the source
:VMOVSD XmmReg2^XmmOpMask, evexV5_XmmReg, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x11; XmmReg1 & XmmOpMask & (mod=0x3 & (XmmReg2 & ZmmReg2))
{
	XmmResult = XmmReg1;
	XmmMask = XmmReg2;
	build XmmOpMask;
	XmmResult[0,64] = (zext(XmmOpMask[0,1]) * XmmResult[0,64]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]);
	XmmResult[64,64] = evexV5_XmmReg[64,64];
	ZmmReg2 = zext(XmmResult);   # bugfix: was ZmmReg1, which is the source register; result belongs in rm (ZmmReg2), cf. VMOVSS
}

# MOVSD 4-111 PAGE 1231 LINE 63986
# masked store to memory; written the same way as the VMOVSS m32 store form
:VMOVSD m64^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & XmmOpMask; byte=0x11; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR)
{
	# bugfix: mask select was inverted (kept old m64 when the mask bit was set) and
	# the merge source was XmmReg1 instead of the old memory contents
	local tmp:8 = XmmReg1[0,64];
	XmmMask = zext(m64);
	build XmmOpMask;
	m64 = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,64]);
}

# MOVSHDUP 4-114 PAGE 1234 LINE 64130
define pcodeop vmovshdup_avx512vl ;
:VMOVSHDUP XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x16; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vmovshdup_avx512vl( XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# MOVSHDUP 4-114 PAGE 1234 LINE 64133
:VMOVSHDUP YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
# VMOVSHDUP/VMOVSLDUP duplicate-element moves (pcodeop-modeled) and EVEX VMOVSS merge-masked
# scalar moves; mask bit 0 selects the new scalar vs. the previous destination value.
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vmovshdup_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MOVSHDUP 4-114 PAGE 1234 LINE 64136 define pcodeop vmovshdup_avx512f ; :VMOVSHDUP ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x16; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vmovshdup_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MOVSLDUP 4-117 PAGE 1237 LINE 64284 define pcodeop vmovsldup_avx512vl ; :VMOVSLDUP XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x12; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vmovsldup_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MOVSLDUP 4-117 PAGE 1237 LINE 64287 :VMOVSLDUP YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x12; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vmovsldup_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MOVSLDUP 4-117 PAGE 1237 LINE 64290 define pcodeop vmovsldup_avx512f ; :VMOVSLDUP ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x12; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vmovsldup_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MOVSS 4-120 PAGE 1240 LINE 64443 :VMOVSS XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) & (mod=0x3 & XmmReg2) { local tmp:4 = XmmReg2[0,32]; XmmMask = XmmReg1; build XmmOpMask; XmmResult = evexV5_XmmReg; XmmResult[0,32] = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]); ZmmReg1 = zext(XmmResult); } # MOVSS 4-120 PAGE 1240 LINE 64446 :VMOVSS XmmReg1^XmmOpMask, m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RM) { local tmp:4 = m32; XmmMask = XmmReg1; build XmmOpMask; tmp = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]); ZmmReg1 = zext(tmp); } # MOVSS 4-120 PAGE 1240 LINE 64448 :VMOVSS XmmReg2^XmmOpMask, evexV5_XmmReg, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2)) { local tmp:4 = XmmReg1[0,32]; XmmMask = XmmReg2; build XmmOpMask; XmmResult = evexV5_XmmReg; XmmResult[0,32] = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]); ZmmReg2 = zext(XmmResult); } # MOVSS 4-120 PAGE 1240 LINE 64451 :VMOVSS m32^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & XmmOpMask; byte=0x11; XmmReg1 ...
# VMOVSS masked store to m32 (cont.) and EVEX VMOVUPD 128-bit load/store forms;
# stores merge with the old destination contents under XmmOpMask64.
& m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-MR) { local tmp:4 = XmmReg1[0,32]; XmmMask = zext(m32); build XmmOpMask; m32 = (zext(XmmOpMask[0,1]) * tmp) + (zext(!XmmOpMask[0,1]) * XmmMask[0,32]); } # MOVUPD 4-126 PAGE 1246 LINE 64695 :VMOVUPD XmmReg1^XmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = XmmReg2_m128 ; XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MOVUPD 4-126 PAGE 1246 LINE 64698 :VMOVUPD XmmReg2^XmmOpMask64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & XmmOpMask64; byte=0x11; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmResult = XmmReg1; XmmMask = XmmReg2; build XmmOpMask64; ZmmReg2 = zext(XmmResult); } :VMOVUPD m128^XmmOpMask64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & XmmOpMask64; byte=0x11; (XmmReg1) ... & m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmResult = XmmReg1; XmmMask = m128; build XmmOpMask64; m128 = XmmResult; } # MOVUPD 4-126 PAGE 1246 LINE 64701 :VMOVUPD YmmReg1^YmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x10; (YmmReg1 & ZmmReg1 & YmmOpMask64) ...
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM)
{
	YmmResult = YmmReg2_m256;
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# MOVUPD 4-126 PAGE 1246 LINE 64704
:VMOVUPD YmmReg2^YmmOpMask64, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & YmmOpMask64; byte=0x11; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	YmmResult = YmmReg1;
	YmmMask = YmmReg2;
	build YmmOpMask64;
	ZmmReg2 = zext(YmmResult);
}

# fixed display: "m256 YmmOpMask64" was missing the ^ concatenator used by every sibling form
:VMOVUPD m256^YmmOpMask64, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & YmmOpMask64; byte=0x11; YmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	YmmResult = YmmReg1;
	YmmMask = m256;
	build YmmOpMask64;
	m256 = YmmResult;
}

# MOVUPD 4-126 PAGE 1246 LINE 64707
:VMOVUPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x10; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM)
{
	ZmmResult = ZmmReg2_m512;
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# MOVUPD 4-126 PAGE 1246 LINE 64710
:VMOVUPD ZmmReg2_m512^ZmmOpMask64, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & ZmmOpMask64; byte=0x11; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	ZmmResult = ZmmReg1;
	ZmmMask = ZmmReg2_m512;
	build ZmmOpMask64;
	ZmmReg2_m512 = ZmmResult;
}

# MOVUPS 4-130 PAGE 1250 LINE 64880
:VMOVUPS XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
# EVEX VMOVUPS masked loads (128/256/512-bit) and the 128-bit store forms;
# 128/256-bit register destinations are zero-extended into the full ZMM.
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { XmmResult = XmmReg2_m128; XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MOVUPS 4-130 PAGE 1250 LINE 64883 :VMOVUPS YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x10; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { YmmResult = YmmReg2_m256; YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MOVUPS 4-130 PAGE 1250 LINE 64886 :VMOVUPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x10; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-RM) { ZmmResult = ZmmReg2_m512; ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MOVUPS 4-130 PAGE 1250 LINE 64889 :VMOVUPS XmmReg2^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & XmmOpMask32; byte=0x11; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR) { XmmResult = XmmReg1; XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VMOVUPS m128^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & XmmOpMask32; byte=0x11; XmmReg1 ...
& m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	XmmResult = XmmReg1;
	XmmMask = m128;
	build XmmOpMask32;
	m128 = XmmResult;
}

# MOVUPS 4-130 PAGE 1250 LINE 64892
:VMOVUPS YmmReg2^YmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & YmmOpMask32; byte=0x11; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	YmmResult = YmmReg1;
	YmmMask = YmmReg2;
	build YmmOpMask32;
	ZmmReg2 = zext(YmmResult);
}

# fixed display: "m256 YmmOpMask32" was missing the ^ concatenator used by every sibling form
:VMOVUPS m256^YmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & YmmOpMask32; byte=0x11; YmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	YmmResult = YmmReg1;
	YmmMask = m256;
	build YmmOpMask32;
	m256 = YmmResult;
}

# MOVUPS 4-130 PAGE 1250 LINE 64895
:VMOVUPS ZmmReg2_m512^ZmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & ZmmOpMask32; byte=0x11; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM-MR)
{
	ZmmResult = ZmmReg1;
	ZmmMask = ZmmReg2_m512;
	build ZmmOpMask32;
	ZmmReg2_m512 = ZmmResult;
}

# MULPD 4-146 PAGE 1266 LINE 65686
define pcodeop vmulpd_avx512vl ;
:VMULPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vmulpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# MULPD 4-146 PAGE 1266 LINE 65689
:VMULPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask64) ...
# EVEX VMULPD/VMULPS/VMULSD/VMULSS and VORPD/VORPS (pcodeop-modeled, with broadcast-capable
# memory operands), followed by the start of the VPABSB family.
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vmulpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # MULPD 4-146 PAGE 1266 LINE 65692 define pcodeop vmulpd_avx512f ; :VMULPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x59; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vmulpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # MULPS 4-149 PAGE 1269 LINE 65817 define pcodeop vmulps_avx512vl ; :VMULPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vmulps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # MULPS 4-149 PAGE 1269 LINE 65820 :VMULPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vmulps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # MULPS 4-149 PAGE 1269 LINE 65823 define pcodeop vmulps_avx512f ; :VMULPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x59; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vmulps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # MULSD 4-152 PAGE 1272 LINE 65959 define pcodeop vmulsd_avx512f ; :VMULSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vmulsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # MULSS 4-154 PAGE 1274 LINE 66055 define pcodeop vmulss_avx512f ; :VMULSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vmulss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # ORPD 4-168 PAGE 1288 LINE 66724 define pcodeop vorpd_avx512vl ; :VORPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vorpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # ORPD 4-168 PAGE 1288 LINE 66727 :VORPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask64) ...
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vorpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # ORPD 4-168 PAGE 1288 LINE 66730 define pcodeop vorpd_avx512dq ; :VORPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x56; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vorpd_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # ORPS 4-171 PAGE 1291 LINE 66850 define pcodeop vorps_avx512vl ; :VORPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vorps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # ORPS 4-171 PAGE 1291 LINE 66853 :VORPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vorps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # ORPS 4-171 PAGE 1291 LINE 66856 define pcodeop vorps_avx512dq ; :VORPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x56; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vorps_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67320 define pcodeop vpabsb_avx512vl ; :VPABSB XmmReg1^XmmOpMask8, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x1C; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpabsb_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67323 :VPABSB YmmReg1^YmmOpMask8, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x1C; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpabsb_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67326 define pcodeop vpabsb_avx512bw ; :VPABSB ZmmReg1^ZmmOpMask8, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x1C; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpabsb_avx512bw( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PABSB/PABSW/PABSD/PABSQ 4-180 PAGE 1300 LINE 67329 define pcodeop vpabsw_avx512vl ; :VPABSW XmmReg1^XmmOpMask16, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x1D; (XmmReg1 & ZmmReg1 & XmmOpMask16) ...
# EVEX VPABSW/VPABSD/VPABSQ absolute-value forms and VPACKSSWB/VPACKSSDW saturating packs,
# modeled with opaque pcodeops and merge masking via the *OpMask tables.
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpabsw_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67344 :VPABSW YmmReg1^YmmOpMask16, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x1D; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpabsw_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67347 define pcodeop vpabsw_avx512bw ; :VPABSW ZmmReg1^ZmmOpMask16, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x1D; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpabsw_avx512bw( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67350 define pcodeop vpabsd_avx512vl ; :VPABSD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpabsd_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67353 :VPABSD YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpabsd_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67357 define pcodeop vpabsd_avx512f ; :VPABSD ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1E; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpabsd_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67360 define pcodeop vpabsq_avx512vl ; :VPABSQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x1F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpabsq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67363 :VPABSQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x1F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpabsq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PABSB/PABSW/PABSD/PABSQ 4-181 PAGE 1301 LINE 67366 define pcodeop vpabsq_avx512f ; :VPABSQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x1F; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpabsq_avx512f( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67645 define pcodeop vpacksswb_avx512vl ; :VPACKSSWB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x63; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpacksswb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67649 :VPACKSSWB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x63; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpacksswb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67653 define pcodeop vpacksswb_avx512bw ; :VPACKSSWB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x63; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpacksswb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PACKSSWB/PACKSSDW 4-186 PAGE 1306 LINE 67657 define pcodeop vpackssdw_avx512vl ; :VPACKSSDW XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x6B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpackssdw_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PACKSSWB/PACKSSDW 4-187 PAGE 1307 LINE 67674 :VPACKSSDW YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x6B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpackssdw_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PACKSSWB/PACKSSDW 4-187 PAGE 1307 LINE 67678 define pcodeop vpackssdw_avx512bw ; :VPACKSSDW ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x6B; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpackssdw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PACKUSDW 4-194 PAGE 1314 LINE 68094 define pcodeop vpackusdw_avx512vl ; :VPACKUSDW XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x2B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpackusdw_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PACKUSDW 4-194 PAGE 1314 LINE 68098 :VPACKUSDW YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x2B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpackusdw_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PACKUSDW 4-194 PAGE 1314 LINE 68103 define pcodeop vpackusdw_avx512bw ; :VPACKUSDW ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x2B; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpackusdw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PACKUSWB 4-199 PAGE 1319 LINE 68374 define pcodeop vpackuswb_avx512vl ; :VPACKUSWB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x67; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpackuswb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PACKUSWB 4-199 PAGE 1319 LINE 68378 :VPACKUSWB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x67; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpackuswb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PACKUSWB 4-199 PAGE 1319 LINE 68382 define pcodeop vpackuswb_avx512bw ; :VPACKUSWB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x67; (ZmmReg1 & ZmmOpMask8) ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpackuswb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68674 define pcodeop vpaddb_avx512vl ; :VPADDB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xFC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpaddb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68677 define pcodeop vpaddw_avx512vl ; :VPADDW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xFD; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpaddw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68680 define pcodeop vpaddd_avx512vl ; :VPADDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xFE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpaddd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68683 define pcodeop vpaddq_avx512vl ; :VPADDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xD4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpaddq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68686 :VPADDB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xFC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpaddb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68689 :VPADDW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xFD; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpaddw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-204 PAGE 1324 LINE 68692 :VPADDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xFE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpaddd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PADDB/PADDW/PADDD/PADDQ 4-205 PAGE 1325 LINE 68707 :VPADDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xD4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpaddq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PADDB/PADDW/PADDD/PADDQ 4-205 PAGE 1325 LINE 68710
define pcodeop vpaddb_avx512bw ;
# FIX(review): was the size-less ZmmOpMask table; VPADDB is a byte-granularity
# op, so its write-mask must use the per-byte ZmmOpMask8 table, matching every
# other byte-element zmm op in this file (VPABSB, VPACKSSWB, VPADDSB, VPADDUSB,
# VPAVGB, VPALIGNR). Changed in the display operand, the pattern, and `build`.
:VPADDB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xFC; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpaddb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask8;
	ZmmReg1 = ZmmResult;
}

# PADDB/PADDW/PADDD/PADDQ 4-205 PAGE 1325 LINE 68713
define pcodeop vpaddw_avx512bw ;
:VPADDW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xFD; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpaddw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PADDB/PADDW/PADDD/PADDQ 4-205 PAGE 1325 LINE 68716
define pcodeop vpaddd_avx512f ;
:VPADDD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xFE; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpaddd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PADDB/PADDW/PADDD/PADDQ 4-205 PAGE 1325 LINE 68719
define pcodeop vpaddq_avx512f ;
:VPADDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xD4; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpaddq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69051 define pcodeop vpaddsb_avx512vl ; :VPADDSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xEC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpaddsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69054 :VPADDSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xEC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpaddsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69057 define pcodeop vpaddsb_avx512bw ; :VPADDSB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xEC; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpaddsb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69060 define pcodeop vpaddsw_avx512vl ; :VPADDSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xED; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpaddsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69063 :VPADDSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xED; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpaddsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PADDSB/PADDSW 4-211 PAGE 1331 LINE 69066 define pcodeop vpaddsw_avx512bw ; :VPADDSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xED; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpaddsw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69269 define pcodeop vpaddusb_avx512vl ; :VPADDUSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDC; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpaddusb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69273 :VPADDUSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDC; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpaddusb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69277 define pcodeop vpaddusb_avx512bw ; :VPADDUSB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDC; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpaddusb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69281 define pcodeop vpaddusw_avx512vl ; :VPADDUSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDD; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpaddusw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PADDUSB/PADDUSW 4-215 PAGE 1335 LINE 69285 :VPADDUSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDD; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpaddusw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PADDUSB/PADDUSW 4-216 PAGE 1336 LINE 69302 define pcodeop vpaddusw_avx512bw ; :VPADDUSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDD; (ZmmReg1 & ZmmOpMask16) ... 
& ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpaddusw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PALIGNR 4-219 PAGE 1339 LINE 69495
define pcodeop vpalignr_avx512vl ;
# FIX(review): the EVEX VPALIGNR encoding is EVEX.*.66.0F3A.WIG 0F /r ib — a
# trailing imm8 (byte-shift count) is part of the instruction. The original
# constructors omitted it from the operand list, the bit pattern, and the
# semantics, so decoding consumed one byte too few. Added `imm8` to all three
# forms and passed it (as a 1-byte value) to the pcodeop, matching the
# VEX/AVX2 VPALIGNR entries elsewhere in this processor module.
:VPALIGNR XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_XmmReg; byte=0x0F; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpalignr_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
	XmmMask = XmmReg1;
	build XmmOpMask8;
	ZmmReg1 = zext(XmmResult);
}

# PALIGNR 4-219 PAGE 1339 LINE 69499
:VPALIGNR YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_YmmReg; byte=0x0F; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpalignr_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
	YmmMask = YmmReg1;
	build YmmOpMask8;
	ZmmReg1 = zext(YmmResult);
}

# PALIGNR 4-219 PAGE 1339 LINE 69505
define pcodeop vpalignr_avx512bw ;
:VPALIGNR ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x0F; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpalignr_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask8;
	ZmmReg1 = ZmmResult;
}

# PAND 4-223 PAGE 1343 LINE 69684
define pcodeop vpandd_avx512vl ;
:VPANDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpandd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PAND 4-223 PAGE 1343 LINE 69687 :VPANDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpandd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PAND 4-223 PAGE 1343 LINE 69690 define pcodeop vpandd_avx512f ; :VPANDD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xDB; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpandd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PAND 4-223 PAGE 1343 LINE 69693 define pcodeop vpandq_avx512vl ; :VPANDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xDB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpandq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PAND 4-223 PAGE 1343 LINE 69696 :VPANDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xDB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpandq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PAND 4-223 PAGE 1343 LINE 69699 define pcodeop vpandq_avx512f ; :VPANDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xDB; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpandq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PANDN 4-226 PAGE 1346 LINE 69859 define pcodeop vpandnd_avx512vl ; :VPANDND XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpandnd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PANDN 4-226 PAGE 1346 LINE 69862 :VPANDND YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpandnd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PANDN 4-226 PAGE 1346 LINE 69865 define pcodeop vpandnd_avx512f ; :VPANDND ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xDF; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpandnd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PANDN 4-226 PAGE 1346 LINE 69868 define pcodeop vpandnq_avx512vl ; :VPANDNQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpandnq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PANDN 4-226 PAGE 1346 LINE 69871 :VPANDNQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpandnq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PANDN 4-226 PAGE 1346 LINE 69874 define pcodeop vpandnq_avx512f ; :VPANDNQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xDF; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpandnq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70097 define pcodeop vpavgb_avx512vl ; :VPAVGB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE0; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpavgb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70100 :VPAVGB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE0; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpavgb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70103 define pcodeop vpavgb_avx512bw ; :VPAVGB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE0; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpavgb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70106 define pcodeop vpavgw_avx512vl ; :VPAVGW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE3; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpavgw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70109 :VPAVGW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE3; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpavgw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PAVGB/PAVGW 4-230 PAGE 1350 LINE 70112 define pcodeop vpavgw_avx512bw ; :VPAVGW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE3; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpavgw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70841 define pcodeop vpcmpeqd_avx512vl ; :VPCMPEQD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x76; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vpcmpeqd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70846 :VPCMPEQD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x76; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vpcmpeqd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70851 define pcodeop vpcmpeqd_avx512f ; :VPCMPEQD KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x76; KReg_reg ... 
  & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpeqd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# EVEX-encoded packed compare instructions (VPCMPEQ*/VPCMPGT*). Each constructor
# produces a result mask in the destination k-register: the element-wise compare
# itself is modeled as an opaque pcodeop, and the result is ANDed with the low N
# bits of the write-mask AVXOpMask, where N is the source element count
# (e.g. [0,16] = 16 byte elements of a 128-bit source).

# PCMPEQB/PCMPEQW/PCMPEQD 4-244 PAGE 1364 LINE 70855
define pcodeop vpcmpeqb_avx512vl ;
:VPCMPEQB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x74; KReg_reg ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpeqb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70873
:VPCMPEQB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x74; KReg_reg ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpeqb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70878
define pcodeop vpcmpeqb_avx512bw ;
:VPCMPEQB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_ZmmReg; byte=0x74; KReg_reg ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpeqb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	KReg_reg = zext(AVXOpMask[0,64]) & tmp;
}

# PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70883
define pcodeop vpcmpeqw_avx512vl ;
:VPCMPEQW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x75; KReg_reg ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpeqw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70888
:VPCMPEQW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x75; KReg_reg ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpeqw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# PCMPEQB/PCMPEQW/PCMPEQD 4-245 PAGE 1365 LINE 70893
define pcodeop vpcmpeqw_avx512bw ;
:VPCMPEQW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_ZmmReg; byte=0x75; KReg_reg ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpeqw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# PCMPEQQ 4-250 PAGE 1370 LINE 71174
define pcodeop vpcmpeqq_avx512vl ;
:VPCMPEQQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x29; KReg_reg ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpeqq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	KReg_reg = zext(AVXOpMask[0,2]) & tmp;
}

# PCMPEQQ 4-250 PAGE 1370 LINE 71179
:VPCMPEQQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x29; KReg_reg ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpeqq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	KReg_reg = zext(AVXOpMask[0,4]) & tmp;
}

# PCMPEQQ 4-250 PAGE 1370 LINE 71184
define pcodeop vpcmpeqq_avx512f ;
:VPCMPEQQ KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x29; KReg_reg ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpeqq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71517
define pcodeop vpcmpgtd_avx512vl ;
:VPCMPGTD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x66; KReg_reg ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpgtd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
	KReg_reg = zext(AVXOpMask[0,4]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71521
:VPCMPGTD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x66; KReg_reg ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpgtd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71525
define pcodeop vpcmpgtd_avx512f ;
:VPCMPGTD KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x66; KReg_reg ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpgtd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71529
define pcodeop vpcmpgtb_avx512vl ;
:VPCMPGTB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x64; KReg_reg ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpgtb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-257 PAGE 1377 LINE 71533
:VPCMPGTB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x64; KReg_reg ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpgtb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-258 PAGE 1378 LINE 71545
define pcodeop vpcmpgtb_avx512bw ;
:VPCMPGTB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_ZmmReg; byte=0x64; KReg_reg ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpgtb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	KReg_reg = zext(AVXOpMask[0,64]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-258 PAGE 1378 LINE 71549
define pcodeop vpcmpgtw_avx512vl ;
:VPCMPGTW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_XmmReg; byte=0x65; KReg_reg ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpgtw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-258 PAGE 1378 LINE 71553
:VPCMPGTW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_YmmReg; byte=0x65; KReg_reg ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpgtw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# PCMPGTB/PCMPGTW/PCMPGTD 4-258 PAGE 1378 LINE 71557
define pcodeop vpcmpgtw_avx512bw ;
:VPCMPGTW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & AVXOpMask & evexV5_ZmmReg; byte=0x65; KReg_reg ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpgtw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# PCMPGTQ 4-263 PAGE 1383 LINE 71837
define pcodeop vpcmpgtq_avx512vl ;
:VPCMPGTQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x37; KReg_reg ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpgtq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	KReg_reg = zext(AVXOpMask[0,2]) & tmp;
}

# PCMPGTQ 4-263 PAGE 1383 LINE 71841
:VPCMPGTQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x37; KReg_reg ...
  & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpgtq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	KReg_reg = zext(AVXOpMask[0,4]) & tmp;
}

# PCMPGTQ 4-263 PAGE 1383 LINE 71849
define pcodeop vpcmpgtq_avx512f ;
:VPCMPGTQ KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x37; KReg_reg ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpgtq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# EVEX element extract (VPEXTRB/W/D/Q): shift the selected element of the Xmm
# source down by imm8 * element-width bits and truncate to the element size.
# Register destinations in 64-bit mode write the full 64-bit register
# (zero-extended), hence the separate LONGMODE_ON/LONGMODE_OFF constructors.

# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72334
@ifdef IA64
:VPEXTRB Reg32, XmmReg1, imm8 is $(LONGMODE_ON) & $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x14; XmmReg1 & mod=3 & Reg32 & Reg64; imm8
{
	local tmp = XmmReg1 >> (imm8*8);
	Reg64 = zext(tmp[0,8]);
}
@endif
:VPEXTRB Reg32, XmmReg1, imm8 is $(LONGMODE_OFF) & $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x14; XmmReg1 & mod=3 & Reg32; imm8
{
	local tmp = XmmReg1 >> (imm8*8);
	Reg32 = zext(tmp[0,8]);
}
:VPEXTRB m8, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x14; XmmReg1 ... & m8; imm8
{
	local tmp = XmmReg1 >> (imm8*8);
	m8 = tmp[0,8];
}

# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72339
:VPEXTRD rm32, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x16; XmmReg1 ... & rm32; imm8
{
	local tmp = XmmReg1 >> (imm8*32);
	rm32 = tmp[0,32];
}

# PEXTRB/PEXTRD/PEXTRQ 4-274 PAGE 1394 LINE 72343
@ifdef IA64
:VPEXTRQ rm64, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x16; XmmReg1 ... & rm64; imm8
{
	local tmp = XmmReg1 >> (imm8*64);
	rm64 = tmp[0,64];
}
@endif

# PEXTRW 4-277 PAGE 1397 LINE 72488
@ifdef IA64
:VPEXTRW Reg32, XmmReg2, imm8 is $(LONGMODE_ON) & $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xC5; Reg32 & Reg64 & (mod=0x3 & XmmReg2); imm8
{
	local tmp = XmmReg2 >> (imm8*16);
	Reg64 = zext(tmp[0,16]);
}
@endif
:VPEXTRW Reg32, XmmReg2, imm8 is $(LONGMODE_OFF) & $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xC5; Reg32 & (mod=0x3 & XmmReg2); imm8
{
	local tmp = XmmReg2 >> (imm8*16);
	Reg32 = zext(tmp[0,16]);
}

# PEXTRW 4-277 PAGE 1397 LINE 72494
@ifdef IA64
:VPEXTRW Reg32, XmmReg1, imm8 is $(LONGMODE_ON) & $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x15; XmmReg1 & mod=3 & Reg32 & Reg64; imm8
{
	local tmp = XmmReg1 >> (imm8*16);
	Reg64 = zext(tmp[0,16]);
}
@endif
:VPEXTRW Reg32, XmmReg1, imm8 is $(LONGMODE_OFF) & $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x15; XmmReg1 & mod=3 & Reg32; imm8
{
	local tmp = XmmReg1 >> (imm8*16);
	Reg32 = zext(tmp[0,16]);
}
:VPEXTRW m16, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0x15; XmmReg1 ... & m16; imm8
{
	local tmp = XmmReg1 >> (imm8*16);
	m16 = tmp[0,16];
}

# EVEX element insert (VPINSRB/W/D/Q): insertion is modeled as an opaque pcodeop
# producing a 128-bit result; the destination Zmm is written zero-extended.

# PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73330
define pcodeop vpinsrb_avx512bw ;
:VPINSRB XmmReg1, evexV5_XmmReg, Reg32_m8, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_XmmReg; byte=0x20; (XmmReg1 & ZmmReg1) ... & Reg32_m8; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI)
{
	local tmp:16 = vpinsrb_avx512bw( evexV5_XmmReg, Reg32_m8, imm8:1 );
	ZmmReg1 = zext(tmp);
}

# PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73333
define pcodeop vpinsrd_avx512dq ;
:VPINSRD XmmReg1, evexV5_XmmReg, rm32, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ...
  & rm32; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI)
{
	local tmp:16 = vpinsrd_avx512dq( evexV5_XmmReg, rm32, imm8:1 );
	ZmmReg1 = zext(tmp);
}

# PINSRB/PINSRD/PINSRQ 4-293 PAGE 1413 LINE 73336
define pcodeop vpinsrq_avx512dq ;
@ifdef IA64
:VPINSRQ XmmReg1, evexV5_XmmReg, rm64, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x22; (XmmReg1 & ZmmReg1) ... & rm64; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI)
{
	local tmp:16 = vpinsrq_avx512dq( evexV5_XmmReg, rm64, imm8:1 );
	ZmmReg1 = zext(tmp);
}
@endif

# PINSRW 4-296 PAGE 1416 LINE 73449
define pcodeop vpinsrw_avx512bw ;
:VPINSRW XmmReg1, evexV5_XmmReg, Reg32_m16, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xC4; (XmmReg1 & ZmmReg1) ... & Reg32_m16; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S-RVMI)
{
	local tmp:16 = vpinsrw_avx512bw( evexV5_XmmReg, Reg32_m16, imm8:1 );
	ZmmReg1 = zext(tmp);
}

# EVEX masked arithmetic (VPMADD*/VPMAXS*): the operation itself is an opaque
# pcodeop; the XmmResult/XmmMask assignments plus "build <OpMask>" perform the
# merge of the result into the old destination under the element write-mask,
# and the final assignment zero-extends into the containing Zmm register.

# PMADDUBSW 4-298 PAGE 1418 LINE 73558
define pcodeop vpmaddubsw_avx512vl ;
:VPMADDUBSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x04; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmaddubsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMADDUBSW 4-298 PAGE 1418 LINE 73562
:VPMADDUBSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x04; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmaddubsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMADDUBSW 4-298 PAGE 1418 LINE 73566
define pcodeop vpmaddubsw_avx512bw ;
:VPMADDUBSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x04; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmaddubsw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMADDWD 4-301 PAGE 1421 LINE 73708
define pcodeop vpmaddwd_avx512vl ;
:VPMADDWD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF5; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmaddwd_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# PMADDWD 4-301 PAGE 1421 LINE 73712
:VPMADDWD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF5; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmaddwd_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# PMADDWD 4-301 PAGE 1421 LINE 73716
define pcodeop vpmaddwd_avx512bw ;
:VPMADDWD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xF5; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmaddwd_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73900
define pcodeop vpmaxsb_avx512vl ;
:VPMAXSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x3C; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmaxsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask8;
	ZmmReg1 = zext(XmmResult);
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73903
:VPMAXSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x3C; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmaxsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask8;
	ZmmReg1 = zext(YmmResult);
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73906
define pcodeop vpmaxsb_avx512bw ;
:VPMAXSB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x3C; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmaxsb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask8;
	ZmmReg1 = ZmmResult;
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73909
define pcodeop vpmaxsw_avx512vl ;
:VPMAXSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xEE; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmaxsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73912
:VPMAXSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xEE; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmaxsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73915
define pcodeop vpmaxsw_avx512bw ;
:VPMAXSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xEE; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmaxsw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-304 PAGE 1424 LINE 73918
define pcodeop vpmaxsd_avx512vl ;
:VPMAXSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpmaxsd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73933
:VPMAXSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpmaxsd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73936 define pcodeop vpmaxsd_avx512f ; :VPMAXSD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x3D; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpmaxsd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73939 define pcodeop vpmaxsq_avx512vl ; :VPMAXSQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x3D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpmaxsq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73942 :VPMAXSQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x3D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpmaxsq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PMAXSB/PMAXSW/PMAXSD/PMAXSQ 4-305 PAGE 1425 LINE 73945 define pcodeop vpmaxsq_avx512f ; :VPMAXSQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x3D; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpmaxsq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74295 define pcodeop vpmaxub_avx512vl ; :VPMAXUB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDE; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpmaxub_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74298 :VPMAXUB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDE; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpmaxub_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74301 define pcodeop vpmaxub_avx512bw ; :VPMAXUB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDE; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpmaxub_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74304 define pcodeop vpmaxuw_avx512vl ; :VPMAXUW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x3E; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpmaxuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74307 :VPMAXUW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x3E; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpmaxuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PMAXUB/PMAXUW 4-311 PAGE 1431 LINE 74310 define pcodeop vpmaxuw_avx512bw ; :VPMAXUW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x3E; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpmaxuw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74540 define pcodeop vpmaxud_avx512vl ; :VPMAXUD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpmaxud_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74543 :VPMAXUD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpmaxud_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74546 define pcodeop vpmaxud_avx512f ; :VPMAXUD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x3F; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpmaxud_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74549 define pcodeop vpmaxuq_avx512vl ; :VPMAXUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x3F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpmaxuq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74552 :VPMAXUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x3F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpmaxuq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PMAXUD/PMAXUQ 4-316 PAGE 1436 LINE 74555 define pcodeop vpmaxuq_avx512f ; :VPMAXUQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x3F; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpmaxuq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74748 define pcodeop vpminsb_avx512vl ; :VPMINSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x38; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpminsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74751 :VPMINSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpminsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74754 define pcodeop vpminsb_avx512bw ; :VPMINSB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x38; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpminsb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74757 define pcodeop vpminsw_avx512vl ; :VPMINSW XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xEA; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpminsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74760 :VPMINSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xEA; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpminsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PMINSB/PMINSW 4-320 PAGE 1440 LINE 74763 define pcodeop vpminsw_avx512bw ; :VPMINSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xEA; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpminsw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 74995 define pcodeop vpminsd_avx512vl ; :VPMINSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpminsd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMINSD/PMINSQ 4-325 PAGE 1445 LINE 74998 :VPMINSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
  & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpminsd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# EVEX masked min (VPMINSD/SQ continued, VPMINU*) and sign-extending moves
# (VPMOVSX*): the operation is modeled as an opaque pcodeop; results are merged
# into the old destination under the element write-mask via the
# *Result/*Mask + "build <OpMask>" idiom.

# PMINSD/PMINSQ 4-325 PAGE 1445 LINE 75001
define pcodeop vpminsd_avx512f ;
:VPMINSD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x39; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpminsd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PMINSD/PMINSQ 4-325 PAGE 1445 LINE 75004
define pcodeop vpminsq_avx512vl ;
:VPMINSQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x39; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpminsq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMINSD/PMINSQ 4-325 PAGE 1445 LINE 75007
:VPMINSQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x39; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpminsq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMINSD/PMINSQ 4-325 PAGE 1445 LINE 75010
define pcodeop vpminsq_avx512f ;
:VPMINSQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x39; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpminsq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMINUB/PMINUW 4-329 PAGE 1449 LINE 75207
# NOTE(review): the VPMINUB/VPMINUW constructors omit $(VEX_WIG); W is ignored
# for these encodings, so the missing constraint does not change matching.
define pcodeop vpminub_avx512vl ;
:VPMINUB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & evexV5_XmmReg; byte=0xDA; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpminub_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask8;
	ZmmReg1 = zext(XmmResult);
}

# PMINUB/PMINUW 4-329 PAGE 1449 LINE 75210
:VPMINUB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & evexV5_YmmReg; byte=0xDA; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpminub_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask8;
	ZmmReg1 = zext(YmmResult);
}

# PMINUB/PMINUW 4-329 PAGE 1449 LINE 75213
define pcodeop vpminub_avx512bw ;
:VPMINUB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & evexV5_ZmmReg; byte=0xDA; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpminub_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask8;
	ZmmReg1 = ZmmResult;
}

# PMINUB/PMINUW 4-329 PAGE 1449 LINE 75216
define pcodeop vpminuw_avx512vl ;
:VPMINUW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & evexV5_XmmReg; byte=0x3A; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpminuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMINUB/PMINUW 4-329 PAGE 1449 LINE 75219
:VPMINUW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & evexV5_YmmReg; byte=0x3A; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpminuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMINUB/PMINUW 4-329 PAGE 1449 LINE 75222
define pcodeop vpminuw_avx512bw ;
:VPMINUW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & evexV5_ZmmReg; byte=0x3A; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpminuw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75451
define pcodeop vpminud_avx512vl ;
:VPMINUD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpminud_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75454
:VPMINUD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpminud_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75457
define pcodeop vpminud_avx512f ;
:VPMINUD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x3B; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpminud_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75460
define pcodeop vpminuq_avx512vl ;
:VPMINUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x3B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpminuq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75463
:VPMINUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x3B; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpminuq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMINUD/PMINUQ 4-334 PAGE 1454 LINE 75466
define pcodeop vpminuq_avx512f ;
:VPMINUQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x3B; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpminuq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMOVSX 4-340 PAGE 1460 LINE 75796
define pcodeop vpmovsxbw_avx512vl ;
:VPMOVSXBW XmmReg1^XmmOpMask16, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x20; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovsxbw_avx512vl( XmmReg2_m64 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMOVSX 4-340 PAGE 1460 LINE 75799
:VPMOVSXBW YmmReg1^YmmOpMask16, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x20; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovsxbw_avx512vl( XmmReg2_m128 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMOVSX 4-340 PAGE 1460 LINE 75802
define pcodeop vpmovsxbw_avx512bw ;
:VPMOVSXBW ZmmReg1^ZmmOpMask16, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x20; (ZmmReg1 & ZmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovsxbw_avx512bw( YmmReg2_m256 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMOVSX 4-340 PAGE 1460 LINE 75805
define pcodeop vpmovsxbd_avx512vl ;
:VPMOVSXBD XmmReg1^XmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x21; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { XmmResult = vpmovsxbd_avx512vl( XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMOVSX 4-341 PAGE 1461 LINE 75819 :VPMOVSXBD YmmReg1^YmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x21; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { YmmResult = vpmovsxbd_avx512vl( XmmReg2_m64 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PMOVSX 4-341 PAGE 1461 LINE 75822 define pcodeop vpmovsxbd_avx512f ; :VPMOVSXBD ZmmReg1^ZmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x21; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { ZmmResult = vpmovsxbd_avx512f( XmmReg2_m128 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # PMOVSX 4-341 PAGE 1461 LINE 75825 define pcodeop vpmovsxbq_avx512vl ; :VPMOVSXBQ XmmReg1^XmmOpMask64, XmmReg2_m16 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x22; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m16 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { XmmResult = vpmovsxbq_avx512vl( XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # PMOVSX 4-341 PAGE 1461 LINE 75828 :VPMOVSXBQ YmmReg1^YmmOpMask64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x22; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { YmmResult = vpmovsxbq_avx512vl( XmmReg2_m32 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # PMOVSX 4-341 PAGE 1461 LINE 75831 define pcodeop vpmovsxbq_avx512f ; :VPMOVSXBQ ZmmReg1^ZmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x22; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { ZmmResult = vpmovsxbq_avx512f( XmmReg2_m64 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # PMOVSX 4-341 PAGE 1461 LINE 75834 define pcodeop vpmovsxwd_avx512vl ; :VPMOVSXWD XmmReg1^XmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x23; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { XmmResult = vpmovsxwd_avx512vl( XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # PMOVSX 4-341 PAGE 1461 LINE 75837 :VPMOVSXWD YmmReg1^YmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM) { YmmResult = vpmovsxwd_avx512vl( XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # PMOVSX 4-341 PAGE 1461 LINE 75840 define pcodeop vpmovsxwd_avx512f ; :VPMOVSXWD ZmmReg1^ZmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x23; (ZmmReg1 & ZmmOpMask32) ... 
& YmmReg2_m256
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovsxwd_avx512f( YmmReg2_m256 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# Remaining sign-extending moves (PMOVSXWQ/PMOVSXDQ) followed by the
# zero-extending packed moves (PMOVZX*). Same masked-write pattern as above;
# the pcodeop models the element-wise widening, the opmask "build" merges
# with the previous destination contents.

# PMOVSX 4-341 PAGE 1461 LINE 75843
define pcodeop vpmovsxwq_avx512vl ;
:VPMOVSXWQ XmmReg1^XmmOpMask64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x24; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovsxwq_avx512vl( XmmReg2_m32 );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMOVSX 4-341 PAGE 1461 LINE 75846
:VPMOVSXWQ YmmReg1^YmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x24; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovsxwq_avx512vl( XmmReg2_m64 );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMOVSX 4-341 PAGE 1461 LINE 75849
define pcodeop vpmovsxwq_avx512f ;
:VPMOVSXWQ ZmmReg1^ZmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x24; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovsxwq_avx512f( XmmReg2_m128 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMOVSX 4-341 PAGE 1461 LINE 75852
define pcodeop vpmovsxdq_avx512vl ;
:VPMOVSXDQ XmmReg1^XmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovsxdq_avx512vl( XmmReg2_m64 );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMOVSX 4-341 PAGE 1461 LINE 75855
:VPMOVSXDQ YmmReg1^YmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovsxdq_avx512vl( XmmReg2_m128 );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMOVSX 4-341 PAGE 1461 LINE 75858
define pcodeop vpmovsxdq_avx512f ;
:VPMOVSXDQ ZmmReg1^ZmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x25; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovsxdq_avx512f( YmmReg2_m256 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMOVZX 4-351 PAGE 1471 LINE 76329
define pcodeop vpmovzxbw_avx512vl ;
:VPMOVZXBW XmmReg1^XmmOpMask16, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x30; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovzxbw_avx512vl( XmmReg2_m64 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76332
:VPMOVZXBW YmmReg1^YmmOpMask16, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x30; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovzxbw_avx512vl( XmmReg2_m128 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76335
define pcodeop vpmovzxbw_avx512bw ;
:VPMOVZXBW ZmmReg1^ZmmOpMask16, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x30; (ZmmReg1 & ZmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovzxbw_avx512bw( YmmReg2_m256 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMOVZX 4-351 PAGE 1471 LINE 76338
define pcodeop vpmovzxbd_avx512vl ;
:VPMOVZXBD XmmReg1^XmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x31; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovzxbd_avx512vl( XmmReg2_m32 );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76341
:VPMOVZXBD YmmReg1^YmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x31; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovzxbd_avx512vl( XmmReg2_m64 );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76344
define pcodeop vpmovzxbd_avx512f ;
:VPMOVZXBD ZmmReg1^ZmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x31; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovzxbd_avx512f( XmmReg2_m128 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PMOVZX 4-351 PAGE 1471 LINE 76347
define pcodeop vpmovzxbq_avx512vl ;
:VPMOVZXBQ XmmReg1^XmmOpMask64, XmmReg2_m16 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x32; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m16
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovzxbq_avx512vl( XmmReg2_m16 );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76350
:VPMOVZXBQ YmmReg1^YmmOpMask64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x32; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovzxbq_avx512vl( XmmReg2_m32 );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76353
define pcodeop vpmovzxbq_avx512f ;
:VPMOVZXBQ ZmmReg1^ZmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x32; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovzxbq_avx512f( XmmReg2_m64 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMOVZX 4-351 PAGE 1471 LINE 76356
define pcodeop vpmovzxwd_avx512vl ;
:VPMOVZXWD XmmReg1^XmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x33; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovzxwd_avx512vl( XmmReg2_m64 );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76359
:VPMOVZXWD YmmReg1^YmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x33; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovzxwd_avx512vl( XmmReg2_m128 );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76362
define pcodeop vpmovzxwd_avx512f ;
:VPMOVZXWD ZmmReg1^ZmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x33; (ZmmReg1 & ZmmOpMask32) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovzxwd_avx512f( YmmReg2_m256 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PMOVZX 4-351 PAGE 1471 LINE 76365
define pcodeop vpmovzxwq_avx512vl ;
:VPMOVZXWQ XmmReg1^XmmOpMask64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x34; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovzxwq_avx512vl( XmmReg2_m32 );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76368
:VPMOVZXWQ YmmReg1^YmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x34; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovzxwq_avx512vl( XmmReg2_m64 );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMOVZX 4-351 PAGE 1471 LINE 76371
define pcodeop vpmovzxwq_avx512f ;
:VPMOVZXWQ ZmmReg1^ZmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) ; byte=0x34; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovzxwq_avx512f( XmmReg2_m128 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMOVZX 4-352 PAGE 1472 LINE 76386
define pcodeop vpmovzxdq_avx512vl ;
:VPMOVZXDQ XmmReg1^XmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x35; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	XmmResult = vpmovzxdq_avx512vl( XmmReg2_m64 );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMOVZX 4-352 PAGE 1472 LINE 76389
:VPMOVZXDQ YmmReg1^YmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x35; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	YmmResult = vpmovzxdq_avx512vl( XmmReg2_m128 );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMOVZX 4-352 PAGE 1472 LINE 76392
define pcodeop vpmovzxdq_avx512f ;
:VPMOVZXDQ ZmmReg1^ZmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x35; (ZmmReg1 & ZmmOpMask64) ...
& YmmReg2_m256
[ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM,QVM,OVM)
{
	ZmmResult = vpmovzxdq_avx512f( YmmReg2_m256 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# EVEX-encoded packed multiply family: PMULDQ, PMULHRSW, PMULHUW, PMULHW,
# PMULLD/PMULLQ, PMULLW and the start of PMULUDQ. Semantics are deferred to
# per-instruction pcodeops; masking follows the common XmmMask/YmmMask/
# ZmmMask + "build <mask>" merge pattern used throughout this file.

# PMULDQ 4-359 PAGE 1479 LINE 76794
define pcodeop vpmuldq_avx512vl ;
:VPMULDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x28; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpmuldq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMULDQ 4-359 PAGE 1479 LINE 76798
:VPMULDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x28; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpmuldq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMULDQ 4-359 PAGE 1479 LINE 76802
define pcodeop vpmuldq_avx512f ;
:VPMULDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x28; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpmuldq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMULHRSW 4-362 PAGE 1482 LINE 76934
define pcodeop vpmulhrsw_avx512vl ;
:VPMULHRSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmulhrsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMULHRSW 4-362 PAGE 1482 LINE 76937
:VPMULHRSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x0B; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmulhrsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMULHRSW 4-362 PAGE 1482 LINE 76940
define pcodeop vpmulhrsw_avx512bw ;
:VPMULHRSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x0B; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmulhrsw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMULHUW 4-366 PAGE 1486 LINE 77147
define pcodeop vpmulhuw_avx512vl ;
:VPMULHUW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE4; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmulhuw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMULHUW 4-366 PAGE 1486 LINE 77151
:VPMULHUW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE4; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmulhuw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMULHUW 4-366 PAGE 1486 LINE 77155
define pcodeop vpmulhuw_avx512bw ;
:VPMULHUW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE4; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmulhuw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMULHW 4-370 PAGE 1490 LINE 77376
define pcodeop vpmulhw_avx512vl ;
:VPMULHW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE5; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmulhw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMULHW 4-370 PAGE 1490 LINE 77379
:VPMULHW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE5; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmulhw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMULHW 4-370 PAGE 1490 LINE 77382
define pcodeop vpmulhw_avx512bw ;
:VPMULHW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE5; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmulhw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77582
define pcodeop vpmulld_avx512vl ;
:VPMULLD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpmulld_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77585
:VPMULLD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpmulld_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77588
define pcodeop vpmulld_avx512f ;
:VPMULLD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x40; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpmulld_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77591
define pcodeop vpmullq_avx512vl ;
:VPMULLQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x40; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpmullq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77594
:VPMULLQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x40; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpmullq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMULLD/PMULLQ 4-374 PAGE 1494 LINE 77597
define pcodeop vpmullq_avx512dq ;
:VPMULLQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x40; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vpmullq_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask64;
	ZmmReg1 = ZmmResult;
}

# PMULLW 4-378 PAGE 1498 LINE 77781
define pcodeop vpmullw_avx512vl ;
:VPMULLW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD5; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	XmmResult = vpmullw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
	XmmMask = XmmReg1;
	build XmmOpMask16;
	ZmmReg1 = zext(XmmResult);
}

# PMULLW 4-378 PAGE 1498 LINE 77784
:VPMULLW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD5; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	YmmResult = vpmullw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
	YmmMask = YmmReg1;
	build YmmOpMask16;
	ZmmReg1 = zext(YmmResult);
}

# PMULLW 4-378 PAGE 1498 LINE 77787
define pcodeop vpmullw_avx512bw ;
:VPMULLW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xD5; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	ZmmResult = vpmullw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
	ZmmMask = ZmmReg1;
	build ZmmOpMask16;
	ZmmReg1 = ZmmResult;
}

# PMULUDQ 4-382 PAGE 1502 LINE 77977
define pcodeop vpmuludq_avx512vl ;
:VPMULUDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xF4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpmuludq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
	XmmMask = XmmReg1;
	build XmmOpMask64;
	ZmmReg1 = zext(XmmResult);
}

# PMULUDQ 4-382 PAGE 1502 LINE 77981
:VPMULUDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xF4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vpmuludq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
	YmmMask = YmmReg1;
	build YmmOpMask64;
	ZmmReg1 = zext(YmmResult);
}

# PMULUDQ 4-382 PAGE 1502 LINE 77985
define pcodeop vpmuludq_avx512f ;
:VPMULUDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xF4; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpmuludq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # POR 4-399 PAGE 1519 LINE 78854 define pcodeop vpord_avx512vl ; :VPORD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpord_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # POR 4-399 PAGE 1519 LINE 78857 :VPORD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpord_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # POR 4-399 PAGE 1519 LINE 78860 define pcodeop vpord_avx512f ; :VPORD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xEB; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpord_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # POR 4-399 PAGE 1519 LINE 78863 define pcodeop vporq_avx512vl ; :VPORQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xEB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
    XmmResult = vporq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
    XmmMask = XmmReg1;
    build XmmOpMask64;
    ZmmReg1 = zext(XmmResult);
}

# NOTE(review): EVEX-encoded AVX-512 constructors auto-generated from the Intel
# SDM (the "PAGE ... LINE ..." comments give the SDM source location).
# Instruction semantics are deliberately opaque pcodeops; the statements around
# each call only implement merge-masking (XmmMask/YmmMask/ZmmMask + build
# *OpMask*) and zero-extension of the result into the full ZMM destination.

# POR 4-399 PAGE 1519 LINE 78866
:VPORQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xEB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
    YmmResult = vporq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
    YmmMask = YmmReg1;
    build YmmOpMask64;
    ZmmReg1 = zext(YmmResult);
}

# POR 4-399 PAGE 1519 LINE 78869
define pcodeop vporq_avx512f ;
:VPORQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xEB; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
    ZmmResult = vporq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask64;
    ZmmReg1 = ZmmResult;
}

# PSADBW 4-408 PAGE 1528 LINE 79250
define pcodeop vpsadbw_avx512vl ;
:VPSADBW XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    local tmp:16 = vpsadbw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    ZmmReg1 = zext(tmp);
}

# PSADBW 4-408 PAGE 1528 LINE 79255
:VPSADBW YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    YmmResult = vpsadbw_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
    YmmMask = YmmReg1;
    ZmmReg1 = zext(YmmResult);
}

# PSADBW 4-408 PAGE 1528 LINE 79260
define pcodeop vpsadbw_avx512bw ;
:VPSADBW ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xF6; ZmmReg1 ... & ZmmReg2_m512
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    ZmmResult = vpsadbw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
    ZmmMask = ZmmReg1;
    ZmmReg1 = ZmmResult;
}

# PSHUFB 4-412 PAGE 1532 LINE 79466
define pcodeop vpshufb_avx512vl ;
:VPSHUFB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0x00; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    XmmResult = vpshufb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask8;
    ZmmReg1 = zext(XmmResult);
}

# PSHUFB 4-412 PAGE 1532 LINE 79468
:VPSHUFB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0x00; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    YmmResult = vpshufb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
    YmmMask = YmmReg1;
    build YmmOpMask8;
    ZmmReg1 = zext(YmmResult);
}

# PSHUFB 4-412 PAGE 1532 LINE 79470
define pcodeop vpshufb_avx512bw ;
:VPSHUFB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x00; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    ZmmResult = vpshufb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask8;
    ZmmReg1 = ZmmResult;
}

# PSHUFD 4-416 PAGE 1536 LINE 79656
define pcodeop vpshufd_avx512vl ;
:VPSHUFD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x70; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
    XmmResult = vpshufd_avx512vl( XmmReg2_m128_m32bcst, imm8:1 );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# PSHUFD 4-416 PAGE 1536 LINE 79659
:VPSHUFD YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x70; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
    YmmResult = vpshufd_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# PSHUFD 4-416 PAGE 1536 LINE 79662
define pcodeop vpshufd_avx512f ;
:VPSHUFD ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x70; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
    ZmmResult = vpshufd_avx512f( ZmmReg2_m512_m32bcst, imm8:1 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# PSHUFHW 4-420 PAGE 1540 LINE 79863
define pcodeop vpshufhw_avx512vl ;
:VPSHUFHW XmmReg1^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) ; byte=0x70; (XmmReg1 & ZmmReg1 & XmmOpMask16) ...
& XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    XmmResult = vpshufhw_avx512vl( XmmReg2_m128, imm8:1 );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# NOTE(review): VPSHUFHW/VPSHUFLW immediate shuffles, the VPSLLDQ byte shift,
# and the register-count VPSLLW forms; auto-generated from the Intel SDM pages
# cited below, with semantics kept opaque behind pcodeops.

# PSHUFHW 4-420 PAGE 1540 LINE 79866
:VPSHUFHW YmmReg1^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) ; byte=0x70; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    YmmResult = vpshufhw_avx512vl( YmmReg2_m256, imm8:1 );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# PSHUFHW 4-420 PAGE 1540 LINE 79869
define pcodeop vpshufhw_avx512bw ;
:VPSHUFHW ZmmReg1^ZmmOpMask16, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_WIG) ; byte=0x70; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    ZmmResult = vpshufhw_avx512bw( ZmmReg2_m512, imm8:1 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# PSHUFLW 4-423 PAGE 1543 LINE 80038
define pcodeop vpshuflw_avx512vl ;
:VPSHUFLW XmmReg1^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) ; byte=0x70; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    XmmResult = vpshuflw_avx512vl( XmmReg2_m128, imm8:1 );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# PSHUFLW 4-423 PAGE 1543 LINE 80041
:VPSHUFLW YmmReg1^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) ; byte=0x70; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    YmmResult = vpshuflw_avx512vl( YmmReg2_m256, imm8:1 );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# PSHUFLW 4-423 PAGE 1543 LINE 80044
define pcodeop vpshuflw_avx512bw ;
:VPSHUFLW ZmmReg1^ZmmOpMask16, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) ; byte=0x70; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    ZmmResult = vpshuflw_avx512bw( ZmmReg2_m512, imm8:1 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# PSLLDQ 4-431 PAGE 1551 LINE 80491
define pcodeop vpslldq_avx512vl ;
:VPSLLDQ evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=7 ... & XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    local tmp:64 = vpslldq_avx512vl( XmmReg2_m128, imm8:1 );
    evexV5_ZmmReg = zext(tmp);
}

# PSLLDQ 4-431 PAGE 1551 LINE 80493
:VPSLLDQ evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=7 ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    local tmp:64 = vpslldq_avx512vl( YmmReg2_m256, imm8:1 );
    evexV5_ZmmReg = zext(tmp);
}

# PSLLDQ 4-431 PAGE 1551 LINE 80495
define pcodeop vpslldq_avx512bw ;
:VPSLLDQ evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x73; reg_opcode=7 ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    evexV5_ZmmReg = vpslldq_avx512bw( ZmmReg2_m512, imm8:1 );
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80667
define pcodeop vpsllw_avx512vl ;
:VPSLLW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsllw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80670
:VPSLLW YmmReg1^YmmOpMask16, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsllw_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80673
define pcodeop vpsllw_avx512bw ;
:VPSLLW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xF1; (ZmmReg1 & ZmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsllw_avx512bw( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80676
:VPSLLW evexV5_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=6 ...
& XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    XmmResult = vpsllw_avx512vl( XmmReg2_m128, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask16;
    evexV5_ZmmReg = zext(XmmResult);
}

# NOTE(review): remaining VPSLLW immediate forms plus the VPSLLD/VPSLLQ
# shift-left family (both xmm-count byte=0xF2/0xF3 and immediate-count
# byte=0x72/0x73 reg_opcode=6 encodings); generated from the SDM pages below.
# In the immediate forms the destination is encoded in EVEX.vvvv
# (evexV5_*Reg), not in modrm.reg.

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80678
:VPSLLW evexV5_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=6 ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    YmmResult = vpsllw_avx512vl( YmmReg2_m256, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask16;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80680
:VPSLLW evexV5_ZmmReg^ZmmOpMask16, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg & ZmmOpMask16; byte=0x71; reg_opcode=6 ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    ZmmResult = vpsllw_avx512bw( ZmmReg2_m512, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask16;
    evexV5_ZmmReg = ZmmResult;
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80682
define pcodeop vpslld_avx512vl ;
:VPSLLD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xF2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpslld_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80685
:VPSLLD YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xF2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpslld_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80688
define pcodeop vpslld_avx512f ;
:VPSLLD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xF2; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpslld_avx512f( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80691
:VPSLLD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=6 ... & XmmReg2_m128_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    XmmResult = vpslld_avx512vl( XmmReg2_m128_m32bcst, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask32;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80694
:VPSLLD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=6 ... & YmmReg2_m256_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    YmmResult = vpslld_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask32;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80697
:VPSLLD evexV5_ZmmReg^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg & ZmmOpMask32; byte=0x72; reg_opcode=6 ... & ZmmReg2_m512_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    ZmmResult = vpslld_avx512f( ZmmReg2_m512_m32bcst, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask32;
    evexV5_ZmmReg = ZmmResult;
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80700
define pcodeop vpsllq_avx512vl ;
:VPSLLQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xF3; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsllq_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask64;
    ZmmReg1 = zext(XmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80703
:VPSLLQ YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xF3; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsllq_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask64;
    ZmmReg1 = zext(YmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-434 PAGE 1554 LINE 80706
define pcodeop vpsllq_avx512f ;
:VPSLLQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xF3; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsllq_avx512f( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask64;
    ZmmReg1 = ZmmResult;
}

# PSLLW/PSLLD/PSLLQ 4-435 PAGE 1555 LINE 80721
:VPSLLQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x73; reg_opcode=6 ... & XmmReg2_m128_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    XmmResult = vpsllq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask64;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-435 PAGE 1555 LINE 80724
:VPSLLQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x73; reg_opcode=6 ... & YmmReg2_m256_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    YmmResult = vpsllq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask64;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSLLW/PSLLD/PSLLQ 4-435 PAGE 1555 LINE 80727
:VPSLLQ evexV5_ZmmReg^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg & ZmmOpMask64; byte=0x73; reg_opcode=6 ... & ZmmReg2_m512_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    ZmmResult = vpsllq_avx512f( ZmmReg2_m512_m64bcst, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask64;
    evexV5_ZmmReg = ZmmResult;
}

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81329
define pcodeop vpsraw_avx512vl ;
:VPSRAW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsraw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81332
:VPSRAW YmmReg1^YmmOpMask16, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ...
& XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsraw_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# NOTE(review): VPSRA* arithmetic right shift family (xmm-count byte=0xE1/0xE2
# and immediate-count byte=0x71/0x72 reg_opcode=4 encodings) and the VPSRLDQ
# byte shift; generated from the SDM pages cited below with semantics kept
# opaque behind pcodeops.

# PSRAW/PSRAD/PSRAQ 4-445 PAGE 1565 LINE 81335
define pcodeop vpsraw_avx512bw ;
:VPSRAW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE1; (ZmmReg1 & ZmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsraw_avx512bw( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81350
:VPSRAW evexV5_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=4 ... & XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    XmmResult = vpsraw_avx512vl( XmmReg2_m128, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask16;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81352
:VPSRAW evexV5_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=4 ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    YmmResult = vpsraw_avx512vl( YmmReg2_m256, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask16;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81354
:VPSRAW evexV5_ZmmReg^ZmmOpMask16, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg & ZmmOpMask16; byte=0x71; reg_opcode=4 ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVMI)
{
    ZmmResult = vpsraw_avx512bw( ZmmReg2_m512, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask16;
    evexV5_ZmmReg = ZmmResult;
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81356
define pcodeop vpsrad_avx512vl ;
:VPSRAD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsrad_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81359
:VPSRAD YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsrad_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81362
define pcodeop vpsrad_avx512f ;
:VPSRAD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xE2; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsrad_avx512f( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81365
:VPSRAD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=4 ... & XmmReg2_m128_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    XmmResult = vpsrad_avx512vl( XmmReg2_m128_m32bcst, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask32;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81368
:VPSRAD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=4 ... & YmmReg2_m256_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    YmmResult = vpsrad_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask32;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81371
:VPSRAD evexV5_ZmmReg^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg & ZmmOpMask32; byte=0x72; reg_opcode=4 ... & ZmmReg2_m512_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    ZmmResult = vpsrad_avx512f( ZmmReg2_m512_m32bcst, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask32;
    evexV5_ZmmReg = ZmmResult;
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81374
define pcodeop vpsraq_avx512vl ;
:VPSRAQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xE2; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsraq_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask64;
    ZmmReg1 = zext(XmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81377
:VPSRAQ YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xE2; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsraq_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask64;
    ZmmReg1 = zext(YmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81380
define pcodeop vpsraq_avx512f ;
:VPSRAQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xE2; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsraq_avx512f( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask64;
    ZmmReg1 = ZmmResult;
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81383
:VPSRAQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=4 ... & XmmReg2_m128_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    XmmResult = vpsraq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask64;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81386
:VPSRAQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=4 ... & YmmReg2_m256_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    YmmResult = vpsraq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask64;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSRAW/PSRAD/PSRAQ 4-446 PAGE 1566 LINE 81389
:VPSRAQ evexV5_ZmmReg^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg & ZmmOpMask64; byte=0x72; reg_opcode=4 ... & ZmmReg2_m512_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    ZmmResult = vpsraq_avx512f( ZmmReg2_m512_m64bcst, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask64;
    evexV5_ZmmReg = ZmmResult;
}

# PSRLDQ 4-455 PAGE 1575 LINE 81879
define pcodeop vpsrldq_avx512vl ;
:VPSRLDQ evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=3 ... & XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    local tmp:64 = vpsrldq_avx512vl( XmmReg2_m128, imm8:1 );
    evexV5_ZmmReg = zext(tmp);
}

# PSRLDQ 4-455 PAGE 1575 LINE 81881
:VPSRLDQ evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg); byte=0x73; reg_opcode=3 ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    local tmp:64 = vpsrldq_avx512vl( YmmReg2_m256, imm8:1 );
    evexV5_ZmmReg = zext(tmp);
}

# PSRLDQ 4-455 PAGE 1575 LINE 81883
define pcodeop vpsrldq_avx512bw ;
:VPSRLDQ evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x73; reg_opcode=3 ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    evexV5_ZmmReg = vpsrldq_avx512bw( ZmmReg2_m512, imm8:1 );
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82059
define pcodeop vpsrlw_avx512vl ;
:VPSRLW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD1; (XmmReg1 & ZmmReg1 & XmmOpMask16) ...
& XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsrlw_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# NOTE(review): VPSRL* logical right shift family (xmm-count byte=0xD1/0xD2/0xD3
# and immediate-count byte=0x71/0x72/0x73 reg_opcode=2 encodings); generated
# from the SDM pages cited below with semantics kept opaque behind pcodeops.

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82062
:VPSRLW YmmReg1^YmmOpMask16, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD1; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsrlw_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82065
define pcodeop vpsrlw_avx512bw ;
:VPSRLW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xD1; (ZmmReg1 & ZmmOpMask16) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsrlw_avx512bw( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82068
:VPSRLW evexV5_XmmReg^XmmOpMask16, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask16; byte=0x71; reg_opcode=2 ... & XmmReg2_m128; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    XmmResult = vpsrlw_avx512vl( XmmReg2_m128, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask16;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82070
:VPSRLW evexV5_YmmReg^YmmOpMask16, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask16; byte=0x71; reg_opcode=2 ... & YmmReg2_m256; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    YmmResult = vpsrlw_avx512vl( YmmReg2_m256, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask16;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82072
:VPSRLW evexV5_ZmmReg^ZmmOpMask16, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg & ZmmOpMask16; byte=0x71; reg_opcode=2 ... & ZmmReg2_m512; imm8
  [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
    ZmmResult = vpsrlw_avx512bw( ZmmReg2_m512, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask16;
    evexV5_ZmmReg = ZmmResult;
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82074
define pcodeop vpsrld_avx512vl ;
:VPSRLD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xD2; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsrld_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82077
:VPSRLD YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xD2; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsrld_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82080
define pcodeop vpsrld_avx512f ;
:VPSRLD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xD2; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsrld_avx512f( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82084
:VPSRLD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=2 ... & XmmReg2_m128_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    XmmResult = vpsrld_avx512vl( XmmReg2_m128_m32bcst, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask32;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82088
:VPSRLD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=2 ... & YmmReg2_m256_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    YmmResult = vpsrld_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
    YmmMask = evexV5_YmmReg;
    build YmmOpMask32;
    evexV5_ZmmReg = zext(YmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82091
:VPSRLD evexV5_ZmmReg^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg & ZmmOpMask32; byte=0x72; reg_opcode=2 ... & ZmmReg2_m512_m32bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    ZmmResult = vpsrld_avx512f( ZmmReg2_m512_m32bcst, imm8:1 );
    ZmmMask = evexV5_ZmmReg;
    build ZmmOpMask32;
    evexV5_ZmmReg = ZmmResult;
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82094
define pcodeop vpsrlq_avx512vl ;
:VPSRLQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xD3; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    XmmResult = vpsrlq_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
    XmmMask = XmmReg1;
    build XmmOpMask64;
    ZmmReg1 = zext(XmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82097
:VPSRLQ YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xD3; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    YmmResult = vpsrlq_avx512vl( evexV5_YmmReg, XmmReg2_m128 );
    YmmMask = YmmReg1;
    build YmmOpMask64;
    ZmmReg1 = zext(YmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-458 PAGE 1578 LINE 82100
define pcodeop vpsrlq_avx512f ;
:VPSRLQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xD3; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m128
  [ evexD8Type = 1; evexTType = 12; ] # (TupleType M128)
{
    ZmmResult = vpsrlq_avx512f( evexV5_ZmmReg, XmmReg2_m128 );
    ZmmMask = ZmmReg1;
    build ZmmOpMask64;
    ZmmReg1 = ZmmResult;
}

# PSRLW/PSRLD/PSRLQ 4-459 PAGE 1579 LINE 82115
:VPSRLQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x73; reg_opcode=2 ... & XmmReg2_m128_m64bcst; imm8
  [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
    XmmResult = vpsrlq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 );
    XmmMask = evexV5_XmmReg;
    build XmmOpMask64;
    evexV5_ZmmReg = zext(XmmResult);
}

# PSRLW/PSRLD/PSRLQ 4-459 PAGE 1579 LINE 82119
:VPSRLQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x73; reg_opcode=2 ...
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { YmmResult = vpsrlq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = evexV5_YmmReg; build YmmOpMask64; evexV5_ZmmReg = zext(YmmResult); } # PSRLW/PSRLD/PSRLQ 4-459 PAGE 1579 LINE 82122 :VPSRLQ evexV5_ZmmReg^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg & ZmmOpMask64; byte=0x73; reg_opcode=2 ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI) { ZmmResult = vpsrlq_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = evexV5_ZmmReg; build ZmmOpMask64; evexV5_ZmmReg = ZmmResult; } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82702 define pcodeop vpsubb_avx512vl ; :VPSUBB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpsubb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82705 :VPSUBB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpsubb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82708 define pcodeop vpsubb_avx512bw ; :VPSUBB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xF8; (ZmmReg1 & ZmmOpMask8) ... 
# NOTE(review): continuation of the VPSUBB (EVEX.512) constructor started above.
& ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpsubb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; }

# PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82711
define pcodeop vpsubw_avx512vl ;
:VPSUBW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xF9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpsubw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82714
:VPSUBW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xF9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpsubw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# PSUBB/PSUBW/PSUBD 4-469 PAGE 1589 LINE 82717
define pcodeop vpsubw_avx512bw ;
:VPSUBW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xF9; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpsubw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# PSUBB/PSUBW/PSUBD 4-470 PAGE 1590 LINE 82733
define pcodeop vpsubd_avx512vl ;
:VPSUBD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xFA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpsubd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# PSUBB/PSUBW/PSUBD 4-470 PAGE 1590 LINE 82736
:VPSUBD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xFA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpsubd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# PSUBB/PSUBW/PSUBD 4-470 PAGE 1590 LINE 82743
define pcodeop vpsubd_avx512f ;
:VPSUBD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xFA; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpsubd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# PSUBQ 4-476 PAGE 1596 LINE 83111
define pcodeop vpsubq_avx512vl ;
:VPSUBQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xFB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpsubq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# PSUBQ 4-476 PAGE 1596 LINE 83114
:VPSUBQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xFB; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpsubq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# PSUBQ 4-476 PAGE 1596 LINE 83117
define pcodeop vpsubq_avx512f ;
:VPSUBQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xFB; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpsubq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83270
define pcodeop vpsubsb_avx512vl ;
:VPSUBSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpsubsb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); }

# PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83274
:VPSUBSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpsubsb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); }

# PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83278
define pcodeop vpsubsb_avx512bw ;
:VPSUBSB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE8; (ZmmReg1 & ZmmOpMask8) ...
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpsubsb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83282 define pcodeop vpsubsw_avx512vl ; :VPSUBSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xE9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpsubsw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # PSUBSB/PSUBSW 4-479 PAGE 1599 LINE 83286 :VPSUBSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xE9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpsubsw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # PSUBSB/PSUBSW 4-480 PAGE 1600 LINE 83302 define pcodeop psubsw_avx512bw ; :PSUBSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(VEX_NDS) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xE9; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 { ZmmReg1 = psubsw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); build ZmmOpMask16; ZmmReg1 = ZmmResult; } # PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83510 define pcodeop vpsubusb_avx512vl ; :VPSUBUSB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD8; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... 
# NOTE(review): continuation of the VPSUBUSB (EVEX.128) constructor started above.
& XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpsubusb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); }

# PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83514
:VPSUBUSB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD8; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpsubusb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); }

# PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83518
define pcodeop vpsubusb_avx512bw ;
:VPSUBUSB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xD8; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpsubusb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; }

# PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83522
define pcodeop vpsubusw_avx512vl ;
:VPSUBUSW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0xD9; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpsubusw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# PSUBUSB/PSUBUSW 4-483 PAGE 1603 LINE 83526
:VPSUBUSW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0xD9; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpsubusw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# PSUBUSB/PSUBUSW 4-484 PAGE 1604 LINE 83543
define pcodeop vpsubusw_avx512bw ;
:VPSUBUSW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xD9; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpsubusw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83948
define pcodeop vpunpckhbw_avx512vl ;
:VPUNPCKHBW XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x68; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpunpckhbw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83952
define pcodeop vpunpckhwd_avx512vl ;
:VPUNPCKHWD XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x69; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpunpckhwd_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83955
define pcodeop vpunpckhdq_avx512vl ;
:VPUNPCKHDQ XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x6A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpunpckhdq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-491 PAGE 1611 LINE 83958
define pcodeop vpunpckhqdq_avx512vl ;
:VPUNPCKHQDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x6D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpunpckhqdq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83974
:VPUNPCKHBW YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x68; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpunpckhbw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83977
:VPUNPCKHWD YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x69; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpunpckhwd_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83980
:VPUNPCKHDQ YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x6A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpunpckhdq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83984
:VPUNPCKHQDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x6D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpunpckhqdq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83988
define pcodeop vpunpckhbw_avx512bw ;
:VPUNPCKHBW ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x68; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpunpckhbw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83991
define pcodeop vpunpckhwd_avx512bw ;
:VPUNPCKHWD ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x69; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpunpckhwd_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83994
define pcodeop vpunpckhdq_avx512f ;
:VPUNPCKHDQ ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x6A; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpunpckhdq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# PUNPCKHBW/PUNPCKHWD/PUNPCKHDQ/PUNPCKHQDQ 4-492 PAGE 1612 LINE 83997
define pcodeop vpunpckhqdq_avx512f ;
:VPUNPCKHQDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x6D; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpunpckhqdq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84553
define pcodeop vpunpcklbw_avx512vl ;
:VPUNPCKLBW XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x60; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpunpcklbw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84556
define pcodeop vpunpcklwd_avx512vl ;
:VPUNPCKLWD XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_XmmReg; byte=0x61; (XmmReg1 & ZmmReg1 & XmmOpMask16) ...
# NOTE(review): continuation of the VPUNPCKLWD (EVEX.128) constructor started above.
& XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ XmmResult = vpunpcklwd_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84559
define pcodeop vpunpckldq_avx512vl ;
:VPUNPCKLDQ XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x62; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpunpckldq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-501 PAGE 1621 LINE 84562
define pcodeop vpunpcklqdq_avx512vl ;
:VPUNPCKLQDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x6C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpunpcklqdq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84578
:VPUNPCKLBW YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x60; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpunpcklbw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84581
:VPUNPCKLWD YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_YmmReg; byte=0x61; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ YmmResult = vpunpcklwd_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84584
:VPUNPCKLDQ YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x62; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpunpckldq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84587
:VPUNPCKLQDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x6C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpunpcklqdq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84590
define pcodeop vpunpcklbw_avx512bw ;
:VPUNPCKLBW ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x60; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpunpcklbw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84593
define pcodeop vpunpcklwd_avx512bw ;
:VPUNPCKLWD ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x61; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{ ZmmResult = vpunpcklwd_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84596
define pcodeop vpunpckldq_avx512f ;
:VPUNPCKLDQ ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x62; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpunpckldq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# PUNPCKLBW/PUNPCKLWD/PUNPCKLDQ/PUNPCKLQDQ 4-502 PAGE 1622 LINE 84599
define pcodeop vpunpcklqdq_avx512f ;
:VPUNPCKLQDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x6C; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpunpcklqdq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# PXOR 4-518 PAGE 1638 LINE 85503
define pcodeop vpxord_avx512vl ;
:VPXORD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpxord_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# PXOR 4-518 PAGE 1638 LINE 85505
:VPXORD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpxord_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# PXOR 4-518 PAGE 1638 LINE 85507
define pcodeop vpxord_avx512f ;
:VPXORD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xEF; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpxord_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# PXOR 4-518 PAGE 1638 LINE 85514
define pcodeop vpxorq_avx512vl ;
:VPXORQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xEF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpxorq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# PXOR 4-518 PAGE 1638 LINE 85521
:VPXORQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xEF; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpxorq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# PXOR 4-518 PAGE 1638 LINE 85523
define pcodeop vpxorq_avx512f ;
:VPXORQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xEF; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpxorq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# SHUFPD 4-617 PAGE 1737 LINE 90231
define pcodeop vshufpd_avx512vl ;
:VSHUFPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vshufpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# SHUFPD 4-617 PAGE 1737 LINE 90235
:VSHUFPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vshufpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# SHUFPD 4-617 PAGE 1737 LINE 90239
define pcodeop vshufpd_avx512f ;
:VSHUFPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0xC6; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vshufpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# SHUFPS 4-622 PAGE 1742 LINE 90489
define pcodeop vshufps_avx512vl ;
:VSHUFPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0xC6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
# NOTE(review): continuation of the VSHUFPS (EVEX.128) constructor started above.
& XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vshufps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# SHUFPS 4-622 PAGE 1742 LINE 90493
:VSHUFPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0xC6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vshufps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# SHUFPS 4-622 PAGE 1742 LINE 90497
define pcodeop vshufps_avx512f ;
:VSHUFPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0xC6; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vshufps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# Unary sqrt forms take no EVEX.vvvv source operand.
# SQRTPD 4-632 PAGE 1752 LINE 91007
define pcodeop vsqrtpd_avx512vl ;
:VSQRTPD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vsqrtpd_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# SQRTPD 4-632 PAGE 1752 LINE 91010
:VSQRTPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x51; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vsqrtpd_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# SQRTPD 4-632 PAGE 1752 LINE 91013
define pcodeop vsqrtpd_avx512f ;
:VSQRTPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x51; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vsqrtpd_avx512f( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# SQRTPS 4-635 PAGE 1755 LINE 91139
define pcodeop vsqrtps_avx512vl ;
:VSQRTPS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vsqrtps_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# SQRTPS 4-635 PAGE 1755 LINE 91142
:VSQRTPS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x51; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vsqrtps_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# SQRTPS 4-635 PAGE 1755 LINE 91145
define pcodeop vsqrtps_avx512f ;
:VSQRTPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x51; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vsqrtps_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# SQRTSD 4-638 PAGE 1758 LINE 91276
define pcodeop vsqrtsd_avx512f ;
:VSQRTSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ XmmResult = vsqrtsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# SQRTSS 4-640 PAGE 1760 LINE 91371
define pcodeop vsqrtss_avx512f ;
:VSQRTSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ XmmResult = vsqrtss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# SUBPD 4-656 PAGE 1776 LINE 92120
define pcodeop vsubpd_avx512vl ;
:VSUBPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vsubpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# SUBPD 4-656 PAGE 1776 LINE 92123
:VSUBPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vsubpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# SUBPD 4-656 PAGE 1776 LINE 92126
define pcodeop vsubpd_avx512f ;
:VSUBPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x5C; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vsubpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# SUBPS 4-659 PAGE 1779 LINE 92269
define pcodeop vsubps_avx512vl ;
:VSUBPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vsubps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# SUBPS 4-659 PAGE 1779 LINE 92272
:VSUBPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vsubps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# SUBPS 4-659 PAGE 1779 LINE 92275
define pcodeop vsubps_avx512f ;
:VSUBPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x5C; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vsubps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # SUBSD 4-662 PAGE 1782 LINE 92421 define pcodeop vsubsd_avx512f ; :VSUBSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vsubsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # SUBSS 4-664 PAGE 1784 LINE 92514 define pcodeop vsubss_avx512f ; :VSUBSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vsubss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # UCOMISD 4-683 PAGE 1803 LINE 93424 define pcodeop vucomisd_avx512f ; :VUCOMISD XmmReg1, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x2E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vucomisd_avx512f( XmmReg2_m64 ); ZmmReg1 = zext(tmp); # TODO set flags AF, CF, OF, PF, SF, ZF } # UCOMISS 4-685 PAGE 1805 LINE 93507 define pcodeop vucomiss_avx512f ; :VUCOMISS XmmReg1, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x2E; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vucomiss_avx512f( XmmReg2_m32 ); ZmmReg1 = zext(tmp); # TODO set flags AF, CF, OF, PF, SF, ZF } # UNPCKHPD 4-688 PAGE 1808 LINE 93629 define pcodeop vunpckhpd_avx512vl ; :VUNPCKHPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vunpckhpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # UNPCKHPD 4-688 PAGE 1808 LINE 93632 :VUNPCKHPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vunpckhpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # UNPCKHPD 4-688 PAGE 1808 LINE 93635 define pcodeop vunpckhpd_avx512f ; :VUNPCKHPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x15; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vunpckhpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # UNPCKHPS 4-692 PAGE 1812 LINE 93813 define pcodeop vunpckhps_avx512vl ; :VUNPCKHPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vunpckhps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # UNPCKHPS 4-692 PAGE 1812 LINE 93817 :VUNPCKHPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vunpckhps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # UNPCKHPS 4-692 PAGE 1812 LINE 93821 define pcodeop vunpckhps_avx512f ; :VUNPCKHPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x15; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vunpckhps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # UNPCKLPD 4-696 PAGE 1816 LINE 94045 define pcodeop vunpcklpd_avx512vl ; :VUNPCKLPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vunpcklpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # UNPCKLPD 4-696 PAGE 1816 LINE 94048 :VUNPCKLPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vunpcklpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # UNPCKLPD 4-696 PAGE 1816 LINE 94051 define pcodeop vunpcklpd_avx512f ; :VUNPCKLPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x14; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vunpcklpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # UNPCKLPS 4-700 PAGE 1820 LINE 94231 define pcodeop vunpcklps_avx512vl ; :VUNPCKLPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vunpcklps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # UNPCKLPS 4-700 PAGE 1820 LINE 94234 :VUNPCKLPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vunpcklps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # UNPCKLPS 4-700 PAGE 1820 LINE 94237 define pcodeop vunpcklps_avx512f ; :VUNPCKLPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x14; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vunpcklps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94615 define pcodeop valignd_avx512vl ; :VALIGND XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x03; ((XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst); imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = valignd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94619 define pcodeop valignq_avx512vl ; :VALIGNQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x03; ((XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst); imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = valignq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94623 :VALIGND YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x03; ((YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst); imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = valignd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94627 :VALIGNQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x03; ((YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst); imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = valignq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94631 define pcodeop valignd_avx512f ; :VALIGND ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x03; ((ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst); imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = valignd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VALIGND/VALIGNQ 5-5 PAGE 1829 LINE 94635 define pcodeop valignq_avx512f ; :VALIGNQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x03; ((ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst); imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = valignq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94787 define pcodeop vblendmpd_avx512vl ; :VBLENDMPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vblendmpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94790 :VBLENDMPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vblendmpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94793 define pcodeop vblendmpd_avx512f ; :VBLENDMPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x65; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vblendmpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94796 define pcodeop vblendmps_avx512vl ; :VBLENDMPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x65; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vblendmps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94799 :VBLENDMPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x65; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vblendmps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VBLENDMPD/VBLENDMPS 5-9 PAGE 1833 LINE 94802 define pcodeop vblendmps_avx512f ; :VBLENDMPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x65; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vblendmps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94917 define pcodeop vbroadcastsd_avx512vl ; :VBROADCASTSD YmmReg1^YmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x19; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcastsd_avx512vl( XmmReg2_m64 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VBROADCAST 5-12 PAGE 1836 LINE 94920 define pcodeop vbroadcastsd_avx512f ; :VBROADCASTSD ZmmReg1^ZmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x19; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastsd_avx512f( XmmReg2_m64 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94923 define pcodeop vbroadcastf32x2_avx512vl ; :VBROADCASTF32X2 YmmReg1^YmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x19; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcastf32x2_avx512vl( XmmReg2_m64 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VBROADCAST 5-12 PAGE 1836 LINE 94926 define pcodeop vbroadcastf32x2_avx512dq ; :VBROADCASTF32X2 ZmmReg1^ZmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x19; (ZmmReg1 & ZmmOpMask32) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastf32x2_avx512dq( XmmReg2_m64 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94929 define pcodeop vbroadcastss_avx512vl ; :VBROADCASTSS XmmReg1^XmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { XmmResult = vbroadcastss_avx512vl( XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VBROADCAST 5-12 PAGE 1836 LINE 94932 :VBROADCASTSS YmmReg1^YmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcastss_avx512vl( XmmReg2_m32 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VBROADCAST 5-12 PAGE 1836 LINE 94935 define pcodeop vbroadcastss_avx512f ; :VBROADCASTSS ZmmReg1^ZmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x18; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastss_avx512f( XmmReg2_m32 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94938 define pcodeop vbroadcastf32x4_avx512vl ; :VBROADCASTF32X4 YmmReg1^YmmOpMask32, m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcastf32x4_avx512vl( m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VBROADCAST 5-12 PAGE 1836 LINE 94941 define pcodeop vbroadcastf32x4_avx512f ; :VBROADCASTF32X4 ZmmReg1^ZmmOpMask32, m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1A; (ZmmReg1 & ZmmOpMask32) ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastf32x4_avx512f( m128 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94944 define pcodeop vbroadcastf64x2_avx512vl ; :VBROADCASTF64X2 YmmReg1^YmmOpMask64, m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x1A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcastf64x2_avx512vl( m128 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VBROADCAST 5-12 PAGE 1836 LINE 94947 define pcodeop vbroadcastf64x2_avx512dq ; :VBROADCASTF64X2 ZmmReg1^ZmmOpMask64, m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x1A; (ZmmReg1 & ZmmOpMask64) ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastf64x2_avx512dq( m128 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94950 define pcodeop vbroadcastf32x8_avx512dq ; :VBROADCASTF32X8 ZmmReg1^ZmmOpMask32, m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x1B; (ZmmReg1 & ZmmOpMask32) ... 
& m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastf32x8_avx512dq( m256 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VBROADCAST 5-12 PAGE 1836 LINE 94953 define pcodeop vbroadcastf64x4_avx512f ; :VBROADCASTF64X4 ZmmReg1^ZmmOpMask64, m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x1B; (ZmmReg1 & ZmmOpMask64) ... & m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcastf64x4_avx512f( m256 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPBROADCASTM 5-19 PAGE 1843 LINE 95303 define pcodeop vpbroadcastmb2q_avx512vl ; :VPBROADCASTMB2Q XmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x2A; (XmmReg1 & ZmmReg1) & KReg_rm { local tmp:16 = vpbroadcastmb2q_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); } # VPBROADCASTM 5-19 PAGE 1843 LINE 95305 :VPBROADCASTMB2Q YmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x2A; (YmmReg1 & ZmmReg1) & KReg_rm { local tmp:32 = vpbroadcastmb2q_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); } # VPBROADCASTM 5-19 PAGE 1843 LINE 95307 define pcodeop vpbroadcastmb2q_avx512cd ; :VPBROADCASTMB2Q ZmmReg1, KReg_rm is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x2A; ZmmReg1 & KReg_rm { ZmmReg1 = vpbroadcastmb2q_avx512cd( KReg_rm ); } # VPBROADCASTM 5-19 PAGE 1843 LINE 95309 define pcodeop vpbroadcastmw2d_avx512vl ; :VPBROADCASTMW2D XmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x3A; (XmmReg1 & ZmmReg1) & KReg_rm { local tmp:16 = vpbroadcastmw2d_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); } # VPBROADCASTM 5-19 PAGE 1843 LINE 95311 :VPBROADCASTMW2D YmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x3A; (YmmReg1 & ZmmReg1) & KReg_rm { local tmp:32 = vpbroadcastmw2d_avx512vl( KReg_rm ); ZmmReg1 = 
zext(tmp); } # VPBROADCASTM 5-19 PAGE 1843 LINE 95313 define pcodeop vpbroadcastmw2d_avx512cd ; :VPBROADCASTMW2D ZmmReg1, KReg_rm is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x3A; ZmmReg1 & KReg_rm { ZmmReg1 = vpbroadcastmw2d_avx512cd( KReg_rm ); } # VCOMPRESSPD 5-21 PAGE 1845 LINE 95380 define pcodeop vcompresspd_avx512vl ; :VCOMPRESSPD XmmReg2^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x8A; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmMask = XmmReg2; build XmmOpMask; XmmResult = vcompresspd_avx512vl( XmmReg1, XmmOpMask ); ZmmReg2 = zext(XmmResult); } :VCOMPRESSPD m128^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x8A; XmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmMask = m128; build XmmOpMask; XmmResult = vcompresspd_avx512vl( XmmReg1, XmmOpMask ); m128 = XmmResult; } # VCOMPRESSPD 5-21 PAGE 1845 LINE 95383 :VCOMPRESSPD YmmReg2^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x8A; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmMask = YmmReg2; build YmmOpMask; YmmResult = vcompresspd_avx512vl( YmmReg1, YmmOpMask ); ZmmReg2 = zext(YmmResult); } :VCOMPRESSPD m256^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x8A; YmmReg1 ... 
& m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmMask = m256; build YmmOpMask; YmmResult = vcompresspd_avx512vl( YmmReg1, YmmOpMask ); m256 = YmmResult; } # VCOMPRESSPD 5-21 PAGE 1845 LINE 95386 define pcodeop vcompresspd_avx512f ; :VCOMPRESSPD ZmmReg2^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x8A; ZmmReg1 & mod=3 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmMask = ZmmReg2; build ZmmOpMask; ZmmResult = vcompresspd_avx512f( ZmmReg1, ZmmOpMask ); ZmmReg2 = ZmmResult; } :VCOMPRESSPD m512^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x8A; ZmmReg1 ... & m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmMask = m512; build ZmmOpMask; ZmmResult = vcompresspd_avx512f( ZmmReg1, ZmmOpMask ); m512 = ZmmResult; } # VCOMPRESSPS 5-23 PAGE 1847 LINE 95481 define pcodeop vcompressps_avx512vl ; :VCOMPRESSPS XmmReg2^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x8A; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmMask = XmmReg2; build XmmOpMask; XmmResult = vcompressps_avx512vl( XmmReg1, XmmOpMask ); ZmmReg2 = zext(XmmResult); } :VCOMPRESSPS m128^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x8A; XmmReg1 ... 
& m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmMask = m128; build XmmOpMask; XmmResult = vcompressps_avx512vl( XmmReg1, XmmOpMask ); m128 = XmmResult; } # VCOMPRESSPS 5-23 PAGE 1847 LINE 95484 :VCOMPRESSPS YmmReg2^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x8A; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmMask = YmmReg2; build YmmOpMask; YmmResult = vcompressps_avx512vl( YmmReg1, YmmOpMask ); ZmmReg2 = zext(YmmResult); } :VCOMPRESSPS m256^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x8A; YmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmMask = m256; build YmmOpMask; YmmResult = vcompressps_avx512vl( YmmReg1, YmmOpMask ); m256 = YmmResult; } # VCOMPRESSPS 5-23 PAGE 1847 LINE 95487 define pcodeop vcompressps_avx512f ; :VCOMPRESSPS ZmmReg2^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x8A; ZmmReg1 & mod=3 & ZmmReg2 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmMask = ZmmReg2; build ZmmOpMask; ZmmResult = vcompressps_avx512f( ZmmReg1, ZmmOpMask ); ZmmReg2 = ZmmResult; } :VCOMPRESSPS m512^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x8A; ZmmReg1 ... & m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmMask = m512; build ZmmOpMask; ZmmResult = vcompressps_avx512f( ZmmReg1, ZmmOpMask ); m512 = ZmmResult; } # VCVTPD2QQ 5-25 PAGE 1849 LINE 95583 define pcodeop vcvtpd2qq_avx512vl ; :VCVTPD2QQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x7B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2qq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPD2QQ 5-25 PAGE 1849 LINE 95586 :VCVTPD2QQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x7B; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtpd2qq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPD2QQ 5-25 PAGE 1849 LINE 95589 define pcodeop vcvtpd2qq_avx512dq ; :VCVTPD2QQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x7B; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtpd2qq_avx512dq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTPD2UDQ 5-28 PAGE 1852 LINE 95706 define pcodeop vcvtpd2udq_avx512vl ; :VCVTPD2UDQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x79; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2udq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPD2UDQ 5-28 PAGE 1852 LINE 95709 :VCVTPD2UDQ XmmReg1^XmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x79; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2udq_avx512vl( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPD2UDQ 5-28 PAGE 1852 LINE 95712 define pcodeop vcvtpd2udq_avx512f ; :VCVTPD2UDQ YmmReg1^YmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x79; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtpd2udq_avx512f( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPD2UQQ 5-31 PAGE 1855 LINE 95833 define pcodeop vcvtpd2uqq_avx512vl ; :VCVTPD2UQQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x79; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtpd2uqq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPD2UQQ 5-31 PAGE 1855 LINE 95836 :VCVTPD2UQQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x79; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtpd2uqq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPD2UQQ 5-31 PAGE 1855 LINE 95839 define pcodeop vcvtpd2uqq_avx512dq ; :VCVTPD2UQQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x79; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtpd2uqq_avx512dq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTPH2PS 5-34 PAGE 1858 LINE 95963 define pcodeop vcvtph2ps_avx512vl ; :VCVTPH2PS XmmReg1^XmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x13; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vcvtph2ps_avx512vl( XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTPH2PS 5-34 PAGE 1858 LINE 95966 :VCVTPH2PS YmmReg1^YmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x13; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vcvtph2ps_avx512vl( XmmReg2_m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTPH2PS 5-34 PAGE 1858 LINE 95969 define pcodeop vcvtph2ps_avx512f ; :VCVTPH2PS ZmmReg1^ZmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x13; (ZmmReg1 & ZmmOpMask32) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { ZmmResult = vcvtph2ps_avx512f( YmmReg2_m256 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTPS2PH 5-37 PAGE 1861 LINE 96116 # INFO mnemonic VCVTPS2PH was found in ../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx512_manual.sinc # VCVTPS2PH 5-37 PAGE 1861 LINE 96119 # INFO mnemonic VCVTPS2PH was found in ../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx512_manual.sinc # VCVTPS2PH 5-37 PAGE 1861 LINE 96122 # INFO mnemonic VCVTPS2PH was found in ../../../../../../../ghidra/Ghidra/Processors/x86/data/languages/avx512_manual.sinc # VCVTPS2UDQ 5-41 PAGE 1865 LINE 96305 define pcodeop vcvtps2udq_avx512vl ; :VCVTPS2UDQ XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x79; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtps2udq_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTPS2UDQ 5-41 PAGE 1865 LINE 96309 :VCVTPS2UDQ YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x79; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtps2udq_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTPS2UDQ 5-41 PAGE 1865 LINE 96313 define pcodeop vcvtps2udq_avx512f ; :VCVTPS2UDQ ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x79; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtps2udq_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTPS2QQ 5-44 PAGE 1868 LINE 96434 define pcodeop vcvtps2qq_avx512vl ; :VCVTPS2QQ XmmReg1^XmmOpMask64, XmmReg2_m64_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { XmmResult = vcvtps2qq_avx512vl( XmmReg2_m64_m32bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPS2QQ 5-44 PAGE 1868 LINE 96437 :VCVTPS2QQ YmmReg1^YmmOpMask64, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7B; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { YmmResult = vcvtps2qq_avx512vl( XmmReg2_m128_m32bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPS2QQ 5-44 PAGE 1868 LINE 96440 define pcodeop vcvtps2qq_avx512dq ; :VCVTPS2QQ ZmmReg1^ZmmOpMask64, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7B; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { ZmmResult = vcvtps2qq_avx512dq( YmmReg2_m256_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTPS2UQQ 5-47 PAGE 1871 LINE 96560 define pcodeop vcvtps2uqq_avx512vl ; :VCVTPS2UQQ XmmReg1^XmmOpMask64, XmmReg2_m64_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x79; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { XmmResult = vcvtps2uqq_avx512vl( XmmReg2_m64_m32bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPS2UQQ 5-47 PAGE 1871 LINE 96563 :VCVTPS2UQQ YmmReg1^YmmOpMask64, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x79; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { YmmResult = vcvtps2uqq_avx512vl( XmmReg2_m128_m32bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPS2UQQ 5-47 PAGE 1871 LINE 96566 define pcodeop vcvtps2uqq_avx512dq ; :VCVTPS2UQQ ZmmReg1^ZmmOpMask64, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x79; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { ZmmResult = vcvtps2uqq_avx512dq( YmmReg2_m256_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTQQ2PD 5-50 PAGE 1874 LINE 96686 define pcodeop vcvtqq2pd_avx512vl ; :VCVTQQ2PD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtqq2pd_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTQQ2PD 5-50 PAGE 1874 LINE 96689 :VCVTQQ2PD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtqq2pd_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTQQ2PD 5-50 PAGE 1874 LINE 96692 define pcodeop vcvtqq2pd_avx512dq ; :VCVTQQ2PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0xE6; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtqq2pd_avx512dq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTQQ2PS 5-52 PAGE 1876 LINE 96797 define pcodeop vcvtqq2ps_avx512vl ; :VCVTQQ2PS XmmReg1^XmmOpMask32, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x5B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtqq2ps_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTQQ2PS 5-52 PAGE 1876 LINE 96800 :VCVTQQ2PS XmmReg1^XmmOpMask32, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x5B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtqq2ps_avx512vl( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTQQ2PS 5-52 PAGE 1876 LINE 96803 define pcodeop vcvtqq2ps_avx512dq ; :VCVTQQ2PS YmmReg1^YmmOpMask32, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x5B; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtqq2ps_avx512dq( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTSD2USI 5-54 PAGE 1878 LINE 96907 define pcodeop vcvtsd2usi_avx512f ; :VCVTSD2USI Reg32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x79; Reg32 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvtsd2usi_avx512f( XmmReg2_m64 ); # TODO Reg64 = zext(Reg32) } # VCVTSD2USI 5-54 PAGE 1878 LINE 96909 @ifdef IA64 :VCVTSD2USI Reg64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x79; Reg64 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvtsd2usi_avx512f( XmmReg2_m64 ); } @endif # VCVTSS2USI 5-55 PAGE 1879 LINE 96967 define pcodeop vcvtss2usi_avx512f ; :VCVTSS2USI Reg32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x79; Reg32 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvtss2usi_avx512f( XmmReg2_m32 ); # TODO Reg64 = zext(Reg32) } # VCVTSS2USI 5-55 PAGE 1879 LINE 96969 @ifdef IA64 :VCVTSS2USI Reg64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x79; Reg64 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvtss2usi_avx512f( XmmReg2_m32 ); } @endif # VCVTTPD2QQ 5-57 PAGE 1881 LINE 97040 define pcodeop vcvttpd2qq_avx512vl ; :VCVTTPD2QQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttpd2qq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPD2QQ 5-57 PAGE 1881 LINE 97043 :VCVTTPD2QQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvttpd2qq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPD2QQ 5-57 PAGE 1881 LINE 97046 define pcodeop vcvttpd2qq_avx512dq ; :VCVTTPD2QQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvttpd2qq_avx512dq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTTPD2UDQ 5-59 PAGE 1883 LINE 97147 define pcodeop vcvttpd2udq_avx512vl ; :VCVTTPD2UDQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x78; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttpd2udq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPD2UDQ 5-59 PAGE 1883 LINE 97152 :VCVTTPD2UDQ XmmReg1^XmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x78; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttpd2udq_avx512vl( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPD2UDQ 5-59 PAGE 1883 LINE 97156 define pcodeop vcvttpd2udq_avx512f ; :VCVTTPD2UDQ YmmReg1^YmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) ; byte=0x78; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvttpd2udq_avx512f( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPD2UQQ 5-62 PAGE 1886 LINE 97272 define pcodeop vcvttpd2uqq_avx512vl ; :VCVTTPD2UQQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x78; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttpd2uqq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPD2UQQ 5-62 PAGE 1886 LINE 97276 :VCVTTPD2UQQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x78; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvttpd2uqq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPD2UQQ 5-62 PAGE 1886 LINE 97280 define pcodeop vcvttpd2uqq_avx512dq ; :VCVTTPD2UQQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) ; byte=0x78; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvttpd2uqq_avx512dq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTTPS2UDQ 5-64 PAGE 1888 LINE 97385 define pcodeop vcvttps2udq_avx512vl ; :VCVTTPS2UDQ XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x78; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvttps2udq_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTTPS2UDQ 5-64 PAGE 1888 LINE 97389 :VCVTTPS2UDQ YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x78; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvttps2udq_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTTPS2UDQ 5-64 PAGE 1888 LINE 97393 define pcodeop vcvttps2udq_avx512f ; :VCVTTPS2UDQ ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x78; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvttps2udq_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTTPS2QQ 5-66 PAGE 1890 LINE 97497 define pcodeop vcvttps2qq_avx512vl ; :VCVTTPS2QQ XmmReg1^XmmOpMask64, XmmReg2_m64_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { XmmResult = vcvttps2qq_avx512vl( XmmReg2_m64_m32bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPS2QQ 5-66 PAGE 1890 LINE 97500 :VCVTTPS2QQ YmmReg1^YmmOpMask64, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { YmmResult = vcvttps2qq_avx512vl( XmmReg2_m128_m32bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPS2QQ 5-66 PAGE 1890 LINE 97503 define pcodeop vcvttps2qq_avx512dq ; :VCVTTPS2QQ ZmmReg1^ZmmOpMask64, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { ZmmResult = vcvttps2qq_avx512dq( YmmReg2_m256_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTTPS2UQQ 5-68 PAGE 1892 LINE 97608 define pcodeop vcvttps2uqq_avx512vl ; :VCVTTPS2UQQ XmmReg1^XmmOpMask64, XmmReg2_m64_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x78; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { XmmResult = vcvttps2uqq_avx512vl( XmmReg2_m64_m32bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPS2UQQ 5-68 PAGE 1892 LINE 97611 :VCVTTPS2UQQ YmmReg1^YmmOpMask64, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x78; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { YmmResult = vcvttps2uqq_avx512vl( XmmReg2_m128_m32bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPS2UQQ 5-68 PAGE 1892 LINE 97615 define pcodeop vcvttps2uqq_avx512dq ; :VCVTTPS2UQQ ZmmReg1^ZmmOpMask64, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x78; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { ZmmResult = vcvttps2uqq_avx512dq( YmmReg2_m256_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTTSD2USI 5-70 PAGE 1894 LINE 97722 define pcodeop vcvttsd2usi_avx512f ; :VCVTTSD2USI Reg32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x78; Reg32 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvttsd2usi_avx512f( XmmReg2_m64 ); # TODO Reg64 = zext(Reg32) } # VCVTTSD2USI 5-70 PAGE 1894 LINE 97725 @ifdef IA64 :VCVTTSD2USI Reg64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x78; Reg64 ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvttsd2usi_avx512f( XmmReg2_m64 ); } @endif # VCVTTSS2USI 5-71 PAGE 1895 LINE 97782 define pcodeop vcvttss2usi_avx512f ; :VCVTTSS2USI Reg32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x78; Reg32 ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg32 = vcvttss2usi_avx512f( XmmReg2_m32 ); # TODO Reg64 = zext(Reg32) } # VCVTTSS2USI 5-71 PAGE 1895 LINE 97785 @ifdef IA64 :VCVTTSS2USI Reg64, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1); byte=0x78; Reg64 ... 
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 5; ] # (TupleType T1F) { Reg64 = vcvttss2usi_avx512f( XmmReg2_m32 ); } @endif # VCVTUDQ2PD 5-73 PAGE 1897 LINE 97852 define pcodeop vcvtudq2pd_avx512vl ; :VCVTUDQ2PD XmmReg1^XmmOpMask64, XmmReg2_m64_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { XmmResult = vcvtudq2pd_avx512vl( XmmReg2_m64_m32bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTUDQ2PD 5-73 PAGE 1897 LINE 97855 :VCVTUDQ2PD YmmReg1^YmmOpMask64, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { YmmResult = vcvtudq2pd_avx512vl( XmmReg2_m128_m32bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTUDQ2PD 5-73 PAGE 1897 LINE 97859 define pcodeop vcvtudq2pd_avx512f ; :VCVTUDQ2PD ZmmReg1^ZmmOpMask64, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType HV) { ZmmResult = vcvtudq2pd_avx512f( YmmReg2_m256_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTUDQ2PS 5-75 PAGE 1899 LINE 97962 define pcodeop vcvtudq2ps_avx512vl ; :VCVTUDQ2PS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtudq2ps_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTUDQ2PS 5-75 PAGE 1899 LINE 97965 :VCVTUDQ2PS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtudq2ps_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTUDQ2PS 5-75 PAGE 1899 LINE 97968 define pcodeop vcvtudq2ps_avx512f ; :VCVTUDQ2PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x7A; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtudq2ps_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTUQQ2PD 5-77 PAGE 1901 LINE 98078 define pcodeop vcvtuqq2pd_avx512vl ; :VCVTUQQ2PD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtuqq2pd_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTUQQ2PD 5-77 PAGE 1901 LINE 98081 :VCVTUQQ2PD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtuqq2pd_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTUQQ2PD 5-77 PAGE 1901 LINE 98084 define pcodeop vcvtuqq2pd_avx512dq ; :VCVTUQQ2PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vcvtuqq2pd_avx512dq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTUQQ2PS 5-79 PAGE 1903 LINE 98193 define pcodeop vcvtuqq2ps_avx512vl ; :VCVTUQQ2PS XmmReg1^XmmOpMask32, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtuqq2ps_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTUQQ2PS 5-79 PAGE 1903 LINE 98196 :VCVTUQQ2PS XmmReg1^XmmOpMask32, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vcvtuqq2ps_avx512vl( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTUQQ2PS 5-79 PAGE 1903 LINE 98199 define pcodeop vcvtuqq2ps_avx512dq ; :VCVTUQQ2PS YmmReg1^YmmOpMask32, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) ; byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vcvtuqq2ps_avx512dq( ZmmReg2_m512_m64bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTUSI2SD 5-81 PAGE 1905 LINE 98308 define pcodeop vcvtusi2sd_avx512f ; :VCVTUSI2SD XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtusi2sd_avx512f( evexV5_XmmReg, rm32 ); ZmmReg1 = zext(tmp); } # VCVTUSI2SD 5-81 PAGE 1905 LINE 98311 @ifdef IA64 :VCVTUSI2SD XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtusi2sd_avx512f( evexV5_XmmReg, rm64 ); ZmmReg1 = zext(tmp); } @endif # VCVTUSI2SS 5-83 PAGE 1907 LINE 98381 define pcodeop vcvtusi2ss_avx512f ; :VCVTUSI2SS XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtusi2ss_avx512f( evexV5_XmmReg, rm32 ); ZmmReg1 = zext(tmp); } # VCVTUSI2SS 5-83 PAGE 1907 LINE 98383 @ifdef IA64 :VCVTUSI2SS XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F3) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vcvtusi2ss_avx512f( evexV5_XmmReg, rm64 ); ZmmReg1 = zext(tmp); } @endif # VDBPSADBW 5-85 PAGE 1909 LINE 98455 define pcodeop vdbpsadbw_avx512vl ; :VDBPSADBW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x42; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... 
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vdbpsadbw_avx512vl( evexV5_XmmReg, XmmReg2_m128, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VDBPSADBW 5-85 PAGE 1909 LINE 98460 :VDBPSADBW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x42; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vdbpsadbw_avx512vl( evexV5_YmmReg, YmmReg2_m256, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VDBPSADBW 5-85 PAGE 1909 LINE 98465 define pcodeop vdbpsadbw_avx512bw ; :VDBPSADBW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x42; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vdbpsadbw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VEXPANDPD 5-89 PAGE 1913 LINE 98660 define pcodeop vexpandpd_avx512vl ; :VEXPANDPD XmmReg1^XmmOpMask, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x88; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmMask = XmmReg1; build XmmOpMask; XmmResult = vexpandpd_avx512vl( XmmReg2_m128, XmmOpMask ); ZmmReg1 = zext(XmmResult); } # VEXPANDPD 5-89 PAGE 1913 LINE 98663 :VEXPANDPD YmmReg1^YmmOpMask, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x88; (YmmReg1 & ZmmReg1 & YmmOpMask) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	YmmMask = YmmReg1;
	build YmmOpMask;
	YmmResult = vexpandpd_avx512vl( YmmReg2_m256, YmmOpMask );
	ZmmReg1 = zext(YmmResult);
}

# VEXPANDPD 5-89 PAGE 1913 LINE 98665
define pcodeop vexpandpd_avx512f ;
:VEXPANDPD ZmmReg1^ZmmOpMask, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x88; (ZmmReg1 & ZmmOpMask) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	ZmmMask = ZmmReg1;
	build ZmmOpMask;
	ZmmResult = vexpandpd_avx512f( ZmmReg2_m512, ZmmOpMask );
	ZmmReg1 = ZmmResult;
}

# VEXPANDPS 5-91 PAGE 1915 LINE 98748
define pcodeop vexpandps_avx512vl ;
:VEXPANDPS XmmReg1^XmmOpMask, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x88; (XmmReg1 & ZmmReg1 & XmmOpMask) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	XmmMask = XmmReg1;
	build XmmOpMask;
	XmmResult = vexpandps_avx512vl( XmmReg2_m128, XmmOpMask );
	ZmmReg1 = zext(XmmResult);
}

# VEXPANDPS 5-91 PAGE 1915 LINE 98750
:VEXPANDPS YmmReg1^YmmOpMask, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x88; (YmmReg1 & ZmmReg1 & YmmOpMask) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	YmmMask = YmmReg1;
	build YmmOpMask;
	# Pass the opmask to the pcodeop, consistent with every other VEXPANDPD/VEXPANDPS
	# variant: the expand operation selects source elements according to the mask,
	# so the mask is a semantic input of the operation, not only of the merge.
	YmmResult = vexpandps_avx512vl( YmmReg2_m256, YmmOpMask );
	ZmmReg1 = zext(YmmResult);
}

# VEXPANDPS 5-91 PAGE 1915 LINE 98752
define pcodeop vexpandps_avx512f ;
:VEXPANDPS ZmmReg1^ZmmOpMask, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x88; (ZmmReg1 & ZmmOpMask) ...
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmMask = ZmmReg1; build ZmmOpMask; ZmmResult = vexpandps_avx512f( ZmmReg2_m512, ZmmOpMask ); ZmmReg1 = ZmmResult; } # VEXP2PD 5-95 PAGE 1919 LINE 98936 define pcodeop vexp2pd_avx512er ; :VEXP2PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0xC8; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vexp2pd_avx512er( ZmmReg1, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VEXP2PS 5-97 PAGE 1921 LINE 99019 define pcodeop vexp2ps_avx512er ; :VEXP2PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0xC8; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vexp2ps_avx512er( ZmmReg1, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99105 define pcodeop vextractf32x4_avx512vl ; :VEXTRACTF32X4 XmmReg2^XmmOpMask32, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x19; YmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf32x4_avx512vl( YmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VEXTRACTF32X4 m128^XmmOpMask32, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x19; YmmReg1 ... 
& m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf32x4_avx512vl( YmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask32; m128 = XmmResult; } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99108 define pcodeop vextractf32x4_avx512f ; :VEXTRACTF32x4 XmmReg2^XmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x19; ZmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf32x4_avx512f( ZmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VEXTRACTF32x4 m128^XmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x19; ZmmReg1 ... & m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf32x4_avx512f( ZmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask32; m128 = XmmResult; } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99111 define pcodeop vextractf64x2_avx512vl ; :VEXTRACTF64X2 XmmReg2^XmmOpMask64, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x19; YmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf64x2_avx512vl( YmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask64; ZmmReg2 = zext(XmmResult); } :VEXTRACTF64X2 m128^XmmOpMask64, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x19; YmmReg1 ... 
& m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf64x2_avx512vl( YmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask64; m128 = XmmResult; } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99114 define pcodeop vextractf64x2_avx512dq ; :VEXTRACTF64X2 XmmReg2^XmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x19; ZmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf64x2_avx512dq( ZmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask64; ZmmReg2 = zext(XmmResult); } :VEXTRACTF64X2 m128^XmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x19; ZmmReg1 ... & m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextractf64x2_avx512dq( ZmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask64; m128 = XmmResult; } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99117 define pcodeop vextractf32x8_avx512dq ; :VEXTRACTF32X8 YmmReg2^YmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32; byte=0x1B; ZmmReg1 & mod=3 & YmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextractf32x8_avx512dq( ZmmReg1, imm8:1 ); YmmMask = YmmReg2; build YmmOpMask32; ZmmReg2 = zext(YmmResult); } :VEXTRACTF32X8 m256^YmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32; byte=0x1B; ZmmReg1 ... 
& m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextractf32x8_avx512dq( ZmmReg1, imm8:1 ); YmmMask = m256; build YmmOpMask32; m256 = YmmResult; } # VEXTRACTF128/VEXTRACTF32x4/VEXTRACTF64x2/VEXTRACTF32x8/VEXTRACTF64x4 5-99 PAGE 1923 LINE 99120 define pcodeop vextractf64x4_avx512f ; :VEXTRACTF64x4 YmmReg2^YmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64; byte=0x1B; ZmmReg1 & mod=3 & YmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextractf64x4_avx512f( ZmmReg1, imm8:1 ); YmmMask = YmmReg2; build YmmOpMask64; ZmmReg2 = zext(YmmResult); } :VEXTRACTF64x4 m256^YmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64; byte=0x1B; ZmmReg1 ... & m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextractf64x4_avx512f( ZmmReg1, imm8:1 ); YmmMask = m256; build YmmOpMask64; m256 = YmmResult; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99435 define pcodeop vextracti32x4_avx512vl ; :VEXTRACTI32X4 XmmReg2^XmmOpMask32, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x39; YmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti32x4_avx512vl( YmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VEXTRACTI32X4 m128^XmmOpMask32, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x39; YmmReg1 ... 
& m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti32x4_avx512vl( YmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask32; m128 = XmmResult; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99438 define pcodeop vextracti32x4_avx512f ; :VEXTRACTI32x4 XmmReg2^XmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x39; ZmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti32x4_avx512f( ZmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VEXTRACTI32x4 m128^XmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32; byte=0x39; ZmmReg1 ... & m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti32x4_avx512f( ZmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask32; m128 = XmmResult; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99441 define pcodeop vextracti64x2_avx512vl ; :VEXTRACTI64X2 XmmReg2^XmmOpMask64, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x39; YmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti64x2_avx512vl( YmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask64; ZmmReg2 = zext(XmmResult); } :VEXTRACTI64X2 m128^XmmOpMask64, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x39; YmmReg1 ... 
# NOTE(review): continuation of the VEXTRACTI* family (64x2 / 32x8 / 64x4), then
# the start of VFIXUPIMMPD. The extracted lane is computed by a pcodeop stub
# (semantics opaque to the decompiler); masking is applied through the OpMask
# tables and register destinations are zero-extended into the full Zmm.
& m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti64x2_avx512vl( YmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask64; m128 = XmmResult; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99444 define pcodeop vextracti64x2_avx512dq ; :VEXTRACTI64X2 XmmReg2^XmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x39; ZmmReg1 & mod=3 & XmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti64x2_avx512dq( ZmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask64; ZmmReg2 = zext(XmmResult); } :VEXTRACTI64X2 m128^XmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64; byte=0x39; ZmmReg1 ... & m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { XmmResult = vextracti64x2_avx512dq( ZmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask64; m128 = XmmResult; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99447 define pcodeop vextracti32x8_avx512dq ; :VEXTRACTI32X8 YmmReg2^YmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32; byte=0x3B; ZmmReg1 & mod=3 & YmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextracti32x8_avx512dq( ZmmReg1, imm8:1 ); YmmMask = YmmReg2; build YmmOpMask32; ZmmReg2 = zext(YmmResult); } :VEXTRACTI32X8 m256^YmmOpMask32, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32; byte=0x3B; ZmmReg1 ... 
& m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextracti32x8_avx512dq( ZmmReg1, imm8:1 ); YmmMask = m256; build YmmOpMask32; m256 = YmmResult; } # VEXTRACTI128/VEXTRACTI32x4/VEXTRACTI64x2/VEXTRACTI32x8/VEXTRACTI64x4 5-106 PAGE 1930 LINE 99450 define pcodeop vextracti64x4_avx512f ; :VEXTRACTI64x4 YmmReg2^YmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64; byte=0x3B; ZmmReg1 & mod=3 & YmmReg2 & ZmmReg2; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextracti64x4_avx512f( ZmmReg1, imm8:1 ); YmmMask = YmmReg2; build YmmOpMask64; ZmmReg2 = zext(YmmResult); } :VEXTRACTI64x4 m256^YmmOpMask64, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64; byte=0x3B; ZmmReg1 ... & m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vextracti64x4_avx512f( ZmmReg1, imm8:1 ); YmmMask = m256; build YmmOpMask64; m256 = YmmResult; } # VFIXUPIMMPD 5-112 PAGE 1936 LINE 99754 define pcodeop vfixupimmpd_avx512vl ; :VFIXUPIMMPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfixupimmpd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFIXUPIMMPD 5-112 PAGE 1936 LINE 99757 :VFIXUPIMMPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
# NOTE(review): VFIXUPIMMPD/PS packed forms (FV tuple) and the lead-in to the
# scalar SD/SS forms (T1S tuple). Packed 512-bit forms write ZmmReg1 directly;
# the 128/256-bit forms merge through the OpMask table and zext into ZmmReg1.
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfixupimmpd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFIXUPIMMPD 5-112 PAGE 1936 LINE 99760 define pcodeop vfixupimmpd_avx512f ; :VFIXUPIMMPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x54; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfixupimmpd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFIXUPIMMPS 5-116 PAGE 1940 LINE 99957 define pcodeop vfixupimmps_avx512vl ; :VFIXUPIMMPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x54; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfixupimmps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFIXUPIMMPS 5-116 PAGE 1940 LINE 99960 :VFIXUPIMMPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x54; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfixupimmps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFIXUPIMMPS 5-116 PAGE 1940 LINE 99963 define pcodeop vfixupimmps_avx512f ; :VFIXUPIMMPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x54; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfixupimmps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFIXUPIMMSD 5-120 PAGE 1944 LINE 100159 define pcodeop vfixupimmsd_avx512f ; :VFIXUPIMMSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfixupimmsd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFIXUPIMMSS 5-123 PAGE 1947 LINE 100331 define pcodeop vfixupimmss_avx512f ; :VFIXUPIMMSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x55; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m32; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfixupimmss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100523 define pcodeop vfmadd132pd_avx512vl ; :VFMADD132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst { XmmResult = vfmadd132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100526 define pcodeop vfmadd213pd_avx512vl ; :VFMADD213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmadd213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100529 define pcodeop vfmadd231pd_avx512vl ; :VFMADD231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
# NOTE(review): VFMADD{132,213,231}PD Ymm (AVX512VL) and Zmm (AVX512F) forms.
# Ymm forms reuse the *_avx512vl pcodeops and zext into ZmmReg1; 512-bit forms
# write ZmmReg1 in full after merging through ZmmOpMask64.
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmadd231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100532 :VFMADD132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmadd132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100535 :VFMADD213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmadd213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100538 :VFMADD231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmadd231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100541 define pcodeop vfmadd132pd_avx512f ; :VFMADD132PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x98; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmadd132pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100544 define pcodeop vfmadd213pd_avx512f ; :VFMADD213PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xA8; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmadd213pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMADD132PD/VFMADD213PD/VFMADD231PD 5-126 PAGE 1950 LINE 100547 define pcodeop vfmadd231pd_avx512f ; :VFMADD231PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xB8; (ZmmReg1 & ZmmOpMask64) ... 
# NOTE(review): VFMADD{132,213,231}PS — single-precision counterparts of the PD
# forms above (VEX_W0, *OpMask32 tables). Same merge-then-zext pattern for the
# 128/256-bit forms and direct ZmmReg1 writeback for 512-bit.
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmadd231pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100884 define pcodeop vfmadd132ps_avx512vl ; :VFMADD132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmadd132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100887 define pcodeop vfmadd213ps_avx512vl ; :VFMADD213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmadd213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100890 define pcodeop vfmadd231ps_avx512vl ; :VFMADD231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmadd231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100893 :VFMADD132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmadd132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100896 :VFMADD213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmadd213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100899 :VFMADD231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmadd231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100902 define pcodeop vfmadd132ps_avx512f ; :VFMADD132PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x98; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmadd132ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100905 define pcodeop vfmadd213ps_avx512f ; :VFMADD213PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xA8; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmadd213ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-133 PAGE 1957 LINE 100908 define pcodeop vfmadd231ps_avx512f ; :VFMADD231PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xB8; (ZmmReg1 & ZmmOpMask32) ... 
# NOTE(review): scalar VFMADD{132,213,231}SD/SS forms (VEX_LIG, T1S tuple with
# evexD8Type=1/evexTType=3 for single-element disp8 scaling). Low element is
# merged through XmmOpMask64/32 and the Xmm result zero-extended into ZmmReg1.
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmadd231ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMADD132SD/VFMADD213SD/VFMADD231SD 5-140 PAGE 1964 LINE 101235 define pcodeop vfmadd132sd_avx512f ; :VFMADD132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmadd132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132SD/VFMADD213SD/VFMADD231SD 5-140 PAGE 1964 LINE 101238 define pcodeop vfmadd213sd_avx512f ; :VFMADD213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmadd213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132SD/VFMADD213SD/VFMADD231SD 5-140 PAGE 1964 LINE 101241 define pcodeop vfmadd231sd_avx512f ; :VFMADD231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmadd231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-143 PAGE 1967 LINE 101403 define pcodeop vfmadd132ss_avx512f ; :VFMADD132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmadd132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-143 PAGE 1967 LINE 101406 define pcodeop vfmadd213ss_avx512f ; :VFMADD213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmadd213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-143 PAGE 1967 LINE 101409 define pcodeop vfmadd231ss_avx512f ; :VFMADD231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
# NOTE(review): VFMADDSUB{132,213,231}PD — alternating add/sub FMA, double
# precision. Note the generated order within each width group is 213, 231, 132
# (following the SDM line order), unlike the plain VFMADD group above.
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmadd231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101585 define pcodeop vfmaddsub213pd_avx512vl ; :VFMADDSUB213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmaddsub213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101589 define pcodeop vfmaddsub231pd_avx512vl ; :VFMADDSUB231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmaddsub231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101593 define pcodeop vfmaddsub132pd_avx512vl ; :VFMADDSUB132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmaddsub132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101597 :VFMADDSUB213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmaddsub213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101601 :VFMADDSUB231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmaddsub231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-146 PAGE 1970 LINE 101605 :VFMADDSUB132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmaddsub132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-147 PAGE 1971 LINE 101621 define pcodeop vfmaddsub213pd_avx512f ; :VFMADDSUB213PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xA6; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmaddsub213pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-147 PAGE 1971 LINE 101625 define pcodeop vfmaddsub231pd_avx512f ; :VFMADDSUB231PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xB6; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmaddsub231pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-147 PAGE 1971 LINE 101629 define pcodeop vfmaddsub132pd_avx512f ; :VFMADDSUB132PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x96; (ZmmReg1 & ZmmOpMask64) ... 
# NOTE(review): VFMADDSUB{132,213,231}PS — single-precision alternating add/sub
# FMA (VEX_W0, *OpMask32). Same 213/231/132 generated ordering as the PD group.
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmaddsub132pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102024 define pcodeop vfmaddsub213ps_avx512vl ; :VFMADDSUB213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmaddsub213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102028 define pcodeop vfmaddsub231ps_avx512vl ; :VFMADDSUB231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmaddsub231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102031 define pcodeop vfmaddsub132ps_avx512vl ; :VFMADDSUB132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmaddsub132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102034 :VFMADDSUB213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmaddsub213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102038 :VFMADDSUB231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmaddsub231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102041 :VFMADDSUB132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmaddsub132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102044 define pcodeop vfmaddsub213ps_avx512f ; :VFMADDSUB213PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xA6; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmaddsub213ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102048 define pcodeop vfmaddsub231ps_avx512f ; :VFMADDSUB231PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xB6; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmaddsub231ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMADDSUB132PS/VFMADDSUB213PS/VFMADDSUB231PS 5-156 PAGE 1980 LINE 102051 define pcodeop vfmaddsub132ps_avx512f ; :VFMADDSUB132PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x96; (ZmmReg1 & ZmmOpMask32) ... 
# NOTE(review): VFMSUBADD{132,213,231}PD — alternating sub/add FMA, double
# precision. This group is generated in 132/213/231 order (matching the SDM
# line order for these pages), opcodes 0x97/0xA7/0xB7.
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmaddsub132ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102454 define pcodeop vfmsubadd132pd_avx512vl ; :VFMSUBADD132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsubadd132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102458 define pcodeop vfmsubadd213pd_avx512vl ; :VFMSUBADD213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsubadd213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102462 define pcodeop vfmsubadd231pd_avx512vl ; :VFMSUBADD231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsubadd231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102466 :VFMSUBADD132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsubadd132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102470 :VFMSUBADD213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsubadd213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-165 PAGE 1989 LINE 102474 :VFMSUBADD231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsubadd231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-166 PAGE 1990 LINE 102490 define pcodeop vfmsubadd132pd_avx512f ; :VFMSUBADD132PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x97; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsubadd132pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-166 PAGE 1990 LINE 102494 define pcodeop vfmsubadd213pd_avx512f ; :VFMSUBADD213PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xA7; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsubadd213pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-166 PAGE 1990 LINE 102498 define pcodeop vfmsubadd231pd_avx512f ; :VFMSUBADD231PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xB7; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsubadd231pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102894 define pcodeop vfmsubadd132ps_avx512vl ; :VFMSUBADD132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsubadd132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102897 define pcodeop vfmsubadd213ps_avx512vl ; :VFMSUBADD213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsubadd213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102901 define pcodeop vfmsubadd231ps_avx512vl ; :VFMSUBADD231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsubadd231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102904 :VFMSUBADD132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsubadd132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102907 :VFMSUBADD213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsubadd213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102911 :VFMSUBADD231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsubadd231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102914 define pcodeop vfmsubadd132ps_avx512f ; :VFMSUBADD132PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x97; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsubadd132ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102917 define pcodeop vfmsubadd213ps_avx512f ; :VFMSUBADD213PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xA7; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsubadd213ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-175 PAGE 1999 LINE 102921 define pcodeop vfmsubadd231ps_avx512f ; :VFMSUBADD231PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xB7; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsubadd231ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103332 define pcodeop vfmsub132pd_avx512vl ; :VFMSUB132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsub132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103335 define pcodeop vfmsub213pd_avx512vl ; :VFMSUB213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsub213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103338 define pcodeop vfmsub231pd_avx512vl ; :VFMSUB231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsub231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103341 :VFMSUB132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsub132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103344 :VFMSUB213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsub213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103347 :VFMSUB231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsub231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103350 define pcodeop vfmsub132pd_avx512f ; :VFMSUB132PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x9A; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsub132pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103353 define pcodeop vfmsub213pd_avx512f ; :VFMSUB213PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xAA; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsub213pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-185 PAGE 2009 LINE 103356 define pcodeop vfmsub231pd_avx512f ; :VFMSUB231PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xBA; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsub231pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103692 define pcodeop vfmsub132ps_avx512vl ; :VFMSUB132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsub132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103695 define pcodeop vfmsub213ps_avx512vl ; :VFMSUB213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsub213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103698 define pcodeop vfmsub231ps_avx512vl ; :VFMSUB231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfmsub231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103701 :VFMSUB132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsub132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103704 :VFMSUB213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsub213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103707 :VFMSUB231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfmsub231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103710 define pcodeop vfmsub132ps_avx512f ; :VFMSUB132PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x9A; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsub132ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103713 define pcodeop vfmsub213ps_avx512f ; :VFMSUB213PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xAA; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsub213ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-192 PAGE 2016 LINE 103716 define pcodeop vfmsub231ps_avx512f ; :VFMSUB231PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xBA; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfmsub231ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-199 PAGE 2023 LINE 104042 define pcodeop vfmsub132sd_avx512f ; :VFMSUB132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmsub132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-199 PAGE 2023 LINE 104045 define pcodeop vfmsub213sd_avx512f ; :VFMSUB213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmsub213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-199 PAGE 2023 LINE 104048 define pcodeop vfmsub231sd_avx512f ; :VFMSUB231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmsub231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-202 PAGE 2026 LINE 104217 define pcodeop vfmsub132ss_avx512f ; :VFMSUB132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmsub132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-202 PAGE 2026 LINE 104220 define pcodeop vfmsub213ss_avx512f ; :VFMSUB213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmsub213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-202 PAGE 2026 LINE 104223 define pcodeop vfmsub231ss_avx512f ; :VFMSUB231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfmsub231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104401 define pcodeop vfnmadd132pd_avx512vl ; :VFNMADD132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfnmadd132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104405 define pcodeop vfnmadd213pd_avx512vl ; :VFNMADD213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfnmadd213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104408 define pcodeop vfnmadd231pd_avx512vl ; :VFNMADD231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfnmadd231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104412 :VFNMADD132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfnmadd132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104416 :VFNMADD213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfnmadd213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104419 :VFNMADD231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfnmadd231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104423 define pcodeop vfnmadd132pd_avx512f ; :VFNMADD132PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x9C; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfnmadd132pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104426 define pcodeop vfnmadd213pd_avx512f ; :VFNMADD213PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xAC; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfnmadd213pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-205 PAGE 2029 LINE 104429 define pcodeop vfnmadd231pd_avx512f ; :VFNMADD231PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xBC; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfnmadd231pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104760 define pcodeop vfnmadd132ps_avx512vl ; :VFNMADD132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfnmadd132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104763 define pcodeop vfnmadd213ps_avx512vl ; :VFNMADD213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfnmadd213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104766 define pcodeop vfnmadd231ps_avx512vl ; :VFNMADD231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vfnmadd231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104769 :VFNMADD132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfnmadd132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104772 :VFNMADD213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vfnmadd213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104775 :VFNMADD231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	YmmResult = vfnmadd231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
	YmmMask = YmmReg1;
	build YmmOpMask32;
	ZmmReg1 = zext(YmmResult);
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104778
# NOTE(review): every sibling EVEX.512 constructor in this family declares and
# calls a dedicated *_avx512f pcodeop (cf. VFNMADD213PS/VFNMADD231PS below and
# VFNMADD132PD above); this constructor previously reused the 128/256-bit op
# vfnmadd132ps_avx512vl and lacked the define. Added the missing define and
# switched the call for consistency. Confirm vfnmadd132ps_avx512f is not
# already defined by a rounding-mode variant elsewhere in this file.
define pcodeop vfnmadd132ps_avx512f ;
:VFNMADD132PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x9C; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vfnmadd132ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104781
define pcodeop vfnmadd213ps_avx512f ;
:VFNMADD213PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xAC; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	ZmmResult = vfnmadd213ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
	ZmmMask = ZmmReg1;
	build ZmmOpMask32;
	ZmmReg1 = ZmmResult;
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-212 PAGE 2036 LINE 104784
define pcodeop vfnmadd231ps_avx512f ;
:VFNMADD231PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xBC; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vfnmadd231ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-218 PAGE 2042 LINE 105098 define pcodeop vfnmadd132sd_avx512f ; :VFNMADD132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfnmadd132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-218 PAGE 2042 LINE 105101 define pcodeop vfnmadd213sd_avx512f ; :VFNMADD213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfnmadd213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-218 PAGE 2042 LINE 105104 define pcodeop vfnmadd231sd_avx512f ; :VFNMADD231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfnmadd231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-221 PAGE 2045 LINE 105270 define pcodeop vfnmadd132ss_avx512f ; :VFNMADD132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfnmadd132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-221 PAGE 2045 LINE 105273 define pcodeop vfnmadd213ss_avx512f ; :VFNMADD213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vfnmadd213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-221 PAGE 2045 LINE 105276 define pcodeop vfnmadd231ss_avx512f ; :VFNMADD231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
# NOTE(review): tail of the VFNMADD231SS constructor whose header lies above this chunk.
# Common pattern in this family: the arithmetic is modeled by an opaque pcodeop; XmmMask
# plus "build XmmOpMask*" applies the EVEX merge/zero write-mask, and ZmmReg1 = zext(...)
# zeroes the untouched upper bits of the full 512-bit register.
& XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmadd231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105451
define pcodeop vfnmsub132pd_avx512vl ;
:VFNMSUB132PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vfnmsub132pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105455
define pcodeop vfnmsub213pd_avx512vl ;
:VFNMSUB213PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vfnmsub213pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105458
define pcodeop vfnmsub231pd_avx512vl ;
:VFNMSUB231PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vfnmsub231pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105462
# 256-bit forms reuse the _avx512vl pcodeops defined for the 128-bit forms above.
:VFNMSUB132PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vfnmsub132pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105466
:VFNMSUB213PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vfnmsub213pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105469
:VFNMSUB231PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vfnmsub231pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105473
# 512-bit (AVX512F) forms write ZmmReg1 directly -- no zext needed.
define pcodeop vfnmsub132pd_avx512f ;
:VFNMSUB132PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x9E; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vfnmsub132pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105476
define pcodeop vfnmsub213pd_avx512f ;
:VFNMSUB213PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xAE; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vfnmsub213pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-224 PAGE 2048 LINE 105479
# (constructor continues on the next source line)
define pcodeop vfnmsub231pd_avx512f ;
:VFNMSUB231PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0xBE; (ZmmReg1 & ZmmOpMask64) ...
# NOTE(review): completes the VFNMSUB231PD 512-bit constructor begun on the previous line.
& ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vfnmsub231pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105800
# Packed-single forms: VEX.W0, 32-bit mask granularity (XmmOpMask32/YmmOpMask32/ZmmOpMask32).
define pcodeop vfnmsub132ps_avx512vl ;
:VFNMSUB132PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vfnmsub132ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105803
define pcodeop vfnmsub213ps_avx512vl ;
:VFNMSUB213PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vfnmsub213ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105806
define pcodeop vfnmsub231ps_avx512vl ;
:VFNMSUB231PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vfnmsub231ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105809
:VFNMSUB132PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vfnmsub132ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105812
:VFNMSUB213PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vfnmsub213ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105815
:VFNMSUB231PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vfnmsub231ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105818
define pcodeop vfnmsub132ps_avx512f ;
:VFNMSUB132PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x9E; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vfnmsub132ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105821
define pcodeop vfnmsub213ps_avx512f ;
:VFNMSUB213PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xAE; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vfnmsub213ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-230 PAGE 2054 LINE 105824
# (constructor continues on the next source line)
define pcodeop vfnmsub231ps_avx512f ;
:VFNMSUB231PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0xBE; (ZmmReg1 & ZmmOpMask32) ...
# NOTE(review): completes the VFNMSUB231PS 512-bit constructor begun on the previous line.
& ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vfnmsub231ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-236 PAGE 2060 LINE 106135
# Scalar double forms: VEX.LIG, tuple type T1S (single 64-bit memory element).
define pcodeop vfnmsub132sd_avx512f ;
:VFNMSUB132SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmsub132sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-236 PAGE 2060 LINE 106138
define pcodeop vfnmsub213sd_avx512f ;
:VFNMSUB213SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmsub213sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-236 PAGE 2060 LINE 106141
define pcodeop vfnmsub231sd_avx512f ;
:VFNMSUB231SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmsub231sd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-239 PAGE 2063 LINE 106307
define pcodeop vfnmsub132ss_avx512f ;
:VFNMSUB132SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmsub132ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-239 PAGE 2063 LINE 106310
define pcodeop vfnmsub213ss_avx512f ;
:VFNMSUB213SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmsub213ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-239 PAGE 2063 LINE 106313
define pcodeop vfnmsub231ss_avx512f ;
:VFNMSUB231SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vfnmsub231ss_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VFPCLASSPD 5-242 PAGE 2066 LINE 106466
# There is an error in the manual where the immediate byte is not specified in the operand encoding, but it is present
define pcodeop vfpclasspd_avx512vl ;
:VFPCLASSPD KReg_reg AVXOpMask, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask; byte=0x66; KReg_reg ... & XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  KReg_reg = vfpclasspd_avx512vl( XmmReg2_m128_m64bcst, AVXOpMask, imm8:1 );
}

# VFPCLASSPD 5-242 PAGE 2066 LINE 106470
:VFPCLASSPD KReg_reg AVXOpMask, YmmReg2_m256_m64bcst,imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask; byte=0x66; KReg_reg ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  KReg_reg = vfpclasspd_avx512vl( YmmReg2_m256_m64bcst, AVXOpMask, imm8:1 );
}

# VFPCLASSPD 5-242 PAGE 2066 LINE 106474
define pcodeop vfpclasspd_avx512dq ;
:VFPCLASSPD KReg_reg AVXOpMask, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask; byte=0x66; KReg_reg ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  KReg_reg = vfpclasspd_avx512dq( ZmmReg2_m512_m64bcst, AVXOpMask, imm8:1 );
}

# VFPCLASSPS 5-245 PAGE 2069 LINE 106608
# There is an error in the manual where the immediate byte is not specified in the operand encoding, but it is present
# (constructor continues on the next source line)
define pcodeop vfpclassps_avx512vl ;
:VFPCLASSPS KReg_reg AVXOpMask, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask; byte=0x66; KReg_reg ...
# NOTE(review): completes the VFPCLASSPS 128-bit constructor begun on the previous line.
& XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  KReg_reg = vfpclassps_avx512vl( XmmReg2_m128_m32bcst, AVXOpMask, imm8:1 );
}

# VFPCLASSPS 5-245 PAGE 2069 LINE 106612
:VFPCLASSPS KReg_reg AVXOpMask, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask; byte=0x66; KReg_reg ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  KReg_reg = vfpclassps_avx512vl( YmmReg2_m256_m32bcst, AVXOpMask,imm8:1 );
}

# VFPCLASSPS 5-245 PAGE 2069 LINE 106616
define pcodeop vfpclassps_avx512dq ;
:VFPCLASSPS KReg_reg AVXOpMask, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask; byte=0x66; KReg_reg ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  KReg_reg = vfpclassps_avx512dq( ZmmReg2_m512_m32bcst, AVXOpMask, imm8:1 );
}

# VFPCLASSSD 5-247 PAGE 2071 LINE 106722
# FIX(review): the encoding is EVEX.LIG.66.0F3A.W1 67 /r ib -- it carries a trailing
# immediate byte (the FP-class bitmap), just like the packed VFPCLASSPD/PS forms above.
# The original constructor omitted imm8, so VFPCLASSSD decoded one byte short and the
# following instruction was disassembled out of alignment.
define pcodeop vfpclasssd_avx512dq ;
:VFPCLASSSD KReg_reg AVXOpMask, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask; byte=0x67; KReg_reg ... & XmmReg2_m64; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  KReg_reg = vfpclasssd_avx512dq( XmmReg2_m64, AVXOpMask, imm8:1 );
}

# VFPCLASSSS 5-249 PAGE 2073 LINE 106810
# FIX(review): same as VFPCLASSSD -- EVEX.LIG.66.0F3A.W0 67 /r ib requires the imm8.
define pcodeop vfpclassss_avx512dq ;
:VFPCLASSSS KReg_reg AVXOpMask, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask; byte=0x67; KReg_reg ... & XmmReg2_m32; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  KReg_reg = vfpclassss_avx512dq( XmmReg2_m32, AVXOpMask, imm8:1 );
}

# VGATHERDPS/VGATHERDPD 5-261 PAGE 2085 LINE 107357
# WARNING: did not recognize qualifier /vsib for "VGATHERDPS xmm1 {k1}, vm32x"
# (constructor continues on the next source line)
define pcodeop vgatherdps_avx512vl ;
:VGATHERDPS XmmReg1^XmmOpMask32, m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x92; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
# NOTE(review): completes the VGATHERDPS 128-bit constructor begun on the previous line.
# The /vsib addressing is approximated by a plain m32/m64 operand (see WARNING comments);
# the gather itself is modeled as an opaque pcodeop.
& m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgatherdps_avx512vl( m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VGATHERDPS/VGATHERDPD 5-261 PAGE 2085 LINE 107359
# WARNING: did not recognize qualifier /vsib for "VGATHERDPS ymm1 {k1}, vm32y"
:VGATHERDPS YmmReg1^YmmOpMask32, m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x92; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vgatherdps_avx512vl( m32 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VGATHERDPS/VGATHERDPD 5-261 PAGE 2085 LINE 107361
# WARNING: did not recognize qualifier /vsib for "VGATHERDPS zmm1 {k1}, vm32z"
define pcodeop vgatherdps_avx512f ;
:VGATHERDPS ZmmReg1^ZmmOpMask32, m32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x92; (ZmmReg1 & ZmmOpMask32) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  ZmmResult = vgatherdps_avx512f( m32 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VGATHERDPS/VGATHERDPD 5-261 PAGE 2085 LINE 107363
# WARNING: did not recognize qualifier /vsib for "VGATHERDPD xmm1 {k1}, vm32x"
define pcodeop vgatherdpd_avx512vl ;
:VGATHERDPD XmmReg1^XmmOpMask64, m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x92; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgatherdpd_avx512vl( m32 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGATHERDPS/VGATHERDPD 5-261 PAGE 2085 LINE 107366
# WARNING: did not recognize qualifier /vsib for "VGATHERDPD ymm1 {k1}, vm32x"
:VGATHERDPD YmmReg1^YmmOpMask64, m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x92; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vgatherdpd_avx512vl( m32 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VGATHERDPS/VGATHERDPD 5-261 PAGE 2085 LINE 107369
# WARNING: did not recognize qualifier /vsib for "VGATHERDPD zmm1 {k1}, vm32y"
define pcodeop vgatherdpd_avx512f ;
:VGATHERDPD ZmmReg1^ZmmOpMask64, m32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x92; (ZmmReg1 & ZmmOpMask64) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  ZmmResult = vgatherdpd_avx512f( m32 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VGATHERPF0DPS/VGATHERPF0QPS/VGATHERPF0DPD/VGATHERPF0QPD 5-264 PAGE 2088 LINE 107497
# WARNING: did not recognize qualifier /vsib for "VGATHERPF0DPS vm32z {k1}"
# Prefetch-hint gathers: encoded via the /1 ModRM extension (reg_opcode=1), no result written.
define pcodeop vgatherpf0dps_avx512pf ;
:VGATHERPF0DPS m32^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xC6; reg_opcode=1 ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf0dps_avx512pf( m32, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERPF0DPS/VGATHERPF0QPS/VGATHERPF0DPD/VGATHERPF0QPD 5-264 PAGE 2088 LINE 107500
# WARNING: did not recognize qualifier /vsib for "VGATHERPF0QPS vm64z {k1}"
define pcodeop vgatherpf0qps_avx512pf ;
:VGATHERPF0QPS m64^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xC7; reg_opcode=1 ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf0qps_avx512pf( m64, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERPF0DPS/VGATHERPF0QPS/VGATHERPF0DPD/VGATHERPF0QPD 5-264 PAGE 2088 LINE 107503
# WARNING: did not recognize qualifier /vsib for "VGATHERPF0DPD vm32y {k1}"
# (constructor continues on the next source line)
define pcodeop vgatherpf0dpd_avx512pf ;
:VGATHERPF0DPD m32^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xC6; reg_opcode=1 ...
# NOTE(review): completes the VGATHERPF0DPD constructor begun on the previous line.
& m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf0dpd_avx512pf( m32, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERPF0DPS/VGATHERPF0QPS/VGATHERPF0DPD/VGATHERPF0QPD 5-264 PAGE 2088 LINE 107506
# WARNING: did not recognize qualifier /vsib for "VGATHERPF0QPD vm64z {k1}"
define pcodeop vgatherpf0qpd_avx512pf ;
:VGATHERPF0QPD m64^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xC7; reg_opcode=1 ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf0qpd_avx512pf( m64, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERPF1DPS/VGATHERPF1QPS/VGATHERPF1DPD/VGATHERPF1QPD 5-267 PAGE 2091 LINE 107620
# WARNING: did not recognize qualifier /vsib for "VGATHERPF1DPS vm32z {k1}"
# PF1 variants use the /2 ModRM extension (reg_opcode=2) with the same opcode bytes.
define pcodeop vgatherpf1dps_avx512pf ;
:VGATHERPF1DPS m32^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xC6; reg_opcode=2 ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf1dps_avx512pf( m32 , XmmOpMask);
  # TODO missing destination or side effects
}

# VGATHERPF1DPS/VGATHERPF1QPS/VGATHERPF1DPD/VGATHERPF1QPD 5-267 PAGE 2091 LINE 107623
# WARNING: did not recognize qualifier /vsib for "VGATHERPF1QPS vm64z {k1}"
define pcodeop vgatherpf1qps_avx512pf ;
:VGATHERPF1QPS m64^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xC7; reg_opcode=2 ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf1qps_avx512pf( m64, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERPF1DPS/VGATHERPF1QPS/VGATHERPF1DPD/VGATHERPF1QPD 5-267 PAGE 2091 LINE 107626
# WARNING: did not recognize qualifier /vsib for "VGATHERPF1DPD vm32y {k1}"
define pcodeop vgatherpf1dpd_avx512pf ;
:VGATHERPF1DPD m32^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xC6; reg_opcode=2 ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf1dpd_avx512pf( m32, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERPF1DPS/VGATHERPF1QPS/VGATHERPF1DPD/VGATHERPF1QPD 5-267 PAGE 2091 LINE 107629
# WARNING: did not recognize qualifier /vsib for "VGATHERPF1QPD vm64z {k1}"
define pcodeop vgatherpf1qpd_avx512pf ;
:VGATHERPF1QPD m64^XmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xC7; reg_opcode=2 ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vgatherpf1qpd_avx512pf( m64, XmmOpMask );
  # TODO missing destination or side effects
}

# VGATHERQPS/VGATHERQPD 5-270 PAGE 2094 LINE 107742
# WARNING: did not recognize qualifier /vsib for "VGATHERQPS xmm1 {k1}, vm64x"
define pcodeop vgatherqps_avx512vl ;
:VGATHERQPS XmmReg1^XmmOpMask64, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x93; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgatherqps_avx512vl( m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGATHERQPS/VGATHERQPD 5-270 PAGE 2094 LINE 107745
# WARNING: did not recognize qualifier /vsib for "VGATHERQPS xmm1 {k1}, vm64y"
# L256 form still writes an XMM destination (qword indices, dword results).
:VGATHERQPS XmmReg1^XmmOpMask64, m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x93; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgatherqps_avx512vl( m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGATHERQPS/VGATHERQPD 5-270 PAGE 2094 LINE 107748
# WARNING: did not recognize qualifier /vsib for "VGATHERQPS ymm1 {k1}, vm64z"
define pcodeop vgatherqps_avx512f ;
:VGATHERQPS YmmReg1^YmmOpMask64, m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x93; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vgatherqps_avx512f( m64 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VGATHERQPS/VGATHERQPD 5-270 PAGE 2094 LINE 107751
# WARNING: did not recognize qualifier /vsib for "VGATHERQPD xmm1 {k1}, vm64x"
define pcodeop vgatherqpd_avx512vl ;
:VGATHERQPD XmmReg1^XmmOpMask64, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x93; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgatherqpd_avx512vl( m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGATHERQPS/VGATHERQPD 5-270 PAGE 2094 LINE 107753
# WARNING: did not recognize qualifier /vsib for "VGATHERQPD ymm1 {k1}, vm64y"
:VGATHERQPD YmmReg1^YmmOpMask64, m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x93; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vgatherqpd_avx512vl( m64 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VGATHERQPS/VGATHERQPD 5-270 PAGE 2094 LINE 107755
# WARNING: did not recognize qualifier /vsib for "VGATHERQPD zmm1 {k1}, vm64z"
define pcodeop vgatherqpd_avx512f ;
:VGATHERQPD ZmmReg1^ZmmOpMask64, m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x93; (ZmmReg1 & ZmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  ZmmResult = vgatherqpd_avx512f( m64 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VPGATHERDD/VPGATHERDQ 5-277 PAGE 2101 LINE 108099
# WARNING: did not recognize qualifier /vsib for "VPGATHERDD xmm1 {k1}, vm32x"
# (constructor continues on the next source line)
define pcodeop vpgatherdd_avx512vl ;
:VPGATHERDD XmmReg1^XmmOpMask32, m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x90; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
# NOTE(review): completes the VPGATHERDD 128-bit constructor begun on the previous line.
& m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vpgatherdd_avx512vl( m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VPGATHERDD/VPGATHERDQ 5-277 PAGE 2101 LINE 108101
# WARNING: did not recognize qualifier /vsib for "VPGATHERDD ymm1 {k1}, vm32y"
:VPGATHERDD YmmReg1^YmmOpMask32, m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x90; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vpgatherdd_avx512vl( m32 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VPGATHERDD/VPGATHERDQ 5-277 PAGE 2101 LINE 108103
# WARNING: did not recognize qualifier /vsib for "VPGATHERDD zmm1 {k1}, vm32z"
define pcodeop vpgatherdd_avx512f ;
:VPGATHERDD ZmmReg1^ZmmOpMask32, m32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x90; (ZmmReg1 & ZmmOpMask32) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  ZmmResult = vpgatherdd_avx512f( m32 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VPGATHERDD/VPGATHERDQ 5-277 PAGE 2101 LINE 108105
# WARNING: did not recognize qualifier /vsib for "VPGATHERDQ xmm1 {k1}, vm32x"
define pcodeop vpgatherdq_avx512vl ;
:VPGATHERDQ XmmReg1^XmmOpMask64, m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x90; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vpgatherdq_avx512vl( m32 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VPGATHERDD/VPGATHERDQ 5-277 PAGE 2101 LINE 108107
# WARNING: did not recognize qualifier /vsib for "VPGATHERDQ ymm1 {k1}, vm32x"
:VPGATHERDQ YmmReg1^YmmOpMask64, m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x90; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vpgatherdq_avx512vl( m32 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VPGATHERDD/VPGATHERDQ 5-277 PAGE 2101 LINE 108109
# WARNING: did not recognize qualifier /vsib for "VPGATHERDQ zmm1 {k1}, vm32y"
define pcodeop vpgatherdq_avx512f ;
:VPGATHERDQ ZmmReg1^ZmmOpMask64, m32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x90; (ZmmReg1 & ZmmOpMask64) ... & m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  ZmmResult = vpgatherdq_avx512f( m32 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VPGATHERQD/VPGATHERQQ 5-285 PAGE 2109 LINE 108457
# WARNING: did not recognize qualifier /vsib for "VPGATHERQD xmm1 {k1}, vm64x"
define pcodeop vpgatherqd_avx512vl ;
:VPGATHERQD XmmReg1^XmmOpMask32, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x91; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vpgatherqd_avx512vl( m64 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VPGATHERQD/VPGATHERQQ 5-285 PAGE 2109 LINE 108459
# WARNING: did not recognize qualifier /vsib for "VPGATHERQD xmm1 {k1}, vm64y"
:VPGATHERQD XmmReg1^XmmOpMask32, m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x91; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vpgatherqd_avx512vl( m64 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VPGATHERQD/VPGATHERQQ 5-285 PAGE 2109 LINE 108461
# WARNING: did not recognize qualifier /vsib for "VPGATHERQD ymm1 {k1}, vm64z"
define pcodeop vpgatherqd_avx512f ;
:VPGATHERQD YmmReg1^YmmOpMask32, m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x91; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vpgatherqd_avx512f( m64 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VPGATHERQD/VPGATHERQQ 5-285 PAGE 2109 LINE 108463
# WARNING: did not recognize qualifier /vsib for "VPGATHERQQ xmm1 {k1}, vm64x"
define pcodeop vpgatherqq_avx512vl ;
:VPGATHERQQ XmmReg1^XmmOpMask64, m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x91; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vpgatherqq_avx512vl( m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VPGATHERQD/VPGATHERQQ 5-285 PAGE 2109 LINE 108465
# WARNING: did not recognize qualifier /vsib for "VPGATHERQQ ymm1 {k1}, vm64y"
:VPGATHERQQ YmmReg1^YmmOpMask64, m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x91; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  YmmResult = vpgatherqq_avx512vl( m64 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VPGATHERQD/VPGATHERQQ 5-285 PAGE 2109 LINE 108467
# WARNING: did not recognize qualifier /vsib for "VPGATHERQQ zmm1 {k1}, vm64z"
define pcodeop vpgatherqq_avx512f ;
:VPGATHERQQ ZmmReg1^ZmmOpMask64, m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x91; (ZmmReg1 & ZmmOpMask64) ... & m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  ZmmResult = vpgatherqq_avx512f( m64 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VGETEXPPD 5-288 PAGE 2112 LINE 108594
# (constructor continues on the next source line)
define pcodeop vgetexppd_avx512vl ;
:VGETEXPPD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x42; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
# NOTE(review): completes the VGETEXPPD 128-bit constructor begun on the previous line.
& XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vgetexppd_avx512vl( XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGETEXPPD 5-288 PAGE 2112 LINE 108598
:VGETEXPPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x42; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vgetexppd_avx512vl( YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VGETEXPPD 5-288 PAGE 2112 LINE 108602
define pcodeop vgetexppd_avx512f ;
:VGETEXPPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x42; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vgetexppd_avx512f( ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VGETEXPPS 5-291 PAGE 2115 LINE 108760
define pcodeop vgetexpps_avx512vl ;
:VGETEXPPS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x42; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vgetexpps_avx512vl( XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VGETEXPPS 5-291 PAGE 2115 LINE 108764
:VGETEXPPS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x42; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vgetexpps_avx512vl( YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VGETEXPPS 5-291 PAGE 2115 LINE 108768
define pcodeop vgetexpps_avx512f ;
:VGETEXPPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x42; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vgetexpps_avx512f( ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VGETEXPSD 5-295 PAGE 2119 LINE 108959
# (0F38-map scalar: no immediate byte in this encoding.)
define pcodeop vgetexpsd_avx512f ;
:VGETEXPSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgetexpsd_avx512f( evexV5_XmmReg, XmmReg2_m64 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGETEXPSS 5-297 PAGE 2121 LINE 109037
define pcodeop vgetexpss_avx512f ;
:VGETEXPSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgetexpss_avx512f( evexV5_XmmReg, XmmReg2_m32 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VGETMANTPD 5-299 PAGE 2123 LINE 109120
define pcodeop vgetmantpd_avx512vl ;
:VGETMANTPD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x26; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
  XmmResult = vgetmantpd_avx512vl( XmmReg2_m128_m64bcst, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGETMANTPD 5-299 PAGE 2123 LINE 109125
:VGETMANTPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x26; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
  YmmResult = vgetmantpd_avx512vl( YmmReg2_m256_m64bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VGETMANTPD 5-299 PAGE 2123 LINE 109130
define pcodeop vgetmantpd_avx512f ;
:VGETMANTPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x26; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
  ZmmResult = vgetmantpd_avx512f( ZmmReg2_m512_m64bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VGETMANTPS 5-303 PAGE 2127 LINE 109339
define pcodeop vgetmantps_avx512vl ;
:VGETMANTPS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x26; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
  XmmResult = vgetmantps_avx512vl( XmmReg2_m128_m32bcst, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VGETMANTPS 5-303 PAGE 2127 LINE 109344
:VGETMANTPS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x26; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
  YmmResult = vgetmantps_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VGETMANTPS 5-303 PAGE 2127 LINE 109349
define pcodeop vgetmantps_avx512f ;
:VGETMANTPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x26; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FVI)
{
  ZmmResult = vgetmantps_avx512f( ZmmReg2_m512_m32bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VGETMANTSD 5-306 PAGE 2130 LINE 109519
# FIX(review): the encoding is EVEX.LIG.66.0F3A.W1 27 /r ib -- it carries a trailing
# immediate byte (the normalization/sign control), just like the packed VGETMANTPD
# forms above.  The original constructor omitted imm8, so VGETMANTSD decoded one byte
# short and the following instruction was disassembled out of alignment.
define pcodeop vgetmantsd_avx512f ;
:VGETMANTSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgetmantsd_avx512f( evexV5_XmmReg, XmmReg2_m64, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VGETMANTSS 5-308 PAGE 2132 LINE 109610
# FIX(review): same as VGETMANTSD -- EVEX.LIG.66.0F3A.W0 27 /r ib requires the imm8.
define pcodeop vgetmantss_avx512f ;
:VGETMANTSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vgetmantss_avx512f( evexV5_XmmReg, XmmReg2_m32, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109706
# (constructor continues past this chunk -- left unchanged)
define pcodeop vinsertf32x4_avx512vl ;
:VINSERTF32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
# SLEIGH constructors for AVX-512 VINSERTF32X4/64X2/32X8/64X4 and VINSERTI32X4/64X2,
# auto-generated from the Intel SDM. The imm8 lane selector is passed through to the
# opaque pcodeop as a 1-byte value (imm8:1).
# NOTE(review): first fragment below completes a constructor whose header is on the preceding line.
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vinsertf32x4_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109709 define pcodeop vinsertf32x4_avx512f ; :VINSERTF32X4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x18; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinsertf32x4_avx512f( evexV5_ZmmReg, XmmReg2_m128, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109712 define pcodeop vinsertf64x2_avx512vl ; :VINSERTF64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x18; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vinsertf64x2_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109715 define pcodeop vinsertf64x2_avx512dq ; :VINSERTF64X2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x18; (ZmmReg1 & ZmmOpMask64) ... 
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinsertf64x2_avx512dq( evexV5_ZmmReg, XmmReg2_m128, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109718 define pcodeop vinsertf32x8_avx512dq ; :VINSERTF32X8 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x1A; (ZmmReg1 & ZmmOpMask32) ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinsertf32x8_avx512dq( evexV5_ZmmReg, YmmReg2_m256, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VINSERTF128/VINSERTF32x4/VINSERTF64x2/VINSERTF32x8/VINSERTF64x4 5-310 PAGE 2134 LINE 109721 define pcodeop vinsertf64x4_avx512f ; :VINSERTF64X4 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x1A; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinsertf64x4_avx512f( evexV5_ZmmReg, YmmReg2_m256, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109930 define pcodeop vinserti32x4_avx512vl ; :VINSERTI32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vinserti32x4_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109933 define pcodeop vinserti32x4_avx512f ; :VINSERTI32X4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x38; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinserti32x4_avx512f( evexV5_ZmmReg, XmmReg2_m128, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109936 define pcodeop vinserti64x2_avx512vl ; :VINSERTI64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x38; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { YmmResult = vinserti64x2_avx512vl( evexV5_YmmReg, XmmReg2_m128, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109939 define pcodeop vinserti64x2_avx512dq ; :VINSERTI64X2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x38; (ZmmReg1 & ZmmOpMask64) ... 
& XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinserti64x2_avx512dq( evexV5_ZmmReg, XmmReg2_m128, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109942 define pcodeop vinserti32x8_avx512dq ; :VINSERTI32X8 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x3A; (ZmmReg1 & ZmmOpMask32) ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinserti32x8_avx512dq( evexV5_ZmmReg, YmmReg2_m256, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VINSERTI128/VINSERTI32x4/VINSERTI64x2/VINSERTI32x8/VINSERTI64x4 5-314 PAGE 2138 LINE 109945 define pcodeop vinserti64x4_avx512f ; :VINSERTI64X4 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x3A; (ZmmReg1 & ZmmOpMask64) ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 6; ] # (TupleType T2,T4,T8) { ZmmResult = vinserti64x4_avx512f( evexV5_ZmmReg, YmmReg2_m256, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110393 define pcodeop vpblendmb_avx512vl ; :VPBLENDMB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... 
# SLEIGH constructors for AVX-512 VPBLENDMB/W/D/Q (opmask-controlled blend),
# auto-generated from the Intel SDM. Each size variant dispatches to an opaque
# pcodeop named for the ISA extension that introduces it (avx512vl/avx512bw/avx512f).
# NOTE(review): first fragment below completes a constructor whose header is on the preceding line.
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpblendmb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110396 :VPBLENDMB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpblendmb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110399 define pcodeop vpblendmb_avx512bw ; :VPBLENDMB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x66; (ZmmReg1 & ZmmOpMask8) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpblendmb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110402 define pcodeop vpblendmw_avx512vl ; :VPBLENDMW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x66; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpblendmw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110405 :VPBLENDMW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x66; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpblendmw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPBLENDMB/VPBLENDMW 5-323 PAGE 2147 LINE 110408 define pcodeop vpblendmw_avx512bw ; :VPBLENDMW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x66; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpblendmw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110495 define pcodeop vpblendmd_avx512vl ; :VPBLENDMD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpblendmd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110498 :VPBLENDMD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpblendmd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110501 define pcodeop vpblendmd_avx512f ; :VPBLENDMD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x64; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpblendmd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110504 define pcodeop vpblendmq_avx512vl ; :VPBLENDMQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x64; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpblendmq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110507 :VPBLENDMQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x64; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpblendmq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPBLENDMD/VPBLENDMQ 5-325 PAGE 2149 LINE 110510 define pcodeop vpblendmq_avx512f ; :VPBLENDMQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x64; (ZmmReg1 & ZmmOpMask64) ... 
# SLEIGH constructors for the AVX-512 VPBROADCASTB/W/D/Q register-source forms and the
# start of the memory-source forms, auto-generated from the Intel SDM. The register-source
# variants take no source operand here because the generator could not recognize the
# "reg"/"r32"/"r64" ModRM:r/m operand (see the WARNING/#TODO comments); their pcodeops
# are called with no arguments, so the broadcast source is lost — known generator limitation.
# NOTE(review): first fragment below completes a constructor whose header is on the preceding line.
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpblendmq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110617 # WARNING: did not recognize operand "reg" (encoding ModRM:r/m (r)) for "VPBROADCASTB xmm1 {k1}{z}, reg" #TODO: fix define pcodeop vpbroadcastb_avx512vl ; :VPBROADCASTB XmmReg1^XmmOpMask8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7A; (XmmReg1 & ZmmReg1 & XmmOpMask8) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { local tmp:16 = vpbroadcastb_avx512vl( ); build XmmOpMask8; ZmmReg1 = zext(tmp); } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110619 # WARNING: did not recognize operand "reg" (encoding ModRM:r/m (r)) for "VPBROADCASTB ymm1 {k1}{z}, reg" :VPBROADCASTB YmmReg1^YmmOpMask8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7A; (YmmReg1 & ZmmReg1 & YmmOpMask8) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmResult = vpbroadcastb_avx512vl( ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110621 # WARNING: did not recognize operand "reg" (encoding ModRM:r/m (r)) for "VPBROADCASTB zmm1 {k1}{z}, reg" define pcodeop vpbroadcastb_avx512bw ; :VPBROADCASTB ZmmReg1^ZmmOpMask8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7A; ZmmReg1 & ZmmOpMask8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmResult = vpbroadcastb_avx512bw( ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110623 # WARNING: did not recognize operand "reg" (encoding ModRM:r/m (r)) for "VPBROADCASTW xmm1 {k1}{z}, reg" define pcodeop vpbroadcastw_avx512vl ; :VPBROADCASTW XmmReg1^XmmOpMask16 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7B; (XmmReg1 & ZmmReg1 & XmmOpMask16) [ 
evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vpbroadcastw_avx512vl( ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110625 # WARNING: did not recognize operand "reg" (encoding ModRM:r/m (r)) for "VPBROADCASTW ymm1 {k1}{z}, reg" :VPBROADCASTW YmmReg1^YmmOpMask16 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7B; (YmmReg1 & ZmmReg1 & YmmOpMask16) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmResult = vpbroadcastw_avx512vl( ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110627 # WARNING: did not recognize operand "reg" (encoding ModRM:r/m (r)) for "VPBROADCASTW zmm1 {k1}{z}, reg" define pcodeop vpbroadcastw_avx512bw ; :VPBROADCASTW ZmmReg1^ZmmOpMask16 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7B; ZmmReg1 & ZmmOpMask16 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmResult = vpbroadcastw_avx512bw( ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110629 # WARNING: did not recognize operand "r32" (encoding ModRM:r/m (r)) for "VPBROADCASTD xmm1 {k1}{z}, r32" define pcodeop vpbroadcastd_avx512vl ; :VPBROADCASTD XmmReg1^XmmOpMask32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7C; (XmmReg1 & ZmmReg1 & XmmOpMask32) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vpbroadcastd_avx512vl( ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110631 # WARNING: did not recognize operand "r32" (encoding ModRM:r/m (r)) for "VPBROADCASTD ymm1 {k1}{z}, r32" :VPBROADCASTD YmmReg1^YmmOpMask32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7C; (YmmReg1 & ZmmReg1 & YmmOpMask32) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmResult = 
vpbroadcastd_avx512vl( ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110633 # WARNING: did not recognize operand "r32" (encoding ModRM:r/m (r)) for "VPBROADCASTD zmm1 {k1}{z}, r32" define pcodeop vpbroadcastd_avx512f ; :VPBROADCASTD ZmmReg1^ZmmOpMask32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x7C; ZmmReg1 & ZmmOpMask32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmResult = vpbroadcastd_avx512f( ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmReg1; } # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110635 # WARNING: did not recognize operand "r64" (encoding ModRM:r/m (r)) for "VPBROADCASTQ xmm1 {k1}{z}, r64" define pcodeop vpbroadcastq_avx512vl ; @ifdef IA64 :VPBROADCASTQ XmmReg1^XmmOpMask64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1); byte=0x7C; (XmmReg1 & ZmmReg1 & XmmOpMask64) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vpbroadcastq_avx512vl( ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } @endif # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110637 # WARNING: did not recognize operand "r64" (encoding ModRM:r/m (r)) for "VPBROADCASTQ ymm1 {k1}{z}, r64" @ifdef IA64 :VPBROADCASTQ YmmReg1^YmmOpMask64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1); byte=0x7C; (YmmReg1 & ZmmReg1 & YmmOpMask64) [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { YmmResult = vpbroadcastq_avx512vl( ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } @endif # VPBROADCASTB/W/D/Q 5-328 PAGE 2152 LINE 110639 # WARNING: did not recognize operand "r64" (encoding ModRM:r/m (r)) for "VPBROADCASTQ zmm1 {k1}{z}, r64" define pcodeop vpbroadcastq_avx512f ; @ifdef IA64 :VPBROADCASTQ ZmmReg1^ZmmOpMask64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1); byte=0x7C; ZmmReg1 & ZmmOpMask64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { ZmmResult = 
vpbroadcastq_avx512f( ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } @endif # VPBROADCAST 5-331 PAGE 2155 LINE 110780 :VPBROADCASTB XmmReg1^XmmOpMask8, XmmReg2_m8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (XmmReg1 & ZmmReg1 & XmmOpMask8) ... & XmmReg2_m8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { XmmResult = vpbroadcastb_avx512vl( XmmReg2_m8 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110782 :VPBROADCASTB YmmReg1^YmmOpMask8, XmmReg2_m8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (YmmReg1 & ZmmReg1 & YmmOpMask8) ... & XmmReg2_m8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vpbroadcastb_avx512vl( XmmReg2_m8 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110784 :VPBROADCASTB ZmmReg1^ZmmOpMask8, XmmReg2_m8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x78; (ZmmReg1 & ZmmOpMask8) ... & XmmReg2_m8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vpbroadcastb_avx512bw( XmmReg2_m8 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-331 PAGE 2155 LINE 110791 :VPBROADCASTW XmmReg1^XmmOpMask16, XmmReg2_m16 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m16 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { XmmResult = vpbroadcastw_avx512vl( XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110794 :VPBROADCASTW YmmReg1^YmmOpMask16, XmmReg2_m16 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
# SLEIGH constructors for the AVX-512 VPBROADCASTW/D/Q memory/xmm-source forms and
# VBROADCASTI32x2/32X4/64X2/32X8/64X4, auto-generated from the Intel SDM. Note the
# opcode overlap at byte=0x59: VPBROADCASTQ uses VEX.W1 while VBROADCASTI32x2 uses
# VEX.W0, so the W bit disambiguates the two.
# NOTE(review): first fragment below completes a constructor whose header is on the preceding line.
& XmmReg2_m16 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vpbroadcastw_avx512vl( XmmReg2_m16 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110797 :VPBROADCASTW ZmmReg1^ZmmOpMask16, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x79; (ZmmReg1 & ZmmOpMask16) ... & XmmReg2_m16 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vpbroadcastw_avx512bw( XmmReg2_m16 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-331 PAGE 2155 LINE 110804 :VPBROADCASTD XmmReg1^XmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { XmmResult = vpbroadcastd_avx512vl( XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110807 :VPBROADCASTD YmmReg1^YmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vpbroadcastd_avx512vl( XmmReg2_m32 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110810 :VPBROADCASTD ZmmReg1^ZmmOpMask32, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x58; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vpbroadcastd_avx512f( XmmReg2_m32 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-331 PAGE 2155 LINE 110817 :VPBROADCASTQ XmmReg1^XmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { XmmResult = vpbroadcastq_avx512vl( XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110819 :VPBROADCASTQ YmmReg1^YmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vpbroadcastq_avx512vl( XmmReg2_m64 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-331 PAGE 2155 LINE 110821 :VPBROADCASTQ ZmmReg1^ZmmOpMask64, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x59; (ZmmReg1 & ZmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vpbroadcastq_avx512f( XmmReg2_m64 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-331 PAGE 2155 LINE 110823 define pcodeop vbroadcasti32x2_avx512vl ; :VBROADCASTI32x2 XmmReg1^XmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { XmmResult = vbroadcasti32x2_avx512vl( XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPBROADCAST 5-332 PAGE 2156 LINE 110837 :VBROADCASTI32x2 YmmReg1^YmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcasti32x2_avx512vl( XmmReg2_m64 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-332 PAGE 2156 LINE 110840 define pcodeop vbroadcasti32x2_avx512dq ; :VBROADCASTI32x2 ZmmReg1^ZmmOpMask32, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x59; (ZmmReg1 & ZmmOpMask32) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcasti32x2_avx512dq( XmmReg2_m64 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-332 PAGE 2156 LINE 110845 define pcodeop vbroadcasti32x4_avx512vl ; :VBROADCASTI32X4 YmmReg1^YmmOpMask32, m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x5A; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcasti32x4_avx512vl( m128 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-332 PAGE 2156 LINE 110848 define pcodeop vbroadcasti32x4_avx512f ; :VBROADCASTI32X4 ZmmReg1^ZmmOpMask32, m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x5A; (ZmmReg1 & ZmmOpMask32) ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcasti32x4_avx512f( m128 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-332 PAGE 2156 LINE 110851 define pcodeop vbroadcasti64x2_avx512vl ; :VBROADCASTI64X2 YmmReg1^YmmOpMask64, m128 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x5A; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { YmmResult = vbroadcasti64x2_avx512vl( m128 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPBROADCAST 5-332 PAGE 2156 LINE 110854 define pcodeop vbroadcasti64x2_avx512dq ; :VBROADCASTI64X2 ZmmReg1^ZmmOpMask64, m128 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x5A; (ZmmReg1 & ZmmOpMask64) ... & m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcasti64x2_avx512dq( m128 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-332 PAGE 2156 LINE 110857 define pcodeop vbroadcasti32x8_avx512dq ; :VBROADCASTI32X8 ZmmReg1^ZmmOpMask32, m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x5B; (ZmmReg1 & ZmmOpMask32) ... & m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcasti32x8_avx512dq( m256 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPBROADCAST 5-332 PAGE 2156 LINE 110860 define pcodeop vbroadcasti64x4_avx512f ; :VBROADCASTI64X4 ZmmReg1^ZmmOpMask64, m256 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x5B; (ZmmReg1 & ZmmOpMask64) ... 
& m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S,T2,T4,T8) { ZmmResult = vbroadcasti64x4_avx512f( m256 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111259 VPCMPB_mon: "VPCMPEQB" is imm8=0x0 { } VPCMPB_mon: "VPCMPLTB" is imm8=0x1 { } VPCMPB_mon: "VPCMPLEB" is imm8=0x2 { } VPCMPB_mon: "VPCMPEQB" is imm8=0x4 { } VPCMPB_mon: "VPCMPNLTB" is imm8=0x5 { } VPCMPB_mon: "VPCMPNLEB" is imm8=0x6 { } VPCMPB_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; } VPCMPB_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; } VPCMPB_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; } VPCMPB_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; } VPCMPB_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; } VPCMPB_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; } VPCMPB_mon: "VPCMPB" is imm8 { } VPCMPB_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } define pcodeop vpcmpb_avx512vl ; :^VPCMPB_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128^VPCMPB_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128; VPCMPB_mon & VPCMPB_op [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vpcmpb_avx512vl( evexV5_XmmReg, XmmReg2_m128, VPCMPB_op ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111263 :^VPCMPB_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256^VPCMPB_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ... 
& YmmReg2_m256; VPCMPB_mon & VPCMPB_op [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vpcmpb_avx512vl( evexV5_YmmReg, YmmReg2_m256, VPCMPB_op ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111267 define pcodeop vpcmpb_avx512bw ; :^VPCMPB_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512^VPCMPB_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x3F; KReg_reg ... & ZmmReg2_m512; VPCMPB_mon & VPCMPB_op [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vpcmpb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, VPCMPB_op ); KReg_reg = zext(AVXOpMask[0,64]) & tmp; } # VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111271 VPCMPUB_mon: "VPCMPEQUB" is imm8=0x0 { } VPCMPUB_mon: "VPCMPLTUB" is imm8=0x1 { } VPCMPUB_mon: "VPCMPLEUB" is imm8=0x2 { } VPCMPUB_mon: "VPCMPEQUB" is imm8=0x4 { } VPCMPUB_mon: "VPCMPNLTUB" is imm8=0x5 { } VPCMPUB_mon: "VPCMPNLEUB" is imm8=0x6 { } VPCMPUB_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; } VPCMPUB_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; } VPCMPUB_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; } VPCMPUB_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; } VPCMPUB_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; } VPCMPUB_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; } VPCMPUB_mon: "VPCMPUB" is imm8 { } VPCMPUB_op: ", "^imm8 is imm8 { export *[const]:1 imm8; } define pcodeop vpcmpub_avx512vl ; :^VPCMPUB_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128^VPCMPUB_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ... 
& XmmReg2_m128; VPCMPUB_mon & VPCMPUB_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpub_avx512vl( evexV5_XmmReg, XmmReg2_m128, VPCMPUB_op );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111275
:^VPCMPUB_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256^VPCMPUB_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256; VPCMPUB_mon & VPCMPUB_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpub_avx512vl( evexV5_YmmReg, YmmReg2_m256, VPCMPUB_op );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# VPCMPB/VPCMPUB 5-339 PAGE 2163 LINE 111279
define pcodeop vpcmpub_avx512bw ;
:^VPCMPUB_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512^VPCMPUB_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x3E; KReg_reg ... & ZmmReg2_m512; VPCMPUB_mon & VPCMPUB_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpub_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, VPCMPUB_op );
	KReg_reg = zext(AVXOpMask[0,64]) & tmp;
}

# VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111422
# Pseudo-op mnemonics for VPCMPD; per the Intel SDM, imm8=4 is NEQ.
VPCMPD_mon: "VPCMPEQD" is imm8=0x0 { }
VPCMPD_mon: "VPCMPLTD" is imm8=0x1 { }
VPCMPD_mon: "VPCMPLED" is imm8=0x2 { }
VPCMPD_mon: "VPCMPNEQD" is imm8=0x4 { }
VPCMPD_mon: "VPCMPNLTD" is imm8=0x5 { }
VPCMPD_mon: "VPCMPNLED" is imm8=0x6 { }
VPCMPD_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; }
VPCMPD_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; }
VPCMPD_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; }
VPCMPD_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; }
VPCMPD_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; }
VPCMPD_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; }
VPCMPD_mon: "VPCMPD" is imm8 { }
VPCMPD_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }
define pcodeop vpcmpd_avx512vl ;
:^VPCMPD_mon KReg_reg AVXOpMask,
evexV5_XmmReg, XmmReg2_m128_m32bcst^VPCMPD_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x1F; KReg_reg ... & XmmReg2_m128_m32bcst; VPCMPD_mon & VPCMPD_op [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vpcmpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, VPCMPD_op ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111426 :^VPCMPD_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst^VPCMPD_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x1F; KReg_reg ... & YmmReg2_m256_m32bcst; VPCMPD_mon & VPCMPD_op [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vpcmpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, VPCMPD_op ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111430 define pcodeop vpcmpd_avx512f ; :^VPCMPD_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst^VPCMPD_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x1F; KReg_reg ... 
& ZmmReg2_m512_m32bcst; VPCMPD_mon & VPCMPD_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, VPCMPD_op );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111434
# Pseudo-op mnemonics for VPCMPUD; per the Intel SDM, imm8=4 is NEQ.
VPCMPUD_mon: "VPCMPEQUD" is imm8=0x0 { }
VPCMPUD_mon: "VPCMPLTUD" is imm8=0x1 { }
VPCMPUD_mon: "VPCMPLEUD" is imm8=0x2 { }
VPCMPUD_mon: "VPCMPNEQUD" is imm8=0x4 { }
VPCMPUD_mon: "VPCMPNLTUD" is imm8=0x5 { }
VPCMPUD_mon: "VPCMPNLEUD" is imm8=0x6 { }
VPCMPUD_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; }
VPCMPUD_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; }
VPCMPUD_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; }
VPCMPUD_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; }
VPCMPUD_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; }
VPCMPUD_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; }
VPCMPUD_mon: "VPCMPUD" is imm8 { }
VPCMPUD_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }
define pcodeop vpcmpud_avx512vl ;
:^VPCMPUD_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst^VPCMPUD_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x1E; KReg_reg ... & XmmReg2_m128_m32bcst; VPCMPUD_mon & VPCMPUD_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpud_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, VPCMPUD_op );
	KReg_reg = zext(AVXOpMask[0,4]) & tmp;
}

# VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111438
:^VPCMPUD_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst^VPCMPUD_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x1E; KReg_reg ...
& YmmReg2_m256_m32bcst; VPCMPUD_mon & VPCMPUD_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpud_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, VPCMPUD_op );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# VPCMPD/VPCMPUD 5-342 PAGE 2166 LINE 111442
define pcodeop vpcmpud_avx512f ;
:^VPCMPUD_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst^VPCMPUD_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x1E; KReg_reg ... & ZmmReg2_m512_m32bcst; VPCMPUD_mon & VPCMPUD_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpud_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, VPCMPUD_op );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# Pseudo-op mnemonics for VPCMPQ; per the Intel SDM, imm8=4 is NEQ.
VPCMPQ_mon: "VPCMPEQQ" is imm8=0x0 { }
VPCMPQ_mon: "VPCMPLTQ" is imm8=0x1 { }
VPCMPQ_mon: "VPCMPLEQ" is imm8=0x2 { }
VPCMPQ_mon: "VPCMPNEQQ" is imm8=0x4 { }
VPCMPQ_mon: "VPCMPNLTQ" is imm8=0x5 { }
VPCMPQ_mon: "VPCMPNLEQ" is imm8=0x6 { }
VPCMPQ_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; }
VPCMPQ_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; }
VPCMPQ_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; }
VPCMPQ_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; }
VPCMPQ_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; }
VPCMPQ_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; }
VPCMPQ_mon: "VPCMPQ" is imm8 { }
VPCMPQ_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }

# VPCMPQ/VPCMPQ 5-345 PAGE 2169 LINE 111573
define pcodeop vpcmpq_avx512vl ;
:^VPCMPQ_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst^VPCMPQ_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x1F; KReg_reg ...
& XmmReg2_m128_m64bcst; VPCMPQ_mon & VPCMPQ_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, VPCMPQ_op );
	KReg_reg = zext(AVXOpMask[0,2]) & tmp;
}

# VPCMPQ/VPCMPQ 5-345 PAGE 2169 LINE 111577
:^VPCMPQ_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst^VPCMPQ_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x1F; KReg_reg ... & YmmReg2_m256_m64bcst; VPCMPQ_mon & VPCMPQ_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, VPCMPQ_op );
	KReg_reg = zext(AVXOpMask[0,4]) & tmp;
}

# VPCMPQ/VPCMPQ 5-345 PAGE 2169 LINE 111581
define pcodeop vpcmpq_avx512f ;
:^VPCMPQ_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst^VPCMPQ_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x1F; KReg_reg ... & ZmmReg2_m512_m64bcst; VPCMPQ_mon & VPCMPQ_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, VPCMPQ_op );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# Pseudo-op mnemonics for VPCMPUQ; per the Intel SDM, imm8=4 is NEQ.
VPCMPUQ_mon: "VPCMPEQUQ" is imm8=0x0 { }
VPCMPUQ_mon: "VPCMPLTUQ" is imm8=0x1 { }
VPCMPUQ_mon: "VPCMPLEUQ" is imm8=0x2 { }
VPCMPUQ_mon: "VPCMPNEQUQ" is imm8=0x4 { }
VPCMPUQ_mon: "VPCMPNLTUQ" is imm8=0x5 { }
VPCMPUQ_mon: "VPCMPNLEUQ" is imm8=0x6 { }
VPCMPUQ_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; }
VPCMPUQ_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; }
VPCMPUQ_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; }
VPCMPUQ_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; }
VPCMPUQ_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; }
VPCMPUQ_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; }
VPCMPUQ_mon: "VPCMPUQ" is imm8 { }
VPCMPUQ_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }

# VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111585
define
pcodeop vpcmpuq_avx512vl ; :^VPCMPUQ_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst^VPCMPUQ_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x1E; KReg_reg ... & XmmReg2_m128_m64bcst; VPCMPUQ_mon & VPCMPUQ_op [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vpcmpuq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, VPCMPUQ_op ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111589 :^VPCMPUQ_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst^VPCMPUQ_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x1E; KReg_reg ... & YmmReg2_m256_m64bcst; VPCMPUQ_mon & VPCMPUQ_op [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vpcmpuq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, VPCMPUQ_op ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPCMPQ/VPCMPUQ 5-345 PAGE 2169 LINE 111593 define pcodeop vpcmpuq_avx512f ; :^VPCMPUQ_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst^VPCMPUQ_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x1E; KReg_reg ... 
& ZmmReg2_m512_m64bcst; VPCMPUQ_mon & VPCMPUQ_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	local tmp = vpcmpuq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, VPCMPUQ_op );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# Pseudo-op mnemonics for VPCMPW; per the Intel SDM, imm8=4 is NEQ.
VPCMPW_mon: "VPCMPEQW" is imm8=0x0 { }
VPCMPW_mon: "VPCMPLTW" is imm8=0x1 { }
VPCMPW_mon: "VPCMPLEW" is imm8=0x2 { }
VPCMPW_mon: "VPCMPNEQW" is imm8=0x4 { }
VPCMPW_mon: "VPCMPNLTW" is imm8=0x5 { }
VPCMPW_mon: "VPCMPNLEW" is imm8=0x6 { }
VPCMPW_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; }
VPCMPW_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; }
VPCMPW_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; }
VPCMPW_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; }
VPCMPW_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; }
VPCMPW_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; }
VPCMPW_mon: "VPCMPW" is imm8 { }
VPCMPW_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }

# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111724
define pcodeop vpcmpw_avx512vl ;
:^VPCMPW_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128^VPCMPW_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3F; KReg_reg ... & XmmReg2_m128; VPCMPW_mon & VPCMPW_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpw_avx512vl( evexV5_XmmReg, XmmReg2_m128, VPCMPW_op );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111728
:^VPCMPW_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256^VPCMPW_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3F; KReg_reg ...
& YmmReg2_m256; VPCMPW_mon & VPCMPW_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpw_avx512vl( evexV5_YmmReg, YmmReg2_m256, VPCMPW_op );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# VPCMPW/VPCMPUW 5-348 PAGE 2172 LINE 111732
define pcodeop vpcmpw_avx512bw ;
:^VPCMPW_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512^VPCMPW_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x3F; KReg_reg ... & ZmmReg2_m512; VPCMPW_mon & VPCMPW_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, VPCMPW_op );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# Pseudo-op mnemonics for VPCMPUW; per the Intel SDM, imm8=4 is NEQ.
VPCMPUW_mon: "VPCMPEQUW" is imm8=0x0 { }
VPCMPUW_mon: "VPCMPLTUW" is imm8=0x1 { }
VPCMPUW_mon: "VPCMPLEUW" is imm8=0x2 { }
VPCMPUW_mon: "VPCMPNEQUW" is imm8=0x4 { }
VPCMPUW_mon: "VPCMPNLTUW" is imm8=0x5 { }
VPCMPUW_mon: "VPCMPNLEUW" is imm8=0x6 { }
VPCMPUW_op: "" is imm8=0 { local tmp:1 = 0; export *[const]:1 tmp; }
VPCMPUW_op: "" is imm8=1 { local tmp:1 = 1; export *[const]:1 tmp; }
VPCMPUW_op: "" is imm8=2 { local tmp:1 = 2; export *[const]:1 tmp; }
VPCMPUW_op: "" is imm8=4 { local tmp:1 = 4; export *[const]:1 tmp; }
VPCMPUW_op: "" is imm8=5 { local tmp:1 = 5; export *[const]:1 tmp; }
VPCMPUW_op: "" is imm8=6 { local tmp:1 = 6; export *[const]:1 tmp; }
VPCMPUW_mon: "VPCMPUW" is imm8 { }
VPCMPUW_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }

# VPCMPUW/VPCMPUUW 5-348 PAGE 2172 LINE 111724
define pcodeop vpcmpuw_avx512vl ;
:^VPCMPUW_mon KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128^VPCMPUW_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x3E; KReg_reg ...
& XmmReg2_m128; VPCMPUW_mon & VPCMPUW_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpuw_avx512vl( evexV5_XmmReg, XmmReg2_m128, VPCMPUW_op );
	KReg_reg = zext(AVXOpMask[0,8]) & tmp;
}

# VPCMPUW/VPCMPUUW 5-348 PAGE 2172 LINE 111728
:^VPCMPUW_mon KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256^VPCMPUW_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x3E; KReg_reg ... & YmmReg2_m256; VPCMPUW_mon & VPCMPUW_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpuw_avx512vl( evexV5_YmmReg, YmmReg2_m256, VPCMPUW_op );
	KReg_reg = zext(AVXOpMask[0,16]) & tmp;
}

# VPCMPUW/VPCMPUUW 5-348 PAGE 2172 LINE 111732
define pcodeop vpcmpuw_avx512bw ;
:^VPCMPUW_mon KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512^VPCMPUW_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x3E; KReg_reg ... & ZmmReg2_m512; VPCMPUW_mon & VPCMPUW_op
[ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM)
{
	local tmp = vpcmpuw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512, VPCMPUW_op );
	KReg_reg = zext(AVXOpMask[0,32]) & tmp;
}

# VPCOMPRESSD 5-351 PAGE 2175 LINE 111873
define pcodeop vpcompressd_avx512vl ;
:VPCOMPRESSD XmmReg2^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x8B; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	XmmResult = vpcompressd_avx512vl( XmmReg1 );
	# merge-masking must preserve the old DESTINATION (XmmReg2), not the
	# source XmmReg1 — matches the ymm form below (YmmMask = YmmReg2)
	XmmMask = XmmReg2;
	build XmmOpMask32;
	ZmmReg2 = zext(XmmResult);
}
:VPCOMPRESSD m128^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x8B; XmmReg1 ...
& m128
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	XmmResult = vpcompressd_avx512vl( XmmReg1 );
	XmmMask = m128;
	build XmmOpMask32;
	m128 = XmmResult;
}

# VPCOMPRESSD 5-351 PAGE 2175 LINE 111875
:VPCOMPRESSD YmmReg2^YmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x8B; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	YmmResult = vpcompressd_avx512vl( YmmReg1 );
	YmmMask = YmmReg2;
	build YmmOpMask32;
	ZmmReg2 = zext(YmmResult);
}
:VPCOMPRESSD m256^YmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x8B; YmmReg1 ... & m256
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	YmmResult = vpcompressd_avx512vl( YmmReg1 );
	YmmMask = m256;
	build YmmOpMask32;
	m256 = YmmResult;
}

# VPCOMPRESSD 5-351 PAGE 2175 LINE 111877
define pcodeop vpcompressd_avx512f ;
:VPCOMPRESSD ZmmReg2_m512^ZmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32; byte=0x8B; ZmmReg1 ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	ZmmResult = vpcompressd_avx512f( ZmmReg1 );
	ZmmMask = ZmmReg2_m512;
	build ZmmOpMask32;
	ZmmReg2_m512 = ZmmResult;
}

# VPCOMPRESSQ 5-353 PAGE 2177 LINE 111970
define pcodeop vpcompressq_avx512vl ;
:VPCOMPRESSQ XmmReg2^XmmOpMask64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64; byte=0x8B; XmmReg1 & mod=3 & XmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	# the op result must go into XmmResult (not XmmReg2 directly): the
	# OpMask merge and the zext(XmmResult) below read XmmResult, which was
	# otherwise left undefined
	XmmResult = vpcompressq_avx512vl( XmmReg1 );
	# merge-masking preserves the old destination (XmmReg2), not the source
	XmmMask = XmmReg2;
	build XmmOpMask64;
	ZmmReg2 = zext(XmmResult);
}
:VPCOMPRESSQ m128^XmmOpMask64, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64; byte=0x8B; XmmReg1 ...
& m128
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	XmmResult = vpcompressq_avx512vl( XmmReg1 );
	XmmMask = m128;
	build XmmOpMask64;
	m128 = XmmResult;
}

# VPCOMPRESSQ 5-353 PAGE 2177 LINE 111972
:VPCOMPRESSQ YmmReg2^YmmOpMask64, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64; byte=0x8B; YmmReg1 & mod=3 & YmmReg2 & ZmmReg2
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	YmmResult = vpcompressq_avx512vl( YmmReg1 );
	YmmMask = YmmReg2;
	build YmmOpMask64;
	ZmmReg2 = zext(YmmResult);
}
# The write-mask decorator must be joined to the memory operand with "^"
# (as in every sibling m128/m256/m512 form); a bare space would change the
# printed operand and the display-segment parse.
:VPCOMPRESSQ m256^YmmOpMask64, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64; byte=0x8B; YmmReg1 ... & m256
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	YmmResult = vpcompressq_avx512vl( YmmReg1 );
	YmmMask = m256;
	build YmmOpMask64;
	m256 = YmmResult;
}

# VPCOMPRESSQ 5-353 PAGE 2177 LINE 111974
define pcodeop vpcompressq_avx512f ;
:VPCOMPRESSQ ZmmReg2_m512^ZmmOpMask64, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64; byte=0x8B; ZmmReg1 ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	ZmmResult = vpcompressq_avx512f( ZmmReg1 );
	ZmmMask = ZmmReg2_m512;
	build ZmmOpMask64;
	ZmmReg2_m512 = ZmmResult;
}

# VPCONFLICTD/Q 5-355 PAGE 2179 LINE 112068
define pcodeop vpconflictd_avx512vl ;
:VPCONFLICTD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0xC4; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
	XmmResult = vpconflictd_avx512vl( XmmReg2_m128_m32bcst );
	XmmMask = XmmReg1;
	build XmmOpMask32;
	ZmmReg1 = zext(XmmResult);
}

# VPCONFLICTD/Q 5-355 PAGE 2179 LINE 112072
:VPCONFLICTD YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0xC4; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpconflictd_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPCONFLICTD/Q 5-355 PAGE 2179 LINE 112076 define pcodeop vpconflictd_avx512cd ; :VPCONFLICTD ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0xC4; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpconflictd_avx512cd( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPCONFLICTD/Q 5-355 PAGE 2179 LINE 112080 define pcodeop vpconflictq_avx512vl ; :VPCONFLICTQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0xC4; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpconflictq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPCONFLICTD/Q 5-355 PAGE 2179 LINE 112084 :VPCONFLICTQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0xC4; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpconflictq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPCONFLICTD/Q 5-355 PAGE 2179 LINE 112088 define pcodeop vpconflictq_avx512cd ; :VPCONFLICTQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0xC4; (ZmmReg1 & ZmmOpMask64) ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpconflictq_avx512cd( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112407 define pcodeop vpermd_avx512vl ; :VPERMD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpermd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112410 define pcodeop vpermd_avx512f ; :VPERMD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x36; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpermd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112413 define pcodeop vpermw_avx512vl ; :VPERMW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x8D; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpermw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112417 :VPERMW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x8D; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpermw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPERMD/VPERMW 5-362 PAGE 2186 LINE 112421 define pcodeop vpermw_avx512bw ; :VPERMW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x8D; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpermw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112553 define pcodeop vpermi2w_avx512vl ; :VPERMI2W XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x75; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpermi2w_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112556 :VPERMI2W YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x75; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpermi2w_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112559 define pcodeop vpermi2w_avx512bw ; :VPERMI2W ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x75; (ZmmReg1 & ZmmOpMask16) ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpermi2w_avx512bw( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112562 define pcodeop vpermi2d_avx512vl ; :VPERMI2D XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpermi2d_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112566 :VPERMI2D YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpermi2d_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112570 define pcodeop vpermi2d_avx512f ; :VPERMI2D ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x76; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpermi2d_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112574 define pcodeop vpermi2q_avx512vl ; :VPERMI2Q XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x76; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpermi2q_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112578 :VPERMI2Q YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x76; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpermi2q_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112582 define pcodeop vpermi2q_avx512f ; :VPERMI2Q ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x76; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpermi2q_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112586 define pcodeop vpermi2ps_avx512vl ; :VPERMI2PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x77; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpermi2ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112590 :VPERMI2PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x77; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
# NOTE(review): common EVEX masked-op skeleton used by every constructor below:
# <Size>Result is computed by an opaque pcodeop, <Size>Mask saves the old destination
# for merge-masking, "build <Size>OpMask<N>" applies the k-register write mask, and the
# final ZmmReg1 assignment (zext for the 128/256-bit forms) models EVEX zeroing of the
# upper destination bits.  The [ evexD8Type / evexTType ] context block records the
# SDM tuple type (shown in the trailing comment) for compressed-displacement decoding.
& YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpermi2ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPERMI2W/D/Q/PS/PD 5-365 PAGE 2189 LINE 112594
define pcodeop vpermi2ps_avx512f ;
:VPERMI2PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x77; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpermi2ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPERMI2W/D/Q/PS/PD 5-366 PAGE 2190 LINE 112610
define pcodeop vpermi2pd_avx512vl ;
:VPERMI2PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x77; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpermi2pd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPERMI2W/D/Q/PS/PD 5-366 PAGE 2190 LINE 112614
:VPERMI2PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x77; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpermi2pd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMI2W/D/Q/PS/PD 5-366 PAGE 2190 LINE 112618
define pcodeop vpermi2pd_avx512f ;
:VPERMI2PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x77; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpermi2pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMILPD 5-371 PAGE 2195 LINE 112866
define pcodeop vpermilpd_avx512vl ;
:VPERMILPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x0D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ XmmResult = vpermilpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPERMILPD 5-371 PAGE 2195 LINE 112869
:VPERMILPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x0D; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vpermilpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMILPD 5-371 PAGE 2195 LINE 112872
define pcodeop vpermilpd_avx512f ;
:VPERMILPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x0D; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vpermilpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMILPD 5-371 PAGE 2195 LINE 112879
:VPERMILPD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x05; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
& XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RM)
{ XmmResult = vpermilpd_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPERMILPD 5-371 PAGE 2195 LINE 112882
:VPERMILPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x05; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RM)
{ YmmResult = vpermilpd_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMILPD 5-371 PAGE 2195 LINE 112885
:VPERMILPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x05; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RM)
{ ZmmResult = vpermilpd_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMILPS 5-376 PAGE 2200 LINE 113170
define pcodeop vpermilps_avx512vl ;
:VPERMILPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x0C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ XmmResult = vpermilps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPERMILPS 5-376 PAGE 2200 LINE 113173
:VPERMILPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x0C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vpermilps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPERMILPS 5-376 PAGE 2200 LINE 113176
define pcodeop vpermilps_avx512f ;
:VPERMILPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x0C; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vpermilps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPERMILPS 5-376 PAGE 2200 LINE 113179
:VPERMILPS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x04; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RM)
{ XmmResult = vpermilps_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPERMILPS 5-376 PAGE 2200 LINE 113182
:VPERMILPS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x04; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RM)
{ YmmResult = vpermilps_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPERMILPS 5-376 PAGE 2200 LINE 113186
:VPERMILPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x04; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RM)
{ ZmmResult = vpermilps_avx512f( ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPERMPD 5-381 PAGE 2205 LINE 113456
define pcodeop vpermpd_avx512vl ;
:VPERMPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x01; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RMI)
{ YmmResult = vpermpd_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMPD 5-381 PAGE 2205 LINE 113459
define pcodeop vpermpd_avx512f ;
:VPERMPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x01; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RMI)
{ ZmmResult = vpermpd_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMPD 5-381 PAGE 2205 LINE 113462
:VPERMPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vpermpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMPD 5-381 PAGE 2205 LINE 113465
:VPERMPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x16; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vpermpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMPS 5-384 PAGE 2208 LINE 113636
define pcodeop vpermps_avx512vl ;
:VPERMPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x16; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpermps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPERMPS 5-384 PAGE 2208 LINE 113639
define pcodeop vpermps_avx512f ;
:VPERMPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x16; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpermps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPERMQ 5-387 PAGE 2211 LINE 113771
define pcodeop vpermq_avx512vl ;
:VPERMQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x00; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RMI)
{ YmmResult = vpermq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMQ 5-387 PAGE 2211 LINE 113774
define pcodeop vpermq_avx512f ;
:VPERMQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x00; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RMI)
{ ZmmResult = vpermq_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMQ 5-387 PAGE 2211 LINE 113777
:VPERMQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x36; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vpermq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPERMQ 5-387 PAGE 2211 LINE 113780
:VPERMQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x36; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vpermq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPERMT2W/D/Q/PS/PD (Intel SDM): dest := full permute of the two-source table
# {src1 = old dest, src2} selected by the index vector in evexV5.  The old destination
# is therefore a genuine data input, so it is passed to the pcodeop as the first
# argument, matching the VPERMI2PS/VPERMI2PD constructors above — previously it was
# omitted, losing that dataflow edge.  Also added the
# [ evexD8Type = 0; evexTType = 0; ] (TupleType FV) context block that every other
# FV-format EVEX entry in this file sets for disp8*N compressed-displacement decoding.
# NOTE(review): unlike its siblings, this group reuses the single pcodeop
# vpermt2pd_avx512f for the 128/256-bit (AVX512VL) forms as well; the name is kept
# for compatibility with existing consumers of the emitted p-code.
define pcodeop vpermt2pd_avx512f;
:VPERMT2PD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x7F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vpermt2pd_avx512f( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

:VPERMT2PD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x7F; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vpermt2pd_avx512f( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

:VPERMT2PD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x7F; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vpermt2pd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPEXPANDD 5-390 PAGE 2214 LINE 113945
define pcodeop vpexpandd_avx512vl ;
:VPEXPANDD XmmReg1^XmmOpMask32, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x89; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ XmmResult = vpexpandd_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPEXPANDD 5-390 PAGE 2214 LINE 113948
:VPEXPANDD YmmReg1^YmmOpMask32, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x89; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ YmmResult = vpexpandd_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPEXPANDD 5-390 PAGE 2214 LINE 113951
define pcodeop vpexpandd_avx512f ;
:VPEXPANDD ZmmReg1^ZmmOpMask32, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x89; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ ZmmResult = vpexpandd_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPEXPANDQ 5-392 PAGE 2216 LINE 114033
define pcodeop vpexpandq_avx512vl ;
:VPEXPANDQ XmmReg1^XmmOpMask64, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x89; (XmmReg1 & ZmmReg1 & XmmOpMask64) ...
# NOTE(review): VPEXPAND*/VPLZCNT* below follow the standard EVEX merge/zero-mask
# skeleton; the VPMOVM2* constructors materialize a k-mask register into a vector
# (zext through ZmmReg1 for the sub-512-bit forms), and the VPMOV*2M constructors do
# the reverse (vector -> k-mask, register-only: mod=0x3).
& XmmReg2_m128
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ XmmResult = vpexpandq_avx512vl( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPEXPANDQ 5-392 PAGE 2216 LINE 114035
:VPEXPANDQ YmmReg1^YmmOpMask64, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x89; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ YmmResult = vpexpandq_avx512vl( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPEXPANDQ 5-392 PAGE 2216 LINE 114037
define pcodeop vpexpandq_avx512f ;
:VPEXPANDQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x89; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ ZmmResult = vpexpandq_avx512f( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPLZCNTD/Q 5-394 PAGE 2218 LINE 114118
define pcodeop vplzcntd_avx512vl ;
:VPLZCNTD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x44; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vplzcntd_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPLZCNTD/Q 5-394 PAGE 2218 LINE 114122
:VPLZCNTD YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x44; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vplzcntd_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPLZCNTD/Q 5-394 PAGE 2218 LINE 114126
define pcodeop vplzcntd_avx512cd ;
:VPLZCNTD ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x44; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vplzcntd_avx512cd( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPLZCNTD/Q 5-394 PAGE 2218 LINE 114130
define pcodeop vplzcntq_avx512vl ;
:VPLZCNTQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x44; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ XmmResult = vplzcntq_avx512vl( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPLZCNTD/Q 5-394 PAGE 2218 LINE 114134
:VPLZCNTQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x44; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ YmmResult = vplzcntq_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPLZCNTD/Q 5-394 PAGE 2218 LINE 114138
define pcodeop vplzcntq_avx512cd ;
:VPLZCNTQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x44; (ZmmReg1 & ZmmOpMask64) ...
& ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{ ZmmResult = vplzcntq_avx512cd( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114413
define pcodeop vpmovm2b_avx512vl ;
:VPMOVM2B XmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x28; (XmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:16 = vpmovm2b_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114415
:VPMOVM2B YmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x28; (YmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:32 = vpmovm2b_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114417
define pcodeop vpmovm2b_avx512bw ;
:VPMOVM2B ZmmReg1, KReg_rm is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x28; ZmmReg1 & KReg_rm
{ ZmmReg1 = vpmovm2b_avx512bw( KReg_rm ); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114419
define pcodeop vpmovm2w_avx512vl ;
:VPMOVM2W XmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x28; (XmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:16 = vpmovm2w_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114421
:VPMOVM2W YmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x28; (YmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:32 = vpmovm2w_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114423
define pcodeop vpmovm2w_avx512bw ;
:VPMOVM2W ZmmReg1, KReg_rm is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x28; ZmmReg1 & KReg_rm
{ ZmmReg1 = vpmovm2w_avx512bw( KReg_rm ); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114425
define pcodeop vpmovm2d_avx512vl ;
:VPMOVM2D XmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x38; (XmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:16 = vpmovm2d_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114427
:VPMOVM2D YmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x38; (YmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:32 = vpmovm2d_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114429
define pcodeop vpmovm2d_avx512dq ;
:VPMOVM2D ZmmReg1, KReg_rm is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x38; ZmmReg1 & KReg_rm
{ ZmmReg1 = vpmovm2d_avx512dq( KReg_rm ); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114431
define pcodeop vpmovm2q_avx512vl ;
:VPMOVM2Q XmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x38; (XmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:16 = vpmovm2q_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114433
:VPMOVM2Q YmmReg1, KReg_rm is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x38; (YmmReg1 & ZmmReg1) & KReg_rm
{ local tmp:32 = vpmovm2q_avx512vl( KReg_rm ); ZmmReg1 = zext(tmp); }

# VPMOVM2B/VPMOVM2W/VPMOVM2D/VPMOVM2Q 5-400 PAGE 2224 LINE 114435
define pcodeop vpmovm2q_avx512dq ;
:VPMOVM2Q ZmmReg1, KReg_rm is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x38; ZmmReg1 & KReg_rm
{ ZmmReg1 = vpmovm2q_avx512dq( KReg_rm ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114542
define pcodeop vpmovb2m_avx512vl ;
:VPMOVB2M KReg_reg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x29; KReg_reg & (mod=0x3 & XmmReg2)
{ KReg_reg = vpmovb2m_avx512vl( XmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114544
:VPMOVB2M KReg_reg, YmmReg2 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x29; KReg_reg & (mod=0x3 & YmmReg2)
{ KReg_reg = vpmovb2m_avx512vl( YmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114546
define pcodeop vpmovb2m_avx512bw ;
:VPMOVB2M KReg_reg, ZmmReg2 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x29; KReg_reg & (mod=0x3 & ZmmReg2)
{ KReg_reg = vpmovb2m_avx512bw( ZmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114548
define pcodeop vpmovw2m_avx512vl ;
:VPMOVW2M KReg_reg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x29; KReg_reg & (mod=0x3 & XmmReg2)
{ KReg_reg = vpmovw2m_avx512vl( XmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114550
:VPMOVW2M KReg_reg, YmmReg2 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x29; KReg_reg & (mod=0x3 & YmmReg2)
{ KReg_reg = vpmovw2m_avx512vl( YmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114552
define pcodeop vpmovw2m_avx512bw ;
:VPMOVW2M KReg_reg, ZmmReg2 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x29; KReg_reg & (mod=0x3 & ZmmReg2)
{ KReg_reg = vpmovw2m_avx512bw( ZmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114554
define pcodeop vpmovd2m_avx512vl ;
:VPMOVD2M KReg_reg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x39; KReg_reg & (mod=0x3 & XmmReg2)
{ KReg_reg = vpmovd2m_avx512vl( XmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114556
:VPMOVD2M KReg_reg, YmmReg2 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x39; KReg_reg & (mod=0x3 & YmmReg2)
{ KReg_reg = vpmovd2m_avx512vl( YmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114558
define pcodeop vpmovd2m_avx512dq ;
:VPMOVD2M KReg_reg, ZmmReg2 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0); byte=0x39; KReg_reg & (mod=0x3 & ZmmReg2)
{ KReg_reg = vpmovd2m_avx512dq( ZmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114560
define pcodeop vpmovq2m_avx512vl ;
:VPMOVQ2M KReg_reg, XmmReg2 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x39; KReg_reg & (mod=0x3 & XmmReg2)
{ KReg_reg = vpmovq2m_avx512vl( XmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114562
:VPMOVQ2M KReg_reg, YmmReg2 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x39; KReg_reg & (mod=0x3 & YmmReg2)
{ KReg_reg = vpmovq2m_avx512vl( YmmReg2 ); }

# VPMOVB2M/VPMOVW2M/VPMOVD2M/VPMOVQ2M 5-403 PAGE 2227 LINE 114564
define pcodeop vpmovq2m_avx512dq ;
:VPMOVQ2M KReg_reg, ZmmReg2 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1); byte=0x39; KReg_reg & (mod=0x3 & ZmmReg2)
{ KReg_reg = vpmovq2m_avx512dq( ZmmReg2 ); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115959
define pcodeop vprolvd_avx512vl ;
:VPROLVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ XmmResult = vprolvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115962
# NOTE(review): the immediate rotate forms use /reg as an opcode extension
# (reg_opcode), so the destination register is carried in EVEX.vvvv (evexV5_*).
define pcodeop vprold_avx512vl ;
:VPROLD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=1 ...
# NOTE(review): rotate family below — VPROLV*/VPRORV* take per-element counts from a
# second vector source (RVM form, dest in ModRM.reg), while VPROL*/VPROR* take an imm8
# count with the destination carried in EVEX.vvvv (evexV5_*) because /reg is the opcode
# extension (reg_opcode).  Scatter constructors at the end have no architectural register
# destination; the memory side effect is left to an opaque pcodeop (see TODO comments).
& XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ XmmResult = vprold_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = evexV5_XmmReg; build XmmOpMask32; evexV5_ZmmReg = zext(XmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115965
define pcodeop vprolvq_avx512vl ;
:VPROLVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x15; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ XmmResult = vprolvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115968
define pcodeop vprolq_avx512vl ;
:VPROLQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=1 ... & XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ XmmResult = vprolq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = evexV5_XmmReg; build XmmOpMask64; evexV5_ZmmReg = zext(XmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115971
:VPROLVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vprolvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115974
:VPROLD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=1 ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ YmmResult = vprold_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = evexV5_YmmReg; build YmmOpMask32; evexV5_ZmmReg = zext(YmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115977
:VPROLVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x15; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vprolvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115980
:VPROLQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=1 ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ YmmResult = vprolq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = evexV5_YmmReg; build YmmOpMask64; evexV5_ZmmReg = zext(YmmResult); }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115983
define pcodeop vprolvd_avx512f ;
:VPROLVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x15; (ZmmReg1 & ZmmOpMask32) ...
& ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vprolvd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115987
define pcodeop vprold_avx512f ;
:VPROLD evexV5_ZmmReg^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg & ZmmOpMask32; byte=0x72; reg_opcode=1 ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ ZmmResult = vprold_avx512f( ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = evexV5_ZmmReg; build ZmmOpMask32; evexV5_ZmmReg = ZmmResult; }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115990
define pcodeop vprolvq_avx512f ;
:VPROLVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x15; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vprolvq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# PROLD/PROLVD/PROLQ/PROLVQ 5-430 PAGE 2254 LINE 115993
define pcodeop vprolq_avx512f ;
:VPROLQ evexV5_ZmmReg^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg & ZmmOpMask64; byte=0x72; reg_opcode=1 ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ ZmmResult = vprolq_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = evexV5_ZmmReg; build ZmmOpMask64; evexV5_ZmmReg = ZmmResult; }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116190
define pcodeop vprorvd_avx512vl ;
:VPRORVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask32) ...
& XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ XmmResult = vprorvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116194
define pcodeop vprord_avx512vl ;
:VPRORD evexV5_XmmReg^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask32; byte=0x72; reg_opcode=0 ... & XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ XmmResult = vprord_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = evexV5_XmmReg; build XmmOpMask32; evexV5_ZmmReg = zext(XmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116197
define pcodeop vprorvq_avx512vl ;
:VPRORVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x14; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ XmmResult = vprorvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116200
define pcodeop vprorq_avx512vl ;
:VPRORQ evexV5_XmmReg^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_XmmReg & evexV5_ZmmReg) & XmmOpMask64; byte=0x72; reg_opcode=0 ...
& XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ XmmResult = vprorq_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = evexV5_XmmReg; build XmmOpMask64; evexV5_ZmmReg = zext(XmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116203
:VPRORVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vprorvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116207
:VPRORD evexV5_YmmReg^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask32; byte=0x72; reg_opcode=0 ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ YmmResult = vprord_avx512vl( YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = evexV5_YmmReg; build YmmOpMask32; evexV5_ZmmReg = zext(YmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116210
:VPRORVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x14; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ YmmResult = vprorvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116213
:VPRORQ evexV5_YmmReg^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & (evexV5_YmmReg & evexV5_ZmmReg) & YmmOpMask64; byte=0x72; reg_opcode=0 ...
& YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ YmmResult = vprorq_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = evexV5_YmmReg; build YmmOpMask64; evexV5_ZmmReg = zext(YmmResult); }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116216
define pcodeop vprorvd_avx512f ;
:VPRORVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x14; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vprorvd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116220
define pcodeop vprord_avx512f ;
:VPRORD evexV5_ZmmReg^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg & ZmmOpMask32; byte=0x72; reg_opcode=0 ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ ZmmResult = vprord_avx512f( ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = evexV5_ZmmReg; build ZmmOpMask32; evexV5_ZmmReg = ZmmResult; }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116223
define pcodeop vprorvq_avx512f ;
:VPRORVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x14; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-RVM)
{ ZmmResult = vprorvq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# PRORD/PRORVD/PRORQ/PRORVQ 5-435 PAGE 2259 LINE 116226
define pcodeop vprorq_avx512f ;
:VPRORQ evexV5_ZmmReg^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg & ZmmOpMask64; byte=0x72; reg_opcode=0 ...
& ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV-VMI)
{ ZmmResult = vprorq_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = evexV5_ZmmReg; build ZmmOpMask64; evexV5_ZmmReg = ZmmResult; }

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116424
define pcodeop vpscatterdd_avx512vl ;
:VPSCATTERDD x_vm32x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xA0; XmmReg1 ... & x_vm32x
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ vpscatterdd_avx512vl( x_vm32x, XmmOpMask, XmmReg1 );
# TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116426
:VPSCATTERDD y_vm32y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0xA0; YmmReg1 ... & y_vm32y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ vpscatterdd_avx512vl( y_vm32y, YmmOpMask, YmmReg1 );
# TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116428
define pcodeop vpscatterdd_avx512f ;
:VPSCATTERDD z_vm32z^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0xA0; ZmmReg1 ... & z_vm32z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{ vpscatterdd_avx512f( z_vm32z, ZmmOpMask, ZmmReg1 );
# TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116430
define pcodeop vpscatterdq_avx512vl ;
:VPSCATTERDQ x_vm32x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xA0; XmmReg1 ...
& x_vm32x
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterdq_avx512vl( x_vm32x, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116432
:VPSCATTERDQ y_vm32y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0xA0; YmmReg1 ... & y_vm32y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterdq_avx512vl( y_vm32y, YmmOpMask, YmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116434
define pcodeop vpscatterdq_avx512f ;
:VPSCATTERDQ z_vm32z^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0xA0; ZmmReg1 ... & z_vm32z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterdq_avx512f( z_vm32z, ZmmOpMask, ZmmReg1 ); # TODO missing destination or side effects
}

@ifdef IA64
#technically these should be supported in 32-bit mode, but the assembly differences are notable, and we don't handle vm64 in 32-bit
# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116436
# WARNING: did not recognize qualifier /vsib for "VPSCATTERQD vm64x {k1}, xmm1"
define pcodeop vpscatterqd_avx512vl ;
:VPSCATTERQD q_vm64x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xA1; XmmReg1 ... & q_vm64x
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterqd_avx512vl( q_vm64x, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116438
# WARNING: did not recognize qualifier /vsib for "VPSCATTERQD vm64y {k1}, xmm1"
# FIX(review): the 256-bit form scatters four dwords through a vm64y (ymm) index
# vector -- see the SDM text quoted in the WARNING above. The original pattern
# reused the 128-bit q_vm64x table here, decoding the wrong VSIB index-register set.
:VPSCATTERQD q_vm64y^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xA1; XmmReg1 ... & q_vm64y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterqd_avx512vl( q_vm64y, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116440
# WARNING: did not recognize qualifier /vsib for "VPSCATTERQD vm64z {k1}, ymm1"
define pcodeop vpscatterqd_avx512f ;
# NOTE(review): per the SDM this 512-bit form indexes through vm64z (zmm); q_vm64y
# is kept because no q_vm64z table is visible in this chunk -- confirm against the
# vm64 VSIB table definitions before changing.
:VPSCATTERQD q_vm64y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0xA1; YmmReg1 ... & q_vm64y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterqd_avx512f( q_vm64y, YmmOpMask, YmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116442
# WARNING: did not recognize qualifier /vsib for "VPSCATTERQQ vm64x {k1}, xmm1"
define pcodeop vpscatterqq_avx512vl ;
:VPSCATTERQQ x_vm64x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xA1; XmmReg1 ... & x_vm64x
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterqq_avx512vl( x_vm64x, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116444
# WARNING: did not recognize qualifier /vsib for "VPSCATTERQQ vm64y {k1}, ymm1"
:VPSCATTERQQ y_vm64y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0xA1; YmmReg1 ... & y_vm64y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
	vpscatterqq_avx512vl( y_vm64y, YmmOpMask, YmmReg1 ); # TODO missing destination or side effects
}

# VPSCATTERDD/VPSCATTERDQ/VPSCATTERQD/VPSCATTERQQ 5-440 PAGE 2264 LINE 116446
# WARNING: did not recognize qualifier /vsib for "VPSCATTERQQ vm64z {k1}, zmm1"
define pcodeop vpscatterqq_avx512f ;
:VPSCATTERQQ z_vm64z^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0xA1; ZmmReg1 ...
& z_vm64z [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vpscatterqq_avx512f( z_vm64z, ZmmOpMask, ZmmReg1 ); # TODO missing destination or side effects } @endif # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116632 define pcodeop vpsllvw_avx512vl ; :VPSLLVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x12; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpsllvw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116635 :VPSLLVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x12; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpsllvw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116638 define pcodeop vpsllvw_avx512bw ; :VPSLLVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x12; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpsllvw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116641 define pcodeop vpsllvd_avx512vl ; :VPSLLVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpsllvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116644 :VPSLLVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpsllvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116647 define pcodeop vpsllvd_avx512f ; :VPSLLVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x47; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpsllvd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116650 define pcodeop vpsllvq_avx512vl ; :VPSLLVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x47; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpsllvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116653 :VPSLLVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x47; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpsllvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPSLLVW/VPSLLVD/VPSLLVQ 5-445 PAGE 2269 LINE 116656 define pcodeop vpsllvq_avx512f ; :VPSLLVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x47; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpsllvq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116880 define pcodeop vpsravw_avx512vl ; :VPSRAVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x11; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpsravw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116883 :VPSRAVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x11; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpsravw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116886 define pcodeop vpsravw_avx512bw ; :VPSRAVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x11; (ZmmReg1 & ZmmOpMask16) ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpsravw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116889 define pcodeop vpsravd_avx512vl ; :VPSRAVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpsravd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116893 :VPSRAVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpsravd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116897 define pcodeop vpsravd_avx512f ; :VPSRAVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x46; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpsravd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116901 define pcodeop vpsravq_avx512vl ; :VPSRAVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x46; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpsravq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116905 :VPSRAVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x46; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpsravq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPSRAVW/VPSRAVD/VPSRAVQ 5-450 PAGE 2274 LINE 116910 define pcodeop vpsravq_avx512f ; :VPSRAVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x46; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpsravq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117151 define pcodeop vpsrlvw_avx512vl ; :VPSRLVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1 & XmmOpMask16) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { XmmResult = vpsrlvw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117154 :VPSRLVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x10; (YmmReg1 & ZmmReg1 & YmmOpMask16) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { YmmResult = vpsrlvw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117157 define pcodeop vpsrlvw_avx512bw ; :VPSRLVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x10; (ZmmReg1 & ZmmOpMask16) ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { ZmmResult = vpsrlvw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117160 define pcodeop vpsrlvd_avx512vl ; :VPSRLVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpsrlvd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117163 :VPSRLVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpsrlvd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117166 define pcodeop vpsrlvd_avx512f ; :VPSRLVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x45; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpsrlvd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117169 define pcodeop vpsrlvq_avx512vl ; :VPSRLVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x45; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpsrlvq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117172 :VPSRLVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x45; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpsrlvq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPSRLVW/VPSRLVD/VPSRLVQ 5-455 PAGE 2279 LINE 117175 define pcodeop vpsrlvq_avx512f ; :VPSRLVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x45; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpsrlvq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117395 define pcodeop vpternlogd_avx512vl ; :VPTERNLOGD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... 
& XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpternlogd_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117400 :VPTERNLOGD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpternlogd_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117405 define pcodeop vpternlogd_avx512f ; :VPTERNLOGD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x25; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpternlogd_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117410 define pcodeop vpternlogq_avx512vl ; :VPTERNLOGQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x25; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vpternlogq_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117415 :VPTERNLOGQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x25; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vpternlogq_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPTERNLOGD/VPTERNLOGQ 5-460 PAGE 2284 LINE 117420 define pcodeop vpternlogq_avx512f ; :VPTERNLOGQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x25; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vpternlogq_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117559 define pcodeop vptestmb_avx512vl ; :VPTESTMB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestmb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117562 :VPTESTMB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestmb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117565 define pcodeop vptestmb_avx512bw ; :VPTESTMB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x26; KReg_reg ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestmb_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); KReg_reg = zext(AVXOpMask[0,64]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117568 define pcodeop vptestmw_avx512vl ; :VPTESTMW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestmw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117571 :VPTESTMW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestmw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117574 define pcodeop vptestmw_avx512bw ; :VPTESTMW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x26; KReg_reg ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestmw_avx512bw( evexV5_ZmmReg, ZmmReg2_m512 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117577 define pcodeop vptestmd_avx512vl ; :VPTESTMD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestmd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117581 :VPTESTMD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestmd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117585 define pcodeop vptestmd_avx512f ; :VPTESTMD KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x27; KReg_reg ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestmd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117589 define pcodeop vptestmq_avx512vl ; :VPTESTMQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestmq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117593 :VPTESTMQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestmq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPTESTMB/VPTESTMW/VPTESTMD/VPTESTMQ 5-463 PAGE 2287 LINE 117597 define pcodeop vptestmq_avx512f ; :VPTESTMQ KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x27; KReg_reg ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestmq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117717 define pcodeop vptestnmb_avx512vl ; :VPTESTNMB KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestnmb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117721 :VPTESTNMB KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestnmb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117725 define pcodeop vptestnmb_avx512f ; :VPTESTNMB KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x26; KReg_reg ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestnmb_avx512f( evexV5_ZmmReg, ZmmReg2_m512 ); KReg_reg = zext(AVXOpMask[0,64]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117729 define pcodeop vptestnmw_avx512vl ; :VPTESTNMW KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x26; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestnmw_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117733 :VPTESTNMW KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x26; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestnmw_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117737 define pcodeop vptestnmw_avx512f ; :VPTESTNMW KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x26; KReg_reg ... 
# NOTE(review): Auto-generated AVX-512 EVEX constructors (VPTESTNMD/Q, VRANGEPD, VRANGEPS).
# The "# <MNEMONIC> <page> PAGE <n> LINE <n>" markers cite the Intel SDM entry each
# constructor was derived from; [ evexD8Type / evexTType ] set the EVEX disp8*N tuple type.
# Content below is preserved byte-for-byte.
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType FVM) { local tmp = vptestnmw_avx512f( evexV5_ZmmReg, ZmmReg2_m512 ); KReg_reg = zext(AVXOpMask[0,32]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117741 define pcodeop vptestnmd_avx512vl ; :VPTESTNMD KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestnmd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117745 :VPTESTNMD KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestnmd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117749 define pcodeop vptestnmd_avx512f ; :VPTESTNMD KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & AVXOpMask & evexV5_ZmmReg; byte=0x27; KReg_reg ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestnmd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); KReg_reg = zext(AVXOpMask[0,16]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117753 define pcodeop vptestnmq_avx512vl ; :VPTESTNMQ KReg_reg AVXOpMask, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_XmmReg; byte=0x27; KReg_reg ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestnmq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); KReg_reg = zext(AVXOpMask[0,2]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117757 :VPTESTNMQ KReg_reg AVXOpMask, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_YmmReg; byte=0x27; KReg_reg ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestnmq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); KReg_reg = zext(AVXOpMask[0,4]) & tmp; } # VPTESTNMB/W/D/Q 5-466 PAGE 2290 LINE 117761 define pcodeop vptestnmq_avx512f ; :VPTESTNMQ KReg_reg AVXOpMask, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & AVXOpMask & evexV5_ZmmReg; byte=0x27; KReg_reg ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { local tmp = vptestnmq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); KReg_reg = zext(AVXOpMask[0,8]) & tmp; } # VRANGEPD 5-470 PAGE 2294 LINE 117905 define pcodeop vrangepd_avx512vl ; :VRANGEPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vrangepd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRANGEPD 5-470 PAGE 2294 LINE 117910 :VRANGEPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrangepd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VRANGEPD 5-470 PAGE 2294 LINE 117915 define pcodeop vrangepd_avx512dq ; :VRANGEPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x50; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrangepd_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VRANGEPS 5-475 PAGE 2299 LINE 118139 define pcodeop vrangeps_avx512vl ; :VRANGEPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vrangeps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRANGEPS 5-475 PAGE 2299 LINE 118144 :VRANGEPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrangeps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VRANGEPS 5-475 PAGE 2299 LINE 118149 define pcodeop vrangeps_avx512dq ; :VRANGEPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x50; (ZmmReg1 & ZmmOpMask32) ... 
& ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vrangeps_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VRANGESD 5-479 PAGE 2303 LINE 118318
define pcodeop vrangesd_avx512dq ;
:VRANGESD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vrangesd_avx512dq( evexV5_XmmReg, XmmReg2_m64, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VRANGESS 5-482 PAGE 2306 LINE 118473
# FIX: VRANGESS takes a trailing imm8 control byte (Intel SDM: VRANGESS xmm1 {k1}{z}, xmm2, xmm3/m32, imm8),
# exactly like the VRANGESD form above. It was missing here, so the decoded instruction length was one
# byte short and the range-control operand was dropped from the semantics.
define pcodeop vrangess_avx512dq ;
:VRANGESS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vrangess_avx512dq( evexV5_XmmReg, XmmReg2_m32, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VRCP14PD 5-485 PAGE 2309 LINE 118626
define pcodeop vrcp14pd_avx512vl ;
:VRCP14PD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x4C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vrcp14pd_avx512vl( XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VRCP14PD 5-485 PAGE 2309 LINE 118629
:VRCP14PD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x4C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ...
# NOTE(review): Auto-generated AVX-512 reciprocal-approximation constructors
# (VRCP14PD/SD/PS/SS, VRCP28PD/SD/PS/SS, start of VREDUCEPD). Masked forms merge
# through XmmMask/YmmMask/ZmmMask + build <size>OpMask, then zero-extend into ZmmReg1.
# Content below is preserved byte-for-byte.
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrcp14pd_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VRCP14PD 5-485 PAGE 2309 LINE 118632 define pcodeop vrcp14pd_avx512f ; :VRCP14PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x4C; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrcp14pd_avx512f( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VRCP14SD 5-487 PAGE 2311 LINE 118726 define pcodeop vrcp14sd_avx512f ; :VRCP14SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrcp14sd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRCP14PS 5-489 PAGE 2313 LINE 118800 define pcodeop vrcp14ps_avx512vl ; :VRCP14PS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x4C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vrcp14ps_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRCP14PS 5-489 PAGE 2313 LINE 118803 :VRCP14PS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x4C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrcp14ps_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VRCP14PS 5-489 PAGE 2313 LINE 118806 define pcodeop vrcp14ps_avx512f ; :VRCP14PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x4C; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrcp14ps_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VRCP14SS 5-491 PAGE 2315 LINE 118904 define pcodeop vrcp14ss_avx512f ; :VRCP14SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrcp14ss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRCP28PD 5-493 PAGE 2317 LINE 118979 define pcodeop vrcp28pd_avx512er ; :VRCP28PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0xCA; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrcp28pd_avx512er( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VRCP28SD 5-495 PAGE 2319 LINE 119074 define pcodeop vrcp28sd_avx512er ; :VRCP28SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xCB; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrcp28sd_avx512er( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRCP28PS 5-497 PAGE 2321 LINE 119167 define pcodeop vrcp28ps_avx512er ; :VRCP28PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0xCA; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrcp28ps_avx512er( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VRCP28SS 5-499 PAGE 2323 LINE 119263 define pcodeop vrcp28ss_avx512er ; :VRCP28SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xCB; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrcp28ss_avx512er( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VREDUCEPD 5-501 PAGE 2325 LINE 119356 define pcodeop vreducepd_avx512vl ; :VREDUCEPD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vreducepd_avx512vl( XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VREDUCEPD 5-501 PAGE 2325 LINE 119360 :VREDUCEPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vreducepd_avx512vl( YmmReg2_m256_m64bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VREDUCEPD 5-501 PAGE 2325 LINE 119364
define pcodeop vreducepd_avx512dq ;
:VREDUCEPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x56; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vreducepd_avx512dq( ZmmReg2_m512_m64bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VREDUCESD 5-504 PAGE 2328 LINE 119510
# FIX: VREDUCESD requires a trailing imm8 control byte (Intel SDM: VREDUCESD xmm1 {k1}{z}, xmm2, xmm3/m64, imm8),
# matching the packed VREDUCEPD forms above. It was missing, which made both the decoded length
# and the semantics wrong.
define pcodeop vreducesd_avx512dq ;
:VREDUCESD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vreducesd_avx512dq( evexV5_XmmReg, XmmReg2_m64, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VREDUCEPS 5-506 PAGE 2330 LINE 119605
define pcodeop vreduceps_avx512vl ;
:VREDUCEPS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x56; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vreduceps_avx512vl( XmmReg2_m128_m32bcst, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VREDUCEPS 5-506 PAGE 2330 LINE 119609
:VREDUCEPS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x56; (YmmReg1 & ZmmReg1 & YmmOpMask32) ...
& YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vreduceps_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VREDUCEPS 5-506 PAGE 2330 LINE 119613
define pcodeop vreduceps_avx512dq ;
:VREDUCEPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x56; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vreduceps_avx512dq( ZmmReg2_m512_m32bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VREDUCESS 5-508 PAGE 2332 LINE 119719
# FIX: VREDUCESS requires a trailing imm8 control byte (Intel SDM: VREDUCESS xmm1 {k1}{z}, xmm2, xmm3/m32, imm8),
# matching the packed VREDUCEPS forms above. It was missing, which made both the decoded length
# and the semantics wrong.
define pcodeop vreducess_avx512dq ;
:VREDUCESS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vreducess_avx512dq( evexV5_XmmReg, XmmReg2_m32, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VRNDSCALEPD 5-510 PAGE 2334 LINE 119814
define pcodeop vrndscalepd_avx512vl ;
:VRNDSCALEPD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x09; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vrndscalepd_avx512vl( XmmReg2_m128_m64bcst, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VRNDSCALEPD 5-510 PAGE 2334 LINE 119818
:VRNDSCALEPD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x09; (YmmReg1 & ZmmReg1 & YmmOpMask64) ...
# NOTE(review): VRNDSCALEPD zmm form, VRNDSCALESD, and VRNDSCALEPS xmm/ymm forms.
# VRNDSCALESD below correctly carries its imm8 rounding-control operand.
# Content below is preserved byte-for-byte.
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrndscalepd_avx512vl( YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VRNDSCALEPD 5-510 PAGE 2334 LINE 119822 define pcodeop vrndscalepd_avx512f ; :VRNDSCALEPD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) ; byte=0x09; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrndscalepd_avx512f( ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VRNDSCALESD 5-514 PAGE 2338 LINE 119998 define pcodeop vrndscalesd_avx512f ; :VRNDSCALESD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_XmmReg; byte=0x0B; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64; imm8 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrndscalesd_avx512f( evexV5_XmmReg, XmmReg2_m64, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRNDSCALEPS 5-516 PAGE 2340 LINE 120116 define pcodeop vrndscaleps_avx512vl ; :VRNDSCALEPS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x08; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vrndscaleps_avx512vl( XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRNDSCALEPS 5-516 PAGE 2340 LINE 120120 :VRNDSCALEPS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x08; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vrndscaleps_avx512vl( YmmReg2_m256_m32bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VRNDSCALEPS 5-516 PAGE 2340 LINE 120124
define pcodeop vrndscaleps_avx512f ;
:VRNDSCALEPS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x08; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vrndscaleps_avx512f( ZmmReg2_m512_m32bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VRNDSCALESS 5-519 PAGE 2343 LINE 120263
# FIX: VRNDSCALESS requires a trailing imm8 rounding-control byte
# (Intel SDM: VRNDSCALESS xmm1 {k1}{z}, xmm2, xmm3/m32, imm8), matching the
# VRNDSCALESD and packed VRNDSCALEPS forms. It was missing, so decode length
# and semantics were wrong.
define pcodeop vrndscaless_avx512f ;
:VRNDSCALESS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32, imm8 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32; imm8
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  XmmResult = vrndscaless_avx512f( evexV5_XmmReg, XmmReg2_m32, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VRSQRT14PD 5-521 PAGE 2345 LINE 120381
define pcodeop vrsqrt14pd_avx512vl ;
:VRSQRT14PD XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x4E; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vrsqrt14pd_avx512vl( XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# VRSQRT14PD 5-521 PAGE 2345 LINE 120385
:VRSQRT14PD YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x4E; (YmmReg1 & ZmmReg1 & YmmOpMask64) ...
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrsqrt14pd_avx512vl( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VRSQRT14PD 5-521 PAGE 2345 LINE 120389 define pcodeop vrsqrt14pd_avx512f ; :VRSQRT14PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0x4E; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrsqrt14pd_avx512f( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VRSQRT14SD 5-523 PAGE 2347 LINE 120491 define pcodeop vrsqrt14sd_avx512f ; :VRSQRT14SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrsqrt14sd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRSQRT14PS 5-525 PAGE 2349 LINE 120578 define pcodeop vrsqrt14ps_avx512vl ; :VRSQRT14PS XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x4E; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vrsqrt14ps_avx512vl( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRSQRT14PS 5-525 PAGE 2349 LINE 120582 :VRSQRT14PS YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x4E; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vrsqrt14ps_avx512vl( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VRSQRT14PS 5-525 PAGE 2349 LINE 120586 define pcodeop vrsqrt14ps_avx512f ; :VRSQRT14PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0x4E; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrsqrt14ps_avx512f( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VRSQRT14SS 5-527 PAGE 2351 LINE 120690 define pcodeop vrsqrt14ss_avx512f ; :VRSQRT14SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrsqrt14ss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VRSQRT28PD 5-529 PAGE 2353 LINE 120778 define pcodeop vrsqrt28pd_avx512er ; :VRSQRT28PD ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) ; byte=0xCC; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrsqrt28pd_avx512er( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VRSQRT28SD 5-531 PAGE 2355 LINE 120869 define pcodeop vrsqrt28sd_avx512er ; :VRSQRT28SD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0xCD; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... 
& XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrsqrt28sd_avx512er( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VRSQRT28PS 5-533 PAGE 2357 LINE 120959 define pcodeop vrsqrt28ps_avx512er ; :VRSQRT28PS ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0); byte=0xCC; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vrsqrt28ps_avx512er( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VRSQRT28SS 5-535 PAGE 2359 LINE 121051 define pcodeop vrsqrt28ss_avx512er ; :VRSQRT28SS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0xCD; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vrsqrt28ss_avx512er( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VSCALEFPD 5-537 PAGE 2361 LINE 121140 define pcodeop vscalefpd_avx512vl ; :VSCALEFPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vscalefpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VSCALEFPD 5-537 PAGE 2361 LINE 121143 :VSCALEFPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vscalefpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VSCALEFPD 5-537 PAGE 2361 LINE 121146 define pcodeop vscalefpd_avx512f ; :VSCALEFPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x2C; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vscalefpd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VSCALEFSD 5-540 PAGE 2364 LINE 121269 define pcodeop vscalefsd_avx512f ; :VSCALEFSD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m64 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vscalefsd_avx512f( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VSCALEFPS 5-542 PAGE 2366 LINE 121355 define pcodeop vscalefps_avx512vl ; :VSCALEFPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { XmmResult = vscalefps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VSCALEFPS 5-542 PAGE 2366 LINE 121358 :VSCALEFPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { YmmResult = vscalefps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VSCALEFPS 5-542 PAGE 2366 LINE 121361 define pcodeop vscalefps_avx512f ; :VSCALEFPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x2C; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType FV) { ZmmResult = vscalefps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VSCALEFSS 5-544 PAGE 2368 LINE 121470 define pcodeop vscalefss_avx512f ; :VSCALEFSS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m32 [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { XmmResult = vscalefss_avx512f( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121559 define pcodeop vscatterdps_avx512vl ; :VSCATTERDPS x_vm32x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xA2; XmmReg1 ... & x_vm32x [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterdps_avx512vl( x_vm32x, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121561 :VSCATTERDPS y_vm32y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0xA2; YmmReg1 ... 
& y_vm32y [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterdps_avx512vl( y_vm32y, YmmOpMask, YmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121563 define pcodeop vscatterdps_avx512f ; :VSCATTERDPS z_vm32z^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0xA2; ZmmReg1 ... & z_vm32z [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterdps_avx512f( z_vm32z, ZmmOpMask, ZmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121565 define pcodeop vscatterdpd_avx512vl ; :VSCATTERDPD x_vm32x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xA2; XmmReg1 ... & x_vm32x [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterdpd_avx512vl( x_vm32x, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121567 :VSCATTERDPD y_vm32y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0xA2; YmmReg1 ... & y_vm32y [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterdpd_avx512vl( y_vm32y, YmmOpMask, YmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121569 define pcodeop vscatterdpd_avx512f ; :VSCATTERDPD z_vm32z^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0xA2; ZmmReg1 ... 
& z_vm32z [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterdpd_avx512f( z_vm32z, ZmmOpMask, ZmmReg1 ); # TODO missing destination or side effects } @ifdef IA64 # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121571 define pcodeop vscatterqps_avx512vl ; :VSCATTERQPS q_vm64x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xA3; XmmReg1 ... & q_vm64x [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterqps_avx512vl( q_vm64x, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121573 :VSCATTERQPS q_vm64y^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0xA3; XmmReg1 ... & q_vm64y [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterqps_avx512vl( q_vm64y, XmmOpMask, XmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121575 define pcodeop vscatterqps_avx512f ; :VSCATTERQPS q_vm64z^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0xA3; YmmReg1 ... & q_vm64z [ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S) { vscatterqps_avx512f( q_vm64z, YmmOpMask, YmmReg1 ); # TODO missing destination or side effects } # VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121577 define pcodeop vscatterqpd_avx512vl ; :VSCATTERQPD x_vm64x^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0xA3; XmmReg1 ... 
# NOTE(review): the vscatter*/vscatterpf* pcodeops below are opaque stubs —
# the per-element memory stores / prefetch side effects are not emitted
# (the original "TODO missing destination or side effects" markers are kept).
& x_vm64x
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterqpd_avx512vl( x_vm64x, XmmOpMask, XmmReg1 );
  # TODO missing destination or side effects
}

# VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121579
:VSCATTERQPD y_vm64y^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0xA3; YmmReg1 ... & y_vm64y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterqpd_avx512vl( y_vm64y, YmmOpMask, YmmReg1 );
  # TODO missing destination or side effects
}
@endif

@ifdef IA64
# VSCATTERDPS/VSCATTERDPD/VSCATTERQPS/VSCATTERQPD 5-546 PAGE 2370 LINE 121581
define pcodeop vscatterqpd_avx512f ;
:VSCATTERQPD z_vm64z^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0xA3; ZmmReg1 ... & z_vm64z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterqpd_avx512f( z_vm64z, ZmmOpMask, ZmmReg1 );
  # TODO missing destination or side effects
}
@endif

# VSCATTERPF0DPS/VSCATTERPF0QPS/VSCATTERPF0DPD/VSCATTERPF0QPD 5-551 PAGE 2375 LINE 121759
define pcodeop vscatterpf0dps_avx512pf ;
:VSCATTERPF0DPS z_vm32z^ZmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0xC6; reg_opcode=5 ... & z_vm32z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf0dps_avx512pf( z_vm32z , ZmmOpMask);
  # TODO missing destination or side effects
}

@ifdef IA64
# VSCATTERPF0DPS/VSCATTERPF0QPS/VSCATTERPF0DPD/VSCATTERPF0QPD 5-551 PAGE 2375 LINE 121762
define pcodeop vscatterpf0qps_avx512pf ;
:VSCATTERPF0QPS z_vm64z^ZmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0xC7; reg_opcode=5 ... & z_vm64z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf0qps_avx512pf( z_vm64z, ZmmOpMask );
  # TODO missing destination or side effects
}
@endif

# VSCATTERPF0DPS/VSCATTERPF0QPS/VSCATTERPF0DPD/VSCATTERPF0QPD 5-551 PAGE 2375 LINE 121765
define pcodeop vscatterpf0dpd_avx512pf ;
:VSCATTERPF0DPD y_vm32y^YmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0xC6; reg_opcode=5 ... & y_vm32y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf0dpd_avx512pf( y_vm32y, YmmOpMask );
  # TODO missing destination or side effects
}

@ifdef IA64
# VSCATTERPF0DPS/VSCATTERPF0QPS/VSCATTERPF0DPD/VSCATTERPF0QPD 5-551 PAGE 2375 LINE 121768
define pcodeop vscatterpf0qpd_avx512pf ;
:VSCATTERPF0QPD z_vm64z^ZmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0xC7; reg_opcode=5 ... & z_vm64z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf0qpd_avx512pf( z_vm64z, ZmmOpMask );
  # TODO missing destination or side effects
}
@endif

# VSCATTERPF1DPS/VSCATTERPF1QPS/VSCATTERPF1DPD/VSCATTERPF1QPD 5-553 PAGE 2377 LINE 121877
define pcodeop vscatterpf1dps_avx512pf ;
:VSCATTERPF1DPS z_vm32z^ZmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0xC6; reg_opcode=6 ... & z_vm32z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf1dps_avx512pf( z_vm32z, ZmmOpMask );
  # TODO missing destination or side effects
}

@ifdef IA64
# VSCATTERPF1DPS/VSCATTERPF1QPS/VSCATTERPF1DPD/VSCATTERPF1QPD 5-553 PAGE 2377 LINE 121880
define pcodeop vscatterpf1qps_avx512pf ;
:VSCATTERPF1QPS z_vm64z^ZmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0xC7; reg_opcode=6 ... & z_vm64z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf1qps_avx512pf( z_vm64z, ZmmOpMask );
  # TODO missing destination or side effects
}
@endif

# VSCATTERPF1DPS/VSCATTERPF1QPS/VSCATTERPF1DPD/VSCATTERPF1QPD 5-553 PAGE 2377 LINE 121883
define pcodeop vscatterpf1dpd_avx512pf ;
:VSCATTERPF1DPD y_vm32y^YmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0xC6; reg_opcode=6 ... & y_vm32y
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf1dpd_avx512pf( y_vm32y, YmmOpMask );
  # TODO missing destination or side effects
}

@ifdef IA64
# VSCATTERPF1DPS/VSCATTERPF1QPS/VSCATTERPF1DPD/VSCATTERPF1QPD 5-553 PAGE 2377 LINE 121886
define pcodeop vscatterpf1qpd_avx512pf ;
:VSCATTERPF1QPD z_vm64z^ZmmOpMask is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0xC7; reg_opcode=6 ... & z_vm64z
[ evexD8Type = 1; evexTType = 3; ] # (TupleType T1S)
{
  vscatterpf1qpd_avx512pf( z_vm64z, ZmmOpMask );
  # TODO missing destination or side effects
}
@endif

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 121994
define pcodeop vshuff32x4_avx512vl ;
# NOTE(review): mnemonic is spelled "VSHUFF32X4" here but "VSHUFF32x4" in the
# 512-bit form below — disassembly casing is inconsistent; verify intended.
:VSHUFF32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vshuff32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 121998
define pcodeop vshuff32x4_avx512f ;
:VSHUFF32x4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x23; (ZmmReg1 & ZmmOpMask32) ...
# NOTE(review): VSHUFF/VSHUFI semantics are opaque pcodeops; the masked-merge
# pattern (Result -> Mask -> build OpMask -> writeback with zext for sub-512
# widths) is the shared convention used throughout this region.
& ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vshuff32x4_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122002
define pcodeop vshuff64x2_avx512vl ;
:VSHUFF64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x23; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vshuff64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122006
define pcodeop vshuff64x2_avx512f ;
:VSHUFF64x2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x23; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vshuff64x2_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122010
define pcodeop vshufi32x4_avx512vl ;
:VSHUFI32X4 YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vshufi32x4_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122013
define pcodeop vshufi32x4_avx512f ;
:VSHUFI32x4 ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & evexV5_ZmmReg; byte=0x43; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vshufi32x4_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122016
define pcodeop vshufi64x2_avx512vl ;
:VSHUFI64X2 YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_YmmReg; byte=0x43; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vshufi64x2_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# VSHUFF32x4/VSHUFF64x2/VSHUFI32x4/VSHUFI64x2 5-555 PAGE 2379 LINE 122019
define pcodeop vshufi64x2_avx512f ;
:VSHUFI64x2 ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & evexV5_ZmmReg; byte=0x43; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vshufi64x2_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmReg1;
}

# XORPD 5-596 PAGE 2420 LINE 123834
define pcodeop vxorpd_avx512vl ;
:VXORPD XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask64) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vxorpd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask64;
  ZmmReg1 = zext(XmmResult);
}

# XORPD 5-596 PAGE 2420 LINE 123837
:VXORPD YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1 & YmmOpMask64) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vxorpd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst );
  YmmMask = YmmReg1;
  build YmmOpMask64;
  ZmmReg1 = zext(YmmResult);
}

# XORPD 5-596 PAGE 2420 LINE 123840
define pcodeop vxorpd_avx512dq ;
:VXORPD ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & evexV5_ZmmReg; byte=0x57; (ZmmReg1 & ZmmOpMask64) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vxorpd_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m64bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask64;
  ZmmReg1 = ZmmResult;
}

# XORPS 5-599 PAGE 2423 LINE 123959
define pcodeop vxorps_avx512vl ;
:VXORPS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1 & XmmOpMask32) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  XmmResult = vxorps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# XORPS 5-599 PAGE 2423 LINE 123962
:VXORPS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_YmmReg; byte=0x57; (YmmReg1 & ZmmReg1 & YmmOpMask32) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  YmmResult = vxorps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# XORPS 5-599 PAGE 2423 LINE 123965
define pcodeop vxorps_avx512dq ;
:VXORPS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & evexV5_ZmmReg; byte=0x57; (ZmmReg1 & ZmmOpMask32) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType FV)
{
  ZmmResult = vxorps_avx512dq( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask32;
  ZmmReg1 = ZmmResult;
}

# AVX512 BMI, FMA, and FP16 updates

# AESDEC 3-51 PAGE 621 LINE 35875
define pcodeop vaesdec_vaes ;
:VAESDEC XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:16 = vaesdec_vaes( evexV5_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# AESDEC 3-51 PAGE 621 LINE 35879
:VAESDEC YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDE; (YmmReg1 & ZmmReg1) ...
# NOTE(review): the EVEX/VAES forms are unmasked — no OpMask operand appears
# in these constructors; results are zero-extended into the full Zmm register.
& YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:32 = vaesdec_vaes( evexV5_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# AESDEC 3-51 PAGE 621 LINE 35883
:VAESDEC ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDE; ZmmReg1 ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  ZmmReg1 = vaesdec_vaes( evexV5_ZmmReg, ZmmReg2_m512 );
}

# AESDECLAST 3-57 PAGE 627 LINE 36144
define pcodeop vaesdeclast_vaes ;
:VAESDECLAST XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:16 = vaesdeclast_vaes( evexV5_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# AESDECLAST 3-57 PAGE 627 LINE 36148
:VAESDECLAST YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:32 = vaesdeclast_vaes( evexV5_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# AESDECLAST 3-57 PAGE 627 LINE 36152
:VAESDECLAST ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDF; ZmmReg1 ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  ZmmReg1 = vaesdeclast_vaes( evexV5_ZmmReg, ZmmReg2_m512 );
}

# AESENC 3-63 PAGE 633 LINE 36420
define pcodeop vaesenc_vaes ;
:VAESENC XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:16 = vaesenc_vaes( evexV5_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# AESENC 3-63 PAGE 633 LINE 36423
:VAESENC YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:32 = vaesenc_vaes( evexV5_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# AESENC 3-63 PAGE 633 LINE 36426
:VAESENC ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDC; ZmmReg1 ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  ZmmReg1 = vaesenc_vaes( evexV5_ZmmReg, ZmmReg2_m512 );
}

# AESENCLAST 3-69 PAGE 639 LINE 36687
define pcodeop vaesenclast_vaes ;
:VAESENCLAST XmmReg1, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_XmmReg; byte=0xDD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:16 = vaesenclast_vaes( evexV5_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# AESENCLAST 3-69 PAGE 639 LINE 36691
:VAESENCLAST YmmReg1, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_YmmReg; byte=0xDD; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:32 = vaesenclast_vaes( evexV5_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# AESENCLAST 3-69 PAGE 639 LINE 36695
:VAESENCLAST ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & evexV5_ZmmReg; byte=0xDD; ZmmReg1 ...
# NOTE(review): GF2P8* forms are masked (OpMask8 merge convention);
# VPCLMULQDQ forms are unmasked. All semantics are opaque pcodeop stubs.
& ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  ZmmResult = vgf2p8affineinvqb_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask8;
  ZmmReg1 = ZmmResult;
}

# GF2P8AFFINEQB 3-479 PAGE 1049 LINE 56642
define pcodeop vgf2p8affineqb_avx512vl ;
:VGF2P8AFFINEQB XmmReg1 XmmOpMask8, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask8 & evexV5_XmmReg; byte=0xCE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vgf2p8affineqb_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask8;
  ZmmReg1 = zext(XmmResult);
}

# GF2P8AFFINEQB 3-479 PAGE 1049 LINE 56645
:VGF2P8AFFINEQB YmmReg1 YmmOpMask8, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask8 & evexV5_YmmReg; byte=0xCE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  YmmResult = vgf2p8affineqb_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 );
  YmmMask = YmmReg1;
  build YmmOpMask8;
  ZmmReg1 = zext(YmmResult);
}

# GF2P8AFFINEQB 3-479 PAGE 1049 LINE 56648
define pcodeop vgf2p8affineqb_avx512f ;
:VGF2P8AFFINEQB ZmmReg1 ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask8 & evexV5_ZmmReg; byte=0xCE; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  ZmmResult = vgf2p8affineqb_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask8;
  ZmmReg1 = ZmmResult;
}

# GF2P8MULB 3-481 PAGE 1051 LINE 56754
define pcodeop vgf2p8mulb_avx512vl ;
:VGF2P8MULB XmmReg1 XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0xCF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  XmmResult = vgf2p8mulb_avx512vl( evexV5_XmmReg, XmmReg2_m128 );
  XmmMask = XmmReg1;
  build XmmOpMask8;
  ZmmReg1 = zext(XmmResult);
}

# GF2P8MULB 3-481 PAGE 1051 LINE 56757
:VGF2P8MULB YmmReg1 YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0xCF; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  YmmResult = vgf2p8mulb_avx512vl( evexV5_YmmReg, YmmReg2_m256 );
  YmmMask = YmmReg1;
  build YmmOpMask8;
  ZmmReg1 = zext(YmmResult);
}

# GF2P8MULB 3-481 PAGE 1051 LINE 56760
define pcodeop vgf2p8mulb_avx512f ;
:VGF2P8MULB ZmmReg1 ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0xCF; ZmmReg1 ... & ZmmReg2_m512
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  ZmmResult = vgf2p8mulb_avx512f( evexV5_ZmmReg, ZmmReg2_m512 );
  ZmmMask = ZmmReg1;
  build ZmmOpMask8;
  ZmmReg1 = ZmmResult;
}

# PCLMULQDQ 4-242 PAGE 1362 LINE 76037
define pcodeop vpclmulqdq_vpclmulqdq ;
:VPCLMULQDQ XmmReg1, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_XmmReg; byte=0x44; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:16 = vpclmulqdq_vpclmulqdq( evexV5_XmmReg, XmmReg2_m128, imm8:1 );
  ZmmReg1 = zext(tmp);
}

# PCLMULQDQ 4-242 PAGE 1362 LINE 76042
:VPCLMULQDQ YmmReg1, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_YmmReg; byte=0x44; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  local tmp:32 = vpclmulqdq_vpclmulqdq( evexV5_YmmReg, YmmReg2_m256, imm8:1 );
  ZmmReg1 = zext(tmp);
}

# PCLMULQDQ 4-242 PAGE 1362 LINE 76047
:VPCLMULQDQ ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & evexV5_ZmmReg; byte=0x44; ZmmReg1 ... & ZmmReg2_m512; imm8
[ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{
  ZmmReg1 = vpclmulqdq_vpclmulqdq( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 );
}

# VADDPH 5-5 PAGE 1829 LINE 101735
define pcodeop vaddph_avx512fp16 ;
:VADDPH XmmReg1 XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vaddph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult);
}

# VADDPH 5-5 PAGE 1829 LINE 101738
:VADDPH YmmReg1 YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x58; (YmmReg1 & ZmmReg1) ...
# NOTE(review): FP16 adds use the masked-merge convention; VADDSH additionally
# preserves DEST[127:16] per the scalar-merge rule. The VCMPPH_mon/VCMPPH_op
# table pairs map each compare predicate imm8 to a pseudo-mnemonic (display
# only) and export the immediate for the semantic call.
& YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  YmmResult = vaddph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
  YmmMask = YmmReg1;
  build YmmOpMask16;
  ZmmReg1 = zext(YmmResult);
}

# VADDPH 5-5 PAGE 1829 LINE 101741
:VADDPH ZmmReg1 ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x58; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  ZmmResult = vaddph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask16;
  ZmmReg1 = ZmmResult;
}

# VADDSH 5-7 PAGE 1831 LINE 101824
define pcodeop vaddsh_avx512fp16 ;
:VADDSH XmmReg1 XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x58; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
  XmmResult = vaddsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VCMPPH predicate table: one (mnemonic, exported imm8) pair per encoding.
VCMPPH_mon: "VCMPEQPH" is imm8=0x0 { }
VCMPPH_op: "" is imm8=0x0 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPLTPH" is imm8=0x1 { }
VCMPPH_op: "" is imm8=0x1 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPLEPH" is imm8=0x2 { }
VCMPPH_op: "" is imm8=0x2 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPUNORDPH" is imm8=0x3 { }
VCMPPH_op: "" is imm8=0x3 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNEQPH" is imm8=0x4 { }
VCMPPH_op: "" is imm8=0x4 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNLTPH" is imm8=0x5 { }
VCMPPH_op: "" is imm8=0x5 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNLEPH" is imm8=0x6 { }
VCMPPH_op: "" is imm8=0x6 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPORDPH" is imm8=0x7 { }
VCMPPH_op: "" is imm8=0x7 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPEQ_UQPH" is imm8=0x8 { }
VCMPPH_op: "" is imm8=0x8 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNGEPH" is imm8=0x9 { }
VCMPPH_op: "" is imm8=0x9 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNGTPH" is imm8=0xa { }
VCMPPH_op: "" is imm8=0xa & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPFALSEPH" is imm8=0xb { }
VCMPPH_op: "" is imm8=0xb & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNEQ_OQPH" is imm8=0xc { }
VCMPPH_op: "" is imm8=0xc & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPGEPH" is imm8=0xd { }
VCMPPH_op: "" is imm8=0xd & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPGTPH" is imm8=0xe { }
VCMPPH_op: "" is imm8=0xe & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPTRUEPH" is imm8=0xf { }
VCMPPH_op: "" is imm8=0xf & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPEQ_OSPH" is imm8=0x10 { }
VCMPPH_op: "" is imm8=0x10 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPLT_OQPH" is imm8=0x11 { }
VCMPPH_op: "" is imm8=0x11 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPLE_OQPH" is imm8=0x12 { }
VCMPPH_op: "" is imm8=0x12 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPUNORD_SPH" is imm8=0x13 { }
VCMPPH_op: "" is imm8=0x13 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNEQ_USPH" is imm8=0x14 { }
VCMPPH_op: "" is imm8=0x14 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNLT_UQPH" is imm8=0x15 { }
VCMPPH_op: "" is imm8=0x15 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNLE_UQPH" is imm8=0x16 { }
VCMPPH_op: "" is imm8=0x16 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPORD_SPH" is imm8=0x17 { }
VCMPPH_op: "" is imm8=0x17 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPEQ_USPH" is imm8=0x18 { }
VCMPPH_op: "" is imm8=0x18 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNGE_UQPH" is imm8=0x19 { }
VCMPPH_op: "" is imm8=0x19 & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNGT_UQPH" is imm8=0x1a { }
VCMPPH_op: "" is imm8=0x1a & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPFALSE_OSPH" is imm8=0x1b { }
VCMPPH_op: "" is imm8=0x1b & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPNEQ_OSPH" is imm8=0x1c { }
VCMPPH_op: "" is imm8=0x1c & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPGE_OQPH" is imm8=0x1d { }
VCMPPH_op: "" is imm8=0x1d & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPGT_OQPH" is imm8=0x1e { }
VCMPPH_op: "" is imm8=0x1e & imm8_val { export *[const]:1 imm8_val; }
VCMPPH_mon: "VCMPTRUE_USPH" is imm8=0x1f { }
VCMPPH_op: "" is imm8=0x1f & imm8_val { export *[const]:1 imm8_val; }
# Fallback: any other imm8 prints the generic mnemonic with an explicit imm8.
VCMPPH_mon: "VCMPPH" is imm8 { }
VCMPPH_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }

define pcodeop vcmpph_avx512fp16;
:^VCMPPH_mon KReg_reg^XmmOpMask, evexV5_XmmReg, XmmReg2_m128_m16bcst^VCMPPH_op is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m128_m16bcst; VCMPPH_mon & VCMPPH_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  KReg_reg = vcmpph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst, XmmOpMask, VCMPPH_op );
}

# VCMPPH 5-21 PAGE 1845 LINE 102586
:^VCMPPH_mon KReg_reg^YmmOpMask, evexV5_YmmReg, YmmReg2_m256_m16bcst^VCMPPH_op is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask & evexV5_YmmReg; byte=0xC2; KReg_reg ... & YmmReg2_m256_m16bcst; VCMPPH_mon & VCMPPH_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  KReg_reg = vcmpph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst, YmmOpMask, VCMPPH_op );
}

# VCMPPH 5-21 PAGE 1845 LINE 102590
:^VCMPPH_mon KReg_reg^ZmmOpMask, evexV5_ZmmReg, ZmmReg2_m512_m16bcst^VCMPPH_op is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask & evexV5_ZmmReg; byte=0xC2; KReg_reg ...
# NOTE(review): VCMPSH mirrors the VCMPPH predicate-table scheme for the
# scalar half-precision compare; VCOMISH is stubbed (flags not modeled —
# original TODO kept).
& ZmmReg2_m512_m16bcst; VCMPPH_mon & VCMPPH_op
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  KReg_reg = vcmpph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst, ZmmOpMask, VCMPPH_op );
}

# VCMPSH predicate table: one (mnemonic, exported imm8) pair per encoding.
VCMPSH_mon: "VCMPEQSH" is imm8=0x0 { }
VCMPSH_op: "" is imm8=0x0 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPLTSH" is imm8=0x1 { }
VCMPSH_op: "" is imm8=0x1 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPLESH" is imm8=0x2 { }
VCMPSH_op: "" is imm8=0x2 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPUNORDSH" is imm8=0x3 { }
VCMPSH_op: "" is imm8=0x3 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNEQSH" is imm8=0x4 { }
VCMPSH_op: "" is imm8=0x4 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNLTSH" is imm8=0x5 { }
VCMPSH_op: "" is imm8=0x5 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNLESH" is imm8=0x6 { }
VCMPSH_op: "" is imm8=0x6 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPORDSH" is imm8=0x7 { }
VCMPSH_op: "" is imm8=0x7 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPEQ_UQSH" is imm8=0x8 { }
VCMPSH_op: "" is imm8=0x8 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNGESH" is imm8=0x9 { }
VCMPSH_op: "" is imm8=0x9 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNGTSH" is imm8=0xa { }
VCMPSH_op: "" is imm8=0xa & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPFALSESH" is imm8=0xb { }
VCMPSH_op: "" is imm8=0xb & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNEQ_OQSH" is imm8=0xc { }
VCMPSH_op: "" is imm8=0xc & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPGESH" is imm8=0xd { }
VCMPSH_op: "" is imm8=0xd & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPGTSH" is imm8=0xe { }
VCMPSH_op: "" is imm8=0xe & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPTRUESH" is imm8=0xf { }
VCMPSH_op: "" is imm8=0xf & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPEQ_OSSH" is imm8=0x10 { }
VCMPSH_op: "" is imm8=0x10 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPLT_OQSH" is imm8=0x11 { }
VCMPSH_op: "" is imm8=0x11 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPLE_OQSH" is imm8=0x12 { }
VCMPSH_op: "" is imm8=0x12 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPUNORD_SSH" is imm8=0x13 { }
VCMPSH_op: "" is imm8=0x13 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNEQ_USSH" is imm8=0x14 { }
VCMPSH_op: "" is imm8=0x14 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNLT_UQSH" is imm8=0x15 { }
VCMPSH_op: "" is imm8=0x15 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNLE_UQSH" is imm8=0x16 { }
VCMPSH_op: "" is imm8=0x16 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPORD_SSH" is imm8=0x17 { }
VCMPSH_op: "" is imm8=0x17 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPEQ_USSH" is imm8=0x18 { }
VCMPSH_op: "" is imm8=0x18 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNGE_UQSH" is imm8=0x19 { }
VCMPSH_op: "" is imm8=0x19 & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNGT_UQSH" is imm8=0x1a { }
VCMPSH_op: "" is imm8=0x1a & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPFALSE_OSSH" is imm8=0x1b { }
VCMPSH_op: "" is imm8=0x1b & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPNEQ_OSSH" is imm8=0x1c { }
VCMPSH_op: "" is imm8=0x1c & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPGE_OQSH" is imm8=0x1d { }
VCMPSH_op: "" is imm8=0x1d & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPGT_OQSH" is imm8=0x1e { }
VCMPSH_op: "" is imm8=0x1e & imm8_val { export *[const]:1 imm8_val; }
VCMPSH_mon: "VCMPTRUE_USSH" is imm8=0x1f { }
VCMPSH_op: "" is imm8=0x1f & imm8_val { export *[const]:1 imm8_val; }
# Fallback: any other imm8 prints the generic mnemonic with an explicit imm8.
VCMPSH_mon: "VCMPSH" is imm8 { }
VCMPSH_op: ", "^imm8 is imm8 { export *[const]:1 imm8; }

# VCMPSH 5-23 PAGE 1847 LINE 102692
define pcodeop vcmpsh_avx512fp16 ;
:^VCMPSH_mon KReg_reg^XmmOpMask, evexV5_XmmReg, XmmReg2_m16^VCMPSH_op is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xC2; KReg_reg ... & XmmReg2_m16; VCMPSH_mon & VCMPSH_op
{
  KReg_reg = vcmpsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, XmmOpMask, VCMPSH_op );
}

# VCOMISH 5-25 PAGE 1849 LINE 102783
define pcodeop vcomish_avx512fp16 ;
:VCOMISH XmmReg1, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0); byte=0x2F; XmmReg1 ... & XmmReg2_m16
{
  vcomish_avx512fp16( XmmReg1, XmmReg2_m16 );
  # TODO missing destination or side effects
}

# VCVTDQ2PH 5-31 PAGE 1855 LINE 103058
define pcodeop vcvtdq2ph_avx512fp16 ;
:VCVTDQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtdq2ph_avx512fp16( XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult[0,64]);
}

# VCVTDQ2PH 5-31 PAGE 1855 LINE 103062
:VCVTDQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtdq2ph_avx512fp16( YmmReg2_m256_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult);
}

# VCVTDQ2PH 5-31 PAGE 1855 LINE 103066
:VCVTDQ2PH YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x5B; (YmmReg1 & ZmmReg1) ...
# NOTE(review): narrowing conversions (BF16/PH destinations) write only the
# low portion of the result — hence the zext(XmmResult[0,64]) / [0,32]
# sub-range writebacks below, which zero the upper destination lanes.
& ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  YmmResult = vcvtdq2ph_avx512fp16( ZmmReg2_m512_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask16;
  ZmmReg1 = zext(YmmResult);
}

# VCVTNE2PS2BF16 5-33 PAGE 1857 LINE 103147
define pcodeop vcvtne2ps2bf16_avx512vl ;
:VCVTNE2PS2BF16 XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtne2ps2bf16_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult);
}

# VCVTNE2PS2BF16 5-33 PAGE 1857 LINE 103150
:VCVTNE2PS2BF16 YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x72; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  YmmResult = vcvtne2ps2bf16_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask16;
  ZmmReg1 = zext(YmmResult);
}

# VCVTNE2PS2BF16 5-33 PAGE 1857 LINE 103153
define pcodeop vcvtne2ps2bf16_avx512f ;
:VCVTNE2PS2BF16 ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x72; ZmmReg1 ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  ZmmResult = vcvtne2ps2bf16_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
  ZmmMask = ZmmReg1;
  build ZmmOpMask16;
  ZmmReg1 = ZmmResult;
}

# VCVTNEPS2BF16 5-35 PAGE 1859 LINE 103231
define pcodeop vcvtneps2bf16_avx512vl ;
:VCVTNEPS2BF16 XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtneps2bf16_avx512vl( XmmReg2_m128_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult[0,64]);
}

# VCVTNEPS2BF16 5-35 PAGE 1859 LINE 103234
:VCVTNEPS2BF16 XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x72; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtneps2bf16_avx512vl( YmmReg2_m256_m32bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult);
}

# VCVTNEPS2BF16 5-35 PAGE 1859 LINE 103237
define pcodeop vcvtneps2bf16_avx512f ;
:VCVTNEPS2BF16 YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x72; (YmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  YmmResult = vcvtneps2bf16_avx512f( ZmmReg2_m512_m32bcst );
  YmmMask = YmmReg1;
  build YmmOpMask16;
  ZmmReg1 = zext(YmmResult);
}

# VCVTPD2PH 5-37 PAGE 1861 LINE 103327
define pcodeop vcvtpd2ph_avx512fp16 ;
:VCVTPD2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtpd2ph_avx512fp16( XmmReg2_m128_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult[0,32]);
}

# VCVTPD2PH 5-37 PAGE 1861 LINE 103331
:VCVTPD2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtpd2ph_avx512fp16( YmmReg2_m256_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult[0,64]);
}

# VCVTPD2PH 5-37 PAGE 1861 LINE 103335
:VCVTPD2PH XmmReg1^XmmOpMask16, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5A; (XmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
  XmmResult = vcvtpd2ph_avx512fp16( ZmmReg2_m512_m64bcst );
  XmmMask = XmmReg1;
  build XmmOpMask16;
  ZmmReg1 = zext(XmmResult);
}

# VCVTPH2DQ 5-45 PAGE 1869 LINE 103774
define pcodeop vcvtph2dq_avx512fp16 ;
:VCVTPH2DQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst
[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
{
  XmmResult = vcvtph2dq_avx512fp16( XmmReg2_m64_m16bcst );
  XmmMask = XmmReg1;
  build XmmOpMask32;
  ZmmReg1 = zext(XmmResult);
}

# VCVTPH2DQ 5-45 PAGE 1869 LINE 103778
:VCVTPH2DQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x5B; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 1; ] # (TupleType Half)
{
  YmmResult = vcvtph2dq_avx512fp16( XmmReg2_m128_m16bcst );
  YmmMask = YmmReg1;
  build YmmOpMask32;
  ZmmReg1 = zext(YmmResult);
}

# VCVTPH2DQ 5-45 PAGE 1869 LINE 103782
:VCVTPH2DQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x5B; ZmmReg1 ...
& YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { ZmmResult = vcvtph2dq_avx512fp16( YmmReg2_m256_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTPH2PD 5-47 PAGE 1871 LINE 103861 define pcodeop vcvtph2pd_avx512fp16 ; :VCVTPH2PD XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst { XmmResult = vcvtph2pd_avx512fp16( XmmReg2_m32_m16bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPH2PD 5-47 PAGE 1871 LINE 103864 :VCVTPH2PD YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x5A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst { YmmResult = vcvtph2pd_avx512fp16( XmmReg2_m64_m16bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPH2PD 5-47 PAGE 1871 LINE 103867 :VCVTPH2PD ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x5A; ZmmReg1 ... & XmmReg2_m128_m16bcst { ZmmResult = vcvtph2pd_avx512fp16( XmmReg2_m128_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTPH2PS/VCVTPH2PSX 5-49 PAGE 1873 LINE 103953 define pcodeop vcvtph2psx_avx512fp16 ; :VCVTPH2PSX XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32; byte=0x13; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m64_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { XmmResult = vcvtph2psx_avx512fp16( XmmReg2_m64_m16bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTPH2PS/VCVTPH2PSX 5-49 PAGE 1873 LINE 103957 :VCVTPH2PSX YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32; byte=0x13; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { YmmResult = vcvtph2psx_avx512fp16( XmmReg2_m128_m16bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTPH2PS/VCVTPH2PSX 5-49 PAGE 1873 LINE 103961 :VCVTPH2PSX ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32; byte=0x13; ZmmReg1 ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { ZmmResult = vcvtph2psx_avx512fp16( YmmReg2_m256_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTPH2QQ 5-53 PAGE 1877 LINE 104149 define pcodeop vcvtph2qq_avx512fp16 ; :VCVTPH2QQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x7B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst { XmmResult = vcvtph2qq_avx512fp16( XmmReg2_m32_m16bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPH2QQ 5-53 PAGE 1877 LINE 104152 :VCVTPH2QQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x7B; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m64_m16bcst { YmmResult = vcvtph2qq_avx512fp16( XmmReg2_m64_m16bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPH2QQ 5-53 PAGE 1877 LINE 104155 :VCVTPH2QQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x7B; ZmmReg1 ... & XmmReg2_m128_m16bcst { ZmmResult = vcvtph2qq_avx512fp16( XmmReg2_m128_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTPH2UDQ 5-55 PAGE 1879 LINE 104237 define pcodeop vcvtph2udq_avx512fp16 ; :VCVTPH2UDQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x79; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { XmmResult = vcvtph2udq_avx512fp16( XmmReg2_m64_m16bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTPH2UDQ 5-55 PAGE 1879 LINE 104241 :VCVTPH2UDQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x79; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { YmmResult = vcvtph2udq_avx512fp16( XmmReg2_m128_m16bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTPH2UDQ 5-55 PAGE 1879 LINE 104245 :VCVTPH2UDQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x79; ZmmReg1 ... 
& YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { ZmmResult = vcvtph2udq_avx512fp16( YmmReg2_m256_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTPH2UQQ 5-57 PAGE 1881 LINE 104324 define pcodeop vcvtph2uqq_avx512fp16 ; :VCVTPH2UQQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x79; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst { XmmResult = vcvtph2uqq_avx512fp16( XmmReg2_m32_m16bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTPH2UQQ 5-57 PAGE 1881 LINE 104327 :VCVTPH2UQQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x79; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst { YmmResult = vcvtph2uqq_avx512fp16( XmmReg2_m64_m16bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTPH2UQQ 5-57 PAGE 1881 LINE 104331 :VCVTPH2UQQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x79; ZmmReg1 ... & XmmReg2_m128_m16bcst { ZmmResult = vcvtph2uqq_avx512fp16( XmmReg2_m128_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTPH2UW 5-59 PAGE 1883 LINE 104412 define pcodeop vcvtph2uw_avx512fp16 ; :VCVTPH2UW XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtph2uw_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTPH2UW 5-59 PAGE 1883 LINE 104415 :VCVTPH2UW YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vcvtph2uw_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VCVTPH2UW 5-59 PAGE 1883 LINE 104418 :VCVTPH2UW ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vcvtph2uw_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VCVTPH2W 5-61 PAGE 1885 LINE 104499 define pcodeop vcvtph2w_avx512fp16 ; :VCVTPH2W XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtph2w_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTPH2W 5-61 PAGE 1885 LINE 104502 :VCVTPH2W YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vcvtph2w_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VCVTPH2W 5-61 PAGE 1885 LINE 104505 :VCVTPH2W ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... 
& ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vcvtph2w_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VCVTPS2PHX 5-67 PAGE 1891 LINE 104781 define pcodeop vcvtps2phx_avx512fp16 ; :VCVTPS2PHX XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x1D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtps2phx_avx512fp16( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult[0,64]); } # VCVTPS2PHX 5-67 PAGE 1891 LINE 104785 :VCVTPS2PHX XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x1D; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtps2phx_avx512fp16( YmmReg2_m256_m32bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTPS2PHX 5-67 PAGE 1891 LINE 104789 :VCVTPS2PHX YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x1D; (YmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vcvtps2phx_avx512fp16( ZmmReg2_m512_m32bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VCVTQQ2PH 5-78 PAGE 1902 LINE 105354 define pcodeop vcvtqq2ph_avx512fp16 ; :VCVTQQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtqq2ph_avx512fp16( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult[0,32]); } # VCVTQQ2PH 5-78 PAGE 1902 LINE 105358 :VCVTQQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtqq2ph_avx512fp16( YmmReg2_m256_m64bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult[0,64]); } # VCVTQQ2PH 5-78 PAGE 1902 LINE 105362 :VCVTQQ2PH XmmReg1^XmmOpMask16, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x5B; (XmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtqq2ph_avx512fp16( ZmmReg2_m512_m64bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTSD2SH 5-82 PAGE 1906 LINE 105553 define pcodeop vcvtsd2sh_avx512fp16 ; :VCVTSD2SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m64 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { XmmResult = vcvtsd2sh_avx512fp16( evexV5_XmmReg, XmmReg2_m64 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged ZmmReg1 = zext(XmmResult); } # VCVTSH2SD 5-85 PAGE 1909 LINE 105683 define pcodeop vcvtsh2sd_avx512fp16 ; :VCVTSH2SD XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5A; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m16
{
	XmmResult = vcvtsh2sd_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
	XmmMask = XmmReg1;
	build XmmOpMask;
	conditionalAssign(XmmResult[0,64], XmmOpMask[0,1], XmmResult[0,64], XmmMask[0,64]);
	# BUG FIX: upper quadword was previously copied from XmmReg1[16,64]
	# (bits 79:16), which matches neither the comment below nor the SDM;
	# bits 127:64 must be taken unshifted.
	# NOTE(review): the SDM actually specifies DEST[127:64] := SRC1[127:64]
	# where SRC1 is the EVEX.vvvv register (evexV5_XmmReg) -- confirm whether
	# sourcing from XmmReg1 here is intentional.
	XmmResult[64,64] = XmmReg1[64,64]; # DEST[127:64] remains unchanged
	ZmmReg1 = zext(XmmResult);
}

# VCVTSH2SI 5-86 PAGE 1910 LINE 105738
define pcodeop vcvtsh2si_avx512fp16 ;
:VCVTSH2SI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x2D; Reg32 ... & XmmReg2_m16
{
	Reg32 = vcvtsh2si_avx512fp16( XmmReg2_m16 );
}

# VCVTSH2SI 5-86 PAGE 1910 LINE 105740
@ifdef IA64
:VCVTSH2SI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x2D; Reg64 ... & XmmReg2_m16
{
	Reg64 = vcvtsh2si_avx512fp16( XmmReg2_m16 );
}
@endif

# VCVTSH2SS 5-87 PAGE 1911 LINE 105796
define pcodeop vcvtsh2ss_avx512fp16 ;
:VCVTSH2SS XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x13; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
	XmmResult = vcvtsh2ss_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
	XmmMask = XmmReg1;
	build XmmOpMask;
	conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
	XmmResult[32,96] = XmmReg1[32,96]; # DEST[127:32] remains unchanged
	ZmmReg1 = zext(XmmResult);
}

# VCVTSH2USI 5-88 PAGE 1912 LINE 105851
define pcodeop vcvtsh2usi_avx512fp16 ;
:VCVTSH2USI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x79; Reg32 ... & XmmReg2_m16
{
	Reg32 = vcvtsh2usi_avx512fp16( XmmReg2_m16 );
}

# VCVTSH2USI 5-88 PAGE 1912 LINE 105853
@ifdef IA64
:VCVTSH2USI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x79; Reg64 ...
& XmmReg2_m16 { Reg64 = vcvtsh2usi_avx512fp16( XmmReg2_m16 ); } @endif # VCVTSI2SH 5-89 PAGE 1913 LINE 105910 define pcodeop vcvtsi2sh_avx512fp16 ; :VCVTSI2SH XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm32 { local tmp:16 = vcvtsi2sh_avx512fp16( evexV5_XmmReg, rm32 ); ZmmReg1 = zext(tmp); } # VCVTSI2SH 5-89 PAGE 1913 LINE 105914 @ifdef IA64 :VCVTSI2SH XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1) & evexV5_XmmReg; byte=0x2A; (XmmReg1 & ZmmReg1) ... & rm64 { local tmp:16 = vcvtsi2sh_avx512fp16( evexV5_XmmReg, rm64 ); ZmmReg1 = zext(tmp); } @endif # VCVTSS2SH 5-91 PAGE 1915 LINE 105984 define pcodeop vcvtss2sh_avx512fp16 ; :VCVTSS2SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x1D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { XmmResult = vcvtss2sh_avx512fp16( evexV5_XmmReg, XmmReg2_m32 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged ZmmReg1 = zext(XmmResult); } # VCVTTPH2DQ 5-100 PAGE 1924 LINE 106453 define pcodeop vcvttph2dq_avx512fp16 ; :VCVTTPH2DQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x5B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { XmmResult = vcvttph2dq_avx512fp16( XmmReg2_m64_m16bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTTPH2DQ 5-100 PAGE 1924 LINE 106457 :VCVTTPH2DQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x5B; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { YmmResult = vcvttph2dq_avx512fp16( XmmReg2_m128_m16bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTTPH2DQ 5-100 PAGE 1924 LINE 106461 :VCVTTPH2DQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x5B; ZmmReg1 ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { ZmmResult = vcvttph2dq_avx512fp16( YmmReg2_m256_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTTPH2QQ 5-102 PAGE 1926 LINE 106537 define pcodeop vcvttph2qq_avx512fp16 ; :VCVTTPH2QQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x7A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst { XmmResult = vcvttph2qq_avx512fp16( XmmReg2_m32_m16bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPH2QQ 5-102 PAGE 1926 LINE 106541 :VCVTTPH2QQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x7A; (YmmReg1 & ZmmReg1) ... & XmmReg2_m64_m16bcst { YmmResult = vcvttph2qq_avx512fp16( XmmReg2_m64_m16bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPH2QQ 5-102 PAGE 1926 LINE 106545 :VCVTTPH2QQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x7A; ZmmReg1 ... & XmmReg2_m128_m16bcst { ZmmResult = vcvttph2qq_avx512fp16( XmmReg2_m128_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTTPH2UDQ 5-104 PAGE 1928 LINE 106622 define pcodeop vcvttph2udq_avx512fp16 ; :VCVTTPH2UDQ XmmReg1^XmmOpMask32, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask32; byte=0x78; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m64_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { XmmResult = vcvttph2udq_avx512fp16( XmmReg2_m64_m16bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VCVTTPH2UDQ 5-104 PAGE 1928 LINE 106626 :VCVTTPH2UDQ YmmReg1^YmmOpMask32, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask32; byte=0x78; (YmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { YmmResult = vcvttph2udq_avx512fp16( XmmReg2_m128_m16bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VCVTTPH2UDQ 5-104 PAGE 1928 LINE 106630 :VCVTTPH2UDQ ZmmReg1^ZmmOpMask32, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask32; byte=0x78; ZmmReg1 ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 1; ] # (TupleType Half) { ZmmResult = vcvttph2udq_avx512fp16( YmmReg2_m256_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VCVTTPH2UQQ 5-106 PAGE 1930 LINE 106706 define pcodeop vcvttph2uqq_avx512fp16 ; :VCVTTPH2UQQ XmmReg1^XmmOpMask64, XmmReg2_m32_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask64; byte=0x78; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32_m16bcst { XmmResult = vcvttph2uqq_avx512fp16( XmmReg2_m32_m16bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VCVTTPH2UQQ 5-106 PAGE 1930 LINE 106710 :VCVTTPH2UQQ YmmReg1^YmmOpMask64, XmmReg2_m64_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask64; byte=0x78; (YmmReg1 & ZmmReg1) ... 
& XmmReg2_m64_m16bcst { YmmResult = vcvttph2uqq_avx512fp16( XmmReg2_m64_m16bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VCVTTPH2UQQ 5-106 PAGE 1930 LINE 106714 :VCVTTPH2UQQ ZmmReg1^ZmmOpMask64, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask64; byte=0x78; ZmmReg1 ... & XmmReg2_m128_m16bcst { ZmmResult = vcvttph2uqq_avx512fp16( XmmReg2_m128_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VCVTTPH2UW 5-108 PAGE 1932 LINE 106790 define pcodeop vcvttph2uw_avx512fp16 ; :VCVTTPH2UW XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvttph2uw_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTTPH2UW 5-108 PAGE 1932 LINE 106794 :VCVTTPH2UW YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vcvttph2uw_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VCVTTPH2UW 5-108 PAGE 1932 LINE 106798 :VCVTTPH2UW ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7C; ZmmReg1 ... 
& ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vcvttph2uw_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VCVTTPH2W 5-110 PAGE 1934 LINE 106874 define pcodeop vcvttph2w_avx512fp16 ; :VCVTTPH2W XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvttph2w_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTTPH2W 5-110 PAGE 1934 LINE 106878 :VCVTTPH2W YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vcvttph2w_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VCVTTPH2W 5-110 PAGE 1934 LINE 106882 :VCVTTPH2W ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7C; ZmmReg1 ... & ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vcvttph2w_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VCVTTSH2SI 5-119 PAGE 1943 LINE 107355 define pcodeop vcvttsh2si_avx512fp16 ; :VCVTTSH2SI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x2C; Reg32 ... & XmmReg2_m16 { Reg32 = vcvttsh2si_avx512fp16( XmmReg2_m16 ); } # VCVTTSH2SI 5-119 PAGE 1943 LINE 107358 @ifdef IA64 :VCVTTSH2SI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x2C; Reg64 ... 
& XmmReg2_m16 { Reg64 = vcvttsh2si_avx512fp16( XmmReg2_m16 ); } @endif # VCVTTSH2USI 5-120 PAGE 1944 LINE 107409 define pcodeop vcvttsh2usi_avx512fp16 ; :VCVTTSH2USI Reg32, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0); byte=0x78; Reg32 ... & XmmReg2_m16 { Reg32 = vcvttsh2usi_avx512fp16( XmmReg2_m16 ); } # VCVTTSH2USI 5-120 PAGE 1944 LINE 107412 @ifdef IA64 :VCVTTSH2USI Reg64, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1); byte=0x78; Reg64 ... & XmmReg2_m16 { Reg64 = vcvttsh2usi_avx512fp16( XmmReg2_m16 ); } @endif # VCVTUDQ2PH 5-124 PAGE 1948 LINE 107633 define pcodeop vcvtudq2ph_avx512fp16 ; :VCVTUDQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtudq2ph_avx512fp16( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult[0,64]); } # VCVTUDQ2PH 5-124 PAGE 1948 LINE 107637 :VCVTUDQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vcvtudq2ph_avx512fp16( YmmReg2_m256_m32bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VCVTUDQ2PH 5-124 PAGE 1948 LINE 107641 :VCVTUDQ2PH YmmReg1^YmmOpMask16, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7A; (YmmReg1 & ZmmReg1) ... 
& ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vcvtudq2ph_avx512fp16( ZmmReg2_m512_m32bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# NOTE(review): reflowed from a collapsed single line. Conversion semantics are
# modeled as opaque user-defined pcodeops; merge-masking is applied via
# "build <OpMask>" and the result is zero-extended into the full ZMM register.

# VCVTUQQ2PH 5-130 PAGE 1954 LINE 107951
define pcodeop vcvtuqq2ph_avx512fp16 ;
:VCVTUQQ2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vcvtuqq2ph_avx512fp16( XmmReg2_m128_m64bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult[0,32]);
}

# VCVTUQQ2PH 5-130 PAGE 1954 LINE 107955
:VCVTUQQ2PH XmmReg1^XmmOpMask16, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vcvtuqq2ph_avx512fp16( YmmReg2_m256_m64bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult[0,64]);
}

# VCVTUQQ2PH 5-130 PAGE 1954 LINE 107959
:VCVTUQQ2PH XmmReg1^XmmOpMask16, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W1) & XmmOpMask16; byte=0x7A; (XmmReg1 & ZmmReg1) ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vcvtuqq2ph_avx512fp16( ZmmReg2_m512_m64bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VCVTUSI2SH 5-132 PAGE 1956 LINE 108039
define pcodeop vcvtusi2sh_avx512fp16 ;
:VCVTUSI2SH XmmReg1, evexV5_XmmReg, rm32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm32
{
    local tmp:16 = vcvtusi2sh_avx512fp16( evexV5_XmmReg, rm32 );
    ZmmReg1 = zext(tmp);
}

# VCVTUSI2SH 5-132 PAGE 1956 LINE 108043
@ifdef IA64
:VCVTUSI2SH XmmReg1, evexV5_XmmReg, rm64 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W1) & evexV5_XmmReg; byte=0x7B; (XmmReg1 & ZmmReg1) ... & rm64
{
    local tmp:16 = vcvtusi2sh_avx512fp16( evexV5_XmmReg, rm64 );
    ZmmReg1 = zext(tmp);
}
@endif

# VCVTUW2PH 5-140 PAGE 1964 LINE 108377
define pcodeop vcvtuw2ph_avx512fp16 ;
:VCVTUW2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vcvtuw2ph_avx512fp16( XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VCVTUW2PH 5-140 PAGE 1964 LINE 108380
:VCVTUW2PH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vcvtuw2ph_avx512fp16( YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VCVTUW2PH 5-140 PAGE 1964 LINE 108383
:VCVTUW2PH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vcvtuw2ph_avx512fp16( ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VCVTW2PH 5-142 PAGE 1966 LINE 108464
define pcodeop vcvtw2ph_avx512fp16 ;
:VCVTW2PH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vcvtw2ph_avx512fp16( XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VCVTW2PH 5-142 PAGE 1966 LINE 108467
:VCVTW2PH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vcvtw2ph_avx512fp16( YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VCVTW2PH 5-142 PAGE 1966 LINE 108470
:VCVTW2PH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vcvtw2ph_avx512fp16( ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VDIVPH 5-147 PAGE 1971 LINE 108747
define pcodeop vdivph_avx512fp16 ;
:VDIVPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vdivph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VDIVPH 5-147 PAGE 1971 LINE 108750
:VDIVPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5E; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vdivph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VDIVPH 5-147 PAGE 1971 LINE 108753
:VDIVPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5E; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vdivph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VDIVSH 5-149 PAGE 1973 LINE 108842
define pcodeop vdivsh_avx512fp16 ;
:VDIVSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
    XmmResult = vdivsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = evexV5_XmmReg[16,112]; #DEST[127:16] := SRC1[127:16]
    ZmmReg1 = zext(XmmResult);
}

# VDPBF16PS 5-150 PAGE 1974 LINE 108901
define pcodeop vdpbf16ps_avx512vl ;
:VDPBF16PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x52; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vdpbf16ps_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# VDPBF16PS 5-150 PAGE 1974 LINE 108905
:VDPBF16PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x52; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vdpbf16ps_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# VDPBF16PS 5-150 PAGE 1974 LINE 108909
define pcodeop vdpbf16ps_avx512f ;
:VDPBF16PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x52; ZmmReg1 ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vdpbf16ps_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109930
define pcodeop vfcmaddcph_avx512fp16 ;
:VFCMADDCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfcmaddcph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109934
:VFCMADDCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfcmaddcph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109938
:VFCMADDCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x56; ZmmReg1 ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfcmaddcph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109942
define pcodeop vfmaddcph_avx512fp16 ;
:VFMADDCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmaddcph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109946
:VFMADDCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmaddcph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# VFCMADDCPH/VFMADDCPH 5-170 PAGE 1994 LINE 109950
:VFMADDCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x56; ZmmReg1 ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmaddcph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# VFCMADDCSH/VFMADDCSH 5-173 PAGE 1997 LINE 110095
define pcodeop vfcmaddcsh_avx512fp16 ;
:VFCMADDCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m32
{
    XmmResult = vfcmaddcsh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
    XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
    ZmmReg1 = zext(XmmResult);
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VFCMADDCSH/VFMADDCSH 5-173 PAGE 1997 LINE 110100
define pcodeop vfmaddcsh_avx512fp16 ;
:VFMADDCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{
    XmmResult = vfmaddcsh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m32 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
    XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
    ZmmReg1 = zext(XmmResult);
}

# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110198
define pcodeop vfcmulcph_avx512fp16 ;
:VFCMULCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0xD6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfcmulcph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m32bcst );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110202
:VFCMULCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0xD6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfcmulcph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m32bcst );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110206
:VFCMULCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0xD6; ZmmReg1 ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfcmulcph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110210
define pcodeop vfmulcph_avx512fp16 ;
:VFMULCPH XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0xD6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmulcph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m32bcst );
    XmmMask = XmmReg1;
    build XmmOpMask32;
    ZmmReg1 = zext(XmmResult);
}

# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110213
:VFMULCPH YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0xD6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmulcph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m32bcst );
    YmmMask = YmmReg1;
    build YmmOpMask32;
    ZmmReg1 = zext(YmmResult);
}

# VFCMULCPH/VFMULCPH 5-175 PAGE 1999 LINE 110216
:VFMULCPH ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0xD6; ZmmReg1 ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmulcph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m32bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask32;
    ZmmReg1 = ZmmResult;
}

# VFCMULCSH/VFMULCSH 5-178 PAGE 2002 LINE 110374
define pcodeop vfcmulcsh_avx512fp16 ;
:VFCMULCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F2) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xD7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{
    XmmResult = vfcmulcsh_avx512fp16( evexV5_XmmReg, XmmReg2_m32 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
    XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
    ZmmReg1 = zext(XmmResult);
}

# VFCMULCSH/VFMULCSH 5-178 PAGE 2002 LINE 110379
define pcodeop vfmulcsh_avx512fp16 ;
:VFMULCSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m32 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xD7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32
{
    XmmResult = vfmulcsh_avx512fp16( evexV5_XmmReg, XmmReg2_m32 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,32], XmmOpMask[0,1], XmmResult[0,32], XmmMask[0,32]);
    XmmResult[32,96] = evexV5_XmmReg[32,96]; # DEST[127:32] := src1[127:32] // copy upper part of src1
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111596
define pcodeop vfmadd132ph_avx512fp16 ;
:VFMADD132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmadd132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111599
:VFMADD132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmadd132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111602
:VFMADD132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x98; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmadd132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111605
define pcodeop vfmadd213ph_avx512fp16 ;
:VFMADD213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmadd213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111608
:VFMADD213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmadd213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111611
:VFMADD213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xA8; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmadd213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111614
define pcodeop vfmadd231ph_avx512fp16 ;
:VFMADD231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmadd231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111617
:VFMADD231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmadd231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111620
:VFMADD231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xB8; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmadd231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111623
define pcodeop vfnmadd132ph_avx512fp16 ;
:VFNMADD132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfnmadd132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111627
:VFNMADD132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfnmadd132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111631
:VFNMADD132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x9C; ZmmReg1 ...
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfnmadd132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111635
define pcodeop vfnmadd213ph_avx512fp16 ;
:VFNMADD213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfnmadd213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-201 PAGE 2025 LINE 111639
:VFNMADD213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfnmadd213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111655
:VFNMADD213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xAC; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfnmadd213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111659
define pcodeop vfnmadd231ph_avx512fp16 ;
:VFNMADD231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfnmadd231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111663
:VFNMADD231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfnmadd231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MADD[132,213,231]PH 5-202 PAGE 2026 LINE 111667
:VFNMADD231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xBC; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfnmadd231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112545
define pcodeop vfmadd132sh_avx512fp16 ;
:VFMADD132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
    XmmResult = vfmadd132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112548
define pcodeop vfmadd213sh_avx512fp16 ;
:VFMADD213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
    XmmResult = vfmadd213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112551
define pcodeop vfmadd231sh_avx512fp16 ;
:VFMADD231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m16
{
    XmmResult = vfmadd231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
    ZmmReg1 = zext(XmmResult);
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112554
define pcodeop vfnmadd132sh_avx512fp16 ;
:VFNMADD132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
    XmmResult = vfnmadd132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112557
define pcodeop vfnmadd213sh_avx512fp16 ;
:VFNMADD213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
    XmmResult = vfnmadd213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MADD[132,213,231]SH 5-219 PAGE 2043 LINE 112560
define pcodeop vfnmadd231sh_avx512fp16 ;
:VFNMADD231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
    XmmResult = vfnmadd231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 );
    XmmMask = XmmReg1;
    build XmmOpMask;
    conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
    XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
    ZmmReg1 = zext(XmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113276
define pcodeop vfmaddsub132ph_avx512fp16 ;
:VFMADDSUB132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmaddsub132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113280
:VFMADDSUB132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmaddsub132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113284
:VFMADDSUB132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x96; ZmmReg1 ...
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmaddsub132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113288
define pcodeop vfmaddsub213ph_avx512fp16 ;
:VFMADDSUB213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmaddsub213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113292
:VFMADDSUB213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmaddsub213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113296
:VFMADDSUB213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xA6; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmaddsub213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113300
define pcodeop vfmaddsub231ph_avx512fp16 ;
:VFMADDSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmaddsub231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113304
:VFMADDSUB231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmaddsub231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VFMADDSUB132PH/VFMADDSUB213PH/VFMADDSUB231PH 5-232 PAGE 2056 LINE 113308
:VFMADDSUB231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xB6; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmaddsub231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114283
define pcodeop vfmsub132ph_avx512fp16 ;
:VFMSUB132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmsub132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114286
:VFMSUB132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmsub132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114289
:VFMSUB132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x9A; ZmmReg1 ...
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmsub132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# NOTE(review): reflowed from a collapsed single line; tokens unchanged.

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114292
define pcodeop vfmsub213ph_avx512fp16 ;
:VFMSUB213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmsub213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114295
:VFMSUB213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmsub213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114298
:VFMSUB213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xAA; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmsub213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114301
define pcodeop vfmsub231ph_avx512fp16 ;
:VFMSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfmsub231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114304
:VFMSUB231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfmsub231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114307
:VFMSUB231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xBA; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    ZmmResult = vfmsub231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst );
    ZmmMask = ZmmReg1;
    build ZmmOpMask16;
    ZmmReg1 = ZmmResult;
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114310
define pcodeop vfnmsub132ph_avx512fp16 ;
:VFNMSUB132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    XmmResult = vfnmsub132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst );
    XmmMask = XmmReg1;
    build XmmOpMask16;
    ZmmReg1 = zext(XmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114314
:VFNMSUB132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{
    YmmResult = vfnmsub132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst );
    YmmMask = YmmReg1;
    build YmmOpMask16;
    ZmmReg1 = zext(YmmResult);
}

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114318
:VFNMSUB132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x9E; ZmmReg1 ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the 512-bit VFNMSUB132PH constructor (head is on the preceding line).
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vfnmsub132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114322
define pcodeop vfnmsub213ph_avx512fp16 ;
:VFNMSUB213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vfnmsub213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]PH 5-250 PAGE 2074 LINE 114326
:VFNMSUB213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vfnmsub213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114342
:VFNMSUB213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xAE; ZmmReg1 ...
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vfnmsub213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114346
define pcodeop vfnmsub231ph_avx512fp16 ;
:VFNMSUB231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vfnmsub231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114350
:VFNMSUB231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vfnmsub231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VF[,N]MSUB[132,213,231]PH 5-251 PAGE 2075 LINE 114354
:VFNMSUB231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xBE; ZmmReg1 ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the 512-bit VFNMSUB231PH constructor (head is on the preceding line).
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vfnmsub231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# Scalar FP16 forms: only element 0 is computed; bits [127:16] of DEST are
# copied from SRC (per the generated comments below) and the upper ZMM is zeroed.
# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115111
define pcodeop vfmsub132sh_avx512fp16 ;
:VFMSUB132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vfmsub132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115114
define pcodeop vfmsub213sh_avx512fp16 ;
:VFMSUB213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vfmsub213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115117
define pcodeop vfmsub231sh_avx512fp16 ;
:VFMSUB231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m16
{ XmmResult = vfmsub231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115120
define pcodeop vfnmsub132sh_avx512fp16 ;
:VFNMSUB132SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vfnmsub132sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115124
define pcodeop vfnmsub213sh_avx512fp16 ;
:VFNMSUB213SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vfnmsub213sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VF[,N]MSUB[132,213,231]SH 5-265 PAGE 2089 LINE 115128
define pcodeop vfnmsub231sh_avx512fp16 ;
:VFNMSUB231SH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1) ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the scalar VFNMSUB231SH constructor (head is on the preceding line).
& XmmReg2_m16
{ XmmResult = vfnmsub231sh_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115851
define pcodeop vfmsubadd132ph_avx512fp16 ;
:VFMSUBADD132PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vfmsubadd132ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115855
:VFMSUBADD132PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vfmsubadd132ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115859
:VFMSUBADD132PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x97; ZmmReg1 ...
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vfmsubadd132ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115863
define pcodeop vfmsubadd213ph_avx512fp16 ;
:VFMSUBADD213PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vfmsubadd213ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115867
:VFMSUBADD213PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vfmsubadd213ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115871
:VFMSUBADD213PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xA7; ZmmReg1 ...
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vfmsubadd213ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115875
define pcodeop vfmsubadd231ph_avx512fp16 ;
:VFMSUBADD231PH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vfmsubadd231ph_avx512fp16( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115879
:VFMSUBADD231PH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vfmsubadd231ph_avx512fp16( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VFMSUBADD132PH/VFMSUBADD213PH/VFMSUBADD231PH 5-278 PAGE 2102 LINE 115883
:VFMSUBADD231PH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0xB7; ZmmReg1 ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the 512-bit VFMSUBADD231PH constructor (head is on the preceding line).
& ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vfmsubadd231ph_avx512fp16( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# Display-only subtables decoding the VFPCLASS* imm8 category bitmask, one bit
# per FP class, into the "{...}" operand text; ClassOp exports imm8 unchanged.
ClassNeg: "Negative" is imm8_6=1 & imm8_7=0 {}
ClassNeg: "Negative,SNAN" is imm8_6=1 & imm8_7=1 {}
ClassNeg: "SNAN" is imm8_6=0 & imm8_7=1 {}
ClassDenorm: "Denormal" is imm8_5=1 & imm8_6_7=0 {}
ClassDenorm: "Denormal,"^ClassNeg is imm8_5=1 & ClassNeg {}
ClassDenorm: ClassNeg is imm8_5=0 & ClassNeg {}
ClassNegI: "NegINF" is imm8_4=1 & imm8_5_7=0 {}
ClassNegI: "NegINF,"^ClassDenorm is imm8_4=1 & ClassDenorm {}
ClassNegI: ClassDenorm is imm8_4=0 & ClassDenorm {}
ClassPosI: "PosINF" is imm8_3=1 & imm8_4_7=0 {}
ClassPosI: "PosINF,"^ClassNegI is imm8_3=1 & ClassNegI {}
ClassPosI: ClassNegI is imm8_3=0 & ClassNegI {}
ClassNegZ: "NegZero" is imm8_2=1 & imm8_3_7=0 {}
ClassNegZ: "NegZero,"^ClassPosI is imm8_2=1 & ClassPosI {}
ClassNegZ: ClassPosI is imm8_2=0 & ClassPosI {}
ClassPosZ: "PosZero" is imm8_1=1 & imm8_2_7=0 {}
ClassPosZ: "PosZero,"^ClassNegZ is imm8_1=1 & ClassNegZ {}
ClassPosZ: ClassNegZ is imm8_1=0 & ClassNegZ {}
ClassQNaN: "QNAN" is imm8_0=1 & imm8_1_7=0 {}
ClassQNaN: "QNAN,"^ClassPosZ is imm8_0=1 & ClassPosZ {}
ClassQNaN: ClassPosZ is imm8_0=0 & ClassPosZ {}
ClassQNaN: "" is imm8=0 {}
ClassOp: "{"^ClassQNaN^"}" is ClassQNaN & imm8 { export *[const]:1 imm8;}

# VFPCLASSPH 5-332 PAGE 2156 LINE 118786
define pcodeop vfpclassph_avx512fp16 ;
:VFPCLASSPH KReg_reg^XmmOpMask, XmmReg2_m128_m16bcst, ClassOp is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x66; KReg_reg ...
& XmmReg2_m128_m16bcst; ClassOp
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ local tmp:8 = vfpclassph_avx512fp16( XmmReg2_m128_m16bcst, XmmOpMask, ClassOp ); KReg_reg = tmp; }

# VFPCLASSPH 5-332 PAGE 2156 LINE 118792
:VFPCLASSPH KReg_reg^XmmOpMask, YmmReg2_m256_m16bcst, ClassOp is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x66; KReg_reg ... & YmmReg2_m256_m16bcst; ClassOp
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ local tmp:8 = vfpclassph_avx512fp16( YmmReg2_m256_m16bcst, XmmOpMask, ClassOp ); KReg_reg = tmp; }

# VFPCLASSPH 5-332 PAGE 2156 LINE 118798
:VFPCLASSPH KReg_reg^XmmOpMask, ZmmReg2_m512_m16bcst, ClassOp is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x66; KReg_reg ... & ZmmReg2_m512_m16bcst; ClassOp
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ local tmp:8 = vfpclassph_avx512fp16( ZmmReg2_m512_m16bcst, XmmOpMask, ClassOp ); KReg_reg = tmp; }

# VFPCLASSSH 5-339 PAGE 2163 LINE 119114
define pcodeop vfpclasssh_avx512fp16 ;
:VFPCLASSSH KReg_reg^XmmOpMask, XmmReg2_m16, ClassOp is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x67; KReg_reg ... & XmmReg2_m16; ClassOp
{ local tmp:8 = vfpclasssh_avx512fp16( XmmReg2_m16, XmmOpMask, ClassOp ); KReg_reg = tmp; }

# VGETEXPPH 5-359 PAGE 2183 LINE 120137
define pcodeop vgetexpph_avx512fp16 ;
:VGETEXPPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16; byte=0x42; (XmmReg1 & ZmmReg1) ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the 128-bit VGETEXPPH constructor (head is on the preceding line).
& XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vgetexpph_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VGETEXPPH 5-359 PAGE 2183 LINE 120141
:VGETEXPPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16; byte=0x42; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vgetexpph_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VGETEXPPH 5-359 PAGE 2183 LINE 120145
:VGETEXPPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16; byte=0x42; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vgetexpph_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VGETEXPSH 5-368 PAGE 2192 LINE 120571
define pcodeop vgetexpsh_avx512fp16 ;
:VGETEXPSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x43; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vgetexpsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VGETMANTPH 5-376 PAGE 2200 LINE 120939
define pcodeop vgetmantph_avx512fp16 ;
:VGETMANTPH XmmReg1^XmmOpMask16,XmmReg2_m128_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask16; byte=0x26; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128_m16bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vgetmantph_avx512fp16( XmmReg2_m128_m16bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VGETMANTPH 5-376 PAGE 2200 LINE 120943
:VGETMANTPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask16; byte=0x26; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vgetmantph_avx512fp16( YmmReg2_m256_m16bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VGETMANTPH 5-376 PAGE 2200 LINE 120947
:VGETMANTPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask16; byte=0x26; ZmmReg1 ... & ZmmReg2_m512_m16bcst; imm8
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vgetmantph_avx512fp16( ZmmReg2_m512_m16bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VGETMANTSH 5-385 PAGE 2209 LINE 121406
define pcodeop vgetmantsh_avx512fp16 ;
:VGETMANTSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16, imm8 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x27; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16; imm8
{ XmmResult = vgetmantsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VMAXPH 5-400 PAGE 2224 LINE 122191
define pcodeop vmaxph_avx512fp16 ;
:VMAXPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the 128-bit VMAXPH constructor (head is on the preceding line).
& XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vmaxph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VMAXPH 5-400 PAGE 2224 LINE 122194
:VMAXPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vmaxph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VMAXPH 5-400 PAGE 2224 LINE 122197
:VMAXPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5F; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vmaxph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VMAXSH 5-402 PAGE 2226 LINE 122291
define pcodeop vmaxsh_avx512fp16 ;
:VMAXSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vmaxsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VMINPH 5-404 PAGE 2228 LINE 122372
define pcodeop vminph_avx512fp16 ;
:VMINPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vminph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VMINPH 5-404 PAGE 2228 LINE 122375
:VMINPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vminph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VMINPH 5-404 PAGE 2228 LINE 122378
:VMINPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5D; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vminph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VMINSH 5-406 PAGE 2230 LINE 122472
define pcodeop vminsh_avx512fp16 ;
:VMINSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{ XmmResult = vminsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
ZmmReg1 = zext(XmmResult); }

# VMOVSH 5-408 PAGE 2232 LINE 122557
define pcodeop vmovsh_avx512fp16 ;
:VMOVSH XmmReg1^XmmOpMask, m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask; byte=0x10; (XmmReg1 & ZmmReg1) ...
& m16
{ local tmp = m16; XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], tmp, XmmMask[0,16]); XmmResult[16,112] = 0; ZmmReg1 = zext(XmmResult); }

# VMOVSH 5-408 PAGE 2232 LINE 122559
:VMOVSH m16^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask; byte=0x11; XmmReg1 ... & m16
{ local tmp:2 = XmmReg1(0); local mask = m16; build XmmOpMask; conditionalAssign(tmp, XmmOpMask[0,1], tmp, mask); m16 = tmp; }

# VMOVSH 5-408 PAGE 2232 LINE 122561
# WARNING: duplicate opcode EVEX.LLIG.F3.MAP5.W0 10 /r last seen on 5-408 PAGE 2232 LINE 122557 for "VMOVSH xmm1{k1}{z}, xmm2, xmm3"
:VMOVSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x10; (XmmReg1 & ZmmReg1) & (mod=0x3 & XmmReg2)
{ local tmp = XmmReg2(0); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], tmp, XmmMask[0,16]); XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
ZmmReg1 = zext(XmmResult); }

# VMOVSH 5-408 PAGE 2232 LINE 122564
# WARNING: duplicate opcode EVEX.LLIG.F3.MAP5.W0 11 /r last seen on 5-408 PAGE 2232 LINE 122559 for "VMOVSH xmm1{k1}{z}, xmm2, xmm3"
:VMOVSH XmmReg2^XmmOpMask, evexV5_XmmReg, XmmReg1 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
{ XmmResult = XmmReg1; XmmMask = XmmReg2; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
ZmmReg2 = zext(XmmResult); }

# VMOVW 5-410 PAGE 2234 LINE 122642
define pcodeop vmovw_avx512fp16 ;
:VMOVW XmmReg1, rm16 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_WIG); byte=0x6E; (XmmReg1 & ZmmReg1) ...
& rm16
{ local tmp:2 = rm16 ; ZmmReg1 = zext(tmp); }

# VMOVW 5-410 PAGE 2234 LINE 122644
:VMOVW rm16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP5) & $(VEX_WIG); byte=0x7E; XmmReg1 ... & rm16
{ rm16 = XmmReg1(0); }

# VMULPH 5-411 PAGE 2235 LINE 122691
define pcodeop vmulph_avx512fp16 ;
:VMULPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vmulph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VMULPH 5-411 PAGE 2235 LINE 122694
:VMULPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x59; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vmulph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VMULPH 5-411 PAGE 2235 LINE 122697
:VMULPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x59; ZmmReg1 ... & ZmmReg2_m512_m16bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vmulph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VMULSH 5-413 PAGE 2237 LINE 122785
define pcodeop vmulsh_avx512fp16 ;
:VMULSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x59; (XmmReg1 & ZmmReg1) ...
# NOTE(review): newlines restored from a whitespace-collapsed extract so inline
# '#' comments end their lines; tokens unchanged.
# Tail of the scalar VMULSH constructor (head is on the preceding line).
& XmmReg2_m16
{ XmmResult = vmulsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = evexV5_XmmReg[16,112]; # DEST[127:16] := SRC1[127:16]
ZmmReg1 = zext(XmmResult); }

# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122845
define pcodeop vp2intersectd_avx512vl ;
:VP2INTERSECTD KReg_reg, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & evexV5_XmmReg; byte=0x68; KReg_reg ... & XmmReg2_m128_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ KReg_reg = vp2intersectd_avx512vl( evexV5_XmmReg, XmmReg2_m128_m32bcst ); }

# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122849
:VP2INTERSECTD KReg_reg, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & evexV5_YmmReg; byte=0x68; KReg_reg ... & YmmReg2_m256_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ KReg_reg = vp2intersectd_avx512vl( evexV5_YmmReg, YmmReg2_m256_m32bcst ); }

# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122853
define pcodeop vp2intersectd_avx512f ;
:VP2INTERSECTD KReg_reg, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & evexV5_ZmmReg; byte=0x68; KReg_reg ... & ZmmReg2_m512_m32bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ KReg_reg = vp2intersectd_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); }

# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122857
define pcodeop vp2intersectq_avx512vl ;
:VP2INTERSECTQ KReg_reg, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & evexV5_XmmReg; byte=0x68; KReg_reg ...
& XmmReg2_m128_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ KReg_reg = vp2intersectq_avx512vl( evexV5_XmmReg, XmmReg2_m128_m64bcst ); }

# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122861
:VP2INTERSECTQ KReg_reg, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & evexV5_YmmReg; byte=0x68; KReg_reg ... & YmmReg2_m256_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ KReg_reg = vp2intersectq_avx512vl( evexV5_YmmReg, YmmReg2_m256_m64bcst ); }

# VP2INTERSECTD/VP2INTERSECTQ 5-414 PAGE 2238 LINE 122865
define pcodeop vp2intersectq_avx512f ;
:VP2INTERSECTQ KReg_reg, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & evexV5_ZmmReg; byte=0x68; KReg_reg ... & ZmmReg2_m512_m64bcst
[ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ KReg_reg = vp2intersectq_avx512f( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124606
define pcodeop vpcompressb_avx512_vbmi2 ;
:VPCOMPRESSB m128^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x63; XmmReg1 ...
& m128
[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
{ XmmResult = vpcompressb_avx512_vbmi2( XmmReg1, XmmOpMask ); m128 = XmmResult; }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124608
# WARNING: duplicate opcode EVEX.128.66.0F38.W0 63 /r last seen on 5-449 PAGE 2273 LINE 124606 for "VPCOMPRESSB xmm1{k1}{z}, xmm2"
:VPCOMPRESSB XmmReg2^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x63; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
{ XmmResult = vpcompressb_avx512_vbmi2( XmmReg1, XmmOpMask ); ZmmReg2 = zext(XmmResult); }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124610
:VPCOMPRESSB m256^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x63; YmmReg1 ... & m256
[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
{ YmmResult = vpcompressb_avx512_vbmi2( YmmReg1, YmmOpMask ); m256 = YmmResult; }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124612
# WARNING: duplicate opcode EVEX.256.66.0F38.W0 63 /r last seen on 5-449 PAGE 2273 LINE 124610 for "VPCOMPRESSB ymm1{k1}{z}, ymm2"
:VPCOMPRESSB YmmReg2^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x63; YmmReg1 & (mod=0x3 & (YmmReg2 & ZmmReg2))
{ YmmResult = vpcompressb_avx512_vbmi2( YmmReg1, YmmOpMask ); ZmmReg2 = zext(YmmResult); }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124614
:VPCOMPRESSB m512^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x63; ZmmReg1 ...
& m512
[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
{ ZmmResult = vpcompressb_avx512_vbmi2( ZmmReg1, ZmmOpMask ); m512 = ZmmResult; }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124616
# WARNING: duplicate opcode EVEX.512.66.0F38.W0 63 /r last seen on 5-449 PAGE 2273 LINE 124614 for "VPCOMPRESSB zmm1{k1}{z}, zmm2"
:VPCOMPRESSB ZmmReg2^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x63; ZmmReg1 & (mod=0x3 & ZmmReg2)
{ ZmmResult = vpcompressb_avx512_vbmi2( ZmmReg1, ZmmOpMask ); ZmmReg2 = ZmmResult; }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124618
define pcodeop vpcompressw_avx512_vbmi2 ;
:VPCOMPRESSW m128^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x63; XmmReg1 ... & m128
[ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar)
{ XmmResult = vpcompressw_avx512_vbmi2( XmmReg1, XmmOpMask ); m128 = XmmResult; }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124620
# WARNING: duplicate opcode EVEX.128.66.0F38.W1 63 /r last seen on 5-449 PAGE 2273 LINE 124618 for "VPCOMPRESSW xmm1{k1}{z}, xmm2"
:VPCOMPRESSW XmmReg2^XmmOpMask, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x63; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2))
{ XmmResult = vpcompressw_avx512_vbmi2( XmmReg1, XmmOpMask ); ZmmReg2 = zext(XmmResult); }

# VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124622
:VPCOMPRESSW m256^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x63; YmmReg1 ...
& m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { YmmResult = vpcompressw_avx512_vbmi2( YmmReg1, YmmOpMask ); m256 = YmmResult; } # VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124624 # WARNING: duplicate opcode EVEX.256.66.0F38.W1 63 /r last seen on 5-449 PAGE 2273 LINE 124622 for "VPCOMPRESSW ymm1{k1}{z}, ymm2" :VPCOMPRESSW YmmReg2^YmmOpMask, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x63; YmmReg1 & (mod=0x3 & (YmmReg2 & ZmmReg2)) { YmmResult = vpcompressw_avx512_vbmi2( YmmReg1, YmmOpMask ); ZmmReg2 = zext(YmmResult); } # VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124626 :VPCOMPRESSW m512^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x63; ZmmReg1 ... & m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { ZmmResult = vpcompressw_avx512_vbmi2( ZmmReg1, ZmmOpMask ); m512 = ZmmResult; } # VPCOMPRESSB/VCOMPRESSW 5-449 PAGE 2273 LINE 124628 # WARNING: duplicate opcode EVEX.512.66.0F38.W1 63 /r last seen on 5-449 PAGE 2273 LINE 124626 for "VPCOMPRESSW zmm1{k1}{z}, zmm2" :VPCOMPRESSW ZmmReg2^ZmmOpMask, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x63; ZmmReg1 & (mod=0x3 & ZmmReg2) { ZmmResult = vpcompressw_avx512_vbmi2( ZmmReg1, ZmmOpMask ); ZmmReg2 = ZmmResult; } # VPDPBUSD 5-459 PAGE 2283 LINE 125092 define pcodeop vpdpbusd_avx512_vnni ; :VPDPBUSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x50; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpdpbusd_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPDPBUSD 5-459 PAGE 2283 LINE 125097 :VPDPBUSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x50; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpdpbusd_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPDPBUSD 5-459 PAGE 2283 LINE 125102 :VPDPBUSD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x50; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpdpbusd_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPDPBUSDS 5-461 PAGE 2285 LINE 125211 define pcodeop vpdpbusds_avx512_vnni ; :VPDPBUSDS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpdpbusds_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPDPBUSDS 5-461 PAGE 2285 LINE 125217 :VPDPBUSDS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x51; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpdpbusds_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPDPBUSDS 5-461 PAGE 2285 LINE 125223 :VPDPBUSDS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x51; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpdpbusds_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPDPWSSD 5-463 PAGE 2287 LINE 125329 define pcodeop vpdpwssd_avx512_vnni ; :VPDPWSSD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x52; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpdpwssd_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPDPWSSD 5-463 PAGE 2287 LINE 125334 :VPDPWSSD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x52; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpdpwssd_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPDPWSSD 5-463 PAGE 2287 LINE 125339 :VPDPWSSD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x52; ZmmReg1 ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpdpwssd_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPDPWSSDS 5-465 PAGE 2289 LINE 125436 define pcodeop vpdpwssds_avx512_vnni ; :VPDPWSSDS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x53; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpdpwssds_avx512_vnni( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPDPWSSDS 5-465 PAGE 2289 LINE 125442 :VPDPWSSDS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x53; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpdpwssds_avx512_vnni( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPDPWSSDS 5-465 PAGE 2289 LINE 125448 :VPDPWSSDS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x53; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpdpwssds_avx512_vnni( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPERMB 5-471 PAGE 2295 LINE 125727 define pcodeop vpermb_avx512vl ; :VPERMB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0x8D; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpermb_avx512vl( evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPERMB 5-471 PAGE 2295 LINE 125730 :VPERMB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0x8D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { YmmResult = vpermb_avx512vl( evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPERMB 5-471 PAGE 2295 LINE 125733 define pcodeop vpermb_avx512_vbmi ; :VPERMB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x8D; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { ZmmResult = vpermb_avx512_vbmi( evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPERMI2B 5-476 PAGE 2300 LINE 125958 define pcodeop vpermi2b_avx512vl ; :VPERMI2B XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0x75; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpermi2b_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPERMI2B 5-476 PAGE 2300 LINE 125961 :VPERMI2B YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0x75; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { YmmResult = vpermi2b_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPERMI2B 5-476 PAGE 2300 LINE 125964 define pcodeop vpermi2b_avx512_vbmi ; :VPERMI2B ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x75; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { ZmmResult = vpermi2b_avx512_vbmi( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPERMT2B 5-503 PAGE 2327 LINE 127434 define pcodeop vpermt2b_avx512vl ; :VPERMT2B XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8 & evexV5_XmmReg; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpermt2b_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPERMT2B 5-503 PAGE 2327 LINE 127437 :VPERMT2B YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8 & evexV5_YmmReg; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { YmmResult = vpermt2b_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPERMT2B 5-503 PAGE 2327 LINE 127440 define pcodeop vpermt2b_avx512_vbmi ; :VPERMT2B ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x7D; ZmmReg1 ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { ZmmResult = vpermt2b_avx512_vbmi( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127524 define pcodeop vpermt2w_avx512vl ; :VPERMT2W XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x7D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpermt2w_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127527 :VPERMT2W YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x7D; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { YmmResult = vpermt2w_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127530 define pcodeop vpermt2w_avx512bw ; :VPERMT2W ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x7D; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { ZmmResult = vpermt2w_avx512bw( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127533 define pcodeop vpermt2d_avx512vl ; :VPERMT2D XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x7E; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpermt2d_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127536 :VPERMT2D YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x7E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpermt2d_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127539 define pcodeop vpermt2d_avx512f ; :VPERMT2D ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x7E; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpermt2d_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127542 define pcodeop vpermt2q_avx512vl ; :VPERMT2Q XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x7E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpermt2q_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127545 :VPERMT2Q YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x7E; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpermt2q_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127548 define pcodeop vpermt2q_avx512f ; :VPERMT2Q ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x7E; ZmmReg1 ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpermt2q_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127551 define pcodeop vpermt2ps_avx512vl ; :VPERMT2PS XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x7F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpermt2ps_avx512vl( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127554 :VPERMT2PS YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x7F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpermt2ps_avx512vl( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPERMT2W/D/Q/PS/PD 5-505 PAGE 2329 LINE 127557 define pcodeop vpermt2ps_avx512f ; :VPERMT2PS ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x7F; ZmmReg1 ... 
& ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpermt2ps_avx512f( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127806 define pcodeop vpexpandb_avx512_vbmi2 ; :VPEXPANDB XmmReg1^XmmOpMask, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask; byte=0x62; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { XmmResult = vpexpandb_avx512_vbmi2( XmmReg2_m128, XmmOpMask ); ZmmReg1 = zext(XmmResult); } # VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127810 :VPEXPANDB YmmReg1^YmmOpMask, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask; byte=0x62; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { YmmResult = vpexpandb_avx512_vbmi2( YmmReg2_m256, YmmOpMask ); ZmmReg1 = zext(YmmResult); } # VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127814 :VPEXPANDB ZmmReg1^ZmmOpMask, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask; byte=0x62; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { ZmmResult = vpexpandb_avx512_vbmi2( ZmmReg2_m512, ZmmOpMask ); ZmmReg1 = ZmmResult; } # VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127818 define pcodeop vpexpandw_avx512_vbmi2 ; :VPEXPANDW XmmReg1^XmmOpMask, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask; byte=0x62; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { XmmResult = vpexpandw_avx512_vbmi2( XmmReg2_m128, XmmOpMask ); ZmmReg1 = zext(XmmResult); } # VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127822 :VPEXPANDW YmmReg1^YmmOpMask, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask; byte=0x62; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { YmmResult = vpexpandw_avx512_vbmi2( YmmReg2_m256, YmmOpMask ); ZmmReg1 = zext(YmmResult); } # VPEXPANDB/VPEXPANDW 5-510 PAGE 2334 LINE 127826 :VPEXPANDW ZmmReg1^ZmmOpMask, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask; byte=0x62; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 3; ] # (TupleType Tuple1 Scalar) { ZmmResult = vpexpandw_avx512_vbmi2( ZmmReg2_m512, ZmmOpMask ); ZmmReg1 = ZmmResult; } # VPMADD52HUQ 5-534 PAGE 2358 LINE 128946 define pcodeop vpmadd52huq_avx512_ifma ; :VPMADD52HUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0xB5; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpmadd52huq_avx512_ifma( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPMADD52HUQ 5-534 PAGE 2358 LINE 128950 :VPMADD52HUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0xB5; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpmadd52huq_avx512_ifma( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPMADD52HUQ 5-534 PAGE 2358 LINE 128954 :VPMADD52HUQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0xB5; ZmmReg1 ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpmadd52huq_avx512_ifma( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPMADD52LUQ 5-536 PAGE 2360 LINE 129044 define pcodeop vpmadd52luq_avx512_ifma ; :VPMADD52LUQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0xB4; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpmadd52luq_avx512_ifma( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPMADD52LUQ 5-536 PAGE 2360 LINE 129048 :VPMADD52LUQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0xB4; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpmadd52luq_avx512_ifma( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPMADD52LUQ 5-536 PAGE 2360 LINE 129052 :VPMADD52LUQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0xB4; ZmmReg1 ... 
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpmadd52luq_avx512_ifma( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPMULTISHIFTQB 5-571 PAGE 2395 LINE 130845 define pcodeop vpmultishiftqb_avx512_vbmi ; :VPMULTISHIFTQB XmmReg1^XmmOpMask8, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask8 & evexV5_XmmReg; byte=0x83; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpmultishiftqb_avx512_vbmi( evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPMULTISHIFTQB 5-571 PAGE 2395 LINE 130849 :VPMULTISHIFTQB YmmReg1^YmmOpMask8, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask8 & evexV5_YmmReg; byte=0x83; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpmultishiftqb_avx512_vbmi( evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPMULTISHIFTQB 5-571 PAGE 2395 LINE 130853 :VPMULTISHIFTQB ZmmReg1^ZmmOpMask8, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask8 & evexV5_ZmmReg; byte=0x83; ZmmReg1 ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpmultishiftqb_avx512_vbmi( evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPOPCNT 5-573 PAGE 2397 LINE 130938 define pcodeop vpopcntb_avx512_bitalg ; :VPOPCNTB XmmReg1^XmmOpMask8, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x54; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpopcntb_avx512_bitalg( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask8; ZmmReg1 = zext(XmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130941 :VPOPCNTB YmmReg1^YmmOpMask8, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x54; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { YmmResult = vpopcntb_avx512_bitalg( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask8; ZmmReg1 = zext(YmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130944 :VPOPCNTB ZmmReg1^ZmmOpMask8, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask8; byte=0x54; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { ZmmResult = vpopcntb_avx512_bitalg( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask8; ZmmReg1 = ZmmResult; } # VPOPCNT 5-573 PAGE 2397 LINE 130947 define pcodeop vpopcntw_avx512_bitalg ; :VPOPCNTW XmmReg1^XmmOpMask16, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16; byte=0x54; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpopcntw_avx512_bitalg( XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130950 :VPOPCNTW YmmReg1^YmmOpMask16, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16; byte=0x54; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { YmmResult = vpopcntw_avx512_bitalg( YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130953 :VPOPCNTW ZmmReg1^ZmmOpMask16, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16; byte=0x54; ZmmReg1 ... 
& ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { ZmmResult = vpopcntw_avx512_bitalg( ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VPOPCNT 5-573 PAGE 2397 LINE 130956 define pcodeop vpopcntd_avx512_vpopcntdq ; :VPOPCNTD XmmReg1^XmmOpMask32, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x55; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpopcntd_avx512_vpopcntdq( XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130959 :VPOPCNTD YmmReg1^YmmOpMask32, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x55; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpopcntd_avx512_vpopcntdq( YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130962 :VPOPCNTD ZmmReg1^ZmmOpMask32, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32; byte=0x55; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpopcntd_avx512_vpopcntdq( ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; } # VPOPCNT 5-573 PAGE 2397 LINE 130965 define pcodeop vpopcntq_avx512_vpopcntdq ; :VPOPCNTQ XmmReg1^XmmOpMask64, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64; byte=0x55; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { XmmResult = vpopcntq_avx512_vpopcntdq( XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130968 :VPOPCNTQ YmmReg1^YmmOpMask64, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64; byte=0x55; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { YmmResult = vpopcntq_avx512_vpopcntdq( YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); } # VPOPCNT 5-573 PAGE 2397 LINE 130971 :VPOPCNTQ ZmmReg1^ZmmOpMask64, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64; byte=0x55; ZmmReg1 ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vpopcntq_avx512_vpopcntdq( ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; } # VPSHLD 5-588 PAGE 2412 LINE 131746 define pcodeop vpshldw_avx512_vbmi2 ; :VPSHLDW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x70; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem) { XmmResult = vpshldw_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); } # VPSHLD 5-588 PAGE 2412 LINE 131749 :VPSHLDW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x70; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ YmmResult = vpshldw_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VPSHLD 5-588 PAGE 2412 LINE 131752
:VPSHLDW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x70; ZmmReg1 ... & ZmmReg2_m512; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ ZmmResult = vpshldw_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VPSHLD 5-588 PAGE 2412 LINE 131755
define pcodeop vpshldd_avx512_vbmi2 ;
:VPSHLDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshldd_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPSHLD 5-588 PAGE 2412 LINE 131758
:VPSHLDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshldd_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPSHLD 5-588 PAGE 2412 LINE 131761
:VPSHLDD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshldd_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPSHLD 5-588 PAGE 2412 LINE 131764
define pcodeop vpshldq_avx512_vbmi2 ;
:VPSHLDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshldq_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPSHLD 5-588 PAGE 2412 LINE 131767
:VPSHLDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshldq_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPSHLD 5-588 PAGE 2412 LINE 131770
:VPSHLDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshldq_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPSHLDV 5-591 PAGE 2415 LINE 131888
define pcodeop vpshldvw_avx512_vbmi2 ;
:VPSHLDVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x70; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ XmmResult = vpshldvw_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VPSHLDV 5-591 PAGE 2415 LINE 131891
:VPSHLDVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x70; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ YmmResult = vpshldvw_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VPSHLDV 5-591 PAGE 2415 LINE 131894
:VPSHLDVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x70; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ ZmmResult = vpshldvw_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VPSHLDV 5-591 PAGE 2415 LINE 131897
define pcodeop vpshldvd_avx512_vbmi2 ;
:VPSHLDVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshldvd_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPSHLDV 5-591 PAGE 2415 LINE 131900
:VPSHLDVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshldvd_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPSHLDV 5-591 PAGE 2415 LINE 131903
:VPSHLDVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshldvd_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPSHLDV 5-591 PAGE 2415 LINE 131906
define pcodeop vpshldvq_avx512_vbmi2 ;
:VPSHLDVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x71; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshldvq_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPSHLDV 5-591 PAGE 2415 LINE 131909
:VPSHLDVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x71; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshldvq_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPSHLDV 5-591 PAGE 2415 LINE 131912
:VPSHLDVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x71; ZmmReg1 ...
& ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshldvq_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPSHRD 5-594 PAGE 2418 LINE 132044
define pcodeop vpshrdw_avx512_vbmi2 ;
:VPSHRDW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ XmmResult = vpshrdw_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VPSHRD 5-594 PAGE 2418 LINE 132047
:VPSHRDW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x72; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ YmmResult = vpshrdw_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VPSHRD 5-594 PAGE 2418 LINE 132050
:VPSHRDW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x72; ZmmReg1 ... & ZmmReg2_m512; imm8 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ ZmmResult = vpshrdw_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VPSHRD 5-594 PAGE 2418 LINE 132053
define pcodeop vpshrdd_avx512_vbmi2 ;
:VPSHRDD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshrdd_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m32bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPSHRD 5-594 PAGE 2418 LINE 132056
:VPSHRDD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshrdd_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m32bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPSHRD 5-594 PAGE 2418 LINE 132059
:VPSHRDD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m32bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshrdd_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m32bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPSHRD 5-594 PAGE 2418 LINE 132062
define pcodeop vpshrdq_avx512_vbmi2 ;
:VPSHRDQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshrdq_avx512_vbmi2( evexV5_XmmReg, XmmReg2_m128_m64bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPSHRD 5-594 PAGE 2418 LINE 132065
:VPSHRDQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshrdq_avx512_vbmi2( evexV5_YmmReg, YmmReg2_m256_m64bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPSHRD 5-594 PAGE 2418 LINE 132068
:VPSHRDQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m64bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshrdq_avx512_vbmi2( evexV5_ZmmReg, ZmmReg2_m512_m64bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPSHRDV 5-597 PAGE 2421 LINE 132183
define pcodeop vpshrdvw_avx512_vbmi2 ;
:VPSHRDVW XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask16 & evexV5_XmmReg; byte=0x72; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ XmmResult = vpshrdvw_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VPSHRDV 5-597 PAGE 2421 LINE 132186
:VPSHRDVW YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask16 & evexV5_YmmReg; byte=0x72; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ YmmResult = vpshrdvw_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VPSHRDV 5-597 PAGE 2421 LINE 132189
:VPSHRDVW ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x72; ZmmReg1 ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ ZmmResult = vpshrdvw_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VPSHRDV 5-597 PAGE 2421 LINE 132192
define pcodeop vpshrdvd_avx512_vbmi2 ;
:VPSHRDVD XmmReg1^XmmOpMask32, evexV5_XmmReg, XmmReg2_m128_m32bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshrdvd_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m32bcst ); XmmMask = XmmReg1; build XmmOpMask32; ZmmReg1 = zext(XmmResult); }

# VPSHRDV 5-597 PAGE 2421 LINE 132195
:VPSHRDVD YmmReg1^YmmOpMask32, evexV5_YmmReg, YmmReg2_m256_m32bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshrdvd_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m32bcst ); YmmMask = YmmReg1; build YmmOpMask32; ZmmReg1 = zext(YmmResult); }

# VPSHRDV 5-597 PAGE 2421 LINE 132198
:VPSHRDVD ZmmReg1^ZmmOpMask32, evexV5_ZmmReg, ZmmReg2_m512_m32bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask32 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m32bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshrdvd_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m32bcst ); ZmmMask = ZmmReg1; build ZmmOpMask32; ZmmReg1 = ZmmResult; }

# VPSHRDV 5-597 PAGE 2421 LINE 132201
define pcodeop vpshrdvq_avx512_vbmi2 ;
:VPSHRDVQ XmmReg1^XmmOpMask64, evexV5_XmmReg, XmmReg2_m128_m64bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & XmmOpMask64 & evexV5_XmmReg; byte=0x73; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vpshrdvq_avx512_vbmi2( XmmReg1, evexV5_XmmReg, XmmReg2_m128_m64bcst ); XmmMask = XmmReg1; build XmmOpMask64; ZmmReg1 = zext(XmmResult); }

# VPSHRDV 5-597 PAGE 2421 LINE 132204
:VPSHRDVQ YmmReg1^YmmOpMask64, evexV5_YmmReg, YmmReg2_m256_m64bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & YmmOpMask64 & evexV5_YmmReg; byte=0x73; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vpshrdvq_avx512_vbmi2( YmmReg1, evexV5_YmmReg, YmmReg2_m256_m64bcst ); YmmMask = YmmReg1; build YmmOpMask64; ZmmReg1 = zext(YmmResult); }

# VPSHRDV 5-597 PAGE 2421 LINE 132207
:VPSHRDVQ ZmmReg1^ZmmOpMask64, evexV5_ZmmReg, ZmmReg2_m512_m64bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & ZmmOpMask64 & evexV5_ZmmReg; byte=0x73; ZmmReg1 ... & ZmmReg2_m512_m64bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vpshrdvq_avx512_vbmi2( ZmmReg1, evexV5_ZmmReg, ZmmReg2_m512_m64bcst ); ZmmMask = ZmmReg1; build ZmmOpMask64; ZmmReg1 = ZmmResult; }

# VPSHUFBITQMB 5-600 PAGE 2424 LINE 132322
# Destination is a mask register; the opmask participates as a pcodeop input.
define pcodeop vpshufbitqmb_avx512_bitalg ;
:VPSHUFBITQMB KReg_reg^XmmOpMask, evexV5_XmmReg, XmmReg2_m128 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x8F; KReg_reg ... & XmmReg2_m128 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ local tmp:2 = vpshufbitqmb_avx512_bitalg( evexV5_XmmReg, XmmReg2_m128, XmmOpMask ); KReg_reg = zext(tmp); }

# VPSHUFBITQMB 5-600 PAGE 2424 LINE 132325
:VPSHUFBITQMB KReg_reg^YmmOpMask, evexV5_YmmReg, YmmReg2_m256 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask & evexV5_YmmReg; byte=0x8F; KReg_reg ... & YmmReg2_m256 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ local tmp:4 = vpshufbitqmb_avx512_bitalg( evexV5_YmmReg, YmmReg2_m256, YmmOpMask ); KReg_reg = zext(tmp); }

# VPSHUFBITQMB 5-600 PAGE 2424 LINE 132328
:VPSHUFBITQMB KReg_reg^ZmmOpMask, evexV5_ZmmReg, ZmmReg2_m512 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & ZmmOpMask & evexV5_ZmmReg; byte=0x8F; KReg_reg ... & ZmmReg2_m512 [ evexD8Type = 1; evexTType = 0; ] # (TupleType Full Mem)
{ local tmp:8 = vpshufbitqmb_avx512_bitalg( evexV5_ZmmReg, ZmmReg2_m512, ZmmOpMask ); KReg_reg = zext(tmp); }

# VRCPPH 5-646 PAGE 2470 LINE 134707
define pcodeop vrcpph_avx512fp16 ;
:VRCPPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16; byte=0x4C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vrcpph_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VRCPPH 5-646 PAGE 2470 LINE 134710
:VRCPPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16; byte=0x4C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vrcpph_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VRCPPH 5-646 PAGE 2470 LINE 134713
:VRCPPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16; byte=0x4C; ZmmReg1 ...
& ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vrcpph_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VRCPSH 5-648 PAGE 2472 LINE 134789
define pcodeop vrcpsh_avx512fp16 ;
:VRCPSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x4D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
  XmmResult = vrcpsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  # scalar op: low 16-bit lane written only when opmask bit 0 is set
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VREDUCEPH 5-652 PAGE 2476 LINE 134998
define pcodeop vreduceph_avx512fp16 ;
:VREDUCEPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask16; byte=0x56; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vreduceph_avx512fp16( XmmReg2_m128_m16bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VREDUCEPH 5-652 PAGE 2476 LINE 135003
:VREDUCEPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask16; byte=0x56; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vreduceph_avx512fp16( YmmReg2_m256_m16bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VREDUCEPH 5-652 PAGE 2476 LINE 135008
:VREDUCEPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask16; byte=0x56; ZmmReg1 ... & ZmmReg2_m512_m16bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vreduceph_avx512fp16( ZmmReg2_m512_m16bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VREDUCESH 5-659 PAGE 2483 LINE 135336
define pcodeop vreducesh_avx512fp16 ;
:VREDUCESH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16, imm8 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x57; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16; imm8
{
  XmmResult = vreducesh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VRNDSCALEPH 5-666 PAGE 2490 LINE 135677
define pcodeop vrndscaleph_avx512fp16 ;
:VRNDSCALEPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask16; byte=0x08; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vrndscaleph_avx512fp16( XmmReg2_m128_m16bcst, imm8:1 ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VRNDSCALEPH 5-666 PAGE 2490 LINE 135681
:VRNDSCALEPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask16; byte=0x08; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vrndscaleph_avx512fp16( YmmReg2_m256_m16bcst, imm8:1 ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VRNDSCALEPH 5-666 PAGE 2490 LINE 135685
:VRNDSCALEPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & ZmmOpMask16; byte=0x08; ZmmReg1 ...
& ZmmReg2_m512_m16bcst; imm8 [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vrndscaleph_avx512fp16( ZmmReg2_m512_m16bcst, imm8:1 ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VRNDSCALESH 5-674 PAGE 2498 LINE 136097
define pcodeop vrndscalesh_avx512fp16 ;
:VRNDSCALESH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16, imm8 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x0A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16; imm8
{
  XmmResult = vrndscalesh_avx512fp16( evexV5_XmmReg, XmmReg2_m16, imm8:1 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VRSQRTPH 5-686 PAGE 2510 LINE 136692
define pcodeop vrsqrtph_avx512fp16 ;
:VRSQRTPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16; byte=0x4E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vrsqrtph_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VRSQRTPH 5-686 PAGE 2510 LINE 136696
:VRSQRTPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16; byte=0x4E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vrsqrtph_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VRSQRTPH 5-686 PAGE 2510 LINE 136700
:VRSQRTPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16; byte=0x4E; ZmmReg1 ... & ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vrsqrtph_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VRSQRTSH 5-688 PAGE 2512 LINE 136781
define pcodeop vrsqrtsh_avx512fp16 ;
:VRSQRTSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x4F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
  XmmResult = vrsqrtsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VSCALEFPH 5-692 PAGE 2516 LINE 136971
define pcodeop vscalefph_avx512fp16 ;
:VSCALEFPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x2C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vscalefph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VSCALEFPH 5-692 PAGE 2516 LINE 136974
:VSCALEFPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x2C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vscalefph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VSCALEFPH 5-692 PAGE 2516 LINE 136977
:VSCALEFPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x2C; ZmmReg1 ...
& ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vscalefph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VSCALEFSH 5-699 PAGE 2523 LINE 137301
define pcodeop vscalefsh_avx512fp16 ;
:VSCALEFSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_66) & $(VEX_MAP6) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x2D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
  XmmResult = vscalefsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VSQRTPH 5-712 PAGE 2536 LINE 137925
define pcodeop vsqrtph_avx512fp16 ;
:VSQRTPH XmmReg1^XmmOpMask16, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vsqrtph_avx512fp16( XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VSQRTPH 5-712 PAGE 2536 LINE 137928
:VSQRTPH YmmReg1^YmmOpMask16, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16; byte=0x51; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vsqrtph_avx512fp16( YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VSQRTPH 5-712 PAGE 2536 LINE 137931
:VSQRTPH ZmmReg1^ZmmOpMask16, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16; byte=0x51; ZmmReg1 ... & ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ ZmmResult = vsqrtph_avx512fp16( ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; }

# VSQRTSH 5-714 PAGE 2538 LINE 138003
define pcodeop vsqrtsh_avx512fp16 ;
:VSQRTSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x51; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16
{
  XmmResult = vsqrtsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 );
  XmmMask = XmmReg1;
  build XmmOpMask;
  conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]);
  XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged
  ZmmReg1 = zext(XmmResult);
}

# VSUBPH 5-715 PAGE 2539 LINE 138057
define pcodeop vsubph_avx512fp16 ;
:VSUBPH XmmReg1^XmmOpMask16, evexV5_XmmReg, XmmReg2_m128_m16bcst is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask16 & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ XmmResult = vsubph_avx512fp16( evexV5_XmmReg, XmmReg2_m128_m16bcst ); XmmMask = XmmReg1; build XmmOpMask16; ZmmReg1 = zext(XmmResult); }

# VSUBPH 5-715 PAGE 2539 LINE 138060
:VSUBPH YmmReg1^YmmOpMask16, evexV5_YmmReg, YmmReg2_m256_m16bcst is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & YmmOpMask16 & evexV5_YmmReg; byte=0x5C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full)
{ YmmResult = vsubph_avx512fp16( evexV5_YmmReg, YmmReg2_m256_m16bcst ); YmmMask = YmmReg1; build YmmOpMask16; ZmmReg1 = zext(YmmResult); }

# VSUBPH 5-715 PAGE 2539 LINE 138063
:VSUBPH ZmmReg1^ZmmOpMask16, evexV5_ZmmReg, ZmmReg2_m512_m16bcst is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0) & ZmmOpMask16 & evexV5_ZmmReg; byte=0x5C; ZmmReg1 ...
& ZmmReg2_m512_m16bcst [ evexD8Type = 0; evexTType = 0; ] # (TupleType Full) { ZmmResult = vsubph_avx512fp16( evexV5_ZmmReg, ZmmReg2_m512_m16bcst ); ZmmMask = ZmmReg1; build ZmmOpMask16; ZmmReg1 = ZmmResult; } # VSUBSH 5-717 PAGE 2541 LINE 138152 define pcodeop vsubsh_avx512fp16 ; :VSUBSH XmmReg1^XmmOpMask, evexV5_XmmReg, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_F3) & $(VEX_MAP5) & $(VEX_W0) & XmmOpMask & evexV5_XmmReg; byte=0x5C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16 { XmmResult = vsubsh_avx512fp16( evexV5_XmmReg, XmmReg2_m16 ); XmmMask = XmmReg1; build XmmOpMask; conditionalAssign(XmmResult[0,16], XmmOpMask[0,1], XmmResult[0,16], XmmMask[0,16]); XmmResult[16,112] = XmmReg1[16,112]; # DEST[127:16] remains unchanged ZmmReg1 = zext(XmmResult); } # VUCOMISH 5-721 PAGE 2545 LINE 138358 define pcodeop vucomish_avx512fp16 ; :VUCOMISH XmmReg1, XmmReg2_m16 is $(EVEX_NONE) & $(EVEX_LLIG) & $(VEX_PRE_NONE) & $(VEX_MAP5) & $(VEX_W0); byte=0x2E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m16 { local tmp:16 = vucomish_avx512fp16( XmmReg2_m16 ); ZmmReg1 = zext(tmp); } ================================================ FILE: pypcode/processors/x86/data/languages/avx512_manual.sinc ================================================ # KADDW/KADDB/KADDQ/KADDD 3-496 PAGE 1066 LINE 55984 :KADDW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x4A; KReg_reg & KReg_rm { local tmp:2 = vex1VVV_KReg[0,16] + KReg_rm[0,16]; KReg_reg = zext(tmp); } # KADDW/KADDB/KADDQ/KADDD 3-496 PAGE 1066 LINE 55986 :KADDB KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x4A; KReg_reg & KReg_rm { local tmp:1 = vex1VVV_KReg[0,8] + KReg_rm[0,8]; KReg_reg = zext(tmp); } # KADDW/KADDB/KADDQ/KADDD 3-496 PAGE 1066 LINE 55988 :KADDQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x4A; 
KReg_reg & KReg_rm { local tmp:8 = vex1VVV_KReg[0,64] + KReg_rm[0,64]; KReg_reg = zext(tmp); } # KADDW/KADDB/KADDQ/KADDD 3-496 PAGE 1066 LINE 55990 :KADDD KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x4A; KReg_reg & KReg_rm { local tmp:4 = vex1VVV_KReg[0,32] + KReg_rm[0,32]; KReg_reg = zext(tmp); } # KANDW/KANDB/KANDQ/KANDD 3-497 PAGE 1067 LINE 56039 :KANDW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x41; KReg_reg & KReg_rm { local tmp:2 = vex1VVV_KReg[0,16] & KReg_rm[0,16]; KReg_reg = zext(tmp); } # KANDW/KANDB/KANDQ/KANDD 3-497 PAGE 1067 LINE 56041 :KANDB KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x41; KReg_reg & KReg_rm { local tmp:1 = vex1VVV_KReg[0,8] & KReg_rm[0,8]; KReg_reg = zext(tmp); } # KANDW/KANDB/KANDQ/KANDD 3-497 PAGE 1067 LINE 56043 :KANDQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x41; KReg_reg & KReg_rm { local tmp:8 = vex1VVV_KReg[0,64] & KReg_rm[0,64]; KReg_reg = zext(tmp); } # KANDW/KANDB/KANDQ/KANDD 3-497 PAGE 1067 LINE 56045 :KANDD KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x41; KReg_reg & KReg_rm { local tmp:4 = vex1VVV_KReg[0,32] & KReg_rm[0,32]; KReg_reg = zext(tmp); } # KANDNW/KANDNB/KANDNQ/KANDND 3-498 PAGE 1068 LINE 56100 :KANDNW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x42; KReg_reg & KReg_rm { local tmp:2 = ~vex1VVV_KReg[0,16] & KReg_rm[0,16]; KReg_reg = zext(tmp); } # KANDNW/KANDNB/KANDNQ/KANDND 3-498 PAGE 1068 LINE 56102 :KANDNB KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x42; 
KReg_reg & KReg_rm { local tmp:1 = ~vex1VVV_KReg[0,8] & KReg_rm[0,8]; KReg_reg = zext(tmp); } # KANDNW/KANDNB/KANDNQ/KANDND 3-498 PAGE 1068 LINE 56104 :KANDNQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x42; KReg_reg & KReg_rm { local tmp:8 = ~vex1VVV_KReg[0,64] & KReg_rm[0,64]; KReg_reg = zext(tmp); } # KANDNW/KANDNB/KANDNQ/KANDND 3-498 PAGE 1068 LINE 56106 :KANDND KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x42; KReg_reg & KReg_rm { local tmp:4 = ~vex1VVV_KReg[0,32] & KReg_rm[0,32]; KReg_reg = zext(tmp); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56160 :KMOVW KReg_reg, RegK_m16 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x90; KReg_reg ... & RegK_m16 { KReg_reg = zext(RegK_m16); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56162 :KMOVB KReg_reg, RegK_m8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x90; KReg_reg ... & RegK_m8 { KReg_reg = zext(RegK_m8); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56164 :KMOVQ KReg_reg, RegK_m64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1); byte=0x90; KReg_reg ... & RegK_m64 { KReg_reg = zext(RegK_m64); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56166 :KMOVD KReg_reg, RegK_m32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x90; KReg_reg ... & RegK_m32 { KReg_reg = zext(RegK_m32); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56168 :KMOVW m16, KReg_reg is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x91; KReg_reg ... & m16 { m16 = KReg_reg[0,16]; } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56170 :KMOVB m8, KReg_reg is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x91; KReg_reg ... 
& m8 { m8 = KReg_reg[0,8]; } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56172 :KMOVQ m64, KReg_reg is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1); byte=0x91; KReg_reg ... & m64 { m64 = KReg_reg[0,64]; } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56174 :KMOVD m32, KReg_reg is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x91; KReg_reg ... & m32 { m32 = KReg_reg[0,32]; } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56176 :KMOVW KReg_reg, Rmr32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x92; mod=3 & Rmr32 &KReg_reg { KReg_reg = zext(Rmr32[0,16]); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56178 :KMOVB KReg_reg, Rmr32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x92; mod=3 & Rmr32 & KReg_reg { KReg_reg = zext(Rmr32[0,8]); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56180 @ifdef IA64 :KMOVQ KReg_reg, Rmr64 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x92; mod=3 & Rmr64 & KReg_reg { KReg_reg = zext(Rmr64); } @endif # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56182 :KMOVD KReg_reg, Rmr32 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x92; mod=3 & Rmr32 & KReg_reg { KReg_reg = zext(Rmr32); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56184 :KMOVW Reg32, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x93; Reg32 & KReg_rm { Reg32 = zext(KReg_rm[0,16]); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56186 :KMOVB Reg32, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x93; Reg32 & KReg_rm { Reg32 = zext(KReg_rm[0,8]); } # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56188 @ifdef IA64 :KMOVQ Reg64, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W1); byte=0x93; Reg64 & KReg_rm { Reg64 = KReg_rm[0,64]; } @endif # KMOVW/KMOVB/KMOVQ/KMOVD 3-499 PAGE 1069 LINE 56190 :KMOVD Reg32, 
KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_W0); byte=0x93; Reg32 & KReg_rm { Reg32 = KReg_rm[0,32]; } # KNOTW/KNOTB/KNOTQ/KNOTD 3-501 PAGE 1071 LINE 56266 :KNOTW KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x44; KReg_reg & KReg_rm { KReg_reg = zext(~KReg_rm[0,16]); } # KNOTW/KNOTB/KNOTQ/KNOTD 3-501 PAGE 1071 LINE 56268 :KNOTB KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x44; KReg_reg & KReg_rm { KReg_reg = zext(~KReg_rm[0,8]); } # KNOTW/KNOTB/KNOTQ/KNOTD 3-501 PAGE 1071 LINE 56270 :KNOTQ KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1); byte=0x44; KReg_reg & KReg_rm { KReg_reg = zext(~KReg_rm[0,64]); } # KNOTW/KNOTB/KNOTQ/KNOTD 3-501 PAGE 1071 LINE 56272 :KNOTD KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x44; KReg_reg & KReg_rm { KReg_reg = zext(~KReg_rm[0,32]); } # KORW/KORB/KORQ/KORD 3-502 PAGE 1072 LINE 56325 :KORW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x45; KReg_reg & KReg_rm { local tmp:2 = vex1VVV_KReg[0,16] | KReg_rm[0,16]; KReg_reg = zext(tmp); } # KORW/KORB/KORQ/KORD 3-502 PAGE 1072 LINE 56327 :KORB KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x45; KReg_reg & KReg_rm { local tmp:1 = vex1VVV_KReg[0,8] | KReg_rm[0,8]; KReg_reg = zext(tmp); } # KORW/KORB/KORQ/KORD 3-502 PAGE 1072 LINE 56329 :KORQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x45; KReg_reg & KReg_rm { local tmp:8 = vex1VVV_KReg[0,64] | KReg_rm[0,64]; KReg_reg = zext(tmp); } # KORW/KORB/KORQ/KORD 3-502 PAGE 1072 LINE 56331 :KORD KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & 
vex1VVV_KReg; byte=0x45; KReg_reg & KReg_rm
{
    local tmp:4 = vex1VVV_KReg[0,32] | KReg_rm[0,32];
    KReg_reg = zext(tmp);
}

# KORTESTW/KORTESTB/KORTESTQ/KORTESTD 3-503 PAGE 1073 LINE 56385
# Intel SDM: KORTEST ORs the two mask sources, sets ZF when the result is all
# zeros and CF when the result is all ones, and clears AF, OF, PF, SF.
# The explicit flag clears below match the KTEST implementations later in
# this file, which already zero these flags.
:KORTESTW KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x98; KReg_reg & KReg_rm
{
    local tmp:2 = KReg_reg[0,16] | KReg_rm[0,16];
    ZF = (tmp == 0);
    CF = (tmp == 0xffff);
    AF = 0; OF = 0; PF = 0; SF = 0;
}

# KORTESTW/KORTESTB/KORTESTQ/KORTESTD 3-503 PAGE 1073 LINE 56387
:KORTESTB KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x98; KReg_reg & KReg_rm
{
    local tmp:1 = KReg_reg[0,8] | KReg_rm[0,8];
    ZF = (tmp == 0);
    CF = (tmp == 0xff);
    AF = 0; OF = 0; PF = 0; SF = 0;
}

# KORTESTW/KORTESTB/KORTESTQ/KORTESTD 3-503 PAGE 1073 LINE 56389
:KORTESTQ KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1); byte=0x98; KReg_reg & KReg_rm
{
    local tmp:8 = KReg_reg[0,64] | KReg_rm[0,64];
    ZF = (tmp == 0);
    CF = (tmp == 0xffffffffffffffff);
    AF = 0; OF = 0; PF = 0; SF = 0;
}

# KORTESTW/KORTESTB/KORTESTQ/KORTESTD 3-503 PAGE 1073 LINE 56391
:KORTESTD KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x98; KReg_reg & KReg_rm
{
    local tmp:4 = KReg_reg[0,32] | KReg_rm[0,32];
    ZF = (tmp == 0);
    CF = (tmp == 0xffffffff);
    AF = 0; OF = 0; PF = 0; SF = 0;
}

# KSHIFTLW/KSHIFTLB/KSHIFTLQ/KSHIFTLD 3-505 PAGE 1075 LINE 56481
:KSHIFTLW KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x32; KReg_reg & KReg_rm; imm8
{
    local tmp:2 = KReg_rm[0,16] << imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTLW/KSHIFTLB/KSHIFTLQ/KSHIFTLD 3-505 PAGE 1075 LINE 56483
:KSHIFTLB KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x32; KReg_reg & KReg_rm; imm8
{
    local tmp:1 = KReg_rm[0,8] << imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTLW/KSHIFTLB/KSHIFTLQ/KSHIFTLD 3-505 PAGE 1075 LINE 56485
:KSHIFTLQ KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x33; KReg_reg &
KReg_rm; imm8
{
    local tmp:8 = KReg_rm[0,64] << imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTLW/KSHIFTLB/KSHIFTLQ/KSHIFTLD 3-505 PAGE 1075 LINE 56487
:KSHIFTLD KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x33; KReg_reg & KReg_rm; imm8
{
    # FIX: shift the source operand (KReg_rm), not the destination (KReg_reg).
    # Intel SDM: DEST[31:0] := SRC1[31:0] << imm8; the sibling KSHIFTLW/LB/LQ
    # and all four KSHIFTR variants shift KReg_rm.
    local tmp:4 = KReg_rm[0,32] << imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTRW/KSHIFTRB/KSHIFTRQ/KSHIFTRD 3-507 PAGE 1077 LINE 56562
:KSHIFTRW KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x30; KReg_reg & KReg_rm; imm8
{
    local tmp:2 = KReg_rm[0,16] >> imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTRW/KSHIFTRB/KSHIFTRQ/KSHIFTRD 3-507 PAGE 1077 LINE 56564
:KSHIFTRB KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x30; KReg_reg & KReg_rm; imm8
{
    local tmp:1 = KReg_rm[0,8] >> imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTRW/KSHIFTRB/KSHIFTRQ/KSHIFTRD 3-507 PAGE 1077 LINE 56566
:KSHIFTRQ KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W1); byte=0x31; KReg_reg & KReg_rm; imm8
{
    local tmp:8 = KReg_rm[0,64] >> imm8:1;
    KReg_reg = zext(tmp);
}

# KSHIFTRW/KSHIFTRB/KSHIFTRQ/KSHIFTRD 3-507 PAGE 1077 LINE 56568
:KSHIFTRD KReg_reg, KReg_rm, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0); byte=0x31; KReg_reg & KReg_rm; imm8
{
    local tmp:4 = KReg_rm[0,32] >> imm8:1;
    KReg_reg = zext(tmp);
}

# KTESTW/KTESTB/KTESTQ/KTESTD 3-509 PAGE 1079 LINE 56643
:KTESTW KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0); byte=0x99; KReg_reg & KReg_rm
{
    local tmp:2 = KReg_reg[0,16] & KReg_rm[0,16];
    ZF = (tmp == 0);
    tmp = KReg_reg[0,16] & ~KReg_rm[0,16];
    CF = (tmp == 0);
    AF = 0; OF = 0; PF = 0; SF = 0;
}

# KTESTW/KTESTB/KTESTQ/KTESTD 3-509 PAGE 1079 LINE 56645
:KTESTB KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0); byte=0x99; KReg_reg & KReg_rm { local tmp:1 = KReg_reg[0,8] &
KReg_rm[0,8]; ZF = (tmp == 0); tmp = KReg_reg[0,8] & ~KReg_rm[0,8]; CF = (tmp == 0); AF = 0; OF = 0; PF = 0; SF = 0; } # KTESTW/KTESTB/KTESTQ/KTESTD 3-509 PAGE 1079 LINE 56647 :KTESTQ KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1); byte=0x99; KReg_reg & KReg_rm { local tmp:8 = KReg_reg[0,64] & KReg_rm[0,64]; ZF = (tmp == 0); tmp = KReg_reg[0,64] & ~KReg_rm[0,64]; CF = (tmp == 0); AF = 0; OF = 0; PF = 0; SF = 0; } # KTESTW/KTESTB/KTESTQ/KTESTD 3-509 PAGE 1079 LINE 56649 :KTESTD KReg_reg, KReg_rm is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1); byte=0x99; KReg_reg & KReg_rm { local tmp:4 = KReg_reg[0,32] & KReg_rm[0,32]; ZF = (tmp == 0); tmp = KReg_reg[0,32] & ~KReg_rm[0,32]; CF = (tmp == 0); AF = 0; OF = 0; PF = 0; SF = 0; } # KUNPCKBW/KUNPCKWD/KUNPCKDQ 3-511 PAGE 1081 LINE 56747 :KUNPCKBW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x4B; KReg_reg & KReg_rm { local src1:1 = vex1VVV_KReg[0,8]; local src2:1 = KReg_rm[0,8]; KReg_reg = 0; KReg_reg[0,8] = src2; KReg_reg[8,8] = src1; } # KUNPCKBW/KUNPCKWD/KUNPCKDQ 3-511 PAGE 1081 LINE 56749 :KUNPCKWD KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x4B; KReg_reg & KReg_rm { local src1:2 = vex1VVV_KReg[0,16]; local src2:2 = KReg_rm[0,16]; KReg_reg = 0; KReg_reg[0,16] = src2; KReg_reg[16,16] = src1; } # KUNPCKBW/KUNPCKWD/KUNPCKDQ 3-511 PAGE 1081 LINE 56751 :KUNPCKDQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x4B; KReg_reg & KReg_rm { local src1:4 = vex1VVV_KReg[0,32]; local src2:4 = KReg_rm[0,32]; KReg_reg = 0; KReg_reg[0,32] = src2; KReg_reg[32,32] = src1; } # KXNORW/KXNORB/KXNORQ/KXNORD 3-512 PAGE 1082 LINE 56806 :KXNORW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & 
vex1VVV_KReg; byte=0x46; KReg_reg & KReg_rm { local tmp:2 = ~(vex1VVV_KReg[0,16] ^ KReg_rm[0,16]); KReg_reg = zext(tmp); } # KXNORW/KXNORB/KXNORQ/KXNORD 3-512 PAGE 1082 LINE 56808 :KXNORB KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x46; KReg_reg & KReg_rm { local tmp:1 = ~(vex1VVV_KReg[0,8] ^ KReg_rm[0,8]); KReg_reg = zext(tmp); } # KXNORW/KXNORB/KXNORQ/KXNORD 3-512 PAGE 1082 LINE 56810 :KXNORQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x46; KReg_reg & KReg_rm { local tmp:8 = ~(vex1VVV_KReg[0,64] ^ KReg_rm[0,64]); KReg_reg = zext(tmp); } # KXNORW/KXNORB/KXNORQ/KXNORD 3-512 PAGE 1082 LINE 56812 :KXNORD KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x46; KReg_reg & KReg_rm { local tmp:4 = ~(vex1VVV_KReg[0,32] ^ KReg_rm[0,32]); KReg_reg = zext(tmp); } # KXORW/KXORB/KXORQ/KXORD 3-513 PAGE 1083 LINE 56866 :KXORW KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NDS) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x47; KReg_reg & KReg_rm { local tmp:2 = vex1VVV_KReg[0,16] ^ KReg_rm[0,16]; KReg_reg = zext(tmp); } # KXORW/KXORB/KXORQ/KXORD 3-513 PAGE 1083 LINE 56868 :KXORB KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_W0) & vex1VVV_KReg; byte=0x47; KReg_reg & KReg_rm { local tmp:1 = vex1VVV_KReg[0,8] ^ KReg_rm[0,8]; KReg_reg = zext(tmp); } # KXORW/KXORB/KXORQ/KXORD 3-513 PAGE 1083 LINE 56870 :KXORQ KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_W1) & vex1VVV_KReg; byte=0x47; KReg_reg & KReg_rm { local tmp:8 = vex1VVV_KReg[0,64] ^ KReg_rm[0,64]; KReg_reg = zext(tmp); } # KXORW/KXORB/KXORQ/KXORD 3-513 PAGE 1083 LINE 56872 :KXORD KReg_reg, vex1VVV_KReg, KReg_rm is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & 
$(VEX_W1) & vex1VVV_KReg; byte=0x47; KReg_reg & KReg_rm { local tmp:4 = vex1VVV_KReg[0,32] ^ KReg_rm[0,32]; KReg_reg = zext(tmp); } # VCVTPS2PH 5-37 PAGE 1861 LINE 96116 define pcodeop vcvtps2ph_avx512vl ; :VCVTPS2PH XmmReg2^XmmOpMask, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x1D; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2; imm8 { XmmResult = vcvtps2ph_avx512vl( XmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask; XmmResult[0,16] = (zext(XmmOpMask[0,1]) * XmmResult[0,16]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,16]); XmmResult[16,16] = (zext(XmmOpMask[1,1]) * XmmResult[16,16]) + (zext(!XmmOpMask[1,1]) * XmmMask[16,16]); XmmResult[32,16] = (zext(XmmOpMask[2,1]) * XmmResult[32,16]) + (zext(!XmmOpMask[2,1]) * XmmMask[32,16]); XmmResult[48,16] = (zext(XmmOpMask[3,1]) * XmmResult[48,16]) + (zext(!XmmOpMask[3,1]) * XmmMask[48,16]); ZmmReg2 = zext(XmmResult[0,64]); } :VCVTPS2PH m64^XmmOpMask, XmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x1D; XmmReg1 ... 
& m64; imm8 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vcvtps2ph_avx512vl( XmmReg1, imm8:1 ); XmmMask = zext(m64); build XmmOpMask; XmmResult[0,16] = (zext(XmmOpMask[0,1]) * XmmResult[0,16]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,16]); XmmResult[16,16] = (zext(XmmOpMask[1,1]) * XmmResult[16,16]) + (zext(!XmmOpMask[1,1]) * XmmMask[16,16]); XmmResult[32,16] = (zext(XmmOpMask[2,1]) * XmmResult[32,16]) + (zext(!XmmOpMask[2,1]) * XmmMask[32,16]); XmmResult[48,16] = (zext(XmmOpMask[3,1]) * XmmResult[48,16]) + (zext(!XmmOpMask[3,1]) * XmmMask[48,16]); m64 = XmmResult[0,64]; } # VCVTPS2PH 5-37 PAGE 1861 LINE 96119 :VCVTPS2PH XmmReg2^XmmOpMask, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x1D; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2; imm8 { XmmResult = vcvtps2ph_avx512vl( YmmReg1, imm8:1 ); XmmMask = XmmReg2; build XmmOpMask; XmmResult[0,16] = (zext(XmmOpMask[0,1]) * XmmResult[0,16]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,16]); XmmResult[16,16] = (zext(XmmOpMask[1,1]) * XmmResult[16,16]) + (zext(!XmmOpMask[1,1]) * XmmMask[16,16]); XmmResult[32,16] = (zext(XmmOpMask[2,1]) * XmmResult[32,16]) + (zext(!XmmOpMask[2,1]) * XmmMask[32,16]); XmmResult[48,16] = (zext(XmmOpMask[3,1]) * XmmResult[48,16]) + (zext(!XmmOpMask[3,1]) * XmmMask[48,16]); XmmResult[64,16] = (zext(XmmOpMask[4,1]) * XmmResult[64,16]) + (zext(!XmmOpMask[4,1]) * XmmMask[64,16]); XmmResult[80,16] = (zext(XmmOpMask[5,1]) * XmmResult[80,16]) + (zext(!XmmOpMask[5,1]) * XmmMask[80,16]); XmmResult[96,16] = (zext(XmmOpMask[6,1]) * XmmResult[96,16]) + (zext(!XmmOpMask[6,1]) * XmmMask[96,16]); XmmResult[112,16] = (zext(XmmOpMask[7,1]) * XmmResult[112,16]) + (zext(!XmmOpMask[7,1]) * XmmMask[112,16]); ZmmReg2 = zext(XmmResult[0,64]); } # VCVTPS2PH 5-37 PAGE 1861 LINE 96119 :VCVTPS2PH m128^XmmOpMask, YmmReg1, imm8 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & XmmOpMask; byte=0x1D; YmmReg1 ... 
& m128; imm8 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vcvtps2ph_avx512vl( YmmReg1, imm8:1 ); XmmMask = m128; build XmmOpMask; XmmResult[0,16] = (zext(XmmOpMask[0,1]) * XmmResult[0,16]) + (zext(!XmmOpMask[0,1]) * XmmMask[0,16]); XmmResult[16,16] = (zext(XmmOpMask[1,1]) * XmmResult[16,16]) + (zext(!XmmOpMask[1,1]) * XmmMask[16,16]); XmmResult[32,16] = (zext(XmmOpMask[2,1]) * XmmResult[32,16]) + (zext(!XmmOpMask[2,1]) * XmmMask[32,16]); XmmResult[48,16] = (zext(XmmOpMask[3,1]) * XmmResult[48,16]) + (zext(!XmmOpMask[3,1]) * XmmMask[48,16]); XmmResult[64,16] = (zext(XmmOpMask[4,1]) * XmmResult[64,16]) + (zext(!XmmOpMask[4,1]) * XmmMask[64,16]); XmmResult[80,16] = (zext(XmmOpMask[5,1]) * XmmResult[80,16]) + (zext(!XmmOpMask[5,1]) * XmmMask[80,16]); XmmResult[96,16] = (zext(XmmOpMask[6,1]) * XmmResult[96,16]) + (zext(!XmmOpMask[6,1]) * XmmMask[96,16]); XmmResult[112,16] = (zext(XmmOpMask[7,1]) * XmmResult[112,16]) + (zext(!XmmOpMask[7,1]) * XmmMask[112,16]); m128 = XmmResult; } # VCVTPS2PH 5-37 PAGE 1861 LINE 96122 define pcodeop vcvtps2ph_avx512f ; :VCVTPS2PH YmmReg2^YmmOpMask, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask; byte=0x1D; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2; imm8 { YmmResult = vcvtps2ph_avx512f( ZmmReg1, imm8:1 ); YmmMask = YmmReg2; build YmmOpMask; YmmResult[0,16] = (zext(YmmOpMask[0,1]) * YmmResult[0,16]) + (zext(!YmmOpMask[0,1]) * YmmMask[0,16]); YmmResult[16,16] = (zext(YmmOpMask[1,1]) * YmmResult[16,16]) + (zext(!YmmOpMask[1,1]) * YmmMask[16,16]); YmmResult[32,16] = (zext(YmmOpMask[2,1]) * YmmResult[32,16]) + (zext(!YmmOpMask[2,1]) * YmmMask[32,16]); YmmResult[48,16] = (zext(YmmOpMask[3,1]) * YmmResult[48,16]) + (zext(!YmmOpMask[3,1]) * YmmMask[48,16]); YmmResult[64,16] = (zext(YmmOpMask[4,1]) * YmmResult[64,16]) + (zext(!YmmOpMask[4,1]) * YmmMask[64,16]); YmmResult[80,16] = (zext(YmmOpMask[5,1]) * YmmResult[80,16]) + (zext(!YmmOpMask[5,1]) * YmmMask[80,16]); YmmResult[96,16] 
= (zext(YmmOpMask[6,1]) * YmmResult[96,16]) + (zext(!YmmOpMask[6,1]) * YmmMask[96,16]); YmmResult[112,16] = (zext(YmmOpMask[7,1]) * YmmResult[112,16]) + (zext(!YmmOpMask[7,1]) * YmmMask[112,16]); YmmResult[128,16] = (zext(YmmOpMask[8,1]) * YmmResult[128,16]) + (zext(!YmmOpMask[8,1]) * YmmMask[128,16]); YmmResult[144,16] = (zext(YmmOpMask[9,1]) * YmmResult[144,16]) + (zext(!YmmOpMask[9,1]) * YmmMask[144,16]); YmmResult[160,16] = (zext(YmmOpMask[10,1]) * YmmResult[160,16]) + (zext(!YmmOpMask[10,1]) * YmmMask[160,16]); YmmResult[176,16] = (zext(YmmOpMask[11,1]) * YmmResult[176,16]) + (zext(!YmmOpMask[11,1]) * YmmMask[176,16]); YmmResult[192,16] = (zext(YmmOpMask[12,1]) * YmmResult[192,16]) + (zext(!YmmOpMask[12,1]) * YmmMask[192,16]); YmmResult[208,16] = (zext(YmmOpMask[13,1]) * YmmResult[208,16]) + (zext(!YmmOpMask[13,1]) * YmmMask[208,16]); YmmResult[224,16] = (zext(YmmOpMask[14,1]) * YmmResult[224,16]) + (zext(!YmmOpMask[14,1]) * YmmMask[224,16]); YmmResult[240,16] = (zext(YmmOpMask[15,1]) * YmmResult[240,16]) + (zext(!YmmOpMask[15,1]) * YmmMask[240,16]); ZmmReg2 = zext(YmmResult); } :VCVTPS2PH m256^YmmOpMask, ZmmReg1, imm8 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_W0) & YmmOpMask; byte=0x1D; ZmmReg1 ... 
& m256; imm8 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vcvtps2ph_avx512f( ZmmReg1, imm8:1 ); YmmMask = m256; build YmmOpMask; YmmResult[0,16] = (zext(YmmOpMask[0,1]) * YmmResult[0,16]) + (zext(!YmmOpMask[0,1]) * YmmMask[0,16]); YmmResult[16,16] = (zext(YmmOpMask[1,1]) * YmmResult[16,16]) + (zext(!YmmOpMask[1,1]) * YmmMask[16,16]); YmmResult[32,16] = (zext(YmmOpMask[2,1]) * YmmResult[32,16]) + (zext(!YmmOpMask[2,1]) * YmmMask[32,16]); YmmResult[48,16] = (zext(YmmOpMask[3,1]) * YmmResult[48,16]) + (zext(!YmmOpMask[3,1]) * YmmMask[48,16]); YmmResult[64,16] = (zext(YmmOpMask[4,1]) * YmmResult[64,16]) + (zext(!YmmOpMask[4,1]) * YmmMask[64,16]); YmmResult[80,16] = (zext(YmmOpMask[5,1]) * YmmResult[80,16]) + (zext(!YmmOpMask[5,1]) * YmmMask[80,16]); YmmResult[96,16] = (zext(YmmOpMask[6,1]) * YmmResult[96,16]) + (zext(!YmmOpMask[6,1]) * YmmMask[96,16]); YmmResult[112,16] = (zext(YmmOpMask[7,1]) * YmmResult[112,16]) + (zext(!YmmOpMask[7,1]) * YmmMask[112,16]); YmmResult[128,16] = (zext(YmmOpMask[8,1]) * YmmResult[128,16]) + (zext(!YmmOpMask[8,1]) * YmmMask[128,16]); YmmResult[144,16] = (zext(YmmOpMask[9,1]) * YmmResult[144,16]) + (zext(!YmmOpMask[9,1]) * YmmMask[144,16]); YmmResult[160,16] = (zext(YmmOpMask[10,1]) * YmmResult[160,16]) + (zext(!YmmOpMask[10,1]) * YmmMask[160,16]); YmmResult[176,16] = (zext(YmmOpMask[11,1]) * YmmResult[176,16]) + (zext(!YmmOpMask[11,1]) * YmmMask[176,16]); YmmResult[192,16] = (zext(YmmOpMask[12,1]) * YmmResult[192,16]) + (zext(!YmmOpMask[12,1]) * YmmMask[192,16]); YmmResult[208,16] = (zext(YmmOpMask[13,1]) * YmmResult[208,16]) + (zext(!YmmOpMask[13,1]) * YmmMask[208,16]); YmmResult[224,16] = (zext(YmmOpMask[14,1]) * YmmResult[224,16]) + (zext(!YmmOpMask[14,1]) * YmmMask[224,16]); YmmResult[240,16] = (zext(YmmOpMask[15,1]) * YmmResult[240,16]) + (zext(!YmmOpMask[15,1]) * YmmMask[240,16]); m256 = YmmResult; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115319 define pcodeop vpmovdb_avx512vl ; :VPMOVDB 
XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x31; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovdb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVDB m32^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x31; XmmReg1 ... & m32 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovdb_avx512vl( XmmReg1 ); XmmMask = zext(m32); build XmmOpMask8; m32 = XmmResult[0,32]; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115322 define pcodeop vpmovsdb_avx512vl ; :VPMOVSDB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x21; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsdb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVSDB m32^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x21; XmmReg1 ... & m32 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovsdb_avx512vl( XmmReg1 ); XmmMask = zext(m32); build XmmOpMask8; m32 = XmmResult[0,32]; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115326 define pcodeop vpmovusdb_avx512vl ; :VPMOVUSDB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x11; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusdb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVUSDB m32^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x11; XmmReg1 ... 
& m32 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovusdb_avx512vl( XmmReg1 ); XmmMask = zext(m32); build XmmOpMask8; m32 = XmmResult[0,32]; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115330 :VPMOVDB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x31; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovdb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVDB m64^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x31; YmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovdb_avx512vl( YmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115333 :VPMOVSDB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x21; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsdb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVSDB m64^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x21; YmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovsdb_avx512vl( YmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115337 :VPMOVUSDB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x11; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusdb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVUSDB m64^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x11; YmmReg1 ... 
& m64 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovusdb_avx512vl( YmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115341 define pcodeop vpmovdb_avx512f ; :VPMOVDB XmmReg2^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x31; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovdb_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult); } :VPMOVDB m128^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x31; ZmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovdb_avx512f( ZmmReg1 ); XmmMask = m128; build XmmOpMask8; m128 = XmmResult; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115344 define pcodeop vpmovsdb_avx512f ; :VPMOVSDB XmmReg2^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x21; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsdb_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult); } :VPMOVSDB m128^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x21; ZmmReg1 ... 
& m128 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovsdb_avx512f( ZmmReg1 ); XmmMask = m128; build XmmOpMask8; m128 = XmmResult; } # VPMOVDB/VPMOVSDB/VPMOVUSDB 5-418 PAGE 2242 LINE 115348 define pcodeop vpmovusdb_avx512f ; :VPMOVUSDB XmmReg2^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x11; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusdb_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult); } :VPMOVUSDB m128^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x11; ZmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovusdb_avx512f( ZmmReg1 ); XmmMask = m128; build XmmOpMask8; m128 = XmmResult; } # VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115532 define pcodeop vpmovdw_avx512vl ; :VPMOVDW XmmReg2^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x33; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovdw_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVDW m64^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x33; XmmReg1 ... 
& m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovdw_avx512vl( XmmReg1 ); XmmMask = zext(m64); build XmmOpMask16; m64 = XmmResult[0,64]; } # VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115535 define pcodeop vpmovsdw_avx512vl ; :VPMOVSDW XmmReg2^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x23; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsdw_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVSDW m64^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x23; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovsdw_avx512vl( XmmReg1 ); XmmMask = zext(m64); build XmmOpMask16; m64 = XmmResult[0,64]; } # VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115539 define pcodeop vpmovusdw_avx512vl ; :VPMOVUSDW XmmReg2^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x13; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusdw_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVUSDW m64^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x13; XmmReg1 ... 
& m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	XmmResult = vpmovusdw_avx512vl( XmmReg1 );
	XmmMask = zext(m64);
	build XmmOpMask16;
	m64 = XmmResult[0,64];
}

# VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115543
:VPMOVDW XmmReg2^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x33; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2
{
	# Fix: a 256-bit source narrows 8 dwords to 8 words = 128 bits, so the
	# whole XmmResult is significant (matches the VPMOVSDW/VPMOVUSDW 256-bit
	# forms below); the previous [0,64] slice dropped the upper four words.
	XmmResult = vpmovdw_avx512vl( YmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask16;
	ZmmReg2 = zext(XmmResult);
}

:VPMOVDW m128^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x33; YmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	XmmResult = vpmovdw_avx512vl( YmmReg1 );
	XmmMask = m128;
	build XmmOpMask16;
	m128 = XmmResult;
}

# VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115546
:VPMOVSDW XmmReg2^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x23; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2
{
	XmmResult = vpmovsdw_avx512vl( YmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask16;
	ZmmReg2 = zext(XmmResult);
}

:VPMOVSDW m128^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x23; YmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	XmmResult = vpmovsdw_avx512vl( YmmReg1 );
	XmmMask = m128;
	build XmmOpMask16;
	m128 = XmmResult;
}

# VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115550
:VPMOVUSDW XmmReg2^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x13; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2
{
	XmmResult = vpmovusdw_avx512vl( YmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask16;
	ZmmReg2 = zext(XmmResult);
}

:VPMOVUSDW m128^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x13; YmmReg1 ...
& m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovusdw_avx512vl( YmmReg1 ); XmmMask = m128; build XmmOpMask16; m128 = XmmResult; } # VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115554 define pcodeop vpmovdw_avx512f ; :VPMOVDW YmmReg2^YmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x33; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovdw_avx512f( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask16; ZmmReg2 = zext(YmmResult); } :VPMOVDW m256^YmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x33; ZmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovdw_avx512f( ZmmReg1 ); YmmMask = m256; build YmmOpMask16; m256 = zext(YmmResult); } # VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115557 define pcodeop vpmovsdw_avx512f ; :VPMOVSDW YmmReg2^YmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x23; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovsdw_avx512f( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask16; ZmmReg2 = zext(YmmResult); } :VPMOVSDW m256^YmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x23; ZmmReg1 ... 
& m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovsdw_avx512f( ZmmReg1 ); YmmMask = m256; build YmmOpMask16; m256 = zext(YmmResult); } # VPMOVDW/VPMOVSDW/VPMOVUSDW 5-422 PAGE 2246 LINE 115561 define pcodeop vpmovusdw_avx512f ; :VPMOVUSDW YmmReg2^YmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x13; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovusdw_avx512f( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask16; ZmmReg2 = zext(YmmResult); } :VPMOVUSDW m256^YmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask16; byte=0x13; ZmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovusdw_avx512f( ZmmReg1 ); YmmMask = m256; build YmmOpMask16; m256 = zext(YmmResult); } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114671 define pcodeop vpmovqb_avx512vl ; :VPMOVQB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x32; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovqb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,16]); } :VPMOVQB m16^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x32; XmmReg1 ... 
& m16 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovqb_avx512vl( XmmReg1 ); XmmMask = zext(m16); build XmmOpMask8; m16 = XmmResult[0,16]; } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114674 define pcodeop vpmovsqb_avx512vl ; :VPMOVSQB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x22; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsqb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,16]); } :VPMOVSQB m16^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x22; XmmReg1 ... & m16 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovsqb_avx512vl( XmmReg1 ); XmmMask = zext(m16); build XmmOpMask8; m16 = zext(XmmResult[0,16]); } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114678 define pcodeop vpmovusqb_avx512vl ; :VPMOVUSQB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x12; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,16]); } :VPMOVUSQB m16^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x12; XmmReg1 ... 
& m16 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovusqb_avx512vl( XmmReg1 ); XmmMask = zext(m16); build XmmOpMask8; m16 = zext(XmmResult[0,16]); } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114682 :VPMOVQB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x32; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovqb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVQB m32^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x32; YmmReg1 ... & m32 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovqb_avx512vl( YmmReg1 ); XmmMask = zext(m32); build XmmOpMask8; m32 = XmmResult[0,32]; } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114685 :VPMOVSQB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x22; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsqb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVSQB m32^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x22; YmmReg1 ... & m32 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovsqb_avx512vl( YmmReg1 ); XmmMask = zext(m32); build XmmOpMask8; m32 = XmmResult[0,32]; } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114689 :VPMOVUSQB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x12; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVUSQB m32^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x12; YmmReg1 ... 
& m32 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovusqb_avx512vl( YmmReg1 ); XmmMask = zext(m32); build XmmOpMask8; m32 = XmmResult[0,32]; } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114693 define pcodeop vpmovqb_avx512f ; :VPMOVQB XmmReg2^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x32; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovqb_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVQB m64^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x32; ZmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovqb_avx512f( ZmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114696 define pcodeop vpmovsqb_avx512f ; :VPMOVSQB XmmReg2^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x22; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsqb_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVSQB m64^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x22; ZmmReg1 ... 
& m64 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovsqb_avx512f( ZmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVQB/VPMOVSQB/VPMOVUSQB 5-406 PAGE 2230 LINE 114700 define pcodeop vpmovusqb_avx512f ; :VPMOVUSQB XmmReg2^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x12; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqb_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVUSQB m64^XmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x12; ZmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 11; ] # (TupleType OVM) { XmmResult = vpmovusqb_avx512f( ZmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114887 define pcodeop vpmovqw_avx512vl ; :VPMOVQW XmmReg2^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x34; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovqw_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVQW m32^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x34; XmmReg1 ... 
& m32 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovqw_avx512vl( XmmReg1 ); XmmMask = zext(m32); build XmmOpMask16; m32 = zext(XmmResult[0,32]); } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114890 define pcodeop vpmovsqw_avx512vl ; :VPMOVSQW XmmReg2^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x24; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsqw_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVSQW m32^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x24; XmmReg1 ... & m32 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovsqw_avx512vl( XmmReg1 ); XmmMask = zext(m32); build XmmOpMask16; m32 = zext(XmmResult[0,32]); } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114894 define pcodeop vpmovusqw_avx512vl ; :VPMOVUSQW XmmReg2^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x14; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqw_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,32]); } :VPMOVUSQW m32^XmmOpMask16, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x14; XmmReg1 ... 
& m32 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovusqw_avx512vl( XmmReg1 ); XmmMask = zext(m32); build XmmOpMask16; m32 = zext(XmmResult[0,32]); } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114898 :VPMOVQW XmmReg2^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x34; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovqw_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVQW m64^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x34; YmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovqw_avx512vl( YmmReg1 ); XmmMask = zext(m64); build XmmOpMask16; m64 = zext(XmmResult[0,64]); } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114901 :VPMOVSQW XmmReg2^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x24; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsqw_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVSQW m64^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x24; YmmReg1 ... 
& m64 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovsqw_avx512vl( YmmReg1 ); XmmMask = zext(m64); build XmmOpMask16; m64 = zext(XmmResult[0,64]); } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114905 :VPMOVUSQW XmmReg2^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x14; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqw_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVUSQW m64^XmmOpMask16, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x14; YmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovusqw_avx512vl( YmmReg1 ); XmmMask = zext(m64); build XmmOpMask16; m64 = zext(XmmResult[0,64]); } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114909 define pcodeop vpmovqw_avx512f ; :VPMOVQW XmmReg2^XmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x34; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovqw_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult); } :VPMOVQW m128^XmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x34; ZmmReg1 ... 
& m128 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovqw_avx512f( ZmmReg1 ); XmmMask = m128; build XmmOpMask16; m128 = XmmResult; } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114912 define pcodeop vpmovsqw_avx512f ; :VPMOVSQW XmmReg2^XmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x24; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovsqw_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult); } :VPMOVSQW m128^XmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x24; ZmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM) { XmmResult = vpmovsqw_avx512f( ZmmReg1 ); XmmMask = m128; build XmmOpMask16; m128 = XmmResult; } # VPMOVQW/VPMOVSQW/VPMOVUSQW 5-410 PAGE 2234 LINE 114916 define pcodeop vpmovusqw_avx512f ; :VPMOVUSQW XmmReg2^XmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x14; mod=3 & ZmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqw_avx512f( ZmmReg1 ); XmmMask = XmmReg2; build XmmOpMask16; ZmmReg2 = zext(XmmResult); } :VPMOVUSQW m128^XmmOpMask16, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask16; byte=0x14; ZmmReg1 ... 
& m128 [ evexD8Type = 1; evexTType = 10; ] # (TupleType QVM)
{
	XmmResult = vpmovusqw_avx512f( ZmmReg1 );
	XmmMask = m128;
	build XmmOpMask16;
	m128 = XmmResult;
}

# VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115104
define pcodeop vpmovqd_avx512vl ;
:VPMOVQD XmmReg2^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x35; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2
{
	# Fix: a 128-bit source narrows 2 qwords to 2 dwords = 64 valid bits;
	# slice [0,64] like the VPMOVSQD/VPMOVUSQD 128-bit forms instead of
	# zero-extending the whole (partly stale) XmmResult.
	XmmResult = vpmovqd_avx512vl( XmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask32;
	ZmmReg2 = zext(XmmResult[0,64]);
}

# Fix: the EVEX.128 memory form stores 64 bits (SDM: VPMOVQD xmm1/m64, xmm2),
# not 128 — use m64 to match the VPMOVSQD/VPMOVUSQD 128-bit forms below.
:VPMOVQD m64^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x35; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	XmmResult = vpmovqd_avx512vl( XmmReg1 );
	XmmMask = zext(m64);
	build XmmOpMask32;
	m64 = XmmResult[0,64];
}

# VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115108
define pcodeop vpmovsqd_avx512vl ;
:VPMOVSQD XmmReg2^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x25; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2
{
	XmmResult = vpmovsqd_avx512vl( XmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask32;
	ZmmReg2 = zext(XmmResult[0,64]);
}

:VPMOVSQD m64^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x25; XmmReg1 ...
& m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	XmmResult = vpmovsqd_avx512vl( XmmReg1 );
	XmmMask = zext(m64);
	build XmmOpMask32;
	m64 = XmmResult[0,64];
}

# VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115113
define pcodeop vpmovusqd_avx512vl ;
:VPMOVUSQD XmmReg2^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x15; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2
{
	XmmResult = vpmovusqd_avx512vl( XmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask32;
	ZmmReg2 = zext(XmmResult[0,64]);
}

:VPMOVUSQD m64^XmmOpMask32, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x15; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	# Fix: the pcodeop result must land in XmmResult first (it was written
	# straight to m64, clobbering memory before XmmMask captured the old
	# value for the opmask blend, and leaving XmmResult undefined for the
	# final store). Every sibling constructor assigns to XmmResult.
	XmmResult = vpmovusqd_avx512vl( XmmReg1 );
	XmmMask = zext(m64);
	build XmmOpMask32;
	m64 = XmmResult[0,64];
}

# VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115118
:VPMOVQD XmmReg2^XmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x35; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2
{
	XmmResult = vpmovqd_avx512vl( YmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask32;
	ZmmReg2 = zext(XmmResult);
}

:VPMOVQD m128^XmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x35; YmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM)
{
	XmmResult = vpmovqd_avx512vl( YmmReg1 );
	XmmMask = m128;
	build XmmOpMask32;
	m128 = XmmResult;
}

# VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115122
:VPMOVSQD XmmReg2^XmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x25; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2
{
	XmmResult = vpmovsqd_avx512vl( YmmReg1 );
	XmmMask = XmmReg2;
	build XmmOpMask32;
	ZmmReg2 = zext(XmmResult);
}

:VPMOVSQD m128^XmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x25; YmmReg1 ...
& m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovsqd_avx512vl( YmmReg1 ); XmmMask = m128; build XmmOpMask32; m128 = XmmResult; } # VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115127 :VPMOVUSQD XmmReg2^XmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x15; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovusqd_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask32; ZmmReg2 = zext(XmmResult); } :VPMOVUSQD m128^XmmOpMask32, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask32; byte=0x15; YmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovusqd_avx512vl( YmmReg1 ); XmmMask = m128; build XmmOpMask32; m128 = XmmResult; } # VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115131 define pcodeop vpmovqd_avx512f ; :VPMOVQD YmmReg2^YmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x35; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovqd_avx512f( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask32; ZmmReg2 = zext(YmmResult); } :VPMOVQD m256^YmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x35; ZmmReg1 ... 
& m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovqd_avx512f( ZmmReg1 ); YmmMask = m256; build YmmOpMask32; m256 = zext(YmmResult); } # VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115134 define pcodeop vpmovsqd_avx512f ; :VPMOVSQD YmmReg2^YmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x25; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovsqd_avx512f( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask32; ZmmReg2 = zext(YmmResult); } :VPMOVSQD m256^YmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x25; ZmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovsqd_avx512f( ZmmReg1 ); YmmMask = m256; build YmmOpMask32; m256 = zext(YmmResult); } # VPMOVQD/VPMOVSQD/VPMOVUSQD 5-414 PAGE 2238 LINE 115138 define pcodeop vpmovusqd_avx512f ; :VPMOVUSQD YmmReg2^YmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x15; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovusqd_avx512f( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask32; ZmmReg2 = zext(YmmResult); } :VPMOVUSQD m256^YmmOpMask32, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask32; byte=0x15; ZmmReg1 ... 
& m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovusqd_avx512f( ZmmReg1 ); YmmMask = m256; build YmmOpMask32; m256 = zext(YmmResult); } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115748 define pcodeop vpmovwb_avx512vl ; :VPMOVWB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x30; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovwb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVWB m64^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x30; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovwb_avx512vl( XmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115751 define pcodeop vpmovswb_avx512vl ; :VPMOVSWB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x20; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovswb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVSWB m64^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x20; XmmReg1 ... 
& m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovswb_avx512vl( XmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115754 define pcodeop vpmovuswb_avx512vl ; :VPMOVUSWB XmmReg2^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x10; mod=3 & XmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovuswb_avx512vl( XmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult[0,64]); } :VPMOVUSWB m64^XmmOpMask8, XmmReg1 is $(EVEX_NONE) & $(VEX_L128) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x10; XmmReg1 ... & m64 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovuswb_avx512vl( XmmReg1 ); XmmMask = zext(m64); build XmmOpMask8; m64 = XmmResult[0,64]; } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115757 :VPMOVWB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x30; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovwb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult); } :VPMOVWB m128^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x30; YmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovwb_avx512vl( YmmReg1 ); XmmMask = m128; build XmmOpMask8; m128 = XmmResult; } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115760 :VPMOVSWB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x20; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovswb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult); } :VPMOVSWB m128^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x20; YmmReg1 ... 
& m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovswb_avx512vl( YmmReg1 ); XmmMask = m128; build XmmOpMask8; m128 = XmmResult; } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115763 :VPMOVUSWB XmmReg2^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x10; mod=3 & YmmReg1 & XmmReg2 & ZmmReg2 { XmmResult = vpmovuswb_avx512vl( YmmReg1 ); XmmMask = XmmReg2; build XmmOpMask8; ZmmReg2 = zext(XmmResult); } :VPMOVUSWB m128^XmmOpMask8, YmmReg1 is $(EVEX_NONE) & $(VEX_L256) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & XmmOpMask8; byte=0x10; YmmReg1 ... & m128 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { XmmResult = vpmovuswb_avx512vl( YmmReg1 ); XmmMask = m128; build XmmOpMask8; m128 = XmmResult; } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115766 define pcodeop vpmovwb_avx512bw ; :VPMOVWB YmmReg2^YmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x30; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovwb_avx512bw( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask8; ZmmReg2 = zext(YmmResult); } :VPMOVWB m256^YmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x30; ZmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovwb_avx512bw( ZmmReg1 ); YmmMask = m256; build YmmOpMask8; m256 = zext(YmmResult); } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115769 define pcodeop vpmovswb_avx512bw ; :VPMOVSWB YmmReg2^YmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x20; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovswb_avx512bw( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask8; ZmmReg2 = zext(YmmResult); } :VPMOVSWB m256^YmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x20; ZmmReg1 ... 
& m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovswb_avx512bw( ZmmReg1 ); YmmMask = m256; build YmmOpMask8; m256 = zext(YmmResult); } # VPMOVWB/VPMOVSWB/VPMOVUSWB 5-426 PAGE 2250 LINE 115772 define pcodeop vpmovuswb_avx512bw ; :VPMOVUSWB YmmReg2^YmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x10; mod=3 & ZmmReg1 & YmmReg2 & ZmmReg2 { YmmResult = vpmovuswb_avx512bw( ZmmReg1 ); YmmMask = YmmReg2; build YmmOpMask8; ZmmReg2 = zext(YmmResult); } :VPMOVUSWB m256^YmmOpMask8, ZmmReg1 is $(EVEX_NONE) & $(EVEX_L512) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & YmmOpMask8; byte=0x10; ZmmReg1 ... & m256 [ evexD8Type = 1; evexTType = 9; ] # (TupleType HVM) { YmmResult = vpmovuswb_avx512bw( ZmmReg1 ); YmmMask = m256; build YmmOpMask8; m256 = zext(YmmResult); } ================================================ FILE: pypcode/processors/x86/data/languages/avx_manual.sinc ================================================ # MOVAPD 4-45 PAGE 1165 LINE 60844 :VMOVAPD XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x28; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { XmmReg1 = XmmReg2_m128; ZmmReg1 = zext(XmmReg1); } # MOVAPD 4-45 PAGE 1165 LINE 60846 :VMOVAPD XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x29; mod=3 & XmmReg1 & (XmmReg2 & ZmmReg2) { XmmReg2 = XmmReg1; ZmmReg2 = zext(XmmReg2); } # MOVAPD 4-45 PAGE 1165 LINE 60846 :VMOVAPD m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x29; XmmReg1 ... & m128 { m128 = XmmReg1; } # MOVAPD 4-45 PAGE 1165 LINE 60848 :VMOVAPD YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x28; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { YmmReg1 = YmmReg2_m256; ZmmReg1 = zext(YmmReg1); } # MOVAPD 4-45 PAGE 1165 LINE 60850 :VMOVAPD YmmReg2, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x29; mod=3 & YmmReg1 & (YmmReg2 & ZmmReg2) { YmmReg2 = YmmReg1; ZmmReg2 = zext(YmmReg2); } :VMOVAPD m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x29; YmmReg1 ... & m256 { m256 = YmmReg1; } # MOVAPS 4-49 PAGE 1169 LINE 61039 :VMOVAPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x28; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { XmmReg1 = XmmReg2_m128; ZmmReg1 = zext(XmmReg1); } # MOVAPS 4-49 PAGE 1169 LINE 61041 :VMOVAPS XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x29; mod=3 & XmmReg1 & (XmmReg2 & ZmmReg2) { XmmReg2 = XmmReg1; ZmmReg2 = zext(XmmReg2); } :VMOVAPS m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x29; XmmReg1 ... & m128 { m128 = XmmReg1; } # MOVAPS 4-49 PAGE 1169 LINE 61043 :VMOVAPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x28; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { YmmReg1 = YmmReg2_m256; ZmmReg1 = zext(YmmReg2_m256); } # MOVAPS 4-49 PAGE 1169 LINE 61045 :VMOVAPS YmmReg2, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x29; mod=3 & YmmReg1 & (YmmReg2 & ZmmReg2) { YmmReg2 = YmmReg1; ZmmReg2 = zext(YmmReg2); } :VMOVAPS m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x29; YmmReg1 ... & m256 { m256 = YmmReg1; } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61667 # Note: we do not model the exception generated if VMOVDQA is used with a memory operand which is not 16-bye aligned :VMOVDQA XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { XmmReg1 = XmmReg2_m128; ZmmReg1 = zext(XmmReg1); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61669 :VMOVDQA XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; mod=3 & XmmReg1 & (XmmReg2 & ZmmReg2) { ZmmReg2 = zext(XmmReg1); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61669 :VMOVDQA m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; XmmReg1 ... & m128 { m128 = XmmReg1; } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61671 :VMOVDQA YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x6F; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { YmmReg1 = YmmReg2_m256; ZmmReg1 = zext(YmmReg1); } # MOVDQA,VMOVDQA32/64 4-62 PAGE 1182 LINE 61673 :VMOVDQA YmmReg2, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; mod=3 & YmmReg1 & (YmmReg2 & ZmmReg2) { YmmReg2 = YmmReg1; ZmmReg2 = zext(YmmReg2); } :VMOVDQA m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0x7F; YmmReg1 ... & m256 { m256 = YmmReg1; } # MOVSD 4-111 PAGE 1231 LINE 63970 :VMOVSD XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x10; XmmReg1 & ZmmReg1 & (mod=0x3 & XmmReg2) { local tmpa:8 = XmmReg2[0,64]; local tmpb:8 = vexVVVV_XmmReg[64,64]; XmmReg1[0,64] = tmpa; XmmReg1[64,64] = tmpb; ZmmReg1 = zext(XmmReg1); } # MOVSD 4-111 PAGE 1231 LINE 63972 :VMOVSD XmmReg1, m64 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (XmmReg1 & YmmReg1 & ZmmReg1) ... 
& m64 { XmmReg1[0,64] = m64; XmmReg1[64,64] = 0; ZmmReg1 = zext(XmmReg1); } # MOVSD 4-111 PAGE 1231 LINE 63974 :VMOVSD XmmReg2, vexVVVV_XmmReg, XmmReg1 is $(VEX_NDS) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x11; XmmReg1 & (mod=0x3 & (XmmReg2 & ZmmReg2)) { local tmpa:8 = XmmReg1[0,64]; local tmpb:8 = vexVVVV_XmmReg[64,64]; XmmReg2[0,64] = tmpa; XmmReg2[64,64] = tmpb; ZmmReg2 = zext(XmmReg2); } # MOVSD 4-111 PAGE 1231 LINE 63976 :VMOVSD m64, XmmReg1 is $(VEX_NONE) & $(VEX_LIG) & $(VEX_PRE_F2) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 ... & m64 { m64 = XmmReg1[0,64]; } # MOVUPS 4-130 PAGE 1250 LINE 64872 :VMOVUPS XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { XmmReg1 = XmmReg2_m128; ZmmReg1 = zext(XmmReg1); } # MOVUPS 4-130 PAGE 1250 LINE 64874 # break this into two constructors to handle the zext for the register destination case :VMOVUPS XmmReg2, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 & (mod = 3 & XmmReg2 & ZmmReg2) { XmmReg2 = XmmReg1; ZmmReg2 = zext(XmmReg2); } # MOVUPS 4-130 PAGE 1250 LINE 64874 :VMOVUPS m128, XmmReg1 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x11; XmmReg1 ... & m128 { m128 = XmmReg1; } # MOVUPS 4-130 PAGE 1250 LINE 64876 :VMOVUPS YmmReg1, YmmReg2_m256 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x10; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { YmmReg1 = YmmReg2_m256; ZmmReg1 = zext(YmmReg1); } # MOVUPS 4-130 PAGE 1250 LINE 64878 # TODO in general, what do we do with the zext of only the register case; needs investigation :VMOVUPS YmmReg2, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x11; mod=3 & YmmReg1 & (YmmReg2 & ZmmReg2) { YmmReg2 = YmmReg1; ZmmReg2 = zext(YmmReg2); } :VMOVUPS m256, YmmReg1 is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x11; YmmReg1 ... & m256 { m256 = YmmReg1; } # PCMPEQQ 4-250 PAGE 1370 LINE 71169 :VPCMPEQQ XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x29; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { XmmReg1[0,64] = zext(vexVVVV_XmmReg[0,64] == XmmReg2_m128[0,64]) * 0xffffffffffffffff:8; XmmReg1[64,64] = zext(vexVVVV_XmmReg[64,64] == XmmReg2_m128[64,64]) * 0xffffffffffffffff:8; ZmmReg1 = zext(XmmReg1); } # PMOVMSKB 4-338 PAGE 1458 LINE 75651 :VPMOVMSKB Reg32, XmmReg2 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F) & $(VEX_WIG); byte=0xD7; Reg32 & (mod=0x3 & XmmReg2) & check_Reg32_dest { local byte_mask:2 = 0:2; byte_mask[0,1] = XmmReg2[7,1]; byte_mask[1,1] = XmmReg2[15,1]; byte_mask[2,1] = XmmReg2[23,1]; byte_mask[3,1] = XmmReg2[31,1]; byte_mask[4,1] = XmmReg2[39,1]; byte_mask[5,1] = XmmReg2[47,1]; byte_mask[6,1] = XmmReg2[55,1]; byte_mask[7,1] = XmmReg2[63,1]; byte_mask[8,1] = XmmReg2[71,1]; byte_mask[9,1] = XmmReg2[79,1]; byte_mask[10,1] = XmmReg2[87,1]; byte_mask[11,1] = XmmReg2[95,1]; byte_mask[12,1] = XmmReg2[103,1]; byte_mask[13,1] = XmmReg2[111,1]; byte_mask[14,1] = XmmReg2[119,1]; byte_mask[15,1] = XmmReg2[127,1]; Reg32 = zext(byte_mask); build check_Reg32_dest; } # VZEROALL 5-563 PAGE 2387 LINE 122405 :VZEROALL is $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x77 { ZMM0[0,64] = 0:8; ZMM0[64,64] = 0:8; ZMM0[128,64] = 0:8; ZMM0[192,64] = 0:8; ZMM0[256,64] = 0:8; 
ZMM0[320,64] = 0:8; ZMM0[384,64] = 0:8; ZMM0[448,64] = 0:8; ZMM1[0,64] = 0:8; ZMM1[64,64] = 0:8; ZMM1[128,64] = 0:8; ZMM1[192,64] = 0:8; ZMM1[256,64] = 0:8; ZMM1[320,64] = 0:8; ZMM1[384,64] = 0:8; ZMM1[448,64] = 0:8; ZMM2[0,64] = 0:8; ZMM2[64,64] = 0:8; ZMM2[128,64] = 0:8; ZMM2[192,64] = 0:8; ZMM2[256,64] = 0:8; ZMM2[320,64] = 0:8; ZMM2[384,64] = 0:8; ZMM2[448,64] = 0:8; ZMM3[0,64] = 0:8; ZMM3[64,64] = 0:8; ZMM3[128,64] = 0:8; ZMM3[192,64] = 0:8; ZMM3[256,64] = 0:8; ZMM3[320,64] = 0:8; ZMM3[384,64] = 0:8; ZMM3[448,64] = 0:8; ZMM4[0,64] = 0:8; ZMM4[64,64] = 0:8; ZMM4[128,64] = 0:8; ZMM4[192,64] = 0:8; ZMM4[256,64] = 0:8; ZMM4[320,64] = 0:8; ZMM4[384,64] = 0:8; ZMM4[448,64] = 0:8; ZMM5[0,64] = 0:8; ZMM5[64,64] = 0:8; ZMM5[128,64] = 0:8; ZMM5[192,64] = 0:8; ZMM5[256,64] = 0:8; ZMM5[320,64] = 0:8; ZMM5[384,64] = 0:8; ZMM5[448,64] = 0:8; ZMM6[0,64] = 0:8; ZMM6[64,64] = 0:8; ZMM6[128,64] = 0:8; ZMM6[192,64] = 0:8; ZMM6[256,64] = 0:8; ZMM6[320,64] = 0:8; ZMM6[384,64] = 0:8; ZMM6[448,64] = 0:8; ZMM7[0,64] = 0:8; ZMM7[64,64] = 0:8; ZMM7[128,64] = 0:8; ZMM7[192,64] = 0:8; ZMM7[256,64] = 0:8; ZMM7[320,64] = 0:8; ZMM7[384,64] = 0:8; ZMM7[448,64] = 0:8; } @ifdef IA64 :VZEROALL is $(LONGMODE_ON) & $(VEX_NONE) & $(VEX_L256) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x77 { ZMM0[0,64] = 0:8; ZMM0[64,64] = 0:8; ZMM0[128,64] = 0:8; ZMM0[192,64] = 0:8; ZMM0[256,64] = 0:8; ZMM0[320,64] = 0:8; ZMM0[384,64] = 0:8; ZMM0[448,64] = 0:8; ZMM1[0,64] = 0:8; ZMM1[64,64] = 0:8; ZMM1[128,64] = 0:8; ZMM1[192,64] = 0:8; ZMM1[256,64] = 0:8; ZMM1[320,64] = 0:8; ZMM1[384,64] = 0:8; ZMM1[448,64] = 0:8; ZMM2[0,64] = 0:8; ZMM2[64,64] = 0:8; ZMM2[128,64] = 0:8; ZMM2[192,64] = 0:8; ZMM2[256,64] = 0:8; ZMM2[320,64] = 0:8; ZMM2[384,64] = 0:8; ZMM2[448,64] = 0:8; ZMM3[0,64] = 0:8; ZMM3[64,64] = 0:8; ZMM3[128,64] = 0:8; ZMM3[192,64] = 0:8; ZMM3[256,64] = 0:8; ZMM3[320,64] = 0:8; ZMM3[384,64] = 0:8; ZMM3[448,64] = 0:8; ZMM4[0,64] = 0:8; ZMM4[64,64] = 0:8; ZMM4[128,64] = 0:8; ZMM4[192,64] = 0:8; 
ZMM4[256,64] = 0:8; ZMM4[320,64] = 0:8; ZMM4[384,64] = 0:8; ZMM4[448,64] = 0:8; ZMM5[0,64] = 0:8; ZMM5[64,64] = 0:8; ZMM5[128,64] = 0:8; ZMM5[192,64] = 0:8; ZMM5[256,64] = 0:8; ZMM5[320,64] = 0:8; ZMM5[384,64] = 0:8; ZMM5[448,64] = 0:8; ZMM6[0,64] = 0:8; ZMM6[64,64] = 0:8; ZMM6[128,64] = 0:8; ZMM6[192,64] = 0:8; ZMM6[256,64] = 0:8; ZMM6[320,64] = 0:8; ZMM6[384,64] = 0:8; ZMM6[448,64] = 0:8; ZMM7[0,64] = 0:8; ZMM7[64,64] = 0:8; ZMM7[128,64] = 0:8; ZMM7[192,64] = 0:8; ZMM7[256,64] = 0:8; ZMM7[320,64] = 0:8; ZMM7[384,64] = 0:8; ZMM7[448,64] = 0:8; ZMM8[0,64] = 0:8; ZMM8[64,64] = 0:8; ZMM8[128,64] = 0:8; ZMM8[192,64] = 0:8; ZMM8[256,64] = 0:8; ZMM8[320,64] = 0:8; ZMM8[384,64] = 0:8; ZMM8[448,64] = 0:8; ZMM9[0,64] = 0:8; ZMM9[64,64] = 0:8; ZMM9[128,64] = 0:8; ZMM9[192,64] = 0:8; ZMM9[256,64] = 0:8; ZMM9[320,64] = 0:8; ZMM9[384,64] = 0:8; ZMM9[448,64] = 0:8; ZMM10[0,64] = 0:8; ZMM10[64,64] = 0:8; ZMM10[128,64] = 0:8; ZMM10[192,64] = 0:8; ZMM10[256,64] = 0:8; ZMM10[320,64] = 0:8; ZMM10[384,64] = 0:8; ZMM10[448,64] = 0:8; ZMM11[0,64] = 0:8; ZMM11[64,64] = 0:8; ZMM11[128,64] = 0:8; ZMM11[192,64] = 0:8; ZMM11[256,64] = 0:8; ZMM11[320,64] = 0:8; ZMM11[384,64] = 0:8; ZMM11[448,64] = 0:8; ZMM12[0,64] = 0:8; ZMM12[64,64] = 0:8; ZMM12[128,64] = 0:8; ZMM12[192,64] = 0:8; ZMM12[256,64] = 0:8; ZMM12[320,64] = 0:8; ZMM12[384,64] = 0:8; ZMM12[448,64] = 0:8; ZMM13[0,64] = 0:8; ZMM13[64,64] = 0:8; ZMM13[128,64] = 0:8; ZMM13[192,64] = 0:8; ZMM13[256,64] = 0:8; ZMM13[320,64] = 0:8; ZMM13[384,64] = 0:8; ZMM13[448,64] = 0:8; ZMM14[0,64] = 0:8; ZMM14[64,64] = 0:8; ZMM14[128,64] = 0:8; ZMM14[192,64] = 0:8; ZMM14[256,64] = 0:8; ZMM14[320,64] = 0:8; ZMM14[384,64] = 0:8; ZMM14[448,64] = 0:8; ZMM15[0,64] = 0:8; ZMM15[64,64] = 0:8; ZMM15[128,64] = 0:8; ZMM15[192,64] = 0:8; ZMM15[256,64] = 0:8; ZMM15[320,64] = 0:8; ZMM15[384,64] = 0:8; ZMM15[448,64] = 0:8; } @endif # VZEROUPPER 5-565 PAGE 2389 LINE 122480 :VZEROUPPER is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x77 
{ ZMM0[128,64] = 0:8; ZMM0[192,64] = 0:8; ZMM0[256,64] = 0:8; ZMM0[320,64] = 0:8; ZMM0[384,64] = 0:8; ZMM0[448,64] = 0:8; ZMM1[128,64] = 0:8; ZMM1[192,64] = 0:8; ZMM1[256,64] = 0:8; ZMM1[320,64] = 0:8; ZMM1[384,64] = 0:8; ZMM1[448,64] = 0:8; ZMM2[128,64] = 0:8; ZMM2[192,64] = 0:8; ZMM2[256,64] = 0:8; ZMM2[320,64] = 0:8; ZMM2[384,64] = 0:8; ZMM2[448,64] = 0:8; ZMM3[128,64] = 0:8; ZMM3[192,64] = 0:8; ZMM3[256,64] = 0:8; ZMM3[320,64] = 0:8; ZMM3[384,64] = 0:8; ZMM3[448,64] = 0:8; ZMM4[128,64] = 0:8; ZMM4[192,64] = 0:8; ZMM4[256,64] = 0:8; ZMM4[320,64] = 0:8; ZMM4[384,64] = 0:8; ZMM4[448,64] = 0:8; ZMM5[128,64] = 0:8; ZMM5[192,64] = 0:8; ZMM5[256,64] = 0:8; ZMM5[320,64] = 0:8; ZMM5[384,64] = 0:8; ZMM5[448,64] = 0:8; ZMM6[128,64] = 0:8; ZMM6[192,64] = 0:8; ZMM6[256,64] = 0:8; ZMM6[320,64] = 0:8; ZMM6[384,64] = 0:8; ZMM6[448,64] = 0:8; ZMM7[128,64] = 0:8; ZMM7[192,64] = 0:8; ZMM7[256,64] = 0:8; ZMM7[320,64] = 0:8; ZMM7[384,64] = 0:8; ZMM7[448,64] = 0:8; } @ifdef IA64 :VZEROUPPER is $(LONGMODE_ON) & $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_NONE) & $(VEX_0F) & $(VEX_WIG); byte=0x77 { ZMM0[128,64] = 0:8; ZMM0[192,64] = 0:8; ZMM0[256,64] = 0:8; ZMM0[320,64] = 0:8; ZMM0[384,64] = 0:8; ZMM0[448,64] = 0:8; ZMM1[128,64] = 0:8; ZMM1[192,64] = 0:8; ZMM1[256,64] = 0:8; ZMM1[320,64] = 0:8; ZMM1[384,64] = 0:8; ZMM1[448,64] = 0:8; ZMM2[128,64] = 0:8; ZMM2[192,64] = 0:8; ZMM2[256,64] = 0:8; ZMM2[320,64] = 0:8; ZMM2[384,64] = 0:8; ZMM2[448,64] = 0:8; ZMM3[128,64] = 0:8; ZMM3[192,64] = 0:8; ZMM3[256,64] = 0:8; ZMM3[320,64] = 0:8; ZMM3[384,64] = 0:8; ZMM3[448,64] = 0:8; ZMM4[128,64] = 0:8; ZMM4[192,64] = 0:8; ZMM4[256,64] = 0:8; ZMM4[320,64] = 0:8; ZMM4[384,64] = 0:8; ZMM4[448,64] = 0:8; ZMM5[128,64] = 0:8; ZMM5[192,64] = 0:8; ZMM5[256,64] = 0:8; ZMM5[320,64] = 0:8; ZMM5[384,64] = 0:8; ZMM5[448,64] = 0:8; ZMM6[128,64] = 0:8; ZMM6[192,64] = 0:8; ZMM6[256,64] = 0:8; ZMM6[320,64] = 0:8; ZMM6[384,64] = 0:8; ZMM6[448,64] = 0:8; ZMM7[128,64] = 0:8; ZMM7[192,64] = 0:8; ZMM7[256,64] = 0:8; 
ZMM7[320,64] = 0:8; ZMM7[384,64] = 0:8; ZMM7[448,64] = 0:8; ZMM8[128,64] = 0:8; ZMM8[192,64] = 0:8; ZMM8[256,64] = 0:8; ZMM8[320,64] = 0:8; ZMM8[384,64] = 0:8; ZMM8[448,64] = 0:8; ZMM9[128,64] = 0:8; ZMM9[192,64] = 0:8; ZMM9[256,64] = 0:8; ZMM9[320,64] = 0:8; ZMM9[384,64] = 0:8; ZMM9[448,64] = 0:8; ZMM10[128,64] = 0:8; ZMM10[192,64] = 0:8; ZMM10[256,64] = 0:8; ZMM10[320,64] = 0:8; ZMM10[384,64] = 0:8; ZMM10[448,64] = 0:8; ZMM11[128,64] = 0:8; ZMM11[192,64] = 0:8; ZMM11[256,64] = 0:8; ZMM11[320,64] = 0:8; ZMM11[384,64] = 0:8; ZMM11[448,64] = 0:8; ZMM12[128,64] = 0:8; ZMM12[192,64] = 0:8; ZMM12[256,64] = 0:8; ZMM12[320,64] = 0:8; ZMM12[384,64] = 0:8; ZMM12[448,64] = 0:8; ZMM13[128,64] = 0:8; ZMM13[192,64] = 0:8; ZMM13[256,64] = 0:8; ZMM13[320,64] = 0:8; ZMM13[384,64] = 0:8; ZMM13[448,64] = 0:8; ZMM14[128,64] = 0:8; ZMM14[192,64] = 0:8; ZMM14[256,64] = 0:8; ZMM14[320,64] = 0:8; ZMM14[384,64] = 0:8; ZMM14[448,64] = 0:8; ZMM15[128,64] = 0:8; ZMM15[192,64] = 0:8; ZMM15[256,64] = 0:8; ZMM15[320,64] = 0:8; ZMM15[384,64] = 0:8; ZMM15[448,64] = 0:8; } @endif ================================================ FILE: pypcode/processors/x86/data/languages/bmi1.sinc ================================================ macro tzcntflags(input, output) { ZF = (output == 0); CF = (input == 0); # OF, SF, PF, AF are undefined } #### #### BMI1 instructions #### # TODO remove ANDN from ia.sinc ????? :ANDN Reg32, vexVVVV_r32, rm32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf2; Reg32 ... & check_Reg32_dest ... &rm32 { Reg32 = ~(vexVVVV_r32) & rm32; resultflags(Reg32); OF = 0; CF = 0; build check_Reg32_dest; } @ifdef IA64 # TODO remove ANDN from ia.sinc ????? :ANDN Reg64, vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf2; Reg64 ... 
& rm64 { Reg64 = ~(vexVVVV_r64) & rm64; resultflags(Reg64); OF = 0; CF = 0; } @endif :BEXTR Reg32, rm32, vexVVVV_r32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf7; Reg32 ... & check_Reg32_dest ... & rm32 { sourceTmp:1 = vexVVVV_r32[0,8]; lengthTmp:1 = vexVVVV_r32[8,8]; Reg32 = (rm32 >> sourceTmp) & ((1 << lengthTmp) - 1); build check_Reg32_dest; ZF = (Reg32 == 0); OF = 0; CF = 0; # AF, SF, and PF are undefined } @ifdef IA64 :BEXTR Reg64, rm64, vexVVVV_r64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf7; Reg64 ... & rm64 { sourceTmp:1 = vexVVVV_r64[0,8]; lengthTmp:1 = vexVVVV_r64[8,8]; Reg64 = (rm64 >> sourceTmp) & ((1 << lengthTmp) - 1); ZF = (Reg64 == 0); OF = 0; CF = 0; # AF, SF, and PF are undefined } @endif :BLSI vexVVVV_r32, rm32 is $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf3; reg_opcode=3 ... & check_vexVVVV_r32_dest ... & rm32 { vexVVVV_r32 = -rm32 & rm32; build check_vexVVVV_r32_dest; ZF = (vexVVVV_r32 == 0); SF = (vexVVVV_r32 s< 0); CF = (rm32 != 0); OF = 0; # AF and PF are undefined } @ifdef IA64 :BLSI vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf3; reg_opcode=3 ... & rm64 { vexVVVV_r64 = -rm64 & rm64; ZF = (vexVVVV_r64 == 0); SF = (vexVVVV_r64 s< 0); CF = (rm64 != 0); OF = 0; # AF and PF are undefined } @endif :BLSMSK vexVVVV_r32, rm32 is $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf3; reg_opcode=2 ... & check_vexVVVV_r32_dest ... &rm32 { CF = (rm32 == 0); vexVVVV_r32 = (rm32 - 1) ^ rm32; SF = (vexVVVV_r32 s< 0); build check_vexVVVV_r32_dest; ZF = 0; OF = 0; # AF and PF are undefined } @ifdef IA64 :BLSMSK vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf3; reg_opcode=2 ... 
& rm64
{
	CF = (rm64 == 0);
	vexVVVV_r64 = (rm64 - 1) ^ rm64;
	SF = (vexVVVV_r64 s< 0);
	ZF = 0;
	OF = 0;
	# AF and PF are undefined
}
@endif

:BLSR vexVVVV_r32, rm32 is $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf3; reg_opcode=1 ... & check_vexVVVV_r32_dest ... &rm32
{
	CF = (rm32 == 0);
	vexVVVV_r32 = (rm32 - 1) & rm32;
	build check_vexVVVV_r32_dest;
	ZF = (vexVVVV_r32 == 0);
	SF = (vexVVVV_r32 s< 0);
	OF = 0;
	# AF and PF are undefined
}

@ifdef IA64
:BLSR vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf3; reg_opcode=1 ... & rm64
{
	CF = (rm64 == 0);
	vexVVVV_r64 = (rm64 - 1) & rm64;
	ZF = (vexVVVV_r64 == 0);
	SF = (vexVVVV_r64 s< 0);
	OF = 0;
	# AF and PF are undefined
}
@endif

# not as documented in manual; requires PRE_66 prefix to get 16-bit operation
:TZCNT Reg16, rm16 is vexMode=0 & opsize=0 & $(PRE_66) & $(PRE_F3) & byte=0x0F; byte=0xBC; Reg16 ... & rm16
{
	countTmp:2 = 0;
	inputTmp:2 = rm16;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid
	# SLEIGH (the <label> tokens had been lost). Count trailing zeros bit by bit.
	<scan>
	if ((inputTmp & 1) != 0) goto <done>;
	countTmp = countTmp + 1;
	# shifting in 1s at the top bounds the count at 16 for a zero input
	inputTmp = (inputTmp >> 1) | 0x8000;
	goto <scan>;
	<done>
	tzcntflags(rm16, countTmp);
	Reg16 = countTmp;
}

:TZCNT Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0F; byte=0xBC; Reg32 ... & check_Reg32_dest ... & rm32
{
	countTmp:4 = 0;
	inputTmp:4 = rm32;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid SLEIGH
	<scan>
	if ((inputTmp & 1) != 0) goto <done>;
	countTmp = countTmp + 1;
	# shifting in 1s at the top bounds the count at 32 for a zero input
	inputTmp = (inputTmp >> 1) | 0x80000000;
	goto <scan>;
	<done>
	tzcntflags(rm32, countTmp);
	Reg32 = countTmp;
	build check_Reg32_dest;
}

@ifdef IA64
:TZCNT Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & $(REX_W) & byte=0x0F; byte=0xBC; Reg64 ...
& rm64
{
	countTmp:8 = 0;
	inputTmp:8 = rm64;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid
	# SLEIGH (the <label> tokens had been lost). Count trailing zeros bit by bit.
	<scan>
	if ((inputTmp & 1) != 0) goto <done>;
	countTmp = countTmp + 1;
	# shifting in 1s at the top bounds the count at 64 for a zero input
	inputTmp = (inputTmp >> 1) | 0x8000000000000000;
	goto <scan>;
	<done>
	tzcntflags(rm64, countTmp);
	Reg64 = countTmp;
}
@endif

================================================
FILE: pypcode/processors/x86/data/languages/bmi2.sinc
================================================

####
#### BMI2 instructions
####

:BZHI Reg32, rm32, vexVVVV_r32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf5; Reg32 ... & check_Reg32_dest ... & rm32
{
	indexTmp:1 = vexVVVV_r32:1;
	# saturate index amount to 32; operand size or higher does not clear any bits
	shift:1 = (indexTmp <= 32) * (32 - indexTmp);
	# clear the upper bits
	Reg32 = (rm32 << shift) >> shift;
	build check_Reg32_dest;
	ZF = (Reg32 == 0);
	SF = (Reg32 s< 0);
	CF = indexTmp > 31;
	OF = 0;
	# AF and PF are undefined
}

@ifdef IA64
:BZHI Reg64, rm64, vexVVVV_r64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_NONE) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf5; Reg64 ... & rm64
{
	indexTmp:1 = vexVVVV_r64:1;
	# saturate index amount to 64; operand size or higher does not clear any bits
	shift:1 = (indexTmp <= 64) * (64 - indexTmp);
	# clear the upper bits
	Reg64 = (rm64 << shift) >> shift;
	ZF = (Reg64 == 0);
	SF = (Reg64 s< 0);
	CF = indexTmp > 63;
	OF = 0;
	# AF and PF are undefined
}
@endif

:MULX Reg32, vexVVVV_r32, rm32 is $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf6; Reg32 ... & check_Reg32_dest ... & check_vexVVVV_r32_dest ... & rm32
{
	temp:8 = zext(EDX) * zext(rm32);
	# low half of the 64-bit product
	vexVVVV_r32 = temp:4;
	build check_vexVVVV_r32_dest;
	# high half of the product (subpiece at byte offset 4)
	Reg32 = temp(4);
	build check_Reg32_dest;
}

@ifdef IA64
:MULX Reg64, vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDD) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf6; Reg64 ...
& rm64
{
	temp:16 = zext(RDX) * zext(rm64);
	# low half of the 128-bit product
	vexVVVV_r64 = temp:8;
	# high half of the product (subpiece at byte offset 8)
	Reg64 = temp(8);
}
@endif

:PDEP Reg32, vexVVVV_r32, rm32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf5; Reg32 ... & check_Reg32_dest ... & rm32
{
	sourceTmp:4 = vexVVVV_r32;
	indexTmp:4 = 1;
	resultTmp:4 = 0;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid
	# SLEIGH (the <label> tokens had been lost). Walk mask bits LSB to MSB,
	# depositing the next low source bit at each set mask position.
	<loop>
	maskBit:4 = rm32 & indexTmp;
	if (maskBit == 0) goto <next>;
	resultTmp = resultTmp | (maskBit * (sourceTmp & 1));
	sourceTmp = sourceTmp >> 1;
	<next>
	indexTmp = indexTmp << 1;
	if (indexTmp != 0) goto <loop>;
	Reg32 = resultTmp;
	build check_Reg32_dest;
}

@ifdef IA64
:PDEP Reg64, vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf5; Reg64 ... & rm64
{
	sourceTmp:8 = vexVVVV_r64;
	indexTmp:8 = 1;
	resultTmp:8 = 0;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid SLEIGH
	<loop>
	maskBit:8 = rm64 & indexTmp;
	if (maskBit == 0) goto <next>;
	resultTmp = resultTmp | (maskBit * (sourceTmp & 1));
	sourceTmp = sourceTmp >> 1;
	<next>
	indexTmp = indexTmp << 1;
	if (indexTmp != 0) goto <loop>;
	Reg64 = resultTmp;
}
@endif

:PEXT Reg32, vexVVVV_r32, rm32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf5; Reg32 ... & check_Reg32_dest ... & rm32
{
	indexTmp:4 = 0x80000000;
	resultTmp:4 = 0;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid
	# SLEIGH (the <label> tokens had been lost). Walk mask bits MSB to LSB,
	# gathering the selected source bits into the low end of the result.
	<loop>
	maskBit:4 = rm32 & indexTmp;
	if (maskBit == 0) goto <next>;
	resultTmp = (resultTmp << 1) | zext((maskBit & vexVVVV_r32) != 0);
	<next>
	indexTmp = indexTmp >> 1;
	if (indexTmp != 0) goto <loop>;
	# NOTE(review): build precedes the assignment here, unlike PDEP above; presumably
	# equivalent since the zero-extension's upper half is zero either way -- verify
	# against check_Reg32_dest's definition before reordering.
	build check_Reg32_dest;
	Reg32 = resultTmp;
}

@ifdef IA64
:PEXT Reg64, vexVVVV_r64, rm64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf5; Reg64 ... & rm64
{
	indexTmp:8 = 0x8000000000000000;
	resultTmp:8 = 0;
	# FIX: restored the loop labels; the bare "goto ;" statements here were invalid SLEIGH
	<loop>
	maskBit:8 = rm64 & indexTmp;
	if (maskBit == 0) goto <next>;
	resultTmp = (resultTmp << 1) | zext((maskBit & vexVVVV_r64) != 0);
	<next>
	indexTmp = indexTmp >> 1;
	if (indexTmp != 0) goto <loop>;
	Reg64 = resultTmp;
}
@endif

:RORX Reg32, rm32, imm8 is $(VEX_NONE) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F3A) & $(VEX_W0); byte=0xf0; Reg32 ... & check_Reg32_dest ...
& rm32; imm8 { shiftTmp:1 = (imm8:1 & 0x1F); Reg32 = (rm32 >> shiftTmp) | ( rm32 << (32 - shiftTmp)); build check_Reg32_dest; } @ifdef IA64 :RORX Reg64, rm64, imm8 is $(LONGMODE_ON) & $(VEX_NONE) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F3A) & $(VEX_W1); byte=0xf0; Reg64 ... & rm64; imm8 { shiftTmp:1 = (imm8:1 & 0x3F); Reg64 = (rm64 >> shiftTmp) | ( rm64 << (64 - shiftTmp)); } @endif :SARX Reg32, rm32, vexVVVV_r32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf7; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = rm32 s>> (vexVVVV_r32 & 0x0000001F); build check_Reg32_dest; } @ifdef IA64 :SARX Reg64, rm64, vexVVVV_r64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F3) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf7; Reg64 ... & rm64 { Reg64 = rm64 s>> (vexVVVV_r64 & 0x000000000000003F); } @endif :SHLX Reg32, rm32, vexVVVV_r32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf7; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = rm32 << (vexVVVV_r32 & 0x0000001F); build check_Reg32_dest; } @ifdef IA64 :SHLX Reg64, rm64, vexVVVV_r64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf7; Reg64 ... & rm64 { Reg64 = rm64 << (vexVVVV_r64 & 0x000000000000003F); } @endif :SHRX Reg32, rm32, vexVVVV_r32 is $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_r32; byte=0xf7; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = rm32 >> (vexVVVV_r32 & 0x0000001F); build check_Reg32_dest; } @ifdef IA64 :SHRX Reg64, rm64, vexVVVV_r64 is $(LONGMODE_ON) & $(VEX_NDS) & $(VEX_LZ) & $(VEX_PRE_F2) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_r64; byte=0xf7; Reg64 ... 
& rm64 { Reg64 = rm64 >> (vexVVVV_r64 & 0x000000000000003F); } @endif

================================================
FILE: pypcode/processors/x86/data/languages/cet.sinc
================================================

#
# Instructions based on Intel Control-flow Enforcement Technology Preview
#
# Note: Shadow Stack semantics is not currently implemented correctly in these instructions
# nor in the instructions affected by CET
#

define pcodeop ShadowStackPush8B;
define pcodeop ShadowStackPush4B;
define pcodeop ShadowStackLoad8B;
define pcodeop ShadowStackLoad4B;

# INCSSPD: advance the shadow-stack pointer by the low byte of r32, in 4-byte slots
:INCSSPD r32 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=5 & r32
{
	SSP = SSP + zext(4 * r32:1);
}

@ifdef IA64
# INCSSPQ: advance the shadow-stack pointer by the low byte of r64, in 8-byte slots
:INCSSPQ r64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_F3) & $(REX_W) & byte=0x0f; byte=0xae; reg_opcode=5 & r64
{
	SSP = SSP + zext(8 * r64:1);
}
@endif

# RDSSPD: read the low 32 bits of the shadow-stack pointer
:RDSSPD r32 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0x1e; mod=3 & reg_opcode=1 & r32
{
	r32 = SSP:4;
}

@ifdef IA64
:RDSSPQ r64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_F3) & $(REX_W) & byte=0x0f; byte=0x1e; mod=3 & reg_opcode=1 & r64
{
	r64 = SSP;
}
@endif

:SAVEPREVSSP is vexMode=0 & $(PRE_F3) & (opsize=0 | opsize=1 | opsize=2 | opsize=3) & byte=0x0f; byte=0x01; byte=0xea
{
	tmp:8 = SSP;
	SSP = SSP & ~0x7;
	ShadowStackPush8B(tmp);
}

:RSTORSSP m64 is vexMode=0 & $(PRE_F3) & (opsize=0 | opsize=1 | opsize=2 | opsize=3) & byte=0x0f; byte=0x01; ( mod != 0b11 & reg_opcode=5 ) ... & m64
{
	tmp_SSP:8 = m64;
	SSP = tmp_SSP & ~0x01;
}

define pcodeop writeToShadowStack;
define pcodeop writeToUserShadowStack;

# WRSSD: 0F 38 F6 /r
:WRSSD rm32,Reg32 is vexMode=0 & byte=0x0f; byte=0x38; byte=0xf6; rm32 & Reg32 ...
{
	writeToShadowStack(rm32, Reg32);
}

@ifdef IA64
# WRSSQ: REX.W 0F 38 F6 /r
# FIX: the pattern previously repeated "byte=0x0f;" twice, requiring a bogus
# 0F 0F 38 F6 byte sequence that can never match the real encoding.
:WRSSQ rm64,Reg64 is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0f; byte=0x38; byte=0xf6; rm64 & Reg64 ...
{
	writeToShadowStack(rm64, Reg64);
}
@endif

# WRUSSD: 66 0F 38 F5 /r
:WRUSSD rm32,Reg32 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0xf5; rm32 & Reg32 ...
{
	writeToUserShadowStack(rm32, Reg32);
}

@ifdef IA64
# WRUSSQ: 66 REX.W 0F 38 F5 /r
# FIX: the pattern previously repeated "byte=0x0f;" twice, requiring a bogus
# 0F 0F 38 F5 byte sequence that can never match the real encoding.
:WRUSSQ rm64,Reg64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & $(REX_W) & byte=0x0f; byte=0x38; byte=0xf5; rm64 & Reg64 ...
{
	writeToUserShadowStack(rm64, Reg64);
}
@endif

define pcodeop markShadowStackBusy;
define pcodeop clearShadowStackBusy;

:SETSSBSY is vexMode=0 & $(PRE_F3) & (opsize=0 | opsize=1 | opsize=2 | opsize=3) & byte=0x0f; byte=0x01; byte=0xe8
{
	SSP = markShadowStackBusy(IA32_PL0_SSP);
}

:CLRSSBSY m64 is vexMode=0 & $(PRE_F3) & (opsize=0 | opsize=1 | opsize=2 | opsize=3) & byte=0x0f; byte=0xae; reg_opcode=6 ... & m64
{
	clearShadowStackBusy(m64);
	SSP=0;
}

# ENDBR32/ENDBR64 are CET branch-target markers; no semantic effect is modeled.
:ENDBR32 is vexMode=0 & $(PRE_F3) & (opsize=0 | opsize=1 | opsize=2 | opsize=3) & byte=0x0f; byte=0x1e; byte=0xfb {}

@ifdef IA64
:ENDBR64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_F3) & (opsize=0 | opsize=1 | opsize=2 | opsize=3) & byte=0x0f; byte=0x1e; byte=0xfa {}
@endif

================================================
FILE: pypcode/processors/x86/data/languages/clwb.sinc
================================================

define pcodeop clwb;

:CLWB m8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xAE; m8 & reg_opcode=6 ...
{
	clwb(m8);
}

@ifdef IA64
define pcodeop clflushopt;

:CLFLUSHOPT m8 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xAE; m8 & reg_opcode=7 ...
{
	clflushopt(m8);
}
@endif

# Note: PCOMMIT was deprecated prior to it ever being implemented in production processors.
# I never found the encoding for it. Therefore, no constructor.

================================================
FILE: pypcode/processors/x86/data/languages/fma.sinc
================================================

#
# x86 FMA instructions
#

# VFIXUPIMMSD 5-120 PAGE 1944 LINE 101211
define pcodeop vfmadd132pd_fma ;
:VFMADD132PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m128 { local tmp:16 = vfmadd132pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFIXUPIMMSD 5-120 PAGE 1944 LINE 101214 define pcodeop vfmadd213pd_fma ; :VFMADD213PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmadd213pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFIXUPIMMSD 5-120 PAGE 1944 LINE 101217 define pcodeop vfmadd231pd_fma ; :VFMADD231PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmadd231pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFIXUPIMMSD 5-120 PAGE 1944 LINE 101220 :VFMADD132PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmadd132pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFIXUPIMMSD 5-120 PAGE 1944 LINE 101223 :VFMADD213PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmadd213pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFIXUPIMMSD 5-120 PAGE 1944 LINE 101226 :VFMADD231PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmadd231pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFIXUPIMMSS 5-127 PAGE 1951 LINE 101572 define pcodeop vfmadd132ps_fma ; :VFMADD132PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x98; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128
{
	local tmp:16 = vfmadd132ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# VFIXUPIMMSS 5-127 PAGE 1951 LINE 101575
define pcodeop vfmadd213ps_fma ;
:VFMADD213PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
	local tmp:16 = vfmadd213ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# VFIXUPIMMSS 5-127 PAGE 1951 LINE 101578
define pcodeop vfmadd231ps_fma ;
:VFMADD231PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB8; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
	local tmp:16 = vfmadd231ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
	ZmmReg1 = zext(tmp);
}

# VFIXUPIMMSS 5-127 PAGE 1951 LINE 101581
# NOTE(review): the 256-bit forms below funnel through a 16-byte temp before the
# zext, which would truncate the upper ymm lanes of the pcodeop result --
# presumably should be :32; confirm against the sibling PD forms before changing.
:VFMADD132PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x98; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
	local tmp:16 = vfmadd132ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# VFIXUPIMMSS 5-127 PAGE 1951 LINE 101584
:VFMADD213PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xA8; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
	local tmp:16 = vfmadd213ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
	ZmmReg1 = zext(tmp);
}

# VFIXUPIMMSS 5-127 PAGE 1951 LINE 101587
# FIX: VFMADD231PS ymm is encoded VEX.256.66.0F38.W0 B8 /r. The generator had
# dropped the W constraint (see its former WARNING comment), which made this
# pattern overlap the W1-encoded VFMADD231PD ymm form (also byte 0xB8) above.
:VFMADD231PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xB8; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256 { local tmp:16 = vfmadd231ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-134 PAGE 1958 LINE 101931 define pcodeop vfmadd132sd_fma ; :VFMADD132SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfmadd132sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-134 PAGE 1958 LINE 101934 define pcodeop vfmadd213sd_fma ; :VFMADD213SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfmadd213sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFMADD132PS/VFMADD213PS/VFMADD231PS 5-134 PAGE 1958 LINE 101937 define pcodeop vfmadd231sd_fma ; :VFMADD231SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfmadd231sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-137 PAGE 1961 LINE 102099 define pcodeop vfmadd132ss_fma ; :VFMADD132SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x99; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfmadd132ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-137 PAGE 1961 LINE 102102 define pcodeop vfmadd213ss_fma ; :VFMADD213SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA9; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 { local tmp:16 = vfmadd213ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-137 PAGE 1961 LINE 102105 define pcodeop vfmadd231ss_fma ; :VFMADD231SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB9; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfmadd231ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-140 PAGE 1964 LINE 102272 define pcodeop vfmaddsub132pd_fma ; :VFMADDSUB132PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmaddsub132pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-140 PAGE 1964 LINE 102275 define pcodeop vfmaddsub213pd_fma ; :VFMADDSUB213PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmaddsub213pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-140 PAGE 1964 LINE 102278 define pcodeop vfmaddsub231pd_fma ; :VFMADDSUB231PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmaddsub231pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-140 PAGE 1964 LINE 102281 :VFMADDSUB132PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:16 = vfmaddsub132pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-140 PAGE 1964 LINE 102284 :VFMADDSUB213PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmaddsub213pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMADDSUB132PD/VFMADDSUB213PD/VFMADDSUB231PD 5-140 PAGE 1964 LINE 102287 :VFMADDSUB231PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmaddsub231pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-150 PAGE 1974 LINE 102711 define pcodeop vfmaddsub132ps_fma ; :VFMADDSUB132PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x96; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmaddsub132ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-150 PAGE 1974 LINE 102714 define pcodeop vfmaddsub213ps_fma ; :VFMADDSUB213PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA6; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmaddsub213ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-150 PAGE 1974 LINE 102717 define pcodeop vfmaddsub231ps_fma ; :VFMADDSUB231PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB6; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vfmaddsub231ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-150 PAGE 1974 LINE 102720 :VFMADDSUB132PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x96; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmaddsub132ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-150 PAGE 1974 LINE 102723 :VFMADDSUB213PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xA6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmaddsub213ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMADD132SS/VFMADD213SS/VFMADD231SS 5-150 PAGE 1974 LINE 102726 :VFMADDSUB231PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xB6; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmaddsub231ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-159 PAGE 1983 LINE 103141 define pcodeop vfmsubadd132pd_fma ; :VFMSUBADD132PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsubadd132pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-159 PAGE 1983 LINE 103144 define pcodeop vfmsubadd213pd_fma ; :VFMSUBADD213PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vfmsubadd213pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-159 PAGE 1983 LINE 103147 define pcodeop vfmsubadd231pd_fma ; :VFMSUBADD231PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsubadd231pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-159 PAGE 1983 LINE 103150 :VFMSUBADD132PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsubadd132pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-159 PAGE 1983 LINE 103153 :VFMSUBADD213PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsubadd213pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PD/VFMSUBADD213PD/VFMSUBADD231PD 5-159 PAGE 1983 LINE 103156 :VFMSUBADD231PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsubadd231pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-169 PAGE 1993 LINE 103581 define pcodeop vfmsubadd132ps_fma ; :VFMSUBADD132PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x97; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vfmsubadd132ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-169 PAGE 1993 LINE 103584 define pcodeop vfmsubadd213ps_fma ; :VFMSUBADD213PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xA7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsubadd213ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-169 PAGE 1993 LINE 103587 define pcodeop vfmsubadd231ps_fma ; :VFMSUBADD231PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xB7; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsubadd231ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-169 PAGE 1993 LINE 103590 :VFMSUBADD132PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x97; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsubadd132ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-169 PAGE 1993 LINE 103593 :VFMSUBADD213PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xA7; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsubadd213ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUBADD132PS/VFMSUBADD213PS/VFMSUBADD231PS 5-169 PAGE 1993 LINE 103596 :VFMSUBADD231PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xB7; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:16 = vfmsubadd231ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-179 PAGE 2003 LINE 104019 define pcodeop vfmsub132pd_fma ; :VFMSUB132PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsub132pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-179 PAGE 2003 LINE 104022 define pcodeop vfmsub213pd_fma ; :VFMSUB213PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsub213pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-179 PAGE 2003 LINE 104025 define pcodeop vfmsub231pd_fma ; :VFMSUB231PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsub231pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-179 PAGE 2003 LINE 104028 :VFMSUB132PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsub132pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-179 PAGE 2003 LINE 104031 :VFMSUB213PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:16 = vfmsub213pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUB132PD/VFMSUB213PD/VFMSUB231PD 5-179 PAGE 2003 LINE 104034 :VFMSUB231PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfmsub231pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-186 PAGE 2010 LINE 104379 define pcodeop vfmsub132ps_fma ; :VFMSUB132PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9A; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsub132ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-186 PAGE 2010 LINE 104382 define pcodeop vfmsub213ps_fma ; :VFMSUB213PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsub213ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-186 PAGE 2010 LINE 104385 define pcodeop vfmsub231ps_fma ; :VFMSUB231PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBA; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfmsub231ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-186 PAGE 2010 LINE 104388 :VFMSUB132PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x9A; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256
{
  local tmp:16 = vfmsub132ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-186 PAGE 2010 LINE 104391
:VFMSUB213PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xAA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfmsub213ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFMSUB132PS/VFMSUB213PS/VFMSUB231PS 5-186 PAGE 2010 LINE 104394
# WARNING: did not recognize VEX field 0 for "VFMSUB231PS ymm1, ymm2, ymm3/m256"
# FIX(review): added $(VEX_W0) — Intel SDM encodes VFMSUB231PS ymm as VEX.256.66.0F38.W0 BA /r.
# Without the W0 constraint this pattern was ambiguous with VFMSUB231PD ymm (W1, same 0xBA opcode).
:VFMSUB231PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xBA; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfmsub231ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-193 PAGE 2017 LINE 104738
define pcodeop vfmsub132sd_fma ;
:VFMSUB132SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{
  local tmp:16 = vfmsub132sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 );
  ZmmReg1 = zext(tmp);
}

# VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-193 PAGE 2017 LINE 104741
define pcodeop vfmsub213sd_fma ;
:VFMSUB213SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{
  local tmp:16 = vfmsub213sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 );
  ZmmReg1 = zext(tmp);
}

# VFMSUB132SD/VFMSUB213SD/VFMSUB231SD 5-193 PAGE 2017 LINE 104744
define pcodeop vfmsub231sd_fma ;
:VFMSUB231SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m64 { local tmp:16 = vfmsub231sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-196 PAGE 2020 LINE 104913 define pcodeop vfmsub132ss_fma ; :VFMSUB132SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9B; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfmsub132ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-196 PAGE 2020 LINE 104916 define pcodeop vfmsub213ss_fma ; :VFMSUB213SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfmsub213ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFMSUB132SS/VFMSUB213SS/VFMSUB231SS 5-196 PAGE 2020 LINE 104919 define pcodeop vfmsub231ss_fma ; :VFMSUB231SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBB; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfmsub231ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-199 PAGE 2023 LINE 105088 define pcodeop vfnmadd132pd_fma ; :VFNMADD132PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmadd132pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-199 PAGE 2023 LINE 105091 define pcodeop vfnmadd213pd_fma ; :VFNMADD213PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128 { local tmp:16 = vfnmadd213pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-199 PAGE 2023 LINE 105094 define pcodeop vfnmadd231pd_fma ; :VFNMADD231PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmadd231pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-199 PAGE 2023 LINE 105097 :VFNMADD132PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfnmadd132pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-199 PAGE 2023 LINE 105100 :VFNMADD213PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfnmadd213pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMADD132PD/VFNMADD213PD/VFNMADD231PD 5-199 PAGE 2023 LINE 105103 :VFNMADD231PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfnmadd231pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-206 PAGE 2030 LINE 105447 define pcodeop vfnmadd132ps_fma ; :VFNMADD132PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9C; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128
{
  local tmp:16 = vfnmadd132ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-206 PAGE 2030 LINE 105450
define pcodeop vfnmadd213ps_fma ;
:VFNMADD213PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
  local tmp:16 = vfnmadd213ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-206 PAGE 2030 LINE 105453
define pcodeop vfnmadd231ps_fma ;
:VFNMADD231PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBC; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128
{
  local tmp:16 = vfnmadd231ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-206 PAGE 2030 LINE 105456
:VFNMADD132PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x9C; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfnmadd132ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-206 PAGE 2030 LINE 105459
:VFNMADD213PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xAC; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfnmadd213ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFNMADD132PS/VFNMADD213PS/VFNMADD231PS 5-206 PAGE 2030 LINE 105462
# WARNING: did not recognize VEX field 0 for "VFNMADD231PS ymm1, ymm2, ymm3/m256"
# FIX(review): added $(VEX_W0) — Intel SDM encodes VFNMADD231PS ymm as VEX.256.66.0F38.W0 BC /r.
# Without the W0 constraint this pattern was ambiguous with VFNMADD231PD ymm (W1, same 0xBC opcode).
:VFNMADD231PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xBC; (YmmReg1 & ZmmReg1) ...
& YmmReg2_m256 { local tmp:16 = vfnmadd231ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-212 PAGE 2036 LINE 105794 define pcodeop vfnmadd132sd_fma ; :VFNMADD132SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfnmadd132sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-212 PAGE 2036 LINE 105797 define pcodeop vfnmadd213sd_fma ; :VFNMADD213SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfnmadd213sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFNMADD132SD/VFNMADD213SD/VFNMADD231SD 5-212 PAGE 2036 LINE 105800 define pcodeop vfnmadd231sd_fma ; :VFNMADD231SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfnmadd231sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-215 PAGE 2039 LINE 105966 define pcodeop vfnmadd132ss_fma ; :VFNMADD132SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9D; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfnmadd132ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-215 PAGE 2039 LINE 105969 define pcodeop vfnmadd213ss_fma ; :VFNMADD213SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAD; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 { local tmp:16 = vfnmadd213ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFNMADD132SS/VFNMADD213SS/VFNMADD231SS 5-215 PAGE 2039 LINE 105972 define pcodeop vfnmadd231ss_fma ; :VFNMADD231SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBD; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfnmadd231ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-218 PAGE 2042 LINE 106138 define pcodeop vfnmsub132pd_fma ; :VFNMSUB132PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmsub132pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-218 PAGE 2042 LINE 106141 define pcodeop vfnmsub213pd_fma ; :VFNMSUB213PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmsub213pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-218 PAGE 2042 LINE 106144 define pcodeop vfnmsub231pd_fma ; :VFNMSUB231PD XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmsub231pd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-218 PAGE 2042 LINE 106147 :VFNMSUB132PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1) ... 
& YmmReg2_m256 { local tmp:16 = vfnmsub132pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-218 PAGE 2042 LINE 106150 :VFNMSUB213PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfnmsub213pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PD/VFNMSUB213PD/VFNMSUB231PD 5-218 PAGE 2042 LINE 106153 :VFNMSUB231PD YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256 { local tmp:16 = vfnmsub231pd_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-224 PAGE 2048 LINE 106487 define pcodeop vfnmsub132ps_fma ; :VFNMSUB132PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9E; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmsub132ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-224 PAGE 2048 LINE 106490 define pcodeop vfnmsub213ps_fma ; :VFNMSUB213PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAE; (XmmReg1 & ZmmReg1) ... & XmmReg2_m128 { local tmp:16 = vfnmsub213ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 ); ZmmReg1 = zext(tmp); } # VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-224 PAGE 2048 LINE 106493 define pcodeop vfnmsub231ps_fma ; :VFNMSUB231PS XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBE; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m128
{
  local tmp:16 = vfnmsub231ps_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 );
  ZmmReg1 = zext(tmp);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-224 PAGE 2048 LINE 106496
:VFNMSUB132PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0x9E; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfnmsub132ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-224 PAGE 2048 LINE 106499
:VFNMSUB213PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xAE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfnmsub213ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFNMSUB132PS/VFNMSUB213PS/VFNMSUB231PS 5-224 PAGE 2048 LINE 106502
# WARNING: did not recognize VEX field 0 for "VFNMSUB231PS ymm1, ymm2, ymm3/m256"
# FIX(review): added $(VEX_W0) — Intel SDM encodes VFNMSUB231PS ymm as VEX.256.66.0F38.W0 BE /r.
# Without the W0 constraint this pattern was ambiguous with VFNMSUB231PD ymm (W1, same 0xBE opcode).
:VFNMSUB231PS YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 is $(VEX_L256) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_YmmReg; byte=0xBE; (YmmReg1 & ZmmReg1) ... & YmmReg2_m256
{
  local tmp:16 = vfnmsub231ps_fma( YmmReg1, vexVVVV_YmmReg, YmmReg2_m256 );
  ZmmReg1 = zext(tmp);
}

# VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-230 PAGE 2054 LINE 106832
define pcodeop vfnmsub132sd_fma ;
:VFNMSUB132SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64
{
  local tmp:16 = vfnmsub132sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 );
  ZmmReg1 = zext(tmp);
}

# VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-230 PAGE 2054 LINE 106835
define pcodeop vfnmsub213sd_fma ;
:VFNMSUB213SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1) ...
& XmmReg2_m64 { local tmp:16 = vfnmsub213sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFNMSUB132SD/VFNMSUB213SD/VFNMSUB231SD 5-230 PAGE 2054 LINE 106838 define pcodeop vfnmsub231sd_fma ; :VFNMSUB231SD XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W1) & vexVVVV_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m64 { local tmp:16 = vfnmsub231sd_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m64 ); ZmmReg1 = zext(tmp); } # VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-233 PAGE 2057 LINE 107004 define pcodeop vfnmsub132ss_fma ; :VFNMSUB132SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0x9F; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfnmsub132ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-233 PAGE 2057 LINE 107007 define pcodeop vfnmsub213ss_fma ; :VFNMSUB213SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xAF; (XmmReg1 & ZmmReg1) ... & XmmReg2_m32 { local tmp:16 = vfnmsub213ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } # VFNMSUB132SS/VFNMSUB213SS/VFNMSUB231SS 5-233 PAGE 2057 LINE 107010 define pcodeop vfnmsub231ss_fma ; :VFNMSUB231SS XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 is $(VEX_LIG) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_W0) & vexVVVV_XmmReg; byte=0xBF; (XmmReg1 & ZmmReg1) ... 
& XmmReg2_m32 { local tmp:16 = vfnmsub231ss_fma( XmmReg1, vexVVVV_XmmReg, XmmReg2_m32 ); ZmmReg1 = zext(tmp); } ================================================ FILE: pypcode/processors/x86/data/languages/ia.sinc ================================================ # SLA specification file for Intel x86 @ifdef IA64 @define SIZE "8" @define STACKPTR "RSP" @else @define SIZE "4" @define STACKPTR "ESP" @endif define endian=little; define space ram type=ram_space size=$(SIZE) default; define space register type=register_space size=4; # General purpose registers @ifdef IA64 define register offset=0 size=8 [ RAX RCX RDX RBX RSP RBP RSI RDI ]; define register offset=0 size=4 [ EAX _ ECX _ EDX _ EBX _ ESP _ EBP _ ESI _ EDI ]; define register offset=0 size=2 [ AX _ _ _ CX _ _ _ DX _ _ _ BX _ _ _ SP _ _ _ BP _ _ _ SI _ _ _ DI ]; define register offset=0 size=1 [ AL AH _ _ _ _ _ _ CL CH _ _ _ _ _ _ DL DH _ _ _ _ _ _ BL BH _ _ _ _ _ _ SPL _ _ _ _ _ _ _ BPL _ _ _ _ _ _ _ SIL _ _ _ _ _ _ _ DIL ]; define register offset=0x80 size=8 [ R8 R9 R10 R11 R12 R13 R14 R15 ]; define register offset=0x80 size=4 [ R8D _ R9D _ R10D _ R11D _ R12D _ R13D _ R14D _ R15D _ ]; define register offset=0x80 size=2 [ R8W _ _ _ R9W _ _ _ R10W _ _ _ R11W _ _ _ R12W _ _ _ R13W _ _ _ R14W _ _ _ R15W _ _ _ ]; define register offset=0x80 size=1 [ R8B _ _ _ _ _ _ _ R9B _ _ _ _ _ _ _ R10B _ _ _ _ _ _ _ R11B _ _ _ _ _ _ _ R12B _ _ _ _ _ _ _ R13B _ _ _ _ _ _ _ R14B _ _ _ _ _ _ _ R15B _ _ _ _ _ _ _ ]; @else define register offset=0 size=4 [ EAX ECX EDX EBX ESP EBP ESI EDI ]; define register offset=0 size=2 [ AX _ CX _ DX _ BX _ SP _ BP _ SI _ DI ]; define register offset=0 size=1 [ AL AH _ _ CL CH _ _ DL DH _ _ BL BH ]; @endif # Segment registers define register offset=0x100 size=2 [ ES CS SS DS FS GS ]; define register offset=0x110 size=$(SIZE) [ FS_OFFSET GS_OFFSET ]; # Flags define register offset=0x200 size=1 [ CF F1 PF F3 AF F5 ZF SF TF IF DF OF IOPL NT F15 RF VM AC VIF VIP ID ]; @ifdef IA64 define register 
offset=0x280 size=8 [ rflags RIP ]; define register offset=0x280 size=4 [ eflags _ EIP _ ]; define register offset=0x280 size=2 [ flags _ _ _ IP _ _ _]; @else define register offset=0x280 size=4 [ eflags EIP] ; define register offset=0x280 size=2 [ flags _ IP] ; @endif # Debug and control registers @ifdef IA64 define register offset=0x300 size=8 [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 ]; @else define register offset=0x300 size=4 [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 CR0 _ CR2 CR3 CR4 ]; define register offset=0x400 size=4 [ TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 ]; @endif #Processor State Register - currently only XFEATURE_ENABLED_MASK=XCR0 is defined # define register offset=0x600 size=8 [ XCR0 ]; # Memory Protection Extensions (MPX) define register offset=0x700 size=8 [ BNDCFGS BNDCFGU BNDSTATUS ]; define register offset=0x740 size=16 [ BND0 BND1 BND2 BND3 _ _ _ _ ]; define register offset=0x740 size=8 [ BND0_LB BND0_UB BND1_LB BND1_UB BND2_LB BND2_UB BND3_LB BND3_UB _ _ _ _ _ _ _ _ ]; # Control Flow Extensions define register offset=0x7c0 size=8 [ SSP IA32_PL2_SSP IA32_PL1_SSP IA32_PL0_SSP ]; # NOTE: ST registers moved with Ghidra 10.0.3 (v2.12) and previously occupied the offset range 0x1000-104f. # Automated address re-mapping was not provided and requires use of FixOldSTVariableStorageScript # to fixup uses within a program. The range 0x1000-104f should remain reserved and unused. # define register offset=0x1000 size=80 [ OLD_ST_REGION ]; define register offset=0x1090 size=1 [ C0 C1 C2 C3 ]; define register offset=0x1094 size=4 [ MXCSR ]; define register offset=0x10a0 size=2 [ FPUControlWord FPUStatusWord FPUTagWord FPULastInstructionOpcode ]; define register offset=0x10a8 size=$(SIZE) [ FPUDataPointer FPUInstructionPointer ]; define register offset=0x10c8 size=2 [ FPUPointerSelector FPUDataSelector]; #FCS FDS # FCS is not modeled, deprecated as 0. 
# FDS is not modeled, deprecated as 0.

# Floating point registers - as they are in 32-bit protected mode
# See overlapping MM registers below
# Each x87 stack register is 10 bytes (80-bit extended precision) but is placed on a
# 0x10 stride so the byte/word/dword MMX lane overlays below line up cleanly.
define register offset=0x1100 size=10 [ ST0 ];
define register offset=0x1110 size=10 [ ST1 ];
define register offset=0x1120 size=10 [ ST2 ];
define register offset=0x1130 size=10 [ ST3 ];
define register offset=0x1140 size=10 [ ST4 ];
define register offset=0x1150 size=10 [ ST5 ];
define register offset=0x1160 size=10 [ ST6 ];
define register offset=0x1170 size=10 [ ST7 ];

# NOTE: The upper 16-bits of the x87 ST registers go unused in MMX.
# These upper 16-bits should be set to all ones by any MMX instruction, which correspond to the
# floating-point representation of NaNs or infinities.
# Although not currently modeled, the 2-byte ST0h..ST7h registers are provided for that purpose.

# MMX registers alias the low 8 bytes of each ST register (same 0x1100 base).
define register offset=0x1100 size=8 [ MM0 _ MM1 _ MM2 _ MM3 _ MM4 _ MM5 _ MM6 _ MM7 _ ];

# Dword lanes of each MM register.
define register offset=0x1100 size=4 [
    MM0_Da MM0_Db _ _
    MM1_Da MM1_Db _ _
    MM2_Da MM2_Db _ _
    MM3_Da MM3_Db _ _
    MM4_Da MM4_Db _ _
    MM5_Da MM5_Db _ _
    MM6_Da MM6_Db _ _
    MM7_Da MM7_Db _ _
];

# Word lanes of each MM register; STxh is the (unmodeled) upper 16 bits of the 80-bit STx.
define register offset=0x1100 size=2 [
    MM0_Wa MM0_Wb MM0_Wc MM0_Wd ST0h _ _ _
    MM1_Wa MM1_Wb MM1_Wc MM1_Wd ST1h _ _ _
    MM2_Wa MM2_Wb MM2_Wc MM2_Wd ST2h _ _ _
    MM3_Wa MM3_Wb MM3_Wc MM3_Wd ST3h _ _ _
    MM4_Wa MM4_Wb MM4_Wc MM4_Wd ST4h _ _ _
    MM5_Wa MM5_Wb MM5_Wc MM5_Wd ST5h _ _ _
    MM6_Wa MM6_Wb MM6_Wc MM6_Wd ST6h _ _ _
    MM7_Wa MM7_Wb MM7_Wc MM7_Wd ST7h _ _ _
];

# Byte lanes of each MM register.
define register offset=0x1100 size=1 [
    MM0_Ba MM0_Bb MM0_Bc MM0_Bd MM0_Be MM0_Bf MM0_Bg MM0_Bh _ _ _ _ _ _ _ _
    MM1_Ba MM1_Bb MM1_Bc MM1_Bd MM1_Be MM1_Bf MM1_Bg MM1_Bh _ _ _ _ _ _ _ _
    MM2_Ba MM2_Bb MM2_Bc MM2_Bd MM2_Be MM2_Bf MM2_Bg MM2_Bh _ _ _ _ _ _ _ _
    MM3_Ba MM3_Bb MM3_Bc MM3_Bd MM3_Be MM3_Bf MM3_Bg MM3_Bh _ _ _ _ _ _ _ _
    MM4_Ba MM4_Bb MM4_Bc MM4_Bd MM4_Be MM4_Bf MM4_Bg MM4_Bh _ _ _ _ _ _ _ _
    MM5_Ba MM5_Bb MM5_Bc MM5_Bd MM5_Be MM5_Bf MM5_Bg MM5_Bh _ _ _ _ _ _ _ _
    MM6_Ba MM6_Bb MM6_Bc MM6_Bd MM6_Be MM6_Bf MM6_Bg MM6_Bh _ _ _ _ _ _ _ _
    MM7_Ba MM7_Bb MM7_Bc MM7_Bd MM7_Be MM7_Bf MM7_Bg MM7_Bh _ _ _ _ _ _ _ _
];

# Scratch 128-bit temporaries used by semantics (not architectural registers).
define register offset=0x1180 size=16 [ xmmTmp1 xmmTmp2 ];
define register offset=0x1180 size=8  [ xmmTmp1_Qa xmmTmp1_Qb xmmTmp2_Qa xmmTmp2_Qb ];
define register offset=0x1180 size=4  [ xmmTmp1_Da xmmTmp1_Db xmmTmp1_Dc xmmTmp1_Dd
                                        xmmTmp2_Da xmmTmp2_Db xmmTmp2_Dc xmmTmp2_Dd ];

#
# YMM0 - YMM7  - available in 32 bit mode
# YMM0 - YMM15 - available in 64 bit mode
#
# YMMx_H is the formal name for the high double quadword of the YMMx register,
# XMMx is the overlay in the XMM register set.
# Each ZMM register occupies a 64-byte slot starting at 0x1200; the 16-byte views below
# name the low quadwords (XMMx) and the next quadword (YMMx_H) of each slot.
define register offset=0x1200 size=16 [
    XMM0  YMM0_H  _ _
    XMM1  YMM1_H  _ _
    XMM2  YMM2_H  _ _
    XMM3  YMM3_H  _ _
    XMM4  YMM4_H  _ _
    XMM5  YMM5_H  _ _
    XMM6  YMM6_H  _ _
    XMM7  YMM7_H  _ _
    XMM8  YMM8_H  _ _
    XMM9  YMM9_H  _ _
    XMM10 YMM10_H _ _
    XMM11 YMM11_H _ _
    XMM12 YMM12_H _ _
    XMM13 YMM13_H _ _
    XMM14 YMM14_H _ _
    XMM15 YMM15_H _ _
    XMM16 YMM16_H _ _
    XMM17 YMM17_H _ _
    XMM18 YMM18_H _ _
    XMM19 YMM19_H _ _
    XMM20 YMM20_H _ _
    XMM21 YMM21_H _ _
    XMM22 YMM22_H _ _
    XMM23 YMM23_H _ _
    XMM24 YMM24_H _ _
    XMM25 YMM25_H _ _
    XMM26 YMM26_H _ _
    XMM27 YMM27_H _ _
    XMM28 YMM28_H _ _
    XMM29 YMM29_H _ _
    XMM30 YMM30_H _ _
    XMM31 YMM31_H _ _
];

# Quadword lanes of each XMM register.
define register offset=0x1200 size=8 [
    XMM0_Qa  XMM0_Qb  _ _ _ _ _ _
    XMM1_Qa  XMM1_Qb  _ _ _ _ _ _
    XMM2_Qa  XMM2_Qb  _ _ _ _ _ _
    XMM3_Qa  XMM3_Qb  _ _ _ _ _ _
    XMM4_Qa  XMM4_Qb  _ _ _ _ _ _
    XMM5_Qa  XMM5_Qb  _ _ _ _ _ _
    XMM6_Qa  XMM6_Qb  _ _ _ _ _ _
    XMM7_Qa  XMM7_Qb  _ _ _ _ _ _
    XMM8_Qa  XMM8_Qb  _ _ _ _ _ _
    XMM9_Qa  XMM9_Qb  _ _ _ _ _ _
    XMM10_Qa XMM10_Qb _ _ _ _ _ _
    XMM11_Qa XMM11_Qb _ _ _ _ _ _
    XMM12_Qa XMM12_Qb _ _ _ _ _ _
    XMM13_Qa XMM13_Qb _ _ _ _ _ _
    XMM14_Qa XMM14_Qb _ _ _ _ _ _
    XMM15_Qa XMM15_Qb _ _ _ _ _ _
    XMM16_Qa XMM16_Qb _ _ _ _ _ _
    XMM17_Qa XMM17_Qb _ _ _ _ _ _
    XMM18_Qa XMM18_Qb _ _ _ _ _ _
    XMM19_Qa XMM19_Qb _ _ _ _ _ _
    XMM20_Qa XMM20_Qb _ _ _ _ _ _
    XMM21_Qa XMM21_Qb _ _ _ _ _ _
    XMM22_Qa XMM22_Qb _ _ _ _ _ _
    XMM23_Qa XMM23_Qb _ _ _ _ _ _
    XMM24_Qa XMM24_Qb _ _ _ _ _ _
    XMM25_Qa XMM25_Qb _ _ _ _ _ _
    XMM26_Qa XMM26_Qb _ _ _ _ _ _
    XMM27_Qa XMM27_Qb _ _ _ _ _ _
    XMM28_Qa XMM28_Qb _ _ _ _ _ _
    XMM29_Qa XMM29_Qb _ _ _ _ _ _
    XMM30_Qa XMM30_Qb _ _ _ _ _ _
    XMM31_Qa XMM31_Qb _ _ _ _ _ _
];

# Dword lanes of each XMM register.
define register offset=0x1200 size=4 [
    XMM0_Da  XMM0_Db  XMM0_Dc  XMM0_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM1_Da  XMM1_Db  XMM1_Dc  XMM1_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM2_Da  XMM2_Db  XMM2_Dc  XMM2_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM3_Da  XMM3_Db  XMM3_Dc  XMM3_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM4_Da  XMM4_Db  XMM4_Dc  XMM4_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM5_Da  XMM5_Db  XMM5_Dc  XMM5_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM6_Da  XMM6_Db  XMM6_Dc  XMM6_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM7_Da  XMM7_Db  XMM7_Dc  XMM7_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM8_Da  XMM8_Db  XMM8_Dc  XMM8_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM9_Da  XMM9_Db  XMM9_Dc  XMM9_Dd  _ _ _ _ _ _ _ _ _ _ _ _
    XMM10_Da XMM10_Db XMM10_Dc XMM10_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM11_Da XMM11_Db XMM11_Dc XMM11_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM12_Da XMM12_Db XMM12_Dc XMM12_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM13_Da XMM13_Db XMM13_Dc XMM13_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM14_Da XMM14_Db XMM14_Dc XMM14_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM15_Da XMM15_Db XMM15_Dc XMM15_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM16_Da XMM16_Db XMM16_Dc XMM16_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM17_Da XMM17_Db XMM17_Dc XMM17_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM18_Da XMM18_Db XMM18_Dc XMM18_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM19_Da XMM19_Db XMM19_Dc XMM19_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM20_Da XMM20_Db XMM20_Dc XMM20_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM21_Da XMM21_Db XMM21_Dc XMM21_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM22_Da XMM22_Db XMM22_Dc XMM22_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM23_Da XMM23_Db XMM23_Dc XMM23_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM24_Da XMM24_Db XMM24_Dc XMM24_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM25_Da XMM25_Db XMM25_Dc XMM25_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM26_Da XMM26_Db XMM26_Dc XMM26_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM27_Da XMM27_Db XMM27_Dc XMM27_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM28_Da XMM28_Db XMM28_Dc XMM28_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM29_Da XMM29_Db XMM29_Dc XMM29_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM30_Da XMM30_Db XMM30_Dc XMM30_Dd _ _ _ _ _ _ _ _ _ _ _ _
    XMM31_Da XMM31_Db XMM31_Dc XMM31_Dd _ _ _ _ _ _ _ _ _ _ _ _
];

# Word lanes of each XMM register.
define register offset=0x1200 size=2 [
    XMM0_Wa  XMM0_Wb  XMM0_Wc  XMM0_Wd  XMM0_We  XMM0_Wf  XMM0_Wg  XMM0_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM1_Wa  XMM1_Wb  XMM1_Wc  XMM1_Wd  XMM1_We  XMM1_Wf  XMM1_Wg  XMM1_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM2_Wa  XMM2_Wb  XMM2_Wc  XMM2_Wd  XMM2_We  XMM2_Wf  XMM2_Wg  XMM2_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM3_Wa  XMM3_Wb  XMM3_Wc  XMM3_Wd  XMM3_We  XMM3_Wf  XMM3_Wg  XMM3_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM4_Wa  XMM4_Wb  XMM4_Wc  XMM4_Wd  XMM4_We  XMM4_Wf  XMM4_Wg  XMM4_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM5_Wa  XMM5_Wb  XMM5_Wc  XMM5_Wd  XMM5_We  XMM5_Wf  XMM5_Wg  XMM5_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM6_Wa  XMM6_Wb  XMM6_Wc  XMM6_Wd  XMM6_We  XMM6_Wf  XMM6_Wg  XMM6_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM7_Wa  XMM7_Wb  XMM7_Wc  XMM7_Wd  XMM7_We  XMM7_Wf  XMM7_Wg  XMM7_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM8_Wa  XMM8_Wb  XMM8_Wc  XMM8_Wd  XMM8_We  XMM8_Wf  XMM8_Wg  XMM8_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM9_Wa  XMM9_Wb  XMM9_Wc  XMM9_Wd  XMM9_We  XMM9_Wf  XMM9_Wg  XMM9_Wh  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM10_Wa XMM10_Wb XMM10_Wc XMM10_Wd XMM10_We XMM10_Wf XMM10_Wg XMM10_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM11_Wa XMM11_Wb XMM11_Wc XMM11_Wd XMM11_We XMM11_Wf XMM11_Wg XMM11_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM12_Wa XMM12_Wb XMM12_Wc XMM12_Wd XMM12_We XMM12_Wf XMM12_Wg XMM12_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM13_Wa XMM13_Wb XMM13_Wc XMM13_Wd XMM13_We XMM13_Wf XMM13_Wg XMM13_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM14_Wa XMM14_Wb XMM14_Wc XMM14_Wd XMM14_We XMM14_Wf XMM14_Wg XMM14_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM15_Wa XMM15_Wb XMM15_Wc XMM15_Wd XMM15_We XMM15_Wf XMM15_Wg XMM15_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM16_Wa XMM16_Wb XMM16_Wc XMM16_Wd XMM16_We XMM16_Wf XMM16_Wg XMM16_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM17_Wa XMM17_Wb XMM17_Wc XMM17_Wd XMM17_We XMM17_Wf XMM17_Wg XMM17_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM18_Wa XMM18_Wb XMM18_Wc XMM18_Wd XMM18_We XMM18_Wf XMM18_Wg XMM18_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM19_Wa XMM19_Wb XMM19_Wc XMM19_Wd XMM19_We XMM19_Wf XMM19_Wg XMM19_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM20_Wa XMM20_Wb XMM20_Wc XMM20_Wd XMM20_We XMM20_Wf XMM20_Wg XMM20_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM21_Wa XMM21_Wb XMM21_Wc XMM21_Wd XMM21_We XMM21_Wf XMM21_Wg XMM21_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM22_Wa XMM22_Wb XMM22_Wc XMM22_Wd XMM22_We XMM22_Wf XMM22_Wg XMM22_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM23_Wa XMM23_Wb XMM23_Wc XMM23_Wd XMM23_We XMM23_Wf XMM23_Wg XMM23_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM24_Wa XMM24_Wb XMM24_Wc XMM24_Wd XMM24_We XMM24_Wf XMM24_Wg XMM24_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM25_Wa XMM25_Wb XMM25_Wc XMM25_Wd XMM25_We XMM25_Wf XMM25_Wg XMM25_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM26_Wa XMM26_Wb XMM26_Wc XMM26_Wd XMM26_We XMM26_Wf XMM26_Wg XMM26_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM27_Wa XMM27_Wb XMM27_Wc XMM27_Wd XMM27_We XMM27_Wf XMM27_Wg XMM27_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM28_Wa XMM28_Wb XMM28_Wc XMM28_Wd XMM28_We XMM28_Wf XMM28_Wg XMM28_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM29_Wa XMM29_Wb XMM29_Wc XMM29_Wd XMM29_We XMM29_Wf XMM29_Wg XMM29_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM30_Wa XMM30_Wb XMM30_Wc XMM30_Wd XMM30_We XMM30_Wf XMM30_Wg XMM30_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM31_Wa XMM31_Wb XMM31_Wc XMM31_Wd XMM31_We XMM31_Wf XMM31_Wg XMM31_Wh _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];

# Byte lanes of each XMM register (16 named bytes, 48 unused bytes per 64-byte ZMM slot).
define register offset=0x1200 size=1 [
    XMM0_Ba  XMM0_Bb  XMM0_Bc  XMM0_Bd  XMM0_Be  XMM0_Bf  XMM0_Bg  XMM0_Bh  XMM0_Bi  XMM0_Bj  XMM0_Bk  XMM0_Bl  XMM0_Bm  XMM0_Bn  XMM0_Bo  XMM0_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM1_Ba  XMM1_Bb  XMM1_Bc  XMM1_Bd  XMM1_Be  XMM1_Bf  XMM1_Bg  XMM1_Bh  XMM1_Bi  XMM1_Bj  XMM1_Bk  XMM1_Bl  XMM1_Bm  XMM1_Bn  XMM1_Bo  XMM1_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM2_Ba  XMM2_Bb  XMM2_Bc  XMM2_Bd  XMM2_Be  XMM2_Bf  XMM2_Bg  XMM2_Bh  XMM2_Bi  XMM2_Bj  XMM2_Bk  XMM2_Bl  XMM2_Bm  XMM2_Bn  XMM2_Bo  XMM2_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM3_Ba  XMM3_Bb  XMM3_Bc  XMM3_Bd  XMM3_Be  XMM3_Bf  XMM3_Bg  XMM3_Bh  XMM3_Bi  XMM3_Bj  XMM3_Bk  XMM3_Bl  XMM3_Bm  XMM3_Bn  XMM3_Bo  XMM3_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM4_Ba  XMM4_Bb  XMM4_Bc  XMM4_Bd  XMM4_Be  XMM4_Bf  XMM4_Bg  XMM4_Bh  XMM4_Bi  XMM4_Bj  XMM4_Bk  XMM4_Bl  XMM4_Bm  XMM4_Bn  XMM4_Bo  XMM4_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM5_Ba  XMM5_Bb  XMM5_Bc  XMM5_Bd  XMM5_Be  XMM5_Bf  XMM5_Bg  XMM5_Bh  XMM5_Bi  XMM5_Bj  XMM5_Bk  XMM5_Bl  XMM5_Bm  XMM5_Bn  XMM5_Bo  XMM5_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM6_Ba  XMM6_Bb  XMM6_Bc  XMM6_Bd  XMM6_Be  XMM6_Bf  XMM6_Bg  XMM6_Bh  XMM6_Bi  XMM6_Bj  XMM6_Bk  XMM6_Bl  XMM6_Bm  XMM6_Bn  XMM6_Bo  XMM6_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM7_Ba  XMM7_Bb  XMM7_Bc  XMM7_Bd  XMM7_Be  XMM7_Bf  XMM7_Bg  XMM7_Bh  XMM7_Bi  XMM7_Bj  XMM7_Bk  XMM7_Bl  XMM7_Bm  XMM7_Bn  XMM7_Bo  XMM7_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM8_Ba  XMM8_Bb  XMM8_Bc  XMM8_Bd  XMM8_Be  XMM8_Bf  XMM8_Bg  XMM8_Bh  XMM8_Bi  XMM8_Bj  XMM8_Bk  XMM8_Bl  XMM8_Bm  XMM8_Bn  XMM8_Bo  XMM8_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM9_Ba  XMM9_Bb  XMM9_Bc  XMM9_Bd  XMM9_Be  XMM9_Bf  XMM9_Bg  XMM9_Bh  XMM9_Bi  XMM9_Bj  XMM9_Bk  XMM9_Bl  XMM9_Bm  XMM9_Bn  XMM9_Bo  XMM9_Bp  _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM10_Ba XMM10_Bb XMM10_Bc XMM10_Bd XMM10_Be XMM10_Bf XMM10_Bg XMM10_Bh XMM10_Bi XMM10_Bj XMM10_Bk XMM10_Bl XMM10_Bm XMM10_Bn XMM10_Bo XMM10_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM11_Ba XMM11_Bb XMM11_Bc XMM11_Bd XMM11_Be XMM11_Bf XMM11_Bg XMM11_Bh XMM11_Bi XMM11_Bj XMM11_Bk XMM11_Bl XMM11_Bm XMM11_Bn XMM11_Bo XMM11_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM12_Ba XMM12_Bb XMM12_Bc XMM12_Bd XMM12_Be XMM12_Bf XMM12_Bg XMM12_Bh XMM12_Bi XMM12_Bj XMM12_Bk XMM12_Bl XMM12_Bm XMM12_Bn XMM12_Bo XMM12_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM13_Ba XMM13_Bb XMM13_Bc XMM13_Bd XMM13_Be XMM13_Bf XMM13_Bg XMM13_Bh XMM13_Bi XMM13_Bj XMM13_Bk XMM13_Bl XMM13_Bm XMM13_Bn XMM13_Bo XMM13_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM14_Ba XMM14_Bb XMM14_Bc XMM14_Bd XMM14_Be XMM14_Bf XMM14_Bg XMM14_Bh XMM14_Bi XMM14_Bj XMM14_Bk XMM14_Bl XMM14_Bm XMM14_Bn XMM14_Bo XMM14_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM15_Ba XMM15_Bb XMM15_Bc XMM15_Bd XMM15_Be XMM15_Bf XMM15_Bg XMM15_Bh XMM15_Bi XMM15_Bj XMM15_Bk XMM15_Bl XMM15_Bm XMM15_Bn XMM15_Bo XMM15_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM16_Ba XMM16_Bb XMM16_Bc XMM16_Bd XMM16_Be XMM16_Bf XMM16_Bg XMM16_Bh XMM16_Bi XMM16_Bj XMM16_Bk XMM16_Bl XMM16_Bm XMM16_Bn XMM16_Bo XMM16_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM17_Ba XMM17_Bb XMM17_Bc XMM17_Bd XMM17_Be XMM17_Bf XMM17_Bg XMM17_Bh XMM17_Bi XMM17_Bj XMM17_Bk XMM17_Bl XMM17_Bm XMM17_Bn XMM17_Bo XMM17_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM18_Ba XMM18_Bb XMM18_Bc XMM18_Bd XMM18_Be XMM18_Bf XMM18_Bg XMM18_Bh XMM18_Bi XMM18_Bj XMM18_Bk XMM18_Bl XMM18_Bm XMM18_Bn XMM18_Bo XMM18_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM19_Ba XMM19_Bb XMM19_Bc XMM19_Bd XMM19_Be XMM19_Bf XMM19_Bg XMM19_Bh XMM19_Bi XMM19_Bj XMM19_Bk XMM19_Bl XMM19_Bm XMM19_Bn XMM19_Bo XMM19_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM20_Ba XMM20_Bb XMM20_Bc XMM20_Bd XMM20_Be XMM20_Bf XMM20_Bg XMM20_Bh XMM20_Bi XMM20_Bj XMM20_Bk XMM20_Bl XMM20_Bm XMM20_Bn XMM20_Bo XMM20_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM21_Ba XMM21_Bb XMM21_Bc XMM21_Bd XMM21_Be XMM21_Bf XMM21_Bg XMM21_Bh XMM21_Bi XMM21_Bj XMM21_Bk XMM21_Bl XMM21_Bm XMM21_Bn XMM21_Bo XMM21_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM22_Ba XMM22_Bb XMM22_Bc XMM22_Bd XMM22_Be XMM22_Bf XMM22_Bg XMM22_Bh XMM22_Bi XMM22_Bj XMM22_Bk XMM22_Bl XMM22_Bm XMM22_Bn XMM22_Bo XMM22_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM23_Ba XMM23_Bb XMM23_Bc XMM23_Bd XMM23_Be XMM23_Bf XMM23_Bg XMM23_Bh XMM23_Bi XMM23_Bj XMM23_Bk XMM23_Bl XMM23_Bm XMM23_Bn XMM23_Bo XMM23_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM24_Ba XMM24_Bb XMM24_Bc XMM24_Bd XMM24_Be XMM24_Bf XMM24_Bg XMM24_Bh XMM24_Bi XMM24_Bj XMM24_Bk XMM24_Bl XMM24_Bm XMM24_Bn XMM24_Bo XMM24_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM25_Ba XMM25_Bb XMM25_Bc XMM25_Bd XMM25_Be XMM25_Bf XMM25_Bg XMM25_Bh XMM25_Bi XMM25_Bj XMM25_Bk XMM25_Bl XMM25_Bm XMM25_Bn XMM25_Bo XMM25_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM26_Ba XMM26_Bb XMM26_Bc XMM26_Bd XMM26_Be XMM26_Bf XMM26_Bg XMM26_Bh XMM26_Bi XMM26_Bj XMM26_Bk XMM26_Bl XMM26_Bm XMM26_Bn XMM26_Bo XMM26_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM27_Ba XMM27_Bb XMM27_Bc XMM27_Bd XMM27_Be XMM27_Bf XMM27_Bg XMM27_Bh XMM27_Bi XMM27_Bj XMM27_Bk XMM27_Bl XMM27_Bm XMM27_Bn XMM27_Bo XMM27_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM28_Ba XMM28_Bb XMM28_Bc XMM28_Bd XMM28_Be XMM28_Bf XMM28_Bg XMM28_Bh XMM28_Bi XMM28_Bj XMM28_Bk XMM28_Bl XMM28_Bm XMM28_Bn XMM28_Bo XMM28_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM29_Ba XMM29_Bb XMM29_Bc XMM29_Bd XMM29_Be XMM29_Bf XMM29_Bg XMM29_Bh XMM29_Bi XMM29_Bj XMM29_Bk XMM29_Bl XMM29_Bm XMM29_Bn XMM29_Bo XMM29_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM30_Ba XMM30_Bb XMM30_Bc XMM30_Bd XMM30_Be XMM30_Bf XMM30_Bg XMM30_Bh XMM30_Bi XMM30_Bj XMM30_Bk XMM30_Bl XMM30_Bm XMM30_Bn XMM30_Bo XMM30_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    XMM31_Ba XMM31_Bb XMM31_Bc XMM31_Bd XMM31_Be XMM31_Bf XMM31_Bg XMM31_Bh XMM31_Bi XMM31_Bj XMM31_Bk XMM31_Bl XMM31_Bm XMM31_Bn XMM31_Bo XMM31_Bp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
];

# 32-byte YMM views (low half of each 64-byte ZMM slot).
define register offset=0x1200 size=32 [
    YMM0 _ YMM1 _ YMM2 _ YMM3 _ YMM4 _ YMM5 _ YMM6 _ YMM7 _
    YMM8 _ YMM9 _ YMM10 _ YMM11 _ YMM12 _ YMM13 _ YMM14 _ YMM15 _
    YMM16 _ YMM17 _ YMM18 _ YMM19 _ YMM20 _ YMM21 _ YMM22 _ YMM23 _
    YMM24 _ YMM25 _ YMM26 _ YMM27 _ YMM28 _ YMM29 _ YMM30 _ YMM31 _
];

# Full 64-byte AVX-512 ZMM registers.
define register offset=0x1200 size=64 [
    ZMM0  ZMM1  ZMM2  ZMM3  ZMM4  ZMM5  ZMM6  ZMM7
    ZMM8  ZMM9  ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15
    ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23
    ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31
];

# Define context bits
define register offset=0x2000 size=8 contextreg;

# AVX-512 opmask registers
# NOTE(review): offset here is decimal (2100 = 0x834), unlike the hex offsets used by
# the surrounding definitions; this matches upstream Ghidra but verify against the full
# register map before relocating.
define register offset=2100 size=8 [ K0 K1 K2 K3 K4 K5 K6 K7 ];

# dummy registers for managing broadcast data for AVX512
# NOTE(review): offsets 2200/2300 are also decimal (0x898/0x8fc), so they do NOT
# collide with the hex 0x2200+ debugger registers defined below.
define register offset=2200 size=4  [ BCST4 ];
define register offset=2200 size=8  [ BCST8 ];
define register offset=2200 size=16 [ BCST16 ];
define register offset=2200 size=32 [ BCST32 ];
define register offset=2200 size=64 [ BCST64 ];
define register offset=2300 size=16 [ XmmResult _ _ _ XmmMask ];
define register offset=2300 size=32 [ YmmResult _ YmmMask ];
define register offset=2300 size=64 [ ZmmResult ZmmMask ];

#
# This context layout is important: the 32 bit version sees addrsize as just the
# low-order bit, whereas the 64 bit sees both bits. This ensures that the 32 and 64
# are technically binary compatible, but since the 32 bit language can't see that
# addrsize is 2 bits, they won't be pulled up into constructors where bit 0 is always
# 0 (which it is), and then you don't get the decision conflicts that choose
# context over table order
#
define context contextreg
@ifdef IA64
# Stored context
  longMode=(0,0)        # 0 for 32-bit emulation, 1 for 64-bit mode
  reserved=(1,3)
  addrsize=(4,5)        # =0 16-bit addressing  =1 32-bit addressing  =2 64-bit addressing
@else
# Stored context
  reserved=(0,3)
  addrsize=(5,5)        # =0 16-bit addressing  =1 32-bit addressing
@endif
  bit64=(4,4)           # =0 16/32 bit  =1 64-bit
  opsize=(6,7)          # =0 16-bit operands  =1 32-bit operands  =2 64-bit operands
  segover=(8,10)        # 0=default 1=cs 2=ss 3=ds 4=es 5=fs 6=gs
  highseg=(8,8)         # high bit of segover will be set for ES, FS, GS
  protectedMode=(11,11) # 0 for real mode, 1 for protected mode
# End stored context
  mandover=(12,14)      # 0x66 0xf2 or 0xf3 overrides (for mandatory prefixes)
  repneprefx=(12,12)    # 0xf2 REPNE prefix
  repprefx=(13,13)      # 0xf3 REP prefix
  xacquireprefx=(12,12) # 0xf2 XACQUIRE prefix
  xreleaseprefx=(13,13) # 0xf3 XRELEASE prefix
  prefix_f2=(12,12)     # This is not really a REPNE override, it means there is a real(read)/implied(vex) f2 byte
  prefix_f3=(13,13)     # This is not really a REP override, it means there is an real(read)/implied(vex) f3 byte
  prefix_66=(14,14)     # This is not really a OPSIZE override, it means there is an real(read)/implied(vex) 66 byte
  rexWRXBprefix=(15,18) # REX.WRXB bits
  rexWprefix=(15,15)    # REX.W bit prefix (opsize=2 when REX.W is set)
  rexRprefix=(16,16)    # REX.R bit prefix extend r
  rexXprefix=(17,17)    # REX.X bit prefix extend SIB index field to 4 bits
  rexBprefix=(18,18)    # REX.B bit prefix extend r/m, SIB base, Reg operand
  rexprefix=(19,19)     # True if the Rex prefix is present - note, if present, vex_mode is not supported
                        # rexWRXB bits can be re-used since they are incompatible.
  vexMode=(20,21)       # 2 for evex instruction, 1 for vexMode, 0 for normal
  evexL = (22,23)       # 0 for 128, 1 for 256, 2 for 512 (also used for rounding control)
  evexLp=(22,22)        # EVEX.L'
  vexL=(23,23)          # 0 for 128, 1 for 256
  evexV5_XmmReg=(24,28) # evex byte for matching ZmmReg
  evexV5_YmmReg=(24,28) # evex byte for matching ZmmReg
  evexV5_ZmmReg=(24,28) # evex byte for matching ZmmReg
  evexV5=(24,28)        # EVEX.V' combined with EVEX.vvvv
  evexVp=(24,24)        # EVEX.V' bit prefix extends EVEX.vvvv (stored inverted)
  vexVVVV=(25,28)       # value of vex byte for matching
  vexVVVV_r32=(25,28)   # value of vex byte for matching a normal 32 bit register
  vexVVVV_r64=(25,28)   # value of vex byte for matching a normal 64 bit register
  vexVVVV_XmmReg=(25,28) # value of vex byte for matching XmmReg
  vexVVVV_YmmReg=(25,28) # value of vex byte for matching YmmReg
  vexVVVV_ZmmReg=(25,28) # value of vex byte for matching ZmmReg
  vexHighV=(25,25)
  evexVopmask=(26,28)   # VEX.vvvv opmask
  suffix3D=(22,29)      # 3DNow suffix byte (overlaps un-modified vex context region)
  instrPhase=(30,30)    # 0: initial/prefix phase, 1: primary instruction phase
  lockprefx=(31,31)     # 0xf0 LOCK prefix
  vexMMMMM=(32,36)      # need to match for preceding bytes 1=0x0F, 2=0x0F 0x38, 3=0x0F 0x3A
  evexRp=(37,37)        # EVEX.R' bit prefix extends r
  evexB = (38,38)       # EVEX.b Broadcast
  evexZ = (39,39)       # Opmask behavior 1 for zeroing-masking, 0 for merging-masking
  evexAAA=(40,42)       # Opmask selector
  evexOpmask=(40,42)    # Used for attaching Opmask registers
  evexD8Type=(43,43)    # Used for compressed Disp8*N, can range from 1 to 64
  evexBType=(47,47)     # Used for Disp8*N (see table 2-34 in 325462-sdm-vol-1-2abcd-3abcd-4.pdf)
  evexTType=(44,47)     # Used for Disp8*N (see table 2-35 in 325462-sdm-vol-1-2abcd-3abcd-4.pdf)
  evexDisp8=(44,46)
  reservedHigh=(48,63)  # reserved for future use
;

# These are only to be used with pre-REX (original 8086, 80386) and REX encoding. Do not use with VEX encoding.
# These are to be used to designate that the opcode sequence begins with one of these "mandatory" prefix values.
# This allows the other prefixes to come before the mandatory value.
# For example: CRC32 r32, r16 -- 66 F2 OF 38 F1 C8
@define PRE_NO "mandover=0"
@define PRE_66 "prefix_66=1"
@define PRE_F3 "prefix_f3=1"
@define PRE_F2 "prefix_f2=1"

# Define special registers for debugger
@ifdef IA64
define register offset=0x2200 size=4  [ IDTR_Limit ];
define register offset=0x2200 size=12 [ IDTR ];
define register offset=0x2204 size=8  [ IDTR_Address ];
define register offset=0x2220 size=4  [ GDTR_Limit ];
define register offset=0x2220 size=12 [ GDTR ];
define register offset=0x2224 size=8  [ GDTR_Address ];
define register offset=0x2240 size=4  [ LDTR_Limit ];
define register offset=0x2240 size=14 [ LDTR ];
define register offset=0x2244 size=8  [ LDTR_Address ];
define register offset=0x2248 size=2  [ LDTR_Attributes ];
define register offset=0x2260 size=4  [ TR_Limit ];
define register offset=0x2260 size=14 [ TR ];
define register offset=0x2264 size=8  [ TR_Address ];
define register offset=0x2268 size=2  [ TR_Attributes ];
@else
define register offset=0x2200 size=6 [ IDTR ];
define register offset=0x2200 size=2 [ IDTR_Limit ];
define register offset=0x2202 size=4 [ IDTR_Address ];
define register offset=0x2210 size=6 [ GDTR ];
define register offset=0x2210 size=2 [ GDTR_Limit ];
define register offset=0x2212 size=4 [ GDTR_Address ];
define register offset=0x2220 size=6 [ LDTR ];
define register offset=0x2220 size=2 [ LDTR_Limit ];
define register offset=0x2222 size=4 [ LDTR_Address ];
define register offset=0x2230 size=6 [ TR ];
define register offset=0x2230 size=2 [ TR_Limit ];
define register offset=0x2232 size=4 [ TR_Address ];
@endif

# Primary opcode byte and common sub-field views of it.
define token opbyte (8)
   byte=(0,7)
   high4=(4,7)
   high5=(3,7)
   low5=(0,4)
   byte_4=(4,4)
   byte_0=(0,0)
;

# ModR/M byte: many aliased views of the mod/reg/rm fields, one per operand flavor,
# so each alias can have its own register attachment below.
define token modrm (8)
   mod = (6,7)
   reg_opcode = (3,5)
   reg_opcode_hb = (5,5)
   r_m = (0,2)
   row = (4,7)
   col = (0,2)
   page = (3,3)
   cond = (0,3)
   reg8 = (3,5)
   reg16 = (3,5)
   reg32 = (3,5)
   reg64 = (3,5)
   reg8_x0 = (3,5)
   reg8_x1 = (3,5)
   reg16_x = (3,5)
   reg32_x = (3,5)
   reg64_x = (3,5)
   Sreg = (3,5)
   creg = (3,5)
   creg_x = (3,5)
   debugreg = (3,5)
   debugreg_x = (3,5)
   testreg = (3,5)
   r8 = (0,2)
   r16 = (0,2)
   r32 = (0,2)
   r64 = (0,2)
   r8_x0 = (0,2)
   r8_x1 = (0,2)
   r16_x = (0,2)
   r32_x = (0,2)
   r64_x = (0,2)
   frow = (4,7)
   fpage = (3,3)
   freg = (0,2)
   rexw = (3,3)
   rexr = (2,2)
   rexx = (1,1)
   rexb = (0,0)
   mmxmod = (6,7)
   mmxreg = (3,5)
   mmxreg1 = (3,5)
   mmxreg2 = (0,2)
   xmmmod = (6,7)
   xmmreg = (3,5)
   ymmreg = (3,5)
   zmmreg = (3,5)
   xmmreg1 = (3,5)
   ymmreg1 = (3,5)
   zmmreg1 = (3,5)
   xmmreg2 = (0,2)
   ymmreg2 = (0,2)
   zmmreg2 = (0,2)
   xmmreg_x = (3,5)
   ymmreg_x = (3,5)
   zmmreg_x = (3,5)
   xmmreg1_x = (3,5)
   ymmreg1_x = (3,5)
   zmmreg1_x = (3,5)
   xmmreg1_r = (3,5)
   ymmreg1_r = (3,5)
   zmmreg1_r = (3,5)
   xmmreg1_rx = (3,5)
   ymmreg1_rx = (3,5)
   zmmreg1_rx = (3,5)
   xmmreg2_b = (0,2)
   ymmreg2_b = (0,2)
   zmmreg2_b = (0,2)
   xmmreg2_x = (0,2)
   ymmreg2_x = (0,2)
   zmmreg2_x = (0,2)
   xmmreg2_bx = (0,2)
   ymmreg2_bx = (0,2)
   zmmreg2_bx = (0,2)
   vex_pp = (0,1)
   vex_l = (2,2)
   vex_vvvv = (3,6)
   vex_r = (7,7)
   vex_x = (6,6)
   vex_b = (5,5)
   vex_w = (7,7)
   vex_mmmmm = (0,4)
   evex_rp = (4,4)
   evex_res = (3,3)
   evex_res2 = (2,2)
   evex_mmm = (0,2)
   evex_z = (7,7)
   evex_lp = (6,6)
   evex_l = (5,5)
   evex_b = (4,4)
   evex_vp = (3,3)
   evex_aaa = (0,2)
   opmaskreg = (3,5)
   opmaskrm = (0,2)
   bnd1 = (3,5)
   bnd1_lb = (3,5)
   bnd1_ub = (3,5)
   bnd2 = (0,2)
   bnd2_lb = (0,2)
   bnd2_ub = (0,2)
;

# SIB byte: scale/index/base, plus VSIB index views for gather/scatter.
define token sib (8)
   ss = (6,7)
   index = (3,5)
   index_x = (3,5)
   index64 = (3,5)
   index64_x = (3,5)
   xmm_vsib = (3,5)
   xmm_vsib_x = (3,5)
   ymm_vsib = (3,5)
   ymm_vsib_x = (3,5)
   zmm_vsib = (3,5)
   zmm_vsib_x = (3,5)
   base = (0,2)
   base_x = (0,2)
   base64 = (0,2)
   base64_x = (0,2)
;

# Immediate tokens (8/16/32/64-bit) with signed/unsigned and sub-bit views.
define token I8 (8)
   Xmm_imm8_7_4=(4,7)
   Ymm_imm8_7_4=(4,7)
   imm8_7=(7,7)
   imm8_6=(6,6)
   imm8_6_7=(6,7)
   imm8_5=(5,5)
   imm8_5_7=(5,7)
   imm8_4=(4,4)
   imm8_4_7=(4,7)
   imm8_3=(3,3)
   imm8_3_7=(3,7)
   imm8_2=(2,2)
   imm8_2_7=(2,7)
   imm8_1=(1,1)
   imm8_1_7=(1,7)
   imm8_0=(0,0)
   imm8_3_0=(0,3)
   imm8=(0,7)
   imm8_val=(0,7)
   simm8=(0,7) signed
;
define token I16 (16) imm16_15=(15,15) imm16=(0,15) simm16=(0,15) signed j16=(0,15);
define token I32 (32) imm32=(0,31) simm32=(0,31) signed;
define token I64 (64) imm64=(0,63) simm64=(0,63) signed;
define token override (8) over=(0,7);

# Bind the ModR/M / SIB field aliases to concrete registers.
attach variables [ r32 reg32 base index ] [ EAX ECX EDX EBX ESP EBP ESI EDI ];
attach variables [ r16 reg16 ] [ AX CX DX BX SP BP SI DI ];
attach variables [ r8 reg8 ] [ AL CL DL BL AH CH DH BH ];
attach variables Sreg [ ES CS SS DS FS GS _ _ ];
attach variables freg [ ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 ];
attach variables [ debugreg ] [ DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 ];
@ifdef IA64
attach variables [ r64 reg64 base64 index64 ] [ RAX RCX RDX RBX RSP RBP RSI RDI ];
attach variables [ r64_x reg64_x base64_x index64_x ] [ R8 R9 R10 R11 R12 R13 R14 R15 ];
attach variables [ r32_x reg32_x base_x index_x ] [ R8D R9D R10D R11D R12D R13D R14D R15D ];
attach variables [ r16_x reg16_x ] [ R8W R9W R10W R11W R12W R13W R14W R15W ];
attach variables [ r8_x0 reg8_x0 ] [ AL CL DL BL SPL BPL SIL DIL ];
attach variables [ r8_x1 reg8_x1 ] [ R8B R9B R10B R11B R12B R13B R14B R15B ];
attach variables [ debugreg_x ] [ DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 ];
attach variables creg [ CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 ];
attach variables creg_x [ CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 ];
@else
attach variables [ testreg ] [ TR0 TR1 TR2 TR3 TR4 TR5 TR6 TR7 ];
attach variables creg [ CR0 _ CR2 CR3 CR4 _ _ _ ];
@endif
attach values ss [ 1 2 4 8 ];
attach variables [ mmxreg mmxreg1 mmxreg2 ] [ MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 ];
attach variables [ xmmreg xmmreg1 xmmreg2 xmm_vsib ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 ];
attach variables [ xmmreg_x xmmreg1_x xmmreg2_b xmm_vsib_x ] [ XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];
attach variables [ xmmreg1_r xmmreg2_x ] [ XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 ];
attach variables [ xmmreg1_rx xmmreg2_bx ] [ XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 ];
attach variables [ vexVVVV_XmmReg Xmm_imm8_7_4 ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 ];
attach variables [ vexVVVV_YmmReg Ymm_imm8_7_4 ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];
attach variables [ vexVVVV_ZmmReg ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ];
attach variables [ evexV5_XmmReg ] [ XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 XMM9 XMM10 XMM11 XMM12 XMM13 XMM14 XMM15 XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 ];
attach variables [ evexV5_YmmReg ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ];
attach variables [ evexV5_ZmmReg ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 ];
@ifdef IA64
attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI R8D R9D R10D R11D R12D R13D R14D R15D ];
attach variables [ vexVVVV_r64 ] [ RAX RCX RDX RBX RSP RBP RSI RDI R8 R9 R10 R11 R12 R13 R14 R15 ];
@else
attach variables [ vexVVVV_r32 ] [ EAX ECX EDX EBX ESP EBP ESI EDI _ _ _ _ _ _ _ _ ];
@endif
attach variables [ evexOpmask opmaskreg opmaskrm evexVopmask ] [ K0 K1 K2 K3 K4 K5 K6 K7 ];
attach variables [ ymmreg ymmreg1 ymmreg2 ymm_vsib ] [ YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 ];
attach variables [ ymmreg_x ymmreg1_x ymmreg2_b ymm_vsib_x ] [ YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 ];
attach variables [ ymmreg1_r ymmreg2_x ] [ YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 ];
attach variables [ ymmreg1_rx ymmreg2_bx ] [ YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ];
attach variables [ zmmreg zmmreg1 zmmreg2 zmm_vsib ] [ ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ];
attach variables [ zmmreg_x zmmreg1_x zmmreg2_b zmm_vsib_x ] [ ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ];
attach variables [ zmmreg1_r zmmreg2_x ] [ ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ];
attach variables [ zmmreg1_rx zmmreg2_bx ] [ ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 ];
attach variables [ bnd1 bnd2 ] [ BND0 BND1 BND2 BND3 _ _ _ _ ];
attach variables [ bnd1_lb bnd2_lb ] [ BND0_LB BND1_LB BND2_LB BND3_LB _ _ _ _ ];
attach variables [ bnd1_ub bnd2_ub ] [ BND0_UB BND1_UB BND2_UB BND3_UB _ _ _ _ ];

# Define special pcodeop that calculates the RAM address
# given the segment selector and offset as input
define pcodeop segment;
define pcodeop in;        # force in/out to show up in decompiler
define pcodeop out;
define pcodeop sysenter;
define pcodeop sysexit;
define pcodeop syscall;
define pcodeop sysret;
define pcodeop swapgs;
define pcodeop invlpg;
define pcodeop invlpga;
define pcodeop invpcid;
define pcodeop rdtscp;
define pcodeop mwait;
define pcodeop mwaitx;
define pcodeop monitor;
define pcodeop monitorx;
define pcodeop swi;       # for INT instruction
define pcodeop LOCK;      # for LOCK prefix
define pcodeop UNLOCK;    # for LOCK prefix
define pcodeop XACQUIRE;  # for XACQUIRE prefix
define pcodeop XRELEASE;  # for XRELEASE prefix

# MFL: definitions for AMD hardware assisted virtualization instructions
define pcodeop clgi;      # clear global interrupt flag (GIF)
define pcodeop stgi;      # set global interrupt flag (GIF)
define pcodeop vmload;    # Load state from VMCB, opcode 0f 01 da
define pcodeop vmmcall;   # Call VMM, opcode 0f 01 d9
define pcodeop vmrun;     # Run virtual machine, opcode 0f 01 d8
define pcodeop vmsave;    # Save state to VMCB, opcode 0f 01 db

# MFL: definitions for Intel IA hardware assisted virtualization instructions
define pcodeop invept;    # Invalidate Translations Derived from extended page tables (EPT); opcode 66 0f 38 80
define pcodeop invvpid;   # Invalidate Translations Based on virtual-processor identifier (VPID); opcode 66 0f 38 81
define pcodeop vmcall;    # Call to VM monitor by causing VM exit, opcode 0f 01 c1
define pcodeop vmclear;   # Clear virtual-machine control structure, opcode 66 0f c7 /6
define pcodeop vmfunc;    # call virtual-machine function referenced by EAX
define pcodeop vmlaunch;  # Launch virtual machine managed by current VMCS; opcode 0f 01 c2
define pcodeop vmresume;  # Resume virtual machine managed by current VMCS; opcode 0f 01 c3
define pcodeop vmptrld;   # Load pointer to virtual-machine control structure; opcode 0f c7 /6
define pcodeop vmptrst;   # Store pointer to virtual-machine control structure; opcode 0f c7 /7
define pcodeop vmread;    # Read field from virtual-machine control structure; opcode 0f 78
define pcodeop vmwrite;   # Write field to virtual-machine control structure; opcode 0f 79
define pcodeop vmxoff;    # Leave VMX operation; opcode 0f 01 c4
define pcodeop vmxon;     # Enter VMX operation; opcode f3 0f C7 /6

@ifdef IA64
@define LONGMODE_ON "longMode=1"
@define LONGMODE_OFF "longMode=0"
@else
@define LONGMODE_OFF "opsize=opsize" # NOP
@endif

# when not in 64-bit mode, opcode 0x82 results in the same instruction as opcode 0x80
# in 64-bit mode, opcode 0x82 results in #UD
# see 22.15 "Undefined Opcodes" of the intel manual
@ifdef IA64
@define BYTE_80_82 "(byte=0x80 | (longMode=0 & byte=0x82))"
@else
@define BYTE_80_82 "(byte=0x80 | byte=0x82)"
@endif

@include "macros.sinc"

# Sub-table constructors selecting plain vs REX/EVEX-extended register banks
# based on the decoded prefix context bits.
@ifdef IA64
Reg8: reg8       is rexprefix=0 & reg8 { export reg8; }
Reg8: reg8_x0    is rexprefix=1 & rexRprefix=0 & reg8_x0 { export reg8_x0; }
Reg8: reg8_x1    is rexprefix=1 & rexRprefix=1 & reg8_x1 { export reg8_x1; }
Reg16: reg16     is rexRprefix=0 & reg16 { export reg16; }
Reg16: reg16_x   is rexRprefix=1 & reg16_x { export reg16_x; }
Reg32: reg32     is rexRprefix=0 & reg32 { export reg32; }
Reg32: reg32_x   is rexRprefix=1 & reg32_x { export reg32_x; }
Reg64: reg64     is rexRprefix=0 & reg64 { export reg64; }
Reg64: reg64_x   is rexRprefix=1 & reg64_x { export reg64_x; }
Rmr8: r8         is rexprefix=0 & r8 { export r8; }
Rmr8: r8_x0      is rexprefix=1 & rexBprefix=0 & r8_x0 { export r8_x0; }
Rmr8: r8_x1      is rexprefix=1 & rexBprefix=1 & r8_x1 { export r8_x1; }
CRmr8: r8        is rexBprefix=0 & r8 { export r8; }
CRmr8: r8        is addrsize=2 & rexBprefix=0 & r8 { export r8; }
CRmr8: r8_x0     is addrsize=2 & rexprefix=1 & rexBprefix=0 & r8_x0 { export r8_x0; }
CRmr8: r8_x1     is addrsize=2 & rexprefix=1 & rexBprefix=1 & r8_x1 { export r8_x1; }
Rmr16: r16       is rexBprefix=0 & r16 { export r16; }
Rmr16: r16_x     is rexBprefix=1 & r16_x { export r16_x; }
CRmr16: r16      is rexBprefix=0 & r16 { export r16; }
CRmr16: r16_x    is rexBprefix=1 & r16_x { export r16_x; }
Rmr32: r32       is rexBprefix=0 & r32 { export r32; }
Rmr32: r32_x     is rexBprefix=1 & r32_x { export r32_x; }
CRmr32: r32      is rexBprefix=0 & r32 & r64 { export r64; }
CRmr32: r32_x    is rexBprefix=1 & r32_x & r64_x { export r64_x; }
Rmr64: r64       is rexBprefix=0 & r64 { export r64; }
Rmr64: r64_x     is rexBprefix=1 & r64_x { export r64_x; }
Base: base       is rexBprefix=0 & base { export base; }
Base: base_x     is rexBprefix=1 & base_x { export base_x; }
Index: index     is rexXprefix=0 & index { export index; }
Index: index_x   is rexXprefix=1 & index_x { export index_x; }
Base64: base64   is rexBprefix=0 & base64 { export base64; }
Base64: base64_x is rexBprefix=1 & base64_x { export base64_x; }
Index64: index64 is rexXprefix=0 & index64 { export index64; }
Index64: index64_x is rexXprefix=1 & index64_x { export index64_x; }
XmmReg: xmmreg       is rexRprefix=0 & xmmreg { export xmmreg; }
XmmReg: xmmreg_x     is rexRprefix=1 & xmmreg_x { export xmmreg_x; }
XmmReg1: xmmreg1     is rexRprefix=0 & xmmreg1 { export xmmreg1; }
XmmReg1: xmmreg1_x   is rexRprefix=1 & xmmreg1_x { export xmmreg1_x; }
XmmReg1: xmmreg1_r   is rexRprefix=0 & evexRp=1 & xmmreg1_r { export xmmreg1_r; }
XmmReg1: xmmreg1_rx  is rexRprefix=1 & evexRp=1 & xmmreg1_rx { export xmmreg1_rx; }
XmmReg2: xmmreg2     is rexBprefix=0 & xmmreg2 { export xmmreg2; }
XmmReg2: xmmreg2_b   is rexBprefix=1 & xmmreg2_b { export xmmreg2_b; }
XmmReg2: xmmreg2_x   is rexBprefix=0 & rexXprefix=1 & xmmreg2_x { export xmmreg2_x; }
XmmReg2: xmmreg2_bx  is rexBprefix=1 & rexXprefix=1 & xmmreg2_bx { export xmmreg2_bx; }
YmmReg1: ymmreg1     is rexRprefix=0 & ymmreg1 { export ymmreg1; }
YmmReg1: ymmreg1_x   is rexRprefix=1 & ymmreg1_x { export ymmreg1_x; }
YmmReg1: ymmreg1_r   is rexRprefix=0 & evexRp=1 & ymmreg1_r { export ymmreg1_r; }
YmmReg1: ymmreg1_rx  is rexRprefix=1 & evexRp=1 & ymmreg1_rx { export ymmreg1_rx; }
YmmReg2: ymmreg2     is rexBprefix=0 & ymmreg2 { export ymmreg2; }
YmmReg2: ymmreg2_b   is rexBprefix=1 & ymmreg2_b { export ymmreg2_b; }
YmmReg2: ymmreg2_x   is rexBprefix=0 & rexXprefix=1 & ymmreg2_x { export ymmreg2_x; }
YmmReg2: ymmreg2_bx  is rexBprefix=1 & rexXprefix=1 & ymmreg2_bx { export ymmreg2_bx; }
ZmmReg1: zmmreg1     is rexRprefix=0 & zmmreg1 { export zmmreg1; }
ZmmReg1: zmmreg1_x   is rexRprefix=1 & zmmreg1_x { export zmmreg1_x; }
ZmmReg1: zmmreg1_r   is rexRprefix=0 & evexRp=1 & zmmreg1_r { export zmmreg1_r; }
ZmmReg1: zmmreg1_rx  is rexRprefix=1 & evexRp=1 & zmmreg1_rx { export zmmreg1_rx; }
ZmmReg2: zmmreg2     is rexBprefix=0 & zmmreg2 { export zmmreg2; }
ZmmReg2: zmmreg2_b   is rexBprefix=1 & zmmreg2_b { export zmmreg2_b; }
ZmmReg2: zmmreg2_x   is rexBprefix=0 & rexXprefix=1 & zmmreg2_x { export zmmreg2_x; }
ZmmReg2: zmmreg2_bx  is rexBprefix=1 & rexXprefix=1 & zmmreg2_bx { export zmmreg2_bx; }
Xmm_vsib: xmm_vsib   is rexXprefix=0 & xmm_vsib { export xmm_vsib; }
Xmm_vsib: xmm_vsib_x is
rexXprefix=1 & xmm_vsib_x { export xmm_vsib_x; } Ymm_vsib: ymm_vsib is rexXprefix=0 & ymm_vsib { export ymm_vsib; } Ymm_vsib: ymm_vsib_x is rexXprefix=1 & ymm_vsib_x { export ymm_vsib_x; } Zmm_vsib: zmm_vsib is rexXprefix=0 & zmm_vsib { export zmm_vsib; } Zmm_vsib: zmm_vsib_x is rexXprefix=1 & zmm_vsib_x { export zmm_vsib_x; } @else Reg8: reg8 is reg8 { export reg8; } Reg16: reg16 is reg16 { export reg16; } Reg32: reg32 is reg32 { export reg32; } Rmr8: r8 is r8 { export r8; } CRmr8: r8 is r8 { export r8; } Rmr16: r16 is r16 { export r16; } CRmr16: r16 is r16 { export r16; } Rmr32: r32 is r32 { export r32; } CRmr32: r32 is r32 { export r32; } Base: base is base { export base; } Index: index is index { export index; } XmmReg: xmmreg is xmmreg { export xmmreg; } XmmReg1: xmmreg1 is xmmreg1 { export xmmreg1; } XmmReg2: xmmreg2 is xmmreg2 { export xmmreg2; } YmmReg1: ymmreg1 is ymmreg1 { export ymmreg1; } YmmReg2: ymmreg2 is ymmreg2 { export ymmreg2; } ZmmReg1: zmmreg1 is zmmreg1 { export zmmreg1; } ZmmReg2: zmmreg2 is zmmreg2 { export zmmreg2; } Xmm_vsib: xmm_vsib is xmm_vsib { export xmm_vsib; } Ymm_vsib: ymm_vsib is ymm_vsib { export ymm_vsib; } Zmm_vsib: zmm_vsib is zmm_vsib { export zmm_vsib; } @endif # signed immediate value subconstructors simm8_16: simm8 is simm8 { export *[const]:2 simm8; } simm8_32: simm8 is simm8 { export *[const]:4 simm8; } @ifdef IA64 simm8_64: simm8 is simm8 { export *[const]:8 simm8; } @endif simm16_16: simm16 is simm16 { export *[const]:2 simm16; } simm32_32: simm32 is simm32 { export *[const]:4 simm32; } @ifdef IA64 simm32_64: simm32 is simm32 { export *[const]:8 simm32; } imm32_64: imm32 is imm32 { export *[const]:8 imm32; } @endif # EVEX used a compressed Disp8*N format # Table 2-35: # TupleType | EVEX.B | InputSize | EVEX.W | Broadcast |N (VL=128) | N (VL=256) | N (VL=512) | evexBType # Full Mem | 0 | 32bit | 0 | none | 16 | 32 | 64 | 0 # Full Mem | 1 | 32bit | 0 | {1tox} | 4 | 4 | 4 | 0 # Full Mem | 0 | 64bit | 1 | none | 16 | 32 | 
64 | 0 # Full Mem | 1 | 64bit | 1 | {1tox} | 8 | 8 | 8 | 0 # Half Mem | 0 | 32bit | 0 | none | 8 | 16 | 32 | 1 # Half Mem | 1 | 32bit | 0 | {1tox} | 4 | 4 | 4 | 1 evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=0 & evexL=0 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=0 & evexL=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=0 & evexL=2 [ offs = 6; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=1 & rexWprefix=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=0 & evexB=1 & rexWprefix=1 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=1 & rexWprefix=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=0 & evexL=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=0 & evexL=1 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=0 & evexBType=1 & evexB=0 & evexL=2 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; } # Table 2-35: # TupleType | InputSize | EVEX.W | N (VL=128) | N (VL=256) | N (VL=512) | evexTType # Full Mem | N/A | N/A | 16 | 32 | 64 | 0 # Tuple 1 Scalar | 8bit | N/A | 1 | 1 | 1 | 1 # Tuple 1 Scalar | 16bit | N/A | 2 | 2 | 2 | 2 # Tuple 1 Scalar | 32bit | 0 | 4 | 4 | 4 | 3 # Tuple 1 Scalar | 64bit | 1 | 8 | 8 | 8 | 3 # Tuple 1 Fixed | 32bit | N/A | 4 | 4 | 4 | 4 # Tuple 1 Fixed | 64bit | N/A | 8 | 8 | 8 | 5 # Tuple 2 | 32bit | 0 | 8 | 8 | 8 | 6 # Tuple 2 | 64bit | 1 | N/A | 16 | 16 | 6 # Tuple 4 | 32bit | 0 | N/A | 16 | 16 | 7 # Tuple 4 | 64bit | 1 | N/A | N/A | 32 | 7 # Tuple 8 | 32bit | 0 | N/A | N/A | 32 | 8 # Half Mem | N/A | N/A | 8 | 16 | 32 | 9 # Quarter Mem | N/A | N/A | 4 | 
8 | 16 | A # Eighth Mem | N/A | N/A | 2 | 4 | 8 | B # Mem128 | N/A | 1 | 16 | 16 | 16 | C # MOVDDUP | N/A | N/A | 8 | 32 | 64 | D evexDisp8N: offs is evexD8Type=1 & evexTType=0x0 & evexL=0 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x0 & evexL=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x0 & evexL=2 [ offs = 6; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x9 & evexL=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x9 & evexL=1 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x9 & evexL=2 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0xa & evexL=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0xa & evexL=1 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0xa & evexL=2 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0xb & evexL=0 [ offs = 1; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0xb & evexL=1 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0xb & evexL=2 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x1 [ offs = 0; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x2 [ offs = 1; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x3 & rexWprefix=0 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; } evexDisp8N: offs is evexD8Type=1 & evexTType=0x3 & rexWprefix=1 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; } 
# Remaining EVEX Disp8*N shift amounts for the tuple-type table above
# (evexDisp8 holds log2(N); the memory displacement is simm8 << evexDisp8).
evexDisp8N: offs is evexD8Type=1 & evexTType=0x4 [ offs = 2; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x5 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x6 & rexWprefix=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x6 & rexWprefix=1 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x7 & rexWprefix=0 [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x7 & rexWprefix=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0x8 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xc [ offs = 4; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xd & evexL=0 [ offs = 3; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xd & evexL=1 [ offs = 5; evexDisp8=offs; ] { export *[const]:1 offs; }
evexDisp8N: offs is evexD8Type=1 & evexTType=0xd & evexL=2 [ offs = 6; evexDisp8=offs; ] { export *[const]:1 offs; }

# In EVEX mode (vexMode=2) an 8-bit displacement is scaled by N, i.e.
# shifted left by the evexDisp8 value computed by evexDisp8N above.
simm8_16: disp8N is vexMode=2 & evexDisp8N & simm8 [ disp8N = simm8 << evexDisp8; ] { export *[const]:2 disp8N; }
simm8_32: disp8N is vexMode=2 & evexDisp8N & simm8 [ disp8N = simm8 << evexDisp8; ] { export *[const]:4 disp8N; }
@ifdef IA64
simm8_64: disp8N is vexMode=2 & evexDisp8N & simm8 [ disp8N = simm8 << evexDisp8; ] { export *[const]:8 disp8N; }
@endif

# Manually sign-extended 8-bit immediates (imm8_7 is the sign bit; the
# negative case ORs in the widened sign mask).
usimm8_16: imm8 is imm8 & imm8_7=0 { export *[const]:2 imm8; }
usimm8_16: val is imm8 & imm8_7=1 [ val = 0xff00 | imm8; ] { export *[const]:2 val; }
usimm8_32: imm8 is imm8 & imm8_7=0 { export *[const]:4 imm8; }
usimm8_32: val is imm8 & imm8_7=1 [ val = 0xffffff00 | imm8; ] { export *[const]:4 val; }
@ifdef IA64
usimm8_64: imm8 is imm8 & imm8_7=0 { export *[const]:8 imm8; }
usimm8_64: val is imm8 & imm8_7=1 [ val = 0xffffffffffffff00 | imm8; ] { export *[const]:8 val; }
@endif

# unused
#usimm16_32: imm16 is imm16 & imm16_15=0 { export *[const]:4 imm16; }
#usimm16_32: val is imm16 & imm16_15=1 [ val = 0xffff0000 | imm16; ] { export *[const]:4 val; }

# RIP/EIP relative address - NOTE: export of size 0 is intentional so it may be adjusted
pcRelSimm32: addr is simm32 [ addr=inst_next+simm32; ] { export addr; }

# 16-bit addressing modes (the offset portion)
# One constructor per ModRM (mod, r_m) combination; mod selects the
# displacement size (none / simm8 / imm16).
addr16: [BX + SI] is mod=0 & r_m=0 & BX & SI { local tmp=BX+SI; export tmp; }
addr16: [BX + DI] is mod=0 & r_m=1 & BX & DI { local tmp=BX+DI; export tmp; }
addr16: [BP + SI] is mod=0 & r_m=2 & BP & SI { local tmp=BP+SI; export tmp; }
addr16: [BP + DI] is mod=0 & r_m=3 & BP & DI { local tmp=BP+DI; export tmp; }
addr16: [SI] is mod=0 & r_m=4 & SI { export SI; }
addr16: [DI] is mod=0 & r_m=5 & DI { export DI; }
addr16: [imm16] is mod=0 & r_m=6; imm16 { export *[const]:2 imm16; }
addr16: [BX] is mod=0 & r_m=7 & BX { export BX; }
addr16: [BX + SI + simm8_16] is mod=1 & r_m=0 & BX & SI; simm8_16 { local tmp=BX+SI+simm8_16; export tmp; }
addr16: [BX + DI + simm8_16] is mod=1 & r_m=1 & BX & DI; simm8_16 { local tmp=BX+DI+simm8_16; export tmp; }
addr16: [BP + SI + simm8_16] is mod=1 & r_m=2 & BP & SI; simm8_16 { local tmp=BP+SI+simm8_16; export tmp; }
addr16: [BP + DI + simm8_16] is mod=1 & r_m=3 & BP & DI; simm8_16 { local tmp=BP+DI+simm8_16; export tmp; }
addr16: [SI + simm8_16] is mod=1 & r_m=4 & SI; simm8_16 { local tmp=SI+simm8_16; export tmp; }
addr16: [DI + simm8_16] is mod=1 & r_m=5 & DI; simm8_16 { local tmp=DI+simm8_16; export tmp; }
addr16: [BP + simm8_16] is mod=1 & r_m=6 & BP; simm8_16 { local tmp=BP+simm8_16; export tmp; }
addr16: [BX + simm8_16] is mod=1 & r_m=7 & BX; simm8_16 { local tmp=BX+simm8_16; export tmp; }
addr16: [BX + SI + imm16] is mod=2 & r_m=0 & BX & SI; imm16 { local tmp=BX+SI+imm16; export tmp; }
addr16: [BX + DI + imm16] is mod=2 & r_m=1 & BX & DI; imm16 { local tmp=BX+DI+imm16; export tmp; }
addr16: [BP + SI + imm16] is mod=2 & r_m=2 & BP & SI; imm16 { local tmp=BP+SI+imm16; export tmp; }
addr16: [BP + DI + imm16] is mod=2 & r_m=3 & BP & DI; imm16 {local tmp=BP+DI+imm16; export tmp; }
addr16: [SI + imm16] is mod=2 & r_m=4 & SI; imm16 { local tmp=SI+imm16; export tmp; }
addr16: [DI + imm16] is mod=2 & r_m=5 & DI; imm16 { local tmp=DI+imm16; export tmp; }
addr16: [BP + imm16] is mod=2 & r_m=6 & BP; imm16 { local tmp=BP+imm16; export tmp; }
addr16: [BX + imm16] is mod=2 & r_m=7 & BX; imm16 { local tmp=BX+imm16; export tmp; }

# 32-bit addressing modes (the offset portion)
# r_m=4 selects the SIB byte (Base + Index*ss); index=4 means "no index",
# base=5 (with mod=0) means "no base, disp32 only".
addr32: [Rmr32] is mod=0 & Rmr32 { export Rmr32; }
addr32: [Rmr32 + simm8_32] is mod=1 & Rmr32; simm8_32 { local tmp=Rmr32+simm8_32; export tmp; }
addr32: [Rmr32] is mod=1 & r_m!=4 & Rmr32; simm8=0 { export Rmr32; }
addr32: [Rmr32 + imm32] is mod=2 & Rmr32; imm32 { local tmp=Rmr32+imm32; export tmp; }
addr32: [Rmr32] is mod=2 & r_m!=4 & Rmr32; imm32=0 { export Rmr32; }
addr32: [imm32] is mod=0 & r_m=5; imm32 { export *[const]:4 imm32; }
addr32: [Base + Index*ss] is mod=0 & r_m=4; Index & Base & ss { local tmp=Base+Index*ss; export tmp; }
addr32: [Base] is mod=0 & r_m=4; index=4 & Base { export Base; }
addr32: [Index*ss + imm32] is mod=0 & r_m=4; Index & base=5 & ss; imm32 { local tmp=imm32+Index*ss; export tmp; }
addr32: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { export *[const]:4 imm32; }
addr32: [Base + Index*ss + simm8_32] is mod=1 & r_m=4; Index & Base & ss; simm8_32 { local tmp=simm8_32+Base+Index*ss; export tmp; }
addr32: [Base + simm8_32] is mod=1 & r_m=4; index=4 & Base; simm8_32 { local tmp=simm8_32+Base; export tmp; }
addr32: [Base + Index*ss] is mod=1 & r_m=4; Index & Base & ss; simm8=0 { local tmp=Base+Index*ss; export tmp; }
addr32: [Base] is mod=1 & r_m=4; index=4 & Base; simm8=0 { export Base; }
addr32: [Base + Index*ss + imm32] is mod=2 & r_m=4; Index & Base & ss; imm32 { local tmp=imm32+Base+Index*ss; export tmp; }
addr32: [Base + imm32] is mod=2 & r_m=4; index=4 & Base; imm32 { local tmp=imm32+Base; export tmp; }
addr32: [Base + Index*ss] is mod=2 & r_m=4; Index & Base & ss; imm32=0 { local tmp=Base+Index*ss; export tmp; }
addr32: [Base] is mod=2 & r_m=4; index=4 & Base; imm32=0 { export Base; }
@ifdef IA64
addr32: [pcRelSimm32] is bit64=1 & mod=0 & r_m=4; index=4 & base=5; pcRelSimm32 { export *[const]:4 pcRelSimm32; }
# Addr32_64: 32-bit address forms widened to 8 bytes for use in long mode.
Addr32_64: [pcRelSimm32] is mod=0 & r_m=5; pcRelSimm32 { export *[const]:8 pcRelSimm32; }
Addr32_64: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { export *[const]:8 imm32; }
Addr32_64: addr32 is addr32 { tmp:8 = sext(addr32); export tmp; }
@endif

# 64-bit addressing modes (the offset portion)
@ifdef IA64
addr64: [Rmr64] is mod=0 & Rmr64 { export Rmr64; }
addr64: [Rmr64 + simm8_64] is mod=1 & Rmr64; simm8_64 { local tmp=Rmr64+simm8_64; export tmp; }
addr64: [Rmr64 + simm32_64] is mod=2 & Rmr64; simm32_64 { local tmp=Rmr64+simm32_64; export tmp; }
addr64: [Rmr64] is mod=1 & r_m!=4 & Rmr64; simm8=0 { export Rmr64; }
addr64: [Rmr64] is mod=2 & r_m!=4 & Rmr64; simm32=0 { export Rmr64; }
# mod=0, r_m=5 is RIP-relative in 64-bit mode.
addr64: [pcRelSimm32] is mod=0 & r_m=5; pcRelSimm32 { export *[const]:8 pcRelSimm32; }
addr64: [Base64 + Index64*ss] is mod=0 & r_m=4; Index64 & Base64 & ss { local tmp=Base64+Index64*ss; export tmp; }
addr64: [Base64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & Base64 { export Base64; }
addr64: [simm32_64 + Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; simm32_64 { local tmp=simm32_64+Index64*ss; export tmp; }
addr64: [Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; imm32=0 { local tmp=Index64*ss; export tmp; }
addr64: [simm32_64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & base64=5; simm32_64 { export *[const]:8 simm32_64; }
addr64: [Base64 + simm8_64] is mod=1 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm8_64 { local tmp=simm8_64+Base64; export tmp; }
addr64: [Base64 + Index64*ss + simm8_64] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8_64 { local tmp=simm8_64+Base64+Index64*ss; export tmp; }
addr64: [Base64 + Index64*ss] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8=0 { local tmp=Base64+Index64*ss; export tmp; }
addr64: [Base64 + simm32_64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm32_64 { local tmp=simm32_64+Base64; export tmp; }
addr64: [Base64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; imm32=0 { export Base64; }
addr64: [Base64 + Index64*ss + simm32_64] is mod=2 & r_m=4; Index64 & Base64 & ss; simm32_64 { local tmp=simm32_64+Base64+Index64*ss; export tmp; }
addr64: [Base64 + Index64*ss] is mod=2 & r_m=4; Index64 & Base64 & ss; imm32=0 { local tmp=Base64+Index64*ss; export tmp; }
@endif

# Current code-segment selector, reconstructed from inst_next; the shift
# used depends on real vs. protected mode.
currentCS: CS is protectedMode=0 & CS { tmp:4 = (inst_next >> 4) & 0xf000; CS = tmp:2; export CS; }
currentCS: CS is protectedMode=1 & CS { tmp:4 = (inst_next >> 16) & 0xffff; CS = tmp:2; export CS; }

# Wide segment-override base: zero for most segments, FS/GS export their
# OFFSET registers (segover encodes which prefix, if any, was seen).
segWide: is segover=0 { export 0:$(SIZE); }
segWide: CS: is segover=1 & CS { export 0:$(SIZE); }
segWide: SS: is segover=2 & SS { export 0:$(SIZE); }
segWide: DS: is segover=3 & DS { export 0:$(SIZE); }
segWide: ES: is segover=4 & ES { export 0:$(SIZE); }
segWide: FS: is segover=5 & FS { export FS_OFFSET; }
segWide: GS: is segover=6 & GS { export GS_OFFSET; }

# 16-bit segment selector for segmented addressing; defaults to DS when no
# override prefix is present.
seg16: is segover=0 { export DS; }
seg16: currentCS: is segover=1 & currentCS { export currentCS; }
seg16: SS: is segover=2 & SS { export SS; }
seg16: DS: is segover=3 & DS { export DS; }
seg16: ES: is segover=4 & ES { export ES; }
seg16: FS: is segover=5 & FS { export FS; }
seg16: GS: is segover=6 & GS { export GS; }

# 16-bit segmented memory operand. The BP-based (r_m=2,3 and mod!=0 r_m=6)
# forms default to SS when there is no override; everything else goes
# through seg16 (default DS). segment() is the pcodeop defined earlier.
Mem16: addr16 is (segover=0 & mod=0 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=0 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=1 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=1 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=1 & r_m=6) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=2 & r_m=2) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=2 & r_m=3) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: addr16 is (segover=0 & mod=2 & r_m=6) ... & addr16 { tmp:$(SIZE) = segment(SS,addr16); export tmp; }
Mem16: seg16^addr16 is seg16; addr16 { tmp:$(SIZE) = segment(seg16,addr16); export tmp; }

# Unified memory operand, selected by effective address size.
Mem: Mem16 is addrsize=0 & Mem16 { export Mem16; }
@ifdef IA64
Mem: segWide^Addr32_64 is $(LONGMODE_ON) & addrsize=1 & segWide; Addr32_64 { export Addr32_64; }
Mem: segWide^Addr32_64 is $(LONGMODE_ON) & addrsize=1 & segWide & highseg=1; Addr32_64 { tmp:8 = segWide + Addr32_64; export tmp; }
Mem: segWide^addr64 is $(LONGMODE_ON) & addrsize=2 & segWide; addr64 { export addr64; }
Mem: segWide^addr64 is $(LONGMODE_ON) & addrsize=2 & segWide & highseg=1; addr64 { tmp:$(SIZE) = segWide + addr64; export tmp; }
Mem: segWide^addr32 is $(LONGMODE_OFF) & addrsize=1 & segWide; addr32 { tmp:$(SIZE) = zext(addr32); export tmp; }
@else
Mem: segWide^addr32 is $(LONGMODE_OFF) & addrsize=1 & segWide; addr32 { export addr32; }
@endif
Mem: segWide^addr32 is $(LONGMODE_OFF) & addrsize=1 & segWide & highseg=1; addr32 { tmp:$(SIZE) = segWide + zext(addr32); export tmp; }

# Relative branch targets computed from inst_next; rel16 wraps within the
# low 16 bits of the instruction pointer.
rel8: reloc is simm8 [ reloc=inst_next+simm8; ] { export *[ram]:$(SIZE) reloc; }
rel16: reloc is simm16 [ reloc=((inst_next >> 16) << 16) | ((inst_next + simm16) & 0xFFFF); ] { export *[ram]:$(SIZE) reloc; }
rel32: reloc is simm32 [ reloc=inst_next+simm32; ] { export *[ram]:$(SIZE) reloc; }

# Sized memory dereferences of Mem, with the display text used in listings.
m8: "byte ptr" Mem is Mem { export *:1 Mem; }
m16: "word ptr" Mem is Mem { export *:2 Mem; }
m32: "dword ptr" Mem is Mem { export *:4 Mem; }
m64: "qword ptr" Mem is Mem { export *:8 Mem; }
m80: "tword ptr" Mem is Mem { export *:10 Mem; }
m128: "xmmword ptr" Mem is Mem { export *:16 Mem; }
m256: "ymmword ptr" Mem is Mem { export *:32 Mem; }
m512: "zmmword ptr" Mem is Mem { export *:64 Mem; }
m32fp: "float ptr" Mem is Mem { export *:4 Mem; }
m64fp: "double ptr" Mem is Mem { export *:8 Mem; }
m80fp: "extended double ptr" Mem is Mem { export *:10 Mem; }

##
## VSIB
##
# Vector-SIB effective addresses: the index is a vector register
# (XMM/YMM/ZMM), so the scalar base/displacement is zero-extended to the
# vector width before the per-lane add.
vaddr32x: [Base + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base & ss { local tmp=zext(Base)+Xmm_vsib*ss; export tmp; }
vaddr32x: [Xmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Xmm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Xmm_vsib*ss; export tmp; }
vaddr32x: [Base + Xmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Xmm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Xmm_vsib*ss; export tmp; }
vaddr32x: [Base + Xmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Xmm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Xmm_vsib*ss; export tmp; }
vaddr32y: [Base + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base & ss { local tmp=zext(Base)+Ymm_vsib*ss; export tmp; }
vaddr32y: [Ymm_vsib*ss + simm32_32] is mod=0 & r_m=4; Ymm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Ymm_vsib*ss; export tmp; }
vaddr32y: [Base + Ymm_vsib*ss + simm8_32] is mod=1 & r_m=4; Ymm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Ymm_vsib*ss; export tmp; }
vaddr32y: [Base + Ymm_vsib*ss + simm32_32] is mod=2 & r_m=4; Ymm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Ymm_vsib*ss; export tmp; }
vaddr32z: [Base + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base & ss { local tmp=zext(Base)+Zmm_vsib*ss; export tmp; }
vaddr32z: [Zmm_vsib*ss + simm32_32] is mod=0 & r_m=4; Zmm_vsib & base=5 & ss; simm32_32 { local tmp=zext(simm32_32)+Zmm_vsib*ss; export tmp; }
vaddr32z: [Base + Zmm_vsib*ss + simm8_32] is mod=1 & r_m=4; Zmm_vsib & Base & ss; simm8_32 { local tmp=zext(Base)+zext(simm8_32)+Zmm_vsib*ss; export tmp; }
vaddr32z: [Base + Zmm_vsib*ss + simm32_32] is mod=2 & r_m=4; Zmm_vsib & Base & ss; simm32_32 { local tmp=zext(Base)+zext(simm32_32)+Zmm_vsib*ss; export tmp; }
@ifdef IA64
vaddr64x: [Base64 + Xmm_vsib*ss] is mod=0 & r_m=4; Xmm_vsib & Base64 & ss { local tmp=zext(Base64)+Xmm_vsib*ss; export tmp; }
vaddr64x: [Xmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Xmm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Xmm_vsib*ss; export tmp; }
vaddr64x: [Base64 + Xmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Xmm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Xmm_vsib*ss; export tmp; }
vaddr64x: [Base64 + Xmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Xmm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Xmm_vsib*ss; export tmp; }
vaddr64y: [Base64 + Ymm_vsib*ss] is mod=0 & r_m=4; Ymm_vsib & Base64 & ss { local tmp=zext(Base64)+Ymm_vsib*ss; export tmp; }
vaddr64y: [Ymm_vsib*ss + simm32_64] is mod=0 & r_m=4; Ymm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Ymm_vsib*ss; export tmp; }
vaddr64y: [Base64 + Ymm_vsib*ss + simm8_64] is mod=1 & r_m=4; Ymm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Ymm_vsib*ss; export tmp; }
vaddr64y: [Base64 + Ymm_vsib*ss + simm32_64] is mod=2 & r_m=4; Ymm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Ymm_vsib*ss; export tmp; }
vaddr64z: [Base64 + Zmm_vsib*ss] is mod=0 & r_m=4; Zmm_vsib & Base64 & ss { local tmp=zext(Base64)+Zmm_vsib*ss; export tmp; }
vaddr64z: [Zmm_vsib*ss + simm32_64] is mod=0 & r_m=4; Zmm_vsib & base64=5 & ss; simm32_64 { local tmp=zext(simm32_64)+Zmm_vsib*ss; export tmp; }
vaddr64z: [Base64 + Zmm_vsib*ss + simm8_64] is mod=1 & r_m=4; Zmm_vsib & Base64 & ss; simm8_64 { local tmp=zext(Base64)+zext(simm8_64)+Zmm_vsib*ss; export tmp; }
vaddr64z: [Base64 + Zmm_vsib*ss + simm32_64] is mod=2 & r_m=4; Zmm_vsib & Base64 & ss; simm32_64 { local tmp=zext(Base64)+zext(simm32_64)+Zmm_vsib*ss; export tmp; }
@endif

# VSIB memory operands with 32-bit element addresses, selected by the
# effective address size (segWide supplies any segment-override display).
vMem32x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { export vaddr32x; }
vMem32x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { export vaddr32x; }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { export vaddr32y; }
vMem32y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { export vaddr32y; }
vMem32z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { export vaddr32z; }
vMem32z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { export vaddr32z; }
@ifdef IA64
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { export vaddr64x; }
vMem32x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { export vaddr64x; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { export vaddr64y; }
vMem32y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { export vaddr64y; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem32z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { export vaddr64z; }
vMem32z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { export vaddr64z;}
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
# VSIB memory operands with 64-bit element addresses (XMM- and YMM-indexed).
# addrsize=1 forms take a 32-bit base via vaddr32*, addrsize=2 forms a
# 64-bit base via vaddr64*; the highseg=1 variants cover segment-override
# display without changing the exported address.
vMem64x: segWide^vaddr32x is addrsize=1 & segWide; vaddr32x { export vaddr32x; }
vMem64x: segWide^vaddr32x is addrsize=1 & segWide & highseg=1; vaddr32x { export vaddr32x; }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide; vaddr64x { export vaddr64x; }
vMem64x: segWide^vaddr64x is addrsize=2 & segWide & highseg=1; vaddr64x { export vaddr64x; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
vMem64y: segWide^vaddr32y is addrsize=1 & segWide; vaddr32y { export vaddr32y; }
vMem64y: segWide^vaddr32y is addrsize=1 & segWide & highseg=1; vaddr32y { export vaddr32y; }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide; vaddr64y { export vaddr64y; }
vMem64y: segWide^vaddr64y is addrsize=2 & segWide & highseg=1; vaddr64y { export vaddr64y; }
# GAS always inserts a 0x67 prefix before a VSIB instruction with a 32-bit base.
# Behavior is coded to match Binutils; exceeds what the manual indicates is possible.
# Same shape as vMem64x/vMem64y, but the index register is a ZMM register.
vMem64z: segWide^vaddr32z is addrsize=1 & segWide; vaddr32z { export vaddr32z; }
vMem64z: segWide^vaddr32z is addrsize=1 & segWide & highseg=1; vaddr32z { export vaddr32z; }
vMem64z: segWide^vaddr64z is addrsize=2 & segWide; vaddr64z { export vaddr64z; }
vMem64z: segWide^vaddr64z is addrsize=2 & segWide & highseg=1; vaddr64z { export vaddr64z; }
@endif

# VSIB operand display wrappers: prefix the operand with the element-size
# "... ptr" string.  Variants with an empty body are display-only; the
# others also export the underlying VSIB operand.
d_vm32x: "dword ptr "^vMem32x is vMem32x { }
d_vm32y: "dword ptr "^vMem32y is vMem32y { } # not used
d_vm32z: "dword ptr "^vMem32z is vMem32z { }
@ifdef IA64
d_vm64x: "dword ptr "^vMem64x is vMem64x { }
d_vm64y: "dword ptr "^vMem64y is vMem64y { } # not used
d_vm64z: "dword ptr "^vMem64z is vMem64z { }
@endif
q_vm32x: "qword ptr "^vMem32x is vMem32x { export vMem32x; } # not used
q_vm32y: "qword ptr "^vMem32y is vMem32y { } # not used
q_vm32z: "qword ptr "^vMem32z is vMem32z { }
@ifdef IA64
q_vm64x: "qword ptr "^vMem64x is vMem64x { export vMem64x; }
q_vm64y: "qword ptr "^vMem64y is vMem64y { export vMem64y; }
q_vm64z: "qword ptr "^vMem64z is vMem64z { export vMem64z; }
@endif
x_vm32x: "xmmword ptr "^vMem32x is vMem32x { export vMem32x; }
y_vm32y: "ymmword ptr "^vMem32y is vMem32y { export vMem32y; }
z_vm32z: "zmmword ptr "^vMem32z is vMem32z { export vMem32z; }
@ifdef IA64
x_vm64x: "xmmword ptr "^vMem64x is vMem64x { export vMem64x; }
y_vm64y: "ymmword ptr "^vMem64y is vMem64y { export vMem64y; }
z_vm64z: "zmmword ptr "^vMem64z is vMem64z { export vMem64z; }
@endif

# Register-or-memory operands: mod=3 selects the register form; otherwise the
# memory operand is read and, where narrower than the destination register
# width, zero-extended to that width before export.
Reg32_m8: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
Reg32_m8: m8 is m8 { local tmp:4 = zext(m8); export tmp; }
Reg32_m16: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
Reg32_m16: m16 is m16 { local tmp:4 = zext(m16); export tmp; }
mmxreg2_m64: mmxreg2 is mod=3 & mmxreg2 { export mmxreg2; }
mmxreg2_m64: m64 is m64 { export m64; }
XmmReg2_m8: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m8: m8 is m8 { local tmp:16 = zext(m8); export tmp; }
XmmReg2_m16: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m16: m16 is m16 { local tmp:16 = zext(m16);
tmp; BCST64[240,16] = tmp; BCST64[256,16] = tmp; BCST64[272,16] = tmp;
  BCST64[288,16] = tmp; BCST64[304,16] = tmp; BCST64[320,16] = tmp; BCST64[336,16] = tmp;
  BCST64[352,16] = tmp; BCST64[368,16] = tmp; BCST64[384,16] = tmp; BCST64[400,16] = tmp;
  BCST64[416,16] = tmp; BCST64[432,16] = tmp; BCST64[448,16] = tmp; BCST64[464,16] = tmp;
  BCST64[480,16] = tmp; BCST64[496,16] = tmp; export BCST64; }

# EVEX embedded-broadcast operands: replicate a 32-bit memory element into
# every 32-bit lane of the BCSTn scratch register (n = result size in bytes).
m32bcst64: m32 is m32 { local tmp:4 = m32; BCST8[0,32] = tmp; BCST8[32,32] = tmp; export BCST8; }
m32bcst128: m32 is m32 { local tmp:4 = m32;
  BCST16[0,32] = tmp; BCST16[32,32] = tmp; BCST16[64,32] = tmp; BCST16[96,32] = tmp;
  export BCST16; }
m32bcst256: m32 is m32 { local tmp:4 = m32;
  BCST32[0,32] = tmp; BCST32[32,32] = tmp; BCST32[64,32] = tmp; BCST32[96,32] = tmp;
  BCST32[128,32] = tmp; BCST32[160,32] = tmp; BCST32[192,32] = tmp; BCST32[224,32] = tmp;
  export BCST32; }
m32bcst512: m32 is m32 { local tmp:4 = m32;
  BCST64[0,32] = tmp; BCST64[32,32] = tmp; BCST64[64,32] = tmp; BCST64[96,32] = tmp;
  BCST64[128,32] = tmp; BCST64[160,32] = tmp; BCST64[192,32] = tmp; BCST64[224,32] = tmp;
  BCST64[256,32] = tmp; BCST64[288,32] = tmp; BCST64[320,32] = tmp; BCST64[352,32] = tmp;
  BCST64[384,32] = tmp; BCST64[416,32] = tmp; BCST64[448,32] = tmp; BCST64[480,32] = tmp;
  export BCST64; }

# Same, replicating a 64-bit memory element into every 64-bit lane.
m64bcst128: m64 is m64 { local tmp:8 = m64; BCST16[0,64] = tmp; BCST16[64,64] = tmp; export BCST16; }
m64bcst256: m64 is m64 { local tmp:8 = m64;
  BCST32[0,64] = tmp; BCST32[64,64] = tmp; BCST32[128,64] = tmp; BCST32[192,64] = tmp;
  export BCST32; }
m64bcst512: m64 is m64 { local tmp:8 = m64;
  BCST64[0,64] = tmp; BCST64[64,64] = tmp; BCST64[128,64] = tmp; BCST64[192,64] = tmp;
  BCST64[256,64] = tmp; BCST64[320,64] = tmp; BCST64[384,64] = tmp; BCST64[448,64] = tmp;
  export BCST64; }

# EVEX reg/mem/broadcast operands: mod=3 -> register form; evexB=1 -> embedded
# broadcast of the narrow memory element; otherwise a plain (zero-extended
# where narrower than 128 bits) memory read.  All memory forms constrain
# evexDisp8N so compressed disp8 scaling is applied.
XmmReg2_m32_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m32_m16bcst: m32 is m32 & evexDisp8N { local tmp:16 = zext(m32); export tmp; }
XmmReg2_m32_m16bcst: m16bcst32 is evexB=1 & m16bcst32 & evexDisp8N { local tmp:16 = zext(m16bcst32); export tmp; }
XmmReg2_m64_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m64_m16bcst: m64 is m64 & evexDisp8N { local tmp:16 = zext(m64); export tmp; }
XmmReg2_m64_m16bcst: m16bcst64 is evexB=1 & m16bcst64 & evexDisp8N { local tmp:16 = zext(m16bcst64); export tmp; }
XmmReg2_m64_m32bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m64_m32bcst: m64 is m64 & evexDisp8N { local tmp:16 = zext(m64); export tmp; }
XmmReg2_m64_m32bcst: m32bcst64 is evexB=1 & m32bcst64 & evexDisp8N { local tmp:16 = zext(m32bcst64); export tmp; }
XmmReg2_m128_m16bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m128_m16bcst: m128 is m128 & evexDisp8N { export m128; }
XmmReg2_m128_m16bcst: m16bcst128 is evexB=1 & m16bcst128 & evexDisp8N { export m16bcst128; }
XmmReg2_m128_m32bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m128_m32bcst: m128 is m128 & evexDisp8N { export m128; }
XmmReg2_m128_m32bcst: m32bcst128 is evexB=1 & m32bcst128 & evexDisp8N { export m32bcst128; }
XmmReg2_m128_m64bcst: XmmReg2 is mod=3 & XmmReg2 { export XmmReg2; }
XmmReg2_m128_m64bcst: m128 is m128 & evexDisp8N { export m128; }
XmmReg2_m128_m64bcst: m64bcst128 is evexB=1 & m64bcst128 & evexDisp8N { export m64bcst128; }
YmmReg2_m256_m16bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
YmmReg2_m256_m16bcst: m256 is m256 & evexDisp8N { export m256; }
YmmReg2_m256_m16bcst: m16bcst256 is evexB=1 & m16bcst256 & evexDisp8N { export m16bcst256; }
YmmReg2_m256_m32bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
YmmReg2_m256_m32bcst: m256 is m256 & evexDisp8N { export m256; }
YmmReg2_m256_m32bcst: m32bcst256 is evexB=1 & m32bcst256 & evexDisp8N { export m32bcst256; }
YmmReg2_m256_m64bcst: YmmReg2 is mod=3 & YmmReg2 { export YmmReg2; }
YmmReg2_m256_m64bcst: m256 is m256 & evexDisp8N { export m256; }
YmmReg2_m256_m64bcst: m64bcst256 is evexB=1 & m64bcst256 & evexDisp8N { export m64bcst256; }
ZmmReg2_m512_m16bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
ZmmReg2_m512_m16bcst: m512 is m512 & evexDisp8N { export m512; }
ZmmReg2_m512_m16bcst: m16bcst512 is evexB=1 & m16bcst512 & evexDisp8N { export m16bcst512; }
ZmmReg2_m512_m32bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
ZmmReg2_m512_m32bcst: m512 is m512 & evexDisp8N { export m512; }
ZmmReg2_m512_m32bcst: m32bcst512 is evexB=1 & m32bcst512 & evexDisp8N { export m32bcst512; }
ZmmReg2_m512_m64bcst: ZmmReg2 is mod=3 & ZmmReg2 { export ZmmReg2; }
ZmmReg2_m512_m64bcst: m512 is m512 & evexDisp8N { export m512; }
ZmmReg2_m512_m64bcst: m64bcst512 is evexB=1 & m64bcst512 & evexDisp8N { export m64bcst512; }

# Direct-offset (moffs) memory operands, as used by the MOV AL/AX/EAX/RAX
# forms.  addrsize=0/1/2 select a 16/32/64-bit offset; highseg=1 variants add
# the wide segment base explicitly; the 16-bit form goes through segment().
moffs8: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:1 tmp; }
moffs8: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:1 tmp; }
moffs8: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:1 imm32; }
@ifdef IA64
moffs8: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:1 tmp; }
moffs8: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:1 imm64; }
@endif
moffs16: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:2 tmp; }
moffs16: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:2 tmp; }
moffs16: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:2 imm32; }
@ifdef IA64
moffs16: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:2 tmp; }
moffs16: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:2 imm64; }
@endif
moffs32: seg16^[imm16] is addrsize=0 & seg16 & imm16 { tmp:$(SIZE) = segment(seg16,imm16:2); export *:4 tmp; }
moffs32: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:4 imm32; }
moffs32: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:$(SIZE) = segWide + imm32; export *:4 tmp; }
@ifdef IA64
moffs32: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:4 imm64; }
moffs32: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:4 tmp; }
@endif
@ifdef IA64
moffs64: segWide^[imm64] is addrsize=2 & segWide & imm64 { export *:8 imm64; }
moffs64: segWide^[imm64] is addrsize=2 & highseg=1 & segWide & imm64 { tmp:8 = segWide + imm64; export *:8 tmp; }
moffs64: segWide^[imm32] is addrsize=1 & segWide & imm32 { export *:8 imm32; }
moffs64: segWide^[imm32] is addrsize=1 & highseg=1 & segWide & imm32 { tmp:8 = segWide + imm32; export *:8 tmp; }
@endif
# TODO: segment register offset in 64bit might not be right

# String memory access
# dseSIn: DS:(E)SI source operand of size n, post-adjusting (E)SI by +/-n
# depending on DF.  eseDIn: ES:(E)DI destination operand, same adjustment.
dseSI1: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 1-2*zext(DF); export *:1 tmp; }
dseSI1: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 1-2*zext(DF); export *:1 tmp; }
dseSI2: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 2-4*zext(DF); export *:2 tmp; }
dseSI2: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 2-4*zext(DF); export *:2 tmp; }
dseSI4: seg16^SI is addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 4-8*zext(DF); export *:4 tmp; }
dseSI4: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 4-8*zext(DF); export *:4 tmp; }
eseDI1: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 1-2*zext(DF); export *:1 tmp; }
eseDI1: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+1-2*zext(DF); export *:1 tmp; }
eseDI2: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 2-4*zext(DF); export *:2 tmp; }
eseDI2: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+2-4*zext(DF); export *:2 tmp; }
eseDI4: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 4-8*zext(DF); export *:4 tmp; }
eseDI4: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+4-8*zext(DF); export *:4 tmp; }
@ifdef IA64
# quadword string functions
dseSI8: seg16^SI is
addrsize=0 & seg16 & SI { tmp:4 = segment(seg16,SI); SI = SI + 8-16*zext(DF); export *:8 tmp; }
dseSI8: segWide^ESI is addrsize=1 & segWide & ESI { tmp:4 = ESI; ESI = ESI + 8-16*zext(DF); export *:8 tmp; }
eseDI8: ES:DI is addrsize=0 & ES & DI { tmp:4 = segment(ES,DI); DI = DI + 8-16*zext(DF); export *:8 tmp; }
eseDI8: ES:EDI is addrsize=1 & ES & EDI { tmp:4 = EDI; EDI=EDI+8-16*zext(DF); export *:8 tmp; }
# 64-bit addressing (addrsize=2) string operands use RSI/RDI directly.
dseSI1: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 1-2*zext(DF); export *:1 tmp; }
dseSI2: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 2-4*zext(DF); export *:2 tmp; }
dseSI4: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 4-8*zext(DF); export *:4 tmp; }
dseSI8: RSI is addrsize=2 & RSI { local tmp = RSI; RSI = RSI + 8-16*zext(DF); export *:8 tmp; }
eseDI1: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+1-2*zext(DF); export *:1 tmp; }
eseDI2: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+2-4*zext(DF); export *:2 tmp; }
eseDI4: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+4-8*zext(DF); export *:4 tmp; }
eseDI8: RDI is addrsize=2 & RDI { local tmp = RDI; RDI=RDI+8-16*zext(DF); export *:8 tmp; }
@endif

# General reg-or-mem (ModRM r/m) operands with size-tagged display.
rm8: Rmr8 is mod=3 & Rmr8 { export Rmr8; }
rm8: "byte ptr" Mem is Mem { export *:1 Mem; }
rm16: Rmr16 is mod=3 & Rmr16 { export Rmr16; }
rm16: "word ptr" Mem is Mem { export *:2 Mem; }
rm32: Rmr32 is mod=3 & Rmr32 { export Rmr32; }
rm32: "dword ptr" Mem is Mem { export *:4 Mem; }
@ifdef IA64
rm64: Rmr64 is mod=3 & Rmr64 { export Rmr64; }
rm64: "qword ptr" Mem is Mem { export *:8 Mem; }
@endif

# Constant 1 operand (e.g. for shift-by-one forms).
n1: one is epsilon [ one = 1; ] { export *[const]:1 one; }

@ifdef IA64
# Handle zero extension in 64-bit mode for 32-bit destination registers
check_Reg32_dest: is rexRprefix=0 & reg32 & reg64 { reg64 = zext(reg32); }
check_Reg32_dest: is rexRprefix=1 & reg32_x & reg64_x { reg64_x = zext(reg32_x); }
check_Rmr32_dest: is rexBprefix=0 & r32 & r64 { r64 = zext(r32); }
check_Rmr32_dest: is rexBprefix=1 & r32_x & r64_x { r64_x = zext(r32_x); }
check_rm32_dest: is mod=3 & check_Rmr32_dest { build check_Rmr32_dest; }
check_EAX_dest: is epsilon { RAX = zext(EAX); }
check_EDX_dest: is epsilon { RDX = zext(EDX); }
check_vexVVVV_r32_dest: is bit64=1 & vexVVVV_r64 & vexVVVV_r32 { vexVVVV_r64 = zext(vexVVVV_r32); }
@else
# Outside 64-bit mode there is no upper half to clear: all no-ops.
check_Reg32_dest: is epsilon { }
check_Rmr32_dest: is epsilon { }
check_EAX_dest: is epsilon { }
check_EDX_dest: is epsilon { }
check_vexVVVV_r32_dest: is epsilon { }
@endif
check_rm32_dest: is epsilon { }

# Far-pointer operands: set CS and export the flat target address.
ptr1616: reloc is protectedMode=0 & imm16; j16 [ reloc = j16*0x10 + imm16; ] { CS = j16; export *[ram]:4 reloc; }
ptr1616: reloc is protectedMode=1 & imm16; j16 [ reloc = j16*0x10000 + imm16; ] { CS = j16; export *[ram]:4 reloc; }
ptr1632: j16":"imm32 is imm32; j16 { CS = j16; export *:4 imm32; }

# conditions
# cc exports a boolean for the 4-bit condition-code field (Jcc/SETcc/CMOVcc).
cc: "O" is cond=0 { export OF; }
cc: "NO" is cond=1 { local tmp = !OF; export tmp; }
cc: "C" is cond=2 { export CF; }
cc: "NC" is cond=3 { local tmp = !CF; export tmp; }
cc: "Z" is cond=4 { export ZF; }
cc: "NZ" is cond=5 { local tmp = !ZF; export tmp; }
cc: "BE" is cond=6 { local tmp = CF || ZF; export tmp; }
cc: "A" is cond=7 { local tmp = !(CF || ZF); export tmp; }
cc: "S" is cond=8 { export SF; }
cc: "NS" is cond=9 { local tmp = !SF; export tmp; }
cc: "P" is cond=10 { export PF; }
cc: "NP" is cond=11 { local tmp = !PF; export tmp; }
cc: "L" is cond=12 { local tmp = OF != SF; export tmp; }
cc: "GE" is cond=13 { local tmp = OF == SF; export tmp; }
cc: "LE" is cond=14 { local tmp = ZF || (OF != SF); export tmp; }
cc: "G" is cond=15 { local tmp = !ZF && (OF == SF); export tmp; }

# repeat prefixes
# rep: head of a REP loop -- exit when the count register is zero, else
# decrement it; the matching reptail branches back to inst_start.
rep: ".REP" is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
rep: ".REP" is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
@ifdef IA64
rep: ".REP" is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
@endif
rep: is repprefx=0 & repneprefx=0 { }
reptail: is ((repprefx=1 & repneprefx=0)|(repprefx=0 & repneprefx=1)) { goto inst_start; }
reptail: is repprefx=0 & repneprefx=0 { }
# repe/repetail: like rep/reptail but the tail also tests ZF, giving
# REPE (continue while ZF) and REPNE (continue while !ZF) semantics.
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
@ifdef IA64
repe: ".REPE" is repprefx=1 & repneprefx=0 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
@endif
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=0 { if (CX==0) goto inst_next; CX=CX-1; }
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=1 { if (ECX==0) goto inst_next; ECX=ECX-1; }
@ifdef IA64
repe: ".REPNE" is repneprefx=1 & repprefx=0 & addrsize=2 { if (RCX==0) goto inst_next; RCX=RCX-1; }
@endif
repe: is repprefx=0 & repneprefx=0 { }
repetail: is repprefx=1 & repneprefx=0 { if (ZF) goto inst_start; }
repetail: is repneprefx=1 & repprefx=0 { if (!ZF) goto inst_start; }
repetail: is repprefx=0 & repneprefx=0 { }

# XACQUIRE/XRELEASE prefix
xacq_xrel_prefx: ".XACQUIRE" is xacquireprefx=1 & xreleaseprefx=0 { XACQUIRE(); }
xacq_xrel_prefx: ".XRELEASE" is xacquireprefx=0 & xreleaseprefx=1 { XRELEASE(); }
xacq_xrel_prefx: is epsilon { }
#the XRELEASE prefix can be used with several variants of MOV (without the LOCK prefix)
xrelease: ".XRELEASE" is xacquireprefx=0 & xreleaseprefx=1 { XRELEASE(); }
xrelease: is epsilon { }
#XCHG with a memory destination asserts a LOCK signal whether or not there is a LOCK prefix (f0)
#"alwaysLock" constructor will place "LOCK" in the disassembly if the prefix occurs
alwaysLock: ".LOCK" is lockprefx=1 { LOCK(); }
alwaysLock: is epsilon { LOCK(); }
#check for LOCK prefix and the optional XACQUIRE/XRELEASE
lockx: xacq_xrel_prefx^".LOCK" is lockprefx=1 & xacq_xrel_prefx { build xacq_xrel_prefx; LOCK(); }
lockx: is epsilon { }
#"unlock" constructor is used to pair every LOCK pcodeop with a matching UNLOCK pcodeop
unlock: is lockprefx=1 { UNLOCK(); }
unlock: is epsilon { }

# AVX-512 opmask (k) register operands.
KReg_reg: opmaskreg is opmaskreg { export opmaskreg; }
KReg_rm: opmaskrm is opmaskrm { export opmaskrm; } # not used
vexVVVV_KReg: evexVopmask is evexVopmask { export evexVopmask; }
vex1VVV_KReg: evexVopmask is evexVopmask & vexHighV=0 { export evexVopmask; }

# EVEX z-bit handling: evexZ=1 displays "{z}" and clears the merge source
# (XmmMask/YmmMask/ZmmMask), turning merge-masking into zero-masking.
XmmMaskMode: is evexZ=0 { }
XmmMaskMode: "{z}" is evexZ=1 { XmmMask=0; }
YmmMaskMode: is evexZ=0 { }
YmmMaskMode: "{z}" is evexZ=1 { YmmMask=0; }
ZmmMaskMode: is evexZ=0 { }
ZmmMaskMode: "{z}" is evexZ=1 { ZmmMask=0; }
# AVXOpMask exports the selected opmask register, or an all-ones constant
# when no opmask is encoded (evexOpmask=0 -> unmasked operation).
AVXOpMask: "{"^evexOpmask^"}" is evexOpmask { export evexOpmask; }
AVXOpMask: is evexOpmask=0 { local tmp:8 = 0xffffffffffffffff; export *[const]:8 tmp; }
# Z=0: merge masking
# Z=1: zero masking
XmmOpMask: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode { export AVXOpMask; }
# XmmOpMask8: per-byte-lane merge of XmmResult with XmmMask under the opmask.
XmmOpMask8: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode { local mask = AVXOpMask;
  conditionalAssign(XmmResult[0,8], mask[0,1], XmmResult[0,8], XmmMask[0,8]);
  conditionalAssign(XmmResult[8,8], mask[1,1], XmmResult[8,8], XmmMask[8,8]);
  conditionalAssign(XmmResult[16,8], mask[2,1], XmmResult[16,8], XmmMask[16,8]);
  conditionalAssign(XmmResult[24,8], mask[3,1], XmmResult[24,8], XmmMask[24,8]);
  conditionalAssign(XmmResult[32,8], mask[4,1], XmmResult[32,8], XmmMask[32,8]);
  conditionalAssign(XmmResult[40,8], mask[5,1], XmmResult[40,8], XmmMask[40,8]);
  conditionalAssign(XmmResult[48,8], mask[6,1], XmmResult[48,8], XmmMask[48,8]);
  conditionalAssign(XmmResult[56,8], mask[7,1], XmmResult[56,8], XmmMask[56,8]);
  conditionalAssign(XmmResult[64,8], mask[8,1], XmmResult[64,8], XmmMask[64,8]);
  conditionalAssign(XmmResult[72,8], mask[9,1], XmmResult[72,8], XmmMask[72,8]);
  conditionalAssign(XmmResult[80,8], mask[10,1], XmmResult[80,8], XmmMask[80,8]);
  conditionalAssign(XmmResult[88,8], mask[11,1], XmmResult[88,8], XmmMask[88,8]);
  conditionalAssign(XmmResult[96,8], mask[12,1], XmmResult[96,8], XmmMask[96,8]);
  conditionalAssign(XmmResult[104,8], mask[13,1], XmmResult[104,8], XmmMask[104,8]);
conditionalAssign(XmmResult[112,8], mask[14,1], XmmResult[112,8], XmmMask[112,8]); conditionalAssign(XmmResult[120,8], mask[15,1], XmmResult[120,8], XmmMask[120,8]); } XmmOpMask8: is evexOpmask=0 { } XmmOpMask16: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode { local mask = AVXOpMask; conditionalAssign(XmmResult[0,16], mask[0,1], XmmResult[0,16], XmmMask[0,16]); conditionalAssign(XmmResult[16,16], mask[1,1], XmmResult[16,16], XmmMask[16,16]); conditionalAssign(XmmResult[32,16], mask[2,1], XmmResult[32,16], XmmMask[32,16]); conditionalAssign(XmmResult[48,16], mask[3,1], XmmResult[48,16], XmmMask[48,16]); conditionalAssign(XmmResult[64,16], mask[4,1], XmmResult[64,16], XmmMask[64,16]); conditionalAssign(XmmResult[80,16], mask[5,1], XmmResult[80,16], XmmMask[80,16]); conditionalAssign(XmmResult[96,16], mask[6,1], XmmResult[96,16], XmmMask[96,16]); conditionalAssign(XmmResult[112,16], mask[7,1], XmmResult[112,16], XmmMask[112,16]); } XmmOpMask16: is evexOpmask=0 { } XmmOpMask32: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode { local mask = AVXOpMask; conditionalAssign(XmmResult[0,32], mask[0,1], XmmResult[0,32], XmmMask[0,32]); conditionalAssign(XmmResult[32,32], mask[1,1], XmmResult[32,32], XmmMask[32,32]); conditionalAssign(XmmResult[64,32], mask[2,1], XmmResult[64,32], XmmMask[64,32]); conditionalAssign(XmmResult[96,32], mask[3,1], XmmResult[96,32], XmmMask[96,32]); } XmmOpMask32: is evexOpmask=0 { } XmmOpMask64: AVXOpMask^XmmMaskMode is AVXOpMask & XmmMaskMode { local mask = AVXOpMask; conditionalAssign(XmmResult[0,64], mask[0,1], XmmResult[0,64], XmmMask[0,64]); conditionalAssign(XmmResult[64,64], mask[1,1], XmmResult[64,64], XmmMask[64,64]); } XmmOpMask64: is evexOpmask=0 { } YmmOpMask: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode { export AVXOpMask; } YmmOpMask8: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode { local mask = AVXOpMask; conditionalAssign(YmmResult[0,8], mask[0,1], YmmResult[0,8], YmmMask[0,8]); conditionalAssign(YmmResult[8,8], 
mask[1,1], YmmResult[8,8], YmmMask[8,8]); conditionalAssign(YmmResult[16,8], mask[2,1], YmmResult[16,8], YmmMask[16,8]); conditionalAssign(YmmResult[24,8], mask[3,1], YmmResult[24,8], YmmMask[24,8]); conditionalAssign(YmmResult[32,8], mask[4,1], YmmResult[32,8], YmmMask[32,8]); conditionalAssign(YmmResult[40,8], mask[5,1], YmmResult[40,8], YmmMask[40,8]); conditionalAssign(YmmResult[48,8], mask[6,1], YmmResult[48,8], YmmMask[48,8]); conditionalAssign(YmmResult[56,8], mask[7,1], YmmResult[56,8], YmmMask[56,8]); conditionalAssign(YmmResult[64,8], mask[8,1], YmmResult[64,8], YmmMask[64,8]); conditionalAssign(YmmResult[72,8], mask[9,1], YmmResult[72,8], YmmMask[72,8]); conditionalAssign(YmmResult[80,8], mask[10,1], YmmResult[80,8], YmmMask[80,8]); conditionalAssign(YmmResult[88,8], mask[11,1], YmmResult[88,8], YmmMask[88,8]); conditionalAssign(YmmResult[96,8], mask[12,1], YmmResult[96,8], YmmMask[96,8]); conditionalAssign(YmmResult[104,8], mask[13,1], YmmResult[104,8], YmmMask[104,8]); conditionalAssign(YmmResult[112,8], mask[14,1], YmmResult[112,8], YmmMask[112,8]); conditionalAssign(YmmResult[120,8], mask[15,1], YmmResult[120,8], YmmMask[120,8]); conditionalAssign(YmmResult[128,8], mask[16,1], YmmResult[128,8], YmmMask[128,8]); conditionalAssign(YmmResult[136,8], mask[17,1], YmmResult[136,8], YmmMask[136,8]); conditionalAssign(YmmResult[144,8], mask[18,1], YmmResult[144,8], YmmMask[144,8]); conditionalAssign(YmmResult[152,8], mask[19,1], YmmResult[152,8], YmmMask[152,8]); conditionalAssign(YmmResult[160,8], mask[20,1], YmmResult[160,8], YmmMask[160,8]); conditionalAssign(YmmResult[168,8], mask[21,1], YmmResult[168,8], YmmMask[168,8]); conditionalAssign(YmmResult[176,8], mask[22,1], YmmResult[176,8], YmmMask[176,8]); conditionalAssign(YmmResult[184,8], mask[23,1], YmmResult[184,8], YmmMask[184,8]); conditionalAssign(YmmResult[192,8], mask[24,1], YmmResult[192,8], YmmMask[192,8]); conditionalAssign(YmmResult[200,8], mask[25,1], YmmResult[200,8], YmmMask[200,8]); 
conditionalAssign(YmmResult[208,8], mask[26,1], YmmResult[208,8], YmmMask[208,8]); conditionalAssign(YmmResult[216,8], mask[27,1], YmmResult[216,8], YmmMask[216,8]); conditionalAssign(YmmResult[224,8], mask[28,1], YmmResult[224,8], YmmMask[224,8]); conditionalAssign(YmmResult[232,8], mask[29,1], YmmResult[232,8], YmmMask[232,8]); } YmmOpMask8: is evexOpmask=0 { } YmmOpMask16: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode { local mask = AVXOpMask; conditionalAssign(YmmResult[0,16], mask[0,1], YmmResult[0,16], YmmMask[0,16]); conditionalAssign(YmmResult[16,16], mask[1,1], YmmResult[16,16], YmmMask[16,16]); conditionalAssign(YmmResult[32,16], mask[2,1], YmmResult[32,16], YmmMask[32,16]); conditionalAssign(YmmResult[48,16], mask[3,1], YmmResult[48,16], YmmMask[48,16]); conditionalAssign(YmmResult[64,16], mask[4,1], YmmResult[64,16], YmmMask[64,16]); conditionalAssign(YmmResult[80,16], mask[5,1], YmmResult[80,16], YmmMask[80,16]); conditionalAssign(YmmResult[96,16], mask[6,1], YmmResult[96,16], YmmMask[96,16]); conditionalAssign(YmmResult[112,16], mask[7,1], YmmResult[112,16], YmmMask[112,16]); conditionalAssign(YmmResult[128,16], mask[8,1], YmmResult[128,16], YmmMask[128,16]); conditionalAssign(YmmResult[144,16], mask[9,1], YmmResult[144,16], YmmMask[144,16]); conditionalAssign(YmmResult[160,16], mask[10,1], YmmResult[160,16], YmmMask[160,16]); conditionalAssign(YmmResult[176,16], mask[11,1], YmmResult[176,16], YmmMask[176,16]); conditionalAssign(YmmResult[192,16], mask[12,1], YmmResult[192,16], YmmMask[192,16]); conditionalAssign(YmmResult[208,16], mask[13,1], YmmResult[208,16], YmmMask[208,16]); conditionalAssign(YmmResult[224,16], mask[14,1], YmmResult[224,16], YmmMask[224,16]); conditionalAssign(YmmResult[240,16], mask[15,1], YmmResult[240,16], YmmMask[240,16]); } YmmOpMask16: is evexOpmask=0 { } YmmOpMask32: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode { local mask = AVXOpMask; conditionalAssign(YmmResult[0,32], mask[0,1], YmmResult[0,32], YmmMask[0,32]); 
  conditionalAssign(YmmResult[32,32], mask[1,1], YmmResult[32,32], YmmMask[32,32]);
  conditionalAssign(YmmResult[64,32], mask[2,1], YmmResult[64,32], YmmMask[64,32]);
  conditionalAssign(YmmResult[96,32], mask[3,1], YmmResult[96,32], YmmMask[96,32]);
  conditionalAssign(YmmResult[128,32], mask[4,1], YmmResult[128,32], YmmMask[128,32]);
  conditionalAssign(YmmResult[160,32], mask[5,1], YmmResult[160,32], YmmMask[160,32]);
  conditionalAssign(YmmResult[192,32], mask[6,1], YmmResult[192,32], YmmMask[192,32]);
  conditionalAssign(YmmResult[224,32], mask[7,1], YmmResult[224,32], YmmMask[224,32]); }
YmmOpMask32: is evexOpmask=0 { }
# YmmOpMask64: per-64-bit-lane merge of YmmResult with YmmMask (4 lanes).
YmmOpMask64: AVXOpMask^YmmMaskMode is AVXOpMask & YmmMaskMode { local mask = AVXOpMask;
  conditionalAssign(YmmResult[0,64], mask[0,1], YmmResult[0,64], YmmMask[0,64]);
  conditionalAssign(YmmResult[64,64], mask[1,1], YmmResult[64,64], YmmMask[64,64]);
  conditionalAssign(YmmResult[128,64], mask[2,1], YmmResult[128,64], YmmMask[128,64]);
  conditionalAssign(YmmResult[192,64], mask[3,1], YmmResult[192,64], YmmMask[192,64]); }
YmmOpMask64: is evexOpmask=0 { }
ZmmOpMask: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode { export AVXOpMask; }
# ZmmOpMask8: per-byte-lane merge of ZmmResult with ZmmMask (64 lanes).
ZmmOpMask8: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode { local mask = AVXOpMask;
  conditionalAssign(ZmmResult[0,8], mask[0,1], ZmmResult[0,8], ZmmMask[0,8]);
  conditionalAssign(ZmmResult[8,8], mask[1,1], ZmmResult[8,8], ZmmMask[8,8]);
  conditionalAssign(ZmmResult[16,8], mask[2,1], ZmmResult[16,8], ZmmMask[16,8]);
  conditionalAssign(ZmmResult[24,8], mask[3,1], ZmmResult[24,8], ZmmMask[24,8]);
  conditionalAssign(ZmmResult[32,8], mask[4,1], ZmmResult[32,8], ZmmMask[32,8]);
  conditionalAssign(ZmmResult[40,8], mask[5,1], ZmmResult[40,8], ZmmMask[40,8]);
  conditionalAssign(ZmmResult[48,8], mask[6,1], ZmmResult[48,8], ZmmMask[48,8]);
  conditionalAssign(ZmmResult[56,8], mask[7,1], ZmmResult[56,8], ZmmMask[56,8]);
  conditionalAssign(ZmmResult[64,8], mask[8,1], ZmmResult[64,8], ZmmMask[64,8]);
  conditionalAssign(ZmmResult[72,8], mask[9,1], ZmmResult[72,8], ZmmMask[72,8]);
  conditionalAssign(ZmmResult[80,8], mask[10,1], ZmmResult[80,8], ZmmMask[80,8]);
  conditionalAssign(ZmmResult[88,8], mask[11,1], ZmmResult[88,8], ZmmMask[88,8]);
  conditionalAssign(ZmmResult[96,8], mask[12,1], ZmmResult[96,8], ZmmMask[96,8]);
  conditionalAssign(ZmmResult[104,8], mask[13,1], ZmmResult[104,8], ZmmMask[104,8]);
  conditionalAssign(ZmmResult[112,8], mask[14,1], ZmmResult[112,8], ZmmMask[112,8]);
  conditionalAssign(ZmmResult[120,8], mask[15,1], ZmmResult[120,8], ZmmMask[120,8]);
  conditionalAssign(ZmmResult[128,8], mask[16,1], ZmmResult[128,8], ZmmMask[128,8]);
  conditionalAssign(ZmmResult[136,8], mask[17,1], ZmmResult[136,8], ZmmMask[136,8]);
  conditionalAssign(ZmmResult[144,8], mask[18,1], ZmmResult[144,8], ZmmMask[144,8]);
  conditionalAssign(ZmmResult[152,8], mask[19,1], ZmmResult[152,8], ZmmMask[152,8]);
  conditionalAssign(ZmmResult[160,8], mask[20,1], ZmmResult[160,8], ZmmMask[160,8]);
  conditionalAssign(ZmmResult[168,8], mask[21,1], ZmmResult[168,8], ZmmMask[168,8]);
  conditionalAssign(ZmmResult[176,8], mask[22,1], ZmmResult[176,8], ZmmMask[176,8]);
  conditionalAssign(ZmmResult[184,8], mask[23,1], ZmmResult[184,8], ZmmMask[184,8]);
  conditionalAssign(ZmmResult[192,8], mask[24,1], ZmmResult[192,8], ZmmMask[192,8]);
  conditionalAssign(ZmmResult[200,8], mask[25,1], ZmmResult[200,8], ZmmMask[200,8]);
  conditionalAssign(ZmmResult[208,8], mask[26,1], ZmmResult[208,8], ZmmMask[208,8]);
  conditionalAssign(ZmmResult[216,8], mask[27,1], ZmmResult[216,8], ZmmMask[216,8]);
  conditionalAssign(ZmmResult[224,8], mask[28,1], ZmmResult[224,8], ZmmMask[224,8]);
  conditionalAssign(ZmmResult[232,8], mask[29,1], ZmmResult[232,8], ZmmMask[232,8]);
  conditionalAssign(ZmmResult[240,8], mask[30,1], ZmmResult[240,8], ZmmMask[240,8]);
  conditionalAssign(ZmmResult[248,8], mask[31,1], ZmmResult[248,8], ZmmMask[248,8]);
  conditionalAssign(ZmmResult[256,8], mask[32,1], ZmmResult[256,8], ZmmMask[256,8]);
  conditionalAssign(ZmmResult[264,8], mask[33,1], ZmmResult[264,8], ZmmMask[264,8]);
  conditionalAssign(ZmmResult[272,8], mask[34,1], ZmmResult[272,8], ZmmMask[272,8]);
  conditionalAssign(ZmmResult[280,8], mask[35,1], ZmmResult[280,8], ZmmMask[280,8]);
  conditionalAssign(ZmmResult[288,8], mask[36,1], ZmmResult[288,8], ZmmMask[288,8]);
  conditionalAssign(ZmmResult[296,8], mask[37,1], ZmmResult[296,8], ZmmMask[296,8]);
  conditionalAssign(ZmmResult[304,8], mask[38,1], ZmmResult[304,8], ZmmMask[304,8]);
  conditionalAssign(ZmmResult[312,8], mask[39,1], ZmmResult[312,8], ZmmMask[312,8]);
  conditionalAssign(ZmmResult[320,8], mask[40,1], ZmmResult[320,8], ZmmMask[320,8]);
  conditionalAssign(ZmmResult[328,8], mask[41,1], ZmmResult[328,8], ZmmMask[328,8]);
  conditionalAssign(ZmmResult[336,8], mask[42,1], ZmmResult[336,8], ZmmMask[336,8]);
  conditionalAssign(ZmmResult[344,8], mask[43,1], ZmmResult[344,8], ZmmMask[344,8]);
  conditionalAssign(ZmmResult[352,8], mask[44,1], ZmmResult[352,8], ZmmMask[352,8]);
  conditionalAssign(ZmmResult[360,8], mask[45,1], ZmmResult[360,8], ZmmMask[360,8]);
  conditionalAssign(ZmmResult[368,8], mask[46,1], ZmmResult[368,8], ZmmMask[368,8]);
  conditionalAssign(ZmmResult[376,8], mask[47,1], ZmmResult[376,8], ZmmMask[376,8]);
  conditionalAssign(ZmmResult[384,8], mask[48,1], ZmmResult[384,8], ZmmMask[384,8]);
  conditionalAssign(ZmmResult[392,8], mask[49,1], ZmmResult[392,8], ZmmMask[392,8]);
  conditionalAssign(ZmmResult[400,8], mask[50,1], ZmmResult[400,8], ZmmMask[400,8]);
  conditionalAssign(ZmmResult[408,8], mask[51,1], ZmmResult[408,8], ZmmMask[408,8]);
  conditionalAssign(ZmmResult[416,8], mask[52,1], ZmmResult[416,8], ZmmMask[416,8]);
  conditionalAssign(ZmmResult[424,8], mask[53,1], ZmmResult[424,8], ZmmMask[424,8]);
  conditionalAssign(ZmmResult[432,8], mask[54,1], ZmmResult[432,8], ZmmMask[432,8]);
  conditionalAssign(ZmmResult[440,8], mask[55,1], ZmmResult[440,8], ZmmMask[440,8]);
  conditionalAssign(ZmmResult[448,8], mask[56,1], ZmmResult[448,8], ZmmMask[448,8]);
  conditionalAssign(ZmmResult[456,8], mask[57,1], ZmmResult[456,8],
ZmmMask[456,8]);
  conditionalAssign(ZmmResult[464,8], mask[58,1], ZmmResult[464,8], ZmmMask[464,8]);
  conditionalAssign(ZmmResult[472,8], mask[59,1], ZmmResult[472,8], ZmmMask[472,8]);
  conditionalAssign(ZmmResult[480,8], mask[60,1], ZmmResult[480,8], ZmmMask[480,8]);
  conditionalAssign(ZmmResult[488,8], mask[61,1], ZmmResult[488,8], ZmmMask[488,8]);
  conditionalAssign(ZmmResult[496,8], mask[62,1], ZmmResult[496,8], ZmmMask[496,8]);
  conditionalAssign(ZmmResult[504,8], mask[63,1], ZmmResult[504,8], ZmmMask[504,8]); }
ZmmOpMask8: is evexOpmask=0 { }
# ZmmOpMask16: per-16-bit-lane merge of ZmmResult with ZmmMask (32 lanes).
ZmmOpMask16: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode { local mask = AVXOpMask;
  conditionalAssign(ZmmResult[0,16], mask[0,1], ZmmResult[0,16], ZmmMask[0,16]);
  conditionalAssign(ZmmResult[16,16], mask[1,1], ZmmResult[16,16], ZmmMask[16,16]);
  conditionalAssign(ZmmResult[32,16], mask[2,1], ZmmResult[32,16], ZmmMask[32,16]);
  conditionalAssign(ZmmResult[48,16], mask[3,1], ZmmResult[48,16], ZmmMask[48,16]);
  conditionalAssign(ZmmResult[64,16], mask[4,1], ZmmResult[64,16], ZmmMask[64,16]);
  conditionalAssign(ZmmResult[80,16], mask[5,1], ZmmResult[80,16], ZmmMask[80,16]);
  conditionalAssign(ZmmResult[96,16], mask[6,1], ZmmResult[96,16], ZmmMask[96,16]);
  conditionalAssign(ZmmResult[112,16], mask[7,1], ZmmResult[112,16], ZmmMask[112,16]);
  conditionalAssign(ZmmResult[128,16], mask[8,1], ZmmResult[128,16], ZmmMask[128,16]);
  conditionalAssign(ZmmResult[144,16], mask[9,1], ZmmResult[144,16], ZmmMask[144,16]);
  conditionalAssign(ZmmResult[160,16], mask[10,1], ZmmResult[160,16], ZmmMask[160,16]);
  conditionalAssign(ZmmResult[176,16], mask[11,1], ZmmResult[176,16], ZmmMask[176,16]);
  conditionalAssign(ZmmResult[192,16], mask[12,1], ZmmResult[192,16], ZmmMask[192,16]);
  conditionalAssign(ZmmResult[208,16], mask[13,1], ZmmResult[208,16], ZmmMask[208,16]);
  conditionalAssign(ZmmResult[224,16], mask[14,1], ZmmResult[224,16], ZmmMask[224,16]);
  conditionalAssign(ZmmResult[240,16], mask[15,1], ZmmResult[240,16], ZmmMask[240,16]);
  conditionalAssign(ZmmResult[256,16], mask[16,1], ZmmResult[256,16], ZmmMask[256,16]);
  conditionalAssign(ZmmResult[272,16], mask[17,1], ZmmResult[272,16], ZmmMask[272,16]);
  conditionalAssign(ZmmResult[288,16], mask[18,1], ZmmResult[288,16], ZmmMask[288,16]);
  conditionalAssign(ZmmResult[304,16], mask[19,1], ZmmResult[304,16], ZmmMask[304,16]);
  conditionalAssign(ZmmResult[320,16], mask[20,1], ZmmResult[320,16], ZmmMask[320,16]);
  conditionalAssign(ZmmResult[336,16], mask[21,1], ZmmResult[336,16], ZmmMask[336,16]);
  conditionalAssign(ZmmResult[352,16], mask[22,1], ZmmResult[352,16], ZmmMask[352,16]);
  conditionalAssign(ZmmResult[368,16], mask[23,1], ZmmResult[368,16], ZmmMask[368,16]);
  conditionalAssign(ZmmResult[384,16], mask[24,1], ZmmResult[384,16], ZmmMask[384,16]);
  conditionalAssign(ZmmResult[400,16], mask[25,1], ZmmResult[400,16], ZmmMask[400,16]);
  conditionalAssign(ZmmResult[416,16], mask[26,1], ZmmResult[416,16], ZmmMask[416,16]);
  conditionalAssign(ZmmResult[432,16], mask[27,1], ZmmResult[432,16], ZmmMask[432,16]);
  conditionalAssign(ZmmResult[448,16], mask[28,1], ZmmResult[448,16], ZmmMask[448,16]);
  conditionalAssign(ZmmResult[464,16], mask[29,1], ZmmResult[464,16], ZmmMask[464,16]);
  conditionalAssign(ZmmResult[480,16], mask[30,1], ZmmResult[480,16], ZmmMask[480,16]);
  conditionalAssign(ZmmResult[496,16], mask[31,1], ZmmResult[496,16], ZmmMask[496,16]); }
ZmmOpMask16: is evexOpmask=0 { }
# ZmmOpMask32: per-32-bit-lane merge of ZmmResult with ZmmMask (16 lanes).
ZmmOpMask32: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode { local mask = AVXOpMask;
  conditionalAssign(ZmmResult[0,32], mask[0,1], ZmmResult[0,32], ZmmMask[0,32]);
  conditionalAssign(ZmmResult[32,32], mask[1,1], ZmmResult[32,32], ZmmMask[32,32]);
  conditionalAssign(ZmmResult[64,32], mask[2,1], ZmmResult[64,32], ZmmMask[64,32]);
  conditionalAssign(ZmmResult[96,32], mask[3,1], ZmmResult[96,32], ZmmMask[96,32]);
  conditionalAssign(ZmmResult[128,32], mask[4,1], ZmmResult[128,32], ZmmMask[128,32]);
  conditionalAssign(ZmmResult[160,32], mask[5,1], ZmmResult[160,32], ZmmMask[160,32]);
  conditionalAssign(ZmmResult[192,32], mask[6,1], ZmmResult[192,32], ZmmMask[192,32]);
  conditionalAssign(ZmmResult[224,32], mask[7,1], ZmmResult[224,32], ZmmMask[224,32]);
  conditionalAssign(ZmmResult[256,32], mask[8,1], ZmmResult[256,32], ZmmMask[256,32]);
  conditionalAssign(ZmmResult[288,32], mask[9,1], ZmmResult[288,32], ZmmMask[288,32]);
  conditionalAssign(ZmmResult[320,32], mask[10,1], ZmmResult[320,32], ZmmMask[320,32]);
  conditionalAssign(ZmmResult[352,32], mask[11,1], ZmmResult[352,32], ZmmMask[352,32]);
  conditionalAssign(ZmmResult[384,32], mask[12,1], ZmmResult[384,32], ZmmMask[384,32]);
  conditionalAssign(ZmmResult[416,32], mask[13,1], ZmmResult[416,32], ZmmMask[416,32]);
  conditionalAssign(ZmmResult[448,32], mask[14,1], ZmmResult[448,32], ZmmMask[448,32]);
  conditionalAssign(ZmmResult[480,32], mask[15,1], ZmmResult[480,32], ZmmMask[480,32]); }
ZmmOpMask32: is evexOpmask=0 { }
# ZmmOpMask64: per-64-bit-lane merge of ZmmResult with ZmmMask (8 lanes).
ZmmOpMask64: AVXOpMask^ZmmMaskMode is AVXOpMask & ZmmMaskMode { local mask = AVXOpMask;
  conditionalAssign(ZmmResult[0,64], mask[0,1], ZmmResult[0,64], ZmmMask[0,64]);
  conditionalAssign(ZmmResult[64,64], mask[1,1], ZmmResult[64,64], ZmmMask[64,64]);
  conditionalAssign(ZmmResult[128,64], mask[2,1], ZmmResult[128,64], ZmmMask[128,64]);
  conditionalAssign(ZmmResult[192,64], mask[3,1], ZmmResult[192,64], ZmmMask[192,64]);
  conditionalAssign(ZmmResult[256,64], mask[4,1], ZmmResult[256,64], ZmmMask[256,64]);
  conditionalAssign(ZmmResult[320,64], mask[5,1], ZmmResult[320,64], ZmmMask[320,64]);
  conditionalAssign(ZmmResult[384,64], mask[6,1], ZmmResult[384,64], ZmmMask[384,64]);
  conditionalAssign(ZmmResult[448,64], mask[7,1], ZmmResult[448,64], ZmmMask[448,64]); }
ZmmOpMask64: is evexOpmask=0 { }

# Opmask-register-or-memory operands: mod=3 takes the low n bits of the k
# register; otherwise read the memory operand directly.
RegK_m8: KReg_rm is mod=3 & KReg_rm { tmp:1 = KReg_rm[0,8]; export tmp; }
RegK_m8: m8 is m8 { tmp:1 = m8; export tmp; }
RegK_m16: KReg_rm is mod=3 & KReg_rm { tmp:2 = KReg_rm[0,16]; export tmp; }
RegK_m16: m16 is m16 { tmp:2 = m16; export tmp; }
RegK_m32: KReg_rm is mod=3 & KReg_rm { tmp:4 = KReg_rm[0,32]; export
tmp; } RegK_m32: m32 is m32 { tmp:4 = m32; export tmp; } RegK_m64: KReg_rm is mod=3 & KReg_rm { export KReg_rm; } RegK_m64: m64 is m64 { export m64; } # Some macros macro ptr2(r,x) { r = zext(x); } macro ptr4(r,x) { @ifdef IA64 r = zext(x); @else r = x; @endif } macro ptr8(r,x) { @ifdef IA64 r = x; @else r = x:$(SIZE); @endif } macro push22(x) { mysave:2 = x; SP = SP -2; tmp:$(SIZE) = segment(SS,SP); *:2 tmp = mysave; } macro push24(x) { mysave:4 = x; SP = SP-4; tmp:$(SIZE) = segment(SS,SP); *:4 tmp = mysave; } macro push28(x) { mysave:8 = x; SP = SP-8; tmp:$(SIZE) = segment(SS,SP); *:8 tmp = mysave; } macro push42(x) { mysave:2 = x; $(STACKPTR) = $(STACKPTR) - 2; *:2 $(STACKPTR) = mysave; } macro push44(x) { mysave:4 = x; $(STACKPTR) = $(STACKPTR) - 4; *:4 $(STACKPTR) = mysave; } macro pushseg44(x) { mysave:2 = x; $(STACKPTR) = $(STACKPTR) - 4; *:2 $(STACKPTR) = mysave; } macro push48(x) { mysave:8 = x; $(STACKPTR) = $(STACKPTR) - 8; *:8 $(STACKPTR) = mysave; } @ifdef IA64 macro push82(x) { mysave:2 = x; $(STACKPTR) = $(STACKPTR) - 2; *:2 $(STACKPTR) = mysave; } macro push84(x) { mysave:4 = x; $(STACKPTR) = $(STACKPTR) - 4; *:4 $(STACKPTR) = mysave; } macro push88(x) { mysave:8 = x; $(STACKPTR) = $(STACKPTR) - 8; *:8 $(STACKPTR) = mysave; } macro pushseg88(x) { mysave:8 = zext(x); $(STACKPTR) = $(STACKPTR) - 8; *:8 $(STACKPTR) = mysave; } @endif macro pop22(x) { tmp:$(SIZE) = segment(SS,SP); x = *:2 tmp; SP = SP+2; } macro pop24(x) { tmp:$(SIZE) = segment(SS,SP); x = *:4 tmp; SP = SP+4; } macro pop28(x) { tmp:$(SIZE) = segment(SS,SP); x = *:8 tmp; SP = SP+8; } macro pop42(x) { x = *:2 $(STACKPTR); ESP = ESP + 2; } macro pop44(x) { x = *:4 $(STACKPTR); ESP = ESP + 4; } macro popseg44(x) { x = *:2 $(STACKPTR); ESP = ESP + 4; } macro pop48(x) { x = *:8 $(STACKPTR); ESP = ESP + 8; } @ifdef IA64 macro pop82(x) { x = *:2 $(STACKPTR); RSP = RSP + 2; } macro pop84(x) { x = *:4 $(STACKPTR); RSP = RSP + 4; } macro pop88(x) { x = *:8 $(STACKPTR); RSP = RSP + 8; } macro 
# popseg88 tail, then the EFLAGS marshalling and flag-computation macros.
# unpackflags/packflags convert between the packed 16-bit FLAGS word and the
# individual 1-byte flag registers (bit positions per the x86 FLAGS layout);
# unpackeflags/packeflags handle the extended 32-bit EFLAGS bits (ID/AC/VIP/VIF).
# addflags/subflags set CF/OF before the arithmetic op; add/subCarryFlags fold
# in the incoming CF for ADC/SBB-style full-adder semantics and also perform
# the operation (result written back to op1). resultflags sets SF/ZF/PF from a
# result; the shift/rotate variants only update flags when count != 0 (and OF
# only when count == 1), matching x86 shift-flag rules. AF is left
# unimplemented throughout, as the original comments note.
popseg88(x) { x = *:2 $(STACKPTR); RSP = RSP + 8; } @endif macro unpackflags(tmp) { NT = (tmp & 0x4000) != 0; # IOPL = (tmp & 0x1000) != 0; OF = (tmp & 0x0800) != 0; DF = (tmp & 0x0400) != 0; IF = (tmp & 0x0200) != 0; TF = (tmp & 0x0100) != 0; SF = (tmp & 0x0080) != 0; ZF = (tmp & 0x0040) != 0; AF = (tmp & 0x0010) != 0; PF = (tmp & 0x0004) != 0; CF = (tmp & 0x0001) != 0; } macro unpackeflags(tmp) { ID = (tmp & 0x00200000) != 0; AC = (tmp & 0x00040000) != 0; # RF = (tmp & 0x00010000) != 0; VIP = 0; VIF = 0; } macro packflags(tmp) { tmp= (0x4000 * zext(NT&1)) # | (0x1000 * zext(IOPL&1)) | (0x0800 * zext(OF&1)) | (0x0400 * zext(DF&1)) | (0x0200 * zext(IF&1)) | (0x0100 * zext(TF&1)) | (0x0080 * zext(SF&1)) | (0x0040 * zext(ZF&1)) | (0x0010 * zext(AF&1)) | (0x0004 * zext(PF&1)) | (0x0001 * zext(CF&1)); } macro packeflags(tmp) { tmp = tmp | (0x00200000 * zext(ID&1)) | (0x00100000 * zext(VIP&1)) | (0x00080000 * zext(VIF&1)) | (0x00040000 * zext(AC&1)); } macro addflags(op1,op2) { CF = carry(op1,op2); OF = scarry(op1,op2); } # # full-adder carry and overflow calculations # macro addCarryFlags ( op1, op2 ) { local CFcopy = zext(CF); CF = carry( op1, op2 ); OF = scarry( op1, op2 ); local result = op1 + op2; CF = CF || carry( result, CFcopy ); OF = OF ^^ scarry( result, CFcopy ); op1 = result + CFcopy; # AF not implemented } macro subCarryFlags ( op1, op2 ) { local CFcopy = zext(CF); CF = op1 < op2; OF = sborrow( op1, op2 ); local result = op1 - op2; CF = CF || (result < CFcopy); OF = OF ^^ sborrow( result, CFcopy ); op1 = result - CFcopy; # AF not implemented } macro resultflags(result) { SF = result s< 0; ZF = result == 0; PF = ((popcount(result & 0xff) & 1:1) == 0); # AF not implemented } macro shiftresultflags(result,count) { local notzero = (count != 0); local newSF = (result s< 0); SF = (!notzero & SF) | (notzero & newSF); local newZF = (result == 0); ZF = (!notzero & ZF) | (notzero & newZF); local newPF = ((popcount(result & 0xff) & 1:1) == 0); PF = (!notzero & PF) |
# Remaining flag helpers: sub/neg/logical/multiply flags and the per-opcode
# shift/rotate flag macros (rol/ror/shl/sar/shr/shrd), then the x87 stack
# rotation macro fdec (fdec rotates ST7..ST0 downward; finc continues below).
(notzero & newPF); # AF not implemented } macro subflags(op1,op2) { CF = op1 < op2; OF = sborrow(op1,op2); } macro negflags(op1) { CF = (op1 != 0); OF = sborrow(0,op1); } macro logicalflags() { CF = 0; OF = 0; } macro imultflags(low,total){ CF = sext(low) != total; OF = CF; } macro multflags(highhalf) { CF = highhalf != 0; OF = CF; } macro rolflags(result,count) { local notzero = (count != 0); local newCF = ((result & 1) != 0); CF = (!notzero & CF) | (notzero & newCF); local one = (count == 1); local newOF = CF ^ (result s< 0); OF = (!one & OF) | (one & newOF); } macro rorflags(result,count) { local notzero = (count != 0); local newCF = (result s< 0); CF = (!notzero & CF) | (notzero & newCF); local one = (count == 1); local newOF = (result s< 0) ^ ((result << 1) s< 0); OF = (!one & OF) | (one & newOF); } macro shlflags(op1,result,count) { # works for shld also local notzero = (count != 0); local newCF = ( (op1 << (count - 1)) s< 0 ); CF = (!notzero & CF) | (notzero & newCF); local one = (count == 1); local newOF = CF ^ (result s< 0); OF = (!one & OF) | (one & newOF); } macro sarflags(op1,result,count) { local notzero = (count != 0); local newCF = ( ( (op1 s>> (count - 1)) & 1 ) != 0 ); CF = (!notzero & CF) | (notzero & newCF); local one = (count == 1); OF = (!one & OF); } macro shrflags(op1,result,count) { local notzero = (count != 0); local newCF = ( ( (op1 >> (count - 1)) & 1 ) != 0 ); CF = (!notzero & CF) | (notzero & newCF); local one = (count == 1); local newOF = (op1 s< 0); OF = (!one & OF) | (one & newOF); } macro shrdflags(op1,result,count) { local notzero = (count != 0); local newCF = ( ( (op1 >> (count - 1)) & 1 ) != 0 ); CF = (!notzero & CF) | (notzero & newCF); local one = (count == 1); local newOF = ((op1 s< 0) ^ (result s< 0)); OF = (!one & OF) | (one & newOF); } macro fdec() { local tmp = ST7; ST7 = ST6; ST6 = ST5; ST5 = ST4; ST4 = ST3; ST3 = ST2; ST2 = ST1; ST1 = ST0; ST0 = tmp; } macro finc() { local tmp = ST0; ST0 = ST1; ST1 = ST2; ST2 = ST3; ST3
= ST4; ST4 = ST5; ST5 = ST6; ST6 = ST7; ST7 = tmp; } macro fpop() { ST0 = ST1; ST1 = ST2; ST2 = ST3; ST3 = ST4; ST4 = ST5; ST5 = ST6; ST6 = ST7; } macro fpushv(val) { ST7 = ST6; ST6 = ST5; ST5 = ST4; ST4 = ST3; ST3 = ST2; ST2 = ST1; ST1 = ST0; ST0 = val; } macro fpopv(val) { val = ST0; ST0 = ST1; ST1 = ST2; ST2 = ST3; ST3 = ST4; ST4 = ST5; ST5 = ST6; ST6 = ST7; } macro fcom(val) { C1 = 0; C2 = nan(ST0) || nan(val); C0 = C2 | ( ST0 f< val ); C3 = C2 | ( ST0 f== val ); FPUStatusWord = (zext(C0)<<8) | (zext(C1)<<9) | (zext(C2)<<10) | (zext(C3)<<14); } macro fcomi(val) { PF = nan(ST0) || nan(val); ZF = PF | ( ST0 f== val ); CF = PF | ( ST0 f< val ); OF = 0; AF = 0; SF = 0; FPUStatusWord = FPUStatusWord & 0xfdff; # Clear C1 C1 = 0; } # floating point NaN comparison into EFLAGS macro fucompe(val1, val2) { PF = nan(val1) || nan(val2 ); ZF = PF | ( val1 f== val2 ); CF = PF | ( val1 f< val2 ); OF = 0; AF = 0; SF = 0; } # The base level constructors # The prefixes :^instruction is instrPhase=0 & over=0x2e; instruction [ segover=1; ] {} # CS override :^instruction is instrPhase=0 & over=0x36; instruction [ segover=2; ] {} # SS override :^instruction is instrPhase=0 & over=0x3e; instruction [ segover=3; ] {} # DS override :^instruction is instrPhase=0 & over=0x26; instruction [ segover=4; ] {} # ES override :^instruction is instrPhase=0 & over=0x64; instruction [ segover=5; ] {} # FS override :^instruction is instrPhase=0 & over=0x65; instruction [ segover=6; ] {} # GS override :^instruction is instrPhase=0 & over=0x66; instruction [ opsize=opsize $xor 1; mandover = mandover $xor 1; ] {} # Operand size override :^instruction is instrPhase=0 & over=0x67; instruction [ addrsize=addrsize $xor 1; ] {} # Address size override :^instruction is instrPhase=0 & over=0xf2; instruction [ repneprefx=1; repprefx=0; ] {} :^instruction is instrPhase=0 & over=0xf3; instruction [ repneprefx=0; repprefx=1; ] {} :^instruction is instrPhase=0 & over=0xf0; instruction [ lockprefx=1; ] {} @ifdef 
IA64 # # REX opcode extension prefixes # # REX prefix present # Specification is "REX" @define REX "longMode=1 & rexprefix=1 & rexWprefix=0" # Specification is "REX.W" @define REX_W "longMode=1 & rexprefix=1 & rexWprefix=1" # TODO I don't think the following line can really happen because the 66 67 prefix must come before REX prefix :^instruction is $(LONGMODE_ON) & instrPhase=0 & over=0x66 & opsize=2; instruction [ opsize=0; mandover=mandover $xor 1; ] {} # Operand size override :^instruction is $(LONGMODE_ON) & instrPhase=0 & over=0x67 & addrsize=2; instruction [ addrsize=1; ] {} # Address size override :^instruction is $(LONGMODE_ON) & instrPhase=0 & row=0x4 & rexw=0 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=1; rexWprefix=0; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & row=0x4 & rexw=1 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=2; rexWprefix=1; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & opsize=0 & row=0x4 & rexw=0 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=0; rexWprefix=0; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & opsize=0 & row=0x4 & rexw=1 & rexr & rexx & rexb; instruction [ instrPhase=1; rexprefix=1; opsize=2; rexWprefix=1; rexRprefix=rexr; rexXprefix=rexx; rexBprefix=rexb; ] {} # if longmode is off (on 64-bit processor in 32-bit compatibility mode), there is no 64-bit addressing, make sure is off before parsing # :^instruction is $(LONGMODE_OFF) & instrPhase=0 & addrsize=2 & instruction [ addrsize=1; ] {} @endif # # VEX definitions: One from each group must be present in the decoding; following the specification from the manual. # # VEX encoding for type of VEX data flow. # Specification is "VEX.", "VEX.NDS", "VEX.NDD", or "VEX.DDS". If only "VEX." is present, then "VEX_NONE" must be used. 
@define VEX_NONE "vexMode=1" @define VEX_NDS "vexMode=1" @define VEX_NDD "vexMode=1" @define VEX_DDS "vexMode=1" # Specification is "LIG", "LZ", "128", or "256". @define VEX_LIG "vexL" @define VEX_LZ "vexL=0" @define VEX_L128 "vexL=0" @define VEX_L256 "vexL=1" @define EVEX_L512 "evexLp=1 & vexL=0" @define EVEX_LLIG "evexLp & vexL" # These are only to be used with VEX or EVEX decoding, where only one "mandatory" prefix is encoded in the VEX or EVEX. # If no prefix is specified, then VEX_PRE_NONE must be used. # No other "physical" prefixes are allowed. # Specification is "(empty)", "66", "F3", or "F2". If none of these are present (empty), then "VEX_PRE_NONE" must be used. @define VEX_PRE_NONE "mandover=0" @define VEX_PRE_66 "mandover=1" @define VEX_PRE_F3 "mandover=2" @define VEX_PRE_F2 "mandover=4" # Specification is "0F", "0F38", or "0F3A". @define VEX_0F "vexMMMMM=1" @define VEX_0F38 "vexMMMMM=2" @define VEX_0F3A "vexMMMMM=3" @define VEX_MAP4 "vexMMMMM=4" @define VEX_MAP5 "vexMMMMM=5" @define VEX_MAP6 "vexMMMMM=6" # Specification is "WIG", "W0", or "W1". 
# W-bit @defines (WIG/W0/W1) and EVEX data-flow @defines (all vexMode=2), then
# the 64-bit prefix decoders. For the 3-byte VEX (0xC4) there is one
# constructor per pp value (00/01/10/11): each copies the VEX payload fields
# into context — note the R/X/B/vvvv fields are stored INVERTED in the
# encoding, hence the ~ — and sets the matching mandatory-prefix context flag
# (prefix_66 / prefix_f3 / prefix_f2). The 2-byte VEX (0xC5) implies
# map 0F (vexMMMMM=0x1) and carries only R/vvvv/L/pp.
@define VEX_WIG "rexWprefix" @define VEX_W0 "rexWprefix=0" @define VEX_W1 "rexWprefix=1" @define EVEX_NONE "vexMode=2" @define EVEX_NDS "vexMode=2" @define EVEX_NDD "vexMode=2" @define EVEX_DDS "vexMode=2" @ifdef IA64 # 64-bit 3-byte VEX :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=0; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=1; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_66=1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=2; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f3=1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r & vex_x & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=3; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f2=1; ] {} # 64-bit 2-byte VEX :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=0; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 &
# 2-byte VEX pp=1..3 variants, then the 4-byte EVEX (0x62) decoders: EVEX adds
# R'/V' (also inverted), L', z, b, and the aaa opmask field; evex_res=0 and
# evex_res2=1 pin the reserved bits to their architecturally required values.
byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=1; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_66=1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=2; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f3=1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r & vex_vvvv & vex_l & vex_pp=3; instruction [ instrPhase=1; vexMode=1; rexRprefix=~vex_r; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f2=1; ] {} # 4-byte EVEX prefix :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=0; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=1; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; prefix_66=1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=2; evex_z & evex_lp & evex_l & evex_b & evex_vp
# Tail of the 64-bit EVEX pp=2/3 decoders, then the 32-bit (long-mode-off)
# VEX/EVEX decoders. In 32-bit mode the inverted vex_r and vex_x fields must
# be 1 (i.e. real R=X=0), so those constraints appear in the pattern and the
# corresponding context copies are omitted.
& evex_aaa; instruction [ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; prefix_f3=1; ] {} :^instruction is $(LONGMODE_ON) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r & vex_x & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=3; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; rexRprefix=~vex_r; rexXprefix=~vex_x; rexBprefix=~vex_b; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexMMMMM=evex_mmm; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; prefix_f2=1; ] {} @endif # 32-bit 3-byte VEX :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=0; instruction [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=1; instruction [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_66=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=2; instruction [ instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f3=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC4; vex_r=1 & vex_x=1 & vex_b & vex_mmmmm; vex_w & vex_vvvv & vex_l & vex_pp=3; instruction [
# 32-bit 2-byte VEX (0xC5) and 4-byte EVEX (0x62) decoders, one per pp value.
instrPhase=1; vexMode=1; rexBprefix=~vex_b; vexMMMMM=vex_mmmmm; rexWprefix=vex_w; vexVVVV=~vex_vvvv; vexL=vex_l; prefix_f2=1; ] {} # 32-bit 2-byte VEX :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=0; instruction [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=1; instruction [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_66=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=2; instruction [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f3=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover=0 & byte=0xC5; vex_r=1 & vex_x=1 & vex_vvvv & vex_l & vex_pp=3; instruction [ instrPhase=1; vexMode=1; vexVVVV=~vex_vvvv; vexL=vex_l; vexMMMMM=0x1; prefix_f2=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=0; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=1; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp;
# 32-bit EVEX pp=1..3 tails, then the 3DNow! pre-parse: 3DNow opcodes are
# "0x0f 0x0f <modrm/addressing> <suffix byte>"; Suffix3D captures the trailing
# opcode byte into the suffix3D context so the real constructor (matched on
# the re-parse at instrPhase=1) can select on it.
evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; prefix_66=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=2; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; prefix_f3=1; ] {} :^instruction is $(LONGMODE_OFF) & instrPhase=0 & vexMode=0 & rexprefix=0 & mandover & byte=0x62; vex_r=1 & vex_x=1 & vex_b & evex_rp & evex_res=0 & evex_mmm; vex_w & vex_vvvv & evex_res2=1 & vex_pp=3; evex_z & evex_lp & evex_l & evex_b & evex_vp & evex_aaa; instruction [ instrPhase=1; vexMode=2; vexVVVV=~vex_vvvv; rexBprefix=~vex_b; rexWprefix=vex_w; evexRp=~evex_rp; evexVp=~evex_vp; evexLp=evex_lp; vexL=evex_l; evexZ=evex_z; evexB=evex_b; evexAAA=evex_aaa; vexMMMMM=evex_mmm; prefix_f2=1; ] {} # Many of the multimedia instructions have a "mandatory" prefix, either 0x66, 0xf2 or 0xf3 # where the prefix really becomes part of the encoding. We collect the three possible prefixes of this # sort in the mandover context variable so we can pattern all three at once # 3DNow pre-parse to isolate suffix byte into context (suffix3D) # - general format: 0x0f 0x0f [sib] [displacement] # - must determine number of bytes consumed by addressing modes # TODO: determine supported prefixes? (e.g., 0x26) Suffix3D: imm8 is imm8 [ suffix3D=imm8; ] { } :^instruction is instrPhase=0 & (byte=0x0f; byte=0x0f; XmmReg ... & m64; Suffix3D) ... & instruction ... [ instrPhase=1; ] { } :^instruction is instrPhase=0 & (byte=0x0f; byte=0x0f; mmxmod=3; Suffix3D) ... & instruction ...
# 3DNow register-form pre-parse tail, then the instruction constructors in
# alphabetical order. AAA/AAD/AAM/AAS: BCD ASCII-adjust ops, valid only
# outside 64-bit mode (bit64=0). ADC: add-with-carry for AL/AX/EAX/RAX
# immediates and the 0x80-0x83 / 0x10-0x13 register forms; addCarryFlags
# performs the full-adder CF/OF computation AND the addition itself, then
# resultflags sets SF/ZF/PF. check_EAX_dest / check_Rmr32_dest handle the
# 64-bit zero-extension of 32-bit destinations.
[ instrPhase=1; ] { } # Instructions in alphabetical order # See 'lockable.sinc' file for instructions that are lockable with : lockprefx=0 { :AAA is vexMode=0 & bit64=0 & byte=0x37 { local car = ((AL & 0xf) > 9) | AF; AL = (AL+6*car)&0xf; AH=AH+car; CF=car; AF=car; } :AAD imm8 is vexMode=0 & bit64=0 & byte=0xd5; imm8 { AL = AL + imm8*AH; AH=0; resultflags(AX); } :AAM imm8 is vexMode=0 & bit64=0 & byte=0xd4; imm8 { AH = AL/imm8; AL = AL % imm8; resultflags(AX); } :AAS is vexMode=0 & bit64=0 & byte=0x3f { local car = ((AL & 0xf) > 9) | AF; AL = (AL-6*car)&0xf; AH=AH-car; CF=car; AF=car; } # See 'lockable.sinc' for memory destination, lockable variants :ADC AL,imm8 is vexMode=0 & byte=0x14; AL & imm8 { addCarryFlags( AL, imm8:1 ); resultflags( AL ); } :ADC AX,imm16 is vexMode=0 & opsize=0 & byte=0x15; AX & imm16 { addCarryFlags( AX, imm16:2 ); resultflags( AX ); } :ADC EAX,imm32 is vexMode=0 & opsize=1 & byte=0x15; EAX & check_EAX_dest & imm32 { addCarryFlags( EAX, imm32:4 ); build check_EAX_dest; resultflags( EAX ); } @ifdef IA64 :ADC RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x15; RAX & simm32 { addCarryFlags( RAX, simm32 ); resultflags( RAX ); } @endif :ADC Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=2; imm8 { addCarryFlags( Rmr8, imm8:1 ); resultflags( Rmr8 ); } :ADC Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=2; imm16 { addCarryFlags( Rmr16, imm16:2 ); resultflags( Rmr16 ); } :ADC Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=2; imm32 { addCarryFlags( Rmr32, imm32:4 ); build check_Rmr32_dest; resultflags( Rmr32 ); } @ifdef IA64 :ADC Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=2; simm32 { addCarryFlags( Rmr64, simm32 ); resultflags( Rmr64 ); } @endif :ADC Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=2; simm8_16 { addCarryFlags( Rmr16, simm8_16 ); resultflags(
# ADC sign-extended-imm8 (0x83) tails, register-to-register (0x10/0x11) and
# register-from-rm (0x12/0x13) forms across all operand sizes.
Rmr16 ); } :ADC Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=2; simm8_32 { addCarryFlags( Rmr32, simm8_32 ); build check_Rmr32_dest; resultflags( Rmr32 ); } @ifdef IA64 :ADC Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=2; simm8_64 { addCarryFlags( Rmr64, simm8_64 ); resultflags( Rmr64 ); } @endif :ADC Rmr8,Reg8 is vexMode=0 & byte=0x10; mod=3 & Rmr8 & Reg8 { addCarryFlags( Rmr8, Reg8 ); resultflags( Rmr8 ); } :ADC Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x11; mod=3 & Rmr16 & Reg16 { addCarryFlags( Rmr16, Reg16 ); resultflags( Rmr16 ); } :ADC Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x11; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { addCarryFlags( Rmr32, Reg32 ); build check_Rmr32_dest; resultflags( Rmr32 ); } @ifdef IA64 :ADC Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x11; mod=3 & Rmr64 & Reg64 { addCarryFlags( Rmr64, Reg64 ); resultflags( Rmr64 ); } @endif :ADC Reg8,rm8 is vexMode=0 & byte=0x12; rm8 & Reg8 ... { addCarryFlags( Reg8, rm8 ); resultflags( Reg8 ); } :ADC Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x13; rm16 & Reg16 ... { addCarryFlags( Reg16, rm16 ); resultflags( Reg16 ); } :ADC Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x13; rm32 & Reg32 ... & check_Reg32_dest ... { addCarryFlags( Reg32, rm32 ); build check_Reg32_dest; resultflags( Reg32 ); } @ifdef IA64 :ADC Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x13; rm64 & Reg64 ...
# ADC Reg64,rm64 tail, then ADD. Pattern mirrors ADC: AL/AX/EAX/RAX immediate
# forms (0x04/0x05), 0x80-0x83 group-1 immediate forms (reg_opcode=0 selects
# ADD within the group), and 0x00-0x03 register forms. addflags computes
# CF/OF from the operands BEFORE the addition, resultflags sets SF/ZF/PF
# after; memory-destination lockable variants live in lockable.sinc.
{ addCarryFlags( Reg64, rm64 ); resultflags( Reg64 ); } @endif # See 'lockable.sinc' for memory destination, lockable variants :ADD AL,imm8 is vexMode=0 & byte=0x4; AL & imm8 { addflags( AL,imm8 ); AL = AL + imm8; resultflags( AL); } :ADD AX,imm16 is vexMode=0 & opsize=0 & byte=0x5; AX & imm16 { addflags( AX,imm16); AX = AX + imm16; resultflags( AX); } :ADD EAX,imm32 is vexMode=0 & opsize=1 & byte=0x5; EAX & check_EAX_dest & imm32 { addflags( EAX,imm32); EAX = EAX + imm32; build check_EAX_dest; resultflags( EAX); } @ifdef IA64 :ADD RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x5; RAX & simm32 { addflags( RAX,simm32); RAX = RAX + simm32; resultflags( RAX); } @endif :ADD Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=0; imm8 { addflags( Rmr8,imm8 ); Rmr8 = Rmr8 + imm8; resultflags( Rmr8); } :ADD Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=0; imm16 { addflags( Rmr16,imm16); Rmr16 = Rmr16 + imm16; resultflags( Rmr16); } :ADD Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0; imm32 { addflags( Rmr32,imm32); Rmr32 = Rmr32 + imm32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :ADD Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=0; simm32 { addflags( Rmr64,simm32); Rmr64 = Rmr64 + simm32; resultflags( Rmr64); } @endif :ADD Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=0; simm8_16 { addflags( Rmr16,simm8_16); Rmr16 = Rmr16 + simm8_16; resultflags( Rmr16); } :ADD Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0; simm8_32 { addflags( Rmr32,simm8_32); Rmr32 = Rmr32 + simm8_32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :ADD Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=0; simm8_64 { addflags( Rmr64,simm8_64); Rmr64 = Rmr64 + simm8_64; resultflags( Rmr64); }
# ADD register-to-register (0x00/0x01) and register-from-rm (0x02/0x03) forms.
@endif :ADD Rmr8,Reg8 is vexMode=0 & byte=0x00; mod=3 & Rmr8 & Reg8 { addflags( Rmr8,Reg8 ); Rmr8 = Rmr8 + Reg8; resultflags( Rmr8); } :ADD Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x1; mod=3 & Rmr16 & Reg16 { addflags( Rmr16,Reg16); Rmr16 = Rmr16 + Reg16; resultflags( Rmr16); } :ADD Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x1; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { addflags( Rmr32,Reg32); Rmr32 = Rmr32 + Reg32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :ADD Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1; mod=3 & Rmr64 & Reg64 { addflags( Rmr64,Reg64); Rmr64 = Rmr64 + Reg64; resultflags( Rmr64); } @endif :ADD Reg8,rm8 is vexMode=0 & byte=0x2; rm8 & Reg8 ... { addflags( Reg8,rm8 ); Reg8 = Reg8 + rm8; resultflags( Reg8); } :ADD Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x3; rm16 & Reg16 ... { addflags(Reg16,rm16 ); Reg16 = Reg16 + rm16; resultflags(Reg16); } :ADD Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x3; rm32 & Reg32 ... & check_Reg32_dest ... { addflags(Reg32,rm32 ); Reg32 = Reg32 + rm32; build check_Reg32_dest; resultflags(Reg32); } @ifdef IA64 :ADD Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x3; rm64 & Reg64 ...
{ addflags(Reg64,rm64 ); Reg64 = Reg64 + rm64; resultflags(Reg64); } @endif # See 'lockable.sinc' for memory destination, lockable variants :AND AL,imm8 is vexMode=0 & byte=0x24; AL & imm8 { logicalflags(); AL = AL & imm8; resultflags( AL); } :AND AX,imm16 is vexMode=0 & opsize=0 & byte=0x25; AX & imm16 { logicalflags(); AX = AX & imm16; resultflags( AX); } :AND EAX,imm32 is vexMode=0 & opsize=1 & byte=0x25; EAX & check_EAX_dest & imm32 { logicalflags(); EAX = EAX & imm32; build check_EAX_dest; resultflags( EAX); } @ifdef IA64 :AND RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x25; RAX & simm32 { logicalflags(); RAX = RAX & simm32; resultflags( RAX); } @endif :AND Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=4; imm8 { logicalflags(); Rmr8 = Rmr8 & imm8; resultflags( Rmr8); } :AND Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=4; imm16 { logicalflags(); Rmr16 = Rmr16 & imm16; resultflags( Rmr16); } :AND Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=4; imm32 { logicalflags(); Rmr32 = Rmr32 & imm32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :AND Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=4; simm32 { logicalflags(); Rmr64 = Rmr64 & simm32; resultflags( Rmr64); } @endif :AND Rmr16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=4; usimm8_16 { logicalflags(); Rmr16 = Rmr16 & usimm8_16; resultflags( Rmr16); } :AND Rmr32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=4; usimm8_32 { logicalflags(); Rmr32 = Rmr32 & usimm8_32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :AND Rmr64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=4; usimm8_64 { logicalflags(); Rmr64 = Rmr64 & usimm8_64; resultflags( Rmr64); } @endif :AND Rmr8,Reg8 is vexMode=0 & byte=0x20; mod=3 & Rmr8 & 
Reg8 { logicalflags(); Rmr8 = Rmr8 & Reg8; resultflags( Rmr8); } :AND Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x21; mod=3 & Rmr16 & Reg16 { logicalflags(); Rmr16 = Rmr16 & Reg16; resultflags( Rmr16); } :AND Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x21; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { logicalflags(); Rmr32 = Rmr32 & Reg32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :AND Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x21; mod=3 & Rmr64 & Reg64 { logicalflags(); Rmr64 = Rmr64 & Reg64; resultflags( Rmr64); } @endif :AND Reg8,rm8 is vexMode=0 & byte=0x22; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 & rm8; resultflags( Reg8); } :AND Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x23; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 & rm16; resultflags(Reg16); } :AND Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x23; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 & rm32; build check_Reg32_dest; resultflags(Reg32); } @ifdef IA64 :AND Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x23; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 & rm64; resultflags(Reg64); } @endif #ARPL is not encodable in 64-bit mode :ARPL rm16,Reg16 is $(LONGMODE_OFF) & vexMode=0 & bit64=0 & byte=0x63; rm16 & Reg16 ... { local rpldest=rm16&3; local rplsrc=Reg16&3; local rpldiff=rplsrc-rpldest; ZF = rpldiff s> 0; rm16 = rm16 + (zext(CF) * rpldiff); } :BOUND Reg16,m16 is $(LONGMODE_OFF) & vexMode=0 & bit64=0 & opsize=0 & byte=0x62; m16 & Reg16 ... { } :BOUND Reg32,m32 is $(LONGMODE_OFF) & vexMode=0 & bit64=0 & opsize=1 & byte=0x62; m32 & Reg32 ... { } #:BSF Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbc; rm16 & Reg16 ... 
{ ZF = rm16 == 0; # choose = 0xffff * (zext((0xff & rm16) == 0)); # mask = (0xf00 & choose) | (0xf | ~choose); # pos = 8 & choose; # choose = 0xffff * (zext((mask & rm16) == 0)); # mask1 = (mask << 2) & (mask << 4); # mask2 = (mask >> 2) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (4 & choose); # choose = 0xffff * (zext((mask & rm16) == 0)); # mask1 = (mask << 1) & (mask << 2); # mask2 = (mask >> 1) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (2 & choose); # choose = zext((mask & rm16) == 0); # Reg16 = pos + choose; } :BSF Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbc; rm16 & Reg16 ... { bitIndex:2 = 0; ZF = ( rm16 == 0 ); if ( ZF == 1 ) goto ; if ( ((rm16 >> bitIndex) & 0x0001) != 0 ) goto ; bitIndex = bitIndex + 1; goto ; Reg16 = bitIndex; } #:BSF Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbc; rm32 & Reg32 ... & check_Reg32_dest ... { ZF = rm32 == 0; # choose = 0xffffffff * (zext((0xffff & rm32) == 0)); # mask = (0xff0000 & choose) | (0xff | ~choose); # pos = 16 & choose; # choose = 0xffffffff * (zext((mask & rm32) == 0)); # mask1 = (mask << 4) & (mask << 8); # mask2 = (mask >> 4) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (8 & choose); # choose = 0xffffffff * (zext((mask & rm32) == 0)); # mask1 = (mask << 2) & (mask << 4); # mask2 = (mask >> 2) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (4 & choose); # choose = 0xffffffff * (zext((mask & rm32) == 0)); # mask1 = (mask << 1) & (mask << 2); # mask2 = (mask >> 1) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (2 & choose); # choose = zext((mask & rm32) == 0); # Reg32 = pos + choose; # build check_Reg32_dest; } :BSF Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbc; rm32 & Reg32 ... & check_Reg32_dest ... 
{ bitIndex:4 = 0; ZF = ( rm32 == 0 ); if ( ZF == 1 ) goto ; if ( ((rm32 >> bitIndex) & 0x00000001) != 0 ) goto ; bitIndex = bitIndex + 1; goto ; Reg32 = bitIndex; build check_Reg32_dest; } @ifdef IA64 #:BSF Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbc; rm64 & Reg64 ... { ZF = rm64 == 0; ## TODO: NEED TO EXTEND THIS TO 64bit op # choose = 0xffffffff * (zext((0xffff & rm64) == 0)); # mask = (0xff0000 & choose) | (0xff | ~choose); # pos = 16 & choose; # choose = 0xffffffff * (zext((mask & rm64) == 0)); # mask1 = (mask << 4) & (mask << 8); # mask2 = (mask >> 4) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (8 & choose); # choose = 0xffffffff * (zext((mask & rm64) == 0)); # mask1 = (mask << 2) & (mask << 4); # mask2 = (mask >> 2) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (4 & choose); # choose = 0xffffffff * (zext((mask & rm64) == 0)); # mask1 = (mask << 1) & (mask << 2); # mask2 = (mask >> 1) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos + (2 & choose); # choose = zext((mask & rm64) == 0); # Reg64 = pos + choose; } :BSF Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbc; rm64 & Reg64 ... { bitIndex:8 = 0; ZF = ( rm64 == 0 ); if ( ZF == 1 ) goto ; if ( ((rm64 >> bitIndex) & 0x0000000000000001) != 0 ) goto ; bitIndex = bitIndex + 1; goto ; Reg64 = bitIndex; } @endif #:BSR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbd; rm16 & Reg16 ... 
{ ZF = rm16 == 0; # choose = 0xffff * (zext((0xff00 & rm16) == 0)); # mask = (0xf000 & ~choose) | (0xf0 | choose); # pos = 16 - (8 & choose); # choose = 0xffff * (zext((mask & rm16) == 0)); # mask1 = (mask >> 2) & (mask >> 4); # mask2 = (mask << 2) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (4 & choose); # choose = 0xffff * (zext((mask & rm16) == 0)); # mask1 = (mask >> 1) & (mask >> 2); # mask2 = (mask << 1) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (2 & choose); # choose = zext((mask & rm16) == 0); # Reg16 = pos - choose; } :BSR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbd; rm16 & Reg16 ... { bitIndex:2 = 15; ZF = ( rm16 == 0 ); if ( ZF == 1 ) goto ; if ( (rm16 >> bitIndex) != 0 ) goto ; bitIndex = bitIndex - 1; goto ; Reg16 = bitIndex; } #:BSR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbd; rm32 & Reg32 ... & check_Reg32_dest ... { ZF = rm32 == 0; # choose = 0xffffffff * (zext((0xffff0000 & rm32) == 0)); # mask = (0xff000000 & ~choose) | (0xff00 | choose); # pos = 32 - (16 & choose); # choose = 0xffffffff * (zext((mask & rm32) == 0)); # mask1 = (mask >> 4) & (mask >> 8); # mask2 = (mask << 4) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (8 & choose); # choose = 0xffffffff * (zext((mask & rm32) == 0)); # mask1 = (mask >> 2) & (mask >> 4); # mask2 = (mask << 2) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (4 & choose); # choose = 0xffffffff * (zext((mask & rm32) == 0)); # mask1 = (mask >> 1) & (mask >> 2); # mask2 = (mask << 1) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (2 & choose); # choose = zext((mask & rm32) == 0); # Reg32 = pos - choose; # build check_Reg32_dest; } :BSR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbd; rm32 & Reg32 ... & check_Reg32_dest ... 
{ bitIndex:4 = 31; ZF = ( rm32 == 0 ); if ( ZF == 1 ) goto ; if ( (rm32 >> bitIndex) != 0 ) goto ; bitIndex = bitIndex - 1; goto ; Reg32 = bitIndex; build check_Reg32_dest; } @ifdef IA64 #:BSR Reg64,rm64 is vexMode=0 & opsize=2 & byte=0xf; byte=0xbd; rm64 & Reg64 ... { ZF = rm64 == 0; ## TODO: NEED TO EXTEND THIS TO 64bit op # choose = 0xffffffff * (zext((0xffff0000 & rm64) == 0)); # mask = (0xff000000 & ~choose) | (0xff00 | choose); # pos = 32 - (16 & choose); # choose = 0xffffffff * (zext((mask & rm64) == 0)); # mask1 = (mask >> 4) & (mask >> 8); # mask2 = (mask << 4) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (8 & choose); # choose = 0xffffffff * (zext((mask & rm64) == 0)); # mask1 = (mask >> 2) & (mask >> 4); # mask2 = (mask << 2) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (4 & choose); # choose = 0xffffffff * (zext((mask & rm64) == 0)); # mask1 = (mask >> 1) & (mask >> 2); # mask2 = (mask << 1) & mask; # mask = (mask1 & choose) | (mask2 | ~choose); # pos = pos - (2 & choose); # choose = zext((mask & rm64) == 0); # Reg64 = pos - choose; } :BSR Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbd; rm64 & Reg64 ... 
{ bitIndex:8 = 63; ZF = ( rm64 == 0 ); if ( ZF == 1 ) goto ; if ( (rm64 >> bitIndex) != 0 ) goto ; bitIndex = bitIndex - 1; goto ; Reg64 = bitIndex; } @endif :BSWAP Rmr32 is vexMode=0 & byte=0xf; row=12 & page=1 & Rmr32 & check_Rmr32_dest { local tmp = (Rmr32 & 0xff000000) >> 24 ; tmp = tmp | ((Rmr32 & 0x00ff0000) >> 8 ); tmp = tmp | ((Rmr32 & 0x0000ff00) << 8 ); Rmr32 = tmp | ((Rmr32 & 0x000000ff) << 24); build check_Rmr32_dest; } @ifdef IA64 :BSWAP Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; row=12 & page=1 & Rmr64 { local tmp = (Rmr64 & 0xff00000000000000) >> 56 ; tmp = tmp | ((Rmr64 & 0x00ff000000000000) >> 40 ); tmp = tmp | ((Rmr64 & 0x0000ff0000000000) >> 24 ); tmp = tmp | ((Rmr64 & 0x000000ff00000000) >> 8 ); tmp = tmp | ((Rmr64 & 0x00000000ff000000) << 8 ); tmp = tmp | ((Rmr64 & 0x0000000000ff0000) << 24 ); tmp = tmp | ((Rmr64 & 0x000000000000ff00) << 40 ); Rmr64 = tmp | ((Rmr64 & 0x00000000000000ff) << 56); } @endif :BT Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xa3; mod=3 & Rmr16 & Reg16 { CF = ((Rmr16 >> (Reg16 & 0xf)) & 1) != 0; } :BT Mem,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xa3; Mem & Reg16 ... { local ptr = Mem + (sext(Reg16) s>> 3); CF = ((*:1 ptr >> (Reg16 & 0x7)) & 1) != 0; } :BT Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xa3; mod=3 & Rmr32 & Reg32 { CF = ((Rmr32 >> (Reg32 & 0x1f)) & 1) != 0; } :BT Mem,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xa3; Mem & Reg32 ... { @ifdef IA64 local ptr = Mem + (sext(Reg32) s>> 3); @else local ptr = Mem + (Reg32 s>> 3); @endif CF = ((*:1 ptr >> (Reg32 & 0x7)) & 1) != 0; } @ifdef IA64 :BT Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xa3; mod=3 & Rmr64 & Reg64 { CF = ((Rmr64 >> (Reg64 & 0x3f)) & 1) != 0; } :BT Mem,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xa3; Mem & Reg64 ... 
{ local ptr = Mem + (Reg64 s>> 3); CF = ((*:1 ptr >> (Reg64 & 0x7)) & 1) != 0; } @endif :BT rm16,imm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xba; (rm16 & reg_opcode=4 ...); imm8 { CF = ((rm16 >> (imm8 & 0x0f)) & 1) != 0; } :BT rm32,imm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xba; (rm32 & reg_opcode=4 ...); imm8 { CF = ((rm32 >> (imm8 & 0x1f)) & 1) != 0; } @ifdef IA64 :BT rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xba; (rm64 & reg_opcode=4 ...); imm8 { CF = ((rm64 >> (imm8 & 0x3f)) & 1) != 0; } @endif # See 'lockable.sinc' for memory destination, lockable variants :BTC Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbb; mod=3 & Rmr16 & Reg16 { local bit=Reg16&0xf; local val=(Rmr16>>bit)&1; Rmr16=Rmr16^(1<>bit)&1; CF=(val!=0); Rmr32=Rmr32^(1<>bit)&1; Rmr64=Rmr64^(1<>bit)&1; Rmr16=Rmr16^(1<>bit)&1; CF=(val!=0); Rmr32=Rmr32^(1<>bit)&1; Rmr64=Rmr64^(1<>bit)&1; Rmr16=Rmr16 & ~(1<>bit)&1; CF=(val!=0); Rmr32=Rmr32 & ~(1<>bit)&1; Rmr64=Rmr64 & ~(1<>bit)&1; Rmr16=Rmr16 & ~(1<>bit)&1; CF=(val!=0); Rmr32=Rmr32 & ~(1<>bit)&1; Rmr64=Rmr64 & ~(1<>bit)&1; Rmr16=Rmr16 | (1<>bit)&1; CF=(val!=0); Rmr32=Rmr32 | (1<>bit)&1; Rmr64=Rmr64 | (1<>bit)&1; Rmr16=Rmr16 | (1<>bit)&1; CF=(val!=0); Rmr32=Rmr32 | (1<>bit)&1; Rmr64=Rmr64 | (1<; AL = dest; goto inst_next; Rmr8 = Reg8; } :CMPXCHG Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb1; mod=3 & Rmr16 & Reg16 { local dest = Rmr16; subflags(AX,dest); local diff = AX-dest; resultflags(diff); if (ZF) goto ; AX = dest; goto inst_next; Rmr16 = Reg16; } :CMPXCHG Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb1; mod=3 & Rmr32 & Reg32 & check_EAX_dest & check_Rmr32_dest { #this instruction writes to either EAX or Rmr32 #in 64-bit mode, a 32-bit register that is written to #(and only the register that is written to) #must be zero-extended to 64 bits local dest = Rmr32; subflags(EAX,dest); local diff = EAX-dest; resultflags(diff); if (ZF) goto ; EAX = dest; build check_EAX_dest; goto inst_next; 
Rmr32 = Reg32; build check_Rmr32_dest; } @ifdef IA64 :CMPXCHG Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb1; mod=3 & Rmr64 & Reg64 { local dest = Rmr64; subflags(RAX,dest); local diff = RAX-dest; resultflags(diff); if (ZF) goto ; RAX = dest; goto inst_next; Rmr64 = Reg64; } @endif # CMPXCHG8B See 'lockable.sinc' for memory destination, lockable variants # This "bad_CMPXCHG8B" instruction encoding was not meant to be part of the x86 language. # It was allowed by a toolchain (at Intel) and was encoded into at least one library. # GCC does not recognize it. It does not make any semantic sense. define pcodeop bad_CMPXCHG8B; :bad_CMPXCHG8B r32 is vexMode=0 & byte=0xf; byte=0xc7; ( mod = 0b11 & reg_opcode=0b001 ) & r32 { r32 = bad_CMPXCHG8B(r32); } # CMPXCHG16B See 'lockable.sinc' for memory destination, lockable variants define pcodeop cpuid; define pcodeop cpuid_basic_info; define pcodeop cpuid_Version_info; define pcodeop cpuid_cache_tlb_info; define pcodeop cpuid_serial_info; define pcodeop cpuid_Deterministic_Cache_Parameters_info; define pcodeop cpuid_MONITOR_MWAIT_Features_info; define pcodeop cpuid_Thermal_Power_Management_info; define pcodeop cpuid_Extended_Feature_Enumeration_info; define pcodeop cpuid_Direct_Cache_Access_info; define pcodeop cpuid_Architectural_Performance_Monitoring_info; define pcodeop cpuid_Extended_Topology_info; define pcodeop cpuid_Processor_Extended_States_info; define pcodeop cpuid_Quality_of_Service_info; define pcodeop cpuid_brand_part1_info; define pcodeop cpuid_brand_part2_info; define pcodeop cpuid_brand_part3_info; # CPUID is very difficult to implement correctly # The side-effects of the call will show up, but not the correct values :CPUID is vexMode=0 & byte=0xf; byte=0xa2 { tmpptr:$(SIZE) = 0; if (EAX == 0) goto ; if (EAX == 1) goto ; if (EAX == 2) goto ; if (EAX == 3) goto ; if (EAX == 0x4) goto ; if (EAX == 0x5) goto ; if (EAX == 0x6) goto ; if (EAX == 0x7) goto ; if (EAX == 0x9) goto ; if (EAX 
== 0xa) goto ; if (EAX == 0xb) goto ; if (EAX == 0xd) goto ; if (EAX == 0xf) goto ; if (EAX == 0x80000002) goto ; if (EAX == 0x80000003) goto ; if (EAX == 0x80000004) goto ; tmpptr = cpuid(EAX); goto ; tmpptr = cpuid_basic_info(EAX); goto ; tmpptr = cpuid_Version_info(EAX); goto ; tmpptr = cpuid_cache_tlb_info(EAX); goto ; tmpptr = cpuid_serial_info(EAX); goto ; tmpptr = cpuid_Deterministic_Cache_Parameters_info(EAX); goto ; tmpptr = cpuid_MONITOR_MWAIT_Features_info(EAX); goto ; tmpptr = cpuid_Thermal_Power_Management_info(EAX); goto ; tmpptr = cpuid_Extended_Feature_Enumeration_info(EAX); goto ; tmpptr = cpuid_Direct_Cache_Access_info(EAX); goto ; tmpptr = cpuid_Architectural_Performance_Monitoring_info(EAX); goto ; tmpptr = cpuid_Extended_Topology_info(EAX); goto ; tmpptr = cpuid_Processor_Extended_States_info(EAX); goto ; tmpptr = cpuid_Quality_of_Service_info(EAX); goto ; tmpptr = cpuid_brand_part1_info(EAX); goto ; tmpptr = cpuid_brand_part2_info(EAX); goto ; tmpptr = cpuid_brand_part3_info(EAX); goto ; @ifdef IA64 RAX = zext(*:4 (tmpptr)); RBX = zext(*:4 (tmpptr + 4)); RDX = zext(*:4 (tmpptr + 8)); RCX = zext(*:4 (tmpptr + 12)); @else EAX = *tmpptr; EBX = *(tmpptr + 4); EDX = *(tmpptr + 8); ECX = *(tmpptr + 12); @endif } :DAA is vexMode=0 & bit64=0 & byte=0x27 { local car = ((AL & 0xf) > 9) | AF; AL = AL + 6 * car; CF = CF | car * carry(AL,6); AF = car; car = ((AL & 0xf0) > 0x90) | CF; AL = AL + 0x60 * car; CF = car; } :DAS is vexMode=0 & bit64=0 & byte=0x2f { local car = ((AL & 0xf) > 9) | AF; AL = AL - 6 * car; CF = CF | car * (AL < 6); AF = car; car = (AL > 0x9f) | CF; AL = AL - 0x60 * car; CF = car; } # See 'lockable.sinc' for memory destination, lockable variants :DEC Rmr8 is vexMode=0 & byte=0xfe; mod=3 & Rmr8 & reg_opcode=1 { OF = sborrow(Rmr8,1); Rmr8 = Rmr8 - 1; resultflags( Rmr8); } :DEC Rmr16 is vexMode=0 & opsize=0 & byte=0xff; mod=3 & Rmr16 & reg_opcode=1 { OF = sborrow(Rmr16,1); Rmr16 = Rmr16 - 1; resultflags(Rmr16); } :DEC Rmr32 is vexMode=0 & 
opsize=1 & byte=0xff; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=1 { OF = sborrow(Rmr32,1); Rmr32 = Rmr32 - 1; build check_rm32_dest; resultflags(Rmr32); } @ifdef IA64 :DEC Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xff; mod=3 & Rmr64 & reg_opcode=1 { OF = sborrow(Rmr64,1); Rmr64 = Rmr64 - 1; resultflags(Rmr64); } @endif :DEC Rmr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & row=4 & page=1 & Rmr16 { OF = sborrow(Rmr16,1); Rmr16 = Rmr16 - 1; resultflags( Rmr16); } :DEC Rmr32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & row=4 & page=1 & Rmr32 & check_Rmr32_dest { OF = sborrow(Rmr32,1); Rmr32 = Rmr32 - 1; build check_Rmr32_dest; resultflags( Rmr32); } :DIV rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=6 ... { rm8ext:2 = zext(rm8); local quotient = AX / rm8ext; # DE exception if quotient doesn't fit in AL local rem = AX % rm8ext; AL = quotient:1; AH = rem:1; } :DIV rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=6 ... { rm16ext:4 = zext(rm16); tmp:4 = (zext(DX) << 16) | zext(AX); # DE exception if quotient doesn't fit in AX local quotient = tmp / rm16ext; AX = quotient:2; local rem = tmp % rm16ext; DX = rem:2; } :DIV rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EDX_dest ... & check_EAX_dest ... & reg_opcode=6 ... { rm32ext:8 = zext(rm32); tmp:8 = (zext(EDX) << 32) | zext(EAX); # DE exception if quotient doesn't fit in EAX local quotient = tmp / rm32ext; EAX = quotient:4; build check_EAX_dest; local rem = tmp % rm32ext; EDX = rem:4; build check_EDX_dest; } @ifdef IA64 :DIV rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=6 ... 
{ rm64ext:16 = zext(rm64); tmp:16 = (zext(RDX) << 64) | zext(RAX); # DE exception if quotient doesn't fit in RAX local quotient = tmp / rm64ext; RAX = quotient:8; local rem = tmp % rm64ext; RDX = rem:8; } @endif enterFrames: low5 is low5 { tmp:1 = low5; export tmp; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames & low5=0x00 { push44(EBP); EBP = ESP; ESP = ESP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames & low5=0x01 { push44(EBP); frameTemp:4 = ESP; push44(frameTemp); EBP = frameTemp; ESP = ESP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc8; imm16; enterFrames { push44(EBP); frameTemp:4 = ESP; @ifdef IA64 ESPt:$(SIZE) = zext(ESP); EBPt:$(SIZE) = zext(EBP); @else ESPt:$(SIZE) = ESP; EBPt:$(SIZE) = EBP; @endif ii:1 = enterFrames - 1; EBPt = EBPt - 4; ESPt = ESPt - 4; *:4 ESPt = *:4 EBPt; ii = ii - 1; if (ii s> 0) goto ; tmp_offset:4 = 4 * zext(enterFrames - 1); ESP = ESP - tmp_offset; EBP = EBP - tmp_offset; push44(frameTemp); EBP = frameTemp; ESP = ESP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 { push42(BP); BP = SP; SP = SP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 { push42(BP); frameTemp:2 = SP; push42(frameTemp); BP = frameTemp; SP = SP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc8; imm16; enterFrames { push42(BP); frameTemp:2 = SP; @ifdef IA64 ESPt:$(SIZE) = zext(ESP); EBPt:$(SIZE) = zext(EBP); @else ESPt:$(SIZE) = ESP; EBPt:$(SIZE) = EBP; @endif ii:1 = enterFrames - 1; EBPt = EBPt - 2; ESPt = ESPt - 2; *:2 ESPt = *:2 EBPt; ii = ii - 1; if (ii s> 0) goto ; tmp_offset:4 = 2 * zext(enterFrames - 1); ESP = ESP - tmp_offset; EBP = 
EBP - tmp_offset; push42(frameTemp); BP = frameTemp; SP = SP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 { push22(BP); BP = SP; SP = SP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 { push22(BP); frameTemp:2 = SP; push22(frameTemp); BP = frameTemp; SP = SP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & seg16 & addrsize=0 & opsize=1 & byte=0xc8; imm16; enterFrames { push24(zext(BP)); frameTemp:2 = SP; SPt:2 = SP; BPt:2 = BP; ii:1 = enterFrames - 1; BPt = BPt - 4; tmp2:$(SIZE) = segment(seg16,BPt); SPt = SPt - 4; tmp:$(SIZE) = segment(SS,SPt); *:4 tmp = *:4 tmp2; ii = ii - 1; if (ii s> 0) goto ; tmp_offset:2 = 4 * zext(enterFrames - 1); SP = SP - tmp_offset; BP = BP - tmp_offset; push24(zext(frameTemp)); BP = frameTemp; SP = SP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_OFF) & vexMode=0 & seg16 & addrsize=0 & opsize=0 & byte=0xc8; imm16; enterFrames { push22(BP); frameTemp:2 = SP; SPt:2 = SP; BPt:2 = BP; ii:1 = enterFrames - 1; BPt = BPt - 2; tmp2:$(SIZE) = segment(seg16,BPt); SPt = SPt - 2; tmp:$(SIZE) = segment(SS,SPt); *:2 tmp = *:2 tmp2; ii = ii - 1; if (ii s> 0) goto ; tmp_offset:2 = 2 * zext(enterFrames - 1); SP = SP - tmp_offset; BP = BP - tmp_offset; push22(frameTemp); BP = frameTemp; SP = SP - imm16; } @ifdef IA64 :ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & byte=0xc8; imm16; enterFrames & low5=0x00 { push88(RBP); RBP = RSP; RSP = RSP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & byte=0xc8; imm16; enterFrames & low5=0x01 { push88(RBP); frameTemp:8 = RSP; push88(frameTemp); RBP = frameTemp; RSP = RSP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & byte=0xc8; imm16; enterFrames { push88(RBP); frameTemp:8 = RSP; RSPt:$(SIZE) = RSP; RBPt:$(SIZE) = RBP; ii:1 = enterFrames - 1; RBPt = RBPt - 8; RSPt = RSPt - 8; *:8 RSPt = *:8 
RBPt; ii = ii - 1; if (ii s> 0) goto ; tmp_offset:8 = 8 * zext(enterFrames - 1); RSP = RSP - tmp_offset; RBP = RBP - tmp_offset; push88(frameTemp); RBP = frameTemp; RSP = RSP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x00 { push82(BP); RBP = RSP; RSP = RSP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc8; imm16; enterFrames & low5=0x01 { push82(BP); frameTemp:2 = SP; push82(frameTemp); BP = frameTemp; RSP = RSP - imm16; } :ENTER imm16,enterFrames is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc8; imm16; enterFrames { push82(BP); frameTemp:2 = SP; RSPt:$(SIZE) = RSP; RBPt:$(SIZE) = RBP; ii:1 = enterFrames - 1; RBPt = RBPt - 2; RSPt = RSPt - 2; *:2 RSPt = *:2 RBPt; ii = ii - 1; if (ii s> 0) goto ; tmp_offset:8 = 2 * zext(enterFrames - 1); RSP = RSP - tmp_offset; RBP = RBP - tmp_offset; push82(frameTemp); BP = frameTemp; RSP = RSP - imm16; } @endif # Informs the 80287 coprocessor of the switch to protected mode, treated as NOP for 80387 and later. # We used to have a pseudo-op, but as this is a legacy instruction which is now explicitly treated # as a NOP. We treat it as a NOP as well. :FSETPM is vexMode=0 & byte=0xdb; byte=0xe4 { } # 80287 set protected mode :HLT is vexMode=0 & byte=0xf4 { goto inst_start; } :IDIV rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=7 ... { rm8ext:2 = sext(rm8); local quotient = AX s/ rm8ext; # DE exception if quotient doesn't fit in AL local rem = AX s% rm8ext; AL = quotient:1; AH = rem:1; } :IDIV rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=7 ... { rm16ext:4 = sext(rm16); tmp:4 = (zext(DX) << 16) | zext(AX); # DE exception if quotient doesn't fit in AX local quotient = tmp s/ rm16ext; AX = quotient:2; local rem = tmp s% rm16ext; DX = rem:2; } :IDIV rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=7 ... 
{ rm32ext:8 = sext(rm32); tmp:8 = (zext(EDX) << 32) | zext(EAX); # DE exception if quotient doesn't fit in EAX local quotient = tmp s/ rm32ext; EAX = quotient:4; build check_EAX_dest; local rem = tmp s% rm32ext; EDX = rem:4; build check_EDX_dest; } @ifdef IA64 :IDIV rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=7 ... { rm64ext:16 = sext(rm64); tmp:16 = (zext(RDX) << 64) | zext(RAX); # DE exception if quotient doesn't fit in RAX local quotient = tmp s/ rm64ext; RAX = quotient:8; local rem = tmp s% rm64ext; RDX = rem:8; } @endif :IMUL rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=5 ... { AX = sext(AL) * sext(rm8); imultflags(AL,AX); } :IMUL rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=5 ... { tmp:4 = sext(AX) * sext(rm16); DX = tmp(2); AX = tmp(0); imultflags(AX,tmp); } :IMUL rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=5 ... { tmp:8 = sext(EAX) * sext(rm32); EDX = tmp(4); build check_EDX_dest; EAX = tmp(0); build check_EAX_dest; imultflags(EAX,tmp); } @ifdef IA64 # We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct :IMUL rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=5 ... { tmp:16 = sext(RAX) * sext(rm64); RAX = RAX * rm64; RDX = tmp(8); imultflags(RAX,tmp); } @endif :IMUL Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xaf; rm16 & Reg16 ... { tmp:4 = sext(Reg16) * sext(rm16); Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp);} :IMUL Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0xaf; rm32 & Reg32 ... & check_Reg32_dest ... 
{ tmp:8 = sext(Reg32) * sext(rm32); Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; } @ifdef IA64 # We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct :IMUL Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xaf; rm64 & Reg64 ... { tmp:16 = sext(Reg64) * sext(rm64); Reg64 = Reg64 * rm64; high:8 = tmp(8); imultflags(Reg64,tmp);} @endif :IMUL Reg16,rm16,simm8_16 is vexMode=0 & opsize=0 & byte=0x6b; (rm16 & Reg16 ...) ; simm8_16 { tmp:4 = sext(rm16) * sext(simm8_16); Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp);} :IMUL Reg32,rm32,simm8_32 is vexMode=0 & opsize=1 & byte=0x6b; (rm32 & Reg32 ... & check_Reg32_dest ... ) ; simm8_32 { tmp:8 = sext(rm32) * sext(simm8_32); Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; } @ifdef IA64 # We do a second multiply so emulator(s) that only have precision up to 64 bits will still get lower 64 bits correct :IMUL Reg64,rm64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x6b; (rm64 & Reg64 ...) ; simm8_64 { tmp:16 = sext(rm64) * sext(simm8_64); Reg64 = rm64 * simm8_64; high:8 = tmp(8); imultflags(Reg64,tmp);} @endif :IMUL Reg16,rm16,simm16_16 is vexMode=0 & opsize=0 & byte=0x69; (rm16 & Reg16 ...) ; simm16_16 { tmp:4 = sext(rm16) * sext(simm16_16); Reg16 = tmp(0); high:2 = tmp(2); imultflags(Reg16,tmp);} :IMUL Reg32,rm32,simm32_32 is vexMode=0 & opsize=1 & byte=0x69; (rm32 & Reg32 ... & check_Reg32_dest ...) ; simm32_32 { tmp:8 = sext(rm32) * sext(simm32_32); Reg32 = tmp(0); high:4 = tmp(4); imultflags(Reg32,tmp); build check_Reg32_dest; } @ifdef IA64 :IMUL Reg64,rm64,simm32_32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x69; (rm64 & Reg64 ...) ; simm32_32 { tmp:16 = sext(rm64) * sext(simm32_32); Reg64 = rm64 * sext(simm32_32); high:8 = tmp(8); imultflags(Reg64,tmp);} @endif # these appear in intelman2.pdf, but do they really exist? 
#:IMUL Reg16,simm8_16 is vexMode=0 & opsize=0 & byte=0x6b; Reg16; simm8_16 #:IMUL Reg32,simm8_32 is vexMode=0 & opsize=1 & byte=0x6b; Reg32; simm8_32 #:IMUL Reg16,simm16 is vexMode=0 & opsize=0 & byte=0x69; Reg16; simm16 #:IMUL Reg32,simm32 is vexMode=0 & opsize=1 & byte=0x69; Reg32; simm32 :IN AL, imm8 is vexMode=0 & AL & (byte=0xe4; imm8) { tmp:1 = imm8; AL = in(tmp); } :IN AX, imm8 is vexMode=0 & opsize=0 & AX & (byte=0xe5; imm8) { tmp:1 = imm8; AX = in(tmp); } :IN EAX, imm8 is vexMode=0 & opsize=1 & EAX & check_EAX_dest & (byte=0xe5; imm8) { tmp:1 = imm8; EAX = in(tmp); build check_EAX_dest; } @ifdef IA64 :IN RAX, imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & RAX & (byte=0xe5; imm8) { tmp:1 = imm8; RAX = in(tmp); } @endif :IN AL, DX is vexMode=0 & AL & DX & (byte=0xec) { AL = in(DX); } :IN AX, DX is vexMode=0 & opsize=0 & AX & DX & (byte=0xed) { AX = in(DX); } :IN EAX, DX is vexMode=0 & opsize=1 & EAX & check_EAX_dest & DX & (byte=0xed) { EAX = in(DX); build check_EAX_dest; } @ifdef IA64 :IN RAX, DX is $(LONGMODE_ON) & vexMode=0 & opsize=2 & RAX & DX & (byte=0xed) { RAX = in(DX); } @endif # See 'lockable.sinc' for memory destination, lockable variants :INC Rmr8 is vexMode=0 & byte=0xfe; mod=3 & Rmr8 & reg_opcode=0 { OF = scarry(Rmr8,1); Rmr8 = Rmr8 + 1; resultflags( Rmr8); } :INC Rmr16 is vexMode=0 & opsize=0 & byte=0xff; mod=3 & Rmr16 & reg_opcode=0 { OF = scarry(Rmr16,1); Rmr16 = Rmr16 + 1; resultflags(Rmr16); } :INC Rmr32 is vexMode=0 & opsize=1 & byte=0xff; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0 { OF = scarry(Rmr32,1); Rmr32 = Rmr32 + 1; build check_Rmr32_dest; resultflags(Rmr32); } @ifdef IA64 :INC Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xff; mod=3 & Rmr64 & reg_opcode=0 { OF = scarry(Rmr64,1); Rmr64 = Rmr64 + 1; resultflags(Rmr64); } @endif :INC Rmr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & row = 4 & page = 0 & Rmr16 { OF = scarry(Rmr16,1); Rmr16 = Rmr16 + 1; resultflags( Rmr16); } :INC Rmr32 is $(LONGMODE_OFF) & vexMode=0 & 
opsize=1 & row = 4 & page = 0 & Rmr32 { OF = scarry(Rmr32,1); Rmr32 = Rmr32 + 1; resultflags( Rmr32); }

# INS: input from port DX into the ES:(E)DI string destination, REP-capable.
:INSB^rep^reptail eseDI1,DX is vexMode=0 & rep & reptail & byte=0x6c & eseDI1 & DX { build rep; build eseDI1; eseDI1 = in(DX); build reptail; }
:INSW^rep^reptail eseDI2,DX is vexMode=0 & rep & reptail & opsize=0 & byte=0x6d & eseDI2 & DX { build rep; build eseDI2; eseDI2 = in(DX); build reptail; }
:INSD^rep^reptail eseDI4,DX is vexMode=0 & rep & reptail & opsize=1 & byte=0x6d & eseDI4 & DX { build rep; build eseDI4; eseDI4 = in(DX); build reptail; }
:INSD^rep^reptail eseDI4,DX is vexMode=0 & rep & reptail & opsize=2 & byte=0x6d & eseDI4 & DX { build rep; build eseDI4; eseDI4 = in(DX); build reptail; }

# Software interrupts: resolve the handler address via the swi() user op
# and call through it.
:INT1 is vexMode=0 & byte=0xf1 { tmp:1 = 0x1; intloc:$(SIZE) = swi(tmp); call [intloc]; return [0:1]; }
:INT3 is vexMode=0 & byte=0xcc { tmp:1 = 0x3; intloc:$(SIZE) = swi(tmp); call [intloc]; return [0:1]; }
:INT imm8 is vexMode=0 & byte=0xcd; imm8 { tmp:1 = imm8; intloc:$(SIZE) = swi(tmp); call [intloc]; }
# INTO: raise interrupt 4 only when OF is set.  FIX: the conditional skip
# read "goto ;" with no target -- the local label token was lost; restore a
# local label so the handler call is skipped when OF != 1.
:INTO is vexMode=0 & byte=0xce & bit64=0 { tmp:1 = 0x4; intloc:$(SIZE) = swi(tmp); if (OF != 1) goto <no_overflow>; call [intloc]; <no_overflow> }

# Cache/TLB invalidation; INVD has no p-code effect to model.
:INVD is vexMode=0 & byte=0xf; byte=0x8 {}
:INVLPG Mem is vexMode=0 & byte=0xf; byte=0x1; ( reg_opcode=7 ) ... & Mem { invlpg(Mem); }
:INVLPGA is vexMode=0 & addrsize=0 & byte=0xf; byte=0x1; byte=0xDF { invlpga(AX,ECX); }
:INVLPGA is vexMode=0 & addrsize=1 & byte=0xf; byte=0x1; byte=0xDF { invlpga(EAX,ECX); }
@ifdef IA64
:INVLPGA is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0x1; byte=0xDF { invlpga(RAX,ECX); }
@endif
:INVPCID r32, m128 is vexMode=0 & addrsize=1 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x82; r32 ... & m128 { invpcid(r32, m128); }
@ifdef IA64
:INVPCID r64, m128 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x82; r64 ...
& m128 { invpcid(r64, m128); }
@endif

# IRET/IRETD/IRETQ: pop return IP, CS, and flags at the selected stack width.
:IRET is vexMode=0 & addrsize=0 & opsize=0 & byte=0xcf { pop22(IP); EIP=zext(IP); pop22(CS); pop22(flags); return [EIP]; }
:IRET is vexMode=0 & addrsize=1 & opsize=0 & byte=0xcf { pop42(IP); EIP=zext(IP); pop42(CS); pop42(flags); return [EIP]; }
@ifdef IA64
:IRET is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xcf { pop82(IP); RIP=zext(IP); pop82(CS); pop82(flags); return [RIP]; }
@endif
:IRETD is vexMode=0 & addrsize=0 & opsize=1 & byte=0xcf { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); pop24(tmp); flags=tmp(0); return [EIP]; }
:IRETD is vexMode=0 & addrsize=1 & opsize=1 & byte=0xcf { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); pop44(eflags); return [EIP]; }
@ifdef IA64
:IRETD is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xcf { pop84(RIP); tmp:8=0; pop84(tmp); CS=tmp(0); pop84(eflags); return [RIP]; }
:IRETQ is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xcf { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); pop88(rflags); return [RIP]; }
@endif

# Conditional jumps: short (rel8) and near (rel16/rel32) displacement forms.
:J^cc rel8 is vexMode=0 & row=7 & cc; rel8 { if (cc) goto rel8; }
:J^cc rel16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; row=8 & cc; rel16 { if (cc) goto rel16; }
:J^cc rel32 is vexMode=0 & opsize=1 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
:J^cc rel32 is vexMode=0 & opsize=2 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
# The following is picked up by the line above; rel32 works for both 32 and 64 bit.
#@ifdef IA64
#:J^cc rel32 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; row=8 & cc; rel32 { if (cc) goto rel32; }
#@endif

# JCXZ/JECXZ/JRCXZ: jump when the count register (width chosen by addrsize) is zero.
:JCXZ rel8 is vexMode=0 & addrsize=0 & byte=0xe3; rel8 { if (CX==0) goto rel8; }
:JECXZ rel8 is vexMode=0 & addrsize=1 & byte=0xe3; rel8 { if (ECX==0) goto rel8; }
@ifdef IA64
:JRCXZ rel8 is $(LONGMODE_ON) & addrsize=2 & vexMode=0 & byte=0xe3; rel8 { if (RCX==0) goto rel8; }
@endif

# Unconditional jumps: relative, register/memory indirect, and far forms.
:JMP rel8 is vexMode=0 & byte=0xeb; rel8 { goto rel8; }
:JMP rel16 is vexMode=0 & opsize=0 & byte=0xe9; rel16 { goto rel16; }
:JMP rel32 is vexMode=0 & opsize=1 & byte=0xe9; rel32 { goto rel32; }
:JMP rel32 is vexMode=0 & opsize=2 & byte=0xe9; rel32 { goto rel32; }
:JMP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=4 ... { target:4 = segment(currentCS,rm16); goto [target]; }
:JMP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=4 ... { goto [rm16]; }
:JMP rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=4 ... { goto [rm32]; }
@ifdef IA64
:JMP rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xff & currentCS; rm16 & reg_opcode=4 ... { goto [rm16]; }
:JMP rm64 is $(LONGMODE_ON) & vexMode=0 & byte=0xff; rm64 & reg_opcode=4 ... { goto [rm64]; }
@endif
# JMPF: far jump; the memory forms read the offset part of the far pointer.
:JMPF ptr1616 is vexMode=0 & opsize=0 & byte=0xea; ptr1616 { goto ptr1616; }
:JMPF ptr1632 is vexMode=0 & opsize=1 & byte=0xea; ptr1632 { goto ptr1632; }
:JMPF Mem is vexMode=0 & opsize=0 & byte=0xff; Mem & reg_opcode=5 ... { target:$(SIZE) = zext(*:2 Mem); goto [target]; }
:JMPF Mem is vexMode=0 & opsize=1 & byte=0xff; Mem & reg_opcode=5 ... {
@ifdef IA64
target:$(SIZE) = zext(*:4 Mem);
@else
target:$(SIZE) = *:4 Mem;
@endif
goto [target]; }
@ifdef IA64
:JMPF Mem is vexMode=0 & opsize=2 & byte=0xff; Mem & reg_opcode=5 ...
{ target:$(SIZE) = *:8 Mem; goto [target]; }
@endif

# LAHF: pack SF/ZF/AF/PF/CF into AH (bit 1 always reads as set).
# Initially disallowed in 64bit mode, but later reintroduced
:LAHF is vexMode=0 & byte=0x9f { AH=(SF<<7)|(ZF<<6)|(AF<<4)|(PF<<2)|2|CF; }

# LAR: load access rights; modeled as masking descriptor bytes with ZF forced set.
:LAR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x2; rm16 & Reg16 ... { Reg16 = rm16 & 0xff00; ZF=1; }
:LAR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x2; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32 & 0xffff00; build check_Reg32_dest; ZF=1; }
@ifdef IA64
:LAR Reg64,rm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x2; rm32 & Reg64 ... { Reg64 = zext( rm32 & 0xffff00 ); ZF=1; }
@endif
:LDMXCSR m32 is vexMode=0 & byte=0xf; byte=0xae; ( mod != 0b11 & reg_opcode=2 ) ... & m32 { MXCSR = m32; }

# Far-pointer loads (LDS/LSS/LES/LFS/LGS): read offset plus segment
# selector from memory into Reg and the named segment register.
# 16 & 32-bit only
:LDS Reg16,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xC5; Mem & Reg16 ... { tmp:4 = *Mem; DS = tmp(2); Reg16 = tmp(0); }
:LDS Reg32,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xC5 & bit64=0; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; DS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
:LSS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB2; Mem & Reg16 ... { tmp:4 = *Mem; SS = tmp(2); Reg16 = tmp(0); }
:LSS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB2; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; SS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@ifdef IA64
:LSS Reg64,Mem is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xB2; Mem & Reg64 ... { tmp:10 = *Mem; SS = tmp(8); Reg64 = tmp(0); }
@endif
# 16 & 32-bit only
:LES Reg16,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xC4; Mem & Reg16 ... { tmp:4 = *Mem; ES = tmp(2); Reg16 = tmp(0); }
:LES Reg32,Mem is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xC4 & bit64=0; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; ES = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
:LFS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB4; Mem & Reg16 ... { tmp:4 = *Mem; FS = tmp(2); Reg16 = tmp(0); }
:LFS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB4; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; FS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@ifdef IA64
:LFS Reg64,Mem is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xB4; Mem & Reg64 ... { tmp:10 = *Mem; FS = tmp(8); Reg64 = tmp(0); }
@endif
:LGS Reg16,Mem is vexMode=0 & opsize=0 & byte=0x0F; byte=0xB5; Mem & Reg16 ... { tmp:4 = *Mem; GS = tmp(2); Reg16 = tmp(0); }
:LGS Reg32,Mem is vexMode=0 & opsize=1 & byte=0x0F; byte=0xB5; Mem & Reg32 ... & check_Reg32_dest ... { tmp:6 = *Mem; GS = tmp(4); Reg32 = tmp(0); build check_Reg32_dest; }
@ifdef IA64
:LGS Reg64,Mem is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xB5; Mem & Reg64 ... { tmp:10 = *Mem; GS = tmp(8); Reg64 = tmp(0); }
@endif

# LEA: compute the effective address without any memory access.
#in 64-bit mode address size of 16 is not encodable
:LEA Reg16,addr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & addrsize=0 & byte=0x8D; addr16 & Reg16 ... { Reg16 = addr16; }
:LEA Reg32,addr16 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & addrsize=0 & byte=0x8D; addr16 & Reg32 ... { Reg32 = zext(addr16); }
:LEA Reg16,addr32 is vexMode=0 & opsize=0 & addrsize=1 & byte=0x8D; addr32 & Reg16 ... { Reg16 = addr32(0); }
:LEA Reg32,addr32 is vexMode=0 & opsize=1 & addrsize=1 & byte=0x8D; addr32 & Reg32 ... & check_Reg32_dest ... { Reg32 = addr32; build check_Reg32_dest; }
@ifdef IA64
:LEA Reg16,addr64 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & addrsize=2 & byte=0x8D; addr64 & Reg16 ... { Reg16 = addr64(0); }
:LEA Reg32,addr64 is $(LONGMODE_ON) & vexMode=0 & opsize=1 & addrsize=2 & byte=0x8D; addr64 & Reg32 ... & check_Reg32_dest ... { Reg32 = addr64(0); build check_Reg32_dest; }
:LEA Reg64,addr32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & addrsize=1 & byte=0x8D; addr32 & Reg64 ... { Reg64 = zext(addr32); }
:LEA Reg64,addr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & addrsize=2 & byte=0x8D; addr64 & Reg64 ...
{ Reg64 = addr64; }
@endif

# LEAVE: restore caller frame (SP <- BP, then pop BP) at each stack width.
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xc9 { SP = BP; pop22(BP); }
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xc9 { ESP = EBP; pop24(EBP); }
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc9 { ESP = EBP; pop44(EBP); }
:LEAVE is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc9 { ESP = EBP; pop42(BP); }
@ifdef IA64
:LEAVE is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xc9 { RSP = RBP; pop82(BP); }
:LEAVE is $(LONGMODE_ON) & vexMode=0 & byte=0xc9 { RSP = RBP; pop88(RBP); }
@endif

# Descriptor-table register loads are modeled as opaque pcodeops.
define pcodeop GlobalDescriptorTableRegister;
:LGDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m16 { GlobalDescriptorTableRegister(m16); }
:LGDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m32 { GlobalDescriptorTableRegister(m32); }
@ifdef IA64
:LGDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=2 ) ... & m64 { GlobalDescriptorTableRegister(m64); }
@endif
define pcodeop InterruptDescriptorTableRegister;
:LIDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m16 { InterruptDescriptorTableRegister(m16); }
:LIDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m32 { InterruptDescriptorTableRegister(m32); }
@ifdef IA64
:LIDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=3 ) ... & m64 { InterruptDescriptorTableRegister(m64); }
@endif
define pcodeop LocalDescriptorTableRegister;
:LLDT rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=2 ... { LocalDescriptorTableRegister(rm16); }

# LMSW: load the low 4 bits of the machine status word into CR0
# (mask width depends on CR0 being 8 vs 4 bytes).
@ifdef IA64
:LMSW rm16 is vexMode=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=6 ... { CR0 = (CR0 & 0xFFFFFFFFFFFFFFF0) | zext(rm16 & 0x000F); }
@else
:LMSW rm16 is vexMode=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=6 ... { CR0 = (CR0 & 0xFFFFFFF0) | zext(rm16 & 0x000F); }
@endif

# LODS: load string element from DS:(E/R)SI into the accumulator, REP-capable.
:LODSB^rep^reptail dseSI1 is vexMode=0 & rep & reptail & byte=0xAC & dseSI1 { build rep; build dseSI1; AL=dseSI1; build reptail; }
:LODSW^rep^reptail dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xAD & dseSI2 { build rep; build dseSI2; AX=dseSI2; build reptail; }
:LODSD^rep^reptail dseSI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xAD & dseSI4 { build rep; build dseSI4; EAX=dseSI4; build reptail; }
@ifdef IA64
:LODSQ^rep^reptail dseSI8 is $(LONGMODE_ON) & vexMode=0 & rep & reptail & opsize=2 & byte=0xAD & dseSI8 { build rep; build dseSI8; RAX=dseSI8; build reptail; }
@endif

# LOOP/LOOPZ/LOOPNZ: decrement the count register and branch while it is
# nonzero (LOOPZ/LOOPNZ additionally test ZF).
:LOOP rel8 is vexMode=0 & addrsize=0 & byte=0xE2; rel8 { CX = CX -1; if (CX!=0) goto rel8; }
:LOOP rel8 is vexMode=0 & addrsize=1 & byte=0xE2; rel8 { ECX = ECX -1; if (ECX!=0) goto rel8; }
@ifdef IA64
:LOOP rel8 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xE2; rel8 { RCX = RCX -1; if (RCX!=0) goto rel8; }
@endif
:LOOPZ rel8 is vexMode=0 & addrsize=0 & byte=0xE1; rel8 { CX = CX -1; if (CX!=0 && ZF!=0) goto rel8; }
:LOOPZ rel8 is vexMode=0 & addrsize=1 & byte=0xE1; rel8 { ECX = ECX -1; if (ECX!=0 && ZF!=0) goto rel8; }
@ifdef IA64
:LOOPZ rel8 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xE1; rel8 { RCX = RCX -1; if (RCX!=0 && ZF!=0) goto rel8; }
@endif
:LOOPNZ rel8 is vexMode=0 & addrsize=0 & byte=0xE0; rel8 { CX = CX -1; if (CX!=0 && ZF==0) goto rel8; }
:LOOPNZ rel8 is vexMode=0 & addrsize=1 & byte=0xE0; rel8 { ECX = ECX -1; if (ECX!=0 && ZF==0) goto rel8; }
@ifdef IA64
:LOOPNZ rel8 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xE0; rel8 { RCX = RCX -1; if (RCX!=0 && ZF==0) goto rel8; }
@endif

# LSL: load segment limit, modeled via the SegmentLimit pcodeop.
define pcodeop SegmentLimit;
:LSL Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x3; rm16 & Reg16 ...
{ tmp:3 = SegmentLimit(rm16); Reg16 = tmp:2; ZF = tmp[16,1]; }
:LSL Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x3; rm32 & Reg32 ... { tmp:3 = SegmentLimit(rm32); Reg32 = zext(tmp:2); ZF = tmp[16,1]; }
@ifdef IA64
:LSL Reg64,rm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x3; rm32 & Reg64 ... { tmp:3 = SegmentLimit(rm32); Reg64 = zext(tmp:2); ZF = tmp[16,1]; }
@endif
define pcodeop TaskRegister;
:LTR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=3 ... { TaskRegister(rm16); }

# MOV register/memory forms; the ^xrelease variants participate in the
# XRELEASE-prefixed encodings.
:MOV Rmr8,Reg8 is vexMode=0 & byte=0x88; mod=3 & Rmr8 & Reg8 { Rmr8=Reg8; }
:MOV^xrelease m8,Reg8 is vexMode=0 & xrelease & byte=0x88; m8 & Reg8 ... { build xrelease; build m8; m8=Reg8; }
:MOV Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x89; mod=3 & Rmr16 & Reg16 { Rmr16=Reg16; }
:MOV^xrelease m16,Reg16 is vexMode=0 & xrelease & opsize=0 & byte=0x89; m16 & Reg16 ... { build xrelease; build m16; m16=Reg16; }
:MOV Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x89; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { Rmr32=Reg32; build check_Rmr32_dest; }
:MOV^xrelease m32,Reg32 is vexMode=0 & xrelease & opsize=1 & byte=0x89; m32 & Reg32 ... { build xrelease; build m32; m32=Reg32; }
@ifdef IA64
:MOV Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x89; mod=3 & Rmr64 & Reg64 { Rmr64=Reg64; }
:MOV^xrelease m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & xrelease & opsize=2 & byte=0x89; m64 & Reg64 ... { build xrelease; build m64; m64=Reg64; }
@endif
:MOV Reg8,rm8 is vexMode=0 & byte=0x8a; rm8 & Reg8 ... { Reg8 = rm8; }
:MOV Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x8b; rm16 & Reg16 ... { Reg16 = rm16; }
:MOV Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x8b; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32; build check_Reg32_dest; }
@ifdef IA64
:MOV Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x8b; rm64 & Reg64 ... { Reg64 = rm64; }
@endif
# Segment register moves.
:MOV rm16,Sreg is vexMode=0 & byte=0x8c; rm16 & Sreg ... { rm16 = Sreg; }
:MOV Sreg,rm16 is vexMode=0 & byte=0x8e; rm16 & Sreg ... { Sreg=rm16; }
# Accumulator <-> memory-offset forms.
:MOV AL,moffs8 is vexMode=0 & byte=0xa0; AL & moffs8 { AL=moffs8; }
:MOV AX,moffs16 is vexMode=0 & opsize=0 & byte=0xa1; AX & moffs16 { AX=moffs16; }
:MOV EAX,moffs32 is vexMode=0 & opsize=1 & byte=0xa1; EAX & check_EAX_dest & moffs32 { EAX=moffs32; build check_EAX_dest; }
@ifdef IA64
:MOV RAX,moffs64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xa1; RAX & moffs64 { RAX=moffs64; }
@endif
:MOV moffs8,AL is vexMode=0 & byte=0xa2; AL & moffs8 { moffs8=AL; }
:MOV moffs16,AX is vexMode=0 & opsize=0 & byte=0xa3; AX & moffs16 { moffs16=AX; }
:MOV moffs32,EAX is vexMode=0 & opsize=1 & byte=0xa3; EAX & moffs32 { moffs32=EAX; }
@ifdef IA64
:MOV moffs64,RAX is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xa3; RAX & moffs64 { moffs64=RAX; }
@endif
# Immediate-to-register short forms (0xb0+r / 0xb8+r).
:MOV CRmr8,imm8 is vexMode=0 & row=11 & page=0 & CRmr8; imm8 { CRmr8 = imm8; }
:MOV CRmr16,imm16 is vexMode=0 & opsize=0 & row=11 & page=1 & CRmr16; imm16 { CRmr16 = imm16; }
:MOV CRmr32,imm32 is vexMode=0 & opsize=1 & row=11 & page=1 & CRmr32; imm32 { CRmr32 = imm32; }
@ifdef IA64
:MOV Rmr64,imm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & row=11 & page=1 & Rmr64; imm64 { Rmr64 = imm64; }
@endif
# Immediate-to-r/m forms (0xc6 / 0xc7, /0).
:MOV Rmr8,imm8 is vexMode=0 & byte=0xc6; (mod=3 & Rmr8 & reg_opcode=0); imm8 { Rmr8 = imm8; }
:MOV^xrelease m8,imm8 is vexMode=0 & xrelease & byte=0xc6; m8 & reg_opcode=0 ...; imm8 { build xrelease; build m8; m8 = imm8; }
:MOV Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0xc7; (mod=3 & Rmr16 & reg_opcode=0); imm16 { Rmr16 = imm16; }
:MOV^xrelease m16,imm16 is vexMode=0 & xrelease & opsize=0 & byte=0xc7; m16 & reg_opcode=0 ...; imm16 { build xrelease; build m16; m16 = imm16; }
:MOV Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0xc7; (mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=0); imm32 { Rmr32 = imm32; build check_Rmr32_dest; }
:MOV^xrelease m32,imm32 is vexMode=0 & xrelease & opsize=1 & byte=0xc7; (m32 & reg_opcode=0 ...); imm32 { build xrelease;
build m32; m32 = imm32; }
@ifdef IA64
:MOV Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xc7; (mod=3 & Rmr64 & reg_opcode=0); simm32 { Rmr64 = simm32; }
:MOV^xrelease m64,simm32 is $(LONGMODE_ON) & vexMode=0 & xrelease & opsize=2 & byte=0xc7; (m64 & reg_opcode=0 ...); simm32 { build xrelease; build m64; m64 = simm32; }
@endif

# Control-register moves; creg is 8 bytes under IA64, 4 bytes otherwise,
# hence the conditional zext/truncation.
:MOV creg, Rmr32 is vexMode=0 & byte=0xf; byte=0x22; Rmr32 & creg {
@ifdef IA64
creg=zext(Rmr32);
@else
creg=Rmr32;
@endif
}
@ifdef IA64
:MOV creg, Rmr64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x22; Rmr64 & creg { creg=Rmr64; }
:MOV creg_x, Rmr64 is $(LONGMODE_ON) & vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x22; Rmr64 & creg_x { creg_x=Rmr64; }
@endif
:MOV Rmr32, creg is $(LONGMODE_OFF) & vexMode=0 & byte=0xf; byte=0x20; Rmr32 & creg {
@ifdef IA64
Rmr32 = creg:4;
@else
Rmr32 = creg;
@endif
}
@ifdef IA64
:MOV Rmr64, creg is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x20; Rmr64 & creg { Rmr64 = creg; }
:MOV Rmr64, creg_x is $(LONGMODE_ON) & vexMode=0 & rexRprefix=1 & byte=0xf; byte=0x20; Rmr64 & creg_x { Rmr64 = creg_x; }
@endif

# Debug-register moves, same width handling as the control registers.
:MOV Rmr32, debugreg is $(LONGMODE_OFF) & vexMode=0 & byte=0xf; byte=0x21; Rmr32 & debugreg {
@ifdef IA64
Rmr32 = debugreg:4;
@else
Rmr32 = debugreg;
@endif
}
@ifdef IA64
:MOV Rmr64, debugreg is $(LONGMODE_ON) & vexMode=0 & bit64=1 & byte=0xf; byte=0x21; Rmr64 & debugreg { Rmr64 = debugreg; }
:MOV Rmr64, debugreg_x is $(LONGMODE_ON) & vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x21; Rmr64 & debugreg_x { Rmr64 = debugreg_x; }
@endif
:MOV debugreg, Rmr32 is $(LONGMODE_OFF) & vexMode=0 & byte=0xf; byte=0x23; Rmr32 & debugreg {
@ifdef IA64
debugreg = zext(Rmr32);
@else
debugreg = Rmr32;
@endif
}
@ifdef IA64
:MOV debugreg, Rmr64 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & byte=0xf; byte=0x23; Rmr64 & debugreg & mod=3 { debugreg = Rmr64; }
:MOV debugreg_x, Rmr64 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & rexRprefix=1 & byte=0xf; byte=0x23; Rmr64 & debugreg_x & mod=3 { debugreg_x = Rmr64; }
@endif
@ifndef IA64
# These are obsolete instructions after the 486 generation.
:MOV r32, testreg is vexMode=0 & byte=0xf; byte=0x24; r32 & testreg & mod=3 { r32 = testreg; }
:MOV testreg, r32 is vexMode=0 & byte=0xf; byte=0x26; r32 & testreg & mod=3 { testreg = r32; }
@endif

# MOVBE: move with byte swap, modeled via the swap_bytes pcodeop.
define pcodeop swap_bytes;
:MOVBE Reg16, m16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x38; byte=0xf0; Reg16 ... & m16 { Reg16 = swap_bytes( m16 ); }
:MOVBE Reg32, m32 is vexMode=0 & opsize=1 & mandover=0 & byte=0xf; byte=0x38; byte=0xf0; Reg32 ... & m32 { Reg32 = swap_bytes( m32 ); }
:MOVBE m16, Reg16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x38; byte=0xf1; Reg16 ... & m16 { m16 = swap_bytes( Reg16 ); }
:MOVBE m32, Reg32 is vexMode=0 & opsize=1 & mandover=0 & byte=0xf; byte=0x38; byte=0xf1; Reg32 ... & m32 { m32 = swap_bytes( Reg32 ); }
@ifdef IA64
:MOVBE Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0xf; byte=0x38; byte=0xf0; Reg64 ... & m64 { Reg64 = swap_bytes( m64 ); }
:MOVBE m64, Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0xf; byte=0x38; byte=0xf1; Reg64 ... & m64 { m64 = swap_bytes( Reg64 ); }
@endif
# MOVNTI: non-temporal store, modeled as a plain store.
:MOVNTI Mem,Reg32 is vexMode=0 & opsize = 1; byte=0xf; byte=0xc3; Mem & Reg32 ... { *Mem = Reg32; }
@ifdef IA64
:MOVNTI Mem,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize = 2; byte=0xf; byte=0xc3; Mem & Reg64 ...
{ *Mem = Reg64; }
@endif

# MOVS: memory-to-memory string move, REP-capable.
:MOVSB^rep^reptail eseDI1,dseSI1 is vexMode=0 & rep & reptail & byte=0xa4 & eseDI1 & dseSI1 { build rep; build eseDI1; build dseSI1; eseDI1 = dseSI1; build reptail; }
:MOVSW^rep^reptail eseDI2,dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xa5 & eseDI2 & dseSI2 { build rep; build eseDI2; build dseSI2; eseDI2 = dseSI2; build reptail; }
:MOVSD^rep^reptail eseDI4,dseSI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xa5 & eseDI4 & dseSI4 { build rep; build eseDI4; build dseSI4; eseDI4 = dseSI4; build reptail; }
@ifdef IA64
:MOVSQ^rep^reptail eseDI8,dseSI8 is $(LONGMODE_ON) & vexMode=0 & rep & reptail & opsize=2 & byte=0xa5 & eseDI8 & dseSI8 { build rep; build eseDI8; build dseSI8; eseDI8 = dseSI8; build reptail; }
@endif

# MOVSX / MOVSXD: sign-extending loads.
:MOVSX Reg16,rm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbe; rm8 & Reg16 ... { Reg16 = sext(rm8); }
:MOVSX Reg32,rm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbe; rm8 & Reg32 ... & check_Reg32_dest ... { Reg32 = sext(rm8); build check_Reg32_dest; }
@ifdef IA64
:MOVSX Reg64,rm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbe; rm8 & Reg64 ... { Reg64 = sext(rm8); }
@endif
:MOVSX Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xbf; rm16 & Reg16 ... { Reg16 = rm16; }
:MOVSX Reg32,rm16 is vexMode=0 & opsize=1 & byte=0xf; byte=0xbf; rm16 & Reg32 ... & check_Reg32_dest ... { Reg32 = sext(rm16); build check_Reg32_dest; }
@ifdef IA64
:MOVSX Reg64,rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xbf; rm16 & Reg64 ... { Reg64 = sext(rm16); }
@endif
:MOVSXD Reg32,rm32 is vexMode=0 & bit64=1 & opsize=1 & byte=0x63; rm32 & Reg32 ... & check_Reg32_dest ... { Reg32 = rm32; build check_Reg32_dest; }
@ifdef IA64
:MOVSXD Reg64,rm32 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & opsize=2 & byte=0x63; rm32 & Reg64 ... { Reg64 = sext(rm32); }
@endif

# MOVZX: zero-extending loads.
:MOVZX Reg16,rm8 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb6; rm8 & Reg16 ... { Reg16 = zext(rm8); }
:MOVZX Reg32,rm8 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb6; rm8 & Reg32 ... & check_Reg32_dest ... { Reg32 = zext(rm8); build check_Reg32_dest; }
@ifdef IA64
:MOVZX Reg64,rm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb6; rm8 & Reg64 ... { Reg64 = zext(rm8); }
@endif
:MOVZX Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0xb7; rm16 & Reg16 ... { Reg16 = rm16; }
:MOVZX Reg32,rm16 is vexMode=0 & opsize=1 & byte=0xf; byte=0xb7; rm16 & Reg32 ... & check_Reg32_dest ... { Reg32 = zext(rm16); build check_Reg32_dest; }
@ifdef IA64
:MOVZX Reg64,rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0xb7; rm16 & Reg64 ... { Reg64 = zext(rm16); }
@endif

# MUL: unsigned multiply; double-width product is split across DX:AX
# (or EDX:EAX / RDX:RAX), flags via multflags() on the high half.
:MUL rm8 is vexMode=0 & byte=0xf6; rm8 & reg_opcode=4 ... { AX=zext(AL)*zext(rm8); multflags(AH); }
:MUL rm16 is vexMode=0 & opsize=0 & byte=0xf7; rm16 & reg_opcode=4 ... { tmp:4=zext(AX)*zext(rm16); DX=tmp(2); AX=tmp(0); multflags(DX); }
:MUL rm32 is vexMode=0 & opsize=1 & byte=0xf7; rm32 & check_EAX_dest ... & check_EDX_dest ... & reg_opcode=4 ... { tmp:8=zext(EAX)*zext(rm32); EDX=tmp(4); build check_EDX_dest; multflags(EDX); EAX=tmp(0); build check_EAX_dest; }
@ifdef IA64
:MUL rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; rm64 & reg_opcode=4 ...
{ tmp:16=zext(RAX)*zext(rm64); RDX=tmp(8); RAX=tmp(0); multflags(RDX); }
@endif

# Monitor/wait hint instructions, modeled as opaque calls.
:MWAIT is vexMode=0 & byte=0x0f; byte=0x01; byte=0xC9 { mwait(); }
:MWAITX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xFB { mwaitx(); }
:MONITOR is vexMode=0 & byte=0x0f; byte=0x01; byte=0xC8 { monitor(); }
:MONITORX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xFA { monitorx(); }

# NEG: two's-complement negate, register-direct forms.
# See 'lockable.sinc' for memory destination, lockable variants
:NEG Rmr8 is vexMode=0 & byte=0xf6; mod=3 & Rmr8 & reg_opcode=3 { negflags(Rmr8); Rmr8 = -Rmr8; resultflags(Rmr8 ); }
:NEG Rmr16 is vexMode=0 & opsize=0 & byte=0xf7; mod=3 & Rmr16 & reg_opcode=3 { negflags(Rmr16); Rmr16 = -Rmr16; resultflags(Rmr16); }
:NEG Rmr32 is vexMode=0 & opsize=1 & byte=0xf7; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=3 { negflags(Rmr32); Rmr32 = -Rmr32; resultflags(Rmr32); build check_Rmr32_dest;}
@ifdef IA64
:NEG Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; mod=3 & Rmr64 & reg_opcode=3 { negflags(Rmr64); Rmr64 = -Rmr64; resultflags(Rmr64); }
@endif

# NOP and multi-byte NOP / reserved-hint encodings; all have empty semantics.
:NOP is vexMode=0 & byte=0x90 & (mandover=0 | mandover=4 | mandover=1) & (rexprefix=0 | rexWRXBprefix=8) { }
:NOP rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; high5=3; rm16 ... { }
:NOP rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; high5=3; rm32 ... { }
:NOP^"/reserved" rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; byte=0x18; rm16 & reg_opcode_hb=1 ... { }
:NOP^"/reserved" rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; byte=0x18; rm32 & reg_opcode_hb=1 ... { }
:NOP rm16 is vexMode=0 & mandover & opsize=0 & byte=0x0f; byte=0x1f; rm16 & reg_opcode=0 ... { }
:NOP rm32 is vexMode=0 & mandover & opsize=1 & byte=0x0f; byte=0x1f; rm32 & reg_opcode=0 ... { }

# NOT: bitwise complement, register-direct forms (no flags affected here).
# See 'lockable.sinc' for memory destination, lockable variants
:NOT Rmr8 is vexMode=0 & byte=0xf6; mod=3 & Rmr8 & reg_opcode=2 { Rmr8 = ~Rmr8; }
:NOT Rmr16 is vexMode=0 & opsize=0 & byte=0xf7; mod=3 & Rmr16 & reg_opcode=2 { Rmr16 = ~Rmr16; }
:NOT Rmr32 is vexMode=0 & opsize=1 & byte=0xf7; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=2 { Rmr32 = ~Rmr32; build check_Rmr32_dest;}
@ifdef IA64
:NOT Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf7; mod=3 & Rmr64 & reg_opcode=2 { Rmr64 = ~Rmr64; }
@endif

# OR: accumulator-immediate and r/m-immediate forms.
# See 'lockable.sinc' for memory destination, lockable variants
:OR AL,imm8 is vexMode=0 & byte=0x0c; AL & imm8 { logicalflags(); AL = AL | imm8; resultflags( AL); }
:OR AX,imm16 is vexMode=0 & opsize=0 & byte=0xd; AX & imm16 { logicalflags(); AX = AX | imm16; resultflags( AX); }
:OR EAX,imm32 is vexMode=0 & opsize=1 & byte=0xd; EAX & check_EAX_dest & imm32 { logicalflags(); EAX = EAX | imm32; build check_EAX_dest; resultflags( EAX); }
@ifdef IA64
:OR RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xd; RAX & simm32 { logicalflags(); RAX = RAX | simm32; resultflags( RAX); }
@endif
:OR Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=1; imm8 { logicalflags(); Rmr8 = Rmr8 | imm8; resultflags( Rmr8); }
:OR Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=1; imm16 { logicalflags(); Rmr16 = Rmr16 | imm16; resultflags( Rmr16); }
:OR Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=1; imm32 { logicalflags(); Rmr32 = Rmr32 | imm32; build check_rm32_dest; resultflags( Rmr32); }
@ifdef IA64
:OR Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=1; simm32 { logicalflags(); tmp:8 = Rmr64; Rmr64 = tmp | simm32; resultflags( Rmr64); }
@endif
:OR Rmr16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=1; usimm8_16 { logicalflags(); Rmr16 = Rmr16 | usimm8_16; resultflags( Rmr16); }
:OR Rmr32,usimm8_32 is
vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=1; usimm8_32 { logicalflags(); Rmr32 = Rmr32 | usimm8_32; build check_rm32_dest; resultflags( Rmr32); }
@ifdef IA64
:OR Rmr64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=1; usimm8_64 { logicalflags(); Rmr64 = Rmr64 | usimm8_64; resultflags( Rmr64); }
@endif
# OR: register-to-register and register-from-memory forms.
:OR Rmr8,Reg8 is vexMode=0 & byte=0x8; mod=3 & Rmr8 & Reg8 { logicalflags(); Rmr8 = Rmr8 | Reg8; resultflags( Rmr8); }
:OR Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x9; mod=3 & Rmr16 & Reg16 { logicalflags(); Rmr16 = Rmr16 | Reg16; resultflags( Rmr16); }
:OR Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x9; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { logicalflags(); Rmr32 = Rmr32 | Reg32; build check_Rmr32_dest; resultflags( Rmr32); }
@ifdef IA64
:OR Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x9; mod=3 & Rmr64 & Reg64 { logicalflags(); Rmr64 = Rmr64 | Reg64; resultflags( Rmr64); }
@endif
:OR Reg8,rm8 is vexMode=0 & byte=0xa; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 | rm8; resultflags( Reg8); }
:OR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0xb; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 | rm16; resultflags(Reg16); }
:OR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0xb; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 | rm32; build check_Reg32_dest; resultflags(Reg32); }
@ifdef IA64
:OR Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xb; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 | rm64; resultflags(Reg64); }
@endif

# OUT/OUTS: write the accumulator or DS:(E)SI string element to an I/O port.
:OUT imm8,AL is vexMode=0 & byte=0xe6; imm8 & AL { tmp:1 = imm8; out(tmp,AL); }
:OUT imm8,AX is vexMode=0 & opsize=0 & byte=0xe7; imm8 & AX { tmp:1 = imm8; out(tmp,AX); }
:OUT imm8,EAX is vexMode=0 & byte=0xe7; imm8 & EAX { tmp:1 = imm8; out(tmp,EAX); }
:OUT DX,AL is vexMode=0 & byte=0xee & DX & AL { out(DX,AL); }
:OUT DX,AX is vexMode=0 & opsize=0 & byte=0xef & DX & AX { out(DX,AX); }
:OUT DX,EAX is vexMode=0 & byte=0xef & DX & EAX { out(DX,EAX); }
:OUTSB^rep^reptail DX,dseSI1 is vexMode=0 & rep & reptail & byte=0x6e & DX & dseSI1 { build rep; build dseSI1; out(dseSI1,DX); build reptail;}
:OUTSW^rep^reptail DX,dseSI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0x6f & DX & dseSI2 { build rep; build dseSI2; out(dseSI2,DX); build reptail;}
:OUTSD^rep^reptail DX,dseSI4 is vexMode=0 & rep & reptail & byte=0x6f & DX & dseSI4 { build rep; build dseSI4; out(dseSI4,DX); build reptail;}

# PAUSE: F3 90, spin-wait hint; no semantic effect.
:PAUSE is vexMode=0 & opsize=0 & $(PRE_F3) & byte=0x90 { }
:PAUSE is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x90 { }

# POP r/m: pop at the selected stack width, then store through the
# r/m operand (built after the pop so SP-relative operands see the new SP).
:POP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { local val:2 = 0; pop22(val); build rm16; rm16 = val; }
:POP rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { local val:2 = 0; pop42(val); build rm16; rm16 = val; }
:POP rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x8f; rm32 & reg_opcode=0 ... { local val:4 = 0; pop24(val); build rm32; rm32 = val; }
:POP rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x8f; rm32 & reg_opcode=0 ... { local val:4 = 0; pop44(val); build rm32; rm32 = val; }
@ifdef IA64
:POP rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x8f; rm16 & reg_opcode=0 ... { local val:2 = 0; pop82(val); build rm16; rm16 = val; }
:POP rm64 is $(LONGMODE_ON) & vexMode=0 & byte=0x8f; rm64 & reg_opcode=0 ...
{ local val:8 = 0; pop88(val); build rm64; rm64 = val; }
@endif
# POP to register short forms (0x58+r).
:POP Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & row=5 & page=1 & Rmr16 { local val:2 = 0; pop22(val); Rmr16 = val; }
:POP Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & row=5 & page=1 & Rmr16 { local val:2 = 0; pop42(val); Rmr16 = val; }
:POP Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & row=5 & page=1 & Rmr32 { local val:4 = 0; pop24(val); Rmr32 = val; }
:POP Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & row=5 & page=1 & Rmr32 { local val:4 = 0; pop44(val); Rmr32 = val; }
@ifdef IA64
:POP Rmr16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & row=5 & page=1 & Rmr16 { local val:2 = 0; pop82(val); Rmr16 = val; }
:POP Rmr64 is $(LONGMODE_ON) & vexMode=0 & row=5 & page=1 & Rmr64 { local val:8 = 0; pop88(val); Rmr64 = val; }
@endif
# POP segment registers.
:POP DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x1f & DS { pop22(DS); }
:POP DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x1f & DS { popseg44(DS); }
:POP ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x7 & ES { pop22(ES); }
:POP ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x7 & ES { popseg44(ES); }
:POP SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x17 & SS { pop22(SS); }
:POP SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x17 & SS { popseg44(SS); }
:POP FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa1 & FS { pop22(FS); }
:POP FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa1 & FS { popseg44(FS); }
@ifdef IA64
:POP FS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa1 & FS { popseg88(FS); }
@endif
:POP GS is vexMode=0 & addrsize=0 & byte=0xf; byte=0xa9 & GS { pop22(GS); }
:POP GS is vexMode=0 & addrsize=1 & byte=0xf; byte=0xa9 & GS { popseg44(GS); }
@ifdef IA64
:POP GS is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0xf; byte=0xa9 & GS { popseg88(GS); }
@endif

# POPA/POPAD: pop all general registers; the stacked SP value is discarded
# into a temporary.
:POPA is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x61 { pop22(DI); pop22(SI); pop22(BP); tmp:2=0; pop22(tmp); pop22(BX); pop22(DX); pop22(CX); pop22(AX); }
:POPA is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x61 { pop42(DI); pop42(SI); pop42(BP); tmp:2=0; pop42(tmp); pop42(BX); pop42(DX); pop42(CX); pop42(AX); }
:POPAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x61 { pop24(EDI); pop24(ESI); pop24(EBP); tmp:4=0; pop24(tmp); pop24(EBX); pop24(EDX); pop24(ECX); pop24(EAX); }
:POPAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x61 { pop44(EDI); pop44(ESI); pop44(EBP); tmp:4=0; pop44(tmp); pop44(EBX); pop44(EDX); pop44(ECX); pop44(EAX); }

# POPF/POPFD/POPFQ: pop the flags image and scatter it into the flag bits.
:POPF is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x9d { pop22(flags); unpackflags(flags); }
:POPF is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x9d { pop42(flags); unpackflags(flags); }
:POPFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x9d { pop24(eflags); unpackflags(eflags); unpackeflags(eflags); }
:POPFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x9d { pop44(eflags); unpackflags(eflags); unpackeflags(eflags); }
@ifdef IA64
:POPF is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x9d { pop82(flags); unpackflags(flags); }
:POPFQ is $(LONGMODE_ON) & vexMode=0 & byte=0x9d { pop88(rflags); unpackflags(rflags); unpackeflags(rflags); }
@endif

# PREFETCH hints: no architectural effect, empty semantics.
:PREFETCH m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=0 ... { }
:PREFETCH m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode ... { } # rest aliased to /0
:PREFETCHW m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=1 ... { }
:PREFETCHWT1 m8 is vexMode=0 & byte=0x0f; byte=0x0d; m8 & reg_opcode=2 ... { }
:PREFETCHT0 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=1 ) ... & m8 { }
:PREFETCHT1 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=2 ) ... & m8 { }
:PREFETCHT2 m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=3 ) ... & m8 { }
:PREFETCHNTA m8 is vexMode=0 & byte=0x0f; byte=0x18; ( mod != 0b11 & reg_opcode=0 ) ... & m8 { }

define pcodeop ptwrite;
:PTWRITE rm32 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xae; rm32 & reg_opcode=4 ... { ptwrite(rm32); }
@ifdef IA64
:PTWRITE rm64 is vexMode=0 & $(PRE_F3) & opsize=2 & byte=0x0f; byte=0xae; rm64 & reg_opcode=4 ... { ptwrite(rm64); }
@endif

# PUSH r/m forms at each stack width.
:PUSH rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push22(rm16); }
:PUSH rm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push42(rm16); }
:PUSH rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xff; rm32 & reg_opcode=6 ... { push24(rm32); }
:PUSH rm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xff; rm32 & reg_opcode=6 ... { push44(rm32); }
@ifdef IA64
:PUSH rm16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xff; rm16 & reg_opcode=6 ... { push82(rm16); }
:PUSH rm64 is $(LONGMODE_ON) & vexMode=0 & byte=0xff; rm64 & reg_opcode=6 ...
{ push88(rm64); } @endif :PUSH Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & row=5 & page=0 & Rmr16 { push22(Rmr16); } :PUSH Rmr16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & row=5 & page=0 & Rmr16 { push42(Rmr16); } :PUSH Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & row=5 & page=0 & Rmr32 { push24(Rmr32); } :PUSH Rmr32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & row=5 & page=0 & Rmr32 { push44(Rmr32); } @ifdef IA64 :PUSH Rmr16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & row=5 & page=0 & Rmr16 { push82(Rmr16); } :PUSH Rmr64 is $(LONGMODE_ON) & vexMode=0 & row=5 & page=0 & Rmr64 { push88(Rmr64); } @endif :PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push22(tmp); } :PUSH simm8_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push42(tmp); } :PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push24(tmp); } :PUSH simm8_32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x6a; simm8_32 { tmp:4=simm8_32; push44(tmp); } @ifdef IA64 :PUSH simm8_16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x6a; simm8_16 { tmp:2=simm8_16; push82(tmp); } :PUSH simm8_64 is $(LONGMODE_ON) & vexMode=0 & byte=0x6a; simm8_64 { tmp:8=simm8_64; push88(tmp); } @endif :PUSH simm16_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push22(tmp); } :PUSH simm16_16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; push42(tmp); } :PUSH imm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push24(tmp); } :PUSH imm32 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x68; imm32 { tmp:4=imm32; push44(tmp); } @ifdef IA64 :PUSH simm16_16 is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x68; simm16_16 { tmp:2=simm16_16; 
push82(tmp); } :PUSH simm32_64 is $(LONGMODE_ON) & vexMode=0 & byte=0x68; simm32_64 { tmp:8=simm32_64; push88(tmp); } @endif :PUSH CS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xe & CS { push22(CS); } :PUSH CS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xe & CS { pushseg44(CS); } :PUSH SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x16 & SS { push22(SS); } :PUSH SS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x16 & SS { pushseg44(SS); } :PUSH DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x1e & DS { push22(DS); } :PUSH DS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x1e & DS { pushseg44(DS); } :PUSH ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0x6 & ES { push22(ES); } :PUSH ES is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0x6 & ES { pushseg44(ES); } :PUSH FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa0 & FS { push22(FS); } :PUSH FS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa0 & FS { pushseg44(FS); } @ifdef IA64 :PUSH FS is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xf; byte=0xa0 & FS { push82(FS); } :PUSH FS is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0xa0 & FS { pushseg88(FS); } @endif :PUSH GS is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & byte=0xf; byte=0xa8 & GS { push22(GS); } :PUSH GS is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & byte=0xf; byte=0xa8 & GS { pushseg44(GS); } @ifdef IA64 :PUSH GS is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0xf; byte=0xa8 & GS { push82(GS); } :PUSH GS is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0xa8 & GS { pushseg88(GS); } @endif :PUSHA is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x60 { local tmp=SP; push22(AX); push22(CX); push22(DX); push22(BX); push22(tmp); push22(BP); push22(SI); push22(DI); } :PUSHA is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x60 { local tmp=SP; push42(AX); push42(CX); push42(DX); push42(BX); push42(tmp); push42(BP); push42(SI); push42(DI); } :PUSHAD is 
$(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x60 { local tmp=ESP; push24(EAX); push24(ECX); push24(EDX); push24(EBX); push24(tmp); push24(EBP); push24(ESI); push24(EDI); } :PUSHAD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x60 { local tmp=ESP; push44(EAX); push44(ECX); push44(EDX); push44(EBX); push44(tmp); push44(EBP); push44(ESI); push44(EDI); } :PUSHF is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0x9c { packflags(flags); push22(flags); } :PUSHF is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0x9c { packflags(flags); push42(flags); } :PUSHFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0x9c { packflags(eflags); packeflags(eflags); push24(eflags); } :PUSHFD is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0x9c { packflags(eflags); packeflags(eflags); push44(eflags); } @ifdef IA64 :PUSHF is $(LONGMODE_ON) & vexMode=0 & opsize=0 & byte=0x9c { packflags(flags); push82(flags); } :PUSHFQ is $(LONGMODE_ON) & vexMode=0 & byte=0x9c { packflags(rflags); packeflags(rflags); push88(rflags); } @endif :RCL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=2 ... { local tmpCF = CF; CF = rm8 s< 0; rm8 = (rm8 << 1) | tmpCF; OF = CF ^ (rm8 s< 0); } :RCL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=2 ... { local cnt=(CL&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp<>(9-cnt));rm8=tmp(0); CF=(tmp&0x100)!=0; } :RCL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=2 ... ; imm8 { local cnt=(imm8&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp<>(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; } :RCL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=2 ... { local tmpCF = CF; CF = rm16 s< 0; rm16 = (rm16 << 1) | zext(tmpCF); OF = CF ^ (rm16 s< 0);} :RCL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=2 ... 
{local cnt=(CL&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp<>(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; } :RCL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=2 ... ; imm8 { local cnt=(imm8&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp<>(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; } :RCL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=2 ... { local tmpCF=CF; CF=rm32 s< 0; rm32=(rm32<<1)|zext(tmpCF); OF=CF^(rm32 s< 0); build check_rm32_dest; } :RCL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=2 ... { local cnt=CL&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp<>(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; } :RCL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=2 ... ; imm8 { local cnt=imm8&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp<>(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; } @ifdef IA64 :RCL rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=2 ... { local tmpCF=CF; CF=rm64 s< 0; rm64=(rm64<<1)|zext(tmpCF); OF=CF^(rm64 s< 0);} :RCL rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=2 ... { local cnt:1=CL&0x3f; local rm64_copy:8 = rm64; local CF_copy:1 = CF; rotated:8 = rm64_copy << cnt; rotated = rotated | (rm64_copy >> (65 -cnt)); local CF_bit:8 = zext(CF_copy) << cnt-1; rotated = rotated | CF_bit; conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8<<(64-cnt)) & rm64_copy) != 0); rm64 = rotated; } :RCL rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=2 ... 
; imm8 { local cnt:1=imm8&0x3f; local rm64_copy:8 = rm64; local CF_copy:1 = CF; rotated:8 = rm64_copy << cnt; rotated = rotated | (rm64_copy >> (65 -cnt)); local CF_bit:8 = zext(CF_copy) << cnt-1; rotated = rotated | CF_bit; conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8<<(64-cnt)) & rm64_copy) != 0); rm64 = rotated; } @endif :RCR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm8 s< 0; CF=(rm8&1)!=0; rm8=(rm8>>1)|(tmpCF<<7); OF=OF^(rm8 s< 0); } :RCR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=3 ... { local cnt=(CL&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp>>cnt)|(tmp<<(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; } :RCR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=3 ... ; imm8 { local cnt=(imm8&0x1f)%9; tmp:2=(zext(CF)<<8)|zext(rm8); tmp=(tmp>>cnt)|(tmp<<(9-cnt)); rm8=tmp(0); CF=(tmp&0x100)!=0; } :RCR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm16 s< 0; CF=(rm16&1)!=0; rm16=(rm16>>1)|(zext(tmpCF)<<15); OF=OF^(rm16 s< 0); } :RCR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=3 ... { local cnt=(CL&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp>>cnt)|(tmp<<(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; } :RCR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=3 ... ; imm8 { local cnt=(imm8&0x1f)%17; tmp:4=(zext(CF)<<16)|zext(rm16); tmp=(tmp>>cnt)|(tmp<<(17-cnt)); rm16=tmp(0); CF=(tmp&0x10000)!=0; } :RCR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=3 ... { local tmpCF=CF; OF=rm32 s< 0; CF=(rm32&1)!=0; rm32=(rm32>>1)|(zext(tmpCF)<<31); OF=OF^(rm32 s< 0); build check_rm32_dest; } :RCR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=3 ... 
{ local cnt=CL&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp>>cnt)|(tmp<<(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; } :RCR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=3 ... ; imm8 { local cnt=imm8&0x1f; tmp:8=(zext(CF)<<32)|zext(rm32); tmp=(tmp>>cnt)|(tmp<<(33-cnt)); rm32=tmp(0); CF=(tmp&0x100000000)!=0; build check_rm32_dest; } @ifdef IA64 :RCR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=3 ... { local tmpCF=CF; OF=rm64 s< 0; CF=(rm64&1)!=0; rm64=(rm64>>1)|(zext(tmpCF)<<63); OF=OF^(rm64 s< 0); } :RCR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=3 ... { local cnt:1=CL&0x3f; local rm64_copy:8 = rm64; local CF_copy:1 = CF; rotated:8 = rm64_copy >> cnt; rotated = rotated | (rm64_copy << (65 -cnt)); local CF_bit:8 = zext(CF_copy) << 64-cnt; rotated = rotated | CF_bit; conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8<<(cnt-1)) & rm64_copy) != 0); rm64 = rotated; } :RCR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=3 ... 
; imm8 { local cnt:1=imm8&0x3f; local rm64_copy:8 = rm64; local CF_copy:1 = CF; rotated:8 = rm64_copy >> cnt; rotated = rotated | (rm64_copy << (65 -cnt)); local CF_bit:8 = zext(CF_copy) << 64-cnt; rotated = rotated | CF_bit; conditionalAssign(CF, cnt == 0:1, CF_copy, ((1:8<<(cnt-1)) & rm64_copy) != 0); rm64 = rotated; } @endif @ifdef IA64 define pcodeop readfsbase; :RDFSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=0 & r32 { r32 = readfsbase(); } :RDFSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=0 & r64 { r64 = readfsbase(); } define pcodeop readgsbase; :RDGSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=1 & r32 { r32 = readgsbase(); } :RDGSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=1 & r64 { r64 = readgsbase(); } @endif define pcodeop rdmsr; :RDMSR is vexMode=0 & byte=0xf; byte=0x32 & check_EAX_dest & check_EDX_dest { tmp:8 = rdmsr(ECX); EDX = tmp(4); build check_EDX_dest; EAX = tmp(0); build check_EAX_dest; } define pcodeop readPID; :RDPID r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xc7; reg_opcode=7 & r32 { r32 = readPID(); } @ifdef IA64 :RDPID r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xc7; reg_opcode=7 & r64 { r64 = readPID(); } @endif define pcodeop rdpkru_u32; :RDPKRU is vexMode=0 & byte=0x0f; byte=0x01; byte=0xee { EAX = rdpkru_u32(); } define pcodeop rdpmc; :RDPMC is vexMode=0 & byte=0xf; byte=0x33 { tmp:8 = rdpmc(ECX); EDX = tmp(4); EAX = tmp:4; } define pcodeop rdtsc; :RDTSC is vexMode=0 & byte=0xf; byte=0x31 { tmp:8 = rdtsc(); EDX = tmp(4); EAX = tmp(0); } :RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xc3 { pop22(IP); EIP=segment(CS,IP); return [EIP]; } :RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc3 { pop42(IP); EIP=zext(IP); return [EIP]; } :RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 
& opsize=1 & byte=0xc3 { pop24(EIP); return [EIP]; } :RET is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc3 { pop44(EIP); return [EIP]; } @ifdef IA64 :RET is $(LONGMODE_ON) & vexMode=0 & byte=0xc3 { pop88(RIP); return [RIP]; } @endif :RETF is vexMode=0 & addrsize=0 & opsize=0 & byte=0xcb { pop22(IP); pop22(CS); EIP = segment(CS,IP); return [EIP]; } :RETF is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xcb { pop42(IP); EIP=zext(IP); pop42(CS); return [EIP]; } @ifdef IA64 :RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xcb { pop82(IP); RIP=zext(IP); pop82(CS); return [RIP]; } :RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xcb { pop82(IP); RIP=zext(IP); pop82(CS); return [RIP]; } @endif :RETF is vexMode=0 & addrsize=0 & opsize=1 & byte=0xcb { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); return [EIP]; } :RETF is vexMode=0 & addrsize=1 & opsize=1 & byte=0xcb { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); return [EIP]; } @ifdef IA64 :RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xcb { pop48(EIP); RIP=zext(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); return [RIP]; } :RETF is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xcb { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); return [RIP]; } @endif :RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=0 & byte=0xc2; imm16 { pop22(IP); EIP=zext(IP); SP=SP+imm16; return [EIP]; } :RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=0 & byte=0xc2; imm16 { pop42(IP); EIP=zext(IP); ESP=ESP+imm16; return [EIP]; } :RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=0 & opsize=1 & byte=0xc2; imm16 { pop24(EIP); SP=SP+imm16; return [EIP]; } :RET imm16 is $(LONGMODE_OFF) & vexMode=0 & addrsize=1 & opsize=1 & byte=0xc2; imm16 { pop44(EIP); ESP=ESP+imm16; return [EIP]; } @ifdef IA64 :RET imm16 is $(LONGMODE_ON) & vexMode=0 & byte=0xc2; imm16 { pop88(RIP); RSP=RSP+imm16; return [RIP]; } @endif :RETF imm16 is vexMode=0 & 
addrsize=0 & opsize=0 & byte=0xca; imm16 { pop22(IP); EIP=zext(IP); pop22(CS); SP=SP+imm16; return [EIP]; } :RETF imm16 is vexMode=0 & addrsize=1 & opsize=0 & byte=0xca; imm16 { pop42(IP); EIP=zext(IP); pop42(CS); ESP=ESP+imm16; return [EIP]; } @ifdef IA64 :RETF imm16 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=0 & byte=0xca; imm16 { pop82(IP); RIP=zext(IP); pop82(CS); RSP=RSP+imm16; return [RIP]; } @endif :RETF imm16 is vexMode=0 & addrsize=0 & opsize=1 & byte=0xca; imm16 { pop24(EIP); tmp:4=0; pop24(tmp); CS=tmp(0); SP=SP+imm16; return [EIP]; } :RETF imm16 is vexMode=0 & addrsize=1 & opsize=1 & byte=0xca; imm16 { pop44(EIP); tmp:4=0; pop44(tmp); CS=tmp(0); ESP=ESP+imm16; return [EIP]; } @ifdef IA64 :RETF imm16 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=1 & byte=0xca; imm16 { pop84(EIP); tmp:4=0; pop84(tmp); RIP=zext(EIP); CS=tmp(0); RSP=RSP+imm16; return [RIP]; } :RETF imm16 is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & opsize=2 & byte=0xca; imm16 { pop88(RIP); tmp:8=0; pop88(tmp); CS=tmp(0); RSP=RSP+imm16; return [RIP]; } @endif :ROL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=0 ... { CF = rm8 s< 0; rm8 = (rm8 << 1) | CF; OF = CF ^ (rm8 s< 0); } :ROL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=0 ... { local cnt = CL & 0x7; local count_and_mask = CL & 0x1f;rm8 = (rm8 << cnt) | (rm8 >> (8 - cnt)); rolflags(rm8, count_and_mask);} :ROL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x7; rm8 = (rm8 << cnt) | (rm8 >> (8 - cnt)); rolflags(rm8,imm8 & 0x1f:1);} :ROL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=0 ... { CF = rm16 s< 0; rm16 = (rm16 << 1) | zext(CF); OF = CF ^ (rm16 s< 0); } :ROL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=0 ... { local cnt = CL & 0xf; local count_and_mask = CL & 0x1f;rm16 = (rm16 << cnt) | (rm16 >> (16 - cnt)); rolflags(rm16,count_and_mask);} :ROL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=0 ... 
; imm8 { local cnt = imm8 & 0xf; rm16 = (rm16 << cnt) | (rm16 >> (16 - cnt)); rolflags(rm16,imm8 & 0x1f:1);} :ROL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=0 ... { CF = rm32 s< 0; rm32 = (rm32 << 1) | zext(CF); OF = CF ^ (rm32 s< 0); build check_rm32_dest; } :ROL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=0 ... { local cnt = CL & 0x1f; rm32 = (rm32 << cnt) | (rm32 >> (32 - cnt)); rolflags(rm32,cnt); build check_rm32_dest; } :ROL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x1f; rm32 = (rm32 << cnt) | (rm32 >> (32 - cnt)); rolflags(rm32,cnt); build check_rm32_dest; } @ifdef IA64 :ROL rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=0 ... { CF = rm64 s< 0; rm64 = (rm64 << 1) | zext(CF); OF = CF ^ (rm64 s< 0); } :ROL rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=0 ... { local cnt = CL & 0x3f; rm64 = (rm64 << cnt) | (rm64 >> (64 - cnt)); rolflags(rm64,cnt);} :ROL rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=0 ... ; imm8 { local cnt = imm8 & 0x3f; rm64 = (rm64 << cnt) | (rm64 >> (64 - cnt)); rolflags(rm64,cnt);} @endif :ROR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=1 ... { CF = rm8 & 1; rm8 = (rm8 >> 1) | (CF << 7); OF = ((rm8 & 0x40) != 0) ^ (rm8 s< 0); } :ROR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=1 ... { local cnt = CL & 0x7; local count_and_mask = CL & 0x1f;rm8 = (rm8 >> cnt) | (rm8 << (8 - cnt)); rorflags(rm8,count_and_mask);} :ROR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x7; rm8 = (rm8 >> cnt) | (rm8 << (8 - cnt)); rorflags(rm8,imm8 & 0x1f:1);} :ROR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=1 ... 
{ CF=(rm16 & 1)!=0; rm16=(rm16>>1)|(zext(CF)<<15); OF=((rm16 & 0x4000) != 0) ^ (rm16 s< 0); } :ROR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=1 ... { local cnt = CL & 0xf; local count_and_mask = CL & 0x1f; rm16 = (rm16 >> cnt) | (rm16 << (16 - cnt)); rorflags(rm16,count_and_mask);} :ROR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0xf; rm16 = (rm16 >> cnt) | (rm16 << (16 - cnt)); rorflags(rm16,imm8 & 0x1f:1);} :ROR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=1 ... { CF=(rm32&1)!=0; rm32=(rm32>>1)|(zext(CF)<<31); OF=((rm32&0x40000000)!=0) ^ (rm32 s< 0); build check_rm32_dest; } :ROR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=1 ... { local cnt = CL & 0x1f; rm32 = (rm32 >> cnt) | (rm32 << (32 - cnt)); rorflags(rm32,cnt); build check_rm32_dest; } :ROR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=1 ... ; imm8 { local cnt = imm8 & 0x1f; rm32 = (rm32 >> cnt) | (rm32 << (32 - cnt)); rorflags(rm32,cnt); build check_rm32_dest; } @ifdef IA64 :ROR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=1 ... { CF=(rm64&1)!=0; rm64=(rm64>>1)|(zext(CF)<<63); OF=((rm64&0x4000000000000000)!=0) ^ (rm64 s< 0); } :ROR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=1 ... { local cnt = CL & 0x3f; rm64 = (rm64 >> cnt) | (rm64 << (64 - cnt)); rorflags(rm64,cnt);} :ROR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=1 ... 
; imm8 { local cnt = imm8 & 0x3f; rm64 = (rm64 >> cnt) | (rm64 << (64 - cnt)); rorflags(rm64,cnt);} @endif define pcodeop smm_restore_state; :RSM is vexMode=0 & byte=0xf; byte=0xaa { tmp:4 = smm_restore_state(); return [tmp]; } # Initially disallowed in 64bit mode, but later reintroduced :SAHF is vexMode=0 & byte=0x9e { SF = (AH & 0x80) != 0; ZF = (AH & 0x40) != 0; AF = (AH & 0x10) != 0; PF = (AH & 0x04) != 0; CF = (AH & 0x01) != 0; } :SALC is vexMode=0 & bit64=0 & byte=0xd6 { AL = CF * 0xff; } :SAR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=7 ... { CF = rm8 & 1; OF = 0; rm8 = rm8 s>> 1; resultflags(rm8); } :SAR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 s>> count; sarflags(tmp, rm8,count); shiftresultflags(rm8,count); } :SAR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 s>> count; sarflags(tmp, rm8,count); shiftresultflags(rm8,count); } :SAR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=7 ... { CF = (rm16 & 1) != 0; OF = 0; rm16 = rm16 s>> 1; resultflags(rm16); } :SAR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=7 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 s>> count; sarflags(tmp, rm16,count); shiftresultflags(rm16,count); } :SAR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 s>> count; sarflags(tmp, rm16,count); shiftresultflags(rm16,count); } :SAR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=7 ... { CF = (rm32 & 1) != 0; OF = 0; rm32 = rm32 s>> 1; build check_rm32_dest; resultflags(rm32); } :SAR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=7 ... 
{ local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 s>> count; build check_rm32_dest; sarflags(tmp, rm32,count); shiftresultflags(rm32,count); } :SAR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 s>> count; build check_rm32_dest; sarflags(tmp, rm32,count); shiftresultflags(rm32,count); } @ifdef IA64 :SAR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=7 ... { CF = (rm64 & 1) != 0; OF = 0; rm64 = rm64 s>> 1; resultflags(rm64); } :SAR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=7 ... { local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 s>> count; sarflags(tmp, rm64,count); shiftresultflags(rm64,count); } :SAR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=7 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 s>> count; sarflags(tmp, rm64,count); shiftresultflags(rm64,count); } @endif # See 'lockable.sinc' for memory destination, lockable variants :SBB AL,imm8 is vexMode=0 & byte=0x1c; AL & imm8 { subCarryFlags( AL, imm8 ); resultflags(AL); } :SBB AX,imm16 is vexMode=0 & opsize=0 & byte=0x1d; AX & imm16 { subCarryFlags( AX, imm16 ); resultflags(AX); } :SBB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x1d; EAX & check_EAX_dest & imm32 { subCarryFlags( EAX, imm32 ); build check_EAX_dest; resultflags(EAX); } @ifdef IA64 :SBB RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1d; RAX & simm32 { subCarryFlags( RAX, simm32 ); resultflags(RAX); } @endif :SBB Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=3; imm8 { subCarryFlags( Rmr8, imm8 ); resultflags(Rmr8); } :SBB Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=3; imm16 { subCarryFlags( Rmr16, imm16 ); resultflags(Rmr16); } :SBB Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_Rmr32_dest & 
reg_opcode=3; imm32 { subCarryFlags( Rmr32, imm32 ); build check_Rmr32_dest; resultflags(Rmr32); } @ifdef IA64 :SBB Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=3; simm32 { subCarryFlags( Rmr64, simm32 ); resultflags(Rmr64); } @endif :SBB Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=3; simm8_16 { subCarryFlags( Rmr16, simm8_16 ); resultflags(Rmr16); } :SBB Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_Rmr32_dest & reg_opcode=3; simm8_32 { subCarryFlags( Rmr32, simm8_32 ); build check_Rmr32_dest; resultflags(Rmr32); } @ifdef IA64 :SBB Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=3; simm8_64 { subCarryFlags( Rmr64, simm8_64 ); resultflags(Rmr64); } @endif :SBB Rmr8,Reg8 is vexMode=0 & byte=0x18; mod=3 & Rmr8 & Reg8 { subCarryFlags( Rmr8, Reg8 ); resultflags(Rmr8); } :SBB Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x19; mod=3 & Rmr16 & Reg16 { subCarryFlags( Rmr16, Reg16 ); resultflags(Rmr16); } :SBB Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x19; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { subCarryFlags( Rmr32, Reg32 ); build check_Rmr32_dest; resultflags(Rmr32); } @ifdef IA64 :SBB Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x19; mod=3 & Rmr64 & Reg64 { subCarryFlags( Rmr64, Reg64 ); resultflags(Rmr64); } @endif :SBB Reg8,rm8 is vexMode=0 & byte=0x1a; rm8 & Reg8 ... { subCarryFlags( Reg8, rm8 ); resultflags(Reg8); } :SBB Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x1b; rm16 & Reg16 ... { subCarryFlags( Reg16, rm16 ); resultflags(Reg16); } :SBB Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x1b; rm32 & Reg32 ... & check_Reg32_dest ... { subCarryFlags( Reg32, rm32 ); build check_Reg32_dest; resultflags(Reg32); } @ifdef IA64 :SBB Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x1b; rm64 & Reg64 ... 
{ subCarryFlags( Reg64, rm64 ); resultflags(Reg64); } @endif :SCASB^repe^repetail eseDI1 is vexMode=0 & repe & repetail & byte=0xae & eseDI1 { build repe; build eseDI1; subflags(AL,eseDI1); local diff=AL-eseDI1; resultflags(diff); build repetail; } :SCASW^repe^repetail eseDI2 is vexMode=0 & repe & repetail & opsize=0 & byte=0xaf & eseDI2 { build repe; build eseDI2; subflags(AX,eseDI2); local diff=AX-eseDI2; resultflags(diff); build repetail; } :SCASD^repe^repetail eseDI4 is vexMode=0 & repe & repetail & opsize=1 & byte=0xaf & eseDI4 { build repe; build eseDI4; subflags(EAX,eseDI4); local diff=EAX-eseDI4; resultflags(diff); build repetail; } @ifdef IA64 :SCASQ^repe^repetail eseDI8 is $(LONGMODE_ON) & vexMode=0 & repe & repetail & opsize=2 & byte=0xaf & eseDI8 { build repe; build eseDI8; subflags(RAX,eseDI8); local diff=RAX-eseDI8; resultflags(diff); build repetail; } @endif :SET^cc rm8 is vexMode=0 & byte=0xf; row=9 & cc; rm8 { rm8 = cc; } # manual is not consistent on operands :SGDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m16 { m16 = GlobalDescriptorTableRegister(); } :SGDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m32 { m32 = GlobalDescriptorTableRegister(); } @ifdef IA64 :SGDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=0 ) ... & m64 { m64 = GlobalDescriptorTableRegister(); } @endif :SHL rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 &(reg_opcode=4|reg_opcode=6) ... { CF = rm8 s< 0; rm8 = rm8 << 1; OF = CF ^ (rm8 s< 0); resultflags(rm8); } :SHL rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 << count; shlflags(tmp, rm8,count); shiftresultflags(rm8,count); } :SHL rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & (reg_opcode=4|reg_opcode=6) ... 
; imm8 { local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 << count; shlflags(tmp, rm8,count); shiftresultflags(rm8,count); } :SHL rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm16 s< 0; rm16 = rm16 << 1; OF = CF ^ (rm16 s< 0); resultflags(rm16); } :SHL rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 << count; shlflags(tmp, rm16,count); shiftresultflags(rm16,count); } :SHL rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 << count; shlflags(tmp, rm16,count); shiftresultflags(rm16,count); } :SHL rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... { CF = rm32 s< 0; rm32 = rm32 << 1; OF = CF ^ (rm32 s< 0); build check_rm32_dest; resultflags(rm32); } :SHL rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... { local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 << count; build check_rm32_dest; shlflags(tmp, rm32,count); shiftresultflags(rm32,count); } :SHL rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 << count; build check_rm32_dest; shlflags(tmp, rm32,count); shiftresultflags(rm32,count); } @ifdef IA64 :SHL rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & (reg_opcode=4|reg_opcode=6) ... { CF = rm64 s< 0; rm64 = rm64 << 1; OF = CF ^ (rm64 s< 0); resultflags(rm64); } :SHL rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & (reg_opcode=4|reg_opcode=6) ... 
{ local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 << count; shlflags(tmp, rm64,count); shiftresultflags(rm64,count); } :SHL rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & (reg_opcode=4|reg_opcode=6) ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 << count; shlflags(tmp, rm64,count); shiftresultflags(rm64,count); } @endif :SHLD rm16,Reg16,imm8 is vexMode=0 & opsize=0; byte=0x0F; byte=0xA4; rm16 & Reg16 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = (rm16 << count) | (Reg16 >> (16 - count)); shlflags(tmp,rm16,count); shiftresultflags(rm16,count);} :SHLD rm16,Reg16,CL is vexMode=0 & opsize=0; byte=0x0F; byte=0xA5; CL & rm16 & Reg16 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = (rm16 << count) | (Reg16 >> (16 - count)); shlflags(tmp,rm16,count); shiftresultflags(rm16,count); } :SHLD rm32,Reg32,imm8 is vexMode=0 & opsize=1; byte=0x0F; byte=0xA4; rm32 & check_rm32_dest ... & Reg32 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = (rm32 << count) | (Reg32 >> (32 - count)); build check_rm32_dest; shlflags(tmp,rm32,count); shiftresultflags(rm32,count); } :SHLD rm32,Reg32,CL is vexMode=0 & opsize=1; byte=0x0F; byte=0xA5; CL & rm32 & check_rm32_dest ... & Reg32 ... { local count = CL & 0x1f; local tmp = rm32; rm32 = (rm32 << count) | (Reg32 >> (32 - count)); build check_rm32_dest; shlflags(tmp,rm32,count); shiftresultflags(rm32,count); } @ifdef IA64 :SHLD rm64,Reg64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xA4; rm64 & Reg64 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = (rm64 << count) | (Reg64 >> (64 - count)); shlflags(tmp,rm64,count); shiftresultflags(rm64,count); } :SHLD rm64,Reg64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xA5; CL & rm64 & Reg64 ... 
{ local count = CL & 0x3f; local tmp = rm64; rm64 = (rm64 << count) | (Reg64 >> (64 - count)); shlflags(tmp,rm64,count); shiftresultflags(rm64,count); } @endif :SHRD rm16,Reg16,imm8 is vexMode=0 & opsize=0; byte=0x0F; byte=0xAC; rm16 & Reg16 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm16; rm16 = (rm16 >> count) | (Reg16 << (16 - count)); shrdflags(tmp,rm16,count); shiftresultflags(rm16,count); } :SHRD rm16,Reg16,CL is vexMode=0 & opsize=0; byte=0x0F; byte=0xAD; CL & rm16 & Reg16 ... { local count = CL & 0x1f; local tmp = rm16; rm16 = (rm16 >> count) | (Reg16 << (16 - count)); shrdflags(tmp,rm16,count); shiftresultflags(rm16,count); } :SHRD rm32,Reg32,imm8 is vexMode=0 & opsize=1; byte=0x0F; byte=0xAC; rm32 & check_rm32_dest ... & Reg32 ... ; imm8 { local count = imm8 & 0x1f; local tmp = rm32; rm32 = (rm32 >> count) | (Reg32 << (32 - count)); build check_rm32_dest; shrdflags(tmp,rm32,count); shiftresultflags(rm32,count); } :SHRD rm32,Reg32,CL is vexMode=0 & opsize=1; byte=0x0F; byte=0xAD; CL & rm32 & check_rm32_dest ... & Reg32 ... { local count = CL & 0x1f; local tmp = rm32; rm32 = (rm32 >> count) | (Reg32 << (32 - count)); build check_rm32_dest; shrdflags(tmp,rm32,count); shiftresultflags(rm32,count); } @ifdef IA64 :SHRD rm64,Reg64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xAC; rm64 & Reg64 ... ; imm8 { local count = imm8 & 0x3f; local tmp = rm64; rm64 = (rm64 >> count) | (Reg64 << (64 - count)); shrdflags(tmp,rm64,count); shiftresultflags(rm64,count); } :SHRD rm64,Reg64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x0F; byte=0xAD; CL & rm64 & Reg64 ... { local count = CL & 0x3f; local tmp = rm64; rm64 = (rm64 >> count) | (Reg64 << (64 - count)); shrdflags(tmp,rm64,count); shiftresultflags(rm64,count); } @endif :SHR rm8,n1 is vexMode=0 & byte=0xD0; rm8 & n1 & reg_opcode=5 ... { CF = rm8 & 1; OF = 0; rm8 = rm8 >> 1; resultflags(rm8); } :SHR rm8,CL is vexMode=0 & byte=0xD2; CL & rm8 & reg_opcode=5 ... 
{ local count = CL & 0x1f; local tmp = rm8; rm8 = rm8 >> count; shrflags(tmp, rm8,count); shiftresultflags(rm8,count); }

# SHR (logical shift right), C0/C1/D0-D3 with /5: CF gets the last bit shifted
# out (for the count-1 forms it is the low bit of the source), OF is cleared on
# 1-bit shifts; multi-bit forms delegate flag updates to the shared
# shrflags()/shiftresultflags() helpers also used by the SHL group above.
# Counts are masked to 5 bits; the 64-bit forms below mask to 6 bits (0x3f).
:SHR rm8,imm8 is vexMode=0 & byte=0xC0; rm8 & reg_opcode=5 ... ; imm8
{ local count = imm8 & 0x1f; local tmp = rm8; rm8 = rm8 >> count; shrflags(tmp, rm8,count); shiftresultflags(rm8,count); }

:SHR rm16,n1 is vexMode=0 & opsize=0 & byte=0xD1; rm16 & n1 & reg_opcode=5 ...
{ CF = (rm16 & 1) != 0; OF = 0; rm16 = rm16 >> 1; resultflags(rm16); }

:SHR rm16,CL is vexMode=0 & opsize=0 & byte=0xD3; CL & rm16 & reg_opcode=5 ...
{ local count = CL & 0x1f; local tmp = rm16; rm16 = rm16 >> count; shrflags(tmp, rm16,count); shiftresultflags(rm16,count); }

:SHR rm16,imm8 is vexMode=0 & opsize=0 & byte=0xC1; rm16 & reg_opcode=5 ... ; imm8
{ local count = imm8 & 0x1f; local tmp = rm16; rm16 = rm16 >> count; shrflags(tmp, rm16,count); shiftresultflags(rm16,count); }

:SHR rm32,n1 is vexMode=0 & opsize=1 & byte=0xD1; rm32 & n1 & check_rm32_dest ... & reg_opcode=5 ...
{ CF = (rm32 & 1) != 0; OF = 0; rm32 = rm32 >> 1; build check_rm32_dest; resultflags(rm32); }

:SHR rm32,CL is vexMode=0 & opsize=1 & byte=0xD3; CL & rm32 & check_rm32_dest ... & reg_opcode=5 ...
{ local count = CL & 0x1f; local tmp = rm32; rm32 = rm32 >> count; build check_rm32_dest; shrflags(tmp, rm32,count); shiftresultflags(rm32,count); }

:SHR rm32,imm8 is vexMode=0 & opsize=1 & byte=0xC1; rm32 & check_rm32_dest ... & reg_opcode=5 ... ; imm8
{ local count = imm8 & 0x1f; local tmp = rm32; rm32 = rm32 >> count; build check_rm32_dest; shrflags(tmp, rm32,count); shiftresultflags(rm32,count); }

@ifdef IA64
# FIX: this pattern previously read "rm64 & n1 ®_opcode=5" -- the text
# "& reg" had been mangled into the HTML entity '&reg;' (U+00AE), which is
# not valid SLEIGH. Restored to "& reg_opcode=5" to match every other /5
# SHR pattern in this group.
:SHR rm64,n1 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD1; rm64 & n1 & reg_opcode=5 ...
{ CF = (rm64 & 1) != 0; OF = 0; rm64 = rm64 >> 1; resultflags(rm64); }

:SHR rm64,CL is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xD3; CL & rm64 & reg_opcode=5 ...
{ local count = CL & 0x3f; local tmp = rm64; rm64 = rm64 >> count; shrflags(tmp, rm64,count); shiftresultflags(rm64,count); }

# 64-bit SHR immediate form: note the count mask is 6 bits (0x3f) here,
# versus 5 bits (0x1f) for the narrower operand sizes above.
:SHR rm64,imm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xC1; rm64 & reg_opcode=5 ... ; imm8
{ local count = imm8 & 0x3f; local tmp = rm64; rm64 = rm64 >> count; shrflags(tmp, rm64,count); shiftresultflags(rm64,count); }
@endif

# SIDT: store the IDT register to memory, modeled via the opaque
# InterruptDescriptorTableRegister() pcodeop (memory-only: mod != 0b11).
:SIDT m16 is $(LONGMODE_OFF) & vexMode=0 & opsize=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m16 { m16 = InterruptDescriptorTableRegister(); }
:SIDT m32 is $(LONGMODE_OFF) & vexMode=0 & opsize=1 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m32 { m32 = InterruptDescriptorTableRegister(); }
@ifdef IA64
:SIDT m64 is $(LONGMODE_ON) & vexMode=0 & byte=0xf; byte=0x1; ( mod != 0b11 & reg_opcode=1 ) ... & m64 { m64 = InterruptDescriptorTableRegister(); }
@endif

# SKINIT (AMD secure init): opaque pcodeop taking EAX.
define pcodeop skinit;
:SKINIT EAX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xde & EAX { skinit(EAX); }

# SLDT: store the local descriptor table register selector.
:SLDT rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=0 ... { rm16 = LocalDescriptorTableRegister(); }
:SLDT rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x0; rm32 & reg_opcode=0 ... { rm32 = LocalDescriptorTableRegister(); }
@ifdef IA64
:SLDT rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x0; rm64 & reg_opcode=0 ... { rm64 = LocalDescriptorTableRegister(); }
@endif

# SMSW: store machine status word -- the 16-bit form takes the low 2 bytes
# of CR0 (CR0:2); the 32-bit form zero-extends them; the 64-bit form stores
# CR0 whole.
:SMSW rm16 is vexMode=0 & opsize=0 & byte=0xf; byte=0x01; rm16 & reg_opcode=4 ... { rm16 = CR0:2; }
:SMSW rm32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x01; rm32 & reg_opcode=4 ... { rm32 = zext(CR0:2); }
@ifdef IA64
:SMSW rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x01; rm64 & reg_opcode=4 ... { rm64 = CR0; }
@endif

# Simple flag setters: STAC sets AC, STC sets CF, STD sets DF.
:STAC is vexMode=0 & byte=0x0f; byte=0x01; byte=0xcb { AC = 1; }
:STC is vexMode=0 & byte=0xf9 { CF = 1; }
:STD is vexMode=0 & byte=0xfd { DF = 1; }

# MFL: AMD instruction
# TODO: define the action.
# STGI: set global interrupt flag (GIF); while GIF is zero, all external interrupts are disabled. :STGI is vexMode=0 & byte=0x0f; byte=0x01; byte=0xdc { stgi(); } :STI is vexMode=0 & byte=0xfb { IF = 1; } :STMXCSR m32 is vexMode=0 & byte=0xf; byte=0xae; ( mod != 0b11 & reg_opcode=3 ) ... & m32 { m32 = MXCSR; } :STOSB^rep^reptail eseDI1 is vexMode=0 & rep & reptail & byte=0xaa & eseDI1 { build rep; build eseDI1; eseDI1=AL; build reptail; } :STOSW^rep^reptail eseDI2 is vexMode=0 & rep & reptail & opsize=0 & byte=0xab & eseDI2 { build rep; build eseDI2; eseDI2=AX; build reptail; } :STOSD^rep^reptail eseDI4 is vexMode=0 & rep & reptail & opsize=1 & byte=0xab & eseDI4 { build rep; build eseDI4; eseDI4=EAX; build reptail; } @ifdef IA64 :STOSQ^rep^reptail eseDI8 is $(LONGMODE_ON) & vexMode=0 & rep & reptail & opsize=2 & byte=0xab & eseDI8 { build rep; build eseDI8; eseDI8=RAX; build reptail; } @endif :STR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=1 ... { rm16 = TaskRegister(); } :STR Rmr32 is vexMode=0 & opsize=1 & byte=0xf; byte=0x0; Rmr32 & mod=3 & reg_opcode=1 & check_Rmr32_dest { local tmp:2 = TaskRegister(); Rmr32 = zext(tmp); build check_Rmr32_dest; } @ifdef IA64 :STR Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0xf; byte=0x0; Rmr64 & mod=3 & reg_opcode=1 { local tmp:2 = TaskRegister(); Rmr64 = zext(tmp); } @endif # See 'lockable.sinc' for memory destination, lockable variants :SUB AL,imm8 is vexMode=0 & byte=0x2c; AL & imm8 { subflags( AL,imm8 ); AL = AL - imm8; resultflags( AL); } :SUB AX,imm16 is vexMode=0 & opsize=0 & byte=0x2d; AX & imm16 { subflags( AX,imm16); AX = AX - imm16; resultflags( AX); } :SUB EAX,imm32 is vexMode=0 & opsize=1 & byte=0x2d; EAX & check_EAX_dest & imm32 { subflags( EAX,imm32); EAX = EAX - imm32; build check_EAX_dest; resultflags( EAX); } @ifdef IA64 :SUB RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x2d; RAX & simm32 { subflags( RAX,simm32); RAX = RAX - simm32; resultflags( RAX); } @endif :SUB 
Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=5; imm8 { subflags( Rmr8,imm8 ); Rmr8 = Rmr8 - imm8; resultflags( Rmr8); } :SUB Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=5; imm16 { subflags( Rmr16,imm16); Rmr16 = Rmr16 - imm16; resultflags( Rmr16); } :SUB Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=5; imm32 { subflags( Rmr32,imm32); Rmr32 = Rmr32 - imm32; build check_rm32_dest; resultflags( Rmr32); } @ifdef IA64 :SUB Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=5; simm32 { subflags( Rmr64,simm32); Rmr64 = Rmr64 - simm32; resultflags( Rmr64); } @endif :SUB Rmr16,simm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=5; simm8_16 { subflags( Rmr16,simm8_16); Rmr16 = Rmr16 - simm8_16; resultflags( Rmr16); } :SUB Rmr32,simm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=5; simm8_32 { subflags( Rmr32,simm8_32); Rmr32 = Rmr32 - simm8_32; build check_rm32_dest; resultflags( Rmr32); } @ifdef IA64 :SUB Rmr64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=5; simm8_64 { subflags( Rmr64,simm8_64); Rmr64 = Rmr64 - simm8_64; resultflags( Rmr64); } @endif :SUB Rmr8,Reg8 is vexMode=0 & byte=0x28; mod=3 & Rmr8 & Reg8 { subflags( Rmr8,Reg8 ); Rmr8 = Rmr8 - Reg8; resultflags( Rmr8); } :SUB Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x29; mod=3 & Rmr16 & Reg16 { subflags( Rmr16,Reg16); Rmr16 = Rmr16 - Reg16; resultflags( Rmr16); } :SUB Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x29; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { subflags( Rmr32,Reg32); Rmr32 = Rmr32 - Reg32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :SUB Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x29; mod=3 & Rmr64 & Reg64 { subflags( Rmr64,Reg64); Rmr64 = Rmr64 - Reg64; resultflags( Rmr64); } @endif :SUB Reg8,rm8 is vexMode=0 & byte=0x2a; 
rm8 & Reg8 ... { subflags( Reg8,rm8 ); Reg8 = Reg8 - rm8; resultflags( Reg8); } :SUB Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x2b; rm16 & Reg16 ... { subflags(Reg16,rm16 ); Reg16 = Reg16 - rm16; resultflags(Reg16); } :SUB Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x2b; rm32 & Reg32 ... & check_Reg32_dest ... { subflags(Reg32,rm32 ); Reg32 = Reg32 - rm32; build check_Reg32_dest; resultflags(Reg32); } @ifdef IA64 :SUB Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x2b; rm64 & Reg64 ... { subflags(Reg64,rm64 ); Reg64 = Reg64 - rm64; resultflags(Reg64); } @endif :SYSENTER is vexMode=0 & byte=0x0f; byte=0x34 { sysenter(); } :SYSEXIT is vexMode=0 & byte=0x0f; byte=0x35 { sysexit(); @ifdef IA64 RIP=RCX; return [RIP]; @endif } :SYSCALL is vexMode=0 & byte=0x0f; byte=0x05 { syscall(); } # returning to 32bit mode loads ECX # returning to 64bit mode loads RCX :SYSRET is vexMode=0 & byte=0x0f; byte=0x07 { sysret(); @ifdef IA64 RIP=RCX; return [RIP]; @endif } :SWAPGS is vexMode=0 & bit64=1 & byte=0x0f; byte=0x01; byte=0xf8 { swapgs(); } :RDTSCP is vexMode=0 & bit64=1 & byte=0x0f; byte=0x01; byte=0xf9 { rdtscp(); } :TEST AL,imm8 is vexMode=0 & byte=0xA8; AL & imm8 { logicalflags(); local tmp = AL & imm8; resultflags(tmp); } :TEST AX,imm16 is vexMode=0 & opsize=0; byte=0xA9; AX & imm16 { logicalflags(); local tmp = AX & imm16; resultflags(tmp); } :TEST EAX,imm32 is vexMode=0 & opsize=1; byte=0xA9; EAX & imm32 { logicalflags(); local tmp = EAX & imm32; resultflags(tmp); } @ifdef IA64 :TEST RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0xA9; RAX & simm32 { logicalflags(); local tmp = RAX & simm32; resultflags(tmp); } @endif :TEST rm8,imm8 is vexMode=0 & byte=0xF6; rm8 & (reg_opcode=0 | reg_opcode=1) ... ; imm8 { logicalflags(); local tmp = rm8 & imm8; resultflags(tmp); } :TEST rm16,imm16 is vexMode=0 & opsize=0; byte=0xF7; rm16 & (reg_opcode=0 | reg_opcode=1) ... 
; imm16 { logicalflags(); local tmp = rm16 & imm16; resultflags(tmp); } :TEST rm32,imm32 is vexMode=0 & opsize=1; byte=0xF7; rm32 & (reg_opcode=0 | reg_opcode=1) ... ; imm32 { logicalflags(); local tmp = rm32 & imm32; resultflags(tmp); } @ifdef IA64 :TEST rm64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0xF7; rm64 & (reg_opcode=0 | reg_opcode=1) ... ; simm32 { logicalflags(); local tmp = rm64 & simm32; resultflags(tmp); } @endif :TEST rm8,Reg8 is vexMode=0 & byte=0x84; rm8 & Reg8 ... { logicalflags(); local tmp = rm8 & Reg8; resultflags(tmp); } :TEST rm16,Reg16 is vexMode=0 & opsize=0; byte=0x85; rm16 & Reg16 ... { logicalflags(); local tmp = rm16 & Reg16; resultflags(tmp); } :TEST rm32,Reg32 is vexMode=0 & opsize=1; byte=0x85; rm32 & Reg32 ... { logicalflags(); local tmp = rm32 & Reg32; resultflags(tmp); } @ifdef IA64 :TEST rm64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2; byte=0x85; rm64 & Reg64 ... { logicalflags(); local tmp = rm64 & Reg64; resultflags(tmp); } @endif define pcodeop invalidInstructionException; :UD0 Reg32, rm32 is vexMode=0 & byte=0x0f; byte=0xff; rm32 & Reg32 ... { local target:$(SIZE) = invalidInstructionException(); goto [target]; } :UD1 Reg32, rm32 is vexMode=0 & byte=0x0f; byte=0xb9; rm32 & Reg32 ... { local target:$(SIZE) = invalidInstructionException(); goto [target]; } :UD2 is vexMode=0 & byte=0xf; byte=0xb { local target:$(SIZE) = invalidInstructionException(); goto [target]; } define pcodeop verr; define pcodeop verw; :VERR rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=4 ... { ZF = verr(); } :VERW rm16 is vexMode=0 & byte=0xf; byte=0x0; rm16 & reg_opcode=5 ... 
{ ZF = verw(); } # MFL added VMX opcodes # # AMD hardware assisted virtualization opcodes :VMLOAD EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xda & EAX { vmload(EAX); } @ifdef IA64 :VMLOAD RAX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xda & RAX { vmload(RAX); } @endif :VMMCALL is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd9 { vmmcall(); } # Limiting the effective address size to 32 and 64 bit. Surely we're not expecting a 16-bit VM address, are we? :VMRUN EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xd8 & EAX { vmrun(EAX); } @ifdef IA64 :VMRUN RAX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xd8 & RAX { vmrun(RAX); } @endif # Limiting the effective address size to 32 and 64 bit. Surely we're not expecting a 16-bit VM address, are we? :VMSAVE EAX is vexMode=0 & addrsize=1 & byte=0x0f; byte=0x01; byte=0xdb & EAX { vmsave(EAX); } @ifdef IA64 :VMSAVE RAX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & byte=0x0f; byte=0x01; byte=0xdb & RAX { vmsave(RAX); } @endif # # # Intel hardware assisted virtualization opcodes @ifdef IA64 :INVEPT Reg64, m128 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x80; Reg64 ... & m128 { invept(Reg64, m128); } @endif :INVEPT Reg32, m128 is vexMode=0 & bit64=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x80; Reg32 ... & m128 { invept(Reg32, m128); } @ifdef IA64 :INVVPID Reg64, m128 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x81; Reg64 ... & m128 { invvpid(Reg64, m128); } @endif :INVVPID Reg32, m128 is vexMode=0 & bit64=0 & $(PRE_66) & byte=0x0f; byte=0x38; byte=0x81; Reg32 ... & m128 { invvpid(Reg32, m128); } :VMCALL is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc1 { vmcall(); } @ifdef IA64 :VMCLEAR m64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... 
& m64 { vmclear(m64); } @endif #TODO: invokes a VM function specified in EAX :VMFUNC EAX is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd4 & EAX { vmfunc(EAX); } #TODO: this launches the VM managed by the current VMCS. How is the VMCS expressed for the emulator? For Ghidra analysis? :VMLAUNCH is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc2 { vmlaunch(); } #TODO: this resumes the VM managed by the current VMCS. How is the VMCS expressed for the emulator? For Ghidra analysis? :VMRESUME is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc3 { vmresume(); } #TODO: this loads the VMCS pointer from the m64 memory address and makes the VMCS pointer valid; how to express # this for analysis and emulation? :VMPTRLD m64 is vexMode=0 & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmptrld(m64); } #TODO: stores the current VMCS pointer into the specified 64-bit memory address; how to express this for analysis and emulation? #TODO: note that the Intel manual does not specify m64 (which it does for VMPTRLD, yet it does state that "the operand # of this instruction is always 64-bits and is always in memory". Is it an error that the "Instruction" entry in the # box giving the definition does not specify m64? :VMPTRST m64 is vexMode=0 & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=7 ) ... & m64 { vmptrst(m64); } :VMREAD rm32, Reg32 is $(PRE_NO) & vexMode=0 & opsize=1 & byte=0x0f; byte=0x78; rm32 & check_rm32_dest ... & Reg32 ... { rm32 = vmread(Reg32); build check_rm32_dest; } @ifdef IA64 :VMREAD rm64, Reg64 is $(LONGMODE_ON) & $(PRE_NO) & vexMode=0 & opsize=2 & byte=0x0f; byte=0x78; rm64 & Reg64 ... { rm64 = vmread(Reg64); } @endif :VMWRITE Reg32, rm32 is $(PRE_NO) & vexMode=0 & opsize=1 & byte=0x0f; byte=0x79; rm32 & Reg32 ... & check_Reg32_dest ... { vmwrite(rm32,Reg32); build check_Reg32_dest; } @ifdef IA64 :VMWRITE Reg64, rm64 is $(LONGMODE_ON) & $(PRE_NO) & vexMode=0 & opsize=2 & byte=0x0f; byte=0x79; rm64 & Reg64 ... 
{ vmwrite(rm64,Reg64); } @endif :VMXOFF is vexMode=0 & byte=0x0f; byte=0x01; byte=0xc4 { vmxoff(); } # NB: this opcode is incorrect in the 2005 edition of the Intel manual. Opcode below is taken from the 2008 version. :VMXON m64 is vexMode=0 & $(PRE_F3) & byte=0x0f; byte=0xc7; ( mod != 0b11 & reg_opcode=6 ) ... & m64 { vmxon(m64); } #END of changes for VMX opcodes :WAIT is vexMode=0 & byte=0x9b { } :WBINVD is vexMode=0 & byte=0xf; byte=0x9 { } @ifdef IA64 define pcodeop writefsbase; :WRFSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=2 & r32 { tmp:8 = zext(r32); writefsbase(tmp); } :WRFSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=2 & r64 { writefsbase(r64); } define pcodeop writegsbase; :WRGSBASE r32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=3 & r32 { tmp:8 = zext(r32); writegsbase(tmp); } :WRGSBASE r64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0f; byte=0xae; reg_opcode=3 & r64 { writegsbase(r64); } @endif define pcodeop wrpkru; :WRPKRU is byte=0x0F; byte=0x01; byte=0xEF { wrpkru(EAX); } define pcodeop wrmsr; :WRMSR is vexMode=0 & byte=0xf; byte=0x30 { tmp:8 = (zext(EDX) << 32) | zext(EAX); wrmsr(ECX,tmp); } # See 'lockable.sinc' for memory destination, lockable variants :XADD Rmr8,Reg8 is vexMode=0 & byte=0x0F; byte=0xC0; mod=3 & Rmr8 & Reg8 { addflags( Rmr8,Reg8 ); local tmp = Rmr8 + Reg8; Reg8 = Rmr8; Rmr8 = tmp; resultflags(tmp); } :XADD Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x0F; byte=0xC1; mod=3 & Rmr16 & Reg16 { addflags(Rmr16,Reg16); local tmp = Rmr16 + Reg16; Reg16 = Rmr16; Rmr16 = tmp; resultflags(tmp); } :XADD Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x0F; byte=0xC1; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 & check_Reg32_dest { addflags(Rmr32,Reg32); local tmp = Rmr32 + Reg32; Reg32 = Rmr32; Rmr32 = tmp; build check_Rmr32_dest; build check_Reg32_dest; resultflags(tmp); } @ifdef IA64 :XADD Rmr64,Reg64 is 
$(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x0F; byte=0xC1; mod=3 & Rmr64 & Reg64 { addflags(Rmr64,Reg64); local tmp = Rmr64 + Reg64; Reg64 = Rmr64; Rmr64 = tmp; resultflags(tmp); } @endif define pcodeop xabort; :XABORT imm8 is vexMode=0 & byte=0xc6; byte=0xf8; imm8 { tmp:1 = imm8; xabort(tmp); } define pcodeop xbegin; define pcodeop xend; :XBEGIN rel16 is vexMode=0 & opsize=0 & byte=0xc7; byte=0xf8; rel16 { xbegin(&:$(SIZE) rel16); } :XBEGIN rel32 is vexMode=0 & (opsize=1 | opsize=2) & byte=0xc7; byte=0xf8; rel32 { xbegin(&:$(SIZE) rel32); } :XEND is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd5 { xend(); } # See 'lockable.sinc' for memory destination, lockable variants :XCHG AX,Rmr16 is vexMode=0 & opsize=0 & row = 9 & page = 0 & AX & Rmr16 { local tmp = AX; AX = Rmr16; Rmr16 = tmp; } :XCHG EAX,Rmr32 is vexMode=0 & opsize=1 & row = 9 & page = 0 & EAX & check_EAX_dest & Rmr32 & check_Rmr32_dest { local tmp = EAX; EAX = Rmr32; build check_EAX_dest; Rmr32 = tmp; build check_Rmr32_dest; } @ifdef IA64 :XCHG RAX,Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & row = 9 & page = 0 & RAX & Rmr64 { local tmp = RAX; RAX = Rmr64; Rmr64 = tmp; } @endif :XCHG Rmr8,Reg8 is vexMode=0 & byte=0x86; mod=3 & Rmr8 & Reg8 { local tmp = Rmr8; Rmr8 = Reg8; Reg8 = tmp; } :XCHG Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x87; mod=3 & Rmr16 & Reg16 { local tmp = Rmr16; Rmr16 = Reg16; Reg16 = tmp; } :XCHG Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x87; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 & check_Reg32_dest { local tmp = Rmr32; Rmr32 = Reg32; build check_Rmr32_dest; Reg32 = tmp; build check_Reg32_dest;} @ifdef IA64 :XCHG Rmr64,Reg64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x87; mod=3 & Rmr64 & Reg64 { local tmp = Rmr64; Rmr64 = Reg64; Reg64 = tmp; } @endif :XLAT seg16^BX is vexMode=0 & addrsize=0 & seg16 & byte=0xd7; BX { tmp:$(SIZE)= 0; ptr2(tmp,BX+zext(AL)); AL = *tmp; } :XLAT segWide^EBX is vexMode=0 & addrsize=1 & segWide & byte=0xd7; EBX { tmp:$(SIZE)= 0; 
ptr4(tmp,EBX+zext(AL)); AL = *tmp; } @ifdef IA64 :XLAT segWide^RBX is $(LONGMODE_ON) & vexMode=0 & addrsize=2 & segWide & byte=0xd7; RBX { tmp:$(SIZE)= 0; ptr8(tmp,RBX+zext(AL)); AL = *tmp; } @endif # See 'lockable.sinc' for memory destination, lockable variants :XOR AL,imm8 is vexMode=0 & byte=0x34; AL & imm8 { logicalflags(); AL = AL ^ imm8; resultflags( AL); } :XOR AX,imm16 is vexMode=0 & opsize=0 & byte=0x35; AX & imm16 { logicalflags(); AX = AX ^ imm16; resultflags( AX); } :XOR EAX,imm32 is vexMode=0 & opsize=1 & byte=0x35; EAX & imm32 & check_EAX_dest { logicalflags(); EAX = EAX ^ imm32; build check_EAX_dest; resultflags( EAX);} @ifdef IA64 :XOR RAX,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x35; RAX & simm32 { logicalflags(); RAX = RAX ^ simm32; resultflags( RAX); } @endif :XOR Rmr8,imm8 is vexMode=0 & $(BYTE_80_82); mod=3 & Rmr8 & reg_opcode=6; imm8 { logicalflags(); Rmr8 = Rmr8 ^ imm8; resultflags( Rmr8); } :XOR Rmr16,imm16 is vexMode=0 & opsize=0 & byte=0x81; mod=3 & Rmr16 & reg_opcode=6; imm16 { logicalflags(); Rmr16 = Rmr16 ^ imm16; resultflags( Rmr16); } :XOR Rmr32,imm32 is vexMode=0 & opsize=1 & byte=0x81; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=6; imm32 { logicalflags(); Rmr32 = Rmr32 ^ imm32; build check_rm32_dest; resultflags( Rmr32); } @ifdef IA64 :XOR Rmr64,simm32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x81; mod=3 & Rmr64 & reg_opcode=6; simm32 { logicalflags(); Rmr64 = Rmr64 ^ simm32; resultflags( Rmr64); } @endif :XOR Rmr16,usimm8_16 is vexMode=0 & opsize=0 & byte=0x83; mod=3 & Rmr16 & reg_opcode=6; usimm8_16 { logicalflags(); Rmr16 = Rmr16 ^ usimm8_16; resultflags( Rmr16); } :XOR Rmr32,usimm8_32 is vexMode=0 & opsize=1 & byte=0x83; mod=3 & Rmr32 & check_rm32_dest & reg_opcode=6; usimm8_32 { logicalflags(); Rmr32 = Rmr32 ^ usimm8_32; build check_rm32_dest; resultflags( Rmr32); } @ifdef IA64 :XOR Rmr64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x83; mod=3 & Rmr64 & reg_opcode=6; usimm8_64 { 
logicalflags(); Rmr64 = Rmr64 ^ usimm8_64; resultflags( Rmr64); } @endif :XOR Rmr8,Reg8 is vexMode=0 & byte=0x30; mod=3 & Rmr8 & Reg8 { logicalflags(); Rmr8 = Rmr8 ^ Reg8; resultflags( Rmr8); } :XOR Rmr16,Reg16 is vexMode=0 & opsize=0 & byte=0x31; mod=3 & Rmr16 & Reg16 { logicalflags(); Rmr16 = Rmr16 ^ Reg16; resultflags( Rmr16); } :XOR Rmr32,Reg32 is vexMode=0 & opsize=1 & byte=0x31; mod=3 & Rmr32 & check_Rmr32_dest & Reg32 { logicalflags(); Rmr32 = Rmr32 ^ Reg32; build check_Rmr32_dest; resultflags( Rmr32); } @ifdef IA64 :XOR Rmr64,Reg64 is vexMode=0 & $(LONGMODE_ON) & opsize=2 & byte=0x31; mod=3 & Rmr64 & Reg64 { logicalflags(); Rmr64 = Rmr64 ^ Reg64; resultflags( Rmr64); } @endif :XOR Reg8,rm8 is vexMode=0 & byte=0x32; rm8 & Reg8 ... { logicalflags(); Reg8 = Reg8 ^ rm8; resultflags( Reg8); } :XOR Reg16,rm16 is vexMode=0 & opsize=0 & byte=0x33; rm16 & Reg16 ... { logicalflags(); Reg16 = Reg16 ^ rm16; resultflags(Reg16); } :XOR Reg32,rm32 is vexMode=0 & opsize=1 & byte=0x33; rm32 & Reg32 ... & check_Reg32_dest ... { logicalflags(); Reg32 = Reg32 ^ rm32; build check_Reg32_dest; resultflags(Reg32); } @ifdef IA64 :XOR Reg64,rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & byte=0x33; rm64 & Reg64 ... { logicalflags(); Reg64 = Reg64 ^ rm64; resultflags(Reg64); } @endif :XGETBV is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD0 { local tmp = XCR0 >> 32; EDX = tmp:4; EAX = XCR0:4; } :XSETBV is vexMode=0 & byte=0x0F; byte=0x01; byte=0xD1 { XCR0 = (zext(EDX) << 32) | zext(EAX); } define pcodeop xsave; define pcodeop xsave64; define pcodeop xsavec; define pcodeop xsavec64; define pcodeop xsaveopt; define pcodeop xsaveopt64; define pcodeop xsaves; define pcodeop xsaves64; define pcodeop xrstor; define pcodeop xrstor64; define pcodeop xrstors; define pcodeop xrstors64; :XRSTOR Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=5 ) ... 
& Mem { tmp:4 = 512; xrstor(Mem, tmp); } @ifdef IA64 :XRSTOR64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xrstor64(Mem, tmp); } @endif :XRSTORS Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=3 ) ... & Mem { tmp:4 = 512; xrstors(Mem, tmp); } @ifdef IA64 :XRSTORS64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=3 ) ... & Mem { tmp:4 = 512; xrstors64(Mem, tmp); } @endif :XSAVE Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsave(Mem, tmp); } @ifdef IA64 :XSAVE64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsave64(Mem, tmp); } @endif :XSAVEC Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsavec(Mem, tmp); } @ifdef IA64 :XSAVEC64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=4 ) ... & Mem { tmp:4 = 512; xsavec64(Mem, tmp); } @endif :XSAVEOPT Mem is vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=6 ) ... & Mem { tmp:4 = 512; xsaveopt(Mem, tmp); } @ifdef IA64 :XSAVEOPT64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=6 ) ... & Mem { tmp:4 = 512; xsaveopt64(Mem, tmp); } @endif :XSAVES Mem is vexMode=0 & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=5 ) ... & Mem { tmp:4 = 512; xsaves(Mem, tmp); } @ifdef IA64 :XSAVES64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xC7; ( mod != 0b11 & reg_opcode=5 ) ... 
& Mem { tmp:4 = 512; xsaves64(Mem, tmp); } @endif define pcodeop xtest; :XTEST is byte=0x0F; byte=0x01; byte=0xD6 { ZF = xtest(); } :LFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=5 & r_m=0 { } :MFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=6 & r_m=0 { } :SFENCE is vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0xAE; mod = 0b11 & reg_opcode=7 & r_m=0 { } # # floating point instructions # define pcodeop f2xm1; :F2XM1 is vexMode=0 & byte=0xD9; byte=0xF0 { FPUInstructionPointer = inst_start; ST0 = f2xm1(ST0); } # compute 2^x-1 :FABS is vexMode=0 & byte=0xD9; byte=0xE1 { FPUInstructionPointer = inst_start; ST0 = abs(ST0); } :FADD m32fp is vexMode=0 & byte=0xD8; reg_opcode=0 ... & m32fp { FPUInstructionPointer = inst_start; ST0 = ST0 f+ float2float(m32fp); } :FADD m64fp is vexMode=0 & byte=0xDC; reg_opcode=0 ... & m64fp { FPUInstructionPointer = inst_start; ST0 = ST0 f+ float2float(m64fp); } :FADD ST0, freg is vexMode=0 & byte=0xD8; frow=12 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; ST0 = ST0 f+ freg; } :FADD freg, ST0 is vexMode=0 & byte=0xDC; frow=12 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; freg = freg f+ ST0; } :FADDP is vexMode=0 & byte=0xDE; byte=0xC1 { FPUInstructionPointer = inst_start; ST1 = ST0 f+ ST1; fpop(); } :FADDP freg, ST0 is vexMode=0 & byte=0xDE; frow=12 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; freg = ST0 f+ freg; fpop(); } :FIADD m32 is vexMode=0 & byte=0xDA; reg_opcode=0 ... & m32 { FPUInstructionPointer = inst_start; ST0 = ST0 f+ int2float(m32); } :FIADD m16 is vexMode=0 & byte=0xDE; reg_opcode=0 ... & m16 { FPUInstructionPointer = inst_start; ST0 = ST0 f+ int2float(m16); } define pcodeop from_bcd; :FBLD m80 is vexMode=0 & byte=0xDF; reg_opcode=4 ... & m80 { FPUInstructionPointer = inst_start; fdec(); ST0 = from_bcd(m80); } define pcodeop to_bcd; :FBSTP m80 is vexMode=0 & byte=0xDF; reg_opcode=6 ... 
& m80 { FPUInstructionPointer = inst_start; m80 = to_bcd(ST0); fpop(); } :FCHS is vexMode=0 & byte=0xD9; byte=0xE0 { FPUInstructionPointer = inst_start; ST0 = f- ST0; } :FCLEX is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE2 { FPUStatusWord[0,8] = 0; FPUStatusWord[15,1] = 0; } :FNCLEX is vexMode=0 & byte=0xDB; byte=0xE2 { FPUStatusWord[0,8] = 0; FPUStatusWord[15,1] = 0; } :FCMOVB ST0, freg is vexMode=0 & byte=0xDA; frow=12 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; if ( !CF ) goto inst_next; ST0 = freg; } :FCMOVE ST0, freg is vexMode=0 & byte=0xDA; frow=12 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; if ( !ZF ) goto inst_next; ST0 = freg; } :FCMOVBE ST0, freg is vexMode=0 & byte=0xDA; frow=13 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; if ( !CF & !ZF ) goto inst_next; ST0 = freg; } :FCMOVU ST0, freg is vexMode=0 & byte=0xDA; frow=13 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; if ( !PF ) goto inst_next; ST0 = freg; } :FCMOVNB ST0, freg is vexMode=0 & byte=0xDB; frow=12 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; if ( CF ) goto inst_next; ST0 = freg; } :FCMOVNE ST0, freg is vexMode=0 & byte=0xDB; frow=12 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; if ( ZF ) goto inst_next; ST0 = freg; } :FCMOVNBE ST0, freg is vexMode=0 & byte=0xDB; frow=13 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; if ( CF & ZF ) goto inst_next; ST0 = freg; } :FCMOVNU ST0, freg is vexMode=0 & byte=0xDB; frow=13 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; if ( PF ) goto inst_next; ST0 = freg; } :FCOM m32fp is vexMode=0 & byte=0xD8; reg_opcode=2 ... & m32fp { FPUInstructionPointer = inst_start; local tmp=float2float(m32fp); fcom(tmp); } :FCOM m64fp is vexMode=0 & byte=0xDC; reg_opcode=2 ... 
& m64fp { FPUInstructionPointer = inst_start; local tmp=float2float(m64fp); fcom(tmp); } :FCOM freg is vexMode=0 & byte=0xD8; frow=13 & fpage=0 & freg { FPUInstructionPointer = inst_start; fcom(freg); } :FCOM is vexMode=0 & byte=0xD8; byte=0xD1 { FPUInstructionPointer = inst_start; fcom(ST1); } :FCOMP m32fp is vexMode=0 & byte=0xD8; reg_opcode=3 ... & m32fp { FPUInstructionPointer = inst_start; local tmp=float2float(m32fp); fcom(tmp); fpop(); } :FCOMP m64fp is vexMode=0 & byte=0xDC; reg_opcode=3 ... & m64fp { FPUInstructionPointer = inst_start; local tmp=float2float(m64fp); fcom(tmp); fpop(); } :FCOMP freg is vexMode=0 & byte=0xD8; frow=13 & fpage=1 & freg { FPUInstructionPointer = inst_start; fcom(freg); fpop(); } :FCOMP is vexMode=0 & byte=0xD8; byte=0xD9 { FPUInstructionPointer = inst_start; fcom(ST1); fpop(); } :FCOMPP is vexMode=0 & byte=0xDE; byte=0xD9 { FPUInstructionPointer = inst_start; fcom(ST1); fpop(); fpop(); } :FCOMI ST0, freg is vexMode=0 & byte=0xDB; frow=15 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; fcomi(freg); } :FCOMIP ST0, freg is vexMode=0 & byte=0xDF; frow=15 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; fcomi(freg); fpop(); } :FUCOMI ST0, freg is vexMode=0 & byte=0xDB; frow=14 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; fcomi(freg); } :FUCOMIP ST0, freg is vexMode=0 & byte=0xDF; frow=14 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; fcomi(freg); fpop(); } define pcodeop fcos; :FCOS is vexMode=0 & byte=0xD9; byte=0xFF { FPUInstructionPointer = inst_start; ST0 = fcos(ST0); } :FDECSTP is vexMode=0 & byte=0xD9; byte=0xF6 { FPUInstructionPointer = inst_start; fdec(); FPUStatusWord = FPUStatusWord & 0xfeff; C0 = 0; #Clear C0 } # Legacy 8087 instructions. Still valid but treated as NOP instructions. 
:FDISI is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE1 {} :FNDISI is vexMode=0 & byte=0xDB; byte=0xE1 {} :FENI is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE0 {} :FNENI is vexMode=0 & byte=0xDB; byte=0xE0 {} :FDIV m32fp is vexMode=0 & byte=0xD8; reg_opcode=6 ... & m32fp { FPUInstructionPointer = inst_start; ST0 = ST0 f/ float2float(m32fp); } :FDIV m64fp is vexMode=0 & byte=0xDC; reg_opcode=6 ... & m64fp { FPUInstructionPointer = inst_start; ST0 = ST0 f/ float2float(m64fp); } :FDIV ST0,freg is vexMode=0 & byte=0xD8; frow=15 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; ST0 = ST0 f/ freg; } :FDIV freg,ST0 is vexMode=0 & byte=0xDC; frow=15 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; freg = freg f/ ST0; } :FDIVP freg,ST0 is vexMode=0 & byte=0xDE; frow=15 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; freg = freg f/ ST0; fpop(); } :FDIVP is vexMode=0 & byte=0xDE; byte=0xF9 { FPUInstructionPointer = inst_start; ST1 = ST1 f/ ST0; fpop(); } :FIDIV m32 is vexMode=0 & byte=0xDA; reg_opcode=6 ... & m32 { FPUInstructionPointer = inst_start; ST0 = ST0 f/ int2float(m32); } :FIDIV m16 is vexMode=0 & byte=0xDE; reg_opcode=6 ... & m16 { FPUInstructionPointer = inst_start; ST0 = ST0 f/ int2float(m16); } :FDIVR m32fp is vexMode=0 & byte=0xD8; reg_opcode=7 ... & m32fp { FPUInstructionPointer = inst_start; ST0 = float2float(m32fp) f/ ST0; } :FDIVR m64fp is vexMode=0 & byte=0xDC; reg_opcode=7 ... 
& m64fp { FPUInstructionPointer = inst_start; ST0 = float2float(m64fp) f/ ST0; } :FDIVR ST0,freg is vexMode=0 & byte=0xD8; frow=15 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; ST0 = freg f/ ST0; } :FDIVR freg,ST0 is vexMode=0 & byte=0xDC; frow=15 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; freg = ST0 f/ freg; } :FDIVRP freg,ST0 is vexMode=0 & byte=0xDE; frow=15 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; freg = ST0 f/ freg; fpop(); } :FDIVRP is vexMode=0 & byte=0xDE; byte=0xF1 { FPUInstructionPointer = inst_start; ST1 = ST0 f/ ST1; fpop(); } :FIDIVR m32 is vexMode=0 & byte=0xDA; reg_opcode=7 ... & m32 { FPUInstructionPointer = inst_start; ST0 = int2float(m32) f/ ST0; } :FIDIVR m16 is vexMode=0 & byte=0xDE; reg_opcode=7 ... & m16 { FPUInstructionPointer = inst_start; ST0 = int2float(m16) f/ ST0; } define pcodeop ffree; :FFREE freg is vexMode=0 & byte=0xDD; frow=12 & fpage=0 & freg { FPUInstructionPointer = inst_start; FPUTagWord = ffree(freg); # Set freg to invalid value } :FFREEP freg is vexMode=0 & byte=0xDF; frow=12 & fpage=0 & freg { FPUInstructionPointer = inst_start; FPUTagWord = ffree(freg); fpop(); # FFREE and pop } :FICOM m16 is vexMode=0 & byte=0xDE; reg_opcode=2 ... & m16 { FPUInstructionPointer = inst_start; local tmp = int2float(m16); fcom(tmp); } :FICOM m32 is vexMode=0 & byte=0xDA; reg_opcode=2 ... & m32 { FPUInstructionPointer = inst_start; local tmp = int2float(m32); fcom(tmp); } :FICOMP m16 is vexMode=0 & byte=0xDE; (mod != 0b11 & reg_opcode=3) ... & m16 { FPUInstructionPointer = inst_start; local tmp = int2float(m16); fcom(tmp); fpop(); } :FICOMP m32 is vexMode=0 & byte=0xDA; reg_opcode=3 ... & m32 { FPUInstructionPointer = inst_start; local tmp = int2float(m32); fcom(tmp); fpop(); } :FILD m16 is vexMode=0 & byte=0xDF; reg_opcode=0 ... & m16 { FPUInstructionPointer = inst_start; fdec(); ST0 = int2float(m16); } :FILD m32 is vexMode=0 & byte=0xDB; reg_opcode=0 ... 
& m32 { FPUInstructionPointer = inst_start; fdec(); ST0 = int2float(m32); } :FILD m64 is vexMode=0 & byte=0xDF; reg_opcode=5 ... & m64 { FPUInstructionPointer = inst_start; fdec(); ST0 = int2float(m64); } :FINCSTP is vexMode=0 & byte=0xD9; byte=0xF7 { FPUInstructionPointer = inst_start; finc(); } :FINIT is vexMode=0 & byte=0x9B; byte=0xDB; byte=0xE3 { FPUControlWord = 0x037f; FPUStatusWord = 0x0000; FPUTagWord = 0xffff; FPUDataPointer = 0x00000000; FPUInstructionPointer = 0x00000000; FPULastInstructionOpcode = 0x0000; C0 = 0; C1 = 0; C2 = 0; C3 = 0; } :FNINIT is vexMode=0 & byte=0xDB; byte=0xE3 { FPUControlWord = 0x037f; FPUStatusWord = 0x0000; FPUTagWord = 0xffff; FPUDataPointer = 0x00000000; FPUInstructionPointer = 0x00000000; FPULastInstructionOpcode = 0x0000; C0 = 0; C1 = 0; C2 = 0; C3 = 0; } :FIST m16 is vexMode=0 & byte=0xDF; (mod != 0b11 & reg_opcode=2) ... & m16 { FPUInstructionPointer = inst_start; tmp:10 = round(ST0); m16 = trunc(tmp); } :FIST m32 is vexMode=0 & byte=0xDB; (mod != 0b11 & reg_opcode=2) ... & m32 { FPUInstructionPointer = inst_start; tmp:10 = round(ST0); m32 = trunc(tmp); } :FISTP m16 is vexMode=0 & byte=0xDF; reg_opcode=3 ... & m16 { FPUInstructionPointer = inst_start; tmp:10 = round(ST0); fpop(); m16 = trunc(tmp); } :FISTP m32 is vexMode=0 & byte=0xDB; reg_opcode=3 ... & m32 { FPUInstructionPointer = inst_start; tmp:10 = round(ST0); fpop(); m32 = trunc(tmp); } :FISTP m64 is vexMode=0 & byte=0xDF; reg_opcode=7 ... & m64 { FPUInstructionPointer = inst_start; tmp:10 = round(ST0); fpop(); m64 = trunc(tmp); } :FISTTP m16 is vexMode=0 & byte=0xDF; reg_opcode=1 ... & m16 { FPUInstructionPointer = inst_start; m16 = trunc(ST0); fpop(); } :FISTTP m32 is vexMode=0 & byte=0xDB; reg_opcode=1 ... & m32 { FPUInstructionPointer = inst_start; m32 = trunc(ST0); fpop(); } :FISTTP m64 is vexMode=0 & byte=0xDD; reg_opcode=1 ... 
& m64 { FPUInstructionPointer = inst_start; m64 = trunc(ST0); fpop(); } :FLD m32fp is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=0) ... & m32fp { FPUInstructionPointer = inst_start; fdec(); ST0 = float2float(m32fp); } :FLD m64fp is vexMode=0 & byte=0xDD; reg_opcode=0 ... & m64fp { FPUInstructionPointer = inst_start; fdec(); ST0 = float2float(m64fp); } :FLD m80fp is vexMode=0 & byte=0xDB; reg_opcode=5 ... & m80fp { FPUInstructionPointer = inst_start; fpushv(m80fp); } # Be careful that you don't clobber freg during fpushv, need a tmp to hold the value :FLD freg is vexMode=0 & byte=0xD9; frow=12 & fpage=0 & freg { tmp:10 = freg; fpushv(tmp); } :FLD1 is vexMode=0 & byte=0xD9; byte=0xE8 { FPUInstructionPointer = inst_start; one:4 = 1; tmp:10 = int2float(one); fpushv(tmp); } :FLDL2T is vexMode=0 & byte=0xD9; byte=0xE9 { FPUInstructionPointer = inst_start; src:8 = 0x400a934f0979a371; tmp:10 = float2float(src); fpushv(tmp); } :FLDL2E is vexMode=0 & byte=0xD9; byte=0xEA { FPUInstructionPointer = inst_start; src:8 = 0x3ff71547652b82fe; tmp:10 = float2float(src); fpushv(tmp); } :FLDPI is vexMode=0 & byte=0xD9; byte=0xEB { FPUInstructionPointer = inst_start; src:8 = 0x400921fb54442d18; tmp:10 = float2float(src); fpushv(tmp); } :FLDLG2 is vexMode=0 & byte=0xD9; byte=0xEC { FPUInstructionPointer = inst_start; src:8 = 0x3fd34413509f79ff; tmp:10 = float2float(src); fpushv(tmp); } :FLDLN2 is vexMode=0 & byte=0xD9; byte=0xED { FPUInstructionPointer = inst_start; src:8 = 0x3fe62e42fefa39ef; tmp:10 = float2float(src); fpushv(tmp); } :FLDZ is vexMode=0 & byte=0xD9; byte=0xEE { FPUInstructionPointer = inst_start; zero:4 = 0; tmp:10 = int2float(zero); fpushv(tmp); } :FLDCW m16 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=5) ... & m16 { FPUControlWord = m16; } define pcodeop fldenv; :FLDENV Mem is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=4) ... 
& Mem
{
	FPUControlWord = *:2 (Mem);
	FPUStatusWord = *:2 (Mem + 4);
	FPUTagWord = *:2 (Mem + 8);
	FPUDataPointer = *:4 (Mem + 20);
	FPUInstructionPointer = *:4 (Mem + 12);
	FPULastInstructionOpcode = *:2 (Mem + 18);
}

:FMUL m32fp is vexMode=0 & byte=0xD8; reg_opcode=1 ... & m32fp
{
	FPUInstructionPointer = inst_start;
	ST0 = ST0 f* float2float(m32fp);
}

:FMUL m64fp is vexMode=0 & byte=0xDC; reg_opcode=1 ... & m64fp
{
	# Record the instruction pointer first, matching every other FPU
	# constructor in this file.  (It was previously assigned after the
	# multiply; the two statements are independent, so the net effect
	# is identical.)
	FPUInstructionPointer = inst_start;
	ST0 = ST0 f* float2float(m64fp);
}

:FMUL freg is vexMode=0 & byte=0xD8; frow=12 & fpage=1 & freg
{
	FPUInstructionPointer = inst_start;
	ST0 = ST0 f* freg;
}

:FMUL freg is vexMode=0 & byte=0xDC; frow=12 & fpage=1 & freg
{
	FPUInstructionPointer = inst_start;
	freg = freg f* ST0;
}

:FMULP freg is vexMode=0 & byte=0xDE; frow=12 & fpage=1 & freg
{
	FPUInstructionPointer = inst_start;
	freg = ST0 f* freg;
	fpop();
}

:FMULP is vexMode=0 & byte=0xDE; byte=0xC9
{
	FPUInstructionPointer = inst_start;
	ST1 = ST0 f* ST1;
	fpop();
}

:FIMUL m32 is vexMode=0 & byte=0xDA; reg_opcode=1 ... & m32
{
	FPUInstructionPointer = inst_start;
	ST0 = ST0 f* int2float(m32);
}

:FIMUL m16 is vexMode=0 & byte=0xDE; reg_opcode=1 ...
& m16 { FPUInstructionPointer = inst_start; ST0 = ST0 f* int2float(m16); } :FNOP is vexMode=0 & byte=0xD9; byte=0xD0 { FPUInstructionPointer = inst_start; } define pcodeop fpatan; :FPATAN is vexMode=0 & byte=0xD9; byte=0xF3 { FPUInstructionPointer = inst_start; ST1 = fpatan(ST1, ST0); fpop(); } :FPREM is vexMode=0 & byte=0xD9; byte=0xF8 { FPUInstructionPointer = inst_start; local tmp = ST0 f/ ST1; tmp = tmp f* ST1; ST0 = ST0 f- tmp; } :FPREM1 is vexMode=0 & byte=0xD9; byte=0xF5 { FPUInstructionPointer = inst_start; local tmp = ST0 f/ ST1; tmp = tmp f* ST1; ST0 = ST0 f- tmp; } define pcodeop fptan; :FPTAN is vexMode=0 & byte=0xD9; byte=0xF2 { FPUInstructionPointer = inst_start; ST0 = fptan(ST0); one:4 = 1; tmp:10 = int2float(one); fpushv(tmp); } :FRNDINT is vexMode=0 & byte=0xD9; byte=0xFC { FPUInstructionPointer = inst_start; local tmp = round(ST0); ST0 = tmp; } :FRSTOR Mem is vexMode=0 & byte=0xDD; reg_opcode=4 ... & Mem { FPUControlWord = *:2 (Mem); FPUStatusWord = *:2 (Mem + 4); FPUTagWord = *:2 (Mem + 8); FPUDataPointer = *:4 (Mem + 20); FPUInstructionPointer = *:4 (Mem + 12); FPULastInstructionOpcode = *:2 (Mem + 18); ST0 = *:10 (Mem + 28); ST1 = *:10 (Mem + 38); ST2 = *:10 (Mem + 48); ST3 = *:10 (Mem + 58); ST4 = *:10 (Mem + 68); ST5 = *:10 (Mem + 78); ST6 = *:10 (Mem + 88); ST7 = *:10 (Mem + 98); } :FSAVE Mem is vexMode=0 & byte=0x9B; byte=0xDD; reg_opcode=6 ... 
& Mem { *:2 (Mem) = FPUControlWord; *:2 (Mem + 4) = FPUStatusWord; *:2 (Mem + 8) = FPUTagWord; *:4 (Mem + 20) = FPUDataPointer; *:4 (Mem + 12) = FPUInstructionPointer; *:2 (Mem + 18) = FPULastInstructionOpcode; *:10 (Mem + 28) = ST0; *:10 (Mem + 38) = ST1; *:10 (Mem + 48) = ST2; *:10 (Mem + 58) = ST3; *:10 (Mem + 68) = ST4; *:10 (Mem + 78) = ST5; *:10 (Mem + 88) = ST6; *:10 (Mem + 98) = ST7; FPUControlWord = 0x037f; FPUStatusWord = 0x0000; FPUTagWord = 0xffff; FPUDataPointer = 0x00000000; FPUInstructionPointer = 0x00000000; FPULastInstructionOpcode = 0x0000; } :FNSAVE Mem is vexMode=0 & byte=0xDD; reg_opcode=6 ... & Mem { *:2 (Mem) = FPUControlWord; *:2 (Mem + 4) = FPUStatusWord; *:2 (Mem + 8) = FPUTagWord; *:4 (Mem + 20) = FPUDataPointer; *:4 (Mem + 12) = FPUInstructionPointer; *:2 (Mem + 18) = FPULastInstructionOpcode; *:10 (Mem + 28) = ST0; *:10 (Mem + 38) = ST1; *:10 (Mem + 48) = ST2; *:10 (Mem + 58) = ST3; *:10 (Mem + 68) = ST4; *:10 (Mem + 78) = ST5; *:10 (Mem + 88) = ST6; *:10 (Mem + 98) = ST7; FPUControlWord = 0x037f; FPUStatusWord = 0x0000; FPUTagWord = 0xffff; FPUDataPointer = 0x00000000; FPUInstructionPointer = 0x00000000; FPULastInstructionOpcode = 0x0000; } define pcodeop fscale; :FSCALE is vexMode=0 & byte=0xD9; byte=0xFD { FPUInstructionPointer = inst_start; ST0 = fscale(ST0, ST1); } define pcodeop fsin; :FSIN is vexMode=0 & byte=0xD9; byte=0xFE { FPUInstructionPointer = inst_start; ST0 = fsin(ST0); } :FSINCOS is vexMode=0 & byte=0xD9; byte=0xFB { FPUInstructionPointer = inst_start; tmp:10 = fcos(ST0); ST0 = fsin(ST0); fpushv(tmp); } :FSQRT is vexMode=0 & byte=0xD9; byte=0xFA { FPUInstructionPointer = inst_start; ST0 = sqrt(ST0); } :FST m32fp is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=2) ... & m32fp { FPUInstructionPointer = inst_start; m32fp = float2float(ST0); } :FST m64fp is vexMode=0 & byte=0xDD; reg_opcode=2 ... 
& m64fp { FPUInstructionPointer = inst_start; m64fp = float2float(ST0); } :FST freg is vexMode=0 & byte=0xDD; frow=13 & fpage=0 & freg { FPUInstructionPointer = inst_start; freg = ST0; } :FSTP m32fp is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=3) ... & m32fp { FPUInstructionPointer = inst_start; m32fp = float2float(ST0); fpop(); } :FSTP m64fp is vexMode=0 & byte=0xDD; reg_opcode=3 ... & m64fp { FPUInstructionPointer = inst_start; m64fp = float2float(ST0); fpop(); } :FSTP m80fp is vexMode=0 & byte=0xDB; reg_opcode=7 ... & m80fp { FPUInstructionPointer = inst_start; fpopv(m80fp); } :FSTP freg is vexMode=0 & byte=0xDD; frow=13 & fpage=1 & freg { FPUInstructionPointer = inst_start; fpopv(freg); } :FSTCW m16 is vexMode=0 & byte=0x9B; byte=0xD9; (mod != 0b11 & reg_opcode=7) ... & m16 { m16 = FPUControlWord; } :FNSTCW m16 is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=7) ... & m16 { m16 = FPUControlWord; } :FSTENV Mem is vexMode=0 & byte=0x9B; byte=0xD9; (mod != 0b11 & reg_opcode=6) ... & Mem { *:2 (Mem) = FPUControlWord; *:2 (Mem + 4) = FPUStatusWord; *:2 (Mem + 8) = FPUTagWord; *:4 (Mem + 20) = FPUDataPointer; *:4 (Mem + 12) = FPUInstructionPointer; *:2 (Mem + 18) = FPULastInstructionOpcode; } :FNSTENV Mem is vexMode=0 & byte=0xD9; (mod != 0b11 & reg_opcode=6) ... & Mem { *:2 (Mem) = FPUControlWord; *:2 (Mem + 4) = FPUStatusWord; *:2 (Mem + 8) = FPUTagWord; *:4 (Mem + 20) = FPUDataPointer; *:4 (Mem + 12) = FPUInstructionPointer; *:2 (Mem + 18) = FPULastInstructionOpcode; } :FSTSW m16 is vexMode=0 & byte=0x9B; byte=0xDD; reg_opcode=7 ... & m16 { m16 = FPUStatusWord; } :FSTSW AX is vexMode=0 & byte=0x9B; byte=0xDF; byte=0xE0 & AX { AX = FPUStatusWord; } :FNSTSW m16 is vexMode=0 & byte=0xDD; reg_opcode=7 ... & m16 { m16 = FPUStatusWord; } :FNSTSW AX is vexMode=0 & byte=0xDF; byte=0xE0 & AX { AX = FPUStatusWord; } :FSUB m32fp is vexMode=0 & byte=0xD8; reg_opcode=4 ... 
& m32fp { FPUInstructionPointer = inst_start; ST0 = ST0 f- float2float(m32fp); } :FSUB m64fp is vexMode=0 & byte=0xDC; reg_opcode=4 ... & m64fp { FPUInstructionPointer = inst_start; ST0 = ST0 f- float2float(m64fp); } :FSUB ST0,freg is vexMode=0 & byte=0xD8; frow=14 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; ST0 = ST0 f- freg; } :FSUB freg,ST0 is vexMode=0 & byte=0xDC; frow=14 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; freg = freg f- ST0; } :FSUBP is vexMode=0 & byte=0xDE; byte=0xE9 { FPUInstructionPointer = inst_start; ST1 = ST1 f- ST0; fpop(); } :FSUBP freg,ST0 is vexMode=0 & byte=0xDE; frow=14 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; freg = freg f- ST0; fpop(); } :FISUB m32 is vexMode=0 & byte=0xDA; (mod != 0b11 & reg_opcode=4) ... & m32 { FPUInstructionPointer = inst_start; ST0 = ST0 f- int2float(m32); } :FISUB m16 is vexMode=0 & byte=0xDE; reg_opcode=4 ... & m16 { FPUInstructionPointer = inst_start; ST0 = ST0 f- int2float(m16); } :FSUBR m32fp is vexMode=0 & byte=0xD8; reg_opcode=5 ... & m32fp { FPUInstructionPointer = inst_start; ST0 = float2float(m32fp) f- ST0; } :FSUBR m64fp is vexMode=0 & byte=0xDC; reg_opcode=5 ... & m64fp { FPUInstructionPointer = inst_start; ST0 = float2float(m64fp) f- ST0; } :FSUBR ST0,freg is vexMode=0 & byte=0xD8; frow=14 & fpage=1 & freg & ST0 { FPUInstructionPointer = inst_start; ST0 = freg f- ST0; } :FSUBR freg,ST0 is vexMode=0 & byte=0xDC; frow=14 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; freg = ST0 f- freg; } :FSUBRP is vexMode=0 & byte=0xDE; byte=0xE1 { FPUInstructionPointer = inst_start; ST1 = ST0 f- ST1; fpop(); } :FSUBRP freg,ST0 is vexMode=0 & byte=0xDE; frow=14 & fpage=0 & freg & ST0 { FPUInstructionPointer = inst_start; freg = ST0 f- freg; fpop(); } :FISUBR m32 is vexMode=0 & byte=0xDA; reg_opcode=5 ... & m32 { FPUInstructionPointer = inst_start; ST0 = int2float(m32) f- ST0; } :FISUBR m16 is vexMode=0 & byte=0xDE; reg_opcode=5 ... 
& m16 { FPUInstructionPointer = inst_start; ST0 = int2float(m16) f- ST0; }

:FTST is vexMode=0 & byte=0xD9; byte=0xE4
{
	FPUInstructionPointer = inst_start;
	zero:4 = 0;
	tmp:10 = int2float(zero);
	fcom(tmp);
}

:FUCOM freg is vexMode=0 & byte=0xDD; frow=14 & fpage=0 & freg
{
	FPUInstructionPointer = inst_start;
	fcom(freg);
}

:FUCOM is vexMode=0 & byte=0xDD; byte=0xE1
{
	# Record the instruction pointer like every other FUCOM/FUCOMP/FUCOMPP
	# variant does; this no-operand form previously omitted it.
	FPUInstructionPointer = inst_start;
	fcom(ST1);
}

:FUCOMP freg is vexMode=0 & byte=0xDD; frow=14 & fpage=1 & freg
{
	FPUInstructionPointer = inst_start;
	fcom(freg);
	fpop();
}

:FUCOMP is vexMode=0 & byte=0xDD; byte=0xE9
{
	FPUInstructionPointer = inst_start;
	fcom(ST1);
	fpop();
}

:FUCOMPP is vexMode=0 & byte=0xDA; byte=0xE9
{
	FPUInstructionPointer = inst_start;
	fcom(ST1);
	fpop();
	fpop();
}

:FXAM is vexMode=0 & byte=0xD9; byte=0xE5
{
	FPUInstructionPointer = inst_start;
	# this is not an exact implementation, but gets the sign and zero tests right
	izero:4 = 0;
	fzero:10 = int2float(izero);
	# did not know how test for infinity or empty
	C0 = nan(ST0);
	# sign of ST0
	C1 = ( ST0 f< fzero );
	# assume normal if not zero
	C2 = ( ST0 f!= fzero );
	# equal to zero
	C3 = ( ST0 f== fzero );
	FPUStatusWord = (zext(C0)<<8) | (zext(C1)<<9) | (zext(C2)<<10) | (zext(C3)<<14);
}

:FXCH freg is vexMode=0 & byte=0xD9; frow=12 & fpage=1 & freg
{
	FPUInstructionPointer = inst_start;
	local tmp = ST0;
	ST0 = freg;
	freg = tmp;
}

:FXCH is vexMode=0 & byte=0xD9; byte=0xC9
{
	FPUInstructionPointer = inst_start;
	local tmp = ST0;
	ST0 = ST1;
	ST1 = tmp;
}

# fxsave and fxrstor
define pcodeop _fxsave;
define pcodeop _fxrstor;
@ifdef IA64
define pcodeop _fxsave64;
define pcodeop _fxrstor64;
@endif

# this saves the FPU state into 512 bytes of memory
:FXSAVE Mem is $(LONGMODE_OFF) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem
{
	_fxsave(Mem);
}

:FXRSTOR Mem is $(LONGMODE_OFF) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ...
& Mem { _fxrstor(Mem); } @ifdef IA64 # this saves the FPU state into 512 bytes of memory similar to the 32-bit mode :FXSAVE Mem is $(LONGMODE_ON) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem { _fxsave(Mem); } :FXSAVE64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=0 ) ... & Mem { _fxsave64(Mem); } :FXRSTOR Mem is $(LONGMODE_ON) & vexMode=0 & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ... & Mem { _fxrstor(Mem); } :FXRSTOR64 Mem is $(LONGMODE_ON) & vexMode=0 & $(REX_W) & byte=0x0F; byte=0xAE; ( mod != 0b11 & reg_opcode=1 ) ... & Mem { _fxrstor64(Mem); } @endif define pcodeop extract_significand; define pcodeop extract_exponent; :FXTRACT is vexMode=0 & byte=0xD9; byte=0xF4 { FPUInstructionPointer = inst_start; significand:10 = extract_significand(ST0); exponent:10 = extract_exponent(ST0); ST0 = exponent; fpushv(significand); } :FYL2X is vexMode=0 & byte=0xD9; byte=0xF1 { FPUInstructionPointer = inst_start; local log2st0 = ST0; ST1 = ST1 f* log2st0; fpop(); } :FYL2XP1 is vexMode=0 & byte=0xD9; byte=0xF9 { FPUInstructionPointer = inst_start; one:4 = 1; tmp:10 = int2float(one); log2st0:10 = ST0 f+ tmp; ST1 = ST1 f* log2st0; fpop(); } # # MMX instructions # :ADDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x58; XmmReg ... & m128 { XmmReg[0,64] = XmmReg[0,64] f+ m128[0,64]; XmmReg[64,64] = XmmReg[64,64] f+ m128[64,64]; } :ADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] f+ XmmReg2[64,64]; } :ADDPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x58; m128 & XmmReg ... 
{ local m:16 = m128; # Guarantee value is in a fixed location XmmReg[0,32] = XmmReg[0,32] f+ m[0,32]; XmmReg[32,32] = XmmReg[32,32] f+ m[32,32]; XmmReg[64,32] = XmmReg[64,32] f+ m[64,32]; XmmReg[96,32] = XmmReg[96,32] f+ m[96,32]; } :ADDPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] f+ XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] f+ XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] f+ XmmReg2[96,32]; } :ADDSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x58; m64 & XmmReg ... { XmmReg[0,64] = XmmReg[0,64] f+ m64; } :ADDSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg2[0,64]; } :ADDSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x58; m32 & XmmReg ... { XmmReg[0,32] = XmmReg[0,32] f+ m32; } :ADDSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x58; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg2[0,32]; } :ADDSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD0; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] f- m[0,64]; XmmReg[64,64] = XmmReg[64,64] f+ m[64,64]; } :ADDSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD0; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] f+ XmmReg2[64,64]; } :ADDSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD0; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] f- m[0,32]; XmmReg[32,32] = XmmReg[32,32] f+ m[32,32]; XmmReg[64,32] = XmmReg[64,32] f- m[64,32]; XmmReg[96,32] = XmmReg[96,32] f+ m[96,32]; } :ADDSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD0; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] f+ XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] f- XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] f+ XmmReg2[96,32]; } # special FLOATING POINT bitwise AND :ANDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x54; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] & m[0,64]; XmmReg[64,64] = XmmReg[64,64] & m[64,64]; } # special FLOATING POINT bitwise AND :ANDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x54; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] & XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] & XmmReg2[64,64]; } # special FLOATING POINT bitwise AND :ANDPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x54; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] & m[0,32]; XmmReg[32,32] = XmmReg[32,32] & m[32,32]; XmmReg[64,32] = XmmReg[64,32] & m[64,32]; XmmReg[96,32] = XmmReg[96,32] & m[96,32]; } # special FLOATING POINT bitwise AND :ANDPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x54; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] & XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] & XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] & XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] & XmmReg2[96,32]; } # special FLOATING POINT bitwise AND NOT :ANDNPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x55; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,64] = ~XmmReg[0,64] & m[0,64]; XmmReg[64,64] = ~XmmReg[64,64] & m[64,64]; } :ANDNPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x55; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = ~XmmReg1[0,64] & XmmReg2[0,64]; XmmReg1[64,64] = ~XmmReg1[64,64] & XmmReg2[64,64]; } # special FLOATING POINT bitwise AND NOT :ANDNPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x55; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = ~XmmReg[0,32] & m[0,32]; XmmReg[32,32] = ~XmmReg[32,32] & m[32,32]; XmmReg[64,32] = ~XmmReg[64,32] & m[64,32]; XmmReg[96,32] = ~XmmReg[96,32] & m[96,32]; } :ANDNPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x55; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = ~XmmReg1[0,32] & XmmReg2[0,32]; XmmReg1[32,32] = ~XmmReg1[32,32] & XmmReg2[32,32]; XmmReg1[64,32] = ~XmmReg1[64,32] & XmmReg2[64,32]; XmmReg1[96,32] = ~XmmReg1[96,32] & XmmReg2[96,32]; } # predicate mnemonics for "CMP...PD" opcode XmmCondPD: "EQ" is imm8=0 { xmmTmp1_Qa = zext( xmmTmp1_Qa f== xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( xmmTmp1_Qb f== xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF; } XmmCondPD: "LT" is imm8=1 { xmmTmp1_Qa = zext( xmmTmp1_Qa f< xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( xmmTmp1_Qb f< xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF; } XmmCondPD: "LE" is imm8=2 { xmmTmp1_Qa = zext( xmmTmp1_Qa f<= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( xmmTmp1_Qb f<= xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF; } XmmCondPD: "UNORD" is imm8=3 { xmmTmp1_Qa = zext( nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( nan(xmmTmp1_Qb) || nan(xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF; } XmmCondPD: "NEQ" is imm8=4 { xmmTmp1_Qa = zext( xmmTmp1_Qa f!= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( xmmTmp1_Qb f!= xmmTmp2_Qb ) * 0xFFFFFFFFFFFFFFFF; } XmmCondPD: "NLT" is imm8=5 { xmmTmp1_Qa = zext( !(xmmTmp1_Qa f< xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( !(xmmTmp1_Qb f< xmmTmp2_Qb) ) * 
0xFFFFFFFFFFFFFFFF; } XmmCondPD: "NLE" is imm8=6 { xmmTmp1_Qa = zext( !(xmmTmp1_Qa f<= xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( !(xmmTmp1_Qb f<= xmmTmp2_Qb) ) * 0xFFFFFFFFFFFFFFFF; } XmmCondPD: "ORD" is imm8=7 { xmmTmp1_Qa = zext( !(nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa)) ) * 0xFFFFFFFFFFFFFFFF; xmmTmp1_Qb = zext( !(nan(xmmTmp1_Qb) || nan(xmmTmp2_Qb)) ) * 0xFFFFFFFFFFFFFFFF; } define pcodeop cmppd; XmmCondPD: is imm8 { xmmTmp1_Qa = cmppd(xmmTmp1_Qa, xmmTmp2_Qa, imm8:1); xmmTmp1_Qb = cmppd(xmmTmp1_Qb, xmmTmp2_Qb, imm8:1); } # immediate operand for "CMP...PD" opcode # note: normally blank, "imm8" emits for all out of range cases CMPPD_OPERAND: is imm8<8 { } CMPPD_OPERAND: ", "^imm8 is imm8 { } :CMP^XmmCondPD^"PD" XmmReg,m128^CMPPD_OPERAND is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC2; (m128 & XmmReg ...); XmmCondPD & CMPPD_OPERAND { local m:16 = m128; xmmTmp1_Qa = XmmReg[0,64]; xmmTmp1_Qb = XmmReg[64,64]; xmmTmp2_Qa = m[0,64]; xmmTmp2_Qb = m[64,64]; build XmmCondPD; XmmReg[0,64] = xmmTmp1_Qa; XmmReg[64,64] = xmmTmp1_Qb; } :CMP^XmmCondPD^"PD" XmmReg1,XmmReg2^CMPPD_OPERAND is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondPD & CMPPD_OPERAND { xmmTmp1_Qa = XmmReg1[0,64]; xmmTmp1_Qb = XmmReg1[64,64]; xmmTmp2_Qa = XmmReg2[0,64]; xmmTmp2_Qb = XmmReg2[64,64]; build XmmCondPD; XmmReg1[0,64] = xmmTmp1_Qa; XmmReg1[64,64] = xmmTmp1_Qb; } # predicate mnemonics for "CMP...PS" opcode XmmCondPS: "EQ" is imm8=0 { xmmTmp1_Da = zext( xmmTmp1_Da f== xmmTmp2_Da ) * 0xFFFFFFFF; xmmTmp1_Db = zext( xmmTmp1_Db f== xmmTmp2_Db ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( xmmTmp1_Dc f== xmmTmp2_Dc ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( xmmTmp1_Dd f== xmmTmp2_Dd ) * 0xFFFFFFFF; } XmmCondPS: "LT" is imm8=1 { xmmTmp1_Da = zext( xmmTmp1_Da f< xmmTmp2_Da ) * 0xFFFFFFFF; xmmTmp1_Db = zext( xmmTmp1_Db f< xmmTmp2_Db ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( xmmTmp1_Dc f< xmmTmp2_Dc ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( xmmTmp1_Dd f< xmmTmp2_Dd ) * 0xFFFFFFFF; } XmmCondPS: "LE" 
# =====================================================================
# SSE / SSE2 / SSE3 instruction constructors (flattened chunk, line
# structure restored).  This chunk begins mid-definition: the tokens
# immediately below are the tail of the XmmCondPS "LE" (imm8=2)
# subconstructor whose header lies before this chunk; it ends inside
# the :PACKUSWB XmmReg, m128 constructor whose body continues past it.
#
# XmmCondPS/SD/SS are subtables keyed on the CMPcc imm8 predicate.
# Each selector writes an all-ones (zext(cond) * 0xFF..F) or all-zeros
# mask into the xmmTmp1_* scratch lanes; the enclosing CMP..PS/SD/SS
# constructor loads the operands into xmmTmp1_*/xmmTmp2_*, `build`s the
# subtable, then stores xmmTmp1_* back to the destination register.
# =====================================================================

# --- tail of XmmCondPS "LE" (imm8=2); header precedes this chunk ---
is imm8=2 { xmmTmp1_Da = zext( xmmTmp1_Da f<= xmmTmp2_Da ) * 0xFFFFFFFF; xmmTmp1_Db = zext( xmmTmp1_Db f<= xmmTmp2_Db ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( xmmTmp1_Dc f<= xmmTmp2_Dc ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( xmmTmp1_Dd f<= xmmTmp2_Dd ) * 0xFFFFFFFF; }
XmmCondPS: "UNORD" is imm8=3 { xmmTmp1_Da = zext( nan(xmmTmp1_Da) || nan(xmmTmp2_Da) ) * 0xFFFFFFFF; xmmTmp1_Db = zext( nan(xmmTmp1_Db) || nan(xmmTmp2_Db) ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( nan(xmmTmp1_Dc) || nan(xmmTmp2_Dc) ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( nan(xmmTmp1_Dd) || nan(xmmTmp2_Dd) ) * 0xFFFFFFFF; }
XmmCondPS: "NEQ" is imm8=4 { xmmTmp1_Da = zext( xmmTmp1_Da f!= xmmTmp2_Da ) * 0xFFFFFFFF; xmmTmp1_Db = zext( xmmTmp1_Db f!= xmmTmp2_Db ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( xmmTmp1_Dc f!= xmmTmp2_Dc ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( xmmTmp1_Dd f!= xmmTmp2_Dd ) * 0xFFFFFFFF; }
XmmCondPS: "NLT" is imm8=5 { xmmTmp1_Da = zext( !(xmmTmp1_Da f< xmmTmp2_Da) ) * 0xFFFFFFFF; xmmTmp1_Db = zext( !(xmmTmp1_Db f< xmmTmp2_Db) ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( !(xmmTmp1_Dc f< xmmTmp2_Dc) ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( !(xmmTmp1_Dd f< xmmTmp2_Dd) ) * 0xFFFFFFFF; }
XmmCondPS: "NLE" is imm8=6 { xmmTmp1_Da = zext( !(xmmTmp1_Da f<= xmmTmp2_Da) ) * 0xFFFFFFFF; xmmTmp1_Db = zext( !(xmmTmp1_Db f<= xmmTmp2_Db) ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( !(xmmTmp1_Dc f<= xmmTmp2_Dc) ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( !(xmmTmp1_Dd f<= xmmTmp2_Dd) ) * 0xFFFFFFFF; }
XmmCondPS: "ORD" is imm8=7 { xmmTmp1_Da = zext( !(nan(xmmTmp1_Da) || nan(xmmTmp2_Da)) ) * 0xFFFFFFFF; xmmTmp1_Db = zext( !(nan(xmmTmp1_Db) || nan(xmmTmp2_Db)) ) * 0xFFFFFFFF; xmmTmp1_Dc = zext( !(nan(xmmTmp1_Dc) || nan(xmmTmp2_Dc)) ) * 0xFFFFFFFF; xmmTmp1_Dd = zext( !(nan(xmmTmp1_Dd) || nan(xmmTmp2_Dd)) ) * 0xFFFFFFFF; }
define pcodeop cmpps;
# fallback selector: out-of-range imm8 is modeled with an opaque pcodeop
XmmCondPS: is imm8 { xmmTmp1_Da = cmpps(xmmTmp1_Da, xmmTmp2_Da, imm8:1); xmmTmp1_Db = cmpps(xmmTmp1_Db, xmmTmp2_Db, imm8:1); xmmTmp1_Dc = cmpps(xmmTmp1_Dc, xmmTmp2_Dc, imm8:1); xmmTmp1_Dd = cmpps(xmmTmp1_Dd, xmmTmp2_Dd, imm8:1); }

# immediate operand for "CMP...PS" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPPS_OPERAND: is imm8<8 { }
CMPPS_OPERAND: ", "^imm8 is imm8 { }

# CMPccPS xmm, xmm/m128, imm8 (0F C2): per-lane 32-bit compare masks
:CMP^XmmCondPS^"PS" XmmReg,m128^CMPPS_OPERAND is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC2; (m128 & XmmReg ...); XmmCondPS & CMPPS_OPERAND { local m:16 = m128; xmmTmp1_Da = XmmReg[0,32]; xmmTmp1_Db = XmmReg[32,32]; xmmTmp1_Dc = XmmReg[64,32]; xmmTmp1_Dd = XmmReg[96,32]; xmmTmp2_Da = m[0,32]; xmmTmp2_Db = m[32,32]; xmmTmp2_Dc = m[64,32]; xmmTmp2_Dd = m[96,32]; build XmmCondPS; XmmReg[0,32] = xmmTmp1_Da; XmmReg[32,32] = xmmTmp1_Db; XmmReg[64,32] = xmmTmp1_Dc; XmmReg[96,32] = xmmTmp1_Dd; }
:CMP^XmmCondPS^"PS" XmmReg1,XmmReg2^CMPPS_OPERAND is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondPS & CMPPS_OPERAND { xmmTmp1_Da = XmmReg1[0,32]; xmmTmp1_Db = XmmReg1[32,32]; xmmTmp1_Dc = XmmReg1[64,32]; xmmTmp1_Dd = XmmReg1[96,32]; xmmTmp2_Da = XmmReg2[0,32]; xmmTmp2_Db = XmmReg2[32,32]; xmmTmp2_Dc = XmmReg2[64,32]; xmmTmp2_Dd = XmmReg2[96,32]; build XmmCondPS; XmmReg1[0,32] = xmmTmp1_Da; XmmReg1[32,32] = xmmTmp1_Db; XmmReg1[64,32] = xmmTmp1_Dc; XmmReg1[96,32] = xmmTmp1_Dd; }

# predicate mnemonics for "CMP...SD" opcode
# scalar double: only the low 64-bit lane (xmmTmp1_Qa) is written
XmmCondSD: "EQ" is imm8=0 { xmmTmp1_Qa = zext( xmmTmp1_Qa f== xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "LT" is imm8=1 { xmmTmp1_Qa = zext( xmmTmp1_Qa f< xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "LE" is imm8=2 { xmmTmp1_Qa = zext( xmmTmp1_Qa f<= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "UNORD" is imm8=3 { xmmTmp1_Qa = zext( nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "NEQ" is imm8=4 { xmmTmp1_Qa = zext( xmmTmp1_Qa f!= xmmTmp2_Qa ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "NLT" is imm8=5 { xmmTmp1_Qa = zext( !(xmmTmp1_Qa f< xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "NLE" is imm8=6 { xmmTmp1_Qa = zext( !(xmmTmp1_Qa f<= xmmTmp2_Qa) ) * 0xFFFFFFFFFFFFFFFF; }
XmmCondSD: "ORD" is imm8=7 { xmmTmp1_Qa = zext( !(nan(xmmTmp1_Qa) || nan(xmmTmp2_Qa)) ) * 0xFFFFFFFFFFFFFFFF; }
define pcodeop cmpsd;
XmmCondSD: is imm8 { xmmTmp1_Qa = cmpsd(xmmTmp1_Qa, xmmTmp2_Qa, imm8:1); }

# immediate operand for "CMP...SD" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPSD_OPERAND: is imm8<8 { }
CMPSD_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondSD^"SD" XmmReg, m64^CMPSD_OPERAND is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xC2; (m64 & XmmReg ...); XmmCondSD & CMPSD_OPERAND { xmmTmp1_Qa = XmmReg[0,64]; xmmTmp2_Qa = m64; build XmmCondSD; XmmReg[0,64] = xmmTmp1_Qa; }
:CMP^XmmCondSD^"SD" XmmReg1, XmmReg2^CMPSD_OPERAND is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondSD & CMPSD_OPERAND { xmmTmp1_Qa = XmmReg1[0,64]; xmmTmp2_Qa = XmmReg2[0,64]; build XmmCondSD; XmmReg1[0,64] = xmmTmp1_Qa; }

# predicate mnemonics for "CMP...SS" opcode
# scalar single: only the low 32-bit lane (xmmTmp1_Da) is written
XmmCondSS: "EQ" is imm8=0 { xmmTmp1_Da = zext( xmmTmp1_Da f== xmmTmp2_Da ) * 0xFFFFFFFF; }
XmmCondSS: "LT" is imm8=1 { xmmTmp1_Da = zext( xmmTmp1_Da f< xmmTmp2_Da ) * 0xFFFFFFFF; }
XmmCondSS: "LE" is imm8=2 { xmmTmp1_Da = zext( xmmTmp1_Da f<= xmmTmp2_Da ) * 0xFFFFFFFF; }
XmmCondSS: "UNORD" is imm8=3 { xmmTmp1_Da = zext( nan(xmmTmp1_Da) || nan(xmmTmp2_Da) ) * 0xFFFFFFFF; }
XmmCondSS: "NEQ" is imm8=4 { xmmTmp1_Da = zext( xmmTmp1_Da f!= xmmTmp2_Da ) * 0xFFFFFFFF; }
XmmCondSS: "NLT" is imm8=5 { xmmTmp1_Da = zext( !(xmmTmp1_Da f< xmmTmp2_Da) ) * 0xFFFFFFFF; }
XmmCondSS: "NLE" is imm8=6 { xmmTmp1_Da = zext( !(xmmTmp1_Da f<= xmmTmp2_Da) ) * 0xFFFFFFFF; }
XmmCondSS: "ORD" is imm8=7 { xmmTmp1_Da = zext( !(nan(xmmTmp1_Da) || nan(xmmTmp2_Da)) ) * 0xFFFFFFFF; }
define pcodeop cmpss;
XmmCondSS: is imm8 { xmmTmp1_Da = cmpss(xmmTmp1_Da, xmmTmp2_Da, imm8:1); }

# immediate operand for "CMP...SS" opcode
# note: normally blank, "imm8" emits for all out of range cases
CMPSS_OPERAND: is imm8<8 { }
CMPSS_OPERAND: ", "^imm8 is imm8 { }

:CMP^XmmCondSS^"SS" XmmReg, m32^CMPSS_OPERAND is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xC2; (m32 & XmmReg ...); XmmCondSS & CMPSS_OPERAND { xmmTmp1_Da = XmmReg[0,32]; xmmTmp2_Da = m32; build XmmCondSS; XmmReg[0,32] = xmmTmp1_Da; }
:CMP^XmmCondSS^"SS" XmmReg1, XmmReg2^CMPSS_OPERAND is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xC2; xmmmod=3 & XmmReg1 & XmmReg2; XmmCondSS & CMPSS_OPERAND { xmmTmp1_Da = XmmReg1[0,32]; xmmTmp2_Da = XmmReg2[0,32]; build XmmCondSS; XmmReg1[0,32] = xmmTmp1_Da; }

# ---- COMISD/COMISS (0F 2F): ordered scalar compare, modeled with the
# ---- shared fucompe() flag-setting op
:COMISD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2F; m64 & XmmReg ... { fucompe(XmmReg[0,64], m64); }
:COMISD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2F; xmmmod=3 & XmmReg1 & XmmReg2 { fucompe(XmmReg1[0,64], XmmReg2[0,64]); }
:COMISS XmmReg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2F; m32 & XmmReg ... { fucompe(XmmReg[0,32], m32); }
:COMISS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2F; xmmmod=3 & XmmReg1 & XmmReg2 { fucompe(XmmReg1[0,32], XmmReg2[0,32]); }

# ---- CVT* conversion family.  CVT...  forms use round()/int2float()/
# ---- float2float(); CVTT... forms truncate via trunc().
:CVTDQ2PD XmmReg, m64 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; m64 & XmmReg ... { local m:8 = m64; XmmReg[0,64] = int2float( m[0,32] ); XmmReg[64,64] = int2float( m[32,32] ); }
:CVTDQ2PD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp:8 = XmmReg2[0,64]; XmmReg1[0,64] = int2float( tmp[0,32] ); XmmReg1[64,64] = int2float( tmp[32,32] ); }
:CVTDQ2PS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = int2float( m[0,32] ); XmmReg[32,32] = int2float( m[32,32] ); XmmReg[64,32] = int2float( m[64,32] ); XmmReg[96,32] = int2float( m[96,32] ); }
:CVTDQ2PS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = int2float( XmmReg2[0,32] ); XmmReg1[32,32] = int2float( XmmReg2[32,32] ); XmmReg1[64,32] = int2float( XmmReg2[64,32] ); XmmReg1[96,32] = int2float( XmmReg2[96,32] ); }
# NOTE(review): CVTPD2DQ/CVTPD2PI/CVTPD2PS below use trunc()/no rounding
# while CVTPS2PI uses round(); confirm whether MXCSR rounding should be
# modeled here as well.
:CVTPD2DQ XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xE6; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = trunc( m[0,64] ); XmmReg[32,32] = trunc( m[64,64] ); XmmReg[64,32] = 0; XmmReg[96,32] = 0; }
:CVTPD2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = trunc( XmmReg2[0,64] ); XmmReg1[32,32] = trunc( XmmReg2[64,64] ); XmmReg1[64,32] = 0; XmmReg1[96,32] = 0; }
:CVTPD2PI mmxreg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2D; mmxreg ... & m128 { local m:16 = m128; mmxreg[0,32] = trunc( m[0,64] ); mmxreg[32,32] = trunc( m[64,64] ); }
:CVTPD2PI mmxreg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2D; xmmmod=3 & mmxreg1 & XmmReg2 { mmxreg1[0,32] = trunc( XmmReg2[0,64] ); mmxreg1[32,32] = trunc( XmmReg2[64,64] ); }
:CVTPD2PS XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5A; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = float2float( m[0,64] ); XmmReg[32,32] = float2float( m[64,64] ); XmmReg[64,32] = 0; XmmReg[96,32] = 0; }
:CVTPD2PS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = float2float( XmmReg2[0,64] ); XmmReg1[32,32] = float2float( XmmReg2[64,64] ); XmmReg1[64,32] = 0; XmmReg1[96,32] = 0; }
:CVTPI2PD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2A; m64 & XmmReg ... { local m:8 = m64; XmmReg[0,64] = int2float(m[0,32]); XmmReg[64,64] = int2float(m[32,32]); }
:CVTPI2PD XmmReg1, mmxreg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2A; xmmmod=3 & XmmReg1 & mmxreg2 { XmmReg1[0,64] = int2float(mmxreg2[0,32]); XmmReg1[64,64] = int2float(mmxreg2[32,32]); }
:CVTPI2PS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2A; m64 & XmmReg ... { local m:8 = m64; XmmReg[0,32] = int2float(m[0,32]); XmmReg[32,32] = int2float(m[32,32]); }
:CVTPI2PS XmmReg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2A; xmmmod=3 & XmmReg1 & mmxreg2 { XmmReg1[0,32] = int2float(mmxreg2[0,32]); XmmReg1[32,32] = int2float(mmxreg2[32,32]); }
:CVTPS2DQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5B; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = trunc( m[0,32] ); XmmReg[32,32] = trunc( m[32,32] ); XmmReg[64,32] = trunc( m[64,32] ); XmmReg[96,32] = trunc( m[96,32] ); }
:CVTPS2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = trunc( XmmReg2[0,32] ); XmmReg1[32,32] = trunc( XmmReg2[32,32] ); XmmReg1[64,32] = trunc( XmmReg2[64,32] ); XmmReg1[96,32] = trunc( XmmReg2[96,32] ); }
:CVTPS2PD XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; m64 & XmmReg ... { local m:8 = m64; XmmReg[0,64] = float2float( m[0,32] ); XmmReg[64,64] = float2float( m[32,32] ); }
:CVTPS2PD XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp:8 = XmmReg2[0,64]; XmmReg1[0,64] = float2float( tmp[0,32] ); XmmReg1[64,64] = float2float( tmp[32,32] ); }
# NOTE(review): some MMX-destination conversions clear FPUTagWord (marking
# MMX registers valid) and some do not -- e.g. CVTPD2PI above does not;
# confirm which behavior is intended.
:CVTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,32] = round(m[0,32]); mmxreg[32,32] = round(m[32,32]); FPUTagWord = 0x0000; }
:CVTPS2PI mmxreg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2D; xmmmod=3 & mmxreg1 & XmmReg2 { mmxreg1[0,32] = round(XmmReg2[0,32]); mmxreg1[32,32] = round(XmmReg2[32,32]); FPUTagWord = 0x0000; }
:CVTSD2SI Reg32, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2D; (Reg32 & check_Reg32_dest) ... & m64 { Reg32 = trunc(round(m64)); build check_Reg32_dest; }
:CVTSD2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2 { Reg32 = trunc(round(XmmReg2[0,64])); build check_Reg32_dest; }
@ifdef IA64
:CVTSD2SI Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; Reg64 ... & m64 { Reg64 = trunc(round(m64)); }
# NOTE(review): unlike the m64 form above, this register form omits
# $(LONGMODE_ON) -- confirm intentional.
:CVTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2 { Reg64 = trunc(round(XmmReg2[0,64])); }
@endif
:CVTSD2SS XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5A; m64 & XmmReg ... { XmmReg[0,32] = float2float(m64); }
:CVTSD2SS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = float2float(XmmReg2[0,64]); }
:CVTSI2SD XmmReg, rm32 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2A; rm32 & XmmReg ... { XmmReg[0,64] = int2float(rm32); }
@ifdef IA64
:CVTSI2SD XmmReg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2A; rm64 & XmmReg ... { XmmReg[0,64] = int2float(rm64); }
@endif
:CVTSI2SS XmmReg, rm32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2A; rm32 & XmmReg ... { XmmReg[0,32] = int2float(rm32); }
@ifdef IA64
:CVTSI2SS XmmReg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2A; rm64 & XmmReg ... { XmmReg[0,32] = int2float(rm64); }
@endif
:CVTSS2SD XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5A; m32 & XmmReg ... { XmmReg[0,64] = float2float(m32); }
:CVTSS2SD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5A; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = float2float(XmmReg2[0,32]); }
:CVTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; (Reg32 & check_Reg32_dest) ... & m32 { Reg32 = trunc(round(m32)); build check_Reg32_dest; }
:CVTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2 { Reg32 = trunc(round(XmmReg2[0,32])); build check_Reg32_dest; }
@ifdef IA64
:CVTSS2SI Reg64, m32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2D; Reg64 ... & m32 { Reg64 = trunc(round(m32)); }
# NOTE(review): register form omits $(LONGMODE_ON); see CVTSD2SI note.
:CVTSS2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2D; xmmmod=3 & Reg64 & XmmReg2 { Reg64 = trunc(round(XmmReg2[0,32])); }
@endif
:CVTTPD2PI mmxreg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2C; mmxreg ... & m128 { local m:16 = m128; mmxreg[0,32] = trunc(m[0,64]); mmxreg[32,32] = trunc(m[64,64]); FPUTagWord = 0x0000; }
:CVTTPD2PI mmxreg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2C; xmmmod=3 & mmxreg1 & XmmReg2 { mmxreg1[0,32] = trunc(XmmReg2[0,64]); mmxreg1[32,32] = trunc(XmmReg2[64,64]); FPUTagWord = 0x0000; }
:CVTTPD2DQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE6; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = trunc(m[0,64]); XmmReg[32,32] = trunc(m[64,64]); XmmReg[64,32] = 0; XmmReg[96,32] = 0; }
:CVTTPD2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE6; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = trunc(XmmReg2[0,64]); XmmReg1[32,32] = trunc(XmmReg2[64,64]); XmmReg1[64,32] = 0; XmmReg1[96,32] = 0; }
:CVTTPS2DQ XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5B; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = trunc(m[0,32]); XmmReg[32,32] = trunc(m[32,32]); XmmReg[64,32] = trunc(m[64,32]); XmmReg[96,32] = trunc(m[96,32]); }
:CVTTPS2DQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = trunc(XmmReg2[0,32]); XmmReg1[32,32] = trunc(XmmReg2[32,32]); XmmReg1[64,32] = trunc(XmmReg2[64,32]); XmmReg1[96,32] = trunc(XmmReg2[96,32]); }
:CVTTPS2PI mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2C; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,32] = trunc(m[0,32]); mmxreg[32,32] = trunc(m[32,32]); FPUTagWord = 0x0000; }
:CVTTPS2PI mmxreg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2C; xmmmod=3 & mmxreg1 & XmmReg2 { mmxreg1[0,32] = trunc(XmmReg2[0,32]); mmxreg1[32,32] = trunc(XmmReg2[32,32]); FPUTagWord = 0x0000; }
:CVTTSD2SI Reg32, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2C; (Reg32 & check_Reg32_dest) ... & m64 { Reg32 = trunc(m64); build check_Reg32_dest; }
:CVTTSD2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2 { Reg32 = trunc(XmmReg2[0,64]); build check_Reg32_dest; }
@ifdef IA64
:CVTTSD2SI Reg64, m64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2C; Reg64 ... & m64 { Reg64 = trunc(m64); }
# NOTE(review): register form omits $(LONGMODE_ON); see CVTSD2SI note.
:CVTTSD2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F2) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg64 & XmmReg2 { Reg64 = trunc(XmmReg2[0,64]); }
@endif
:CVTTSS2SI Reg32, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2C; (Reg32 & check_Reg32_dest) ... & m32 { Reg32 = trunc(m32); build check_Reg32_dest; }
:CVTTSS2SI Reg32, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg32 & check_Reg32_dest & XmmReg2 { Reg32 = trunc(XmmReg2[0,32]); build check_Reg32_dest; }
@ifdef IA64
:CVTTSS2SI Reg64, m32 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2C; Reg64 ... & m32 { Reg64 = trunc(m32); }
# NOTE(review): register form omits $(LONGMODE_ON); see CVTSD2SI note.
:CVTTSS2SI Reg64, XmmReg2 is vexMode=0 & opsize=2 & $(PRE_F3) & byte=0x0F; byte=0x2C; xmmmod=3 & Reg64 & XmmReg2 { Reg64 = trunc(XmmReg2[0,32]); }
@endif

# ---- DIV*: packed forms are opaque pcodeops, scalar forms use f/ ----
define pcodeop divpd;
:DIVPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5E; XmmReg ... & m128 { XmmReg = divpd(XmmReg, m128); }
:DIVPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = divpd(XmmReg1, XmmReg2); }
define pcodeop divps;
:DIVPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5E; XmmReg ... & m128 { XmmReg = divps(XmmReg, m128); }
:DIVPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = divps(XmmReg1, XmmReg2); }
:DIVSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5E; m64 & XmmReg ... { XmmReg[0,64] = XmmReg[0,64] f/ m64; }
:DIVSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f/ XmmReg2[0,64]; }
:DIVSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5E; m32 & XmmReg ... { XmmReg[0,32] = XmmReg[0,32] f/ m32; }
:DIVSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5E; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f/ XmmReg2[0,32]; }

# EMMS: mark all MMX/x87 tag-word entries empty
:EMMS is vexMode=0 & byte=0x0F; byte=0x77 { FPUTagWord = 0xFFFF; }

# ---- horizontal add/subtract (SSE3): low half from dest pairs, high
# ---- half from source pairs
:HADDPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] f+ XmmReg[64,64]; XmmReg[64,64] = m[0,64] f+ m[64,64]; }
:HADDPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp:16 = XmmReg2; XmmReg1[0,64] = XmmReg1[0,64] f+ XmmReg1[64,64]; XmmReg1[64,64] = tmp[0,64] f+ tmp[64,64]; }
:HADDPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] f+ XmmReg[32,32]; XmmReg[32,32] = XmmReg[64,32] f+ XmmReg[96,32]; XmmReg[64,32] = m[0,32] f+ m[32,32]; XmmReg[96,32] = m[64,32] f+ m[96,32]; }
:HADDPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7C; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp:16 = XmmReg2; XmmReg1[0,32] = XmmReg1[0,32] f+ XmmReg1[32,32]; XmmReg1[32,32] = XmmReg1[64,32] f+ XmmReg1[96,32]; XmmReg1[64,32] = tmp[0,32] f+ tmp[32,32]; XmmReg1[96,32] = tmp[64,32] f+ tmp[96,32]; }
:HSUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] f- XmmReg[64,64]; XmmReg[64,64] = m[0,64] f- m[64,64]; }
:HSUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp:16 = XmmReg2; XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg1[64,64]; XmmReg1[64,64] = tmp[0,64] f- tmp[64,64]; }
:HSUBPS XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] f- XmmReg[32,32]; XmmReg[32,32] = XmmReg[64,32] f- XmmReg[96,32]; XmmReg[64,32] = m[0,32] f- m[32,32]; XmmReg[96,32] = m[64,32] f- m[96,32]; }
:HSUBPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x7D; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp:16 = XmmReg2; XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg1[32,32]; XmmReg1[32,32] = XmmReg1[64,32] f- XmmReg1[96,32]; XmmReg1[64,32] = tmp[0,32] f- tmp[32,32]; XmmReg1[96,32] = tmp[64,32] f- tmp[96,32]; }

#--------------------
#SSE3...
#--------------------

define pcodeop lddqu;
:LDDQU XmmReg, m128 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xF0; XmmReg ... & m128 { XmmReg = lddqu(XmmReg, m128); }
define pcodeop maskmovdqu;
:MASKMOVDQU XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF7; XmmReg1 & XmmReg2 { XmmReg1 = maskmovdqu(XmmReg1, XmmReg2); }

# ---- MAX*/MIN*: packed forms are opaque pcodeops; scalar forms keep the
# ---- destination lane (goto inst_next) when the comparison holds
define pcodeop maxpd;
:MAXPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5F; XmmReg ... & m128 { XmmReg = maxpd(XmmReg, m128); }
:MAXPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = maxpd(XmmReg1, XmmReg2); }
define pcodeop maxps;
:MAXPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5F; XmmReg ... & m128 { XmmReg = maxps(XmmReg, m128); }
:MAXPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = maxps(XmmReg1, XmmReg2); }
:MAXSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5F; XmmReg ... & m64 { local tmp:8 = m64; if (tmp f< XmmReg[0,64]) goto inst_next; XmmReg[0,64] = tmp; }
:MAXSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { if (XmmReg2[0,64] f< XmmReg1[0,64]) goto inst_next; XmmReg1[0,64] = XmmReg2[0,64]; }
:MAXSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5F; XmmReg ... & m32 { local tmp:4 = m32; if (tmp f< XmmReg[0,32]) goto inst_next; XmmReg[0,32] = tmp; }
:MAXSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5F; xmmmod=3 & XmmReg1 & XmmReg2 { if (XmmReg2[0,32] f< XmmReg1[0,32]) goto inst_next; XmmReg1[0,32] = XmmReg2[0,32]; }
define pcodeop minpd;
:MINPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5D; XmmReg ... & m128 { XmmReg = minpd(XmmReg, m128); }
:MINPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = minpd(XmmReg1, XmmReg2); }
define pcodeop minps;
:MINPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5D; XmmReg ... & m128 { XmmReg = minps(XmmReg, m128); }
:MINPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = minps(XmmReg1, XmmReg2); }
:MINSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5D; XmmReg ... & m64 { local tmp:8 = m64; if (XmmReg[0,64] f< tmp) goto inst_next; XmmReg[0,64] = tmp; }
:MINSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { if (XmmReg1[0,64] f< XmmReg2[0,64]) goto inst_next; XmmReg1[0,64] = XmmReg2[0,64]; }
:MINSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5D; XmmReg ... & m32 { local tmp:4 = m32; if (XmmReg[0,32] f< tmp) goto inst_next; XmmReg[0,32] = tmp; }
:MINSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5D; xmmmod=3 & XmmReg1 & XmmReg2 { if (XmmReg1[0,32] f< XmmReg2[0,32]) goto inst_next; XmmReg1[0,32] = XmmReg2[0,32]; }

# ---- aligned/unaligned 128-bit moves ----
:MOVAPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x28; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = m[0,64]; XmmReg[64,64] = m[64,64]; }
:MOVAPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[0,64]; XmmReg1[64,64] = XmmReg2[64,64]; }
:MOVAPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x29; m128 & XmmReg ... { m128 = XmmReg; }
:MOVAPD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2[0,64] = XmmReg1[0,64]; XmmReg2[64,64] = XmmReg1[64,64]; }
:MOVAPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x28; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = m[0,32]; XmmReg[32,32] = m[32,32]; XmmReg[64,32] = m[64,32]; XmmReg[96,32] = m[96,32]; }
:MOVAPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg2[0,32]; XmmReg1[32,32] = XmmReg2[32,32]; XmmReg1[64,32] = XmmReg2[64,32]; XmmReg1[96,32] = XmmReg2[96,32]; }
:MOVAPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x29; m128 & XmmReg ... { m128 = XmmReg; }
:MOVAPS XmmReg2, XmmReg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2[0,32] = XmmReg1[0,32]; XmmReg2[32,32] = XmmReg1[32,32]; XmmReg2[64,32] = XmmReg1[64,32]; XmmReg2[96,32] = XmmReg1[96,32]; }

# ---- MOVD/MOVQ between GP and MMX/XMM registers; loads zero-extend ----
:MOVD mmxreg, rm32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6E; rm32 & mmxreg ... { mmxreg = zext(rm32); }
:MOVD rm32, mmxreg is vexMode=0 & rexWprefix=0 & mandover=0 & byte=0x0F; byte=0x7E; rm32 & check_rm32_dest ... & mmxreg ... { rm32 = mmxreg(0); build check_rm32_dest; }
:MOVD XmmReg, rm32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6E; rm32 & XmmReg ... { XmmReg = zext(rm32); }
:MOVD rm32, XmmReg is vexMode=0 & $(PRE_66) & rexWprefix=0 & byte=0x0F; byte=0x7E; rm32 & check_rm32_dest ... & XmmReg ... { rm32 = XmmReg(0); build check_rm32_dest; }
@ifdef IA64
:MOVQ mmxreg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0x6E; rm64 & mmxreg ... { mmxreg = rm64; }
:MOVQ rm64, mmxreg is $(LONGMODE_ON) & vexMode=0 & opsize=2 & mandover=0 & byte=0x0F; byte=0x7E; rm64 & mmxreg ... { rm64 = mmxreg; }
:MOVQ XmmReg, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x6E; rm64 & XmmReg ... { XmmReg = zext(rm64); }
:MOVQ rm64, XmmReg is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_66) & byte=0x0F; byte=0x7E; rm64 & XmmReg ... { rm64 = XmmReg(0); }
@endif

# ---- direct-store instructions ----
:MOVDIRI Mem,Reg32 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_NO) & byte=0x0F; byte=0x38; byte=0xF9; Mem & Reg32 ... { *Mem = Reg32; }
@ifdef IA64
:MOVDIRI Mem,Reg64 is vexMode=0 & $(PRE_NO) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF9; Mem & Reg64 ... { *Mem = Reg64; }
@endif
define pcodeop movdir64b;
:MOVDIR64B Reg16, m512 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & addrsize=0 & byte=0x0F; byte=0x38; byte=0xF8; Reg16 ... & m512 { movdir64b(Reg16, m512); }
:MOVDIR64B Reg32, m512 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & addrsize=1 & byte=0x0F; byte=0x38; byte=0xF8; Reg32 ... & m512 { movdir64b(Reg32, m512); }
@ifdef IA64
:MOVDIR64B Reg32, m512 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & addrsize=1 & byte=0x0F; byte=0x38; byte=0xF8; Reg32 ... & m512 { movdir64b(Reg32, m512); }
:MOVDIR64B Reg64, m512 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xF8; Reg64 ... & m512 { movdir64b(Reg64, m512); }
@endif

# ---- SSE3 duplicate moves ----
:MOVDDUP XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x12; m64 & XmmReg ... { XmmReg[0,64] = m64; XmmReg[64,64] = m64; }
:MOVDDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[0,64]; XmmReg1[64,64] = XmmReg2[0,64]; }
:MOVSHDUP XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x16; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = m[32,32]; XmmReg[32,32] = m[32,32]; XmmReg[64,32] = m[96,32]; XmmReg[96,32] = m[96,32]; }
:MOVSHDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x16; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg2[32,32]; XmmReg1[32,32] = XmmReg2[32,32]; XmmReg1[64,32] = XmmReg2[96,32]; XmmReg1[96,32] = XmmReg2[96,32]; }
:MOVSLDUP XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x12; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = m[0,32]; XmmReg[32,32] = m[0,32]; XmmReg[64,32] = m[64,32]; XmmReg[96,32] = m[64,32]; }
:MOVSLDUP XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg2[0,32]; XmmReg1[32,32] = XmmReg2[0,32]; XmmReg1[64,32] = XmmReg2[64,32]; XmmReg1[96,32] = XmmReg2[64,32]; }

# ---- 128-bit integer moves ----
:MOVDQA XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6F; XmmReg ... & m128 { XmmReg = m128; }
:MOVDQA XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg2; }
:MOVDQA m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7F; XmmReg ... & m128 { m128 = XmmReg; }
:MOVDQA XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x7F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2 = XmmReg1; }
:MOVDQU XmmReg, m128 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x6F; XmmReg ... & m128 { XmmReg = m128; }
:MOVDQU XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x6F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg2; }
:MOVDQU m128, XmmReg is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7F; XmmReg ... & m128 { m128 = XmmReg; }
:MOVDQU XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7F; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg2 = XmmReg1; }

# TODO: this vexMode=0 & is potentially wrong
define pcodeop movdq2q;
:MOVDQ2Q mmxreg2, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0xD6; XmmReg1 & mmxreg2 { mmxreg2 = movdq2q(mmxreg2, XmmReg1); }

# ---- half-register moves ----
:MOVHLPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x12; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[64,64]; }
:MOVHPD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x16; XmmReg ... & m64 { XmmReg[64,64] = m64; }
:MOVHPD m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x17; XmmReg ... & m64 { m64 = XmmReg[64,64]; }
:MOVHPS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x16; XmmReg ... & m64 { XmmReg[64,64] = m64; }
:MOVHPS m64, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x17; XmmReg ... & m64 { m64 = XmmReg[64,64]; }
:MOVLHPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x16; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[64,64] = XmmReg2[0,64]; }
:MOVLPD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x12; XmmReg ... & m64 { XmmReg[0,64] = m64; }
:MOVLPD m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x13; XmmReg ... & m64 { m64 = XmmReg[0,64]; }
:MOVLPS XmmReg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x12; XmmReg ... & m64 { XmmReg[0,64] = m64; }
:MOVLPS m64, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x13; XmmReg ... & m64 { m64 = XmmReg[0,64]; }

# ---- sign-mask extraction (opaque pcodeops) ----
define pcodeop movmskpd;
:MOVMSKPD Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x50; XmmReg2 & Reg32 { Reg32 = movmskpd(Reg32, XmmReg2); }
define pcodeop movmskps;
:MOVMSKPS Reg32, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x50; XmmReg2 & Reg32 { Reg32 = movmskps(Reg32, XmmReg2); }

# ---- non-temporal stores: modeled as plain stores ----
:MOVNTQ m64, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE7; mmxreg ... & m64 { m64 = mmxreg; }
:MOVNTDQ m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE7; XmmReg ... & m128 { m128 = XmmReg; }
:MOVNTPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2B; XmmReg ... & m128 { m128 = XmmReg; }
:MOVNTPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2B; XmmReg ... & m128 { m128 = XmmReg; }

# ---- MOVQ: MMX forms (0F 6F/7F) and XMM forms (F3 0F 7E / 66 0F D6,
# ---- which zero the upper 64 bits of the destination register)
:MOVQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6F; mmxreg ... & m64 { mmxreg = m64; }
:MOVQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg2; }
:MOVQ m64, mmxreg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7F; mmxreg ... & m64 { m64 = mmxreg; }
:MOVQ mmxreg2, mmxreg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x7F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg2 = mmxreg1; }
:MOVQ XmmReg, m64 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7E; XmmReg ... & m64 { XmmReg = zext(m64); }
:MOVQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x7E; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = zext(XmmReg2[0,64]); }
:MOVQ m64, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD6; m64 & XmmReg ... { m64 = XmmReg[0,64]; }
:MOVQ XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD6; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg2 = zext(XmmReg1[0,64]); }
:MOVQ2DQ XmmReg, mmxreg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xD6; XmmReg & mmxreg2
{
	XmmReg = zext(mmxreg2);
	# may need to model x87 FPU state changes too ?????
}

# ---- scalar moves: loads zero the untouched lanes, register-register
# ---- forms preserve them
:MOVSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x10; m64 & XmmReg ... { XmmReg[0,64] = m64; XmmReg[64,64] = 0; }
:MOVSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[0,64]; }
:MOVSD m64, XmmReg is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x11; m64 & XmmReg ... { m64 = XmmReg[0,64]; }
:MOVSD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg2[0,64] = XmmReg1[0,64]; }
:MOVSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x10; m32 & XmmReg ... { XmmReg[0,32] = m32; XmmReg[32,32] = 0; XmmReg[64,32] = 0; XmmReg[96,32] = 0; }
:MOVSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg2[0,32]; }
:MOVSS m32, XmmReg is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x11; m32 & XmmReg ... { m32 = XmmReg[0,32]; }
:MOVSS XmmReg2, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg2[0,32] = XmmReg1[0,32]; }
:MOVUPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x10; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = m[0,64]; XmmReg[64,64] = m[64,64]; }
:MOVUPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg2[0,64]; XmmReg1[64,64] = XmmReg2[64,64]; }
:MOVUPD m128, XmmReg is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x11; m128 & XmmReg ... { m128 = XmmReg; }
:MOVUPD XmmReg2, XmmReg1 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg2[0,64] = XmmReg1[0,64]; XmmReg2[64,64] = XmmReg1[64,64]; }

# Not sure why someone had done it this way ?????
#Xmm2m128: m128 is vexMode=0 & m128 { export m128; }
#Xmm2m128: XmmReg2 is vexMode=0 & xmmmod=3 & XmmReg2 { export XmmReg2; }
#
#define pcodeop movups;
##:MOVUPS XmmReg, m128 is vexMode=0 & byte=0x0F; byte=0x10; XmmReg ... & m128 { XmmReg = movups(XmmReg, m128); }
##:MOVUPS XmmReg1, XmmReg2 is vexMode=0 & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = movups(XmmReg1, XmmReg2); }
#
#:MOVUPS XmmReg,Xmm2m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x10; XmmReg ... & Xmm2m128 { XmmReg = movups(XmmReg, Xmm2m128); }

:MOVUPS XmmReg, m128 is vexMode=0 & byte=0x0F; byte=0x10; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = m[0,32]; XmmReg[32,32] = m[32,32]; XmmReg[64,32] = m[64,32]; XmmReg[96,32] = m[96,32]; }
:MOVUPS XmmReg1, XmmReg2 is vexMode=0 & byte=0x0F; byte=0x10; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg2[0,32]; XmmReg1[32,32] = XmmReg2[32,32]; XmmReg1[64,32] = XmmReg2[64,32]; XmmReg1[96,32] = XmmReg2[96,32]; }
:MOVUPS m128, XmmReg is vexMode=0 & mandover=0 & byte=0x0F; byte=0x11; m128 & XmmReg ... { m128 = XmmReg; }
:MOVUPS XmmReg2, XmmReg1 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x11; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg2[0,32] = XmmReg1[0,32]; XmmReg2[32,32] = XmmReg1[32,32]; XmmReg2[64,32] = XmmReg1[64,32]; XmmReg2[96,32] = XmmReg1[96,32]; }

# ---- MUL*: per-lane / scalar floating-point multiply ----
:MULPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x59; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] f* m[0,64]; XmmReg[64,64] = XmmReg[64,64] f* m[64,64]; }
:MULPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f* XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] f* XmmReg2[64,64]; }
:MULPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x59; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] f* m[0,32]; XmmReg[32,32] = XmmReg[32,32] f* m[32,32]; XmmReg[64,32] = XmmReg[64,32] f* m[64,32]; XmmReg[96,32] = XmmReg[96,32] f* m[96,32]; }
:MULPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f* XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] f* XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] f* XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] f* XmmReg2[96,32]; }
:MULSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x59; m64 & XmmReg ... { XmmReg[0,64] = XmmReg[0,64] f* m64; }
:MULSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f* XmmReg2[0,64]; }
:MULSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x59; m32 & XmmReg ... { XmmReg[0,32] = XmmReg[0,32] f* m32; }
:MULSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x59; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f* XmmReg2[0,32]; }

# ---- ORPD/ORPS: bitwise OR of the whole 128-bit register ----
:ORPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x56; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:ORPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x56; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }
:ORPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x56; XmmReg ... & m128 { XmmReg = XmmReg | m128; }
:ORPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x56; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; }

# what about these ?????
define pcodeop packsswb;
:PACKSSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x63; mmxreg ... & m64 { mmxreg = packsswb(mmxreg, m64); }
:PACKSSWB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x63; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = packsswb(mmxreg1, mmxreg2); }
define pcodeop packssdw;
:PACKSSDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6B; mmxreg ... & m64 { mmxreg = packssdw(mmxreg, m64); }
:PACKSSDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = packssdw(mmxreg1, mmxreg2); }
:PACKSSWB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x63; XmmReg ... & m128 { XmmReg = packsswb(XmmReg, m128); }
:PACKSSWB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x63; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = packsswb(XmmReg1, XmmReg2); }
:PACKSSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6B; XmmReg ... & m128 { XmmReg = packssdw(XmmReg, m128); }
:PACKSSDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6B; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = packssdw(XmmReg1, XmmReg2); }

#sword < 0 : ubyte = 0
#sword > 0xff: ubyte = 0xff
#otherwise ubyte = sword
# unsigned-saturating narrow of a signed 16-bit word to one byte,
# implemented branch-free with boolean multiplies
macro sswub(sword, ubyte) { ubyte = (sword s> 0xff:2) * 0xff:1; ubyte = ubyte + (sword s> 0:2) * (sword s<= 0xff:2) * sword:1; }

# PACKUSWB: pack eight signed words (dest low half, source high half)
# into eight unsigned-saturated bytes
:PACKUSWB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxreg ... & m64 { local dest_copy:8 = mmxreg; local src_copy:8 = m64; local ubyte:1 = 0; sswub(dest_copy[0,16],ubyte); mmxreg[0,8] = ubyte; sswub(dest_copy[16,16],ubyte); mmxreg[8,8] = ubyte; sswub(dest_copy[32,16],ubyte); mmxreg[16,8] = ubyte; sswub(dest_copy[48,16],ubyte); mmxreg[24,8] = ubyte; sswub(src_copy[0,16],ubyte); mmxreg[32,8] = ubyte; sswub(src_copy[16,16],ubyte); mmxreg[40,8] = ubyte; sswub(src_copy[32,16],ubyte); mmxreg[48,8] = ubyte; sswub(src_copy[48,16],ubyte); mmxreg[56,8] = ubyte; }
:PACKUSWB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x67; mmxmod = 3 & mmxreg1 & mmxreg2 { local dest_copy:8 = mmxreg1; local src_copy:8 = mmxreg2; local ubyte:1 = 0; sswub(dest_copy[0,16],ubyte); mmxreg1[0,8] = ubyte; sswub(dest_copy[16,16],ubyte); mmxreg1[8,8] = ubyte; sswub(dest_copy[32,16],ubyte); mmxreg1[16,8] = ubyte; sswub(dest_copy[48,16],ubyte); mmxreg1[24,8] = ubyte; sswub(src_copy[0,16],ubyte); mmxreg1[32,8] = ubyte; sswub(src_copy[16,16],ubyte); mmxreg1[40,8] = ubyte; sswub(src_copy[32,16],ubyte); mmxreg1[48,8] = ubyte; sswub(src_copy[48,16],ubyte); mmxreg1[56,8] = ubyte; }
# --- constructor continues past this chunk: body follows in next lines ---
:PACKUSWB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x67; XmmReg ...
& m128 { local dest_copy:16 = XmmReg; local src_copy:16 = m128; local ubyte:1 = 0; sswub(dest_copy[0,16],ubyte); XmmReg[0,8] = ubyte; sswub(dest_copy[16,16],ubyte); XmmReg[8,8] = ubyte; sswub(dest_copy[32,16],ubyte); XmmReg[16,8] = ubyte; sswub(dest_copy[48,16],ubyte); XmmReg[24,8] = ubyte; sswub(dest_copy[64,16],ubyte); XmmReg[32,8] = ubyte; sswub(dest_copy[80,16],ubyte); XmmReg[40,8] = ubyte; sswub(dest_copy[96,16],ubyte); XmmReg[48,8] = ubyte; sswub(dest_copy[112,16],ubyte); XmmReg[56,8] = ubyte; sswub(src_copy[0,16],ubyte); XmmReg[64,8] = ubyte; sswub(src_copy[16,16],ubyte); XmmReg[72,8] = ubyte; sswub(src_copy[32,16],ubyte); XmmReg[80,8] = ubyte; sswub(src_copy[48,16],ubyte); XmmReg[88,8] = ubyte; sswub(src_copy[64,16],ubyte); XmmReg[96,8] = ubyte; sswub(src_copy[80,16],ubyte); XmmReg[104,8] = ubyte; sswub(src_copy[96,16],ubyte); XmmReg[112,8] = ubyte; sswub(src_copy[112,16],ubyte); XmmReg[120,8] = ubyte; } :PACKUSWB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x67; xmmmod = 3 & XmmReg1 & XmmReg2 { local dest_copy:16 = XmmReg1; local src_copy:16 = XmmReg2; local ubyte:1 = 0; sswub(dest_copy[0,16],ubyte); XmmReg1[0,8] = ubyte; sswub(dest_copy[16,16],ubyte); XmmReg1[8,8] = ubyte; sswub(dest_copy[32,16],ubyte); XmmReg1[16,8] = ubyte; sswub(dest_copy[48,16],ubyte); XmmReg1[24,8] = ubyte; sswub(dest_copy[64,16],ubyte); XmmReg1[32,8] = ubyte; sswub(dest_copy[80,16],ubyte); XmmReg1[40,8] = ubyte; sswub(dest_copy[96,16],ubyte); XmmReg1[48,8] = ubyte; sswub(dest_copy[112,16],ubyte); XmmReg1[56,8] = ubyte; sswub(src_copy[0,16],ubyte); XmmReg1[64,8] = ubyte; sswub(src_copy[16,16],ubyte); XmmReg1[72,8] = ubyte; sswub(src_copy[32,16],ubyte); XmmReg1[80,8] = ubyte; sswub(src_copy[48,16],ubyte); XmmReg1[88,8] = ubyte; sswub(src_copy[64,16],ubyte); XmmReg1[96,8] = ubyte; sswub(src_copy[80,16],ubyte); XmmReg1[104,8] = ubyte; sswub(src_copy[96,16],ubyte); XmmReg1[112,8] = ubyte; sswub(src_copy[112,16],ubyte); XmmReg1[120,8] = ubyte; } define pcodeop pabsb; 
:PABSB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1c; mmxreg ... & m64 { mmxreg=pabsb(mmxreg,m64); } :PABSB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1c; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pabsb(mmxreg1,mmxreg2); } :PABSB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1c; XmmReg ... & m128 { XmmReg=pabsb(XmmReg,m128); } :PABSB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1c; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pabsb(XmmReg1,XmmReg2); } define pcodeop pabsw; :PABSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1d; mmxreg ... & m64 { mmxreg=pabsw(mmxreg,m64); } :PABSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1d; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pabsw(mmxreg1,mmxreg2); } :PABSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1d; XmmReg ... & m128 { XmmReg=pabsw(XmmReg,m128); } :PABSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1d; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pabsw(XmmReg1,XmmReg2); } define pcodeop pabsd; :PABSD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1e; mmxreg ... & m64 { mmxreg=pabsd(mmxreg,m64); } :PABSD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x1e; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pabsd(mmxreg1,mmxreg2); } :PABSD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1e; XmmReg ... & m128 { XmmReg=pabsd(XmmReg,m128); } :PABSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x1e; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pabsd(XmmReg1,XmmReg2); } :PADDB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFC; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,8] = mmxreg[0,8] + m[0,8]; mmxreg[8,8] = mmxreg[8,8] + m[8,8]; mmxreg[16,8] = mmxreg[16,8] + m[16,8]; mmxreg[24,8] = mmxreg[24,8] + m[24,8]; mmxreg[32,8] = mmxreg[32,8] + m[32,8]; mmxreg[40,8] = mmxreg[40,8] + m[40,8]; mmxreg[48,8] = mmxreg[48,8] + m[48,8]; mmxreg[56,8] = mmxreg[56,8] + m[56,8]; } :PADDB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFC; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,8] = mmxreg1[0,8] + mmxreg2[0,8]; mmxreg1[8,8] = mmxreg1[8,8] + mmxreg2[8,8]; mmxreg1[16,8] = mmxreg1[16,8] + mmxreg2[16,8]; mmxreg1[24,8] = mmxreg1[24,8] + mmxreg2[24,8]; mmxreg1[32,8] = mmxreg1[32,8] + mmxreg2[32,8]; mmxreg1[40,8] = mmxreg1[40,8] + mmxreg2[40,8]; mmxreg1[48,8] = mmxreg1[48,8] + mmxreg2[48,8]; mmxreg1[56,8] = mmxreg1[56,8] + mmxreg2[56,8]; } :PADDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFD; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,16] = mmxreg[0,16] + m[0,16]; mmxreg[16,16] = mmxreg[16,16] + m[16,16]; mmxreg[32,16] = mmxreg[32,16] + m[32,16]; mmxreg[48,16] = mmxreg[48,16] + m[48,16]; } :PADDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFD; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = mmxreg1[0,16] + mmxreg2[0,16]; mmxreg1[16,16] = mmxreg1[16,16] + mmxreg2[16,16]; mmxreg1[32,16] = mmxreg1[32,16] + mmxreg2[32,16]; mmxreg1[48,16] = mmxreg1[48,16] + mmxreg2[48,16]; } :PADDD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFE; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,32] = mmxreg[0,32] + m[0,32]; mmxreg[32,32] = mmxreg[32,32] + m[32,32]; } :PADDD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFE; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,32] = mmxreg1[0,32] + mmxreg2[0,32]; mmxreg1[32,32] = mmxreg1[32,32] + mmxreg2[32,32]; } :PADDB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,8] = XmmReg[0,8] + m[0,8]; XmmReg[8,8] = XmmReg[8,8] + m[8,8]; XmmReg[16,8] = XmmReg[16,8] + m[16,8]; XmmReg[24,8] = XmmReg[24,8] + m[24,8]; XmmReg[32,8] = XmmReg[32,8] + m[32,8]; XmmReg[40,8] = XmmReg[40,8] + m[40,8]; XmmReg[48,8] = XmmReg[48,8] + m[48,8]; XmmReg[56,8] = XmmReg[56,8] + m[56,8]; XmmReg[64,8] = XmmReg[64,8] + m[64,8]; XmmReg[72,8] = XmmReg[72,8] + m[72,8]; XmmReg[80,8] = XmmReg[80,8] + m[80,8]; XmmReg[88,8] = XmmReg[88,8] + m[88,8]; XmmReg[96,8] = XmmReg[96,8] + m[96,8]; XmmReg[104,8] = XmmReg[104,8] + m[104,8]; XmmReg[112,8] = XmmReg[112,8] + m[112,8]; XmmReg[120,8] = XmmReg[120,8] + m[120,8]; } ## example of bitfield solution #:PADDB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; xmmmod = 3 & XmmReg1 & XmmReg2 #{ # XmmReg1[ 0,8] = XmmReg1[ 0,8] + XmmReg2[ 0,8]; # XmmReg1[ 8,8] = XmmReg1[ 8,8] + XmmReg2[ 8,8]; # XmmReg1[ 16,8] = XmmReg1[ 16,8] + XmmReg2[ 16,8]; # XmmReg1[ 24,8] = XmmReg1[ 24,8] + XmmReg2[ 24,8]; # XmmReg1[ 32,8] = XmmReg1[ 32,8] + XmmReg2[ 32,8]; # XmmReg1[ 40,8] = XmmReg1[ 40,8] + XmmReg2[ 40,8]; # XmmReg1[ 48,8] = XmmReg1[ 48,8] + XmmReg2[ 48,8]; # XmmReg1[ 56,8] = XmmReg1[ 56,8] + XmmReg2[ 56,8]; ## XmmReg1[ 64,8] = XmmReg1[ 64,8] + XmmReg2[ 64,8]; ## XmmReg1[ 72,8] = XmmReg1[ 72,8] + XmmReg2[ 72,8]; ## XmmReg1[ 80,8] = XmmReg1[ 80,8] + XmmReg2[ 80,8]; ## XmmReg1[ 88,8] = XmmReg1[ 88,8] + XmmReg2[ 88,8]; ## XmmReg1[ 96,8] = XmmReg1[ 96,8] + XmmReg2[ 96,8]; ## XmmReg1[104,8] = XmmReg1[104,8] + XmmReg2[104,8]; ## XmmReg1[112,8] = XmmReg1[112,8] + XmmReg2[112,8]; ## XmmReg1[120,8] = XmmReg1[120,8] + XmmReg2[120,8]; #} # full set of XMM byte registers :PADDB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFC; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,8] = XmmReg1[0,8] + XmmReg2[0,8]; XmmReg1[8,8] = XmmReg1[8,8] + XmmReg2[8,8]; XmmReg1[16,8] = XmmReg1[16,8] + XmmReg2[16,8]; XmmReg1[24,8] = XmmReg1[24,8] + XmmReg2[24,8]; XmmReg1[32,8] = XmmReg1[32,8] + XmmReg2[32,8]; 
XmmReg1[40,8] = XmmReg1[40,8] + XmmReg2[40,8]; XmmReg1[48,8] = XmmReg1[48,8] + XmmReg2[48,8]; XmmReg1[56,8] = XmmReg1[56,8] + XmmReg2[56,8]; XmmReg1[64,8] = XmmReg1[64,8] + XmmReg2[64,8]; XmmReg1[72,8] = XmmReg1[72,8] + XmmReg2[72,8]; XmmReg1[80,8] = XmmReg1[80,8] + XmmReg2[80,8]; XmmReg1[88,8] = XmmReg1[88,8] + XmmReg2[88,8]; XmmReg1[96,8] = XmmReg1[96,8] + XmmReg2[96,8]; XmmReg1[104,8] = XmmReg1[104,8] + XmmReg2[104,8]; XmmReg1[112,8] = XmmReg1[112,8] + XmmReg2[112,8]; XmmReg1[120,8] = XmmReg1[120,8] + XmmReg2[120,8]; } :PADDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFD; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,16] = XmmReg[0,16] + m[0,16]; XmmReg[16,16] = XmmReg[16,16] + m[16,16]; XmmReg[32,16] = XmmReg[32,16] + m[32,16]; XmmReg[48,16] = XmmReg[48,16] + m[48,16]; XmmReg[64,16] = XmmReg[64,16] + m[64,16]; XmmReg[80,16] = XmmReg[80,16] + m[80,16]; XmmReg[96,16] = XmmReg[96,16] + m[96,16]; XmmReg[112,16] = XmmReg[112,16] + m[112,16]; } :PADDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFD; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = XmmReg1[0,16] + XmmReg2[0,16]; XmmReg1[16,16] = XmmReg1[16,16] + XmmReg2[16,16]; XmmReg1[32,16] = XmmReg1[32,16] + XmmReg2[32,16]; XmmReg1[48,16] = XmmReg1[48,16] + XmmReg2[48,16]; XmmReg1[64,16] = XmmReg1[64,16] + XmmReg2[64,16]; XmmReg1[80,16] = XmmReg1[80,16] + XmmReg2[80,16]; XmmReg1[96,16] = XmmReg1[96,16] + XmmReg2[96,16]; XmmReg1[112,16] = XmmReg1[112,16] + XmmReg2[112,16]; } :PADDD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFE; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] + m[0,32]; XmmReg[32,32] = XmmReg[32,32] + m[32,32]; XmmReg[64,32] = XmmReg[64,32] + m[64,32]; XmmReg[96,32] = XmmReg[96,32] + m[96,32]; } :PADDD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFE; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] + XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] + XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] + XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] + XmmReg2[96,32]; } :PADDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD4; mmxreg ... & m64 { mmxreg = mmxreg + m64; } :PADDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD4; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 + mmxreg2; } :PADDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD4; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] + m[0,64]; XmmReg[64,64] = XmmReg[64,64] + m[64,64]; } :PADDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD4; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] + XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] + XmmReg2[64,64]; } define pcodeop paddsb; :PADDSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEC; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddsb(mmxreg1, mmxreg2_m64); } define pcodeop paddsw; :PADDSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xED; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddsw(mmxreg1, mmxreg2_m64); } :PADDSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEC; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddsb(XmmReg1, XmmReg2_m128); } :PADDSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xED; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddsw(XmmReg1, XmmReg2_m128); } define pcodeop paddusb; :PADDUSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDC; mmxreg1 ... 
& mmxreg2_m64 { mmxreg1 = paddusb(mmxreg1, mmxreg2_m64); } define pcodeop paddusw; :PADDUSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDD; mmxreg1 ... & mmxreg2_m64 { mmxreg1 = paddusw(mmxreg1, mmxreg2_m64); } :PADDUSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDC; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddusb(XmmReg1, XmmReg2_m128); } :PADDUSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDD; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = paddusw(XmmReg1, XmmReg2_m128); } :PALIGNR mmxreg, m64, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x3A; byte=0x0F; m64 & mmxreg ...; imm8 { temp:16 = ( ( zext(mmxreg) << 64 ) | zext( m64 ) ) >> ( imm8 * 8 ); mmxreg = temp:8; } :PALIGNR mmxreg1, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x3A; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2; imm8 { temp:16 = ( ( zext(mmxreg1) << 64 ) | zext( mmxreg2 ) ) >> ( imm8 * 8 ); mmxreg1 = temp:8; } :PALIGNR XmmReg1, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0F; m128 & XmmReg1 ...; imm8 { temp:32 = ( ( zext(XmmReg1) << 128 ) | zext( m128 ) ) >> ( imm8 * 8 ); XmmReg1 = temp:16; } :PALIGNR XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0F; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { temp:32 = ( ( zext(XmmReg1) << 128 ) | zext( XmmReg2 ) ) >> ( imm8 * 8 ); XmmReg1 = temp:16; } :PAND mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDB; mmxreg ... & m64 { mmxreg = mmxreg & m64; } :PAND mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 & mmxreg2; } :PAND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDB; XmmReg ... & m128 { XmmReg = XmmReg & m128; } :PAND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 & XmmReg2; } :PANDN mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDF; mmxreg ... 
& m64 { mmxreg = ~mmxreg & m64; } :PANDN mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDF; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = ~mmxreg1 & mmxreg2; } :PANDN XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDF; XmmReg ... & m128 { XmmReg = ~XmmReg & m128; } :PANDN XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDF; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = ~XmmReg1 & XmmReg2; } define pcodeop pavgb; :PAVGB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE0; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,8] = pavgb(mmxreg[0,8], m[0,8]); mmxreg[8,8] = pavgb(mmxreg[8,8], m[8,8]); mmxreg[16,8] = pavgb(mmxreg[16,8], m[16,8]); mmxreg[24,8] = pavgb(mmxreg[24,8], m[24,8]); mmxreg[32,8] = pavgb(mmxreg[32,8], m[32,8]); mmxreg[40,8] = pavgb(mmxreg[40,8], m[40,8]); mmxreg[48,8] = pavgb(mmxreg[48,8], m[48,8]); mmxreg[56,8] = pavgb(mmxreg[56,8], m[56,8]); } :PAVGB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE0; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,8] = pavgb(mmxreg1[0,8], mmxreg2[0,8]); mmxreg1[8,8] = pavgb(mmxreg1[8,8], mmxreg2[8,8]); mmxreg1[16,8] = pavgb(mmxreg1[16,8], mmxreg2[16,8]); mmxreg1[24,8] = pavgb(mmxreg1[24,8], mmxreg2[24,8]); mmxreg1[32,8] = pavgb(mmxreg1[32,8], mmxreg2[32,8]); mmxreg1[40,8] = pavgb(mmxreg1[40,8], mmxreg2[40,8]); mmxreg1[48,8] = pavgb(mmxreg1[48,8], mmxreg2[48,8]); mmxreg1[56,8] = pavgb(mmxreg1[56,8], mmxreg2[56,8]); } define pcodeop pavgw; :PAVGW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE3; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,16] = pavgw(mmxreg[0,16], m[0,16]); mmxreg[16,16] = pavgw(mmxreg[16,16], m[16,16]); mmxreg[32,16] = pavgw(mmxreg[32,16], m[32,16]); mmxreg[48,16] = pavgw(mmxreg[48,16], m[48,16]); } :PAVGW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE3; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = pavgw(mmxreg1[0,16], mmxreg2[0,16]); mmxreg1[16,16] = pavgw(mmxreg1[16,16], mmxreg2[16,16]); mmxreg1[32,16] = pavgw(mmxreg1[32,16], mmxreg2[32,16]); mmxreg1[48,16] = pavgw(mmxreg1[48,16], mmxreg2[48,16]); } :PAVGB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE0; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,8] = pavgb(XmmReg[0,8], m[0,8]); XmmReg[8,8] = pavgb(XmmReg[8,8], m[8,8]); XmmReg[16,8] = pavgb(XmmReg[16,8], m[16,8]); XmmReg[24,8] = pavgb(XmmReg[24,8], m[24,8]); XmmReg[32,8] = pavgb(XmmReg[32,8], m[32,8]); XmmReg[40,8] = pavgb(XmmReg[40,8], m[40,8]); XmmReg[48,8] = pavgb(XmmReg[48,8], m[48,8]); XmmReg[56,8] = pavgb(XmmReg[56,8], m[56,8]); XmmReg[64,8] = pavgb(XmmReg[64,8], m[64,8]); XmmReg[72,8] = pavgb(XmmReg[72,8], m[72,8]); XmmReg[80,8] = pavgb(XmmReg[80,8], m[80,8]); XmmReg[88,8] = pavgb(XmmReg[88,8], m[88,8]); XmmReg[96,8] = pavgb(XmmReg[96,8], m[96,8]); XmmReg[104,8] = pavgb(XmmReg[104,8], m[104,8]); XmmReg[112,8] = pavgb(XmmReg[112,8], m[112,8]); XmmReg[120,8] = pavgb(XmmReg[120,8], m[120,8]); } # full set of XMM byte registers :PAVGB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE0; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,8] = pavgb(XmmReg1[0,8], XmmReg2[0,8]); XmmReg1[8,8] = pavgb(XmmReg1[8,8], XmmReg2[8,8]); XmmReg1[16,8] = pavgb(XmmReg1[16,8], XmmReg2[16,8]); XmmReg1[24,8] = pavgb(XmmReg1[24,8], XmmReg2[24,8]); XmmReg1[32,8] = pavgb(XmmReg1[32,8], XmmReg2[32,8]); XmmReg1[40,8] = pavgb(XmmReg1[40,8], XmmReg2[40,8]); XmmReg1[48,8] = pavgb(XmmReg1[48,8], XmmReg2[48,8]); XmmReg1[56,8] = pavgb(XmmReg1[56,8], XmmReg2[56,8]); XmmReg1[64,8] = pavgb(XmmReg1[64,8], XmmReg2[64,8]); 
XmmReg1[72,8] = pavgb(XmmReg1[72,8], XmmReg2[72,8]); XmmReg1[80,8] = pavgb(XmmReg1[80,8], XmmReg2[80,8]); XmmReg1[88,8] = pavgb(XmmReg1[88,8], XmmReg2[88,8]); XmmReg1[96,8] = pavgb(XmmReg1[96,8], XmmReg2[96,8]); XmmReg1[104,8] = pavgb(XmmReg1[104,8], XmmReg2[104,8]); XmmReg1[112,8] = pavgb(XmmReg1[112,8], XmmReg2[112,8]); XmmReg1[120,8] = pavgb(XmmReg1[120,8], XmmReg2[120,8]); } :PAVGW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE3; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,16] = pavgw(XmmReg[0,16], m[0,16]); XmmReg[16,16] = pavgw(XmmReg[16,16], m[16,16]); XmmReg[32,16] = pavgw(XmmReg[32,16], m[32,16]); XmmReg[48,16] = pavgw(XmmReg[48,16], m[48,16]); XmmReg[64,16] = pavgw(XmmReg[64,16], m[64,16]); XmmReg[80,16] = pavgw(XmmReg[80,16], m[80,16]); XmmReg[96,16] = pavgw(XmmReg[96,16], m[96,16]); XmmReg[112,16] = pavgw(XmmReg[112,16], m[112,16]); } :PAVGW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE3; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = pavgw(XmmReg1[0,16], XmmReg2[0,16]); XmmReg1[16,16] = pavgw(XmmReg1[16,16], XmmReg2[16,16]); XmmReg1[32,16] = pavgw(XmmReg1[32,16], XmmReg2[32,16]); XmmReg1[48,16] = pavgw(XmmReg1[48,16], XmmReg2[48,16]); XmmReg1[64,16] = pavgw(XmmReg1[64,16], XmmReg2[64,16]); XmmReg1[80,16] = pavgw(XmmReg1[80,16], XmmReg2[80,16]); XmmReg1[96,16] = pavgw(XmmReg1[96,16], XmmReg2[96,16]); XmmReg1[112,16] = pavgw(XmmReg1[112,16], XmmReg2[112,16]); } :PCMPEQB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x74; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,8] = (mmxreg[0,8] == m[0,8]) * 0xFF; mmxreg[8,8] = (mmxreg[8,8] == m[8,8]) * 0xFF; mmxreg[16,8] = (mmxreg[16,8] == m[16,8]) * 0xFF; mmxreg[24,8] = (mmxreg[24,8] == m[24,8]) * 0xFF; mmxreg[32,8] = (mmxreg[32,8] == m[32,8]) * 0xFF; mmxreg[40,8] = (mmxreg[40,8] == m[40,8]) * 0xFF; mmxreg[48,8] = (mmxreg[48,8] == m[48,8]) * 0xFF; mmxreg[56,8] = (mmxreg[56,8] == m[56,8]) * 0xFF; } :PCMPEQB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x74; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,8] = (mmxreg1[0,8] == mmxreg2[0,8]) * 0xFF; mmxreg1[8,8] = (mmxreg1[8,8] == mmxreg2[8,8]) * 0xFF; mmxreg1[16,8] = (mmxreg1[16,8] == mmxreg2[16,8]) * 0xFF; mmxreg1[24,8] = (mmxreg1[24,8] == mmxreg2[24,8]) * 0xFF; mmxreg1[32,8] = (mmxreg1[32,8] == mmxreg2[32,8]) * 0xFF; mmxreg1[40,8] = (mmxreg1[40,8] == mmxreg2[40,8]) * 0xFF; mmxreg1[48,8] = (mmxreg1[48,8] == mmxreg2[48,8]) * 0xFF; mmxreg1[56,8] = (mmxreg1[56,8] == mmxreg2[56,8]) * 0xFF; } :PCMPEQW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x75; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,16] = zext(mmxreg[0,16] == m[0,16]) * 0xFFFF; mmxreg[16,16] = zext(mmxreg[16,16] == m[16,16]) * 0xFFFF; mmxreg[32,16] = zext(mmxreg[32,16] == m[32,16]) * 0xFFFF; mmxreg[48,16] = zext(mmxreg[48,16] == m[48,16]) * 0xFFFF; } :PCMPEQW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x75; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = zext(mmxreg1[0,16] == mmxreg2[0,16]) * 0xFFFF; mmxreg1[16,16] = zext(mmxreg1[16,16] == mmxreg2[16,16]) * 0xFFFF; mmxreg1[32,16] = zext(mmxreg1[32,16] == mmxreg2[32,16]) * 0xFFFF; mmxreg1[48,16] = zext(mmxreg1[48,16] == mmxreg2[48,16]) * 0xFFFF; } :PCMPEQD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x76; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,32] = zext(mmxreg[0,32] == m[0,32]) * 0xFFFFFFFF; mmxreg[32,32] = zext(mmxreg[32,32] == m[32,32]) * 0xFFFFFFFF; } :PCMPEQD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x76; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,32] = zext(mmxreg1[0,32] == mmxreg2[0,32]) * 0xFFFFFFFF; mmxreg1[32,32] = zext(mmxreg1[32,32] == mmxreg2[32,32]) * 0xFFFFFFFF; } :PCMPEQB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,8] = (XmmReg[0,8] == m[0,8]) * 0xFF; XmmReg[8,8] = (XmmReg[8,8] == m[8,8]) * 0xFF; XmmReg[16,8] = (XmmReg[16,8] == m[16,8]) * 0xFF; XmmReg[24,8] = (XmmReg[24,8] == m[24,8]) * 0xFF; XmmReg[32,8] = (XmmReg[32,8] == m[32,8]) * 0xFF; XmmReg[40,8] = (XmmReg[40,8] == m[40,8]) * 0xFF; XmmReg[48,8] = (XmmReg[48,8] == m[48,8]) * 0xFF; XmmReg[56,8] = (XmmReg[56,8] == m[56,8]) * 0xFF; XmmReg[64,8] = (XmmReg[64,8] == m[64,8]) * 0xFF; XmmReg[72,8] = (XmmReg[72,8] == m[72,8]) * 0xFF; XmmReg[80,8] = (XmmReg[80,8] == m[80,8]) * 0xFF; XmmReg[88,8] = (XmmReg[88,8] == m[88,8]) * 0xFF; XmmReg[96,8] = (XmmReg[96,8] == m[96,8]) * 0xFF; XmmReg[104,8] = (XmmReg[104,8] == m[104,8]) * 0xFF; XmmReg[112,8] = (XmmReg[112,8] == m[112,8]) * 0xFF; XmmReg[120,8] = (XmmReg[120,8] == m[120,8]) * 0xFF; } # full set of XMM byte registers :PCMPEQB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x74; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,8] = (XmmReg1[0,8] == XmmReg2[0,8]) * 0xFF; XmmReg1[8,8] = (XmmReg1[8,8] == XmmReg2[8,8]) * 0xFF; XmmReg1[16,8] = (XmmReg1[16,8] == XmmReg2[16,8]) * 0xFF; XmmReg1[24,8] = (XmmReg1[24,8] == XmmReg2[24,8]) * 0xFF; XmmReg1[32,8] = (XmmReg1[32,8] == XmmReg2[32,8]) * 0xFF; XmmReg1[40,8] = (XmmReg1[40,8] == XmmReg2[40,8]) * 0xFF; XmmReg1[48,8] = (XmmReg1[48,8] == XmmReg2[48,8]) * 0xFF; XmmReg1[56,8] = (XmmReg1[56,8] == XmmReg2[56,8]) * 0xFF; XmmReg1[64,8] = (XmmReg1[64,8] == XmmReg2[64,8]) * 0xFF; XmmReg1[72,8] = (XmmReg1[72,8] == XmmReg2[72,8]) * 0xFF; 
XmmReg1[80,8] = (XmmReg1[80,8] == XmmReg2[80,8]) * 0xFF; XmmReg1[88,8] = (XmmReg1[88,8] == XmmReg2[88,8]) * 0xFF; XmmReg1[96,8] = (XmmReg1[96,8] == XmmReg2[96,8]) * 0xFF; XmmReg1[104,8] = (XmmReg1[104,8] == XmmReg2[104,8]) * 0xFF; XmmReg1[112,8] = (XmmReg1[112,8] == XmmReg2[112,8]) * 0xFF; XmmReg1[120,8] = (XmmReg1[120,8] == XmmReg2[120,8]) * 0xFF; } :PCMPEQW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x75; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,16] = zext(XmmReg[0,16] == m[0,16]) * 0xFFFF; XmmReg[16,16] = zext(XmmReg[16,16] == m[16,16]) * 0xFFFF; XmmReg[32,16] = zext(XmmReg[32,16] == m[32,16]) * 0xFFFF; XmmReg[48,16] = zext(XmmReg[48,16] == m[48,16]) * 0xFFFF; XmmReg[64,16] = zext(XmmReg[64,16] == m[64,16]) * 0xFFFF; XmmReg[80,16] = zext(XmmReg[80,16] == m[80,16]) * 0xFFFF; XmmReg[96,16] = zext(XmmReg[96,16] == m[96,16]) * 0xFFFF; XmmReg[112,16] = zext(XmmReg[112,16] == m[112,16]) * 0xFFFF; } :PCMPEQW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x75; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = zext(XmmReg1[0,16] == XmmReg2[0,16]) * 0xFFFF; XmmReg1[16,16] = zext(XmmReg1[16,16] == XmmReg2[16,16]) * 0xFFFF; XmmReg1[32,16] = zext(XmmReg1[32,16] == XmmReg2[32,16]) * 0xFFFF; XmmReg1[48,16] = zext(XmmReg1[48,16] == XmmReg2[48,16]) * 0xFFFF; XmmReg1[64,16] = zext(XmmReg1[64,16] == XmmReg2[64,16]) * 0xFFFF; XmmReg1[80,16] = zext(XmmReg1[80,16] == XmmReg2[80,16]) * 0xFFFF; XmmReg1[96,16] = zext(XmmReg1[96,16] == XmmReg2[96,16]) * 0xFFFF; XmmReg1[112,16] = zext(XmmReg1[112,16] == XmmReg2[112,16]) * 0xFFFF; } :PCMPEQD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x76; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,32] = zext(XmmReg[0,32] == m[0,32]) * 0xFFFFFFFF; XmmReg[32,32] = zext(XmmReg[32,32] == m[32,32]) * 0xFFFFFFFF; XmmReg[64,32] = zext(XmmReg[64,32] == m[64,32]) * 0xFFFFFFFF; XmmReg[96,32] = zext(XmmReg[96,32] == m[96,32]) * 0xFFFFFFFF; } :PCMPEQD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x76; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = zext(XmmReg1[0,32] == XmmReg2[0,32]) * 0xFFFFFFFF; XmmReg1[32,32] = zext(XmmReg1[32,32] == XmmReg2[32,32]) * 0xFFFFFFFF; XmmReg1[64,32] = zext(XmmReg1[64,32] == XmmReg2[64,32]) * 0xFFFFFFFF; XmmReg1[96,32] = zext(XmmReg1[96,32] == XmmReg2[96,32]) * 0xFFFFFFFF; } :PCMPGTB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x64; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,8] = (mmxreg[0,8] s> m[0,8]) * 0xFF; mmxreg[8,8] = (mmxreg[8,8] s> m[8,8]) * 0xFF; mmxreg[16,8] = (mmxreg[16,8] s> m[16,8]) * 0xFF; mmxreg[24,8] = (mmxreg[24,8] s> m[24,8]) * 0xFF; mmxreg[32,8] = (mmxreg[32,8] s> m[32,8]) * 0xFF; mmxreg[40,8] = (mmxreg[40,8] s> m[40,8]) * 0xFF; mmxreg[48,8] = (mmxreg[48,8] s> m[48,8]) * 0xFF; mmxreg[56,8] = (mmxreg[56,8] s> m[56,8]) * 0xFF; } :PCMPGTB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x64; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,8] = (mmxreg1[0,8] s> mmxreg2[0,8]) * 0xFF; mmxreg1[8,8] = (mmxreg1[8,8] s> mmxreg2[8,8]) * 0xFF; mmxreg1[16,8] = (mmxreg1[16,8] s> mmxreg2[16,8]) * 0xFF; mmxreg1[24,8] = (mmxreg1[24,8] s> mmxreg2[24,8]) * 0xFF; mmxreg1[32,8] = (mmxreg1[32,8] s> mmxreg2[32,8]) * 0xFF; mmxreg1[40,8] = (mmxreg1[40,8] s> mmxreg2[40,8]) * 0xFF; mmxreg1[48,8] = (mmxreg1[48,8] s> mmxreg2[48,8]) * 0xFF; mmxreg1[56,8] = (mmxreg1[56,8] s> mmxreg2[56,8]) * 0xFF; } :PCMPGTW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x65; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,16] = zext(mmxreg[0,16] s> m[0,16]) * 0xFFFF; mmxreg[16,16] = zext(mmxreg[16,16] s> m[16,16]) * 0xFFFF; mmxreg[32,16] = zext(mmxreg[32,16] s> m[32,16]) * 0xFFFF; mmxreg[48,16] = zext(mmxreg[48,16] s> m[48,16]) * 0xFFFF; } :PCMPGTW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x65; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = zext(mmxreg1[0,16] s> mmxreg2[0,16]) * 0xFFFF; mmxreg1[16,16] = zext(mmxreg1[16,16] s> mmxreg2[16,16]) * 0xFFFF; mmxreg1[32,16] = zext(mmxreg1[32,16] s> mmxreg2[32,16]) * 0xFFFF; mmxreg1[48,16] = zext(mmxreg1[48,16] s> mmxreg2[48,16]) * 0xFFFF; } :PCMPGTD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x66; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,32] = zext(mmxreg[0,32] s> m[0,32]) * 0xFFFFFFFF; mmxreg[32,32] = zext(mmxreg[32,32] s> m[32,32]) * 0xFFFFFFFF; } :PCMPGTD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x66; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,32] = zext(mmxreg1[0,32] s> mmxreg2[0,32]) * 0xFFFFFFFF; mmxreg1[32,32] = zext(mmxreg1[32,32] s> mmxreg2[32,32]) * 0xFFFFFFFF; } :PCMPGTB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x64; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,8] = (XmmReg[0,8] s> m[0,8]) * 0xFF; XmmReg[8,8] = (XmmReg[8,8] s> m[8,8]) * 0xFF; XmmReg[16,8] = (XmmReg[16,8] s> m[16,8]) * 0xFF; XmmReg[24,8] = (XmmReg[24,8] s> m[24,8]) * 0xFF; XmmReg[32,8] = (XmmReg[32,8] s> m[32,8]) * 0xFF; XmmReg[40,8] = (XmmReg[40,8] s> m[40,8]) * 0xFF; XmmReg[48,8] = (XmmReg[48,8] s> m[48,8]) * 0xFF; XmmReg[56,8] = (XmmReg[56,8] s> m[56,8]) * 0xFF; XmmReg[64,8] = (XmmReg[64,8] s> m[64,8]) * 0xFF; XmmReg[72,8] = (XmmReg[72,8] s> m[72,8]) * 0xFF; XmmReg[80,8] = (XmmReg[80,8] s> m[80,8]) * 0xFF; XmmReg[88,8] = (XmmReg[88,8] s> m[88,8]) * 0xFF; XmmReg[96,8] = (XmmReg[96,8] s> m[96,8]) * 0xFF; XmmReg[104,8] = (XmmReg[104,8] s> m[104,8]) * 0xFF; XmmReg[112,8] = (XmmReg[112,8] s> m[112,8]) * 0xFF; XmmReg[120,8] = (XmmReg[120,8] s> m[120,8]) * 0xFF; } # full set of XMM byte registers :PCMPGTB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x64; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,8] = (XmmReg1[0,8] s> XmmReg2[0,8]) * 0xFF; XmmReg1[8,8] = (XmmReg1[8,8] s> XmmReg2[8,8]) * 0xFF; XmmReg1[16,8] = (XmmReg1[16,8] s> XmmReg2[16,8]) * 0xFF; XmmReg1[24,8] = (XmmReg1[24,8] s> XmmReg2[24,8]) * 0xFF; XmmReg1[32,8] = (XmmReg1[32,8] s> XmmReg2[32,8]) * 0xFF; XmmReg1[40,8] = (XmmReg1[40,8] s> XmmReg2[40,8]) * 0xFF; XmmReg1[48,8] = (XmmReg1[48,8] s> XmmReg2[48,8]) * 0xFF; XmmReg1[56,8] = (XmmReg1[56,8] s> XmmReg2[56,8]) * 0xFF; XmmReg1[64,8] = (XmmReg1[64,8] s> XmmReg2[64,8]) * 0xFF; XmmReg1[72,8] = (XmmReg1[72,8] s> XmmReg2[72,8]) * 0xFF; XmmReg1[80,8] = (XmmReg1[80,8] s> XmmReg2[80,8]) * 0xFF; XmmReg1[88,8] = (XmmReg1[88,8] s> XmmReg2[88,8]) * 0xFF; XmmReg1[96,8] = (XmmReg1[96,8] s> XmmReg2[96,8]) * 0xFF; XmmReg1[104,8] = (XmmReg1[104,8] s> XmmReg2[104,8]) * 0xFF; XmmReg1[112,8] = (XmmReg1[112,8] s> XmmReg2[112,8]) * 0xFF; XmmReg1[120,8] = (XmmReg1[120,8] s> XmmReg2[120,8]) * 0xFF; } :PCMPGTW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x65; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,16] = zext(XmmReg[0,16] s> m[0,16]) * 0xFFFF; XmmReg[16,16] = zext(XmmReg[16,16] s> m[16,16]) * 0xFFFF; XmmReg[32,16] = zext(XmmReg[32,16] s> m[32,16]) * 0xFFFF; XmmReg[48,16] = zext(XmmReg[48,16] s> m[48,16]) * 0xFFFF; XmmReg[64,16] = zext(XmmReg[64,16] s> m[64,16]) * 0xFFFF; XmmReg[80,16] = zext(XmmReg[80,16] s> m[80,16]) * 0xFFFF; XmmReg[96,16] = zext(XmmReg[96,16] s> m[96,16]) * 0xFFFF; XmmReg[112,16] = zext(XmmReg[112,16] s> m[112,16]) * 0xFFFF; } :PCMPGTW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x65; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = zext(XmmReg1[0,16] s> XmmReg2[0,16]) * 0xFFFF; XmmReg1[16,16] = zext(XmmReg1[16,16] s> XmmReg2[16,16]) * 0xFFFF; XmmReg1[32,16] = zext(XmmReg1[32,16] s> XmmReg2[32,16]) * 0xFFFF; XmmReg1[48,16] = zext(XmmReg1[48,16] s> XmmReg2[48,16]) * 0xFFFF; XmmReg1[64,16] = zext(XmmReg1[64,16] s> XmmReg2[64,16]) * 0xFFFF; XmmReg1[80,16] = zext(XmmReg1[80,16] s> XmmReg2[80,16]) * 0xFFFF; XmmReg1[96,16] = zext(XmmReg1[96,16] s> XmmReg2[96,16]) * 0xFFFF; XmmReg1[112,16] = zext(XmmReg1[112,16] s> XmmReg2[112,16]) * 0xFFFF; } :PCMPGTD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x66; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,32] = zext(XmmReg[0,32] s> m[0,32]) * 0xFFFFFFFF; XmmReg[32,32] = zext(XmmReg[32,32] s> m[32,32]) * 0xFFFFFFFF; XmmReg[64,32] = zext(XmmReg[64,32] s> m[64,32]) * 0xFFFFFFFF; XmmReg[96,32] = zext(XmmReg[96,32] s> m[96,32]) * 0xFFFFFFFF; } :PCMPGTD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x66; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = zext(XmmReg1[0,32] s> XmmReg2[0,32]) * 0xFFFFFFFF; XmmReg1[32,32] = zext(XmmReg1[32,32] s> XmmReg2[32,32]) * 0xFFFFFFFF; XmmReg1[64,32] = zext(XmmReg1[64,32] s> XmmReg2[64,32]) * 0xFFFFFFFF; XmmReg1[96,32] = zext(XmmReg1[96,32] s> XmmReg2[96,32]) * 0xFFFFFFFF; } :PEXTRW Reg32, mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC5; Reg32 & mmxreg2; imm8 { temp:8 = mmxreg2 >> ( (imm8 & 0x03) * 16 ); Reg32 = zext(temp:2); } :PEXTRW Reg32, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC5; Reg32 & XmmReg2 & check_Reg32_dest; imm8 { local shift:1 = (imm8 & 0x7) * 16:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg2[0,64] >> shift, XmmReg2[64,64] >> (shift-64)); Reg32 = zext(temp:2); build check_Reg32_dest; } #break PEXTRW with reg/mem dest into two constructors to handle zext in register case :PEXTRW Rmr32, XmmReg1, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x15; (mod = 3 & Rmr32 & check_Rmr32_dest) & XmmReg1 ; imm8 { local shift:1 = (imm8 & 0x7) * 16:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg1[0,64] >> shift,XmmReg1[64,64] >> (shift - 64)); Rmr32 = zext(temp:2); build check_Rmr32_dest; } :PEXTRW m16, XmmReg1, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x15; XmmReg1 ... 
& m16; imm8 { local shift:1 = (imm8 & 0x7) * 16:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg1[0,64] >> shift,XmmReg1[64,64] >> (shift - 64)); m16 = temp:2; } define pcodeop phaddd; :PHADDD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x02; mmxreg ... & m64 { mmxreg=phaddd(mmxreg,m64); } :PHADDD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x02; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phaddd(mmxreg1,mmxreg2); } :PHADDD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x02; XmmReg ... & m128 { XmmReg=phaddd(XmmReg,m128); } :PHADDD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x02; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phaddd(XmmReg1,XmmReg2); } define pcodeop phaddw; :PHADDW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x01; mmxreg ... & m64 { mmxreg=phaddw(mmxreg,m64); } :PHADDW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x01; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phaddw(mmxreg1,mmxreg2); } :PHADDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x01; XmmReg ... & m128 { XmmReg=phaddw(XmmReg,m128); } :PHADDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x01; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phaddw(XmmReg1,XmmReg2); } define pcodeop phaddsw; :PHADDSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x03; mmxreg ... & m64 { mmxreg=phaddsw(mmxreg,m64); } :PHADDSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x03; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phaddsw(mmxreg1,mmxreg2); } :PHADDSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x03; XmmReg ... 
& m128 { XmmReg=phaddsw(XmmReg,m128); } :PHADDSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x03; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phaddsw(XmmReg1,XmmReg2); } define pcodeop phsubd; :PHSUBD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x06; mmxreg ... & m64 { mmxreg=phsubd(mmxreg,m64); } :PHSUBD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x06; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phsubd(mmxreg1,mmxreg2); } :PHSUBD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x06; XmmReg ... & m128 { XmmReg=phsubd(XmmReg,m128); } :PHSUBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x06; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phsubd(XmmReg1,XmmReg2); } define pcodeop phsubw; :PHSUBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x05; mmxreg ... & m64 { mmxreg=phsubw(mmxreg,m64); } :PHSUBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x05; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phsubw(mmxreg1,mmxreg2); } :PHSUBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x05; XmmReg ... & m128 { XmmReg=phsubw(XmmReg,m128); } :PHSUBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x05; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phsubw(XmmReg1,XmmReg2); } define pcodeop phsubsw; :PHSUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x07; mmxreg ... & m64 { mmxreg=phsubsw(mmxreg,m64); } :PHSUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x07; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=phsubsw(mmxreg1,mmxreg2); } :PHSUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x07; XmmReg ... 
& m128 { XmmReg=phsubsw(XmmReg,m128); } :PHSUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x07; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=phsubsw(XmmReg1,XmmReg2); } :PINSRW mmxreg, Rmr32, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC4; mmxmod=3 & Rmr32 & mmxreg; imm8 { local destIndex:1 = (imm8 & 0x7) * 16:1; mmxreg = mmxreg & ~(0xffff:8 << destIndex); local newVal:8 = zext(Rmr32[0,16]); mmxreg = mmxreg | (newVal << destIndex); } :PINSRW mmxreg, m16, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC4; m16 & mmxreg ... ; imm8 { local destIndex:1 = (imm8 & 0x7) * 16:1; mmxreg = mmxreg & ~(0xffff:8 << destIndex); local newVal:8 = zext(m16); mmxreg = mmxreg | (newVal << destIndex); } :PINSRW XmmReg, Rmr32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC4; xmmmod=3 & Rmr32 & XmmReg; imm8 { local destIndex:1 = (imm8 & 0x7) * 16:1; local useLow:1 = destIndex < 64:1; local newLow:8 = zext(Rmr32:2) << destIndex; newLow = (XmmReg[0,64] & ~(0xffff:8 << destIndex)) | newLow; local newHigh:8 = zext(Rmr32:2) << (destIndex-64:1); newHigh = (XmmReg[64,64] & ~(0xffff:8 << (destIndex - 64:1))) | newHigh; conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]); conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]); } :PINSRW XmmReg, m16, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC4; m16 & XmmReg ...; imm8 { local destIndex:1 = (imm8 & 0x7) * 16:1; local useLow:1 = destIndex < 64:1; local newLow:8 = zext(m16) << destIndex; newLow = (XmmReg[0,64] & ~(0xffff:8 << destIndex)) | newLow; local newHigh:8 = zext(m16) << (destIndex-64:1); newHigh = (XmmReg[64,64] & ~(0xffff:8 << (destIndex - 64:1))) | newHigh; conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]); conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]); } define pcodeop pmaddubsw; :PMADDUBSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x04; mmxreg ... 
& m64 { mmxreg=pmaddubsw(mmxreg,m64); } :PMADDUBSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x04; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pmaddubsw(mmxreg1,mmxreg2); } :PMADDUBSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x04; XmmReg ... & m128 { XmmReg=pmaddubsw(XmmReg,m128); } :PMADDUBSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x04; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pmaddubsw(XmmReg1,XmmReg2); } define pcodeop pmaddwd; :PMADDWD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF5; mmxreg ... & m64 { mmxreg = pmaddwd(mmxreg, m64); } :PMADDWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmaddwd(mmxreg1, mmxreg2); } :PMADDWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF5; XmmReg ... & m128 { XmmReg = pmaddwd(XmmReg, m128); } :PMADDWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmaddwd(XmmReg1, XmmReg2); } :PMAXSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEE; mmxreg1 ... & mmxreg2_m64 { local srcCopy:8 = mmxreg2_m64; conditionalAssign(mmxreg1[0,16],srcCopy[0,16] s> mmxreg1[0,16],srcCopy[0,16],mmxreg1[0,16]); conditionalAssign(mmxreg1[16,16],srcCopy[16,16] s> mmxreg1[16,16],srcCopy[16,16],mmxreg1[16,16]); conditionalAssign(mmxreg1[32,16],srcCopy[32,16] s> mmxreg1[32,16],srcCopy[32,16],mmxreg1[32,16]); conditionalAssign(mmxreg1[48,16],srcCopy[48,16] s> mmxreg1[48,16],srcCopy[48,16],mmxreg1[48,16]); } :PMAXSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEE; XmmReg1 ... 
& XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,16],srcCopy[0,16] s> XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]); conditionalAssign(XmmReg1[16,16],srcCopy[16,16] s> XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]); conditionalAssign(XmmReg1[32,16],srcCopy[32,16] s> XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]); conditionalAssign(XmmReg1[48,16],srcCopy[48,16] s> XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]); conditionalAssign(XmmReg1[64,16],srcCopy[64,16] s> XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]); conditionalAssign(XmmReg1[80,16],srcCopy[80,16] s> XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]); conditionalAssign(XmmReg1[96,16],srcCopy[96,16] s> XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]); conditionalAssign(XmmReg1[112,16],srcCopy[112,16] s> XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]); } :PMAXUB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDE; mmxreg1 ... & mmxreg2_m64 { local srcCopy:8 = mmxreg2_m64; conditionalAssign(mmxreg1[0,8],srcCopy[0,8] > mmxreg1[0,8],srcCopy[0,8],mmxreg1[0,8]); conditionalAssign(mmxreg1[8,8],srcCopy[8,8] > mmxreg1[8,8],srcCopy[8,8],mmxreg1[8,8]); conditionalAssign(mmxreg1[16,8],srcCopy[16,8] > mmxreg1[16,8],srcCopy[16,8],mmxreg1[16,8]); conditionalAssign(mmxreg1[24,8],srcCopy[24,8] > mmxreg1[24,8],srcCopy[24,8],mmxreg1[24,8]); conditionalAssign(mmxreg1[32,8],srcCopy[32,8] > mmxreg1[32,8],srcCopy[32,8],mmxreg1[32,8]); conditionalAssign(mmxreg1[40,8],srcCopy[40,8] > mmxreg1[40,8],srcCopy[40,8],mmxreg1[40,8]); conditionalAssign(mmxreg1[48,8],srcCopy[48,8] > mmxreg1[48,8],srcCopy[48,8],mmxreg1[48,8]); conditionalAssign(mmxreg1[56,8],srcCopy[56,8] > mmxreg1[56,8],srcCopy[56,8],mmxreg1[56,8]); } :PMAXUB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDE; XmmReg1 ... 
& XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,8],srcCopy[0,8] > XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]); conditionalAssign(XmmReg1[8,8],srcCopy[8,8] > XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]); conditionalAssign(XmmReg1[16,8],srcCopy[16,8] > XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]); conditionalAssign(XmmReg1[24,8],srcCopy[24,8] > XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]); conditionalAssign(XmmReg1[32,8],srcCopy[32,8] > XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]); conditionalAssign(XmmReg1[40,8],srcCopy[40,8] > XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]); conditionalAssign(XmmReg1[48,8],srcCopy[48,8] > XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]); conditionalAssign(XmmReg1[56,8],srcCopy[56,8] > XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]); conditionalAssign(XmmReg1[64,8],srcCopy[64,8] > XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]); conditionalAssign(XmmReg1[72,8],srcCopy[72,8] > XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]); conditionalAssign(XmmReg1[80,8],srcCopy[80,8] > XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]); conditionalAssign(XmmReg1[88,8],srcCopy[88,8] > XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]); conditionalAssign(XmmReg1[96,8],srcCopy[96,8] > XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]); conditionalAssign(XmmReg1[104,8],srcCopy[104,8] > XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]); conditionalAssign(XmmReg1[112,8],srcCopy[112,8] > XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]); conditionalAssign(XmmReg1[120,8],srcCopy[120,8] > XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]); } :PMINSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEA; mmxreg1 ... 
& mmxreg2_m64 { local srcCopy:8 = mmxreg2_m64; conditionalAssign(mmxreg1[0,16],srcCopy[0,16] s< mmxreg1[0,16],srcCopy[0,16],mmxreg1[0,16]); conditionalAssign(mmxreg1[16,16],srcCopy[16,16] s< mmxreg1[16,16],srcCopy[16,16],mmxreg1[16,16]); conditionalAssign(mmxreg1[32,16],srcCopy[32,16] s< mmxreg1[32,16],srcCopy[32,16],mmxreg1[32,16]); conditionalAssign(mmxreg1[48,16],srcCopy[48,16] s< mmxreg1[48,16],srcCopy[48,16],mmxreg1[48,16]); } :PMINSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEA; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,16],srcCopy[0,16] s< XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]); conditionalAssign(XmmReg1[16,16],srcCopy[16,16] s< XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]); conditionalAssign(XmmReg1[32,16],srcCopy[32,16] s< XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]); conditionalAssign(XmmReg1[48,16],srcCopy[48,16] s< XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]); conditionalAssign(XmmReg1[64,16],srcCopy[64,16] s< XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]); conditionalAssign(XmmReg1[80,16],srcCopy[80,16] s< XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]); conditionalAssign(XmmReg1[96,16],srcCopy[96,16] s< XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]); conditionalAssign(XmmReg1[112,16],srcCopy[112,16] s< XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]); } :PMINUB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xDA; mmxreg1 ... 
& mmxreg2_m64 { local srcCopy:8 = mmxreg2_m64; conditionalAssign(mmxreg1[0,8],srcCopy[0,8] < mmxreg1[0,8],srcCopy[0,8],mmxreg1[0,8]); conditionalAssign(mmxreg1[8,8],srcCopy[8,8] < mmxreg1[8,8],srcCopy[8,8],mmxreg1[8,8]); conditionalAssign(mmxreg1[16,8],srcCopy[16,8] < mmxreg1[16,8],srcCopy[16,8],mmxreg1[16,8]); conditionalAssign(mmxreg1[24,8],srcCopy[24,8] < mmxreg1[24,8],srcCopy[24,8],mmxreg1[24,8]); conditionalAssign(mmxreg1[32,8],srcCopy[32,8] < mmxreg1[32,8],srcCopy[32,8],mmxreg1[32,8]); conditionalAssign(mmxreg1[40,8],srcCopy[40,8] < mmxreg1[40,8],srcCopy[40,8],mmxreg1[40,8]); conditionalAssign(mmxreg1[48,8],srcCopy[48,8] < mmxreg1[48,8],srcCopy[48,8],mmxreg1[48,8]); conditionalAssign(mmxreg1[56,8],srcCopy[56,8] < mmxreg1[56,8],srcCopy[56,8],mmxreg1[56,8]); } :PMINUB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xDA; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,8],srcCopy[0,8] < XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]); conditionalAssign(XmmReg1[8,8],srcCopy[8,8] < XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]); conditionalAssign(XmmReg1[16,8],srcCopy[16,8] < XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]); conditionalAssign(XmmReg1[24,8],srcCopy[24,8] < XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]); conditionalAssign(XmmReg1[32,8],srcCopy[32,8] < XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]); conditionalAssign(XmmReg1[40,8],srcCopy[40,8] < XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]); conditionalAssign(XmmReg1[48,8],srcCopy[48,8] < XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]); conditionalAssign(XmmReg1[56,8],srcCopy[56,8] < XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]); conditionalAssign(XmmReg1[64,8],srcCopy[64,8] < XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]); conditionalAssign(XmmReg1[72,8],srcCopy[72,8] < XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]); conditionalAssign(XmmReg1[80,8],srcCopy[80,8] < XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]); conditionalAssign(XmmReg1[88,8],srcCopy[88,8] < 
XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]); conditionalAssign(XmmReg1[96,8],srcCopy[96,8] < XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]); conditionalAssign(XmmReg1[104,8],srcCopy[104,8] < XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]); conditionalAssign(XmmReg1[112,8],srcCopy[112,8] < XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]); conditionalAssign(XmmReg1[120,8],srcCopy[120,8] < XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]); } #in 64-bit mode the default operand size is 64 bits #note that gcc assembles pmovmskb eax, mm0 and pmovmskb rax, mm0 to 0f d7 c0 :PMOVMSKB Reg32, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD7; mod = 3 & Reg32 & mmxreg2 & check_Reg32_dest { local byte_mask:1 = 0:1; byte_mask[0,1] = mmxreg2[7,1]; byte_mask[1,1] = mmxreg2[15,1]; byte_mask[2,1] = mmxreg2[23,1]; byte_mask[3,1] = mmxreg2[31,1]; byte_mask[4,1] = mmxreg2[39,1]; byte_mask[5,1] = mmxreg2[47,1]; byte_mask[6,1] = mmxreg2[55,1]; byte_mask[7,1] = mmxreg2[63,1]; Reg32 = zext(byte_mask); build check_Reg32_dest; } #in 64-bit mode the default operand size is 64 bits #note that gcc assembles pmovmskb eax, xmm0 and pmovmskb rax, xmm0 to 66 0f d7 c0 :PMOVMSKB Reg32, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD7; mod = 3 & Reg32 & XmmReg2 & check_Reg32_dest { local byte_mask:2 = 0:2; byte_mask[0,1] = XmmReg2[7,1]; byte_mask[1,1] = XmmReg2[15,1]; byte_mask[2,1] = XmmReg2[23,1]; byte_mask[3,1] = XmmReg2[31,1]; byte_mask[4,1] = XmmReg2[39,1]; byte_mask[5,1] = XmmReg2[47,1]; byte_mask[6,1] = XmmReg2[55,1]; byte_mask[7,1] = XmmReg2[63,1]; byte_mask[8,1] = XmmReg2[71,1]; byte_mask[9,1] = XmmReg2[79,1]; byte_mask[10,1] = XmmReg2[87,1]; byte_mask[11,1] = XmmReg2[95,1]; byte_mask[12,1] = XmmReg2[103,1]; byte_mask[13,1] = XmmReg2[111,1]; byte_mask[14,1] = XmmReg2[119,1]; byte_mask[15,1] = XmmReg2[127,1]; Reg32 = zext(byte_mask); build check_Reg32_dest; } define pcodeop pmulhrsw; :PMULHRSW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxreg ... 
& m64 { mmxreg=pmulhrsw(mmxreg,m64); } :PMULHRSW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0B; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=pmulhrsw(mmxreg1,mmxreg2); } :PMULHRSW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0B; XmmReg ... & m128 { XmmReg=pmulhrsw(XmmReg,m128); } :PMULHRSW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0B; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=pmulhrsw(XmmReg1,XmmReg2); } define pcodeop pmulhuw; :PMULHUW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE4; mmxreg ... & m64 { mmxreg = pmulhuw(mmxreg, m64); } :PMULHUW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE4; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhuw(mmxreg1, mmxreg2); } :PMULHUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE4; XmmReg ... & m128 { XmmReg = pmulhuw(XmmReg, m128); } :PMULHUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE4; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhuw(XmmReg1, XmmReg2); } define pcodeop pmulhw; :PMULHW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE5; mmxreg ... & m64 { mmxreg = pmulhw(mmxreg, m64); } :PMULHW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = pmulhw(mmxreg1, mmxreg2); } :PMULHW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE5; XmmReg ... & m128 { XmmReg = pmulhw(XmmReg, m128); } :PMULHW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulhw(XmmReg1, XmmReg2); } :PMULLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD5; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,16] = mmxreg[0,16] * m[0,16]; mmxreg[16,16] = mmxreg[16,16] * m[16,16]; mmxreg[32,16] = mmxreg[32,16] * m[32,16]; mmxreg[48,16] = mmxreg[48,16] * m[48,16]; } :PMULLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD5; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = mmxreg1[0,16] * mmxreg2[0,16]; mmxreg1[16,16] = mmxreg1[16,16] * mmxreg2[16,16]; mmxreg1[32,16] = mmxreg1[32,16] * mmxreg2[32,16]; mmxreg1[48,16] = mmxreg1[48,16] * mmxreg2[48,16]; } :PMULLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD5; XmmReg ... & m128 { local m:16 = m128; XmmReg[0,16] = XmmReg[0,16] * m[0,16]; XmmReg[16,16] = XmmReg[16,16] * m[16,16]; XmmReg[32,16] = XmmReg[32,16] * m[32,16]; XmmReg[48,16] = XmmReg[48,16] * m[48,16]; XmmReg[64,16] = XmmReg[64,16] * m[64,16]; XmmReg[80,16] = XmmReg[80,16] * m[80,16]; XmmReg[96,16] = XmmReg[96,16] * m[96,16]; XmmReg[112,16] = XmmReg[112,16] * m[112,16]; } :PMULLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD5; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = XmmReg1[0,16] * XmmReg2[0,16]; XmmReg1[16,16] = XmmReg1[16,16] * XmmReg2[16,16]; XmmReg1[32,16] = XmmReg1[32,16] * XmmReg2[32,16]; XmmReg1[48,16] = XmmReg1[48,16] * XmmReg2[48,16]; XmmReg1[64,16] = XmmReg1[64,16] * XmmReg2[64,16]; XmmReg1[80,16] = XmmReg1[80,16] * XmmReg2[80,16]; XmmReg1[96,16] = XmmReg1[96,16] * XmmReg2[96,16]; XmmReg1[112,16] = XmmReg1[112,16] * XmmReg2[112,16]; } :PMULUDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF4; mmxreg ... & m64 { local a:8 = zext(mmxreg[0,32]); local b:8 = zext(m64[0,32]); mmxreg = a * b; } :PMULUDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF4; mmxmod = 3 & mmxreg1 & mmxreg2 { local a:8 = zext(mmxreg1[0,32]); local b:8 = zext(mmxreg2[0,32]); mmxreg1 = a * b; } :PMULUDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF4; XmmReg ... 
& m128 { local a:8 = zext(XmmReg[0,32]); local b:8 = zext(m128[0,32]); XmmReg[0,64] = a * b; local c:8 = zext(XmmReg[64,32]); local d:8 = zext(m128[64,32]); XmmReg[64,64] = c * d; } :PMULUDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF4; xmmmod = 3 & XmmReg1 & XmmReg2 { local a:8 = zext(XmmReg1[0,32]); local b:8 = zext(XmmReg2[0,32]); XmmReg1[0,64] = a * b; local c:8 = zext(XmmReg1[64,32]); local d:8 = zext(XmmReg2[64,32]); XmmReg1[64,64] = c * d; } :POR mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEB; mmxreg ... & m64 { mmxreg = mmxreg | m64; } :POR mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 | mmxreg2; } :POR XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEB; XmmReg ... & m128 { XmmReg = XmmReg | m128; } :POR XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 | XmmReg2; } define pcodeop psadbw; :PSADBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF6; mmxreg ... & m64 { mmxreg = psadbw(mmxreg, m64); } :PSADBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF6; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = psadbw(mmxreg1, mmxreg2); } :PSADBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF6; XmmReg ... & m128 { XmmReg = psadbw(XmmReg, m128); } :PSADBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF6; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psadbw(XmmReg1, XmmReg2); } # these byte and word shuffles need to be done also ????? define pcodeop pshufb; :PSHUFB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x00; mmxreg1 ... & mmxreg2_m64 { mmxreg1=pshufb(mmxreg1,mmxreg2_m64); } :PSHUFB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x00; XmmReg1 ... 
& XmmReg2_m128 { XmmReg1=pshufb(XmmReg1,XmmReg2_m128); }

# determine the total shift required by the bit fields in a shuffle opcode
# Order0..Order3 extract the four 2-bit selector fields of the shuffle
# immediate (bits 1:0, 3:2, 5:4 and 7:6 respectively) and export each as a
# 1-byte constant for use in the PSHUFD semantics below.
Order0: order0 is imm8 [ order0 = ( imm8 & 0x3); ] { export *[const]:1 order0; }
Order1: order1 is imm8 [ order1 = ((imm8 >> 2) & 0x3); ] { export *[const]:1 order1; }
Order2: order2 is imm8 [ order2 = ((imm8 >> 4) & 0x3); ] { export *[const]:1 order2; }
Order3: order3 is imm8 [ order3 = ((imm8 >> 6) & 0x3); ] { export *[const]:1 order3; }

# shuffle_4: select one of c0..c3 into dest according to ord (0..3).
# Written arithmetically (sum of zext(ord == k) * ck) so the selection emits
# straight-line p-code with no branches; exactly one zext term is 1.
macro shuffle_4(dest,ord,c0,c1,c2,c3){ dest = zext(ord == 0) * c0 + zext(ord == 1) * c1 + zext(ord == 2) * c2 + zext(ord == 3) * c3; }

# PSHUFD: each destination doubleword is the source doubleword selected by
# the corresponding 2-bit field of the immediate. The source lanes are copied
# to locals first so the result is correct when source and destination alias.
:PSHUFD XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x70; (XmmReg2_m128 & XmmReg1 ...); imm8 & Order0 & Order1 & Order2 & Order3
{
	local c0 = XmmReg2_m128[0,32];
	local c1 = XmmReg2_m128[32,32];
	local c2 = XmmReg2_m128[64,32];
	local c3 = XmmReg2_m128[96,32];
	shuffle_4(XmmReg1[0,32],Order0,c0,c1,c2,c3);
	shuffle_4(XmmReg1[32,32],Order1,c0,c1,c2,c3);
	shuffle_4(XmmReg1[64,32],Order2,c0,c1,c2,c3);
	shuffle_4(XmmReg1[96,32],Order3,c0,c1,c2,c3);
}

# PSHUFHW/PSHUFLW/PSHUFW are modeled as opaque pcodeops rather than expanded
# word-by-word like PSHUFD above.
define pcodeop pshufhw;

:PSHUFHW XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x70; XmmReg2_m128 & XmmReg1 ...; imm8
{
	XmmReg1 = pshufhw(XmmReg1, XmmReg2_m128, imm8:8);
}

define pcodeop pshuflw;

:PSHUFLW XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x70; XmmReg2_m128 & XmmReg1 ...; imm8
{
	XmmReg1 = pshuflw(XmmReg1, XmmReg2_m128, imm8:8);
}

define pcodeop pshufw;

:PSHUFW mmxreg1, mmxreg2_m64, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x70; mmxreg2_m64 & mmxreg1 ...; imm8
{
	mmxreg1 = pshufw(mmxreg1, mmxreg2_m64, imm8:8);
}

define pcodeop psignb;

:PSIGNB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x08; mmxreg ...
& m64 { mmxreg=psignb(mmxreg,m64); } :PSIGNB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x08; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignb(mmxreg1,mmxreg2); } :PSIGNB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x08; XmmReg ... & m128 { XmmReg=psignb(XmmReg,m128); } :PSIGNB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x08; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignb(XmmReg1,XmmReg2); } define pcodeop psignw; :PSIGNW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x09; mmxreg ... & m64 { mmxreg=psignw(mmxreg,m64); } :PSIGNW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x09; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignw(mmxreg1,mmxreg2); } :PSIGNW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x09; XmmReg ... & m128 { XmmReg=psignw(XmmReg,m128); } :PSIGNW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x09; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1=psignw(XmmReg1,XmmReg2); } define pcodeop psignd; :PSIGND mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0a; mmxreg ... & m64 { mmxreg=psignd(mmxreg,m64); } :PSIGND mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x38; byte=0x0a; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1=psignd(mmxreg1,mmxreg2); } :PSIGND XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; XmmReg ... 
& m128 { XmmReg=psignd(XmmReg,m128); }

:PSIGND XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x0a; xmmmod = 3 & XmmReg1 & XmmReg2
{
	XmmReg1=psignd(XmmReg1,XmmReg2);
}

#break into two 64-bit chunks so decompiler can follow constants
# PSLLDQ: logical left shift of the full 128-bit register by imm8 *bytes*.
# FIX: the local jump labels had been lost ("goto ;" has no target and is not
# valid SLEIGH); the <zero>/<skip>/<done> labels are restored below.
:PSLLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod = 3 & reg_opcode=7 & XmmReg2; imm8
{
	# a byte count greater than 15 clears the entire register
	if (imm8:1 > 15:1) goto <zero>;
	local low64copy:8 = XmmReg2[0,64];
	XmmReg2[0,64] = XmmReg2[0,64] << (8:1 * imm8:1);
	if (imm8:1 > 8:1) goto <skip>;
	# count of 8 bytes or less: bits carry from the low qword into the high qword
	XmmReg2[64,64] = (XmmReg2[64,64] << (8:1 * imm8:1)) | (low64copy >> (8:1 * (8 - imm8:1)));
	goto <done>;
	<skip>
	# count of 9..15 bytes: the high qword comes entirely from the old low qword
	XmmReg2[64,64] = low64copy << (8:1 * (imm8 - 8));
	goto <done>;
	<zero>
	XmmReg2[0,64] = 0:8;
	XmmReg2[64,64] = 0:8;
	<done>
}

define pcodeop psllw;

:PSLLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxreg ... & m64 ...
{
	mmxreg = psllw(mmxreg, m64);
}

:PSLLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF1; mmxmod = 3 & mmxreg1 & mmxreg2
{
	mmxreg1 = psllw(mmxreg1, mmxreg2);
}

:PSLLW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8
{
	mmxreg2 = psllw(mmxreg2, imm8:8);
}

# PSLLD (MMX): per Intel SDM, BOTH doublewords are shifted by the single
# 64-bit count taken from the source operand — the same convention the
# PSRLD (MMX) constructor in this file already uses.
# FIX: previously each lane was shifted by its own 32 bits of the source.
:PSLLD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF2; mmxreg ... & m64 ...
{
	# a count of 32 or more clears both lanes (shift >= lane width yields 0)
	local count:8 = m64;
	mmxreg[0,32] = mmxreg[0,32] << count;
	mmxreg[32,32] = mmxreg[32,32] << count;
}

:PSLLD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF2; mmxmod = 3 & mmxreg1 & mmxreg2
{
	#save this off in case mmxreg1 and mmxreg2 are the same register
	local count:8 = mmxreg2;
	mmxreg1[0,32] = mmxreg1[0,32] << count;
	mmxreg1[32,32] = mmxreg1[32,32] << count;
}

:PSLLD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8
{
	mmxreg2[0,32] = mmxreg2[0,32] << imm8;
	mmxreg2[32,32] = mmxreg2[32,32] << imm8;
}

:PSLLQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF3; mmxreg ... & m64 ...
{
	mmxreg = mmxreg << m64;
}

:PSLLQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF3; mmxmod = 3 & mmxreg1 & mmxreg2
{
	mmxreg1 = mmxreg1 << mmxreg2;
}

:PSLLQ mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=6 & mmxreg2; imm8
{
	mmxreg2 = mmxreg2 << imm8:8;
}

:PSLLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF1; XmmReg ... & m128 ...
{
	XmmReg = psllw(XmmReg, m128);
}

:PSLLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF1; xmmmod = 3 & XmmReg1 & XmmReg2
{
	XmmReg1 = psllw(XmmReg1, XmmReg2);
}

:PSLLW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8
{
	XmmReg2 = psllw(XmmReg2, imm8:8);
}

# PSLLD (SSE2): per Intel SDM all four doublewords shift by the single count
# held in the LOW quadword of the source — matching the PSRLW/PSRLD/PSRLQ
# implementations later in this file (which use m128[0,64] / a saved count).
# FIX: previously each lane used its own 32 bits of the source as the count.
:PSLLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF2; XmmReg ... & m128 ...
{
	local count:8 = m128[0,64];
	XmmReg[0,32] = XmmReg[0,32] << count;
	XmmReg[32,32] = XmmReg[32,32] << count;
	XmmReg[64,32] = XmmReg[64,32] << count;
	XmmReg[96,32] = XmmReg[96,32] << count;
}

:PSLLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF2; xmmmod = 3 & XmmReg1 & XmmReg2
{
	#save this off in case XmmReg1 and XmmReg2 are the same register
	local count:8 = XmmReg2[0,64];
	XmmReg1[0,32] = XmmReg1[0,32] << count;
	XmmReg1[32,32] = XmmReg1[32,32] << count;
	XmmReg1[64,32] = XmmReg1[64,32] << count;
	XmmReg1[96,32] = XmmReg1[96,32] << count;
}

:PSLLD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8
{
	XmmReg2[0,32] = XmmReg2[0,32] << imm8;
	XmmReg2[32,32] = XmmReg2[32,32] << imm8;
	XmmReg2[64,32] = XmmReg2[64,32] << imm8;
	XmmReg2[96,32] = XmmReg2[96,32] << imm8;
}

# PSLLQ (SSE2): both quadwords shift by the single count in the low quadword
# of the source (Intel SDM), as PSRLQ below already does.
:PSLLQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF3; XmmReg ... & m128 ...
{
	local count:8 = m128[0,64];
	XmmReg[0,64] = XmmReg[0,64] << count;
	XmmReg[64,64] = XmmReg[64,64] << count;
}

:PSLLQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF3; xmmmod = 3 & XmmReg1 & XmmReg2
{
	#save this off in case XmmReg1 and XmmReg2 are the same register
	local count:8 = XmmReg2[0,64];
	XmmReg1[0,64] = XmmReg1[0,64] << count;
	XmmReg1[64,64] = XmmReg1[64,64] << count;
}

:PSLLQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=6 & XmmReg2; imm8
{
	XmmReg2[0,64] = XmmReg2[0,64] << imm8;
	XmmReg2[64,64] = XmmReg2[64,64] << imm8;
}

define pcodeop psraw;

:PSRAW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE1; mmxreg ... & m64 ...
{
	mmxreg = psraw(mmxreg, m64);
}

:PSRAW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE1; mmxmod = 3 & mmxreg1 & mmxreg2
{
	mmxreg1 = psraw(mmxreg1, mmxreg2);
}

:PSRAW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=4 & mmxreg2; imm8
{
	mmxreg2 = psraw(mmxreg2, imm8:8);
}

:PSRAD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE2; mmxreg ... & m64
{
	# a count greater than 31 just clears all the bits
	mmxreg[0,32] = mmxreg[0,32] s>> m64;
	mmxreg[32,32] = mmxreg[32,32] s>> m64;
}

:PSRAD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE2; mmxmod = 3 & mmxreg1 & mmxreg2
{
	# a count greater than 31 just clears all the bits
	#FIX: save the count first — the old code re-read mmxreg2 after writing
	#lane 0 of mmxreg1, corrupting the count when src and dst are the same register
	local count:8 = mmxreg2;
	mmxreg1[0,32] = mmxreg1[0,32] s>> count;
	mmxreg1[32,32] = mmxreg1[32,32] s>> count;
}

:PSRAD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=4 & mmxreg2; imm8
{
	# a count greater than 31 just clears all the bits
	mmxreg2[0,32] = mmxreg2[0,32] s>> imm8;
	mmxreg2[32,32] = mmxreg2[32,32] s>> imm8;
}

:PSRAW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE1; XmmReg ... & m128 ...
{ XmmReg = psraw(XmmReg, m128); } :PSRAW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE1; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = psraw(XmmReg1, XmmReg2); } :PSRAW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=4 & XmmReg2; imm8 { XmmReg2 = psraw(XmmReg2, imm8:8); } :PSRAD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE2; m128 & XmmReg ... { # a count greater than 31 just clears all the bits XmmReg[0,32] = XmmReg[0,32] s>> m128; XmmReg[32,32] = XmmReg[32,32] s>> m128; XmmReg[64,32] = XmmReg[64,32] s>> m128; XmmReg[96,32] = XmmReg[96,32] s>> m128; } :PSRAD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE2; xmmmod = 3 & XmmReg1 & XmmReg2 { # a count greater than 31 just clears all the bits XmmReg1[0,32] = XmmReg1[0,32] s>> XmmReg2; XmmReg1[32,32] = XmmReg1[32,32] s>> XmmReg2; XmmReg1[64,32] = XmmReg1[64,32] s>> XmmReg2; XmmReg1[96,32] = XmmReg1[96,32] s>> XmmReg2; } :PSRAD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=4 & XmmReg2; imm8 { # a count greater than 31 just clears all the bits XmmReg2[0,32] = XmmReg2[0,32] s>> imm8; XmmReg2[32,32] = XmmReg2[32,32] s>> imm8; XmmReg2[64,32] = XmmReg2[64,32] s>> imm8; XmmReg2[96,32] = XmmReg2[96,32] s>> imm8; } :PSRLDQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; xmmmod=3 & reg_opcode=3 & XmmReg2; imm8 { # a count greater than 15 just clears all the bits XmmReg2 = XmmReg2 >> (imm8 * 8); } :PSRLW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD1; mmxreg ... & m64 ... 
{ mmxreg[0,16] = mmxreg[0,16] >> m64; mmxreg[16,16] = mmxreg[16,16] >> m64; mmxreg[32,16] = mmxreg[32,16] >> m64; mmxreg[48,16] = mmxreg[48,16] >> m64; } :PSRLW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD1; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = mmxreg1[0,16] >> mmxreg2; mmxreg1[16,16] = mmxreg1[16,16] >> mmxreg2; mmxreg1[32,16] = mmxreg1[32,16] >> mmxreg2; mmxreg1[48,16] = mmxreg1[48,16] >> mmxreg2; } :PSRLW mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8 { mmxreg2[0,16] = mmxreg2[0,16] >> imm8; mmxreg2[16,16] = mmxreg2[16,16] >> imm8; mmxreg2[32,16] = mmxreg2[32,16] >> imm8; mmxreg2[48,16] = mmxreg2[48,16] >> imm8; } :PSRLD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD2; mmxreg ... & m64 ... { mmxreg[0,32] = mmxreg[0,32] >> m64; mmxreg[32,32] = mmxreg[32,32] >> m64; } :PSRLD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD2; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,32] = mmxreg1[0,32] >> mmxreg2; mmxreg1[32,32] = mmxreg1[32,32] >> mmxreg2; } :PSRLD mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8 { mmxreg2[0,32] = mmxreg2[0,32] >> imm8; mmxreg2[32,32] = mmxreg2[32,32] >> imm8; } :PSRLQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD3; mmxreg ... & m64 ... { mmxreg = mmxreg >> m64; } :PSRLQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD3; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 >> mmxreg2; } :PSRLQ mmxreg2, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=2 & mmxreg2; imm8 { mmxreg2 = mmxreg2 >> imm8; } :PSRLW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD1; XmmReg ... & m128 ... 
{ XmmReg[0,16] = XmmReg[0,16] >> m128[0,64]; XmmReg[16,16] = XmmReg[16,16] >> m128[0,64]; XmmReg[32,16] = XmmReg[32,16] >> m128[0,64]; XmmReg[48,16] = XmmReg[48,16] >> m128[0,64]; XmmReg[64,16] = XmmReg[64,16] >> m128[0,64]; XmmReg[80,16] = XmmReg[80,16] >> m128[0,64]; XmmReg[96,16] = XmmReg[96,16] >> m128[0,64]; XmmReg[112,16] = XmmReg[112,16] >> m128[0,64]; } :PSRLW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD1; xmmmod = 3 & XmmReg1 & XmmReg2 { #save this off in case XmmReg1 and XmmReg2 are the same register local count:8 = XmmReg2[0,64]; XmmReg1[0,16] = XmmReg1[0,16] >> count; XmmReg1[16,16] = XmmReg1[16,16] >> count; XmmReg1[32,16] = XmmReg1[32,16] >> count; XmmReg1[48,16] = XmmReg1[48,16] >> count; XmmReg1[64,16] = XmmReg1[64,16] >> count; XmmReg1[80,16] = XmmReg1[80,16] >> count; XmmReg1[96,16] = XmmReg1[96,16] >> count; XmmReg1[112,16] = XmmReg1[112,16] >> count; } :PSRLW XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x71; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8 { XmmReg2[0,16] = XmmReg2[0,16] >> imm8; XmmReg2[16,16] = XmmReg2[16,16] >> imm8; XmmReg2[32,16] = XmmReg2[32,16] >> imm8; XmmReg2[48,16] = XmmReg2[48,16] >> imm8; XmmReg2[64,16] = XmmReg2[64,16] >> imm8; XmmReg2[80,16] = XmmReg2[80,16] >> imm8; XmmReg2[96,16] = XmmReg2[96,16] >> imm8; XmmReg2[112,16] = XmmReg2[112,16] >> imm8; } :PSRLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD2; XmmReg ... & m128 ... 
{ XmmReg[0,32] = XmmReg[0,32] >> m128[0,64]; XmmReg[32,32] = XmmReg[32,32] >> m128[0,64]; XmmReg[64,32] = XmmReg[64,32] >> m128[0,64]; XmmReg[96,32] = XmmReg[96,32] >> m128[0,64]; } :PSRLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD2; xmmmod = 3 & XmmReg1 & XmmReg2 { #save this off in case XmmReg1 and XmmReg2 are the same register local count = XmmReg2[0,64]; XmmReg1[0,32] = XmmReg1[0,32] >> count; XmmReg1[32,32] = XmmReg1[32,32] >> count; XmmReg1[64,32] = XmmReg1[64,32] >> count; XmmReg1[96,32] = XmmReg1[96,32] >> count; } :PSRLD XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x72; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8 { XmmReg2[0,32] = XmmReg2[0,32] >> imm8; XmmReg2[32,32] = XmmReg2[32,32] >> imm8; XmmReg2[64,32] = XmmReg2[64,32] >> imm8; XmmReg2[96,32] = XmmReg2[96,32] >> imm8; } :PSRLQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD3; XmmReg ... & m128 ... { XmmReg[0,64] = XmmReg[0,64] >> m128[0,64]; XmmReg[64,64] = XmmReg[64,64] >> m128[0,64]; } :PSRLQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD3; xmmmod = 3 & XmmReg1 & XmmReg2 { #save this off in case XmmReg1 and XmmReg2 are the same register local count = XmmReg2[0,64]; XmmReg1[0,64] = XmmReg1[0,64] >> count; XmmReg1[64,64] = XmmReg1[64,64] >> count; } :PSRLQ XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x73; mod = 0b11 & reg_opcode=2 & XmmReg2; imm8 { XmmReg2[0,64] = XmmReg2[0,64] >> imm8; XmmReg2[64,64] = XmmReg2[64,64] >> imm8; } :PSUBB mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF8; mmxreg ... & m64 ... 
# Body of ":PSUBB mmxreg, m64" (header ends the preceding line): packed
# byte-wise subtract, eight independent 8-bit lanes. The memory operand is
# copied to a temporary first so it is only dereferenced once.
{
	local m:8 = m64;
	mmxreg[0,8] = mmxreg[0,8] - m[0,8];
	mmxreg[8,8] = mmxreg[8,8] - m[8,8];
	mmxreg[16,8] = mmxreg[16,8] - m[16,8];
	mmxreg[24,8] = mmxreg[24,8] - m[24,8];
	mmxreg[32,8] = mmxreg[32,8] - m[32,8];
	mmxreg[40,8] = mmxreg[40,8] - m[40,8];
	mmxreg[48,8] = mmxreg[48,8] - m[48,8];
	mmxreg[56,8] = mmxreg[56,8] - m[56,8];
}

# PSUBB (register form): packed byte-wise subtract of mmxreg2 from mmxreg1.
# BUG FIX: the original semantics omitted the second byte lane
# (mmxreg1[8,8]); all eight lanes must be updated, matching the memory form
# above and the XMM forms of PSUBB.
:PSUBB mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF8; mmxmod = 3 & mmxreg1 & mmxreg2
{
	mmxreg1[0,8] = mmxreg1[0,8] - mmxreg2[0,8];
	mmxreg1[8,8] = mmxreg1[8,8] - mmxreg2[8,8];
	mmxreg1[16,8] = mmxreg1[16,8] - mmxreg2[16,8];
	mmxreg1[24,8] = mmxreg1[24,8] - mmxreg2[24,8];
	mmxreg1[32,8] = mmxreg1[32,8] - mmxreg2[32,8];
	mmxreg1[40,8] = mmxreg1[40,8] - mmxreg2[40,8];
	mmxreg1[48,8] = mmxreg1[48,8] - mmxreg2[48,8];
	mmxreg1[56,8] = mmxreg1[56,8] - mmxreg2[56,8];
}

# PSUBW (memory form): packed 16-bit subtract, four lanes.
:PSUBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF9; mmxreg ... & m64
{
	local m:8 = m64;
	mmxreg[0,16] = mmxreg[0,16] - m[0,16];
	mmxreg[16,16] = mmxreg[16,16] - m[16,16];
	mmxreg[32,16] = mmxreg[32,16] - m[32,16];
	mmxreg[48,16] = mmxreg[48,16] - m[48,16];
}

# PSUBW (register form).
:PSUBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF9; mmxmod = 3 & mmxreg1 & mmxreg2
{
	mmxreg1[0,16] = mmxreg1[0,16] - mmxreg2[0,16];
	mmxreg1[16,16] = mmxreg1[16,16] - mmxreg2[16,16];
	mmxreg1[32,16] = mmxreg1[32,16] - mmxreg2[32,16];
	mmxreg1[48,16] = mmxreg1[48,16] - mmxreg2[48,16];
}

# PSUBD (memory form): packed 32-bit subtract, two lanes.
:PSUBD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFA; mmxreg ... & m64 ...
{
	local m:8 = m64;
	mmxreg[0,32] = mmxreg[0,32] - m[0,32];
	mmxreg[32,32] = mmxreg[32,32] - m[32,32];
}

# PSUBD (register form).
:PSUBD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFA; mmxmod = 3 & mmxreg1 & mmxreg2
{
	mmxreg1[0,32] = mmxreg1[0,32] - mmxreg2[0,32];
	mmxreg1[32,32] = mmxreg1[32,32] - mmxreg2[32,32];
}

# PSUBQ (memory form) constructor header; its body is on the following line.
:PSUBQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFB; mmxreg ... & m64 ...
{ mmxreg = mmxreg - m64; } :PSUBQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xFB; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 - mmxreg2; } :PSUBQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFB; XmmReg ... & m128 ... { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] - m[0,64]; XmmReg[64,64] = XmmReg[64,64] - m[64,64]; } :PSUBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFB; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] - XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] - XmmReg2[64,64]; } :PSUBB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF8; XmmReg ... & m128 ... { local m:16 = m128; XmmReg[0,8] = XmmReg[0,8] - m[0,8]; XmmReg[8,8] = XmmReg[8,8] - m[8,8]; XmmReg[16,8] = XmmReg[16,8] - m[16,8]; XmmReg[24,8] = XmmReg[24,8] - m[24,8]; XmmReg[32,8] = XmmReg[32,8] - m[32,8]; XmmReg[40,8] = XmmReg[40,8] - m[40,8]; XmmReg[48,8] = XmmReg[48,8] - m[48,8]; XmmReg[56,8] = XmmReg[56,8] - m[56,8]; XmmReg[64,8] = XmmReg[64,8] - m[64,8]; XmmReg[72,8] = XmmReg[72,8] - m[72,8]; XmmReg[80,8] = XmmReg[80,8] - m[80,8]; XmmReg[88,8] = XmmReg[88,8] - m[88,8]; XmmReg[96,8] = XmmReg[96,8] - m[96,8]; XmmReg[104,8] = XmmReg[104,8] - m[104,8]; XmmReg[112,8] = XmmReg[112,8] - m[112,8]; XmmReg[120,8] = XmmReg[120,8] - m[120,8]; } :PSUBB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF8; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,8] = XmmReg1[0,8] - XmmReg2[0,8]; XmmReg1[8,8] = XmmReg1[8,8] - XmmReg2[8,8]; XmmReg1[16,8] = XmmReg1[16,8] - XmmReg2[16,8]; XmmReg1[24,8] = XmmReg1[24,8] - XmmReg2[24,8]; XmmReg1[32,8] = XmmReg1[32,8] - XmmReg2[32,8]; XmmReg1[40,8] = XmmReg1[40,8] - XmmReg2[40,8]; XmmReg1[48,8] = XmmReg1[48,8] - XmmReg2[48,8]; XmmReg1[56,8] = XmmReg1[56,8] - XmmReg2[56,8]; XmmReg1[64,8] = XmmReg1[64,8] - XmmReg2[64,8]; XmmReg1[72,8] = XmmReg1[72,8] - XmmReg2[72,8]; XmmReg1[80,8] = XmmReg1[80,8] - XmmReg2[80,8]; XmmReg1[88,8] = XmmReg1[88,8] - XmmReg2[88,8]; XmmReg1[96,8] = XmmReg1[96,8] 
- XmmReg2[96,8]; XmmReg1[104,8] = XmmReg1[104,8] - XmmReg2[104,8]; XmmReg1[112,8] = XmmReg1[112,8] - XmmReg2[112,8]; XmmReg1[120,8] = XmmReg1[120,8] - XmmReg2[120,8]; } :PSUBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF9; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,16] = XmmReg[0,16] - m[0,16]; XmmReg[16,16] = XmmReg[16,16] - m[16,16]; XmmReg[32,16] = XmmReg[32,16] - m[32,16]; XmmReg[48,16] = XmmReg[48,16] - m[48,16]; XmmReg[64,16] = XmmReg[64,16] - m[64,16]; XmmReg[80,16] = XmmReg[80,16] - m[80,16]; XmmReg[96,16] = XmmReg[96,16] - m[96,16]; XmmReg[112,16] = XmmReg[112,16] - m[112,16]; } :PSUBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xF9; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = XmmReg1[0,16] - XmmReg2[0,16]; XmmReg1[16,16] = XmmReg1[16,16] - XmmReg2[16,16]; XmmReg1[32,16] = XmmReg1[32,16] - XmmReg2[32,16]; XmmReg1[48,16] = XmmReg1[48,16] - XmmReg2[48,16]; XmmReg1[64,16] = XmmReg1[64,16] - XmmReg2[64,16]; XmmReg1[80,16] = XmmReg1[80,16] - XmmReg2[80,16]; XmmReg1[96,16] = XmmReg1[96,16] - XmmReg2[96,16]; XmmReg1[112,16] = XmmReg1[112,16] - XmmReg2[112,16]; } :PSUBD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFA; XmmReg ... & m128 ... { local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] - m[0,32]; XmmReg[32,32] = XmmReg[32,32] - m[32,32]; XmmReg[64,32] = XmmReg[64,32] - m[64,32]; XmmReg[96,32] = XmmReg[96,32] - m[96,32]; } :PSUBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xFA; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] - XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] - XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] - XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] - XmmReg2[96,32]; } define pcodeop psubsb; :PSUBSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE8; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubsb(mmxreg1, mmxreg2_m64); } define pcodeop psubsw; :PSUBSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xE9; mmxreg1 ... 
& mmxreg2_m64 ... { mmxreg1 = psubsw(mmxreg1, mmxreg2_m64); } :PSUBSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE8; XmmReg1 ... & XmmReg2_m128 ... { XmmReg1 = psubsb(XmmReg1, XmmReg2_m128); } :PSUBSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xE9; XmmReg1 ... & XmmReg2_m128 ... { XmmReg1 = psubsw(XmmReg1, XmmReg2_m128); } define pcodeop psubusb; :PSUBUSB mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD8; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubusb(mmxreg1, mmxreg2_m64); } define pcodeop psubusw; :PSUBUSW mmxreg1, mmxreg2_m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xD9; mmxreg1 ... & mmxreg2_m64 ... { mmxreg1 = psubusw(mmxreg1, mmxreg2_m64); } :PSUBUSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD8; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = psubusb(XmmReg1, XmmReg2_m128); } :PSUBUSW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xD9; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = psubusw(XmmReg1, XmmReg2_m128); } :PUNPCKHBW mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x68; mmxreg ... & m64 { local m:8 = m64; mmxreg[0,8] = mmxreg[32,8]; mmxreg[8,8] = m[32,8]; mmxreg[16,8] = mmxreg[40,8]; mmxreg[24,8] = m[40,8]; mmxreg[32,8] = mmxreg[48,8]; mmxreg[40,8] = m[48,8]; mmxreg[48,8] = mmxreg[56,8]; mmxreg[56,8] = m[56,8]; } :PUNPCKHBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x68; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,8] = mmxreg1[32,8]; mmxreg1[8,8] = mmxreg2[32,8]; mmxreg1[16,8] = mmxreg1[40,8]; mmxreg1[24,8] = mmxreg2[40,8]; mmxreg1[32,8] = mmxreg1[48,8]; mmxreg1[40,8] = mmxreg2[48,8]; mmxreg1[48,8] = mmxreg1[56,8]; mmxreg1[56,8] = mmxreg2[56,8]; } :PUNPCKHWD mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x69; mmxreg ... 
& m64 { local m:8 = m64; mmxreg[0,16] = mmxreg[32,16]; mmxreg[16,16] = m[32,16]; mmxreg[32,16] = mmxreg[48,16]; mmxreg[48,16] = m[48,16]; } :PUNPCKHWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x69; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,16] = mmxreg1[32,16]; mmxreg1[16,16] = mmxreg2[32,16]; mmxreg1[32,16] = mmxreg1[48,16]; mmxreg1[48,16] = mmxreg2[48,16]; } :PUNPCKHDQ mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6A; mmxreg ... & m64 { mmxreg[0,32] = mmxreg[32,32]; mmxreg[32,32] = m64[32,32]; } :PUNPCKHDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x6A; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[0,32] = mmxreg1[32,32]; mmxreg1[32,32] = mmxreg2[32,32]; } :PUNPCKHBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x68; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,8] = XmmReg[64,8]; XmmReg[8,8] = m[64,8]; XmmReg[16,8] = XmmReg[72,8]; XmmReg[24,8] = m[72,8]; XmmReg[32,8] = XmmReg[80,8]; XmmReg[40,8] = m[80,8]; XmmReg[48,8] = XmmReg[88,8]; XmmReg[56,8] = m[88,8]; XmmReg[64,8] = XmmReg[96,8]; XmmReg[72,8] = m[96,8]; XmmReg[80,8] = XmmReg[104,8]; XmmReg[88,8] = m[104,8]; XmmReg[96,8] = XmmReg[112,8]; XmmReg[104,8] = m[112,8]; XmmReg[112,8] = XmmReg[120,8]; XmmReg[120,8] = m[120,8]; } # full set of XMM byte registers :PUNPCKHBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x68; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,8] = XmmReg1[64,8]; XmmReg1[8,8] = XmmReg2[64,8]; XmmReg1[16,8] = XmmReg1[72,8]; XmmReg1[24,8] = XmmReg2[72,8]; XmmReg1[32,8] = XmmReg1[80,8]; XmmReg1[40,8] = XmmReg2[80,8]; XmmReg1[48,8] = XmmReg1[88,8]; XmmReg1[56,8] = XmmReg2[88,8]; XmmReg1[64,8] = XmmReg1[96,8]; XmmReg1[72,8] = XmmReg2[96,8]; XmmReg1[80,8] = XmmReg1[104,8]; XmmReg1[88,8] = XmmReg2[104,8]; XmmReg1[96,8] = XmmReg1[112,8]; XmmReg1[104,8] = XmmReg2[112,8]; XmmReg1[112,8] = XmmReg1[120,8]; XmmReg1[120,8] = XmmReg2[120,8]; } :PUNPCKHWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x69; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[0,16] = XmmReg[64,16]; XmmReg[16,16] = m[64,16]; XmmReg[32,16] = XmmReg[80,16]; XmmReg[48,16] = m[80,16]; XmmReg[64,16] = XmmReg[96,16]; XmmReg[80,16] = m[96,16]; XmmReg[96,16] = XmmReg[112,16]; XmmReg[112,16] = m[112,16]; } :PUNPCKHWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x69; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,16] = XmmReg1[64,16]; XmmReg1[16,16] = XmmReg2[64,16]; XmmReg1[32,16] = XmmReg1[80,16]; XmmReg1[48,16] = XmmReg2[80,16]; XmmReg1[64,16] = XmmReg1[96,16]; XmmReg1[80,16] = XmmReg2[96,16]; XmmReg1[96,16] = XmmReg1[112,16]; XmmReg1[112,16] = XmmReg2[112,16]; } :PUNPCKHDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6A; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = XmmReg[64,32]; XmmReg[32,32] = m[64,32]; XmmReg[64,32] = XmmReg[96,32]; XmmReg[96,32] = m[96,32]; } :PUNPCKHDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6A; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[64,32]; XmmReg1[32,32] = XmmReg2[64,32]; XmmReg1[64,32] = XmmReg1[96,32]; XmmReg1[96,32] = XmmReg2[96,32]; } :PUNPCKHQDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6D; m128 & XmmReg ... { XmmReg[0,64] = XmmReg[64,64]; XmmReg[64,64] = m128[64,64]; } :PUNPCKHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6D; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[64,64]; XmmReg1[64,64] = XmmReg2[64,64]; } :PUNPCKLBW mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x60; mmxreg ... 
& m32 { local m:4 = m32; mmxreg[56,8] = m[24,8]; mmxreg[48,8] = mmxreg[24,8]; mmxreg[40,8] = m[16,8]; mmxreg[32,8] = mmxreg[16,8]; mmxreg[24,8] = m[8,8]; mmxreg[16,8] = mmxreg[8,8]; mmxreg[8,8] = m[0,8]; # mmxreg[0,8] = mmxreg[0,8]; superfluous } :PUNPCKLBW mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x60; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[56,8] = mmxreg2[24,8]; mmxreg1[48,8] = mmxreg1[24,8]; mmxreg1[40,8] = mmxreg2[16,8]; mmxreg1[32,8] = mmxreg1[16,8]; mmxreg1[24,8] = mmxreg2[8,8]; mmxreg1[16,8] = mmxreg1[8,8]; mmxreg1[8,8] = mmxreg2[0,8]; # mmxreg1[0,8] = mmxreg1[0,8]; superfluous } :PUNPCKLWD mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x61; mmxreg ... & m32 { local m:4 = m32; mmxreg[48,16] = m[16,16]; mmxreg[32,16] = mmxreg[16,16]; mmxreg[16,16] = m[0,16]; # mmxreg[0,16] = mmxreg[0,16]; superfluous } :PUNPCKLWD mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x61; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[48,16] = mmxreg2[16,16]; mmxreg1[32,16] = mmxreg1[16,16]; mmxreg1[16,16] = mmxreg2[0,16]; # mmxreg1[0,16] = mmxreg1[0,16]; superfluous } :PUNPCKLDQ mmxreg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x62; mmxreg ... & m32 { mmxreg[32,32] = m32; # mmxreg[0,32] = mmxreg[0,32]; superfluous } :PUNPCKLDQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x62; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1[32,32] = mmxreg2[0,32]; # mmxreg1[0,32] = mmxreg1[0,32]; superfluous } :PUNPCKLBW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x60; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[120,8] = m[56,8]; XmmReg[112,8] = XmmReg[56,8]; XmmReg[104,8] = m[48,8]; XmmReg[96,8] = XmmReg[48,8]; XmmReg[88,8] = m[40,8]; XmmReg[80,8] = XmmReg[40,8]; XmmReg[72,8] = m[32,8]; XmmReg[64,8] = XmmReg[32,8]; XmmReg[56,8] = m[24,8]; XmmReg[48,8] = XmmReg[24,8]; XmmReg[40,8] = m[16,8]; XmmReg[32,8] = XmmReg[16,8]; XmmReg[24,8] = m[8,8]; XmmReg[16,8] = XmmReg[8,8]; XmmReg[8,8] = m[0,8]; # XmmReg[0,8] = XmmReg[0,8]; superfluous } :PUNPCKLBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x60; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[120,8] = XmmReg2[56,8]; XmmReg1[112,8] = XmmReg1[56,8]; XmmReg1[104,8] = XmmReg2[48,8]; XmmReg1[96,8] = XmmReg1[48,8]; XmmReg1[88,8] = XmmReg2[40,8]; XmmReg1[80,8] = XmmReg1[40,8]; XmmReg1[72,8] = XmmReg2[32,8]; XmmReg1[64,8] = XmmReg1[32,8]; XmmReg1[56,8] = XmmReg2[24,8]; XmmReg1[48,8] = XmmReg1[24,8]; XmmReg1[40,8] = XmmReg2[16,8]; XmmReg1[32,8] = XmmReg1[16,8]; XmmReg1[24,8] = XmmReg2[8,8]; XmmReg1[16,8] = XmmReg1[8,8]; XmmReg1[8,8] = XmmReg2[0,8]; # XmmReg1[0,8] = XmmReg1[0,8]; superfluous } :PUNPCKLWD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x61; m128 & XmmReg ... { local m:16 = m128; XmmReg[112,16] = m[48,16]; XmmReg[96,16] = XmmReg[48,16]; XmmReg[80,16] = m[32,16]; XmmReg[64,16] = XmmReg[32,16]; XmmReg[48,16] = m[16,16]; XmmReg[32,16] = XmmReg[16,16]; XmmReg[16,16] = m[0,16]; # XmmReg[0,16] = XmmReg[0,16]; superfluous } :PUNPCKLWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x61; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[112,16] = XmmReg2[48,16]; XmmReg1[96,16] = XmmReg1[48,16]; XmmReg1[80,16] = XmmReg2[32,16]; XmmReg1[64,16] = XmmReg1[32,16]; XmmReg1[48,16] = XmmReg2[16,16]; XmmReg1[32,16] = XmmReg1[16,16]; XmmReg1[16,16] = XmmReg2[0,16]; # XmmReg1[0,16] = XmmReg1[0,16]; superfluous } :PUNPCKLDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x62; m128 & XmmReg ... 
{ local m:16 = m128; XmmReg[96,32] = m[32,32]; XmmReg[64,32] = XmmReg[32,32]; XmmReg[32,32] = m[0,32]; # XmmReg[0,32] = XmmReg[0,32]; superfluous } :PUNPCKLDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x62; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[96,32] = XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[32,32]; XmmReg1[32,32] = XmmReg2[0,32]; # XmmReg1[0,32] = XmmReg1[0,32]; superfluous } define pcodeop punpcklqdq; :PUNPCKLQDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6C; m128 & XmmReg ... { XmmReg[64,64] = m128[0,64]; # XmmReg[0,64] = XmmReg[0,64]; superfluous } :PUNPCKLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x6C; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1[64,64] = XmmReg2[0,64]; # XmmReg1[0,64] = XmmReg1[0,64]; superfluous } :PXOR mmxreg, m64 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEF; mmxreg ... & m64 { mmxreg = mmxreg ^ m64; } :PXOR mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xEF; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = mmxreg1 ^ mmxreg2; } :PXOR XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEF; XmmReg ... & m128 { XmmReg = XmmReg ^ m128; } :PXOR XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xEF; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = XmmReg1 ^ XmmReg2; } define pcodeop rcpps; :RCPPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x53; XmmReg ... & m128 { XmmReg = rcpps(XmmReg, m128); } :RCPPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x53; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rcpps(XmmReg1, XmmReg2); } define pcodeop rcpss; :RCPSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x53; XmmReg ... & m32 { XmmReg = rcpss(XmmReg, m32); } :RCPSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x53; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rcpss(XmmReg1, XmmReg2); } define pcodeop rsqrtps; :RSQRTPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x52; XmmReg ... 
& m128 { XmmReg = rsqrtps(XmmReg, m128); } :RSQRTPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x52; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rsqrtps(XmmReg1, XmmReg2); } define pcodeop rsqrtss; :RSQRTSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x52; XmmReg ... & m32 { XmmReg = rsqrtss(XmmReg, m32); } :RSQRTSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x52; xmmmod = 3 & XmmReg1 & XmmReg2 { XmmReg1 = rsqrtss(XmmReg1, XmmReg2); } :SHUFPD XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0xC6; XmmReg1 ... & XmmReg2_m128; imm8 { local srcLow:8 = XmmReg2_m128[0,64]; local srcHigh:8 = XmmReg2_m128[64,64]; local destLow:8 = XmmReg1[0,64]; local destHigh:8 = XmmReg1[64,64]; local control:1 = (imm8 & 0x1)== 0:1; conditionalAssign(XmmReg1[0,64],control,destLow,destHigh); control = (imm8 & 0x2) == 0:1; conditionalAssign(XmmReg1[64,64],control,srcLow,srcHigh); } :SHUFPS XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xC6; (XmmReg2_m128 & XmmReg1 ...); imm8 & Order0 & Order1 & Order2 & Order3 { local xmmreg2_m128_c0 = XmmReg2_m128[0,32]; local xmmreg2_m128_c1 = XmmReg2_m128[32,32]; local xmmreg2_m128_c2 = XmmReg2_m128[64,32]; local xmmreg2_m128_c3 = XmmReg2_m128[96,32]; local xmm_c0 = XmmReg1[0,32]; local xmm_c1 = XmmReg1[32,32]; local xmm_c2 = XmmReg1[64,32]; local xmm_c3 = XmmReg1[96,32]; shuffle_4(XmmReg1[0,32],Order0,xmm_c0,xmm_c1,xmm_c2,xmm_c3); shuffle_4(XmmReg1[32,32],Order1,xmm_c0,xmm_c1,xmm_c2,xmm_c3); shuffle_4(XmmReg1[64,32],Order2,xmmreg2_m128_c0,xmmreg2_m128_c1,xmmreg2_m128_c2,xmmreg2_m128_c3); shuffle_4(XmmReg1[96,32],Order3,xmmreg2_m128_c0,xmmreg2_m128_c1,xmmreg2_m128_c2,xmmreg2_m128_c3); } define pcodeop sqrtpd; :SQRTPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x51; XmmReg ... 
& m128 { XmmReg = sqrtpd(XmmReg, m128); } :SQRTPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = sqrtpd(XmmReg1, XmmReg2); } define pcodeop sqrtps; :SQRTPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x51; XmmReg ... & m128 { XmmReg = sqrtps(XmmReg, m128); } :SQRTPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = sqrtps(XmmReg1, XmmReg2); } :SQRTSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x51; XmmReg ... & m64 { XmmReg[0,64] = sqrt(m64); } :SQRTSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = sqrt(XmmReg2[0,64]); } :SQRTSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x51; XmmReg ... & m32 { XmmReg[0,32] = sqrt(m32); } :SQRTSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x51; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = sqrt(XmmReg2[0,32]); } :SUBPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5C;XmmReg ... & m128 { local m:16 = m128; XmmReg[0,64] = XmmReg[0,64] f- m[0,64]; XmmReg[64,64] = XmmReg[64,64] f- m[64,64]; } :SUBPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64]; XmmReg1[64,64] = XmmReg1[64,64] f- XmmReg2[64,64]; } :SUBPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5C; XmmReg ... 
& m128 { local m:16 = m128; XmmReg[0,32] = XmmReg[0,32] f- m[0,32]; XmmReg[32,32] = XmmReg[32,32] f- m[32,32]; XmmReg[64,32] = XmmReg[64,32] f- m[64,32]; XmmReg[96,32] = XmmReg[96,32] f- m[96,32]; } :SUBPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32]; XmmReg1[32,32] = XmmReg1[32,32] f- XmmReg2[32,32]; XmmReg1[64,32] = XmmReg1[64,32] f- XmmReg2[64,32]; XmmReg1[96,32] = XmmReg1[96,32] f- XmmReg2[96,32]; } :SUBSD XmmReg, m64 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5C; XmmReg ... & m64 { XmmReg[0,64] = XmmReg[0,64] f- m64; } :SUBSD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[0,64] f- XmmReg2[0,64]; } :SUBSS XmmReg, m32 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5C; XmmReg ...& m32 { XmmReg[0,32] = XmmReg[0,32] f- m32; } :SUBSS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x5C; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[0,32] f- XmmReg2[0,32]; } #Unordered Compare Scalar Double-Precision Floating-Point Values and Set EFLAGS # RESULT <- UnorderedCompare(SRC1[63-0] <> SRC2[63-0]) { # * Set EFLAGS *CASE (RESULT) OF # UNORDERED: ZF,PF,CF <- 111; # GREATER_THAN: ZF,PF,CF <- 000; # LESS_THAN: ZF,PF,CF <- 001; # EQUAL: ZF,PF,CF <- 100; # ESAC; # OF,AF,SF <- 0;} :UCOMISD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2E; m64 & XmmReg ... 
{ fucompe(XmmReg[0,64], m64); } :UCOMISD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x2E; xmmmod=3 & XmmReg1 & XmmReg2 { fucompe(XmmReg1[0,64], XmmReg2[0,64]); } #Unordered Compare Scalar Single-Precision Floating-Point Values and Set EFLAGS # RESULT <- UnorderedCompare(SRC1[31-0] <> SRC2[31-0]) { # * Set EFLAGS *CASE (RESULT) OF # UNORDERED: ZF,PF,CF <- 111; # GREATER_THAN: ZF,PF,CF <- 000; # LESS_THAN: ZF,PF,CF <- 001; # EQUAL: ZF,PF,CF <- 100; # ESAC; # OF,AF,SF <- 0;} :UCOMISS XmmReg, m32 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2E; m32 & XmmReg ... { fucompe(XmmReg[0,32], m32); } :UCOMISS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x2E; xmmmod=3 & XmmReg1 & XmmReg2 { fucompe(XmmReg1[0,32], XmmReg2[0,32]); } :UNPCKHPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x15; m128 & XmmReg ... { XmmReg[0,64] = XmmReg[64,64]; XmmReg[64,64] = m128[64,64]; } :UNPCKHPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = XmmReg1[64,64]; XmmReg1[64,64] = XmmReg2[64,64]; } :UNPCKHPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x15; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,32] = XmmReg[64,32]; XmmReg[64,32] = XmmReg[96,32]; XmmReg[32,32] = m[64,32]; XmmReg[96,32] = m[96,32]; } :UNPCKHPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,32] = XmmReg1[64,32]; XmmReg1[32,32] = XmmReg2[64,32]; XmmReg1[64,32] = XmmReg1[96,32]; # XmmReg1 and XmmReg2 could be the same register, preserve XmmReg1[64,32] till later XmmReg1[96,32] = XmmReg2[96,32]; } :UNPCKLPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x14; m128 & XmmReg ... 
{ # XmmReg[0,64] = XmmReg[0,64]; superfluous XmmReg[64,64] = m128[0,64]; } :UNPCKLPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2 { # XmmReg1[0,64] = XmmReg1[0,64]; superfluous XmmReg1[64,64] = XmmReg2[0,64]; } :UNPCKLPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x14; m128 & XmmReg ... { local m:16 = m128; # XmmReg[0,32] = XmmReg[0,32]; superfluous XmmReg[64,32] = XmmReg[32,32]; XmmReg[32,32] = m[0,32]; XmmReg[96,32] = m[32,32]; } :UNPCKLPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2 { # XmmReg1[0,32] = XmmReg1[0,32]; superfluous XmmReg1[64,32] = XmmReg1[32,32]; XmmReg1[96,32] = XmmReg2[32,32]; XmmReg1[32,32] = XmmReg2[0,32]; # XmmReg1 and XmmReg2 could be the same register, preserve Db till last } :XORPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x57; m128 & XmmReg ... { local m:16 = m128; XmmReg[0,64] = ( XmmReg[0,64] ^ m[0,64] ); XmmReg[64,64] = ( XmmReg[64,64] ^ m[64,64] ); } :XORPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x57; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = ( XmmReg1[0,64] ^ XmmReg2[0,64] ); XmmReg1[64,64] = ( XmmReg1[64,64] ^ XmmReg2[64,64] ); } :XORPS XmmReg, m128 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x57; m128 & XmmReg ... 
# Body of ":XORPS XmmReg, m128" (header ends the preceding line): bitwise
# XOR of four packed single-precision lanes; the memory operand is copied
# to a temporary so it is only dereferenced once.
{
	local m:16 = m128;
	XmmReg[0,32] = ( XmmReg[0,32] ^ m[0,32] );
	XmmReg[32,32] = ( XmmReg[32,32] ^ m[32,32] );
	XmmReg[64,32] = ( XmmReg[64,32] ^ m[64,32] );
	XmmReg[96,32] = ( XmmReg[96,32] ^ m[96,32] );
}

# XORPS (register form): lane-wise XOR of XmmReg2 into XmmReg1.
:XORPS XmmReg1, XmmReg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0x57; xmmmod=3 & XmmReg1 & XmmReg2
{
	XmmReg1[0,32] = ( XmmReg1[0,32] ^ XmmReg2[0,32] );
	XmmReg1[32,32] = ( XmmReg1[32,32] ^ XmmReg2[32,32] );
	XmmReg1[64,32] = ( XmmReg1[64,32] ^ XmmReg2[64,32] );
	XmmReg1[96,32] = ( XmmReg1[96,32] ^ XmmReg2[96,32] );
}

####
#### VIA Padlock instructions
####

# The Padlock crypto/RNG extensions are modeled as opaque pcodeops; only the
# register in/out conventions visible in each constructor below are encoded.
define pcodeop xstore_available;
define pcodeop xstore;
define pcodeop xcrypt_ecb;
define pcodeop xcrypt_cbc;
define pcodeop xcrypt_ctr;
define pcodeop xcrypt_cfb;
define pcodeop xcrypt_ofb;
define pcodeop montmul;
define pcodeop xsha1;
define pcodeop xsha256;

# XSTORE without REP: availability/status query form.
:XSTORE is vexMode=0 & mandover=0 & byte=0x0F; byte=0xA7; byte=0xC0
{
	EAX = xstore_available(EDX,EDI);
}

# REP XSTORE: store random bytes; ECX is consumed (cleared on completion).
:XSTORE.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xC0
{
	EAX = xstore(ECX,EDX,EDI);
	ECX = 0;
}

:XCRYPTECB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xC8
{
	xcrypt_ecb(ECX,EDX,EBX,ESI,EDI);
}

:XCRYPTCBC.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xD0
{
	xcrypt_cbc(ECX,EAX,EDX,EBX,ESI,EDI);
}

:XCRYPTCTR.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xD8
{
	xcrypt_ctr(ECX,EAX,EDX,EBX,ESI,EDI);
}

:XCRYPTCFB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xE0
{
	xcrypt_cfb(ECX,EAX,EDX,EBX,ESI,EDI);
}

:XCRYPTOFB.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA7; byte=0xE8
{
	xcrypt_ofb(ECX,EAX,EDX,EBX,ESI,EDI);
}

# REP MONTMUL: Montgomery multiply; ECX/EDX are clobbered to zero.
:MONTMUL.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xC0
{
	montmul(EAX,ECX,ESI);
	ECX=0;
	EDX=0;
}

:XSHA1.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xC8
{
	xsha1(ECX,ESI,EDI);
	EAX = ECX;
}

:XSHA256.REP is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0xA6; byte=0xD0
{
	xsha256(ECX,ESI,EDI);
	EAX = ECX;
}

####
#### SSE4.1 instructions
####

define pcodeop mpsadbw;
# SSE4.1 instructions on the 66 0F 38 / 66 0F 3A opcode maps (non-VEX;
# vexMode=0).  Each instruction has two constructors: a reg/mem form
# (XmmReg ... & m128) and a reg/reg form (xmmmod=3).  Semantics too complex
# to expand are modeled as opaque pcodeops (mpsadbw, phminposuw, pmuldq,
# pmulld, dpps, dppd, blendps/pd, blendvps/pd, pblendvb, pblendw).
# The BLENDV*/PBLENDVB variants pass the implicit XMM0 mask register as a
# third argument, matching the legacy-SSE encoding's implicit operand.
# NOTE(review): `define pcodeop mpsadbw;` presumably appears just before
# this span — not visible here.
:MPSADBW XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x42; XmmReg ... & m128; imm8 { XmmReg = mpsadbw(XmmReg, m128, imm8:8); } :MPSADBW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x42; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = mpsadbw(XmmReg1, XmmReg2, imm8:8); } define pcodeop phminposuw; :PHMINPOSUW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x41; XmmReg ... & m128 { XmmReg = phminposuw(m128); } :PHMINPOSUW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x41; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = phminposuw(XmmReg2); } define pcodeop pmuldq; :PMULDQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x28; XmmReg ... & m128 { XmmReg = pmuldq(XmmReg, m128); } :PMULDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x28; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmuldq(XmmReg1, XmmReg2); } define pcodeop pmulld; :PMULLD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x40; XmmReg ... & m128 { XmmReg = pmulld(XmmReg, m128); } :PMULLD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x40; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmulld(XmmReg1, XmmReg2); } define pcodeop dpps; :DPPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x40; XmmReg ... & m128; imm8 { XmmReg = dpps(XmmReg, m128, imm8:8); } :DPPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x40; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = dpps(XmmReg1, XmmReg2, imm8:8); } define pcodeop dppd; :DPPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x41; XmmReg ...
& m128; imm8 { XmmReg = dppd(XmmReg, m128, imm8:8); } :DPPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x41; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = dppd(XmmReg1, XmmReg2, imm8:8); } define pcodeop blendps; :BLENDPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0C; XmmReg ... & m128; imm8 { XmmReg = blendps(XmmReg, m128, imm8:8); } :BLENDPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0C; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = blendps(XmmReg1, XmmReg2, imm8:8); } define pcodeop blendpd; :BLENDPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0D; XmmReg ... & m128; imm8 { XmmReg = blendpd(XmmReg, m128, imm8:8); } :BLENDPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0D; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = blendpd(XmmReg1, XmmReg2, imm8:8); } define pcodeop blendvps; :BLENDVPS XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x14; XmmReg ... & m128 { XmmReg = blendvps(XmmReg, m128, XMM0); } :BLENDVPS XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x14; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = blendvps(XmmReg1, XmmReg2, XMM0); } define pcodeop blendvpd; :BLENDVPD XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x15; XmmReg ... & m128 { XmmReg = blendvpd(XmmReg, m128, XMM0); } :BLENDVPD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x15; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = blendvpd(XmmReg1, XmmReg2, XMM0); } define pcodeop pblendvb; :PBLENDVB XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x10; XmmReg ...
# PMINSB below is expanded inline: each [bitOffset,width] lane of the
# destination is conditionally replaced via the conditionalAssign macro
# when the source lane compares lower (s< = signed compare).
& m128 { XmmReg = pblendvb(XmmReg, m128, XMM0); } :PBLENDVB XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x10; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pblendvb(XmmReg1, XmmReg2, XMM0); } define pcodeop pblendw; :PBLENDW XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0E; XmmReg ... & m128; imm8 { XmmReg = pblendw(XmmReg, m128, imm8:8); } :PBLENDW XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0E; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = pblendw(XmmReg1, XmmReg2, imm8:8); } :PMINSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x38; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,8],srcCopy[0,8] s< XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]); conditionalAssign(XmmReg1[8,8],srcCopy[8,8] s< XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]); conditionalAssign(XmmReg1[16,8],srcCopy[16,8] s< XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]); conditionalAssign(XmmReg1[24,8],srcCopy[24,8] s< XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]); conditionalAssign(XmmReg1[32,8],srcCopy[32,8] s< XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]); conditionalAssign(XmmReg1[40,8],srcCopy[40,8] s< XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]); conditionalAssign(XmmReg1[48,8],srcCopy[48,8] s< XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]); conditionalAssign(XmmReg1[56,8],srcCopy[56,8] s< XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]); conditionalAssign(XmmReg1[64,8],srcCopy[64,8] s< XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]); conditionalAssign(XmmReg1[72,8],srcCopy[72,8] s< XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]); conditionalAssign(XmmReg1[80,8],srcCopy[80,8] s< XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]); conditionalAssign(XmmReg1[88,8],srcCopy[88,8] s< XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]); conditionalAssign(XmmReg1[96,8],srcCopy[96,8] s< XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]); conditionalAssign(XmmReg1[104,8],srcCopy[104,8] s<
# SSE4.1 packed min/max, expanded per-lane with conditionalAssign:
#   PMINSB/PMAXSB: 16 signed bytes  (s< / s>)
#   PMINUW/PMAXUW:  8 unsigned words (< / >)
#   PMINUD/PMAXUD:  4 unsigned dwords
#   PMINSD/PMAXSD:  4 signed dwords
# srcCopy snapshots the 128-bit source first so a memory operand is read
# only once.  ROUNDPS/SS/PD/SD and INSERTPS at the end of this span fall
# back to opaque pcodeops (rounding-mode imm8 passed through).
XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]); conditionalAssign(XmmReg1[112,8],srcCopy[112,8] s< XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]); conditionalAssign(XmmReg1[120,8],srcCopy[120,8] s< XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]); } :PMINUW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3A; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,16],srcCopy[0,16] < XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]); conditionalAssign(XmmReg1[16,16],srcCopy[16,16] < XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]); conditionalAssign(XmmReg1[32,16],srcCopy[32,16] < XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]); conditionalAssign(XmmReg1[48,16],srcCopy[48,16] < XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]); conditionalAssign(XmmReg1[64,16],srcCopy[64,16] < XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]); conditionalAssign(XmmReg1[80,16],srcCopy[80,16] < XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]); conditionalAssign(XmmReg1[96,16],srcCopy[96,16] < XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]); conditionalAssign(XmmReg1[112,16],srcCopy[112,16] < XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]); } :PMINUD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3B; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,32],srcCopy[0,32] < XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]); conditionalAssign(XmmReg1[32,32],srcCopy[32,32] < XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]); conditionalAssign(XmmReg1[64,32],srcCopy[64,32] < XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]); conditionalAssign(XmmReg1[96,32],srcCopy[96,32] < XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]); } :PMINSD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x39; XmmReg1 ...
& XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,32],srcCopy[0,32] s< XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]); conditionalAssign(XmmReg1[32,32],srcCopy[32,32] s< XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]); conditionalAssign(XmmReg1[64,32],srcCopy[64,32] s< XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]); conditionalAssign(XmmReg1[96,32],srcCopy[96,32] s< XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]); } :PMAXSB XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3C; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,8],srcCopy[0,8] s> XmmReg1[0,8],srcCopy[0,8],XmmReg1[0,8]); conditionalAssign(XmmReg1[8,8],srcCopy[8,8] s> XmmReg1[8,8],srcCopy[8,8],XmmReg1[8,8]); conditionalAssign(XmmReg1[16,8],srcCopy[16,8] s> XmmReg1[16,8],srcCopy[16,8],XmmReg1[16,8]); conditionalAssign(XmmReg1[24,8],srcCopy[24,8] s> XmmReg1[24,8],srcCopy[24,8],XmmReg1[24,8]); conditionalAssign(XmmReg1[32,8],srcCopy[32,8] s> XmmReg1[32,8],srcCopy[32,8],XmmReg1[32,8]); conditionalAssign(XmmReg1[40,8],srcCopy[40,8] s> XmmReg1[40,8],srcCopy[40,8],XmmReg1[40,8]); conditionalAssign(XmmReg1[48,8],srcCopy[48,8] s> XmmReg1[48,8],srcCopy[48,8],XmmReg1[48,8]); conditionalAssign(XmmReg1[56,8],srcCopy[56,8] s> XmmReg1[56,8],srcCopy[56,8],XmmReg1[56,8]); conditionalAssign(XmmReg1[64,8],srcCopy[64,8] s> XmmReg1[64,8],srcCopy[64,8],XmmReg1[64,8]); conditionalAssign(XmmReg1[72,8],srcCopy[72,8] s> XmmReg1[72,8],srcCopy[72,8],XmmReg1[72,8]); conditionalAssign(XmmReg1[80,8],srcCopy[80,8] s> XmmReg1[80,8],srcCopy[80,8],XmmReg1[80,8]); conditionalAssign(XmmReg1[88,8],srcCopy[88,8] s> XmmReg1[88,8],srcCopy[88,8],XmmReg1[88,8]); conditionalAssign(XmmReg1[96,8],srcCopy[96,8] s> XmmReg1[96,8],srcCopy[96,8],XmmReg1[96,8]); conditionalAssign(XmmReg1[104,8],srcCopy[104,8] s> XmmReg1[104,8],srcCopy[104,8],XmmReg1[104,8]); conditionalAssign(XmmReg1[112,8],srcCopy[112,8] s> XmmReg1[112,8],srcCopy[112,8],XmmReg1[112,8]);
conditionalAssign(XmmReg1[120,8],srcCopy[120,8] s> XmmReg1[120,8],srcCopy[120,8],XmmReg1[120,8]); } :PMAXUW XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3E; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,16],srcCopy[0,16] > XmmReg1[0,16],srcCopy[0,16],XmmReg1[0,16]); conditionalAssign(XmmReg1[16,16],srcCopy[16,16] > XmmReg1[16,16],srcCopy[16,16],XmmReg1[16,16]); conditionalAssign(XmmReg1[32,16],srcCopy[32,16] > XmmReg1[32,16],srcCopy[32,16],XmmReg1[32,16]); conditionalAssign(XmmReg1[48,16],srcCopy[48,16] > XmmReg1[48,16],srcCopy[48,16],XmmReg1[48,16]); conditionalAssign(XmmReg1[64,16],srcCopy[64,16] > XmmReg1[64,16],srcCopy[64,16],XmmReg1[64,16]); conditionalAssign(XmmReg1[80,16],srcCopy[80,16] > XmmReg1[80,16],srcCopy[80,16],XmmReg1[80,16]); conditionalAssign(XmmReg1[96,16],srcCopy[96,16] > XmmReg1[96,16],srcCopy[96,16],XmmReg1[96,16]); conditionalAssign(XmmReg1[112,16],srcCopy[112,16] > XmmReg1[112,16],srcCopy[112,16],XmmReg1[112,16]); } :PMAXUD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3F; XmmReg1 ... & XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,32],srcCopy[0,32] > XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]); conditionalAssign(XmmReg1[32,32],srcCopy[32,32] > XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]); conditionalAssign(XmmReg1[64,32],srcCopy[64,32] > XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]); conditionalAssign(XmmReg1[96,32],srcCopy[96,32] > XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]); } :PMAXSD XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x3D; XmmReg1 ...
& XmmReg2_m128 { local srcCopy:16 = XmmReg2_m128; conditionalAssign(XmmReg1[0,32],srcCopy[0,32] s> XmmReg1[0,32],srcCopy[0,32],XmmReg1[0,32]); conditionalAssign(XmmReg1[32,32],srcCopy[32,32] s> XmmReg1[32,32],srcCopy[32,32],XmmReg1[32,32]); conditionalAssign(XmmReg1[64,32],srcCopy[64,32] s> XmmReg1[64,32],srcCopy[64,32],XmmReg1[64,32]); conditionalAssign(XmmReg1[96,32],srcCopy[96,32] s> XmmReg1[96,32],srcCopy[96,32],XmmReg1[96,32]); } define pcodeop roundps; :ROUNDPS XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x08; XmmReg ... & m128; imm8 { XmmReg = roundps(XmmReg, m128, imm8:8); } :ROUNDPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x08; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundps(XmmReg1, XmmReg2, imm8:8); } define pcodeop roundss; :ROUNDSS XmmReg, m32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0A; XmmReg ... & m32; imm8 { XmmReg = roundss(XmmReg, m32, imm8:8); } :ROUNDSS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0A; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundss(XmmReg1, XmmReg2, imm8:8); } define pcodeop roundpd; :ROUNDPD XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x09; XmmReg ... & m128; imm8 { XmmReg = roundpd(XmmReg, m128, imm8:8); } :ROUNDPD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x09; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundpd(XmmReg1, XmmReg2, imm8:8); } define pcodeop roundsd; :ROUNDSD XmmReg, m64, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0B; XmmReg ... & m64; imm8 { XmmReg = roundsd(XmmReg, m64, imm8:8); } :ROUNDSD XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x0B; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = roundsd(XmmReg1, XmmReg2, imm8:8); } define pcodeop insertps; :INSERTPS XmmReg, m32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x21; XmmReg ...
# SSE4.1 insert/extract group.  PINSRB/PINSRD build the result by
# shift/mask on the two 64-bit halves of the Xmm register: destIndex is the
# bit offset selected by imm8; the half not targeted is left untouched via
# the useLow/!useLow guards of conditionalAssign (the unguarded
# destIndex-64 underflow is harmless because that value is only consumed
# on the guarded path).  PINSRQ/PEXTRQ are IA64-only (REX.W encodings).
# PEXTRB/PEXTRD shift the selected lane down into a temp and either
# zero-extend into a 32-bit GPR (with check_Rmr32_dest for 64-bit upper
# clearing) or store the truncated lane to memory.
& m32; imm8 { XmmReg = insertps(XmmReg, m32, imm8:8); } :INSERTPS XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x21; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XmmReg1 = insertps(XmmReg1, XmmReg2, imm8:8); } :PINSRB XmmReg, rm32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x20; XmmReg ... & rm32; imm8 { local destIndex:1 = (imm8 & 0xf) * 8:1; local useLow:1 = destIndex < 64:1; local newLow:8 = zext(rm32:1) << destIndex; newLow = (XmmReg[0,64] & ~(0xff:8 << destIndex)) | newLow; local newHigh:8 = zext(rm32:1) << (destIndex-64:1); newHigh = (XmmReg[64,64] & ~(0xff:8 << (destIndex - 64:1))) | newHigh; conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]); conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]); } :PINSRD XmmReg, rm32, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x22; XmmReg ... & rm32; imm8 { local destIndex:1 = (imm8 & 0x3) * 32:1; local useLow:1 = destIndex < 64:1; local newLow:8 = zext(rm32) << destIndex; newLow = (XmmReg[0,64] & ~(0xffffffff:8 << destIndex)) | newLow; local newHigh:8 = zext(rm32) << (destIndex-64:1); newHigh = (XmmReg[64,64] & ~(0xffffffff:8 << (destIndex - 64:1))) | newHigh; conditionalAssign(XmmReg[0,64],useLow,newLow,XmmReg[0,64]); conditionalAssign(XmmReg[64,64],!useLow,newHigh,XmmReg[64,64]); } @ifdef IA64 :PINSRQ XmmReg, rm64, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x22; XmmReg ... & rm64; imm8 { local useHigh:1 = imm8 & 0x1; conditionalAssign(XmmReg[0,64],!useHigh,rm64,XmmReg[0,64]); conditionalAssign(XmmReg[64,64],useHigh,rm64,XmmReg[64,64]); } @endif define pcodeop extractps; @ifdef IA64 :EXTRACTPS rm64, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x17; XmmReg ... & rm64; imm8 { rm64 = extractps(XmmReg, imm8:8); } @endif :EXTRACTPS rm32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x17; XmmReg ...
& rm32 & check_rm32_dest ...; imm8 { rm32 = extractps(XmmReg, imm8:8); build check_rm32_dest; } :PEXTRB Rmr32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x14; mod=3 & XmmReg & Rmr32 & check_Rmr32_dest; imm8 { local shift:1 = (imm8 & 0xf) * 8:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64)); Rmr32 = zext(temp:1); build check_Rmr32_dest; } :PEXTRB Mem, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x14; XmmReg ... & Mem; imm8 { local shift:1 = (imm8 & 0xf) * 8:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64)); *Mem = temp:1; } :PEXTRD Rmr32, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x16; mod=3 & XmmReg & Rmr32 & check_Rmr32_dest; imm8 { local shift:1 = (imm8 & 0x3) * 32:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64)); Rmr32 = zext(temp:4); build check_Rmr32_dest; } :PEXTRD Mem, XmmReg, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ... & Mem; imm8 { local shift:1 = (imm8 & 0x3) * 32:1; local low:1 = shift < 64:1; local temp:8; conditionalAssign(temp,low,XmmReg[0,64] >> shift,XmmReg[64,64] >> (shift - 64)); *Mem = temp:4; } @ifdef IA64 :PEXTRQ Rmr64, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x16; mod=3 & XmmReg & Rmr64; imm8 { local high:1 = imm8 & 0x1; conditionalAssign(Rmr64,high,XmmReg[64,64],XmmReg[0,64]); } :PEXTRQ Mem, XmmReg, imm8 is $(LONGMODE_ON) & vexMode=0 & bit64=1 & $(PRE_66) & $(REX_W) & byte=0x0F; byte=0x3A; byte=0x16; XmmReg ...
# PMOVSX* family below: sign-extending packed moves, modeled as opaque
# pcodeops; memory-source widths (m64/m32/m16) match the number of source
# lanes being widened into 128 bits.
& Mem; imm8 { local high:1 = imm8 & 0x1; local temp:8; conditionalAssign(temp,high,XmmReg[64,64],XmmReg[0,64]); *Mem = temp; } @endif define pcodeop pmovsxbw; :PMOVSXBW XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x20; XmmReg ... & m64 { XmmReg = pmovsxbw(XmmReg, m64); } :PMOVSXBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x20; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbw(XmmReg1, XmmReg2); } define pcodeop pmovsxbd; :PMOVSXBD XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x21; XmmReg ... & m32 { XmmReg = pmovsxbd(XmmReg, m32); } :PMOVSXBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x21; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbd(XmmReg1, XmmReg2); } define pcodeop pmovsxbq; :PMOVSXBQ XmmReg, m16 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x22; XmmReg ... & m16 { XmmReg = pmovsxbq(XmmReg, m16); } :PMOVSXBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x22; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxbq(XmmReg1, XmmReg2); } define pcodeop pmovsxwd; :PMOVSXWD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x23; XmmReg ... & m64 { XmmReg = pmovsxwd(XmmReg, m64); } :PMOVSXWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x23; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxwd(XmmReg1, XmmReg2); } define pcodeop pmovsxwq; :PMOVSXWQ XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x24; XmmReg ... & m32 { XmmReg = pmovsxwq(XmmReg, m32); } :PMOVSXWQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x24; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxwq(XmmReg1, XmmReg2); } define pcodeop pmovsxdq; :PMOVSXDQ XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x25; XmmReg ...
# PMOVZX* zero-extending packed moves (opaque pcodeops), then:
#   PTEST    - sets ZF from (src & dst)==0 and CF from (src & ~dst)==0,
#              clearing AF/OF/PF/SF, matching the documented flag behavior.
#   PCMPEQQ  - per-qword equality producing all-ones/all-zeros masks via
#              zext(cond) * 0xffff...ff.
#   PACKUSDW / MOVNTDQA - opaque pcodeops (MOVNTDQA's non-temporal hint is
#              not modeled beyond the load).
# SSE4.2 section: CRC32 (accumulator forms over 8/16/32/64-bit sources;
# 64-bit forms gated by IA64/REX.W), PCMPESTRI/PCMPESTRM and
# PCMPISTRI/PCMPISTRM writing their implicit ECX/XMM0 results.
& m64 { XmmReg = pmovsxdq(XmmReg, m64); } :PMOVSXDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x25; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovsxdq(XmmReg1, XmmReg2); } define pcodeop pmovzxbw; :PMOVZXBW XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x30; XmmReg ... & m64 { XmmReg = pmovzxbw(XmmReg, m64); } :PMOVZXBW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x30; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbw(XmmReg1, XmmReg2); } define pcodeop pmovzxbd; :PMOVZXBD XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x31; XmmReg ... & m32 { XmmReg = pmovzxbd(XmmReg, m32); } :PMOVZXBD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x31; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbd(XmmReg1, XmmReg2); } define pcodeop pmovzxbq; :PMOVZXBQ XmmReg, m16 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x32; XmmReg ... & m16 { XmmReg = pmovzxbq(XmmReg, m16); } :PMOVZXBQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x32; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxbq(XmmReg1, XmmReg2); } define pcodeop pmovzxwd; :PMOVZXWD XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x33; XmmReg ... & m64 { XmmReg = pmovzxwd(XmmReg, m64); } :PMOVZXWD XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x33; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxwd(XmmReg1, XmmReg2); } define pcodeop pmovzxwq; :PMOVZXWQ XmmReg, m32 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x34; XmmReg ... & m32 { XmmReg = pmovzxwq(XmmReg, m32); } :PMOVZXWQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x34; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxwq(XmmReg1, XmmReg2); } define pcodeop pmovzxdq; :PMOVZXDQ XmmReg, m64 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x35; XmmReg ...
& m64 { XmmReg = pmovzxdq(XmmReg, m64); } :PMOVZXDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x35; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = pmovzxdq(XmmReg1, XmmReg2); } :PTEST XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x17; XmmReg ... & m128 { local temp_m128:16 = m128; local tmp = temp_m128 & XmmReg; ZF = tmp == 0; local tmp2 = temp_m128 & ~XmmReg; CF = tmp2 == 0; AF = 0; OF = 0; PF = 0; SF = 0; } :PTEST XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x17; xmmmod=3 & XmmReg1 & XmmReg2 { local tmp = XmmReg2 & XmmReg1; ZF = tmp == 0; local tmp2 = XmmReg2 & ~XmmReg1; CF = tmp2 == 0; AF = 0; OF = 0; PF = 0; SF = 0; } :PCMPEQQ XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; XmmReg ... & m128 { local temp_m128:16 = m128; XmmReg[0,64] = zext(XmmReg[0,64] == temp_m128[0,64]) * 0xffffffffffffffff:8; XmmReg[64,64] = zext(XmmReg[64,64] == temp_m128[64,64]) * 0xffffffffffffffff:8; } :PCMPEQQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x29; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1[0,64] = zext(XmmReg1[0,64] == XmmReg2[0,64]) * 0xffffffffffffffff:8; XmmReg1[64,64] = zext(XmmReg1[64,64] == XmmReg2[64,64]) * 0xffffffffffffffff:8; } define pcodeop packusdw; :PACKUSDW XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; XmmReg ... & m128 { XmmReg = packusdw(XmmReg, m128); } :PACKUSDW XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2B; xmmmod=3 & XmmReg1 & XmmReg2 { XmmReg1 = packusdw(XmmReg1, XmmReg2); } define pcodeop movntdqa; :MOVNTDQA XmmReg, m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x2A; XmmReg ... & m128 { XmmReg = movntdqa(XmmReg, m128); } #### #### SSE4.2 instructions #### define pcodeop crc32; :CRC32 Reg32, rm8 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF0; Reg32 ... & check_Reg32_dest ...
& rm8 { Reg32 = crc32(Reg32, rm8); build check_Reg32_dest; } :CRC32 Reg32, rm16 is vexMode=0 & opsize=0 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF1; Reg32 ... & check_Reg32_dest ... & rm16 { Reg32 = crc32(Reg32, rm16); build check_Reg32_dest; } :CRC32 Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F2) & byte=0x0F; byte=0x38; byte=0xF1; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = crc32(Reg32, rm32); build check_Reg32_dest; } @ifdef IA64 :CRC32 Reg32, rm8 is vexMode=0 & opsize=1 & $(PRE_F2) & $(REX) & byte=0x0F; byte=0x38; byte=0xF0; Reg32 ... & check_Reg32_dest ... & rm8 { Reg32 = crc32(Reg32, rm8); build check_Reg32_dest; } :CRC32 Reg64, rm8 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF0; Reg64 ... & rm8 { Reg64 = crc32(Reg64, rm8); } :CRC32 Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F2) & $(REX_W) & byte=0x0F; byte=0x38; byte=0xF1; Reg64 ... & rm64 { Reg64 = crc32(Reg64, rm64); } @endif define pcodeop pcmpestri; :PCMPESTRI XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x61; XmmReg ... & m128; imm8 { ECX = pcmpestri(XmmReg, m128, imm8:8); } :PCMPESTRI XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x61; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { ECX = pcmpestri(XmmReg1, XmmReg2, imm8:8); } define pcodeop pcmpestrm; :PCMPESTRM XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x60; XmmReg ... & m128; imm8 { XMM0 = pcmpestrm(XmmReg, m128, imm8:8); } :PCMPESTRM XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x60; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XMM0 = pcmpestrm(XmmReg1, XmmReg2, imm8:8); } define pcodeop pcmpistri; :PCMPISTRI XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x63; XmmReg ...
# PCMPGTQ expands inline (signed qword compare -> all-ones/zeros mask);
# POPCNT uses the builtin popcount with a flag-clearing macro that sets
# only ZF from the source.  AES-NI section starts at the end of this span.
& m128; imm8 { ECX = pcmpistri(XmmReg, m128, imm8:8); } :PCMPISTRI XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x63; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { ECX = pcmpistri(XmmReg1, XmmReg2, imm8:8); } define pcodeop pcmpistrm; :PCMPISTRM XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x62; XmmReg ... & m128; imm8 { XMM0 = pcmpistrm(XmmReg, m128, imm8:8); } :PCMPISTRM XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0x62; xmmmod=3 & XmmReg1 & XmmReg2; imm8 { XMM0 = pcmpistrm(XmmReg1, XmmReg2, imm8:8); } :PCMPGTQ XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0x37; XmmReg1 ... & XmmReg2_m128 { XmmReg1[0,64] = 0xffffffffffffffff:8 * (zext(XmmReg1[0,64] s> XmmReg2_m128[0,64])); XmmReg1[64,64] = 0xffffffffffffffff:8 * (zext(XmmReg1[64,64] s> XmmReg2_m128[64,64])); } macro popcountflags(src){ OF = 0:1; SF = 0:1; AF = 0:1; CF = 0:1; PF = 0:1; ZF = (src == 0); } :POPCNT Reg16, rm16 is vexMode=0 & opsize=0 & $(PRE_F3) & byte=0x0F; byte=0xB8; Reg16 ... & rm16 { popcountflags(rm16); Reg16 = popcount(rm16); } :POPCNT Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0F; byte=0xB8; Reg32 ... & check_Reg32_dest ... & rm32 { popcountflags(rm32); Reg32 = popcount(rm32); build check_Reg32_dest; } @ifdef IA64 :POPCNT Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & $(REX_W) & byte=0x0F; byte=0xB8; Reg64 ... & rm64 { popcountflags(rm64); Reg64 = popcount(rm64); } @endif #### #### AESNI instructions #### define pcodeop aesdec; :AESDEC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xde; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = aesdec(XmmReg1, XmmReg2_m128); } :VAESDEC XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xde; (XmmReg1 & YmmReg1) ...
# AES-NI round primitives, all opaque pcodeops: AESDEC/AESDECLAST/AESENC/
# AESENCLAST (state op round-key), AESIMC (inverse mix-columns) and
# AESKEYGENASSIST.  Each legacy form has a 128-bit VEX counterpart taking
# the first source from vexVVVV and zero-extending the Xmm result into the
# full Ymm register (YmmReg1 = zext(XmmReg1)), per VEX.128 upper-clearing.
# The deprecated 3DNow! section begins afterwards: all 3DNow! ops share
# the two-byte 0F 0F opcode and are selected by a trailing suffix byte
# (suffix3D); semantics are opaque pcodeops, one reg/mem and one reg/reg
# constructor each.
& XmmReg2_m128 { XmmReg1 = aesdec(vexVVVV_XmmReg, XmmReg2_m128); YmmReg1 = zext(XmmReg1); } define pcodeop aesdeclast; :AESDECLAST XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdf; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = aesdeclast(XmmReg1, XmmReg2_m128); } :VAESDECLAST XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdf; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = aesdeclast(vexVVVV_XmmReg, XmmReg2_m128); YmmReg1 = zext(XmmReg1); } define pcodeop aesenc; :AESENC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdc; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = aesenc(XmmReg1, XmmReg2_m128); } :VAESENC XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdc; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = aesenc(vexVVVV_XmmReg, XmmReg2_m128); YmmReg1 = zext(XmmReg1); } define pcodeop aesenclast; :AESENCLAST XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdd; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = aesenclast(XmmReg1, XmmReg2_m128); } :VAESENCLAST XmmReg1, vexVVVV_XmmReg, XmmReg2_m128 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0xdd; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = aesenclast(vexVVVV_XmmReg, XmmReg2_m128); YmmReg1 = zext(XmmReg1); } define pcodeop aesimc; :AESIMC XmmReg1, XmmReg2_m128 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x38; byte=0xdb; XmmReg1 ... & XmmReg2_m128 { XmmReg1 = aesimc(XmmReg2_m128); } :VAESIMC XmmReg1, XmmReg2_m128 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F38) & $(VEX_WIG); byte=0xdb; (XmmReg1 & YmmReg1) ...
& XmmReg2_m128 { XmmReg1 = aesimc(XmmReg2_m128); YmmReg1 = zext(XmmReg1); } define pcodeop aeskeygenassist; :AESKEYGENASSIST XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x3A; byte=0xdf; XmmReg1 ... & XmmReg2_m128; imm8 { XmmReg1 = aeskeygenassist(XmmReg2_m128, imm8:1); } :VAESKEYGENASSIST XmmReg1, XmmReg2_m128, imm8 is $(VEX_NONE) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG); byte=0xdf; (XmmReg1 & YmmReg1) ... & XmmReg2_m128; imm8 { XmmReg1 = aeskeygenassist(XmmReg2_m128, imm8:1); YmmReg1 = zext(XmmReg1); } #### #### Deprecated 3DNow! instructions #### define pcodeop PackedIntToFloatingDwordConv; :PI2FD mmxreg, m64 is vexMode=0 & suffix3D=0x0D & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedIntToFloatingDwordConv(mmxreg, m64); } :PI2FD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x0D & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedIntToFloatingDwordConv(mmxreg1, mmxreg2); } define pcodeop PackedFloatingToIntDwordConv; :PF2ID mmxreg, m64 is vexMode=0 & suffix3D=0x1D & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingToIntDwordConv(mmxreg, m64); } :PF2ID mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x1D & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingToIntDwordConv(mmxreg1, mmxreg2); } define pcodeop PackedFloatingCompareGE; :PFCMPGE mmxreg, m64 is vexMode=0 & suffix3D=0x90 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareGE(mmxreg, m64); } :PFCMPGE mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x90 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareGE(mmxreg1, mmxreg2); } define pcodeop PackedFloatingCompareGT; :PFCMPGT mmxreg, m64 is vexMode=0 & suffix3D=0xA0 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ...
& m64 { mmxreg = PackedFloatingCompareGT(mmxreg, m64); } :PFCMPGT mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA0 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareGT(mmxreg1, mmxreg2); } define pcodeop PackedFloatingCompareEQ; :PFCMPEQ mmxreg, m64 is vexMode=0 & suffix3D=0xB0 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingCompareEQ(mmxreg, m64); } :PFCMPEQ mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB0 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingCompareEQ(mmxreg1, mmxreg2); } define pcodeop PackedFloatingAccumulate; :PFACC mmxreg, m64 is vexMode=0 & suffix3D=0xAE & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingAccumulate(mmxreg, m64); } :PFACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xAE & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingAccumulate(mmxreg1, mmxreg2); } define pcodeop PackedFloatingADD; :PFADD mmxreg, m64 is vexMode=0 & suffix3D=0x9E & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingADD(mmxreg, m64); } :PFADD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x9E & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingADD(mmxreg1, mmxreg2); } define pcodeop PackedFloatingSUB; :PFSUB mmxreg, m64 is vexMode=0 & suffix3D=0x9A & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingSUB(mmxreg, m64); } :PFSUB mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x9A & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingSUB(mmxreg1, mmxreg2); } define pcodeop PackedFloatingSUBR; :PFSUBR mmxreg, m64 is vexMode=0 & suffix3D=0xAA & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ...
& m64 { mmxreg = PackedFloatingSUBR(mmxreg, m64); } :PFSUBR mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xAA & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingSUBR(mmxreg1, mmxreg2); } define pcodeop PackedFloatingMIN; :PFMIN mmxreg, m64 is vexMode=0 & suffix3D=0x94 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMIN(mmxreg, m64); } :PFMIN mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x94 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMIN(mmxreg1, mmxreg2); } define pcodeop PackedFloatingMAX; :PFMAX mmxreg, m64 is vexMode=0 & suffix3D=0xA4 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMAX(mmxreg, m64); } :PFMAX mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA4 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMAX(mmxreg1, mmxreg2); } define pcodeop PackedFloatingMUL; :PFMUL mmxreg, m64 is vexMode=0 & suffix3D=0xB4 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingMUL(mmxreg, m64); } :PFMUL mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB4 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingMUL(mmxreg1, mmxreg2); } define pcodeop FloatingReciprocalAprox; :PFRCP mmxreg, m64 is vexMode=0 & suffix3D=0x96 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = FloatingReciprocalAprox(mmxreg, m64); } :PFRCP mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x96 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = FloatingReciprocalAprox(mmxreg1, mmxreg2); } define pcodeop PackedFloatingReciprocalSQRAprox; :PFRSQRT mmxreg, m64 is vexMode=0 & suffix3D=0x97 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ...
# Remaining 3DNow! ops (reciprocal/rsqrt Newton iterations, PAVGUSB,
# PMULHRW), FEMMS, the 3DNow! extensions (PF2IW, PI2FW, PFNACC, PFPNACC,
# PSWAPD), MASKMOVQ, and the SSE4a group:
#   EXTRQ/INSERTQ - explicit bitfield extract/insert built from
#     shift/mask arithmetic; bitLen/lsbOffset subconstructors export the
#     low 6 bits of the trailing imm8 bytes as constants.
#   MOVNTSD/MOVNTSS - stores of the low 64/32 bits (non-temporal hint not
#     modeled).
# The closing '}' terminates the enclosing 'with : lockprefx=0' block
# (opened before this excerpt).  The text after the FILE marker is the
# start of lockable.sinc: LOCK-prefixed ADC forms whose pattern includes
# lockx/unlock and whose trailing '@ifdef IA64' continues past this
# excerpt.  NOTE(review): the commented-out PREFETCH/PREFETCHW lines are
# preserved verbatim.
& m64 { mmxreg = PackedFloatingReciprocalSQRAprox(mmxreg, m64); } :PFRSQRT mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x97 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalSQRAprox(mmxreg1, mmxreg2); } define pcodeop PackedFloatingReciprocalIter1; :PFRCPIT1 mmxreg, m64 is vexMode=0 & suffix3D=0xA6 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalIter1(mmxreg, m64); } :PFRCPIT1 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA6 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalIter1(mmxreg1, mmxreg2); } define pcodeop PackedFloatingReciprocalSQRIter1; :PFRSQIT1 mmxreg, m64 is vexMode=0 & suffix3D=0xA7 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalSQRIter1(mmxreg, m64); } :PFRSQIT1 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xA7 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalSQRIter1(mmxreg1, mmxreg2); } define pcodeop PackedFloatingReciprocalIter2; :PFRCPIT2 mmxreg, m64 is vexMode=0 & suffix3D=0xB6 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingReciprocalIter2(mmxreg, m64); } :PFRCPIT2 mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB6 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingReciprocalIter2(mmxreg1, mmxreg2); } define pcodeop PackedAverageUnsignedBytes; :PAVGUSB mmxreg, m64 is vexMode=0 & suffix3D=0xBF & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedAverageUnsignedBytes(mmxreg, m64); } :PAVGUSB mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xBF & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedAverageUnsignedBytes(mmxreg1, mmxreg2); } define pcodeop PackedAverageHighRoundedWord; :PMULHRW mmxreg, m64 is vexMode=0 & suffix3D=0xB7 & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ...
& m64 { mmxreg = PackedAverageHighRoundedWord(mmxreg, m64); } :PMULHRW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xB7 & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedAverageHighRoundedWord(mmxreg1, mmxreg2); } define pcodeop FastExitMediaState; :FEMMS is vexMode=0 & byte=0x0F; byte=0x0E { FastExitMediaState(); } #define pcodeop PrefetchDataIntoCache; #:PREFETCH m8 is vexMode=0 & byte=0x0F; byte=0x18; m8 { PrefetchDataIntoCache(m8); } #define pcodeop PrefetchDataIntoCacheWrite; #:PREFETCHW m8 is vexMode=0 & byte=0x0F; byte=0x0D; reg_opcode=1 ... & m8 { PrefetchDataIntoCacheWrite(m8); } # 3DNow! extensions define pcodeop PackedFloatingToIntWord; :PF2IW mmxreg, m64 is vexMode=0 & suffix3D=0x1C & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingToIntWord(mmxreg, m64); } :PF2IW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x1C & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingToIntWord(mmxreg1, mmxreg2); } define pcodeop PackedIntToFloatingWord; :PI2FW mmxreg, m64 is vexMode=0 & suffix3D=0x0C & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedIntToFloatingWord(mmxreg, m64); } :PI2FW mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x0C & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedIntToFloatingWord(mmxreg1, mmxreg2); } define pcodeop PackedFloatingNegAccumulate; :PFNACC mmxreg, m64 is vexMode=0 & suffix3D=0x8A & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedFloatingNegAccumulate(mmxreg, m64); } :PFNACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x8A & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingNegAccumulate(mmxreg1, mmxreg2); } define pcodeop PackedFloatingPosNegAccumulate; :PFPNACC mmxreg, m64 is vexMode=0 & suffix3D=0x8E & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ...
& m64 { mmxreg = PackedFloatingPosNegAccumulate(mmxreg, m64); } :PFPNACC mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0x8E & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedFloatingPosNegAccumulate(mmxreg1, mmxreg2); } define pcodeop PackedSwapDWords; :PSWAPD mmxreg, m64 is vexMode=0 & suffix3D=0xBB & mandover=0 & byte=0x0F; byte=0x0F; mmxreg ... & m64 { mmxreg = PackedSwapDWords(mmxreg, m64); } :PSWAPD mmxreg1, mmxreg2 is vexMode=0 & suffix3D=0xBB & mandover=0 & byte=0x0F; byte=0x0F; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = PackedSwapDWords(mmxreg1, mmxreg2); } define pcodeop MaskedMoveQWord; :MASKMOVQ mmxreg1, mmxreg2 is vexMode=0 & mandover=0 & byte=0x0F; byte=0xF7; mmxmod = 3 & mmxreg1 & mmxreg2 { mmxreg1 = MaskedMoveQWord(mmxreg1, mmxreg2); } #### #### SSE4a instructions #### bitLen: val is imm8 [ val=imm8 & 0x3f; ] { export *[const]:1 val; } lsbOffset: val is imm8 [ val=imm8 & 0x3f; ] { export *[const]:1 val; } :EXTRQ XmmReg2, bitLen, lsbOffset is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x78; reg_opcode=0 & XmmReg2; bitLen; lsbOffset { local mask:16 = ((1 << bitLen) - 1) << lsbOffset; local val:16 = (XmmReg2 & mask) >> lsbOffset; XmmReg2 = val; } :EXTRQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x79; XmmReg1 & XmmReg2 { local len = XmmReg2[0,6]; local offs = XmmReg2[6,6]; local mask = ((1 << len) - 1) << offs; local val = (XmmReg1 & mask) >> offs; XmmReg1 = val; } :INSERTQ XmmReg1, XmmReg2, bitLen, lsbOffset is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x78; XmmReg1 & XmmReg2; bitLen; lsbOffset { local mask:16 = ((1 << bitLen) - 1) << lsbOffset; local val:16 = (zext(XmmReg2[0,64]) & ((1 << bitLen) - 1)); XmmReg1 = (XmmReg1 & ~zext(mask)) | (zext(val) << lsbOffset); } :INSERTQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x79; XmmReg1 & XmmReg2 { local len = XmmReg2[64,6]; local offs = XmmReg2[72,6]; local mask:16 = ((1 << len) - 1) << offs; local val:16 = (zext(XmmReg2[0,64]) & ((1 << len) -
1)); XmmReg1 = (XmmReg1 & ~zext(mask)) | (zext(val) << offs); } :MOVNTSD m64, XmmReg1 is vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x2B; XmmReg1 ... & m64 { m64 = XmmReg1[0,64]; } :MOVNTSS m32, XmmReg1 is vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x2B; XmmReg1 ... & m32 { m32 = XmmReg1[0,32]; } } # end with : lockprefx=0 ================================================ FILE: pypcode/processors/x86/data/languages/lockable.sinc ================================================ # The LOCK prefix is only valid for certain instructions, otherwise, from the # Intel instruction manual: # An undefined opcode exception will also be generated if the LOCK prefix # is used with any instruction not in the above list. # The instructions in this file have their non-lockable counterparts in ia.sinc :ADC^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=2 ... ; imm8 { build lockx; build m8; addCarryFlags(m8, imm8:1); resultflags(m8); build unlock; } :ADC^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=2 ...; imm16 { build lockx; build m16; addCarryFlags(m16, imm16:2); resultflags(m16); build unlock; } :ADC^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=2 ...; imm32 { build lockx; build m32; addCarryFlags(m32, imm32:4); resultflags(m32); build unlock; } @ifdef IA64 :ADC^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=2 ...; simm32 { build lockx; build m64; addCarryFlags(m64,simm32); resultflags(m64); build unlock; } @endif :ADC^lockx m16,simm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=2 ...; simm8_16 { build lockx; build m16; addCarryFlags(m16,simm8_16); resultflags(m16); build unlock; } :ADC^lockx m32,simm8_32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x83; m32 & reg_opcode=2 ...; simm8_32 { build lockx; build m32; addCarryFlags(m32,simm8_32); resultflags(m32); build unlock; } @ifdef IA64
:ADC^lockx m64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=2 ...; simm8_64 { build lockx; build m64; addCarryFlags(m64,simm8_64); resultflags(m64); build unlock; } @endif :ADC^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x10; m8 & Reg8 ... { build lockx; build m8; addCarryFlags(m8, Reg8); resultflags(m8); build unlock; } :ADC^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x11; m16 & Reg16 ... { build lockx; build m16; addCarryFlags(m16, Reg16); resultflags(m16); build unlock; } :ADC^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x11; m32 & Reg32 ... { build lockx; build m32; addCarryFlags(m32, Reg32); resultflags(m32); build unlock; } @ifdef IA64 :ADC^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x11; m64 & Reg64 ... { build lockx; build m64; addCarryFlags(m64, Reg64); resultflags(m64); build unlock; } @endif :ADD^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=0 ...; imm8 { build lockx; build m8; addflags(m8,imm8); m8 = m8 + imm8; resultflags(m8); build unlock; } :ADD^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=0 ...; imm16 { build lockx; build m16; addflags(m16,imm16); m16 = m16 + imm16; resultflags(m16); build unlock; } :ADD^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=0 ...; imm32 { build lockx; build m32; addflags(m32,imm32); m32 = m32 + imm32; resultflags(m32); build unlock; } @ifdef IA64 :ADD^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=0 ...; simm32 { build lockx; build m64; addflags(m64,simm32); m64 = m64 + simm32; resultflags(m64); build unlock; } @endif :ADD^lockx m16,simm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=0 ...; simm8_16 { build lockx; build m16; addflags(m16,simm8_16); m16 = m16 + simm8_16; resultflags(m16); 
build unlock; } :ADD^lockx m32,simm8_32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x83; m32 & reg_opcode=0 ...; simm8_32 { build lockx; build m32; addflags(m32,simm8_32); m32 = m32 + simm8_32; resultflags(m32); build unlock; } @ifdef IA64 :ADD^lockx m64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=0 ...; simm8_64 { build lockx; build m64; addflags(m64,simm8_64); m64 = m64 + simm8_64; resultflags(m64); build unlock; } @endif :ADD^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x00; m8 & Reg8 ... { build lockx; build m8; addflags(m8,Reg8); m8 = m8 + Reg8; resultflags(m8); build unlock; } :ADD^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x1; m16 & Reg16 ... { build lockx; build m16; addflags(m16,Reg16); m16 = m16 + Reg16; resultflags(m16); build unlock; } :ADD^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x1; m32 & Reg32 ... { build lockx; build m32; addflags(m32,Reg32); m32 = m32 + Reg32; resultflags(m32); build unlock; } @ifdef IA64 :ADD^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x1; m64 & Reg64 ... 
{ build lockx; build m64; addflags(m64,Reg64); m64 = m64 + Reg64; resultflags(m64); build unlock; } @endif :AND^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=4 ...; imm8 { build lockx; build m8; logicalflags(); m8 = m8 & imm8; resultflags(m8); build unlock; } :AND^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=4 ...; imm16 { build lockx; build m16; logicalflags(); m16 = m16 & imm16; resultflags(m16); build unlock; } :AND^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=4 ...; imm32 { build lockx; build m32; logicalflags(); m32 = m32 & imm32; resultflags(m32); build unlock; } @ifdef IA64 :AND^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=4 ...; simm32 { build lockx; build m64; logicalflags(); m64 = m64 & simm32; resultflags(m64); build unlock; } @endif :AND^lockx m16,usimm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=4 ...; usimm8_16 { build lockx; build m16; logicalflags(); m16 = m16 & usimm8_16; resultflags(m16); build unlock; } :AND^lockx m32,usimm8_32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x83; m32 & reg_opcode=4 ...; usimm8_32 { build lockx; build m32; logicalflags(); m32 = m32 & usimm8_32; resultflags(m32); build unlock; } @ifdef IA64 :AND^lockx m64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=4 ...; usimm8_64 { build lockx; build m64; logicalflags(); m64 = m64 & usimm8_64; resultflags(m64); build unlock; } @endif :AND^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x20; m8 & Reg8 ... { build lockx; build m8; logicalflags(); m8 = m8 & Reg8; resultflags(m8); build unlock; } :AND^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x21; m16 & Reg16 ... 
{ build lockx; build m16; logicalflags(); m16 = m16 & Reg16; resultflags(m16); build unlock; } :AND^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x21; m32 & Reg32 ... { build lockx; build m32; logicalflags(); m32 = m32 & Reg32; resultflags(m32); build unlock; } @ifdef IA64 :AND^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x21; m64 & Reg64 ... { build lockx; build m64; logicalflags(); m64 = m64 & Reg64; resultflags(m64); build unlock; } @endif :BTC^lockx Mem,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0xf; byte=0xbb; Mem & Reg16 ... { build lockx; build Mem; local ptr = Mem + (sext(Reg16) s>> 3); local bit=Reg16&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr= *:1 ptr ^(1<> 3); @else local ptr = Mem + (Reg32 s>> 3); @endif local bit=Reg32&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr ^ (1<> 3); local bit=Reg64&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr ^ (1<>bit)&1; m16=m16^(1<>bit)&1; CF=(val!=0); m32=m32^(1<>bit)&1; m64=m64^(1<> 3); local bit=Reg16&7; local val=(*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr & ~(1<> 3); @else local ptr = Mem + (Reg32 s>> 3); @endif local bit = Reg32 & 7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr & ~(1<> 3); local bit = Reg64 & 7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr & ~(1<>bit)&1; m16=m16 & ~(1<>bit)&1; CF=(val!=0); m32=m32 & ~(1<>bit)&1; m64=m64 & ~(1<> 3); local bit = Reg16&7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr | (1<>3); @else local ptr = Mem + (Reg32 s>>3); @endif local bit = Reg32 & 7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr | (1<>3); local bit = Reg64 & 7; local val = (*:1 ptr >> bit) & 1; *:1 ptr = *:1 ptr | (1<>bit)&1; m16=m16 | (1<>bit)&1; CF=(val!=0); m32=m32 | (1<>bit)&1; m64=m64 | (1<; AL = dest; goto ; m8 = Reg8; build unlock; } :CMPXCHG^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0xf; byte=0xb1; m16 & Reg16 ... 
{ build lockx; local dest = m16; subflags(AX,dest); local diff = AX-dest; resultflags(diff); if (ZF) goto ; AX = dest; goto ; m16 = Reg16; build unlock; } :CMPXCHG^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0xf; byte=0xb1; m32 & Reg32 ... & check_EAX_dest ... { build lockx; #this instruction writes to either EAX or Rmr32 #in 64-bit mode, a 32-bit register that is written to #(and only the register that is written to) #must be zero-extended to 64 bits local dest = m32; subflags(EAX,dest); local diff = EAX-dest; resultflags(diff); if (ZF) goto ; EAX = dest; build check_EAX_dest; goto ; m32 = Reg32; build unlock; } @ifdef IA64 :CMPXCHG^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0xf; byte=0xb1; m64 & Reg64 ... { build lockx; local dest = m64; subflags(RAX,dest); local diff = RAX-dest; resultflags(diff); if (ZF) goto ; RAX = dest; goto ; m64 = Reg64; build unlock; } @endif :CMPXCHG8B^lockx m64 is vexMode=0 & lockx & unlock & byte=0xf; byte=0xc7; ( mod != 0b11 & reg_opcode=1 ) ... & m64 { build lockx; local dest = m64; ZF = ((zext(EDX) << 32) | zext(EAX)) == dest; if (ZF == 1) goto ; EDX = dest(4); EAX = dest:4; goto ; m64 = (zext(ECX) << 32) | zext(EBX); build unlock; } @ifdef IA64 :CMPXCHG16B^lockx m128 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0xf; byte=0xc7; ( mod != 0b11 & reg_opcode=1 ) ... & ( m128 ) { build lockx; local dest = m128; ZF = ((zext(RDX) << 64) | zext(RAX)) == dest; if (ZF == 1) goto ; RDX = dest(8); RAX = dest:8; goto ; m128 = ((zext(RCX) << 64) | zext(RBX)); build unlock; } @endif :DEC^lockx m8 is vexMode=0 & lockx & unlock & byte=0xfe; m8 & reg_opcode=1 ... { build lockx; build m8; OF = sborrow(m8,1); m8 = m8 - 1; resultflags(m8); build unlock; } :DEC^lockx m16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0xff; m16 & reg_opcode=1 ... 
{ build lockx; build m16; OF = sborrow(m16,1); m16 = m16 - 1; resultflags(m16); build unlock; } :DEC^lockx m32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0xff; m32 & reg_opcode=1 ... { build lockx; build m32; OF = sborrow(m32,1); m32 = m32 - 1; resultflags(m32); build unlock; } @ifdef IA64 :DEC^lockx m64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0xff; m64 & reg_opcode=1 ... { build lockx; build m64; OF = sborrow(m64,1); m64 = m64 - 1; resultflags(m64); build unlock; } @endif :INC^lockx m8 is vexMode=0 & lockx & unlock & byte=0xfe; m8 ... { build lockx; build m8; OF = scarry(m8,1); m8 = m8 + 1; resultflags( m8); build unlock; } :INC^lockx m16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0xff; m16 ... { build lockx; build m16; OF = scarry(m16,1); m16 = m16 + 1; resultflags(m16); build unlock; } :INC^lockx m32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0xff; m32 ... { build lockx; build m32; OF = scarry(m32,1); m32 = m32 + 1; resultflags(m32); build unlock; } @ifdef IA64 :INC^lockx m64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0xff; m64 ... { build lockx; build m64; OF = scarry(m64,1); m64 = m64 + 1; resultflags(m64); build unlock; } @endif :NEG^lockx m8 is vexMode=0 & lockx & unlock & byte=0xf6; m8 & reg_opcode=3 ... { build lockx; build m8; negflags(m8); m8 = -m8; resultflags(m8); build unlock; } :NEG^lockx m16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0xf7; m16 & reg_opcode=3 ... { build lockx; build m16; negflags(m16); m16 = -m16; resultflags(m16); build unlock; } :NEG^lockx m32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0xf7; m32 & reg_opcode=3 ... { build lockx; build m32; negflags(m32); m32 = -m32; resultflags(m32); build unlock; } @ifdef IA64 :NEG^lockx m64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0xf7; m64 & reg_opcode=3 ... 
{ build lockx; build m64; negflags(m64); m64 = -m64; resultflags(m64); build unlock; } @endif :NOT^lockx m8 is vexMode=0 & lockx & unlock & byte=0xf6; m8 & reg_opcode=2 ... { build lockx; build m8; m8 = ~m8; build unlock; } :NOT^lockx m16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0xf7; m16 & reg_opcode=2 ... { build lockx; build m16; m16 = ~m16; build unlock; } :NOT^lockx m32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0xf7; m32 & reg_opcode=2 ... { build lockx; build m32; m32 = ~m32; build unlock; } @ifdef IA64 :NOT^lockx m64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0xf7; m64 & reg_opcode=2 ... { build lockx; build m64; m64 = ~m64; build unlock; } @endif :OR^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=1 ...; imm8 { build lockx; build m8; logicalflags(); m8 = m8 | imm8; resultflags(m8); build unlock; } :OR^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=1 ...; imm16 { build lockx; build m16; logicalflags(); m16 = m16 | imm16; resultflags(m16); build unlock; } :OR^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=1 ...; imm32 { build lockx; build m32; logicalflags(); m32 = m32 | imm32; resultflags(m32); build unlock; } @ifdef IA64 :OR^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=1 ...; simm32 { build lockx; build m64; logicalflags(); tmp:8 = m64; m64 = tmp | simm32; resultflags(m64); build unlock; } @endif :OR^lockx m16,usimm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=1 ...; usimm8_16 { build lockx; build m16; logicalflags(); m16 = m16 | usimm8_16; resultflags(m16); build unlock; } :OR^lockx m32,usimm8_32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x83; m32 & reg_opcode=1 ...; usimm8_32 { build lockx; build m32; logicalflags(); m32 = m32 | usimm8_32; resultflags(m32); build unlock; } @ifdef IA64 :OR^lockx m64,usimm8_64 is 
$(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=1 ...; usimm8_64 { build lockx; build m64; logicalflags(); m64 = m64 | usimm8_64; resultflags(m64); build unlock; } @endif :OR^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x8; m8 & Reg8 ... { build lockx; build m8; logicalflags(); m8 = m8 | Reg8; resultflags(m8); build unlock; } :OR^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x9; m16 & Reg16 ... { build lockx; build m16; logicalflags(); m16 = m16 | Reg16; resultflags(m16); build unlock; } :OR^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x9; m32 & Reg32 ... { build lockx; build m32; logicalflags(); m32 = m32 | Reg32; resultflags(m32); build unlock; } @ifdef IA64 :OR^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x9; m64 & Reg64 ... { build lockx; build m64; logicalflags(); m64 = m64 | Reg64; resultflags(m64); build unlock; } @endif :SBB^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=3 ...; imm8 { build lockx; build m8; subCarryFlags(m8, imm8); resultflags(m8); build unlock; } :SBB^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=3 ...; imm16 { build lockx; build m16; subCarryFlags(m16, imm16); resultflags(m16); build unlock; } :SBB^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=3 ...; imm32 { build lockx; build m32; subCarryFlags(m32, imm32); resultflags(m32); build unlock; } @ifdef IA64 :SBB^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=3 ...; simm32 { build lockx; build m64; subCarryFlags(m64, simm32); resultflags(m64); build unlock; } @endif :SBB^lockx m16,simm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=3 ...; simm8_16 { build lockx; build m16; subCarryFlags(m16, simm8_16); resultflags(m16); build unlock; } :SBB^lockx m32,simm8_32 is vexMode=0 & lockx & 
unlock & opsize=1 & byte=0x83; m32 & reg_opcode=3 ...; simm8_32 { build lockx; build m32; subCarryFlags(m32, simm8_32); resultflags(m32); build unlock; } @ifdef IA64 :SBB^lockx m64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=3 ...; simm8_64 { build lockx; build m64; subCarryFlags(m64, simm8_64); resultflags(m64); build unlock; } @endif :SBB^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x18; m8 & Reg8 ... { build lockx; build m8; subCarryFlags(m8, Reg8); resultflags(m8); build unlock; } :SBB^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x19; m16 & Reg16 ... { build lockx; build m16; subCarryFlags(m16, Reg16); resultflags(m16); build unlock; } :SBB^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x19; m32 & Reg32 ... { build lockx; build m32; subCarryFlags(m32, Reg32); resultflags(m32); build unlock; } @ifdef IA64 :SBB^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x19; m64 & Reg64 ... 
{ build lockx; build m64; subCarryFlags(m64, Reg64); resultflags(m64); build unlock; } @endif :SUB^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=5 ...; imm8 { build lockx; build m8; subflags(m8,imm8); m8 = m8 - imm8; resultflags(m8); build unlock; } :SUB^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=5 ...; imm16 { build lockx; build m16; subflags(m16,imm16); m16 = m16 - imm16; resultflags(m16); build unlock; } :SUB^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=5 ...; imm32 { build lockx; build m32; subflags(m32,imm32); m32 = m32 - imm32; resultflags(m32); build unlock; } @ifdef IA64 :SUB^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=5 ...; simm32 { build lockx; build m64; subflags(m64,simm32); m64 = m64 - simm32; resultflags(m64); build unlock; } @endif :SUB^lockx m16,simm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=5 ...; simm8_16 { build lockx; build m16; subflags(m16,simm8_16); m16 = m16 - simm8_16; resultflags(m16); build unlock; } :SUB^lockx m32,simm8_32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x83; m32 & reg_opcode=5 ...; simm8_32 { build lockx; build m32; subflags(m32,simm8_32); m32 = m32 - simm8_32; resultflags(m32); build unlock; } @ifdef IA64 :SUB^lockx m64,simm8_64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=5 ...; simm8_64 { build lockx; build m64; subflags(m64,simm8_64); m64 = m64 - simm8_64; resultflags(m64); build unlock; } @endif :SUB^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x28; m8 & Reg8 ... { build lockx; build m8; subflags(m8,Reg8); m8 = m8 - Reg8; resultflags(m8); build unlock; } :SUB^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x29; m16 & Reg16 ... 
{ build lockx; build m16; subflags(m16,Reg16); m16 = m16 - Reg16; resultflags(m16); build unlock; } :SUB^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x29; m32 & Reg32 ... { build lockx; build m32; subflags(m32,Reg32); m32 = m32 - Reg32; resultflags(m32); build unlock; } @ifdef IA64 :SUB^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x29; m64 & Reg64 ... { build lockx; build m64; subflags(m64,Reg64); m64 = m64 - Reg64; resultflags(m64); build unlock; } @endif :XADD^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x0F; byte=0xC0; m8 & Reg8 ... { build lockx; build m8; addflags( m8,Reg8); local tmp = m8 + Reg8; Reg8 = m8; m8 = tmp; resultflags(tmp); build unlock; } :XADD^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x0F; byte=0xC1; m16 & Reg16 ... { build lockx; build m16; addflags(m16,Reg16); local tmp = m16 + Reg16; Reg16 = m16; m16 = tmp; resultflags(tmp); build unlock; } :XADD^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x0F; byte=0xC1; m32 & Reg32 ... { build lockx; build m32; addflags(m32,Reg32); local tmp = m32 + Reg32; Reg32 = m32; m32 = tmp; resultflags(tmp); build unlock; } @ifdef IA64 :XADD^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x0F; byte=0xC1; m64 & Reg64 ... { build lockx; build m64; addflags(m64,Reg64); local tmp = m64 + Reg64; Reg64 = m64; m64 = tmp; resultflags(tmp); build unlock; } @endif # XCHG with memory operands always asserts a lock signal regardless of prefix presence :XCHG^xacq_xrel_prefx^alwaysLock m8,Reg8 is vexMode=0 & xacq_xrel_prefx & alwaysLock & byte=0x86; m8 & Reg8 ... { build xacq_xrel_prefx; build alwaysLock; build m8; local tmp = m8; m8 = Reg8; Reg8 = tmp; UNLOCK(); } :XCHG^xacq_xrel_prefx^alwaysLock m16,Reg16 is vexMode=0 & xacq_xrel_prefx & alwaysLock & opsize=0 & byte=0x87; m16 & Reg16 ... 
{ build xacq_xrel_prefx; build alwaysLock; build m16; local tmp = m16; m16 = Reg16; Reg16 = tmp; UNLOCK(); } :XCHG^xacq_xrel_prefx^alwaysLock m32,Reg32 is vexMode=0 & xacq_xrel_prefx & alwaysLock & opsize=1 & byte=0x87; m32 & Reg32 ... { build xacq_xrel_prefx; build alwaysLock; build m32; local tmp = m32; m32 = Reg32; Reg32 = tmp; UNLOCK(); } @ifdef IA64 :XCHG^xacq_xrel_prefx^alwaysLock m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & xacq_xrel_prefx & alwaysLock & opsize=2 & byte=0x87; m64 & Reg64 ... { build xacq_xrel_prefx; build alwaysLock; build m64; local tmp = m64; m64 = Reg64; Reg64 = tmp; UNLOCK(); } @endif :XOR^lockx m8,imm8 is vexMode=0 & lockx & unlock & $(BYTE_80_82); m8 & reg_opcode=6 ...; imm8 { build lockx; build m8; logicalflags(); m8 = m8 ^ imm8; resultflags(m8); build unlock; } :XOR^lockx m16,imm16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x81; m16 & reg_opcode=6 ...; imm16 { build lockx; build m16; logicalflags(); m16 = m16 ^ imm16; resultflags(m16); build unlock; } :XOR^lockx m32,imm32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x81; m32 & reg_opcode=6 ...; imm32 { build lockx; build m32; logicalflags(); m32 = m32 ^ imm32; resultflags(m32); build unlock; } @ifdef IA64 :XOR^lockx m64,simm32 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x81; m64 & reg_opcode=6 ...; simm32 { build lockx; build m64; logicalflags(); m64 = m64 ^ simm32; resultflags(m64); build unlock; } @endif :XOR^lockx m16,usimm8_16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x83; m16 & reg_opcode=6 ...; usimm8_16 { build lockx; build m16; logicalflags(); m16 = m16 ^ usimm8_16; resultflags(m16); build unlock; } :XOR^lockx m32,usimm8_32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x83; m32 & reg_opcode=6 ...; usimm8_32 { build lockx; build m32; logicalflags(); m32 = m32 ^ usimm8_32; resultflags(m32); build unlock; } @ifdef IA64 :XOR^lockx m64,usimm8_64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x83; m64 & reg_opcode=6 ...; 
usimm8_64 { build lockx; build m64; logicalflags(); m64 = m64 ^ usimm8_64; resultflags(m64); build unlock; } @endif :XOR^lockx m8,Reg8 is vexMode=0 & lockx & unlock & byte=0x30; m8 & Reg8 ... { build lockx; build m8; logicalflags(); m8 = m8 ^ Reg8; resultflags(m8); build unlock; } :XOR^lockx m16,Reg16 is vexMode=0 & lockx & unlock & opsize=0 & byte=0x31; m16 & Reg16 ... { build lockx; build m16; logicalflags(); m16 = m16 ^ Reg16; resultflags(m16); build unlock; } :XOR^lockx m32,Reg32 is vexMode=0 & lockx & unlock & opsize=1 & byte=0x31; m32 & Reg32 ... { build lockx; build m32; logicalflags(); m32 = m32 ^ Reg32; resultflags(m32); build unlock; } @ifdef IA64 :XOR^lockx m64,Reg64 is $(LONGMODE_ON) & vexMode=0 & lockx & unlock & opsize=2 & byte=0x31; m64 & Reg64 ... { build lockx; build m64; logicalflags(); m64 = m64 ^ Reg64; resultflags(m64); build unlock; } @endif ================================================ FILE: pypcode/processors/x86/data/languages/lzcnt.sinc ================================================ macro lzcntflags(input, output) { ZF = (output == 0); CF = (input == 0); # OF, SF, PF, AF are undefined } #### #### LZCNT instructions #### :LZCNT Reg16, rm16 is vexMode=0 & opsize=0 & $(PRE_66) & $(PRE_F3) & byte=0x0F; byte=0xBD; Reg16 ... & rm16 { Reg16 = lzcount(rm16); lzcntflags(rm16, Reg16); } :LZCNT Reg32, rm32 is vexMode=0 & opsize=1 & $(PRE_F3) & byte=0x0F; byte=0xBD; Reg32 ... & check_Reg32_dest ... & rm32 { Reg32 = lzcount(rm32); lzcntflags(rm32, Reg32); build check_Reg32_dest; } @ifdef IA64 :LZCNT Reg64, rm64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(PRE_F3) & $(REX_W) & byte=0x0F; byte=0xBD; Reg64 ... 
& rm64 { Reg64 = lzcount(rm64); lzcntflags(rm64, Reg64); } @endif ================================================ FILE: pypcode/processors/x86/data/languages/macros.sinc ================================================ macro conditionalAssign(dest, cond, trueVal, falseVal) { dest = zext(cond) * trueVal | zext(!cond) * falseVal; } ================================================ FILE: pypcode/processors/x86/data/languages/mpx.sinc ================================================ define pcodeop br_exception; # BNDMK needs the base address register only # - if no base register, needs 0 @ifdef IA64 bndmk_addr64: [Rmr64] is mod=0 & Rmr64 { export Rmr64; } bndmk_addr64: [Rmr64 + simm8_64] is mod=1 & Rmr64; simm8_64 { export Rmr64; } bndmk_addr64: [simm32_64 + Rmr64] is mod=2 & Rmr64; simm32_64 { export Rmr64; } bndmk_addr64: [Rmr64] is mod=1 & r_m!=4 & Rmr64; simm8=0 { export Rmr64; } bndmk_addr64: [Rmr64] is mod=2 & r_m!=4 & Rmr64; simm32=0 { export Rmr64; } #invalid bndmk_addr64: [riprel] is mod=0 & r_m=5; simm32 [ riprel=inst_next+simm32; ] { export *[const]:8 riprel; } bndmk_addr64: [Base64 + Index64*ss] is mod=0 & r_m=4; Index64 & Base64 & ss { export Base64; } bndmk_addr64: [Base64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & Base64 { export Base64; } bndmk_addr64: [simm32_64 + Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; simm32_64 { tmp:8 = 0; export tmp; } bndmk_addr64: [Index64*ss] is mod=0 & r_m=4; Index64 & base64=5 & ss; imm32=0 { tmp:8 = 0; export tmp; } bndmk_addr64: [simm32_64] is mod=0 & r_m=4; rexXprefix=0 & index64=4 & base64=5; simm32_64 { tmp:8 = 0; export tmp; } bndmk_addr64: [Base64 + Index64*ss + simm8_64] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8_64 { export Base64; } bndmk_addr64: [Base64 + Index64*ss] is mod=1 & r_m=4; Index64 & Base64 & ss; simm8=0 { export Base64; } bndmk_addr64: [Base64 + simm8_64] is mod=1 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm8_64 { export Base64; } bndmk_addr64: [simm32_64 + Base64 + Index64*ss] 
is mod=2 & r_m=4; Index64 & Base64 & ss; simm32_64 { export Base64; } bndmk_addr64: [simm32_64 + Base64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; simm32_64 { export Base64; } bndmk_addr64: [Base64 + Index64*ss] is mod=2 & r_m=4; Index64 & Base64 & ss; imm32=0 { export Base64; } bndmk_addr64: [Base64] is mod=2 & r_m=4; rexXprefix=0 & index64=4 & Base64; imm32=0 { export Base64; } @endif bndmk_addr32: [Rmr32] is mod=0 & Rmr32 { export Rmr32; } bndmk_addr32: [Rmr32 + simm8_32] is mod=1 & Rmr32; simm8_32 { export Rmr32; } bndmk_addr32: [Rmr32] is mod=1 & r_m!=4 & Rmr32; simm8=0 { export Rmr32; } bndmk_addr32: [imm32 + Rmr32] is mod=2 & Rmr32; imm32 { export Rmr32; } bndmk_addr32: [Rmr32] is mod=2 & r_m!=4 & Rmr32; imm32=0 { export Rmr32; } bndmk_addr32: [imm32] is mod=0 & r_m=5; imm32 { tmp:4 = 0; export tmp; } bndmk_addr32: [Base + Index*ss] is mod=0 & r_m=4; Index & Base & ss { export Base; } bndmk_addr32: [Base] is mod=0 & r_m=4; index=4 & Base { export Base; } bndmk_addr32: [imm32 + Index*ss] is mod=0 & r_m=4; Index & base=5 & ss; imm32 { tmp:4 = 0; export tmp; } bndmk_addr32: [imm32] is mod=0 & r_m=4; index=4 & base=5; imm32 { tmp:4 = 0; export tmp; } bndmk_addr32: [Base + Index*ss + simm8_32] is mod=1 & r_m=4; Index & Base & ss; simm8_32 { export Base; } bndmk_addr32: [Base + simm8_32] is mod=1 & r_m=4; index=4 & Base; simm8_32 { export Base; } bndmk_addr32: [Base + Index*ss] is mod=1 & r_m=4; Index & Base & ss; simm8=0 { export Base; } bndmk_addr32: [Base] is mod=1 & r_m=4; index=4 & Base; simm8=0 { export Base; } bndmk_addr32: [imm32 + Base + Index*ss] is mod=2 & r_m=4; Index & Base & ss; imm32 { export Base; } bndmk_addr32: [imm32 + Base] is mod=2 & r_m=4; index=4 & Base; imm32 { export Base; } bndmk_addr32: [Base + Index*ss] is mod=2 & r_m=4; Index & Base & ss; imm32=0 { export Base; } bndmk_addr32: [Base] is mod=2 & r_m=4; index=4 & Base; imm32=0 { export Base; } @ifdef IA64 :BNDCL bnd1, Rmr64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_F3) & 
byte=0x0F; byte=0x1A; mod=3 & bnd1 & bnd1_lb & Rmr64 { # if (reg < BND.LB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(Rmr64 < bnd1_lb) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCL bnd1, Mem is $(LONGMODE_ON) & vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x1A; (bnd1 & bnd1_lb) ... & Mem { # if (LEA(mem) < BND.LB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(Mem < bnd1_lb) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCU bnd1, Rmr64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1A; mod=3 & bnd1 & bnd1_ub & Rmr64 { # if (reg > ~(BND.UB)) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(Rmr64 > ~bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCU bnd1, Mem is $(LONGMODE_ON) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1A; (bnd1 & bnd1_ub) ... & Mem { # if (LEA(mem) > ~(BND.UB)) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(Mem > ~bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCN bnd1, Rmr64 is $(LONGMODE_ON) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1B; mod=3 & bnd1 & bnd1_ub & Rmr64 { # if (reg > BND.UB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(Rmr64 > bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCN bnd1, Mem is $(LONGMODE_ON) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1B; (bnd1 & bnd1_ub) ... & Mem { # if (LEA(mem) > BND.UB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(Mem > bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } #TODO: This probably cannot be fully modeled :BNDLDX bnd1, Mem is $(LONGMODE_ON) & vexMode=0 & byte=0x0F; byte=0x1A; bnd1 ... & Mem { # BNDSTATUS = bndldx_status( Mem, BNDCFGS, BNDCFGU ); # bnd1 = bndldx( Mem, BNDCFGS, BNDCFGU ); # core implementation bnd1 = *:16 Mem; } :BNDMK bnd1, Mem is $(LONGMODE_ON) & vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x1B; ( bnd1 & bnd1_lb & bnd1_ub ) ... & ( bndmk_addr64 & Mem ) { # BND.LB and BND.UB set from m64 bnd1_lb = bndmk_addr64; bnd1_ub = Mem; } :BNDMOV bnd1, m128 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1A; bnd1 ... 
& m128 { bnd1 = m128; } :BNDMOV bnd1, bnd2 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1A; mod=3 & bnd1 & bnd2 { bnd1 = bnd2; } :BNDMOV m128, bnd1 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1B; bnd1 ... & m128 { m128 = bnd1; } :BNDMOV bnd2, bnd1 is $(LONGMODE_ON) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1B; mod=3 & bnd1 & bnd2 { bnd2 = bnd1; } #TODO: This probably cannot be fully modeled :BNDSTX Mem, bnd1 is $(LONGMODE_ON) & vexMode=0 & byte=0x0F; byte=0x1B; bnd1 ... & Mem { # BNDSTATUS = bndstx_status( bnd1, BNDCFGS, BNDCFGU ); # Mem = bndstx( bnd1, BNDCFGS, BNDCFGU ); # core implementation *:16 Mem = bnd1; } @endif :BNDCL bnd1, Rmr32 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x1A; mod=3 & bnd1 & bnd1_lb & Rmr32 { # if (reg < BND.LB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(zext(Rmr32) < bnd1_lb) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCL bnd1, Mem is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x1A; (bnd1 & bnd1_lb) ... & Mem { # if (LEA(mem) < BND.LB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(zext(Mem) < bnd1_lb) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCU bnd1, Rmr32 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1A; mod=3 & bnd1 & bnd1_ub & Rmr32 { # if (reg > ~(BND.UB)) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(zext(Rmr32) > ~bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCU bnd1, Mem is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1A; (bnd1 & bnd1_ub) ... 
& Mem { # if (LEA(mem) > ~(BND.UB)) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(zext(Mem) > ~bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCN bnd1, Rmr32 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1B; mod=3 & bnd1 & bnd1_ub & Rmr32 { # if (reg > BND.UB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(zext(Rmr32) > bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } :BNDCN bnd1, Mem is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F2) & byte=0x0F; byte=0x1B; (bnd1 & bnd1_ub) ... & Mem { # if (LEA(mem) > BND.UB) then BNDSTATUS = 01H; AND BOUND EXCEPTION if !(zext(Mem) > bnd1_ub) goto ; BNDSTATUS = 0x01; br_exception(); } #TODO: This probably cannot be fully modeled :BNDLDX bnd1, Mem is $(LONGMODE_OFF) & vexMode=0 & byte=0x0F; byte=0x1A; ( bnd1 & bnd1_lb & bnd1_ub ) ... & Mem { # BNDSTATUS = bndldx_status( Mem, BNDCFGS, BNDCFGU ); # bnd1 = bndldx( Mem, BNDCFGS, BNDCFGU ); # core implementation tmp:8 = *:8 Mem; bnd1_lb = zext(tmp:4); tmp2:4 = tmp(4); bnd1_ub = zext(tmp2); } :BNDMK bnd1, Mem is $(LONGMODE_OFF) & vexMode=0 & $(PRE_F3) & byte=0x0F; byte=0x1B; ( bnd1 & bnd1_lb & bnd1_ub ) ... & ( bndmk_addr32 & Mem ) { # BND.LB and BND.UB set from m32 bnd1_lb = zext(bndmk_addr32); bnd1_ub = zext(Mem); } :BNDMOV bnd1, m64 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1A; ( bnd1 & bnd1_lb & bnd1_ub ) ... & m64 { tmp:8 = m64; bnd1_lb = zext(tmp:4); tmp2:4 = tmp(4); bnd1_ub = zext(tmp2); } :BNDMOV bnd1, bnd2 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1A; mod=3 & bnd1 & bnd2 { bnd1 = bnd2; } :BNDMOV m64, bnd1 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1B; ( bnd1 & bnd1_lb & bnd1_ub ) ... 
& m64 { m64 = (zext(bnd1_ub:4) << 32) | zext(bnd1_lb:4); } :BNDMOV bnd2, bnd1 is $(LONGMODE_OFF) & vexMode=0 & $(PRE_66) & byte=0x0F; byte=0x1B; mod=3 & bnd1 & bnd2 { bnd2 = bnd1; } #TODO: This probably cannot be fully modeled :BNDSTX Mem, bnd1 is $(LONGMODE_OFF) & vexMode=0 & byte=0x0F; byte=0x1B; ( bnd1 & bnd1_lb & bnd1_ub ) ... & Mem { # BNDSTATUS = bndstx_status( bnd1, BNDCFGS, BNDCFGU ); # Mem = bndstx( bnd1, BNDCFGS, BNDCFGU ); # core implementation *:8 Mem = (zext(bnd1_ub:4) << 32) | zext(bnd1_lb:4); } ================================================ FILE: pypcode/processors/x86/data/languages/old/x86RealV1.lang ================================================ x86:LE:16:Real Mode x86 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86RealV1.trans ================================================ x86:LE:16:Real Mode x86:LE:16:Real Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86RealV2.lang ================================================ x86:LE:16:Real Mode x86 Real Mode 16 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86RealV2.trans ================================================ x86:LE:16:Real Mode x86:LE:16:Real Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86RealV3.lang ================================================ x86:LE:16:Real Mode x86 Real Mode 16 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86RealV3.trans ================================================ x86:LE:16:Real Mode x86:LE:16:Real Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86V1.lang ================================================ x86:LE:32:default x86 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86V1.trans 
================================================ x86:LE:32:default x86:LE:32:default ================================================ FILE: pypcode/processors/x86/data/languages/old/x86V2.lang ================================================ x86:LE:32:default x86 default 32 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86V2.trans ================================================ x86:LE:32:default x86:LE:32:default ================================================ FILE: pypcode/processors/x86/data/languages/old/x86V3.lang ================================================ x86:LE:32:default x86 default 32 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86V3.trans ================================================ x86:LE:32:default x86:LE:32:default ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_compat32_v2.lang ================================================ x86:LE:64:compat32 x86 compat32 64 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_compat32_v2.trans ================================================ x86:LE:64:compat32 x86:LE:64:compat32 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_compat32_v3.lang ================================================ x86:LE:64:compat32 x86 compat32 64 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_compat32_v3.trans ================================================ x86:LE:64:compat32 x86:LE:64:compat32 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_v1.lang ================================================ x64:LE:64:default x64 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_v1.trans 
================================================ x64:LE:64:default x86:LE:64:default ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_v2.lang ================================================ x86:LE:64:default x86 default 64 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_v2.trans ================================================ x86:LE:64:default x86:LE:64:default ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_v3.lang ================================================ x86:LE:64:default x86 default 64 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_64bit_v3.trans ================================================ x86:LE:64:default x86:LE:64:default ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_ProtV2.lang ================================================ x86:LE:16:Protected Mode x86 Protected Mode 16 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_ProtV2.trans ================================================ x86:LE:16:Protected Mode x86:LE:16:Protected Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_ProtV3.lang ================================================ x86:LE:16:Protected Mode x86 Protected Mode 16 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86_ProtV3.trans ================================================ x86:LE:16:Protected Mode x86:LE:16:Protected Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86smmV1.lang ================================================ x86:LE:32:System Management Mode x86 ================================================ FILE: 
pypcode/processors/x86/data/languages/old/x86smmV1.trans ================================================ x86:LE:32:System Management Mode x86:LE:32:System Management Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86smmV2.lang ================================================ x86:LE:32:System Management Mode x86 System Management Mode 32 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86smmV2.trans ================================================ x86:LE:32:System Management Mode x86:LE:32:System Management Mode ================================================ FILE: pypcode/processors/x86/data/languages/old/x86smmV3.lang ================================================ x86:LE:32:System Management Mode x86 System Management Mode 32 ================================================ FILE: pypcode/processors/x86/data/languages/old/x86smmV3.trans ================================================ x86:LE:32:System Management Mode x86:LE:32:System Management Mode ================================================ FILE: pypcode/processors/x86/data/languages/pclmulqdq.sinc ================================================ # Due to limitations on variable length matching that preclude opcode matching afterwards, all memory addressing forms of PCLMULQDQ are decoded to PCLMULQDQ, not the macro names. # Display is non-standard, but semantics, and de-compilation should be correct. 
macro pclmul(src1, src2, dest) { local i:4 = 0:4; local temp:16 = 0; if (i > 63:4) goto ; if ((src1 & (1 << i)) == 0) goto ; temp = temp ^ (src2 << i); i = i+1; goto ; dest = temp; } :PCLMULLQLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x00 { local src1:16 = zext(XmmReg1[0,64]); local src2:16 = zext(XmmReg2[0,64]); pclmul(src1,src2,XmmReg1); } :PCLMULHQLQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x01 { local src1:16 = zext(XmmReg1[64,64]); local src2:16 = zext(XmmReg2[0,64]); pclmul(src1,src2,XmmReg1); } :PCLMULLQHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x10 { local src1:16 = zext(XmmReg1[0,64]); local src2:16 = zext(XmmReg2[64,64]); pclmul(src1,src2,XmmReg1); } :PCLMULHQHQDQ XmmReg1, XmmReg2 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; byte=0x11 { local src1:16 = zext(XmmReg1[64,64]); local src2:16 = zext(XmmReg2[64,64]); pclmul(src1,src2,XmmReg1); } :PCLMULQDQ XmmReg1, XmmReg2, imm8 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; xmmmod=3 & XmmReg1 & XmmReg2; imm8 & imm8_4 & imm8_0 { if (imm8_0:1) goto ; src1:16 = zext(XmmReg1[0,64]); goto ; src1 = zext(XmmReg1[64,64]); if (imm8_4:1) goto ; src2:16 = zext(XmmReg2[0,64]); goto ; src2 = zext(XmmReg2[64,64]); pclmul(src1,src2,XmmReg1); } :PCLMULQDQ XmmReg, m128, imm8 is vexMode=0 & $(PRE_66) & byte=0x0f; byte=0x3a; byte=0x44; XmmReg ... 
& m128; imm8 & imm8_4 & imm8_0 { if (imm8_0:1) goto ; src1:16 = zext(XmmReg[0,64]); goto ; src1 = zext(XmmReg[64,64]); local m:16 = m128; if (imm8_4:1) goto ; src2:16 = zext(m[0,64]); goto ; src2 = zext(m[64,64]); pclmul(src1,src2,XmmReg); } :VPCLMULLQLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x00 { local src1:16 = zext(vexVVVV_XmmReg[0,64]); local src2:16 = zext(XmmReg2[0,64]); pclmul(src1,src2,XmmReg1); YmmReg1 = zext(XmmReg1); } :VPCLMULHQLQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x01 { local src1:16 = zext(vexVVVV_XmmReg[64,64]); local src2:16 = zext(XmmReg2[0,64]); pclmul(src1,src2,XmmReg1); YmmReg1 = zext(XmmReg1); } :VPCLMULLQHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x10 { local src1:16 = zext(vexVVVV_XmmReg[0,64]); local src2:16 = zext(XmmReg2[64,64]); pclmul(src1,src2,XmmReg1); YmmReg1 = zext(XmmReg1); } :VPCLMULHQHQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; byte=0x11 { local src1:16 = zext(vexVVVV_XmmReg[64,64]); local src2:16 = zext(XmmReg2[64,64]); pclmul(src1,src2,XmmReg1); YmmReg1 = zext(XmmReg1); } :VPCLMULQDQ XmmReg1, vexVVVV_XmmReg, XmmReg2, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x44; xmmmod=3 & (XmmReg1 & YmmReg1) & XmmReg2; imm8 & imm8_4 & imm8_0 { if (imm8_0:1) goto ; src1:16 = zext(vexVVVV_XmmReg[0,64]); goto ; src1 = zext(vexVVVV_XmmReg[64,64]); if (imm8_4:1) goto ; src2:16 = zext(XmmReg2[0,64]); goto ; src2 = 
zext(XmmReg2[64,64]); pclmul(src1,src2,XmmReg1); YmmReg1 = zext(XmmReg1); } :VPCLMULQDQ XmmReg1, vexVVVV_XmmReg, m128, imm8 is $(VEX_NDS) & $(VEX_L128) & $(VEX_PRE_66) & $(VEX_0F3A) & $(VEX_WIG) & vexVVVV_XmmReg; byte=0x44; (XmmReg1 & YmmReg1) ... & m128; imm8 & imm8_4 & imm8_0 { if (imm8_0:1) goto ; src1:16 = zext(vexVVVV_XmmReg[0,64]); goto ; src1 = zext(vexVVVV_XmmReg[64,64]); local m:16 = m128; if (imm8_4:1) goto ; src2:16 = zext(m[0,64]); goto ; src2 = zext(m[64,64]); pclmul(src1,src2,XmmReg1); YmmReg1 = zext(XmmReg1); } ================================================ FILE: pypcode/processors/x86/data/languages/rdrand.sinc ================================================ define pcodeop rdrand; define pcodeop rdrandIsValid; macro rdflags(){ OF = 0; SF = 0; ZF = 0; AF = 0; PF = 0; } :RDRAND Rmr16 is vexMode=0 & opsize=0 & byte=0x0f; byte=0xC7; mod=3 & Rmr16 & reg_opcode=6 { Rmr16 = rdrand(); CF=rdrandIsValid(); rdflags(); } :RDRAND Rmr32 is vexMode=0 & opsize=1 & byte=0x0f; byte=0xC7; mod=3 & Rmr32 & reg_opcode=6 { Rmr32 = rdrand(); CF=rdrandIsValid(); rdflags(); } @ifdef IA64 :RDRAND Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(REX_W) & byte=0x0f; byte=0xC7; mod=3 & Rmr64 & reg_opcode=6 { Rmr64 = rdrand(); CF=rdrandIsValid(); rdflags(); } @endif define pcodeop rdseed; define pcodeop rdseedIsValid; :RDSEED Rmr16 is vexMode=0 & opsize=0 & byte=0x0f; byte=0xC7; mod=3 & Rmr16 & reg_opcode=7 { Rmr16 = rdseed(); CF=rdseedIsValid(); rdflags(); } :RDSEED Rmr32 is vexMode=0 & opsize=1 & byte=0x0f; byte=0xC7; mod=3 & Rmr32 & reg_opcode=7 { Rmr32 = rdseed(); CF=rdseedIsValid(); rdflags(); } @ifdef IA64 :RDSEED Rmr64 is $(LONGMODE_ON) & vexMode=0 & opsize=2 & $(REX_W) & byte=0x0f; byte=0xC7; mod=3 & Rmr64 & reg_opcode=7 { Rmr64 = rdseed(); CF=rdseedIsValid(); rdflags(); } @endif ================================================ FILE: pypcode/processors/x86/data/languages/sgx.sinc ================================================ define pcodeop encls_ecreate; define 
pcodeop encls_eadd; define pcodeop encls_einit; define pcodeop encls_einit_ZF; define pcodeop encls_eremove; define pcodeop encls_eremove_ZF; define pcodeop encls_edbgrd; define pcodeop encls_edbgrd_RBX; define pcodeop encls_edbgwr; define pcodeop encls_eextend; define pcodeop encls_eldb; define pcodeop encls_eldb_ZF; define pcodeop encls_eldu; define pcodeop encls_eldu_ZF; define pcodeop encls_eblock; define pcodeop encls_eblock_ZF; define pcodeop encls_epa; define pcodeop encls_ewb; define pcodeop encls_ewb_ZF; define pcodeop encls_ewb_CF; define pcodeop encls_etrack; define pcodeop encls_etrack_ZF; define pcodeop encls_eaug; define pcodeop encls_emodpr; define pcodeop encls_emodpr_ZF; define pcodeop encls_emodt; define pcodeop encls_emodt_ZF; define pcodeop encls_unknown; :ENCLS is vexMode=0 & byte=0x0f; byte=0x01; byte=0xcf { if ( EAX != 0x0 ) goto ; encls_ecreate( RBX, RCX ); goto ; if ( EAX != 0x1 ) goto ; encls_eadd( RBX, RCX ); goto ; if ( EAX != 0x2 ) goto ; RAX = encls_einit( RBX, RCX, RDX ); ZF = encls_einit_ZF( RBX, RCX, RDX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0x3 ) goto ; RAX = encls_eremove( RCX ); ZF = encls_eremove_ZF( RBX, RCX, RDX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0x4 ) goto ; RAX = encls_edbgrd( RCX ); RBX = encls_edbgrd_RBX( RCX ); goto ; if ( EAX != 0x5 ) goto ; RAX = encls_edbgwr( RBX, RCX ); goto ; if ( EAX != 0x6 ) goto ; encls_eextend( RBX, RCX ); goto ; if ( EAX != 0x7 ) goto ; RAX = encls_eldb( RBX, RCX, RDX ); ZF = encls_eldb_ZF( RBX, RCX, RDX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0x8 ) goto ; RAX = encls_eldu( RBX, RCX, RDX ); ZF = encls_eldu_ZF( RBX, RCX, RDX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0x9 ) goto ; RAX = encls_eblock( RCX ); ZF = encls_eblock_ZF( RCX ); PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0xA ) goto ; encls_epa( RBX, RCX ); goto ; if ( EAX != 0xB ) goto ; RAX = encls_ewb( RBX, RCX, RDX ); ZF = encls_ewb_ZF( 
RBX, RCX, RDX ); CF = encls_ewb_CF( RBX, RCX, RDX ); PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0xC ) goto ; RAX = encls_etrack( RCX ); ZF = encls_etrack_ZF( RBX, RCX, RDX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0xD ) goto ; encls_eaug( RBX, RCX, RDX ); goto ; if ( EAX != 0xE ) goto ; RAX = encls_emodpr( RBX, RCX ); ZF = encls_emodpr_ZF( RCX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0xF ) goto ; RAX = encls_emodt( RBX, RCX ); ZF = encls_emodt_ZF( RCX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; encls_unknown(); } define pcodeop enclu_ereport; define pcodeop enclu_egetkey; define pcodeop enclu_egetkey_ZF; define pcodeop enclu_eenter_EAX; define pcodeop enclu_eenter_RCX; define pcodeop enclu_eenter_TF; define pcodeop enclu_eresume; define pcodeop enclu_eexit; define pcodeop enclu_eexit_TF; define pcodeop enclu_eaccept; define pcodeop enclu_eaccept_ZF; define pcodeop enclu_emodpe; define pcodeop enclu_eacceptcopy; define pcodeop enclu_eacceptcopy_ZF; define pcodeop enclu_unknown; :ENCLU is vexMode=0 & byte=0x0f; byte=0x01; byte=0xd7 { if ( EAX != 0x0 ) goto ; enclu_ereport( RBX, RCX, RDX ); goto ; if ( EAX != 0x1 ) goto ; RAX = enclu_egetkey( RBX, RCX ); ZF = enclu_egetkey_ZF( RBX, RCX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0x2 ) goto ; tempBX:8 = RBX; tempCX:8 = RCX; EAX = enclu_eenter_EAX( tempBX, tempCX ); RCX = enclu_eenter_RCX( tempBX, tempCX ); TF = enclu_eenter_TF( tempBX, tempCX ); goto ; if ( EAX != 0x3 ) goto ; TF = enclu_eresume( RBX, RCX ); goto ; if ( EAX != 0x4 ) goto ; RCX = enclu_eexit( RBX ); TF = enclu_eexit_TF( RBX ); goto ; if ( EAX != 0x5 ) goto ; RAX = enclu_eaccept( RBX, RCX ); ZF = enclu_eaccept_ZF( RBX, RCX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto ; if ( EAX != 0x6 ) goto ; enclu_emodpe( RBX, RCX ); goto ; if ( EAX != 0x7 ) goto ; RAX = enclu_eacceptcopy( RBX, RCX, RDX ); ZF = enclu_eacceptcopy_ZF( RBX, RCX, RDX ); CF = 0; PF = 0; AF = 0; OF = 0; SF = 0; goto 
; enclu_unknown(); } ================================================ FILE: pypcode/processors/x86/data/languages/sha.sinc ================================================ # INFO This file automatically generated by andre on Fri Mar 16 15:13:25 2018 # INFO Direct edits to this file may be lost in future updates # INFO Command line arguments: ['--sinc', '--cpuid-match', 'SHA'] # SHA1RNDS4 4-602 PAGE 1722 LINE 89511 define pcodeop sha1rnds4_sha ; :SHA1RNDS4 XmmReg1, XmmReg2_m128, imm8 is vexMode=0 & byte=0x0F; byte=0x3A; byte=0xCC; (XmmReg1 & YmmReg1) ... & XmmReg2_m128; imm8 { XmmReg1 = sha1rnds4_sha( XmmReg1, XmmReg2_m128, imm8:1 ); } # SHA1NEXTE 4-604 PAGE 1724 LINE 89602 define pcodeop sha1nexte_sha ; :SHA1NEXTE XmmReg1, XmmReg2_m128 is vexMode=0 & byte=0x0F; byte=0x38; byte=0xC8; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = sha1nexte_sha( XmmReg1, XmmReg2_m128 ); } # SHA1MSG1 4-605 PAGE 1725 LINE 89654 define pcodeop sha1msg1_sha ; :SHA1MSG1 XmmReg1, XmmReg2_m128 is vexMode=0 & byte=0x0F; byte=0x38; byte=0xC9; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = sha1msg1_sha( XmmReg1, XmmReg2_m128 ); } # SHA1MSG2 4-606 PAGE 1726 LINE 89708 define pcodeop sha1msg2_sha ; :SHA1MSG2 XmmReg1, XmmReg2_m128 is vexMode=0 & byte=0x0F; byte=0x38; byte=0xCA; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = sha1msg2_sha( XmmReg1, XmmReg2_m128 ); } # SHA256RNDS2 4-607 PAGE 1727 LINE 89765 define pcodeop sha256rnds2_sha ; :SHA256RNDS2 XmmReg1, XmmReg2_m128, XMM0 is vexMode=0 & byte=0x0F; byte=0x38; byte=0xCB; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 & XMM0 { XmmReg1 = sha256rnds2_sha( XmmReg1, XmmReg2_m128, XMM0 ); } # SHA256MSG1 4-609 PAGE 1729 LINE 89847 define pcodeop sha256msg1_sha ; :SHA256MSG1 XmmReg1, XmmReg2_m128 is vexMode=0 & byte=0x0F; byte=0x38; byte=0xCC; (XmmReg1 & YmmReg1) ... 
& XmmReg2_m128 { XmmReg1 = sha256msg1_sha( XmmReg1, XmmReg2_m128 ); } # SHA256MSG2 4-610 PAGE 1730 LINE 89900 define pcodeop sha256msg2_sha ; :SHA256MSG2 XmmReg1, XmmReg2_m128 is vexMode=0 & byte=0x0F; byte=0x38; byte=0xCD; (XmmReg1 & YmmReg1) ... & XmmReg2_m128 { XmmReg1 = sha256msg2_sha( XmmReg1, XmmReg2_m128 ); } ================================================ FILE: pypcode/processors/x86/data/languages/smx.sinc ================================================ define pcodeop getsec_capabilities; define pcodeop getsec_enteraccs; define pcodeop getsec_exitac; define pcodeop getsec_senter; define pcodeop getsec_sexit; define pcodeop getsec_parameters_EAX; define pcodeop getsec_parameters_EBX; define pcodeop getsec_parameters_ECX; define pcodeop getsec_smctrl; define pcodeop getsec_wakeup; define pcodeop getsec_unknown; :GETSEC is vexMode=0 & byte=0x0f; byte=0x37 { if ( EAX != 0x0 ) goto ; EAX = 0; if ( EBX != 0x0 ) goto ; EAX = getsec_capabilities( EBX ); goto ; if ( EAX != 0x2 ) goto ; getsec_enteraccs( EBX, ECX ); goto ; if ( EAX != 0x3 ) goto ; @ifdef IA64 getsec_exitac( RBX, EDX ); @else getsec_exitac( EBX, EDX ); @endif goto ; if ( EAX != 0x4 ) goto ; getsec_senter( EBX, ECX, EDX); goto ; if ( EAX != 0x5 ) goto ; getsec_sexit(); goto ; if ( EAX != 0x6 ) goto ; EAX = getsec_parameters_EAX( EBX ); ECX = getsec_parameters_ECX( EBX ); EBX = getsec_parameters_EBX( EBX ); goto ; if ( EAX != 0x7 ) goto ; getsec_smctrl(EBX); goto ; if ( EAX != 0x8 ) goto ; getsec_wakeup(); goto ; getsec_unknown(); } ================================================ FILE: pypcode/processors/x86/data/languages/x86-16-real.pspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-16.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-16.gdis ================================================ 
================================================ FILE: pypcode/processors/x86/data/languages/x86-16.pspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-32-golang.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-32-golang.register.info ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64-compat32.pspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64-gcc.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64-golang.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64-golang.register.info ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64-swift.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64-win.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64.dwarf ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64.pspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86-64.slaspec ================================================ @define IA64 "IA64" @include "x86.slaspec" with : lockprefx=0 { 
@include "sgx.sinc" @include "fma.sinc" } ================================================ FILE: pypcode/processors/x86/data/languages/x86.dwarf ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86.ldefs ================================================ Intel/AMD 32-bit x86 Intel/AMD 32-bit x86 System Management Mode Intel/AMD 16-bit x86 Real Mode Intel/AMD 16-bit x86 Protected Mode Intel/AMD 64-bit x86 Intel/AMD 64-bit x86 in 32-bit compatibility mode (long mode off) ================================================ FILE: pypcode/processors/x86/data/languages/x86.opinion ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86.pspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86.slaspec ================================================ @include "ia.sinc" @include "lockable.sinc" with : lockprefx=0 { @include "avx.sinc" @include "avx_manual.sinc" @include "avx2.sinc" @include "avx2_manual.sinc" @include "avx512.sinc" @include "avx512_manual.sinc" @include "adx.sinc" @include "clwb.sinc" @include "pclmulqdq.sinc" @include "mpx.sinc" @include "lzcnt.sinc" @include "bmi1.sinc" @include "bmi2.sinc" @include "sha.sinc" @include "smx.sinc" @include "cet.sinc" @include "rdrand.sinc" } ================================================ FILE: pypcode/processors/x86/data/languages/x86borland.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86delphi.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/languages/x86gcc.cspec ================================================ ================================================ 
FILE: pypcode/processors/x86/data/languages/x86win.cspec ================================================ ================================================ FILE: pypcode/processors/x86/data/manuals/x86.idx ================================================ @325383-sdm-vol-2abcd.pdf [Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2 (2A, 2B, 2C & 2D): Instruction Set Reference, A-Z, Oct 2019 (325383-071US)] AAA, 120 AAD, 122 BLENDPS, 123 AAM, 124 AAS, 126 ADC, 128 ADCX, 131 ADD, 133 ADDPD, 135 VADDPD, 135 ADDPS, 138 VADDPS, 138 ADDSD, 141 VADDSD, 141 ADDSS, 143 VADDSS, 143 ADDSUBPD, 145 VADDSUBPD, 145 ADDSUBPS, 147 VADDSUBPS, 147 ADOX, 150 AESDEC, 152 VAESDEC, 152 AESDECLAST, 154 VAESDECLAST, 154 AESENC, 156 VAESENC, 156 AESENCLAST, 158 VAESENCLAST, 158 AESIMC, 160 VAESIMC, 160 AESKEYGENASSIST, 161 VAESKEYGENASSIST, 161 AND, 163 ANDN, 165 ANDPD, 166 VANDPD, 166 ANDPS, 169 VANDPS, 169 ANDNPD, 172 VANDNPD, 172 ANDNPS, 175 VANDNPS, 175 ARPL, 178 BLENDPD, 180 VBLENDPD, 180 BEXTR, 182 BLENDPS, 183 VBLENDPS, 183 BLENDVPD, 185 VBLENDVPD, 185 BLENDVPS, 187 VBLENDVPS, 187 BLSI, 190 BLSMSK, 191 BLSR, 192 BNDCL, 193 BNDCU, 195 BNDCN, 195 BNDLDX, 197 BNDMK, 200 BNDMOV, 202 BNDSTX, 205 BOUND, 208 BSF, 210 BSR, 212 BSWAP, 214 BT, 215 BTC, 217 BTR, 219 BTS, 221 BZHI, 223 CALL, 224 CBW, 241 CWDE, 241 CDQE, 241 CLAC, 242 CLC, 243 CLD, 244 CLDEMOTE, 245 CLFLUSH, 247 CLFLUSHOPT, 249 CLI, 251 CLRSBSY, 253 CLTS, 255 CLWB, 256 CMC, 258 CMOV, 259 CMOVA, 259 CMOVAE, 259 CMOVB, 259 CMOVBE, 259 CMOVC, 259 CMOVE, 259 CMOVG, 259 CMOVGE, 259 CMOVL, 259 CMOVLE, 259 CMOVNA, 259 CMOVNAE, 259 CMOVNB, 259 CMOVNBE, 259 CMOVNC, 259 CMOVNE, 259 CMOVNG, 259 CMOVNGE, 259 CMOVNL, 259 CMOVNLE, 259 CMOVNO, 259 CMOVNP, 259 CMOVNS, 259 CMOVNZ, 259 CMOVO, 259 CMOVP, 259 CMOVPE, 259 CMOVPO, 259 CMOVS, 259 CMOVZ, 259 CMP, 263 CMPPD, 265 VCMPPD, 265 CMPEQPD, 267 CMPLTPD, 267 CMPLEPD, 267 CMPUNORDPD, 267 CMPNEQPD, 267 CMPNLTPD, 267 CMPNLEPD, 267 CMPORDPD, 267 VCMPEQPD, 268 VCMPLTPD, 268 VCMPLEPD, 
268 VCMPUNORDPD, 268 VCMPNEPD, 268 VCMPNLTPD, 268 VCMPNLEPD, 268 VCMPORDPD, 268 VCMPEQ_UQPD, 268 VCMPNGEPD, 268 VCMPNGTPD, 268 VCMPFALSEPD, 268 VCMPNEQ_OQPD, 268 VCMPGEPD, 268 VCMPGTPD, 268 VCMPTRUEPD, 268 VCMPEQ_OSPD, 268 VCMPLT_OQPD, 268 VCMPLE_OQPD, 268 VCMPUNORD_SPD, 268 VCMPNEQ_USPD, 268 VCMPNLT_UQPD, 268 VCMPNLE_UQPD, 268 VCMPORD_SPD, 268 VCMPEQ_USPD, 268 VCMPNGE_UQPD, 268 VCMPNGT_UQPD, 268 VCMPFALSE_OSPD, 268 VCMPNEQ_OSPD, 268 VCMPGE_OQPD, 268 VCMPGT_OQPD, 268 VCMPTRUE_USPD, 268 CMPPS, 272 VCMPPS, 272 CMPEQPS, 273 CMPLTPS, 273 CMPLEPS, 273 CMPUNORDPS, 273 CMPNEQPS, 273 CMPNLTPS, 273 CMPNLEPS, 273 CMPORDPS, 273 VCMPEQPS, 274 VCMPLTPS, 274 VCMPLEPS, 274 VCMPUNORDPS, 274 VCMPNEQPS, 274 VCMPNLTPS, 274 VCMPNLEPS, 274 VCMPORDPS, 274 VCMPEQ_UQPS, 274 VCMPNGEPS, 274 VCMPNGTPS, 274 VCMPFALSEPS, 274 VCMPNEQ_OQPS, 274 VCMPGEPS, 274 VCMPGTPS, 274 VCMPTRUEPS, 274 VCMPEQ_OSPS, 274 VCMPLT_OQPS, 274 VCMPLE_OQPS, 274 VCMPUNORD_SPS, 274 VCMPNEQ_USPS, 274 VCMPNLT_UQPS, 274 VCMPNLE_UQPS, 274 VCMPORD_SPS, 274 VCMPEQ_USPS, 274 VCMPNGE_UQPS, 274 VCMPNGT_UQPS, 274 VCMPFALSE_OSPS, 274 VCMPNEQ_OSPS, 274 VCMPGE_OQPS, 274 VCMPGT_OQPS, 274 VCMPTRUE_USPS, 274 CMPS, 279 CMPSB, 279 CMPSW, 279 CMPSQ, 279 CMPSD, 283 VCMPSD, 283 CMPEQSD, 284 CMPLTSD, 284 CMPLESD, 284 CMPUNORDSD, 284 CMPNEQSD, 284 CMPNLTSD, 284 CMPNLESD, 284 CMPORDSD, 284 VCMPEQSD, 284 VCMPLTSD, 284 VCMPLESD, 284 VCMPUNORDSD, 284 VCMPNEQSD, 284 VCMPNLTSD, 284 VCMPNLESD, 284 VCMPORDSD, 284 VCMPEQ_UQSD, 284 VCMPNGESD, 284 VCMPNGTSD, 284 VCMPFALSESD, 284 VCMPNEQ_OQSD, 284 VCMPGESD, 284 VCMPGTSD, 285 VCMPTRUESD, 285 VCMPEQ_OSSD, 285 VCMPLT_OQSD, 285 VCMPLE_OQSD, 285 VCMPUNORD_SSD, 285 VCMPNEQ_USSD, 285 VCMPNLT_UQSD, 285 VCMPNLE_UQSD, 285 VCMPORD_SSD, 285 VCMPEQ_USSD, 285 VCMPNGE_UQSD, 285 VCMPNGT_UQSD, 285 VCMPFALSE_OSSD, 285 VCMPNEQ_OSSD, 285 VCMPGE_OQSD, 285 VCMPGT_OQSD, 285 VCMPTRUE_USSD, 285 CMPSS, 287 VCMPSS, 287 CMPEQSS, 288 CMPLTSS, 288 CMPLESS, 288 CMPUNORDSS, 288 CMPNEQSS, 288 CMPNLTSS, 288 CMPNLESS, 288 CMPORDSS, 288 
VCMPEQSS, 288 VCMPLTSS, 288 VCMPLESS, 288 VCMPUNORDSS, 288 VCMPNEQSS, 288 VCMPNLTSS, 288 VCMPNLESS, 288 VCMPORDSS, 288 VCMPEQ_UQSS, 288 VCMPNGESS, 288 VCMPNGTSS, 288 VCMPFALSESS, 288 VCMPNEQ_OQSS, 288 VCMPGESS, 288 VCMPGTSS, 289 VCMPTRUESS, 289 VCMPEQ_OSSS, 289 VCMPLT_OQSS, 289 VCMPLE_OQSS, 289 VCMPUNORD_SSS, 289 VCMPNEQ_USSS, 289 VCMPNLT_UQSS, 289 VCMPNLE_UQSS, 289 VCMPORD_SSS, 289 VCMPEQ_USSS, 289 VCMPNGE_UQSS, 289 VCMPNGT_UQSS, 289 VCMPFALSE_OSSS, 289 VCMPNEQ_OSSS, 289 VCMPGE_OQSS, 289 VCMPGT_OQSS, 289 VCMPTRUE_USSS, 289 CMPXCHG, 291 CMPXCHG8B, 293 CMPXCHG16B, 293 COMISD, 296 VCOMISD, 296 COMISS, 298 VCOMISS, 298 CPUID, 300 CRC32, 339 CVTDQ2PD, 342 VCVTDQ2PD, 342 CVTDQ2PS, 346 VCVTDQ2PS, 346 CVTPD2DQ, 349 VCVTPD2DQ, 349 CVTPD2PI, 353 CVTPD2PS, 354 VCVTPD2PS, 354 CVTPI2PD, 358 CVTPI2PS, 359 CVTPS2DQ, 360 VCVTPS2DQ, 360 CVTPS2PD, 363 VCVTPS2PD, 363 CVTPS2PI, 366 CVTSD2SI, 367 VCVTSD2SI, 367 CVTSD2SS, 369 VCVTSD2SS, 369 CVTSI2SD, 371 VCVTSI2SD, 371 CVTSI2SS, 373 VCVTSI2SS, 373 CVTSS2SD, 375 VCVTSS2SD, 375 CVTSS2SI, 377 VCVTSS2SI, 377 CVTTPD2DQ, 379 VCVTTPD2DQ, 379 CVTTPD2PI, 383 CVTTPS2DQ, 384 VCVTTPS2DQ, 384 CVTTPS2PI, 387 CVTTSD2SI, 388 VCVTTSD2SI, 388 CVTTSS2SI, 390 VCVTTSS2SI, 390 CWD, 392 CDQ, 392 CQO, 392 DAA, 393 DAS, 395 DEC, 397 DIV, 399 DIVPD, 402 VDIVPD, 402 DIVPS, 405 VDIVPS, 405 DIVSD, 408 VDIVSD, 408 DIVSS, 410 VDIVSS, 410 DPPD, 412 VDPPD, 412 DPPS, 414 VDPPS, 414 EMMS, 417 ENTER, 420 EXTRACTPS, 423 VEXTRACTPS, 423 F2XM1, 425 FABS, 427 FADD, 428 FADDP, 428 FIADD, 428 FBLD, 431 FBSTP, 433 FCHS, 435 FCLEX, 437 FNCLEX, 437 FCMOV, 439 FCMOVB, 439 FCMOVE, 439 FCMOVBE, 439 FCMOVU, 439 FCMOVNB, 439 FCMOVNE, 439 FCMOVNBE, 439 FCMOVNU, 439 FCOM, 441 FCOMP, 441 FCOMPP, 441 FCOMI, 444 FCOMIP, 444 FUCOMI, 444 FUCOMIP, 444 FCOS, 447 FDECSTP, 449 FDIV, 450 FDIVP, 450 FIDIV, 450 FDIVR, 453 FDIVRP, 453 FIDIVR, 453 FFREE, 456 FICOM, 456 FICOMP, 457 FILD, 459 FINCSTP, 461 FINIT, 462 FNINIT, 462 FIST, 464 FISTP, 464 FISTTP, 467 FLD, 469 FLD1, 471 FLDL2T, 471 FLDL2E, 471 
FLDPI, 471 FLDLG2, 471 FLDLN2, 471 FLDZ, 471 FLDCW, 473 FLDENV, 475 FMUL, 477 FMULP, 477 FIMUL, 477 FNOP, 480 FPATAN, 481 FPREM, 483 FPREM1, 485 FPTAN, 487 FRNDINT, 489 FRSTOR, 490 FSAVE, 492 FNSAVE, 492 FSCALE, 495 FSIN, 497 FSINCOS, 499 FSQRT, 501 FST, 503 FSTP, 503 FSTCW, 505 FNSTCW, 505 FSTENV, 507 FNSTENV, 507 FSTSW, 509 FNSTSW, 509 FSUB, 511 FSUBP, 511 FISUB, 511 FSUBR, 514 FSUBRP, 514 FISUBR, 514 FTST, 517 FUCOM, 519 FUCOMP, 519 FUCOMPP, 519 FXAM, 522 FXCH, 524 FXRSTOR, 526 FXRSTOR64, 526 FXSAVE, 529 FXSAVE64, 529 FXTRACT, 537 FYL2X, 539 FYL2XP1, 541 GF2P8AFFINEINVQB, 543 GF2P8AFFINEQB, 546 GF2P8MULB, 548 HADDPD, 550 VHADDPD, 550 HADDPS, 553 VHADDPS, 553 HLT, 556 HSUBPD, 557 VHSUBPD, 557 HSUBPS, 560 VHSUBPS, 560 IDIV, 563 IMUL, 566 IN, 570 INC, 572 INCSSPD, 574 INCSSPQ, 574 INS, 576 INSB, 576 INSW, 576 INSD, 576 INSERTPS, 579 VINSERTPS, 579 INT, 582 INTO, 582 INT3, 582 INVD, 597 INVLPG, 599 INVPCID, 601 IRET, 604 IRETD, 604 IRETQ, 604 J, 613 JA, 613 JAE, 613 JB, 613 JBE, 613 JC, 613 JCXZ, 613 JECXZ, 613 JRCXZ, 613 JE, 613 JG, 613 JGE, 613 JL, 613 JLE, 613 JNA, 613 JNAE, 613 JNB, 613 JNBE, 613 JNC, 613 JNE, 613 JNG, 613 JNGE, 613 JNL, 613 JNLE, 613 JNO, 613 JNP, 613 JNS, 613 JNZ, 613 JO, 613 JP, 613 JPE, 613 JPO, 613 JS, 613 JZ, 613 JMP, 618 KADDW, 627 KADDB, 627 KADDQ, 627 KADDD, 627 KANDW, 628 KANDB, 628 KANDQ, 628 KANDD, 628 KANDNW, 629 KANDNB, 629 KANDNQ, 629 KANDND, 629 KMOVW, 630 KMOVB, 630 KMOVQ, 630 KMOVD, 630 KNOTW, 632 KNOTB, 632 KNOTQ, 632 KNOTD, 632 KORW, 633 KORB, 633 KORQ, 633 KORD, 633 KORTESTW, 634 KORTESTB, 634 KORTESTQ, 634 KORTESTD, 634 KSHIFTLW, 636 KSHIFTLB, 636 KSHIFTLQ, 636 KSHIFTLD, 636 KSHIFTRW, 638 KSHIFTRB, 638 KSHIFTRQ, 638 KSHIFTRD, 638 KTESTW, 640 KTESTB, 640 KTESTQ, 640 KTESTD, 640 KUNPCKBW, 642 KUNPCKWD, 642 KUNPCKDQ, 642 KXNORW, 643 KXNORB, 643 KXNORQ, 643 KXNORD, 643 KXORW, 644 KXORB, 644 KXORQ, 644 KXORD, 644 LAHF, 645 LAR, 646 LDDQU, 649 VLDDQU, 649 LDMXCSR, 651 VLDMXCSR, 651 LDS, 652 LES, 652 LFS, 652 LGS, 652 LSS, 652 
LEA, 656 LEAVE, 658 LFENCE, 660 LGDT, 661 LIDT, 661 LLDT, 664 LMSW, 666 LOCK, 668 LODS, 670 LODSB, 670 LODSW, 670 LODSD, 670 LODSQ, 670 LOOP, 673 LOOPE, 673 LOOPNE, 673 LSL, 675 LTR, 678 LZCNT, 680 MASKMOVDQU, 690 VMASKMOVDQU, 690 MASKMOVQ, 692 MAXPD, 694 VMAXPD, 694 MAXPS, 697 VMAXPS, 697 MAXSD, 700 VMAXSD, 700 MAXSS, 702 VMAXSS, 702 MFENCE, 704 MINPD, 705 VMINPD, 705 MINPS, 708 VMINPS, 708 MINSD, 711 VMINSD, 711 MINSS, 713 VMINSS, 713 MONITOR, 715 MOV, 717 MOVAPD, 727 VMOVPAD, 727 MOVAPS, 731 VMOVAPS, 731 MOVBE, 735 MOVD, 737 VMOVD, 737 MOVQ, 737 VMOVQ, 737 MOVDDUP, 741 VMOVDDUP, 741 MOVDIRI, 744 MOVDIR64B, 746 MOVDQA, 748 VMOVDQA, 748 VMOVDQA32, 748 VMOVDQA64, 748 MOVDQU, 753 VMOVDQU, 753 VMOVDQU8, 753 VMOVDQU16, 753 VMOVDQU32, 753 VMOVDQU64, 753 MOVDQ2Q, 761 MOVHLPS, 762 VMOVHLPS, 762 MOVHPD, 764 VMOVHPD, 764 MOVHPS, 766 VMOVHPS, 766 MOVLHPS, 768 VMOVLHPS, 768 MOVLPD, 770 VMOVLPD, 770 MOVLPS, 772 VMOVLPS, 772 MOVMSKPD, 774 VMOVMSKPD, 774 MOVMSKPS, 776 VMOVMSKPS, 776 MOVNTDQA, 778 VMOVNTDQA, 778 MOVNTDQ, 780 VMOVNTDQ, 780 MOVNTI, 782 MOVNTPD, 784 VMOVNTPD, 784 MOVNTPS, 786 VMOVNTPS, 786 MOVNTQ, 788 MOVQ, 789 VMOVQ, 789 MOVQ2DQ, 792 MOVS, 793 MOVSB, 793 MOVSW, 793 MOVSQ, 793 MOVSD, 797 VMOVSD, 797 MOVSHDUP, 800 VMOVSHDUP, 800 MOVSLDUP, 803 VMOVSLDUP, 803 MOVSS, 806 VMOVSS, 806 MOVSX, 810 MOVSXD, 810 MOVUPD, 812 VMOVUPD, 812 MOVUPS, 816 VMOVUPS, 816 MOVZX, 820 MPSADBW, 822 VMPSADBW, 822 MUL, 830 MULPD, 832 VMULPD, 832 MULPS, 835 VMULPS, 835 MULSD, 838 VMULSD, 838 MULSS, 840 VMULSS, 840 MULX, 842 MWAIT, 844 NEG, 847 NOP, 849 NOT, 850 OR, 852 ORPD, 854 VORPD, 854 ORPS, 857 VORPS, 857 OUT, 860 OUTS, 862 OUTSB, 862 OUTSW, 862 OUTSD, 862 PABSB, 866 VPABSB, 866 PABSW, 866 VPABSW, 866 PABSD, 866 VPABSD, 866 PABSQ, 866 VPABSQ, 866 PACKSSWB, 872 VPACKSSWB, 872 PACKSSDW, 872 VPACKSSDW, 872 PACKUSDW, 880 VPACKUSDW, 880 PACKUSWB, 885 VPACKUSWB, 885 PADDB, 890 VPADDB, 890 PADDW, 890 VPADDW, 890 PADDD, 890 VPADDD, 890 PADDQ, 890 VPADDQ, 890 PADDSB, 897 VPADDSB, 897 PADDSW, 897 
VPADDSW, 897 PADDUSB, 901 VPADDUSB, 901 PADDUSW, 901 VPADDUSW, 901 PALIGNR, 905 VPALIGNR, 905 PAND, 909 VPAND, 909 VPANDD, 909 VPANDQ, 909 PANDN, 912 VPANDN, 912 VPANDND, 912 VPANDNQ, 912 PAUSE, 915 PAVGB, 916 VPAVGB, 916 PAVGW, 916 VPAVGW, 916 PBLENDVB, 920 VPBLENDVB, 920 PBLENDW, 924 VPBLENDW, 924 PCLMULQDQ, 927 VPCLMULQDQ, 927 PCMPEQB, 930 VPCMPEQB, 930 PCMPEQW, 930 VPCMPEQW, 930 PCMPEQD, 930 VPCMPEQD, 930 PCMPEQQ, 936 VPCMPEQQ, 936 PCMPESTRI, 939 VPCMPESTRI, 939 PCMPESTRM, 941 VPCMPESTRM, 941 PCMPGTB, 943 VPCMPGTB, 943 PCMPGTW, 943 VPCMPGTW, 943 PCMPGTD, 943 VPCMPGTD, 943 PCMPGTQ, 949 VPCMPGTQ, 949 PCMPISTRI, 952 VPCMPISTRI, 952 PCMPISTRM, 954 VPCMPISTRM, 954 PDEP, 956 PEXT, 958 PEXTRB, 960 VPEXTRB, 960 PEXTRD, 960 VPEXTRD, 960 PEXTRQ, 960 VPEXTRQ, 960 PEXTRW, 963 VPEXTRW, 963 PHADDW, 966 VPHADDW, 966 PHADDD, 966 VPHADDD, 966 PHADDSW, 970 VPHADDSW, 970 PHMINPOSUW, 972 VPHMINPOSUW, 972 PHSUBW, 974 VPHSUBW, 974 PHSUBD, 974 PHSUBSW, 977 VPHSUBSW, 977 PINSRB, 979 VPINSRB, 979 PINSRD, 979 VPINSRD, 979 PINSRQ, 979 VPINSRQ, 979 PINSRW, 982 VPINSRW, 982 PMADDUBSW, 984 VPMADDUBSW, 984 PMADDWD, 987 VPMADDWD, 987 PMAXSB, 990 VPMAXSB, 990 PMAXSW, 990 VPMAXSW, 990 PMAXSD, 990 VPMAXSD, 990 PMAXSQ, 990 VPMAXSQ, 990 PMAXUB, 997 VPMAXUB, 997 PMAXUW, 997 VPMAXUW, 997 PMAXUD, 1002 VPMAXUD, 1002 PMAXUQ, 1002 VPMAXUQ, 1002 PMINSB, 1006 VPMINSB, 1006 PMINSW, 1006 VPMINSW, 1006 PMINSD, 1011 VPMINSD, 1011 PMINSQ, 1011 VPMINSQ, 1011 PMINUB, 1015 VPMINUB, 1015 PMINUW, 1015 VPMINUW, 1015 PMINUD, 1020 VPMINUD, 1020 PMINUQ, 1020 VPMINUQ, 1020 PMOVMSKB, 1024 VPMOVMSKB, 1024 PMOVSX, 1026 PMOVSXBW, 1026 VPMOVSXBW, 1026 PMOVSXBD, 1026 VPMOVSXBD, 1026 PMOVSXBQ, 1026 VPMOVSXBQ, 1026 PMOVSXWD, 1026 VPMOVSXWD, 1026 PMOVSXWQ, 1026 VPMOVSXWQ, 1026 PMOVSXDQ, 1026 VPMOVSXDQ, 1026 PMOVZX, 1035 PMOVZXBW, 1035 VPMOVZXBW, 1035 PMOVZXBD, 1035 VPMOVZXBD, 1035 PMOVZXBQ, 1035 VPMOVZXBQ, 1035 PMOVZXWD, 1035 VPMOVZXWD, 1035 PMOVZXWQ, 1035 VPMOVZXWQ, 1035 PMOVZXDQ, 1035 VPMOVZXDQ, 1035 PMULDQ, 1044 VPMULDQ, 1044 
PMULHRSW, 1047 VPMULHRSW, 1047 PMULHUW, 1051 VPMULHUW, 1051 PMULHW, 1055 VPMULHW, 1055 PMULLD, 1059 VPMULLD, 1059 PMULLQ, 1059 VPMULLQ, 1059 PMULLW, 1063 VPMULLW, 1063 PMULUDQ, 1067 VPMULUDQ, 1067 POP, 1070 POPA, 1075 POPAD, 1075 POPCNT, 1077 POPF, 1079 POPFD, 1079 POPFQ, 1079 POR, 1083 PREFETCHT0, 1086 PREFETCHT1, 1086 PREFETCHT2, 1086 PREFETCHNTA, 1086 PREFETCHW, 1088 PREFETCHWT1, 2090 PSADBW, 1090 VPSADBW, 1090 PSHUFB, 1094 VPSHUFB, 1094 PSHUFD, 1098 VPSHUFD, 1098 PSHUFHW, 1102 VPSHUFHW, 1102 PSHUFLW, 1105 VPSHUFLW, 1105 PSHUFW, 1108 PSIGNB, 1109 VPSIGNB, 1109 PSIGNW, 1109 VPSIGNW, 1109 PSIGND, 1109 VPSIGND, 1109 PSLLDQ, 1113 VPSLLDQ, 1113 PSLLW, 1115 VPSLLW, 1115 PSLLD, 1115 VPSLLD, 1115 PSLLQ, 1115 VPSLLQ, 1115 PSRAW, 1127 VPSRAW, 1127 PSRAD, 1127 VPSRAD, 1127 VPSRAQ, 1127 PSRLDQ, 1137 VPSRLDQ, 1137 PSRLW, 1139 VPSRLW, 1139 PSRLD, 1139 VPSRLD, 1139 PSRLQ, 1139 VPSRLQ, 1139 PSUBB, 1151 VPSUBB, 1151 PSUBW, 1151 VPSUBW, 1151 PSUBD, 1151 VPSUBD, 1151 PSUBQ, 1158 VPSUBQ, 1158 PSUBSB, 1161 VPSUBSB, 1161 PSUBSW, 1161 VPSUBSW, 1161 PSUBUSB, 1165 VPSUBUSB, 1165 PSUBUSW, 1165 VPSUBUSW, 1165 PTEST, 1169 VPTEST, 1169 PTWRITE, 1171 PUNPCKHBW, 1173 VPUNPCKHBW, 1173 PUNPCKHWD, 1173 VPUNPCKHWD, 1173 PUNPCKHDQ, 1173 VPUNPCKHDQ, 1173 PUNPCKHQDQ, 1173 VPUNPCKHQDQ, 1173 PUNPCKLBW, 1183 VPUNPCKLBW, 1183 PUNPCKLWD, 1183 VPUNPCKLWD, 1183 PUNPCKLDQ, 1183 VPUNPCKLDQ, 1183 PUNPCKLQDQ, 1183 VPUNPCKLQDQ, 1183 PUSH, 1193 PUSHA, 1196 PUSHAD, 1196 PUSHF, 1198 PUSHFD, 1198 PUSHFQ, 1198 PXOR, 1200 VPXOR, 1200 VPXORD, 1200 VPXORQ, 1200 RCL, 1203 RCR, 1203 ROL, 1203 ROR, 1203 RCPPS, 1208 VRCPPS, 1208 RCPSS, 1210 VRCPSS, 1210 RDFSBASE, 1212 RDGSBASE, 1212 RDMSR, 1214 RDPID, 1216 RDPKRU, 1217 RDPMC, 1219 RDRAND, 1221 RDSEED, 1223 RDSSPD, 1225 RDSSPQ, 1225 RDTSC, 1227 RDTSCP, 1229 REP, 1231 REPE, 1231 REPZ, 1231 REPNE, 1231 REPNZ, 1231 RET, 1235 RORX, 1248 ROUNDPD, 1249 VROUNDPD, 1249 ROUNDPS, 1252 VROUNDPS, 1252 ROUNDSD, 1255 VROUNDSD, 1255 ROUNDSS, 1257 VROUNDSS, 1257 RSM, 1259 RSQRTPS, 1261 
VRSQRTPS, 1261 RSQRTSS, 1263 VRSQRTSS, 1263 RSTORSSP, 1265 SAHF, 1268 SAL, 1270 SAR, 1270 SHL, 1270 SHR, 1270 SARX, 1275 SHLX, 1275 SHRX, 1275 SBB, 1279 SCAS, 1282 SCASB, 1282 SCASW, 1282 SCASD, 1282 SCASQ, 1282 SET, 1286 SETA, 1286 SETAE, 1286 SETB, 1286 SETBE, 1286 SETC, 1286 SETE, 1286 SETG, 1286 SETGE, 1286 SETL, 1286 SETLE, 1286 SETNA, 1286 SETNAE, 1286 SETNB, 1286 SETNBE, 1286 SETNC, 1286 SETNE, 1286 SETNG, 1286 SETNGE, 1286 SETNL, 1286 SETNLE, 1286 SETNO, 1286 SETNP, 1286 SETNS, 1286 SETNZ, 1286 SETO, 1286 SETP, 1286 SETPE, 1286 SETPO, 1286 SETS, 1286 SETZ, 1286 SETSSBSY, 1289 SFENCE, 1291 SGDT, 1292 SHA1RNDS4, 1294 SHA1NEXTE, 1296 SHA1MSG1, 1297 SHA1MSG2, 1298 SHA256RNDS2, 1299 SHA256MSG1, 1301 SHA256MSG2, 1302 SHLD, 1303 SHRD, 1306 SHUFPD, 1309 VSHUFPD, 1309 SHUFPS, 1314 VSHUFPS, 1314 SIDT, 1318 SLDT, 1320 SMSW, 1322 SQRTPD, 1324 VSQRTPD, 1324 SQRTPS, 1327 VSQRTPS, 1327 SQRTSD, 1330 VSQRTSD, 1330 SQRTSS, 1332 VSQRTSS, 1332 STAC, 1334 STC, 1335 STD, 1336 STI, 1337 STMXCSR, 1339 STOS, 1340 STOSB, 1340 STOSW, 1340 STOSD, 1340 STOSQ, 1340 STR, 1344 SUB, 1346 SUBPD, 1348 VSUBPD, 1348 SUBPS, 1351 VSUBPS, 1351 SUBSD, 1354 VSUBSD, 1354 SUBSS, 1356 VSUBSS, 1356 SWAPGS, 1358 SYSCALL, 1360 SYSENTER, 1363 SYSEXIT, 1366 SYSRET, 1369 TEST, 1372 TPAUSE, 1374 TZCNT, 1376 UCOMISD, 1378 VUCOMISD, 1378 UCOMISS, 1380 VUCOMISS, 1380 UD0, 1382 UD1, 1382 UD2, 1382 UMONITOR, 1383 UMWAIT, 1385 UNPCKHPD, 1387 VUNPCKHPD, 1387 UNPCKHPS, 1391 VUNPCKHPS, 1391 UNPCKLPD, 1395 VUNPCKLPD, 1395 UNPCKLPS, 1399 VUNPCKLPS, 1399 VALIGND, 1407 VALIGNQ, 1407 VBLENDMPD, 1411 VBLENDMPS, 1411 VBROADCAST, 1414 VBROADCASTSS, 1414 VBROADCASTSD, 1414 VBROADCASTF128, 1414 VBROADCASTF32X2, 1414 VBROADCASTF32X4, 1414 VBROADCASTF64X2, 1414 VBROADCASTF32X8, 1414 VBROADCASTF64X4, 1414 VCOMPRESSPD, 1422 VCOMPRESSPS, 1424 VCVTPD2QQ, 1426 VCVTPD2UDQ, 1429 VCVTPD2UQQ, 1432 VCVTPH2PS, 1435 VCVTPS2PH, 1438 VCVTPS2UDQ, 1442 VCVTPS2QQ, 1445 VCVTPS2UQQ, 1448 VCVTQQ2PD, 1451 VCVTQQ2PS, 1453 VCVTSD2USI, 1455 VCVTSS2USI, 
1456 VCVTTPD2QQ, 1458 VCVTTPD2UDQ, 1460 VCVTTPD2UQQ, 1463 VCVTTPS2UDQ, 1465 VCVTTPS2QQ, 1467 VCVTTPS2UQQ, 1469 VCVTTSD2USI, 1471 VCVTTSS2USI, 1472 VCVTUDQ2PD, 1474 VCVTUDQ2PS, 1476 VCVTUQQ2PD, 1478 VCVTUQQ2PS, 1480 VCVTUSI2SD, 1482 VCVTUSI2SS, 1484 VDBPSADBW, 1486 VEXPANDPD, 1490 VEXPANDPS, 1492 VERR, 1494 VERW, 1494 VEXP2PD, 2096 VEXP2PS, 2098 VEXTRACTF128, 1496 VEXTRACTF32X4, 1496 VEXTRACTF64X2, 1496 VEXTRACTF32X8, 1496 VEXTRACTF64X4, 1496 VEXTRACTI128, 1502 VEXTRACTI32X4, 1502 VEXTRACTI64X2, 1502 VEXTRACTI32X8, 1502 VEXTRACTI64X4, 1502 VFIXUPIMMPD, 1508 VFIXUPIMMPS, 1512 VFIXUPIMMSD, 1516 VFIXUPIMMSS, 1519 VFMADD132PD, 1522 VFMADD213PD, 1522 VFMADD231PD, 1522 VFMADD132PS, 1529 VFMADD213PS, 1529 VFMADD231PS, 1529 VFMADD132SD, 1536 VFMADD213SD, 1536 VFMADD231SD, 1536 VFMADD132SS, 1539 VFMADD213SS, 1539 VFMADD231SS, 1539 VFMADDSUB132PD, 1542 VFMADDSUB213PD, 1542 VFMADDSUB231PD, 1542 VFMADDSUB132PS, 1552 VFMADDSUB213PS, 1552 VFMADDSUB231PS, 1552 VFMSUBADD132PD, 1561 VFMSUBADD213PD, 1561 VFMSUBADD231PD, 1561 VFMSUBADD132PS, 1571 VFMSUBADD213PS, 1571 VFMSUBADD231PS, 1571 VFMSUB132PD, 1581 VFMSUB213PD, 1581 VFMSUB231PD, 1581 VFMSUB132PS, 1588 VFMSUB213PS, 1588 VFMSUB231PS, 1588 VFMSUB132SD, 1595 VFMSUB213SD, 1595 VFMSUB231SD, 1595 VFMSUB132SS, 1598 VFMSUB213SS, 1598 VFMSUB231SS, 1598 VFNMADD132PD, 1601 VFNMADD213PD, 1601 VFNMADD231PD, 1601 VFNMADD132PS, 1608 VFNMADD213PS, 1608 VFNMADD231PS, 1608 VFNMADD132SD, 1614 VFNMADD213SD, 1614 VFNMADD231SD, 1614 VFNMADD132SS, 1617 VFNMADD213SS, 1617 VFNMADD231SS, 1617 VFNMSUB132PD, 1620 VFNMSUB213PD, 1620 VFNMSUB231PD, 1620 VFNMSUB132PS, 1626 VFNMSUB213PS, 1626 VFNMSUB231PS, 1626 VFNMSUB132SD, 1632 VFNMSUB213SD, 1632 VFNMSUB231SD, 1632 VFNMSUB132SS, 1635 VFNMSUB213SS, 1635 VFNMSUB231SS, 1635 VFPCLASSPD, 1638 VFPCLASSPS, 1641 VFPCLASSSD, 1643 VFPCLASSSS, 1645 VGATHERDPD, 1647 VGATHERQPD, 1647 VGATHERDPS, 1652 VGATHERQPS, 1652 VGATHERPFODPS, 2100 VGATHERPFOQPS, 2100 VGATHERPFODPD, 2100 VGATHERPFOQPD, 2100 VGATHERPF1DPS, 2102 
VGATHERPF1QPS, 2102 VGATHERPF1DPD, 2102 VGATHERPF1QPD, 2102 VGETEXPPD, 1663 VGETEXPPS, 1666 VGETEXPSD, 1670 VGETEXPSS, 1672 VGETMANTPD, 1674 VGETMANTPS, 1678 VGETMANTSD, 1681 VGETMANTSS, 1683 VINSERTF128, 1685 VINSERTF32X4, 1685 VINSERTF64X2, 1685 VINSERTF32X8, 1685 VINSERTF64X4, 1685 VINSERTI128, 1689 VINSERTI32X4, 1689 VINSERTI64X2, 1689 VINSERTI32X8, 1689 VINSERTI64X4, 1689 VMASKMOV, 1693 VMASKMOVPS, 1693 VMASKMOVPD, 1693 VBLENDD, 1696 VPBLENDMB, 1698 VPBLENDMW, 1698 VPBLENDMD, 1700 VPBLENDMQ, 1700 VPBROADCASTB, 1703 VPBROADCASTW, 1703 VPBROADCASTD, 1703 VPBROADCASTQ, 1703 VPBROADCASTI32X2, 1706 VPBROADCASTI128, 1706 VPBROADCASTI32X4, 1706 VPBROADCASTI64X2, 1706 VPBROADCASTI32X8, 1706 VPBROADCASTI64X4, 1706 VPBROADCASTM, 1715 VPBROADCASTMB2Q, 1715 VPBROADCASTMW2D, 1715 VPCMPB, 1717 VPCMPUB, 1717 VPCMPD, 1720 VPCMPUD, 1720 VPCMPQ, 1723 VPCMPUQ, 1723 VPCMPW, 1726 VPCMPUW, 1726 VCOMPRESSB, 1729 VCOMPRESSW, 1729 VCOMPRESSD, 1732 VCOMPRESSQ, 1734 VPCONFLICTD, 1736 VPCONFLICTQ, 1736 VPDPBUSD, 1739 VPDPBUSDS, 1741 VPDWSSD, 1743 VPDPWSSDS, 1745 VPERM2F128, 1747 VPERM2I128, 1749 VPERMB, 1751 VPERMD, 1753 VPERMW, 1753 VPERMI2B, 1756 VPERMI2W, 1758 VPERMI2D, 1758 VPERMI2Q, 1758 VPERMI2PS, 1758 VPERMI2PD, 1758 VPERMILPD, 1764 VPERMILPS, 1769 VPERMPD, 1774 VPERMPS, 1777 VPERMQ, 1780 VPERMT2B, 1783 VPERMT2W, 1785 VPERMT2D, 1785 VPERMT2Q, 1785 VPERMT2PS, 1785 VPERMT2PD, 1785 VPEXPANDB, 1790 VPEXPANDW, 1790 VEXPANDD, 1793 VEXPANDQ, 1795 VPGATHERDD, 1797 VPGATHERQD, 1797 VPGATHERDQ, 1804 VPGATHERQQ, 1804 VPLZCNTD, 1811 VPLZCNTQ, 1811 VPMADD52HUQ, 1814 VPMADD52LUQ, 1816 VPMASKMOVD, 1818 VPMASKMOVQ, 1818 VPMOVM2B, 1832 VPMOVM2W, 1832 VPMOVM2D, 1832 VPMOVM2Q, 1832 VPMOVB2M, 1821 VPMOVW2M, 1821 VPMOVD2M, 1821 VPMOVQ2M, 1821 VPMOVQB, 1835 VPMOVSQB, 1835 VPMOVUSQB, 1835 VPMOVQW, 1843 VPMOVSQW, 1843 VPMOVUSQW, 1843 VPMOVQD, 1839 VPMOVSQD, 1839 VPMOVUSQD, 1839 VPMOVDB, 1824 VPMOVSDB, 1824 VPMOVUSDB, 1824 VPMOVDW, 1828 VPMOVSDW, 1828 VPMOVUSDW, 1828 VPMOVWB, 1847 VPMOVSWB, 1847 
VPMOVUSWB, 1847 VPMULTISHIFTQB, 1851 VPOPCNTB, 1853 VPOPCNTW, 1853 VPOPCNTD, 1853 VPOPCNTQ, 1853 PROLD, 1856 VPROLD, 1856 PROLVD, 1856 VPROLVD, 1856 PROLVQ, 1856 VPROLVQ, 1856 PRORD, 1861 VPRORD, 1861 PRORVD, 1861 VPRORVD, 1861 PRORQ, 1861 VPRORQ, 1861 PRORVQ, 1861 VPRORVQ, 1861 VPSCATTERDD, 1866 VPSCATTERDQ, 1866 VPSCATTERQD, 1866 VPSCATTERQQ, 1866 VPSHLD, 1870 VPSHLDW, 1870 VPSHLDD, 1870 VPSHLDQ, 1870 VPSHLDV, 1873 VPSHLDVW, 1873 VPSHLDVD, 1873 VPSHLDVQ, 1873 VPSHRD, 1876 VPSHRDW, 1876 VPSHRDD, 1876 VPSHRDQ, 1876 VPSHRDV, 1879 VPSHRDVW, 1879 VPSHRDVD, 1879 VPSHRDVQ, 1879 VPSHUFBITQMB, 1882 VPSLLVW, 1883 VPSLLVD, 1883 VPSLLVQ, 1883 VPSRAVW, 1888 VPSRAVD, 1888 VPSRAVQ, 1888 VPSRLVW, 1893 VPSRLVD, 1893 VPSRLVQ, 1893 VPTERNLOGD, 1898 VPTERNLOGQ, 1898 VPTESTMB, 1901 VPTESTMW, 1901 VPTESTMD, 1901 VPTESTMQ, 1901 VPTESTNMB, 1904 VPTESTNMW, 1904 VPTESTNMD, 1904 VPTESTNMQ, 1904 VRANGEPD, 1908 VRANGEPS, 1913 VRANGESD, 1917 VRANGESS, 1920 VRCP14PD, 1923 VRCP14SD, 1925 VRCP14PS, 1927 VRCP14SS, 1929 VRCP28PD, 2108 VRCP28SD, 2110 VRCP28PS, 2112 VRCP28SS, 2114 VREDUCEPD, 1931 VREDUCESD, 1934 VREDUCEPS, 1936 VREDUCESS, 1938 VRNDSCALEPD, 1940 VRNDSCALESD, 1944 VRNDSCALEPS, 1946 VRNDSCALESS, 1949 VRSQRT14PD, 1951 VRSQRT14SD, 1953 VRSQRT14PS, 1955 VRSQRT14SS, 1957 VRSQRT28PD, 2116 VRSQRT28SD, 2118 VRSQRT28PS, 2120 VRSQRT28SS, 2122 VSCALEFPD, 1959 VSCALEFSD, 1962 VSCALEFPS, 1964 VSCALEFSS, 1967 VSCATTERDPS, 1969 VSCATTERDPD, 1969 VSCATTERQPS, 1969 VSCATTERQPD, 1969 VSCATTERPFODPS, 2124 VSCATTERPFOQPS, 2124 VSCATTERPFODPD, 2124 VSCATTERPFOQPD, 2124 VSCATTERPF1DPS, 2126 VSCATTERPF1QPS, 2126 VSCATTERPF1DPD, 2126 VSCATTERPF1QPD, 2126 VSHUFF32X4, 1973 VSHUFF64X2, 1973 VSHUFI32X4, 1973 VSHUFI64X2, 1973 VTESTPD, 1978 VTESTPS, 1978 VZEROALL, 1981 VZEROUPPER, 1982 WAIT, 1983 FWAIT, 1983 WBINVD, 1984 WRFSBASE, 1986 WRGSBASE, 1986 WRMSR, 1988 WRPKRU, 1990 WRSSD, 1991 WRSSQ, 1991 WRUSSD, 1993 WRUSSQ, 1993 XACQUIRE, 1995 XRELEASE, 1995 XABORT, 1999 XADD, 2001 XBEGIN, 2003 XCHG, 2006 XEND, 2008 
XGETBV, 2010 XLAT, 2012 XLATB, 2012 XOR, 2014 XORPD, 2016 VXORPD, 2016 XORPS, 2019 VXORPS, 2019 XRSTOR, 2022 XRSTOR64, 2022 XSTORS, 2027 XSTORS64, 2027 XSAVE, 2031 XSAVE64, 2031 XSAVEC, 2034 XSAVEC64, 2034 XSAVEOPT, 2037 XSAVEOPT64, 2037 XSAVES, 2040 XSAVES64, 2040 XSETBV, 2043 XTEST, 2045 @24594.pdf [AMD64 Architecture Programmer's Manual Volume 3: General-Purpose and System Instructions, Rev 3.32 March 2021 (24594)] BLCFILL, 135 BLCI, 137 BLCIC, 139 BLCMSK, 141 BLCS, 143 BLSFILL, 145 BLSIC, 149 CLZERO, 190 LLWPCB, 255 LOOPNZ, 260 LOOPZ, 260 LWPINS, 262 LWPVAL, 264 MONITORX, 271 MWAITX, 297 PREFETCH, 323 SLWPCB, 380 T1MSKC, 388 TZMSK, 394 UD0, 396 UD1, 396 UD2, 396 CLGI, 414 INVLPGA, 428 SKINIT, 496 STGI, 504 VMLOAD, 524 VMMCALL, 526 VMRUN, 527 VMSAVE, 532 @26568.pdf [AMD64 Architecture Programmer's Manual Volume 4: 128-Bit and 256-Bit Media Instructions, Rev 3.24 May 2020 (26568)] EXTRQ, 181 INSERTQ, 196 MOVNTSD, 260 MOVNTSS, 262 VPHSUBD, 388 VPOR, 474 VSTMXCSR, 615 VBROADCASTI128, 639 VFMADDPD, 656 VFMADDPS, 659 VFMADDSD, 662 VFMADDSS, 665 VFMADDSUBPD, 668 VFMADDSUBPS, 671 VFMADDPD, 674 VFMADDPS, 677 VFMSUBPD, 680 VFMSUBPS, 683 VFMSUBSD, 686 VFMSUBSS, 689 VFNMADDPD, 692 VFNMADDPS, 695 VFNMADDSD, 698 VFNMADDSS, 701 VFNMSUBPD, 704 VFNMSUBPS, 707 VFNMSUBSD, 710 VFNMSUBSS, 713 VFRCZPD, 716 VFRCZPS, 718 VFRCZSD, 720 VFRCZSS, 722 VPCMOV, 750 VPCOMB, 752 VPCOMD, 754 VPCOMQ, 756 VPCOMUB, 758 VPCOMUD, 760 VPCOMUQ, 762 VPCOMUW, 764 VPCOMW, 766 VPERMIL2PD, 774 VPERMIL2PS, 778 VPHADDBD, 803 VPHADDBQ, 805 VPHADDBW, 807 VPHADDDQ, 809 VPHADDUBD, 811 VPHADDUBQ, 813 VPHADDUBW, 815 VPHADDUDQ, 817 VPHADDUWD, 819 VPHADDUWQ, 821 VPHADDWD, 823 VPHADDWQ, 825 VPHSUBBW, 827 VPHSUBDQ, 829 VPHSUBWD, 831 VPMACSDD, 833 VPMACSDQH, 835 VPMACSDQL, 837 VPMACSSDD, 839 VPMACSSDQH, 841 VPMACSSDQL, 843 VPMACSSWD, 845 VPMACSSWW, 847 VPMACSWD, 849 VPMACSWW, 851 VPMADCSSWD, 853 VPMADCSWD, 855 VPPERM, 861 VPROTB, 863 VPROTD, 865 VPROTQ, 867 VPROTW, 869 VPSHAB, 871 VPSHAD, 873 VPSHAQ, 875 VPSHAW, 877 
VPSHLB, 879 VPSHLD, 881 VPSHLQ, 883 VPSHLW, 885 @26569_APM_V5.pdf [AMD64 Architecture Programmer's Manual Volume 5: 64-Bit Media and x87 Floating-Point Instructions, Rev 3.15 May 2018 (26569)] FEMMS, 48 PAVGUSB, 100 PF2ID, 118 PF2IW, 120 PFACC, 122 PFADD, 124 PFCMPEQ, 126 PFCMPGE, 128 PFCMPGT, 131 PFMAX, 133 PFMIN, 135 PFMUL, 137 PFNACC, 139 PFPNACC, 142 PFRCP, 145 PFRCPIT1, 148 PFRCPIT2, 151 PFRSQIT1, 154 PFRSQRT, 157 PFSUB, 160 PFSUBR, 162 PI2FD, 164 PI2FW, 166 PMULHRW, 182 PSWAPD, 231 @AMD64_128-bit_SSE5_Instructions.pdf [AMD64 Technology 128-Bit SSE5 Instruction Set, Rev 3.01 August 2007 (43479)] COMPD, 32 COMPS, 35 COMSD, 38 COMSS, 42 CVTPH2PS, 45 CVTPS2PH, 47 FMADDPD, 50 FMADDPS, 53 FMADDSD, 56 FMADDSS, 59 FMSUBPD, 62 FMSUBPS, 65 FMSUBSD, 68 FMSUBSS, 71 FNMADDPD, 74 FNMADDPS, 77 FNMADDSD, 80 FNMADDSS, 83 FNMSUBPD, 86 FNMSUBPS, 89 FNMSUBSD, 92 FNMSUBSS, 95 FRCZPD, 98 FRCZPS, 100 FRCZSD, 102 FRCZSS, 104 PCMOV, 106 PCOMB, 109 PCOMD, 112 PCOMQ, 115 PCOMUB, 118 PCOMUD, 121 PCOMUQ, 124 PCOMUW, 127 PCOMW, 130 PERMPD, 133 PERMPS, 137 PHADDBD, 141 PHADDBQ, 143 PHADDBW, 145 PHADDDQ, 147 PHADDUBD, 149 PHADDUBQ, 151 PHADDUBW, 153 PHADDUDQ, 155 PHADDUWD, 157 PHADDUWQ, 159 PHADDWD, 161 PHADDWQ, 163 PHSUBBW, 165 PHSUBBQ, 167 PHSUBWD, 169 PMACSDD, 171 PMACSDQH, 174 PMACSDQL, 177 PMACSSDD, 180 PMACSSDQH, 183 PMACSSDQL, 186 PMACSSWD, 189 PMACSSWW, 192 PMACSWD, 195 PMACSWW, 198 PMADCSSWD, 201 PMADCSWD, 204 PPERM, 207 PROTB, 211 PROTD, 214 PROTQ, 217 PROTW, 220 PSHAB, 223 PSHAD, 225 PSHAQ, 227 PSHAW, 229 PSHLB, 231 PSHLD, 233 PSHLQ, 236 PSHLW, 238 @326019-074.pdf [Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3C: System Programming Guide, Part 3, 326019-074US April 2021] INVEPT, 157 INVVPID, 160 VMCALL, 163 VMCLEAR, 165 VMFUNC, 167 VMLAUNCH, 168 VMRESUME, 168 VMPTRLD, 171 VMPTRST, 173 VMREAD, 175 VMWRITE, 178 VMXOFF, 180 VMXON, 182 ================================================ FILE: pypcode/processors/x86/data/patterns/patternconstraints.xml 
================================================ x86win_patterns.xml x86win_patterns.xml x86delphi_patterns.xml x86gcc_patterns.xml x86-64win_patterns.xml x86-64gcc_patterns.xml x86-16_default_patterns.xml x86-16_default_patterns.xml ================================================ FILE: pypcode/processors/x86/data/patterns/prepatternconstraints.xml ================================================ x86win_prepatterns.xml x86win_prepatterns.xml x86gcc_prepatterns.xml x86gcc_prepatterns.xml ================================================ FILE: pypcode/processors/x86/data/patterns/x86-16_default_patterns.xml ================================================ 0xc9c3 0xc9cb 0x4dcb 0.011... 1100.011 1100.010 .0.....0 0x00 1100.010 .0.....0 0x00 0x90 0xc390 0xcb90 0xc3 0x558bec 0x5589e5 0xc8 000....0 0x0000 0x8cd89045 0x8cd055 ================================================ FILE: pypcode/processors/x86/data/patterns/x86-64gcc_patterns.xml ================================================ 0x90 0x90 0xc3 0x90 0x6690 0xc9 0xc3 0xe9........ 0xe9........90 0xeb.. 0xeb..90 0x5d 0xc3 0x5b 0xc3 0x41 010111.. 0xc3 0x31c0 0xc3 0x4883c4 ....1000 0xc3 0x666690 0x0f1f00 0x0f1f4000 0x0f1f440000 0x660f1f440000 0x0f1f8000000000 0x0f1f840000000000 0x660f1f840000000000 0x48 0x89 0x5c 0x24 11...000 0x48 0x89 0x6c 0x24 11...000 0x48 0x89 0x5c 0x24 11...000 0x4c 0x89 0x64 0x24 111..000 0x48 0x89 0x6c 0x24 11...000 0x4c 0x89 0x64 0x24 111..000 0x5589e5 0x554889e5 0x534889fb 0x554889fd 0x534889fb 0x53 0x48 0x83 0xec 0....000 0x53 0x48 0x81 0xec .....000 00...... 0x00 0x55 0x48 0x89 0xe5 0x48 100000.1 0xec .....000 0x554889e553 0x554889fd53 0x554889e548897df8 0x53 0x48 0x89 0xfb 0xe8 ........ ........ 0xff 0xff 0x4154 0x55 0100100. 0x89 11...... 0x4154 0x55 0x53 0100100. 0x89 11...... 0x415741564155 0x41564155 0x41554154 0x41 010101.. 0100100. 0x89 11...... 
0x55 0xf3 0x0f 0x1e 0xfa 0x48 0x89 0x5c 0x24 11...000 0x48 0x89 0x6c 0x24 11...000 0xf3 0x0f 0x1e 0xfa 0x48 0x89 0x5c 0x24 11...000 0x4c 0x89 0x64 0x24 111..000 0xf3 0x0f 0x1e 0xfa 0x48 0x89 0x6c 0x24 11...000 0x4c 0x89 0x64 0x24 111..000 0xf3 0x0f 0x1e 0xfa 0x5589e5 0xf3 0x0f 0x1e 0xfa 0x554889e5 0xf3 0x0f 0x1e 0xfa 0x534889fb 0xf3 0x0f 0x1e 0xfa 0x554889fd 0xf3 0x0f 0x1e 0xfa 0x534889fb 0xf3 0x0f 0x1e 0xfa 0x53 0x48 0x83 0xec 0....000 0xf3 0x0f 0x1e 0xfa 0x53 0x48 0x81 0xec .....000 00...... 0x00 0xf3 0x0f 0x1e 0xfa 0x55 0x48 0x89 0xe5 0x48 100000.1 0xec .....000 0xf3 0x0f 0x1e 0xfa 0x554889e553 0xf3 0x0f 0x1e 0xfa 0x554889fd53 0xf3 0x0f 0x1e 0xfa 0x554889e548897df8 0xf3 0x0f 0x1e 0xfa 0x53 0x48 0x89 0xfb 0xe8 ........ ........ 0xff 0xff 0xf3 0x0f 0x1e 0xfa 0x4154 0x55 0100100. 0x89 11...... 0xf3 0x0f 0x1e 0xfa 0x4154 0x55 0x53 0100100. 0x89 11...... 0xf3 0x0f 0x1e 0xfa 0x415741564155 0xf3 0x0f 0x1e 0xfa 0x41564155 0xf3 0x0f 0x1e 0xfa 0x41554154 0xf3 0x0f 0x1e 0xfa 0x41 010101.. 0100100. 0x89 11...... 0x55 0xf3 0x0f 0x1e 0xfa 0x41 010101.. 0x41 010101.. 0100100. 0x89 11...... 0x90 0x90 0xc3 0x90 0x6690 0xc9 0xc3 0xe9........ 0xe9........90 0xeb.. 0xeb..90 0x5d 0xc3 0x5b 0xc3 0x41 010111.. 0xc3 0x31c0 0xc3 0x4883c4 ....1000 0xc3 0x666690 0x0f1f00 0x0f1f4000 0x0f1f440000 0x660f1f440000 0x0f1f8000000000 0x0f1f840000000000 0x660f1f840000000000 0xf3 0x0f 0x1e 0xfa 0x5589e5 0x55 0x53 0100100. 0x89 11...... 0x4154 0x55 0100100. 0x89 11...... 0x4154 0x55 0x53 0100100. 0x89 11...... 0x53 0x48 0x83 0xec 0....000 0x48 0x83 0xec .....000 0x48 0x81 0xec .....000 00...... 0x00 0x55 0x53 0x48 0x83 100000.1 0xec .....000 0x554889e5 0x55 0x48 0x89 0xe5 0x48 100000.1 0xec .....000 0x554889e553 0x4157 0x4156 0x4155 0x4157 0x4156 0x4156 0x4155 0x41554154 0x41 010101.. 0100100. 0x89 11...... 0x55 0x41 010101.. 0x41 010101.. 0100100. 0x89 11...... 0x41 010101.. 0x41 010101.. 0100100. 0x89 11...... 
================================================ FILE: pypcode/processors/x86/data/patterns/x86-64win_patterns.xml ================================================ 0x909090 * 0x4883ec 0x909090 * 0x4889 01...100..100100 0x90 * 0x4889 01...100..100100 0x..4889 0x90 * 0x4889 01...100..100100 ......00 01010...01010... 0xc3 * 0x4889 01...100..100100 ......00 01010...01010... 0xc3 * 0x55488d2c24 0x90 * 0x55488d2c24 0x90 * 01010... 0x55488d2c24 0xc3 * 01010... 0x55488d2c24 0x909090 * 0x488bc4 0x90 * 0xfff54883ec 0x90 * 0x554883ec 0xc3 * 0x554883ec 0xcccccc * 0x4883ec 0xcccccc * 0x4889 01...100..100100 0xcccccc * 0x488bc4 0xcc * 0xfff54883ec 0xcc * 0x40 01010... 0x4883ec 0xcc * 0x554883ec 0xcccccc * 0x4055488b 11101... 0xcc * 0x4c8b 11...100 0x4883ec 0xcccc * 0x4c8b 11...100 01001.01 0x89 01001... 0x3b 0x0d ........ ........ ........ ........ 0x75 0x10 01001... 0xc1 0xc1 0x10 0x66 0xf7 0xc1 0xff 0xff 0x75 0x01 0xc3 01001... 0xc1 0xc9 0x10 0xe9 ================================================ FILE: pypcode/processors/x86/data/patterns/x86delphi_patterns.xml ================================================ 0x9090 0xc390 0x5bc3 0xc2 0000..00 0x00 0xc2 0000..00 0x0090 0xe8 ........ ........ ........ ........ 0xc3 0xeb.. 0xe9........ 0x558bec 0x538bd8 0x535657 0x53568bd8 0x53568bf0 0x535684d2 0x53 0x56 0x83 0xc4 1...1.00 0x558bec 0x538bd8 0x535657 0x53568bd8 0x53568bf0 0x535684d2 0x53 0x56 0x83 0xc4 1...1.00 ================================================ FILE: pypcode/processors/x86/data/patterns/x86gcc_patterns.xml ================================================ 0x5589e583ec 0x5589e581ec....0000 0x5589e5..83ec 0x5589e5....83ec 0x5589e5 01010... 01010... 0x5589e58b 01...101 0x83 0xec 0.....00 100010.1 01...100 ..100100 0.....00 0x81 0xec ......00 0000.... 0x00 0x00 100010.1 01...100 ..100100 0.....00 0x5. 0x83 0xec 0.....00 100010.1 01...100 ..100100 0.....00 0x5. 0x81 0xec ......00 0000.... 0x00 0x00 0x5. 0x5. 100000.1 0xec ......00 0x5. 0x5. 0x5. 
100000.1 0xec ......00 0x5. 0x5. 0x5. 0x5. 100000.1 0xec ......00 0x8b 0x04 0x24 0xc3 0x8b 0x1c 0x24 0xc3 0x8b 0x0c 0x24 0xc3 0x8b 0x14 0x24 0xc3 0x8b 0x34 0x24 0xc3 0x90 0xc3 0xe9........ 0xeb.. 0x89f6 0x8d7600 0x8d742600 0x8db600000000 0x8dbf00000000 0x8dbc2700000000 0x8db42600000000 0x5589e5 0x8d 0x4c ..100100 0x04 0x83 0xe4 0xf. 0x57 0x8d 0x7c ..100100 0x08 0x83 0xe4 0xf. 0xf3 0x0f 0x1e 0xfb 0x5589e5 0xf3 0x0f 0x1e 0xfb 0x8d 0x4c ..100100 0x04 0x83 0xe4 0xf. 0xf3 0x0f 0x1e 0xfb 0x57 0x8d 0x7c ..100100 0x08 0x83 0xe4 0xf. 0x90 0xc3 0xe9........ 0xeb.. 0x89f6 0x8d7600 0x8d742600 0x8db600000000 0x8dbf00000000 0x8dbc2700000000 0x8db42600000000 0xf3 0x0f 0x1e 0xfb ================================================ FILE: pypcode/processors/x86/data/patterns/x86gcc_prepatterns.xml ================================================ 0xff25........ 0x68......00 0xe9......ff 0xf3 0x0f 0x1e 0xfa 0xf2 0xff 0x25 ================================================ FILE: pypcode/processors/x86/data/patterns/x86win_patterns.xml ================================================ 0xcc 0xcccc 0x90 0xc3 0xc9c3 0xc2 ......00 0x00 0x558bec 0x83ec 0.....00 0x6aff68........64a100000000 0x568bf1 0xb8........e8........ 100000.1 0xec 0xb8........e8 0x8bff558bec 0x538b 110110.. 0x535657 0x535556 0x535651 0x53568bf2 0x53568bd8 0x53568bf1 0x53568bda 0x53568bf0 0x56578bf9 0x56578bf1 0xe9........ 0xeb.. 0x558bec 0x568bf1 0xb8........e8........ 100000.1 0xec 0xb8........e8 0x8bff558bec 0x558bec 0x8bff558bec 0xcccc 0xcccccc 0xcccccccc 0xcccccc 0x6a.. 0x68........ 0xe8 0xcc 0xcccc 0x90 0xc3 0xc9c3 0xc2 ......00 0x00 0xe9........ 0xeb.. 01010... 0x8b 01...100 ..100100 000...00 0x518d4c24042bc81bc0f7d023c88bc42500f0ffff3bc8720a8bc159948b00890424c32d001000008500ebe9 0x518d4c24082bc883e10f03c11bc90bc159e9........ 0x518d4c24082bc883e10703c11bc90bc159e9........ 
0x8b4df4 0x64890d 0x00000000 0x59 0x5f 0x5f 0x5e 0x5b 0x8be5 0x5d 0x51 0xc3 0x8b4df0 0x64890d 0x00000000 0x59 0x5f 0x5f 0x5e 0x5b 0x8be5 0x5d 0x51 0xc3 0xcc 0x3b 0x0d 0x.. 0x.. 0x.. 0x.. 0x75 0x01 0xc3 0xe9 ================================================ FILE: pypcode/processors/x86/data/patterns/x86win_prepatterns.xml ================================================ 0x8bff 0x55 0x8bec 0x83ec20 0x8b4508 0x56 0x57 0x6a08 0x59 0xbe........ 0x8d7de0 0xf3a5 0x8945f8 0x8b450c 0x5f 0x8945fc 0x5e 0x85c0 0x740c 0xf60008 0x7407 0xc745f4........ 0x8d45f4 0x50 0xff75f0 0xff75e4 0xff75e0 0xff15........ 0xc9 0xc20800 ================================================ FILE: pypcode/py.typed ================================================ ================================================ FILE: pypcode/pypcode_native.cpp ================================================ #include #include #include #include #include #include #include #include #include #include #include "sleigh/error.hh" #include "sleigh/loadimage.hh" #include "sleigh/opcodes.hh" #include "sleigh/sleigh.hh" #include "sleigh/space.hh" #include "sleigh/translate.hh" #include "sleigh/xml.hh" namespace nb = nanobind; using namespace nb::literals; using namespace ghidra; const char * #include "__version__.py" ; // #define DEBUG 1 #ifndef DEBUG #define DEBUG 0 #endif #if DEBUG #define LOG(fmt, ...) fprintf(stderr, "pypcode_native: " fmt "\n", ##__VA_ARGS__); #else #define LOG(fmt, ...) \ do { \ } while (0) #endif #define MIN(x, y) ((x) < (y) ? 
(x) : (y)) struct PcodeOp { OpCode m_opcode; std::optional m_output; std::vector m_inputs; }; class ContextPypcode : public ContextInternal { bool m_finalized; std::unordered_set m_variables; public: ContextPypcode() : ContextInternal() { m_finalized = false; } void finalize() { m_finalized = true; } virtual void registerVariable(const string &nm, int4 sbit, int4 ebit) { if (!m_finalized) { ContextInternal::registerVariable(nm, sbit, ebit); m_variables.insert(nm); } } void resetAllVariables() { for (const string &nm : m_variables) { auto val = ContextDatabase::getDefaultValue(nm); setVariableRegion(nm, Address(Address::m_minimal), Address(), val); } } }; class SimpleLoadImage : public LoadImage { uintb m_baseaddr; int4 m_length; const unsigned char *m_data; public: SimpleLoadImage() : LoadImage("nofile") { m_baseaddr = 0; m_data = NULL; m_length = 0; } void setData(uintb ad, const unsigned char *ptr, int4 sz) { m_baseaddr = ad; m_data = ptr; m_length = sz; } void loadFill(uint1 *ptr, int4 size, const Address &addr) { LOG("Filling %d bytes at %lx", size, addr.getOffset()); uintb start = addr.getOffset(); uintb max = m_baseaddr + m_length - 1; // // When decoding an instruction, SLEIGH will attempt to pull in several // bytes at a time, starting at each instruction boundary. // // If the start address is outside of the defined range, bail out. // Otherwise, if we have some data to provide but cannot satisfy the // entire request, fill the remainder of the buffer with zero. 
// if (start > max || start < m_baseaddr) { throw std::out_of_range("Attempting to lift outside buffer range"); } for (int4 i = 0; i < size; i++) { uintb curoff = start + i; if ((curoff < m_baseaddr) || (curoff > max)) { ptr[i] = 0; continue; } uintb diff = curoff - m_baseaddr; ptr[i] = m_data[(int4)diff]; } } virtual string getArchType(void) const { return "myload"; } virtual void adjustVma(long adjust) { } }; class PcodeEmitCacher : public PcodeEmit { public: std::vector m_ops; bool m_bb_terminating_op_emitted; PcodeEmitCacher() : m_bb_terminating_op_emitted(false) { m_ops.reserve(512); } // Encode P-code ops into csleigh structures and append them to the translation buffer void dump(const Address &addr, OpCode opc, VarnodeData *outvar, VarnodeData *invars, int4 num_invars) { LOG("Emitting pcode op %d with %d-in,%d-out varnodes from %lx", opc, num_invars, outvar ? 1 : 0, addr.getOffset()); m_bb_terminating_op_emitted |= opc == CPUI_BRANCH || opc == CPUI_CBRANCH || opc == CPUI_BRANCHIND || opc == CPUI_RETURN || opc == CPUI_CALL || opc == CPUI_CALLIND; m_ops.emplace_back(); PcodeOp &op = m_ops.back(); op.m_opcode = opc; if (outvar) { op.m_output.emplace(*outvar); } op.m_inputs.reserve(num_invars); for (int i = 0; i < num_invars; i++) { op.m_inputs.emplace_back(invars[i]); } } }; struct DisassemblyInstruction { Address m_addr; uint64_t m_length; std::string m_mnem; std::string m_body; }; class AssemblyEmitCacher : public AssemblyEmit { public: DisassemblyInstruction &m_disas; AssemblyEmitCacher(DisassemblyInstruction &disas) : m_disas(disas) { } void dump(const Address &addr, const std::string &mnem, const std::string &body) { m_disas.m_addr = addr; m_disas.m_mnem = mnem; m_disas.m_body = body; }; }; class Disassembly { public: std::vector m_instructions; Disassembly() { LOG("Disassembly %p created", this); } Disassembly(Disassembly &&o) noexcept : m_instructions(std::move(o.m_instructions)) { LOG("Disassembly moved from %p to %p", &o, this); } ~Disassembly() { 
LOG("Disassembly %p released", this); } }; class Translation { public: std::vector m_ops; Translation() { LOG("Translation %p created", this); } Translation(Translation &&o) noexcept : m_ops(std::move(o.m_ops)) { LOG("Translation moved from %p to %p", &o, this); } ~Translation() { LOG("Translation %p released", this); } }; enum TranslateFlags { BB_TERMINATING = 1, }; class Context { public: SimpleLoadImage m_loader; ContextPypcode m_context_db; DocumentStorage m_document_storage; Document *m_document; Element *m_tags; std::unique_ptr m_sleigh; Context(const std::string &path) { LOG("Context %p created", this); // FIXME: Globals... AttributeId::initialize(); ElementId::initialize(); LOG("%p Loading slafile...", this); istringstream sleighfilename(path); m_document = m_document_storage.parseDocument(sleighfilename); m_tags = m_document->getRoot(); m_document_storage.registerTag(m_tags); LOG("Setting up translator"); m_sleigh.reset(new Sleigh(&m_loader, &m_context_db)); m_sleigh->initialize(m_document_storage); m_context_db.finalize(); } ~Context() { LOG("Context %p released", this); } void reset(void) { m_sleigh.reset(new Sleigh(&m_loader, &m_context_db)); m_sleigh->initialize(m_document_storage); m_context_db.finalize(); } std::unique_ptr disassemble(const char *bytes, unsigned int num_bytes, uint64_t address, unsigned int max_instructions) { LOG("%p Disassembling bytes=%p, num_bytes=%d, address=%lx", this, bytes, num_bytes, address); std::unique_ptr disassembly(new Disassembly()); int num_instructions = 0; uint32_t offset = 0; m_sleigh->fastReset(); m_loader.setData(address, (const unsigned char *)bytes, num_bytes); disassembly->m_instructions.reserve(10); while ((offset < num_bytes) && (!max_instructions || (num_instructions < max_instructions))) { Address addr(m_sleigh->getDefaultCodeSpace(), address + offset); disassembly->m_instructions.emplace_back(); DisassemblyInstruction &ins = disassembly->m_instructions.back(); AssemblyEmitCacher asm_cache(ins); // 
Disassemble the next instruction. If an error occurs after successful disassembly of at least one // instruction, suppress the error and return the successful disassembly. If the caller attempts // disassembly again at the position where the error occurred, then propagate the error. try { ins.m_length = m_sleigh->printAssembly(asm_cache, addr); } catch (BadDataError &err) { if (offset) { disassembly->m_instructions.resize(num_instructions); break; } throw err; } num_instructions += 1; offset += ins.m_length; } return disassembly; } std::unique_ptr translate(const char *bytes, unsigned int num_bytes, uint64_t base_address, unsigned int max_instructions, uint32_t flags) { LOG("%p Translating bytes=%p, num_bytes=%d, base_address=0x%lx, max_instructions=%d flags=0x%x", this, bytes, num_bytes, base_address, max_instructions, flags); std::unique_ptr translation(new Translation); PcodeEmitCacher pcode_cache; uint32_t offset = 0; m_sleigh->fastReset(); m_loader.setData(base_address, (const unsigned char *)bytes, num_bytes); int num_instructions = 0; while ((offset < num_bytes) && (!max_instructions || (num_instructions < max_instructions))) { Address addr(m_sleigh->getDefaultCodeSpace(), base_address + offset); LOG("Lifting at 0x%lx+0x%x=0x%lx", base_address, offset, base_address + offset); int imark_idx = pcode_cache.m_ops.size(); pcode_cache.m_ops.emplace_back(); // Translate the next instruction. If an error occurs after successful translation of at least one // instruction, suppress the error and return the successful translation. If the caller attempts // translation again at the position where the error occurred, then propagate the error. 
uint32_t num_bytes_decoded = 0; try { num_bytes_decoded = m_sleigh->oneInstruction(pcode_cache, addr); } catch (BadDataError &err) { if (offset) { pcode_cache.m_ops.resize(imark_idx); break; } throw err; } catch (UnimplError &err) { if (offset) { pcode_cache.m_ops.resize(imark_idx); break; } throw err; } PcodeOp &imark_op = pcode_cache.m_ops[imark_idx]; imark_op.m_opcode = OpCode::CPUI_IMARK; // Add varnode to imark op for every decoded instruction in this translation for (int sum = 0; sum < num_bytes_decoded;) { imark_op.m_inputs.emplace_back(); VarnodeData &imark_vn = imark_op.m_inputs.back(); imark_vn.space = addr.getSpace(); imark_vn.offset = addr.getOffset() + sum; imark_vn.size = m_sleigh->instructionLength(addr); sum += imark_vn.size; num_instructions++; } if ((flags & TranslateFlags::BB_TERMINATING) && pcode_cache.m_bb_terminating_op_emitted) { LOG("Reached end of block"); break; } offset += num_bytes_decoded; } translation->m_ops = std::move(pcode_cache.m_ops); return translation; } }; NB_MODULE(pypcode_native, m) { m.attr("__version__") = __version__; m.doc() = "pypcode native extension providing machine code disassembly and translation to P-Code."; nb::exception(m, "LowlevelError"); // "The lowest level error" nb::exception(m, "BadDataError"); // "Exception for bad instruction data" nb::exception(m, "UnimplError"); // "Exception for encountering unimplemented P-Code" nb::exception(m, "DecoderError"); // "An exception thrown by the XML parser" nb::class_(m, "AddrSpace", "A region where processor data is stored.") .def_prop_ro( "name", [](AddrSpace &as) { return as.getName(); }, "name(self) -> str\nThe name of this address space."); nb::class_
(m, "Address", "Low level machine byte address.") .def_prop_ro( "space", [](Address &a) { return a.getSpace(); }, nb::rv_policy::reference_internal, "space(self) -> AddrSpace\nThe address space.") .def_prop_ro( "offset", [](Address &a) { return a.getOffset(); }, "offset(self) -> int\nThe offset within the space."); nb::class_(m, "Varnode", "Data defining a specific memory location.") .def(nb::init<>()) .def_ro("space", &VarnodeData::space, nb::rv_policy::reference_internal, "space(self) -> AddrSpace\nThe address space.") .def_ro("offset", &VarnodeData::offset, "offset(self) -> int\nThe offset within the space.") .def_ro("size", &VarnodeData::size, "size(self) -> int\nThe number of bytes in the location.") .def( "getRegisterName", [](VarnodeData &a) { return a.space->getTrans()->getRegisterName(a.space, a.offset, a.size); }, "Return the register name if this Varnode references a register, otherwise return the empty string.") .def( "getUserDefinedOpName", [](VarnodeData &a) { vector userops; a.space->getTrans()->getUserOpNames(userops); if (a.offset >= userops.size()) { throw std::out_of_range("index out of range"); } return userops[a.offset]; }, "Get the name of a user defined operation.") .def( "getSpaceFromConst", [](VarnodeData &a) { return a.getSpaceFromConst(); }, nb::rv_policy::reference_internal, "Recover encoded address space from constant value."); nb::enum_(m, "OpCode", "OpCode defining a specific p-code operation.") .value("IMARK", OpCode::CPUI_IMARK, "Instruction marker") .value("COPY", OpCode::CPUI_COPY, "Copy one operand to another") .value("LOAD", OpCode::CPUI_LOAD, "Load from a pointer into a specified address space") .value("STORE", OpCode::CPUI_STORE, "Store at a pointer into a specified address space") .value("BRANCH", OpCode::CPUI_BRANCH, "Always branch") .value("CBRANCH", OpCode::CPUI_CBRANCH, "Conditional branch") .value("BRANCHIND", OpCode::CPUI_BRANCHIND, "Indirect branch (jumptable)") .value("CALL", OpCode::CPUI_CALL, "Call to an absolute 
address") .value("CALLIND", OpCode::CPUI_CALLIND, "Call through an indirect address") .value("CALLOTHER", OpCode::CPUI_CALLOTHER, "User-defined operation") .value("RETURN", OpCode::CPUI_RETURN, "Return from subroutine") .value("INT_EQUAL", OpCode::CPUI_INT_EQUAL, "Integer comparison, equality (==)") .value("INT_NOTEQUAL", OpCode::CPUI_INT_NOTEQUAL, "Integer comparison, in-equality (!=)") .value("INT_SLESS", OpCode::CPUI_INT_SLESS, "Integer comparison, signed less-than (<)") .value("INT_SLESSEQUAL", OpCode::CPUI_INT_SLESSEQUAL, "Integer comparison, signed less-than-or-equal (<=)") .value("INT_LESS", OpCode::CPUI_INT_LESS, "Integer comparison, unsigned less-than (<)") .value("INT_LESSEQUAL", OpCode::CPUI_INT_LESSEQUAL, "Integer comparison, unsigned less-than-or-equal (<=)") .value("INT_ZEXT", OpCode::CPUI_INT_ZEXT, "Zero extension") .value("INT_SEXT", OpCode::CPUI_INT_SEXT, "Sign extension") .value("INT_ADD", OpCode::CPUI_INT_ADD, "Addition, signed or unsigned (+)") .value("INT_SUB", OpCode::CPUI_INT_SUB, "Subtraction, signed or unsigned (-)") .value("INT_CARRY", OpCode::CPUI_INT_CARRY, "Test for unsigned carry") .value("INT_SCARRY", OpCode::CPUI_INT_SCARRY, "Test for signed carry") .value("INT_SBORROW", OpCode::CPUI_INT_SBORROW, "Test for signed borrow") .value("INT_2COMP", OpCode::CPUI_INT_2COMP, "Twos complement") .value("INT_NEGATE", OpCode::CPUI_INT_NEGATE, "Logical/bitwise negation (~)") .value("INT_XOR", OpCode::CPUI_INT_XOR, "Logical/bitwise exclusive-or (^)") .value("INT_AND", OpCode::CPUI_INT_AND, "Logical/bitwise and (&)") .value("INT_OR", OpCode::CPUI_INT_OR, "Logical/bitwise or (|)") .value("INT_LEFT", OpCode::CPUI_INT_LEFT, "Left shift (<<)") .value("INT_RIGHT", OpCode::CPUI_INT_RIGHT, "Right shift, logical (>>)") .value("INT_SRIGHT", OpCode::CPUI_INT_SRIGHT, "Right shift, arithmetic (>>)") .value("INT_MULT", OpCode::CPUI_INT_MULT, "Integer multiplication, signed and unsigned (*)") .value("INT_DIV", OpCode::CPUI_INT_DIV, "Integer division, unsigned 
(/)") .value("INT_SDIV", OpCode::CPUI_INT_SDIV, "Integer division, signed (/)") .value("INT_REM", OpCode::CPUI_INT_REM, "Remainder/modulo, unsigned (%)") .value("INT_SREM", OpCode::CPUI_INT_SREM, "Remainder/modulo, signed (%)") .value("BOOL_NEGATE", OpCode::CPUI_BOOL_NEGATE, "Boolean negate (!)") .value("BOOL_XOR", OpCode::CPUI_BOOL_XOR, "Boolean exclusive-or (^^)") .value("BOOL_AND", OpCode::CPUI_BOOL_AND, "Boolean and (&&)") .value("BOOL_OR", OpCode::CPUI_BOOL_OR, "Boolean or (||)") .value("FLOAT_EQUAL", OpCode::CPUI_FLOAT_EQUAL, "Floating-point comparison, equality (==)") .value("FLOAT_NOTEQUAL", OpCode::CPUI_FLOAT_NOTEQUAL, "Floating-point comparison, in-equality (!=)") .value("FLOAT_LESS", OpCode::CPUI_FLOAT_LESS, "Floating-point comparison, less-than (<)") .value("FLOAT_LESSEQUAL", OpCode::CPUI_FLOAT_LESSEQUAL, "Floating-point comparison, less-than-or-equal (<=)") .value("FLOAT_NAN", OpCode::CPUI_FLOAT_NAN, "Not-a-number test (NaN)") .value("FLOAT_ADD", OpCode::CPUI_FLOAT_ADD, "Floating-point addition (+)") .value("FLOAT_DIV", OpCode::CPUI_FLOAT_DIV, "Floating-point division (/)") .value("FLOAT_MULT", OpCode::CPUI_FLOAT_MULT, "Floating-point multiplication (*)") .value("FLOAT_SUB", OpCode::CPUI_FLOAT_SUB, "Floating-point subtraction (-)") .value("FLOAT_NEG", OpCode::CPUI_FLOAT_NEG, "Floating-point negation (-)") .value("FLOAT_ABS", OpCode::CPUI_FLOAT_ABS, "Floating-point absolute value (abs)") .value("FLOAT_SQRT", OpCode::CPUI_FLOAT_SQRT, "Floating-point square root (sqrt)") .value("FLOAT_INT2FLOAT", OpCode::CPUI_FLOAT_INT2FLOAT, "Convert an integer to a floating-point") .value("FLOAT_FLOAT2FLOAT", OpCode::CPUI_FLOAT_FLOAT2FLOAT, "Convert between different floating-point sizes") .value("FLOAT_TRUNC", OpCode::CPUI_FLOAT_TRUNC, "Round towards zero") .value("FLOAT_CEIL", OpCode::CPUI_FLOAT_CEIL, "Round towards +infinity") .value("FLOAT_FLOOR", OpCode::CPUI_FLOAT_FLOOR, "Round towards -infinity") .value("FLOAT_ROUND", OpCode::CPUI_FLOAT_ROUND, "Round towards 
nearest") .value("MULTIEQUAL", OpCode::CPUI_MULTIEQUAL, "Phi-node operator") .value("INDIRECT", OpCode::CPUI_INDIRECT, "Copy with an indirect effect") .value("PIECE", OpCode::CPUI_PIECE, "Concatenate") .value("SUBPIECE", OpCode::CPUI_SUBPIECE, "Truncate") .value("CAST", OpCode::CPUI_CAST, "Cast from one data-type to another") .value("PTRADD", OpCode::CPUI_PTRADD, "Index into an array ([])") .value("PTRSUB", OpCode::CPUI_PTRSUB, "Drill down to a sub-field (->)") .value("SEGMENTOP", OpCode::CPUI_SEGMENTOP, "Look-up a segmented address") .value("CPOOLREF", OpCode::CPUI_CPOOLREF, "Recover a value from the constant pool") .value("NEW", OpCode::CPUI_NEW, "Allocate a new object (new)") .value("INSERT", OpCode::CPUI_INSERT, "Insert a bit-range") .value("EXTRACT", OpCode::CPUI_EXTRACT, "Extract a bit-range") .value("POPCOUNT", OpCode::CPUI_POPCOUNT, "Count the 1-bits") .value("LZCOUNT", OpCode::CPUI_LZCOUNT, "Count the leading 0-bits"); nb::class_(m, "PcodeOp", "Low-level representation of a single P-Code operation.") .def_ro("opcode", &PcodeOp::m_opcode, "opcode(self) -> OpCode\nOpcode for this operation.") .def_ro("output", &PcodeOp::m_output, "output(self) -> Optional[Varnode]\nOutput varnode for this operation.") .def_ro("inputs", &PcodeOp::m_inputs, "inputs(self) -> List[Varnode]\nInput varnodes for this operation."); nb::class_(m, "Translation", "P-Code translation.") .def_ro("ops", &Translation::m_ops, "ops(self) -> List[PcodeOp]\nThe translated sequence of P-Code ops."); nb::class_(m, "Instruction", "Disassembled machine code instruction.") .def_ro("addr", &DisassemblyInstruction::m_addr, "addr(self) -> Address\nAddress of this instruction.") .def_ro( "length", &DisassemblyInstruction::m_length, "length(self) -> int\nLength, in bytes, of this instruction.") .def_ro("mnem", &DisassemblyInstruction::m_mnem, "mnem(self) -> str\nMnemonic string of this instruction.") .def_ro("body", &DisassemblyInstruction::m_body, "body(self) -> str\nOperand string of this 
instruction."); nb::class_(m, "Disassembly", "Machine Code Disassembly.") .def_ro("instructions", &Disassembly::m_instructions, "instructions(self) -> List[Instruction]\nThe disassembled instructions."); m.attr("TRANSLATE_FLAGS_BB_TERMINATING") = static_cast(TranslateFlags::BB_TERMINATING); nb::class_(m, "Context", "Context for machine code translation and disassembly.") .def(nb::init()) .def( "disassemble", [](Context &t, nb::bytes buf, uint64_t base_address, uint64_t offset, uint64_t max_bytes, uint64_t max_instructions) { if (offset >= buf.size()) { throw std::out_of_range("offset out of range"); } uint64_t available_bytes = buf.size() - offset; max_bytes = max_bytes ? MIN(max_bytes, available_bytes) : available_bytes; return t.disassemble(buf.c_str() + offset, max_bytes, base_address, max_instructions); }, "buf"_a, "base_address"_a = 0, "offset"_a = 0, "max_bytes"_a = 0, "max_instructions"_a = 0, nb::keep_alive<0, 1>(), R"(Disassemble and format machine code as assembly code. .. ipython:: In [0]: import pypcode ...: ctx = pypcode.Context("x86:LE:64:default") ...: dx = ctx.disassemble(b"\x48\x35\x78\x56\x34\x12\xc3") ...: for ins in dx.instructions: ...: print(f"{ins.addr.offset:#x}/{ins.length}: {ins.mnem} {ins.body}") 0x0/6: XOR RAX,0x12345678 0x6/1: RET Instructions are decoded from ``buf`` and formatted in :class:`.Instruction` s: * the end of the buffer is reached, * ``max_bytes`` or ``max_instructions`` is reached, or * an exception occurs. If an exception occurs following successful disassembly of at least one instruction, the exception is discarded and the successful disassembly is returned. If the exception occurs at disassembly of the first instruction, it will be raised. See below for possible exceptions. Args: buf (bytes): Machine code to disassemble. base_address (int): Base address of the code at offset being decoded, 0 by default. offset (int): Offset into ``bytes`` to begin disassembly, 0 by default. 
max_bytes (int): Maximum number of bytes to disassemble, or 0 for no limit (default). max_instructions (int): Maximum number of instructions to disassemble, or 0 for no limit (default). Returns: Disassembly: The disassembled machine code. Instructions are accessible through :attr:`.Disassembly.instructions`. Raises: BadDataError: The instruction at ``base_address`` could be decoded. )") .def( "getAllRegisters", [](Context &t) { map regmap; t.m_sleigh->getAllRegisters(regmap); return regmap; }, "Get a mapping of all register locations to their corresponding names.") .def( "getRegisterName", [](Context &t, AddrSpace *space, uint64_t offset, uint32_t size) { return t.m_sleigh->getRegisterName(space, offset, size); }, "space"_a, "offset"_a, "size"_a, R"(Get the name of a register. Args: space (AddrSpace): The address space. offset (int): Offset within the address space. size (int): Size of the register, in bytes. Returns: str: The register name, or the empty string if the register could not be identified. )") .def("reset", &Context::reset, "Reset the context.") .def( "setVariableDefault", [](Context &t, const std::string &name, uint32_t value) { t.m_context_db.setVariableDefault(name, value); }, "name"_a, "value"_a, "Provide a default value for a context variable.") .def( "translate", [](Context &t, nb::bytes buf, uint64_t base_address, uint64_t offset, uint64_t max_bytes, uint64_t max_instructions, uint64_t flags) { if (offset >= buf.size()) { throw std::out_of_range("offset out of range"); } uint64_t available_bytes = buf.size() - offset; max_bytes = max_bytes ? MIN(max_bytes, available_bytes) : available_bytes; return t.translate(buf.c_str() + offset, max_bytes, base_address, max_instructions, flags); }, "buf"_a, "base_address"_a = 0, "offset"_a = 0, "max_bytes"_a = 0, "max_instructions"_a = 0, "flags"_a = 0, nb::keep_alive<0, 1>(), R"(Translate machine code to P-Code. .. 
ipython:: In [0]: import pypcode ...: ctx = pypcode.Context("x86:LE:64:default") ...: tx = ctx.translate(b"\x48\x35\x78\x56\x34\x12\xc3") # xor rax, 0x12345678; ret ...: print(tx) IMARK ram[0:6] CF = 0x0 OF = 0x0 RAX = RAX ^ 0x12345678 SF = RAX s< 0x0 ZF = RAX == 0x0 unique[28080:8] = RAX & 0xff unique[28100:1] = popcount(unique[28080:8]) unique[28180:1] = unique[28100:1] & 0x1 PF = unique[28180:1] == 0x0 IMARK ram[6:1] RIP = *[ram]RSP RSP = RSP + 0x8 return RIP Instructions are decoded from ``buf`` and translated to a sequence of :class:`.PcodeOp` s until: * the end of the buffer is reached, * ``max_bytes`` or ``max_instructions`` is reached, * if the ``BB_TERMINATING`` flag is set, an instruction which performs a branch is encountered, or * an exception occurs. A :class:`.PcodeOp` with opcode :attr:`OpCode.IMARK` is used to identify machine instructions corresponding to a translation. :attr:`OpCode.IMARK` ops precede the corresponding P-Code translation, and will have one or more input :class:`.Varnode` s identifying the address and length in bytes of the source machine instruction(s). The number of input :class:`.Varnode` s depends on the number of instructions that were decoded for the translation of the particular instruction. On architectures with branch delay slots, the effects of the delay slot instructions will be included in the translation of the branch instruction. For this reason, it is possible that more instructions than specified in ``max_instructions`` may be translated. The :attr:`OpCode.IMARK` op identifying the branch instruction will contain an input :class:`.Varnode` corresponding to the branch instruction, with additional input :class:`.Varnode` identifying corresponding delay slot instructions. If an exception occurs following successful translation of at least one instruction, the exception is discarded and the successful translation is returned. 
If the exception occurs during translation of the first instruction, the exception will be raised. See below for possible exceptions. Args: buf (bytes): Machine code to translate. base_address (int): Base address of the code at offset being decoded. offset (int): Offset into ``bytes`` to begin translation. max_bytes (int): Maximum number of bytes to translate. max_instructions (int): Maximum number of instructions to translate. flags (int): Flags controlling translation. See :class:`.TranslateFlags`. Returns: Translation: The P-Code translation of the input machine code. P-Code ops are accessible through :attr:`.Translation.ops`. Raises: BadDataError: The instruction at ``base_address`` could not be decoded. UnimplError: The P-Code for instruction at ``base_address`` is not yet implemented. )"); } ================================================ FILE: pypcode/pypcode_native.pyi ================================================ from typing import Any, ClassVar, List, Optional __version__: str TRANSLATE_FLAGS_BB_TERMINATING: int class BadDataError(Exception): ... class DecoderError(Exception): ... class LowlevelError(Exception): ... class UnimplError(Exception): ... class AddrSpace: def __init__(self, *args, **kwargs) -> None: ... @property def name(self) -> str: ... class Address: def __init__(self, *args, **kwargs) -> None: ... @property def offset(self) -> int: ... @property def space(self) -> AddrSpace: ... class Instruction: def __init__(self, *args, **kwargs) -> None: ... @property def addr(self) -> Address: ... @property def body(self) -> str: ... @property def length(self) -> int: ... @property def mnem(self) -> str: ... class Disassembly: def __init__(self, *args, **kwargs) -> None: ... @property def instructions(self) -> List[Instruction]: ... class OpCode: BOOL_AND: ClassVar[OpCode] = ... BOOL_NEGATE: ClassVar[OpCode] = ... BOOL_OR: ClassVar[OpCode] = ... BOOL_XOR: ClassVar[OpCode] = ... BRANCH: ClassVar[OpCode] = ... BRANCHIND: ClassVar[OpCode] = ... 
CALL: ClassVar[OpCode] = ... CALLIND: ClassVar[OpCode] = ... CALLOTHER: ClassVar[OpCode] = ... CAST: ClassVar[OpCode] = ... CBRANCH: ClassVar[OpCode] = ... COPY: ClassVar[OpCode] = ... CPOOLREF: ClassVar[OpCode] = ... EXTRACT: ClassVar[OpCode] = ... FLOAT_ABS: ClassVar[OpCode] = ... FLOAT_ADD: ClassVar[OpCode] = ... FLOAT_CEIL: ClassVar[OpCode] = ... FLOAT_DIV: ClassVar[OpCode] = ... FLOAT_EQUAL: ClassVar[OpCode] = ... FLOAT_FLOAT2FLOAT: ClassVar[OpCode] = ... FLOAT_FLOOR: ClassVar[OpCode] = ... FLOAT_INT2FLOAT: ClassVar[OpCode] = ... FLOAT_LESS: ClassVar[OpCode] = ... FLOAT_LESSEQUAL: ClassVar[OpCode] = ... FLOAT_MULT: ClassVar[OpCode] = ... FLOAT_NAN: ClassVar[OpCode] = ... FLOAT_NEG: ClassVar[OpCode] = ... FLOAT_NOTEQUAL: ClassVar[OpCode] = ... FLOAT_ROUND: ClassVar[OpCode] = ... FLOAT_SQRT: ClassVar[OpCode] = ... FLOAT_SUB: ClassVar[OpCode] = ... FLOAT_TRUNC: ClassVar[OpCode] = ... IMARK: ClassVar[OpCode] = ... INDIRECT: ClassVar[OpCode] = ... INSERT: ClassVar[OpCode] = ... INT_2COMP: ClassVar[OpCode] = ... INT_ADD: ClassVar[OpCode] = ... INT_AND: ClassVar[OpCode] = ... INT_CARRY: ClassVar[OpCode] = ... INT_DIV: ClassVar[OpCode] = ... INT_EQUAL: ClassVar[OpCode] = ... INT_LEFT: ClassVar[OpCode] = ... INT_LESS: ClassVar[OpCode] = ... INT_LESSEQUAL: ClassVar[OpCode] = ... INT_MULT: ClassVar[OpCode] = ... INT_NEGATE: ClassVar[OpCode] = ... INT_NOTEQUAL: ClassVar[OpCode] = ... INT_OR: ClassVar[OpCode] = ... INT_REM: ClassVar[OpCode] = ... INT_RIGHT: ClassVar[OpCode] = ... INT_SBORROW: ClassVar[OpCode] = ... INT_SCARRY: ClassVar[OpCode] = ... INT_SDIV: ClassVar[OpCode] = ... INT_SEXT: ClassVar[OpCode] = ... INT_SLESS: ClassVar[OpCode] = ... INT_SLESSEQUAL: ClassVar[OpCode] = ... INT_SREM: ClassVar[OpCode] = ... INT_SRIGHT: ClassVar[OpCode] = ... INT_SUB: ClassVar[OpCode] = ... INT_XOR: ClassVar[OpCode] = ... INT_ZEXT: ClassVar[OpCode] = ... LOAD: ClassVar[OpCode] = ... MULTIEQUAL: ClassVar[OpCode] = ... NEW: ClassVar[OpCode] = ... PIECE: ClassVar[OpCode] = ... 
POPCOUNT: ClassVar[OpCode] = ... LZCOUNT: ClassVar[OpCode] = ... PTRADD: ClassVar[OpCode] = ... PTRSUB: ClassVar[OpCode] = ... RETURN: ClassVar[OpCode] = ... SEGMENTOP: ClassVar[OpCode] = ... STORE: ClassVar[OpCode] = ... SUBPIECE: ClassVar[OpCode] = ... __entries: ClassVar[dict] = ... __name__: Any @property def name(self) -> str: ... @property def value(self) -> int: ... def __init__(self, *args, **kwargs) -> None: ... def __eq__(self, other) -> Any: ... def __ge__(self, other) -> Any: ... def __gt__(self, other) -> Any: ... def __hash__(self) -> Any: ... def __index__(self) -> Any: ... def __int__(self) -> Any: ... def __le__(self, other) -> Any: ... def __lt__(self, other) -> Any: ... def __ne__(self, other) -> Any: ... class Varnode: def __init__(self) -> None: ... def getRegisterName(self) -> str: ... def getUserDefinedOpName(self) -> str: ... def getSpaceFromConst(self) -> AddrSpace: ... @property def offset(self) -> int: ... @property def size(self) -> int: ... @property def space(self) -> AddrSpace: ... class PcodeOp: def __init__(self, *args, **kwargs) -> None: ... @property def inputs(self) -> List[Varnode]: ... @property def opcode(self) -> OpCode: ... @property def output(self) -> Optional[Varnode]: ... class Translation: def __init__(self, *args, **kwargs) -> None: ... @property def ops(self) -> List[PcodeOp]: ... class Context: def __init__(self, *args, **kwargs) -> None: ... def disassemble( self, bytes: bytes, base_address: int = ..., offset: int = ..., max_bytes: int = ..., max_instructions: int = ..., ) -> Disassembly: ... def getAllRegisters(self) -> dict[Varnode, str]: ... def getRegisterName(self, space: AddrSpace, offset: int, size: int) -> str: ... def reset(self) -> None: ... def setVariableDefault(self, name: str, value: int) -> None: ... def translate( self, bytes: bytes, base_address: int = ..., offset: int = ..., max_bytes: int = ..., max_instructions: int = ..., flags: int = ..., ) -> Translation: ... 
================================================ FILE: pypcode/sleigh/Makefile ================================================ # The C compiler BFDHOME=/usr MAKE_STATIC= ARCH_TYPE= ADDITIONAL_FLAGS= SLEIGHVERSION=sleigh-2.1.0 EXTENSION_POINT=../../../../../../../ghidra.ext-u/Ghidra/Features/DecompilerExtensions/src/decompile/cpp GHIDRA_BIN=../../../../../../../ghidra.bin OS = $(shell uname -s) CPU = $(shell uname -m) # TODO: need to revise to support arm64/aarch64 arch - improve on both OS and arch detection ifeq ($(OS),Linux) # Allow ARCH to be specified externally so we can build for 32-bit from a 64-bit Linux ifndef ARCH ARCH=$(CPU) endif ifeq ($(ARCH),x86_64) ARCH_TYPE=-m64 OSDIR=linux_x86_64 else ARCH_TYPE=-m32 OSDIR=linux_x86_32 endif endif ifeq ($(OS),Darwin) MAKE_STATIC= ARCH_TYPE=-arch x86_64 ADDITIONAL_FLAGS=-mmacosx-version-min=10.6 -w OSDIR=mac_x86_64 endif CXX=g++ -std=c++11 # Debug flags DBG_CXXFLAGS=-g -Wall -Wno-sign-compare $(CFLAGS) #DBG_CXXFLAGS=-g -pg -Wall -Wno-sign-compare #DBG_CXXFLAGS=-g -fprofile-arcs -ftest-coverage -Wall -Wno-sign-compare # Optimization flags OPT_CXXFLAGS=-O2 -Wall -Wno-sign-compare $(CFLAGS) YACC=bison # libraries #INCLUDES=-I$(BFDHOME)/include INCLUDES= BFDLIB=-lbfd LNK=-lz # Source files ALL_SOURCE= $(wildcard *.cc) ALL_NAMES=$(subst .cc,,$(ALL_SOURCE)) UNITTEST_SOURCE= $(wildcard ../unittests/*.cc) UNITTEST_NAMES=$(subst .cc,,$(UNITTEST_SOURCE)) UNITTEST_STRIP=$(subst ../unittests/,,$(UNITTEST_NAMES)) COREEXT_SOURCE= $(wildcard coreext_*.cc) COREEXT_NAMES=$(subst .cc,,$(COREEXT_SOURCE)) GHIDRAEXT_SOURCE= $(wildcard ghidraext_*.cc) GHIDRAEXT_NAMES=$(subst .cc,,$(GHIDRAEXT_SOURCE)) EXTERNAL_COREEXT_SOURCE= $(wildcard $(EXTENSION_POINT)/coreext_*.cc) EXTERNAL_GHIDRAEXT_SOURCE= $(wildcard $(EXTENSION_POINT)/ghidraext_*.cc) EXTERNAL_CONSOLEEXT_SOURCE= $(wildcard $(EXTENSION_POINT)/consoleext_*.cc) EXTERNAL_COREEXT_NAMES=$(subst .cc,,$(notdir $(EXTERNAL_COREEXT_SOURCE))) EXTERNAL_GHIDRAEXT_NAMES=$(subst .cc,,$(notdir 
$(EXTERNAL_GHIDRAEXT_SOURCE))) EXTERNAL_CONSOLEEXT_NAMES=$(subst .cc,,$(notdir $(EXTERNAL_CONSOLEEXT_SOURCE))) # The following macros partition all the source files, there should be no overlaps # Some core source files used in all projects CORE= xml marshal space float address pcoderaw translate opcodes globalcontext # Additional core files for any projects that decompile DECCORE=capability architecture options graph cover block cast typeop database cpool \ comment stringmanage modelrules fspec action loadimage grammar varnode op type \ variable varmap jumptable emulate emulateutil flow userop expression multiprecision \ funcdata funcdata_block funcdata_op funcdata_varnode unionresolve pcodeinject \ heritage prefersplit rangeutil ruleaction subflow blockaction merge double \ transform constseq coreaction condexe override dynamic crc32 prettyprint \ printlanguage printc printjava memstate opbehavior paramid signature $(COREEXT_NAMES) # Files used for any project that use the sleigh decoder SLEIGH= sleigh pcodeparse pcodecompile sleighbase slghsymbol \ slghpatexpress slghpattern semantics context slaformat compression filemanage # Additional files for the GHIDRA specific build GHIDRA= ghidra_arch inject_ghidra ghidra_translate loadimage_ghidra \ typegrp_ghidra database_ghidra ghidra_context cpool_ghidra \ ghidra_process comment_ghidra string_ghidra signature_ghidra $(GHIDRAEXT_NAMES) # Additional files specific to the sleigh compiler SLACOMP=slgh_compile slghparse slghscan # Additional special files that should not be considered part of the library SPECIAL=consolemain sleighexample test # Any additional modules for the command line decompiler EXTRA= $(filter-out $(CORE) $(DECCORE) $(SLEIGH) $(GHIDRA) $(SLACOMP) $(SPECIAL),$(ALL_NAMES)) EXECS=decomp_dbg decomp_opt decomp_test_dbg ghidra_dbg ghidra_opt sleigh_dbg sleigh_opt libdecomp_dbg.a libdecomp.a # Possible conditional compilation flags # __TERMINAL__ # Turn on terminal support for console mode # CPUI_STATISTICS # 
Turn on collection of cover and cast statistics # CPUI_RULECOMPILE # Allow user defined dynamic rules # Debug compilation flags # OPACTION_DEBUG # Turns on all the action tracing facilities # MERGEMULTI_DEBUG # Check for MULTIEQUAL and INDIRECT intersections # BLOCKCONSISTENT_DEBUG # Check that block graph structure is consistent # DFSVERIFY_DEBUG # make sure that the block ordering algorithm produces # a true depth first traversal of the dominator tree # CPUI_DEBUG # This is the one controlling switch for all the other debug switches COMMANDLINE_NAMES=$(CORE) $(DECCORE) $(EXTRA) $(SLEIGH) consolemain COMMANDLINE_DEBUG=-DCPUI_DEBUG -D__TERMINAL__ COMMANDLINE_OPT=-D__TERMINAL__ TEST_NAMES=$(CORE) $(DECCORE) $(SLEIGH) $(EXTRA) test TEST_DEBUG=-D__TERMINAL__ GHIDRA_NAMES=$(CORE) $(DECCORE) $(GHIDRA) GHIDRA_NAMES_DBG=$(GHIDRA_NAMES) callgraph ifacedecomp testfunction ifaceterm interface GHIDRA_DEBUG=-DCPUI_DEBUG GHIDRA_OPT= SLEIGH_NAMES=$(CORE) $(SLEIGH) $(SLACOMP) SLEIGH_DEBUG=-DYYDEBUG SLEIGH_OPT= # The SLEIGH library is built with console mode objects and it # uses the COMMANDLINE_* options LIBSLA_NAMES=$(CORE) $(SLEIGH) loadimage sleigh memstate emulate opbehavior # The Decompiler library is built with console mode objects and it uses the COMMANDLINE_* options LIBDECOMP_NAMES=$(CORE) $(DECCORE) $(EXTRA) $(SLEIGH) # object file macros COMMANDLINE_DBG_OBJS=$(COMMANDLINE_NAMES:%=com_dbg/%.o) COMMANDLINE_OPT_OBJS=$(COMMANDLINE_NAMES:%=com_opt/%.o) TEST_DEBUG_OBJS=$(TEST_NAMES:%=test_dbg/%.o) $(UNITTEST_STRIP:%=test_dbg/%.o) GHIDRA_DBG_OBJS=$(GHIDRA_NAMES_DBG:%=ghi_dbg/%.o) GHIDRA_OPT_OBJS=$(GHIDRA_NAMES:%=ghi_opt/%.o) SLEIGH_DBG_OBJS=$(SLEIGH_NAMES:%=sla_dbg/%.o) SLEIGH_OPT_OBJS=$(SLEIGH_NAMES:%=sla_opt/%.o) LIBSLA_DBG_OBJS=$(LIBSLA_NAMES:%=com_dbg/%.o) LIBSLA_OPT_OBJS=$(LIBSLA_NAMES:%=com_opt/%.o) LIBSLA_SOURCE=$(LIBSLA_NAMES:%=%.cc) $(LIBSLA_NAMES:%=%.hh) \ $(SLACOMP:%=%.cc) slgh_compile.hh slghparse.hh types.h \ partmap.hh error.hh slghparse.y pcodeparse.y xml.y 
slghscan.l loadimage_bfd.hh loadimage_bfd.cc LIBDECOMP_DBG_OBJS=$(LIBDECOMP_NAMES:%=com_dbg/%.o) LIBDECOMP_OPT_OBJS=$(LIBDECOMP_NAMES:%=com_opt/%.o) # conditionals to determine which dependency files to build DEPNAMES=com_dbg/depend com_opt/depend ifeq ($(MAKECMDGOALS),install_ghidraopt) DEPNAMES=ghi_opt/depend endif ifeq ($(MAKECMDGOALS),install_ghidradbg) DEPNAMES=ghi_dbg/depend endif ifeq ($(MAKECMDGOALS),ghidra_opt) DEPNAMES=ghi_opt/depend endif ifeq ($(MAKECMDGOALS),ghidra_opt_mac) DEPNAMES=ghi_opt/depend endif ifeq ($(MAKECMDGOALS),ghidra_dbg) DEPNAMES=ghi_dbg/depend endif ifeq ($(MAKECMDGOALS),sleigh_opt) DEPNAMES=sla_opt/depend endif ifeq ($(MAKECMDGOALS),sleigh_opt_mac) DEPNAMES=sla_opt/depend endif ifeq ($(MAKECMDGOALS),sleigh_dbg) DEPNAMES=sla_dbg/depend endif ifeq ($(MAKECMDGOALS),libsla.a) DEPNAMES=com_opt/depend.lib_sla endif ifeq ($(MAKECMDGOALS),libsla_dbg.a) DEPNAMES=com_dbg/depend.lib_sla endif ifeq ($(MAKECMDGOALS),decomp_dbg) DEPNAMES=com_dbg/depend endif ifeq ($(MAKECMDGOALS),decomp_opt) DEPNAMES=com_opt/depend endif ifneq (,$(filter $(MAKECMDGOALS),decomp_test_dbg test)) DEPNAMES=test_dbg/depend endif ifeq ($(MAKECMDGOALS),reallyclean) DEPNAMES= endif ifeq ($(MAKECMDGOALS),clean) DEPNAMES= endif ifeq ($(MAKECMDGOALS),doc) DEPNAMES= endif ifeq ($(MAKECMDGOALS),tags) DEPNAMES= endif ifeq ($(MAKECMDGOALS),link_extensions) DEPNAMES= endif ifeq ($(MAKECMDGOALS),link_extensions_hard) DEPNAMES= endif com_dbg/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(COMMANDLINE_DEBUG) $< -o $@ com_opt/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(OPT_CXXFLAGS) $(ADDITIONAL_FLAGS) $(COMMANDLINE_OPT) $< -o $@ test_dbg/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(TEST_DEBUG) $< -o $@ test_dbg/%.o: ../unittests/%.cc $(CXX) -I. 
$(ARCH_TYPE) -c $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(TEST_DEBUG) $< -o $@ ghi_dbg/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(GHIDRA_DEBUG) $< -o $@ ghi_opt/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(OPT_CXXFLAGS) $(ADDITIONAL_FLAGS) $(GHIDRA_OPT) $< -o $@ sla_dbg/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(SLEIGH_DEBUG) $< -o $@ sla_opt/%.o: %.cc $(CXX) $(ARCH_TYPE) -c $(OPT_CXXFLAGS) $(ADDITIONAL_FLAGS) $(SLEIGH_OPT) $< -o $@ grammar.cc: grammar.y $(YACC) -l -o $@ $< xml.cc: xml.y $(YACC) -l -o $@ $< pcodeparse.cc: pcodeparse.y $(YACC) -l -o $@ $< slghparse.cc: slghparse.y $(YACC) -l -d -o $@ $< slghscan.cc: slghscan.l $(LEX) -L -o$@ $< ruleparse.cc: ruleparse.y $(YACC) -p ruleparse -d -o $@ $< slghparse.hh: slghparse.y slghparse.cc slghscan.cc: slghparse.hh slgh_compile.hh ruleparse.hh: ruleparse.y ruleparse.cc decomp_dbg: $(COMMANDLINE_DBG_OBJS) $(CXX) $(DBG_CXXFLAGS) $(ARCH_TYPE) -o decomp_dbg $(COMMANDLINE_DBG_OBJS) $(BFDLIB) $(LNK) decomp_opt: $(COMMANDLINE_OPT_OBJS) $(CXX) $(OPT_CXXFLAGS) $(ARCH_TYPE) -o decomp_opt $(COMMANDLINE_OPT_OBJS) $(BFDLIB) $(LNK) #decomp_test_dbg: DBG_CXXFLAGS += -D_GLIBCXX_ASSERTIONS -fsanitize=address,undefined decomp_test_dbg: $(TEST_DEBUG_OBJS) $(CXX) $(DBG_CXXFLAGS) $(ARCH_TYPE) -o decomp_test_dbg $(TEST_DEBUG_OBJS) $(BFDLIB) $(LNK) test: decomp_test_dbg ./decomp_test_dbg ghidra_dbg: $(GHIDRA_DBG_OBJS) $(CXX) $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(MAKE_STATIC) $(ARCH_TYPE) -o ghidra_dbg $(GHIDRA_DBG_OBJS) ghidra_opt: $(GHIDRA_OPT_OBJS) $(CXX) $(OPT_CXXFLAGS) $(ADDITIONAL_FLAGS) $(MAKE_STATIC) $(ARCH_TYPE) -o ghidra_opt $(GHIDRA_OPT_OBJS) sleigh_dbg: $(SLEIGH_DBG_OBJS) $(CXX) $(DBG_CXXFLAGS) $(ADDITIONAL_FLAGS) $(MAKE_STATIC) $(ARCH_TYPE) -o sleigh_dbg $(SLEIGH_DBG_OBJS) $(LNK) sleigh_opt: $(SLEIGH_OPT_OBJS) $(CXX) $(OPT_CXXFLAGS) $(ADDITIONAL_FLAGS) $(MAKE_STATIC) $(ARCH_TYPE) -o sleigh_opt $(SLEIGH_OPT_OBJS) $(LNK) install_ghidradbg: ghidra_dbg cp ghidra_dbg 
$(GHIDRA_BIN)/Ghidra/Features/Decompiler/os/$(OSDIR)/decompile install_ghidraopt: ghidra_opt cp ghidra_opt $(GHIDRA_BIN)/Ghidra/Features/Decompiler/os/$(OSDIR)/decompile libsla_dbg.a: $(LIBSLA_DBG_OBJS) rm -rf libsla_dbg.a ar qc libsla_dbg.a $(LIBSLA_DBG_OBJS) ranlib libsla_dbg.a libsla.a: $(LIBSLA_OPT_OBJS) rm -rf libsla.a ar qc libsla.a $(LIBSLA_OPT_OBJS) ranlib libsla.a libdecomp_dbg.a: $(LIBDECOMP_DBG_OBJS) rm -rf libdecomp_dbg.a ar qc libdecomp_dbg.a $(LIBDECOMP_DBG_OBJS) ranlib libdecomp_dbg.a libdecomp.a: $(LIBDECOMP_OPT_OBJS) rm -rf libdecomp.a ar qc libdecomp.a $(LIBDECOMP_OPT_OBJS) ranlib libdecomp.a sleighexamp_dir: slghscan.cc rm -rf $(SLEIGHVERSION) mkdir $(SLEIGHVERSION) mkdir $(SLEIGHVERSION)/src $(SLEIGHVERSION)/specfiles cp ../../../../../Processors/x86/data/languages/x86.sla \ ../../../../../Processors/x86/data/languages/x86.slaspec \ ../../../../../Processors/x86/data/languages/ia.sinc \ $(SLEIGHVERSION)/specfiles cp $(LIBSLA_SOURCE) Makefile Doxyfile $(SLEIGHVERSION)/src cp sleighexample.cc $(SLEIGHVERSION) grep ^-- sleighexample.cc | sed -e s/--// > $(SLEIGHVERSION)/Makefile grep ^-a- sleighexample.cc | sed -e s/-a-// > $(SLEIGHVERSION)/README sed -e s/page\ sleigh\ /mainpage\ / < $(SLEIGHVERSION)/src/sleigh.hh > $(SLEIGHVERSION)/spam mv $(SLEIGHVERSION)/spam $(SLEIGHVERSION)/src/sleigh.hh cd $(SLEIGHVERSION)/src; doxygen Doxyfile link_extensions: rm -rf coreext_*.cc coreext_*.hh ghidraext_*.cc ghidraext_*.hh consoleext_*.cc consoleext_*.hh for i in $(EXTERNAL_COREEXT_NAMES) $(EXTERNAL_GHIDRAEXT_NAMES) $(EXTERNAL_CONSOLEEXT_NAMES); do \ ln -s $(EXTENSION_POINT)/$$i.cc $$i.cc; \ ln -s $(EXTENSION_POINT)/$$i.hh $$i.hh; \ done link_extensions_hard: rm -rf coreext_*.cc coreext_*.hh ghidraext_*.cc ghidraext_*.hh consoleext_*.cc consoleext_*.hh for i in $(EXTERNAL_COREEXT_NAMES) $(EXTERNAL_GHIDRAEXT_NAMES) $(EXTERNAL_CONSOLEEXT_NAMES); do \ ln $(EXTENSION_POINT)/$$i.cc $$i.cc; \ ln $(EXTENSION_POINT)/$$i.hh $$i.hh; \ done tags: etags *.c *.h *.cc 
*.hh # Rules to build the different dependency files com_dbg/depend: $(COMMANDLINE_NAMES:%=%.cc) mkdir -p com_dbg @set -e; rm -f $@; \ $(CXX) -MM $(COMMANDLINE_DEBUG) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,com_dbg/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ com_opt/depend: $(COMMANDLINE_NAMES:%=%.cc) mkdir -p com_opt @set -e; rm -f $@; \ $(CXX) -MM $(COMMANDLINE_OPT) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,com_opt/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ test_dbg/depend: $(TEST_NAMES:%=%.cc) $(UNITTEST_NAMES:%=%.cc) mkdir -p test_dbg @set -e; rm -f $@; \ $(CXX) -I. -MM $(TEST_DEBUG) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,test_dbg/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ ghi_dbg/depend: $(GHIDRA_NAMES_DBG:%=%.cc) mkdir -p ghi_dbg @set -e; rm -f $@; \ $(CXX) -MM $(GHIDRA_DEBUG) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,ghi_dbg/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ ghi_opt/depend: $(GHIDRA_NAMES:%=%.cc) mkdir -p ghi_opt @set -e; rm -f $@; \ $(CXX) -MM $(GHIDRA_OPT) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,ghi_opt/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ sla_dbg/depend: $(SLEIGH_NAMES:%=%.cc) mkdir -p sla_dbg @set -e; rm -f $@; \ $(CXX) -MM $(SLEIGH_DEBUG) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,sla_dbg/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ sla_opt/depend: $(SLEIGH_NAMES:%=%.cc) mkdir -p sla_opt @set -e; rm -f $@; \ $(CXX) -MM $(SLEIGH_OPT) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,sla_opt/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ com_opt/depend.lib_sla: $(LIBSLA_NAMES:%=%.cc) mkdir -p com_opt @set -e; rm -f $@; \ $(CXX) -MM $(COMMANDLINE_OPT) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,com_opt/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ com_dbg/depend.lib_sla: $(LIBSLA_NAMES:%=%.cc) mkdir -p com_dbg @set -e; rm -f $@; \ $(CXX) -MM $(COMMANDLINE_DEBUG) $^ > $@.$$$$; \ sed 's,\(.*\)\.o[ :]*,com_dbg/\1.o $@ : ,g' < $@.$$$$ > $@; \ rm -f $@.$$$$ include $(DEPNAMES) doc: doxygen Doxyfile clean: rm -f com_dbg/*.o com_opt/*.o test_dbg/*.o ghi_dbg/*.o 
ghi_opt/*.o sla_dbg/*.o sla_opt/*.o rm -f *.gcov com_dbg/*.gcno com_dbg/*.gcda resetgcov: rm -f *.gcov com_dbg/*.gcda reallyclean: clean rm -rf coreext_*.cc coreext_*.hh ghidraext_*.cc ghidraext_*.hh consoleext_*.cc consoleext_*.hh rm -rf com_dbg com_opt test_dbg ghi_dbg ghi_opt sla_dbg sla_opt rm -f $(EXECS) TAGS *~ sleigh_src: mkdir -p sleigh_src cp $(LIBSLA_SOURCE) Makefile sleigh_src ================================================ FILE: pypcode/sleigh/address.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "address.hh" #include "translate.hh" namespace ghidra { AttributeId ATTRIB_FIRST = AttributeId("first",27); AttributeId ATTRIB_LAST = AttributeId("last",28); AttributeId ATTRIB_UNIQ = AttributeId("uniq",29); ElementId ELEM_ADDR = ElementId("addr",11); ElementId ELEM_RANGE = ElementId("range",12); ElementId ELEM_RANGELIST = ElementId("rangelist",13); ElementId ELEM_REGISTER = ElementId("register",14); ElementId ELEM_SEQNUM = ElementId("seqnum",15); ElementId ELEM_VARNODE = ElementId("varnode",16); ostream &operator<<(ostream &s,const SeqNum &sq) { sq.pc.printRaw(s); s << ':' << sq.uniq; return s; } /// This allows an Address to be written to a stream using /// the standard '<<' operator. This is a wrapper for the /// printRaw method and is intended for debugging and console /// mode uses. 
/// \param s is the stream being written to /// \param addr is the Address to write /// \return the output stream ostream &operator<<(ostream &s,const Address &addr) { addr.printRaw(s); return s; } SeqNum::SeqNum(Address::mach_extreme ex) : pc(ex) { uniq = (ex == Address::m_minimal) ? 0 : ~((uintm)0); } void SeqNum::encode(Encoder &encoder) const { encoder.openElement(ELEM_SEQNUM); pc.getSpace()->encodeAttributes(encoder,pc.getOffset()); encoder.writeUnsignedInteger(ATTRIB_UNIQ, uniq); encoder.closeElement(ELEM_SEQNUM); } SeqNum SeqNum::decode(Decoder &decoder) { uintm uniq = ~((uintm)0); uint4 elemId = decoder.openElement(ELEM_SEQNUM); Address pc = Address::decode(decoder); // Recover address for(;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == ATTRIB_UNIQ) { uniq = decoder.readUnsignedInteger(); break; } } decoder.closeElement(elemId); return SeqNum(pc,uniq); } /// Some data structures sort on an Address, and it is convenient /// to be able to create an Address that is either bigger than /// or smaller than all other Addresses. /// \param ex is either \e m_minimal or \e m_maximal Address::Address(mach_extreme ex) { if (ex == m_minimal) { base = (AddrSpace *)0; offset = 0; } else { base = (AddrSpace *) ~((uintp)0); offset = ~((uintb)0); } } /// Return \b true if the range starting at \b this extending the given number of bytes /// is contained by the second given range. 
/// \param sz is the given number of bytes in \b this range /// \param op2 is the start of the second given range /// \param sz2 is the number of bytes in the second given range /// \return \b true if the second given range contains \b this range bool Address::containedBy(int4 sz,const Address &op2,int4 sz2) const { if (base != op2.base) return false; if (op2.offset > offset) return false; uintb off1 = offset + (sz-1); uintb off2 = op2.offset + (sz2-1); return (off2 >= off1); } /// Return -1 if (\e op2,\e sz2) is not properly contained in (\e this,\e sz). /// If it is contained, return the endian aware offset of (\e op2,\e sz2) /// I.e. if the least significant byte of the \e op2 range falls on the least significant /// byte of the \e this range, return 0. If it intersects the second least significant, return 1, etc. /// The -forceleft- toggle causes the check to be made against the left (lowest address) side /// of the container, regardless of the endianness. I.e. it forces a little endian interpretation. /// \param sz is the size of \e this range /// \param op2 is the address of the second range /// \param sz2 is the size of the second range /// \param forceleft is \b true if containments is forced to be on the left even for big endian /// \return the endian aware offset, or -1 int4 Address::justifiedContain(int4 sz,const Address &op2,int4 sz2,bool forceleft) const { if (base != op2.base) return -1; if (op2.offset < offset) return -1; uintb off1 = offset + (sz-1); uintb off2 = op2.offset + (sz2-1); if (off2 > off1) return -1; if (base->isBigEndian()&&(!forceleft)) { return (int4)(off1 - off2); } return (int4)(op2.offset - offset); } /// If \e this + \e skip falls in the range /// \e op to \e op + \e size, then a non-negative integer is /// returned indicating where in the interval it falls. I.e. /// if \e this + \e skip == \e op, then 0 is returned. Otherwise /// -1 is returned. 
/// \param skip is an adjust to \e this address /// \param op is the start of the range to check /// \param size is the size of the range /// \return an integer indicating how overlap occurs int4 Address::overlap(int4 skip,const Address &op,int4 size) const { uintb dist; if (base != op.base) return -1; // Must be in same address space to overlap if (base->getType()==IPTR_CONSTANT) return -1; // Must not be constants dist = base->wrapOffset(offset+skip-op.offset); if (dist >= size) return -1; // but must fall before op+size return (int4) dist; } /// Does the location \e this, \e sz form a contiguous region to \e loaddr, \e losz, /// where \e this forms the most significant piece of the logical whole /// \param sz is the size of \e this hi region /// \param loaddr is the starting address of the low region /// \param losz is the size of the low region /// \return \b true if the pieces form a contiguous whole bool Address::isContiguous(int4 sz,const Address &loaddr,int4 losz) const { if (base != loaddr.base) return false; if (base->isBigEndian()) { uintb nextoff = base->wrapOffset(offset+sz); if (nextoff == loaddr.offset) return true; } else { uintb nextoff = base->wrapOffset(loaddr.offset+losz); if (nextoff == offset) return true; } return false; } /// If \b this is (originally) a \e join address, reevaluate it in terms of its new /// \e offset and \e size, changing the space and offset if necessary. /// \param size is the new size in bytes of the underlying object void Address::renormalize(int4 size) { if (base->getType() == IPTR_JOIN) base->getManager()->renormalizeJoinAddress(*this,size); } /// This is usually used to decode an address from an \b \ /// element, but any element can be used if it has the appropriate attributes /// - \e space indicates the address space of the tag /// - \e offset indicates the offset within the space /// /// or a \e name attribute can be used to recover an address /// based on a register name. 
/// \param decoder is the stream decoder /// \return the resulting Address Address Address::decode(Decoder &decoder) { VarnodeData var; var.decode(decoder); return Address(var.space,var.offset); } /// This is usually used to decode an address from an \b \ /// element, but any element can be used if it has the appropriate attributes /// - \e space indicates the address space of the tag /// - \e offset indicates the offset within the space /// - \e size indicates the size of an address range /// /// or a \e name attribute can be used to recover an address /// and size based on a register name. If a size is recovered /// it is stored in \e size reference. /// \param decoder is the stream decoder /// \param size is the reference to any recovered size /// \return the resulting Address Address Address::decode(Decoder &decoder,int4 &size) { VarnodeData var; var.decode(decoder); size = var.size; return Address(var.space,var.offset); } Range::Range(const RangeProperties &properties,const AddrSpaceManager *manage) { if (properties.isRegister) { const Translate *trans = manage->getDefaultCodeSpace()->getTrans(); const VarnodeData &point(trans->getRegister(properties.spaceName)); spc = point.space; first = point.offset; last = (first-1) + point.size; return; } spc = manage->getSpaceByName(properties.spaceName); if (spc == (AddrSpace *)0) throw LowlevelError("Undefined space: "+properties.spaceName); if (spc == (AddrSpace *)0) throw LowlevelError("No address space indicated in range tag"); first = properties.first; last = properties.last; if (!properties.seenLast) { last = spc->getHighest(); } if (first > spc->getHighest() || last > spc->getHighest() || last < first) throw LowlevelError("Illegal range tag"); } /// Get the last address +1, updating the space, or returning /// the extremal address if necessary /// \param manage is used to fetch the next address space Address Range::getLastAddrOpen(const AddrSpaceManager *manage) const { AddrSpace *curspc = spc; uintb curlast = 
last; if (curlast == curspc->getHighest()) { curspc = manage->getNextSpaceInOrder(curspc); curlast = 0; } else curlast += 1; if (curspc == (AddrSpace *)0) return Address(Address::m_maximal); return Address(curspc,curlast); } /// Output a description of this Range like: ram: 7f-9c /// \param s is the output stream void Range::printBounds(ostream &s) const { s << spc->getName() << ": "; s << hex << first << '-' << last; } /// Encode \b this to a stream as a \ element. /// \param encoder is the stream encoder void Range::encode(Encoder &encoder) const { encoder.openElement(ELEM_RANGE); encoder.writeSpace(ATTRIB_SPACE, spc); encoder.writeUnsignedInteger(ATTRIB_FIRST, first); encoder.writeUnsignedInteger(ATTRIB_LAST, last); encoder.closeElement(ELEM_RANGE); } /// Reconstruct this object from a \ or \ element /// \param decoder is the stream decoder void Range::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(); if (elemId != ELEM_RANGE && elemId != ELEM_REGISTER) throw DecoderError("Expecting or element"); decodeFromAttributes(decoder); decoder.closeElement(elemId); } /// Reconstruct from attributes that may not be part of a \ element. 
/// \param decoder is the stream decoder void Range::decodeFromAttributes(Decoder &decoder) { spc = (AddrSpace *)0; bool seenLast = false; first = 0; last = 0; for(;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == ATTRIB_SPACE) { spc = decoder.readSpace(); } else if (attribId == ATTRIB_FIRST) { first = decoder.readUnsignedInteger(); } else if (attribId == ATTRIB_LAST) { last = decoder.readUnsignedInteger(); seenLast = true; } else if (attribId == ATTRIB_NAME) { const Translate *trans = decoder.getAddrSpaceManager()->getDefaultCodeSpace()->getTrans(); const VarnodeData &point(trans->getRegister(decoder.readString())); spc = point.space; first = point.offset; last = (first-1) + point.size; return; // There should be no (space,first,last) attributes } } if (spc == (AddrSpace *)0) throw LowlevelError("No address space indicated in range tag"); if (!seenLast) { last = spc->getHighest(); } if (first > spc->getHighest() || last > spc->getHighest() || last < first) throw LowlevelError("Illegal range tag"); } void RangeProperties::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(); if (elemId != ELEM_RANGE && elemId != ELEM_REGISTER) throw DecoderError("Expecting or element"); for(;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == ATTRIB_SPACE) spaceName = decoder.readString(); else if (attribId == ATTRIB_FIRST) first = decoder.readUnsignedInteger(); else if (attribId == ATTRIB_LAST) { last = decoder.readUnsignedInteger(); seenLast = true; } else if (attribId == ATTRIB_NAME) { spaceName = decoder.readString(); isRegister = true; } } decoder.closeElement(elemId); } /// Insert a new Range merging as appropriate to maintain the disjoint cover /// \param spc is the address space containing the new range /// \param first is the offset of the first byte in the new range /// \param last is the offset of the last byte in the new range void RangeList::insertRange(AddrSpace 
*spc,uintb first,uintb last) { set::iterator iter1,iter2; // we must have iter1.first > first iter1 = tree.upper_bound(Range(spc,first,first)); // Set iter1 to first range with range.last >=first // It is either current iter1 or the one before if (iter1 != tree.begin()) { --iter1; if (((*iter1).spc!=spc)||((*iter1).last < first)) ++iter1; } // Set iter2 to first range with range.first > last iter2 = tree.upper_bound(Range(spc,last,last)); while(iter1!=iter2) { if ((*iter1).first < first) first = (*iter1).first; if ((*iter1).last > last) last = (*iter1).last; tree.erase(iter1++); } tree.insert(Range(spc,first,last)); } /// Remove/narrow/split existing Range objects to eliminate the indicated addresses /// while still maintaining a disjoint cover. /// \param spc is the address space of the address range to remove /// \param first is the offset of the first byte of the range /// \param last is the offset of the last byte of the range void RangeList::removeRange(AddrSpace *spc,uintb first,uintb last) { // remove a range set::iterator iter1,iter2; if (tree.empty()) return; // Nothing to do // we must have iter1.first > first iter1 = tree.upper_bound(Range(spc,first,first)); // Set iter1 to first range with range.last >=first // It is either current iter1 or the one before if (iter1 != tree.begin()) { --iter1; if (((*iter1).spc!=spc)||((*iter1).last < first)) ++iter1; } // Set iter2 to first range with range.first > last iter2 = tree.upper_bound(Range(spc,last,last)); while(iter1!=iter2) { uintb a,b; a = (*iter1).first; b = (*iter1).last; tree.erase(iter1++); if (a last) tree.insert(Range(spc,last+1,b)); } } void RangeList::merge(const RangeList &op2) { // Merge -op2- into this rangelist set::const_iterator iter1,iter2; iter1 = op2.tree.begin(); iter2 = op2.tree.end(); while(iter1 != iter2) { const Range &range( *iter1 ); ++iter1; insertRange(range.spc, range.first, range.last); } } /// Make sure indicated range of addresses is \e contained in \b this RangeList /// 
\param addr is the first Address in the target range /// \param size is the number of bytes in the target range /// \return \b true is the range is fully contained by this RangeList bool RangeList::inRange(const Address &addr,int4 size) const { set::const_iterator iter; if (addr.isInvalid()) return true; // We don't really care if (tree.empty()) return false; // iter = first range with its first > addr iter = tree.upper_bound(Range(addr.getSpace(),addr.getOffset(),addr.getOffset())); if (iter == tree.begin()) return false; // Set iter to last range with range.first <= addr --iter; // if (iter == tree.end()) // iter can't be end if non-empty // return false; if ((*iter).spc != addr.getSpace()) return false; if ((*iter).last >= addr.getOffset()+size-1) return true; return false; } /// If \b this RangeList contains the specific address (spaceid,offset), return it /// \return the containing Range or NULL const Range *RangeList::getRange(AddrSpace *spaceid,uintb offset) const { if (tree.empty()) return (const Range *)0; // iter = first range with its first > offset set::const_iterator iter = tree.upper_bound(Range(spaceid,offset,offset)); if (iter == tree.begin()) return (const Range *)0; // Set iter to last range with range.first <= offset --iter; if ((*iter).spc != spaceid) return (const Range *)0; if ((*iter).last >= offset) return &(*iter); return (const Range *)0; } /// Return the size of the biggest contiguous sequence of addresses in /// \b this RangeList which contain the given address /// \param addr is the given address /// \param maxsize is the large range to consider before giving up /// \return the size (in bytes) of the biggest range uintb RangeList::longestFit(const Address &addr,uintb maxsize) const { set::const_iterator iter; if (addr.isInvalid()) return 0; if (tree.empty()) return 0; // iter = first range with its first > addr uintb offset = addr.getOffset(); iter = tree.upper_bound(Range(addr.getSpace(),offset,offset)); if (iter == tree.begin()) 
return 0; // Set iter to last range with range.first <= addr --iter; uintb sizeres = 0; if ((*iter).last < offset) return sizeres; do { if ((*iter).spc != addr.getSpace()) break; if ((*iter).first > offset) break; sizeres += ((*iter).last + 1 - offset); // Size extends to end of range offset = (*iter).last + 1; // Try to chain on the next range if (sizeres >= maxsize) break; // Don't bother if past maxsize ++iter; // Next range in the chain } while(iter != tree.end()); return sizeres; } /// \return the first contiguous range of addresses or NULL if empty const Range *RangeList::getFirstRange(void) const { if (tree.empty()) return (const Range *)0; return &(*tree.begin()); } /// \return the last contiguous range of addresses or NULL if empty const Range *RangeList::getLastRange(void) const { if (tree.empty()) return (const Range *)0; set::const_iterator iter = tree.end(); --iter; return &(*iter); } /// Treating offsets with their high-bits set as coming \e before /// offset where the high-bit is clear, return the last/latest contiguous /// Range within the given address space /// \param spaceid is the given address space /// \return indicated Range or NULL if empty const Range *RangeList::getLastSignedRange(AddrSpace *spaceid) const { uintb midway = spaceid->getHighest() / 2; // Maximal signed value Range range(spaceid,midway,midway); set::const_iterator iter = tree.upper_bound(range); // First element greater than -range- (should be MOST negative) if (iter!=tree.begin()) { --iter; if ((*iter).getSpace() == spaceid) return &(*iter); } // If there were no "positive" ranges, search for biggest negative range range = Range(spaceid,spaceid->getHighest(),spaceid->getHighest()); iter = tree.upper_bound(range); if (iter != tree.begin()) { --iter; if ((*iter).getSpace() == spaceid) return &(*iter); } return (const Range *)0; } /// Print a one line description of each disjoint Range making up \b this RangeList /// \param s is the output stream void 
RangeList::printBounds(ostream &s) const { if (tree.empty()) s << "all" << endl; else { set::const_iterator iter; for(iter=tree.begin();iter!=tree.end();++iter) { (*iter).printBounds(s); s << endl; } } } /// Encode \b this as a \ element /// \param encoder is the stream encoder void RangeList::encode(Encoder &encoder) const { set::const_iterator iter; encoder.openElement(ELEM_RANGELIST); for(iter=tree.begin();iter!=tree.end();++iter) { (*iter).encode(encoder); } encoder.closeElement(ELEM_RANGELIST); } /// Recover each individual disjoint Range for \b this RangeList. /// \param decoder is the stream decoder void RangeList::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(ELEM_RANGELIST); while(decoder.peekElement() != 0) { Range range; range.decode(decoder); tree.insert(range); } decoder.closeElement(elemId); } #ifdef UINTB4 uintb uintbmasks[9] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff }; #else uintb uintbmasks[9] = { 0, 0xff, 0xffff, 0xffffff, 0xffffffff, 0xffffffffffLL, 0xffffffffffffLL, 0xffffffffffffffLL, 0xffffffffffffffffLL }; #endif /// Treat the given \b val as a constant of \b size bytes /// \param val is the given value /// \param size is the size in bytes /// \return \b true if the constant (as sized) has its sign bit set bool signbit_negative(uintb val,int4 size) { // Return true if signbit is set (negative) uintb mask = 0x80; mask <<= 8*(size-1); return ((val&mask) != 0); } /// Treat the given \b in as a constant of \b size bytes. /// Negate this constant keeping the upper bytes zero. 
/// \param in is the given value /// \param size is the size in bytes /// \return the negation of the sized constant uintb uintb_negate(uintb in,int4 size) { // Invert bits return ((~in)&calc_mask(size)); } /// Take the first \b sizein bytes of the given \b in and sign-extend /// this to \b sizeout bytes, keeping any more significant bytes zero /// \param in is the given value /// \param sizein is the size to treat that value as an input /// \param sizeout is the size to sign-extend the value to /// \return the sign-extended value uintb sign_extend(uintb in,int4 sizein,int4 sizeout) { sizein = (sizein < sizeof(uintb)) ? sizein : sizeof(uintb); sizeout = (sizeout < sizeof(uintb)) ? sizeout : sizeof(uintb); intb sval = in; sval <<= (sizeof(intb) - sizein) * 8; uintb res = (uintb)(sval >> (sizeout - sizein) * 8); res >>= (sizeof(uintb) - sizeout)*8; return res; } /// Swap the least significant \b size bytes in \b val /// \param val is a reference to the value to swap /// \param size is the number of bytes to swap void byte_swap(intb &val,int4 size) { intb res = 0; while(size>0) { res <<= 8; res |= (val&0xff); val >>= 8; size -= 1; } val = res; } /// Swap the least significant \b size bytes in \b val /// \param val is the value to swap /// \param size is the number of bytes to swap /// \return the swapped value uintb byte_swap(uintb val,int4 size) { uintb res=0; while(size>0) { res <<= 8; res |= (val&0xff); val >>= 8; size -= 1; } return res; } /// The least significant bit is index 0. /// \param val is the given value /// \return the index of the least significant set bit, or -1 if none are set int4 leastsigbit_set(uintb val) { if (val==0) return -1; int4 res = 0; int4 sz = 4*sizeof(uintb); uintb mask = ~((uintb)0); do { mask >>= sz; if ((mask&val)==0) { res += sz; val >>= sz; } sz >>= 1; } while(sz!=0); return res; } /// The least significant bit is index 0. 
/// \param val is the given value /// \return the index of the most significant set bit, or -1 if none are set int4 mostsigbit_set(uintb val) { if (val==0) return -1; int4 res = 8*sizeof(uintb)-1; int4 sz = 4*sizeof(uintb); uintb mask = ~((uintb)0); do { mask <<= sz; if ((mask&val)==0) { res -= sz; val <<= sz; } sz >>= 1; } while(sz != 0); return res; } /// Count the number (population) bits set. /// \param val is the given value /// \return the number of one bits int4 popcount(uintb val) { val = (val & 0x5555555555555555L) + ((val >> 1) & 0x5555555555555555L); val = (val & 0x3333333333333333L) + ((val >> 2) & 0x3333333333333333L); val = (val & 0x0f0f0f0f0f0f0f0fL) + ((val >> 4) & 0x0f0f0f0f0f0f0f0fL); val = (val & 0x00ff00ff00ff00ffL) + ((val >> 8) & 0x00ff00ff00ff00ffL); val = (val & 0x0000ffff0000ffffL) + ((val >> 16) & 0x0000ffff0000ffffL); int4 res = (int4)(val & 0xff); res += (int4)((val >> 32) & 0xff); return res; } /// Count the number of more significant zero bits before the most significant /// one bit in the representation of the given value; /// \param val is the given value /// \return the number of zero bits int4 count_leading_zeros(uintb val) { if (val == 0) return 8*sizeof(uintb); uintb mask = ~((uintb)0); int4 maskSize = 4*sizeof(uintb); mask &= (mask << maskSize); int4 bit = 0; do { if ((mask & val)==0) { bit += maskSize; maskSize >>= 1; mask |= (mask >> maskSize); } else { maskSize >>= 1; mask &= (mask << maskSize); } } while(maskSize != 0); return bit; } /// Return smallest number of form 2^n-1, bigger or equal to the given value /// \param val is the given value /// \return the mask uintb coveringmask(uintb val) { uintb res = val; int4 sz = 1; while(sz < 8*sizeof(uintb)) { res = res | (res>>sz); sz <<= 1; } return res; } /// Treat \b val as a constant of size \b sz. 
/// Scanning across the bits of \b val return the number of transitions (from 0->1 or 1->0) /// If there are 2 or less transitions, this is an indication of a bit flag or a mask /// \param val is the given value /// \param sz is the size to treat the value as /// \return the number of transitions int4 bit_transitions(uintb val,int4 sz) { int4 res = 0; int4 last = val & 1; int4 cur; for(int4 i=1;i<8*sz;++i) { val >>= 1; cur = val & 1; if (cur != last) { res += 1; last = cur; } if (val==0) break; } return res; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/address.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file address.hh /// \brief Classes for specifying addresses and other low-level constants /// /// All addresses are absolute and there are no registers in CPUI. However, /// all addresses are prefixed with an "immutable" pointer, which can /// specify a separate RAM space, a register space, an i/o space etc. Thus /// a translation from a real machine language will typically simulate registers /// by placing them in their own space, separate from RAM. Indirection /// (i.e. pointers) must be simulated through the LOAD and STORE ops. 
#ifndef __ADDRESS_HH__ #define __ADDRESS_HH__ #include "space.hh" namespace ghidra { class AddrSpaceManager; extern AttributeId ATTRIB_FIRST; ///< Marshaling attribute "first" extern AttributeId ATTRIB_LAST; ///< Marshaling attribute "last" extern AttributeId ATTRIB_UNIQ; ///< Marshaling attribute "uniq" extern ElementId ELEM_ADDR; ///< Marshaling element \ extern ElementId ELEM_RANGE; ///< Marshaling element \ extern ElementId ELEM_RANGELIST; ///< Marshaling element \ extern ElementId ELEM_REGISTER; ///< Marshaling element \ extern ElementId ELEM_SEQNUM; ///< Marshaling element \ extern ElementId ELEM_VARNODE; ///< Marshaling element \ /// \brief A low-level machine address for labelling bytes and data. /// /// All data that can be manipulated within the processor reverse /// engineering model can be labelled with an Address. It is /// simply an address space (AddrSpace) and an offset within that /// space. Note that processor registers are typically modelled /// by creating a dedicated address space for them, as distinct /// from RAM say, and then specifying certain addresses within the /// register space that correspond to particular registers. However, /// an arbitrary address could refer to anything, /// RAM, ROM, cpu register, data segment, coprocessor, stack, /// nvram, etc. /// An Address represents an offset \e only, not an offset and length class Address { protected: AddrSpace *base; ///< Pointer to our address space uintb offset; ///< Offset (in bytes) public: /// An enum for specifying extremal addresses enum mach_extreme { m_minimal, ///< Smallest possible address m_maximal ///< Biggest possible address }; Address(mach_extreme ex); ///< Initialize an extremal address Address(void); ///< Create an invalid address Address(AddrSpace *id,uintb off); ///< Construct an address with a space/offset pair Address(const Address &op2); ///< A copy constructor bool isInvalid(void) const; ///< Is the address invalid? 
int4 getAddrSize(void) const; ///< Get the number of bytes in the address bool isBigEndian(void) const; ///< Is data at this address big endian encoded void printRaw(ostream &s) const; ///< Write a raw version of the address to a stream int4 read(const string &s); ///< Read in the address from a string AddrSpace *getSpace(void) const; ///< Get the address space uintb getOffset(void) const; ///< Get the address offset char getShortcut(void) const; ///< Get the shortcut character for the address space Address &operator=(const Address &op2); ///< Copy an address bool operator==(const Address &op2) const; ///< Compare two addresses for equality bool operator!=(const Address &op2) const; ///< Compare two addresses for inequality bool operator<(const Address &op2) const; ///< Compare two addresses via their natural ordering bool operator<=(const Address &op2) const; ///< Compare two addresses via their natural ordering Address operator+(int8 off) const; ///< Increment address by a number of bytes Address operator-(int8 off) const; ///< Decrement address by a number of bytes friend ostream &operator<<(ostream &s,const Address &addr); ///< Write out an address to stream bool containedBy(int4 sz,const Address &op2,int4 sz2) const; ///< Determine if \e op2 range contains \b this range int4 justifiedContain(int4 sz,const Address &op2,int4 sz2,bool forceleft) const; ///< Determine if \e op2 is the least significant part of \e this. 
int4 overlap(int4 skip,const Address &op,int4 size) const; ///< Determine how \b this address falls in a given address range int4 overlapJoin(int4 skip,const Address &op,int4 size) const; ///< Determine how \b this falls in a possible \e join space address range bool isContiguous(int4 sz,const Address &loaddr,int4 losz) const; ///< Does \e this form a contiguous range with \e loaddr bool isConstant(void) const; ///< Is this a \e constant \e value void renormalize(int4 size); ///< Make sure there is a backing JoinRecord if \b this is in the \e join space bool isJoin(void) const; ///< Is this a \e join \e value void encode(Encoder &encoder) const; ///< Encode \b this to a stream void encode(Encoder &encoder,int4 size) const; ///< Encode \b this and a size to a stream /// Decode an address from a stream static Address decode(Decoder &decoder); /// Decode an address and size from a stream static Address decode(Decoder &decoder,int4 &size); }; /// \brief A class for uniquely labelling and comparing PcodeOps /// /// Different PcodeOps generated by a single machine instruction /// can only be labelled with a single Address. But PcodeOps /// must be distinguishable and compared for execution order. /// A SeqNum extends the address for a PcodeOp to include: /// - A fixed \e time field, which is set at the time the PcodeOp /// is created. The \e time field guarantees a unique SeqNum /// for the life of the PcodeOp. /// - An \e order field, which is guaranteed to be comparable /// for the execution order of the PcodeOp within its basic /// block. The \e order field also provides uniqueness but /// may change over time if the syntax tree is manipulated. 
class SeqNum { Address pc; ///< Program counter at start of instruction uintm uniq; ///< Number to guarantee uniqueness uintm order; ///< Number for order comparisons within a block public: SeqNum(void) {} ///< Create an invalid sequence number SeqNum(Address::mach_extreme ex); ///< Create an extremal sequence number /// Create a sequence number with a specific \e time field SeqNum(const Address &a,uintm b) : pc(a) { uniq = b; } /// Copy a sequence number SeqNum(const SeqNum &op2) : pc(op2.pc) { uniq = op2.uniq; } /// Get the address portion of a sequence number const Address &getAddr(void) const { return pc; } /// Get the \e time field of a sequence number uintm getTime(void) const { return uniq; } /// Get the \e order field of a sequence number uintm getOrder(void) const { return order; } /// Set the \e order field of a sequence number void setOrder(uintm ord) { order = ord; } /// Compare two sequence numbers for equality bool operator==(const SeqNum &op2) const { return (uniq == op2.uniq); } /// Compare two sequence numbers for inequality bool operator!=(const SeqNum &op2) const { return (uniq != op2.uniq); } /// Compare two sequence numbers with their natural order bool operator<(const SeqNum &op2) const { if (pc == op2.pc) return (uniq < op2.uniq); return (pc < op2.pc); } /// Encode a SeqNum to a stream void encode(Encoder &encoder) const; /// Decode a SeqNum from a stream static SeqNum decode(Decoder &decoder); /// Write out a SeqNum in human readable form to a stream friend ostream &operator<<(ostream &s,const SeqNum &sq); }; class RangeProperties; /// \brief A contiguous range of bytes in some address space class Range { friend class RangeList; AddrSpace *spc; ///< Space containing range uintb first; ///< Offset of first byte in \b this Range uintb last; ///< Offset of last byte in \b this Range public: /// \brief Construct a Range from offsets /// /// Offsets must expressed in \e bytes as opposed to addressable \e words /// \param s is the address space 
containing the range /// \param f is the offset of the first byte in the range /// \param l is the offset of the last byte in the range Range(AddrSpace *s,uintb f,uintb l) { spc = s; first = f; last = l; } Range(void) {} ///< Constructor for use with decode Range(const RangeProperties &properties,const AddrSpaceManager *manage); ///< Construct range out of basic properties AddrSpace *getSpace(void) const { return spc; } ///< Get the address space containing \b this Range uintb getFirst(void) const { return first; } ///< Get the offset of the first byte in \b this Range uintb getLast(void) const { return last; } ///< Get the offset of the last byte in \b this Range Address getFirstAddr(void) const { return Address(spc,first); } ///< Get the address of the first byte Address getLastAddr(void) const { return Address(spc,last); } ///< Get the address of the last byte Address getLastAddrOpen(const AddrSpaceManager *manage) const; ///< Get address of first byte after \b this bool contains(const Address &addr) const; ///< Determine if the address is in \b this Range /// \brief Sorting operator for Ranges /// /// Compare based on address space, then the starting offset /// \param op2 is the Range to compare with \b this /// \return \b true if \b this comes before op2 bool operator<(const Range &op2) const { if (spc->getIndex() != op2.spc->getIndex()) return (spc->getIndex() < op2.spc->getIndex()); return (first < op2.first); } void printBounds(ostream &s) const; ///< Print \b this Range to a stream void encode(Encoder &encoder) const; ///< Encode \b this Range to a stream void decode(Decoder &decoder); ///< Restore \b this from a stream void decodeFromAttributes(Decoder &decoder); ///< Read \b from attributes on another tag }; /// \brief A partially parsed description of a Range /// /// Class that allows \ tags to be parsed, when the address space doesn't yet exist class RangeProperties { friend class Range; string spaceName; ///< Name of the address space containing the 
range uintb first; ///< Offset of first byte in the Range uintb last; ///< Offset of last byte in the Range bool isRegister; ///< Range is specified a register name bool seenLast; ///< End of the range is actively specified public: RangeProperties(void) { first = 0; last = 0; isRegister = false; seenLast = false; } void decode(Decoder &decoder); ///< Decode \b this from a stream }; /// \brief A disjoint set of Ranges, possibly across multiple address spaces /// /// This is a container for addresses. It maintains a disjoint list of Ranges /// that cover all the addresses in the container. Ranges can be inserted /// and removed, but overlapping/adjacent ranges will get merged. class RangeList { set tree; ///< The sorted list of Range objects public: RangeList(const RangeList &op2) { tree = op2.tree; } ///< Copy constructor RangeList(void) {} ///< Construct an empty container void clear(void) { tree.clear(); } ///< Clear \b this container to empty bool empty(void) const { return tree.empty(); } ///< Return \b true if \b this is empty set::const_iterator begin(void) const { return tree.begin(); } ///< Get iterator to beginning Range set::const_iterator end(void) const { return tree.end(); } ///< Get iterator to ending Range int4 numRanges(void) const { return tree.size(); } ///< Return the number of Range objects in container const Range *getFirstRange(void) const; ///< Get the first Range const Range *getLastRange(void) const; ///< Get the last Range const Range *getLastSignedRange(AddrSpace *spaceid) const; ///< Get the last Range viewing offsets as signed const Range *getRange(AddrSpace *spaceid,uintb offset) const; ///< Get Range containing the given byte void insertRange(AddrSpace *spc,uintb first,uintb last); ///< Insert a range of addresses void removeRange(AddrSpace *spc,uintb first,uintb last); ///< Remove a range of addresses void merge(const RangeList &op2); ///< Merge another RangeList into \b this bool inRange(const Address &addr,int4 size) const; ///< 
Check containment an address range uintb longestFit(const Address &addr,uintb maxsize) const; ///< Find size of biggest Range containing given address void printBounds(ostream &s) const; ///< Print a description of \b this RangeList to stream void encode(Encoder &encoder) const; ///< Encode \b this RangeList to a stream void decode(Decoder &decoder); ///< Decode \b this RangeList from a \ element }; /// Precalculated masks indexed by size extern uintb uintbmasks[]; // Inline functions /// An invalid address is possible in some circumstances. /// This deliberately constructs an invalid address inline Address::Address(void) { base = (AddrSpace *)0; } /// This is the basic Address constructor /// \param id is the space containing the address /// \param off is the offset of the address inline Address::Address(AddrSpace *id,uintb off) { base=id; offset=off; } /// This is a standard copy constructor, copying the /// address space and the offset /// \param op2 is the Address to copy inline Address::Address(const Address &op2) { base = op2.base; offset = op2.offset; } /// Determine if this is an invalid address. This only /// detects \e deliberate invalid addresses. /// \return \b true if the address is invalid inline bool Address::isInvalid(void) const { return (base == (AddrSpace *)0); } /// Get the number of bytes needed to encode the \e offset /// for this address. /// \return the number of bytes in the encoding inline int4 Address::getAddrSize(void) const { return base->getAddrSize(); } /// Determine if data stored at this address is big endian encoded. /// \return \b true if the address is big endian inline bool Address::isBigEndian(void) const { return base->isBigEndian(); } /// Write a short-hand or debug version of this address to a /// stream. /// \param s is the stream being written inline void Address::printRaw(ostream &s) const { if (base == (AddrSpace *)0) { s << "invalid_addr"; return; } base->printRaw(s,offset); } /// Convert a string into an address. 
The string format can be /// tailored for the particular address space. /// \param s is the string to parse /// \return any size associated with the parsed string inline int4 Address::read(const string &s) { int4 sz; offset=base->read(s,sz); return sz; } /// Get the address space associated with this address. /// \return the AddressSpace pointer, or \b NULL if invalid inline AddrSpace *Address::getSpace(void) const { return base; } /// Get the offset of the address as an integer. /// \return the offset integer inline uintb Address::getOffset(void) const { return offset; } /// Each address has a shortcut character associated with it /// for use with the read and printRaw methods. /// \return the shortcut char inline char Address::getShortcut(void) const { return base->getShortcut(); } /// This is a standard assignment operator, copying the /// address space pointer and the offset /// \param op2 is the Address being assigned /// \return a reference to altered address inline Address &Address::operator=(const Address &op2) { base = op2.base; offset = op2.offset; return *this; } /// Check if two addresses are equal. I.e. if their address /// space and offset are the same. /// \param op2 is the address to compare to \e this /// \return \b true if the addresses are the same inline bool Address::operator==(const Address &op2) const { return ((base==op2.base)&&(offset==op2.offset)); } /// Check if two addresses are not equal. I.e. if either their /// address space or offset are different. /// \param op2 is the address to compare to \e this /// \return \b true if the addresses are different inline bool Address::operator!=(const Address &op2) const { return !(*this==op2); } /// Do an ordering comparison of two addresses. Addresses are /// sorted first on space, then on offset. So two addresses in /// the same space compare naturally based on their offset, but /// addresses in different spaces also compare. Different spaces /// are ordered by their index. 
/// \param op2 is the address to compare to /// \return \b true if \e this comes before \e op2 inline bool Address::operator<(const Address &op2) const { if (base != op2.base) { if (base == (AddrSpace *)0) { return true; } else if (base == (AddrSpace *) ~((uintp)0)) { return false; } else if (op2.base == (AddrSpace *)0) { return false; } else if (op2.base == (AddrSpace *) ~((uintp)0)) { return true; } return (base->getIndex() < op2.base->getIndex()); } if (offset != op2.offset) return (offset < op2.offset); return false; } /// Do an ordering comparison of two addresses. /// \param op2 is the address to compare to /// \return \b true if \e this comes before or is equal to \e op2 inline bool Address::operator<=(const Address &op2) const { if (base != op2.base) { if (base == (AddrSpace *)0) { return true; } else if (base == (AddrSpace *) ~((uintp)0)) { return false; } else if (op2.base == (AddrSpace *)0) { return false; } else if (op2.base == (AddrSpace *) ~((uintp)0)) { return true; } return (base->getIndex() < op2.base->getIndex()); } if (offset != op2.offset) return (offset < op2.offset); return true; } /// Add an integer value to the offset portion of the address. /// The addition takes into account the \e size of the address /// space, and the Address will wrap around if necessary. /// \param off is the number to add to the offset /// \return the new incremented address inline Address Address::operator+(int8 off) const { return Address(base,base->wrapOffset(offset+off)); } /// Subtract an integer value from the offset portion of the /// address. The subtraction takes into account the \e size of /// the address space, and the Address will wrap around if /// necessary. 
/// \param off is the number to subtract from the offset /// \return the new decremented address inline Address Address::operator-(int8 off) const { return Address(base,base->wrapOffset(offset-off)); } /// This method is equivalent to Address::overlap, but a range in the \e join space can be /// considered overlapped with its constituent pieces. /// If \e this + \e skip falls in the range, \e op to \e op + \e size, then a non-negative integer is /// returned indicating where in the interval it falls. Otherwise -1 is returned. /// \param skip is an adjust to \e this address /// \param op is the start of the range to check /// \param size is the size of the range /// \return an integer indicating how overlap occurs inline int4 Address::overlapJoin(int4 skip,const Address &op,int4 size) const { return op.getSpace()->overlapJoin(op.getOffset(), size, base, offset, skip); } /// Determine if this address is from the \e constant \e space. /// All constant values are represented as an offset into /// the \e constant \e space. /// \return \b true if this address represents a constant inline bool Address::isConstant(void) const { return (base->getType() == IPTR_CONSTANT); } /// Determine if this address represents a set of joined memory locations. /// \return \b true if this address represents a join inline bool Address::isJoin(void) const { return (base->getType() == IPTR_JOIN); } /// Save an \ element corresponding to this address to a /// stream. The exact format is determined by the address space, /// but this generally has a \e space and an \e offset attribute. /// \param encoder is the stream encoder inline void Address::encode(Encoder &encoder) const { encoder.openElement(ELEM_ADDR); if (base!=(AddrSpace *)0) base->encodeAttributes(encoder,offset); encoder.closeElement(ELEM_ADDR); } /// Encode an \ element corresponding to this address to a /// stream. The tag will also include an extra \e size attribute /// so that it can describe an entire memory range. 
/// \param encoder is the stream encoder /// \param size is the number of bytes in the range inline void Address::encode(Encoder &encoder,int4 size) const { encoder.openElement(ELEM_ADDR); if (base!=(AddrSpace *)0) base->encodeAttributes(encoder,offset,size); encoder.closeElement(ELEM_ADDR); } /// \param addr is the Address to test for containment /// \return \b true if addr is in \b this Range inline bool Range::contains(const Address &addr) const { if (spc != addr.getSpace()) return false; if (first > addr.getOffset()) return false; if (last < addr.getOffset()) return false; return true; } /// \param size is the desired size in bytes /// \return a value appropriate for masking off the first \e size bytes inline uintb calc_mask(int4 size) { return uintbmasks[((uint4)size) < 8 ? size : 8]; } /// Perform a CPUI_INT_RIGHT on the given val /// \param val is the value to shift /// \param sa is the number of bits to shift /// \return the shifted value inline uintb pcode_right(uintb val,int4 sa) { if (sa >= 8*sizeof(uintb)) return 0; return val >> sa; } /// Perform a CPUI_INT_LEFT on the given val /// \param val is the value to shift /// \param sa is the number of bits to shift /// \return the shifted value inline uintb pcode_left(uintb val,int4 sa) { if (sa >= 8*sizeof(uintb)) return 0; return val << sa; } /// \brief Calculate smallest mask that covers the given value /// /// Calculcate a mask that covers either the least significant byte, uint2, uint4, or uint8, /// whatever is smallest. 
/// \param val is the given value /// \return the minimal mask inline uintb minimalmask(uintb val) { if (val > 0xffffffff) return ~((uintb)0); if (val > 0xffff) return 0xffffffff; if (val > 0xff) return 0xffff; return 0xff; } /// \brief Sign extend above given bit /// /// Sign extend \b val starting at \b bit /// \param val is the value to be sign-extended /// \param bit is the index of the bit to extend from (0=least significant bit) /// \return the sign extended value inline intb sign_extend(intb val,int4 bit) { int4 sa = 8*sizeof(intb) - (bit+1); val = (val << sa) >> sa; return val; } /// \brief Clear all bits above given bit /// /// Zero extend \b val starting at \b bit /// \param val is the value to be zero extended /// \param bit is the index of the bit to extend from (0=least significant bit) /// \return the extended value inline intb zero_extend(intb val,int4 bit) { int4 sa = sizeof(intb)*8 - (bit+1); return (intb)(((uintb)val << sa) >> sa); } extern bool signbit_negative(uintb val,int4 size); ///< Return true if the sign-bit is set extern uintb calc_mask(int4 size); ///< Calculate a mask for a given byte size extern uintb uintb_negate(uintb in,int4 size); ///< Negate the \e sized value extern uintb sign_extend(uintb in,int4 sizein,int4 sizeout); ///< Sign-extend a value between two byte sizes extern void byte_swap(intb &val,int4 size); ///< Swap bytes in the given value extern uintb byte_swap(uintb val,int4 size); ///< Return the given value with bytes swapped extern int4 leastsigbit_set(uintb val); ///< Return index of least significant bit set in given value extern int4 mostsigbit_set(uintb val); ///< Return index of most significant bit set in given value extern int4 popcount(uintb val); ///< Return the number of one bits in the given value extern int4 count_leading_zeros(uintb val); ///< Return the number of leading zero bits in the given value extern uintb coveringmask(uintb val); ///< Return a mask that \e covers the given value extern int4 
bit_transitions(uintb val,int4 sz); ///< Calculate the number of bit transitions in the sized value } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/compression.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "compression.hh" namespace ghidra { /// The compression \b level ranges from 1-9 from faster/least compression to slower/most compression. /// Use a \b level of 0 for no compression and -1 for the \e default compression level. /// \param level is the compression level Compress::Compress(int4 level) { compStream.zalloc = Z_NULL; compStream.zfree = Z_NULL; compStream.opaque = Z_NULL; int4 ret = deflateInit(&compStream, level); if (ret != Z_OK) throw LowlevelError("Could not initialize deflate stream state"); } Compress::~Compress(void) { deflateEnd(&compStream); } /// Return the number of bytes of output space still available. Output may be limited by the amount /// of space in the output buffer or the amount of data available in the current input buffer. /// \param buffer is where compressed bytes are stored /// \param sz is the size, in bytes, of the buffer /// \param finish is set to \b true if this is the final buffer to add to the stream /// \return the number of output bytes still available int4 Compress::deflate(uint1 *buffer,int4 sz,bool finish) { int flush = finish ? 
Z_FINISH : Z_NO_FLUSH; compStream.avail_out = sz; compStream.next_out = buffer; int ret = ::deflate(&compStream, flush); if (ret == Z_STREAM_ERROR) throw LowlevelError("Error compressing stream"); return compStream.avail_out; } Decompress::Decompress(void) { streamFinished = false; compStream.zalloc = Z_NULL; compStream.zfree = Z_NULL; compStream.opaque = Z_NULL; compStream.avail_in = 0; compStream.next_in = Z_NULL; int ret = inflateInit(&compStream); if (ret != Z_OK) throw LowlevelError("Could not initialize inflate stream state"); } /// Return the number of bytes of output space still available. Output may be limited by the amount /// of space in the output buffer or the amount of data available in the current input buffer. /// \param buffer is where uncompressed bytes are stored /// \param sz is the size, in bytes, of the buffer /// \return the number of output bytes still available int4 Decompress::inflate(uint1 *buffer,int4 sz) { compStream.avail_out = sz; compStream.next_out = buffer; int ret = ::inflate(&compStream, Z_NO_FLUSH); switch (ret) { case Z_NEED_DICT: case Z_DATA_ERROR: case Z_MEM_ERROR: case Z_STREAM_ERROR: throw LowlevelError("Error decompressing stream"); case Z_STREAM_END: streamFinished = true; break; default: break; } return compStream.avail_out; } Decompress::~Decompress(void) { inflateEnd(&compStream); } const int4 CompressBuffer::IN_BUFFER_SIZE = 4096; const int4 CompressBuffer::OUT_BUFFER_SIZE = 4096; /// \param s is the backing output stream /// \param level is the level of compression CompressBuffer::CompressBuffer(ostream &s,int4 level) : outStream(s), compressor(level) { inBuffer = new uint1[IN_BUFFER_SIZE]; outBuffer = new uint1[OUT_BUFFER_SIZE]; setp((char *)inBuffer,(char *)inBuffer + IN_BUFFER_SIZE-1); } CompressBuffer::~CompressBuffer(void) { delete [] inBuffer; delete [] outBuffer; } /// The compressor is called repeatedly and its output is written to the backing stream /// until the compressor can no longer fill the \e output 
buffer. /// \param lastBuffer is \b true if this is the final set of bytes to add to the compressed stream void CompressBuffer::flushInput(bool lastBuffer) { int len = pptr() - pbase(); compressor.input((uint1 *)pbase(),len); int4 outAvail; do { outAvail = OUT_BUFFER_SIZE; outAvail = compressor.deflate(outBuffer,outAvail,lastBuffer); outStream.write((char *)outBuffer,OUT_BUFFER_SIZE-outAvail); } while(outAvail == 0); pbump(-len); } /// \param c is the final character filling the buffer /// \return the written character int CompressBuffer::overflow(int c) { if (c != EOF) { *pptr() = c; pbump(1); } flushInput(false); return c; } /// \return 0 for success int CompressBuffer::sync(void) { flushInput(true); return 0; } } ================================================ FILE: pypcode/sleigh/compression.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file compression.hh /// \brief The Compress and Decompress classes wrapping the deflate and inflate algorithms #ifndef __COMPRESSION__ #define __COMPRESSION__ #include "error.hh" #ifdef LOCAL_ZLIB #include "../zlib/zlib.h" #else #include #endif namespace ghidra { /// \brief Wrapper for the deflate algorithm /// /// Initialize/free algorithm resources. Provide successive arrays of bytes to compress via /// the input() method. Compute successive arrays of compressed bytes via the deflate() method. 
class Compress { z_stream compStream; ///< The zlib deflate algorithm state public: Compress(int4 level); ///< Initialize the deflate algorithm state ~Compress(void); ///< Free algorithm state resources /// \brief Provide the next sequence of bytes to be compressed /// /// \param buffer is a pointer to the bytes to compress /// \param sz is the number of bytes void input(uint1 *buffer,int4 sz) { compStream.avail_in = sz; compStream.next_in = buffer; } int4 deflate(uint1 *buffer,int4 sz,bool finish); ///< Deflate as much as possible into given buffer }; /// \brief Wrapper for the inflate algorithm /// /// Initialize/free algorithm resources. Provide successive arrays of compressed bytes via /// the input() method. Compute successive arrays of uncompressed bytes via the inflate() method. class Decompress { z_stream compStream; ///< The zlib inflate algorithm state bool streamFinished; ///< Set to \b true if the end of the compressed stream has been reached public: Decompress(void); ///< Initialize the inflate algorithm state ~Decompress(void); ///< Free algorithm state resources /// \brief Provide the next sequence of compressed bytes /// /// \param buffer is a pointer to the compressed bytes /// \param sz is the number of bytes void input(uint1 *buffer,int4 sz) { compStream.next_in = buffer; compStream.avail_in = sz; } bool isFinished(void) const { return streamFinished; } ///< Return \b if end of compressed stream is reached int4 inflate(uint1 *buffer,int4 sz); ///< Inflate as much as possible into given buffer }; /// \brief Stream buffer that performs compression /// /// Provides an ostream filter that compresses the stream using the \e deflate algorithm. /// The stream buffer is provided a backing stream that is the ultimate destination of the compressed bytes. /// A front-end stream is initialized with \b this stream buffer. 
/// After writing the full sequence of bytes to compressed to the front-end stream, make sure to /// call the stream's flush() method to emit the final compressed bytes to the backing stream. class CompressBuffer : public std::streambuf { static const int4 IN_BUFFER_SIZE; ///< Number of bytes in the \e input buffer static const int4 OUT_BUFFER_SIZE; ///< Number of bytes in the \e output buffer ostream &outStream; ///< The backing stream receiving compressed bytes uint1 *inBuffer; ///< The \e input buffer uint1 *outBuffer; ///< The \e output buffer Compress compressor; ///< Compressor state protected: void flushInput(bool lastBuffer); ///< Compress the current set of bytes in the \e input buffer virtual int overflow(int c); ///< Pass the filled input buffer to the compressor virtual int sync(void); ///< Pass remaining bytes in the input buffer to the compressor public: CompressBuffer(ostream &s,int4 level); ///< Constructor ~CompressBuffer(void); ///< Destructor }; } #endif ================================================ FILE: pypcode/sleigh/context.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "context.hh" #include "slghsymbol.hh" #include "translate.hh" namespace ghidra { ParserContext::ParserContext(ContextCache *ccache,Translate *trans) { parsestate = uninitialized; contcache = ccache; translate = trans; if (ccache != (ContextCache *)0) { contextsize = ccache->getDatabase()->getContextSize(); context = new uintm[ contextsize ]; } else { contextsize = 0; context = (uintm *)0; } } void ParserContext::initialize(int4 maxstate,int4 maxparam,AddrSpace *spc) { const_space = spc; state.resize(maxstate); state[0].parent = (ConstructState *)0; for(int4 i=0;iinstructionLength(naddr); n2addr = naddr + length; } return n2addr; } uintm ParserContext::getInstructionBytes(int4 bytestart,int4 size,uint4 off) const { // Get bytes from the instruction stream into a intm // (assuming big endian format) off += bytestart; if (off >=16) throw BadDataError("Instruction is using more than 16 bytes"); const uint1 *ptr = buf + off; uintm res = 0; for(int4 i=0;i= 16) throw BadDataError("Instruction is using more than 16 bytes"); const uint1 *ptr = buf + off; startbit = startbit % 8; int4 bytesize = (startbit+size-1)/8 + 1; uintm res = 0; for(int4 i=0;i>= 8*sizeof(uintm)-size; // Shift to bottom of intm return res; } uintm ParserContext::getContextBytes(int4 bytestart,int4 size) const { // Get bytes from context into a uintm int4 intstart = bytestart / sizeof(uintm); uintm res = context[ intstart ]; int4 byteOffset = bytestart % sizeof(uintm); int4 unusedBytes = sizeof(uintm) - size; res <<= byteOffset*8; res >>= unusedBytes*8; int4 remaining = size - sizeof(uintm) + byteOffset; if ((remaining > 0)&&(++intstart < contextsize)) { // If we extend beyond boundary of a single uintm uintm res2 = context[ intstart ]; unusedBytes = sizeof(uintm) - remaining; res2 >>= unusedBytes * 8; res |= res2; } return res; } uintm ParserContext::getContextBits(int4 startbit,int4 size) const { int4 intstart = startbit / (8*sizeof(uintm)); uintm res = context[ intstart ]; // Get intm 
containing highest bit int4 bitOffset = startbit % (8*sizeof(uintm)); int4 unusedBits = 8*sizeof(uintm) - size; res <<= bitOffset; // Shift startbit to highest position res >>= unusedBits; int4 remaining = size - 8*sizeof(uintm) + bitOffset; if ((remaining > 0) && (++intstart < contextsize)) { uintm res2 = context[ intstart ]; unusedBits = 8*sizeof(uintm) - remaining; res2 >>= unusedBits; res |= res2; } return res; } void ParserContext::addCommit(TripleSymbol *sym,int4 num,uintm mask,bool flow,ConstructState *point) { contextcommit.emplace_back(); ContextSet &set(contextcommit.back()); set.sym = sym; set.point = point; // This is the current state set.num = num; set.mask = mask; set.value = context[num] & mask; set.flow = flow; } void ParserContext::applyCommits(void) { if (contextcommit.empty()) return; ParserWalker walker(this); walker.baseState(); vector::iterator iter; for(iter=contextcommit.begin();iter!=contextcommit.end();++iter) { TripleSymbol *sym = (*iter).sym; Address commitaddr; if (sym->getType() == SleighSymbol::operand_symbol) { // The value for an OperandSymbol is probabably already // calculated, we just need to find the right // tree node of the state int4 i = ((OperandSymbol *)sym)->getIndex(); FixedHandle &h((*iter).point->resolve[i]->hand); commitaddr = Address(h.space,h.offset_offset); } else { FixedHandle hand; sym->getFixedHandle(hand,walker); commitaddr = Address(hand.space,hand.offset_offset); } if (commitaddr.isConstant()) { // If the symbol handed to globalset was a computed value, the getFixedHandle calculation // will return a value in the constant space. 
If this is a case, we explicitly convert the // offset into the current address space uintb newoff = AddrSpace::addressToByte(commitaddr.getOffset(),addr.getSpace()->getWordSize()); commitaddr = Address(addr.getSpace(),newoff); } // Commit context change if ((*iter).flow) // The context flows contcache->setContext(commitaddr,(*iter).num,(*iter).mask,(*iter).value); else { // Set the context so that is doesn't flow Address nextaddr = commitaddr + 1; if (nextaddr.getOffset() < commitaddr.getOffset()) contcache->setContext(commitaddr,(*iter).num,(*iter).mask,(*iter).value); else contcache->setContext(commitaddr,nextaddr,(*iter).num,(*iter).mask,(*iter).value); } } } void ParserWalker::setOutOfBandState(Constructor *ct,int4 index,ConstructState *tempstate,const ParserWalker &otherwalker) { // Initialize walker for future calls into getInstructionBytes assuming -ct- is the current position in the walk const ConstructState *pt = otherwalker.point; int4 curdepth = otherwalker.depth; while(pt->ct != ct) { if (curdepth <= 0) return; curdepth -= 1; pt = pt->parent; } OperandSymbol *sym = ct->getOperand(index); int4 i = sym->getOffsetBase(); // if i<0, i.e. the offset of the operand is constructor relative // its possible that the branch corresponding to the operand // has not been constructed yet. Context expressions are // evaluated BEFORE the constructors branches are created. // So we have to construct the offset explicitly. 
if (i<0) tempstate->offset = pt->offset + sym->getRelativeOffset(); else tempstate->offset = pt->resolve[index]->offset; tempstate->ct = ct; tempstate->length = pt->length; point = tempstate; depth = 0; breadcrumb[0] = 0; } void ParserWalkerChange::calcCurrentLength(int4 length,int4 numopers) { // Calculate the length of the current constructor // state assuming all its operands are constructed length += point->offset; // Convert relative length to absolute length for(int4 i=0;iresolve[i]; int4 sublength = subpoint->length + subpoint->offset; // Since subpoint->offset is an absolute offset // (relative to beginning of instruction) sublength if (sublength > length) // is absolute and must be compared to absolute length length = sublength; } point->length = length - point->offset; // Convert back to relative length } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/context.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef __CONTEXT_HH__ #define __CONTEXT_HH__ #include "globalcontext.hh" #include "opcodes.hh" namespace ghidra { class Token { // A multiple-byte sized chunk of pattern in a bitstream string name; int4 size; // Number of bytes in token; int4 index; // Index of this token, for resolving offsets bool bigendian; public: Token(const string &nm,int4 sz,bool be,int4 ind) : name(nm) { size = sz; bigendian=be; index = ind; } int4 getSize(void) const { return size; } bool isBigEndian(void) const { return bigendian; } int4 getIndex(void) const { return index; } const string &getName(void) const { return name; } }; struct FixedHandle { // A handle that is fully resolved AddrSpace *space; uint4 size; AddrSpace *offset_space; // Either null or where dynamic offset is stored uintb offset_offset; // Either static offset or ptr offset uint4 offset_size; // Size of pointer AddrSpace *temp_space; // Consistent temporary location for value uintb temp_offset; }; class Constructor; struct ConstructState { Constructor *ct; FixedHandle hand; vector resolve; ConstructState *parent; int4 length; // Length of this instantiation of the constructor uint4 offset; // Absolute offset (from start of instruction) }; class TripleSymbol; struct ContextSet { // Instructions for setting a global context value TripleSymbol *sym; // Resolves to address where setting takes effect ConstructState *point; // Point at which context set was made int4 num; // Number of context word affected uintm mask; // Bits within word affected uintm value; // New setting for bits bool flow; // Does the new context flow from its set point }; class ParserWalker; // Forward declaration class ParserWalkerChange; class Translate; class ParserContext { friend class ParserWalker; friend class ParserWalkerChange; public: enum { // Possible states of the ParserContext uninitialized = 0, // Instruction has not been parsed at all disassembly = 1, // Instruction is parsed in preparation for disassembly pcode = 2 // Instruction is 
parsed in preparation for generating p-code }; private: Translate *translate; // Instruction parser int4 parsestate; AddrSpace *const_space; uint1 buf[16]; // Buffer of bytes in the instruction stream uintm *context; // Pointer to local context int4 contextsize; // Number of entries in context array ContextCache *contcache; // Interface for getting/setting context vector contextcommit; Address addr; // Address of start of instruction Address naddr; // Address of next instruction mutable Address n2addr; // Address of instruction after the next Address calladdr; // For injections, this is the address of the call being overridden vector state; // Current resolved instruction ConstructState *base_state; int4 alloc; // Number of ConstructState's allocated int4 delayslot; // delayslot depth public: ParserContext(ContextCache *ccache,Translate *trans); ~ParserContext(void) { if (context != (uintm *)0) delete [] context; } uint1 *getBuffer(void) { return buf; } void initialize(int4 maxstate,int4 maxparam,AddrSpace *spc); int4 getParserState(void) const { return parsestate; } void setParserState(int4 st) { parsestate = st; } void deallocateState(ParserWalkerChange &walker); void allocateOperand(int4 i,ParserWalkerChange &walker); void setAddr(const Address &ad) { addr = ad; n2addr = Address(); } void setNaddr(const Address &ad) { naddr = ad; } void setCalladdr(const Address &ad) { calladdr = ad; } void addCommit(TripleSymbol *sym,int4 num,uintm mask,bool flow,ConstructState *point); void clearCommits(void) { contextcommit.clear(); } void applyCommits(void); const Address &getAddr(void) const { return addr; } const Address &getNaddr(void) const { return naddr; } const Address &getN2addr(void) const; const Address &getDestAddr(void) const { return calladdr; } const Address &getRefAddr(void) const { return calladdr; } AddrSpace *getCurSpace(void) const { return addr.getSpace(); } AddrSpace *getConstSpace(void) const { return const_space; } uintm getInstructionBytes(int4 
byteoff,int4 numbytes,uint4 off) const; uintm getContextBytes(int4 byteoff,int4 numbytes) const; uintm getInstructionBits(int4 startbit,int4 size,uint4 off) const; uintm getContextBits(int4 startbit,int4 size) const; void setContextWord(int4 i,uintm val,uintm mask) { context[i] = (context[i]&(~mask))|(mask&val); } void loadContext(void) { contcache->getContext(addr,context); } int4 getLength(void) const { return base_state->length; } void setDelaySlot(int4 val) { delayslot = val; } int4 getDelaySlot(void) const { return delayslot; } }; class ParserWalker { // A class for walking the ParserContext const ParserContext *const_context; const ParserContext *cross_context; protected: ConstructState *point; // The current node being visited int4 depth; // Depth of the current node int4 breadcrumb[32]; // Path of operands from root public: ParserWalker(const ParserContext *c) { const_context = c; cross_context = (const ParserContext *)0; } ParserWalker(const ParserContext *c,const ParserContext *cross) { const_context = c; cross_context = cross; } const ParserContext *getParserContext(void) const { return const_context; } void baseState(void) { point = const_context->base_state; depth=0; breadcrumb[0] = 0; } void setOutOfBandState(Constructor *ct,int4 index,ConstructState *tempstate,const ParserWalker &otherwalker); bool isState(void) const { return (point != (ConstructState *)0); } void pushOperand(int4 i) { breadcrumb[depth++] = i+1; point = point->resolve[i]; breadcrumb[depth] = 0; } void popOperand(void) { point = point->parent; depth-= 1; } uint4 getOffset(int4 i) const { if (i<0) return point->offset; ConstructState *op=point->resolve[i]; return op->offset + op->length; } Constructor *getConstructor(void) const { return point->ct; } int4 getOperand(void) const { return breadcrumb[depth]; } FixedHandle &getParentHandle(void) { return point->hand; } const FixedHandle &getFixedHandle(int4 i) const { return point->resolve[i]->hand; } AddrSpace *getCurSpace(void) const { 
return const_context->getCurSpace(); } AddrSpace *getConstSpace(void) const { return const_context->getConstSpace(); } const Address &getAddr(void) const { if (cross_context != (const ParserContext *)0) { return cross_context->getAddr(); } return const_context->getAddr(); } const Address &getNaddr(void) const { if (cross_context != (const ParserContext *)0) { return cross_context->getNaddr();} return const_context->getNaddr(); } const Address &getN2addr(void) const { if (cross_context != (const ParserContext *)0) { return cross_context->getN2addr();} return const_context->getN2addr(); } const Address &getRefAddr(void) const { if (cross_context != (const ParserContext *)0) { return cross_context->getRefAddr();} return const_context->getRefAddr(); } const Address &getDestAddr(void) const { if (cross_context != (const ParserContext *)0) { return cross_context->getDestAddr();} return const_context->getDestAddr(); } int4 getLength(void) const { return const_context->getLength(); } uintm getInstructionBytes(int4 byteoff,int4 numbytes) const { return const_context->getInstructionBytes(byteoff,numbytes,point->offset); } uintm getContextBytes(int4 byteoff,int4 numbytes) const { return const_context->getContextBytes(byteoff,numbytes); } uintm getInstructionBits(int4 startbit,int4 size) const { return const_context->getInstructionBits(startbit,size,point->offset); } uintm getContextBits(int4 startbit,int4 size) const { return const_context->getContextBits(startbit,size); } }; class ParserWalkerChange : public ParserWalker { // Extension to walker that allows for on the fly modifications to tree friend class ParserContext; ParserContext *context; public: ParserWalkerChange(ParserContext *c) : ParserWalker(c) { context = c; } ParserContext *getParserContext(void) { return context; } ConstructState *getPoint(void) { return point; } void setOffset(uint4 off) { point->offset = off; } void setConstructor(Constructor *c) { point->ct = c; } void setCurrentLength(int4 len) { 
point->length = len; } void calcCurrentLength(int4 length,int4 numopers); }; struct SleighError : public LowlevelError { SleighError(const string &s) : LowlevelError(s) {} }; inline void ParserContext::deallocateState(ParserWalkerChange &walker) { alloc = 1; walker.context=this; walker.baseState(); } inline void ParserContext::allocateOperand(int4 i,ParserWalkerChange &walker) { ConstructState *opstate = &state[alloc++]; opstate->parent = walker.point; opstate->ct = (Constructor *)0; walker.point->resolve[i] = opstate; walker.breadcrumb[walker.depth++] += 1; walker.point = opstate; walker.breadcrumb[walker.depth] = 0; } } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/emulate.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "emulate.hh" namespace ghidra { /// Any time the emulator is about to execute a user-defined pcode op with the given name, /// the indicated breakpoint is invoked first. The break table does \e not assume responsibility /// for freeing the breakpoint object. 
/// \param name is the name of the user-defined pcode op /// \param func is the breakpoint object to associate with the pcode op void BreakTableCallBack::registerPcodeCallback(const string &name,BreakCallBack *func) { func->setEmulate(emulate); vector userops; trans->getUserOpNames(userops); for(int4 i=0;isetEmulate(emulate); addresscallback[addr] = func; } /// This routine invokes the setEmulate method on each breakpoint currently in the table /// \param emu is the emulator to be associated with the breakpoints void BreakTableCallBack::setEmulate(Emulate *emu) { // Make sure all callbbacks are aware of new emulator emulate = emu; map::iterator iter1; for(iter1=addresscallback.begin();iter1!=addresscallback.end();++iter1) (*iter1).second->setEmulate(emu); map::iterator iter2; for(iter2=pcodecallback.begin();iter2!=pcodecallback.end();++iter2) (*iter2).second->setEmulate(emu); } /// This routine examines the pcode-op based container for any breakpoints associated with the /// given op. If one is found, its pcodeCallback method is invoked. /// \param curop is pcode op being checked for breakpoints /// \return \b true if the breakpoint exists and returns \b true, otherwise return \b false bool BreakTableCallBack::doPcodeOpBreak(PcodeOpRaw *curop) { uintb val = curop->getInput(0)->offset; map::const_iterator iter; iter = pcodecallback.find(val); if (iter == pcodecallback.end()) return false; return (*iter).second->pcodeCallback(curop); } /// This routine examines the address based container for any breakpoints associated with the /// given address. If one is found, its addressCallback method is invoked. 
/// \param addr is the address being checked for breakpoints /// \return \b true if the breakpoint exists and returns \b true, otherwise return \b false bool BreakTableCallBack::doAddressBreak(const Address &addr) { map::const_iterator iter; iter = addresscallback.find(addr); if (iter == addresscallback.end()) return false; return (*iter).second->addressCallback(addr); } /// Provide the emitter with the containers that will hold the cached p-code ops and varnodes. /// \param ocache is the container for cached PcodeOpRaw /// \param vcache is the container for cached VarnodeData /// \param in is the map of OpBehavior /// \param uniqReserve is the starting offset for temporaries in the \e unique space PcodeEmitCache::PcodeEmitCache(vector &ocache,vector &vcache, const vector &in,uintb uniqReserve) : opcache(ocache), varcache(vcache), inst(in) { uniq = uniqReserve; } /// Create an internal copy of the VarnodeData and cache it. /// \param var is the incoming VarnodeData being dumped /// \return the cloned VarnodeData VarnodeData *PcodeEmitCache::createVarnode(const VarnodeData *var) { VarnodeData *res = new VarnodeData(); *res = *var; varcache.push_back(res); return res; } void PcodeEmitCache::dump(const Address &addr,OpCode opc,VarnodeData *outvar,VarnodeData *vars,int4 isize) { PcodeOpRaw *op = new PcodeOpRaw(); op->setSeqNum(addr,uniq); opcache.push_back(op); op->setBehavior( inst[opc] ); uniq += 1; if (outvar != (VarnodeData *)0) { VarnodeData *outvn = createVarnode(outvar); op->setOutput(outvn); } for(int4 i=0;iaddInput(invn); } } /// This method executes a single pcode operation, the current one (returned by getCurrentOp()). /// The MemoryState of the emulator is queried and changed as needed to accomplish this. 
void Emulate::executeCurrentOp(void) { if (currentBehave == (OpBehavior *)0) { // Presumably a NO-OP fallthruOp(); return; } if (currentBehave->isSpecial()) { switch(currentBehave->getOpcode()) { case CPUI_LOAD: executeLoad(); fallthruOp(); break; case CPUI_STORE: executeStore(); fallthruOp(); break; case CPUI_BRANCH: executeBranch(); break; case CPUI_CBRANCH: if (executeCbranch()) executeBranch(); else fallthruOp(); break; case CPUI_BRANCHIND: executeBranchind(); break; case CPUI_CALL: executeCall(); break; case CPUI_CALLIND: executeCallind(); break; case CPUI_CALLOTHER: executeCallother(); break; case CPUI_RETURN: executeBranchind(); break; case CPUI_MULTIEQUAL: executeMultiequal(); fallthruOp(); break; case CPUI_INDIRECT: executeIndirect(); fallthruOp(); break; case CPUI_SEGMENTOP: executeSegmentOp(); fallthruOp(); break; case CPUI_CPOOLREF: executeCpoolRef(); fallthruOp(); break; case CPUI_NEW: executeNew(); fallthruOp(); break; default: throw LowlevelError("Bad special op"); } } else if (currentBehave->isUnary()) { // Unary operation executeUnary(); fallthruOp(); } else { // Binary operation executeBinary(); fallthruOp(); // All binary ops are fallthrus } } void EmulateMemory::executeUnary(void) { uintb in1 = memstate->getValue(currentOp->getInput(0)); uintb out = currentBehave->evaluateUnary(currentOp->getOutput()->size, currentOp->getInput(0)->size,in1); memstate->setValue(currentOp->getOutput(),out); } void EmulateMemory::executeBinary(void) { uintb in1 = memstate->getValue(currentOp->getInput(0)); uintb in2 = memstate->getValue(currentOp->getInput(1)); uintb out = currentBehave->evaluateBinary(currentOp->getOutput()->size, currentOp->getInput(0)->size,in1,in2); memstate->setValue(currentOp->getOutput(),out); } void EmulateMemory::executeLoad(void) { uintb off = memstate->getValue(currentOp->getInput(1)); AddrSpace *spc = currentOp->getInput(0)->getSpaceFromConst(); off = AddrSpace::addressToByte(off,spc->getWordSize()); uintb res = 
memstate->getValue(spc,off,currentOp->getOutput()->size); memstate->setValue(currentOp->getOutput(),res); } void EmulateMemory::executeStore(void) { uintb val = memstate->getValue(currentOp->getInput(2)); // Value being stored uintb off = memstate->getValue(currentOp->getInput(1)); // Offset to store at AddrSpace *spc = currentOp->getInput(0)->getSpaceFromConst(); // Space to store in off = AddrSpace::addressToByte(off,spc->getWordSize()); memstate->setValue(spc,off,currentOp->getInput(2)->size,val); } void EmulateMemory::executeBranch(void) { setExecuteAddress(currentOp->getInput(0)->getAddr()); } bool EmulateMemory::executeCbranch(void) { uintb cond = memstate->getValue(currentOp->getInput(1)); return (cond != 0); } void EmulateMemory::executeBranchind(void) { uintb off = memstate->getValue(currentOp->getInput(0)); setExecuteAddress(Address(currentOp->getAddr().getSpace(),off)); } void EmulateMemory::executeCall(void) { setExecuteAddress(currentOp->getInput(0)->getAddr()); } void EmulateMemory::executeCallind(void) { uintb off = memstate->getValue(currentOp->getInput(0)); setExecuteAddress(Address(currentOp->getAddr().getSpace(),off)); } void EmulateMemory::executeCallother(void) { throw LowlevelError("CALLOTHER emulation not currently supported"); } void EmulateMemory::executeMultiequal(void) { throw LowlevelError("MULTIEQUAL appearing in unheritaged code?"); } void EmulateMemory::executeIndirect(void) { throw LowlevelError("INDIRECT appearing in unheritaged code?"); } void EmulateMemory::executeSegmentOp(void) { throw LowlevelError("SEGMENTOP emulation not currently supported"); } void EmulateMemory::executeCpoolRef(void) { throw LowlevelError("Cannot currently emulate cpool operator"); } void EmulateMemory::executeNew(void) { throw LowlevelError("Cannot currently emulate new operator"); } /// \param t is the SLEIGH translator /// \param s is the MemoryState the emulator should manipulate /// \param b is the table of breakpoints the emulator should invoke 
EmulatePcodeCache::EmulatePcodeCache(Translate *t,MemoryState *s,BreakTable *b) : EmulateMemory(s) { trans = t; OpBehavior::registerInstructions(inst,t); breaktable = b; breaktable->setEmulate(this); } /// Free all the VarnodeData and PcodeOpRaw objects and clear the cache void EmulatePcodeCache::clearCache(void) { for(int4 i=0;ioneInstruction(emit,addr); current_op = 0; instruction_start = true; } /// Set-up currentOp and currentBehave void EmulatePcodeCache::establishOp(void) { if (current_op < opcache.size()) { currentOp = opcache[current_op]; currentBehave = currentOp->getBehavior(); return; } currentOp = (PcodeOpRaw *)0; currentBehave = (OpBehavior *)0; } /// Update the iterator into the current pcode cache, and if necessary, generate /// the pcode for the fallthru instruction and reset the iterator. void EmulatePcodeCache::fallthruOp(void) { instruction_start = false; current_op += 1; if (current_op >= opcache.size()) { current_address = current_address + instruction_length; createInstruction(current_address); } establishOp(); } /// Since the full instruction is cached, we can do relative branches properly void EmulatePcodeCache::executeBranch(void) { const Address &destaddr( currentOp->getInput(0)->getAddr() ); if (destaddr.isConstant()) { uintm id = destaddr.getOffset(); id = id + (uintm)current_op; current_op = id; if (current_op == opcache.size()) fallthruOp(); else if ((current_op < 0)||(current_op >= opcache.size())) throw LowlevelError("Bad intra-instruction branch"); else establishOp(); } else setExecuteAddress(destaddr); } /// Look for a breakpoint for the given user-defined op and invoke it. 
/// If it doesn't exist, or doesn't replace the action, throw an exception void EmulatePcodeCache::executeCallother(void) { if (!breaktable->doPcodeOpBreak(currentOp)) throw LowlevelError("Userop not hooked"); fallthruOp(); } /// Set the current execution address and cache the pcode translation of the machine instruction /// at that address /// \param addr is the address where execution should continue void EmulatePcodeCache::setExecuteAddress(const Address &addr) { current_address = addr; // Copy -addr- BEFORE calling createInstruction // as it calls clear and may delete -addr- createInstruction(current_address); establishOp(); } /// This routine executes an entire machine instruction at once, as a conventional debugger step /// function would do. If execution is at the start of an instruction, the breakpoints are checked /// and invoked as needed for the current address. If this routine is invoked while execution is /// in the middle of a machine instruction, execution is continued until the current instruction /// completes. void EmulatePcodeCache::executeInstruction(void) { if (instruction_start) { if (breaktable->doAddressBreak(current_address)) return; } do { executeCurrentOp(); } while(!instruction_start); } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/emulate.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /// \file emulate.hh /// \brief Classes for emulating p-code #ifndef __EMULATE_HH__ #define __EMULATE_HH__ #include "memstate.hh" #include "translate.hh" namespace ghidra { class Emulate; // Forward declaration /// \brief A collection of breakpoints for the emulator /// /// A BreakTable keeps track of an arbitrary number of breakpoints for an emulator. /// Breakpoints are either associated with a particular user-defined pcode op, /// or with a specific machine address (as in a standard debugger). Through the BreakTable /// object, an emulator can invoke breakpoints through the two methods /// - doPcodeOpBreak() /// - doAddressBreak() /// /// depending on the type of breakpoint they currently want to invoke class BreakTable { public: virtual ~BreakTable(void) {}; /// \brief Associate a particular emulator with breakpoints in this table /// /// Breakpoints may need access to the context in which they are invoked. This /// routine provides the context for all breakpoints in the table. /// \param emu is the Emulate context virtual void setEmulate(Emulate *emu)=0; /// \brief Invoke any breakpoints associated with this particular pcodeop /// /// Within the table, the first breakpoint which is designed to work with this particular /// kind of pcode operation is invoked. If there was a breakpoint and it was designed /// to \e replace the action of the pcode op, then \b true is returned. /// \param curop is the instance of a pcode op to test for breakpoints /// \return \b true if the action of the pcode op is performed by the breakpoint virtual bool doPcodeOpBreak(PcodeOpRaw *curop)=0; /// \brief Invoke any breakpoints associated with this machine address /// /// Within the table, the first breakpoint which is designed to work with at this address /// is invoked. If there was a breakpoint, and if it was designed to \e replace /// the action of the machine instruction, then \b true is returned. 
/// \param addr is address to test for breakpoints /// \return \b true if the machine instruction has been replaced by a breakpoint virtual bool doAddressBreak(const Address &addr)=0; }; /// \brief A breakpoint object /// /// This is a base class for breakpoint objects in an emulator. The breakpoints are implemented /// as callback method, which is overridden for the particular behavior needed by the emulator. /// Each derived class must override either /// - pcodeCallback() /// - addressCallback() /// /// depending on whether the breakpoint is tailored for a particular pcode op or for /// a machine address. class BreakCallBack { protected: Emulate *emulate; ///< The emulator currently associated with this breakpoint public: BreakCallBack(void); ///< Generic breakpoint constructor virtual ~BreakCallBack(void) {} virtual bool pcodeCallback(PcodeOpRaw *op); ///< Call back method for pcode based breakpoints virtual bool addressCallback(const Address &addr); ///< Call back method for address based breakpoints void setEmulate(Emulate *emu); ///< Associate a particular emulator with this breakpoint }; /// The base breakpoint needs no initialization parameters, the setEmulate() method must be /// called before the breakpoint can be invoked inline BreakCallBack::BreakCallBack(void) { emulate = (Emulate *)0; } /// This routine is invoked during emulation, if this breakpoint has somehow been associated with /// this kind of pcode op. The callback can perform any operation on the emulator context it wants. /// It then returns \b true if these actions are intended to replace the action of the pcode op itself. /// Or it returns \b false if the pcode op should still have its normal effect on the emulator context. /// \param op is the particular pcode operation where the break occurs. 
/// \return \b true if the normal pcode op action should not occur
inline bool BreakCallBack::pcodeCallback(PcodeOpRaw *op)
{
  return true;
}

/// This routine is invoked during emulation, if this breakpoint has somehow been associated with
/// this address. The callback can perform any operation on the emulator context it wants. It then
/// returns \b true if these actions are intended to replace the action of the \b entire machine
/// instruction at this address. Or it returns \b false if the machine instruction should still be
/// executed normally.
/// \param addr is the address where the break has occurred
/// \return \b true if the machine instruction should not be executed
inline bool BreakCallBack::addressCallback(const Address &addr)
{
  return true;
}

/// Breakpoints can be associated with one emulator at a time.
/// \param emu is the emulator to associate this breakpoint with
inline void BreakCallBack::setEmulate(Emulate *emu)
{
  emulate = emu;
}

/// \brief A basic instantiation of a breakpoint table
///
/// This object allows breakpoints to be registered in the table via either
///   - registerPcodeCallback() or
///   - registerAddressCallback()
///
/// Breakpoints are stored in map containers, and the core BreakTable methods
/// are implemented to search in these containers
class BreakTableCallBack : public BreakTable {
  Emulate *emulate;		///< The emulator associated with this table
  Translate *trans;		///< The translator
  map addresscallback;		///< a container of address based breakpoints
  map pcodecallback;		///< a container of pcode based breakpoints
public:
  BreakTableCallBack(Translate *t);	///< Basic breaktable constructor
  void registerPcodeCallback(const string &nm,BreakCallBack *func);	///< Register a pcode based breakpoint
  void registerAddressCallback(const Address &addr,BreakCallBack *func);	///< Register an address based breakpoint
  virtual void setEmulate(Emulate *emu);	///< Associate an emulator with all breakpoints in the table
  virtual bool
doPcodeOpBreak(PcodeOpRaw *curop); ///< Invoke any breakpoints for the given pcode op virtual bool doAddressBreak(const Address &addr); ///< Invoke any breakpoints for the given address }; /// The break table needs a translator object so user-defined pcode ops can be registered against /// by name. /// \param t is the translator object inline BreakTableCallBack::BreakTableCallBack(Translate *t) { emulate = (Emulate *)0; trans = t; } /// \brief A pcode-based emulator interface. /// /// The interface expects that the underlying emulation engine operates on individual pcode /// operations as its atomic operation. The interface allows execution stepping through /// individual pcode operations. The interface allows /// querying of the \e current pcode op, the current machine address, and the rest of the /// machine state. class Emulate { protected: bool emu_halted; ///< Set to \b true if the emulator is halted OpBehavior *currentBehave; ///< Behavior of the next op to execute virtual void executeUnary(void)=0; ///< Execute a unary arithmetic/logical operation virtual void executeBinary(void)=0; ///< Execute a binary arithmetic/logical operation virtual void executeLoad(void)=0; ///< Standard behavior for a p-code LOAD virtual void executeStore(void)=0; ///< Standard behavior for a p-code STORE /// \brief Standard behavior for a BRANCH /// /// This routine performs a standard p-code BRANCH operation on the memory state. /// This same routine is used for CBRANCH operations if the condition /// has evaluated to \b true. virtual void executeBranch(void)=0; /// \brief Check if the conditional of a CBRANCH is \b true /// /// This routine only checks if the condition for a p-code CBRANCH is true. /// It does \e not perform the actual branch. 
/// \return the boolean state indicated by the condition virtual bool executeCbranch(void)=0; virtual void executeBranchind(void)=0; ///< Standard behavior for a BRANCHIND virtual void executeCall(void)=0; ///< Standard behavior for a p-code CALL virtual void executeCallind(void)=0; ///< Standard behavior for a CALLIND virtual void executeCallother(void)=0; ///< Standard behavior for a user-defined p-code op virtual void executeMultiequal(void)=0; ///< Standard behavior for a MULTIEQUAL (phi-node) virtual void executeIndirect(void)=0; ///< Standard behavior for an INDIRECT op virtual void executeSegmentOp(void)=0; ///< Behavior for a SEGMENTOP virtual void executeCpoolRef(void)=0; ///< Standard behavior for a CPOOLREF (constant pool reference) op virtual void executeNew(void)=0; ///< Standard behavior for (low-level) NEW op virtual void fallthruOp(void)=0; ///< Standard p-code fall-thru semantics public: Emulate(void) { emu_halted = true; currentBehave = (OpBehavior *)0; } ///< generic emulator constructor virtual ~Emulate(void) {} void setHalt(bool val); ///< Set the \e halt state of the emulator bool getHalt(void) const; ///< Get the \e halt state of the emulator virtual void setExecuteAddress(const Address &addr)=0; ///< Set the address of the next instruction to emulate virtual Address getExecuteAddress(void) const=0; ///< Get the address of the current instruction being executed void executeCurrentOp(void); ///< Do a single pcode op step }; /// Applications and breakpoints can use this method and its companion getHalt() to /// terminate and restart the main emulator loop as needed. The emulator itself makes no use /// of this routine or the associated state variable \b emu_halted. /// \param val is what the halt state of the emulator should be set to inline void Emulate::setHalt(bool val) { emu_halted = val; } /// Applications and breakpoints can use this method and its companion setHalt() to /// terminate and restart the main emulator loop as needed. 
The emulator itself makes no use /// of this routine or the associated state variable \b emu_halted. /// \return \b true if the emulator is in a "halted" state. inline bool Emulate::getHalt(void) const { return emu_halted; } /// \brief An abstract Emulate class using a MemoryState object as the backing machine state /// /// Most p-code operations are implemented using the MemoryState to fetch and store /// values. Control-flow is implemented partially in that setExecuteAddress() is called /// to indicate which instruction is being executed. The derived class must provide /// - fallthruOp() /// - setExecuteAddress() /// - getExecuteAddress() /// /// The following p-code operations are stubbed out and will throw an exception: /// CALLOTHER, MULTIEQUAL, INDIRECT, CPOOLREF, SEGMENTOP, and NEW. /// Of course the derived class can override these. class EmulateMemory : public Emulate { protected: MemoryState *memstate; ///< The memory state of the emulator PcodeOpRaw *currentOp; ///< Current op to execute virtual void executeUnary(void); virtual void executeBinary(void); virtual void executeLoad(void); virtual void executeStore(void); virtual void executeBranch(void); virtual bool executeCbranch(void); virtual void executeBranchind(void); virtual void executeCall(void); virtual void executeCallind(void); virtual void executeCallother(void); virtual void executeMultiequal(void); virtual void executeIndirect(void); virtual void executeSegmentOp(void); virtual void executeCpoolRef(void); virtual void executeNew(void); public: /// Construct given a memory state EmulateMemory(MemoryState *mem) { memstate = mem; currentOp = (PcodeOpRaw *)0; } MemoryState *getMemoryState(void) const; ///< Get the emulator's memory state }; /// \return the memory state object which this emulator uses inline MemoryState *EmulateMemory::getMemoryState(void) const { return memstate; } /// \brief P-code emitter that dumps its raw Varnodes and PcodeOps to an in memory cache /// /// This is used for 
emulation when full Varnode and PcodeOp objects aren't needed class PcodeEmitCache : public PcodeEmit { vector &opcache; ///< The cache of current p-code ops vector &varcache; ///< The cache of current varnodes const vector &inst; ///< Array of behaviors for translating OpCode uintm uniq; ///< Starting offset for defining temporaries in \e unique space VarnodeData *createVarnode(const VarnodeData *var); ///< Clone and cache a raw VarnodeData public: PcodeEmitCache(vector &ocache,vector &vcache, const vector &in,uintb uniqReserve); ///< Constructor virtual void dump(const Address &addr,OpCode opc,VarnodeData *outvar,VarnodeData *vars,int4 isize); }; /// \brief A SLEIGH based implementation of the Emulate interface /// /// This implementation uses a Translate object to translate machine instructions into /// pcode and caches pcode ops for later use by the emulator. The pcode is cached as soon /// as the execution address is set, either explicitly, or via branches and fallthrus. There /// are additional methods for inspecting the pcode ops in the current instruction as a sequence. 
class EmulatePcodeCache : public EmulateMemory {
  Translate *trans;		///< The SLEIGH translator
  vector opcache;		///< The cache of current p-code ops
  vector varcache;		///< The cache of current varnodes
  vector inst;			///< Map from OpCode to OpBehavior
  BreakTable *breaktable;	///< The table of breakpoints
  Address current_address;	///< Address of current instruction being executed
  bool instruction_start;	///< \b true if next pcode op is start of instruction
  int4 current_op;		///< Index of current pcode op within machine instruction
  int4 instruction_length;	///< Length of current instruction in bytes
  void clearCache(void);	///< Clear the p-code cache
  void createInstruction(const Address &addr);	///< Cache pcode for instruction at given address
  void establishOp(void);	///< Prepare the current op for execution (NOTE(review): body not in view — presumably selects the op at \b current_op in the cache; confirm)
protected:
  virtual void fallthruOp(void);	///< Execute fallthru semantics for the pcode cache
  virtual void executeBranch(void);	///< Execute branch (including relative branches)
  virtual void executeCallother(void);	///< Execute breakpoint for this user-defined op
public:
  EmulatePcodeCache(Translate *t,MemoryState *s,BreakTable *b);	///< Pcode cache emulator constructor
  ~EmulatePcodeCache(void);	///< Pcode cache emulator destructor
  bool isInstructionStart(void) const;	///< Return \b true if we are at an instruction start
  int4 numCurrentOps(void) const;	///< Return number of pcode ops in translation of current instruction
  int4 getCurrentOpIndex(void) const;	///< Get the index of current pcode op within current instruction
  PcodeOpRaw *getOpByIndex(int4 i) const;	///< Get pcode op in current instruction translation by index
  virtual void setExecuteAddress(const Address &addr);	///< Set current execution address
  virtual Address getExecuteAddress(void) const;	///< Get current execution address
  void executeInstruction(void);	///< Execute (the rest of) a single machine instruction
};

/// Since the emulator can single step through individual pcode operations, the machine state
/// may be halted in the \e middle of a single machine instruction, unlike
conventional debuggers. /// This routine can be used to determine if execution is actually at the beginning of a machine /// instruction. /// \return \b true if the next pcode operation is at the start of the instruction translation inline bool EmulatePcodeCache::isInstructionStart(void) const { return instruction_start; } /// A typical machine instruction translates into a sequence of pcode ops. /// \return the number of ops in the sequence inline int4 EmulatePcodeCache::numCurrentOps(void) const { return opcache.size(); } /// This routine can be used to determine where, within the sequence of ops in the translation /// of the entire machine instruction, the currently executing op is. /// \return the index of the current (next) pcode op. inline int4 EmulatePcodeCache::getCurrentOpIndex(void) const { return current_op; } /// This routine can be used to examine ops other than the currently executing op in the /// machine instruction's translation sequence. /// \param i is the desired op index /// \return the pcode op at the indicated index inline PcodeOpRaw *EmulatePcodeCache::getOpByIndex(int4 i) const { return opcache[i]; } /// \return the currently executing machine address inline Address EmulatePcodeCache::getExecuteAddress(void) const { return current_address; } /** \page sleighAPIemulate The SLEIGH Emulator \section emu_overview Overview \b SLEIGH provides a framework for emulating the processors which have a specification written for them. The key classes in this framework are: \b Key \b Classes - \ref MemoryState - \ref MemoryBank - \ref BreakTable - \ref BreakCallBack - \ref Emulate - \ref EmulatePcodeCache The MemoryState object holds the representation of registers and memory during emulation. It understands the address spaces defined in the \b SLEIGH specification and how data is encoded in these spaces. It also knows any register names defined by the specification, so these can be used to set or query the state of these registers naturally. 
The emulation framework can be tailored to a particular environment by creating \b breakpoint objects, which derive off the BreakCallBack interface. These can be used to create callbacks during emulation that have full access to the memory state and the emulator, so any action can be accomplished. The breakpoint callbacks can be designed to either augment or replace the instruction at a particular address, or the callback can be used to implement the action of a user-defined pcode op. The BreakCallBack objects are managed by the BreakTable object, which takes care of invoking the callback at the appropriate time. The Emulate object serves as a basic execution engine. Its main method is Emulate::executeCurrentOp() which executes a single pcode operation on the memory state. Methods exist for querying and setting the current execution address and examining the pcode op being executed. The main implementation of the Emulate interface is the EmulatePcodeCache object. It uses SLEIGH to translate machine instructions as they are executed. The currently executing instruction is translated into a cached sequence of pcode operations. Additional methods allow this entire sequence to be inspected, and there is another stepping function which allows the emulator to be stepped through an entire machine instruction at a time. The single pcode stepping methods are of course still available and the two methods can be used together without conflict. \section emu_membuild Building a Memory State Assuming the SLEIGH Translate object and the LoadImage object have already been built (see \ref sleighAPIbasic), the only required step left before instantiating an emulator is to create a MemoryState object. The MemoryState object can be instantiated simply by passing the constructor the Translate object, but before it will work properly, you need to register individual MemoryBank objects with it, for each address space that might get used by the emulator. 
A MemoryBank is a representation of data stored in a single address space. There are some choices
for the type of MemoryBank associated with an address space. A MemoryImage is a read-only memory
bank that gets its data from a LoadImage. In order to make this writeable, or to create a writeable
memory bank which starts with its bytes initialized to zero, you can use a MemoryHashOverlay or a
MemoryPageOverlay.

A MemoryHashOverlay overlays some other memory bank, such as a MemoryImage. If you read from a
location that hasn't been written to directly before, you get the data in the underlying memory
bank. But if you write to this overlay, the value is stored in a hash table, and subsequent reads
will return this value. Internally, the hashtable stores values in a \e preferred wordsize only on
aligned addresses, but this is irrelevant to the interface. Unaligned requests are split up and
handled transparently.

A MemoryPageOverlay overlays another memory bank as well. But it implements writes to the bank by
caching memory \e pages. Any write creates an aligned page to hold the new data. The class takes
care of loading and filling in pages as needed.

Here is an example of instantiating a MemoryState and registering memory banks for a \e ram space
which is initialized with the load image. The \e ram space is implemented with the
MemoryPageOverlay, and the \e register space and the \e temporary space are implemented using the
MemoryHashOverlay.
\code
  void setupMemoryState(Translate &trans,LoadImage &loader) {
    // Set up memory state object
    MemoryImage loadmemory(trans.getDefaultCodeSpace(),8,4096,&loader);
    MemoryPageOverlay ramstate(trans.getDefaultCodeSpace(),8,4096,&loadmemory);
    MemoryHashOverlay registerstate(trans.getSpaceByName("register"),8,4096,4096,(MemoryBank *)0);
    MemoryHashOverlay tmpstate(trans.getUniqueSpace(),8,4096,4096,(MemoryBank *)0);

    MemoryState memstate(&trans);	// Instantiate the memory state object
    memstate.setMemoryBank(&ramstate);
    memstate.setMemoryBank(&registerstate);
    memstate.setMemoryBank(&tmpstate);
  }
\endcode

All the memory bank constructors need a preferred wordsize, which is most relevant to the hashtable
implementation, and a page size, which is most relevant to the page implementation. The hash
overlays need an additional initializer specifying how big the hashtable should be. The null
pointers passed in, in place of a real memory bank, indicate that the memory bank is initialized
with all zeroes. Once the memory banks are instantiated, they are registered with the memory state
via the MemoryState::setMemoryBank() method.

\section emu_breakpoints Breakpoints

In order to provide behavior within the emulator beyond just what the core instruction emulation
provides, the framework supports \b breakpoint classes. A breakpoint is created by deriving a class
from the BreakCallBack class and overriding either BreakCallBack::addressCallback() or
BreakCallBack::pcodeCallback(). Here is an example of a breakpoint that implements a standard C
library \e puts call on the x86 architecture. When the breakpoint is invoked, a call to \e puts has
just been made, so the stack pointer is pointing to the return address and the next 4 bytes on the
stack are a pointer to the string being passed in.
\code class PutsCallBack : public BreakCallBack { public: virtual bool addressCallback(const Address &addr); }; bool PutsCallBack::addressCallback(const Address &addr) { MemoryState *mem = emulate->getMemoryState(); uint1 buffer[256]; uint4 esp = mem->getValue("ESP"); AddrSpace *ram = mem->getTranslate()->getSpaceByName("ram"); uint4 param1 = mem->getValue(ram,esp+4,4); mem->getChunk(buffer,ram,param1,255); cout << (char *)&buffer << endl; uint4 returnaddr = mem->getValue(ram,esp,4); mem->setValue("ESP",esp+8); emulate->setExecuteAddress(Address(ram,returnaddr)); return true; // This replaces the indicated instruction } \endcode Notice that the callback retrieves the value of the stack pointer by name. Using this value, the string pointer is retrieved, then the data for the actual string is retrieved. After dumping the string to standard out, the return address is recovered and the \e return instruction is emulated by explicitly setting the next execution address to be the return value. \section emu_finalsetup Running the Emulator Here is an example of instantiating an EmulatePcodeCache object. A breakpoint is also instantiated and registered with the BreakTable. \code ... Sleigh trans(&loader,&context); // Instantiate the translator ... MemoryState memstate(&trans); // Instantiate the memory state ... 
  BreakTableCallBack breaktable(&trans);	// Instantiate a breakpoint table
  EmulatePcodeCache emulator(&trans,&memstate,&breaktable);	// Instantiate the emulator

  // Set up the initial stack pointer
  memstate.setValue("ESP",0xbffffffc);
  emulator.setExecuteAddress(Address(trans.getDefaultCodeSpace(),0x1D00114));	// Initial execution address

  PutsCallBack putscallback;
  breaktable.registerAddressCallback(Address(trans.getDefaultCodeSpace(),0x1D00130),&putscallback);

  AssemblyRaw assememit;
  for(;;) {
    Address addr = emulator.getExecuteAddress();
    trans.printAssembly(assememit,addr);
    emulator.executeInstruction();
  }
\endcode

Notice how the initial stack pointer and initial execute address are set up. The breakpoint is
registered with the BreakTable, giving it a specific address. The executeInstruction method is
called inside the loop, to actually run the emulator. Notice that a disassembly of each instruction
is printed after each step of the emulator. Other information can be examined from within this
execution loop or in other tailored breakpoints. In particular, the Emulate::getCurrentOp() method
can be used to retrieve an instance of the currently executing pcode operation. From this starting
point, you can examine the low-level objects:
  - PcodeOpRaw and
  - VarnodeData

*/
} // End namespace ghidra

#endif


================================================
FILE: pypcode/sleigh/error.hh
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and * limitations under the License. */ /// \file error.hh /// \brief Base class for error handling facilities /// /// This is also doubles as a place to list the common include files #ifndef __ERROR_HH__ #define __ERROR_HH__ #include "types.h" #include #include #include #include #include #include #include #include #include #include namespace ghidra { using std::string; using std::map; using std::set; using std::list; using std::vector; using std::pair; using std::make_pair; using std::ostream; using std::istream; using std::ifstream; using std::ofstream; using std::istringstream; using std::ostringstream; using std::ios; using std::dec; using std::hex; using std::oct; using std::setfill; using std::fixed; using std::setprecision; using std::setw; using std::endl; using std::ws; using std::min; using std::max; using std::to_string; using std::piecewise_construct; using std::forward_as_tuple; /// \brief The lowest level error generated by the decompiler /// /// This is the base error for all exceptions thrown by the /// decompiler. This underived form is thrown for very low /// level errors that immediately abort decompilation (usually /// for just a single function). struct LowlevelError { string explain; ///< Explanatory string /// Initialize the error with an explanatory string LowlevelError(const string &s) { explain = s; } const char *what() { return explain.c_str(); } }; /// \brief A generic recoverable error /// /// This error is the most basic form of recoverable error, /// meaning there is some problem that the user did not take /// into account. 
struct RecovError : public LowlevelError { /// Initialize the error with an explanatory string RecovError(const string &s) : LowlevelError(s) {} }; /// \brief An error generated while parsing a command or language /// /// This error is generated when parsing character data of some /// form, as in a user command from the console or when parsing /// C syntax. struct ParseError : public LowlevelError { // Parsing error /// Initialize the error with an explanatory string ParseError(const string &s) : LowlevelError(s) {} }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/filemanage.cc ================================================ /* ### * IP: GHIDRA * NOTE: Calls to Windows APIs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "filemanage.hh" #ifdef _WINDOWS #include #else // POSIX functions for searching directories extern "C" { #include #include #include #include } #endif namespace ghidra { // Path name separator #ifdef _WINDOWS char FileManage::separator = '\\'; char FileManage::separatorClass[] = "/\\"; #else char FileManage::separator = '/'; char FileManage::separatorClass[] = "/"; #endif void FileManage::addDir2Path(const string &path) { if (path.size()>0) { pathlist.push_back(path); if (!isSeparator(path[path.size()-1])) pathlist.back() += separator; } } void FileManage::findFile(string &res,const string &name) const { // Search through paths to find file with given name vector::const_iterator iter; if (isSeparator(name[0])) { res = name; ifstream s(res.c_str()); if (s) { s.close(); return; } } else { for(iter=pathlist.begin();iter!=pathlist.end();++iter) { res = *iter + name; ifstream s(res.c_str()); if (s) { s.close(); return; } } } res.clear(); // Can't find it, return empty string } #ifdef _WINDOWS void FileManage::addCurrentDir(void) { char dirname[256]; if (0!=GetCurrentDirectoryA(256,dirname)) { string filename(dirname); addDir2Path(filename); } } #else void FileManage::addCurrentDir(void) { // Add current working directory to path char dirname[256]; char *buf; buf = getcwd(dirname,256); if ((char *)0 == buf) return; string filename(buf); addDir2Path(filename); } #endif #ifdef _WINDOWS bool FileManage::isDirectory(const string &path) { DWORD attribs = GetFileAttributes(path.c_str()); if (attribs == INVALID_FILE_ATTRIBUTES) return false; return ((attribs & FILE_ATTRIBUTE_DIRECTORY)!=0); } #else bool FileManage::isDirectory(const string &path) { struct stat buf; if (stat(path.c_str(),&buf) < 0) { return false; } return S_ISDIR(buf.st_mode); } #endif #ifdef _WINDOWS bool FileManage::isSeparator(char c) { return (c == '/' || c == '\\'); } #else bool FileManage::isSeparator(char c) { return c == separator; } #endif #ifdef _WINDOWS void FileManage::matchListDir(vector 
&res,const string &match,bool isSuffix,const string &dirname,bool allowdot) { WIN32_FIND_DATAA FindFileData; HANDLE hFind; string dirfinal; dirfinal = dirname; if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; string regex = dirfinal + '*'; hFind = FindFirstFileA(regex.c_str(),&FindFileData); if (hFind == INVALID_HANDLE_VALUE) return; do { string fullname(FindFileData.cFileName); if (match.size() <= fullname.size()) { if (allowdot||(fullname[0] != '.')) { if (isSuffix) { if (0==fullname.compare(fullname.size()-match.size(),match.size(),match)) res.push_back(dirfinal + fullname); } else { if (0==fullname.compare(0,match.size(),match)) res.push_back(dirfinal + fullname); } } } } while(0!=FindNextFileA(hFind,&FindFileData)); FindClose(hFind); } #else void FileManage::matchListDir(vector &res,const string &match,bool isSuffix,const string &dirname,bool allowdot) { // Look through files in a directory for those matching -match- DIR *dir; struct dirent *entry; string dirfinal = dirname; if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; dir = opendir(dirfinal.c_str()); if (dir == (DIR *)0) return; entry = readdir(dir); while(entry != (struct dirent *)0) { string fullname(entry->d_name); if (match.size() <= fullname.size()) { if (allowdot||(fullname[0] != '.')) { if (isSuffix) { if (0==fullname.compare( fullname.size()-match.size(),match.size(),match)) res.push_back( dirfinal + fullname ); } else { if (0==fullname.compare(0,match.size(),match)) res.push_back(dirfinal + fullname); } } } entry = readdir(dir); } closedir(dir); } #endif void FileManage::matchList(vector &res,const string &match,bool isSuffix) const { vector::const_iterator iter; for(iter=pathlist.begin();iter!=pathlist.end();++iter) matchListDir(res,match,isSuffix,*iter,false); } #ifdef _WINDOWS void FileManage::directoryList(vector &res,const string &dirname,bool allowdot) { WIN32_FIND_DATAA FindFileData; HANDLE hFind; string dirfinal = dirname; if 
(!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; string regex = dirfinal + "*"; const char *s = regex.c_str(); hFind = FindFirstFileA(s,&FindFileData); if (hFind == INVALID_HANDLE_VALUE) return; do { if ( (FindFileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == FILE_ATTRIBUTE_DIRECTORY ) { string fullname(FindFileData.cFileName); if (allowdot || (fullname[0] != '.')) res.push_back(dirfinal + fullname); } } while(0!=FindNextFileA(hFind,&FindFileData)); FindClose(hFind); } #else void FileManage::directoryList(vector &res,const string &dirname,bool allowdot) { // List full pathnames of all directories under the directory -dir- DIR *dir; struct dirent *entry; string dirfinal; dirfinal = dirname; if (!isSeparator(dirfinal[dirfinal.size()-1])) dirfinal += separator; dir = opendir(dirfinal.c_str()); if (dir == (DIR *)0) return; entry = readdir(dir); while(entry != (struct dirent *)0) { bool isDirectory = false; if (entry->d_type == DT_DIR) isDirectory = true; else if (entry->d_type == DT_UNKNOWN || entry->d_type == DT_LNK) { string path = dirfinal + entry->d_name; struct stat stbuf; stat(path.c_str(), &stbuf); isDirectory = S_ISDIR(stbuf.st_mode); } if (isDirectory) { string fullname(entry->d_name); if ((fullname!=".")&&(fullname!="..")) { if (allowdot || (fullname[0] != '.')) res.push_back( dirfinal + fullname ); } } entry = readdir(dir); } closedir(dir); } #endif void FileManage::scanDirectoryRecursive(vector &res,const string &matchname,const string &rootpath,int maxdepth) { if (maxdepth == 0) return; vector subdir; directoryList(subdir,rootpath); vector::const_iterator iter; for(iter = subdir.begin();iter!=subdir.end();++iter) { const string &curpath( *iter ); string::size_type pos = curpath.find_last_of(separatorClass); if (pos == string::npos) pos = 0; else pos = pos + 1; if (curpath.compare(pos,string::npos,matchname)==0) res.push_back(curpath); else scanDirectoryRecursive(res,matchname,curpath,maxdepth-1); // Recurse } } void 
FileManage::splitPath(const string &full,string &path,string &base) { // Split path string -full- into its -base-name and -path- (relative or absolute) // If there is no path, i.e. only a basename in full, then -path- will return as an empty string // otherwise -path- will be non-empty and end in a separator character string::size_type end = full.size()-1; if (isSeparator(full[full.size()-1])) // Take into account terminating separator end = full.size()-2; string::size_type pos = full.find_last_of(separatorClass,end); if (pos == string::npos) { // Didn't find any separator base = full; path.clear(); } else { string::size_type sz = (end - pos); base = full.substr(pos+1,sz); path = full.substr(0,pos+1); } } string FileManage::buildPath(const vector &pathels,int level) { // Build an absolute path using elements from -pathels-, in reverse order // Build up to and including pathels[level] ostringstream s; for(int i=pathels.size()-1;i>=level;--i) { s << separator; s << pathels[i]; } return s.str(); } bool FileManage::testDevelopmentPath(const vector &pathels,int level,string &root) { // Given pathels[level] is "Ghidra", determine if this is a Ghidra development layout if (level + 2 >= pathels.size()) return false; string parent = pathels[level + 1]; if (parent.size() < 11) return false; string piecestr = parent.substr(0,7); if (piecestr != "ghidra.") return false; piecestr = parent.substr(parent.size() - 4); if (piecestr != ".git") return false; root = buildPath(pathels,level+2); vector testpaths1; vector testpaths2; scanDirectoryRecursive(testpaths1,"ghidra.git",root,1); if (testpaths1.size() != 1) return false; scanDirectoryRecursive(testpaths2,"Ghidra",testpaths1[0],1); return (testpaths2.size() == 1); } bool FileManage::testInstallPath(const vector &pathels,int level,string &root) { if (level + 1 >= pathels.size()) return false; root = buildPath(pathels,level+1); vector testpaths1; vector testpaths2; scanDirectoryRecursive(testpaths1,"server",root,1); if 
(testpaths1.size() != 1) return false; scanDirectoryRecursive(testpaths2,"server.conf",testpaths1[0],1); return (testpaths2.size() == 1); } string FileManage::discoverGhidraRoot(const char *argv0) { // Find the root of the ghidra distribution based on current working directory and passed in path vector pathels; string cur(argv0); string base; int skiplevel = 0; bool isAbs = isAbsolutePath(cur); for(;;) { int sizebefore = cur.size(); splitPath(cur,cur,base); if (cur.size() == sizebefore) break; if (base == ".") skiplevel += 1; else if (base == "..") skiplevel += 2; if (skiplevel > 0) skiplevel -= 1; else pathels.push_back(base); } if (!isAbs) { FileManage curdir; curdir.addCurrentDir(); cur = curdir.pathlist[0]; for(;;) { int sizebefore = cur.size(); splitPath(cur,cur,base); if (cur.size() == sizebefore) break; pathels.push_back(base); } } for(int i=0;i #include #include #include #include namespace ghidra { using std::vector; using std::string; using std::ifstream; using std::ostringstream; class FileManage { vector pathlist; // List of paths to search for files static char separator; static char separatorClass[]; // Characters that can be accepted as a separator static string buildPath(const vector &pathels,int level); static bool testDevelopmentPath(const vector &pathels,int level,string &root); static bool testInstallPath(const vector &pathels,int level,string &root); public: void addDir2Path(const string &path); void addCurrentDir(void); void findFile(string &res,const string &name) const; // Resolve full pathname void matchList(vector &res,const string &match,bool isSuffix) const; // List of files with suffix static bool isSeparator(char c); static bool isDirectory(const string &path); static void matchListDir(vector &res,const string &match,bool isSuffix,const string &dir,bool allowdot); static void directoryList(vector &res,const string &dirname,bool allowdot=false); static void scanDirectoryRecursive(vector &res,const string &matchname,const string 
&rootpath,int maxdepth); static void splitPath(const string &full,string &path,string &base); static bool isAbsolutePath(const string &full) { if (full.empty()) return false; return (full[0] == separator); } static string discoverGhidraRoot(const char *argv0); }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/float.cc ================================================ /* ### * IP: GHIDRA * NOTE: uses some windows and sparc specific floating point definitions * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "float.hh" #include "address.hh" #include #include namespace ghidra { using std::ldexp; using std::frexp; using std::signbit; using std::sqrt; using std::floor; using std::ceil; using std::round; using std::fabs; /// Set format for a given encoding size according to IEEE 754 standards /// \param sz is the size of the encoding in bytes FloatFormat::FloatFormat(int4 sz) { size = sz; if (size == 4) { signbit_pos = 31; exp_pos = 23; exp_size = 8; frac_pos = 0; frac_size = 23; bias = 127; jbitimplied = true; } else if (size == 8) { signbit_pos = 63; exp_pos = 52; exp_size = 11; frac_pos = 0; frac_size = 52; bias = 1023; jbitimplied = true; } maxexponent = (1<>= 1; // Throw away 1 bit of precision we will // lose anyway, to make sure highbit is 0 int4 precis = 8*sizeof(uintb) - 1; // fullword - 1 we threw away double res = (double)signif; int4 expchange = exp - precis + 1; // change in exponent is precis // -1 integer bit res = ldexp(res,expchange); if (sign) res = res * -1.0; return res; } /// \brief Extract the sign, fractional, and exponent from a given floating-point value /// /// \param x is the given value /// \param sgn passes back the sign /// \param signif passes back the fractional part /// \param exp passes back the exponent /// \return the floating-point class of the value FloatFormat::floatclass FloatFormat::extractExpSig(double x,bool *sgn,uintb *signif,int4 *exp) { int4 e; *sgn = signbit(x); if (x == 0.0) return zero; if (std::isinf(x)) return infinity; if (std::isnan(x)) return nan; if (*sgn) x = -x; double norm = frexp(x,&e); // norm is between 1/2 and 1 norm = ldexp(norm,8*sizeof(uintb)-1); // norm between 2^62 and 2^63 *signif = (uintb)norm; // Convert to normalized integer *signif <<= 1; e -= 1; // Consider normalization between 1 and 2 *exp = e; return normalized; } /// \param x is an encoded floating-point value /// \return the fraction part of the value aligned to the top of the word uintb FloatFormat::extractFractionalCode(uintb x) 
const { x >>= frac_pos; // Eliminate bits below x <<= 8*sizeof(uintb) - frac_size; // Align with top of word return x; } /// \param x is an encoded floating-point value /// \return the sign bit bool FloatFormat::extractSign(uintb x) const { x >>= signbit_pos; return ((x&1)!=0); } /// \param x is an encoded floating-point value /// \return the (signed) exponent int4 FloatFormat::extractExponentCode(uintb x) const { x >>= exp_pos; uintb mask = 1; mask = (mask<>= 8*sizeof(uintb) - frac_size; code <<= frac_pos; // Move bits into position; x |= code; return x; } /// \param x is an encoded value (with sign set to zero) /// \param sign is the sign bit to set /// \return the encoded value with the sign bit set uintb FloatFormat::setSign(uintb x,bool sign) const { if (!sign) return x; // Assume bit is already zero uintb mask = 1; mask <<= signbit_pos; x |= mask; // Stick in the bit return x; } /// \param x is an encoded value (with exponent set to zero) /// \param code is the exponent to set /// \return the encoded value with the new exponent uintb FloatFormat::setExponentCode(uintb x,uintb code) const { code <<= exp_pos; // Move bits into position x |= code; return x; } /// \param sgn is set to \b true for negative zero, \b false for positive /// \return the encoded zero uintb FloatFormat::getZeroEncoding(bool sgn) const { uintb res = 0; // Use IEEE 754 standard for zero encoding res = setFractionalCode(res,0); res = setExponentCode(res,0); return setSign(res,sgn); } /// \param sgn is set to \b true for negative infinity, \b false for positive /// \return the encoded infinity uintb FloatFormat::getInfinityEncoding(bool sgn) const { uintb res = 0; // Use IEEE 754 standard for infinity encoding res = setFractionalCode(res,0); res = setExponentCode(res,(uintb)maxexponent); return setSign(res,sgn); } /// \param sgn is set to \b true for negative NaN, \b false for positive /// \return the encoded NaN uintb FloatFormat::getNaNEncoding(bool sgn) const { uintb res = 0; // Use IEEE 
754 standard for NaN encoding uintb mask = 1; mask <<= 8*sizeof(uintb)-1; // Create "quiet" NaN res = setFractionalCode(res,mask); res = setExponentCode(res,(uintb)maxexponent); return setSign(res,sgn); } void FloatFormat::calcPrecision(void) { decimalMinPrecision = (int4)floor(frac_size * 0.30103); // Precision needed to guarantee IEEE 754 binary -> decimal -> binary round trip conversion decimalMaxPrecision = (int4)ceil((frac_size + 1) * 0.30103) + 1; } /// \param encoding is the encoding value /// \param type points to the floating-point class, which is passed back /// \return the equivalent double value double FloatFormat::getHostFloat(uintb encoding,floatclass *type) const { bool sgn = extractSign(encoding); uintb frac = extractFractionalCode(encoding); int4 exp = extractExponentCode(encoding); bool normal = true; if (exp == 0) { if ( frac == 0 ) { // Floating point zero *type = zero; return sgn ? -0.0 : +0.0; } *type = denormalized; // Number is denormalized normal = false; } else if (exp == maxexponent) { if ( frac == 0 ) { // Floating point infinity *type = infinity; double infinity = std::numeric_limits::infinity(); return sgn ? -infinity : +infinity; } *type = nan; // encoding is "Not a Number" NaN double nan = std::numeric_limits::quiet_NaN(); return sgn ? -nan : +nan; // Sign is usually ignored } else *type = normalized; // Get "true" exponent and fractional exp -= bias; if (normal && jbitimplied) { frac >>= 1; // Make room for 1 jbit uintb highbit = 1; highbit <<= 8*sizeof(uintb)-1; frac |= highbit; // Stick bit in at top } return createFloat(sgn,frac,exp); } /// \brief Round a floating point value to the nearest even /// /// \param signif the significant bits of a floating point value /// \param lowbitpos the position in signif of the floating point /// \return true if we rounded up bool FloatFormat::roundToNearestEven(uintb &signif, int4 lowbitpos) { uintb lowbitmask = (lowbitpos < 8 * sizeof(uintb)) ? 
((uintb)1 << lowbitpos) : 0; uintb midbitmask = (uintb)1 << (lowbitpos - 1); uintb epsmask = midbitmask - 1; bool odd = (signif & lowbitmask) != 0; if ((signif & midbitmask) != 0 && ((signif & epsmask) != 0 || odd)) { signif += midbitmask; return true; } return false; } /// \param host is the double value to convert /// \return the equivalent encoded value uintb FloatFormat::getEncoding(double host) const { floatclass type; bool sgn; uintb signif; int4 exp; type = extractExpSig(host, &sgn, &signif, &exp); if (type == zero) return getZeroEncoding(sgn); else if (type == infinity) return getInfinityEncoding(sgn); else if (type == nan) return getNaNEncoding(sgn); // convert exponent and fractional to their encodings exp += bias; if (exp < -frac_size) // Exponent is too small to represent return getZeroEncoding(sgn); // TODO handle round to non-zero if (exp < 1) { // Must be denormalized if (roundToNearestEven(signif, 8 * sizeof(uintb) - frac_size - exp)) { // TODO handle round to normal case if ((signif >> (8 * sizeof(uintb) - 1)) == 0) { signif = (uintb)1 << (8 * sizeof(uintb) - 1); exp += 1; } } uintb res = getZeroEncoding(sgn); return setFractionalCode(res, signif >> (-exp)); } if (roundToNearestEven(signif, 8 * sizeof(uintb) - frac_size - 1)) { // if high bit is clear, then the add overflowed. Increase exp and set // signif to 1. 
if ((signif >> (8 * sizeof(uintb) - 1)) == 0) { signif = (uintb)1 << (8 * sizeof(uintb) - 1); exp += 1; } } if (exp >= maxexponent) // Exponent is too big to represent return getInfinityEncoding(sgn); if (jbitimplied && (exp != 0)) signif <<= 1; // Cut off top bit (which should be 1) uintb res = 0; res = setFractionalCode(res, signif); res = setExponentCode(res, (uintb)exp); return setSign(res, sgn); } /// \param encoding is the value in the \e other FloatFormat /// \param formin is the \e other FloatFormat /// \return the equivalent value in \b this FloatFormat uintb FloatFormat::convertEncoding(uintb encoding, const FloatFormat *formin) const { bool sgn = formin->extractSign(encoding); uintb signif = formin->extractFractionalCode(encoding); int4 exp = formin->extractExponentCode(encoding); if (exp == formin->maxexponent) { // NaN or INFINITY encoding exp = maxexponent; if (signif != 0) return getNaNEncoding(sgn); else return getInfinityEncoding(sgn); } if (exp == 0) { // incoming is subnormal if (signif == 0) return getZeroEncoding(sgn); // normalize int4 lz = count_leading_zeros(signif); signif <<= lz; exp = -formin->bias - lz; } else { // incoming is normal exp -= formin->bias; if (jbitimplied) signif = ((uintb)1 << (8 * sizeof(uintb) - 1)) | (signif >> 1); } exp += bias; if (exp < -frac_size) // Exponent is too small to represent return getZeroEncoding(sgn); // TODO handle round to non-zero if (exp < 1) { // Must be denormalized if (roundToNearestEven(signif, 8 * sizeof(uintb) - frac_size - exp)) { // TODO handle carry to normal case if ((signif >> (8 * sizeof(uintb) - 1)) == 0) { signif = (uintb)1 << (8 * sizeof(uintb) - 1); exp += 1; } } uintb res = getZeroEncoding(sgn); return setFractionalCode(res, signif >> (-exp)); } if (roundToNearestEven(signif, 8 * sizeof(uintb) - frac_size - 1)) { // if high bit is clear, then the add overflowed. Increase exp and set // signif to 1. 
if ((signif >> (8 * sizeof(uintb) - 1)) == 0) { signif = (uintb)1 << (8 * sizeof(uintb) - 1); exp += 1; } } if (exp >= maxexponent) // Exponent is too big to represent return getInfinityEncoding(sgn); if (jbitimplied && (exp != 0)) signif <<= 1; // Cut off top bit (which should be 1) uintb res = 0; res = setFractionalCode(res, signif); res = setExponentCode(res, (uintb)exp); return setSign(res, sgn); } /// The string should be printed with the minimum number of digits to uniquely specify the underlying /// binary value. This currently only works for the 32-bit and 64-bit IEEE 754 formats. /// If the \b forcesci parameter is \b true, the string will always be printed using scientific notation. /// \param host is the given value already converted to the host's \b double format. /// \param forcesci is \b true if the value should be printed in scientific notation. /// \return the decimal representation as a string string FloatFormat::printDecimal(double host,bool forcesci) const { string res; for(int4 prec=decimalMinPrecision;;++prec) { ostringstream s; if (forcesci) { s.setf( ios::scientific ); // Set to scientific notation s.precision(prec-1); // scientific doesn't include first digit in precision count } else { s.unsetf( ios::floatfield ); // Use "default" notation to allow fewer digits to be printed if possible s.precision(prec); } s << host; if (prec == decimalMaxPrecision) { return s.str(); } res = s.str(); double roundtrip = 0.0; istringstream t(res); if (size <= 4) { float tmp = 0.0; t >> tmp; roundtrip = tmp; } else { t >> roundtrip; } if (roundtrip == host) break; } return res; } // Currently we emulate floating point operations on the target // By converting the encoding to the host's encoding and then // performing the operation using the host's floating point unit // then the host's encoding is converted back to the targets encoding /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return \b true if (a == 
b) uintb FloatFormat::opEqual(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); uintb res = (val1 == val2) ? 1 : 0; return res; } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return \b true if (a != b) uintb FloatFormat::opNotEqual(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); uintb res = (val1 != val2) ? 1 : 0; return res; } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return \b true if (a < b) uintb FloatFormat::opLess(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); uintb res = (val1 < val2) ? 1 : 0; return res; } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return \b true if (a <= b) uintb FloatFormat::opLessEqual(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); uintb res = (val1 <= val2) ? 1 : 0; return res; } /// \param a is an encoded floating-point value /// \return \b true if a is Not-a-Number uintb FloatFormat::opNan(uintb a) const { floatclass type; getHostFloat(a,&type); uintb res = (type == FloatFormat::nan) ? 
1 : 0; return res; } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return a + b uintb FloatFormat::opAdd(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); return getEncoding(val1 + val2); } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return a / b uintb FloatFormat::opDiv(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); return getEncoding(val1 / val2); } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return a * b uintb FloatFormat::opMult(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); return getEncoding(val1 * val2); } /// \param a is the first floating-point value /// \param b is the second floating-point value /// \return a - b uintb FloatFormat::opSub(uintb a,uintb b) const { floatclass type; double val1 = getHostFloat(a,&type); double val2 = getHostFloat(b,&type); return getEncoding(val1 - val2); } /// \param a is an encoded floating-point value /// \return -a uintb FloatFormat::opNeg(uintb a) const { floatclass type; double val = getHostFloat(a,&type); return getEncoding(-val); } /// \param a is an encoded floating-point value /// \return abs(a) uintb FloatFormat::opAbs(uintb a) const { floatclass type; double val = getHostFloat(a,&type); return getEncoding(fabs(val)); } /// \param a is an encoded floating-point value /// \return sqrt(a) uintb FloatFormat::opSqrt(uintb a) const { floatclass type; double val = getHostFloat(a,&type); return getEncoding(sqrt(val)); } /// \param a is a signed integer value /// \param sizein is the number of bytes in the integer encoding /// \return a converted to an encoded floating-point value uintb FloatFormat::opInt2Float(uintb a,int4 sizein) const { intb ival = 
sign_extend(a,8*sizein-1); double val = (double) ival; // Convert integer to float return getEncoding(val); } /// \param a is an encoded floating-point value /// \param outformat is the desired output FloatFormat /// \return a converted to the output FloatFormat uintb FloatFormat::opFloat2Float(uintb a,const FloatFormat &outformat) const { return outformat.convertEncoding(a, this); } /// \param a is an encoded floating-point value /// \param sizeout is the desired encoding size of the output /// \return an integer encoding of a uintb FloatFormat::opTrunc(uintb a,int4 sizeout) const { floatclass type; double val = getHostFloat(a,&type); intb ival = (intb) val; // Convert to integer uintb res = (uintb) ival; // Convert to unsigned res &= calc_mask(sizeout); // Truncate to proper size return res; } /// \param a is an encoded floating-point value /// \return ceil(a) uintb FloatFormat::opCeil(uintb a) const { floatclass type; double val = getHostFloat(a,&type); return getEncoding(ceil(val)); } /// \param a is an encoded floating-point value /// \return floor(a) uintb FloatFormat::opFloor(uintb a) const { floatclass type; double val = getHostFloat(a,&type); return getEncoding(floor(val)); } /// \param a is an encoded floating-point value /// \return round(a) uintb FloatFormat::opRound(uintb a) const { floatclass type; double val = getHostFloat(a,&type); // return getEncoding(floor(val+.5)); // round half up return getEncoding(round(val)); // round half away from zero } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/float.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/// \file float.hh
/// \brief Support for decoding different floating-point formats

#ifndef __FLOAT_HH__
#define __FLOAT_HH__

#include "error.hh"

namespace ghidra {

/// \brief Encoding information for a single floating-point format
///
/// This class supports manipulation of a single floating-point encoding.
/// An encoding can be converted to and from the host format and
/// convenience methods allow p-code floating-point operations to be
/// performed on natively encoded operands. This follows the IEEE754 standards.
class FloatFormat {
public:
  /// \brief The various classes of floating-point encodings
  enum floatclass {
    normalized = 0,		///< A normal floating-point number
    infinity = 1,		///< An encoding representing an infinite value
    zero = 2,			///< An encoding of the value zero
    nan = 3,			///< An invalid encoding, Not-a-Number
    denormalized = 4		///< A denormalized encoding (for very small values)
  };
private:
  int4 size;			///< Size of float in bytes (this format)
  int4 signbit_pos;		///< Bit position of sign bit
  int4 frac_pos;		///< (lowest) bit position of fractional part
  int4 frac_size;		///< Number of bits in fractional part
  int4 exp_pos;			///< (lowest) bit position of exponent
  int4 exp_size;		///< Number of bits in exponent
  int4 bias;			///< What to add to real exponent to get encoding
  int4 maxexponent;		///< Maximum possible exponent
  int4 decimalMinPrecision;	///< Minimum decimal digits of precision guaranteed by the format
  int4 decimalMaxPrecision;	///< Maximum decimal digits of precision needed to uniquely represent value
  bool jbitimplied;		///< Set to \b true if integer bit of 1 is assumed
  static double createFloat(bool sign,uintb signif,int4 exp);	///< Create a double given sign, fractional, and exponent
  static floatclass extractExpSig(double x,bool *sgn,uintb *signif,int4 *exp);	///< Decompose a host double into sign/significand/exponent
  static bool roundToNearestEven(uintb &signif, int4 lowbitpos);	///< Round significand to nearest even at the given bit position
  uintb setFractionalCode(uintb x,uintb code) const;	///< Set the fractional part of an encoded value
  uintb setSign(uintb x,bool sign) const;	///< Set the sign bit of an encoded value
  uintb setExponentCode(uintb x,uintb code) const;	///< Set the exponent of an encoded value
  uintb getZeroEncoding(bool sgn) const;	///< Get an encoded zero value
  uintb getInfinityEncoding(bool sgn) const;	///< Get an encoded infinite value
  uintb getNaNEncoding(bool sgn) const;	///< Get an encoded NaN value
  void calcPrecision(void);	///< Calculate the decimal precision of this format
public:
  FloatFormat(int4 sz);		///< Construct default IEEE 754 standard settings
  int4 getSize(void) const { return size; }	///< Get the size of the encoding in bytes
  double getHostFloat(uintb encoding,floatclass *type) const;	///< Convert an encoding into host's double
  uintb getEncoding(double host) const;	///< Convert host's double into \b this encoding
  uintb convertEncoding(uintb encoding,const FloatFormat *formin) const;	///< Convert between two different formats
  uintb extractFractionalCode(uintb x) const;	///< Extract the fractional part of the encoding
  bool extractSign(uintb x) const;	///< Extract the sign bit from the encoding
  int4 extractExponentCode(uintb x) const;	///< Extract the exponent from the encoding
  string printDecimal(double host,bool forcesci) const;	///< Print given value as a decimal string
  // Operations on floating point values
  uintb opEqual(uintb a,uintb b) const;	///< Equality comparison (==)
  uintb opNotEqual(uintb a,uintb b) const;	///< Inequality comparison (!=)
  uintb opLess(uintb a,uintb b) const;	///< Less-than comparison (<)
  uintb opLessEqual(uintb a,uintb b) const;	///< Less-than-or-equal comparison (<=)
  uintb opNan(uintb a) const;	///< Test if Not-a-Number (NaN)
  uintb opAdd(uintb a,uintb b) const;	///< Addition (+)
  uintb opDiv(uintb a,uintb b) const;	///< Division (/)
  uintb opMult(uintb a,uintb b) const;	///< Multiplication (*)
  uintb opSub(uintb a,uintb b) const;	///< Subtraction (-)
  uintb opNeg(uintb a) const;	///< Unary negate
  uintb opAbs(uintb a) const;	///< Absolute value (abs)
  uintb opSqrt(uintb a) const;	///< Square root (sqrt)
  uintb opTrunc(uintb a,int4 sizeout) const;	///< Convert floating-point to integer
  uintb opCeil(uintb a) const;	///< Ceiling (ceil)
  uintb opFloor(uintb a) const;	///< Floor (floor)
  uintb opRound(uintb a) const;	///< Round
  uintb opInt2Float(uintb a,int4 sizein) const;	///< Convert integer to floating-point
  uintb opFloat2Float(uintb a,const FloatFormat &outformat) const;	///< Convert between floating-point precisions
};

} // End namespace ghidra
#endif



================================================
FILE: pypcode/sleigh/globalcontext.cc
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/ #include "globalcontext.hh" namespace ghidra { ElementId ELEM_CONTEXT_DATA = ElementId("context_data",120); ElementId ELEM_CONTEXT_POINTS = ElementId("context_points",121); ElementId ELEM_CONTEXT_POINTSET = ElementId("context_pointset",122); ElementId ELEM_CONTEXT_SET = ElementId("context_set",123); ElementId ELEM_SET = ElementId("set",124); ElementId ELEM_TRACKED_POINTSET = ElementId("tracked_pointset",125); ElementId ELEM_TRACKED_SET = ElementId("tracked_set",126); /// Bits within the whole context blob are labeled starting with 0 as the most significant bit /// in the first word in the sequence. The new context value must be contained within a single /// word. /// \param sbit is the starting (most significant) bit of the new value /// \param ebit is the ending (least significant) bit of the new value ContextBitRange::ContextBitRange(int4 sbit,int4 ebit) { word = sbit/(8*sizeof(uintm)); startbit = sbit - word*8*sizeof(uintm); endbit = ebit - word*8*sizeof(uintm); shift = 8*sizeof(uintm)-endbit-1; mask = (~((uintm)0))>>(startbit+shift); } /// The register storage and value are encoded as a \ element. /// \param encoder is the stream encoder void TrackedContext::encode(Encoder &encoder) const { encoder.openElement(ELEM_SET); loc.space->encodeAttributes(encoder,loc.offset,loc.size); encoder.writeUnsignedInteger(ATTRIB_VAL, val); encoder.closeElement(ELEM_SET); } /// Parse a \ element to fill in the storage and value details. /// \param decoder is the stream decoder void TrackedContext::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(ELEM_SET); loc.decodeFromAttributes(decoder); val = decoder.readUnsignedInteger(ATTRIB_VAL); decoder.closeElement(elemId); } /// \brief Encode all tracked register values for a specific address to a stream /// /// Encode all the tracked register values associated with a specific target address /// as a \ tag. 
/// \param encoder is the stream encoder /// \param addr is the specific address we have tracked values for /// \param vec is the list of tracked values void ContextDatabase::encodeTracked(Encoder &encoder,const Address &addr,const TrackedSet &vec) { if (vec.empty()) return; encoder.openElement(ELEM_TRACKED_POINTSET); addr.getSpace()->encodeAttributes(encoder,addr.getOffset() ); for(int4 i=0;i element, decoding each child in turn to populate a list of /// TrackedContext objects. /// \param decoder is the given stream decoder /// \param vec is the container that will hold the new TrackedContext objects void ContextDatabase::decodeTracked(Decoder &decoder,TrackedSet &vec) { vec.clear(); // Clear out any old stuff while(decoder.peekElement() != 0) { vec.emplace_back(); vec.back().decode(decoder); } } /// The default value is returned for addresses that have not been overlaid with other values. /// \param nm is the name of the context variable /// \param val is the default value to establish void ContextDatabase::setVariableDefault(const string &nm,uintm val) { ContextBitRange &var( getVariable(nm) ); var.setValue(getDefaultValue(),val); } /// This will return the default value used for addresses that have not been overlaid with other values. /// \param nm is the name of the context variable /// \return the variable's default value uintm ContextDatabase::getDefaultValue(const string &nm) const { const ContextBitRange &var( getVariable(nm) ); return var.getValue(getDefaultValue()); } /// The variable will be changed to the new value, starting at the given address up to the next /// point of change. 
/// \param nm is the name of the context variable /// \param addr is the given address /// \param value is the new value to set void ContextDatabase::setVariable(const string &nm,const Address &addr, uintm value) { const ContextBitRange &bitrange( getVariable(nm) ); int4 num = bitrange.getWord(); uintm mask = bitrange.getMask()< contvec; getRegionToChangePoint(contvec,addr,num,mask); for(uint4 i=0;i contvec; getRegionToChangePoint(contvec,addr,num,mask); for(uint4 i=0;i vec; getRegionForSet(vec,addr1,addr2,num,mask); for(uint4 i=0;i vec; getRegionForSet(vec,begad,endad,bitrange.getWord(),bitrange.getMask() << bitrange.getShift()); for(int4 i=0;i mem.offset) continue; tendoff = tcont.loc.offset + tcont.loc.size - 1; if (tendoff < endoff) continue; uintb res = tcont.val; // If we have proper containment, trim value based on endianness if (tcont.loc.space->isBigEndian()) { if (endoff != tendoff) res >>= (8* (tendoff - mem.offset)); } else { if (mem.offset != tcont.loc.offset) res >>= (8* (mem.offset-tcont.loc.offset)); } res &= calc_mask( mem.size ); // Final trim based on size return res; } return (uintb)0; } /// The "array of words" and mask array are resized to the given value. Old values are preserved, /// chopping off the last values, or appending zeroes, as needed. /// \param sz is the new number of words to resize array to void ContextInternal::FreeArray::reset(int4 sz) { uintm *newarray = (uintm *)0; uintm *newmask = (uintm *)0; if (sz != 0) { newarray = new uintm[sz]; newmask = new uintm[sz]; int4 min; if (sz > size) { min = size; for(int4 i=min;i elements within a parent \ element. 
/// \param encoder is the stream encoder /// \param addr is the address of the split point where the blob is valid /// \param vec is the array of words holding the blob values void ContextInternal::encodeContext(Encoder &encoder,const Address &addr,const uintm *vec) const { encoder.openElement(ELEM_CONTEXT_POINTSET); addr.getSpace()->encodeAttributes(encoder,addr.getOffset() ); map::const_iterator iter; for(iter=variables.begin();iter!=variables.end();++iter) { uintm val = (*iter).second.getValue(vec); encoder.openElement(ELEM_SET); encoder.writeString(ATTRIB_NAME, (*iter).first); encoder.writeUnsignedInteger(ATTRIB_VAL, val); encoder.closeElement(ELEM_SET); } encoder.closeElement(ELEM_CONTEXT_POINTSET); } /// \brief Restore a context blob for given address range from a stream decoder /// /// Parse either a \ or \ element. In either case, /// children are parsed to get context variable values. Then a context blob is /// reconstructed from the values. The new blob is added to the interval map based /// on the address range. If the start address is invalid, the default value of /// the context variables are painted. The second address can be invalid, if /// only a split point is known. 
/// \param decoder is the stream decoder /// \param addr1 is the starting address of the given range /// \param addr2 is the ending address of the given range void ContextInternal::decodeContext(Decoder &decoder,const Address &addr1,const Address &addr2) { for(;;) { uint4 subId = decoder.openElement(); if (subId != ELEM_SET) break; uintm val = decoder.readUnsignedInteger(ATTRIB_VAL); ContextBitRange &var(getVariable(decoder.readString(ATTRIB_NAME))); vector vec; if (addr1.isInvalid()) { // Invalid addr1, indicates we should set default value uintm *defaultBuffer = getDefaultValue(); for(int4 i=0;i size) { size = sz; database.defaultValue().reset(size); } variables[nm] = bitrange; } ContextBitRange &ContextInternal::getVariable(const string &nm) { map::iterator iter; iter = variables.find(nm); if (iter == variables.end()) throw LowlevelError("Non-existent context variable: "+nm); return (*iter).second; } const ContextBitRange &ContextInternal::getVariable(const string &nm) const { map::const_iterator iter; iter = variables.find(nm); if (iter == variables.end()) throw LowlevelError("Non-existent context variable: "+nm); return (*iter).second; } const uintm *ContextInternal::getContext(const Address &addr, uintb &first,uintb &last) const { int4 valid; Address before,after; const uintm *res = database.bounds(addr,before,after,valid).array; if (((valid&1)!=0)||(before.getSpace() != addr.getSpace())) first = 0; else first = before.getOffset(); if (((valid&2)!=0)||(after.getSpace() != addr.getSpace())) last = addr.getSpace()->getHighest(); else last = after.getOffset()-1; return res; } void ContextInternal::getRegionForSet(vector &res,const Address &addr1,const Address &addr2, int4 num,uintm mask) { database.split(addr1); partmap::iterator aiter,biter; aiter = database.begin(addr1); if (!addr2.isInvalid()) { database.split(addr2); biter = database.begin(addr2); } else biter = database.end(); while(aiter != biter) { uintm *context = (*aiter).second.array; uintm *maskPtr = 
(*aiter).second.mask; res.push_back(context); maskPtr[num] |= mask; // Mark that this value is being definitely set ++aiter; } } void ContextInternal::getRegionToChangePoint(vector &res,const Address &addr,int4 num,uintm mask) { database.split(addr); partmap::iterator aiter,biter; uintm *maskArray,*vecArray; aiter = database.begin(addr); biter = database.end(); if (aiter == biter) return; vecArray = (*aiter).second.array; res.push_back(vecArray); maskArray = (*aiter).second.mask; maskArray[num] |= mask; ++aiter; while(aiter != biter) { vecArray = (*aiter).second.array; maskArray = (*aiter).second.mask; if ((maskArray[num] & mask) != 0) break; // Reached point where this value was definitively set before res.push_back(vecArray); ++aiter; } } TrackedSet &ContextInternal::createSet(const Address &addr1,const Address &addr2) { TrackedSet &res(trackbase.clearRange(addr1,addr2)); res.clear(); return res; } void ContextInternal::encode(Encoder &encoder) const { if (database.empty() && trackbase.empty()) return; encoder.openElement(ELEM_CONTEXT_POINTS); partmap::const_iterator fiter,fenditer; fiter = database.begin(); fenditer = database.end(); for(;fiter!=fenditer;++fiter) // Save context at each changepoint encodeContext(encoder,(*fiter).first,(*fiter).second.array); partmap::const_iterator titer,tenditer; titer = trackbase.begin(); tenditer = trackbase.end(); for(;titer!=tenditer;++titer) encodeTracked(encoder,(*titer).first,(*titer).second); encoder.closeElement(ELEM_CONTEXT_POINTS); } void ContextInternal::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(ELEM_CONTEXT_POINTS); for(;;) { uint4 subId = decoder.openElement(); if (subId == 0) break; if (subId == ELEM_CONTEXT_POINTSET) { uint4 attribId = decoder.getNextAttributeId(); decoder.rewindAttributes(); if (attribId==0) { decodeContext(decoder,Address(),Address()); // Restore the default value } else { VarnodeData vData; vData.decodeFromAttributes(decoder); 
decodeContext(decoder,vData.getAddr(),Address()); } } else if (subId == ELEM_TRACKED_POINTSET) { VarnodeData vData; vData.decodeFromAttributes(decoder); decodeTracked(decoder,trackbase.split(vData.getAddr()) ); } else throw LowlevelError("Bad tag"); decoder.closeElement(subId); } decoder.closeElement(elemId); } void ContextInternal::decodeFromSpec(Decoder &decoder) { uint4 elemId = decoder.openElement(ELEM_CONTEXT_DATA); for(;;) { uint4 subId = decoder.openElement(); if (subId == 0) break; Range range; range.decodeFromAttributes(decoder); // There MUST be a range Address addr1 = range.getFirstAddr(); Address addr2 = range.getLastAddrOpen(decoder.getAddrSpaceManager()); if (subId == ELEM_CONTEXT_SET) { decodeContext(decoder,addr1,addr2); } else if (subId == ELEM_TRACKED_SET) { decodeTracked(decoder,createSet(addr1,addr2)); } else throw LowlevelError("Bad tag"); decoder.closeElement(subId); } decoder.closeElement(elemId); } /// \param db is the context database that will be encapsulated ContextCache::ContextCache(ContextDatabase *db) { database = db; curspace = (AddrSpace *)0; // Mark cache as invalid allowset = true; } /// Check if the address is in the current valid range. If it is, return the cached /// blob. Otherwise, make a call to the database and cache a new block and valid range. /// \param addr is the given address /// \param buf is where the blob should be stored void ContextCache::getContext(const Address &addr,uintm *buf) const { if ((addr.getSpace()!=curspace)||(first>addr.getOffset())||(lastgetContext(addr,first,last); } for(int4 i=0;igetContextSize();++i) buf[i] = context[i]; } /// \brief Change the value of a context variable at the given address with no bound /// /// The context value is set starting at the given address and \e paints memory up /// to the next explicit change point. 
/// \param addr is the given starting address /// \param num is the word index of the context variable /// \param mask is the mask delimiting the context variable /// \param value is the (already shifted) value to set void ContextCache::setContext(const Address &addr,int4 num,uintm mask,uintm value) { if (!allowset) return; database->setContextChangePoint(addr,num,mask,value); if ((addr.getSpace()==curspace)&&(first<=addr.getOffset())&&(last>=addr.getOffset())) curspace = (AddrSpace *)0; // Invalidate cache } /// \brief Change the value of a context variable across an explicit address range /// /// The context value is \e painted across the range. The context variable is marked as /// explicitly changing at the starting address of the range. /// \param addr1 is the starting address of the given range /// \param addr2 is the ending address of the given range /// \param num is the word index of the context variable /// \param mask is the mask delimiting the context variable /// \param value is the (already shifted) value to set void ContextCache::setContext(const Address &addr1,const Address &addr2,int4 num,uintm mask,uintm value) { if (!allowset) return; database->setContextRegion(addr1,addr2,num,mask,value); if ((addr1.getSpace()==curspace)&&(first<=addr1.getOffset())&&(last>=addr1.getOffset())) curspace = (AddrSpace *)0; // Invalidate cache if ((first<=addr2.getOffset())&&(last>=addr2.getOffset())) curspace = (AddrSpace *)0; // Invalidate cache if ((first>=addr1.getOffset())&&(first<=addr2.getOffset())) curspace = (AddrSpace *)0; // Invalidate cache } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/globalcontext.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at
*
*      http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GLOBALCONTEXT_HH__
#define __GLOBALCONTEXT_HH__

/// \file globalcontext.hh
/// \brief Utilities for getting address-based context to the disassembler and decompiler

#include "pcoderaw.hh"
#include "partmap.hh"

namespace ghidra {

extern ElementId ELEM_CONTEXT_DATA;		///< Marshaling element \<context_data>
extern ElementId ELEM_CONTEXT_POINTS;		///< Marshaling element \<context_points>
extern ElementId ELEM_CONTEXT_POINTSET;		///< Marshaling element \<context_pointset>
extern ElementId ELEM_CONTEXT_SET;		///< Marshaling element \<context_set>
extern ElementId ELEM_SET;			///< Marshaling element \<set>
extern ElementId ELEM_TRACKED_POINTSET;		///< Marshaling element \<tracked_pointset>
extern ElementId ELEM_TRACKED_SET;		///< Marshaling element \<tracked_set>

/// \brief Description of a context variable within the disassembly context \e blob
///
/// Disassembly context is stored as individual (integer) values packed into a sequence of words. This class
/// represents the info for encoding or decoding a single value within this sequence. A value is
/// a contiguous range of bits within one context word. Size can range from 1 bit up to the size of a word.
class ContextBitRange { int4 word; ///< Index of word containing this context value int4 startbit; ///< Starting bit of the value within its word (0=most significant bit 1=least significant) int4 endbit; ///< Ending bit of the value within its word int4 shift; ///< Right-shift amount to apply when unpacking this value from its word uintm mask; ///< Mask to apply (after shifting) when unpacking this value from its word public: ContextBitRange(void) { } ///< Construct an undefined bit range ContextBitRange(int4 sbit,int4 ebit); ///< Construct a context value given an absolute bit range int4 getShift(void) const { return shift; } ///< Return the shift-amount for \b this value uintm getMask(void) const { return mask; } ///< Return the mask for \b this value int4 getWord(void) const { return word; } ///< Return the word index for \b this value /// \brief Set \b this value within a given context blob /// /// \param vec is the given context blob to alter (as an array of uintm words) /// \param val is the integer value to set void setValue(uintm *vec,uintm val) const { uintm newval = vec[word]; newval &= ~(mask<>shift)&mask); } }; /// \brief A tracked register (Varnode) and the value it contains /// /// This is the object returned when querying for tracked registers, /// via ContextDatabase::getTrackedSet(). It holds the storage details of the register and /// the actual value it holds at the point of the query. 
struct TrackedContext { VarnodeData loc; ///< Storage details of the register being tracked uintb val; ///< The value of the register void decode(Decoder &decoder); ///< Decode \b this from a stream void encode(Encoder &encoder) const; ///< Encode \b this to a stream }; typedef vector TrackedSet; ///< A set of tracked registers and their values (at one code point) /// \brief An interface to a database of disassembly/decompiler \b context information /// /// \b Context \b information is a set of named variables that hold concrete values at specific /// addresses in the target executable being analyzed. A variable can hold different values at /// different addresses, but a specific value at a specific address never changes. Analysis recovers /// these values over time, populating this database, and querying this database lets analysis /// provides concrete values for memory locations in context. /// /// Context variables come in two flavors: /// - \b Low-level \b context \b variables: /// These can affect instruction decoding. These can be as small as a single bit and need to /// be defined in the Sleigh specification (so that Sleigh knows how they effect disassembly). /// These variables are not mapped to normal memory locations with an address space and offset /// (although they often have a corresponding embedding into a normal memory location). /// The model to keep in mind is a control register with specialized bit-fields within it. /// - \b High-level \b tracked \b variables: /// These are normal memory locations that are to be treated as constants across some range of /// code. These are normally registers that are being tracked by the compiler outside the /// domain of normal local and global variables. They have a specific value established by /// the compiler coming into a function but are not supposed to be interpreted as a high-level /// variable. Typical examples are the direction flag (for \e string instructions) and segment /// registers. 
All tracked variables are interpreted as a constant value at the start of a /// function, although the memory location can be recycled for other calculations later in the /// function. /// /// Low-level context variables can be queried and set by name -- getVariable(), setVariable(), /// setVariableRegion() -- but the disassembler accesses all the variables at an address as a group /// via getContext(), setContextChangePoint(), setContextRegion(). In this setting, all the values /// are packed together in an array of words, a context \e blob (See ContextBitRange). /// /// Tracked variables are also queried as a group via getTrackedSet() and createSet(). These return /// a list of TrackedContext objects. class ContextDatabase { protected: static void encodeTracked(Encoder &encoder,const Address &addr,const TrackedSet &vec); static void decodeTracked(Decoder &decoder,TrackedSet &vec); /// \brief Retrieve the context variable description object by name /// /// If the variable doesn't exist an exception is thrown. /// \param nm is the name of the context value /// \return the ContextBitRange object matching the name virtual ContextBitRange &getVariable(const string &nm)=0; /// \brief Retrieve the context variable description object by name /// /// If the variable doesn't exist an exception is thrown. /// \param nm is the name of the context value /// \return the ContextBitRange object matching the name virtual const ContextBitRange &getVariable(const string &nm) const=0; /// \brief Grab the context blob(s) for the given address range, marking bits that will be set /// /// This is an internal routine for obtaining the actual memory regions holding context values /// for the address range. This also informs the system which bits are getting set. A split is forced /// at the first address, and at least one memory region is passed back. 
The second address can be /// invalid in which case the memory region passed back is valid from the first address to whatever /// the next split point is. /// \param res will hold pointers to memory regions for the given range /// \param addr1 is the starting address of the range /// \param addr2 is (1 past) the last address of the range or is invalid /// \param num is the word index for the context value that will be set /// \param mask is a mask of the value being set (within its word) virtual void getRegionForSet(vector &res,const Address &addr1, const Address &addr2,int4 num,uintm mask)=0; /// \brief Grab the context blob(s) starting at the given address up to the first point of change /// /// This is an internal routine for obtaining the actual memory regions holding context values /// starting at the given address. A specific context value is specified, and all memory regions /// are returned up to the first address where that particular context value changes. /// \param res will hold pointers to memory regions being passed back /// \param addr is the starting address of the regions to fetch /// \param num is the word index for the specific context value being set /// \param mask is a mask of the context value being set (within its word) virtual void getRegionToChangePoint(vector &res,const Address &addr,int4 num,uintm mask)=0; /// \brief Retrieve the memory region holding all default context values /// /// This fetches the active memory holding the default context values on top of which all other context /// values are overlaid. /// \return the memory region holding all the default context values virtual uintm *getDefaultValue(void)=0; /// \brief Retrieve the memory region holding all default context values /// /// This fetches the active memory holding the default context values on top of which all other context /// values are overlaid. 
/// \return the memory region holding all the default context values virtual const uintm *getDefaultValue(void) const=0; public: virtual ~ContextDatabase() {} ///< Destructor /// \brief Retrieve the number of words (uintm) in a context \e blob /// /// \return the number of words virtual int4 getContextSize(void) const=0; /// \brief Register a new named context variable (as a bit range) with the database /// /// A new variable is registered by providing a name and the range of bits the value will occupy /// within the context blob. The full blob size is automatically increased if necessary. The variable /// must be contained within a single word, and all variables must be registered before any values can /// be set. /// \param nm is the name of the new variable /// \param sbit is the position of the variable's most significant bit within the blob /// \param ebit is the position of the variable's least significant bit within the blob virtual void registerVariable(const string &nm,int4 sbit,int4 ebit)=0; /// \brief Get the context blob of values associated with a given address /// /// \param addr is the given address /// \return the memory region holding the context values for the address virtual const uintm *getContext(const Address &addr) const=0; /// \brief Get the context blob of values associated with a given address and its bounding offsets /// /// In addition to the memory region, the range of addresses for which the region is valid /// is passed back as offsets into the address space. 
/// \param addr is the given address /// \param first will hold the starting offset of the valid range /// \param last will hold the ending offset of the valid range /// \return the memory region holding the context values for the address virtual const uintm *getContext(const Address &addr,uintb &first,uintb &last) const=0; /// \brief Get the set of default values for all tracked registers /// /// \return the list of TrackedContext objects virtual TrackedSet &getTrackedDefault(void)=0; /// \brief Get the set of tracked register values associated with the given address /// /// \param addr is the given address /// \return the list of TrackedContext objects virtual const TrackedSet &getTrackedSet(const Address &addr) const=0; /// \brief Create a tracked register set that is valid over the given range /// /// This really should be an internal routine. The created set is empty, old values are blown /// away. If old/default values are to be preserved, they must be copied back in. /// \param addr1 is the starting address of the given range /// \param addr2 is (1 past) the ending address of the given range /// \return the empty set of tracked register values virtual TrackedSet &createSet(const Address &addr1,const Address &addr2)=0; /// \brief Encode the entire database to a stream /// /// \param encoder is the stream encoder virtual void encode(Encoder &encoder) const=0; /// \brief Restore the state of \b this database object from the given stream decoder /// /// \param decoder is the given stream decoder virtual void decode(Decoder &decoder)=0; /// \brief Add initial context state from elements in the compiler/processor specifications /// /// Parse a \ element from the given stream decoder from either the compiler /// or processor specification file for the architecture, initializing this database. 
/// \param decoder is the given stream decoder virtual void decodeFromSpec(Decoder &decoder)=0; void setVariableDefault(const string &nm,uintm val); ///< Provide a default value for a context variable uintm getDefaultValue(const string &nm) const; ///< Retrieve the default value for a context variable void setVariable(const string &nm,const Address &addr,uintm value); ///< Set a context value at the given address uintm getVariable(const string &nm,const Address &addr) const; ///< Retrieve a context value at the given address void setContextChangePoint(const Address &addr,int4 num,uintm mask,uintm value); void setContextRegion(const Address &addr1,const Address &addr2,int4 num,uintm mask,uintm value); void setVariableRegion(const string &nm,const Address &begad, const Address &endad,uintm value); uintb getTrackedValue(const VarnodeData &mem,const Address &point) const; }; /// \brief An in-memory implementation of the ContextDatabase interface /// /// Context blobs are held in a partition map on addresses. Any address within the map /// indicates a \e split point, where the value of a context variable was explicitly changed. /// Sets of tracked registers are held in a separate partition map. class ContextInternal : public ContextDatabase { /// \brief A context blob, holding context values across some range of code addresses /// /// This is an internal object that allocates the actual "array of words" for a context blob. /// An associated mask array holds 1-bits for context variables that were explicitly set for the /// specific split point. 
struct FreeArray { uintm *array; ///< The "array of words" holding context variable values uintm *mask; ///< The mask array indicating which variables are explicitly set int4 size; ///< The number of words in the array FreeArray(void) { size=0; array = (uintm *)0; mask = (uintm *)0; } ///< Construct an empty context blob ~FreeArray(void) { if (size!=0) { delete [] array; delete [] mask; } } ///< Destructor void reset(int4 sz); ///< Resize the context blob, preserving old values FreeArray &operator=(const FreeArray &op2); ///< Assignment operator }; int4 size; ///< Number of words in a context blob (for this architecture) map variables; ///< Map from context variable name to description object partmap database; ///< Partition map of context blobs (FreeArray) partmap trackbase; ///< Partition map of tracked register sets void encodeContext(Encoder &encoder,const Address &addr,const uintm *vec) const; void decodeContext(Decoder &decoder,const Address &addr1,const Address &addr2); virtual ContextBitRange &getVariable(const string &nm); virtual const ContextBitRange &getVariable(const string &nm) const; virtual void getRegionForSet(vector &res,const Address &addr1, const Address &addr2,int4 num,uintm mask); virtual void getRegionToChangePoint(vector &res,const Address &addr,int4 num,uintm mask); virtual uintm *getDefaultValue(void) { return database.defaultValue().array; } virtual const uintm *getDefaultValue(void) const { return database.defaultValue().array; } public: ContextInternal(void) { size = 0; } virtual ~ContextInternal(void) {} virtual int4 getContextSize(void) const { return size; } virtual void registerVariable(const string &nm,int4 sbit,int4 ebit); virtual const uintm *getContext(const Address &addr) const { return database.getValue(addr).array; } virtual const uintm *getContext(const Address &addr,uintb &first,uintb &last) const; virtual TrackedSet &getTrackedDefault(void) { return trackbase.defaultValue(); } virtual const TrackedSet &getTrackedSet(const 
Address &addr) const { return trackbase.getValue(addr); }
  virtual TrackedSet &createSet(const Address &addr1,const Address &addr2);
  virtual void encode(Encoder &encoder) const;
  virtual void decode(Decoder &decoder);
  virtual void decodeFromSpec(Decoder &decoder);
};

/// \brief A helper class for caching the active context blob to minimize database lookups
///
/// This merely caches the last retrieved context blob ("array of words") and the range of
/// addresses over which the blob is valid. It encapsulates the ContextDatabase itself and
/// exposes a minimal interface (getContext() and setContext()).
class ContextCache {
  ContextDatabase *database;		///< The encapsulated context database
  bool allowset;			///< If set to \b false, any setContext() call is dropped
  mutable AddrSpace *curspace;		///< Address space of the current valid range
  mutable uintb first;			///< Starting offset of the current valid range
  mutable uintb last;			///< Ending offset of the current valid range
  mutable const uintm *context;		///< The current cached context blob
public:
  ContextCache(ContextDatabase *db);	///< Construct given a context database
  ContextDatabase *getDatabase(void) const { return database; }	///< Retrieve the encapsulated database object
  void allowSet(bool val) { allowset = val; }	///< Toggle whether setContext() calls are ignored
  void getContext(const Address &addr,uintm *buf) const;	///< Retrieve the context blob for the given address
  void setContext(const Address &addr,int4 num,uintm mask,uintm value);
  void setContext(const Address &addr1,const Address &addr2,int4 num,uintm mask,uintm value);
};

} // End namespace ghidra

#endif

================================================ FILE: pypcode/sleigh/loadimage.cc ================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "loadimage.hh" namespace ghidra { /// This is a convenience method wrapped around the core /// loadFill() routine. It automatically allocates an array /// of the desired size, and then fills it with load image data. /// If the array cannot be allocated, an exception is thrown. /// The caller assumes the responsibility of freeing the /// array after it has been used. /// \param size is the number of bytes to read from the image /// \param addr is the address of the first byte being read /// \return a pointer to the desired bytes uint1 *LoadImage::load(int4 size,const Address &addr) { uint1 *buf = new uint1[ size ]; if (buf == (uint1 *)0) throw LowlevelError("Out of memory"); loadFill(buf,size,addr); return buf; } RawLoadImage::RawLoadImage(const string &f) : LoadImage(f) { vma = 0; thefile = (ifstream *)0; spaceid = (AddrSpace *)0; filesize = 0; } RawLoadImage::~RawLoadImage(void) { if (thefile != (ifstream *)0) { thefile->close(); delete thefile; } } /// The file is opened and its size immediately recovered. 
void RawLoadImage::open(void) { if (thefile != (ifstream *)0) throw LowlevelError("loadimage is already open"); thefile = new ifstream(filename.c_str()); if (!(*thefile)) { string errmsg = "Unable to open raw image file: "+filename; throw LowlevelError(errmsg); } thefile->seekg(0,ios::end); filesize = thefile->tellg(); } string RawLoadImage::getArchType(void) const { return "unknown"; } void RawLoadImage::adjustVma(long adjust) { adjust = AddrSpace::addressToByte(adjust,spaceid->getWordSize()); vma += adjust; } void RawLoadImage::loadFill(uint1 *ptr,int4 size,const Address &addr) { uintb curaddr = addr.getOffset(); uintb offset = 0; uintb readsize; curaddr -= vma; // Get relative offset of first byte while(size>0) { if (curaddr >= filesize) { if (offset == 0) // Initial address not within file break; memset(ptr+offset,0,size); // Fill out the rest of the buffer with 0 return; } readsize = size; if (curaddr + readsize > filesize) // Adjust to biggest possible read readsize = filesize - curaddr; thefile->seekg(curaddr); thefile->read((char *)(ptr+offset),readsize); offset += readsize; size -= readsize; curaddr += readsize; } if (size > 0) { ostringstream errmsg; errmsg << "Unable to load " << dec << size << " bytes at " << addr.getShortcut(); addr.printRaw(errmsg); throw DataUnavailError(errmsg.str()); } } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/loadimage.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ /// \file loadimage.hh /// \brief Classes and API for accessing a binary load image #ifndef __LOADIMAGE_HH__ #define __LOADIMAGE_HH__ #include "address.hh" namespace ghidra { // XXX: On Windows, LoadImage is defined as a macro which conflicts with name // of the class declared in this file. For now, just undefine the macro. #ifdef _WINDOWS #ifdef LoadImage #undef LoadImage #endif #endif /// \brief Exception indicating data was not available /// /// This exception is thrown when a request for load image /// data cannot be met, usually because the requested address /// range is not in the image. struct DataUnavailError : public LowlevelError { DataUnavailError(const string &s) : LowlevelError(s) {} ///< Instantiate with an explanatory string }; /// \brief A record indicating a function symbol /// /// This is a lightweight object holding the Address and name of a function struct LoadImageFunc { Address address; ///< Start of function string name; ///< Name of function }; /// \brief A record describing a section bytes in the executable /// /// A lightweight object specifying the location and size of the section and basic properties struct LoadImageSection { /// Boolean properties a section might have enum { unalloc = 1, ///< Not allocated in memory (debug info) noload = 2, ///< uninitialized section code = 4, ///< code only data = 8, ///< data only readonly = 16 ///< read only section }; Address address; ///< Starting address of section uintb size; ///< Number of bytes in section uint4 flags; ///< Properties of the section }; /// \brief An interface into a particular binary executable image /// /// This class provides the abstraction needed by the decompiler /// for the numerous load file formats used to encode binary /// executables. 
The data encoding the machine instructions /// for the executable can be accessed via the addresses where /// that data would be loaded into RAM. /// Properties other than the main data and instructions of the /// binary are not supposed to repeatedly queried through this /// interface. This information is intended to be read from /// this class exactly once, during initialization, and used to /// populate the main decompiler database. This class currently /// has only rudimentary support for accessing such properties. class LoadImage { protected: string filename; ///< Name of the loadimage public: LoadImage(const string &f); ///< LoadImage constructor virtual ~LoadImage(void); ///< LoadImage destructor const string &getFileName(void) const; ///< Get the name of the LoadImage virtual void loadFill(uint1 *ptr,int4 size,const Address &addr)=0; ///< Get data from the LoadImage virtual void openSymbols(void) const; ///< Prepare to read symbols virtual void closeSymbols(void) const; ///< Stop reading symbols virtual bool getNextSymbol(LoadImageFunc &record) const; ///< Get the next symbol record virtual void openSectionInfo(void) const; ///< Prepare to read section info virtual void closeSectionInfo(void) const; ///< Stop reading section info virtual bool getNextSection(LoadImageSection &sec) const; ///< Get info on the next section virtual void getReadonly(RangeList &list) const; ///< Return list of \e readonly address ranges virtual string getArchType(void) const=0; ///< Get a string indicating the architecture type virtual void adjustVma(long adjust)=0; ///< Adjust load addresses with a global offset uint1 *load(int4 size,const Address &addr); ///< Load a chunk of image }; /// \brief A simple raw binary loadimage /// /// This is probably the simplest loadimage. Bytes from the image are read directly from a file stream. /// The address associated with each byte is determined by a single value, the vma, which is the address /// of the first byte in the file. 
No symbols or sections are supported class RawLoadImage : public LoadImage { uintb vma; ///< Address of first byte in the file ifstream *thefile; ///< Main file stream for image uintb filesize; ///< Total number of bytes in the loadimage/file AddrSpace *spaceid; ///< Address space that the file bytes are mapped to public: RawLoadImage(const string &f); ///< RawLoadImage constructor void attachToSpace(AddrSpace *id) { spaceid = id; } ///< Attach the raw image to a particular space void open(void); ///< Open the raw file for reading virtual ~RawLoadImage(void); ///< RawLoadImage destructor virtual void loadFill(uint1 *ptr,int4 size,const Address &addr); virtual string getArchType(void) const; virtual void adjustVma(long adjust); }; /// For the base class there is no relevant initialization except /// the name of the image. /// \param f is the name of the image inline LoadImage::LoadImage(const string &f) { filename = f; } /// The destructor for the load image object. inline LoadImage::~LoadImage(void) { } /// The loadimage is usually associated with a file. This routine /// retrieves the name as a string. /// \return the name of the image inline const string &LoadImage::getFileName(void) const { return filename; } /// This routine should read in and parse any symbol information /// that the load image contains about executable. Once this /// method is called, individual symbol records are read out /// using the getNextSymbol() method. inline void LoadImage::openSymbols(void) const { } /// Once all the symbol information has been read out from the /// load image via the openSymbols() and getNextSymbol() calls, /// the application should call this method to free up resources /// used in parsing the symbol information. inline void LoadImage::closeSymbols(void) const { } /// This method is used to read out an individual symbol record, /// LoadImageFunc, from the load image. 
Right now, the only /// information that can be read out are function starts and the /// associated function name. This method can be called repeatedly /// to iterate through all the symbols, until it returns \b false. /// This indicates the end of the symbols. /// \param record is a reference to the symbol record to be filled in /// \return \b true if there are more records to read inline bool LoadImage::getNextSymbol(LoadImageFunc &record) const { return false; } /// This method initializes iteration over all the sections of /// bytes that are mapped by the load image. Once this is called, /// information on individual sections should be read out with /// the getNextSection() method. inline void LoadImage::openSectionInfo(void) const { } /// Once all the section information is read from the load image /// using the getNextSection() method, this method should be /// called to free up any resources used in parsing the section info. inline void LoadImage::closeSectionInfo(void) const { } /// This method is used to read out a record that describes a /// single section of bytes mapped by the load image. This /// method can be called repeatedly until it returns \b false, /// to get info on additional sections. /// \param record is a reference to the info record to be filled in /// \return \b true if there are more records to read inline bool LoadImage::getNextSection(LoadImageSection &record) const { return false; } /// This method should read out information about \e all /// address ranges within the load image that are known to be /// \b readonly. This method is intended to be called only /// once, so all information should be written to the passed /// RangeList object. /// \param list is where readonly info will get put inline void LoadImage::getReadonly(RangeList &list) const { } /// \fn void LoadImage::adjustVma(long adjust) /// Most load image formats automatically encode information /// about the true loading address(es) for the data in the image. 
/// But if this is missing or incorrect, this routine can be /// used to make a global adjustment to the load address. Only /// one adjustment is made across \e all addresses in the image. /// The offset passed to this method is added to the stored /// or default value for any address queried in the image. /// This is most often used in a \e raw binary file format. In /// this case, the entire executable file is intended to be /// read straight into RAM, as one contiguous chunk, in order to /// be executed. In the absence of any other info, the first /// byte of the image file is loaded at offset 0. This method /// then would adjust the load address of the first byte. /// \param adjust is the offset amount to be added to default values /// \fn string LoadImage::getArchType(void) const /// The load image class is intended to be a generic front-end /// to the large variety of load formats in use. This method /// should return a string that identifies the particular /// architecture this particular image is intended to run on. /// It is currently the responsibility of any derived LoadImage /// class to establish a format for this string, but it should /// generally contain some indication of the operating system /// and the processor. /// \return the identifier string /// \fn void LoadImage::loadFill(uint1 *ptr,int4 size,const Address &addr) /// This is the \e core routine of a LoadImage. Given a particular /// address range, this routine retrieves the exact byte values /// that are stored at that address when the executable is loaded /// into RAM. The caller must supply a pre-allocated array /// of bytes where the returned bytes should be stored. If the /// requested address range does not exist in the image, or /// otherwise can't be retrieved, this method throws an /// DataUnavailError exception. 
/// \param ptr points to where the resulting bytes will be stored /// \param size is the number of bytes to retrieve from the image /// \param addr is the starting address of the bytes to retrieve } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/loadimage_bfd.cc ================================================ /* ### * IP: GHIDRA * NOTE: Excluded from Build. Used for development only in support of console mode - Links to GNU BFD library which is GPL 3 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "loadimage_bfd.hh" namespace ghidra { int4 LoadImageBfd::bfdinit = 0; // Global initialization variable LoadImageBfd::LoadImageBfd(const string &f,const string &t) : LoadImage(f) { target = t; if (bfdinit == 0) { bfdinit = 1; bfd_init(); } thebfd = (bfd *)0; spaceid = (AddrSpace *)0; symbol_table = (asymbol **)0; bufsize = 512; // Default buffer size bufoffset = ~((uintb)0); buffer = new uint1[ bufsize ]; } LoadImageBfd::~LoadImageBfd(void) { if (symbol_table != (asymbol **)0) delete [] symbol_table; if (thebfd != (bfd *) 0) close(); delete [] buffer; } string LoadImageBfd::getArchType(void) const { string type; string targ; type = bfd_printable_name(thebfd); type += ':'; targ = thebfd->xvec->name; type += targ; return type; } void LoadImageBfd::adjustVma(long adjust) { asection *s; adjust = AddrSpace::addressToByte(adjust,spaceid->getWordSize()); for(s=thebfd->sections;s!=(asection *)NULL;s = s->next) { s->vma += adjust; s->lma += adjust; } } void LoadImageBfd::open(void) { if (thebfd != (bfd *)0) throw LowlevelError("BFD library did not initialize"); thebfd = bfd_openr(filename.c_str(),target.c_str()); if (thebfd == (bfd *)0) { string errmsg="Unable to open image file: "; errmsg += filename; throw LowlevelError(errmsg); } if (!bfd_check_format( thebfd, bfd_object)) { string errmsg="File: "; errmsg += filename; errmsg += " : not in recognized object file format"; throw LowlevelError(errmsg); } } void LoadImageBfd::close(void) { bfd_close(thebfd); thebfd = (bfd *)0; } asection *LoadImageBfd::findSection(uintb offset,uintb &secsize) const { // Return section containing offset, or closest greater section asection *p; uintb start,stop; for(p = thebfd->sections; p != (asection *)NULL; p = p->next) { start = p->vma; secsize = (p->size!=0) ? 
p->size : p->rawsize; stop = start + secsize; if ((offset>=start)&&(offsetsections; p != (asection *)NULL; p = p->next) { if (p->vma > offset) { if (champ == (asection *)0) champ = p; else if (p->vma < champ->vma) champ = p; } } return champ; } void LoadImageBfd::loadFill(uint1 *ptr,int4 size,const Address &addr) { asection *p; uintb secsize; uintb curaddr,offset; bfd_size_type readsize; int4 cursize; if (addr.getSpace() != spaceid) throw DataUnavailError("Trying to get loadimage bytes from space: "+addr.getSpace()->getName()); curaddr = addr.getOffset(); if ((curaddr>=bufoffset)&&(curaddr+size0) { p = findSection(curaddr,secsize); if (p == (asection *)0) { if (offset==0) // Initial address not mapped break; memset(buffer+offset,0,cursize); // Fill out the rest of the buffer with 0 memcpy(ptr,buffer,size); return; } if (p->vma > curaddr) { // No section matches if (offset==0) // Initial address not mapped break; readsize = p->vma - curaddr; if (readsize > cursize) readsize = cursize; memset(buffer+offset,0,readsize); // Fill in with zeroes to next section } else { readsize = cursize; if (curaddr+readsize>p->vma+secsize) // Adjust to biggest possible read readsize = (bfd_size_type)(p->vma+secsize-curaddr); bfd_get_section_contents(thebfd,p,buffer+offset,(file_ptr)(curaddr-p->vma),readsize); } offset += readsize; cursize -= readsize; curaddr += readsize; } if (cursize > 0) { ostringstream errmsg; errmsg << "Unable to load " << dec << cursize << " bytes at " << addr.getShortcut(); addr.printRaw(errmsg); throw DataUnavailError(errmsg.str()); } memcpy(ptr,buffer,size); // Copy requested bytes from the buffer } void LoadImageBfd::advanceToNextSymbol(void) const { while(cursymbol < number_of_symbols) { const asymbol *a = symbol_table[cursymbol]; if ((a->flags & BSF_FUNCTION)!=0) { if (a->name != (const char *)0) return; } cursymbol += 1; } } void LoadImageBfd::openSymbols(void) const { long storage_needed; cursymbol = 0; if (symbol_table != (asymbol **)0) { 
advanceToNextSymbol(); return; } if (!(bfd_get_file_flags(thebfd) & HAS_SYMS)) { // There are no symbols number_of_symbols = 0; return; } storage_needed = bfd_get_symtab_upper_bound(thebfd); if (storage_needed <= 0) { number_of_symbols = 0; return; } symbol_table = (asymbol **) new uint1[storage_needed]; // Storage needed in bytes number_of_symbols = bfd_canonicalize_symtab(thebfd,symbol_table); if (number_of_symbols <= 0) { delete [] symbol_table; symbol_table = (asymbol **)0; number_of_symbols = 0; return; } advanceToNextSymbol(); // sort(symbol_table,symbol_table+number_of_symbols,compare_symbols); } bool LoadImageBfd::getNextSymbol(LoadImageFunc &record) const { // Get record for next symbol if it exists, otherwise return false if (cursymbol >= number_of_symbols) return false; const asymbol *a = symbol_table[cursymbol]; cursymbol += 1; advanceToNextSymbol(); record.name = a->name; uintb val = bfd_asymbol_value(a); record.address = Address(spaceid,val); return true; } void LoadImageBfd::openSectionInfo(void) const { secinfoptr = thebfd->sections; } void LoadImageBfd::closeSectionInfo(void) const { secinfoptr = (asection *)0; } bool LoadImageBfd::getNextSection(LoadImageSection &record) const { if (secinfoptr == (asection *)0) return false; record.address = Address(spaceid,secinfoptr->vma); record.size = (secinfoptr->size!=0) ? 
secinfoptr->size : secinfoptr->rawsize; record.flags = 0; if ((secinfoptr->flags & SEC_ALLOC)==0) record.flags |= LoadImageSection::unalloc; if ((secinfoptr->flags & SEC_LOAD)==0) record.flags |= LoadImageSection::noload; if ((secinfoptr->flags & SEC_READONLY)!=0) record.flags |= LoadImageSection::readonly; if ((secinfoptr->flags & SEC_CODE)!=0) record.flags |= LoadImageSection::code; if ((secinfoptr->flags & SEC_DATA)!=0) record.flags |= LoadImageSection::data; secinfoptr = secinfoptr->next; return (secinfoptr != (asection *)0); } void LoadImageBfd::closeSymbols(void) const { if (symbol_table != (asymbol **)0) delete [] symbol_table; symbol_table = (asymbol **)0; number_of_symbols = 0; cursymbol = 0; } void LoadImageBfd::getReadonly(RangeList &list) const { // List all ranges that are read only uintb start,stop,secsize; asection *p; for(p = thebfd->sections; p != (asection *)NULL; p = p->next) { if ((p->flags & SEC_READONLY)!=0) { start = p->vma; secsize = (p->size!=0) ? p->size : p->rawsize; if (secsize == 0) continue; stop = start + secsize - 1; list.insertRange(spaceid,start,stop); } } } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/loadimage_bfd.hh ================================================ /* ### * IP: GHIDRA * NOTE: Interface to GNU BFD library which is GPL 3 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ // Use the GNU bfd library to manipulate a load image #ifndef __LOADIMAGE_BFD_HH__ #define __LOADIMAGE_BFD_HH__ #include "loadimage.hh" // bfd.h requires PACKAGE/PACKAGE_VERSION to be defined // https://sourceware.org/bugzilla/show_bug.cgi?id=14243 #ifndef PACKAGE #define PACKAGE #define __LOADIMAGE_BFD__DEFINED_PACKAGE #endif #ifndef PACKAGE_VERSION #define PACKAGE_VERSION #define __LOADIMAGE_BFD__DEFINED_PACKAGE_VERSION #endif #include #ifdef __LOADIMAGE_BFD__DEFINED_PACKAGE #undef PACKAGE #undef __LOADIMAGE_BFD__DEFINED_PACKAGE #endif #ifdef __LOADIMAGE_BFD__DEFINED_PACKAGE_VERSION #undef PACKAGE_VERSION #undef __LOADIMAGE_BFD__DEFINED_PACKAGE_VERSION #endif namespace ghidra { struct ImportRecord { string dllname; string funcname; int ordinal; Address address; Address thunkaddress; }; class LoadImageBfd : public LoadImage { static int4 bfdinit; // Is the library (globally) initialized string target; // File format (supported by BFD) bfd *thebfd; AddrSpace *spaceid; // We need to map space id to segments but since // we are currently ignoring segments anyway... 
uintb bufoffset; // Starting offset of byte buffer uint4 bufsize; // Number of bytes in the buffer uint1 *buffer; // The actual buffer mutable asymbol **symbol_table; mutable long number_of_symbols; mutable long cursymbol; mutable asection *secinfoptr; asection *findSection(uintb offset,uintb &ssize) const; // Find section containing given offset void advanceToNextSymbol(void) const; public: LoadImageBfd(const string &f,const string &t); void attachToSpace(AddrSpace *id) { spaceid = id; } void open(void); // Open any descriptors void close(void); // Close any descriptor void getImportTable(vector &irec) { throw LowlevelError("Not implemented"); } virtual ~LoadImageBfd(void); virtual void loadFill(uint1 *ptr,int4 size,const Address &addr); // Load a chunk of image virtual void openSymbols(void) const; virtual void closeSymbols(void) const; virtual bool getNextSymbol(LoadImageFunc &record) const; virtual void openSectionInfo(void) const; virtual void closeSectionInfo(void) const; virtual bool getNextSection(LoadImageSection &sec) const; virtual void getReadonly(RangeList &list) const; virtual string getArchType(void) const; virtual void adjustVma(long adjust); }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/marshal.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "marshal.hh" #include "translate.hh" namespace ghidra { using namespace PackedFormat; unordered_map AttributeId::lookupAttributeId; const int4 PackedDecode::BUFFER_SIZE = 1024; const char XmlEncode::spaces[] = "\n "; const int4 XmlEncode::MAX_SPACES = 24+1; /// Access static vector of AttributeId objects that are registered during static initialization /// The list itself is created once on the first call to this method. /// \return a reference to the vector vector &AttributeId::getList(void) { static vector thelist; return thelist; } /// This constructor should only be invoked for static objects. It registers the attribute for inclusion /// in the global hashtable. /// \param nm is the name of the attribute /// \param i is an id to associate with the attribute /// \param scope is an id for the scope of this attribute AttributeId::AttributeId(const string &nm,uint4 i,int4 scope) : name(nm) { id = i; if (scope == 0) getList().push_back(this); } /// Fill the hashtable mapping attribute names to their id, from registered attribute objects void AttributeId::initialize(void) { vector &thelist(getList()); for(int4 i=0;iname) != lookupAttributeId.end()) throw DecoderError(attrib->name + " attribute registered more than once"); #endif lookupAttributeId[attrib->name] = attrib->id; } thelist.clear(); thelist.shrink_to_fit(); } unordered_map ElementId::lookupElementId; /// Access static vector of ElementId objects that are registered during static initialization /// The list itself is created once on the first call to this method. /// \return a reference to the vector vector &ElementId::getList(void) { static vector thelist; return thelist; } /// This constructor should only be invoked for static objects. It registers the element for inclusion /// in the global hashtable. 
/// \param nm is the name of the element /// \param i is an id to associate with the element /// \param scope is an id for the scope of this element ElementId::ElementId(const string &nm,uint4 i,int4 scope) : name(nm) { id = i; if (scope == 0) getList().push_back(this); } /// Fill the hashtable mapping element names to their id, from registered element objects void ElementId::initialize(void) { vector &thelist(getList()); for(int4 i=0;iname) != lookupElementId.end()) throw DecoderError(elem->name + " element registered more than once"); #endif lookupElementId[elem->name] = elem->id; } thelist.clear(); thelist.shrink_to_fit(); } XmlDecode::~XmlDecode(void) { if (document != (Document *)0) delete document; } void XmlDecode::ingestStream(istream &s) { document = xml_tree(s); rootElement = document->getRoot(); } uint4 XmlDecode::peekElement(void) { const Element *el; if (elStack.empty()) { if (rootElement == (const Element *)0) return 0; el = rootElement; } else { el = elStack.back(); List::const_iterator iter = iterStack.back(); if (iter == el->getChildren().end()) return 0; el = *iter; } return ElementId::find(el->getName(),scope); } uint4 XmlDecode::openElement(void) { const Element *el; if (elStack.empty()) { if (rootElement == (const Element *)0) return 0; // Document already traversed el = rootElement; rootElement = (const Element *)0; // Only open once } else { el = elStack.back(); List::const_iterator iter = iterStack.back(); if (iter == el->getChildren().end()) return 0; // Element already fully traversed el = *iter; iterStack.back() = ++iter; } elStack.push_back(el); iterStack.push_back(el->getChildren().begin()); attributeIndex = -1; return ElementId::find(el->getName(),scope); } uint4 XmlDecode::openElement(const ElementId &elemId) { const Element *el; if (elStack.empty()) { if (rootElement == (const Element *)0) throw DecoderError("Expecting <" + elemId.getName() + "> but reached end of document"); el = rootElement; rootElement = (const Element *)0; // 
Only open document once } else { el = elStack.back(); List::const_iterator iter = iterStack.back(); if (iter != el->getChildren().end()) { el = *iter; iterStack.back() = ++iter; } else throw DecoderError("Expecting <" + elemId.getName() + "> but no remaining children in current element"); } if (el->getName() != elemId.getName()) throw DecoderError("Expecting <" + elemId.getName() + "> but got <" + el->getName() + ">"); elStack.push_back(el); iterStack.push_back(el->getChildren().begin()); attributeIndex = -1; return elemId.getId(); } void XmlDecode::closeElement(uint4 id) { #ifdef CPUI_DEBUG const Element *el = elStack.back(); if (iterStack.back() != el->getChildren().end()) throw DecoderError("Closing element <" + el->getName() + "> with additional children"); if (ElementId::find(el->getName(), scope) != id) throw DecoderError("Trying to close <" + el->getName() + "> with mismatching id"); #endif elStack.pop_back(); iterStack.pop_back(); attributeIndex = 1000; // Cannot read any additional attributes } void XmlDecode::closeElementSkipping(uint4 id) { #ifdef CPUI_DEBUG const Element *el = elStack.back(); if (ElementId::find(el->getName(), scope) != id) throw DecoderError("Trying to close <" + el->getName() + "> with mismatching id"); #endif elStack.pop_back(); iterStack.pop_back(); attributeIndex = 1000; // We could check that id matches current element } void XmlDecode::rewindAttributes(void) { attributeIndex = -1; } uint4 XmlDecode::getNextAttributeId(void) { const Element *el = elStack.back(); int4 nextIndex = attributeIndex + 1; if (nextIndex < el->getNumAttributes()) { attributeIndex = nextIndex; return AttributeId::find(el->getAttributeName(attributeIndex),scope); } return 0; } uint4 XmlDecode::getIndexedAttributeId(const AttributeId &attribId) { const Element *el = elStack.back(); if (attributeIndex < 0 || attributeIndex >= el->getNumAttributes()) return ATTRIB_UNKNOWN.getId(); // For XML, the index is encoded directly in the attribute name const string 
&attribName(el->getAttributeName(attributeIndex)); // Does the name start with desired attribute base name? if (0 != attribName.compare(0,attribId.getName().size(),attribId.getName())) return ATTRIB_UNKNOWN.getId(); uint4 val = 0; istringstream s(attribName.substr(attribId.getName().size())); // Strip off the base name s >> dec >> val; // Decode the remaining decimal integer (starting at 1) if (val == 0) throw LowlevelError("Bad indexed attribute: " + attribId.getName()); return attribId.getId() + (val-1); } /// \brief Find the attribute index, within the given element, for the given name /// /// Run through the attributes of the element until we find the one matching the name, /// or throw an exception otherwise. /// \param el is the given element to search /// \param attribName is the attribute name to search for /// \return the matching attribute index int4 XmlDecode::findMatchingAttribute(const Element *el,const string &attribName) { for(int4 i=0;igetNumAttributes();++i) { if (el->getAttributeName(i) == attribName) return i; } throw DecoderError("Attribute missing: " + attribName); } bool XmlDecode::readBool(void) { const Element *el = elStack.back(); return xml_readbool(el->getAttributeValue(attributeIndex)); } bool XmlDecode::readBool(const AttributeId &attribId) { const Element *el = elStack.back(); if (attribId == ATTRIB_CONTENT) return xml_readbool(el->getContent()); int4 index = findMatchingAttribute(el, attribId.getName()); return xml_readbool(el->getAttributeValue(index)); } intb XmlDecode::readSignedInteger(void) { const Element *el = elStack.back(); intb res = 0; istringstream s2(el->getAttributeValue(attributeIndex)); s2.unsetf(ios::dec | ios::hex | ios::oct); s2 >> res; return res; } intb XmlDecode::readSignedInteger(const AttributeId &attribId) { const Element *el = elStack.back(); intb res = 0; if (attribId == ATTRIB_CONTENT) { istringstream s(el->getContent()); s.unsetf(ios::dec | ios::hex | ios::oct); s >> res; } else { int4 index = 
findMatchingAttribute(el, attribId.getName()); istringstream s(el->getAttributeValue(index)); s.unsetf(ios::dec | ios::hex | ios::oct); s >> res; } return res; } intb XmlDecode::readSignedIntegerExpectString(const string &expect,intb expectval) { const Element *el = elStack.back(); const string &value( el->getAttributeValue(attributeIndex) ); if (value == expect) return expectval; istringstream s2(value); s2.unsetf(ios::dec | ios::hex | ios::oct); intb res = 0; s2 >> res; return res; } intb XmlDecode::readSignedIntegerExpectString(const AttributeId &attribId,const string &expect,intb expectval) { string value = readString(attribId); if (value == expect) return expectval; istringstream s2(value); s2.unsetf(ios::dec | ios::hex | ios::oct); intb res = 0; s2 >> res; return res; } uintb XmlDecode::readUnsignedInteger(void) { const Element *el = elStack.back(); uintb res = 0; istringstream s2(el->getAttributeValue(attributeIndex)); s2.unsetf(ios::dec | ios::hex | ios::oct); s2 >> res; return res; } uintb XmlDecode::readUnsignedInteger(const AttributeId &attribId) { const Element *el = elStack.back(); uintb res = 0; if (attribId == ATTRIB_CONTENT) { istringstream s(el->getContent()); s.unsetf(ios::dec | ios::hex | ios::oct); s >> res; } else { int4 index = findMatchingAttribute(el, attribId.getName()); istringstream s(el->getAttributeValue(index)); s.unsetf(ios::dec | ios::hex | ios::oct); s >> res; } return res; } string XmlDecode::readString(void) { const Element *el = elStack.back(); return el->getAttributeValue(attributeIndex); } string XmlDecode::readString(const AttributeId &attribId) { const Element *el = elStack.back(); if (attribId == ATTRIB_CONTENT) return el->getContent(); int4 index = findMatchingAttribute(el, attribId.getName()); return el->getAttributeValue(index); } AddrSpace *XmlDecode::readSpace(void) { const Element *el = elStack.back(); string nm = el->getAttributeValue(attributeIndex); AddrSpace *res = spcManager->getSpaceByName(nm); if (res == 
(AddrSpace *)0) throw DecoderError("Unknown address space name: "+nm); return res; } AddrSpace *XmlDecode::readSpace(const AttributeId &attribId) { const Element *el = elStack.back(); string nm; if (attribId == ATTRIB_CONTENT) { nm = el->getContent(); } else { int4 index = findMatchingAttribute(el, attribId.getName()); nm = el->getAttributeValue(index); } AddrSpace *res = spcManager->getSpaceByName(nm); if (res == (AddrSpace *)0) throw DecoderError("Unknown address space name: "+nm); return res; } OpCode XmlDecode::readOpcode(void) { const Element *el = elStack.back(); string nm = el->getAttributeValue(attributeIndex); OpCode opc = get_opcode(nm); if (opc == (OpCode)0) throw DecoderError("Bad encoded OpCode"); return opc; } OpCode XmlDecode::readOpcode(AttributeId &attribId) { const Element *el = elStack.back(); string nm; if (attribId == ATTRIB_CONTENT) { nm = el->getContent(); } else { int4 index = findMatchingAttribute(el, attribId.getName()); nm = el->getAttributeValue(index); } OpCode opc = get_opcode(nm); if (opc == (OpCode)0) throw DecoderError("Bad encoded OpCode"); return opc; } void XmlEncode::newLine(void) { if (!doFormatting) return; int numSpaces = depth * 2 + 1; if (numSpaces > MAX_SPACES) { numSpaces = MAX_SPACES; } outStream.write(spaces,numSpaces); } void XmlEncode::openElement(const ElementId &elemId) { if (tagStatus == tag_start) outStream << '>'; else tagStatus = tag_start; newLine(); outStream << '<' << elemId.getName(); depth += 1; } void XmlEncode::closeElement(const ElementId &elemId) { depth -= 1; if (tagStatus == tag_start) { outStream << "/>"; tagStatus = tag_stop; return; } if (tagStatus != tag_content) newLine(); else tagStatus = tag_stop; outStream << "'; } void XmlEncode::writeBool(const AttributeId &attribId,bool val) { if (attribId == ATTRIB_CONTENT) { // Special id indicating, text value if (tagStatus == tag_start) { outStream << '>'; } if (val) outStream << "true"; else outStream << "false"; tagStatus = tag_content; return; } 
a_v_b(outStream, attribId.getName(), val); } void XmlEncode::writeSignedInteger(const AttributeId &attribId,intb val) { if (attribId == ATTRIB_CONTENT) { // Special id indicating, text value if (tagStatus == tag_start) { outStream << '>'; } outStream << dec << val; tagStatus = tag_content; return; } a_v_i(outStream, attribId.getName(), val); } void XmlEncode::writeUnsignedInteger(const AttributeId &attribId,uintb val) { if (attribId == ATTRIB_CONTENT) { // Special id indicating, text value if (tagStatus == tag_start) { outStream << '>'; } outStream << hex << "0x" << val; tagStatus = tag_content; return; } a_v_u(outStream, attribId.getName(), val); } void XmlEncode::writeString(const AttributeId &attribId,const string &val) { if (attribId == ATTRIB_CONTENT) { // Special id indicating, text value if (tagStatus == tag_start) { outStream << '>'; } xml_escape(outStream, val.c_str()); tagStatus = tag_content; return; } a_v(outStream,attribId.getName(),val); } void XmlEncode::writeStringIndexed(const AttributeId &attribId,uint4 index,const string &val) { outStream << ' ' << attribId.getName() << dec << index + 1; outStream << "=\""; xml_escape(outStream,val.c_str()); outStream << "\""; } void XmlEncode::writeSpace(const AttributeId &attribId,const AddrSpace *spc) { if (attribId == ATTRIB_CONTENT) { // Special id indicating, text value if (tagStatus == tag_start) { outStream << '>'; } xml_escape(outStream, spc->getName().c_str()); tagStatus = tag_content; return; } a_v(outStream,attribId.getName(),spc->getName()); } void XmlEncode::writeOpcode(const AttributeId &attribId,OpCode opc) { const char *name = get_opname(opc); if (attribId == ATTRIB_CONTENT) { // Special id indicating, text value if (tagStatus == tag_start) { outStream << '>'; } outStream << name; tagStatus = tag_content; return; } outStream << ' ' << attribId.getName() << "=\""; outStream << name; outStream << "\""; } /// The integer is encoded, 7-bits per byte, starting with the most significant 7-bits. 
/// The integer is decode from the \e current position, and the position is advanced. /// \param len is the number of bytes to extract uint8 PackedDecode::readInteger(int4 len) { uint8 res = 0; while(len > 0) { res <<= RAWDATA_BITSPERBYTE; res |= (getNextByte(curPos) & RAWDATA_MASK); len -= 1; } return res; } /// The \e current position is reset to the start of the current open element. Attributes are scanned /// and skipped until the attribute matching the given id is found. The \e current position is set to the /// start of the matching attribute, in preparation for one of the read*() methods. /// If the id is not found an exception is thrown. /// \param attribId is the attribute id to scan for. void PackedDecode::findMatchingAttribute(const AttributeId &attribId) { curPos = startPos; for(;;) { uint1 header1 = getByte(curPos); if ((header1 & HEADER_MASK) != ATTRIBUTE) break; uint4 id = header1 & ELEMENTID_MASK; if ((header1 & HEADEREXTEND_MASK) != 0) { id <<= RAWDATA_BITSPERBYTE; id |= (getBytePlus1(curPos) & RAWDATA_MASK); } if (attribId.getId() == id) return; // Found it skipAttribute(); } throw DecoderError("Attribute " + attribId.getName() + " is not present"); } /// The attribute at the \e current position is scanned enough to determine its length, and the position /// is advanced to the following byte. 
void PackedDecode::skipAttribute(void) { uint1 header1 = getNextByte(curPos); // Attribute header if ((header1 & HEADEREXTEND_MASK) != 0) getNextByte(curPos); // Extra byte for extended id uint1 typeByte = getNextByte(curPos); // Type (and length) byte uint1 attribType = typeByte >> TYPECODE_SHIFT; if (attribType == TYPECODE_BOOLEAN || attribType == TYPECODE_SPECIALSPACE) return; // has no additional data uint4 length = readLengthCode(typeByte); // Length of data in bytes if (attribType == TYPECODE_STRING) { length = readInteger(length); // Read length field to get final length of string } advancePosition(curPos, length); // Skip -length- data } /// This assumes the header and \b type \b byte have been read. Decode type and length info and finish /// skipping over the attribute so that the next call to getNextAttributeId() is on cut. /// \param typeByte is the previously scanned type byte void PackedDecode::skipAttributeRemaining(uint1 typeByte) { uint1 attribType = typeByte >> TYPECODE_SHIFT; if (attribType == TYPECODE_BOOLEAN || attribType == TYPECODE_SPECIALSPACE) return; // has no additional data uint4 length = readLengthCode(typeByte); // Length of data in bytes if (attribType == TYPECODE_STRING) { length = readInteger(length); // Read length field to get final length of string } advancePosition(curPos, length); // Skip -length- data } /// Set decoder to beginning of the stream. Add padding to end of the stream. 
/// \param bufPos is the number of bytes used by the last input buffer void PackedDecode::endIngest(int4 bufPos) { endPos.seqIter = inStream.begin(); // Set position to beginning of stream if (endPos.seqIter != inStream.end()) { endPos.current = (*endPos.seqIter).start; endPos.end = (*endPos.seqIter).end; // Make sure there is at least one character after ingested buffer if (bufPos == BUFFER_SIZE) { // Last buffer was entirely filled uint1 *endbuf = new uint1[1]; // Add one more buffer inStream.emplace_back(endbuf,endbuf + 1); bufPos = 0; } uint1 *buf = inStream.back().start; buf[bufPos] = ELEMENT_END; } } PackedDecode::~PackedDecode(void) { list::const_iterator iter; for(iter=inStream.begin();iter!=inStream.end();++iter) { delete [] (*iter).start; } } void PackedDecode::ingestStream(istream &s) { int4 gcount = 0; while(s.peek() > 0) { uint1 *buf = allocateNextInputBuffer(1); s.get((char *)buf,BUFFER_SIZE+1,'\0'); gcount = s.gcount(); } endIngest(gcount); } uint4 PackedDecode::peekElement(void) { uint1 header1 = getByte(endPos); if ((header1 & HEADER_MASK) != ELEMENT_START) return 0; uint4 id = header1 & ELEMENTID_MASK; if ((header1 & HEADEREXTEND_MASK) != 0) { id <<= RAWDATA_BITSPERBYTE; id |= (getBytePlus1(endPos) & RAWDATA_MASK); } return id; } uint4 PackedDecode::openElement(void) { uint1 header1 = getByte(endPos); if ((header1 & HEADER_MASK) != ELEMENT_START) return 0; getNextByte(endPos); uint4 id = header1 & ELEMENTID_MASK; if ((header1 & HEADEREXTEND_MASK) != 0) { id <<= RAWDATA_BITSPERBYTE; id |= (getNextByte(endPos) & RAWDATA_MASK); } startPos = endPos; curPos = endPos; header1 = getByte(curPos); while((header1 & HEADER_MASK) == ATTRIBUTE) { skipAttribute(); header1 = getByte(curPos); } endPos = curPos; curPos = startPos; attributeRead = true; // "Last attribute was read" is vacuously true return id; } uint4 PackedDecode::openElement(const ElementId &elemId) { uint4 id = openElement(); if (id != elemId.getId()) { if (id == 0) throw DecoderError("Expecting 
<" + elemId.getName() + "> but did not scan an element"); throw DecoderError("Expecting <" + elemId.getName() + "> but id did not match"); } return id; } void PackedDecode::closeElement(uint4 id) { uint1 header1 = getNextByte(endPos); if ((header1 & HEADER_MASK) != ELEMENT_END) throw DecoderError("Expecting element close"); uint4 closeId = header1 & ELEMENTID_MASK; if ((header1 & HEADEREXTEND_MASK) != 0) { closeId <<= RAWDATA_BITSPERBYTE; closeId |= (getNextByte(endPos) & RAWDATA_MASK); } if (id != closeId) throw DecoderError("Did not see expected closing element"); } void PackedDecode::closeElementSkipping(uint4 id) { vector idstack; idstack.push_back(id); do { uint1 header1 = getByte(endPos) & HEADER_MASK; if (header1 == ELEMENT_END) { closeElement(idstack.back()); idstack.pop_back(); } else if (header1 == ELEMENT_START) { idstack.push_back(openElement()); } else throw DecoderError("Corrupt stream"); } while(!idstack.empty()); } void PackedDecode::rewindAttributes(void) { curPos = startPos; attributeRead = true; } uint4 PackedDecode::getNextAttributeId(void) { if (!attributeRead) skipAttribute(); uint1 header1 = getByte(curPos); if ((header1 & HEADER_MASK) != ATTRIBUTE) return 0; uint4 id = header1 & ELEMENTID_MASK; if ((header1 & HEADEREXTEND_MASK) != 0) { id <<= RAWDATA_BITSPERBYTE; id |= (getBytePlus1(curPos) & RAWDATA_MASK); } attributeRead = false; return id; } uint4 PackedDecode::getIndexedAttributeId(const AttributeId &attribId) { return ATTRIB_UNKNOWN.getId(); // PackedDecode never needs to reinterpret an attribute } bool PackedDecode::readBool(void) { uint1 header1 = getNextByte(curPos); if ((header1 & HEADEREXTEND_MASK)!=0) getNextByte(curPos); uint1 typeByte = getNextByte(curPos); attributeRead = true; if ((typeByte >> TYPECODE_SHIFT) != TYPECODE_BOOLEAN) throw DecoderError("Expecting boolean attribute"); return ((typeByte & LENGTHCODE_MASK) != 0); } bool PackedDecode::readBool(const AttributeId &attribId) { findMatchingAttribute(attribId); bool res = 
readBool();
  curPos = startPos;	// Reset attribute traversal
  return res;
}

intb PackedDecode::readSignedInteger(void)

{
  uint1 header1 = getNextByte(curPos);
  if ((header1 & HEADEREXTEND_MASK)!=0)
    getNextByte(curPos);		// Skip second byte of extended id
  uint1 typeByte = getNextByte(curPos);
  uint4 typeCode = typeByte >> TYPECODE_SHIFT;
  intb res;
  if (typeCode == TYPECODE_SIGNEDINT_POSITIVE) {
    res = readInteger(readLengthCode(typeByte));
  }
  else if (typeCode == TYPECODE_SIGNEDINT_NEGATIVE) {
    // Magnitude is stored; sign is carried by the type code
    res = readInteger(readLengthCode(typeByte));
    res = -res;
  }
  else {
    skipAttributeRemaining(typeByte);
    attributeRead = true;
    throw DecoderError("Expecting signed integer attribute");
  }
  attributeRead = true;
  return res;
}

intb PackedDecode::readSignedInteger(const AttributeId &attribId)

{
  findMatchingAttribute(attribId);
  intb res = readSignedInteger();
  curPos = startPos;	// Reset attribute traversal
  return res;
}

intb PackedDecode::readSignedIntegerExpectString(const string &expect,intb expectval)

{
  intb res;
  Position tmpPos = curPos;	// Peek at the type byte without consuming the attribute
  uint1 header1 = getNextByte(tmpPos);
  if ((header1 & HEADEREXTEND_MASK)!=0)
    getNextByte(tmpPos);
  uint1 typeByte = getNextByte(tmpPos);
  uint4 typeCode = typeByte >> TYPECODE_SHIFT;
  if (typeCode == TYPECODE_STRING) {
    string val = readString();
    if (val != expect) {
      ostringstream s;
      s << "Expecting string \"" << expect << "\" but read \"" << val << "\"";
      throw DecoderError(s.str());
    }
    res = expectval;	// The matching string stands for this predetermined value
  }
  else {
    res = readSignedInteger();
  }
  return res;
}

intb PackedDecode::readSignedIntegerExpectString(const AttributeId &attribId,const string &expect,intb expectval)

{
  findMatchingAttribute(attribId);
  intb res = readSignedIntegerExpectString(expect,expectval);
  curPos = startPos;	// Reset attribute traversal
  return res;
}

uintb PackedDecode::readUnsignedInteger(void)

{
  uint1 header1 = getNextByte(curPos);
  if ((header1 & HEADEREXTEND_MASK)!=0)
    getNextByte(curPos);		// Skip second byte of extended id
  uint1 typeByte = getNextByte(curPos);
  uint4 typeCode = typeByte >> TYPECODE_SHIFT;
  uintb res;
  if (typeCode == TYPECODE_UNSIGNEDINT) {
    res = readInteger(readLengthCode(typeByte));
  }
  else {
    skipAttributeRemaining(typeByte);
attributeRead = true;
    throw DecoderError("Expecting unsigned integer attribute");
  }
  attributeRead = true;
  return res;
}

uintb PackedDecode::readUnsignedInteger(const AttributeId &attribId)

{
  findMatchingAttribute(attribId);
  uintb res = readUnsignedInteger();
  curPos = startPos;	// Reset attribute traversal
  return res;
}

string PackedDecode::readString(void)

{
  uint1 header1 = getNextByte(curPos);
  if ((header1 & HEADEREXTEND_MASK)!=0)
    getNextByte(curPos);		// Skip second byte of extended id
  uint1 typeByte = getNextByte(curPos);
  uint4 typeCode = typeByte >> TYPECODE_SHIFT;
  if (typeCode != TYPECODE_STRING) {
    skipAttributeRemaining(typeByte);
    attributeRead = true;
    throw DecoderError("Expecting string attribute");
  }
  int4 length = readLengthCode(typeByte);
  length = readInteger(length);		// Length code gives size of the explicit length field
  attributeRead = true;
  int4 curLen = curPos.end - curPos.current;	// Bytes remaining in the current buffer
  if (curLen >= length) {
    // Fast path: whole string lies within the current buffer
    string res((const char *)curPos.current,length);
    advancePosition(curPos, length);
    return res;
  }
  // Slow path: string spans multiple input buffers; assemble it piecewise
  string res((const char *)curPos.current,curLen);
  length -= curLen;
  advancePosition(curPos, curLen);
  while(length > 0) {
    curLen = curPos.end - curPos.current;
    if (curLen > length)
      curLen = length;
    res.append((const char *)curPos.current,curLen);
    length -= curLen;
    advancePosition(curPos, curLen);
  }
  return res;
}

string PackedDecode::readString(const AttributeId &attribId)

{
  findMatchingAttribute(attribId);
  string res = readString();
  curPos = startPos;	// Reset attribute traversal
  return res;
}

AddrSpace *PackedDecode::readSpace(void)

{
  uint1 header1 = getNextByte(curPos);
  if ((header1 & HEADEREXTEND_MASK)!=0)
    getNextByte(curPos);		// Skip second byte of extended id
  uint1 typeByte = getNextByte(curPos);
  uint4 typeCode = typeByte >> TYPECODE_SHIFT;
  int4 res;
  AddrSpace *spc;
  if (typeCode == TYPECODE_ADDRESSSPACE) {
    res = readInteger(readLengthCode(typeByte));	// Space is encoded by its manager index
    spc = spcManager->getSpace(res);
    if (spc == (AddrSpace *)0)
      throw DecoderError("Unknown address space index");
  }
  else if (typeCode == TYPECODE_SPECIALSPACE) {
    uint4 specialCode = readLengthCode(typeByte);	// Special spaces encoded in the length field
    if (specialCode == SPECIALSPACE_STACK)
      spc = spcManager->getStackSpace();
    else if
(specialCode == SPECIALSPACE_JOIN) {
      spc = spcManager->getJoinSpace();
    }
    else {
      // Other special codes (fspec/iop/spacebase) cannot be decoded back to a space
      throw DecoderError("Cannot marshal special address space");
    }
  }
  else {
    skipAttributeRemaining(typeByte);
    attributeRead = true;
    throw DecoderError("Expecting space attribute");
  }
  attributeRead = true;
  return spc;
}

AddrSpace *PackedDecode::readSpace(const AttributeId &attribId)

{
  findMatchingAttribute(attribId);
  AddrSpace *res = readSpace();
  curPos = startPos;	// Reset attribute traversal
  return res;
}

OpCode PackedDecode::readOpcode(void)

{
  int4 val = (int4)readSignedInteger();
  if (val < 0 || val >= CPUI_MAX)
    throw DecoderError("Bad encoded OpCode");
  return (OpCode)val;
}

OpCode PackedDecode::readOpcode(AttributeId &attribId)

{
  findMatchingAttribute(attribId);
  OpCode opc = readOpcode();
  curPos = startPos;	// Reset attribute traversal
  return opc;
}

/// The value is either an unsigned integer, an address space index, or (the absolute value of) a signed integer.
/// A type header is passed in with the particular type code for the value already filled in.
/// This method then fills in the length code, outputs the full type header and the encoded bytes of the integer.
/// \param typeByte is the type header /// \param val is the integer value void PackedEncode::writeInteger(uint1 typeByte,uint8 val) { uint1 lenCode; int4 sa; if (val == 0) { lenCode = 0; sa = -1; } else if (val < 0x800000000) { if (val < 0x200000) { if (val < 0x80) { lenCode = 1; // 7-bits sa = 0; } else if (val < 0x4000) { lenCode = 2; // 14-bits sa = RAWDATA_BITSPERBYTE; } else { lenCode = 3; // 21-bits sa = 2*RAWDATA_BITSPERBYTE; } } else if (val < 0x10000000) { lenCode = 4; // 28-bits sa = 3*RAWDATA_BITSPERBYTE; } else { lenCode = 5; // 35-bits sa = 4*RAWDATA_BITSPERBYTE; } } else if (val < 0x2000000000000) { if (val < 0x40000000000) { lenCode = 6; sa = 5*RAWDATA_BITSPERBYTE; } else { lenCode = 7; sa = 6*RAWDATA_BITSPERBYTE; } } else { if (val < 0x100000000000000) { lenCode = 8; sa = 7*RAWDATA_BITSPERBYTE; } else if (val < 0x8000000000000000) { lenCode = 9; sa = 8*RAWDATA_BITSPERBYTE; } else { lenCode = 10; sa = 9*RAWDATA_BITSPERBYTE; } } typeByte |= lenCode; outStream.put(typeByte); for(;sa >= 0;sa -= RAWDATA_BITSPERBYTE) { uint1 piece = (val >> sa) & RAWDATA_MASK; piece |= RAWDATA_MARKER; outStream.put(piece); } } void PackedEncode::openElement(const ElementId &elemId) { writeHeader(ELEMENT_START, elemId.getId()); } void PackedEncode::closeElement(const ElementId &elemId) { writeHeader(ELEMENT_END, elemId.getId()); } void PackedEncode::writeBool(const AttributeId &attribId,bool val) { writeHeader(ATTRIBUTE, attribId.getId()); uint1 typeByte = val ? 
((TYPECODE_BOOLEAN << TYPECODE_SHIFT) | 1) : (TYPECODE_BOOLEAN << TYPECODE_SHIFT);
  outStream.put(typeByte);	// Boolean value is stored in the length-code field
}

void PackedEncode::writeSignedInteger(const AttributeId &attribId,intb val)

{
  writeHeader(ATTRIBUTE, attribId.getId());
  uint1 typeByte;
  uint8 num;
  // Encode the magnitude; the sign is carried by the type code
  if (val < 0) {
    typeByte = (TYPECODE_SIGNEDINT_NEGATIVE << TYPECODE_SHIFT);
    num = -val;
  }
  else {
    typeByte = (TYPECODE_SIGNEDINT_POSITIVE << TYPECODE_SHIFT);
    num = val;
  }
  writeInteger(typeByte, num);
}

void PackedEncode::writeUnsignedInteger(const AttributeId &attribId,uintb val)

{
  writeHeader(ATTRIBUTE, attribId.getId());
  writeInteger((TYPECODE_UNSIGNEDINT << TYPECODE_SHIFT),val);
}

void PackedEncode::writeString(const AttributeId &attribId,const string &val)

{
  uint8 length = val.length();
  writeHeader(ATTRIBUTE, attribId.getId());
  writeInteger((TYPECODE_STRING << TYPECODE_SHIFT), length);	// Explicit length precedes raw bytes
  outStream.write(val.c_str(), length);
}

void PackedEncode::writeStringIndexed(const AttributeId &attribId,uint4 index,const string &val)

{
  uint8 length = val.length();
  writeHeader(ATTRIBUTE, attribId.getId() + index);	// The index is folded into the attribute id
  writeInteger((TYPECODE_STRING << TYPECODE_SHIFT), length);
  outStream.write(val.c_str(), length);
}

void PackedEncode::writeSpace(const AttributeId &attribId,const AddrSpace *spc)

{
  writeHeader(ATTRIBUTE, attribId.getId());
  switch(spc->getType()) {
    case IPTR_FSPEC:
      outStream.put((TYPECODE_SPECIALSPACE << TYPECODE_SHIFT) | SPECIALSPACE_FSPEC);
      break;
    case IPTR_IOP:
      outStream.put((TYPECODE_SPECIALSPACE << TYPECODE_SHIFT) | SPECIALSPACE_IOP);
      break;
    case IPTR_JOIN:
      outStream.put((TYPECODE_SPECIALSPACE << TYPECODE_SHIFT) | SPECIALSPACE_JOIN);
      break;
    case IPTR_SPACEBASE:
      if (spc->isFormalStackSpace())
        outStream.put((TYPECODE_SPECIALSPACE << TYPECODE_SHIFT) | SPECIALSPACE_STACK);
      else
        outStream.put((TYPECODE_SPECIALSPACE << TYPECODE_SHIFT) | SPECIALSPACE_SPACEBASE);	// A secondary register offset space
      break;
    default:
      // Ordinary spaces are encoded by their manager index
      uint8 spcId = spc->getIndex();
      writeInteger((TYPECODE_ADDRESSSPACE << TYPECODE_SHIFT), spcId);
      break;
  }
}
void PackedEncode::writeOpcode(const AttributeId &attribId,OpCode opc)

{
  writeHeader(ATTRIBUTE, attribId.getId());
  // OpCodes are always non-negative, so the positive signed-int encoding is used
  writeInteger((TYPECODE_SIGNEDINT_POSITIVE << TYPECODE_SHIFT), opc);
}

// Common attributes. Attributes with multiple uses
AttributeId ATTRIB_CONTENT = AttributeId("XMLcontent",1);
AttributeId ATTRIB_ALIGN = AttributeId("align",2);
AttributeId ATTRIB_BIGENDIAN = AttributeId("bigendian",3);
AttributeId ATTRIB_CONSTRUCTOR = AttributeId("constructor",4);
AttributeId ATTRIB_DESTRUCTOR = AttributeId("destructor",5);
AttributeId ATTRIB_EXTRAPOP = AttributeId("extrapop",6);
AttributeId ATTRIB_FORMAT = AttributeId("format",7);
AttributeId ATTRIB_HIDDENRETPARM = AttributeId("hiddenretparm",8);
AttributeId ATTRIB_ID = AttributeId("id",9);
AttributeId ATTRIB_INDEX = AttributeId("index",10);
AttributeId ATTRIB_INDIRECTSTORAGE = AttributeId("indirectstorage",11);
AttributeId ATTRIB_METATYPE = AttributeId("metatype",12);
AttributeId ATTRIB_MODEL = AttributeId("model",13);
AttributeId ATTRIB_NAME = AttributeId("name",14);
AttributeId ATTRIB_NAMELOCK = AttributeId("namelock",15);
AttributeId ATTRIB_OFFSET = AttributeId("offset",16);
AttributeId ATTRIB_READONLY = AttributeId("readonly",17);
AttributeId ATTRIB_REF = AttributeId("ref",18);
AttributeId ATTRIB_SIZE = AttributeId("size",19);
AttributeId ATTRIB_SPACE = AttributeId("space",20);
AttributeId ATTRIB_THISPTR = AttributeId("thisptr",21);
AttributeId ATTRIB_TYPE = AttributeId("type",22);
AttributeId ATTRIB_TYPELOCK = AttributeId("typelock",23);
AttributeId ATTRIB_VAL = AttributeId("val",24);
AttributeId ATTRIB_VALUE = AttributeId("value",25);
AttributeId ATTRIB_WORDSIZE = AttributeId("wordsize",26);

AttributeId ATTRIB_STORAGE = AttributeId("storage",149);
AttributeId ATTRIB_STACKSPILL = AttributeId("stackspill",150);

AttributeId ATTRIB_UNKNOWN = AttributeId("XMLunknown",159); // Number serves as next open index

ElementId ELEM_DATA = ElementId("data",1);
ElementId ELEM_INPUT = ElementId("input",2);
ElementId ELEM_OFF
= ElementId("off",3); ElementId ELEM_OUTPUT = ElementId("output",4); ElementId ELEM_RETURNADDRESS = ElementId("returnaddress",5); ElementId ELEM_SYMBOL = ElementId("symbol",6); ElementId ELEM_TARGET = ElementId("target",7); ElementId ELEM_VAL = ElementId("val",8); ElementId ELEM_VALUE = ElementId("value",9); ElementId ELEM_VOID = ElementId("void",10); ElementId ELEM_UNKNOWN = ElementId("XMLunknown",289); // Number serves as next open index } // End namespace ghidra ================================================ FILE: pypcode/sleigh/marshal.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __MARSHAL_HH__ #define __MARSHAL_HH__ #include "xml.hh" #include "opcodes.hh" #include #include namespace ghidra { using std::list; using std::unordered_map; /// \brief An annotation for a data element to being transferred to/from a stream /// /// This class parallels the XML concept of an \b attribute on an element. An AttributeId describes /// a particular piece of data associated with an ElementId. The defining characteristic of the AttributeId is /// its name. Internally this name is associated with an integer id. The name (and id) uniquely determine /// the data being labeled, within the context of a specific ElementId. 
Within this context, an AttributeId labels either /// - An unsigned integer /// - A signed integer /// - A boolean value /// - A string /// /// The same AttributeId can be used to label a different type of data when associated with a different ElementId. class AttributeId { static unordered_map lookupAttributeId; ///< A map of AttributeId names to their associated id static vector &getList(void); ///< Retrieve the list of static AttributeId string name; ///< The name of the attribute uint4 id; ///< The (internal) id of the attribute public: AttributeId(const string &nm,uint4 i,int4 scope=0); ///< Construct given a name and id const string &getName(void) const { return name; } ///< Get the attribute's name uint4 getId(void) const { return id; } ///< Get the attribute's id bool operator==(const AttributeId &op2) const { return (id == op2.id); } ///< Test equality with another AttributeId static uint4 find(const string &nm,int4 scope); ///< Find the id associated with a specific attribute name static void initialize(void); ///< Populate a hashtable with all AttributeId objects friend bool operator==(uint4 id,const AttributeId &op2) { return (id == op2.id); } ///< Test equality of a raw integer id with an AttributeId friend bool operator==(const AttributeId &op1,uint4 id) { return (op1.id == id); } ///< Test equality of an AttributeId with a raw integer id }; /// \brief An annotation for a specific collection of hierarchical data /// /// This class parallels the XML concept of an \b element. An ElementId describes a collection of data, where each /// piece is annotated by a specific AttributeId. In addition, each ElementId can contain zero or more \e child /// ElementId objects, forming a hierarchy of annotated data. Each ElementId has a name, which is unique at least /// within the context of its parent ElementId. Internally this name is associated with an integer id. 
A special /// AttributeId ATTRIB_CONTENT is used to label the XML element's text content, which is traditionally not labeled /// as an attribute. class ElementId { static unordered_map lookupElementId; ///< A map of ElementId names to their associated id static vector &getList(void); ///< Retrieve the list of static ElementId string name; ///< The name of the element uint4 id; ///< The (internal) id of the attribute public: ElementId(const string &nm,uint4 i,int4 scope=0); ///< Construct given a name and id const string &getName(void) const { return name; } ///< Get the element's name uint4 getId(void) const { return id; } ///< Get the element's id bool operator==(const ElementId &op2) const { return (id == op2.id); } ///< Test equality with another ElementId static uint4 find(const string &nm,int4 scope); ///< Find the id associated with a specific element name static void initialize(void); ///< Populate a hashtable with all ElementId objects friend bool operator==(uint4 id,const ElementId &op2) { return (id == op2.id); } ///< Test equality of a raw integer id with an ElementId friend bool operator==(const ElementId &op1,uint4 id) { return (op1.id == id); } ///< Test equality of an ElementId with a raw integer id friend bool operator!=(uint4 id,const ElementId &op2) { return (id != op2.id); } ///< Test inequality of a raw integer id with an ElementId friend bool operator!=(const ElementId &op1,uint4 id) { return (op1.id != id); } ///< Test inequality of an ElementId with a raw integer id }; class AddrSpace; class AddrSpaceManager; /// \brief A class for reading structured data from a stream /// /// All data is loosely structured as with an XML document. A document contains a nested set /// of \b elements, with labels corresponding to the ElementId class. A single element can hold /// zero or more attributes and zero or more child elements. An attribute holds a primitive /// data element (bool, integer, string) and is labeled by an AttributeId. 
The document is traversed /// using a sequence of openElement() and closeElement() calls, intermixed with read*() calls to extract /// the data. The elements are traversed in a depth first order. Attributes within an element can /// be traversed in order using repeated calls to the getNextAttributeId() method, followed by a calls to /// one of the read*(void) methods to extract the data. Alternately a read*(AttributeId) call can be used /// to extract data for an attribute known to be in the element. There is a special content attribute /// whose data can be extracted using a read*(AttributeId) call that is passed the special ATTRIB_CONTENT id. /// This attribute will not be traversed by getNextAttribute(). class Decoder { protected: const AddrSpaceManager *spcManager; ///< Manager for decoding address space attributes public: Decoder(const AddrSpaceManager *spc) { spcManager = spc; } ///< Base constructor const AddrSpaceManager *getAddrSpaceManager(void) const { return spcManager; } ///< Get the manager used for address space decoding virtual ~Decoder(void) {} ///< Destructor /// \brief Prepare to decode a given stream /// /// Called once before any decoding. Currently this is assumed to make an internal copy of the stream data, /// i.e. the input stream is cleared before any decoding takes place. /// \param s is the given input stream to be decode /// \return \b true if the stream was fully ingested virtual void ingestStream(istream &s)=0; /// \brief Peek at the next child element of the current parent, without traversing in (opening) it. /// /// The element id is returned, which can be compared to ElementId labels. /// If there are no remaining child elements to traverse, 0 is returned. /// \return the element id or 0 virtual uint4 peekElement(void)=0; /// \brief Open (traverse into) the next child element of the current parent. /// /// The child becomes the current parent. The list of attributes is initialized for use with getNextAttributeId. 
/// \return the id of the child element virtual uint4 openElement(void)=0; /// \brief Open (traverse into) the next child element, which must be of a specific type /// /// The child becomes the current parent, and its attributes are initialized for use with getNextAttributeId. /// The child must match the given element id or an exception is thrown. /// \param elemId is the given element id to match /// \return the id of the child element virtual uint4 openElement(const ElementId &elemId)=0; /// \brief Close the current element /// /// The data for the current element is considered fully processed. If the element has additional children, /// an exception is thrown. The stream must indicate the end of the element in some way. /// \param id is the id of the element to close (which must be the current element) virtual void closeElement(uint4 id)=0; /// \brief Close the current element, skipping any child elements that have not yet been parsed /// /// This closes the given element, which must be current. If there are child elements that have not been /// parsed, this is not considered an error, and they are skipped over in the parse. /// \param id is the id of the element to close (which must be the current element) virtual void closeElementSkipping(uint4 id)=0; /// \brief Get the next attribute id for the current element /// /// Attributes are automatically set up for traversal using this method, when the element is opened. /// If all attributes have been traversed (or there are no attributes), 0 is returned. /// \return the id of the next attribute or 0 virtual uint4 getNextAttributeId(void)=0; /// \brief Get the id for the (current) attribute, assuming it is indexed /// /// Assuming the previous call to getNextAttributeId() returned the id of ATTRIB_UNKNOWN, /// reinterpret the attribute as being an indexed form of the given attribute. If the attribute /// matches, return this indexed id, otherwise return ATTRIB_UNKNOWN. 
/// \param attribId is the attribute being indexed /// \return the indexed id or ATTRIB_UNKNOWN virtual uint4 getIndexedAttributeId(const AttributeId &attribId)=0; /// \brief Reset attribute traversal for the current element /// /// Attributes for a single element can be traversed more than once using the getNextAttributeId method. virtual void rewindAttributes(void)=0; /// \brief Parse the current attribute as a boolean value /// /// The last attribute, as returned by getNextAttributeId, is treated as a boolean, and its value is returned. /// \return the boolean value associated with the current attribute. virtual bool readBool(void)=0; /// \brief Find and parse a specific attribute in the current element as a boolean value /// /// The set of attributes for the current element is searched for a match to the given attribute id. /// This attribute is then parsed as a boolean and its value returned. /// If there is no attribute matching the id, an exception is thrown. /// Parsing via getNextAttributeId is reset. /// \param attribId is the specific attribute id to match /// \return the boolean value virtual bool readBool(const AttributeId &attribId)=0; /// \brief Parse the current attribute as a signed integer value /// /// The last attribute, as returned by getNextAttributeId, is treated as a signed integer, and its value is returned. /// \return the signed integer value associated with the current attribute. virtual intb readSignedInteger(void)=0; /// \brief Find and parse a specific attribute in the current element as a signed integer /// /// The set of attributes for the current element is searched for a match to the given attribute id. /// This attribute is then parsed as a signed integer and its value returned. /// If there is no attribute matching the id, an exception is thrown. /// Parsing via getNextAttributeId is reset. 
/// \param attribId is the specific attribute id to match /// \return the signed integer value virtual intb readSignedInteger(const AttributeId &attribId)=0; /// \brief Parse the current attribute as either a signed integer value or a string. /// /// If the attribute is an integer, its value is returned. If the attribute is a string, it must match an /// expected string passed to the method, and a predetermined integer value associated with the string is returned. /// If the attribute neither matches the expected string nor is an integer, the return value is undefined. /// \param expect is the string value to expect if the attribute is encoded as a string /// \param expectval is the integer value to return if the attribute matches the expected string /// \return the encoded integer or the integer value associated with the expected string virtual intb readSignedIntegerExpectString(const string &expect,intb expectval)=0; /// \brief Find and parse a specific attribute in the current element as either a signed integer or a string. /// /// If the attribute is an integer, its value is parsed and returned. /// If the attribute is encoded as a string, it must match an expected string passed to this method. /// In this case, a predetermined integer value is passed back, indicating a matching string was parsed. /// If the attribute neither matches the expected string nor is an integer, the return value is undefined. /// If there is no attribute matching the id, an exception is thrown. 
/// \param attribId is the specific attribute id to match /// \param expect is the string to expect, if the attribute is not encoded as an integer /// \param expectval is the integer value to return if the attribute matches the expected string /// \return the encoded integer or the integer value associated with the expected string virtual intb readSignedIntegerExpectString(const AttributeId &attribId,const string &expect,intb expectval)=0; /// \brief Parse the current attribute as an unsigned integer value /// /// The last attribute, as returned by getNextAttributeId, is treated as an unsigned integer, and its value is returned. /// \return the unsigned integer value associated with the current attribute. virtual uintb readUnsignedInteger(void)=0; /// \brief Find and parse a specific attribute in the current element as an unsigned integer /// /// The set of attributes for the current element is searched for a match to the given attribute id. /// This attribute is then parsed as an unsigned integer and its value returned. /// If there is no attribute matching the id, an exception is thrown. /// Parsing via getNextAttributeId is reset. /// \param attribId is the specific attribute id to match /// \return the unsigned integer value virtual uintb readUnsignedInteger(const AttributeId &attribId)=0; /// \brief Parse the current attribute as a string /// /// The last attribute, as returned by getNextAttributeId, is returned as a string. /// \return the string associated with the current attribute. virtual string readString(void)=0; /// \brief Find the specific attribute in the current element and return it as a string /// /// The set of attributes for the current element is searched for a match to the given attribute id. /// This attribute is then returned as a string. If there is no attribute matching the id, and exception is thrown. /// Parse via getNextAttributeId is reset. 
/// \param attribId is the specific attribute id to match /// \return the string associated with the attribute virtual string readString(const AttributeId &attribId)=0; /// \brief Parse the current attribute as an address space /// /// The last attribute, as returned by getNextAttributeId, is returned as an address space. /// \return the address space associated with the current attribute. virtual AddrSpace *readSpace(void)=0; /// \brief Find the specific attribute in the current element and return it as an address space /// /// Search attributes from the current element for a match to the given attribute id. /// Return this attribute as an address space. If there is no attribute matching the id, an exception is thrown. /// Parse via getNextAttributeId is reset. /// \param attribId is the specific attribute id to match /// \return the address space associated with the attribute virtual AddrSpace *readSpace(const AttributeId &attribId)=0; /// \brief Parse the current attribute as a p-code OpCode /// /// The last attribute, as returned by getNextAttributeId, is returned as an OpCode. /// \return the OpCode associated with the current attribute virtual OpCode readOpcode(void)=0; /// \brief Find the specific attribute in the current element and return it as an OpCode /// /// Search attributes from the current element for a match to the given attribute id. /// Return this attribute as an OpCode. If there is no matching attribute id, an exception is thrown. /// Parse via getNextAttributeId is reset. /// \param attribId is the specific attribute id to match /// \return the OpCode associated with the attribute virtual OpCode readOpcode(AttributeId &attribId)=0; /// \brief Skip parsing of the next element /// /// The element skipped is the one that would be opened by the next call to openElement. 
  void skipElement(void) {
    uint4 elemId = openElement();	// Consume the element that openElement would have returned next
    closeElementSkipping(elemId);
  }
};

/// \brief A class for writing structured data to a stream
///
/// The resulting encoded data is structured similarly to an XML document. The document contains a nested set
/// of \b elements, with labels corresponding to the ElementId class. A single element can hold
/// zero or more attributes and zero or more child elements. An \b attribute holds a primitive
/// data element (bool, integer, string) and is labeled by an AttributeId. The document is written
/// using a sequence of openElement() and closeElement() calls, intermixed with write*() calls to encode
/// the data primitives. All primitives written using a write*() call are associated with current open element,
/// and all write*() calls for one element must come before opening any child element.
/// The traditional XML element text content can be written using the special ATTRIB_CONTENT AttributeId, which
/// must be the last write*() call associated with the specific element.
class Encoder {
public:
  virtual ~Encoder(void) {}		///< Destructor

  /// \brief Begin a new element in the encoding
  ///
  /// The element will have the given ElementId annotation and becomes the \e current element.
  /// \param elemId is the given ElementId annotation
  virtual void openElement(const ElementId &elemId)=0;

  /// \brief End the current element in the encoding
  ///
  /// The current element must match the given annotation or an exception is thrown.
  /// \param elemId is the given (expected) annotation for the current element
  virtual void closeElement(const ElementId &elemId)=0;

  /// \brief Write an annotated boolean value into the encoding
  ///
  /// The boolean data is associated with the given AttributeId annotation and the current open element.
  /// \param attribId is the given AttributeId annotation
  /// \param val is boolean value to encode
  virtual void writeBool(const AttributeId &attribId,bool val)=0;

  /// \brief Write an annotated signed integer value into the encoding
  ///
  /// The integer is associated with the given AttributeId annotation and the current open element.
  /// \param attribId is the given AttributeId annotation
  /// \param val is the signed integer value to encode
  virtual void writeSignedInteger(const AttributeId &attribId,intb val)=0;

  /// \brief Write an annotated unsigned integer value into the encoding
  ///
  /// The integer is associated with the given AttributeId annotation and the current open element.
  /// \param attribId is the given AttributeId annotation
  /// \param val is the unsigned integer value to encode
  virtual void writeUnsignedInteger(const AttributeId &attribId,uintb val)=0;

  /// \brief Write an annotated string into the encoding
  ///
  /// The string is associated with the given AttributeId annotation and the current open element.
  /// \param attribId is the given AttributeId annotation
  /// \param val is the string to encode
  virtual void writeString(const AttributeId &attribId,const string &val)=0;

  /// \brief Write an annotated string, using an indexed attribute, into the encoding
  ///
  /// Multiple attributes with a shared name can be written to the same element by calling this method
  /// multiple times with a different \b index value. The encoding will use attribute ids up to the base id
  /// plus the maximum index passed in.  Implementors must be careful to not use other attributes with ids
  /// bigger than the base id within the element taking the indexed attribute.
  /// \param attribId is the shared AttributeId
  /// \param index is the unique index to associated with the string
  /// \param val is the string to encode
  virtual void writeStringIndexed(const AttributeId &attribId,uint4 index,const string &val)=0;

  /// \brief Write an address space reference into the encoding
  ///
  /// The address space is associated with the given AttributeId annotation and the current open element.
  /// \param attribId is the given AttributeId annotation
  /// \param spc is the address space to encode
  virtual void writeSpace(const AttributeId &attribId,const AddrSpace *spc)=0;

  /// \brief Write a p-code operation opcode into the encoding, associating it with the given annotation
  ///
  /// \param attribId is the given annotation
  /// \param opc is the opcode
  virtual void writeOpcode(const AttributeId &attribId,OpCode opc)=0;
};

/// \brief An XML based decoder
///
/// The underlying transfer encoding is an XML document.  The decoder can either be initialized with an
/// existing Element as the root of the data to transfer, or the ingestStream() method can be invoked
/// to read the XML document from an input stream, in which case the decoder manages the Document object.
class XmlDecode : public Decoder { Document *document; ///< An ingested XML document, owned by \b this decoder const Element *rootElement; ///< The root XML element to be decoded vector elStack; ///< Stack of currently \e open elements vector iterStack; ///< Index of next child for each \e open element int4 attributeIndex; ///< Position of \e current attribute to parse (in \e current element) int4 scope; ///< Scope of element/attribute tags to look up int4 findMatchingAttribute(const Element *el,const string &attribName); public: XmlDecode(const AddrSpaceManager *spc,const Element *root,int4 sc=0) : Decoder(spc) { document = (Document *)0; rootElement = root; attributeIndex = -1; scope = sc; } ///< Constructor with preparsed root XmlDecode(const AddrSpaceManager *spc,int4 sc=0) : Decoder(spc) { document = (Document *)0; rootElement = (const Element *)0; attributeIndex = -1; scope=sc; } ///< Constructor for use with ingestStream const Element *getCurrentXmlElement(void) const { return elStack.back(); } ///< Get pointer to underlying XML element object virtual ~XmlDecode(void); virtual void ingestStream(istream &s); virtual uint4 peekElement(void); virtual uint4 openElement(void); virtual uint4 openElement(const ElementId &elemId); virtual void closeElement(uint4 id); virtual void closeElementSkipping(uint4 id); virtual void rewindAttributes(void); virtual uint4 getNextAttributeId(void); virtual uint4 getIndexedAttributeId(const AttributeId &attribId); virtual bool readBool(void); virtual bool readBool(const AttributeId &attribId); virtual intb readSignedInteger(void); virtual intb readSignedInteger(const AttributeId &attribId); virtual intb readSignedIntegerExpectString(const string &expect,intb expectval); virtual intb readSignedIntegerExpectString(const AttributeId &attribId,const string &expect,intb expectval); virtual uintb readUnsignedInteger(void); virtual uintb readUnsignedInteger(const AttributeId &attribId); virtual string readString(void); virtual string 
readString(const AttributeId &attribId); virtual AddrSpace *readSpace(void); virtual AddrSpace *readSpace(const AttributeId &attribId); virtual OpCode readOpcode(void); virtual OpCode readOpcode(AttributeId &attribId); }; /// \brief An XML based encoder /// /// The underlying transfer encoding is an XML document. The encoder is initialized with a stream which will /// receive the XML document as calls are made on the encoder. class XmlEncode : public Encoder { friend class XmlDecode; enum { tag_start = 0, ///< Tag has been opened, attributes can be written tag_content = 1, ///< Opening tag and content have been written tag_stop = 2 ///< No tag is currently being written }; static const char spaces[]; ///< Array of ' ' characters for emitting indents static const int4 MAX_SPACES; ///< Maximum number of leading spaces when indenting XML ostream &outStream; ///< The stream receiving the encoded data int4 tagStatus; ///< Stage of writing an element tag int4 depth; ///< Depth of open elements bool doFormatting; ///< \b true if encoder should indent and emit newlines void newLine(void); ///< Emit a newline and proper indenting for the next tag public: XmlEncode(ostream &s,bool doFormat=true) : outStream(s) { depth=0; tagStatus=tag_stop; doFormatting=doFormat; } ///< Construct from a stream virtual void openElement(const ElementId &elemId); virtual void closeElement(const ElementId &elemId); virtual void writeBool(const AttributeId &attribId,bool val); virtual void writeSignedInteger(const AttributeId &attribId,intb val); virtual void writeUnsignedInteger(const AttributeId &attribId,uintb val); virtual void writeString(const AttributeId &attribId,const string &val); virtual void writeStringIndexed(const AttributeId &attribId,uint4 index,const string &val); virtual void writeSpace(const AttributeId &attribId,const AddrSpace *spc); virtual void writeOpcode(const AttributeId &attribId,OpCode opc); }; /// \brief Protocol format for PackedEncode and PackedDecode classes /// 
/// All bytes in the encoding are expected to be non-zero. Element encoding looks like /// - 01xiiiii is an element start /// - 10xiiiii is an element end /// - 11xiiiii is an attribute start /// /// Where iiiii is the (first) 5 bits of the element/attribute id. /// If x=0, the id is complete. If x=1, the next byte contains 7 more bits of the id: 1iiiiiii /// /// After an attribute start, there follows a \e type byte: ttttllll, where the first 4 bits indicate the /// type of attribute and final 4 bits are a \b length \b code. The types are: /// - 1 = boolean (lengthcode=0 for false, lengthcode=1 for true) /// - 2 = positive signed integer /// - 3 = negative signed integer (stored in negated form) /// - 4 = unsigned integer /// - 5 = basic address space (encoded as the integer index of the space) /// - 6 = special address space (lengthcode 0=>stack 1=>join 2=>fspec 3=>iop) /// - 7 = string /// /// All attribute types except \e boolean and \e special, have an encoded integer after the \e type byte. /// The \b length \b code, indicates the number bytes used to encode the integer, 7-bits of info per byte, 1iiiiiii. /// A \b length \b code of zero is used to encode an integer value of 0, with no following bytes. /// /// For strings, the integer encoded after the \e type byte, is the actual length of the string. The /// string data itself is stored immediately after the length integer using UTF8 format. 
namespace PackedFormat { static const uint1 HEADER_MASK = 0xc0; ///< Bits encoding the record type static const uint1 ELEMENT_START = 0x40; ///< Header for an element start record static const uint1 ELEMENT_END = 0x80; ///< Header for an element end record static const uint1 ATTRIBUTE = 0xc0; ///< Header for an attribute record static const uint1 HEADEREXTEND_MASK = 0x20; ///< Bit indicating the id extends into the next byte static const uint1 ELEMENTID_MASK = 0x1f; ///< Bits encoding (part of) the id in the record header static const uint1 RAWDATA_MASK = 0x7f; ///< Bits of raw data in follow-on bytes static const int4 RAWDATA_BITSPERBYTE = 7; ///< Number of bits used in a follow-on byte static const uint1 RAWDATA_MARKER = 0x80; ///< The unused bit in follow-on bytes. (Always set to 1) static const int4 TYPECODE_SHIFT = 4; ///< Bit position of the type code in the type byte static const uint1 LENGTHCODE_MASK = 0xf; ///< Bits in the type byte forming the length code static const uint1 TYPECODE_BOOLEAN = 1; ///< Type code for the \e boolean type static const uint1 TYPECODE_SIGNEDINT_POSITIVE = 2; ///< Type code for the \e signed \e positive \e integer type static const uint1 TYPECODE_SIGNEDINT_NEGATIVE = 3; ///< Type code for the \e signed \e negative \e integer type static const uint1 TYPECODE_UNSIGNEDINT = 4; ///< Type code for the \e unsigned \e integer type static const uint1 TYPECODE_ADDRESSSPACE = 5; ///< Type code for the \e address \e space type static const uint1 TYPECODE_SPECIALSPACE = 6; ///< Type code for the \e special \e address \e space type static const uint1 TYPECODE_STRING = 7; ///< Type code for the \e string type static const uint4 SPECIALSPACE_STACK = 0; ///< Special code for the \e stack space static const uint4 SPECIALSPACE_JOIN = 1; ///< Special code for the \e join space static const uint4 SPECIALSPACE_FSPEC = 2; ///< Special code for the \e fspec space static const uint4 SPECIALSPACE_IOP = 3; ///< Special code for the \e iop space static 
const uint4 SPECIALSPACE_SPACEBASE = 4; ///< Special code for a \e spacebase space } /// \brief A byte-based decoder designed to marshal info to the decompiler efficiently /// /// The decoder expects an encoding as described in PackedFormat. When ingested, the stream bytes are /// held in a sequence of arrays (ByteChunk). During decoding, \b this object maintains a Position in the /// stream at the start and end of the current open element, and a Position of the next attribute to read to /// facilitate getNextAttributeId() and associated read*() methods. class PackedDecode : public Decoder { public: static const int4 BUFFER_SIZE; ///< The size, in bytes, of a single cached chunk of the input stream private: /// \brief A bounded array of bytes class ByteChunk { friend class PackedDecode; uint1 *start; ///< Start of the byte array uint1 *end; ///< End of the byte array public: ByteChunk(uint1 *s,uint1 *e) { start = s; end = e; } ///< Constructor }; /// \brief An iterator into input stream class Position { friend class PackedDecode; list::const_iterator seqIter; ///< Current byte sequence uint1 *current; ///< Current position in sequence uint1 *end; ///< End of current sequence }; list inStream; ///< Incoming raw data as a sequence of byte arrays Position startPos; ///< Position at the start of the current open element Position curPos; ///< Position of the next attribute as returned by getNextAttributeId Position endPos; ///< Ending position after all attributes in current open element bool attributeRead; ///< Has the last attribute returned by getNextAttributeId been read uint1 getByte(Position &pos) { return *pos.current; } ///< Get the byte at the current position, do not advance uint1 getBytePlus1(Position &pos); ///< Get the byte following the current byte, do not advance position uint1 getNextByte(Position &pos); ///< Get the byte at the current position and advance to the next byte void advancePosition(Position &pos,int4 skip); ///< Advance the position by the 
given number of bytes uint8 readInteger(int4 len); ///< Read an integer from the \e current position given its length in bytes uint4 readLengthCode(uint1 typeByte) { return ((uint4)typeByte & PackedFormat::LENGTHCODE_MASK); } ///< Extract length code from type byte void findMatchingAttribute(const AttributeId &attribId); ///< Find attribute matching the given id in open element void skipAttribute(void); ///< Skip over the attribute at the current position void skipAttributeRemaining(uint1 typeByte); ///< Skip over remaining attribute data, after a mismatch protected: uint1 *allocateNextInputBuffer(int4 pad); ///< Allocate the next chunk of space in the input stream void endIngest(int4 bufPos); ///< Finish set up for reading input stream public: PackedDecode(const AddrSpaceManager *spcManager) : Decoder(spcManager) {} ///< Constructor virtual ~PackedDecode(void); virtual void ingestStream(istream &s); virtual uint4 peekElement(void); virtual uint4 openElement(void); virtual uint4 openElement(const ElementId &elemId); virtual void closeElement(uint4 id); virtual void closeElementSkipping(uint4 id); virtual void rewindAttributes(void); virtual uint4 getNextAttributeId(void); virtual uint4 getIndexedAttributeId(const AttributeId &attribId); virtual bool readBool(void); virtual bool readBool(const AttributeId &attribId); virtual intb readSignedInteger(void); virtual intb readSignedInteger(const AttributeId &attribId); virtual intb readSignedIntegerExpectString(const string &expect,intb expectval); virtual intb readSignedIntegerExpectString(const AttributeId &attribId,const string &expect,intb expectval); virtual uintb readUnsignedInteger(void); virtual uintb readUnsignedInteger(const AttributeId &attribId); virtual string readString(void); virtual string readString(const AttributeId &attribId); virtual AddrSpace *readSpace(void); virtual AddrSpace *readSpace(const AttributeId &attribId); virtual OpCode readOpcode(void); virtual OpCode readOpcode(AttributeId &attribId); 
}; /// \brief A byte-based encoder designed to marshal from the decompiler efficiently /// /// See PackedDecode for details of the encoding format. class PackedEncode : public Encoder { ostream &outStream; ///< The stream receiving the encoded data void writeHeader(uint1 header,uint4 id); ///< Write a header, element or attribute, to stream void writeInteger(uint1 typeByte,uint8 val); ///< Write an integer value to the stream public: PackedEncode(ostream &s) : outStream(s) {} ///< Construct from a stream virtual void openElement(const ElementId &elemId); virtual void closeElement(const ElementId &elemId); virtual void writeBool(const AttributeId &attribId,bool val); virtual void writeSignedInteger(const AttributeId &attribId,intb val); virtual void writeUnsignedInteger(const AttributeId &attribId,uintb val); virtual void writeString(const AttributeId &attribId,const string &val); virtual void writeStringIndexed(const AttributeId &attribId,uint4 index,const string &val); virtual void writeSpace(const AttributeId &attribId,const AddrSpace *spc); virtual void writeOpcode(const AttributeId &attribId,OpCode opc); }; /// An exception is thrown if the position currently points to the last byte in the stream /// \param pos is the position in the stream to look ahead from /// \return the next byte inline uint1 PackedDecode::getBytePlus1(Position &pos) { uint1 *ptr = pos.current + 1; if (ptr == pos.end) { list::const_iterator iter = pos.seqIter; ++iter; if (iter == inStream.end()) throw DecoderError("Unexpected end of stream"); ptr = (*iter).start; } return *ptr; } /// An exception is thrown if there are no additional bytes in the stream /// \param pos is the position of the byte /// \return the byte at the current position inline uint1 PackedDecode::getNextByte(Position &pos) { uint1 res = *pos.current; pos.current += 1; if (pos.current != pos.end) return res; ++pos.seqIter; if (pos.seqIter == inStream.end()) throw DecoderError("Unexpected end of stream"); pos.current = 
(*pos.seqIter).start; pos.end = (*pos.seqIter).end; return res; } /// An exception is thrown of position is advanced past the end of the stream /// \param pos is the position being advanced /// \param skip is the number of bytes to advance inline void PackedDecode::advancePosition(Position &pos,int4 skip) { while(pos.end - pos.current <= skip) { skip -= (pos.end - pos.current); ++pos.seqIter; if (pos.seqIter == inStream.end()) throw DecoderError("Unexpected end of stream"); pos.current = (*pos.seqIter).start; pos.end = (*pos.seqIter).end; } pos.current += skip; } /// Allocate an array of BUFFER_SIZE bytes and add it to the in-memory stream /// \param pad is the number of bytes of padding to add to the allocation size, above BUFFER_SIZE /// \return the newly allocated buffer inline uint1 *PackedDecode::allocateNextInputBuffer(int4 pad) { uint1 *buf = new uint1[BUFFER_SIZE + pad]; inStream.emplace_back(buf,buf+BUFFER_SIZE); return buf; } /// \param header is the type of header /// \param id is the id associated with the element or attribute inline void PackedEncode::writeHeader(uint1 header,uint4 id) { if (id > 0x1f) { header |= PackedFormat::HEADEREXTEND_MASK; header |= (id >> PackedFormat::RAWDATA_BITSPERBYTE); uint1 extendByte = (id & PackedFormat::RAWDATA_MASK) | PackedFormat::RAWDATA_MARKER; outStream.put(header); outStream.put(extendByte); } else { header |= id; outStream.put(header); } } extern ElementId ELEM_UNKNOWN; ///< Special element to represent an element with an unrecognized name extern AttributeId ATTRIB_UNKNOWN; ///< Special attribute to represent an attribute with an unrecognized name extern AttributeId ATTRIB_CONTENT; ///< Special attribute for XML text content of an element /// The name is looked up in the scoped list of attributes. If the attribute is not in the list, a special /// placeholder attribute, ATTRIB_UNKNOWN, is returned as a placeholder for attributes with unrecognized names. 
/// \param nm is the name of the attribute /// \param scope is the id of the scope in which to lookup of the name /// \return the associated id inline uint4 AttributeId::find(const string &nm,int4 scope) { if (scope == 0) { // Current only support reverse look up for scope 0 unordered_map::const_iterator iter = lookupAttributeId.find(nm); if (iter != lookupAttributeId.end()) return (*iter).second; } return ATTRIB_UNKNOWN.id; } /// The name is looked up in the scoped list of elements. If the element is not in the list, a special /// placeholder element, ELEM_UNKNOWN, is returned as a placeholder for elements with unrecognized names. /// \param nm is the name of the element /// \param scope is the id of the scope in which to search /// \return the associated id inline uint4 ElementId::find(const string &nm,int4 scope) { if (scope == 0) { unordered_map::const_iterator iter = lookupElementId.find(nm); if (iter != lookupElementId.end()) return (*iter).second; } return ELEM_UNKNOWN.id; } extern AttributeId ATTRIB_ALIGN; ///< Marshaling attribute "align" extern AttributeId ATTRIB_BIGENDIAN; ///< Marshaling attribute "bigendian" extern AttributeId ATTRIB_CONSTRUCTOR; ///< Marshaling attribute "constructor" extern AttributeId ATTRIB_DESTRUCTOR; ///< Marshaling attribute "destructor" extern AttributeId ATTRIB_EXTRAPOP; ///< Marshaling attribute "extrapop" extern AttributeId ATTRIB_FORMAT; ///< Marshaling attribute "format" extern AttributeId ATTRIB_HIDDENRETPARM; ///< Marshaling attribute "hiddenretparm" extern AttributeId ATTRIB_ID; ///< Marshaling attribute "id" extern AttributeId ATTRIB_INDEX; ///< Marshaling attribute "index" extern AttributeId ATTRIB_INDIRECTSTORAGE; ///< Marshaling attribute "indirectstorage" extern AttributeId ATTRIB_METATYPE; ///< Marshaling attribute "metatype" extern AttributeId ATTRIB_MODEL; ///< Marshaling attribute "model" extern AttributeId ATTRIB_NAME; ///< Marshaling attribute "name" extern AttributeId ATTRIB_NAMELOCK; ///< Marshaling 
attribute "namelock" extern AttributeId ATTRIB_OFFSET; ///< Marshaling attribute "offset" extern AttributeId ATTRIB_READONLY; ///< Marshaling attribute "readonly" extern AttributeId ATTRIB_REF; ///< Marshaling attribute "ref" extern AttributeId ATTRIB_SIZE; ///< Marshaling attribute "size" extern AttributeId ATTRIB_SPACE; ///< Marshaling attribute "space" extern AttributeId ATTRIB_THISPTR; ///< Marshaling attribute "thisptr" extern AttributeId ATTRIB_TYPE; ///< Marshaling attribute "type" extern AttributeId ATTRIB_TYPELOCK; ///< Marshaling attribute "typelock" extern AttributeId ATTRIB_VAL; ///< Marshaling attribute "val" extern AttributeId ATTRIB_VALUE; ///< Marshaling attribute "value" extern AttributeId ATTRIB_WORDSIZE; ///< Marshaling attribute "wordsize" extern AttributeId ATTRIB_STORAGE; ///< Marshaling attribute "storage" extern AttributeId ATTRIB_STACKSPILL; ///< Marshaling attribute "stackspill" extern ElementId ELEM_DATA; ///< Marshaling element \ extern ElementId ELEM_INPUT; ///< Marshaling element \ extern ElementId ELEM_OFF; ///< Marshaling element \ extern ElementId ELEM_OUTPUT; ///< Marshaling element \ extern ElementId ELEM_RETURNADDRESS; ///< Marshaling element \ extern ElementId ELEM_SYMBOL; ///< Marshaling element \ extern ElementId ELEM_TARGET; ///< Marshaling element \ extern ElementId ELEM_VAL; ///< Marshaling element \ extern ElementId ELEM_VALUE; ///< Marshaling element \ extern ElementId ELEM_VOID; ///< Marshaling element \ } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/memstate.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "memstate.hh" #include "translate.hh" namespace ghidra { /// This is a static convenience routine for decoding a value from a sequence of bytes depending /// on the desired endianness /// \param ptr is the pointer to the bytes to decode /// \param size is the number of bytes /// \param bigendian is \b true if the bytes are encoded in big endian form /// \return the decoded value uintb MemoryBank::constructValue(const uint1 *ptr,int4 size,bool bigendian) { uintb res = 0; if (bigendian) { for(int4 i=0;i=0;--i) { res <<= 8; res += (uintb) ptr[i]; } } return res; } /// This is a static convenience routine for encoding bytes from a given value, depending on /// the desired endianness /// \param ptr is a pointer to the location to write the encoded bytes /// \param val is the value to be encoded /// \param size is the number of bytes to encode /// \param bigendian is \b true if a big endian encoding is desired void MemoryBank::deconstructValue(uint1 *ptr,uintb val,int4 size,bool bigendian) { if (bigendian) { for(int4 i=size-1;i>=0;--i) { ptr[i] = (uint1) (val & 0xff); val >>= 8; } } else { for(int4 i=0;i>= 8; } } } /// A MemoryBank must be associated with a specific address space, have a preferred or natural /// \e wordsize and a natural \e pagesize. Both the \e wordsize and \e pagesize must be a power of 2. 
/// \param spc is the associated address space /// \param ws is the number of bytes in the preferred wordsize /// \param ps is the number of bytes in a page MemoryBank::MemoryBank(AddrSpace *spc,int4 ws,int4 ps) { space = spc; wordsize = ws; pagesize = ps; } /// This routine only retrieves data from a single \e page in the memory bank. Bytes need not /// be retrieved from the exact start of a page, but all bytes must come from \e one page. /// A page is a fixed number of bytes, and the address of a page is always aligned based /// on that number of bytes. This routine may be overridden for a page based implementation /// of the MemoryBank. The default implementation retrieves the page as aligned words /// using the find method. /// \param addr is the \e aligned offset of the desired page /// \param res is a pointer to where fetched data should be written /// \param skip is the offset \e into \e the \e page to get the bytes from /// \param size is the number of bytes to retrieve void MemoryBank::getPage(uintb addr,uint1 *res,int4 skip,int4 size) const { // Default implementation just iterates using find // but could be optimized uintb ptraddr = addr + skip; uintb endaddr = ptraddr + size; uintb startalign = ptraddr & ~((uintb)(wordsize-1)); uintb endalign = endaddr & ~((uintb)(wordsize-1)); if ((endaddr & ((uintb)(wordsize-1))) != 0) endalign += wordsize; uintb curval; bool bswap = ((HOST_ENDIAN==1) != space->isBigEndian()); uint1 *ptr; do { curval = find(startalign); if (bswap) curval = byte_swap(curval,wordsize); ptr = (uint1 *)&curval; int4 sz = wordsize; if (startalign < addr) { ptr += (addr-startalign); sz = wordsize - (addr-startalign); } if (startalign + wordsize > endaddr) sz -= (startalign + wordsize -endaddr); memcpy(res,ptr,sz); res += sz; startalign += wordsize; } while(startalign != endalign); } /// This routine writes data only to a single \e page of the memory bank. 
Bytes need not be /// written to the exact start of the page, but all bytes must be written to only one page /// when using this routine. A page is a /// fixed number of bytes, and the address of a page is always aligned based on this size. /// This routine may be overridden for a page based implementation of the MemoryBank. The /// default implementation writes the page as a sequence of aligned words, using the /// insert method. /// \param addr is the \e aligned offset of the desired page /// \param val is a pointer to the bytes to be written into the page /// \param skip is the offset \e into \e the \e page where bytes will be written /// \param size is the number of bytes to be written void MemoryBank::setPage(uintb addr,const uint1 *val,int4 skip,int4 size) { // Default implementation just iterates using insert // but could be optimized uintb ptraddr = addr + skip; uintb endaddr = ptraddr + size; uintb startalign = ptraddr & ~((uintb)(wordsize-1)); uintb endalign = endaddr & ~((uintb)(wordsize-1)); if ((endaddr & ((uintb)(wordsize-1))) != 0) endalign += wordsize; uintb curval; bool bswap = ((HOST_ENDIAN==1) != space->isBigEndian()); uint1 *ptr; do { ptr = (uint1 *)&curval; int4 sz = wordsize; if (startalign < addr) { ptr += (addr-startalign); sz = wordsize - (addr-startalign); } if (startalign + wordsize > endaddr) sz -= (startalign + wordsize - endaddr); if (sz != wordsize) { curval = find(startalign); // Part of word is copied from underlying memcpy(ptr,val,sz); // Rest is taken from -val- } else curval = *((const uintb *)val); // -val- supplies entire word if (bswap) curval = byte_swap(curval,wordsize); insert(startalign,curval); val += sz; startalign += wordsize; } while(startalign != endalign); } /// This routine is used to set a single value in the memory bank at an arbitrary address /// It takes into account the endianness of the associated address space when encoding the /// value as bytes in the bank. 
The value is broken up into aligned pieces of \e wordsize and /// the actual \b write is performed with the insert routine. If only parts of aligned words /// are written to, then the remaining parts are filled in with the original value, via the /// find routine. /// \param offset is the start of the byte range to write /// \param size is the number of bytes in the range to write /// \param val is the value to be written void MemoryBank::setValue(uintb offset,int4 size,uintb val) { uintb alignmask = (uintb)(wordsize-1); uintb ind = offset & (~alignmask); int4 skip = offset & alignmask; int4 size1 = wordsize-skip; int4 size2; int4 gap; uintb val1,val2; if (size > size1) { // We have spill over size2 = size - size1; val1 = find(ind); val2 = find(ind+wordsize); gap = wordsize - size2; } else { if (size == wordsize) { insert(ind,val); return; } val1 = find(ind); val2 = 0; gap = size1-size; size1 = size; size2 = 0; } skip = skip * 8; // Convert from byte skip to bit skip gap = gap * 8; // Convert from byte to bits if (space->isBigEndian()) { if (size2 == 0) { val1 &= ~(calc_mask(size1)<> 8*size2; insert(ind,val1); val2 &= (~((uintb)0)) >> 8*size2; val2 |= val << gap; insert(ind+wordsize,val2); } } else { if (size2 == 0) { val1 &= ~(calc_mask(size1)<> 8*size1; val1 |= val << skip; insert(ind,val1); val2 &= (~((uintb)0)) << 8*size2; val2 |= val >> 8*size1; insert(ind+wordsize,val2); } } } /// This routine gets the value from a range of bytes at an arbitrary address. /// It takes into account the endianness of the underlying space when decoding the value. /// The value is constructed by making one or more aligned word queries, using the find method. /// The desired value may span multiple words and is reconstructed properly. 
/// \param offset is the start of the byte range encoding the value /// \param size is the number of bytes in the range /// \return the decoded value uintb MemoryBank::getValue(uintb offset,int4 size) const { uintb res; uintb alignmask = (uintb) (wordsize-1); uintb ind = offset & (~alignmask); int4 skip = offset & alignmask; int4 size1 = wordsize-skip; int4 size2; int4 gap; uintb val1,val2; if (size > size1) { // We have spill over size2 = size - size1; val1 = find(ind); val2 = find(ind+wordsize); gap = wordsize - size2; } else { val1 = find(ind); val2 = 0; if (size == wordsize) return val1; gap = size1-size; size1 = size; size2 = 0; } if (space->isBigEndian()) { if (size2 == 0) res = val1>>(8*gap); else res = (val1<<(8*size2)) | (val2 >> (8*gap)); } else { if (size2 == 0) res = val1 >> (skip*8); else res = (val1>>(skip*8)) | (val2<<(size1*8) ); } res &= (uintb)calc_mask(size); return res; } /// This the most general method for writing a sequence of bytes into the memory bank. /// There is no restriction on the offset to write to or the number of bytes to be written, /// except that the range must be contained in the address space. /// \param offset is the start of the byte range to be written /// \param size is the number of bytes to write /// \param val is a pointer to the sequence of bytes to be written into the bank void MemoryBank::setChunk(uintb offset,int4 size,const uint1 *val) { int4 cursize; int4 count; uintb pagemask = (uintb) (pagesize - 1); uintb offalign; int4 skip; count = 0; while(count < size) { cursize = pagesize; offalign = offset & ~pagemask; skip = 0; if (offalign != offset) { skip = offset - offalign; cursize -= skip; } if (size - count < cursize) cursize = size - count; setPage(offalign,val,skip,cursize); count += cursize; offset += cursize; val += cursize; } } /// This is the most general method for reading a sequence of bytes from the memory bank. 
/// There is no restriction on the offset or the number of bytes to read, except that the /// range must be contained in the address space. /// \param offset is the start of the byte range to read /// \param size is the number of bytes to read /// \param res is a pointer to where the retrieved bytes should be stored void MemoryBank::getChunk(uintb offset,int4 size,uint1 *res) const { int4 cursize,count; uintb pagemask = (uintb) (pagesize-1); uintb offalign; int4 skip; count = 0; while(count < size) { cursize = pagesize; offalign = offset & ~pagemask; skip = 0; if (offalign != offset) { skip = offset-offalign; cursize -= skip; } if (size - count < cursize) cursize = size - count; getPage(offalign,res,skip,cursize); count += cursize; offset += cursize; res += cursize; } } /// Find an aligned word from the bank. First an attempt is made to fetch the data from the /// LoadImage. If this fails, the value is returned as 0. /// \param addr is the address of the word to fetch /// \return the fetched value uintb MemoryImage::find(uintb addr) const { // Assume that -addr- is word aligned uintb res = 0; // Make sure all bytes start as 0, as load may not fill all bytes AddrSpace *spc = getSpace(); try { uint1 *ptr = (uint1 *)&res; ptr += (HOST_ENDIAN==1) ? (sizeof(uintb) - getWordSize()) : 0; loader->loadFill(ptr,getWordSize(),Address(spc,addr)); } catch(DataUnavailError &err) { // Pages not mapped in the load image, are assumed to be zero res = 0; } if ((HOST_ENDIAN==1) != spc->isBigEndian()) res = byte_swap(res,getWordSize()); return res; } /// Retrieve an aligned page from the bank. First an attempt is made to retrieve the /// page from the LoadImage, which may do its own zero filling. If the attempt fails, the /// page is entirely filled in with zeros. 
void MemoryImage::getPage(uintb addr,uint1 *res,int4 skip,int4 size) const { // Assume that -addr- is page aligned AddrSpace *spc = getSpace(); try { loader->loadFill(res,size,Address(spc,addr+skip)); } catch(DataUnavailError &err) { // Pages not mapped in the load image, are assumed to be zero for(int4 i=0;i::iterator iter; uint1 *pageptr; iter = page.find(pageaddr); if (iter != page.end()) pageptr = (*iter).second; else { pageptr = new uint1[getPageSize()]; page[pageaddr] = pageptr; if (underlie == (MemoryBank *)0) { for(int4 i=0;igetPage(pageaddr,pageptr,0,getPageSize()); } uintb pageoffset = addr & ((uintb)(getPageSize()-1)); deconstructValue(pageptr + pageoffset,val,getWordSize(),getSpace()->isBigEndian()); } /// This derived method first looks for the aligned word in the mapped pages. If the /// address is not mapped, the search is forwarded to the \e underlying memory bank. /// If there is no underlying bank, zero is returned. /// \param addr is the aligned offset of the word /// \return the retrieved value uintb MemoryPageOverlay::find(uintb addr) const { uintb pageaddr = addr & ~((uintb)(getPageSize()-1)); map::const_iterator iter; iter = page.find(pageaddr); if (iter == page.end()) { if (underlie == (MemoryBank *)0) return (uintb)0; return underlie->find(addr); } const uint1 *pageptr = (*iter).second; uintb pageoffset = addr & ((uintb)(getPageSize()-1)); return constructValue(pageptr+pageoffset,getWordSize(),getSpace()->isBigEndian()); } /// The desired page is looked for in the page cache. If it doesn't exist, the /// request is forwarded to \e underlying bank. If there is no underlying bank, the /// result buffer is filled with zeros. 
/// \param addr is the aligned offset of the page /// \param res is the pointer to where retrieved bytes should be stored /// \param skip is the offset \e into \e the \e page from where bytes should be retrieved /// \param size is the number of bytes to retrieve void MemoryPageOverlay::getPage(uintb addr,uint1 *res,int4 skip,int4 size) const { map::const_iterator iter; iter = page.find(addr); if (iter == page.end()) { if (underlie == (MemoryBank *)0) { for(int4 i=0;igetPage(addr,res,skip,size); return; } const uint1 *pageptr = (*iter).second; memcpy(res,pageptr+skip,size); } /// First, a cached version of the desired page is searched for via its address. If it doesn't /// exist, it is created, and its initial value is filled via the \e underlying bank. The bytes /// to be written are then copied into the cached page. /// \param addr is the aligned offset of the page to write /// \param val is a pointer to bytes to be written into the page /// \param skip is the offset \e into \e the \e page where bytes should be written /// \param size is the number of bytes to write void MemoryPageOverlay::setPage(uintb addr,const uint1 *val,int4 skip,int4 size) { map::iterator iter; uint1 *pageptr; iter = page.find(addr); if (iter == page.end()) { pageptr = new uint1[getPageSize()]; page[addr] = pageptr; if (size != getPageSize()) { if (underlie == (MemoryBank *)0) { for(int4 i=0;igetPage(addr,pageptr,0,getPageSize()); } } else pageptr = (*iter).second; memcpy(pageptr+skip,val,size); } /// A page overlay memory bank needs all the parameters for a generic memory bank /// and it needs to know the underlying memory bank being overlayed. 
/// \param spc is the address space associated with the memory bank /// \param ws is the number of bytes in the preferred wordsize (must be power of 2) /// \param ps is the number of bytes in a page (must be power of 2) /// \param ul is the underlying MemoryBank MemoryPageOverlay::MemoryPageOverlay(AddrSpace *spc,int4 ws,int4 ps,MemoryBank *ul) : MemoryBank(spc,ws,ps) { underlie = ul; } MemoryPageOverlay::~MemoryPageOverlay(void) { map::iterator iter; for(iter=page.begin();iter!=page.end();++iter) delete [] (*iter).second; } /// Write the value into the hashtable, using \b addr as a key. /// \param addr is the aligned address of the word being written /// \param val is the value of the word to write void MemoryHashOverlay::insert(uintb addr,uintb val) { int4 size = address.size(); uintb offset = (addr>>alignshift) % size; for(int4 i=0;i>alignshift) % size; for(int4 i=0;ifind(addr); } /// A MemoryBank implemented as a hash table needs everything associated with a generic /// memory bank, but the constructor also needs to know the size of the hashtable and /// the underlying memorybank to forward reads and writes to. /// \param spc is the address space associated with the memory bank /// \param ws is the number of bytes in the preferred wordsize (must be power of 2) /// \param ps is the number of bytes in a page (must be a power of 2) /// \param hashsize is the maximum number of entries in the hashtable /// \param ul is the underlying memory bank being overlayed MemoryHashOverlay::MemoryHashOverlay(AddrSpace *spc,int4 ws,int4 ps,int4 hashsize,MemoryBank *ul) : MemoryBank(spc,ws,ps), address(hashsize,0xBADBEEF), value(hashsize,0) { underlie = ul; collideskip = 1023; uint4 tmp = ws - 1; alignshift = 0; while(tmp != 0) { alignshift += 1; tmp >>= 1; } } /// MemoryBanks associated with specific address spaces must be registers with this MemoryState /// via this method. Each address space that will be used during emulation must be registered /// separately. 
The MemoryState object does \e not assume responsibility for freeing the MemoryBank /// \param bank is a pointer to the MemoryBank to be registered void MemoryState::setMemoryBank(MemoryBank *bank) { AddrSpace *spc = bank->getSpace(); int4 index = spc->getIndex(); while(index >= memspace.size()) memspace.push_back((MemoryBank *)0); memspace[index] = bank; } /// Any MemoryBank that has been registered with this MemoryState can be retrieved via this /// method if the MemoryBank's associated address space is known. /// \param spc is the address space of the desired MemoryBank /// \return a pointer to the MemoryBank or \b null if no bank is associated with \e spc. MemoryBank *MemoryState::getMemoryBank(AddrSpace *spc) const { int4 index = spc->getIndex(); if (index >= memspace.size()) return (MemoryBank *)0; return memspace[index]; } /// This is the main interface for writing values to the MemoryState. /// If there is no registered MemoryBank for the desired address space, or /// if there is some other error, an exception is thrown. /// \param spc is the address space to write to /// \param off is the offset where the value should be written /// \param size is the number of bytes to be written /// \param cval is the value to be written void MemoryState::setValue(AddrSpace *spc,uintb off,int4 size,uintb cval) { MemoryBank *mspace = getMemoryBank(spc); if (mspace == (MemoryBank *)0) throw LowlevelError("Setting value for unmapped memory space: "+spc->getName()); mspace->setValue(off,size,cval); } /// This is the main interface for reading values from the MemoryState. /// If there is no registered MemoryBank for the desired address space, or /// if there is some other error, an exception is thrown. 
/// \param spc is the address space being queried /// \param off is the offset of the value being queried /// \param size is the number of bytes to query /// \return the queried value uintb MemoryState::getValue(AddrSpace *spc,uintb off,int4 size) const { if (spc->getType() == IPTR_CONSTANT) return off; MemoryBank *mspace = getMemoryBank(spc); if (mspace == (MemoryBank *)0) throw LowlevelError("Getting value from unmapped memory space: "+spc->getName()); return mspace->getValue(off,size); } /// This is a convenience method for setting registers by name. /// Any register name known to the Translate object can be used as a write location. /// The associated address space, offset, and size is looked up and automatically /// passed to the main setValue routine. /// \param nm is the name of the register /// \param cval is the value to write to the register void MemoryState::setValue(const string &nm,uintb cval) { // Set a "register" value const VarnodeData &vdata( trans->getRegister(nm) ); setValue(vdata.space,vdata.offset,vdata.size,cval); } /// This is a convenience method for reading registers by name. /// Any register name known to the Translate object can be used as a read location. /// The associated address space, offset, and size is looked up and automatically /// passed to the main getValue routine. /// \param nm is the name of the register /// \return the value associated with that register uintb MemoryState::getValue(const string &nm) const { // Get a "register" value const VarnodeData &vdata( trans->getRegister(nm) ); return getValue(vdata.space,vdata.offset,vdata.size); } /// This is the main interface for reading a range of bytes from the MemorySate. /// The MemoryBank associated with the address space of the query is looked up /// and the request is forwarded to the getChunk method on the MemoryBank. 
If there /// is no registered MemoryBank or some other error, an exception is thrown /// \param res is a pointer to the result buffer for storing retrieved bytes /// \param spc is the desired address space /// \param off is the starting offset of the byte range being queried /// \param size is the number of bytes being queried void MemoryState::getChunk(uint1 *res,AddrSpace *spc,uintb off,int4 size) const { MemoryBank *mspace = getMemoryBank(spc); if (mspace == (MemoryBank *)0) throw LowlevelError("Getting chunk from unmapped memory space: "+spc->getName()); mspace->getChunk(off,size,res); } /// This is the main interface for setting values for a range of bytes in the MemoryState. /// The MemoryBank associated with the desired address space is looked up and the /// write is forwarded to the setChunk method on the MemoryBank. If there is no /// registered MemoryBank or some other error, an exception is throw. /// \param val is a pointer to the byte values to be written into the MemoryState /// \param spc is the address space being written /// \param off is the starting offset of the range being written /// \param size is the number of bytes to write void MemoryState::setChunk(const uint1 *val,AddrSpace *spc,uintb off,int4 size) { MemoryBank *mspace = getMemoryBank(spc); if (mspace == (MemoryBank *)0) throw LowlevelError("Setting chunk of unmapped memory space: "+spc->getName()); mspace->setChunk(off,size,val); } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/memstate.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file memstate.hh /// \brief Classes for keeping track of memory state during emulation #ifndef __MEMSTATE_HH__ #define __MEMSTATE_HH__ #include "pcoderaw.hh" #include "loadimage.hh" namespace ghidra { /// \brief Memory storage/state for a single AddressSpace /// /// Class for setting and getting memory values within a space /// The basic API is to get/set arrays of byte values via offset within the space. /// Helper functions getValue and setValue easily retrieve/store integers /// of various sizes from memory, using the endianness encoding specified by the space. /// Accesses through the public interface, are automatically broken down into /// \b word accesses, through the private insert/find methods, and \b page /// accesses through getPage/setPage. So these are the virtual methods that need /// to be overridden in the derived classes. 
class MemoryBank { friend class MemoryPageOverlay; friend class MemoryHashOverlay; int4 wordsize; ///< Number of bytes in an aligned word access int4 pagesize; ///< Number of bytes in an aligned page access AddrSpace *space; ///< The address space associated with this memory protected: virtual void insert(uintb addr,uintb val)=0; ///< Insert a word in memory bank at an aligned location virtual uintb find(uintb addr) const=0; ///< Retrieve a word from memory bank at an aligned location virtual void getPage(uintb addr,uint1 *res,int4 skip,int4 size) const; ///< Retrieve data from a memory \e page virtual void setPage(uintb addr,const uint1 *val,int4 skip,int4 size); ///< Write data into a memory page public: MemoryBank(AddrSpace *spc,int4 ws,int4 ps); ///< Generic constructor for a memory bank virtual ~MemoryBank(void) {} int4 getWordSize(void) const; ///< Get the number of bytes in a word for this memory bank int4 getPageSize(void) const; ///< Get the number of bytes in a page for this memory bank AddrSpace *getSpace(void) const; ///< Get the address space associated with this memory bank void setValue(uintb offset,int4 size,uintb val); ///< Set the value of a (small) range of bytes uintb getValue(uintb offset,int4 size) const; ///< Retrieve the value encoded in a (small) range of bytes void setChunk(uintb offset,int4 size,const uint1 *val); ///< Set values of an arbitrary sequence of bytes void getChunk(uintb offset,int4 size,uint1 *res) const; ///< Retrieve an arbitrary sequence of bytes static uintb constructValue(const uint1 *ptr,int4 size,bool bigendian); ///< Decode bytes to value static void deconstructValue(uint1 *ptr,uintb val,int4 size,bool bigendian); ///< Encode value to bytes }; /// A MemoryBank is instantiated with a \e natural word size. Requests for arbitrary byte ranges /// may be broken down into units of this size. /// \return the number of bytes in a \e word. 
inline int4 MemoryBank::getWordSize(void) const { return wordsize; } /// A MemoryBank is instantiated with a \e natural page size. Requests for large chunks of data /// may be broken down into units of this size. /// \return the number of bytes in a \e page. inline int4 MemoryBank::getPageSize(void) const { return pagesize; } /// A MemoryBank is a contiguous sequence of bytes associated with a particular address space. /// \return the AddressSpace associated with this bank. inline AddrSpace *MemoryBank::getSpace(void) const { return space; } /// \brief A kind of MemoryBank which retrieves its data from an underlying LoadImage /// /// Any bytes requested on the bank which lie in the LoadImage are retrieved from /// the LoadImage. Other addresses in the space are filled in with zero. /// This bank cannot be written to. class MemoryImage : public MemoryBank { LoadImage *loader; ///< The underlying LoadImage protected: virtual void insert(uintb addr,uintb val) { throw LowlevelError("Writing to read-only MemoryBank"); } ///< Exception is thrown for write attempts virtual uintb find(uintb addr) const; ///< Overridden find method virtual void getPage(uintb addr,uint1 *res,int4 skip,int4 size) const; ///< Overridded getPage method public: MemoryImage(AddrSpace *spc,int4 ws,int4 ps,LoadImage *ld); ///< Constructor for a loadimage memorybank }; /// \brief Memory bank that overlays some other memory bank, using a "copy on write" behavior. /// /// Pages are copied from the underlying object only when there is /// a write. The underlying access routines are overridden to make optimal use /// of this page implementation. The underlying memory bank can be a \b null pointer /// in which case, this memory bank behaves as if it were initially filled with zeros. 
class MemoryPageOverlay : public MemoryBank { MemoryBank *underlie; ///< Underlying memory object map page; ///< Overlayed pages protected: virtual void insert(uintb addr,uintb val); ///< Overridden aligned word insert virtual uintb find(uintb addr) const; ///< Overridden aligned word find virtual void getPage(uintb addr,uint1 *res,int4 skip,int4 size) const; ///< Overridden getPage virtual void setPage(uintb addr,const uint1 *val,int4 skip,int4 size); ///< Overridden setPage public: MemoryPageOverlay(AddrSpace *spc,int4 ws,int4 ps,MemoryBank *ul); ///< Constructor for page overlay virtual ~MemoryPageOverlay(void); }; /// \brief A memory bank that implements reads and writes using a hash table. /// /// The initial state of the /// bank is taken from an \e underlying memory bank or is all zero, if this bank is initialized with /// a \b null pointer. This implementation will not be very efficient for accessing entire pages. class MemoryHashOverlay : public MemoryBank { MemoryBank *underlie; ///< Underlying memory bank int4 alignshift; ///< How many LSBs are thrown away from address when doing hash table lookup uintb collideskip; ///< How many slots to skip after a hashtable collision vector address; ///< The hashtable addresses vector value; ///< The hashtable values protected: virtual void insert(uintb addr,uintb val); ///< Overridden aligned word insert virtual uintb find(uintb addr) const; ///< Overridden aligned word find public: MemoryHashOverlay(AddrSpace *spc,int4 ws,int4 ps,int4 hashsize,MemoryBank *ul); ///< Constructor for hash overlay }; class Translate; // Forward declaration /// \brief All storage/state for a pcode machine /// /// Every piece of information in a pcode machine is representable as a triple /// (AddrSpace,offset,size). This class allows getting and setting /// of all state information of this form. 
class MemoryState { protected: Translate *trans; ///< Architecture information about memory spaces vector memspace; ///< Memory banks associated with each address space public: MemoryState(Translate *t); ///< A constructor for MemoryState ~MemoryState(void) {} Translate *getTranslate(void) const; ///< Get the Translate object void setMemoryBank(MemoryBank *bank); ///< Map a memory bank into the state MemoryBank *getMemoryBank(AddrSpace *spc) const; ///< Get a memory bank associated with a particular space void setValue(AddrSpace *spc,uintb off,int4 size,uintb cval); ///< Set a value on the memory state uintb getValue(AddrSpace *spc,uintb off,int4 size) const; ///< Retrieve a memory value from the memory state void setValue(const string &nm,uintb cval); ///< Set a value on a named register in the memory state uintb getValue(const string &nm) const; ///< Retrieve a value from a named register in the memory state void setValue(const VarnodeData *vn,uintb cval); ///< Set value on a given \b varnode uintb getValue(const VarnodeData *vn) const; ///< Get a value from a \b varnode void getChunk(uint1 *res,AddrSpace *spc,uintb off,int4 size) const; ///< Get a chunk of data from memory state void setChunk(const uint1 *val,AddrSpace *spc,uintb off,int4 size); ///< Set a chunk of data from memory state }; /// The MemoryState needs a Translate object in order to be able to convert register names /// into varnodes /// \param t is the translator inline MemoryState::MemoryState(Translate *t) { trans = t; } /// Retrieve the actual pcode translator being used by this machine state /// \return a pointer to the Translate object inline Translate *MemoryState::getTranslate(void) const { return trans; } /// A convenience method for setting a value directly on a varnode rather than /// breaking out the components /// \param vn is a pointer to the varnode to be written /// \param cval is the value to write into the varnode inline void MemoryState::setValue(const VarnodeData *vn,uintb cval) 
{ setValue(vn->space,vn->offset,vn->size,cval); } /// A convenience method for reading a value directly from a varnode rather /// than querying for the offset and space /// \param vn is a pointer to the varnode to be read /// \return the value read from the varnode inline uintb MemoryState::getValue(const VarnodeData *vn) const { return getValue(vn->space,vn->offset,vn->size); } } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/opbehavior.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "opbehavior.hh" #include "translate.hh" namespace ghidra { /// This routine generates a vector of OpBehavior objects indexed by opcode /// \param inst is the vector of behaviors to be filled /// \param trans is the translator object needed by the floating point behaviors void OpBehavior::registerInstructions(vector &inst,const Translate *trans) { inst.insert(inst.end(),CPUI_MAX,(OpBehavior *)0); inst[CPUI_COPY] = new OpBehaviorCopy(); inst[CPUI_LOAD] = new OpBehavior(CPUI_LOAD,false,true); inst[CPUI_STORE] = new OpBehavior(CPUI_STORE,false,true); inst[CPUI_BRANCH] = new OpBehavior(CPUI_BRANCH,false,true); inst[CPUI_CBRANCH] = new OpBehavior(CPUI_CBRANCH,false,true); inst[CPUI_BRANCHIND] = new OpBehavior(CPUI_BRANCHIND,false,true); inst[CPUI_CALL] = new OpBehavior(CPUI_CALL,false,true); inst[CPUI_CALLIND] = new OpBehavior(CPUI_CALLIND,false,true); inst[CPUI_CALLOTHER] = new OpBehavior(CPUI_CALLOTHER,false,true); inst[CPUI_RETURN] = new OpBehavior(CPUI_RETURN,false,true); inst[CPUI_MULTIEQUAL] = new OpBehavior(CPUI_MULTIEQUAL,false,true); inst[CPUI_INDIRECT] = new OpBehavior(CPUI_INDIRECT,false,true); inst[CPUI_PIECE] = new OpBehaviorPiece(); inst[CPUI_SUBPIECE] = new OpBehaviorSubpiece(); inst[CPUI_INT_EQUAL] = new OpBehaviorEqual(); inst[CPUI_INT_NOTEQUAL] = new OpBehaviorNotEqual(); inst[CPUI_INT_SLESS] = new OpBehaviorIntSless(); inst[CPUI_INT_SLESSEQUAL] = new OpBehaviorIntSlessEqual(); inst[CPUI_INT_LESS] = new OpBehaviorIntLess(); inst[CPUI_INT_LESSEQUAL] = new OpBehaviorIntLessEqual(); inst[CPUI_INT_ZEXT] = new OpBehaviorIntZext(); inst[CPUI_INT_SEXT] = new OpBehaviorIntSext(); inst[CPUI_INT_ADD] = new OpBehaviorIntAdd(); inst[CPUI_INT_SUB] = new OpBehaviorIntSub(); inst[CPUI_INT_CARRY] = new OpBehaviorIntCarry(); inst[CPUI_INT_SCARRY] = new OpBehaviorIntScarry(); inst[CPUI_INT_SBORROW] = new OpBehaviorIntSborrow(); inst[CPUI_INT_2COMP] = new OpBehaviorInt2Comp(); inst[CPUI_INT_NEGATE] = new OpBehaviorIntNegate(); inst[CPUI_INT_XOR] = new 
OpBehaviorIntXor(); inst[CPUI_INT_AND] = new OpBehaviorIntAnd(); inst[CPUI_INT_OR] = new OpBehaviorIntOr(); inst[CPUI_INT_LEFT] = new OpBehaviorIntLeft(); inst[CPUI_INT_RIGHT] = new OpBehaviorIntRight(); inst[CPUI_INT_SRIGHT] = new OpBehaviorIntSright(); inst[CPUI_INT_MULT] = new OpBehaviorIntMult(); inst[CPUI_INT_DIV] = new OpBehaviorIntDiv(); inst[CPUI_INT_SDIV] = new OpBehaviorIntSdiv(); inst[CPUI_INT_REM] = new OpBehaviorIntRem(); inst[CPUI_INT_SREM] = new OpBehaviorIntSrem(); inst[CPUI_BOOL_NEGATE] = new OpBehaviorBoolNegate(); inst[CPUI_BOOL_XOR] = new OpBehaviorBoolXor(); inst[CPUI_BOOL_AND] = new OpBehaviorBoolAnd(); inst[CPUI_BOOL_OR] = new OpBehaviorBoolOr(); inst[CPUI_CAST] = new OpBehavior(CPUI_CAST,false,true); inst[CPUI_PTRADD] = new OpBehavior(CPUI_PTRADD,false); inst[CPUI_PTRSUB] = new OpBehavior(CPUI_PTRSUB,false); inst[CPUI_FLOAT_EQUAL] = new OpBehaviorFloatEqual(trans); inst[CPUI_FLOAT_NOTEQUAL] = new OpBehaviorFloatNotEqual(trans); inst[CPUI_FLOAT_LESS] = new OpBehaviorFloatLess(trans); inst[CPUI_FLOAT_LESSEQUAL] = new OpBehaviorFloatLessEqual(trans); inst[CPUI_FLOAT_NAN] = new OpBehaviorFloatNan(trans); inst[CPUI_FLOAT_ADD] = new OpBehaviorFloatAdd(trans); inst[CPUI_FLOAT_DIV] = new OpBehaviorFloatDiv(trans); inst[CPUI_FLOAT_MULT] = new OpBehaviorFloatMult(trans); inst[CPUI_FLOAT_SUB] = new OpBehaviorFloatSub(trans); inst[CPUI_FLOAT_NEG] = new OpBehaviorFloatNeg(trans); inst[CPUI_FLOAT_ABS] = new OpBehaviorFloatAbs(trans); inst[CPUI_FLOAT_SQRT] = new OpBehaviorFloatSqrt(trans); inst[CPUI_FLOAT_INT2FLOAT] = new OpBehaviorFloatInt2Float(trans); inst[CPUI_FLOAT_FLOAT2FLOAT] = new OpBehaviorFloatFloat2Float(trans); inst[CPUI_FLOAT_TRUNC] = new OpBehaviorFloatTrunc(trans); inst[CPUI_FLOAT_CEIL] = new OpBehaviorFloatCeil(trans); inst[CPUI_FLOAT_FLOOR] = new OpBehaviorFloatFloor(trans); inst[CPUI_FLOAT_ROUND] = new OpBehaviorFloatRound(trans); inst[CPUI_SEGMENTOP] = new OpBehavior(CPUI_SEGMENTOP,false,true); inst[CPUI_CPOOLREF] = new 
OpBehavior(CPUI_CPOOLREF,false,true); inst[CPUI_NEW] = new OpBehavior(CPUI_NEW,false,true); inst[CPUI_INSERT] = new OpBehavior(CPUI_INSERT,false); inst[CPUI_EXTRACT] = new OpBehavior(CPUI_EXTRACT,false); inst[CPUI_POPCOUNT] = new OpBehaviorPopcount(); inst[CPUI_LZCOUNT] = new OpBehaviorLzcount(); } /// \param sizeout is the size of the output in bytes /// \param sizein is the size of the input in bytes /// \param in1 is the input value /// \return the output value uintb OpBehavior::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { string name(get_opname(opcode)); throw LowlevelError("Unary emulation unimplemented for "+name); } /// \param sizeout is the size of the output in bytes /// \param sizein is the size of the inputs in bytes /// \param in1 is the first input value /// \param in2 is the second input value /// \return the output value uintb OpBehavior::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { string name(get_opname(opcode)); throw LowlevelError("Binary emulation unimplemented for "+name); } /// \param sizeout is the size of the output in bytes /// \param sizein is the size of the inputs in bytes /// \param in1 is the first input value /// \param in2 is the second input value /// \param in3 is the third input value /// \return the output value uintb OpBehavior::evaluateTernary(int4 sizeout,int4 sizein,uintb in1,uintb in2,uintb in3) const { string name(get_opname(opcode)); throw LowlevelError("Ternary emulation unimplemented for "+name); } /// If the output value is known, recover the input value. 
/// \param sizeout is the size of the output in bytes /// \param out is the output value /// \param sizein is the size of the input in bytes /// \return the input value uintb OpBehavior::recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const { throw LowlevelError("Cannot recover input parameter without loss of information"); } /// If the output value and one of the input values is known, recover the value /// of the other input. /// \param slot is the input slot to recover /// \param sizeout is the size of the output in bytes /// \param out is the output value /// \param sizein is the size of the inputs in bytes /// \param in is the known input value /// \return the input value corresponding to the \b slot uintb OpBehavior::recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const { throw LowlevelError("Cannot recover input parameter without loss of information"); } uintb OpBehaviorCopy::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { return in1; } uintb OpBehaviorCopy::recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const { return out; } uintb OpBehaviorEqual::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = (in1 == in2) ? 1 : 0; return res; } uintb OpBehaviorNotEqual::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = (in1 != in2) ? 1 : 0; return res; } uintb OpBehaviorIntSless::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res,mask,bit1,bit2; if (sizein<=0) res = 0; else { mask = 0x80; mask <<= 8*(sizein-1); bit1 = in1 & mask; // Get the sign bits bit2 = in2 & mask; if (bit1 != bit2) res = (bit1 != 0) ? 1 : 0; else res = (in1 < in2) ? 
1 : 0; } return res; } uintb OpBehaviorIntSlessEqual::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res,mask,bit1,bit2; if (sizein<=0) res = 0; else { mask = 0x80; mask <<= 8*(sizein-1); bit1 = in1 & mask; // Get the sign bits bit2 = in2 & mask; if (bit1 != bit2) res = (bit1 != 0) ? 1 : 0; else res = (in1 <= in2) ? 1 : 0; } return res; } uintb OpBehaviorIntLess::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = (in1 < in2) ? 1 : 0; return res; } uintb OpBehaviorIntLessEqual::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = (in1 <= in2) ? 1 : 0; return res; } uintb OpBehaviorIntZext::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { return in1; } uintb OpBehaviorIntZext::recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const { uintb mask = calc_mask(sizein); if ((mask&out)!=out) throw EvaluationError("Output is not in range of zext operation"); return out; } uintb OpBehaviorIntSext::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { uintb res = sign_extend(in1,sizein,sizeout); return res; } uintb OpBehaviorIntSext::recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const { uintb masklong = calc_mask(sizeout); uintb maskshort = calc_mask(sizein); if ((out & (maskshort ^ (maskshort>>1))) == 0) { // Positive input if ((out & maskshort) != out) throw EvaluationError("Output is not in range of sext operation"); } else { // Negative input if ((out & (masklong^maskshort)) != (masklong^maskshort)) throw EvaluationError("Output is not in range of sext operation"); } return (out&maskshort); } uintb OpBehaviorIntAdd::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = (in1 + in2) & calc_mask(sizeout); return res; } uintb OpBehaviorIntAdd::recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const { uintb res = (out-in) & calc_mask(sizeout); return res; } uintb OpBehaviorIntSub::evaluateBinary(int4 sizeout,int4 
sizein,uintb in1,uintb in2) const { uintb res = (in1 - in2) & calc_mask(sizeout); return res; } uintb OpBehaviorIntSub::recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const { uintb res; if (slot==0) res = in + out; else res = in - out; res &= calc_mask(sizeout); return res; } uintb OpBehaviorIntCarry::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = (in1 > ((in1 + in2)&calc_mask(sizein))) ? 1 : 0; return res; } uintb OpBehaviorIntScarry::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = in1 + in2; uint4 a = (in1>>(sizein*8-1))&1; // Grab sign bit uint4 b = (in2>>(sizein*8-1))&1; // Grab sign bit uint4 r = (res>>(sizein*8-1))&1; // Grab sign bit r ^= a; a ^= b; a ^= 1; r &= a; return (uintb)r; } uintb OpBehaviorIntSborrow::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = in1 - in2; uint4 a = (in1>>(sizein*8-1))&1; // Grab sign bit uint4 b = (in2>>(sizein*8-1))&1; // Grab sign bit uint4 r = (res>>(sizein*8-1))&1; // Grab sign bit a ^= r; r ^= b; r ^= 1; a &= r; return (uintb)a; } uintb OpBehaviorInt2Comp::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { uintb res = uintb_negate(in1-1,sizein); return res; } uintb OpBehaviorInt2Comp::recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const { uintb res = uintb_negate(out-1,sizein); return res; } uintb OpBehaviorIntNegate::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { uintb res = uintb_negate(in1,sizein); return res; } uintb OpBehaviorIntNegate::recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const { uintb res = uintb_negate(out,sizein); return res; } uintb OpBehaviorIntXor::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = in1 ^ in2; return res; } uintb OpBehaviorIntAnd::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { uintb res = in1 & in2; return res; } uintb OpBehaviorIntOr::evaluateBinary(int4 sizeout,int4 
sizein,uintb in1,uintb in2) const { uintb res = in1 | in2; return res; } uintb OpBehaviorIntLeft::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { if (in2 >= sizeout*8){ return 0; } uintb res = (in1 << in2) & calc_mask(sizeout); return res; } uintb OpBehaviorIntLeft::recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const { if ((slot!=0) || (in >= sizeout*8)) return OpBehavior::recoverInputBinary(slot,sizeout,out,sizein,in); int4 sa = in; if (((out<<(8*sizeout-sa))&calc_mask(sizeout))!=0) throw EvaluationError("Output is not in range of left shift operation"); return out >> sa; } uintb OpBehaviorIntRight::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { if (in2 >= sizeout*8){ return 0; } uintb res = (in1&calc_mask(sizeout)) >> in2; return res; } uintb OpBehaviorIntRight::recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const { if ((slot!=0) || (in >= sizeout*8)) return OpBehavior::recoverInputBinary(slot,sizeout,out,sizein,in); int4 sa = in; if ((out>>(8*sizein-sa))!=0) throw EvaluationError("Output is not in range of right shift operation"); return out << sa; } uintb OpBehaviorIntSright::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { if (in2 >= 8*sizeout){ return signbit_negative(in1,sizein) ? 
calc_mask(sizeout) : 0; } uintb res; if (signbit_negative(in1,sizein)) { res = in1 >> in2; uintb mask = calc_mask(sizein); mask = (mask >> in2) ^ mask; res |= mask; } else { res = in1 >> in2; } return res; } uintb OpBehaviorIntSright::recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const { if ((slot!=0) || (in >= sizeout*8)) return OpBehavior::recoverInputBinary(slot,sizeout,out,sizein,in); int4 sa = in; uintb testval = out>>(sizein*8-sa-1); int4 count=0; for(int4 i=0;i<=sa;++i) { if ((testval&1)!=0) count += 1; testval >>= 1; } if (count != sa+1) throw EvaluationError("Output is not in range of right shift operation"); return out<getFloatFormat(sizein); if (format == (const FloatFormat *)0) return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2); return format->opEqual(in1,in2); } uintb OpBehaviorFloatNotEqual::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { const FloatFormat *format = translate->getFloatFormat(sizein); if (format == (const FloatFormat *)0) return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2); return format->opNotEqual(in1,in2); } uintb OpBehaviorFloatLess::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { const FloatFormat *format = translate->getFloatFormat(sizein); if (format == (const FloatFormat *)0) return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2); return format->opLess(in1,in2); } uintb OpBehaviorFloatLessEqual::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const { const FloatFormat *format = translate->getFloatFormat(sizein); if (format == (const FloatFormat *)0) return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2); return format->opLessEqual(in1,in2); } uintb OpBehaviorFloatNan::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const { const FloatFormat *format = translate->getFloatFormat(sizein); if (format == (const FloatFormat *)0) return OpBehavior::evaluateUnary(sizeout,sizein,in1); return format->opNan(in1); } uintb 
// The CPUI_FLOAT_* behaviors share one pattern: look up the FloatFormat for the
// operand size via the Translate object, fall back to the base-class
// implementation when no format is registered, otherwise delegate to FloatFormat.
// (Continuation of OpBehaviorFloatAdd::evaluateBinary.)
OpBehaviorFloatAdd::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2);

  return format->opAdd(in1,in2);
}

/// FLOAT_DIV
uintb OpBehaviorFloatDiv::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2);

  return format->opDiv(in1,in2);
}

/// FLOAT_MULT
uintb OpBehaviorFloatMult::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2);

  return format->opMult(in1,in2);
}

/// FLOAT_SUB
uintb OpBehaviorFloatSub::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateBinary(sizeout,sizein,in1,in2);

  return format->opSub(in1,in2);
}

/// FLOAT_NEG
uintb OpBehaviorFloatNeg::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opNeg(in1);
}

/// FLOAT_ABS
uintb OpBehaviorFloatAbs::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opAbs(in1);
}

/// FLOAT_SQRT
uintb OpBehaviorFloatSqrt::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opSqrt(in1);
}

/// FLOAT_INT2FLOAT: note the format is looked up by the OUTPUT size,
/// since the input is an integer of size sizein.
uintb OpBehaviorFloatInt2Float::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizeout);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opInt2Float(in1,sizein);
}

/// FLOAT_FLOAT2FLOAT: needs a format for BOTH sizes to convert between encodings.
uintb OpBehaviorFloatFloat2Float::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *formatout = translate->getFloatFormat(sizeout);
  if (formatout == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);
  const FloatFormat *formatin = translate->getFloatFormat(sizein);
  if (formatin == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return formatin->opFloat2Float(in1,*formatout);
}

/// FLOAT_TRUNC: convert floating-point to an integer of size sizeout.
uintb OpBehaviorFloatTrunc::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opTrunc(in1,sizeout);
}

/// FLOAT_CEIL
uintb OpBehaviorFloatCeil::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opCeil(in1);
}

/// FLOAT_FLOOR
uintb OpBehaviorFloatFloor::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opFloor(in1);
}

/// FLOAT_ROUND
uintb OpBehaviorFloatRound::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  const FloatFormat *format = translate->getFloatFormat(sizein);
  if (format == (const FloatFormat *)0)
    return OpBehavior::evaluateUnary(sizeout,sizein,in1);

  return format->opRound(in1);
}

/// PIECE: concatenate in1 (most significant part) with in2 (least significant part).
uintb OpBehaviorPiece::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  uintb res = ( in1<<((sizeout-sizein)*8)) | in2;
  return res;
}

// (Return type of OpBehaviorSubpiece::evaluateBinary, which continues on the next line.)
uintb
// (Continuation of OpBehaviorSubpiece::evaluateBinary.)
/// SUBPIECE: shift right by in2 BYTES and truncate to the output size.
OpBehaviorSubpiece::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  if (in2 >= sizeof(uintb)) return 0;	// Shifting beyond the precision of uintb
  uintb res = (in1>>(in2*8)) & calc_mask(sizeout);
  return res;
}

/// PTRADD: pointer plus scaled index, in1 + in2*in3, truncated to the output size.
uintb OpBehaviorPtradd::evaluateTernary(int4 sizeout,int4 sizein,uintb in1,uintb in2,uintb in3) const

{
  uintb res = (in1 + in2 * in3) & calc_mask(sizeout);
  return res;
}

/// PTRSUB: pointer plus offset, in1 + in2, truncated to the output size.
uintb OpBehaviorPtrsub::evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const

{
  uintb res = (in1 + in2) & calc_mask(sizeout);
  return res;
}

/// POPCOUNT: number of set bits in the input.
uintb OpBehaviorPopcount::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  return (uintb)popcount(in1);
}

/// LZCOUNT: leading-zero count relative to the input size; the count over the
/// full uintb is reduced by the bits of uintb lying above sizein bytes.
uintb OpBehaviorLzcount::evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const

{
  return (uintb)(count_leading_zeros(in1) - 8*(sizeof(uintb) - sizein));
}

} // End namespace ghidra



================================================
FILE: pypcode/sleigh/opbehavior.hh
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/// \file opbehavior.hh
/// \brief Classes for describing the behavior of individual p-code operations
#ifndef __OPBEHAVIOR_HH__
#define __OPBEHAVIOR_HH__

#include "error.hh"
#include "opcodes.hh"

namespace ghidra {

class Translate;		// Forward declaration

/// This exception is thrown when emulation evaluation of an operator fails for some reason.
/// This can be thrown for either forward or reverse emulation struct EvaluationError : public LowlevelError { EvaluationError(const string &s) : LowlevelError(s) {} ///< Initialize the error with an explanatory string }; /// \brief Class encapsulating the action/behavior of specific pcode opcodes /// /// At the lowest level, a pcode op is one of a small set of opcodes that /// operate on varnodes (address space, offset, size). Classes derived from /// this base class encapsulate this basic behavior for each possible opcode. /// These classes describe the most basic behaviors and include: /// * uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb int2) /// * uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) /// * uintb recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) /// * uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) class OpBehavior { OpCode opcode; ///< the internal enumeration for pcode types bool isunary; ///< true= use unary interfaces, false = use binary bool isspecial; ///< Is op not a normal unary or binary op public: OpBehavior(OpCode opc,bool isun); ///< A behavior constructor OpBehavior(OpCode opc,bool isun,bool isspec); ///< A special behavior constructor virtual ~OpBehavior(void) {} /// \brief Get the opcode for this pcode operation OpCode getOpcode(void) const; /// \brief Check if this is a special operator bool isSpecial(void) const; /// \brief Check if operator is unary bool isUnary(void) const; /// \brief Emulate the unary op-code on an input value virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const; /// \brief Emulate the binary op-code on input values virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const; /// \brief Emulate the ternary op-code on input values virtual uintb evaluateTernary(int4 sizeout,int4 sizein,uintb in1,uintb in2,uintb in3) const; /// \brief Reverse the binary op-code operation, recovering an input value virtual uintb 
recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const; /// \brief Reverse the unary op-code operation, recovering the input value virtual uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const; static void registerInstructions(vector &inst,const Translate *trans); ///< Build all pcode behaviors }; /// This kind of OpBehavior is associated with a particular opcode and is either unary or binary /// \param opc is the opcode of the behavior /// \param isun is \b true if the behavior is unary, \b false if binary inline OpBehavior::OpBehavior(OpCode opc,bool isun) { opcode = opc; isunary = isun; isspecial = false; } /// This kind of OpBehavior can be set to \b special, if it neither unary or binary. /// \param opc is the opcode of the behavior /// \param isun is \b true if the behavior is unary /// \param isspec is \b true if the behavior is neither unary or binary inline OpBehavior::OpBehavior(OpCode opc,bool isun,bool isspec) { opcode = opc; isunary = isun; isspecial = isspec; } /// There is an internal enumeration value for each type of pcode operation. /// This routine returns that value. /// \return the opcode value inline OpCode OpBehavior::getOpcode(void) const { return opcode; } /// If this function returns false, the operation is a normal unary or binary operation /// which can be evaluated calling evaluateBinary() or evaluateUnary(). 
/// Otherwise, the operation requires special handling to emulate properly
inline bool OpBehavior::isSpecial(void) const {
  return isspecial;
}

/// The operator can either be evaluated as unary or binary
/// \return \b true if the operator is unary
inline bool OpBehavior::isUnary(void) const {
  return isunary;
}

// A class for each opcode

/// CPUI_COPY behavior
class OpBehaviorCopy : public OpBehavior {
public:
  OpBehaviorCopy(void) : OpBehavior(CPUI_COPY,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
  virtual uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const;
};

/// CPUI_INT_EQUAL behavior
class OpBehaviorEqual : public OpBehavior {
public:
  OpBehaviorEqual(void) : OpBehavior(CPUI_INT_EQUAL,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_NOTEQUAL behavior
class OpBehaviorNotEqual : public OpBehavior {
public:
  OpBehaviorNotEqual(void) : OpBehavior(CPUI_INT_NOTEQUAL,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_SLESS behavior
class OpBehaviorIntSless : public OpBehavior {
public:
  OpBehaviorIntSless(void) : OpBehavior(CPUI_INT_SLESS,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_SLESSEQUAL behavior
class OpBehaviorIntSlessEqual : public OpBehavior {
public:
  OpBehaviorIntSlessEqual(void) : OpBehavior(CPUI_INT_SLESSEQUAL,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_LESS behavior
class OpBehaviorIntLess : public OpBehavior {
public:
  OpBehaviorIntLess(void) : OpBehavior(CPUI_INT_LESS,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_LESSEQUAL behavior
class OpBehaviorIntLessEqual : public OpBehavior {
public:
  OpBehaviorIntLessEqual(void): OpBehavior(CPUI_INT_LESSEQUAL,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_ZEXT behavior
class OpBehaviorIntZext : public OpBehavior {
public:
  OpBehaviorIntZext(void): OpBehavior(CPUI_INT_ZEXT,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
  virtual uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const;
};

/// CPUI_INT_SEXT behavior
class OpBehaviorIntSext : public OpBehavior {
public:
  OpBehaviorIntSext(void): OpBehavior(CPUI_INT_SEXT,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
  virtual uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const;
};

/// CPUI_INT_ADD behavior
class OpBehaviorIntAdd : public OpBehavior {
public:
  OpBehaviorIntAdd(void): OpBehavior(CPUI_INT_ADD,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
  virtual uintb recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const;
};

/// CPUI_INT_SUB behavior
class OpBehaviorIntSub : public OpBehavior {
public:
  OpBehaviorIntSub(void): OpBehavior(CPUI_INT_SUB,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
  virtual uintb recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const;
};

/// CPUI_INT_CARRY behavior
class OpBehaviorIntCarry : public OpBehavior {
public:
  OpBehaviorIntCarry(void): OpBehavior(CPUI_INT_CARRY,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_SCARRY behavior
class OpBehaviorIntScarry : public OpBehavior {
public:
  OpBehaviorIntScarry(void): OpBehavior(CPUI_INT_SCARRY,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_SBORROW behavior
class OpBehaviorIntSborrow : public OpBehavior {
public:
  OpBehaviorIntSborrow(void): OpBehavior(CPUI_INT_SBORROW,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_2COMP behavior
class OpBehaviorInt2Comp : public OpBehavior {
public:
  OpBehaviorInt2Comp(void): OpBehavior(CPUI_INT_2COMP,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
  virtual uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const;
};

/// CPUI_INT_NEGATE behavior
class OpBehaviorIntNegate : public OpBehavior {
public:
  OpBehaviorIntNegate(void): OpBehavior(CPUI_INT_NEGATE,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
  virtual uintb recoverInputUnary(int4 sizeout,uintb out,int4 sizein) const;
};

/// CPUI_INT_XOR behavior
class OpBehaviorIntXor : public OpBehavior {
public:
  OpBehaviorIntXor(void): OpBehavior(CPUI_INT_XOR,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_AND behavior
class OpBehaviorIntAnd : public OpBehavior {
public:
  OpBehaviorIntAnd(void): OpBehavior(CPUI_INT_AND,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_OR behavior
class OpBehaviorIntOr : public OpBehavior {
public:
  OpBehaviorIntOr(void): OpBehavior(CPUI_INT_OR,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_LEFT behavior
class OpBehaviorIntLeft : public OpBehavior {
public:
  OpBehaviorIntLeft(void): OpBehavior(CPUI_INT_LEFT,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
  virtual uintb recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const;
};

/// CPUI_INT_RIGHT behavior
class OpBehaviorIntRight : public OpBehavior {
public:
  OpBehaviorIntRight(void): OpBehavior(CPUI_INT_RIGHT,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
  virtual uintb recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const;
};

/// CPUI_INT_SRIGHT behavior
class OpBehaviorIntSright : public OpBehavior {
public:
  OpBehaviorIntSright(void): OpBehavior(CPUI_INT_SRIGHT,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
  virtual uintb recoverInputBinary(int4 slot,int4 sizeout,uintb out,int4 sizein,uintb in) const;
};

/// CPUI_INT_MULT behavior
class OpBehaviorIntMult : public OpBehavior {
public:
  OpBehaviorIntMult(void): OpBehavior(CPUI_INT_MULT,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_DIV behavior
class OpBehaviorIntDiv : public OpBehavior {
public:
  OpBehaviorIntDiv(void): OpBehavior(CPUI_INT_DIV,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_SDIV behavior
class OpBehaviorIntSdiv : public OpBehavior {
public:
  OpBehaviorIntSdiv(void): OpBehavior(CPUI_INT_SDIV,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_REM behavior
class OpBehaviorIntRem : public OpBehavior {
public:
  OpBehaviorIntRem(void): OpBehavior(CPUI_INT_REM,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_INT_SREM behavior
class OpBehaviorIntSrem : public OpBehavior {
public:
  OpBehaviorIntSrem(void): OpBehavior(CPUI_INT_SREM,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_BOOL_NEGATE behavior
class OpBehaviorBoolNegate : public OpBehavior {
public:
  OpBehaviorBoolNegate(void): OpBehavior(CPUI_BOOL_NEGATE,true) {}	///< Constructor
  virtual uintb
evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_BOOL_XOR behavior
class OpBehaviorBoolXor : public OpBehavior {
public:
  OpBehaviorBoolXor(void): OpBehavior(CPUI_BOOL_XOR,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_BOOL_AND behavior
class OpBehaviorBoolAnd : public OpBehavior {
public:
  OpBehaviorBoolAnd(void): OpBehavior(CPUI_BOOL_AND,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_BOOL_OR behavior
class OpBehaviorBoolOr : public OpBehavior {
public:
  OpBehaviorBoolOr(void): OpBehavior(CPUI_BOOL_OR,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

// The CPUI_FLOAT_* behaviors each hold a Translate pointer so the
// implementation can look up the FloatFormat for the operand size.

/// CPUI_FLOAT_EQUAL behavior
class OpBehaviorFloatEqual : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatEqual(const Translate *trans): OpBehavior(CPUI_FLOAT_EQUAL,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_NOTEQUAL behavior
class OpBehaviorFloatNotEqual : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatNotEqual(const Translate *trans): OpBehavior(CPUI_FLOAT_NOTEQUAL,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_LESS behavior
class OpBehaviorFloatLess : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatLess(const Translate *trans) : OpBehavior(CPUI_FLOAT_LESS,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_LESSEQUAL behavior
class OpBehaviorFloatLessEqual : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatLessEqual(const Translate *trans) : OpBehavior(CPUI_FLOAT_LESSEQUAL,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_NAN behavior
class OpBehaviorFloatNan : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatNan(const Translate *trans) : OpBehavior(CPUI_FLOAT_NAN,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_ADD behavior
class OpBehaviorFloatAdd : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatAdd(const Translate *trans) : OpBehavior(CPUI_FLOAT_ADD,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_DIV behavior
class OpBehaviorFloatDiv : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatDiv(const Translate *trans) : OpBehavior(CPUI_FLOAT_DIV,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_MULT behavior
class OpBehaviorFloatMult : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatMult(const Translate *trans) : OpBehavior(CPUI_FLOAT_MULT,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_SUB behavior
class OpBehaviorFloatSub : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatSub(const Translate *trans) : OpBehavior(CPUI_FLOAT_SUB,false) { translate = trans; }	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_FLOAT_NEG behavior
class OpBehaviorFloatNeg : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatNeg(const Translate *trans) : OpBehavior(CPUI_FLOAT_NEG,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_ABS behavior
class OpBehaviorFloatAbs : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatAbs(const Translate *trans) : OpBehavior(CPUI_FLOAT_ABS,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_SQRT behavior
class OpBehaviorFloatSqrt : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatSqrt(const Translate *trans) : OpBehavior(CPUI_FLOAT_SQRT,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_INT2FLOAT behavior
class OpBehaviorFloatInt2Float : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatInt2Float(const Translate *trans) : OpBehavior(CPUI_FLOAT_INT2FLOAT,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_FLOAT2FLOAT behavior
class OpBehaviorFloatFloat2Float : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatFloat2Float(const Translate *trans) : OpBehavior(CPUI_FLOAT_FLOAT2FLOAT,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_TRUNC behavior
class OpBehaviorFloatTrunc : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatTrunc(const Translate *trans) : OpBehavior(CPUI_FLOAT_TRUNC,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_CEIL behavior
class OpBehaviorFloatCeil : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatCeil(const Translate *trans) : OpBehavior(CPUI_FLOAT_CEIL,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_FLOOR behavior
class OpBehaviorFloatFloor : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatFloor(const Translate *trans) : OpBehavior(CPUI_FLOAT_FLOOR,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_FLOAT_ROUND behavior
class OpBehaviorFloatRound : public OpBehavior {
  const Translate *translate;		///< Translate object for recovering float format
public:
  OpBehaviorFloatRound(const Translate *trans) : OpBehavior(CPUI_FLOAT_ROUND,true) { translate = trans; }	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_PIECE behavior
class OpBehaviorPiece : public OpBehavior {
public:
  OpBehaviorPiece(void) : OpBehavior(CPUI_PIECE,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_SUBPIECE behavior
class OpBehaviorSubpiece : public OpBehavior {
public:
  OpBehaviorSubpiece(void) : OpBehavior(CPUI_SUBPIECE,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_PTRADD behavior
class OpBehaviorPtradd : public OpBehavior {
public:
  OpBehaviorPtradd(void) : OpBehavior(CPUI_PTRADD,false) {}	///< Constructor
  virtual uintb evaluateTernary(int4 sizeout,int4 sizein,uintb in1,uintb in2,uintb in3) const;
};

/// CPUI_PTRSUB behavior
class OpBehaviorPtrsub : public OpBehavior {
public:
  OpBehaviorPtrsub(void) : OpBehavior(CPUI_PTRSUB,false) {}	///< Constructor
  virtual uintb evaluateBinary(int4 sizeout,int4 sizein,uintb in1,uintb in2) const;
};

/// CPUI_POPCOUNT behavior
class OpBehaviorPopcount : public OpBehavior {
public:
  OpBehaviorPopcount(void) : OpBehavior(CPUI_POPCOUNT,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

/// CPUI_LZCOUNT behavior
class OpBehaviorLzcount : public OpBehavior {
public:
  OpBehaviorLzcount(void) : OpBehavior(CPUI_LZCOUNT,true) {}	///< Constructor
  virtual uintb evaluateUnary(int4 sizeout,int4 sizein,uintb in1) const;
};

} // End namespace ghidra

#endif



================================================
FILE: pypcode/sleigh/opcodes.cc
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
#include "opcodes.hh"
#include "types.h"

namespace ghidra {

/// \brief Names of operations associated with their opcode number
///
/// Some of the names have been replaced with special placeholder
/// ops for the sleigh compiler and interpreter these are as follows:
///  - MULTIEQUAL = BUILD
///  - INDIRECT = DELAY_SLOT
///  - PTRADD = LABEL
///  - PTRSUB = CROSSBUILD
static const char *opcode_name[] = {
  "BLANK", "COPY", "LOAD", "STORE",
  "BRANCH", "CBRANCH", "BRANCHIND", "CALL",
  "CALLIND", "CALLOTHER", "RETURN", "INT_EQUAL",
  "INT_NOTEQUAL", "INT_SLESS", "INT_SLESSEQUAL", "INT_LESS",
  "INT_LESSEQUAL", "INT_ZEXT", "INT_SEXT", "INT_ADD",
  "INT_SUB", "INT_CARRY", "INT_SCARRY", "INT_SBORROW",
  "INT_2COMP", "INT_NEGATE", "INT_XOR", "INT_AND",
  "INT_OR", "INT_LEFT", "INT_RIGHT", "INT_SRIGHT",
  "INT_MULT", "INT_DIV", "INT_SDIV", "INT_REM",
  "INT_SREM", "BOOL_NEGATE", "BOOL_XOR", "BOOL_AND",
  "BOOL_OR", "FLOAT_EQUAL", "FLOAT_NOTEQUAL", "FLOAT_LESS",
  "FLOAT_LESSEQUAL", "UNUSED1", "FLOAT_NAN", "FLOAT_ADD",
  "FLOAT_DIV", "FLOAT_MULT", "FLOAT_SUB", "FLOAT_NEG",
  "FLOAT_ABS", "FLOAT_SQRT", "INT2FLOAT", "FLOAT2FLOAT",
  "TRUNC", "CEIL", "FLOOR", "ROUND",
  "BUILD", "DELAY_SLOT", "PIECE", "SUBPIECE",
  "CAST", "LABEL", "CROSSBUILD", "SEGMENTOP",
  "CPOOLREF", "NEW", "INSERT", "EXTRACT",
  "POPCOUNT", "LZCOUNT"
};

// Indices into opcode_name, sorted so opcode_name[opcode_indices[i]] is in
// alphabetical order; used by the binary search in get_opcode().
static const int4 opcode_indices[] = {
  0, 39, 37, 40, 38, 4, 6, 60, 7, 8, 9, 64, 5, 57, 1, 68,
  66, 61, 71, 55, 52, 47, 48, 41, 43, 44, 49, 46, 51, 42, 53, 50,
  58, 70, 54, 24, 19, 27, 21, 33, 11, 29, 15, 16, 32, 25, 12, 28,
  35, 30, 23, 22, 34, 18, 13, 14, 36, 31, 20, 26, 17, 65, 2, 73,
  69, 62, 72, 10, 59, 67, 3, 63, 56, 45
};

/// \param opc is an OpCode value
/// \return the name of the operation as a string
const char *get_opname(OpCode opc)

{
  return opcode_name[opc];
}

/// \param nm is the name of an operation
/// \return the corresponding OpCode value
OpCode get_opcode(const string &nm)

{
  int4 min = 1;			// Don't include BLANK
  int4 max = CPUI_MAX-1;
  int4 cur,ind;

  while(min <= max) {		// Binary search
    cur = (min + max)/2;
    ind = opcode_indices[cur];	// Get opcode in cur's sort slot
    if (opcode_name[ind] < nm)
      min = cur + 1;		// Everything equal or below cur is less
    else if (opcode_name[ind] > nm)
      max = cur - 1;		// Everything equal or above cur is greater
    else
      return (OpCode)ind;	// Found the match
  }
  return (OpCode)0;		// Name isn't an op
}

/// Every comparison operation has a complementary form that produces
/// the opposite output on the same inputs. Set \b reorder to true if
/// the complimentary operation involves reordering the input parameters.
/// \param opc is the OpCode to complement
/// \param reorder is set to \b true if the inputs need to be reordered
/// \return the complementary OpCode or CPUI_MAX if not given a comparison operation
OpCode get_booleanflip(OpCode opc,bool &reorder)

{
  switch(opc) {
  case CPUI_INT_EQUAL:
    reorder = false;
    return CPUI_INT_NOTEQUAL;
  case CPUI_INT_NOTEQUAL:
    reorder = false;
    return CPUI_INT_EQUAL;
  case CPUI_INT_SLESS:
    reorder = true;
    return CPUI_INT_SLESSEQUAL;
  case CPUI_INT_SLESSEQUAL:
    reorder = true;
    return CPUI_INT_SLESS;
  case CPUI_INT_LESS:
    reorder = true;
    return CPUI_INT_LESSEQUAL;
  case CPUI_INT_LESSEQUAL:
    reorder = true;
    return CPUI_INT_LESS;
  case CPUI_BOOL_NEGATE:
    reorder = false;
    return CPUI_COPY;
  case CPUI_FLOAT_EQUAL:
    reorder = false;
    return CPUI_FLOAT_NOTEQUAL;
  case CPUI_FLOAT_NOTEQUAL:
    reorder = false;
    return CPUI_FLOAT_EQUAL;
  case CPUI_FLOAT_LESS:
    reorder = true;
    return CPUI_FLOAT_LESSEQUAL;
  case CPUI_FLOAT_LESSEQUAL:
    reorder = true;
    return CPUI_FLOAT_LESS;
  default:
    break;
  }
  return CPUI_MAX;
}

} // End namespace ghidra



================================================
FILE: pypcode/sleigh/opcodes.hh
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file opcodes.hh /// \brief All the individual p-code operations #ifndef __OPCODES_HH__ #define __OPCODES_HH__ #include namespace ghidra { using std::string; /// \brief The op-code defining a specific p-code operation (PcodeOp) /// /// These break up into categories: /// - Branching operations /// - Load and Store /// - Comparison operations /// - Arithmetic operations /// - Logical operations /// - Extension and truncation operations enum OpCode { CPUI_IMARK = 0, CPUI_COPY = 1, ///< Copy one operand to another CPUI_LOAD = 2, ///< Load from a pointer into a specified address space CPUI_STORE = 3, ///< Store at a pointer into a specified address space CPUI_BRANCH = 4, ///< Always branch CPUI_CBRANCH = 5, ///< Conditional branch CPUI_BRANCHIND = 6, ///< Indirect branch (jumptable) CPUI_CALL = 7, ///< Call to an absolute address CPUI_CALLIND = 8, ///< Call through an indirect address CPUI_CALLOTHER = 9, ///< User-defined operation CPUI_RETURN = 10, ///< Return from subroutine // Integer/bit operations CPUI_INT_EQUAL = 11, ///< Integer comparison, equality (==) CPUI_INT_NOTEQUAL = 12, ///< Integer comparison, in-equality (!=) CPUI_INT_SLESS = 13, ///< Integer comparison, signed less-than (<) CPUI_INT_SLESSEQUAL = 14, ///< Integer comparison, signed less-than-or-equal (<=) CPUI_INT_LESS = 15, ///< Integer comparison, unsigned less-than (<) // This also indicates a borrow on unsigned substraction CPUI_INT_LESSEQUAL = 16, ///< Integer comparison, unsigned less-than-or-equal (<=) CPUI_INT_ZEXT = 17, ///< Zero extension CPUI_INT_SEXT = 18, ///< Sign extension 
CPUI_INT_ADD = 19, ///< Addition, signed or unsigned (+) CPUI_INT_SUB = 20, ///< Subtraction, signed or unsigned (-) CPUI_INT_CARRY = 21, ///< Test for unsigned carry CPUI_INT_SCARRY = 22, ///< Test for signed carry CPUI_INT_SBORROW = 23, ///< Test for signed borrow CPUI_INT_2COMP = 24, ///< Twos complement CPUI_INT_NEGATE = 25, ///< Logical/bitwise negation (~) CPUI_INT_XOR = 26, ///< Logical/bitwise exclusive-or (^) CPUI_INT_AND = 27, ///< Logical/bitwise and (&) CPUI_INT_OR = 28, ///< Logical/bitwise or (|) CPUI_INT_LEFT = 29, ///< Left shift (<<) CPUI_INT_RIGHT = 30, ///< Right shift, logical (>>) CPUI_INT_SRIGHT = 31, ///< Right shift, arithmetic (>>) CPUI_INT_MULT = 32, ///< Integer multiplication, signed and unsigned (*) CPUI_INT_DIV = 33, ///< Integer division, unsigned (/) CPUI_INT_SDIV = 34, ///< Integer division, signed (/) CPUI_INT_REM = 35, ///< Remainder/modulo, unsigned (%) CPUI_INT_SREM = 36, ///< Remainder/modulo, signed (%) CPUI_BOOL_NEGATE = 37, ///< Boolean negate (!) 
CPUI_BOOL_XOR = 38, ///< Boolean exclusive-or (^^) CPUI_BOOL_AND = 39, ///< Boolean and (&&) CPUI_BOOL_OR = 40, ///< Boolean or (||) // Floating point operations CPUI_FLOAT_EQUAL = 41, ///< Floating-point comparison, equality (==) CPUI_FLOAT_NOTEQUAL = 42, ///< Floating-point comparison, in-equality (!=) CPUI_FLOAT_LESS = 43, ///< Floating-point comparison, less-than (<) CPUI_FLOAT_LESSEQUAL = 44, ///< Floating-point comparison, less-than-or-equal (<=) // Slot 45 is currently unused CPUI_FLOAT_NAN = 46, ///< Not-a-number test (NaN) CPUI_FLOAT_ADD = 47, ///< Floating-point addition (+) CPUI_FLOAT_DIV = 48, ///< Floating-point division (/) CPUI_FLOAT_MULT = 49, ///< Floating-point multiplication (*) CPUI_FLOAT_SUB = 50, ///< Floating-point subtraction (-) CPUI_FLOAT_NEG = 51, ///< Floating-point negation (-) CPUI_FLOAT_ABS = 52, ///< Floating-point absolute value (abs) CPUI_FLOAT_SQRT = 53, ///< Floating-point square root (sqrt) CPUI_FLOAT_INT2FLOAT = 54, ///< Convert an integer to a floating-point CPUI_FLOAT_FLOAT2FLOAT = 55, ///< Convert between different floating-point sizes CPUI_FLOAT_TRUNC = 56, ///< Round towards zero CPUI_FLOAT_CEIL = 57, ///< Round towards +infinity CPUI_FLOAT_FLOOR = 58, ///< Round towards -infinity CPUI_FLOAT_ROUND = 59, ///< Round towards nearest // Internal opcodes for simplification. Not // typically generated in a direct translation. 
// Data-flow operations CPUI_MULTIEQUAL = 60, ///< Phi-node operator CPUI_INDIRECT = 61, ///< Copy with an indirect effect CPUI_PIECE = 62, ///< Concatenate CPUI_SUBPIECE = 63, ///< Truncate CPUI_CAST = 64, ///< Cast from one data-type to another CPUI_PTRADD = 65, ///< Index into an array ([]) CPUI_PTRSUB = 66, ///< Drill down to a sub-field (->) CPUI_SEGMENTOP = 67, ///< Look-up a \e segmented address CPUI_CPOOLREF = 68, ///< Recover a value from the \e constant \e pool CPUI_NEW = 69, ///< Allocate a new object (new) CPUI_INSERT = 70, ///< Insert a bit-range CPUI_EXTRACT = 71, ///< Extract a bit-range CPUI_POPCOUNT = 72, ///< Count the 1-bits CPUI_LZCOUNT = 73, ///< Count the leading 0-bits CPUI_MAX = 74 ///< Value indicating the end of the op-code values }; extern const char *get_opname(OpCode opc); ///< Convert an OpCode to the name as a string extern OpCode get_opcode(const string &nm); ///< Convert a name string to the matching OpCode extern OpCode get_booleanflip(OpCode opc,bool &reorder); ///< Get the complementary OpCode } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/partmap.hh ================================================ /* ### * IP: GHIDRA * NOTE: very generic partition container * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /// \file partmap.hh /// \brief The partmap<> template mapping a linear space to value objects #ifndef __PARTMAP_HH__ #define __PARTMAP_HH__ #include namespace ghidra { /// \brief A map from a linear space to value objects /// /// The partmap is a template class taking: /// - _linetype which represents an element in the linear space /// - _valuetype which are the objects that linear space maps to /// /// Let R be the linear space with an ordering, and let { a_i } be a finite set /// of points in R. /// The a_i partition R into a finite number of disjoint sets /// { x : x < a_0 }, { x : x>=a_0 && x < a_1 }, ... /// { x : x>=a_i && x < a_i+1 }, ... /// { x : x>=a_n } /// /// A partmap maps elements of this partition to _valuetype objects /// A _valuetype is then associated with any element x in R by /// looking up the value associated with the partition element /// containing x. /// /// The map is defined by starting with a \e default value object that applies /// to the whole linear space. Then \e split points are introduced, one at a time, /// in the linear space. At each split point, the associated value object is split /// into two objects. At any point the value object describing some part of the linear space /// can be changed. 
template class partmap { public: typedef std::map<_linetype,_valuetype> maptype; ///< Defining the map from split points to value objects typedef typename maptype::iterator iterator; ///< A partmap iterator is an iterator into the map typedef typename maptype::const_iterator const_iterator; ///< A constant iterator private: maptype database; ///< Map from linear split points to the value objects _valuetype defaultvalue; ///< The value object \e before the first split point public: _valuetype &getValue(const _linetype &pnt); ///< Get the value object at a point const _valuetype &getValue(const _linetype &pnt) const; ///< Get the value object at a point const _valuetype &bounds(const _linetype &pnt,_linetype &before,_linetype &after,int &valid) const; _valuetype &split(const _linetype &pnt); ///< Introduce a new split point const _valuetype &defaultValue(void) const { return defaultvalue; } ///< Get the default value object _valuetype &defaultValue(void) { return defaultvalue; } ///< Get the default value object _valuetype & clearRange(const _linetype &pnt1,const _linetype &pnt2); ///< Clear a range of split points const_iterator begin(void) const { return database.begin(); } ///< Beginning of split points const_iterator end(void) const { return database.end(); } ///< End of split points iterator begin(void) { return database.begin(); } ///< Beginning of split points iterator end(void) { return database.end(); } ///< End of split points const_iterator begin(const _linetype &pnt) const { return database.lower_bound(pnt); } ///< Get first split point after given point iterator begin(const _linetype &pnt) { return database.lower_bound(pnt); } ///< Get first split point after given point void clear(void) { database.clear(); } ///< Clear all split points bool empty(void) const { return database.empty(); } ///< Return \b true if there are no split points }; /// Look up the first split point coming before the given point /// and return the value object it maps to. 
If there is no earlier split point /// return the default value. /// \param pnt is the given point in the linear space /// \return the corresponding value object template _valuetype &partmap<_linetype,_valuetype>:: getValue(const _linetype &pnt) { iterator iter; iter = database.upper_bound(pnt); if (iter == database.begin()) return defaultvalue; --iter; return (*iter).second; } /// Look up the first split point coming before the given point /// and return the value object it maps to. If there is no earlier split point /// return the default value. /// \param pnt is the given point in the linear space /// \return the corresponding value object template const _valuetype &partmap<_linetype,_valuetype>:: getValue(const _linetype &pnt) const { const_iterator iter; iter = database.upper_bound(pnt); if (iter == database.begin()) return defaultvalue; --iter; return (*iter).second; } /// Add (if not already present) a point to the linear partition. /// \param pnt is the (new) point /// \return the (possibly) new value object for the range starting at the point template _valuetype &partmap<_linetype,_valuetype>:: split(const _linetype &pnt) { iterator iter; iter = database.upper_bound(pnt); if (iter != database.begin()) { --iter; if ((*iter).first == pnt) // point matches exactly return (*iter).second; // Return old ref _valuetype &newref( database[pnt] ); // Create new ref at point newref = (*iter).second; // Copy of original partition value return newref; } _valuetype &newref( database[pnt] ); // Create new ref at point newref = defaultvalue; // Copy of defaultvalue return newref; } /// Split points are introduced at the two boundary points of the given range, /// and all split points in between are removed. The value object that was initially /// present at the left-most boundary point becomes the value (as a copy) for the whole range. 
/// \param pnt1 is the left-most boundary point of the range /// \param pnt2 is the right-most boundary point /// \return the value object assigned to the range template _valuetype &partmap<_linetype,_valuetype>:: clearRange(const _linetype &pnt1,const _linetype &pnt2) { split(pnt1); split(pnt2); iterator beg = begin(pnt1); iterator end = begin(pnt2); _valuetype &ref( (*beg).second ); ++beg; database.erase(beg,end); return ref; } /// \brief Get the value object for a given point and return the range over which the value object applies /// /// Pass back a \b before and \b after point defining the maximal range over which the value applies. /// An additional validity code is passed back describing which of the bounding points apply: /// - 0 if both bounds apply /// - 1 if there is no lower bound /// - 2 if there is no upper bound, /// - 3 if there is neither a lower or upper bound /// \param pnt is the given point around which to compute the range /// \param before is a reference to the passed back lower bound /// \param after is a reference to the passed back upper bound /// \param valid is a reference to the passed back validity code /// \return the corresponding value object template const _valuetype &partmap<_linetype,_valuetype>:: bounds(const _linetype &pnt,_linetype &before,_linetype &after,int &valid) const { if (database.empty()) { valid = 3; return defaultvalue; } const_iterator iter,enditer; enditer = database.upper_bound(pnt); if (enditer != database.begin()) { iter = enditer; --iter; before = (*iter).first; if (enditer == database.end()) valid = 2; // No upperbound else { after = (*enditer).first; valid = 0; // Fully bounded } return (*iter).second; } valid = 1; // No lowerbound after = (*enditer).first; return defaultvalue; } } // End namespace ghidra #endif #if 0 #include using std::cout; int main(int argc,char **argv) { partmap data; data.defaultValue() = 0; data.split(5) = 5; data.split(2) = 2; data.split(3) = 4; data.split(3) = 3; cout << 
data.getValue(6) << endl; cout << data.getValue(8) << endl; cout << data.getValue(4) << endl; cout << data.getValue(1) << endl; partmap::const_iterator iter; iter = data.begin(3); while(iter!=data.end()) { cout << (*iter).second << endl; ++iter; } } #endif ================================================ FILE: pypcode/sleigh/pcodecompile.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "pcodecompile.hh" namespace ghidra { string Location::format(void) const { ostringstream s; s << filename << ":" << dec << lineno; return s.str(); } ExprTree::ExprTree(VarnodeTpl *vn) { outvn = vn; ops = new vector; } ExprTree::ExprTree(OpTpl *op) { ops = new vector; ops->push_back(op); if (op->getOut() != (VarnodeTpl *)0) outvn = new VarnodeTpl(*op->getOut()); else outvn = (VarnodeTpl *)0; } ExprTree::~ExprTree(void) { if (outvn != (VarnodeTpl *)0) delete outvn; if (ops != (vector *)0) { for(int4 i=0;isize();++i) delete (*ops)[i]; delete ops; } } vector *ExprTree::appendParams(OpTpl *op,vector *param) { // Create op expression with entire list of expression // inputs vector *res = new vector; for(int4 i=0;isize();++i) { res->insert(res->end(),(*param)[i]->ops->begin(),(*param)[i]->ops->end()); (*param)[i]->ops->clear(); op->addInput((*param)[i]->outvn); (*param)[i]->outvn = (VarnodeTpl *)0; delete (*param)[i]; } res->push_back(op); delete param; return res; } vector *ExprTree::toVector(ExprTree *expr) { // Grab the 
op vector and delete the output expression vector *res = expr->ops; expr->ops = (vector *)0; delete expr; return res; } void ExprTree::setOutput(VarnodeTpl *newout) { // Force the output of the expression to be new out // If the original output is named, this requires // an extra COPY op OpTpl *op; if (outvn == (VarnodeTpl *)0) throw SleighError("Expression has no output"); if (outvn->isUnnamed()) { delete outvn; op = ops->back(); op->clearOutput(); op->setOutput(newout); } else { op = new OpTpl(CPUI_COPY); op->addInput(outvn); op->setOutput(newout); ops->push_back(op); } outvn = new VarnodeTpl(*newout); } void PcodeCompile::force_size(VarnodeTpl *vt,const ConstTpl &size,const vector &ops) { if ((vt->getSize().getType()!=ConstTpl::real)||(vt->getSize().getReal() != 0)) return; // Size already exists vt->setSize(size); if (!vt->isLocalTemp()) return; // If the variable is a local temporary // The size may need to be propagated to the various // uses of the variable OpTpl *op; VarnodeTpl *vn; for(int4 i=0;igetOut(); if ((vn!=(VarnodeTpl *)0)&&(vn->isLocalTemp())) { if (vn->getOffset() == vt->getOffset()) { if ((size.getType() == ConstTpl::real)&&(vn->getSize().getType() == ConstTpl::real)&& (vn->getSize().getReal() != 0) && (vn->getSize().getReal() != size.getReal())) throw SleighError("Localtemp size mismatch"); vn->setSize(size); } } for(int4 j=0;jnumInput();++j) { vn = op->getIn(j); if (vn->isLocalTemp()&&(vn->getOffset()==vt->getOffset())) { if ((size.getType() == ConstTpl::real)&&(vn->getSize().getType() == ConstTpl::real)&& (vn->getSize().getReal() != 0) && (vn->getSize().getReal() != size.getReal())) throw SleighError("Localtemp size mismatch"); vn->setSize(size); } } } } void PcodeCompile::matchSize(int4 j,OpTpl *op,bool inputonly,const vector &ops) { // Find something to fill in zero size varnode // j is the slot we are trying to fill (-1=output) // Don't check output for non-zero if inputonly is true VarnodeTpl *match = (VarnodeTpl *)0; VarnodeTpl *vt; int4 
i,inputsize; vt = (j==-1) ? op->getOut() : op->getIn(j); if (!inputonly) { if (op->getOut() != (VarnodeTpl *)0) if (!op->getOut()->isZeroSize()) match = op->getOut(); } inputsize = op->numInput(); for(i=0;igetIn(i)->isZeroSize()) continue; match = op->getIn(i); } if (match != (VarnodeTpl *)0) force_size(vt,match->getSize(),ops); } void PcodeCompile::fillinZero(OpTpl *op,const vector &ops) { // Try to get rid of zero size varnodes in op // Right now this is written assuming operands for the constructor are // are built before any other pcode in the constructor is generated int4 inputsize,i; switch(op->getOpcode()) { case CPUI_COPY: // Instructions where all inputs and output are same size case CPUI_INT_ADD: case CPUI_INT_SUB: case CPUI_INT_2COMP: case CPUI_INT_NEGATE: case CPUI_INT_XOR: case CPUI_INT_AND: case CPUI_INT_OR: case CPUI_INT_MULT: case CPUI_INT_DIV: case CPUI_INT_SDIV: case CPUI_INT_REM: case CPUI_INT_SREM: case CPUI_FLOAT_ADD: case CPUI_FLOAT_DIV: case CPUI_FLOAT_MULT: case CPUI_FLOAT_SUB: case CPUI_FLOAT_NEG: case CPUI_FLOAT_ABS: case CPUI_FLOAT_SQRT: case CPUI_FLOAT_CEIL: case CPUI_FLOAT_FLOOR: case CPUI_FLOAT_ROUND: if ((op->getOut()!=(VarnodeTpl *)0)&&(op->getOut()->isZeroSize())) matchSize(-1,op,false,ops); inputsize = op->numInput(); for(i=0;igetIn(i)->isZeroSize()) matchSize(i,op,false,ops); break; case CPUI_INT_EQUAL: // Instructions with bool output case CPUI_INT_NOTEQUAL: case CPUI_INT_SLESS: case CPUI_INT_SLESSEQUAL: case CPUI_INT_LESS: case CPUI_INT_LESSEQUAL: case CPUI_INT_CARRY: case CPUI_INT_SCARRY: case CPUI_INT_SBORROW: case CPUI_FLOAT_EQUAL: case CPUI_FLOAT_NOTEQUAL: case CPUI_FLOAT_LESS: case CPUI_FLOAT_LESSEQUAL: case CPUI_FLOAT_NAN: case CPUI_BOOL_NEGATE: case CPUI_BOOL_XOR: case CPUI_BOOL_AND: case CPUI_BOOL_OR: if (op->getOut()->isZeroSize()) force_size(op->getOut(),ConstTpl(ConstTpl::real,1),ops); inputsize = op->numInput(); for(i=0;igetIn(i)->isZeroSize()) matchSize(i,op,true,ops); break; // The shift amount does not necessarily 
have to be the same size // But if no size is specified, assume it is the same size case CPUI_INT_LEFT: case CPUI_INT_RIGHT: case CPUI_INT_SRIGHT: if (op->getOut()->isZeroSize()) { if (!op->getIn(0)->isZeroSize()) force_size(op->getOut(),op->getIn(0)->getSize(),ops); } else if (op->getIn(0)->isZeroSize()) force_size(op->getIn(0),op->getOut()->getSize(),ops); // fallthru to subpiece constant check case CPUI_SUBPIECE: if (op->getIn(1)->isZeroSize()) force_size(op->getIn(1),ConstTpl(ConstTpl::real,4),ops); break; case CPUI_CPOOLREF: if (op->getOut()->isZeroSize() && (!op->getIn(0)->isZeroSize())) force_size(op->getOut(),op->getIn(0)->getSize(),ops); if (op->getIn(0)->isZeroSize() && (!op->getOut()->isZeroSize())) force_size(op->getIn(0),op->getOut()->getSize(),ops); for(i=1;inumInput();++i) { if (op->getIn(i)->isZeroSize()) force_size(op->getIn(i),ConstTpl(ConstTpl::real,sizeof(uintb)),ops); } break; default: break; } } bool PcodeCompile::propagateSize(ConstructTpl *ct) { // Fill in size for varnodes with size 0 // Return first OpTpl with a size 0 varnode // that cannot be filled in or NULL otherwise vector zerovec,zerovec2; vector::const_iterator iter; int4 lastsize; for(iter=ct->getOpvec().begin();iter!=ct->getOpvec().end();++iter) if ((*iter)->isZeroSize()) { fillinZero(*iter,ct->getOpvec()); if ((*iter)->isZeroSize()) zerovec.push_back(*iter); } lastsize = zerovec.size() + 1; while( zerovec.size() < lastsize ) { lastsize = zerovec.size(); zerovec2.clear(); for(iter=zerovec.begin();iter!=zerovec.end();++iter) { fillinZero(*iter,ct->getOpvec()); if ((*iter)->isZeroSize()) zerovec2.push_back( *iter ); } zerovec = zerovec2; } if ( lastsize != 0 ) return false; return true; } VarnodeTpl *PcodeCompile::buildTemporary(void) { // Build temporary variable (with zerosize) VarnodeTpl *res = new VarnodeTpl(ConstTpl(uniqspace), ConstTpl(ConstTpl::real,allocateTemp()), ConstTpl(ConstTpl::real,0)); res->setUnnamed(true); return res; } LabelSymbol 
*PcodeCompile::defineLabel(string *name) { // Create a label symbol LabelSymbol *labsym = new LabelSymbol(*name,local_labelcount++); delete name; addSymbol(labsym); // Add symbol to local scope return labsym; } vector *PcodeCompile::placeLabel(LabelSymbol *labsym) { // Create placeholder OpTpl for a label if (labsym->isPlaced()) { reportError(getLocation(labsym), "Label '" + labsym->getName() + "' is placed more than once"); } labsym->setPlaced(); vector *res = new vector; OpTpl *op = new OpTpl(LABELBUILD); VarnodeTpl *idvn = new VarnodeTpl(ConstTpl(constantspace), ConstTpl(ConstTpl::real,labsym->getIndex()), ConstTpl(ConstTpl::real,4)); op->addInput(idvn); res->push_back(op); return res; } vector *PcodeCompile::newOutput(bool usesLocalKey,ExprTree *rhs,string *varname,uint4 size) { VarnodeSymbol *sym; VarnodeTpl *tmpvn = buildTemporary(); if (size != 0) tmpvn->setSize(ConstTpl(ConstTpl::real,size)); // Size was explicitly specified else if ((rhs->getSize().getType()==ConstTpl::real)&&(rhs->getSize().getReal()!=0)) tmpvn->setSize(rhs->getSize()); // Inherit size from unnamed expression result // Only inherit if the size is real, otherwise we // cannot build the VarnodeSymbol with a placeholder constant rhs->setOutput(tmpvn); sym = new VarnodeSymbol(*varname,tmpvn->getSpace().getSpace(),tmpvn->getOffset().getReal(),tmpvn->getSize().getReal()); // Create new symbol regardless addSymbol(sym); if ((!usesLocalKey) && enforceLocalKey) reportError(getLocation(sym), "Must use 'local' keyword to define symbol '"+*varname + "'"); delete varname; return ExprTree::toVector(rhs); } void PcodeCompile::newLocalDefinition(string *varname,uint4 size) { // Create a new temporary symbol (without generating any pcode) VarnodeSymbol *sym; sym = new VarnodeSymbol(*varname,uniqspace,allocateTemp(),size); addSymbol(sym); delete varname; } ExprTree *PcodeCompile::createOp(OpCode opc,ExprTree *vn) { // Create new expression with output -outvn- // built by performing -opc- on input vn. 
// Free input expression VarnodeTpl *outvn = buildTemporary(); OpTpl *op = new OpTpl(opc); op->addInput(vn->outvn); op->setOutput(outvn); vn->ops->push_back(op); vn->outvn = new VarnodeTpl(*outvn); return vn; } ExprTree *PcodeCompile::createOp(OpCode opc,ExprTree *vn1, ExprTree *vn2) { // Create new expression with output -outvn- // built by performing -opc- on inputs vn1 and vn2. // Free input expressions VarnodeTpl *outvn = buildTemporary(); vn1->ops->insert(vn1->ops->end(),vn2->ops->begin(),vn2->ops->end()); vn2->ops->clear(); OpTpl *op = new OpTpl(opc); op->addInput(vn1->outvn); op->addInput(vn2->outvn); vn2->outvn = (VarnodeTpl *)0; op->setOutput(outvn); vn1->ops->push_back(op); vn1->outvn = new VarnodeTpl(*outvn); delete vn2; return vn1; } ExprTree *PcodeCompile::createOpOut(VarnodeTpl *outvn,OpCode opc, ExprTree *vn1,ExprTree *vn2) { // Create an op with explicit output and two inputs vn1->ops->insert(vn1->ops->end(),vn2->ops->begin(),vn2->ops->end()); vn2->ops->clear(); OpTpl *op = new OpTpl(opc); op->addInput(vn1->outvn); op->addInput(vn2->outvn); vn2->outvn = (VarnodeTpl *)0; op->setOutput(outvn); vn1->ops->push_back(op); vn1->outvn = new VarnodeTpl(*outvn); delete vn2; return vn1; } ExprTree *PcodeCompile::createOpOutUnary(VarnodeTpl *outvn,OpCode opc,ExprTree *vn) { // Create an op with explicit output and 1 input OpTpl *op = new OpTpl(opc); op->addInput(vn->outvn); op->setOutput(outvn); vn->ops->push_back(op); vn->outvn = new VarnodeTpl(*outvn); return vn; } vector *PcodeCompile::createOpNoOut(OpCode opc,ExprTree *vn) { // Create new expression by creating op with given -opc- // and single input vn. 
Free the input expression OpTpl *op = new OpTpl(opc); op->addInput(vn->outvn); vn->outvn = (VarnodeTpl *)0; // There is no longer an output to this expression vector *res = vn->ops; vn->ops = (vector *)0; delete vn; res->push_back(op); return res; } vector *PcodeCompile::createOpNoOut(OpCode opc,ExprTree *vn1,ExprTree *vn2) { // Create new expression by creating op with given -opc- // and inputs vn1 and vn2. Free the input expressions vector *res = vn1->ops; vn1->ops = (vector *)0; res->insert(res->end(),vn2->ops->begin(),vn2->ops->end()); vn2->ops->clear(); OpTpl *op = new OpTpl(opc); op->addInput(vn1->outvn); vn1->outvn = (VarnodeTpl *)0; op->addInput(vn2->outvn); vn2->outvn = (VarnodeTpl *)0; res->push_back(op); delete vn1; delete vn2; return res; } vector *PcodeCompile::createOpConst(OpCode opc,uintb val) { VarnodeTpl *vn = new VarnodeTpl(ConstTpl(constantspace), ConstTpl(ConstTpl::real,val), ConstTpl(ConstTpl::real,4)); vector *res = new vector; OpTpl *op = new OpTpl(opc); op->addInput(vn); res->push_back(op); return res; } ExprTree *PcodeCompile::createLoad(StarQuality *qual,ExprTree *ptr) { // Create new load expression, free ptr expression VarnodeTpl *outvn = buildTemporary(); OpTpl *op = new OpTpl(CPUI_LOAD); // The first varnode input to the load is a constant reference to the AddrSpace being loaded // from. Internally, we really store the pointer to the AddrSpace as the reference, but this // isn't platform independent. So officially, we assume that the constant reference will be the // AddrSpace index. We can safely assume this always has size 4. 
VarnodeTpl *spcvn = new VarnodeTpl(ConstTpl(constantspace), qual->id, ConstTpl(ConstTpl::real,8)); op->addInput(spcvn); op->addInput(ptr->outvn); op->setOutput(outvn); ptr->ops->push_back(op); if (qual->size > 0) force_size(outvn,ConstTpl(ConstTpl::real,qual->size),*ptr->ops); ptr->outvn = new VarnodeTpl(*outvn); delete qual; return ptr; } vector *PcodeCompile::createStore(StarQuality *qual, ExprTree *ptr,ExprTree *val) { vector *res = ptr->ops; ptr->ops = (vector *)0; res->insert(res->end(),val->ops->begin(),val->ops->end()); val->ops->clear(); OpTpl *op = new OpTpl(CPUI_STORE); // The first varnode input to the store is a constant reference to the AddrSpace being loaded // from. Internally, we really store the pointer to the AddrSpace as the reference, but this // isn't platform independent. So officially, we assume that the constant reference will be the // AddrSpace index. We can safely assume this always has size 4. VarnodeTpl *spcvn = new VarnodeTpl(ConstTpl(constantspace), qual->id, ConstTpl(ConstTpl::real,8)); op->addInput(spcvn); op->addInput(ptr->outvn); op->addInput(val->outvn); res->push_back(op); force_size(val->outvn,ConstTpl(ConstTpl::real,qual->size),*res); ptr->outvn = (VarnodeTpl *)0; val->outvn = (VarnodeTpl *)0; delete ptr; delete val; delete qual; return res; } ExprTree *PcodeCompile::createUserOp(UserOpSymbol *sym,vector *param) { // Create userdefined pcode op, given symbol and parameters VarnodeTpl *outvn = buildTemporary(); ExprTree *res = new ExprTree(); res->ops = createUserOpNoOut(sym,param); res->ops->back()->setOutput(outvn); res->outvn = new VarnodeTpl(*outvn); return res; } vector *PcodeCompile::createUserOpNoOut(UserOpSymbol *sym,vector *param) { OpTpl *op = new OpTpl(CPUI_CALLOTHER); VarnodeTpl *vn = new VarnodeTpl(ConstTpl(constantspace), ConstTpl(ConstTpl::real,sym->getIndex()), ConstTpl(ConstTpl::real,4)); op->addInput(vn); return ExprTree::appendParams(op,param); } ExprTree *PcodeCompile::createVariadic(OpCode opc,vector 
*param) { VarnodeTpl *outvn = buildTemporary(); ExprTree *res = new ExprTree(); OpTpl *op = new OpTpl(opc); res->ops = ExprTree::appendParams(op,param); res->ops->back()->setOutput(outvn); res->outvn = new VarnodeTpl(*outvn); return res; } void PcodeCompile::appendOp(OpCode opc,ExprTree *res,uintb constval,int4 constsz) { // Take output of res expression, combine with constant, // using opc operation, return the resulting expression OpTpl *op = new OpTpl(opc); VarnodeTpl *constvn = new VarnodeTpl(ConstTpl(constantspace), ConstTpl(ConstTpl::real,constval), ConstTpl(ConstTpl::real,constsz)); VarnodeTpl *outvn = buildTemporary(); op->addInput(res->outvn); op->addInput(constvn); op->setOutput(outvn); res->ops->push_back(op); res->outvn = new VarnodeTpl(*outvn); } VarnodeTpl *PcodeCompile::buildTruncatedVarnode(VarnodeTpl *basevn,uint4 bitoffset,uint4 numbits) { // Build a truncated form -basevn- that matches the bitrange [ -bitoffset-, -numbits- ] if possible // using just ConstTpl mechanics, otherwise return null uint4 byteoffset = bitoffset / 8; // Convert to byte units uint4 numbytes = numbits / 8; uintb fullsz = 0; if (basevn->getSize().getType() == ConstTpl::real) { // If we know the size of base, make sure the bit range is in bounds fullsz = basevn->getSize().getReal(); if (fullsz == 0) return (VarnodeTpl *)0; if (byteoffset + numbytes > fullsz) throw SleighError("Requested bit range out of bounds"); } if ((bitoffset % 8) != 0) return (VarnodeTpl *)0; if ((numbits % 8) != 0) return (VarnodeTpl *)0; ConstTpl::const_type offset_type = basevn->getOffset().getType(); if ((offset_type != ConstTpl::real)&&(offset_type != ConstTpl::handle)) return (VarnodeTpl *)0; ConstTpl specialoff; if (offset_type == ConstTpl::handle) { // We put in the correct adjustment to offset assuming things are little endian // We defer the correct big endian calculation until after the consistency check // because we need to know the subtable export sizes specialoff = 
ConstTpl(ConstTpl::handle,basevn->getOffset().getHandleIndex(), ConstTpl::v_offset_plus,byteoffset); } else { if (basevn->getSize().getType() != ConstTpl::real) throw SleighError("Could not construct requested bit range"); uintb plus; if (defaultspace->isBigEndian()) plus = fullsz - (byteoffset + numbytes); else plus = byteoffset; specialoff = ConstTpl(ConstTpl::real,basevn->getOffset().getReal() + plus); } VarnodeTpl *res = new VarnodeTpl(basevn->getSpace(),specialoff,ConstTpl(ConstTpl::real,numbytes)); return res; } vector *PcodeCompile::assignBitRange(VarnodeTpl *vn,uint4 bitoffset,uint4 numbits,ExprTree *rhs) { // Create an expression assigning the rhs to a bitrange within sym string errmsg; if (numbits == 0) errmsg = "Size of bitrange is zero"; uint4 smallsize = (numbits+7)/8; // Size of input (output of rhs) bool shiftneeded = (bitoffset != 0); bool zextneeded = true; uintb mask = (uintb)2; mask = ~(((mask<<(numbits-1))-1) << bitoffset); if (vn->getSize().getType()==ConstTpl::real) { // If we know the size of the bitranged varnode, we can // do some immediate checks, and possibly simplify things uint4 symsize = vn->getSize().getReal(); if (symsize > 0) zextneeded = (symsize > smallsize); symsize *= 8; // Convert to number of bits if ((bitoffset>=symsize)||(bitoffset+numbits>symsize)) errmsg = "Assigned bitrange is bad"; else if ((bitoffset==0)&&(numbits==symsize)) errmsg = "Assigning to bitrange is superfluous"; } if (errmsg.size()>0) { // Was there an error condition reportError((const Location *)0, errmsg); // Report the error delete vn; // Clean up vector *resops = rhs->ops; // Passthru old expression rhs->ops = (vector *)0; delete rhs; return resops; } // We know what the size of the input has to be force_size(rhs->outvn,ConstTpl(ConstTpl::real,smallsize),*rhs->ops); ExprTree *res; VarnodeTpl *finalout = buildTruncatedVarnode(vn,bitoffset,numbits); if (finalout != (VarnodeTpl *)0) { delete vn; // Don't keep the original Varnode object res = 
createOpOutUnary(finalout,CPUI_COPY,rhs); } else { if (bitoffset + numbits > 64) errmsg = "Assigned bitrange extends past first 64 bits"; res = new ExprTree(vn); appendOp(CPUI_INT_AND,res,mask,0); if (zextneeded) createOp(CPUI_INT_ZEXT,rhs); if (shiftneeded) appendOp(CPUI_INT_LEFT,rhs,bitoffset,4); finalout = new VarnodeTpl(*vn); res = createOpOut(finalout,CPUI_INT_OR,res,rhs); } if (errmsg.size() > 0) reportError((const Location *)0, errmsg); vector *resops = res->ops; res->ops = (vector *)0; delete res; return resops; } ExprTree *PcodeCompile::createBitRange(SpecificSymbol *sym,uint4 bitoffset,uint4 numbits) { // Create an expression computing the indicated bitrange of sym // The result is truncated to the smallest byte size that can // contain the indicated number of bits. The result has the // desired bits shifted all the way to the right string errmsg; if (numbits == 0) errmsg = "Size of bitrange is zero"; VarnodeTpl *vn = sym->getVarnode(); uint4 finalsize = (numbits+7)/8; // Round up to neareast byte size uint4 truncshift = 0; bool maskneeded = ((numbits%8)!=0); bool truncneeded = true; // Special case where we can set the size, without invoking // a truncation operator if ((errmsg.size()==0)&&(bitoffset==0)&&(!maskneeded)) { if ((vn->getSpace().getType()==ConstTpl::handle)&&vn->isZeroSize()) { vn->setSize(ConstTpl(ConstTpl::real,finalsize)); ExprTree *res = new ExprTree(vn); // VarnodeTpl *cruft = buildTemporary(); // delete cruft; return res; } } if (errmsg.size()==0) { VarnodeTpl *truncvn = buildTruncatedVarnode(vn,bitoffset,numbits); if (truncvn != (VarnodeTpl *)0) { // If we are able to construct a simple truncated varnode ExprTree *res = new ExprTree(truncvn); // Return just the varnode as an expression delete vn; return res; } } if (vn->getSize().getType()==ConstTpl::real) { // If we know the size of the input varnode, we can // do some immediate checks, and possibly simplify things uint4 insize = vn->getSize().getReal(); if (insize > 0) { truncneeded 
= (finalsize < insize); insize *= 8; // Convert to number of bits if ((bitoffset >= insize)||(bitoffset+numbits > insize)) errmsg = "Bitrange is bad"; if (maskneeded && ((bitoffset+numbits)==insize)) maskneeded = false; } } uintb mask = (uintb)2; mask = ((mask<<(numbits-1))-1); if (truncneeded && ((bitoffset % 8)==0)) { truncshift = bitoffset/8; bitoffset = 0; } if ((bitoffset==0)&&(!truncneeded)&&(!maskneeded)) errmsg = "Superfluous bitrange"; if (maskneeded && (finalsize > 8)) errmsg = "Illegal masked bitrange producing varnode larger than 64 bits: " + sym->getName(); ExprTree *res = new ExprTree(vn); if (errmsg.size()>0) { // Check for error condition reportError(getLocation(sym), errmsg); return res; } if (bitoffset !=0) appendOp(CPUI_INT_RIGHT,res,bitoffset,4); if (truncneeded) appendOp(CPUI_SUBPIECE,res,truncshift,4); if (maskneeded) appendOp(CPUI_INT_AND,res,mask,finalsize); force_size(res->outvn,ConstTpl(ConstTpl::real,finalsize),*res->ops); return res; } VarnodeTpl *PcodeCompile::addressOf(VarnodeTpl *var,uint4 size) { // Produce constant varnode that is the offset // portion of varnode -var- if (size==0) { // If no size specified if (var->getSpace().getType() == ConstTpl::spaceid) { AddrSpace *spc = var->getSpace().getSpace(); // Look to the particular space size = spc->getAddrSize(); // to see if it has a standard address size } } VarnodeTpl *res; if ((var->getOffset().getType() == ConstTpl::real)&&(var->getSpace().getType() == ConstTpl::spaceid)) { AddrSpace *spc = var->getSpace().getSpace(); uintb off = AddrSpace::byteToAddress(var->getOffset().getReal(),spc->getWordSize()); res = new VarnodeTpl(ConstTpl(constantspace), ConstTpl(ConstTpl::real,off), ConstTpl(ConstTpl::real,size)); } else res = new VarnodeTpl(ConstTpl(constantspace),var->getOffset(),ConstTpl(ConstTpl::real,size)); delete var; return res; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/pcodecompile.hh 
================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __PCODECOMPILE_HH__ #define __PCODECOMPILE_HH__ #include "slghsymbol.hh" namespace ghidra { class Location { string filename; int4 lineno; public: Location(void) {} Location(const string &fname, const int4 line) { filename = fname; lineno = line; } string getFilename(void) const { return filename; } int4 getLineno(void) const { return lineno; } string format(void) const; }; struct StarQuality { ConstTpl id; uint4 size; }; class ExprTree { // A flattened expression tree friend class PcodeCompile; vector *ops; // flattened ops making up the expression VarnodeTpl *outvn; // Output varnode of the expression // If the last op has an output, -outvn- is // a COPY of that varnode public: ExprTree(void) { ops = (vector *)0; outvn = (VarnodeTpl *)0; } ExprTree(VarnodeTpl *vn); ExprTree(OpTpl *op); ~ExprTree(void); void setOutput(VarnodeTpl *newout); VarnodeTpl *getOut(void) { return outvn; } const ConstTpl &getSize(void) const { return outvn->getSize(); } static vector *appendParams(OpTpl *op,vector *param); static vector *toVector(ExprTree *expr); }; class PcodeCompile { AddrSpace *defaultspace; AddrSpace *constantspace; AddrSpace *uniqspace; uint4 local_labelcount; // Number of labels in current constructor bool enforceLocalKey; // Force slaspec to use 'local' keyword when defining temporary varnodes virtual uint4 allocateTemp(void)=0; virtual void 
addSymbol(SleighSymbol *sym)=0; public: PcodeCompile(void) { defaultspace=(AddrSpace *)0; constantspace=(AddrSpace *)0; uniqspace=(AddrSpace *)0; local_labelcount=0; enforceLocalKey=false; } virtual ~PcodeCompile(void) {} virtual const Location *getLocation(SleighSymbol *sym) const=0; virtual void reportError(const Location *loc, const string &msg)=0; virtual void reportWarning(const Location *loc, const string &msg)=0; void resetLabelCount(void) { local_labelcount=0; } void setDefaultSpace(AddrSpace *spc) { defaultspace = spc; } void setConstantSpace(AddrSpace *spc) { constantspace = spc; } void setUniqueSpace(AddrSpace *spc) { uniqspace = spc; } void setEnforceLocalKey(bool val) { enforceLocalKey = val; } AddrSpace *getDefaultSpace(void) const { return defaultspace; } AddrSpace *getConstantSpace(void) const { return constantspace; } VarnodeTpl *buildTemporary(void); LabelSymbol *defineLabel(string *name); vector *placeLabel(LabelSymbol *sym); vector *newOutput(bool usesLocalKey,ExprTree *rhs,string *varname,uint4 size=0); void newLocalDefinition(string *varname,uint4 size=0); ExprTree *createOp(OpCode opc,ExprTree *vn); ExprTree *createOp(OpCode opc,ExprTree *vn1,ExprTree *vn2); ExprTree *createOpOut(VarnodeTpl *outvn,OpCode opc,ExprTree *vn1,ExprTree *vn2); ExprTree *createOpOutUnary(VarnodeTpl *outvn,OpCode opc,ExprTree *vn); vector *createOpNoOut(OpCode opc,ExprTree *vn); vector *createOpNoOut(OpCode opc,ExprTree *vn1,ExprTree *vn2); vector *createOpConst(OpCode opc,uintb val); ExprTree *createLoad(StarQuality *qual,ExprTree *ptr); vector *createStore(StarQuality *qual,ExprTree *ptr,ExprTree *val); ExprTree *createUserOp(UserOpSymbol *sym,vector *param); vector *createUserOpNoOut(UserOpSymbol *sym,vector *param); ExprTree *createVariadic(OpCode opc,vector *param); void appendOp(OpCode opc,ExprTree *res,uintb constval,int4 constsz); VarnodeTpl *buildTruncatedVarnode(VarnodeTpl *basevn,uint4 bitoffset,uint4 numbits); vector *assignBitRange(VarnodeTpl *vn,uint4 
bitoffset,uint4 numbits,ExprTree *rhs); ExprTree *createBitRange(SpecificSymbol *sym,uint4 bitoffset,uint4 numbits); VarnodeTpl *addressOf(VarnodeTpl *var,uint4 size); static void force_size(VarnodeTpl *vt,const ConstTpl &size,const vector &ops); static void matchSize(int4 j,OpTpl *op,bool inputonly,const vector &ops); static void fillinZero(OpTpl *op,const vector &ops); static bool propagateSize(ConstructTpl *ct); }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/pcodeparse.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* A Bison parser, made by GNU Bison 3.5.1. */ /* Bison implementation for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
*/ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. */ /* Undocumented macros, especially those whose name start with YY_, are private implementation details. Do not rely on them. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "3.5.1" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Substitute the type names. */ #define YYSTYPE PCODESTYPE /* Substitute the variable and function names. */ #define yyparse pcodeparse #define yylex pcodelex #define yyerror pcodeerror #define yydebug pcodedebug #define yynerrs pcodenerrs #define yylval pcodelval #define yychar pcodechar /* First part of user prologue. 
*/ #include "pcodeparse.hh" //#define YYERROR_VERBOSE namespace ghidra { extern int pcodelex(void); static PcodeSnippet *pcode; extern int pcodeerror(const char *str ); # ifndef YY_CAST # ifdef __cplusplus # define YY_CAST(Type, Val) static_cast (Val) # define YY_REINTERPRET_CAST(Type, Val) reinterpret_cast (Val) # else # define YY_CAST(Type, Val) ((Type) (Val)) # define YY_REINTERPRET_CAST(Type, Val) ((Type) (Val)) # endif # endif # ifndef YY_NULLPTR # if defined __cplusplus # if 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # else # define YY_NULLPTR ((void*)0) # endif # endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Debug traces. */ #ifndef PCODEDEBUG # if defined YYDEBUG #if YYDEBUG # define PCODEDEBUG 1 # else # define PCODEDEBUG 0 # endif # else /* ! defined YYDEBUG */ # define PCODEDEBUG 0 # endif /* ! defined YYDEBUG */ #endif /* ! defined PCODEDEBUG */ #if PCODEDEBUG extern int pcodedebug; #endif /* Token type. 
*/ #ifndef PCODETOKENTYPE # define PCODETOKENTYPE enum pcodetokentype { OP_BOOL_OR = 258, OP_BOOL_AND = 259, OP_BOOL_XOR = 260, OP_EQUAL = 261, OP_NOTEQUAL = 262, OP_FEQUAL = 263, OP_FNOTEQUAL = 264, OP_GREATEQUAL = 265, OP_LESSEQUAL = 266, OP_SLESS = 267, OP_SGREATEQUAL = 268, OP_SLESSEQUAL = 269, OP_SGREAT = 270, OP_FLESS = 271, OP_FGREAT = 272, OP_FLESSEQUAL = 273, OP_FGREATEQUAL = 274, OP_LEFT = 275, OP_RIGHT = 276, OP_SRIGHT = 277, OP_FADD = 278, OP_FSUB = 279, OP_SDIV = 280, OP_SREM = 281, OP_FMULT = 282, OP_FDIV = 283, OP_ZEXT = 284, OP_CARRY = 285, OP_BORROW = 286, OP_SEXT = 287, OP_SCARRY = 288, OP_SBORROW = 289, OP_NAN = 290, OP_ABS = 291, OP_SQRT = 292, OP_CEIL = 293, OP_FLOOR = 294, OP_ROUND = 295, OP_INT2FLOAT = 296, OP_FLOAT2FLOAT = 297, OP_TRUNC = 298, OP_NEW = 299, BADINTEGER = 300, GOTO_KEY = 301, CALL_KEY = 302, RETURN_KEY = 303, IF_KEY = 304, ENDOFSTREAM = 305, LOCAL_KEY = 306, INTEGER = 307, STRING = 308, SPACESYM = 309, USEROPSYM = 310, VARSYM = 311, OPERANDSYM = 312, JUMPSYM = 313, LABELSYM = 314 }; #endif /* Value type. */ #if ! defined PCODESTYPE && ! defined PCODESTYPE_IS_DECLARED union PCODESTYPE { uintb *i; string *str; vector *param; StarQuality *starqual; VarnodeTpl *varnode; ExprTree *tree; vector *stmt; ConstructTpl *sem; SpaceSymbol *spacesym; UserOpSymbol *useropsym; LabelSymbol *labelsym; OperandSymbol *operandsym; VarnodeSymbol *varsym; SpecificSymbol *specsym; }; typedef union PCODESTYPE PCODESTYPE; # define PCODESTYPE_IS_TRIVIAL 1 # define PCODESTYPE_IS_DECLARED 1 #endif extern PCODESTYPE pcodelval; int pcodeparse (void); #ifdef short # undef short #endif /* On compilers that do not define __PTRDIFF_MAX__ etc., make sure and (if available) are included so that the code can choose integer types of a good width. 
*/ #ifndef __PTRDIFF_MAX__ # include /* INFRINGES ON USER NAME SPACE */ # if defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ # include /* INFRINGES ON USER NAME SPACE */ # define YY_STDINT_H # endif #endif /* Narrow types that promote to a signed type and that can represent a signed or unsigned integer of at least N bits. In tables they can save space and decrease cache pressure. Promoting to a signed type helps avoid bugs in integer arithmetic. */ #ifdef __INT_LEAST8_MAX__ typedef __INT_LEAST8_TYPE__ yytype_int8; #elif defined YY_STDINT_H typedef int_least8_t yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef __INT_LEAST16_MAX__ typedef __INT_LEAST16_TYPE__ yytype_int16; #elif defined YY_STDINT_H typedef int_least16_t yytype_int16; #else typedef short yytype_int16; #endif #if defined __UINT_LEAST8_MAX__ && __UINT_LEAST8_MAX__ <= __INT_MAX__ typedef __UINT_LEAST8_TYPE__ yytype_uint8; #elif (!defined __UINT_LEAST8_MAX__ && defined YY_STDINT_H \ && UINT_LEAST8_MAX <= INT_MAX) typedef uint_least8_t yytype_uint8; #elif !defined __UINT_LEAST8_MAX__ && UCHAR_MAX <= INT_MAX typedef unsigned char yytype_uint8; #else typedef short yytype_uint8; #endif #if defined __UINT_LEAST16_MAX__ && __UINT_LEAST16_MAX__ <= __INT_MAX__ typedef __UINT_LEAST16_TYPE__ yytype_uint16; #elif (!defined __UINT_LEAST16_MAX__ && defined YY_STDINT_H \ && UINT_LEAST16_MAX <= INT_MAX) typedef uint_least16_t yytype_uint16; #elif !defined __UINT_LEAST16_MAX__ && USHRT_MAX <= INT_MAX typedef unsigned short yytype_uint16; #else typedef int yytype_uint16; #endif #ifndef YYPTRDIFF_T # if defined __PTRDIFF_TYPE__ && defined __PTRDIFF_MAX__ # define YYPTRDIFF_T __PTRDIFF_TYPE__ # define YYPTRDIFF_MAXIMUM __PTRDIFF_MAX__ # elif defined PTRDIFF_MAX # ifndef ptrdiff_t # include /* INFRINGES ON USER NAME SPACE */ # endif # define YYPTRDIFF_T ptrdiff_t # define YYPTRDIFF_MAXIMUM PTRDIFF_MAX # else # define YYPTRDIFF_T long # define YYPTRDIFF_MAXIMUM LONG_MAX # endif #endif #ifndef YYSIZE_T # 
ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ # include /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned # endif #endif #define YYSIZE_MAXIMUM \ YY_CAST (YYPTRDIFF_T, \ (YYPTRDIFF_MAXIMUM < YY_CAST (YYSIZE_T, -1) \ ? YYPTRDIFF_MAXIMUM \ : YY_CAST (YYSIZE_T, -1))) #define YYSIZEOF(X) YY_CAST (YYPTRDIFF_T, sizeof (X)) /* Stored state numbers (used for stacks). */ typedef yytype_int16 yy_state_t; /* State numbers in computations. */ typedef int yy_state_fast_t; #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE_PURE # if defined __GNUC__ && 2 < __GNUC__ + (96 <= __GNUC_MINOR__) # define YY_ATTRIBUTE_PURE __attribute__ ((__pure__)) # else # define YY_ATTRIBUTE_PURE # endif #endif #ifndef YY_ATTRIBUTE_UNUSED # if defined __GNUC__ && 2 < __GNUC__ + (7 <= __GNUC_MINOR__) # define YY_ATTRIBUTE_UNUSED __attribute__ ((__unused__)) # else # define YY_ATTRIBUTE_UNUSED # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. 
*/ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") \ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if defined __cplusplus && defined __GNUC__ && ! defined __ICC && 6 <= __GNUC__ # define YY_IGNORE_USELESS_CAST_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuseless-cast\"") # define YY_IGNORE_USELESS_CAST_END \ _Pragma ("GCC diagnostic pop") #endif #ifndef YY_IGNORE_USELESS_CAST_BEGIN # define YY_IGNORE_USELESS_CAST_BEGIN # define YY_IGNORE_USELESS_CAST_END #endif #define YY_ASSERT(E) ((void) (0 && (E))) #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. 
So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! ((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined PCODESTYPE_IS_TRIVIAL && PCODESTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yy_state_t yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (YYSIZEOF (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. 
*/ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYPTRDIFF_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * YYSIZEOF (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / YYSIZEOF (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, YY_CAST (YYSIZE_T, (Count)) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYPTRDIFF_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 3 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 2214 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 80 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 13 /* YYNRULES -- Number of rules. */ #define YYNRULES 116 /* YYNSTATES -- Number of states. */ #define YYNSTATES 294 #define YYUNDEFTOK 2 #define YYMAXUTOK 314 /* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM as returned by yylex, with out-of-bounds checking. */ #define YYTRANSLATE(YYX) \ (0 <= (YYX) && (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex. 
*/ static const yytype_int8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 40, 2, 2, 2, 35, 9, 2, 75, 76, 33, 29, 78, 30, 2, 34, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 73, 7, 14, 74, 15, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 77, 2, 79, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 6, 2, 41, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 10, 11, 12, 13, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72 }; #if PCODEDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint8 yyrline[] = { 0, 99, 99, 101, 102, 103, 104, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191, 192, 193, 195, 196, 197, 198, 199, 200, 202, 203, 204, 206, 207, 208, 209, 210, 212, 213, 215, 216, 218, 219, 220, 222, 223, 224 }; #endif #if PCODEDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "OP_BOOL_OR", "OP_BOOL_AND", "OP_BOOL_XOR", "'|'", "';'", "'^'", "'&'", "OP_EQUAL", "OP_NOTEQUAL", "OP_FEQUAL", "OP_FNOTEQUAL", "'<'", "'>'", "OP_GREATEQUAL", "OP_LESSEQUAL", "OP_SLESS", "OP_SGREATEQUAL", "OP_SLESSEQUAL", "OP_SGREAT", "OP_FLESS", "OP_FGREAT", "OP_FLESSEQUAL", "OP_FGREATEQUAL", "OP_LEFT", "OP_RIGHT", "OP_SRIGHT", "'+'", "'-'", "OP_FADD", "OP_FSUB", "'*'", "'/'", "'%'", "OP_SDIV", "OP_SREM", "OP_FMULT", "OP_FDIV", "'!'", "'~'", "OP_ZEXT", "OP_CARRY", "OP_BORROW", "OP_SEXT", "OP_SCARRY", "OP_SBORROW", "OP_NAN", "OP_ABS", "OP_SQRT", "OP_CEIL", "OP_FLOOR", "OP_ROUND", "OP_INT2FLOAT", "OP_FLOAT2FLOAT", "OP_TRUNC", "OP_NEW", "BADINTEGER", "GOTO_KEY", "CALL_KEY", "RETURN_KEY", "IF_KEY", "ENDOFSTREAM", "LOCAL_KEY", "INTEGER", "STRING", "SPACESYM", "USEROPSYM", "VARSYM", "OPERANDSYM", "JUMPSYM", "LABELSYM", "':'", "'='", "'('", "')'", "'['", "','", "']'", "$accept", "rtl", "rtlmid", "statement", "expr", "sizedstar", "jumpdest", "varnode", "integervarnode", "lhsvarnode", "label", "specificsymbol", "paramlist", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ static const yytype_int16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 124, 59, 94, 38, 261, 262, 263, 264, 60, 62, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 43, 45, 278, 279, 42, 47, 37, 280, 281, 282, 283, 33, 126, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 58, 61, 40, 41, 91, 44, 93 }; # endif #define YYPACT_NINF (-65) #define yypact_value_is_default(Yyn) \ ((Yyn) == YYPACT_NINF) #define YYTABLE_NINF (-109) #define yytable_value_is_error(Yyn) \ ((Yyn) == YYTABLE_NINF) /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ static const yytype_int16 yypact[] = { -65, 32, 1720, -65, 308, -45, -23, -65, -6, 416, -1, 1670, -65, 74, -64, -48, -13, -65, -65, -65, -65, 1670, -51, -65, -44, -65, -16, -65, 7, -65, -65, 58, 66, 19, 18, -65, 24, -65, -65, 1670, 95, -65, 1670, 140, -65, 1670, 1670, 1670, 1670, 1670, 73, 115, 149, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 164, 167, 1670, 1789, 1670, -65, -7, 4, 169, 179, 180, 1670, 1670, 1635, 181, 182, 1670, 184, 529, -65, -65, -65, 171, 186, 146, -65, 183, -65, 273, -65, -65, -65, -65, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 531, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 1670, 9, -65, 189, -2, 190, -65, 191, 1670, -65, -65, 174, 1846, 2105, -25, 1670, 177, 185, 1883, 187, -65, 193, 178, 252, 253, 256, 600, 386, 669, 423, 494, 738, 807, 876, 945, 1014, 1083, 1152, 1221, 1290, 310, 63, -65, 2141, 2175, 2175, 632, 700, 768, 833, 833, 833, 833, 902, 902, 902, 902, 902, 902, 902, 902, 902, 902, 902, 902, 10, 10, 10, -19, -19, -19, -19, -65, -65, -65, -65, -65, -65, -65, 257, -65, 192, 194, 5, 1920, 1670, -65, 260, 1670, 1957, -65, -65, -65, 204, 205, -65, -65, -65, -65, -65, 1670, -65, 1670, 1670, -65, -65, -65, -65, -65, -65, -65, -65, -65, -65, 1670, -65, -65, -65, 206, -65, 1670, -65, 1994, -65, 2105, -65, 195, -65, 1359, 1428, 1497, 1566, 196, 2031, -65, 199, -65, -65, -65, -65, -65, -65, 1670, 2068, -65 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. 
*/ static const yytype_int8 yydefact[] = { 3, 0, 0, 1, 0, 0, 92, 103, 0, 0, 0, 0, 2, 0, 102, 101, 0, 111, 112, 113, 4, 0, 0, 100, 0, 25, 99, 101, 0, 105, 99, 0, 0, 0, 0, 95, 94, 98, 93, 0, 0, 97, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 99, 0, 0, 0, 0, 0, 114, 0, 0, 0, 0, 0, 0, 110, 109, 91, 0, 0, 0, 18, 0, 21, 0, 41, 68, 54, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, 5, 0, 0, 12, 104, 0, 0, 115, 0, 0, 0, 0, 0, 0, 106, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 57, 56, 55, 45, 43, 44, 31, 32, 58, 59, 33, 36, 34, 35, 37, 38, 39, 40, 60, 61, 62, 63, 46, 47, 48, 29, 30, 64, 65, 49, 50, 52, 51, 53, 66, 67, 0, 86, 0, 0, 0, 0, 0, 9, 0, 0, 0, 16, 17, 7, 0, 0, 96, 20, 22, 24, 72, 0, 71, 0, 0, 78, 69, 70, 80, 81, 82, 77, 76, 79, 83, 0, 88, 19, 85, 0, 6, 0, 8, 0, 14, 116, 13, 0, 89, 0, 0, 0, 0, 0, 0, 11, 0, 73, 74, 75, 84, 87, 10, 0, 0, 15 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -65, -65, -65, -65, -11, 348, -8, 1, 198, -65, 351, 0, 242 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 1, 2, 20, 162, 68, 40, 69, 23, 24, 41, 70, 163 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. 
*/ static const yytype_int16 yytable[] = { 67, 43, 26, 22, 30, 29, 44, 4, 5, 73, 77, 155, 268, 72, 143, 144, 145, 146, 147, 148, 149, 31, 78, 5, 79, 74, 75, 32, 88, -108, 80, 90, 3, 81, 92, 93, 94, 95, 96, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 33, 236, 35, 237, 34, 113, 7, 151, -107, 36, 37, -107, 76, 14, 161, 38, 152, 35, 153, 167, 154, 39, 82, 83, 36, 37, 45, 156, 157, 269, 38, 84, 30, 169, 85, 86, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 87, 89, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 264, 71, 237, 228, 17, 18, 19, 233, 91, 97, 114, 115, 116, 117, 238, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 98, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 271, 99, 172, 273, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 277, 111, 278, 279, 112, 158, 159, 160, 165, 166, 234, 168, 170, 239, 280, 171, 229, 231, 232, 244, 282, 245, 246, 240, 173, 247, 265, 242, 243, 272, 266, 275, 276, 281, 267, 291, 284, 289, 114, 115, 116, 117, 292, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 4, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 21, 230, 174, 25, 190, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 14, 27, 0, 0, 17, 18, 19, 0, 28, 0, 0, 0, 0, 262, 0, 263, 114, 115, 116, 117, 0, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 
140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 5, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 249, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 36, 37, 0, 0, 0, 0, 38, 0, 0, 0, 0, 0, 42, 0, 0, 0, 114, 115, 116, 117, 251, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 4, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 14, 27, 0, 0, 17, 18, 19, 0, 0, 114, 115, 116, 117, 191, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 248, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 114, 115, 116, 117, 250, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 114, 115, 116, 117, 253, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 
144, 145, 146, 147, 148, 149, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 254, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, -109, -109, -109, -109, -109, -109, -109, -109, -109, -109, -109, -109, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 255, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 256, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 257, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 258, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 259, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 260, 118, 119, 120, 121, 122, 123, 124, 125, 126, 
127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 261, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 285, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 286, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 287, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 114, 115, 116, 117, 288, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 0, 47, 6, 0, 0, 0, 0, 0, 164, 48, 49, 50, 51, 0, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 7, 4, 0, 0, 0, 0, 5, 14, 27, 0, 65, 17, 18, 19, 0, 0, 0, 66, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 16, 17, 18, 19, 114, 115, 116, 117, 0, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 
131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 150, 114, 115, 116, 117, 235, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 241, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 270, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 274, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 283, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 290, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 293, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 114, 115, 116, 117, 0, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 115, 116, 117, 0, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 117, 0, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149 }; static const 
yytype_int16 yycheck[] = { 11, 9, 2, 2, 4, 4, 7, 9, 14, 73, 21, 7, 7, 13, 33, 34, 35, 36, 37, 38, 39, 66, 73, 14, 75, 73, 74, 72, 39, 77, 74, 42, 0, 77, 45, 46, 47, 48, 49, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 73, 76, 58, 78, 77, 66, 58, 68, 74, 65, 66, 77, 75, 65, 75, 71, 73, 58, 75, 80, 77, 77, 65, 15, 65, 66, 77, 73, 74, 74, 71, 15, 82, 82, 65, 67, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 77, 7, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 76, 66, 78, 150, 69, 70, 71, 157, 7, 75, 3, 4, 5, 6, 164, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 75, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 234, 75, 79, 237, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 249, 75, 251, 252, 75, 74, 65, 65, 65, 65, 74, 65, 79, 74, 263, 67, 65, 65, 65, 79, 269, 7, 7, 76, 79, 7, 7, 78, 73, 7, 76, 65, 65, 65, 78, 74, 79, 79, 3, 4, 5, 6, 291, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 9, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 2, 153, 79, 2, 112, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, 65, 66, -1, -1, 69, 70, 71, -1, 73, -1, -1, -1, -1, 76, -1, 78, 3, 4, 5, 6, -1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 14, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, 65, 66, -1, -1, -1, -1, 71, -1, -1, -1, -1, 
-1, 77, -1, -1, -1, 3, 4, 5, 6, 78, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 9, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, 78, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, -1, -1, -1, -1, -1, -1, 65, 66, -1, -1, 69, 70, 71, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 4, 5, 6, 76, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, 32, 33, -1, -1, -1, -1, -1, 74, 40, 41, 42, 43, -1, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 9, -1, -1, -1, -1, 14, 65, 66, -1, 68, 69, 70, 71, -1, -1, -1, 75, -1, -1, -1, -1, -1, -1, -1, 33, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 58, 59, 60, 61, 62, 63, 64, 65, 66, -1, 68, 69, 70, 71, 3, 4, 5, 6, -1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 59, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 
36, 37, 38, 39, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 3, 4, 5, 6, -1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 4, 5, 6, -1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 6, -1, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. */ static const yytype_int8 yystos[] = { 0, 81, 82, 0, 9, 14, 33, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68, 69, 70, 71, 83, 85, 87, 88, 89, 90, 91, 66, 73, 87, 91, 66, 72, 73, 77, 58, 65, 66, 71, 77, 86, 90, 77, 86, 7, 77, 30, 32, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 68, 75, 84, 85, 87, 91, 66, 91, 73, 73, 74, 75, 84, 73, 75, 74, 77, 65, 15, 15, 65, 67, 77, 84, 7, 84, 7, 84, 84, 84, 84, 84, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 84, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 59, 84, 73, 75, 77, 7, 73, 74, 74, 65, 65, 84, 84, 92, 74, 65, 65, 84, 65, 87, 79, 67, 79, 79, 79, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 92, 76, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 86, 65, 88, 65, 65, 84, 74, 7, 76, 78, 84, 74, 76, 7, 78, 73, 79, 7, 7, 7, 76, 78, 76, 78, 78, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 78, 76, 7, 76, 78, 7, 74, 7, 84, 7, 84, 7, 65, 65, 84, 84, 84, 84, 65, 84, 7, 79, 76, 76, 76, 76, 79, 7, 74, 84, 7 }; /* 
YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_int8 yyr1[] = { 0, 80, 81, 82, 82, 82, 82, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 85, 85, 85, 85, 86, 86, 86, 86, 86, 86, 87, 87, 87, 88, 88, 88, 88, 88, 89, 89, 90, 90, 91, 91, 91, 92, 92, 92 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */ static const yytype_int8 yyr2[] = { 0, 2, 2, 0, 2, 4, 6, 4, 5, 4, 7, 6, 3, 5, 5, 9, 4, 4, 3, 5, 5, 3, 5, 2, 5, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 4, 4, 4, 4, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, 6, 4, 3, 6, 4, 6, 4, 3, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 3, 2, 4, 1, 1, 3, 3, 1, 1, 1, 0, 1, 3 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* Enable debugging if requested. */ #if PCODEDEBUG # ifndef YYFPRINTF # include /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* This macro is provided for backward compatibility. 
*/
/* Location printing is a no-op: this grammar does not track @N
   locations.  */
#ifndef YY_LOCATION_PRINT
# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
#endif


/* Trace one symbol (token or nonterminal) on stderr when yydebug is
   set.  The Location argument is accepted for skeleton compatibility
   but never expanded.  */
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)            \
do {                                                              \
  if (yydebug)                                                    \
    {                                                             \
      YYFPRINTF (stderr, "%s ", Title);                           \
      yy_symbol_print (stderr,                                    \
                  Type, Value);                                   \
      YYFPRINTF (stderr, "\n");                                   \
    }                                                             \
} while (0)


/*-----------------------------------.
| Print this symbol's value on YYO.  |
`-----------------------------------*/

/* Debug-only: print the semantic value of symbol YYTYPE.  Without a
   user-supplied YYPRINT this effectively does nothing beyond silencing
   unused-variable warnings -- the semantic values in this parser are
   raw pointers (see the yydestruct cases) with no generic printer.  */
static void
yy_symbol_value_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep)
{
  FILE *yyoutput = yyo;
  YYUSE (yyoutput);
  if (!yyvaluep)
    return;
# ifdef YYPRINT
  if (yytype < YYNTOKENS)
    YYPRINT (yyo, yytoknum[yytype], *yyvaluep);
# endif
  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
  YYUSE (yytype);
  YY_IGNORE_MAYBE_UNINITIALIZED_END
}


/*---------------------------.
| Print this symbol on YYO.  |
`---------------------------*/

/* Debug-only: print "token NAME (value)" or "nterm NAME (value)" for
   symbol YYTYPE.  Symbol numbers below YYNTOKENS are terminals.  */
static void
yy_symbol_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep)
{
  YYFPRINTF (yyo, "%s %s (",
             yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);

  yy_symbol_value_print (yyo, yytype, yyvaluep);
  YYFPRINTF (yyo, ")");
}

/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its  |
| TOP (included).                                                    |
`------------------------------------------------------------------*/

/* Debug-only: dump the current state stack as a list of state numbers.
   Both bounds are inclusive.  */
static void
yy_stack_print (yy_state_t *yybottom, yy_state_t *yytop)
{
  YYFPRINTF (stderr, "Stack now");
  for (; yybottom <= yytop; yybottom++)
    {
      int yybot = *yybottom;
      YYFPRINTF (stderr, " %d", yybot);
    }
  YYFPRINTF (stderr, "\n");
}

# define YY_STACK_PRINT(Bottom, Top)                    \
do {                                                    \
  if (yydebug)                                          \
    yy_stack_print ((Bottom), (Top));                   \
} while (0)


/*------------------------------------------------.
| Report that the YYRULE is going to be reduced.
| `------------------------------------------------*/

/* Debug-only: before reducing by rule YYRULE, print the rule number,
   its grammar-source line (yyrline), and each right-hand-side symbol
   with its semantic value.  yyssp/yyvsp are the tops of the state and
   value stacks; the yynrhs symbols being reduced sit just below them.  */
static void
yy_reduce_print (yy_state_t *yyssp, YYSTYPE *yyvsp, int yyrule)
{
  int yylno = yyrline[yyrule];
  int yynrhs = yyr2[yyrule];
  int yyi;
  YYFPRINTF (stderr, "Reducing stack by rule %d (line %d):\n",
             yyrule - 1, yylno);
  /* The symbols being reduced.  */
  for (yyi = 0; yyi < yynrhs; yyi++)
    {
      YYFPRINTF (stderr, " $%d = ", yyi + 1);
      /* yystos gives the symbol that each popped state was reached by;
         the unary '+' promotes yy_state_t to int for indexing.  */
      yy_symbol_print (stderr,
                       yystos[+yyssp[yyi + 1 - yynrhs]],
                       &yyvsp[(yyi + 1) - (yynrhs)]
                       );
      YYFPRINTF (stderr, "\n");
    }
}

# define YY_REDUCE_PRINT(Rule)          \
do {                                    \
  if (yydebug)                          \
    yy_reduce_print (yyssp, yyvsp, Rule); \
} while (0)

/* Nonzero means print parse trace.  It is left uninitialized so that
   multiple parsers can coexist.  */
int yydebug;
#else /* !PCODEDEBUG */
/* With debugging disabled, all tracing hooks compile to nothing.  */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !PCODEDEBUG */


/* YYINITDEPTH -- initial size of the parser's stacks.  */
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif

/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
   if the built-in stack extension method is used).

   Do not make this value too large; the results are undefined if
   YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
   evaluated with infinite-precision integer arithmetic.  */
#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif


#if YYERROR_VERBOSE

# ifndef yystrlen
#  if defined __GLIBC__ && defined _STRING_H
#   define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S)))
#  else
/* Return the length of YYSTR.  Portable fallback for strlen used when
   glibc's is not known to be available.  */
static YYPTRDIFF_T
yystrlen (const char *yystr)
{
  YYPTRDIFF_T yylen;
  for (yylen = 0; yystr[yylen]; yylen++)
    continue;
  return yylen;
}
#  endif
# endif

# ifndef yystpcpy
#  if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
#   define yystpcpy stpcpy
#  else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
   YYDEST.
*/
/* Portable fallback for GNU stpcpy: copy and return the end pointer so
   the caller can append without a second scan.  */
static char *
yystpcpy (char *yydest, const char *yysrc)
{
  char *yyd = yydest;
  const char *yys = yysrc;

  while ((*yyd++ = *yys++) != '\0')
    continue;

  return yyd - 1;
}
#  endif
# endif

# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
   quotes and backslashes, so that it's suitable for yyerror.  The
   heuristic is that double-quoting is unnecessary unless the string
   contains an apostrophe, a comma, or backslash (other than
   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is
   null, do not copy; instead, return the length of what the result
   would have been.  */
static YYPTRDIFF_T
yytnamerr (char *yyres, const char *yystr)
{
  if (*yystr == '"')
    {
      YYPTRDIFF_T yyn = 0;
      char const *yyp = yystr;

      for (;;)
        switch (*++yyp)
          {
          case '\'':
          case ',':
            /* Quoted name contains a character that forces quoting:
               give up stripping and copy verbatim below.  */
            goto do_not_strip_quotes;

          case '\\':
            if (*++yyp != '\\')
              goto do_not_strip_quotes;
            else
              goto append;

          append:
          default:
            if (yyres)
              yyres[yyn] = *yyp;
            yyn++;
            break;

          case '"':
            /* Closing quote: terminate and report the stripped length.  */
            if (yyres)
              yyres[yyn] = '\0';
            return yyn;
          }
    do_not_strip_quotes: ;
    }

  if (yyres)
    return yystpcpy (yyres, yystr) - yyres;
  else
    return yystrlen (yystr);
}
# endif

/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
   about the unexpected token YYTOKEN for the state stack whose top is
   YYSSP.

   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is
   not large enough to hold the message.  In that case, also set
   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the
   required number of bytes is too large to store.  */
static int
yysyntax_error (YYPTRDIFF_T *yymsg_alloc, char **yymsg,
                yy_state_t *yyssp, int yytoken)
{
  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
  /* Internationalized format string. */
  const char *yyformat = YY_NULLPTR;
  /* Arguments of yyformat: reported tokens (one for the "unexpected",
     one per "expected"). */
  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
  /* Actual size of YYARG. */
  int yycount = 0;
  /* Cumulated lengths of YYARG.
  */
  YYPTRDIFF_T yysize = 0;

  /* There are many possibilities here to consider:
     - If this state is a consistent state with a default action, then
       the only way this function was invoked is if the default action
       is an error action.  In that case, don't check for expected
       tokens because there are none.
     - The only way there can be no lookahead present (in yychar) is if
       this state is a consistent state with a default action.  Thus,
       detecting the absence of a lookahead is sufficient to determine
       that there is no unexpected or expected token to report.  In that
       case, just report a simple "syntax error".
     - Don't assume there isn't a lookahead just because this state is a
       consistent state with a default action.  There might have been a
       previous inconsistent state, consistent state with a non-default
       action, or user semantic action that manipulated yychar.
     - Of course, the expected token list depends on states to have
       correct lookahead information, and it depends on the parser not
       to perform extra reductions after fetching a lookahead from the
       scanner and before detecting a syntax error.  Thus, state merging
       (from LALR or IELR) and default reductions corrupt the expected
       token list.  However, the list is correct for canonical LR with
       one exception: it will still contain any token that will not be
       accepted due to an error action in a later state.  */
  if (yytoken != YYEMPTY)
    {
      int yyn = yypact[+*yyssp];
      YYPTRDIFF_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
      yysize = yysize0;
      yyarg[yycount++] = yytname[yytoken];
      if (!yypact_value_is_default (yyn))
        {
          /* Start YYX at -YYN if negative to avoid negative indexes in
             YYCHECK.  In other words, skip the first -YYN actions for
             this state because they are default actions.  */
          int yyxbegin = yyn < 0 ? -yyn : 0;
          /* Stay within bounds of both yycheck and yytname.  */
          int yychecklim = YYLAST - yyn + 1;
          int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
          int yyx;

          for (yyx = yyxbegin; yyx < yyxend; ++yyx)
            if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
                && !yytable_value_is_error (yytable[yyx + yyn]))
              {
                if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
                  {
                    /* Too many expected tokens: fall back to the plain
                       one-argument message.  */
                    yycount = 1;
                    yysize = yysize0;
                    break;
                  }
                yyarg[yycount++] = yytname[yyx];
                {
                  YYPTRDIFF_T yysize1
                    = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
                  if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
                    yysize = yysize1;
                  else
                    return 2;
                }
              }
        }
    }

  switch (yycount)
    {
# define YYCASE_(N, S)                      \
      case N:                               \
        yyformat = S;                       \
        break
    default: /* Avoid compiler warnings. */
      YYCASE_(0, YY_("syntax error"));
      YYCASE_(1, YY_("syntax error, unexpected %s"));
      YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
      YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
      YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
      YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
# undef YYCASE_
    }

  {
    /* Don't count the "%s"s in the final size, but reserve room for
       the terminator.  */
    YYPTRDIFF_T yysize1 = yysize + (yystrlen (yyformat) - 2 * yycount) + 1;
    if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
      yysize = yysize1;
    else
      return 2;
  }

  if (*yymsg_alloc < yysize)
    {
      *yymsg_alloc = 2 * yysize;
      if (! (yysize <= *yymsg_alloc
             && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
        *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
      return 1;
    }

  /* Avoid sprintf, as that infringes on the user's name space.
     Don't have undefined behavior even if the translation
     produced a string with the wrong number of "%s"s.  */
  {
    char *yyp = *yymsg;
    int yyi = 0;
    while ((*yyp = *yyformat) != '\0')
      if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
        {
          yyp += yytnamerr (yyp, yyarg[yyi++]);
          yyformat += 2;
        }
      else
        {
          ++yyp;
          ++yyformat;
        }
  }
  return 0;
}
#endif /* YYERROR_VERBOSE */


/*-----------------------------------------------.
| Release the memory associated to this symbol.
| `-----------------------------------------------*/

/* Free the semantic value of a discarded symbol.  The values below are
   heap pointers owned by the parser stack; each case mirrors a
   %destructor from the grammar (token numbers 65/66, nonterminals
   82-89 and 92).  Symbols with no case (e.g. plain punctuation) own
   nothing and fall through to the default.  */
static void
yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep)
{
  YYUSE (yyvaluep);
  if (!yymsg)
    yymsg = "Deleting";
  /* NOTE(review): yylocationp is swallowed unexpanded by YY_SYMBOL_PRINT
     (this parser tracks no locations), so no such variable needs to
     exist in this scope.  */
  YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);

  YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
  switch (yytype)
    {
    case 65: /* INTEGER  */
      { delete ((*yyvaluep).i); }
      break;

    case 66: /* STRING  */
      { delete ((*yyvaluep).str); }
      break;

    case 82: /* rtlmid  */
      { delete ((*yyvaluep).sem); }
      break;

    case 83: /* statement  */
      /* A statement owns a vector of op templates: delete the elements,
         then the container.  ('vector' here is presumably std::vector
         of pointers via a using-declaration elsewhere in the file --
         confirm against the grammar's %union.)  */
      { if (((*yyvaluep).stmt) != (vector *)0) { for(int4 i=0;i<((*yyvaluep).stmt)->size();++i) delete (*((*yyvaluep).stmt))[i]; delete ((*yyvaluep).stmt);} }
      break;

    case 84: /* expr  */
      { delete ((*yyvaluep).tree); }
      break;

    case 85: /* sizedstar  */
      { delete ((*yyvaluep).starqual); }
      break;

    case 86: /* jumpdest  */
      { if (((*yyvaluep).varnode) != (VarnodeTpl *)0) delete ((*yyvaluep).varnode); }
      break;

    case 87: /* varnode  */
      { if (((*yyvaluep).varnode) != (VarnodeTpl *)0) delete ((*yyvaluep).varnode); }
      break;

    case 88: /* integervarnode  */
      { if (((*yyvaluep).varnode) != (VarnodeTpl *)0) delete ((*yyvaluep).varnode); }
      break;

    case 89: /* lhsvarnode  */
      { if (((*yyvaluep).varnode) != (VarnodeTpl *)0) delete ((*yyvaluep).varnode); }
      break;

    case 92: /* paramlist  */
      { for(int4 i=0;i<((*yyvaluep).param)->size();++i) delete (*((*yyvaluep).param))[i]; delete ((*yyvaluep).param); }
      break;

    default:
      break;
    }
  YY_IGNORE_MAYBE_UNINITIALIZED_END
}


/* The lookahead symbol.  */
int yychar;

/* The semantic value of the lookahead symbol.  */
YYSTYPE yylval;
/* Number of syntax errors so far.  */
int yynerrs;


/*----------.
| yyparse.  |
`----------*/

int
yyparse (void)
{
    yy_state_fast_t yystate;
    /* Number of tokens to shift before error messages enabled.  */
    int yyerrstatus;

    /* The stacks and their tools:
       'yyss': related to states.
       'yyvs': related to semantic values.

       Refer to the stacks through separate pointers, to allow yyoverflow
       to reallocate them elsewhere.  */

    /* The state stack.
*/ yy_state_t yyssa[YYINITDEPTH]; yy_state_t *yyss; yy_state_t *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYPTRDIFF_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; /*------------------------------------------------------------. | yynewstate -- push a new state, which is found in yystate. | `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; /*--------------------------------------------------------------------. | yysetstate -- set current state (the top of the stack) to yystate. | `--------------------------------------------------------------------*/ yysetstate: YYDPRINTF ((stderr, "Entering state %d\n", yystate)); YY_ASSERT (0 <= yystate && yystate < YYNSTATES); YY_IGNORE_USELESS_CAST_BEGIN *yyssp = YY_CAST (yy_state_t, yystate); YY_IGNORE_USELESS_CAST_END if (yyss + yystacksize - 1 <= yyssp) #if !defined yyoverflow && !defined YYSTACK_RELOCATE goto yyexhaustedlab; #else { /* Get the current used size of the three stacks, in elements. 
*/ YYPTRDIFF_T yysize = yyssp - yyss + 1; # if defined yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ yy_state_t *yyss1 = yyss; YYSTYPE *yyvs1 = yyvs; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * YYSIZEOF (*yyssp), &yyvs1, yysize * YYSIZEOF (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } # else /* defined YYSTACK_RELOCATE */ /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yy_state_t *yyss1 = yyss; union yyalloc *yyptr = YY_CAST (union yyalloc *, YYSTACK_ALLOC (YY_CAST (YYSIZE_T, YYSTACK_BYTES (yystacksize)))); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YY_IGNORE_USELESS_CAST_BEGIN YYDPRINTF ((stderr, "Stack size increased to %ld\n", YY_CAST (long, yystacksize))); YY_IGNORE_USELESS_CAST_END if (yyss + yystacksize - 1 <= yyssp) YYABORT; } #endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */ if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. 
*/ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Discard the shifted token. */ yychar = YYEMPTY; goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. 
*/ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 2: { pcode->setResult((yyvsp[-1].sem)); } break; case 3: { (yyval.sem) = new ConstructTpl(); } break; case 4: { (yyval.sem) = (yyvsp[-1].sem); if (!(yyval.sem)->addOpList(*(yyvsp[0].stmt))) { delete (yyvsp[0].stmt); yyerror("Multiple delayslot declarations"); YYERROR; } delete (yyvsp[0].stmt); } break; case 5: { (yyval.sem) = (yyvsp[-3].sem); pcode->newLocalDefinition((yyvsp[-1].str)); } break; case 6: { (yyval.sem) = (yyvsp[-5].sem); pcode->newLocalDefinition((yyvsp[-3].str),*(yyvsp[-1].i)); delete (yyvsp[-1].i); } break; case 7: { (yyvsp[-1].tree)->setOutput((yyvsp[-3].varnode)); (yyval.stmt) = ExprTree::toVector((yyvsp[-1].tree)); } break; case 8: { (yyval.stmt) = pcode->newOutput(true,(yyvsp[-1].tree),(yyvsp[-3].str)); } break; case 9: { (yyval.stmt) = pcode->newOutput(false,(yyvsp[-1].tree),(yyvsp[-3].str)); } break; case 10: { (yyval.stmt) = pcode->newOutput(true,(yyvsp[-1].tree),(yyvsp[-5].str),*(yyvsp[-3].i)); delete (yyvsp[-3].i); } break; case 11: { (yyval.stmt) = pcode->newOutput(true,(yyvsp[-1].tree),(yyvsp[-5].str),*(yyvsp[-3].i)); delete (yyvsp[-3].i); } break; case 12: { (yyval.stmt) = (vector *)0; string errmsg = "Redefinition of symbol: "+(yyvsp[-1].specsym)->getName(); yyerror(errmsg.c_str()); YYERROR; } break; case 13: { (yyval.stmt) = pcode->createStore((yyvsp[-4].starqual),(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 14: { (yyval.stmt) = pcode->createUserOpNoOut((yyvsp[-4].useropsym),(yyvsp[-2].param)); } break; case 15: { (yyval.stmt) = pcode->assignBitRange((yyvsp[-8].varnode),(uint4)*(yyvsp[-6].i),(uint4)*(yyvsp[-4].i),(yyvsp[-1].tree)); delete (yyvsp[-6].i), delete (yyvsp[-4].i); } break; case 16: { (yyval.stmt) = (vector *)0; delete (yyvsp[-3].varnode); delete (yyvsp[-1].i); yyerror("Illegal truncation on left-hand side of assignment"); YYERROR; } break; case 17: { (yyval.stmt) = (vector *)0; delete (yyvsp[-3].varnode); delete (yyvsp[-1].i); yyerror("Illegal 
subpiece on left-hand side of assignment"); YYERROR; } break; case 18: { (yyval.stmt) = pcode->createOpNoOut(CPUI_BRANCH,new ExprTree((yyvsp[-1].varnode))); } break; case 19: { (yyval.stmt) = pcode->createOpNoOut(CPUI_CBRANCH,new ExprTree((yyvsp[-1].varnode)),(yyvsp[-3].tree)); } break; case 20: { (yyval.stmt) = pcode->createOpNoOut(CPUI_BRANCHIND,(yyvsp[-2].tree)); } break; case 21: { (yyval.stmt) = pcode->createOpNoOut(CPUI_CALL,new ExprTree((yyvsp[-1].varnode))); } break; case 22: { (yyval.stmt) = pcode->createOpNoOut(CPUI_CALLIND,(yyvsp[-2].tree)); } break; case 23: { (yyval.stmt) = (vector *)0; yyerror("Must specify an indirect parameter for return"); YYERROR; } break; case 24: { (yyval.stmt) = pcode->createOpNoOut(CPUI_RETURN,(yyvsp[-2].tree)); } break; case 25: { (yyval.stmt) = pcode->placeLabel( (yyvsp[0].labelsym) ); } break; case 26: { (yyval.tree) = new ExprTree((yyvsp[0].varnode)); } break; case 27: { (yyval.tree) = pcode->createLoad((yyvsp[-1].starqual),(yyvsp[0].tree)); } break; case 28: { (yyval.tree) = (yyvsp[-1].tree); } break; case 29: { (yyval.tree) = pcode->createOp(CPUI_INT_ADD,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 30: { (yyval.tree) = pcode->createOp(CPUI_INT_SUB,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 31: { (yyval.tree) = pcode->createOp(CPUI_INT_EQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 32: { (yyval.tree) = pcode->createOp(CPUI_INT_NOTEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 33: { (yyval.tree) = pcode->createOp(CPUI_INT_LESS,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 34: { (yyval.tree) = pcode->createOp(CPUI_INT_LESSEQUAL,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 35: { (yyval.tree) = pcode->createOp(CPUI_INT_LESSEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 36: { (yyval.tree) = pcode->createOp(CPUI_INT_LESS,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 37: { (yyval.tree) = pcode->createOp(CPUI_INT_SLESS,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 38: { (yyval.tree) 
= pcode->createOp(CPUI_INT_SLESSEQUAL,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 39: { (yyval.tree) = pcode->createOp(CPUI_INT_SLESSEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 40: { (yyval.tree) = pcode->createOp(CPUI_INT_SLESS,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 41: { (yyval.tree) = pcode->createOp(CPUI_INT_2COMP,(yyvsp[0].tree)); } break; case 42: { (yyval.tree) = pcode->createOp(CPUI_INT_NEGATE,(yyvsp[0].tree)); } break; case 43: { (yyval.tree) = pcode->createOp(CPUI_INT_XOR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 44: { (yyval.tree) = pcode->createOp(CPUI_INT_AND,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 45: { (yyval.tree) = pcode->createOp(CPUI_INT_OR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 46: { (yyval.tree) = pcode->createOp(CPUI_INT_LEFT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 47: { (yyval.tree) = pcode->createOp(CPUI_INT_RIGHT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 48: { (yyval.tree) = pcode->createOp(CPUI_INT_SRIGHT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 49: { (yyval.tree) = pcode->createOp(CPUI_INT_MULT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 50: { (yyval.tree) = pcode->createOp(CPUI_INT_DIV,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 51: { (yyval.tree) = pcode->createOp(CPUI_INT_SDIV,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 52: { (yyval.tree) = pcode->createOp(CPUI_INT_REM,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 53: { (yyval.tree) = pcode->createOp(CPUI_INT_SREM,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 54: { (yyval.tree) = pcode->createOp(CPUI_BOOL_NEGATE,(yyvsp[0].tree)); } break; case 55: { (yyval.tree) = pcode->createOp(CPUI_BOOL_XOR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 56: { (yyval.tree) = pcode->createOp(CPUI_BOOL_AND,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 57: { (yyval.tree) = pcode->createOp(CPUI_BOOL_OR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 58: { (yyval.tree) = 
pcode->createOp(CPUI_FLOAT_EQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 59: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_NOTEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 60: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_LESS,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 61: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_LESS,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 62: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_LESSEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 63: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_LESSEQUAL,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 64: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_ADD,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 65: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_SUB,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 66: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_MULT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 67: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_DIV,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 68: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_NEG,(yyvsp[0].tree)); } break; case 69: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_ABS,(yyvsp[-1].tree)); } break; case 70: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_SQRT,(yyvsp[-1].tree)); } break; case 71: { (yyval.tree) = pcode->createOp(CPUI_INT_SEXT,(yyvsp[-1].tree)); } break; case 72: { (yyval.tree) = pcode->createOp(CPUI_INT_ZEXT,(yyvsp[-1].tree)); } break; case 73: { (yyval.tree) = pcode->createOp(CPUI_INT_CARRY,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 74: { (yyval.tree) = pcode->createOp(CPUI_INT_SCARRY,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 75: { (yyval.tree) = pcode->createOp(CPUI_INT_SBORROW,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 76: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_FLOAT2FLOAT,(yyvsp[-1].tree)); } break; case 77: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_INT2FLOAT,(yyvsp[-1].tree)); } break; case 78: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_NAN,(yyvsp[-1].tree)); } 
break; case 79: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_TRUNC,(yyvsp[-1].tree)); } break; case 80: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_CEIL,(yyvsp[-1].tree)); } break; case 81: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_FLOOR,(yyvsp[-1].tree)); } break; case 82: { (yyval.tree) = pcode->createOp(CPUI_FLOAT_ROUND,(yyvsp[-1].tree)); } break; case 83: { (yyval.tree) = pcode->createOp(CPUI_NEW,(yyvsp[-1].tree)); } break; case 84: { (yyval.tree) = pcode->createOp(CPUI_NEW,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 85: { (yyval.tree) = pcode->createOp(CPUI_SUBPIECE,new ExprTree((yyvsp[-3].specsym)->getVarnode()),new ExprTree((yyvsp[-1].varnode))); } break; case 86: { (yyval.tree) = pcode->createBitRange((yyvsp[-2].specsym),0,(uint4)(*(yyvsp[0].i) * 8)); delete (yyvsp[0].i); } break; case 87: { (yyval.tree) = pcode->createBitRange((yyvsp[-5].specsym),(uint4)*(yyvsp[-3].i),(uint4)*(yyvsp[-1].i)); delete (yyvsp[-3].i), delete (yyvsp[-1].i); } break; case 88: { (yyval.tree) = pcode->createUserOp((yyvsp[-3].useropsym),(yyvsp[-1].param)); } break; case 89: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = *(yyvsp[0].i); delete (yyvsp[0].i); (yyval.starqual)->id=ConstTpl((yyvsp[-3].spacesym)->getSpace()); } break; case 90: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = 0; (yyval.starqual)->id=ConstTpl((yyvsp[-1].spacesym)->getSpace()); } break; case 91: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = *(yyvsp[0].i); delete (yyvsp[0].i); (yyval.starqual)->id=ConstTpl(pcode->getDefaultSpace()); } break; case 92: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = 0; (yyval.starqual)->id=ConstTpl(pcode->getDefaultSpace()); } break; case 93: { VarnodeTpl *sym = (yyvsp[0].specsym)->getVarnode(); (yyval.varnode) = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),sym->getOffset(),ConstTpl(ConstTpl::j_curspace_size)); delete sym; } break; case 94: { (yyval.varnode) = new 
VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,*(yyvsp[0].i)),ConstTpl(ConstTpl::j_curspace_size)); delete (yyvsp[0].i); } break; case 95: { (yyval.varnode) = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::j_curspace_size)); yyerror("Parsed integer is too big (overflow)"); } break; case 96: { AddrSpace *spc = (yyvsp[-1].spacesym)->getSpace(); (yyval.varnode) = new VarnodeTpl(ConstTpl(spc),ConstTpl(ConstTpl::real,*(yyvsp[-3].i)),ConstTpl(ConstTpl::real,spc->getAddrSize())); delete (yyvsp[-3].i); } break; case 97: { (yyval.varnode) = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::j_relative,(yyvsp[0].labelsym)->getIndex()),ConstTpl(ConstTpl::real,sizeof(uintm))); (yyvsp[0].labelsym)->incrementRefCount(); } break; case 98: { (yyval.varnode) = (VarnodeTpl *)0; string errmsg = "Unknown jump destination: "+*(yyvsp[0].str); delete (yyvsp[0].str); yyerror(errmsg.c_str()); YYERROR; } break; case 99: { (yyval.varnode) = (yyvsp[0].specsym)->getVarnode(); } break; case 100: { (yyval.varnode) = (yyvsp[0].varnode); } break; case 101: { (yyval.varnode) = (VarnodeTpl *)0; string errmsg = "Unknown varnode parameter: "+*(yyvsp[0].str); delete (yyvsp[0].str); yyerror(errmsg.c_str()); YYERROR; } break; case 102: { (yyval.varnode) = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::real,*(yyvsp[0].i)),ConstTpl(ConstTpl::real,0)); delete (yyvsp[0].i); } break; case 103: { (yyval.varnode) = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::real,0)); yyerror("Parsed integer is too big (overflow)"); } break; case 104: { (yyval.varnode) = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::real,*(yyvsp[-2].i)),ConstTpl(ConstTpl::real,*(yyvsp[0].i))); delete (yyvsp[-2].i); delete (yyvsp[0].i); } break; case 105: { (yyval.varnode) = pcode->addressOf((yyvsp[0].varnode),0); } break; case 106: { (yyval.varnode) = 
pcode->addressOf((yyvsp[0].varnode),*(yyvsp[-1].i)); delete (yyvsp[-1].i); } break; case 107: { (yyval.varnode) = (yyvsp[0].specsym)->getVarnode(); } break; case 108: { (yyval.varnode) = (VarnodeTpl *)0; string errmsg = "Unknown assignment varnode: "+*(yyvsp[0].str); delete (yyvsp[0].str); yyerror(errmsg.c_str()); YYERROR; } break; case 109: { (yyval.labelsym) = (yyvsp[-1].labelsym); } break; case 110: { (yyval.labelsym) = pcode->defineLabel( (yyvsp[-1].str) ); } break; case 111: { (yyval.specsym) = (yyvsp[0].varsym); } break; case 112: { (yyval.specsym) = (yyvsp[0].operandsym); } break; case 113: { (yyval.specsym) = (yyvsp[0].specsym); } break; case 114: { (yyval.param) = new vector; } break; case 115: { (yyval.param) = new vector; (yyval.param)->push_back((yyvsp[0].tree)); } break; case 116: { (yyval.param) = (yyvsp[-2].param); (yyval.param)->push_back((yyvsp[0].tree)); } break; default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. 
*/ { const int yylhs = yyr1[yyn] - YYNTOKENS; const int yyi = yypgoto[yylhs] + *yyssp; yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp ? yytable[yyi] : yydefgoto[yylhs]); } goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = YY_CAST (char *, YYSTACK_ALLOC (YY_CAST (YYSIZE_T, yymsg_alloc))); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. 
| `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (0) YYERROR; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif /*-----------------------------------------------------. 
| yyreturn -- parsing is finished, return the result. | `-----------------------------------------------------*/ yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[+*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } #define IDENTREC_SIZE 46 const IdentRec PcodeLexer::idents[]= { // Sorted list of identifiers { "!=", OP_NOTEQUAL }, { "&&", OP_BOOL_AND }, { "<<", OP_LEFT }, { "<=", OP_LESSEQUAL }, { "==", OP_EQUAL }, { ">=", OP_GREATEQUAL }, { ">>", OP_RIGHT }, { "^^", OP_BOOL_XOR }, { "||", OP_BOOL_OR }, { "abs", OP_ABS }, { "borrow", OP_BORROW }, { "call", CALL_KEY }, { "carry", OP_CARRY }, { "ceil", OP_CEIL }, { "f!=", OP_FNOTEQUAL }, { "f*", OP_FMULT }, { "f+", OP_FADD }, { "f-", OP_FSUB }, { "f/", OP_FDIV }, { "f<", OP_FLESS }, { "f<=", OP_FLESSEQUAL }, { "f==", OP_FEQUAL }, { "f>", OP_FGREAT }, { "f>=", OP_FGREATEQUAL }, { "float2float", OP_FLOAT2FLOAT }, { "floor", OP_FLOOR }, { "goto", GOTO_KEY }, { "if", IF_KEY }, { "int2float", OP_INT2FLOAT }, { "local", LOCAL_KEY }, { "nan", OP_NAN }, { "return", RETURN_KEY }, { "round", OP_ROUND }, { "s%", OP_SREM }, { "s/", OP_SDIV }, { "s<", OP_SLESS }, { "s<=", OP_SLESSEQUAL }, { "s>", OP_SGREAT }, { "s>=", OP_SGREATEQUAL }, { "s>>",OP_SRIGHT }, { "sborrow", OP_SBORROW }, { "scarry", OP_SCARRY }, { "sext", OP_SEXT }, { "sqrt", OP_SQRT }, { "trunc", OP_TRUNC }, { "zext", OP_ZEXT } }; int4 PcodeLexer::findIdentifier(const string &str) const { int4 low = 0; int4 high = 
IDENTREC_SIZE-1; int4 comp; do { int4 targ = (low+high)/2; comp = str.compare(idents[targ].nm); if (comp < 0) // str comes before targ high = targ-1; else if (comp > 0) // str comes after targ low = targ + 1; else return targ; } while(low <= high); return -1; } int4 PcodeLexer::moveState(void) { switch(curstate) { case start: switch(curchar) { case '#': curstate = comment; return start; case '|': if (lookahead1 == '|') { starttoken(); curstate = special2; return start; } return punctuation; case '&': if (lookahead1 == '&') { starttoken(); curstate = special2; return start; } return punctuation; case '^': if (lookahead1 == '^') { starttoken(); curstate = special2; return start; } return punctuation; case '>': if ((lookahead1 == '>')||(lookahead1=='=')) { starttoken(); curstate = special2; return start; } return punctuation; case '<': if ((lookahead1 == '<')||(lookahead1=='=')) { starttoken(); curstate = special2; return start; } return punctuation; case '=': if (lookahead1 == '=') { starttoken(); curstate = special2; return start; } return punctuation; case '!': if (lookahead1 == '=') { starttoken(); curstate = special2; return start; } return punctuation; case '(': case ')': case ',': case ':': case '[': case ']': case ';': case '+': case '-': case '*': case '/': case '%': case '~': return punctuation; case 's': case 'f': if (curchar == 's') { if ((lookahead1 == '/')||(lookahead1=='%')) { starttoken(); curstate = special2; return start; } else if (lookahead1 == '<') { starttoken(); if (lookahead2 == '=') curstate = special3; else curstate = special2; return start; } else if (lookahead1 == '>') { starttoken(); if ((lookahead2=='>')||(lookahead2=='=')) curstate = special3; else curstate = special2; return start; } } else { // curchar == 'f' if ((lookahead1=='+')||(lookahead1=='-')||(lookahead1=='*')||(lookahead1=='/')) { starttoken(); curstate = special2; return start; } else if (((lookahead1=='=')||(lookahead1=='!'))&&(lookahead2=='=')) { starttoken(); curstate = 
special3; return start; } else if ((lookahead1=='<')||(lookahead1=='>')) { starttoken(); if (lookahead2 == '=') curstate = special3; else curstate = special2; return start; } } // fall through here, treat 's' and 'f' as ordinary characters case 'a': case 'b': case 'c': case 'd': case 'e': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case '.': starttoken(); if (isIdent(lookahead1)) { curstate = identifier; return start; } curstate = start; return identifier; case '0': starttoken(); if (lookahead1 == 'x') { curstate = hexstring; return start; } if (isDec(lookahead1)) { curstate = decstring; return start; } curstate = start; return decstring; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': starttoken(); if (isDec(lookahead1)) { curstate = decstring; return start; } curstate = start; return decstring; case '\n': case ' ': case '\t': case '\v': case '\r': return start; // Ignore whitespace case '\0': curstate = endstream; return endstream; default: curstate = illegal; return illegal; } break; case special2: advancetoken(); curstate = start; return identifier; case special3: advancetoken(); curstate = special32; return start; case special32: advancetoken(); curstate = start; return identifier; case comment: if (curchar == '\n') curstate = start; else if (curchar == '\0') { curstate = endstream; return endstream; } return start; case identifier: advancetoken(); if (isIdent(lookahead1)) return start; curstate = start; return identifier; case hexstring: advancetoken(); if (isHex(lookahead1)) return start; curstate = start; 
return hexstring; case decstring: advancetoken(); if (isDec(lookahead1)) return start; curstate = start; return decstring; default: curstate = endstream; } return endstream; } int4 PcodeLexer::getNextToken(void) { // Will return either: identifier, punctuation, hexstring, decstring, endstream, or illegal // If identifier, hexstring, or decstring, curtoken will be filled with the characters int4 tok; do { curchar = lookahead1; lookahead1 = lookahead2; if (endofstream) lookahead2 = '\0'; else { s->get(lookahead2); if (!(*s)) { endofstream = true; lookahead2 = '\0'; } } tok = moveState(); } while(tok == start); if (tok == identifier) { curtoken[tokpos] = '\0'; // Append null terminator curidentifier = curtoken; int4 num = findIdentifier(curidentifier); if (num < 0) // Not a keyword return STRING; return idents[num].id; } else if ((tok == hexstring)||(tok == decstring)) { curtoken[tokpos] = '\0'; istringstream s1(curtoken); s1.unsetf(ios::dec | ios::hex | ios::oct); s1 >> curnum; if (!s1) return BADINTEGER; return INTEGER; } else if (tok == endstream) { if (!endofstreamsent) { endofstreamsent = true; return ENDOFSTREAM; // Send 'official' end of stream token } return 0; // 0 means end of file to parser } else if (tok == illegal) return 0; return (int4)curchar; } void PcodeLexer::initialize(istream *t) { // Set up for new lex s = t; curstate = start; tokpos = 0; endofstream = false; endofstreamsent = false; lookahead1 = 0; lookahead2 = 0; s->get(lookahead1); // Buffer the first two characters if (!(*s)) { endofstream = true; lookahead1 = 0; return; } s->get(lookahead2); if (!(*s)) { endofstream = true; lookahead2 = 0; return; } } uint4 PcodeSnippet::allocateTemp(void) { // Allocate a variable in the unique space and return the offset uint4 res = tempbase; tempbase += 16; return res; } void PcodeSnippet::addSymbol(SleighSymbol *sym) { pair res; res = tree.insert( sym ); if (!res.second) { reportError((const Location *)0,"Duplicate symbol name: "+sym->getName()); delete 
sym; // Symbol is unattached to anything else } } void PcodeSnippet::clear(void) { // Clear everything, prepare for a new parse against the same language SymbolTree::iterator iter,tmpiter; iter = tree.begin(); while(iter != tree.end()) { SleighSymbol *sym = *iter; tmpiter = iter; ++iter; // Increment now, as node may be deleted if (sym->getType() != SleighSymbol::space_symbol) { delete sym; // Free any old local symbols tree.erase(tmpiter); } } if (result != (ConstructTpl *)0) { delete result; result = (ConstructTpl *)0; } // tempbase = 0; errorcount = 0; firsterror.clear(); resetLabelCount(); } PcodeSnippet::PcodeSnippet(const SleighBase *slgh) : PcodeCompile() { sleigh = slgh; tempbase = 0; errorcount = 0; result = (ConstructTpl *)0; setDefaultSpace(slgh->getDefaultCodeSpace()); setConstantSpace(slgh->getConstantSpace()); setUniqueSpace(slgh->getUniqueSpace()); int4 num = slgh->numSpaces(); for(int4 i=0;igetSpace(i); spacetype type = spc->getType(); if ((type==IPTR_CONSTANT)||(type==IPTR_PROCESSOR)||(type==IPTR_SPACEBASE)||(type==IPTR_INTERNAL)) tree.insert(new SpaceSymbol(spc)); } addSymbol(new FlowDestSymbol("inst_dest",slgh->getConstantSpace())); addSymbol(new FlowRefSymbol("inst_ref",slgh->getConstantSpace())); } PcodeSnippet::~PcodeSnippet(void) { SymbolTree::iterator iter; for(iter=tree.begin();iter!=tree.end();++iter) delete *iter; // Free ALL temporary symbols if (result != (ConstructTpl *)0) { delete result; result = (ConstructTpl *)0; } } void PcodeSnippet::reportError(const Location *loc, const string &msg) { if (errorcount == 0) firsterror = msg; errorcount += 1; } int4 PcodeSnippet::lex(void) { int4 tok = lexer.getNextToken(); if (tok == STRING) { SleighSymbol *sym; SleighSymbol tmpsym(lexer.getIdentifier()); SymbolTree::const_iterator iter = tree.find(&tmpsym); if (iter != tree.end()) sym = *iter; // Found a local symbol else sym = sleigh->findSymbol(lexer.getIdentifier()); if (sym != (SleighSymbol *)0) { switch(sym->getType()) { case 
SleighSymbol::space_symbol: yylval.spacesym = (SpaceSymbol *)sym; return SPACESYM; case SleighSymbol::userop_symbol: yylval.useropsym = (UserOpSymbol *)sym; return USEROPSYM; case SleighSymbol::varnode_symbol: yylval.varsym = (VarnodeSymbol *)sym; return VARSYM; case SleighSymbol::operand_symbol: yylval.operandsym = (OperandSymbol *)sym; return OPERANDSYM; case SleighSymbol::start_symbol: case SleighSymbol::end_symbol: case SleighSymbol::next2_symbol: case SleighSymbol::flowdest_symbol: case SleighSymbol::flowref_symbol: yylval.specsym = (SpecificSymbol *)sym; return JUMPSYM; case SleighSymbol::label_symbol: yylval.labelsym = (LabelSymbol *)sym; return LABELSYM; case SleighSymbol::dummy_symbol: break; default: // The translator may have other symbols in it that we don't want visible in the snippet compiler break; } } yylval.str = new string(lexer.getIdentifier()); return STRING; } if (tok == INTEGER) { yylval.i = new uintb(lexer.getNumber()); return INTEGER; } return tok; } bool PcodeSnippet::parseStream(istream &s) { lexer.initialize(&s); pcode = this; // Setup global object for yyparse int4 res = yyparse(); if (res != 0) { reportError((const Location *)0,"Syntax error"); return false; } if (!PcodeCompile::propagateSize(result)) { reportError((const Location *)0,"Could not resolve at least 1 variable size"); return false; } return true; } void PcodeSnippet::addOperand(const string &name,int4 index) { // Add an operand symbol for this snippet OperandSymbol *sym = new OperandSymbol(name,index,(Constructor *)0); addSymbol(sym); } int pcodelex(void) { return pcode->lex(); } int pcodeerror(const char *s) { pcode->reportError((const Location *)0,s); return 0; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/pcodeparse.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __PCODEPARSE_HH__
#define __PCODEPARSE_HH__

#include "pcodecompile.hh"
#include "sleighbase.hh"

namespace ghidra {

// Classes for compiling a standalone snippet of pcode, given an existing sleigh language

// One entry in the lexer's keyword table: identifier text and the parser token id it maps to
struct IdentRec {
  const char *nm;
  int4 id;
};

// Hand-written lexer for pcode snippets.  moveState() implements a character-driven
// state machine with two characters of lookahead; getNextToken() folds the raw lexer
// states into parser tokens.
class PcodeLexer {
public:
  enum {			// Lexer states
    start,			// Between tokens
    special2,			// Middle of special 2 character operator
    special3,			// First character of special 3 character operator
    special32,			// Second character of special 3 character operator
    comment,			// Middle of an endofline comment
    punctuation,		// Punctuation character
    identifier,			// Middle of an identifier
    hexstring,			// Middle of a hexidecimal number
    decstring,			// Middle of a decimal number
    endstream,			// Reached end of stream
    illegal			// Scanned an illegal character
  };
private:
  static const IdentRec idents[];	// Keyword table, binary searched by findIdentifier()
  int4 curstate;			// Current state of the scanner
  char curchar,lookahead1,lookahead2;	// Current character plus 2 characters of lookahead
  char curtoken[256];			// Characters accumulated for the token in progress
  int4 tokpos;				// Number of characters accumulated so far
  bool endofstream;			// Has the underlying stream been exhausted
  bool endofstreamsent;			// Has the ENDOFSTREAM token already been handed to the parser
  istream *s;				// The stream being scanned
  string curidentifier;			// Text of the most recent identifier token
  uintb curnum;				// Value of the most recent number token
  void starttoken(void) { curtoken[0] = curchar; tokpos = 1; }	// Begin a new token with the current character
  void advancetoken(void) { curtoken[tokpos++] = curchar; }	// Append the current character to the token
  bool isIdent(char c) const { return (isalnum(c)||(c=='_')||(c=='.')); }	// Is this a legal identifier character
  bool isHex(char c) const { return isxdigit(c); }	// Is this a legal hexadecimal digit
  bool isDec(char c) const { return isdigit(c); }	// Is this a legal decimal digit
  int4 findIdentifier(const string &str) const;		// Binary search the keyword table
  int4 moveState(void);				// Advance the state machine by one character
public:
  PcodeLexer(void) { s = (istream *)0; }	// Construct an uninitialized lexer; call initialize() before use
  void initialize(istream *t);			// Prepare to scan the given stream
  int4 getNextToken(void);			// Return the next parser token
  const string &getIdentifier(void) const { return curidentifier; }	// Text of the last identifier token
  uintb getNumber(void) const { return curnum; }	// Value of the last number token
};

// Compiler for a standalone snippet of pcode.  Pre-existing symbols (spaces,
// registers, user-ops) come from a sleigh language; temporaries and labels
// declared by the snippet live in a local symbol tree owned by this object.
class PcodeSnippet : public PcodeCompile {
  PcodeLexer lexer;
  const SleighBase *sleigh;	// Language from which we get symbols
  SymbolTree tree;		// Symbols in the local scope of the snippet (temporaries)
  uint4 tempbase;		// Next offset to hand out for temporaries in the unique space
  int4 errorcount;		// Number of errors reported during the current parse
  string firsterror;		// Message of the first error reported
  ConstructTpl *result;		// The compiled template (null until a parse succeeds)
  virtual uint4 allocateTemp(void);
  virtual void addSymbol(SleighSymbol *sym);
public:
  PcodeSnippet(const SleighBase *slgh);
  void setResult(ConstructTpl *res) { result = res; }	// Take ownership of a compiled template
  // Surrender ownership of the compiled template to the caller
  ConstructTpl *releaseResult(void) { ConstructTpl *res = result; result = (ConstructTpl *)0; return res; }
  virtual ~PcodeSnippet(void);
  virtual const Location *getLocation(SleighSymbol *sym) const { return (const Location *)0; }
  virtual void reportError(const Location *loc, const string &msg);
  virtual void reportWarning(const Location *loc, const string &msg) {}
  bool hasErrors(void) const { return (errorcount != 0); }	// Were any errors reported during the last parse
  const string getErrorMessage(void) const { return firsterror; }	// Get the first reported error message
  void setUniqueBase(uint4 val) { tempbase = val; }	// Set the starting offset for temporaries
  uint4 getUniqueBase(void) const { return tempbase; }	// Get the current offset for temporaries
  void clear(void);
  int lex(void);
  bool parseStream(istream& s);
  void addOperand(const string &name,int4 index);
};

} // End namespace ghidra
#endif


================================================
FILE: pypcode/sleigh/pcodeparse.y
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
*/ %define api.prefix {pcode} %{ #include "pcodeparse.hh" //#define YYERROR_VERBOSE namespace ghidra { extern int pcodelex(void); static PcodeSnippet *pcode; extern int pcodeerror(const char *str ); %} %union { uintb *i; string *str; vector *param; StarQuality *starqual; VarnodeTpl *varnode; ExprTree *tree; vector *stmt; ConstructTpl *sem; SpaceSymbol *spacesym; UserOpSymbol *useropsym; LabelSymbol *labelsym; OperandSymbol *operandsym; VarnodeSymbol *varsym; SpecificSymbol *specsym; } %expect 3 // Conflicts // 1 integervarnode ':' conflict (does ':' apply to INTEGER or varnode) // resolved by shifting which applies ':' to INTEGER (best solution) // 2 statement -> STRING . conflicts (STRING might be mislabelled varnode, or temporary declaration) // resolved by shifting which means assume this is a temporary declaration %left OP_BOOL_OR %left OP_BOOL_AND OP_BOOL_XOR %left '|' %left ';' %left '^' %left '&' %left OP_EQUAL OP_NOTEQUAL OP_FEQUAL OP_FNOTEQUAL %nonassoc '<' '>' OP_GREATEQUAL OP_LESSEQUAL OP_SLESS OP_SGREATEQUAL OP_SLESSEQUAL OP_SGREAT OP_FLESS OP_FGREAT OP_FLESSEQUAL OP_FGREATEQUAL %left OP_LEFT OP_RIGHT OP_SRIGHT %left '+' '-' OP_FADD OP_FSUB %left '*' '/' '%' OP_SDIV OP_SREM OP_FMULT OP_FDIV %right '!' 
'~' %token OP_ZEXT OP_CARRY OP_BORROW OP_SEXT OP_SCARRY OP_SBORROW OP_NAN OP_ABS %token OP_SQRT OP_CEIL OP_FLOOR OP_ROUND OP_INT2FLOAT OP_FLOAT2FLOAT %token OP_TRUNC OP_NEW %token BADINTEGER GOTO_KEY CALL_KEY RETURN_KEY IF_KEY ENDOFSTREAM LOCAL_KEY %token INTEGER %token STRING %token SPACESYM %token USEROPSYM %token VARSYM %token OPERANDSYM %token JUMPSYM %token LABELSYM %type paramlist %type rtlmid %type statement %type expr %type varnode integervarnode lhsvarnode jumpdest %type label %type sizedstar %type specificsymbol %destructor { delete $$; } INTEGER %destructor { delete $$; } STRING %destructor { for(int4 i=0;i<$$->size();++i) delete (*$$)[i]; delete $$; } paramlist %destructor { delete $$; } rtlmid %destructor { if ($$ != (vector *)0) { for(int4 i=0;i<$$->size();++i) delete (*$$)[i]; delete $$;} } statement %destructor { delete $$; } expr %destructor { if ($$ != (VarnodeTpl *)0) delete $$; } varnode integervarnode lhsvarnode jumpdest %destructor { delete $$; } sizedstar %% rtl: rtlmid ENDOFSTREAM { pcode->setResult($1); } ; rtlmid: /* EMPTY */ { $$ = new ConstructTpl(); } | rtlmid statement { $$ = $1; if (!$$->addOpList(*$2)) { delete $2; yyerror("Multiple delayslot declarations"); YYERROR; } delete $2; } | rtlmid LOCAL_KEY STRING ';' { $$ = $1; pcode->newLocalDefinition($3); } | rtlmid LOCAL_KEY STRING ':' INTEGER ';' { $$ = $1; pcode->newLocalDefinition($3,*$5); delete $5; } ; statement: lhsvarnode '=' expr ';' { $3->setOutput($1); $$ = ExprTree::toVector($3); } | LOCAL_KEY STRING '=' expr ';' { $$ = pcode->newOutput(true,$4,$2); } | STRING '=' expr ';' { $$ = pcode->newOutput(false,$3,$1); } | LOCAL_KEY STRING ':' INTEGER '=' expr ';' { $$ = pcode->newOutput(true,$6,$2,*$4); delete $4; } | STRING ':' INTEGER '=' expr ';' { $$ = pcode->newOutput(true,$5,$1,*$3); delete $3; } | LOCAL_KEY specificsymbol '=' { $$ = (vector *)0; string errmsg = "Redefinition of symbol: "+$2->getName(); yyerror(errmsg.c_str()); YYERROR; } | sizedstar expr '=' expr ';' { $$ = 
pcode->createStore($1,$2,$4); } | USEROPSYM '(' paramlist ')' ';' { $$ = pcode->createUserOpNoOut($1,$3); } | lhsvarnode '[' INTEGER ',' INTEGER ']' '=' expr ';' { $$ = pcode->assignBitRange($1,(uint4)*$3,(uint4)*$5,$8); delete $3, delete $5; } | varnode ':' INTEGER '=' { $$ = (vector *)0; delete $1; delete $3; yyerror("Illegal truncation on left-hand side of assignment"); YYERROR; } | varnode '(' INTEGER ')' { $$ = (vector *)0; delete $1; delete $3; yyerror("Illegal subpiece on left-hand side of assignment"); YYERROR; } | GOTO_KEY jumpdest ';' { $$ = pcode->createOpNoOut(CPUI_BRANCH,new ExprTree($2)); } | IF_KEY expr GOTO_KEY jumpdest ';' { $$ = pcode->createOpNoOut(CPUI_CBRANCH,new ExprTree($4),$2); } | GOTO_KEY '[' expr ']' ';' { $$ = pcode->createOpNoOut(CPUI_BRANCHIND,$3); } | CALL_KEY jumpdest ';' { $$ = pcode->createOpNoOut(CPUI_CALL,new ExprTree($2)); } | CALL_KEY '[' expr ']' ';' { $$ = pcode->createOpNoOut(CPUI_CALLIND,$3); } | RETURN_KEY ';' { $$ = (vector *)0; yyerror("Must specify an indirect parameter for return"); YYERROR; } | RETURN_KEY '[' expr ']' ';' { $$ = pcode->createOpNoOut(CPUI_RETURN,$3); } | label { $$ = pcode->placeLabel( $1 ); } ; expr: varnode { $$ = new ExprTree($1); } | sizedstar expr %prec '!' 
{ $$ = pcode->createLoad($1,$2); } | '(' expr ')' { $$ = $2; } | expr '+' expr { $$ = pcode->createOp(CPUI_INT_ADD,$1,$3); } | expr '-' expr { $$ = pcode->createOp(CPUI_INT_SUB,$1,$3); } | expr OP_EQUAL expr { $$ = pcode->createOp(CPUI_INT_EQUAL,$1,$3); } | expr OP_NOTEQUAL expr { $$ = pcode->createOp(CPUI_INT_NOTEQUAL,$1,$3); } | expr '<' expr { $$ = pcode->createOp(CPUI_INT_LESS,$1,$3); } | expr OP_GREATEQUAL expr { $$ = pcode->createOp(CPUI_INT_LESSEQUAL,$3,$1); } | expr OP_LESSEQUAL expr { $$ = pcode->createOp(CPUI_INT_LESSEQUAL,$1,$3); } | expr '>' expr { $$ = pcode->createOp(CPUI_INT_LESS,$3,$1); } | expr OP_SLESS expr { $$ = pcode->createOp(CPUI_INT_SLESS,$1,$3); } | expr OP_SGREATEQUAL expr { $$ = pcode->createOp(CPUI_INT_SLESSEQUAL,$3,$1); } | expr OP_SLESSEQUAL expr { $$ = pcode->createOp(CPUI_INT_SLESSEQUAL,$1,$3); } | expr OP_SGREAT expr { $$ = pcode->createOp(CPUI_INT_SLESS,$3,$1); } | '-' expr %prec '!' { $$ = pcode->createOp(CPUI_INT_2COMP,$2); } | '~' expr { $$ = pcode->createOp(CPUI_INT_NEGATE,$2); } | expr '^' expr { $$ = pcode->createOp(CPUI_INT_XOR,$1,$3); } | expr '&' expr { $$ = pcode->createOp(CPUI_INT_AND,$1,$3); } | expr '|' expr { $$ = pcode->createOp(CPUI_INT_OR,$1,$3); } | expr OP_LEFT expr { $$ = pcode->createOp(CPUI_INT_LEFT,$1,$3); } | expr OP_RIGHT expr { $$ = pcode->createOp(CPUI_INT_RIGHT,$1,$3); } | expr OP_SRIGHT expr { $$ = pcode->createOp(CPUI_INT_SRIGHT,$1,$3); } | expr '*' expr { $$ = pcode->createOp(CPUI_INT_MULT,$1,$3); } | expr '/' expr { $$ = pcode->createOp(CPUI_INT_DIV,$1,$3); } | expr OP_SDIV expr { $$ = pcode->createOp(CPUI_INT_SDIV,$1,$3); } | expr '%' expr { $$ = pcode->createOp(CPUI_INT_REM,$1,$3); } | expr OP_SREM expr { $$ = pcode->createOp(CPUI_INT_SREM,$1,$3); } | '!' 
expr { $$ = pcode->createOp(CPUI_BOOL_NEGATE,$2); } | expr OP_BOOL_XOR expr { $$ = pcode->createOp(CPUI_BOOL_XOR,$1,$3); } | expr OP_BOOL_AND expr { $$ = pcode->createOp(CPUI_BOOL_AND,$1,$3); } | expr OP_BOOL_OR expr { $$ = pcode->createOp(CPUI_BOOL_OR,$1,$3); } | expr OP_FEQUAL expr { $$ = pcode->createOp(CPUI_FLOAT_EQUAL,$1,$3); } | expr OP_FNOTEQUAL expr { $$ = pcode->createOp(CPUI_FLOAT_NOTEQUAL,$1,$3); } | expr OP_FLESS expr { $$ = pcode->createOp(CPUI_FLOAT_LESS,$1,$3); } | expr OP_FGREAT expr { $$ = pcode->createOp(CPUI_FLOAT_LESS,$3,$1); } | expr OP_FLESSEQUAL expr { $$ = pcode->createOp(CPUI_FLOAT_LESSEQUAL,$1,$3); } | expr OP_FGREATEQUAL expr { $$ = pcode->createOp(CPUI_FLOAT_LESSEQUAL,$3,$1); } | expr OP_FADD expr { $$ = pcode->createOp(CPUI_FLOAT_ADD,$1,$3); } | expr OP_FSUB expr { $$ = pcode->createOp(CPUI_FLOAT_SUB,$1,$3); } | expr OP_FMULT expr { $$ = pcode->createOp(CPUI_FLOAT_MULT,$1,$3); } | expr OP_FDIV expr { $$ = pcode->createOp(CPUI_FLOAT_DIV,$1,$3); } | OP_FSUB expr %prec '!' 
{ $$ = pcode->createOp(CPUI_FLOAT_NEG,$2); } | OP_ABS '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_ABS,$3); } | OP_SQRT '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_SQRT,$3); } | OP_SEXT '(' expr ')' { $$ = pcode->createOp(CPUI_INT_SEXT,$3); } | OP_ZEXT '(' expr ')' { $$ = pcode->createOp(CPUI_INT_ZEXT,$3); } | OP_CARRY '(' expr ',' expr ')' { $$ = pcode->createOp(CPUI_INT_CARRY,$3,$5); } | OP_SCARRY '(' expr ',' expr ')' { $$ = pcode->createOp(CPUI_INT_SCARRY,$3,$5); } | OP_SBORROW '(' expr ',' expr ')' { $$ = pcode->createOp(CPUI_INT_SBORROW,$3,$5); } | OP_FLOAT2FLOAT '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_FLOAT2FLOAT,$3); } | OP_INT2FLOAT '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_INT2FLOAT,$3); } | OP_NAN '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_NAN,$3); } | OP_TRUNC '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_TRUNC,$3); } | OP_CEIL '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_CEIL,$3); } | OP_FLOOR '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_FLOOR,$3); } | OP_ROUND '(' expr ')' { $$ = pcode->createOp(CPUI_FLOAT_ROUND,$3); }; | OP_NEW '(' expr ')' { $$ = pcode->createOp(CPUI_NEW,$3); }; | OP_NEW '(' expr ',' expr ')' { $$ = pcode->createOp(CPUI_NEW,$3,$5); } | specificsymbol '(' integervarnode ')' { $$ = pcode->createOp(CPUI_SUBPIECE,new ExprTree($1->getVarnode()),new ExprTree($3)); } | specificsymbol ':' INTEGER { $$ = pcode->createBitRange($1,0,(uint4)(*$3 * 8)); delete $3; } | specificsymbol '[' INTEGER ',' INTEGER ']' { $$ = pcode->createBitRange($1,(uint4)*$3,(uint4)*$5); delete $3, delete $5; } | USEROPSYM '(' paramlist ')' { $$ = pcode->createUserOp($1,$3); } ; sizedstar: '*' '[' SPACESYM ']' ':' INTEGER { $$ = new StarQuality; $$->size = *$6; delete $6; $$->id=ConstTpl($3->getSpace()); } | '*' '[' SPACESYM ']' { $$ = new StarQuality; $$->size = 0; $$->id=ConstTpl($3->getSpace()); } | '*' ':' INTEGER { $$ = new StarQuality; $$->size = *$3; delete $3; $$->id=ConstTpl(pcode->getDefaultSpace()); } | '*' { $$ = new StarQuality; 
$$->size = 0; $$->id=ConstTpl(pcode->getDefaultSpace()); } ; jumpdest: JUMPSYM { VarnodeTpl *sym = $1->getVarnode(); $$ = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),sym->getOffset(),ConstTpl(ConstTpl::j_curspace_size)); delete sym; } | INTEGER { $$ = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::j_curspace_size)); delete $1; } | BADINTEGER { $$ = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::j_curspace_size)); yyerror("Parsed integer is too big (overflow)"); } | INTEGER '[' SPACESYM ']' { AddrSpace *spc = $3->getSpace(); $$ = new VarnodeTpl(ConstTpl(spc),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,spc->getAddrSize())); delete $1; } | label { $$ = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::j_relative,$1->getIndex()),ConstTpl(ConstTpl::real,sizeof(uintm))); $1->incrementRefCount(); } | STRING { $$ = (VarnodeTpl *)0; string errmsg = "Unknown jump destination: "+*$1; delete $1; yyerror(errmsg.c_str()); YYERROR; } ; varnode: specificsymbol { $$ = $1->getVarnode(); } | integervarnode { $$ = $1; } | STRING { $$ = (VarnodeTpl *)0; string errmsg = "Unknown varnode parameter: "+*$1; delete $1; yyerror(errmsg.c_str()); YYERROR; } ; integervarnode: INTEGER { $$ = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,0)); delete $1; } | BADINTEGER { $$ = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::real,0)); yyerror("Parsed integer is too big (overflow)"); } | INTEGER ':' INTEGER { $$ = new VarnodeTpl(ConstTpl(pcode->getConstantSpace()),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,*$3)); delete $1; delete $3; } | '&' varnode { $$ = pcode->addressOf($2,0); } | '&' ':' INTEGER varnode { $$ = pcode->addressOf($4,*$3); delete $3; } ; lhsvarnode: specificsymbol { $$ = $1->getVarnode(); } | STRING { $$ = (VarnodeTpl *)0; string errmsg = "Unknown 
assignment varnode: "+*$1; delete $1; yyerror(errmsg.c_str()); YYERROR; } ; label: '<' LABELSYM '>' { $$ = $2; } | '<' STRING '>' { $$ = pcode->defineLabel( $2 ); } ; specificsymbol: VARSYM { $$ = $1; } | OPERANDSYM { $$ = $1; } | JUMPSYM { $$ = $1; } ; paramlist: /* EMPTY */ { $$ = new vector; } | expr { $$ = new vector; $$->push_back($1); } | paramlist ',' expr { $$ = $1; $$->push_back($3); } ; %% #define IDENTREC_SIZE 46 const IdentRec PcodeLexer::idents[]= { // Sorted list of identifiers { "!=", OP_NOTEQUAL }, { "&&", OP_BOOL_AND }, { "<<", OP_LEFT }, { "<=", OP_LESSEQUAL }, { "==", OP_EQUAL }, { ">=", OP_GREATEQUAL }, { ">>", OP_RIGHT }, { "^^", OP_BOOL_XOR }, { "||", OP_BOOL_OR }, { "abs", OP_ABS }, { "borrow", OP_BORROW }, { "call", CALL_KEY }, { "carry", OP_CARRY }, { "ceil", OP_CEIL }, { "f!=", OP_FNOTEQUAL }, { "f*", OP_FMULT }, { "f+", OP_FADD }, { "f-", OP_FSUB }, { "f/", OP_FDIV }, { "f<", OP_FLESS }, { "f<=", OP_FLESSEQUAL }, { "f==", OP_FEQUAL }, { "f>", OP_FGREAT }, { "f>=", OP_FGREATEQUAL }, { "float2float", OP_FLOAT2FLOAT }, { "floor", OP_FLOOR }, { "goto", GOTO_KEY }, { "if", IF_KEY }, { "int2float", OP_INT2FLOAT }, { "local", LOCAL_KEY }, { "nan", OP_NAN }, { "return", RETURN_KEY }, { "round", OP_ROUND }, { "s%", OP_SREM }, { "s/", OP_SDIV }, { "s<", OP_SLESS }, { "s<=", OP_SLESSEQUAL }, { "s>", OP_SGREAT }, { "s>=", OP_SGREATEQUAL }, { "s>>",OP_SRIGHT }, { "sborrow", OP_SBORROW }, { "scarry", OP_SCARRY }, { "sext", OP_SEXT }, { "sqrt", OP_SQRT }, { "trunc", OP_TRUNC }, { "zext", OP_ZEXT } }; int4 PcodeLexer::findIdentifier(const string &str) const { int4 low = 0; int4 high = IDENTREC_SIZE-1; int4 comp; do { int4 targ = (low+high)/2; comp = str.compare(idents[targ].nm); if (comp < 0) // str comes before targ high = targ-1; else if (comp > 0) // str comes after targ low = targ + 1; else return targ; } while(low <= high); return -1; } int4 PcodeLexer::moveState(void) { switch(curstate) { case start: switch(curchar) { case '#': curstate = comment; 
return start; case '|': if (lookahead1 == '|') { starttoken(); curstate = special2; return start; } return punctuation; case '&': if (lookahead1 == '&') { starttoken(); curstate = special2; return start; } return punctuation; case '^': if (lookahead1 == '^') { starttoken(); curstate = special2; return start; } return punctuation; case '>': if ((lookahead1 == '>')||(lookahead1=='=')) { starttoken(); curstate = special2; return start; } return punctuation; case '<': if ((lookahead1 == '<')||(lookahead1=='=')) { starttoken(); curstate = special2; return start; } return punctuation; case '=': if (lookahead1 == '=') { starttoken(); curstate = special2; return start; } return punctuation; case '!': if (lookahead1 == '=') { starttoken(); curstate = special2; return start; } return punctuation; case '(': case ')': case ',': case ':': case '[': case ']': case ';': case '+': case '-': case '*': case '/': case '%': case '~': return punctuation; case 's': case 'f': if (curchar == 's') { if ((lookahead1 == '/')||(lookahead1=='%')) { starttoken(); curstate = special2; return start; } else if (lookahead1 == '<') { starttoken(); if (lookahead2 == '=') curstate = special3; else curstate = special2; return start; } else if (lookahead1 == '>') { starttoken(); if ((lookahead2=='>')||(lookahead2=='=')) curstate = special3; else curstate = special2; return start; } } else { // curchar == 'f' if ((lookahead1=='+')||(lookahead1=='-')||(lookahead1=='*')||(lookahead1=='/')) { starttoken(); curstate = special2; return start; } else if (((lookahead1=='=')||(lookahead1=='!'))&&(lookahead2=='=')) { starttoken(); curstate = special3; return start; } else if ((lookahead1=='<')||(lookahead1=='>')) { starttoken(); if (lookahead2 == '=') curstate = special3; else curstate = special2; return start; } } // fall through here, treat 's' and 'f' as ordinary characters case 'a': case 'b': case 'c': case 'd': case 'e': case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n': case 
'o': case 'p': case 'q': case 'r': case 't': case 'u': case 'v': case 'w': case 'x': case 'y': case 'z': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': case 'Y': case 'Z': case '_': case '.': starttoken(); if (isIdent(lookahead1)) { curstate = identifier; return start; } curstate = start; return identifier; case '0': starttoken(); if (lookahead1 == 'x') { curstate = hexstring; return start; } if (isDec(lookahead1)) { curstate = decstring; return start; } curstate = start; return decstring; case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': starttoken(); if (isDec(lookahead1)) { curstate = decstring; return start; } curstate = start; return decstring; case '\n': case ' ': case '\t': case '\v': case '\r': return start; // Ignore whitespace case '\0': curstate = endstream; return endstream; default: curstate = illegal; return illegal; } break; case special2: advancetoken(); curstate = start; return identifier; case special3: advancetoken(); curstate = special32; return start; case special32: advancetoken(); curstate = start; return identifier; case comment: if (curchar == '\n') curstate = start; else if (curchar == '\0') { curstate = endstream; return endstream; } return start; case identifier: advancetoken(); if (isIdent(lookahead1)) return start; curstate = start; return identifier; case hexstring: advancetoken(); if (isHex(lookahead1)) return start; curstate = start; return hexstring; case decstring: advancetoken(); if (isDec(lookahead1)) return start; curstate = start; return decstring; default: curstate = endstream; } return endstream; } int4 PcodeLexer::getNextToken(void) { // Will return either: identifier, punctuation, hexstring, decstring, endstream, or illegal // If identifier, hexstring, or decstring, curtoken will be filled with 
the characters int4 tok; do { curchar = lookahead1; lookahead1 = lookahead2; if (endofstream) lookahead2 = '\0'; else { s->get(lookahead2); if (!(*s)) { endofstream = true; lookahead2 = '\0'; } } tok = moveState(); } while(tok == start); if (tok == identifier) { curtoken[tokpos] = '\0'; // Append null terminator curidentifier = curtoken; int4 num = findIdentifier(curidentifier); if (num < 0) // Not a keyword return STRING; return idents[num].id; } else if ((tok == hexstring)||(tok == decstring)) { curtoken[tokpos] = '\0'; istringstream s1(curtoken); s1.unsetf(ios::dec | ios::hex | ios::oct); s1 >> curnum; if (!s1) return BADINTEGER; return INTEGER; } else if (tok == endstream) { if (!endofstreamsent) { endofstreamsent = true; return ENDOFSTREAM; // Send 'official' end of stream token } return 0; // 0 means end of file to parser } else if (tok == illegal) return 0; return (int4)curchar; } void PcodeLexer::initialize(istream *t) { // Set up for new lex s = t; curstate = start; tokpos = 0; endofstream = false; endofstreamsent = false; lookahead1 = 0; lookahead2 = 0; s->get(lookahead1); // Buffer the first two characters if (!(*s)) { endofstream = true; lookahead1 = 0; return; } s->get(lookahead2); if (!(*s)) { endofstream = true; lookahead2 = 0; return; } } uint4 PcodeSnippet::allocateTemp(void) { // Allocate a variable in the unique space and return the offset uint4 res = tempbase; tempbase += 16; return res; } void PcodeSnippet::addSymbol(SleighSymbol *sym) { pair res; res = tree.insert( sym ); if (!res.second) { reportError((const Location *)0,"Duplicate symbol name: "+sym->getName()); delete sym; // Symbol is unattached to anything else } } void PcodeSnippet::clear(void) { // Clear everything, prepare for a new parse against the same language SymbolTree::iterator iter,tmpiter; iter = tree.begin(); while(iter != tree.end()) { SleighSymbol *sym = *iter; tmpiter = iter; ++iter; // Increment now, as node may be deleted if (sym->getType() != SleighSymbol::space_symbol) 
{ delete sym; // Free any old local symbols tree.erase(tmpiter); } } if (result != (ConstructTpl *)0) { delete result; result = (ConstructTpl *)0; } // tempbase = 0; errorcount = 0; firsterror.clear(); resetLabelCount(); } PcodeSnippet::PcodeSnippet(const SleighBase *slgh) : PcodeCompile() { sleigh = slgh; tempbase = 0; errorcount = 0; result = (ConstructTpl *)0; setDefaultSpace(slgh->getDefaultCodeSpace()); setConstantSpace(slgh->getConstantSpace()); setUniqueSpace(slgh->getUniqueSpace()); int4 num = slgh->numSpaces(); for(int4 i=0;igetSpace(i); spacetype type = spc->getType(); if ((type==IPTR_CONSTANT)||(type==IPTR_PROCESSOR)||(type==IPTR_SPACEBASE)||(type==IPTR_INTERNAL)) tree.insert(new SpaceSymbol(spc)); } addSymbol(new FlowDestSymbol("inst_dest",slgh->getConstantSpace())); addSymbol(new FlowRefSymbol("inst_ref",slgh->getConstantSpace())); } PcodeSnippet::~PcodeSnippet(void) { SymbolTree::iterator iter; for(iter=tree.begin();iter!=tree.end();++iter) delete *iter; // Free ALL temporary symbols if (result != (ConstructTpl *)0) { delete result; result = (ConstructTpl *)0; } } void PcodeSnippet::reportError(const Location *loc, const string &msg) { if (errorcount == 0) firsterror = msg; errorcount += 1; } int4 PcodeSnippet::lex(void) { int4 tok = lexer.getNextToken(); if (tok == STRING) { SleighSymbol *sym; SleighSymbol tmpsym(lexer.getIdentifier()); SymbolTree::const_iterator iter = tree.find(&tmpsym); if (iter != tree.end()) sym = *iter; // Found a local symbol else sym = sleigh->findSymbol(lexer.getIdentifier()); if (sym != (SleighSymbol *)0) { switch(sym->getType()) { case SleighSymbol::space_symbol: yylval.spacesym = (SpaceSymbol *)sym; return SPACESYM; case SleighSymbol::userop_symbol: yylval.useropsym = (UserOpSymbol *)sym; return USEROPSYM; case SleighSymbol::varnode_symbol: yylval.varsym = (VarnodeSymbol *)sym; return VARSYM; case SleighSymbol::operand_symbol: yylval.operandsym = (OperandSymbol *)sym; return OPERANDSYM; case SleighSymbol::start_symbol: 
case SleighSymbol::end_symbol: case SleighSymbol::next2_symbol: case SleighSymbol::flowdest_symbol: case SleighSymbol::flowref_symbol: yylval.specsym = (SpecificSymbol *)sym; return JUMPSYM; case SleighSymbol::label_symbol: yylval.labelsym = (LabelSymbol *)sym; return LABELSYM; case SleighSymbol::dummy_symbol: break; default: // The translator may have other symbols in it that we don't want visible in the snippet compiler break; } } yylval.str = new string(lexer.getIdentifier()); return STRING; } if (tok == INTEGER) { yylval.i = new uintb(lexer.getNumber()); return INTEGER; } return tok; } bool PcodeSnippet::parseStream(istream &s) { lexer.initialize(&s); pcode = this; // Setup global object for yyparse int4 res = yyparse(); if (res != 0) { reportError((const Location *)0,"Syntax error"); return false; } if (!PcodeCompile::propagateSize(result)) { reportError((const Location *)0,"Could not resolve at least 1 variable size"); return false; } return true; } void PcodeSnippet::addOperand(const string &name,int4 index) { // Add an operand symbol for this snippet OperandSymbol *sym = new OperandSymbol(name,index,(Constructor *)0); addSymbol(sym); } int pcodelex(void) { return pcode->lex(); } int pcodeerror(const char *s) { pcode->reportError((const Location *)0,s); return 0; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/pcoderaw.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include "pcoderaw.hh" #include "translate.hh" namespace ghidra { /// Build this VarnodeData from an \, \, or \ element. /// \param decoder is the stream decoder void VarnodeData::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(); decodeFromAttributes(decoder); decoder.closeElement(elemId); } /// Collect attributes for the VarnodeData possibly from amidst other attributes /// \param decoder is the stream decoder void VarnodeData::decodeFromAttributes(Decoder &decoder) { space = (AddrSpace *)0; size = 0; for(;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; // Its possible to have no attributes in an tag if (attribId == ATTRIB_SPACE) { space = decoder.readSpace(); decoder.rewindAttributes(); offset = space->decodeAttributes(decoder,size); break; } else if (attribId == ATTRIB_NAME) { const Translate *trans = decoder.getAddrSpaceManager()->getDefaultCodeSpace()->getTrans(); const VarnodeData &point(trans->getRegister(decoder.readString())); *this = point; break; } } } /// Return \b true, if \b this, as an address range, contains the other address range /// \param op2 is the other VarnodeData to test for containment /// \return \b true if \b this contains the other bool VarnodeData::contains(const VarnodeData &op2) const { if (space != op2.space) return false; if (op2.offset < offset) return false; if ((offset + (size-1)) < (op2.offset + (op2.size-1))) return false; return true; } /// If \b this and \b lo form a contiguous range of bytes, where \b this makes up the most significant /// bytes and \b lo makes up the least significant bytes, return \b true. 
/// \param lo is the given VarnodeData to compare with /// \return \b true if the two byte ranges are contiguous and in order bool VarnodeData::isContiguous(const VarnodeData &lo) const { if (space != lo.space) return false; if (space->isBigEndian()) { uintb nextoff = space->wrapOffset(offset+size); if (nextoff == lo.offset) return true; } else { uintb nextoff = space->wrapOffset(lo.offset+lo.size); if (nextoff == offset) return true; } return false; } /// This assumes the \ element is already open. /// Decode info suitable for call to PcodeEmit::dump. The output pointer is changed to null if there /// is no output for this op, otherwise the existing pointer is used to store the output. /// \param decoder is the stream decoder /// \param isize is the (preparsed) number of input parameters for the p-code op /// \param invar is an array of storage for the input Varnodes /// \param outvar is a (handle) to the storage for the output Varnode /// \return the p-code op OpCode OpCode PcodeOpRaw::decode(Decoder &decoder,int4 isize,VarnodeData *invar,VarnodeData **outvar) { OpCode opcode = (OpCode)decoder.readSignedInteger(ATTRIB_CODE); uint4 subId = decoder.peekElement(); if (subId == ELEM_VOID) { decoder.openElement(); decoder.closeElement(subId); *outvar = (VarnodeData *)0; } else { (*outvar)->decode(decoder); } for(int4 i=0;igetConstantSpace(); invar[i].offset = (uintb)(uintp)decoder.readSpace(ATTRIB_NAME); invar[i].size = sizeof(void *); decoder.closeElement(subId); } else invar[i].decode(decoder); } return opcode; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/pcoderaw.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file pcoderaw.hh /// \brief Raw descriptions of varnodes and p-code ops #ifndef __PCODERAW_HH__ #define __PCODERAW_HH__ #include "address.hh" #include "opbehavior.hh" namespace ghidra { /// \brief Data defining a specific memory location /// /// Within the decompiler's model of a processor, any register, /// memory location, or other variable can always be represented /// as an address space, an offset within the space, and the /// size of the sequence of bytes. This is more commonly referred /// to as a Varnode, but this is a bare-bones container /// for the data that doesn't have the cached attributes and /// the dataflow links of the Varnode within its syntax tree. 
struct VarnodeData { AddrSpace *space; ///< The address space uintb offset; ///< The offset within the space uint4 size; ///< The number of bytes in the location bool operator<(const VarnodeData &op2) const; ///< An ordering for VarnodeData bool operator==(const VarnodeData &op2) const; ///< Compare for equality bool operator!=(const VarnodeData &op2) const; ///< Compare for inequality /// Get the location of the varnode as an address Address getAddr(void) const; /// Treat \b this as a constant and recover encoded address space AddrSpace *getSpaceFromConst(void) const; /// Recover this object from a stream void decode(Decoder &decoder); /// Recover \b this object from attributes of the current open element void decodeFromAttributes(Decoder &decoder); /// Does \b this container another given VarnodeData bool contains(const VarnodeData &op2) const; /// Is \b this contiguous (as the most significant piece) with the given VarnodeData bool isContiguous(const VarnodeData &lo) const; }; /// VarnodeData can be sorted in terms of the space its in /// (the space's \e index), the offset within the space, /// and finally by the size. /// \param op2 is the object being compared to /// \return true if \e this is less than \e op2 inline bool VarnodeData::operator<(const VarnodeData &op2) const { if (space != op2.space) return (space->getIndex() < op2.space->getIndex()); if (offset != op2.offset) return (offset < op2.offset); return (size > op2.size); // BIG sizes come first } /// Compare VarnodeData for equality. The space, offset, and size /// must all be exactly equal /// \param op2 is the object being compared to /// \return true if \e this is equal to \e op2 inline bool VarnodeData::operator==(const VarnodeData &op2) const { if (space != op2.space) return false; if (offset != op2.offset) return false; return (size == op2.size); } /// Compare VarnodeData for inequality. If either the space, /// offset, or size is not equal, return \b true. 
/// \param op2 is the object being compared to /// \return true if \e this is not equal to \e op2 inline bool VarnodeData::operator!=(const VarnodeData &op2) const { if (space != op2.space) return true; if (offset != op2.offset) return true; return (size != op2.size); } /// This is a convenience function to construct a full Address from the /// VarnodeData's address space and offset /// \return the address of the varnode inline Address VarnodeData::getAddr(void) const { return Address(space,offset); } /// \return the encoded AddrSpace inline AddrSpace *VarnodeData::getSpaceFromConst(void) const { return (AddrSpace *)(uintp)offset; } /// \brief A low-level representation of a single pcode operation /// /// This is just the minimum amount of data to represent a pcode operation /// An opcode, sequence number, optional output varnode /// and input varnodes class PcodeOpRaw { OpBehavior *behave; ///< The opcode for this operation SeqNum seq; ///< Identifying address and index of this operation VarnodeData *out; ///< Output varnode triple vector in; ///< Raw varnode inputs to this op public: void setBehavior(OpBehavior *be); ///< Set the opcode for this op OpBehavior *getBehavior(void) const; ///< Retrieve the behavior for this op OpCode getOpcode(void) const; ///< Get the opcode for this op void setSeqNum(const Address &a,uintm b); ///< Set the sequence number const SeqNum &getSeqNum(void) const; ///< Retrieve the sequence number const Address &getAddr(void) const; ///< Get address of this operation void setOutput(VarnodeData *o); ///< Set the output varnode for this op VarnodeData *getOutput(void) const; ///< Retrieve the output varnode for this op void addInput(VarnodeData *i); ///< Add an additional input varnode to this op void clearInputs(void); ///< Remove all input varnodes to this op int4 numInput(void) const; ///< Get the number of input varnodes to this op VarnodeData *getInput(int4 i) const; ///< Get the i-th input varnode for this op /// \brief Decode the 
raw OpCode and input/output Varnode data for a PcodeOp static OpCode decode(Decoder &decoder,int4 isize,VarnodeData *invar,VarnodeData **outvar); }; /// The core behavior for this operation is controlled by an OpBehavior object /// which knows how output is determined given inputs. This routine sets that object /// \param be is the behavior object inline void PcodeOpRaw::setBehavior(OpBehavior *be) { behave = be; } /// Get the underlying behavior object for this pcode operation. From this /// object you can determine how the object evaluates inputs to get the output /// \return the behavior object inline OpBehavior *PcodeOpRaw::getBehavior(void) const { return behave; } /// The possible types of pcode operations are enumerated by OpCode /// This routine retrieves the enumeration value for this particular op /// \return the opcode value inline OpCode PcodeOpRaw::getOpcode(void) const { return behave->getOpcode(); } /// Every pcode operation has a \b sequence \b number /// which associates the operation with the address of the machine instruction /// being translated and an order number which provides an index for this /// particular operation within the entire translation of the machine instruction /// \param a is the instruction address /// \param b is the order number inline void PcodeOpRaw::setSeqNum(const Address &a,uintm b) { seq = SeqNum(a,b); } /// Every pcode operation has a \b sequence \b number which associates /// the operation with the address of the machine instruction being translated /// and an index number for this operation within the translation. 
/// \return a reference to the sequence number inline const SeqNum &PcodeOpRaw::getSeqNum(void) const { return seq; } /// This is a convenience function to get the address of the machine instruction /// (of which this pcode op is a translation) /// \return the machine instruction address inline const Address &PcodeOpRaw::getAddr(void) const { return seq.getAddr(); } /// Most pcode operations output to a varnode. This routine sets what that varnode is. /// \param o is the varnode to set as output inline void PcodeOpRaw::setOutput(VarnodeData *o) { out = o; } /// Most pcode operations have an output varnode. This routine retrieves that varnode. /// \return the output varnode or \b null if there is no output inline VarnodeData *PcodeOpRaw::getOutput(void) const { return out; } /// A PcodeOpRaw is initially created with no input varnodes. Inputs are added with this method. /// Varnodes are added in order, so the first addInput call creates input 0, for example. /// \param i is the varnode to be added as input inline void PcodeOpRaw::addInput(VarnodeData *i) { in.push_back(i); } /// If the inputs to a pcode operation need to be changed, this routine clears the existing /// inputs so new ones can be added. inline void PcodeOpRaw::clearInputs(void) { in.clear(); } /// \return the number of inputs inline int4 PcodeOpRaw::numInput(void) const { return in.size(); } /// Input varnodes are indexed starting at 0. This retrieves the input varnode by index. /// The index \e must be in range, or unpredicatable behavior will result. Use the numInput method /// to get the number of inputs. 
/// \param i is the index of the desired input /// \return the desired input varnode inline VarnodeData *PcodeOpRaw::getInput(int4 i) const { return in[i]; } } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/semantics.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "semantics.hh" #include "translate.hh" namespace ghidra { ConstTpl::ConstTpl(const_type tp) { // Constructor for relative jump constants and uniques type = tp; } ConstTpl::ConstTpl(const_type tp,uintb val) { // Constructor for real constants type = tp; value_real = val; value.handle_index = 0; select = v_space; } ConstTpl::ConstTpl(const_type tp,int4 ht,v_field vf) { // Constructor for handle constant type = handle; value.handle_index = ht; select = vf; value_real = 0; } ConstTpl::ConstTpl(const_type tp,int4 ht,v_field vf,uintb plus) { type = handle; value.handle_index = ht; select = vf; value_real = plus; } ConstTpl::ConstTpl(AddrSpace *sid) { type = spaceid; value.spaceid = sid; } bool ConstTpl::isConstSpace(void) const { if (type==spaceid) return (value.spaceid->getType()==IPTR_CONSTANT); return false; } bool ConstTpl::isUniqueSpace(void) const { if (type==spaceid) return (value.spaceid->getType()==IPTR_INTERNAL); return false; } bool ConstTpl::operator==(const ConstTpl &op2) const { if (type != op2.type) return false; switch(type) { case real: return (value_real == op2.value_real); 
case handle: if (value.handle_index != op2.value.handle_index) return false; if (select != op2.select) return false; break; case spaceid: return (value.spaceid == op2.value.spaceid); default: // Nothing additional to compare break; } return true; } bool ConstTpl::operator<(const ConstTpl &op2) const { if (type != op2.type) return (type < op2.type); switch(type) { case real: return (value_real < op2.value_real); case handle: if (value.handle_index != op2.value.handle_index) return (value.handle_index < op2.value.handle_index); if (select != op2.select) return (select < op2.select); break; case spaceid: return (value.spaceid < op2.value.spaceid); default: // Nothing additional to compare break; } return false; } uintb ConstTpl::fix(const ParserWalker &walker) const { // Get the value of the ConstTpl in context // NOTE: if the property is dynamic this returns the property // of the temporary storage switch(type) { case j_start: return walker.getAddr().getOffset(); // Fill in starting address placeholder with real address case j_next: return walker.getNaddr().getOffset(); // Fill in next address placeholder with real address case j_next2: return walker.getN2addr().getOffset(); // Fill in next2 address placeholder with real address case j_flowref: return walker.getRefAddr().getOffset(); case j_flowref_size: return walker.getRefAddr().getAddrSize(); case j_flowdest: return walker.getDestAddr().getOffset(); case j_flowdest_size: return walker.getDestAddr().getAddrSize(); case j_curspace_size: return walker.getCurSpace()->getAddrSize(); case j_curspace: return (uintb)(uintp)walker.getCurSpace(); case handle: { const FixedHandle &hand(walker.getFixedHandle(value.handle_index)); switch(select) { case v_space: if (hand.offset_space == (AddrSpace *)0) return (uintb)(uintp)hand.space; return (uintb)(uintp)hand.temp_space; case v_offset: if (hand.offset_space==(AddrSpace *)0) return hand.offset_offset; return hand.temp_offset; case v_size: return hand.size; case v_offset_plus: 
if (hand.space != walker.getConstSpace()) { // If we are not a constant if (hand.offset_space==(AddrSpace *)0) return hand.offset_offset + (value_real&0xffff); // Adjust offset by truncation amount return hand.temp_offset + (value_real&0xffff); } else { // If we are a constant, we want to return a shifted value uintb val; if (hand.offset_space==(AddrSpace *)0) val = hand.offset_offset; else val = hand.temp_offset; val >>= 8 * (value_real>>16); return val; } } break; } case j_relative: case real: return value_real; case spaceid: return (uintb)(uintp)value.spaceid; } return 0; // Should never reach here } AddrSpace *ConstTpl::fixSpace(const ParserWalker &walker) const { // Get the value of the ConstTpl in context // when we know it is a space switch(type) { case j_curspace: return walker.getCurSpace(); case handle: { const FixedHandle &hand(walker.getFixedHandle(value.handle_index)); switch(select) { case v_space: if (hand.offset_space == (AddrSpace *)0) return hand.space; return hand.temp_space; default: break; } break; } case spaceid: return value.spaceid; case j_flowref: return walker.getRefAddr().getSpace(); default: break; } throw LowlevelError("ConstTpl is not a spaceid as expected"); } void ConstTpl::fillinSpace(FixedHandle &hand,const ParserWalker &walker) const { // Fill in the space portion of a FixedHandle, base on this ConstTpl switch(type) { case j_curspace: hand.space = walker.getCurSpace(); return; case handle: { const FixedHandle &otherhand(walker.getFixedHandle(value.handle_index)); switch(select) { case v_space: hand.space = otherhand.space; return; default: break; } break; } case spaceid: hand.space = value.spaceid; return; default: break; } throw LowlevelError("ConstTpl is not a spaceid as expected"); } void ConstTpl::fillinOffset(FixedHandle &hand,const ParserWalker &walker) const { // Fillin the offset portion of a FixedHandle, based on this ConstTpl // If the offset value is dynamic, indicate this in the handle // we don't just fill in the 
temporary variable offset // we assume hand.space is already filled in if (type == handle) { const FixedHandle &otherhand(walker.getFixedHandle(value.handle_index)); hand.offset_space = otherhand.offset_space; hand.offset_offset = otherhand.offset_offset; hand.offset_size = otherhand.offset_size; hand.temp_space = otherhand.temp_space; hand.temp_offset = otherhand.temp_offset; } else { hand.offset_space = (AddrSpace *)0; hand.offset_offset = hand.space->wrapOffset(fix(walker)); } } void ConstTpl::transfer(const vector ¶ms) { // Replace old handles with new handles if (type != handle) return; HandleTpl *newhandle = params[value.handle_index]; switch(select) { case v_space: *this = newhandle->getSpace(); break; case v_offset: *this = newhandle->getPtrOffset(); break; case v_offset_plus: { uintb tmp = value_real; *this = newhandle->getPtrOffset(); if (type == real) { value_real += (tmp&0xffff); } else if ((type == handle)&&(select == v_offset)) { select = v_offset_plus; value_real = tmp; } else throw LowlevelError("Cannot truncate macro input in this way"); break; } case v_size: *this = newhandle->getSize(); break; } } void ConstTpl::changeHandleIndex(const vector &handmap) { if (type == handle) value.handle_index = handmap[value.handle_index]; } void ConstTpl::encode(Encoder &encoder) const { switch(type) { case real: encoder.openElement(sla::ELEM_CONST_REAL); encoder.writeUnsignedInteger(sla::ATTRIB_VAL, value_real); encoder.closeElement(sla::ELEM_CONST_REAL); break; case handle: encoder.openElement(sla::ELEM_CONST_HANDLE); encoder.writeSignedInteger(sla::ATTRIB_VAL, value.handle_index); encoder.writeSignedInteger(sla::ATTRIB_S, select); if (select == v_offset_plus) encoder.writeUnsignedInteger(sla::ATTRIB_PLUS, value_real); encoder.closeElement(sla::ELEM_CONST_HANDLE); break; case j_start: encoder.openElement(sla::ELEM_CONST_START); encoder.closeElement(sla::ELEM_CONST_START); break; case j_next: encoder.openElement(sla::ELEM_CONST_NEXT); 
encoder.closeElement(sla::ELEM_CONST_NEXT); break; case j_next2: encoder.openElement(sla::ELEM_CONST_NEXT2); encoder.closeElement(sla::ELEM_CONST_NEXT2); break; case j_curspace: encoder.openElement(sla::ELEM_CONST_CURSPACE); encoder.closeElement(sla::ELEM_CONST_CURSPACE); break; case j_curspace_size: encoder.openElement(sla::ELEM_CONST_CURSPACE_SIZE); encoder.closeElement(sla::ELEM_CONST_CURSPACE_SIZE); break; case spaceid: encoder.openElement(sla::ELEM_CONST_SPACEID); encoder.writeSpace(sla::ATTRIB_SPACE, value.spaceid); encoder.closeElement(sla::ELEM_CONST_SPACEID); break; case j_relative: encoder.openElement(sla::ELEM_CONST_RELATIVE); encoder.writeUnsignedInteger(sla::ATTRIB_VAL, value_real); encoder.closeElement(sla::ELEM_CONST_RELATIVE); break; case j_flowref: encoder.openElement(sla::ELEM_CONST_FLOWREF); encoder.closeElement(sla::ELEM_CONST_FLOWREF); break; case j_flowref_size: encoder.openElement(sla::ELEM_CONST_FLOWREF_SIZE); encoder.closeElement(sla::ELEM_CONST_FLOWREF_SIZE); break; case j_flowdest: encoder.openElement(sla::ELEM_CONST_FLOWDEST); encoder.closeElement(sla::ELEM_CONST_FLOWDEST); break; case j_flowdest_size: encoder.openElement(sla::ELEM_CONST_FLOWDEST_SIZE); encoder.closeElement(sla::ELEM_CONST_FLOWDEST_SIZE); break; } } void ConstTpl::decode(Decoder &decoder) { uint4 el = decoder.openElement(); if (el == sla::ELEM_CONST_REAL) { type = real; value_real = decoder.readUnsignedInteger(sla::ATTRIB_VAL); } else if (el == sla::ELEM_CONST_HANDLE) { type = handle; value.handle_index = decoder.readSignedInteger(sla::ATTRIB_VAL); uint4 selectInt = decoder.readSignedInteger(sla::ATTRIB_S); if (selectInt > v_offset_plus) throw DecoderError("Bad handle selector encoding"); select = (v_field)selectInt; if (select == v_offset_plus) { value_real = decoder.readUnsignedInteger(sla::ATTRIB_PLUS); } } else if (el == sla::ELEM_CONST_START) { type = j_start; } else if (el == sla::ELEM_CONST_NEXT) { type = j_next; } else if (el == sla::ELEM_CONST_NEXT2) { type = 
j_next2; } else if (el == sla::ELEM_CONST_CURSPACE) { type = j_curspace; } else if (el == sla::ELEM_CONST_CURSPACE_SIZE) { type = j_curspace_size; } else if (el == sla::ELEM_CONST_SPACEID) { type = spaceid; value.spaceid = decoder.readSpace(sla::ATTRIB_SPACE); } else if (el == sla::ELEM_CONST_RELATIVE) { type = j_relative; value_real = decoder.readUnsignedInteger(sla::ATTRIB_VAL); } else if (el == sla::ELEM_CONST_FLOWREF) { type = j_flowref; } else if (el == sla::ELEM_CONST_FLOWREF_SIZE) { type = j_flowref_size; } else if (el == sla::ELEM_CONST_FLOWDEST) { type = j_flowdest; } else if (el == sla::ELEM_CONST_FLOWDEST_SIZE) { type = j_flowdest_size; } else throw LowlevelError("Bad constant type"); decoder.closeElement(el); } VarnodeTpl::VarnodeTpl(int4 hand,bool zerosize) : space(ConstTpl::handle,hand,ConstTpl::v_space), offset(ConstTpl::handle,hand,ConstTpl::v_offset), size(ConstTpl::handle,hand,ConstTpl::v_size) { // Varnode built from a handle // if zerosize is true, set the size constant to zero if (zerosize) size = ConstTpl(ConstTpl::real,0); unnamed_flag = false; } VarnodeTpl::VarnodeTpl(const ConstTpl &sp,const ConstTpl &off,const ConstTpl &sz) : space(sp), offset(off), size(sz) { unnamed_flag = false; } VarnodeTpl::VarnodeTpl(const VarnodeTpl &vn) : space(vn.space), offset(vn.offset), size(vn.size) { // A clone of the VarnodeTpl unnamed_flag = vn.unnamed_flag; } bool VarnodeTpl::isLocalTemp(void) const { if (space.getType() != ConstTpl::spaceid) return false; if (space.getSpace()->getType()!=IPTR_INTERNAL) return false; return true; } bool VarnodeTpl::isDynamic(const ParserWalker &walker) const { if (offset.getType()!=ConstTpl::handle) return false; // Technically we should probably check all three // ConstTpls for dynamic handles, but in all cases // if there is any dynamic piece then the offset is const FixedHandle &hand(walker.getFixedHandle(offset.getHandleIndex())); return (hand.offset_space != (AddrSpace *)0); } int4 VarnodeTpl::transfer(const vector 
¶ms) { bool doesOffsetPlus = false; int4 handleIndex; int4 plus; if ((offset.getType() == ConstTpl::handle)&&(offset.getSelect()==ConstTpl::v_offset_plus)) { handleIndex = offset.getHandleIndex(); plus = (int4)offset.getReal(); doesOffsetPlus = true; } space.transfer(params); offset.transfer(params); size.transfer(params); if (doesOffsetPlus) { if (isLocalTemp()) return plus; // A positive number indicates truncation of a local temp if (params[handleIndex]->getSize().isZero()) return plus; // or a zerosize object } return -1; } void VarnodeTpl::changeHandleIndex(const vector &handmap) { space.changeHandleIndex(handmap); offset.changeHandleIndex(handmap); size.changeHandleIndex(handmap); } bool VarnodeTpl::adjustTruncation(int4 sz,bool isbigendian) { // We know this->offset is an offset_plus, check that the truncation is in bounds (given -sz-) // adjust plus for endianness if necessary // return true if truncation is in bounds if (size.getType() != ConstTpl::real) return false; int4 numbytes = (int4) size.getReal(); int4 byteoffset = (int4) offset.getReal(); if (numbytes + byteoffset > sz) return false; // Encode the original truncation amount with the plus value uintb val = byteoffset; val <<= 16; if (isbigendian) { val |= (uintb)(sz - (numbytes+byteoffset)); } else { val |= (uintb) byteoffset; } offset = ConstTpl(ConstTpl::handle,offset.getHandleIndex(),ConstTpl::v_offset_plus,val); return true; } void VarnodeTpl::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_VARNODE_TPL); space.encode(encoder); offset.encode(encoder); size.encode(encoder); encoder.closeElement(sla::ELEM_VARNODE_TPL); } void VarnodeTpl::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_VARNODE_TPL); space.decode(decoder); offset.decode(decoder); size.decode(decoder); decoder.closeElement(el); } bool VarnodeTpl::operator==(const VarnodeTpl &op2) const { return space==op2.space && offset==op2.offset && size==op2.size; } bool VarnodeTpl::operator!=(const VarnodeTpl 
&op2) const { return !(*this == op2); } bool VarnodeTpl::operator<(const VarnodeTpl &op2) const { if (!(space==op2.space)) return (spacegetSpace(); size = vn->getSize(); ptrspace = ConstTpl(ConstTpl::real,0); ptroffset = vn->getOffset(); } HandleTpl::HandleTpl(const ConstTpl &spc,const ConstTpl &sz,const VarnodeTpl *vn, AddrSpace *t_space,uintb t_offset) : space(spc), size(sz), ptrspace(vn->getSpace()), ptroffset(vn->getOffset()), ptrsize(vn->getSize()), temp_space(t_space), temp_offset(ConstTpl::real,t_offset) { // Build handle to thing being pointed at by -vn- } void HandleTpl::fix(FixedHandle &hand,const ParserWalker &walker) const { if (ptrspace.getType() == ConstTpl::real) { // The export is unstarred, but this doesn't mean the varnode // being exported isn't dynamic space.fillinSpace(hand,walker); hand.size = size.fix(walker); ptroffset.fillinOffset(hand,walker); } else { hand.space = space.fixSpace(walker); hand.size = size.fix(walker); hand.offset_offset = ptroffset.fix(walker); hand.offset_space = ptrspace.fixSpace(walker); if (hand.offset_space->getType()==IPTR_CONSTANT) { // Handle could have been dynamic but wasn't hand.offset_space = (AddrSpace *)0; hand.offset_offset = AddrSpace::addressToByte(hand.offset_offset,hand.space->getWordSize()); hand.offset_offset = hand.space->wrapOffset(hand.offset_offset); } else { hand.offset_size = ptrsize.fix(walker); hand.temp_space = temp_space.fixSpace(walker); hand.temp_offset = temp_offset.fix(walker); } } } void HandleTpl::changeHandleIndex(const vector &handmap) { space.changeHandleIndex(handmap); size.changeHandleIndex(handmap); ptrspace.changeHandleIndex(handmap); ptroffset.changeHandleIndex(handmap); ptrsize.changeHandleIndex(handmap); temp_space.changeHandleIndex(handmap); temp_offset.changeHandleIndex(handmap); } void HandleTpl::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_HANDLE_TPL); space.encode(encoder); size.encode(encoder); ptrspace.encode(encoder); ptroffset.encode(encoder); 
ptrsize.encode(encoder); temp_space.encode(encoder); temp_offset.encode(encoder); encoder.closeElement(sla::ELEM_HANDLE_TPL); } void HandleTpl::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_HANDLE_TPL); space.decode(decoder); size.decode(decoder); ptrspace.decode(decoder); ptroffset.decode(decoder); ptrsize.decode(decoder); temp_space.decode(decoder); temp_offset.decode(decoder); decoder.closeElement(el); } OpTpl::~OpTpl(void) { // An OpTpl owns its varnode_tpls if (output != (VarnodeTpl *)0) delete output; vector::iterator iter; for(iter=input.begin();iter!=input.end();++iter) delete *iter; } bool OpTpl::isZeroSize(void) const { // Return if any input or output has zero size vector::const_iterator iter; if (output != (VarnodeTpl *)0) if (output->isZeroSize()) return true; for(iter=input.begin();iter!=input.end();++iter) if ((*iter)->isZeroSize()) return true; return false; } void OpTpl::removeInput(int4 index) { // Remove the indicated input delete input[index]; for(int4 i=index;i &handmap) { if (output != (VarnodeTpl *)0) output->changeHandleIndex(handmap); vector::const_iterator iter; for(iter=input.begin();iter!=input.end();++iter) (*iter)->changeHandleIndex(handmap); } void OpTpl::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_OP_TPL); encoder.writeOpcode(sla::ATTRIB_CODE, opc); if (output == (VarnodeTpl *)0) { encoder.openElement(sla::ELEM_NULL); encoder.closeElement(sla::ELEM_NULL); } else output->encode(encoder); for(int4 i=0;iencode(encoder); encoder.closeElement(sla::ELEM_OP_TPL); } void OpTpl::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_OP_TPL); opc = decoder.readOpcode(sla::ATTRIB_CODE); uint4 subel = decoder.peekElement(); if (subel == sla::ELEM_NULL) { decoder.openElement(); decoder.closeElement(subel); output = (VarnodeTpl *)0; } else { output = new VarnodeTpl(); output->decode(decoder); } while(decoder.peekElement() != 0) { VarnodeTpl *vn = new VarnodeTpl(); vn->decode(decoder); 
input.push_back(vn); } decoder.closeElement(el); } ConstructTpl::~ConstructTpl(void) { // Constructor owns its ops and handles vector::iterator oiter; for(oiter=vec.begin();oiter!=vec.end();++oiter) delete *oiter; if (result != (HandleTpl *)0) delete result; } bool ConstructTpl::addOp(OpTpl *ot) { if (ot->getOpcode() == DELAY_SLOT) { if (delayslot != 0) return false; // Cannot have multiple delay slots delayslot = ot->getIn(0)->getOffset().getReal(); } else if (ot->getOpcode() == LABELBUILD) numlabels += 1; // Count labels vec.push_back(ot); return true; } bool ConstructTpl::addOpList(const vector &oplist) { for(int4 i=0;i &check,AddrSpace *const_space) { // Make sure there is a build statement for all subtable params // Return 0 upon success, 1 if there is a duplicate BUILD, 2 if there is a build for a non-subtable vector::iterator iter; OpTpl *op; VarnodeTpl *indvn; for(iter=vec.begin();iter!=vec.end();++iter) { op = *iter; if (op->getOpcode() == BUILD) { int4 index = op->getIn(0)->getOffset().getReal(); if (check[index] != 0) return check[index]; // Duplicate BUILD statement or non-subtable check[index] = 1; // Mark to avoid future duplicate build } } for(int4 i=0;iaddInput(indvn); vec.insert(vec.begin(),op); } } return 0; } bool ConstructTpl::buildOnly(void) const { vector::const_iterator iter; OpTpl *op; for(iter=vec.begin();iter!=vec.end();++iter) { op = *iter; if (op->getOpcode() != BUILD) return false; } return true; } void ConstructTpl::changeHandleIndex(const vector &handmap) { vector::const_iterator iter; OpTpl *op; for(iter=vec.begin();iter!=vec.end();++iter) { op = *iter; if (op->getOpcode() == BUILD) { int4 index = op->getIn(0)->getOffset().getReal(); index = handmap[index]; op->getIn(0)->setOffset(index); } else op->changeHandleIndex(handmap); } if (result != (HandleTpl *)0) result->changeHandleIndex(handmap); } void ConstructTpl::setInput(VarnodeTpl *vn,int4 index,int4 slot) { // set the VarnodeTpl input for a particular op // for use with 
optimization routines OpTpl *op = vec[index]; VarnodeTpl *oldvn = op->getIn(slot); op->setInput(vn,slot); if (oldvn != (VarnodeTpl *)0) delete oldvn; } void ConstructTpl::setOutput(VarnodeTpl *vn,int4 index) { // set the VarnodeTpl output for a particular op // for use with optimization routines OpTpl *op = vec[index]; VarnodeTpl *oldvn = op->getOut(); op->setOutput(vn); if (oldvn != (VarnodeTpl *)0) delete oldvn; } void ConstructTpl::deleteOps(const vector &indices) { // delete a particular set of ops for(uint4 i=0;i poscur) vec.pop_back(); } void ConstructTpl::encode(Encoder &encoder,int4 sectionid) const { encoder.openElement(sla::ELEM_CONSTRUCT_TPL); if (sectionid >=0 ) encoder.writeSignedInteger(sla::ATTRIB_SECTION, sectionid); if (delayslot != 0) encoder.writeSignedInteger(sla::ATTRIB_DELAY, delayslot); if (numlabels != 0) encoder.writeSignedInteger(sla::ATTRIB_LABELS, numlabels); if (result != (HandleTpl *)0) result->encode(encoder); else { encoder.openElement(sla::ELEM_NULL); encoder.closeElement(sla::ELEM_NULL); } for(int4 i=0;iencode(encoder); encoder.closeElement(sla::ELEM_CONSTRUCT_TPL); } int4 ConstructTpl::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_CONSTRUCT_TPL); int4 sectionid = -1; uint4 attrib = decoder.getNextAttributeId(); while(attrib != 0) { if (attrib == sla::ATTRIB_DELAY) { delayslot = decoder.readSignedInteger(); } else if (attrib == sla::ATTRIB_LABELS) { numlabels = decoder.readSignedInteger(); } else if (attrib == sla::ATTRIB_SECTION) { sectionid = decoder.readSignedInteger(); } attrib = decoder.getNextAttributeId(); } uint4 subel = decoder.peekElement(); if (subel == sla::ELEM_NULL) { decoder.openElement(); decoder.closeElement(subel); result = (HandleTpl *)0; } else { result = new HandleTpl(); result->decode(decoder); } while(decoder.peekElement() != 0) { OpTpl *op = new OpTpl(); op->decode(decoder); vec.push_back(op); } decoder.closeElement(el); return sectionid; } void PcodeBuilder::build(ConstructTpl 
*construct,int4 secnum) { if (construct == (ConstructTpl *)0) throw UnimplError("",0); // Pcode is not implemented for this constructor uint4 oldbase = labelbase; // Recursively store old labelbase labelbase = labelcount; // Set the newbase labelcount += construct->numLabels(); // Add labels from this template vector::const_iterator iter; OpTpl *op; const vector &ops(construct->getOpvec()); for(iter=ops.begin();iter!=ops.end();++iter) { op = *iter; switch(op->getOpcode()) { case BUILD: appendBuild( op, secnum ); break; case DELAY_SLOT: delaySlot( op ); break; case LABELBUILD: setLabel( op ); break; case CROSSBUILD: appendCrossBuild(op,secnum); break; default: dump( op ); break; } } labelbase = oldbase; // Restore old labelbase } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/semantics.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef __SEMANTICS_HH__ #define __SEMANTICS_HH__ #include "context.hh" #include "slaformat.hh" namespace ghidra { // We remap these opcodes for internal use during pcode generation #define BUILD CPUI_MULTIEQUAL #define DELAY_SLOT CPUI_INDIRECT #define CROSSBUILD CPUI_PTRSUB #define MACROBUILD CPUI_CAST #define LABELBUILD CPUI_PTRADD class Translate; // Forward declaration class HandleTpl; // Forward declaration class ConstTpl { public: enum const_type { real=0, handle=1, j_start=2, j_next=3, j_next2=4, j_curspace=5, j_curspace_size=6, spaceid=7, j_relative=8, j_flowref=9, j_flowref_size=10, j_flowdest=11, j_flowdest_size=12 }; enum v_field { v_space=0, v_offset=1, v_size=2, v_offset_plus=3 }; private: const_type type; union { // uintb real; // an actual constant AddrSpace *spaceid; // Id (pointer) for registered space int4 handle_index; // Place holder for run-time determined value } value; uintb value_real; v_field select; // Which part of handle to use as constant public: ConstTpl(void) { type = real; value_real = 0; } ConstTpl(const ConstTpl &op2) { type=op2.type; value=op2.value; value_real=op2.value_real; select=op2.select; } ConstTpl(const_type tp,uintb val); ConstTpl(const_type tp); ConstTpl(AddrSpace *sid); ConstTpl(const_type tp,int4 ht,v_field vf); ConstTpl(const_type tp,int4 ht,v_field vf,uintb plus); bool isConstSpace(void) const; bool isUniqueSpace(void) const; bool operator==(const ConstTpl &op2) const; bool operator<(const ConstTpl &op2) const; uintb getReal(void) const { return value_real; } AddrSpace *getSpace(void) const { return value.spaceid; } int4 getHandleIndex(void) const { return value.handle_index; } const_type getType(void) const { return type; } v_field getSelect(void) const { return select; } uintb fix(const ParserWalker &walker) const; AddrSpace *fixSpace(const ParserWalker &walker) const; void transfer(const vector ¶ms); bool isZero(void) const { return ((type==real)&&(value_real==0)); } void changeHandleIndex(const vector 
// NOTE(review): this chunk opens mid-declaration -- the tokens below close out the
// method list of a class whose header lies before this chunk; kept verbatim.
&handmap);
  void fillinSpace(FixedHandle &hand,const ParserWalker &walker) const;
  void fillinOffset(FixedHandle &hand,const ParserWalker &walker) const;
  void encode(Encoder &encoder) const;
  void decode(Decoder &decoder);
};

/// \brief Placeholder for a Varnode in a p-code template
///
/// The address space, offset, and size of the eventual Varnode are held as
/// template constants (ConstTpl) to be resolved against a ParserWalker when a
/// specific instruction is being built.
class VarnodeTpl {
  friend class OpTpl;
  friend class HandleTpl;
  ConstTpl space,offset,size;	///< Template constants for the space, offset, and size of the Varnode
  bool unnamed_flag;		///< Set to \b true if this Varnode is unnamed (see isUnnamed)
public:
  VarnodeTpl(int4 hand,bool zerosize);
  VarnodeTpl(void) : space(), offset(), size() { unnamed_flag=false; }	///< Construct an empty, named template
  VarnodeTpl(const ConstTpl &sp,const ConstTpl &off,const ConstTpl &sz);
  VarnodeTpl(const VarnodeTpl &vn);
  const ConstTpl &getSpace(void) const { return space; }	///< Get the address-space template constant
  const ConstTpl &getOffset(void) const { return offset; }	///< Get the offset template constant
  const ConstTpl &getSize(void) const { return size; }		///< Get the size template constant
  bool isDynamic(const ParserWalker &walker) const;
  // NOTE(review): extraction dropped this vector's template argument and mangled
  // "&params" into "¶ms"; verify against upstream before relying on this text.
  int4 transfer(const vector ¶ms);
  bool isZeroSize(void) const { return size.isZero(); }		///< Is the size of this Varnode zero
  bool operator==(const VarnodeTpl &op2) const;
  bool operator!=(const VarnodeTpl &op2) const;
  bool operator<(const VarnodeTpl &op2) const;
  void setOffset(uintb constVal) { offset = ConstTpl(ConstTpl::real,constVal); }	///< Set the offset to a fixed (real) value
  void setRelative(uintb constVal) { offset = ConstTpl(ConstTpl::j_relative,constVal); }	///< Set the offset to a label-relative value
  void setSize(const ConstTpl &sz ) { size = sz; }		///< Set the size template constant
  bool isUnnamed(void) const { return unnamed_flag; }		///< Is this Varnode unnamed
  void setUnnamed(bool val) { unnamed_flag = val; }		///< Mark whether this Varnode is unnamed
  bool isLocalTemp(void) const;
  bool isRelative(void) const { return (offset.getType() == ConstTpl::j_relative); }	///< Is the offset label-relative
  void changeHandleIndex(const vector &handmap);	// NOTE(review): vector template argument lost in extraction
  bool adjustTruncation(int4 sz,bool isbigendian);
  void encode(Encoder &encoder) const;
  void decode(Decoder &decoder);
};

/// \brief Placeholder for a handle (a dynamic reference) in a p-code template
///
/// Holds the template constants for the handle's value location, and for the
/// pointer and temporary location used when the handle is dynamic.
class HandleTpl {
  ConstTpl space;		///< Address space of the handle's value
  ConstTpl size;		///< Size of the handle's value
  ConstTpl ptrspace;		///< Pointer space component of the handle
  ConstTpl ptroffset;		///< Pointer offset component of the handle
  ConstTpl ptrsize;		///< Pointer size component of the handle
  ConstTpl temp_space;		///< Temporary-location space component
  ConstTpl temp_offset;		///< Temporary-location offset component
public:
  HandleTpl(void) {}		///< Construct an empty handle template
  HandleTpl(const VarnodeTpl *vn);
  HandleTpl(const ConstTpl &spc,const ConstTpl &sz,const VarnodeTpl *vn, AddrSpace *t_space,uintb t_offset);
  const ConstTpl &getSpace(void) const { return space; }	///< Get the space template constant
  const ConstTpl &getPtrSpace(void) const { return ptrspace; }	///< Get the pointer space template constant
  const ConstTpl &getPtrOffset(void) const { return ptroffset; }	///< Get the pointer offset template constant
  const ConstTpl &getPtrSize(void) const { return ptrsize; }	///< Get the pointer size template constant
  const ConstTpl &getSize(void) const { return size; }		///< Get the size template constant
  const ConstTpl &getTempSpace(void) const { return temp_space; }	///< Get the temporary space template constant
  const ConstTpl &getTempOffset(void) const { return temp_offset; }	///< Get the temporary offset template constant
  void setSize(const ConstTpl &sz) { size = sz; }		///< Set the size template constant
  void setPtrSize(const ConstTpl &sz) { ptrsize=sz; }		///< Set the pointer size template constant
  void setPtrOffset(uintb val) { ptroffset = ConstTpl(ConstTpl::real,val); }	///< Set the pointer offset to a fixed value
  void setTempOffset(uintb val) { temp_offset = ConstTpl(ConstTpl::real,val); }	///< Set the temporary offset to a fixed value
  void fix(FixedHandle &hand,const ParserWalker &walker) const;
  void changeHandleIndex(const vector &handmap);	// NOTE(review): vector template argument lost in extraction
  void encode(Encoder &encoder) const;
  void decode(Decoder &decoder);
};

/// \brief Placeholder for a single p-code operation in a template
class OpTpl {
  VarnodeTpl *output;		///< Output Varnode template, or null if the op has no output
  OpCode opc;			///< The p-code opcode
  vector input;			///< Input Varnode templates (NOTE(review): template argument lost in extraction)
public:
  OpTpl(void) {}
  OpTpl(OpCode oc) { opc = oc; output = (VarnodeTpl *)0; }	///< Construct with an opcode and no output
  ~OpTpl(void);
  VarnodeTpl *getOut(void) const { return output; }	///< Get the output template (may be null)
  int4 numInput(void) const { return input.size(); }	///< Get the number of input templates
  VarnodeTpl *getIn(int4 i) const { return input[i]; }	///< Get the i-th input template
  OpCode getOpcode(void) const { return opc; }		///< Get the opcode
  bool isZeroSize(void) const;
  void setOpcode(OpCode o) { opc = o; }			///< Set the opcode
  void setOutput(VarnodeTpl *vt) { output = vt; }	///< Set the output template (takes ownership)
  void clearOutput(void) { delete output; output = (VarnodeTpl *)0; }	///< Delete and null out the output template
  void addInput(VarnodeTpl *vt) { input.push_back(vt); }	///< Append an input template
  void setInput(VarnodeTpl *vt,int4 slot) { input[slot] = vt; }	///< Replace the template in the given input slot
  void removeInput(int4 index);
  void changeHandleIndex(const vector &handmap);	// NOTE(review): vector template argument lost in extraction
  void encode(Encoder &encoder) const;
  void decode(Decoder &decoder);
};

/// \brief A constructor template: the sequence of op templates for one SLEIGH constructor
class ConstructTpl {
  friend class SleighCompile;
protected:
  uint4 delayslot;		///< The delay-slot value for this constructor (0 if none)
  uint4 numlabels;		// Number of label templates
  vector vec;			///< The sequence of op templates (NOTE(review): template argument lost in extraction)
  HandleTpl *result;		///< Template for the exported result handle, or null
  void setOpvec(vector &opvec) { vec = opvec; }		///< Set the op template sequence
  void setNumLabels(uint4 val) { numlabels = val; }	///< Set the number of label templates
public:
  ConstructTpl(void) { delayslot=0; numlabels=0; result = (HandleTpl *)0; }	///< Construct an empty template
  ~ConstructTpl(void);
  uint4 delaySlot(void) const { return delayslot; }	///< Get the delay-slot value
  uint4 numLabels(void) const { return numlabels; }	///< Get the number of label templates
  const vector &getOpvec(void) const { return vec; }	///< Get the op template sequence
  HandleTpl *getResult(void) const { return result; }	///< Get the result handle template (may be null)
  bool addOp(OpTpl *ot);
  bool addOpList(const vector &oplist);
  void setResult(HandleTpl *t) { result = t; }		///< Set the result handle template
  int4 fillinBuild(vector &check,AddrSpace *const_space);
  bool buildOnly(void) const;
  void changeHandleIndex(const vector &handmap);
  void setInput(VarnodeTpl *vn,int4 index,int4 slot);
  void setOutput(VarnodeTpl *vn,int4 index);
  void deleteOps(const vector &indices);
  void encode(Encoder &encoder,int4 sectionid) const;
  int4 decode(Decoder &decoder);
};

class PcodeEmit; // Forward declaration for emitter

/// \brief Abstract walker that turns a ConstructTpl into concrete p-code
///
/// build() walks the template; concrete subclasses supply dump() and the other
/// pure-virtual hooks to emit ops, handle delay slots, labels, and cross-builds.
class PcodeBuilder { // SLEIGH specific pcode generator
  uint4 labelbase;	///< First label id owned by this builder (labelbase == labelcount at construction)
  uint4 labelcount;	///< Running label id counter
protected:
  ParserWalker *walker;	///< Walker over the instruction currently being built
  virtual void dump( OpTpl *op )=0;	///< Emit one concrete op from the given template
public:
  PcodeBuilder(uint4 lbcnt) { labelbase=labelcount=lbcnt; }	///< Construct with an initial label count
  virtual ~PcodeBuilder(void) {}
  uint4 getLabelBase(void) const { return labelbase; }	///< Get the first label id owned by this builder
  ParserWalker *getCurrentWalker() const { return walker; }	///< Get the current instruction walker
  void build(ConstructTpl *construct,int4 secnum);
  virtual void appendBuild(OpTpl *bld,int4 secnum)=0;
  virtual void delaySlot(OpTpl *op)=0;
  virtual void setLabel(OpTpl *op)=0;
  virtual void appendCrossBuild(OpTpl *bld,int4 secnum)=0;
};

} // End namespace ghidra
#endif

================================================
FILE: pypcode/sleigh/slaformat.cc
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "slaformat.hh"

namespace ghidra {
namespace sla {

// Scope and version constants for the compressed .sla file format.
const int4 FORMAT_SCOPE = 1;
const int4 FORMAT_VERSION = 4;

// Attribute id registrations for the .sla format.  The numeric ids are part of
// the on-disk encoding and must remain stable for old files to stay readable.
// ATTRIB_CONTEXT = 1 is reserved
AttributeId ATTRIB_VAL = AttributeId("val", 2, FORMAT_SCOPE);
AttributeId ATTRIB_ID = AttributeId("id", 3, FORMAT_SCOPE);
AttributeId ATTRIB_SPACE = AttributeId("space", 4, FORMAT_SCOPE);
AttributeId ATTRIB_S = AttributeId("s", 5, FORMAT_SCOPE);
AttributeId ATTRIB_OFF = AttributeId("off", 6, FORMAT_SCOPE);
AttributeId ATTRIB_CODE = AttributeId("code", 7, FORMAT_SCOPE);
AttributeId ATTRIB_MASK = AttributeId("mask", 8, FORMAT_SCOPE);
AttributeId ATTRIB_INDEX = AttributeId("index", 9, FORMAT_SCOPE);
AttributeId ATTRIB_NONZERO = AttributeId("nonzero", 10, FORMAT_SCOPE);
AttributeId ATTRIB_PIECE = AttributeId("piece", 11, FORMAT_SCOPE);
AttributeId ATTRIB_NAME = AttributeId("name", 12, FORMAT_SCOPE);
AttributeId ATTRIB_SCOPE = AttributeId("scope", 13, FORMAT_SCOPE);
AttributeId ATTRIB_STARTBIT = AttributeId("startbit", 14, FORMAT_SCOPE);
AttributeId ATTRIB_SIZE = AttributeId("size", 15, FORMAT_SCOPE);
AttributeId ATTRIB_TABLE = AttributeId("table", 16, FORMAT_SCOPE);
AttributeId ATTRIB_CT = AttributeId("ct", 17, FORMAT_SCOPE);
AttributeId ATTRIB_MINLEN = AttributeId("minlen", 18, FORMAT_SCOPE);
AttributeId ATTRIB_BASE = AttributeId("base", 19, FORMAT_SCOPE);
AttributeId ATTRIB_NUMBER = AttributeId("number", 20, FORMAT_SCOPE);
AttributeId ATTRIB_CONTEXT = AttributeId("context", 21, FORMAT_SCOPE);
AttributeId ATTRIB_PARENT = AttributeId("parent", 22, FORMAT_SCOPE);
AttributeId ATTRIB_SUBSYM = AttributeId("subsym", 23, FORMAT_SCOPE);
AttributeId ATTRIB_LINE = AttributeId("line", 24, FORMAT_SCOPE);
AttributeId ATTRIB_SOURCE = AttributeId("source", 25, FORMAT_SCOPE);
AttributeId ATTRIB_LENGTH = AttributeId("length", 26, FORMAT_SCOPE);
AttributeId ATTRIB_FIRST = AttributeId("first", 27, FORMAT_SCOPE);
AttributeId ATTRIB_PLUS = AttributeId("plus", 28, FORMAT_SCOPE);
AttributeId ATTRIB_SHIFT = AttributeId("shift", 29, FORMAT_SCOPE);
AttributeId ATTRIB_ENDBIT = AttributeId("endbit", 30, FORMAT_SCOPE);
AttributeId ATTRIB_SIGNBIT = AttributeId("signbit", 31, FORMAT_SCOPE);
AttributeId ATTRIB_ENDBYTE = AttributeId("endbyte", 32, FORMAT_SCOPE);
AttributeId ATTRIB_STARTBYTE = AttributeId("startbyte", 33, FORMAT_SCOPE);
AttributeId ATTRIB_VERSION = AttributeId("version", 34, FORMAT_SCOPE);
AttributeId ATTRIB_BIGENDIAN = AttributeId("bigendian", 35, FORMAT_SCOPE);
AttributeId ATTRIB_ALIGN = AttributeId("align", 36, FORMAT_SCOPE);
AttributeId ATTRIB_UNIQBASE = AttributeId("uniqbase", 37, FORMAT_SCOPE);
AttributeId ATTRIB_MAXDELAY = AttributeId("maxdelay", 38, FORMAT_SCOPE);
AttributeId ATTRIB_UNIQMASK = AttributeId("uniqmask", 39, FORMAT_SCOPE);
AttributeId ATTRIB_NUMSECTIONS = AttributeId("numsections", 40, FORMAT_SCOPE);
AttributeId ATTRIB_DEFAULTSPACE = AttributeId("defaultspace", 41, FORMAT_SCOPE);
AttributeId ATTRIB_DELAY = AttributeId("delay", 42, FORMAT_SCOPE);
AttributeId ATTRIB_WORDSIZE = AttributeId("wordsize", 43, FORMAT_SCOPE);
AttributeId ATTRIB_PHYSICAL = AttributeId("physical", 44, FORMAT_SCOPE);
AttributeId ATTRIB_SCOPESIZE = AttributeId("scopesize", 45, FORMAT_SCOPE);
AttributeId ATTRIB_SYMBOLSIZE = AttributeId("symbolsize", 46, FORMAT_SCOPE);
AttributeId ATTRIB_VARNODE = AttributeId("varnode", 47, FORMAT_SCOPE);
AttributeId ATTRIB_LOW = AttributeId("low", 48, FORMAT_SCOPE);
AttributeId ATTRIB_HIGH = AttributeId("high", 49, FORMAT_SCOPE);
AttributeId ATTRIB_FLOW = AttributeId("flow", 50, FORMAT_SCOPE);
AttributeId ATTRIB_CONTAIN = AttributeId("contain", 51, FORMAT_SCOPE);
AttributeId ATTRIB_I = AttributeId("i", 52, FORMAT_SCOPE);
AttributeId ATTRIB_NUMCT = AttributeId("numct", 53, FORMAT_SCOPE);
AttributeId ATTRIB_SECTION = AttributeId("section", 54, FORMAT_SCOPE);
AttributeId ATTRIB_LABELS = AttributeId("labels", 55, FORMAT_SCOPE);

// Element id registrations for the .sla format.  As with attributes, the
// numeric ids are part of the on-disk encoding and must remain stable.
ElementId ELEM_CONST_REAL = ElementId("const_real", 1, FORMAT_SCOPE);
ElementId ELEM_VARNODE_TPL = ElementId("varnode_tpl", 2, FORMAT_SCOPE);
ElementId ELEM_CONST_SPACEID = ElementId("const_spaceid", 3, FORMAT_SCOPE);
ElementId ELEM_CONST_HANDLE = ElementId("const_handle", 4, FORMAT_SCOPE);
ElementId ELEM_OP_TPL = ElementId("op_tpl", 5, FORMAT_SCOPE);
ElementId ELEM_MASK_WORD = ElementId("mask_word", 6, FORMAT_SCOPE);
ElementId ELEM_PAT_BLOCK = ElementId("pat_block", 7, FORMAT_SCOPE);
ElementId ELEM_PRINT = ElementId("print", 8, FORMAT_SCOPE);
ElementId ELEM_PAIR = ElementId("pair", 9, FORMAT_SCOPE);
ElementId ELEM_CONTEXT_PAT = ElementId("context_pat", 10, FORMAT_SCOPE);
ElementId ELEM_NULL = ElementId("null", 11, FORMAT_SCOPE);
ElementId ELEM_OPERAND_EXP = ElementId("operand_exp", 12, FORMAT_SCOPE);
ElementId ELEM_OPERAND_SYM = ElementId("operand_sym", 13, FORMAT_SCOPE);
ElementId ELEM_OPERAND_SYM_HEAD = ElementId("operand_sym_head", 14, FORMAT_SCOPE);
ElementId ELEM_OPER = ElementId("oper", 15, FORMAT_SCOPE);
ElementId ELEM_DECISION = ElementId("decision", 16, FORMAT_SCOPE);
ElementId ELEM_OPPRINT = ElementId("opprint", 17, FORMAT_SCOPE);
ElementId ELEM_INSTRUCT_PAT = ElementId("instruct_pat", 18, FORMAT_SCOPE);
ElementId ELEM_COMBINE_PAT = ElementId("combine_pat", 19, FORMAT_SCOPE);
ElementId ELEM_CONSTRUCTOR = ElementId("constructor", 20, FORMAT_SCOPE);
ElementId ELEM_CONSTRUCT_TPL = ElementId("construct_tpl", 21, FORMAT_SCOPE);
ElementId ELEM_SCOPE = ElementId("scope", 22, FORMAT_SCOPE);
ElementId ELEM_VARNODE_SYM = ElementId("varnode_sym", 23, FORMAT_SCOPE);
ElementId ELEM_VARNODE_SYM_HEAD = ElementId("varnode_sym_head", 24, FORMAT_SCOPE);
ElementId ELEM_USEROP = ElementId("userop", 25, FORMAT_SCOPE);
ElementId ELEM_USEROP_HEAD = ElementId("userop_head", 26, FORMAT_SCOPE);
ElementId ELEM_TOKENFIELD = ElementId("tokenfield", 27, FORMAT_SCOPE);
ElementId ELEM_VAR = ElementId("var", 28, FORMAT_SCOPE);
ElementId ELEM_CONTEXTFIELD = ElementId("contextfield", 29, FORMAT_SCOPE);
ElementId ELEM_HANDLE_TPL = ElementId("handle_tpl", 30, FORMAT_SCOPE);
ElementId ELEM_CONST_RELATIVE = ElementId("const_relative", 31, FORMAT_SCOPE);
ElementId ELEM_CONTEXT_OP = ElementId("context_op", 32, FORMAT_SCOPE);
ElementId ELEM_SLEIGH = ElementId("sleigh", 33, FORMAT_SCOPE);
ElementId ELEM_SPACES = ElementId("spaces", 34, FORMAT_SCOPE);
ElementId ELEM_SOURCEFILES = ElementId("sourcefiles", 35, FORMAT_SCOPE);
ElementId ELEM_SOURCEFILE = ElementId("sourcefile", 36, FORMAT_SCOPE);
ElementId ELEM_SPACE = ElementId("space", 37, FORMAT_SCOPE);
ElementId ELEM_SYMBOL_TABLE = ElementId("symbol_table", 38, FORMAT_SCOPE);
ElementId ELEM_VALUE_SYM = ElementId("value_sym", 39, FORMAT_SCOPE);
ElementId ELEM_VALUE_SYM_HEAD = ElementId("value_sym_head", 40, FORMAT_SCOPE);
ElementId ELEM_CONTEXT_SYM = ElementId("context_sym", 41, FORMAT_SCOPE);
ElementId ELEM_CONTEXT_SYM_HEAD = ElementId("context_sym_head", 42, FORMAT_SCOPE);
ElementId ELEM_END_SYM = ElementId("end_sym", 43, FORMAT_SCOPE);
ElementId ELEM_END_SYM_HEAD = ElementId("end_sym_head", 44, FORMAT_SCOPE);
ElementId ELEM_SPACE_OTHER = ElementId("space_other", 45, FORMAT_SCOPE);
ElementId ELEM_SPACE_UNIQUE = ElementId("space_unique", 46, FORMAT_SCOPE);
ElementId ELEM_AND_EXP = ElementId("and_exp", 47, FORMAT_SCOPE);
ElementId ELEM_DIV_EXP = ElementId("div_exp", 48, FORMAT_SCOPE);
ElementId ELEM_LSHIFT_EXP = ElementId("lshift_exp", 49, FORMAT_SCOPE);
ElementId ELEM_MINUS_EXP = ElementId("minus_exp", 50, FORMAT_SCOPE);
ElementId ELEM_MULT_EXP = ElementId("mult_exp", 51, FORMAT_SCOPE);
ElementId ELEM_NOT_EXP = ElementId("not_exp", 52, FORMAT_SCOPE);
ElementId ELEM_OR_EXP = ElementId("or_exp", 53, FORMAT_SCOPE);
ElementId ELEM_PLUS_EXP = ElementId("plus_exp", 54, FORMAT_SCOPE);
ElementId ELEM_RSHIFT_EXP = ElementId("rshift_exp", 55, FORMAT_SCOPE);
ElementId ELEM_SUB_EXP = ElementId("sub_exp", 56, FORMAT_SCOPE);
ElementId ELEM_XOR_EXP = ElementId("xor_exp", 57, FORMAT_SCOPE);
ElementId ELEM_INTB = ElementId("intb", 58, FORMAT_SCOPE);
ElementId ELEM_END_EXP = ElementId("end_exp", 59, FORMAT_SCOPE);
ElementId ELEM_NEXT2_EXP = ElementId("next2_exp", 60, FORMAT_SCOPE);
ElementId ELEM_START_EXP = ElementId("start_exp", 61, FORMAT_SCOPE);
ElementId ELEM_EPSILON_SYM = ElementId("epsilon_sym", 62, FORMAT_SCOPE);
ElementId ELEM_EPSILON_SYM_HEAD = ElementId("epsilon_sym_head", 63, FORMAT_SCOPE);
ElementId ELEM_NAME_SYM = ElementId("name_sym", 64, FORMAT_SCOPE);
ElementId ELEM_NAME_SYM_HEAD = ElementId("name_sym_head", 65, FORMAT_SCOPE);
ElementId ELEM_NAMETAB = ElementId("nametab", 66, FORMAT_SCOPE);
ElementId ELEM_NEXT2_SYM = ElementId("next2_sym", 67, FORMAT_SCOPE);
ElementId ELEM_NEXT2_SYM_HEAD = ElementId("next2_sym_head", 68, FORMAT_SCOPE);
ElementId ELEM_START_SYM = ElementId("start_sym", 69, FORMAT_SCOPE);
ElementId ELEM_START_SYM_HEAD = ElementId("start_sym_head", 70, FORMAT_SCOPE);
ElementId ELEM_SUBTABLE_SYM = ElementId("subtable_sym", 71, FORMAT_SCOPE);
ElementId ELEM_SUBTABLE_SYM_HEAD = ElementId("subtable_sym_head", 72, FORMAT_SCOPE);
ElementId ELEM_VALUEMAP_SYM = ElementId("valuemap_sym", 73, FORMAT_SCOPE);
ElementId ELEM_VALUEMAP_SYM_HEAD = ElementId("valuemap_sym_head", 74, FORMAT_SCOPE);
ElementId ELEM_VALUETAB = ElementId("valuetab", 75, FORMAT_SCOPE);
ElementId ELEM_VARLIST_SYM = ElementId("varlist_sym", 76, FORMAT_SCOPE);
ElementId ELEM_VARLIST_SYM_HEAD = ElementId("varlist_sym_head", 77, FORMAT_SCOPE);
ElementId ELEM_OR_PAT = ElementId("or_pat", 78, FORMAT_SCOPE);
ElementId ELEM_COMMIT = ElementId("commit", 79, FORMAT_SCOPE);
ElementId ELEM_CONST_START = ElementId("const_start", 80, FORMAT_SCOPE);
ElementId ELEM_CONST_NEXT = ElementId("const_next", 81, FORMAT_SCOPE);
ElementId ELEM_CONST_NEXT2 = ElementId("const_next2", 82, FORMAT_SCOPE);
ElementId ELEM_CONST_CURSPACE = ElementId("const_curspace", 83, FORMAT_SCOPE);
ElementId ELEM_CONST_CURSPACE_SIZE = ElementId("const_curspace_size", 84, FORMAT_SCOPE);
ElementId ELEM_CONST_FLOWREF = ElementId("const_flowref", 85, FORMAT_SCOPE);
ElementId ELEM_CONST_FLOWREF_SIZE = ElementId("const_flowref_size", 86, FORMAT_SCOPE);
ElementId
ELEM_CONST_FLOWDEST = ElementId("const_flowdest", 87, FORMAT_SCOPE);
ElementId ELEM_CONST_FLOWDEST_SIZE = ElementId("const_flowdest_size", 88, FORMAT_SCOPE);

/// The bytes of the header are read from the stream and verified against the required form and current version.
/// If the form matches, \b true is returned. No additional bytes are read.
/// \param s is the given stream
/// \return \b true if a valid header is present
bool isSlaFormat(istream &s)

{
  uint1 header[4];
  s.read((char *)header,4);
  if (!s) return false;				// Stream ended before a full header could be read
  if (header[0] != 's' || header[1] != 'l' || header[2] != 'a') return false;
  if (header[3] != FORMAT_VERSION) return false;	// Only the current version is accepted
  return true;
}

/// A valid header, including the format version number, is written to the stream.
/// \param s is the given stream
void writeSlaHeader(ostream &s)

{
  char header[4];
  header[0] = 's';
  header[1] = 'l';
  header[2] = 'a';
  header[3] = FORMAT_VERSION;
  s.write(header,4);
}

/// \param s is the backing stream that will receive the final bytes of the .sla file
/// \param level is the compression level
FormatEncode::FormatEncode(ostream &s,int4 level)
  : PackedEncode(compStream), compBuffer(s,level), compStream(&compBuffer)
{
  writeSlaHeader(s);	// Header goes to the backing stream uncompressed
}

void FormatEncode::flush(void)

{
  compStream.flush();
}

const int4 FormatDecode::IN_BUFFER_SIZE = 4096;

/// \param spcManager is the (uninitialized) manager that will hold decoded address spaces
FormatDecode::FormatDecode(const AddrSpaceManager *spcManager)
  : PackedDecode(spcManager)
{
  inBuffer = new uint1[IN_BUFFER_SIZE];
}

FormatDecode::~FormatDecode(void)

{
  delete [] inBuffer;
}

// Verify the .sla header, then decompress the rest of the stream in chunks,
// feeding inflated bytes into the PackedDecode input buffers.
void FormatDecode::ingestStream(istream &s)

{
  if (!isSlaFormat(s))
    throw LowlevelError("Missing SLA format header");
  Decompress decompressor;
  uint1 *outBuf;
  int4 outAvail = 0;	// Bytes still free in the current output buffer
  while(!decompressor.isFinished()) {
    s.read((char *)inBuffer,IN_BUFFER_SIZE);
    int4 gcount = s.gcount();
    if (gcount == 0) break;		// No more compressed input
    decompressor.input(inBuffer,gcount);
    do {
      if (outAvail == 0) {		// Current output buffer is full, get a fresh one
        outBuf = allocateNextInputBuffer(0);
        outAvail = BUFFER_SIZE;
      }
      outAvail = decompressor.inflate(outBuf + (BUFFER_SIZE - outAvail), outAvail);
    } while(outAvail == 0);		// Keep inflating while buffers keep filling completely
  }
  endIngest(BUFFER_SIZE - outAvail);	// Report how much of the last buffer was used
}

} // End sla namespace
} // End ghidra namespace

================================================
FILE: pypcode/sleigh/slaformat.hh
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/// \file slaformat.hh
/// \brief Encoding values for the SLA file format
#ifndef __SLAFORMAT__
#define __SLAFORMAT__

#include "compression.hh"
#include "marshal.hh"

namespace ghidra {
namespace sla {

extern const int4 FORMAT_SCOPE;		///< Grouping elements/attributes for SLA file format
extern const int4 FORMAT_VERSION;	///< Current version of the .sla file

extern AttributeId ATTRIB_VAL;		///< SLA format attribute "val"
extern AttributeId ATTRIB_ID;		///< SLA format attribute "id"
extern AttributeId ATTRIB_SPACE;	///< SLA format attribute "space"
extern AttributeId ATTRIB_S;		///< SLA format attribute "s"
extern AttributeId ATTRIB_OFF;		///< SLA format attribute "off"
extern AttributeId ATTRIB_CODE;		///< SLA format attribute "code"
extern AttributeId ATTRIB_MASK;		///< SLA format attribute "mask"
extern AttributeId ATTRIB_INDEX;	///< SLA format attribute "index"
extern AttributeId ATTRIB_NONZERO;	///< SLA format attribute "nonzero"
extern AttributeId ATTRIB_PIECE;	///< SLA format attribute "piece"
extern AttributeId ATTRIB_NAME;		///< SLA format attribute "name"
extern AttributeId
ATTRIB_SCOPE;				///< SLA format attribute "scope"
extern AttributeId ATTRIB_STARTBIT;	///< SLA format attribute "startbit"
extern AttributeId ATTRIB_SIZE;		///< SLA format attribute "size"
extern AttributeId ATTRIB_TABLE;	///< SLA format attribute "table"
extern AttributeId ATTRIB_CT;		///< SLA format attribute "ct"
extern AttributeId ATTRIB_MINLEN;	///< SLA format attribute "minlen"
extern AttributeId ATTRIB_BASE;		///< SLA format attribute "base"
extern AttributeId ATTRIB_NUMBER;	///< SLA format attribute "number"
extern AttributeId ATTRIB_CONTEXT;	///< SLA format attribute "context"
extern AttributeId ATTRIB_PARENT;	///< SLA format attribute "parent"
extern AttributeId ATTRIB_SUBSYM;	///< SLA format attribute "subsym"
extern AttributeId ATTRIB_LINE;		///< SLA format attribute "line"
extern AttributeId ATTRIB_SOURCE;	///< SLA format attribute "source"
extern AttributeId ATTRIB_LENGTH;	///< SLA format attribute "length"
extern AttributeId ATTRIB_FIRST;	///< SLA format attribute "first"
extern AttributeId ATTRIB_PLUS;		///< SLA format attribute "plus"
extern AttributeId ATTRIB_SHIFT;	///< SLA format attribute "shift"
extern AttributeId ATTRIB_ENDBIT;	///< SLA format attribute "endbit"
extern AttributeId ATTRIB_SIGNBIT;	///< SLA format attribute "signbit"
extern AttributeId ATTRIB_ENDBYTE;	///< SLA format attribute "endbyte"
extern AttributeId ATTRIB_STARTBYTE;	///< SLA format attribute "startbyte"
extern AttributeId ATTRIB_VERSION;	///< SLA format attribute "version"
extern AttributeId ATTRIB_BIGENDIAN;	///< SLA format attribute "bigendian"
extern AttributeId ATTRIB_ALIGN;	///< SLA format attribute "align"
extern AttributeId ATTRIB_UNIQBASE;	///< SLA format attribute "uniqbase"
extern AttributeId ATTRIB_MAXDELAY;	///< SLA format attribute "maxdelay"
extern AttributeId ATTRIB_UNIQMASK;	///< SLA format attribute "uniqmask"
extern AttributeId ATTRIB_NUMSECTIONS;	///< SLA format attribute "numsections"
extern AttributeId ATTRIB_DEFAULTSPACE;	///< SLA format attribute "defaultspace"
extern AttributeId ATTRIB_DELAY;	///< SLA format attribute "delay"
extern AttributeId ATTRIB_WORDSIZE;	///< SLA format attribute "wordsize"
extern AttributeId ATTRIB_PHYSICAL;	///< SLA format attribute "physical"
extern AttributeId ATTRIB_SCOPESIZE;	///< SLA format attribute "scopesize"
extern AttributeId ATTRIB_SYMBOLSIZE;	///< SLA format attribute "symbolsize"
extern AttributeId ATTRIB_VARNODE;	///< SLA format attribute "varnode"
extern AttributeId ATTRIB_LOW;		///< SLA format attribute "low"
extern AttributeId ATTRIB_HIGH;		///< SLA format attribute "high"
extern AttributeId ATTRIB_FLOW;		///< SLA format attribute "flow"
extern AttributeId ATTRIB_CONTAIN;	///< SLA format attribute "contain"
extern AttributeId ATTRIB_I;		///< SLA format attribute "i"
extern AttributeId ATTRIB_NUMCT;	///< SLA format attribute "numct"
extern AttributeId ATTRIB_SECTION;	///< SLA format attribute "section"
extern AttributeId ATTRIB_LABELS;	///< SLA format attribute "labels"

extern ElementId ELEM_CONST_REAL;	///< SLA format element "const_real"
extern ElementId ELEM_VARNODE_TPL;	///< SLA format element "varnode_tpl"
extern ElementId ELEM_CONST_SPACEID;	///< SLA format element "const_spaceid"
extern ElementId ELEM_CONST_HANDLE;	///< SLA format element "const_handle"
extern ElementId ELEM_OP_TPL;		///< SLA format element "op_tpl"
extern ElementId ELEM_MASK_WORD;	///< SLA format element "mask_word"
extern ElementId ELEM_PAT_BLOCK;	///< SLA format element "pat_block"
extern ElementId ELEM_PRINT;		///< SLA format element "print"
extern ElementId ELEM_PAIR;		///< SLA format element "pair"
extern ElementId ELEM_CONTEXT_PAT;	///< SLA format element "context_pat"
extern ElementId ELEM_NULL;		///< SLA format element "null"
extern ElementId ELEM_OPERAND_EXP;	///< SLA format element "operand_exp"
extern ElementId ELEM_OPERAND_SYM;	///< SLA format element "operand_sym"
extern ElementId ELEM_OPERAND_SYM_HEAD;	///< SLA format element "operand_sym_head"
extern ElementId ELEM_OPER;		///< SLA format element "oper"
extern ElementId ELEM_DECISION;		///< SLA format element "decision"
extern ElementId ELEM_OPPRINT;		///< SLA format element "opprint"
extern ElementId ELEM_INSTRUCT_PAT;	///< SLA format element "instruct_pat"
extern ElementId ELEM_COMBINE_PAT;	///< SLA format element "combine_pat"
extern ElementId ELEM_CONSTRUCTOR;	///< SLA format element "constructor"
extern ElementId ELEM_CONSTRUCT_TPL;	///< SLA format element "construct_tpl"
extern ElementId ELEM_SCOPE;		///< SLA format element "scope"
extern ElementId ELEM_VARNODE_SYM;	///< SLA format element "varnode_sym"
extern ElementId ELEM_VARNODE_SYM_HEAD;	///< SLA format element "varnode_sym_head"
extern ElementId ELEM_USEROP;		///< SLA format element "userop"
extern ElementId ELEM_USEROP_HEAD;	///< SLA format element "userop_head"
extern ElementId ELEM_TOKENFIELD;	///< SLA format element "tokenfield"
extern ElementId ELEM_VAR;		///< SLA format element "var"
extern ElementId ELEM_CONTEXTFIELD;	///< SLA format element "contextfield"
extern ElementId ELEM_HANDLE_TPL;	///< SLA format element "handle_tpl"
extern ElementId ELEM_CONST_RELATIVE;	///< SLA format element "const_relative"
extern ElementId ELEM_CONTEXT_OP;	///< SLA format element "context_op"
extern ElementId ELEM_SLEIGH;		///< SLA format element "sleigh"
extern ElementId ELEM_SPACES;		///< SLA format element "spaces"
extern ElementId ELEM_SOURCEFILES;	///< SLA format element "sourcefiles"
extern ElementId ELEM_SOURCEFILE;	///< SLA format element "sourcefile"
extern ElementId ELEM_SPACE;		///< SLA format element "space"
extern ElementId ELEM_SYMBOL_TABLE;	///< SLA format element "symbol_table"
extern ElementId ELEM_VALUE_SYM;	///< SLA format element "value_sym"
extern ElementId ELEM_VALUE_SYM_HEAD;	///< SLA format element "value_sym_head"
extern ElementId ELEM_CONTEXT_SYM;	///< SLA format element "context_sym"
extern ElementId ELEM_CONTEXT_SYM_HEAD;	///< SLA format element "context_sym_head"
extern ElementId ELEM_END_SYM;		///< SLA format element "end_sym"
extern ElementId ELEM_END_SYM_HEAD;	///< SLA format element "end_sym_head"
extern ElementId ELEM_SPACE_OTHER;	///< SLA format element "space_other"
extern ElementId ELEM_SPACE_UNIQUE;	///< SLA format element "space_unique"
extern ElementId ELEM_AND_EXP;		///< SLA format element "and_exp"
extern ElementId ELEM_DIV_EXP;		///< SLA format element "div_exp"
extern ElementId ELEM_LSHIFT_EXP;	///< SLA format element "lshift_exp"
extern ElementId ELEM_MINUS_EXP;	///< SLA format element "minus_exp"
extern ElementId ELEM_MULT_EXP;		///< SLA format element "mult_exp"
extern ElementId ELEM_NOT_EXP;		///< SLA format element "not_exp"
extern ElementId ELEM_OR_EXP;		///< SLA format element "or_exp"
extern ElementId ELEM_PLUS_EXP;		///< SLA format element "plus_exp"
extern ElementId ELEM_RSHIFT_EXP;	///< SLA format element "rshift_exp"
extern ElementId ELEM_SUB_EXP;		///< SLA format element "sub_exp"
extern ElementId ELEM_XOR_EXP;		///< SLA format element "xor_exp"
extern ElementId ELEM_INTB;		///< SLA format element "intb"
extern ElementId ELEM_END_EXP;		///< SLA format element "end_exp"
extern ElementId ELEM_NEXT2_EXP;	///< SLA format element "next2_exp"
extern ElementId ELEM_START_EXP;	///< SLA format element "start_exp"
extern ElementId ELEM_EPSILON_SYM;	///< SLA format element "epsilon_sym"
extern ElementId ELEM_EPSILON_SYM_HEAD;	///< SLA format element "epsilon_sym_head"
extern ElementId ELEM_NAME_SYM;		///< SLA format element "name_sym"
extern ElementId ELEM_NAME_SYM_HEAD;	///< SLA format element "name_sym_head"
extern ElementId ELEM_NAMETAB;		///< SLA format element "nametab"
extern ElementId ELEM_NEXT2_SYM;	///< SLA format element "next2_sym"
extern ElementId ELEM_NEXT2_SYM_HEAD;	///< SLA format element "next2_sym_head"
extern ElementId ELEM_START_SYM;	///< SLA format element "start_sym"
extern ElementId ELEM_START_SYM_HEAD;	///< SLA format element "start_sym_head"
extern ElementId ELEM_SUBTABLE_SYM;	///< SLA format element "subtable_sym"
extern ElementId ELEM_SUBTABLE_SYM_HEAD;	///< SLA format element "subtable_sym_head"
extern ElementId ELEM_VALUEMAP_SYM;	///< SLA format element "valuemap_sym"
extern ElementId ELEM_VALUEMAP_SYM_HEAD;	///< SLA format element "valuemap_sym_head"
extern ElementId ELEM_VALUETAB;		///< SLA format element "valuetab"
extern ElementId ELEM_VARLIST_SYM;	///< SLA format element "varlist_sym"
extern ElementId ELEM_VARLIST_SYM_HEAD;	///< SLA format element "varlist_sym_head"
extern ElementId ELEM_OR_PAT;		///< SLA format element "or_pat"
extern ElementId ELEM_COMMIT;		///< SLA format element "commit"
extern ElementId ELEM_CONST_START;	///< SLA format element "const_start"
extern ElementId ELEM_CONST_NEXT;	///< SLA format element "const_next"
extern ElementId ELEM_CONST_NEXT2;	///< SLA format element "const_next2"
extern ElementId ELEM_CONST_CURSPACE;	///< SLA format element "const_curspace"
extern ElementId ELEM_CONST_CURSPACE_SIZE;	///< SLA format element "const_curspace_size"
extern ElementId ELEM_CONST_FLOWREF;	///< SLA format element "const_flowref"
extern ElementId ELEM_CONST_FLOWREF_SIZE;	///< SLA format element "const_flowref_size"
extern ElementId ELEM_CONST_FLOWDEST;	///< SLA format element "const_flowdest"
extern ElementId ELEM_CONST_FLOWDEST_SIZE;	///< SLA format element "const_flowdest_size"

extern bool isSlaFormat(istream &s);	///< Verify a .sla file header at the current point of the given stream
extern void writeSlaHeader(ostream &s);	///< Write a .sla file header to the given stream

/// \brief The encoder for the .sla file format
///
/// This provides the format header, does compression, and encodes the raw data elements/attributes.
class FormatEncode : public PackedEncode {
  CompressBuffer compBuffer;	///< The compression stream filter
  ostream compStream;		///< The front-end stream receiving uncompressed bytes
public:
  FormatEncode(ostream &s,int4 level);	///< Initialize an encoder at a specific compression level
  void flush(void);		///< Flush any buffered bytes in the encoder to the backing stream
};

/// \brief The decoder for the .sla file format
///
/// This verifies the .sla file header, does decompression, and decodes the raw data elements/attributes.
class FormatDecode : public PackedDecode {
  static const int4 IN_BUFFER_SIZE;	///< The size of the \e input buffer
  uint1 *inBuffer;			///< The \e input buffer
public:
  FormatDecode(const AddrSpaceManager *spcManager);	///< Initialize the decoder
  virtual ~FormatDecode(void);		///< Destructor
  virtual void ingestStream(istream &s);
};

} // End namespace sla
} // End namespace ghidra
#endif

================================================
FILE: pypcode/sleigh/sleigh.cc
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "sleigh.hh"
#include "loadimage.hh"

namespace ghidra {

PcodeCacher::PcodeCacher(void)

{
  // We aim to allocate this array only once
  uint4 maxsize = 600;
  poolstart = new VarnodeData[ maxsize ];
  endpool = poolstart + maxsize;
  curpool = poolstart;
}

PcodeCacher::~PcodeCacher(void)

{
  delete [] poolstart;
}

/// Expand the VarnodeData pool so that \e size more elements fit, and return
/// a pointer to first available element.
/// \param size is the number of elements to expand the pool by
/// \return the first available VarnodeData
VarnodeData *PcodeCacher::expandPool(uint4 size)

{
  uint4 curmax = endpool - poolstart;
  uint4 cursize = curpool - poolstart;
  if (cursize + size <= curmax)
    return curpool;			// No expansion necessary
  uint4 increase = (cursize + size) - curmax;
  if (increase < 100)			// Increase by at least 100
    increase = 100;
  uint4 newsize = curmax + increase;
  VarnodeData *newpool = new VarnodeData[newsize];
  // NOTE(review): the element-copy loop below was mangled by extraction -- the
  // loop bound, the per-element copy, and the list<> template argument are
  // missing; verify against upstream Ghidra before relying on this text.
  for(uint4 i=0;i::iterator iter;
  // Rebase outstanding label references so they point into the new pool
  for(iter=label_refs.begin();iter!=label_refs.end();++iter) {
    VarnodeData *ref = (*iter).dataptr;
    (*iter).dataptr = newpool + (ref - poolstart);
  }
  delete [] poolstart;			// Free up old pool
  poolstart = newpool;
  curpool = newpool + (cursize + size);
  endpool = newpool + newsize;
  return newpool + cursize;
}

/// Store off a reference to the Varnode and the absolute index of the next
/// instruction. The Varnode must be an operand of the current instruction.
/// \param ptr is the Varnode reference
void PcodeCacher::addLabelRef(VarnodeData *ptr)

{
  label_refs.emplace_back();
  label_refs.back().dataptr = ptr;
  label_refs.back().calling_index = issued.size();
}

/// The label has an id that is referred to by Varnodes holding
/// intra-instruction branch targets, prior to converting
/// them to a \e relative \e branch offset. The label is associated with
/// the absolute index of the next PcodeData object to be issued,
/// facilitating this conversion.
/// \param id is the given id of the label
void PcodeCacher::addLabel(uint4 id)

{
  while(labels.size() <= id)
    labels.push_back(0xbadbeef);	// 0xbadbeef marks label ids that were never defined
  labels[ id ] = issued.size();
}

void PcodeCacher::clear(void)

{
  curpool = poolstart;
  issued.clear();
  label_refs.clear();
  labels.clear();
}

/// Assuming all the PcodeData has been generated for an
/// instruction, go resolve any relative offsets and back
/// patch their value(s) into the PcodeData
void PcodeCacher::resolveRelatives(void)

{
  list::const_iterator iter;	// NOTE(review): list<> template argument lost in extraction
  for(iter=label_refs.begin();iter!=label_refs.end();++iter) {
    VarnodeData *ptr = (*iter).dataptr;
    uint4 id = ptr->offset;
    if ((id >= labels.size())||(labels[id] == 0xbadbeef))
      throw LowlevelError("Reference to non-existant sleigh label");
    // Calculate the relative index given the two absolute indices
    uintb res = labels[id] - (*iter).calling_index;
    res &= calc_mask( ptr->size );	// Truncate the relative offset to the Varnode's size
    ptr->offset = res;
  }
}

/// Each p-code operation is presented to the emitter via its dump() method.
/// \param addr is the Address associated with the p-code operation
/// \param emt is the emitter
void PcodeCacher::emit(const Address &addr,PcodeEmit *emt) const

{
  vector::const_iterator iter;	// NOTE(review): vector<> template argument lost in extraction
  for(iter=issued.begin();iter!=issued.end();++iter)
    emt->dump(addr,(*iter).opc,(*iter).outvar,(*iter).invar,(*iter).isize);
}

/// \brief Generate a concrete VarnodeData object from the given template (VarnodeTpl)
///
/// \param vntpl is the template to reference
/// \param vn is the object to fill in with concrete values
void SleighBuilder::generateLocation(const VarnodeTpl *vntpl,VarnodeData &vn)

{
  vn.space = vntpl->getSpace().fixSpace(*walker);
  vn.size = vntpl->getSize().fix(*walker);
  if (vn.space == const_space)
    vn.offset = vntpl->getOffset().fix(*walker) & calc_mask(vn.size);	// Constants are truncated to the Varnode size
  else if (vn.space == uniq_space) {
    vn.offset = vntpl->getOffset().fix(*walker);
    vn.offset |= uniqueoffset;		// Unique offsets are made instruction-specific
  }
  else
    vn.offset = vn.space->wrapOffset(vntpl->getOffset().fix(*walker));
}

/// \brief Generate a pointer VarnodeData from a dynamic template (VarnodeTpl)
///
/// The symbol represents a value referenced through a dynamic pointer.
/// This method generates the varnode representing the pointer itself and also
/// returns the address space in anticipation of generating the LOAD or STORE
/// that actually manipulates the value.
/// \param vntpl is the dynamic template to reference
/// \param vn is the object to fill with concrete values
/// \return the address space being pointed to
AddrSpace *SleighBuilder::generatePointer(const VarnodeTpl *vntpl,VarnodeData &vn)

{
  const FixedHandle &hand(walker->getFixedHandle(vntpl->getOffset().getHandleIndex()));
  vn.space = hand.offset_space;
  vn.size = hand.offset_size;
  if (vn.space == const_space)
    vn.offset = hand.offset_offset & calc_mask(vn.size);
  else if (vn.space == uniq_space)
    vn.offset = hand.offset_offset | uniqueoffset;
  else
    vn.offset = vn.space->wrapOffset(hand.offset_offset);
  return hand.space;
}

/// \brief Add in an additional offset to the address of a dynamic Varnode
///
/// The Varnode is ultimately read/written via LOAD/STORE operation AND has undergone a truncation
/// operation, so an additional offset needs to get added to the pointer referencing the Varnode.
/// \param op is the LOAD/STORE operation being generated /// \param vntpl is the dynamic Varnode void SleighBuilder::generatePointerAdd(PcodeData *op,const VarnodeTpl *vntpl) { uintb offsetPlus = vntpl->getOffset().getReal() & 0xffff; if (offsetPlus == 0) { return; } PcodeData *nextop = cache->allocateInstruction(); nextop->opc = op->opc; nextop->invar = op->invar; nextop->isize = op->isize; nextop->outvar = op->outvar; op->isize = 2; op->opc = CPUI_INT_ADD; VarnodeData *newparams = op->invar = cache->allocateVarnodes(2); newparams[0] = nextop->invar[1]; newparams[1].space = const_space; // Add in V_OFFSET_PLUS newparams[1].offset = offsetPlus; newparams[1].size = newparams[0].size; op->outvar = nextop->invar + 1; // Output of ADD is input to original op op->outvar->space = uniq_space; // Result of INT_ADD in special runtime temp op->outvar->offset = uniq_space->getTrans()->getUniqueStart(Translate::RUNTIME_BITRANGE_EA); } void SleighBuilder::dump(OpTpl *op) { // Dump on op through low-level dump interface // filling in dynamic loads and stores if necessary PcodeData *thisop; VarnodeData *invars; VarnodeData *loadvars; VarnodeData *storevars; VarnodeTpl *vn,*outvn; int4 isize = op->numInput(); // First build all the inputs invars = cache->allocateVarnodes(isize); for(int4 i=0;igetIn(i); if (vn->isDynamic(*walker)) { generateLocation(vn,invars[i]); // Input of -op- is really temporary storage PcodeData *load_op = cache->allocateInstruction(); load_op->opc = CPUI_LOAD; load_op->outvar = invars + i; load_op->isize = 2; loadvars = load_op->invar = cache->allocateVarnodes(2); AddrSpace *spc = generatePointer(vn,loadvars[1]); loadvars[0].space = const_space; loadvars[0].offset = (uintb)(uintp)spc; loadvars[0].size = sizeof(spc); if (vn->getOffset().getSelect() == ConstTpl::v_offset_plus) generatePointerAdd(load_op, vn); } else generateLocation(vn,invars[i]); } if ((isize>0)&&(op->getIn(0)->isRelative())) { invars->offset += getLabelBase(); cache->addLabelRef(invars); } 
thisop = cache->allocateInstruction(); thisop->opc = op->getOpcode(); thisop->invar = invars; thisop->isize = isize; outvn = op->getOut(); if (outvn != (VarnodeTpl *)0) { if (outvn->isDynamic(*walker)) { storevars = cache->allocateVarnodes(3); generateLocation(outvn,storevars[2]); // Output of -op- is really temporary storage thisop->outvar = storevars+2; PcodeData *store_op = cache->allocateInstruction(); store_op->opc = CPUI_STORE; store_op->isize = 3; // store_op->outvar = (VarnodeData *)0; store_op->invar = storevars; AddrSpace *spc = generatePointer(outvn,storevars[1]); // pointer storevars[0].space = const_space; storevars[0].offset = (uintb)(uintp)spc; // space in which to store storevars[0].size = sizeof(spc); if (outvn->getOffset().getSelect() == ConstTpl::v_offset_plus) generatePointerAdd(store_op,outvn); } else { thisop->outvar = cache->allocateVarnodes(1); generateLocation(outvn,*thisop->outvar); } } } /// \brief Build a named p-code section of a constructor that contains only implied BUILD directives /// /// If a named section of a constructor is empty, we still need to walk /// through any subtables that might contain p-code in their named sections. /// This method treats each subtable operand as an implied \e build directive, /// in the otherwise empty section. 
/// \param ct is the matching currently Constructor being built /// \param secnum is the particular \e named section number to build void SleighBuilder::buildEmpty(Constructor *ct,int4 secnum) { int4 numops = ct->getNumOperands(); for(int4 i=0;igetOperand(i)->getDefiningSymbol(); if (sym == (SubtableSymbol *)0) continue; if (sym->getType() != SleighSymbol::subtable_symbol) continue; walker->pushOperand(i); ConstructTpl *construct = walker->getConstructor()->getNamedTempl(secnum); if (construct == (ConstructTpl *)0) buildEmpty(walker->getConstructor(),secnum); else build(construct,secnum); walker->popOperand(); } } /// Bits used to make temporary registers unique across multiple instructions /// are generated based on the given address. /// \param addr is the given Address void SleighBuilder::setUniqueOffset(const Address &addr) { uniqueoffset = (addr.getOffset() & uniquemask)<<8; } /// \brief Constructor /// /// \param w is the parsed instruction /// \param dcache is a cache of nearby instruction parses /// \param pc will hold the PcodeData and VarnodeData objects produced by \b this builder /// \param cspc is the constant address space /// \param uspc is the unique address space /// \param umask is the mask to use to find unique bits within an Address SleighBuilder::SleighBuilder(ParserWalker *w,DisassemblyCache *dcache,PcodeCacher *pc,AddrSpace *cspc, AddrSpace *uspc,uint4 umask) : PcodeBuilder(0) { walker = w; discache = dcache; cache = pc; const_space = cspc; uniq_space = uspc; uniquemask = umask; uniqueoffset = (walker->getAddr().getOffset() & uniquemask)<<8; } void SleighBuilder::appendBuild(OpTpl *bld,int4 secnum) { // Append p-code for a particular build statement int4 index = bld->getIn(0)->getOffset().getReal(); // Recover operand index from build statement // Check if operand is a subtable SubtableSymbol *sym = (SubtableSymbol *)walker->getConstructor()->getOperand(index)->getDefiningSymbol(); if ((sym==(SubtableSymbol *)0)||(sym->getType() != 
SleighSymbol::subtable_symbol)) return; walker->pushOperand(index); Constructor *ct = walker->getConstructor(); if (secnum >=0) { ConstructTpl *construct = ct->getNamedTempl(secnum); if (construct == (ConstructTpl *)0) buildEmpty(ct,secnum); else build(construct,secnum); } else { ConstructTpl *construct = ct->getTempl(); build(construct,-1); } walker->popOperand(); } void SleighBuilder::delaySlot(OpTpl *op) { // Append pcode for an entire instruction (delay slot) // in the middle of the current instruction ParserWalker *tmp = walker; uintb olduniqueoffset = uniqueoffset; Address baseaddr = tmp->getAddr(); int4 fallOffset = tmp->getLength(); int4 delaySlotByteCnt = tmp->getParserContext()->getDelaySlot(); int4 bytecount = 0; do { Address newaddr = baseaddr + fallOffset; setUniqueOffset(newaddr); const ParserContext *pos = discache->getParserContext(newaddr); if (pos->getParserState() != ParserContext::pcode) throw LowlevelError("Could not obtain cached delay slot instruction"); int4 len = pos->getLength(); ParserWalker newwalker( pos ); walker = &newwalker; walker->baseState(); build(walker->getConstructor()->getTempl(),-1); // Build the whole delay slot fallOffset += len; bytecount += len; } while(bytecount < delaySlotByteCnt); walker = tmp; // Restore original context uniqueoffset = olduniqueoffset; } void SleighBuilder::setLabel(OpTpl *op) { cache->addLabel( op->getIn(0)->getOffset().getReal()+getLabelBase() ); } void SleighBuilder::appendCrossBuild(OpTpl *bld,int4 secnum) { // Weave in the p-code section from an instruction at another address // bld-param(0) contains the address of the instruction // bld-param(1) contains the section number if (secnum>=0) throw LowlevelError("CROSSBUILD directive within a named section"); secnum = bld->getIn(1)->getOffset().getReal(); VarnodeTpl *vn = bld->getIn(0); AddrSpace *spc = vn->getSpace().fixSpace(*walker); uintb addr = spc->wrapOffset( vn->getOffset().fix(*walker) ); ParserWalker *tmp = walker; uintb olduniqueoffset = 
uniqueoffset; Address newaddr(spc,addr); setUniqueOffset(newaddr); const ParserContext *pos = discache->getParserContext( newaddr ); if (pos->getParserState() != ParserContext::pcode) throw LowlevelError("Could not obtain cached crossbuild instruction"); ParserWalker newwalker( pos, tmp->getParserContext() ); walker = &newwalker; walker->baseState(); Constructor *ct = walker->getConstructor(); ConstructTpl *construct = ct->getNamedTempl(secnum); if (construct == (ConstructTpl *)0) buildEmpty(ct,secnum); else build(construct,secnum); walker = tmp; uniqueoffset = olduniqueoffset; } /// \param min is the minimum number of allocations before a reuse is expected /// \param hashsize is the number of elements in the hash-table void DisassemblyCache::initialize(int4 min,int4 hashsize) { minimumreuse = min; mask = hashsize-1; uintb masktest = coveringmask((uintb)mask); if (masktest != (uintb)mask) // -hashsize- must be a power of 2 throw LowlevelError("Bad windowsize for disassembly cache"); list = new ParserContext *[minimumreuse]; nextfree = 0; hashtable = new ParserContext *[hashsize]; for(int4 i=0;iinitialize(75,20,constspace); list[i] = pos; } ParserContext *pos = list[0]; for(int4 i=0;isetParserState(ParserContext::uninitialized); } /// Return a (possibly cached) ParserContext that is associated with \e addr /// If n different calls to this interface are made with n different Addresses, if /// - n <= minimumreuse AND /// - all the addresses are within the windowsize (=mask+1) /// /// then the cacher guarantees that you get all different ParserContext objects /// \param addr is the Address to disassemble at /// \return the ParserContext associated with the address ParserContext *DisassemblyCache::getParserContext(const Address &addr) { int4 hashindex = ((int4) addr.getOffset()) & mask; ParserContext *res = hashtable[ hashindex ]; if (res->getAddr() == addr) return res; res = list[ nextfree ]; nextfree += 1; // Advance the circular index if (nextfree >= minimumreuse) 
nextfree = 0; res->setAddr(addr); res->setParserState(ParserContext::uninitialized); // Need to start over with parsing hashtable[ hashindex ] = res; // Stick it into the hashtable return res; } /// \param ld is the LoadImage to draw program bytes from /// \param c_db is the context database Sleigh::Sleigh(LoadImage *ld,ContextDatabase *c_db) : SleighBase() { loader = ld; context_db = c_db; cache = new ContextCache(c_db); discache = (DisassemblyCache *)0; } void Sleigh::clearForDelete(void) { delete cache; if (discache != (DisassemblyCache *)0) delete discache; } Sleigh::~Sleigh(void) { clearForDelete(); } /// Completely clear everything except the base and reconstruct /// with a new LoadImage and ContextDatabase /// \param ld is the new LoadImage /// \param c_db is the new ContextDatabase void Sleigh::reset(LoadImage *ld,ContextDatabase *c_db) { clearForDelete(); pcode_cache.clear(); loader = ld; context_db = c_db; cache = new ContextCache(c_db); discache = (DisassemblyCache *)0; } void Sleigh::fastReset() { if (discache) { discache->fastReset(); } } /// The .sla file from the document store is loaded and cache objects are prepared /// \param store is the document store containing the main \ tag. 
void Sleigh::initialize(DocumentStorage &store)

{
  if (isInitialized()) {
    // Base tables already decoded on a previous call; just re-register context vars
    reregisterContext();
  }
  else {
    // First-time setup: locate the <sleigh> tag, open the referenced .sla file,
    // and decode the compressed specification into this translator
    const Element *el = store.getTag("sleigh");
    if (el == (const Element *)0)
      throw LowlevelError("Could not find sleigh tag");
    sla::FormatDecode decoder(this);
    ifstream s(el->getContent(), std::ios_base::binary);
    if (!s)
      throw LowlevelError("Could not open .sla file: " + el->getContent());
    decoder.ingestStream(s);
    s.close();
    decode(decoder);
  }
  // Size the disassembly cache: delay slots or unique-allocation masking require
  // a bigger cache and address window so nearby parses stay resident
  uint4 cachesize = 2;
  uint4 windowsize = 32;
  if (maxdelayslotbytes > 1 || unique_allocatemask != 0) {
    cachesize = 8;
    windowsize = 256;
  }
  // NOTE(review): assumes initialize() is called at most once per reset();
  // an already-allocated discache would leak here — confirm caller contract
  discache = new DisassemblyCache(this,cache,getConstantSpace(),cachesize,windowsize);
}

/// \brief Obtain a parse tree for the instruction at the given address
///
/// The tree may be cached from a previous access. If the address
/// has not been parsed, disassembly is performed, and a new parse tree
/// is prepared. Depending on the desired \e state, the parse tree
/// can be prepared either for disassembly or for p-code generation.
/// \param addr is the given address of the instruction
/// \param state is the desired parse state.
/// \return the parse tree object (ParseContext)
ParserContext *Sleigh::obtainContext(const Address &addr,int4 state) const

{
  ParserContext *pos = discache->getParserContext(addr);
  int4 curstate = pos->getParserState();
  if (curstate >= state)
    return pos;                 // Cached parse already at (or beyond) the requested state
  if (curstate == ParserContext::uninitialized) {
    resolve(*pos);              // Run disassembly to build the constructor tree
    if (state == ParserContext::disassembly)
      return pos;
  }
  // If we reach here, state must be ParserContext::pcode
  resolveHandles(*pos);         // Upgrade the tree so p-code can be generated from it
  return pos;
}

/// Resolve \e all the constructors involved in the instruction at the indicated address
/// \param pos is the parse object that will hold the resulting tree
void Sleigh::resolve(ParserContext &pos) const

{
  // Pull up to 16 raw bytes at the instruction address into the parse buffer
  loader->loadFill(pos.getBuffer(),16,pos.getAddr());
  ParserWalkerChange walker(&pos);
  pos.deallocateState(walker);  // Clear the previous resolve and initialize the walker
  Constructor *ct,*subct;
  uint4 off;
  int4 oper,numoper;

  pos.setDelaySlot(0);
  walker.setOffset(0);          // Initial offset
  pos.clearCommits();           // Clear any old context commits
  pos.loadContext();            // Get context for current address
  ct = root->resolve(walker);   // Base constructor
  walker.setConstructor(ct);
  ct->applyContext(walker);
  // Depth-first walk: each operand either descends into a subtable constructor
  // or is a terminal whose minimum length is recorded immediately
  while(walker.isState()) {
    ct = walker.getConstructor();
    oper = walker.getOperand();
    numoper = ct->getNumOperands();
    while(oper < numoper) {
      OperandSymbol *sym = ct->getOperand(oper);
      off = walker.getOffset(sym->getOffsetBase()) + sym->getRelativeOffset();
      pos.allocateOperand(oper,walker); // Descend into new operand and reserve space
      walker.setOffset(off);
      TripleSymbol *tsym = sym->getDefiningSymbol();
      if (tsym != (TripleSymbol *)0) {
        subct = tsym->resolve(walker);
        if (subct != (Constructor *)0) {
          // Operand is itself a constructor; descend and restart its operand loop
          walker.setConstructor(subct);
          subct->applyContext(walker);
          break;
        }
      }
      walker.setCurrentLength(sym->getMinimumLength());
      walker.popOperand();
      oper += 1;
    }
    if (oper >= numoper) {      // Finished processing constructor
      walker.calcCurrentLength(ct->getMinimumLength(),numoper);
      walker.popOperand();
      // Check for use of delayslot
      ConstructTpl *templ = ct->getTempl();
      if ((templ != (ConstructTpl *)0)&&(templ->delaySlot() > 0))
        pos.setDelaySlot(templ->delaySlot());
    }
  }
  pos.setNaddr(pos.getAddr()+pos.getLength()); // Update Naddr to pointer after instruction
  pos.setParserState(ParserContext::disassembly);
}

/// Resolve handle templates for the given parse tree, assuming Constructors
/// are already resolved.
/// \param pos is the given parse tree
void Sleigh::resolveHandles(ParserContext &pos) const

{
  TripleSymbol *triple;
  Constructor *ct;
  int4 oper,numoper;

  ParserWalker walker(&pos);
  walker.baseState();
  while(walker.isState()) {
    ct = walker.getConstructor();
    oper = walker.getOperand();
    numoper = ct->getNumOperands();
    while(oper < numoper) {
      OperandSymbol *sym = ct->getOperand(oper);
      walker.pushOperand(oper); // Descend into node
      triple = sym->getDefiningSymbol();
      if (triple != (TripleSymbol *)0) {
        if (triple->getType() == SleighSymbol::subtable_symbol)
          break;                // Subtable handles are fixed when their constructor pops
        else                    // Some other kind of symbol as an operand
          triple->getFixedHandle(walker.getParentHandle(),walker);
      }
      else {                    // Must be an expression
        PatternExpression *patexp = sym->getDefiningExpression();
        intb res = patexp->getValue(walker);
        FixedHandle &hand(walker.getParentHandle());
        hand.space = pos.getConstSpace(); // Result of expression is a constant
        hand.offset_space = (AddrSpace *)0;
        hand.offset_offset = (uintb)res;
        hand.size = 0;          // This size should not get used
      }
      walker.popOperand();
      oper += 1;
    }
    if (oper >= numoper) {      // Finished processing constructor
      ConstructTpl *templ = ct->getTempl();
      if (templ != (ConstructTpl *)0) {
        HandleTpl *res = templ->getResult();
        if (res != (HandleTpl *)0) // Pop up handle to containing operand
          res->fix(walker.getParentHandle(),walker);
        // If we need an indicator that the constructor exports nothing try
        // else
        //   walker.getParentHandle().setInvalid();
      }
      walker.popOperand();
    }
  }
  pos.setParserState(ParserContext::pcode);
}

// Length (in bytes) of the instruction at \b baseaddr, disassembling it if needed
int4 Sleigh::instructionLength(const Address &baseaddr) const

{
  ParserContext *pos =
    obtainContext(baseaddr,ParserContext::disassembly);
  return pos->getLength();
}

// Disassemble one instruction and emit its mnemonic/body text; return its length
int4 Sleigh::printAssembly(AssemblyEmit &emit,const Address &baseaddr) const

{
  int4 sz;
  ParserContext *pos = obtainContext(baseaddr,ParserContext::disassembly);
  ParserWalker walker(pos);
  walker.baseState();

  Constructor *ct = walker.getConstructor();
  ostringstream mons;
  ct->printMnemonic(mons,walker);
  ostringstream body;
  ct->printBody(body,walker);
  emit.dump(baseaddr,mons.str(),body.str());
  sz = pos->getLength();
  return sz;
}

// Generate p-code for the single instruction at \b baseaddr (including any
// delay-slot instructions) and pass it to \b emit; return the fall-through offset
int4 Sleigh::oneInstruction(PcodeEmit &emit,const Address &baseaddr) const

{
  int4 fallOffset;
  if (alignment != 1) {
    if ((baseaddr.getOffset() % alignment)!=0) {
      ostringstream s;
      s << "Instruction address not aligned: " << baseaddr;
      throw UnimplError(s.str(),0);
    }
  }

  ParserContext *pos = obtainContext(baseaddr,ParserContext::pcode);
  pos->applyCommits();
  fallOffset = pos->getLength();

  if (pos->getDelaySlot()>0) {
    // Pre-parse every instruction covering the delay-slot byte count so the
    // builder can find them in the disassembly cache
    int4 bytecount = 0;
    do {
      // Do not pass pos->getNaddr() to obtainContext, as pos may have been previously cached and had naddr adjusted
      ParserContext *delaypos = obtainContext(pos->getAddr() + fallOffset,ParserContext::pcode);
      delaypos->applyCommits();
      int4 len = delaypos->getLength();
      fallOffset += len;
      bytecount += len;
    } while(bytecount < pos->getDelaySlot());
    pos->setNaddr(pos->getAddr()+fallOffset);
  }

  ParserWalker walker(pos);
  walker.baseState();
  pcode_cache.clear();
  SleighBuilder builder(&walker,discache,&pcode_cache,getConstantSpace(),getUniqueSpace(),unique_allocatemask);
  try {
    builder.build(walker.getConstructor()->getTempl(),-1);
    pcode_cache.resolveRelatives();
    pcode_cache.emit(baseaddr,&emit);
  } catch(UnimplError &err) {
    // Augment the error with a disassembly of the offending constructor
    ostringstream s;
    s << "Instruction not implemented in pcode:\n ";
    ParserWalker *cur = builder.getCurrentWalker();
    cur->baseState();
    Constructor *ct = cur->getConstructor();
    cur->getAddr().printRaw(s);
    s << ": ";
    ct->printMnemonic(s,*cur);
    s << " ";
    ct->printBody(s,*cur);
    err.explain = s.str();
    err.instruction_length = fallOffset;
    throw err;
  }
  return fallOffset;
}

// Register a context variable (bit range) with the context database
void Sleigh::registerContext(const string &name,int4 sbit,int4 ebit)

{
  context_db->registerVariable(name,sbit,ebit);
}

// Establish the default value of a context variable
void Sleigh::setContextDefault(const string &name,uintm val)

{
  context_db->setVariableDefault(name,val);
}

// Toggle whether the context cache accepts set operations
void Sleigh::allowContextSet(bool val) const

{
  cache->allowSet(val);
}

} // End namespace ghidra



================================================
FILE: pypcode/sleigh/sleigh.hh
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/// \file sleigh.hh
/// \brief Classes and utilities for the main SLEIGH engine

#ifndef __SLEIGH_HH__
#define __SLEIGH_HH__

#include "sleighbase.hh"

namespace ghidra {

class LoadImage;

/// \brief Class for describing a relative p-code branch destination
///
/// An intra-instruction p-code branch takes a \e relative operand.
/// The actual value produced during p-code generation is calculated at
/// the last second using \b this. It stores the index of the BRANCH
/// instruction and a reference to its destination operand. This initially
/// holds a reference to a destination \e label symbol, but is later updated
/// with the final relative value.
struct RelativeRecord { VarnodeData *dataptr; ///< Varnode indicating relative offset uintb calling_index; ///< Index of instruction containing relative offset }; /// \brief Data for building one p-code instruction /// /// Raw data used by the emitter to produce a single PcodeOp struct PcodeData { OpCode opc; ///< The op code VarnodeData *outvar; ///< Output Varnode data (or null) VarnodeData *invar; ///< Array of input Varnode data int4 isize; ///< Number of input Varnodes }; /// \brief Class for caching a chunk of p-code, prior to emitting /// /// The engine accumulates PcodeData and VarnodeData objects for /// a single instruction. Once the full instruction is constructed, /// the objects are passed to the emitter (PcodeEmit) via the emit() method. /// The class acts as a pool of memory for PcodeData and VarnodeData objects /// that can be reused repeatedly to emit multiple instructions. class PcodeCacher { VarnodeData *poolstart; ///< Start of the pool of VarnodeData objects VarnodeData *curpool; ///< First unused VarnodeData VarnodeData *endpool; ///< End of the pool of VarnodeData objects vector issued; ///< P-code ops issued for the current instruction list label_refs; ///< References to labels vector labels; ///< Locations of labels VarnodeData *expandPool(uint4 size); ///< Expand the memory pool public: PcodeCacher(void); ///< Constructor ~PcodeCacher(void); ///< Destructor /// \brief Allocate data objects for a new set of Varnodes /// /// \param size is the number of objects to allocate /// \return a pointer to the array of available VarnodeData objects VarnodeData *allocateVarnodes(uint4 size) { VarnodeData *newptr = curpool + size; if (newptr <= endpool) { VarnodeData *res = curpool; curpool = newptr; return res; } return expandPool(size); } /// \brief Allocate a data object for a new p-code operation /// /// \return the new PcodeData object PcodeData *allocateInstruction(void) { issued.emplace_back(); PcodeData *res = &issued.back(); res->outvar = 
(VarnodeData *)0; res->invar = (VarnodeData *)0; return res; } void addLabelRef(VarnodeData *ptr); ///< Denote a Varnode holding a \e relative \e branch offset void addLabel(uint4 id); ///< Attach a label to the \e next p-code instruction void clear(void); ///< Reset the cache so that all objects are unallocated void resolveRelatives(void); ///< Rewrite branch target Varnodes as \e relative offsets void emit(const Address &addr,PcodeEmit *emt) const; ///< Pass the cached p-code data to the emitter }; /// \brief A container for disassembly context used by the SLEIGH engine /// /// This acts as a factor for the ParserContext objects which are used to disassemble /// a single instruction. These all share a ContextCache which is a front end for /// accessing the ContextDatabase and resolving context variables from the SLEIGH spec. /// ParserContext objects are stored in a hash-table keyed by the address of the instruction. class DisassemblyCache { Translate *translate; ///< The Translate object that owns this cache ContextCache *contextcache; ///< Cached values from the ContextDatabase AddrSpace *constspace; ///< The constant address space int4 minimumreuse; ///< Can call getParserContext this many times, before a ParserContext is reused uint4 mask; ///< Size of the hashtable in form 2^n-1 ParserContext **list; ///< (circular) array of currently cached ParserContext objects int4 nextfree; ///< Current end/beginning of circular list ParserContext **hashtable; ///< Hashtable for looking up ParserContext via Address void initialize(int4 min,int4 hashsize); ///< Initialize the hash-table of ParserContexts void free(void); ///< Free the hash-table of ParserContexts public: DisassemblyCache(Translate *trans,ContextCache *ccache,AddrSpace *cspace,int4 cachesize,int4 windowsize); ///< Constructor ~DisassemblyCache(void) { free(); } ///< Destructor ParserContext *getParserContext(const Address &addr); ///< Get the parser for a particular Address void fastReset(); ///< Reset 
parser states to uninitialized }; /// \brief Build p-code from a pre-parsed instruction /// /// Through the build() method, \b this walks the parse tree and prepares data /// for final emission as p-code. (The final emitting is done separately through the /// PcodeCacher.emit() method). Generally, only p-code for one instruction is prepared. /// But, through the \b delay-slot mechanism, build() may recursively visit /// additional instructions. class SleighBuilder : public PcodeBuilder { virtual void dump( OpTpl *op ); AddrSpace *const_space; ///< The constant address space AddrSpace *uniq_space; ///< The unique address space uintb uniquemask; ///< Mask of address bits to use to uniquify temporary registers uintb uniqueoffset; ///< Uniquifier bits for \b this instruction DisassemblyCache *discache; ///< Cache of disassembled instructions PcodeCacher *cache; ///< Cache accumulating p-code data for the instruction void buildEmpty(Constructor *ct,int4 secnum); void generateLocation(const VarnodeTpl *vntpl,VarnodeData &vn); AddrSpace *generatePointer(const VarnodeTpl *vntpl,VarnodeData &vn); void generatePointerAdd(PcodeData *op,const VarnodeTpl *vntpl); void setUniqueOffset(const Address &addr); ///< Set uniquifying bits for the current instruction public: SleighBuilder(ParserWalker *w,DisassemblyCache *dcache,PcodeCacher *pc,AddrSpace *cspc,AddrSpace *uspc,uint4 umask); virtual void appendBuild(OpTpl *bld,int4 secnum); virtual void delaySlot(OpTpl *op); virtual void setLabel(OpTpl *op); virtual void appendCrossBuild(OpTpl *bld,int4 secnum); }; /// \brief A full SLEIGH engine /// /// Its provided with a LoadImage of the bytes to be disassembled and /// a ContextDatabase. /// /// Assembly is produced via the printAssembly() method, provided with an /// AssemblyEmit object and an Address. /// /// P-code is produced via the oneInstruction() method, provided with a PcodeEmit /// object and an Address. 
class Sleigh : public SleighBase { LoadImage *loader; ///< The mapped bytes in the program ContextDatabase *context_db; ///< Database of context values steering disassembly ContextCache *cache; ///< Cache of recently used context values mutable DisassemblyCache *discache; ///< Cache of recently parsed instructions mutable PcodeCacher pcode_cache; ///< Cache of p-code data just prior to emitting void clearForDelete(void); ///< Delete the context and disassembly caches protected: ParserContext *obtainContext(const Address &addr,int4 state) const; void resolve(ParserContext &pos) const; ///< Generate a parse tree suitable for disassembly void resolveHandles(ParserContext &pos) const; ///< Prepare the parse tree for p-code generation public: Sleigh(LoadImage *ld,ContextDatabase *c_db); ///< Constructor virtual ~Sleigh(void); ///< Destructor void reset(LoadImage *ld,ContextDatabase *c_db); ///< Reset the engine for a new program void fastReset(); ///< Quickly reset the engine virtual void initialize(DocumentStorage &store); virtual void registerContext(const string &name,int4 sbit,int4 ebit); virtual void setContextDefault(const string &nm,uintm val); virtual void allowContextSet(bool val) const; virtual int4 instructionLength(const Address &baseaddr) const; virtual int4 oneInstruction(PcodeEmit &emit,const Address &baseaddr) const; virtual int4 printAssembly(AssemblyEmit &emit,const Address &baseaddr) const; }; /** \page sleigh SLEIGH \section sleightoc Table of Contents - \ref sleighoverview - \ref sleighbuild - \ref sleighuse - \subpage sleighAPIbasic - \subpage sleighAPIemulate \b Key \b Classes - \ref Translate - \ref AssemblyEmit - \ref PcodeEmit - \ref LoadImage - \ref ContextDatabase \section sleighoverview Overview Welcome to \b SLEIGH, a machine language translation and dissassembly engine. 
SLEIGH is both a processor specification language and the associated library and tools for using such a specification to generate assembly and to generate \b pcode, a reverse engineering Register Transfer Language (RTL), from binary machine instructions. SLEIGH was originally based on \b SLED, a \e Specification \e Language \e for \e Encoding \e and \e Decoding, designed by Norman Ramsey and Mary F. Fernandez, which performed disassembly (and assembly). SLEIGH extends SLED by providing semantic descriptions (via the RTL) of machine instructions and other practical enhancements for doing real world reverse engineering. SLEIGH is part of Project \b GHIDRA. It provides the core of the GHIDRA disassembler and the data-flow and decompilation analysis. However, SLEIGH can serve as a standalone library for use in other applications for providing a generic disassembly and RTL translation interface. \section sleighbuild Building SLEIGH There are a couple of \e make targets for building the SLEIGH library from source. These are: \code make libsla.a # Build the main library make libsla_dbg.a # Build the library with debug symbols \endcode The source code file \e sleighexample.cc has a complete example of initializing the Translate engine and using it to generate assembly and pcode. The source has a hard-coded file name, \e x86testcode, as the example binary executable it attempts to decode, but this can easily be changed. It also needs a SLEIGH specification file (\e .sla) to be present. Building the example application can be done with something similar to the following makefile fragment. 
\code # The C compiler CXX=g++ # Debug flags DBG_CXXFLAGS=-g -Wall -Wno-sign-compare OPT_CXXFLAGS=-O2 -Wall -Wno-sign-compare # libraries INCLUDES=-I./src LNK=src/libsla_dbg.a sleighexample.o: sleighexample.cc $(CXX) -c $(DBG_CXXFLAGS) -o sleighexample sleighexample.o $(LNK) clean: rm -rf *.o sleighexample \endcode \section sleighuse Using SLEIGH SLEIGH is a generic reverse engineering tool in the sense that the API is designed to be completely processor independent. In order to process binary executables for a specific processor, The library reads in a \e specification \e file, which describes how instructions are encoded and how they are interpreted by the processor. An application which needs to do disassembly or generate \b pcode can design to the SLEIGH API once, and then the application will automatically support any processor for which there is a specification. For working with a single processor, the SLEIGH library needs to load a single \e compiled form of the processor specification, which is traditionally given a ".sla" suffix. Most common processors already have a ".sla" file available. So to use SLEIGH with these processors, the library merely needs to be made aware of the desired file. This documentation covers the use of the SLEIGH API, assuming that this specification file is available. The ".sla" files themselves are created by running the \e compiler on a file written in the formal SLEIGH language. These files traditionally have the suffix ".slaspec" For those who want to design such a specification for a new processor, please refer to the document, "SLEIGH: A Language for Rapid Processor Specification." */ /** \page sleighAPIbasic The Basic SLEIGH Interface To use SLEIGH as a library within an application, there are basically five classes that you need to be aware of. 
- \ref sleightranslate - \ref sleighassememit - \ref sleighpcodeemit - \ref sleighloadimage - \ref sleighcontext \section sleightranslate Translate (or Sleigh) The core SLEIGH class is Sleigh, which is derived from the interface, Translate. In order to instantiate it in your code, you need a LoadImage object, and a ContextDatabase object. The load image is responsible for retrieving instruction bytes, based on address, from a binary executable. The context database provides the library extra mode information that may be necessary to do the disassembly or translation. This can be used, for instance, to specify that an x86 binary is running in 32-bit mode, or to specify that an ARM processor is running in THUMB mode. Once these objects are built, the Sleigh object can be immediately instantiated. \code LoadImageBfd *loader; ContextDatabase *context; Translate *trans; // Set up the loadimage // Providing an executable name and architecture string loadimagename = "x86testcode"; string bfdtarget= "default"; loader = new LoadImageBfd(loadimagename,bfdtarget); loader->open(); // Load the executable from file context = new ContextInternal(); // Create a processor context trans = new Sleigh(loader,context); // Instantiate the translator \endcode Once the Sleigh object is in hand, the only required initialization step left is to inform it of the ".sla" file. The file is in XML format and needs to be read in using SLEIGH's built-in XML parser. The following code accomplishes this. \code string sleighfilename = "specfiles/x86.sla"; DocumentStorage docstorage; Element *sleighroot = docstorage.openDocument(sleighfilename)->getRoot(); docstorage.registerTag(sleighroot); trans->initialize(docstorage); // Initialize the translator \endcode \section sleighassememit AssemblyEmit In order to do disassembly, you need to derive a class from AssemblyEmit, and implement the method \e dump. The library will call this method exactly once, for each instruction disassembled. 
This routine simply needs to decide how (and where) to print the corresponding portion of the disassembly. For instance, \code class AssemblyRaw : public AssemblyEmit { public: virtual void dump(const Address &addr,const string &mnem,const string &body) { addr.printRaw(cout); cout << ": " << mnem << ' ' << body << endl; } }; \endcode This is a minimal implementation that simply dumps the disassembly straight to standard out. Once this object is instantiated, the Sleigh object can use it to write out assembly via the Translate::printAssembly() method. \code AssemblyEmit *assememit = new AssemblyRaw(); Address addr(trans->getDefaultCodeSpace(),0x80484c0); int4 length; // Length of instruction in bytes length = trans->printAssembly(*assememit,addr); addr = addr + length; // Advance to next instruction length = trans->printAssembly(*assememit,addr); addr = addr + length; length = trans->printAssembly(*assememit,addr); \endcode \section sleighpcodeemit PcodeEmit In order to generate a \b pcode translation of a machine instruction, you need to derive a class from PcodeEmit and implement the virtual method \e dump. This method will be invoked once for each \b pcode operation in the translation of a machine instruction. There will likely be multiple calls per instruction. Each call passes in a single \b pcode operation, complete with its possible varnode output, and all of its varnode inputs. Here is an example of a PcodeEmit object that simply prints out the \b pcode. 
\code
class PcodeRawOut : public PcodeEmit {
public:
  virtual void dump(const Address &addr,OpCode opc,VarnodeData *outvar,VarnodeData *vars,int4 isize);
};

static void print_vardata(ostream &s,VarnodeData &data)
{
  s << '(' << data.space->getName() << ',';
  data.space->printOffset(s,data.offset);
  s << ',' << dec << data.size << ')';
}

void PcodeRawOut::dump(const Address &addr,OpCode opc,VarnodeData *outvar,VarnodeData *vars,int4 isize)
{
  if (outvar != (VarnodeData *)0) {     // The output is optional
    print_vardata(cout,*outvar);
    cout << " = ";
  }
  cout << get_opname(opc);
  // Possibly check for a code reference or a space reference
  cout << ' ';
  for(int4 i=0;i<isize;++i)
    print_vardata(cout,vars[i]);
  cout << endl;
}
\endcode

Once the PcodeEmit object is instantiated, the Sleigh object can use it to
generate pcode, one instruction at a time, using the
Translate::oneInstruction() const method.

\code
  PcodeEmit *pcodeemit = new PcodeRawOut();

  Address addr(trans->getDefaultCodeSpace(),0x80484c0);
  int4 length;                   // Length of instruction in bytes

  length = trans->oneInstruction(*pcodeemit,addr);
  addr = addr + length;          // Advance to next instruction
  length = trans->oneInstruction(*pcodeemit,addr);
  addr = addr + length;
  length = trans->oneInstruction(*pcodeemit,addr);
\endcode

For an application to properly \e follow \e flow, while translating machine
instructions into pcode, the emitted pcode must be inspected for the various
branch operations.

\section sleighloadimage LoadImage

A LoadImage holds all the binary data from an executable file in the format similar
to how it would exist when being executed by a real processor.  The interface to
this from SLEIGH is actually very simple, although it can hide a complicated
structure.  One method does most of the work, LoadImage::loadFill().  It takes a
byte pointer, a size, and an Address.  The method is expected to fill in the
\e ptr array with \e size bytes taken from the load image, corresponding to the
address \e addr.  There are two more virtual methods that are required for a
complete implementation of LoadImage, \e getArchType and \e adjustVma, but these
do not need to be implemented fully.
\code class MyLoadImage : public LoadImage { public: MyLoadImage(const string &nm) : Loadimage(nm) {} virtual void loadFill(uint1 *ptr,int4 size,const Address &addr); virtual string getArchType(void) const { return "mytype"; } virtual void adjustVma(long adjust) {} }; \endcode \section sleighcontext ContextDatabase The ContextDatabase needs to keep track of any possible context variable and its value, over different address ranges. In most cases, you probably don't need to override the class yourself, but can use the built-in class, ContextInternal. This provides the basic functionality required and will work for different architectures. What you may need to do is set values for certain variables, depending on the processor and the environment it is running in. For instance, for the x86 platform, you need to set the \e addrsize and \e opsize bits, to indicate the processor would be running in 32-bit mode. The context variables specific to a particular processor are established by the SLEIGH spec. So the variables can only be set \e after the spec has been loaded. \code ... context = new ContextInternal(); trans = new Sleigh(loader,context); DocumentStorage docstorage; Element *root = docstorage.openDocument("specfiles/x86.sla")->getRoot(); docstorage.registerTag(root); trans->initialize(docstorage); context->setVariableDefault("addrsize",1); // Address size is 32-bits context->setVariableDefault("opsize",1); // Operand size is 32-bits \endcode */ } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/sleighbase.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sleighbase.hh" namespace ghidra { const uint4 SleighBase::MAX_UNIQUE_SIZE = 256; int4 SourceFileIndexer::index(const string filename){ auto it = fileToIndex.find(filename); if (fileToIndex.end() != it){ return it->second; } fileToIndex[filename] = leastUnusedIndex; indexToFile[leastUnusedIndex] = filename; return leastUnusedIndex++; } int4 SourceFileIndexer::getIndex(string filename){ return fileToIndex[filename]; } string SourceFileIndexer::getFilename(int4 index){ return indexToFile[index]; } void SourceFileIndexer::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_SOURCEFILES); while(decoder.peekElement() == sla::ELEM_SOURCEFILE) { int4 subel = decoder.openElement(); string filename = decoder.readString(sla::ATTRIB_NAME); int4 index = decoder.readSignedInteger(sla::ATTRIB_INDEX); decoder.closeElement(subel); fileToIndex[filename] = index; indexToFile[index] = filename; } decoder.closeElement(el); } void SourceFileIndexer::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_SOURCEFILES); for (int4 i = 0; i < leastUnusedIndex; ++i){ encoder.openElement(sla::ELEM_SOURCEFILE); encoder.writeString(sla::ATTRIB_NAME, indexToFile.at(i)); encoder.writeSignedInteger(sla::ATTRIB_INDEX, i); encoder.closeElement(sla::ELEM_SOURCEFILE); } encoder.closeElement(sla::ELEM_SOURCEFILES); } SleighBase::SleighBase(void) { root = (SubtableSymbol *)0; maxdelayslotbytes = 0; unique_allocatemask = 0; numSections = 0; } /// Assuming the symbol table is populated, iterate through the table collecting /// registers (for the map), user-op names, and 
context fields. void SleighBase::buildXrefs(vector &errorPairs) { SymbolScope *glb = symtab.getGlobalScope(); SymbolTree::const_iterator iter; SleighSymbol *sym; ostringstream s; for(iter=glb->begin();iter!=glb->end();++iter) { sym = *iter; if (sym->getType() == SleighSymbol::varnode_symbol) { pair ins(((VarnodeSymbol *)sym)->getFixedVarnode(),sym->getName()); pair::iterator,bool> res = varnode_xref.insert(ins); if (!res.second) { errorPairs.push_back(sym->getName()); errorPairs.push_back((*(res.first)).second); } } else if (sym->getType() == SleighSymbol::userop_symbol) { int4 index = ((UserOpSymbol *)sym)->getIndex(); while(userop.size() <= index) userop.push_back(""); userop[index] = sym->getName(); } else if (sym->getType() == SleighSymbol::context_symbol) { ContextSymbol *csym = (ContextSymbol *)sym; ContextField *field = (ContextField *)csym->getPatternValue(); int4 startbit = field->getStartBit(); int4 endbit = field->getEndBit(); registerContext(csym->getName(),startbit,endbit); } } } /// If \b this SleighBase is being reused with a new program, the context /// variables need to be registered with the new program's database void SleighBase::reregisterContext(void) { SymbolScope *glb = symtab.getGlobalScope(); SymbolTree::const_iterator iter; SleighSymbol *sym; for(iter=glb->begin();iter!=glb->end();++iter) { sym = *iter; if (sym->getType() == SleighSymbol::context_symbol) { ContextSymbol *csym = (ContextSymbol *)sym; ContextField *field = (ContextField *)csym->getPatternValue(); int4 startbit = field->getStartBit(); int4 endbit = field->getEndBit(); registerContext(csym->getName(),startbit,endbit); } } } const VarnodeData &SleighBase::getRegister(const string &nm) const { VarnodeSymbol *sym = (VarnodeSymbol *)findSymbol(nm); if (sym == (VarnodeSymbol *)0) throw SleighError("Unknown register name: "+nm); if (sym->getType() != SleighSymbol::varnode_symbol) throw SleighError("Symbol is not a register: "+nm); return sym->getFixedVarnode(); } string 
SleighBase::getRegisterName(AddrSpace *base,uintb off,int4 size) const { VarnodeData sym; sym.space = base; sym.offset = off; sym.size = size; map::const_iterator iter = varnode_xref.upper_bound(sym); // First point greater than offset if (iter == varnode_xref.begin()) return ""; iter--; const VarnodeData &point((*iter).first); if (point.space != base) return ""; uintb offbase = point.offset; if (point.offset+point.size >= off+size) return (*iter).second; while(iter != varnode_xref.begin()) { --iter; const VarnodeData &point((*iter).first); if ((point.space != base)||(point.offset != offbase)) return ""; if (point.offset+point.size >= off+size) return (*iter).second; } return ""; } string SleighBase::getExactRegisterName(AddrSpace *base,uintb off,int4 size) const { VarnodeData sym; sym.space = base; sym.offset = off; sym.size = size; map::const_iterator iter = varnode_xref.find(sym); if (iter == varnode_xref.end()) return ""; return (*iter).second; } void SleighBase::getAllRegisters(map ®list) const { reglist = varnode_xref; } void SleighBase::getUserOpNames(vector &res) const { res = userop; // Return list of all language defined user ops (with index) } /// Write a tag fully describing the details of the space. 
/// \param encoder is the stream being written /// \param spc is the given address space void SleighBase::encodeSlaSpace(Encoder &encoder,AddrSpace *spc) const { if (spc->getType() == IPTR_INTERNAL) encoder.openElement(sla::ELEM_SPACE_UNIQUE); else if (spc->isOtherSpace()) encoder.openElement(sla::ELEM_SPACE_OTHER); else encoder.openElement(sla::ELEM_SPACE); encoder.writeString(sla::ATTRIB_NAME,spc->getName()); encoder.writeSignedInteger(sla::ATTRIB_INDEX, spc->getIndex()); encoder.writeBool(sla::ATTRIB_BIGENDIAN, isBigEndian()); encoder.writeSignedInteger(sla::ATTRIB_DELAY, spc->getDelay()); // if (spc->getDelay() != spc->getDeadcodeDelay()) // encoder.writeSignedInteger(sla::ATTRIB_DEADCODEDELAY, spc->getDeadcodeDelay()); encoder.writeSignedInteger(sla::ATTRIB_SIZE, spc->getAddrSize()); if (spc->getWordSize() > 1) encoder.writeSignedInteger(sla::ATTRIB_WORDSIZE, spc->getWordSize()); encoder.writeBool(sla::ATTRIB_PHYSICAL, spc->hasPhysical()); if (spc->getType() == IPTR_INTERNAL) encoder.closeElement(sla::ELEM_SPACE_UNIQUE); else if (spc->isOtherSpace()) encoder.closeElement(sla::ELEM_SPACE_OTHER); else encoder.closeElement(sla::ELEM_SPACE); } /// This does the bulk of the work of creating a .sla file /// \param encoder is the stream encoder void SleighBase::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_SLEIGH); encoder.writeSignedInteger(sla::ATTRIB_VERSION, sla::FORMAT_VERSION); encoder.writeBool(sla::ATTRIB_BIGENDIAN, isBigEndian()); encoder.writeSignedInteger(sla::ATTRIB_ALIGN, alignment); encoder.writeUnsignedInteger(sla::ATTRIB_UNIQBASE, getUniqueBase()); if (maxdelayslotbytes > 0) encoder.writeUnsignedInteger(sla::ATTRIB_MAXDELAY, maxdelayslotbytes); if (unique_allocatemask != 0) encoder.writeUnsignedInteger(sla::ATTRIB_UNIQMASK, unique_allocatemask); if (numSections != 0) encoder.writeUnsignedInteger(sla::ATTRIB_NUMSECTIONS, numSections); indexer.encode(encoder); encoder.openElement(sla::ELEM_SPACES); 
encoder.writeString(sla::ATTRIB_DEFAULTSPACE, getDefaultCodeSpace()->getName()); for(int4 i=0;igetType()==IPTR_CONSTANT) || (spc->getType()==IPTR_FSPEC)|| (spc->getType()==IPTR_IOP)|| (spc->getType()==IPTR_JOIN)) continue; encodeSlaSpace(encoder,spc); } encoder.closeElement(sla::ELEM_SPACES); symtab.encode(encoder); encoder.closeElement(sla::ELEM_SLEIGH); } /// This is identical to the functionality of decodeSpace, but the AddrSpace information is stored /// in the .sla file format. /// \param decoder is the stream decoder /// \param trans is the translator object to be associated with the new space /// \return a pointer to the initialized AddrSpace AddrSpace *SleighBase::decodeSlaSpace(Decoder &decoder,const Translate *trans) { uint4 elemId = decoder.openElement(); AddrSpace *res; int4 index = 0; int4 addressSize = 0; int4 delay = -1; int4 deadcodedelay = -1; string name; int4 wordsize = 1; bool bigEnd = false; uint4 flags = 0; for (;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == sla::ATTRIB_NAME) { name = decoder.readString(); } if (attribId == sla::ATTRIB_INDEX) index = decoder.readSignedInteger(); else if (attribId == sla::ATTRIB_SIZE) addressSize = decoder.readSignedInteger(); else if (attribId == sla::ATTRIB_WORDSIZE) wordsize = decoder.readSignedInteger(); else if (attribId == sla::ATTRIB_BIGENDIAN) { bigEnd = decoder.readBool(); } else if (attribId == sla::ATTRIB_DELAY) delay = decoder.readSignedInteger(); else if (attribId == sla::ATTRIB_PHYSICAL) { if (decoder.readBool()) flags |= AddrSpace::hasphysical; } } decoder.closeElement(elemId); if (deadcodedelay == -1) deadcodedelay = delay; // If deadcodedelay attribute not present, set it to delay if (index == 0) throw LowlevelError("Expecting index attribute"); if (elemId == sla::ELEM_SPACE_UNIQUE) res = new UniqueSpace(this,trans,index,flags); else if (elemId == sla::ELEM_SPACE_OTHER) res = new OtherSpace(this,trans,index); else { if (addressSize == 0 || delay 
== -1 || name.size() == 0) throw LowlevelError("Expecting size/delay/name attributes"); res = new AddrSpace(this,trans,IPTR_PROCESSOR,name,bigEnd,addressSize,wordsize,index,flags,delay,deadcodedelay); } return res; } /// This is identical in functionality to decodeSpaces but the AddrSpace information /// is stored in the .sla file format. /// \param decoder is the stream decoder /// \param trans is the processor translator to be associated with the spaces void SleighBase::decodeSlaSpaces(Decoder &decoder,const Translate *trans) { // The first space should always be the constant space insertSpace(new ConstantSpace(this,trans)); uint4 elemId = decoder.openElement(sla::ELEM_SPACES); string defname = decoder.readString(sla::ATTRIB_DEFAULTSPACE); while(decoder.peekElement() != 0) { AddrSpace *spc = decodeSlaSpace(decoder,trans); insertSpace(spc); } decoder.closeElement(elemId); AddrSpace *spc = getSpaceByName(defname); if (spc == (AddrSpace *)0) throw LowlevelError("Bad 'defaultspace' attribute: "+defname); setDefaultCodeSpace(spc->getIndex()); } /// This parses the main \ tag (from a .sla file), which includes the description /// of address spaces and the symbol table, with its associated decoding tables /// \param decoder is the stream to decode void SleighBase::decode(Decoder &decoder) { maxdelayslotbytes = 0; unique_allocatemask = 0; numSections = 0; int4 version = 0; uint4 el = decoder.openElement(sla::ELEM_SLEIGH); uint4 attrib = decoder.getNextAttributeId(); while(attrib != 0) { if (attrib == sla::ATTRIB_BIGENDIAN) setBigEndian(decoder.readBool()); else if (attrib == sla::ATTRIB_ALIGN) alignment = decoder.readSignedInteger(); else if (attrib == sla::ATTRIB_UNIQBASE) setUniqueBase(decoder.readUnsignedInteger()); else if (attrib == sla::ATTRIB_MAXDELAY) maxdelayslotbytes = decoder.readUnsignedInteger(); else if (attrib == sla::ATTRIB_UNIQMASK) unique_allocatemask = decoder.readUnsignedInteger(); else if (attrib == sla::ATTRIB_NUMSECTIONS) numSections = 
decoder.readUnsignedInteger(); else if (attrib == sla::ATTRIB_VERSION) version = decoder.readSignedInteger(); attrib = decoder.getNextAttributeId(); } if (version != sla::FORMAT_VERSION) throw LowlevelError(".sla file has wrong format"); indexer.decode(decoder); decodeSlaSpaces(decoder,this); symtab.decode(decoder,this); decoder.closeElement(el); root = (SubtableSymbol *)symtab.getGlobalScope()->findSymbol("instruction"); vector errorPairs; buildXrefs(errorPairs); if (!errorPairs.empty()) throw SleighError("Duplicate register pairs"); } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/sleighbase.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file sleighbase.hh /// \brief Base class for applications that process SLEIGH format specifications #ifndef __SLEIGHBASE_HH__ #define __SLEIGHBASE_HH__ #include "translate.hh" #include "slaformat.hh" #include "slghsymbol.hh" namespace ghidra { /// \brief class for recording source file information for SLEIGH constructors. /// /// A SLEIGH specification may contain many source files. This class is /// used to associate each constructor in a SLEIGH language to the source /// file where it is defined. This information is useful when debugging /// SLEIGH specifications. Sourcefiles are assigned a numeric index and /// the mapping from indices to filenames is written to the generated .sla /// file. 
For each constructor, the data written to the .sla file includes /// the source file index. class SourceFileIndexer { public: SourceFileIndexer() {leastUnusedIndex = 0;} ~SourceFileIndexer(void) { } ///Returns the index of the file. If the file is not in the index it is added. int4 index(const string filename); int4 getIndex(const string); ///< get the index of a file. Error if the file is not in the index. string getFilename(int4); ///< get the filename corresponding to an index void decode(Decoder &decoder); ///< decode a stored index mapping from a stream void encode(Encoder &encoder) const; ///< Encode the index mapping to stream private: int4 leastUnusedIndex; ///< one-up count for assigning indices to files map indexToFile; ///< map from indices to files map fileToIndex; ///< map from files to indices }; /// \brief Common core of classes that read or write SLEIGH specification files natively. /// /// This class represents what's in common across the SLEIGH infrastructure between: /// - Reading the various SLEIGH specification files /// - Building and writing out SLEIGH specification files class SleighBase : public Translate { vector userop; ///< Names of user-define p-code ops for \b this Translate object map varnode_xref; ///< A map from Varnodes in the \e register space to register names protected: SubtableSymbol *root; ///< The root SLEIGH decoding symbol SymbolTable symtab; ///< The SLEIGH symbol table uint4 maxdelayslotbytes; ///< Maximum number of bytes in a delay-slot directive uint4 unique_allocatemask; ///< Bits that are guaranteed to be zero in the unique allocation scheme uint4 numSections; ///< Number of \e named sections SourceFileIndexer indexer; ///< source file index used when generating SLEIGH constructor debug info void buildXrefs(vector &errorPairs); ///< Build register map. Collect user-ops and context-fields. 
void reregisterContext(void); ///< Reregister context fields for a new executable AddrSpace *decodeSlaSpace(Decoder &decoder,const Translate *trans); ///< Add a space parsed from a .sla file void decodeSlaSpaces(Decoder &decoder,const Translate *trans); ///< Restore address spaces from a .sla file void decode(Decoder &decoder); /// Decode a SELIGH specification from a stream public: static const uint4 MAX_UNIQUE_SIZE; ///< Maximum size of a varnode in the unique space (should match value in SleighBase.java) SleighBase(void); ///< Construct an uninitialized translator bool isInitialized(void) const { return (root != (SubtableSymbol *)0); } ///< Return \b true if \b this is initialized virtual ~SleighBase(void) {} ///< Destructor virtual const VarnodeData &getRegister(const string &nm) const; virtual string getRegisterName(AddrSpace *base,uintb off,int4 size) const; virtual string getExactRegisterName(AddrSpace *base,uintb off,int4 size) const; virtual void getAllRegisters(map ®list) const; virtual void getUserOpNames(vector &res) const; SleighSymbol *findSymbol(const string &nm) const { return symtab.findSymbol(nm); } ///< Find a specific SLEIGH symbol by name in the current scope SleighSymbol *findSymbol(uintm id) const { return symtab.findSymbol(id); } ///< Find a specific SLEIGH symbol by id SleighSymbol *findGlobalSymbol(const string &nm) const { return symtab.findGlobalSymbol(nm); } ///< Find a specific global SLEIGH symbol by name void encodeSlaSpace(Encoder &encoder,AddrSpace *spc) const; ///< Write the details of given space in .sla format void encode(Encoder &encoder) const; ///< Write out the SLEIGH specification as a \ tag. }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/slgh_compile.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "slgh_compile.hh" #include "filemanage.hh" #include extern FILE *sleighin; // Global pointer to file for lexer extern int sleighlex_destroy(void); namespace ghidra { SleighCompile *slgh; // Global pointer to sleigh object for use with parser #ifdef YYDEBUG extern int sleighdebug; // Global debugging state for parser #endif extern int sleighparse(void); /// This must be constructed with the \e main section of p-code, which can contain no p-code /// \param rtl is the \e main section of p-code /// \param scope is the symbol scope associated with the section SectionVector::SectionVector(ConstructTpl *rtl,SymbolScope *scope) { nextindex = -1; main.section = rtl; main.scope = scope; } /// Associate the new section with \b nextindex, established prior to parsing /// \param rtl is the \e named section of p-code /// \param scope is the associated symbol scope void SectionVector::append(ConstructTpl *rtl,SymbolScope *scope) { while(named.size() <= nextindex) named.emplace_back(); named[ nextindex ] = RtlPair(rtl,scope); } /// Construct with the default qualities for an address space, which /// can then be overridden with further parsing. /// \param nm is the name of the address space SpaceQuality::SpaceQuality(const string &nm) { name = nm; type = ramtype; size = 0; wordsize = 1; isdefault = false; } /// Establish default qualities for the field, which can then be overridden /// by further parsing. A name and bit range must always be explicitly given. 
/// \param nm is the parsed name for the field /// \param l is the parsed lower bound of the bit range /// \param h is the parse upper bound of the bit range FieldQuality::FieldQuality(string *nm,uintb *l,uintb *h) { name = *nm; low = *l; high = *h; signext = false; flow = true; hex = true; delete nm; delete l; delete h; } /// Establish each component of the \b with block header /// \param s is the subtable (or null) /// \param pq is the pattern to prepend (or null) /// \param cvec is the set of context changes (or null) void WithBlock::set(SubtableSymbol *s, PatternEquation *pq, vector *cvec) { ss = s; pateq = pq; if (pateq != (PatternEquation *)0) pateq->layClaim(); if (cvec != (vector *)0) { for(int4 i=0;isize();++i) contvec.push_back((*cvec)[i]); // Lay claim to -cvec-s pointers, we don't clone delete cvec; } } WithBlock::~WithBlock(void) { if (pateq != (PatternEquation *)0) PatternEquation::release(pateq); for(int4 i=0;i &stack, PatternEquation *pateq) { list::const_iterator iter; for(iter=stack.begin();iter!=stack.end();++iter) { PatternEquation *witheq = (*iter).pateq; if (witheq != (PatternEquation *)0) pateq = new EquationAnd(witheq, pateq); } return pateq; } /// \brief Build a complete array of context changes from any surrounding \b with blocks /// /// Given a list of ContextChanges parsed locally from a Constructor and the stack of /// surrounding \b with blocks, make a new list of ContextChanges, prepending everything from /// the stack to the local vector. Return the new list and delete the old. 
/// \param stack is the current \b with block stack /// \param contvec is the local list of ContextChanges (or null) /// \return the new list of ContextChanges vector *WithBlock::collectAndPrependContext(const list &stack, vector *contvec) { vector *res = (vector *)0; list::const_iterator iter; for(iter=stack.begin();iter!=stack.end();++iter) { const vector &changelist( (*iter).contvec ); if (changelist.size() == 0) continue; if (res == (vector *)0) res = new vector(); for(int4 i=0;ipush_back(changelist[i]->clone()); } } if (contvec != (vector *)0) { if (contvec->size() != 0) { if (res == (vector *)0) res = new vector(); for(int4 i=0;isize();++i) res->push_back((*contvec)[i]); // lay claim to contvecs pointer } delete contvec; } return res; } /// \brief Get the active subtable from the stack of currently active \b with blocks /// /// Find the subtable associated with the innermost \b with block and return it. /// \param stack is the stack of currently active \b with blocks /// \return the innermost subtable (or null) SubtableSymbol *WithBlock::getCurrentSubtable(const list &stack) { list::const_iterator iter; for(iter=stack.begin();iter!=stack.end();++iter) { if ((*iter).ss != (SubtableSymbol *)0) return (*iter).ss; } return (SubtableSymbol *)0; } void ConsistencyChecker::OptimizeRecord::copyFromExcludingSize(ConsistencyChecker::OptimizeRecord &that) { this->writeop = that.writeop; this->readop = that.readop; this->inslot = that.inslot; this->writecount = that.writecount; this->readcount = that.readcount; this->writesection = that.writesection; this->readsection = that.readsection; this->opttype = that.opttype; } void ConsistencyChecker::OptimizeRecord::update(int4 opIdx, int4 slotIdx, int4 secNum) { if (slotIdx >= 0) { updateRead(opIdx, slotIdx, secNum); } else { updateWrite(opIdx, secNum); } } void ConsistencyChecker::OptimizeRecord::updateRead(int4 i, int4 inslot, int4 secNum) { this->readop = i; this->readcount++; this->inslot = inslot; this->readsection = 
secNum; } void ConsistencyChecker::OptimizeRecord::updateWrite(int4 i, int4 secNum) { this->writeop = i; this->writecount++; this->writesection = secNum; } void ConsistencyChecker::OptimizeRecord::updateExport() { this->writeop = 0; this->readop = 0; this->writecount = 2; this->readcount = 2; this->readsection = -2; this->writesection = -2; } void ConsistencyChecker::OptimizeRecord::updateCombine(ConsistencyChecker::OptimizeRecord &that) { if (that.writecount != 0) { this->writeop = that.writeop; this->writesection = that.writesection; } if (that.readcount != 0) { this->readop = that.readop; this->inslot = that.inslot; this->readsection = that.readsection; } this->writecount += that.writecount; this->readcount += that.readcount; // opttype is not relevant here } /// \brief Construct the consistency checker and optimizer /// /// \param sleigh is the parsed SLEIGH spec /// \param rt is the root subtable of the SLEIGH spec /// \param un is \b true to request "Unnecessary extension" warnings /// \param warndead is \b true to request warnings for written but not read temporaries ConsistencyChecker::ConsistencyChecker(SleighCompile *sleigh,SubtableSymbol *rt,bool un,bool warndead) { compiler = sleigh; root_symbol = rt; unnecessarypcode = 0; readnowrite = 0; writenoread = 0; printextwarning = un; printdeadwarning = warndead; } /// \brief Recover a specific value for the size associated with a Varnode template /// /// This method is passed a ConstTpl that is assumed to be the \e size attribute of /// a VarnodeTpl (as returned by getSize()). This method recovers the specific /// integer value for this constant template or throws an exception. /// The integer value can either be immediately available from parsing, derived /// from a Constructor operand symbol whose size is known, or taken from /// the calculated export size of a subtable symbol. 
/// \param sizeconst is the Varnode size template /// \param ct is the Constructor containing the Varnode /// \return the integer value int4 ConsistencyChecker::recoverSize(const ConstTpl &sizeconst,Constructor *ct) { int4 size,handindex; OperandSymbol *opsym; SubtableSymbol *tabsym; map::const_iterator iter; switch(sizeconst.getType()) { case ConstTpl::real: size = (int4) sizeconst.getReal(); break; case ConstTpl::handle: handindex = sizeconst.getHandleIndex(); opsym = ct->getOperand(handindex); size = opsym->getSize(); if (size == -1) { tabsym = dynamic_cast(opsym->getDefiningSymbol()); if (tabsym == (SubtableSymbol *)0) throw SleighError("Could not recover varnode template size"); iter = sizemap.find(tabsym); if (iter == sizemap.end()) throw SleighError("Subtable out of order"); size = (*iter).second; } break; default: throw SleighError("Bad constant type as varnode template size"); } return size; } /// \brief Convert an unnecessary CPUI_INT_ZEXT and CPUI_INT_SEXT into a COPY /// /// SLEIGH allows \b zext and \b sext notation even if the input and output /// Varnodes are ultimately the same size. In this case, a warning may be /// issued and the operator is converted to a CPUI_COPY. /// \param op is the given CPUI_INT_ZEXT or CPUI_INT_SEXT operator to check /// \param ct is the Constructor containing the operator void ConsistencyChecker::dealWithUnnecessaryExt(OpTpl *op,Constructor *ct) { if (printextwarning) { ostringstream msg; msg << "Unnecessary "; printOpName(msg,op); compiler->reportWarning(compiler->getLocation(ct), msg.str()); } op->setOpcode(CPUI_COPY); // Equivalent to copy unnecessarypcode += 1; } /// \brief Convert an unnecessary CPUI_SUBPIECE into a COPY /// /// SLEIGH allows truncation notation even if the input and output Varnodes are /// ultimately the same size. In this case, a warning may be issued and the operator /// is converted to a CPUI_COPY. 
/// \param op is the given CPUI_SUBPIECE operator /// \param ct is the containing Constructor void ConsistencyChecker::dealWithUnnecessaryTrunc(OpTpl *op,Constructor *ct) { if (printextwarning) { ostringstream msg; msg << "Unnecessary "; printOpName(msg,op); compiler->reportWarning(compiler->getLocation(ct), msg.str()); } op->setOpcode(CPUI_COPY); // Equivalent to copy op->removeInput(1); unnecessarypcode += 1; } /// \brief Check for misuse of the given operator and print a warning /// /// This method currently checks for: /// - Unsigned less-than comparison with zero /// /// \param op is the given operator /// \param ct is the Constructor owning the operator /// \return \b false if the operator is fatally misused bool ConsistencyChecker::checkOpMisuse(OpTpl *op,Constructor *ct) { switch(op->getOpcode()) { case CPUI_INT_LESS: { VarnodeTpl *vn = op->getIn(1); if (vn->getSpace().isConstSpace() && vn->getOffset().isZero()) { compiler->reportWarning(compiler->getLocation(ct), "Unsigned comparison with zero is always false"); } } break; default: break; } return true; } /// \brief Make sure the given operator meets size restrictions /// /// Many SLEIGH operators require that inputs and/or outputs are the /// same size, or they have other specific size requirement. /// Print an error and return \b false for any violations. 
/// \param op is the given p-code operator
/// \param ct is the Constructor owning the operator
/// \return \b true if there are no size restriction violations
bool ConsistencyChecker::sizeRestriction(OpTpl *op,Constructor *ct)

{				// Make sure op template meets size restrictions
				// Return false and any info about mismatched sizes
  int4 vnout,vn0,vn1;
  AddrSpace *spc;

  // Note: a recovered size of 0 means "size not yet determined" and is accepted everywhere;
  // a recovered size of -1 means the size came from a subtable with no export and is an error.
  switch(op->getOpcode()) {
  case CPUI_COPY:			// Instructions where all inputs and output are same size
  case CPUI_INT_2COMP:
  case CPUI_INT_NEGATE:
  case CPUI_FLOAT_NEG:
  case CPUI_FLOAT_ABS:
  case CPUI_FLOAT_SQRT:
  case CPUI_FLOAT_CEIL:
  case CPUI_FLOAT_FLOOR:
  case CPUI_FLOAT_ROUND:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    if (vnout == vn0) return true;
    if ((vnout==0)||(vn0==0)) return true;
    printOpError(op,ct,-1,0,"Input and output sizes must match");
    return false;
  case CPUI_INT_ADD:
  case CPUI_INT_SUB:
  case CPUI_INT_XOR:
  case CPUI_INT_AND:
  case CPUI_INT_OR:
  case CPUI_INT_MULT:
  case CPUI_INT_DIV:
  case CPUI_INT_SDIV:
  case CPUI_INT_REM:
  case CPUI_INT_SREM:
  case CPUI_FLOAT_ADD:
  case CPUI_FLOAT_DIV:
  case CPUI_FLOAT_MULT:
  case CPUI_FLOAT_SUB:
    // Binary operators: output and both inputs must all agree in size
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    vn1 = recoverSize(op->getIn(1)->getSize(),ct);
    if (vn1 == -1) {
      printOpError(op,ct,1,1,"Using subtable with exports in expression");
      return false;
    }
    if ((vnout!=0)&&(vn0!=0)&&(vnout!=vn0)) {
      printOpError(op,ct,-1,0,"The output and all input sizes must match");
      return false;
    }
    if ((vnout!=0)&&(vn1!=0)&&(vnout!=vn1)) {
      printOpError(op,ct,-1,1,"The output and all input sizes must match");
      return false;
    }
    if ((vn0!=0)&&(vn1!=0)&&(vn0!=vn1)) {
      printOpError(op,ct,0,1,"The output and all input sizes must match");
      return false;
    }
    return true;
  case CPUI_FLOAT_NAN:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    if (vnout != 1) {
      printOpError(op,ct,-1,-1,"Output must be a boolean (size 1)");
      return false;
    }
    break;			// Falls through to the final return true
  case CPUI_INT_EQUAL:		// Instructions with bool output, all inputs equal size
  case CPUI_INT_NOTEQUAL:
  case CPUI_INT_SLESS:
  case CPUI_INT_SLESSEQUAL:
  case CPUI_INT_LESS:
  case CPUI_INT_LESSEQUAL:
  case CPUI_INT_CARRY:
  case CPUI_INT_SCARRY:
  case CPUI_INT_SBORROW:
  case CPUI_FLOAT_EQUAL:
  case CPUI_FLOAT_NOTEQUAL:
  case CPUI_FLOAT_LESS:
  case CPUI_FLOAT_LESSEQUAL:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    if (vnout != 1) {
      printOpError(op,ct,-1,-1,"Output must be a boolean (size 1)");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    vn1 = recoverSize(op->getIn(1)->getSize(),ct);
    if (vn1 == -1) {
      printOpError(op,ct,1,1,"Using subtable with exports in expression");
      return false;
    }
    if ((vn0==0)||(vn1==0)) return true;
    if (vn0 != vn1) {
      printOpError(op,ct,0,1,"Inputs must be the same size");
      return false;
    }
    return true;
  case CPUI_BOOL_XOR:
  case CPUI_BOOL_AND:
  case CPUI_BOOL_OR:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    if (vnout != 1) {
      printOpError(op,ct,-1,-1,"Output must be a boolean (size 1)");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    if (vn0 != 1) {
      printOpError(op,ct,0,0,"Input must be a boolean (size 1)");
      return false;
    }
    return true;
  case CPUI_BOOL_NEGATE:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    if (vnout != 1) {
      printOpError(op,ct,-1,-1,"Output must be a boolean (size 1)");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    if (vn0 != 1) {
      printOpError(op,ct,0,0,"Input must be a boolean (size 1)");
      return false;
    }
    return true;
    // The shift amount does not necessarily have to be the same size
    // But the output and first parameter must be same size
  case CPUI_INT_LEFT:
  case CPUI_INT_RIGHT:
  case CPUI_INT_SRIGHT:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    if ((vnout==0)||(vn0==0)) return true;
    if (vnout != vn0) {
      printOpError(op,ct,-1,0,"Output and first input must be the same size");
      return false;
    }
    return true;
  case CPUI_INT_ZEXT:
  case CPUI_INT_SEXT:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    if ((vnout==0)||(vn0==0)) return true;
    if (vnout == vn0) {
      // Extension to the same size is a no-op: downgrade to COPY
      dealWithUnnecessaryExt(op,ct);
      return true;
    }
    else if (vnout < vn0) {
      printOpError(op,ct,-1,0,"Output size must be strictly bigger than input size");
      return false;
    }
    return true;
  case CPUI_CBRANCH:
    // Only the boolean condition (input 1) has a size requirement
    vn1 = recoverSize(op->getIn(1)->getSize(),ct);
    if (vn1 == -1) {
      printOpError(op,ct,1,1,"Using subtable with exports in expression");
      return false;
    }
    if (vn1 != 1) {
      printOpError(op,ct,1,1,"Input must be a boolean (size 1)");
      return false;
    }
    return true;
  case CPUI_LOAD:
  case CPUI_STORE:
    if (op->getIn(0)->getOffset().getType() != ConstTpl::spaceid)
      return true;
    // The pointer (input 1) must match the address size of the space being accessed
    spc = op->getIn(0)->getOffset().getSpace();
    vn1 = recoverSize(op->getIn(1)->getSize(),ct);
    if (vn1 == -1) {
      printOpError(op,ct,1,1,"Using subtable with exports in expression");
      return false;
    }
    if ((vn1!=0)&&(vn1 != spc->getAddrSize())) {
      printOpError(op,ct,1,1,"Pointer size must match size of space");
      return false;
    }
    return true;
  case CPUI_SUBPIECE:
    vnout = recoverSize(op->getOut()->getSize(),ct);
    if (vnout == -1) {
      printOpError(op,ct,-1,-1,"Using subtable with exports in expression");
      return false;
    }
    vn0 = recoverSize(op->getIn(0)->getSize(),ct);
    if (vn0 == -1) {
      printOpError(op,ct,0,0,"Using subtable with exports in expression");
      return false;
    }
    vn1 = op->getIn(1)->getOffset().getReal();	// Truncation (byte) offset
    if ((vnout==0)||(vn0==0)) return true;
    if ((vnout==vn0)&&(vn1==0)) { // No actual truncation is occurring
      dealWithUnnecessaryTrunc(op,ct);
      return true;
    }
    else if (vnout>=vn0) {
      printOpError(op,ct,-1,0,"Output must be strictly smaller than input");
      return false;
    }
    if (vnout>vn0-vn1) {
      printOpError(op,ct,-1,0,"Too much truncation");
      return false;
    }
    return true;
  default:
    break;
  }
  return true;
}

/// \brief Print the name of a p-code operator (for warning and error messages)
///
/// Print the full name of the operator with its syntax token in parentheses.
/// \param s is the output stream to write to
/// \param op is the operator to print
void ConsistencyChecker::printOpName(ostream &s,OpTpl *op)

{
  switch(op->getOpcode()) {
  case CPUI_COPY:
    s << "Copy(=)";
    break;
  case CPUI_LOAD:
    s << "Load(*)";
    break;
  case CPUI_STORE:
    s << "Store(*)";
    break;
  case CPUI_BRANCH:
    s << "Branch(goto)";
    break;
  case CPUI_CBRANCH:
    s << "Conditional branch(if)";
    break;
  case CPUI_BRANCHIND:
    s << "Indirect branch(goto[])";
    break;
  case CPUI_CALL:
    s << "Call";
    break;
  case CPUI_CALLIND:
    s << "Indirect Call";
    break;
  case CPUI_CALLOTHER:
    s << "User defined";
    break;
  case CPUI_RETURN:
    s << "Return";
    break;
  case CPUI_INT_EQUAL:
    s << "Equality(==)";
    break;
  case CPUI_INT_NOTEQUAL:
    s << "Notequal(!=)";
    break;
  case CPUI_INT_SLESS:
    s << "Signed less than(s<)";
    break;
  case CPUI_INT_SLESSEQUAL:
    s << "Signed less than or equal(s<=)";
    break;
  case CPUI_INT_LESS:
    s << "Less than(<)";
    break;
  case CPUI_INT_LESSEQUAL:
    s << "Less than or equal(<=)";
    break;
  case CPUI_INT_ZEXT:
    s << "Zero extension(zext)";
    break;
  case CPUI_INT_SEXT:
    s << "Signed extension(sext)";
    break;
  case CPUI_INT_ADD:
    s << "Addition(+)";
    break;
  case CPUI_INT_SUB:
    s << "Subtraction(-)";
    break;
  case CPUI_INT_CARRY:
    s << "Carry";
    break;
  case CPUI_INT_SCARRY:
    s << "Signed carry";
    break;
  case CPUI_INT_SBORROW:
    s << "Signed borrow";
    break;
  case CPUI_INT_2COMP:
    s << "Twos complement(-)";
    break;
  case CPUI_INT_NEGATE:
    s << "Negate(~)";
    break;
  case CPUI_INT_XOR:
    s << "Exclusive or(^)";
    break;
  case CPUI_INT_AND:
    s << "And(&)";
    break;
  case CPUI_INT_OR:
    s << "Or(|)";
    break;
  case CPUI_INT_LEFT:
    s << "Left shift(<<)";
    break;
  case CPUI_INT_RIGHT:
    s << "Right shift(>>)";
    break;
  case CPUI_INT_SRIGHT:
    s << "Signed right shift(s>>)";
    break;
  case CPUI_INT_MULT:
    s << "Multiplication(*)";
    break;
  case CPUI_INT_DIV:
    s << "Division(/)";
    break;
  case CPUI_INT_SDIV:
    s << "Signed division(s/)";
    break;
  case CPUI_INT_REM:
    s << "Remainder(%)";
    break;
  case CPUI_INT_SREM:
    s << "Signed remainder(s%)";
    break;
  case CPUI_BOOL_NEGATE:
    s << "Boolean negate(!)";
    break;
  case CPUI_BOOL_XOR:
    s << "Boolean xor(^^)";
    break;
  case CPUI_BOOL_AND:
    s << "Boolean and(&&)";
    break;
  case CPUI_BOOL_OR:
    s << "Boolean or(||)";
    break;
  case CPUI_FLOAT_EQUAL:
    s << "Float equal(f==)";
    break;
  case CPUI_FLOAT_NOTEQUAL:
    s << "Float notequal(f!=)";
    break;
  case CPUI_FLOAT_LESS:
    s << "Float less than(f<)";
    break;
  case CPUI_FLOAT_LESSEQUAL:
    s << "Float less than or equal(f<=)";
    break;
  case CPUI_FLOAT_NAN:
    s << "Not a number(nan)";
    break;
  case CPUI_FLOAT_ADD:
    s << "Float addition(f+)";
    break;
  case CPUI_FLOAT_DIV:
    s << "Float division(f/)";
    break;
  case CPUI_FLOAT_MULT:
    s << "Float multiplication(f*)";
    break;
  case CPUI_FLOAT_SUB:
    s << "Float subtractions(f-)";
    break;
  case CPUI_FLOAT_NEG:
    s << "Float minus(f-)";
    break;
  case CPUI_FLOAT_ABS:
    s << "Absolute value(abs)";
    break;
  case CPUI_FLOAT_SQRT:
    s << "Square root";
    break;
  case CPUI_FLOAT_INT2FLOAT:
    s << "Integer to float conversion(int2float)";
    break;
  case CPUI_FLOAT_FLOAT2FLOAT:
    s << "Float to float conversion(float2float)";
    break;
  case CPUI_FLOAT_TRUNC:
    s << "Float truncation(trunc)";
    break;
  case CPUI_FLOAT_CEIL:
    s << "Ceiling(ceil)";
    break;
  case CPUI_FLOAT_FLOOR:
    s << "Floor";
    break;
  case CPUI_FLOAT_ROUND:
    s << "Round";
    break;
  case CPUI_MULTIEQUAL:
    s << "Build";
    break;
  case CPUI_INDIRECT:
    s << "Delay";
    break;
  case CPUI_SUBPIECE:
    s << "Truncation(:)";
    break;
  case CPUI_SEGMENTOP:
    s << "Segment table(segment)";
    break;
  case CPUI_CPOOLREF:
    s << "Constant Pool(cpool)";
    break;
  case CPUI_NEW:
    s << "New object(newobject)";
    break;
  case CPUI_POPCOUNT:
    s << "Count bits(popcount)";
    break;
  case CPUI_LZCOUNT:
    s << "Count leading zero bits(lzcount)";
    break;
  default:
    break;			// Unknown opcodes print nothing
  }
}

/// \brief Get the OperandSymbol associated with an input/output Varnode of the given p-code operator
///
/// Find the Constructor operand associated with a specified Varnode, if it exists.
/// The Varnode is specified by the p-code operator using it and the input \e slot index, with -1 /// indicating the operator's output Varnode. Not all Varnode's are associated with a /// Constructor operand, in which case \e null is returned. /// \param slot is the input \e slot index, or -1 for an output Varnode /// \param op is the p-code operator using the Varnode /// \param ct is the Constructor containing the p-code and operands /// \return the associated operand or null OperandSymbol *ConsistencyChecker::getOperandSymbol(int4 slot,OpTpl *op,Constructor *ct) { VarnodeTpl *vn; OperandSymbol *opsym; int4 handindex; if (slot == -1) vn = op->getOut(); else vn = op->getIn(slot); switch(vn->getSize().getType()) { case ConstTpl::handle: handindex = vn->getSize().getHandleIndex(); opsym = ct->getOperand(handindex); break; default: opsym = (OperandSymbol *)0; break; } return opsym; } /// \brief Print an error message describing a size restriction violation /// /// The given p-code operator is assumed to violate the Varnode size rules for its opcode. /// If the violation is for two Varnodes that should be the same size, each Varnode is indicated /// as an input \e slot index, where -1 indicates the operator's output Varnode. /// If the violation is for a single Varnode, its \e slot index is passed in twice. 
/// \param op is the given p-code operator /// \param ct is the containing Constructor /// \param err1 is the slot of the first violating Varnode /// \param err2 is the slot of the second violating Varnode (or equal to \b err1) /// \param msg is additional description that is appended to the error message void ConsistencyChecker::printOpError(OpTpl *op,Constructor *ct,int4 err1,int4 err2,const string &msg) { SubtableSymbol *sym = ct->getParent(); OperandSymbol *op1,*op2; op1 = getOperandSymbol(err1,op,ct); if (err2 != err1) op2 = getOperandSymbol(err2,op,ct); else op2 = (OperandSymbol *)0; ostringstream msgBuilder; msgBuilder << "Size restriction error in table '" << sym->getName() << "'" << endl; if ((op1 != (OperandSymbol *)0)&&(op2 != (OperandSymbol *)0)) msgBuilder << " Problem with operands '" << op1->getName() << "' and '" << op2->getName() << "'"; else if (op1 != (OperandSymbol *)0) msgBuilder << " Problem with operand 1 '" << op1->getName() << "'"; else if (op2 != (OperandSymbol *)0) msgBuilder << " Problem with operand 2 '" << op2->getName() << "'"; else msgBuilder << " Problem"; msgBuilder << " in "; printOpName(msgBuilder,op); msgBuilder << " operator" << endl << " " << msg; compiler->reportError(compiler->getLocation(ct), msgBuilder.str()); } /// \brief Check all p-code operators within a given Constructor section for misuse and size consistency /// /// Each operator within the section is checked in turn, and warning and error messages are emitted /// if necessary. The method returns \b false if there is a fatal error associated with any /// operator. 
/// \param ct is the Constructor to check /// \param cttpl is the specific p-code section to check /// \return \b true if there are no fatal errors in the section bool ConsistencyChecker::checkConstructorSection(Constructor *ct,ConstructTpl *cttpl) { if (cttpl == (ConstructTpl *)0) return true; // Nothing to check vector::const_iterator iter; const vector &ops(cttpl->getOpvec()); bool testresult = true; for(iter=ops.begin();iter!=ops.end();++iter) { if (!sizeRestriction(*iter,ct)) testresult = false; if (!checkOpMisuse(*iter,ct)) testresult = false; } return testresult; } /// \brief Check the given p-code operator for too large temporary registers /// /// Return \b true if the output or one of the inputs to the operator /// is in the \e unique space and larger than SleighBase::MAX_UNIQUE_SIZE /// \param op is the given operator /// \return \b true if the operator has a too large temporary parameter bool ConsistencyChecker::hasLargeTemporary(OpTpl *op) { VarnodeTpl *out = op->getOut(); if ((out != (VarnodeTpl*)0x0) && isTemporaryAndTooBig(out)) { return true; } for(int4 i = 0;i < op->numInput();++i) { VarnodeTpl *in = op->getIn(i); if (isTemporaryAndTooBig(in)) { return true; } } return false; } /// \brief Check if the given Varnode is a too large temporary register /// /// Return \b true precisely when the Varnode is in the \e unique space and /// has size larger than SleighBase::MAX_UNIQUE_SIZE /// \param vn is the given Varnode /// \return \b true if the Varnode is a too large temporary register bool ConsistencyChecker::isTemporaryAndTooBig(VarnodeTpl *vn) { return vn->getSpace().isUniqueSpace() && (vn->getSize().getReal() > SleighBase::MAX_UNIQUE_SIZE); } /// \brief Resolve the offset of the given \b truncated Varnode /// /// SLEIGH allows a Varnode to be derived from another larger Varnode using /// truncation or bit range notation. 
/// The final offset of the truncated Varnode may not
/// be calculable immediately during parsing, especially if the address space is big endian
/// and the size of the containing Varnode is not immediately known.
/// This method recovers the final offset of the truncated Varnode now that all sizes are
/// known and otherwise checks that the truncation expression is valid.
/// \param ct is the Constructor containing the Varnode
/// \param slot is the \e slot index of the truncated Varnode (for error messages)
/// \param op is the operator using the truncated Varnode (for error messages)
/// \param vn is the given truncated Varnode
/// \param isbigendian is \b true if the Varnode is in a big endian address space
/// \return \b true if the truncation expression was valid
bool ConsistencyChecker::checkVarnodeTruncation(Constructor *ct,int4 slot,
						OpTpl *op,VarnodeTpl *vn,bool isbigendian)
{
  const ConstTpl &off( vn->getOffset() );
  if (off.getType() != ConstTpl::handle) return true;
  if (off.getSelect() != ConstTpl::v_offset_plus) return true;	// Only truncation expressions need adjustment
  ConstTpl::const_type sztype = vn->getSize().getType();
  if ((sztype != ConstTpl::real)&&(sztype != ConstTpl::handle)) {
    printOpError(op,ct,slot,slot,"Bad truncation expression");
    return false;
  }
  int4 sz = recoverSize(off,ct);	// Recover the size of the original operand
  if (sz <= 0) {
    printOpError(op,ct,slot,slot,"Could not recover size");
    return false;
  }
  // Rewrite the offset relative to the containing Varnode, accounting for endianness
  bool res = vn->adjustTruncation(sz,isbigendian);
  if (!res) {
    printOpError(op,ct,slot,slot,"Truncation operator out of bounds");
    return false;
  }
  return true;
}

/// \brief Check and adjust truncated Varnodes in the given Constructor p-code section
///
/// Run through all Varnodes looking for offset templates marked as ConstTpl::v_offset_plus,
/// which indicates they were constructed using truncation notation.  These truncation expressions
/// are checked for validity and adjusted depending on the endianness of the address space.
/// \param ct is the Constructor /// \param cttpl is the given p-code section /// \param isbigendian is set to \b true if the SLEIGH specification is big endian /// \return \b true if all truncation expressions were valid bool ConsistencyChecker::checkSectionTruncations(Constructor *ct,ConstructTpl *cttpl,bool isbigendian) { vector::const_iterator iter; const vector &ops(cttpl->getOpvec()); bool testresult = true; for(iter=ops.begin();iter!=ops.end();++iter) { OpTpl *op = *iter; VarnodeTpl *outvn = op->getOut(); if (outvn != (VarnodeTpl *)0) { if (!checkVarnodeTruncation(ct,-1,op,outvn,isbigendian)) testresult = false; } for(int4 i=0;inumInput();++i) { if (!checkVarnodeTruncation(ct,i,op,op->getIn(i),isbigendian)) testresult = false; } } return testresult; } /// \brief Check all Constructors within the given subtable for operator misuse and size consistency /// /// Each Constructor and section is checked in turn. Additionally, the size of Constructor /// exports is checked for consistency across the subtable. Constructors within one subtable must /// all export the same size Varnode if the export at all. 
/// \param sym is the given subtable to check /// \return \b true if there are no fatal misuse or consistency violations bool ConsistencyChecker::checkSubtable(SubtableSymbol *sym) { int4 tablesize = -1; int4 numconstruct = sym->getNumConstructors(); Constructor *ct; bool testresult = true; bool seenemptyexport = false; bool seennonemptyexport = false; for(int4 i=0;igetConstructor(i); if (!checkConstructorSection(ct,ct->getTempl())) testresult = false; int4 numsection = ct->getNumSections(); for(int4 j=0;jgetNamedTempl(j))) testresult = false; } if (ct->getTempl() == (ConstructTpl *)0) continue; // Unimplemented HandleTpl *exportres = ct->getTempl()->getResult(); if (exportres != (HandleTpl *)0) { if (seenemptyexport && (!seennonemptyexport)) { ostringstream msg; msg << "Table '" << sym->getName() << "' exports inconsistently; "; msg << "Constructor starting at line " << dec << ct->getLineno() << " is first inconsistency"; compiler->reportError(compiler->getLocation(ct), msg.str()); testresult = false; } seennonemptyexport = true; int4 exsize = recoverSize(exportres->getSize(),ct); if (tablesize == -1) tablesize = exsize; if (exsize != tablesize) { ostringstream msg; msg << "Table '" << sym->getName() << "' has inconsistent export size; "; msg << "Constructor starting at line " << dec << ct->getLineno() << " is first conflict"; compiler->reportError(compiler->getLocation(ct), msg.str()); testresult = false; } } else { if (seennonemptyexport && (!seenemptyexport)) { ostringstream msg; msg << "Table '" << sym->getName() << "' exports inconsistently; "; msg << "Constructor starting at line " << dec << ct->getLineno() << " is first inconsistency"; compiler->reportError(compiler->getLocation(ct), msg.str()); testresult = false; } seenemptyexport = true; } } if (seennonemptyexport) { if (tablesize == 0) { compiler->reportWarning(compiler->getLocation(sym), "Table '" + sym->getName() + "' exports size 0"); } sizemap[sym] = tablesize; // Remember recovered size } else 
sizemap[sym] = -1; return testresult; } /// \brief Establish ordering on subtables so that more dependent tables come first /// /// Do a depth first traversal of SubtableSymbols starting at the root table going /// through Constructors and then through their operands, establishing a post-order on the /// subtables. This allows the size restriction checks to recursively calculate sizes of dependent /// subtables first and propagate their values into more global Varnodes (as Constructor operands) /// \param root is the root subtable void ConsistencyChecker::setPostOrder(SubtableSymbol *root) { postorder.clear(); sizemap.clear(); vector path; vector state; vector ctstate; sizemap[root] = -1; // Mark root as traversed path.push_back(root); state.push_back(0); ctstate.push_back(0); while(!path.empty()) { SubtableSymbol *cur = path.back(); int4 ctind = state.back(); if (ctind >= cur->getNumConstructors()) { path.pop_back(); // Table is fully traversed state.pop_back(); ctstate.pop_back(); postorder.push_back(cur); // Post the traversed table } else { Constructor *ct = cur->getConstructor(ctind); int4 oper = ctstate.back(); if (oper >= ct->getNumOperands()) { state.back() = ctind + 1; // Constructor fully traversed ctstate.back() = 0; } else { ctstate.back() = oper + 1; OperandSymbol *opsym = ct->getOperand(oper); SubtableSymbol *subsym = dynamic_cast(opsym->getDefiningSymbol()); if (subsym != (SubtableSymbol *)0) { map::const_iterator iter; iter = sizemap.find(subsym); if (iter == sizemap.end()) { // Not traversed yet sizemap[subsym] = -1; // Mark table as traversed path.push_back(subsym); // Recurse state.push_back(0); ctstate.push_back(0); } } } } } } map::iterator ConsistencyChecker::UniqueState::lesserIter(uintb offset) { if (recs.begin() == recs.end()) { return recs.end(); } map::iterator iter; iter = recs.lower_bound(offset); if (iter == recs.begin()) { return recs.end(); } return std::prev(iter); } ConsistencyChecker::OptimizeRecord 
ConsistencyChecker::UniqueState::coalesce(vector &records) { uintb minOff = -1; uintb maxOff = -1; vector::iterator iter; for (iter = records.begin(); iter != records.end(); ++iter) { if (minOff == -1 || (*iter)->offset < minOff) { minOff = (*iter)->offset; } if (maxOff == -1 || (*iter)->offset + (*iter)->size > maxOff) { maxOff = (*iter)->offset + (*iter)->size; } } OptimizeRecord result(minOff, maxOff - minOff); for (iter = records.begin(); iter != records.end(); ++iter) { result.updateCombine(**iter); } return result; } void ConsistencyChecker::UniqueState::set(uintb offset, int4 size, OptimizeRecord &rec) { vector records; getDefinitions(records, offset, size); records.push_back(&rec); OptimizeRecord coalesced = coalesce(records); recs.erase(recs.lower_bound(coalesced.offset), recs.lower_bound(coalesced.offset+coalesced.size)); recs.insert(pair(coalesced.offset, coalesced)); } void ConsistencyChecker::UniqueState::getDefinitions(vector &result, uintb offset, int4 size) { if (size == 0) { size = 1; } map::iterator iter; iter = lesserIter(offset); uintb cursor = offset; if (iter != recs.end() && endOf(iter) > offset) { OptimizeRecord &preRec = iter->second; cursor = endOf(iter); result.push_back(&preRec); } uintb end = offset + size; iter = recs.lower_bound(offset); while (iter != recs.end() && iter->first < end) { if (iter->first > cursor) { // The iterator becomes invalid with this insertion, so take the new one. iter = recs.insert(pair(cursor,OptimizeRecord(cursor, iter->first - cursor))).first; result.push_back(&iter->second); iter++; // Put the (now valid) iterator back to where it was. } // No need to truncate, as we're just counting a read result.push_back(&iter->second); cursor = endOf(iter); iter++; } if (end > cursor) { iter = recs.insert(pair(cursor,OptimizeRecord(cursor, end - cursor))).first; result.push_back(&iter->second); } } /// \brief Test whether two given Varnodes intersect /// /// This test must be conservative. 
/// If it can't explicitly prove that the
/// Varnodes don't intersect, it returns \b true (a possible intersection).
/// \param vn1 is the first Varnode to check
/// \param vn2 is the second Varnode to check
/// \return \b true if there is a possible intersection of the Varnodes' storage
bool ConsistencyChecker::possibleIntersection(const VarnodeTpl *vn1,const VarnodeTpl *vn2)

{ // Conservatively test whether vn1 and vn2 can intersect
  if (vn1->getSpace().isConstSpace()) return false;	// Constants never occupy storage
  if (vn2->getSpace().isConstSpace()) return false;

  bool u1 = vn1->getSpace().isUniqueSpace();
  bool u2 = vn2->getSpace().isUniqueSpace();

  if (u1 != u2) return false;	// A temporary cannot intersect a non-temporary

  if (vn1->getSpace().getType() != ConstTpl::spaceid) return true;
  if (vn2->getSpace().getType() != ConstTpl::spaceid) return true;
  AddrSpace *spc = vn1->getSpace().getSpace();
  if (spc != vn2->getSpace().getSpace()) return false;	// Different spaces cannot intersect

  // If any offset or size is not a known constant, assume a possible intersection
  if (vn2->getOffset().getType() != ConstTpl::real) return true;
  if (vn2->getSize().getType() != ConstTpl::real) return true;

  if (vn1->getOffset().getType() != ConstTpl::real) return true;
  if (vn1->getSize().getType() != ConstTpl::real) return true;

  // Both ranges fully known: do an exact overlap test
  uintb offset = vn1->getOffset().getReal();
  uintb size = vn1->getSize().getReal();

  uintb off = vn2->getOffset().getReal();
  if (off+vn2->getSize().getReal()-1 < offset) return false;
  if (off > (offset+size-1)) return false;
  return true;
}

/// \brief Check if a p-code operator reads from or writes to a given Varnode
///
/// A write check is always performed.  A read check is performed only if requested.
/// Return \b true if there is a possible write (or read) of the Varnode.
/// The checks need to be extremely conservative.  If it can't be determined what
/// exactly is being read or written, \b true (possible interference) is returned.
/// \param vn is the given Varnode /// \param op is p-code operator to test for interference /// \param checkread is \b true if read interference should be checked /// \return \b true if there is write (or read) interference bool ConsistencyChecker::readWriteInterference(const VarnodeTpl *vn,const OpTpl *op,bool checkread) const { switch(op->getOpcode()) { case BUILD: case CROSSBUILD: case DELAY_SLOT: case MACROBUILD: case CPUI_LOAD: case CPUI_STORE: case CPUI_BRANCH: case CPUI_CBRANCH: case CPUI_BRANCHIND: case CPUI_CALL: case CPUI_CALLIND: case CPUI_CALLOTHER: case CPUI_RETURN: case LABELBUILD: // Another value might jump in here return true; default: break; } if (checkread) { int4 numinputs = op->numInput(); for(int4 i=0;igetIn(i))) return true; } // We always check for writes to -vn- const VarnodeTpl *vn2 = op->getOut(); if (vn2 != (const VarnodeTpl *)0) { if (possibleIntersection(vn,vn2)) return true; } return false; } /// \brief Accumulate read/write info if the given Varnode is temporary /// /// If the Varnode is in the \e unique space, an OptimizationRecord for it is looked /// up based on its offset. Information about how a p-code operator uses the Varnode /// is accumulated in the record. 
/// \param state is collection of OptimizationRecords associated with temporary Varnodes /// \param vn is the given Varnode to check (which may or may not be temporary) /// \param i is the index of the operator using the Varnode (within its p-code section) /// \param inslot is the \e slot index of the Varnode within its operator /// \param secnum is the section number containing the operator void ConsistencyChecker::examineVn(UniqueState &state, const VarnodeTpl *vn,uint4 i,int4 inslot,int4 secnum) { if (vn == (const VarnodeTpl *)0) return; if (!vn->getSpace().isUniqueSpace()) return; if (vn->getOffset().getType() != ConstTpl::real) return; uintb offset = vn->getOffset().getReal(); int4 size = vn->getSize().getReal(); if (inslot >= 0) { vector defs; state.getDefinitions(defs,offset,size); for (vector::iterator iter=defs.begin();iter!=defs.end();++iter) { (*iter)->updateRead(i,inslot,secnum); } } else { OptimizeRecord rec(offset,size); rec.updateWrite(i,secnum); state.set(offset,size,rec); } } /// \brief Gather statistics about read and writes to temporary Varnodes within a given p-code section /// /// For each temporary Varnode, count how many times it is read from or written to /// in the given section of p-code operators. 
/// \param ct is the given Constructor /// \param state is the (initially empty) collection of count records /// \param secnum is the given p-code section number void ConsistencyChecker::optimizeGather1(Constructor *ct,UniqueState &state,int4 secnum) const { ConstructTpl *tpl; if (secnum < 0) tpl = ct->getTempl(); else tpl = ct->getNamedTempl(secnum); if (tpl == (ConstructTpl *)0) return; const vector &ops( tpl->getOpvec() ); for(uint4 i=0;inumInput();++j) { const VarnodeTpl *vnin = op->getIn(j); examineVn(state,vnin,i,j,secnum); } const VarnodeTpl *vn = op->getOut(); examineVn(state,vn,i,-1,secnum); } } /// \brief Mark Varnodes in the export of the given p-code section as read and written /// /// As part of accumulating read/write info for temporary Varnodes, examine the export Varnode /// for the section, and if it involves a temporary, mark it as both read and written, guaranteeing /// that the Varnode is not optimized away. /// \param ct is the given Constructor /// \param state is the collection of count records /// \param secnum is the given p-code section number void ConsistencyChecker::optimizeGather2(Constructor *ct,UniqueState &state,int4 secnum) const { ConstructTpl *tpl; if (secnum < 0) tpl = ct->getTempl(); else tpl = ct->getNamedTempl(secnum); if (tpl == (ConstructTpl *)0) return; HandleTpl *hand = tpl->getResult(); if (hand == (HandleTpl *)0) return; if (hand->getPtrSpace().isUniqueSpace()) { if (hand->getPtrOffset().getType() == ConstTpl::real) { uintb offset = hand->getPtrOffset().getReal(); int4 size = hand->getPtrSize().getReal(); vector defs; state.getDefinitions(defs,offset,size); for (vector::iterator iter=defs.begin();iter!=defs.end();++iter) { (*iter)->updateExport(); // NOTE: Could this just be updateRead? // Technically, an exported handle could be written by the parent.... 
} } } if (hand->getSpace().isUniqueSpace()) { if ((hand->getPtrSpace().getType() == ConstTpl::real)&& (hand->getPtrOffset().getType() == ConstTpl::real)) { uintb offset = hand->getPtrOffset().getReal(); int4 size = hand->getPtrSize().getReal(); vector defs; state.getDefinitions(defs,offset,size); for (vector::iterator iter=defs.begin();iter!=defs.end();++iter) { (*iter)->updateExport(); // NOTE: Could this just be updateRead? // Technically, an exported handle could be written by the parent.... } } } } /// \brief Search for an OptimizeRecord indicating a temporary Varnode that can be optimized away /// /// OptimizeRecords for all temporary Varnodes must already be calculated. /// Find a record indicating a temporary Varnode that is written once and read once through a COPY. /// Test propagation of the other Varnode associated with the COPY, making sure: /// if propagation is backward, the Varnode must not cross another read or write, and /// if propagation is forward, the Varnode must not cross another write. /// If all the requirements pass, return the record indicating that the COPY can be removed. 
/// \param ct is the Constructor owning the p-code /// \param state is the collection of OptimizeRecords to search /// \return a passing OptimizeRecord or null const ConsistencyChecker::OptimizeRecord *ConsistencyChecker::findValidRule(Constructor *ct, const UniqueState &state) const { map::const_iterator iter; iter = state.begin(); while(iter!=state.end()) { const OptimizeRecord &currec( (*iter).second ); ++iter; if ((currec.writecount==1)&&(currec.readcount==1)&&(currec.readsection==currec.writesection)) { // Temporary must be read and written exactly once ConstructTpl *tpl; if (currec.readsection < 0) tpl = ct->getTempl(); else tpl = ct->getNamedTempl(currec.readsection); const vector &ops( tpl->getOpvec() ); const OpTpl *writeop = ops[ currec.writeop ]; const OpTpl *readop = ops[ currec.readop ]; if (currec.writeop >= currec.readop) // Read must come after write throw SleighError("Read of temporary before write"); VarnodeTpl *writevn = writeop->getOut(); VarnodeTpl *readvn = readop->getIn(currec.inslot); // Because the record can change size and position, we have to check if the varnode // "connecting" the write and read ops is actually the same varnode. If not, then we can't // optimize it out. // There may be an opportunity here to re-write the size/offset when either the write or read // op is a COPY, but I'll leave that for later discussion. // Actually, maybe not. If the truncate would be of a handle, we can't. 
if (*writevn != *readvn) { continue; } if (readop->getOpcode() == CPUI_COPY) { bool saverecord = true; currec.opttype = 0; // Read op is a COPY const VarnodeTpl *vn = readop->getOut(); for(int4 i=currec.writeop+1;igetOpcode() == CPUI_COPY) { bool saverecord = true; currec.opttype = 1; // Write op is a COPY const VarnodeTpl *vn = writeop->getIn(0); for(int4 i=currec.writeop+1;i deleteops; ConstructTpl *ctempl; if (rec.readsection < 0) ctempl = ct->getTempl(); else ctempl = ct->getNamedTempl(rec.readsection); if (rec.opttype == 0) { // If read op is COPY int4 readop = rec.readop; OpTpl *op = ctempl->getOpvec()[ readop ]; VarnodeTpl *vnout = new VarnodeTpl(*op->getOut()); // Make COPY output ctempl->setOutput(vnout,rec.writeop); // become write output deleteops.push_back(readop); // and then delete the read (COPY) } else if (rec.opttype == 1) { // If write op is COPY int4 writeop = rec.writeop; OpTpl *op = ctempl->getOpvec()[ writeop ]; VarnodeTpl *vnin = new VarnodeTpl(*op->getIn(0)); // Make COPY input ctempl->setInput(vnin,rec.readop,rec.inslot); // become read input deleteops.push_back(writeop); // and then delete the write (COPY) } ctempl->deleteOps(deleteops); } /// \brief Issue error/warning messages for unused temporary Varnodes /// /// An error message is issued if a temporary is read but not written. /// A warning may be issued if a temporary is written but not read. 
/// \param ct is the Constructor /// \param state is the collection of records associated with each temporary Varnode void ConsistencyChecker::checkUnusedTemps(Constructor *ct,const UniqueState &state) { map::const_iterator iter; iter = state.begin(); while(iter != state.end()) { const OptimizeRecord &currec( (*iter).second ); if (currec.readcount == 0) { if (printdeadwarning) compiler->reportWarning(compiler->getLocation(ct), "Temporary is written but not read"); writenoread += 1; } else if (currec.writecount == 0) { compiler->reportError(compiler->getLocation(ct), "Temporary is read but not written"); readnowrite += 1; } ++iter; } } /// \brief In the given Constructor p-code section, check for temporary Varnodes that are too large /// /// Run through all Varnodes in the constructor, if a Varnode is in the \e unique /// space and its size exceeds the threshold SleighBase::MAX_UNIQUE_SIZE, issue /// a warning. Note that this method returns after the first large Varnode is found. /// \param ct is the given Constructor /// \param ctpl is the specific p-code section void ConsistencyChecker::checkLargeTemporaries(Constructor *ct,ConstructTpl *ctpl) { vector ops = ctpl->getOpvec(); for(vector::iterator iter = ops.begin();iter != ops.end();++iter) { if (hasLargeTemporary(*iter)) { compiler->reportError( compiler->getLocation(ct), "Constructor uses temporary varnode larger than " + to_string(SleighBase::MAX_UNIQUE_SIZE) + " bytes."); return; } } } /// \brief Do p-code optimization on each section of the given Constructor /// /// For p-code section, statistics on temporary Varnode usage is collected, /// and unnecessary COPY operators are removed. 
/// \param ct is the given Constructor void ConsistencyChecker::optimize(Constructor *ct) { const OptimizeRecord *currec; UniqueState state; int4 numsections = ct->getNumSections(); do { state.clear(); for(int4 i=-1;iisBigEndian(); for(int4 i=0;igetNumConstructors(); Constructor *ct; for(int4 j=0;jgetConstructor(j); int4 numsections = ct->getNumSections(); for(int4 k=-1;kgetTempl(); else tpl = ct->getNamedTempl(k); if (tpl == (ConstructTpl *)0) continue; if (!checkSectionTruncations(ct,tpl,isbigendian)) testresult = false; } } } return testresult; } /// This counts Constructors that contain temporary Varnodes that are too large. /// If requested, an individual warning is printed for each Constructor. void ConsistencyChecker::testLargeTemporary(void) { for(int4 i=0;igetNumConstructors(); Constructor *ct; for(int4 j=0;jgetConstructor(j); int4 numsections = ct->getNumSections(); for(int4 k=-1;kgetTempl(); else tpl = ct->getNamedTempl(k); if (tpl == (ConstructTpl *)0) continue; checkLargeTemporaries(ct, tpl); } } } } void ConsistencyChecker::optimizeAll(void) { for(int4 i=0;igetNumConstructors(); Constructor *ct; for(int4 i=0;igetConstructor(i); optimize(ct); } } } ostream& operator<<(ostream &os, const ConsistencyChecker::OptimizeRecord &rec) { os << "{writeop=" << rec.writeop << " readop=" << rec.readop << " inslot=" << rec.inslot << " writecount=" << rec.writecount << " readcount=" << rec.readcount << " opttype=" << rec.opttype << "}"; return os; } /// Sort based on the containing Varnode, then on the bit boundary /// \param op2 is a field to compare with \b this /// \return \b true if \b this should be sorted before the other field bool FieldContext::operator<(const FieldContext &op2) const { if (sym->getName() != op2.sym->getName()) return (sym->getName() < op2.sym->getName()); return (qual->low < op2.qual->low); } void MacroBuilder::free(void) { vector::iterator iter; for(iter=params.begin();iter!=params.end();++iter) delete *iter; params.clear(); } /// The error 
is passed up to the main parse object and a note is made /// locally that an error occurred so parsing can be terminated immediately. /// \param loc is the parse location where the error occurred /// \param val is the error message void MacroBuilder::reportError(const Location* loc, const string &val) { slgh->reportError(loc, val); haserror = true; } /// Given the op corresponding to the invocation, set up the specific parameters. /// \param macroop is the given MACRO directive op void MacroBuilder::setMacroOp(OpTpl *macroop) { VarnodeTpl *vn; HandleTpl *hand; free(); for(int4 i=1;inumInput();++i) { vn = macroop->getIn(i); hand = new HandleTpl(vn); params.push_back(hand); } } /// \brief Given a cloned OpTpl, substitute parameters and add to the output list /// /// VarnodesTpls used by the op are examined to see if they are derived from /// parameters of the macro. If so, details of the parameters actively passed /// as part of the specific macro invocation are substituted into the VarnodeTpl. /// Truncation operations on a macro parameter may cause additional CPUI_SUBPIECE /// operators to be inserted as part of the expansion and certain forms are not /// permitted. 
/// \param op is the cloned op to emit /// \param params is the set of parameters specific to the macro invocation /// \return \b true if there are no illegal truncations bool MacroBuilder::transferOp(OpTpl *op,vector ¶ms) { // Fix handle details of a macro generated OpTpl relative to its specific invocation // and transfer it into the output stream VarnodeTpl *outvn = op->getOut(); int4 handleIndex = 0; int4 plus; bool hasrealsize = false; uintb realsize = 0; if (outvn != (VarnodeTpl *)0) { plus = outvn->transfer(params); if (plus >= 0) { reportError((const Location *)0, "Cannot currently assign to bitrange of macro parameter that is a temporary"); return false; } } for(int4 i=0;inumInput();++i) { VarnodeTpl *vn = op->getIn(i); if (vn->getOffset().getType() == ConstTpl::handle) { handleIndex = vn->getOffset().getHandleIndex(); hasrealsize = (vn->getSize().getType() == ConstTpl::real); realsize = vn->getSize().getReal(); } plus = vn->transfer(params); if (plus >= 0) { if (!hasrealsize) { reportError((const Location *)0, "Problem with bit range operator in macro"); return false; } uintb newtemp = slgh->getUniqueAddr(); // Generate a new temporary location // Generate a SUBPIECE op that implements the offset_plus OpTpl *subpieceop = new OpTpl(CPUI_SUBPIECE); VarnodeTpl *newvn = new VarnodeTpl(ConstTpl(slgh->getUniqueSpace()),ConstTpl(ConstTpl::real,newtemp), ConstTpl(ConstTpl::real,realsize)); subpieceop->setOutput(newvn); HandleTpl *hand = params[handleIndex]; VarnodeTpl *origvn = new VarnodeTpl( hand->getSpace(), hand->getPtrOffset(), hand->getSize() ); subpieceop->addInput(origvn); VarnodeTpl *plusvn = new VarnodeTpl( ConstTpl(slgh->getConstantSpace()), ConstTpl(ConstTpl::real,plus), ConstTpl(ConstTpl::real, 4) ); subpieceop->addInput(plusvn); outvec.push_back(subpieceop); delete vn; // Replace original varnode op->setInput(new VarnodeTpl( *newvn ), i); // with output of subpiece } } outvec.push_back(op); return true; } void MacroBuilder::dump(OpTpl *op) { OpTpl 
*clone; VarnodeTpl *v_clone,*vn; clone = new OpTpl(op->getOpcode()); vn = op->getOut(); if (vn != (VarnodeTpl *)0) { v_clone = new VarnodeTpl(*vn); clone->setOutput(v_clone); } for(int4 i=0;inumInput();++i) { vn = op->getIn(i); v_clone = new VarnodeTpl(*vn); if (v_clone->isRelative()) { // Adjust relative index, depending on the labelbase uintb val = v_clone->getOffset().getReal() + getLabelBase(); v_clone->setRelative(val); } clone->addInput(v_clone); } if (!transferOp(clone,params)) delete clone; } void MacroBuilder::setLabel(OpTpl *op) { // A label within a macro is local to the macro, but when // we expand the macro, we have to adjust the index of // the label, which is local to the macro, so that it fits // in with other labels local to the parent OpTpl *clone; VarnodeTpl *v_clone; clone = new OpTpl(op->getOpcode()); v_clone = new VarnodeTpl( *op->getIn(0) ); // Clone the label index // Make adjustment to macro local value so that it is parent local uintb val = v_clone->getOffset().getReal() + getLabelBase(); v_clone->setOffset(val); clone->addInput(v_clone); outvec.push_back(clone); } uint4 SleighPcode::allocateTemp(void) { return compiler->getUniqueAddr(); } const Location *SleighPcode::getLocation(SleighSymbol *sym) const { return compiler->getLocation(sym); } void SleighPcode::reportError(const Location *loc, const string &msg) { return compiler->reportError(loc, msg); } void SleighPcode::reportWarning(const Location *loc, const string &msg) { return compiler->reportWarning(loc, msg); } void SleighPcode::addSymbol(SleighSymbol *sym) { return compiler->addSymbol(sym); } SleighCompile::SleighCompile(void) : SleighBase() { pcode.setCompiler(this); contextlock = false; // Context layout is not locked userop_count = 0; errors = 0; warnunnecessarypcode = false; warndeadtemps = false; lenientconflicterrors = true; warnalllocalcollisions = false; warnallnops = false; failinsensitivedups = true; debugoutput = false; root = (SubtableSymbol *)0; curmacro = 
(MacroSymbol *)0; curct = (Constructor *)0; } /// Create the address spaces: \b const, \b unique, and \b other. /// Define the special symbols: \b inst_start, \b inst_next, \b inst_next2, \b epsilon. /// Define the root subtable symbol: \b instruction void SleighCompile::predefinedSymbols(void) { symtab.addScope(); // Create global scope // Some predefined symbols root = new SubtableSymbol("instruction"); // Base constructors symtab.addSymbol(root); insertSpace(new ConstantSpace(this,this)); SpaceSymbol *spacesym = new SpaceSymbol(getConstantSpace()); // Constant space symtab.addSymbol(spacesym); OtherSpace *otherSpace = new OtherSpace(this,this,OtherSpace::INDEX); insertSpace(otherSpace); spacesym = new SpaceSymbol(otherSpace); symtab.addSymbol(spacesym); insertSpace(new UniqueSpace(this,this,numSpaces(),0)); spacesym = new SpaceSymbol(getUniqueSpace()); // Temporary register space symtab.addSymbol(spacesym); StartSymbol *startsym = new StartSymbol("inst_start",getConstantSpace()); symtab.addSymbol(startsym); EndSymbol *endsym = new EndSymbol("inst_next",getConstantSpace()); symtab.addSymbol(endsym); Next2Symbol *next2sym = new Next2Symbol("inst_next2",getConstantSpace()); symtab.addSymbol(next2sym); EpsilonSymbol *epsilon = new EpsilonSymbol("epsilon",getConstantSpace()); symtab.addSymbol(epsilon); pcode.setConstantSpace(getConstantSpace()); pcode.setUniqueSpace(getUniqueSpace()); } /// \brief Calculate the complete context layout for all definitions sharing the same backing storage Varnode /// /// Internally context is stored in an array of (32-bit) words. The bit-range for each field definition is /// adjusted to pack the fields within this array, but overlapping bit-ranges between definitions are preserved. /// Due to the internal storage word size, the covering range across a set of overlapping definitions cannot /// exceed the word size (of 32-bits). 
/// Within the sorted list of all context definitions, the subset sharing the same backing storage is /// provided to this method as a starting index and a size (of the subset), along with the total number /// of context bits already allocated. /// \param start is the provided starting index of the definition subset /// \param sz is the provided number of definitions in the subset /// \param numbits is the number of previously allocated context bits /// \return the total number of allocated bits (after the new allocations) int4 SleighCompile::calcContextVarLayout(int4 start,int4 sz,int4 numbits) { VarnodeSymbol *sym = contexttable[start].sym; FieldQuality *qual; int4 i,j; int4 maxbits; if ((sym->getSize()) % 4 != 0) reportError(getCurrentLocation(), "Invalid size of context register '"+sym->getName()+"': must be a multiple of 4 bytes"); maxbits = sym->getSize() * 8 -1; i = 0; while(ilow; int4 max = qual->high; if ((max - min) > (8*sizeof(uintm))) reportError(getCurrentLocation(), "Size of bitfield '" + qual->name + "' larger than 32 bits"); if (max > maxbits) reportError(getCurrentLocation(), "Scope of bitfield '" + qual->name + "' extends beyond the size of context register"); j = i+1; // Find union of fields overlapping with first field while(jlow <= max) { // We have overlap of context variables if (qual->high > max) max = qual->high; // reportWarning("Local context variables overlap in "+sym->getName(),false); } else break; j = j+1; } int4 alloc = max-min+1; int4 startword = numbits / (8*sizeof(uintm)); int4 endword = (numbits+alloc-1) / (8*sizeof(uintm)); if (startword != endword) numbits = endword * (8*sizeof(uintm)); // Bump up to next word uint4 low = numbits; numbits += alloc; for(;ilow - min + low; uint4 h = numbits-1-(max-qual->high); ContextField *field = new ContextField(qual->signext,l,h); addSymbol(new ContextSymbol(qual->name,field,sym,qual->low,qual->high,qual->flow)); } } sym->markAsContext(); return numbits; } /// A separate decision tree is 
calculated for each subtable, and information about /// conflicting patterns is accumulated. Identical pattern pairs are reported /// as errors, and indistinguishable pattern pairs are reported as errors depending /// on the \b lenientconflicterrors setting. void SleighCompile::buildDecisionTrees(void) { DecisionProperties props; root->buildDecisionTree(props); for(int4 i=0;ibuildDecisionTree(props); const vector > &ierrors( props.getIdentErrors() ); if (ierrors.size() != 0) { string identMsg = "Constructor has identical pattern to constructor at "; for(int4 i=0;iformat()); reportError(locB, identMsg + locA->format()); } } const vector > &cerrors( props.getConflictErrors() ); if (!lenientconflicterrors && cerrors.size() != 0) { string conflictMsg = "Constructor pattern cannot be distinguished from constructor at "; for(int4 i=0;iformat()); reportError(locB, conflictMsg + locA->format()); } } } /// For each Constructor, generate the final pattern (TokenPattern) used to match it from /// the parsed constraints (PatternEquation). Accumulated error messages are reported. void SleighCompile::buildPatterns(void) { if (root == 0) { reportError((const Location *)0, "No patterns to match."); return; } ostringstream msg; root->buildPattern(msg); // This should recursively hit everything if (root->isError()) { reportError(getLocation(root), msg.str()); errors += 1; } for(int4 i=0;iisError()) { reportError(getLocation(tables[i]), "Problem in table '"+tables[i]->getName() + "':" + msg.str()); errors += 1; } if (tables[i]->getPattern() == (TokenPattern *)0) { reportWarning(getLocation(tables[i]), "Unreferenced table '"+tables[i]->getName() + "'"); } } } /// Optimization is performed across all p-code sections. Size restriction and other consistency /// checks are performed. Errors and warnings are reported as appropriate. 
void SleighCompile::checkConsistency(void) { ConsistencyChecker checker(this, root,warnunnecessarypcode,warndeadtemps); if (!checker.testSizeRestrictions()) { errors += 1; return; } if (!checker.testTruncations()) { errors += 1; return; } if ((!warnunnecessarypcode)&&(checker.getNumUnnecessaryPcode() > 0)) { ostringstream msg; msg << dec << checker.getNumUnnecessaryPcode(); msg << " unnecessary extensions/truncations were converted to copies"; reportWarning(msg.str()); reportWarning("Use -u switch to list each individually"); } checker.optimizeAll(); if (checker.getNumReadNoWrite() > 0) { errors += 1; return; } if ((!warndeadtemps)&&(checker.getNumWriteNoRead() > 0)) { ostringstream msg; msg << dec << checker.getNumWriteNoRead(); msg << " operations wrote to temporaries that were not read"; reportWarning(msg.str()); reportWarning("Use -t switch to list each individually"); } checker.testLargeTemporary(); } /// \brief Search for offset matches between a previous set and the given current set /// /// This method is given a collection of offsets, each mapped to a particular set index. /// A new set of offsets and set index is given. The new set is added to the collection. /// If any offset in the new set matches an offset in one of the old sets, the old matching /// set index is returned. Otherwise -1 is returned. 
/// \param local2Operand is the collection of previous offsets /// \param locals is the new given set of offsets /// \param operand is the new given set index /// \return the set index of an old matching offset or -1 int4 SleighCompile::findCollision(map &local2Operand,const vector &locals,int operand) { for(int4 i=0;i::iterator,bool> res; res = local2Operand.insert(pair(locals[i],operand)); if (!res.second) { int4 oldIndex = (*res.first).second; if (oldIndex != operand) return oldIndex; } } return -1; } /// Because local variables can be exported and subtable symbols can be reused as operands across /// multiple Constructors, its possible for different operands in the same Constructor to be assigned /// the same exported local variable. As this is a potential spec design problem, this method searches /// for these collisions and potentially reports a warning. /// For each operand of the given Constructor, the potential local variable exports are collected and /// compared with the other operands. Any potential collision may generate a warning and causes /// \b false to be returned. 
/// \param ct is the given Constructor /// \return \b true if there are no potential collisions between operands bool SleighCompile::checkLocalExports(Constructor *ct) { if (ct->getTempl() == (ConstructTpl *)0) return true; // No template, collisions impossible if (ct->getTempl()->buildOnly()) return true; // Operand exports aren't manipulated, so no collision is possible if (ct->getNumOperands() < 2) return true; // Collision can only happen with multiple operands bool noCollisions = true; map collect; for(int4 i=0;igetNumOperands();++i) { vector newCollect; ct->getOperand(i)->collectLocalValues(newCollect); if (newCollect.empty()) continue; int4 collideOperand = findCollision(collect, newCollect, i); if (collideOperand >= 0) { noCollisions = false; if (warnalllocalcollisions) { reportWarning(getLocation(ct), "Possible operand collision between symbols '" + ct->getOperand(collideOperand)->getName() + "' and '" + ct->getOperand(i)->getName() + "'"); } break; // Don't continue } } return noCollisions; } /// Check each Constructor for collisions in turn. If there are any collisions /// report a warning indicating the number of Construtors with collisions. Optionally /// generate a warning for each colliding Constructor. void SleighCompile::checkLocalCollisions(void) { int4 collisionCount = 0; SubtableSymbol *sym = root; // Start with the instruction table int4 i = -1; for(;;) { int4 numconst = sym->getNumConstructors(); for(int4 j=0;jgetConstructor(j))) collisionCount += 1; } i+=1; if (i>=tables.size()) break; sym = tables[i]; } if (collisionCount > 0) { ostringstream msg; msg << dec << collisionCount << " constructors with local collisions between operands"; reportWarning(msg.str()); if (!warnalllocalcollisions) reportWarning("Use -c switch to list each individually"); } } /// The number of \e empty Constructors, with no p-code and no export, is always reported. /// Optionally, empty Constructors are reported individually. 
void SleighCompile::checkNops(void) { if (noplist.size() > 0) { if (warnallnops) { for(int4 i=0;i` directive, in an address space of /// type \e IPTR_PROCESSOR (either RAM or REGISTER) void SleighCompile::checkCaseSensitivity(void) { if (!failinsensitivedups) return; // Case insensitive duplicates don't cause error map registerMap; SymbolScope *scope = symtab.getGlobalScope(); SymbolTree::const_iterator iter; for(iter=scope->begin();iter!=scope->end();++iter) { SleighSymbol *sym = *iter; if (sym->getType() != SleighSymbol::varnode_symbol) continue; VarnodeSymbol *vsym = (VarnodeSymbol *)sym; AddrSpace *space = vsym->getFixedVarnode().space; if (space->getType() != IPTR_PROCESSOR) continue; string nm = sym->getName(); transform(nm.begin(), nm.end(), nm.begin(), ::toupper); pair::iterator,bool> check; check = registerMap.insert( pair(nm,sym) ); if (!check.second) { // Name already existed SleighSymbol *oldsym = (*check.first).second; ostringstream s; s << "Name collision: " << sym->getName() << " --- "; s << "Duplicate symbol " << oldsym->getName(); const Location *oldLocation = getLocation(oldsym); if (oldLocation != (Location *) 0x0) { s << " defined at " << oldLocation->format(); } const Location *location = getLocation(sym); reportError(location,s.str()); } } } /// Each label symbol define which operator is being labeled and must also be /// used as a jump destination by at least 1 operator. A description of each /// symbol violating this is accumulated in a string returned by this method. 
/// \param scope is the scope across which to look for label symbols /// \return the accumulated error messages string SleighCompile::checkSymbols(SymbolScope *scope) { ostringstream msg; SymbolTree::const_iterator iter; for(iter=scope->begin();iter!=scope->end();++iter) { LabelSymbol *sym = (LabelSymbol *)*iter; if (sym->getType() != SleighSymbol::label_symbol) continue; if (sym->getRefCount() == 0) msg << " Label <" << sym->getName() << "> was placed but not used" << endl; else if (!sym->isPlaced()) msg << " Label <" << sym->getName() << "> was referenced but never placed" << endl; } return msg.str(); } /// The symbol definition is assumed to have just been parsed. It is added to the /// table within the current scope as determined by the parse state and is cross /// referenced with the current parse location. /// Duplicate symbol exceptions are caught and reported as a parse error. /// \param sym is the new symbol void SleighCompile::addSymbol(SleighSymbol *sym) { try { symtab.addSymbol(sym); symbolLocationMap[sym] = *getCurrentLocation(); } catch(SleighError &err) { reportError(err.explain); } } /// \param ctor is the given Constructor /// \return the filename and line number const Location *SleighCompile::getLocation(Constructor *ctor) const { return &ctorLocationMap.at(ctor); } /// \param sym is the given symbol /// \return the filename and line number or null if location not found const Location *SleighCompile::getLocation(SleighSymbol *sym) const { try { return &symbolLocationMap.at(sym); } catch (const out_of_range &e) { return nullptr; } } /// The current filename and line number are placed into a Location object /// which is then returned. 
/// \return the current Location const Location *SleighCompile::getCurrentLocation(void) const { // Update the location cache field currentLocCache = Location(filename.back(), lineno.back()); return ¤tLocCache; } /// \brief Format an error or warning message given an optional source location /// /// \param loc is the given source location (or null) /// \param msg is the message /// \return the formatted message string SleighCompile::formatStatusMessage(const Location* loc, const string &msg) { ostringstream s; if (loc != (Location*)0) { s << loc->format(); s << ": "; } s << msg; return s.str(); } /// The error message is formatted indicating the location of the error in source. /// The message is displayed for the user and a count is incremented. /// Otherwise, parsing can continue, but the compiler will not produce an output file. /// \param loc is the location of the error /// \param msg is the error message void SleighCompile::reportError(const Location* loc, const string &msg) { reportError(formatStatusMessage(loc, msg)); } /// The message is formatted and displayed for the user and a count is incremented. /// If there are too many fatal errors, the entire compilation process is terminated. /// Otherwise, parsing can continue, but the compiler will not produce an output file. /// \param msg is the error message void SleighCompile::reportError(const string &msg) { cerr << filename.back() << ":" << lineno.back() << " - ERROR " << msg << endl; errors += 1; if (errors > 1000000) { cerr << "Too many errors: Aborting" << endl; exit(2); } } /// The message indicates a potential problem with the SLEIGH specification but does not /// prevent compilation from producing output. 
/// \param loc is the location of the problem in source /// \param msg is the warning message void SleighCompile::reportWarning(const Location* loc, const string &msg) { reportWarning(formatStatusMessage(loc, msg)); } /// The message indicates a potential problem with the SLEIGH specification but does not /// prevent compilation from producing output. /// \param msg is the warning message void SleighCompile::reportWarning(const string &msg) { cerr << "WARN " << msg << endl; } /// The \e unique space acts as a pool of temporary registers that are drawn as needed. /// As Varnode sizes are frequently inferred and not immediately available during the parse, /// this method does not make an assumption about the size of the requested temporary Varnode. /// It reserves a fixed amount of space and returns its starting offset. /// \return the starting offset of the new temporary register uint4 SleighCompile::getUniqueAddr(void) { uint4 base = getUniqueBase(); setUniqueBase(base + SleighBase::MAX_UNIQUE_SIZE); return base; } /// This method is called after parsing is complete. It builds the final Constructor patterns, /// builds decision trees, does p-code optimization, and builds cross referencing structures. /// A number of checks are also performed, which may generate errors or warnings, including /// size restriction checks, pattern conflict checks, NOP constructor checks, and /// local collision checks. Once this method is run, \b this SleighCompile is ready for the /// encode method. 
void SleighCompile::process(void) { checkNops(); checkCaseSensitivity(); if (getDefaultCodeSpace() == (AddrSpace *)0) reportError("No default space specified"); if (errors>0) return; checkConsistency(); if (errors>0) return; checkLocalCollisions(); if (errors>0) return; buildPatterns(); if (errors>0) return; buildDecisionTrees(); if (errors>0) return; vector errorPairs; buildXrefs(errorPairs); // Make sure we can build crossrefs properly if (!errorPairs.empty()) { for(int4 i=0;i::const_iterator iter = preproc_defines.find(nm); if (iter == preproc_defines.end()) return false; res = (*iter).second; return true; } /// The string value is associated with the variable name. /// \param nm is the name of the given preprocessor variable /// \param value is the string value to associate void SleighCompile::setPreprocValue(const string &nm,const string &value) { preproc_defines[nm] = value; } /// Any existing string value associated with the variable is removed. /// \param nm is the name of the given preprocessor variable /// \return \b true if the variable had a value (was defined) initially bool SleighCompile::undefinePreprocValue(const string &nm) { map::iterator iter = preproc_defines.find(nm); if (iter==preproc_defines.end()) return false; preproc_defines.erase(iter); return true; } // Functions needed by the parser /// \brief Define a new SLEIGH token /// /// In addition to the name and size, an endianness code is provided, with the possible values: /// - -1 indicates a \e little endian interpretation is forced on the token /// - 0 indicates the global endianness setting is used for the token /// - 1 indicates a \e big endian interpretation is forced on the token /// /// \param name is the name of the token /// \param sz is the number of bits in the token /// \param endian is the endianness code /// \return the new token symbol TokenSymbol *SleighCompile::defineToken(string *name,uintb *sz,int4 endian) { uint4 size = *sz; delete sz; if ((size&7)!=0) { 
reportError(getCurrentLocation(), "'" + *name + "': token size must be multiple of 8"); size = (size/8)+1; } else size = size/8; bool isBig; if (endian ==0) isBig = isBigEndian(); else isBig = (endian > 0); Token *newtoken = new Token(*name,size,isBig,tokentable.size()); tokentable.push_back(newtoken); delete name; TokenSymbol *res = new TokenSymbol(newtoken); addSymbol(res); return res; } /// \brief Add a new field definition to the given token /// /// \param sym is the given token /// \param qual is the set of parsed qualities to associate with the new field void SleighCompile::addTokenField(TokenSymbol *sym,FieldQuality *qual) { if (qual->high < qual->low) { ostringstream s; s << "Field '" << qual->name << "' starts at " << qual->low << " and ends at " << qual->high; reportError(getCurrentLocation(), s.str()); } if (sym->getToken()->getSize() * 8 <= qual->high) { ostringstream s; s << "Field '" << qual->name << "' high must be less than token size"; reportError(getCurrentLocation(), s.str()); } TokenField *field = new TokenField(sym->getToken(),qual->signext,qual->low,qual->high); addSymbol(new ValueSymbol(qual->name,field)); delete qual; } /// \brief Add a new context field definition to the given backing Varnode /// /// \param sym is the given Varnode providing backing storage for the context field /// \param qual is the set of parsed qualities to associate with the new field bool SleighCompile::addContextField(VarnodeSymbol *sym,FieldQuality *qual) { if (qual->high < qual->low) { ostringstream s; s << "Context field '" << qual->name << "' starts at " << qual->low << " and ends at " << qual->high; reportError(getCurrentLocation(), s.str()); } if (sym->getSize() * 8 <= qual->high) { ostringstream s; s << "Context field '" << qual->name << "' high must be less than context size"; reportError(getCurrentLocation(), s.str()); } if (contextlock) return false; // Context layout has already been satisfied contexttable.push_back(FieldContext(sym,qual)); return true; } 
/// \brief Define a new addresds space /// /// \param qual is the set of parsed qualities to associate with the new space void SleighCompile::newSpace(SpaceQuality *qual) { if (qual->size == 0) { reportError(getCurrentLocation(), "Space definition '" + qual->name + "' missing size attribute"); delete qual; return; } int4 delay = (qual->type == SpaceQuality::registertype) ? 0 : 1; AddrSpace *spc = new AddrSpace(this,this,IPTR_PROCESSOR,qual->name,isBigEndian(), qual->size,qual->wordsize,numSpaces(),AddrSpace::hasphysical,delay,delay); insertSpace(spc); if (qual->isdefault) { if (getDefaultCodeSpace() != (AddrSpace *)0) reportError(getCurrentLocation(), "Multiple default spaces -- '" + getDefaultCodeSpace()->getName() + "', '" + qual->name + "'"); else { setDefaultCodeSpace(spc->getIndex()); // Make the flagged space the default pcode.setDefaultSpace(spc); } } delete qual; addSymbol( new SpaceSymbol(spc) ); } /// \brief Start a new named p-code section and define the associated section symbol /// /// \param nm is the name of the section /// \return the new section symbol SectionSymbol *SleighCompile::newSectionSymbol(const string &nm) { SectionSymbol *sym = new SectionSymbol(nm,sections.size()); try { symtab.addGlobalSymbol(sym); } catch(SleighError &err) { reportError(getCurrentLocation(), err.explain); } sections.push_back(sym); numSections = sections.size(); return sym; } /// \brief Set the global endianness of the SLEIGH specification /// /// This \b must be called at the very beginning of the parse. /// This method additionally establishes predefined symbols for the specification. /// \param end is the endianness value (0=little 1=big) void SleighCompile::setEndian(int4 end) { setBigEndian( (end == 1) ); predefinedSymbols(); // Set up symbols now that we know endianness } /// \brief Definition a set of Varnodes /// /// Storage for each Varnode is allocated in sequence from the given address space, /// starting from the specified offset. 
/// \param spacesym is the given address space /// \param off is the starting offset /// \param size is the size (in bytes) to allocate for each Varnode /// \param names is the list of Varnode names to define void SleighCompile::defineVarnodes(SpaceSymbol *spacesym,uintb *off,uintb *size,vector *names) { AddrSpace *spc = spacesym->getSpace(); uintb myoff = *off; for(int4 i=0;isize();++i) { if ((*names)[i] != "_") addSymbol( new VarnodeSymbol((*names)[i],spc,myoff,*size) ); myoff += *size; } delete names; delete off; delete size; } /// \brief Define a new Varnode symbol as a subrange of bits within another symbol /// /// If the ends of the range fall on byte boundaries, we /// simply define a normal VarnodeSymbol, otherwise we create /// a special symbol which is a place holder for the bitrange operator /// \param name is the name of the new Varnode /// \param sym is the parent Varnode /// \param bitoffset is the (least significant) starting bit of the new Varnode within the parent /// \param numb is the number of bits in the new Varnode void SleighCompile::defineBitrange(string *name,VarnodeSymbol *sym,uint4 bitoffset,uint4 numb) { string namecopy = *name; delete name; uint4 size = 8*sym->getSize(); // Number of bits if (numb == 0) { reportError(getCurrentLocation(), "'" + namecopy + "': size of bitrange is zero"); return; } if ((bitoffset >= size)||((bitoffset+numb)>size)) { reportError(getCurrentLocation(), "'" + namecopy + "': bad bitrange"); return; } if ((bitoffset%8 == 0)&&(numb%8 == 0)) { // This can be reduced to an ordinary varnode definition AddrSpace *newspace = sym->getFixedVarnode().space; uintb newoffset = sym->getFixedVarnode().offset; int4 newsize = numb/8; if (isBigEndian()) newoffset += (size-bitoffset-numb)/8; else newoffset += bitoffset/8; addSymbol( new VarnodeSymbol(namecopy,newspace,newoffset,newsize) ); } else // Otherwise define the special symbol addSymbol( new BitrangeSymbol(namecopy,sym,bitoffset,numb) ); } /// \brief Define a list of 
new user-defined operators /// /// A new symbol is created for each name. /// \param names is the list of names void SleighCompile::addUserOp(vector *names) { for(int4 i=0;isize();++i) { UserOpSymbol *sym = new UserOpSymbol((*names)[i]); sym->setIndex(userop_count++); addSymbol( sym ); } delete names; } /// Find duplicates in the list and null out any entry but the first. /// Return an example of a symbol with duplicates or null if there are /// no duplicates. /// \param symlist is the given list of symbols (which may contain nulls) /// \return an example symbol with a duplicate are null SleighSymbol *SleighCompile::dedupSymbolList(vector *symlist) { SleighSymbol *res = (SleighSymbol *)0; for(int4 i=0;isize();++i) { SleighSymbol *sym = (*symlist)[i]; if (sym == (SleighSymbol *)0) continue; for(int4 j=i+1;jsize();++j) { if ((*symlist)[j] == sym) { // Found a duplicate res = sym; // Return example duplicate for error reporting (*symlist)[j] = (SleighSymbol *)0; // Null out the duplicate } } } return res; } /// \brief Attach a list integer values, to each value symbol in the given list /// /// Each symbol's original bit representation is no longer used as the absolute integer /// value associated with the symbol. Instead it is used to map into this integer list. /// \param symlist is the given list of value symbols /// \param numlist is the list of integer values to attach void SleighCompile::attachValues(vector *symlist,vector *numlist) { SleighSymbol *dupsym = dedupSymbolList(symlist); if (dupsym != (SleighSymbol *)0) reportWarning(getCurrentLocation(), "'attach values' list contains duplicate entries: "+dupsym->getName()); for(int4 i=0;isize();++i) { ValueSymbol *sym = (ValueSymbol *)(*symlist)[i]; if (sym == (ValueSymbol *)0) continue; PatternValue *patval = sym->getPatternValue(); if (patval->maxValue() + 1 != numlist->size()) { ostringstream msg; msg << "Attach value '" + sym->getName(); msg << "' (range 0.." 
<< patval->maxValue() << ") is wrong size for list (of " << numlist->size() << " entries)"; reportError(getCurrentLocation(), msg.str()); } symtab.replaceSymbol(sym, new ValueMapSymbol(sym->getName(),patval,*numlist)); } delete numlist; delete symlist; } /// \brief Attach a list of display names to the given list of value symbols /// /// Each symbol's original bit representation is no longer used as the display name /// for the symbol. Instead it is used to map into this list of display names. /// \param symlist is the given list of value symbols /// \param names is the list of display names to attach void SleighCompile::attachNames(vector *symlist,vector *names) { SleighSymbol *dupsym = dedupSymbolList(symlist); if (dupsym != (SleighSymbol *)0) reportWarning(getCurrentLocation(), "'attach names' list contains duplicate entries: "+dupsym->getName()); for(int4 i=0;isize();++i) { ValueSymbol *sym = (ValueSymbol *)(*symlist)[i]; if (sym == (ValueSymbol *)0) continue; PatternValue *patval = sym->getPatternValue(); if (patval->maxValue() + 1 != names->size()) { ostringstream msg; msg << "Attach name '" + sym->getName(); msg << "' (range 0.." << patval->maxValue() << ") is wrong size for list (of " << names->size() << " entries)"; reportError(getCurrentLocation(), msg.str()); } symtab.replaceSymbol(sym,new NameSymbol(sym->getName(),patval,*names)); } delete names; delete symlist; } /// \brief Attach a list of Varnodes to the given list of value symbols /// /// Each symbol's original bit representation is no longer used as the display name and /// semantic value of the symbol. Instead it is used to map into this list of Varnodes. 
/// \param symlist is the given list of value symbols /// \param varlist is the list of Varnodes to attach void SleighCompile::attachVarnodes(vector *symlist,vector *varlist) { SleighSymbol *dupsym = dedupSymbolList(symlist); if (dupsym != (SleighSymbol *)0) reportWarning(getCurrentLocation(), "'attach variables' list contains duplicate entries: "+dupsym->getName()); for(int4 i=0;isize();++i) { ValueSymbol *sym = (ValueSymbol *)(*symlist)[i]; if (sym == (ValueSymbol *)0) continue; PatternValue *patval = sym->getPatternValue(); if (patval->maxValue() + 1 != varlist->size()) { ostringstream msg; msg << "Attach varnode '" + sym->getName(); msg << "' (range 0.." << patval->maxValue() << ") is wrong size for list (of " << varlist->size() << " entries)"; reportError(getCurrentLocation(), msg.str()); } int4 sz = 0; for(int4 j=0;jsize();++j) { VarnodeSymbol *vsym = (VarnodeSymbol *)(*varlist)[j]; if (vsym != (VarnodeSymbol *)0) { if (sz == 0) sz = vsym->getFixedVarnode().size; else if (sz != vsym->getFixedVarnode().size) { ostringstream msg; msg << "Attach statement contains varnodes of different sizes -- " << dec << sz << " != " << dec << vsym->getFixedVarnode().size; reportError(getCurrentLocation(), msg.str()); break; } } } symtab.replaceSymbol(sym,new VarnodeListSymbol(sym->getName(),patval,*varlist)); } delete varlist; delete symlist; } /// \brief Define a new SLEIGH subtable /// /// A symbol and table entry are created. /// \param nm is the name of the new subtable SubtableSymbol *SleighCompile::newTable(string *nm) { SubtableSymbol *sym = new SubtableSymbol(*nm); addSymbol(sym); tables.push_back(sym); delete nm; return sym; } /// \brief Define a new operand for the given Constructor /// /// A symbol local to the Constructor is defined, which initially is unmapped. /// Operands are defined in order. 
/// \param ct is the given Constructor /// \param nm is the name of the new operand void SleighCompile::newOperand(Constructor *ct,string *nm) { int4 index = ct->getNumOperands(); OperandSymbol *sym = new OperandSymbol(*nm,index,ct); addSymbol(sym); ct->addOperand(sym); delete nm; } /// \brief Create a new constraint equation based on the given operand /// /// The constraint forces the operand to \e match the specified expression /// \param sym is the given operand /// \param patexp is the specified expression /// \return the new constraint equation PatternEquation *SleighCompile::constrainOperand(OperandSymbol *sym,PatternExpression *patexp) { PatternEquation *res; FamilySymbol *famsym = dynamic_cast(sym->getDefiningSymbol()); if (famsym != (FamilySymbol *)0) { // Operand already defined as family symbol // This equation must be a constraint res = new EqualEquation(famsym->getPatternValue(),patexp); } else { // Operand is currently undefined, so we can't constrain PatternExpression::release(patexp); res = (PatternEquation *)0; } return res; } /// \brief Map the local operand symbol to a PatternExpression /// /// The operand symbol's display string and semantic value are calculated at /// disassembly time based on the specified expression. /// \param sym is the local operand /// \param patexp is the expression to map to the operand void SleighCompile::defineOperand(OperandSymbol *sym,PatternExpression *patexp) { try { sym->defineOperand(patexp); sym->setOffsetIrrelevant(); // If not a self-definition, the operand has no // pattern directly associated with it, so // the operand's offset is irrelevant } catch(SleighError &err) { reportError(getCurrentLocation(), err.explain); PatternExpression::release(patexp); } } /// \brief Define a new \e invisible operand based on an existing symbol /// /// A new symbol is defined that is considered an operand of the current Constructor, /// but its display does not contribute to the display of the Constructor. 
/// The new symbol may still contribute matching patterns and p-code /// \param sym is the existing symbol that the new operand maps to /// \return an (unconstrained) operand pattern PatternEquation *SleighCompile::defineInvisibleOperand(TripleSymbol *sym) { int4 index = curct->getNumOperands(); OperandSymbol *opsym = new OperandSymbol(sym->getName(),index,curct); addSymbol(opsym); curct->addInvisibleOperand(opsym); PatternEquation *res = new OperandEquation(opsym->getIndex()); SleighSymbol::symbol_type tp = sym->getType(); try { if ((tp==SleighSymbol::value_symbol)||(tp==SleighSymbol::context_symbol)) { opsym->defineOperand(sym->getPatternExpression()); } else { opsym->defineOperand(sym); } } catch(SleighError &err) { reportError(getCurrentLocation(), err.explain); } return res; } /// \brief Map given operand to a global symbol of same name /// /// The operand symbol still acts as a local symbol but gets its display, /// pattern, and semantics from the global symbol. /// \param sym is the given operand void SleighCompile::selfDefine(OperandSymbol *sym) { TripleSymbol *glob = dynamic_cast(symtab.findSymbol(sym->getName(),1)); if (glob == (TripleSymbol *)0) { reportError(getCurrentLocation(), "No matching global symbol '" + sym->getName() + "'"); return; } SleighSymbol::symbol_type tp = glob->getType(); try { if ((tp==SleighSymbol::value_symbol)||(tp==SleighSymbol::context_symbol)) { sym->defineOperand(glob->getPatternExpression()); } else sym->defineOperand(glob); } catch(SleighError &err) { reportError(getCurrentLocation(), err.explain); } } /// \brief Set \e export of a Constructor to the given Varnode /// /// SLEIGH symbols matching the Constructor use this Varnode as their semantic storage/value. 
/// \param ct is the Constructor p-code section /// \param vn is the given Varnode /// \return the p-code section ConstructTpl *SleighCompile::setResultVarnode(ConstructTpl *ct,VarnodeTpl *vn) { HandleTpl *res = new HandleTpl(vn); delete vn; ct->setResult(res); return ct; } /// \brief Set a Constructor export to be the location pointed to by the given Varnode /// /// SLEIGH symbols matching the Constructor use this dynamic location as their semantic storage/value. /// \param ct is the Constructor p-code section /// \param star describes the pointer details /// \param vn is the given Varnode pointer /// \return the p-code section ConstructTpl *SleighCompile::setResultStarVarnode(ConstructTpl *ct,StarQuality *star,VarnodeTpl *vn) { HandleTpl *res = new HandleTpl(star->id,ConstTpl(ConstTpl::real,star->size),vn, getUniqueSpace(),getUniqueAddr()); delete star; delete vn; ct->setResult(res); return ct; } /// \brief Create a change operation that makes a temporary change to a context variable /// /// The new change operation is added to the current list. /// When executed, the change operation will assign a new value to the given context variable /// using the specified expression. The change only applies within the parsing of a single instruction. /// Because we are in the middle of parsing, the \b inst_next and \b inst_next2 values have not /// been computed yet. So we check to make sure the value expression doesn't use this symbol. 
/// \param vec is the current list of change operations /// \param sym is the given context variable affected by the operation /// \param pe is the specified expression /// \return \b true if the expression does not use the \b inst_next or \b inst_next2 symbol bool SleighCompile::contextMod(vector *vec,ContextSymbol *sym,PatternExpression *pe) { vector vallist; pe->listValues(vallist); for(uint4 i=0;i(vallist[i]) != (const EndInstructionValue *)0) return false; if (dynamic_cast(vallist[i]) != (const Next2InstructionValue *)0) return false; } // Otherwise we generate a "temporary" change to context instruction (ContextOp) ContextField *field = (ContextField *)sym->getPatternValue(); ContextOp *op = new ContextOp(field->getStartBit(),field->getEndBit(),pe); vec->push_back(op); return true; } /// \brief Create a change operation that makes a context variable change permanent /// /// The new change operation is added to the current list. /// When executed, the operation makes the final value of the given context variable permanent, /// starting at the specified address symbol. This value is set for contexts starting at the /// specified symbol address and may flow to following addresses depending on the variable settings. /// \param vec is the current list of change operations /// \param sym is the specified address symbol /// \param cvar is the given context variable void SleighCompile::contextSet(vector *vec,TripleSymbol *sym, ContextSymbol *cvar) { ContextField *field = (ContextField *)cvar->getPatternValue(); ContextCommit *op = new ContextCommit(sym,field->getStartBit(),field->getEndBit(),cvar->getFlow()); vec->push_back(op); } /// \brief Create a macro symbol (with parameter names) /// /// An uninitialized symbol is defined and a macro table entry assigned. /// The body of the macro must be provided later with the buildMacro method. 
/// \param name is the name of the macro /// \param params is the list of parameter names for the macro /// \return the new macro symbol MacroSymbol *SleighCompile::createMacro(string *name,vector *params) { curct = (Constructor *)0; // Not currently defining a Constructor curmacro = new MacroSymbol(*name,macrotable.size()); delete name; addSymbol(curmacro); symtab.addScope(); // New scope for the body of the macro definition pcode.resetLabelCount(); // Macros have their own labels for(int4 i=0;isize();++i) { OperandSymbol *oper = new OperandSymbol((*params)[i],i,(Constructor *)0); addSymbol(oper); curmacro->addOperand(oper); } delete params; return curmacro; } /// \brief Pass through operand properties of an invoked macro to the parent operands /// /// Match up any qualities of the macro's OperandSymbols with any OperandSymbol passed /// into the macro. /// \param sym is the macro being invoked /// \param param is the list of expressions passed to the macro void SleighCompile::compareMacroParams(MacroSymbol *sym,const vector ¶m) { for(uint4 i=0;igetOut(); if (outvn == (VarnodeTpl *)0) continue; // Check if an OperandSymbol was passed into this macro if (outvn->getOffset().getType() != ConstTpl::handle) continue; int4 hand = outvn->getOffset().getHandleIndex(); // The matching operands OperandSymbol *macroop = sym->getOperand(i); OperandSymbol *parentop; if (curct == (Constructor *)0) parentop = curmacro->getOperand(hand); else parentop = curct->getOperand(hand); // This is the only property we check right now if (macroop->isCodeAddress()) parentop->setCodeAddress(); } } /// \brief Create a p-code sequence that invokes a macro /// /// The given parameter expressions are expanded first into the p-code sequence, /// followed by a final macro build directive. 
/// \param sym is the macro being invoked /// \param param is the sequence of parameter expressions passed to the macro /// \return the p-code sequence vector *SleighCompile::createMacroUse(MacroSymbol *sym,vector *param) { if (sym->getNumOperands() != param->size()) { bool tooManyParams = param->size() > sym->getNumOperands(); string errmsg = "Invocation of macro '" + sym->getName() + "' passes too " + (tooManyParams ? "many" : "few") + " parameters"; reportError(getCurrentLocation(), errmsg); return new vector; } compareMacroParams(sym,*param); OpTpl *op = new OpTpl(MACROBUILD); VarnodeTpl *idvn = new VarnodeTpl(ConstTpl(getConstantSpace()), ConstTpl(ConstTpl::real,sym->getIndex()), ConstTpl(ConstTpl::real,4)); op->addInput(idvn); return ExprTree::appendParams(op,param); } /// \brief Create a SectionVector containing just the \e main p-code section with no named sections /// /// \param main is the main p-code section /// \return the new SectionVector SectionVector *SleighCompile::standaloneSection(ConstructTpl *main) { SectionVector *res = new SectionVector(main,symtab.getCurrentScope()); return res; } /// \brief Start a new named p-code section after the given \e main p-code section /// /// The \b main p-code section must already be constructed, and the new named section /// symbol defined. A SectionVector is initialized with the \e main section, and a /// symbol scope is created for the new p-code section. 
/// \param main is the existing \e main p-code section /// \param sym is the existing symbol for the new named p-code section /// \return the new SectionVector SectionVector *SleighCompile::firstNamedSection(ConstructTpl *main,SectionSymbol *sym) { sym->incrementDefineCount(); SymbolScope *curscope = symtab.getCurrentScope(); // This should be a Constructor scope SymbolScope *parscope = curscope->getParent(); if (parscope != symtab.getGlobalScope()) throw LowlevelError("firstNamedSection called when not in Constructor scope"); // Unrecoverable error symtab.addScope(); // Add new scope under the Constructor scope SectionVector *res = new SectionVector(main,curscope); res->setNextIndex(sym->getTemplateId()); return res; } /// \brief Complete a named p-code section and prepare for a new named section /// /// The actual p-code templates are assigned to a previously registered p-code section symbol /// and is added to the existing Section Vector. The old symbol scope is popped and another /// scope is created for the new named section. 
/// \param vec is the existing SectionVector /// \param section contains the p-code templates to assign to the previous section /// \param sym is the symbol describing the new named section being parsed /// \return the updated SectionVector SectionVector *SleighCompile::nextNamedSection(SectionVector *vec,ConstructTpl *section,SectionSymbol *sym) { sym->incrementDefineCount(); SymbolScope *curscope = symtab.getCurrentScope(); symtab.popScope(); // Pop the scope of the last named section SymbolScope *parscope = symtab.getCurrentScope()->getParent(); if (parscope != symtab.getGlobalScope()) throw LowlevelError("nextNamedSection called when not in section scope"); // Unrecoverable symtab.addScope(); // Add new scope under the Constructor scope (not the last section scope) vec->append(section,curscope); // Associate finished section vec->setNextIndex(sym->getTemplateId()); // Set index for the NEXT section (not been fully parsed yet) return vec; } /// \brief Fill-in final named section to match the previous SectionSymbol /// /// The provided p-code templates are assigned to the previously registered p-code section symbol, /// and the completed section is added to the SectionVector. 
/// \param vec is the existing SectionVector /// \param section contains the p-code templates to assign to the last section /// \return the updated SectionVector SectionVector *SleighCompile::finalNamedSection(SectionVector *vec,ConstructTpl *section) { vec->append(section,symtab.getCurrentScope()); symtab.popScope(); // Pop the section scope return vec; } /// \brief Create the \b crossbuild directive as a p-code template /// /// \param addr is the address symbol indicating the instruction to \b crossbuild /// \param sym is the symbol indicating the p-code to be build /// \return the p-code template vector *SleighCompile::createCrossBuild(VarnodeTpl *addr,SectionSymbol *sym) { unique_allocatemask = 1; vector *res = new vector(); VarnodeTpl *sectionid = new VarnodeTpl(ConstTpl(getConstantSpace()), ConstTpl(ConstTpl::real,sym->getTemplateId()), ConstTpl(ConstTpl::real,4)); // This is simply a single pcodeop (template), where the opcode indicates the crossbuild directive OpTpl *op = new OpTpl( CROSSBUILD ); op->addInput(addr); // The first input is the VarnodeTpl representing the address op->addInput(sectionid); // The second input is the indexed representing the named pcode section to build res->push_back(op); sym->incrementRefCount(); // Keep track of the references to the section symbol return res; } /// \brief Prepare for a new section of p-code templates /// /// Create the ConstructTpl to hold the templates and reset counters. 
/// \return the new ConstructTpl ConstructTpl *SleighCompile::enterSection(void) { ConstructTpl *tpl = new ConstructTpl(); pcode.resetLabelCount(); // Macros have their own labels return tpl; } /// \brief Create a new Constructor under the given subtable /// /// Create the object and initialize parsing for the new definition /// \param sym is the given subtable or null for the root table /// \return the new Constructor Constructor *SleighCompile::createConstructor(SubtableSymbol *sym) { if (sym == (SubtableSymbol *)0) sym = WithBlock::getCurrentSubtable(withstack); if (sym == (SubtableSymbol *)0) sym = root; curmacro = (MacroSymbol *)0; // Not currently defining a macro curct = new Constructor(sym); curct->setLineno(lineno.back()); ctorLocationMap[curct] = *getCurrentLocation(); sym->addConstructor(curct); symtab.addScope(); // Make a new symbol scope for our constructor pcode.resetLabelCount(); int4 index = indexer.index(ctorLocationMap[curct].getFilename()); curct->setSrcIndex(index); return curct; } /// \brief Reset state after a parsing error in the previous Constructor void SleighCompile::resetConstructors(void) { symtab.setCurrentScope(symtab.getGlobalScope()); // Purge any dangling local scopes } /// Run through the section looking for MACRO directives. The directive includes an /// id for a specific macro in the table. Using the MacroBuilder class each directive /// is replaced with new sequence of OpTpls that tailors the macro with parameters /// in its invocation. Any errors encountered during expansion are reported. /// Other OpTpls in the section are unchanged. 
/// \param ctpl is the given section of p-code to expand /// \return \b true if there were no errors expanding a macro bool SleighCompile::expandMacros(ConstructTpl *ctpl) { vector newvec; vector::const_iterator iter; OpTpl *op; for(iter=ctpl->getOpvec().begin();iter!=ctpl->getOpvec().end();++iter) { op = *iter; if (op->getOpcode() == MACROBUILD) { MacroBuilder builder(this,newvec,ctpl->numLabels()); int4 index = op->getIn(0)->getOffset().getReal(); if (index >= macrotable.size()) return false; builder.setMacroOp(op); ConstructTpl *macro_tpl = macrotable[index]; builder.build(macro_tpl,-1); ctpl->setNumLabels( ctpl->numLabels() + macro_tpl->numLabels() ); delete op; // Throw away the place holder op if (builder.hasError()) return false; } else newvec.push_back(op); } ctpl->setOpvec(newvec); return true; } /// For each p-code section of the given Constructor: /// - Expand macros /// - Check that labels are both defined and referenced /// - Generate BUILD directives for subtable operands /// - Propagate Varnode sizes throughout the section /// /// Each action may generate errors or warnings. 
/// \param big is the given Constructor /// \param vec is the list of p-code sections /// \return \b true if there were no fatal errors bool SleighCompile::finalizeSections(Constructor *big,SectionVector *vec) { vector errors; RtlPair cur = vec->getMainPair(); int4 i=-1; string sectionstring = " Main section: "; int4 max = vec->getMaxId(); for(;;) { string errstring; errstring = checkSymbols(cur.scope); // Check labels in the section's scope if (errstring.size()!=0) { errors.push_back(sectionstring + errstring); } else { if (!expandMacros(cur.section)) errors.push_back(sectionstring + "Could not expand macros"); vector check; big->markSubtableOperands(check); int4 res = cur.section->fillinBuild(check,getConstantSpace()); if (res == 1) errors.push_back(sectionstring + "Duplicate BUILD statements"); if (res == 2) errors.push_back(sectionstring + "Unnecessary BUILD statements"); if (!PcodeCompile::propagateSize(cur.section)) errors.push_back(sectionstring + "Could not resolve at least 1 variable size"); } if (i < 0) { // These potential errors only apply to main section if (cur.section->getResult() != (HandleTpl *)0) { // If there is an export statement if (big->getParent()==root) errors.push_back(" Cannot have export statement in root constructor"); else if (!forceExportSize(cur.section)) errors.push_back(" Size of export is unknown"); } } if (cur.section->delaySlot() != 0) { // Delay slot is present in this constructor if (root != big->getParent()) { // it is not in a root constructor ostringstream msg; msg << "Delay slot used in non-root constructor "; big->printInfo(msg); msg << endl; reportWarning(getLocation(big), msg.str()); } if (cur.section->delaySlot() > maxdelayslotbytes) // Keep track of maximum delayslot parameter maxdelayslotbytes = cur.section->delaySlot(); } do { i += 1; if (i >= max) break; cur = vec->getNamedPair(i); } while(cur.section == (ConstructTpl *)0); if (i >= max) break; SectionSymbol *sym = sections[i]; sectionstring = " " + sym->getName() 
+ " section: "; } if (!errors.empty()) { ostringstream s; s << "in "; big->printInfo(s); reportError(getLocation(big), s.str()); for(int4 j=0;j &ops(ct->getOpvec()); VarnodeTpl *vn; OpTpl *op; for(int4 i=0;igetOut(); if ((vn!=(VarnodeTpl *)0)&&(vn->isLocalTemp())) { if (vn->getOffset() == offset) return vn; } for(int4 j=0;jnumInput();++j) { vn = op->getIn(j); if (vn->isLocalTemp()&&(vn->getOffset()==offset)) return vn; } } return (VarnodeTpl *)0; } /// \brief Propagate local variable sizes into an \b export statement /// /// Look for zero size temporary Varnodes in \b export statements, search for /// the matching local Varnode symbol and force its size on the \b export. /// \param ct is the Constructor whose \b export is to be modified /// \return \b false if a local zero size can't be updated bool SleighCompile::forceExportSize(ConstructTpl *ct) { HandleTpl *result = ct->getResult(); if (result == (HandleTpl *)0) return true; VarnodeTpl *vt; if (result->getPtrSpace().isUniqueSpace()&&result->getPtrSize().isZero()) { vt = findSize(result->getPtrOffset(),ct); if (vt == (VarnodeTpl *)0) return false; result->setPtrSize(vt->getSize()); } else if (result->getSpace().isUniqueSpace()&&result->getSize().isZero()) { vt = findSize(result->getPtrOffset(),ct); if (vt == (VarnodeTpl *)0) return false; result->setSize(vt->getSize()); } return true; } /// \brief Insert a region of zero bits into an address offset /// /// \param addr is the address offset /// \return the modified offset uintb SleighCompile::insertCrossBuildRegion(uintb addr) { uintb upperbits = (addr >> UNIQUE_CROSSBUILD_POSITION) << (UNIQUE_CROSSBUILD_POSITION + UNIQUE_CROSSBUILD_NUMBITS); uintb lowerbits = (addr << (8*sizeof(uintb) - UNIQUE_CROSSBUILD_POSITION)) >> (8*sizeof(uintb) - UNIQUE_CROSSBUILD_POSITION); return upperbits | lowerbits; } /// \brief If the given Varnode is in the \e unique space, insert a region of zero bits /// /// \param vn is the given Varnode void 
SleighCompile::shiftUniqueVn(VarnodeTpl *vn) { if (vn->getSpace().isUniqueSpace() && (vn->getOffset().getType() == ConstTpl::real)) { uintb val = insertCrossBuildRegion(vn->getOffset().getReal()); vn->setOffset(val); } } /// \brief Insert a region of zero bits for any Varnode used by the given op in the \e unique space /// /// \param op is the given op void SleighCompile::shiftUniqueOp(OpTpl *op) { VarnodeTpl *outvn = op->getOut(); if (outvn != (VarnodeTpl *)0) shiftUniqueVn(outvn); for(int4 i=0;inumInput();++i) shiftUniqueVn(op->getIn(i)); } /// \brief Insert a region of zero bits for both \e dynamic or \e static Varnode aspects in the \e unique space /// /// \param hand is a handle template whose aspects should be modified void SleighCompile::shiftUniqueHandle(HandleTpl *hand) { if (hand->getSpace().isUniqueSpace() && (hand->getPtrSpace().getType() == ConstTpl::real) && (hand->getPtrOffset().getType() == ConstTpl::real)) { uintb val = insertCrossBuildRegion(hand->getPtrOffset().getReal()); hand->setPtrOffset(val); } else if (hand->getPtrSpace().isUniqueSpace() && (hand->getPtrOffset().getType() == ConstTpl::real)) { uintb val = insertCrossBuildRegion(hand->getPtrOffset().getReal()); hand->setPtrOffset(val); } if (hand->getTempSpace().isUniqueSpace() && (hand->getTempOffset().getType() == ConstTpl::real)) { uintb val = insertCrossBuildRegion(hand->getTempOffset().getReal()); hand->setTempOffset(val); } } /// \brief Insert a region of zero bits for any Varnode in the \e unique space for all p-code in the given section /// /// \param tpl is the given p-code section void SleighCompile::shiftUniqueConstruct(ConstructTpl *tpl) { HandleTpl *result = tpl->getResult(); if (result != (HandleTpl *)0) shiftUniqueHandle(result); const vector &vec( tpl->getOpvec() ); for(int4 i=0;igetNumConstructors(); for(int4 j=0;jgetConstructor(j); ConstructTpl *tpl = ct->getTempl(); if (tpl != (ConstructTpl *)0) shiftUniqueConstruct(tpl); for(int4 k=0;kgetNamedTempl(k); if (namedtpl != 
(ConstructTpl *)0) shiftUniqueConstruct(namedtpl); } } i+=1; if (i>=tables.size()) break; sym = tables[i]; } uint4 ubase = getUniqueBase(); // We have to adjust the unique base ubase += 1 << UNIQUE_CROSSBUILD_POSITION; ubase <<= UNIQUE_CROSSBUILD_NUMBITS; setUniqueBase(ubase); } /// \brief Add a new \b with block to the current stack /// /// All subsequent Constructors adopt properties declared in the \b with header. /// \param ss the subtable to assign to each Constructor, or null /// \param pateq is an pattern equation constraining each Constructor, or null /// \param contvec is a context change applied to each Constructor, or null void SleighCompile::pushWith(SubtableSymbol *ss,PatternEquation *pateq,vector *contvec) { withstack.emplace_back(); withstack.back().set(ss,pateq,contvec); } /// \brief Pop the current \b with block from the stack void SleighCompile::popWith(void) { withstack.pop_back(); } /// \brief Finish building a given Constructor after all its pieces have been parsed /// /// The constraint pattern and context changes are modified by the current \b with block. /// The result along with any p-code sections are registered with the Constructor object. 
/// \param big is the given Constructor /// \param pateq is the parsed pattern equation /// \param contvec is the list of context changes or null /// \param vec is the collection of p-code sections, or null void SleighCompile::buildConstructor(Constructor *big,PatternEquation *pateq,vector *contvec,SectionVector *vec) { bool noerrors = true; if (vec != (SectionVector *)0) { // If the sections were implemented noerrors = finalizeSections(big,vec); if (noerrors) { // Attach the sections to the Constructor big->setMainSection(vec->getMainSection()); int4 max = vec->getMaxId(); for(int4 i=0;igetNamedSection(i); if (section != (ConstructTpl *)0) big->setNamedSection(section,i); } } delete vec; } if (noerrors) { pateq = WithBlock::collectAndPrependPattern(withstack, pateq); contvec = WithBlock::collectAndPrependContext(withstack, contvec); big->addEquation(pateq); big->removeTrailingSpace(); if (contvec != (vector *)0) { big->addContext(*contvec); delete contvec; } } symtab.popScope(); // In all cases pop scope } /// \brief Finish defining a macro given a set of p-code templates for its body /// /// Try to propagate sizes through the templates, expand any (sub)macros and make /// sure any label symbols are defined and used. 
/// \param sym is the macro being defined /// \param rtl is the set of p-code templates void SleighCompile::buildMacro(MacroSymbol *sym,ConstructTpl *rtl) { string errstring = checkSymbols(symtab.getCurrentScope()); if (errstring.size() != 0) { reportError(getCurrentLocation(), "In definition of macro '"+sym->getName() + "': " + errstring); return; } if (!expandMacros(rtl)) { reportError(getCurrentLocation(), "Could not expand submacro in definition of macro '" + sym->getName() + "'"); return; } PcodeCompile::propagateSize(rtl); // Propagate size information (as much as possible) sym->setConstruct(rtl); symtab.popScope(); // Pop local variables used to define macro macrotable.push_back(rtl); } /// \brief Record a NOP constructor at the current location /// /// The location is recorded and may be reported on after parsing. void SleighCompile::recordNop(void) { string msg = formatStatusMessage(getCurrentLocation(), "NOP detected"); noplist.push_back(msg); } /// \brief Run the full compilation process, given a path to the specification file /// /// The specification file is opened and a parse is started. Errors and warnings /// are printed to standard out, and if no fatal errors are encountered, the compiled /// form of the specification is written out. 
/// \param filein is the given path to the specification file to compile /// \param fileout is the path to output file /// \return an error code, where 0 indicates that a compiled file was successfully produced int4 SleighCompile::run_compilation(const string &filein,const string &fileout) { parseFromNewFile(filein); slgh = this; // Set global pointer up for parser sleighin = fopen(filein.c_str(),"r"); // Open the file for the lexer if (sleighin == (FILE *)0) { cerr << "Unable to open specfile: " << filein << endl; return 2; } try { int4 parseres = sleighparse(); // Try to parse fclose(sleighin); if (parseres==0) process(); // Do all the post-processing if ((parseres==0)&&(numErrors()==0)) { // If no errors ofstream s(fileout,ios::binary); if (!s) { ostringstream errs; errs << "Unable to open output file: " << fileout; throw SleighError(errs.str()); } if (debugoutput) { // If the debug output format was requested, use the XML encoder XmlEncode encoder(s); encode(encoder); } else { // Use the standard .sla format encoder sla::FormatEncode encoder(s,-1); encode(encoder); encoder.flush(); } s.close(); } else { cerr << "No output produced" <getRoot(); for(;;) { const List &list(el->getChildren()); List::const_iterator iter; for(iter=list.begin();iter!=list.end();++iter) { el = *iter; if (el->getName() == "processorfile") { specfileout = el->getContent(); int4 num = el->getNumAttributes(); for(int4 i=0;igetAttributeName(i)=="slaspec") specfilein = el->getAttributeValue(i); else { compiler.setPreprocValue(el->getAttributeName(i),el->getAttributeValue(i)); } } } else if (el->getName() == "language_spec") break; else if (el->getName() == "language_description") break; } if (iter==list.end()) break; } delete doc; if (specfilein.size() == 0) { cerr << "Input slaspec file was not specified in " << filein << endl; exit(1); } if (specfileout.size() == 0) { cerr << "Output sla file was not specified in " << filein << endl; exit(1); } return 
compiler.run_compilation(specfilein,specfileout); } static void findSlaSpecs(vector &res, const string &dir, const string &suffix) { FileManage::matchListDir(res, suffix, true, dir, false); vector dirs; FileManage::directoryList(dirs, dir); vector::const_iterator iter; for(iter = dirs.begin();iter!=dirs.end();++iter) { const string &nextdir( *iter ); findSlaSpecs(res, nextdir,suffix); } } /// \brief Set all compiler options at the same time /// /// \param defines is map of \e variable to \e value that is passed to the preprocessor /// \param unnecessaryPcodeWarning is \b true for individual warnings about unnecessary p-code ops /// \param lenientConflict is \b false to report indistinguishable patterns as errors /// \param allCollisionWarning is \b true for individual warnings about constructors with colliding operands /// \param allNopWarning is \b true for individual warnings about NOP constructors /// \param deadTempWarning is \b true for individual warnings about dead temporary varnodes /// \param enforceLocalKeyWord is \b true to force all local variable definitions to use the \b local keyword /// \param caseSensitiveRegisterNames is \b true if register names are allowed to be case sensitive /// \param debugOutput is \b true if the output file is written using the debug (XML) form of the .sla format void SleighCompile::setAllOptions(const map &defines, bool unnecessaryPcodeWarning, bool lenientConflict, bool allCollisionWarning, bool allNopWarning,bool deadTempWarning,bool enforceLocalKeyWord, bool caseSensitiveRegisterNames,bool debugOutput) { map::const_iterator iter = defines.begin(); for (iter = defines.begin(); iter != defines.end(); iter++) { setPreprocValue((*iter).first, (*iter).second); } setUnnecessaryPcodeWarning(unnecessaryPcodeWarning); setLenientConflict(lenientConflict); setLocalCollisionWarning( allCollisionWarning ); setAllNopWarning( allNopWarning ); setDeadTempWarning(deadTempWarning); setEnforceLocalKeyWord(enforceLocalKeyWord); 
setInsensitiveDuplicateError(!caseSensitiveRegisterNames); setDebugOutput(debugOutput); } static void segvHandler(int sig) { exit(1); // Just die - prevents OS from popping-up a dialog } } // End namespace ghidra int main(int argc,char **argv) { using namespace ghidra; int4 retval = 0; signal(SIGSEGV, &segvHandler); // Exit on SEGV errors #ifdef YYDEBUG sleighdebug = 0; #endif if (argc < 2) { cerr << "USAGE: sleigh [-x] [-dNAME=VALUE] inputfile [outputfile]" << endl; cerr << " -a scan for all slaspec files recursively where inputfile is a directory" << endl; cerr << " -x turns on parser debugging" << endl; cerr << " -y write .sla using XML debug format" << endl; cerr << " -u print warnings for unnecessary pcode instructions" << endl; cerr << " -l report pattern conflicts" << endl; cerr << " -n print warnings for all NOP constructors" << endl; cerr << " -t print warnings for dead temporaries" << endl; cerr << " -e enforce use of 'local' keyword for temporaries" << endl; cerr << " -c print warnings for all constructors with colliding operands" << endl; cerr << " -s treat register names as case sensitive" << endl; cerr << " -DNAME=VALUE defines a preprocessor macro NAME with value VALUE" << endl; exit(2); } const string SLAEXT(".sla"); // Default sla extension const string SLASPECEXT(".slaspec"); map defines; bool unnecessaryPcodeWarning = false; bool lenientConflict = true; bool allCollisionWarning = false; bool allNopWarning = false; bool deadTempWarning = false; bool enforceLocalKeyWord = false; bool caseSensitiveRegisterNames = false; bool debugOutput = false; bool compileAll = false; int4 i; for(i=1;i slaspecs; string dirStr = "."; if (i != argc) dirStr = argv[i]; findSlaSpecs(slaspecs, dirStr,SLASPECEXT); cout << "Compiling " << dec << slaspecs.size() << " slaspec files in " << dirStr << endl; for(int4 j=0;j "sleigh file.slaspec file.sla" string fileoutSTR = fileinPreExt; fileoutSTR.append(SLAEXT); retval = compiler.run_compilation(fileinExamine,fileoutSTR); 
}else{ retval = run_xml(fileinExamine,compiler); } } } return retval; } ================================================ FILE: pypcode/sleigh/slgh_compile.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file slgh_compile.hh /// \brief High-level control of the sleigh compilation process #ifndef __SLGH_COMPILE_HH__ #define __SLGH_COMPILE_HH__ #include "sleighbase.hh" #include "pcodecompile.hh" #include "filemanage.hh" #include #include #include namespace ghidra { using std::cout; using std::cerr; using std::out_of_range; using std::string; /// \brief A helper class to associate a \e named Constructor section with its symbol scope /// /// A Constructor can contain multiple named sections of p-code. There is a \e main /// section associated with the constructor, but other sections are possible and can /// be accessed through the \b crossbuild directive, which allows their operations to be /// incorporated into nearby instructions. During parsing of a SLEIGH file, \b this class /// associates a named section with its dedicated symbol scope. 
struct RtlPair { ConstructTpl *section; ///< A named p-code section SymbolScope *scope; ///< Symbol scope associated with the section RtlPair(void) { section = (ConstructTpl *)0; scope = (SymbolScope *)0; } ///< Construct on empty pair RtlPair(ConstructTpl *sec,SymbolScope *sc) { section = sec; scope = sc; } ///< Constructor }; /// \brief A collection of \e named p-code sections for a single Constructor /// /// A Constructor always has a \b main section of p-code (which may be empty). /// Alternately a Constructor may define additional \e named sections of p-code. /// Operations in these sections are emitted using the \b crossbuild directive and /// can be incorporated into following instructions. /// /// Internally different sections (RtlPair) are identified by index. A /// SectionSymbol holds the section's name and its corresponding index. class SectionVector { int4 nextindex; ///< Index of the section currently being parsed. RtlPair main; ///< The main section vector named; ///< Named sections accessed by index public: SectionVector(ConstructTpl *rtl,SymbolScope *scope); ///< Constructor ConstructTpl *getMainSection(void) const { return main.section; } ///< Get the \e main section ConstructTpl *getNamedSection(int4 index) const { return named[index].section; } ///< Get a \e named section by index RtlPair getMainPair(void) const { return main; } ///< Get the \e main section/namespace pair RtlPair getNamedPair(int4 i) const { return named[i]; } ///< Get a \e named section/namespace pair by index void setNextIndex(int4 i) { nextindex = i; } ///< Set the index of the currently parsing \e named section int4 getMaxId(void) const { return named.size(); } ///< Get the maximum (exclusive) named section index void append(ConstructTpl *rtl,SymbolScope *scope); ///< Add a new \e named section }; /// \brief Qualities associated (via parsing) with an address space /// /// An object of this class accumulates properties of an address space as they /// are parsed in the \b 
define statement prior to formally allocating the AddrSpace object. struct SpaceQuality { /// \brief The type of space being defined enum { ramtype, ///< An address space representing normal, indexed, memory registertype ///< An address space containing registers }; string name; ///< Name of the address space uint4 type; ///< Type of address space, \e ramtype or \e registertype uint4 size; ///< Number of bytes required to index all bytes of the space uint4 wordsize; ///< Number of bytes in an addressable unit of the space bool isdefault; ///< \b true if the new address space will be the default SpaceQuality(const string &nm); ///< Constructor }; /// \brief Qualities associated (via parsing) with a token or context \b field /// /// An object of this class accumulates properties of a field as they /// are parsed in of a \b define \b token block prior to formally allocating the /// TokenField or FieldContext object. struct FieldQuality { string name; ///< Name of the field uint4 low; ///< The least significant bit of the field within the token uint4 high; ///< The most significant bit of the field within the token bool signext; ///< \b true if the field's value is signed bool flow; ///< \b true if the context \b flows for this field. bool hex; ///< \b true if the field value is displayed in hex FieldQuality(string *nm,uintb *l,uintb *h); ///< Constructor }; /// \brief Subtable, pattern, and context information applied across a \b with block /// /// The header of a \b with block is applied to all constructors in the block. It /// attaches each constructor to a specific subtable. A pattern expression and/or a /// a series of context changes is attached to each constructor as well. 
class WithBlock { SubtableSymbol *ss; ///< Subtable containing each Constructor (or null for root table) PatternEquation *pateq; ///< Pattern to prepend to each Constructor (or null) vector contvec; ///< Context change to associate with each constructor (or null) public: WithBlock(void) { pateq = (PatternEquation *)0; } ///< Constructor void set(SubtableSymbol *s, PatternEquation *pq, vector *cvec); ///< Set components of the header ~WithBlock(void); ///< Destructor static PatternEquation *collectAndPrependPattern(const list &stack, PatternEquation *pateq); static vector *collectAndPrependContext(const list &stack, vector *contvec); static SubtableSymbol *getCurrentSubtable(const list &stack); }; class SleighCompile; /// \brief Derive Varnode sizes and optimize p-code in SLEIGH Constructors /// /// This class examines p-code parsed from a SLEIGH file and performs three main tasks: /// - Enforcing size rules in Constructor p-code, /// - Optimizing p-code within a Constructor, and /// - Searching for other p-code validity violations /// /// Many p-code operators require that their input and/or output operands are all the same size /// or have other specific size restrictions on their operands. This class enforces those requirements. /// /// This class performs limited optimization of p-code within a Constructor by performing COPY /// propagation through \e temporary registers. /// /// This class searches for unnecessary truncations and extensions, temporary varnodes that are either dead, /// read before written, or that exceed the standard allocation size. class ConsistencyChecker { public: /// \brief Description of how a temporary register is being used within a Constructor /// /// This counts reads and writes of the register. If the register is read only once, the /// particular p-code op and input slot reading it is recorded. If the register is written /// only once, the particular p-code op writing it is recorded. 
struct OptimizeRecord { uintb offset; ///< Offset of the varnode address int4 size; ///< Size in bytes of the varnode or piece (immutable) int4 writeop; ///< Index of the (last) p-code op writing to register (or -1) int4 readop; ///< Index of the (last) p-code op reading the register (or -1) int4 inslot; ///< Input slot of p-code op reading the register (or -1) int4 writecount; ///< Number of times the register is written int4 readcount; ///< Number of times the register is read int4 writesection; ///< Section containing (last) p-code op writing to the register (or -2) int4 readsection; ///< Section containing (last) p-code op reading the register (or -2) mutable int4 opttype; ///< 0 = register read by a COPY, 1 = register written by a COPY (-1 otherwise) /// \brief Construct a record, initializing counts OptimizeRecord(uintb offset, int4 size) { this->offset = offset; this->size = size; writeop = -1; readop = -1; inslot=-1; writecount=0; readcount=0; writesection=-2; readsection=-2; opttype=-1; } void copyFromExcludingSize(OptimizeRecord &that); void update(int4 opIdx, int4 slotIdx, int4 secNum); void updateRead(int4 i, int4 inslot, int4 secNum); void updateWrite(int4 i, int4 secNum); void updateExport(); void updateCombine(OptimizeRecord &that); }; private: class UniqueState { map recs; static uintb endOf(map::iterator &iter) { return iter->first + iter->second.size; } OptimizeRecord coalesce(vector &records); map::iterator lesserIter(uintb offset); public: void clear(void) { recs.clear(); } void set(uintb offset, int4 size, OptimizeRecord &rec); void getDefinitions(vector &result, uintb offset, int4 size); map::const_iterator begin(void) const { return recs.begin(); } map::const_iterator end(void) const { return recs.end(); } }; SleighCompile *compiler; ///< Parsed form of the SLEIGH file being examined int4 unnecessarypcode; ///< Count of unnecessary extension/truncation operations int4 readnowrite; ///< Count of temporary registers that are read but not 
written int4 writenoread; ///< Count of temporary registers that are written but not read bool printextwarning; ///< Set to \b true if warning emitted for each unnecessary truncation/extension bool printdeadwarning; ///< Set to \b true if warning emitted for each written but not read temporary SubtableSymbol *root_symbol; ///< The root symbol table for the parsed SLEIGH file vector postorder; ///< Subtables sorted into \e post order (dependent tables listed earlier) map sizemap; ///< Sizes associated with table \e exports OperandSymbol *getOperandSymbol(int4 slot,OpTpl *op,Constructor *ct); void printOpName(ostream &s,OpTpl *op); void printOpError(OpTpl *op,Constructor *ct,int4 err1,int4 err2,const string &message); int4 recoverSize(const ConstTpl &sizeconst,Constructor *ct); bool checkOpMisuse(OpTpl *op,Constructor *ct); bool sizeRestriction(OpTpl *op,Constructor *ct); bool checkConstructorSection(Constructor *ct,ConstructTpl *cttpl); bool hasLargeTemporary(OpTpl *op); bool isTemporaryAndTooBig(VarnodeTpl *vn); bool checkVarnodeTruncation(Constructor *ct,int4 slot,OpTpl *op,VarnodeTpl *vn,bool isbigendian); bool checkSectionTruncations(Constructor *ct,ConstructTpl *cttpl,bool isbigendian); bool checkSubtable(SubtableSymbol *sym); void dealWithUnnecessaryExt(OpTpl *op,Constructor *ct); void dealWithUnnecessaryTrunc(OpTpl *op,Constructor *ct); void setPostOrder(SubtableSymbol *root); // Optimization routines static void examineVn(UniqueState &state,const VarnodeTpl *vn,uint4 i,int4 inslot,int4 secnum); static bool possibleIntersection(const VarnodeTpl *vn1,const VarnodeTpl *vn2); bool readWriteInterference(const VarnodeTpl *vn,const OpTpl *op,bool checkread) const; void optimizeGather1(Constructor *ct,UniqueState &state,int4 secnum) const; void optimizeGather2(Constructor *ct,UniqueState &state,int4 secnum) const; const OptimizeRecord *findValidRule(Constructor *ct,const UniqueState &state) const; void applyOptimization(Constructor *ct,const OptimizeRecord &rec); 
void checkUnusedTemps(Constructor *ct,const UniqueState &state); void checkLargeTemporaries(Constructor *ct,ConstructTpl *ctpl); void optimize(Constructor *ct); public: ConsistencyChecker(SleighCompile *sleigh, SubtableSymbol *rt,bool unnecessary,bool warndead); bool testSizeRestrictions(void); ///< Test size consistency of all p-code bool testTruncations(void); ///< Test truncation validity of all p-code void testLargeTemporary(void); ///< Test for temporary Varnodes that are too large void optimizeAll(void); ///< Do COPY propagation optimization on all p-code int4 getNumUnnecessaryPcode(void) const { return unnecessarypcode; } ///< Return the number of unnecessary extensions and truncations int4 getNumReadNoWrite(void) const { return readnowrite; } ///< Return the number of temporaries read but not written int4 getNumWriteNoRead(void) const { return writenoread; } ///< Return the number of temporaries written but not read }; /// \brief Helper function holding properties of a \e context field prior to calculating the context layout /// /// This holds the concrete Varnode reprensenting the context field's physical storage and the /// properties of the field itself, prior to the final ContextField being allocated. struct FieldContext { VarnodeSymbol *sym; ///< The concrete Varnode representing physical storage for the field FieldQuality *qual; ///< Qualities of the field, as parsed bool operator<(const FieldContext &op2) const; ///< Sort context fields based on their least significant bit boundary FieldContext(VarnodeSymbol *s,FieldQuality *q) { sym=s; qual=q; } ///< Constructor }; /// \brief A class for expanding macro directives within a p-code section /// /// It is handed a (partial) list of p-code op templates (OpTpl). The /// macro directive is established with the setMacroOp() method. Then calling /// build() expands the macro into the list of OpTpls, providing parameter /// substitution. 
The class is derived from PcodeBuilder, where the dump() method, /// instead of emitting raw p-code, clones the macro templates into the list /// of OpTpls. class MacroBuilder : public PcodeBuilder { SleighCompile *slgh; ///< The SLEIGH parsing object bool haserror; ///< Set to \b true by the build() method if there was an error vector &outvec; ///< The partial list of op templates to expand the macro into vector params; ///< List of parameters to substitute into the macro bool transferOp(OpTpl *op,vector ¶ms); virtual void dump( OpTpl *op ); void free(void); ///< Free resources used by the builder void reportError(const Location* loc, const string &val); ///< Report error encountered expanding the macro public: MacroBuilder(SleighCompile *sl,vector &ovec,uint4 lbcnt) : PcodeBuilder(lbcnt),outvec(ovec) { slgh = sl; haserror = false; } ///< Constructor void setMacroOp(OpTpl *macroop); ///< Establish the MACRO directive to expand bool hasError(void) const { return haserror; } ///< Return \b true if there were errors during expansion virtual ~MacroBuilder(void) { free(); } virtual void appendBuild(OpTpl *bld,int4 secnum) { dump(bld); } virtual void delaySlot(OpTpl *op) { dump(op); } virtual void setLabel(OpTpl *op); virtual void appendCrossBuild(OpTpl *bld,int4 secnum) { dump(bld); } }; /// \brief Parsing for the semantic section of Constructors /// /// This is just the base p-code compiler for building OpTpl and VarnodeTpl. /// Symbols, locations, and error/warning messages are tied into to the main /// parser. 
class SleighPcode : public PcodeCompile { SleighCompile *compiler; ///< The main SLEIGH parser virtual uint4 allocateTemp(void); virtual const Location *getLocation(SleighSymbol *sym) const; virtual void reportError(const Location* loc, const string &msg); virtual void reportWarning(const Location* loc, const string &msg); virtual void addSymbol(SleighSymbol *sym); public: SleighPcode(void) : PcodeCompile() { compiler = (SleighCompile *)0; } ///< Constructor void setCompiler(SleighCompile *comp) { compiler = comp; } ///< Hook in the main parser }; /// \brief SLEIGH specification compiling /// /// Class for parsing SLEIGH specifications (.slaspec files) and producing the /// \e compiled form (.sla file), which can then be loaded by a SLEIGH disassembly /// and p-code generation engine. This full parser contains the p-code parser SleighPcode /// within it. The main entry point is run_compilation(), which takes the input and output /// file paths as parameters. Various options and preprocessor macros can be set using the /// various set*() methods prior to calling run_compilation. 
class SleighCompile : public SleighBase { friend class SleighPcode; static const int4 UNIQUE_CROSSBUILD_POSITION = 8; static const int4 UNIQUE_CROSSBUILD_NUMBITS = 8; public: SleighPcode pcode; ///< The p-code parsing (sub)engine private: map preproc_defines; ///< Defines for the preprocessor vector contexttable; ///< Context field definitions (prior to defining ContextField and ContextSymbol) vector macrotable; ///< SLEIGH macro definitions vector tokentable; ///< SLEIGH token definitions vector tables; ///< SLEIGH subtables vector sections; ///< Symbols defining Constructor sections list withstack; ///< Current stack of \b with blocks Constructor *curct; ///< Current Constructor being defined MacroSymbol *curmacro; ///< Current macro being defined bool contextlock; ///< If the context layout has been established yet vector relpath; ///< Relative path (to cwd) for each filename vector filename; ///< Stack of current files being parsed vector lineno; ///< Current line number for each file in stack map ctorLocationMap; ///< Map each Constructor to its defining parse location map symbolLocationMap; ///< Map each symbol to its defining parse location int4 userop_count; ///< Number of userops defined bool warnunnecessarypcode; ///< \b true if we warn of unnecessary ZEXT or SEXT bool warndeadtemps; ///< \b true if we warn of temporaries that are written but not read bool lenientconflicterrors; ///< \b true if we ignore most pattern conflict errors bool warnalllocalcollisions; ///< \b true if local export collisions generate individual warnings bool warnallnops; ///< \b true if pcode NOPs generate individual warnings bool failinsensitivedups; ///< \b true if case insensitive register duplicates cause error bool debugoutput; ///< \b true if output .sla is written in XML debug format vector noplist; ///< List of individual NOP warnings mutable Location currentLocCache; ///< Location for (last) request of current location int4 errors; ///< Number of fatal errors encountered 
const Location* getCurrentLocation(void) const; ///< Get the current file and line number being parsed void predefinedSymbols(void); ///< Get SLEIGHs predefined address spaces and symbols int4 calcContextVarLayout(int4 start,int4 sz,int4 numbits); void buildDecisionTrees(void); ///< Build decision trees for all subtables void buildPatterns(void); ///< Generate final match patterns based on parse constraint equations void checkConsistency(void); ///< Perform final consistency checks on the SLEIGH definitions static int4 findCollision(map &local2Operand,const vector &locals,int operand); bool checkLocalExports(Constructor *ct); ///< Check for operands that \e might export the same local variable void checkLocalCollisions(void); ///< Check all Constructors for local export collisions between operands void checkNops(void); ///< Report on all Constructors with empty semantic sections void checkCaseSensitivity(void); ///< Check that register names can be treated as case insensitive string checkSymbols(SymbolScope *scope); ///< Make sure label symbols are both defined and used void addSymbol(SleighSymbol *sym); ///< Add a new symbol to the current scope SleighSymbol *dedupSymbolList(vector *symlist); ///< Deduplicate the given list of symbols bool expandMacros(ConstructTpl *ctpl); ///< Expand any formal SLEIGH macros in the given section of p-code bool finalizeSections(Constructor *big,SectionVector *vec); ///< Do final checks, expansions, and linking for p-code sections static VarnodeTpl *findSize(const ConstTpl &offset,const ConstructTpl *ct); static bool forceExportSize(ConstructTpl *ct); static uintb insertCrossBuildRegion(uintb addr); static void shiftUniqueVn(VarnodeTpl *vn); static void shiftUniqueOp(OpTpl *op); static void shiftUniqueHandle(HandleTpl *hand); static void shiftUniqueConstruct(ConstructTpl *tpl); static string formatStatusMessage(const Location* loc, const string &msg); void checkUniqueAllocation(void); ///< Modify temporary Varnode offsets to 
support \b crossbuilds void process(void); ///< Do all post processing on the parsed data structures public: SleighCompile(void); ///< Constructor const Location *getLocation(Constructor* ctor) const; ///< Get the source location of the given Constructor's definition const Location *getLocation(SleighSymbol *sym) const; ///< Get the source location of the given symbol's definition void reportError(const string &msg); ///< Issue a fatal error message void reportError(const Location *loc, const string &msg); ///< Issue a fatal error message with a source location void reportWarning(const string &msg); ///< Issue a warning message void reportWarning(const Location *loc, const string &msg); ///< Issue a warning message with a source location int4 numErrors(void) const { return errors; } ///< Return the current number of fatal errors uint4 getUniqueAddr(void); ///< Get the next available temporary register offset /// \brief Set whether unnecessary truncation and extension operators generate warnings individually /// /// \param val is \b true if warnings are generated individually. The default is \b false. void setUnnecessaryPcodeWarning(bool val) { warnunnecessarypcode = val; } /// \brief Set whether dead temporary registers generate warnings individually /// /// \param val is \b true if warnings are generated individually. The default is \b false. void setDeadTempWarning(bool val) { warndeadtemps = val; } /// \brief Set whether named temporary registers must be defined using the \b local keyword. /// /// \param val is \b true if the \b local keyword must always be used. The default is \b false. void setEnforceLocalKeyWord(bool val) { pcode.setEnforceLocalKey(val); } /// \brief Set whether indistinguishable Constructor patterns generate fatal errors /// /// \param val is \b true if no error is generated. The default is \b true. 
void setLenientConflict(bool val) { lenientconflicterrors = val; } /// \brief Set whether collisions in exported locals generate warnings individually /// /// \param val is \b true if warnings are generated individually. The default is \b false. void setLocalCollisionWarning(bool val) { warnalllocalcollisions = val; } /// \brief Set whether NOP Constructors generate warnings individually /// /// \param val is \b true if warnings are generated individually. The default is \b false. void setAllNopWarning(bool val) { warnallnops = val; } /// \brief Set whether case insensitive duplicates of register names cause an error /// /// \param val is \b true is duplicates cause an error. void setInsensitiveDuplicateError(bool val) { failinsensitivedups = val; } /// \brief Set whether the output .sla file should be written in XML debug format /// /// \param val is \b true if the XML debug format should be used void setDebugOutput(bool val) { debugoutput = val; } // Lexer functions void calcContextLayout(void); ///< Calculate the internal context field layout string grabCurrentFilePath(void) const; ///< Get the path to the current source file void parseFromNewFile(const string &fname); ///< Push a new source file to the current parse stack void parsePreprocMacro(void); ///< Mark start of parsing for an expanded preprocessor macro void parseFileFinished(void); ///< Mark end of parsing for the current file or macro void nextLine(void) { lineno.back() += 1; } ///< Indicate parsing proceeded to the next line of the current file bool getPreprocValue(const string &nm,string &res) const; ///< Retrieve a given preprocessor variable void setPreprocValue(const string &nm,const string &value); ///< Set a given preprocessor variable bool undefinePreprocValue(const string &nm); ///< Remove the value associated with the given preprocessor variable // Parser functions TokenSymbol *defineToken(string *name,uintb *sz,int4 endian); void addTokenField(TokenSymbol *sym,FieldQuality *qual); bool 
addContextField(VarnodeSymbol *sym,FieldQuality *qual); void newSpace(SpaceQuality *qual); SectionSymbol *newSectionSymbol(const string &nm); void setEndian(int4 end); /// \brief Set instruction alignment for the SLEIGH specification /// /// \param val is the alignment value in bytes. 1 is the default indicating no alignment void setAlignment(int4 val) { alignment = val; } void defineVarnodes(SpaceSymbol *spacesym,uintb *off,uintb *size,vector *names); void defineBitrange(string *name,VarnodeSymbol *sym,uint4 bitoffset,uint4 numb); void addUserOp(vector *names); void attachValues(vector *symlist,vector *numlist); void attachNames(vector *symlist,vector *names); void attachVarnodes(vector *symlist,vector *varlist); SubtableSymbol *newTable(string *nm); void newOperand(Constructor *ct,string *nm); PatternEquation *constrainOperand(OperandSymbol *sym,PatternExpression *patexp); void defineOperand(OperandSymbol *sym,PatternExpression *patexp); PatternEquation *defineInvisibleOperand(TripleSymbol *sym); void selfDefine(OperandSymbol *sym); ConstructTpl *setResultVarnode(ConstructTpl *ct,VarnodeTpl *vn); ConstructTpl *setResultStarVarnode(ConstructTpl *ct,StarQuality *star,VarnodeTpl *vn); bool contextMod(vector *vec,ContextSymbol *sym,PatternExpression *pe); void contextSet(vector *vec,TripleSymbol *sym,ContextSymbol *cvar); MacroSymbol *createMacro(string *name,vector *param); void compareMacroParams(MacroSymbol *sym,const vector ¶m); vector *createMacroUse(MacroSymbol *sym,vector *param); SectionVector *standaloneSection(ConstructTpl *main); SectionVector *firstNamedSection(ConstructTpl *main,SectionSymbol *sym); SectionVector *nextNamedSection(SectionVector *vec,ConstructTpl *section,SectionSymbol *sym); SectionVector *finalNamedSection(SectionVector *vec,ConstructTpl *section); vector *createCrossBuild(VarnodeTpl *addr,SectionSymbol *sym); ConstructTpl *enterSection(void); Constructor *createConstructor(SubtableSymbol *sym); bool isInRoot(Constructor *ct) const { 
return (root == ct->getParent()); } ///< Is the Constructor in the root table? void resetConstructors(void); void pushWith(SubtableSymbol *ss,PatternEquation *pateq,vector *contvec); void popWith(void); void buildConstructor(Constructor *big,PatternEquation *pateq,vector *contvec,SectionVector *vec); void buildMacro(MacroSymbol *sym,ConstructTpl *rtl); void recordNop(void); // Virtual functions (not used by the compiler) virtual void initialize(DocumentStorage &store) {} virtual int4 instructionLength(const Address &baseaddr) const { return 0; } virtual int4 oneInstruction(PcodeEmit &emit,const Address &baseaddr) const { return 0; } virtual int4 printAssembly(AssemblyEmit &emit,const Address &baseaddr) const { return 0; } void setAllOptions(const map &defines, bool unnecessaryPcodeWarning, bool lenientConflict, bool allCollisionWarning, bool allNopWarning,bool deadTempWarning,bool enforceLocalKeyWord, bool caseSensitiveRegisterNames,bool debugOutput); int4 run_compilation(const string &filein,const string &fileout); }; ostream& operator<<(ostream &os, const ConsistencyChecker::OptimizeRecord &rec); extern SleighCompile *slgh; ///< A global reference to the SLEIGH compiler accessible to the parse functions extern int yydebug; ///< Debug state for the SLEIGH parse functions } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/slghparse.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ /* A Bison parser, made by GNU Bison 3.5.1. */ /* Bison implementation for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* As a special exception, you may create a larger work that contains part or all of the Bison parser skeleton and distribute that work under terms of your choice, so long as that work isn't itself a parser generator using the skeleton or a modified version thereof as a parser skeleton. Alternatively, if you modify or redistribute the parser skeleton itself, you may (at your option) remove this special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid infringing on user name space. This should be done even for local variables, as they might otherwise be expanded by user macros. There are some unavoidable exceptions within include files to define necessary library symbols; they are noted "INFRINGES ON USER NAME SPACE" below. 
*/ /* Undocumented macros, especially those whose name start with YY_, are private implementation details. Do not rely on them. */ /* Identify Bison output. */ #define YYBISON 1 /* Bison version. */ #define YYBISON_VERSION "3.5.1" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" /* Pure parsers. */ #define YYPURE 0 /* Push parsers. */ #define YYPUSH 0 /* Pull parsers. */ #define YYPULL 1 /* Substitute the type names. */ #define YYSTYPE SLEIGHSTYPE /* Substitute the variable and function names. */ #define yyparse sleighparse #define yylex sleighlex #define yyerror sleigherror #define yydebug sleighdebug #define yynerrs sleighnerrs #define yylval sleighlval #define yychar sleighchar /* First part of user prologue. */ #include "slgh_compile.hh" extern FILE *sleighin; extern int sleighlex(void); namespace ghidra { extern SleighCompile *slgh; extern int4 actionon; extern int sleighdebug; extern int sleigherror(const char *str ); # ifndef YY_CAST # ifdef __cplusplus # define YY_CAST(Type, Val) static_cast (Val) # define YY_REINTERPRET_CAST(Type, Val) reinterpret_cast (Val) # else # define YY_CAST(Type, Val) ((Type) (Val)) # define YY_REINTERPRET_CAST(Type, Val) ((Type) (Val)) # endif # endif # ifndef YY_NULLPTR # if defined __cplusplus # if 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # else # define YY_NULLPTR ((void*)0) # endif # endif /* Enabling verbose error messages. */ #ifdef YYERROR_VERBOSE # undef YYERROR_VERBOSE # define YYERROR_VERBOSE 1 #else # define YYERROR_VERBOSE 0 #endif /* Use api.header.include to #include this header instead of duplicating it here. */ #ifndef YY_SLEIGH_SLGHPARSE_HH_INCLUDED # define YY_SLEIGH_SLGHPARSE_HH_INCLUDED /* Debug traces. */ #ifndef SLEIGHDEBUG # if defined YYDEBUG #if YYDEBUG # define SLEIGHDEBUG 1 # else # define SLEIGHDEBUG 0 # endif # else /* ! defined YYDEBUG */ # define SLEIGHDEBUG 0 # endif /* ! defined YYDEBUG */ #endif /* ! 
defined SLEIGHDEBUG */ #if SLEIGHDEBUG extern int sleighdebug; #endif /* Token type. */ #ifndef SLEIGHTOKENTYPE # define SLEIGHTOKENTYPE enum sleightokentype { OP_BOOL_OR = 258, OP_BOOL_AND = 259, OP_BOOL_XOR = 260, OP_OR = 261, OP_XOR = 262, OP_AND = 263, OP_EQUAL = 264, OP_NOTEQUAL = 265, OP_FEQUAL = 266, OP_FNOTEQUAL = 267, OP_GREATEQUAL = 268, OP_LESSEQUAL = 269, OP_SLESS = 270, OP_SGREATEQUAL = 271, OP_SLESSEQUAL = 272, OP_SGREAT = 273, OP_FLESS = 274, OP_FGREAT = 275, OP_FLESSEQUAL = 276, OP_FGREATEQUAL = 277, OP_LEFT = 278, OP_RIGHT = 279, OP_SRIGHT = 280, OP_FADD = 281, OP_FSUB = 282, OP_SDIV = 283, OP_SREM = 284, OP_FMULT = 285, OP_FDIV = 286, OP_ZEXT = 287, OP_CARRY = 288, OP_BORROW = 289, OP_SEXT = 290, OP_SCARRY = 291, OP_SBORROW = 292, OP_NAN = 293, OP_ABS = 294, OP_SQRT = 295, OP_CEIL = 296, OP_FLOOR = 297, OP_ROUND = 298, OP_INT2FLOAT = 299, OP_FLOAT2FLOAT = 300, OP_TRUNC = 301, OP_CPOOLREF = 302, OP_NEW = 303, OP_POPCOUNT = 304, OP_LZCOUNT = 305, BADINTEGER = 306, GOTO_KEY = 307, CALL_KEY = 308, RETURN_KEY = 309, IF_KEY = 310, DEFINE_KEY = 311, ATTACH_KEY = 312, MACRO_KEY = 313, SPACE_KEY = 314, TYPE_KEY = 315, RAM_KEY = 316, DEFAULT_KEY = 317, REGISTER_KEY = 318, ENDIAN_KEY = 319, WITH_KEY = 320, ALIGN_KEY = 321, OP_UNIMPL = 322, TOKEN_KEY = 323, SIGNED_KEY = 324, NOFLOW_KEY = 325, HEX_KEY = 326, DEC_KEY = 327, BIG_KEY = 328, LITTLE_KEY = 329, SIZE_KEY = 330, WORDSIZE_KEY = 331, OFFSET_KEY = 332, NAMES_KEY = 333, VALUES_KEY = 334, VARIABLES_KEY = 335, PCODEOP_KEY = 336, IS_KEY = 337, LOCAL_KEY = 338, DELAYSLOT_KEY = 339, CROSSBUILD_KEY = 340, EXPORT_KEY = 341, BUILD_KEY = 342, CONTEXT_KEY = 343, ELLIPSIS_KEY = 344, GLOBALSET_KEY = 345, BITRANGE_KEY = 346, CHAR = 347, INTEGER = 348, INTB = 349, STRING = 350, SYMBOLSTRING = 351, SPACESYM = 352, SECTIONSYM = 353, TOKENSYM = 354, USEROPSYM = 355, VALUESYM = 356, VALUEMAPSYM = 357, CONTEXTSYM = 358, NAMESYM = 359, VARSYM = 360, BITSYM = 361, SPECSYM = 362, VARLISTSYM = 363, OPERANDSYM = 364, JUMPSYM = 
365, MACROSYM = 366, LABELSYM = 367, SUBTABLESYM = 368 }; #endif /* Value type. */ #if ! defined SLEIGHSTYPE && ! defined SLEIGHSTYPE_IS_DECLARED union SLEIGHSTYPE { char ch; uintb *i; intb *big; string *str; vector *strlist; vector *biglist; vector *param; SpaceQuality *spacequal; FieldQuality *fieldqual; StarQuality *starqual; VarnodeTpl *varnode; ExprTree *tree; vector *stmt; ConstructTpl *sem; SectionVector *sectionstart; Constructor *construct; PatternEquation *pateq; PatternExpression *patexp; vector *symlist; vector *contop; SleighSymbol *anysym; SpaceSymbol *spacesym; SectionSymbol *sectionsym; TokenSymbol *tokensym; UserOpSymbol *useropsym; MacroSymbol *macrosym; LabelSymbol *labelsym; SubtableSymbol *subtablesym; OperandSymbol *operandsym; VarnodeListSymbol *varlistsym; VarnodeSymbol *varsym; BitrangeSymbol *bitsym; NameSymbol *namesym; ValueSymbol *valuesym; ValueMapSymbol *valuemapsym; ContextSymbol *contextsym; FamilySymbol *famsym; SpecificSymbol *specsym; }; typedef union SLEIGHSTYPE SLEIGHSTYPE; # define SLEIGHSTYPE_IS_TRIVIAL 1 # define SLEIGHSTYPE_IS_DECLARED 1 #endif extern SLEIGHSTYPE sleighlval; int sleighparse (void); #endif /* !YY_SLEIGH_SLGHPARSE_HH_INCLUDED */ #ifdef short # undef short #endif /* On compilers that do not define __PTRDIFF_MAX__ etc., make sure and (if available) are included so that the code can choose integer types of a good width. */ #ifndef __PTRDIFF_MAX__ # include /* INFRINGES ON USER NAME SPACE */ # if defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ # include /* INFRINGES ON USER NAME SPACE */ # define YY_STDINT_H # endif #endif /* Narrow types that promote to a signed type and that can represent a signed or unsigned integer of at least N bits. In tables they can save space and decrease cache pressure. Promoting to a signed type helps avoid bugs in integer arithmetic. 
*/ #ifdef __INT_LEAST8_MAX__ typedef __INT_LEAST8_TYPE__ yytype_int8; #elif defined YY_STDINT_H typedef int_least8_t yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef __INT_LEAST16_MAX__ typedef __INT_LEAST16_TYPE__ yytype_int16; #elif defined YY_STDINT_H typedef int_least16_t yytype_int16; #else typedef short yytype_int16; #endif #if defined __UINT_LEAST8_MAX__ && __UINT_LEAST8_MAX__ <= __INT_MAX__ typedef __UINT_LEAST8_TYPE__ yytype_uint8; #elif (!defined __UINT_LEAST8_MAX__ && defined YY_STDINT_H \ && UINT_LEAST8_MAX <= INT_MAX) typedef uint_least8_t yytype_uint8; #elif !defined __UINT_LEAST8_MAX__ && UCHAR_MAX <= INT_MAX typedef unsigned char yytype_uint8; #else typedef short yytype_uint8; #endif #if defined __UINT_LEAST16_MAX__ && __UINT_LEAST16_MAX__ <= __INT_MAX__ typedef __UINT_LEAST16_TYPE__ yytype_uint16; #elif (!defined __UINT_LEAST16_MAX__ && defined YY_STDINT_H \ && UINT_LEAST16_MAX <= INT_MAX) typedef uint_least16_t yytype_uint16; #elif !defined __UINT_LEAST16_MAX__ && USHRT_MAX <= INT_MAX typedef unsigned short yytype_uint16; #else typedef int yytype_uint16; #endif #ifndef YYPTRDIFF_T # if defined __PTRDIFF_TYPE__ && defined __PTRDIFF_MAX__ # define YYPTRDIFF_T __PTRDIFF_TYPE__ # define YYPTRDIFF_MAXIMUM __PTRDIFF_MAX__ # elif defined PTRDIFF_MAX # ifndef ptrdiff_t # include /* INFRINGES ON USER NAME SPACE */ # endif # define YYPTRDIFF_T ptrdiff_t # define YYPTRDIFF_MAXIMUM PTRDIFF_MAX # else # define YYPTRDIFF_T long # define YYPTRDIFF_MAXIMUM LONG_MAX # endif #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__ # include /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned # endif #endif #define YYSIZE_MAXIMUM \ YY_CAST (YYPTRDIFF_T, \ (YYPTRDIFF_MAXIMUM < YY_CAST (YYSIZE_T, -1) \ ? 
YYPTRDIFF_MAXIMUM \ : YY_CAST (YYSIZE_T, -1))) #define YYSIZEOF(X) YY_CAST (YYPTRDIFF_T, sizeof (X)) /* Stored state numbers (used for stacks). */ typedef yytype_int16 yy_state_t; /* State numbers in computations. */ typedef int yy_state_fast_t; #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE_PURE # if defined __GNUC__ && 2 < __GNUC__ + (96 <= __GNUC_MINOR__) # define YY_ATTRIBUTE_PURE __attribute__ ((__pure__)) # else # define YY_ATTRIBUTE_PURE # endif #endif #ifndef YY_ATTRIBUTE_UNUSED # if defined __GNUC__ && 2 < __GNUC__ + (7 <= __GNUC_MINOR__) # define YY_ATTRIBUTE_UNUSED __attribute__ ((__unused__)) # else # define YY_ATTRIBUTE_UNUSED # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. */ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") \ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if defined __cplusplus && defined __GNUC__ && ! 
defined __ICC && 6 <= __GNUC__ # define YY_IGNORE_USELESS_CAST_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuseless-cast\"") # define YY_IGNORE_USELESS_CAST_END \ _Pragma ("GCC diagnostic pop") #endif #ifndef YY_IGNORE_USELESS_CAST_BEGIN # define YY_IGNORE_USELESS_CAST_BEGIN # define YY_IGNORE_USELESS_CAST_END #endif #define YY_ASSERT(E) ((void) (0 && (E))) #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! 
((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined SLEIGHSTYPE_IS_TRIVIAL && SLEIGHSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yy_state_t yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (YYSIZEOF (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. */ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYPTRDIFF_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * YYSIZEOF (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / YYSIZEOF (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. 
*/ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, YY_CAST (YYSIZE_T, (Count)) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYPTRDIFF_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 5 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 2629 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 137 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 71 /* YYNRULES -- Number of rules. */ #define YYNRULES 336 /* YYNSTATES -- Number of states. */ #define YYNSTATES 714 #define YYUNDEFTOK 2 #define YYMAXUTOK 368 /* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM as returned by yylex, with out-of-bounds checking. */ #define YYTRANSLATE(YYX) \ (0 <= (YYX) && (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex. 
*/ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 136, 43, 2, 2, 2, 38, 11, 2, 129, 130, 36, 32, 131, 33, 2, 37, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 135, 8, 17, 128, 18, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 132, 2, 133, 9, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 134, 6, 127, 44, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 7, 10, 12, 13, 14, 15, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 34, 35, 39, 40, 41, 42, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126 }; #if SLEIGHDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. 
*/ static const yytype_int16 yyrline[] = { 0, 155, 155, 156, 157, 158, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 171, 172, 173, 174, 176, 177, 179, 181, 183, 184, 185, 186, 187, 189, 191, 192, 195, 196, 197, 198, 199, 201, 202, 203, 204, 205, 206, 208, 210, 211, 212, 213, 214, 215, 216, 218, 220, 222, 224, 225, 227, 230, 232, 234, 236, 238, 241, 243, 244, 245, 247, 249, 250, 251, 254, 255, 258, 260, 261, 262, 264, 265, 267, 268, 269, 270, 271, 272, 273, 274, 275, 277, 278, 279, 280, 282, 284, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 301, 302, 303, 304, 306, 307, 309, 310, 312, 313, 315, 316, 317, 318, 319, 320, 321, 324, 325, 326, 327, 329, 330, 332, 333, 334, 335, 336, 337, 339, 340, 342, 344, 345, 347, 348, 349, 350, 351, 353, 354, 355, 356, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 452, 453, 454, 455, 457, 458, 459, 460, 461, 462, 463, 465, 466, 467, 468, 470, 471, 472, 473, 474, 476, 477, 478, 480, 481, 483, 484, 485, 486, 487, 488, 490, 491, 492, 493, 494, 496, 497, 498, 499, 501, 502, 504, 505, 506, 508, 509, 510, 512, 513, 514, 517, 518, 520, 521, 522, 524, 526, 527, 528, 529, 531, 532, 533, 535, 536, 537, 538, 539, 541, 542, 544, 545, 547, 548, 551, 552, 553, 555, 556, 557, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573 }; #endif #if SLEIGHDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "OP_BOOL_OR", "OP_BOOL_AND", "OP_BOOL_XOR", "'|'", "OP_OR", "';'", "'^'", "OP_XOR", "'&'", "OP_AND", "OP_EQUAL", "OP_NOTEQUAL", "OP_FEQUAL", "OP_FNOTEQUAL", "'<'", "'>'", "OP_GREATEQUAL", "OP_LESSEQUAL", "OP_SLESS", "OP_SGREATEQUAL", "OP_SLESSEQUAL", "OP_SGREAT", "OP_FLESS", "OP_FGREAT", "OP_FLESSEQUAL", "OP_FGREATEQUAL", "OP_LEFT", "OP_RIGHT", "OP_SRIGHT", "'+'", "'-'", "OP_FADD", "OP_FSUB", "'*'", "'/'", "'%'", "OP_SDIV", "OP_SREM", "OP_FMULT", "OP_FDIV", "'!'", "'~'", "OP_ZEXT", "OP_CARRY", "OP_BORROW", "OP_SEXT", "OP_SCARRY", "OP_SBORROW", "OP_NAN", "OP_ABS", "OP_SQRT", "OP_CEIL", "OP_FLOOR", "OP_ROUND", "OP_INT2FLOAT", "OP_FLOAT2FLOAT", "OP_TRUNC", "OP_CPOOLREF", "OP_NEW", "OP_POPCOUNT", "OP_LZCOUNT", "BADINTEGER", "GOTO_KEY", "CALL_KEY", "RETURN_KEY", "IF_KEY", "DEFINE_KEY", "ATTACH_KEY", "MACRO_KEY", "SPACE_KEY", "TYPE_KEY", "RAM_KEY", "DEFAULT_KEY", "REGISTER_KEY", "ENDIAN_KEY", "WITH_KEY", "ALIGN_KEY", "OP_UNIMPL", "TOKEN_KEY", "SIGNED_KEY", "NOFLOW_KEY", "HEX_KEY", "DEC_KEY", "BIG_KEY", "LITTLE_KEY", "SIZE_KEY", "WORDSIZE_KEY", "OFFSET_KEY", "NAMES_KEY", "VALUES_KEY", "VARIABLES_KEY", "PCODEOP_KEY", "IS_KEY", "LOCAL_KEY", "DELAYSLOT_KEY", "CROSSBUILD_KEY", "EXPORT_KEY", "BUILD_KEY", "CONTEXT_KEY", "ELLIPSIS_KEY", "GLOBALSET_KEY", "BITRANGE_KEY", "CHAR", "INTEGER", "INTB", "STRING", "SYMBOLSTRING", "SPACESYM", "SECTIONSYM", "TOKENSYM", "USEROPSYM", "VALUESYM", "VALUEMAPSYM", "CONTEXTSYM", "NAMESYM", "VARSYM", "BITSYM", "SPECSYM", "VARLISTSYM", "OPERANDSYM", "JUMPSYM", "MACROSYM", "LABELSYM", "SUBTABLESYM", "'}'", "'='", "'('", "')'", "','", "'['", "']'", "'{'", "':'", "' '", "$accept", "spec", "definition", "constructorlike", "endiandef", "aligndef", "tokendef", "tokenprop", "contextdef", "contextprop", "fielddef", "contextfielddef", "spacedef", "spaceprop", "varnodedef", "bitrangedef", "bitrangelist", "bitrangesingle", "pcodeopdef", "valueattach", "nameattach", "varattach", 
"macrodef", "withblockstart", "withblockmid", "withblock", "id_or_nil", "bitpat_or_nil", "macrostart", "rtlbody", "constructor", "constructprint", "subtablestart", "pexpression", "pequation", "elleq", "ellrt", "atomic", "constraint", "contextblock", "contextlist", "section_def", "rtlfirstsection", "rtlcontinue", "rtl", "rtlmid", "statement", "expr", "sizedstar", "jumpdest", "varnode", "integervarnode", "lhsvarnode", "label", "exportvarnode", "familysymbol", "specificsymbol", "charstring", "intblist", "intbpart", "stringlist", "stringpart", "anystringlist", "anystringpart", "valuelist", "valuepart", "varlist", "varpart", "paramlist", "oplist", "anysymbol", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ static const yytype_int16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 124, 261, 59, 94, 262, 38, 263, 264, 265, 266, 267, 60, 62, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 43, 45, 281, 282, 42, 47, 37, 283, 284, 285, 286, 33, 126, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 125, 61, 40, 41, 44, 91, 93, 123, 58, 32 }; # endif #define YYPACT_NINF (-293) #define yypact_value_is_default(Yyn) \ ((Yyn) == YYPACT_NINF) #define YYTABLE_NINF (-271) #define yytable_value_is_error(Yyn) \ ((Yyn) == YYTABLE_NINF) /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ static const yytype_int16 yypact[] = { 35, 12, 37, -293, -15, -293, 20, 1667, 303, 61, -72, -13, 41, -293, -293, -293, -293, -293, 430, -293, 1591, -293, 89, -293, -293, -293, -293, -293, -293, -293, -293, 40, -293, 47, -293, 24, 180, 84, -293, -293, 2467, 99, 2486, -27, 160, 191, 211, -41, -41, -41, 206, -293, -293, 234, -293, -293, -293, 244, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, 346, 247, -293, 249, 320, 251, -293, 253, -293, 255, 261, -33, -293, -293, -293, -293, -293, 78, -293, -293, -293, -293, 286, -293, 78, -293, -293, -293, 286, 390, 392, -293, -293, 305, 290, -293, -293, 313, 415, -293, 301, 6, -293, 307, -293, -293, 36, 323, -16, -92, 344, 78, 327, -293, -293, -293, 328, 330, -293, -293, -293, -293, 331, 83, 355, 356, 337, 1721, 1522, -293, -293, -293, -293, -293, -293, 339, -293, 78, 5, -293, -293, 368, -293, 45, -293, 5, -293, -293, 457, 362, -293, 2419, -293, 354, -293, -293, -54, -293, -293, 186, 2503, 466, 370, -293, -24, 470, -293, -87, 474, -293, 60, 352, 365, 381, 388, 393, 397, -293, -293, -293, -293, -293, 262, -22, -103, -293, 369, 389, 10, 1571, 406, 367, 314, 382, 384, 372, 33, 387, -293, 385, -293, -293, -293, 391, 94, -293, 1571, -8, -293, 149, -293, 151, -293, 1543, 16, 78, 78, 78, -293, -60, -293, 1543, 1543, 1543, 1543, 1543, 1543, -60, -293, 400, -293, -293, -293, 386, -293, 431, -293, -293, -293, -293, -293, 2443, -293, -293, -293, 416, -293, -293, -21, -293, -293, -293, -39, -293, -293, 419, 399, 403, 404, 405, 424, -293, -293, 417, -293, -293, 519, 532, 447, 452, -293, 427, -293, -293, -293, 1571, 552, -293, 1571, 553, -293, 1571, 1571, 1571, 1571, 1571, 433, 442, 443, 445, 482, 483, 485, 487, 522, 523, 525, 527, 558, 563, 566, 603, 606, 639, 640, -293, 1571, 1845, 1571, -293, 139, -4, 448, 587, 602, 363, 642, 771, -293, 164, 802, -293, 807, 712, 1571, 714, 1571, 1571, 1571, 1528, 749, 752, 1571, 754, 1543, 1543, -293, 1543, 2405, -293, -293, -293, 85, 884, -293, -50, -293, 
-293, -293, 2405, 2405, 2405, 2405, 2405, 2405, -293, 819, 794, 812, -293, -293, -293, -293, 829, -293, -293, -293, -293, -293, -293, -293, -293, 830, 869, 870, 874, 314, -293, -293, 882, -293, 906, 325, -293, 564, -293, 604, -293, -293, -293, -293, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 808, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 1571, 409, -293, 14, 914, 949, -293, 1571, 950, -293, 930, 212, 989, -293, 990, 1092, -293, 1127, -293, -293, -293, -293, 1898, 1012, 2218, 66, 1938, 136, 1571, 1006, 1047, 1978, 1045, -293, -293, 380, 1543, 1543, 1543, 1543, 1543, 1543, 1543, 1543, 1543, 1051, -293, 1087, 1132, -293, -293, -293, -10, 1167, 1085, 1114, -293, 1125, 1126, 1166, 1170, -293, 1200, 1203, 1332, 1367, 1372, 848, 685, 888, 725, 767, 928, 968, 1008, 1048, 1088, 1128, 1168, 1208, 1248, 162, 644, 1288, 1328, 182, -293, 2257, 2294, 2294, 2328, 2360, 2430, 1773, 1773, 1773, 1773, 2484, 2484, 2484, 2484, 2484, 2484, 2484, 2484, 2484, 2484, 2484, 2484, 1856, 1856, 1856, 2382, 2382, 2382, 2382, -293, -293, -293, -293, -293, -293, -293, 1407, 1246, 1285, -293, 2018, 0, 1412, 1447, 1452, 314, -293, -293, -293, 1571, 1487, 1571, -293, 1492, 2058, -293, -293, -293, 1350, -293, 2463, 285, 1556, 169, 169, 296, 296, -293, -293, 1613, 1543, 1543, 1656, 216, -293, -293, 321, 1390, -27, -293, -293, -293, -293, 1429, -293, -293, -293, -293, -293, 1571, -293, 1571, 1571, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, 1571, -293, -293, -293, -293, -293, 1430, -293, -293, 1571, -293, -293, -293, -293, 2098, -293, 2218, -293, -293, 1438, 1409, 1443, 1565, 2396, -293, -293, 1549, 1550, -293, -293, 1450, 1573, -293, 1368, 1408, 1448, 1488, 1451, 2138, -293, 1462, 1475, 1480, -293, -293, -293, -293, -293, -293, -293, -293, -293, 
-293, -293, -293, 1571, 1470, 1473, 2178, 1597, 1600, -293, -293, -293 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. */ static const yytype_int16 yydefact[] = { 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 67, 0, 0, 89, 4, 5, 3, 6, 0, 7, 0, 8, 0, 9, 10, 11, 12, 13, 14, 17, 63, 0, 18, 0, 16, 0, 0, 0, 15, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 69, 68, 0, 88, 87, 23, 0, 322, 323, 324, 325, 328, 329, 330, 331, 332, 336, 333, 334, 335, 326, 327, 27, 0, 29, 0, 31, 0, 43, 0, 50, 0, 0, 0, 66, 64, 65, 145, 82, 0, 281, 83, 86, 85, 84, 81, 0, 78, 80, 90, 79, 0, 0, 44, 45, 0, 0, 28, 293, 0, 0, 30, 0, 0, 54, 0, 303, 304, 0, 0, 0, 0, 319, 70, 0, 34, 35, 36, 0, 0, 39, 40, 41, 42, 0, 0, 0, 0, 0, 140, 0, 272, 273, 274, 275, 124, 276, 123, 126, 0, 127, 106, 111, 113, 114, 125, 282, 127, 20, 21, 0, 0, 294, 0, 57, 0, 53, 55, 0, 305, 306, 0, 0, 0, 0, 284, 0, 0, 311, 0, 0, 320, 0, 127, 71, 0, 0, 0, 0, 46, 47, 48, 49, 61, 0, 0, 244, 257, 0, 0, 0, 0, 0, 0, 0, 0, 0, 256, 254, 0, 277, 0, 278, 279, 280, 0, 255, 146, 0, 0, 253, 0, 173, 252, 110, 0, 0, 0, 0, 0, 129, 0, 112, 0, 0, 0, 0, 0, 0, 0, 22, 0, 295, 292, 296, 0, 52, 0, 309, 307, 308, 302, 298, 0, 299, 59, 285, 0, 286, 288, 0, 58, 313, 312, 0, 60, 72, 0, 0, 0, 0, 0, 0, 254, 255, 0, 259, 252, 0, 0, 0, 0, 247, 246, 251, 248, 245, 0, 0, 250, 0, 0, 170, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 238, 0, 0, 0, 174, 252, 0, 0, 0, 0, 0, 0, 143, 271, 0, 0, 266, 0, 0, 0, 0, 316, 0, 316, 0, 0, 0, 0, 0, 0, 0, 91, 0, 122, 92, 93, 115, 108, 109, 107, 0, 75, 145, 76, 117, 118, 120, 121, 119, 116, 77, 24, 0, 0, 300, 297, 301, 287, 0, 289, 291, 283, 315, 314, 310, 321, 62, 0, 0, 0, 0, 0, 265, 264, 0, 243, 0, 0, 165, 0, 168, 0, 189, 216, 202, 190, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 316, 0, 0, 0, 316, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
175, 0, 0, 0, 147, 0, 0, 154, 0, 0, 0, 267, 0, 144, 263, 0, 261, 141, 161, 258, 0, 0, 317, 0, 0, 0, 0, 0, 0, 0, 0, 104, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 134, 0, 0, 128, 138, 145, 0, 0, 0, 0, 290, 0, 0, 0, 0, 260, 242, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 176, 205, 204, 203, 193, 191, 192, 179, 180, 206, 207, 181, 184, 182, 183, 185, 186, 187, 188, 208, 209, 210, 211, 194, 195, 196, 177, 178, 212, 213, 197, 198, 200, 199, 201, 214, 215, 0, 0, 0, 236, 0, 0, 0, 0, 0, 0, 269, 142, 151, 0, 0, 0, 158, 0, 0, 160, 159, 149, 0, 94, 101, 102, 100, 98, 99, 95, 96, 97, 103, 0, 0, 0, 0, 0, 73, 137, 0, 0, 0, 32, 33, 37, 38, 0, 249, 167, 169, 171, 220, 0, 219, 0, 0, 226, 217, 218, 228, 229, 230, 225, 224, 227, 240, 231, 0, 233, 234, 239, 166, 235, 0, 150, 148, 0, 164, 163, 162, 268, 0, 156, 318, 172, 155, 0, 0, 0, 0, 0, 74, 139, 0, 0, 26, 25, 0, 0, 241, 0, 0, 0, 0, 0, 0, 153, 0, 0, 0, 130, 133, 135, 136, 56, 51, 221, 222, 223, 232, 237, 152, 0, 0, 0, 0, 0, 0, 157, 131, 132 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -293, -293, 1578, 1579, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, 1497, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, -293, 1373, -293, -293, -293, -192, -94, -293, 1471, -293, -293, -108, -293, 1022, -293, -293, 1281, 1135, -293, -196, -139, -195, -125, 1184, 1315, -138, -293, -90, -52, 1616, -293, -293, 1025, -293, -293, -293, 366, -293, -293, -293, -292, -293, 15 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int16 yydefgoto[] = { -1, 2, 14, 15, 3, 16, 17, 18, 19, 20, 73, 77, 21, 22, 23, 24, 114, 115, 25, 26, 27, 28, 29, 30, 31, 32, 53, 184, 33, 361, 34, 35, 36, 351, 151, 152, 153, 154, 155, 232, 358, 621, 509, 510, 139, 140, 218, 483, 321, 289, 322, 221, 222, 290, 333, 352, 323, 95, 178, 261, 111, 164, 174, 254, 120, 172, 181, 265, 484, 183, 74 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. 
If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_int16 yytable[] = { 156, 219, 223, 158, 465, 292, 320, 156, 658, 258, 247, 228, 376, 229, 167, 220, 230, 175, 293, 619, 359, 263, 228, 342, 229, 195, 179, 230, 38, 281, 185, 264, 282, 89, 156, 78, 51, 5, 6, 40, 180, 6, 362, 363, 364, 365, 366, 367, 42, 486, 240, 156, 248, 504, 52, 105, 227, 108, 505, 234, 156, 43, 235, 236, 237, 238, 506, 332, 44, 380, 277, 45, 507, 117, 360, 118, 269, 46, 198, 381, 327, 109, 259, 508, 260, 377, 279, 378, 224, 4, 176, 119, 395, 229, 382, 397, 230, 79, 399, 400, 401, 402, 403, 280, 1, 110, 7, 8, 9, 84, 8, 9, 379, 37, 113, 10, 177, 620, 10, 90, 208, 343, 54, 423, 466, 461, 540, 344, 659, 91, 544, 467, 92, 93, 355, 356, 357, 231, 156, 156, 156, 481, 294, 278, 485, 11, 354, 39, 11, 490, 170, 325, 171, 278, 334, 492, 493, 190, 494, 191, 94, 337, 80, 12, 81, -262, 12, 85, 338, 50, 102, 103, 13, 239, 353, 13, 55, 82, 83, 245, 141, 88, 353, 353, 353, 353, 353, 353, 255, 96, 267, 268, 142, 143, 144, 145, 596, 597, 146, 147, 148, 500, 501, 472, 149, 502, 503, 150, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, -263, 541, 542, 543, -263, 106, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 520, 582, 599, 597, 462, 374, 586, 463, 474, 195, 464, 97, 278, 345, 112, -261, 477, 346, 211, -261, 213, 91, 214, 215, 98, 99, 475, 600, 648, 597, 249, 353, 353, 497, 353, 113, 250, 116, 251, 606, 607, 608, 609, 610, 611, 612, 613, 614, 653, 597, 498, 499, 100, 500, 501, 252, 589, 502, 503, 590, 676, 195, 198, 677, 424, 425, 426, 427, 502, 503, 428, 123, 429, 278, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 208, 124, 274, 228, 125, 229, 195, 129, 230, 130, 198, 135, 211, 136, 213, 137, 
214, 215, 196, 495, 275, 138, 496, 157, 497, 328, 47, 48, 49, 276, 159, 664, 160, 666, 131, 132, 133, 134, 196, 678, 679, 498, 499, 161, 500, 501, 121, 122, 502, 503, 197, 162, 208, 163, 274, 165, 672, 673, 196, 198, 126, 166, 127, 128, 211, 283, 213, 169, 214, 215, 56, 683, 275, 684, 685, 353, 353, 353, 353, 353, 353, 353, 353, 353, 182, 283, 686, 173, 186, 187, 523, 188, 189, 192, 193, 688, 194, 241, 663, 226, 242, 208, 233, 274, 246, 283, 256, 284, 257, 285, 262, 219, 223, 211, 266, 213, 231, 214, 215, 270, 329, 275, 330, 286, 287, 220, 271, 284, 326, 285, 471, 272, 211, 288, 213, 273, 214, 215, 335, 336, 331, 708, 605, 286, 287, 340, 324, 284, 339, 285, 370, 371, 341, 291, 375, 389, 211, 670, 213, 383, 214, 215, 369, 286, 287, 384, 385, 386, 387, 390, 57, 278, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 391, 68, 69, 70, 71, 388, 72, 392, 393, 394, 396, 398, 404, 671, 353, 353, 224, 424, 425, 426, 427, 405, 406, 428, 407, 429, 468, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 408, 409, 428, 410, 429, 411, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 412, 413, 428, 414, 429, 415, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 416, 424, 425, 426, 427, 417, 469, 428, 418, 429, 524, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 419, 470, 428, 420, 429, 525, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 421, 422, 424, 425, 426, 427, 649, 650, 428, 473, 429, -270, 430, 431, 432, 433, 434, 435, 436, 
437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 478, 424, 425, 426, 427, 479, 635, 428, 480, 429, 482, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 488, 637, 428, 489, 429, 491, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 230, 512, 428, 638, 429, 513, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 515, 516, 428, 545, 429, 514, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 517, 518, 428, 634, 429, 519, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 521, 522, 428, 636, 429, 584, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 585, 587, 428, 639, 429, 588, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 591, 592, 428, 640, 429, -262, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 593, 601, 428, 641, 429, 595, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 602, 604, 428, 642, 429, 615, 430, 431, 432, 433, 434, 
435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 616, 623, 428, 643, 429, 624, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 625, 626, 428, 644, 429, 617, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 622, 627, 428, 645, 429, 628, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 629, 630, 428, 646, 429, 631, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 632, 655, 428, 647, 429, 633, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 654, 656, 428, 651, 429, 660, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 661, 669, 428, 652, 429, 662, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 665, 680, 428, 699, 429, 667, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 682, 687, 428, 700, 429, 691, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 690, 495, 693, 692, 496, 347, 497, 701, 695, 696, 698, 195, 697, 703, 498, 
499, 348, 500, 501, 705, 706, 502, 503, 498, 499, 707, 500, 501, 75, 709, 502, 503, 710, 295, 712, 296, 197, 713, 86, 87, 168, 225, 368, 297, 298, 299, 300, 702, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 198, 142, 143, 144, 145, 675, 511, 146, 147, 148, 618, 583, 476, 149, 681, 349, 150, 101, 0, 0, 0, 487, 142, 143, 144, 145, 211, 0, 213, 147, 214, 215, 195, 0, 0, 0, 0, 350, 196, 0, 0, 0, 208, 0, 274, 0, 0, 0, 0, 317, 619, 0, 0, 0, 211, 318, 213, 197, 214, 215, 0, 0, 275, 0, 76, 319, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 68, 69, 70, 71, 0, 72, 0, 0, 198, 199, 200, 201, 202, 0, 0, 142, 143, 144, 145, 211, 195, 213, 147, 214, 215, 0, 196, 40, 0, 0, 0, 0, 0, 0, 41, 0, 42, 0, 0, 0, 203, 204, 205, 0, 207, 197, 0, 0, 0, 43, 208, 0, 209, 0, 0, 0, 44, 210, 0, 45, 0, 0, 211, 212, 213, 46, 214, 215, 216, 0, 217, 674, 0, 198, 199, 200, 201, 202, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 0, 203, 204, 205, 206, 207, 0, 0, 0, 0, 0, 208, 0, 209, 0, 0, 0, 0, 210, 0, 0, 0, 0, 211, 212, 213, 0, 214, 215, 216, 0, 217, 424, 425, 426, 427, 0, 0, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 0, 0, 424, 425, 426, 427, 0, 594, 428, 0, 429, 460, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 598, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 603, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 
427, 0, 657, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 668, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 689, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 704, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 711, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 424, 425, 426, 427, 0, 0, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 425, 426, 427, 0, 0, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 427, 0, 0, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 428, 0, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 429, 0, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 495, 694, 0, 496, 0, 497, 0, 0, 0, 495, 0, 0, 496, 0, 497, 453, 454, 455, 456, 457, 458, 459, 498, 499, 0, 500, 501, 0, 0, 502, 503, 498, 499, 0, 500, 501, 0, 0, 
502, 503, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 496, 0, 497, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 498, 499, 0, 500, 501, 0, 0, 502, 503, -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 243, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 68, 69, 70, 71, 0, 72, 0, 0, 0, 0, 0, 372, 244, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 68, 69, 70, 71, 0, 72, 0, 0, 0, 0, 0, 104, 373, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 68, 69, 70, 71, 0, 72, 107, 0, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 68, 69, 70, 71, 253, 72, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 0, 68, 69, 70, 71, 0, 72 }; static const yytype_int16 yycheck[] = { 90, 140, 140, 97, 8, 200, 202, 97, 8, 33, 64, 6, 33, 8, 8, 140, 11, 33, 8, 29, 80, 108, 6, 219, 8, 11, 118, 11, 8, 132, 124, 118, 135, 9, 124, 20, 108, 0, 1, 72, 132, 1, 234, 235, 236, 237, 238, 239, 81, 341, 158, 141, 106, 103, 126, 40, 150, 42, 108, 14, 150, 94, 17, 18, 19, 20, 116, 206, 101, 108, 195, 104, 122, 114, 134, 116, 184, 110, 64, 118, 205, 108, 106, 133, 108, 106, 108, 108, 140, 77, 106, 132, 288, 8, 133, 291, 11, 8, 294, 295, 296, 297, 298, 125, 69, 132, 69, 70, 71, 69, 70, 71, 133, 128, 108, 78, 132, 127, 78, 95, 106, 129, 135, 319, 128, 321, 418, 135, 128, 105, 422, 135, 108, 109, 228, 229, 230, 132, 228, 229, 230, 337, 132, 195, 340, 108, 130, 127, 108, 345, 114, 203, 116, 205, 206, 347, 348, 74, 350, 76, 136, 128, 73, 126, 75, 132, 126, 127, 135, 108, 86, 87, 135, 128, 226, 135, 135, 88, 89, 164, 102, 134, 234, 235, 236, 237, 238, 239, 173, 9, 130, 131, 114, 115, 116, 117, 130, 131, 120, 121, 122, 32, 33, 328, 126, 36, 37, 129, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 128, 419, 420, 421, 132, 128, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 
443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 389, 460, 130, 131, 129, 254, 466, 132, 108, 11, 135, 95, 328, 128, 118, 128, 332, 132, 118, 132, 120, 105, 122, 123, 108, 109, 126, 487, 130, 131, 108, 347, 348, 12, 350, 108, 114, 90, 116, 495, 496, 497, 498, 499, 500, 501, 502, 503, 130, 131, 29, 30, 136, 32, 33, 133, 108, 36, 37, 111, 108, 11, 64, 111, 3, 4, 5, 6, 36, 37, 9, 129, 11, 389, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 106, 135, 108, 6, 128, 8, 11, 128, 11, 128, 64, 128, 118, 128, 120, 128, 122, 123, 17, 7, 126, 128, 10, 105, 12, 11, 91, 92, 93, 135, 8, 595, 8, 597, 82, 83, 84, 85, 17, 86, 87, 29, 30, 106, 32, 33, 48, 49, 36, 37, 36, 129, 106, 108, 108, 8, 616, 617, 17, 64, 82, 128, 84, 85, 118, 64, 120, 128, 122, 123, 8, 635, 126, 637, 638, 495, 496, 497, 498, 499, 500, 501, 502, 503, 108, 64, 650, 132, 129, 129, 133, 129, 129, 106, 106, 659, 127, 8, 591, 128, 106, 106, 102, 108, 118, 64, 8, 106, 106, 108, 8, 618, 618, 118, 8, 120, 132, 122, 123, 106, 106, 126, 108, 122, 123, 618, 106, 106, 129, 108, 135, 106, 118, 132, 120, 106, 122, 123, 122, 135, 126, 705, 130, 122, 123, 128, 108, 106, 129, 108, 132, 88, 129, 132, 106, 106, 118, 615, 120, 108, 122, 123, 130, 122, 123, 134, 131, 131, 131, 18, 108, 591, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 18, 121, 122, 123, 124, 131, 126, 110, 106, 132, 8, 8, 129, 615, 616, 617, 618, 3, 4, 5, 6, 129, 129, 9, 129, 11, 128, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 129, 129, 9, 129, 11, 129, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 129, 129, 9, 129, 11, 129, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 129, 3, 4, 5, 6, 129, 106, 9, 129, 11, 
133, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 129, 131, 9, 129, 11, 133, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 129, 129, 3, 4, 5, 6, 130, 131, 9, 135, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 8, 3, 4, 5, 6, 8, 131, 9, 106, 11, 106, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 106, 131, 9, 106, 11, 106, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 11, 77, 9, 131, 11, 106, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 106, 106, 9, 130, 11, 128, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 106, 106, 9, 130, 11, 106, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 133, 110, 9, 130, 11, 106, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 106, 106, 9, 130, 11, 130, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 106, 106, 9, 130, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 8, 130, 9, 130, 11, 128, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 128, 131, 9, 130, 11, 129, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 
34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 128, 131, 9, 130, 11, 106, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 130, 130, 9, 130, 11, 128, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 128, 130, 9, 130, 11, 130, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 135, 133, 9, 130, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 8, 130, 9, 130, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 8, 131, 9, 130, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 8, 106, 9, 130, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 8, 106, 9, 130, 11, 8, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, 106, 106, 9, 130, 11, 131, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 133, 7, 8, 131, 10, 33, 12, 130, 30, 30, 8, 11, 133, 133, 29, 30, 44, 32, 33, 128, 116, 36, 37, 29, 30, 116, 32, 33, 8, 130, 36, 37, 130, 33, 8, 35, 36, 8, 31, 31, 114, 141, 240, 43, 44, 45, 46, 130, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 114, 115, 116, 117, 618, 360, 120, 121, 122, 510, 462, 332, 126, 624, 107, 129, 36, -1, -1, -1, 128, 114, 115, 116, 117, 118, -1, 120, 121, 122, 123, 11, -1, -1, -1, -1, 129, 17, -1, -1, -1, 106, -1, 108, -1, -1, -1, -1, 113, 29, -1, -1, -1, 118, 119, 120, 36, 122, 123, -1, -1, 
126, -1, 108, 129, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, -1, 121, 122, 123, 124, -1, 126, -1, -1, 64, 65, 66, 67, 68, -1, -1, 114, 115, 116, 117, 118, 11, 120, 121, 122, 123, -1, 17, 72, -1, -1, -1, -1, -1, -1, 79, -1, 81, -1, -1, -1, 96, 97, 98, -1, 100, 36, -1, -1, -1, 94, 106, -1, 108, -1, -1, -1, 101, 113, -1, 104, -1, -1, 118, 119, 120, 110, 122, 123, 124, -1, 126, 127, -1, 64, 65, 66, 67, 68, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, -1, 96, 97, 98, 99, 100, -1, -1, -1, -1, -1, 106, -1, 108, -1, -1, -1, -1, 113, -1, -1, -1, -1, 118, 119, 120, -1, 122, 123, 124, -1, 126, 3, 4, 5, 6, -1, -1, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, -1, -1, 3, 4, 5, 6, -1, 8, 9, -1, 11, 65, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, 8, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 3, 4, 5, 6, -1, -1, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 4, 5, 6, -1, -1, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 6, -1, -1, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 9, -1, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 11, -1, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 7, 8, -1, 10, -1, 12, -1, -1, -1, 7, -1, -1, 10, -1, 12, 36, 37, 38, 39, 40, 41, 42, 29, 30, -1, 32, 33, -1, -1, 36, 37, 29, 30, -1, 32, 33, -1, -1, 36, 37, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 10, -1, 12, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 29, 30, -1, 32, 33, -1, -1, 36, 37, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 108, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, -1, 121, 122, 123, 124, -1, 126, -1, -1, -1, -1, -1, 108, 133, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, -1, 121, 122, 123, 124, -1, 126, -1, -1, -1, -1, -1, 108, 133, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, -1, 121, 122, 123, 124, -1, 126, 108, -1, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, -1, 121, 122, 123, 124, 108, 126, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, -1, 121, 122, 123, 124, -1, 126 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. 
*/
/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of
   state STATE-NUM.  Indexed by parser state; used by the debug helper
   yy_reduce_print (via yystos[+yyssp[...]]) to name the symbol that led
   into each state on the stack.  Bison-generated table -- do not edit
   by hand; regenerate from the grammar instead.  */
static const yytype_uint8 yystos[] =
{
       0,    69,   138,   141,    77,     0,     1,    69,    70,    71,
      78,   108,   126,   135,   139,   140,   142,   143,   144,   145,
     146,   149,   150,   151,   152,   155,   156,   157,   158,   159,
     160,   161,   162,   165,   167,   168,   169,   128,     8,   127,
      72,    79,    81,    94,   101,   104,   110,    91,    92,    93,
     108,   108,   126,   163,   135,   135,     8,   108,   110,   111,
     112,   113,   114,   115,   116,   117,   118,   119,   121,   122,
     123,   124,   126,   147,   207,     8,   108,   148,   207,     8,
      73,    75,    88,    89,    69,   127,   139,   140,   134,     9,
      95,   105,   108,   109,   136,   194,     9,    95,   108,   109,
     136,   194,    86,    87,   108,   207,   128,   108,   207,   108,
     132,   197,   118,   108,   153,   154,    90,   114,   116,   132,
     201,   201,   201,   129,   135,   128,    82,    84,    85,   128,
     128,    82,    83,    84,    85,   128,   128,   128,   128,   181,
     182,   102,   114,   115,   116,   117,   120,   121,   122,   126,
     129,   171,   172,   173,   174,   175,   192,   105,   171,     8,
       8,   106,   129,   108,   198,     8,   128,     8,   154,   128,
     114,   116,   202,   132,   199,    33,   106,   132,   195,   118,
     132,   203,   108,   206,   164,   171,   129,   129,   129,   129,
      74,    76,   106,   106,   127,    11,    17,    36,    64,    65,
      66,    67,    68,    96,    97,    98,    99,   100,   106,   108,
     113,   118,   119,   120,   122,   123,   124,   126,   183,   185,
     187,   188,   189,   190,   193,   173,   128,   171,     6,     8,
      11,   132,   176,   102,    14,    17,    18,    19,    20,   128,
     176,     8,   106,   108,   133,   207,   118,    64,   106,   108,
     114,   116,   133,   108,   200,   207,     8,   106,    33,   106,
     108,   196,     8,   108,   118,   204,     8,   130,   131,   176,
     106,   106,   106,   106,   108,   126,   135,   187,   193,   108,
     125,   132,   135,    64,   106,   108,   122,   123,   132,   186,
     190,   132,   186,     8,   132,    33,    35,    43,    44,    45,
      46,    48,    49,    50,    51,    52,    53,    54,    55,    56,
      57,    58,    59,    60,    61,    62,    63,   113,   119,   129,
     184,   185,   187,   193,   108,   193,   129,   187,    11,   106,
     108,   126,   185,   191,   193,   122,   135,   128,   135,   129,
     128,   129,   184,   129,   135,   128,   132,    33,    44,   107,
     129,   170,   192,   193,   130,   171,   171,   171,   177,    80,
     134,   166,   170,   170,   170,   170,   170,   170,   166,   130,
     132,    88,   108,   133,   207,   106,    33,   106,   108,   133,
     108,   118,   133,   108,   134,   131,   131,   131,   131,   106,
      18,    18,   110,   106,   132,   184,     8,   184,     8,   184,
     184,   184,   184,   184,   129,   129,   129,   129,   129,   129,
     129,   129,   129,   129,   129,   129,   129,   129,   129,   129,
     129,   129,   129,   184,     3,     4,     5,     6,     9,    11,
      13,    14,    15,    16,    17,    18,    19,    20,    21,    22,
      23,    24,    25,    26,    27,    28,    29,    30,    31,    32,
      33,    34,    35,    36,    37,    38,    39,    40,    41,    42,
      65,   184,   129,   132,   135,     8,   128,   135,   128,   106,
     131,   135,   187,   135,   108,   126,   189,   193,     8,     8,
     106,   184,   106,   184,   205,   184,   205,   128,   106,   106,
     184,   106,   170,   170,   170,     7,    10,    12,    29,    30,
      32,    33,    36,    37,   103,   108,   116,   122,   133,   179,
     180,   181,    77,   106,   128,   106,   106,   106,   106,   106,
     187,   133,   110,   133,   133,   133,   184,   184,   184,   184,
     184,   184,   184,   184,   184,   184,   184,   184,   184,   184,
     205,   184,   184,   184,   205,   130,   184,   184,   184,   184,
     184,   184,   184,   184,   184,   184,   184,   184,   184,   184,
     184,   184,   184,   184,   184,   184,   184,   184,   184,   184,
     184,   184,   184,   184,   184,   184,   184,   184,   184,   184,
     184,   184,   186,   188,   106,   106,   184,   106,   130,   108,
     111,   106,   106,     8,     8,   128,   130,   131,     8,   130,
     184,   130,   128,     8,   131,   130,   170,   170,   170,   170,
     170,   170,   170,   170,   170,   129,   128,   128,   182,    29,
     127,   178,   128,   131,   106,   130,   130,   130,   130,   135,
     133,     8,     8,     8,   130,   131,   130,   131,   131,   130,
     130,   130,   130,   130,   130,   130,   130,   130,   130,   130,
     131,   130,   130,   130,     8,   130,   131,     8,     8,   128,
       8,     8,     8,   187,   184,     8,   184,     8,     8,   106,
     192,   193,   170,   170,   127,   178,   108,   111,    86,    87,
     106,   197,   106,   184,   184,   184,   184,   106,   184,     8,
     133,   131,   131,     8,     8,    30,    30,   133,     8,   130,
     130,   130,   130,   133,     8,   128,   116,   116,   184,   130,
     130,     8,     8,     8
};

/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives.
*/ static const yytype_uint8 yyr1[] = { 0, 137, 138, 138, 138, 138, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 140, 140, 140, 140, 141, 141, 142, 143, 144, 144, 144, 144, 144, 145, 146, 146, 147, 147, 147, 147, 147, 148, 148, 148, 148, 148, 148, 149, 150, 150, 150, 150, 150, 150, 150, 151, 151, 152, 153, 153, 154, 155, 156, 157, 158, 159, 160, 161, 161, 161, 162, 163, 163, 163, 164, 164, 165, 166, 166, 166, 167, 167, 168, 168, 168, 168, 168, 168, 168, 168, 168, 169, 169, 169, 169, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 171, 171, 171, 171, 172, 172, 173, 173, 174, 174, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 176, 176, 177, 177, 177, 177, 177, 177, 178, 178, 179, 180, 180, 181, 181, 181, 181, 181, 182, 182, 182, 182, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 185, 185, 185, 185, 186, 186, 186, 186, 186, 186, 186, 187, 187, 187, 187, 188, 188, 188, 188, 188, 189, 189, 189, 190, 190, 191, 191, 191, 191, 191, 191, 192, 192, 192, 192, 192, 193, 193, 193, 193, 194, 194, 195, 195, 195, 196, 196, 196, 196, 196, 196, 197, 197, 198, 198, 198, 199, 200, 200, 200, 200, 201, 201, 201, 202, 202, 202, 202, 202, 203, 203, 204, 204, 204, 204, 205, 205, 205, 206, 206, 206, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. 
*/ static const yytype_int8 yyr2[] = { 0, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 5, 5, 5, 2, 6, 9, 9, 2, 3, 2, 3, 2, 7, 7, 2, 2, 2, 7, 7, 2, 2, 2, 2, 2, 3, 3, 4, 4, 4, 4, 2, 10, 5, 4, 1, 2, 8, 4, 5, 5, 5, 4, 6, 1, 2, 2, 2, 0, 1, 1, 0, 1, 5, 3, 4, 1, 5, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 3, 3, 3, 2, 1, 2, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 1, 1, 1, 1, 0, 3, 0, 5, 8, 8, 5, 2, 3, 3, 2, 1, 3, 1, 4, 5, 3, 4, 0, 2, 4, 6, 4, 5, 4, 7, 6, 3, 5, 5, 9, 4, 4, 4, 3, 5, 5, 5, 3, 5, 5, 3, 5, 2, 5, 5, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 4, 4, 4, 4, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, 6, 4, 4, 4, 3, 6, 1, 4, 4, 6, 4, 3, 1, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 3, 2, 4, 1, 1, 1, 3, 3, 1, 2, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 1, 2, 1, 2, 1, 2, 3, 2, 3, 1, 1, 2, 2, 3, 1, 1, 2, 2, 3, 1, 1, 1, 1, 2, 2, 2, 3, 1, 1, 1, 2, 2, 0, 1, 3, 0, 1, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* Enable debugging if requested. */ #if SLEIGHDEBUG # ifndef YYFPRINTF # include /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* This macro is provided for backward compatibility. 
*/ #ifndef YY_LOCATION_PRINT # define YY_LOCATION_PRINT(File, Loc) ((void) 0) #endif # define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ Type, Value); \ YYFPRINTF (stderr, "\n"); \ } \ } while (0) /*-----------------------------------. | Print this symbol's value on YYO. | `-----------------------------------*/ static void yy_symbol_value_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep) { FILE *yyoutput = yyo; YYUSE (yyoutput); if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyo, yytoknum[yytype], *yyvaluep); # endif YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE (yytype); YY_IGNORE_MAYBE_UNINITIALIZED_END } /*---------------------------. | Print this symbol on YYO. | `---------------------------*/ static void yy_symbol_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep) { YYFPRINTF (yyo, "%s %s (", yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); yy_symbol_value_print (yyo, yytype, yyvaluep); YYFPRINTF (yyo, ")"); } /*------------------------------------------------------------------. | yy_stack_print -- Print the state stack from its BOTTOM up to its | | TOP (included). | `------------------------------------------------------------------*/ static void yy_stack_print (yy_state_t *yybottom, yy_state_t *yytop) { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) { int yybot = *yybottom; YYFPRINTF (stderr, " %d", yybot); } YYFPRINTF (stderr, "\n"); } # define YY_STACK_PRINT(Bottom, Top) \ do { \ if (yydebug) \ yy_stack_print ((Bottom), (Top)); \ } while (0) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. 
| `------------------------------------------------*/ static void yy_reduce_print (yy_state_t *yyssp, YYSTYPE *yyvsp, int yyrule) { int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; YYFPRINTF (stderr, "Reducing stack by rule %d (line %d):\n", yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); yy_symbol_print (stderr, yystos[+yyssp[yyi + 1 - yynrhs]], &yyvsp[(yyi + 1) - (yynrhs)] ); YYFPRINTF (stderr, "\n"); } } # define YY_REDUCE_PRINT(Rule) \ do { \ if (yydebug) \ yy_reduce_print (yyssp, yyvsp, Rule); \ } while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ int yydebug; #else /* !SLEIGHDEBUG */ # define YYDPRINTF(Args) # define YY_SYMBOL_PRINT(Title, Type, Value, Location) # define YY_STACK_PRINT(Bottom, Top) # define YY_REDUCE_PRINT(Rule) #endif /* !SLEIGHDEBUG */ /* YYINITDEPTH -- initial size of the parser's stacks. */ #ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only if the built-in stack extension method is used). Do not make this value too large; the results are undefined if YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif #if YYERROR_VERBOSE # ifndef yystrlen # if defined __GLIBC__ && defined _STRING_H # define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S))) # else /* Return the length of YYSTR. */ static YYPTRDIFF_T yystrlen (const char *yystr) { YYPTRDIFF_T yylen; for (yylen = 0; yystr[yylen]; yylen++) continue; return yylen; } # endif # endif # ifndef yystpcpy # if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. 
*/
/* Fallback stpcpy: copy and return pointer to the copied terminator.  */
static char *
yystpcpy (char *yydest, const char *yysrc)
{
  char *yyd = yydest;
  const char *yys = yysrc;

  while ((*yyd++ = *yys++) != '\0')
    continue;

  return yyd - 1;
}
#  endif
# endif

# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
   quotes and backslashes, so that it's suitable for yyerror.  The
   heuristic is that double-quoting is unnecessary unless the string
   contains an apostrophe, a comma, or backslash (other than
   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is
   null, do not copy; instead, return the length of what the result
   would have been.  */
static YYPTRDIFF_T
yytnamerr (char *yyres, const char *yystr)
{
  if (*yystr == '"')
    {
      YYPTRDIFF_T yyn = 0;
      char const *yyp = yystr;

      for (;;)
        switch (*++yyp)
          {
          case '\'':
          case ',':
            goto do_not_strip_quotes;

          case '\\':
            if (*++yyp != '\\')
              goto do_not_strip_quotes;
            else
              goto append;

          append:
          default:
            if (yyres)
              yyres[yyn] = *yyp;
            yyn++;
            break;

          case '"':
            /* Closing quote: terminate and report the stripped length.  */
            if (yyres)
              yyres[yyn] = '\0';
            return yyn;
          }
    do_not_strip_quotes: ;
    }

  if (yyres)
    return yystpcpy (yyres, yystr) - yyres;
  else
    return yystrlen (yystr);
}
# endif

/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
   about the unexpected token YYTOKEN for the state stack whose top is
   YYSSP.

   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is
   not large enough to hold the message.  In that case, also set
   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the
   required number of bytes is too large to store.  */
static int
yysyntax_error (YYPTRDIFF_T *yymsg_alloc, char **yymsg,
                yy_state_t *yyssp, int yytoken)
{
  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
  /* Internationalized format string. */
  const char *yyformat = YY_NULLPTR;
  /* Arguments of yyformat: reported tokens (one for the "unexpected",
     one per "expected"). */
  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
  /* Actual size of YYARG. */
  int yycount = 0;
  /* Cumulated lengths of YYARG.
*/
  YYPTRDIFF_T yysize = 0;

  /* There are many possibilities here to consider:
     - If this state is a consistent state with a default action, then
       the only way this function was invoked is if the default action
       is an error action.  In that case, don't check for expected
       tokens because there are none.
     - The only way there can be no lookahead present (in yychar) is if
       this state is a consistent state with a default action.  Thus,
       detecting the absence of a lookahead is sufficient to determine
       that there is no unexpected or expected token to report.  In that
       case, just report a simple "syntax error".
     - Don't assume there isn't a lookahead just because this state is a
       consistent state with a default action.  There might have been a
       previous inconsistent state, consistent state with a non-default
       action, or user semantic action that manipulated yychar.
     - Of course, the expected token list depends on states to have
       correct lookahead information, and it depends on the parser not
       to perform extra reductions after fetching a lookahead from the
       scanner and before detecting a syntax error.  Thus, state merging
       (from LALR or IELR) and default reductions corrupt the expected
       token list.  However, the list is correct for canonical LR with
       one exception: it will still contain any token that will not be
       accepted due to an error action in a later state.
  */
  if (yytoken != YYEMPTY)
    {
      int yyn = yypact[+*yyssp];
      YYPTRDIFF_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
      yysize = yysize0;
      yyarg[yycount++] = yytname[yytoken];
      if (!yypact_value_is_default (yyn))
        {
          /* Start YYX at -YYN if negative to avoid negative indexes in
             YYCHECK.  In other words, skip the first -YYN actions for
             this state because they are default actions.  */
          int yyxbegin = yyn < 0 ? -yyn : 0;
          /* Stay within bounds of both yycheck and yytname.  */
          int yychecklim = YYLAST - yyn + 1;
          int yyxend = yychecklim < YYNTOKENS ?
yychecklim : YYNTOKENS;
          int yyx;

          for (yyx = yyxbegin; yyx < yyxend; ++yyx)
            if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
                && !yytable_value_is_error (yytable[yyx + yyn]))
              {
                if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
                  {
                    /* Too many expected tokens: fall back to the plain
                       "syntax error, unexpected %s" message.  */
                    yycount = 1;
                    yysize = yysize0;
                    break;
                  }
                yyarg[yycount++] = yytname[yyx];
                {
                  YYPTRDIFF_T yysize1
                    = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
                  if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
                    yysize = yysize1;
                  else
                    return 2;
                }
              }
        }
    }

  switch (yycount)
    {
# define YYCASE_(N, S)                      \
      case N:                               \
        yyformat = S;                       \
      break
    default: /* Avoid compiler warnings. */
      YYCASE_(0, YY_("syntax error"));
      YYCASE_(1, YY_("syntax error, unexpected %s"));
      YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
      YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
      YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
      YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
# undef YYCASE_
    }

  {
    /* Don't count the "%s"s in the final size, but reserve room for
       the terminator. */
    YYPTRDIFF_T yysize1 = yysize + (yystrlen (yyformat) - 2 * yycount) + 1;
    if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
      yysize = yysize1;
    else
      return 2;
  }

  if (*yymsg_alloc < yysize)
    {
      *yymsg_alloc = 2 * yysize;
      if (! (yysize <= *yymsg_alloc
             && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
        *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
      return 1;
    }

  /* Avoid sprintf, as that infringes on the user's name space.
     Don't have undefined behavior even if the translation
     produced a string with the wrong number of "%s"s.  */
  {
    char *yyp = *yymsg;
    int yyi = 0;
    while ((*yyp = *yyformat) != '\0')
      if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
        {
          yyp += yytnamerr (yyp, yyarg[yyi++]);
          yyformat += 2;
        }
      else
        {
          ++yyp;
          ++yyformat;
        }
  }
  return 0;
}
#endif /* YYERROR_VERBOSE */


/*-----------------------------------------------.
| Release the memory associated to this symbol.
| `-----------------------------------------------*/ static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) { YYUSE (yyvaluep); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE (yytype); YY_IGNORE_MAYBE_UNINITIALIZED_END } /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*----------. | yyparse. | `----------*/ int yyparse (void) { yy_state_fast_t yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: 'yyss': related to states. 'yyvs': related to semantic values. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yy_state_t yyssa[YYINITDEPTH]; yy_state_t *yyss; yy_state_t *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYPTRDIFF_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; /*------------------------------------------------------------. | yynewstate -- push a new state, which is found in yystate. 
| `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; /*--------------------------------------------------------------------. | yysetstate -- set current state (the top of the stack) to yystate. | `--------------------------------------------------------------------*/ yysetstate: YYDPRINTF ((stderr, "Entering state %d\n", yystate)); YY_ASSERT (0 <= yystate && yystate < YYNSTATES); YY_IGNORE_USELESS_CAST_BEGIN *yyssp = YY_CAST (yy_state_t, yystate); YY_IGNORE_USELESS_CAST_END if (yyss + yystacksize - 1 <= yyssp) #if !defined yyoverflow && !defined YYSTACK_RELOCATE goto yyexhaustedlab; #else { /* Get the current used size of the three stacks, in elements. */ YYPTRDIFF_T yysize = yyssp - yyss + 1; # if defined yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ yy_state_t *yyss1 = yyss; YYSTYPE *yyvs1 = yyvs; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * YYSIZEOF (*yyssp), &yyvs1, yysize * YYSIZEOF (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } # else /* defined YYSTACK_RELOCATE */ /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yy_state_t *yyss1 = yyss; union yyalloc *yyptr = YY_CAST (union yyalloc *, YYSTACK_ALLOC (YY_CAST (YYSIZE_T, YYSTACK_BYTES (yystacksize)))); if (! 
yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YY_IGNORE_USELESS_CAST_BEGIN YYDPRINTF ((stderr, "Stack size increased to %ld\n", YY_CAST (long, yystacksize))); YY_IGNORE_USELESS_CAST_END if (yyss + yystacksize - 1 <= yyssp) YYABORT; } #endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */ if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. */ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Discard the shifted token. 
*/ yychar = YYEMPTY; goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 19: { slgh->resetConstructors(); } break; case 20: { slgh->setEndian(1); } break; case 21: { slgh->setEndian(0); } break; case 22: { slgh->setAlignment(*(yyvsp[-1].i)); delete (yyvsp[-1].i); } break; case 23: {} break; case 24: { (yyval.tokensym) = slgh->defineToken((yyvsp[-3].str),(yyvsp[-1].i),0); } break; case 25: { (yyval.tokensym) = slgh->defineToken((yyvsp[-6].str),(yyvsp[-4].i),-1); } break; case 26: { (yyval.tokensym) = slgh->defineToken((yyvsp[-6].str),(yyvsp[-4].i),1); } break; case 27: { (yyval.tokensym) = (yyvsp[-1].tokensym); slgh->addTokenField((yyvsp[-1].tokensym),(yyvsp[0].fieldqual)); } break; case 28: { string errmsg=(yyvsp[0].anysym)->getName()+": redefined as a token"; slgh->reportError(errmsg); YYERROR; } break; case 29: {} break; case 30: { (yyval.varsym) = (yyvsp[0].varsym); } break; case 31: { (yyval.varsym) = (yyvsp[-1].varsym); if (!slgh->addContextField( (yyvsp[-1].varsym), (yyvsp[0].fieldqual) )) { slgh->reportError("All context definitions must come before constructors"); YYERROR; } } break; case 32: { (yyval.fieldqual) = new 
FieldQuality((yyvsp[-6].str),(yyvsp[-3].i),(yyvsp[-1].i)); } break; case 33: { delete (yyvsp[-3].i); delete (yyvsp[-1].i); string errmsg = (yyvsp[-6].anysym)->getName()+": redefined as field"; slgh->reportError(errmsg); YYERROR; } break; case 34: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->signext = true; } break; case 35: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->hex = true; } break; case 36: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->hex = false; } break; case 37: { (yyval.fieldqual) = new FieldQuality((yyvsp[-6].str),(yyvsp[-3].i),(yyvsp[-1].i)); } break; case 38: { delete (yyvsp[-3].i); delete (yyvsp[-1].i); string errmsg = (yyvsp[-6].anysym)->getName()+": redefined as field"; slgh->reportError(errmsg); YYERROR; } break; case 39: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->signext = true; } break; case 40: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->flow = false; } break; case 41: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->hex = true; } break; case 42: { (yyval.fieldqual) = (yyvsp[-1].fieldqual); (yyval.fieldqual)->hex = false; } break; case 43: { slgh->newSpace((yyvsp[-1].spacequal)); } break; case 44: { (yyval.spacequal) = new SpaceQuality(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 45: { string errmsg = (yyvsp[0].anysym)->getName()+": redefined as space"; slgh->reportError(errmsg); YYERROR; } break; case 46: { (yyval.spacequal) = (yyvsp[-3].spacequal); (yyval.spacequal)->type = SpaceQuality::ramtype; } break; case 47: { (yyval.spacequal) = (yyvsp[-3].spacequal); (yyval.spacequal)->type = SpaceQuality::registertype; } break; case 48: { (yyval.spacequal) = (yyvsp[-3].spacequal); (yyval.spacequal)->size = *(yyvsp[0].i); delete (yyvsp[0].i); } break; case 49: { (yyval.spacequal) = (yyvsp[-3].spacequal); (yyval.spacequal)->wordsize = *(yyvsp[0].i); delete (yyvsp[0].i); } break; case 50: { (yyval.spacequal) = (yyvsp[-1].spacequal); 
(yyval.spacequal)->isdefault = true; } break; case 51: { slgh->defineVarnodes((yyvsp[-8].spacesym),(yyvsp[-5].i),(yyvsp[-2].i),(yyvsp[-1].strlist)); } break; case 52: { slgh->reportError("Parsed integer is too big (overflow)"); YYERROR; } break; case 56: { slgh->defineBitrange((yyvsp[-7].str),(yyvsp[-5].varsym),(uint4)*(yyvsp[-3].i),(uint4)*(yyvsp[-1].i)); delete (yyvsp[-3].i); delete (yyvsp[-1].i); } break; case 57: { slgh->addUserOp((yyvsp[-1].strlist)); } break; case 58: { slgh->attachValues((yyvsp[-2].symlist),(yyvsp[-1].biglist)); } break; case 59: { slgh->attachNames((yyvsp[-2].symlist),(yyvsp[-1].strlist)); } break; case 60: { slgh->attachVarnodes((yyvsp[-2].symlist),(yyvsp[-1].symlist)); } break; case 61: { slgh->buildMacro((yyvsp[-3].macrosym),(yyvsp[-1].sem)); } break; case 62: { slgh->pushWith((yyvsp[-4].subtablesym),(yyvsp[-2].pateq),(yyvsp[-1].contop)); } break; case 66: { slgh->popWith(); } break; case 67: { (yyval.subtablesym) = (SubtableSymbol *)0; } break; case 68: { (yyval.subtablesym) = (yyvsp[0].subtablesym); } break; case 69: { (yyval.subtablesym) = slgh->newTable((yyvsp[0].str)); } break; case 70: { (yyval.pateq) = (PatternEquation *)0; } break; case 71: { (yyval.pateq) = (yyvsp[0].pateq); } break; case 72: { (yyval.macrosym) = slgh->createMacro((yyvsp[-3].str),(yyvsp[-1].strlist)); } break; case 73: { (yyval.sectionstart) = slgh->standaloneSection((yyvsp[-1].sem)); } break; case 74: { (yyval.sectionstart) = slgh->finalNamedSection((yyvsp[-2].sectionstart),(yyvsp[-1].sem)); } break; case 75: { (yyval.sectionstart) = (SectionVector *)0; } break; case 76: { slgh->buildConstructor((yyvsp[-4].construct),(yyvsp[-2].pateq),(yyvsp[-1].contop),(yyvsp[0].sectionstart)); } break; case 77: { slgh->buildConstructor((yyvsp[-4].construct),(yyvsp[-2].pateq),(yyvsp[-1].contop),(yyvsp[0].sectionstart)); } break; case 78: { (yyval.construct) = (yyvsp[-1].construct); (yyval.construct)->addSyntax(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 79: { 
(yyval.construct) = (yyvsp[-1].construct); (yyval.construct)->addSyntax(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 80: { (yyval.construct) = (yyvsp[-1].construct); if (slgh->isInRoot((yyvsp[-1].construct))) { (yyval.construct)->addSyntax(*(yyvsp[0].str)); delete (yyvsp[0].str); } else slgh->newOperand((yyvsp[-1].construct),(yyvsp[0].str)); } break; case 81: { (yyval.construct) = (yyvsp[-1].construct); if (!slgh->isInRoot((yyvsp[-1].construct))) { slgh->reportError("Unexpected '^' at start of print pieces"); YYERROR; } } break; case 82: { (yyval.construct) = (yyvsp[-1].construct); } break; case 83: { (yyval.construct) = (yyvsp[-1].construct); (yyval.construct)->addSyntax(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 84: { (yyval.construct) = (yyvsp[-1].construct); (yyval.construct)->addSyntax(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 85: { (yyval.construct) = (yyvsp[-1].construct); (yyval.construct)->addSyntax(string(" ")); } break; case 86: { (yyval.construct) = (yyvsp[-1].construct); slgh->newOperand((yyvsp[-1].construct),(yyvsp[0].str)); } break; case 87: { (yyval.construct) = slgh->createConstructor((yyvsp[-1].subtablesym)); } break; case 88: { SubtableSymbol *sym=slgh->newTable((yyvsp[-1].str)); (yyval.construct) = slgh->createConstructor(sym); } break; case 89: { (yyval.construct) = slgh->createConstructor((SubtableSymbol *)0); } break; case 90: { (yyval.construct) = (yyvsp[-1].construct); } break; case 91: { (yyval.patexp) = new ConstantValue(*(yyvsp[0].big)); delete (yyvsp[0].big); } break; case 92: { if ((actionon==1)&&((yyvsp[0].famsym)->getType() != SleighSymbol::context_symbol)) { string errmsg="Global symbol "+(yyvsp[0].famsym)->getName(); errmsg += " is not allowed in action expression"; slgh->reportError(errmsg); } (yyval.patexp) = (yyvsp[0].famsym)->getPatternValue(); } break; case 93: { (yyval.patexp) = (yyvsp[0].specsym)->getPatternExpression(); } break; case 94: { (yyval.patexp) = (yyvsp[-1].patexp); } break; case 
95: { (yyval.patexp) = new PlusExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 96: { (yyval.patexp) = new SubExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 97: { (yyval.patexp) = new MultExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 98: { (yyval.patexp) = new LeftShiftExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 99: { (yyval.patexp) = new RightShiftExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 100: { (yyval.patexp) = new AndExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 101: { (yyval.patexp) = new OrExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 102: { (yyval.patexp) = new XorExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 103: { (yyval.patexp) = new DivExpression((yyvsp[-2].patexp),(yyvsp[0].patexp)); } break; case 104: { (yyval.patexp) = new MinusExpression((yyvsp[0].patexp)); } break; case 105: { (yyval.patexp) = new NotExpression((yyvsp[0].patexp)); } break; case 107: { (yyval.pateq) = new EquationAnd((yyvsp[-2].pateq),(yyvsp[0].pateq)); } break; case 108: { (yyval.pateq) = new EquationOr((yyvsp[-2].pateq),(yyvsp[0].pateq)); } break; case 109: { (yyval.pateq) = new EquationCat((yyvsp[-2].pateq),(yyvsp[0].pateq)); } break; case 110: { (yyval.pateq) = new EquationLeftEllipsis((yyvsp[0].pateq)); } break; case 112: { (yyval.pateq) = new EquationRightEllipsis((yyvsp[-1].pateq)); } break; case 115: { (yyval.pateq) = (yyvsp[-1].pateq); } break; case 116: { (yyval.pateq) = new EqualEquation((yyvsp[-2].famsym)->getPatternValue(),(yyvsp[0].patexp)); } break; case 117: { (yyval.pateq) = new NotEqualEquation((yyvsp[-2].famsym)->getPatternValue(),(yyvsp[0].patexp)); } break; case 118: { (yyval.pateq) = new LessEquation((yyvsp[-2].famsym)->getPatternValue(),(yyvsp[0].patexp)); } break; case 119: { (yyval.pateq) = new LessEqualEquation((yyvsp[-2].famsym)->getPatternValue(),(yyvsp[0].patexp)); } break; case 120: { (yyval.pateq) = new 
GreaterEquation((yyvsp[-2].famsym)->getPatternValue(),(yyvsp[0].patexp)); } break; case 121: { (yyval.pateq) = new GreaterEqualEquation((yyvsp[-2].famsym)->getPatternValue(),(yyvsp[0].patexp)); } break; case 122: { (yyval.pateq) = slgh->constrainOperand((yyvsp[-2].operandsym),(yyvsp[0].patexp)); if ((yyval.pateq) == (PatternEquation *)0) { string errmsg="Constraining currently undefined operand "+(yyvsp[-2].operandsym)->getName(); slgh->reportError(errmsg); } } break; case 123: { (yyval.pateq) = new OperandEquation((yyvsp[0].operandsym)->getIndex()); slgh->selfDefine((yyvsp[0].operandsym)); } break; case 124: { (yyval.pateq) = new UnconstrainedEquation((yyvsp[0].specsym)->getPatternExpression()); } break; case 125: { (yyval.pateq) = slgh->defineInvisibleOperand((yyvsp[0].famsym)); } break; case 126: { (yyval.pateq) = slgh->defineInvisibleOperand((yyvsp[0].subtablesym)); } break; case 127: { (yyval.contop) = (vector *)0; } break; case 128: { (yyval.contop) = (yyvsp[-1].contop); } break; case 129: { (yyval.contop) = new vector; } break; case 130: { (yyval.contop) = (yyvsp[-4].contop); if (!slgh->contextMod((yyvsp[-4].contop),(yyvsp[-3].contextsym),(yyvsp[-1].patexp))) { string errmsg="Cannot use 'inst_next' or 'inst_next2' to set context variable: "+(yyvsp[-3].contextsym)->getName(); slgh->reportError(errmsg); YYERROR; } } break; case 131: { (yyval.contop) = (yyvsp[-7].contop); slgh->contextSet((yyvsp[-7].contop),(yyvsp[-4].famsym),(yyvsp[-2].contextsym)); } break; case 132: { (yyval.contop) = (yyvsp[-7].contop); slgh->contextSet((yyvsp[-7].contop),(yyvsp[-4].specsym),(yyvsp[-2].contextsym)); } break; case 133: { (yyval.contop) = (yyvsp[-4].contop); slgh->defineOperand((yyvsp[-3].operandsym),(yyvsp[-1].patexp)); } break; case 134: { string errmsg="Expecting context symbol, not "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 135: { (yyval.sectionsym) = slgh->newSectionSymbol( *(yyvsp[-1].str) ); delete (yyvsp[-1].str); } 
break; case 136: { (yyval.sectionsym) = (yyvsp[-1].sectionsym); } break; case 137: { (yyval.sectionstart) = slgh->firstNamedSection((yyvsp[-1].sem),(yyvsp[0].sectionsym)); } break; case 138: { (yyval.sectionstart) = (yyvsp[0].sectionstart); } break; case 139: { (yyval.sectionstart) = slgh->nextNamedSection((yyvsp[-2].sectionstart),(yyvsp[-1].sem),(yyvsp[0].sectionsym)); } break; case 140: { (yyval.sem) = (yyvsp[0].sem); if ((yyval.sem)->getOpvec().empty() && ((yyval.sem)->getResult() == (HandleTpl *)0)) slgh->recordNop(); } break; case 141: { (yyval.sem) = slgh->setResultVarnode((yyvsp[-3].sem),(yyvsp[-1].varnode)); } break; case 142: { (yyval.sem) = slgh->setResultStarVarnode((yyvsp[-4].sem),(yyvsp[-2].starqual),(yyvsp[-1].varnode)); } break; case 143: { string errmsg="Unknown export varnode: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 144: { string errmsg="Unknown pointer varnode: "+*(yyvsp[0].str); delete (yyvsp[-1].starqual); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 145: { (yyval.sem) = slgh->enterSection(); } break; case 146: { (yyval.sem) = (yyvsp[-1].sem); if (!(yyval.sem)->addOpList(*(yyvsp[0].stmt))) { delete (yyvsp[0].stmt); slgh->reportError("Multiple delayslot declarations"); YYERROR; } delete (yyvsp[0].stmt); } break; case 147: { (yyval.sem) = (yyvsp[-3].sem); slgh->pcode.newLocalDefinition((yyvsp[-1].str)); } break; case 148: { (yyval.sem) = (yyvsp[-5].sem); slgh->pcode.newLocalDefinition((yyvsp[-3].str),*(yyvsp[-1].i)); delete (yyvsp[-1].i); } break; case 149: { (yyvsp[-1].tree)->setOutput((yyvsp[-3].varnode)); (yyval.stmt) = ExprTree::toVector((yyvsp[-1].tree)); } break; case 150: { (yyval.stmt) = slgh->pcode.newOutput(true,(yyvsp[-1].tree),(yyvsp[-3].str)); } break; case 151: { (yyval.stmt) = slgh->pcode.newOutput(false,(yyvsp[-1].tree),(yyvsp[-3].str)); } break; case 152: { (yyval.stmt) = slgh->pcode.newOutput(true,(yyvsp[-1].tree),(yyvsp[-5].str),*(yyvsp[-3].i)); 
delete (yyvsp[-3].i); } break; case 153: { (yyval.stmt) = slgh->pcode.newOutput(true,(yyvsp[-1].tree),(yyvsp[-5].str),*(yyvsp[-3].i)); delete (yyvsp[-3].i); } break; case 154: { (yyval.stmt) = (vector *)0; string errmsg = "Redefinition of symbol: "+(yyvsp[-1].specsym)->getName(); slgh->reportError(errmsg); YYERROR; } break; case 155: { (yyval.stmt) = slgh->pcode.createStore((yyvsp[-4].starqual),(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 156: { (yyval.stmt) = slgh->pcode.createUserOpNoOut((yyvsp[-4].useropsym),(yyvsp[-2].param)); } break; case 157: { (yyval.stmt) = slgh->pcode.assignBitRange((yyvsp[-8].varnode),(uint4)*(yyvsp[-6].i),(uint4)*(yyvsp[-4].i),(yyvsp[-1].tree)); delete (yyvsp[-6].i), delete (yyvsp[-4].i); } break; case 158: { (yyval.stmt)=slgh->pcode.assignBitRange((yyvsp[-3].bitsym)->getParentSymbol()->getVarnode(),(yyvsp[-3].bitsym)->getBitOffset(),(yyvsp[-3].bitsym)->numBits(),(yyvsp[-1].tree)); } break; case 159: { delete (yyvsp[-3].varnode); delete (yyvsp[-1].i); slgh->reportError("Illegal truncation on left-hand side of assignment"); YYERROR; } break; case 160: { delete (yyvsp[-3].varnode); delete (yyvsp[-1].i); slgh->reportError("Illegal subpiece on left-hand side of assignment"); YYERROR; } break; case 161: { (yyval.stmt) = slgh->pcode.createOpConst(BUILD,(yyvsp[-1].operandsym)->getIndex()); } break; case 162: { (yyval.stmt) = slgh->createCrossBuild((yyvsp[-3].varnode),(yyvsp[-1].sectionsym)); } break; case 163: { (yyval.stmt) = slgh->createCrossBuild((yyvsp[-3].varnode),slgh->newSectionSymbol(*(yyvsp[-1].str))); delete (yyvsp[-1].str); } break; case 164: { (yyval.stmt) = slgh->pcode.createOpConst(DELAY_SLOT,*(yyvsp[-2].i)); delete (yyvsp[-2].i); } break; case 165: { (yyval.stmt) = slgh->pcode.createOpNoOut(CPUI_BRANCH,new ExprTree((yyvsp[-1].varnode))); } break; case 166: { (yyval.stmt) = slgh->pcode.createOpNoOut(CPUI_CBRANCH,new ExprTree((yyvsp[-1].varnode)),(yyvsp[-3].tree)); } break; case 167: { (yyval.stmt) = 
slgh->pcode.createOpNoOut(CPUI_BRANCHIND,(yyvsp[-2].tree)); } break; case 168: { (yyval.stmt) = slgh->pcode.createOpNoOut(CPUI_CALL,new ExprTree((yyvsp[-1].varnode))); } break; case 169: { (yyval.stmt) = slgh->pcode.createOpNoOut(CPUI_CALLIND,(yyvsp[-2].tree)); } break; case 170: { slgh->reportError("Must specify an indirect parameter for return"); YYERROR; } break; case 171: { (yyval.stmt) = slgh->pcode.createOpNoOut(CPUI_RETURN,(yyvsp[-2].tree)); } break; case 172: { (yyval.stmt) = slgh->createMacroUse((yyvsp[-4].macrosym),(yyvsp[-2].param)); } break; case 173: { (yyval.stmt) = slgh->pcode.placeLabel( (yyvsp[0].labelsym) ); } break; case 174: { (yyval.tree) = new ExprTree((yyvsp[0].varnode)); } break; case 175: { (yyval.tree) = slgh->pcode.createLoad((yyvsp[-1].starqual),(yyvsp[0].tree)); } break; case 176: { (yyval.tree) = (yyvsp[-1].tree); } break; case 177: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_ADD,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 178: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SUB,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 179: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_EQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 180: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_NOTEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 181: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_LESS,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 182: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_LESSEQUAL,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 183: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_LESSEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 184: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_LESS,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 185: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SLESS,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 186: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SLESSEQUAL,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 187: { (yyval.tree) = 
slgh->pcode.createOp(CPUI_INT_SLESSEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 188: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SLESS,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 189: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_2COMP,(yyvsp[0].tree)); } break; case 190: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_NEGATE,(yyvsp[0].tree)); } break; case 191: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_XOR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 192: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_AND,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 193: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_OR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 194: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_LEFT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 195: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_RIGHT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 196: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SRIGHT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 197: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_MULT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 198: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_DIV,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 199: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SDIV,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 200: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_REM,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 201: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SREM,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 202: { (yyval.tree) = slgh->pcode.createOp(CPUI_BOOL_NEGATE,(yyvsp[0].tree)); } break; case 203: { (yyval.tree) = slgh->pcode.createOp(CPUI_BOOL_XOR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 204: { (yyval.tree) = slgh->pcode.createOp(CPUI_BOOL_AND,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 205: { (yyval.tree) = slgh->pcode.createOp(CPUI_BOOL_OR,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 206: { (yyval.tree) = 
slgh->pcode.createOp(CPUI_FLOAT_EQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 207: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_NOTEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 208: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_LESS,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 209: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_LESS,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 210: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_LESSEQUAL,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 211: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_LESSEQUAL,(yyvsp[0].tree),(yyvsp[-2].tree)); } break; case 212: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_ADD,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 213: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_SUB,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 214: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_MULT,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 215: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_DIV,(yyvsp[-2].tree),(yyvsp[0].tree)); } break; case 216: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_NEG,(yyvsp[0].tree)); } break; case 217: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_ABS,(yyvsp[-1].tree)); } break; case 218: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_SQRT,(yyvsp[-1].tree)); } break; case 219: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SEXT,(yyvsp[-1].tree)); } break; case 220: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_ZEXT,(yyvsp[-1].tree)); } break; case 221: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_CARRY,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 222: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SCARRY,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 223: { (yyval.tree) = slgh->pcode.createOp(CPUI_INT_SBORROW,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 224: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_FLOAT2FLOAT,(yyvsp[-1].tree)); } break; case 225: { (yyval.tree) = 
slgh->pcode.createOp(CPUI_FLOAT_INT2FLOAT,(yyvsp[-1].tree)); } break; case 226: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_NAN,(yyvsp[-1].tree)); } break; case 227: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_TRUNC,(yyvsp[-1].tree)); } break; case 228: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_CEIL,(yyvsp[-1].tree)); } break; case 229: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_FLOOR,(yyvsp[-1].tree)); } break; case 230: { (yyval.tree) = slgh->pcode.createOp(CPUI_FLOAT_ROUND,(yyvsp[-1].tree)); } break; case 231: { (yyval.tree) = slgh->pcode.createOp(CPUI_NEW,(yyvsp[-1].tree)); } break; case 232: { (yyval.tree) = slgh->pcode.createOp(CPUI_NEW,(yyvsp[-3].tree),(yyvsp[-1].tree)); } break; case 233: { (yyval.tree) = slgh->pcode.createOp(CPUI_POPCOUNT,(yyvsp[-1].tree)); } break; case 234: { (yyval.tree) = slgh->pcode.createOp(CPUI_LZCOUNT,(yyvsp[-1].tree)); } break; case 235: { (yyval.tree) = slgh->pcode.createOp(CPUI_SUBPIECE,new ExprTree((yyvsp[-3].specsym)->getVarnode()),new ExprTree((yyvsp[-1].varnode))); } break; case 236: { (yyval.tree) = slgh->pcode.createBitRange((yyvsp[-2].specsym),0,(uint4)(*(yyvsp[0].i) * 8)); delete (yyvsp[0].i); } break; case 237: { (yyval.tree) = slgh->pcode.createBitRange((yyvsp[-5].specsym),(uint4)*(yyvsp[-3].i),(uint4)*(yyvsp[-1].i)); delete (yyvsp[-3].i), delete (yyvsp[-1].i); } break; case 238: { (yyval.tree)=slgh->pcode.createBitRange((yyvsp[0].bitsym)->getParentSymbol(),(yyvsp[0].bitsym)->getBitOffset(),(yyvsp[0].bitsym)->numBits()); } break; case 239: { (yyval.tree) = slgh->pcode.createUserOp((yyvsp[-3].useropsym),(yyvsp[-1].param)); } break; case 240: { if ((*(yyvsp[-1].param)).size() < 2) { string errmsg = "Must at least two inputs to cpool"; slgh->reportError(errmsg); YYERROR; } (yyval.tree) = slgh->pcode.createVariadic(CPUI_CPOOLREF,(yyvsp[-1].param)); } break; case 241: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = *(yyvsp[0].i); delete (yyvsp[0].i); 
(yyval.starqual)->id=ConstTpl((yyvsp[-3].spacesym)->getSpace()); } break; case 242: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = 0; (yyval.starqual)->id=ConstTpl((yyvsp[-1].spacesym)->getSpace()); } break; case 243: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = *(yyvsp[0].i); delete (yyvsp[0].i); (yyval.starqual)->id=ConstTpl(slgh->getDefaultCodeSpace()); } break; case 244: { (yyval.starqual) = new StarQuality; (yyval.starqual)->size = 0; (yyval.starqual)->id=ConstTpl(slgh->getDefaultCodeSpace()); } break; case 245: { VarnodeTpl *sym = (yyvsp[0].specsym)->getVarnode(); (yyval.varnode) = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),sym->getOffset(),ConstTpl(ConstTpl::j_curspace_size)); delete sym; } break; case 246: { (yyval.varnode) = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,*(yyvsp[0].i)),ConstTpl(ConstTpl::j_curspace_size)); delete (yyvsp[0].i); } break; case 247: { (yyval.varnode) = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::j_curspace_size)); slgh->reportError("Parsed integer is too big (overflow)"); } break; case 248: { (yyval.varnode) = (yyvsp[0].operandsym)->getVarnode(); (yyvsp[0].operandsym)->setCodeAddress(); } break; case 249: { AddrSpace *spc = (yyvsp[-1].spacesym)->getSpace(); (yyval.varnode) = new VarnodeTpl(ConstTpl(spc),ConstTpl(ConstTpl::real,*(yyvsp[-3].i)),ConstTpl(ConstTpl::real,spc->getAddrSize())); delete (yyvsp[-3].i); } break; case 250: { (yyval.varnode) = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::j_relative,(yyvsp[0].labelsym)->getIndex()),ConstTpl(ConstTpl::real,sizeof(uintm))); (yyvsp[0].labelsym)->incrementRefCount(); } break; case 251: { string errmsg = "Unknown jump destination: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 252: { (yyval.varnode) = (yyvsp[0].specsym)->getVarnode(); } break; case 253: { (yyval.varnode) = (yyvsp[0].varnode); } break; 
case 254: { string errmsg = "Unknown varnode parameter: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 255: { string errmsg = "Subtable not attached to operand: "+(yyvsp[0].subtablesym)->getName(); slgh->reportError(errmsg); YYERROR; } break; case 256: { (yyval.varnode) = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,*(yyvsp[0].i)),ConstTpl(ConstTpl::real,0)); delete (yyvsp[0].i); } break; case 257: { (yyval.varnode) = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::real,0)); slgh->reportError("Parsed integer is too big (overflow)"); } break; case 258: { (yyval.varnode) = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,*(yyvsp[-2].i)),ConstTpl(ConstTpl::real,*(yyvsp[0].i))); delete (yyvsp[-2].i); delete (yyvsp[0].i); } break; case 259: { (yyval.varnode) = slgh->pcode.addressOf((yyvsp[0].varnode),0); } break; case 260: { (yyval.varnode) = slgh->pcode.addressOf((yyvsp[0].varnode),*(yyvsp[-1].i)); delete (yyvsp[-1].i); } break; case 261: { (yyval.varnode) = (yyvsp[0].specsym)->getVarnode(); } break; case 262: { string errmsg = "Unknown assignment varnode: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 263: { string errmsg = "Subtable not attached to operand: "+(yyvsp[0].subtablesym)->getName(); slgh->reportError(errmsg); YYERROR; } break; case 264: { (yyval.labelsym) = (yyvsp[-1].labelsym); } break; case 265: { (yyval.labelsym) = slgh->pcode.defineLabel( (yyvsp[-1].str) ); } break; case 266: { (yyval.varnode) = (yyvsp[0].specsym)->getVarnode(); } break; case 267: { (yyval.varnode) = slgh->pcode.addressOf((yyvsp[0].varnode),0); } break; case 268: { (yyval.varnode) = slgh->pcode.addressOf((yyvsp[0].varnode),*(yyvsp[-1].i)); delete (yyvsp[-1].i); } break; case 269: { (yyval.varnode) = new 
VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,*(yyvsp[-2].i)),ConstTpl(ConstTpl::real,*(yyvsp[0].i))); delete (yyvsp[-2].i); delete (yyvsp[0].i); } break; case 270: { string errmsg="Unknown export varnode: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 271: { string errmsg = "Subtable not attached to operand: "+(yyvsp[0].subtablesym)->getName(); slgh->reportError(errmsg); YYERROR; } break; case 272: { (yyval.famsym) = (yyvsp[0].valuesym); } break; case 273: { (yyval.famsym) = (yyvsp[0].valuemapsym); } break; case 274: { (yyval.famsym) = (yyvsp[0].contextsym); } break; case 275: { (yyval.famsym) = (yyvsp[0].namesym); } break; case 276: { (yyval.famsym) = (yyvsp[0].varlistsym); } break; case 277: { (yyval.specsym) = (yyvsp[0].varsym); } break; case 278: { (yyval.specsym) = (yyvsp[0].specsym); } break; case 279: { (yyval.specsym) = (yyvsp[0].operandsym); } break; case 280: { (yyval.specsym) = (yyvsp[0].specsym); } break; case 281: { (yyval.str) = new string; (*(yyval.str)) += (yyvsp[0].ch); } break; case 282: { (yyval.str) = (yyvsp[-1].str); (*(yyval.str)) += (yyvsp[0].ch); } break; case 283: { (yyval.biglist) = (yyvsp[-1].biglist); } break; case 284: { (yyval.biglist) = new vector; (yyval.biglist)->push_back(intb(*(yyvsp[0].i))); delete (yyvsp[0].i); } break; case 285: { (yyval.biglist) = new vector; (yyval.biglist)->push_back(-intb(*(yyvsp[0].i))); delete (yyvsp[0].i); } break; case 286: { (yyval.biglist) = new vector; (yyval.biglist)->push_back(intb(*(yyvsp[0].i))); delete (yyvsp[0].i); } break; case 287: { (yyval.biglist) = new vector; (yyval.biglist)->push_back(-intb(*(yyvsp[0].i))); delete (yyvsp[0].i); } break; case 288: { if (*(yyvsp[0].str)!="_") { string errmsg = "Expecting integer but saw: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } (yyval.biglist) = new vector; (yyval.biglist)->push_back((intb)0xBADBEEF); delete (yyvsp[0].str); } break; case 289: { 
(yyval.biglist) = (yyvsp[-1].biglist); (yyval.biglist)->push_back(intb(*(yyvsp[0].i))); delete (yyvsp[0].i); } break; case 290: { (yyval.biglist) = (yyvsp[-2].biglist); (yyval.biglist)->push_back(-intb(*(yyvsp[0].i))); delete (yyvsp[0].i); } break; case 291: { if (*(yyvsp[0].str)!="_") { string errmsg = "Expecting integer but saw: "+*(yyvsp[0].str); delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } (yyval.biglist) = (yyvsp[-1].biglist); (yyval.biglist)->push_back((intb)0xBADBEEF); delete (yyvsp[0].str); } break; case 292: { (yyval.strlist) = (yyvsp[-1].strlist); } break; case 293: { (yyval.strlist) = new vector; (yyval.strlist)->push_back(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 294: { (yyval.strlist) = new vector; (yyval.strlist)->push_back( *(yyvsp[0].str) ); delete (yyvsp[0].str); } break; case 295: { (yyval.strlist) = (yyvsp[-1].strlist); (yyval.strlist)->push_back(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 296: { string errmsg = (yyvsp[0].anysym)->getName()+": redefined"; slgh->reportError(errmsg); YYERROR; } break; case 297: { (yyval.strlist) = (yyvsp[-1].strlist); } break; case 298: { (yyval.strlist) = new vector; (yyval.strlist)->push_back( *(yyvsp[0].str) ); delete (yyvsp[0].str); } break; case 299: { (yyval.strlist) = new vector; (yyval.strlist)->push_back( (yyvsp[0].anysym)->getName() ); } break; case 300: { (yyval.strlist) = (yyvsp[-1].strlist); (yyval.strlist)->push_back(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 301: { (yyval.strlist) = (yyvsp[-1].strlist); (yyval.strlist)->push_back((yyvsp[0].anysym)->getName()); } break; case 302: { (yyval.symlist) = (yyvsp[-1].symlist); } break; case 303: { (yyval.symlist) = new vector; (yyval.symlist)->push_back((yyvsp[0].valuesym)); } break; case 304: { (yyval.symlist) = new vector; (yyval.symlist)->push_back((yyvsp[0].contextsym)); } break; case 305: { (yyval.symlist) = new vector; (yyval.symlist)->push_back( (yyvsp[0].valuesym) ); } break; case 306: { 
(yyval.symlist) = new vector; (yyval.symlist)->push_back((yyvsp[0].contextsym)); } break; case 307: { (yyval.symlist) = (yyvsp[-1].symlist); (yyval.symlist)->push_back((yyvsp[0].valuesym)); } break; case 308: { (yyval.symlist) = (yyvsp[-1].symlist); (yyval.symlist)->push_back((yyvsp[0].contextsym)); } break; case 309: { string errmsg = *(yyvsp[0].str)+": is not a value pattern"; delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } break; case 310: { (yyval.symlist) = (yyvsp[-1].symlist); } break; case 311: { (yyval.symlist) = new vector; (yyval.symlist)->push_back((yyvsp[0].varsym)); } break; case 312: { (yyval.symlist) = new vector; (yyval.symlist)->push_back((yyvsp[0].varsym)); } break; case 313: { if (*(yyvsp[0].str)!="_") { string errmsg = *(yyvsp[0].str)+": is not a varnode symbol"; delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } (yyval.symlist) = new vector; (yyval.symlist)->push_back((SleighSymbol *)0); delete (yyvsp[0].str); } break; case 314: { (yyval.symlist) = (yyvsp[-1].symlist); (yyval.symlist)->push_back((yyvsp[0].varsym)); } break; case 315: { if (*(yyvsp[0].str)!="_") { string errmsg = *(yyvsp[0].str)+": is not a varnode symbol"; delete (yyvsp[0].str); slgh->reportError(errmsg); YYERROR; } (yyval.symlist) = (yyvsp[-1].symlist); (yyval.symlist)->push_back((SleighSymbol *)0); delete (yyvsp[0].str); } break; case 316: { (yyval.param) = new vector; } break; case 317: { (yyval.param) = new vector; (yyval.param)->push_back((yyvsp[0].tree)); } break; case 318: { (yyval.param) = (yyvsp[-2].param); (yyval.param)->push_back((yyvsp[0].tree)); } break; case 319: { (yyval.strlist) = new vector; } break; case 320: { (yyval.strlist) = new vector; (yyval.strlist)->push_back(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 321: { (yyval.strlist) = (yyvsp[-2].strlist); (yyval.strlist)->push_back(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 322: { (yyval.anysym) = (yyvsp[0].spacesym); } break; case 323: { (yyval.anysym) = 
(yyvsp[0].sectionsym); } break; case 324: { (yyval.anysym) = (yyvsp[0].tokensym); } break; case 325: { (yyval.anysym) = (yyvsp[0].useropsym); } break; case 326: { (yyval.anysym) = (yyvsp[0].macrosym); } break; case 327: { (yyval.anysym) = (yyvsp[0].subtablesym); } break; case 328: { (yyval.anysym) = (yyvsp[0].valuesym); } break; case 329: { (yyval.anysym) = (yyvsp[0].valuemapsym); } break; case 330: { (yyval.anysym) = (yyvsp[0].contextsym); } break; case 331: { (yyval.anysym) = (yyvsp[0].namesym); } break; case 332: { (yyval.anysym) = (yyvsp[0].varsym); } break; case 333: { (yyval.anysym) = (yyvsp[0].varlistsym); } break; case 334: { (yyval.anysym) = (yyvsp[0].operandsym); } break; case 335: { (yyval.anysym) = (yyvsp[0].specsym); } break; case 336: { (yyval.anysym) = (yyvsp[0].bitsym); } break; default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ { const int yylhs = yyr1[yyn] - YYNTOKENS; const int yyi = yypgoto[yylhs] + *yyssp; yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp ? 
yytable[yyi] : yydefgoto[yylhs]); } goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = YY_CAST (char *, YYSTACK_ALLOC (YY_CAST (YYSIZE_T, yymsg_alloc))); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (0) YYERROR; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. 
*/ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif /*-----------------------------------------------------. | yyreturn -- parsing is finished, return the result. | `-----------------------------------------------------*/ yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. 
*/ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[+*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } int sleigherror(const char *s) { slgh->reportError(s); return 0; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/slghparse.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* A Bison parser, made by GNU Bison 3.5.1. */ /* Bison interface for Yacc-like parsers in C Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2020 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef SLEIGHTOKENTYPE
# define SLEIGHTOKENTYPE
  /* Terminal token kinds shared between the SLEIGH lexer and this
     Bison-generated parser.  Values start at 258, just past the range Bison
     reserves for single-character literal tokens.  NOTE(review): these values
     are generated; they must stay in lockstep with the parser tables in
     slghparse.cpp, so never edit them by hand. */
  enum sleightokentype
  {
    /* p-code operator tokens */
    OP_BOOL_OR = 258, OP_BOOL_AND = 259, OP_BOOL_XOR = 260,
    OP_OR = 261, OP_XOR = 262, OP_AND = 263,
    OP_EQUAL = 264, OP_NOTEQUAL = 265, OP_FEQUAL = 266, OP_FNOTEQUAL = 267,
    OP_GREATEQUAL = 268, OP_LESSEQUAL = 269,
    OP_SLESS = 270, OP_SGREATEQUAL = 271, OP_SLESSEQUAL = 272, OP_SGREAT = 273,
    OP_FLESS = 274, OP_FGREAT = 275, OP_FLESSEQUAL = 276, OP_FGREATEQUAL = 277,
    OP_LEFT = 278, OP_RIGHT = 279, OP_SRIGHT = 280,
    OP_FADD = 281, OP_FSUB = 282, OP_SDIV = 283, OP_SREM = 284,
    OP_FMULT = 285, OP_FDIV = 286,
    OP_ZEXT = 287, OP_CARRY = 288, OP_BORROW = 289, OP_SEXT = 290,
    OP_SCARRY = 291, OP_SBORROW = 292,
    OP_NAN = 293, OP_ABS = 294, OP_SQRT = 295, OP_CEIL = 296,
    OP_FLOOR = 297, OP_ROUND = 298,
    OP_INT2FLOAT = 299, OP_FLOAT2FLOAT = 300, OP_TRUNC = 301,
    OP_CPOOLREF = 302, OP_NEW = 303, OP_POPCOUNT = 304, OP_LZCOUNT = 305,
    /* special literal returned for an unparseable integer */
    BADINTEGER = 306,
    /* SLEIGH language keywords */
    GOTO_KEY = 307, CALL_KEY = 308, RETURN_KEY = 309, IF_KEY = 310,
    DEFINE_KEY = 311, ATTACH_KEY = 312, MACRO_KEY = 313, SPACE_KEY = 314,
    TYPE_KEY = 315, RAM_KEY = 316, DEFAULT_KEY = 317, REGISTER_KEY = 318,
    ENDIAN_KEY = 319, WITH_KEY = 320, ALIGN_KEY = 321, OP_UNIMPL = 322,
    TOKEN_KEY = 323, SIGNED_KEY = 324, NOFLOW_KEY = 325, HEX_KEY = 326,
    DEC_KEY = 327, BIG_KEY = 328, LITTLE_KEY = 329, SIZE_KEY = 330,
    WORDSIZE_KEY = 331, OFFSET_KEY = 332, NAMES_KEY = 333, VALUES_KEY = 334,
    VARIABLES_KEY = 335, PCODEOP_KEY = 336, IS_KEY = 337, LOCAL_KEY = 338,
    DELAYSLOT_KEY = 339, CROSSBUILD_KEY = 340, EXPORT_KEY = 341,
    BUILD_KEY = 342, CONTEXT_KEY = 343, ELLIPSIS_KEY = 344,
    GLOBALSET_KEY = 345, BITRANGE_KEY = 346,
    /* lexical literals */
    CHAR = 347, INTEGER = 348, INTB = 349, STRING = 350, SYMBOLSTRING = 351,
    /* tokens carrying an already-resolved symbol-table entry */
    SPACESYM = 352, SECTIONSYM = 353, TOKENSYM = 354, USEROPSYM = 355,
    VALUESYM = 356, VALUEMAPSYM = 357, CONTEXTSYM = 358, NAMESYM = 359,
    VARSYM = 360, BITSYM = 361, SPECSYM = 362, VARLISTSYM = 363,
    OPERANDSYM = 364, JUMPSYM = 365, MACROSYM = 366, LABELSYM = 367,
    SUBTABLESYM = 368
  };
#endif
/* Value type.
*/ #if ! defined SLEIGHSTYPE && ! defined SLEIGHSTYPE_IS_DECLARED union SLEIGHSTYPE { char ch; uintb *i; intb *big; string *str; vector *strlist; vector *biglist; vector *param; SpaceQuality *spacequal; FieldQuality *fieldqual; StarQuality *starqual; VarnodeTpl *varnode; ExprTree *tree; vector *stmt; ConstructTpl *sem; SectionVector *sectionstart; Constructor *construct; PatternEquation *pateq; PatternExpression *patexp; vector *symlist; vector *contop; SleighSymbol *anysym; SpaceSymbol *spacesym; SectionSymbol *sectionsym; TokenSymbol *tokensym; UserOpSymbol *useropsym; MacroSymbol *macrosym; LabelSymbol *labelsym; SubtableSymbol *subtablesym; OperandSymbol *operandsym; VarnodeListSymbol *varlistsym; VarnodeSymbol *varsym; BitrangeSymbol *bitsym; NameSymbol *namesym; ValueSymbol *valuesym; ValueMapSymbol *valuemapsym; ContextSymbol *contextsym; FamilySymbol *famsym; SpecificSymbol *specsym; }; typedef union SLEIGHSTYPE SLEIGHSTYPE; # define SLEIGHSTYPE_IS_TRIVIAL 1 # define SLEIGHSTYPE_IS_DECLARED 1 #endif extern SLEIGHSTYPE sleighlval; int sleighparse (void); #endif /* !YY_SLEIGH_SLGHPARSE_HH_INCLUDED */ ================================================ FILE: pypcode/sleigh/slghparse.y ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ %define api.prefix {sleigh} %{ #include "slgh_compile.hh" extern FILE *sleighin; extern int sleighlex(void); namespace ghidra { extern SleighCompile *slgh; extern int4 actionon; extern int sleighdebug; extern int sleigherror(const char *str ); %} %union { char ch; uintb *i; intb *big; string *str; vector *strlist; vector *biglist; vector *param; SpaceQuality *spacequal; FieldQuality *fieldqual; StarQuality *starqual; VarnodeTpl *varnode; ExprTree *tree; vector *stmt; ConstructTpl *sem; SectionVector *sectionstart; Constructor *construct; PatternEquation *pateq; PatternExpression *patexp; vector *symlist; vector *contop; SleighSymbol *anysym; SpaceSymbol *spacesym; SectionSymbol *sectionsym; TokenSymbol *tokensym; UserOpSymbol *useropsym; MacroSymbol *macrosym; LabelSymbol *labelsym; SubtableSymbol *subtablesym; OperandSymbol *operandsym; VarnodeListSymbol *varlistsym; VarnodeSymbol *varsym; BitrangeSymbol *bitsym; NameSymbol *namesym; ValueSymbol *valuesym; ValueMapSymbol *valuemapsym; ContextSymbol *contextsym; FamilySymbol *famsym; SpecificSymbol *specsym; } %expect 5 // Conflicts // 2 charstring conflicts (do we lump CHARs together before appending to constructprint) // resolved by shifting which lumps before appending (best solution) // 1 integervarnode ':' conflict (does ':' apply to INTEGER or varnode) // resolved by shifting which applies ':' to INTEGER (best solution) // 2 statement -> STRING . 
conflicts (STRING might be mislabelled varnode, or temporary declaration) // resolved by shifting which means assume this is a temporary declaration %left OP_BOOL_OR %left OP_BOOL_AND OP_BOOL_XOR %left '|' OP_OR %left ';' %left '^' OP_XOR %left '&' OP_AND %left OP_EQUAL OP_NOTEQUAL OP_FEQUAL OP_FNOTEQUAL %nonassoc '<' '>' OP_GREATEQUAL OP_LESSEQUAL OP_SLESS OP_SGREATEQUAL OP_SLESSEQUAL OP_SGREAT OP_FLESS OP_FGREAT OP_FLESSEQUAL OP_FGREATEQUAL %left OP_LEFT OP_RIGHT OP_SRIGHT %left '+' '-' OP_FADD OP_FSUB %left '*' '/' '%' OP_SDIV OP_SREM OP_FMULT OP_FDIV %right '!' '~' %token OP_ZEXT OP_CARRY OP_BORROW OP_SEXT OP_SCARRY OP_SBORROW OP_NAN OP_ABS %token OP_SQRT OP_CEIL OP_FLOOR OP_ROUND OP_INT2FLOAT OP_FLOAT2FLOAT %token OP_TRUNC OP_CPOOLREF OP_NEW OP_POPCOUNT OP_LZCOUNT %token BADINTEGER GOTO_KEY CALL_KEY RETURN_KEY IF_KEY %token DEFINE_KEY ATTACH_KEY MACRO_KEY SPACE_KEY TYPE_KEY RAM_KEY DEFAULT_KEY %token REGISTER_KEY ENDIAN_KEY WITH_KEY ALIGN_KEY OP_UNIMPL %token TOKEN_KEY SIGNED_KEY NOFLOW_KEY HEX_KEY DEC_KEY BIG_KEY LITTLE_KEY %token SIZE_KEY WORDSIZE_KEY OFFSET_KEY NAMES_KEY VALUES_KEY VARIABLES_KEY PCODEOP_KEY IS_KEY LOCAL_KEY %token DELAYSLOT_KEY CROSSBUILD_KEY EXPORT_KEY BUILD_KEY CONTEXT_KEY ELLIPSIS_KEY GLOBALSET_KEY BITRANGE_KEY %token CHAR %token INTEGER %token INTB %token STRING SYMBOLSTRING %token SPACESYM %token SECTIONSYM %token TOKENSYM %token USEROPSYM %token VALUESYM %token VALUEMAPSYM %token CONTEXTSYM %token NAMESYM %token VARSYM %token BITSYM %token SPECSYM %token VARLISTSYM %token OPERANDSYM %token JUMPSYM %token MACROSYM %token LABELSYM %token SUBTABLESYM %type macrostart %type paramlist %type rtl rtlmid %type rtlbody rtlfirstsection rtlcontinue %type statement %type expr %type varnode integervarnode exportvarnode lhsvarnode jumpdest %type label %type pequation bitpat_or_nil elleq ellrt atomic constraint %type pexpression %type charstring %type constructprint subtablestart %type section_def %type contextprop %type tokenprop %type spaceprop 
%type fielddef contextfielddef %type sizedstar %type stringlist stringpart anystringlist anystringpart oplist %type intblist intbpart %type valuelist valuepart varlist varpart %type contextlist contextblock %type anysymbol %type familysymbol %type specificsymbol %type id_or_nil %% spec: endiandef | spec aligndef | spec definition | spec constructorlike ; definition: tokendef | contextdef | spacedef | varnodedef | bitrangedef | pcodeopdef | valueattach | nameattach | varattach | error ';' ; constructorlike: constructor | macrodef | withblock | error '}' { slgh->resetConstructors(); } ; endiandef: DEFINE_KEY ENDIAN_KEY '=' BIG_KEY ';' { slgh->setEndian(1); } | DEFINE_KEY ENDIAN_KEY '=' LITTLE_KEY ';' { slgh->setEndian(0); } ; aligndef: DEFINE_KEY ALIGN_KEY '=' INTEGER ';' { slgh->setAlignment(*$4); delete $4; } ; tokendef: tokenprop ';' {} ; tokenprop: DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' { $$ = slgh->defineToken($3,$5,0); } | DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' ENDIAN_KEY '=' LITTLE_KEY { $$ = slgh->defineToken($3,$5,-1); } | DEFINE_KEY TOKEN_KEY STRING '(' INTEGER ')' ENDIAN_KEY '=' BIG_KEY { $$ = slgh->defineToken($3,$5,1); } | tokenprop fielddef { $$ = $1; slgh->addTokenField($1,$2); } | DEFINE_KEY TOKEN_KEY anysymbol { string errmsg=$3->getName()+": redefined as a token"; slgh->reportError(errmsg); YYERROR; } ; contextdef: contextprop ';' {} ; contextprop: DEFINE_KEY CONTEXT_KEY VARSYM { $$ = $3; } | contextprop contextfielddef { $$ = $1; if (!slgh->addContextField( $1, $2 )) { slgh->reportError("All context definitions must come before constructors"); YYERROR; } } ; fielddef: STRING '=' '(' INTEGER ',' INTEGER ')' { $$ = new FieldQuality($1,$4,$6); } | anysymbol '=' '(' INTEGER ',' INTEGER ')' { delete $4; delete $6; string errmsg = $1->getName()+": redefined as field"; slgh->reportError(errmsg); YYERROR; } | fielddef SIGNED_KEY { $$ = $1; $$->signext = true; } | fielddef HEX_KEY { $$ = $1; $$->hex = true; } | fielddef DEC_KEY { $$ = $1; $$->hex 
= false; } ; contextfielddef: STRING '=' '(' INTEGER ',' INTEGER ')' { $$ = new FieldQuality($1,$4,$6); } | anysymbol '=' '(' INTEGER ',' INTEGER ')' { delete $4; delete $6; string errmsg = $1->getName()+": redefined as field"; slgh->reportError(errmsg); YYERROR; } | contextfielddef SIGNED_KEY { $$ = $1; $$->signext = true; } | contextfielddef NOFLOW_KEY { $$ = $1; $$->flow = false; } | contextfielddef HEX_KEY { $$ = $1; $$->hex = true; } | contextfielddef DEC_KEY { $$ = $1; $$->hex = false; } ; spacedef: spaceprop ';' { slgh->newSpace($1); } ; spaceprop: DEFINE_KEY SPACE_KEY STRING { $$ = new SpaceQuality(*$3); delete $3; } | DEFINE_KEY SPACE_KEY anysymbol { string errmsg = $3->getName()+": redefined as space"; slgh->reportError(errmsg); YYERROR; } | spaceprop TYPE_KEY '=' RAM_KEY { $$ = $1; $$->type = SpaceQuality::ramtype; } | spaceprop TYPE_KEY '=' REGISTER_KEY { $$ = $1; $$->type = SpaceQuality::registertype; } | spaceprop SIZE_KEY '=' INTEGER { $$ = $1; $$->size = *$4; delete $4; } | spaceprop WORDSIZE_KEY '=' INTEGER { $$ = $1; $$->wordsize = *$4; delete $4; } | spaceprop DEFAULT_KEY { $$ = $1; $$->isdefault = true; } ; varnodedef: DEFINE_KEY SPACESYM OFFSET_KEY '=' INTEGER SIZE_KEY '=' INTEGER stringlist ';' { slgh->defineVarnodes($2,$5,$8,$9); } | DEFINE_KEY SPACESYM OFFSET_KEY '=' BADINTEGER { slgh->reportError("Parsed integer is too big (overflow)"); YYERROR; } ; bitrangedef: DEFINE_KEY BITRANGE_KEY bitrangelist ';' ; bitrangelist: bitrangesingle | bitrangelist bitrangesingle ; bitrangesingle: STRING '=' VARSYM '[' INTEGER ',' INTEGER ']' { slgh->defineBitrange($1,$3,(uint4)*$5,(uint4)*$7); delete $5; delete $7; } ; pcodeopdef: DEFINE_KEY PCODEOP_KEY stringlist ';' { slgh->addUserOp($3); } ; valueattach: ATTACH_KEY VALUES_KEY valuelist intblist ';' { slgh->attachValues($3,$4); } ; nameattach: ATTACH_KEY NAMES_KEY valuelist anystringlist ';' { slgh->attachNames($3,$4); } ; varattach: ATTACH_KEY VARIABLES_KEY valuelist varlist ';' { 
slgh->attachVarnodes($3,$4); } ; macrodef: macrostart '{' rtl '}' { slgh->buildMacro($1,$3); } ; withblockstart: WITH_KEY id_or_nil ':' bitpat_or_nil contextblock '{' { slgh->pushWith($2,$4,$5); } ; withblockmid: withblockstart | withblockmid definition | withblockmid constructorlike ; withblock: withblockmid '}' { slgh->popWith(); } id_or_nil: /* empty */ { $$ = (SubtableSymbol *)0; } | SUBTABLESYM { $$ = $1; } | STRING { $$ = slgh->newTable($1); } ; bitpat_or_nil: /* empty */ { $$ = (PatternEquation *)0; } | pequation { $$ = $1; } ; macrostart: MACRO_KEY STRING '(' oplist ')' { $$ = slgh->createMacro($2,$4); } ; rtlbody: '{' rtl '}' { $$ = slgh->standaloneSection($2); } | '{' rtlcontinue rtlmid '}' { $$ = slgh->finalNamedSection($2,$3); } | OP_UNIMPL { $$ = (SectionVector *)0; } ; constructor: constructprint IS_KEY pequation contextblock rtlbody { slgh->buildConstructor($1,$3,$4,$5); } | subtablestart IS_KEY pequation contextblock rtlbody { slgh->buildConstructor($1,$3,$4,$5); } ; constructprint: subtablestart STRING { $$ = $1; $$->addSyntax(*$2); delete $2; } | subtablestart charstring { $$ = $1; $$->addSyntax(*$2); delete $2; } | subtablestart SYMBOLSTRING { $$ = $1; if (slgh->isInRoot($1)) { $$->addSyntax(*$2); delete $2; } else slgh->newOperand($1,$2); } | subtablestart '^' { $$ = $1; if (!slgh->isInRoot($1)) { slgh->reportError("Unexpected '^' at start of print pieces"); YYERROR; } } | constructprint '^' { $$ = $1; } | constructprint STRING { $$ = $1; $$->addSyntax(*$2); delete $2; } | constructprint charstring { $$ = $1; $$->addSyntax(*$2); delete $2; } | constructprint ' ' { $$ = $1; $$->addSyntax(string(" ")); } | constructprint SYMBOLSTRING { $$ = $1; slgh->newOperand($1,$2); } ; subtablestart: SUBTABLESYM ':' { $$ = slgh->createConstructor($1); } | STRING ':' { SubtableSymbol *sym=slgh->newTable($1); $$ = slgh->createConstructor(sym); } | ':' { $$ = slgh->createConstructor((SubtableSymbol *)0); } | subtablestart ' ' { $$ = $1; } ; pexpression: INTB { $$ 
= new ConstantValue(*$1); delete $1; } // familysymbol is not acceptable in an action expression because it isn't attached to an offset | familysymbol { if ((actionon==1)&&($1->getType() != SleighSymbol::context_symbol)) { string errmsg="Global symbol "+$1->getName(); errmsg += " is not allowed in action expression"; slgh->reportError(errmsg); } $$ = $1->getPatternValue(); } // | CONTEXTSYM { $$ = $1->getPatternValue(); } | specificsymbol { $$ = $1->getPatternExpression(); } | '(' pexpression ')' { $$ = $2; } | pexpression '+' pexpression { $$ = new PlusExpression($1,$3); } | pexpression '-' pexpression { $$ = new SubExpression($1,$3); } | pexpression '*' pexpression { $$ = new MultExpression($1,$3); } | pexpression OP_LEFT pexpression { $$ = new LeftShiftExpression($1,$3); } | pexpression OP_RIGHT pexpression { $$ = new RightShiftExpression($1,$3); } | pexpression OP_AND pexpression { $$ = new AndExpression($1,$3); } | pexpression OP_OR pexpression { $$ = new OrExpression($1,$3); } | pexpression OP_XOR pexpression { $$ = new XorExpression($1,$3); } | pexpression '/' pexpression { $$ = new DivExpression($1,$3); } | '-' pexpression %prec '!' 
{ $$ = new MinusExpression($2); } | '~' pexpression { $$ = new NotExpression($2); } ; pequation: elleq | pequation '&' pequation { $$ = new EquationAnd($1,$3); } | pequation '|' pequation { $$ = new EquationOr($1,$3); } | pequation ';' pequation { $$ = new EquationCat($1,$3); } ; elleq: ELLIPSIS_KEY ellrt { $$ = new EquationLeftEllipsis($2); } | ellrt ; ellrt: atomic ELLIPSIS_KEY { $$ = new EquationRightEllipsis($1); } | atomic ; atomic: constraint | '(' pequation ')' { $$ = $2; } ; constraint: familysymbol '=' pexpression { $$ = new EqualEquation($1->getPatternValue(),$3); } | familysymbol OP_NOTEQUAL pexpression { $$ = new NotEqualEquation($1->getPatternValue(),$3); } | familysymbol '<' pexpression { $$ = new LessEquation($1->getPatternValue(),$3); } | familysymbol OP_LESSEQUAL pexpression { $$ = new LessEqualEquation($1->getPatternValue(),$3); } | familysymbol '>' pexpression { $$ = new GreaterEquation($1->getPatternValue(),$3); } | familysymbol OP_GREATEQUAL pexpression { $$ = new GreaterEqualEquation($1->getPatternValue(),$3); } | OPERANDSYM '=' pexpression { $$ = slgh->constrainOperand($1,$3); if ($$ == (PatternEquation *)0) { string errmsg="Constraining currently undefined operand "+$1->getName(); slgh->reportError(errmsg); } } | OPERANDSYM { $$ = new OperandEquation($1->getIndex()); slgh->selfDefine($1); } | SPECSYM { $$ = new UnconstrainedEquation($1->getPatternExpression()); } | familysymbol { $$ = slgh->defineInvisibleOperand($1); } | SUBTABLESYM { $$ = slgh->defineInvisibleOperand($1); } ; contextblock: { $$ = (vector *)0; } | '[' contextlist ']' { $$ = $2; } ; contextlist: { $$ = new vector; } | contextlist CONTEXTSYM '=' pexpression ';' { $$ = $1; if (!slgh->contextMod($1,$2,$4)) { string errmsg="Cannot use 'inst_next' or 'inst_next2' to set context variable: "+$2->getName(); slgh->reportError(errmsg); YYERROR; } } | contextlist GLOBALSET_KEY '(' familysymbol ',' CONTEXTSYM ')' ';' { $$ = $1; slgh->contextSet($1,$4,$6); } | contextlist GLOBALSET_KEY 
'(' specificsymbol ',' CONTEXTSYM ')' ';' { $$ = $1; slgh->contextSet($1,$4,$6); } | contextlist OPERANDSYM '=' pexpression ';' { $$ = $1; slgh->defineOperand($2,$4); } | contextlist STRING { string errmsg="Expecting context symbol, not "+*$2; delete $2; slgh->reportError(errmsg); YYERROR; } ; section_def: OP_LEFT STRING OP_RIGHT { $$ = slgh->newSectionSymbol( *$2 ); delete $2; } | OP_LEFT SECTIONSYM OP_RIGHT { $$ = $2; } ; rtlfirstsection: rtl section_def { $$ = slgh->firstNamedSection($1,$2); } ; rtlcontinue: rtlfirstsection { $$ = $1; } | rtlcontinue rtlmid section_def { $$ = slgh->nextNamedSection($1,$2,$3); } ; rtl: rtlmid { $$ = $1; if ($$->getOpvec().empty() && ($$->getResult() == (HandleTpl *)0)) slgh->recordNop(); } | rtlmid EXPORT_KEY exportvarnode ';' { $$ = slgh->setResultVarnode($1,$3); } | rtlmid EXPORT_KEY sizedstar lhsvarnode ';' { $$ = slgh->setResultStarVarnode($1,$3,$4); } | rtlmid EXPORT_KEY STRING { string errmsg="Unknown export varnode: "+*$3; delete $3; slgh->reportError(errmsg); YYERROR; } | rtlmid EXPORT_KEY sizedstar STRING { string errmsg="Unknown pointer varnode: "+*$4; delete $3; delete $4; slgh->reportError(errmsg); YYERROR; } ; rtlmid: /* EMPTY */ { $$ = slgh->enterSection(); } | rtlmid statement { $$ = $1; if (!$$->addOpList(*$2)) { delete $2; slgh->reportError("Multiple delayslot declarations"); YYERROR; } delete $2; } | rtlmid LOCAL_KEY STRING ';' { $$ = $1; slgh->pcode.newLocalDefinition($3); } | rtlmid LOCAL_KEY STRING ':' INTEGER ';' { $$ = $1; slgh->pcode.newLocalDefinition($3,*$5); delete $5; } ; statement: lhsvarnode '=' expr ';' { $3->setOutput($1); $$ = ExprTree::toVector($3); } | LOCAL_KEY STRING '=' expr ';' { $$ = slgh->pcode.newOutput(true,$4,$2); } | STRING '=' expr ';' { $$ = slgh->pcode.newOutput(false,$3,$1); } | LOCAL_KEY STRING ':' INTEGER '=' expr ';' { $$ = slgh->pcode.newOutput(true,$6,$2,*$4); delete $4; } | STRING ':' INTEGER '=' expr ';' { $$ = slgh->pcode.newOutput(true,$5,$1,*$3); delete $3; } | LOCAL_KEY 
specificsymbol '=' { $$ = (vector *)0; string errmsg = "Redefinition of symbol: "+$2->getName(); slgh->reportError(errmsg); YYERROR; } | sizedstar expr '=' expr ';' { $$ = slgh->pcode.createStore($1,$2,$4); } | USEROPSYM '(' paramlist ')' ';' { $$ = slgh->pcode.createUserOpNoOut($1,$3); } | lhsvarnode '[' INTEGER ',' INTEGER ']' '=' expr ';' { $$ = slgh->pcode.assignBitRange($1,(uint4)*$3,(uint4)*$5,$8); delete $3, delete $5; } | BITSYM '=' expr ';' { $$=slgh->pcode.assignBitRange($1->getParentSymbol()->getVarnode(),$1->getBitOffset(),$1->numBits(),$3); } | varnode ':' INTEGER '=' { delete $1; delete $3; slgh->reportError("Illegal truncation on left-hand side of assignment"); YYERROR; } | varnode '(' INTEGER ')' { delete $1; delete $3; slgh->reportError("Illegal subpiece on left-hand side of assignment"); YYERROR; } | BUILD_KEY OPERANDSYM ';' { $$ = slgh->pcode.createOpConst(BUILD,$2->getIndex()); } | CROSSBUILD_KEY varnode ',' SECTIONSYM ';' { $$ = slgh->createCrossBuild($2,$4); } | CROSSBUILD_KEY varnode ',' STRING ';' { $$ = slgh->createCrossBuild($2,slgh->newSectionSymbol(*$4)); delete $4; } | DELAYSLOT_KEY '(' INTEGER ')' ';' { $$ = slgh->pcode.createOpConst(DELAY_SLOT,*$3); delete $3; } | GOTO_KEY jumpdest ';' { $$ = slgh->pcode.createOpNoOut(CPUI_BRANCH,new ExprTree($2)); } | IF_KEY expr GOTO_KEY jumpdest ';' { $$ = slgh->pcode.createOpNoOut(CPUI_CBRANCH,new ExprTree($4),$2); } | GOTO_KEY '[' expr ']' ';' { $$ = slgh->pcode.createOpNoOut(CPUI_BRANCHIND,$3); } | CALL_KEY jumpdest ';' { $$ = slgh->pcode.createOpNoOut(CPUI_CALL,new ExprTree($2)); } | CALL_KEY '[' expr ']' ';' { $$ = slgh->pcode.createOpNoOut(CPUI_CALLIND,$3); } | RETURN_KEY ';' { slgh->reportError("Must specify an indirect parameter for return"); YYERROR; } | RETURN_KEY '[' expr ']' ';' { $$ = slgh->pcode.createOpNoOut(CPUI_RETURN,$3); } | MACROSYM '(' paramlist ')' ';' { $$ = slgh->createMacroUse($1,$3); } | label { $$ = slgh->pcode.placeLabel( $1 ); } ; expr: varnode { $$ = new ExprTree($1); 
} | sizedstar expr %prec '!' { $$ = slgh->pcode.createLoad($1,$2); } | '(' expr ')' { $$ = $2; } | expr '+' expr { $$ = slgh->pcode.createOp(CPUI_INT_ADD,$1,$3); } | expr '-' expr { $$ = slgh->pcode.createOp(CPUI_INT_SUB,$1,$3); } | expr OP_EQUAL expr { $$ = slgh->pcode.createOp(CPUI_INT_EQUAL,$1,$3); } | expr OP_NOTEQUAL expr { $$ = slgh->pcode.createOp(CPUI_INT_NOTEQUAL,$1,$3); } | expr '<' expr { $$ = slgh->pcode.createOp(CPUI_INT_LESS,$1,$3); } | expr OP_GREATEQUAL expr { $$ = slgh->pcode.createOp(CPUI_INT_LESSEQUAL,$3,$1); } | expr OP_LESSEQUAL expr { $$ = slgh->pcode.createOp(CPUI_INT_LESSEQUAL,$1,$3); } | expr '>' expr { $$ = slgh->pcode.createOp(CPUI_INT_LESS,$3,$1); } | expr OP_SLESS expr { $$ = slgh->pcode.createOp(CPUI_INT_SLESS,$1,$3); } | expr OP_SGREATEQUAL expr { $$ = slgh->pcode.createOp(CPUI_INT_SLESSEQUAL,$3,$1); } | expr OP_SLESSEQUAL expr { $$ = slgh->pcode.createOp(CPUI_INT_SLESSEQUAL,$1,$3); } | expr OP_SGREAT expr { $$ = slgh->pcode.createOp(CPUI_INT_SLESS,$3,$1); } | '-' expr %prec '!' { $$ = slgh->pcode.createOp(CPUI_INT_2COMP,$2); } | '~' expr { $$ = slgh->pcode.createOp(CPUI_INT_NEGATE,$2); } | expr '^' expr { $$ = slgh->pcode.createOp(CPUI_INT_XOR,$1,$3); } | expr '&' expr { $$ = slgh->pcode.createOp(CPUI_INT_AND,$1,$3); } | expr '|' expr { $$ = slgh->pcode.createOp(CPUI_INT_OR,$1,$3); } | expr OP_LEFT expr { $$ = slgh->pcode.createOp(CPUI_INT_LEFT,$1,$3); } | expr OP_RIGHT expr { $$ = slgh->pcode.createOp(CPUI_INT_RIGHT,$1,$3); } | expr OP_SRIGHT expr { $$ = slgh->pcode.createOp(CPUI_INT_SRIGHT,$1,$3); } | expr '*' expr { $$ = slgh->pcode.createOp(CPUI_INT_MULT,$1,$3); } | expr '/' expr { $$ = slgh->pcode.createOp(CPUI_INT_DIV,$1,$3); } | expr OP_SDIV expr { $$ = slgh->pcode.createOp(CPUI_INT_SDIV,$1,$3); } | expr '%' expr { $$ = slgh->pcode.createOp(CPUI_INT_REM,$1,$3); } | expr OP_SREM expr { $$ = slgh->pcode.createOp(CPUI_INT_SREM,$1,$3); } | '!' 
expr { $$ = slgh->pcode.createOp(CPUI_BOOL_NEGATE,$2); } | expr OP_BOOL_XOR expr { $$ = slgh->pcode.createOp(CPUI_BOOL_XOR,$1,$3); } | expr OP_BOOL_AND expr { $$ = slgh->pcode.createOp(CPUI_BOOL_AND,$1,$3); } | expr OP_BOOL_OR expr { $$ = slgh->pcode.createOp(CPUI_BOOL_OR,$1,$3); } | expr OP_FEQUAL expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_EQUAL,$1,$3); } | expr OP_FNOTEQUAL expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_NOTEQUAL,$1,$3); } | expr OP_FLESS expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_LESS,$1,$3); } | expr OP_FGREAT expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_LESS,$3,$1); } | expr OP_FLESSEQUAL expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_LESSEQUAL,$1,$3); } | expr OP_FGREATEQUAL expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_LESSEQUAL,$3,$1); } | expr OP_FADD expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_ADD,$1,$3); } | expr OP_FSUB expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_SUB,$1,$3); } | expr OP_FMULT expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_MULT,$1,$3); } | expr OP_FDIV expr { $$ = slgh->pcode.createOp(CPUI_FLOAT_DIV,$1,$3); } | OP_FSUB expr %prec '!' 
{ $$ = slgh->pcode.createOp(CPUI_FLOAT_NEG,$2); } | OP_ABS '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_ABS,$3); } | OP_SQRT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_SQRT,$3); } | OP_SEXT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_INT_SEXT,$3); } | OP_ZEXT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_INT_ZEXT,$3); } | OP_CARRY '(' expr ',' expr ')' { $$ = slgh->pcode.createOp(CPUI_INT_CARRY,$3,$5); } | OP_SCARRY '(' expr ',' expr ')' { $$ = slgh->pcode.createOp(CPUI_INT_SCARRY,$3,$5); } | OP_SBORROW '(' expr ',' expr ')' { $$ = slgh->pcode.createOp(CPUI_INT_SBORROW,$3,$5); } | OP_FLOAT2FLOAT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_FLOAT2FLOAT,$3); } | OP_INT2FLOAT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_INT2FLOAT,$3); } | OP_NAN '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_NAN,$3); } | OP_TRUNC '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_TRUNC,$3); } | OP_CEIL '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_CEIL,$3); } | OP_FLOOR '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_FLOOR,$3); } | OP_ROUND '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_FLOAT_ROUND,$3); } | OP_NEW '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_NEW,$3); } | OP_NEW '(' expr ',' expr ')' { $$ = slgh->pcode.createOp(CPUI_NEW,$3,$5); } | OP_POPCOUNT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_POPCOUNT,$3); } | OP_LZCOUNT '(' expr ')' { $$ = slgh->pcode.createOp(CPUI_LZCOUNT,$3); } | specificsymbol '(' integervarnode ')' { $$ = slgh->pcode.createOp(CPUI_SUBPIECE,new ExprTree($1->getVarnode()),new ExprTree($3)); } | specificsymbol ':' INTEGER { $$ = slgh->pcode.createBitRange($1,0,(uint4)(*$3 * 8)); delete $3; } | specificsymbol '[' INTEGER ',' INTEGER ']' { $$ = slgh->pcode.createBitRange($1,(uint4)*$3,(uint4)*$5); delete $3, delete $5; } | BITSYM { $$=slgh->pcode.createBitRange($1->getParentSymbol(),$1->getBitOffset(),$1->numBits()); } | USEROPSYM '(' paramlist ')' { $$ = slgh->pcode.createUserOp($1,$3); } | OP_CPOOLREF '(' 
paramlist ')' { if ((*$3).size() < 2) { string errmsg = "Must at least two inputs to cpool"; slgh->reportError(errmsg); YYERROR; } $$ = slgh->pcode.createVariadic(CPUI_CPOOLREF,$3); } ; sizedstar: '*' '[' SPACESYM ']' ':' INTEGER { $$ = new StarQuality; $$->size = *$6; delete $6; $$->id=ConstTpl($3->getSpace()); } | '*' '[' SPACESYM ']' { $$ = new StarQuality; $$->size = 0; $$->id=ConstTpl($3->getSpace()); } | '*' ':' INTEGER { $$ = new StarQuality; $$->size = *$3; delete $3; $$->id=ConstTpl(slgh->getDefaultCodeSpace()); } | '*' { $$ = new StarQuality; $$->size = 0; $$->id=ConstTpl(slgh->getDefaultCodeSpace()); } ; jumpdest: JUMPSYM { VarnodeTpl *sym = $1->getVarnode(); $$ = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),sym->getOffset(),ConstTpl(ConstTpl::j_curspace_size)); delete sym; } | INTEGER { $$ = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::j_curspace_size)); delete $1; } | BADINTEGER { $$ = new VarnodeTpl(ConstTpl(ConstTpl::j_curspace),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::j_curspace_size)); slgh->reportError("Parsed integer is too big (overflow)"); } | OPERANDSYM { $$ = $1->getVarnode(); $1->setCodeAddress(); } | INTEGER '[' SPACESYM ']' { AddrSpace *spc = $3->getSpace(); $$ = new VarnodeTpl(ConstTpl(spc),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,spc->getAddrSize())); delete $1; } | label { $$ = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::j_relative,$1->getIndex()),ConstTpl(ConstTpl::real,sizeof(uintm))); $1->incrementRefCount(); } | STRING { string errmsg = "Unknown jump destination: "+*$1; delete $1; slgh->reportError(errmsg); YYERROR; } ; varnode: specificsymbol { $$ = $1->getVarnode(); } | integervarnode { $$ = $1; } | STRING { string errmsg = "Unknown varnode parameter: "+*$1; delete $1; slgh->reportError(errmsg); YYERROR; } | SUBTABLESYM { string errmsg = "Subtable not attached to operand: "+$1->getName(); slgh->reportError(errmsg); YYERROR; } ; 
integervarnode: INTEGER { $$ = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,0)); delete $1; } | BADINTEGER { $$ = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,0),ConstTpl(ConstTpl::real,0)); slgh->reportError("Parsed integer is too big (overflow)"); } | INTEGER ':' INTEGER { $$ = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,*$3)); delete $1; delete $3; } | '&' varnode { $$ = slgh->pcode.addressOf($2,0); } | '&' ':' INTEGER varnode { $$ = slgh->pcode.addressOf($4,*$3); delete $3; } ; lhsvarnode: specificsymbol { $$ = $1->getVarnode(); } | STRING { string errmsg = "Unknown assignment varnode: "+*$1; delete $1; slgh->reportError(errmsg); YYERROR; } | SUBTABLESYM { string errmsg = "Subtable not attached to operand: "+$1->getName(); slgh->reportError(errmsg); YYERROR; } ; label: '<' LABELSYM '>' { $$ = $2; } | '<' STRING '>' { $$ = slgh->pcode.defineLabel( $2 ); } ; exportvarnode: specificsymbol { $$ = $1->getVarnode(); } | '&' varnode { $$ = slgh->pcode.addressOf($2,0); } | '&' ':' INTEGER varnode { $$ = slgh->pcode.addressOf($4,*$3); delete $3; } | INTEGER ':' INTEGER { $$ = new VarnodeTpl(ConstTpl(slgh->getConstantSpace()),ConstTpl(ConstTpl::real,*$1),ConstTpl(ConstTpl::real,*$3)); delete $1; delete $3; } | STRING { string errmsg="Unknown export varnode: "+*$1; delete $1; slgh->reportError(errmsg); YYERROR; } | SUBTABLESYM { string errmsg = "Subtable not attached to operand: "+$1->getName(); slgh->reportError(errmsg); YYERROR; } ; familysymbol: VALUESYM { $$ = $1; } | VALUEMAPSYM { $$ = $1; } | CONTEXTSYM { $$ = $1; } | NAMESYM { $$ = $1; } | VARLISTSYM { $$ = $1; } ; specificsymbol: VARSYM { $$ = $1; } | SPECSYM { $$ = $1; } | OPERANDSYM { $$ = $1; } | JUMPSYM { $$ = $1; } ; charstring: CHAR { $$ = new string; (*$$) += $1; } | charstring CHAR { $$ = $1; (*$$) += $2; } ; intblist: '[' intbpart ']' { $$ = $2; } | INTEGER { $$ 
= new vector; $$->push_back(intb(*$1)); delete $1; } | '-' INTEGER { $$ = new vector; $$->push_back(-intb(*$2)); delete $2; } ; intbpart: INTEGER { $$ = new vector; $$->push_back(intb(*$1)); delete $1; } | '-' INTEGER { $$ = new vector; $$->push_back(-intb(*$2)); delete $2; } | STRING { if (*$1!="_") { string errmsg = "Expecting integer but saw: "+*$1; delete $1; slgh->reportError(errmsg); YYERROR; } $$ = new vector; $$->push_back((intb)0xBADBEEF); delete $1; } | intbpart INTEGER { $$ = $1; $$->push_back(intb(*$2)); delete $2; } | intbpart '-' INTEGER { $$ = $1; $$->push_back(-intb(*$3)); delete $3; } | intbpart STRING { if (*$2!="_") { string errmsg = "Expecting integer but saw: "+*$2; delete $2; slgh->reportError(errmsg); YYERROR; } $$ = $1; $$->push_back((intb)0xBADBEEF); delete $2; } ; stringlist: '[' stringpart ']' { $$ = $2; } | STRING { $$ = new vector; $$->push_back(*$1); delete $1; } ; stringpart: STRING { $$ = new vector; $$->push_back( *$1 ); delete $1; } | stringpart STRING { $$ = $1; $$->push_back(*$2); delete $2; } | stringpart anysymbol { string errmsg = $2->getName()+": redefined"; slgh->reportError(errmsg); YYERROR; } ; anystringlist: '[' anystringpart ']' { $$ = $2; } ; anystringpart: STRING { $$ = new vector; $$->push_back( *$1 ); delete $1; } | anysymbol { $$ = new vector; $$->push_back( $1->getName() ); } | anystringpart STRING { $$ = $1; $$->push_back(*$2); delete $2; } | anystringpart anysymbol { $$ = $1; $$->push_back($2->getName()); } ; valuelist: '[' valuepart ']' { $$ = $2; } | VALUESYM { $$ = new vector; $$->push_back($1); } | CONTEXTSYM { $$ = new vector; $$->push_back($1); } ; valuepart: VALUESYM { $$ = new vector; $$->push_back( $1 ); } | CONTEXTSYM { $$ = new vector; $$->push_back($1); } | valuepart VALUESYM { $$ = $1; $$->push_back($2); } | valuepart CONTEXTSYM { $$ = $1; $$->push_back($2); } | valuepart STRING { string errmsg = *$2+": is not a value pattern"; delete $2; slgh->reportError(errmsg); YYERROR; } ; varlist: '[' varpart 
']' { $$ = $2; } | VARSYM { $$ = new vector; $$->push_back($1); } ; varpart: VARSYM { $$ = new vector; $$->push_back($1); } | STRING { if (*$1!="_") { string errmsg = *$1+": is not a varnode symbol"; delete $1; slgh->reportError(errmsg); YYERROR; } $$ = new vector; $$->push_back((SleighSymbol *)0); delete $1; } | varpart VARSYM { $$ = $1; $$->push_back($2); } | varpart STRING { if (*$2!="_") { string errmsg = *$2+": is not a varnode symbol"; delete $2; slgh->reportError(errmsg); YYERROR; } $$ = $1; $$->push_back((SleighSymbol *)0); delete $2; } ; paramlist: /* EMPTY */ { $$ = new vector; } | expr { $$ = new vector; $$->push_back($1); } | paramlist ',' expr { $$ = $1; $$->push_back($3); } ; oplist: /* EMPTY */ { $$ = new vector; } | STRING { $$ = new vector; $$->push_back(*$1); delete $1; } | oplist ',' STRING { $$ = $1; $$->push_back(*$3); delete $3; } ; anysymbol: SPACESYM { $$ = $1; } | SECTIONSYM { $$ = $1; } | TOKENSYM { $$ = $1; } | USEROPSYM { $$ = $1; } | MACROSYM { $$ = $1; } | SUBTABLESYM { $$ = $1; } | VALUESYM { $$ = $1; } | VALUEMAPSYM { $$ = $1; } | CONTEXTSYM { $$ = $1; } | NAMESYM { $$ = $1; } | VARSYM { $$ = $1; } | VARLISTSYM { $$ = $1; } | OPERANDSYM { $$ = $1; } | JUMPSYM { $$ = $1; } | BITSYM { $$ = $1; } ; %% int sleigherror(const char *s) { slgh->reportError(s); return 0; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/slghpatexpress.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include "slghpatexpress.hh" #include "sleighbase.hh" namespace ghidra { int4 TokenPattern::resolveTokens(const TokenPattern &tok1,const TokenPattern &tok2) { // Use the token lists to decide how the two patterns // should be aligned relative to each other // return how much -tok2- needs to be shifted // and set the resulting tokenlist and ellipses bool reversedirection = false; leftellipsis = false; rightellipsis = false; int4 ressa = 0; int4 minsize = tok1.toklist.size() < tok2.toklist.size() ? tok1.toklist.size() : tok2.toklist.size(); if (minsize == 0) { // Check if pattern doesn't care about tokens if ((tok1.toklist.size()==0)&&(tok1.leftellipsis==false)&&(tok1.rightellipsis==false)) { toklist = tok2.toklist; leftellipsis = tok2.leftellipsis; rightellipsis = tok2.rightellipsis; return 0; } else if ((tok2.toklist.size()==0)&&(tok2.leftellipsis==false)&&(tok2.rightellipsis==false)) { toklist = tok1.toklist; leftellipsis = tok1.leftellipsis; rightellipsis = tok1.rightellipsis; return 0; } // If one of the ellipses is true then the pattern // still cares about tokens even though none are // specified } if (tok1.leftellipsis) { reversedirection = true; if (tok2.rightellipsis) throw SleighError("Right/left ellipsis"); else if (tok2.leftellipsis) leftellipsis = true; else if (tok1.toklist.size() != minsize) { ostringstream msg; msg << "Mismatched pattern sizes -- " << dec << tok1.toklist.size() << " != " << dec << minsize; throw SleighError(msg.str()); } else if (tok1.toklist.size()==tok2.toklist.size()) throw SleighError("Pattern size cannot vary (missing '...'?)"); } else if (tok1.rightellipsis) { if (tok2.leftellipsis) throw SleighError("Left/right ellipsis"); else if (tok2.rightellipsis) rightellipsis = true; else if (tok1.toklist.size() != minsize) { ostringstream msg; msg << "Mismatched pattern sizes -- " << dec << tok1.toklist.size() << " != " << dec << 
minsize; throw SleighError(msg.str()); } else if (tok1.toklist.size()==tok2.toklist.size()) throw SleighError("Pattern size cannot vary (missing '...'?)"); } else { if (tok2.leftellipsis) { reversedirection = true; if (tok2.toklist.size() != minsize) { ostringstream msg; msg << "Mismatched pattern sizes -- " << dec << tok2.toklist.size() << " != " << dec << minsize; throw SleighError(msg.str()); } else if (tok1.toklist.size()==tok2.toklist.size()) throw SleighError("Pattern size cannot vary (missing '...'?)"); } else if (tok2.rightellipsis) { if (tok2.toklist.size() != minsize) { ostringstream msg; msg << "Mismatched pattern sizes -- " << dec << tok2.toklist.size() << " != " << dec << minsize; throw SleighError(msg.str()); } else if (tok1.toklist.size()==tok2.toklist.size()) throw SleighError("Pattern size cannot vary (missing '...'?)"); } else { if (tok2.toklist.size() != tok1.toklist.size()) { ostringstream msg; msg << "Mismatched pattern sizes -- " << dec << tok2.toklist.size() << " != " << dec << tok1.toklist.size(); throw SleighError(msg.str()); } } } if (reversedirection) { for(int4 i=0;i::const_iterator iter; for(iter=toklist.begin();iter!=toklist.end();++iter) sa += (*iter)->getSize(); for(iter=tokpat.toklist.begin();iter!=tokpat.toklist.end();++iter) res.toklist.push_back(*iter); res.rightellipsis = tokpat.rightellipsis; } if (res.rightellipsis && res.leftellipsis) throw SleighError("Double ellipsis in pattern"); if (sa < 0) res.pattern = pattern->doAnd(tokpat.pattern,0); else res.pattern = pattern->doAnd(tokpat.pattern,sa); return res; } TokenPattern TokenPattern::commonSubPattern(const TokenPattern &tokpat) const { // Construct pattern that matches anything // that matches either -this- or -tokpat- TokenPattern patres((Pattern *)0); // Empty shell int4 i; bool reversedirection = false; if (leftellipsis||tokpat.leftellipsis) { if (rightellipsis||tokpat.rightellipsis) throw SleighError("Right/left ellipsis in commonSubPattern"); reversedirection = true; } 
// Find common subset of tokens and ellipses patres.leftellipsis = leftellipsis || tokpat.leftellipsis; patres.rightellipsis = rightellipsis || tokpat.rightellipsis; int4 minnum = toklist.size(); int4 maxnum = tokpat.toklist.size(); if (maxnum < minnum) { int4 tmp = minnum; minnum = maxnum; maxnum = tmp; } if (reversedirection) { for(i=0;icommonSubPattern(tokpat.pattern,0); return patres; } int4 TokenPattern::getMinimumLength(void) const { // Add up length of concatenated tokens int4 length = 0; for(int4 i=0;igetSize(); return length; } void PatternExpression::release(PatternExpression *p) { p->refcount -= 1; if (p->refcount <= 0) delete p; } PatternExpression *PatternExpression::decodeExpression(Decoder &decoder,Translate *trans) { PatternExpression *res; uint4 el = decoder.peekElement(); if (el == sla::ELEM_TOKENFIELD) res = new TokenField(); else if (el == sla::ELEM_CONTEXTFIELD) res = new ContextField(); else if (el == sla::ELEM_INTB) res = new ConstantValue(); else if (el == sla::ELEM_OPERAND_EXP) res = new OperandValue(); else if (el == sla::ELEM_START_EXP) res = new StartInstructionValue(); else if (el == sla::ELEM_END_EXP) res = new EndInstructionValue(); else if (el == sla::ELEM_PLUS_EXP) res = new PlusExpression(); else if (el == sla::ELEM_SUB_EXP) res = new SubExpression(); else if (el == sla::ELEM_MULT_EXP) res = new MultExpression(); else if (el == sla::ELEM_LSHIFT_EXP) res = new LeftShiftExpression(); else if (el == sla::ELEM_RSHIFT_EXP) res = new RightShiftExpression(); else if (el == sla::ELEM_AND_EXP) res = new AndExpression(); else if (el == sla::ELEM_OR_EXP) res = new OrExpression(); else if (el == sla::ELEM_XOR_EXP) res = new XorExpression(); else if (el == sla::ELEM_DIV_EXP) res = new DivExpression(); else if (el == sla::ELEM_MINUS_EXP) res = new MinusExpression(); else if (el == sla::ELEM_NOT_EXP) res = new NotExpression(); else return (PatternExpression *)0; res->decode(decoder,trans); return res; } static intb 
getInstructionBytes(ParserWalker &walker,int4 bytestart,int4 byteend,bool bigendian) { // Build a intb from the instruction bytes intb res = 0; uintm tmp; int4 size,tmpsize; size = byteend-bytestart+1; tmpsize = size; while(tmpsize >= sizeof(uintm)) { tmp = walker.getInstructionBytes(bytestart,sizeof(uintm)); res <<= 8*sizeof(uintm); res |= tmp; bytestart += sizeof(uintm); tmpsize -= sizeof(uintm); } if (tmpsize > 0) { tmp = walker.getInstructionBytes(bytestart,tmpsize); res <<= 8*tmpsize; res |= tmp; } if (!bigendian) byte_swap(res,size); return res; } static intb getContextBytes(ParserWalker &walker,int4 bytestart,int4 byteend) { // Build a intb from the context bytes intb res = 0; uintm tmp; int4 size; size = byteend-bytestart+1; while(size >= sizeof(uintm)) { tmp = walker.getContextBytes(bytestart,sizeof(uintm)); res <<= 8*sizeof(uintm); res |= tmp; bytestart += sizeof(uintm); size = byteend-bytestart+1; } if (size > 0) { tmp = walker.getContextBytes(bytestart,size); res <<= 8*size; res |= tmp; } return res; } TokenField::TokenField(Token *tk,bool s,int4 bstart,int4 bend) { tok = tk; bigendian = tok->isBigEndian(); signbit = s; bitstart = bstart; bitend = bend; if (tk->isBigEndian()) { byteend = (tk->getSize()*8 - bitstart - 1)/8; bytestart = (tk->getSize()*8 - bitend - 1)/8; } else { bytestart = bitstart/8; byteend = bitend/8; } shift = bitstart % 8; } intb TokenField::getValue(ParserWalker &walker) const { // Construct value given specific instruction stream intb res = getInstructionBytes(walker,bytestart,byteend,bigendian); res >>= shift; if (signbit) res = sign_extend(res,bitend-bitstart); else res = zero_extend(res,bitend-bitstart); return res; } TokenPattern TokenField::genPattern(intb val) const { // Generate corresponding pattern if the // value is forced to be val return TokenPattern(tok,val,bitstart,bitend); } void TokenField::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_TOKENFIELD); encoder.writeBool(sla::ATTRIB_BIGENDIAN, 
bigendian); encoder.writeBool(sla::ATTRIB_SIGNBIT, signbit); encoder.writeSignedInteger(sla::ATTRIB_STARTBIT, bitstart); encoder.writeSignedInteger(sla::ATTRIB_ENDBIT, bitend); encoder.writeSignedInteger(sla::ATTRIB_STARTBYTE, bytestart); encoder.writeSignedInteger(sla::ATTRIB_ENDBYTE, byteend); encoder.writeSignedInteger(sla::ATTRIB_SHIFT, shift); encoder.closeElement(sla::ELEM_TOKENFIELD); } void TokenField::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(sla::ELEM_TOKENFIELD); tok = (Token *)0; bigendian = decoder.readBool(sla::ATTRIB_BIGENDIAN); signbit = decoder.readBool(sla::ATTRIB_SIGNBIT); bitstart = decoder.readSignedInteger(sla::ATTRIB_STARTBIT); bitend = decoder.readSignedInteger(sla::ATTRIB_ENDBIT); bytestart = decoder.readSignedInteger(sla::ATTRIB_STARTBYTE); byteend = decoder.readSignedInteger(sla::ATTRIB_ENDBYTE); shift = decoder.readSignedInteger(sla::ATTRIB_SHIFT); decoder.closeElement(el); } ContextField::ContextField(bool s,int4 sbit,int4 ebit) { signbit = s; startbit = sbit; endbit = ebit; startbyte = startbit/8; endbyte = endbit/8; shift = 7 - (endbit%8); } intb ContextField::getValue(ParserWalker &walker) const { intb res = getContextBytes(walker,startbyte,endbyte); res >>= shift; if (signbit) res = sign_extend(res,endbit-startbit); else res = zero_extend(res,endbit-startbit); return res; } TokenPattern ContextField::genPattern(intb val) const { return TokenPattern(val,startbit,endbit); } void ContextField::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_CONTEXTFIELD); encoder.writeBool(sla::ATTRIB_SIGNBIT, signbit); encoder.writeSignedInteger(sla::ATTRIB_STARTBIT, startbit); encoder.writeSignedInteger(sla::ATTRIB_ENDBIT, endbit); encoder.writeSignedInteger(sla::ATTRIB_STARTBYTE, startbyte); encoder.writeSignedInteger(sla::ATTRIB_ENDBYTE, endbyte); encoder.writeSignedInteger(sla::ATTRIB_SHIFT, shift); encoder.closeElement(sla::ELEM_CONTEXTFIELD); } void ContextField::decode(Decoder &decoder,Translate 
*trans) { uint4 el = decoder.openElement(sla::ELEM_CONTEXTFIELD); signbit = decoder.readBool(sla::ATTRIB_SIGNBIT); startbit = decoder.readSignedInteger(sla::ATTRIB_STARTBIT); endbit = decoder.readSignedInteger(sla::ATTRIB_ENDBIT); startbyte = decoder.readSignedInteger(sla::ATTRIB_STARTBYTE); endbyte = decoder.readSignedInteger(sla::ATTRIB_ENDBYTE); shift = decoder.readSignedInteger(sla::ATTRIB_SHIFT); decoder.closeElement(el); } void ConstantValue::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_INTB); encoder.writeSignedInteger(sla::ATTRIB_VAL, val); encoder.closeElement(sla::ELEM_INTB); } void ConstantValue::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(sla::ELEM_INTB); val = decoder.readSignedInteger(sla::ATTRIB_VAL); decoder.closeElement(el); } void StartInstructionValue::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_START_EXP); encoder.closeElement(sla::ELEM_START_EXP); } void StartInstructionValue::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(sla::ELEM_START_EXP); decoder.closeElement(el); } void EndInstructionValue::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_END_EXP); encoder.closeElement(sla::ELEM_END_EXP); } void EndInstructionValue::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(sla::ELEM_END_EXP); decoder.closeElement(el); } void Next2InstructionValue::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_NEXT2_EXP); encoder.closeElement(sla::ELEM_NEXT2_EXP); } void Next2InstructionValue::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(sla::ELEM_NEXT2_EXP); decoder.closeElement(el); } TokenPattern OperandValue::genPattern(intb val) const { // In general an operand cannot be interpreted as any sort // of static constraint in an equation, and if it is being // defined by the equation, it should be on the left hand side. 
// If the operand has a defining expression already, use // of the operand in the equation makes sense, its defining // expression would become a subexpression in the full // expression. However, since this can be accomplished // by explicitly copying the subexpression into the full // expression, we don't support operands as placeholders. throw SleighError("Operand used in pattern expression"); } intb OperandValue::minValue(void) const { throw SleighError("Operand used in pattern expression"); } intb OperandValue::maxValue(void) const { throw SleighError("Operand used in pattern expression"); } intb OperandValue::getValue(ParserWalker &walker) const { // Get the value of an operand when it is used in // an expression. OperandSymbol *sym = ct->getOperand(index); PatternExpression *patexp = sym->getDefiningExpression(); if (patexp == (PatternExpression *)0) { TripleSymbol *defsym = sym->getDefiningSymbol(); if (defsym != (TripleSymbol *)0) patexp = defsym->getPatternExpression(); if (patexp == (PatternExpression *)0) return 0; } ConstructState tempstate; ParserWalker newwalker(walker.getParserContext()); newwalker.setOutOfBandState(ct,index,&tempstate,walker); intb res = patexp->getValue(newwalker); return res; } intb OperandValue::getSubValue(const vector &replace,int4 &listpos) const { OperandSymbol *sym = ct->getOperand(index); return sym->getDefiningExpression()->getSubValue(replace,listpos); } bool OperandValue::isConstructorRelative(void) const { OperandSymbol *sym = ct->getOperand(index); return (sym->getOffsetBase()==-1); } const string &OperandValue::getName(void) const { OperandSymbol *sym = ct->getOperand(index); return sym->getName(); } void OperandValue::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_OPERAND_EXP); encoder.writeSignedInteger(sla::ATTRIB_INDEX, index); encoder.writeUnsignedInteger(sla::ATTRIB_TABLE, ct->getParent()->getId()); encoder.writeUnsignedInteger(sla::ATTRIB_CT, ct->getId()); // Save id of our constructor 
encoder.closeElement(sla::ELEM_OPERAND_EXP); } void OperandValue::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(sla::ELEM_OPERAND_EXP); index = decoder.readSignedInteger(sla::ATTRIB_INDEX); uintm tabid = decoder.readUnsignedInteger(sla::ATTRIB_TABLE); uintm ctid = decoder.readUnsignedInteger(sla::ATTRIB_CT); SleighBase *sleigh = (SleighBase *)trans; SubtableSymbol *tab = dynamic_cast(sleigh->findSymbol(tabid)); ct = tab->getConstructor(ctid); decoder.closeElement(el); } BinaryExpression::BinaryExpression(PatternExpression *l,PatternExpression *r) { (left=l)->layClaim(); (right=r)->layClaim(); } BinaryExpression::~BinaryExpression(void) { // Delete only non-pattern values if (left != (PatternExpression *)0) PatternExpression::release(left); if (right != (PatternExpression *)0) PatternExpression::release(right); } void BinaryExpression::encode(Encoder &encoder) const { // Outer tag is generated by derived classes left->encode(encoder); right->encode(encoder); } void BinaryExpression::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(); left = PatternExpression::decodeExpression(decoder,trans); right = PatternExpression::decodeExpression(decoder,trans); left->layClaim(); right->layClaim(); decoder.closeElement(el); } UnaryExpression::UnaryExpression(PatternExpression *u) { (unary=u)->layClaim(); } UnaryExpression::~UnaryExpression(void) { // Delete only non-pattern values if (unary != (PatternExpression *)0) PatternExpression::release(unary); } void UnaryExpression::encode(Encoder &encoder) const { // Outer tag is generated by derived classes unary->encode(encoder); } void UnaryExpression::decode(Decoder &decoder,Translate *trans) { uint4 el = decoder.openElement(); unary = PatternExpression::decodeExpression(decoder,trans); unary->layClaim(); decoder.closeElement(el); } intb PlusExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = 
getRight()->getValue(walker); return leftval + rightval; } intb PlusExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval + rightval; } void PlusExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_PLUS_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_PLUS_EXP); } intb SubExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval - rightval; } intb SubExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval - rightval; } void SubExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_SUB_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_SUB_EXP); } intb MultExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval * rightval; } intb MultExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval * rightval; } void MultExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_MULT_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_MULT_EXP); } intb LeftShiftExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval << rightval; } intb LeftShiftExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be 
left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval << rightval; } void LeftShiftExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_LSHIFT_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_LSHIFT_EXP); } intb RightShiftExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval >> rightval; } intb RightShiftExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval >> rightval; } void RightShiftExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_RSHIFT_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_RSHIFT_EXP); } intb AndExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval & rightval; } intb AndExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval & rightval; } void AndExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_AND_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_AND_EXP); } intb OrExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval | rightval; } intb OrExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval | rightval; } void OrExpression::encode(Encoder &encoder) const { 
encoder.openElement(sla::ELEM_OR_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_OR_EXP); } intb XorExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval ^ rightval; } intb XorExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval ^ rightval; } void XorExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_XOR_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_XOR_EXP); } intb DivExpression::getValue(ParserWalker &walker) const { intb leftval = getLeft()->getValue(walker); intb rightval = getRight()->getValue(walker); return leftval / rightval; } intb DivExpression::getSubValue(const vector &replace,int4 &listpos) const { intb leftval = getLeft()->getSubValue(replace,listpos); // Must be left first intb rightval = getRight()->getSubValue(replace,listpos); return leftval / rightval; } void DivExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_DIV_EXP); BinaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_DIV_EXP); } intb MinusExpression::getValue(ParserWalker &walker) const { intb val = getUnary()->getValue(walker); return -val; } intb MinusExpression::getSubValue(const vector &replace,int4 &listpos) const { intb val = getUnary()->getSubValue(replace,listpos); return -val; } void MinusExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_MINUS_EXP); UnaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_MINUS_EXP); } intb NotExpression::getValue(ParserWalker &walker) const { intb val = getUnary()->getValue(walker); return ~val; } intb NotExpression::getSubValue(const vector &replace,int4 &listpos) const { intb val = getUnary()->getSubValue(replace,listpos); return ~val; } 
void NotExpression::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_NOT_EXP); UnaryExpression::encode(encoder); encoder.closeElement(sla::ELEM_NOT_EXP); } static bool advance_combo(vector &val,const vector &min,vector &max) { int4 i; i = 0; while(i &semval, vector &val) { TokenPattern respattern = lhs->genPattern(lhsval); for(int4 i=0;igenPattern(val[i])); return respattern; } void PatternEquation::release(PatternEquation *pateq) { pateq->refcount -= 1; if (pateq->refcount <= 0) delete pateq; } void OperandEquation::genPattern(const vector &ops) const { resultpattern = ops[index]; } bool OperandEquation::resolveOperandLeft(OperandResolve &state) const { OperandSymbol *sym = state.operands[ index ]; if (sym->isOffsetIrrelevant()) { sym->offsetbase = -1; sym->reloffset = 0; return true; } if (state.base == -2) // We have no base return false; sym->offsetbase = state.base; sym->reloffset = state.offset; state.cur_rightmost = index; state.size = 0; // Distance from right edge return true; } void OperandEquation::operandOrder(Constructor *ct,vector &order) const { OperandSymbol *sym = ct->getOperand(index); if (!sym->isMarked()) { order.push_back(sym); sym->setMark(); } } UnconstrainedEquation::UnconstrainedEquation(PatternExpression *p) { (patex=p)->layClaim(); } UnconstrainedEquation::~UnconstrainedEquation(void) { PatternExpression::release(patex); } void UnconstrainedEquation::genPattern(const vector &ops) const { resultpattern = patex->genMinPattern(ops); } bool UnconstrainedEquation::resolveOperandLeft(OperandResolve &state) const { state.cur_rightmost = -1; if (resultpattern.getLeftEllipsis()||resultpattern.getRightEllipsis()) // don't know length state.size = -1; else state.size = resultpattern.getMinimumLength(); return true; } ValExpressEquation::ValExpressEquation(PatternValue *l,PatternExpression *r) { (lhs=l)->layClaim(); (rhs=r)->layClaim(); } ValExpressEquation::~ValExpressEquation(void) { PatternExpression::release(lhs); 
PatternExpression::release(rhs); } bool ValExpressEquation::resolveOperandLeft(OperandResolve &state) const { state.cur_rightmost = -1; if (resultpattern.getLeftEllipsis()||resultpattern.getRightEllipsis()) // don't know length state.size = -1; else state.size = resultpattern.getMinimumLength(); return true; } void EqualEquation::genPattern(const vector &ops) const { intb lhsmin = lhs->minValue(); intb lhsmax = lhs->maxValue(); vector semval; vector min; vector max; vector cur; int4 count=0; rhs->listValues(semval); rhs->getMinMax(min,max); cur = min; do { intb val = rhs->getSubValue(cur); if ((val>=lhsmin)&&(val<=lhsmax)) { if (count==0) resultpattern = buildPattern(lhs,val,semval,cur); else resultpattern = resultpattern.doOr(buildPattern(lhs,val,semval,cur)); count += 1; } } while(advance_combo(cur,min,max)); if (count == 0) throw SleighError("Equal constraint is impossible to match"); } void NotEqualEquation::genPattern(const vector &ops) const { intb lhsmin = lhs->minValue(); intb lhsmax = lhs->maxValue(); vector semval; vector min; vector max; vector cur; int4 count=0; rhs->listValues(semval); rhs->getMinMax(min,max); cur = min; do { intb lhsval; intb val = rhs->getSubValue(cur); for(lhsval=lhsmin;lhsval<=lhsmax;++lhsval) { if (lhsval == val) continue; if (count==0) resultpattern = buildPattern(lhs,lhsval,semval,cur); else resultpattern = resultpattern.doOr(buildPattern(lhs,lhsval,semval,cur)); count += 1; } } while(advance_combo(cur,min,max)); if (count == 0) throw SleighError("Notequal constraint is impossible to match"); } void LessEquation::genPattern(const vector &ops) const { intb lhsmin = lhs->minValue(); intb lhsmax = lhs->maxValue(); vector semval; vector min; vector max; vector cur; int4 count=0; rhs->listValues(semval); rhs->getMinMax(min,max); cur = min; do { intb lhsval; intb val = rhs->getSubValue(cur); for(lhsval=lhsmin;lhsval<=lhsmax;++lhsval) { if (lhsval >= val) continue; if (count==0) resultpattern = buildPattern(lhs,lhsval,semval,cur); else 
resultpattern = resultpattern.doOr(buildPattern(lhs,lhsval,semval,cur)); count += 1; } } while(advance_combo(cur,min,max)); if (count == 0) throw SleighError("Less than constraint is impossible to match"); } void LessEqualEquation::genPattern(const vector &ops) const { intb lhsmin = lhs->minValue(); intb lhsmax = lhs->maxValue(); vector semval; vector min; vector max; vector cur; int4 count=0; rhs->listValues(semval); rhs->getMinMax(min,max); cur = min; do { intb lhsval; intb val = rhs->getSubValue(cur); for(lhsval=lhsmin;lhsval<=lhsmax;++lhsval) { if (lhsval > val) continue; if (count==0) resultpattern = buildPattern(lhs,lhsval,semval,cur); else resultpattern = resultpattern.doOr(buildPattern(lhs,lhsval,semval,cur)); count += 1; } } while(advance_combo(cur,min,max)); if (count == 0) throw SleighError("Less than or equal constraint is impossible to match"); } void GreaterEquation::genPattern(const vector &ops) const { intb lhsmin = lhs->minValue(); intb lhsmax = lhs->maxValue(); vector semval; vector min; vector max; vector cur; int4 count=0; rhs->listValues(semval); rhs->getMinMax(min,max); cur = min; do { intb lhsval; intb val = rhs->getSubValue(cur); for(lhsval=lhsmin;lhsval<=lhsmax;++lhsval) { if (lhsval <= val) continue; if (count==0) resultpattern = buildPattern(lhs,lhsval,semval,cur); else resultpattern = resultpattern.doOr(buildPattern(lhs,lhsval,semval,cur)); count += 1; } } while(advance_combo(cur,min,max)); if (count == 0) throw SleighError("Greater than constraint is impossible to match"); } void GreaterEqualEquation::genPattern(const vector &ops) const { intb lhsmin = lhs->minValue(); intb lhsmax = lhs->maxValue(); vector semval; vector min; vector max; vector cur; int4 count=0; rhs->listValues(semval); rhs->getMinMax(min,max); cur = min; do { intb lhsval; intb val = rhs->getSubValue(cur); for(lhsval=lhsmin;lhsval<=lhsmax;++lhsval) { if (lhsval < val) continue; if (count==0) resultpattern = buildPattern(lhs,lhsval,semval,cur); else resultpattern = 
resultpattern.doOr(buildPattern(lhs,lhsval,semval,cur)); count += 1; } } while(advance_combo(cur,min,max)); if (count == 0) throw SleighError("Greater than or equal constraint is impossible to match"); } EquationAnd::EquationAnd(PatternEquation *l,PatternEquation *r) { (left=l)->layClaim(); (right=r)->layClaim(); } EquationAnd::~EquationAnd(void) { PatternEquation::release(left); PatternEquation::release(right); } void EquationAnd::genPattern(const vector &ops) const { left->genPattern(ops); right->genPattern(ops); resultpattern = left->getTokenPattern().doAnd(right->getTokenPattern()); } bool EquationAnd::resolveOperandLeft(OperandResolve &state) const { int4 cur_rightmost = -1; // Initially we don't know our rightmost int4 cur_size = -1; // or size traversed since rightmost bool res = right->resolveOperandLeft(state); if (!res) return false; if ((state.cur_rightmost != -1)&&(state.size != -1)) { cur_rightmost = state.cur_rightmost; cur_size = state.size; } res = left->resolveOperandLeft(state); if (!res) return false; if ((state.cur_rightmost == -1)||(state.size == -1)) { state.cur_rightmost = cur_rightmost; state.size = cur_size; } return true; } void EquationAnd::operandOrder(Constructor *ct,vector &order) const { left->operandOrder(ct,order); // List operands left right->operandOrder(ct,order); // to right } EquationOr::EquationOr(PatternEquation *l,PatternEquation *r) { (left=l)->layClaim(); (right=r)->layClaim(); } EquationOr::~EquationOr(void) { PatternEquation::release(left); PatternEquation::release(right); } void EquationOr::genPattern(const vector &ops) const { left->genPattern(ops); right->genPattern(ops); resultpattern = left->getTokenPattern().doOr(right->getTokenPattern()); } bool EquationOr::resolveOperandLeft(OperandResolve &state) const { int4 cur_rightmost = -1; // Initially we don't know our rightmost int4 cur_size = -1; // or size traversed since rightmost bool res = right->resolveOperandLeft(state); if (!res) return false; if 
((state.cur_rightmost != -1)&&(state.size != -1)) { cur_rightmost = state.cur_rightmost; cur_size = state.size; } res = left->resolveOperandLeft(state); if (!res) return false; if ((state.cur_rightmost == -1)||(state.size == -1)) { state.cur_rightmost = cur_rightmost; state.size = cur_size; } return true; } void EquationOr::operandOrder(Constructor *ct,vector &order) const { left->operandOrder(ct,order); // List operands left right->operandOrder(ct,order); // to right } EquationCat::EquationCat(PatternEquation *l,PatternEquation *r) { (left=l)->layClaim(); (right=r)->layClaim(); } EquationCat::~EquationCat(void) { PatternEquation::release(left); PatternEquation::release(right); } void EquationCat::genPattern(const vector &ops) const { left->genPattern(ops); right->genPattern(ops); resultpattern = left->getTokenPattern().doCat(right->getTokenPattern()); } bool EquationCat::resolveOperandLeft(OperandResolve &state) const { bool res = left->resolveOperandLeft(state); if (!res) return false; int4 cur_base = state.base; int4 cur_offset = state.offset; if ((!left->getTokenPattern().getLeftEllipsis())&&(!left->getTokenPattern().getRightEllipsis())) { // Keep the same base state.offset += left->getTokenPattern().getMinimumLength(); // But add to its size } else if (state.cur_rightmost != -1) { state.base = state.cur_rightmost; state.offset = state.size; } else if (state.size != -1) { state.offset += state.size; } else { state.base = -2; // We have no anchor } int4 cur_rightmost = state.cur_rightmost; int4 cur_size = state.size; res = right->resolveOperandLeft(state); if (!res) return false; state.base = cur_base; // Restore base and offset state.offset = cur_offset; if (state.cur_rightmost == -1) { if ((state.size != -1)&&(cur_rightmost != -1)&&(cur_size != -1)) { state.cur_rightmost = cur_rightmost; state.size += cur_size; } } return true; } void EquationCat::operandOrder(Constructor *ct,vector &order) const { left->operandOrder(ct,order); // List operands left 
right->operandOrder(ct,order); // to right } void EquationLeftEllipsis::genPattern(const vector &ops) const { eq->genPattern(ops); resultpattern = eq->getTokenPattern(); resultpattern.setLeftEllipsis(true); } bool EquationLeftEllipsis::resolveOperandLeft(OperandResolve &state) const { int4 cur_base = state.base; state.base = -2; bool res = eq->resolveOperandLeft(state); if (!res) return false; state.base = cur_base; return true; } void EquationLeftEllipsis::operandOrder(Constructor *ct,vector &order) const { eq->operandOrder(ct,order); // List operands } void EquationRightEllipsis::genPattern(const vector &ops) const { eq->genPattern(ops); resultpattern = eq->getTokenPattern(); resultpattern.setRightEllipsis(true); } bool EquationRightEllipsis::resolveOperandLeft(OperandResolve &state) const { bool res = eq->resolveOperandLeft(state); if (!res) return false; state.size = -1; // Cannot predict size return true; } void EquationRightEllipsis::operandOrder(Constructor *ct,vector &order) const { eq->operandOrder(ct,order); // List operands } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/slghpatexpress.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef __SLGHPATEXPRESS_HH__ #define __SLGHPATEXPRESS_HH__ #include "slghpattern.hh" namespace ghidra { class TokenPattern { Pattern *pattern; vector toklist; bool leftellipsis; bool rightellipsis; static PatternBlock *buildSingle(int4 startbit,int4 endbit,uintm byteval); static PatternBlock *buildBigBlock(int4 size,int4 bitstart,int4 bitend,intb value); static PatternBlock *buildLittleBlock(int4 size,int4 bitstart,int4 bitend,intb value); int4 resolveTokens(const TokenPattern &tokpat1,const TokenPattern &tokpat2); TokenPattern(Pattern *pat) { pattern = pat; leftellipsis=false; rightellipsis=false; } public: TokenPattern(void); // TRUE pattern unassociated with a token TokenPattern(bool tf); // TRUE or FALSE pattern unassociated with a token TokenPattern(Token *tok); // TRUE pattern associated with token -tok- TokenPattern(Token *tok,intb value,int4 bitstart,int4 bitend); TokenPattern(intb value,int4 startbit,int4 endbit); TokenPattern(const TokenPattern &tokpat); ~TokenPattern(void) { delete pattern; } const TokenPattern &operator=(const TokenPattern &tokpat); void setLeftEllipsis(bool val) { leftellipsis = val; } void setRightEllipsis(bool val) { rightellipsis = val; } bool getLeftEllipsis(void) const { return leftellipsis; } bool getRightEllipsis(void) const { return rightellipsis; } TokenPattern doAnd(const TokenPattern &tokpat) const; TokenPattern doOr(const TokenPattern &tokpat) const; TokenPattern doCat(const TokenPattern &tokpat) const; TokenPattern commonSubPattern(const TokenPattern &tokpat) const; Pattern *getPattern(void) const { return pattern; } int4 getMinimumLength(void) const; bool alwaysTrue(void) const { return pattern->alwaysTrue(); } bool alwaysFalse(void) const { return pattern->alwaysFalse(); } bool alwaysInstructionTrue(void) const { return pattern->alwaysInstructionTrue(); } }; class PatternValue; class PatternExpression { int4 refcount; // Number of objects referencing this // for deletion protected: virtual ~PatternExpression(void) {} 
// Only delete through release public: PatternExpression(void) { refcount = 0; } virtual intb getValue(ParserWalker &walker) const=0; virtual TokenPattern genMinPattern(const vector &ops) const=0; virtual void listValues(vector &list) const=0; virtual void getMinMax(vector &minlist,vector &maxlist) const=0; virtual intb getSubValue(const vector &replace,int4 &listpos) const=0; virtual void encode(Encoder &encoder) const=0; virtual void decode(Decoder &decoder,Translate *trans)=0; intb getSubValue(const vector &replace) { int4 listpos = 0; return getSubValue(replace,listpos); } void layClaim(void) { refcount += 1; } static void release(PatternExpression *p); static PatternExpression *decodeExpression(Decoder &decoder,Translate *trans); }; class PatternValue : public PatternExpression { public: virtual TokenPattern genPattern(intb val) const=0; virtual void listValues(vector &list) const { list.push_back(this); } virtual void getMinMax(vector &minlist,vector &maxlist) const { minlist.push_back(minValue()); maxlist.push_back(maxValue()); } virtual intb getSubValue(const vector &replace,int4 &listpos) const { return replace[listpos++]; } virtual intb minValue(void) const=0; virtual intb maxValue(void) const=0; }; class TokenField : public PatternValue { Token *tok; bool bigendian; bool signbit; int4 bitstart,bitend; // Bits within the token, 0 bit is LEAST significant int4 bytestart,byteend; // Bytes to read to get value int4 shift; // Amount to shift to align value (bitstart % 8) public: TokenField(void) {} // For use with decode TokenField(Token *tk,bool s,int4 bstart,int4 bend); virtual intb getValue(ParserWalker &walker) const; virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(tok); } virtual TokenPattern genPattern(intb val) const; virtual intb minValue(void) const { return 0; } virtual intb maxValue(void) const { intb res=0; return zero_extend(~res,bitend-bitstart); } virtual void encode(Encoder &encoder) const; virtual void 
decode(Decoder &decoder,Translate *trans); }; class ContextField : public PatternValue { int4 startbit,endbit; int4 startbyte,endbyte; int4 shift; bool signbit; public: ContextField(void) {} // For use with decode ContextField(bool s,int4 sbit,int4 ebit); int4 getStartBit(void) const { return startbit; } int4 getEndBit(void) const { return endbit; } bool getSignBit(void) const { return signbit; } virtual intb getValue(ParserWalker &walker) const; virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual TokenPattern genPattern(intb val) const; virtual intb minValue(void) const { return 0; } virtual intb maxValue(void) const { intb res=0; return zero_extend(~res,(endbit-startbit)); } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class ConstantValue : public PatternValue { intb val; public: ConstantValue(void) {} // For use with decode ConstantValue(intb v) { val = v; } virtual intb getValue(ParserWalker &walker) const { return val; } virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual TokenPattern genPattern(intb v) const { return TokenPattern(val==v); } virtual intb minValue(void) const { return val; } virtual intb maxValue(void) const { return val; } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class StartInstructionValue : public PatternValue { public: StartInstructionValue(void) {} virtual intb getValue(ParserWalker &walker) const { return (intb)AddrSpace::byteToAddress(walker.getAddr().getOffset(),walker.getAddr().getSpace()->getWordSize()); } virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual TokenPattern genPattern(intb val) const { return TokenPattern(); } virtual intb minValue(void) const { return (intb)0; } virtual intb maxValue(void) const { return (intb)0; } virtual void encode(Encoder &encoder) const; virtual void 
decode(Decoder &decoder,Translate *trans); }; class EndInstructionValue : public PatternValue { public: EndInstructionValue(void) {} virtual intb getValue(ParserWalker &walker) const { return (intb)AddrSpace::byteToAddress(walker.getNaddr().getOffset(),walker.getNaddr().getSpace()->getWordSize()); } virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual TokenPattern genPattern(intb val) const { return TokenPattern(); } virtual intb minValue(void) const { return (intb)0; } virtual intb maxValue(void) const { return (intb)0; } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class Next2InstructionValue : public PatternValue { public: Next2InstructionValue(void) {} virtual intb getValue(ParserWalker &walker) const { return (intb)AddrSpace::byteToAddress(walker.getN2addr().getOffset(),walker.getN2addr().getSpace()->getWordSize()); } virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual TokenPattern genPattern(intb val) const { return TokenPattern(); } virtual intb minValue(void) const { return (intb)0; } virtual intb maxValue(void) const { return (intb)0; } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class Constructor; // Forward declaration class OperandSymbol; class OperandValue : public PatternValue { int4 index; // This is the defining field of expression Constructor *ct; // cached pointer to constructor public: OperandValue(void) { } // For use with decode OperandValue(int4 ind,Constructor *c) { index = ind; ct = c; } void changeIndex(int4 newind) { index = newind; } bool isConstructorRelative(void) const; const string &getName(void) const; virtual TokenPattern genPattern(intb val) const; virtual TokenPattern genMinPattern(const vector &ops) const { return ops[index]; } virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 
&listpos) const; virtual intb minValue(void) const; virtual intb maxValue(void) const; virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class BinaryExpression : public PatternExpression { PatternExpression *left,*right; protected: virtual ~BinaryExpression(void); public: BinaryExpression(void) { left = (PatternExpression *)0; right = (PatternExpression *)0; } // For use with decode BinaryExpression(PatternExpression *l,PatternExpression *r); PatternExpression *getLeft(void) const { return left; } PatternExpression *getRight(void) const { return right; } virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual void listValues(vector &list) const { left->listValues(list); right->listValues(list); } virtual void getMinMax(vector &minlist,vector &maxlist) const { left->getMinMax(minlist,maxlist); right->getMinMax(minlist,maxlist); } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class UnaryExpression : public PatternExpression { PatternExpression *unary; protected: virtual ~UnaryExpression(void); public: UnaryExpression(void) { unary = (PatternExpression *)0; } // For use with decode UnaryExpression(PatternExpression *u); PatternExpression *getUnary(void) const { return unary; } virtual TokenPattern genMinPattern(const vector &ops) const { return TokenPattern(); } virtual void listValues(vector &list) const { unary->listValues(list); } virtual void getMinMax(vector &minlist,vector &maxlist) const { unary->getMinMax(minlist,maxlist); } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,Translate *trans); }; class PlusExpression : public BinaryExpression { public: PlusExpression(void) {} // For use by decode PlusExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 
&listpos) const; virtual void encode(Encoder &encoder) const; }; class SubExpression : public BinaryExpression { public: SubExpression(void) {} // For use with decode SubExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class MultExpression : public BinaryExpression { public: MultExpression(void) {} // For use with decode MultExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class LeftShiftExpression : public BinaryExpression { public: LeftShiftExpression(void) {} LeftShiftExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class RightShiftExpression : public BinaryExpression { public: RightShiftExpression(void) {} RightShiftExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class AndExpression : public BinaryExpression { public: AndExpression(void) {} AndExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class OrExpression : public BinaryExpression { public: OrExpression(void) {} OrExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb 
getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class XorExpression : public BinaryExpression { public: XorExpression(void) {} XorExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class DivExpression : public BinaryExpression { public: DivExpression(void) {} DivExpression(PatternExpression *l,PatternExpression *r) : BinaryExpression(l,r) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class MinusExpression : public UnaryExpression { public: MinusExpression(void) {} MinusExpression(PatternExpression *u) : UnaryExpression(u) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; class NotExpression : public UnaryExpression { public: NotExpression(void) {} NotExpression(PatternExpression *u) : UnaryExpression(u) {} virtual intb getValue(ParserWalker &walker) const; virtual intb getSubValue(const vector &replace,int4 &listpos) const; virtual void encode(Encoder &encoder) const; }; struct OperandResolve { vector &operands; OperandResolve(vector &ops) : operands(ops) { base=-1; offset=0; cur_rightmost = -1; size = 0; } int4 base; // Current base operand (as we traverse the pattern equation from left to right) int4 offset; // Bytes we have traversed from the LEFT edge of the current base int4 cur_rightmost; // (resulting) rightmost operand in our pattern int4 size; // (resulting) bytes traversed from the LEFT edge of the rightmost }; // operandOrder returns a vector of the self-defining OperandSymbols as the appear // in left to right 
order in the pattern class PatternEquation { int4 refcount; // Number of objects referencing this protected: mutable TokenPattern resultpattern; // Resulting pattern generated by this equation virtual ~PatternEquation(void) {} // Only delete through release public: PatternEquation(void) { refcount = 0; } const TokenPattern &getTokenPattern(void) const { return resultpattern; } virtual void genPattern(const vector &ops) const=0; virtual bool resolveOperandLeft(OperandResolve &state) const=0; virtual void operandOrder(Constructor *ct,vector &order) const {} void layClaim(void) { refcount += 1; } static void release(PatternEquation *pateq); }; class OperandEquation : public PatternEquation { // Equation that defines operand int4 index; public: OperandEquation(int4 ind) { index = ind; } virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; virtual void operandOrder(Constructor *ct,vector &order) const; }; class UnconstrainedEquation : public PatternEquation { // Unconstrained equation, just get tokens PatternExpression *patex; protected: virtual ~UnconstrainedEquation(void); public: UnconstrainedEquation(PatternExpression *p); virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; }; class ValExpressEquation : public PatternEquation { protected: PatternValue *lhs; PatternExpression *rhs; virtual ~ValExpressEquation(void); public: ValExpressEquation(PatternValue *l,PatternExpression *r); virtual bool resolveOperandLeft(OperandResolve &state) const; }; class EqualEquation : public ValExpressEquation { public: EqualEquation(PatternValue *l,PatternExpression *r) : ValExpressEquation(l,r) {} virtual void genPattern(const vector &ops) const; }; class NotEqualEquation : public ValExpressEquation { public: NotEqualEquation(PatternValue *l,PatternExpression *r) : ValExpressEquation(l,r) {} virtual void genPattern(const vector &ops) const; }; class LessEquation : 
public ValExpressEquation { public: LessEquation(PatternValue *l,PatternExpression *r) : ValExpressEquation(l,r) {} virtual void genPattern(const vector &ops) const; }; class LessEqualEquation : public ValExpressEquation { public: LessEqualEquation(PatternValue *l,PatternExpression *r) : ValExpressEquation(l,r) {} virtual void genPattern(const vector &ops) const; }; class GreaterEquation : public ValExpressEquation { public: GreaterEquation(PatternValue *l,PatternExpression *r) : ValExpressEquation(l,r) {} virtual void genPattern(const vector &ops) const; }; class GreaterEqualEquation : public ValExpressEquation { public: GreaterEqualEquation(PatternValue *l,PatternExpression *r) : ValExpressEquation(l,r) {} virtual void genPattern(const vector &ops) const; }; class EquationAnd : public PatternEquation { // Pattern Equations ANDed together PatternEquation *left; PatternEquation *right; protected: virtual ~EquationAnd(void); public: EquationAnd(PatternEquation *l,PatternEquation *r); virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; virtual void operandOrder(Constructor *ct,vector &order) const; }; class EquationOr : public PatternEquation { // Pattern Equations ORed together PatternEquation *left; PatternEquation *right; protected: virtual ~EquationOr(void); public: EquationOr(PatternEquation *l,PatternEquation *r); virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; virtual void operandOrder(Constructor *ct,vector &order) const; }; class EquationCat : public PatternEquation { // Pattern Equations concatenated PatternEquation *left; PatternEquation *right; protected: virtual ~EquationCat(void); public: EquationCat(PatternEquation *l,PatternEquation *r); virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; virtual void operandOrder(Constructor *ct,vector &order) const; }; class 
EquationLeftEllipsis : public PatternEquation { // Equation preceded by ellipses PatternEquation *eq; protected: virtual ~EquationLeftEllipsis(void) { PatternEquation::release(eq); } public: EquationLeftEllipsis(PatternEquation *e) { (eq=e)->layClaim(); } virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; virtual void operandOrder(Constructor *ct,vector &order) const; }; class EquationRightEllipsis : public PatternEquation { // Equation preceded by ellipses PatternEquation *eq; protected: virtual ~EquationRightEllipsis(void) { PatternEquation::release(eq); } public: EquationRightEllipsis(PatternEquation *e) { (eq=e)->layClaim(); } virtual void genPattern(const vector &ops) const; virtual bool resolveOperandLeft(OperandResolve &state) const; virtual void operandOrder(Constructor *ct,vector &order) const; }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/slghpattern.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "slghpattern.hh" #include "slaformat.hh" namespace ghidra { uintm DisjointPattern::getMask(int4 startbit,int4 size,bool context) const { PatternBlock *block = getBlock(context); if (block != (PatternBlock *)0) return block->getMask(startbit,size); return 0; } uintm DisjointPattern::getValue(int4 startbit,int4 size,bool context) const { PatternBlock *block = getBlock(context); if (block != (PatternBlock *)0) return block->getValue(startbit,size); return 0; } int4 DisjointPattern::getLength(bool context) const { PatternBlock *block = getBlock(context); if (block != (PatternBlock *)0) return block->getLength(); return 0; } bool DisjointPattern::specializes(const DisjointPattern *op2) const { // Return true, if everywhere this's mask is non-zero // op2's mask is non-zero and op2's value match this's PatternBlock *a,*b; a = getBlock(false); b = op2->getBlock(false); if ((b != (PatternBlock *)0)&&(!b->alwaysTrue())) { // a must match existing block if (a == (PatternBlock *)0) return false; if (!a->specializes(b)) return false; } a = getBlock(true); b = op2->getBlock(true); if ((b != (PatternBlock *)0)&&(!b->alwaysTrue())) { // a must match existing block if (a == (PatternBlock *)0) return false; if (!a->specializes(b)) return false; } return true; } bool DisjointPattern::identical(const DisjointPattern *op2) const { // Return true if patterns match exactly PatternBlock *a,*b; a = getBlock(false); b = op2->getBlock(false); if (b != (PatternBlock *)0) { // a must match existing block if (a == (PatternBlock *)0) { if (!b->alwaysTrue()) return false; } else if (!a->identical(b)) return false; } else { if ((a != (PatternBlock *)0)&&(!a->alwaysTrue())) return false; } a = getBlock(true); b = op2->getBlock(true); if (b != (PatternBlock *)0) { // a must match existing block if (a == (PatternBlock *)0) { if (!b->alwaysTrue()) return false; } else if (!a->identical(b)) return false; } else { if ((a != (PatternBlock *)0)&&(!a->alwaysTrue())) return false; } return true; 
} static bool resolveIntersectBlock(PatternBlock *bl1,PatternBlock *bl2,PatternBlock *thisblock) { PatternBlock *inter; bool allocated = false; bool res = true; if (bl1 == (PatternBlock *)0) inter = bl2; else if (bl2 == (PatternBlock *)0) inter = bl1; else { allocated = true; inter = bl1->intersect(bl2); } if (inter == (PatternBlock *)0) { if (thisblock != (PatternBlock *)0) res = false; } else if (thisblock == (PatternBlock *)0) res = false; else res = thisblock->identical(inter); if (allocated) delete inter; return res; } bool DisjointPattern::resolvesIntersect(const DisjointPattern *op1,const DisjointPattern *op2) const { // Is this pattern equal to the intersection of -op1- and -op2- if (!resolveIntersectBlock(op1->getBlock(false),op2->getBlock(false),getBlock(false))) return false; return resolveIntersectBlock(op1->getBlock(true),op2->getBlock(true),getBlock(true)); } DisjointPattern *DisjointPattern::decodeDisjoint(Decoder &decoder) { // DisjointPattern factory DisjointPattern *res; uint4 el = decoder.peekElement(); if (el == sla::ELEM_INSTRUCT_PAT) res = new InstructionPattern(); else if (el == sla::ELEM_CONTEXT_PAT) res = new ContextPattern(); else res = new CombinePattern(); res->decode(decoder); return res; } void PatternBlock::normalize(void) { if (nonzerosize<=0) { // Check if alwaystrue or alwaysfalse offset = 0; // in which case we don't need mask and value maskvec.clear(); valvec.clear(); return; } vector::iterator iter1,iter2; iter1 = maskvec.begin(); // Cut zeros from beginning of mask iter2 = valvec.begin(); while((iter1 != maskvec.end())&&((*iter1)==0)) { iter1++; iter2++; offset += sizeof(uintm); } maskvec.erase(maskvec.begin(),iter1); valvec.erase(valvec.begin(),iter2); if (!maskvec.empty()) { int4 suboff = 0; // Cut off unaligned zeros from beginning of mask uintm tmp = maskvec[0]; while(tmp != 0) { suboff += 1; tmp >>= 8; } suboff = sizeof(uintm)-suboff; if (suboff != 0) { offset += suboff; // Slide up maskvec by suboff bytes for(int4 i=0;i> 
((sizeof(uintm)-suboff)*8)); maskvec[i] = tmp; } maskvec.back() <<= suboff*8; for(int4 i=0;i> ((sizeof(uintm)-suboff)*8)); valvec[i] = tmp; } valvec.back() <<= suboff*8; } iter1 = maskvec.end(); // Cut zeros from end of mask iter2 = valvec.end(); while(iter1 != maskvec.begin()) { --iter1; --iter2; if ((*iter1) != 0) break; // Find last non-zero } if (iter1 != maskvec.end()) { iter1++; // Find first zero, in last zero chain iter2++; } maskvec.erase(iter1,maskvec.end()); valvec.erase(iter2,valvec.end()); } if (maskvec.empty()) { offset = 0; nonzerosize = 0; // Always true return; } nonzerosize = maskvec.size() * sizeof(uintm); uintm tmp = maskvec.back(); // tmp must be nonzero while( (tmp&0xff) == 0) { nonzerosize -= 1; tmp >>= 8; } } PatternBlock::PatternBlock(int4 off,uintm msk,uintm val) { // Define mask and value pattern, confined to one uintm offset = off; maskvec.push_back(msk); valvec.push_back(val); nonzerosize = sizeof(uintm); // Assume all non-zero bytes before normalization normalize(); } PatternBlock::PatternBlock(bool tf) { offset = 0; if (tf) nonzerosize = 0; else nonzerosize = -1; } PatternBlock::PatternBlock(const PatternBlock *a,const PatternBlock *b) { // Construct PatternBlock by ANDing two others together PatternBlock *res = a->intersect(b); offset = res->offset; nonzerosize = res->nonzerosize; maskvec = res->maskvec; valvec = res->valvec; delete res; } PatternBlock::PatternBlock(vector &list) { // AND several blocks together to construct new block PatternBlock *res,*next; if (list.empty()) { // If not ANDing anything offset = 0; // make constructed block always true nonzerosize = 0; return; } res = list[0]; for(int4 i=1;iintersect(list[i]); delete res; res = next; } offset = res->offset; nonzerosize = res->nonzerosize; maskvec = res->maskvec; valvec = res->valvec; delete res; } PatternBlock *PatternBlock::clone(void) const { PatternBlock *res = new PatternBlock(true); res->offset = offset; res->nonzerosize = nonzerosize; res->maskvec = maskvec; 
res->valvec = valvec; return res; } PatternBlock *PatternBlock::commonSubPattern(const PatternBlock *b) const { // The resulting pattern has a 1-bit in the mask // only if the two pieces have a 1-bit and the // values agree PatternBlock *res = new PatternBlock(true); int4 maxlength = (getLength() > b->getLength()) ? getLength() : b->getLength(); res->offset = 0; int4 offset = 0; uintm mask1,val1,mask2,val2; uintm resmask,resval; while(offset < maxlength) { mask1 = getMask(offset*8,sizeof(uintm)*8); val1 = getValue(offset*8,sizeof(uintm)*8); mask2 = b->getMask(offset*8,sizeof(uintm)*8); val2 = b->getValue(offset*8,sizeof(uintm)*8); resmask = mask1 & mask2 & ~(val1^val2); resval = val1 & val2 & resmask; res->maskvec.push_back(resmask); res->valvec.push_back(resval); offset += sizeof(uintm); } res->nonzerosize = maxlength; res->normalize(); return res; } PatternBlock *PatternBlock::intersect(const PatternBlock *b) const { // Construct the intersecting pattern if (alwaysFalse() || b->alwaysFalse()) return new PatternBlock(false); PatternBlock *res = new PatternBlock(true); int4 maxlength = (getLength() > b->getLength()) ? 
getLength() : b->getLength(); res->offset = 0; int4 offset = 0; uintm mask1,val1,mask2,val2,commonmask; uintm resmask,resval; while(offset < maxlength) { mask1 = getMask(offset*8,sizeof(uintm)*8); val1 = getValue(offset*8,sizeof(uintm)*8); mask2 = b->getMask(offset*8,sizeof(uintm)*8); val2 = b->getValue(offset*8,sizeof(uintm)*8); commonmask = mask1 & mask2; // Bits in mask shared by both patterns if ((commonmask & val1) != (commonmask & val2)) { res->nonzerosize = -1; // Impossible pattern res->normalize(); return res; } resmask = mask1 | mask2; resval = (mask1 & val1) | (mask2 & val2); res->maskvec.push_back(resmask); res->valvec.push_back(resval); offset += sizeof(uintm); } res->nonzerosize = maxlength; res->normalize(); return res; } bool PatternBlock::specializes(const PatternBlock *op2) const { // does every masked bit in -this- match the corresponding // masked bit in -op2- int4 length = 8*op2->getLength(); int4 tmplength; uintm mask1,mask2,value1,value2; int4 sbit = 0; while(sbit < length) { tmplength = length-sbit; if (tmplength > 8*sizeof(uintm)) tmplength = 8*sizeof(uintm); mask1 = getMask(sbit,tmplength); value1 = getValue(sbit,tmplength); mask2 = op2->getMask(sbit,tmplength); value2 = op2->getValue(sbit,tmplength); if ((mask1 & mask2) != mask2) return false; if ((value1 & mask2) != (value2 & mask2)) return false; sbit += tmplength; } return true; } bool PatternBlock::identical(const PatternBlock *op2) const { // Do the mask and value match exactly int4 tmplength; int4 length = 8*op2->getLength(); tmplength = 8*getLength(); if (tmplength > length) length = tmplength; // Maximum of two lengths uintm mask1,mask2,value1,value2; int4 sbit = 0; while(sbit < length) { tmplength = length-sbit; if (tmplength > 8*sizeof(uintm)) tmplength = 8*sizeof(uintm); mask1 = getMask(sbit,tmplength); value1 = getValue(sbit,tmplength); mask2 = op2->getMask(sbit,tmplength); value2 = op2->getValue(sbit,tmplength); if (mask1 != mask2) return false; if ((mask1&value1) != 
(mask2&value2)) return false; sbit += tmplength; } return true; } uintm PatternBlock::getMask(int4 startbit,int4 size) const { startbit -= 8*offset; // Note the division and remainder here is unsigned. Then it is recast to signed. // If startbit is negative, then wordnum1 is either negative or very big, // if (unsigned size is same as sizeof int) // In either case, shift should come out between 0 and 8*sizeof(uintm)-1 int4 wordnum1 = startbit/(8*sizeof(uintm)); int4 shift = startbit % (8*sizeof(uintm)); int4 wordnum2 = (startbit+size-1)/(8*sizeof(uintm)); uintm res; if ((wordnum1<0)||(wordnum1>=maskvec.size())) res = 0; else res = maskvec[wordnum1]; res <<= shift; if (wordnum1 != wordnum2) { uintm tmp; if ((wordnum2<0)||(wordnum2>=maskvec.size())) tmp = 0; else tmp = maskvec[wordnum2]; res |= (tmp>>(8*sizeof(uintm)-shift)); } res >>= (8*sizeof(uintm) - size); return res; } uintm PatternBlock::getValue(int4 startbit,int4 size) const { startbit -= 8*offset; int4 wordnum1 = startbit/(8*sizeof(uintm)); int4 shift = startbit % (8*sizeof(uintm)); int4 wordnum2 = (startbit+size-1)/(8*sizeof(uintm)); uintm res; if ((wordnum1<0)||(wordnum1>=valvec.size())) res = 0; else res = valvec[wordnum1]; res <<= shift; if (wordnum1 != wordnum2) { uintm tmp; if ((wordnum2<0)||(wordnum2>=valvec.size())) tmp = 0; else tmp = valvec[wordnum2]; res |= (tmp>>(8*sizeof(uintm)-shift)); } res >>= (8*sizeof(uintm) - size); return res; } bool PatternBlock::isInstructionMatch(ParserWalker &walker) const { if (nonzerosize<=0) return (nonzerosize==0); int4 off = offset; for(int4 i=0;inumDisjoint()>0) return b->doAnd(this,-sa); const CombinePattern *b2 = dynamic_cast(b); if (b2 != (const CombinePattern *)0) return b->doAnd(this,-sa); const ContextPattern *b3 = dynamic_cast(b); if (b3 != (const ContextPattern *)0) { InstructionPattern *newpat = (InstructionPattern *)simplifyClone(); if (sa < 0) newpat->shiftInstruction(-sa); return new CombinePattern((ContextPattern *)b3->simplifyClone(),newpat); } 
const InstructionPattern *b4 = (const InstructionPattern *)b; PatternBlock *respattern; if (sa < 0) { PatternBlock *a = maskvalue->clone(); a->shift(-sa); respattern = a->intersect(b4->maskvalue); delete a; } else { PatternBlock *c = b4->maskvalue->clone(); c->shift(sa); respattern = maskvalue->intersect(c); delete c; } return new InstructionPattern(respattern); } Pattern *InstructionPattern::commonSubPattern(const Pattern *b,int4 sa) const { if (b->numDisjoint()>0) return b->commonSubPattern(this,-sa); const CombinePattern *b2 = dynamic_cast(b); if (b2 != (const CombinePattern *)0) return b->commonSubPattern(this,-sa); const ContextPattern *b3 = dynamic_cast(b); if (b3 != (const ContextPattern *)0) { InstructionPattern *res = new InstructionPattern(true); return res; } const InstructionPattern *b4 = (const InstructionPattern *)b; PatternBlock *respattern; if (sa < 0) { PatternBlock *a = maskvalue->clone(); a->shift(-sa); respattern = a->commonSubPattern(b4->maskvalue); delete a; } else { PatternBlock *c = b4->maskvalue->clone(); c->shift(sa); respattern = maskvalue->commonSubPattern(c); delete c; } return new InstructionPattern(respattern); } Pattern *InstructionPattern::doOr(const Pattern *b,int4 sa) const { if (b->numDisjoint()>0) return b->doOr(this,-sa); const CombinePattern *b2 = dynamic_cast(b); if (b2 != (const CombinePattern *)0) return b->doOr(this,-sa); DisjointPattern *res1,*res2; res1 = (DisjointPattern *)simplifyClone(); res2 = (DisjointPattern *)b->simplifyClone(); if (sa < 0) res1->shiftInstruction(-sa); else res2->shiftInstruction(sa); return new OrPattern(res1,res2); } void InstructionPattern::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_INSTRUCT_PAT); maskvalue->encode(encoder); encoder.closeElement(sla::ELEM_INSTRUCT_PAT); } void InstructionPattern::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_INSTRUCT_PAT); maskvalue = new PatternBlock(true); maskvalue->decode(decoder); decoder.closeElement(el); } 
Pattern *ContextPattern::doOr(const Pattern *b,int4 sa) const { const ContextPattern *b2 = dynamic_cast(b); if (b2 == (const ContextPattern *)0) return b->doOr(this,-sa); return new OrPattern((DisjointPattern *)simplifyClone(),(DisjointPattern *)b2->simplifyClone()); } Pattern *ContextPattern::doAnd(const Pattern *b,int4 sa) const { const ContextPattern *b2 = dynamic_cast(b); if (b2 == (const ContextPattern *)0) return b->doAnd(this,-sa); PatternBlock *resblock = maskvalue->intersect(b2->maskvalue); return new ContextPattern(resblock); } Pattern *ContextPattern::commonSubPattern(const Pattern *b,int4 sa) const { const ContextPattern *b2 = dynamic_cast(b); if (b2 == (const ContextPattern *)0) return b->commonSubPattern(this,-sa); PatternBlock *resblock = maskvalue->commonSubPattern(b2->maskvalue); return new ContextPattern(resblock); } void ContextPattern::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_CONTEXT_PAT); maskvalue->encode(encoder); encoder.closeElement(sla::ELEM_CONTEXT_PAT); } void ContextPattern::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_CONTEXT_PAT); maskvalue = new PatternBlock(true); maskvalue->decode(decoder); decoder.closeElement(el); } CombinePattern::~CombinePattern(void) { if (context != (ContextPattern *)0) delete context; if (instr != (InstructionPattern *)0) delete instr; } bool CombinePattern::isMatch(ParserWalker &walker) const { if (!instr->isMatch(walker)) return false; if (!context->isMatch(walker)) return false; return true; } bool CombinePattern::alwaysTrue(void) const { return (context->alwaysTrue() && instr->alwaysTrue()); } bool CombinePattern::alwaysFalse(void) const { return (context->alwaysFalse() || instr->alwaysFalse()); } Pattern *CombinePattern::doAnd(const Pattern *b,int4 sa) const { CombinePattern *tmp; if (b->numDisjoint() != 0) return b->doAnd(this,-sa); const CombinePattern *b2 = dynamic_cast(b); if (b2 != (CombinePattern *)0) { ContextPattern *c = (ContextPattern 
*)context->doAnd(b2->context,0); InstructionPattern *i = (InstructionPattern *)instr->doAnd(b2->instr,sa); tmp = new CombinePattern(c,i); } else { const InstructionPattern *b3 = dynamic_cast(b); if (b3 != (const InstructionPattern *)0) { InstructionPattern *i = (InstructionPattern *)instr->doAnd(b3,sa); tmp = new CombinePattern((ContextPattern *)context->simplifyClone(),i); } else { // Must be a ContextPattern ContextPattern *c = (ContextPattern *)context->doAnd(b,0); InstructionPattern *newpat = (InstructionPattern *) instr->simplifyClone(); if (sa < 0) newpat->shiftInstruction(-sa); tmp = new CombinePattern(c,newpat); } } return tmp; } Pattern *CombinePattern::commonSubPattern(const Pattern *b,int4 sa) const { Pattern *tmp; if (b->numDisjoint() != 0) return b->commonSubPattern(this,-sa); const CombinePattern *b2 = dynamic_cast(b); if (b2 != (CombinePattern *)0) { ContextPattern *c = (ContextPattern *)context->commonSubPattern(b2->context,0); InstructionPattern *i = (InstructionPattern *)instr->commonSubPattern(b2->instr,sa); tmp = new CombinePattern(c,i); } else { const InstructionPattern *b3 = dynamic_cast(b); if (b3 != (const InstructionPattern *)0) tmp = instr->commonSubPattern(b3,sa); else // Must be a ContextPattern tmp = context->commonSubPattern(b,0); } return tmp; } Pattern *CombinePattern::doOr(const Pattern *b,int4 sa) const { if (b->numDisjoint() != 0) return b->doOr(this,-sa); DisjointPattern *res1 = (DisjointPattern *)simplifyClone(); DisjointPattern *res2 = (DisjointPattern *)b->simplifyClone(); if (sa < 0) res1->shiftInstruction(-sa); else res2->shiftInstruction(sa); OrPattern *tmp = new OrPattern(res1,res2); return tmp; } Pattern *CombinePattern::simplifyClone(void) const { // We should only have to think at "our" level if (context->alwaysTrue()) return instr->simplifyClone(); if (instr->alwaysTrue()) return context->simplifyClone(); if (context->alwaysFalse()||instr->alwaysFalse()) return new InstructionPattern(false); return new 
CombinePattern((ContextPattern *)context->simplifyClone(), (InstructionPattern *)instr->simplifyClone()); } void CombinePattern::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_COMBINE_PAT); context->encode(encoder); instr->encode(encoder); encoder.closeElement(sla::ELEM_COMBINE_PAT); } void CombinePattern::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_COMBINE_PAT); context = new ContextPattern(); context->decode(decoder); instr = new InstructionPattern(); instr->decode(decoder); decoder.closeElement(el); } OrPattern::OrPattern(DisjointPattern *a,DisjointPattern *b) { orlist.push_back(a); orlist.push_back(b); } OrPattern::OrPattern(const vector &list) { vector::const_iterator iter; for(iter=list.begin();iter!=list.end();++iter) orlist.push_back(*iter); } OrPattern::~OrPattern(void) { vector::iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) delete *iter; } void OrPattern::shiftInstruction(int4 sa) { vector::iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) (*iter)->shiftInstruction(sa); } bool OrPattern::isMatch(ParserWalker &walker) const { for(int4 i=0;iisMatch(walker)) return true; return false; } bool OrPattern::alwaysTrue(void) const { // This isn't quite right because different branches // may cover the entire gamut vector::const_iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) if ((*iter)->alwaysTrue()) return true; return false; } bool OrPattern::alwaysFalse(void) const { vector::const_iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) if (!(*iter)->alwaysFalse()) return false; return true; } bool OrPattern::alwaysInstructionTrue(void) const { vector::const_iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) if (!(*iter)->alwaysInstructionTrue()) return false; return true; } Pattern *OrPattern::doAnd(const Pattern *b,int4 sa) const { const OrPattern *b2 = dynamic_cast(b); vector newlist; vector::const_iterator iter,iter2; DisjointPattern *tmp; 
OrPattern *tmpor; if (b2 == (const OrPattern *)0) { for(iter=orlist.begin();iter!=orlist.end();++iter) { tmp = (DisjointPattern *)(*iter)->doAnd(b,sa); newlist.push_back(tmp); } } else { for(iter=orlist.begin();iter!=orlist.end();++iter) for(iter2=b2->orlist.begin();iter2!=b2->orlist.end();++iter2) { tmp = (DisjointPattern *)(*iter)->doAnd(*iter2,sa); newlist.push_back(tmp); } } tmpor = new OrPattern(newlist); return tmpor; } Pattern *OrPattern::commonSubPattern(const Pattern *b,int4 sa) const { vector::const_iterator iter; Pattern *res,*next; iter = orlist.begin(); res = (*iter)->commonSubPattern(b,sa); iter++; if (sa > 0) sa = 0; while(iter!=orlist.end()) { next = (*iter)->commonSubPattern(res,sa); delete res; res = next; ++iter; } return res; } Pattern *OrPattern::doOr(const Pattern *b,int4 sa) const { const OrPattern *b2 = dynamic_cast(b); vector newlist; vector::const_iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) newlist.push_back((DisjointPattern *)(*iter)->simplifyClone()); if (sa < 0) for(iter=orlist.begin();iter!=orlist.end();++iter) (*iter)->shiftInstruction(-sa); if (b2 == (const OrPattern *)0) newlist.push_back((DisjointPattern *)b->simplifyClone()); else { for(iter=b2->orlist.begin();iter!=b2->orlist.end();++iter) newlist.push_back((DisjointPattern *)(*iter)->simplifyClone()); } if (sa > 0) for(int4 i=0;ishiftInstruction(sa); OrPattern *tmpor = new OrPattern(newlist); return tmpor; } Pattern *OrPattern::simplifyClone(void) const { // Look for alwaysTrue eliminate alwaysFalse vector::const_iterator iter; for(iter=orlist.begin();iter!=orlist.end();++iter) // Look for alwaysTrue if ((*iter)->alwaysTrue()) return new InstructionPattern(true); vector newlist; for(iter=orlist.begin();iter!=orlist.end();++iter) // Look for alwaysFalse if (!(*iter)->alwaysFalse()) newlist.push_back((DisjointPattern *)(*iter)->simplifyClone()); if (newlist.empty()) return new InstructionPattern(false); else if (newlist.size() == 1) return newlist[0]; return 
new OrPattern(newlist); } void OrPattern::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_OR_PAT); for(int4 i=0;iencode(encoder); encoder.closeElement(sla::ELEM_OR_PAT); } void OrPattern::decode(Decoder &decoder) { uint4 el = decoder.openElement(sla::ELEM_OR_PAT); while(decoder.peekElement() != 0) { DisjointPattern *pat = DisjointPattern::decodeDisjoint(decoder); orlist.push_back(pat); } decoder.closeElement(el); } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/slghpattern.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef __SLGHPATTERN_HH__ #define __SLGHPATTERN_HH__ #include "context.hh" namespace ghidra { // A mask/value pair viewed as two bitstreams class PatternBlock { int4 offset; // Offset to non-zero byte of mask int4 nonzerosize; // Last byte(+1) containing nonzero mask vector maskvec; // Mask vector valvec; // Value void normalize(void); public: PatternBlock(int4 off,uintm msk,uintm val); PatternBlock(bool tf); PatternBlock(const PatternBlock *a,const PatternBlock *b); PatternBlock(vector &list); PatternBlock *commonSubPattern(const PatternBlock *b) const; PatternBlock *intersect(const PatternBlock *b) const; bool specializes(const PatternBlock *op2) const; bool identical(const PatternBlock *op2) const; PatternBlock *clone(void) const; void shift(int4 sa) { offset += sa; normalize(); } int4 getLength(void) const { return offset+nonzerosize; } uintm getMask(int4 startbit,int4 size) const; uintm getValue(int4 startbit,int4 size) const; bool alwaysTrue(void) const { return (nonzerosize==0); } bool alwaysFalse(void) const { return (nonzerosize==-1); } bool isInstructionMatch(ParserWalker &walker) const; bool isContextMatch(ParserWalker &walker) const; void encode(Encoder &encoder) const; void decode(Decoder &decoder); }; class DisjointPattern; class Pattern { public: virtual ~Pattern(void) {} virtual Pattern *simplifyClone(void) const=0; virtual void shiftInstruction(int4 sa)=0; virtual Pattern *doOr(const Pattern *b,int4 sa) const=0; virtual Pattern *doAnd(const Pattern *b,int4 sa) const=0; virtual Pattern *commonSubPattern(const Pattern *b,int4 sa) const=0; virtual bool isMatch(ParserWalker &walker) const=0; // Does this pattern match context virtual int4 numDisjoint(void) const=0; virtual DisjointPattern *getDisjoint(int4 i) const=0; virtual bool alwaysTrue(void) const=0; virtual bool alwaysFalse(void) const=0; virtual bool alwaysInstructionTrue(void) const=0; virtual void encode(Encoder &encoder) const=0; virtual void decode(Decoder &decoder)=0; }; class 
DisjointPattern : public Pattern { // A pattern with no ORs in it virtual PatternBlock *getBlock(bool context) const=0; public: virtual int4 numDisjoint(void) const { return 0; } virtual DisjointPattern *getDisjoint(int4 i) const { return (DisjointPattern *)0; } uintm getMask(int4 startbit,int4 size,bool context) const; uintm getValue(int4 startbit,int4 size,bool context) const; int4 getLength(bool context) const; bool specializes(const DisjointPattern *op2) const; bool identical(const DisjointPattern *op2) const; bool resolvesIntersect(const DisjointPattern *op1,const DisjointPattern *op2) const; static DisjointPattern *decodeDisjoint(Decoder &decoder); }; class InstructionPattern : public DisjointPattern { // Matches the instruction bitstream PatternBlock *maskvalue; virtual PatternBlock *getBlock(bool context) const { return context ? (PatternBlock *)0 : maskvalue; } public: InstructionPattern(void) { maskvalue = (PatternBlock *)0; } // For use with decode InstructionPattern(PatternBlock *mv) { maskvalue = mv; } InstructionPattern(bool tf) { maskvalue = new PatternBlock(tf); } PatternBlock *getBlock(void) { return maskvalue; } virtual ~InstructionPattern(void) { if (maskvalue != (PatternBlock *)0) delete maskvalue; } virtual Pattern *simplifyClone(void) const { return new InstructionPattern(maskvalue->clone()); } virtual void shiftInstruction(int4 sa) { maskvalue->shift(sa); } virtual Pattern *doOr(const Pattern *b,int4 sa) const; virtual Pattern *doAnd(const Pattern *b,int4 sa) const; virtual Pattern *commonSubPattern(const Pattern *b,int4 sa) const; virtual bool isMatch(ParserWalker &walker) const { return maskvalue->isInstructionMatch(walker); } virtual bool alwaysTrue(void) const { return maskvalue->alwaysTrue(); } virtual bool alwaysFalse(void) const { return maskvalue->alwaysFalse(); } virtual bool alwaysInstructionTrue(void) const { return maskvalue->alwaysTrue(); } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder); }; 
class ContextPattern : public DisjointPattern { // Matches the context bitstream PatternBlock *maskvalue; virtual PatternBlock *getBlock(bool context) const { return context ? maskvalue : (PatternBlock *)0; } public: ContextPattern(void) { maskvalue = (PatternBlock *)0; } // For use with decode ContextPattern(PatternBlock *mv) { maskvalue = mv; } PatternBlock *getBlock(void) { return maskvalue; } virtual ~ContextPattern(void) { if (maskvalue != (PatternBlock *)0) delete maskvalue; } virtual Pattern *simplifyClone(void) const { return new ContextPattern(maskvalue->clone()); } virtual void shiftInstruction(int4 sa) { } // do nothing virtual Pattern *doOr(const Pattern *b,int4 sa) const; virtual Pattern *doAnd(const Pattern *b,int4 sa) const; virtual Pattern *commonSubPattern(const Pattern *b,int4 sa) const; virtual bool isMatch(ParserWalker &walker) const { return maskvalue->isContextMatch(walker); } virtual bool alwaysTrue(void) const { return maskvalue->alwaysTrue(); } virtual bool alwaysFalse(void) const { return maskvalue->alwaysFalse(); } virtual bool alwaysInstructionTrue(void) const { return true; } virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder); }; // A pattern with a context piece and an instruction piece class CombinePattern : public DisjointPattern { ContextPattern *context; // Context piece InstructionPattern *instr; // Instruction piece virtual PatternBlock *getBlock(bool cont) const { return cont ? 
context->getBlock() : instr->getBlock(); } public: CombinePattern(void) { context = (ContextPattern *)0; instr = (InstructionPattern *)0; } CombinePattern(ContextPattern *con,InstructionPattern *in) { context = con; instr = in; } virtual ~CombinePattern(void); virtual Pattern *simplifyClone(void) const; virtual void shiftInstruction(int4 sa) { instr->shiftInstruction(sa); } virtual bool isMatch(ParserWalker &walker) const; virtual bool alwaysTrue(void) const; virtual bool alwaysFalse(void) const; virtual bool alwaysInstructionTrue(void) const { return instr->alwaysInstructionTrue(); } virtual Pattern *doOr(const Pattern *b,int4 sa) const; virtual Pattern *doAnd(const Pattern *b,int4 sa) const; virtual Pattern *commonSubPattern(const Pattern *b,int4 sa) const; virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder); }; class OrPattern : public Pattern { vector orlist; public: OrPattern(void) {} // For use with decode OrPattern(DisjointPattern *a,DisjointPattern *b); OrPattern(const vector &list); virtual ~OrPattern(void); virtual Pattern *simplifyClone(void) const; virtual void shiftInstruction(int4 sa); virtual bool isMatch(ParserWalker &walker) const; virtual int4 numDisjoint(void) const { return orlist.size(); } virtual DisjointPattern *getDisjoint(int4 i) const { return orlist[i]; } virtual bool alwaysTrue(void) const; virtual bool alwaysFalse(void) const; virtual bool alwaysInstructionTrue(void) const; virtual Pattern *doOr(const Pattern *b,int4 sa) const; virtual Pattern *doAnd(const Pattern *b,int4 sa) const; virtual Pattern *commonSubPattern(const Pattern *b,int4 sa) const; virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder); }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/slghscan.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this 
file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define YY_INT_ALIGNED short int /* A lexical scanner generated by flex */ #define yy_create_buffer sleigh_create_buffer #define yy_delete_buffer sleigh_delete_buffer #define yy_scan_buffer sleigh_scan_buffer #define yy_scan_string sleigh_scan_string #define yy_scan_bytes sleigh_scan_bytes #define yy_init_buffer sleigh_init_buffer #define yy_flush_buffer sleigh_flush_buffer #define yy_load_buffer_state sleigh_load_buffer_state #define yy_switch_to_buffer sleigh_switch_to_buffer #define yypush_buffer_state sleighpush_buffer_state #define yypop_buffer_state sleighpop_buffer_state #define yyensure_buffer_stack sleighensure_buffer_stack #define yy_flex_debug sleigh_flex_debug #define yyin sleighin #define yyleng sleighleng #define yylex sleighlex #define yylineno sleighlineno #define yyout sleighout #define yyrestart sleighrestart #define yytext sleightext #define yywrap sleighwrap #define yyalloc sleighalloc #define yyrealloc sleighrealloc #define yyfree sleighfree #define FLEX_SCANNER #define YY_FLEX_MAJOR_VERSION 2 #define YY_FLEX_MINOR_VERSION 6 #define YY_FLEX_SUBMINOR_VERSION 4 #if YY_FLEX_SUBMINOR_VERSION > 0 #define FLEX_BETA #endif #ifdef yy_create_buffer #define sleigh_create_buffer_ALREADY_DEFINED #else #define yy_create_buffer sleigh_create_buffer #endif #ifdef yy_delete_buffer #define sleigh_delete_buffer_ALREADY_DEFINED #else #define yy_delete_buffer sleigh_delete_buffer #endif #ifdef yy_scan_buffer #define sleigh_scan_buffer_ALREADY_DEFINED #else #define yy_scan_buffer sleigh_scan_buffer #endif #ifdef 
yy_scan_string #define sleigh_scan_string_ALREADY_DEFINED #else #define yy_scan_string sleigh_scan_string #endif #ifdef yy_scan_bytes #define sleigh_scan_bytes_ALREADY_DEFINED #else #define yy_scan_bytes sleigh_scan_bytes #endif #ifdef yy_init_buffer #define sleigh_init_buffer_ALREADY_DEFINED #else #define yy_init_buffer sleigh_init_buffer #endif #ifdef yy_flush_buffer #define sleigh_flush_buffer_ALREADY_DEFINED #else #define yy_flush_buffer sleigh_flush_buffer #endif #ifdef yy_load_buffer_state #define sleigh_load_buffer_state_ALREADY_DEFINED #else #define yy_load_buffer_state sleigh_load_buffer_state #endif #ifdef yy_switch_to_buffer #define sleigh_switch_to_buffer_ALREADY_DEFINED #else #define yy_switch_to_buffer sleigh_switch_to_buffer #endif #ifdef yypush_buffer_state #define sleighpush_buffer_state_ALREADY_DEFINED #else #define yypush_buffer_state sleighpush_buffer_state #endif #ifdef yypop_buffer_state #define sleighpop_buffer_state_ALREADY_DEFINED #else #define yypop_buffer_state sleighpop_buffer_state #endif #ifdef yyensure_buffer_stack #define sleighensure_buffer_stack_ALREADY_DEFINED #else #define yyensure_buffer_stack sleighensure_buffer_stack #endif #ifdef yylex #define sleighlex_ALREADY_DEFINED #else #define yylex sleighlex #endif #ifdef yyrestart #define sleighrestart_ALREADY_DEFINED #else #define yyrestart sleighrestart #endif #ifdef yylex_init #define sleighlex_init_ALREADY_DEFINED #else #define yylex_init sleighlex_init #endif #ifdef yylex_init_extra #define sleighlex_init_extra_ALREADY_DEFINED #else #define yylex_init_extra sleighlex_init_extra #endif #ifdef yylex_destroy #define sleighlex_destroy_ALREADY_DEFINED #else #define yylex_destroy sleighlex_destroy #endif #ifdef yyget_debug #define sleighget_debug_ALREADY_DEFINED #else #define yyget_debug sleighget_debug #endif #ifdef yyset_debug #define sleighset_debug_ALREADY_DEFINED #else #define yyset_debug sleighset_debug #endif #ifdef yyget_extra #define sleighget_extra_ALREADY_DEFINED #else 
#define yyget_extra sleighget_extra #endif #ifdef yyset_extra #define sleighset_extra_ALREADY_DEFINED #else #define yyset_extra sleighset_extra #endif #ifdef yyget_in #define sleighget_in_ALREADY_DEFINED #else #define yyget_in sleighget_in #endif #ifdef yyset_in #define sleighset_in_ALREADY_DEFINED #else #define yyset_in sleighset_in #endif #ifdef yyget_out #define sleighget_out_ALREADY_DEFINED #else #define yyget_out sleighget_out #endif #ifdef yyset_out #define sleighset_out_ALREADY_DEFINED #else #define yyset_out sleighset_out #endif #ifdef yyget_leng #define sleighget_leng_ALREADY_DEFINED #else #define yyget_leng sleighget_leng #endif #ifdef yyget_text #define sleighget_text_ALREADY_DEFINED #else #define yyget_text sleighget_text #endif #ifdef yyget_lineno #define sleighget_lineno_ALREADY_DEFINED #else #define yyget_lineno sleighget_lineno #endif #ifdef yyset_lineno #define sleighset_lineno_ALREADY_DEFINED #else #define yyset_lineno sleighset_lineno #endif #ifdef yywrap #define sleighwrap_ALREADY_DEFINED #else #define yywrap sleighwrap #endif #ifdef yyalloc #define sleighalloc_ALREADY_DEFINED #else #define yyalloc sleighalloc #endif #ifdef yyrealloc #define sleighrealloc_ALREADY_DEFINED #else #define yyrealloc sleighrealloc #endif #ifdef yyfree #define sleighfree_ALREADY_DEFINED #else #define yyfree sleighfree #endif #ifdef yytext #define sleightext_ALREADY_DEFINED #else #define yytext sleightext #endif #ifdef yyleng #define sleighleng_ALREADY_DEFINED #else #define yyleng sleighleng #endif #ifdef yyin #define sleighin_ALREADY_DEFINED #else #define yyin sleighin #endif #ifdef yyout #define sleighout_ALREADY_DEFINED #else #define yyout sleighout #endif #ifdef yy_flex_debug #define sleigh_flex_debug_ALREADY_DEFINED #else #define yy_flex_debug sleigh_flex_debug #endif #ifdef yylineno #define sleighlineno_ALREADY_DEFINED #else #define yylineno sleighlineno #endif /* First, we deal with platform-specific or compiler-specific issues. */ /* begin standard C headers. 
*/ #include #include #include #include /* end standard C headers. */ /* flex integer type definitions */ #ifndef FLEXINT_H #define FLEXINT_H /* C99 systems have . Non-C99 systems may or may not. */ #if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, * if you want the limit (max/min) macros for int types. */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS 1 #endif #include typedef int8_t flex_int8_t; typedef uint8_t flex_uint8_t; typedef int16_t flex_int16_t; typedef uint16_t flex_uint16_t; typedef int32_t flex_int32_t; typedef uint32_t flex_uint32_t; #else typedef signed char flex_int8_t; typedef short int flex_int16_t; typedef int flex_int32_t; typedef unsigned char flex_uint8_t; typedef unsigned short int flex_uint16_t; typedef unsigned int flex_uint32_t; /* Limits of integral types. */ #ifndef INT8_MIN #define INT8_MIN (-128) #endif #ifndef INT16_MIN #define INT16_MIN (-32767-1) #endif #ifndef INT32_MIN #define INT32_MIN (-2147483647-1) #endif #ifndef INT8_MAX #define INT8_MAX (127) #endif #ifndef INT16_MAX #define INT16_MAX (32767) #endif #ifndef INT32_MAX #define INT32_MAX (2147483647) #endif #ifndef UINT8_MAX #define UINT8_MAX (255U) #endif #ifndef UINT16_MAX #define UINT16_MAX (65535U) #endif #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) #endif #ifndef SIZE_MAX #define SIZE_MAX (~(size_t)0) #endif #endif /* ! C99 */ #endif /* ! FLEXINT_H */ /* begin standard C++ headers. */ /* TODO: this is always defined, so inline it */ #define yyconst const #if defined(__GNUC__) && __GNUC__ >= 3 #define yynoreturn __attribute__((__noreturn__)) #else #define yynoreturn #endif /* Returned upon end-of-file. */ #define YY_NULL 0 /* Promotes a possibly negative, possibly signed char to an * integer in range [0..255] for use as an array index. */ #define YY_SC_TO_UI(c) ((YY_CHAR) (c)) /* Enter a start condition. 
This macro really ought to take a parameter, * but we do it the disgusting crufty way forced on us by the ()-less * definition of BEGIN. */ #define BEGIN (yy_start) = 1 + 2 * /* Translate the current start state into a value that can be later handed * to BEGIN to return to the state. The YYSTATE alias is for lex * compatibility. */ #define YY_START (((yy_start) - 1) / 2) #define YYSTATE YY_START /* Action number for EOF rule of a given start state. */ #define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) /* Special action meaning "start processing a new file". */ #define YY_NEW_FILE yyrestart( yyin ) #define YY_END_OF_BUFFER_CHAR 0 /* Size of default input buffer. */ #ifndef YY_BUF_SIZE #ifdef __ia64__ /* On IA-64, the buffer size is 16k, not 8k. * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case. * Ditto for the __ia64__ case accordingly. */ #define YY_BUF_SIZE 32768 #else #define YY_BUF_SIZE 16384 #endif /* __ia64__ */ #endif /* The state buf must be large enough to hold one state per character in the main buffer. */ #define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) #ifndef YY_TYPEDEF_YY_BUFFER_STATE #define YY_TYPEDEF_YY_BUFFER_STATE typedef struct yy_buffer_state *YY_BUFFER_STATE; #endif #ifndef YY_TYPEDEF_YY_SIZE_T #define YY_TYPEDEF_YY_SIZE_T typedef size_t yy_size_t; #endif extern int yyleng; extern FILE *yyin, *yyout; #define EOB_ACT_CONTINUE_SCAN 0 #define EOB_ACT_END_OF_FILE 1 #define EOB_ACT_LAST_MATCH 2 #define YY_LESS_LINENO(n) #define YY_LINENO_REWIND_TO(ptr) /* Return all but the first "n" matched characters back to the input stream. */ #define yyless(n) \ do \ { \ /* Undo effects of setting up yytext. 
*/ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ *yy_cp = (yy_hold_char); \ YY_RESTORE_YY_MORE_OFFSET \ (yy_c_buf_p) = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ YY_DO_BEFORE_ACTION; /* set up yytext again */ \ } \ while ( 0 ) #define unput(c) yyunput( c, (yytext_ptr) ) #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE struct yy_buffer_state { FILE *yy_input_file; char *yy_ch_buf; /* input buffer */ char *yy_buf_pos; /* current position in input buffer */ /* Size of input buffer in bytes, not including room for EOB * characters. */ int yy_buf_size; /* Number of characters read into yy_ch_buf, not including EOB * characters. */ int yy_n_chars; /* Whether we "own" the buffer - i.e., we know we created it, * and can realloc() it to grow it, and should free() it to * delete it. */ int yy_is_our_buffer; /* Whether this is an "interactive" input source; if so, and * if we're using stdio for input, then we want to use getc() * instead of fread(), to make sure we stop fetching input after * each newline. */ int yy_is_interactive; /* Whether we're considered to be at the beginning of a line. * If so, '^' rules will be active on the next match, otherwise * not. */ int yy_at_bol; int yy_bs_lineno; /**< The line count. */ int yy_bs_column; /**< The column count. */ /* Whether to try to fill the input buffer when we reach the * end of it. */ int yy_fill_buffer; int yy_buffer_status; #define YY_BUFFER_NEW 0 #define YY_BUFFER_NORMAL 1 /* When an EOF's been seen but there's still some text to process * then we mark the buffer as YY_EOF_PENDING, to indicate that we * shouldn't try reading from the input source any more. We might * still have a bunch of tokens to match, though, because of * possible backing-up. * * When we actually see the EOF, we change the status to "new" * (via yyrestart()), so that the user can continue scanning by * just pointing yyin at a new input file. 
*/ #define YY_BUFFER_EOF_PENDING 2 }; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ /* Stack of input buffers. */ static size_t yy_buffer_stack_top = 0; /**< index of top of stack. */ static size_t yy_buffer_stack_max = 0; /**< capacity of stack. */ static YY_BUFFER_STATE * yy_buffer_stack = NULL; /**< Stack as an array. */ /* We provide macros for accessing buffer states in case in the * future we want to put the buffer states in a more general * "scanner state". * * Returns the top of the stack, or NULL. */ #define YY_CURRENT_BUFFER ( (yy_buffer_stack) \ ? (yy_buffer_stack)[(yy_buffer_stack_top)] \ : NULL) /* Same as previous macro, but useful when we know that the buffer stack is not * NULL or when we need an lvalue. For internal use only. */ #define YY_CURRENT_BUFFER_LVALUE (yy_buffer_stack)[(yy_buffer_stack_top)] /* yy_hold_char holds the character lost when yytext is formed. */ static char yy_hold_char; static int yy_n_chars; /* number of characters read into yy_ch_buf */ int yyleng; /* Points to current character in buffer. */ static char *yy_c_buf_p = NULL; static int yy_init = 0; /* whether we need to initialize */ static int yy_start = 0; /* start state number */ /* Flag which is used to allow yywrap()'s to do buffer switches * instead of setting up a fresh yyin. A bit of a hack ... 
*/ static int yy_did_buffer_switch_on_eof; void yyrestart ( FILE *input_file ); void yy_switch_to_buffer ( YY_BUFFER_STATE new_buffer ); YY_BUFFER_STATE yy_create_buffer ( FILE *file, int size ); void yy_delete_buffer ( YY_BUFFER_STATE b ); void yy_flush_buffer ( YY_BUFFER_STATE b ); void yypush_buffer_state ( YY_BUFFER_STATE new_buffer ); void yypop_buffer_state ( void ); static void yyensure_buffer_stack ( void ); static void yy_load_buffer_state ( void ); static void yy_init_buffer ( YY_BUFFER_STATE b, FILE *file ); #define YY_FLUSH_BUFFER yy_flush_buffer( YY_CURRENT_BUFFER ) YY_BUFFER_STATE yy_scan_buffer ( char *base, yy_size_t size ); YY_BUFFER_STATE yy_scan_string ( const char *yy_str ); YY_BUFFER_STATE yy_scan_bytes ( const char *bytes, int len ); void *yyalloc ( yy_size_t ); void *yyrealloc ( void *, yy_size_t ); void yyfree ( void * ); #define yy_new_buffer yy_create_buffer #define yy_set_interactive(is_interactive) \ { \ if ( ! YY_CURRENT_BUFFER ){ \ yyensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ yy_create_buffer( yyin, YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ } #define yy_set_bol(at_bol) \ { \ if ( ! YY_CURRENT_BUFFER ){\ yyensure_buffer_stack (); \ YY_CURRENT_BUFFER_LVALUE = \ yy_create_buffer( yyin, YY_BUF_SIZE ); \ } \ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ } #define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) typedef flex_uint8_t YY_CHAR; FILE *yyin = NULL, *yyout = NULL; typedef int yy_state_type; extern int yylineno; int yylineno = 1; extern char *yytext; #ifdef yytext_ptr #undef yytext_ptr #endif #define yytext_ptr yytext static yy_state_type yy_get_previous_state ( void ); static yy_state_type yy_try_NUL_trans ( yy_state_type current_state ); static int yy_get_next_buffer ( void ); static void yynoreturn yy_fatal_error ( const char* msg ); /* Done after the current pattern has been matched and before the * corresponding action - sets up yytext. 
*/ #define YY_DO_BEFORE_ACTION \ (yytext_ptr) = yy_bp; \ yyleng = (int) (yy_cp - yy_bp); \ (yy_hold_char) = *yy_cp; \ *yy_cp = '\0'; \ (yy_c_buf_p) = yy_cp; #define YY_NUM_RULES 165 #define YY_END_OF_BUFFER 166 /* This struct is not used in this scanner, but its presence is necessary. */ struct yy_trans_info { flex_int32_t yy_verify; flex_int32_t yy_nxt; }; static const flex_int16_t yy_accept[533] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 14, 7, 8, 6, 14, 3, 13, 4, 13, 13, 13, 13, 5, 1, 58, 56, 57, 58, 50, 58, 25, 51, 52, 52, 26, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 23, 22, 20, 21, 22, 17, 19, 18, 15, 68, 66, 67, 61, 68, 61, 64, 62, 64, 59, 96, 94, 95, 96, 89, 96, 85, 88, 90, 91, 91, 88, 88, 90, 83, 84, 87, 90, 90, 71, 86, 69, 162, 160, 161, 154, 155, 162, 154, 154, 156, 157, 157, 154, 154, 154, 154, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 154, 99, 97, 165, 165, 164, 163, 7, 6, 0, 13, 13, 13, 13, 13, 1, 1, 56, 0, 55, 50, 0, 51, 0, 0, 52, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 23, 23, 20, 0, 19, 15, 15, 66, 0, 65, 0, 64, 63, 59, 59, 94, 76, 89, 0, 0, 0, 0, 90, 90, 0, 0, 91, 75, 77, 78, 74, 90, 90, 69, 69, 160, 106, 155, 0, 101, 156, 0, 0, 157, 104, 107, 105, 108, 103, 102, 156, 156, 156, 156, 156, 156, 156, 156, 156, 0, 118, 116, 117, 119, 122, 0, 123, 156, 156, 145, 156, 156, 156, 156, 156, 156, 156, 156, 110, 109, 112, 113, 156, 156, 156, 156, 156, 156, 100, 97, 97, 0, 164, 163, 163, 0, 13, 13, 13, 13, 0, 54, 53, 51, 41, 51, 51, 38, 51, 51, 37, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 0, 0, 0, 0, 80, 0, 82, 93, 92, 90, 90, 0, 159, 158, 133, 156, 156, 156, 156, 156, 156, 156, 156, 156, 121, 124, 120, 125, 156, 156, 156, 156, 156, 132, 156, 156, 156, 156, 114, 115, 111, 156, 156, 156, 156, 156, 156, 2, 0, 13, 13, 13, 12, 24, 0, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 43, 51, 51, 28, 51, 51, 51, 16, 0, 60, 0, 70, 0, 79, 81, 90, 90, 
98, 0, 156, 156, 147, 156, 135, 156, 156, 156, 156, 156, 156, 146, 156, 156, 156, 156, 156, 156, 156, 156, 156, 129, 134, 156, 126, 13, 13, 9, 51, 51, 51, 51, 51, 51, 46, 51, 51, 51, 51, 51, 51, 27, 32, 51, 51, 51, 90, 90, 156, 152, 127, 141, 156, 156, 156, 156, 136, 156, 153, 156, 156, 156, 156, 137, 156, 156, 140, 11, 10, 51, 51, 51, 51, 39, 42, 36, 45, 51, 51, 51, 35, 47, 51, 51, 90, 72, 128, 156, 156, 151, 156, 156, 156, 156, 156, 148, 156, 130, 51, 51, 33, 30, 49, 51, 51, 51, 51, 90, 156, 156, 156, 156, 144, 156, 156, 131, 51, 34, 51, 51, 51, 44, 90, 156, 156, 156, 156, 156, 143, 40, 29, 51, 48, 73, 156, 149, 156, 138, 142, 51, 150, 156, 51, 139, 51, 51, 31, 0 } ; static const YY_CHAR yy_ec[256] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 23, 23, 23, 23, 23, 23, 24, 25, 26, 27, 28, 29, 30, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 11, 34, 35, 36, 11, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 1, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11 } ; static const YY_CHAR yy_meta[67] = { 0, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 3, 3, 5, 3, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 6, 5, 3, 3, 3, 5, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 3 } ; static const flex_int16_t yy_base[570] = { 0, 0, 937, 
66, 936, 132, 935, 198, 934, 264, 933, 330, 932, 0, 394, 961, 968, 396, 968, 0, 948, 968, 0, 968, 903, 917, 920, 911, 968, 952, 968, 397, 968, 948, 0, 941, 968, 0, 365, 384, 968, 904, 906, 899, 908, 898, 906, 901, 365, 903, 905, 371, 365, 358, 906, 891, 938, 968, 409, 968, 928, 968, 0, 968, 936, 968, 416, 968, 968, 932, 925, 0, 968, 881, 932, 968, 419, 968, 907, 0, 402, 968, 968, 915, 377, 406, 404, 405, 0, 968, 968, 968, 884, 881, 968, 968, 927, 968, 432, 968, 902, 0, 916, 968, 917, 0, 384, 420, 419, 899, 420, 890, 886, 398, 413, 882, 862, 454, 870, 410, 410, 429, 869, 423, 466, 865, 877, 853, 968, 913, 968, 912, 968, 911, 454, 0, 0, 0, 857, 870, 872, 854, 906, 968, 474, 902, 901, 0, 0, 0, 463, 0, 466, 861, 395, 855, 451, 864, 843, 846, 852, 858, 857, 847, 848, 853, 434, 858, 847, 841, 409, 838, 888, 968, 493, 0, 0, 887, 968, 496, 883, 882, 0, 0, 0, 884, 968, 497, 968, 0, 0, 836, 831, 833, 865, 0, 488, 0, 490, 968, 968, 968, 968, 831, 836, 877, 968, 512, 968, 0, 0, 968, 0, 497, 0, 500, 968, 968, 968, 968, 968, 968, 824, 824, 832, 476, 831, 824, 823, 825, 820, 844, 968, 968, 968, 968, 843, 842, 841, 816, 810, 0, 809, 825, 824, 812, 802, 808, 803, 801, 968, 968, 830, 498, 805, 818, 794, 799, 795, 791, 968, 847, 968, 846, 968, 845, 968, 834, 809, 800, 790, 799, 829, 507, 0, 798, 0, 786, 783, 0, 801, 792, 0, 780, 794, 786, 778, 792, 795, 785, 779, 787, 788, 785, 784, 767, 778, 782, 808, 807, 806, 778, 968, 763, 0, 511, 0, 778, 766, 801, 513, 0, 0, 759, 764, 763, 756, 761, 757, 752, 769, 754, 968, 968, 968, 968, 478, 753, 781, 765, 750, 0, 749, 760, 741, 747, 968, 968, 968, 742, 741, 738, 737, 742, 735, 968, 777, 750, 738, 736, 0, 968, 773, 735, 747, 742, 725, 744, 732, 724, 727, 736, 735, 720, 719, 732, 0, 731, 721, 0, 729, 732, 713, 968, 754, 968, 753, 968, 752, 968, 968, 727, 711, 968, 749, 710, 720, 0, 698, 0, 710, 702, 695, 701, 698, 699, 0, 710, 703, 693, 711, 697, 693, 706, 691, 690, 0, 0, 704, 0, 698, 700, 0, 691, 689, 678, 689, 686, 694, 0, 675, 677, 681, 679, 
674, 689, 0, 0, 673, 689, 681, 677, 676, 664, 0, 0, 0, 684, 666, 664, 697, 0, 670, 0, 667, 670, 658, 664, 0, 662, 651, 0, 0, 0, 670, 667, 653, 652, 0, 0, 0, 0, 655, 669, 664, 0, 0, 656, 641, 647, 0, 0, 644, 652, 0, 657, 647, 641, 655, 645, 0, 635, 0, 643, 651, 0, 0, 0, 652, 636, 648, 647, 646, 641, 634, 636, 646, 0, 643, 625, 0, 624, 0, 638, 642, 622, 0, 620, 627, 618, 622, 616, 615, 0, 0, 0, 615, 0, 0, 629, 0, 577, 0, 0, 548, 0, 510, 511, 0, 467, 462, 0, 968, 534, 540, 546, 548, 554, 558, 564, 566, 572, 574, 580, 584, 586, 592, 598, 600, 606, 612, 614, 620, 626, 632, 634, 636, 485, 638, 640, 642, 473, 644, 429, 647, 650, 653, 656, 659, 662 } ; static const flex_int16_t yy_def[570] = { 0, 532, 1, 532, 3, 532, 5, 532, 7, 532, 9, 532, 11, 533, 534, 532, 532, 532, 532, 535, 532, 532, 536, 532, 536, 536, 536, 536, 532, 537, 532, 532, 532, 538, 539, 532, 532, 540, 532, 532, 532, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 541, 532, 532, 532, 532, 532, 542, 532, 543, 532, 532, 532, 532, 544, 532, 545, 532, 545, 546, 532, 532, 532, 532, 547, 532, 532, 532, 548, 532, 532, 532, 532, 548, 532, 532, 532, 548, 548, 532, 532, 549, 532, 532, 532, 532, 550, 532, 532, 532, 551, 532, 532, 532, 532, 532, 532, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 532, 532, 552, 532, 553, 532, 554, 532, 535, 555, 536, 536, 536, 536, 536, 537, 532, 532, 538, 532, 539, 556, 540, 532, 557, 532, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 541, 532, 532, 558, 542, 543, 532, 532, 544, 532, 559, 545, 545, 546, 532, 532, 532, 547, 560, 532, 532, 532, 548, 548, 532, 561, 532, 532, 532, 532, 532, 548, 548, 549, 532, 532, 532, 550, 562, 532, 551, 532, 563, 532, 532, 532, 532, 532, 532, 532, 551, 551, 551, 551, 551, 551, 551, 551, 551, 532, 532, 532, 532, 532, 532, 532, 532, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 532, 532, 532, 532, 551, 551, 551, 551, 551, 551, 532, 552, 532, 
553, 532, 554, 532, 564, 536, 536, 536, 536, 565, 532, 557, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 566, 567, 568, 532, 532, 532, 548, 532, 561, 548, 548, 569, 532, 563, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 532, 532, 532, 532, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 532, 532, 532, 551, 551, 551, 551, 551, 551, 532, 564, 536, 536, 536, 536, 532, 565, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 532, 566, 532, 567, 532, 568, 532, 532, 548, 548, 532, 569, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 536, 536, 536, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 548, 548, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 536, 536, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 540, 548, 548, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 551, 540, 540, 540, 540, 540, 540, 540, 540, 540, 548, 551, 551, 551, 551, 551, 551, 551, 551, 540, 540, 540, 540, 540, 540, 548, 551, 551, 551, 551, 551, 551, 540, 540, 540, 540, 548, 551, 551, 551, 551, 551, 540, 551, 551, 540, 551, 540, 540, 540, 0, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532 } ; static const flex_int16_t yy_nxt[1035] = { 0, 16, 17, 18, 17, 16, 16, 19, 20, 16, 16, 16, 21, 21, 16, 16, 21, 21, 22, 16, 16, 16, 16, 16, 23, 16, 16, 16, 16, 16, 16, 22, 22, 16, 16, 16, 22, 24, 22, 22, 25, 22, 22, 22, 22, 22, 22, 22, 22, 26, 22, 22, 22, 22, 22, 22, 22, 22, 22, 27, 22, 22, 22, 28, 16, 16, 16, 30, 31, 32, 31, 30, 33, 34, 35, 30, 30, 30, 36, 36, 30, 30, 36, 30, 37, 30, 38, 39, 39, 39, 36, 40, 30, 36, 30, 30, 30, 37, 37, 36, 36, 30, 37, 41, 42, 
43, 44, 45, 37, 37, 46, 37, 37, 37, 47, 37, 48, 49, 50, 37, 51, 52, 53, 37, 54, 55, 37, 37, 37, 30, 30, 30, 30, 57, 58, 59, 58, 57, 57, 57, 60, 57, 57, 57, 61, 61, 57, 57, 61, 57, 62, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 62, 62, 57, 57, 57, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 57, 57, 57, 65, 66, 67, 66, 68, 69, 68, 70, 68, 68, 65, 68, 68, 68, 68, 68, 68, 71, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 71, 71, 68, 68, 72, 71, 71, 71, 71, 71, 71, 71, 71, 71, 73, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 68, 68, 68, 68, 75, 76, 77, 76, 78, 75, 79, 80, 75, 81, 75, 82, 82, 82, 82, 82, 82, 83, 82, 84, 85, 85, 85, 82, 82, 86, 82, 87, 75, 75, 88, 88, 89, 90, 91, 88, 88, 88, 88, 88, 88, 88, 92, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 93, 88, 88, 88, 88, 88, 94, 95, 75, 82, 97, 98, 99, 98, 100, 97, 101, 102, 103, 104, 97, 103, 103, 103, 103, 103, 103, 105, 103, 106, 107, 107, 107, 103, 103, 108, 109, 110, 97, 97, 105, 105, 103, 103, 111, 105, 112, 113, 114, 115, 116, 117, 118, 105, 119, 105, 105, 120, 105, 121, 105, 122, 105, 123, 124, 125, 105, 105, 105, 105, 105, 126, 97, 127, 128, 103, 132, 134, 144, 134, 144, 160, 150, 152, 152, 152, 152, 164, 168, 166, 174, 165, 174, 190, 196, 161, 167, 179, 169, 179, 187, 213, 187, 133, 151, 198, 198, 198, 198, 199, 200, 201, 202, 207, 311, 207, 197, 276, 191, 215, 215, 215, 215, 214, 216, 217, 219, 220, 223, 225, 277, 241, 192, 226, 224, 134, 295, 134, 231, 242, 243, 193, 296, 248, 227, 245, 228, 232, 233, 246, 234, 244, 235, 249, 250, 144, 290, 144, 306, 236, 237, 238, 273, 273, 251, 152, 152, 152, 152, 279, 274, 252, 280, 253, 174, 291, 174, 179, 187, 179, 187, 239, 531, 254, 255, 530, 256, 305, 305, 198, 198, 198, 198, 207, 394, 207, 310, 310, 257, 215, 215, 215, 215, 315, 337, 338, 273, 273, 395, 316, 305, 305, 310, 310, 130, 130, 130, 130, 130, 130, 131, 131, 131, 131, 131, 131, 135, 529, 135, 135, 135, 
135, 137, 137, 142, 142, 142, 142, 142, 142, 145, 145, 145, 145, 147, 528, 147, 147, 147, 147, 149, 149, 172, 172, 172, 172, 172, 172, 176, 176, 177, 177, 177, 177, 177, 177, 180, 180, 180, 180, 183, 183, 185, 185, 185, 185, 185, 185, 189, 527, 189, 189, 189, 189, 195, 195, 205, 205, 205, 205, 205, 205, 209, 526, 209, 209, 209, 209, 212, 212, 261, 261, 261, 261, 261, 261, 263, 263, 263, 263, 263, 263, 265, 265, 265, 265, 265, 265, 267, 267, 272, 272, 298, 298, 299, 299, 300, 300, 309, 309, 346, 346, 346, 352, 352, 352, 374, 374, 374, 376, 376, 376, 378, 378, 378, 384, 384, 384, 525, 524, 523, 522, 521, 520, 519, 518, 517, 516, 515, 514, 513, 512, 511, 510, 509, 508, 507, 506, 505, 504, 503, 502, 501, 500, 499, 498, 497, 496, 495, 494, 493, 492, 491, 490, 489, 488, 487, 486, 485, 484, 483, 482, 481, 480, 479, 478, 477, 476, 475, 474, 473, 472, 471, 470, 469, 468, 467, 466, 465, 464, 463, 462, 461, 460, 459, 458, 457, 456, 455, 454, 453, 452, 451, 450, 449, 448, 447, 446, 445, 444, 443, 442, 441, 440, 439, 438, 437, 436, 435, 434, 433, 383, 432, 431, 377, 375, 373, 430, 429, 428, 427, 426, 425, 424, 423, 422, 421, 420, 419, 418, 417, 416, 415, 414, 413, 351, 412, 411, 410, 345, 409, 408, 407, 406, 405, 404, 403, 402, 401, 400, 399, 398, 397, 396, 393, 392, 391, 390, 389, 388, 387, 386, 385, 383, 382, 381, 380, 379, 377, 375, 373, 372, 371, 370, 369, 368, 367, 366, 365, 364, 363, 362, 361, 360, 359, 358, 357, 356, 355, 354, 353, 351, 350, 349, 348, 347, 345, 266, 264, 262, 344, 343, 342, 341, 340, 339, 336, 335, 334, 333, 332, 331, 330, 329, 328, 327, 326, 325, 324, 323, 322, 321, 320, 319, 318, 317, 314, 313, 312, 206, 308, 307, 304, 303, 302, 301, 186, 180, 181, 178, 173, 297, 294, 293, 292, 289, 288, 287, 286, 285, 284, 283, 282, 281, 278, 275, 145, 146, 143, 271, 270, 269, 268, 266, 264, 262, 260, 259, 258, 247, 240, 230, 229, 222, 221, 218, 211, 210, 208, 206, 204, 203, 194, 188, 186, 184, 182, 181, 178, 175, 173, 171, 170, 163, 162, 159, 158, 157, 156, 155, 154, 
153, 148, 146, 143, 141, 140, 139, 138, 136, 532, 129, 96, 74, 64, 56, 29, 15, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532 } ; static const flex_int16_t yy_chk[1035] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 14, 17, 31, 17, 31, 48, 38, 39, 39, 39, 39, 51, 53, 52, 58, 51, 58, 80, 84, 48, 52, 66, 53, 66, 76, 106, 76, 14, 38, 85, 85, 85, 85, 86, 86, 87, 87, 98, 563, 98, 84, 154, 80, 107, 107, 107, 107, 106, 108, 108, 110, 110, 113, 114, 154, 119, 80, 114, 113, 134, 170, 134, 117, 119, 120, 80, 
170, 123, 114, 121, 114, 117, 117, 121, 117, 120, 117, 123, 124, 144, 166, 144, 561, 117, 117, 117, 150, 150, 124, 152, 152, 152, 152, 156, 557, 124, 156, 124, 174, 166, 174, 179, 187, 179, 187, 117, 530, 124, 124, 529, 124, 196, 196, 198, 198, 198, 198, 207, 326, 207, 213, 213, 124, 215, 215, 215, 215, 225, 253, 253, 273, 273, 326, 225, 305, 305, 310, 310, 533, 533, 533, 533, 533, 533, 534, 534, 534, 534, 534, 534, 535, 527, 535, 535, 535, 535, 536, 536, 537, 537, 537, 537, 537, 537, 538, 538, 538, 538, 539, 526, 539, 539, 539, 539, 540, 540, 541, 541, 541, 541, 541, 541, 542, 542, 543, 543, 543, 543, 543, 543, 544, 544, 544, 544, 545, 545, 546, 546, 546, 546, 546, 546, 547, 524, 547, 547, 547, 547, 548, 548, 549, 549, 549, 549, 549, 549, 550, 521, 550, 550, 550, 550, 551, 551, 552, 552, 552, 552, 552, 552, 553, 553, 553, 553, 553, 553, 554, 554, 554, 554, 554, 554, 555, 555, 556, 556, 558, 558, 559, 559, 560, 560, 562, 562, 564, 564, 564, 565, 565, 565, 566, 566, 566, 567, 567, 567, 568, 568, 568, 569, 569, 569, 519, 516, 512, 511, 510, 509, 508, 507, 505, 504, 503, 501, 499, 498, 496, 495, 494, 493, 492, 491, 490, 489, 488, 484, 483, 481, 479, 478, 477, 476, 475, 473, 472, 469, 468, 467, 464, 463, 462, 457, 456, 455, 454, 450, 449, 447, 446, 445, 444, 442, 440, 439, 438, 437, 433, 432, 431, 430, 429, 428, 425, 424, 423, 422, 421, 420, 418, 417, 416, 415, 414, 413, 411, 410, 408, 405, 404, 403, 402, 401, 400, 399, 398, 397, 395, 394, 393, 392, 391, 390, 388, 386, 385, 384, 382, 381, 378, 376, 374, 372, 371, 370, 368, 367, 365, 364, 363, 362, 361, 360, 359, 358, 357, 356, 355, 354, 353, 352, 349, 348, 347, 346, 344, 343, 342, 341, 340, 339, 335, 334, 333, 332, 330, 329, 328, 327, 321, 320, 319, 318, 317, 316, 315, 314, 313, 309, 308, 307, 303, 301, 300, 299, 298, 297, 296, 295, 294, 293, 292, 291, 290, 289, 288, 287, 286, 285, 284, 283, 281, 280, 278, 277, 275, 272, 271, 270, 269, 268, 267, 265, 263, 261, 259, 258, 257, 256, 255, 254, 252, 249, 248, 247, 246, 245, 
244, 243, 242, 240, 239, 238, 237, 236, 231, 230, 229, 228, 227, 226, 224, 223, 222, 205, 204, 203, 194, 193, 192, 191, 185, 181, 180, 177, 172, 171, 169, 168, 167, 165, 164, 163, 162, 161, 160, 159, 158, 157, 155, 153, 146, 145, 142, 141, 140, 139, 138, 133, 131, 129, 127, 126, 125, 122, 118, 116, 115, 112, 111, 109, 104, 102, 100, 96, 93, 92, 83, 78, 74, 73, 70, 69, 64, 60, 56, 55, 54, 50, 49, 47, 46, 45, 44, 43, 42, 41, 35, 33, 29, 27, 26, 25, 24, 20, 15, 12, 10, 8, 6, 4, 2, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532, 532 } ; static yy_state_type yy_last_accepting_state; static char *yy_last_accepting_cpos; extern int yy_flex_debug; int yy_flex_debug = 0; /* The intent behind this definition is that it'll catch * any uses of REJECT which flex missed. */ #define REJECT reject_used_but_not_detected #define yymore() yymore_used_but_not_detected #define YY_MORE_ADJ 0 #define YY_RESTORE_YY_MORE_OFFSET char *yytext; /* ### * IP: GHIDRA * NOTE: flex skeletons are NOT bound by flex's BSD license * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #define sleighwrap() 1 #define YY_SKIP_YYWRAP /* If we are building don't include unistd.h */ /* flex provides us with this macro for turning it off */ #ifdef _WIN32 #define YY_NO_UNISTD_H static int isatty (int fildes) { return 0; } #endif #include "slgh_compile.hh" namespace ghidra { #include "slghparse.hh" struct FileStreamState { YY_BUFFER_STATE lastbuffer; // Last lex buffer corresponding to the stream FILE *file; // The NEW file stream }; extern SleighCompile *slgh; int4 last_preproc; // lex state before last preprocessing erasure int4 actionon; // whether '&' '|' and '^' are treated as actionon in pattern section int4 withsection = 0; // whether we are between the 'with' keyword and its open brace '{' vector filebuffers; vector ifstack; int4 negative_if = -1; void preproc_error(const string &err) { slgh->reportError((const Location *)0, err); cerr << "Terminating due to error in preprocessing" << endl; exit(1); } void check_to_endofline(istream &s) { // Make sure there is nothing to the end of the line s >> ws; if (!s.eof()) if (s.peek() != '#') preproc_error("Extra characters in preprocessor directive"); } string read_identifier(istream &s) { // Read a proper identifier from the stream s >> ws; // Skip any whitespace string res; while(!s.eof()) { char tok = s.peek(); if (isalnum(tok) || (tok == '_')) { s >> tok; res += tok; } else break; } return res; } void preprocess_string(istream &s,string &res) { // Grab string surrounded by double quotes from stream or call preprocess_error int4 val; s >> ws; // Skip any whitespace val = s.get(); if (val != '\"') preproc_error("Expecting double quoted string"); val = s.get(); while((val != '\"')&&(val>=0)) { res += (char)val; val = s.get(); } if (val != '\"') preproc_error("Missing terminating double quote"); } extern int4 preprocess_if(istream &s); // Forward declaration for recursion int4 read_defined_operator(istream &s) { // We have seen a -defined- keyword in an if or elif // Read macro name used as input, 
return 1 if it is defined char tok = ' '; string macroname; s >> ws >> tok; if (tok != '(') preproc_error("Badly formed \"defined\" operator"); macroname = read_identifier(s); int4 res = slgh->getPreprocValue(macroname,macroname) ? 1 : 0; s >> ws >> tok; if (tok != ')') preproc_error("Badly formed \"defined\" operator"); return res; } int4 read_boolean_clause(istream &s) { // We have seen an if or elif // return 1 if condition is true or else 0 s >> ws; if (s.peek()=='(') { // Parenthetical expression spawns recursion int4 val = s.get(); int4 res = preprocess_if(s); s >> ws; val = s.get(); if (val != ')') preproc_error("Unbalanced parentheses"); return res; } // Otherwise we must have a normal comparison operator string lhs,rhs,comp; if (s.peek()=='\"') // Read left-hand side string preprocess_string(s,lhs); else { lhs = read_identifier(s); if (lhs == "defined") return read_defined_operator(s); if (!slgh->getPreprocValue(lhs,lhs)) preproc_error("Could not find preprocessor macro "+lhs); } char tok; s >> tok; // Read comparison symbol comp += tok; s >> tok; comp += tok; s >> ws; if (s.peek()=='\"') // Read right-hand side string preprocess_string(s,rhs); else { rhs = read_identifier(s); if (!slgh->getPreprocValue(rhs,rhs)) preproc_error("Could not find preprocessor macro "+rhs); } if (comp == "==") return (lhs == rhs) ? 1 : 0; else if (comp=="!=") return (lhs != rhs) ? 
1 : 0;
  else
    preproc_error("Syntax error in condition");
  return 0;			// Unreachable: preproc_error() calls exit()
}

int4 preprocess_if(istream &s)

{
  // Evaluate a chain of boolean clauses joined by two-character
  // operators: && (and), || (or), ^^ (xor)
  int4 res = read_boolean_clause(s);
  s >> ws;
  while((!s.eof())&&(s.peek()!=')')) {
    string boolop;
    char tok;
    s >> tok;
    boolop += tok;
    s >> tok;
    boolop += tok;
    int4 res2 = read_boolean_clause(s);
    if (boolop == "&&")
      res = res & res2;
    else if (boolop == "||")
      res = res | res2;
    else if (boolop == "^^")
      res = res ^ res2;
    else
      preproc_error("Syntax error in expression");
    s >> ws;
  }
  return res;
}

// Replace every $(MACRO) occurrence in -str- with the macro's defined value
void expand_preprocmacros(string &str)

{
  string::size_type pos;
  string::size_type lastpos = 0;
  pos = str.find("$(",lastpos);
  if (pos == string::npos) return;	// Common case: nothing to expand
  string res;
  for(;;) {
    if (pos == string::npos) {
      res += str.substr(lastpos);	// Copy tail after the last macro
      str = res;
      return;
    }
    else {
      res += str.substr(lastpos,(pos-lastpos));	// Copy text before the macro
      string::size_type endpos = str.find(')',pos+2);
      if (endpos == string::npos) {
	preproc_error("Unterminated macro in string");	// Does not return
	break;
      }
      string macro = str.substr(pos+2, endpos - (pos+2));
      string value;
      if (!slgh->getPreprocValue(macro,value)) {
	preproc_error("Unknown preprocessing macro "+macro);	// Does not return
	break;
      }
      res += value;
      lastpos = endpos + 1;
    }
    pos = str.find("$(",lastpos);
  }
}

// Process the preprocessor directive line currently in sleightext.
// -cur_state- is the lex state the directive was seen in; -blank_state-
// is the state used to erase input inside a false clause.  Returns the
// lex state the scanner should continue in.
int4 preprocess(int4 cur_state,int4 blank_state)

{
  string str(sleightext);
  string::size_type pos = str.find('#');
  if (pos != string::npos)
    str.erase(pos);		// Strip trailing '#' comment
  istringstream s(str);
  string type;
  if (cur_state != blank_state)
    last_preproc = cur_state;	// Remember state so it can be restored later
  s.get();			// Skip the preprocessor marker
  s >> type;
  if (type == "include") {
    if (negative_if == -1) {	// Not in the middle of a false if clause
      filebuffers.push_back(FileStreamState()); // Save state of current file
      filebuffers.back().lastbuffer = YY_CURRENT_BUFFER;
      filebuffers.back().file = (FILE *)0;
      s >> ws;
      string fname;
      preprocess_string(s,fname);
      expand_preprocmacros(fname);
      slgh->parseFromNewFile(fname);
      fname = slgh->grabCurrentFilePath();
      sleighin = fopen(fname.c_str(),"r");
      if (sleighin == (FILE *)0)
	preproc_error("Could not open included file "+fname);
      filebuffers.back().file = sleighin;
      sleigh_switch_to_buffer( sleigh_create_buffer(sleighin, YY_BUF_SIZE) );
      check_to_endofline(s);
    }
  }
  else if (type == "define") {
    if (negative_if == -1) {
      string varname;
      string value;
      varname = read_identifier(s);	// Get name of variable being defined
      s >> ws;
      if (s.peek() == '\"')
	preprocess_string(s,value);
      else
	value = read_identifier(s);
      if (varname.size()==0)
	preproc_error("Error in preprocessor definition");
      slgh->setPreprocValue(varname,value);
      check_to_endofline(s);
    }
  }
  else if (type == "undef") {
    if (negative_if == -1) {
      string varname;
      varname = read_identifier(s);	// Name of variable to undefine
      if (varname.size()==0)
	preproc_error("Error in preprocessor undef");
      slgh->undefinePreprocValue(varname);
      check_to_endofline(s);
    }
  }
  else if (type=="ifdef") {
    string varname;
    varname = read_identifier(s);
    if (varname.size()==0)
      preproc_error("Error in preprocessor ifdef");
    string value;
    int4 truth = (slgh->getPreprocValue(varname,value)) ? 1 : 0;
    ifstack.push_back(truth);
    check_to_endofline(s);
  }
  else if (type=="ifndef") {
    string varname;
    varname = read_identifier(s);
    if (varname.size()==0)
      preproc_error("Error in preprocessor ifndef");
    string value;
    int4 truth = (slgh->getPreprocValue(varname,value)) ? 0 : 1; // flipped from ifdef
    ifstack.push_back(truth);
    check_to_endofline(s);
  }
  else if (type=="if") {
    int4 truth = preprocess_if(s);
    if (!s.eof())
      preproc_error("Unbalanced parentheses");
    ifstack.push_back(truth);
  }
  else if (type=="elif") {
    // ifstack.back() encoding (from the tests below): bit 0 = current
    // branch is true, bit 1 = an else clause was seen, bit 2 = an
    // earlier branch of this chain was already taken
    if (ifstack.empty())
      preproc_error("elif without preceding if");
    if ((ifstack.back()&2)!=0)	// We have already seen an else clause
      preproc_error("elif follows else");
    if ((ifstack.back()&4)!=0)	// We have already seen a true elif clause
      ifstack.back() = 4;	// don't include any other elif clause
    else if ((ifstack.back()&1)!=0) // Last clause was a true if
      ifstack.back() = 4;	// don't include this elif
    else {
      int4 truth = preprocess_if(s);
      if (!s.eof())
	preproc_error("Unbalanced parentheses");
      if (truth==0)
	ifstack.back() = 0;
      else
	ifstack.back() = 5;
    }
  }
  else if (type=="endif") {
    if (ifstack.empty())
      preproc_error("preprocessing endif without matching if");
    ifstack.pop_back();
    check_to_endofline(s);
  }
  else if (type=="else") {
    if (ifstack.empty())
      preproc_error("preprocessing else without matching if");
    if ((ifstack.back()&2)!=0)
      preproc_error("second else for one if");
    if ((ifstack.back()&4)!=0)	// Seen a true elif clause before
      ifstack.back() = 6;
    else if (ifstack.back()==0)
      ifstack.back() = 3;
    else
      ifstack.back() = 2;
    check_to_endofline(s);
  }
  else
    preproc_error("Unknown preprocessing directive: "+type);

  if (negative_if >= 0) {	// We were in a false state
    if (negative_if+1 < ifstack.size())
      return blank_state;	// false state is still deep in stack
    else			// false state is popped off or is current and changed
      negative_if = -1;
  }
  if (ifstack.empty()) return last_preproc;
  if ((ifstack.back()&1)==0) {	// Current clause is false: switch to erasing input
    negative_if = ifstack.size()-1;
    return blank_state;
  }
  return last_preproc;
}

// Begin scanning the expansion of the $(MACRO) token currently in sleightext
void preproc_macroexpand(void)

{
  filebuffers.push_back(FileStreamState());	// Suspend the current buffer
  filebuffers.back().lastbuffer = YY_CURRENT_BUFFER;
  filebuffers.back().file = (FILE *)0;	// No FILE behind a macro expansion
  string macro(sleightext);
  macro.erase(0,2);		// Strip the leading "$("
  macro.erase(macro.size()-1,1);	// Strip the trailing ')'
  string value;
  if
(!slgh->getPreprocValue(macro,value))
    preproc_error("Unknown preprocessing macro "+macro);
  sleigh_switch_to_buffer( sleigh_scan_string( value.c_str() ) );
  slgh->parsePreprocMacro();
}

/* Resolve the identifier currently held in sleightext against the SLEIGH
 * symbol table and return the parser token code for that symbol class,
 * storing the symbol object in the matching sleighlval union member.
 * An unknown identifier is handed to the parser as a freshly allocated
 * STRING (the parser takes ownership of the string). */
int4 find_symbol(void)
{
  string * newstring = new string(sleightext);
  SleighSymbol *sym = slgh->findSymbol(*newstring);
  if (sym == (SleighSymbol *)0) {
    sleighlval.str = newstring;        // parser owns and frees this string
    return STRING;
  }
  delete newstring;
  switch(sym->getType()) {
  case SleighSymbol::section_symbol:
    sleighlval.sectionsym = (SectionSymbol *)sym;
    return SECTIONSYM;
  case SleighSymbol::space_symbol:
    sleighlval.spacesym = (SpaceSymbol *)sym;
    return SPACESYM;
  case SleighSymbol::token_symbol:
    sleighlval.tokensym = (TokenSymbol *)sym;
    return TOKENSYM;
  case SleighSymbol::userop_symbol:
    sleighlval.useropsym = (UserOpSymbol *)sym;
    return USEROPSYM;
  case SleighSymbol::value_symbol:
    sleighlval.valuesym = (ValueSymbol *)sym;
    return VALUESYM;
  case SleighSymbol::valuemap_symbol:
    sleighlval.valuemapsym = (ValueMapSymbol *)sym;
    return VALUEMAPSYM;
  case SleighSymbol::name_symbol:
    sleighlval.namesym = (NameSymbol *)sym;
    return NAMESYM;
  case SleighSymbol::varnode_symbol:
    sleighlval.varsym = (VarnodeSymbol *)sym;
    return VARSYM;
  case SleighSymbol::bitrange_symbol:
    sleighlval.bitsym = (BitrangeSymbol *)sym;
    return BITSYM;
  case SleighSymbol::varnodelist_symbol:
    sleighlval.varlistsym = (VarnodeListSymbol *)sym;
    return VARLISTSYM;
  case SleighSymbol::operand_symbol:
    sleighlval.operandsym = (OperandSymbol *)sym;
    return OPERANDSYM;
  /* All instruction-flow placeholder symbols share one token code */
  case SleighSymbol::start_symbol:
  case SleighSymbol::end_symbol:
  case SleighSymbol::next2_symbol:
  case SleighSymbol::flowdest_symbol:
  case SleighSymbol::flowref_symbol:
    sleighlval.specsym = (SpecificSymbol *)sym;
    return JUMPSYM;
  case SleighSymbol::subtable_symbol:
    sleighlval.subtablesym = (SubtableSymbol *)sym;
    return SUBTABLESYM;
  case SleighSymbol::macro_symbol:
    sleighlval.macrosym = (MacroSymbol *)sym;
    return MACROSYM;
  case SleighSymbol::label_symbol:
    sleighlval.labelsym = (LabelSymbol *)sym;
    return LABELSYM;
  case SleighSymbol::epsilon_symbol:
    sleighlval.specsym = (SpecificSymbol *)sym;
    return SPECSYM;
  case SleighSymbol::context_symbol:
    sleighlval.contextsym = (ContextSymbol *)sym;
    return CONTEXTSYM;
  case SleighSymbol::dummy_symbol:
    break;
  }
  return -1;                    // Should never reach here
}

/* Convert the numeric literal in numtext into a heap-allocated constant,
 * returning the token code (INTB for signed, INTEGER for unsigned, or
 * BADINTEGER on parse failure).  A "0b" prefix is folded manually as
 * binary; anything else is parsed by istringstream with base
 * auto-detection (decimal, 0x hex, or 0 octal prefix). */
int4 scan_number(char *numtext,SLEIGHSTYPE *lval,bool signednum)
{
  uintb val;
  if (numtext[0] == '0' && numtext[1] == 'b') {
    // Binary literal: istringstream has no binary mode, fold bits by hand.
    // NOTE(review): any character other than '1' shifts in a 0 bit here;
    // presumably the lexer pattern restricts the text to [01] -- confirm
    // against the .l rule before relying on this for validation.
    val = 0;
    numtext += 2;
    while ((*numtext) != 0) {
      val <<= 1;
      if (*numtext == '1') {
        val |= 1;
      }
      ++numtext;
    }
  }
  else {
    istringstream s(numtext);
    s.unsetf(ios::dec | ios::hex | ios::oct);   // let prefix choose the base
    s >> val;
    if (!s) return BADINTEGER;
  }
  if (signednum) {
    lval->big = new intb(val);
    return INTB;
  }
  lval->i = new uintb(val);
  return INTEGER;
}

} // End namespace ghidra

using namespace ghidra;

/* Start-condition numbers for the scanner's states (generated by flex) */
#define INITIAL 0
#define defblock 1
#define macroblock 2
#define print 3
#define pattern 4
#define sem 5
#define preproc 6

#ifndef YY_NO_UNISTD_H
/* Special case for "unistd.h", since it is non-ANSI. We include it way
 * down here because we want the user's section 1 to have been scanned first.
 * The user has a chance to override it with an option.
 */
/* NOTE(review): the header name (normally <unistd.h> in flex output) appears
 * to have been lost from this line during extraction -- verify upstream. */
#include
#endif

#ifndef YY_EXTRA_TYPE
#define YY_EXTRA_TYPE void *
#endif

static int yy_init_globals ( void );

/* Accessor methods to globals. These are made visible to non-reentrant
 * scanners for convenience. */

int yylex_destroy ( void );
int yyget_debug ( void );
void yyset_debug ( int debug_flag );
YY_EXTRA_TYPE yyget_extra ( void );
void yyset_extra ( YY_EXTRA_TYPE user_defined );
FILE *yyget_in ( void );
void yyset_in ( FILE * _in_str );
FILE *yyget_out ( void );
void yyset_out ( FILE * _out_str );
int yyget_leng ( void );
char *yyget_text ( void );
int yyget_lineno ( void );
void yyset_lineno ( int _line_number );

/* Macros after this point can all be overridden by user definitions in
 * section 1.
 */

#ifndef YY_SKIP_YYWRAP
#ifdef __cplusplus
extern "C" int yywrap ( void );
#else
extern int yywrap ( void );
#endif
#endif

#ifndef YY_NO_UNPUT

static void yyunput ( int c, char *buf_ptr );

#endif

#ifndef yytext_ptr
static void yy_flex_strncpy ( char *, const char *, int );
#endif

#ifdef YY_NEED_STRLEN
static int yy_flex_strlen ( const char * );
#endif

#ifndef YY_NO_INPUT
#ifdef __cplusplus
static int yyinput ( void );
#else
static int input ( void );
#endif
#endif

/* Amount of stuff to slurp up with each read. */
#ifndef YY_READ_BUF_SIZE
#ifdef __ia64__
/* On IA-64, the buffer size is 16k, not 8k */
#define YY_READ_BUF_SIZE 16384
#else
#define YY_READ_BUF_SIZE 8192
#endif /* __ia64__ */
#endif

/* Copy whatever the last rule matched to the standard output. */
#ifndef ECHO
/* This used to be an fputs(), but since the string might contain NUL's,
 * we now use fwrite().
 */
#define ECHO do { if (fwrite( yytext, (size_t) yyleng, 1, yyout )) {} } while (0)
#endif

/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
 * is returned in "result".
 */
#ifndef YY_INPUT
#define YY_INPUT(buf,result,max_size) \
	if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
		{ \
		int c = '*'; \
		int n; \
		for ( n = 0; n < max_size && \
			     (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
			buf[n] = (char) c; \
		if ( c == '\n' ) \
			buf[n++] = (char) c; \
		if ( c == EOF && ferror( yyin ) ) \
			YY_FATAL_ERROR( "input in flex scanner failed" ); \
		result = n; \
		} \
	else \
		{ \
		errno=0; \
		while ( (result = (int) fread(buf, 1, (yy_size_t) max_size, yyin)) == 0 && ferror(yyin)) \
			{ \
			if( errno != EINTR) \
				{ \
				YY_FATAL_ERROR( "input in flex scanner failed" ); \
				break; \
				} \
			errno=0; \
			clearerr(yyin); \
			} \
		}\
\

#endif

/* No semi-colon after return; correct usage is to write "yyterminate();" -
 * we don't want an extra ';' after the "return" because that will cause
 * some compilers to complain about unreachable statements.
*/
#ifndef yyterminate
#define yyterminate() return YY_NULL
#endif

/* Number of entries by which start-condition stack grows. */
#ifndef YY_START_STACK_INCR
#define YY_START_STACK_INCR 25
#endif

/* Report a fatal error. */
#ifndef YY_FATAL_ERROR
#define YY_FATAL_ERROR(msg) yy_fatal_error( msg )
#endif

/* end tables serialization structures and prototypes */

/* Default declaration of generated scanner - a define so the user can
 * easily add parameters.
 */
#ifndef YY_DECL
#define YY_DECL_IS_OURS 1

extern int yylex (void);

#define YY_DECL int yylex (void)
#endif /* !YY_DECL */

/* Code executed at the beginning of each rule, after yytext and yyleng
 * have been set up.
 */
#ifndef YY_USER_ACTION
#define YY_USER_ACTION
#endif

/* Code executed at the end of each rule. */
#ifndef YY_BREAK
#define YY_BREAK /*LINTED*/break;
#endif

/* Tracks beginning-of-line status so ^-anchored rules work. */
#define YY_RULE_SETUP \
	if ( yyleng > 0 ) \
		YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \
				(yytext[yyleng - 1] == '\n'); \
	YY_USER_ACTION

/** The main scanner function which does all the work.
 *  (Generated flex DFA driver plus the SLEIGH rule actions; the magic
 *  numbers 533/968 below index the generated transition tables -- do not
 *  hand-edit.)
 */
YY_DECL
{
	yy_state_type yy_current_state;
	char *yy_cp, *yy_bp;
	int yy_act;

	if ( !(yy_init) )
		{
		(yy_init) = 1;

#ifdef YY_USER_INIT
		YY_USER_INIT;
#endif

		if ( ! (yy_start) )
			(yy_start) = 1;	/* first start state */

		if ( ! yyin )
			yyin = stdin;

		if ( ! yyout )
			yyout = stdout;

		if ( ! YY_CURRENT_BUFFER ) {
			yyensure_buffer_stack ();
			YY_CURRENT_BUFFER_LVALUE =
				yy_create_buffer( yyin, YY_BUF_SIZE );
		}

		yy_load_buffer_state(  );
		}

	{
	while ( /*CONSTCOND*/1 )	/* loops until end-of-file is reached */
		{
		yy_cp = (yy_c_buf_p);

		/* Support of yytext. */
		*yy_cp = (yy_hold_char);

		/* yy_bp points to the position in yy_ch_buf of the start of
		 * the current run.
		 */
		yy_bp = yy_cp;

		yy_current_state = (yy_start);
		yy_current_state += YY_AT_BOL();
yy_match:
		/* Run the table-driven DFA until it jams, remembering the
		 * last accepting state for backup. */
		do
			{
			YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;
			if ( yy_accept[yy_current_state] )
				{
				(yy_last_accepting_state) = yy_current_state;
				(yy_last_accepting_cpos) = yy_cp;
				}
			while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
				{
				yy_current_state = (int) yy_def[yy_current_state];
				if ( yy_current_state >= 533 )
					yy_c = yy_meta[yy_c];
				}
			yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
			++yy_cp;
			}
		while ( yy_base[yy_current_state] != 968 );

yy_find_action:
		yy_act = yy_accept[yy_current_state];
		if ( yy_act == 0 )
			{ /* have to back up */
			yy_cp = (yy_last_accepting_cpos);
			yy_current_state = (yy_last_accepting_state);
			yy_act = yy_accept[yy_current_state];
			}

		YY_DO_BEFORE_ACTION;

do_action:	/* This label is used only to access EOF actions. */

		switch ( yy_act )
	{ /* beginning of action switch */
			case 0: /* must back up */
			/* undo the effects of YY_DO_BEFORE_ACTION */
			*yy_cp = (yy_hold_char);
			yy_cp = (yy_last_accepting_cpos);
			yy_current_state = (yy_last_accepting_state);
			goto yy_find_action;

/* Rules 1-14: presumably the INITIAL start condition, judging by the
 * preprocess(INITIAL,...) continuation in rule 1 -- confirm in slghscan.l */
case 1:
/* rule 1 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(INITIAL,preproc) ); }
	YY_BREAK
case 2:
YY_RULE_SETUP
{ preproc_macroexpand(); }
	YY_BREAK
case 3:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 4:
YY_RULE_SETUP
{ BEGIN(print); slgh->calcContextLayout(); sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 5:
YY_RULE_SETUP
{ BEGIN(sem); sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 6:
YY_RULE_SETUP
	YY_BREAK
case 7:
YY_RULE_SETUP
	YY_BREAK
case 8:
/* rule 8 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); }
	YY_BREAK
case 9:
YY_RULE_SETUP
{ BEGIN(macroblock); return MACRO_KEY; }
	YY_BREAK
case 10:
YY_RULE_SETUP
{ BEGIN(defblock); return DEFINE_KEY; }
	YY_BREAK
case 11:
YY_RULE_SETUP
{ BEGIN(defblock); slgh->calcContextLayout(); return ATTACH_KEY; }
	YY_BREAK
case 12:
YY_RULE_SETUP
{ BEGIN(pattern); withsection = 1; slgh->calcContextLayout(); return WITH_KEY; }
	YY_BREAK
case 13:
YY_RULE_SETUP
{ return find_symbol(); }
	YY_BREAK
case 14:
YY_RULE_SETUP
{ return sleightext[0]; }
	YY_BREAK
/* Rules 15-22: presumably the macroblock state (see rule 15's action) */
case 15:
/* rule 15 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(macroblock,preproc) ); }
	YY_BREAK
case 16:
YY_RULE_SETUP
{ preproc_macroexpand(); }
	YY_BREAK
case 17:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 18:
YY_RULE_SETUP
{ BEGIN(sem); return sleightext[0]; }
	YY_BREAK
case 19:
YY_RULE_SETUP
{ sleighlval.str = new string(sleightext); return STRING; }
	YY_BREAK
case 20:
YY_RULE_SETUP
	YY_BREAK
case 21:
/* rule 21 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); }
	YY_BREAK
case 22:
YY_RULE_SETUP
{ return sleightext[0]; }
	YY_BREAK
/* Rules 23-58: presumably the defblock state (see rule 23's action) */
case 23:
/* rule 23 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(defblock,preproc) ); }
	YY_BREAK
case 24:
YY_RULE_SETUP
{ preproc_macroexpand(); }
	YY_BREAK
case 25:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 26:
YY_RULE_SETUP
{ BEGIN(INITIAL); sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 27:
YY_RULE_SETUP
{ return SPACE_KEY; }
	YY_BREAK
case 28:
YY_RULE_SETUP
{ return TYPE_KEY; }
	YY_BREAK
case 29:
YY_RULE_SETUP
{ return RAM_KEY; }
	YY_BREAK
case 30:
YY_RULE_SETUP
{ return DEFAULT_KEY; }
	YY_BREAK
case 31:
YY_RULE_SETUP
{ return REGISTER_KEY; }
	YY_BREAK
case 32:
YY_RULE_SETUP
{ return TOKEN_KEY; }
	YY_BREAK
case 33:
YY_RULE_SETUP
{ return CONTEXT_KEY; }
	YY_BREAK
case 34:
YY_RULE_SETUP
{ return BITRANGE_KEY; }
	YY_BREAK
case 35:
YY_RULE_SETUP
{ return SIGNED_KEY; }
	YY_BREAK
case 36:
YY_RULE_SETUP
{ return NOFLOW_KEY; }
	YY_BREAK
case 37:
YY_RULE_SETUP
{ return HEX_KEY; }
	YY_BREAK
case 38:
YY_RULE_SETUP
{ return DEC_KEY; }
	YY_BREAK
case 39:
YY_RULE_SETUP
{ return ENDIAN_KEY; }
	YY_BREAK
case 40:
YY_RULE_SETUP
{ return ALIGN_KEY; }
	YY_BREAK
case 41:
YY_RULE_SETUP
{ return BIG_KEY; }
	YY_BREAK
case 42:
YY_RULE_SETUP
{ return LITTLE_KEY; }
	YY_BREAK
case 43:
YY_RULE_SETUP
{ return SIZE_KEY; }
	YY_BREAK
case 44:
YY_RULE_SETUP
{ return WORDSIZE_KEY; }
	YY_BREAK
case 45:
YY_RULE_SETUP
{ return OFFSET_KEY; }
	YY_BREAK
case 46:
YY_RULE_SETUP
{ return NAMES_KEY; }
	YY_BREAK
case 47:
YY_RULE_SETUP
{ return VALUES_KEY; }
	YY_BREAK
case 48:
YY_RULE_SETUP
{ return VARIABLES_KEY; }
	YY_BREAK
case 49:
YY_RULE_SETUP
{ return PCODEOP_KEY; }
	YY_BREAK
case 50:
YY_RULE_SETUP
	YY_BREAK
case 51:
YY_RULE_SETUP
{ return find_symbol(); }
	YY_BREAK
case 52:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,false); }
	YY_BREAK
case 53:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,false); }
	YY_BREAK
case 54:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,false); }
	YY_BREAK
case 55:
YY_RULE_SETUP
{ sleighlval.str = new string(sleightext+1,strlen(sleightext)-2); return STRING; }
	YY_BREAK
case 56:
YY_RULE_SETUP
	YY_BREAK
case 57:
/* rule 57 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); }
	YY_BREAK
case 58:
YY_RULE_SETUP
{ return sleightext[0]; }
	YY_BREAK
/* Rules 59-68: presumably the print (display-section) state */
case 59:
/* rule 59 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(print,preproc) ); }
	YY_BREAK
case 60:
YY_RULE_SETUP
{ preproc_macroexpand(); }
	YY_BREAK
case 61:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return CHAR; }
	YY_BREAK
case 62:
YY_RULE_SETUP
{ sleighlval.ch = '^'; return '^'; }
	YY_BREAK
case 63:
YY_RULE_SETUP
{ BEGIN(pattern); actionon=0; return IS_KEY; }
	YY_BREAK
case 64:
YY_RULE_SETUP
{ sleighlval.str = new string(sleightext); return SYMBOLSTRING; }
	YY_BREAK
case 65:
YY_RULE_SETUP
{ sleighlval.str = new string(sleightext+1,strlen(sleightext)-2); return STRING; }
	YY_BREAK
case 66:
YY_RULE_SETUP
{ sleighlval.ch = ' '; return ' '; }
	YY_BREAK
case 67:
/* rule 67 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); return ' '; }
	YY_BREAK
case 68:
YY_RULE_SETUP
{ return sleightext[0]; }
	YY_BREAK
/* Rules 69-96: presumably the pattern state; actionon distinguishes the
 * constraint part from the context-action part of a pattern */
case 69:
/* rule 69 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(pattern,preproc) ); }
	YY_BREAK
case 70:
YY_RULE_SETUP
{ preproc_macroexpand(); }
	YY_BREAK
case 71:
YY_RULE_SETUP
{ BEGIN((withsection==1) ? INITIAL:sem); withsection=0; sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 72:
YY_RULE_SETUP
{ BEGIN(INITIAL); return OP_UNIMPL; }
	YY_BREAK
case 73:
YY_RULE_SETUP
{ return GLOBALSET_KEY; }
	YY_BREAK
case 74:
YY_RULE_SETUP
{ return OP_RIGHT; }
	YY_BREAK
case 75:
YY_RULE_SETUP
{ return OP_LEFT; }
	YY_BREAK
case 76:
YY_RULE_SETUP
{ return OP_NOTEQUAL; }
	YY_BREAK
case 77:
YY_RULE_SETUP
{ return OP_LESSEQUAL; }
	YY_BREAK
case 78:
YY_RULE_SETUP
{ return OP_GREATEQUAL; }
	YY_BREAK
case 79:
YY_RULE_SETUP
{ return OP_AND; }
	YY_BREAK
case 80:
YY_RULE_SETUP
{ return OP_OR; }
	YY_BREAK
case 81:
YY_RULE_SETUP
{ return OP_XOR; }
	YY_BREAK
case 82:
YY_RULE_SETUP
{ return ELLIPSIS_KEY; }
	YY_BREAK
case 83:
YY_RULE_SETUP
{ actionon = 1; sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 84:
YY_RULE_SETUP
{ actionon = 0; sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 85:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return (actionon==0) ? sleightext[0] : OP_AND; }
	YY_BREAK
case 86:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return (actionon==0) ? sleightext[0] : OP_OR; }
	YY_BREAK
case 87:
YY_RULE_SETUP
{ return OP_XOR; }
	YY_BREAK
case 88:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 89:
YY_RULE_SETUP
	YY_BREAK
case 90:
YY_RULE_SETUP
{ return find_symbol(); }
	YY_BREAK
case 91:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,true); }
	YY_BREAK
case 92:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,true); }
	YY_BREAK
case 93:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,true); }
	YY_BREAK
case 94:
YY_RULE_SETUP
	YY_BREAK
case 95:
/* rule 95 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); }
	YY_BREAK
case 96:
YY_RULE_SETUP
{ return sleightext[0]; }
	YY_BREAK
/* Rules 97-162: presumably the sem (semantic action) state -- p-code
 * operators and statement keywords */
case 97:
/* rule 97 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(sem,preproc) ); }
	YY_BREAK
case 98:
YY_RULE_SETUP
{ preproc_macroexpand(); }
	YY_BREAK
case 99:
YY_RULE_SETUP
{ BEGIN(INITIAL); sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 100:
YY_RULE_SETUP
{ return OP_BOOL_OR; }
	YY_BREAK
case 101:
YY_RULE_SETUP
{ return OP_BOOL_AND; }
	YY_BREAK
case 102:
YY_RULE_SETUP
{ return OP_BOOL_XOR; }
	YY_BREAK
case 103:
YY_RULE_SETUP
{ return OP_RIGHT; }
	YY_BREAK
case 104:
YY_RULE_SETUP
{ return OP_LEFT; }
	YY_BREAK
case 105:
YY_RULE_SETUP
{ return OP_EQUAL; }
	YY_BREAK
case 106:
YY_RULE_SETUP
{ return OP_NOTEQUAL; }
	YY_BREAK
case 107:
YY_RULE_SETUP
{ return OP_LESSEQUAL; }
	YY_BREAK
case 108:
YY_RULE_SETUP
{ return OP_GREATEQUAL; }
	YY_BREAK
case 109:
YY_RULE_SETUP
{ return OP_SDIV; }
	YY_BREAK
case 110:
YY_RULE_SETUP
{ return OP_SREM; }
	YY_BREAK
case 111:
YY_RULE_SETUP
{ return OP_SRIGHT; }
	YY_BREAK
case 112:
YY_RULE_SETUP
{ return OP_SLESS; }
	YY_BREAK
case 113:
YY_RULE_SETUP
{ return OP_SGREAT; }
	YY_BREAK
case 114:
YY_RULE_SETUP
{ return OP_SLESSEQUAL; }
	YY_BREAK
case 115:
YY_RULE_SETUP
{ return OP_SGREATEQUAL; }
	YY_BREAK
case 116:
YY_RULE_SETUP
{ return OP_FADD; }
	YY_BREAK
case 117:
YY_RULE_SETUP
{ return OP_FSUB; }
	YY_BREAK
case 118:
YY_RULE_SETUP
{ return OP_FMULT; }
	YY_BREAK
case 119:
YY_RULE_SETUP
{ return OP_FDIV; }
	YY_BREAK
case 120:
YY_RULE_SETUP
{ return OP_FEQUAL; }
	YY_BREAK
case 121:
YY_RULE_SETUP
{ return OP_FNOTEQUAL; }
	YY_BREAK
case 122:
YY_RULE_SETUP
{ return OP_FLESS; }
	YY_BREAK
case 123:
YY_RULE_SETUP
{ return OP_FGREAT; }
	YY_BREAK
case 124:
YY_RULE_SETUP
{ return OP_FLESSEQUAL; }
	YY_BREAK
case 125:
YY_RULE_SETUP
{ return OP_FGREATEQUAL; }
	YY_BREAK
case 126:
YY_RULE_SETUP
{ return OP_ZEXT; }
	YY_BREAK
case 127:
YY_RULE_SETUP
{ return OP_CARRY; }
	YY_BREAK
case 128:
YY_RULE_SETUP
{ return OP_BORROW; }
	YY_BREAK
case 129:
YY_RULE_SETUP
{ return OP_SEXT; }
	YY_BREAK
case 130:
YY_RULE_SETUP
{ return OP_SCARRY; }
	YY_BREAK
case 131:
YY_RULE_SETUP
{ return OP_SBORROW; }
	YY_BREAK
case 132:
YY_RULE_SETUP
{ return OP_NAN; }
	YY_BREAK
case 133:
YY_RULE_SETUP
{ return OP_ABS; }
	YY_BREAK
case 134:
YY_RULE_SETUP
{ return OP_SQRT; }
	YY_BREAK
case 135:
YY_RULE_SETUP
{ return OP_CEIL; }
	YY_BREAK
case 136:
YY_RULE_SETUP
{ return OP_FLOOR; }
	YY_BREAK
case 137:
YY_RULE_SETUP
{ return OP_ROUND; }
	YY_BREAK
case 138:
YY_RULE_SETUP
{ return OP_INT2FLOAT; }
	YY_BREAK
case 139:
YY_RULE_SETUP
{ return OP_FLOAT2FLOAT; }
	YY_BREAK
case 140:
YY_RULE_SETUP
{ return OP_TRUNC; }
	YY_BREAK
case 141:
YY_RULE_SETUP
{ return OP_CPOOLREF; }
	YY_BREAK
case 142:
YY_RULE_SETUP
{ return OP_NEW; }
	YY_BREAK
case 143:
YY_RULE_SETUP
{ return OP_POPCOUNT; }
	YY_BREAK
case 144:
YY_RULE_SETUP
{ return OP_LZCOUNT; }
	YY_BREAK
case 145:
YY_RULE_SETUP
{ return IF_KEY; }
	YY_BREAK
case 146:
YY_RULE_SETUP
{ return GOTO_KEY; }
	YY_BREAK
case 147:
YY_RULE_SETUP
{ return CALL_KEY; }
	YY_BREAK
case 148:
YY_RULE_SETUP
{ return RETURN_KEY; }
	YY_BREAK
case 149:
YY_RULE_SETUP
{ return DELAYSLOT_KEY; }
	YY_BREAK
case 150:
YY_RULE_SETUP
{ return CROSSBUILD_KEY; }
	YY_BREAK
case 151:
YY_RULE_SETUP
{ return EXPORT_KEY; }
	YY_BREAK
case 152:
YY_RULE_SETUP
{ return BUILD_KEY; }
	YY_BREAK
case 153:
YY_RULE_SETUP
{ return LOCAL_KEY; }
	YY_BREAK
case 154:
YY_RULE_SETUP
{ sleighlval.ch = sleightext[0]; return sleightext[0]; }
	YY_BREAK
case 155:
YY_RULE_SETUP
	YY_BREAK
case 156:
YY_RULE_SETUP
{ return find_symbol(); }
	YY_BREAK
case 157:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,false); }
	YY_BREAK
case 158:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,false); }
	YY_BREAK
case 159:
YY_RULE_SETUP
{ return scan_number(sleightext,&sleighlval,false); }
	YY_BREAK
case 160:
YY_RULE_SETUP
	YY_BREAK
case 161:
/* rule 161 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); }
	YY_BREAK
case 162:
YY_RULE_SETUP
{ return sleightext[0]; }
	YY_BREAK
/* Rules 163-164: presumably the preproc state */
case 163:
/* rule 163 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); BEGIN( preprocess(preproc,preproc) ); }
	YY_BREAK
case 164:
/* rule 164 can match eol */
YY_RULE_SETUP
{ slgh->nextLine(); }
	YY_BREAK
/* End of an included spec file: pop back to the buffer (and FILE) of the
 * including file; terminate only when the buffer stack is exhausted. */
case YY_STATE_EOF(INITIAL):
case YY_STATE_EOF(defblock):
case YY_STATE_EOF(macroblock):
case YY_STATE_EOF(print):
case YY_STATE_EOF(pattern):
case YY_STATE_EOF(sem):
case YY_STATE_EOF(preproc):
{ sleigh_delete_buffer( YY_CURRENT_BUFFER );
  if (filebuffers.empty())
    yyterminate();
  sleigh_switch_to_buffer( filebuffers.back().lastbuffer );
  FILE *curfile = filebuffers.back().file;
  if (curfile != (FILE *)0)
    fclose(curfile);
  filebuffers.pop_back();
  slgh->parseFileFinished();
}
	YY_BREAK
case 165:
YY_RULE_SETUP
ECHO;
	YY_BREAK
case YY_END_OF_BUFFER:
	{
	/* Amount of text matched not including the EOB char. */
	int yy_amount_of_matched_text = (int) (yy_cp - (yytext_ptr)) - 1;

	/* Undo the effects of YY_DO_BEFORE_ACTION. */
	*yy_cp = (yy_hold_char);
	YY_RESTORE_YY_MORE_OFFSET

	if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
		{
		/* We're scanning a new file or input source. It's
		 * possible that this happened because the user
		 * just pointed yyin at a new source and called
		 * yylex(). If so, then we have to assure
		 * consistency between YY_CURRENT_BUFFER and our
		 * globals. Here is the right place to do so, because
		 * this is the first action (other than possibly a
		 * back-up) that will match for the new input source.
		 */
		(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
		YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
		YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
		}

	/* Note that here we test for yy_c_buf_p "<=" to the position
	 * of the first EOB in the buffer, since yy_c_buf_p will
	 * already have been incremented past the NUL character
	 * (since all states make transitions on EOB to the
	 * end-of-buffer state). Contrast this with the test
	 * in input().
	 */
	if ( (yy_c_buf_p) <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
		{ /* This was really a NUL. */
		yy_state_type yy_next_state;

		(yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;

		yy_current_state = yy_get_previous_state(  );

		/* Okay, we're now positioned to make the NUL
		 * transition. We couldn't have
		 * yy_get_previous_state() go ahead and do it
		 * for us because it doesn't know how to deal
		 * with the possibility of jamming (and we don't
		 * want to build jamming into it because then it
		 * will run more slowly).
		 */

		yy_next_state = yy_try_NUL_trans( yy_current_state );

		yy_bp = (yytext_ptr) + YY_MORE_ADJ;

		if ( yy_next_state )
			{
			/* Consume the NUL. */
			yy_cp = ++(yy_c_buf_p);
			yy_current_state = yy_next_state;
			goto yy_match;
			}

		else
			{
			yy_cp = (yy_c_buf_p);
			goto yy_find_action;
			}
		}

	else switch ( yy_get_next_buffer(  ) )
		{
		case EOB_ACT_END_OF_FILE:
			{
			(yy_did_buffer_switch_on_eof) = 0;

			if ( yywrap(  ) )
				{
				/* Note: because we've taken care in
				 * yy_get_next_buffer() to have set up
				 * yytext, we can now set up
				 * yy_c_buf_p so that if some total
				 * hoser (like flex itself) wants to
				 * call the scanner after we return the
				 * YY_NULL, it'll still work - another
				 * YY_NULL will get returned.
				 */
				(yy_c_buf_p) = (yytext_ptr) + YY_MORE_ADJ;

				yy_act = YY_STATE_EOF(YY_START);
				goto do_action;
				}

			else
				{
				if ( !
(yy_did_buffer_switch_on_eof) )
					YY_NEW_FILE;
				}
			break;
			}

		case EOB_ACT_CONTINUE_SCAN:
			(yy_c_buf_p) = (yytext_ptr) + yy_amount_of_matched_text;

			yy_current_state = yy_get_previous_state(  );

			yy_cp = (yy_c_buf_p);
			yy_bp = (yytext_ptr) + YY_MORE_ADJ;
			goto yy_match;

		case EOB_ACT_LAST_MATCH:
			(yy_c_buf_p) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)];

			yy_current_state = yy_get_previous_state(  );

			yy_cp = (yy_c_buf_p);
			yy_bp = (yytext_ptr) + YY_MORE_ADJ;
			goto yy_find_action;
		}
	break;
	}

	default:
		YY_FATAL_ERROR(
			"fatal flex scanner internal error--no action found" );
	} /* end of action switch */
		} /* end of scanning one token */
	} /* end of user's declarations */
} /* end of yylex */

/* yy_get_next_buffer - try to read in a new buffer
 *
 * Returns a code representing an action:
 *	EOB_ACT_LAST_MATCH -
 *	EOB_ACT_CONTINUE_SCAN - continue scanning from current position
 *	EOB_ACT_END_OF_FILE - end of file
 *
 * (Generated flex runtime: slides the partially-matched token to the start
 *  of the buffer, grows the buffer if needed, then refills via YY_INPUT.)
 */
static int yy_get_next_buffer (void)
{
	char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
	char *source = (yytext_ptr);
	int number_to_move, i;
	int ret_val;

	if ( (yy_c_buf_p) > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] )
		YY_FATAL_ERROR(
		"fatal flex scanner internal error--end of buffer missed" );

	if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
		{ /* Don't try to fill the buffer, so this is an EOF. */
		if ( (yy_c_buf_p) - (yytext_ptr) - YY_MORE_ADJ == 1 )
			{
			/* We matched a single character, the EOB, so
			 * treat this as a final EOF.
			 */
			return EOB_ACT_END_OF_FILE;
			}

		else
			{
			/* We matched some text prior to the EOB, first
			 * process it.
			 */
			return EOB_ACT_LAST_MATCH;
			}
		}

	/* Try to read more data. */

	/* First move last chars to start of buffer. */
	number_to_move = (int) ((yy_c_buf_p) - (yytext_ptr) - 1);

	for ( i = 0; i < number_to_move; ++i )
		*(dest++) = *(source++);

	if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
		/* don't do the read, it's not guaranteed to return an EOF,
		 * just force an EOF
		 */
		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars) = 0;

	else
		{
		int num_to_read =
			YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;

		while ( num_to_read <= 0 )
			{ /* Not enough room in the buffer - grow it. */

			/* just a shorter name for the current buffer */
			YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;

			int yy_c_buf_p_offset =
				(int) ((yy_c_buf_p) - b->yy_ch_buf);

			if ( b->yy_is_our_buffer )
				{
				int new_size = b->yy_buf_size * 2;

				/* Doubling overflowed int: fall back to +12.5% growth */
				if ( new_size <= 0 )
					b->yy_buf_size += b->yy_buf_size / 8;
				else
					b->yy_buf_size *= 2;

				b->yy_ch_buf = (char *)
					/* Include room in for 2 EOB chars. */
					yyrealloc( (void *) b->yy_ch_buf,
						(yy_size_t) (b->yy_buf_size + 2) );
				}
			else
				/* Can't grow it, we don't own it. */
				b->yy_ch_buf = NULL;

			if ( ! b->yy_ch_buf )
				YY_FATAL_ERROR(
				"fatal error - scanner input buffer overflow" );

			/* buffer may have moved: re-derive the scan pointer */
			(yy_c_buf_p) = &b->yy_ch_buf[yy_c_buf_p_offset];

			num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
						number_to_move - 1;
			}

		if ( num_to_read > YY_READ_BUF_SIZE )
			num_to_read = YY_READ_BUF_SIZE;

		/* Read in more data. */
		YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
			(yy_n_chars), num_to_read );

		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
		}

	if ( (yy_n_chars) == 0 )
		{
		if ( number_to_move == YY_MORE_ADJ )
			{
			ret_val = EOB_ACT_END_OF_FILE;
			yyrestart( yyin );
			}

		else
			{
			ret_val = EOB_ACT_LAST_MATCH;
			YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
				YY_BUFFER_EOF_PENDING;
			}
		}

	else
		ret_val = EOB_ACT_CONTINUE_SCAN;

	if (((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
		/* Extend the array by 50%, plus the number we really need. */
		int new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
		YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc(
			(void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf, (yy_size_t) new_size );
		if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
			YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
		/* "- 2" to take care of EOB's */
		YY_CURRENT_BUFFER_LVALUE->yy_buf_size = (int) (new_size - 2);
	}

	(yy_n_chars) += number_to_move;
	/* Re-plant the two end-of-buffer sentinels after the valid data */
	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] = YY_END_OF_BUFFER_CHAR;
	YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars) + 1] = YY_END_OF_BUFFER_CHAR;

	(yytext_ptr) = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];

	return ret_val;
}

/* yy_get_previous_state - get the state just before the EOB char was reached */

static yy_state_type yy_get_previous_state (void)
{
	yy_state_type yy_current_state;
	char *yy_cp;

	yy_current_state = (yy_start);
	yy_current_state += YY_AT_BOL();

	/* Re-run the DFA over the partial token to recover its state */
	for ( yy_cp = (yytext_ptr) + YY_MORE_ADJ; yy_cp < (yy_c_buf_p); ++yy_cp )
		{
		YY_CHAR yy_c = (*yy_cp ?
yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
		if ( yy_accept[yy_current_state] )
			{
			(yy_last_accepting_state) = yy_current_state;
			(yy_last_accepting_cpos) = yy_cp;
			}
		while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
			{
			yy_current_state = (int) yy_def[yy_current_state];
			if ( yy_current_state >= 533 )
				yy_c = yy_meta[yy_c];
			}
		yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
		}

	return yy_current_state;
}

/* yy_try_NUL_trans - try to make a transition on the NUL character
 *
 * synopsis
 *	next_state = yy_try_NUL_trans( current_state );
 *
 * Returns 0 when the DFA jams (generated jam state 532), i.e. NUL cannot
 * extend the current match.
 */
static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
{
	int yy_is_jam;
	char *yy_cp = (yy_c_buf_p);

	YY_CHAR yy_c = 1;	/* equivalence class of the NUL/EOB character */
	if ( yy_accept[yy_current_state] )
		{
		(yy_last_accepting_state) = yy_current_state;
		(yy_last_accepting_cpos) = yy_cp;
		}
	while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
		{
		yy_current_state = (int) yy_def[yy_current_state];
		if ( yy_current_state >= 533 )
			yy_c = yy_meta[yy_c];
		}
	yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
	yy_is_jam = (yy_current_state == 532);

	return yy_is_jam ? 0 : yy_current_state;
}

#ifndef YY_NO_UNPUT

/* Push character c back onto the input stream, shifting the buffer
 * contents toward the end if there is no room at the front. */
static void yyunput (int c, char * yy_bp )
{
	char *yy_cp;

	yy_cp = (yy_c_buf_p);

	/* undo effects of setting up yytext */
	*yy_cp = (yy_hold_char);

	if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
		{ /* need to shift things up to make room */
		/* +2 for EOB chars. */
		int number_to_move = (yy_n_chars) + 2;
		char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
					YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
		char *source =
				&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];

		while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
			*--dest = *--source;

		yy_cp += (int) (dest - source);
		yy_bp += (int) (dest - source);
		YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
			(yy_n_chars) = (int) YY_CURRENT_BUFFER_LVALUE->yy_buf_size;

		if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
			YY_FATAL_ERROR( "flex scanner push-back overflow" );
		}

	*--yy_cp = (char) c;

	(yytext_ptr) = yy_bp;
	(yy_hold_char) = *yy_cp;
	(yy_c_buf_p) = yy_cp;
}

#endif

#ifndef YY_NO_INPUT
#ifdef __cplusplus
/* Read and return the next character of input, refilling the buffer as
 * needed; returns 0 at end of input. */
static int yyinput (void)
#else
static int input (void)
#endif
{
	int c;

	*(yy_c_buf_p) = (yy_hold_char);

	if ( *(yy_c_buf_p) == YY_END_OF_BUFFER_CHAR )
		{
		/* yy_c_buf_p now points to the character we want to return.
		 * If this occurs *before* the EOB characters, then it's a
		 * valid NUL; if not, then we've hit the end of the buffer.
		 */
		if ( (yy_c_buf_p) < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[(yy_n_chars)] )
			/* This was really a NUL. */
			*(yy_c_buf_p) = '\0';

		else
			{ /* need more input */
			int offset = (int) ((yy_c_buf_p) - (yytext_ptr));
			++(yy_c_buf_p);

			switch ( yy_get_next_buffer(  ) )
				{
				case EOB_ACT_LAST_MATCH:
					/* This happens because yy_g_n_b()
					 * sees that we've accumulated a
					 * token and flags that we need to
					 * try matching the token before
					 * proceeding. But for input(),
					 * there's no matching to consider.
					 * So convert the EOB_ACT_LAST_MATCH
					 * to EOB_ACT_END_OF_FILE.
					 */

					/* Reset buffer status. */
					yyrestart( yyin );

					/*FALLTHROUGH*/

				case EOB_ACT_END_OF_FILE:
					{
					if ( yywrap(  ) )
						return 0;

					if ( !
(yy_did_buffer_switch_on_eof) )
						YY_NEW_FILE;
#ifdef __cplusplus
					return yyinput();
#else
					return input();
#endif
					}

				case EOB_ACT_CONTINUE_SCAN:
					(yy_c_buf_p) = (yytext_ptr) + offset;
					break;
				}
			}
		}

	c = *(unsigned char *) (yy_c_buf_p);	/* cast for 8-bit char's */
	*(yy_c_buf_p) = '\0';	/* preserve yytext */
	(yy_hold_char) = *++(yy_c_buf_p);

	YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n');

	return c;
}
#endif	/* ifndef YY_NO_INPUT */

/* --- Generated flex buffer-management runtime below; regenerated from the
 * .l source, so avoid hand edits. --- */

/** Immediately switch to a different input stream.
 * @param input_file A readable stream.
 *
 * @note This function does not reset the start condition to @c INITIAL .
 */
void yyrestart (FILE * input_file )
{
	if ( ! YY_CURRENT_BUFFER ){
		yyensure_buffer_stack ();
		YY_CURRENT_BUFFER_LVALUE =
			yy_create_buffer( yyin, YY_BUF_SIZE );
	}

	yy_init_buffer( YY_CURRENT_BUFFER, input_file );
	yy_load_buffer_state(  );
}

/** Switch to a different input buffer.
 * @param new_buffer The new input buffer.
 *
 */
void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
{
	/* TODO. We should be able to replace this entire function body
	 * with
	 *		yypop_buffer_state();
	 *		yypush_buffer_state(new_buffer);
	 */
	yyensure_buffer_stack ();
	if ( YY_CURRENT_BUFFER == new_buffer )
		return;

	if ( YY_CURRENT_BUFFER )
		{
		/* Flush out information for old buffer. */
		*(yy_c_buf_p) = (yy_hold_char);
		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
		}

	YY_CURRENT_BUFFER_LVALUE = new_buffer;
	yy_load_buffer_state(  );

	/* We don't actually know whether we did this switch during
	 * EOF (yywrap()) processing, but the only time this flag
	 * is looked at is after yywrap() is called, so it's safe
	 * to go ahead and always set it.
	 */
	(yy_did_buffer_switch_on_eof) = 1;
}

/* Copy the current buffer's bookkeeping into the scanner globals. */
static void yy_load_buffer_state (void)
{
	(yy_n_chars) = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
	(yytext_ptr) = (yy_c_buf_p) = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
	yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
	(yy_hold_char) = *(yy_c_buf_p);
}

/** Allocate and initialize an input buffer state.
 * @param file A readable stream.
 * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
 *
 * @return the allocated buffer state.
 */
YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
{
	YY_BUFFER_STATE b;

	b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) );
	if ( ! b )
		YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );

	b->yy_buf_size = size;

	/* yy_ch_buf has to be 2 characters longer than the size given because
	 * we need to put in 2 end-of-buffer characters.
	 */
	b->yy_ch_buf = (char *) yyalloc( (yy_size_t) (b->yy_buf_size + 2) );
	if ( ! b->yy_ch_buf )
		YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );

	b->yy_is_our_buffer = 1;

	yy_init_buffer( b, file );

	return b;
}

/** Destroy the buffer.
 * @param b a buffer created with yy_create_buffer()
 *
 */
void yy_delete_buffer (YY_BUFFER_STATE b )
{
	if ( ! b )
		return;

	if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
		YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;

	if ( b->yy_is_our_buffer )
		yyfree( (void *) b->yy_ch_buf );

	yyfree( (void *) b );
}

/* Initializes or reinitializes a buffer.
 * This function is sometimes called more than once on the same buffer,
 * such as during a yyrestart() or at EOF.
 */
static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
{
	int oerrno = errno;

	yy_flush_buffer( b );

	b->yy_input_file = file;
	b->yy_fill_buffer = 1;

	/* If b is the current buffer, then yy_init_buffer was _probably_
	 * called from yyrestart() or through yy_get_next_buffer.
	 * In that case, we don't want to reset the lineno or column.
	 */
	if (b != YY_CURRENT_BUFFER){
		b->yy_bs_lineno = 1;
		b->yy_bs_column = 0;
	}

	b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0;

	errno = oerrno;	/* isatty() may clobber errno; restore it */
}

/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
 * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
 *
 */
void yy_flush_buffer (YY_BUFFER_STATE b )
{
	if ( ! b )
		return;

	b->yy_n_chars = 0;

	/* We always need two end-of-buffer characters. The first causes
	 * a transition to the end-of-buffer state. The second causes
	 * a jam in that state.
	 */
	b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
	b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;

	b->yy_buf_pos = &b->yy_ch_buf[0];

	b->yy_at_bol = 1;
	b->yy_buffer_status = YY_BUFFER_NEW;

	if ( b == YY_CURRENT_BUFFER )
		yy_load_buffer_state(  );
}

/** Pushes the new state onto the stack. The new state becomes
 *  the current state. This function will allocate the stack
 *  if necessary.
 *  @param new_buffer The new state.
 *
 */
void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
{
	if (new_buffer == NULL)
		return;

	yyensure_buffer_stack();

	/* This block is copied from yy_switch_to_buffer. */
	if ( YY_CURRENT_BUFFER )
		{
		/* Flush out information for old buffer. */
		*(yy_c_buf_p) = (yy_hold_char);
		YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = (yy_c_buf_p);
		YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
		}

	/* Only push if top exists. Otherwise, replace top. */
	if (YY_CURRENT_BUFFER)
		(yy_buffer_stack_top)++;
	YY_CURRENT_BUFFER_LVALUE = new_buffer;

	/* copied from yy_switch_to_buffer. */
	yy_load_buffer_state(  );
	(yy_did_buffer_switch_on_eof) = 1;
}

/** Removes and deletes the top of the stack, if present.
 *  The next element becomes the new top.
 *
 */
void yypop_buffer_state (void)
{
	if (!YY_CURRENT_BUFFER)
		return;

	yy_delete_buffer(YY_CURRENT_BUFFER );
	YY_CURRENT_BUFFER_LVALUE = NULL;
	if ((yy_buffer_stack_top) > 0)
		--(yy_buffer_stack_top);

	if (YY_CURRENT_BUFFER) {
		yy_load_buffer_state(  );
		(yy_did_buffer_switch_on_eof) = 1;
	}
}

/* Allocates the stack if it does not exist.
 *  Guarantees space for at least one push.
 */
static void yyensure_buffer_stack (void)
{
	yy_size_t num_to_alloc;

	if (!(yy_buffer_stack)) {

		/* First allocation is just for 2 elements, since we don't know if this
		 * scanner will even need a stack. We use 2 instead of 1 to avoid an
		 * immediate realloc on the next call.
		 */
		num_to_alloc = 1; /* After all that talk, this was set to 1 anyways... */
		(yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
								(num_to_alloc * sizeof(struct yy_buffer_state*)
								);
		if ( ! (yy_buffer_stack) )
			YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );

		memset((yy_buffer_stack), 0, num_to_alloc * sizeof(struct yy_buffer_state*));

		(yy_buffer_stack_max) = num_to_alloc;
		(yy_buffer_stack_top) = 0;
		return;
	}

	if ((yy_buffer_stack_top) >= ((yy_buffer_stack_max)) - 1){

		/* Increase the buffer to prepare for a possible push. */
		yy_size_t grow_size = 8 /* arbitrary grow size */;

		num_to_alloc = (yy_buffer_stack_max) + grow_size;
		(yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
								((yy_buffer_stack),
								num_to_alloc * sizeof(struct yy_buffer_state*)
								);
		if ( ! (yy_buffer_stack) )
			YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );

		/* zero only the new slots.*/
		memset((yy_buffer_stack) + (yy_buffer_stack_max), 0, grow_size * sizeof(struct yy_buffer_state*));
		(yy_buffer_stack_max) = num_to_alloc;
	}
}

/** Setup the input buffer state to scan directly from a user-specified character buffer.
 * @param base the character buffer
 * @param size the size in bytes of the character buffer
 *
 * @return the newly allocated buffer state object.
*/ YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size ) { YY_BUFFER_STATE b; if ( size < 2 || base[size-2] != YY_END_OF_BUFFER_CHAR || base[size-1] != YY_END_OF_BUFFER_CHAR ) /* They forgot to leave room for the EOB's. */ return NULL; b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) ); if ( ! b ) YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" ); b->yy_buf_size = (int) (size - 2); /* "- 2" to take care of EOB's */ b->yy_buf_pos = b->yy_ch_buf = base; b->yy_is_our_buffer = 0; b->yy_input_file = NULL; b->yy_n_chars = b->yy_buf_size; b->yy_is_interactive = 0; b->yy_at_bol = 1; b->yy_fill_buffer = 0; b->yy_buffer_status = YY_BUFFER_NEW; yy_switch_to_buffer( b ); return b; } /** Setup the input buffer state to scan a string. The next call to yylex() will * scan from a @e copy of @a str. * @param yystr a NUL-terminated string to scan * * @return the newly allocated buffer state object. * @note If you want to scan bytes that may contain NUL values, then use * yy_scan_bytes() instead. */ YY_BUFFER_STATE yy_scan_string (const char * yystr ) { return yy_scan_bytes( yystr, (int) strlen(yystr) ); } /** Setup the input buffer state to scan the given bytes. The next call to yylex() will * scan from a @e copy of @a bytes. * @param yybytes the byte buffer to scan * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes. * * @return the newly allocated buffer state object. */ YY_BUFFER_STATE yy_scan_bytes (const char * yybytes, int _yybytes_len ) { YY_BUFFER_STATE b; char *buf; yy_size_t n; int i; /* Get memory for full buffer, including space for trailing EOB's. */ n = (yy_size_t) (_yybytes_len + 2); buf = (char *) yyalloc( n ); if ( ! buf ) YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" ); for ( i = 0; i < _yybytes_len; ++i ) buf[i] = yybytes[i]; buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; b = yy_scan_buffer( buf, n ); if ( ! 
b ) YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" ); /* It's okay to grow etc. this buffer, and we should throw it * away when we're done. */ b->yy_is_our_buffer = 1; return b; } #ifndef YY_EXIT_FAILURE #define YY_EXIT_FAILURE 2 #endif static void yynoreturn yy_fatal_error (const char* msg ) { fprintf( stderr, "%s\n", msg ); exit( YY_EXIT_FAILURE ); } /* Redefine yyless() so it works in section 3 code. */ #undef yyless #define yyless(n) \ do \ { \ /* Undo effects of setting up yytext. */ \ int yyless_macro_arg = (n); \ YY_LESS_LINENO(yyless_macro_arg);\ yytext[yyleng] = (yy_hold_char); \ (yy_c_buf_p) = yytext + yyless_macro_arg; \ (yy_hold_char) = *(yy_c_buf_p); \ *(yy_c_buf_p) = '\0'; \ yyleng = yyless_macro_arg; \ } \ while ( 0 ) /* Accessor methods (get/set functions) to struct members. */ /** Get the current line number. * */ int yyget_lineno (void) { return yylineno; } /** Get the input stream. * */ FILE *yyget_in (void) { return yyin; } /** Get the output stream. * */ FILE *yyget_out (void) { return yyout; } /** Get the length of the current token. * */ int yyget_leng (void) { return yyleng; } /** Get the current token. * */ char *yyget_text (void) { return yytext; } /** Set the current line number. * @param _line_number line number * */ void yyset_lineno (int _line_number ) { yylineno = _line_number; } /** Set the input stream. This does not discard the current * input buffer. * @param _in_str A readable stream. * * @see yy_switch_to_buffer */ void yyset_in (FILE * _in_str ) { yyin = _in_str ; } void yyset_out (FILE * _out_str ) { yyout = _out_str ; } int yyget_debug (void) { return yy_flex_debug; } void yyset_debug (int _bdebug ) { yy_flex_debug = _bdebug ; } static int yy_init_globals (void) { /* Initialization is the same as for the non-reentrant scanner. * This function is called from yylex_destroy(), so don't allocate here. 
*/ (yy_buffer_stack) = NULL;
	(yy_buffer_stack_top) = 0;
	(yy_buffer_stack_max) = 0;
	(yy_c_buf_p) = NULL;
	(yy_init) = 0;
	(yy_start) = 0;

/* Defined in main.c */
#ifdef YY_STDINIT
	yyin = stdin;
	yyout = stdout;
#else
	yyin = NULL;
	yyout = NULL;
#endif

	/* For future reference: Set errno on error, since we are called by
	 * yylex_init()
	 */
	return 0;
}

/* yylex_destroy is for both reentrant and non-reentrant scanners. */
int yylex_destroy  (void)
{
	/* Pop the buffer stack, destroying each element. */
	while(YY_CURRENT_BUFFER){
		yy_delete_buffer( YY_CURRENT_BUFFER );
		YY_CURRENT_BUFFER_LVALUE = NULL;
		yypop_buffer_state();
	}

	/* Destroy the stack itself. */
	yyfree((yy_buffer_stack) );
	(yy_buffer_stack) = NULL;

	/* Reset the globals. This is important in a non-reentrant scanner so the next time
	 * yylex() is called, initialization will occur. */
	yy_init_globals( );

	return 0;
}

/*
 * Internal utility routines.
 */

#ifndef yytext_ptr
/* Bounded byte copy used when yytext is kept as a copy of the token.
 * NOTE: copies exactly n bytes; no NUL terminator is appended. */
static void yy_flex_strncpy (char* s1, const char * s2, int n )
{

	int i;
	for ( i = 0; i < n; ++i )
		s1[i] = s2[i];
}
#endif

#ifdef YY_NEED_STRLEN
/* Minimal strlen replacement for environments lacking <string.h>. */
static int yy_flex_strlen (const char * s )
{
	int n;
	for ( n = 0; s[n]; ++n )
		;

	return n;
}
#endif

/* Thin wrappers over malloc/realloc so embedding code can override
 * the scanner's allocation strategy. */
void *yyalloc (yy_size_t  size )
{
	return malloc(size);
}

void *yyrealloc  (void * ptr, yy_size_t  size )
{
	/* The cast to (char *) in the following accommodates both
	 * implementations that use char* generic pointers, and those
	 * that use void* generic pointers.  It works with the latter
	 * because both ANSI C and C++ allow castless assignment from
	 * any pointer type to void*, and deal with argument conversions
	 * as though doing an assignment.
*/ return realloc(ptr, size); } void yyfree (void * ptr ) { free( (char *) ptr ); /* see yyrealloc() for (char *) cast */ } #define YYTABLES_NAME "yytables" ================================================ FILE: pypcode/sleigh/slghscan.l ================================================ /* ### * IP: GHIDRA * NOTE: flex skeletons are NOT bound by flex's BSD license * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ %option prefix="sleigh" %{ #define sleighwrap() 1 #define YY_SKIP_YYWRAP /* If we are building don't include unistd.h */ /* flex provides us with this macro for turning it off */ #ifdef _WIN32 #define YY_NO_UNISTD_H static int isatty (int fildes) { return 0; } #endif #include "slgh_compile.hh" namespace ghidra { #include "slghparse.hh" struct FileStreamState { YY_BUFFER_STATE lastbuffer; // Last lex buffer corresponding to the stream FILE *file; // The NEW file stream }; extern SleighCompile *slgh; int4 last_preproc; // lex state before last preprocessing erasure int4 actionon; // whether '&' '|' and '^' are treated as actionon in pattern section int4 withsection = 0; // whether we are between the 'with' keyword and its open brace '{' vector filebuffers; vector ifstack; int4 negative_if = -1; void preproc_error(const string &err) { slgh->reportError((const Location *)0, err); cerr << "Terminating due to error in preprocessing" << endl; exit(1); } void check_to_endofline(istream &s) { // Make sure there is nothing to the end of the line s >> ws; if (!s.eof()) if (s.peek() 
!= '#')
      preproc_error("Extra characters in preprocessor directive");
}

// Read a proper identifier from the stream.
// Leading whitespace is skipped, then alphanumeric and '_' characters are
// consumed.  The first non-identifier character is left in the stream.
// May return an empty string if no identifier characters are present.
string read_identifier(istream &s)

{				// Read a proper identifier from the stream
  s >> ws;			// Skip any whitespace
  string res;
  while(!s.eof()) {
    char tok = s.peek();
    if (isalnum(tok) || (tok == '_')) {
      s >> tok;
      res += tok;
    }
    else
      break;			// First non-identifier character ends the scan
  }
  return res;
}

// Read a double-quoted string from the stream into -res- (quotes excluded).
// A missing opening or closing quote is fatal: preproc_error() terminates
// the process.  End-of-stream before the closing quote (get() < 0) also
// triggers the "Missing terminating double quote" error.
void preprocess_string(istream &s,string &res)

{				// Grab string surrounded by double quotes from stream or call preproc_error
  int4 val;
  s >> ws;			// Skip any whitespace
  val = s.get();
  if (val != '\"')
    preproc_error("Expecting double quoted string");
  val = s.get();
  while((val != '\"')&&(val>=0)) {	// Accumulate until closing quote or end of stream
    res += (char)val;
    val = s.get();
  }
  if (val != '\"')
    preproc_error("Missing terminating double quote");
}

extern int4 preprocess_if(istream &s);	// Forward declaration for recursion

// Evaluate a "defined(NAME)" operator inside an @if/@elif condition.
// Returns 1 if the preprocessor macro NAME currently has a value
// registered with the compiler, 0 otherwise.  Malformed syntax is fatal.
int4 read_defined_operator(istream &s)

{				// We have seen a -defined- keyword in an if or elif
				// Read macro name used as input, return 1 if it is defined
  char tok = ' ';
  string macroname;

  s >> ws >> tok;
  if (tok != '(')
    preproc_error("Badly formed \"defined\" operator");
  macroname = read_identifier(s);
  int4 res = slgh->getPreprocValue(macroname,macroname) ?
1 : 0;
  s >> ws >> tok;
  if (tok != ')')
    preproc_error("Badly formed \"defined\" operator");
  return res;
}

// Evaluate one boolean clause of a preprocessor condition.
// A clause is either a parenthesized sub-expression (recursively evaluated
// via preprocess_if), a defined(NAME) test, or a two-character comparison
// ("==" or "!=") between two operands.  Each operand is either a quoted
// string literal or a macro name, which is replaced by its value before
// comparing.  Unknown macros and malformed syntax are fatal.
int4 read_boolean_clause(istream &s)

{				// We have seen an if or elif
				// return 1 if condition is true or else 0
  s >> ws;
  if (s.peek()=='(') {		// Parenthetical expression spawns recursion
    int4 val = s.get();
    int4 res = preprocess_if(s);
    s >> ws;
    val = s.get();
    if (val != ')')
      preproc_error("Unbalanced parentheses");
    return res;
  }
  // Otherwise we must have a normal comparison operator
  string lhs,rhs,comp;
  if (s.peek()=='\"')		// Read left-hand side string
    preprocess_string(s,lhs);
  else {
    lhs = read_identifier(s);
    if (lhs == "defined")
      return read_defined_operator(s);	// Not a comparison: defined(NAME) test
    if (!slgh->getPreprocValue(lhs,lhs))	// Replace macro name with its value in place
      preproc_error("Could not find preprocessor macro "+lhs);
  }
  char tok;
  s >> tok;			// Read comparison symbol (always two characters)
  comp += tok;
  s >> tok;
  comp += tok;
  s >> ws;
  if (s.peek()=='\"')		// Read right-hand side string
    preprocess_string(s,rhs);
  else {
    rhs = read_identifier(s);
    if (!slgh->getPreprocValue(rhs,rhs))	// Replace macro name with its value in place
      preproc_error("Could not find preprocessor macro "+rhs);
  }
  // Only string equality and inequality comparisons are supported
  if (comp == "==")
    return (lhs == rhs) ? 1 : 0;
  else if (comp=="!=")
    return (lhs != rhs) ?
1 : 0; else preproc_error("Syntax error in condition"); return 0; } int4 preprocess_if(istream &s) { int4 res = read_boolean_clause(s); s >> ws; while((!s.eof())&&(s.peek()!=')')) { string boolop; char tok; s >> tok; boolop += tok; s >> tok; boolop += tok; int4 res2 = read_boolean_clause(s); if (boolop == "&&") res = res & res2; else if (boolop == "||") res = res | res2; else if (boolop == "^^") res = res ^ res2; else preproc_error("Syntax error in expression"); s >> ws; } return res; } void expand_preprocmacros(string &str) { string::size_type pos; string::size_type lastpos = 0; pos = str.find("$(",lastpos); if (pos == string::npos) return; string res; for(;;) { if (pos == string::npos) { res += str.substr(lastpos); str = res; return; } else { res += str.substr(lastpos,(pos-lastpos)); string::size_type endpos = str.find(')',pos+2); if (endpos == string::npos) { preproc_error("Unterminated macro in string"); break; } string macro = str.substr(pos+2, endpos - (pos+2)); string value; if (!slgh->getPreprocValue(macro,value)) { preproc_error("Unknown preprocessing macro "+macro); break; } res += value; lastpos = endpos + 1; } pos = str.find("$(",lastpos); } } int4 preprocess(int4 cur_state,int4 blank_state) { string str(sleightext); string::size_type pos = str.find('#'); if (pos != string::npos) str.erase(pos); istringstream s(str); string type; if (cur_state != blank_state) last_preproc = cur_state; s.get(); // Skip the preprocessor marker s >> type; if (type == "include") { if (negative_if == -1) { // Not in the middle of a false if clause filebuffers.push_back(FileStreamState()); // Save state of current file filebuffers.back().lastbuffer = YY_CURRENT_BUFFER; filebuffers.back().file = (FILE *)0; s >> ws; string fname; preprocess_string(s,fname); expand_preprocmacros(fname); slgh->parseFromNewFile(fname); fname = slgh->grabCurrentFilePath(); sleighin = fopen(fname.c_str(),"r"); if (sleighin == (FILE *)0) preproc_error("Could not open included file "+fname); 
filebuffers.back().file = sleighin; sleigh_switch_to_buffer( sleigh_create_buffer(sleighin, YY_BUF_SIZE) ); check_to_endofline(s); } } else if (type == "define") { if (negative_if == -1) { string varname; string value; varname = read_identifier(s); // Get name of variable being defined s >> ws; if (s.peek() == '\"') preprocess_string(s,value); else value = read_identifier(s); if (varname.size()==0) preproc_error("Error in preprocessor definition"); slgh->setPreprocValue(varname,value); check_to_endofline(s); } } else if (type == "undef") { if (negative_if == -1) { string varname; varname = read_identifier(s); // Name of variable to undefine if (varname.size()==0) preproc_error("Error in preprocessor undef"); slgh->undefinePreprocValue(varname); check_to_endofline(s); } } else if (type=="ifdef") { string varname; varname = read_identifier(s); if (varname.size()==0) preproc_error("Error in preprocessor ifdef"); string value; int4 truth = (slgh->getPreprocValue(varname,value)) ? 1 : 0; ifstack.push_back(truth); check_to_endofline(s); } else if (type=="ifndef") { string varname; varname = read_identifier(s); if (varname.size()==0) preproc_error("Error in preprocessor ifndef"); string value; int4 truth = (slgh->getPreprocValue(varname,value)) ? 
0 : 1; // flipped from ifdef ifstack.push_back(truth); check_to_endofline(s); } else if (type=="if") { int4 truth = preprocess_if(s); if (!s.eof()) preproc_error("Unbalanced parentheses"); ifstack.push_back(truth); } else if (type=="elif") { if (ifstack.empty()) preproc_error("elif without preceding if"); if ((ifstack.back()&2)!=0) // We have already seen an else clause preproc_error("elif follows else"); if ((ifstack.back()&4)!=0) // We have already seen a true elif clause ifstack.back() = 4; // don't include any other elif clause else if ((ifstack.back()&1)!=0) // Last clause was a true if ifstack.back() = 4; // don't include this elif else { int4 truth = preprocess_if(s); if (!s.eof()) preproc_error("Unbalanced parentheses"); if (truth==0) ifstack.back() = 0; else ifstack.back() = 5; } } else if (type=="endif") { if (ifstack.empty()) preproc_error("preprocessing endif without matching if"); ifstack.pop_back(); check_to_endofline(s); } else if (type=="else") { if (ifstack.empty()) preproc_error("preprocessing else without matching if"); if ((ifstack.back()&2)!=0) preproc_error("second else for one if"); if ((ifstack.back()&4)!=0) // Seen a true elif clause before ifstack.back() = 6; else if (ifstack.back()==0) ifstack.back() = 3; else ifstack.back() = 2; check_to_endofline(s); } else preproc_error("Unknown preprocessing directive: "+type); if (negative_if >= 0) { // We were in a false state if (negative_if+1 < ifstack.size()) return blank_state; // false state is still deep in stack else // false state is popped off or is current and changed negative_if = -1; } if (ifstack.empty()) return last_preproc; if ((ifstack.back()&1)==0) { negative_if = ifstack.size()-1; return blank_state; } return last_preproc; } void preproc_macroexpand(void) { filebuffers.push_back(FileStreamState()); filebuffers.back().lastbuffer = YY_CURRENT_BUFFER; filebuffers.back().file = (FILE *)0; string macro(sleightext); macro.erase(0,2); macro.erase(macro.size()-1,1); string value; if 
(!slgh->getPreprocValue(macro,value)) preproc_error("Unknown preprocessing macro "+macro); sleigh_switch_to_buffer( sleigh_scan_string( value.c_str() ) ); slgh->parsePreprocMacro(); } int4 find_symbol(void) { string * newstring = new string(sleightext); SleighSymbol *sym = slgh->findSymbol(*newstring); if (sym == (SleighSymbol *)0) { sleighlval.str = newstring; return STRING; } delete newstring; switch(sym->getType()) { case SleighSymbol::section_symbol: sleighlval.sectionsym = (SectionSymbol *)sym; return SECTIONSYM; case SleighSymbol::space_symbol: sleighlval.spacesym = (SpaceSymbol *)sym; return SPACESYM; case SleighSymbol::token_symbol: sleighlval.tokensym = (TokenSymbol *)sym; return TOKENSYM; case SleighSymbol::userop_symbol: sleighlval.useropsym = (UserOpSymbol *)sym; return USEROPSYM; case SleighSymbol::value_symbol: sleighlval.valuesym = (ValueSymbol *)sym; return VALUESYM; case SleighSymbol::valuemap_symbol: sleighlval.valuemapsym = (ValueMapSymbol *)sym; return VALUEMAPSYM; case SleighSymbol::name_symbol: sleighlval.namesym = (NameSymbol *)sym; return NAMESYM; case SleighSymbol::varnode_symbol: sleighlval.varsym = (VarnodeSymbol *)sym; return VARSYM; case SleighSymbol::bitrange_symbol: sleighlval.bitsym = (BitrangeSymbol *)sym; return BITSYM; case SleighSymbol::varnodelist_symbol: sleighlval.varlistsym = (VarnodeListSymbol *)sym; return VARLISTSYM; case SleighSymbol::operand_symbol: sleighlval.operandsym = (OperandSymbol *)sym; return OPERANDSYM; case SleighSymbol::start_symbol: case SleighSymbol::end_symbol: case SleighSymbol::next2_symbol: case SleighSymbol::flowdest_symbol: case SleighSymbol::flowref_symbol: sleighlval.specsym = (SpecificSymbol *)sym; return JUMPSYM; case SleighSymbol::subtable_symbol: sleighlval.subtablesym = (SubtableSymbol *)sym; return SUBTABLESYM; case SleighSymbol::macro_symbol: sleighlval.macrosym = (MacroSymbol *)sym; return MACROSYM; case SleighSymbol::label_symbol: sleighlval.labelsym = (LabelSymbol *)sym; return LABELSYM; 
case SleighSymbol::epsilon_symbol: sleighlval.specsym = (SpecificSymbol *)sym; return SPECSYM; case SleighSymbol::context_symbol: sleighlval.contextsym = (ContextSymbol *)sym; return CONTEXTSYM; case SleighSymbol::dummy_symbol: break; } return -1; // Should never reach here } int4 scan_number(char *numtext,SLEIGHSTYPE *lval,bool signednum) { uintb val; if (numtext[0] == '0' && numtext[1] == 'b') { val = 0; numtext += 2; while ((*numtext) != 0) { val <<= 1; if (*numtext == '1') { val |= 1; } ++numtext; } } else { istringstream s(numtext); s.unsetf(ios::dec | ios::hex | ios::oct); s >> val; if (!s) return BADINTEGER; } if (signednum) { lval->big = new intb(val); return INTB; } lval->i = new uintb(val); return INTEGER; } } // End namespace ghidra using namespace ghidra; %} %x defblock %x macroblock %x print %x pattern %x sem %x preproc %% ^@[^\n]*\n? { slgh->nextLine(); BEGIN( preprocess(INITIAL,preproc) ); } \$\([a-zA-Z0-9_.][a-zA-Z0-9_.]*\) { preproc_macroexpand(); } [(),\-] { sleighlval.ch = sleightext[0]; return sleightext[0]; } \: { BEGIN(print); slgh->calcContextLayout(); sleighlval.ch = sleightext[0]; return sleightext[0]; } \{ { BEGIN(sem); sleighlval.ch = sleightext[0]; return sleightext[0]; } #.* [\r\ \t\v]+ \n { slgh->nextLine(); } macro { BEGIN(macroblock); return MACRO_KEY; } define { BEGIN(defblock); return DEFINE_KEY; } attach { BEGIN(defblock); slgh->calcContextLayout(); return ATTACH_KEY; } with { BEGIN(pattern); withsection = 1; slgh->calcContextLayout(); return WITH_KEY; } [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } . { return sleightext[0]; } ^@[^\n]*\n? { slgh->nextLine(); BEGIN( preprocess(macroblock,preproc) ); } \$\([a-zA-Z0-9_.][a-zA-Z0-9_.]*\) { preproc_macroexpand(); } [(),] { sleighlval.ch = sleightext[0]; return sleightext[0]; } \{ { BEGIN(sem); return sleightext[0]; } [a-zA-Z_.][a-zA-Z0-9_.]* { sleighlval.str = new string(sleightext); return STRING; } [\r\ \t\v]+ \n { slgh->nextLine(); } . { return sleightext[0]; } ^@[^\n]*\n? 
{ slgh->nextLine(); BEGIN( preprocess(defblock,preproc) ); } \$\([a-zA-Z0-9_.][a-zA-Z0-9_.]*\) { preproc_macroexpand(); } [(),=:\[\]] { sleighlval.ch = sleightext[0]; return sleightext[0]; } \; { BEGIN(INITIAL); sleighlval.ch = sleightext[0]; return sleightext[0]; } space { return SPACE_KEY; } type { return TYPE_KEY; } ram_space { return RAM_KEY; } default { return DEFAULT_KEY; } register_space { return REGISTER_KEY; } token { return TOKEN_KEY; } context { return CONTEXT_KEY; } bitrange { return BITRANGE_KEY; } signed { return SIGNED_KEY; } noflow { return NOFLOW_KEY; } hex { return HEX_KEY; } dec { return DEC_KEY; } endian { return ENDIAN_KEY; } alignment { return ALIGN_KEY; } big { return BIG_KEY; } little { return LITTLE_KEY; } size { return SIZE_KEY; } wordsize { return WORDSIZE_KEY; } offset { return OFFSET_KEY; } names { return NAMES_KEY; } values { return VALUES_KEY; } variables { return VARIABLES_KEY; } pcodeop { return PCODEOP_KEY; } #.* [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } [0-9]|[1-9][0-9]+ { return scan_number(sleightext,&sleighlval,false); } 0x[0-9a-fA-F]+ { return scan_number(sleightext,&sleighlval,false); } 0b[01]+ { return scan_number(sleightext,&sleighlval,false); } \"([^\"[:cntrl:]]|\"\")*\" { sleighlval.str = new string(sleightext+1,strlen(sleightext)-2); return STRING; } [\r\ \t\v]+ \n { slgh->nextLine(); } . { return sleightext[0]; } ^@[^\n]*\n? { slgh->nextLine(); BEGIN( preprocess(print,preproc) ); } \$\([a-zA-Z0-9_.][a-zA-Z0-9_.]*\) { preproc_macroexpand(); } [~!@#$%&*()\-=+\[\]{}|;:<>?,/0-9] { sleighlval.ch = sleightext[0]; return CHAR; } \^ { sleighlval.ch = '^'; return '^'; } is { BEGIN(pattern); actionon=0; return IS_KEY; } [a-zA-Z_.][a-zA-Z0-9_.]* { sleighlval.str = new string(sleightext); return SYMBOLSTRING; } \"([^\"[:cntrl:]]|\"\")*\" { sleighlval.str = new string(sleightext+1,strlen(sleightext)-2); return STRING; } [\r\ \t\v]+ { sleighlval.ch = ' '; return ' '; } \n { slgh->nextLine(); return ' '; } . 
{ return sleightext[0]; } ^@[^\n]*\n? { slgh->nextLine(); BEGIN( preprocess(pattern,preproc) ); } \$\([a-zA-Z0-9_.][a-zA-Z0-9_.]*\) { preproc_macroexpand(); } \{ { BEGIN((withsection==1) ? INITIAL:sem); withsection=0; sleighlval.ch = sleightext[0]; return sleightext[0]; } unimpl { BEGIN(INITIAL); return OP_UNIMPL; } globalset { return GLOBALSET_KEY; } \>\> { return OP_RIGHT; } \<\< { return OP_LEFT; } \!\= { return OP_NOTEQUAL; } \<\= { return OP_LESSEQUAL; } \>\= { return OP_GREATEQUAL; } \$and { return OP_AND; } \$or { return OP_OR; } \$xor { return OP_XOR; } \.\.\. { return ELLIPSIS_KEY; } \[ { actionon = 1; sleighlval.ch = sleightext[0]; return sleightext[0]; } \] { actionon = 0; sleighlval.ch = sleightext[0]; return sleightext[0]; } \& { sleighlval.ch = sleightext[0]; return (actionon==0) ? sleightext[0] : OP_AND; } \| { sleighlval.ch = sleightext[0]; return (actionon==0) ? sleightext[0] : OP_OR; } \^ { return OP_XOR; } [=(),:;+\-*/~<>] { sleighlval.ch = sleightext[0]; return sleightext[0]; } #.* [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } [0-9]|[1-9][0-9]+ { return scan_number(sleightext,&sleighlval,true); } 0x[0-9a-fA-F]+ { return scan_number(sleightext,&sleighlval,true); } 0b[01]+ { return scan_number(sleightext,&sleighlval,true); } [\r\ \t\v]+ \n { slgh->nextLine(); } . { return sleightext[0]; } ^@[^\n]*\n? 
{ slgh->nextLine(); BEGIN( preprocess(sem,preproc) ); } \$\([a-zA-Z0-9_.][a-zA-Z0-9_.]*\) { preproc_macroexpand(); } \} { BEGIN(INITIAL); sleighlval.ch = sleightext[0]; return sleightext[0]; } \|\| { return OP_BOOL_OR; } \&\& { return OP_BOOL_AND; } \^\^ { return OP_BOOL_XOR; } \>\> { return OP_RIGHT; } \<\< { return OP_LEFT; } \=\= { return OP_EQUAL; } \!\= { return OP_NOTEQUAL; } \<\= { return OP_LESSEQUAL; } \>\= { return OP_GREATEQUAL; } s\/ { return OP_SDIV; } s\% { return OP_SREM; } s\>\> { return OP_SRIGHT; } s\< { return OP_SLESS; } s\> { return OP_SGREAT; } s\<\= { return OP_SLESSEQUAL; } s\>\= { return OP_SGREATEQUAL; } f\+ { return OP_FADD; } f\- { return OP_FSUB; } f\* { return OP_FMULT; } f\/ { return OP_FDIV; } f\=\= { return OP_FEQUAL; } f\!\= { return OP_FNOTEQUAL; } f\< { return OP_FLESS; } f\> { return OP_FGREAT; } f\<\= { return OP_FLESSEQUAL; } f\>\= { return OP_FGREATEQUAL; } zext { return OP_ZEXT; } carry { return OP_CARRY; } borrow { return OP_BORROW; } sext { return OP_SEXT; } scarry { return OP_SCARRY; } sborrow { return OP_SBORROW; } nan { return OP_NAN; } abs { return OP_ABS; } sqrt { return OP_SQRT; } ceil { return OP_CEIL; } floor { return OP_FLOOR; } round { return OP_ROUND; } int2float { return OP_INT2FLOAT; } float2float { return OP_FLOAT2FLOAT; } trunc { return OP_TRUNC; } cpool { return OP_CPOOLREF; } newobject { return OP_NEW; } popcount { return OP_POPCOUNT; } lzcount { return OP_LZCOUNT; } if { return IF_KEY; } goto { return GOTO_KEY; } call { return CALL_KEY; } return { return RETURN_KEY; } delayslot { return DELAYSLOT_KEY; } crossbuild { return CROSSBUILD_KEY; } export { return EXPORT_KEY; } build { return BUILD_KEY; } local { return LOCAL_KEY; } [=(),:\[\];!&|^+\-*/%~<>] { sleighlval.ch = sleightext[0]; return sleightext[0]; } #.* [a-zA-Z_.][a-zA-Z0-9_.]* { return find_symbol(); } [0-9]|[1-9][0-9]+ { return scan_number(sleightext,&sleighlval,false); } 0x[0-9a-fA-F]+ { return scan_number(sleightext,&sleighlval,false); } 
0b[01]+ { return scan_number(sleightext,&sleighlval,false); } [\r\ \t\v]+ \n { slgh->nextLine(); } . { return sleightext[0]; } ^@.*\n? { slgh->nextLine(); BEGIN( preprocess(preproc,preproc) ); } ^.*\n { slgh->nextLine(); } <> { sleigh_delete_buffer( YY_CURRENT_BUFFER ); if (filebuffers.empty()) yyterminate(); sleigh_switch_to_buffer( filebuffers.back().lastbuffer ); FILE *curfile = filebuffers.back().file; if (curfile != (FILE *)0) fclose(curfile); filebuffers.pop_back(); slgh->parseFileFinished(); } ================================================ FILE: pypcode/sleigh/slghsymbol.cc ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "slghsymbol.hh"
#include "sleighbase.hh"
// NOTE(review): the header name on the next #include line (and the
// template arguments on the pair/vector declarations below) appear to
// have been lost in extraction -- presumably <cmath>, given the
// "using std::log" directive; confirm against the upstream Ghidra source.
#include

namespace ghidra {

using std::log;

// Insert symbol -a- into this scope.  If a symbol with the same name is
// already present, the EXISTING symbol is returned and -a- is not
// inserted; otherwise -a- itself is returned.
SleighSymbol *SymbolScope::addSymbol(SleighSymbol *a)

{
  pair res;			// NOTE(review): template arguments lost in extraction
  res = tree.insert( a );
  if (!res.second)
    return *res.first;		// Symbol already exists in this table
  return a;
}

// Look up a symbol by name within this single scope only (no parent
// search).  Returns null if the name is not present.
SleighSymbol *SymbolScope::findSymbol(const string &nm) const

{
  SleighSymbol dummy(nm);	// Temporary key object for the set lookup
  SymbolTree::const_iterator iter;
  iter = tree.find( &dummy );
  if (iter != tree.end())
    return *iter;
  return (SleighSymbol *)0;
}

// The table owns both its scopes and its symbols: destroy them all.
SymbolTable::~SymbolTable(void)

{
  vector::iterator iter;	// NOTE(review): template arguments lost in extraction
  for(iter=table.begin();iter!=table.end();++iter)
    delete *iter;
  vector::iterator siter;	// NOTE(review): template arguments lost in extraction
  for(siter=symbollist.begin();siter!=symbollist.end();++siter)
    delete *siter;
}

// Push a new scope, child of the current scope, and make it current.
// The new scope's id is its index in -table-.
void SymbolTable::addScope(void)

{
  curscope = new SymbolScope(curscope,table.size());
  table.push_back(curscope);
}

// Pop back to the parent of the current scope (no-op at the root).
void SymbolTable::popScope(void)

{
  if (curscope != (SymbolScope *)0)
    curscope = curscope->getParent();
}

// Walk -i- levels up from the current scope, stopping early at the root.
SymbolScope *SymbolTable::skipScope(int4 i) const

{
  SymbolScope *res = curscope;
  while(i>0) {
    if (res->parent == (SymbolScope *)0)
      return res;		// Hit the root before skipping all -i- levels
    res = res->parent;
    --i;
  }
  return res;
}

// Register symbol -a- in the global (root) scope.  Assigns the symbol
// its id (index in -symbollist-) and its scope id.  Throws SleighError
// if a symbol of the same name already exists in the global scope.
void SymbolTable::addGlobalSymbol(SleighSymbol *a)

{
  a->id = symbollist.size();
  symbollist.push_back(a);
  SymbolScope *scope = getGlobalScope();
  a->scopeid = scope->getId();
  SleighSymbol *res = scope->addSymbol(a);
  if (res != a)
    throw SleighError("Duplicate symbol name '" + a->getName() + "'");
}

// Register symbol -a- in the CURRENT scope (otherwise identical to
// addGlobalSymbol).  Throws SleighError on a duplicate name.
void SymbolTable::addSymbol(SleighSymbol *a)

{
  a->id = symbollist.size();
  symbollist.push_back(a);
  a->scopeid = curscope->getId();
  SleighSymbol *res = curscope->addSymbol(a);
  if (res != a)
    throw SleighError("Duplicate symbol name: "+a->getName());
}

// Search for name -nm- starting in -scope- and walking up through
// parent scopes.  Returns null if no scope on the chain defines it.
SleighSymbol *SymbolTable::findSymbolInternal(SymbolScope *scope,const string &nm) const

{
  SleighSymbol *res;
  while(scope != (SymbolScope *)0) {
    res = scope->findSymbol(nm);
    if (res != (SleighSymbol *)0)
      return res;
    scope = scope->getParent();	// Try higher scope
  }
  return (SleighSymbol *)0;
}

void
SymbolTable::replaceSymbol(SleighSymbol *a,SleighSymbol *b) { // Replace symbol a with symbol b // assuming a and b have the same name SleighSymbol *sym; int4 i = table.size()-1; while(i>=0) { // Find the particular symbol sym = table[i]->findSymbol( a->getName() ); if (sym == a) { table[i]->removeSymbol(a); b->id = a->id; b->scopeid = a->scopeid; symbollist[b->id] = b; table[i]->addSymbol(b); delete a; return; } --i; } } void SymbolTable::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_SYMBOL_TABLE); encoder.writeSignedInteger(sla::ATTRIB_SCOPESIZE, table.size()); encoder.writeSignedInteger(sla::ATTRIB_SYMBOLSIZE, symbollist.size()); for(int4 i=0;igetId()); if (table[i]->getParent() == (SymbolScope *)0) encoder.writeUnsignedInteger(sla::ATTRIB_PARENT, 0); else encoder.writeUnsignedInteger(sla::ATTRIB_PARENT, table[i]->getParent()->getId()); encoder.closeElement(sla::ELEM_SCOPE); } // First save the headers for(int4 i=0;iencodeHeader(encoder); // Now save the content of each symbol for(int4 i=0;iencode(encoder); encoder.closeElement(sla::ELEM_SYMBOL_TABLE); } void SymbolTable::decode(Decoder &decoder,SleighBase *trans) { int4 el = decoder.openElement(sla::ELEM_SYMBOL_TABLE); table.resize(decoder.readSignedInteger(sla::ATTRIB_SCOPESIZE), (SymbolScope *)0); symbollist.resize(decoder.readSignedInteger(sla::ATTRIB_SYMBOLSIZE), (SleighSymbol *)0); for(int4 i=0;idecode(decoder,trans); // Tag closed by decode method // decoder.closeElement(subel); } decoder.closeElement(el); } void SymbolTable::decodeSymbolHeader(Decoder &decoder) { // Put the shell of a symbol in the symbol table // in order to allow recursion SleighSymbol *sym; uint4 el = decoder.peekElement(); if (el == sla::ELEM_USEROP_HEAD) sym = new UserOpSymbol(); else if (el == sla::ELEM_EPSILON_SYM_HEAD) sym = new EpsilonSymbol(); else if (el == sla::ELEM_VALUE_SYM_HEAD) sym = new ValueSymbol(); else if (el == sla::ELEM_VALUEMAP_SYM_HEAD) sym = new ValueMapSymbol(); else if (el == 
sla::ELEM_NAME_SYM_HEAD) sym = new NameSymbol(); else if (el == sla::ELEM_VARNODE_SYM_HEAD) sym = new VarnodeSymbol(); else if (el == sla::ELEM_CONTEXT_SYM_HEAD) sym = new ContextSymbol(); else if (el == sla::ELEM_VARLIST_SYM_HEAD) sym = new VarnodeListSymbol(); else if (el == sla::ELEM_OPERAND_SYM_HEAD) sym = new OperandSymbol(); else if (el == sla::ELEM_START_SYM_HEAD) sym = new StartSymbol(); else if (el == sla::ELEM_END_SYM_HEAD) sym = new EndSymbol(); else if (el == sla::ELEM_NEXT2_SYM_HEAD) sym = new Next2Symbol(); else if (el == sla::ELEM_SUBTABLE_SYM_HEAD) sym = new SubtableSymbol(); else throw SleighError("Bad symbol xml"); sym->decodeHeader(decoder); // Restore basic elements of symbol symbollist[sym->id] = sym; // Put the basic symbol in the table table[sym->scopeid]->addSymbol(sym); // to allow recursion } void SymbolTable::purge(void) { // Get rid of unsavable symbols and scopes SleighSymbol *sym; for(int4 i=0;iscopeid != 0) { // Not in global scope if (sym->getType() == SleighSymbol::operand_symbol) continue; } else { switch(sym->getType()) { case SleighSymbol::space_symbol: case SleighSymbol::token_symbol: case SleighSymbol::epsilon_symbol: case SleighSymbol::section_symbol: case SleighSymbol::bitrange_symbol: break; case SleighSymbol::macro_symbol: { // Delete macro's local symbols MacroSymbol *macro = (MacroSymbol *)sym; for(int4 i=0;igetNumOperands();++i) { SleighSymbol *opersym = macro->getOperand(i); table[opersym->scopeid]->removeSymbol(opersym); symbollist[opersym->id] = (SleighSymbol *)0; delete opersym; } break; } case SleighSymbol::subtable_symbol: { // Delete unused subtables SubtableSymbol *subsym = (SubtableSymbol *)sym; if (subsym->getPattern() != (TokenPattern *)0) continue; for(int4 i=0;igetNumConstructors();++i) { // Go thru each constructor Constructor *con = subsym->getConstructor(i); for(int4 j=0;jgetNumOperands();++j) { // Go thru each operand OperandSymbol *oper = con->getOperand(j); table[oper->scopeid]->removeSymbol(oper); 
symbollist[oper->id] = (SleighSymbol *)0; delete oper; } } break; // Remove the subtable symbol itself } default: continue; } } table[sym->scopeid]->removeSymbol(sym); // Remove the symbol symbollist[i] = (SleighSymbol *)0; delete sym; } for(int4 i=1;itree.empty()) { delete table[i]; table[i] = (SymbolScope *)0; } } renumber(); } void SymbolTable::renumber(void) { // Renumber all the scopes and symbols // so that there are no gaps vector newtable; vector newsymbol; // First renumber the scopes SymbolScope *scope; for(int4 i=0;iid = newtable.size(); newtable.push_back(scope); } } // Now renumber the symbols SleighSymbol *sym; for(int4 i=0;iscopeid = table[sym->scopeid]->id; sym->id = newsymbol.size(); newsymbol.push_back(sym); } } table = newtable; symbollist = newsymbol; } void SleighSymbol::encodeHeader(Encoder &encoder) const { // Save the basic attributes of a symbol encoder.writeString(sla::ATTRIB_NAME, name); encoder.writeUnsignedInteger(sla::ATTRIB_ID, id); encoder.writeUnsignedInteger(sla::ATTRIB_SCOPE, scopeid); } void SleighSymbol::decodeHeader(Decoder &decoder) { uint4 el = decoder.openElement(); name = decoder.readString(sla::ATTRIB_NAME); id = decoder.readUnsignedInteger(sla::ATTRIB_ID); scopeid = decoder.readUnsignedInteger(sla::ATTRIB_SCOPE); decoder.closeElement(el); } void SleighSymbol::encode(Encoder &encoder) const { throw LowlevelError("Symbol "+name+" cannot be encoded to stream directly"); } void SleighSymbol::decode(Decoder &decoder,SleighBase *trans) { throw LowlevelError("Symbol "+name+" cannot be decoded from stream directly"); } void UserOpSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_USEROP); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.writeSignedInteger(sla::ATTRIB_INDEX, index); encoder.closeElement(sla::ELEM_USEROP); } void UserOpSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_USEROP_HEAD); SleighSymbol::encodeHeader(encoder); 
encoder.closeElement(sla::ELEM_USEROP_HEAD); } void UserOpSymbol::decode(Decoder &decoder,SleighBase *trans) { index = decoder.readSignedInteger(sla::ATTRIB_INDEX); decoder.closeElement(sla::ELEM_USEROP.getId()); } PatternlessSymbol::PatternlessSymbol(void) { // The void constructor must explicitly build the ConstantValue. It is not decode (or encoded) patexp = new ConstantValue((intb)0); patexp->layClaim(); } PatternlessSymbol::PatternlessSymbol(const string &nm) : SpecificSymbol(nm) { patexp = new ConstantValue((intb)0); patexp->layClaim(); } PatternlessSymbol::~PatternlessSymbol(void) { PatternExpression::release(patexp); } void EpsilonSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { hand.space = const_space; hand.offset_space = (AddrSpace *)0; // Not a dynamic value hand.offset_offset = 0; hand.size = 0; // Cannot provide size } void EpsilonSymbol::print(ostream &s,ParserWalker &walker) const { s << '0'; } VarnodeTpl *EpsilonSymbol::getVarnode(void) const { VarnodeTpl *res = new VarnodeTpl(ConstTpl(const_space), ConstTpl(ConstTpl::real,0), ConstTpl(ConstTpl::real,0)); return res; } void EpsilonSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_EPSILON_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.closeElement(sla::ELEM_EPSILON_SYM); } void EpsilonSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_EPSILON_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_EPSILON_SYM_HEAD); } void EpsilonSymbol::decode(Decoder &decoder,SleighBase *trans) { const_space = trans->getConstantSpace(); decoder.closeElement(sla::ELEM_EPSILON_SYM.getId()); } ValueSymbol::ValueSymbol(const string &nm,PatternValue *pv) : FamilySymbol(nm) { (patval=pv)->layClaim(); } ValueSymbol::~ValueSymbol(void) { if (patval != (PatternValue *)0) PatternExpression::release(patval); } void ValueSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { hand.space = 
walker.getConstSpace(); hand.offset_space = (AddrSpace *)0; hand.offset_offset = (uintb) patval->getValue(walker); hand.size = 0; // Cannot provide size } void ValueSymbol::print(ostream &s,ParserWalker &walker) const { intb val = patval->getValue(walker); if (val >= 0) s << "0x" << hex << val; else s << "-0x" << hex << -val; } void ValueSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_VALUE_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); patval->encode(encoder); encoder.closeElement(sla::ELEM_VALUE_SYM); } void ValueSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_VALUE_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_VALUE_SYM_HEAD); } void ValueSymbol::decode(Decoder &decoder,SleighBase *trans) { patval = (PatternValue *) PatternExpression::decodeExpression(decoder,trans); patval->layClaim(); decoder.closeElement(sla::ELEM_VALUE_SYM.getId()); } void ValueMapSymbol::checkTableFill(void) { // Check if all possible entries in the table have been filled intb min = patval->minValue(); intb max = patval->maxValue(); tableisfilled = (min>=0)&&(maxgetValue(walker); if ((ind >= valuetable.size())||(ind<0)||(valuetable[ind] == 0xBADBEEF)) { ostringstream s; s << walker.getAddr().getShortcut(); walker.getAddr().printRaw(s); s << ": No corresponding entry in valuetable"; throw BadDataError(s.str()); } } return (Constructor *)0; } void ValueMapSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { uint4 ind = (uint4) patval->getValue(walker); // The resolve routine has checked that -ind- must be a valid index hand.space = walker.getConstSpace(); hand.offset_space = (AddrSpace *)0; // Not a dynamic value hand.offset_offset = (uintb)valuetable[ind]; hand.size = 0; // Cannot provide size } void ValueMapSymbol::print(ostream &s,ParserWalker &walker) const { uint4 ind = (uint4)patval->getValue(walker); // ind is already checked to be in range by the resolve routine intb 
val = valuetable[ind]; if (val >= 0) s << "0x" << hex << val; else s << "-0x" << hex << -val; } void ValueMapSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_VALUEMAP_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); patval->encode(encoder); for(uint4 i=0;ilayClaim(); while(decoder.peekElement() != 0) { uint4 subel = decoder.openElement(); intb val = decoder.readSignedInteger(sla::ATTRIB_VAL); valuetable.push_back(val); decoder.closeElement(subel); } decoder.closeElement(sla::ELEM_VALUEMAP_SYM.getId()); checkTableFill(); } void NameSymbol::checkTableFill(void) { // Check if all possible entries in the table have been filled intb min = patval->minValue(); intb max = patval->maxValue(); tableisfilled = (min>=0)&&(maxgetValue(walker); if ((ind >= nametable.size())||(ind<0)||((nametable[ind].size()==1)&&(nametable[ind][0]=='\t'))) { ostringstream s; s << walker.getAddr().getShortcut(); walker.getAddr().printRaw(s); s << ": No corresponding entry in nametable"; throw BadDataError(s.str()); } } return (Constructor *)0; } void NameSymbol::print(ostream &s,ParserWalker &walker) const { uint4 ind = (uint4)patval->getValue(walker); // ind is already checked to be in range by the resolve routine s << nametable[ind]; } void NameSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_NAME_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); patval->encode(encoder); for(int4 i=0;ilayClaim(); while(decoder.peekElement() != 0) { uint4 subel = decoder.openElement(); if (decoder.getNextAttributeId() == sla::ATTRIB_NAME) nametable.push_back(decoder.readString()); else nametable.push_back("\t"); // TAB indicates an illegal index decoder.closeElement(subel); } decoder.closeElement(sla::ELEM_NAME_SYM.getId()); checkTableFill(); } VarnodeSymbol::VarnodeSymbol(const string &nm,AddrSpace *base,uintb offset,int4 size) : PatternlessSymbol(nm) { fix.space = base; fix.offset = offset; fix.size = size; context_bits = false; } VarnodeTpl 
*VarnodeSymbol::getVarnode(void) const { return new VarnodeTpl(ConstTpl(fix.space),ConstTpl(ConstTpl::real,fix.offset),ConstTpl(ConstTpl::real,fix.size)); } void VarnodeSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { hand.space = fix.space; hand.offset_space = (AddrSpace *)0; // Not a dynamic symbol hand.offset_offset = fix.offset; hand.size = fix.size; } void VarnodeSymbol::collectLocalValues(vector &results) const { if (fix.space->getType() == IPTR_INTERNAL) results.push_back(fix.offset); } void VarnodeSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_VARNODE_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.writeSpace(sla::ATTRIB_SPACE,fix.space); encoder.writeUnsignedInteger(sla::ATTRIB_OFF, fix.offset); encoder.writeSignedInteger(sla::ATTRIB_SIZE, fix.size); encoder.closeElement(sla::ELEM_VARNODE_SYM); } void VarnodeSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_VARNODE_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_VARNODE_SYM_HEAD); } void VarnodeSymbol::decode(Decoder &decoder,SleighBase *trans) { fix.space = decoder.readSpace(sla::ATTRIB_SPACE); fix.offset = decoder.readUnsignedInteger(sla::ATTRIB_OFF); fix.size = decoder.readSignedInteger(sla::ATTRIB_SIZE); // PatternlessSymbol does not need restoring decoder.closeElement(sla::ELEM_VARNODE_SYM.getId()); } ContextSymbol::ContextSymbol(const string &nm,ContextField *pate,VarnodeSymbol *v, uint4 l,uint4 h,bool fl) : ValueSymbol(nm,pate) { vn = v; low = l; high = h; flow = fl; } void ContextSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_CONTEXT_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.writeUnsignedInteger(sla::ATTRIB_VARNODE, vn->getId()); encoder.writeSignedInteger(sla::ATTRIB_LOW, low); encoder.writeSignedInteger(sla::ATTRIB_HIGH, high); encoder.writeBool(sla::ATTRIB_FLOW, flow); patval->encode(encoder); 
encoder.closeElement(sla::ELEM_CONTEXT_SYM); } void ContextSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_CONTEXT_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_CONTEXT_SYM_HEAD); } void ContextSymbol::decode(Decoder &decoder,SleighBase *trans) { // SleighSymbol::decodeHeader(decoder); // Already filled in by the header tag flow = false; bool highMissing = true; bool lowMissing = true; uint4 attrib = decoder.getNextAttributeId(); while(attrib != 0) { if (attrib == sla::ATTRIB_VARNODE) { uintm id = decoder.readUnsignedInteger(); vn = (VarnodeSymbol *)trans->findSymbol(id); } else if (attrib == sla::ATTRIB_LOW) { low = decoder.readSignedInteger(); lowMissing = false; } else if (attrib == sla::ATTRIB_HIGH) { high = decoder.readSignedInteger(); highMissing = false; } else if (attrib == sla::ATTRIB_FLOW) { flow = decoder.readBool(); } attrib = decoder.getNextAttributeId(); } if (lowMissing || highMissing) { throw DecoderError("Missing high/low attributes"); } patval = (PatternValue *) PatternExpression::decodeExpression(decoder,trans); patval->layClaim(); decoder.closeElement(sla::ELEM_CONTEXT_SYM.getId()); } VarnodeListSymbol::VarnodeListSymbol(const string &nm,PatternValue *pv,const vector &vt) : ValueSymbol(nm,pv) { for(int4 i=0;iminValue(); intb max = patval->maxValue(); tableisfilled = (min>=0)&&(maxgetValue(walker); if ((ind<0)||(ind>=varnode_table.size())||(varnode_table[ind]==(VarnodeSymbol *)0)) { ostringstream s; s << walker.getAddr().getShortcut(); walker.getAddr().printRaw(s); s << ": No corresponding entry in varnode list"; throw BadDataError(s.str()); } } return (Constructor *)0; } void VarnodeListSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { uint4 ind = (uint4) patval->getValue(walker); // The resolve routine has checked that -ind- must be a valid index const VarnodeData &fix( varnode_table[ind]->getFixedVarnode() ); hand.space = fix.space; hand.offset_space = 
(AddrSpace *)0; // Not a dynamic value hand.offset_offset = fix.offset; hand.size = fix.size; } int4 VarnodeListSymbol::getSize(void) const { for(int4 i=0;igetSize(); } throw SleighError("No register attached to: "+getName()); } void VarnodeListSymbol::print(ostream &s,ParserWalker &walker) const { uint4 ind = (uint4)patval->getValue(walker); if (ind >= varnode_table.size()) throw SleighError("Value out of range for varnode table"); s << varnode_table[ind]->getName(); } void VarnodeListSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_VARLIST_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); patval->encode(encoder); for(int4 i=0;igetId()); encoder.closeElement(sla::ELEM_VAR); } } encoder.closeElement(sla::ELEM_VARLIST_SYM); } void VarnodeListSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_VARLIST_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_VARLIST_SYM_HEAD); } void VarnodeListSymbol::decode(Decoder &decoder,SleighBase *trans) { patval = (PatternValue *) PatternExpression::decodeExpression(decoder,trans); patval->layClaim(); while(decoder.peekElement() != 0) { uint4 subel = decoder.openElement(); if (subel == sla::ELEM_VAR) { uintm id = decoder.readUnsignedInteger(sla::ATTRIB_ID); varnode_table.push_back( (VarnodeSymbol *)trans->findSymbol(id) ); } else varnode_table.push_back( (VarnodeSymbol *)0 ); decoder.closeElement(subel); } decoder.closeElement(sla::ELEM_VARLIST_SYM.getId()); checkTableFill(); } OperandSymbol::OperandSymbol(const string &nm,int4 index,Constructor *ct) : SpecificSymbol(nm) { flags = 0; hand = index; localexp = new OperandValue(index,ct); localexp->layClaim(); defexp = (PatternExpression *)0; triple = (TripleSymbol *)0; } void OperandSymbol::defineOperand(PatternExpression *pe) { if ((defexp != (PatternExpression *)0)||(triple!=(TripleSymbol *)0)) throw SleighError("Redefining operand"); defexp = pe; defexp->layClaim(); } void 
OperandSymbol::defineOperand(TripleSymbol *tri) { if ((defexp != (PatternExpression *)0)||(triple!=(TripleSymbol *)0)) throw SleighError("Redefining operand"); triple = tri; } OperandSymbol::~OperandSymbol(void) { PatternExpression::release(localexp); if (defexp != (PatternExpression *)0) PatternExpression::release(defexp); } VarnodeTpl *OperandSymbol::getVarnode(void) const { VarnodeTpl *res; if (defexp != (PatternExpression *)0) res = new VarnodeTpl(hand,true); // Definite constant handle else { SpecificSymbol *specsym = dynamic_cast(triple); if (specsym != (SpecificSymbol *)0) res = specsym->getVarnode(); else if ((triple != (TripleSymbol *)0)&& ((triple->getType() == valuemap_symbol)||(triple->getType() == name_symbol))) res = new VarnodeTpl(hand,true); // Zero-size symbols else res = new VarnodeTpl(hand,false); // Possible dynamic handle } return res; } void OperandSymbol::getFixedHandle(FixedHandle &hnd,ParserWalker &walker) const { hnd = walker.getFixedHandle(hand); } int4 OperandSymbol::getSize(void) const { if (triple != (TripleSymbol *)0) return triple->getSize(); return 0; } void OperandSymbol::print(ostream &s,ParserWalker &walker) const { walker.pushOperand(getIndex()); if (triple != (TripleSymbol *)0) { if (triple->getType() == SleighSymbol::subtable_symbol) walker.getConstructor()->print(s,walker); else triple->print(s,walker); } else { intb val = defexp->getValue(walker); if (val >= 0) s << "0x" << hex << val; else s << "-0x" << hex << -val; } walker.popOperand(); } void OperandSymbol::collectLocalValues(vector &results) const { if (triple != (TripleSymbol *)0) triple->collectLocalValues(results); } void OperandSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_OPERAND_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); if (triple != (TripleSymbol *)0) encoder.writeUnsignedInteger(sla::ATTRIB_SUBSYM, triple->getId()); encoder.writeSignedInteger(sla::ATTRIB_OFF, reloffset); encoder.writeSignedInteger(sla::ATTRIB_BASE, 
offsetbase); encoder.writeSignedInteger(sla::ATTRIB_MINLEN, minimumlength); if (isCodeAddress()) encoder.writeBool(sla::ATTRIB_CODE, true); encoder.writeSignedInteger(sla::ATTRIB_INDEX, hand); localexp->encode(encoder); if (defexp != (PatternExpression *)0) defexp->encode(encoder); encoder.closeElement(sla::ELEM_OPERAND_SYM); } void OperandSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_OPERAND_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_OPERAND_SYM_HEAD); } void OperandSymbol::decode(Decoder &decoder,SleighBase *trans) { defexp = (PatternExpression *)0; triple = (TripleSymbol *)0; flags = 0; uint4 attrib = decoder.getNextAttributeId(); while(attrib != 0) { attrib = decoder.getNextAttributeId(); if (attrib == sla::ATTRIB_INDEX) hand = decoder.readSignedInteger(); else if (attrib == sla::ATTRIB_OFF) reloffset = decoder.readSignedInteger(); else if (attrib == sla::ATTRIB_BASE) offsetbase = decoder.readSignedInteger(); else if (attrib == sla::ATTRIB_MINLEN) minimumlength = decoder.readSignedInteger(); else if (attrib == sla::ATTRIB_SUBSYM) { uintm id = decoder.readUnsignedInteger(); triple = (TripleSymbol *)trans->findSymbol(id); } else if (attrib == sla::ATTRIB_CODE) { if (decoder.readBool()) flags |= code_address; } } localexp = (OperandValue *)PatternExpression::decodeExpression(decoder,trans); localexp->layClaim(); if (decoder.peekElement() != 0) { defexp = PatternExpression::decodeExpression(decoder,trans); defexp->layClaim(); } decoder.closeElement(sla::ELEM_OPERAND_SYM.getId()); } StartSymbol::StartSymbol(const string &nm,AddrSpace *cspc) : SpecificSymbol(nm) { const_space = cspc; patexp = new StartInstructionValue(); patexp->layClaim(); } StartSymbol::~StartSymbol(void) { if (patexp != (PatternExpression *)0) PatternExpression::release(patexp); } VarnodeTpl *StartSymbol::getVarnode(void) const { // Returns current instruction offset as a constant ConstTpl spc(const_space); ConstTpl 
off(ConstTpl::j_start); ConstTpl sz_zero; return new VarnodeTpl(spc,off,sz_zero); } void StartSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { hand.space = walker.getCurSpace(); hand.offset_space = (AddrSpace *)0; hand.offset_offset = walker.getAddr().getOffset(); // Get starting address of instruction hand.size = hand.space->getAddrSize(); } void StartSymbol::print(ostream &s,ParserWalker &walker) const { intb val = (intb) walker.getAddr().getOffset(); s << "0x" << hex << val; } void StartSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_START_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.closeElement(sla::ELEM_START_SYM); } void StartSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_START_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_START_SYM_HEAD); } void StartSymbol::decode(Decoder &decoder,SleighBase *trans) { const_space = trans->getConstantSpace(); patexp = new StartInstructionValue(); patexp->layClaim(); decoder.closeElement(sla::ELEM_START_SYM.getId()); } EndSymbol::EndSymbol(const string &nm,AddrSpace *cspc) : SpecificSymbol(nm) { const_space = cspc; patexp = new EndInstructionValue(); patexp->layClaim(); } EndSymbol::~EndSymbol(void) { if (patexp != (PatternExpression *)0) PatternExpression::release(patexp); } VarnodeTpl *EndSymbol::getVarnode(void) const { // Return next instruction offset as a constant ConstTpl spc(const_space); ConstTpl off(ConstTpl::j_next); ConstTpl sz_zero; return new VarnodeTpl(spc,off,sz_zero); } void EndSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { hand.space = walker.getCurSpace(); hand.offset_space = (AddrSpace *)0; hand.offset_offset = walker.getNaddr().getOffset(); // Get starting address of next instruction hand.size = hand.space->getAddrSize(); } void EndSymbol::print(ostream &s,ParserWalker &walker) const { intb val = (intb) walker.getNaddr().getOffset(); s << "0x" << hex 
<< val; } void EndSymbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_END_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.closeElement(sla::ELEM_END_SYM); } void EndSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_END_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_END_SYM_HEAD); } void EndSymbol::decode(Decoder &decoder,SleighBase *trans) { const_space = trans->getConstantSpace(); patexp = new EndInstructionValue(); patexp->layClaim(); decoder.closeElement(sla::ELEM_END_SYM.getId()); } Next2Symbol::Next2Symbol(const string &nm,AddrSpace *cspc) : SpecificSymbol(nm) { const_space = cspc; patexp = new Next2InstructionValue(); patexp->layClaim(); } Next2Symbol::~Next2Symbol(void) { if (patexp != (PatternExpression *)0) PatternExpression::release(patexp); } VarnodeTpl *Next2Symbol::getVarnode(void) const { // Return instruction offset after next instruction offset as a constant ConstTpl spc(const_space); ConstTpl off(ConstTpl::j_next2); ConstTpl sz_zero; return new VarnodeTpl(spc,off,sz_zero); } void Next2Symbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { hand.space = walker.getCurSpace(); hand.offset_space = (AddrSpace *)0; hand.offset_offset = walker.getN2addr().getOffset(); // Get instruction address after next instruction hand.size = hand.space->getAddrSize(); } void Next2Symbol::print(ostream &s,ParserWalker &walker) const { intb val = (intb) walker.getN2addr().getOffset(); s << "0x" << hex << val; } void Next2Symbol::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_NEXT2_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.closeElement(sla::ELEM_NEXT2_SYM); } void Next2Symbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_NEXT2_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_NEXT2_SYM_HEAD); } void Next2Symbol::decode(Decoder &decoder,SleighBase *trans) { 
const_space = trans->getConstantSpace(); patexp = new Next2InstructionValue(); patexp->layClaim(); decoder.closeElement(sla::ELEM_NEXT2_SYM.getId()); } FlowDestSymbol::FlowDestSymbol(const string &nm,AddrSpace *cspc) : SpecificSymbol(nm) { const_space = cspc; } VarnodeTpl *FlowDestSymbol::getVarnode(void) const { ConstTpl spc(const_space); ConstTpl off(ConstTpl::j_flowdest); ConstTpl sz_zero; return new VarnodeTpl(spc,off,sz_zero); } void FlowDestSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { Address refAddr = walker.getDestAddr(); hand.space = const_space; hand.offset_space = (AddrSpace *)0; hand.offset_offset = refAddr.getOffset(); hand.size = refAddr.getAddrSize(); } void FlowDestSymbol::print(ostream &s,ParserWalker &walker) const { intb val = (intb) walker.getDestAddr().getOffset(); s << "0x" << hex << val; } FlowRefSymbol::FlowRefSymbol(const string &nm,AddrSpace *cspc) : SpecificSymbol(nm) { const_space = cspc; } VarnodeTpl *FlowRefSymbol::getVarnode(void) const { ConstTpl spc(const_space); ConstTpl off(ConstTpl::j_flowref); ConstTpl sz_zero; return new VarnodeTpl(spc,off,sz_zero); } void FlowRefSymbol::getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { Address refAddr = walker.getRefAddr(); hand.space = const_space; hand.offset_space = (AddrSpace *)0; hand.offset_offset = refAddr.getOffset(); hand.size = refAddr.getAddrSize(); } void FlowRefSymbol::print(ostream &s,ParserWalker &walker) const { intb val = (intb) walker.getRefAddr().getOffset(); s << "0x" << hex << val; } Constructor::Constructor(void) { pattern = (TokenPattern *)0; parent = (SubtableSymbol *)0; pateq = (PatternEquation *)0; templ = (ConstructTpl *)0; firstwhitespace = -1; flowthruindex = -1; inerror = false; } Constructor::Constructor(SubtableSymbol *p) { pattern = (TokenPattern *)0; parent = p; pateq = (PatternEquation *)0; templ = (ConstructTpl *)0; firstwhitespace = -1; inerror = false; } Constructor::~Constructor(void) { if (pattern != (TokenPattern 
*)0) delete pattern; if (pateq != (PatternEquation *)0) PatternEquation::release(pateq); if (templ != (ConstructTpl *)0) delete templ; for(int4 i=0;i::iterator iter; for(iter=context.begin();iter!=context.end();++iter) delete *iter; } void Constructor::addInvisibleOperand(OperandSymbol *sym) { operands.push_back(sym); } void Constructor::addOperand(OperandSymbol *sym) { string operstring = "\n "; // Indicater character for operand operstring[1] = ('A'+operands.size()); // Encode index of operand operands.push_back(sym); printpiece.push_back(operstring); // Placeholder for operand's string } void Constructor::addSyntax(const string &syn) { string syntrim; if (syn.size() == 0) return; bool hasNonSpace = false; for(int4 i=0;ilayClaim(); } void Constructor::setNamedSection(ConstructTpl *tpl,int4 id) { // Add a named section to the constructor while(namedtempl.size() <= id) namedtempl.push_back((ConstructTpl *)0); namedtempl[id] = tpl; } ConstructTpl *Constructor::getNamedTempl(int4 secnum) const { if (secnum < namedtempl.size()) return namedtempl[secnum]; return (ConstructTpl *)0; } void Constructor::print(ostream &s,ParserWalker &walker) const { vector::const_iterator piter; for(piter=printpiece.begin();piter!=printpiece.end();++piter) { if ((*piter)[0] == '\n') { int4 index = (*piter)[1]-'A'; operands[index]->print(s,walker); } else s << *piter; } } void Constructor::printMnemonic(ostream &s,ParserWalker &walker) const { if (flowthruindex != -1) { SubtableSymbol *sym = dynamic_cast(operands[flowthruindex]->getDefiningSymbol()); if (sym != (SubtableSymbol *)0) { walker.pushOperand(flowthruindex); walker.getConstructor()->printMnemonic(s,walker); walker.popOperand(); return; } } int4 endind = (firstwhitespace==-1) ? 
printpiece.size() : firstwhitespace; for(int4 i=0;iprint(s,walker); } else s << printpiece[i]; } } void Constructor::printBody(ostream &s,ParserWalker &walker) const { if (flowthruindex != -1) { SubtableSymbol *sym = dynamic_cast(operands[flowthruindex]->getDefiningSymbol()); if (sym != (SubtableSymbol *)0) { walker.pushOperand(flowthruindex); walker.getConstructor()->printBody(s,walker); walker.popOperand(); return; } } if (firstwhitespace == -1) return; // Nothing to print after firstwhitespace for(int4 i=firstwhitespace+1;iprint(s,walker); } else s << printpiece[i]; } } void Constructor::removeTrailingSpace(void) { // Allow for user to force extra space at end of printing if ((!printpiece.empty())&&(printpiece.back()==" ")) printpiece.pop_back(); // while((!printpiece.empty())&&(printpiece.back()==" ")) // printpiece.pop_back(); } void Constructor::markSubtableOperands(vector &check) const { // Adjust -check- so it has one entry for every operand, a 0 if it is a subtable, a 2 if it is not check.resize(operands.size()); for(int4 i=0;igetDefiningSymbol(); if ((sym != (TripleSymbol *)0)&&(sym->getType() == SleighSymbol::subtable_symbol)) check[i] = 0; else check[i] = 2; } } void Constructor::collectLocalExports(vector &results) const { if (templ == (ConstructTpl *)0) return; HandleTpl *handle = templ->getResult(); if (handle == (HandleTpl *)0) return; if (handle->getSpace().isConstSpace()) return; // Even if the value is dynamic, the pointed to value won't get used if (handle->getPtrSpace().getType() != ConstTpl::real) { if (handle->getTempSpace().isUniqueSpace()) results.push_back(handle->getTempOffset().getReal()); return; } if (handle->getSpace().isUniqueSpace()) { results.push_back(handle->getPtrOffset().getReal()); return; } if (handle->getSpace().getType() == ConstTpl::handle) { int4 handleIndex = handle->getSpace().getHandleIndex(); OperandSymbol *opSym = getOperand(handleIndex); opSym->collectLocalValues(results); } } bool Constructor::isRecursive(void) 
const { // Does this constructor cause recursion with its table for(int4 i=0;igetDefiningSymbol(); if (sym == parent) return true; } return false; } void Constructor::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_CONSTRUCTOR); encoder.writeUnsignedInteger(sla::ATTRIB_PARENT, parent->getId()); encoder.writeSignedInteger(sla::ATTRIB_FIRST, firstwhitespace); encoder.writeSignedInteger(sla::ATTRIB_LENGTH, minimumlength); encoder.writeSignedInteger(sla::ATTRIB_SOURCE, src_index); encoder.writeSignedInteger(sla::ATTRIB_LINE, lineno); for(int4 i=0;igetId()); encoder.closeElement(sla::ELEM_OPER); } for(int4 i=0;iencode(encoder); if (templ != (ConstructTpl *)0) templ->encode(encoder,-1); for(int4 i=0;iencode(encoder,i); } encoder.closeElement(sla::ELEM_CONSTRUCTOR); } void Constructor::decode(Decoder &decoder,SleighBase *trans) { uint4 el = decoder.openElement(sla::ELEM_CONSTRUCTOR); uintm id = decoder.readUnsignedInteger(sla::ATTRIB_PARENT); parent = (SubtableSymbol *)trans->findSymbol(id); firstwhitespace = decoder.readSignedInteger(sla::ATTRIB_FIRST); minimumlength = decoder.readSignedInteger(sla::ATTRIB_LENGTH); src_index = decoder.readSignedInteger(sla::ATTRIB_SOURCE); lineno = decoder.readSignedInteger(sla::ATTRIB_LINE); uint4 subel = decoder.peekElement(); while(subel != 0) { if (subel == sla::ELEM_OPER) { decoder.openElement(); uintm id = decoder.readUnsignedInteger(sla::ATTRIB_ID); OperandSymbol *sym = (OperandSymbol *)trans->findSymbol(id); operands.push_back(sym); decoder.closeElement(subel); } else if (subel == sla::ELEM_PRINT) { decoder.openElement(); printpiece.push_back( decoder.readString(sla::ATTRIB_PIECE)); decoder.closeElement(subel); } else if (subel == sla::ELEM_OPPRINT) { decoder.openElement(); int4 index = decoder.readSignedInteger(sla::ATTRIB_ID); string operstring = "\n "; operstring[1] = ('A' + index); printpiece.push_back(operstring); decoder.closeElement(subel); } else if (subel == sla::ELEM_CONTEXT_OP) { ContextOp *c_op = new 
ContextOp(); c_op->decode(decoder,trans); context.push_back(c_op); } else if (subel == sla::ELEM_COMMIT) { ContextCommit *c_op = new ContextCommit(); c_op->decode(decoder,trans); context.push_back(c_op); } else { ConstructTpl *cur = new ConstructTpl(); int4 sectionid = cur->decode(decoder); if (sectionid < 0) { if (templ != (ConstructTpl *)0) throw LowlevelError("Duplicate main section"); templ = cur; } else { while(namedtempl.size() <= sectionid) namedtempl.push_back((ConstructTpl *)0); if (namedtempl[sectionid] != (ConstructTpl *)0) throw LowlevelError("Duplicate named section"); namedtempl[sectionid] = cur; } } subel = decoder.peekElement(); } pattern = (TokenPattern *)0; if ((printpiece.size()==1)&&(printpiece[0][0]=='\n')) flowthruindex = printpiece[0][1] - 'A'; else flowthruindex = -1; decoder.closeElement(el); } void Constructor::orderOperands(void) { OperandSymbol *sym; vector patternorder; vector newops; // New order of the operands int4 lastsize; pateq->operandOrder(this,patternorder); for(int4 i=0;iisMarked()) { patternorder.push_back(sym); sym->setMark(); // Make sure all operands are marked } } do { lastsize = newops.size(); for(int4 i=0;iisMarked()) continue; // "unmarked" means it is already in newops if (sym->isOffsetIrrelevant()) continue; // expression Operands come last if ((sym->offsetbase == -1)||(!operands[sym->offsetbase]->isMarked())) { newops.push_back(sym); sym->clearMark(); } } } while(newops.size() != lastsize); for(int4 i=0;iisOffsetIrrelevant()) { newops.push_back(sym); sym->clearMark(); } } if (newops.size() != operands.size()) throw SleighError("Circular offset dependency between operands"); for(int4 i=0;ihand = i; newops[i]->localexp->changeIndex(i); } vector handmap; // Create index translation map for(int4 i=0;ihand); // Fix up offsetbase for(int4 i=0;ioffsetbase == -1) continue; sym->offsetbase = handmap[sym->offsetbase]; } if (templ != (ConstructTpl *)0) // Fix up templates templ->changeHandleIndex(handmap); for(int4 
i=0;ichangeHandleIndex(handmap); } // Fix up printpiece operand refs for(int4 i=0;i oppattern; bool recursion = false; // Generate pattern for each operand, store in oppattern for(int4 i=0;igetDefiningSymbol(); PatternExpression *defexp = sym->getDefiningExpression(); if (triple != (TripleSymbol *)0) { SubtableSymbol *subsym = dynamic_cast(triple); if (subsym != (SubtableSymbol *)0) { if (subsym->isBeingBuilt()) { // Detected recursion if (recursion) { throw SleighError("Illegal recursion"); } // We should also check that recursion is rightmost extreme recursion = true; oppattern.emplace_back(); } else oppattern.push_back(*subsym->buildPattern(s)); } else oppattern.push_back(triple->getPatternExpression()->genMinPattern(oppattern)); } else if (defexp != (PatternExpression *)0) oppattern.push_back(defexp->genMinPattern(oppattern)); else { throw SleighError(sym->getName()+": operand is undefined"); } TokenPattern &sympat( oppattern.back() ); sym->minimumlength = sympat.getMinimumLength(); if (sympat.getLeftEllipsis() || sympat.getRightEllipsis()) sym->setVariableLength(); } if (pateq == (PatternEquation *)0) throw SleighError("Missing equation"); // Build the entire pattern pateq->genPattern(oppattern); *pattern = pateq->getTokenPattern(); if (pattern->alwaysFalse()) throw SleighError("Impossible pattern"); if (recursion) pattern->setRightEllipsis(true); minimumlength = pattern->getMinimumLength(); // Get length of the pattern in bytes // Resolve offsets of the operands OperandResolve resolve(operands); if (!pateq->resolveOperandLeft(resolve)) throw SleighError("Unable to resolve operand offsets"); for(int4 i=0;iisOffsetIrrelevant()) { sym->offsetbase = -1; sym->reloffset = 0; continue; } base = sym->offsetbase; offset = sym->reloffset; while(base >= 0) { sym = operands[base]; if (sym->isVariableLength()) break; // Cannot resolve to absolute base = sym->offsetbase; offset += sym->getMinimumLength(); offset += sym->reloffset; if (base < 0) { operands[i]->offsetbase = 
base; operands[i]->reloffset = offset; } } } // Make sure context expressions are valid for(int4 i=0;ivalidate(); orderOperands(); // Order the operands based on offset dependency return pattern; } void Constructor::printInfo(ostream &s) const { // Print identifying information about constructor // for use in error messages s << "table \"" << parent->getName(); s << "\" constructor starting at line " << dec << lineno; } SubtableSymbol::SubtableSymbol(const string &nm) : TripleSymbol(nm) { beingbuilt = false; pattern = (TokenPattern *)0; decisiontree = (DecisionNode *)0; errors = 0; } SubtableSymbol::~SubtableSymbol(void) { if (pattern != (TokenPattern *)0) delete pattern; if (decisiontree != (DecisionNode *)0) delete decisiontree; vector::iterator iter; for(iter=construct.begin();iter!=construct.end();++iter) delete *iter; } void SubtableSymbol::collectLocalValues(vector &results) const { for(int4 i=0;icollectLocalExports(results); } void SubtableSymbol::encode(Encoder &encoder) const { if (decisiontree == (DecisionNode *)0) return; // Not fully formed encoder.openElement(sla::ELEM_SUBTABLE_SYM); encoder.writeUnsignedInteger(sla::ATTRIB_ID, getId()); encoder.writeSignedInteger(sla::ATTRIB_NUMCT, construct.size()); for(int4 i=0;iencode(encoder); decisiontree->encode(encoder); encoder.closeElement(sla::ELEM_SUBTABLE_SYM); } void SubtableSymbol::encodeHeader(Encoder &encoder) const { encoder.openElement(sla::ELEM_SUBTABLE_SYM_HEAD); SleighSymbol::encodeHeader(encoder); encoder.closeElement(sla::ELEM_SUBTABLE_SYM_HEAD); } void SubtableSymbol::decode(Decoder &decoder,SleighBase *trans) { int4 numct = decoder.readSignedInteger(sla::ATTRIB_NUMCT); construct.reserve(numct); uint4 subel = decoder.peekElement(); while(subel != 0) { if (subel == sla::ELEM_CONSTRUCTOR) { Constructor *ct = new Constructor(); addConstructor(ct); ct->decode(decoder,trans); } else if (subel == sla::ELEM_DECISION) { decisiontree = new DecisionNode(); decisiontree->decode(decoder,(DecisionNode 
*)0,this); } subel = decoder.peekElement(); } pattern = (TokenPattern *)0; beingbuilt = false; errors = 0; decoder.closeElement(sla::ELEM_SUBTABLE_SYM.getId()); } void SubtableSymbol::buildDecisionTree(DecisionProperties &props) { // Associate pattern disjoints to constructors if (pattern == (TokenPattern *)0) return; // Pattern not fully formed Pattern *pat; decisiontree = new DecisionNode((DecisionNode *)0); for(int4 i=0;igetPattern()->getPattern(); if (pat->numDisjoint() == 0) decisiontree->addConstructorPair((const DisjointPattern *)pat,construct[i]); else for(int4 j=0;jnumDisjoint();++j) decisiontree->addConstructorPair(pat->getDisjoint(j),construct[i]); } decisiontree->split(props); // Create the decision strategy } TokenPattern *SubtableSymbol::buildPattern(ostream &s) { if (pattern != (TokenPattern *)0) return pattern; // Already built errors = false; beingbuilt = true; pattern = new TokenPattern(); if (construct.empty()) { s << "Error: There are no constructors in table: "+getName() << endl; errors = true; return pattern; } try { construct.front()->buildPattern(s); } catch(SleighError &err) { s << "Error: " << err.explain << ": for "; construct.front()->printInfo(s); s << endl; errors = true; } *pattern = *construct.front()->getPattern(); for(int4 i=1;ibuildPattern(s); } catch(SleighError &err) { s << "Error: " << err.explain << ": for "; construct[i]->printInfo(s); s << endl; errors = true; } *pattern = construct[i]->getPattern()->commonSubPattern(*pattern); } beingbuilt = false; return pattern; } void DecisionProperties::identicalPattern(Constructor *a,Constructor *b) { // Note that -a- and -b- have identical patterns if ((!a->isError())&&(!b->isError())) { a->setError(true); b->setError(true); identerrors.push_back(make_pair(a, b)); } } void DecisionProperties::conflictingPattern(Constructor *a,Constructor *b) { // Note that -a- and -b- have (potentially) conflicting patterns if ((!a->isError())&&(!b->isError())) { a->setError(true); b->setError(true); 
conflicterrors.push_back(make_pair(a, b)); } } DecisionNode::DecisionNode(DecisionNode *p) { parent = p; num = 0; startbit = 0; bitsize = 0; contextdecision = false; } DecisionNode::~DecisionNode(void) { // We own sub nodes vector::iterator iter; for(iter=children.begin();iter!=children.end();++iter) delete *iter; vector >::iterator piter; for(piter=list.begin();piter!=list.end();++piter) delete (*piter).first; // Delete the patterns } void DecisionNode::addConstructorPair(const DisjointPattern *pat,Constructor *ct) { DisjointPattern *clone = (DisjointPattern *)pat->simplifyClone(); // We need to own pattern list.push_back(pair(clone,ct)); num += 1; } int4 DecisionNode::getMaximumLength(bool context) { // Get maximum length of instruction pattern in bytes int4 max = 0; int4 val,i; for(i=0;igetLength(context); if (val > max) max = val; } return max; } int4 DecisionNode::getNumFixed(int4 low,int4 size,bool context) { // Get number of patterns that specify this field int4 count = 0; uintm mask; // Bits which must be specified in the mask uintm m = (size==8*sizeof(uintm)) ? 
0 : (((uintm)1)<getMask(low,size,context); if ((mask&m)==m) count += 1; } return count; } double DecisionNode::getScore(int4 low,int4 size,bool context) { int4 numBins = 1 << size; // size is between 1 and 8 int4 i; uintm val,mask; uintm m = ((uintm)1)< count(numBins,0); for(i=0;igetMask(low,size,context); if ((mask&m)!=m) continue; // Skip if field not fully specified val = list[i].first->getValue(low,size,context); total += 1; count[val] += 1; } if (total <= 0) return -1.0; double sc = 0.0; for(i=0;i= list.size()) return -1.0; double p = ((double)count[i])/total; sc -= p * log(p); } return ( sc / log(2.0) ); } void DecisionNode::chooseOptimalField(void) { double score = 0.0; int4 sbit,size; // The current field bool context; double sc; int4 maxlength,numfixed,maxfixed; maxfixed = 1; context = true; do { maxlength = 8*getMaximumLength(context); for(sbit=0;sbit maxfixed)&&(sc > 0.0)) { score = sc; maxfixed = numfixed; startbit = sbit; bitsize = 1; contextdecision = context; continue; } // We have maximum patterns if (sc > score) { score = sc; startbit = sbit; bitsize = 1; contextdecision = context; } } context = !context; } while(!context); context = true; do { maxlength = 8*getMaximumLength(context); for(size=2;size <= 8;++size) { for(sbit=0;sbit score) { score = sc; startbit = sbit; bitsize = size; contextdecision = context; } } } context = !context; } while(!context); if (score <= 0.0) // If we failed to get a positive score bitsize = 0; // treat the node as terminal } void DecisionNode::consistentValues(vector &bins,DisjointPattern *pat) { // Produce all possible values of -pat- by // iterating through all possible values of the // "don't care" bits within the value of -pat- // that intersects with this node (startbit,bitsize,context) uintm m = (bitsize==8*sizeof(uintm)) ? 
0 : (((uintm)1)<getMask(startbit,bitsize,contextdecision); uintm commonValue = commonMask & pat->getValue(startbit,bitsize,contextdecision); uintm dontCareMask = m^commonMask; for(uintm i=0;i<=dontCareMask;++i) { // Iterate over values that contain all don't care bits if ((i&dontCareMask)!=i) continue; // If all 1 bits in the value are don't cares bins.push_back( commonValue | i ); // add 1 bits into full value and store } } void DecisionNode::split(DecisionProperties &props) { if (list.size() <= 1) { bitsize = 0; // Only one pattern, terminal node by default return; } chooseOptimalField(); if (bitsize == 0) { orderPatterns(props); return; } if ((parent != (DecisionNode *)0) && (list.size() >= parent->num)) throw LowlevelError("Child has as many Patterns as parent"); int4 numChildren = 1 << bitsize; for(int4 i=0;i vals; // Bins this pattern belongs in // If the pattern does not care about some // bits in the field we are splitting on, that // pattern will get put into multiple bins consistentValues(vals,list[i].first); for(int4 j=0;jaddConstructorPair(list[i].first,list[i].second); delete list[i].first; // We no longer need original pattern } list.clear(); for(int4 i=0;isplit(props); } void DecisionNode::orderPatterns(DecisionProperties &props) { // This is a tricky routine. When this routine is called, the patterns remaining in the // the decision node can no longer be distinguished by examining additional bits. The basic // idea here is that the patterns should be ordered so that the most specialized should come // first in the list. Pattern 1 is a specialization of pattern 2, if the set of instructions // matching 1 is contained in the set matching 2. So in the simplest case, the pattern order // should represent a strict nesting. Unfortunately, there are many potential situations where // patterns don't necessarily nest. // 1) An "or" of two patterns. 
This can be an explicit '|' operator in the Constructor, in // which case this can be detected because the two patterns point to the same constructor // But the "or" can be implied across two constructors that do the same thing. This should // probably be flagged as an error except in the following case. // 2) Two patterns aren't properly nested, but they are "resolved" by a third pattern which // covers the intersection of the first two patterns. Sometimes its easier to specify // three cases that need to be distinguished in this way. // 3) Recursive constructors that use a "guard" context bit. The guard bit is used to prevent // the recursive constructor from matching repeatedly, but it's too much work to put a // constraint an the bit for every other pattern. // 4) Other situations where the ability to distinguish between constructors is hidden in // the subconstructors. // This routine can determine if an intersection results from case 1) or case 2) int4 i,j,k; vector > newlist; vector > conflictlist; // Check for identical patterns for(i=0;iidentical(jpat)) props.identicalPattern(list[i].second,list[j].second); } } newlist = list; for(i=0;ispecializes(jpat)) break; if (!jpat->specializes(ipat)) { // We have a potential conflict Constructor *iconst = newlist[i].second; Constructor *jconst = list[j].second; if (iconst == jconst) { // This is an OR in the pattern for ONE constructor // So there is no conflict } else { // A true conflict that needs to be resolved conflictlist.push_back(pair(ipat,iconst)); conflictlist.push_back(pair(jpat,jconst)); } } } for(k=i-1;k>=j;--k) list[k+1] = list[k]; list[j] = newlist[i]; } // Check if intersection patterns are present, which resolve conflicts for(i=0;iresolvesIntersect(pat1,pat2)) { resolved = true; break; } } if (!resolved) props.conflictingPattern(const1,const2); } } Constructor *DecisionNode::resolve(ParserWalker &walker) const { if (bitsize == 0) { // The node is terminal vector >::const_iterator iter; 
for(iter=list.begin();iter!=list.end();++iter) if ((*iter).first->isMatch(walker)) return (*iter).second; ostringstream s; s << walker.getAddr().getShortcut(); walker.getAddr().printRaw(s); s << ": Unable to resolve constructor"; throw BadDataError(s.str()); } uintm val; if (contextdecision) val = walker.getContextBits(startbit,bitsize); else val = walker.getInstructionBits(startbit,bitsize); return children[val]->resolve(walker); } void DecisionNode::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_DECISION); encoder.writeSignedInteger(sla::ATTRIB_NUMBER, num); encoder.writeBool(sla::ATTRIB_CONTEXT, contextdecision); encoder.writeSignedInteger(sla::ATTRIB_STARTBIT, startbit); encoder.writeSignedInteger(sla::ATTRIB_SIZE, bitsize); for(int4 i=0;igetId()); list[i].first->encode(encoder); encoder.closeElement(sla::ELEM_PAIR); } for(int4 i=0;iencode(encoder); encoder.closeElement(sla::ELEM_DECISION); } void DecisionNode::decode(Decoder &decoder,DecisionNode *par,SubtableSymbol *sub) { uint4 el = decoder.openElement(sla::ELEM_DECISION); parent = par; num = decoder.readSignedInteger(sla::ATTRIB_NUMBER); contextdecision = decoder.readBool(sla::ATTRIB_CONTEXT); startbit = decoder.readSignedInteger(sla::ATTRIB_STARTBIT); bitsize = decoder.readSignedInteger(sla::ATTRIB_SIZE); uint4 subel = decoder.peekElement(); while(subel != 0) { if (subel == sla::ELEM_PAIR) { decoder.openElement(); uintm id = decoder.readSignedInteger(sla::ATTRIB_ID); Constructor *ct = sub->getConstructor(id); DisjointPattern *pat = DisjointPattern::decodeDisjoint(decoder); list.push_back(pair(pat,ct)); decoder.closeElement(subel); } else if (subel == sla::ELEM_DECISION) { DecisionNode *subnode = new DecisionNode(); subnode->decode(decoder,this,sub); children.push_back(subnode); } subel = decoder.peekElement(); } decoder.closeElement(el); } static void calc_maskword(int4 sbit,int4 ebit,int4 &num,int4 &shift,uintm &mask) { num = sbit/(8*sizeof(uintm)); if ( num != ebit/(8*sizeof(uintm))) 
throw SleighError("Context field not contained within one machine int"); sbit -= num*8*sizeof(uintm); ebit -= num*8*sizeof(uintm); shift = 8*sizeof(uintm)-ebit-1; mask = (~((uintm)0))>>(sbit+shift); mask <<= shift; } ContextOp::ContextOp(int4 startbit,int4 endbit,PatternExpression *pe) { calc_maskword(startbit,endbit,num,shift,mask); patexp = pe; patexp->layClaim(); } void ContextOp::apply(ParserWalkerChange &walker) const { uintm val = patexp->getValue(walker); // Get our value based on context val <<= shift; walker.getParserContext()->setContextWord(num,val,mask); } void ContextOp::validate(void) const { // Throw an exception if the PatternExpression is not valid vector values; patexp->listValues(values); // Get all the expression tokens for(int4 i=0;i(values[i]); if (val == (const OperandValue *)0) continue; // Certain operands cannot be used in context expressions // because these are evaluated BEFORE the operand offset // has been recovered. If the offset is not relative to // the base constructor, then we throw an error if (!val->isConstructorRelative()) throw SleighError(val->getName()+": cannot be used in context expression"); } } void ContextOp::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_CONTEXT_OP); encoder.writeSignedInteger(sla::ATTRIB_I, num); encoder.writeSignedInteger(sla::ATTRIB_SHIFT, shift); encoder.writeUnsignedInteger(sla::ATTRIB_MASK, mask); patexp->encode(encoder); encoder.closeElement(sla::ELEM_CONTEXT_OP); } void ContextOp::decode(Decoder &decoder,SleighBase *trans) { uint4 el = decoder.openElement(sla::ELEM_CONTEXT_OP); num = decoder.readSignedInteger(sla::ATTRIB_I); shift = decoder.readSignedInteger(sla::ATTRIB_SHIFT); mask = decoder.readUnsignedInteger(sla::ATTRIB_MASK); patexp = PatternExpression::decodeExpression(decoder,trans); patexp->layClaim(); decoder.closeElement(el); } ContextChange *ContextOp::clone(void) const { ContextOp *res = new ContextOp(); (res->patexp = patexp)->layClaim(); res->mask = mask; res->num 
= num; res->shift = shift; return res; } ContextCommit::ContextCommit(TripleSymbol *s,int4 sbit,int4 ebit,bool fl) { sym = s; flow = fl; int4 shift; calc_maskword(sbit,ebit,num,shift,mask); } void ContextCommit::apply(ParserWalkerChange &walker) const { walker.getParserContext()->addCommit(sym,num,mask,flow,walker.getPoint()); } void ContextCommit::encode(Encoder &encoder) const { encoder.openElement(sla::ELEM_COMMIT); encoder.writeUnsignedInteger(sla::ATTRIB_ID, sym->getId()); encoder.writeSignedInteger(sla::ATTRIB_NUMBER, num); encoder.writeUnsignedInteger(sla::ATTRIB_MASK, mask); encoder.writeBool(sla::ATTRIB_FLOW, flow); encoder.closeElement(sla::ELEM_COMMIT); } void ContextCommit::decode(Decoder &decoder,SleighBase *trans) { uint4 el = decoder.openElement(sla::ELEM_COMMIT); uintm id = decoder.readUnsignedInteger(sla::ATTRIB_ID); sym = (TripleSymbol *)trans->findSymbol(id); num = decoder.readSignedInteger(sla::ATTRIB_NUMBER); mask = decoder.readUnsignedInteger(sla::ATTRIB_MASK); flow = decoder.readBool(sla::ATTRIB_FLOW); decoder.closeElement(el); } ContextChange *ContextCommit::clone(void) const { ContextCommit *res = new ContextCommit(); res->sym = sym; res->flow = flow; res->mask = mask; res->num = num; return res; } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/slghsymbol.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef __SLGHSYMBOL_HH__ #define __SLGHSYMBOL_HH__ #include "semantics.hh" #include "slghpatexpress.hh" namespace ghidra { class SleighBase; // Forward declaration class SleighSymbol { friend class SymbolTable; public: enum symbol_type { space_symbol, token_symbol, userop_symbol, value_symbol, valuemap_symbol, name_symbol, varnode_symbol, varnodelist_symbol, operand_symbol, start_symbol, end_symbol, next2_symbol, subtable_symbol, macro_symbol, section_symbol, bitrange_symbol, context_symbol, epsilon_symbol, label_symbol, flowdest_symbol, flowref_symbol, dummy_symbol }; private: string name; uintm id; // Unique id across all symbols uintm scopeid; // Unique id of scope this symbol is in public: SleighSymbol(void) {} // For use with decode SleighSymbol(const string &nm) { name = nm; id = 0; } virtual ~SleighSymbol(void) {} const string &getName(void) const { return name; } uintm getId(void) const { return id; } virtual symbol_type getType(void) const { return dummy_symbol; } virtual void encodeHeader(Encoder &encoder) const; void decodeHeader(Decoder &decoder); virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; struct SymbolCompare { bool operator()(const SleighSymbol *a,const SleighSymbol *b) const { return (a->getName() < b->getName()); } }; typedef set SymbolTree; class SymbolScope { friend class SymbolTable; SymbolScope *parent; SymbolTree tree; uintm id; public: SymbolScope(SymbolScope *p,uintm i) { parent = p; id = i; } SymbolScope *getParent(void) const { return parent; } SleighSymbol *addSymbol(SleighSymbol *a); SleighSymbol *findSymbol(const string &nm) const; SymbolTree::const_iterator begin(void) const { return tree.begin(); } SymbolTree::const_iterator end(void) const { return tree.end(); } uintm getId(void) const { return id; } void removeSymbol(SleighSymbol *a) { tree.erase(a); } }; class SymbolTable { vector symbollist; vector table; SymbolScope *curscope; SymbolScope *skipScope(int4 i) const; 
SleighSymbol *findSymbolInternal(SymbolScope *scope,const string &nm) const; void renumber(void); public: SymbolTable(void) { curscope = (SymbolScope *)0; } ~SymbolTable(void); SymbolScope *getCurrentScope(void) { return curscope; } SymbolScope *getGlobalScope(void) { return table[0]; } void setCurrentScope(SymbolScope *scope) { curscope = scope; } void addScope(void); // Add new scope off of current scope, make it current void popScope(void); // Make parent of current scope current void addGlobalSymbol(SleighSymbol *a); void addSymbol(SleighSymbol *a); SleighSymbol *findSymbol(const string &nm) const { return findSymbolInternal(curscope,nm); } SleighSymbol *findSymbol(const string &nm,int4 skip) const { return findSymbolInternal(skipScope(skip),nm); } SleighSymbol *findGlobalSymbol(const string &nm) const { return findSymbolInternal(table[0],nm); } SleighSymbol *findSymbol(uintm id) const { return symbollist[id]; } void replaceSymbol(SleighSymbol *a,SleighSymbol *b); void encode(Encoder &encoder) const; void decode(Decoder &decoder,SleighBase *trans); void decodeSymbolHeader(Decoder &decoder); void purge(void); }; class SpaceSymbol : public SleighSymbol { AddrSpace *space; public: SpaceSymbol(AddrSpace *spc) : SleighSymbol(spc->getName()) { space = spc; } AddrSpace *getSpace(void) const { return space; } virtual symbol_type getType(void) const { return space_symbol; } }; class TokenSymbol : public SleighSymbol { Token *tok; public: TokenSymbol(Token *t) : SleighSymbol(t->getName()) { tok = t; } ~TokenSymbol(void) { delete tok; } Token *getToken(void) const { return tok; } virtual symbol_type getType(void) const { return token_symbol; } }; class SectionSymbol : public SleighSymbol { // Named p-code sections int4 templateid; // Index into the ConstructTpl array int4 define_count; // Number of definitions of this named section int4 ref_count; // Number of references to this named section public: SectionSymbol(const string &nm,int4 id) : SleighSymbol(nm) { 
templateid=id; define_count=0; ref_count=0; } int4 getTemplateId(void) const { return templateid; } void incrementDefineCount(void) { define_count += 1; } void incrementRefCount(void) { ref_count += 1; } int4 getDefineCount(void) const { return define_count; } int4 getRefCount(void) const { return ref_count; } virtual symbol_type getType(void) const { return section_symbol; } }; class UserOpSymbol : public SleighSymbol { // A user-defined pcode-op uint4 index; public: UserOpSymbol(void) {} // For use with decode UserOpSymbol(const string &nm) : SleighSymbol(nm) { index = 0; } void setIndex(uint4 ind) { index = ind; } uint4 getIndex(void) const { return index; } virtual symbol_type getType(void) const { return userop_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class Constructor; // Forward declaration // This is the central sleigh object class TripleSymbol : public SleighSymbol { public: TripleSymbol(void) {} TripleSymbol(const string &nm) : SleighSymbol(nm) {} virtual Constructor *resolve(ParserWalker &walker) { return (Constructor *)0; } virtual PatternExpression *getPatternExpression(void) const=0; virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const=0; virtual int4 getSize(void) const { return 0; } // Size out of context virtual void print(ostream &s,ParserWalker &walker) const=0; virtual void collectLocalValues(vector &results) const {} }; class FamilySymbol : public TripleSymbol { public: FamilySymbol(void) {} FamilySymbol(const string &nm) : TripleSymbol(nm) {} virtual PatternValue *getPatternValue(void) const=0; }; class SpecificSymbol : public TripleSymbol { public: SpecificSymbol(void) {} SpecificSymbol(const string &nm) : TripleSymbol(nm) {} virtual VarnodeTpl *getVarnode(void) const=0; }; class PatternlessSymbol : public SpecificSymbol { // Behaves like constant 0 pattern ConstantValue *patexp; public: 
PatternlessSymbol(void); // For use with decode PatternlessSymbol(const string &nm); virtual ~PatternlessSymbol(void); virtual PatternExpression *getPatternExpression(void) const { return patexp; } }; class EpsilonSymbol : public PatternlessSymbol { // Another name for zero pattern/value AddrSpace *const_space; public: EpsilonSymbol(void) {} // For use with decode EpsilonSymbol(const string &nm,AddrSpace *spc) : PatternlessSymbol(nm) { const_space=spc; } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return epsilon_symbol; } virtual VarnodeTpl *getVarnode(void) const; virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class ValueSymbol : public FamilySymbol { protected: PatternValue *patval; public: ValueSymbol(void) { patval = (PatternValue *)0; } // For use with decode ValueSymbol(const string &nm,PatternValue *pv); virtual ~ValueSymbol(void); virtual PatternValue *getPatternValue(void) const { return patval; } virtual PatternExpression *getPatternExpression(void) const { return patval; } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return value_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class ValueMapSymbol : public ValueSymbol { vector valuetable; bool tableisfilled; void checkTableFill(void); public: ValueMapSymbol(void) {} // For use with decode ValueMapSymbol(const string &nm,PatternValue *pv,const vector &vt) : ValueSymbol(nm,pv) { valuetable=vt; checkTableFill(); } virtual Constructor *resolve(ParserWalker &walker); virtual void getFixedHandle(FixedHandle &hand,ParserWalker 
&walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return valuemap_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class NameSymbol : public ValueSymbol { vector nametable; bool tableisfilled; void checkTableFill(void); public: NameSymbol(void) {} // For use with decode NameSymbol(const string &nm,PatternValue *pv,const vector &nt) : ValueSymbol(nm,pv) { nametable=nt; checkTableFill(); } virtual Constructor *resolve(ParserWalker &walker); virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return name_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class VarnodeSymbol : public PatternlessSymbol { // A global varnode VarnodeData fix; bool context_bits; public: VarnodeSymbol(void) {} // For use with decode VarnodeSymbol(const string &nm,AddrSpace *base,uintb offset,int4 size); void markAsContext(void) { context_bits = true; } const VarnodeData &getFixedVarnode(void) const { return fix; } virtual VarnodeTpl *getVarnode(void) const; virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual int4 getSize(void) const { return fix.size; } virtual void print(ostream &s,ParserWalker &walker) const { s << getName(); } virtual void collectLocalValues(vector &results) const; virtual symbol_type getType(void) const { return varnode_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class BitrangeSymbol : public SleighSymbol { // A smaller bitrange within a varnode VarnodeSymbol *varsym; // Varnode containing the bitrange uint4 bitoffset; // least significant bit of range uint4 numbits; // number 
of bits in the range public: BitrangeSymbol(void) {} // For use with decode BitrangeSymbol(const string &nm,VarnodeSymbol *sym,uint4 bitoff,uint4 num) : SleighSymbol(nm) { varsym=sym; bitoffset=bitoff; numbits=num; } VarnodeSymbol *getParentSymbol(void) const { return varsym; } uint4 getBitOffset(void) const { return bitoffset; } uint4 numBits(void) const { return numbits; } virtual symbol_type getType(void) const { return bitrange_symbol; } }; class ContextSymbol : public ValueSymbol { VarnodeSymbol *vn; uint4 low,high; // into a varnode bool flow; public: ContextSymbol(void) {} // For use with decode ContextSymbol(const string &nm,ContextField *pate,VarnodeSymbol *v,uint4 l,uint4 h,bool flow); VarnodeSymbol *getVarnode(void) const { return vn; } uint4 getLow(void) const { return low; } uint4 getHigh(void) const { return high; } bool getFlow(void) const { return flow; } virtual symbol_type getType(void) const { return context_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class VarnodeListSymbol : public ValueSymbol { vector varnode_table; bool tableisfilled; void checkTableFill(void); public: VarnodeListSymbol(void) {} // For use with decode VarnodeListSymbol(const string &nm,PatternValue *pv,const vector &vt); virtual Constructor *resolve(ParserWalker &walker); virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual int4 getSize(void) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return varnodelist_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class OperandSymbol : public SpecificSymbol { friend class Constructor; friend class OperandEquation; public: enum { code_address=1, offset_irrel=2, variable_len=4, marked=8 }; private: uint4 
reloffset; // Relative offset int4 offsetbase; // Base operand to which offset is relative (-1=constructor start) int4 minimumlength; // Minimum size of operand (within instruction tokens) int4 hand; // Handle index OperandValue *localexp; TripleSymbol *triple; // Defining symbol PatternExpression *defexp; // OR defining expression uint4 flags; void setVariableLength(void) { flags |= variable_len; } bool isVariableLength(void) const { return ((flags&variable_len)!=0); } public: OperandSymbol(void) {} // For use with decode OperandSymbol(const string &nm,int4 index,Constructor *ct); uint4 getRelativeOffset(void) const { return reloffset; } int4 getOffsetBase(void) const { return offsetbase; } int4 getMinimumLength(void) const { return minimumlength; } PatternExpression *getDefiningExpression(void) const { return defexp; } TripleSymbol *getDefiningSymbol(void) const { return triple; } int4 getIndex(void) const { return hand; } void defineOperand(PatternExpression *pe); void defineOperand(TripleSymbol *tri); void setCodeAddress(void) { flags |= code_address; } bool isCodeAddress(void) const { return ((flags&code_address)!=0); } void setOffsetIrrelevant(void) { flags |= offset_irrel; } bool isOffsetIrrelevant(void) const { return ((flags&offset_irrel)!=0); } void setMark(void) { flags |= marked; } void clearMark(void) { flags &= ~((uint4)marked); } bool isMarked(void) const { return ((flags&marked)!=0); } virtual ~OperandSymbol(void); virtual VarnodeTpl *getVarnode(void) const; virtual PatternExpression *getPatternExpression(void) const { return localexp; } virtual void getFixedHandle(FixedHandle &hnd,ParserWalker &walker) const; virtual int4 getSize(void) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual void collectLocalValues(vector &results) const; virtual symbol_type getType(void) const { return operand_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder 
&decoder,SleighBase *trans); }; class StartSymbol : public SpecificSymbol { AddrSpace *const_space; PatternExpression *patexp; public: StartSymbol(void) { patexp = (PatternExpression *)0; } // For use with decode StartSymbol(const string &nm,AddrSpace *cspc); virtual ~StartSymbol(void); virtual VarnodeTpl *getVarnode(void) const; virtual PatternExpression *getPatternExpression(void) const { return patexp; } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return start_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class EndSymbol : public SpecificSymbol { AddrSpace *const_space; PatternExpression *patexp; public: EndSymbol(void) { patexp = (PatternExpression *)0; } // For use with decode EndSymbol(const string &nm,AddrSpace *cspc); virtual ~EndSymbol(void); virtual VarnodeTpl *getVarnode(void) const; virtual PatternExpression *getPatternExpression(void) const { return patexp; } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return end_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class Next2Symbol : public SpecificSymbol { AddrSpace *const_space; PatternExpression *patexp; public: Next2Symbol(void) { patexp = (PatternExpression *)0; } // For use with decode Next2Symbol(const string &nm,AddrSpace *cspc); virtual ~Next2Symbol(void); virtual VarnodeTpl *getVarnode(void) const; virtual PatternExpression *getPatternExpression(void) const { return patexp; } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) 
const; virtual symbol_type getType(void) const { return next2_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class FlowDestSymbol : public SpecificSymbol { AddrSpace *const_space; public: FlowDestSymbol(void) {} // For use with decode FlowDestSymbol(const string &nm,AddrSpace *cspc); virtual VarnodeTpl *getVarnode(void) const; virtual PatternExpression *getPatternExpression(void) const { throw SleighError("Cannot use symbol in pattern"); } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return flowdest_symbol; } }; class FlowRefSymbol : public SpecificSymbol { AddrSpace *const_space; public: FlowRefSymbol(void) {} // For use with decode FlowRefSymbol(const string &nm,AddrSpace *cspc); virtual VarnodeTpl *getVarnode(void) const; virtual PatternExpression *getPatternExpression(void) const { throw SleighError("Cannot use symbol in pattern"); } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const; virtual void print(ostream &s,ParserWalker &walker) const; virtual symbol_type getType(void) const { return flowref_symbol; } }; class ContextChange { // Change to context command public: virtual ~ContextChange(void) {} virtual void validate(void) const=0; virtual void encode(Encoder &encoder) const=0; virtual void decode(Decoder &decoder,SleighBase *trans)=0; virtual void apply(ParserWalkerChange &walker) const=0; virtual ContextChange *clone(void) const=0; }; class ContextOp : public ContextChange { PatternExpression *patexp; // Expression determining value int4 num; // index of word containing context variable to set uintm mask; // Mask off size of variable int4 shift; // Number of bits to shift value into place public: ContextOp(int4 startbit,int4 endbit,PatternExpression *pe); ContextOp(void) {} // For use 
with decode virtual ~ContextOp(void) { PatternExpression::release(patexp); } virtual void validate(void) const; virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); virtual void apply(ParserWalkerChange &walker) const; virtual ContextChange *clone(void) const; }; class ContextCommit : public ContextChange { TripleSymbol *sym; int4 num; // Index of word containing context commit uintm mask; // mask of bits in word being committed bool flow; // Whether the context "flows" from the point of change public: ContextCommit(void) {} // For use with decode ContextCommit(TripleSymbol *s,int4 sbit,int4 ebit,bool fl); virtual void validate(void) const {} virtual void encode(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); virtual void apply(ParserWalkerChange &walker) const; virtual ContextChange *clone(void) const; }; class SubtableSymbol; class Constructor { // This is NOT a symbol TokenPattern *pattern; SubtableSymbol *parent; PatternEquation *pateq; vector operands; vector printpiece; vector context; // Context commands ConstructTpl *templ; // The main p-code section vector namedtempl; // Other named p-code sections int4 minimumlength; // Minimum length taken up by this constructor in bytes uintm id; // Unique id of constructor within subtable int4 firstwhitespace; // Index of first whitespace piece in -printpiece- int4 flowthruindex; // if >=0 then print only a single operand no markup int4 lineno; int4 src_index; //source file index mutable bool inerror; // An error is associated with this Constructor void orderOperands(void); public: Constructor(void); // For use with decode Constructor(SubtableSymbol *p); ~Constructor(void); TokenPattern *buildPattern(ostream &s); TokenPattern *getPattern(void) const { return pattern; } void setMinimumLength(int4 l) { minimumlength = l; } int4 getMinimumLength(void) const { return minimumlength; } void setId(uintm i) { id = i; } uintm getId(void) const { 
return id; } void setLineno(int4 ln) { lineno = ln; } int4 getLineno(void) const { return lineno; } void setSrcIndex(int4 index) {src_index = index;} int4 getSrcIndex(void) {return src_index;} void addContext(const vector &vec) { context = vec; } void addOperand(OperandSymbol *sym); void addInvisibleOperand(OperandSymbol *sym); void addSyntax(const string &syn); void addEquation(PatternEquation *pe); void setMainSection(ConstructTpl *tpl) { templ = tpl; } void setNamedSection(ConstructTpl *tpl,int4 id); SubtableSymbol *getParent(void) const { return parent; } int4 getNumOperands(void) const { return operands.size(); } OperandSymbol *getOperand(int4 i) const { return operands[i]; } PatternEquation *getPatternEquation(void) const { return pateq; } ConstructTpl *getTempl(void) const { return templ; } ConstructTpl *getNamedTempl(int4 secnum) const; int4 getNumSections(void) const { return namedtempl.size(); } void printInfo(ostream &s) const; void print(ostream &s,ParserWalker &pos) const; void printMnemonic(ostream &s,ParserWalker &walker) const; void printBody(ostream &s,ParserWalker &walker) const; void removeTrailingSpace(void); void applyContext(ParserWalkerChange &walker) const { vector::const_iterator iter; for(iter=context.begin();iter!=context.end();++iter) (*iter)->apply(walker); } void markSubtableOperands(vector &check) const; void collectLocalExports(vector &results) const; void setError(bool val) const { inerror = val; } bool isError(void) const { return inerror; } bool isRecursive(void) const; void encode(Encoder &encoder) const; void decode(Decoder &decoder,SleighBase *trans); }; class DecisionProperties { vector > identerrors; vector > conflicterrors; public: void identicalPattern(Constructor *a,Constructor *b); void conflictingPattern(Constructor *a,Constructor *b); const vector > &getIdentErrors(void) const { return identerrors; } const vector > &getConflictErrors(void) const { return conflicterrors; } }; class DecisionNode { vector > list; vector 
children; int4 num; // Total number of patterns we distinguish bool contextdecision; // True if this is decision based on context int4 startbit,bitsize; // Bits in the stream on which to base the decision DecisionNode *parent; void chooseOptimalField(void); double getScore(int4 low,int4 size,bool context); int4 getNumFixed(int4 low,int4 size,bool context); int4 getMaximumLength(bool context); void consistentValues(vector &bins,DisjointPattern *pat); public: DecisionNode(void) {} // For use with decode DecisionNode(DecisionNode *p); ~DecisionNode(void); Constructor *resolve(ParserWalker &walker) const; void addConstructorPair(const DisjointPattern *pat,Constructor *ct); void split(DecisionProperties &props); void orderPatterns(DecisionProperties &props); void encode(Encoder &encoder) const; void decode(Decoder &decoder,DecisionNode *par,SubtableSymbol *sub); }; class SubtableSymbol : public TripleSymbol { TokenPattern *pattern; bool beingbuilt,errors; vector construct; // All the Constructors in this table DecisionNode *decisiontree; public: SubtableSymbol(void) { pattern = (TokenPattern *)0; decisiontree = (DecisionNode *)0; } // For use with decode SubtableSymbol(const string &nm); virtual ~SubtableSymbol(void); bool isBeingBuilt(void) const { return beingbuilt; } bool isError(void) const { return errors; } void addConstructor(Constructor *ct) { ct->setId(construct.size()); construct.push_back(ct); } void buildDecisionTree(DecisionProperties &props); TokenPattern *buildPattern(ostream &s); TokenPattern *getPattern(void) const { return pattern; } int4 getNumConstructors(void) const { return construct.size(); } Constructor *getConstructor(uintm id) const { return construct[id]; } virtual Constructor *resolve(ParserWalker &walker) { return decisiontree->resolve(walker); } virtual PatternExpression *getPatternExpression(void) const { throw SleighError("Cannot use subtable in expression"); } virtual void getFixedHandle(FixedHandle &hand,ParserWalker &walker) const { 
throw SleighError("Cannot use subtable in expression"); } virtual int4 getSize(void) const { return -1; } virtual void print(ostream &s,ParserWalker &walker) const { throw SleighError("Cannot use subtable in expression"); } virtual void collectLocalValues(vector &results) const; virtual symbol_type getType(void) const { return subtable_symbol; } virtual void encode(Encoder &encoder) const; virtual void encodeHeader(Encoder &encoder) const; virtual void decode(Decoder &decoder,SleighBase *trans); }; class MacroSymbol : public SleighSymbol { // A user-defined pcode-macro int4 index; ConstructTpl *construct; vector operands; public: MacroSymbol(const string &nm,int4 i) : SleighSymbol(nm) { index = i; construct = (ConstructTpl *)0; } int4 getIndex(void) const { return index; } void setConstruct(ConstructTpl *ct) { construct = ct; } ConstructTpl *getConstruct(void) const { return construct; } void addOperand(OperandSymbol *sym) { operands.push_back(sym); } int4 getNumOperands(void) const { return operands.size(); } OperandSymbol *getOperand(int4 i) const { return operands[i]; } virtual ~MacroSymbol(void) { if (construct != (ConstructTpl *)0) delete construct; } virtual symbol_type getType(void) const { return macro_symbol; } }; class LabelSymbol : public SleighSymbol { // A branch label uint4 index; // Local 1 up index of label bool isplaced; // Has the label been placed (not just referenced) uint4 refcount; // Number of references to this label public: LabelSymbol(const string &nm,uint4 i) : SleighSymbol(nm) { index = i; refcount = 0; isplaced=false; } uint4 getIndex(void) const { return index; } void incrementRefCount(void) { refcount += 1; } uint4 getRefCount(void) const { return refcount; } void setPlaced(void) { isplaced = true; } bool isPlaced(void) const { return isplaced; } virtual symbol_type getType(void) const { return label_symbol; } }; } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/space.cc 
================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "space.hh" #include "translate.hh" namespace ghidra { AttributeId ATTRIB_BASE = AttributeId("base",89); AttributeId ATTRIB_DEADCODEDELAY = AttributeId("deadcodedelay",90); AttributeId ATTRIB_DELAY = AttributeId("delay", 91); AttributeId ATTRIB_LOGICALSIZE = AttributeId("logicalsize",92); AttributeId ATTRIB_PHYSICAL = AttributeId("physical",93); // ATTRIB_PIECE is a special attribute for supporting the legacy attributes "piece1", "piece2", ..., "piece9", // It is effectively a sequence of indexed attributes for use with Encoder::writeStringIndexed. // The index starts at the ids reserved for "piece1" thru "piece9" but can extend farther. AttributeId ATTRIB_PIECE = AttributeId("piece",94); // Open slots 94-102 /// Calculate \e highest based on \e addressSize, and \e wordsize. /// This also calculates the default pointerLowerBound void AddrSpace::calcScaleMask(void) { highest = calc_mask(addressSize); // Maximum address highest = highest * wordsize + (wordsize-1); // Maximum byte address pointerLowerBound = 0; pointerUpperBound = highest; uintb bufferSize = (addressSize < 3) ? 
0x100 : 0x1000; pointerLowerBound += bufferSize; pointerUpperBound -= bufferSize; } /// Initialize an address space with its basic attributes /// \param m is the space manager associated with the new space /// \param t is the processor translator associated with the new space /// \param tp is the type of the new space (PROCESSOR, CONSTANT, INTERNAL,...) /// \param nm is the name of the new space /// \param bigEnd is \b true for big endian encoding /// \param size is the (offset encoding) size of the new space /// \param ws is the number of bytes in an addressable unit /// \param ind is the integer identifier for the new space /// \param fl can be 0 or AddrSpace::hasphysical /// \param dl is the number of rounds to delay heritage for the new space /// \param dead is the number of rounds to delay before dead code removal AddrSpace::AddrSpace(AddrSpaceManager *m,const Translate *t,spacetype tp,const string &nm,bool bigEnd, uint4 size,uint4 ws, int4 ind,uint4 fl,int4 dl,int4 dead) { refcount = 0; // No references to this space yet manage = m; trans = t; type = tp; name = nm; addressSize = size; wordsize = ws; index = ind; delay = dl; deadcodedelay = dead; minimumPointerSize = 0; // (initially) assume pointers must match the space size exactly shortcut = ' '; // Placeholder meaning shortcut is unassigned // These are the flags we allow to be set from constructor flags = (fl & hasphysical); if (bigEnd) flags |= big_endian; flags |= (heritaged | does_deadcode); // Always on unless explicitly turned off in derived constructor calcScaleMask(); } /// This is a partial constructor, for initializing a space /// via XML /// \param m the associated address space manager /// \param t is the processor translator /// \param tp the basic type of the space AddrSpace::AddrSpace(AddrSpaceManager *m,const Translate *t,spacetype tp) { refcount = 0; manage = m; trans = t; type = tp; flags = (heritaged | does_deadcode); // Always on unless explicitly turned off in derived constructor 
wordsize = 1; minimumPointerSize = 0; shortcut = ' '; // We let big_endian get set by attribute } /// The logical form of the space is truncated from its actual size /// Pointers may refer to this original size put the most significant bytes are ignored /// \param newsize is the size (in bytes) of the truncated (logical) space void AddrSpace::truncateSpace(uint4 newsize) { setFlags(truncated); addressSize = newsize; minimumPointerSize = newsize; calcScaleMask(); } /// \brief Determine if a given point is contained in an address range in \b this address space /// /// The point is specified as an address space and offset pair plus an additional number of bytes to "skip". /// A non-negative value is returned if the point falls in the address range. /// If the point falls on the first byte of the range, 0 is returned. For the second byte, 1 is returned, etc. /// Otherwise -1 is returned. /// \param offset is the starting offset of the address range within \b this space /// \param size is the size of the address range in bytes /// \param pointSpace is the address space of the given point /// \param pointOff is the offset of the given point /// \param pointSkip is the additional bytes to skip /// \return a non-negative value indicating where the point falls in the range, or -1 int4 AddrSpace::overlapJoin(uintb offset,int4 size,AddrSpace *pointSpace,uintb pointOff,int4 pointSkip) const { if (this != pointSpace) return -1; uintb dist = wrapOffset(pointOff+pointSkip-offset); if (dist >= size) return -1; // but must fall before op+size return (int4) dist; } /// Write the main attributes for an address within \b this space. /// The caller provides only the \e offset, and this routine fills /// in other details pertaining to this particular space. 
/// \param encoder is the stream encoder /// \param offset is the offset of the address void AddrSpace::encodeAttributes(Encoder &encoder,uintb offset) const { encoder.writeSpace(ATTRIB_SPACE,this); encoder.writeUnsignedInteger(ATTRIB_OFFSET, offset); } /// Write the main attributes of an address with \b this space /// and a size. The caller provides the \e offset and \e size, /// and other details about this particular space are filled in. /// \param encoder is the stream encoder /// \param offset is the offset of the address /// \param size is the size of the memory location void AddrSpace::encodeAttributes(Encoder &encoder,uintb offset,int4 size) const { encoder.writeSpace(ATTRIB_SPACE, this); encoder.writeUnsignedInteger(ATTRIB_OFFSET, offset); encoder.writeSignedInteger(ATTRIB_SIZE, size); } /// For an open element describing an address in \b this space, this routine /// recovers the offset and possibly the size described by the element /// \param decoder is the stream decoder /// \param size is a reference where the recovered size should be stored /// \return the recovered offset uintb AddrSpace::decodeAttributes(Decoder &decoder,uint4 &size) const { uintb offset; bool foundoffset = false; for(;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == ATTRIB_OFFSET) { foundoffset = true; offset = decoder.readUnsignedInteger(); } else if (attribId == ATTRIB_SIZE) { size = decoder.readSignedInteger(); } } if (!foundoffset) throw LowlevelError("Address is missing offset"); return offset; } /// Print the \e offset as hexidecimal digits. /// \param s is the stream to write to /// \param offset is the offset to be printed void AddrSpace::printOffset(ostream &s,uintb offset) const { s << "0x" << hex << offset; } /// This is a printing method for the debugging routines. It /// prints taking into account the \e wordsize, adding a /// "+n" if the offset is not on-cut with wordsize. 
It also /// returns the expected/typical size of values from this space. /// \param s is the stream being written /// \param offset is the offset to be printed void AddrSpace::printRaw(ostream &s,uintb offset) const { int4 sz = getAddrSize(); if (sz > 4) { if ((offset>>32) == 0) sz = 4; // Don't print a bunch of zeroes at front of address else if ((offset>>48) == 0) sz = 6; } s << "0x" << setfill('0') << setw(2*sz) << hex << byteToAddress(offset,wordsize); if (wordsize>1) { int4 cut = offset % wordsize; if (cut != 0) s << '+' << dec << cut; } } static int4 get_offset_size(const char *ptr,uintb &offset) { // Get optional size and offset fields from string int4 size; uint4 val; char *ptr2; val = 0; // Defaults size = -1; if (*ptr == ':') { size = strtoul(ptr+1,&ptr2,0); if (*ptr2 == '+') val = strtoul(ptr2+1,&ptr2,0); } if (*ptr == '+') val = strtoul(ptr+1,&ptr2,0); offset += val; // Adjust offset return size; } /// For the console mode, an address space can tailor how it /// converts user strings into offsets within the space. The /// base routine can read and convert register names as well /// as absolute hex addresses. A size can be indicated by /// appending a ':' and integer, .i.e. 0x1000:2. Offsets within /// a register can be indicated by appending a '+' and integer, /// i.e. 
eax+2 /// \param s is the string to be parsed /// \param size is a reference to the size being returned /// \return the parsed offset uintb AddrSpace::read(const string &s,int4 &size) const { const char *enddata; char *tmpdata; int4 expsize; string::size_type append; string frontpart; uintb offset; append = s.find_first_of(":+"); try { if (append == string::npos) { const VarnodeData &point(trans->getRegister(s)); offset = point.offset; size = point.size; } else { frontpart = s.substr(0,append); const VarnodeData &point(trans->getRegister(frontpart)); offset = point.offset; size = point.size; } } catch(LowlevelError &err) { // Name doesn't exist offset = strtoul(s.c_str(),&tmpdata,0); offset = addressToByte(offset,wordsize); enddata = (const char *) tmpdata; if (enddata - s.c_str() == s.size()) { // If no size or offset override size = manage->getDefaultSize(); // Return "natural" size return offset; } size = manage->getDefaultSize(); } if (append != string::npos) { enddata = s.c_str()+append; expsize = get_offset_size( enddata, offset ); if (expsize!=-1) { size = expsize; return offset; } } return offset; } /// Walk attributes of the current element and recover all the properties defining /// this space. The processor translator, \e trans, and the /// \e type must already be filled in. 
/// \param decoder is the stream decoder void AddrSpace::decodeBasicAttributes(Decoder &decoder) { deadcodedelay = -1; for (;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == ATTRIB_NAME) { name = decoder.readString(); } if (attribId == ATTRIB_INDEX) index = decoder.readSignedInteger(); else if (attribId == ATTRIB_SIZE) addressSize = decoder.readSignedInteger(); else if (attribId == ATTRIB_WORDSIZE) wordsize = decoder.readUnsignedInteger(); else if (attribId == ATTRIB_BIGENDIAN) { if (decoder.readBool()) flags |= big_endian; } else if (attribId == ATTRIB_DELAY) delay = decoder.readSignedInteger(); else if (attribId == ATTRIB_DEADCODEDELAY) deadcodedelay = decoder.readSignedInteger(); else if (attribId == ATTRIB_PHYSICAL) { if (decoder.readBool()) flags |= hasphysical; } } if (deadcodedelay == -1) deadcodedelay = delay; // If deadcodedelay attribute not present, set it to delay calcScaleMask(); } void AddrSpace::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(); // Multiple tags: , , decodeBasicAttributes(decoder); decoder.closeElement(elemId); } const string ConstantSpace::NAME = "const"; const int4 ConstantSpace::INDEX = 0; /// This constructs the unique constant space /// By convention, the name is always "const" and the index /// is always 0. 
/// \param m is the associated address space manager /// \param t is the associated processor translator ConstantSpace::ConstantSpace(AddrSpaceManager *m,const Translate *t) : AddrSpace(m,t,IPTR_CONSTANT,NAME,false,sizeof(uintb),1,INDEX,0,0,0) { clearFlags(heritaged|does_deadcode|big_endian); if (HOST_ENDIAN==1) // Endianness always matches host setFlags(big_endian); } int4 ConstantSpace::overlapJoin(uintb offset,int4 size,AddrSpace *pointSpace,uintb pointOff,int4 pointSkip) const { return -1; } /// Constants are always printed as hexidecimal values in /// the debugger and console dumps void ConstantSpace::printRaw(ostream &s,uintb offset) const { s << "0x" << hex << offset; } /// As the ConstantSpace is never saved, it should never get /// decoded either. void ConstantSpace::decode(Decoder &decoder) { throw LowlevelError("Should never decode the constant space"); } const string OtherSpace::NAME = "OTHER"; const int4 OtherSpace::INDEX = 1; /// Construct the \b other space, which is automatically constructed /// by the compiler, and is only constructed once. The name should /// always by \b OTHER. /// \param m is the associated address space manager /// \param t is the associated processor translator /// \param ind is the integer identifier OtherSpace::OtherSpace(AddrSpaceManager *m,const Translate *t,int4 ind) : AddrSpace(m,t,IPTR_PROCESSOR,NAME,false,sizeof(uintb),1,INDEX,0,0,0) { clearFlags(heritaged|does_deadcode); setFlags(is_otherspace); } OtherSpace::OtherSpace(AddrSpaceManager *m,const Translate *t) : AddrSpace(m,t,IPTR_PROCESSOR) { clearFlags(heritaged|does_deadcode); setFlags(is_otherspace); } void OtherSpace::printRaw(ostream &s,uintb offset) const { s << "0x" << hex << offset; } const string UniqueSpace::NAME = "unique"; const uint4 UniqueSpace::SIZE = 4; /// This is the constructor for the \b unique space, which is /// automatically constructed by the analysis engine, and /// constructed only once. The name should always be \b unique. 
/// \param m is the associated address space manager /// \param t is the associated processor translator /// \param ind is the integer identifier /// \param fl are attribute flags (currently unused) UniqueSpace::UniqueSpace(AddrSpaceManager *m,const Translate *t,int4 ind,uint4 fl) : AddrSpace(m,t,IPTR_INTERNAL,NAME,t->isBigEndian(),SIZE,1,ind,fl,0,0) { setFlags(hasphysical); } UniqueSpace::UniqueSpace(AddrSpaceManager *m,const Translate *t) : AddrSpace(m,t,IPTR_INTERNAL) { setFlags(hasphysical); } const string JoinSpace::NAME = "join"; /// This is the constructor for the \b join space, which is automatically constructed by the /// analysis engine, and constructed only once. The name should always be \b join. /// \param m is the associated address space manager /// \param t is the associated processor translator /// \param ind is the integer identifier JoinSpace::JoinSpace(AddrSpaceManager *m,const Translate *t,int4 ind) : AddrSpace(m,t,IPTR_JOIN,NAME,t->isBigEndian(),sizeof(uintm),1,ind,0,0,0) { // This is a virtual space // setFlags(hasphysical); clearFlags(heritaged); // This space is never heritaged, but does dead-code analysis } int4 JoinSpace::overlapJoin(uintb offset,int4 size,AddrSpace *pointSpace,uintb pointOffset,int4 pointSkip) const { if (this == pointSpace) { // If the point is in the join space, translate the point into the piece address space JoinRecord *pieceRecord = getManager()->findJoin(pointOffset); int4 pos; Address addr = pieceRecord->getEquivalentAddress(pointOffset + pointSkip, pos); pointSpace = addr.getSpace(); pointOffset = addr.getOffset(); } else { if (pointSpace->getType() == IPTR_CONSTANT) return -1; pointOffset = pointSpace->wrapOffset(pointOffset + pointSkip); } JoinRecord *joinRecord = getManager()->findJoin(offset); // Set up so we traverse pieces in data order int4 startPiece,endPiece,dir; if (isBigEndian()) { startPiece = 0; endPiece = joinRecord->numPieces(); dir = 1; } else { startPiece = joinRecord->numPieces() - 1; endPiece = 
-1; dir = -1; } int4 bytesAccum = 0; for(int4 i=startPiece;i!=endPiece;i += dir) { const VarnodeData &vData(joinRecord->getPiece(i)); if (vData.space == pointSpace && pointOffset >= vData.offset && pointOffset <= vData.offset + (vData.size-1)) { int4 res = (int4)(pointOffset - vData.offset) + bytesAccum; if (res >= size) return -1; return res; } bytesAccum += vData.size; } return -1; } /// Encode a \e join address to the stream. This method in the interface only /// outputs attributes for a single element, so we are forced to encode what should probably /// be recursive elements into an attribute. /// \param encoder is the stream encoder /// \param offset is the offset within the address space to encode void JoinSpace::encodeAttributes(Encoder &encoder,uintb offset) const { JoinRecord *rec = getManager()->findJoin(offset); // Record must already exist encoder.writeSpace(ATTRIB_SPACE, this); int4 num = rec->numPieces(); if (num > MAX_PIECES) throw LowlevelError("Exceeded maximum pieces in one join address"); for(int4 i=0;igetPiece(i) ); ostringstream t; t << vdata.space->getName() << ":0x"; t << hex << vdata.offset << ':' << dec << vdata.size; encoder.writeStringIndexed(ATTRIB_PIECE, i, t.str()); } if (num == 1) encoder.writeUnsignedInteger(ATTRIB_LOGICALSIZE, rec->getUnified().size); } /// Encode a \e join address to the stream. This method in the interface only /// outputs attributes for a single element, so we are forced to encode what should probably /// be recursive elements into an attribute. /// \param encoder is the stream encoder /// \param offset is the offset within the address space to encode /// \param size is the size of the memory location being encoded void JoinSpace::encodeAttributes(Encoder &encoder,uintb offset,int4 size) const { encodeAttributes(encoder,offset); // Ignore size } /// Parse the current element as a join address. Pieces of the join are encoded as a sequence /// of ATTRIB_PIECE attributes. 
"piece1" corresponds to the most significant piece. The /// Translate::findAddJoin method is used to construct a logical address within the join space. /// \param decoder is the stream decoder /// \param size is a reference to be filled in as the size encoded by the tag /// \return the offset of the final address encoded by the tag uintb JoinSpace::decodeAttributes(Decoder &decoder,uint4 &size) const { vector pieces; uint4 logicalsize = 0; for(;;) { uint4 attribId = decoder.getNextAttributeId(); if (attribId == 0) break; if (attribId == ATTRIB_LOGICALSIZE) { logicalsize = decoder.readUnsignedInteger(); continue; } else if (attribId == ATTRIB_UNKNOWN) attribId = decoder.getIndexedAttributeId(ATTRIB_PIECE); if (attribId < ATTRIB_PIECE.getId()) continue; int4 pos = (int4)(attribId - ATTRIB_PIECE.getId()); if (pos > MAX_PIECES) continue; while(pieces.size() <= pos) pieces.emplace_back(); VarnodeData &vdat( pieces[pos] ); string attrVal = decoder.readString(); string::size_type offpos = attrVal.find(':'); if (offpos == string::npos) { const Translate *tr = getTrans(); const VarnodeData &point(tr->getRegister(attrVal)); vdat = point; } else { string::size_type szpos = attrVal.find(':',offpos+1); if (szpos==string::npos) throw LowlevelError("join address piece attribute is malformed"); string spcname = attrVal.substr(0,offpos); vdat.space = getManager()->getSpaceByName(spcname); istringstream s1(attrVal.substr(offpos+1,szpos)); s1.unsetf(ios::dec | ios::hex | ios::oct); s1 >> vdat.offset; istringstream s2(attrVal.substr(szpos+1)); s2.unsetf(ios::dec | ios::hex | ios::oct); s2 >> vdat.size; } } JoinRecord *rec = getManager()->findAddJoin(pieces,logicalsize); size = rec->getUnified().size; return rec->getUnified().offset; } void JoinSpace::printRaw(ostream &s,uintb offset) const { JoinRecord *rec = getManager()->findJoin(offset); int4 szsum = 0; int4 num = rec->numPieces(); s << '{'; for(int4 i=0;igetPiece(i) ); szsum += vdat.size; if (i!=0) s << ','; 
vdat.space->printRaw(s,vdat.offset); } if (num == 1) { szsum = rec->getUnified().size; s << ':' << szsum; } s << '}'; } uintb JoinSpace::read(const string &s,int4 &size) const { vector pieces; int4 szsum = 0; int4 i=0; while(i < s.size()) { pieces.emplace_back(); // Prepare to read next VarnodeData string token; while((igetRegister(token); } catch(LowlevelError &err) { // Name doesn't exist char tryShortcut = token[0]; AddrSpace *spc = getManager()->getSpaceByShortcut(tryShortcut); if (spc == (AddrSpace *)0) throw LowlevelError("Could not parse join string"); int4 subsize; pieces.back().space = spc; pieces.back().offset = spc->read(token.substr(1),subsize); pieces.back().size = subsize; } szsum += pieces.back().size; } JoinRecord *rec = getManager()->findAddJoin(pieces,0); size = szsum; return rec->getUnified().offset; } void JoinSpace::decode(Decoder &decoder) { throw LowlevelError("Should never decode join space"); } /// \param m is the address space manager /// \param t is the processor translator OverlaySpace::OverlaySpace(AddrSpaceManager *m,const Translate *t) : AddrSpace(m,t,IPTR_PROCESSOR) { baseSpace = (AddrSpace *)0; setFlags(overlay); } void OverlaySpace::decode(Decoder &decoder) { uint4 elemId = decoder.openElement(ELEM_SPACE_OVERLAY); name = decoder.readString(ATTRIB_NAME); index = decoder.readSignedInteger(ATTRIB_INDEX); baseSpace = decoder.readSpace(ATTRIB_BASE); decoder.closeElement(elemId); addressSize = baseSpace->getAddrSize(); wordsize = baseSpace->getWordSize(); delay = baseSpace->getDelay(); deadcodedelay = baseSpace->getDeadcodeDelay(); calcScaleMask(); if (baseSpace->isBigEndian()) setFlags(big_endian); if (baseSpace->hasPhysical()) setFlags(hasphysical); } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/space.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in 
compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file space.hh /// \brief Classes for describing address spaces #ifndef __SPACE_HH__ #define __SPACE_HH__ #include "error.hh" #include "marshal.hh" namespace ghidra { /// \brief Fundemental address space types /// /// Every address space must be one of the following core types enum spacetype { IPTR_CONSTANT = 0, ///< Special space to represent constants IPTR_PROCESSOR = 1, ///< Normal spaces modelled by processor IPTR_SPACEBASE = 2, ///< addresses = offsets off of base register IPTR_INTERNAL = 3, ///< Internally managed temporary space IPTR_FSPEC = 4, ///< Special internal FuncCallSpecs reference IPTR_IOP = 5, ///< Special internal PcodeOp reference IPTR_JOIN = 6 ///< Special virtual space to represent split variables }; class AddrSpace; class AddrSpaceManager; struct VarnodeData; class Translate; extern AttributeId ATTRIB_BASE; ///< Marshaling attribute "base" extern AttributeId ATTRIB_DEADCODEDELAY; ///< Marshaling attribute "deadcodedelay" extern AttributeId ATTRIB_DELAY; ///< Marshaling attribute "delay" extern AttributeId ATTRIB_LOGICALSIZE; ///< Marshaling attribute "logicalsize" extern AttributeId ATTRIB_PHYSICAL; ///< Marshaling attribute "physical" extern AttributeId ATTRIB_PIECE; ///< Marshaling attribute "piece" /// \brief A region where processor data is stored /// /// An AddrSpace (Address Space) is an arbitrary sequence of /// bytes where a processor can store data. As is usual with /// most processors' concept of RAM, an integer offset /// paired with an AddrSpace forms the address (See Address) /// of a byte. 
/// The \e size of an AddrSpace indicates the number
/// of bytes that can be separately addressed and is usually
/// described by the number of bytes needed to encode the biggest
/// offset. I.e. a \e 4-byte address space means that there are
/// offsets ranging from 0x00000000 to 0xffffffff within the space
/// for a total of 2^32 addressable bytes within the space.
/// There can be multiple address spaces, and it is typical to have spaces
/// - \b ram Modeling the main processor address bus
/// - \b register Modeling a processors registers
///
/// The processor specification can set up any address spaces it
/// needs in an arbitrary manner, but \e all data manipulated by
/// the processor, which the specification hopes to model, must
/// be contained in some address space, including RAM, ROM,
/// general registers, special registers, i/o ports, etc.
///
/// The analysis engine also uses additional address spaces to
/// model special concepts. These include
/// - \b const There is a \e constant address space for
/// modeling constant values in p-code expressions
/// (See ConstantSpace)
/// - \b unique There is always a \e unique address space used
/// as a pool for temporary registers.
/// (See UniqueSpace)
///
class AddrSpace {
  friend class AddrSpaceManager; // Space container
public:
  /// \brief Cached boolean attributes of the space (stored in \b flags)
  enum {
    big_endian = 1,             ///< Space is big endian if set, little endian otherwise
    heritaged = 2,              ///< This space is heritaged
    does_deadcode = 4,          ///< Dead-code analysis is done on this space
    programspecific = 8,        ///< Space is specific to a particular loadimage
    reverse_justification = 16, ///< Justification within aligned word is opposite of endianness
    formal_stackspace = 0x20,   ///< Space attached to the formal \b stack \b pointer
    overlay = 0x40,             ///< This space is an overlay of another space
    overlaybase = 0x80,         ///< This is the base space for overlay space(s)
    truncated = 0x100,          ///< Space is truncated from its original size, expect pointers larger than this size
    hasphysical = 0x200,        ///< Has physical memory associated with it
    is_otherspace = 0x400,      ///< Quick check for the OtherSpace derived class
    has_nearpointers = 0x800    ///< Does there exist near pointers into this space
  };
private:
  spacetype type;               ///< Type of space (PROCESSOR, CONSTANT, INTERNAL, ...)
  AddrSpaceManager *manage;     ///< Manager for processor using this space
  const Translate *trans;       ///< Processor translator (for register names etc) for this space
  int4 refcount;                ///< Number of managers using this space
  uint4 flags;                  ///< Attributes of the space
  uintb highest;                ///< Highest (byte) offset into this space
  uintb pointerLowerBound;      ///< Offset below which we don't search for pointers
  uintb pointerUpperBound;      ///< Offset above which we don't search for pointers
  char shortcut;                ///< Shortcut character for printing
protected:
  string name;                  ///< Name of this space
  uint4 addressSize;            ///< Size of an address into this space in bytes
  uint4 wordsize;               ///< Size of unit being addressed (1=byte)
  int4 minimumPointerSize;      ///< Smallest size of a pointer into \b this space (in bytes)
  int4 index;                   ///< An integer identifier for the space
  int4 delay;                   ///< Delay in heritaging this space
  int4 deadcodedelay;           ///< Delay before deadcode removal is allowed on this space
  void calcScaleMask(void);     ///< Calculate scale and mask
  void setFlags(uint4 fl);      ///< Set a cached attribute
  void clearFlags(uint4 fl);    ///< Clear a cached attribute
  void decodeBasicAttributes(Decoder &decoder); ///< Read attributes for \b this space from an open XML element
  void truncateSpace(uint4 newsize);
public:
  AddrSpace(AddrSpaceManager *m,const Translate *t,spacetype tp,const string &nm,bool bigEnd,
            uint4 size,uint4 ws,int4 ind,uint4 fl,int4 dl,int4 dead);
  AddrSpace(AddrSpaceManager *m,const Translate *t,spacetype tp); ///< For use with decode
  virtual ~AddrSpace(void) {}   ///< The address space destructor
  const string &getName(void) const;        ///< Get the name
  AddrSpaceManager *getManager(void) const; ///< Get the space manager
  const Translate *getTrans(void) const;    ///< Get the processor translator
  spacetype getType(void) const;            ///< Get the type of space
  int4 getDelay(void) const;                ///< Get number of heritage passes being delayed
  int4 getDeadcodeDelay(void) const;        ///< Get number of passes before deadcode removal is allowed
  int4 getIndex(void) const;                ///< Get the integer identifier
  uint4 getWordSize(void) const;            ///< Get the addressable unit size
  uint4 getAddrSize(void) const;            ///< Get the size of the space
  uintb getHighest(void) const;             ///< Get the highest byte-scaled address
  uintb getPointerLowerBound(void) const;   ///< Get lower bound for assuming an offset is a pointer
  uintb getPointerUpperBound(void) const;   ///< Get upper bound for assuming an offset is a pointer
  int4 getMinimumPtrSize(void) const;       ///< Get the minimum pointer size for \b this space
  uintb wrapOffset(uintb off) const;        ///< Wrap -off- to the offset that fits into this space
  char getShortcut(void) const;             ///< Get the shortcut character
  bool isHeritaged(void) const;             ///< Return \b true if dataflow has been traced
  bool doesDeadcode(void) const;            ///< Return \b true if dead code analysis should be done on this space
  bool hasPhysical(void) const;             ///< Return \b true if data is physically stored in this
  bool isBigEndian(void) const;             ///< Return \b true if values in this space are big endian
  bool isReverseJustified(void) const;      ///< Return \b true if alignment justification does not match endianness
  bool isFormalStackSpace(void) const;      ///< Return \b true if \b this is attached to the formal \b stack \b pointer
  bool isOverlay(void) const;               ///< Return \b true if this is an overlay space
  bool isOverlayBase(void) const;           ///< Return \b true if other spaces overlay this space
  bool isOtherSpace(void) const;            ///< Return \b true if \b this is the \e other address space
  bool isTruncated(void) const;             ///< Return \b true if this space is truncated from its original size
  bool hasNearPointers(void) const;         ///< Return \b true if \e near (truncated) pointers into \b this space are possible
  void printOffset(ostream &s,uintb offset) const; ///< Write an address offset to a stream
  virtual int4 numSpacebase(void) const;    ///< Number of base registers associated with this space
  virtual const VarnodeData &getSpacebase(int4 i) const; ///< Get a base register that creates this virtual space
  virtual const VarnodeData &getSpacebaseFull(int4 i) const; ///< Return original spacebase register before truncation
  virtual bool stackGrowsNegative(void) const; ///< Return \b true if a stack in this space grows negative
  virtual AddrSpace *getContain(void) const; ///< Return this space's containing space (if any)
  virtual int4 overlapJoin(uintb offset,int4 size,AddrSpace *pointSpace,uintb pointOff,int4 pointSkip) const;
  virtual void encodeAttributes(Encoder &encoder,uintb offset) const; ///< Encode address attributes to a stream
  virtual void encodeAttributes(Encoder &encoder,uintb offset,int4 size) const; ///< Encode an address and size attributes to a stream
  virtual uintb decodeAttributes(Decoder &decoder,uint4 &size) const; ///< Recover an offset and size
  virtual void printRaw(ostream &s,uintb offset) const; ///< Write an address in this space to a stream
  virtual uintb read(const string &s,int4 &size) const; ///< Read in an address (and possible size) from a string
  virtual void decode(Decoder &decoder); ///< Recover the details of this space from a stream
  static uintb addressToByte(uintb val,uint4 ws);  ///< Scale from addressable units to byte units
  static uintb byteToAddress(uintb val,uint4 ws);  ///< Scale from byte units to addressable units
  static int8 addressToByteInt(int8 val,uint4 ws); ///< Scale int4 from addressable units to byte units
  static int8 byteToAddressInt(int8 val,uint4 ws); ///< Scale int4 from byte units to addressable units
  static bool compareByIndex(const AddrSpace *a,const AddrSpace *b); ///< Compare two spaces by their index
};

/// \brief Special AddrSpace for representing constants during analysis.
///
/// The underlying RTL (See PcodeOp) represents all data in terms of
/// an Address, which is made up of an AddrSpace and offset pair.
/// In order to represent constants in the semantics of the RTL,
/// there is a special \e constant address space.
/// An \e offset
/// within the address space encodes the actual constant represented
/// by the pair. I.e. the pair (\b const,4) represents the constant
/// \b 4 within the RTL. The \e size of the ConstantSpace has
/// no meaning, as we always want to be able to represent an arbitrarily
/// large constant. In practice, the size of a constant is limited
/// by the offset field of an Address.
class ConstantSpace : public AddrSpace {
public:
  ConstantSpace(AddrSpaceManager *m,const Translate *t); ///< Only constructor
  virtual int4 overlapJoin(uintb offset,int4 size,AddrSpace *pointSpace,uintb pointOff,int4 pointSkip) const;
  virtual void printRaw(ostream &s,uintb offset) const;
  virtual void decode(Decoder &decoder);
  static const string NAME; ///< Reserved name for the address space
  static const int4 INDEX;  ///< Reserved index for constant space
};

/// \brief Special AddrSpace for special/user-defined address spaces
class OtherSpace : public AddrSpace {
public:
  OtherSpace(AddrSpaceManager *m, const Translate *t, int4 ind); ///< Constructor
  OtherSpace(AddrSpaceManager *m, const Translate *t); ///< For use with decode
  virtual void printRaw(ostream &s, uintb offset) const;
  static const string NAME; ///< Reserved name for the address space
  static const int4 INDEX;  ///< Reserved index for the other space
};

/// \brief The pool of temporary storage registers
///
/// It is convenient both for modelling processor instructions
/// in an RTL and for later transforming of the RTL to have a pool
/// of temporary registers that can hold data but that aren't a
/// formal part of the state of the processor. The UniqueSpace
/// provides a specific location for this pool. The analysis
/// engine always creates exactly one of these spaces named
/// \b unique.
class UniqueSpace : public AddrSpace {
public:
  UniqueSpace(AddrSpaceManager *m,const Translate *t,int4 ind,uint4 fl); ///< Constructor
  UniqueSpace(AddrSpaceManager *m,const Translate *t); ///< For use with decode
  static const string NAME; ///< Reserved name for the unique space
  static const uint4 SIZE;  ///< Fixed size (in bytes) for unique space offsets
};

/// \brief The pool of logically joined variables
///
/// Some logical variables are split across non-contiguous regions of memory. This space
/// creates a virtual place for these logical variables to exist. Any memory location within this
/// space is backed by 2 or more memory locations in other spaces that physically hold the pieces
/// of the logical value. The database controlling symbols is responsible for keeping track of
/// mapping the logical address in this space to its physical pieces. Offsets into this space do not
/// have an absolute meaning, the database may vary what offset is assigned to what set of pieces.
class JoinSpace : public AddrSpace {
  static const int4 MAX_PIECES = 64; ///< Maximum number of pieces that can be marshaled in one \e join address
public:
  JoinSpace(AddrSpaceManager *m,const Translate *t,int4 ind);
  virtual int4 overlapJoin(uintb offset,int4 size,AddrSpace *pointSpace,uintb pointOff,int4 pointSkip) const;
  virtual void encodeAttributes(Encoder &encoder,uintb offset) const;
  virtual void encodeAttributes(Encoder &encoder,uintb offset,int4 size) const;
  virtual uintb decodeAttributes(Decoder &decoder,uint4 &size) const;
  virtual void printRaw(ostream &s,uintb offset) const;
  virtual uintb read(const string &s,int4 &size) const;
  virtual void decode(Decoder &decoder);
  static const string NAME; ///< Reserved name for the join space
};

/// \brief An overlay space.
///
/// A different code and data layout that occupies the same memory as another address space.
/// Some compilers use this concept to increase the logical size of a program without increasing
/// its physical memory requirements. An overlay space allows the same physical location to contain
/// different code and be labeled with different symbols, depending on context.
/// From the point of view of reverse engineering, the different code and symbols are viewed
/// as a logically distinct space.
class OverlaySpace : public AddrSpace {
  AddrSpace *baseSpace;  ///< Space being overlayed
public:
  OverlaySpace(AddrSpaceManager *m,const Translate *t); ///< Constructor
  virtual AddrSpace *getContain(void) const { return baseSpace; }
  virtual void decode(Decoder &decoder);
};

/// An internal method for derived classes to set space attributes
/// \param fl is the set of attributes to be set
inline void AddrSpace::setFlags(uint4 fl) {
  flags |= fl;
}

/// An internal method for derived classes to clear space attributes
/// \param fl is the set of attributes to clear
inline void AddrSpace::clearFlags(uint4 fl) {
  flags &= ~fl;
}

/// Every address space has a (unique) name, which is referred
/// to especially in configuration files via XML.
/// \return the name of this space
inline const string &AddrSpace::getName(void) const {
  return name;
}

/// Every address space is associated with a manager of (all possible) spaces.
/// This method recovers the address space manager object.
/// \return a pointer to the address space manager
inline AddrSpaceManager *AddrSpace::getManager(void) const {
  return manage;
}

/// Every address space is associated with a processor which may have additional objects
/// like registers etc. associated with it. This method returns a pointer to that processor
/// translator
/// \return a pointer to the Translate object
inline const Translate *AddrSpace::getTrans(void) const {
  return trans;
}

/// Return the defining type for this address space.
///   - IPTR_CONSTANT for the constant space
///   - IPTR_PROCESSOR for a normal space
///   - IPTR_INTERNAL for the temporary register space
///   - IPTR_FSPEC for special FuncCallSpecs references
///   - IPTR_IOP for special PcodeOp references
/// \return the basic type of this space
inline spacetype AddrSpace::getType(void) const {
  return type;
}

/// If the heritage algorithms need to trace dataflow
/// within this space, the algorithms can delay tracing this
/// space in order to let indirect references into the space
/// resolve themselves. This method indicates the number of
/// rounds of dataflow analysis that should be skipped for this
/// space to let this resolution happen
/// \return the number of rounds to skip heritage
inline int4 AddrSpace::getDelay(void) const {
  return delay;
}

/// The point at which deadcode removal is performed on varnodes within
/// a space can be set to skip some number of heritage passes, in case
/// not all the varnodes are created within a single pass. This method
/// gives the number of rounds that should be skipped before deadcode
/// elimination begins
/// \return the number of rounds to skip deadcode removal
inline int4 AddrSpace::getDeadcodeDelay(void) const {
  return deadcodedelay;
}

/// Each address space has an associated index that can be used
/// as an integer encoding of the space.
/// \return the unique index
inline int4 AddrSpace::getIndex(void) const {
  return index;
}

/// This method indicates the number of bytes contained in an
/// \e addressable \e unit of this space. This is almost always
/// 1, but can be any other small integer.
/// \return the number of bytes in a unit
inline uint4 AddrSpace::getWordSize(void) const {
  return wordsize;
}

/// Return the number of bytes needed to represent an offset
/// into this space. A space with 2^32 bytes has an address
/// size of 4, for instance.
/// \return the size of an address
inline uint4 AddrSpace::getAddrSize(void) const {
  return addressSize;
}

/// Get the highest (byte) offset possible for this space
/// \return the offset
inline uintb AddrSpace::getHighest(void) const {
  return highest;
}

/// Constant offsets are tested against \b this lower bound as a quick filter before
/// attempting to lookup symbols.
/// \return the minimum offset that will be inferred as a pointer
inline uintb AddrSpace::getPointerLowerBound(void) const {
  return pointerLowerBound;
}

/// Constant offsets are tested against \b this upper bound as a quick filter before
/// attempting to lookup symbols.
/// \return the maximum offset that will be inferred as a pointer
inline uintb AddrSpace::getPointerUpperBound(void) const {
  return pointerUpperBound;
}

/// A value of 0 means the size must match exactly. If the space is truncated, or
/// if there exists near pointers, this value may be non-zero.
inline int4 AddrSpace::getMinimumPtrSize(void) const {
  return minimumPointerSize;
}

/// Calculate \e off modulo the size of this address space in
/// order to construct the offset "equivalent" to \e off that
/// fits properly into this space
/// \param off is the offset requested
/// \return the wrapped offset
inline uintb AddrSpace::wrapOffset(uintb off) const {
  if (off <= highest)           // Comparison is unsigned
    return off;
  intb mod = (intb)(highest+1);
  intb res = (intb)off % mod;   // remainder is signed
  if (res<0)                    // Remainder may be negative
    res += mod;                 // Adding mod guarantees res is in (0,mod)
  return (uintb)res;
}

/// Return a unique short cut character that is associated
/// with this space. The shortcut character can be used by
/// the read method to quickly specify the space of an address.
/// \return the shortcut character
inline char AddrSpace::getShortcut(void) const {
  return shortcut;
}

/// During analysis, memory locations in most spaces need to
/// have their data-flow traced. This method returns \b true
/// for these spaces. For some of the special spaces, like
/// the \e constant space, tracing data flow makes no sense,
/// and this routine will return \b false.
/// \return \b true if this space's data-flow is analyzed
inline bool AddrSpace::isHeritaged(void) const {
  return ((flags & heritaged)!=0);
}

/// Most memory locations should have dead-code analysis performed,
/// and this routine will return \b true.
/// For certain special spaces like the \e constant space, dead-code
/// analysis doesn't make sense, and this routine returns \b false.
inline bool AddrSpace::doesDeadcode(void) const {
  return ((flags & does_deadcode)!=0);
}

/// This routine returns \b true, if, like most spaces, the space
/// has actual read/writeable bytes associated with it.
/// Some spaces, like the \e constant space, do not.
/// \return \b true if the space has physical data in it.
inline bool AddrSpace::hasPhysical(void) const {
  return ((flags & hasphysical) !=0);
}

/// If integer values stored in this space are encoded in this
/// space using the big endian format, then return \b true.
/// \return \b true if the space is big endian
inline bool AddrSpace::isBigEndian(void) const {
  return ((flags&big_endian)!=0);
}

/// Certain architectures or compilers specify an alignment for accessing words within the space
/// The space required for a variable must be rounded up to the alignment. For variables smaller
/// than the alignment, there is the issue of how the variable is "justified" within the aligned
/// word. Usually the justification depends on the endianness of the space, for certain weird
/// cases the justification may be the opposite of the endianness.
inline bool AddrSpace::isReverseJustified(void) const {
  return ((flags&reverse_justification)!=0);
}

/// Currently an architecture can declare only one formal stack pointer.
inline bool AddrSpace::isFormalStackSpace(void) const {
  return ((flags&formal_stackspace)!=0);
}

inline bool AddrSpace::isOverlay(void) const {
  return ((flags&overlay)!=0);
}

inline bool AddrSpace::isOverlayBase(void) const {
  return ((flags&overlaybase)!=0);
}

inline bool AddrSpace::isOtherSpace(void) const {
  return ((flags&is_otherspace)!=0);
}

/// If this method returns \b true, the logical form of this space is truncated from its actual size
/// Pointers may refer to this original size put the most significant bytes are ignored
inline bool AddrSpace::isTruncated(void) const {
  return ((flags&truncated)!=0);
}

inline bool AddrSpace::hasNearPointers(void) const {
  return ((flags&has_nearpointers)!=0);
}

/// Some spaces are "virtual", like the stack spaces, where addresses are really relative to a
/// base pointer stored in a register, like the stackpointer. This routine will return non-zero
/// if \b this space is virtual and there is 1 (or more) associated pointer registers
/// \return the number of base registers associated with this space
inline int4 AddrSpace::numSpacebase(void) const {
  return 0;
}

/// For virtual spaces, like the stack space, this routine returns the location information for
/// a base register of the space. This routine will throw an exception if the register does not exist
/// \param i is the index of the base register starting at 0
/// \return the VarnodeData that describes the register
inline const VarnodeData &AddrSpace::getSpacebase(int4 i) const {
  throw LowlevelError(name+" space is not virtual and has no associated base register");
}

/// If a stack pointer is truncated to fit the stack space, we may need to know the
/// extent of the original register
/// \param i is the index of the base register
/// \return the original register before truncation
inline const VarnodeData &AddrSpace::getSpacebaseFull(int4 i) const {
  throw LowlevelError(name+" has no truncated registers");
}

/// For stack (or other spacebase) spaces, this routine returns \b true if the space can viewed as a stack
/// and a \b push operation causes the spacebase pointer to be decreased (grow negative)
/// \return \b true if stacks grow in negative direction.
inline bool AddrSpace::stackGrowsNegative(void) const {
  return true;
}

/// If this space is virtual, then
/// this routine returns the containing address space, otherwise
/// it returns NULL.
/// \return a pointer to the containing space or NULL
inline AddrSpace *AddrSpace::getContain(void) const {
  return (AddrSpace *)0;
}

/// Given an offset into an address space based on the addressable unit size (wordsize),
/// convert it into a byte relative offset
/// \param val is the offset to convert
/// \param ws is the number of bytes in the addressable word
/// \return the scaled offset
inline uintb AddrSpace::addressToByte(uintb val,uint4 ws) {
  return val*ws;
}

/// Given an offset in an address space based on bytes, convert it
/// into an offset relative to the addressable unit of the space (wordsize)
/// \param val is the offset to convert
/// \param ws is the number of bytes in the addressable word
/// \return the scaled offset
inline uintb AddrSpace::byteToAddress(uintb val,uint4 ws) {
  return val/ws;
}

/// Given an int8 offset into an address space based on the addressable unit size (wordsize),
/// convert it into a byte relative offset
/// \param val is the offset to convert
/// \param ws is the number of bytes in the addressable word
/// \return the scaled offset
inline int8 AddrSpace::addressToByteInt(int8 val,uint4 ws) {
  return val*ws;
}

/// Given an int8 offset in an address space based on bytes, convert it
/// into an offset relative to the addressable unit of the space (wordsize)
/// \param val is the offset to convert
/// \param ws is the number of bytes in the addressable word
/// \return the scaled offset
inline int8 AddrSpace::byteToAddressInt(int8 val,uint4 ws) {
  return val/ws;
}

/// For sorting a sequence of address spaces.
/// \param a is the first space
/// \param b is the second space
/// \return \b true if the first space should come before the second
inline bool AddrSpace::compareByIndex(const AddrSpace *a,const AddrSpace *b) {
  return (a->index < b->index);
}

} // End namespace ghidra
#endif


================================================
FILE: pypcode/sleigh/translate.cc
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "translate.hh"

namespace ghidra {

// Marshaling identifiers used by the space/translator decoders below
AttributeId ATTRIB_CODE = AttributeId("code",43);
AttributeId ATTRIB_CONTAIN = AttributeId("contain",44);
AttributeId ATTRIB_DEFAULTSPACE = AttributeId("defaultspace",45);
AttributeId ATTRIB_UNIQBASE = AttributeId("uniqbase",46);

ElementId ELEM_OP = ElementId("op",27);
ElementId ELEM_SLEIGH = ElementId("sleigh",28);
ElementId ELEM_SPACE = ElementId("space",29);
ElementId ELEM_SPACEID = ElementId("spaceid",30);
ElementId ELEM_SPACES = ElementId("spaces",31);
ElementId ELEM_SPACE_BASE = ElementId("space_base",32);
ElementId ELEM_SPACE_OTHER = ElementId("space_other",33);
ElementId ELEM_SPACE_OVERLAY = ElementId("space_overlay",34);
ElementId ELEM_SPACE_UNIQUE = ElementId("space_unique",35);
ElementId ELEM_TRUNCATE_SPACE = ElementId("truncate_space",36);

/// Parse a \<truncate_space\> element to configure \b this object
/// \param decoder is the stream decoder
void TruncationTag::decode(Decoder &decoder) {
  uint4 elemId = decoder.openElement(ELEM_TRUNCATE_SPACE);
  spaceName = decoder.readString(ATTRIB_SPACE);
  size = decoder.readUnsignedInteger(ATTRIB_SIZE);
  decoder.closeElement(elemId);
}

/// Construct a virtual space. This is usually used for the stack
/// space, which is indicated by the \b isFormal parameters, but multiple such spaces are allowed.
/// \param m is the manager for this \b program \b specific address space
/// \param t is associated processor translator
/// \param nm is the name of the space
/// \param ind is the integer identifier
/// \param sz is the size of the space
/// \param base is the containing space
/// \param dl is the heritage delay
/// \param isFormal is the formal stack space indicator
SpacebaseSpace::SpacebaseSpace(AddrSpaceManager *m,const Translate *t,const string &nm,int4 ind,int4 sz,
                               AddrSpace *base,int4 dl,bool isFormal)
  : AddrSpace(m,t,IPTR_SPACEBASE,nm,t->isBigEndian(),sz,base->getWordSize(),ind,0,dl,dl)
{
  contain = base;
  hasbaseregister = false;      // No base register assigned yet
  isNegativeStack = true;       // default stack growth
  if (isFormal)
    setFlags(formal_stackspace);
}

/// This is a partial constructor, which must be followed up
/// with decode in order to fillin the rest of the spaces
/// attributes
/// \param m is the associated address space manager
/// \param t is the associated processor translator
SpacebaseSpace::SpacebaseSpace(AddrSpaceManager *m,const Translate *t)
  : AddrSpace(m,t,IPTR_SPACEBASE)
{
  hasbaseregister = false;
  isNegativeStack = true;
  setFlags(programspecific);
}

/// This routine sets the base register associated with this \b virtual space
/// It will throw an exception if something tries to set two (different) base registers
/// \param data is the location data for the base register
/// \param truncSize is the size of the space covered by the register
/// \param stackGrowth is \b true if the stack which this register manages grows in a negative direction
void SpacebaseSpace::setBaseRegister(const VarnodeData &data,int4 truncSize,bool stackGrowth) {
  if (hasbaseregister) {
    // Re-registering the identical base register is allowed; any difference is an error
    if ((baseloc != data)||(isNegativeStack != stackGrowth))
      throw LowlevelError("Attempt to assign more than one base register to space: "+getName());
  }
  hasbaseregister = true;
  isNegativeStack = stackGrowth;
  baseOrig = data;              // Keep the untruncated register for getSpacebaseFull()
  baseloc = data;
  if (truncSize != baseloc.size) {
    // On big endian hosts the significant (kept) bytes sit at the high end of the register
    if (baseloc.space->isBigEndian())
      baseloc.offset += (baseloc.size - truncSize);
    baseloc.size = truncSize;
  }
}

int4 SpacebaseSpace::numSpacebase(void) const {
  return hasbaseregister ? 1 : 0;
}

const VarnodeData &SpacebaseSpace::getSpacebase(int4 i) const {
  if ((!hasbaseregister)||(i!=0))
    throw LowlevelError("No base register specified for space: "+getName());
  return baseloc;
}

const VarnodeData &SpacebaseSpace::getSpacebaseFull(int4 i) const {
  if ((!hasbaseregister)||(i!=0))
    throw LowlevelError("No base register specified for space: "+getName());
  return baseOrig;
}

void SpacebaseSpace::decode(Decoder &decoder) {
  uint4 elemId = decoder.openElement(ELEM_SPACE_BASE);
  decodeBasicAttributes(decoder);
  contain = decoder.readSpace(ATTRIB_CONTAIN);
  decoder.closeElement(elemId);
}

/// The \e join space range maps to the underlying pieces in a natural endian aware way.
/// Given an offset in the range, figure out what address it is mapping to.
/// The particular piece is passed back as an index, and the Address is returned.
/// \param offset is the offset within \b this range to map
/// \param pos will hold the passed back piece index
/// \return the Address mapped to
Address JoinRecord::getEquivalentAddress(uintb offset,int4 &pos) const {
  if (offset < unified.offset)
    return Address();           // offset comes before this range
  int4 smallOff = (int4)(offset - unified.offset);
  // NOTE(review): the loop headers below appear garbled by the file extraction
  // (text between '<' and '>' was stripped, collapsing the big-endian forward
  // scan and the little-endian reverse scan into one fragment). Compare with
  // the upstream Ghidra translate.cc before trusting this text.
  if (pieces[0].space->isBigEndian()) { for(pos=0;pos= 0; --pos) {
    int4 pieceSize = pieces[pos].size;
    if (smallOff < pieceSize)
      break;
    smallOff -= pieceSize;
  }
  if (pos < 0)
    return Address();           // offset comes after this range
  }
  return Address(pieces[pos].space,pieces[pos].offset + smallOff);
}

/// Allow sorting on JoinRecords so that a collection of pieces can be quickly mapped to
/// its logical whole, specified with a join address
bool JoinRecord::operator<(const JoinRecord &op2) const {
  // Some joins may have same piece but different unified size (floating point)
  if (unified.size != op2.unified.size) // Compare size first
    return (unified.size < op2.unified.size);
  // Lexicographic sort on pieces
  int4 i=0;
  for(;;) {
    if (pieces.size()==i) {
      return (op2.pieces.size()>i); // If more pieces in op2, it is bigger (return true), if same number this==op2, return false
    }
    if (op2.pieces.size()==i)
      return false;             // More pieces in -this-, so it is bigger, return false
    if (pieces[i] != op2.pieces[i])
      return (pieces[i] < op2.pieces[i]);
    i += 1;
  }
}

/// Assuming the given list of VarnodeData go from most significant to least significant,
/// merge any contiguous elements in the list. Varnodes that are not in the \e stack address space
/// are only merged if the resulting byte range has a formal register name.
/// \param seq is the given list of VarnodeData
/// \param trans is the language to use for register names
// NOTE(review): this function is also garbled by the extraction — the
// template argument of 'vector' and the loop/merge conditions between
// "while(i" and "isBigEndian()" were stripped. Reconstruct from upstream.
void JoinRecord::mergeSequence(vector &seq,const Translate *trans) {
  int4 i=1;
  while(i= seq.size()) return;
  vector res;
  i = 1;
  res.push_back(seq.front());
  bool lastIsInformal = false;
  while(iisBigEndian() ? hi.offset : lo.offset;
  hi.size += lo.size;
  if (hi.space->getType() != IPTR_SPACEBASE) {
    lastIsInformal = trans->getExactRegisterName(hi.space, hi.offset, hi.size).size() == 0;
  }
  } else {
    if (lastIsInformal)
      break;
    res.push_back(lo);
  }
  i += 1;
  }
  if (lastIsInformal)           // If the merge contains an informal register
    return;                     // throw it out and keep the original sequence
  seq = res;
}

/// Initialize manager containing no address spaces. All the cached space slots are set to null
AddrSpaceManager::AddrSpaceManager(void) {
  defaultcodespace = (AddrSpace *)0;
  defaultdataspace = (AddrSpace *)0;
  constantspace = (AddrSpace *)0;
  iopspace = (AddrSpace *)0;
  fspecspace = (AddrSpace *)0;
  joinspace = (AddrSpace *)0;
  stackspace = (AddrSpace *)0;
  uniqspace = (AddrSpace *)0;
  joinallocate = 0;
}

/// The initialization of address spaces is the same across all
/// variants of the Translate object. This routine initializes
/// a single address space from a decoder element. It knows
/// which class derived from AddrSpace to instantiate based on
/// the ElementId.
/// \param decoder is the stream decoder
/// \param trans is the translator object to be associated with the new space
/// \return a pointer to the initialized AddrSpace
AddrSpace *AddrSpaceManager::decodeSpace(Decoder &decoder,const Translate *trans) {
  uint4 elemId = decoder.peekElement();
  AddrSpace *res;
  // Dispatch on the element tag to pick the concrete AddrSpace subclass
  if (elemId == ELEM_SPACE_BASE)
    res = new SpacebaseSpace(this,trans);
  else if (elemId == ELEM_SPACE_UNIQUE)
    res = new UniqueSpace(this,trans);
  else if (elemId == ELEM_SPACE_OTHER)
    res = new OtherSpace(this,trans);
  else if (elemId == ELEM_SPACE_OVERLAY)
    res = new OverlaySpace(this,trans);
  else
    res = new AddrSpace(this,trans,IPTR_PROCESSOR);
  res->decode(decoder);
  return res;
}

/// This routine initializes (almost) all the address spaces used
/// for a particular processor by using a \b \<spaces\> element,
/// which contains child elements for the specific address spaces.
/// This also instantiates the builtin \e constant space.
/// It should probably also instantiate the \b iop, \b fspec, and \b join
/// spaces, but this is currently done by the Architecture class.
/// \param decoder is the stream decoder
/// \param trans is the processor translator to be associated with the spaces
void AddrSpaceManager::decodeSpaces(Decoder &decoder,const Translate *trans) {
  // The first space should always be the constant space
  insertSpace(new ConstantSpace(this,trans));

  uint4 elemId = decoder.openElement(ELEM_SPACES);
  string defname = decoder.readString(ATTRIB_DEFAULTSPACE);
  while(decoder.peekElement() != 0) {
    AddrSpace *spc = decodeSpace(decoder,trans);
    insertSpace(spc);
  }
  decoder.closeElement(elemId);
  AddrSpace *spc = getSpaceByName(defname);
  if (spc == (AddrSpace *)0)
    throw LowlevelError("Bad 'defaultspace' attribute: "+defname);
  setDefaultCodeSpace(spc->getIndex());
}

/// Once all the address spaces have been initialized, this routine
/// should be called once to establish the official \e default
/// space for the processor, via its index. Should only be
/// called during initialization.
/// \param index is the index of the desired default space
void AddrSpaceManager::setDefaultCodeSpace(int4 index) {
  if (defaultcodespace != (AddrSpace *)0)
    throw LowlevelError("Default space set multiple times");
  if (baselist.size()<=index || baselist[index] == (AddrSpace *)0)
    throw LowlevelError("Bad index for default space");
  defaultcodespace = baselist[index];
  defaultdataspace = defaultcodespace;  // By default the default data space is the same
}

/// If the architecture has different code and data spaces, this routine can be called
/// to set the \e data space after the \e code space has been set.
/// \param index is the index of the desired default space
void AddrSpaceManager::setDefaultDataSpace(int4 index) {
  if (defaultcodespace == (AddrSpace *)0)
    throw LowlevelError("Default data space must be set after the code space");
  if (baselist.size()<=index || baselist[index] == (AddrSpace *)0)
    throw LowlevelError("Bad index for default data space");
  defaultdataspace = baselist[index];
}

/// For spaces with alignment restrictions, the address of a small variable must be justified
/// within a larger aligned memory word, usually either to the left boundary for little endian encoding
/// or to the right boundary for big endian encoding. Some compilers justify small variables to
/// the opposite side of the one indicated by the endianness. Setting this property on a space
/// causes the decompiler to use this justification
void AddrSpaceManager::setReverseJustified(AddrSpace *spc) {
  spc->setFlags(AddrSpace::reverse_justification);
}

/// This adds a previously instantiated address space (AddrSpace)
/// to the model for this processor. It checks a set of
/// indexing and naming conventions for the space and throws
/// an exception if the conventions are violated. Should
/// only be called during initialization.
/// \todo This really shouldn't be public.
Need to move the /// allocation of \b iop, \b fspec, and \b join out of Architecture /// \param spc the address space to insert void AddrSpaceManager::insertSpace(AddrSpace *spc) { bool nameTypeMismatch = false; bool duplicateName = false; bool duplicateId = false; switch(spc->getType()) { case IPTR_CONSTANT: if (spc->getName() != ConstantSpace::NAME) nameTypeMismatch = true; if (spc->index != ConstantSpace::INDEX) throw LowlevelError("const space must be assigned index 0"); constantspace = spc; break; case IPTR_INTERNAL: if (spc->getName() != UniqueSpace::NAME) nameTypeMismatch = true; if (uniqspace != (AddrSpace *)0) duplicateName = true; uniqspace = spc; break; case IPTR_FSPEC: if (spc->getName() != "fspec") nameTypeMismatch = true; if (fspecspace != (AddrSpace *)0) duplicateName = true; fspecspace = spc; break; case IPTR_JOIN: if (spc->getName() != JoinSpace::NAME) nameTypeMismatch = true; if (joinspace != (AddrSpace *)0) duplicateName = true; joinspace = spc; break; case IPTR_IOP: if (spc->getName() != "iop") nameTypeMismatch = true; if (iopspace != (AddrSpace *)0) duplicateName = true; iopspace = spc; break; case IPTR_SPACEBASE: if (spc->getName() == "stack") { if (stackspace != (AddrSpace *)0) duplicateName = true; stackspace = spc; } // fallthru case IPTR_PROCESSOR: if (spc->isOverlay()) { // If this is a new overlay space spc->getContain()->setFlags(AddrSpace::overlaybase); // Mark the base as being overlayed } else if (spc->isOtherSpace()) { if (spc->index != OtherSpace::INDEX) throw LowlevelError("OTHER space must be assigned index 1"); } break; } if (baselist.size() <= spc->index) baselist.resize(spc->index+1, (AddrSpace *)0); duplicateId = baselist[spc->index] != (AddrSpace *)0; if (!nameTypeMismatch && !duplicateName && !duplicateId) { duplicateName = !name2Space.insert(pair(spc->getName(),spc)).second; } if (nameTypeMismatch || duplicateName || duplicateId) { string errMsg = "Space " + spc->getName(); if (nameTypeMismatch) errMsg = errMsg + " was 
initialized with wrong type"; if (duplicateName) errMsg = errMsg + " was initialized more than once"; if (duplicateId) errMsg = errMsg + " was assigned as id duplicating: "+baselist[spc->index]->getName(); if (spc->refcount == 0) delete spc; spc = (AddrSpace *)0; throw LowlevelError(errMsg); } baselist[spc->index] = spc; spc->refcount += 1; assignShortcut(spc); } /// Different managers may need to share the same spaces. I.e. if different programs being /// analyzed share the same processor. This routine pulls in a reference of every space in -op2- /// in order to manage it from within -this- /// \param op2 is a pointer to space manager being copied void AddrSpaceManager::copySpaces(const AddrSpaceManager *op2) { // Insert every space in -op2- into -this- manager for(int4 i=0;ibaselist.size();++i) { AddrSpace *spc = op2->baselist[i]; if (spc != (AddrSpace *)0) insertSpace(spc); } setDefaultCodeSpace(op2->getDefaultCodeSpace()->getIndex()); setDefaultDataSpace(op2->getDefaultDataSpace()->getIndex()); } /// Perform the \e privileged act of associating a base register with an existing \e virtual space /// \param basespace is the virtual space /// \param ptrdata is the location data for the base register /// \param truncSize is the size of the space covered by the base register /// \param stackGrowth is true if the stack grows "normally" towards address 0 void AddrSpaceManager::addSpacebasePointer(SpacebaseSpace *basespace,const VarnodeData &ptrdata,int4 truncSize,bool stackGrowth) { basespace->setBaseRegister(ptrdata,truncSize,stackGrowth); } /// Provide a new specialized resolver for a specific AddrSpace. The manager takes ownership of resolver. 
/// \param spc is the space to which the resolver is associated /// \param rsolv is the new resolver object void AddrSpaceManager::insertResolver(AddrSpace *spc,AddressResolver *rsolv) { int4 ind = spc->getIndex(); while(resolvelist.size() <= ind) resolvelist.push_back((AddressResolver *)0); if (resolvelist[ind] != (AddressResolver *)0) delete resolvelist[ind]; resolvelist[ind] = rsolv; } /// This method establishes for a single address space, what range of constants are checked /// as possible symbol starts, when it is not known apriori that a constant is a pointer. /// \param range is the range of values for a single address space void AddrSpaceManager::setInferPtrBounds(const Range &range) { range.getSpace()->pointerLowerBound = range.getFirst(); range.getSpace()->pointerUpperBound = range.getLast(); } /// Base destructor class, cleans up AddrSpace pointers which /// must be explicited created via \e new AddrSpaceManager::~AddrSpaceManager(void) { for(vector::iterator iter=baselist.begin();iter!=baselist.end();++iter) { AddrSpace *spc = *iter; if (spc == (AddrSpace *)0) continue; if (spc->refcount > 1) spc->refcount -= 1; else delete spc; } for(int4 i=0;ishortcut != ' ') { // If the shortcut is already assigned shortcut2Space.insert(pair(spc->shortcut,spc)); return; } char shortcut; switch(spc->getType()) { case IPTR_CONSTANT: shortcut = '#'; break; case IPTR_PROCESSOR: if (spc->getName() == "register") shortcut = '%'; else shortcut = spc->getName()[0]; break; case IPTR_SPACEBASE: shortcut = 's'; break; case IPTR_INTERNAL: shortcut = 'u'; break; case IPTR_FSPEC: shortcut = 'f'; break; case IPTR_JOIN: shortcut = 'j'; break; case IPTR_IOP: shortcut = 'i'; break; default: shortcut = 'x'; break; } if (shortcut >= 'A' && shortcut <= 'Z') shortcut += 0x20; int4 collisionCount = 0; while(!shortcut2Space.insert(pair(shortcut,spc)).second) { collisionCount += 1; if (collisionCount >26) { // Could not find a unique shortcut, but we just re-use 'z' as we // can always use 
the long form to specify the address if there are really so many // spaces that need to be distinguishable (in the console mode) spc->shortcut = 'z'; return; } shortcut += 1; if (shortcut < 'a' || shortcut > 'z') shortcut = 'a'; } spc->shortcut = (char)shortcut; } /// \param spc is the AddrSpace to mark /// \param size is the (minimum) size of a near pointer in bytes void AddrSpaceManager::markNearPointers(AddrSpace *spc,int4 size) { spc->setFlags(AddrSpace::has_nearpointers); if (spc->minimumPointerSize == 0 && spc->addressSize != size) spc->minimumPointerSize = size; } /// All address spaces have a unique name associated with them. /// This routine retrieves the AddrSpace object based on the /// desired name. /// \param nm is the name of the address space /// \return a pointer to the AddrSpace object AddrSpace *AddrSpaceManager::getSpaceByName(const string &nm) const { map::const_iterator iter = name2Space.find(nm); if (iter == name2Space.end()) return (AddrSpace *)0; return (*iter).second; } /// All address spaces have a unique shortcut (ASCII) character /// assigned to them. This routine retrieves an AddrSpace object /// given a specific shortcut. /// \param sc is the shortcut character /// \return a pointer to an AddrSpace AddrSpace *AddrSpaceManager::getSpaceByShortcut(char sc) const { map::const_iterator iter; iter = shortcut2Space.find(sc); if (iter == shortcut2Space.end()) return (AddrSpace *)0; return (*iter).second; } /// \brief Resolve a native constant into an Address /// /// If there is a special resolver for the AddrSpace, this is invoked, otherwise /// basic wordsize conversion and wrapping is performed. If the address encoding is /// partial (as in a \e near pointer) and the full encoding can be recovered, it is passed back. /// The \e sz parameter indicates the number of bytes in constant and is used to determine if /// the constant is a partial or full pointer encoding. A value of -1 indicates the value is /// known to be a full encoding. 
/// \param spc is the space to generate the address from /// \param val is the constant encoding of the address /// \param sz is the size of the constant encoding (or -1) /// \param point is the context address (for recovering full encoding info if necessary) /// \param fullEncoding is used to pass back the recovered full encoding of the pointer /// \return the formal Address associated with the encoding Address AddrSpaceManager::resolveConstant(AddrSpace *spc,uintb val,int4 sz,const Address &point,uintb &fullEncoding) const { int4 ind = spc->getIndex(); if (ind < resolvelist.size()) { AddressResolver *resolve = resolvelist[ind]; if (resolve != (AddressResolver *)0) return resolve->resolve(val,sz,point,fullEncoding); } fullEncoding = val; val = AddrSpace::addressToByte(val,spc->getWordSize()); val = spc->wrapOffset(val); return Address(spc,val); } /// Get the next space in the absolute order of addresses. /// This ordering is determined by the AddrSpace index. /// \param spc is the pointer to the space being queried /// \return the pointer to the next space in absolute order AddrSpace *AddrSpaceManager::getNextSpaceInOrder(AddrSpace *spc) const { if (spc == (AddrSpace *)0) { return baselist[0]; } if (spc == (AddrSpace *) ~((uintp)0)) { return (AddrSpace *)0; } int4 index = spc->getIndex() + 1; while (index < baselist.size()) { AddrSpace *res = baselist[index]; if (res != (AddrSpace *)0) return res; index += 1; } return (AddrSpace *) ~((uintp)0); } /// Given a list of memory locations, the \e pieces, either find a pre-existing JoinRecord or /// create a JoinRecord that represents the logical joining of the pieces. The pieces must /// be in order from most significant to least significant. 
/// \param pieces if the list memory locations to be joined /// \param logicalsize of a \e single \e piece join, or zero /// \return a pointer to the JoinRecord JoinRecord *AddrSpaceManager::findAddJoin(const vector &pieces,uint4 logicalsize) { // Find a pre-existing split record, or create a new one corresponding to the input -pieces- // If -logicalsize- is 0, calculate logical size as sum of pieces if (pieces.size() == 0) throw LowlevelError("Cannot create a join without pieces"); if ((pieces.size()==1)&&(logicalsize==0)) throw LowlevelError("Cannot create a single piece join without a logical size"); uint4 totalsize; if (logicalsize != 0) { if (pieces.size() != 1) throw LowlevelError("Cannot specify logical size for multiple piece join"); totalsize = logicalsize; } else { totalsize = 0; for(int4 i=0;i::const_iterator iter; iter = splitset.find(&testnode); if (iter != splitset.end()) // If already in the set return *iter; JoinRecord *newjoin = new JoinRecord(); newjoin->pieces = pieces; uint4 roundsize = (totalsize + 15) & ~((uint4)0xf); // Next biggest multiple of 16 newjoin->unified.space = joinspace; newjoin->unified.offset = joinallocate; joinallocate += roundsize; newjoin->unified.size = totalsize; splitset.insert(newjoin); splitlist.push_back(newjoin); return splitlist.back(); } /// Given a specific \e offset into the \e join address space, recover the JoinRecord that /// contains the offset, as a range in the \e join address space. If there is no existing /// record, null is returned. 
/// \param offset is an offset into the join space /// \return the JoinRecord containing that offset or null JoinRecord *AddrSpaceManager::findJoinInternal(uintb offset) const { int4 min=0; int4 max=splitlist.size()-1; while(min<=max) { // Binary search int4 mid = (min+max)/2; JoinRecord *rec = splitlist[mid]; uintb val = rec->unified.offset; if (val + rec->unified.size <= offset) min = mid + 1; else if (val > offset) max = mid - 1; else return rec; } return (JoinRecord *)0; } /// Given a specific \e offset into the \e join address space, recover the JoinRecord that /// lists the pieces corresponding to that offset. The offset must originally have come from /// a JoinRecord returned by \b findAddJoin, otherwise this method throws an exception. /// \param offset is an offset into the join space /// \return the JoinRecord for that offset JoinRecord *AddrSpaceManager::findJoin(uintb offset) const { int4 min=0; int4 max=splitlist.size()-1; while(min<=max) { // Binary search int4 mid = (min+max)/2; JoinRecord *rec = splitlist[mid]; uintb val = rec->unified.offset; if (val == offset) return rec; if (val < offset) min = mid + 1; else max = mid - 1; } throw LowlevelError("Unlinked join address"); } /// Set the number of passes for a specific AddrSpace before deadcode removal is allowed /// for that space. 
/// \param spc is the AddrSpace to change /// \param delaydelta is the number of rounds to the delay should be set to void AddrSpaceManager::setDeadcodeDelay(AddrSpace *spc,int4 delaydelta) { spc->deadcodedelay = delaydelta; } /// Mark the named space as truncated from its original size /// \param tag is a description of the space and how it should be truncated void AddrSpaceManager::truncateSpace(const TruncationTag &tag) { AddrSpace *spc = getSpaceByName(tag.getName()); if (spc == (AddrSpace *)0) throw LowlevelError("Unknown space in command: "+tag.getName()); spc->truncateSpace(tag.getSize()); } /// This handles the situation where we need to find a logical address to hold the lower /// precision floating-point value that is stored in a bigger register /// If the logicalsize (precision) requested matches the -realsize- of the register /// just return the real address. Otherwise construct a join address to hold the logical value /// \param realaddr is the address of the real floating-point register /// \param realsize is the size of the real floating-point register /// \param logicalsize is the size (lower precision) size of the logical value Address AddrSpaceManager::constructFloatExtensionAddress(const Address &realaddr,int4 realsize, int4 logicalsize) { if (logicalsize == realsize) return realaddr; vector pieces; pieces.emplace_back(); pieces.back().space = realaddr.getSpace(); pieces.back().offset = realaddr.getOffset(); pieces.back().size = realsize; JoinRecord *join = findAddJoin(pieces,logicalsize); return join->getUnified().getAddr(); } /// This handles the common case, of trying to find a join address given a high location and a low /// location. This may not return an address in the \e join address space. 
It checks for the case /// where the two pieces are contiguous locations in a mappable space, in which case it just returns /// the containing address /// \param translate is the Translate object used to find registers /// \param hiaddr is the address of the most significant piece to be joined /// \param hisz is the size of the most significant piece /// \param loaddr is the address of the least significant piece /// \param losz is the size of the least significant piece /// \return an address representing the start of the joined range Address AddrSpaceManager::constructJoinAddress(const Translate *translate, const Address &hiaddr,int4 hisz, const Address &loaddr,int4 losz) { spacetype hitp = hiaddr.getSpace()->getType(); spacetype lotp = loaddr.getSpace()->getType(); bool usejoinspace = true; if (((hitp != IPTR_SPACEBASE)&&(hitp != IPTR_PROCESSOR))|| ((lotp != IPTR_SPACEBASE)&&(lotp != IPTR_PROCESSOR))) throw LowlevelError("Trying to join in appropriate locations"); if ((hitp == IPTR_SPACEBASE)||(lotp == IPTR_SPACEBASE)|| (hiaddr.getSpace() == getDefaultCodeSpace())|| (loaddr.getSpace() == getDefaultCodeSpace())) usejoinspace = false; if (hiaddr.isContiguous(hisz,loaddr,losz)) { // If we are contiguous if (!usejoinspace) { // and in a mappable space, just return the earliest address if (hiaddr.isBigEndian()) return hiaddr; return loaddr; } else { // If we are in a non-mappable (register) space, check to see if a parent register exists if (hiaddr.isBigEndian()) { if (translate->getRegisterName(hiaddr.getSpace(),hiaddr.getOffset(),(hisz+losz)).size() != 0) return hiaddr; } else { if (translate->getRegisterName(loaddr.getSpace(),loaddr.getOffset(),(hisz+losz)).size() != 0) return loaddr; } } } // Otherwise construct a formal JoinRecord vector pieces; pieces.emplace_back(); pieces.emplace_back(); pieces[0].space = hiaddr.getSpace(); pieces[0].offset = hiaddr.getOffset(); pieces[0].size = hisz; pieces[1].space = loaddr.getSpace(); pieces[1].offset = loaddr.getOffset(); 
pieces[1].size = losz; JoinRecord *join = findAddJoin(pieces,0); return join->getUnified().getAddr(); } /// If an Address in the \e join AddressSpace is shifted from its original offset, it may no /// longer have a valid JoinRecord. The shift or size change may even make the address of /// one of the pieces a more natural representation. Given a new Address and size, this method /// decides if there is a matching JoinRecord. If not it either constructs a new JoinRecord or /// computes the address within the containing piece. The given Address is changed if necessary /// either to the offset corresponding to the new JoinRecord or to a normal \e non-join Address. /// \param addr is the given Address /// \param size is the size of the range in bytes void AddrSpaceManager::renormalizeJoinAddress(Address &addr,int4 size) { JoinRecord *joinRecord = findJoinInternal(addr.getOffset()); if (joinRecord == (JoinRecord *)0) throw LowlevelError("Join address not covered by a JoinRecord"); if (addr.getOffset() == joinRecord->unified.offset && size == joinRecord->unified.size) return; // JoinRecord matches perfectly, no change necessary int4 pos1; Address addr1 = joinRecord->getEquivalentAddress(addr.getOffset(), pos1); int4 pos2; Address addr2 = joinRecord->getEquivalentAddress(addr.getOffset() + (size-1), pos2); if (addr2.isInvalid()) throw LowlevelError("Join address range not covered"); if (pos1 == pos2) { addr = addr1; return; } vector newPieces; int4 sizeTrunc1 = (int4)(addr1.getOffset() - joinRecord->pieces[pos1].offset); int4 sizeTrunc2 = joinRecord->pieces[pos2].size - (int4)(addr2.getOffset() - joinRecord->pieces[pos2].offset) - 1; if (pos2 < pos1) { // Little endian newPieces.push_back(joinRecord->pieces[pos2]); pos2 += 1; while(pos2 <= pos1) { newPieces.push_back(joinRecord->pieces[pos2]); pos2 += 1; } newPieces.back().offset = addr1.getOffset(); newPieces.back().size -= sizeTrunc1; newPieces.front().size -= sizeTrunc2; } else { 
newPieces.push_back(joinRecord->pieces[pos1]); pos1 += 1; while(pos1 <= pos2) { newPieces.push_back(joinRecord->pieces[pos1]); pos1 += 1; } newPieces.front().offset = addr1.getOffset(); newPieces.front().size -= sizeTrunc1; newPieces.back().size -= sizeTrunc2; } JoinRecord *newJoinRecord = findAddJoin(newPieces, 0); addr = Address(newJoinRecord->unified.space,newJoinRecord->unified.offset); } /// The string \e must contain a hexadecimal offset. The offset may be optionally prepended with "0x". /// The string may optionally start with the name of the address space to associate with the offset, followed /// by ':' to separate it from the offset. If the name is not present, the default data space is assumed. /// \param val is the string to parse /// \return the parsed address Address AddrSpaceManager::parseAddressSimple(const string &val) { string::size_type col = val.find(':'); AddrSpace *spc; if (col==string::npos) { spc = getDefaultDataSpace(); col = 0; } else { string spcName = val.substr(0,col); spc = getSpaceByName(spcName); if (spc == (AddrSpace *)0) throw LowlevelError("Unknown address space: " + spcName); col += 1; } if (col + 2 <= val.size()) { if (val[col] == '0' && val[col+1] == 'x') col += 2; } istringstream s(val.substr(col)); uintb off; s >> hex >> off; return Address(spc,AddrSpace::addressToByte(off, spc->getWordSize())); } /// This constructs only a shell for the Translate object. It /// won't be usable until it is initialized for a specific processor /// The main entry point for this is the Translate::initialize method, /// which must be overridden by a derived class Translate::Translate(void) { target_isbigendian = false; unique_base=0; alignment = 1; } /// If no floating-point format objects were registered by the \b initialize method, this /// method will fill in some suitable default formats. These defaults are based on /// the 4-byte and 8-byte encoding specified by the IEEE 754 standard. 
void Translate::setDefaultFloatFormats(void) { if (floatformats.empty()) { // Default IEEE 754 float formats floatformats.push_back(FloatFormat(4)); floatformats.push_back(FloatFormat(8)); } } /// The pcode model for floating point encoding assumes that a /// consistent encoding is used for all values of a given size. /// This routine fetches the FloatFormat object given the size, /// in bytes, of the desired encoding. /// \param size is the size of the floating-point value in bytes /// \return a pointer to the floating-point format const FloatFormat *Translate::getFloatFormat(int4 size) const { vector::const_iterator iter; for(iter=floatformats.begin();iter!=floatformats.end();++iter) { if ((*iter).getSize() == size) return &(*iter); } return (const FloatFormat *)0; } /// A convenience method for passing around p-code operations via stream. /// A single p-code operation is parsed from an \ element and /// returned to the application via the PcodeEmit::dump method. /// \param addr is the address (of the instruction) to associate with the p-code op /// \param decoder is the stream decoder void PcodeEmit::decodeOp(const Address &addr,Decoder &decoder) { int4 opcode; int4 isize; VarnodeData outvar; VarnodeData invar[16]; VarnodeData *outptr; uint4 elemId = decoder.openElement(ELEM_OP); isize = decoder.readSignedInteger(ATTRIB_SIZE); outptr = &outvar; if (isize <= 16) opcode = PcodeOpRaw::decode(decoder, isize, invar, &outptr); else { vector varStorage(isize,VarnodeData()); opcode = PcodeOpRaw::decode(decoder, isize, varStorage.data(), &outptr); } decoder.closeElement(elemId); dump(addr,(OpCode)opcode,outptr,invar,isize); } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/translate.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/// \file translate.hh
/// \brief Classes for disassembly and pcode generation
///
/// Classes for keeping track of spaces and registers (for a single architecture).

#ifndef __TRANSLATE_HH__
#define __TRANSLATE_HH__

#include "pcoderaw.hh"
#include "float.hh"

namespace ghidra {

extern AttributeId ATTRIB_CODE;		///< Marshaling attribute "code"
extern AttributeId ATTRIB_CONTAIN;	///< Marshaling attribute "contain"
extern AttributeId ATTRIB_DEFAULTSPACE;	///< Marshaling attribute "defaultspace"
extern AttributeId ATTRIB_UNIQBASE;	///< Marshaling attribute "uniqbase"

extern ElementId ELEM_OP;		///< Marshaling element \<op\>
extern ElementId ELEM_SLEIGH;		///< Marshaling element \<sleigh\>
extern ElementId ELEM_SPACE;		///< Marshaling element \<space\>
extern ElementId ELEM_SPACEID;		///< Marshaling element \<spaceid\>
extern ElementId ELEM_SPACES;		///< Marshaling element \<spaces\>
extern ElementId ELEM_SPACE_BASE;	///< Marshaling element \<space_base\>
extern ElementId ELEM_SPACE_OTHER;	///< Marshaling element \<space_other\>
extern ElementId ELEM_SPACE_OVERLAY;	///< Marshaling element \<space_overlay\>
extern ElementId ELEM_SPACE_UNIQUE;	///< Marshaling element \<space_unique\>
extern ElementId ELEM_TRUNCATE_SPACE;	///< Marshaling element \<truncate_space\>

// Some errors specific to the translation unit

/// \brief Exception for encountering unimplemented pcode
///
/// This error is thrown when a particular machine instruction
/// cannot be translated into pcode. This particular error
/// means that the particular instruction being decoded was valid,
/// but the system doesn't know how to represent it in pcode.
struct UnimplError : public LowlevelError {
  int4 instruction_length;	///< Number of bytes in the unimplemented instruction
  /// \brief Constructor
  ///
  /// \param s is a more verbose description of the error
  /// \param l is the length (in bytes) of the unimplemented instruction
  UnimplError(const string &s,int4 l) : LowlevelError(s) { instruction_length = l; }
};

/// \brief Exception for bad instruction data
///
/// This error is thrown when the system cannot decode data
/// for a particular instruction.  This usually means that the
/// data is not really a machine instruction, but may indicate
/// that the system is unaware of the particular instruction.
struct BadDataError : public LowlevelError {
  /// \brief Constructor
  ///
  /// \param s is a more verbose description of the error
  BadDataError(const string &s) : LowlevelError(s) {}
};

class Translate;		// Forward declaration for TruncationTag/PcodeEmit clients

/// \brief Object for describing how a space should be truncated
///
/// This can turn up in various XML configuration files and essentially acts
/// as a command to override the size of an address space as defined by the architecture
class TruncationTag {
  string spaceName;	///< Name of space to be truncated
  uint4 size;		///< Size truncated addresses into the space
public:
  void decode(Decoder &decoder);	///< Restore \b this from a stream
  const string &getName(void) const { return spaceName; }	///< Get name of address space being truncated
  uint4 getSize(void) const { return size; }	///< Size (of pointers) for new truncated space
};

/// \brief Abstract class for emitting pcode to an application
///
/// Translation engines pass back the generated pcode for an
/// instruction to the application using this class.
class PcodeEmit {
public:
  virtual ~PcodeEmit(void) {}	///< Virtual destructor

  /// \brief The main pcode emit method.
  ///
  /// A single pcode instruction is returned to the application
  /// via this method.  Particular applications override it
  /// to tailor how the operations are used.
/// \param addr is the Address of the machine instruction /// \param opc is the opcode of the particular pcode instruction /// \param outvar if not \e null is a pointer to data about the /// output varnode /// \param vars is a pointer to an array of VarnodeData for each /// input varnode /// \param isize is the number of input varnodes virtual void dump(const Address &addr,OpCode opc,VarnodeData *outvar,VarnodeData *vars,int4 isize)=0; /// Emit pcode directly from an \ element void decodeOp(const Address &addr,Decoder &decoder); }; /// \brief Abstract class for emitting disassembly to an application /// /// Translation engines pass back the disassembly character data /// for decoded machine instructions to an application using this class. class AssemblyEmit { public: virtual ~AssemblyEmit(void) {} ///< Virtual destructor /// \brief The main disassembly emitting method. /// /// The disassembly strings for a single machine instruction /// are passed back to an application through this method. /// Particular applications can tailor the use of the disassembly /// by overriding this method. /// \param addr is the Address of the machine instruction /// \param mnem is the decoded instruction mnemonic /// \param body is the decode body (or operands) of the instruction virtual void dump(const Address &addr,const string &mnem,const string &body)=0; }; /// \brief Abstract class for converting native constants to addresses /// /// This class is used if there is a special calculation to get from a constant embedded /// in the code being analyzed to the actual Address being referred to. This is used especially /// in the case of a segmented architecture, where "near" pointers must be extended to a full address /// with implied segment information. class AddressResolver { public: virtual ~AddressResolver(void) {} ///> Virtual destructor /// \brief The main resolver method. /// /// Given a native constant in a specific context, resolve what address is being referred to. 
/// The constant can be a partially encoded pointer, in which case the full pointer encoding /// is recovered as well as the address. Whether or not a pointer is partially encoded or not /// is determined by the \e sz parameter, indicating the number of bytes in the pointer. A value /// of -1 here indicates that the pointer is known to be a full encoding. /// \param val is constant to be resolved to an address /// \param sz is the size of \e val in context (or -1). /// \param point is the address at which this constant is being used /// \param fullEncoding is used to hold the full pointer encoding if \b val is a partial encoding /// \return the resolved Address virtual Address resolve(uintb val,int4 sz,const Address &point,uintb &fullEncoding)=0; }; /// \brief A virtual space \e stack space /// /// In a lot of analysis situations it is convenient to extend /// the notion of an address space to mean bytes that are indexed /// relative to some base register. The canonical example of this /// is the \b stack space, which models the concept of local /// variables stored on the stack. An address of (\b stack, 8) /// might model the address of a function parameter on the stack /// for instance, and (\b stack, 0xfffffff4) might be the address /// of a local variable. A space like this is inherently \e virtual /// and contained within whatever space is being indexed into. 
class SpacebaseSpace : public AddrSpace {
  friend class AddrSpaceManager;
  AddrSpace *contain;		///< Containing space
  bool hasbaseregister;		///< true if a base register has been attached
  bool isNegativeStack;		///< true if stack grows in negative direction
  VarnodeData baseloc;		///< location data of the base register
  VarnodeData baseOrig;		///< Original base register before any truncation
  void setBaseRegister(const VarnodeData &data,int4 origSize,bool stackGrowth);	///< Set the base register at time space is created
public:
  SpacebaseSpace(AddrSpaceManager *m,const Translate *t,const string &nm,int4 ind,int4 sz,AddrSpace *base,int4 dl,bool isFormal);
  SpacebaseSpace(AddrSpaceManager *m,const Translate *t);	///< For use with decode
  virtual int4 numSpacebase(void) const;
  virtual const VarnodeData &getSpacebase(int4 i) const;
  virtual const VarnodeData &getSpacebaseFull(int4 i) const;
  virtual bool stackGrowsNegative(void) const { return isNegativeStack; }	///< Return \b true if stack grows toward smaller offsets
  virtual AddrSpace *getContain(void) const { return contain; }	///< Return containing space
  virtual void decode(Decoder &decoder);
};

/// \brief A record describing how logical values are split
///
/// The decompiler can describe a logical value that is stored split across multiple
/// physical memory locations.  This record describes such a split.  The pieces must be listed
/// from \e most \e significant to \e least \e significant.
class JoinRecord { friend class AddrSpaceManager; vector pieces; ///< All the physical pieces of the symbol, most significant to least VarnodeData unified; ///< Special entry representing entire symbol in one chunk public: int4 numPieces(void) const { return pieces.size(); } ///< Get number of pieces in this record bool isFloatExtension(void) const { return (pieces.size() == 1); } ///< Does this record extend a float varnode const VarnodeData &getPiece(int4 i) const { return pieces[i]; } ///< Get the i-th piece const VarnodeData &getUnified(void) const { return unified; } ///< Get the Varnode whole Address getEquivalentAddress(uintb offset,int4 &pos) const; ///< Given offset in \e join space, get equivalent address of piece bool operator<(const JoinRecord &op2) const; ///< Compare records lexigraphically by pieces static void mergeSequence(vector &seq,const Translate *trans); ///< Merge any contiguous ranges in a sequence }; /// \brief Comparator for JoinRecord objects struct JoinRecordCompare { bool operator()(const JoinRecord *a,const JoinRecord *b) const { return *a < *b; } ///< Compare to JoinRecords using their built-in comparison }; /// \brief A manager for different address spaces /// /// Allow creation, lookup by name, lookup by shortcut, lookup by name, and iteration /// over address spaces class AddrSpaceManager { vector baselist; ///< Every space we know about for this architecture vector resolvelist; ///< Special constant resolvers map name2Space; ///< Map from name -> space map shortcut2Space; ///< Map from shortcut -> space AddrSpace *constantspace; ///< Quick reference to constant space AddrSpace *defaultcodespace; ///< Default space where code lives, generally main RAM AddrSpace *defaultdataspace; ///< Default space where data lives AddrSpace *iopspace; ///< Space for internal pcode op pointers AddrSpace *fspecspace; ///< Space for internal callspec pointers AddrSpace *joinspace; ///< Space for unifying split variables AddrSpace *stackspace; ///< 
Stack space associated with processor AddrSpace *uniqspace; ///< Temporary space associated with processor uintb joinallocate; ///< Next offset to be allocated in join space set splitset; ///< Different splits that have been defined in join space vector splitlist; ///< JoinRecords indexed by join address protected: AddrSpace *decodeSpace(Decoder &decoder,const Translate *trans); ///< Add a space to the model based an on XML tag void decodeSpaces(Decoder &decoder,const Translate *trans); ///< Restore address spaces in the model from a stream void setDefaultCodeSpace(int4 index); ///< Set the default address space (for code) void setDefaultDataSpace(int4 index); ///< Set the default address space for data void setReverseJustified(AddrSpace *spc); ///< Set reverse justified property on this space void assignShortcut(AddrSpace *spc); ///< Select a shortcut character for a new space void markNearPointers(AddrSpace *spc,int4 size); ///< Mark that given space can be accessed with near pointers void insertSpace(AddrSpace *spc); ///< Add a new address space to the model void copySpaces(const AddrSpaceManager *op2); ///< Copy spaces from another manager void addSpacebasePointer(SpacebaseSpace *basespace,const VarnodeData &ptrdata,int4 truncSize,bool stackGrowth); ///< Set the base register of a spacebase space void insertResolver(AddrSpace *spc,AddressResolver *rsolv); ///< Override the base resolver for a space void setInferPtrBounds(const Range &range); ///< Set the range of addresses that can be inferred as pointers JoinRecord *findJoinInternal(uintb offset) const; ///< Find JoinRecord for \e offset in the join space public: AddrSpaceManager(void); ///< Construct an empty address space manager virtual ~AddrSpaceManager(void); ///< Destroy the manager int4 getDefaultSize(void) const; ///< Get size of addresses for the default space AddrSpace *getSpaceByName(const string &nm) const; ///< Get address space by name AddrSpace *getSpaceByShortcut(char sc) const; ///< Get 
address space from its shortcut AddrSpace *getIopSpace(void) const; ///< Get the internal pcode op space AddrSpace *getFspecSpace(void) const; ///< Get the internal callspec space AddrSpace *getJoinSpace(void) const; ///< Get the joining space AddrSpace *getStackSpace(void) const; ///< Get the stack space for this processor AddrSpace *getUniqueSpace(void) const; ///< Get the temporary register space for this processor AddrSpace *getDefaultCodeSpace(void) const; ///< Get the default address space of this processor AddrSpace *getDefaultDataSpace(void) const; ///< Get the default address space where data is stored AddrSpace *getConstantSpace(void) const; ///< Get the constant space Address getConstant(uintb val) const; ///< Get a constant encoded as an Address Address createConstFromSpace(AddrSpace *spc) const; ///< Create a constant address encoding an address space Address resolveConstant(AddrSpace *spc,uintb val,int4 sz,const Address &point,uintb &fullEncoding) const; int4 numSpaces(void) const; ///< Get the number of address spaces for this processor AddrSpace *getSpace(int4 i) const; ///< Get an address space via its index AddrSpace *getNextSpaceInOrder(AddrSpace *spc) const; ///< Get the next \e contiguous address space JoinRecord *findAddJoin(const vector &pieces,uint4 logicalsize); ///< Get (or create) JoinRecord for \e pieces JoinRecord *findJoin(uintb offset) const; ///< Find JoinRecord for \e offset in the join space void setDeadcodeDelay(AddrSpace *spc,int4 delaydelta); ///< Set the deadcodedelay for a specific space void truncateSpace(const TruncationTag &tag); ///< Mark a space as truncated from its original size /// \brief Build a logically lower precision storage location for a bigger floating point register Address constructFloatExtensionAddress(const Address &realaddr,int4 realsize,int4 logicalsize); /// \brief Build a logical whole from register pairs Address constructJoinAddress(const Translate *translate,const Address &hiaddr,int4 hisz,const 
Address &loaddr,int4 losz); /// \brief Make sure a possibly offset \e join address has a proper JoinRecord void renormalizeJoinAddress(Address &addr,int4 size); /// \brief Parse a string with just an \e address \e space name and a hex offset Address parseAddressSimple(const string &val); }; /// \brief The interface to a translation engine for a processor. /// /// This interface performs translations of instruction data /// for a particular processor. It has two main functions /// - Disassemble single machine instructions /// - %Translate single machine instructions into \e pcode. /// /// It is also the repository for information about the exact /// configuration of the reverse engineering model associated /// with the processor. In particular, it knows about all the /// address spaces, registers, and spacebases for the processor. class Translate : public AddrSpaceManager { public: /// Tagged addresses in the \e unique address space enum UniqueLayout { RUNTIME_BOOLEAN_INVERT=0, ///< Location of the runtime temporary for boolean inversion RUNTIME_RETURN_LOCATION=0x80, ///< Location of the runtime temporary storing the return value RUNTIME_BITRANGE_EA=0x100, ///< Location of the runtime temporary for storing an effective address INJECT=0x200, ///< Range of temporaries for use in compiling p-code snippets ANALYSIS=0x10000000 ///< Range of temporaries for use during decompiler analysis }; private: bool target_isbigendian; ///< \b true if the general endianness of the process is big endian uint4 unique_base; ///< Starting offset into unique space protected: int4 alignment; ///< Byte modulo on which instructions are aligned vector floatformats; ///< Floating point formats utilized by the processor void setBigEndian(bool val); ///< Set general endianness to \b big if val is \b true void setUniqueBase(uint4 val); ///< Set the base offset for new temporary registers public: Translate(void); ///< Constructor for the translator void setDefaultFloatFormats(void); ///< If no 
explicit float formats, set up default formats bool isBigEndian(void) const; ///< Is the processor big endian? const FloatFormat *getFloatFormat(int4 size) const; ///< Get format for a particular floating point encoding int4 getAlignment(void) const; ///< Get the instruction alignment for the processor uint4 getUniqueBase(void) const; ///< Get the base offset for new temporary registers uint4 getUniqueStart(UniqueLayout layout) const; ///< Get a tagged address within the \e unique space /// \brief Initialize the translator given XML configuration documents /// /// A translator gets initialized once, possibly using XML documents /// to configure it. /// \param store is a set of configuration documents virtual void initialize(DocumentStorage &store)=0; /// \brief Add a new context variable to the model for this processor /// /// Add the name of a context register used by the processor and /// how that register is packed into the context state. This /// information is used by a ContextDatabase to associate names /// with context information and to pack context into a single /// state variable for the translation engine. /// \param name is the name of the new context variable /// \param sbit is the first bit of the variable in the packed state /// \param ebit is the last bit of the variable in the packed state virtual void registerContext(const string &name,int4 sbit,int4 ebit) {} /// \brief Set the default value for a particular context variable /// /// Set the value to be returned for a context variable when /// there are no explicit address ranges specifying a value /// for the variable. /// \param name is the name of the context variable /// \param val is the value to be considered default virtual void setContextDefault(const string &name,uintm val) {} /// \brief Toggle whether disassembly is allowed to affect context /// /// By default the disassembly/pcode translation engine can change /// the global context, thereby affecting later disassembly. 
Context /// may be getting determined by something other than control flow in, /// the disassembly, in which case this function can turn off changes /// made by the disassembly /// \param val is \b true to allow context changes, \b false prevents changes virtual void allowContextSet(bool val) const {} /// \brief Get a register as VarnodeData given its name /// /// Retrieve the location and size of a register given its name /// \param nm is the name of the register /// \return the VarnodeData for the register virtual const VarnodeData &getRegister(const string &nm) const=0; /// \brief Get the name of the smallest containing register given a location and size /// /// Generic references to locations in a \e register space are translated into the /// register \e name. If a containing register isn't found, an empty string is returned. /// \param base is the address space containing the location /// \param off is the offset of the location /// \param size is the size of the location /// \return the name of the register, or an empty string virtual string getRegisterName(AddrSpace *base,uintb off,int4 size) const=0; /// \brief Get the name of a register with an exact location and size /// /// If a register exists with the given location and size, return the name of the register. /// Otherwise return the empty string. /// \param base is the address space containing the location /// \param off is the offset of the location /// \param size is the size of the location /// \return the name of the register, or an empty string virtual string getExactRegisterName(AddrSpace *base,uintb off,int4 size) const=0; /// \brief Get a list of all register names and the corresponding location /// /// Most processors have a list of named registers and possibly other memory locations /// that are specific to it. 
This function populates a map from the location information /// to the name, for every named location known by the translator /// \param reglist is the map which will be populated by the call virtual void getAllRegisters(map ®list) const=0; /// \brief Get a list of all \e user-defined pcode ops /// /// The pcode model allows processors to define new pcode /// instructions that are specific to that processor. These /// \e user-defined instructions are all identified by a name /// and an index. This method returns a list of these ops /// in index order. /// \param res is the resulting vector of user op names virtual void getUserOpNames(vector &res) const=0; /// \brief Get the length of a machine instruction /// /// This method decodes an instruction at a specific address /// just enough to find the number of bytes it uses within the /// instruction stream. /// \param baseaddr is the Address of the instruction /// \return the number of bytes in the instruction virtual int4 instructionLength(const Address &baseaddr) const=0; /// \brief Transform a single machine instruction into pcode /// /// This is the main interface to the pcode translation engine. /// The \e dump method in the \e emit object is invoked exactly /// once for each pcode operation in the translation for the /// machine instruction at the given address. /// This routine can throw either /// - UnimplError or /// - BadDataError /// /// \param emit is the tailored pcode emitting object /// \param baseaddr is the Address of the machine instruction /// \return the number of bytes in the machine instruction virtual int4 oneInstruction(PcodeEmit &emit,const Address &baseaddr) const=0; /// \brief Disassemble a single machine instruction /// /// This is the main interface to the disassembler for the /// processor. It disassembles a single instruction and /// returns the result to the application via the \e dump /// method in the \e emit object. 
/// \param emit is the disassembly emitting object /// \param baseaddr is the address of the machine instruction to disassemble virtual int4 printAssembly(AssemblyEmit &emit,const Address &baseaddr) const=0; }; /// Return the size of addresses for the processor's official /// default space. This space is usually the main RAM databus. /// \return the size of an address in bytes inline int4 AddrSpaceManager::getDefaultSize(void) const { return defaultcodespace->getAddrSize(); } /// There is a special address space reserved for encoding pointers /// to pcode operations as addresses. This allows a direct pointer /// to be \e hidden within an operation, when manipulating pcode /// internally. (See IopSpace) /// \return a pointer to the address space inline AddrSpace *AddrSpaceManager::getIopSpace(void) const { return iopspace; } /// There is a special address space reserved for encoding pointers /// to the FuncCallSpecs object as addresses. This allows direct /// pointers to be \e hidden within an operation, when manipulating /// pcode internally. (See FspecSpace) /// \return a pointer to the address space inline AddrSpace *AddrSpaceManager::getFspecSpace(void) const { return fspecspace; } /// There is a special address space reserved for providing a /// logical contiguous memory location for variables that are /// really split between two physical locations. This allows the /// the decompiler to work with the logical value. (See JoinSpace) /// \return a pointer to the address space inline AddrSpace *AddrSpaceManager::getJoinSpace(void) const { return joinspace; } /// Most processors have registers and instructions that are /// reserved for implementing a stack. In the pcode translation, /// these are translated into locations and operations on a /// dedicated \b stack address space. 
(See SpacebaseSpace) /// \return a pointer to the \b stack space inline AddrSpace *AddrSpaceManager::getStackSpace(void) const { return stackspace; } /// Both the pcode translation process and the simplification /// process need access to a pool of temporary registers that /// can be used for moving data around without affecting the /// address spaces used to formally model the processor's RAM /// and registers. These temporary locations are all allocated /// from a dedicated address space, referred to as the \b unique /// space. (See UniqueSpace) /// \return a pointer to the \b unique space inline AddrSpace *AddrSpaceManager::getUniqueSpace(void) const { return uniqspace; } /// Most processors have a main address bus, on which the bulk /// of the processor's RAM is mapped. This matches SLEIGH's notion /// of the \e default space. For Harvard architectures, this is the /// space where code exists (as opposed to data). /// \return a pointer to the \e default code space inline AddrSpace *AddrSpaceManager::getDefaultCodeSpace(void) const { return defaultcodespace; } /// Return the default address space for holding data. For most processors, this /// is just the main RAM space and is the same as the default \e code space. /// For Harvard architectures, this is the space where data is stored /// (as opposed to code). /// \return a pointer to the \e default data space inline AddrSpace *AddrSpaceManager::getDefaultDataSpace(void) const { return defaultdataspace; } /// Pcode represents constant values within an operation as /// offsets within a special \e constant address space. /// (See ConstantSpace) /// \return a pointer to the \b constant space inline AddrSpace *AddrSpaceManager::getConstantSpace(void) const { return constantspace; } /// This routine encodes a specific value as a \e constant /// address. I.e. the address space of the resulting Address /// will be the \b constant space, and the offset will be the /// value. 
/// \param val is the constant value to encode /// \return the \e constant address inline Address AddrSpaceManager::getConstant(uintb val) const { return Address(constantspace,val); } /// This routine is used to encode a pointer to an address space /// as a \e constant Address, for use in \b LOAD and \b STORE /// operations. This is used internally and is slightly more /// efficient than storing the formal index of the space /// param spc is the space pointer to be encoded /// \return the encoded Address inline Address AddrSpaceManager::createConstFromSpace(AddrSpace *spc) const { return Address(constantspace,(uintb)(uintp)spc); } /// This returns the total number of address spaces used by the /// processor, including all special spaces, like the \b constant /// space and the \b iop space. /// \return the number of spaces inline int4 AddrSpaceManager::numSpaces(void) const { return baselist.size(); } /// This retrieves a specific address space via its formal index. /// All spaces have an index, and in conjunction with the numSpaces /// method, this method can be used to iterate over all spaces. /// \param i is the index of the address space /// \return a pointer to the desired space inline AddrSpace *AddrSpaceManager::getSpace(int4 i) const { return baselist[i]; } /// Although endianness is usually specified on the space, most languages set an endianness /// across the entire processor. This routine sets the endianness to \b big if the -val- /// is passed in as \b true. Otherwise, the endianness is set to \b small. /// \param val is \b true if the endianness should be set to \b big inline void Translate::setBigEndian(bool val) { target_isbigendian = val; } /// The \e unique address space, for allocating temporary registers, /// is used for both registers needed by the pcode translation /// engine and, later, by the simplification engine. 
This routine /// sets the boundary of the portion of the space allocated /// for the pcode engine, and sets the base offset where registers /// created by the simplification process can start being allocated. /// \param val is the boundary offset inline void Translate::setUniqueBase(uint4 val) { if (val>unique_base) unique_base = val; } /// Processors can usually be described as using a big endian /// encoding or a little endian encoding. This routine returns /// \b true if the processor globally uses big endian encoding. /// \return \b true if big endian inline bool Translate::isBigEndian(void) const { return target_isbigendian; } /// If machine instructions need to have a specific alignment /// for this processor, this routine returns it. I.e. a return /// value of 4, means that the address of all instructions /// must be a multiple of 4. If there is no /// specific alignment requirement, this routine returns 1. /// \return the instruction alignment inline int4 Translate::getAlignment(void) const { return alignment; } /// Return the first offset within the \e unique space after the range statically reserved by Translate. /// This is generally the starting offset where dynamic temporary registers can start to be allocated. /// \return the first allocatable offset inline uint4 Translate::getUniqueBase(void) const { return unique_base; } /// Regions of the \e unique space are reserved for specific uses. We select the start of a specific /// region based on the given tag. /// \param layout is the given tag /// \return the absolute offset into the \e unique space inline uint4 Translate::getUniqueStart(UniqueLayout layout) const { return (layout != ANALYSIS) ? 
layout + unique_base : layout; } } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/types.h ================================================ /* ### * IP: GHIDRA * NOTE: Decompiler specific flags, refers to sparc,linux,windows,i386,apple,alpha,powerpc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* typedefs for getting specific word sizes */ #ifndef __TYPES_H__ #define __TYPES_H__ #include namespace ghidra { // Use of uintm and intm is deprecated. They must currently be set to be 32-bit. 
typedef uint32_t uintm;		// Deprecated 32-bit unsigned (see note above)
typedef int32_t intm;		// Deprecated 32-bit signed (see note above)
typedef uint64_t uint8;		// 8-byte unsigned
typedef int64_t int8;		// 8-byte signed
typedef uint32_t uint4;		// 4-byte unsigned
typedef int32_t int4;		// 4-byte signed
typedef uint16_t uint2;		// 2-byte unsigned
typedef int16_t int2;		// 2-byte signed
typedef uint8_t uint1;		// 1-byte unsigned
typedef int8_t int1;		// 1-byte signed

/* uintp is intended to be an unsigned integer that is the same size as a pointer */
typedef uintptr_t uintp;

#if defined (__x86_64__) || defined (__i386__)
// Known little-endian hosts can hard-code the flag
#define HOST_ENDIAN 0
#else
// other platforms (not compatible with g++ 4.8.5)
// Detect host endianness at compile time by inspecting the byte layout of an int
class Endian {
public:
  static constexpr const union {
    int4 whole;
    int1 part[4];
  } host = { 1 };
};
#define HOST_ENDIAN Endian::host.part[3]
#endif

#if defined(_WINDOWS)
#pragma warning (disable:4312)
#pragma warning (disable:4311)
#pragma warning (disable:4267)
#pragma warning (disable:4018)
#pragma warning (disable:4244)

/*
 The windows standard template library list implementation seems to have a philosophical difference with
 the standard regarding the validity of iterators pointing to objects that are moved between containers
 (via the splice method)  These defines turn off the validity checks
 (These have been moved to the VC project spec)
*/
//#define _SECURE_SCL 0
//#define _HAS_ITERATOR_DEBUGGING 0

#endif

/*
  Big integers: These are intended to be arbitrary precision integers. However
  for efficiency, these are currently implemented as fixed precision. So for
  coding purposes, these should be interpreted as fixed precision integers
  that store as big a number as you would ever need.
*/
typedef int8 intb;		/* This is a signed big integer */
typedef uint8 uintb;		/* This is an unsigned big integer */

/* Other compilation flags
   CPUI_DEBUG  -- This is the ONE debug switch that should be passed in from the compiler,
                  all others are controlled below
*/
#ifdef CPUI_DEBUG
# define OPACTION_DEBUG
# define PRETTY_DEBUG
# define TYPEPROP_DEBUG
//# define __REMOTE_SOCKET__
//# define DFSVERIFY_DEBUG
//# define BLOCKCONSISTENT_DEBUG
//# define MERGEMULTI_DEBUG
//# define VARBANK_DEBUG
#endif

} // End namespace ghidra
#endif


================================================
FILE: pypcode/sleigh/xml.cc
================================================
/* ###
 * IP: GHIDRA
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* A Bison parser, made by GNU Bison 3.0.4.  */

/* Bison implementation for Yacc-like parsers in C

   Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.

   This program is free software: you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

/* As a special exception, you may create a larger work that contains
   part or all of the Bison parser skeleton and distribute that work
   under terms of your choice, so long as that work isn't itself a
   parser generator using the skeleton or a modified version thereof
   as a parser skeleton.  Alternatively, if you modify or redistribute
   the parser skeleton itself, you may (at your option) remove this
   special exception, which will cause the skeleton and the resulting
   Bison output files to be licensed under the GNU General Public
   License without this special exception.

   This special exception was added by the Free Software Foundation in
   version 2.2 of Bison.  */

/* C LALR(1) parser skeleton written by Richard Stallman, by
   simplifying the original so-called "semantic" parser.  */

/* All symbols defined below should begin with yy or YY, to avoid
   infringing on user name space.  This should be done even for local
   variables, as they might otherwise be expanded by user macros.
   There are some unavoidable exceptions within include files to
   define necessary library symbols; they are noted "INFRINGES ON
   USER NAME SPACE" below.  */

/* Identify Bison output.  */
#define YYBISON 1

/* Bison version.  */
#define YYBISON_VERSION "3.0.4"

/* Skeleton name.  */
#define YYSKELETON_NAME "yacc.c"

/* Pure parsers.  */
#define YYPURE 0

/* Push parsers.  */
#define YYPUSH 0

/* Pull parsers.  */
#define YYPULL 1

/* Substitute the type names.  */
#define YYSTYPE XMLSTYPE

/* Substitute the variable and function names.  */
#define yyparse xmlparse
#define yylex   xmllex
#define yyerror xmlerror
#define yydebug xmldebug
#define yynerrs xmlnerrs
#define yylval  xmllval
#define yychar  xmlchar

/* Copy the first part of user declarations.
*/
#include "xml.hh"

// Scanner mode summary:
// CharData mode look for '<' '&' or "]]>"
// Name mode look for non-name char
// CData mode looking for "]]>"
// Entity mode looking for ending ';'
// AttValue mode looking for endquote or '&'
// Comment mode looking for "--"

// NOTE(review): the four system-header names below were lost in extraction
// (angle-bracket targets stripped); restore from the upstream xml.cc.
#include
#include
#include
#include

namespace ghidra {

// Placeholder URI handed out when no real namespace URI is available.
string Attributes::bogus_uri("http://unused.uri");

/// \brief The XML character scanner
///
/// Tokenize a byte stream suitably for the main XML parser. The scanner expects an ASCII or UTF-8
/// encoding.  Characters in XML tag and attribute names are restricted to ASCII "letters", but
/// extended UTF-8 characters can be used in any other character data: attribute values, content, comments.
class XmlScan {
public:
  /// \brief Modes of the scanner
  enum mode { CharDataMode, CDataMode, AttValueSingleMode, AttValueDoubleMode,
              CommentMode, CharRefMode, NameMode, SNameMode, SingleMode };
  /// \brief Additional tokens returned by the scanner, in addition to byte values 00-ff
  enum token { CharDataToken = 258, CDataToken = 259, AttValueToken = 260,
               CommentToken =261, CharRefToken = 262, NameToken = 263,
               SNameToken = 264, ElementBraceToken = 265, CommandBraceToken = 266 };
private:
  mode curmode;                 ///< The current scanning mode
  istream &s;                   ///< The stream being scanned
  string *lvalue;               ///< Current string being built
  int4 lookahead[4];            ///< Lookahead into the byte stream (ring buffer of 4 bytes)
  int4 pos;                     ///< Current position in the lookahead buffer
  bool endofstream;             ///< Has end of stream been reached
  void clearlvalue(void);       ///< Clear the current token string
  /// \brief Get the next byte in the stream
  ///
  /// Maintain a lookahead of 4 bytes at all times so that we can check for special
  /// XML character sequences without consuming.
  /// \return the next byte value as an integer
  int4 getxmlchar(void) {
    char c;
    int4 ret=lookahead[pos];
    if (!endofstream) {
      s.get(c);
      // EOF (or an embedded NUL, treated as end of input) is padded with a
      // newline first; subsequent reads return -1 below.
      if (s.eof()||(c=='\0')) {
        endofstream = true;
        lookahead[pos] = '\n';
      }
      else
        lookahead[pos] = c;
    }
    else
      lookahead[pos] = -1;      // Past end of stream: hand back sentinel
    pos = (pos+1)&3;            // Advance ring-buffer cursor (mod 4)
    return ret;
  }
  int4 next(int4 i) { return lookahead[(pos+i)&3]; }    ///< Peek at the next (i-th) byte without consuming
  bool isLetter(int4 val) { return (((val>=0x41)&&(val<=0x5a))||((val>=0x61)&&(val<=0x7a))); }  ///< Is the given byte a \e letter (ASCII A-Z or a-z)
  bool isInitialNameChar(int4 val);     ///< Is the given byte/character the valid start of an XML name
  bool isNameChar(int4 val);            ///< Is the given byte/character valid for an XML name
  bool isChar(int4 val);                ///< Is the given byte/character valid as an XML character
  int4 scanSingle(void);                ///< Scan for the next token in Single Character mode
  int4 scanCharData(void);              ///< Scan for the next token in Character Data mode
  int4 scanCData(void);                 ///< Scan for the next token in CDATA mode
  int4 scanAttValue(int4 quote);        ///< Scan for the next token in Attribute Value mode
  int4 scanCharRef(void);               ///< Scan for the next token in Character Reference mode
  int4 scanComment(void);               ///< Scan for the next token in Comment mode
  int4 scanName(void);                  ///< Scan a Name or return single non-name character
  int4 scanSName(void);                 ///< Scan Name, allow white space before
public:
  XmlScan(istream &t);                  ///< Construct scanner given a stream
  ~XmlScan(void);                       ///< Destructor
  void setmode(mode m) { curmode = m; } ///< Set the scanning mode
  int4 nexttoken(void);                 ///< Get the next token
  string *lval(void) { string *ret = lvalue; lvalue = (string *)0; return ret; }        ///< Return the last \e lvalue string (caller takes ownership)
};

/// \brief A parsed name/value pair
struct NameValue {
  string *name;         ///< The name
  string *value;        ///< The value
};

extern int xmllex(void);                        ///< Interface to the scanner
extern int xmlerror(const char *str);           ///< Interface for registering an error in parsing
extern void print_content(const string &str);   ///< Send character data to the ContentHandler
extern int4 convertEntityRef(const string &ref);        ///< Convert an XML entity to its equivalent character
extern int4 convertCharRef(const string &ref);  ///< Convert an XML character reference to its equivalent character
static XmlScan *global_scan;                    ///< Global reference to the scanner
static ContentHandler *handler;                 ///< Global reference to the content handler
// NOTE(review): presumably these serialize access to the two globals above,
// since the parser is not reentrant (YYPURE 0) — confirm against lock sites.
static std::mutex global_scan_mutex;
static std::mutex handler_mutex;

# ifndef YY_NULLPTR
#  if defined __cplusplus && 201103L <= __cplusplus
#   define YY_NULLPTR nullptr
#  else
#   define YY_NULLPTR 0
#  endif
# endif

/* Enabling verbose error messages.  */
#ifdef YYERROR_VERBOSE
# undef YYERROR_VERBOSE
# define YYERROR_VERBOSE 1
#else
# define YYERROR_VERBOSE 0
#endif

/* Debug traces.  */
#ifndef XMLDEBUG
# if defined YYDEBUG
#if YYDEBUG
#   define XMLDEBUG 1
#  else
#   define XMLDEBUG 0
#  endif
# else /* ! defined YYDEBUG */
#  define XMLDEBUG 0
# endif /* ! defined YYDEBUG */
#endif  /* ! defined XMLDEBUG */
#if XMLDEBUG
extern int xmldebug;
#endif

/* Token type.  */
#ifndef XMLTOKENTYPE
# define XMLTOKENTYPE
  enum xmltokentype
  {
    CHARDATA = 258,
    CDATA = 259,
    ATTVALUE = 260,
    COMMENT = 261,
    CHARREF = 262,
    NAME = 263,
    SNAME = 264,
    ELEMBRACE = 265,
    COMMBRACE = 266
  };
#endif

/* Value type: semantic values handed from the scanner to the parser.  */
#if ! defined XMLSTYPE && ! defined XMLSTYPE_IS_DECLARED
union XMLSTYPE
{
  int4 i;
  string *str;
  Attributes *attr;
  NameValue *pair;
};
typedef union XMLSTYPE XMLSTYPE;
# define XMLSTYPE_IS_TRIVIAL 1
# define XMLSTYPE_IS_DECLARED 1
#endif

extern XMLSTYPE xmllval;

int xmlparse (void);

/* Copy the second part of user declarations.
*/ #ifdef short # undef short #endif #ifdef YYTYPE_UINT8 typedef YYTYPE_UINT8 yytype_uint8; #else typedef unsigned char yytype_uint8; #endif #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #else typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 typedef YYTYPE_UINT16 yytype_uint16; #else typedef unsigned short int yytype_uint16; #endif #ifdef YYTYPE_INT16 typedef YYTYPE_INT16 yytype_int16; #else typedef short int yytype_int16; #endif #ifndef YYSIZE_T # ifdef __SIZE_TYPE__ # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t # elif ! defined YYSIZE_T # include /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else # define YYSIZE_T unsigned int # endif #endif #define YYSIZE_MAXIMUM ((YYSIZE_T) -1) #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include /* INFRINGES ON USER NAME SPACE */ # define YY_(Msgid) dgettext ("bison-runtime", Msgid) # endif # endif # ifndef YY_ # define YY_(Msgid) Msgid # endif #endif #ifndef YY_ATTRIBUTE # if (defined __GNUC__ \ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C # define YY_ATTRIBUTE(Spec) __attribute__(Spec) # else # define YY_ATTRIBUTE(Spec) /* empty */ # endif #endif #ifndef YY_ATTRIBUTE_PURE # define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) #endif #ifndef YY_ATTRIBUTE_UNUSED # define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) #endif #if !defined _Noreturn \ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) # if defined _MSC_VER && 1200 <= _MSC_VER # define _Noreturn __declspec (noreturn) # else # define _Noreturn YY_ATTRIBUTE ((__noreturn__)) # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. 
*/ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ # ifdef YYSTACK_USE_ALLOCA # if YYSTACK_USE_ALLOCA # ifdef __GNUC__ # define YYSTACK_ALLOC __builtin_alloca # elif defined __BUILTIN_VA_ARG_INCR # include /* INFRINGES ON USER NAME SPACE */ # elif defined _AIX # define YYSTACK_ALLOC __alloca # elif defined _MSC_VER # include /* INFRINGES ON USER NAME SPACE */ # define alloca _alloca # else # define YYSTACK_ALLOC alloca # if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # endif # endif # endif # ifdef YYSTACK_ALLOC /* Pacify GCC's 'empty if-body' warning. */ # define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely invoke alloca (N) if N exceeds 4096. Use a slightly smaller number to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else # define YYSTACK_ALLOC YYMALLOC # define YYSTACK_FREE YYFREE # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! 
((defined YYMALLOC || defined malloc) \ && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 # endif # endif # ifndef YYMALLOC # define YYMALLOC malloc # if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free # if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ || (defined XMLSTYPE_IS_TRIVIAL && XMLSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 /* Relocate STACK from its old location to the new one. The local variables YYSIZE and YYSTACKSIZE give the old and new number of elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. */ # define YYSTACK_RELOCATE(Stack_alloc, Stack) \ do \ { \ YYSIZE_T yynewbytes; \ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ Stack = &yyptr->Stack_alloc; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ yyptr += yynewbytes / sizeof (*yyptr); \ } \ while (0) #endif #if defined YYCOPY_NEEDED && YYCOPY_NEEDED /* Copy COUNT objects from SRC to DST. The source and destination do not overlap. 
*/ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(Dst, Src, Count) \ __builtin_memcpy (Dst, Src, (Count) * sizeof (*(Src))) # else # define YYCOPY(Dst, Src, Count) \ do \ { \ YYSIZE_T yyi; \ for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ /* YYFINAL -- State number of the termination state. */ #define YYFINAL 25 /* YYLAST -- Last index in YYTABLE. */ #define YYLAST 205 /* YYNTOKENS -- Number of terminals. */ #define YYNTOKENS 50 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 37 /* YYNRULES -- Number of rules. */ #define YYNRULES 70 /* YYNSTATES -- Number of states. */ #define YYNSTATES 151 /* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 #define YYMAXUTOK 266 #define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM as returned by yylex, without out-of-bounds checking. 
*/ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 15, 13, 2, 2, 14, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 12, 18, 17, 48, 2, 2, 47, 16, 2, 2, 2, 2, 2, 19, 2, 46, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 49, 2, 32, 20, 21, 2, 25, 2, 23, 24, 31, 2, 2, 2, 2, 2, 2, 2, 2, 2, 28, 30, 2, 2, 2, 26, 2, 2, 2, 2, 29, 2, 22, 2, 27, 2, 2, 2, 2, 2, 40, 41, 34, 2, 42, 2, 37, 2, 2, 45, 44, 39, 38, 2, 2, 35, 36, 2, 2, 33, 2, 43, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }; #if XMLDEBUG /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint8 yyrline[] = { 0, 141, 141, 142, 143, 144, 145, 146, 147, 148, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 166, 167, 168, 169, 170, 171, 172, 174, 175, 176, 177, 178, 179, 180, 182, 183, 184, 185, 186, 187, 188, 190, 191, 193, 194, 195, 196, 198, 199, 200, 201, 202, 203, 205, 206, 207, 208, 209, 210, 211, 213, 214, 216, 217, 218, 219 }; #endif #if XMLDEBUG || YYERROR_VERBOSE || 0 /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { "$end", "error", "$undefined", "CHARDATA", "CDATA", "ATTVALUE", "COMMENT", "CHARREF", "NAME", "SNAME", "ELEMBRACE", "COMMBRACE", "' '", "'\\n'", "'\\r'", "'\\t'", "'\\''", "'\"'", "'!'", "'-'", "'>'", "'?'", "'['", "'C'", "'D'", "'A'", "'T'", "']'", "'O'", "'Y'", "'P'", "'E'", "'='", "'v'", "'e'", "'r'", "'s'", "'i'", "'o'", "'n'", "'c'", "'d'", "'g'", "'x'", "'m'", "'l'", "'/'", "'&'", "'#'", "';'", "$accept", "document", "whitespace", "S", "attsinglemid", "attdoublemid", "AttValue", "elemstart", "commentstart", "Comment", "PI", "CDSect", "CDStart", "CDEnd", "doctypepro", "prologpre", "prolog", "doctypedecl", "Eq", "Misc", "VersionInfo", "EncodingDecl", "xmldeclstart", "XMLDecl", "element", "STag", "EmptyElemTag", "stagstart", "SAttribute", "etagbrace", "ETag", "content", "Reference", "refstart", "charrefstart", "CharRef", "EntityRef", YY_NULLPTR }; #endif # ifdef YYPRINT /* YYTOKNUM[NUM] -- (External) token number corresponding to the (internal) symbol number NUM (which must be that of a token). */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 32, 10, 13, 9, 39, 34, 33, 45, 62, 63, 91, 67, 68, 65, 84, 93, 79, 89, 80, 69, 61, 118, 101, 114, 115, 105, 111, 110, 99, 100, 103, 120, 109, 108, 47, 38, 35, 59 }; # endif #define YYPACT_NINF -136 #define yypact_value_is_default(Yystate) \ (!!((Yystate) == (-136))) #define YYTABLE_NINF -1 #define yytable_value_is_error(Yytable_value) \ 0 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing STATE-NUM. 
*/ static const yytype_int16 yypact[] = { 132, -136, 42, -136, -136, -136, -136, 22, -136, 125, 9, 20, -136, -136, 143, 28, -136, 79, -136, 148, -136, -136, 16, 18, 6, -136, -136, -136, 32, 65, 148, -136, -136, 148, 38, 40, 93, 91, -136, -1, 63, -136, 39, 27, -136, 45, 26, 52, -12, -136, -136, -136, -136, 69, 57, 77, 104, -136, -3, -136, -136, -136, -136, 94, -136, 95, -136, -136, -4, 103, -136, -136, -136, 67, 136, -136, -136, 106, -136, 68, 109, 87, -136, 90, -136, 144, 2, -136, 138, 108, 117, -136, 118, -136, -136, -136, 125, -2, 3, -136, -136, 125, -136, 145, 131, -136, 147, 146, -136, -136, 121, -136, -136, -136, -136, -136, -136, -136, -136, 54, -136, 149, 130, 150, 152, -136, 142, 151, 140, 153, -136, 154, 155, 156, 157, 158, 159, 137, 161, 160, -136, 63, 162, 163, 136, -136, 164, -136, 63, 136, -136 }; /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. Performed when YYTABLE does not specify something else to do. Zero means the default is an error. */ static const yytype_uint8 yydefact[] = { 0, 18, 0, 4, 5, 6, 7, 0, 8, 38, 0, 0, 36, 37, 31, 0, 28, 0, 27, 0, 58, 46, 0, 0, 21, 1, 9, 52, 0, 0, 30, 25, 29, 0, 0, 0, 0, 0, 2, 0, 0, 48, 0, 0, 53, 0, 0, 0, 0, 21, 26, 3, 42, 0, 0, 0, 0, 59, 0, 67, 64, 63, 62, 0, 60, 0, 47, 61, 0, 0, 66, 65, 33, 0, 0, 50, 49, 0, 19, 0, 0, 0, 43, 0, 44, 0, 0, 55, 0, 0, 0, 68, 0, 34, 10, 13, 35, 0, 0, 54, 51, 0, 20, 0, 0, 45, 0, 0, 22, 56, 0, 70, 69, 11, 16, 12, 14, 17, 15, 0, 41, 0, 0, 0, 0, 57, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 23, 0, 40, 0, 0, 39 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int16 yypgoto[] = { -136, -136, -8, -17, -136, -136, -133, -136, -136, 165, 166, -136, -136, -136, -136, -136, -136, -136, -135, 71, -136, -136, -136, -136, 17, -136, -136, -136, -136, -136, -136, -136, -64, -136, -136, -136, -136 }; /* YYDEFGOTO[NTERM-NUM]. 
*/ static const yytype_int8 yydefgoto[] = { -1, 7, 8, 9, 97, 98, 99, 10, 11, 12, 13, 62, 63, 108, 30, 14, 15, 31, 74, 16, 120, 36, 17, 18, 19, 20, 21, 22, 44, 65, 66, 39, 67, 68, 69, 70, 71 }; /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If positive, shift that token. If negative, reduce the rule whose number is the opposite. If YYTABLE_NINF, syntax error. */ static const yytype_uint8 yytable[] = { 35, 26, 57, 113, 90, 43, 144, 45, 116, 1, 58, 147, 81, 149, 114, 86, 150, 27, 49, 56, 117, 45, 25, 73, 106, 40, 28, 26, 3, 4, 5, 6, 33, 115, 118, 26, 41, 45, 1, 3, 4, 5, 6, 87, 91, 59, 59, 76, 26, 46, 59, 47, 3, 4, 5, 6, 64, 96, 52, 75, 23, 53, 42, 24, 78, 26, 3, 4, 5, 6, 79, 80, 110, 77, 54, 3, 4, 5, 6, 3, 4, 5, 6, 48, 119, 32, 49, 126, 26, 82, 38, 3, 4, 5, 6, 72, 83, 84, 88, 93, 34, 50, 26, 89, 51, 3, 4, 5, 6, 23, 92, 26, 49, 101, 55, 103, 3, 4, 5, 6, 3, 4, 5, 6, 73, 85, 100, 96, 109, 102, 104, 73, 96, 3, 4, 5, 6, 3, 4, 5, 6, 125, 1, 2, 3, 4, 5, 6, 3, 4, 5, 6, 94, 95, 29, 3, 4, 5, 6, 37, 3, 4, 5, 6, 105, 107, 111, 112, 121, 122, 123, 128, 130, 124, 129, 127, 131, 133, 134, 141, 132, 0, 0, 138, 145, 136, 142, 0, 0, 135, 140, 0, 0, 0, 139, 137, 0, 143, 0, 0, 0, 146, 0, 148, 60, 61 }; static const yytype_int16 yycheck[] = { 17, 9, 3, 5, 8, 22, 141, 19, 5, 10, 11, 144, 24, 148, 16, 18, 149, 8, 21, 36, 17, 19, 0, 40, 22, 9, 6, 35, 12, 13, 14, 15, 15, 97, 98, 43, 20, 19, 10, 12, 13, 14, 15, 46, 48, 47, 47, 20, 56, 43, 47, 19, 12, 13, 14, 15, 39, 74, 20, 20, 18, 21, 46, 21, 19, 73, 12, 13, 14, 15, 44, 19, 89, 46, 34, 12, 13, 14, 15, 12, 13, 14, 15, 18, 101, 14, 21, 33, 96, 20, 19, 12, 13, 14, 15, 32, 39, 20, 4, 32, 21, 30, 110, 8, 33, 12, 13, 14, 15, 18, 7, 119, 21, 45, 21, 28, 12, 13, 14, 15, 12, 13, 14, 15, 141, 21, 20, 144, 20, 20, 40, 148, 149, 12, 13, 14, 15, 12, 13, 14, 15, 20, 10, 11, 12, 13, 14, 15, 12, 13, 14, 15, 16, 17, 11, 12, 13, 14, 15, 11, 12, 13, 14, 15, 20, 27, 49, 49, 23, 38, 23, 41, 20, 27, 24, 26, 34, 37, 25, 42, 29, -1, -1, 26, 22, 30, 
25, -1, -1, 35, 31, -1, -1, -1, 36, 39, -1, 37, -1, -1, -1, 38, -1, 39, 39, 39 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing symbol of state STATE-NUM. */ static const yytype_uint8 yystos[] = { 0, 10, 11, 12, 13, 14, 15, 51, 52, 53, 57, 58, 59, 60, 65, 66, 69, 72, 73, 74, 75, 76, 77, 18, 21, 0, 52, 8, 6, 11, 64, 67, 69, 74, 21, 53, 71, 11, 69, 81, 9, 20, 46, 53, 78, 19, 43, 19, 18, 21, 69, 69, 20, 21, 34, 21, 53, 3, 11, 47, 59, 60, 61, 62, 74, 79, 80, 82, 83, 84, 85, 86, 32, 53, 68, 20, 20, 46, 19, 44, 19, 24, 20, 39, 20, 21, 18, 46, 4, 8, 8, 48, 7, 32, 16, 17, 53, 54, 55, 56, 20, 45, 20, 28, 40, 20, 22, 27, 63, 20, 53, 49, 49, 5, 16, 82, 5, 17, 82, 53, 70, 23, 38, 23, 27, 20, 33, 26, 41, 24, 20, 34, 29, 37, 25, 35, 30, 39, 26, 36, 31, 42, 25, 37, 68, 22, 38, 56, 39, 68, 56 }; /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { 0, 50, 51, 51, 52, 52, 52, 52, 53, 53, 54, 54, 54, 55, 55, 55, 56, 56, 57, 58, 59, 60, 61, 62, 63, 64, 64, 65, 65, 65, 66, 66, 67, 68, 68, 68, 69, 69, 69, 70, 71, 72, 73, 73, 73, 73, 74, 74, 75, 75, 76, 76, 77, 77, 78, 79, 80, 80, 81, 81, 81, 81, 81, 81, 81, 82, 82, 83, 84, 85, 86 }; /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. 
*/ static const yytype_uint8 yyr2[] = { 0, 2, 2, 3, 1, 1, 1, 1, 1, 2, 1, 2, 2, 1, 2, 2, 2, 2, 1, 4, 5, 2, 3, 9, 3, 1, 2, 1, 1, 2, 2, 1, 9, 1, 2, 2, 1, 1, 1, 10, 11, 6, 3, 4, 4, 5, 1, 3, 2, 3, 3, 4, 2, 2, 3, 2, 3, 4, 0, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 3, 3 }; #define yyerrok (yyerrstatus = 0) #define yyclearin (yychar = YYEMPTY) #define YYEMPTY (-2) #define YYEOF 0 #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus) #define YYBACKUP(Token, Value) \ do \ if (yychar == YYEMPTY) \ { \ yychar = (Token); \ yylval = (Value); \ YYPOPSTACK (yylen); \ yystate = *yyssp; \ goto yybackup; \ } \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ YYERROR; \ } \ while (0) /* Error token number */ #define YYTERROR 1 #define YYERRCODE 256 /* Enable debugging if requested. */ #if XMLDEBUG # ifndef YYFPRINTF # include /* INFRINGES ON USER NAME SPACE */ # define YYFPRINTF fprintf # endif # define YYDPRINTF(Args) \ do { \ if (yydebug) \ YYFPRINTF Args; \ } while (0) /* This macro is provided for backward compatibility. */ #ifndef YY_LOCATION_PRINT # define YY_LOCATION_PRINT(File, Loc) ((void) 0) #endif # define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ do { \ if (yydebug) \ { \ YYFPRINTF (stderr, "%s ", Title); \ yy_symbol_print (stderr, \ Type, Value); \ YYFPRINTF (stderr, "\n"); \ } \ } while (0) /*----------------------------------------. | Print this symbol's value on YYOUTPUT. | `----------------------------------------*/ static void yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) { FILE *yyo = yyoutput; YYUSE (yyo); if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); # endif YYUSE (yytype); } /*--------------------------------. | Print this symbol on YYOUTPUT. 
|
`--------------------------------*/

/* NOTE(review): Bison-generated debug-trace helpers; compiled only when
   XMLDEBUG is nonzero, otherwise stubbed out by the #else branch below.  */

static void
yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep)
{
  YYFPRINTF (yyoutput, "%s %s (",
             yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);

  yy_symbol_value_print (yyoutput, yytype, yyvaluep);
  YYFPRINTF (yyoutput, ")");
}

/*------------------------------------------------------------------.
| yy_stack_print -- Print the state stack from its BOTTOM up to its |
| TOP (included).                                                   |
`------------------------------------------------------------------*/

static void
yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop)
{
  YYFPRINTF (stderr, "Stack now");
  for (; yybottom <= yytop; yybottom++)
    {
      int yybot = *yybottom;
      YYFPRINTF (stderr, " %d", yybot);
    }
  YYFPRINTF (stderr, "\n");
}

# define YY_STACK_PRINT(Bottom, Top)                            \
do {                                                            \
  if (yydebug)                                                  \
    yy_stack_print ((Bottom), (Top));                           \
} while (0)


/*------------------------------------------------.
| Report that the YYRULE is going to be reduced.  |
`------------------------------------------------*/

static void
yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, int yyrule)
{
  unsigned long int yylno = yyrline[yyrule];
  int yynrhs = yyr2[yyrule];
  int yyi;
  YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n",
             yyrule - 1, yylno);
  /* The symbols being reduced.  */
  for (yyi = 0; yyi < yynrhs; yyi++)
    {
      YYFPRINTF (stderr, "   $%d = ", yyi + 1);
      yy_symbol_print (stderr,
                       yystos[yyssp[yyi + 1 - yynrhs]],
                       &(yyvsp[(yyi + 1) - (yynrhs)])
                                              );
      YYFPRINTF (stderr, "\n");
    }
}

# define YY_REDUCE_PRINT(Rule)          \
do {                                    \
  if (yydebug)                          \
    yy_reduce_print (yyssp, yyvsp, Rule); \
} while (0)

/* Nonzero means print parse trace.  It is left uninitialized so that
   multiple parsers can coexist.  */
int yydebug;
#else /* !XMLDEBUG */
# define YYDPRINTF(Args)
# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
# define YY_STACK_PRINT(Bottom, Top)
# define YY_REDUCE_PRINT(Rule)
#endif /* !XMLDEBUG */


/* YYINITDEPTH -- initial size of the parser's stacks.
*/
#ifndef YYINITDEPTH
# define YYINITDEPTH 200
#endif

/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
   if the built-in stack extension method is used).

   Do not make this value too large; the results are
   undefined if YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
   evaluated with infinite-precision integer arithmetic.  */

#ifndef YYMAXDEPTH
# define YYMAXDEPTH 10000
#endif


#if YYERROR_VERBOSE

# ifndef yystrlen
#  if defined __GLIBC__ && defined _STRING_H
#   define yystrlen strlen
#  else
/* Return the length of YYSTR.  (Hand-rolled strlen for non-glibc builds.)  */
static YYSIZE_T
yystrlen (const char *yystr)
{
  YYSIZE_T yylen;
  for (yylen = 0; yystr[yylen]; yylen++)
    continue;
  return yylen;
}
#  endif
# endif

# ifndef yystpcpy
#  if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
#   define yystpcpy stpcpy
#  else
/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
   YYDEST.  */
static char *
yystpcpy (char *yydest, const char *yysrc)
{
  char *yyd = yydest;
  const char *yys = yysrc;

  while ((*yyd++ = *yys++) != '\0')
    continue;

  return yyd - 1;
}
#  endif
# endif

# ifndef yytnamerr
/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
   quotes and backslashes, so that it's suitable for yyerror.  The
   heuristic is that double-quoting is unnecessary unless the string
   contains an apostrophe, a comma, or backslash (other than
   backslash-backslash).  YYSTR is taken from yytname.  If YYRES is
   null, do not copy; instead, return the length of what the result
   would have been.  */
static YYSIZE_T
yytnamerr (char *yyres, const char *yystr)
{
  if (*yystr == '"')
    {
      YYSIZE_T yyn = 0;
      char const *yyp = yystr;

      for (;;)
        switch (*++yyp)
          {
          case '\'':
          case ',':
            goto do_not_strip_quotes;

          case '\\':
            if (*++yyp != '\\')
              goto do_not_strip_quotes;
            /* Fall through.  */
          default:
            if (yyres)
              yyres[yyn] = *yyp;
            yyn++;
            break;

          case '"':
            /* Closing quote: terminate (when copying) and report length.  */
            if (yyres)
              yyres[yyn] = '\0';
            return yyn;
          }
do_not_strip_quotes: ;
    }

  if (! yyres)
    return yystrlen (yystr);

  return yystpcpy (yyres, yystr) - yyres;
}
# endif

/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
   about the unexpected token YYTOKEN for the state stack whose top is
   YYSSP.

   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is
   not large enough to hold the message.  In that case, also set
   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the
   required number of bytes is too large to store.  */
static int
yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
                yytype_int16 *yyssp, int yytoken)
{
  YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
  YYSIZE_T yysize = yysize0;
  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
  /* Internationalized format string. */
  const char *yyformat = YY_NULLPTR;
  /* Arguments of yyformat. */
  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
  /* Number of reported tokens (one for the "unexpected", one per
     "expected"). */
  int yycount = 0;

  /* There are many possibilities here to consider:
     - If this state is a consistent state with a default action, then
       the only way this function was invoked is if the default action
       is an error action.  In that case, don't check for expected
       tokens because there are none.
     - The only way there can be no lookahead present (in yychar) is if
       this state is a consistent state with a default action.  Thus,
       detecting the absence of a lookahead is sufficient to determine
       that there is no unexpected or expected token to report.  In that
       case, just report a simple "syntax error".
     - Don't assume there isn't a lookahead just because this state is a
       consistent state with a default action.  There might have been a
       previous inconsistent state, consistent state with a non-default
       action, or user semantic action that manipulated yychar.
- Of course, the expected token list depends on states to have correct lookahead information, and it depends on the parser not to perform extra reductions after fetching a lookahead from the scanner and before detecting a syntax error. Thus, state merging (from LALR or IELR) and default reductions corrupt the expected token list. However, the list is correct for canonical LR with one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ if (yytoken != YYEMPTY) { int yyn = yypact[*yyssp]; yyarg[yycount++] = yytname[yytoken]; if (!yypact_value_is_default (yyn)) { /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. In other words, skip the first -YYN actions for this state because they are default actions. */ int yyxbegin = yyn < 0 ? -yyn : 0; /* Stay within bounds of both yycheck and yytname. */ int yychecklim = YYLAST - yyn + 1; int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; int yyx; for (yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR && !yytable_value_is_error (yytable[yyx + yyn])) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; yysize = yysize0; break; } yyarg[yycount++] = yytname[yyx]; { YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } } } } switch (yycount) { # define YYCASE_(N, S) \ case N: \ yyformat = S; \ break YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); # undef YYCASE_ } { YYSIZE_T yysize1 = yysize + yystrlen (yyformat); if (! 
(yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; yysize = yysize1; } if (*yymsg_alloc < yysize) { *yymsg_alloc = 2 * yysize; if (! (yysize <= *yymsg_alloc && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM)) *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM; return 1; } /* Avoid sprintf, as that infringes on the user's name space. Don't have undefined behavior even if the translation produced a string with the wrong number of "%s"s. */ { char *yyp = *yymsg; int yyi = 0; while ((*yyp = *yyformat) != '\0') if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount) { yyp += yytnamerr (yyp, yyarg[yyi++]); yyformat += 2; } else { yyp++; yyformat++; } } return 0; } #endif /* YYERROR_VERBOSE */ /*-----------------------------------------------. | Release the memory associated to this symbol. | `-----------------------------------------------*/ static void yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) { YYUSE (yyvaluep); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN switch (yytype) { case 3: /* CHARDATA */ { delete ((*yyvaluep).str); } break; case 4: /* CDATA */ { delete ((*yyvaluep).str); } break; case 5: /* ATTVALUE */ { delete ((*yyvaluep).str); } break; case 6: /* COMMENT */ { delete ((*yyvaluep).str); } break; case 7: /* CHARREF */ { delete ((*yyvaluep).str); } break; case 8: /* NAME */ { delete ((*yyvaluep).str); } break; case 9: /* SNAME */ { delete ((*yyvaluep).str); } break; case 10: /* ELEMBRACE */ { delete ((*yyvaluep).str); } break; case 11: /* COMMBRACE */ { delete ((*yyvaluep).str); } break; case 54: /* attsinglemid */ { delete ((*yyvaluep).str); } break; case 55: /* attdoublemid */ { delete ((*yyvaluep).str); } break; case 56: /* AttValue */ { delete ((*yyvaluep).str); } break; case 61: /* CDSect */ { delete ((*yyvaluep).str); } break; case 75: /* STag */ { delete ((*yyvaluep).attr); } break; case 76: /* EmptyElemTag */ { delete ((*yyvaluep).attr); } break; case 77: /* 
stagstart */ { delete ((*yyvaluep).attr); } break; case 78: /* SAttribute */ { delete ((*yyvaluep).pair); } break; case 80: /* ETag */ { delete ((*yyvaluep).str); } break; case 82: /* Reference */ { } break; case 85: /* CharRef */ { delete ((*yyvaluep).str); } break; case 86: /* EntityRef */ { delete ((*yyvaluep).str); } break; default: break; } YY_IGNORE_MAYBE_UNINITIALIZED_END } /* The lookahead symbol. */ int yychar; /* The semantic value of the lookahead symbol. */ YYSTYPE yylval; /* Number of syntax errors so far. */ int yynerrs; /*----------. | yyparse. | `----------*/ int yyparse (void) { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: 'yyss': related to states. 'yyvs': related to semantic values. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ /* The state stack. */ yytype_int16 yyssa[YYINITDEPTH]; yytype_int16 *yyss; yytype_int16 *yyssp; /* The semantic value stack. */ YYSTYPE yyvsa[YYINITDEPTH]; YYSTYPE *yyvs; YYSTYPE *yyvsp; YYSIZE_T yystacksize; int yyn; int yyresult; /* Lookahead token as an internal (translated) token number. */ int yytoken = 0; /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ char yymsgbuf[128]; char *yymsg = yymsgbuf; YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ int yylen = 0; yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); yystate = 0; yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ goto yysetstate; /*------------------------------------------------------------. | yynewstate -- Push a new state, which is found in yystate. 
| `------------------------------------------------------------*/ yynewstate: /* In all cases, when you get here, the value and location stacks have just been pushed. So pushing a state here evens the stacks. */ yyssp++; yysetstate: *yyssp = yystate; if (yyss + yystacksize - 1 <= yyssp) { /* Get the current used size of the three stacks, in elements. */ YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow { /* Give user a chance to reallocate the stack. Use copies of these so that the &'s don't force the real ones into memory. */ YYSTYPE *yyvs1 = yyvs; yytype_int16 *yyss1 = yyss; /* Each stack pointer address is followed by the size of the data in use in that stack, in bytes. This used to be a conditional around just the two extra args, but that might be undefined if yyoverflow is a macro. */ yyoverflow (YY_("memory exhausted"), &yyss1, yysize * sizeof (*yyssp), &yyvs1, yysize * sizeof (*yyvsp), &yystacksize); yyss = yyss1; yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE goto yyexhaustedlab; # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) yystacksize = YYMAXDEPTH; { yytype_int16 *yyss1 = yyss; union yyalloc *yyptr = (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); if (! yyptr) goto yyexhaustedlab; YYSTACK_RELOCATE (yyss_alloc, yyss); YYSTACK_RELOCATE (yyvs_alloc, yyvs); # undef YYSTACK_RELOCATE if (yyss1 != yyssa) YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); if (yystate == YYFINAL) YYACCEPT; goto yybackup; /*-----------. | yybackup. | `-----------*/ yybackup: /* Do appropriate processing given the current state. Read a lookahead token if we need one and don't already have one. 
*/ /* First try to decide what to do without reference to lookahead token. */ yyn = yypact[yystate]; if (yypact_value_is_default (yyn)) goto yydefault; /* Not known => get a lookahead token if don't already have one. */ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */ if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); yychar = yylex (); } if (yychar <= YYEOF) { yychar = yytoken = YYEOF; YYDPRINTF ((stderr, "Now at end of input.\n")); } else { yytoken = YYTRANSLATE (yychar); YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); } /* If the proper action on seeing token YYTOKEN is to reduce or to detect an error, take that action. */ yyn += yytoken; if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) goto yydefault; yyn = yytable[yyn]; if (yyn <= 0) { if (yytable_value_is_error (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } /* Count tokens shifted since error; after three, turn off error status. */ if (yyerrstatus) yyerrstatus--; /* Shift the lookahead token. */ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); /* Discard the shifted token. */ yychar = YYEMPTY; yystate = yyn; YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact[yystate]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. | `-----------------------------*/ yyreduce: /* yyn is the number of a rule to reduce with. */ yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison users should not rely upon it. 
Assigning to YYVAL unconditionally makes the parser a bit smaller, and it avoids a GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; YY_REDUCE_PRINT (yyn); switch (yyn) { case 10: { (yyval.str) = new string; global_scan->setmode(XmlScan::AttValueSingleMode); } break; case 11: { (yyval.str) = (yyvsp[-1].str); *(yyval.str) += *(yyvsp[0].str); delete (yyvsp[0].str); global_scan->setmode(XmlScan::AttValueSingleMode); } break; case 12: { (yyval.str) = (yyvsp[-1].str); *(yyval.str) += (yyvsp[0].i); global_scan->setmode(XmlScan::AttValueSingleMode); } break; case 13: { (yyval.str) = new string; global_scan->setmode(XmlScan::AttValueDoubleMode); } break; case 14: { (yyval.str) = (yyvsp[-1].str); *(yyval.str) += *(yyvsp[0].str); delete (yyvsp[0].str); global_scan->setmode(XmlScan::AttValueDoubleMode); } break; case 15: { (yyval.str) = (yyvsp[-1].str); *(yyval.str) += (yyvsp[0].i); global_scan->setmode(XmlScan::AttValueDoubleMode); } break; case 16: { (yyval.str) = (yyvsp[-1].str); } break; case 17: { (yyval.str) = (yyvsp[-1].str); } break; case 18: { global_scan->setmode(XmlScan::NameMode); delete (yyvsp[0].str); } break; case 19: { global_scan->setmode(XmlScan::CommentMode); delete (yyvsp[-3].str); } break; case 20: { delete (yyvsp[-3].str); } break; case 21: { delete (yyvsp[-1].str); yyerror("Processing instructions are not supported"); YYERROR; } break; case 22: { (yyval.str) = (yyvsp[-1].str); } break; case 23: { global_scan->setmode(XmlScan::CDataMode); delete (yyvsp[-8].str); } break; case 32: { delete (yyvsp[-8].str); yyerror("DTD's not supported"); YYERROR; } break; case 39: { handler->setVersion(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 40: { handler->setEncoding(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 46: { handler->endElement((yyvsp[0].attr)->getelemURI(),(yyvsp[0].attr)->getelemName(),(yyvsp[0].attr)->getelemName()); delete (yyvsp[0].attr); } break; case 47: { 
handler->endElement((yyvsp[-2].attr)->getelemURI(),(yyvsp[-2].attr)->getelemName(),(yyvsp[-2].attr)->getelemName()); delete (yyvsp[-2].attr); delete (yyvsp[0].str); } break; case 48: { handler->startElement((yyvsp[-1].attr)->getelemURI(),(yyvsp[-1].attr)->getelemName(),(yyvsp[-1].attr)->getelemName(),*(yyvsp[-1].attr)); (yyval.attr) = (yyvsp[-1].attr); } break; case 49: { handler->startElement((yyvsp[-2].attr)->getelemURI(),(yyvsp[-2].attr)->getelemName(),(yyvsp[-2].attr)->getelemName(),*(yyvsp[-2].attr)); (yyval.attr) = (yyvsp[-2].attr); } break; case 50: { handler->startElement((yyvsp[-2].attr)->getelemURI(),(yyvsp[-2].attr)->getelemName(),(yyvsp[-2].attr)->getelemName(),*(yyvsp[-2].attr)); (yyval.attr) = (yyvsp[-2].attr); } break; case 51: { handler->startElement((yyvsp[-3].attr)->getelemURI(),(yyvsp[-3].attr)->getelemName(),(yyvsp[-3].attr)->getelemName(),*(yyvsp[-3].attr)); (yyval.attr) = (yyvsp[-3].attr); } break; case 52: { (yyval.attr) = new Attributes((yyvsp[0].str)); global_scan->setmode(XmlScan::SNameMode); } break; case 53: { (yyval.attr) = (yyvsp[-1].attr); (yyval.attr)->add_attribute( (yyvsp[0].pair)->name, (yyvsp[0].pair)->value); delete (yyvsp[0].pair); global_scan->setmode(XmlScan::SNameMode); } break; case 54: { (yyval.pair) = new NameValue; (yyval.pair)->name = (yyvsp[-2].str); (yyval.pair)->value = (yyvsp[0].str); } break; case 55: { global_scan->setmode(XmlScan::NameMode); delete (yyvsp[-1].str); } break; case 56: { (yyval.str) = (yyvsp[-1].str); } break; case 57: { (yyval.str) = (yyvsp[-2].str); } break; case 58: { global_scan->setmode(XmlScan::CharDataMode); } break; case 59: { print_content( *(yyvsp[0].str) ); delete (yyvsp[0].str); global_scan->setmode(XmlScan::CharDataMode); } break; case 60: { global_scan->setmode(XmlScan::CharDataMode); } break; case 61: { string *tmp=new string(); *tmp += (yyvsp[0].i); print_content(*tmp); delete tmp; global_scan->setmode(XmlScan::CharDataMode); } break; case 62: { print_content( *(yyvsp[0].str) ); 
delete (yyvsp[0].str); global_scan->setmode(XmlScan::CharDataMode); } break; case 63: { global_scan->setmode(XmlScan::CharDataMode); } break; case 64: { global_scan->setmode(XmlScan::CharDataMode); } break; case 65: { (yyval.i) = convertEntityRef(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 66: { (yyval.i) = convertCharRef(*(yyvsp[0].str)); delete (yyvsp[0].str); } break; case 67: { global_scan->setmode(XmlScan::NameMode); } break; case 68: { global_scan->setmode(XmlScan::CharRefMode); } break; case 69: { (yyval.str) = (yyvsp[-1].str); } break; case 70: { (yyval.str) = (yyvsp[-1].str); } break; default: break; } /* User semantic actions sometimes alter yychar, and that requires that yytoken be updated with the new translation. We take the approach of translating immediately before every use of yytoken. One alternative is translating here after every semantic action, but that translation would be missed if the semantic action invokes YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an incorrect destructor might then be invoked immediately. In the case of YYERROR or YYBACKUP, subsequent parser actions might lead to an incorrect destructor call or verbose syntax error message before the lookahead is translated. */ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ yyn = yyr1[yyn]; yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) yystate = yytable[yystate]; else yystate = yydefgoto[yyn - YYNTOKENS]; goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. 
| `--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar); /* If not already recovering from an error, report this error. */ if (!yyerrstatus) { ++yynerrs; #if ! YYERROR_VERBOSE yyerror (YY_("syntax error")); #else # define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \ yyssp, yytoken) { char const *yymsgp = YY_("syntax error"); int yysyntax_error_status; yysyntax_error_status = YYSYNTAX_ERROR; if (yysyntax_error_status == 0) yymsgp = yymsg; else if (yysyntax_error_status == 1) { if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc); if (!yymsg) { yymsg = yymsgbuf; yymsg_alloc = sizeof yymsgbuf; yysyntax_error_status = 2; } else { yysyntax_error_status = YYSYNTAX_ERROR; yymsgp = yymsg; } } yyerror (yymsgp); if (yysyntax_error_status == 2) goto yyexhaustedlab; } # undef YYSYNTAX_ERROR #endif } if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ if (yychar <= YYEOF) { /* Return failure if at end of input. */ if (yychar == YYEOF) YYABORT; } else { yydestruct ("Error: discarding", yytoken, &yylval); yychar = YYEMPTY; } } /* Else will try to reuse lookahead token after shifting the error token. */ goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (/*CONSTCOND*/ 0) goto yyerrorlab; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; YY_STACK_PRINT (yyss, yyssp); yystate = *yyssp; goto yyerrlab1; /*-------------------------------------------------------------. 
| yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) { yyn += YYTERROR; if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { yyn = yytable[yyn]; if (0 < yyn) break; } } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) YYABORT; yydestruct ("Error: popping", yystos[yystate], yyvsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); } YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); yystate = yyn; goto yynewstate; /*-------------------------------------. | yyacceptlab -- YYACCEPT comes here. | `-------------------------------------*/ yyacceptlab: yyresult = 0; goto yyreturn; /*-----------------------------------. | yyabortlab -- YYABORT comes here. | `-----------------------------------*/ yyabortlab: yyresult = 1; goto yyreturn; #if !defined yyoverflow || YYERROR_VERBOSE /*-------------------------------------------------. | yyexhaustedlab -- memory exhaustion comes here. | `-------------------------------------------------*/ yyexhaustedlab: yyerror (YY_("memory exhausted")); yyresult = 2; /* Fall through. */ #endif yyreturn: if (yychar != YYEMPTY) { /* Make sure we have latest lookahead translation. See comments at user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", yytoken, &yylval); } /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. 
*/ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", yystos[*yyssp], yyvsp); YYPOPSTACK (1); } #ifndef yyoverflow if (yyss != yyssa) YYSTACK_FREE (yyss); #endif #if YYERROR_VERBOSE if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif return yyresult; } XmlScan::XmlScan(istream &t) : s(t) { curmode = SingleMode; lvalue = (string *)0; pos = 0; endofstream = false; getxmlchar(); getxmlchar(); getxmlchar(); getxmlchar(); // Fill lookahead buffer } XmlScan::~XmlScan(void) { clearlvalue(); } void XmlScan::clearlvalue(void) { if (lvalue != (string *)0) delete lvalue; } int4 XmlScan::scanSingle(void) { int4 res = getxmlchar(); if (res == '<') { if (isInitialNameChar(next(0))) return ElementBraceToken; return CommandBraceToken; } return res; } int4 XmlScan::scanCharData(void) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { // look for '<' '&' or ']]>' if (next(0) == '<') break; if (next(0) == '&') break; if (next(0) == ']') if (next(1)== ']') if (next(2)=='>') break; *lvalue += getxmlchar(); } if (lvalue->size()==0) return scanSingle(); return CharDataToken; } int4 XmlScan::scanCData(void) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { // Look for "]]>" and non-Char if (next(0)==']') if (next(1)==']') if (next(2)=='>') break; if (!isChar(next(0))) break; *lvalue += getxmlchar(); } return CDataToken; // CData can be empty } int4 XmlScan::scanCharRef(void) { int4 v; clearlvalue(); lvalue = new string(); if (next(0) == 'x') { *lvalue += getxmlchar(); while(next(0) != -1) { v = next(0); if (v < '0') break; if ((v>'9')&&(v<'A')) break; if ((v>'F')&&(v<'a')) break; if (v>'f') break; *lvalue += getxmlchar(); } if (lvalue->size()==1) return 'x'; // Must be at least 1 hex digit } else { while(next(0) != -1) { v = next(0); if (v<'0') break; if (v>'9') break; *lvalue += getxmlchar(); } if (lvalue->size()==0) return scanSingle(); } return CharRefToken; } int4 XmlScan::scanAttValue(int4 quote) { 
clearlvalue(); lvalue = new string(); while(next(0) != -1) { if (next(0) == quote) break; if (next(0) == '<') break; if (next(0) == '&') break; *lvalue += getxmlchar(); } if (lvalue->size() == 0) return scanSingle(); return AttValueToken; } int4 XmlScan::scanComment(void) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { if (next(0)=='-') if (next(1)=='-') break; if (!isChar(next(0))) break; *lvalue += getxmlchar(); } return CommentToken; } int4 XmlScan::scanName(void) { clearlvalue(); lvalue = new string(); if (!isInitialNameChar(next(0))) return scanSingle(); *lvalue += getxmlchar(); while(next(0) != -1) { if (!isNameChar(next(0))) break; *lvalue += getxmlchar(); } return NameToken; } int4 XmlScan::scanSName(void) { int4 whitecount = 0; while((next(0)==' ')||(next(0)=='\n')||(next(0)=='\r')||(next(0)=='\t')) { whitecount += 1; getxmlchar(); } clearlvalue(); lvalue = new string(); if (!isInitialNameChar(next(0))) { // First non-whitespace is not Name char if (whitecount > 0) return ' '; return scanSingle(); } *lvalue += getxmlchar(); while(next(0) != -1) { if (!isNameChar(next(0))) break; *lvalue += getxmlchar(); } if (whitecount>0) return SNameToken; return NameToken; } bool XmlScan::isInitialNameChar(int4 val) { if (isLetter(val)) return true; if ((val=='_')||(val==':')) return true; return false; } bool XmlScan::isNameChar(int4 val) { if (isLetter(val)) return true; if ((val>='0')&&(val<='9')) return true; if ((val=='.')||(val=='-')||(val=='_')||(val==':')) return true; return false; } bool XmlScan::isChar(int4 val) { if (val>=0x20) return true; if ((val == 0xd)||(val==0xa)||(val==0x9)) return true; return false; } int4 XmlScan::nexttoken(void) { mode mymode = curmode; curmode = SingleMode; switch(mymode) { case CharDataMode: return scanCharData(); case CDataMode: return scanCData(); case AttValueSingleMode: return scanAttValue('\''); case AttValueDoubleMode: return scanAttValue('"'); case CommentMode: return scanComment(); case CharRefMode: return 
scanCharRef(); case NameMode: return scanName(); case SNameMode: return scanSName(); case SingleMode: return scanSingle(); } return -1; } void print_content(const string &str) { uint4 i; for(i=0;iignorableWhitespace(str.c_str(),0,str.size()); else handler->characters(str.c_str(),0,str.size()); } int4 convertEntityRef(const string &ref) { if (ref == "lt") return '<'; if (ref == "amp") return '&'; if (ref == "gt") return '>'; if (ref == "quot") return '"'; if (ref == "apos") return '\''; return -1; } int4 convertCharRef(const string &ref) { uint4 i; int4 mult,val,cur; if (ref[0]=='x') { i = 1; mult = 16; } else { i = 0; mult = 10; } val = 0; for(;inexttoken(); if (res>255) yylval.str = global_scan->lval(); return res; } int xmlerror(const char *str) { handler->setError(str); return 0; } int4 xml_parse(istream &i,ContentHandler *hand,int4 dbg) { #if YYDEBUG yydebug = dbg; #endif std::lock_guard global_scan_lock(global_scan_mutex); std::lock_guard handler_lock(handler_mutex); global_scan = new XmlScan(i); handler = hand; handler->startDocument(); int4 res = yyparse(); if (res == 0) handler->endDocument(); delete global_scan; return res; } void TreeHandler::startElement(const string &namespaceURI,const string &localName, const string &qualifiedName,const Attributes &atts) { Element *newel = new Element(cur); cur->addChild(newel); cur = newel; newel->setName(localName); for(int4 i=0;iaddAttribute(atts.getLocalName(i),atts.getValue(i)); } void TreeHandler::endElement(const string &namespaceURI,const string &localName, const string &qualifiedName) { cur = cur->getParent(); } void TreeHandler::characters(const char *text,int4 start,int4 length) { cur->addContent(text,start,length); } Element::~Element(void) { List::iterator iter; for(iter=children.begin();iter!=children.end();++iter) delete *iter; } const string &Element::getAttributeValue(const string &nm) const { for(uint4 i=0;igetName()] = el; } const Element *DocumentStorage::getTag(const string &nm) const { 
map::const_iterator iter; iter = tagmap.find(nm); if (iter != tagmap.end()) return (*iter).second; return (const Element *)0; } Document *xml_tree(istream &i) { Document *doc = new Document(); TreeHandler handle(doc); if (0!=xml_parse(i,&handle)) { delete doc; throw DecoderError(handle.getError()); } return doc; } void xml_escape(ostream &s,const char *str) { while(*str!='\0') { if (*str < '?') { if (*str=='<') s << "<"; else if (*str=='>') s << ">"; else if (*str=='&') s << "&"; else if (*str=='"') s << """; else if (*str=='\'') s << "'"; else s << *str; } else s << *str; str++; } } } // End namespace ghidra ================================================ FILE: pypcode/sleigh/xml.hh ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /// \file xml.hh /// \brief Lightweight (and incomplete) XML parser for marshaling data to and from the decompiler #ifndef __XML_HH__ #define __XML_HH__ #include "types.h" #include #include #include #include #include namespace ghidra { using std::string; using std::vector; using std::map; using std::istream; using std::ostream; using std::ifstream; using std::dec; using std::hex; /// \brief The \e attributes for a single XML element /// /// A container for name/value pairs (of strings) for the formal attributes, as collected during parsing. /// This object is used to initialize the Element object but is not part of the final, in memory, DOM model. 
/// This also holds other properties of the element that are unused in this implementation, /// including the \e namespace URI. class Attributes { static string bogus_uri; ///< A placeholder for the namespace URI that should be attached to the element // static string prefix; string *elementname; ///< The name of the XML element vector name; ///< List of names for each formal XML attribute vector value; ///< List of values for each formal XML attribute public: Attributes(string *el) { elementname = el; } ///< Construct from element name string ~Attributes(void) { for(uint4 i=0;i List; ///< A list of XML elements /// \brief An XML element. A node in the DOM tree. /// /// This is the main node for the in-memory representation of the XML (DOM) tree. class Element { string name; ///< The (local) name of the element string content; ///< Character content of the element vector attr; ///< A list of attribute names for \b this element vector value; ///< a (corresponding) list of attribute values for \b this element protected: Element *parent; ///< The parent Element (or null) List children; ///< A list of child Element objects public: Element(Element *par) { parent = par; } ///< Constructor given a parent Element ~Element(void); ///< Destructor void setName(const string &nm) { name = nm; } ///< Set the local name of the element /// \brief Append new character content to \b this element /// /// \param str is an array of character data /// \param start is the index of the first character to append /// \param length is the number of characters to append void addContent(const char *str,int4 start,int4 length) { // for(int4 i=0;i doclist; ///< The list of documents held by this container map tagmap; ///< The map from name to registered XML elements public: ~DocumentStorage(void); ///< Destructor /// \brief Parse an XML document from the given stream /// /// Parsing starts immediately on the stream, attempting to make an in-memory DOM tree. 
/// An XmlException is thrown for any parsing error. /// \param s is the given stream to parse /// \return the in-memory DOM tree Document *parseDocument(istream &s); /// \brief Open and parse an XML file /// /// The given filename is opened on the local filesystem and an attempt is made to parse /// its contents into an in-memory DOM tree. An XmlException is thrown for any parsing error. /// \param filename is the name of the XML document file /// \return the in-memory DOM tree Document *openDocument(const string &filename); /// \brief Register the given XML Element object under its tag name /// /// Only one Element can be stored on \b this object per tag name. /// \param el is the given XML element void registerTag(const Element *el); /// \brief Retrieve a registered XML Element by name /// /// \param nm is the XML tag name /// \return the matching registered Element or null const Element *getTag(const string &nm) const; }; /// \brief An exception thrown by the XML parser /// /// This object holds the error message as passed to the SAX interface callback /// and is thrown as a formal exception. struct DecoderError { string explain; ///< Explanatory string DecoderError(const string &s) { explain = s; } ///< Constructor const char *what() { return explain.c_str(); } }; /// \brief Start-up the XML parser given a stream and a handler /// /// This runs the low-level XML parser. /// \param i is the given stream to get character data from /// \param hand is the ContentHandler that stores or processes the XML content events /// \param dbg is non-zero if the parser should output debug information during its parse /// \return 0 if there is no error during parsing or a (non-zero) error condition extern int4 xml_parse(istream &i,ContentHandler *hand,int4 dbg=0); /// \brief Parse the given XML stream into an in-memory document /// /// The stream is parsed using the standard ContentHandler for producing an in-memory /// DOM representation of the XML document. 
/// \param i is the given stream /// \return the in-memory XML document extern Document *xml_tree(istream &i); /// \brief Send the given character array to a stream, escaping characters with special XML meaning /// /// This makes the following character substitutions: /// - '<' => "<" /// - '>' => ">" /// - '&' => "&" /// - '"' => """ /// - '\'' => "'" /// /// \param s is the stream to write to /// \param str is the given character array to escape extern void xml_escape(ostream &s,const char *str); // Some helper functions for writing XML documents directly to a stream /// \brief Output an XML attribute name/value pair to stream /// /// \param s is the output stream /// \param attr is the name of the attribute /// \param val is the attribute value inline void a_v(ostream &s,const string &attr,const string &val) { s << ' ' << attr << "=\""; xml_escape(s,val.c_str()); s << "\""; } /// \brief Output the given signed integer as an XML attribute value /// /// \param s is the output stream /// \param attr is the name of the attribute /// \param val is the given integer value inline void a_v_i(ostream &s,const string &attr,intb val) { s << ' ' << attr << "=\"" << dec << val << "\""; } /// \brief Output the given unsigned integer as an XML attribute value /// /// \param s is the output stream /// \param attr is the name of the attribute /// \param val is the given unsigned integer value inline void a_v_u(ostream &s,const string &attr,uintb val) { s << ' ' << attr << "=\"0x" << hex << val << "\""; } /// \brief Output the given boolean value as an XML attribute /// /// \param s is the output stream /// \param attr is the name of the attribute /// \param val is the given boolean value inline void a_v_b(ostream &s,const string &attr,bool val) { s << ' ' << attr << "=\""; if (val) s << "true"; else s << "false"; s << "\""; } /// \brief Read an XML attribute value as a boolean /// /// This method is intended to recognize the strings, "true", "yes", and "1" /// as a \b true 
value. Anything else is returned as \b false. /// \param attr is the given XML attribute value (as a string) /// \return either \b true or \b false inline bool xml_readbool(const string &attr) { if (attr.size()==0) return false; char firstc = attr[0]; if (firstc=='t') return true; if (firstc=='1') return true; if (firstc=='y') return true; // For backward compatibility return false; } } // End namespace ghidra #endif ================================================ FILE: pypcode/sleigh/xml.y ================================================ /* ### * IP: GHIDRA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ %define api.prefix {xml} %{ #include "xml.hh" // CharData mode look for '<' '&' or "]]>" // Name mode look for non-name char // CData mode looking for "]]>" // Entity mode looking for ending ';' // AttValue mode looking for endquote or '&' // Comment mode looking for "--" #include #include #include #include namespace ghidra { string Attributes::bogus_uri("http://unused.uri"); /// \brief The XML character scanner /// /// Tokenize a byte stream suitably for the main XML parser. The scanner expects an ASCII or UTF-8 /// encoding. Characters is XML tag and attribute names are restricted to ASCII "letters", but /// extended UTF-8 characters can be used in any other character data: attribute values, content, comments. 
class XmlScan { public: /// \brief Modes of the scanner enum mode { CharDataMode, CDataMode, AttValueSingleMode, AttValueDoubleMode, CommentMode, CharRefMode, NameMode, SNameMode, SingleMode }; /// \brief Additional tokens returned by the scanner, in addition to byte values 00-ff enum token { CharDataToken = 258, CDataToken = 259, AttValueToken = 260, CommentToken =261, CharRefToken = 262, NameToken = 263, SNameToken = 264, ElementBraceToken = 265, CommandBraceToken = 266 }; private: mode curmode; ///< The current scanning mode istream &s; ///< The stream being scanned string *lvalue; ///< Current string being built int4 lookahead[4]; ///< Lookahead into the byte stream int4 pos; ///< Current position in the lookahead buffer bool endofstream; ///< Has end of stream been reached void clearlvalue(void); ///< Clear the current token string /// \brief Get the next byte in the stream /// /// Maintain a lookahead of 4 bytes at all times so that we can check for special /// XML character sequences without consuming. 
/// \return the next byte value as an integer int4 getxmlchar(void) { char c; int4 ret=lookahead[pos]; if (!endofstream) { s.get(c); if (s.eof()||(c=='\0')) { endofstream = true; lookahead[pos] = '\n'; } else lookahead[pos] = c; } else lookahead[pos] = -1; pos = (pos+1)&3; return ret; } int4 next(int4 i) { return lookahead[(pos+i)&3]; } ///< Peek at the next (i-th) byte without consuming bool isLetter(int4 val) { return (((val>=0x41)&&(val<=0x5a))||((val>=0x61)&&(val<=0x7a))); } ///< Is the given byte a \e letter bool isInitialNameChar(int4 val); ///< Is the given byte/character the valid start of an XML name bool isNameChar(int4 val); ///< Is the given byte/character valid for an XML name bool isChar(int4 val); ///< Is the given byte/character valid as an XML character int4 scanSingle(void); ///< Scan for the next token in Single Character mode int4 scanCharData(void); ///< Scan for the next token is Character Data mode int4 scanCData(void); ///< Scan for the next token in CDATA mode int4 scanAttValue(int4 quote); ///< Scan for the next token in Attribute Value mode int4 scanCharRef(void); ///< Scan for the next token in Character Reference mode int4 scanComment(void); ///< Scan for the next token in Comment mode int4 scanName(void); ///< Scan a Name or return single non-name character int4 scanSName(void); ///< Scan Name, allow white space before public: XmlScan(istream &t); ///< Construct scanner given a stream ~XmlScan(void); ///< Destructor void setmode(mode m) { curmode = m; } ///< Set the scanning mode int4 nexttoken(void); ///< Get the next token string *lval(void) { string *ret = lvalue; lvalue = (string *)0; return ret; } ///< Return the last \e lvalue string }; /// \brief A parsed name/value pair struct NameValue { string *name; ///< The name string *value; ///< The value }; extern int xmllex(void); ///< Interface to the scanner extern int xmlerror(const char *str); ///< Interface for registering an error in parsing extern void print_content(const string 
&str); ///< Send character data to the ContentHandler extern int4 convertEntityRef(const string &ref); ///< Convert an XML entity to its equivalent character extern int4 convertCharRef(const string &ref); ///< Convert an XML character reference to its equivalent character static XmlScan *global_scan; ///< Global reference to the scanner static ContentHandler *handler; ///< Global reference to the content handler static std::mutex global_scan_mutex; static std::mutex handler_mutex; %} %union { int4 i; string *str; Attributes *attr; NameValue *pair; } %expect 8 %token CHARDATA CDATA ATTVALUE COMMENT CHARREF NAME SNAME ELEMBRACE COMMBRACE %type AttValue attsinglemid attdoublemid ETag CDSect CharRef EntityRef %type Reference %type EmptyElemTag STag stagstart %type SAttribute %destructor { } %destructor { delete $$; } <*> %% document: element Misc; | prolog element Misc; whitespace: ' ' | '\n' | '\r' | '\t'; S: whitespace | S whitespace ; attsinglemid: '\'' { $$ = new string; global_scan->setmode(XmlScan::AttValueSingleMode); } | attsinglemid ATTVALUE { $$ = $1; *$$ += *$2; delete $2; global_scan->setmode(XmlScan::AttValueSingleMode); } | attsinglemid Reference { $$ = $1; *$$ += $2; global_scan->setmode(XmlScan::AttValueSingleMode); }; attdoublemid: '"' { $$ = new string; global_scan->setmode(XmlScan::AttValueDoubleMode); } | attdoublemid ATTVALUE { $$ = $1; *$$ += *$2; delete $2; global_scan->setmode(XmlScan::AttValueDoubleMode); } | attdoublemid Reference { $$ = $1; *$$ += $2; global_scan->setmode(XmlScan::AttValueDoubleMode); }; AttValue: attsinglemid '\'' { $$ = $1; } | attdoublemid '"' { $$ = $1; }; elemstart: ELEMBRACE { global_scan->setmode(XmlScan::NameMode); delete $1; }; commentstart: COMMBRACE '!' '-' '-' { global_scan->setmode(XmlScan::CommentMode); delete $1; } ; Comment: commentstart COMMENT '-' '-' '>' { delete $2; } ; PI: COMMBRACE '?' 
{ delete $1; yyerror("Processing instructions are not supported"); YYERROR; }; CDSect: CDStart CDATA CDEnd { $$ = $2; } ; CDStart: COMMBRACE '!' '[' 'C' 'D' 'A' 'T' 'A' '[' { global_scan->setmode(XmlScan::CDataMode); delete $1; } ; CDEnd: ']' ']' '>' ; doctypepro: doctypedecl | doctypepro Misc; prologpre: XMLDecl | Misc | prologpre Misc; prolog: prologpre doctypepro | prologpre ; doctypedecl: COMMBRACE '!' 'D' 'O' 'C' 'T' 'Y' 'P' 'E' { delete $1; yyerror("DTD's not supported"); YYERROR; }; Eq: '=' | S '=' | Eq S ; Misc: Comment | PI | S ; VersionInfo: S 'v' 'e' 'r' 's' 'i' 'o' 'n' Eq AttValue { handler->setVersion(*$10); delete $10; }; EncodingDecl: S 'e' 'n' 'c' 'o' 'd' 'i' 'n' 'g' Eq AttValue { handler->setEncoding(*$11); delete $11; }; xmldeclstart: COMMBRACE '?' 'x' 'm' 'l' VersionInfo XMLDecl: xmldeclstart '?' '>' | xmldeclstart S '?' '>' | xmldeclstart EncodingDecl '?' '>' | xmldeclstart EncodingDecl S '?' '>' ; element: EmptyElemTag { handler->endElement($1->getelemURI(),$1->getelemName(),$1->getelemName()); delete $1; } | STag content ETag { handler->endElement($1->getelemURI(),$1->getelemName(),$1->getelemName()); delete $1; delete $3; } ; STag: stagstart '>' { handler->startElement($1->getelemURI(),$1->getelemName(),$1->getelemName(),*$1); $$ = $1; } | stagstart S '>' { handler->startElement($1->getelemURI(),$1->getelemName(),$1->getelemName(),*$1); $$ = $1; }; EmptyElemTag: stagstart '/' '>' { handler->startElement($1->getelemURI(),$1->getelemName(),$1->getelemName(),*$1); $$ = $1; } | stagstart S '/' '>' { handler->startElement($1->getelemURI(),$1->getelemName(),$1->getelemName(),*$1); $$ = $1; }; stagstart: elemstart NAME { $$ = new Attributes($2); global_scan->setmode(XmlScan::SNameMode); } | stagstart SAttribute { $$ = $1; $$->add_attribute( $2->name, $2->value); delete $2; global_scan->setmode(XmlScan::SNameMode); }; SAttribute: SNAME Eq AttValue { $$ = new NameValue; $$->name = $1; $$->value = $3; }; etagbrace: COMMBRACE '/' { 
global_scan->setmode(XmlScan::NameMode); delete $1; }; ETag: etagbrace NAME '>' { $$ = $2; } | etagbrace NAME S '>' { $$ = $2; }; content: { global_scan->setmode(XmlScan::CharDataMode); } | content CHARDATA { print_content( *$2 ); delete $2; global_scan->setmode(XmlScan::CharDataMode); } | content element { global_scan->setmode(XmlScan::CharDataMode); } | content Reference { string *tmp=new string(); *tmp += $2; print_content(*tmp); delete tmp; global_scan->setmode(XmlScan::CharDataMode); } | content CDSect { print_content( *$2 ); delete $2; global_scan->setmode(XmlScan::CharDataMode); } | content PI { global_scan->setmode(XmlScan::CharDataMode); } | content Comment { global_scan->setmode(XmlScan::CharDataMode); }; Reference: EntityRef { $$ = convertEntityRef(*$1); delete $1; } | CharRef { $$ = convertCharRef(*$1); delete $1; }; refstart: '&' { global_scan->setmode(XmlScan::NameMode); } ; charrefstart: refstart '#' { global_scan->setmode(XmlScan::CharRefMode); }; CharRef: charrefstart CHARREF ';' { $$ = $2; }; EntityRef: refstart NAME ';' { $$ = $2; }; %% XmlScan::XmlScan(istream &t) : s(t) { curmode = SingleMode; lvalue = (string *)0; pos = 0; endofstream = false; getxmlchar(); getxmlchar(); getxmlchar(); getxmlchar(); // Fill lookahead buffer } XmlScan::~XmlScan(void) { clearlvalue(); } void XmlScan::clearlvalue(void) { if (lvalue != (string *)0) delete lvalue; } int4 XmlScan::scanSingle(void) { int4 res = getxmlchar(); if (res == '<') { if (isInitialNameChar(next(0))) return ElementBraceToken; return CommandBraceToken; } return res; } int4 XmlScan::scanCharData(void) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { // look for '<' '&' or ']]>' if (next(0) == '<') break; if (next(0) == '&') break; if (next(0) == ']') if (next(1)== ']') if (next(2)=='>') break; *lvalue += getxmlchar(); } if (lvalue->size()==0) return scanSingle(); return CharDataToken; } int4 XmlScan::scanCData(void) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { // 
Look for "]]>" and non-Char if (next(0)==']') if (next(1)==']') if (next(2)=='>') break; if (!isChar(next(0))) break; *lvalue += getxmlchar(); } return CDataToken; // CData can be empty } int4 XmlScan::scanCharRef(void) { int4 v; clearlvalue(); lvalue = new string(); if (next(0) == 'x') { *lvalue += getxmlchar(); while(next(0) != -1) { v = next(0); if (v < '0') break; if ((v>'9')&&(v<'A')) break; if ((v>'F')&&(v<'a')) break; if (v>'f') break; *lvalue += getxmlchar(); } if (lvalue->size()==1) return 'x'; // Must be at least 1 hex digit } else { while(next(0) != -1) { v = next(0); if (v<'0') break; if (v>'9') break; *lvalue += getxmlchar(); } if (lvalue->size()==0) return scanSingle(); } return CharRefToken; } int4 XmlScan::scanAttValue(int4 quote) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { if (next(0) == quote) break; if (next(0) == '<') break; if (next(0) == '&') break; *lvalue += getxmlchar(); } if (lvalue->size() == 0) return scanSingle(); return AttValueToken; } int4 XmlScan::scanComment(void) { clearlvalue(); lvalue = new string(); while(next(0) != -1) { if (next(0)=='-') if (next(1)=='-') break; if (!isChar(next(0))) break; *lvalue += getxmlchar(); } return CommentToken; } int4 XmlScan::scanName(void) { clearlvalue(); lvalue = new string(); if (!isInitialNameChar(next(0))) return scanSingle(); *lvalue += getxmlchar(); while(next(0) != -1) { if (!isNameChar(next(0))) break; *lvalue += getxmlchar(); } return NameToken; } int4 XmlScan::scanSName(void) { int4 whitecount = 0; while((next(0)==' ')||(next(0)=='\n')||(next(0)=='\r')||(next(0)=='\t')) { whitecount += 1; getxmlchar(); } clearlvalue(); lvalue = new string(); if (!isInitialNameChar(next(0))) { // First non-whitespace is not Name char if (whitecount > 0) return ' '; return scanSingle(); } *lvalue += getxmlchar(); while(next(0) != -1) { if (!isNameChar(next(0))) break; *lvalue += getxmlchar(); } if (whitecount>0) return SNameToken; return NameToken; } bool XmlScan::isInitialNameChar(int4 
val) { if (isLetter(val)) return true; if ((val=='_')||(val==':')) return true; return false; } bool XmlScan::isNameChar(int4 val) { if (isLetter(val)) return true; if ((val>='0')&&(val<='9')) return true; if ((val=='.')||(val=='-')||(val=='_')||(val==':')) return true; return false; } bool XmlScan::isChar(int4 val) { if (val>=0x20) return true; if ((val == 0xd)||(val==0xa)||(val==0x9)) return true; return false; } int4 XmlScan::nexttoken(void) { mode mymode = curmode; curmode = SingleMode; switch(mymode) { case CharDataMode: return scanCharData(); case CDataMode: return scanCData(); case AttValueSingleMode: return scanAttValue('\''); case AttValueDoubleMode: return scanAttValue('"'); case CommentMode: return scanComment(); case CharRefMode: return scanCharRef(); case NameMode: return scanName(); case SNameMode: return scanSName(); case SingleMode: return scanSingle(); } return -1; } void print_content(const string &str) { uint4 i; for(i=0;iignorableWhitespace(str.c_str(),0,str.size()); else handler->characters(str.c_str(),0,str.size()); } int4 convertEntityRef(const string &ref) { if (ref == "lt") return '<'; if (ref == "amp") return '&'; if (ref == "gt") return '>'; if (ref == "quot") return '"'; if (ref == "apos") return '\''; return -1; } int4 convertCharRef(const string &ref) { uint4 i; int4 mult,val,cur; if (ref[0]=='x') { i = 1; mult = 16; } else { i = 0; mult = 10; } val = 0; for(;inexttoken(); if (res>255) yylval.str = global_scan->lval(); return res; } int xmlerror(const char *str) { handler->setError(str); return 0; } int4 xml_parse(istream &i,ContentHandler *hand,int4 dbg) { #if YYDEBUG yydebug = dbg; #endif std::lock_guard global_scan_lock(global_scan_mutex); std::lock_guard handler_lock(handler_mutex); global_scan = new XmlScan(i); handler = hand; handler->startDocument(); int4 res = yyparse(); if (res == 0) handler->endDocument(); delete global_scan; return res; } void TreeHandler::startElement(const string &namespaceURI,const string &localName, 
const string &qualifiedName,const Attributes &atts) { Element *newel = new Element(cur); cur->addChild(newel); cur = newel; newel->setName(localName); for(int4 i=0;iaddAttribute(atts.getLocalName(i),atts.getValue(i)); } void TreeHandler::endElement(const string &namespaceURI,const string &localName, const string &qualifiedName) { cur = cur->getParent(); } void TreeHandler::characters(const char *text,int4 start,int4 length) { cur->addContent(text,start,length); } Element::~Element(void) { List::iterator iter; for(iter=children.begin();iter!=children.end();++iter) delete *iter; } const string &Element::getAttributeValue(const string &nm) const { for(uint4 i=0;igetName()] = el; } const Element *DocumentStorage::getTag(const string &nm) const { map::const_iterator iter; iter = tagmap.find(nm); if (iter != tagmap.end()) return (*iter).second; return (const Element *)0; } Document *xml_tree(istream &i) { Document *doc = new Document(); TreeHandler handle(doc); if (0!=xml_parse(i,&handle)) { delete doc; throw DecoderError(handle.getError()); } return doc; } void xml_escape(ostream &s,const char *str) { while(*str!='\0') { if (*str < '?') { if (*str=='<') s << "<"; else if (*str=='>') s << ">"; else if (*str=='&') s << "&"; else if (*str=='"') s << """; else if (*str=='\'') s << "'"; else s << *str; } else s << *str; str++; } } } // End namespace ghidra ================================================ FILE: pypcode/zlib/README.txt ================================================ The source files in this directory are copied from the zlib compression library, version 1.3.1 available from https://www.zlib.net/ . The source files here are only a subset of the complete zlib library. The files have not been changed except for the addition of a comment at the top of each file, noting its association with the zlib license and the version number. Within Ghidra, the zlib license is available (in both the source repository and distributions) in licenses/zlib_License.txt. 
Additionally the license appears at the top of zlib.h in this directory. ================================================ FILE: pypcode/zlib/adler32.c ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* adler32.c -- compute the Adler-32 checksum of a data stream * Copyright (C) 1995-2011, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #include "zutil.h" #define BASE 65521U /* largest prime smaller than 65536 */ #define NMAX 5552 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ #define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;} #define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); #define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); /* use NO_DIVIDE if your processor does not do division in hardware -- try it both ways to see which is faster */ #ifdef NO_DIVIDE /* note that this assumes BASE is 65521, where 65536 % 65521 == 15 (thank you to John Reiser for pointing this out) */ # define CHOP(a) \ do { \ unsigned long tmp = a >> 16; \ a &= 0xffffUL; \ a += (tmp << 4) - tmp; \ } while (0) # define MOD28(a) \ do { \ CHOP(a); \ if (a >= BASE) a -= BASE; \ } while (0) # define MOD(a) \ do { \ CHOP(a); \ MOD28(a); \ } while (0) # define MOD63(a) \ do { /* this assumes a is not negative */ \ z_off64_t tmp = a >> 32; \ a &= 0xffffffffL; \ a += (tmp << 8) - (tmp << 5) + tmp; \ tmp = a >> 16; \ a &= 0xffffL; \ a += (tmp << 4) - tmp; \ tmp = a >> 16; \ a &= 0xffffL; \ a += (tmp << 4) - tmp; \ if (a >= BASE) a -= BASE; \ } while (0) #else # define MOD(a) a %= BASE # define MOD28(a) a %= BASE # define MOD63(a) a %= BASE #endif /* ========================================================================= */ uLong ZEXPORT adler32_z(uLong adler, const Bytef *buf, z_size_t len) { unsigned long sum2; unsigned n; /* split Adler-32 into component sums */ sum2 = (adler >> 16) 
& 0xffff; adler &= 0xffff; /* in case user likes doing a byte at a time, keep it fast */ if (len == 1) { adler += buf[0]; if (adler >= BASE) adler -= BASE; sum2 += adler; if (sum2 >= BASE) sum2 -= BASE; return adler | (sum2 << 16); } /* initial Adler-32 value (deferred check for len == 1 speed) */ if (buf == Z_NULL) return 1L; /* in case short lengths are provided, keep it somewhat fast */ if (len < 16) { while (len--) { adler += *buf++; sum2 += adler; } if (adler >= BASE) adler -= BASE; MOD28(sum2); /* only added so many BASE's */ return adler | (sum2 << 16); } /* do length NMAX blocks -- requires just one modulo operation */ while (len >= NMAX) { len -= NMAX; n = NMAX / 16; /* NMAX is divisible by 16 */ do { DO16(buf); /* 16 sums unrolled */ buf += 16; } while (--n); MOD(adler); MOD(sum2); } /* do remaining bytes (less than NMAX, still just one modulo) */ if (len) { /* avoid modulos if none remaining */ while (len >= 16) { len -= 16; DO16(buf); buf += 16; } while (len--) { adler += *buf++; sum2 += adler; } MOD(adler); MOD(sum2); } /* return recombined sums */ return adler | (sum2 << 16); } /* ========================================================================= */ uLong ZEXPORT adler32(uLong adler, const Bytef *buf, uInt len) { return adler32_z(adler, buf, len); } /* ========================================================================= */ local uLong adler32_combine_(uLong adler1, uLong adler2, z_off64_t len2) { unsigned long sum1; unsigned long sum2; unsigned rem; /* for negative len, return invalid adler32 as a clue for debugging */ if (len2 < 0) return 0xffffffffUL; /* the derivation of this formula is left as an exercise for the reader */ MOD63(len2); /* assumes len2 >= 0 */ rem = (unsigned)len2; sum1 = adler1 & 0xffff; sum2 = rem * sum1; MOD(sum2); sum1 += (adler2 & 0xffff) + BASE - 1; sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; if (sum1 >= BASE) sum1 -= BASE; if (sum1 >= BASE) sum1 -= BASE; if (sum2 >= ((unsigned 
long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1); if (sum2 >= BASE) sum2 -= BASE; return sum1 | (sum2 << 16); } /* ========================================================================= */ uLong ZEXPORT adler32_combine(uLong adler1, uLong adler2, z_off_t len2) { return adler32_combine_(adler1, adler2, len2); } uLong ZEXPORT adler32_combine64(uLong adler1, uLong adler2, z_off64_t len2) { return adler32_combine_(adler1, adler2, len2); } ================================================ FILE: pypcode/zlib/deflate.c ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* deflate.c -- compress data using the deflation algorithm * Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process depends on being able to identify portions * of the input text which are identical to earlier input (within a * sliding window trailing behind the input currently being processed). * * The most straightforward technique turns out to be the fastest for * most input files: try all possible matches and select the longest. * The key feature of this algorithm is that insertions into the string * dictionary are very simple and thus fast, and deletions are avoided * completely. Insertions are performed at each input character, whereas * string matches are performed only when the previous match ends. So it * is preferable to spend more time in matches to allow very fast string * insertions and avoid deletions. The matching algorithm for small * strings is inspired from that of Rabin & Karp. A brute force approach * is used to find longer strings when a small match has been found. * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze * (by Leonid Broukhis). 
* A previous version of this file used a more sophisticated algorithm * (by Fiala and Greene) which is guaranteed to run in linear amortized * time, but has a larger average cost, uses more memory and is patented. * However the F&G algorithm may be faster for some highly redundant * files if the parameter max_chain_length (described below) is too large. * * ACKNOWLEDGEMENTS * * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and * I found it in 'freeze' written by Leonid Broukhis. * Thanks to many people for bug reports and testing. * * REFERENCES * * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". * Available in http://tools.ietf.org/html/rfc1951 * * A description of the Rabin and Karp algorithm is given in the book * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. * * Fiala,E.R., and Greene,D.H. * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 * */ /* @(#) $Id$ */ #include "deflate.h" const char deflate_copyright[] = " deflate 1.3.1 Copyright 1995-2024 Jean-loup Gailly and Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. */ typedef enum { need_more, /* block not completed, need more input or more output */ block_done, /* block flush performed */ finish_started, /* finish started, need only more output at next deflate */ finish_done /* finish done, accept no more input or output */ } block_state; typedef block_state (*compress_func)(deflate_state *s, int flush); /* Compression function. Returns the block state after the call. 
*/ local block_state deflate_stored(deflate_state *s, int flush); local block_state deflate_fast(deflate_state *s, int flush); #ifndef FASTEST local block_state deflate_slow(deflate_state *s, int flush); #endif local block_state deflate_rle(deflate_state *s, int flush); local block_state deflate_huff(deflate_state *s, int flush); /* =========================================================================== * Local data */ #define NIL 0 /* Tail of hash chains */ #ifndef TOO_FAR # define TOO_FAR 4096 #endif /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ /* Values for max_lazy_match, good_match and max_chain_length, depending on * the desired pack level (0..9). The values given below have been tuned to * exclude worst case performance for pathological files. Better values may be * found for specific files. */ typedef struct config_s { ush good_length; /* reduce lazy search above this match length */ ush max_lazy; /* do not perform lazy search above this match length */ ush nice_length; /* quit search above this match length */ ush max_chain; compress_func func; } config; #ifdef FASTEST local const config configuration_table[2] = { /* good lazy nice chain */ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ /* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ #else local const config configuration_table[10] = { /* good lazy nice chain */ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ /* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ /* 2 */ {4, 5, 16, 8, deflate_fast}, /* 3 */ {4, 6, 32, 32, deflate_fast}, /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ /* 5 */ {8, 16, 32, 32, deflate_slow}, /* 6 */ {8, 16, 128, 128, deflate_slow}, /* 7 */ {8, 32, 128, 256, deflate_slow}, /* 8 */ {32, 128, 258, 1024, deflate_slow}, /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ #endif /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 * For deflate_fast() 
(levels <= 3) good is ignored and lazy has a different * meaning. */ /* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ #define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0)) /* =========================================================================== * Update a hash value with the given input byte * IN assertion: all calls to UPDATE_HASH are made with consecutive input * characters, so that a running hash key can be computed from the previous * key instead of complete recalculation each time. */ #define UPDATE_HASH(s,h,c) (h = (((h) << s->hash_shift) ^ (c)) & s->hash_mask) /* =========================================================================== * Insert string str in the dictionary and set match_head to the previous head * of the hash chain (the most recent string with same hash key). Return * the previous length of the hash chain. * If this file is compiled with -DFASTEST, the compression level is forced * to 1, and no hash chains are maintained. * IN assertion: all calls to INSERT_STRING are made with consecutive input * characters and the first MIN_MATCH bytes of str are valid (except for * the last MIN_MATCH-1 bytes of the input file). */ #ifdef FASTEST #define INSERT_STRING(s, str, match_head) \ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ match_head = s->head[s->ins_h], \ s->head[s->ins_h] = (Pos)(str)) #else #define INSERT_STRING(s, str, match_head) \ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ s->head[s->ins_h] = (Pos)(str)) #endif /* =========================================================================== * Initialize the hash table (avoiding 64K overflow for 16 bit systems). * prev[] will be initialized on the fly. 
 */
/* CLEAR_HASH resets every head[] chain to NIL (empty); prev[] is deliberately
 * left alone because it is rebuilt on the fly as strings are inserted. */
#define CLEAR_HASH(s) \
    do { \
        s->head[s->hash_size - 1] = NIL; \
        zmemzero((Bytef *)s->head, \
                 (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
    } while (0)

/* ===========================================================================
 * Slide the hash table when sliding the window down (could be avoided with 32
 * bit values at the expense of memory usage). We slide even when level == 0 to
 * keep the hash table consistent if we switch back to level > 0 later.
 */
#if defined(__has_feature)
#  if __has_feature(memory_sanitizer)
     __attribute__((no_sanitize("memory")))
#  endif
#endif
local void slide_hash(deflate_state *s) {
    unsigned n, m;
    Posf *p;
    uInt wsize = s->w_size;

    /* Walk head[] backwards, rebasing each stored position by one window. */
    n = s->hash_size;
    p = &s->head[n];
    do {
        m = *--p;
        *p = (Pos)(m >= wsize ? m - wsize : NIL);
    } while (--n);
    n = wsize;
#ifndef FASTEST
    /* Rebase the prev[] chain links the same way. */
    p = &s->prev[n];
    do {
        m = *--p;
        *p = (Pos)(m >= wsize ? m - wsize : NIL);
        /* If n is not on any hash chain, prev[n] is garbage but
         * its value will never be used.
         */
    } while (--n);
#endif
}

/* ===========================================================================
 * Read a new buffer from the current input stream, update the adler32
 * and total number of bytes read.  All deflate() input goes through
 * this function so some applications may wish to modify it to avoid
 * allocating a large strm->next_in buffer and copying from it.
 * (See also flush_pending()).
 */
local unsigned read_buf(z_streamp strm, Bytef *buf, unsigned size) {
    unsigned len = strm->avail_in;

    if (len > size) len = size;
    if (len == 0) return 0;

    strm->avail_in -= len;

    zmemcpy(buf, strm->next_in, len);
    /* Keep the running checksum current: adler32 for a zlib wrapper,
     * crc32 for a gzip wrapper (wrap == 2), nothing for raw deflate. */
    if (strm->state->wrap == 1) {
        strm->adler = adler32(strm->adler, buf, len);
    }
#ifdef GZIP
    else if (strm->state->wrap == 2) {
        strm->adler = crc32(strm->adler, buf, len);
    }
#endif
    strm->next_in += len;
    strm->total_in += len;

    return len;
}

/* ===========================================================================
 * Fill the window when the lookahead becomes insufficient.
 * Updates strstart and lookahead.
* * IN assertion: lookahead < MIN_LOOKAHEAD * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD * At least one byte has been read, or avail_in == 0; reads are * performed for at least two bytes (required for the zip translate_eol * option -- not supported here). */ local void fill_window(deflate_state *s) { unsigned n; unsigned more; /* Amount of free space at the end of the window. */ uInt wsize = s->w_size; Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); do { more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); /* Deal with !@#$% 64K limit: */ if (sizeof(int) <= 2) { if (more == 0 && s->strstart == 0 && s->lookahead == 0) { more = wsize; } else if (more == (unsigned)(-1)) { /* Very unlikely, but possible on 16 bit machine if * strstart == 0 && lookahead == 1 (input done a byte at time) */ more--; } } /* If the window is almost full and there is insufficient lookahead, * move the upper half to the lower one to make room in the upper half. */ if (s->strstart >= wsize + MAX_DIST(s)) { zmemcpy(s->window, s->window + wsize, (unsigned)wsize - more); s->match_start -= wsize; s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ s->block_start -= (long) wsize; if (s->insert > s->strstart) s->insert = s->strstart; slide_hash(s); more += wsize; } if (s->strm->avail_in == 0) break; /* If there was no sliding: * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && * more == window_size - lookahead - strstart * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) * => more >= window_size - 2*WSIZE + 2 * In the BIG_MEM or MMAP case (not yet supported), * window_size == input_size + MIN_LOOKAHEAD && * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. * Otherwise, window_size == 2*WSIZE so more >= 2. * If there was sliding, more >= WSIZE. So in all cases, more >= 2. 
*/ Assert(more >= 2, "more < 2"); n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); s->lookahead += n; /* Initialize the hash value now that we have some input: */ if (s->lookahead + s->insert >= MIN_MATCH) { uInt str = s->strstart - s->insert; s->ins_h = s->window[str]; UPDATE_HASH(s, s->ins_h, s->window[str + 1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif while (s->insert) { UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); #ifndef FASTEST s->prev[str & s->w_mask] = s->head[s->ins_h]; #endif s->head[s->ins_h] = (Pos)str; str++; s->insert--; if (s->lookahead + s->insert < MIN_MATCH) break; } } /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, * but this is not important since only literal bytes will be emitted. */ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); /* If the WIN_INIT bytes after the end of the current data have never been * written, then zero those bytes in order to avoid memory check reports of * the use of uninitialized (or uninitialised as Julian writes) bytes by * the longest match routines. Update the high water mark for the next * time through here. WIN_INIT is set to MAX_MATCH since the longest match * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. */ if (s->high_water < s->window_size) { ulg curr = s->strstart + (ulg)(s->lookahead); ulg init; if (s->high_water < curr) { /* Previous high water mark below current data -- zero WIN_INIT * bytes or up to end of window, whichever is less. */ init = s->window_size - curr; if (init > WIN_INIT) init = WIN_INIT; zmemzero(s->window + curr, (unsigned)init); s->high_water = curr + init; } else if (s->high_water < (ulg)curr + WIN_INIT) { /* High water mark at or above current data, but below current data * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up * to end of window, whichever is less. 
*/ init = (ulg)curr + WIN_INIT - s->high_water; if (init > s->window_size - s->high_water) init = s->window_size - s->high_water; zmemzero(s->window + s->high_water, (unsigned)init); s->high_water += init; } } Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, "not enough room for search"); } /* ========================================================================= */ int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version, int stream_size) { return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, version, stream_size); /* To do: ignore strm->next_in if we use it as window */ } /* ========================================================================= */ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy, const char *version, int stream_size) { deflate_state *s; int wrap = 1; static const char my_version[] = ZLIB_VERSION; if (version == Z_NULL || version[0] != my_version[0] || stream_size != sizeof(z_stream)) { return Z_VERSION_ERROR; } if (strm == Z_NULL) return Z_STREAM_ERROR; strm->msg = Z_NULL; if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif #ifdef FASTEST if (level != 0) level = 1; #else if (level == Z_DEFAULT_COMPRESSION) level = 6; #endif if (windowBits < 0) { /* suppress zlib wrapper */ wrap = 0; if (windowBits < -15) return Z_STREAM_ERROR; windowBits = -windowBits; } #ifdef GZIP else if (windowBits > 15) { wrap = 2; /* write gzip wrapper instead */ windowBits -= 16; } #endif if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) { return Z_STREAM_ERROR; } if (windowBits == 8) windowBits = 9; /* 
until 256-byte window bug fixed */ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); if (s == Z_NULL) return Z_MEM_ERROR; strm->state = (struct internal_state FAR *)s; s->strm = strm; s->status = INIT_STATE; /* to pass state test in deflateReset() */ s->wrap = wrap; s->gzhead = Z_NULL; s->w_bits = (uInt)windowBits; s->w_size = 1 << s->w_bits; s->w_mask = s->w_size - 1; s->hash_bits = (uInt)memLevel + 7; s->hash_size = 1 << s->hash_bits; s->hash_mask = s->hash_size - 1; s->hash_shift = ((s->hash_bits + MIN_MATCH-1) / MIN_MATCH); s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); s->high_water = 0; /* nothing written to s->window yet */ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ /* We overlay pending_buf and sym_buf. This works since the average size * for length/distance pairs over any compressed block is assured to be 31 * bits or less. * * Analysis: The longest fixed codes are a length code of 8 bits plus 5 * extra bits, for lengths 131 to 257. The longest fixed distance codes are * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest * possible fixed-codes length/distance pair is then 31 bits total. * * sym_buf starts one-fourth of the way into pending_buf. So there are * three bytes in sym_buf for every four bytes in pending_buf. Each symbol * in sym_buf is three bytes -- two for the distance and one for the * literal/length. As each symbol is consumed, the pointer to the next * sym_buf value to read moves forward three bytes. From that symbol, up to * 31 bits are written to pending_buf. The closest the written pending_buf * bits gets to the next sym_buf symbol to read is just before the last * code is written. At that time, 31*(n - 2) bits have been written, just * after 24*(n - 2) bits have been consumed from sym_buf. sym_buf starts at * 8*n bits into pending_buf. 
(Note that the symbol buffer fills when n - 1 * symbols are written.) The closest the writing gets to what is unread is * then n + 14 bits. Here n is lit_bufsize, which is 16384 by default, and * can range from 128 to 32768. * * Therefore, at a minimum, there are 142 bits of space between what is * written and what is read in the overlain buffers, so the symbols cannot * be overwritten by the compressed data. That space is actually 139 bits, * due to the three-bit fixed-code block header. * * That covers the case where either Z_FIXED is specified, forcing fixed * codes, or when the use of fixed codes is chosen, because that choice * results in a smaller compressed block than dynamic codes. That latter * condition then assures that the above analysis also covers all dynamic * blocks. A dynamic-code block will only be chosen to be emitted if it has * fewer bits than a fixed-code block would for the same set of symbols. * Therefore its average symbol length is assured to be less than 31. So * the compressed data for a dynamic block also cannot overwrite the * symbols from which it is being constructed. */ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, LIT_BUFS); s->pending_buf_size = (ulg)s->lit_bufsize * 4; if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || s->pending_buf == Z_NULL) { s->status = FINISH_STATE; strm->msg = ERR_MSG(Z_MEM_ERROR); deflateEnd (strm); return Z_MEM_ERROR; } #ifdef LIT_MEM s->d_buf = (ushf *)(s->pending_buf + (s->lit_bufsize << 1)); s->l_buf = s->pending_buf + (s->lit_bufsize << 2); s->sym_end = s->lit_bufsize - 1; #else s->sym_buf = s->pending_buf + s->lit_bufsize; s->sym_end = (s->lit_bufsize - 1) * 3; #endif /* We avoid equality with lit_bufsize*3 because of wraparound at 64K * on 16 bit machines and because stored blocks are restricted to * 64K-1 bytes. 
*/
    s->level = level;
    s->strategy = strategy;
    s->method = (Byte)method;

    /* Put the freshly configured state into its initial (reset) condition. */
    return deflateReset(strm);
}

/* =========================================================================
 * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
 */
local int deflateStateCheck(z_streamp strm) {
    deflate_state *s;

    /* The stream and its allocation callbacks must all be present. */
    if (strm == Z_NULL ||
        strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0)
        return 1;
    s = strm->state;
    /* The internal state must point back at this stream and be in one of
     * the states that the deflate API can legitimately be called in. */
    if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE &&
#ifdef GZIP
                                           s->status != GZIP_STATE &&
#endif
                                           s->status != EXTRA_STATE &&
                                           s->status != NAME_STATE &&
                                           s->status != COMMENT_STATE &&
                                           s->status != HCRC_STATE &&
                                           s->status != BUSY_STATE &&
                                           s->status != FINISH_STATE))
        return 1;
    return 0;
}

/* ========================================================================= */
int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef *dictionary,
                                 uInt dictLength) {
    deflate_state *s;
    uInt str, n;
    int wrap;                       /* saved wrap mode, restored on exit */
    unsigned avail;                 /* saved strm->avail_in */
    z_const unsigned char *next;    /* saved strm->next_in */

    if (deflateStateCheck(strm) || dictionary == Z_NULL)
        return Z_STREAM_ERROR;
    s = strm->state;
    wrap = s->wrap;
    /* No dictionary for gzip streams, after compression has started for
     * zlib streams, or while lookahead bytes are already buffered. */
    if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead)
        return Z_STREAM_ERROR;

    /* when using zlib wrappers, compute Adler-32 for provided dictionary */
    if (wrap == 1)
        strm->adler = adler32(strm->adler, dictionary, dictLength);
    s->wrap = 0;                    /* avoid computing Adler-32 in read_buf */

    /* if dictionary would fill window, just replace the history */
    if (dictLength >= s->w_size) {
        if (wrap == 0) {            /* already empty otherwise */
            CLEAR_HASH(s);
            s->strstart = 0;
            s->block_start = 0L;
            s->insert = 0;
        }
        dictionary += dictLength - s->w_size;  /* use the tail */
        dictLength = s->w_size;
    }

    /* insert dictionary into window and hash */
    avail = strm->avail_in;
    next = strm->next_in;
    strm->avail_in = dictLength;
    strm->next_in = (z_const Bytef *)dictionary;
    fill_window(s);
    while (s->lookahead >= MIN_MATCH) {
        str = s->strstart;
        n = s->lookahead - (MIN_MATCH-1);
        do {
            UPDATE_HASH(s, s->ins_h,
s->window[str + MIN_MATCH-1]);
#ifndef FASTEST
            s->prev[str & s->w_mask] = s->head[s->ins_h];
#endif
            s->head[s->ins_h] = (Pos)str;
            str++;
        } while (--n);
        s->strstart = str;
        s->lookahead = MIN_MATCH-1;
        fill_window(s);
    }
    /* Account for the inserted dictionary bytes, then restore the caller's
     * input pointers and the saved wrap mode. */
    s->strstart += s->lookahead;
    s->block_start = (long)s->strstart;
    s->insert = s->lookahead;
    s->lookahead = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    strm->next_in = next;
    strm->avail_in = avail;
    s->wrap = wrap;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef *dictionary,
                                 uInt *dictLength) {
    deflate_state *s;
    uInt len;

    if (deflateStateCheck(strm))
        return Z_STREAM_ERROR;
    s = strm->state;
    /* The current dictionary is the most recent w_size (or fewer) bytes
     * held in the sliding window. */
    len = s->strstart + s->lookahead;
    if (len > s->w_size)
        len = s->w_size;
    if (dictionary != Z_NULL && len)
        zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len);
    if (dictLength != Z_NULL)
        *dictLength = len;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateResetKeep(z_streamp strm) {
    deflate_state *s;

    if (deflateStateCheck(strm)) {
        return Z_STREAM_ERROR;
    }

    strm->total_in = strm->total_out = 0;
    strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */
    strm->data_type = Z_UNKNOWN;

    s = (deflate_state *)strm->state;
    s->pending = 0;
    s->pending_out = s->pending_buf;

    if (s->wrap < 0) {
        s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */
    }
    /* Start in the header-writing state that matches the wrapper mode, and
     * initialize the running check value accordingly. */
    s->status =
#ifdef GZIP
        s->wrap == 2 ? GZIP_STATE :
#endif
        INIT_STATE;
    strm->adler =
#ifdef GZIP
        s->wrap == 2 ? crc32(0L, Z_NULL, 0) :
#endif
        adler32(0L, Z_NULL, 0);
    s->last_flush = -2;
    _tr_init(s);
    return Z_OK;
}

/* ===========================================================================
 * Initialize the "longest match" routines for a new zlib stream
 */
local void lm_init(deflate_state *s) {
    s->window_size = (ulg)2L*s->w_size;

    CLEAR_HASH(s);

    /* Set the default configuration parameters:
     */
    s->max_lazy_match   = configuration_table[s->level].max_lazy;
    s->good_match       = configuration_table[s->level].good_length;
    s->nice_match       = configuration_table[s->level].nice_length;
    s->max_chain_length = configuration_table[s->level].max_chain;

    s->strstart = 0;
    s->block_start = 0L;
    s->lookahead = 0;
    s->insert = 0;
    s->match_length = s->prev_length = MIN_MATCH-1;
    s->match_available = 0;
    s->ins_h = 0;
}

/* ========================================================================= */
int ZEXPORT deflateReset(z_streamp strm) {
    int ret;

    ret = deflateResetKeep(strm);
    if (ret == Z_OK)
        lm_init(strm->state);
    return ret;
}

/* ========================================================================= */
int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head) {
    /* A custom header can only be supplied for a gzip-wrapped stream. */
    if (deflateStateCheck(strm) || strm->state->wrap != 2)
        return Z_STREAM_ERROR;
    strm->state->gzhead = head;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) {
    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    /* Either output pointer may be NULL if the caller is not interested. */
    if (pending != Z_NULL)
        *pending = strm->state->pending;
    if (bits != Z_NULL)
        *bits = strm->state->bi_valid;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflatePrime(z_streamp strm, int bits, int value) {
    deflate_state *s;
    int put;

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    s = strm->state;
    /* Refuse if the inserted bits could overwrite unread symbol data in the
     * overlaid pending buffer. */
#ifdef LIT_MEM
    if (bits < 0 || bits > 16 ||
        (uchf *)s->d_buf < s->pending_out + ((Buf_size + 7) >> 3))
        return Z_BUF_ERROR;
#else
    if (bits < 0 || bits > 16 ||
        s->sym_buf < s->pending_out + ((Buf_size + 7) >> 3))
        return Z_BUF_ERROR;
#endif
    /* Feed the value into the bit buffer, at most Buf_size bits per pass. */
    do {
        put = Buf_size - s->bi_valid;
        if (put > bits)
            put = bits;
        s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid);
        s->bi_valid += put;
        _tr_flush_bits(s);
        value >>= put;
        bits -= put;
    } while (bits);
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateParams(z_streamp strm, int level, int strategy) {
    deflate_state *s;
    compress_func func;

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    s = strm->state;

#ifdef FASTEST
    if (level != 0) level = 1;
#else
    if (level == Z_DEFAULT_COMPRESSION) level = 6;
#endif
    if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) {
        return Z_STREAM_ERROR;
    }
    func = configuration_table[s->level].func;

    if ((strategy != s->strategy || func != configuration_table[level].func) &&
        s->last_flush != -2) {
        /* Flush the last buffer: */
        int err = deflate(strm, Z_BLOCK);
        if (err == Z_STREAM_ERROR)
            return err;
        if (strm->avail_in || (s->strstart - s->block_start) + s->lookahead)
            return Z_BUF_ERROR;
    }
    if (s->level != level) {
        if (s->level == 0 && s->matches != 0) {
            /* When leaving level 0, bring the hash table up to date: one
             * recorded slide can be replayed; two or more are equivalent
             * to clearing the table. */
            if (s->matches == 1)
                slide_hash(s);
            else
                CLEAR_HASH(s);
            s->matches = 0;
        }
        s->level = level;
        s->max_lazy_match   = configuration_table[level].max_lazy;
        s->good_match       = configuration_table[level].good_length;
        s->nice_match       = configuration_table[level].nice_length;
        s->max_chain_length = configuration_table[level].max_chain;
    }
    s->strategy = strategy;
    return Z_OK;
}

/* ========================================================================= */
int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy,
                        int nice_length, int max_chain) {
    deflate_state *s;

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
    s = strm->state;
    /* Override the configuration-table tuning knobs directly. */
    s->good_match = (uInt)good_length;
    s->max_lazy_match = (uInt)max_lazy;
    s->nice_match = nice_length;
    s->max_chain_length = (uInt)max_chain;
    return Z_OK;
}

/*
========================================================================= * For the default windowBits of 15 and memLevel of 8, this function returns a * close to exact, as well as small, upper bound on the compressed size. This * is an expansion of ~0.03%, plus a small constant. * * For any setting other than those defaults for windowBits and memLevel, one * of two worst case bounds is returned. This is at most an expansion of ~4% or * ~13%, plus a small constant. * * Both the 0.03% and 4% derive from the overhead of stored blocks. The first * one is for stored blocks of 16383 bytes (memLevel == 8), whereas the second * is for stored blocks of 127 bytes (the worst case memLevel == 1). The * expansion results from five bytes of header for each stored block. * * The larger expansion of 13% results from a window size less than or equal to * the symbols buffer size (windowBits <= memLevel + 7). In that case some of * the data being compressed may have slid out of the sliding window, impeding * a stored block from being emitted. Then the only choice is a fixed or * dynamic block, where a fixed block limits the maximum expansion to 9 bits * per 8-bit byte, plus 10 bits for every block. The smallest block size for * which this can occur is 255 (memLevel == 2). * * Shifts are used to approximate divisions, for speed. 
*/
uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
    deflate_state *s;
    uLong fixedlen, storelen, wraplen;

    /* upper bound for fixed blocks with 9-bit literals and length 255
       (memLevel == 2, which is the lowest that may not use stored blocks) --
       ~13% overhead plus a small constant */
    fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
               (sourceLen >> 9) + 4;

    /* upper bound for stored blocks with length 127 (memLevel == 1) --
       ~4% overhead plus a small constant */
    storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
               (sourceLen >> 11) + 7;

    /* if can't get parameters, return larger bound plus a zlib wrapper */
    if (deflateStateCheck(strm))
        return (fixedlen > storelen ? fixedlen : storelen) + 6;

    /* compute wrapper length */
    s = strm->state;
    switch (s->wrap) {
    case 0:                                 /* raw deflate */
        wraplen = 0;
        break;
    case 1:                                 /* zlib wrapper */
        wraplen = 6 + (s->strstart ? 4 : 0);
        break;
#ifdef GZIP
    case 2:                                 /* gzip wrapper */
        wraplen = 18;
        if (s->gzhead != Z_NULL) {          /* user-supplied gzip header */
            Bytef *str;
            if (s->gzhead->extra != Z_NULL)
                wraplen += 2 + s->gzhead->extra_len;
            str = s->gzhead->name;
            if (str != Z_NULL)
                do {
                    wraplen++;
                } while (*str++);
            str = s->gzhead->comment;
            if (str != Z_NULL)
                do {
                    wraplen++;
                } while (*str++);
            if (s->gzhead->hcrc)
                wraplen += 2;
        }
        break;
#endif
    default:                                /* for compiler happiness */
        wraplen = 6;
    }

    /* if not default parameters, return one of the conservative bounds */
    if (s->w_bits != 15 || s->hash_bits != 8 + 7)
        return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) +
               wraplen;

    /* default settings: return tight bound for that case -- ~0.03% overhead
       plus a small constant */
    return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
           (sourceLen >> 25) + 13 - 6 + wraplen;
}

/* =========================================================================
 * Put a short in the pending buffer. The 16-bit value is put in MSB order.
 * IN assertion: the stream state is correct and there is enough room in
 * pending_buf.
 */
local void putShortMSB(deflate_state *s, uInt b) {
    put_byte(s, (Byte)(b >> 8));
    put_byte(s, (Byte)(b & 0xff));
}

/* =========================================================================
 * Flush as much pending output as possible. All deflate() output, except for
 * some deflate_stored() output, goes through this function so some
 * applications may wish to modify it to avoid allocating a large
 * strm->next_out buffer and copying into it. (See also read_buf()).
 */
local void flush_pending(z_streamp strm) {
    unsigned len;
    deflate_state *s = strm->state;

    _tr_flush_bits(s);
    /* Copy no more than the caller's remaining output space; any leftover
     * stays in pending_buf for a later call. */
    len = s->pending;
    if (len > strm->avail_out) len = strm->avail_out;
    if (len == 0) return;

    zmemcpy(strm->next_out, s->pending_out, len);
    strm->next_out  += len;
    s->pending_out  += len;
    strm->total_out += len;
    strm->avail_out -= len;
    s->pending      -= len;
    if (s->pending == 0) {
        s->pending_out = s->pending_buf;
    }
}

/* ===========================================================================
 * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1].
*/ #define HCRC_UPDATE(beg) \ do { \ if (s->gzhead->hcrc && s->pending > (beg)) \ strm->adler = crc32(strm->adler, s->pending_buf + (beg), \ s->pending - (beg)); \ } while (0) /* ========================================================================= */ int ZEXPORT deflate(z_streamp strm, int flush) { int old_flush; /* value of flush param for previous deflate call */ deflate_state *s; if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) { return Z_STREAM_ERROR; } s = strm->state; if (strm->next_out == Z_NULL || (strm->avail_in != 0 && strm->next_in == Z_NULL) || (s->status == FINISH_STATE && flush != Z_FINISH)) { ERR_RETURN(strm, Z_STREAM_ERROR); } if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); old_flush = s->last_flush; s->last_flush = flush; /* Flush as much pending output as possible */ if (s->pending != 0) { flush_pending(strm); if (strm->avail_out == 0) { /* Since avail_out is 0, deflate will be called again with * more output space, but possibly with both pending and * avail_in equal to zero. There won't be anything to do, * but this is not an error situation so make sure we * return OK instead of BUF_ERROR at next call of deflate: */ s->last_flush = -1; return Z_OK; } /* Make sure there is something to do and avoid duplicate consecutive * flushes. For repeated and useless calls with Z_FINISH, we keep * returning Z_STREAM_END instead of Z_BUF_ERROR. 
*/ } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && flush != Z_FINISH) { ERR_RETURN(strm, Z_BUF_ERROR); } /* User must not provide more input after the first FINISH: */ if (s->status == FINISH_STATE && strm->avail_in != 0) { ERR_RETURN(strm, Z_BUF_ERROR); } /* Write the header */ if (s->status == INIT_STATE && s->wrap == 0) s->status = BUSY_STATE; if (s->status == INIT_STATE) { /* zlib header */ uInt header = (Z_DEFLATED + ((s->w_bits - 8) << 4)) << 8; uInt level_flags; if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) level_flags = 0; else if (s->level < 6) level_flags = 1; else if (s->level == 6) level_flags = 2; else level_flags = 3; header |= (level_flags << 6); if (s->strstart != 0) header |= PRESET_DICT; header += 31 - (header % 31); putShortMSB(s, header); /* Save the adler32 of the preset dictionary: */ if (s->strstart != 0) { putShortMSB(s, (uInt)(strm->adler >> 16)); putShortMSB(s, (uInt)(strm->adler & 0xffff)); } strm->adler = adler32(0L, Z_NULL, 0); s->status = BUSY_STATE; /* Compression must start with an empty pending buffer */ flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } #ifdef GZIP if (s->status == GZIP_STATE) { /* gzip header */ strm->adler = crc32(0L, Z_NULL, 0); put_byte(s, 31); put_byte(s, 139); put_byte(s, 8); if (s->gzhead == Z_NULL) { put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); put_byte(s, s->level == 9 ? 2 : (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 4 : 0)); put_byte(s, OS_CODE); s->status = BUSY_STATE; /* Compression must start with an empty pending buffer */ flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } else { put_byte(s, (s->gzhead->text ? 1 : 0) + (s->gzhead->hcrc ? 2 : 0) + (s->gzhead->extra == Z_NULL ? 0 : 4) + (s->gzhead->name == Z_NULL ? 0 : 8) + (s->gzhead->comment == Z_NULL ? 
0 : 16) ); put_byte(s, (Byte)(s->gzhead->time & 0xff)); put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); put_byte(s, s->level == 9 ? 2 : (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 4 : 0)); put_byte(s, s->gzhead->os & 0xff); if (s->gzhead->extra != Z_NULL) { put_byte(s, s->gzhead->extra_len & 0xff); put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); } if (s->gzhead->hcrc) strm->adler = crc32(strm->adler, s->pending_buf, s->pending); s->gzindex = 0; s->status = EXTRA_STATE; } } if (s->status == EXTRA_STATE) { if (s->gzhead->extra != Z_NULL) { ulg beg = s->pending; /* start of bytes to update crc */ uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex; while (s->pending + left > s->pending_buf_size) { uInt copy = s->pending_buf_size - s->pending; zmemcpy(s->pending_buf + s->pending, s->gzhead->extra + s->gzindex, copy); s->pending = s->pending_buf_size; HCRC_UPDATE(beg); s->gzindex += copy; flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } beg = 0; left -= copy; } zmemcpy(s->pending_buf + s->pending, s->gzhead->extra + s->gzindex, left); s->pending += left; HCRC_UPDATE(beg); s->gzindex = 0; } s->status = NAME_STATE; } if (s->status == NAME_STATE) { if (s->gzhead->name != Z_NULL) { ulg beg = s->pending; /* start of bytes to update crc */ int val; do { if (s->pending == s->pending_buf_size) { HCRC_UPDATE(beg); flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } beg = 0; } val = s->gzhead->name[s->gzindex++]; put_byte(s, val); } while (val != 0); HCRC_UPDATE(beg); s->gzindex = 0; } s->status = COMMENT_STATE; } if (s->status == COMMENT_STATE) { if (s->gzhead->comment != Z_NULL) { ulg beg = s->pending; /* start of bytes to update crc */ int val; do { if (s->pending == s->pending_buf_size) { HCRC_UPDATE(beg); flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } beg = 0; } val = 
s->gzhead->comment[s->gzindex++]; put_byte(s, val); } while (val != 0); HCRC_UPDATE(beg); } s->status = HCRC_STATE; } if (s->status == HCRC_STATE) { if (s->gzhead->hcrc) { if (s->pending + 2 > s->pending_buf_size) { flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } put_byte(s, (Byte)(strm->adler & 0xff)); put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); strm->adler = crc32(0L, Z_NULL, 0); } s->status = BUSY_STATE; /* Compression must start with an empty pending buffer */ flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } #endif /* Start a new block or continue the current one. */ if (strm->avail_in != 0 || s->lookahead != 0 || (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { block_state bstate; bstate = s->level == 0 ? deflate_stored(s, flush) : s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : s->strategy == Z_RLE ? deflate_rle(s, flush) : (*(configuration_table[s->level].func))(s, flush); if (bstate == finish_started || bstate == finish_done) { s->status = FINISH_STATE; } if (bstate == need_more || bstate == finish_started) { if (strm->avail_out == 0) { s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ } return Z_OK; /* If flush != Z_NO_FLUSH && avail_out == 0, the next call * of deflate should use the same flush parameter to make sure * that the flush is complete. So we don't have to output an * empty block here, this will be done at next call. This also * ensures that for a very small output buffer, we emit at most * one empty block. */ } if (bstate == block_done) { if (flush == Z_PARTIAL_FLUSH) { _tr_align(s); } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ _tr_stored_block(s, (char*)0, 0L, 0); /* For a full flush, this empty block will be recognized * as a special marker by inflate_sync(). 
*/
                if (flush == Z_FULL_FLUSH) {
                    CLEAR_HASH(s);             /* forget history */
                    if (s->lookahead == 0) {
                        s->strstart = 0;
                        s->block_start = 0L;
                        s->insert = 0;
                    }
                }
            }
            flush_pending(strm);
            if (strm->avail_out == 0) {
              s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */
              return Z_OK;
            }
        }
    }

    if (flush != Z_FINISH) return Z_OK;
    if (s->wrap <= 0) return Z_STREAM_END;

    /* Write the trailer */
#ifdef GZIP
    if (s->wrap == 2) {
        /* gzip trailer: CRC-32 then total input length, little-endian. */
        put_byte(s, (Byte)(strm->adler & 0xff));
        put_byte(s, (Byte)((strm->adler >> 8) & 0xff));
        put_byte(s, (Byte)((strm->adler >> 16) & 0xff));
        put_byte(s, (Byte)((strm->adler >> 24) & 0xff));
        put_byte(s, (Byte)(strm->total_in & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 8) & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 16) & 0xff));
        put_byte(s, (Byte)((strm->total_in >> 24) & 0xff));
    }
    else
#endif
    {
        /* zlib trailer: Adler-32, most-significant short first. */
        putShortMSB(s, (uInt)(strm->adler >> 16));
        putShortMSB(s, (uInt)(strm->adler & 0xffff));
    }
    flush_pending(strm);
    /* If avail_out is zero, the application will call deflate again
     * to flush the rest.
     */
    if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */
    return s->pending != 0 ? Z_OK : Z_STREAM_END;
}

/* ========================================================================= */
int ZEXPORT deflateEnd(z_streamp strm) {
    int status;

    if (deflateStateCheck(strm)) return Z_STREAM_ERROR;

    status = strm->state->status;

    /* Deallocate in reverse order of allocations: */
    TRY_FREE(strm, strm->state->pending_buf);
    TRY_FREE(strm, strm->state->head);
    TRY_FREE(strm, strm->state->prev);
    TRY_FREE(strm, strm->state->window);

    ZFREE(strm, strm->state);
    strm->state = Z_NULL;

    /* Ending while still in BUSY_STATE means the stream was freed
     * mid-compression. */
    return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK;
}

/* =========================================================================
 * Copy the source state to the destination state.
 * To simplify the source, this is not supported for 16-bit MSDOS (which
 * doesn't have enough memory anyway to duplicate compression states).
 */
int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
#ifdef MAXSEG_64K
    (void)dest;
    (void)source;
    return Z_STREAM_ERROR;
#else
    deflate_state *ds;
    deflate_state *ss;

    if (deflateStateCheck(source) || dest == Z_NULL) {
        return Z_STREAM_ERROR;
    }

    ss = source->state;

    zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));

    ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
    if (ds == Z_NULL) return Z_MEM_ERROR;
    dest->state = (struct internal_state FAR *) ds;
    zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
    ds->strm = dest;

    /* Allocate fresh buffers; the struct copy above only copied pointers. */
    ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
    ds->prev   = (Posf *)  ZALLOC(dest, ds->w_size, sizeof(Pos));
    ds->head   = (Posf *)  ZALLOC(dest, ds->hash_size, sizeof(Pos));
    ds->pending_buf = (uchf *) ZALLOC(dest, ds->lit_bufsize, LIT_BUFS);

    if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL ||
        ds->pending_buf == Z_NULL) {
        deflateEnd (dest);
        return Z_MEM_ERROR;
    }
    /* following zmemcpy do not work for 16-bit MSDOS */
    zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
    zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
    zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
    zmemcpy(ds->pending_buf, ss->pending_buf, ds->lit_bufsize * LIT_BUFS);

    /* Re-point interior pointers into the newly allocated buffers. */
    ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
#ifdef LIT_MEM
    ds->d_buf = (ushf *)(ds->pending_buf + (ds->lit_bufsize << 1));
    ds->l_buf = ds->pending_buf + (ds->lit_bufsize << 2);
#else
    ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
#endif

    ds->l_desc.dyn_tree = ds->dyn_ltree;
    ds->d_desc.dyn_tree = ds->dyn_dtree;
    ds->bl_desc.dyn_tree = ds->bl_tree;

    return Z_OK;
#endif /* MAXSEG_64K */
}

#ifndef FASTEST
/* ===========================================================================
 * Set match_start to the longest match starting at the given string and
Matches shorter or equal to prev_length are discarded, * in which case the result is equal to prev_length and match_start is * garbage. * IN assertions: cur_match is the head of the hash chain for the current * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 * OUT assertion: the match length is not greater than s->lookahead. */ local uInt longest_match(deflate_state *s, IPos cur_match) { unsigned chain_length = s->max_chain_length;/* max hash chain length */ register Bytef *scan = s->window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ int best_len = (int)s->prev_length; /* best match length so far */ int nice_match = s->nice_match; /* stop if match long enough */ IPos limit = s->strstart > (IPos)MAX_DIST(s) ? s->strstart - (IPos)MAX_DIST(s) : NIL; /* Stop when cur_match becomes <= limit. To simplify the code, * we prevent matches with the string of window index 0. */ Posf *prev = s->prev; uInt wmask = s->w_mask; #ifdef UNALIGNED_OK /* Compare two bytes at a time. Note: this is not always beneficial. * Try with and without -DUNALIGNED_OK to check. */ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; register ush scan_start = *(ushf*)scan; register ush scan_end = *(ushf*)(scan + best_len - 1); #else register Bytef *strend = s->window + s->strstart + MAX_MATCH; register Byte scan_end1 = scan[best_len - 1]; register Byte scan_end = scan[best_len]; #endif /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); /* Do not waste too much time if we already have a good match: */ if (s->prev_length >= s->good_match) { chain_length >>= 2; } /* Do not look for matches beyond the end of the input. This is necessary * to make deflate deterministic. 
*/ if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead; Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, "need lookahead"); do { Assert(cur_match < s->strstart, "no future"); match = s->window + cur_match; /* Skip to next match if the match length cannot increase * or if the match length is less than 2. Note that the checks below * for insufficient lookahead only occur occasionally for performance * reasons. Therefore uninitialized memory will be accessed, and * conditional jumps will be made that depend on those values. * However the length of the match is limited to the lookahead, so * the output of deflate is not affected by the uninitialized values. */ #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) /* This code assumes sizeof(unsigned short) == 2. Do not use * UNALIGNED_OK if your compiler uses a different size. */ if (*(ushf*)(match + best_len - 1) != scan_end || *(ushf*)match != scan_start) continue; /* It is not necessary to compare scan[2] and match[2] since they are * always equal when the other bytes match, given that the hash keys * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at * strstart + 3, + 5, up to strstart + 257. We check for insufficient * lookahead only every 4th comparison; the 128th check will be made * at strstart + 257. If MAX_MATCH-2 is not a multiple of 8, it is * necessary to put more guard bytes at the end of the window, or * to check more often for insufficient lookahead. 
*/ Assert(scan[2] == match[2], "scan[2]?"); scan++, match++; do { } while (*(ushf*)(scan += 2) == *(ushf*)(match += 2) && *(ushf*)(scan += 2) == *(ushf*)(match += 2) && *(ushf*)(scan += 2) == *(ushf*)(match += 2) && *(ushf*)(scan += 2) == *(ushf*)(match += 2) && scan < strend); /* The funny "do {}" generates better code on most compilers */ /* Here, scan <= window + strstart + 257 */ Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan"); if (*scan == *match) scan++; len = (MAX_MATCH - 1) - (int)(strend - scan); scan = strend - (MAX_MATCH-1); #else /* UNALIGNED_OK */ if (match[best_len] != scan_end || match[best_len - 1] != scan_end1 || *match != *scan || *++match != scan[1]) continue; /* The check at best_len - 1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. */ scan += 2, match++; Assert(*scan == *match, "match[2]?"); /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart + 258. 
*/ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan"); len = MAX_MATCH - (int)(strend - scan); scan = strend - MAX_MATCH; #endif /* UNALIGNED_OK */ if (len > best_len) { s->match_start = cur_match; best_len = len; if (len >= nice_match) break; #ifdef UNALIGNED_OK scan_end = *(ushf*)(scan + best_len - 1); #else scan_end1 = scan[best_len - 1]; scan_end = scan[best_len]; #endif } } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length != 0); if ((uInt)best_len <= s->lookahead) return (uInt)best_len; return s->lookahead; } #else /* FASTEST */ /* --------------------------------------------------------------------------- * Optimized version for FASTEST only */ local uInt longest_match(deflate_state *s, IPos cur_match) { register Bytef *scan = s->window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ register Bytef *strend = s->window + s->strstart + MAX_MATCH; /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, "need lookahead"); Assert(cur_match < s->strstart, "no future"); match = s->window + cur_match; /* Return failure if the match length is less than 2: */ if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; /* The check at best_len - 1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. 
*/ scan += 2, match += 2; Assert(*scan == *match, "match[2]?"); /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart + 258. */ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); Assert(scan <= s->window + (unsigned)(s->window_size - 1), "wild scan"); len = MAX_MATCH - (int)(strend - scan); if (len < MIN_MATCH) return MIN_MATCH - 1; s->match_start = cur_match; return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; } #endif /* FASTEST */ #ifdef ZLIB_DEBUG #define EQUAL 0 /* result of memcmp for equal strings */ /* =========================================================================== * Check that the match at match_start is indeed a match. */ local void check_match(deflate_state *s, IPos start, IPos match, int length) { /* check that the match is indeed a match */ Bytef *back = s->window + (int)match, *here = s->window + start; IPos len = length; if (match == (IPos)-1) { /* match starts one byte before the current window -- just compare the subsequent length-1 bytes */ back++; here++; len--; } if (zmemcmp(back, here, len) != EQUAL) { fprintf(stderr, " start %u, match %d, length %d\n", start, (int)match, length); do { fprintf(stderr, "(%02x %02x)", *back++, *here++); } while (--len != 0); z_error("invalid match"); } if (z_verbose > 1) { fprintf(stderr,"\\[%d,%d]", start - match, length); do { putc(s->window[start++], stderr); } while (--length != 0); } } #else # define check_match(s, start, match, length) #endif /* ZLIB_DEBUG */ /* =========================================================================== * Flush the current block, with given end-of-file flag. * IN assertion: strstart is set to the end of the current match. */ #define FLUSH_BLOCK_ONLY(s, last) { \ _tr_flush_block(s, (s->block_start >= 0L ? 
\ (charf *)&s->window[(unsigned)s->block_start] : \ (charf *)Z_NULL), \ (ulg)((long)s->strstart - s->block_start), \ (last)); \ s->block_start = s->strstart; \ flush_pending(s->strm); \ Tracev((stderr,"[FLUSH]")); \ } /* Same but force premature exit if necessary. */ #define FLUSH_BLOCK(s, last) { \ FLUSH_BLOCK_ONLY(s, last); \ if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \ } /* Maximum stored block length in deflate format (not including header). */ #define MAX_STORED 65535 /* Minimum of a and b. */ #define MIN(a, b) ((a) > (b) ? (b) : (a)) /* =========================================================================== * Copy without compression as much as possible from the input stream, return * the current block state. * * In case deflateParams() is used to later switch to a non-zero compression * level, s->matches (otherwise unused when storing) keeps track of the number * of hash table slides to perform. If s->matches is 1, then one hash table * slide will be done when switching. If s->matches is 2, the maximum value * allowed here, then the hash table will be cleared, since two or more slides * is the same as a clear. * * deflate_stored() is written to minimize the number of times an input byte is * copied. It is most efficient with large input and output buffers, which * maximizes the opportunities to have a single copy from next_in to next_out. */ local block_state deflate_stored(deflate_state *s, int flush) { /* Smallest worthy block size when not flushing or finishing. By default * this is 32K. This can be as small as 507 bytes for memLevel == 1. For * large input and output buffers, the stored block size will be larger. */ unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size); /* Copy as many min_block or larger stored blocks directly to next_out as * possible. If flushing, copy the remaining available input to next_out as * stored blocks, if there is enough space. 
 */
    unsigned len, left, have, last = 0;
    unsigned used = s->strm->avail_in;  /* snapshot: input bytes consumed so
                                           far; re-derived after the loop to
                                           count bytes copied directly */
    do {
        /* Set len to the maximum size block that we can copy directly with the
         * available input data and output space. Set left to how much of that
         * would be copied from what's left in the window.
         */
        len = MAX_STORED;       /* maximum deflate stored block length */
        have = (s->bi_valid + 42) >> 3;         /* number of header bytes */
        if (s->strm->avail_out < have)          /* need room for header */
            break;
            /* maximum stored block length that will fit in avail_out: */
        have = s->strm->avail_out - have;
        left = s->strstart - s->block_start;    /* bytes left in window */
        if (len > (ulg)left + s->strm->avail_in)
            len = left + s->strm->avail_in;     /* limit len to the input */
        if (len > have)
            len = have;                         /* limit len to the output */

        /* If the stored block would be less than min_block in length, or if
         * unable to copy all of the available input when flushing, then try
         * copying to the window and the pending buffer instead. Also don't
         * write an empty block when flushing -- deflate() does that.
         */
        if (len < min_block && ((len == 0 && flush != Z_FINISH) ||
                                flush == Z_NO_FLUSH ||
                                len != left + s->strm->avail_in))
            break;

        /* Make a dummy stored block in pending to get the header bytes,
         * including any pending bits. This also updates the debugging counts.
         */
        last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0;
        _tr_stored_block(s, (char *)0, 0L, last);

        /* Replace the lengths in the dummy stored block with len.
         * These four bytes are the stored block's LEN and NLEN (~LEN)
         * fields, little-endian. */
        s->pending_buf[s->pending - 4] = len;
        s->pending_buf[s->pending - 3] = len >> 8;
        s->pending_buf[s->pending - 2] = ~len;
        s->pending_buf[s->pending - 1] = ~len >> 8;

        /* Write the stored block header bytes. */
        flush_pending(s->strm);

#ifdef ZLIB_DEBUG
        /* Update debugging counts for the data about to be copied. */
        s->compressed_len += len << 3;
        s->bits_sent += len << 3;
#endif

        /* Copy uncompressed bytes from the window to next_out. */
        if (left) {
            if (left > len)
                left = len;
            zmemcpy(s->strm->next_out, s->window + s->block_start, left);
            s->strm->next_out += left;
            s->strm->avail_out -= left;
            s->strm->total_out += left;
            s->block_start += left;
            len -= left;
        }

        /* Copy uncompressed bytes directly from next_in to next_out, updating
         * the check value.
         */
        if (len) {
            read_buf(s->strm, s->strm->next_out, len);
            s->strm->next_out += len;
            s->strm->avail_out -= len;
            s->strm->total_out += len;
        }
    } while (last == 0);

    /* Update the sliding window with the last s->w_size bytes of the copied
     * data, or append all of the copied data to the existing window if less
     * than s->w_size bytes were copied. Also update the number of bytes to
     * insert in the hash tables, in the event that deflateParams() switches to
     * a non-zero compression level.
     */
    used -= s->strm->avail_in;      /* number of input bytes directly copied */
    if (used) {
        /* If any input was used, then no unused input remains in the window,
         * therefore s->block_start == s->strstart.
         */
        if (used >= s->w_size) {    /* supplant the previous history */
            s->matches = 2;         /* clear hash */
            zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
            s->strstart = s->w_size;
            s->insert = s->strstart;
        }
        else {
            if (s->window_size - s->strstart <= used) {
                /* Slide the window down. */
                s->strstart -= s->w_size;
                zmemcpy(s->window, s->window + s->w_size, s->strstart);
                if (s->matches < 2)
                    s->matches++;   /* add a pending slide_hash() */
                if (s->insert > s->strstart)
                    s->insert = s->strstart;
            }
            zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
            s->strstart += used;
            s->insert += MIN(used, s->w_size - s->insert);
        }
        s->block_start = s->strstart;
    }
    if (s->high_water < s->strstart)
        s->high_water = s->strstart;

    /* If the last block was written to next_out, then done. */
    if (last)
        return finish_done;

    /* If flushing and all input has been consumed, then done.
*/ if (flush != Z_NO_FLUSH && flush != Z_FINISH && s->strm->avail_in == 0 && (long)s->strstart == s->block_start) return block_done; /* Fill the window with any remaining input. */ have = s->window_size - s->strstart; if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) { /* Slide the window down. */ s->block_start -= s->w_size; s->strstart -= s->w_size; zmemcpy(s->window, s->window + s->w_size, s->strstart); if (s->matches < 2) s->matches++; /* add a pending slide_hash() */ have += s->w_size; /* more space now */ if (s->insert > s->strstart) s->insert = s->strstart; } if (have > s->strm->avail_in) have = s->strm->avail_in; if (have) { read_buf(s->strm, s->window + s->strstart, have); s->strstart += have; s->insert += MIN(have, s->w_size - s->insert); } if (s->high_water < s->strstart) s->high_water = s->strstart; /* There was not enough avail_out to write a complete worthy or flushed * stored block to next_out. Write a stored block to pending instead, if we * have enough input for a worthy block, or if flushing and there is enough * room for the remaining input as a stored block in the pending buffer. */ have = (s->bi_valid + 42) >> 3; /* number of header bytes */ /* maximum stored block length that will fit in pending: */ have = MIN(s->pending_buf_size - have, MAX_STORED); min_block = MIN(have, s->w_size); left = s->strstart - s->block_start; if (left >= min_block || ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH && s->strm->avail_in == 0 && left <= have)) { len = MIN(left, have); last = flush == Z_FINISH && s->strm->avail_in == 0 && len == left ? 1 : 0; _tr_stored_block(s, (charf *)s->window + s->block_start, len, last); s->block_start += len; flush_pending(s->strm); } /* We've done all we can with the available input and output. */ return last ? finish_started : need_more; } /* =========================================================================== * Compress as much as possible from the input stream, return the current * block state. 
* This function does not perform lazy evaluation of matches and inserts * new strings in the dictionary only for unmatched strings or for short * matches. It is used only for the fast compression options. */ local block_state deflate_fast(deflate_state *s, int flush) { IPos hash_head; /* head of the hash chain */ int bflush; /* set if current block must be flushed */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the next match, plus MIN_MATCH bytes to insert the * string following the next match. */ if (s->lookahead < MIN_LOOKAHEAD) { fill_window(s); if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* Insert the string window[strstart .. strstart + 2] in the * dictionary, and set hash_head to the head of the hash chain: */ hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } /* Find the longest match, discarding those <= prev_length. * At this point we have always match_length < MIN_MATCH */ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ s->match_length = longest_match (s, hash_head); /* longest_match() sets match_start */ } if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->match_start, s->match_length); _tr_tally_dist(s, s->strstart - s->match_start, s->match_length - MIN_MATCH, bflush); s->lookahead -= s->match_length; /* Insert new strings in the hash table only if the match length * is not too large. This saves time but degrades compression. 
*/ #ifndef FASTEST if (s->match_length <= s->max_insert_length && s->lookahead >= MIN_MATCH) { s->match_length--; /* string at strstart already in table */ do { s->strstart++; INSERT_STRING(s, s->strstart, hash_head); /* strstart never exceeds WSIZE-MAX_MATCH, so there are * always MIN_MATCH bytes ahead. */ } while (--s->match_length != 0); s->strstart++; } else #endif { s->strstart += s->match_length; s->match_length = 0; s->ins_h = s->window[s->strstart]; UPDATE_HASH(s, s->ins_h, s->window[s->strstart + 1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not * matter since it will be recomputed at next deflate call. */ } } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); _tr_tally_lit(s, s->window[s->strstart], bflush); s->lookahead--; s->strstart++; } if (bflush) FLUSH_BLOCK(s, 0); } s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->sym_next) FLUSH_BLOCK(s, 0); return block_done; } #ifndef FASTEST /* =========================================================================== * Same as above, but achieves better compression. We use a lazy * evaluation for matches: a match is finally adopted only if there is * no better match at the next window position. */ local block_state deflate_slow(deflate_state *s, int flush) { IPos hash_head; /* head of hash chain */ int bflush; /* set if current block must be flushed */ /* Process the input block. */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the next match, plus MIN_MATCH bytes to insert the * string following the next match. 
 */
        if (s->lookahead < MIN_LOOKAHEAD) {
            fill_window(s);
            if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) {
                return need_more;
            }
            if (s->lookahead == 0) break; /* flush the current block */
        }

        /* Insert the string window[strstart .. strstart + 2] in the
         * dictionary, and set hash_head to the head of the hash chain:
         */
        hash_head = NIL;
        if (s->lookahead >= MIN_MATCH) {
            INSERT_STRING(s, s->strstart, hash_head);
        }

        /* Find the longest match, discarding those <= prev_length.
         * MIN_MATCH-1 below means "no usable match yet" (all accept
         * tests compare against >= MIN_MATCH).
         */
        s->prev_length = s->match_length, s->prev_match = s->match_start;
        s->match_length = MIN_MATCH-1;

        if (hash_head != NIL && s->prev_length < s->max_lazy_match &&
            s->strstart - hash_head <= MAX_DIST(s)) {
            /* To simplify the code, we prevent matches with the string
             * of window index 0 (in particular we have to avoid a match
             * of the string with itself at the start of the input file).
             */
            s->match_length = longest_match (s, hash_head);
            /* longest_match() sets match_start */

            if (s->match_length <= 5 && (s->strategy == Z_FILTERED
#if TOO_FAR <= 32767
                || (s->match_length == MIN_MATCH &&
                    s->strstart - s->match_start > TOO_FAR)
#endif
                )) {

                /* If prev_match is also MIN_MATCH, match_start is garbage
                 * but we will ignore the current match anyway.
                 */
                s->match_length = MIN_MATCH-1;
            }
        }
        /* If there was a match at the previous step and the current
         * match is not better, output the previous match:
         */
        if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) {
            uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
            /* Do not insert strings in hash table beyond this. */

            check_match(s, s->strstart - 1, s->prev_match, s->prev_length);

            _tr_tally_dist(s, s->strstart - 1 - s->prev_match,
                           s->prev_length - MIN_MATCH, bflush);

            /* Insert in hash table all strings up to the end of the match.
             * strstart - 1 and strstart are already inserted. If there is not
             * enough lookahead, the last two strings are not inserted in
             * the hash table.
*/ s->lookahead -= s->prev_length - 1; s->prev_length -= 2; do { if (++s->strstart <= max_insert) { INSERT_STRING(s, s->strstart, hash_head); } } while (--s->prev_length != 0); s->match_available = 0; s->match_length = MIN_MATCH-1; s->strstart++; if (bflush) FLUSH_BLOCK(s, 0); } else if (s->match_available) { /* If there was no match at the previous position, output a * single literal. If there was a match but the current match * is longer, truncate the previous match to a single literal. */ Tracevv((stderr,"%c", s->window[s->strstart - 1])); _tr_tally_lit(s, s->window[s->strstart - 1], bflush); if (bflush) { FLUSH_BLOCK_ONLY(s, 0); } s->strstart++; s->lookahead--; if (s->strm->avail_out == 0) return need_more; } else { /* There is no previous match to compare with, wait for * the next step to decide. */ s->match_available = 1; s->strstart++; s->lookahead--; } } Assert (flush != Z_NO_FLUSH, "no flush?"); if (s->match_available) { Tracevv((stderr,"%c", s->window[s->strstart - 1])); _tr_tally_lit(s, s->window[s->strstart - 1], bflush); s->match_available = 0; } s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->sym_next) FLUSH_BLOCK(s, 0); return block_done; } #endif /* FASTEST */ /* =========================================================================== * For Z_RLE, simply look for runs of bytes, generate matches only of distance * one. Do not maintain a hash table. (It will be regenerated if this run of * deflate switches away from Z_RLE.) */ local block_state deflate_rle(deflate_state *s, int flush) { int bflush; /* set if current block must be flushed */ uInt prev; /* byte at distance one to match */ Bytef *scan, *strend; /* scan goes up to strend for length of run */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the longest run, plus one for the unrolled loop. 
*/ if (s->lookahead <= MAX_MATCH) { fill_window(s); if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* See how many times the previous byte repeats */ s->match_length = 0; if (s->lookahead >= MIN_MATCH && s->strstart > 0) { scan = s->window + s->strstart - 1; prev = *scan; if (prev == *++scan && prev == *++scan && prev == *++scan) { strend = s->window + s->strstart + MAX_MATCH; do { } while (prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && scan < strend); s->match_length = MAX_MATCH - (uInt)(strend - scan); if (s->match_length > s->lookahead) s->match_length = s->lookahead; } Assert(scan <= s->window + (uInt)(s->window_size - 1), "wild scan"); } /* Emit match if have run of MIN_MATCH or longer, else emit literal */ if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->strstart - 1, s->match_length); _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); s->lookahead -= s->match_length; s->strstart += s->match_length; s->match_length = 0; } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); _tr_tally_lit(s, s->window[s->strstart], bflush); s->lookahead--; s->strstart++; } if (bflush) FLUSH_BLOCK(s, 0); } s->insert = 0; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->sym_next) FLUSH_BLOCK(s, 0); return block_done; } /* =========================================================================== * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. * (It will be regenerated if this run of deflate switches away from Huffman.) */ local block_state deflate_huff(deflate_state *s, int flush) { int bflush; /* set if current block must be flushed */ for (;;) { /* Make sure that we have a literal to write. 
*/ if (s->lookahead == 0) { fill_window(s); if (s->lookahead == 0) { if (flush == Z_NO_FLUSH) return need_more; break; /* flush the current block */ } } /* Output a literal byte */ s->match_length = 0; Tracevv((stderr,"%c", s->window[s->strstart])); _tr_tally_lit(s, s->window[s->strstart], bflush); s->lookahead--; s->strstart++; if (bflush) FLUSH_BLOCK(s, 0); } s->insert = 0; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->sym_next) FLUSH_BLOCK(s, 0); return block_done; } ================================================ FILE: pypcode/zlib/deflate.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* deflate.h -- internal compression state * Copyright (C) 1995-2024 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* @(#) $Id$ */ #ifndef DEFLATE_H #define DEFLATE_H #include "zutil.h" /* define NO_GZIP when compiling if you want to disable gzip header and trailer creation by deflate(). NO_GZIP would be used to avoid linking in the crc code when it is not needed. For shared libraries, gzip encoding should be left enabled. */ #ifndef NO_GZIP # define GZIP #endif /* define LIT_MEM to slightly increase the speed of deflate (order 1% to 2%) at the cost of a larger memory footprint */ /* #define LIT_MEM */ /* =========================================================================== * Internal compression state. 
 */

#define LENGTH_CODES 29
/* number of length codes, not counting the special END_BLOCK code */

#define LITERALS  256
/* number of literal bytes 0..255 */

#define L_CODES (LITERALS+1+LENGTH_CODES)
/* number of Literal or Length codes, including the END_BLOCK code */

#define D_CODES   30
/* number of distance codes */

#define BL_CODES  19
/* number of codes used to transfer the bit lengths */

#define HEAP_SIZE (2*L_CODES+1)
/* maximum heap size */

#define MAX_BITS 15
/* All codes must not exceed MAX_BITS bits */

#define Buf_size 16
/* size of bit buffer in bi_buf */

/* Values taken by the status field of internal_state (see below). */
#define INIT_STATE    42    /* zlib header -> BUSY_STATE */
#ifdef GZIP
#  define GZIP_STATE  57    /* gzip header -> BUSY_STATE | EXTRA_STATE */
#endif
#define EXTRA_STATE   69    /* gzip extra block -> NAME_STATE */
#define NAME_STATE    73    /* gzip file name -> COMMENT_STATE */
#define COMMENT_STATE 91    /* gzip comment -> HCRC_STATE */
#define HCRC_STATE   103    /* gzip header CRC -> BUSY_STATE */
#define BUSY_STATE   113    /* deflate -> FINISH_STATE */
#define FINISH_STATE 666    /* stream complete */
/* Stream status */

/* Data structure describing a single value and its code string. */
typedef struct ct_data_s {
    union {
        ush  freq;       /* frequency count */
        ush  code;       /* bit string */
    } fc;
    union {
        ush  dad;        /* father node in Huffman tree */
        ush  len;        /* length of bit string */
    } dl;
} FAR ct_data;

#define Freq fc.freq
#define Code fc.code
#define Dad  dl.dad
#define Len  dl.len

typedef struct static_tree_desc_s  static_tree_desc;

typedef struct tree_desc_s {
    ct_data *dyn_tree;           /* the dynamic tree */
    int     max_code;            /* largest code with non zero frequency */
    const static_tree_desc *stat_desc;  /* the corresponding static tree */
} FAR tree_desc;

typedef ush Pos;
typedef Pos FAR Posf;
typedef unsigned IPos;

/* A Pos is an index in the character window. We use short instead of int to
 * save space in the various tables. IPos is used only for parameter passing.
*/ typedef struct internal_state { z_streamp strm; /* pointer back to this zlib stream */ int status; /* as the name implies */ Bytef *pending_buf; /* output still pending */ ulg pending_buf_size; /* size of pending_buf */ Bytef *pending_out; /* next pending byte to output to the stream */ ulg pending; /* nb of bytes in the pending buffer */ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ gz_headerp gzhead; /* gzip header information to write */ ulg gzindex; /* where in extra, name, or comment */ Byte method; /* can only be DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ uInt w_size; /* LZ77 window size (32K by default) */ uInt w_bits; /* log2(w_size) (8..16) */ uInt w_mask; /* w_size - 1 */ Bytef *window; /* Sliding window. Input bytes are read into the second half of the window, * and move to the first half later to keep a dictionary of at least wSize * bytes. With this organization, matches are limited to a distance of * wSize-MAX_MATCH bytes, but this ensures that IO is always * performed with a length multiple of the block size. Also, it limits * the window size to 64K, which is quite useful on MSDOS. * To do: use the user input buffer as sliding window. */ ulg window_size; /* Actual size of window: 2*wSize, except when the user input buffer * is directly used as sliding window. */ Posf *prev; /* Link to older string with same hash index. To limit the size of this * array to 64K, this link is maintained only for the last 32K strings. * An index in this array is thus a window index modulo 32K. */ Posf *head; /* Heads of the hash chains or NIL. */ uInt ins_h; /* hash index of string to be inserted */ uInt hash_size; /* number of elements in hash table */ uInt hash_bits; /* log2(hash_size) */ uInt hash_mask; /* hash_size-1 */ uInt hash_shift; /* Number of bits by which ins_h must be shifted at each input * step. 
It must be such that after MIN_MATCH steps, the oldest * byte no longer takes part in the hash key, that is: * hash_shift * MIN_MATCH >= hash_bits */ long block_start; /* Window position at the beginning of the current output block. Gets * negative when the window is moved backwards. */ uInt match_length; /* length of best match */ IPos prev_match; /* previous match */ int match_available; /* set if previous match exists */ uInt strstart; /* start of string to insert */ uInt match_start; /* start of matching string */ uInt lookahead; /* number of valid bytes ahead in window */ uInt prev_length; /* Length of the best match at previous step. Matches not greater than this * are discarded. This is used in the lazy match evaluation. */ uInt max_chain_length; /* To speed up deflation, hash chains are never searched beyond this * length. A higher limit improves compression ratio but degrades the * speed. */ uInt max_lazy_match; /* Attempt to find a better match only when the current match is strictly * smaller than this value. This mechanism is used only for compression * levels >= 4. */ # define max_insert_length max_lazy_match /* Insert new strings in the hash table only if the match length is not * greater than this length. This saves time but degrades compression. * max_insert_length is used only for compression levels <= 3. */ int level; /* compression level (1..9) */ int strategy; /* favor or force Huffman coding*/ uInt good_match; /* Use a faster search when the previous match is longer than this */ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ struct tree_desc_s l_desc; /* desc. for literal tree */ struct tree_desc_s d_desc; /* desc. 
for distance tree */ struct tree_desc_s bl_desc; /* desc. for bit length tree */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ int heap_len; /* number of elements in the heap */ int heap_max; /* element of largest frequency */ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. * The same heap array is used to build all trees. */ uch depth[2*L_CODES+1]; /* Depth of each subtree used as tie breaker for trees of equal frequency */ #ifdef LIT_MEM # define LIT_BUFS 5 ushf *d_buf; /* buffer for distances */ uchf *l_buf; /* buffer for literals/lengths */ #else # define LIT_BUFS 4 uchf *sym_buf; /* buffer for distances and literals/lengths */ #endif uInt lit_bufsize; /* Size of match buffer for literals/lengths. There are 4 reasons for * limiting lit_bufsize to 64K: * - frequencies can be kept in 16 bit counters * - if compression is not successful for the first block, all input * data is still in the window so we can still emit a stored block even * when input comes from standard input. (This can also be done for * all blocks if lit_bufsize is not greater than 32K.) * - if compression is not successful for a file smaller than 64K, we can * even emit a stored file instead of a stored block (saving 5 bytes). * This is applicable only for zip (not gzip or zlib). * - creating new Huffman trees less frequently may not provide fast * adaptation to changes in the input data statistics. (Take for * example a binary file with poorly compressible code followed by * a highly compressible string table.) Smaller buffer sizes give * fast adaptation but have of course the overhead of transmitting * trees more frequently. 
 * - I can't count above 4
 */

    uInt sym_next;      /* running index in symbol buffer */
    uInt sym_end;       /* symbol table full when sym_next reaches this */

    ulg opt_len;        /* bit length of current block with optimal trees */
    ulg static_len;     /* bit length of current block with static trees */
    uInt matches;       /* number of string matches in current block */
    uInt insert;        /* bytes at end of window left to insert */

#ifdef ZLIB_DEBUG
    ulg compressed_len; /* total bit length of compressed file mod 2^32 */
    ulg bits_sent;      /* bit length of compressed data sent mod 2^32 */
#endif

    ush bi_buf;
    /* Output buffer. bits are inserted starting at the bottom (least
     * significant bits).
     */
    int bi_valid;
    /* Number of valid bits in bi_buf. All bits above the last valid bit
     * are always zero.
     */

    ulg high_water;
    /* High water mark offset in window for initialized bytes -- bytes above
     * this are set to zero in order to avoid memory check warnings when
     * longest match routines access bytes past the input. This is then
     * updated to the new high water mark.
     */

} FAR deflate_state;

/* Output a byte on the stream.
 * IN assertion: there is enough room in pending_buf.
 */
#define put_byte(s, c) {s->pending_buf[s->pending++] = (Bytef)(c);}

#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
/* Minimum amount of lookahead, except at the end of the input file.
 * See deflate.c for comments about the MIN_MATCH+1.
 */

#define MAX_DIST(s)  ((s)->w_size-MIN_LOOKAHEAD)
/* In order to simplify the code, particularly on 16 bit machines, match
 * distances are limited to MAX_DIST instead of WSIZE.
*/ #define WIN_INIT MAX_MATCH /* Number of bytes after end of data in window to initialize in order to avoid memory checker errors from longest match routines */ /* in trees.c */ void ZLIB_INTERNAL _tr_init(deflate_state *s); int ZLIB_INTERNAL _tr_tally(deflate_state *s, unsigned dist, unsigned lc); void ZLIB_INTERNAL _tr_flush_block(deflate_state *s, charf *buf, ulg stored_len, int last); void ZLIB_INTERNAL _tr_flush_bits(deflate_state *s); void ZLIB_INTERNAL _tr_align(deflate_state *s); void ZLIB_INTERNAL _tr_stored_block(deflate_state *s, charf *buf, ulg stored_len, int last); #define d_code(dist) \ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) /* Mapping from a distance to a distance code. dist is the distance - 1 and * must not have side effects. _dist_code[256] and _dist_code[257] are never * used. */ #ifndef ZLIB_DEBUG /* Inline versions of _tr_tally for speed: */ #if defined(GEN_TREES_H) || !defined(STDC) extern uch ZLIB_INTERNAL _length_code[]; extern uch ZLIB_INTERNAL _dist_code[]; #else extern const uch ZLIB_INTERNAL _length_code[]; extern const uch ZLIB_INTERNAL _dist_code[]; #endif #ifdef LIT_MEM # define _tr_tally_lit(s, c, flush) \ { uch cc = (c); \ s->d_buf[s->sym_next] = 0; \ s->l_buf[s->sym_next++] = cc; \ s->dyn_ltree[cc].Freq++; \ flush = (s->sym_next == s->sym_end); \ } # define _tr_tally_dist(s, distance, length, flush) \ { uch len = (uch)(length); \ ush dist = (ush)(distance); \ s->d_buf[s->sym_next] = dist; \ s->l_buf[s->sym_next++] = len; \ dist--; \ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ s->dyn_dtree[d_code(dist)].Freq++; \ flush = (s->sym_next == s->sym_end); \ } #else # define _tr_tally_lit(s, c, flush) \ { uch cc = (c); \ s->sym_buf[s->sym_next++] = 0; \ s->sym_buf[s->sym_next++] = 0; \ s->sym_buf[s->sym_next++] = cc; \ s->dyn_ltree[cc].Freq++; \ flush = (s->sym_next == s->sym_end); \ } # define _tr_tally_dist(s, distance, length, flush) \ { uch len = (uch)(length); \ ush dist = (ush)(distance); \ 
s->sym_buf[s->sym_next++] = (uch)dist; \ s->sym_buf[s->sym_next++] = (uch)(dist >> 8); \ s->sym_buf[s->sym_next++] = len; \ dist--; \ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ s->dyn_dtree[d_code(dist)].Freq++; \ flush = (s->sym_next == s->sym_end); \ } #endif #else # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) # define _tr_tally_dist(s, distance, length, flush) \ flush = _tr_tally(s, distance, length) #endif #endif /* DEFLATE_H */ ================================================ FILE: pypcode/zlib/gzguts.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* gzguts.h -- zlib internal header definitions for gz* operations * Copyright (C) 2004-2024 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #ifdef _LARGEFILE64_SOURCE # ifndef _LARGEFILE_SOURCE # define _LARGEFILE_SOURCE 1 # endif # undef _FILE_OFFSET_BITS # undef _TIME_BITS #endif #ifdef HAVE_HIDDEN # define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) #else # define ZLIB_INTERNAL #endif #include #include "zlib.h" #ifdef STDC # include # include # include #endif #ifndef _POSIX_SOURCE # define _POSIX_SOURCE #endif #include #ifdef _WIN32 # include #endif #if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) # include #endif #if defined(_WIN32) # define WIDECHAR #endif #ifdef WINAPI_FAMILY # define open _open # define read _read # define write _write # define close _close #endif #ifdef NO_DEFLATE /* for compatibility with old definition */ # define NO_GZCOMPRESS #endif #if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(__CYGWIN__) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #ifndef HAVE_VSNPRINTF # ifdef MSDOS /* vsnprintf may exist on some 
MS-DOS compilers (DJGPP?), but for now we just assume it doesn't. */ # define NO_vsnprintf # endif # ifdef __TURBOC__ # define NO_vsnprintf # endif # ifdef WIN32 /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */ # if !defined(vsnprintf) && !defined(NO_vsnprintf) # if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) # define vsnprintf _vsnprintf # endif # endif # endif # ifdef __SASC # define NO_vsnprintf # endif # ifdef VMS # define NO_vsnprintf # endif # ifdef __OS400__ # define NO_vsnprintf # endif # ifdef __MVS__ # define NO_vsnprintf # endif #endif /* unlike snprintf (which is required in C99), _snprintf does not guarantee null termination of the result -- however this is only used in gzlib.c where the result is assured to fit in the space provided */ #if defined(_MSC_VER) && _MSC_VER < 1900 # define snprintf _snprintf #endif #ifndef local # define local static #endif /* since "static" is used to mean two completely different things in C, we define "local" for the non-static meaning of "static", for readability (compile with -Dlocal if your debugger can't find static symbols) */ /* gz* functions always use library allocation functions */ #ifndef STDC extern voidp malloc(uInt size); extern void free(voidpf ptr); #endif /* get errno and strerror definition */ #if defined UNDER_CE # include # define zstrerror() gz_strwinerror((DWORD)GetLastError()) #else # ifndef NO_STRERROR # include # define zstrerror() strerror(errno) # else # define zstrerror() "stdio error (consult errno)" # endif #endif /* provide prototypes for these when building zlib without LFS */ #if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 ZEXTERN gzFile ZEXPORT gzopen64(const char *, const char *); ZEXTERN z_off64_t ZEXPORT gzseek64(gzFile, z_off64_t, int); ZEXTERN z_off64_t ZEXPORT gztell64(gzFile); ZEXTERN z_off64_t ZEXPORT gzoffset64(gzFile); #endif /* default memLevel */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL 
MAX_MEM_LEVEL #endif /* default i/o buffer size -- double this for output when reading (this and twice this must be able to fit in an unsigned type) */ #define GZBUFSIZE 8192 /* gzip modes, also provide a little integrity check on the passed structure */ #define GZ_NONE 0 #define GZ_READ 7247 #define GZ_WRITE 31153 #define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ /* values for gz_state how */ #define LOOK 0 /* look for a gzip header */ #define COPY 1 /* copy input directly */ #define GZIP 2 /* decompress a gzip stream */ /* internal gzip file state data structure */ typedef struct { /* exposed contents for gzgetc() macro */ struct gzFile_s x; /* "x" for exposed */ /* x.have: number of bytes available at x.next */ /* x.next: next output data to deliver or write */ /* x.pos: current position in uncompressed data */ /* used for both reading and writing */ int mode; /* see gzip modes above */ int fd; /* file descriptor */ char *path; /* path or fd for error messages */ unsigned size; /* buffer size, zero if not allocated yet */ unsigned want; /* requested buffer size, default is GZBUFSIZE */ unsigned char *in; /* input buffer (double-sized when writing) */ unsigned char *out; /* output buffer (double-sized when reading) */ int direct; /* 0 if processing gzip, 1 if transparent */ /* just for reading */ int how; /* 0: get header, 1: copy, 2: decompress */ z_off64_t start; /* where the gzip data started, for rewinding */ int eof; /* true if end of input file reached */ int past; /* true if read requested past end */ /* just for writing */ int level; /* compression level */ int strategy; /* compression strategy */ int reset; /* true if a reset is pending after a Z_FINISH */ /* seek request */ z_off64_t skip; /* amount to skip (already rewound if backwards) */ int seek; /* true if seek request pending */ /* error information */ int err; /* error code */ char *msg; /* error message */ /* zlib inflate or deflate stream */ z_stream strm; /* stream 
structure in-place (not a pointer) */ } gz_state; typedef gz_state FAR *gz_statep; /* shared functions */ void ZLIB_INTERNAL gz_error(gz_statep, int, const char *); #if defined UNDER_CE char ZLIB_INTERNAL *gz_strwinerror(DWORD error); #endif /* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t value -- needed when comparing unsigned to z_off64_t, which is signed (possible z_off64_t types off_t, off64_t, and long are all signed) */ unsigned ZLIB_INTERNAL gz_intmax(void); #define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) ================================================ FILE: pypcode/zlib/inffast.c ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* inffast.c -- fast decoding * Copyright (C) 1995-2017 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifdef ASMINF # pragma message("Assembler code may have bugs -- use at your own risk") #else /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is available, an end-of-block is encountered, or a data error is encountered. When large enough input and output buffers are supplied to inflate(), for example, a 16K input buffer and a 64K output buffer, more than 95% of the inflate execution time is spent in this routine. Entry assumptions: state->mode == LEN strm->avail_in >= 6 strm->avail_out >= 258 start >= strm->avail_out state->bits < 8 On return, state->mode is one of: LEN -- ran out of enough output space or enough available input TYPE -- reached end of block code, inflate() to interpret next block BAD -- error in block data Notes: - The maximum input bits used by a length/distance pair is 15 bits for the length code, 5 bits for the length extra, 15 bits for the distance code, and 13 bits for the distance extra. 
This totals 48 bits, or six bytes. Therefore if strm->avail_in >= 6, then there is enough input to avoid checking for available input while decoding. - The maximum bytes that a single length/distance pair can output is 258 bytes, which is the maximum length that can be coded. inflate_fast() requires strm->avail_out >= 258 for each loop to avoid checking for output space. */ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) { struct inflate_state FAR *state; z_const unsigned char FAR *in; /* local strm->next_in */ z_const unsigned char FAR *last; /* have enough input while in < last */ unsigned char FAR *out; /* local strm->next_out */ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ unsigned char FAR *end; /* while out < end, enough space available */ #ifdef INFLATE_STRICT unsigned dmax; /* maximum distance from zlib header */ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ code const FAR *lcode; /* local strm->lencode */ code const FAR *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ code const *here; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ unsigned dist; /* match distance */ unsigned char FAR *from; /* where to copy match from */ /* copy state to local variables */ state = (struct inflate_state FAR *)strm->state; in = strm->next_in; last = in + (strm->avail_in - 5); out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT dmax = state->dmax; #endif wsize = state->wsize; whave 
= state->whave; wnext = state->wnext; window = state->window; hold = state->hold; bits = state->bits; lcode = state->lencode; dcode = state->distcode; lmask = (1U << state->lenbits) - 1; dmask = (1U << state->distbits) - 1; /* decode literals and length/distances until end-of-block or not enough input data or output space */ do { if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } here = lcode + (hold & lmask); dolen: op = (unsigned)(here->bits); hold >>= op; bits -= op; op = (unsigned)(here->op); if (op == 0) { /* literal */ Tracevv((stderr, here->val >= 0x20 && here->val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here->val)); *out++ = (unsigned char)(here->val); } else if (op & 16) { /* length base */ len = (unsigned)(here->val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); hold >>= op; bits -= op; } Tracevv((stderr, "inflate: length %u\n", len)); if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } here = dcode + (hold & dmask); dodist: op = (unsigned)(here->bits); hold >>= op; bits -= op; op = (unsigned)(here->op); if (op & 16) { /* distance base */ dist = (unsigned)(here->val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } } dist += (unsigned)hold & ((1U << op) - 1); #ifdef INFLATE_STRICT if (dist > dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif hold >>= op; bits -= op; Tracevv((stderr, "inflate: distance %u\n", dist)); op = (unsigned)(out - beg); /* max distance in output */ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { if (state->sane) { strm->msg = 
(char *)"invalid distance too far back"; state->mode = BAD; break; } #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR if (len <= op - whave) { do { *out++ = 0; } while (--len); continue; } len -= op - whave; do { *out++ = 0; } while (--op > whave); if (op == 0) { from = out - dist; do { *out++ = *from++; } while (--len); continue; } #endif } from = window; if (wnext == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } else if (wnext < op) { /* wrap around window */ from += wsize + wnext - op; op -= wnext; if (op < len) { /* some from end of window */ len -= op; do { *out++ = *from++; } while (--op); from = window; if (wnext < len) { /* some from start of window */ op = wnext; len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } } else { /* contiguous in window */ from += wnext - op; if (op < len) { /* some from window */ len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { *out++ = *from++; *out++ = *from++; *out++ = *from++; len -= 3; } if (len) { *out++ = *from++; if (len > 1) *out++ = *from++; } } else { from = out - dist; /* copy direct from output */ do { /* minimum length is three */ *out++ = *from++; *out++ = *from++; *out++ = *from++; len -= 3; } while (len > 2); if (len) { *out++ = *from++; if (len > 1) *out++ = *from++; } } } else if ((op & 64) == 0) { /* 2nd level distance code */ here = dcode + here->val + (hold & ((1U << op) - 1)); goto dodist; } else { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } } else if ((op & 64) == 0) { /* 2nd level length code */ here = lcode + here->val + (hold & ((1U << op) - 1)); goto dolen; } else if (op & 32) { /* end-of-block */ Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } else { strm->msg = (char *)"invalid literal/length code"; 
state->mode = BAD; break; } } while (in < last && out < end); /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ len = bits >> 3; in -= len; bits -= len << 3; hold &= (1U << bits) - 1; /* update state and return */ strm->next_in = in; strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); state->hold = hold; state->bits = bits; return; } /* inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - Three separate decoding do-loops for direct, window, and wnext == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes - Swapping literal/length else - Swapping window/direct else - Larger unrolled copy loops (three is about right) - Moving len -= 3 statement into middle of loop */ #endif /* !ASMINF */ ================================================ FILE: pypcode/zlib/inffast.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* inffast.h -- header to use inffast.c * Copyright (C) 1995-2003, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. 
*/ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start); ================================================ FILE: pypcode/zlib/inffixed.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* inffixed.h -- table for decoding fixed codes * Generated automatically by makefixed(). */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of this library and is subject to change. Applications should only use zlib.h. */ static const code lenfix[512] = { {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, 
{0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, 
{0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, {0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, {0,9,255} }; static const code distfix[32] = { {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, {22,5,193},{64,5,0} }; ================================================ 
FILE: pypcode/zlib/inflate.c ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* inflate.c -- zlib decompression * Copyright (C) 1995-2022 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* * Change history: * * 1.2.beta0 24 Nov 2002 * - First version -- complete rewrite of inflate to simplify code, avoid * creation of window when not needed, minimize use of window when it is * needed, make inffast.c even faster, implement gzip decoding, and to * improve code readability and style over the previous zlib inflate code * * 1.2.beta1 25 Nov 2002 * - Use pointers for available input and output checking in inffast.c * - Remove input and output counters in inffast.c * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 * - Remove unnecessary second byte pull from length extra in inffast.c * - Unroll direct copy to three copies per loop in inffast.c * * 1.2.beta2 4 Dec 2002 * - Change external routine names to reduce potential conflicts * - Correct filename to inffixed.h for fixed tables in inflate.c * - Make hbuf[] unsigned char to match parameter type in inflate.c * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) * to avoid negation problem on Alphas (64 bit) in inflate.c * * 1.2.beta3 22 Dec 2002 * - Add comments on state->bits assertion in inffast.c * - Add comments on op field in inftrees.h * - Fix bug in reuse of allocated window after inflateReset() * - Remove bit fields--back to byte structure for speed * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths * - Change post-increments to pre-increments in inflate_fast(), PPC biased? * - Add compile time option, POSTINC, to use post-increments instead (Intel?) 
* - Make MATCH copy in inflate() much faster for when inflate_fast() not used * - Use local copies of stream next and avail values, as well as local bit * buffer and bit count in inflate()--for speed when inflate_fast() not used * * 1.2.beta4 1 Jan 2003 * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings * - Move a comment on output buffer sizes from inffast.c to inflate.c * - Add comments in inffast.c to introduce the inflate_fast() routine * - Rearrange window copies in inflate_fast() for speed and simplification * - Unroll last copy for window match in inflate_fast() * - Use local copies of window variables in inflate_fast() for speed * - Pull out common wnext == 0 case for speed in inflate_fast() * - Make op and len in inflate_fast() unsigned for consistency * - Add FAR to lcode and dcode declarations in inflate_fast() * - Simplified bad distance check in inflate_fast() * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new * source file infback.c to provide a call-back interface to inflate for * programs like gzip and unzip -- uses window as output buffer to avoid * window copying * * 1.2.beta5 1 Jan 2003 * - Improved inflateBack() interface to allow the caller to provide initial * input in strm. 
* - Fixed stored blocks bug in inflateBack() * * 1.2.beta6 4 Jan 2003 * - Added comments in inffast.c on effectiveness of POSTINC * - Typecasting all around to reduce compiler warnings * - Changed loops from while (1) or do {} while (1) to for (;;), again to * make compilers happy * - Changed type of window in inflateBackInit() to unsigned char * * * 1.2.beta7 27 Jan 2003 * - Changed many types to unsigned or unsigned short to avoid warnings * - Added inflateCopy() function * * 1.2.0 9 Mar 2003 * - Changed inflateBack() interface to provide separate opaque descriptors * for the in() and out() functions * - Changed inflateBack() argument and in_func typedef to swap the length * and buffer address return values for the input function * - Check next_in and next_out for Z_NULL on entry to inflate() * * The history for versions after 1.2.0 are in ChangeLog in zlib distribution. */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifdef MAKEFIXED # ifndef BUILDFIXED # define BUILDFIXED # endif #endif local int inflateStateCheck(z_streamp strm) { struct inflate_state FAR *state; if (strm == Z_NULL || strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) return 1; state = (struct inflate_state FAR *)strm->state; if (state == Z_NULL || state->strm != strm || state->mode < HEAD || state->mode > SYNC) return 1; return 0; } int ZEXPORT inflateResetKeep(z_streamp strm) { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; strm->total_in = strm->total_out = state->total = 0; strm->msg = Z_NULL; if (state->wrap) /* to support ill-conceived Java test suite */ strm->adler = state->wrap & 1; state->mode = HEAD; state->last = 0; state->havedict = 0; state->flags = -1; state->dmax = 32768U; state->head = Z_NULL; state->hold = 0; state->bits = 0; state->lencode = state->distcode = state->next = state->codes; state->sane = 1; state->back = -1; Tracev((stderr, 
"inflate: reset\n"));
    return Z_OK;
}

/* Reset the stream for a new decompression.  Marks the sliding window as
   unused (the window buffer itself, if already allocated, is kept for reuse)
   and then performs the common reset via inflateResetKeep().
   Returns Z_OK, or Z_STREAM_ERROR if strm is not a valid inflate stream. */
int ZEXPORT inflateReset(z_streamp strm) {
    struct inflate_state FAR *state;

    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    /* zeroing wsize/whave/wnext marks the window as not in use */
    state->wsize = 0;
    state->whave = 0;
    state->wnext = 0;
    return inflateResetKeep(strm);
}

/* Reset the stream and reinterpret windowBits: negative values select raw
   (no wrapper) decoding with window size -windowBits; otherwise bits 4+ of
   windowBits encode the wrap mode (zlib/gzip/auto-detect) and the low bits
   the window size.  Frees the existing window if the size changed.
   Returns Z_OK or Z_STREAM_ERROR on a bad stream or bad windowBits. */
int ZEXPORT inflateReset2(z_streamp strm, int windowBits) {
    int wrap;
    struct inflate_state FAR *state;

    /* get the state */
    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;

    /* extract wrap request from windowBits parameter */
    if (windowBits < 0) {
        if (windowBits < -15)
            return Z_STREAM_ERROR;
        wrap = 0;                       /* raw deflate, no zlib/gzip wrapper */
        windowBits = -windowBits;
    }
    else {
        wrap = (windowBits >> 4) + 5;
#ifdef GUNZIP
        if (windowBits < 48)
            windowBits &= 15;           /* keep only the window-size bits */
#endif
    }

    /* set number of window bits, free window if different */
    if (windowBits && (windowBits < 8 || windowBits > 15))
        return Z_STREAM_ERROR;
    if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) {
        ZFREE(strm, state->window);
        state->window = Z_NULL;
    }

    /* update state and reset the rest of it */
    state->wrap = wrap;
    state->wbits = (unsigned)windowBits;
    return inflateReset(strm);
}

/* Allocate and initialize the inflate state for strm, after verifying that
   the caller was compiled against a compatible zlib version and z_stream
   layout.  Supplies default allocators when none were given (unless Z_SOLO).
   Returns Z_OK, Z_VERSION_ERROR, Z_STREAM_ERROR, or Z_MEM_ERROR. */
int ZEXPORT inflateInit2_(z_streamp strm, int windowBits,
                          const char *version, int stream_size) {
    int ret;
    struct inflate_state FAR *state;

    if (version == Z_NULL || version[0] != ZLIB_VERSION[0] ||
        stream_size != (int)(sizeof(z_stream)))
        return Z_VERSION_ERROR;
    if (strm == Z_NULL) return Z_STREAM_ERROR;
    strm->msg = Z_NULL;                 /* in case we return an error */
    if (strm->zalloc == (alloc_func)0) {
#ifdef Z_SOLO
        return Z_STREAM_ERROR;
#else
        strm->zalloc = zcalloc;
        strm->opaque = (voidpf)0;
#endif
    }
    if (strm->zfree == (free_func)0)
#ifdef Z_SOLO
        return Z_STREAM_ERROR;
#else
        strm->zfree = zcfree;
#endif
    state = (struct inflate_state FAR *)
            ZALLOC(strm, 1, sizeof(struct inflate_state));
    if (state == Z_NULL) return Z_MEM_ERROR;
    Tracev((stderr, "inflate: allocated\n"));
    strm->state = (struct internal_state FAR *)state;
state->strm = strm;
    state->window = Z_NULL;
    state->mode = HEAD;     /* to pass state test in inflateReset2() */
    ret = inflateReset2(strm, windowBits);
    if (ret != Z_OK) {
        /* reset failed (bad windowBits): undo the allocation */
        ZFREE(strm, state);
        strm->state = Z_NULL;
    }
    return ret;
}

/* Classic-interface initializer: identical to inflateInit2_() with the
   default window size, DEF_WBITS. */
int ZEXPORT inflateInit_(z_streamp strm, const char *version,
                         int stream_size) {
    return inflateInit2_(strm, DEF_WBITS, version, stream_size);
}

/* Insert bits into the low end of the input bit accumulator, ahead of any
   data read from next_in.  bits == 0 is a no-op; a negative bits discards
   the entire accumulator.  At most 16 bits may be inserted per call, and
   the accumulator may not grow past 32 bits.
   Returns Z_OK or Z_STREAM_ERROR. */
int ZEXPORT inflatePrime(z_streamp strm, int bits, int value) {
    struct inflate_state FAR *state;

    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    if (bits == 0) return Z_OK;
    state = (struct inflate_state FAR *)strm->state;
    if (bits < 0) {
        /* negative count: empty the bit accumulator */
        state->hold = 0;
        state->bits = 0;
        return Z_OK;
    }
    if (bits > 16 || state->bits + (uInt)bits > 32) return Z_STREAM_ERROR;
    value &= (1L << bits) - 1;          /* keep only the requested bits */
    state->hold += (unsigned)value << state->bits;
    state->bits += (uInt)bits;
    return Z_OK;
}

/*
   Return state with length and distance decoding tables and index sizes set to
   fixed code decoding.  Normally this returns fixed tables from inffixed.h.
   If BUILDFIXED is defined, then instead this routine builds the tables the
   first time it's called, and returns those tables the first time and
   thereafter.  This reduces the size of the code by about 2K bytes, in
   exchange for a little execution time.  However, BUILDFIXED should not be
   used for threaded applications, since the rewriting of the tables and virgin
   may not be thread-safe.
*/ local void fixedtables(struct inflate_state FAR *state) { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } #ifdef MAKEFIXED #include /* Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also defines BUILDFIXED, so the tables are built on the fly. makefixed() writes those tables to stdout, which would be piped to inffixed.h. A small program can simply call makefixed to do this: void makefixed(void); int main(void) { makefixed(); return 0; } Then that can be linked with zlib built with MAKEFIXED defined and run: a.out > inffixed.h */ void makefixed(void) { unsigned low, size; struct inflate_state state; fixedtables(&state); puts(" /* inffixed.h -- table for decoding fixed codes"); puts(" * Generated automatically by makefixed()."); puts(" */"); puts(""); puts(" /* WARNING: this file should *not* be used by applications."); puts(" It is part of the implementation of this library and is"); puts(" subject to change. 
Applications should only use zlib.h."); puts(" */"); puts(""); size = 1U << 9; printf(" static const code lenfix[%u] = {", size); low = 0; for (;;) { if ((low % 7) == 0) printf("\n "); printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op, state.lencode[low].bits, state.lencode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); size = 1U << 5; printf("\n static const code distfix[%u] = {", size); low = 0; for (;;) { if ((low % 6) == 0) printf("\n "); printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, state.distcode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); } #endif /* MAKEFIXED */ /* Update the window with the last wsize (normally 32K) bytes written before returning. If window does not exist yet, create it. This is only called when a window is already in use, or when output has been written during this inflate call, but the end of the deflate stream has not been reached yet. It is also called to create a window for dictionary data when a dictionary is loaded. Providing output buffers larger than 32K to inflate() should provide a speed advantage, since only the last 32K of output is copied to the sliding window upon return from inflate(), and since all distances after the first 32K of output will fall in the output data, making match copies simpler and faster. The advantage may be dependent on the size of the processor's data caches. 
*/
local int updatewindow(z_streamp strm, const Bytef *end, unsigned copy) {
    struct inflate_state FAR *state;
    unsigned dist;              /* free space from wnext to end of window */

    state = (struct inflate_state FAR *)strm->state;

    /* if it hasn't been done already, allocate space for the window */
    if (state->window == Z_NULL) {
        state->window = (unsigned char FAR *)
                        ZALLOC(strm, 1U << state->wbits,
                               sizeof(unsigned char));
        if (state->window == Z_NULL) return 1;      /* allocation failed */
    }

    /* if window not in use yet, initialize */
    if (state->wsize == 0) {
        state->wsize = 1U << state->wbits;
        state->wnext = 0;
        state->whave = 0;
    }

    /* copy state->wsize or less output bytes into the circular window */
    if (copy >= state->wsize) {
        /* output since last call fills the window: keep only the last
           wsize bytes and start the circular index over */
        zmemcpy(state->window, end - state->wsize, state->wsize);
        state->wnext = 0;
        state->whave = state->wsize;
    }
    else {
        dist = state->wsize - state->wnext;
        if (dist > copy) dist = copy;
        zmemcpy(state->window + state->wnext, end - copy, dist);
        copy -= dist;
        if (copy) {
            /* wrapped around: remainder goes to the start of the window */
            zmemcpy(state->window, end - copy, copy);
            state->wnext = copy;
            state->whave = state->wsize;
        }
        else {
            state->wnext += dist;
            if (state->wnext == state->wsize) state->wnext = 0;
            if (state->whave < state->wsize) state->whave += dist;
        }
    }
    return 0;
}

/* Macros for inflate(): */

/* check function to use adler32() for zlib or crc32() for gzip */
#ifdef GUNZIP
#  define UPDATE_CHECK(check, buf, len) \
    (state->flags ?
crc32(check, buf, len) : adler32(check, buf, len)) #else # define UPDATE_CHECK(check, buf, len) adler32(check, buf, len) #endif /* check macros for header crc */ #ifdef GUNZIP # define CRC2(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ check = crc32(check, hbuf, 2); \ } while (0) # define CRC4(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ hbuf[2] = (unsigned char)((word) >> 16); \ hbuf[3] = (unsigned char)((word) >> 24); \ check = crc32(check, hbuf, 4); \ } while (0) #endif /* Load registers with state in inflate() for speed */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Restore state from registers in inflate() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflate() if there is no input available. */ #define PULLBYTE() \ do { \ if (have == 0) goto inf_leave; \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflate(). 
*/ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. The state machine is structured roughly as follows: for (;;) switch (state) { ... case STATEn: if (not enough input data or output space to make progress) return; ... make progress ... state = STATEm; break; ... } so when inflate() is called again, the same case is attempted again, and if the appropriate resources are provided, the machine proceeds to the next state. The NEEDBITS() macro is usually the way the state evaluates whether it can proceed or should return. NEEDBITS() does the return if the requested bits are not available. The typical use of the BITS macros is: NEEDBITS(n); ... do something with BITS(n) ... DROPBITS(n); where NEEDBITS(n) either returns from inflate() if there isn't enough input left to load n bits into the accumulator, or it continues. BITS(n) gives the low n bits in the accumulator. When done, DROPBITS(n) drops the low n bits off the accumulator. INITBITS() clears the accumulator and sets the number of available bits to zero. BYTEBITS() discards just enough bits to put the accumulator on a byte boundary. After BYTEBITS() and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return if there is no input available. The decoding of variable length codes uses PULLBYTE() directly in order to pull just enough bytes to decode the next code, and no more. 
Some states loop until they get enough input, making sure that enough state information is maintained to continue the loop where it left off if NEEDBITS() returns in the loop. For example, want, need, and keep would all have to actually be part of the saved state in case NEEDBITS() returns: case STATEw: while (want < need) { NEEDBITS(n); keep[want++] = BITS(n); DROPBITS(n); } state = STATEx; case STATEx: As shown above, if the next state is also the next case, then the break is omitted. A state may also return if there is not enough output space available to complete that state. Those states are copying stored data, writing a literal byte, and copying a matching string. When returning, a "goto inf_leave" is used to update the total counters, update the check value, and determine whether any progress has been made during that inflate() call in order to return the proper return code. Progress is defined as a change in either strm->avail_in or strm->avail_out. When there is a window, goto inf_leave will update the window with the last output written. If a goto inf_leave occurs in the middle of decompression and there is no window currently, goto inf_leave will create one and copy output to the window for the next call of inflate(). In this implementation, the flush parameter of inflate() only affects the return code (per zlib.h). inflate() always writes as much as possible to strm->next_out, given the space available and the provided input--the effect documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers the allocation of and copying into a sliding window until necessary, which provides the effect documented in zlib.h for Z_FINISH when the entire input stream available. So the only thing the flush parameter actually does is: when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it will return Z_BUF_ERROR if it has not reached the end of the stream. 
 */
int ZEXPORT inflate(z_streamp strm, int flush) {
    struct inflate_state FAR *state;
    z_const unsigned char FAR *next;    /* next input */
    unsigned char FAR *put;             /* next output */
    unsigned have, left;        /* available input and output */
    unsigned long hold;         /* bit buffer */
    unsigned bits;              /* bits in bit buffer */
    unsigned in, out;           /* save starting available input and output */
    unsigned copy;              /* number of stored or match bytes to copy */
    unsigned char FAR *from;    /* where to copy match bytes from */
    code here;                  /* current decoding table entry */
    code last;                  /* parent table entry */
    unsigned len;               /* length to copy for repeats, bits to drop */
    int ret;                    /* return code */
#ifdef GUNZIP
    unsigned char hbuf[4];      /* buffer for gzip header crc calculation */
#endif
    static const unsigned short order[19] = /* permutation of code lengths */
        {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

    if (inflateStateCheck(strm) || strm->next_out == Z_NULL ||
        (strm->next_in == Z_NULL && strm->avail_in != 0))
        return Z_STREAM_ERROR;

    state = (struct inflate_state FAR *)strm->state;
    if (state->mode == TYPE) state->mode = TYPEDO;      /* skip check */
    LOAD();
    in = have;
    out = left;
    ret = Z_OK;
    for (;;)
        switch (state->mode) {
        case HEAD:
            if (state->wrap == 0) {
                state->mode = TYPEDO;
                break;
            }
            NEEDBITS(16);
#ifdef GUNZIP
            if ((state->wrap & 2) && hold == 0x8b1f) {  /* gzip header */
                if (state->wbits == 0)
                    state->wbits = 15;
                state->check = crc32(0L, Z_NULL, 0);
                CRC2(state->check, hold);
                INITBITS();
                state->mode = FLAGS;
                break;
            }
            if (state->head != Z_NULL)
                state->head->done = -1;
            if (!(state->wrap & 1) ||   /* check if zlib header allowed */
#else
            if (
#endif
                ((BITS(8) << 8) + (hold >> 8)) % 31) {
                strm->msg = (char *)"incorrect header check";
                state->mode = BAD;
                break;
            }
            if (BITS(4) != Z_DEFLATED) {
                strm->msg = (char *)"unknown compression method";
                state->mode = BAD;
                break;
            }
            DROPBITS(4);
            len = BITS(4) + 8;      /* CINFO + 8 = log2 of window size */
            if (state->wbits == 0)
                state->wbits = len;
            if (len > 15 || len > state->wbits) {
                strm->msg = (char *)"invalid window size";
                state->mode = BAD;
                break;
            }
            state->dmax = 1U << len;
            state->flags = 0;               /* indicate zlib header */
            Tracev((stderr, "inflate: zlib header ok\n"));
            strm->adler = state->check = adler32(0L, Z_NULL, 0);
            state->mode = hold & 0x200 ? DICTID : TYPE;
            INITBITS();
            break;
#ifdef GUNZIP
        case FLAGS:
            NEEDBITS(16);
            state->flags = (int)(hold);
            if ((state->flags & 0xff) != Z_DEFLATED) {
                strm->msg = (char *)"unknown compression method";
                state->mode = BAD;
                break;
            }
            if (state->flags & 0xe000) {    /* reserved gzip flag bits */
                strm->msg = (char *)"unknown header flags set";
                state->mode = BAD;
                break;
            }
            if (state->head != Z_NULL)
                state->head->text = (int)((hold >> 8) & 1);
            if ((state->flags & 0x0200) && (state->wrap & 4))
                CRC2(state->check, hold);
            INITBITS();
            state->mode = TIME;
                /* fallthrough */
        case TIME:
            NEEDBITS(32);
            if (state->head != Z_NULL)
                state->head->time = hold;
            if ((state->flags & 0x0200) && (state->wrap & 4))
                CRC4(state->check, hold);
            INITBITS();
            state->mode = OS;
                /* fallthrough */
        case OS:
            NEEDBITS(16);
            if (state->head != Z_NULL) {
                state->head->xflags = (int)(hold & 0xff);
                state->head->os = (int)(hold >> 8);
            }
            if ((state->flags & 0x0200) && (state->wrap & 4))
                CRC2(state->check, hold);
            INITBITS();
            state->mode = EXLEN;
                /* fallthrough */
        case EXLEN:
            if (state->flags & 0x0400) {    /* FEXTRA set */
                NEEDBITS(16);
                state->length = (unsigned)(hold);
                if (state->head != Z_NULL)
                    state->head->extra_len = (unsigned)hold;
                if ((state->flags & 0x0200) && (state->wrap & 4))
                    CRC2(state->check, hold);
                INITBITS();
            }
            else if (state->head != Z_NULL)
                state->head->extra = Z_NULL;
            state->mode = EXTRA;
                /* fallthrough */
        case EXTRA:
            if (state->flags & 0x0400) {
                copy = state->length;
                if (copy > have) copy = have;
                if (copy) {
                    /* save only up to extra_max bytes of the extra field */
                    if (state->head != Z_NULL &&
                        state->head->extra != Z_NULL &&
                        (len = state->head->extra_len - state->length) <
                            state->head->extra_max) {
                        zmemcpy(state->head->extra + len, next,
                                len + copy > state->head->extra_max ?
                                state->head->extra_max - len : copy);
                    }
                    if ((state->flags & 0x0200) && (state->wrap & 4))
                        state->check = crc32(state->check, next, copy);
                    have -= copy;
                    next += copy;
                    state->length -= copy;
                }
                if (state->length) goto inf_leave;
            }
            state->length = 0;
            state->mode = NAME;
                /* fallthrough */
        case NAME:
            if (state->flags & 0x0800) {    /* FNAME set */
                if (have == 0) goto inf_leave;
                copy = 0;
                do {
                    /* len holds the last byte read; loop ends on the NUL
                       terminator or when input runs out */
                    len = (unsigned)(next[copy++]);
                    if (state->head != Z_NULL &&
                            state->head->name != Z_NULL &&
                            state->length < state->head->name_max)
                        state->head->name[state->length++] = (Bytef)len;
                } while (len && copy < have);
                if ((state->flags & 0x0200) && (state->wrap & 4))
                    state->check = crc32(state->check, next, copy);
                have -= copy;
                next += copy;
                if (len) goto inf_leave;
            }
            else if (state->head != Z_NULL)
                state->head->name = Z_NULL;
            state->length = 0;
            state->mode = COMMENT;
                /* fallthrough */
        case COMMENT:
            if (state->flags & 0x1000) {    /* FCOMMENT set */
                if (have == 0) goto inf_leave;
                copy = 0;
                do {
                    len = (unsigned)(next[copy++]);
                    if (state->head != Z_NULL &&
                            state->head->comment != Z_NULL &&
                            state->length < state->head->comm_max)
                        state->head->comment[state->length++] = (Bytef)len;
                } while (len && copy < have);
                if ((state->flags & 0x0200) && (state->wrap & 4))
                    state->check = crc32(state->check, next, copy);
                have -= copy;
                next += copy;
                if (len) goto inf_leave;
            }
            else if (state->head != Z_NULL)
                state->head->comment = Z_NULL;
            state->mode = HCRC;
                /* fallthrough */
        case HCRC:
            if (state->flags & 0x0200) {    /* FHCRC set */
                NEEDBITS(16);
                if ((state->wrap & 4) && hold != (state->check & 0xffff)) {
                    strm->msg = (char *)"header crc mismatch";
                    state->mode = BAD;
                    break;
                }
                INITBITS();
            }
            if (state->head != Z_NULL) {
                state->head->hcrc = (int)((state->flags >> 9) & 1);
                state->head->done = 1;
            }
            strm->adler = state->check = crc32(0L, Z_NULL, 0);
            state->mode = TYPE;
            break;
#endif
        case DICTID:
            NEEDBITS(32);
            strm->adler = state->check = ZSWAP32(hold);
            INITBITS();
            state->mode = DICT;
                /* fallthrough */
        case DICT:
            if (state->havedict == 0) {
                RESTORE();
                return Z_NEED_DICT;
            }
            strm->adler = state->check = adler32(0L, Z_NULL, 0);
            state->mode = TYPE;
                /* fallthrough */
        case TYPE:
            if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave;
                /* fallthrough */
        case TYPEDO:
            if (state->last) {
                BYTEBITS();
                state->mode = CHECK;
                break;
            }
            NEEDBITS(3);
            state->last = BITS(1);
            DROPBITS(1);
            switch (BITS(2)) {
            case 0:                             /* stored block */
                Tracev((stderr, "inflate: stored block%s\n",
                        state->last ? " (last)" : ""));
                state->mode = STORED;
                break;
            case 1:                             /* fixed block */
                fixedtables(state);
                Tracev((stderr, "inflate: fixed codes block%s\n",
                        state->last ? " (last)" : ""));
                state->mode = LEN_;             /* decode codes */
                if (flush == Z_TREES) {
                    DROPBITS(2);
                    goto inf_leave;
                }
                break;
            case 2:                             /* dynamic block */
                Tracev((stderr, "inflate: dynamic codes block%s\n",
                        state->last ? " (last)" : ""));
                state->mode = TABLE;
                break;
            case 3:
                strm->msg = (char *)"invalid block type";
                state->mode = BAD;
            }
            DROPBITS(2);
            break;
        case STORED:
            BYTEBITS();                         /* go to byte boundary */
            NEEDBITS(32);
            /* stored block header is LEN followed by its ones complement */
            if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
                strm->msg = (char *)"invalid stored block lengths";
                state->mode = BAD;
                break;
            }
            state->length = (unsigned)hold & 0xffff;
            Tracev((stderr, "inflate: stored length %u\n",
                    state->length));
            INITBITS();
            state->mode = COPY_;
            if (flush == Z_TREES) goto inf_leave;
                /* fallthrough */
        case COPY_:
            state->mode = COPY;
                /* fallthrough */
        case COPY:
            copy = state->length;
            if (copy) {
                if (copy > have) copy = have;
                if (copy > left) copy = left;
                if (copy == 0) goto inf_leave;
                zmemcpy(put, next, copy);
                have -= copy;
                next += copy;
                left -= copy;
                put += copy;
                state->length -= copy;
                break;
            }
            Tracev((stderr, "inflate: stored end\n"));
            state->mode = TYPE;
            break;
        case TABLE:
            NEEDBITS(14);
            state->nlen = BITS(5) + 257;
            DROPBITS(5);
            state->ndist = BITS(5) + 1;
            DROPBITS(5);
            state->ncode = BITS(4) + 4;
            DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
            if (state->nlen > 286 || state->ndist > 30) {
                strm->msg = (char *)"too many length or distance symbols";
                state->mode = BAD;
                break;
            }
#endif
            Tracev((stderr, "inflate: table sizes ok\n"));
            state->have = 0;
            state->mode = LENLENS;
                /* fallthrough */
        case LENLENS:
            /* read the code length code lengths, in order[] permutation */
            while (state->have < state->ncode) {
                NEEDBITS(3);
                state->lens[order[state->have++]] = (unsigned short)BITS(3);
                DROPBITS(3);
            }
            while (state->have < 19)
                state->lens[order[state->have++]] = 0;
            state->next = state->codes;
            state->lencode = (const code FAR *)(state->next);
            state->lenbits = 7;
            ret = inflate_table(CODES, state->lens, 19, &(state->next),
                                &(state->lenbits), state->work);
            if (ret) {
                strm->msg = (char *)"invalid code lengths set";
                state->mode = BAD;
                break;
            }
            Tracev((stderr, "inflate: code lengths ok\n"));
            state->have = 0;
            state->mode = CODELENS;
                /* fallthrough */
        case CODELENS:
            /* decode the length/literal and distance code lengths,
               expanding the 16/17/18 repeat codes */
            while (state->have < state->nlen + state->ndist) {
                for (;;) {
                    here = state->lencode[BITS(state->lenbits)];
                    if ((unsigned)(here.bits) <= bits) break;
                    PULLBYTE();
                }
                if (here.val < 16) {
                    DROPBITS(here.bits);
                    state->lens[state->have++] = here.val;
                }
                else {
                    if (here.val == 16) {       /* repeat previous length */
                        NEEDBITS(here.bits + 2);
                        DROPBITS(here.bits);
                        if (state->have == 0) {
                            strm->msg = (char *)"invalid bit length repeat";
                            state->mode = BAD;
                            break;
                        }
                        len = state->lens[state->have - 1];
                        copy = 3 + BITS(2);
                        DROPBITS(2);
                    }
                    else if (here.val == 17) {  /* repeat zero 3..10 times */
                        NEEDBITS(here.bits + 3);
                        DROPBITS(here.bits);
                        len = 0;
                        copy = 3 + BITS(3);
                        DROPBITS(3);
                    }
                    else {                      /* repeat zero 11..138 times */
                        NEEDBITS(here.bits + 7);
                        DROPBITS(here.bits);
                        len = 0;
                        copy = 11 + BITS(7);
                        DROPBITS(7);
                    }
                    if (state->have + copy > state->nlen + state->ndist) {
                        strm->msg = (char *)"invalid bit length repeat";
                        state->mode = BAD;
                        break;
                    }
                    while (copy--)
                        state->lens[state->have++] = (unsigned short)len;
                }
            }

            /* handle error breaks in while */
            if (state->mode == BAD) break;

            /* check for end-of-block code (better have one) */
            if (state->lens[256] == 0) {
                strm->msg = (char *)"invalid code -- missing end-of-block";
                state->mode = BAD;
                break;
            }

            /* build code tables -- note: do not change the lenbits or distbits
               values here (9 and 6) without reading the comments in inftrees.h
               concerning the ENOUGH constants, which depend on those values */
            state->next = state->codes;
            state->lencode = (const code FAR *)(state->next);
            state->lenbits = 9;
            ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
                                &(state->lenbits), state->work);
            if (ret) {
                strm->msg = (char *)"invalid literal/lengths set";
                state->mode = BAD;
                break;
            }
            state->distcode = (const code FAR *)(state->next);
            state->distbits = 6;
            ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
                                &(state->next), &(state->distbits),
                                state->work);
            if (ret) {
                strm->msg = (char *)"invalid distances set";
                state->mode = BAD;
                break;
            }
            Tracev((stderr, "inflate: codes ok\n"));
            state->mode = LEN_;
            if (flush == Z_TREES) goto inf_leave;
                /* fallthrough */
        case LEN_:
            state->mode = LEN;
                /* fallthrough */
        case LEN:
            /* fast path: enough input and output for inflate_fast() */
            if (have >= 6 && left >= 258) {
                RESTORE();
                inflate_fast(strm, out);
                LOAD();
                if (state->mode == TYPE)
                    state->back = -1;
                break;
            }
            state->back = 0;
            for (;;) {
                here = state->lencode[BITS(state->lenbits)];
                if ((unsigned)(here.bits) <= bits) break;
                PULLBYTE();
            }
            if (here.op && (here.op & 0xf0) == 0) {
                /* second-level table lookup */
                last = here;
                for (;;) {
                    here = state->lencode[last.val +
                            (BITS(last.bits + last.op) >> last.bits)];
                    if ((unsigned)(last.bits + here.bits) <= bits) break;
                    PULLBYTE();
                }
                DROPBITS(last.bits);
                state->back += last.bits;
            }
            DROPBITS(here.bits);
            state->back += here.bits;
            state->length = (unsigned)here.val;
            if ((int)(here.op) == 0) {
                Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
                        "inflate: literal '%c'\n" :
                        "inflate: literal 0x%02x\n", here.val));
                state->mode = LIT;
                break;
            }
            if (here.op & 32) {
                Tracevv((stderr, "inflate: end of block\n"));
                state->back = -1;
                state->mode = TYPE;
                break;
            }
            if (here.op & 64) {
                strm->msg = (char *)"invalid literal/length code";
                state->mode = BAD;
                break;
            }
            state->extra = (unsigned)(here.op) & 15;
            state->mode = LENEXT;
                /* fallthrough */
        case LENEXT:
            if (state->extra) {
                NEEDBITS(state->extra);
                state->length += BITS(state->extra);
                DROPBITS(state->extra);
                state->back += state->extra;
            }
            Tracevv((stderr, "inflate: length %u\n", state->length));
            state->was = state->length;
            state->mode = DIST;
                /* fallthrough */
        case DIST:
            for (;;) {
                here = state->distcode[BITS(state->distbits)];
                if ((unsigned)(here.bits) <= bits) break;
                PULLBYTE();
            }
            if ((here.op & 0xf0) == 0) {
                /* second-level table lookup */
                last = here;
                for (;;) {
                    here = state->distcode[last.val +
                            (BITS(last.bits + last.op) >> last.bits)];
                    if ((unsigned)(last.bits + here.bits) <= bits) break;
                    PULLBYTE();
                }
                DROPBITS(last.bits);
                state->back += last.bits;
            }
            DROPBITS(here.bits);
            state->back += here.bits;
            if (here.op & 64) {
                strm->msg = (char *)"invalid distance code";
                state->mode = BAD;
                break;
            }
            state->offset = (unsigned)here.val;
            state->extra = (unsigned)(here.op) & 15;
            state->mode = DISTEXT;
                /* fallthrough */
        case DISTEXT:
            if (state->extra) {
                NEEDBITS(state->extra);
                state->offset += BITS(state->extra);
                DROPBITS(state->extra);
                state->back += state->extra;
            }
#ifdef INFLATE_STRICT
            if (state->offset > state->dmax) {
                strm->msg = (char *)"invalid distance too far back";
                state->mode = BAD;
                break;
            }
#endif
            Tracevv((stderr, "inflate: distance %u\n", state->offset));
            state->mode = MATCH;
                /* fallthrough */
        case MATCH:
            if (left == 0) goto inf_leave;
            copy = out - left;
            if (state->offset > copy) {         /* copy from window */
                copy = state->offset - copy;
                if (copy > state->whave) {
                    if (state->sane) {
                        strm->msg = (char *)"invalid distance too far back";
                        state->mode = BAD;
                        break;
                    }
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
                    /* sane disabled: emit zeros for the too-far portion */
                    Trace((stderr, "inflate.c too far\n"));
                    copy -= state->whave;
                    if (copy > state->length) copy = state->length;
                    if (copy > left) copy = left;
                    left -= copy;
                    state->length -= copy;
                    do {
                        *put++ = 0;
                    } while (--copy);
                    if (state->length == 0) state->mode = LEN;
                    break;
#endif
                }
                if (copy > state->wnext) {
                    copy -= state->wnext;
                    from = state->window + (state->wsize - copy);
                }
                else
                    from = state->window + (state->wnext - copy);
                if (copy > state->length) copy = state->length;
            }
            else {                              /* copy from output */
                from = put - state->offset;
                copy = state->length;
            }
            if (copy > left) copy = left;
            left -= copy;
            state->length -= copy;
            do {
                *put++ = *from++;
            } while (--copy);
            if (state->length == 0) state->mode = LEN;
            break;
        case LIT:
            if (left == 0) goto inf_leave;
            *put++ = (unsigned char)(state->length);
            left--;
            state->mode = LEN;
            break;
        case CHECK:
            if (state->wrap) {
                NEEDBITS(32);
                out -= left;
                strm->total_out += out;
                state->total += out;
                if ((state->wrap & 4) && out)
                    strm->adler = state->check =
                        UPDATE_CHECK(state->check, put - out, out);
                out = left;
                if ((state->wrap & 4) && (
#ifdef GUNZIP
                     state->flags ? hold :
#endif
                     ZSWAP32(hold)) != state->check) {
                    strm->msg = (char *)"incorrect data check";
                    state->mode = BAD;
                    break;
                }
                INITBITS();
                Tracev((stderr, "inflate: check matches trailer\n"));
            }
#ifdef GUNZIP
            state->mode = LENGTH;
                /* fallthrough */
        case LENGTH:
            if (state->wrap && state->flags) {
                NEEDBITS(32);
                if ((state->wrap & 4) &&
                    hold != (state->total & 0xffffffff)) {
                    strm->msg = (char *)"incorrect length check";
                    state->mode = BAD;
                    break;
                }
                INITBITS();
                Tracev((stderr, "inflate: length matches trailer\n"));
            }
#endif
            state->mode = DONE;
                /* fallthrough */
        case DONE:
            ret = Z_STREAM_END;
            goto inf_leave;
        case BAD:
            ret = Z_DATA_ERROR;
            goto inf_leave;
        case MEM:
            return Z_MEM_ERROR;
        case SYNC:
                /* fallthrough */
        default:
            return Z_STREAM_ERROR;
        }

    /*
       Return from inflate(), updating the total counts and the check value.
       If there was no progress during the inflate() call, return a buffer
       error.  Call updatewindow() to create and/or update the window state.
       Note: a memory error from inflate() is non-recoverable.
     */
  inf_leave:
    RESTORE();
    if (state->wsize || (out != strm->avail_out && state->mode < BAD &&
            (state->mode < CHECK || flush != Z_FINISH)))
        if (updatewindow(strm, strm->next_out, out - strm->avail_out)) {
            state->mode = MEM;
            return Z_MEM_ERROR;
        }
    in -= strm->avail_in;
    out -= strm->avail_out;
    strm->total_in += in;
    strm->total_out += out;
    state->total += out;
    if ((state->wrap & 4) && out)
        strm->adler = state->check =
            UPDATE_CHECK(state->check, strm->next_out - out, out);
    strm->data_type = (int)state->bits + (state->last ? 64 : 0) +
                      (state->mode == TYPE ? 128 : 0) +
                      (state->mode == LEN_ || state->mode == COPY_ ? 256 : 0);
    if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK)
        ret = Z_BUF_ERROR;
    return ret;
}

/* Free all dynamically allocated inflate state, including the sliding
   window if one was created. */
int ZEXPORT inflateEnd(z_streamp strm) {
    struct inflate_state FAR *state;
    if (inflateStateCheck(strm))
        return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    if (state->window != Z_NULL) ZFREE(strm, state->window);
    ZFREE(strm, strm->state);
    strm->state = Z_NULL;
    Tracev((stderr, "inflate: end\n"));
    return Z_OK;
}

/* Copy the current sliding-window contents (the effective dictionary) to
   dictionary[], oldest bytes first, and/or report its length. */
int ZEXPORT inflateGetDictionary(z_streamp strm, Bytef *dictionary,
                                 uInt *dictLength) {
    struct inflate_state FAR *state;

    /* check state */
    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;

    /* copy dictionary -- two pieces because the window is circular */
    if (state->whave && dictionary != Z_NULL) {
        zmemcpy(dictionary, state->window + state->wnext,
                state->whave - state->wnext);
        zmemcpy(dictionary + state->whave - state->wnext,
                state->window, state->wnext);
    }
    if (dictLength != Z_NULL)
        *dictLength = state->whave;
    return Z_OK;
}

int ZEXPORT inflateSetDictionary(z_streamp strm, const Bytef *dictionary,
                                 uInt dictLength) {
    struct inflate_state FAR *state;
    unsigned long dictid;
    int ret;

    /* check state */
    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    if (state->wrap != 0 && state->mode != DICT)
        return Z_STREAM_ERROR;

    /* check for correct dictionary identifier */
    if (state->mode == DICT) {
        dictid = adler32(0L, Z_NULL, 0);
        dictid = adler32(dictid, dictionary, dictLength);
        if (dictid != state->check)
            return Z_DATA_ERROR;
    }

    /* copy dictionary to window using updatewindow(), which will amend the
       existing dictionary if appropriate */
    ret = updatewindow(strm, dictionary + dictLength, dictLength);
    if (ret) {
        state->mode = MEM;
        return Z_MEM_ERROR;
    }
    state->havedict = 1;
    Tracev((stderr, "inflate: dictionary set\n"));
    return Z_OK;
}

/* Register a gz_header structure to be filled in with gzip header fields
   as they are decoded; only valid when gzip decoding is possible. */
int ZEXPORT inflateGetHeader(z_streamp strm, gz_headerp head) {
    struct inflate_state FAR *state;

    /* check state */
    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    if ((state->wrap & 2) == 0) return Z_STREAM_ERROR;

    /* save header structure */
    state->head = head;
    head->done = 0;
    return Z_OK;
}

/*
   Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff.  Return when found
   or when out of input.  When called, *have is the number of pattern bytes
   found in order so far, in 0..3.  On return *have is updated to the new
   state.  If on return *have equals four, then the pattern was found and the
   return value is how many bytes were read including the last byte of the
   pattern.  If *have is less than four, then the pattern has not been found
   yet and the return value is len.  In the latter case, syncsearch() can be
   called again with more data and the *have state.  *have is initialized to
   zero for the first call.
 */
local unsigned syncsearch(unsigned FAR *have, const unsigned char FAR *buf,
                          unsigned len) {
    unsigned got;
    unsigned next;

    got = *have;
    next = 0;
    while (next < len && got < 4) {
        if ((int)(buf[next]) == (got < 2 ? 0 : 0xff))
            got++;
        else if (buf[next])
            got = 0;        /* non-zero, non-matching byte: restart */
        else
            got = 4 - got;  /* a zero can restart the 0,0 prefix */
        next++;
    }
    *have = got;
    return next;
}

int ZEXPORT inflateSync(z_streamp strm) {
    unsigned len;               /* number of bytes to look at or looked at */
    int flags;                  /* temporary to save header status */
    unsigned long in, out;      /* temporary to save total_in and total_out */
    unsigned char buf[4];       /* to restore bit buffer to byte string */
    struct inflate_state FAR *state;

    /* check parameters */
    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR;

    /* if first time, start search in bit buffer */
    if (state->mode != SYNC) {
        state->mode = SYNC;
        state->hold >>= state->bits & 7;
        state->bits -= state->bits & 7;
        len = 0;
        while (state->bits >= 8) {
            buf[len++] = (unsigned char)(state->hold);
            state->hold >>= 8;
            state->bits -= 8;
        }
        state->have = 0;
        syncsearch(&(state->have), buf, len);
    }

    /* search available input */
    len = syncsearch(&(state->have), strm->next_in, strm->avail_in);
    strm->avail_in -= len;
    strm->next_in += len;
    strm->total_in += len;

    /* return no joy or set up to restart inflate() on a new block */
    if (state->have != 4) return Z_DATA_ERROR;
    if (state->flags == -1)
        state->wrap = 0;    /* if no header yet, treat as raw */
    else
        state->wrap &= ~4;  /* no point in computing a check value now */
    flags = state->flags;
    in = strm->total_in;
    out = strm->total_out;
    inflateReset(strm);
    strm->total_in = in;
    strm->total_out = out;
    state->flags = flags;
    state->mode = TYPE;
    return Z_OK;
}

/*
   Returns true if inflate is currently at the end of a block generated by
   Z_SYNC_FLUSH or Z_FULL_FLUSH.  This function is used by one PPP
   implementation to provide an additional safety check.  PPP uses
   Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored
   block.  When decompressing, PPP checks that at the end of input packet,
   inflate is waiting for these length bytes.
 */
int ZEXPORT inflateSyncPoint(z_streamp strm) {
    struct inflate_state FAR *state;

    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    /* at a sync point iff waiting for a stored-block length on a byte
       boundary with an empty bit buffer */
    return state->mode == STORED && state->bits == 0;
}

/* Duplicate the entire inflate state of source into dest, including the
   sliding window and relocated internal code-table pointers. */
int ZEXPORT inflateCopy(z_streamp dest, z_streamp source) {
    struct inflate_state FAR *state;
    struct inflate_state FAR *copy;
    unsigned char FAR *window;
    unsigned wsize;

    /* check input */
    if (inflateStateCheck(source) || dest == Z_NULL)
        return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)source->state;

    /* allocate space */
    copy = (struct inflate_state FAR *)
           ZALLOC(source, 1, sizeof(struct inflate_state));
    if (copy == Z_NULL) return Z_MEM_ERROR;
    window = Z_NULL;
    if (state->window != Z_NULL) {
        window = (unsigned char FAR *)
                 ZALLOC(source, 1U << state->wbits, sizeof(unsigned char));
        if (window == Z_NULL) {
            ZFREE(source, copy);
            return Z_MEM_ERROR;
        }
    }

    /* copy state */
    zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
    zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state));
    copy->strm = dest;
    /* rebase table pointers only if they point into codes[] (dynamic
       tables); fixed tables are shared statics and need no relocation */
    if (state->lencode >= state->codes &&
        state->lencode <= state->codes + ENOUGH - 1) {
        copy->lencode = copy->codes + (state->lencode - state->codes);
        copy->distcode = copy->codes + (state->distcode - state->codes);
    }
    copy->next = copy->codes + (state->next - state->codes);
    if (window != Z_NULL) {
        wsize = 1U << state->wbits;
        zmemcpy(window, state->window, wsize);
    }
    copy->window = window;
    dest->state = (struct internal_state FAR *)copy;
    return Z_OK;
}

/* Enable/disable the too-far-back distance check (testing hook); only has
   an effect when built with INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR. */
int ZEXPORT inflateUndermine(z_streamp strm, int subvert) {
    struct inflate_state FAR *state;

    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
    state->sane = !subvert;
    return Z_OK;
#else
    (void)subvert;
    state->sane = 1;
    return Z_DATA_ERROR;
#endif
}

/* Turn trailer check-value validation on or off (wrap bit 2). */
int ZEXPORT inflateValidate(z_streamp strm, int check) {
    struct inflate_state FAR *state;

    if (inflateStateCheck(strm)) return Z_STREAM_ERROR;
    state = (struct inflate_state FAR *)strm->state;
    if (check && state->wrap)
        state->wrap |= 4;
    else
        state->wrap &= ~4;
    return Z_OK;
}

/* Return a mark: the number of input bits consumed since the last
   length/literal decoded (high 16 bits) plus the bytes still to be copied
   for an in-progress stored or match copy (low 16 bits). */
long ZEXPORT inflateMark(z_streamp strm) {
    struct inflate_state FAR *state;

    if (inflateStateCheck(strm))
        return -(1L << 16);
    state = (struct inflate_state FAR *)strm->state;
    return (long)(((unsigned long)((long)state->back)) << 16) +
        (state->mode == COPY ? state->length :
            (state->mode == MATCH ? state->was - state->length : 0));
}

/* Return how many code-table entries in codes[] are currently in use. */
unsigned long ZEXPORT inflateCodesUsed(z_streamp strm) {
    struct inflate_state FAR *state;
    if (inflateStateCheck(strm)) return (unsigned long)-1;
    state = (struct inflate_state FAR *)strm->state;
    return (unsigned long)(state->next - state->codes);
}


================================================
FILE: pypcode/zlib/inflate.h
================================================
/* ###
 * IP: zlib License
 * NOTE: from zlib 1.3.1
 */
/* inflate.h -- internal inflate state definition
 * Copyright (C) 1995-2019 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* WARNING: this file should *not* be used by applications. It is
   part of the implementation of the compression library and is
   subject to change. Applications should only use zlib.h.
 */

/* define NO_GZIP when compiling if you want to disable gzip header and
   trailer decoding by inflate().  NO_GZIP would be used to avoid linking in
   the crc code when it is not needed.  For shared libraries, gzip decoding
   should be left enabled.
 */
#ifndef NO_GZIP
#  define GUNZIP
#endif

/* Possible inflate modes between inflate() calls */
typedef enum {
    HEAD = 16180,   /* i: waiting for magic header */
    FLAGS,      /* i: waiting for method and flags (gzip) */
    TIME,       /* i: waiting for modification time (gzip) */
    OS,         /* i: waiting for extra flags and operating system (gzip) */
    EXLEN,      /* i: waiting for extra length (gzip) */
    EXTRA,      /* i: waiting for extra bytes (gzip) */
    NAME,       /* i: waiting for end of file name (gzip) */
    COMMENT,    /* i: waiting for end of comment (gzip) */
    HCRC,       /* i: waiting for header crc (gzip) */
    DICTID,     /* i: waiting for dictionary check value */
    DICT,       /* waiting for inflateSetDictionary() call */
        TYPE,       /* i: waiting for type bits, including last-flag bit */
        TYPEDO,     /* i: same, but skip check to exit inflate on new block */
        STORED,     /* i: waiting for stored size (length and complement) */
        COPY_,      /* i/o: same as COPY below, but only first time in */
        COPY,       /* i/o: waiting for input or output to copy stored block */
        TABLE,      /* i: waiting for dynamic block table lengths */
        LENLENS,    /* i: waiting for code length code lengths */
        CODELENS,   /* i: waiting for length/lit and distance code lengths */
            LEN_,       /* i: same as LEN below, but only first time in */
            LEN,        /* i: waiting for length/lit/eob code */
            LENEXT,     /* i: waiting for length extra bits */
            DIST,       /* i: waiting for distance code */
            DISTEXT,    /* i: waiting for distance extra bits */
            MATCH,      /* o: waiting for output space to copy string */
            LIT,        /* o: waiting for output space to write literal */
    CHECK,      /* i: waiting for 32-bit check value */
    LENGTH,     /* i: waiting for 32-bit length (gzip) */
    DONE,       /* finished check, done -- remain here until reset */
    BAD,        /* got a data error -- remain here until reset */
    MEM,        /* got an inflate() memory error -- remain here until reset */
    SYNC        /* looking for synchronization bytes to restart inflate() */
} inflate_mode;

/*
    State transitions between above modes -

    (most modes can go to BAD or MEM on error -- not shown for clarity)

    Process header:
        HEAD -> (gzip) or (zlib) or (raw)
        (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT ->
                  HCRC -> TYPE
        (zlib) -> DICTID or TYPE
        DICTID -> DICT -> TYPE
        (raw) -> TYPEDO
    Read deflate blocks:
            TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK
            STORED -> COPY_ -> COPY -> TYPE
            TABLE -> LENLENS -> CODELENS -> LEN_
            LEN_ -> LEN
    Read deflate codes in fixed or dynamic block:
                LEN -> LENEXT or LIT or TYPE
                LENEXT -> DIST -> DISTEXT -> MATCH -> LEN
                LIT -> LEN
    Process trailer:
        CHECK -> LENGTH -> DONE
 */

/* State maintained between inflate() calls -- approximately 7K bytes, not
   including the allocated sliding window, which is up to 32K bytes. */
struct inflate_state {
    z_streamp strm;             /* pointer back to this zlib stream */
    inflate_mode mode;          /* current inflate mode */
    int last;                   /* true if processing last block */
    int wrap;                   /* bit 0 true for zlib, bit 1 true for gzip,
                                   bit 2 true to validate check value */
    int havedict;               /* true if dictionary provided */
    int flags;                  /* gzip header method and flags, 0 if zlib, or
                                   -1 if raw or no header yet */
    unsigned dmax;              /* zlib header max distance (INFLATE_STRICT) */
    unsigned long check;        /* protected copy of check value */
    unsigned long total;        /* protected copy of output count */
    gz_headerp head;            /* where to save gzip header information */
        /* sliding window */
    unsigned wbits;             /* log base 2 of requested window size */
    unsigned wsize;             /* window size or zero if not using window */
    unsigned whave;             /* valid bytes in the window */
    unsigned wnext;             /* window write index */
    unsigned char FAR *window;  /* allocated sliding window, if needed */
        /* bit accumulator */
    unsigned long hold;         /* input bit accumulator */
    unsigned bits;              /* number of bits in "in" */
        /* for string and stored block copying */
    unsigned length;            /* literal or length of data to copy */
    unsigned offset;            /* distance back to copy string from */
        /* for table and code decoding */
    unsigned extra;             /* extra bits needed */
        /* fixed and dynamic code tables */
    code const FAR *lencode;    /* starting table for length/literal codes */
    code const FAR *distcode;   /* starting table for distance codes */
    unsigned lenbits;           /* index bits for lencode */
    unsigned distbits;          /* index bits for distcode */
        /* dynamic table building */
    unsigned ncode;             /* number of code length code lengths */
    unsigned nlen;              /* number of length code lengths */
    unsigned ndist;             /* number of distance code lengths */
    unsigned have;              /* number of code lengths in lens[] */
    code FAR *next;             /* next available space in codes[] */
    unsigned short lens[320];   /* temporary storage for code lengths */
    unsigned short work[288];   /* work area for code table building */
    code codes[ENOUGH];         /* space for code tables */
    int sane;                   /* if false, allow invalid distance too far */
    int back;                   /* bits back of last unprocessed length/lit */
    unsigned was;               /* initial length of match */
};


================================================
FILE: pypcode/zlib/inftrees.c
================================================
/* ###
 * IP: zlib License
 * NOTE: from zlib 1.3.1
 */
/* inftrees.c -- generate Huffman trees for efficient decoding
 * Copyright (C) 1995-2024 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "zutil.h"
#include "inftrees.h"

#define MAXBITS 15

const char inflate_copyright[] =
   " inflate 1.3.1 Copyright 1995-2024 Mark Adler ";
/*
  If you use the zlib library in a product, an acknowledgment is welcome
  in the documentation of your product. If for some reason you cannot
  include such an acknowledgment, I would appreciate that you keep this
  copyright string in the executable of your product.
 */

/*
   Build a set of tables to decode the provided canonical Huffman code.
   The code lengths are lens[0..codes-1].  The result starts at *table,
   whose indices are 0..2^bits-1.  work is a writable array of at least
   lens shorts, which is used as a work area.  type is the type of code
   to be generated, CODES, LENS, or DISTS.
   On return, zero is success, -1 is an invalid code, and +1 means that
   ENOUGH isn't enough.  table on return points to the next available entry's
   address.  bits is the requested root table index bits, and on return it is
   the actual root table index bits.  It will differ if the request is
   greater than the longest code or if it is less than the shortest code.

   The op/bits/val encoding written into each produced entry is documented in
   inftrees.h next to the definition of struct code.
 */
int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
                                unsigned codes, code FAR * FAR *table,
                                unsigned FAR *bits, unsigned short FAR *work) {
    unsigned len;               /* a code's length in bits */
    unsigned sym;               /* index of code symbols */
    unsigned min, max;          /* minimum and maximum code lengths */
    unsigned root;              /* number of index bits for root table */
    unsigned curr;              /* number of index bits for current table */
    unsigned drop;              /* code bits to drop for sub-table */
    int left;                   /* number of prefix codes available */
    unsigned used;              /* code entries in table used */
    unsigned huff;              /* Huffman code */
    unsigned incr;              /* for incrementing code, index */
    unsigned fill;              /* index for replicating entries */
    unsigned low;               /* low bits for current root entry */
    unsigned mask;              /* mask for low root bits */
    code here;                  /* table entry for duplication */
    code FAR *next;             /* next available space in table */
    const unsigned short FAR *base;     /* base value table to use */
    const unsigned short FAR *extra;    /* extra bits table to use */
    unsigned match;             /* use base and extra for symbol >= match */
    unsigned short count[MAXBITS+1];    /* number of codes of each length */
    unsigned short offs[MAXBITS+1];     /* offsets in table for each length */
    static const unsigned short lbase[31] = { /* Length codes 257..285 base */
        3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
        35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
    static const unsigned short lext[31] = { /* Length codes 257..285 extra */
        16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
        19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 203, 77};
    static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
        1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
        257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
        8193, 12289, 16385, 24577, 0, 0};
    static const unsigned short dext[32] = { /* Distance codes 0..29 extra */
        16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
        23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64};

    /*
       Process a set of code lengths to create a canonical Huffman code.  The
       code lengths are lens[0..codes-1].  Each length corresponds to the
       symbols 0..codes-1.  The Huffman code is generated by first sorting the
       symbols by length from short to long, and retaining the symbol order
       for codes with equal lengths.  Then the code starts with all zero bits
       for the first code of the shortest length, and the codes are integer
       increments for the same length, and zeros are appended as the length
       increases.  For the deflate format, these bits are stored backwards
       from their more natural integer increment ordering, and so when the
       decoding tables are built in the large loop below, the integer codes
       are incremented backwards.

       This routine assumes, but does not check, that all of the entries in
       lens[] are in the range 0..MAXBITS.  The caller must assure this.
       1..MAXBITS is interpreted as that code length.  zero means that that
       symbol does not occur in this code.

       The codes are sorted by computing a count of codes for each length,
       creating from that a table of starting indices for each length in the
       sorted table, and then entering the symbols in order in the sorted
       table.  The sorted table is work[], with that space being provided by
       the caller.

       The length counts are used for other purposes as well, i.e. finding
       the minimum and maximum length codes, determining if there are any
       codes at all, checking for a valid set of lengths, and looking ahead
       at length counts to determine sub-table sizes when building the
       decoding tables.
     */

    /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
    for (len = 0; len <= MAXBITS; len++)
        count[len] = 0;
    for (sym = 0; sym < codes; sym++)
        count[lens[sym]]++;

    /* bound code lengths, force root to be within code lengths */
    root = *bits;
    for (max = MAXBITS; max >= 1; max--)
        if (count[max] != 0) break;
    if (root > max) root = max;
    if (max == 0) {                     /* no symbols to code at all */
        here.op = (unsigned char)64;    /* invalid code marker */
        here.bits = (unsigned char)1;
        here.val = (unsigned short)0;
        *(*table)++ = here;             /* make a table to force an error */
        *(*table)++ = here;
        *bits = 1;
        return 0;     /* no symbols, but wait for decoding to report error */
    }
    for (min = 1; min < max; min++)
        if (count[min] != 0) break;
    if (root < min) root = min;

    /* check for an over-subscribed or incomplete set of lengths */
    left = 1;
    for (len = 1; len <= MAXBITS; len++) {
        left <<= 1;
        left -= count[len];
        if (left < 0) return -1;        /* over-subscribed */
    }
    if (left > 0 && (type == CODES || max != 1))
        return -1;                      /* incomplete set */

    /* generate offsets into symbol table for each length for sorting */
    offs[1] = 0;
    for (len = 1; len < MAXBITS; len++)
        offs[len + 1] = offs[len] + count[len];

    /* sort symbols by length, by symbol order within each length */
    for (sym = 0; sym < codes; sym++)
        if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym;

    /*
       Create and fill in decoding tables.  In this loop, the table being
       filled is at next and has curr index bits.  The code being used is huff
       with length len.  That code is converted to an index by dropping drop
       bits off of the bottom.  For codes where len is less than drop + curr,
       those top drop + curr - len bits are incremented through all values to
       fill the table with replicated entries.

       root is the number of index bits for the root table.  When len exceeds
       root, sub-tables are created pointed to by the root entry with an index
       of the low root bits of huff.  This is saved in low to check for when a
       new sub-table should be started.  drop is zero when the root table is
       being filled, and drop is root when sub-tables are being filled.

       When a new sub-table is needed, it is necessary to look ahead in the
       code lengths to determine what size sub-table is needed.  The length
       counts are used for this, and so count[] is decremented as codes are
       entered in the tables.

       used keeps track of how many table entries have been allocated from the
       provided *table space.  It is checked for LENS and DIST tables against
       the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
       the initial root table size constants.  See the comments in inftrees.h
       for more information.

       sym increments through all symbols, and the loop terminates when
       all codes of length max, i.e. all codes, have been processed.  This
       routine permits incomplete codes, so another loop after this one fills
       in the rest of the decoding tables with invalid code markers.
     */

    /* set up for code type */
    switch (type) {
    case CODES:
        base = extra = work;    /* dummy value--not used */
        match = 20;
        break;
    case LENS:
        base = lbase;
        extra = lext;
        match = 257;
        break;
    default:            /* DISTS */
        base = dbase;
        extra = dext;
        match = 0;
    }

    /* initialize state for loop */
    huff = 0;                   /* starting code */
    sym = 0;                    /* starting code symbol */
    len = min;                  /* starting code length */
    next = *table;              /* current table to fill in */
    curr = root;                /* current table index bits */
    drop = 0;                   /* current bits to drop from code for index */
    low = (unsigned)(-1);       /* trigger new sub-table when len > root */
    used = 1U << root;          /* use root table entries */
    mask = used - 1;            /* mask for comparing low */

    /* check available table space */
    if ((type == LENS && used > ENOUGH_LENS) ||
        (type == DISTS && used > ENOUGH_DISTS))
        return 1;

    /* process all codes and make table entries */
    for (;;) {
        /* create table entry */
        here.bits = (unsigned char)(len - drop);
        if (work[sym] + 1U < match) {
            here.op = (unsigned char)0;
            here.val = work[sym];
        }
        else if (work[sym] >= match) {
            here.op = (unsigned char)(extra[work[sym] - match]);
            here.val = base[work[sym] - match];
        }
        else {
            here.op = (unsigned char)(32 + 64);         /* end of block */
            here.val = 0;
        }

        /* replicate for those indices with low len bits equal to huff */
        incr = 1U << (len - drop);
        fill = 1U << curr;
        min = fill;                 /* save offset to next table */
        do {
            fill -= incr;
            next[(huff >> drop) + fill] = here;
        } while (fill != 0);

        /* backwards increment the len-bit code huff */
        incr = 1U << (len - 1);
        while (huff & incr)
            incr >>= 1;
        if (incr != 0) {
            huff &= incr - 1;
            huff += incr;
        }
        else
            huff = 0;

        /* go to next symbol, update count, len */
        sym++;
        if (--(count[len]) == 0) {
            if (len == max) break;
            len = lens[work[sym]];
        }

        /* create new sub-table if needed */
        if (len > root && (huff & mask) != low) {
            /* if first time, transition to sub-tables */
            if (drop == 0)
                drop = root;

            /* increment past last table */
            next += min;            /* here min is 1 << curr */

            /* determine length of next table */
            curr = len - drop;
            left = (int)(1 << curr);
            while (curr + drop < max) {
                left -= count[curr + drop];
                if (left <= 0) break;
                curr++;
                left <<= 1;
            }

            /* check for enough space */
            used += 1U << curr;
            if ((type == LENS && used > ENOUGH_LENS) ||
                (type == DISTS && used > ENOUGH_DISTS))
                return 1;

            /* point entry in root table to sub-table */
            low = huff & mask;
            (*table)[low].op = (unsigned char)curr;
            (*table)[low].bits = (unsigned char)root;
            (*table)[low].val = (unsigned short)(next - *table);
        }
    }

    /* fill in remaining table entry if code is incomplete (guaranteed to have
       at most one remaining entry, since if the code is incomplete, the
       maximum code length that was allowed to get this far is one bit) */
    if (huff != 0) {
        here.op = (unsigned char)64;            /* invalid code marker */
        here.bits = (unsigned char)(len - drop);
        here.val = (unsigned short)0;
        next[huff] = here;
    }

    /* set return parameters */
    *table += used;
    *bits = root;
    return 0;
}


================================================
FILE: pypcode/zlib/inftrees.h
================================================
/* ###
 * IP: zlib
License
 * NOTE: from zlib 1.3.1
 */
/* inftrees.h -- header to use inftrees.c
 * Copyright (C) 1995-2005, 2010 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* WARNING: this file should *not* be used by applications. It is
   part of the implementation of the compression library and is
   subject to change. Applications should only use zlib.h.
 */

/* Structure for decoding tables.  Each entry provides either the
   information needed to do the operation requested by the code that
   indexed that table entry, or it provides a pointer to another
   table that indexes more bits of the code.  op indicates whether
   the entry is a pointer to another table, a literal, a length or
   distance, an end-of-block, or an invalid code.  For a table
   pointer, the low four bits of op is the number of index bits of
   that table.  For a length or distance, the low four bits of op
   is the number of extra bits to get after the code.  bits is
   the number of bits in this code or part of the code to drop off
   of the bit buffer.  val is the actual byte to output in the case
   of a literal, the base length or distance, or the offset from
   the current table to the next table.  Each entry is four bytes. */
typedef struct {
    unsigned char op;           /* operation, extra bits, table bits */
    unsigned char bits;         /* bits in this part of the code */
    unsigned short val;         /* offset in table or code value */
} code;

/* op values as set by inflate_table():
    00000000 - literal
    0000tttt - table link, tttt != 0 is the number of table index bits
    0001eeee - length or distance, eeee is the number of extra bits
    01100000 - end of block
    01000000 - invalid code
 */

/* Maximum size of the dynamic table.  The maximum number of code structures
   is 1444, which is the sum of 852 for literal/length codes and 592 for
   distance codes.  These values were found by exhaustive searches using the
   program examples/enough.c found in the zlib distribution.  The arguments
   to that program are the number of symbols, the initial root table size,
   and the maximum bit length of a code.  "enough 286 9 15" for literal/length
   codes returns 852, and "enough 30 6 15" for distance codes returns 592.
   The initial root table size (9 or 6) is found in the fifth argument of the
   inflate_table() calls in inflate.c and infback.c.  If the root table size
   is changed, then these maximum sizes would be need to be recalculated and
   updated. */
#define ENOUGH_LENS 852
#define ENOUGH_DISTS 592
#define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS)

/* Type of code to build for inflate_table() */
typedef enum {
    CODES,
    LENS,
    DISTS
} codetype;

int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
                                unsigned codes, code FAR * FAR *table,
                                unsigned FAR *bits, unsigned short FAR *work);


================================================
FILE: pypcode/zlib/trees.c
================================================
/* ###
 * IP: zlib License
 * NOTE: from zlib 1.3.1
 */
/* trees.c -- output deflated data using Huffman coding
 * Copyright (C) 1995-2024 Jean-loup Gailly
 * detect_data_type() function provided freely by Cosmin Truta, 2006
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/*
 *  ALGORITHM
 *
 *      The "deflation" process uses several Huffman trees. The more
 *      common source values are represented by shorter bit sequences.
 *
 *      Each code tree is stored in a compressed form which is itself
 * a Huffman encoding of the lengths of all the code strings (in
 *      ascending order by source values).  The actual code strings are
 *      reconstructed from the lengths in the inflate process, as described
 *      in the deflate specification.
 *
 *  REFERENCES
 *
 *      Deutsch, L.P.,"'Deflate' Compressed Data Format Specification".
 *      Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc
 *
 *      Storer, James A.
 *          Data Compression:  Methods and Theory, pp. 49-50.
 *          Computer Science Press, 1988.  ISBN 0-7167-8156-5.
 *
 *      Sedgewick, R.
 *          Algorithms, p290.
* Addison-Wesley, 1983. ISBN 0-201-06672-6. */ /* @(#) $Id$ */ /* #define GEN_TREES_H */ #include "deflate.h" #ifdef ZLIB_DEBUG # include #endif /* =========================================================================== * Constants */ #define MAX_BL_BITS 7 /* Bit length codes must not exceed MAX_BL_BITS bits */ #define END_BLOCK 256 /* end of block literal code */ #define REP_3_6 16 /* repeat previous bit length 3-6 times (2 bits of repeat count) */ #define REPZ_3_10 17 /* repeat a zero length 3-10 times (3 bits of repeat count) */ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; local const int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; local const uch bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. */ /* =========================================================================== * Local data. These are initialized only once. */ #define DIST_CODE_LEN 512 /* see definition of array dist_code below */ #if defined(GEN_TREES_H) || !defined(STDC) /* non ANSI compilers may not accept trees.h */ local ct_data static_ltree[L_CODES+2]; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see _tr_init * below). */ local ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) 
*/ uch _dist_code[DIST_CODE_LEN]; /* Distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. */ uch _length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ local int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ local int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ #else # include "trees.h" #endif /* GEN_TREES_H */ struct static_tree_desc_s { const ct_data *static_tree; /* static tree or NULL */ const intf *extra_bits; /* extra bits for each code or NULL */ int extra_base; /* base index for extra_bits */ int elems; /* max number of elements in the tree */ int max_length; /* max bit length for the codes */ }; #ifdef NO_INIT_GLOBAL_POINTERS # define TCONST #else # define TCONST const #endif local TCONST static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; local TCONST static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; local TCONST static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Output a short LSB first on the stream. * IN assertion: there is enough room in pendingBuf. 
*/ #define put_short(s, w) { \ put_byte(s, (uch)((w) & 0xff)); \ put_byte(s, (uch)((ush)(w) >> 8)); \ } /* =========================================================================== * Reverse the first len bits of a code, using straightforward code (a faster * method would use a table) * IN assertion: 1 <= len <= 15 */ local unsigned bi_reverse(unsigned code, int len) { register unsigned res = 0; do { res |= code & 1; code >>= 1, res <<= 1; } while (--len > 0); return res >> 1; } /* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. */ local void bi_flush(deflate_state *s) { if (s->bi_valid == 16) { put_short(s, s->bi_buf); s->bi_buf = 0; s->bi_valid = 0; } else if (s->bi_valid >= 8) { put_byte(s, (Byte)s->bi_buf); s->bi_buf >>= 8; s->bi_valid -= 8; } } /* =========================================================================== * Flush the bit buffer and align the output on a byte boundary */ local void bi_windup(deflate_state *s) { if (s->bi_valid > 8) { put_short(s, s->bi_buf); } else if (s->bi_valid > 0) { put_byte(s, (Byte)s->bi_buf); } s->bi_buf = 0; s->bi_valid = 0; #ifdef ZLIB_DEBUG s->bits_sent = (s->bits_sent + 7) & ~7; #endif } /* =========================================================================== * Generate the codes for a given tree and bit counts (which need not be * optimal). * IN assertion: the array bl_count contains the bit length statistics for * the given tree and the field len is set for all tree elements. * OUT assertion: the field code is set for all tree elements of non * zero code length. */ local void gen_codes(ct_data *tree, int max_code, ushf *bl_count) { ush next_code[MAX_BITS+1]; /* next code value for each bit length */ unsigned code = 0; /* running code value */ int bits; /* bit index */ int n; /* code index */ /* The distribution counts are first used to generate the code values * without bit reversal. 
*/ for (bits = 1; bits <= MAX_BITS; bits++) { code = (code + bl_count[bits - 1]) << 1; next_code[bits] = (ush)code; } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. */ Assert (code + bl_count[MAX_BITS] - 1 == (1 << MAX_BITS) - 1, "inconsistent bit counts"); Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); for (n = 0; n <= max_code; n++) { int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ tree[n].Code = (ush)bi_reverse(next_code[len]++, len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len] - 1)); } } #ifdef GEN_TREES_H local void gen_trees_header(void); #endif #ifndef ZLIB_DEBUG # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) /* Send a code of the given tree. c and tree must not have side effects */ #else /* !ZLIB_DEBUG */ # define send_code(s, c, tree) \ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ send_bits(s, tree[c].Code, tree[c].Len); } #endif /* =========================================================================== * Send a value on a given number of bits. * IN assertion: length <= 16 and value fits in length bits. */ #ifdef ZLIB_DEBUG local void send_bits(deflate_state *s, int value, int length) { Tracevv((stderr," l %2d v %4x ", length, value)); Assert(length > 0 && length <= 15, "invalid length"); s->bits_sent += (ulg)length; /* If not enough room in bi_buf, use (valid) bits from bi_buf and * (16 - bi_valid) bits from value, leaving (width - (16 - bi_valid)) * unused bits in value. 
*/ if (s->bi_valid > (int)Buf_size - length) { s->bi_buf |= (ush)value << s->bi_valid; put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { s->bi_buf |= (ush)value << s->bi_valid; s->bi_valid += length; } } #else /* !ZLIB_DEBUG */ #define send_bits(s, value, length) \ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = (int)value;\ s->bi_buf |= (ush)val << s->bi_valid;\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ s->bi_buf |= (ush)(value) << s->bi_valid;\ s->bi_valid += len;\ }\ } #endif /* ZLIB_DEBUG */ /* the arguments must not have side effects */ /* =========================================================================== * Initialize the various 'constant' tables. */ local void tr_static_init(void) { #if defined(GEN_TREES_H) || !defined(STDC) static int static_init_done = 0; int n; /* iterates over tree elements */ int bits; /* bit counter */ int length; /* length value */ int code; /* code value */ int dist; /* distance index */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ if (static_init_done) return; /* For some embedded targets, global variables are not initialized: */ #ifdef NO_INIT_GLOBAL_POINTERS static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; static_d_desc.static_tree = static_dtree; static_d_desc.extra_bits = extra_dbits; static_bl_desc.extra_bits = extra_blbits; #endif /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; for (code = 0; code < LENGTH_CODES-1; code++) { base_length[code] = length; for (n = 0; n < (1 << extra_lbits[code]); n++) { _length_code[length++] = (uch)code; } } Assert (length == 256, "tr_static_init: length != 256"); /* Note that the length 255 (match length 258) can be represented * in two different ways: code 284 + 5 bits or code 285, so we * overwrite 
length_code[255] to use the best encoding: */ _length_code[length - 1] = (uch)code; /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ dist = 0; for (code = 0 ; code < 16; code++) { base_dist[code] = dist; for (n = 0; n < (1 << extra_dbits[code]); n++) { _dist_code[dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: dist != 256"); dist >>= 7; /* from now on, all distances are divided by 128 */ for ( ; code < D_CODES; code++) { base_dist[code] = dist << 7; for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) { _dist_code[256 + dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: 256 + dist != 512"); /* Construct the codes of the static literal tree */ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; n = 0; while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; /* Codes 286 and 287 do not exist, but we must include them in the * tree construction to get a canonical Huffman tree (longest code * all ones) */ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); /* The static distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; static_dtree[n].Code = bi_reverse((unsigned)n, 5); } static_init_done = 1; # ifdef GEN_TREES_H gen_trees_header(); # endif #endif /* defined(GEN_TREES_H) || !defined(STDC) */ } /* =========================================================================== * Generate the file trees.h describing the static trees. */ #ifdef GEN_TREES_H # ifndef ZLIB_DEBUG # include # endif # define SEPARATOR(i, last, width) \ ((i) == (last)? "\n};\n\n" : \ ((i) % (width) == (width) - 1 ? 
",\n" : ", ")) void gen_trees_header(void) { FILE *header = fopen("trees.h", "w"); int i; Assert (header != NULL, "Can't open trees.h"); fprintf(header, "/* header created automatically with -DGEN_TREES_H */\n\n"); fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); for (i = 0; i < L_CODES+2; i++) { fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); } fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); } fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); for (i = 0; i < DIST_CODE_LEN; i++) { fprintf(header, "%2u%s", _dist_code[i], SEPARATOR(i, DIST_CODE_LEN-1, 20)); } fprintf(header, "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { fprintf(header, "%2u%s", _length_code[i], SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); } fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); for (i = 0; i < LENGTH_CODES; i++) { fprintf(header, "%1u%s", base_length[i], SEPARATOR(i, LENGTH_CODES-1, 20)); } fprintf(header, "local const int base_dist[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "%5u%s", base_dist[i], SEPARATOR(i, D_CODES-1, 10)); } fclose(header); } #endif /* GEN_TREES_H */ /* =========================================================================== * Initialize a new block. */ local void init_block(deflate_state *s) { int n; /* iterates over tree elements */ /* Initialize the trees. 
*/ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; s->dyn_ltree[END_BLOCK].Freq = 1; s->opt_len = s->static_len = 0L; s->sym_next = s->matches = 0; } /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ void ZLIB_INTERNAL _tr_init(deflate_state *s) { tr_static_init(); s->l_desc.dyn_tree = s->dyn_ltree; s->l_desc.stat_desc = &static_l_desc; s->d_desc.dyn_tree = s->dyn_dtree; s->d_desc.stat_desc = &static_d_desc; s->bl_desc.dyn_tree = s->bl_tree; s->bl_desc.stat_desc = &static_bl_desc; s->bi_buf = 0; s->bi_valid = 0; #ifdef ZLIB_DEBUG s->compressed_len = 0L; s->bits_sent = 0L; #endif /* Initialize the first block of the first file: */ init_block(s); } #define SMALLEST 1 /* Index within the heap array of least frequent node in the Huffman tree */ /* =========================================================================== * Remove the smallest element from the heap and recreate the heap with * one less element. Updates heap and heap_len. */ #define pqremove(s, tree, top) \ {\ top = s->heap[SMALLEST]; \ s->heap[SMALLEST] = s->heap[s->heap_len--]; \ pqdownheap(s, tree, SMALLEST); \ } /* =========================================================================== * Compares to subtrees, using the tree depth as tie breaker when * the subtrees have equal frequency. This minimizes the worst case length. */ #define smaller(tree, n, m, depth) \ (tree[n].Freq < tree[m].Freq || \ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) /* =========================================================================== * Restore the heap property by moving down the tree starting at node k, * exchanging a node with the smallest of its two sons if necessary, stopping * when the heap property is re-established (each father smaller than its * two sons). 
*/ local void pqdownheap(deflate_state *s, ct_data *tree, int k) { int v = s->heap[k]; int j = k << 1; /* left son of k */ while (j <= s->heap_len) { /* Set j to the smallest of the two sons: */ if (j < s->heap_len && smaller(tree, s->heap[j + 1], s->heap[j], s->depth)) { j++; } /* Exit if v is smaller than both sons */ if (smaller(tree, v, s->heap[j], s->depth)) break; /* Exchange v with the smallest son */ s->heap[k] = s->heap[j]; k = j; /* And continue down the tree, setting j to the left son of k */ j <<= 1; } s->heap[k] = v; } /* =========================================================================== * Compute the optimal bit lengths for a tree and update the total bit length * for the current block. * IN assertion: the fields freq and dad are set, heap[heap_max] and * above are the tree nodes sorted by increasing frequency. * OUT assertions: the field len is set to the optimal bit length, the * array bl_count contains the frequencies for each bit length. * The length opt_len is updated; static_len is also updated if stree is * not null. */ local void gen_bitlen(deflate_state *s, tree_desc *desc) { ct_data *tree = desc->dyn_tree; int max_code = desc->max_code; const ct_data *stree = desc->stat_desc->static_tree; const intf *extra = desc->stat_desc->extra_bits; int base = desc->stat_desc->extra_base; int max_length = desc->stat_desc->max_length; int h; /* heap index */ int n, m; /* iterate over the tree elements */ int bits; /* bit length */ int xbits; /* extra bits */ ush f; /* frequency */ int overflow = 0; /* number of elements with bit length too large */ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; /* In a first pass, compute the optimal bit lengths (which may * overflow in the case of the bit length tree). 
*/ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ for (h = s->heap_max + 1; h < HEAP_SIZE; h++) { n = s->heap[h]; bits = tree[tree[n].Dad].Len + 1; if (bits > max_length) bits = max_length, overflow++; tree[n].Len = (ush)bits; /* We overwrite tree[n].Dad which is no longer needed */ if (n > max_code) continue; /* not a leaf node */ s->bl_count[bits]++; xbits = 0; if (n >= base) xbits = extra[n - base]; f = tree[n].Freq; s->opt_len += (ulg)f * (unsigned)(bits + xbits); if (stree) s->static_len += (ulg)f * (unsigned)(stree[n].Len + xbits); } if (overflow == 0) return; Tracev((stderr,"\nbit length overflow\n")); /* This happens for example on obj2 and pic of the Calgary corpus */ /* Find the first bit length which could increase: */ do { bits = max_length - 1; while (s->bl_count[bits] == 0) bits--; s->bl_count[bits]--; /* move one leaf down the tree */ s->bl_count[bits + 1] += 2; /* move one overflow item as its brother */ s->bl_count[max_length]--; /* The brother of the overflow item also moves one step up, * but this does not affect bl_count[max_length] */ overflow -= 2; } while (overflow > 0); /* Now recompute all bit lengths, scanning in increasing frequency. * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all * lengths instead of fixing only the wrong ones. This idea is taken * from 'ar' written by Haruhiko Okumura.) */ for (bits = max_length; bits != 0; bits--) { n = s->bl_count[bits]; while (n != 0) { m = s->heap[--h]; if (m > max_code) continue; if ((unsigned) tree[m].Len != (unsigned) bits) { Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); s->opt_len += ((ulg)bits - tree[m].Len) * tree[m].Freq; tree[m].Len = (ush)bits; } n--; } } } #ifdef DUMP_BL_TREE # include #endif /* =========================================================================== * Construct one Huffman tree and assigns the code bit strings and lengths. * Update the total bit length for the current block. 
 * IN assertion: the field freq is set for all tree elements.
 * OUT assertions: the fields len and code are set to the optimal bit length
 * and corresponding code. The length opt_len is updated; static_len is
 * also updated if stree is not null. The field max_code is set.
 */
local void build_tree(deflate_state *s, tree_desc *desc) {
    ct_data *tree        = desc->dyn_tree;
    const ct_data *stree = desc->stat_desc->static_tree;
    int elems            = desc->stat_desc->elems;
    int n, m;          /* iterate over heap elements */
    int max_code = -1; /* largest code with non zero frequency */
    int node;          /* new node being created */

    /* Construct the initial heap, with least frequent element in
     * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n + 1].
     * heap[0] is not used.
     */
    s->heap_len = 0, s->heap_max = HEAP_SIZE;

    for (n = 0; n < elems; n++) {
        if (tree[n].Freq != 0) {
            s->heap[++(s->heap_len)] = max_code = n;
            s->depth[n] = 0;
        } else {
            tree[n].Len = 0;
        }
    }

    /* The pkzip format requires that at least one distance code exists,
     * and that at least one bit should be sent even if there is only one
     * possible code. So to avoid special checks later on we force at least
     * two codes of non zero frequency.
     */
    while (s->heap_len < 2) {
        node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0);
        tree[node].Freq = 1;
        s->depth[node] = 0;
        s->opt_len--;
        if (stree) s->static_len -= stree[node].Len;
        /* node is 0 or 1 so it does not have extra bits */
    }
    desc->max_code = max_code;

    /* The elements heap[heap_len/2 + 1 .. heap_len] are leaves of the tree,
     * establish sub-heaps of increasing lengths:
     */
    for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n);

    /* Construct the Huffman tree by repeatedly combining the least two
     * frequent nodes.
     */
    node = elems;              /* next internal node of the tree */
    do {
        pqremove(s, tree, n);  /* n = node of least frequency */
        m = s->heap[SMALLEST]; /* m = node of next least frequency */

        s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */
        s->heap[--(s->heap_max)] = m;

        /* Create a new node father of n and m */
        tree[node].Freq = tree[n].Freq + tree[m].Freq;
        s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ?
                                s->depth[n] : s->depth[m]) + 1);
        tree[n].Dad = tree[m].Dad = (ush)node;
#ifdef DUMP_BL_TREE
        if (tree == s->bl_tree) {
            fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)",
                    node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq);
        }
#endif
        /* and insert the new node in the heap */
        s->heap[SMALLEST] = node++;
        pqdownheap(s, tree, SMALLEST);

    } while (s->heap_len >= 2);

    s->heap[--(s->heap_max)] = s->heap[SMALLEST];

    /* At this point, the fields freq and dad are set. We can now
     * generate the bit lengths.
     */
    gen_bitlen(s, (tree_desc *)desc);

    /* The field len is now set, we can generate the bit codes */
    gen_codes ((ct_data *)tree, max_code, s->bl_count);
}

/* ===========================================================================
 * Scan a literal or distance tree to determine the frequencies of the codes
 * in the bit length tree.
*/
local void scan_tree(deflate_state *s, ct_data *tree, int max_code) {
    int n;                     /* iterates over all tree elements */
    int prevlen = -1;          /* last emitted length */
    int curlen;                /* length of current code */
    int nextlen = tree[0].Len; /* length of next code */
    int count = 0;             /* repeat count of the current code */
    int max_count = 7;         /* max repeat count */
    int min_count = 4;         /* min repeat count */

    if (nextlen == 0) max_count = 138, min_count = 3;
    tree[max_code + 1].Len = (ush)0xffff; /* guard */

    for (n = 0; n <= max_code; n++) {
        curlen = nextlen;
        nextlen = tree[n + 1].Len;
        if (++count < max_count && curlen == nextlen) {
            continue;           /* still inside a run of equal lengths */
        } else if (count < min_count) {
            /* run too short for an RLE code: count each length directly */
            s->bl_tree[curlen].Freq += count;
        } else if (curlen != 0) {
            if (curlen != prevlen) s->bl_tree[curlen].Freq++;
            s->bl_tree[REP_3_6].Freq++;
        } else if (count <= 10) {
            s->bl_tree[REPZ_3_10].Freq++;
        } else {
            s->bl_tree[REPZ_11_138].Freq++;
        }
        count = 0;
        prevlen = curlen;
        if (nextlen == 0) {
            max_count = 138, min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6, min_count = 3;
        } else {
            max_count = 7, min_count = 4;
        }
    }
}

/* ===========================================================================
 * Send a literal or distance tree in compressed form, using the codes in
 * bl_tree.
*/
local void send_tree(deflate_state *s, ct_data *tree, int max_code) {
    int n;                     /* iterates over all tree elements */
    int prevlen = -1;          /* last emitted length */
    int curlen;                /* length of current code */
    int nextlen = tree[0].Len; /* length of next code */
    int count = 0;             /* repeat count of the current code */
    int max_count = 7;         /* max repeat count */
    int min_count = 4;         /* min repeat count */

    /* tree[max_code + 1].Len = -1; */ /* guard already set */
    if (nextlen == 0) max_count = 138, min_count = 3;

    /* Same run-length state machine as scan_tree, but here each run is
     * actually emitted with the bit-length-tree codes. */
    for (n = 0; n <= max_code; n++) {
        curlen = nextlen;
        nextlen = tree[n + 1].Len;
        if (++count < max_count && curlen == nextlen) {
            continue;
        } else if (count < min_count) {
            do { send_code(s, curlen, s->bl_tree); } while (--count != 0);
        } else if (curlen != 0) {
            if (curlen != prevlen) {
                send_code(s, curlen, s->bl_tree);
                count--;
            }
            Assert(count >= 3 && count <= 6, " 3_6?");
            send_code(s, REP_3_6, s->bl_tree);
            send_bits(s, count - 3, 2);
        } else if (count <= 10) {
            send_code(s, REPZ_3_10, s->bl_tree);
            send_bits(s, count - 3, 3);
        } else {
            send_code(s, REPZ_11_138, s->bl_tree);
            send_bits(s, count - 11, 7);
        }
        count = 0;
        prevlen = curlen;
        if (nextlen == 0) {
            max_count = 138, min_count = 3;
        } else if (curlen == nextlen) {
            max_count = 6, min_count = 3;
        } else {
            max_count = 7, min_count = 4;
        }
    }
}

/* ===========================================================================
 * Construct the Huffman tree for the bit lengths and return the index in
 * bl_order of the last bit length code to send.
*/
local int build_bl_tree(deflate_state *s) {
    int max_blindex; /* index of last bit length code of non zero freq */

    /* Determine the bit length frequencies for literal and distance trees */
    scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code);
    scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code);

    /* Build the bit length tree: */
    build_tree(s, (tree_desc *)(&(s->bl_desc)));
    /* opt_len now includes the length of the tree representations, except the
     * lengths of the bit lengths codes and the 5 + 5 + 4 bits for the counts.
     */

    /* Determine the number of bit length codes to send. The pkzip format
     * requires that at least 4 bit length codes be sent. (appnote.txt says
     * 3 but the actual value used is 4.)
     */
    for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) {
        if (s->bl_tree[bl_order[max_blindex]].Len != 0) break;
    }
    /* Update opt_len to include the bit length tree and counts */
    s->opt_len += 3*((ulg)max_blindex + 1) + 5 + 5 + 4;
    Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
            s->opt_len, s->static_len));

    return max_blindex;
}

/* ===========================================================================
 * Send the header for a block using dynamic Huffman trees: the counts, the
 * lengths of the bit length codes, the literal tree and the distance tree.
 * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
*/
local void send_all_trees(deflate_state *s, int lcodes, int dcodes,
                          int blcodes) {
    int rank; /* index in bl_order */

    Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
    Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
            "too many codes");
    Tracev((stderr, "\nbl counts: "));
    send_bits(s, lcodes - 257, 5);  /* not +255 as stated in appnote.txt */
    send_bits(s, dcodes - 1, 5);
    send_bits(s, blcodes - 4, 4);   /* not -3 as stated in appnote.txt */
    /* The bit length code lengths are sent in the fixed bl_order, 3 bits
     * each, so trailing zero lengths can be omitted (blcodes limit). */
    for (rank = 0; rank < blcodes; rank++) {
        Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
        send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
    }
    Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_ltree, lcodes - 1); /* literal tree */
    Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));

    send_tree(s, (ct_data *)s->dyn_dtree, dcodes - 1); /* distance tree */
    Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
}

/* ===========================================================================
 * Send a stored block
 */
void ZLIB_INTERNAL _tr_stored_block(deflate_state *s, charf *buf,
                                    ulg stored_len, int last) {
    send_bits(s, (STORED_BLOCK<<1) + last, 3); /* send block type */
    bi_windup(s);        /* align on byte boundary */
    /* Stored-block header: LEN and its one's complement NLEN. */
    put_short(s, (ush)stored_len);
    put_short(s, (ush)~stored_len);
    if (stored_len)
        zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len);
    s->pending += stored_len;
#ifdef ZLIB_DEBUG
    s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L;
    s->compressed_len += (stored_len + 4) << 3;
    s->bits_sent += 2*16;
    s->bits_sent += stored_len << 3;
#endif
}

/* ===========================================================================
 * Flush the bits in the bit buffer to pending output (leaves at most 7 bits)
 */
void ZLIB_INTERNAL _tr_flush_bits(deflate_state *s) {
    bi_flush(s);
}

/* ===========================================================================
 * Send one empty static block to give enough lookahead for inflate.
 * This takes 10 bits, of which 7 may remain in the bit buffer.
 */
void ZLIB_INTERNAL _tr_align(deflate_state *s) {
    send_bits(s, STATIC_TREES<<1, 3);
    send_code(s, END_BLOCK, static_ltree);
#ifdef ZLIB_DEBUG
    s->compressed_len += 10L; /* 3 for block type, 7 for EOB */
#endif
    bi_flush(s);
}

/* ===========================================================================
 * Send the block data compressed using the given Huffman trees
 */
local void compress_block(deflate_state *s, const ct_data *ltree,
                          const ct_data *dtree) {
    unsigned dist;    /* distance of matched string */
    int lc;           /* match length or unmatched char (if dist == 0) */
    unsigned sx = 0;  /* running index in symbol buffers */
    unsigned code;    /* the code to send */
    int extra;        /* number of extra bits to send */

    if (s->sym_next != 0) do {
        /* Fetch the next (dist, lc) pair recorded by _tr_tally; the
         * storage layout depends on the LIT_MEM configuration. */
#ifdef LIT_MEM
        dist = s->d_buf[sx];
        lc = s->l_buf[sx++];
#else
        dist = s->sym_buf[sx++] & 0xff;
        dist += (unsigned)(s->sym_buf[sx++] & 0xff) << 8;
        lc = s->sym_buf[sx++];
#endif
        if (dist == 0) {
            send_code(s, lc, ltree); /* send a literal byte */
            Tracecv(isgraph(lc), (stderr," '%c' ", lc));
        } else {
            /* Here, lc is the match length - MIN_MATCH */
            code = _length_code[lc];
            send_code(s, code + LITERALS + 1, ltree); /* send length code */
            extra = extra_lbits[code];
            if (extra != 0) {
                lc -= base_length[code];
                send_bits(s, lc, extra);       /* send the extra length bits */
            }
            dist--; /* dist is now the match distance - 1 */
            code = d_code(dist);
            Assert (code < D_CODES, "bad d_code");

            send_code(s, code, dtree);       /* send the distance code */
            extra = extra_dbits[code];
            if (extra != 0) {
                dist -= (unsigned)base_dist[code];
                send_bits(s, dist, extra);   /* send the extra distance bits */
            }
        } /* literal or match pair ? */

        /* Check for no overlay of pending_buf on needed symbols */
#ifdef LIT_MEM
        Assert(s->pending < 2 * (s->lit_bufsize + sx), "pendingBuf overflow");
#else
        Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow");
#endif

    } while (sx < s->sym_next);

    send_code(s, END_BLOCK, ltree);
}

/* ===========================================================================
 * Check if the data type is TEXT or BINARY, using the following algorithm:
 * - TEXT if the two conditions below are satisfied:
 *    a) There are no non-portable control characters belonging to the
 *       "block list" (0..6, 14..25, 28..31).
 *    b) There is at least one printable character belonging to the
 *       "allow list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
 * - BINARY otherwise.
 * - The following partially-portable control characters form a
 *   "gray list" that is ignored in this detection algorithm:
 *   (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
 * IN assertion: the fields Freq of dyn_ltree are set.
 */
local int detect_data_type(deflate_state *s) {
    /* block_mask is the bit mask of block-listed bytes
     * set bits 0..6, 14..25, and 28..31
     * 0xf3ffc07f = binary 11110011111111111100000001111111
     */
    unsigned long block_mask = 0xf3ffc07fUL;
    int n;

    /* Check for non-textual ("block-listed") bytes. */
    for (n = 0; n <= 31; n++, block_mask >>= 1)
        if ((block_mask & 1) && (s->dyn_ltree[n].Freq != 0))
            return Z_BINARY;

    /* Check for textual ("allow-listed") bytes. */
    if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0
            || s->dyn_ltree[13].Freq != 0)
        return Z_TEXT;
    for (n = 32; n < LITERALS; n++)
        if (s->dyn_ltree[n].Freq != 0)
            return Z_TEXT;

    /* There are no "block-listed" or "allow-listed" bytes:
     * this stream either is empty or has tolerated ("gray-listed") bytes only.
     */
    return Z_BINARY;
}

/* ===========================================================================
 * Determine the best encoding for the current block: dynamic trees, static
 * trees or store, and write out the encoded block.
*/
void ZLIB_INTERNAL _tr_flush_block(deflate_state *s, charf *buf,
                                   ulg stored_len, int last) {
    ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */
    int max_blindex = 0; /* index of last bit length code of non zero freq */

    /* Build the Huffman trees unless a stored block is forced */
    if (s->level > 0) {

        /* Check if the file is binary or text */
        if (s->strm->data_type == Z_UNKNOWN)
            s->strm->data_type = detect_data_type(s);

        /* Construct the literal and distance trees */
        build_tree(s, (tree_desc *)(&(s->l_desc)));
        Tracev((stderr, "\nlit data: dyn %ld, stat %ld",
                s->opt_len, s->static_len));

        build_tree(s, (tree_desc *)(&(s->d_desc)));
        Tracev((stderr, "\ndist data: dyn %ld, stat %ld",
                s->opt_len, s->static_len));
        /* At this point, opt_len and static_len are the total bit lengths of
         * the compressed block data, excluding the tree representations.
         */

        /* Build the bit length tree for the above two trees, and get the index
         * in bl_order of the last bit length code to send.
         */
        max_blindex = build_bl_tree(s);

        /* Determine the best encoding. Compute the block lengths in bytes. */
        opt_lenb = (s->opt_len + 3 + 7) >> 3;
        static_lenb = (s->static_len + 3 + 7) >> 3;

        Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
                opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
                s->sym_next / 3));

#ifndef FORCE_STATIC
        if (static_lenb <= opt_lenb || s->strategy == Z_FIXED)
#endif
            opt_lenb = static_lenb;

    } else {
        Assert(buf != (char*)0, "lost buf");
        opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
    }

#ifdef FORCE_STORED
    if (buf != (char*)0) { /* force stored block */
#else
    if (stored_len + 4 <= opt_lenb && buf != (char*)0) {
                       /* 4: two words for the lengths */
#endif
        /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
         * Otherwise we can't have processed more than WSIZE input bytes since
         * the last block flush, because compression would have been
         * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
         * transform a block into a stored block.
         */
        _tr_stored_block(s, buf, stored_len, last);

    } else if (static_lenb == opt_lenb) {
        send_bits(s, (STATIC_TREES<<1) + last, 3);
        compress_block(s, (const ct_data *)static_ltree,
                       (const ct_data *)static_dtree);
#ifdef ZLIB_DEBUG
        s->compressed_len += 3 + s->static_len;
#endif
    } else {
        send_bits(s, (DYN_TREES<<1) + last, 3);
        send_all_trees(s, s->l_desc.max_code + 1, s->d_desc.max_code + 1,
                       max_blindex + 1);
        compress_block(s, (const ct_data *)s->dyn_ltree,
                       (const ct_data *)s->dyn_dtree);
#ifdef ZLIB_DEBUG
        s->compressed_len += 3 + s->opt_len;
#endif
    }
    Assert (s->compressed_len == s->bits_sent, "bad compressed size");
    /* The above check is made mod 2^32, for files larger than 512 MB
     * and uLong implemented on 32 bits.
     */
    init_block(s);

    if (last) {
        bi_windup(s);
#ifdef ZLIB_DEBUG
        s->compressed_len += 7; /* align on byte boundary */
#endif
    }
    Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len >> 3,
            s->compressed_len - 7*last));
}

/* ===========================================================================
 * Save the match info and tally the frequency counts. Return true if
 * the current block must be flushed.
*/ int ZLIB_INTERNAL _tr_tally(deflate_state *s, unsigned dist, unsigned lc) { #ifdef LIT_MEM s->d_buf[s->sym_next] = (ush)dist; s->l_buf[s->sym_next++] = (uch)lc; #else s->sym_buf[s->sym_next++] = (uch)dist; s->sym_buf[s->sym_next++] = (uch)(dist >> 8); s->sym_buf[s->sym_next++] = (uch)lc; #endif if (dist == 0) { /* lc is the unmatched char */ s->dyn_ltree[lc].Freq++; } else { s->matches++; /* Here, lc is the match length - MIN_MATCH */ dist--; /* dist = match distance - 1 */ Assert((ush)dist < (ush)MAX_DIST(s) && (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); s->dyn_ltree[_length_code[lc] + LITERALS + 1].Freq++; s->dyn_dtree[d_code(dist)].Freq++; } return (s->sym_next == s->sym_end); } ================================================ FILE: pypcode/zlib/trees.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* header created automatically with -DGEN_TREES_H */ local const ct_data static_ltree[L_CODES+2] = { {{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}}, {{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}}, {{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 8}}, {{188},{ 8}}, {{124},{ 8}}, {{252},{ 8}}, {{ 2},{ 8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}}, {{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}}, {{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}}, {{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}}, {{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}}, {{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}}, {{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}}, {{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}}, {{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}}, {{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}}, {{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, 
{{174},{ 8}}, {{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}}, {{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 8}}, {{254},{ 8}}, {{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}}, {{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}}, {{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}}, {{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}}, {{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}}, {{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}}, {{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}}, {{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}}, {{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}}, {{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}}, {{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}}, {{237},{ 8}}, {{ 29},{ 8}}, {{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}}, {{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}}, {{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}}, {{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 9}}, {{179},{ 9}}, {{435},{ 9}}, {{115},{ 9}}, {{371},{ 9}}, {{243},{ 9}}, {{499},{ 9}}, {{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}}, {{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}}, {{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}}, {{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}}, {{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}}, {{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}}, {{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}}, {{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}}, {{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}}, {{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}}, {{151},{ 9}}, {{407},{ 9}}, 
{{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}}, {{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}}, {{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}}, {{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}}, {{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}}, {{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}}, {{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}}, {{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}}, {{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}}, {{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}}, {{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}}, {{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}}, {{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}}, {{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}}, {{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}}, {{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}} }; local const ct_data static_dtree[D_CODES] = { {{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, {{24},{ 5}}, {{ 4},{ 5}}, {{20},{ 5}}, {{12},{ 5}}, {{28},{ 5}}, {{ 2},{ 5}}, {{18},{ 5}}, {{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}}, {{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}}, {{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}}, {{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} }; const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 
13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 
21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 }; local const int base_length[LENGTH_CODES] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 }; local const int base_dist[D_CODES] = { 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 }; ================================================ FILE: pypcode/zlib/zconf.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* zconf.h -- configuration of the zlib compression library * Copyright (C) 1995-2024 Jean-loup Gailly, Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #ifndef ZCONF_H #define ZCONF_H /* * If you *really* need a unique prefix for all types and library functions, * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. * Even better than compiling with -DZ_PREFIX would be to use configure to set * this permanently in zconf.h using "./configure --zprefix". 
*/ #ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ # define Z_PREFIX_SET /* all linked symbols and init macros */ # define _dist_code z__dist_code # define _length_code z__length_code # define _tr_align z__tr_align # define _tr_flush_bits z__tr_flush_bits # define _tr_flush_block z__tr_flush_block # define _tr_init z__tr_init # define _tr_stored_block z__tr_stored_block # define _tr_tally z__tr_tally # define adler32 z_adler32 # define adler32_combine z_adler32_combine # define adler32_combine64 z_adler32_combine64 # define adler32_z z_adler32_z # ifndef Z_SOLO # define compress z_compress # define compress2 z_compress2 # define compressBound z_compressBound # endif # define crc32 z_crc32 # define crc32_combine z_crc32_combine # define crc32_combine64 z_crc32_combine64 # define crc32_combine_gen z_crc32_combine_gen # define crc32_combine_gen64 z_crc32_combine_gen64 # define crc32_combine_op z_crc32_combine_op # define crc32_z z_crc32_z # define deflate z_deflate # define deflateBound z_deflateBound # define deflateCopy z_deflateCopy # define deflateEnd z_deflateEnd # define deflateGetDictionary z_deflateGetDictionary # define deflateInit z_deflateInit # define deflateInit2 z_deflateInit2 # define deflateInit2_ z_deflateInit2_ # define deflateInit_ z_deflateInit_ # define deflateParams z_deflateParams # define deflatePending z_deflatePending # define deflatePrime z_deflatePrime # define deflateReset z_deflateReset # define deflateResetKeep z_deflateResetKeep # define deflateSetDictionary z_deflateSetDictionary # define deflateSetHeader z_deflateSetHeader # define deflateTune z_deflateTune # define deflate_copyright z_deflate_copyright # define get_crc_table z_get_crc_table # ifndef Z_SOLO # define gz_error z_gz_error # define gz_intmax z_gz_intmax # define gz_strwinerror z_gz_strwinerror # define gzbuffer z_gzbuffer # define gzclearerr z_gzclearerr # define gzclose z_gzclose # define gzclose_r z_gzclose_r # define gzclose_w z_gzclose_w # define gzdirect 
z_gzdirect # define gzdopen z_gzdopen # define gzeof z_gzeof # define gzerror z_gzerror # define gzflush z_gzflush # define gzfread z_gzfread # define gzfwrite z_gzfwrite # define gzgetc z_gzgetc # define gzgetc_ z_gzgetc_ # define gzgets z_gzgets # define gzoffset z_gzoffset # define gzoffset64 z_gzoffset64 # define gzopen z_gzopen # define gzopen64 z_gzopen64 # ifdef _WIN32 # define gzopen_w z_gzopen_w # endif # define gzprintf z_gzprintf # define gzputc z_gzputc # define gzputs z_gzputs # define gzread z_gzread # define gzrewind z_gzrewind # define gzseek z_gzseek # define gzseek64 z_gzseek64 # define gzsetparams z_gzsetparams # define gztell z_gztell # define gztell64 z_gztell64 # define gzungetc z_gzungetc # define gzvprintf z_gzvprintf # define gzwrite z_gzwrite # endif # define inflate z_inflate # define inflateBack z_inflateBack # define inflateBackEnd z_inflateBackEnd # define inflateBackInit z_inflateBackInit # define inflateBackInit_ z_inflateBackInit_ # define inflateCodesUsed z_inflateCodesUsed # define inflateCopy z_inflateCopy # define inflateEnd z_inflateEnd # define inflateGetDictionary z_inflateGetDictionary # define inflateGetHeader z_inflateGetHeader # define inflateInit z_inflateInit # define inflateInit2 z_inflateInit2 # define inflateInit2_ z_inflateInit2_ # define inflateInit_ z_inflateInit_ # define inflateMark z_inflateMark # define inflatePrime z_inflatePrime # define inflateReset z_inflateReset # define inflateReset2 z_inflateReset2 # define inflateResetKeep z_inflateResetKeep # define inflateSetDictionary z_inflateSetDictionary # define inflateSync z_inflateSync # define inflateSyncPoint z_inflateSyncPoint # define inflateUndermine z_inflateUndermine # define inflateValidate z_inflateValidate # define inflate_copyright z_inflate_copyright # define inflate_fast z_inflate_fast # define inflate_table z_inflate_table # ifndef Z_SOLO # define uncompress z_uncompress # define uncompress2 z_uncompress2 # endif # define zError z_zError # ifndef 
Z_SOLO # define zcalloc z_zcalloc # define zcfree z_zcfree # endif # define zlibCompileFlags z_zlibCompileFlags # define zlibVersion z_zlibVersion /* all zlib typedefs in zlib.h and zconf.h */ # define Byte z_Byte # define Bytef z_Bytef # define alloc_func z_alloc_func # define charf z_charf # define free_func z_free_func # ifndef Z_SOLO # define gzFile z_gzFile # endif # define gz_header z_gz_header # define gz_headerp z_gz_headerp # define in_func z_in_func # define intf z_intf # define out_func z_out_func # define uInt z_uInt # define uIntf z_uIntf # define uLong z_uLong # define uLongf z_uLongf # define voidp z_voidp # define voidpc z_voidpc # define voidpf z_voidpf /* all zlib structs in zlib.h and zconf.h */ # define gz_header_s z_gz_header_s # define internal_state z_internal_state #endif #if defined(__MSDOS__) && !defined(MSDOS) # define MSDOS #endif #if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) # define OS2 #endif #if defined(_WINDOWS) && !defined(WINDOWS) # define WINDOWS #endif #if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) # ifndef WIN32 # define WIN32 # endif #endif #if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) # if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) # ifndef SYS16BIT # define SYS16BIT # endif # endif #endif /* * Compile with -DMAXSEG_64K if the alloc function cannot allocate more * than 64k bytes at a time (needed on systems with 16-bit int). 
*/ #ifdef SYS16BIT # define MAXSEG_64K #endif #ifdef MSDOS # define UNALIGNED_OK #endif #ifdef __STDC_VERSION__ # ifndef STDC # define STDC # endif # if __STDC_VERSION__ >= 199901L # ifndef STDC99 # define STDC99 # endif # endif #endif #if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) # define STDC #endif #if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) # define STDC #endif #if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) # define STDC #endif #if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) # define STDC #endif #if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ # define STDC #endif #ifndef STDC # ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ # define const /* note: need a more gentle solution here */ # endif #endif #if defined(ZLIB_CONST) && !defined(z_const) # define z_const const #else # define z_const #endif #ifdef Z_SOLO # ifdef _WIN64 typedef unsigned long long z_size_t; # else typedef unsigned long z_size_t; # endif #else # define z_longlong long long # if defined(NO_SIZE_T) typedef unsigned NO_SIZE_T z_size_t; # elif defined(STDC) # include typedef size_t z_size_t; # else typedef unsigned long z_size_t; # endif # undef z_longlong #endif /* Maximum value for memLevel in deflateInit2 */ #ifndef MAX_MEM_LEVEL # ifdef MAXSEG_64K # define MAX_MEM_LEVEL 8 # else # define MAX_MEM_LEVEL 9 # endif #endif /* Maximum value for windowBits in deflateInit2 and inflateInit2. * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files * created by gzip. (Files created by minigzip can still be extracted by * gzip.) */ #ifndef MAX_WBITS # define MAX_WBITS 15 /* 32K LZ77 window */ #endif /* The memory requirements for deflate are (in bytes): (1 << (windowBits+2)) + (1 << (memLevel+9)) that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) plus a few kilobytes for small objects. 
For example, if you want to reduce the default memory requirements from 256K to 128K, compile with make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" Of course this will generally degrade compression (there's no free lunch). The memory requirements for inflate are (in bytes) 1 << windowBits that is, 32K for windowBits=15 (default value) plus about 7 kilobytes for small objects. */ /* Type declarations */ #ifndef OF /* function prototypes */ # ifdef STDC # define OF(args) args # else # define OF(args) () # endif #endif /* The following definitions for FAR are needed only for MSDOS mixed * model programming (small or medium model with some far allocations). * This was tested only with MSC; for other MSDOS compilers you may have * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, * just define FAR to be empty. */ #ifdef SYS16BIT # if defined(M_I86SM) || defined(M_I86MM) /* MSC small or medium model */ # define SMALL_MEDIUM # ifdef _MSC_VER # define FAR _far # else # define FAR far # endif # endif # if (defined(__SMALL__) || defined(__MEDIUM__)) /* Turbo C small or medium model */ # define SMALL_MEDIUM # ifdef __BORLANDC__ # define FAR _far # else # define FAR far # endif # endif #endif #if defined(WINDOWS) || defined(WIN32) /* If building or using zlib as a DLL, define ZLIB_DLL. * This is not mandatory, but it offers a little performance increase. */ # ifdef ZLIB_DLL # if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) # ifdef ZLIB_INTERNAL # define ZEXTERN extern __declspec(dllexport) # else # define ZEXTERN extern __declspec(dllimport) # endif # endif # endif /* ZLIB_DLL */ /* If building or using zlib with the WINAPI/WINAPIV calling convention, * define ZLIB_WINAPI. * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. */ # ifdef ZLIB_WINAPI # ifdef FAR # undef FAR # endif # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN # endif # include /* No need for _export, use ZLIB.DEF instead. 
*/ /* For complete Windows compatibility, use WINAPI, not __stdcall. */ # define ZEXPORT WINAPI # ifdef WIN32 # define ZEXPORTVA WINAPIV # else # define ZEXPORTVA FAR CDECL # endif # endif #endif #if defined (__BEOS__) # ifdef ZLIB_DLL # ifdef ZLIB_INTERNAL # define ZEXPORT __declspec(dllexport) # define ZEXPORTVA __declspec(dllexport) # else # define ZEXPORT __declspec(dllimport) # define ZEXPORTVA __declspec(dllimport) # endif # endif #endif #ifndef ZEXTERN # define ZEXTERN extern #endif #ifndef ZEXPORT # define ZEXPORT #endif #ifndef ZEXPORTVA # define ZEXPORTVA #endif #ifndef FAR # define FAR #endif #if !defined(__MACTYPES__) typedef unsigned char Byte; /* 8 bits */ #endif typedef unsigned int uInt; /* 16 bits or more */ typedef unsigned long uLong; /* 32 bits or more */ #ifdef SMALL_MEDIUM /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ # define Bytef Byte FAR #else typedef Byte FAR Bytef; #endif typedef char FAR charf; typedef int FAR intf; typedef uInt FAR uIntf; typedef uLong FAR uLongf; #ifdef STDC typedef void const *voidpc; typedef void FAR *voidpf; typedef void *voidp; #else typedef Byte const *voidpc; typedef Byte FAR *voidpf; typedef Byte *voidp; #endif #if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) # include # if (UINT_MAX == 0xffffffffUL) # define Z_U4 unsigned # elif (ULONG_MAX == 0xffffffffUL) # define Z_U4 unsigned long # elif (USHRT_MAX == 0xffffffffUL) # define Z_U4 unsigned short # endif #endif #ifdef Z_U4 typedef Z_U4 z_crc_t; #else typedef unsigned long z_crc_t; #endif #ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ # define Z_HAVE_UNISTD_H #endif #ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ # define Z_HAVE_STDARG_H #endif #ifdef STDC # ifndef Z_SOLO # include /* for off_t */ # endif #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifndef Z_SOLO # include /* for va_list */ # endif #endif #ifdef _WIN32 # ifndef Z_SOLO # include /* for wchar_t */ # endif #endif /* a little 
trick to accommodate both "#define _LARGEFILE64_SOURCE" and * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even * though the former does not conform to the LFS document), but considering * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as * equivalently requesting no 64-bit operations */ #if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 # undef _LARGEFILE64_SOURCE #endif #ifndef Z_HAVE_UNISTD_H # ifdef __WATCOMC__ # define Z_HAVE_UNISTD_H # endif #endif #ifndef Z_HAVE_UNISTD_H # if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32) # define Z_HAVE_UNISTD_H # endif #endif #ifndef Z_SOLO # if defined(Z_HAVE_UNISTD_H) # include /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ # ifdef VMS # include /* for off_t */ # endif # ifndef z_off_t # define z_off_t off_t # endif # endif #endif #if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 # define Z_LFS64 #endif #if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) # define Z_LARGE64 #endif #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) # define Z_WANT64 #endif #if !defined(SEEK_SET) && !defined(Z_SOLO) # define SEEK_SET 0 /* Seek from beginning of file. */ # define SEEK_CUR 1 /* Seek from current position. 
*/ # define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ #endif #ifndef z_off_t # define z_off_t long #endif #if !defined(_WIN32) && defined(Z_LARGE64) # define z_off64_t off64_t #else # if defined(_WIN32) && !defined(__GNUC__) # define z_off64_t __int64 # else # define z_off64_t z_off_t # endif #endif /* MVS linker does not support external names larger than 8 bytes */ #if defined(__MVS__) #pragma map(deflateInit_,"DEIN") #pragma map(deflateInit2_,"DEIN2") #pragma map(deflateEnd,"DEEND") #pragma map(deflateBound,"DEBND") #pragma map(inflateInit_,"ININ") #pragma map(inflateInit2_,"ININ2") #pragma map(inflateEnd,"INEND") #pragma map(inflateSync,"INSY") #pragma map(inflateSetDictionary,"INSEDI") #pragma map(compressBound,"CMBND") #pragma map(inflate_table,"INTABL") #pragma map(inflate_fast,"INFA") #pragma map(inflate_copyright,"INCOPY") #endif #endif /* ZCONF_H */ ================================================ FILE: pypcode/zlib/zlib.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* zlib.h -- interface of the 'zlib' general purpose compression library version 1.3.1, January 22nd, 2024 Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. 
This notice may not be removed or altered from any source distribution. Jean-loup Gailly Mark Adler jloup@gzip.org madler@alumni.caltech.edu The data format used by the zlib library is described by RFCs (Request for Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). */ #ifndef ZLIB_H #define ZLIB_H #include "zconf.h" #ifdef __cplusplus extern "C" { #endif #define ZLIB_VERSION "1.3.1" #define ZLIB_VERNUM 0x1310 #define ZLIB_VER_MAJOR 1 #define ZLIB_VER_MINOR 3 #define ZLIB_VER_REVISION 1 #define ZLIB_VER_SUBREVISION 0 /* The 'zlib' compression library provides in-memory compression and decompression functions, including integrity checks of the uncompressed data. This version of the library supports only one compression method (deflation) but other algorithms will be added later and will have the same stream interface. Compression can be done in a single step if the buffers are large enough, or can be done by repeated calls of the compression function. In the latter case, the application must provide more input and/or consume the output (providing more output space) before each call. The compressed data format used by default by the in-memory functions is the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped around a deflate stream, which is itself documented in RFC 1951. The library also supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio using the functions that start with "gz". The gzip format is different from the zlib format. gzip is a gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. This library can optionally read and write gzip and raw deflate streams in memory as well. The zlib format was designed to be compact and fast for use in memory and on communications channels. 
The gzip format was designed for single- file compression on file systems, has a larger header than zlib to maintain directory information, and uses a different, slower check method than zlib. The library does not install any signal handler. The decoder checks the consistency of the compressed data, so the library should never crash even in the case of corrupted input. */ typedef voidpf (*alloc_func)(voidpf opaque, uInt items, uInt size); typedef void (*free_func)(voidpf opaque, voidpf address); struct internal_state; typedef struct z_stream_s { z_const Bytef *next_in; /* next input byte */ uInt avail_in; /* number of bytes available at next_in */ uLong total_in; /* total number of input bytes read so far */ Bytef *next_out; /* next output byte will go here */ uInt avail_out; /* remaining free space at next_out */ uLong total_out; /* total number of bytes output so far */ z_const char *msg; /* last error message, NULL if no error */ struct internal_state FAR *state; /* not visible by applications */ alloc_func zalloc; /* used to allocate the internal state */ free_func zfree; /* used to free the internal state */ voidpf opaque; /* private data object passed to zalloc and zfree */ int data_type; /* best guess about the data type: binary or text for deflate, or the decoding state for inflate */ uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ uLong reserved; /* reserved for future use */ } z_stream; typedef z_stream FAR *z_streamp; /* gzip header information passed to and from zlib routines. See RFC 1952 for more details on the meanings of these fields. 
*/ typedef struct gz_header_s { int text; /* true if compressed data believed to be text */ uLong time; /* modification time */ int xflags; /* extra flags (not used when writing a gzip file) */ int os; /* operating system */ Bytef *extra; /* pointer to extra field or Z_NULL if none */ uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ uInt extra_max; /* space at extra (only when reading header) */ Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ uInt name_max; /* space at name (only when reading header) */ Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ uInt comm_max; /* space at comment (only when reading header) */ int hcrc; /* true if there was or will be a header crc */ int done; /* true when done reading gzip header (not used when writing a gzip file) */ } gz_header; typedef gz_header FAR *gz_headerp; /* The application must update next_in and avail_in when avail_in has dropped to zero. It must update next_out and avail_out when avail_out has dropped to zero. The application must initialize zalloc, zfree and opaque before calling the init function. All other fields are set by the compression library and must not be updated by the application. The opaque value provided by the application will be passed as the first parameter for calls of zalloc and zfree. This can be useful for custom memory management. The compression library attaches no meaning to the opaque value. zalloc must return Z_NULL if there is not enough memory for the object. If zlib is used in a multi-threaded application, zalloc and zfree must be thread safe. In that case, zlib is thread-safe. When zalloc and zfree are Z_NULL on entry to the initialization function, they are set to internal routines that use the standard library functions malloc() and free(). 
On 16-bit systems, the functions zalloc and zfree must be able to allocate exactly 65536 bytes, but will not be required to allocate more than this if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers returned by zalloc for objects of exactly 65536 bytes *must* have their offset normalized to zero. The default allocation function provided by this library ensures this (see zutil.c). To reduce memory requirements and avoid any allocation of 64K objects, at the expense of compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). The fields total_in and total_out can be used for statistics or progress reports. After compression, total_in holds the total size of the uncompressed data and may be saved for use by the decompressor (particularly if the decompressor wants to decompress everything in a single step). */ /* constants */ #define Z_NO_FLUSH 0 #define Z_PARTIAL_FLUSH 1 #define Z_SYNC_FLUSH 2 #define Z_FULL_FLUSH 3 #define Z_FINISH 4 #define Z_BLOCK 5 #define Z_TREES 6 /* Allowed flush values; see deflate() and inflate() below for details */ #define Z_OK 0 #define Z_STREAM_END 1 #define Z_NEED_DICT 2 #define Z_ERRNO (-1) #define Z_STREAM_ERROR (-2) #define Z_DATA_ERROR (-3) #define Z_MEM_ERROR (-4) #define Z_BUF_ERROR (-5) #define Z_VERSION_ERROR (-6) /* Return codes for the compression/decompression functions. Negative values * are errors, positive values are used for special but normal events. 
*/ #define Z_NO_COMPRESSION 0 #define Z_BEST_SPEED 1 #define Z_BEST_COMPRESSION 9 #define Z_DEFAULT_COMPRESSION (-1) /* compression levels */ #define Z_FILTERED 1 #define Z_HUFFMAN_ONLY 2 #define Z_RLE 3 #define Z_FIXED 4 #define Z_DEFAULT_STRATEGY 0 /* compression strategy; see deflateInit2() below for details */ #define Z_BINARY 0 #define Z_TEXT 1 #define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ #define Z_UNKNOWN 2 /* Possible values of the data_type field for deflate() */ #define Z_DEFLATED 8 /* The deflate compression method (the only one supported in this version) */ #define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ #define zlib_version zlibVersion() /* for compatibility with versions < 1.0.2 */ /* basic functions */ ZEXTERN const char * ZEXPORT zlibVersion(void); /* The application can compare zlibVersion and ZLIB_VERSION for consistency. If the first character differs, the library code actually used is not compatible with the zlib.h header file used by the application. This check is automatically made by deflateInit and inflateInit. */ /* ZEXTERN int ZEXPORT deflateInit(z_streamp strm, int level); Initializes the internal stream state for compression. The fields zalloc, zfree and opaque must be initialized before by the caller. If zalloc and zfree are set to Z_NULL, deflateInit updates them to use default allocation functions. total_in, total_out, adler, and msg are initialized. The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: 1 gives best speed, 9 gives best compression, 0 gives no compression at all (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION requests a default compromise between speed and compression (currently equivalent to level 6). 
deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if level is not a valid compression level, or Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible with the version assumed by the caller (ZLIB_VERSION). msg is set to null if there is no error message. deflateInit does not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflate(z_streamp strm, int flush); /* deflate compresses as much data as possible, and stops when the input buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. The detailed semantics are as follows. deflate performs one or both of the following actions: - Compress more input starting at next_in and update next_in and avail_in accordingly. If not all input can be processed (because there is not enough room in the output buffer), next_in and avail_in are updated and processing will resume at this point for the next call of deflate(). - Generate more output starting at next_out and update next_out and avail_out accordingly. This action is forced if the parameter flush is non zero. Forcing flush frequently degrades the compression ratio, so this parameter should be set only when necessary. Some output may be provided even if flush is zero. Before the call of deflate(), the application should ensure that at least one of the actions is possible, by providing more input and/or consuming more output, and updating avail_in or avail_out accordingly; avail_out should never be zero before the call. The application can consume the compressed output when it wants, for example when the output buffer is full (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. 
See deflatePending(), which can be used if desired to determine whether or not there is more output in that case. Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to decide how much data to accumulate before producing output, in order to maximize compression. If the parameter flush is set to Z_SYNC_FLUSH, all pending output is flushed to the output buffer and the output is aligned on a byte boundary, so that the decompressor can get all input data available so far. (In particular avail_in is zero after the call if enough output space has been provided before the call.) Flushing may degrade compression for some compression algorithms and so it should be used only when necessary. This completes the current deflate block and follows it with an empty stored block that is three bits plus filler bits to the next byte, followed by four bytes (00 00 ff ff). If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the output buffer, but the output is not aligned to a byte boundary. All of the input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. This completes the current deflate block and follows it with an empty fixed codes block that is 10 bits long. This assures that enough bytes are output in order for the decompressor to finish the block before the empty fixed codes block. If flush is set to Z_BLOCK, a deflate block is completed and emitted, as for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to seven bits of the current block are held to be written as the next byte after the next deflate block is completed. In this case, the decompressor may not be provided enough bits at this point in order to complete decompression of the data provided so far to the compressor. It may need to wait for the next block to be emitted. This is for advanced applications that need to control the emission of deflate blocks. 
If flush is set to Z_FULL_FLUSH, all output is flushed as with Z_SYNC_FLUSH, and the compression state is reset so that decompression can restart from this point if previous compressed data has been damaged or if random access is desired. Using Z_FULL_FLUSH too often can seriously degrade compression. If deflate returns with avail_out == 0, this function must be called again with the same value of the flush parameter and more output space (updated avail_out), until the flush is complete (deflate returns with non-zero avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that avail_out is greater than six when the flush marker begins, in order to avoid repeated flush markers upon calling deflate() again when avail_out == 0. If the parameter flush is set to Z_FINISH, pending input is processed, pending output is flushed and deflate returns with Z_STREAM_END if there was enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this function must be called again with Z_FINISH and more output space (updated avail_out) but no more input data, until it returns with Z_STREAM_END or an error. After deflate has returned Z_STREAM_END, the only possible operations on the stream are deflateReset or deflateEnd. Z_FINISH can be used in the first deflate call after deflateInit if all the compression is to be done in a single step. In order to complete in one call, avail_out must be at least the value returned by deflateBound (see below). Then deflate is guaranteed to return Z_STREAM_END. If not enough output space is provided, deflate will not return Z_STREAM_END, and it must be called again as described above. deflate() sets strm->adler to the Adler-32 checksum of all input read so far (that is, total_in bytes). If a gzip stream is being generated, then strm->adler will be the CRC-32 checksum of the input read so far. (See deflateInit2 below.) deflate() may update strm->data_type if it can make a good guess about the input data type (Z_BINARY or Z_TEXT). 
If in doubt, the data is considered binary. This field is only for information purposes and does not affect the compression algorithm in any manner. deflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if all input has been consumed and all output has been produced (only when flush is set to Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example if next_in or next_out was Z_NULL or the state was inadvertently written over by the application), or Z_BUF_ERROR if no progress is possible (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and deflate() can be called again with more input and more output space to continue compressing. */ ZEXTERN int ZEXPORT deflateEnd(z_streamp strm); /* All dynamically allocated data structures for this stream are freed. This function discards any unprocessed input and does not flush any pending output. deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state was inconsistent, Z_DATA_ERROR if the stream was freed prematurely (some input or output was discarded). In the error case, msg may be set but then points to a static string (which must not be deallocated). */ /* ZEXTERN int ZEXPORT inflateInit(z_streamp strm); Initializes the internal stream state for decompression. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. In the current version of inflate, the provided input is not read or consumed. The allocation of a sliding window will be deferred to the first call of inflate (if the decompression does not complete on the first call). If zalloc and zfree are set to Z_NULL, inflateInit updates them to use default allocation functions. total_in, total_out, adler, and msg are initialized. 
inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the version assumed by the caller, or Z_STREAM_ERROR if the parameters are invalid, such as a null pointer to the structure. msg is set to null if there is no error message. inflateInit does not perform any decompression. Actual decompression will be done by inflate(). So next_in, and avail_in, next_out, and avail_out are unused and unchanged. The current implementation of inflateInit() does not process any header information -- that is deferred until inflate() is called. */ ZEXTERN int ZEXPORT inflate(z_streamp strm, int flush); /* inflate decompresses as much data as possible, and stops when the input buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. The detailed semantics are as follows. inflate performs one or both of the following actions: - Decompress more input starting at next_in and update next_in and avail_in accordingly. If not all input can be processed (because there is not enough room in the output buffer), then next_in and avail_in are updated accordingly, and processing will resume at this point for the next call of inflate(). - Generate more output starting at next_out and update next_out and avail_out accordingly. inflate() provides as much output as possible, until there is no more input data or no more space in the output buffer (see below about the flush parameter). Before the call of inflate(), the application should ensure that at least one of the actions is possible, by providing more input and/or consuming more output, and updating the next_* and avail_* values accordingly. If the caller of inflate() does not provide both available input and available output space, it is possible that there will be no progress made. 
The application can consume the uncompressed output when it wants, for example when the output buffer is full (avail_out == 0), or after each call of inflate(). If inflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much output as possible to the output buffer. Z_BLOCK requests that inflate() stop if and when it gets to the next deflate block boundary. When decoding the zlib or gzip format, this will cause inflate() to return immediately after the header and before the first block. When doing a raw inflate, inflate() will go ahead and process the first block, and will return when it gets to the end of that block, or when it runs out of data. The Z_BLOCK option assists in appending to or combining deflate streams. To assist in this, on return inflate() always sets strm->data_type to the number of unused bits in the last byte taken from strm->next_in, plus 64 if inflate() is currently decoding the last block in the deflate stream, plus 128 if inflate() returned immediately after decoding an end-of-block code or decoding the complete header up to just before the first byte of the deflate stream. The end-of-block will not be indicated until all of the uncompressed data from that block has been written to strm->next_out. The number of unused bits may in general be greater than seven, except when bit 7 of data_type is set, in which case the number of unused bits will be less than eight. data_type is set as noted here every time inflate() returns for all flush options, and so can be used to determine the amount of currently consumed input in bits. The Z_TREES option behaves as Z_BLOCK does, but it also returns when the end of each deflate block header is reached, before any actual data in that block is decoded. 
This allows the caller to determine the length of the deflate block header for later use in random access within a deflate block. 256 is added to the value of strm->data_type when inflate() returns immediately after reaching the end of the deflate block header. inflate() should normally be called until it returns Z_STREAM_END or an error. However if all decompression is to be performed in a single step (a single call of inflate), the parameter flush should be set to Z_FINISH. In this case all pending input is processed and all pending output is flushed; avail_out must be large enough to hold all of the uncompressed data for the operation to complete. (The size of the uncompressed data may have been saved by the compressor for this purpose.) The use of Z_FINISH is not required to perform an inflation in one step. However it may be used to inform inflate that a faster approach can be used for the single inflate() call. Z_FINISH also informs inflate to not maintain a sliding window if the stream completes, which reduces inflate's memory footprint. If the stream does not complete, either because not all of the stream is provided or not enough output space is provided, then a sliding window will be allocated and inflate() can be called again to continue the operation as if Z_NO_FLUSH had been used. In this implementation, inflate() always flushes as much output as possible to the output buffer, and always uses the faster approach on the first call. So the effects of the flush parameter in this implementation are on the return value of inflate() as noted below, when inflate() returns early when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of memory for a sliding window when Z_FINISH is used. 
If a preset dictionary is needed after this call (see inflateSetDictionary below), inflate sets strm->adler to the Adler-32 checksum of the dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise it sets strm->adler to the Adler-32 checksum of all output produced so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described below. At the end of the stream, inflate() checks that its computed Adler-32 checksum is equal to that saved by the compressor and returns Z_STREAM_END only if the checksum is correct. inflate() can decompress and check either zlib-wrapped or gzip-wrapped deflate data. The header type is detected automatically, if requested when initializing with inflateInit2(). Any information contained in the gzip header is not retained unless inflateGetHeader() is used. When processing gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output produced so far. The CRC-32 is checked against the gzip trailer, as is the uncompressed length, modulo 2^32. inflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if the end of the compressed data has been reached and all uncompressed output has been produced, Z_NEED_DICT if a preset dictionary is needed at this point, Z_DATA_ERROR if the input data was corrupted (input stream not conforming to the zlib format or incorrect check value, in which case strm->msg points to a string with a more specific error), Z_STREAM_ERROR if the stream structure was inconsistent (for example next_in or next_out was Z_NULL, or the state was inadvertently written over by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no progress was possible or if there was not enough room in the output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and inflate() can be called again with more input and more output space to continue decompressing. 
If Z_DATA_ERROR is returned, the application may then call inflateSync() to look for a good compression block if a partial recovery of the data is to be attempted. */ ZEXTERN int ZEXPORT inflateEnd(z_streamp strm); /* All dynamically allocated data structures for this stream are freed. This function discards any unprocessed input and does not flush any pending output. inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state was inconsistent. */ /* Advanced functions */ /* The following functions are needed only in some special applications. */ /* ZEXTERN int ZEXPORT deflateInit2(z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy); This is another version of deflateInit with more compression options. The fields zalloc, zfree and opaque must be initialized before by the caller. The method parameter is the compression method. It must be Z_DEFLATED in this version of the library. The windowBits parameter is the base two logarithm of the window size (the size of the history buffer). It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression at the expense of memory usage. The default value is 15 if deflateInit is used instead. For the current implementation of deflate(), a windowBits value of 8 (a window size of 256 bytes) is not supported. As a result, a request for 8 will result in 9 (a 512-byte window). In that case, providing 8 to inflateInit2() will result in an error when the zlib header with 9 is checked against the initialization of inflate(). The remedy is to not use 8 with deflateInit2() with this initialization, or at least in that case use 9 with inflateInit2(). windowBits can also be -8..-15 for raw deflate. In this case, -windowBits determines the window size. deflate() will then generate raw deflate data with no zlib header or trailer, and will not compute a check value. windowBits can also be greater than 15 for optional gzip encoding. 
Add 16 to windowBits to write a simple gzip header and trailer around the compressed data instead of a zlib wrapper. The gzip header will have no file name, no extra data, no comment, no modification time (set to zero), no header crc, and the operating system will be set to the appropriate value, if the operating system was determined at compile time. If a gzip stream is being written, strm->adler is a CRC-32 instead of an Adler-32. For raw deflate or gzip encoding, a request for a 256-byte window is rejected as invalid, since only the zlib header provides a means of transmitting the window size to the decompressor. The memLevel parameter specifies how much memory should be allocated for the internal compression state. memLevel=1 uses minimum memory but is slow and reduces compression ratio; memLevel=9 uses maximum memory for optimal speed. The default value is 8. See zconf.h for total memory usage as a function of windowBits and memLevel. The strategy parameter is used to tune the compression algorithm. Use the value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no string match), or Z_RLE to limit match distances to one (run-length encoding). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the compression algorithm is tuned to compress them better. The effect of Z_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The strategy parameter only affects the compression ratio but not the correctness of the compressed output even if it is not set appropriately. Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications. 
deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible with the version assumed by the caller (ZLIB_VERSION). msg is set to null if there is no error message. deflateInit2 does not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateSetDictionary(z_streamp strm, const Bytef *dictionary, uInt dictLength); /* Initializes the compression dictionary from the given byte sequence without producing any compressed output. When using the zlib format, this function must be called immediately after deflateInit, deflateInit2 or deflateReset, and before any call of deflate. When doing raw deflate, this function must be called either before any call of deflate, or immediately after the completion of a deflate block, i.e. after all input has been consumed and all output has been delivered when using any of the flush options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The compressor and decompressor must use exactly the same dictionary (see inflateSetDictionary). The dictionary should consist of strings (byte sequences) that are likely to be encountered later in the data to be compressed, with the most commonly used strings preferably put towards the end of the dictionary. Using a dictionary is most useful when the data to be compressed is short and can be predicted with good accuracy; the data can then be compressed better than with the default empty dictionary. Depending on the size of the compression data structures selected by deflateInit or deflateInit2, a part of the dictionary may in effect be discarded, for example if the dictionary is larger than the window size provided in deflateInit or deflateInit2. Thus the strings most likely to be useful should be put at the end of the dictionary, not at the front. 
In addition, the current implementation of deflate will use at most the window size minus 262 bytes of the provided dictionary. Upon return of this function, strm->adler is set to the Adler-32 value of the dictionary; the decompressor may later use this value to determine which dictionary has been used by the compressor. (The Adler-32 value applies to the whole dictionary even if only a subset of the dictionary is actually used by the compressor.) If a raw deflate was requested, then the Adler-32 value is not computed and strm->adler is not set. deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent (for example if deflate has already been called for this stream or if not at a block boundary for raw deflate). deflateSetDictionary does not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateGetDictionary(z_streamp strm, Bytef *dictionary, uInt *dictLength); /* Returns the sliding dictionary being maintained by deflate. dictLength is set to the number of bytes in the dictionary, and that many bytes are copied to dictionary. dictionary must have enough space, where 32768 bytes is always enough. If deflateGetDictionary() is called with dictionary equal to Z_NULL, then only the dictionary length is returned, and nothing is copied. Similarly, if dictLength is Z_NULL, then it is not set. deflateGetDictionary() may return a length less than the window size, even when more than the window size in input has been provided. It may return up to 258 bytes less in that case, due to how zlib's implementation of deflate manages the sliding window and lookahead for matches, where matches can be up to 258 bytes long. If the application needs the last window-size bytes of input, then that would need to be saved by the application outside of zlib. deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the stream state is inconsistent. 
*/ ZEXTERN int ZEXPORT deflateCopy(z_streamp dest, z_streamp source); /* Sets the destination stream as a complete copy of the source stream. This function can be useful when several compression strategies will be tried, for example when there are several ways of pre-processing the input data with a filter. The streams that will be discarded should then be freed by calling deflateEnd. Note that deflateCopy duplicates the internal compression state which can be quite large, so this strategy is slow and can consume lots of memory. deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT deflateReset(z_streamp strm); /* This function is equivalent to deflateEnd followed by deflateInit, but does not free and reallocate the internal compression state. The stream will leave the compression level and any other attributes that may have been set unchanged. total_in, total_out, adler, and msg are initialized. deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being Z_NULL). */ ZEXTERN int ZEXPORT deflateParams(z_streamp strm, int level, int strategy); /* Dynamically update the compression level and compression strategy. The interpretation of level and strategy is as in deflateInit2(). This can be used to switch between compression and straight copy of the input data, or to switch to a different kind of input data requiring a different strategy. If the compression approach (which is a function of the level) or the strategy is changed, and if there have been any deflate() calls since the state was initialized or reset, then the input available so far is compressed with the old level and strategy using deflate(strm, Z_BLOCK). There are three approaches for the compression levels 0, 1..3, and 4..9 respectively. 
The new level and strategy will take effect at the next call of deflate(). If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does not have enough output space to complete, then the parameter change will not take effect. In this case, deflateParams() can be called again with the same parameters and more output space to try again. In order to assure a change in the parameters on the first try, the deflate stream should be flushed using deflate() with Z_BLOCK or other flush request until strm.avail_out is not zero, before calling deflateParams(). Then no more input data should be provided before the deflateParams() call. If this is done, the old level and strategy will be applied to the data compressed before deflateParams(), and the new level and strategy will be applied to the data compressed after deflateParams(). deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if there was not enough output space to complete the compression of the available input data before a change in the strategy or approach. Note that in the case of a Z_BUF_ERROR, the parameters are not changed. A return value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be retried with more output space. */ ZEXTERN int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy, int nice_length, int max_chain); /* Fine tune deflate's internal compression parameters. This should only be used by someone who understands the algorithm used by zlib's deflate for searching for the best matching string, and even then only by the most fanatic optimizer trying to squeeze out the last compressed bit for their specific input data. Read the deflate.c source code for the meaning of the max_lazy, good_length, nice_length, and max_chain parameters. 
deflateTune() can be called after deflateInit() or deflateInit2(), and returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. */ ZEXTERN uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen); /* deflateBound() returns an upper bound on the compressed size after deflation of sourceLen bytes. It must be called after deflateInit() or deflateInit2(), and after deflateSetHeader(), if used. This would be used to allocate an output buffer for deflation in a single pass, and so would be called before deflate(). If that first deflate() call is provided the sourceLen input bytes, an output buffer allocated to the size returned by deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed to return Z_STREAM_END. Note that it is possible for the compressed size to be larger than the value returned by deflateBound() if flush options other than Z_FINISH or Z_NO_FLUSH are used. */ ZEXTERN int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits); /* deflatePending() returns the number of bytes and bits of output that have been generated, but not yet provided in the available output. The bytes not provided would be due to the available output space having been consumed. The number of bits of output not provided are between 0 and 7, where they await more bits to join them in order to fill out a full byte. If pending or bits are Z_NULL, then those values are not set. deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ ZEXTERN int ZEXPORT deflatePrime(z_streamp strm, int bits, int value); /* deflatePrime() inserts bits in the deflate output stream. The intent is that this function is used to start off the deflate output with the bits leftover from a previous deflate stream when appending to it. As such, this function can only be used for raw deflate, and must be used before the first deflate() call after a deflateInit2() or deflateReset(). 
bits must be less than or equal to 16, and that many of the least significant bits of value will be inserted in the output. deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the source stream state was inconsistent. */ ZEXTERN int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head); /* deflateSetHeader() provides gzip header information for when a gzip stream is requested by deflateInit2(). deflateSetHeader() may be called after deflateInit2() or deflateReset() and before the first call of deflate(). The text, time, os, extra field, name, and comment information in the provided gz_header structure are written to the gzip header (xflag is ignored -- the extra flags are set according to the compression level). The caller must assure that, if not Z_NULL, name and comment are terminated with a zero byte, and that if extra is not Z_NULL, that extra_len bytes are available there. If hcrc is true, a gzip header crc is included. Note that the current versions of the command-line version of gzip (up through version 1.3.x) do not support header crc's, and will report that it is a "multi-part gzip file" and give up. If deflateSetHeader is not used, the default gzip header has text false, the time set to zero, and os set to the current operating system, with no extra, name, or comment fields. The gzip header is returned to the default state by deflateReset(). deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ /* ZEXTERN int ZEXPORT inflateInit2(z_streamp strm, int windowBits); This is another version of inflateInit with an extra parameter. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. The windowBits parameter is the base two logarithm of the maximum window size (the size of the history buffer). It should be in the range 8..15 for this version of the library. 
The default value is 15 if inflateInit is used instead. windowBits must be greater than or equal to the windowBits value provided to deflateInit2() while compressing, or it must be equal to 15 if deflateInit2() was not used. If a compressed stream with a larger window size is given as input, inflate() will return with the error code Z_DATA_ERROR instead of trying to allocate a larger window. windowBits can also be zero to request that inflate use the window size in the zlib header of the compressed stream. windowBits can also be -8..-15 for raw inflate. In this case, -windowBits determines the window size. inflate() will then process raw deflate data, not looking for a zlib or gzip header, not generating a check value, and not looking for any check values for comparison at the end of the stream. This is for use with other formats that use the deflate compressed data format such as zip. Those formats provide their own check values. If a custom format is developed using the raw deflate format for compressed data, it is recommended that a check value such as an Adler-32 or a CRC-32 be applied to the uncompressed data as is done in the zlib, gzip, and zip formats. For most applications, the zlib format should be used as is. Note that comments above on the use in deflateInit2() apply to the magnitude of windowBits. windowBits can also be greater than 15 for optional gzip decoding. Add 32 to windowBits to enable zlib and gzip decoding with automatic header detection, or add 16 to decode only the gzip format (the zlib format will return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see below), inflate() will *not* automatically decode concatenated gzip members. inflate() will return Z_STREAM_END at the end of the gzip member. The state would need to be reset to continue decoding a subsequent gzip member. 
This *must* be done if there is more data after a gzip member, in order for the decompression to be compliant with the gzip standard (RFC 1952). inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the version assumed by the caller, or Z_STREAM_ERROR if the parameters are invalid, such as a null pointer to the structure. msg is set to null if there is no error message. inflateInit2 does not perform any decompression apart from possibly reading the zlib header if present: actual decompression will be done by inflate(). (So next_in and avail_in may be modified, but next_out and avail_out are unused and unchanged.) The current implementation of inflateInit2() does not process any header information -- that is deferred until inflate() is called. */ ZEXTERN int ZEXPORT inflateSetDictionary(z_streamp strm, const Bytef *dictionary, uInt dictLength); /* Initializes the decompression dictionary from the given uncompressed byte sequence. This function must be called immediately after a call of inflate, if that call returned Z_NEED_DICT. The dictionary chosen by the compressor can be determined from the Adler-32 value returned by that call of inflate. The compressor and decompressor must use exactly the same dictionary (see deflateSetDictionary). For raw inflate, this function can be called at any time to set the dictionary. If the provided dictionary is smaller than the window and there is already data in the window, then the provided dictionary will amend what's there. The application must insure that the dictionary that was used for compression is provided. inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the expected one (incorrect Adler-32 value). 
inflateSetDictionary does not perform any decompression: this will be done by subsequent calls of inflate(). */ ZEXTERN int ZEXPORT inflateGetDictionary(z_streamp strm, Bytef *dictionary, uInt *dictLength); /* Returns the sliding dictionary being maintained by inflate. dictLength is set to the number of bytes in the dictionary, and that many bytes are copied to dictionary. dictionary must have enough space, where 32768 bytes is always enough. If inflateGetDictionary() is called with dictionary equal to Z_NULL, then only the dictionary length is returned, and nothing is copied. Similarly, if dictLength is Z_NULL, then it is not set. inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the stream state is inconsistent. */ ZEXTERN int ZEXPORT inflateSync(z_streamp strm); /* Skips invalid compressed data until a possible full flush point (see above for the description of deflate with Z_FULL_FLUSH) can be found, or until all available input is skipped. No output is provided. inflateSync searches for a 00 00 FF FF pattern in the compressed data. All full flush points have this pattern, but not all occurrences of this pattern are full flush points. inflateSync returns Z_OK if a possible full flush point has been found, Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. In the success case, the application may save the current value of total_in which indicates where valid compressed data was found. In the error case, the application may repeatedly call inflateSync, providing more input each time, until success or end of the input data. */ ZEXTERN int ZEXPORT inflateCopy(z_streamp dest, z_streamp source); /* Sets the destination stream as a complete copy of the source stream. This function can be useful when randomly accessing a large stream. 
The first pass through the stream can periodically record the inflate state, allowing restarting inflate at those points when randomly accessing the stream. inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT inflateReset(z_streamp strm); /* This function is equivalent to inflateEnd followed by inflateInit, but does not free and reallocate the internal decompression state. The stream will keep attributes that may have been set by inflateInit2. total_in, total_out, adler, and msg are initialized. inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being Z_NULL). */ ZEXTERN int ZEXPORT inflateReset2(z_streamp strm, int windowBits); /* This function is the same as inflateReset, but it also permits changing the wrap and window size requests. The windowBits parameter is interpreted the same as it is for inflateInit2. If the window size is changed, then the memory allocated for the window is freed, and the window will be reallocated by inflate() if needed. inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being Z_NULL), or if the windowBits parameter is invalid. */ ZEXTERN int ZEXPORT inflatePrime(z_streamp strm, int bits, int value); /* This function inserts bits in the inflate input stream. The intent is that this function is used to start inflating at a bit position in the middle of a byte. The provided bits will be used before any bytes are used from next_in. This function should only be used with raw inflate, and should be used before the first inflate() call after inflateInit2() or inflateReset(). bits must be less than or equal to 16, and that many of the least significant bits of value will be inserted in the input. 
If bits is negative, then the input stream bit buffer is emptied. Then inflatePrime() can be called again to put bits in the buffer. This is used to clear out bits leftover after feeding inflate a block description prior to feeding inflate codes. inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ ZEXTERN long ZEXPORT inflateMark(z_streamp strm); /* This function returns two values, one in the lower 16 bits of the return value, and the other in the remaining upper bits, obtained by shifting the return value down 16 bits. If the upper value is -1 and the lower value is zero, then inflate() is currently decoding information outside of a block. If the upper value is -1 and the lower value is non-zero, then inflate is in the middle of a stored block, with the lower value equaling the number of bytes from the input remaining to copy. If the upper value is not -1, then it is the number of bits back from the current bit position in the input of the code (literal or length/distance pair) currently being processed. In that case the lower value is the number of bytes already emitted for that code. A code is being processed if inflate is waiting for more input to complete decoding of the code, or if it has completed decoding but is waiting for more output space to write the literal or match data. inflateMark() is used to mark locations in the input data for random access, which may be at bit positions, and to note those cases where the output of a code may span boundaries of random access blocks. The current location in the input stream can be determined from avail_in and data_type as noted in the description for the Z_BLOCK flush parameter for inflate. inflateMark returns the value noted above, or -65536 if the provided source stream state was inconsistent. 
*/ ZEXTERN int ZEXPORT inflateGetHeader(z_streamp strm, gz_headerp head); /* inflateGetHeader() requests that gzip header information be stored in the provided gz_header structure. inflateGetHeader() may be called after inflateInit2() or inflateReset(), and before the first call of inflate(). As inflate() processes the gzip stream, head->done is zero until the header is completed, at which time head->done is set to one. If a zlib stream is being decoded, then head->done is set to -1 to indicate that there will be no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be used to force inflate() to return immediately after header processing is complete and before any actual data is decompressed. The text, time, xflags, and os fields are filled in with the gzip header contents. hcrc is set to true if there is a header CRC. (The header CRC was valid if done is set to one.) If extra is not Z_NULL, then extra_max contains the maximum number of bytes to write to extra. Once done is true, extra_len contains the actual extra field length, and extra contains the extra field, or that field truncated if extra_max is less than extra_len. If name is not Z_NULL, then up to name_max characters are written there, terminated with a zero unless the length is greater than name_max. If comment is not Z_NULL, then up to comm_max characters are written there, terminated with a zero unless the length is greater than comm_max. When any of extra, name, or comment are not Z_NULL and the respective field is not present in the header, then that field is set to Z_NULL to signal its absence. This allows the use of deflateSetHeader() with the returned structure to duplicate the header. However if those fields are set to allocated memory, then the application will need to save those pointers elsewhere so that they can be eventually freed. If inflateGetHeader is not used, then the header information is simply discarded. 
The header is always checked for validity, including the header CRC if present. inflateReset() will reset the process to discard the header information. The application would need to call inflateGetHeader() again to retrieve the header from the next gzip stream. inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ /* ZEXTERN int ZEXPORT inflateBackInit(z_streamp strm, int windowBits, unsigned char FAR *window); Initialize the internal stream state for decompression using inflateBack() calls. The fields zalloc, zfree and opaque in strm must be initialized before the call. If zalloc and zfree are Z_NULL, then the default library- derived memory allocation routines are used. windowBits is the base two logarithm of the window size, in the range 8..15. window is a caller supplied buffer of that size. Except for special applications where it is assured that deflate was used with small window sizes, windowBits must be 15 and a 32K byte window must be supplied to be able to decompress general deflate streams. See inflateBack() for the usage of these routines. inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of the parameters are invalid, Z_MEM_ERROR if the internal state could not be allocated, or Z_VERSION_ERROR if the version of the library does not match the version of the header file. */ typedef unsigned (*in_func)(void FAR *, z_const unsigned char FAR * FAR *); typedef int (*out_func)(void FAR *, unsigned char FAR *, unsigned); ZEXTERN int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc, out_func out, void FAR *out_desc); /* inflateBack() does a raw inflate with a single call using a call-back interface for input and output. This is potentially more efficient than inflate() for file i/o applications, in that it avoids copying between the output and the sliding window by simply making the window itself the output buffer. 
inflate() can be faster on modern CPUs when used with large buffers. inflateBack() trusts the application to not change the output buffer passed by the output function, at least until inflateBack() returns. inflateBackInit() must be called first to allocate the internal state and to initialize the state with the user-provided window buffer. inflateBack() may then be used multiple times to inflate a complete, raw deflate stream with each call. inflateBackEnd() is then called to free the allocated state. A raw deflate stream is one with no zlib or gzip header or trailer. This routine would normally be used in a utility that reads zip or gzip files and writes out uncompressed files. The utility would decode the header and process the trailer on its own, hence this routine expects only the raw deflate stream to decompress. This is different from the default behavior of inflate(), which expects a zlib header and trailer around the deflate stream. inflateBack() uses two subroutines supplied by the caller that are then called by inflateBack() for input and output. inflateBack() calls those routines until it reads a complete deflate stream and writes out all of the uncompressed data, or until it encounters an error. The function's parameters and return types are defined above in the in_func and out_func typedefs. inflateBack() will call in(in_desc, &buf) which should return the number of bytes of provided input, and a pointer to that input in buf. If there is no input available, in() must return zero -- buf is ignored in that case -- and inflateBack() will return a buffer error. inflateBack() will call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. out() should return zero on success, or non-zero on failure. If out() returns non-zero, inflateBack() will return with an error. Neither in() nor out() are permitted to change the contents of the window provided to inflateBackInit(), which is also the buffer that out() uses to write from. 
The length written by out() will be at most the window size. Any non-zero amount of input may be provided by in(). For convenience, inflateBack() can be provided input on the first call by setting strm->next_in and strm->avail_in. If that input is exhausted, then in() will be called. Therefore strm->next_in must be initialized before calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in must also be initialized, and then if strm->avail_in is not zero, input will initially be taken from strm->next_in[0 .. strm->avail_in - 1]. The in_desc and out_desc parameters of inflateBack() are passed as the first parameter of in() and out() respectively when they are called. These descriptors can be optionally used to pass any information that the caller- supplied in() and out() functions need to do their job. On return, inflateBack() will set strm->next_in and strm->avail_in to pass back any unused input that was provided by the last in() call. The return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR if in() or out() returned an error, Z_DATA_ERROR if there was a format error in the deflate stream (in which case strm->msg is set to indicate the nature of the error), or Z_STREAM_ERROR if the stream was not properly initialized. In the case of Z_BUF_ERROR, an input or output error can be distinguished using strm->next_in which will be Z_NULL only if in() returned an error. If strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning non-zero. (in() will always be called before out(), so strm->next_in is assured to be defined if out() returns non-zero.) Note that inflateBack() cannot return Z_OK. */ ZEXTERN int ZEXPORT inflateBackEnd(z_streamp strm); /* All memory allocated by inflateBackInit() is freed. inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream state was inconsistent. 
*/ ZEXTERN uLong ZEXPORT zlibCompileFlags(void); /* Return flags indicating compile-time options. Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: 1.0: size of uInt 3.2: size of uLong 5.4: size of voidpf (pointer) 7.6: size of z_off_t Compiler, assembler, and debug options: 8: ZLIB_DEBUG 9: ASMV or ASMINF -- use ASM code 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention 11: 0 (reserved) One-time table building (smaller code, but not thread-safe if true): 12: BUILDFIXED -- build static block decoding tables when needed 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed 14,15: 0 (reserved) Library content (indicates missing functionality): 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking deflate code when not needed) 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect and decode gzip streams (to avoid linking crc code) 18-19: 0 (reserved) Operation variations (changes in library functionality): 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate 21: FASTEST -- deflate algorithm with only one, lowest compression level 22,23: 0 (reserved) The sprintf variant used by gzprintf (zero is best): 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! 26: 0 = returns value, 1 = void -- 1 means inferred string length returned Remainder: 27-31: 0 (reserved) */ #ifndef Z_SOLO /* utility functions */ /* The following utility functions are implemented on top of the basic stream-oriented functions. To simplify the interface, some default options are assumed (compression level and memory usage, standard memory allocation functions). The source code of these utility functions can be modified if you need special options. */ ZEXTERN int ZEXPORT compress(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen); /* Compresses the source buffer into the destination buffer. 
sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least the value returned by compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed data. compress() is equivalent to compress2() with a level parameter of Z_DEFAULT_COMPRESSION. compress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer. */ ZEXTERN int ZEXPORT compress2(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen, int level); /* Compresses the source buffer into the destination buffer. The level parameter has the same meaning as in deflateInit. sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least the value returned by compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed data. compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, Z_STREAM_ERROR if the level parameter is invalid. */ ZEXTERN uLong ZEXPORT compressBound(uLong sourceLen); /* compressBound() returns an upper bound on the compressed size after compress() or compress2() on sourceLen bytes. It would be used before a compress() or compress2() call to allocate the destination buffer. */ ZEXTERN int ZEXPORT uncompress(Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen); /* Decompresses the source buffer into the destination buffer. sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be large enough to hold the entire uncompressed data. (The size of the uncompressed data must have been saved previously by the compressor and transmitted to the decompressor by some mechanism outside the scope of this compression library.) 
Upon exit, destLen is the actual size of the uncompressed data. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In the case where there is not enough room, uncompress() will fill the output buffer with the uncompressed data up to that point. */ ZEXTERN int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen, const Bytef *source, uLong *sourceLen); /* Same as uncompress, except that sourceLen is a pointer, where the length of the source is *sourceLen. On return, *sourceLen is the number of source bytes consumed. */ /* gzip file access functions */ /* This library supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio, using the functions that start with "gz". The gzip format is different from the zlib format. gzip is a gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. */ typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ /* ZEXTERN gzFile ZEXPORT gzopen(const char *path, const char *mode); Open the gzip (.gz) file at path for reading and decompressing, or compressing and writing. The mode parameter is as in fopen ("rb" or "wb") but can also include a compression level ("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' for fixed code compression as in "wb9F". (See the description of deflateInit2 for more information about the strategy parameter.) 'T' will request transparent writing or appending with no compression and not using the gzip format. "a" can be used instead of "w" to request that the gzip stream that will be written be appended to the file. "+" will result in an error, since reading and writing to the same gzip file is not supported. 
The addition of "x" when writing will create the file exclusively, which fails if the file already exists. On systems that support it, the addition of "e" when reading or writing will set the flag to close the file on an execve() call. These functions, as well as gzip, will read and decode a sequence of gzip streams in a file. The append function of gzopen() can be used to create such a file. (Also see gzflush() for another way to do this.) When appending, gzopen does not test whether the file begins with a gzip stream, nor does it look for the end of the gzip streams to begin appending. gzopen will simply append a gzip stream to the existing file. gzopen can be used to read a file which is not in gzip format; in this case gzread will directly read from the file without decompression. When reading, this will be detected automatically by looking for the magic two- byte gzip header. gzopen returns NULL if the file could not be opened, if there was insufficient memory to allocate the gzFile state, or if an invalid mode was specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). errno can be checked to determine if the reason gzopen failed was that the file could not be opened. */ ZEXTERN gzFile ZEXPORT gzdopen(int fd, const char *mode); /* Associate a gzFile with the file descriptor fd. File descriptors are obtained from calls like open, dup, creat, pipe or fileno (if the file has been previously opened with fopen). The mode parameter is as in gzopen. The next call of gzclose on the returned gzFile will also close the file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, mode);. The duplicated descriptor should be saved to avoid a leak, since gzdopen does not close fd if it fails. If you are using fileno() to get the file descriptor from a FILE *, then you will have to use dup() to avoid double-close()ing the file descriptor. 
Both gzclose() and fclose() will close the associated file descriptor, so they need to have different file descriptors. gzdopen returns NULL if there was insufficient memory to allocate the gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not provided, or '+' was provided), or if fd is -1. The file descriptor is not used until the next gz* read, write, seek, or close operation, so gzdopen will not detect if fd is invalid (unless fd is -1). */ ZEXTERN int ZEXPORT gzbuffer(gzFile file, unsigned size); /* Set the internal buffer size used by this library's functions for file to size. The default buffer size is 8192 bytes. This function must be called after gzopen() or gzdopen(), and before any other calls that read or write the file. The buffer memory allocation is always deferred to the first read or write. Three times that size in buffer space is allocated. A larger buffer size of, for example, 64K or 128K bytes will noticeably increase the speed of decompression (reading). The new buffer size also affects the maximum length for gzprintf(). gzbuffer() returns 0 on success, or -1 on failure, such as being called too late. */ ZEXTERN int ZEXPORT gzsetparams(gzFile file, int level, int strategy); /* Dynamically update the compression level and strategy for file. See the description of deflateInit2 for the meaning of these parameters. Previously provided data is flushed before applying the parameter changes. gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not opened for writing, Z_ERRNO if there is an error writing the flushed data, or Z_MEM_ERROR if there is a memory allocation error. */ ZEXTERN int ZEXPORT gzread(gzFile file, voidp buf, unsigned len); /* Read and decompress up to len uncompressed bytes from file into buf. If the input file is not in gzip format, gzread copies the given number of bytes into the buffer directly from the file. 
After reaching the end of a gzip stream in the input, gzread will continue to read, looking for another gzip stream. Any number of gzip streams may be concatenated in the input file, and will all be decompressed by gzread(). If something other than a gzip stream is encountered after a gzip stream, that remaining trailing garbage is ignored (and no error is returned). gzread can be used to read a gzip file that is being concurrently written. Upon reaching the end of the input, gzread will return with the available data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then gzclearerr can be used to clear the end of file indicator in order to permit gzread to be tried again. Z_OK indicates that a gzip stream was completed on the last gzread. Z_BUF_ERROR indicates that the input file ended in the middle of a gzip stream. Note that gzread does not return -1 in the event of an incomplete gzip stream. This error is deferred until gzclose(), which will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip stream. Alternatively, gzerror can be used before gzclose to detect this case. gzread returns the number of uncompressed bytes actually read, less than len for end of file, or -1 for error. If len is too large to fit in an int, then nothing is read, -1 is returned, and the error state is set to Z_STREAM_ERROR. */ ZEXTERN z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems, gzFile file); /* Read and decompress up to nitems items of size size from file into buf, otherwise operating as gzread() does. This duplicates the interface of stdio's fread(), with size_t request and return types. If the library defines size_t, then z_size_t is identical to size_t. If not, then z_size_t is an unsigned integer type that can contain a pointer. gzfread() returns the number of full items read of size size, or zero if the end of the file was reached and a full item could not be read, or if there was an error. 
gzerror() must be consulted if zero is returned in order to determine if there was an error. If the multiplication of size and nitems overflows, i.e. the product does not fit in a z_size_t, then nothing is read, zero is returned, and the error state is set to Z_STREAM_ERROR. In the event that the end of file is reached and only a partial item is available at the end, i.e. the remaining uncompressed data length is not a multiple of size, then the final partial item is nevertheless read into buf and the end-of-file flag is set. The length of the partial item read is not provided, but could be inferred from the result of gztell(). This behavior is the same as the behavior of fread() implementations in common libraries, but it prevents the direct use of gzfread() to read a concurrently written file, resetting and retrying on end-of-file, when size is not 1. */ ZEXTERN int ZEXPORT gzwrite(gzFile file, voidpc buf, unsigned len); /* Compress and write the len uncompressed bytes at buf to file. gzwrite returns the number of uncompressed bytes written or 0 in case of error. */ ZEXTERN z_size_t ZEXPORT gzfwrite(voidpc buf, z_size_t size, z_size_t nitems, gzFile file); /* Compress and write nitems items of size size from buf to file, duplicating the interface of stdio's fwrite(), with size_t request and return types. If the library defines size_t, then z_size_t is identical to size_t. If not, then z_size_t is an unsigned integer type that can contain a pointer. gzfwrite() returns the number of full items written of size size, or zero if there was an error. If the multiplication of size and nitems overflows, i.e. the product does not fit in a z_size_t, then nothing is written, zero is returned, and the error state is set to Z_STREAM_ERROR. */ ZEXTERN int ZEXPORTVA gzprintf(gzFile file, const char *format, ...); /* Convert, format, compress, and write the arguments (...) to file under control of the string format, as in fprintf. 
gzprintf returns the number of uncompressed bytes actually written, or a negative zlib error code in case of error. The number of uncompressed bytes written is limited to 8191, or one less than the buffer size given to gzbuffer(). The caller should assure that this limit is not exceeded. If it is exceeded, then gzprintf() will return an error (0) with nothing written. In this case, there may also be a buffer overflow with unpredictable consequences, which is possible only if zlib was compiled with the insecure functions sprintf() or vsprintf(), because the secure snprintf() or vsnprintf() functions were not available. This can be determined using zlibCompileFlags(). */ ZEXTERN int ZEXPORT gzputs(gzFile file, const char *s); /* Compress and write the given null-terminated string s to file, excluding the terminating null character. gzputs returns the number of characters written, or -1 in case of error. */ ZEXTERN char * ZEXPORT gzgets(gzFile file, char *buf, int len); /* Read and decompress bytes from file into buf, until len-1 characters are read, or until a newline character is read and transferred to buf, or an end-of-file condition is encountered. If any characters are read or if len is one, the string is terminated with a null character. If no characters are read due to an end-of-file or len is less than one, then the buffer is left untouched. gzgets returns buf which is a null-terminated string, or it returns NULL for end-of-file or in case of error. If there was an error, the contents at buf are indeterminate. */ ZEXTERN int ZEXPORT gzputc(gzFile file, int c); /* Compress and write c, converted to an unsigned char, into file. gzputc returns the value that was written, or -1 in case of error. */ ZEXTERN int ZEXPORT gzgetc(gzFile file); /* Read and decompress one byte from file. gzgetc returns this byte or -1 in case of end of file or error. This is implemented as a macro for speed. As such, it does not do all of the checking the other functions do. I.e. 
it does not check to see if file is NULL, nor whether the structure file points to has been clobbered or not. */ ZEXTERN int ZEXPORT gzungetc(int c, gzFile file); /* Push c back onto the stream for file to be read as the first character on the next read. At least one character of push-back is always allowed. gzungetc() returns the character pushed, or -1 on failure. gzungetc() will fail if c is -1, and may fail if a character has been pushed but not read yet. If gzungetc is used immediately after gzopen or gzdopen, at least the output buffer size of pushed characters is allowed. (See gzbuffer above.) The pushed character will be discarded if the stream is repositioned with gzseek() or gzrewind(). */ ZEXTERN int ZEXPORT gzflush(gzFile file, int flush); /* Flush all pending output to file. The parameter flush is as in the deflate() function. The return value is the zlib error number (see function gzerror below). gzflush is only permitted when writing. If the flush parameter is Z_FINISH, the remaining data is written and the gzip stream is completed in the output. If gzwrite() is called again, a new gzip stream will be started in the output. gzread() is able to read such concatenated gzip streams. gzflush should be called only when strictly necessary because it will degrade compression if called too often. */ /* ZEXTERN z_off_t ZEXPORT gzseek(gzFile file, z_off_t offset, int whence); Set the starting position to offset relative to whence for the next gzread or gzwrite on file. The offset represents a number of bytes in the uncompressed data stream. The whence parameter is defined as in lseek(2); the value SEEK_END is not supported. If the file is opened for reading, this function is emulated but can be extremely slow. If the file is opened for writing, only forward seeks are supported; gzseek then compresses a sequence of zeroes up to the new starting position. 
gzseek returns the resulting offset location as measured in bytes from the beginning of the uncompressed stream, or -1 in case of error, in particular if the file is opened for writing and the new starting position would be before the current position. */ ZEXTERN int ZEXPORT gzrewind(gzFile file); /* Rewind file. This function is supported only for reading. gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET). */ /* ZEXTERN z_off_t ZEXPORT gztell(gzFile file); Return the starting position for the next gzread or gzwrite on file. This position represents a number of bytes in the uncompressed data stream, and is zero when starting, even if appending or reading a gzip stream from the middle of a file using gzdopen(). gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) */ /* ZEXTERN z_off_t ZEXPORT gzoffset(gzFile file); Return the current compressed (actual) read or write offset of file. This offset includes the count of bytes that precede the gzip stream, for example when appending or when using gzdopen() for reading. When reading, the offset does not include as yet unused buffered input. This information can be used for a progress indicator. On error, gzoffset() returns -1. */ ZEXTERN int ZEXPORT gzeof(gzFile file); /* Return true (1) if the end-of-file indicator for file has been set while reading, false (0) otherwise. Note that the end-of-file indicator is set only if the read tried to go past the end of the input, but came up short. Therefore, just like feof(), gzeof() may return false even if there is no more data to read, in the event that the last read request was for the exact number of bytes remaining in the input file. This will happen if the input file size is an exact multiple of the buffer size. If gzeof() returns true, then the read functions will return no more data, unless the end-of-file indicator is reset by gzclearerr() and the input file has grown since the previous end of file was detected. 
*/ ZEXTERN int ZEXPORT gzdirect(gzFile file); /* Return true (1) if file is being copied directly while reading, or false (0) if file is a gzip stream being decompressed. If the input file is empty, gzdirect() will return true, since the input does not contain a gzip stream. If gzdirect() is used immediately after gzopen() or gzdopen() it will cause buffers to be allocated to allow reading the file to determine if it is a gzip file. Therefore if gzbuffer() is used, it should be called before gzdirect(). When writing, gzdirect() returns true (1) if transparent writing was requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: gzdirect() is not needed when writing. Transparent writing must be explicitly requested, so the application already knows the answer. When linking statically, using gzdirect() will include all of the zlib code for gzip file reading and decompression, which may not be desired.) */ ZEXTERN int ZEXPORT gzclose(gzFile file); /* Flush all pending output for file, if necessary, close file and deallocate the (de)compression state. Note that once file is closed, you cannot call gzerror with file, since its structures have been deallocated. gzclose must not be called more than once on the same file, just as free must not be called more than once on the same allocation. gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the last read ended in the middle of a gzip stream, or Z_OK on success. */ ZEXTERN int ZEXPORT gzclose_r(gzFile file); ZEXTERN int ZEXPORT gzclose_w(gzFile file); /* Same as gzclose(), but gzclose_r() is only for use when reading, and gzclose_w() is only for use when writing or appending. The advantage to using these instead of gzclose() is that they avoid linking in zlib compression or decompression code that is not used when only reading or only writing respectively. 
If gzclose() is used, then both compression and decompression code will be included in the application when linking to a static zlib library. */ ZEXTERN const char * ZEXPORT gzerror(gzFile file, int *errnum); /* Return the error message for the last error which occurred on file. errnum is set to zlib error number. If an error occurred in the file system and not in the compression library, errnum is set to Z_ERRNO and the application may consult errno to get the exact error code. The application must not modify the returned string. Future calls to this function may invalidate the previously returned string. If file is closed, then the string previously returned by gzerror will no longer be available. gzerror() should be used to distinguish errors from end-of-file for those functions above that do not distinguish those cases in their return values. */ ZEXTERN void ZEXPORT gzclearerr(gzFile file); /* Clear the error and end-of-file flags for file. This is analogous to the clearerr() function in stdio. This is useful for continuing to read a gzip file that is being written concurrently. */ #endif /* !Z_SOLO */ /* checksum functions */ /* These functions are not related to compression but are exported anyway because they might be useful in applications using the compression library. */ ZEXTERN uLong ZEXPORT adler32(uLong adler, const Bytef *buf, uInt len); /* Update a running Adler-32 checksum with the bytes buf[0..len-1] and return the updated checksum. An Adler-32 value is in the range of a 32-bit unsigned integer. If buf is Z_NULL, this function returns the required initial value for the checksum. An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed much faster.
Usage example: uLong adler = adler32(0L, Z_NULL, 0); while (read_buffer(buffer, length) != EOF) { adler = adler32(adler, buffer, length); } if (adler != original_adler) error(); */ ZEXTERN uLong ZEXPORT adler32_z(uLong adler, const Bytef *buf, z_size_t len); /* Same as adler32(), but with a size_t length. */ /* ZEXTERN uLong ZEXPORT adler32_combine(uLong adler1, uLong adler2, z_off_t len2); Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note that the z_off_t type (like off_t) is a signed integer. If len2 is negative, the result has no meaning or utility. */ ZEXTERN uLong ZEXPORT crc32(uLong crc, const Bytef *buf, uInt len); /* Update a running CRC-32 with the bytes buf[0..len-1] and return the updated CRC-32. A CRC-32 value is in the range of a 32-bit unsigned integer. If buf is Z_NULL, this function returns the required initial value for the crc. Pre- and post-conditioning (one's complement) is performed within this function so it shouldn't be done by the application. Usage example: uLong crc = crc32(0L, Z_NULL, 0); while (read_buffer(buffer, length) != EOF) { crc = crc32(crc, buffer, length); } if (crc != original_crc) error(); */ ZEXTERN uLong ZEXPORT crc32_z(uLong crc, const Bytef *buf, z_size_t len); /* Same as crc32(), but with a size_t length. */ /* ZEXTERN uLong ZEXPORT crc32_combine(uLong crc1, uLong crc2, z_off_t len2); Combine two CRC-32 check values into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, CRC-32 check values were calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and len2. len2 must be non-negative. 
*/ /* ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t len2); Return the operator corresponding to length len2, to be used with crc32_combine_op(). len2 must be non-negative. */ ZEXTERN uLong ZEXPORT crc32_combine_op(uLong crc1, uLong crc2, uLong op); /* Give the same result as crc32_combine(), using op in place of len2. op is is generated from len2 by crc32_combine_gen(). This will be faster than crc32_combine() if the generated op is used more than once. */ /* various hacks, don't look :) */ /* deflateInit and inflateInit are macros to allow checking the zlib version * and the compiler's view of z_stream: */ ZEXTERN int ZEXPORT deflateInit_(z_streamp strm, int level, const char *version, int stream_size); ZEXTERN int ZEXPORT inflateInit_(z_streamp strm, const char *version, int stream_size); ZEXTERN int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy, const char *version, int stream_size); ZEXTERN int ZEXPORT inflateInit2_(z_streamp strm, int windowBits, const char *version, int stream_size); ZEXTERN int ZEXPORT inflateBackInit_(z_streamp strm, int windowBits, unsigned char FAR *window, const char *version, int stream_size); #ifdef Z_PREFIX_SET # define z_deflateInit(strm, level) \ deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) # define z_inflateInit(strm) \ inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) # define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) # define z_inflateInit2(strm, windowBits) \ inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ (int)sizeof(z_stream)) # define z_inflateBackInit(strm, windowBits, window) \ inflateBackInit_((strm), (windowBits), (window), \ ZLIB_VERSION, (int)sizeof(z_stream)) #else # define deflateInit(strm, level) \ deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) # define 
inflateInit(strm) \ inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) # define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) # define inflateInit2(strm, windowBits) \ inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ (int)sizeof(z_stream)) # define inflateBackInit(strm, windowBits, window) \ inflateBackInit_((strm), (windowBits), (window), \ ZLIB_VERSION, (int)sizeof(z_stream)) #endif #ifndef Z_SOLO /* gzgetc() macro and its supporting function and exposed data structure. Note * that the real internal state is much larger than the exposed structure. * This abbreviated structure exposes just enough for the gzgetc() macro. The * user should not mess with these exposed elements, since their names or * behavior could change in the future, perhaps even capriciously. They can * only be used by the gzgetc() macro. You have been warned. */ struct gzFile_s { unsigned have; unsigned char *next; z_off64_t pos; }; ZEXTERN int ZEXPORT gzgetc_(gzFile file); /* backward compatibility */ #ifdef Z_PREFIX_SET # undef z_gzgetc # define z_gzgetc(g) \ ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) #else # define gzgetc(g) \ ((g)->have ? 
((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) #endif /* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if * both are true, the application gets the *64 functions, and the regular * functions are changed to 64 bits) -- in case these are set on systems * without large file support, _LFS64_LARGEFILE must also be true */ #ifdef Z_LARGE64 ZEXTERN gzFile ZEXPORT gzopen64(const char *, const char *); ZEXTERN z_off64_t ZEXPORT gzseek64(gzFile, z_off64_t, int); ZEXTERN z_off64_t ZEXPORT gztell64(gzFile); ZEXTERN z_off64_t ZEXPORT gzoffset64(gzFile); ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off64_t); ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off64_t); ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off64_t); #endif #if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) # ifdef Z_PREFIX_SET # define z_gzopen z_gzopen64 # define z_gzseek z_gzseek64 # define z_gztell z_gztell64 # define z_gzoffset z_gzoffset64 # define z_adler32_combine z_adler32_combine64 # define z_crc32_combine z_crc32_combine64 # define z_crc32_combine_gen z_crc32_combine_gen64 # else # define gzopen gzopen64 # define gzseek gzseek64 # define gztell gztell64 # define gzoffset gzoffset64 # define adler32_combine adler32_combine64 # define crc32_combine crc32_combine64 # define crc32_combine_gen crc32_combine_gen64 # endif # ifndef Z_LARGE64 ZEXTERN gzFile ZEXPORT gzopen64(const char *, const char *); ZEXTERN z_off_t ZEXPORT gzseek64(gzFile, z_off_t, int); ZEXTERN z_off_t ZEXPORT gztell64(gzFile); ZEXTERN z_off_t ZEXPORT gzoffset64(gzFile); ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off_t); # endif #else ZEXTERN gzFile ZEXPORT gzopen(const char *, const char *); ZEXTERN z_off_t ZEXPORT gzseek(gzFile, z_off_t, int); ZEXTERN z_off_t ZEXPORT gztell(gzFile); ZEXTERN 
z_off_t ZEXPORT gzoffset(gzFile); ZEXTERN uLong ZEXPORT adler32_combine(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t); #endif #else /* Z_SOLO */ ZEXTERN uLong ZEXPORT adler32_combine(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t); #endif /* !Z_SOLO */ /* undocumented functions */ ZEXTERN const char * ZEXPORT zError(int); ZEXTERN int ZEXPORT inflateSyncPoint(z_streamp); ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table(void); ZEXTERN int ZEXPORT inflateUndermine(z_streamp, int); ZEXTERN int ZEXPORT inflateValidate(z_streamp, int); ZEXTERN unsigned long ZEXPORT inflateCodesUsed(z_streamp); ZEXTERN int ZEXPORT inflateResetKeep(z_streamp); ZEXTERN int ZEXPORT deflateResetKeep(z_streamp); #if defined(_WIN32) && !defined(Z_SOLO) ZEXTERN gzFile ZEXPORT gzopen_w(const wchar_t *path, const char *mode); #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifndef Z_SOLO ZEXTERN int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va); # endif #endif #ifdef __cplusplus } #endif #endif /* ZLIB_H */ ================================================ FILE: pypcode/zlib/zutil.c ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* zutil.c -- target dependent utility functions for the compression library * Copyright (C) 1995-2017 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #include "zutil.h" #ifndef Z_SOLO # include "gzguts.h" #endif z_const char * const z_errmsg[10] = { (z_const char *)"need dictionary", /* Z_NEED_DICT 2 */ (z_const char *)"stream end", /* Z_STREAM_END 1 */ (z_const char *)"", /* Z_OK 0 */ (z_const char *)"file error", /* Z_ERRNO (-1) */ (z_const char *)"stream error", /* Z_STREAM_ERROR (-2) */ (z_const char *)"data error", /* Z_DATA_ERROR (-3) */ (z_const char 
*)"insufficient memory", /* Z_MEM_ERROR (-4) */ (z_const char *)"buffer error", /* Z_BUF_ERROR (-5) */ (z_const char *)"incompatible version",/* Z_VERSION_ERROR (-6) */ (z_const char *)"" }; const char * ZEXPORT zlibVersion(void) { return ZLIB_VERSION; } uLong ZEXPORT zlibCompileFlags(void) { uLong flags; flags = 0; switch ((int)(sizeof(uInt))) { case 2: break; case 4: flags += 1; break; case 8: flags += 2; break; default: flags += 3; } switch ((int)(sizeof(uLong))) { case 2: break; case 4: flags += 1 << 2; break; case 8: flags += 2 << 2; break; default: flags += 3 << 2; } switch ((int)(sizeof(voidpf))) { case 2: break; case 4: flags += 1 << 4; break; case 8: flags += 2 << 4; break; default: flags += 3 << 4; } switch ((int)(sizeof(z_off_t))) { case 2: break; case 4: flags += 1 << 6; break; case 8: flags += 2 << 6; break; default: flags += 3 << 6; } #ifdef ZLIB_DEBUG flags += 1 << 8; #endif /* #if defined(ASMV) || defined(ASMINF) flags += 1 << 9; #endif */ #ifdef ZLIB_WINAPI flags += 1 << 10; #endif #ifdef BUILDFIXED flags += 1 << 12; #endif #ifdef DYNAMIC_CRC_TABLE flags += 1 << 13; #endif #ifdef NO_GZCOMPRESS flags += 1L << 16; #endif #ifdef NO_GZIP flags += 1L << 17; #endif #ifdef PKZIP_BUG_WORKAROUND flags += 1L << 20; #endif #ifdef FASTEST flags += 1L << 21; #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifdef NO_vsnprintf flags += 1L << 25; # ifdef HAS_vsprintf_void flags += 1L << 26; # endif # else # ifdef HAS_vsnprintf_void flags += 1L << 26; # endif # endif #else flags += 1L << 24; # ifdef NO_snprintf flags += 1L << 25; # ifdef HAS_sprintf_void flags += 1L << 26; # endif # else # ifdef HAS_snprintf_void flags += 1L << 26; # endif # endif #endif return flags; } #ifdef ZLIB_DEBUG #include # ifndef verbose # define verbose 0 # endif int ZLIB_INTERNAL z_verbose = verbose; void ZLIB_INTERNAL z_error(char *m) { fprintf(stderr, "%s\n", m); exit(1); } #endif /* exported to allow conversion of error code to string for compress() and * uncompress() */ const 
char * ZEXPORT zError(int err) { return ERR_MSG(err); } #if defined(_WIN32_WCE) && _WIN32_WCE < 0x800 /* The older Microsoft C Run-Time Library for Windows CE doesn't have * errno. We define it as a global variable to simplify porting. * Its value is always 0 and should not be used. */ int errno = 0; #endif #ifndef HAVE_MEMCPY void ZLIB_INTERNAL zmemcpy(Bytef* dest, const Bytef* source, uInt len) { if (len == 0) return; do { *dest++ = *source++; /* ??? to be unrolled */ } while (--len != 0); } int ZLIB_INTERNAL zmemcmp(const Bytef* s1, const Bytef* s2, uInt len) { uInt j; for (j = 0; j < len; j++) { if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; } return 0; } void ZLIB_INTERNAL zmemzero(Bytef* dest, uInt len) { if (len == 0) return; do { *dest++ = 0; /* ??? to be unrolled */ } while (--len != 0); } #endif #ifndef Z_SOLO #ifdef SYS16BIT #ifdef __TURBOC__ /* Turbo C in 16-bit mode */ # define MY_ZCALLOC /* Turbo C malloc() does not allow dynamic allocation of 64K bytes * and farmalloc(64K) returns a pointer with an offset of 8, so we * must fix the pointer. Warning: the pointer must be put back to its * original form in order to free it, use zcfree(). */ #define MAX_PTR 10 /* 10*64K = 640K */ local int next_ptr = 0; typedef struct ptr_table_s { voidpf org_ptr; voidpf new_ptr; } ptr_table; local ptr_table table[MAX_PTR]; /* This table is used to remember the original form of pointers * to large buffers (64K). Such pointers are normalized with a zero offset. * Since MSDOS is not a preemptive multitasking OS, this table is not * protected from concurrent access. This hack doesn't work anyway on * a protected system like OS/2. Use Microsoft C instead. */ voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, unsigned items, unsigned size) { voidpf buf; ulg bsize = (ulg)items*size; (void)opaque; /* If we allocate less than 65520 bytes, we assume that farmalloc * will return a usable pointer which doesn't have to be normalized. 
*/ if (bsize < 65520L) { buf = farmalloc(bsize); if (*(ush*)&buf != 0) return buf; } else { buf = farmalloc(bsize + 16L); } if (buf == NULL || next_ptr >= MAX_PTR) return NULL; table[next_ptr].org_ptr = buf; /* Normalize the pointer to seg:0 */ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; *(ush*)&buf = 0; table[next_ptr++].new_ptr = buf; return buf; } void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr) { int n; (void)opaque; if (*(ush*)&ptr != 0) { /* object < 64K */ farfree(ptr); return; } /* Find the original pointer */ for (n = 0; n < next_ptr; n++) { if (ptr != table[n].new_ptr) continue; farfree(table[n].org_ptr); while (++n < next_ptr) { table[n-1] = table[n]; } next_ptr--; return; } Assert(0, "zcfree: ptr not found"); } #endif /* __TURBOC__ */ #ifdef M_I86 /* Microsoft C in 16-bit mode */ # define MY_ZCALLOC #if (!defined(_MSC_VER) || (_MSC_VER <= 600)) # define _halloc halloc # define _hfree hfree #endif voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, uInt items, uInt size) { (void)opaque; return _halloc((long)items, size); } void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr) { (void)opaque; _hfree(ptr); } #endif /* M_I86 */ #endif /* SYS16BIT */ #ifndef MY_ZCALLOC /* Any system without a special alloc function */ #ifndef STDC extern voidp malloc(uInt size); extern voidp calloc(uInt items, uInt size); extern void free(voidpf ptr); #endif voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, unsigned items, unsigned size) { (void)opaque; return sizeof(uInt) > 2 ? 
(voidpf)malloc(items * size) : (voidpf)calloc(items, size); } void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr) { (void)opaque; free(ptr); } #endif /* MY_ZCALLOC */ #endif /* !Z_SOLO */ ================================================ FILE: pypcode/zlib/zutil.h ================================================ /* ### * IP: zlib License * NOTE: from zlib 1.3.1 */ /* zutil.h -- internal interface and configuration of the compression library * Copyright (C) 1995-2024 Jean-loup Gailly, Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* @(#) $Id$ */ #ifndef ZUTIL_H #define ZUTIL_H #ifdef HAVE_HIDDEN # define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) #else # define ZLIB_INTERNAL #endif #include "zlib.h" #if defined(STDC) && !defined(Z_SOLO) # if !(defined(_WIN32_WCE) && defined(_MSC_VER)) # include # endif # include # include #endif #ifndef local # define local static #endif /* since "static" is used to mean two completely different things in C, we define "local" for the non-static meaning of "static", for readability (compile with -Dlocal if your debugger can't find static symbols) */ typedef unsigned char uch; typedef uch FAR uchf; typedef unsigned short ush; typedef ush FAR ushf; typedef unsigned long ulg; #if !defined(Z_U8) && !defined(Z_SOLO) && defined(STDC) # include # if (ULONG_MAX == 0xffffffffffffffff) # define Z_U8 unsigned long # elif (ULLONG_MAX == 0xffffffffffffffff) # define Z_U8 unsigned long long # elif (UINT_MAX == 0xffffffffffffffff) # define Z_U8 unsigned # endif #endif extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ /* (size given to avoid silly warnings with Visual C++) */ #define ERR_MSG(err) z_errmsg[(err) < -6 || (err) > 2 ? 
9 : 2 - (err)] #define ERR_RETURN(strm,err) \ return (strm->msg = ERR_MSG(err), (err)) /* To be used only when the state is known to be valid */ /* common constants */ #ifndef DEF_WBITS # define DEF_WBITS MAX_WBITS #endif /* default windowBits for decompression. MAX_WBITS is for compression only */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* default memLevel */ #define STORED_BLOCK 0 #define STATIC_TREES 1 #define DYN_TREES 2 /* The three kinds of block type */ #define MIN_MATCH 3 #define MAX_MATCH 258 /* The minimum and maximum match lengths */ #define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ /* target dependencies */ #if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) # define OS_CODE 0x00 # ifndef Z_SOLO # if defined(__TURBOC__) || defined(__BORLANDC__) # if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) /* Allow compilation with ANSI keywords only enabled */ void _Cdecl farfree( void *block ); void *_Cdecl farmalloc( unsigned long nbytes ); # else # include # endif # else /* MSC or DJGPP */ # include # endif # endif #endif #ifdef AMIGA # define OS_CODE 1 #endif #if defined(VAXC) || defined(VMS) # define OS_CODE 2 # define F_OPEN(name, mode) \ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") #endif #ifdef __370__ # if __TARGET_LIB__ < 0x20000000 # define OS_CODE 4 # elif __TARGET_LIB__ < 0x40000000 # define OS_CODE 11 # else # define OS_CODE 8 # endif #endif #if defined(ATARI) || defined(atarist) # define OS_CODE 5 #endif #ifdef OS2 # define OS_CODE 6 # if defined(M_I86) && !defined(Z_SOLO) # include # endif #endif #if defined(MACOS) # define OS_CODE 7 #endif #ifdef __acorn # define OS_CODE 13 #endif #if defined(WIN32) && !defined(__CYGWIN__) # define OS_CODE 10 #endif #ifdef _BEOS_ # define OS_CODE 16 #endif #ifdef __TOS_OS400__ # define OS_CODE 18 #endif #ifdef __APPLE__ # define OS_CODE 19 #endif #if defined(__BORLANDC__) && !defined(MSDOS) 
#pragma warn -8004 #pragma warn -8008 #pragma warn -8066 #endif /* provide prototypes for these when building zlib without LFS */ #if !defined(_WIN32) && \ (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off_t); ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off_t); #endif /* common defaults */ #ifndef OS_CODE # define OS_CODE 3 /* assume Unix */ #endif #ifndef F_OPEN # define F_OPEN(name, mode) fopen((name), (mode)) #endif /* functions */ #if defined(pyr) || defined(Z_SOLO) # define NO_MEMCPY #endif #if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) /* Use our own functions for small and medium model with MSC <= 5.0. * You may have to use the same strategy for Borland C (untested). * The __SC__ check is for Symantec. */ # define NO_MEMCPY #endif #if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) # define HAVE_MEMCPY #endif #ifdef HAVE_MEMCPY # ifdef SMALL_MEDIUM /* MSDOS small or medium model */ # define zmemcpy _fmemcpy # define zmemcmp _fmemcmp # define zmemzero(dest, len) _fmemset(dest, 0, len) # else # define zmemcpy memcpy # define zmemcmp memcmp # define zmemzero(dest, len) memset(dest, 0, len) # endif #else void ZLIB_INTERNAL zmemcpy(Bytef* dest, const Bytef* source, uInt len); int ZLIB_INTERNAL zmemcmp(const Bytef* s1, const Bytef* s2, uInt len); void ZLIB_INTERNAL zmemzero(Bytef* dest, uInt len); #endif /* Diagnostic functions */ #ifdef ZLIB_DEBUG # include extern int ZLIB_INTERNAL z_verbose; extern void ZLIB_INTERNAL z_error(char *m); # define Assert(cond,msg) {if(!(cond)) z_error(msg);} # define Trace(x) {if (z_verbose>=0) fprintf x ;} # define Tracev(x) {if (z_verbose>0) fprintf x ;} # define Tracevv(x) {if (z_verbose>1) fprintf x ;} # define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;} # define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define 
Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif #ifndef Z_SOLO voidpf ZLIB_INTERNAL zcalloc(voidpf opaque, unsigned items, unsigned size); void ZLIB_INTERNAL zcfree(voidpf opaque, voidpf ptr); #endif #define ZALLOC(strm, items, size) \ (*((strm)->zalloc))((strm)->opaque, (items), (size)) #define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) #define TRY_FREE(s, p) {if (p) ZFREE(s, p);} /* Reverse the bytes in a 32-bit value */ #define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) #endif /* ZUTIL_H */ ================================================ FILE: pyproject.toml ================================================ [project] name = "pypcode" description = "Machine code disassembly and IR translation library" license = "BSD-2-Clause AND Apache-2.0 AND Zlib" license-files = [ "LICENSE.txt" ] readme = { file = "README.md", content-type = "text/markdown" } classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: 3.14", ] requires-python = ">=3.12" dynamic = [ "version" ] [project.urls] Homepage = "https://api.angr.io/projects/pypcode/en/latest/" Repository = "https://github.com/angr/pypcode" [project.optional-dependencies] docs = [ "furo", "ipython", "myst-parser", "sphinx", "sphinx-autodoc-typehints", ] testing = [ "pytest", "pytest-cov", "coverage", "gcovr", ] [build-system] requires = [ "setuptools", "nanobind", "cmake" ] build-backend = "setuptools.build_meta" [tool.setuptools] dynamic = { version = { attr = "pypcode.__version__.__version__" } } packages = [ "pypcode" ] ext-modules = [ { name = "pypcode.pypcode_native", sources = [] } ] [tool.black] line-length = 120 target-version = ['py312'] [tool.ruff] line-length = 120 extend-ignore = 
[ "E402", # Bottom imports ] [tool.pytest.ini_options] addopts = [ "--cov=pypcode", "--cov-report=term-missing", "--cov-branch", ] testpaths = [ "tests" ] [tool.coverage.run] branch = true source = [ "pypcode" ] parallel = true omit = [ "tests/*" ] [tool.coverage.report] show_missing = true skip_covered = true exclude_lines = [ "if TYPE_CHECKING:", "if __name__ == .__main__.:" ] ================================================ FILE: scripts/benchmark.py ================================================ #!/usr/bin/env python3 # pylint:disable=import-outside-toplevel,wrong-import-position """ Benchmark disassembly and IR lifting performance for pypcode and other libraries. """ import argparse import csv import functools import gc import hashlib import logging import os import pickle import random import sys import time from dataclasses import dataclass from typing import cast, Any from collections.abc import Callable, Iterable logging.basicConfig(level=logging.INFO) log = logging.getLogger(__name__) log.setLevel(logging.INFO) try: import_start_time = time.time() import capstone # type: ignore CAPSTONE_IMPORT_DURATION = time.time() - import_start_time del import_start_time HAVE_CAPSTONE = True except ImportError: HAVE_CAPSTONE = False try: import_start_time = time.time() import pypcode PYPCODE_IMPORT_DURATION = time.time() - import_start_time del import_start_time HAVE_PYPCODE = True except ImportError: HAVE_PYPCODE = False try: import_start_time = time.time() import archinfo import pyvex PYVEX_IMPORT_DURATION = time.time() - import_start_time del import_start_time HAVE_PYVEX = True except ImportError: HAVE_PYVEX = False @dataclass class Block: """Block to translate""" addr: int data: bytes @dataclass class BenchmarkResult: """Result of a benchmark run""" startup_duration: float translation_duration: float def get_file_hash(binary_path: str) -> str: log.info("Calculating sha256 of file '%s'", binary_path) m = hashlib.sha256() with open(binary_path, "rb") as f: 
m.update(f.read()) d = m.hexdigest() log.info("sha256:%s", d) return d def get_blocks(binary_path: str) -> list[Block]: blocks_file_path = f"blocks_{get_file_hash(binary_path)[0:8]}.cache" blocks: list[Block] = [] if not os.path.exists(blocks_file_path): log.info("Recovering blocks from CFG...") try: import angr logging.getLogger("angr").setLevel(logging.WARNING) logging.getLogger("cle").setLevel(logging.WARNING) logging.getLogger("pyvex").setLevel(logging.WARNING) logging.getLogger("claripy").setLevel(logging.WARNING) except ImportError: log.error("Install angr to build list of blocks") sys.exit(1) p = angr.Project(binary_path, auto_load_libs=False) cfg = p.analyses[angr.analyses.CFGFast].prep(show_progressbar=True)( resolve_indirect_jumps=False, force_smart_scan=False ) for n in cast(Iterable[angr.knowledge_plugins.cfg.cfg_model.CFGNode], cfg.model.nodes()): if n.byte_string: blocks.append(Block(n.addr, n.byte_string)) log.info("Saving blocks to file '%s' for subsequent benchmarks...", blocks_file_path) with open(blocks_file_path, "wb") as f: pickle.dump(blocks, f) else: log.info("Loading blocks from cache file '%s'...", blocks_file_path) with open(blocks_file_path, "rb") as f: blocks = pickle.load(f) return blocks def benchmark_pypcode(blocks: list[Block], iter_ops: bool = False, iter_varnodes: bool = False) -> BenchmarkResult: assert pypcode is not None start_time = time.time() ctx = pypcode.Context("x86:LE:64:default") startup_duration = time.time() - start_time start_time = time.time() count = 0 for block in blocks: t = ctx.translate(block.data, block.addr) if iter_ops and not iter_varnodes: for _ in t.ops: count += 1 if iter_varnodes: for op in t.ops: for _ in op.inputs: count += 1 translation_duration = time.time() - start_time return BenchmarkResult(startup_duration, translation_duration) def benchmark_pypcode_disassembly(blocks: list[Block]) -> BenchmarkResult: assert pypcode is not None start_time = time.time() ctx = pypcode.Context("x86:LE:64:default") 
startup_duration = time.time() - start_time start_time = time.time() size = 0 for block in blocks: for ins in ctx.disassemble(block.data, block.addr).instructions: size += ins.length translation_duration = time.time() - start_time return BenchmarkResult(startup_duration, translation_duration) def benchmark_pyvex(blocks: list[Block], **vex_args) -> BenchmarkResult: assert pyvex is not None start_time = time.time() arch = archinfo.ArchAMD64() startup_duration = time.time() - start_time start_time = time.time() for block in blocks: pyvex.lift(block.data, block.addr, arch, **vex_args) translation_duration = time.time() - start_time return BenchmarkResult(startup_duration, translation_duration) def benchmark_capstone(blocks: list[Block], lite: bool = False) -> BenchmarkResult: assert capstone is not None start_time = time.time() md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64) startup_duration = time.time() - start_time start_time = time.time() size = 0 if lite: for block in blocks: for ins in md.disasm_lite(block.data, block.addr): size += ins[1] else: for block in blocks: for ins in md.disasm(block.data, block.addr): size += ins.size translation_duration = time.time() - start_time return BenchmarkResult(startup_duration, translation_duration) def gen_benchmarks_from_configurations( name: str, benchmark_func: Callable, configurations: list[dict[str, Any]] ) -> list[tuple[str, Callable]]: benchmarks: list[tuple[str, Callable]] = [] for config in configurations: name_append = "" if config: name_append += f" ({', '.join(f'{k}={v}' for k, v in config.items())})" benchmarks.append((name + name_append, functools.partial(benchmark_func, **config))) return benchmarks def main() -> None: ap = argparse.ArgumentParser(description=__doc__) ap.add_argument( "-b", "--binary", default=sys.executable, help="Binary to benchmark with (Python interpreter by default)" ) ap.add_argument( "-c", "--coverage", default=1, type=float, help="Percentage of blocks to include in 
benchmark in (0,1] (default=1)", ) ap.add_argument( "--skip", nargs="*", default=[], help="Skip benchmarks for a particular package (pypcode, pyvex, capstone)" ) ap.add_argument("--csv", help="Save results to CSV file") args = ap.parse_args() # Display import times (use python -Ximporttime for more accurate import profiling) imports = [] if HAVE_CAPSTONE: imports.append(("capstone", capstone.__version__, CAPSTONE_IMPORT_DURATION)) if HAVE_PYPCODE: imports.append(("pypcode", pypcode.__version__, PYPCODE_IMPORT_DURATION)) if HAVE_PYVEX: imports.append(("pyvex", pyvex.__version__, PYVEX_IMPORT_DURATION)) for name, version, import_duration in imports: log.info("%s v%s took %.2f ms to import", name, version, import_duration * 1000) benchmarks: list[tuple[str, Callable]] = [] if HAVE_CAPSTONE and "capstone" not in args.skip: benchmarks.extend( gen_benchmarks_from_configurations( "capstone Disassemble", benchmark_capstone, [ {}, {"lite": True}, ], ) ) if HAVE_PYPCODE and "pypcode" not in args.skip: benchmarks.append(("pypcode Disassemble", benchmark_pypcode_disassembly)) benchmarks.extend( gen_benchmarks_from_configurations( "pypcode Lift", benchmark_pypcode, [ {}, {"iter_ops": True}, # Iterate over all ops {"iter_varnodes": True}, # Iterate over all ops and varnodes ], ) ) if HAVE_PYVEX and "pyvex" not in args.skip: benchmarks.extend( gen_benchmarks_from_configurations( "pyvex Lift", benchmark_pyvex, [ {}, {"opt_level": -1}, {"opt_level": 0}, {"skip_stmts": True}, {"collect_data_refs": True}, ], ) ) if not benchmarks: log.error("No benchmarks to run. 
Install pypcode to run a benchmark.") sys.exit(1) log.info("Using blocks from binary '%s' for benchmarking", args.binary) blocks = get_blocks(args.binary) num_blocks = len(blocks) # Handle reduced block sampling assert 0 < args.coverage <= 1, "Specified coverage percentage not in range (0, 1]" if args.coverage < 1: blocks = random.choices(blocks, k=int(num_blocks * args.coverage)) num_blocks = len(blocks) if num_blocks == 0: log.error("No blocks included in benchmark!") sys.exit(1) blocks_total_size = sum(len(block.data) for block in blocks) log.info("Benchmark includes %d blocks totaling %.1f KiB", num_blocks, blocks_total_size / 1024) gc.collect() results: list[tuple[str, BenchmarkResult]] = [] gc.disable() for name, benchmark in benchmarks: log.info("Benchmarking %s performance...", name) results.append((name, benchmark(blocks))) gc.enable() rows = [["Benchmark", "Startup ms", "Process s", "KiB/s", "kBlock/s", "us/Block"]] num_cols = len(rows[0]) for name, result in results: rows.append( [ name, f"{result.startup_duration * 1000:.3f}", f"{result.translation_duration:.3f}", f"{(blocks_total_size / result.translation_duration) / 1024:.2f}", f"{num_blocks / result.translation_duration / 1000:.2f}", f"{result.translation_duration / num_blocks * 1000000:.3f}", ] ) col_widths = [max(len(row[c]) for row in rows) for c in range(num_cols)] row = rows[0] header = " | ".join(row[c].ljust(col_widths[c]) for c in range(num_cols)) log.info("-" * len(header)) log.info("%s", header) log.info("-" * len(header)) for row in rows[1:]: log.info("%s", " | ".join(row[c].ljust(col_widths[c]) for c in range(num_cols))) if args.csv: with open(args.csv, "w", encoding="utf-8") as f: writer = csv.writer(f) for row in rows: writer.writerow(row) if __name__ == "__main__": main() ================================================ FILE: scripts/sleigh_download.sh ================================================ #!/bin/bash set -e set -x TAG=12.0.2 GHIDRA_SRC_DIR=ghidra_src_${TAG} git clone 
--depth=1 -b Ghidra_${TAG}_build https://github.com/NationalSecurityAgency/ghidra.git ${GHIDRA_SRC_DIR} # We just need Makefile and $(LIBSLA_SOURCE) defined inside Makefile. Do it this # way to make sure we stay up to date with the list of required files. SLEIGH_SRC_DIR=sleigh pushd ${GHIDRA_SRC_DIR}/Ghidra/Features/Decompiler/src/decompile/cpp/ # Touch fake dependency files recently removed upstream. Not having these triggers build steps. mkdir -p com_opt com_dbg touch com_opt/depend com_dbg/depend echo -e "$SLEIGH_SRC_DIR:\n\tmkdir -p $SLEIGH_SRC_DIR\n\tcp \$(LIBSLA_SOURCE) Makefile $SLEIGH_SRC_DIR" >> Makefile make $SLEIGH_SRC_DIR SLEIGH_SRC_DIR=${PWD}/${SLEIGH_SRC_DIR} popd mkdir ${TAG} mv $SLEIGH_SRC_DIR ${TAG} mv ${GHIDRA_SRC_DIR}/Ghidra/Processors ${TAG}/processors ================================================ FILE: setup.py ================================================ #!/usr/bin/env python3 import os from pathlib import Path import platform import shutil import struct import subprocess import sys from setuptools import setup from setuptools.command.build_ext import build_ext class BuildExtension(build_ext): """ Runs cmake to build the pypcode_native extension, sleigh binary, and runs sleigh to build .sla files. 
""" def run(self): try: subprocess.check_output(["cmake", "--version"]) except OSError as exc: raise RuntimeError("Please install CMake to build") from exc cross_compiling_for_macos_arm64 = ( platform.system() == "Darwin" and platform.machine() == "x86_64" and "arm64" in os.getenv("ARCHFLAGS", "") ) cross_compiling_for_macos_amd64 = ( platform.system() == "Darwin" and platform.machine() != "x86_64" and "x86_64" in os.getenv("ARCHFLAGS", "") ) cross_compiling = cross_compiling_for_macos_arm64 or cross_compiling_for_macos_amd64 root_dir = Path(__file__).parent.absolute() target_build_dir = root_dir / "build" / "native" host_build_dir = target_build_dir / "host" install_pkg_root_dir = (root_dir if self.inplace else Path(self.build_lib).absolute()) / "pypcode" install_pkg_bin_dir = install_pkg_root_dir / "bin" host_bin_root_dir = host_build_dir if cross_compiling else install_pkg_bin_dir sleigh_filename = "sleigh" + (".exe" if platform.system() == "Windows" else "") sleigh_bin = host_bin_root_dir / sleigh_filename specfiles_dir = install_pkg_root_dir / "processors" # Build sleigh and pypcode_native extension cmake_config_args = [ f"-DCMAKE_INSTALL_PREFIX={install_pkg_root_dir}", f"-DPython_EXECUTABLE={sys.executable}", ] cmake_build_args = [] if platform.system() == "Windows": is_64b = struct.calcsize("P") * 8 == 64 cmake_config_args += ["-A", "x64" if is_64b else "Win32"] cmake_build_args += ["--config", "Release"] target_cmake_config_args = cmake_config_args[::] if cross_compiling: target_cmake_config_args += [ "-DCMAKE_OSX_DEPLOYMENT_TARGET=10.14", "-DCMAKE_OSX_ARCHITECTURES=" + os.getenv("ARCHFLAGS"), ] subprocess.check_call(["cmake", "-S", ".", "-B", target_build_dir] + target_cmake_config_args, cwd=root_dir) subprocess.check_call( ["cmake", "--build", target_build_dir, "--parallel", "--verbose"] + cmake_build_args, cwd=root_dir, ) if cross_compiling: # Also build a host version of sleigh to process .sla files host_cmake_config_args = cmake_config_args 
subprocess.check_call(["cmake", "-S", ".", "-B", host_build_dir] + host_cmake_config_args, cwd=root_dir) subprocess.check_call( ["cmake", "--build", host_build_dir, "--parallel", "--verbose", "--target", "sleigh"] + cmake_build_args, cwd=root_dir, ) # Install extension and sleigh binary into target package if cross_compiling: # Note: Manually install because cmake install step may refuse to install binaries for foreign architectures install_pkg_bin_dir.mkdir(exist_ok=True) ext_path = next(target_build_dir.glob("pypcode_native.*")) shutil.copy(target_build_dir / sleigh_filename, install_pkg_bin_dir / sleigh_filename) shutil.copy(ext_path, install_pkg_root_dir / ext_path.name) else: subprocess.check_call(["cmake", "--install", target_build_dir], cwd=root_dir) # Build sla files subprocess.check_call([sleigh_bin, "-a", specfiles_dir]) def add_pkg_data_dirs(pkg, dirs): pkg_data = [] for d in dirs: for root, _, files in os.walk(os.path.join(pkg, d)): r = os.path.relpath(root, pkg) pkg_data.extend([os.path.join(r, f) for f in files]) return pkg_data setup( package_data={ "pypcode": add_pkg_data_dirs("pypcode", ["bin", "docs", "processors"]) + ["py.typed", "pypcode_native.pyi"] }, cmdclass={"build_ext": BuildExtension}, ) ================================================ FILE: tests/test_cli.py ================================================ #!/usr/bin/env python3 # pylint:disable=no-self-use import unittest import base64 import tempfile import sys import os import io from unittest import mock from pypcode.__main__ import main def run_cli(*args): with mock.patch("sys.argv", [sys.executable, *args]), mock.patch("sys.stdout", new=io.StringIO()) as fake_out: try: main() except SystemExit: pass return fake_out.getvalue() class TestCli(unittest.TestCase): """ Test the pypcode module command line interface """ def test_language_list(self): output = run_cli("-l") assert "x86:LE:64:default" in output def test_language_suggestions(self): output = run_cli("x86", "_") assert 
'Language "x86" not found.' in output assert "Suggestions:" in output assert "x86:LE:32:default" in output assert "x86:LE:64:default" in output def test_language_no_suggestions(self): output = run_cli("xyz", "_") assert "Suggestions:" not in output def test_translate(self): with tempfile.NamedTemporaryFile(delete=False) as tf: tf.write(base64.b64decode("McA5xnYRSInBg+EfigwKMAwHSP/A6+vD")) tf.close() path = tf.name try: output = run_cli("x86:LE:64:default", path) assert "0x17/1: RET" in output finally: os.unlink(path) def test_failed_translation(self): with tempfile.NamedTemporaryFile(delete=False) as tf: tf.write(b"\x40\x40") tf.close() path = tf.name try: output = run_cli("x86:LE:64:default", path) assert "An error occurred" in output finally: os.unlink(path) if __name__ == "__main__": unittest.main() ================================================ FILE: tests/test_pypcode.py ================================================ #!/usr/bin/env python3 # pylint:disable=no-self-use import gc import logging from unittest import main, TestCase from unittest.mock import create_autospec from typing import cast from pypcode import ( AddrSpace, Arch, ArchLanguage, BadDataError, Context, LowlevelError, OpCode, PcodeOp, TranslateFlags, Translation, UnimplError, Varnode, ) from pypcode.printing import OpFormat, PcodePrettyPrinter # logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) def get_imarks(translation: Translation) -> list[PcodeOp]: return [op for op in translation.ops if op.opcode == OpCode.IMARK] class ContextTests(TestCase): """ Basic Context tests """ def tearDown(self): gc.collect() def test_bad_context_language_type(self): with self.assertRaises(TypeError): Context(1234) def test_can_create_all_language_contexts(self): for arch in Arch.enumerate(): for lang in arch.languages: with self.subTest(lang=lang.id): log.debug("Creating context for %s", lang.id) Context(lang) def test_context_creation_failure(self): lang = 
ArchLanguage.from_id("x86:LE:64:default") bad_lang = ArchLanguage("/bad/arch/path", lang.ldef) with self.assertRaises(LowlevelError): Context(bad_lang) def test_context_premature_release(self): ctx = Context("x86:LE:64:default") tx = ctx.translate(b"\xc3") del ctx log.debug("Should not crash: %d", len(tx.ops[0].inputs[0].space.name)) del tx log.debug("--") ctx = Context("x86:LE:64:default") tx = ctx.translate(b"\xc3") op = tx.ops[0] del tx # Should not be released del ctx # Should not be released while op is alive log.debug("Should not crash: %d", len(op.inputs[0].space.name)) del op # Now ctx, tx can be released log.debug("--") ctx = Context("x86:LE:64:default") tx = ctx.translate(b"\xc3") vn = tx.ops[0].inputs[0] del tx del ctx log.debug("Should not crash: %d", len(vn.space.name)) del vn # Now ctx, tx can be released log.debug("--") ctx = Context("x86:LE:64:default") tx = ctx.translate(b"\xc3") space = tx.ops[0].inputs[0].space del tx del ctx log.debug("Should not crash: %d", len(space.name)) del space # Now ctx, tx can be released. Space is managed by context, so C++ obj should not be released. 
# NOTE(review): tail of a preceding test method truncated by this view;
# presumably a separator emitted after logging lifted ops — confirm indentation
# against the enclosing method when merging.
log.debug("--")


class RegistersTests(TestCase):
    """
    Register lookup behavior on a Context.
    """

    def test_registers(self):
        context = Context("x86:LE:64:default")
        assert "RAX" in context.registers

    def test_getRegisterName(self):
        context = Context("x86:LE:64:default")
        reg = context.registers["RAX"]
        # Round-trip: the (space, offset, size) triple maps back to the name.
        assert context.getRegisterName(reg.space, reg.offset, reg.size) == "RAX"


class AddrSpaceTests(TestCase):
    """
    AddrSpace attribute access.
    """

    def test_name(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\xeb\xfe")
        assert lifted.ops[1].inputs[0].space.name == "ram"


class VarnodeTests(TestCase):
    """
    Varnode accessor methods.
    """

    def test_getSpaceFromConst(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x48\x8b\x41\x01")  # mov rax, [rcx + 1]
        assert lifted.ops[2].inputs[0].getSpaceFromConst().name == "ram"

    def test_getRegisterName(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x48\x8b\x41\x01")  # mov rax, [rcx + 1]
        assert lifted.ops[1].inputs[0].getRegisterName() == "RCX"
        # Non-register varnodes yield an empty name.
        assert lifted.ops[1].inputs[1].getRegisterName() == ""

    def test_getUserDefinedOpName(self):
        context = Context("AARCH64:LE:64:AppleSilicon")
        context.setVariableDefault("ShowPAC", 1)
        context.setVariableDefault("PAC_clobber", 1)
        lifted = context.translate(b"\x7f\x23\x03\xd5")  # pacibsp
        # x30 = pacib(x30, sp)
        pacib_op = lifted.ops[1]
        assert pacib_op.opcode == OpCode.CALLOTHER
        assert pacib_op.output.getRegisterName() == "x30"
        assert pacib_op.inputs[0].getUserDefinedOpName() == "pacib"
        assert pacib_op.inputs[1].getRegisterName() == "x30"
        assert pacib_op.inputs[2].getRegisterName() == "sp"


class DisassembleTests(TestCase):
    """
    Context::disassemble behavior and arguments.
    """

    def test_disassemble(self):
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\x90\xeb\xfe")
        assert len(listing.instructions) == 2
        insn = listing.instructions[1]
        assert insn.addr.offset == 1
        assert insn.length == 2
        assert insn.mnem == "JMP"
        assert insn.body == "0x1"

    def test_decode_failure(self):
        context = Context("x86:LE:64:default")
        with self.assertRaises(BadDataError):
            context.disassemble(b"\x40\x40")

    def test_partial_decode_failure(self):
        # Decoding stops at the bad bytes; prior instructions are kept.
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\xff\xc0\x90\x40\x40")  # inc eax; nop; bad
        assert len(listing.instructions) == 2

    def test_not_cached(self):
        # Two different decodes at the same address must not alias each other.
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\xeb\xfe", 5)
        listing = context.disassemble(b"\xc3", 5)
        insn = listing.instructions[0]
        assert insn.addr.offset == 5
        assert insn.length == 1
        assert insn.mnem == "RET"
        assert insn.body == ""

    def test_arg_base_address(self):
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\xeb\xfe", 10)
        assert len(listing.instructions) == 1
        assert listing.instructions[0].mnem == "JMP"
        assert listing.instructions[0].body == "0xa"

    def test_arg_offset(self):
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\x90\xeb\xfe", offset=1)
        assert len(listing.instructions) == 1

    def test_arg_offset_out_of_range(self):
        context = Context("x86:LE:64:default")
        with self.assertRaises(IndexError):
            context.disassemble(b"\x90\xeb\xfe", offset=3)

    def test_arg_max_bytes(self):
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\x90\xeb\xfe", max_bytes=1)
        assert len(listing.instructions) == 1

    def test_arg_max_instructions(self):
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\x90\xeb\xfe", max_instructions=1)
        assert len(listing.instructions) == 1

    def test_pretty_printing(self):
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\x48\x31\xc0\xc3")
        rendered = str(listing)
        assert "0x0/3: XOR RAX,RAX" in rendered
        assert "0x3/1: RET" in rendered


class TranslateTests(TestCase):
    """
    Context::translate behavior and arguments.
    """

    def test_translate(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x48\x35\x78\x56\x34\x12\xc3")
        assert len(get_imarks(lifted)) == 2

    def test_decode_failure(self):
        context = Context("x86:LE:64:default")
        with self.assertRaises(BadDataError):
            context.translate(b"\x40\x40")

    def test_partial_decode_failure(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\xff\xc0\x40\x40")  # inc eax; bad
        assert len(get_imarks(lifted)) == 1

    def test_unimpl_failure(self):
        context = Context("Toy:BE:32:default")
        with self.assertRaises(UnimplError):
            context.translate(b"\xa8\x00")

    def test_partial_unimpl_failure(self):
        context = Context("Toy:BE:32:default")
        lifted = context.translate(b"\xd0\x00\xa8\x00")  # and r0, r0; unimpl
        assert len(get_imarks(lifted)) == 1

    def test_not_cached(self):
        # Re-translating at the same address must not return stale p-code.
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\xeb\xfe", 5)
        lifted = context.translate(b"\xc3", 5)
        assert lifted.ops[-1].opcode == OpCode.RETURN

    def test_translate_and_disassemble_not_cached(self):
        # Mixing disassemble and translate at one address, in either order,
        # must not cross-contaminate results.
        context = Context("x86:LE:64:default")
        listing = context.disassemble(b"\xeb\xfe", 5)
        lifted = context.translate(b"\xc3", 5)
        assert lifted.ops[-1].opcode == OpCode.RETURN
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\xc3", 5)
        listing = context.disassemble(b"\xeb\xfe", 5)
        assert len(listing.instructions) == 1
        insn = listing.instructions[0]
        assert insn.addr.offset == 5
        assert insn.length == 2
        assert insn.mnem == "JMP"
        assert insn.body == "0x5"

    def test_arg_base_address(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\xeb\xfe", 10)  # jmp $
        assert len(lifted.ops) == 2
        assert len(lifted.ops[0].inputs) == 1  # Check just one instruction decoded
        assert lifted.ops[0].inputs[0].offset == 10  # Check IMARK
        assert lifted.ops[1].inputs[0].offset == 10  # Check jump target

    def test_arg_offset(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x90\x90\xeb\xfe", offset=2)  # nop; nop; jmp $
        assert len(get_imarks(lifted)) == 1

    def test_arg_offset_out_of_range(self):
        context = Context("x86:LE:64:default")
        with self.assertRaises(IndexError):
            context.translate(b"\x90\x90\xeb\xfe", offset=10)  # nop; nop; jmp $

    def test_arg_max_bytes(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x90\x90\x90", max_bytes=1)
        assert len(get_imarks(lifted)) == 1

    def test_arg_max_instructions(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x90\x90\x90", max_instructions=2)
        assert len(get_imarks(lifted)) == 2

    def test_arg_flag_bb_terminating(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x90\xeb\xfe\x90\x90", flags=TranslateFlags.BB_TERMINATING)
        assert len(get_imarks(lifted)) == 2

    def test_delay_slot(self):
        context = Context("MIPS:BE:32:default")
        lifted = context.translate(b"\x10@\x00\x06\x00 \x08%", 0x4009F4)
        imarks = get_imarks(lifted)
        # Branch plus its delay-slot instruction fold into a single IMARK
        # covering both 4-byte instruction words.
        assert len(imarks) == 1
        mark = imarks[0]
        assert len(mark.inputs) == 2
        assert mark.inputs[0].offset == 0x4009F4
        assert mark.inputs[0].size == 4
        assert mark.inputs[1].offset == 0x4009F8
        assert mark.inputs[1].size == 4

    def test_pretty_printing(self):
        context = Context("x86:LE:64:default")
        lifted = context.translate(b"\x48\x31\xc0")
        assert "RAX = RAX ^ RAX" in str(lifted)


class PrintingTests(TestCase):
    """
    Pretty printer output, checked against mocked ops and varnodes.
    """

    def test_branches(self):
        cases = [
            (OpCode.BRANCH, "goto ram[123:4]"),
            (OpCode.BRANCHIND, "goto [ram[123:4]]"),
            (OpCode.CALL, "call ram[123:4]"),
            (OpCode.CALLIND, "call [ram[123:4]]"),
            (OpCode.RETURN, "return ram[123:4]"),
        ]
        for opcode, expected in cases:
            target = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
            target.space.name = "ram"
            target.offset = 0x123
            target.size = 4
            mock_op = cast(PcodeOp, create_autospec(PcodeOp, instance=True, spec_set=True))
            mock_op.opcode = opcode
            mock_op.output = None
            mock_op.inputs = [target]
            assert PcodePrettyPrinter.fmt_op(mock_op) == expected

    def test_cbranch(self):
        target = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        target.space.name = "ram"
        target.offset = 0x456
        target.size = 4
        condition = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        condition.space.name = "ram"
        condition.offset = 0x123
        condition.size = 1
        mock_op = cast(PcodeOp, create_autospec(PcodeOp, instance=True, spec_set=True))
        mock_op.opcode = OpCode.CBRANCH
        mock_op.output = None
        mock_op.inputs = [target, condition]
        assert PcodePrettyPrinter.fmt_op(mock_op) == "if (ram[123:1]) goto ram[456:4]"

    def test_load(self):
        dest = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        dest.space.name = "ram"
        dest.offset = 0x123
        dest.size = 1
        ram_space = cast(AddrSpace, create_autospec(AddrSpec := AddrSpace, instance=True, spec_set=True))
        ram_space.name = "ram"
        # LOAD's first input is a constant varnode naming the target space.
        space_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        space_arg.space.name = "const"
        space_arg.getSpaceFromConst.return_value = ram_space
        addr_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        addr_arg.space.name = "const"
        addr_arg.offset = 0x456
        addr_arg.size = 1
        mock_op = cast(PcodeOp, create_autospec(PcodeOp, instance=True, spec_set=True))
        mock_op.opcode = OpCode.LOAD
        mock_op.output = dest
        mock_op.inputs = [space_arg, addr_arg]
        assert PcodePrettyPrinter.fmt_op(mock_op) == "ram[123:1] = *[ram]0x456"

    def test_store(self):
        ram_space = cast(AddrSpace, create_autospec(AddrSpace, instance=True, spec_set=True))
        ram_space.name = "ram"
        # STORE's first input is a constant varnode naming the target space.
        space_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        space_arg.space.name = "const"
        space_arg.getSpaceFromConst.return_value = ram_space
        addr_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        addr_arg.space.name = "const"
        addr_arg.offset = 0x123
        addr_arg.size = 1
        value_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        value_arg.space.name = "const"
        value_arg.offset = 0x456
        value_arg.size = 1
        mock_op = cast(PcodeOp, create_autospec(PcodeOp, instance=True, spec_set=True))
        mock_op.opcode = OpCode.STORE
        mock_op.output = None
        mock_op.inputs = [space_arg, addr_arg, value_arg]
        assert PcodePrettyPrinter.fmt_op(mock_op) == "*[ram]0x123 = 0x456"

    def test_callother(self):
        udop_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        udop_arg.getUserDefinedOpName.return_value = "udop"
        call_arg = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        call_arg.space.name = "const"
        call_arg.offset = 0x456
        call_arg.size = 1
        mock_op = cast(PcodeOp, create_autospec(PcodeOp, instance=True, spec_set=True))
        mock_op.opcode = OpCode.CALLOTHER
        mock_op.output = None
        mock_op.inputs = [udop_arg, call_arg]
        assert PcodePrettyPrinter.fmt_op(mock_op) == "udop(0x456)"

    def test_no_regname(self):
        # Register varnodes without a known name fall back to space[offset:size].
        reg_vn = cast(Varnode, create_autospec(Varnode, instance=True, spec_set=True))
        reg_vn.space.name = "register"
        reg_vn.offset = 0x123
        reg_vn.size = 4
        reg_vn.getRegisterName.return_value = None
        assert OpFormat.fmt_vn(reg_vn) == "register[123:4]"


if __name__ == "__main__":
    main()